Diffstat (limited to 'unsupported')
-rw-r--r--  unsupported/Eigen/AdolcForward | 2
-rw-r--r--  unsupported/Eigen/AlignedVector3 | 40
-rw-r--r--  unsupported/Eigen/CMakeLists.txt | 31
-rw-r--r--  unsupported/Eigen/CXX11/CMakeLists.txt | 8
-rw-r--r--  unsupported/Eigen/CXX11/Tensor | 152
-rw-r--r--  unsupported/Eigen/CXX11/TensorSymmetry | 42
-rw-r--r--  unsupported/Eigen/CXX11/ThreadPool | 65
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/README.md | 1757
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/Tensor.h | 527
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorArgMax.h | 299
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorAssign.h | 181
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorBase.h | 1010
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorBroadcasting.h | 392
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorChipping.h | 384
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorConcatenation.h | 361
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h | 628
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorContractionBlocking.h | 56
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorContractionCuda.h | 1391
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorContractionMapper.h | 467
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h | 1052
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorConversion.h | 279
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorConvolution.h | 1104
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorCostModel.h | 212
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorCustomOp.h | 313
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorDevice.h | 68
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorDeviceCuda.h | 337
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorDeviceDefault.h | 81
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h | 122
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorDeviceThreadPool.h | 279
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorDimensionList.h | 236
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorDimensions.h | 428
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorEvalTo.h | 181
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorEvaluator.h | 633
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h | 288
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorExpr.h | 371
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorFFT.h | 651
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorFixedSize.h | 389
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorForcedEval.h | 167
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h | 109
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorFunctors.h | 489
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorGenerator.h | 185
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorGlobalFunctions.h | 33
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorIO.h | 79
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorImagePatch.h | 509
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorIndexList.h | 725
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorInflation.h | 229
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorInitializer.h | 82
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorIntDiv.h | 253
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorLayoutSwap.h | 209
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorMacros.h | 54
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorMap.h | 321
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorMeta.h | 218
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorMorphing.h | 888
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorPadding.h | 397
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorPatch.h | 269
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorRandom.h | 276
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h | 781
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorReductionCuda.h | 750
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorReductionSycl.h | 242
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorRef.h | 429
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorReverse.h | 288
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorScan.h | 287
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorShuffling.h | 264
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorStorage.h | 146
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorStriding.h | 338
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorSycl.h | 82
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorSyclConvertToDeviceExpression.h | 121
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorSyclExprConstructor.h | 239
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractAccessor.h | 204
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractFunctors.h | 177
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorSyclLeafCount.h | 114
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorSyclPlaceHolderExpr.h | 181
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorSyclRun.h | 70
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorSyclTuple.h | 234
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorTraits.h | 272
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorUInt128.h | 248
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorVolumePatch.h | 608
-rw-r--r--  unsupported/Eigen/CXX11/src/TensorSymmetry/DynamicSymmetry.h | 293
-rw-r--r--  unsupported/Eigen/CXX11/src/TensorSymmetry/StaticSymmetry.h | 236
-rw-r--r--  unsupported/Eigen/CXX11/src/TensorSymmetry/Symmetry.h | 338
-rw-r--r--  unsupported/Eigen/CXX11/src/TensorSymmetry/util/TemplateGroupTheory.h | 666
-rw-r--r--  unsupported/Eigen/CXX11/src/ThreadPool/EventCount.h | 233
-rw-r--r--  unsupported/Eigen/CXX11/src/ThreadPool/NonBlockingThreadPool.h | 274
-rw-r--r--  unsupported/Eigen/CXX11/src/ThreadPool/RunQueue.h | 210
-rw-r--r--  unsupported/Eigen/CXX11/src/ThreadPool/SimpleThreadPool.h | 154
-rw-r--r--  unsupported/Eigen/CXX11/src/ThreadPool/ThreadEnvironment.h | 38
-rw-r--r--  unsupported/Eigen/CXX11/src/ThreadPool/ThreadLocal.h | 22
-rw-r--r--  unsupported/Eigen/CXX11/src/ThreadPool/ThreadPoolInterface.h | 33
-rw-r--r--  unsupported/Eigen/CXX11/src/ThreadPool/ThreadYield.h | 20
-rw-r--r--  unsupported/Eigen/CXX11/src/util/CXX11Meta.h | 542
-rw-r--r--  unsupported/Eigen/CXX11/src/util/CXX11Workarounds.h | 88
-rw-r--r--  unsupported/Eigen/CXX11/src/util/EmulateArray.h | 267
-rw-r--r--  unsupported/Eigen/CXX11/src/util/EmulateCXX11Meta.h | 311
-rw-r--r--  unsupported/Eigen/CXX11/src/util/MaxSizeVector.h | 141
-rw-r--r--  unsupported/Eigen/EulerAngles | 43
-rw-r--r--  unsupported/Eigen/IterativeSolvers | 5
-rw-r--r--  unsupported/Eigen/KroneckerProduct | 2
-rw-r--r--  unsupported/Eigen/MPRealSupport | 104
-rw-r--r--  unsupported/Eigen/MatrixFunctions | 76
-rw-r--r--  unsupported/Eigen/OpenGLSupport | 20
-rw-r--r--  unsupported/Eigen/SVD | 39
-rw-r--r--  unsupported/Eigen/SparseExtra | 3
-rw-r--r--  unsupported/Eigen/SpecialFunctions | 63
-rw-r--r--  unsupported/Eigen/src/AutoDiff/AutoDiffJacobian.h | 49
-rwxr-xr-x (was -rw-r--r--)  unsupported/Eigen/src/AutoDiff/AutoDiffScalar.h | 224
-rw-r--r--  unsupported/Eigen/src/AutoDiff/CMakeLists.txt | 6
-rw-r--r--  unsupported/Eigen/src/BVH/CMakeLists.txt | 6
-rw-r--r--  unsupported/Eigen/src/CMakeLists.txt | 14
-rw-r--r--  unsupported/Eigen/src/Eigenvalues/ArpackSelfAdjointEigenSolver.h | 14
-rw-r--r--  unsupported/Eigen/src/EulerAngles/CMakeLists.txt | 6
-rw-r--r--  unsupported/Eigen/src/EulerAngles/EulerAngles.h | 386
-rw-r--r--  unsupported/Eigen/src/EulerAngles/EulerSystem.h | 326
-rw-r--r--  unsupported/Eigen/src/FFT/CMakeLists.txt | 6
-rw-r--r--  unsupported/Eigen/src/IterativeSolvers/CMakeLists.txt | 6
-rw-r--r--  unsupported/Eigen/src/IterativeSolvers/DGMRES.h | 61
-rw-r--r--  unsupported/Eigen/src/IterativeSolvers/GMRES.h | 409
-rw-r--r--  unsupported/Eigen/src/IterativeSolvers/IncompleteCholesky.h | 278
-rw-r--r--  unsupported/Eigen/src/IterativeSolvers/IncompleteLU.h | 39
-rw-r--r--  unsupported/Eigen/src/IterativeSolvers/MINRES.h | 87
-rw-r--r--  unsupported/Eigen/src/IterativeSolvers/Scaling.h | 6
-rw-r--r--  unsupported/Eigen/src/KroneckerProduct/CMakeLists.txt | 6
-rw-r--r--  unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h | 169
-rw-r--r--  unsupported/Eigen/src/LevenbergMarquardt/CMakeLists.txt | 6
-rw-r--r--  unsupported/Eigen/src/LevenbergMarquardt/LMcovar.h | 1
-rw-r--r--  unsupported/Eigen/src/LevenbergMarquardt/LMpar.h | 2
-rw-r--r--  unsupported/Eigen/src/LevenbergMarquardt/LMqrsolv.h | 9
-rw-r--r--  unsupported/Eigen/src/LevenbergMarquardt/LevenbergMarquardt.h | 31
-rw-r--r--  unsupported/Eigen/src/MatrixFunctions/CMakeLists.txt | 6
-rw-r--r--  unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h | 665
-rw-r--r--  unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h | 708
-rw-r--r--  unsupported/Eigen/src/MatrixFunctions/MatrixFunctionAtomic.h | 131
-rw-r--r--  unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h | 507
-rw-r--r--  unsupported/Eigen/src/MatrixFunctions/MatrixPower.h | 397
-rw-r--r--  unsupported/Eigen/src/MatrixFunctions/MatrixSquareRoot.h | 450
-rw-r--r--  unsupported/Eigen/src/MatrixFunctions/StemFunction.h | 172
-rw-r--r--  unsupported/Eigen/src/MoreVectorization/CMakeLists.txt | 6
-rw-r--r--  unsupported/Eigen/src/NonLinearOptimization/CMakeLists.txt | 6
-rw-r--r--  unsupported/Eigen/src/NonLinearOptimization/HybridNonLinearSolver.h | 4
-rw-r--r--  unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h | 25
-rw-r--r--  unsupported/Eigen/src/NumericalDiff/CMakeLists.txt | 6
-rw-r--r--  unsupported/Eigen/src/Polynomials/CMakeLists.txt | 6
-rw-r--r--  unsupported/Eigen/src/Polynomials/PolynomialSolver.h | 33
-rw-r--r--  unsupported/Eigen/src/Polynomials/PolynomialUtils.h | 2
-rw-r--r--  unsupported/Eigen/src/SVD/BDCSVD.h | 748
-rw-r--r--  unsupported/Eigen/src/SVD/CMakeLists.txt | 6
-rw-r--r--  unsupported/Eigen/src/SVD/JacobiSVD.h | 782
-rw-r--r--  unsupported/Eigen/src/SVD/SVDBase.h | 236
-rw-r--r--  unsupported/Eigen/src/SVD/TODOBdcsvd.txt | 29
-rw-r--r--  unsupported/Eigen/src/SVD/doneInBDCSVD.txt | 21
-rw-r--r--  unsupported/Eigen/src/Skyline/CMakeLists.txt | 6
-rw-r--r--  unsupported/Eigen/src/Skyline/SkylineProduct.h | 6
-rw-r--r--  unsupported/Eigen/src/SparseExtra/BlockSparseMatrix.h | 1079
-rw-r--r--  unsupported/Eigen/src/SparseExtra/CMakeLists.txt | 6
-rw-r--r--  unsupported/Eigen/src/SparseExtra/DynamicSparseMatrix.h | 71
-rw-r--r--  unsupported/Eigen/src/SparseExtra/MarketIO.h | 21
-rw-r--r--  unsupported/Eigen/src/SparseExtra/MatrixMarketIterator.h | 43
-rw-r--r--  unsupported/Eigen/src/SparseExtra/RandomSetter.h | 10
-rw-r--r--  unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsArrayAPI.h | 124
-rw-r--r--  unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsFunctors.h | 236
-rw-r--r--  unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsHalf.h | 47
-rw-r--r--  unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsImpl.h | 1565
-rw-r--r--  unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsPacketMath.h | 58
-rw-r--r--  unsupported/Eigen/src/SpecialFunctions/arch/CUDA/CudaSpecialFunctions.h | 165
-rw-r--r--  unsupported/Eigen/src/Splines/CMakeLists.txt | 6
-rw-r--r--  unsupported/Eigen/src/Splines/Spline.h | 74
-rw-r--r--  unsupported/Eigen/src/Splines/SplineFitting.h | 274
-rw-r--r--  unsupported/Eigen/src/Splines/SplineFwd.h | 11
-rw-r--r--  unsupported/doc/Overview.dox | 11
-rw-r--r--  unsupported/doc/examples/BVH_Example.cpp | 4
-rw-r--r--  unsupported/doc/examples/EulerAngles.cpp | 46
-rw-r--r--  unsupported/test/CMakeLists.txt | 177
-rw-r--r--  unsupported/test/EulerAngles.cpp | 208
-rw-r--r--  unsupported/test/FFTW.cpp | 32
-rw-r--r--  unsupported/test/NonLinearOptimization.cpp | 46
-rw-r--r--  unsupported/test/alignedvector3.cpp | 29
-rw-r--r--  unsupported/test/autodiff.cpp | 206
-rw-r--r--  unsupported/test/autodiff_scalar.cpp | 83
-rw-r--r--  unsupported/test/bdcsvd.cpp | 213
-rw-r--r--  unsupported/test/cxx11_eventcount.cpp | 142
-rw-r--r--  unsupported/test/cxx11_meta.cpp | 357
-rw-r--r--  unsupported/test/cxx11_non_blocking_thread_pool.cpp | 107
-rw-r--r--  unsupported/test/cxx11_runqueue.cpp | 235
-rw-r--r--  unsupported/test/cxx11_tensor_argmax.cpp | 294
-rw-r--r--  unsupported/test/cxx11_tensor_argmax_cuda.cu | 254
-rw-r--r--  unsupported/test/cxx11_tensor_assign.cpp | 370
-rw-r--r--  unsupported/test/cxx11_tensor_broadcast_sycl.cpp | 74
-rw-r--r--  unsupported/test/cxx11_tensor_broadcasting.cpp | 194
-rw-r--r--  unsupported/test/cxx11_tensor_cast_float16_cuda.cu | 82
-rw-r--r--  unsupported/test/cxx11_tensor_casts.cpp | 115
-rw-r--r--  unsupported/test/cxx11_tensor_chipping.cpp | 425
-rw-r--r--  unsupported/test/cxx11_tensor_comparisons.cpp | 84
-rw-r--r--  unsupported/test/cxx11_tensor_complex_cuda.cu | 153
-rw-r--r--  unsupported/test/cxx11_tensor_complex_cwise_ops_cuda.cu | 97
-rw-r--r--  unsupported/test/cxx11_tensor_concatenation.cpp | 137
-rw-r--r--  unsupported/test/cxx11_tensor_const.cpp | 62
-rw-r--r--  unsupported/test/cxx11_tensor_contract_cuda.cu | 216
-rw-r--r--  unsupported/test/cxx11_tensor_contraction.cpp | 545
-rw-r--r--  unsupported/test/cxx11_tensor_convolution.cpp | 149
-rw-r--r--  unsupported/test/cxx11_tensor_cuda.cu | 1287
-rw-r--r--  unsupported/test/cxx11_tensor_custom_index.cpp | 100
-rw-r--r--  unsupported/test/cxx11_tensor_custom_op.cpp | 111
-rw-r--r--  unsupported/test/cxx11_tensor_device.cu | 390
-rw-r--r--  unsupported/test/cxx11_tensor_device_sycl.cpp | 31
-rw-r--r--  unsupported/test/cxx11_tensor_dimension.cpp | 69
-rw-r--r--  unsupported/test/cxx11_tensor_empty.cpp | 40
-rw-r--r--  unsupported/test/cxx11_tensor_expr.cpp | 314
-rw-r--r--  unsupported/test/cxx11_tensor_fft.cpp | 273
-rw-r--r--  unsupported/test/cxx11_tensor_fixed_size.cpp | 261
-rw-r--r--  unsupported/test/cxx11_tensor_forced_eval.cpp | 79
-rw-r--r--  unsupported/test/cxx11_tensor_forced_eval_sycl.cpp | 70
-rw-r--r--  unsupported/test/cxx11_tensor_generator.cpp | 91
-rw-r--r--  unsupported/test/cxx11_tensor_ifft.cpp | 154
-rw-r--r--  unsupported/test/cxx11_tensor_image_patch.cpp | 757
-rw-r--r--  unsupported/test/cxx11_tensor_index_list.cpp | 386
-rw-r--r--  unsupported/test/cxx11_tensor_inflation.cpp | 81
-rw-r--r--  unsupported/test/cxx11_tensor_intdiv.cpp | 147
-rw-r--r--  unsupported/test/cxx11_tensor_io.cpp | 136
-rw-r--r--  unsupported/test/cxx11_tensor_layout_swap.cpp | 61
-rw-r--r--  unsupported/test/cxx11_tensor_lvalue.cpp | 42
-rw-r--r--  unsupported/test/cxx11_tensor_map.cpp | 277
-rw-r--r--  unsupported/test/cxx11_tensor_math.cpp | 46
-rw-r--r--  unsupported/test/cxx11_tensor_mixed_indices.cpp | 53
-rw-r--r--  unsupported/test/cxx11_tensor_morphing.cpp | 485
-rw-r--r--  unsupported/test/cxx11_tensor_notification.cpp | 81
-rw-r--r--  unsupported/test/cxx11_tensor_of_complex.cpp | 103
-rw-r--r--  unsupported/test/cxx11_tensor_of_const_values.cpp | 105
-rw-r--r--  unsupported/test/cxx11_tensor_of_float16_cuda.cu | 494
-rw-r--r--  unsupported/test/cxx11_tensor_of_strings.cpp | 152
-rw-r--r--  unsupported/test/cxx11_tensor_padding.cpp | 93
-rw-r--r--  unsupported/test/cxx11_tensor_patch.cpp | 172
-rw-r--r--  unsupported/test/cxx11_tensor_random.cpp | 78
-rw-r--r--  unsupported/test/cxx11_tensor_random_cuda.cu | 88
-rw-r--r--  unsupported/test/cxx11_tensor_reduction.cpp | 508
-rw-r--r--  unsupported/test/cxx11_tensor_reduction_cuda.cu | 157
-rw-r--r--  unsupported/test/cxx11_tensor_reduction_sycl.cpp | 138
-rw-r--r--  unsupported/test/cxx11_tensor_ref.cpp | 248
-rw-r--r--  unsupported/test/cxx11_tensor_reverse.cpp | 190
-rw-r--r--  unsupported/test/cxx11_tensor_roundings.cpp | 62
-rw-r--r--  unsupported/test/cxx11_tensor_scan.cpp | 110
-rw-r--r--  unsupported/test/cxx11_tensor_scan_cuda.cu | 79
-rw-r--r--  unsupported/test/cxx11_tensor_shuffling.cpp | 228
-rw-r--r--  unsupported/test/cxx11_tensor_simple.cpp | 327
-rw-r--r--  unsupported/test/cxx11_tensor_striding.cpp | 119
-rw-r--r--  unsupported/test/cxx11_tensor_sugar.cpp | 81
-rw-r--r--  unsupported/test/cxx11_tensor_sycl.cpp | 159
-rw-r--r--  unsupported/test/cxx11_tensor_symmetry.cpp | 818
-rw-r--r--  unsupported/test/cxx11_tensor_thread_pool.cpp | 373
-rw-r--r--  unsupported/test/cxx11_tensor_uint128.cpp | 160
-rw-r--r--  unsupported/test/cxx11_tensor_volume_patch.cpp | 112
-rw-r--r--  unsupported/test/forward_adolc.cpp | 4
-rw-r--r--  unsupported/test/jacobisvd.cpp | 198
-rw-r--r--  unsupported/test/kronecker_product.cpp | 89
-rw-r--r--  unsupported/test/levenberg_marquardt.cpp | 93
-rw-r--r--  unsupported/test/matrix_function.cpp | 6
-rw-r--r--  unsupported/test/matrix_functions.h | 42
-rw-r--r--  unsupported/test/matrix_power.cpp | 171
-rw-r--r--  unsupported/test/minres.cpp | 19
-rw-r--r--  unsupported/test/mpreal/mpreal.h | 854
-rw-r--r--  unsupported/test/mpreal_support.cpp | 10
-rw-r--r--  unsupported/test/polynomialsolver.cpp | 7
-rw-r--r--  unsupported/test/sparse_extra.cpp | 1
-rw-r--r--  unsupported/test/special_functions.cpp | 345
-rw-r--r--  unsupported/test/splines.cpp | 73
-rw-r--r--  unsupported/test/svd_common.h | 261
264 files changed, 53983 insertions, 5813 deletions
diff --git a/unsupported/Eigen/AdolcForward b/unsupported/Eigen/AdolcForward
index 2627decd0..15f5f0731 100644
--- a/unsupported/Eigen/AdolcForward
+++ b/unsupported/Eigen/AdolcForward
@@ -25,7 +25,7 @@
#ifndef NUMBER_DIRECTIONS
# define NUMBER_DIRECTIONS 2
#endif
-#include <adolc/adouble.h>
+#include <adolc/adtl.h>
// adolc defines some very stupid macros:
#if defined(malloc)
diff --git a/unsupported/Eigen/AlignedVector3 b/unsupported/Eigen/AlignedVector3
index 7b45e6cce..47a86d4c0 100644
--- a/unsupported/Eigen/AlignedVector3
+++ b/unsupported/Eigen/AlignedVector3
@@ -57,6 +57,11 @@ template<typename _Scalar> class AlignedVector3
inline Index rows() const { return 3; }
inline Index cols() const { return 1; }
+
+ Scalar* data() { return m_coeffs.data(); }
+ const Scalar* data() const { return m_coeffs.data(); }
+ Index innerStride() const { return 1; }
+ Index outerStride() const { return 3; }
inline const Scalar& coeff(Index row, Index col) const
{ return m_coeffs.coeff(row, col); }
@@ -100,7 +105,7 @@ template<typename _Scalar> class AlignedVector3
};
template<typename Derived>
- inline explicit AlignedVector3(const MatrixBase<Derived>& other)
+ inline AlignedVector3(const MatrixBase<Derived>& other)
{
generic_assign_selector<Derived>::run(*this,other.derived());
}
@@ -108,6 +113,12 @@ template<typename _Scalar> class AlignedVector3
inline AlignedVector3& operator=(const AlignedVector3& other)
{ m_coeffs = other.m_coeffs; return *this; }
+ template <typename Derived>
+ inline AlignedVector3& operator=(const MatrixBase<Derived>& other)
+ {
+ generic_assign_selector<Derived>::run(*this,other.derived());
+ return *this;
+ }
inline AlignedVector3 operator+(const AlignedVector3& other) const
{ return AlignedVector3(m_coeffs + other.m_coeffs); }
@@ -148,7 +159,7 @@ template<typename _Scalar> class AlignedVector3
m_coeffs /= norm();
}
- inline AlignedVector3 normalized()
+ inline AlignedVector3 normalized() const
{
return AlignedVector3(m_coeffs / norm());
}
@@ -177,12 +188,35 @@ template<typename _Scalar> class AlignedVector3
}
template<typename Derived>
- inline bool isApprox(const MatrixBase<Derived>& other, RealScalar eps=NumTraits<Scalar>::dummy_precision()) const
+ inline bool isApprox(const MatrixBase<Derived>& other, const RealScalar& eps=NumTraits<Scalar>::dummy_precision()) const
{
return m_coeffs.template head<3>().isApprox(other,eps);
}
+
+ CoeffType& coeffs() { return m_coeffs; }
+ const CoeffType& coeffs() const { return m_coeffs; }
};
+namespace internal {
+
+template<typename _Scalar>
+struct eval<AlignedVector3<_Scalar>, Dense>
+{
+ typedef const AlignedVector3<_Scalar>& type;
+};
+
+template<typename Scalar>
+struct evaluator<AlignedVector3<Scalar> >
+ : evaluator<Matrix<Scalar,4,1> >
+{
+ typedef AlignedVector3<Scalar> XprType;
+ typedef evaluator<Matrix<Scalar,4,1> > Base;
+
+ evaluator(const XprType &m) : Base(m.coeffs()) {}
+};
+
+}
+
//@}
}
diff --git a/unsupported/Eigen/CMakeLists.txt b/unsupported/Eigen/CMakeLists.txt
index e1fbf97e2..631a06014 100644
--- a/unsupported/Eigen/CMakeLists.txt
+++ b/unsupported/Eigen/CMakeLists.txt
@@ -1,11 +1,32 @@
-set(Eigen_HEADERS AdolcForward AlignedVector3 ArpackSupport AutoDiff BVH FFT IterativeSolvers KroneckerProduct LevenbergMarquardt
- MatrixFunctions MoreVectorization MPRealSupport NonLinearOptimization NumericalDiff OpenGLSupport Polynomials
- Skyline SparseExtra Splines
- )
+set(Eigen_HEADERS
+ AdolcForward
+ AlignedVector3
+ ArpackSupport
+ AutoDiff
+ BVH
+ EulerAngles
+ FFT
+ IterativeSolvers
+ KroneckerProduct
+ LevenbergMarquardt
+ MatrixFunctions
+ MoreVectorization
+ MPRealSupport
+ NonLinearOptimization
+ NumericalDiff
+ OpenGLSupport
+ Polynomials
+ Skyline
+ SparseExtra
+ SpecialFunctions
+ Splines
+ )
install(FILES
${Eigen_HEADERS}
DESTINATION ${INCLUDE_INSTALL_DIR}/unsupported/Eigen COMPONENT Devel
)
-add_subdirectory(src)
+install(DIRECTORY src DESTINATION ${INCLUDE_INSTALL_DIR}/unsupported/Eigen COMPONENT Devel FILES_MATCHING PATTERN "*.h")
+
+add_subdirectory(CXX11)
diff --git a/unsupported/Eigen/CXX11/CMakeLists.txt b/unsupported/Eigen/CXX11/CMakeLists.txt
new file mode 100644
index 000000000..385ed240c
--- /dev/null
+++ b/unsupported/Eigen/CXX11/CMakeLists.txt
@@ -0,0 +1,8 @@
+set(Eigen_CXX11_HEADERS Tensor TensorSymmetry ThreadPool)
+
+install(FILES
+ ${Eigen_CXX11_HEADERS}
+ DESTINATION ${INCLUDE_INSTALL_DIR}/unsupported/Eigen/CXX11 COMPONENT Devel
+ )
+
+install(DIRECTORY src DESTINATION ${INCLUDE_INSTALL_DIR}/unsupported/Eigen/CXX11 COMPONENT Devel FILES_MATCHING PATTERN "*.h")
diff --git a/unsupported/Eigen/CXX11/Tensor b/unsupported/Eigen/CXX11/Tensor
new file mode 100644
index 000000000..7ecb4c74d
--- /dev/null
+++ b/unsupported/Eigen/CXX11/Tensor
@@ -0,0 +1,152 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+// Copyright (C) 2013 Christian Seiler <christian@iwakd.de>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+//#ifndef EIGEN_CXX11_TENSOR_MODULE
+//#define EIGEN_CXX11_TENSOR_MODULE
+
+#include "../../../Eigen/Core"
+
+#ifdef EIGEN_USE_SYCL
+#undef min
+#undef max
+#undef isnan
+#undef isinf
+#undef isfinite
+#include <SYCL/sycl.hpp>
+#include <map>
+#include <memory>
+#include <utility>
+#endif
+
+#include <Eigen/src/Core/util/DisableStupidWarnings.h>
+
+#include "../SpecialFunctions"
+#include "src/util/CXX11Meta.h"
+#include "src/util/MaxSizeVector.h"
+
+/** \defgroup CXX11_Tensor_Module Tensor Module
+ *
+ * This module provides a Tensor class for storing arbitrarily indexed
+ * objects.
+ *
+ * \code
+ * #include <Eigen/CXX11/Tensor>
+ * \endcode
+ */
+
+#include <cmath>
+#include <cstddef>
+#include <cstring>
+
+#ifdef _WIN32
+typedef __int16 int16_t;
+typedef unsigned __int16 uint16_t;
+typedef __int32 int32_t;
+typedef unsigned __int32 uint32_t;
+typedef __int64 int64_t;
+typedef unsigned __int64 uint64_t;
+#else
+#include <stdint.h>
+#endif
+
+#if __cplusplus > 199711 || EIGEN_COMP_MSVC >= 1900
+#include <random>
+#endif
+
+#ifdef _WIN32
+#include <windows.h>
+#elif defined(__APPLE__)
+#include <mach/mach_time.h>
+#else
+#include <time.h>
+#endif
+
+#ifdef EIGEN_USE_THREADS
+#include "ThreadPool"
+#endif
+
+#ifdef EIGEN_USE_GPU
+#include <iostream>
+#include <cuda_runtime.h>
+#if __cplusplus >= 201103L
+#include <atomic>
+#include <unistd.h>
+#endif
+#endif
+
+#include "src/Tensor/TensorMacros.h"
+#include "src/Tensor/TensorForwardDeclarations.h"
+#include "src/Tensor/TensorMeta.h"
+#include "src/Tensor/TensorFunctors.h"
+#include "src/Tensor/TensorCostModel.h"
+#include "src/Tensor/TensorDeviceDefault.h"
+#include "src/Tensor/TensorDeviceThreadPool.h"
+#include "src/Tensor/TensorDeviceCuda.h"
+#include "src/Tensor/TensorDeviceSycl.h"
+#include "src/Tensor/TensorIndexList.h"
+#include "src/Tensor/TensorDimensionList.h"
+#include "src/Tensor/TensorDimensions.h"
+#include "src/Tensor/TensorInitializer.h"
+#include "src/Tensor/TensorTraits.h"
+#include "src/Tensor/TensorRandom.h"
+#include "src/Tensor/TensorUInt128.h"
+#include "src/Tensor/TensorIntDiv.h"
+#include "src/Tensor/TensorGlobalFunctions.h"
+
+#include "src/Tensor/TensorBase.h"
+
+#include "src/Tensor/TensorEvaluator.h"
+#include "src/Tensor/TensorExpr.h"
+#include "src/Tensor/TensorReduction.h"
+#include "src/Tensor/TensorReductionCuda.h"
+#include "src/Tensor/TensorArgMax.h"
+#include "src/Tensor/TensorConcatenation.h"
+#include "src/Tensor/TensorContractionMapper.h"
+#include "src/Tensor/TensorContractionBlocking.h"
+#include "src/Tensor/TensorContraction.h"
+#include "src/Tensor/TensorContractionThreadPool.h"
+#include "src/Tensor/TensorContractionCuda.h"
+#include "src/Tensor/TensorConversion.h"
+#include "src/Tensor/TensorConvolution.h"
+#include "src/Tensor/TensorFFT.h"
+#include "src/Tensor/TensorPatch.h"
+#include "src/Tensor/TensorImagePatch.h"
+#include "src/Tensor/TensorVolumePatch.h"
+#include "src/Tensor/TensorBroadcasting.h"
+#include "src/Tensor/TensorChipping.h"
+#include "src/Tensor/TensorInflation.h"
+#include "src/Tensor/TensorLayoutSwap.h"
+#include "src/Tensor/TensorMorphing.h"
+#include "src/Tensor/TensorPadding.h"
+#include "src/Tensor/TensorReverse.h"
+#include "src/Tensor/TensorShuffling.h"
+#include "src/Tensor/TensorStriding.h"
+#include "src/Tensor/TensorCustomOp.h"
+#include "src/Tensor/TensorEvalTo.h"
+#include "src/Tensor/TensorForcedEval.h"
+#include "src/Tensor/TensorGenerator.h"
+#include "src/Tensor/TensorAssign.h"
+#include "src/Tensor/TensorScan.h"
+
+#include "src/Tensor/TensorSycl.h"
+#include "src/Tensor/TensorExecutor.h"
+#include "src/Tensor/TensorDevice.h"
+
+#include "src/Tensor/TensorStorage.h"
+#include "src/Tensor/Tensor.h"
+#include "src/Tensor/TensorFixedSize.h"
+#include "src/Tensor/TensorMap.h"
+#include "src/Tensor/TensorRef.h"
+
+#include "src/Tensor/TensorIO.h"
+
+#include <Eigen/src/Core/util/ReenableStupidWarnings.h>
+
+//#endif // EIGEN_CXX11_TENSOR_MODULE
diff --git a/unsupported/Eigen/CXX11/TensorSymmetry b/unsupported/Eigen/CXX11/TensorSymmetry
new file mode 100644
index 000000000..fb1b0c0fb
--- /dev/null
+++ b/unsupported/Eigen/CXX11/TensorSymmetry
@@ -0,0 +1,42 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2013 Christian Seiler <christian@iwakd.de>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSORSYMMETRY_MODULE
+#define EIGEN_CXX11_TENSORSYMMETRY_MODULE
+
+#include <unsupported/Eigen/CXX11/Tensor>
+
+#include <Eigen/src/Core/util/DisableStupidWarnings.h>
+
+#include "src/util/CXX11Meta.h"
+
+/** \defgroup CXX11_TensorSymmetry_Module Tensor Symmetry Module
+ *
+ * This module provides classes that allow for the definition of
+ * symmetries w.r.t. tensor indices.
+ *
+ * Including this module will implicitly include the Tensor module.
+ *
+ * \code
+ * #include <Eigen/CXX11/TensorSymmetry>
+ * \endcode
+ */
+
+#include "src/TensorSymmetry/util/TemplateGroupTheory.h"
+#include "src/TensorSymmetry/Symmetry.h"
+#include "src/TensorSymmetry/StaticSymmetry.h"
+#include "src/TensorSymmetry/DynamicSymmetry.h"
+
+#include <Eigen/src/Core/util/ReenableStupidWarnings.h>
+
+#endif // EIGEN_CXX11_TENSORSYMMETRY_MODULE
+
+/*
+ * kate: space-indent on; indent-width 2; mixedindent off; indent-mode cstyle;
+ */
diff --git a/unsupported/Eigen/CXX11/ThreadPool b/unsupported/Eigen/CXX11/ThreadPool
new file mode 100644
index 000000000..09d637e9a
--- /dev/null
+++ b/unsupported/Eigen/CXX11/ThreadPool
@@ -0,0 +1,65 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2016 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_THREADPOOL_MODULE
+#define EIGEN_CXX11_THREADPOOL_MODULE
+
+#include "../../../Eigen/Core"
+
+#include <Eigen/src/Core/util/DisableStupidWarnings.h>
+
+/** \defgroup CXX11_ThreadPool_Module C++11 ThreadPool Module
+ *
+ * This module provides two thread pool implementations:
+ *  - a simple reference implementation
+ *  - a faster non-blocking implementation
+ *
+ * This module requires C++11.
+ *
+ * \code
+ * #include <Eigen/CXX11/ThreadPool>
+ * \endcode
+ */
+
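+// A minimal usage sketch (an illustrative assumption based on the pool
+// classes included below, not an official example):
+//
+//   Eigen::NonBlockingThreadPool pool(4 /* number of threads */);
+//   pool.Schedule([]() { /* work to run on the pool */ });
+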
+
+// The code depends on CXX11, so only include the module if the
+// compiler supports it.
+#if __cplusplus > 199711L || EIGEN_COMP_MSVC >= 1900
+#include <cstddef>
+#include <cstring>
+#include <stdint.h>
+#include <time.h>
+
+#include <vector>
+#include <atomic>
+#include <condition_variable>
+#include <deque>
+#include <mutex>
+#include <thread>
+#include <functional>
+#include <memory>
+
+#include "src/util/CXX11Meta.h"
+#include "src/util/MaxSizeVector.h"
+
+#include "src/ThreadPool/ThreadLocal.h"
+#include "src/ThreadPool/ThreadYield.h"
+#include "src/ThreadPool/EventCount.h"
+#include "src/ThreadPool/RunQueue.h"
+#include "src/ThreadPool/ThreadPoolInterface.h"
+#include "src/ThreadPool/ThreadEnvironment.h"
+#include "src/ThreadPool/SimpleThreadPool.h"
+#include "src/ThreadPool/NonBlockingThreadPool.h"
+
+#endif
+
+#include <Eigen/src/Core/util/ReenableStupidWarnings.h>
+
+#endif // EIGEN_CXX11_THREADPOOL_MODULE
+
diff --git a/unsupported/Eigen/CXX11/src/Tensor/README.md b/unsupported/Eigen/CXX11/src/Tensor/README.md
new file mode 100644
index 000000000..02146527b
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/README.md
@@ -0,0 +1,1757 @@
+# Eigen Tensors
+
+Tensors are multidimensional arrays of elements. Elements are typically scalars,
+but more complex types such as strings are also supported.
+
+[TOC]
+
+## Tensor Classes
+
+You can manipulate a tensor with one of the following classes. They all live
+in the namespace ```::Eigen```.
+
+
+### Class Tensor<data_type, rank>
+
+This is the class to use to create a tensor and allocate memory for it. The
+class is templatized with the tensor datatype, such as float or int, and the
+tensor rank. The rank is the number of dimensions, for example rank 2 is a
+matrix.
+
+Tensors of this class are resizable. For example, if you assign a tensor of a
+different size to a Tensor, that tensor is resized to match its new value.
+
+#### Constructor Tensor<data_type, rank>(size0, size1, ...)
+
+Constructor for a Tensor. The constructor must be passed ```rank``` integers
+indicating the sizes of the instance along each of the ```rank```
+dimensions.
+
+ // Create a tensor of rank 3 of sizes 2, 3, 4. This tensor owns
+ // memory to hold 24 floating point values (24 = 2 x 3 x 4).
+ Tensor<float, 3> t_3d(2, 3, 4);
+
+ // Resize t_3d by assigning a tensor of different sizes, but same rank.
+ t_3d = Tensor<float, 3>(3, 4, 3);
+
+#### Constructor Tensor<data_type, rank>(size_array)
+
+Constructor where the sizes are specified as an array of values instead of an
+explicit list of parameters. The array type to use is
+```Eigen::array<Eigen::Index>```. The array can be constructed automatically
+from an initializer list.
+
+ // Create a tensor of strings of rank 2 with sizes 5, 7.
+ Tensor<string, 2> t_2d({5, 7});
+
+
+### Class TensorFixedSize<data_type, Sizes<size0, size1, ...>>
+
+Class to use for tensors of fixed size, where the size is known at compile
+time. Fixed sized tensors can provide very fast computations because all their
+dimensions are known by the compiler. FixedSize tensors are not resizable.
+
+If the total number of elements in a fixed size tensor is small enough, the
+tensor data is held on the stack and causes no heap allocation or free.
+
+ // Create a 4 x 3 tensor of floats.
+ TensorFixedSize<float, Sizes<4, 3>> t_4x3;
+
+### Class TensorMap<Tensor<data_type, rank>>
+
+This is the class to use to create a tensor on top of memory allocated and
+owned by another part of your code. It lets you view any piece of allocated
+memory as a Tensor. Instances of this class do not own the memory where the
+data are stored.
+
+A TensorMap is not resizable because it does not own the memory where its data
+are stored.
+
+#### Constructor TensorMap<Tensor<data_type, rank>>(data, size0, size1, ...)
+
+Constructor for a Tensor. The constructor must be passed a pointer to the
+storage for the data, and "rank" size attributes. The storage has to be
+large enough to hold all the data.
+
+ // Map a tensor of ints on top of stack-allocated storage.
+ int storage[128]; // 2 x 4 x 2 x 8 = 128
+    TensorMap<Tensor<int, 4>> t_4d(storage, 2, 4, 2, 8);
+
+    // The same storage can be viewed as a different tensor.
+    // You can also pass the sizes as an array.
+    TensorMap<Tensor<int, 2>> t_2d(storage, 16, 8);
+
+    // You can also map fixed-size tensors.  Here we get a 1d view of
+    // the 2d fixed-size tensor.
+    TensorFixedSize<float, Sizes<4, 3>> t_4x3;
+    TensorMap<Tensor<float, 1>> t_12(t_4x3.data(), 12);
+
+
+#### Class TensorRef
+
+See Assigning to a TensorRef below.
+
+## Accessing Tensor Elements
+
+#### <data_type> tensor(index0, index1...)
+
+Return the element at position ```(index0, index1...)``` in tensor
+```tensor```. You must pass as many parameters as the rank of ```tensor```.
+The expression can be used as an l-value to set the value of the element at the
+specified position. The value returned is of the datatype of the tensor.
+
+ // Set the value of the element at position (0, 1, 0);
+ Tensor<float, 3> t_3d(2, 3, 4);
+ t_3d(0, 1, 0) = 12.0f;
+
+ // Initialize all elements to random values.
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 4; ++k) {
+ t_3d(i, j, k) = ...some random value...;
+ }
+ }
+ }
+
+ // Print elements of a tensor.
+ for (int i = 0; i < 2; ++i) {
+ LOG(INFO) << t_3d(i, 0, 0);
+ }
+
+
+## TensorLayout
+
+The tensor library supports 2 layouts: ```ColMajor``` (the default) and
+```RowMajor```. Only the default column major layout is currently fully
+supported, and it is therefore not recommended to attempt to use the row major
+layout at the moment.
+
+The layout of a tensor is optionally specified as part of its type. If not
+specified explicitly column major is assumed.
+
+ Tensor<float, 3, ColMajor> col_major; // equivalent to Tensor<float, 3>
+ TensorMap<Tensor<float, 3, RowMajor> > row_major(data, ...);
+
+All the arguments to an expression must use the same layout. Attempting to mix
+different layouts will result in a compilation error.
+
+It is possible to change the layout of a tensor or an expression using the
+```swap_layout()``` method. Note that this will also reverse the order of the
+dimensions.
+
+ Tensor<float, 2, ColMajor> col_major(2, 4);
+ Tensor<float, 2, RowMajor> row_major(2, 4);
+
+ Tensor<float, 2> col_major_result = col_major; // ok, layouts match
+ Tensor<float, 2> col_major_result = row_major; // will not compile
+
+ // Simple layout swap
+ col_major_result = row_major.swap_layout();
+ eigen_assert(col_major_result.dimension(0) == 4);
+ eigen_assert(col_major_result.dimension(1) == 2);
+
+ // Swap the layout and preserve the order of the dimensions
+ array<int, 2> shuffle(1, 0);
+ col_major_result = row_major.swap_layout().shuffle(shuffle);
+ eigen_assert(col_major_result.dimension(0) == 2);
+ eigen_assert(col_major_result.dimension(1) == 4);
+
+
+## Tensor Operations
+
+The Eigen Tensor library provides a vast library of operations on Tensors:
+numerical operations such as addition and multiplication, geometry operations
+such as slicing and shuffling, etc. These operations are available as methods
+of the Tensor classes, and in some cases as operator overloads. For example
+the following code computes the elementwise addition of two tensors:
+
+ Tensor<float, 3> t1(2, 3, 4);
+ ...set some values in t1...
+ Tensor<float, 3> t2(2, 3, 4);
+ ...set some values in t2...
+ // Set t3 to the element wise sum of t1 and t2
+ Tensor<float, 3> t3 = t1 + t2;
+
+While the code above looks easy enough, it is important to understand that the
+expression ```t1 + t2``` is not actually adding the values of the tensors. The
+expression instead constructs a "tensor operator" object of the class
+TensorCwiseBinaryOp<scalar_sum>, which has references to the tensors
+```t1``` and ```t2```. This is a small C++ object that knows how to add
+```t1``` and ```t2```. It is only when the value of the expression is assigned
+to the tensor ```t3``` that the addition is actually performed. Technically,
+this happens through the overloading of ```operator=()``` in the Tensor class.
+
+This mechanism for computing tensor expressions allows for lazy evaluation and
+optimizations which are what make the tensor library very fast.
+
+Of course, the tensor operators do nest, and the expression ```t1 + t2 *
+0.3f``` is actually represented with the (approximate) tree of operators:
+
+ TensorCwiseBinaryOp<scalar_sum>(t1, TensorCwiseUnaryOp<scalar_mul>(t2, 0.3f))
+
+
+### Tensor Operations and C++ "auto"
+
+Because Tensor operations create tensor operators, the C++ ```auto``` keyword
+does not have its intuitive meaning. Consider these 2 lines of code:
+
+ Tensor<float, 3> t3 = t1 + t2;
+ auto t4 = t1 + t2;
+
+In the first line we allocate the tensor ```t3``` and it will contain the
+result of the addition of ```t1``` and ```t2```. In the second line, ```t4```
+is actually the tree of tensor operators that will compute the addition of
+```t1``` and ```t2```. In fact, ```t4``` is *not* a tensor and you cannot get
+the values of its elements:
+
+ Tensor<float, 3> t3 = t1 + t2;
+ cout << t3(0, 0, 0); // OK prints the value of t1(0, 0, 0) + t2(0, 0, 0)
+
+ auto t4 = t1 + t2;
+ cout << t4(0, 0, 0); // Compilation error!
+
+When you use ```auto``` you do not get a Tensor as a result but instead a
+non-evaluated expression. So only use ```auto``` to delay evaluation.
+
+Unfortunately, there is no single underlying concrete type for holding
+non-evaluated expressions, hence you have to use auto in the case when you do
+want to hold non-evaluated expressions.
+
+When you need the results of a set of tensor computations you have to assign
+the result to a Tensor that will be capable of holding them. This can be
+either a normal Tensor, a fixed size Tensor, or a TensorMap on an existing
+piece of memory. All the following will work:
+
+ auto t4 = t1 + t2;
+
+ Tensor<float, 3> result = t4; // Could also be: result(t4);
+ cout << result(0, 0, 0);
+
+    TensorMap<Tensor<float, 4>> result(<a float* with enough space>, <size0>, ...) = t4;
+ cout << result(0, 0, 0);
+
+ TensorFixedSize<float, Sizes<size0, ...>> result = t4;
+ cout << result(0, 0, 0);
+
+Until you need the results, you can keep the operation around, and even reuse
+it for additional operations. As long as you keep the expression as an
+operation, no computation is performed.
+
+ // One way to compute exp((t1 + t2) * 0.2f);
+ auto t3 = t1 + t2;
+ auto t4 = t3 * 0.2f;
+ auto t5 = t4.exp();
+ Tensor<float, 3> result = t5;
+
+ // Another way, exactly as efficient as the previous one:
+ Tensor<float, 3> result = ((t1 + t2) * 0.2f).exp();
+
+### Controlling When Expressions Are Evaluated
+
+There are several ways to control when expressions are evaluated:
+
+* Assignment to a Tensor, TensorFixedSize, or TensorMap.
+* Use of the eval() method.
+* Assignment to a TensorRef.
+
+#### Assigning to a Tensor, TensorFixedSize, or TensorMap.
+
+The most common way to evaluate an expression is to assign it to a Tensor. In
+the example below, the ```auto``` declarations make the intermediate values
+"Operations", not Tensors, and do not cause the expressions to be evaluated.
+The assignment to the Tensor ```result``` causes the evaluation of all the
+operations.
+
+ auto t3 = t1 + t2; // t3 is an Operation.
+ auto t4 = t3 * 0.2f; // t4 is an Operation.
+ auto t5 = t4.exp(); // t5 is an Operation.
+ Tensor<float, 3> result = t5; // The operations are evaluated.
+
+If you know the rank and sizes of the Operation value you can assign the
+Operation to a TensorFixedSize instead of a Tensor, which is a bit more
+efficient.
+
+    // We know that the result is a 4x4x2 tensor!
+    TensorFixedSize<float, Sizes<4, 4, 2>> result = t5;
+
+Similarly, assigning an expression to a TensorMap causes its evaluation. Like
+tensors of type TensorFixedSize, TensorMaps cannot be resized so they have to
+have the rank and sizes of the expression that is assigned to them.
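+
+For example, a minimal sketch (the dimensions are assumed to match the
+expression, reusing ```t1``` and ```t2``` from above):
+
+    float buffer[2 * 3 * 4];
+    TensorMap<Tensor<float, 3>> result(buffer, 2, 3, 4);
+    result = t1 + t2;  // Evaluated immediately into "buffer".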
+
+#### Calling eval().
+
+When you compute large composite expressions, you sometimes want to tell Eigen
+that an intermediate value in the expression tree is worth evaluating ahead of
+time. This is done by inserting a call to the ```eval()``` method of the
+expression Operation.
+
+ // The previous example could have been written:
+ Tensor<float, 3> result = ((t1 + t2) * 0.2f).exp();
+
+ // If you want to compute (t1 + t2) once ahead of time you can write:
+ Tensor<float, 3> result = ((t1 + t2).eval() * 0.2f).exp();
+
+Semantically, calling ```eval()``` is equivalent to materializing the value of
+the expression in a temporary Tensor of the right size. The code above in
+effect does:
+
+ // .eval() knows the size!
+    TensorFixedSize<float, Sizes<4, 4, 2>> tmp = t1 + t2;
+ Tensor<float, 3> result = (tmp * 0.2f).exp();
+
+Note that the return value of ```eval()``` is itself an Operation, so the
+following code does not do what you may think:
+
+ // Here t3 is an evaluation Operation. t3 has not been evaluated yet.
+ auto t3 = (t1 + t2).eval();
+
+ // You can use t3 in another expression. Still no evaluation.
+ auto t4 = (t3 * 0.2f).exp();
+
+ // The value is evaluated when you assign the Operation to a Tensor, using
+    // an intermediate tensor to represent t3.
+ Tensor<float, 3> result = t4;
+
+While in the examples above calling ```eval()``` does not make a difference in
+performance, in other cases it can make a huge difference. In the expression
+below the ```broadcast()``` expression causes the ```X.maximum()``` expression
+to be evaluated many times:
+
+ Tensor<...> X ...;
+ Tensor<...> Y = ((X - X.maximum(depth_dim).reshape(dims2d).broadcast(bcast))
+ * beta).exp();
+
+Inserting a call to ```eval()``` between the ```maximum()``` and
+```reshape()``` calls guarantees that ```maximum()``` is only computed once,
+and greatly speeds up execution:
+
+ Tensor<...> Y =
+ ((X - X.maximum(depth_dim).eval().reshape(dims2d).broadcast(bcast))
+ * beta).exp();
+
+In the other example below, the tensor ```Y``` is both used in the expression
+and its assignment. This is an aliasing problem and if the evaluation is not
+done in the right order Y will be updated incrementally during the evaluation
+resulting in bogus results:
+
+ Tensor<...> Y ...;
+ Y = Y / (Y.sum(depth_dim).reshape(dims2d).broadcast(bcast));
+
+Inserting a call to ```eval()``` between the ```sum()``` and ```reshape()```
+expressions ensures that the sum is computed before any updates to ```Y``` are
+done.
+
+ Y = Y / (Y.sum(depth_dim).eval().reshape(dims2d).broadcast(bcast));
+
+Note that an eval around the full right hand side expression is not needed
+because the generated code has to compute the i-th value of the right hand
+side before assigning it to the left hand side.
+
+However, if you were assigning the expression value to a shuffle of ```Y```
+then you would need to force an eval for correctness by adding an ```eval()```
+call for the right hand side:
+
+ Y.shuffle(...) =
+ (Y / (Y.sum(depth_dim).eval().reshape(dims2d).broadcast(bcast))).eval();
+
+
+#### Assigning to a TensorRef.
+
+If you need to access only a few elements from the value of an expression you
+can avoid materializing the value in a full tensor by using a TensorRef.
+
+A TensorRef is a small wrapper class for any Eigen Operation. It provides
+overloads for the ```()``` operator that let you access individual values in
+the expression. TensorRef is convenient, because the Operations themselves do
+not provide a way to access individual elements.
+
+ // Create a TensorRef for the expression. The expression is not
+ // evaluated yet.
+ TensorRef<Tensor<float, 3> > ref = ((t1 + t2) * 0.2f).exp();
+
+ // Use "ref" to access individual elements. The expression is evaluated
+ // on the fly.
+ float at_0 = ref(0, 0, 0);
+ cout << ref(0, 1, 0);
+
+Only use TensorRef when you need a subset of the values of the expression.
+TensorRef only computes the values you access. However note that if you are
+going to access all the values it will be much faster to materialize the
+results in a Tensor first.
+
+In some cases, if the full Tensor result would be very large, you may save
+memory by accessing it as a TensorRef. But not always. So don't count on it.
+
+
+### Controlling How Expressions Are Evaluated
+
+The tensor library provides several implementations of the various operations
+such as contractions and convolutions. The implementations are optimized for
+different environments: single threaded on CPU, multi threaded on CPU, or on
+a GPU using CUDA. Additional implementations may be added later.
+
+You can choose which implementation to use with the ```device()``` call. If
+you do not choose an implementation explicitly the default implementation that
+uses a single thread on the CPU is used.
+
+The default implementation has been optimized for recent Intel CPUs, taking
+advantage of SSE, AVX, and FMA instructions. Work is ongoing to tune the
+library on ARM CPUs. Note that you need to pass compiler-dependent flags
+to enable the use of SSE, AVX, and other instructions.
+
+For example, the following code adds two tensors using the default
+single-threaded CPU implementation:
+
+ Tensor<float, 2> a(30, 40);
+ Tensor<float, 2> b(30, 40);
+ Tensor<float, 2> c = a + b;
+
+To choose a different implementation you have to insert a ```device()``` call
+before the assignment of the result. For technical C++ reasons this requires
+that the Tensor for the result be declared on its own. This means that you
+have to know the size of the result.
+
+ Eigen::Tensor<float, 2> c(30, 40);
+ c.device(...) = a + b;
+
+The call to ```device()``` must be the last call on the left of the operator=.
+
+You must pass to the ```device()``` call an Eigen device object. There are
+presently three devices you can use: DefaultDevice, ThreadPoolDevice and
+GpuDevice.
+
+
+#### Evaluating With the DefaultDevice
+
+This is exactly the same as not inserting a ```device()``` call.
+
+ DefaultDevice my_device;
+ c.device(my_device) = a + b;
+
+#### Evaluating with a Thread Pool
+
+    // Create a thread pool and the Eigen ThreadPoolDevice on top of it.
+    Eigen::SimpleThreadPool pool(4 /* number of threads in the pool */);
+    Eigen::ThreadPoolDevice my_device(&pool, 4 /* number of threads to use */);
+
+ // Now just use the device when evaluating expressions.
+ Eigen::Tensor<float, 2> c(30, 50);
+ c.device(my_device) = a.contract(b, dot_product_dims);
+
+
+#### Evaluating On GPU
+
+This is presently a bit more complicated than just using a thread pool device.
+You need to create a GPU device, and you also need to explicitly allocate the
+memory for the tensors with CUDA.
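+
+As a rough sketch only (```CudaStreamDevice``` and ```GpuDevice``` come from
+the TensorDeviceCuda.h header included by this module; the sizes are
+arbitrary, and error checking is omitted):
+
+    // Allocate the tensors on the device.
+    size_t bytes = 30 * 40 * sizeof(float);
+    float *d_a, *d_b, *d_c;
+    cudaMalloc(&d_a, bytes);
+    cudaMalloc(&d_b, bytes);
+    cudaMalloc(&d_c, bytes);
+
+    // View the device memory as tensors.
+    Eigen::TensorMap<Eigen::Tensor<float, 2>> gpu_a(d_a, 30, 40);
+    Eigen::TensorMap<Eigen::Tensor<float, 2>> gpu_b(d_b, 30, 40);
+    Eigen::TensorMap<Eigen::Tensor<float, 2>> gpu_c(d_c, 30, 40);
+
+    // Evaluate the expression on the GPU.
+    Eigen::CudaStreamDevice stream;
+    Eigen::GpuDevice gpu_device(&stream);
+    gpu_c.device(gpu_device) = gpu_a + gpu_b;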
+
+
+## API Reference
+
+### Datatypes
+
+In the documentation of the tensor methods and Operations we mention datatypes
+that are tensor-type specific:
+
+#### <Tensor-Type>::Dimensions
+
+Acts like an array of ints. Has an ```int size``` attribute, and can be
+indexed like an array to access individual values. Used to represent the
+dimensions of a tensor. See ```dimensions()```.
+
+#### <Tensor-Type>::Index
+
+Acts like an ```int```. Used for indexing tensors along their dimensions. See
+```operator()```, ```dimension()```, and ```size()```.
+
+#### <Tensor-Type>::Scalar
+
+Represents the datatype of individual tensor elements. For example, for a
+```Tensor<float>```, ```Scalar``` is the type ```float```. See
+```setConstant()```.
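+
+For instance (a trivial illustrative sketch):
+
+    // Scalar resolves to float for a Tensor of floats.
+    Eigen::Tensor<float, 2>::Scalar x = 1.5f;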
+
+#### <Operation>
+
+We use this pseudo type to indicate that a tensor Operation is returned by a
+method. We indicate in the text the type and dimensions of the tensor that the
+Operation returns after evaluation.
+
+The Operation will have to be evaluated, for example by assigning it to a
+tensor, before you can access the values of the resulting tensor. You can also
+access the values through a TensorRef.
+
+
+## Built-in Tensor Methods
+
+These are usual C++ methods that act on tensors immediately. They are not
+Operations which provide delayed evaluation of their results. Unless specified
+otherwise, all the methods listed below are available on all tensor classes:
+Tensor, TensorFixedSize, and TensorMap.
+
+## Metadata
+
+### int NumDimensions
+
+Constant value indicating the number of dimensions of a Tensor. This is also
+known as the tensor "rank".
+
+ Eigen::Tensor<float, 2> a(3, 4);
+ cout << "Dims " << a.NumDimensions;
+ => Dims 2
+
+### Dimensions dimensions()
+
+Returns an array-like object representing the dimensions of the tensor.
+The actual type of the dimensions() result is <Tensor-Type>::Dimensions.
+
+ Eigen::Tensor<float, 2> a(3, 4);
+ const Eigen::Tensor<float, 2>::Dimensions& d = a.dimensions();
+ cout << "Dim size: " << d.size << ", dim 0: " << d[0]
+ << ", dim 1: " << d[1];
+ => Dim size: 2, dim 0: 3, dim 1: 4
+
+If you use a C++11 compiler, you can use ```auto``` to simplify the code:
+
+ const auto& d = a.dimensions();
+ cout << "Dim size: " << d.size << ", dim 0: " << d[0]
+ << ", dim 1: " << d[1];
+ => Dim size: 2, dim 0: 3, dim 1: 4
+
+### Index dimension(Index n)
+
+Returns the n-th dimension of the tensor. The actual type of the
+```dimension()``` result is ```<Tensor-Type>::Index```, but you can
+always use it like an int.
+
+ Eigen::Tensor<float, 2> a(3, 4);
+ int dim1 = a.dimension(1);
+ cout << "Dim 1: " << dim1;
+ => Dim 1: 4
+
+### Index size()
+
+Returns the total number of elements in the tensor. This is the product of all
+the tensor dimensions. The actual type of the ```size()``` result is
+```<Tensor-Type>::Index```, but you can always use it like an int.
+
+ Eigen::Tensor<float, 2> a(3, 4);
+ cout << "Size: " << a.size();
+ => Size: 12
+
+
+### Getting Dimensions From An Operation
+
+A few operations provide ```dimensions()``` directly,
+e.g. ```TensorReslicingOp```. Most operations defer calculating dimensions
+until the operation is being evaluated. If you need access to the dimensions
+of a deferred operation, you can wrap it in a TensorRef (see Assigning to a
+TensorRef above), which provides ```dimensions()``` and ```dimension()``` as
+above.
+
+TensorRef can also wrap the plain Tensor types, so this is a useful idiom in
+templated contexts where the underlying object could be either a raw Tensor
+or some deferred operation (e.g. a slice of a Tensor). In this case, the
+template code can wrap the object in a TensorRef and reason about its
+dimensionality while remaining agnostic to the underlying type.
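+
+For example, a small sketch of this idiom (the expression is arbitrary):
+
+    Eigen::Tensor<float, 2> a(3, 4);
+    Eigen::TensorRef<Eigen::Tensor<float, 2>> ref = a * 2.0f;
+    cout << "Rows: " << ref.dimension(0) << " Cols: " << ref.dimension(1);
+    => Rows: 3 Cols: 4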
+
+
+## Constructors
+
+### Tensor
+
+Creates a tensor of the specified size. The number of arguments must be equal
+to the rank of the tensor. The content of the tensor is not initialized.
+
+ Eigen::Tensor<float, 2> a(3, 4);
+ cout << "NumRows: " << a.dimension(0) << " NumCols: " << a.dimension(1) << endl;
+ => NumRows: 3 NumCols: 4
+
+### TensorFixedSize
+
+Creates a tensor of the specified size. The number of arguments in the
+```Sizes<>``` template parameter determines the rank of the tensor. The
+content of the tensor is not initialized.
+
+    Eigen::TensorFixedSize<float, Sizes<3, 4>> a;
+ cout << "Rank: " << a.rank() << endl;
+ => Rank: 2
+ cout << "NumRows: " << a.dimension(0) << " NumCols: " << a.dimension(1) << endl;
+ => NumRows: 3 NumCols: 4
+
+### TensorMap
+
+Creates a tensor mapping an existing array of data. The data must not be freed
+until the TensorMap is discarded, and the size of the data must be large
+enough to accommodate all the coefficients of the tensor.
+
+    float data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
+    Eigen::TensorMap<Eigen::Tensor<float, 2>> a(data, 3, 4);
+    cout << "NumRows: " << a.dimension(0) << " NumCols: " << a.dimension(1) << endl;
+    => NumRows: 3 NumCols: 4
+    cout << "a(1, 2): " << a(1, 2) << endl;
+    => a(1, 2): 7
+
+
+## Contents Initialization
+
+When a new Tensor or a new TensorFixedSize are created, memory is allocated to
+hold all the tensor elements, but the memory is not initialized. Similarly,
+when a new TensorMap is created on top of non-initialized memory, its contents
+are not initialized.
+
+You can use one of the methods below to initialize the tensor memory. These
+have an immediate effect on the tensor and return the tensor itself as a
+result. These are not tensor Operations which delay evaluation.
+
+### <Tensor-Type> setConstant(const Scalar& val)
+
+Sets all elements of the tensor to the constant value ```val```. ```Scalar```
+is the type of data stored in the tensor. You can pass any value that is
+convertible to that type.
+
+Returns the tensor itself in case you want to chain another call.
+
+ a.setConstant(12.3f);
+ cout << "Constant: " << endl << a << endl << endl;
+ =>
+ Constant:
+ 12.3 12.3 12.3 12.3
+ 12.3 12.3 12.3 12.3
+ 12.3 12.3 12.3 12.3
+
+Note that ```setConstant()``` can be used on any tensor where the element type
+has a copy constructor and an ```operator=()```:
+
+ Eigen::Tensor<string, 2> a(2, 3);
+ a.setConstant("yolo");
+ cout << "String tensor: " << endl << a << endl << endl;
+ =>
+ String tensor:
+ yolo yolo yolo
+ yolo yolo yolo
+
+
+### <Tensor-Type> setZero()
+
+Fills the tensor with zeros. Equivalent to ```setConstant(Scalar(0))```.
+Returns the tensor itself in case you want to chain another call.
+
+ a.setZero();
+ cout << "Zeros: " << endl << a << endl << endl;
+ =>
+ Zeros:
+ 0 0 0 0
+ 0 0 0 0
+ 0 0 0 0
+
+
+### <Tensor-Type> setValues({..initializer_list})
+
+Fills the tensor with explicit values specified in a std::initializer_list.
+The type of the initializer list depends on the type and rank of the tensor.
+
+If the tensor has rank N, the initializer list must be nested N times. The
+most deeply nested lists must contain P scalars of the Tensor type where P is
+the size of the last dimension of the Tensor.
+
+For example, for a ```TensorFixedSize<float, Sizes<2, 3>>``` the initializer
+list must contain 2 lists of 3 floats each.
+
+```setValues()``` returns the tensor itself in case you want to chain another
+call.
+
+ Eigen::Tensor<float, 2> a(2, 3);
+ a.setValues({{0.0f, 1.0f, 2.0f}, {3.0f, 4.0f, 5.0f}});
+ cout << "a" << endl << a << endl << endl;
+ =>
+ a
+ 0 1 2
+ 3 4 5
+
+If a list is too short, the corresponding elements of the tensor will not be
+changed. This is valid at each level of nesting. For example the following
+code only sets the values of the first row of the tensor.
+
+ Eigen::Tensor<int, 2> a(2, 3);
+ a.setConstant(1000);
+ a.setValues({{10, 20, 30}});
+ cout << "a" << endl << a << endl << endl;
+ =>
+ a
+ 10 20 30
+ 1000 1000 1000
+
+### <Tensor-Type> setRandom()
+
+Fills the tensor with random values. Returns the tensor itself in case you
+want to chain another call.
+
+ a.setRandom();
+ cout << "Random: " << endl << a << endl << endl;
+ =>
+ Random:
+ 0.680375 0.59688 -0.329554 0.10794
+ -0.211234 0.823295 0.536459 -0.0452059
+ 0.566198 -0.604897 -0.444451 0.257742
+
+You can customize ```setRandom()``` by providing your own random number
+generator as a template argument:
+
+ a.setRandom<MyRandomGenerator>();
+
+Here, ```MyRandomGenerator``` must be a struct with the following member
+functions, where Scalar and Index are the same as ```<Tensor-Type>::Scalar```
+and ```<Tensor-Type>::Index```.
+
+See ```struct UniformRandomGenerator``` in TensorFunctors.h for an example.
+
+ // Custom number generator for use with setRandom().
+ struct MyRandomGenerator {
+ // Default and copy constructors. Both are needed
+ MyRandomGenerator() { }
+ MyRandomGenerator(const MyRandomGenerator& ) { }
+
+ // Return a random value to be used. "element_location" is the
+ // location of the entry to set in the tensor, it can typically
+ // be ignored.
+ Scalar operator()(Eigen::DenseIndex element_location,
+ Eigen::DenseIndex /*unused*/ = 0) const {
+ return <randomly generated value of type T>;
+ }
+
+ // Same as above but generates several numbers at a time.
+ typename internal::packet_traits<Scalar>::type packetOp(
+ Eigen::DenseIndex packet_location, Eigen::DenseIndex /*unused*/ = 0) const {
+ return <a packet of randomly generated values>;
+ }
+ };
+
+You can also use one of the 2 random number generators that are part of the
+tensor library:
+* UniformRandomGenerator
+* NormalRandomGenerator
+
+
+## Data Access
+
+The Tensor, TensorFixedSize, and TensorRef classes provide the following
+accessors to access the tensor coefficients:
+
+ const Scalar& operator()(const array<Index, NumIndices>& indices)
+ const Scalar& operator()(Index firstIndex, IndexTypes... otherIndices)
+ Scalar& operator()(const array<Index, NumIndices>& indices)
+ Scalar& operator()(Index firstIndex, IndexTypes... otherIndices)
+
+The number of indices must be equal to the rank of the tensor. Moreover, these
+accessors are not available on tensor expressions. In order to access the
+values of a tensor expression, the expression must either be evaluated or
+wrapped in a TensorRef.
+
+
+### Scalar* data() and const Scalar* data() const
+
+Returns a pointer to the storage for the tensor. The pointer is const if the
+tensor was const. This allows direct access to the data. The layout of the
+data depends on the tensor layout: RowMajor or ColMajor.
+
+This access is usually only needed for special cases, for example when mixing
+Eigen Tensor code with other libraries.
+
+Scalar is the type of data stored in the tensor.
+
+ Eigen::Tensor<float, 2> a(3, 4);
+ float* a_data = a.data();
+ a_data[0] = 123.45f;
+ cout << "a(0, 0): " << a(0, 0);
+ => a(0, 0): 123.45
+
+
+## Tensor Operations
+
+All the methods documented below return non-evaluated tensor ```Operations```.
+These can be chained: you can apply another Tensor Operation to the value
+returned by the method.
+
+The chain of Operations is evaluated lazily, typically when it is assigned to a
+tensor. See "Controlling when Expression are Evaluated" for more details about
+their evaluation.
+
+### <Operation> constant(const Scalar& val)
+
+Returns a tensor of the same type and dimensions as the original tensor but
+where all elements have the value ```val```.
+
+This is useful, for example, when you want to add or subtract a constant from a
+tensor, or multiply every element of a tensor by a scalar.
+
+ Eigen::Tensor<float, 2> a(2, 3);
+ a.setConstant(1.0f);
+ Eigen::Tensor<float, 2> b = a + a.constant(2.0f);
+ Eigen::Tensor<float, 2> c = b * b.constant(0.2f);
+ cout << "a" << endl << a << endl << endl;
+ cout << "b" << endl << b << endl << endl;
+ cout << "c" << endl << c << endl << endl;
+ =>
+ a
+ 1 1 1
+ 1 1 1
+
+ b
+ 3 3 3
+ 3 3 3
+
+ c
+ 0.6 0.6 0.6
+ 0.6 0.6 0.6
+
+### <Operation> random()
+
+Returns a tensor of the same type and dimensions as the current tensor
+but where all elements have random values.
+
+This is useful, for example, to add random values to an existing tensor.
+The generation of random values can be customized in the same manner
+as for ```setRandom()```.
+
+ Eigen::Tensor<float, 2> a(2, 3);
+ a.setConstant(1.0f);
+ Eigen::Tensor<float, 2> b = a + a.random();
+ cout << "a" << endl << a << endl << endl;
+ cout << "b" << endl << b << endl << endl;
+ =>
+ a
+ 1 1 1
+ 1 1 1
+
+ b
+ 1.68038 1.5662 1.82329
+ 0.788766 1.59688 0.395103
+
+
+## Unary Element Wise Operations
+
+All these operations take a single input tensor as argument and return a tensor
+of the same type and dimensions as the tensor to which they are applied. The
+requested operations are applied to each element independently.
+
+### <Operation> operator-()
+
+Returns a tensor of the same type and dimensions as the original tensor
+containing the opposite values of the original tensor.
+
+ Eigen::Tensor<float, 2> a(2, 3);
+ a.setConstant(1.0f);
+ Eigen::Tensor<float, 2> b = -a;
+ cout << "a" << endl << a << endl << endl;
+ cout << "b" << endl << b << endl << endl;
+ =>
+ a
+ 1 1 1
+ 1 1 1
+
+ b
+ -1 -1 -1
+ -1 -1 -1
+
+### <Operation> sqrt()
+
+Returns a tensor of the same type and dimensions as the original tensor
+containing the square roots of the original tensor.
+
+### <Operation> rsqrt()
+
+Returns a tensor of the same type and dimensions as the original tensor
+containing the inverse square roots of the original tensor.
+
+### <Operation> square()
+
+Returns a tensor of the same type and dimensions as the original tensor
+containing the squares of the original tensor values.
+
+### <Operation> inverse()
+
+Returns a tensor of the same type and dimensions as the original tensor
+containing the coefficient wise reciprocals (1 / x) of the original tensor values.
+
+### <Operation> exp()
+
+Returns a tensor of the same type and dimensions as the original tensor
+containing the exponential of the original tensor.
+
+### <Operation> log()
+
+Returns a tensor of the same type and dimensions as the original tensor
+containing the natural logarithms of the original tensor.
+
+### <Operation> abs()
+
+Returns a tensor of the same type and dimensions as the original tensor
+containing the absolute values of the original tensor.
+
+### <Operation> pow(Scalar exponent)
+
+Returns a tensor of the same type and dimensions as the original tensor
+containing the coefficients of the original tensor to the power of the
+exponent.
+
+The type of the exponent, Scalar, is always the same as the type of the
+tensor coefficients. For example, only integer exponents can be used in
+conjunction with tensors of integer values.
+
+You can use cast() to lift this restriction. For example this computes
+cubic roots of an int Tensor:
+
+ Eigen::Tensor<int, 2> a(2, 3);
+ a.setValues({{0, 1, 8}, {27, 64, 125}});
+ Eigen::Tensor<double, 2> b = a.cast<double>().pow(1.0 / 3.0);
+ cout << "a" << endl << a << endl << endl;
+ cout << "b" << endl << b << endl << endl;
+ =>
+ a
+ 0 1 8
+ 27 64 125
+
+ b
+ 0 1 2
+ 3 4 5
+
+### <Operation> operator * (Scalar scale)
+
+Multiplies all the coefficients of the input tensor by the provided scale.
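+
+A short sketch:
+
+ Eigen::Tensor<float, 2> a(2, 3);
+ a.setConstant(2.0f);
+ Eigen::Tensor<float, 2> b = a * 3.0f; // every coefficient of b is 6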
+
+### <Operation> cwiseMax(Scalar threshold)
+
+Returns a tensor of the same type and dimensions as the original tensor
+containing, for each coefficient, the maximum of that coefficient and
+```threshold```.
+
+### <Operation> cwiseMin(Scalar threshold)
+
+Returns a tensor of the same type and dimensions as the original tensor
+containing, for each coefficient, the minimum of that coefficient and
+```threshold```.
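+
+A short sketch clamping the coefficients of a tensor to the [0, 1] range:
+
+ Eigen::Tensor<float, 2> a(2, 3);
+ a.setValues({{-2.0f, 0.5f, 3.0f}, {1.0f, -1.0f, 4.0f}});
+ Eigen::Tensor<float, 2> b = a.cwiseMax(0.0f).cwiseMin(1.0f);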
+
+### <Operation> unaryExpr(const CustomUnaryOp& func)
+
+Returns a tensor of the same dimensions as the original tensor where each
+coefficient is the result of applying the functor ```func``` to the
+corresponding coefficient of the input.
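+
+A minimal sketch with a hypothetical user-defined functor:
+
+ struct Halve {
+   float operator()(float x) const { return 0.5f * x; }
+ };
+
+ Eigen::Tensor<float, 2> a(2, 3);
+ a.setConstant(3.0f);
+ Eigen::Tensor<float, 2> b = a.unaryExpr(Halve()); // all coefficients are 1.5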
+
+
+## Binary Element Wise Operations
+
+These operations take two input tensors as arguments. The 2 input tensors should
+be of the same type and dimensions. The result is a tensor of the same
+dimensions as the tensors to which they are applied, and unless otherwise
+specified it is also of the same type. The requested operations are applied to
+each pair of elements independently.
+
+### <Operation> operator+(const OtherDerived& other)
+
+Returns a tensor of the same type and dimensions as the input tensors
+containing the coefficient wise sums of the inputs.
+
+### <Operation> operator-(const OtherDerived& other)
+
+Returns a tensor of the same type and dimensions as the input tensors
+containing the coefficient wise differences of the inputs.
+
+### <Operation> operator*(const OtherDerived& other)
+
+Returns a tensor of the same type and dimensions as the input tensors
+containing the coefficient wise products of the inputs.
+
+### <Operation> operator/(const OtherDerived& other)
+
+Returns a tensor of the same type and dimensions as the input tensors
+containing the coefficient wise quotients of the inputs.
+
+This operator is not supported for integer types.
+
+### <Operation> cwiseMax(const OtherDerived& other)
+
+Returns a tensor of the same type and dimensions as the input tensors
+containing the coefficient wise maximums of the inputs.
+
+### <Operation> cwiseMin(const OtherDerived& other)
+
+Returns a tensor of the same type and dimensions as the input tensors
+containing the coefficient wise minimums of the inputs.
+
+### <Operation> Logical operators
+
+The following logical operators are supported as well:
+
+* operator&&(const OtherDerived& other)
+* operator||(const OtherDerived& other)
+* operator<(const OtherDerived& other)
+* operator<=(const OtherDerived& other)
+* operator>(const OtherDerived& other)
+* operator>=(const OtherDerived& other)
+* operator==(const OtherDerived& other)
+* operator!=(const OtherDerived& other)
+
+They all return a tensor of boolean values.
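+
+For example, a short sketch of a coefficient wise comparison:
+
+ Eigen::Tensor<int, 1> a(3);
+ a.setValues({1, 5, 3});
+ Eigen::Tensor<int, 1> b(3);
+ b.setValues({2, 4, 3});
+ Eigen::Tensor<bool, 1> c = a < b; // contains {true, false, false}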
+
+
+## Selection (select(const ThenDerived& thenTensor, const ElseDerived& elseTensor))
+
+Selection is a coefficient-wise ternary operator that is the tensor equivalent
+to the if-then-else operation.
+
+ // Note: if, then and else are C++ keywords, so the tensors need other names.
+ Tensor<bool, 3> if_tensor = ...;
+ Tensor<float, 3> then_tensor = ...;
+ Tensor<float, 3> else_tensor = ...;
+ Tensor<float, 3> result = if_tensor.select(then_tensor, else_tensor);
+
+The 3 arguments must be of the same dimensions, which will also be the
+dimensions of the result. The 'if' tensor must be of type boolean, the 'then'
+and the 'else' tensors must be of the same type, which will also be the type
+of the result.
+
+Each coefficient in the result is equal to the corresponding coefficient in the
+'then' tensor if the corresponding value in the 'if' tensor is true. If not, the
+resulting coefficient will come from the 'else' tensor.
+
+
+## Contraction
+
+Tensor *contractions* are a generalization of the matrix product to the
+multidimensional case.
+
+ // Create 2 matrices using tensors of rank 2
+ Eigen::Tensor<int, 2> a(2, 3);
+ a.setValues({{1, 2, 3}, {6, 5, 4}});
+ Eigen::Tensor<int, 2> b(3, 2);
+ b.setValues({{1, 2}, {4, 5}, {5, 6}});
+
+ // Compute the traditional matrix product
+ array<IndexPair<int>, 1> product_dims = { IndexPair<int>(1, 0) };
+ Eigen::Tensor<int, 2> AB = a.contract(b, product_dims);
+
+ // Compute the product of the transposed matrices
+ array<IndexPair<int>, 1> transposed_product_dims = { IndexPair<int>(0, 1) };
+ Eigen::Tensor<int, 2> AtBt = a.contract(b, transposed_product_dims);
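+
+Contractions are not limited to rank-2 tensors. As a sketch, contracting a
+rank-3 tensor with a rank-2 tensor over a single index pair yields a rank-3
+result (3 + 2 - 2 = 3 remaining dimensions):
+
+ Eigen::Tensor<float, 3> t(4, 5, 6);
+ Eigen::Tensor<float, 2> m(6, 7);
+ t.setRandom();
+ m.setRandom();
+ // Contract the last dimension of t with the first dimension of m.
+ Eigen::array<Eigen::IndexPair<int>, 1> dims = { Eigen::IndexPair<int>(2, 0) };
+ Eigen::Tensor<float, 3> result = t.contract(m, dims); // dimensions (4, 5, 7)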
+
+
+## Reduction Operations
+
+A *Reduction* operation returns a tensor with fewer dimensions than the
+original tensor. The values in the returned tensor are computed by applying a
+*reduction operator* to slices of values from the original tensor. You specify
+the dimensions along which the slices are made.
+
+The Eigen Tensor library provides a set of predefined reduction operators such
+as ```maximum()``` and ```sum()``` and lets you define additional operators by
+implementing a few methods from a reductor template.
+
+### Reduction Dimensions
+
+All reduction operations take a single parameter of type
+```<TensorType>::Dimensions``` which can always be specified as an array of
+ints. These are called the "reduction dimensions." The values are the indices
+of the dimensions of the input tensor over which the reduction is done. The
+parameter can have at most as many elements as the rank of the input tensor;
+each element must be less than the tensor rank, as it indicates one of the
+dimensions to reduce.
+
+Each dimension of the input tensor should occur at most once in the reduction
+dimensions as the implementation does not remove duplicates.
+
+The order of the values in the reduction dimensions does not affect the
+results, but the code may execute faster if you list the dimensions in
+increasing order.
+
+Example: Reduction along one dimension.
+
+ // Create a tensor of 2 dimensions
+ Eigen::Tensor<int, 2> a(2, 3);
+ a.setValues({{1, 2, 3}, {6, 5, 4}});
+ // Reduce it along the second dimension (1)...
+ Eigen::array<int, 1> dims({1 /* dimension to reduce */});
+ // ...using the "maximum" operator.
+ // The result is a tensor with one dimension. The size of
+ // that dimension is the same as the first (non-reduced) dimension of a.
+ Eigen::Tensor<int, 1> b = a.maximum(dims);
+ cout << "a" << endl << a << endl << endl;
+ cout << "b" << endl << b << endl << endl;
+ =>
+ a
+ 1 2 3
+ 6 5 4
+
+ b
+ 3
+ 6
+
+Example: Reduction along two dimensions.
+
+ Eigen::Tensor<float, 3, Eigen::ColMajor> a(2, 3, 4);
+ a.setValues({{{0.0f, 1.0f, 2.0f, 3.0f},
+ {7.0f, 6.0f, 5.0f, 4.0f},
+ {8.0f, 9.0f, 10.0f, 11.0f}},
+ {{12.0f, 13.0f, 14.0f, 15.0f},
+ {19.0f, 18.0f, 17.0f, 16.0f},
+ {20.0f, 21.0f, 22.0f, 23.0f}}});
+ // The tensor a has 3 dimensions. We reduce along the
+ // first 2, resulting in a tensor with a single dimension
+ // of size 4 (the last dimension of a.)
+ // Note that we pass the array of reduction dimensions
+ // directly to the maximum() call.
+ Eigen::Tensor<float, 1, Eigen::ColMajor> b =
+ a.maximum(Eigen::array<int, 2>({0, 1}));
+ cout << "b" << endl << b << endl << endl;
+ =>
+ b
+ 20
+ 21
+ 22
+ 23
+
+#### Reduction along all dimensions
+
+As a special case, if you pass no parameter to a reduction operation the
+original tensor is reduced along *all* its dimensions. The result is a
+scalar, represented as a zero-dimension tensor.
+
+ Eigen::Tensor<float, 3> a(2, 3, 4);
+ a.setValues({{{0.0f, 1.0f, 2.0f, 3.0f},
+ {7.0f, 6.0f, 5.0f, 4.0f},
+ {8.0f, 9.0f, 10.0f, 11.0f}},
+ {{12.0f, 13.0f, 14.0f, 15.0f},
+ {19.0f, 18.0f, 17.0f, 16.0f},
+ {20.0f, 21.0f, 22.0f, 23.0f}}});
+ // Reduce along all dimensions using the sum() operator.
+ Eigen::Tensor<float, 0> b = a.sum();
+ cout << "b" << endl << b << endl << endl;
+ =>
+ b
+ 276
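+
+The resulting 0-dimension tensor holds a single coefficient, which can be read
+back as a plain scalar by invoking the tensor with no indices, e.g.
+```float total = b();```.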
+
+
+### <Operation> sum(const Dimensions& new_dims)
+### <Operation> sum()
+
+Reduce a tensor using the sum() operator. The resulting values
+are the sum of the reduced values.
+
+### <Operation> mean(const Dimensions& new_dims)
+### <Operation> mean()
+
+Reduce a tensor using the mean() operator. The resulting values
+are the mean of the reduced values.
+
+### <Operation> maximum(const Dimensions& new_dims)
+### <Operation> maximum()
+
+Reduce a tensor using the maximum() operator. The resulting values are the
+largest of the reduced values.
+
+### <Operation> minimum(const Dimensions& new_dims)
+### <Operation> minimum()
+
+Reduce a tensor using the minimum() operator. The resulting values
+are the smallest of the reduced values.
+
+### <Operation> prod(const Dimensions& new_dims)
+### <Operation> prod()
+
+Reduce a tensor using the prod() operator. The resulting values
+are the product of the reduced values.
+
+### <Operation> all(const Dimensions& new_dims)
+### <Operation> all()
+Reduce a tensor using the all() operator. Casts tensor to bool and then checks
+whether all elements are true. Runs through all elements rather than
+short-circuiting, so may be significantly inefficient.
+
+### <Operation> any(const Dimensions& new_dims)
+### <Operation> any()
+Reduce a tensor using the any() operator. Casts tensor to bool and then checks
+whether any element is true. Runs through all elements rather than
+short-circuiting, so may be significantly inefficient.
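+
+A short sketch, using ```constant()``` to build the comparison operand:
+
+ Eigen::Tensor<int, 2> a(2, 2);
+ a.setValues({{1, 2}, {3, 4}});
+ Eigen::Tensor<bool, 0> all_positive = (a > a.constant(0)).all();
+ // all_positive() == true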
+
+
+### <Operation> reduce(const Dimensions& new_dims, const Reducer& reducer)
+
+Reduce a tensor using a user-defined reduction operator. See ```SumReducer```
+in TensorFunctors.h for information on how to implement a reduction operator.
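+
+As a sketch only (the exact interface, including the optional vectorized
+methods, is defined by the existing reducers in TensorFunctors.h), a reducer
+provides an initial value, an accumulation step, and a finalization step:
+
+ // Hypothetical reducer computing the product of absolute values.
+ struct AbsProdReducer {
+   float initialize() const { return 1.0f; }
+   void reduce(const float t, float* accum) const { *accum *= (t < 0 ? -t : t); }
+   float finalize(const float accum) const { return accum; }
+ };
+
+ // Possible usage: Eigen::Tensor<float, 1> r = a.reduce(dims, AbsProdReducer());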
+
+
+## Scan Operations
+
+A *Scan* operation returns a tensor with the same dimensions as the original
+tensor. The operation performs an inclusive scan along the specified
+axis, which means it computes a running total along the axis for a given
+reduction operation.
+If the reduction operation corresponds to summation, then this computes the
+prefix sum of the tensor along the given axis.
+
+Example:
+
+ // Create a tensor of 2 dimensions
+ Eigen::Tensor<int, 2> a(2, 3);
+ a.setValues({{1, 2, 3}, {4, 5, 6}});
+ // Scan it along the second dimension (1) using summation
+ Eigen::Tensor<int, 2> b = a.cumsum(1);
+ // The result is a tensor with the same size as the input
+ cout << "a" << endl << a << endl << endl;
+ cout << "b" << endl << b << endl << endl;
+ =>
+ a
+ 1 2 3
+ 4 5 6
+
+ b
+ 1 3 6
+ 4 9 15
+
+### <Operation> cumsum(const Index& axis)
+
+Perform a scan by summing consecutive entries.
+
+### <Operation> cumprod(const Index& axis)
+
+Perform a scan by multiplying consecutive entries.
+
+
+## Convolutions
+
+### <Operation> convolve(const Kernel& kernel, const Dimensions& dims)
+
+Returns a tensor that is the output of the convolution of the input tensor with the kernel,
+along the specified dimensions of the input tensor. The dimension size for dimensions of the output tensor
+which were part of the convolution will be reduced by the formula:
+output_dim_size = input_dim_size - kernel_dim_size + 1 (requires: input_dim_size >= kernel_dim_size).
+The dimension sizes for dimensions that were not part of the convolution will remain the same.
+Performance of the convolution can depend on the length of the stride(s) of the input tensor dimension(s) along which the
+convolution is computed (the first dimension has the shortest stride for ColMajor, whereas RowMajor's shortest stride is
+for the last dimension).
+
+ // Compute convolution along the second and third dimension.
+ Tensor<float, 4, DataLayout> input(3, 3, 7, 11);
+ Tensor<float, 2, DataLayout> kernel(2, 2);
+ Tensor<float, 4, DataLayout> output(3, 2, 6, 11);
+ input.setRandom();
+ kernel.setRandom();
+
+ Eigen::array<ptrdiff_t, 2> dims({1, 2}); // Specify second and third dimension for convolution.
+ output = input.convolve(kernel, dims);
+
+ for (int i = 0; i < 3; ++i) {
+ for (int j = 0; j < 2; ++j) {
+ for (int k = 0; k < 6; ++k) {
+ for (int l = 0; l < 11; ++l) {
+ const float result = output(i,j,k,l);
+ const float expected = input(i,j+0,k+0,l) * kernel(0,0) +
+ input(i,j+1,k+0,l) * kernel(1,0) +
+ input(i,j+0,k+1,l) * kernel(0,1) +
+ input(i,j+1,k+1,l) * kernel(1,1);
+ VERIFY_IS_APPROX(result, expected);
+ }
+ }
+ }
+ }
+
+
+## Geometrical Operations
+
+These operations return a Tensor with different dimensions than the original
+Tensor. They can be used to access slices of tensors, view them with different
+dimensions, or pad tensors with additional data.
+
+### <Operation> reshape(const Dimensions& new_dims)
+
+Returns a view of the input tensor that has been reshaped to the specified
+new dimensions. The argument new_dims is an array of Index values. The
+rank of the resulting tensor is equal to the number of elements in new_dims.
+
+The product of all the sizes in the new dimension array must be equal to
+the number of elements in the input tensor.
+
+ // Increase the rank of the input tensor by introducing a new dimension
+ // of size 1.
+ Tensor<float, 2> input(7, 11);
+ array<int, 3> three_dims{{7, 11, 1}};
+ Tensor<float, 3> result = input.reshape(three_dims);
+
+ // Decrease the rank of the input tensor by merging 2 dimensions;
+ array<int, 1> one_dim{{7 * 11}};
+ Tensor<float, 1> result = input.reshape(one_dim);
+
+This operation does not move any data in the input tensor, so the resulting
+contents of a reshaped Tensor depend on the data layout of the original Tensor.
+
+For example this is what happens when you ```reshape()``` a 2D ColMajor tensor
+to one dimension:
+
+ Eigen::Tensor<float, 2, Eigen::ColMajor> a(2, 3);
+ a.setValues({{0.0f, 100.0f, 200.0f}, {300.0f, 400.0f, 500.0f}});
+ Eigen::array<Eigen::DenseIndex, 1> one_dim({3 * 2});
+ Eigen::Tensor<float, 1, Eigen::ColMajor> b = a.reshape(one_dim);
+ cout << "b" << endl << b << endl;
+ =>
+ b
+ 0
+ 300
+ 100
+ 400
+ 200
+ 500
+
+This is what happens when the 2D Tensor is RowMajor:
+
+ Eigen::Tensor<float, 2, Eigen::RowMajor> a(2, 3);
+ a.setValues({{0.0f, 100.0f, 200.0f}, {300.0f, 400.0f, 500.0f}});
+ Eigen::array<Eigen::DenseIndex, 1> one_dim({3 * 2});
+ Eigen::Tensor<float, 1, Eigen::RowMajor> b = a.reshape(one_dim);
+ cout << "b" << endl << b << endl;
+ =>
+ b
+ 0
+ 100
+ 200
+ 300
+ 400
+ 500
+
+The reshape operation is an lvalue. In other words, it can be used on the left
+side of the assignment operator.
+
+The previous example can be rewritten as follows:
+
+ Eigen::Tensor<float, 2, Eigen::ColMajor> a(2, 3);
+ a.setValues({{0.0f, 100.0f, 200.0f}, {300.0f, 400.0f, 500.0f}});
+ Eigen::array<Eigen::DenseIndex, 2> two_dim({2, 3});
+ Eigen::Tensor<float, 1, Eigen::ColMajor> b(6);
+ b.reshape(two_dim) = a;
+ cout << "b" << endl << b << endl;
+ =>
+ b
+ 0
+ 300
+ 100
+ 400
+ 200
+ 500
+
+Note that "b" itself was not reshaped; instead, the assignment is done to the
+reshape view of b.
+
+
+### <Operation> shuffle(const Shuffle& shuffle)
+
+Returns a copy of the input tensor whose dimensions have been
+reordered according to the specified permutation. The argument shuffle
+is an array of Index values. Its size is the rank of the input
+tensor. It must contain a permutation of 0, 1, ..., rank - 1. The i-th
+dimension of the output tensor is equal to the size of the shuffle[i]-th
+dimension of the input tensor. For example:
+
+ // Shuffle all dimensions to the left by 1.
+ Tensor<float, 3> input(20, 30, 50);
+ // ... set some values in input.
+ Tensor<float, 3> output = input.shuffle({1, 2, 0});
+
+ eigen_assert(output.dimension(0) == 30);
+ eigen_assert(output.dimension(1) == 50);
+ eigen_assert(output.dimension(2) == 20);
+
+Indices into the output tensor are shuffled accordingly to formulate
+indices into the input tensor. For example, one can assert in the above
+code snippet that:
+
+ eigen_assert(output(3, 7, 11) == input(11, 3, 7));
+
+In general, one can assert that
+
+ eigen_assert(output(..., indices[shuffle[i]], ...) ==
+ input(..., indices[i], ...))
+
+The shuffle operation results in an lvalue, which means that it can be assigned
+to. In other words, it can be used on the left side of the assignment operator.
+
+Let's rewrite the previous example to take advantage of this feature:
+
+ // Shuffle all dimensions to the left by 1.
+ Tensor<float, 3> input(20, 30, 50);
+ // ... set some values in input.
+ Tensor<float, 3> output(30, 50, 20);
+ output.shuffle({2, 0, 1}) = input;
+
+
+### <Operation> stride(const Strides& strides)
+
+Returns a view of the input tensor that strides (skips stride-1
+elements) along each of the dimensions. The argument strides is an
+array of Index values. The dimensions of the resulting tensor are
+ceil(input_dimensions[i] / strides[i]).
+
+For example this is what happens when you ```stride()``` a 2D tensor:
+
+ Eigen::Tensor<int, 2> a(4, 3);
+ a.setValues({{0, 100, 200}, {300, 400, 500}, {600, 700, 800}, {900, 1000, 1100}});
+ Eigen::array<Eigen::DenseIndex, 2> strides({3, 2});
+ Eigen::Tensor<int, 2> b = a.stride(strides);
+ cout << "b" << endl << b << endl;
+ =>
+ b
+ 0 200
+ 900 1100
+
+It is possible to assign a tensor to a stride:
+
+ Tensor<float, 3> input(20, 30, 50);
+ // ... set some values in input.
+ Tensor<float, 3> output(40, 90, 200);
+ output.stride({2, 3, 4}) = input;
+
+
+### <Operation> slice(const StartIndices& offsets, const Sizes& extents)
+
+Returns a sub-tensor of the given tensor. For each dimension i, the slice is
+made of the extents[i] coefficients starting at offset offsets[i] in
+the input tensor.
+
+ Eigen::Tensor<int, 2> a(4, 3);
+ a.setValues({{0, 100, 200}, {300, 400, 500},
+ {600, 700, 800}, {900, 1000, 1100}});
+ Eigen::array<int, 2> offsets = {1, 0};
+ Eigen::array<int, 2> extents = {2, 2};
+ Eigen::Tensor<int, 2> slice = a.slice(offsets, extents);
+ cout << "a" << endl << a << endl;
+ =>
+ a
+ 0 100 200
+ 300 400 500
+ 600 700 800
+ 900 1000 1100
+ cout << "slice" << endl << slice << endl;
+ =>
+ slice
+ 300 400
+ 600 700
+
+
+### <Operation> chip(const Index offset, const Index dim)
+
+A chip is a special kind of slice. It is the subtensor at the given offset in
+the dimension dim. The returned tensor has one fewer dimension than the input
+tensor: the dimension dim is removed.
+
+For example, a matrix chip would be either a row or a column of the input
+matrix.
+
+ Eigen::Tensor<int, 2> a(4, 3);
+ a.setValues({{0, 100, 200}, {300, 400, 500},
+ {600, 700, 800}, {900, 1000, 1100}});
+ Eigen::Tensor<int, 1> row_3 = a.chip(2, 0);
+ Eigen::Tensor<int, 1> col_2 = a.chip(1, 1);
+ cout << "a" << endl << a << endl;
+ =>
+ a
+ 0 100 200
+ 300 400 500
+ 600 700 800
+ 900 1000 1100
+ cout << "row_3" << endl << row_3 << endl;
+ =>
+ row_3
+ 600 700 800
+ cout << "col_2" << endl << col_2 << endl;
+ =>
+ col_2
+ 100 400 700 1000
+
+It is possible to assign values to a tensor chip since the chip operation is
+an lvalue. For example:
+
+ Eigen::Tensor<int, 1> a(3);
+ a.setValues({100, 200, 300});
+ Eigen::Tensor<int, 2> b(2, 3);
+ b.setZero();
+ b.chip(0, 0) = a;
+ cout << "a" << endl << a << endl;
+ =>
+ a
+ 100
+ 200
+ 300
+ cout << "b" << endl << b << endl;
+ =>
+ b
+ 100 200 300
+ 0 0 0
+
+
+### <Operation> reverse(const ReverseDimensions& reverse)
+
+Returns a view of the input tensor that reverses the order of the coefficients
+along a subset of the dimensions. The argument reverse is an array of boolean
+values that indicates whether or not the order of the coefficients should be
+reversed along each of the dimensions. This operation preserves the dimensions
+of the input tensor.
+
+For example this is what happens when you ```reverse()``` the first dimension
+of a 2D tensor:
+
+ Eigen::Tensor<int, 2> a(4, 3);
+ a.setValues({{0, 100, 200}, {300, 400, 500},
+ {600, 700, 800}, {900, 1000, 1100}});
+ Eigen::array<bool, 2> reverse({true, false});
+ Eigen::Tensor<int, 2> b = a.reverse(reverse);
+ cout << "a" << endl << a << endl << "b" << endl << b << endl;
+ =>
+ a
+ 0 100 200
+ 300 400 500
+ 600 700 800
+ 900 1000 1100
+ b
+ 900 1000 1100
+ 600 700 800
+ 300 400 500
+ 0 100 200
+
+
+### <Operation> broadcast(const Broadcast& broadcast)
+
+Returns a view of the input tensor in which the input is replicated one or
+more times.
+The broadcast argument specifies how many copies of the input tensor need to be
+made in each of the dimensions.
+
+ Eigen::Tensor<int, 2> a(2, 3);
+ a.setValues({{0, 100, 200}, {300, 400, 500}});
+ Eigen::array<int, 2> bcast({3, 2});
+ Eigen::Tensor<int, 2> b = a.broadcast(bcast);
+ cout << "a" << endl << a << endl << "b" << endl << b << endl;
+ =>
+ a
+ 0 100 200
+ 300 400 500
+ b
+ 0 100 200 0 100 200
+ 300 400 500 300 400 500
+ 0 100 200 0 100 200
+ 300 400 500 300 400 500
+ 0 100 200 0 100 200
+ 300 400 500 300 400 500
+
+### <Operation> concatenate(const OtherDerived& other, Axis axis)
+
+Returns a view in which the ```other``` tensor is concatenated to the input
+tensor along the given axis. The two tensors must have the same dimensions
+along every axis except the concatenation axis; along that axis, the size of
+the result is the sum of the sizes of the inputs.
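+
+A short sketch:
+
+ Eigen::Tensor<int, 2> a(2, 3);
+ Eigen::Tensor<int, 2> b(2, 3);
+ a.setConstant(1);
+ b.setConstant(2);
+ Eigen::Tensor<int, 2> c = a.concatenate(b, 0); // dimensions (4, 3)
+ Eigen::Tensor<int, 2> d = a.concatenate(b, 1); // dimensions (2, 6)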
+
+### <Operation> pad(const PaddingDimensions& padding)
+
+Returns a view of the input tensor in which the input is padded with zeros.
+
+ Eigen::Tensor<int, 2> a(2, 3);
+ a.setValues({{0, 100, 200}, {300, 400, 500}});
+ Eigen::array<pair<int, int>, 2> paddings;
+ paddings[0] = make_pair(0, 1);
+ paddings[1] = make_pair(2, 3);
+ Eigen::Tensor<int, 2> b = a.pad(paddings);
+ cout << "a" << endl << a << endl << "b" << endl << b << endl;
+ =>
+ a
+ 0 100 200
+ 300 400 500
+ b
+ 0 0 0 0
+ 0 0 0 0
+ 0 100 200 0
+ 300 400 500 0
+ 0 0 0 0
+ 0 0 0 0
+ 0 0 0 0
+
+
+### <Operation> extract_patches(const PatchDims& patch_dims)
+
+Returns a tensor of coefficient patches extracted from the input tensor, where
+each patch is of dimension specified by 'patch_dims'. The returned tensor has
+one greater dimension than the input tensor, which is used to index each patch.
+The patch index in the output tensor depends on the data layout of the input
+tensor: the patch index is the last dimension in ColMajor layout, and the
+first dimension in RowMajor layout.
+
+For example, given the following input tensor:
+
+ Eigen::Tensor<float, 2, DataLayout> tensor(3,4);
+ tensor.setValues({{0.0f, 1.0f, 2.0f, 3.0f},
+ {4.0f, 5.0f, 6.0f, 7.0f},
+ {8.0f, 9.0f, 10.0f, 11.0f}});
+
+ cout << "tensor: " << endl << tensor << endl;
+ =>
+ tensor:
+ 0 1 2 3
+ 4 5 6 7
+ 8 9 10 11
+
+Six 2x2 patches can be extracted and indexed using the following code:
+
+ Eigen::Tensor<float, 3, DataLayout> patch;
+ Eigen::array<ptrdiff_t, 2> patch_dims;
+ patch_dims[0] = 2;
+ patch_dims[1] = 2;
+ patch = tensor.extract_patches(patch_dims);
+ for (int k = 0; k < 6; ++k) {
+ cout << "patch index: " << k << endl;
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 2; ++j) {
+ if (DataLayout == ColMajor) {
+ cout << patch(i, j, k) << " ";
+ } else {
+ cout << patch(k, i, j) << " ";
+ }
+ }
+ cout << endl;
+ }
+ }
+
+This code results in the following output when the data layout is ColMajor:
+
+ patch index: 0
+ 0 1
+ 4 5
+ patch index: 1
+ 4 5
+ 8 9
+ patch index: 2
+ 1 2
+ 5 6
+ patch index: 3
+ 5 6
+ 9 10
+ patch index: 4
+ 2 3
+ 6 7
+ patch index: 5
+ 6 7
+ 10 11
+
+This code results in the following output when the data layout is RowMajor:
+(NOTE: the set of patches is the same as in ColMajor, but the patches are
+indexed differently).
+
+ patch index: 0
+ 0 1
+ 4 5
+ patch index: 1
+ 1 2
+ 5 6
+ patch index: 2
+ 2 3
+ 6 7
+ patch index: 3
+ 4 5
+ 8 9
+ patch index: 4
+ 5 6
+ 9 10
+ patch index: 5
+ 6 7
+ 10 11
+
+### <Operation> extract_image_patches(const Index patch_rows, const Index patch_cols, const Index row_stride, const Index col_stride, const PaddingType padding_type)
+
+Returns a tensor of coefficient image patches extracted from the input tensor,
+which is expected to have dimensions ordered as follows (depending on the data
+layout of the input tensor, and the number of additional dimensions 'N'):
+
+*) ColMajor
+1st dimension: channels (of size d)
+2nd dimension: rows (of size r)
+3rd dimension: columns (of size c)
+4th-Nth dimension: time (for video) or batch (for bulk processing).
+
+*) RowMajor (reverse order of ColMajor)
+1st-Nth dimension: time (for video) or batch (for bulk processing).
+N+1'th dimension: columns (of size c)
+N+2'th dimension: rows (of size r)
+N+3'th dimension: channels (of size d)
+
+The returned tensor has one greater dimension than the input tensor, which is
+used to index each patch. The patch index in the output tensor depends on the
+data layout of the input tensor: the patch index is the 4'th dimension in
+ColMajor layout, and the 4'th from the last dimension in RowMajor layout.
+
+For example, given the following input tensor with the following dimension
+sizes:
+ *) depth: 2
+ *) rows: 3
+ *) columns: 5
+ *) batch: 7
+
+ Tensor<float, 4> tensor(2,3,5,7);
+ Tensor<float, 4, RowMajor> tensor_row_major = tensor.swap_layout();
+
+2x2 image patches can be extracted and indexed using the following code:
+
+*) 2D patch: ColMajor (patch indexed by second-to-last dimension)
+ Tensor<float, 5> twod_patch;
+ twod_patch = tensor.extract_image_patches(2, 2);
+ // twod_patch.dimension(0) == 2
+ // twod_patch.dimension(1) == 2
+ // twod_patch.dimension(2) == 2
+ // twod_patch.dimension(3) == 3*5
+ // twod_patch.dimension(4) == 7
+
+*) 2D patch: RowMajor (patch indexed by the second dimension)
+ Tensor<float, 5, RowMajor> twod_patch_row_major;
+ twod_patch_row_major = tensor_row_major.extract_image_patches(2, 2);
+ // twod_patch_row_major.dimension(0) == 7
+ // twod_patch_row_major.dimension(1) == 3*5
+ // twod_patch_row_major.dimension(2) == 2
+ // twod_patch_row_major.dimension(3) == 2
+ // twod_patch_row_major.dimension(4) == 2
+
+## Special Operations
+
+### <Operation> cast<T>()
+
+Returns a tensor of type T with the same dimensions as the original tensor.
+The returned tensor contains the values of the original tensor converted to
+type T.
+
+ Eigen::Tensor<float, 2> a(2, 3);
+ Eigen::Tensor<int, 2> b = a.cast<int>();
+
+This can be useful, for example, if you need to do element-wise division of
+Tensors of integers. This is not currently supported by the Tensor library
+but you can easily cast the tensors to floats to do the division:
+
+ Eigen::Tensor<int, 2> a(2, 3);
+ a.setValues({{0, 1, 2}, {3, 4, 5}});
+ Eigen::Tensor<int, 2> b =
+ (a.cast<float>() / a.constant(2).cast<float>()).cast<int>();
+ cout << "a" << endl << a << endl << endl;
+ cout << "b" << endl << b << endl << endl;
+ =>
+ a
+ 0 1 2
+ 3 4 5
+
+ b
+ 0 0 1
+ 1 2 2
+
+
+### <Operation> eval()
+
+Forces the immediate evaluation of the expression it is applied to: the
+intermediate result is computed into a temporary, and the rest of the
+expression then operates on that temporary. See "Controlling when Expression
+are Evaluated" for details on when this is useful.
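+
+A short sketch (assuming ```a``` and ```b``` are float tensors of identical
+dimensions):
+
+ Eigen::Tensor<float, 2> c = (a + b).eval().sqrt();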
+
+
+## Representation of scalar values
+
+Scalar values are often represented by tensors of size 1 and rank 1. It would be
+more logical and user friendly to use tensors of rank 0 instead. For example
+Tensor<T, N>::maximum() currently returns a Tensor<T, 1>. Similarly, the inner
+product of two 1d tensors (through contractions) returns a 1d tensor. In the
+future these operations might be updated to return 0d tensors instead.
+
+## Limitations
+
+* The number of tensor dimensions is currently limited to 250 when using a
+ compiler that supports cxx11. It is limited to only 5 for older compilers.
+* The IndexList class requires a cxx11 compliant compiler. You can use an
+ array of indices instead if you don't have access to a modern compiler.
+* On GPUs only floating point values are properly tested and optimized for.
+* Complex and integer values are known to be broken on GPUs. If you try to use
+ them you'll most likely end up triggering a static assertion failure such as
+ EIGEN_STATIC_ASSERT(packetSize > 1, YOU_MADE_A_PROGRAMMING_MISTAKE)
+
+
diff --git a/unsupported/Eigen/CXX11/src/Tensor/Tensor.h b/unsupported/Eigen/CXX11/src/Tensor/Tensor.h
new file mode 100644
index 000000000..1940a9692
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/Tensor.h
@@ -0,0 +1,527 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+// Copyright (C) 2013 Christian Seiler <christian@iwakd.de>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_H
+#define EIGEN_CXX11_TENSOR_TENSOR_H
+
+namespace Eigen {
+
+/** \class Tensor
+ * \ingroup CXX11_Tensor_Module
+ *
+ * \brief The tensor class.
+ *
+ * The %Tensor class is the work-horse for all \em dense tensors within Eigen.
+ *
+ * The %Tensor class encompasses only dynamic-size objects so far.
+ *
+ * The first two template parameters are required:
+ * \tparam Scalar_ \anchor tensor_tparam_scalar Numeric type, e.g. float, double, int or std::complex<float>.
+ * User defined scalar types are supported as well (see \ref user_defined_scalars "here").
+ * \tparam NumIndices_ Number of indices (i.e. rank of the tensor)
+ *
+ * The remaining template parameters are optional -- in most cases you don't have to worry about them.
+ * \tparam Options_ \anchor tensor_tparam_options A combination of either \b #RowMajor or \b #ColMajor, and of either
+ * \b #AutoAlign or \b #DontAlign.
+ * The former controls \ref TopicStorageOrders "storage order", and defaults to column-major. The latter controls alignment, which is required
+ * for vectorization. It defaults to aligning tensors. Note that tensors currently do not support any operations that profit from vectorization.
+ * Support for such operations (i.e. adding two tensors etc.) is planned.
+ *
+ * You can access elements of tensors using normal subscripting:
+ *
+ * \code
+ * Eigen::Tensor<double, 4> t(10, 10, 10, 10);
+ * t(0, 1, 2, 3) = 42.0;
+ * \endcode
+ *
+ * This class can be extended with the help of the plugin mechanism described on the page
+ * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_TENSOR_PLUGIN.
+ *
+ * <i><b>Some notes:</b></i>
+ *
+ * <dl>
+ * <dt><b>Relation to other parts of Eigen:</b></dt>
+ * <dd>The midterm development goal for this class is to have a similar hierarchy as Eigen uses for matrices, so that
+ * taking blocks or using tensors in expressions is easily possible, including an interface with the vector/matrix code
+ * by providing .asMatrix() and .asVector() (or similar) methods for rank 2 and 1 tensors. However, currently, the %Tensor
+ * class does not provide any of these features and is only available as a stand-alone class that just allows for
+ * coefficient access. Also, when fixed-size tensors are implemented, the number of template arguments is likely to
+ * change dramatically.</dd>
+ * </dl>
+ *
+ * \ref TopicStorageOrders
+ */
+
+template<typename Scalar_, int NumIndices_, int Options_, typename IndexType_>
+class Tensor : public TensorBase<Tensor<Scalar_, NumIndices_, Options_, IndexType_> >
+{
+ public:
+ typedef Tensor<Scalar_, NumIndices_, Options_, IndexType_> Self;
+ typedef TensorBase<Tensor<Scalar_, NumIndices_, Options_, IndexType_> > Base;
+ typedef typename Eigen::internal::nested<Self>::type Nested;
+ typedef typename internal::traits<Self>::StorageKind StorageKind;
+ typedef typename internal::traits<Self>::Index Index;
+ typedef Scalar_ Scalar;
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+ typedef typename Base::CoeffReturnType CoeffReturnType;
+
+ enum {
+ IsAligned = bool(EIGEN_MAX_ALIGN_BYTES>0) & !(Options_&DontAlign),
+ Layout = Options_ & RowMajor ? RowMajor : ColMajor,
+ CoordAccess = true,
+ RawAccess = true
+ };
+
+ static const int Options = Options_;
+ static const int NumIndices = NumIndices_;
+ typedef DSizes<Index, NumIndices_> Dimensions;
+
+ protected:
+ TensorStorage<Scalar, Dimensions, Options> m_storage;
+
+#ifdef EIGEN_HAS_SFINAE
+ template<typename CustomIndices>
+ struct isOfNormalIndex{
+ static const bool is_array = internal::is_base_of<array<Index, NumIndices>, CustomIndices>::value;
+ static const bool is_int = NumTraits<CustomIndices>::IsInteger;
+ static const bool value = is_array | is_int;
+ };
+#endif
+
+ public:
+ // Metadata
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rank() const { return NumIndices; }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index dimension(std::size_t n) const { return m_storage.dimensions()[n]; }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_storage.dimensions(); }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index size() const { return m_storage.size(); }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar *data() { return m_storage.data(); }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar *data() const { return m_storage.data(); }
+
+ // This makes EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
+ // work, because that uses base().coeffRef() - and we don't yet
+ // implement a similar class hierarchy
+ inline Self& base() { return *this; }
+ inline const Self& base() const { return *this; }
+
+#if EIGEN_HAS_VARIADIC_TEMPLATES
+ template<typename... IndexTypes>
+ EIGEN_DEVICE_FUNC inline const Scalar& coeff(Index firstIndex, Index secondIndex, IndexTypes... otherIndices) const
+ {
+ // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
+ EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
+ return coeff(array<Index, NumIndices>{{firstIndex, secondIndex, otherIndices...}});
+ }
+#endif
+
+ // normal indices
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff(const array<Index, NumIndices>& indices) const
+ {
+ eigen_internal_assert(checkIndexRange(indices));
+ return m_storage.data()[linearizedIndex(indices)];
+ }
+
+ // custom indices
+#ifdef EIGEN_HAS_SFINAE
+ template<typename CustomIndices,
+ EIGEN_SFINAE_ENABLE_IF( !(isOfNormalIndex<CustomIndices>::value) )
+ >
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff(CustomIndices& indices) const
+ {
+ return coeff(internal::customIndices2Array<Index,NumIndices>(indices));
+ }
+#endif
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff() const
+ {
+ EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
+ return m_storage.data()[0];
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff(Index index) const
+ {
+ eigen_internal_assert(index >= 0 && index < size());
+ return m_storage.data()[index];
+ }
+
+#if EIGEN_HAS_VARIADIC_TEMPLATES
+ template<typename... IndexTypes>
+ inline Scalar& coeffRef(Index firstIndex, Index secondIndex, IndexTypes... otherIndices)
+ {
+ // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
+ EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
+ return coeffRef(array<Index, NumIndices>{{firstIndex, secondIndex, otherIndices...}});
+ }
+#endif
+
+ // normal indices
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(const array<Index, NumIndices>& indices)
+ {
+ eigen_internal_assert(checkIndexRange(indices));
+ return m_storage.data()[linearizedIndex(indices)];
+ }
+
+ // custom indices
+#ifdef EIGEN_HAS_SFINAE
+ template<typename CustomIndices,
+ EIGEN_SFINAE_ENABLE_IF( !(isOfNormalIndex<CustomIndices>::value) )
+ >
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(CustomIndices& indices)
+ {
+ return coeffRef(internal::customIndices2Array<Index,NumIndices>(indices));
+ }
+#endif
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef()
+ {
+ EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
+ return m_storage.data()[0];
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index)
+ {
+ eigen_internal_assert(index >= 0 && index < size());
+ return m_storage.data()[index];
+ }
+
+#if EIGEN_HAS_VARIADIC_TEMPLATES
+ template<typename... IndexTypes>
+ inline const Scalar& operator()(Index firstIndex, Index secondIndex, IndexTypes... otherIndices) const
+ {
+ // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
+ EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
+ return this->operator()(array<Index, NumIndices>{{firstIndex, secondIndex, otherIndices...}});
+ }
+#else
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1) const
+ {
+ return coeff(array<Index, 2>(i0, i1));
+ }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2) const
+ {
+ return coeff(array<Index, 3>(i0, i1, i2));
+ }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2, Index i3) const
+ {
+ return coeff(array<Index, 4>(i0, i1, i2, i3));
+ }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2, Index i3, Index i4) const
+ {
+ return coeff(array<Index, 5>(i0, i1, i2, i3, i4));
+ }
+#endif
+
+ // custom indices
+#ifdef EIGEN_HAS_SFINAE
+ template<typename CustomIndices,
+ EIGEN_SFINAE_ENABLE_IF( !(isOfNormalIndex<CustomIndices>::value) )
+ >
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()(CustomIndices& indices) const
+ {
+ return coeff(internal::customIndices2Array<Index,NumIndices>(indices));
+ }
+#endif
+
+ // normal indices
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()(const array<Index, NumIndices>& indices) const
+ {
+ return coeff(indices);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()(Index index) const
+ {
+ eigen_internal_assert(index >= 0 && index < size());
+ return coeff(index);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()() const
+ {
+ EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
+ return coeff();
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator[](Index index) const
+ {
+ // The bracket operator is only for vectors, use the parenthesis operator instead.
+ EIGEN_STATIC_ASSERT(NumIndices == 1, YOU_MADE_A_PROGRAMMING_MISTAKE);
+ return coeff(index);
+ }
+
+#if EIGEN_HAS_VARIADIC_TEMPLATES
+ template<typename... IndexTypes>
+ inline Scalar& operator()(Index firstIndex, Index secondIndex, IndexTypes... otherIndices)
+ {
+ // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
+ EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
+ return operator()(array<Index, NumIndices>{{firstIndex, secondIndex, otherIndices...}});
+ }
+#else
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1)
+ {
+ return coeffRef(array<Index, 2>(i0, i1));
+ }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2)
+ {
+ return coeffRef(array<Index, 3>(i0, i1, i2));
+ }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2, Index i3)
+ {
+ return coeffRef(array<Index, 4>(i0, i1, i2, i3));
+ }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2, Index i3, Index i4)
+ {
+ return coeffRef(array<Index, 5>(i0, i1, i2, i3, i4));
+ }
+#endif
+
+ // normal indices
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()(const array<Index, NumIndices>& indices)
+ {
+ return coeffRef(indices);
+ }
+
+ // custom indices
+#ifdef EIGEN_HAS_SFINAE
+ template<typename CustomIndices,
+ EIGEN_SFINAE_ENABLE_IF( !(isOfNormalIndex<CustomIndices>::value) )
+ >
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()(CustomIndices& indices)
+ {
+ return coeffRef(internal::customIndices2Array<Index,NumIndices>(indices));
+ }
+#endif
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()(Index index)
+ {
+ eigen_assert(index >= 0 && index < size());
+ return coeffRef(index);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()()
+ {
+ EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
+ return coeffRef();
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator[](Index index)
+ {
+ // The bracket operator is only for vectors, use the parenthesis operator instead
+ EIGEN_STATIC_ASSERT(NumIndices == 1, YOU_MADE_A_PROGRAMMING_MISTAKE)
+ return coeffRef(index);
+ }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Tensor()
+ : m_storage()
+ {
+ }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Tensor(const Self& other)
+ : m_storage(other.m_storage)
+ {
+ }
+
+#if EIGEN_HAS_VARIADIC_TEMPLATES
+ template<typename... IndexTypes>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Tensor(Index firstDimension, IndexTypes... otherDimensions)
+ : m_storage(firstDimension, otherDimensions...)
+ {
+ // The number of dimensions used to construct a tensor must be equal to the rank of the tensor.
+ EIGEN_STATIC_ASSERT(sizeof...(otherDimensions) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
+ }
+#else
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit Tensor(Index dim1)
+ : m_storage(dim1, array<Index, 1>(dim1))
+ {
+ EIGEN_STATIC_ASSERT(1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Tensor(Index dim1, Index dim2)
+ : m_storage(dim1*dim2, array<Index, 2>(dim1, dim2))
+ {
+ EIGEN_STATIC_ASSERT(2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Tensor(Index dim1, Index dim2, Index dim3)
+ : m_storage(dim1*dim2*dim3, array<Index, 3>(dim1, dim2, dim3))
+ {
+ EIGEN_STATIC_ASSERT(3 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Tensor(Index dim1, Index dim2, Index dim3, Index dim4)
+ : m_storage(dim1*dim2*dim3*dim4, array<Index, 4>(dim1, dim2, dim3, dim4))
+ {
+ EIGEN_STATIC_ASSERT(4 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Tensor(Index dim1, Index dim2, Index dim3, Index dim4, Index dim5)
+ : m_storage(dim1*dim2*dim3*dim4*dim5, array<Index, 5>(dim1, dim2, dim3, dim4, dim5))
+ {
+ EIGEN_STATIC_ASSERT(5 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
+ }
+#endif
+
+ /** Normal Dimension */
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit Tensor(const array<Index, NumIndices>& dimensions)
+ : m_storage(internal::array_prod(dimensions), dimensions)
+ {
+ EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
+ }
+
+ template<typename OtherDerived>
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Tensor(const TensorBase<OtherDerived, ReadOnlyAccessors>& other)
+ {
+ typedef TensorAssignOp<Tensor, const OtherDerived> Assign;
+ Assign assign(*this, other.derived());
+ resize(TensorEvaluator<const Assign, DefaultDevice>(assign, DefaultDevice()).dimensions());
+ internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
+ }
+ template<typename OtherDerived>
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Tensor(const TensorBase<OtherDerived, WriteAccessors>& other)
+ {
+ typedef TensorAssignOp<Tensor, const OtherDerived> Assign;
+ Assign assign(*this, other.derived());
+ resize(TensorEvaluator<const Assign, DefaultDevice>(assign, DefaultDevice()).dimensions());
+ internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
+ }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Tensor& operator=(const Tensor& other)
+ {
+ typedef TensorAssignOp<Tensor, const Tensor> Assign;
+ Assign assign(*this, other);
+ resize(TensorEvaluator<const Assign, DefaultDevice>(assign, DefaultDevice()).dimensions());
+ internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
+ return *this;
+ }
+ template<typename OtherDerived>
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Tensor& operator=(const OtherDerived& other)
+ {
+ typedef TensorAssignOp<Tensor, const OtherDerived> Assign;
+ Assign assign(*this, other);
+ resize(TensorEvaluator<const Assign, DefaultDevice>(assign, DefaultDevice()).dimensions());
+ internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
+ return *this;
+ }
+
+#if EIGEN_HAS_VARIADIC_TEMPLATES
+ template<typename... IndexTypes> EIGEN_DEVICE_FUNC
+ void resize(Index firstDimension, IndexTypes... otherDimensions)
+ {
+ // The number of dimensions used to resize a tensor must be equal to the rank of the tensor.
+ EIGEN_STATIC_ASSERT(sizeof...(otherDimensions) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
+ resize(array<Index, NumIndices>{{firstDimension, otherDimensions...}});
+ }
+#endif
+
+ /** Normal Dimension */
+ EIGEN_DEVICE_FUNC void resize(const array<Index, NumIndices>& dimensions)
+ {
+ int i;
+ Index size = Index(1);
+ for (i = 0; i < NumIndices; i++) {
+ internal::check_rows_cols_for_overflow<Dynamic>::run(size, dimensions[i]);
+ size *= dimensions[i];
+ }
+ #ifdef EIGEN_INITIALIZE_COEFFS
+ bool size_changed = size != this->size();
+ m_storage.resize(size, dimensions);
+ if(size_changed) EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
+ #else
+ m_storage.resize(size, dimensions);
+ #endif
+ }
+
+ // Why this overload, DSizes is derived from array ??? //
+ EIGEN_DEVICE_FUNC void resize(const DSizes<Index, NumIndices>& dimensions) {
+ array<Index, NumIndices> dims;
+ for (int i = 0; i < NumIndices; ++i) {
+ dims[i] = dimensions[i];
+ }
+ resize(dims);
+ }
+
+ EIGEN_DEVICE_FUNC
+ void resize()
+ {
+ EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
+ // Nothing to do: rank 0 tensors have fixed size
+ }
+
+ /** Custom Dimension */
+#ifdef EIGEN_HAS_SFINAE
+ template<typename CustomDimension,
+ EIGEN_SFINAE_ENABLE_IF( !(isOfNormalIndex<CustomDimension>::value) )
+ >
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void resize(CustomDimension& dimensions)
+ {
+ resize(internal::customIndices2Array<Index,NumIndices>(dimensions));
+ }
+#endif
+
+#ifndef EIGEN_EMULATE_CXX11_META_H
+ template <typename std::ptrdiff_t... Indices>
+ EIGEN_DEVICE_FUNC
+ void resize(const Sizes<Indices...>& dimensions) {
+ array<Index, NumIndices> dims;
+ for (int i = 0; i < NumIndices; ++i) {
+ dims[i] = static_cast<Index>(dimensions[i]);
+ }
+ resize(dims);
+ }
+#else
+ template <std::size_t V1, std::size_t V2, std::size_t V3, std::size_t V4, std::size_t V5>
+ EIGEN_DEVICE_FUNC
+ void resize(const Sizes<V1, V2, V3, V4, V5>& dimensions) {
+ array<Index, NumIndices> dims;
+ for (int i = 0; i < NumIndices; ++i) {
+ dims[i] = static_cast<Index>(dimensions[i]);
+ }
+ resize(dims);
+ }
+#endif
+
+ protected:
+
+ bool checkIndexRange(const array<Index, NumIndices>& indices) const
+ {
+ using internal::array_apply_and_reduce;
+ using internal::array_zip_and_reduce;
+ using internal::greater_equal_zero_op;
+ using internal::logical_and_op;
+ using internal::lesser_op;
+
+ return
+ // check whether the indices are all >= 0
+ array_apply_and_reduce<logical_and_op, greater_equal_zero_op>(indices) &&
+ // check whether the indices fit in the dimensions
+ array_zip_and_reduce<logical_and_op, lesser_op>(indices, m_storage.dimensions());
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index linearizedIndex(const array<Index, NumIndices>& indices) const
+ {
+ if (Options&RowMajor) {
+ return m_storage.dimensions().IndexOfRowMajor(indices);
+ } else {
+ return m_storage.dimensions().IndexOfColMajor(indices);
+ }
+ }
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_H
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorArgMax.h b/unsupported/Eigen/CXX11/src/Tensor/TensorArgMax.h
new file mode 100644
index 000000000..d06f40cd8
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorArgMax.h
@@ -0,0 +1,299 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2015 Eugene Brevdo <ebrevdo@gmail.com>
+// Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_ARG_MAX_H
+#define EIGEN_CXX11_TENSOR_TENSOR_ARG_MAX_H
+
+namespace Eigen {
+namespace internal {
+
+/** \class TensorIndexTuple
+ * \ingroup CXX11_Tensor_Module
+ *
+ * \brief Tensor + Index Tuple class.
+ *
+ *
+ */
+template<typename XprType>
+struct traits<TensorIndexTupleOp<XprType> > : public traits<XprType>
+{
+ typedef traits<XprType> XprTraits;
+ typedef typename XprTraits::StorageKind StorageKind;
+ typedef typename XprTraits::Index Index;
+ typedef Tuple<Index, typename XprTraits::Scalar> Scalar;
+ typedef typename XprType::Nested Nested;
+ typedef typename remove_reference<Nested>::type _Nested;
+ static const int NumDimensions = XprTraits::NumDimensions;
+ static const int Layout = XprTraits::Layout;
+};
+
+template<typename XprType>
+struct eval<TensorIndexTupleOp<XprType>, Eigen::Dense>
+{
+ typedef const TensorIndexTupleOp<XprType>& type;
+};
+
+template<typename XprType>
+struct nested<TensorIndexTupleOp<XprType>, 1,
+ typename eval<TensorIndexTupleOp<XprType> >::type>
+{
+ typedef TensorIndexTupleOp<XprType> type;
+};
+
+} // end namespace internal
+
+template<typename XprType>
+class TensorIndexTupleOp : public TensorBase<TensorIndexTupleOp<XprType>, ReadOnlyAccessors>
+{
+ public:
+ typedef typename Eigen::internal::traits<TensorIndexTupleOp>::Scalar Scalar;
+ typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
+ typedef typename Eigen::internal::nested<TensorIndexTupleOp>::type Nested;
+ typedef typename Eigen::internal::traits<TensorIndexTupleOp>::StorageKind StorageKind;
+ typedef typename Eigen::internal::traits<TensorIndexTupleOp>::Index Index;
+ typedef Tuple<Index, typename XprType::CoeffReturnType> CoeffReturnType;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorIndexTupleOp(const XprType& expr)
+ : m_xpr(expr) {}
+
+ EIGEN_DEVICE_FUNC
+ const typename internal::remove_all<typename XprType::Nested>::type&
+ expression() const { return m_xpr; }
+
+ protected:
+ typename XprType::Nested m_xpr;
+};
+
+// Eval as rvalue
+template<typename ArgType, typename Device>
+struct TensorEvaluator<const TensorIndexTupleOp<ArgType>, Device>
+{
+ typedef TensorIndexTupleOp<ArgType> XprType;
+ typedef typename XprType::Index Index;
+ typedef typename XprType::Scalar Scalar;
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+
+ typedef typename TensorEvaluator<ArgType, Device>::Dimensions Dimensions;
+ static const int NumDims = internal::array_size<Dimensions>::value;
+
+ enum {
+ IsAligned = /*TensorEvaluator<ArgType, Device>::IsAligned*/ false,
+ PacketAccess = /*TensorEvaluator<ArgType, Device>::PacketAccess*/ false,
+ BlockAccess = false,
+ Layout = TensorEvaluator<ArgType, Device>::Layout,
+ CoordAccess = false, // to be implemented
+ RawAccess = false
+ };
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
+ : m_impl(op.expression(), device) { }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const {
+ return m_impl.dimensions();
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* /*data*/) {
+ m_impl.evalSubExprsIfNeeded(NULL);
+ return true;
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
+ m_impl.cleanup();
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
+ {
+ return CoeffReturnType(index, m_impl.coeff(index));
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
+ costPerCoeff(bool vectorized) const {
+ return m_impl.costPerCoeff(vectorized) + TensorOpCost(0, 0, 1);
+ }
+
+ EIGEN_DEVICE_FUNC Scalar* data() const { return NULL; }
+
+ protected:
+ TensorEvaluator<ArgType, Device> m_impl;
+};
+
+namespace internal {
+
+/** \class TensorTupleIndex
+ * \ingroup CXX11_Tensor_Module
+ *
+ * \brief Converts to Tensor<Tuple<Index, Scalar> > and reduces to Tensor<Index>.
+ *
+ */
+template<typename ReduceOp, typename Dims, typename XprType>
+struct traits<TensorTupleReducerOp<ReduceOp, Dims, XprType> > : public traits<XprType>
+{
+ typedef traits<XprType> XprTraits;
+ typedef typename XprTraits::StorageKind StorageKind;
+ typedef typename XprTraits::Index Index;
+ typedef Index Scalar;
+ typedef typename XprType::Nested Nested;
+ typedef typename remove_reference<Nested>::type _Nested;
+ static const int NumDimensions = XprTraits::NumDimensions - array_size<Dims>::value;
+ static const int Layout = XprTraits::Layout;
+};
+
+template<typename ReduceOp, typename Dims, typename XprType>
+struct eval<TensorTupleReducerOp<ReduceOp, Dims, XprType>, Eigen::Dense>
+{
+ typedef const TensorTupleReducerOp<ReduceOp, Dims, XprType>& type;
+};
+
+template<typename ReduceOp, typename Dims, typename XprType>
+struct nested<TensorTupleReducerOp<ReduceOp, Dims, XprType>, 1,
+ typename eval<TensorTupleReducerOp<ReduceOp, Dims, XprType> >::type>
+{
+ typedef TensorTupleReducerOp<ReduceOp, Dims, XprType> type;
+};
+
+} // end namespace internal
+
+template<typename ReduceOp, typename Dims, typename XprType>
+class TensorTupleReducerOp : public TensorBase<TensorTupleReducerOp<ReduceOp, Dims, XprType>, ReadOnlyAccessors>
+{
+ public:
+ typedef typename Eigen::internal::traits<TensorTupleReducerOp>::Scalar Scalar;
+ typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
+ typedef typename Eigen::internal::nested<TensorTupleReducerOp>::type Nested;
+ typedef typename Eigen::internal::traits<TensorTupleReducerOp>::StorageKind StorageKind;
+ typedef typename Eigen::internal::traits<TensorTupleReducerOp>::Index Index;
+ typedef Index CoeffReturnType;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorTupleReducerOp(const XprType& expr,
+ const ReduceOp& reduce_op,
+ const int return_dim,
+ const Dims& reduce_dims)
+ : m_xpr(expr), m_reduce_op(reduce_op), m_return_dim(return_dim), m_reduce_dims(reduce_dims) {}
+
+ EIGEN_DEVICE_FUNC
+ const typename internal::remove_all<typename XprType::Nested>::type&
+ expression() const { return m_xpr; }
+
+ EIGEN_DEVICE_FUNC
+ const ReduceOp& reduce_op() const { return m_reduce_op; }
+
+ EIGEN_DEVICE_FUNC
+ const Dims& reduce_dims() const { return m_reduce_dims; }
+
+ EIGEN_DEVICE_FUNC
+ int return_dim() const { return m_return_dim; }
+
+ protected:
+ typename XprType::Nested m_xpr;
+ const ReduceOp m_reduce_op;
+ const int m_return_dim;
+ const Dims m_reduce_dims;
+};
+
+// Eval as rvalue
+template<typename ReduceOp, typename Dims, typename ArgType, typename Device>
+struct TensorEvaluator<const TensorTupleReducerOp<ReduceOp, Dims, ArgType>, Device>
+{
+ typedef TensorTupleReducerOp<ReduceOp, Dims, ArgType> XprType;
+ typedef typename XprType::Index Index;
+ typedef typename XprType::Scalar Scalar;
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+ typedef typename TensorIndexTupleOp<ArgType>::CoeffReturnType TupleType;
+ typedef typename TensorEvaluator<const TensorReductionOp<ReduceOp, Dims, const TensorIndexTupleOp<ArgType> >, Device>::Dimensions Dimensions;
+ typedef typename TensorEvaluator<const TensorIndexTupleOp<ArgType> , Device>::Dimensions InputDimensions;
+ static const int NumDims = internal::array_size<InputDimensions>::value;
+ typedef array<Index, NumDims> StrideDims;
+
+ enum {
+ IsAligned = /*TensorEvaluator<ArgType, Device>::IsAligned*/ false,
+ PacketAccess = /*TensorEvaluator<ArgType, Device>::PacketAccess*/ false,
+ BlockAccess = false,
+ Layout = TensorEvaluator<const TensorReductionOp<ReduceOp, Dims, const TensorIndexTupleOp<ArgType> >, Device>::Layout,
+ CoordAccess = false, // to be implemented
+ RawAccess = false
+ };
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
+ : m_orig_impl(op.expression(), device),
+ m_impl(op.expression().index_tuples().reduce(op.reduce_dims(), op.reduce_op()), device),
+ m_return_dim(op.return_dim()) {
+
+ gen_strides(m_orig_impl.dimensions(), m_strides);
+ if (Layout == static_cast<int>(ColMajor)) {
+ const Index total_size = internal::array_prod(m_orig_impl.dimensions());
+ m_stride_mod = (m_return_dim < NumDims - 1) ? m_strides[m_return_dim + 1] : total_size;
+ } else {
+ const Index total_size = internal::array_prod(m_orig_impl.dimensions());
+ m_stride_mod = (m_return_dim > 0) ? m_strides[m_return_dim - 1] : total_size;
+ }
+ m_stride_div = m_strides[m_return_dim];
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const {
+ return m_impl.dimensions();
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* /*data*/) {
+ m_impl.evalSubExprsIfNeeded(NULL);
+ return true;
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
+ m_impl.cleanup();
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const {
+ const TupleType v = m_impl.coeff(index);
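+    // v.first is the flat index of the arg(min|max) in the input tensor.
+    // When a return dimension is requested, strip the strides of the outer
+    // dimensions (mod) and divide by that dimension's stride to recover the
+    // coordinate along m_return_dim.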
+ return (m_return_dim < 0) ? v.first : (v.first % m_stride_mod) / m_stride_div;
+ }
+
+ EIGEN_DEVICE_FUNC Scalar* data() const { return NULL; }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
+ costPerCoeff(bool vectorized) const {
+ const double compute_cost = 1.0 +
+ (m_return_dim < 0 ? 0.0 : (TensorOpCost::ModCost<Index>() + TensorOpCost::DivCost<Index>()));
+ return m_orig_impl.costPerCoeff(vectorized) +
+ m_impl.costPerCoeff(vectorized) + TensorOpCost(0, 0, compute_cost);
+ }
+
+ private:
+ EIGEN_DEVICE_FUNC void gen_strides(const InputDimensions& dims, StrideDims& strides) {
+ if (m_return_dim < 0) {
+ return; // Won't be using the strides.
+ }
+ eigen_assert(m_return_dim < NumDims &&
+ "Asking to convert index to a dimension outside of the rank");
+
+    // Compute the stride of every dimension. The constructor then derives
+    // m_stride_mod and m_stride_div from these to extract the coordinate
+    // along m_return_dim from a flat index.
+ if (Layout == static_cast<int>(ColMajor)) {
+ strides[0] = 1;
+ for (int i = 1; i < NumDims; ++i) {
+ strides[i] = strides[i-1] * dims[i-1];
+ }
+ } else {
+ strides[NumDims-1] = 1;
+ for (int i = NumDims - 2; i >= 0; --i) {
+ strides[i] = strides[i+1] * dims[i+1];
+ }
+ }
+ }
+
+ protected:
+ TensorEvaluator<const TensorIndexTupleOp<ArgType>, Device> m_orig_impl;
+ TensorEvaluator<const TensorReductionOp<ReduceOp, Dims, const TensorIndexTupleOp<ArgType> >, Device> m_impl;
+ const int m_return_dim;
+ StrideDims m_strides;
+ Index m_stride_mod;
+ Index m_stride_div;
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_ARG_MAX_H
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorAssign.h b/unsupported/Eigen/CXX11/src/Tensor/TensorAssign.h
new file mode 100644
index 000000000..166be200c
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorAssign.h
@@ -0,0 +1,181 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_ASSIGN_H
+#define EIGEN_CXX11_TENSOR_TENSOR_ASSIGN_H
+
+namespace Eigen {
+
+/** \class TensorAssign
+ * \ingroup CXX11_Tensor_Module
+ *
+ * \brief The tensor assignment class.
+ *
+  * This class represents the assignment of the values resulting from the evaluation of
+ * the rhs expression to the memory locations denoted by the lhs expression.
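+  *
+  * This op is not created by users directly; it is built internally whenever
+  * a tensor expression is assigned. A minimal sketch of what triggers it:
+  * \code
+  * Eigen::Tensor<float, 2> a(3, 4), b(3, 4), c(3, 4);
+  * a.setRandom();
+  * b.setRandom();
+  * c = a + b;  // wraps the assignment in a TensorAssignOp and evaluates it
+  * \endcode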
+ */
+namespace internal {
+template<typename LhsXprType, typename RhsXprType>
+struct traits<TensorAssignOp<LhsXprType, RhsXprType> >
+{
+ typedef typename LhsXprType::Scalar Scalar;
+ typedef typename traits<LhsXprType>::StorageKind StorageKind;
+ typedef typename promote_index_type<typename traits<LhsXprType>::Index,
+ typename traits<RhsXprType>::Index>::type Index;
+ typedef typename LhsXprType::Nested LhsNested;
+ typedef typename RhsXprType::Nested RhsNested;
+ typedef typename remove_reference<LhsNested>::type _LhsNested;
+ typedef typename remove_reference<RhsNested>::type _RhsNested;
+ static const std::size_t NumDimensions = internal::traits<LhsXprType>::NumDimensions;
+ static const int Layout = internal::traits<LhsXprType>::Layout;
+
+ enum {
+ Flags = 0
+ };
+};
+
+template<typename LhsXprType, typename RhsXprType>
+struct eval<TensorAssignOp<LhsXprType, RhsXprType>, Eigen::Dense>
+{
+ typedef const TensorAssignOp<LhsXprType, RhsXprType>& type;
+};
+
+template<typename LhsXprType, typename RhsXprType>
+struct nested<TensorAssignOp<LhsXprType, RhsXprType>, 1, typename eval<TensorAssignOp<LhsXprType, RhsXprType> >::type>
+{
+ typedef TensorAssignOp<LhsXprType, RhsXprType> type;
+};
+
+} // end namespace internal
+
+
+
+template<typename LhsXprType, typename RhsXprType>
+class TensorAssignOp : public TensorBase<TensorAssignOp<LhsXprType, RhsXprType> >
+{
+ public:
+ typedef typename Eigen::internal::traits<TensorAssignOp>::Scalar Scalar;
+ typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
+ typedef typename LhsXprType::CoeffReturnType CoeffReturnType;
+ typedef typename Eigen::internal::nested<TensorAssignOp>::type Nested;
+ typedef typename Eigen::internal::traits<TensorAssignOp>::StorageKind StorageKind;
+ typedef typename Eigen::internal::traits<TensorAssignOp>::Index Index;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorAssignOp(LhsXprType& lhs, const RhsXprType& rhs)
+ : m_lhs_xpr(lhs), m_rhs_xpr(rhs) {}
+
+ /** \returns the nested expressions */
+ EIGEN_DEVICE_FUNC
+ typename internal::remove_all<typename LhsXprType::Nested>::type&
+ lhsExpression() const { return *((typename internal::remove_all<typename LhsXprType::Nested>::type*)&m_lhs_xpr); }
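+    // The C-style cast above strips the const added by Nested so that the
+    // assignment evaluator can write through the returned reference.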
+
+ EIGEN_DEVICE_FUNC
+ const typename internal::remove_all<typename RhsXprType::Nested>::type&
+ rhsExpression() const { return m_rhs_xpr; }
+
+ protected:
+ typename internal::remove_all<typename LhsXprType::Nested>::type& m_lhs_xpr;
+ const typename internal::remove_all<typename RhsXprType::Nested>::type& m_rhs_xpr;
+};
+
+
+template<typename LeftArgType, typename RightArgType, typename Device>
+struct TensorEvaluator<const TensorAssignOp<LeftArgType, RightArgType>, Device>
+{
+ typedef TensorAssignOp<LeftArgType, RightArgType> XprType;
+ typedef typename XprType::Index Index;
+ typedef typename XprType::Scalar Scalar;
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+ typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
+ typedef typename TensorEvaluator<RightArgType, Device>::Dimensions Dimensions;
+ static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
+
+ enum {
+ IsAligned = TensorEvaluator<LeftArgType, Device>::IsAligned & TensorEvaluator<RightArgType, Device>::IsAligned,
+ PacketAccess = TensorEvaluator<LeftArgType, Device>::PacketAccess & TensorEvaluator<RightArgType, Device>::PacketAccess,
+ Layout = TensorEvaluator<LeftArgType, Device>::Layout,
+ RawAccess = TensorEvaluator<LeftArgType, Device>::RawAccess
+ };
+
+ EIGEN_DEVICE_FUNC TensorEvaluator(const XprType& op, const Device& device) :
+ m_leftImpl(op.lhsExpression(), device),
+ m_rightImpl(op.rhsExpression(), device)
+ {
+ EIGEN_STATIC_ASSERT((static_cast<int>(TensorEvaluator<LeftArgType, Device>::Layout) == static_cast<int>(TensorEvaluator<RightArgType, Device>::Layout)), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ }
+
+ EIGEN_DEVICE_FUNC const Dimensions& dimensions() const
+ {
+ // The dimensions of the lhs and the rhs tensors should be equal to prevent
+ // overflows and ensure the result is fully initialized.
+ // TODO: use left impl instead if right impl dimensions are known at compile time.
+ return m_rightImpl.dimensions();
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar*) {
+ eigen_assert(dimensions_match(m_leftImpl.dimensions(), m_rightImpl.dimensions()));
+ m_leftImpl.evalSubExprsIfNeeded(NULL);
+ // If the lhs provides raw access to its storage area (i.e. if m_leftImpl.data() returns a non
+ // null value), attempt to evaluate the rhs expression in place. Returns true iff in place
+ // evaluation isn't supported and the caller still needs to manually assign the values generated
+ // by the rhs to the lhs.
+ return m_rightImpl.evalSubExprsIfNeeded(m_leftImpl.data());
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
+ m_leftImpl.cleanup();
+ m_rightImpl.cleanup();
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalScalar(Index i) {
+ m_leftImpl.coeffRef(i) = m_rightImpl.coeff(i);
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalPacket(Index i) {
+ const int LhsStoreMode = TensorEvaluator<LeftArgType, Device>::IsAligned ? Aligned : Unaligned;
+ const int RhsLoadMode = TensorEvaluator<RightArgType, Device>::IsAligned ? Aligned : Unaligned;
+ m_leftImpl.template writePacket<LhsStoreMode>(i, m_rightImpl.template packet<RhsLoadMode>(i));
+ }
+ EIGEN_DEVICE_FUNC CoeffReturnType coeff(Index index) const
+ {
+ return m_leftImpl.coeff(index);
+ }
+ template<int LoadMode>
+ EIGEN_DEVICE_FUNC PacketReturnType packet(Index index) const
+ {
+ return m_leftImpl.template packet<LoadMode>(index);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
+ costPerCoeff(bool vectorized) const {
+ // We assume that evalPacket or evalScalar is called to perform the
+ // assignment and account for the cost of the write here, but reduce left
+ // cost by one load because we are using m_leftImpl.coeffRef.
+ TensorOpCost left = m_leftImpl.costPerCoeff(vectorized);
+ return m_rightImpl.costPerCoeff(vectorized) +
+ TensorOpCost(
+ numext::maxi(0.0, left.bytes_loaded() - sizeof(CoeffReturnType)),
+ left.bytes_stored(), left.compute_cycles()) +
+ TensorOpCost(0, sizeof(CoeffReturnType), 0, vectorized, PacketSize);
+ }
+
+ /// required by sycl in order to extract the accessor
+ const TensorEvaluator<LeftArgType, Device>& left_impl() const { return m_leftImpl; }
+ /// required by sycl in order to extract the accessor
+ const TensorEvaluator<RightArgType, Device>& right_impl() const { return m_rightImpl; }
+
+ EIGEN_DEVICE_FUNC CoeffReturnType* data() const { return m_leftImpl.data(); }
+
+ private:
+ TensorEvaluator<LeftArgType, Device> m_leftImpl;
+ TensorEvaluator<RightArgType, Device> m_rightImpl;
+};
+
+} // end namespace Eigen
+
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_ASSIGN_H
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorBase.h b/unsupported/Eigen/CXX11/src/Tensor/TensorBase.h
new file mode 100644
index 000000000..7a45a5cf4
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorBase.h
@@ -0,0 +1,1010 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_BASE_H
+#define EIGEN_CXX11_TENSOR_TENSOR_BASE_H
+
+// clang-format off
+
+namespace Eigen {
+
+/** \class TensorBase
+ * \ingroup CXX11_Tensor_Module
+ *
+ * \brief The tensor base class.
+ *
+  * This class is the common parent of the Tensor and TensorMap classes, thus
+  * making it possible to use either class interchangeably in expressions.
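+  *
+  * A minimal sketch (illustrative only) of mixing the two:
+  * \code
+  * float data[16];
+  * Eigen::TensorMap<Eigen::Tensor<float, 2> > m(data, 4, 4);
+  * Eigen::Tensor<float, 2> t(4, 4);
+  * t.setRandom();
+  * m = t * t.constant(2.0f);  // Tensor and TensorMap in one expression
+  * \endcode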
+ */
+
+template<typename Derived>
+class TensorBase<Derived, ReadOnlyAccessors>
+{
+ public:
+ typedef internal::traits<Derived> DerivedTraits;
+ typedef typename DerivedTraits::Scalar Scalar;
+ typedef typename DerivedTraits::Index Index;
+ typedef typename internal::remove_const<Scalar>::type CoeffReturnType;
+ static const int NumDimensions = DerivedTraits::NumDimensions;
+
+ // Generic nullary operation support.
+ template <typename CustomNullaryOp> EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const TensorCwiseNullaryOp<CustomNullaryOp, const Derived>
+ nullaryExpr(const CustomNullaryOp& func) const {
+ return TensorCwiseNullaryOp<CustomNullaryOp, const Derived>(derived(), func);
+ }
+
+ // Coefficient-wise nullary operators
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const TensorCwiseNullaryOp<internal::scalar_constant_op<Scalar>, const Derived>
+ constant(const Scalar& value) const {
+ return nullaryExpr(internal::scalar_constant_op<Scalar>(value));
+ }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const TensorCwiseNullaryOp<internal::UniformRandomGenerator<Scalar>, const Derived>
+ random() const {
+ return nullaryExpr(internal::UniformRandomGenerator<Scalar>());
+ }
+ template <typename RandomGenerator> EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const TensorCwiseNullaryOp<RandomGenerator, const Derived>
+ random(const RandomGenerator& gen = RandomGenerator()) const {
+ return nullaryExpr(gen);
+ }
+
+ // Tensor generation
+ template <typename Generator> EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const TensorGeneratorOp<Generator, const Derived>
+ generate(const Generator& generator) const {
+ return TensorGeneratorOp<Generator, const Derived>(derived(), generator);
+ }
+
+ // Generic unary operation support.
+ template <typename CustomUnaryOp> EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<CustomUnaryOp, const Derived>
+ unaryExpr(const CustomUnaryOp& func) const {
+ return TensorCwiseUnaryOp<CustomUnaryOp, const Derived>(derived(), func);
+ }
+
+ // Coefficient-wise unary operators
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_opposite_op<Scalar>, const Derived>
+ operator-() const {
+ return unaryExpr(internal::scalar_opposite_op<Scalar>());
+ }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_sqrt_op<Scalar>, const Derived>
+ sqrt() const {
+ return unaryExpr(internal::scalar_sqrt_op<Scalar>());
+ }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_sign_op<Scalar>, const Derived>
+ sign() const {
+ return unaryExpr(internal::scalar_sign_op<Scalar>());
+ }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_rsqrt_op<Scalar>, const Derived>
+ rsqrt() const {
+ return unaryExpr(internal::scalar_rsqrt_op<Scalar>());
+ }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_square_op<Scalar>, const Derived>
+ square() const {
+ return unaryExpr(internal::scalar_square_op<Scalar>());
+ }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_cube_op<Scalar>, const Derived>
+ cube() const {
+ return unaryExpr(internal::scalar_cube_op<Scalar>());
+ }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_inverse_op<Scalar>, const Derived>
+ inverse() const {
+ return unaryExpr(internal::scalar_inverse_op<Scalar>());
+ }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_tanh_op<Scalar>, const Derived>
+ tanh() const {
+ return unaryExpr(internal::scalar_tanh_op<Scalar>());
+ }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_lgamma_op<Scalar>, const Derived>
+ lgamma() const {
+ return unaryExpr(internal::scalar_lgamma_op<Scalar>());
+ }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_digamma_op<Scalar>, const Derived>
+ digamma() const {
+ return unaryExpr(internal::scalar_digamma_op<Scalar>());
+ }
+
+ // igamma(a = this, x = other)
+ template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorCwiseBinaryOp<internal::scalar_igamma_op<Scalar>, const Derived, const OtherDerived>
+ igamma(const OtherDerived& other) const {
+ return binaryExpr(other.derived(), internal::scalar_igamma_op<Scalar>());
+ }
+
+ // igammac(a = this, x = other)
+ template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorCwiseBinaryOp<internal::scalar_igammac_op<Scalar>, const Derived, const OtherDerived>
+ igammac(const OtherDerived& other) const {
+ return binaryExpr(other.derived(), internal::scalar_igammac_op<Scalar>());
+ }
+
+ // zeta(x = this, q = other)
+ template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorCwiseBinaryOp<internal::scalar_zeta_op<Scalar>, const Derived, const OtherDerived>
+ zeta(const OtherDerived& other) const {
+ return binaryExpr(other.derived(), internal::scalar_zeta_op<Scalar>());
+ }
+
+ // polygamma(n = this, x = other)
+ template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorCwiseBinaryOp<internal::scalar_polygamma_op<Scalar>, const Derived, const OtherDerived>
+ polygamma(const OtherDerived& other) const {
+ return binaryExpr(other.derived(), internal::scalar_polygamma_op<Scalar>());
+ }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_erf_op<Scalar>, const Derived>
+ erf() const {
+ return unaryExpr(internal::scalar_erf_op<Scalar>());
+ }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_erfc_op<Scalar>, const Derived>
+ erfc() const {
+ return unaryExpr(internal::scalar_erfc_op<Scalar>());
+ }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_sigmoid_op<Scalar>, const Derived>
+ sigmoid() const {
+ return unaryExpr(internal::scalar_sigmoid_op<Scalar>());
+ }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_exp_op<Scalar>, const Derived>
+ exp() const {
+ return unaryExpr(internal::scalar_exp_op<Scalar>());
+ }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_log_op<Scalar>, const Derived>
+ log() const {
+ return unaryExpr(internal::scalar_log_op<Scalar>());
+ }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_log1p_op<Scalar>, const Derived>
+ log1p() const {
+ return unaryExpr(internal::scalar_log1p_op<Scalar>());
+ }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_abs_op<Scalar>, const Derived>
+ abs() const {
+ return unaryExpr(internal::scalar_abs_op<Scalar>());
+ }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_conjugate_op<Scalar>, const Derived>
+ conjugate() const {
+ return unaryExpr(internal::scalar_conjugate_op<Scalar>());
+ }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::bind2nd_op<internal::scalar_pow_op<Scalar,Scalar> >, const Derived>
+ pow(Scalar exponent) const {
+ return unaryExpr(internal::bind2nd_op<internal::scalar_pow_op<Scalar,Scalar> >(exponent));
+ }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_real_op<Scalar>, const Derived>
+ real() const {
+ return unaryExpr(internal::scalar_real_op<Scalar>());
+ }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_imag_op<Scalar>, const Derived>
+ imag() const {
+ return unaryExpr(internal::scalar_imag_op<Scalar>());
+ }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::bind2nd_op<internal::scalar_sum_op<Scalar,Scalar> >, const Derived>
+ operator+ (Scalar rhs) const {
+ return unaryExpr(internal::bind2nd_op<internal::scalar_sum_op<Scalar,Scalar> >(rhs));
+ }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE friend
+ const TensorCwiseUnaryOp<internal::bind1st_op<internal::scalar_sum_op<Scalar> >, const Derived>
+ operator+ (Scalar lhs, const Derived& rhs) {
+ return rhs.unaryExpr(internal::bind1st_op<internal::scalar_sum_op<Scalar> >(lhs));
+ }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::bind2nd_op<internal::scalar_difference_op<Scalar,Scalar> >, const Derived>
+ operator- (Scalar rhs) const {
+ EIGEN_STATIC_ASSERT((NumTraits<Scalar>::IsSigned || internal::is_same<Scalar, const std::complex<float> >::value), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ return unaryExpr(internal::bind2nd_op<internal::scalar_difference_op<Scalar,Scalar> >(rhs));
+ }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE friend
+ const TensorCwiseUnaryOp<internal::bind1st_op<internal::scalar_difference_op<Scalar> >, const Derived>
+ operator- (Scalar lhs, const Derived& rhs) {
+ return rhs.unaryExpr(internal::bind1st_op<internal::scalar_difference_op<Scalar> >(lhs));
+ }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::bind2nd_op<internal::scalar_product_op<Scalar,Scalar> >, const Derived>
+ operator* (Scalar rhs) const {
+ return unaryExpr(internal::bind2nd_op<internal::scalar_product_op<Scalar,Scalar> >(rhs));
+ }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE friend
+ const TensorCwiseUnaryOp<internal::bind1st_op<internal::scalar_product_op<Scalar> >, const Derived>
+ operator* (Scalar lhs, const Derived& rhs) {
+ return rhs.unaryExpr(internal::bind1st_op<internal::scalar_product_op<Scalar> >(lhs));
+ }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::bind2nd_op<internal::scalar_quotient_op<Scalar,Scalar> >, const Derived>
+ operator/ (Scalar rhs) const {
+ return unaryExpr(internal::bind2nd_op<internal::scalar_quotient_op<Scalar,Scalar> >(rhs));
+ }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE friend
+ const TensorCwiseUnaryOp<internal::bind1st_op<internal::scalar_quotient_op<Scalar> >, const Derived>
+ operator/ (Scalar lhs, const Derived& rhs) {
+ return rhs.unaryExpr(internal::bind1st_op<internal::scalar_quotient_op<Scalar> >(lhs));
+ }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_mod_op<Scalar>, const Derived>
+ operator% (Scalar rhs) const {
+ EIGEN_STATIC_ASSERT(NumTraits<Scalar>::IsInteger, YOU_MADE_A_PROGRAMMING_MISTAKE_TRY_MOD);
+ return unaryExpr(internal::scalar_mod_op<Scalar>(rhs));
+ }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const TensorCwiseBinaryOp<internal::scalar_max_op<Scalar>, const Derived, const TensorCwiseNullaryOp<internal::scalar_constant_op<Scalar>, const Derived> >
+ cwiseMax(Scalar threshold) const {
+ return cwiseMax(constant(threshold));
+ }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const TensorCwiseBinaryOp<internal::scalar_min_op<Scalar>, const Derived, const TensorCwiseNullaryOp<internal::scalar_constant_op<Scalar>, const Derived> >
+ cwiseMin(Scalar threshold) const {
+ return cwiseMin(constant(threshold));
+ }
+
+ template <typename NewType> EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const TensorConversionOp<NewType, const Derived>
+ cast() const {
+ return TensorConversionOp<NewType, const Derived>(derived());
+ }
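+    // Example for cast() (illustrative sketch): convert float coefficients
+    // to int.
+    //   Eigen::Tensor<float, 2> t(2, 2);
+    //   t.setRandom();
+    //   Eigen::Tensor<int, 2> i = t.cast<int>();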
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_round_op<Scalar>, const Derived>
+ round() const {
+ return unaryExpr(internal::scalar_round_op<Scalar>());
+ }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_ceil_op<Scalar>, const Derived>
+ ceil() const {
+ return unaryExpr(internal::scalar_ceil_op<Scalar>());
+ }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_floor_op<Scalar>, const Derived>
+ floor() const {
+ return unaryExpr(internal::scalar_floor_op<Scalar>());
+ }
+
+ // Generic binary operation support.
+ template <typename CustomBinaryOp, typename OtherDerived> EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const TensorCwiseBinaryOp<CustomBinaryOp, const Derived, const OtherDerived>
+ binaryExpr(const OtherDerived& other, const CustomBinaryOp& func) const {
+ return TensorCwiseBinaryOp<CustomBinaryOp, const Derived, const OtherDerived>(derived(), other, func);
+ }
+
+ // Coefficient-wise binary operators.
+ template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorCwiseBinaryOp<internal::scalar_sum_op<Scalar>, const Derived, const OtherDerived>
+ operator+(const OtherDerived& other) const {
+ return binaryExpr(other.derived(), internal::scalar_sum_op<Scalar>());
+ }
+
+ template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorCwiseBinaryOp<internal::scalar_difference_op<Scalar>, const Derived, const OtherDerived>
+ operator-(const OtherDerived& other) const {
+ return binaryExpr(other.derived(), internal::scalar_difference_op<Scalar>());
+ }
+
+ template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorCwiseBinaryOp<internal::scalar_product_op<Scalar>, const Derived, const OtherDerived>
+ operator*(const OtherDerived& other) const {
+ return binaryExpr(other.derived(), internal::scalar_product_op<Scalar>());
+ }
+
+ template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorCwiseBinaryOp<internal::scalar_quotient_op<Scalar>, const Derived, const OtherDerived>
+ operator/(const OtherDerived& other) const {
+ return binaryExpr(other.derived(), internal::scalar_quotient_op<Scalar>());
+ }
+
+ template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorCwiseBinaryOp<internal::scalar_max_op<Scalar>, const Derived, const OtherDerived>
+ cwiseMax(const OtherDerived& other) const {
+ return binaryExpr(other.derived(), internal::scalar_max_op<Scalar>());
+ }
+
+ template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorCwiseBinaryOp<internal::scalar_min_op<Scalar>, const Derived, const OtherDerived>
+ cwiseMin(const OtherDerived& other) const {
+ return binaryExpr(other.derived(), internal::scalar_min_op<Scalar>());
+ }
+
+ template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorCwiseBinaryOp<internal::scalar_boolean_and_op, const Derived, const OtherDerived>
+ operator&&(const OtherDerived& other) const {
+ return binaryExpr(other.derived(), internal::scalar_boolean_and_op());
+ }
+
+ template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorCwiseBinaryOp<internal::scalar_boolean_or_op, const Derived, const OtherDerived>
+ operator||(const OtherDerived& other) const {
+ return binaryExpr(other.derived(), internal::scalar_boolean_or_op());
+ }
+
+ template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorCwiseBinaryOp<internal::scalar_boolean_xor_op, const Derived, const OtherDerived>
+ operator^(const OtherDerived& other) const {
+ return binaryExpr(other.derived(), internal::scalar_boolean_xor_op());
+ }
+
+ // Comparisons and tests.
+ template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorCwiseBinaryOp<internal::scalar_cmp_op<Scalar, Scalar, internal::cmp_LT>, const Derived, const OtherDerived>
+ operator<(const OtherDerived& other) const {
+ return binaryExpr(other.derived(), internal::scalar_cmp_op<Scalar, Scalar, internal::cmp_LT>());
+ }
+ template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorCwiseBinaryOp<internal::scalar_cmp_op<Scalar, Scalar, internal::cmp_LE>, const Derived, const OtherDerived>
+ operator<=(const OtherDerived& other) const {
+ return binaryExpr(other.derived(), internal::scalar_cmp_op<Scalar, Scalar, internal::cmp_LE>());
+ }
+ template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorCwiseBinaryOp<internal::scalar_cmp_op<Scalar, Scalar, internal::cmp_GT>, const Derived, const OtherDerived>
+ operator>(const OtherDerived& other) const {
+ return binaryExpr(other.derived(), internal::scalar_cmp_op<Scalar, Scalar, internal::cmp_GT>());
+ }
+ template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorCwiseBinaryOp<internal::scalar_cmp_op<Scalar, Scalar, internal::cmp_GE>, const Derived, const OtherDerived>
+ operator>=(const OtherDerived& other) const {
+ return binaryExpr(other.derived(), internal::scalar_cmp_op<Scalar, Scalar, internal::cmp_GE>());
+ }
+
+ template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorCwiseBinaryOp<internal::scalar_cmp_op<Scalar, Scalar, internal::cmp_EQ>, const Derived, const OtherDerived>
+ operator==(const OtherDerived& other) const {
+ return binaryExpr(other.derived(), internal::scalar_cmp_op<Scalar, Scalar, internal::cmp_EQ>());
+ }
+
+ template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorCwiseBinaryOp<internal::scalar_cmp_op<Scalar, Scalar, internal::cmp_NEQ>, const Derived, const OtherDerived>
+ operator!=(const OtherDerived& other) const {
+ return binaryExpr(other.derived(), internal::scalar_cmp_op<Scalar, Scalar, internal::cmp_NEQ>());
+ }
+
+ // comparisons and tests for Scalars
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const TensorCwiseBinaryOp<internal::scalar_cmp_op<Scalar, Scalar, internal::cmp_LT>, const Derived, const TensorCwiseNullaryOp<internal::scalar_constant_op<Scalar>, const Derived> >
+ operator<(Scalar threshold) const {
+ return operator<(constant(threshold));
+ }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const TensorCwiseBinaryOp<internal::scalar_cmp_op<Scalar, Scalar, internal::cmp_LE>, const Derived, const TensorCwiseNullaryOp<internal::scalar_constant_op<Scalar>, const Derived> >
+ operator<=(Scalar threshold) const {
+ return operator<=(constant(threshold));
+ }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const TensorCwiseBinaryOp<internal::scalar_cmp_op<Scalar, Scalar, internal::cmp_GT>, const Derived, const TensorCwiseNullaryOp<internal::scalar_constant_op<Scalar>, const Derived> >
+ operator>(Scalar threshold) const {
+ return operator>(constant(threshold));
+ }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const TensorCwiseBinaryOp<internal::scalar_cmp_op<Scalar, Scalar, internal::cmp_GE>, const Derived, const TensorCwiseNullaryOp<internal::scalar_constant_op<Scalar>, const Derived> >
+ operator>=(Scalar threshold) const {
+ return operator>=(constant(threshold));
+ }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const TensorCwiseBinaryOp<internal::scalar_cmp_op<Scalar, Scalar, internal::cmp_EQ>, const Derived, const TensorCwiseNullaryOp<internal::scalar_constant_op<Scalar>, const Derived> >
+ operator==(Scalar threshold) const {
+ return operator==(constant(threshold));
+ }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const TensorCwiseBinaryOp<internal::scalar_cmp_op<Scalar, Scalar, internal::cmp_NEQ>, const Derived, const TensorCwiseNullaryOp<internal::scalar_constant_op<Scalar>, const Derived> >
+ operator!=(Scalar threshold) const {
+ return operator!=(constant(threshold));
+ }
+
+ // Checks
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_isnan_op<Scalar>, const Derived>
+ (isnan)() const {
+ return unaryExpr(internal::scalar_isnan_op<Scalar>());
+ }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_isinf_op<Scalar>, const Derived>
+ (isinf)() const {
+ return unaryExpr(internal::scalar_isinf_op<Scalar>());
+ }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_isfinite_op<Scalar>, const Derived>
+ (isfinite)() const {
+ return unaryExpr(internal::scalar_isfinite_op<Scalar>());
+ }
+
+ // Coefficient-wise ternary operators.
+ template<typename ThenDerived, typename ElseDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorSelectOp<const Derived, const ThenDerived, const ElseDerived>
+ select(const ThenDerived& thenTensor, const ElseDerived& elseTensor) const {
+ return TensorSelectOp<const Derived, const ThenDerived, const ElseDerived>(derived(), thenTensor.derived(), elseTensor.derived());
+ }
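+    // Example for select() (illustrative sketch): zero out negative
+    // coefficients.
+    //   Eigen::Tensor<float, 2> t(2, 2);
+    //   t.setRandom();
+    //   Eigen::Tensor<float, 2> r = (t > 0.0f).select(t, t.constant(0.0f));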
+
+ // Contractions.
+ typedef Eigen::IndexPair<Index> DimensionPair;
+
+ template<typename OtherDerived, typename Dimensions> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorContractionOp<const Dimensions, const Derived, const OtherDerived>
+ contract(const OtherDerived& other, const Dimensions& dims) const {
+ return TensorContractionOp<const Dimensions, const Derived, const OtherDerived>(derived(), other.derived(), dims);
+ }
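+    // Example for contract() (illustrative sketch): a matrix product as a
+    // contraction of dimension 1 of the left operand with dimension 0 of the
+    // right one.
+    //   Eigen::Tensor<float, 2> a(2, 3), b(3, 4);
+    //   a.setRandom();
+    //   b.setRandom();
+    //   Eigen::array<Eigen::IndexPair<int>, 1> dims = { Eigen::IndexPair<int>(1, 0) };
+    //   Eigen::Tensor<float, 2> c = a.contract(b, dims);  // 2x4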
+
+ // Convolutions.
+ template<typename KernelDerived, typename Dimensions> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorConvolutionOp<const Dimensions, const Derived, const KernelDerived>
+ convolve(const KernelDerived& kernel, const Dimensions& dims) const {
+ return TensorConvolutionOp<const Dimensions, const Derived, const KernelDerived>(derived(), kernel.derived(), dims);
+ }
+
+ // Fourier transforms
+ template <int FFTDataType, int FFTDirection, typename FFT> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorFFTOp<const FFT, const Derived, FFTDataType, FFTDirection>
+ fft(const FFT& fft) const {
+ return TensorFFTOp<const FFT, const Derived, FFTDataType, FFTDirection>(derived(), fft);
+ }
+
+ // Scan.
+ typedef TensorScanOp<internal::SumReducer<CoeffReturnType>, const Derived> TensorScanSumOp;
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorScanSumOp
+ cumsum(const Index& axis, bool exclusive = false) const {
+ return TensorScanSumOp(derived(), axis, exclusive);
+ }
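+    // Example for cumsum() (illustrative sketch): running sum along
+    // dimension 0.
+    //   Eigen::Tensor<int, 2> t(3, 2);
+    //   t.setValues({{1, 2}, {3, 4}, {5, 6}});
+    //   Eigen::Tensor<int, 2> s = t.cumsum(0);  // {{1, 2}, {4, 6}, {9, 12}}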
+
+ typedef TensorScanOp<internal::ProdReducer<CoeffReturnType>, const Derived> TensorScanProdOp;
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorScanProdOp
+ cumprod(const Index& axis, bool exclusive = false) const {
+ return TensorScanProdOp(derived(), axis, exclusive);
+ }
+
+ template <typename Reducer>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorScanOp<Reducer, const Derived>
+ scan(const Index& axis, const Reducer& reducer, bool exclusive = false) const {
+ return TensorScanOp<Reducer, const Derived>(derived(), axis, exclusive, reducer);
+ }
+
+ // Reductions.
+ template <typename Dims> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorReductionOp<internal::SumReducer<CoeffReturnType>, const Dims, const Derived>
+ sum(const Dims& dims) const {
+ return TensorReductionOp<internal::SumReducer<CoeffReturnType>, const Dims, const Derived>(derived(), dims, internal::SumReducer<CoeffReturnType>());
+ }
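+    // Example for sum() (illustrative sketch): reduce over dimension 1 only;
+    // a 2x3 input yields a rank-1 result of size 2.
+    //   Eigen::Tensor<float, 2> t(2, 3);
+    //   t.setRandom();
+    //   Eigen::array<int, 1> dims{{1}};
+    //   Eigen::Tensor<float, 1> row_sums = t.sum(dims);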
+
+ const TensorReductionOp<internal::SumReducer<CoeffReturnType>, const DimensionList<Index, NumDimensions>, const Derived>
+ sum() const {
+ DimensionList<Index, NumDimensions> in_dims;
+ return TensorReductionOp<internal::SumReducer<CoeffReturnType>, const DimensionList<Index, NumDimensions>, const Derived>(derived(), in_dims, internal::SumReducer<CoeffReturnType>());
+ }
+
+ template <typename Dims> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorReductionOp<internal::MeanReducer<CoeffReturnType>, const Dims, const Derived>
+ mean(const Dims& dims) const {
+ return TensorReductionOp<internal::MeanReducer<CoeffReturnType>, const Dims, const Derived>(derived(), dims, internal::MeanReducer<CoeffReturnType>());
+ }
+
+ const TensorReductionOp<internal::MeanReducer<CoeffReturnType>, const DimensionList<Index, NumDimensions>, const Derived>
+ mean() const {
+ DimensionList<Index, NumDimensions> in_dims;
+ return TensorReductionOp<internal::MeanReducer<CoeffReturnType>, const DimensionList<Index, NumDimensions>, const Derived>(derived(), in_dims, internal::MeanReducer<CoeffReturnType>());
+ }
+
+ template <typename Dims> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorReductionOp<internal::ProdReducer<CoeffReturnType>, const Dims, const Derived>
+ prod(const Dims& dims) const {
+ return TensorReductionOp<internal::ProdReducer<CoeffReturnType>, const Dims, const Derived>(derived(), dims, internal::ProdReducer<CoeffReturnType>());
+ }
+
+ const TensorReductionOp<internal::ProdReducer<CoeffReturnType>, const DimensionList<Index, NumDimensions>, const Derived>
+ prod() const {
+ DimensionList<Index, NumDimensions> in_dims;
+ return TensorReductionOp<internal::ProdReducer<CoeffReturnType>, const DimensionList<Index, NumDimensions>, const Derived>(derived(), in_dims, internal::ProdReducer<CoeffReturnType>());
+ }
+
+ template <typename Dims> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorReductionOp<internal::MaxReducer<CoeffReturnType>, const Dims, const Derived>
+ maximum(const Dims& dims) const {
+ return TensorReductionOp<internal::MaxReducer<CoeffReturnType>, const Dims, const Derived>(derived(), dims, internal::MaxReducer<CoeffReturnType>());
+ }
+
+ const TensorReductionOp<internal::MaxReducer<CoeffReturnType>, const DimensionList<Index, NumDimensions>, const Derived>
+ maximum() const {
+ DimensionList<Index, NumDimensions> in_dims;
+ return TensorReductionOp<internal::MaxReducer<CoeffReturnType>, const DimensionList<Index, NumDimensions>, const Derived>(derived(), in_dims, internal::MaxReducer<CoeffReturnType>());
+ }
+
+ template <typename Dims> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorReductionOp<internal::MinReducer<CoeffReturnType>, const Dims, const Derived>
+ minimum(const Dims& dims) const {
+ return TensorReductionOp<internal::MinReducer<CoeffReturnType>, const Dims, const Derived>(derived(), dims, internal::MinReducer<CoeffReturnType>());
+ }
+
+ const TensorReductionOp<internal::MinReducer<CoeffReturnType>, const DimensionList<Index, NumDimensions>, const Derived>
+ minimum() const {
+ DimensionList<Index, NumDimensions> in_dims;
+ return TensorReductionOp<internal::MinReducer<CoeffReturnType>, const DimensionList<Index, NumDimensions>, const Derived>(derived(), in_dims, internal::MinReducer<CoeffReturnType>());
+ }
+
+ template <typename Dims> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorReductionOp<internal::AndReducer, const Dims, const TensorConversionOp<bool, const Derived> >
+ all(const Dims& dims) const {
+ return cast<bool>().reduce(dims, internal::AndReducer());
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorReductionOp<internal::AndReducer, const DimensionList<Index, NumDimensions>, const TensorConversionOp<bool, const Derived> >
+ all() const {
+ DimensionList<Index, NumDimensions> in_dims;
+ return cast<bool>().reduce(in_dims, internal::AndReducer());
+ }
+
+ template <typename Dims> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorReductionOp<internal::OrReducer, const Dims, const TensorConversionOp<bool, const Derived> >
+ any(const Dims& dims) const {
+ return cast<bool>().reduce(dims, internal::OrReducer());
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorReductionOp<internal::OrReducer, const DimensionList<Index, NumDimensions>, const TensorConversionOp<bool, const Derived> >
+ any() const {
+ DimensionList<Index, NumDimensions> in_dims;
+ return cast<bool>().reduce(in_dims, internal::OrReducer());
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorTupleReducerOp<
+ internal::ArgMaxTupleReducer<Tuple<Index, CoeffReturnType> >,
+ const array<Index, NumDimensions>, const Derived>
+ argmax() const {
+ array<Index, NumDimensions> in_dims;
+ for (int d = 0; d < NumDimensions; ++d) in_dims[d] = d;
+ return TensorTupleReducerOp<
+ internal::ArgMaxTupleReducer<Tuple<Index, CoeffReturnType> >,
+ const array<Index, NumDimensions>,
+ const Derived>(derived(), internal::ArgMaxTupleReducer<Tuple<Index, CoeffReturnType> >(), -1, in_dims);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorTupleReducerOp<
+ internal::ArgMinTupleReducer<Tuple<Index, CoeffReturnType> >,
+ const array<Index, NumDimensions>, const Derived>
+ argmin() const {
+ array<Index, NumDimensions> in_dims;
+ for (int d = 0; d < NumDimensions; ++d) in_dims[d] = d;
+ return TensorTupleReducerOp<
+ internal::ArgMinTupleReducer<Tuple<Index, CoeffReturnType> >,
+ const array<Index, NumDimensions>,
+ const Derived>(derived(), internal::ArgMinTupleReducer<Tuple<Index, CoeffReturnType> >(), -1, in_dims);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorTupleReducerOp<
+ internal::ArgMaxTupleReducer<Tuple<Index, CoeffReturnType> >,
+ const array<Index, 1>, const Derived>
+ argmax(const int return_dim) const {
+ array<Index, 1> in_dims;
+ in_dims[0] = return_dim;
+ return TensorTupleReducerOp<
+ internal::ArgMaxTupleReducer<Tuple<Index, CoeffReturnType> >,
+ const array<Index, 1>,
+ const Derived>(derived(), internal::ArgMaxTupleReducer<Tuple<Index, CoeffReturnType> >(), return_dim, in_dims);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorTupleReducerOp<
+ internal::ArgMinTupleReducer<Tuple<Index, CoeffReturnType> >,
+ const array<Index, 1>, const Derived>
+ argmin(const int return_dim) const {
+ array<Index, 1> in_dims;
+ in_dims[0] = return_dim;
+ return TensorTupleReducerOp<
+ internal::ArgMinTupleReducer<Tuple<Index, CoeffReturnType> >,
+ const array<Index, 1>,
+ const Derived>(derived(), internal::ArgMinTupleReducer<Tuple<Index, CoeffReturnType> >(), return_dim, in_dims);
+ }
+
+ template <typename Reducer, typename Dims> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorReductionOp<Reducer, const Dims, const Derived>
+ reduce(const Dims& dims, const Reducer& reducer) const {
+ return TensorReductionOp<Reducer, const Dims, const Derived>(derived(), dims, reducer);
+ }
+
+ template <typename Broadcast> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorBroadcastingOp<const Broadcast, const Derived>
+ broadcast(const Broadcast& broadcast) const {
+ return TensorBroadcastingOp<const Broadcast, const Derived>(derived(), broadcast);
+ }
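+    // Example for broadcast() (illustrative sketch): replicate the input
+    // twice along each dimension, so a 2x3 tensor becomes 4x6.
+    //   Eigen::Tensor<float, 2> t(2, 3);
+    //   t.setRandom();
+    //   Eigen::array<int, 2> bcast{{2, 2}};
+    //   Eigen::Tensor<float, 2> b = t.broadcast(bcast);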
+
+ template <typename Axis, typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorConcatenationOp<Axis, const Derived, const OtherDerived>
+ concatenate(const OtherDerived& other, Axis axis) const {
+ return TensorConcatenationOp<Axis, const Derived, const OtherDerived>(derived(), other.derived(), axis);
+ }
+
+ template <typename PatchDims> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorPatchOp<const PatchDims, const Derived>
+ extract_patches(const PatchDims& patch_dims) const {
+ return TensorPatchOp<const PatchDims, const Derived>(derived(), patch_dims);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorImagePatchOp<Dynamic, Dynamic, const Derived>
+ extract_image_patches(const Index patch_rows = 1, const Index patch_cols = 1,
+ const Index row_stride = 1, const Index col_stride = 1,
+ const Index in_row_stride = 1, const Index in_col_stride = 1,
+ const PaddingType padding_type = PADDING_SAME, const Scalar padding_value = Scalar(0)) const {
+ return TensorImagePatchOp<Dynamic, Dynamic, const Derived>(derived(), patch_rows, patch_cols, row_stride, col_stride,
+ in_row_stride, in_col_stride, 1, 1, padding_type, padding_value);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorImagePatchOp<Dynamic, Dynamic, const Derived>
+ extract_image_patches(const Index patch_rows, const Index patch_cols,
+ const Index row_stride, const Index col_stride,
+ const Index in_row_stride, const Index in_col_stride,
+ const Index row_inflate_stride, const Index col_inflate_stride,
+ const Index padding_top, const Index padding_bottom,
+ const Index padding_left,const Index padding_right,
+ const Scalar padding_value) const {
+ return TensorImagePatchOp<Dynamic, Dynamic, const Derived>(derived(), patch_rows, patch_cols, row_stride, col_stride,
+ in_row_stride, in_col_stride, row_inflate_stride, col_inflate_stride,
+ padding_top, padding_bottom, padding_left, padding_right, padding_value);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorVolumePatchOp<Dynamic, Dynamic, Dynamic, const Derived>
+ extract_volume_patches(const Index patch_planes, const Index patch_rows, const Index patch_cols,
+ const Index plane_stride = 1, const Index row_stride = 1, const Index col_stride = 1,
+ const PaddingType padding_type = PADDING_SAME, const Scalar padding_value = Scalar(0)) const {
+ return TensorVolumePatchOp<Dynamic, Dynamic, Dynamic, const Derived>(derived(), patch_planes, patch_rows, patch_cols, plane_stride, row_stride, col_stride, 1, 1, 1, 1, 1, 1, padding_type, padding_value);
+ }
+
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorVolumePatchOp<Dynamic, Dynamic, Dynamic, const Derived>
+ extract_volume_patches(const Index patch_planes, const Index patch_rows, const Index patch_cols,
+ const Index plane_stride, const Index row_stride, const Index col_stride,
+ const Index plane_inflate_stride, const Index row_inflate_stride, const Index col_inflate_stride,
+ const Index padding_top_z, const Index padding_bottom_z,
+ const Index padding_top, const Index padding_bottom,
+ const Index padding_left, const Index padding_right, const Scalar padding_value = Scalar(0)) const {
+ return TensorVolumePatchOp<Dynamic, Dynamic, Dynamic, const Derived>(derived(), patch_planes, patch_rows, patch_cols, plane_stride, row_stride, col_stride, 1, 1, 1, plane_inflate_stride, row_inflate_stride, col_inflate_stride, padding_top_z, padding_bottom_z, padding_top, padding_bottom, padding_left, padding_right, padding_value);
+ }
+
+ // Morphing operators.
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorLayoutSwapOp<const Derived>
+ swap_layout() const {
+ return TensorLayoutSwapOp<const Derived>(derived());
+ }
+ template <typename NewDimensions> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorReshapingOp<const NewDimensions, const Derived>
+ reshape(const NewDimensions& newDimensions) const {
+ return TensorReshapingOp<const NewDimensions, const Derived>(derived(), newDimensions);
+ }
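+    // Example for reshape() (illustrative sketch): reinterpret a 2x6 tensor
+    // as 4x3; the total number of coefficients must stay the same.
+    //   Eigen::Tensor<float, 2> t(2, 6);
+    //   t.setRandom();
+    //   Eigen::array<Eigen::DenseIndex, 2> dims{{4, 3}};
+    //   Eigen::Tensor<float, 2> r = t.reshape(dims);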
+ template <typename StartIndices, typename Sizes> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorSlicingOp<const StartIndices, const Sizes, const Derived>
+ slice(const StartIndices& startIndices, const Sizes& sizes) const {
+ return TensorSlicingOp<const StartIndices, const Sizes, const Derived>(derived(), startIndices, sizes);
+ }
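+    // Example for slice() (illustrative sketch): a 2x2 block starting at
+    // offset (1, 0).
+    //   Eigen::Tensor<float, 2> t(4, 4);
+    //   t.setRandom();
+    //   Eigen::array<Eigen::DenseIndex, 2> offsets{{1, 0}};
+    //   Eigen::array<Eigen::DenseIndex, 2> extents{{2, 2}};
+    //   Eigen::Tensor<float, 2> s = t.slice(offsets, extents);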
+ template <typename StartIndices, typename StopIndices, typename Strides> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorStridingSlicingOp<const StartIndices, const StopIndices, const Strides, const Derived>
+ stridedSlice(const StartIndices& startIndices, const StopIndices& stopIndices, const Strides& strides) const {
+ return TensorStridingSlicingOp<const StartIndices, const StopIndices, const Strides,
+ const Derived>(derived(), startIndices, stopIndices, strides);
+ }
+ template <Index DimId> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorChippingOp<DimId, const Derived>
+ chip(const Index offset) const {
+ return TensorChippingOp<DimId, const Derived>(derived(), offset, DimId);
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorChippingOp<Dynamic, const Derived>
+ chip(const Index offset, const Index dim) const {
+ return TensorChippingOp<Dynamic, const Derived>(derived(), offset, dim);
+ }
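+    // Example for chip() (illustrative sketch): chip(2, 0) selects the slice
+    // at offset 2 along dimension 0, dropping that dimension; a 4x4 input
+    // yields a rank-1 tensor of size 4.
+    //   Eigen::Tensor<float, 2> t(4, 4);
+    //   t.setRandom();
+    //   Eigen::Tensor<float, 1> row = t.chip(2, 0);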
+ template <typename ReverseDimensions> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorReverseOp<const ReverseDimensions, const Derived>
+ reverse(const ReverseDimensions& rev) const {
+ return TensorReverseOp<const ReverseDimensions, const Derived>(derived(), rev);
+ }
+ template <typename PaddingDimensions> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorPaddingOp<const PaddingDimensions, const Derived>
+ pad(const PaddingDimensions& padding) const {
+ return TensorPaddingOp<const PaddingDimensions, const Derived>(derived(), padding, internal::scalar_cast_op<int, Scalar>()(0));
+ }
+ template <typename PaddingDimensions> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorPaddingOp<const PaddingDimensions, const Derived>
+ pad(const PaddingDimensions& padding, const Scalar padding_value) const {
+ return TensorPaddingOp<const PaddingDimensions, const Derived>(derived(), padding, padding_value);
+ }
+ template <typename Shuffle> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorShufflingOp<const Shuffle, const Derived>
+ shuffle(const Shuffle& shuffle) const {
+ return TensorShufflingOp<const Shuffle, const Derived>(derived(), shuffle);
+ }
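+    // Example for shuffle() (illustrative sketch): transpose a 2-D tensor by
+    // permuting its two dimensions.
+    //   Eigen::Tensor<float, 2> t(2, 3);
+    //   t.setRandom();
+    //   Eigen::array<int, 2> perm{{1, 0}};
+    //   Eigen::Tensor<float, 2> tt = t.shuffle(perm);  // 3x2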
+ template <typename Strides> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorStridingOp<const Strides, const Derived>
+ stride(const Strides& strides) const {
+ return TensorStridingOp<const Strides, const Derived>(derived(), strides);
+ }
+ template <typename Strides> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorInflationOp<const Strides, const Derived>
+ inflate(const Strides& strides) const {
+ return TensorInflationOp<const Strides, const Derived>(derived(), strides);
+ }
+
+ // Returns a tensor containing index/value tuples
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorIndexTupleOp<const Derived>
+ index_tuples() const {
+ return TensorIndexTupleOp<const Derived>(derived());
+ }
+
+ // Support for custom unary and binary operations
+ template <typename CustomUnaryFunc>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorCustomUnaryOp<const CustomUnaryFunc, const Derived> customOp(const CustomUnaryFunc& op) const {
+ return TensorCustomUnaryOp<const CustomUnaryFunc, const Derived>(derived(), op);
+ }
+ template <typename OtherDerived, typename CustomBinaryFunc>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorCustomBinaryOp<const CustomBinaryFunc, const Derived, const OtherDerived> customOp(const OtherDerived& other, const CustomBinaryFunc& op) const {
+ return TensorCustomBinaryOp<const CustomBinaryFunc, const Derived, const OtherDerived>(derived(), other, op);
+ }
+
+ // Force the evaluation of the expression.
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorForcedEvalOp<const Derived> eval() const {
+ return TensorForcedEvalOp<const Derived>(derived());
+ }
+
+ protected:
+ template <typename Scalar, int NumIndices, int Options, typename IndexType> friend class Tensor;
+ template <typename Scalar, typename Dimensions, int Option, typename IndexTypes> friend class TensorFixedSize;
+ template <typename OtherDerived, int AccessLevel> friend class TensorBase;
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const Derived& derived() const { return *static_cast<const Derived*>(this); }
+};
+
+template<typename Derived, int AccessLevel = internal::accessors_level<Derived>::value>
+class TensorBase : public TensorBase<Derived, ReadOnlyAccessors> {
+ public:
+ typedef internal::traits<Derived> DerivedTraits;
+ typedef typename DerivedTraits::Scalar Scalar;
+ typedef typename DerivedTraits::Index Index;
+ typedef Scalar CoeffReturnType;
+ static const int NumDimensions = DerivedTraits::NumDimensions;
+
+ template <typename Scalar, int NumIndices, int Options, typename IndexType> friend class Tensor;
+ template <typename Scalar, typename Dimensions, int Option, typename IndexTypes> friend class TensorFixedSize;
+ template <typename OtherDerived, int OtherAccessLevel> friend class TensorBase;
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Derived& setZero() {
+ return setConstant(Scalar(0));
+ }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Derived& setConstant(const Scalar& val) {
+ return derived() = this->constant(val);
+ }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Derived& setRandom() {
+ return derived() = this->random();
+ }
+ template <typename RandomGenerator> EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Derived& setRandom() {
+ return derived() = this->template random<RandomGenerator>();
+ }
+
+#if EIGEN_HAS_VARIADIC_TEMPLATES
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Derived& setValues(
+ const typename internal::Initializer<Derived, NumDimensions>::InitList& vals) {
+ TensorEvaluator<Derived, DefaultDevice> eval(derived(), DefaultDevice());
+ internal::initialize_tensor<Derived, NumDimensions>(eval, vals);
+ return derived();
+ }
+#endif // EIGEN_HAS_VARIADIC_TEMPLATES
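+    // Example for setValues() (illustrative sketch, needs C++11 variadic
+    // templates):
+    //   Eigen::Tensor<int, 2> t(2, 3);
+    //   t.setValues({{1, 2, 3}, {4, 5, 6}});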
+
+ template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Derived& operator+=(const OtherDerived& other) {
+ return derived() = derived() + other.derived();
+ }
+ template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Derived& operator-=(const OtherDerived& other) {
+ return derived() = derived() - other.derived();
+ }
+ template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Derived& operator*=(const OtherDerived& other) {
+ return derived() = derived() * other.derived();
+ }
+ template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Derived& operator/=(const OtherDerived& other) {
+ return derived() = derived() / other.derived();
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorLayoutSwapOp<const Derived>
+ swap_layout() const {
+ return TensorLayoutSwapOp<const Derived>(derived());
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ TensorLayoutSwapOp<Derived>
+ swap_layout() {
+ return TensorLayoutSwapOp<Derived>(derived());
+ }
+
+ template <typename Axis, typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorConcatenationOp<const Axis, const Derived, const OtherDerived>
+ concatenate(const OtherDerived& other, const Axis& axis) const {
+ return TensorConcatenationOp<const Axis, const Derived, const OtherDerived>(derived(), other, axis);
+ }
+ template <typename Axis, typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ TensorConcatenationOp<const Axis, Derived, OtherDerived>
+ concatenate(const OtherDerived& other, const Axis& axis) {
+ return TensorConcatenationOp<const Axis, Derived, OtherDerived>(derived(), other, axis);
+ }
+
+ template <typename NewDimensions> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorReshapingOp<const NewDimensions, const Derived>
+ reshape(const NewDimensions& newDimensions) const {
+ return TensorReshapingOp<const NewDimensions, const Derived>(derived(), newDimensions);
+ }
+ template <typename NewDimensions> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ TensorReshapingOp<const NewDimensions, Derived>
+ reshape(const NewDimensions& newDimensions) {
+ return TensorReshapingOp<const NewDimensions, Derived>(derived(), newDimensions);
+ }
+
+ template <typename StartIndices, typename Sizes> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorSlicingOp<const StartIndices, const Sizes, const Derived>
+ slice(const StartIndices& startIndices, const Sizes& sizes) const {
+ return TensorSlicingOp<const StartIndices, const Sizes, const Derived>(derived(), startIndices, sizes);
+ }
+ template <typename StartIndices, typename Sizes> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ TensorSlicingOp<const StartIndices, const Sizes, Derived>
+ slice(const StartIndices& startIndices, const Sizes& sizes) {
+ return TensorSlicingOp<const StartIndices, const Sizes, Derived>(derived(), startIndices, sizes);
+ }
+
+ template <typename StartIndices, typename StopIndices, typename Strides> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorStridingSlicingOp<const StartIndices, const StopIndices, const Strides, const Derived>
+ stridedSlice(const StartIndices& startIndices, const StopIndices& stopIndices, const Strides& strides) const {
+ return TensorStridingSlicingOp<const StartIndices, const StopIndices, const Strides,
+ const Derived>(derived(), startIndices, stopIndices, strides);
+ }
+ template <typename StartIndices, typename StopIndices, typename Strides> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ TensorStridingSlicingOp<const StartIndices, const StopIndices, const Strides, Derived>
+ stridedSlice(const StartIndices& startIndices, const StopIndices& stopIndices, const Strides& strides) {
+ return TensorStridingSlicingOp<const StartIndices, const StopIndices, const Strides,
+ Derived>(derived(), startIndices, stopIndices, strides);
+ }
+
+ template <DenseIndex DimId> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorChippingOp<DimId, const Derived>
+ chip(const Index offset) const {
+ return TensorChippingOp<DimId, const Derived>(derived(), offset, DimId);
+ }
+ template <Index DimId> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ TensorChippingOp<DimId, Derived>
+ chip(const Index offset) {
+ return TensorChippingOp<DimId, Derived>(derived(), offset, DimId);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorChippingOp<Dynamic, const Derived>
+ chip(const Index offset, const Index dim) const {
+ return TensorChippingOp<Dynamic, const Derived>(derived(), offset, dim);
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ TensorChippingOp<Dynamic, Derived>
+ chip(const Index offset, const Index dim) {
+ return TensorChippingOp<Dynamic, Derived>(derived(), offset, dim);
+ }
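+
+  // A hedged usage sketch (assuming a rank-3 tensor t):
+  //   auto c0 = t.chip<1>(2);  // rank-2 slice at offset 2 along dimension 1
+  //   auto c1 = t.chip(2, 1);  // the same slice, with the dimension chosen at runtime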
+
+ template <typename ReverseDimensions> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorReverseOp<const ReverseDimensions, const Derived>
+ reverse(const ReverseDimensions& rev) const {
+ return TensorReverseOp<const ReverseDimensions, const Derived>(derived(), rev);
+ }
+ template <typename ReverseDimensions> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ TensorReverseOp<const ReverseDimensions, Derived>
+ reverse(const ReverseDimensions& rev) {
+ return TensorReverseOp<const ReverseDimensions, Derived>(derived(), rev);
+ }
+
+ template <typename Shuffle> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorShufflingOp<const Shuffle, const Derived>
+ shuffle(const Shuffle& shuffle) const {
+ return TensorShufflingOp<const Shuffle, const Derived>(derived(), shuffle);
+ }
+ template <typename Shuffle> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ TensorShufflingOp<const Shuffle, Derived>
+ shuffle(const Shuffle& shuffle) {
+ return TensorShufflingOp<const Shuffle, Derived>(derived(), shuffle);
+ }
+
+ template <typename Strides> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorStridingOp<const Strides, const Derived>
+ stride(const Strides& strides) const {
+ return TensorStridingOp<const Strides, const Derived>(derived(), strides);
+ }
+ template <typename Strides> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ TensorStridingOp<const Strides, Derived>
+ stride(const Strides& strides) {
+ return TensorStridingOp<const Strides, Derived>(derived(), strides);
+ }
+
+ // Select the device on which to evaluate the expression.
+ template <typename DeviceType>
+ TensorDevice<Derived, DeviceType> device(const DeviceType& device) {
+ return TensorDevice<Derived, DeviceType>(device, derived());
+ }
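+
+  // A hedged usage sketch (assuming tensors a, b, c of matching sizes and an
+  // Eigen::ThreadPoolDevice dev): the assignment below is evaluated on dev:
+  //   c.device(dev) = a + b;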
+
+ protected:
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Derived& derived() { return *static_cast<Derived*>(this); }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const Derived& derived() const { return *static_cast<const Derived*>(this); }
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_BASE_H
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorBroadcasting.h b/unsupported/Eigen/CXX11/src/Tensor/TensorBroadcasting.h
new file mode 100644
index 000000000..4cfe300eb
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorBroadcasting.h
@@ -0,0 +1,392 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_BROADCASTING_H
+#define EIGEN_CXX11_TENSOR_TENSOR_BROADCASTING_H
+
+namespace Eigen {
+
+/** \class TensorBroadcasting
+ * \ingroup CXX11_Tensor_Module
+ *
+ * \brief Tensor broadcasting class: replicates the input tensor a given
+ * number of times along each of its dimensions.
+ *
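+ * A hedged usage sketch (the names here are illustrative, not part of the API):
+ * \code
+ * Eigen::Tensor<float, 2> t(2, 3);
+ * t.setConstant(1.0f);
+ * Eigen::array<int, 2> bcast{{3, 2}};
+ * Eigen::Tensor<float, 2> r = t.broadcast(bcast);  // 6x6 result tiling t
+ * \endcode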
+ *
+ */
+namespace internal {
+template<typename Broadcast, typename XprType>
+struct traits<TensorBroadcastingOp<Broadcast, XprType> > : public traits<XprType>
+{
+ typedef typename XprType::Scalar Scalar;
+ typedef traits<XprType> XprTraits;
+ typedef typename XprTraits::StorageKind StorageKind;
+ typedef typename XprTraits::Index Index;
+ typedef typename XprType::Nested Nested;
+ typedef typename remove_reference<Nested>::type _Nested;
+ static const int NumDimensions = XprTraits::NumDimensions;
+ static const int Layout = XprTraits::Layout;
+};
+
+template<typename Broadcast, typename XprType>
+struct eval<TensorBroadcastingOp<Broadcast, XprType>, Eigen::Dense>
+{
+ typedef const TensorBroadcastingOp<Broadcast, XprType>& type;
+};
+
+template<typename Broadcast, typename XprType>
+struct nested<TensorBroadcastingOp<Broadcast, XprType>, 1, typename eval<TensorBroadcastingOp<Broadcast, XprType> >::type>
+{
+ typedef TensorBroadcastingOp<Broadcast, XprType> type;
+};
+
+template <typename Dims>
+struct is_input_scalar {
+ static const bool value = false;
+};
+template <>
+struct is_input_scalar<Sizes<> > {
+ static const bool value = true;
+};
+#ifndef EIGEN_EMULATE_CXX11_META_H
+template <typename std::size_t... Indices>
+struct is_input_scalar<Sizes<Indices...> > {
+ static const bool value = (Sizes<Indices...>::total_size == 1);
+};
+#endif
+
+} // end namespace internal
+
+
+
+template<typename Broadcast, typename XprType>
+class TensorBroadcastingOp : public TensorBase<TensorBroadcastingOp<Broadcast, XprType>, ReadOnlyAccessors>
+{
+ public:
+ typedef typename Eigen::internal::traits<TensorBroadcastingOp>::Scalar Scalar;
+ typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+ typedef typename Eigen::internal::nested<TensorBroadcastingOp>::type Nested;
+ typedef typename Eigen::internal::traits<TensorBroadcastingOp>::StorageKind StorageKind;
+ typedef typename Eigen::internal::traits<TensorBroadcastingOp>::Index Index;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorBroadcastingOp(const XprType& expr, const Broadcast& broadcast)
+ : m_xpr(expr), m_broadcast(broadcast) {}
+
+ EIGEN_DEVICE_FUNC
+ const Broadcast& broadcast() const { return m_broadcast; }
+
+ EIGEN_DEVICE_FUNC
+ const typename internal::remove_all<typename XprType::Nested>::type&
+ expression() const { return m_xpr; }
+
+ protected:
+ typename XprType::Nested m_xpr;
+ const Broadcast m_broadcast;
+};
+
+
+// Eval as rvalue
+template<typename Broadcast, typename ArgType, typename Device>
+struct TensorEvaluator<const TensorBroadcastingOp<Broadcast, ArgType>, Device>
+{
+ typedef TensorBroadcastingOp<Broadcast, ArgType> XprType;
+ typedef typename XprType::Index Index;
+ static const int NumDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value;
+ typedef DSizes<Index, NumDims> Dimensions;
+ typedef typename XprType::Scalar Scalar;
+ typedef typename TensorEvaluator<ArgType, Device>::Dimensions InputDimensions;
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+ typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
+ static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
+
+ enum {
+ IsAligned = true,
+ PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
+ Layout = TensorEvaluator<ArgType, Device>::Layout,
+ RawAccess = false
+ };
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
+      : m_broadcast(op.broadcast()), m_impl(op.expression(), device)
+ {
+    // The broadcasting op doesn't change the rank of the tensor. One can't broadcast a scalar
+    // and store the result in a scalar. Instead one should first reshape the scalar into an
+    // N-D tensor (N >= 1) with a single element, and then broadcast.
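+    // A hedged illustration (s is a Tensor<float, 0>, n a runtime length):
+    //   auto v = s.reshape(Eigen::array<Eigen::DenseIndex, 1>{{1}})
+    //             .broadcast(Eigen::array<Eigen::DenseIndex, 1>{{n}});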
+ EIGEN_STATIC_ASSERT((NumDims > 0), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ const InputDimensions& input_dims = m_impl.dimensions();
+ const Broadcast& broadcast = op.broadcast();
+ for (int i = 0; i < NumDims; ++i) {
+ eigen_assert(input_dims[i] > 0);
+ m_dimensions[i] = input_dims[i] * broadcast[i];
+ }
+
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ m_inputStrides[0] = 1;
+ m_outputStrides[0] = 1;
+ for (int i = 1; i < NumDims; ++i) {
+ m_inputStrides[i] = m_inputStrides[i-1] * input_dims[i-1];
+ m_outputStrides[i] = m_outputStrides[i-1] * m_dimensions[i-1];
+ }
+ } else {
+ m_inputStrides[NumDims-1] = 1;
+ m_outputStrides[NumDims-1] = 1;
+ for (int i = NumDims-2; i >= 0; --i) {
+ m_inputStrides[i] = m_inputStrides[i+1] * input_dims[i+1];
+ m_outputStrides[i] = m_outputStrides[i+1] * m_dimensions[i+1];
+ }
+ }
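+
+    // A hedged illustration: input dims (2, 3) with broadcast factors (2, 1)
+    // give output dims (4, 3); in ColMajor the input strides are then (1, 2)
+    // and the output strides (1, 4).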
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* /*data*/) {
+ m_impl.evalSubExprsIfNeeded(NULL);
+ return true;
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
+ m_impl.cleanup();
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE CoeffReturnType coeff(Index index) const
+ {
+ if (internal::is_input_scalar<typename internal::remove_all<InputDimensions>::type>::value) {
+ return m_impl.coeff(0);
+ }
+
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ return coeffColMajor(index);
+ } else {
+ return coeffRowMajor(index);
+ }
+ }
+
+  // TODO: attempt to speed this up. The integer divisions and modulo are slow.
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeffColMajor(Index index) const
+ {
+ Index inputIndex = 0;
+ for (int i = NumDims - 1; i > 0; --i) {
+ const Index idx = index / m_outputStrides[i];
+ if (internal::index_statically_eq<Broadcast>(i, 1)) {
+ eigen_assert(idx < m_impl.dimensions()[i]);
+ inputIndex += idx * m_inputStrides[i];
+ } else {
+ if (internal::index_statically_eq<InputDimensions>(i, 1)) {
+ eigen_assert(idx % m_impl.dimensions()[i] == 0);
+ } else {
+ inputIndex += (idx % m_impl.dimensions()[i]) * m_inputStrides[i];
+ }
+ }
+ index -= idx * m_outputStrides[i];
+ }
+ if (internal::index_statically_eq<Broadcast>(0, 1)) {
+ eigen_assert(index < m_impl.dimensions()[0]);
+ inputIndex += index;
+ } else {
+ if (internal::index_statically_eq<InputDimensions>(0, 1)) {
+ eigen_assert(index % m_impl.dimensions()[0] == 0);
+ } else {
+ inputIndex += (index % m_impl.dimensions()[0]);
+ }
+ }
+ return m_impl.coeff(inputIndex);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeffRowMajor(Index index) const
+ {
+ Index inputIndex = 0;
+ for (int i = 0; i < NumDims - 1; ++i) {
+ const Index idx = index / m_outputStrides[i];
+ if (internal::index_statically_eq<Broadcast>(i, 1)) {
+ eigen_assert(idx < m_impl.dimensions()[i]);
+ inputIndex += idx * m_inputStrides[i];
+ } else {
+ if (internal::index_statically_eq<InputDimensions>(i, 1)) {
+ eigen_assert(idx % m_impl.dimensions()[i] == 0);
+ } else {
+ inputIndex += (idx % m_impl.dimensions()[i]) * m_inputStrides[i];
+ }
+ }
+ index -= idx * m_outputStrides[i];
+ }
+ if (internal::index_statically_eq<Broadcast>(NumDims-1, 1)) {
+ eigen_assert(index < m_impl.dimensions()[NumDims-1]);
+ inputIndex += index;
+ } else {
+ if (internal::index_statically_eq<InputDimensions>(NumDims-1, 1)) {
+ eigen_assert(index % m_impl.dimensions()[NumDims-1] == 0);
+ } else {
+ inputIndex += (index % m_impl.dimensions()[NumDims-1]);
+ }
+ }
+ return m_impl.coeff(inputIndex);
+ }
+
+ template<int LoadMode>
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE PacketReturnType packet(Index index) const
+ {
+ if (internal::is_input_scalar<typename internal::remove_all<InputDimensions>::type>::value) {
+ return internal::pset1<PacketReturnType>(m_impl.coeff(0));
+ }
+
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ return packetColMajor<LoadMode>(index);
+ } else {
+ return packetRowMajor<LoadMode>(index);
+ }
+ }
+
+ // Ignore the LoadMode and always use unaligned loads since we can't guarantee
+ // the alignment at compile time.
+ template<int LoadMode>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packetColMajor(Index index) const
+ {
+ EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
+ eigen_assert(index+PacketSize-1 < dimensions().TotalSize());
+
+ const Index originalIndex = index;
+
+ Index inputIndex = 0;
+ for (int i = NumDims - 1; i > 0; --i) {
+ const Index idx = index / m_outputStrides[i];
+ if (internal::index_statically_eq<Broadcast>(i, 1)) {
+ eigen_assert(idx < m_impl.dimensions()[i]);
+ inputIndex += idx * m_inputStrides[i];
+ } else {
+ if (internal::index_statically_eq<InputDimensions>(i, 1)) {
+ eigen_assert(idx % m_impl.dimensions()[i] == 0);
+ } else {
+ inputIndex += (idx % m_impl.dimensions()[i]) * m_inputStrides[i];
+ }
+ }
+ index -= idx * m_outputStrides[i];
+ }
+ Index innermostLoc;
+ if (internal::index_statically_eq<Broadcast>(0, 1)) {
+ eigen_assert(index < m_impl.dimensions()[0]);
+ innermostLoc = index;
+ } else {
+ if (internal::index_statically_eq<InputDimensions>(0, 1)) {
+ eigen_assert(index % m_impl.dimensions()[0] == 0);
+ innermostLoc = 0;
+ } else {
+ innermostLoc = index % m_impl.dimensions()[0];
+ }
+ }
+ inputIndex += innermostLoc;
+
+    // TODO: this could be extended to the second dimension if we're not
+    // broadcasting alongside the first dimension, and so on.
+ if (innermostLoc + PacketSize <= m_impl.dimensions()[0]) {
+ return m_impl.template packet<Unaligned>(inputIndex);
+ } else {
+ EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize];
+ values[0] = m_impl.coeff(inputIndex);
+ for (int i = 1; i < PacketSize; ++i) {
+ values[i] = coeffColMajor(originalIndex+i);
+ }
+ PacketReturnType rslt = internal::pload<PacketReturnType>(values);
+ return rslt;
+ }
+ }
+
+ template<int LoadMode>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packetRowMajor(Index index) const
+ {
+ EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
+ eigen_assert(index+PacketSize-1 < dimensions().TotalSize());
+
+ const Index originalIndex = index;
+
+ Index inputIndex = 0;
+ for (int i = 0; i < NumDims - 1; ++i) {
+ const Index idx = index / m_outputStrides[i];
+ if (internal::index_statically_eq<Broadcast>(i, 1)) {
+ eigen_assert(idx < m_impl.dimensions()[i]);
+ inputIndex += idx * m_inputStrides[i];
+ } else {
+ if (internal::index_statically_eq<InputDimensions>(i, 1)) {
+ eigen_assert(idx % m_impl.dimensions()[i] == 0);
+ } else {
+ inputIndex += (idx % m_impl.dimensions()[i]) * m_inputStrides[i];
+ }
+ }
+ index -= idx * m_outputStrides[i];
+ }
+ Index innermostLoc;
+ if (internal::index_statically_eq<Broadcast>(NumDims-1, 1)) {
+ eigen_assert(index < m_impl.dimensions()[NumDims-1]);
+ innermostLoc = index;
+ } else {
+ if (internal::index_statically_eq<InputDimensions>(NumDims-1, 1)) {
+ eigen_assert(index % m_impl.dimensions()[NumDims-1] == 0);
+ innermostLoc = 0;
+ } else {
+ innermostLoc = index % m_impl.dimensions()[NumDims-1];
+ }
+ }
+ inputIndex += innermostLoc;
+
+    // TODO: this could be extended to the second dimension if we're not
+    // broadcasting alongside the first dimension, and so on.
+ if (innermostLoc + PacketSize <= m_impl.dimensions()[NumDims-1]) {
+ return m_impl.template packet<Unaligned>(inputIndex);
+ } else {
+ EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize];
+ values[0] = m_impl.coeff(inputIndex);
+ for (int i = 1; i < PacketSize; ++i) {
+ values[i] = coeffRowMajor(originalIndex+i);
+ }
+ PacketReturnType rslt = internal::pload<PacketReturnType>(values);
+ return rslt;
+ }
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
+ costPerCoeff(bool vectorized) const {
+ double compute_cost = TensorOpCost::AddCost<Index>();
+ if (NumDims > 0) {
+ for (int i = NumDims - 1; i > 0; --i) {
+ compute_cost += TensorOpCost::DivCost<Index>();
+ if (internal::index_statically_eq<Broadcast>(i, 1)) {
+ compute_cost +=
+ TensorOpCost::MulCost<Index>() + TensorOpCost::AddCost<Index>();
+ } else {
+ if (!internal::index_statically_eq<InputDimensions>(i, 1)) {
+ compute_cost += TensorOpCost::MulCost<Index>() +
+ TensorOpCost::ModCost<Index>() +
+ TensorOpCost::AddCost<Index>();
+ }
+ }
+ compute_cost +=
+ TensorOpCost::MulCost<Index>() + TensorOpCost::AddCost<Index>();
+ }
+ }
+ return m_impl.costPerCoeff(vectorized) +
+ TensorOpCost(0, 0, compute_cost, vectorized, PacketSize);
+ }
+
+ EIGEN_DEVICE_FUNC Scalar* data() const { return NULL; }
+
+ const TensorEvaluator<ArgType, Device>& impl() const { return m_impl; }
+
+ Broadcast functor() const { return m_broadcast; }
+
+ protected:
+ const Broadcast m_broadcast;
+ Dimensions m_dimensions;
+ array<Index, NumDims> m_outputStrides;
+ array<Index, NumDims> m_inputStrides;
+ TensorEvaluator<ArgType, Device> m_impl;
+};
+
+
+} // end namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_BROADCASTING_H
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorChipping.h b/unsupported/Eigen/CXX11/src/Tensor/TensorChipping.h
new file mode 100644
index 000000000..1ba7ef170
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorChipping.h
@@ -0,0 +1,384 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_CHIPPING_H
+#define EIGEN_CXX11_TENSOR_TENSOR_CHIPPING_H
+
+namespace Eigen {
+
+/** \class TensorChippingOp
+ * \ingroup CXX11_Tensor_Module
+ *
+ * \brief A chip is an (N-1)-dimensional slice obtained by fixing one dimension
+ * of an N-dimensional tensor at a given offset; in the 2-d case it corresponds
+ * to a row or a column.
+ *
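+ * A hedged usage sketch (the names here are illustrative, not part of the API):
+ * \code
+ * Eigen::Tensor<float, 3> t(2, 3, 5);
+ * t.setRandom();
+ * Eigen::Tensor<float, 2> c = t.chip<1>(0);  // the 2x5 slice at offset 0 of dimension 1
+ * \endcode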
+ *
+ */
+
+namespace internal {
+template<DenseIndex DimId, typename XprType>
+struct traits<TensorChippingOp<DimId, XprType> > : public traits<XprType>
+{
+ typedef typename XprType::Scalar Scalar;
+ typedef traits<XprType> XprTraits;
+ typedef typename XprTraits::StorageKind StorageKind;
+ typedef typename XprTraits::Index Index;
+ typedef typename XprType::Nested Nested;
+ typedef typename remove_reference<Nested>::type _Nested;
+ static const int NumDimensions = XprTraits::NumDimensions - 1;
+ static const int Layout = XprTraits::Layout;
+};
+
+template<DenseIndex DimId, typename XprType>
+struct eval<TensorChippingOp<DimId, XprType>, Eigen::Dense>
+{
+ typedef const TensorChippingOp<DimId, XprType>& type;
+};
+
+template<DenseIndex DimId, typename XprType>
+struct nested<TensorChippingOp<DimId, XprType>, 1, typename eval<TensorChippingOp<DimId, XprType> >::type>
+{
+ typedef TensorChippingOp<DimId, XprType> type;
+};
+
+template <DenseIndex DimId>
+struct DimensionId
+{
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DimensionId(DenseIndex dim) {
+ eigen_assert(dim == DimId);
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DenseIndex actualDim() const {
+ return DimId;
+ }
+};
+template <>
+struct DimensionId<Dynamic>
+{
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DimensionId(DenseIndex dim) : actual_dim(dim) {
+ eigen_assert(dim >= 0);
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DenseIndex actualDim() const {
+ return actual_dim;
+ }
+ private:
+ const DenseIndex actual_dim;
+};
+
+
+} // end namespace internal
+
+
+
+template<DenseIndex DimId, typename XprType>
+class TensorChippingOp : public TensorBase<TensorChippingOp<DimId, XprType> >
+{
+ public:
+ typedef typename Eigen::internal::traits<TensorChippingOp>::Scalar Scalar;
+ typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+ typedef typename Eigen::internal::nested<TensorChippingOp>::type Nested;
+ typedef typename Eigen::internal::traits<TensorChippingOp>::StorageKind StorageKind;
+ typedef typename Eigen::internal::traits<TensorChippingOp>::Index Index;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorChippingOp(const XprType& expr, const Index offset, const Index dim)
+ : m_xpr(expr), m_offset(offset), m_dim(dim) {
+ }
+
+ EIGEN_DEVICE_FUNC
+ const Index offset() const { return m_offset; }
+ EIGEN_DEVICE_FUNC
+ const Index dim() const { return m_dim.actualDim(); }
+
+ EIGEN_DEVICE_FUNC
+ const typename internal::remove_all<typename XprType::Nested>::type&
+ expression() const { return m_xpr; }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE TensorChippingOp& operator = (const TensorChippingOp& other)
+ {
+ typedef TensorAssignOp<TensorChippingOp, const TensorChippingOp> Assign;
+ Assign assign(*this, other);
+ internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
+ return *this;
+ }
+
+ template<typename OtherDerived>
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE TensorChippingOp& operator = (const OtherDerived& other)
+ {
+ typedef TensorAssignOp<TensorChippingOp, const OtherDerived> Assign;
+ Assign assign(*this, other);
+ internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
+ return *this;
+ }
+
+ protected:
+ typename XprType::Nested m_xpr;
+ const Index m_offset;
+ const internal::DimensionId<DimId> m_dim;
+};
+
+
+// Eval as rvalue
+template<DenseIndex DimId, typename ArgType, typename Device>
+struct TensorEvaluator<const TensorChippingOp<DimId, ArgType>, Device>
+{
+ typedef TensorChippingOp<DimId, ArgType> XprType;
+ static const int NumInputDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value;
+ static const int NumDims = NumInputDims-1;
+ typedef typename XprType::Index Index;
+ typedef DSizes<Index, NumDims> Dimensions;
+ typedef typename XprType::Scalar Scalar;
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+ typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
+ static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
+
+
+ enum {
+ // Alignment can't be guaranteed at compile time since it depends on the
+ // slice offsets.
+ IsAligned = false,
+ PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
+ Layout = TensorEvaluator<ArgType, Device>::Layout,
+ CoordAccess = false, // to be implemented
+ RawAccess = false
+ };
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
+ : m_impl(op.expression(), device), m_dim(op.dim()), m_device(device)
+ {
+ EIGEN_STATIC_ASSERT((NumInputDims >= 1), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ eigen_assert(NumInputDims > m_dim.actualDim());
+
+ const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
+ eigen_assert(op.offset() < input_dims[m_dim.actualDim()]);
+
+ int j = 0;
+ for (int i = 0; i < NumInputDims; ++i) {
+ if (i != m_dim.actualDim()) {
+ m_dimensions[j] = input_dims[i];
+ ++j;
+ }
+ }
+
+ m_stride = 1;
+ m_inputStride = 1;
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ for (int i = 0; i < m_dim.actualDim(); ++i) {
+ m_stride *= input_dims[i];
+ m_inputStride *= input_dims[i];
+ }
+ } else {
+ for (int i = NumInputDims-1; i > m_dim.actualDim(); --i) {
+ m_stride *= input_dims[i];
+ m_inputStride *= input_dims[i];
+ }
+ }
+ m_inputStride *= input_dims[m_dim.actualDim()];
+ m_inputOffset = m_stride * op.offset();
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* /*data*/) {
+ m_impl.evalSubExprsIfNeeded(NULL);
+ return true;
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
+ m_impl.cleanup();
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
+ {
+ return m_impl.coeff(srcCoeff(index));
+ }
+
+ template<int LoadMode>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
+ {
+ EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
+ eigen_assert(index+PacketSize-1 < dimensions().TotalSize());
+
+ if ((static_cast<int>(Layout) == static_cast<int>(ColMajor) && m_dim.actualDim() == 0) ||
+ (static_cast<int>(Layout) == static_cast<int>(RowMajor) && m_dim.actualDim() == NumInputDims-1)) {
+ // m_stride is equal to 1, so let's avoid the integer division.
+ eigen_assert(m_stride == 1);
+ Index inputIndex = index * m_inputStride + m_inputOffset;
+ EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize];
+ for (int i = 0; i < PacketSize; ++i) {
+ values[i] = m_impl.coeff(inputIndex);
+ inputIndex += m_inputStride;
+ }
+ PacketReturnType rslt = internal::pload<PacketReturnType>(values);
+ return rslt;
+ } else if ((static_cast<int>(Layout) == static_cast<int>(ColMajor) && m_dim.actualDim() == NumInputDims - 1) ||
+ (static_cast<int>(Layout) == static_cast<int>(RowMajor) && m_dim.actualDim() == 0)) {
+      // m_stride is always greater than index, so let's avoid the integer division.
+ eigen_assert(m_stride > index);
+ return m_impl.template packet<LoadMode>(index + m_inputOffset);
+ } else {
+ const Index idx = index / m_stride;
+ const Index rem = index - idx * m_stride;
+ if (rem + PacketSize <= m_stride) {
+ Index inputIndex = idx * m_inputStride + m_inputOffset + rem;
+ return m_impl.template packet<LoadMode>(inputIndex);
+ } else {
+        // The packet crosses a stride boundary. Fall back to the slow path.
+ EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize];
+ for (int i = 0; i < PacketSize; ++i) {
+ values[i] = coeff(index);
+ ++index;
+ }
+ PacketReturnType rslt = internal::pload<PacketReturnType>(values);
+ return rslt;
+ }
+ }
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
+ costPerCoeff(bool vectorized) const {
+ double cost = 0;
+ if ((static_cast<int>(Layout) == static_cast<int>(ColMajor) &&
+ m_dim.actualDim() == 0) ||
+ (static_cast<int>(Layout) == static_cast<int>(RowMajor) &&
+ m_dim.actualDim() == NumInputDims - 1)) {
+ cost += TensorOpCost::MulCost<Index>() + TensorOpCost::AddCost<Index>();
+ } else if ((static_cast<int>(Layout) == static_cast<int>(ColMajor) &&
+ m_dim.actualDim() == NumInputDims - 1) ||
+ (static_cast<int>(Layout) == static_cast<int>(RowMajor) &&
+ m_dim.actualDim() == 0)) {
+ cost += TensorOpCost::AddCost<Index>();
+ } else {
+ cost += 3 * TensorOpCost::MulCost<Index>() + TensorOpCost::DivCost<Index>() +
+ 3 * TensorOpCost::AddCost<Index>();
+ }
+
+ return m_impl.costPerCoeff(vectorized) +
+ TensorOpCost(0, 0, cost, vectorized, PacketSize);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType* data() const {
+ CoeffReturnType* result = const_cast<CoeffReturnType*>(m_impl.data());
+ if (((static_cast<int>(Layout) == static_cast<int>(ColMajor) && m_dim.actualDim() == NumDims) ||
+ (static_cast<int>(Layout) == static_cast<int>(RowMajor) && m_dim.actualDim() == 0)) &&
+ result) {
+ return result + m_inputOffset;
+ } else {
+ return NULL;
+ }
+ }
+
+ protected:
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index srcCoeff(Index index) const
+ {
+ Index inputIndex;
+ if ((static_cast<int>(Layout) == static_cast<int>(ColMajor) && m_dim.actualDim() == 0) ||
+ (static_cast<int>(Layout) == static_cast<int>(RowMajor) && m_dim.actualDim() == NumInputDims-1)) {
+ // m_stride is equal to 1, so let's avoid the integer division.
+ eigen_assert(m_stride == 1);
+ inputIndex = index * m_inputStride + m_inputOffset;
+ } else if ((static_cast<int>(Layout) == static_cast<int>(ColMajor) && m_dim.actualDim() == NumInputDims-1) ||
+ (static_cast<int>(Layout) == static_cast<int>(RowMajor) && m_dim.actualDim() == 0)) {
+      // m_stride is always greater than index, so let's avoid the integer division.
+ eigen_assert(m_stride > index);
+ inputIndex = index + m_inputOffset;
+ } else {
+ const Index idx = index / m_stride;
+ inputIndex = idx * m_inputStride + m_inputOffset;
+ index -= idx * m_stride;
+ inputIndex += index;
+ }
+ return inputIndex;
+ }
+
+ Dimensions m_dimensions;
+ Index m_stride;
+ Index m_inputOffset;
+ Index m_inputStride;
+ TensorEvaluator<ArgType, Device> m_impl;
+ const internal::DimensionId<DimId> m_dim;
+ const Device& m_device;
+};
+
+
+// Eval as lvalue
+template<DenseIndex DimId, typename ArgType, typename Device>
+struct TensorEvaluator<TensorChippingOp<DimId, ArgType>, Device>
+ : public TensorEvaluator<const TensorChippingOp<DimId, ArgType>, Device>
+{
+ typedef TensorEvaluator<const TensorChippingOp<DimId, ArgType>, Device> Base;
+ typedef TensorChippingOp<DimId, ArgType> XprType;
+ static const int NumInputDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value;
+ static const int NumDims = NumInputDims-1;
+ typedef typename XprType::Index Index;
+ typedef DSizes<Index, NumDims> Dimensions;
+ typedef typename XprType::Scalar Scalar;
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+ typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
+ static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
+
+ enum {
+ IsAligned = false,
+ PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
+ RawAccess = false
+ };
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
+ : Base(op, device)
+ { }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType& coeffRef(Index index)
+ {
+ return this->m_impl.coeffRef(this->srcCoeff(index));
+ }
+
+ template <int StoreMode> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ void writePacket(Index index, const PacketReturnType& x)
+ {
+ EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
+
+ if ((static_cast<int>(this->Layout) == static_cast<int>(ColMajor) && this->m_dim.actualDim() == 0) ||
+ (static_cast<int>(this->Layout) == static_cast<int>(RowMajor) && this->m_dim.actualDim() == NumInputDims-1)) {
+ // m_stride is equal to 1, so let's avoid the integer division.
+ eigen_assert(this->m_stride == 1);
+ EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize];
+ internal::pstore<CoeffReturnType, PacketReturnType>(values, x);
+ Index inputIndex = index * this->m_inputStride + this->m_inputOffset;
+ for (int i = 0; i < PacketSize; ++i) {
+ this->m_impl.coeffRef(inputIndex) = values[i];
+ inputIndex += this->m_inputStride;
+ }
+ } else if ((static_cast<int>(this->Layout) == static_cast<int>(ColMajor) && this->m_dim.actualDim() == NumInputDims-1) ||
+ (static_cast<int>(this->Layout) == static_cast<int>(RowMajor) && this->m_dim.actualDim() == 0)) {
+      // m_stride is always greater than index, so let's avoid the integer division.
+ eigen_assert(this->m_stride > index);
+ this->m_impl.template writePacket<StoreMode>(index + this->m_inputOffset, x);
+ } else {
+ const Index idx = index / this->m_stride;
+ const Index rem = index - idx * this->m_stride;
+ if (rem + PacketSize <= this->m_stride) {
+ const Index inputIndex = idx * this->m_inputStride + this->m_inputOffset + rem;
+ this->m_impl.template writePacket<StoreMode>(inputIndex, x);
+ } else {
+        // The packet crosses a stride boundary. Fall back to the slow path.
+ EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize];
+ internal::pstore<CoeffReturnType, PacketReturnType>(values, x);
+ for (int i = 0; i < PacketSize; ++i) {
+ this->coeffRef(index) = values[i];
+ ++index;
+ }
+ }
+ }
+ }
+};
+
+
+} // end namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_CHIPPING_H
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorConcatenation.h b/unsupported/Eigen/CXX11/src/Tensor/TensorConcatenation.h
new file mode 100644
index 000000000..59bf90d93
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorConcatenation.h
@@ -0,0 +1,361 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_CONCATENATION_H
+#define EIGEN_CXX11_TENSOR_TENSOR_CONCATENATION_H
+
+namespace Eigen {
+
+/** \class TensorConcatenationOp
+ * \ingroup CXX11_Tensor_Module
+ *
+ * \brief Tensor concatenation class: joins two tensors along a given axis;
+ * the sizes of all other dimensions must match.
+ *
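+ * A hedged usage sketch (the names here are illustrative, not part of the API):
+ * \code
+ * Eigen::Tensor<float, 2> a(2, 3), b(2, 5);
+ * a.setZero(); b.setZero();
+ * Eigen::Tensor<float, 2> c = a.concatenate(b, 1);  // dimensions (2, 8)
+ * \endcode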
+ *
+ */
+namespace internal {
+template<typename Axis, typename LhsXprType, typename RhsXprType>
+struct traits<TensorConcatenationOp<Axis, LhsXprType, RhsXprType> >
+{
+ // Type promotion to handle the case where the types of the lhs and the rhs are different.
+ typedef typename promote_storage_type<typename LhsXprType::Scalar,
+ typename RhsXprType::Scalar>::ret Scalar;
+ typedef typename promote_storage_type<typename traits<LhsXprType>::StorageKind,
+ typename traits<RhsXprType>::StorageKind>::ret StorageKind;
+ typedef typename promote_index_type<typename traits<LhsXprType>::Index,
+ typename traits<RhsXprType>::Index>::type Index;
+ typedef typename LhsXprType::Nested LhsNested;
+ typedef typename RhsXprType::Nested RhsNested;
+ typedef typename remove_reference<LhsNested>::type _LhsNested;
+ typedef typename remove_reference<RhsNested>::type _RhsNested;
+ static const int NumDimensions = traits<LhsXprType>::NumDimensions;
+ static const int Layout = traits<LhsXprType>::Layout;
+ enum { Flags = 0 };
+};
+
+template<typename Axis, typename LhsXprType, typename RhsXprType>
+struct eval<TensorConcatenationOp<Axis, LhsXprType, RhsXprType>, Eigen::Dense>
+{
+ typedef const TensorConcatenationOp<Axis, LhsXprType, RhsXprType>& type;
+};
+
+template<typename Axis, typename LhsXprType, typename RhsXprType>
+struct nested<TensorConcatenationOp<Axis, LhsXprType, RhsXprType>, 1, typename eval<TensorConcatenationOp<Axis, LhsXprType, RhsXprType> >::type>
+{
+ typedef TensorConcatenationOp<Axis, LhsXprType, RhsXprType> type;
+};
+
+} // end namespace internal
+
+
+template<typename Axis, typename LhsXprType, typename RhsXprType>
+class TensorConcatenationOp : public TensorBase<TensorConcatenationOp<Axis, LhsXprType, RhsXprType>, WriteAccessors>
+{
+ public:
+ typedef typename internal::traits<TensorConcatenationOp>::Scalar Scalar;
+ typedef typename internal::traits<TensorConcatenationOp>::StorageKind StorageKind;
+ typedef typename internal::traits<TensorConcatenationOp>::Index Index;
+ typedef typename internal::nested<TensorConcatenationOp>::type Nested;
+ typedef typename internal::promote_storage_type<typename LhsXprType::CoeffReturnType,
+ typename RhsXprType::CoeffReturnType>::ret CoeffReturnType;
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorConcatenationOp(const LhsXprType& lhs, const RhsXprType& rhs, Axis axis)
+ : m_lhs_xpr(lhs), m_rhs_xpr(rhs), m_axis(axis) {}
+
+ EIGEN_DEVICE_FUNC
+ const typename internal::remove_all<typename LhsXprType::Nested>::type&
+ lhsExpression() const { return m_lhs_xpr; }
+
+ EIGEN_DEVICE_FUNC
+ const typename internal::remove_all<typename RhsXprType::Nested>::type&
+ rhsExpression() const { return m_rhs_xpr; }
+
+ EIGEN_DEVICE_FUNC const Axis& axis() const { return m_axis; }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE TensorConcatenationOp& operator = (const TensorConcatenationOp& other)
+ {
+ typedef TensorAssignOp<TensorConcatenationOp, const TensorConcatenationOp> Assign;
+ Assign assign(*this, other);
+ internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
+ return *this;
+ }
+
+ template<typename OtherDerived>
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE TensorConcatenationOp& operator = (const OtherDerived& other)
+ {
+ typedef TensorAssignOp<TensorConcatenationOp, const OtherDerived> Assign;
+ Assign assign(*this, other);
+ internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
+ return *this;
+ }
+
+ protected:
+ typename LhsXprType::Nested m_lhs_xpr;
+ typename RhsXprType::Nested m_rhs_xpr;
+ const Axis m_axis;
+};
+
+
+// Eval as rvalue
+template<typename Axis, typename LeftArgType, typename RightArgType, typename Device>
+struct TensorEvaluator<const TensorConcatenationOp<Axis, LeftArgType, RightArgType>, Device>
+{
+ typedef TensorConcatenationOp<Axis, LeftArgType, RightArgType> XprType;
+ typedef typename XprType::Index Index;
+ static const int NumDims = internal::array_size<typename TensorEvaluator<LeftArgType, Device>::Dimensions>::value;
+ static const int RightNumDims = internal::array_size<typename TensorEvaluator<RightArgType, Device>::Dimensions>::value;
+ typedef DSizes<Index, NumDims> Dimensions;
+ typedef typename XprType::Scalar Scalar;
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+ typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
+ enum {
+ IsAligned = false,
+ PacketAccess = TensorEvaluator<LeftArgType, Device>::PacketAccess & TensorEvaluator<RightArgType, Device>::PacketAccess,
+ Layout = TensorEvaluator<LeftArgType, Device>::Layout,
+ RawAccess = false
+ };
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
+ : m_leftImpl(op.lhsExpression(), device), m_rightImpl(op.rhsExpression(), device), m_axis(op.axis())
+ {
+ EIGEN_STATIC_ASSERT((static_cast<int>(TensorEvaluator<LeftArgType, Device>::Layout) == static_cast<int>(TensorEvaluator<RightArgType, Device>::Layout) || NumDims == 1), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT((NumDims == RightNumDims), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT((NumDims > 0), YOU_MADE_A_PROGRAMMING_MISTAKE);
+
+ eigen_assert(0 <= m_axis && m_axis < NumDims);
+ const Dimensions& lhs_dims = m_leftImpl.dimensions();
+ const Dimensions& rhs_dims = m_rightImpl.dimensions();
+ {
+ int i = 0;
+ for (; i < m_axis; ++i) {
+ eigen_assert(lhs_dims[i] > 0);
+ eigen_assert(lhs_dims[i] == rhs_dims[i]);
+ m_dimensions[i] = lhs_dims[i];
+ }
+ eigen_assert(lhs_dims[i] > 0); // Now i == m_axis.
+ eigen_assert(rhs_dims[i] > 0);
+ m_dimensions[i] = lhs_dims[i] + rhs_dims[i];
+ for (++i; i < NumDims; ++i) {
+ eigen_assert(lhs_dims[i] > 0);
+ eigen_assert(lhs_dims[i] == rhs_dims[i]);
+ m_dimensions[i] = lhs_dims[i];
+ }
+ }
+
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ m_leftStrides[0] = 1;
+ m_rightStrides[0] = 1;
+ m_outputStrides[0] = 1;
+
+ for (int j = 1; j < NumDims; ++j) {
+ m_leftStrides[j] = m_leftStrides[j-1] * lhs_dims[j-1];
+ m_rightStrides[j] = m_rightStrides[j-1] * rhs_dims[j-1];
+ m_outputStrides[j] = m_outputStrides[j-1] * m_dimensions[j-1];
+ }
+ } else {
+ m_leftStrides[NumDims - 1] = 1;
+ m_rightStrides[NumDims - 1] = 1;
+ m_outputStrides[NumDims - 1] = 1;
+
+ for (int j = NumDims - 2; j >= 0; --j) {
+ m_leftStrides[j] = m_leftStrides[j+1] * lhs_dims[j+1];
+ m_rightStrides[j] = m_rightStrides[j+1] * rhs_dims[j+1];
+ m_outputStrides[j] = m_outputStrides[j+1] * m_dimensions[j+1];
+ }
+ }
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
+
+ // TODO(phli): Add short-circuit memcpy evaluation if underlying data are linear?
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* /*data*/)
+ {
+ m_leftImpl.evalSubExprsIfNeeded(NULL);
+ m_rightImpl.evalSubExprsIfNeeded(NULL);
+ return true;
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup()
+ {
+ m_leftImpl.cleanup();
+ m_rightImpl.cleanup();
+ }
+
+ // TODO(phli): attempt to speed this up. The integer divisions and modulo are slow.
+ // See CL/76180724 comments for more ideas.
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
+ {
+ // Collect dimension-wise indices (subs).
+ array<Index, NumDims> subs;
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ for (int i = NumDims - 1; i > 0; --i) {
+ subs[i] = index / m_outputStrides[i];
+ index -= subs[i] * m_outputStrides[i];
+ }
+ subs[0] = index;
+ } else {
+ for (int i = 0; i < NumDims - 1; ++i) {
+ subs[i] = index / m_outputStrides[i];
+ index -= subs[i] * m_outputStrides[i];
+ }
+ subs[NumDims - 1] = index;
+ }
+
+ const Dimensions& left_dims = m_leftImpl.dimensions();
+ if (subs[m_axis] < left_dims[m_axis]) {
+ Index left_index;
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ left_index = subs[0];
+ for (int i = 1; i < NumDims; ++i) {
+ left_index += (subs[i] % left_dims[i]) * m_leftStrides[i];
+ }
+ } else {
+ left_index = subs[NumDims - 1];
+ for (int i = NumDims - 2; i >= 0; --i) {
+ left_index += (subs[i] % left_dims[i]) * m_leftStrides[i];
+ }
+ }
+ return m_leftImpl.coeff(left_index);
+ } else {
+ subs[m_axis] -= left_dims[m_axis];
+ const Dimensions& right_dims = m_rightImpl.dimensions();
+ Index right_index;
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ right_index = subs[0];
+ for (int i = 1; i < NumDims; ++i) {
+ right_index += (subs[i] % right_dims[i]) * m_rightStrides[i];
+ }
+ } else {
+ right_index = subs[NumDims - 1];
+ for (int i = NumDims - 2; i >= 0; --i) {
+ right_index += (subs[i] % right_dims[i]) * m_rightStrides[i];
+ }
+ }
+ return m_rightImpl.coeff(right_index);
+ }
+ }
+
+ // TODO(phli): Add a real vectorization.
+ template<int LoadMode>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
+ {
+ const int packetSize = internal::unpacket_traits<PacketReturnType>::size;
+ EIGEN_STATIC_ASSERT((packetSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
+ eigen_assert(index + packetSize - 1 < dimensions().TotalSize());
+
+ EIGEN_ALIGN_MAX CoeffReturnType values[packetSize];
+ for (int i = 0; i < packetSize; ++i) {
+ values[i] = coeff(index+i);
+ }
+ PacketReturnType rslt = internal::pload<PacketReturnType>(values);
+ return rslt;
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
+ costPerCoeff(bool vectorized) const {
+ const double compute_cost = NumDims * (2 * TensorOpCost::AddCost<Index>() +
+ 2 * TensorOpCost::MulCost<Index>() +
+ TensorOpCost::DivCost<Index>() +
+ TensorOpCost::ModCost<Index>());
+ const double lhs_size = m_leftImpl.dimensions().TotalSize();
+ const double rhs_size = m_rightImpl.dimensions().TotalSize();
+ return (lhs_size / (lhs_size + rhs_size)) *
+ m_leftImpl.costPerCoeff(vectorized) +
+ (rhs_size / (lhs_size + rhs_size)) *
+ m_rightImpl.costPerCoeff(vectorized) +
+ TensorOpCost(0, 0, compute_cost);
+ }
+
+ EIGEN_DEVICE_FUNC Scalar* data() const { return NULL; }
+
+ protected:
+ Dimensions m_dimensions;
+ array<Index, NumDims> m_outputStrides;
+ array<Index, NumDims> m_leftStrides;
+ array<Index, NumDims> m_rightStrides;
+ TensorEvaluator<LeftArgType, Device> m_leftImpl;
+ TensorEvaluator<RightArgType, Device> m_rightImpl;
+ const Axis m_axis;
+};
+
+// Eval as lvalue
+template<typename Axis, typename LeftArgType, typename RightArgType, typename Device>
+ struct TensorEvaluator<TensorConcatenationOp<Axis, LeftArgType, RightArgType>, Device>
+ : public TensorEvaluator<const TensorConcatenationOp<Axis, LeftArgType, RightArgType>, Device>
+{
+ typedef TensorEvaluator<const TensorConcatenationOp<Axis, LeftArgType, RightArgType>, Device> Base;
+ typedef TensorConcatenationOp<Axis, LeftArgType, RightArgType> XprType;
+ typedef typename Base::Dimensions Dimensions;
+ enum {
+ IsAligned = false,
+ PacketAccess = TensorEvaluator<LeftArgType, Device>::PacketAccess & TensorEvaluator<RightArgType, Device>::PacketAccess,
+ Layout = TensorEvaluator<LeftArgType, Device>::Layout,
+ RawAccess = false
+ };
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(XprType& op, const Device& device)
+ : Base(op, device)
+ {
+ EIGEN_STATIC_ASSERT((static_cast<int>(Layout) == static_cast<int>(ColMajor)), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ }
+
+ typedef typename XprType::Index Index;
+ typedef typename XprType::Scalar Scalar;
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+ typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType& coeffRef(Index index)
+ {
+ // Collect dimension-wise indices (subs).
+ array<Index, Base::NumDims> subs;
+ for (int i = Base::NumDims - 1; i > 0; --i) {
+ subs[i] = index / this->m_outputStrides[i];
+ index -= subs[i] * this->m_outputStrides[i];
+ }
+ subs[0] = index;
+
+ const Dimensions& left_dims = this->m_leftImpl.dimensions();
+ if (subs[this->m_axis] < left_dims[this->m_axis]) {
+ Index left_index = subs[0];
+ for (int i = 1; i < Base::NumDims; ++i) {
+ left_index += (subs[i] % left_dims[i]) * this->m_leftStrides[i];
+ }
+ return this->m_leftImpl.coeffRef(left_index);
+ } else {
+ subs[this->m_axis] -= left_dims[this->m_axis];
+ const Dimensions& right_dims = this->m_rightImpl.dimensions();
+ Index right_index = subs[0];
+ for (int i = 1; i < Base::NumDims; ++i) {
+ right_index += (subs[i] % right_dims[i]) * this->m_rightStrides[i];
+ }
+ return this->m_rightImpl.coeffRef(right_index);
+ }
+ }
+
+ template <int StoreMode> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ void writePacket(Index index, const PacketReturnType& x)
+ {
+ const int packetSize = internal::unpacket_traits<PacketReturnType>::size;
+ EIGEN_STATIC_ASSERT((packetSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
+ eigen_assert(index + packetSize - 1 < this->dimensions().TotalSize());
+
+ EIGEN_ALIGN_MAX CoeffReturnType values[packetSize];
+ internal::pstore<CoeffReturnType, PacketReturnType>(values, x);
+ for (int i = 0; i < packetSize; ++i) {
+ coeffRef(index+i) = values[i];
+ }
+ }
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_CONCATENATION_H
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h
new file mode 100644
index 000000000..20b29e5fd
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h
@@ -0,0 +1,628 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_H
+#define EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_H
+
+namespace Eigen {
+
+/** \class TensorContraction
+ * \ingroup CXX11_Tensor_Module
+ *
+ * \brief Tensor contraction class: sums the products of coefficients over one
+ * or more pairs of contracting indices, generalizing the matrix product to
+ * arbitrary ranks.
+ *
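+ * A hedged usage sketch (the names here are illustrative, not part of the API);
+ * contracting over the single index pair (1, 0) reproduces the matrix product:
+ * \code
+ * Eigen::Tensor<float, 2> a(2, 3), b(3, 4);
+ * a.setRandom(); b.setRandom();
+ * Eigen::array<Eigen::IndexPair<int>, 1> dims = {{Eigen::IndexPair<int>(1, 0)}};
+ * Eigen::Tensor<float, 2> c = a.contract(b, dims);  // 2x4 result
+ * \endcode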
+ *
+ */
+namespace internal {
+
+template<typename Dimensions, typename LhsXprType, typename RhsXprType>
+struct traits<TensorContractionOp<Dimensions, LhsXprType, RhsXprType> >
+{
+ // Type promotion to handle the case where the types of the lhs and the rhs are different.
+ typedef typename gebp_traits<typename remove_const<typename LhsXprType::Scalar>::type,
+ typename remove_const<typename RhsXprType::Scalar>::type>::ResScalar Scalar;
+
+ typedef typename promote_storage_type<typename traits<LhsXprType>::StorageKind,
+ typename traits<RhsXprType>::StorageKind>::ret StorageKind;
+ typedef typename promote_index_type<typename traits<LhsXprType>::Index,
+ typename traits<RhsXprType>::Index>::type Index;
+ typedef typename LhsXprType::Nested LhsNested;
+ typedef typename RhsXprType::Nested RhsNested;
+ typedef typename remove_reference<LhsNested>::type _LhsNested;
+ typedef typename remove_reference<RhsNested>::type _RhsNested;
+
+ // From NumDims below.
+  static const int NumDimensions = traits<LhsXprType>::NumDimensions + traits<RhsXprType>::NumDimensions - 2 * array_size<Dimensions>::value;
+ static const int Layout = traits<LhsXprType>::Layout;
+
+ enum {
+ Flags = 0
+ };
+};
+
+template<typename Dimensions, typename LhsXprType, typename RhsXprType>
+struct eval<TensorContractionOp<Dimensions, LhsXprType, RhsXprType>, Eigen::Dense>
+{
+ typedef const TensorContractionOp<Dimensions, LhsXprType, RhsXprType>& type;
+};
+
+template<typename Dimensions, typename LhsXprType, typename RhsXprType>
+struct nested<TensorContractionOp<Dimensions, LhsXprType, RhsXprType>, 1, typename eval<TensorContractionOp<Dimensions, LhsXprType, RhsXprType> >::type>
+{
+ typedef TensorContractionOp<Dimensions, LhsXprType, RhsXprType> type;
+};
+
+template<typename Indices_, typename LeftArgType_, typename RightArgType_, typename Device_>
+struct traits<TensorEvaluator<const TensorContractionOp<Indices_, LeftArgType_, RightArgType_>, Device_> > {
+ typedef Indices_ Indices;
+ typedef LeftArgType_ LeftArgType;
+ typedef RightArgType_ RightArgType;
+ typedef Device_ Device;
+
+ // From NumDims below.
+ static const int NumDimensions = traits<LeftArgType_>::NumDimensions + traits<RightArgType_>::NumDimensions - 2 * array_size<Indices_>::value;
+};
+
+} // end namespace internal
+
+template<typename Indices, typename LhsXprType, typename RhsXprType>
+class TensorContractionOp : public TensorBase<TensorContractionOp<Indices, LhsXprType, RhsXprType>, ReadOnlyAccessors>
+{
+ public:
+ typedef typename Eigen::internal::traits<TensorContractionOp>::Scalar Scalar;
+ typedef typename internal::gebp_traits<typename LhsXprType::CoeffReturnType,
+ typename RhsXprType::CoeffReturnType>::ResScalar CoeffReturnType;
+ typedef typename Eigen::internal::nested<TensorContractionOp>::type Nested;
+ typedef typename Eigen::internal::traits<TensorContractionOp>::StorageKind StorageKind;
+ typedef typename Eigen::internal::traits<TensorContractionOp>::Index Index;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorContractionOp(
+ const LhsXprType& lhs, const RhsXprType& rhs, const Indices& dims)
+ : m_lhs_xpr(lhs), m_rhs_xpr(rhs), m_indices(dims) {}
+
+ EIGEN_DEVICE_FUNC
+ const Indices& indices() const { return m_indices; }
+
+ /** \returns the nested expressions */
+ EIGEN_DEVICE_FUNC
+ const typename internal::remove_all<typename LhsXprType::Nested>::type&
+ lhsExpression() const { return m_lhs_xpr; }
+
+ EIGEN_DEVICE_FUNC
+ const typename internal::remove_all<typename RhsXprType::Nested>::type&
+ rhsExpression() const { return m_rhs_xpr; }
+
+ protected:
+ typename LhsXprType::Nested m_lhs_xpr;
+ typename RhsXprType::Nested m_rhs_xpr;
+ const Indices m_indices;
+};
+
+
+template<typename Derived>
+struct TensorContractionEvaluatorBase
+{
+ typedef typename internal::traits<Derived>::Indices Indices;
+ typedef typename internal::traits<Derived>::LeftArgType LeftArgType;
+ typedef typename internal::traits<Derived>::RightArgType RightArgType;
+ typedef typename internal::traits<Derived>::Device Device;
+
+ typedef TensorContractionOp<Indices, LeftArgType, RightArgType> XprType;
+ typedef typename internal::remove_const<typename XprType::Scalar>::type Scalar;
+ typedef typename XprType::Index Index;
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+ typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
+
+ enum {
+ IsAligned = true,
+ PacketAccess = (internal::unpacket_traits<PacketReturnType>::size > 1),
+ Layout = TensorEvaluator<LeftArgType, Device>::Layout,
+ CoordAccess = false, // to be implemented
+ RawAccess = true
+ };
+
+ // Most of the code is assuming that both input tensors are ColMajor. If the
+ // inputs are RowMajor, we will "cheat" by swapping the LHS and RHS:
+ // If we want to compute A * B = C, where A is LHS and B is RHS, the code
+ // will pretend B is LHS and A is RHS.
+ typedef typename internal::conditional<
+ static_cast<int>(Layout) == static_cast<int>(ColMajor), LeftArgType, RightArgType>::type EvalLeftArgType;
+ typedef typename internal::conditional<
+ static_cast<int>(Layout) == static_cast<int>(ColMajor), RightArgType, LeftArgType>::type EvalRightArgType;
+
+ static const int LDims =
+ internal::array_size<typename TensorEvaluator<EvalLeftArgType, Device>::Dimensions>::value;
+ static const int RDims =
+ internal::array_size<typename TensorEvaluator<EvalRightArgType, Device>::Dimensions>::value;
+ static const int ContractDims = internal::array_size<Indices>::value;
+ static const int NumDims = LDims + RDims - 2 * ContractDims;
+
+ typedef array<Index, ContractDims> contract_t;
+ typedef array<Index, LDims - ContractDims> left_nocontract_t;
+ typedef array<Index, RDims - ContractDims> right_nocontract_t;
+
+ typedef DSizes<Index, NumDims> Dimensions;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ TensorContractionEvaluatorBase(const XprType& op, const Device& device)
+ : m_leftImpl(choose(Cond<static_cast<int>(Layout) == static_cast<int>(ColMajor)>(),
+ op.lhsExpression(), op.rhsExpression()), device),
+ m_rightImpl(choose(Cond<static_cast<int>(Layout) == static_cast<int>(ColMajor)>(),
+ op.rhsExpression(), op.lhsExpression()), device),
+ m_device(device),
+ m_result(NULL) {
+ EIGEN_STATIC_ASSERT((static_cast<int>(TensorEvaluator<LeftArgType, Device>::Layout) ==
+ static_cast<int>(TensorEvaluator<RightArgType, Device>::Layout)),
+ YOU_MADE_A_PROGRAMMING_MISTAKE);
+
+
+ DSizes<Index, LDims> eval_left_dims;
+ DSizes<Index, RDims> eval_right_dims;
+ array<IndexPair<Index>, ContractDims> eval_op_indices;
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ // For ColMajor, we keep using the existing dimensions
+ for (int i = 0; i < LDims; i++) {
+ eval_left_dims[i] = m_leftImpl.dimensions()[i];
+ }
+ for (int i = 0; i < RDims; i++) {
+ eval_right_dims[i] = m_rightImpl.dimensions()[i];
+ }
+ // We keep the pairs of contracting indices.
+ for (int i = 0; i < ContractDims; i++) {
+ eval_op_indices[i].first = op.indices()[i].first;
+ eval_op_indices[i].second = op.indices()[i].second;
+ }
+ } else {
+ // For RowMajor, we need to reverse the existing dimensions
+ for (int i = 0; i < LDims; i++) {
+ eval_left_dims[i] = m_leftImpl.dimensions()[LDims - i - 1];
+ }
+ for (int i = 0; i < RDims; i++) {
+ eval_right_dims[i] = m_rightImpl.dimensions()[RDims - i - 1];
+ }
+      // In addition to reversing the dimensions, we also need to flip the
+      // pairs of contracting indices.
+ for (int i = 0; i < ContractDims; i++) {
+ eval_op_indices[i].first = LDims - 1 - op.indices()[ContractDims - 1 - i].second;
+ eval_op_indices[i].second = RDims - 1 - op.indices()[ContractDims - 1 - i].first;
+ }
+ }
+
+    // Check for duplicate axes and make sure the first index in eval_op_indices
+    // is increasing. Using O(n^2) sorting is OK since ContractDims is small.
+ for (int i = 0; i < ContractDims; i++) {
+ for (int j = i + 1; j < ContractDims; j++) {
+ eigen_assert(eval_op_indices[j].first != eval_op_indices[i].first &&
+ eval_op_indices[j].second != eval_op_indices[i].second &&
+ "contraction axes should be unique");
+ if (eval_op_indices[j].first < eval_op_indices[i].first) {
+ numext::swap(eval_op_indices[j], eval_op_indices[i]);
+ }
+ }
+ }
+
+ array<Index, LDims> lhs_strides;
+ lhs_strides[0] = 1;
+ for (int i = 0; i < LDims-1; ++i) {
+ lhs_strides[i+1] = lhs_strides[i] * eval_left_dims[i];
+ }
+
+ array<Index, RDims> rhs_strides;
+ rhs_strides[0] = 1;
+ for (int i = 0; i < RDims-1; ++i) {
+ rhs_strides[i+1] = rhs_strides[i] * eval_right_dims[i];
+ }
+
+ if (m_i_strides.size() > 0) m_i_strides[0] = 1;
+ if (m_j_strides.size() > 0) m_j_strides[0] = 1;
+ if (m_k_strides.size() > 0) m_k_strides[0] = 1;
+
+ m_i_size = 1;
+ m_j_size = 1;
+ m_k_size = 1;
+
+ // To compute the dimension, we simply concatenate the non-contracting
+ // dimensions of the left and then the right tensor. Additionally, we also
+ // compute the strides corresponding to the left non-contracting
+ // dimensions and right non-contracting dimensions.
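+    // A hedged illustration: contracting a (2, 3, 4) tensor with a (4, 5)
+    // tensor over the single index pair (2, 0) yields output dimensions
+    // (2, 3, 5).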
+ m_lhs_inner_dim_contiguous = true;
+ int dim_idx = 0;
+ unsigned int nocontract_idx = 0;
+
+ for (int i = 0; i < LDims; i++) {
+ // find if we are contracting on index i of left tensor
+ bool contracting = false;
+ for (int j = 0; j < ContractDims; j++) {
+ if (eval_op_indices[j].first == i) {
+ contracting = true;
+ break;
+ }
+ }
+ if (!contracting) {
+ // add dimension size to output dimensions
+ m_dimensions[dim_idx] = eval_left_dims[i];
+ m_left_nocontract_strides[nocontract_idx] = lhs_strides[i];
+ if (dim_idx != i) {
+ m_lhs_inner_dim_contiguous = false;
+ }
+ if (nocontract_idx+1 < internal::array_size<left_nocontract_t>::value) {
+ m_i_strides[nocontract_idx+1] =
+ m_i_strides[nocontract_idx] * eval_left_dims[i];
+ } else {
+ m_i_size = m_i_strides[nocontract_idx] * eval_left_dims[i];
+ }
+ dim_idx++;
+ nocontract_idx++;
+ }
+ }
+
+ nocontract_idx = 0;
+ for (int i = 0; i < RDims; i++) {
+ bool contracting = false;
+ // find if we are contracting on index i of right tensor
+ for (int j = 0; j < ContractDims; j++) {
+ if (eval_op_indices[j].second == i) {
+ contracting = true;
+ break;
+ }
+ }
+ if (!contracting) {
+ m_dimensions[dim_idx] = eval_right_dims[i];
+ if (nocontract_idx+1 < internal::array_size<right_nocontract_t>::value) {
+ m_j_strides[nocontract_idx+1] =
+ m_j_strides[nocontract_idx] * eval_right_dims[i];
+ } else {
+ m_j_size = m_j_strides[nocontract_idx] * eval_right_dims[i];
+ }
+ m_right_nocontract_strides[nocontract_idx] = rhs_strides[i];
+ dim_idx++;
+ nocontract_idx++;
+ }
+ }
+
+ // Now compute the strides corresponding to the contracting dimensions. We
+ // assumed above that non-contracting axes are represented in the same order
+ // in the matrix as they are in the tensor. This is not the case for
+ // contracting axes. As the contracting axes must be of the same size in
+ // each tensor, we'll only look at the first tensor here.
+ m_rhs_inner_dim_contiguous = true;
+ m_rhs_inner_dim_reordered = false;
+ for (int i = 0; i < ContractDims; i++) {
+ Index left = eval_op_indices[i].first;
+ Index right = eval_op_indices[i].second;
+
+ Index size = eval_left_dims[left];
+ eigen_assert(size == eval_right_dims[right] &&
+ "Contraction axes must be same size");
+
+ if (i+1 < static_cast<int>(internal::array_size<contract_t>::value)) {
+ m_k_strides[i+1] = m_k_strides[i] * size;
+ } else {
+ m_k_size = m_k_strides[i] * size;
+ }
+ m_left_contracting_strides[i] = lhs_strides[left];
+ m_right_contracting_strides[i] = rhs_strides[right];
+
+ if (i > 0 && right < eval_op_indices[i-1].second) {
+ m_rhs_inner_dim_reordered = true;
+ }
+ if (right != i) {
+ m_rhs_inner_dim_contiguous = false;
+ }
+ }
+
+ // If the layout is RowMajor, we need to reverse the m_dimensions
+ if (static_cast<int>(Layout) == static_cast<int>(RowMajor)) {
+ for (int i = 0, j = NumDims - 1; i < j; i++, j--) {
+ numext::swap(m_dimensions[i], m_dimensions[j]);
+ }
+ }
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* data) {
+ m_leftImpl.evalSubExprsIfNeeded(NULL);
+ m_rightImpl.evalSubExprsIfNeeded(NULL);
+ if (data) {
+ evalTo(data);
+ return false;
+ } else {
+ m_result = static_cast<Scalar *>(m_device.allocate(dimensions().TotalSize() * sizeof(Scalar)));
+ evalTo(m_result);
+ return true;
+ }
+ }
+
+ EIGEN_DEVICE_FUNC void evalTo(Scalar* buffer) const {
+ if (this->m_lhs_inner_dim_contiguous) {
+ if (this->m_rhs_inner_dim_contiguous) {
+ if (this->m_rhs_inner_dim_reordered) {
+ static_cast<const Derived*>(this)->template evalProduct<true, true, true, Unaligned>(buffer);
+ }
+ else {
+ static_cast<const Derived*>(this)->template evalProduct<true, true, false, Unaligned>(buffer);
+ }
+ }
+ else {
+ if (this->m_rhs_inner_dim_reordered) {
+ static_cast<const Derived*>(this)->template evalProduct<true, false, true, Unaligned>(buffer);
+ }
+ else {
+ static_cast<const Derived*>(this)->template evalProduct<true, false, false, Unaligned>(buffer);
+ }
+ }
+ }
+ else {
+ if (this->m_rhs_inner_dim_contiguous) {
+ if (this->m_rhs_inner_dim_reordered) {
+ static_cast<const Derived*>(this)->template evalProduct<false, true, true, Unaligned>(buffer);
+ }
+ else {
+ static_cast<const Derived*>(this)->template evalProduct<false, true, false, Unaligned>(buffer);
+ }
+ }
+ else {
+ if (this->m_rhs_inner_dim_reordered) {
+ static_cast<const Derived*>(this)->template evalProduct<false, false, true, Unaligned>(buffer);
+ }
+ else {
+ static_cast<const Derived*>(this)->template evalProduct<false, false, false, Unaligned>(buffer);
+ }
+ }
+ }
+ }
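+  // (The nested branches above enumerate the three runtime flags so that each
+  // of the eight evalProduct instantiations is specialized at compile time on
+  // lhs-contiguous / rhs-contiguous / rhs-reordered.)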
+
+ template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
+ EIGEN_DEVICE_FUNC void evalGemv(Scalar* buffer) const {
+ const Index rows = m_i_size;
+ const Index cols = m_k_size;
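+    // With m_j_size == 1 the whole contraction collapses to a single
+    // matrix-vector product y = A * x, where A is rows x cols
+    // (non-contracting by contracting indices) and x is the flattened
+    // right-hand side.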
+
+ typedef typename internal::remove_const<typename EvalLeftArgType::Scalar>::type LhsScalar;
+ typedef typename internal::remove_const<typename EvalRightArgType::Scalar>::type RhsScalar;
+ typedef TensorEvaluator<EvalLeftArgType, Device> LeftEvaluator;
+ typedef TensorEvaluator<EvalRightArgType, Device> RightEvaluator;
+ const Index lhs_packet_size = internal::unpacket_traits<typename LeftEvaluator::PacketReturnType>::size;
+ const Index rhs_packet_size = internal::unpacket_traits<typename RightEvaluator::PacketReturnType>::size;
+ const int lhs_alignment = LeftEvaluator::IsAligned ? Aligned : Unaligned;
+ const int rhs_alignment = RightEvaluator::IsAligned ? Aligned : Unaligned;
+ typedef internal::TensorContractionInputMapper<LhsScalar, Index, internal::Lhs,
+ LeftEvaluator, left_nocontract_t,
+ contract_t, lhs_packet_size,
+ lhs_inner_dim_contiguous,
+ false, lhs_alignment> LhsMapper;
+
+ typedef internal::TensorContractionInputMapper<RhsScalar, Index, internal::Rhs,
+ RightEvaluator, right_nocontract_t,
+ contract_t, rhs_packet_size,
+ rhs_inner_dim_contiguous,
+ rhs_inner_dim_reordered, rhs_alignment> RhsMapper;
+
+ LhsMapper lhs(m_leftImpl, m_left_nocontract_strides, m_i_strides,
+ m_left_contracting_strides, m_k_strides);
+ RhsMapper rhs(m_rightImpl, m_right_nocontract_strides, m_j_strides,
+ m_right_contracting_strides, m_k_strides);
+
+ const Scalar alpha(1);
+ const Index resIncr(1);
+
+    // zero out the result buffer (which must be of size at least rows * sizeof(Scalar))
+ m_device.memset(buffer, 0, rows * sizeof(Scalar));
+
+ internal::general_matrix_vector_product<Index,LhsScalar,LhsMapper,ColMajor,false,RhsScalar,RhsMapper,false>::run(
+ rows, cols, lhs, rhs,
+ buffer, resIncr, alpha);
+ }
+
+ template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
+ EIGEN_DEVICE_FUNC void evalGemm(Scalar* buffer) const {
+ // columns in left side, rows in right side
+ const Index k = this->m_k_size;
+
+ // rows in left side
+ const Index m = this->m_i_size;
+
+ // columns in right side
+ const Index n = this->m_j_size;
+
+    // zero out the result buffer (which must be of size at least m * n * sizeof(Scalar))
+ this->m_device.memset(buffer, 0, m * n * sizeof(Scalar));
+
+ // define mr, nr, and all of my data mapper types
+ typedef typename internal::remove_const<typename EvalLeftArgType::Scalar>::type LhsScalar;
+ typedef typename internal::remove_const<typename EvalRightArgType::Scalar>::type RhsScalar;
+ typedef typename internal::gebp_traits<LhsScalar, RhsScalar> Traits;
+
+ const Index nr = Traits::nr;
+ const Index mr = Traits::mr;
+
+ typedef TensorEvaluator<EvalLeftArgType, Device> LeftEvaluator;
+ typedef TensorEvaluator<EvalRightArgType, Device> RightEvaluator;
+
+ const Index lhs_packet_size = internal::unpacket_traits<typename LeftEvaluator::PacketReturnType>::size;
+ const Index rhs_packet_size = internal::unpacket_traits<typename RightEvaluator::PacketReturnType>::size;
+
+ typedef internal::TensorContractionInputMapper<LhsScalar, Index, internal::Lhs,
+ LeftEvaluator, left_nocontract_t,
+ contract_t, lhs_packet_size,
+ lhs_inner_dim_contiguous,
+ false, Unaligned> LhsMapper;
+
+ typedef internal::TensorContractionInputMapper<RhsScalar, Index, internal::Rhs,
+ RightEvaluator, right_nocontract_t,
+ contract_t, rhs_packet_size,
+ rhs_inner_dim_contiguous,
+ rhs_inner_dim_reordered, Unaligned> RhsMapper;
+
+ typedef internal::blas_data_mapper<Scalar, Index, ColMajor> OutputMapper;
+
+ // Declare GEBP packing and kernel structs
+ internal::gemm_pack_lhs<LhsScalar, Index, typename LhsMapper::SubMapper, mr, Traits::LhsProgress, ColMajor> pack_lhs;
+ internal::gemm_pack_rhs<RhsScalar, Index, typename RhsMapper::SubMapper, nr, ColMajor> pack_rhs;
+
+ internal::gebp_kernel<LhsScalar, RhsScalar, Index, OutputMapper, mr, nr, false, false> gebp;
+
+ // initialize data mappers
+ LhsMapper lhs(this->m_leftImpl, this->m_left_nocontract_strides, this->m_i_strides,
+ this->m_left_contracting_strides, this->m_k_strides);
+
+ RhsMapper rhs(this->m_rightImpl, this->m_right_nocontract_strides, this->m_j_strides,
+ this->m_right_contracting_strides, this->m_k_strides);
+
+ OutputMapper output(buffer, m);
+
+ // Sizes of the blocks to load in cache. See the Goto paper for details.
+ internal::TensorContractionBlocking<LhsMapper, RhsMapper, Index, internal::ShardByCol> blocking(k, m, n, 1);
+ const Index kc = blocking.kc();
+ const Index mc = numext::mini(m, blocking.mc());
+ const Index nc = numext::mini(n, blocking.nc());
+ const Index sizeA = mc * kc;
+ const Index sizeB = kc * nc;
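+    // A hedged illustration with made-up sizes: for m = n = k = 1024 and
+    // blocking sizes kc = 256, mc = nc = 512, the loops below pack 512x256
+    // panels of the LHS and 256x512 panels of the RHS, so that each panel
+    // pair fits in cache while the gebp kernel sweeps the corresponding
+    // output tile.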
+
+ LhsScalar* blockA = static_cast<LhsScalar *>(this->m_device.allocate(sizeA * sizeof(LhsScalar)));
+ RhsScalar* blockB = static_cast<RhsScalar *>(this->m_device.allocate(sizeB * sizeof(RhsScalar)));
+
+ for(Index i2=0; i2<m; i2+=mc)
+ {
+ const Index actual_mc = numext::mini(i2+mc,m)-i2;
+ for (Index k2 = 0; k2 < k; k2 += kc) {
+ // make sure we don't overshoot right edge of left matrix, then pack vertical panel
+ const Index actual_kc = numext::mini(k2 + kc, k) - k2;
+ pack_lhs(blockA, lhs.getSubMapper(i2, k2), actual_kc, actual_mc, 0, 0);
+
+ // series of horizontal blocks
+ for (Index j2 = 0; j2 < n; j2 += nc) {
+ // make sure we don't overshoot right edge of right matrix, then pack block
+ const Index actual_nc = numext::mini(j2 + nc, n) - j2;
+ pack_rhs(blockB, rhs.getSubMapper(k2, j2), actual_kc, actual_nc, 0, 0);
+
+ // call gebp (matrix kernel)
+ // The parameters here are copied from Eigen's GEMM implementation
+ gebp(output.getSubMapper(i2, j2), blockA, blockB, actual_mc, actual_kc, actual_nc, Scalar(1), -1, -1, 0, 0);
+ }
+ }
+ }
+
+ this->m_device.deallocate(blockA);
+ this->m_device.deallocate(blockB);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
+ m_leftImpl.cleanup();
+ m_rightImpl.cleanup();
+
+ if (m_result != NULL) {
+ m_device.deallocate(m_result);
+ m_result = NULL;
+ }
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const {
+ return m_result[index];
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool) const {
+ return TensorOpCost(sizeof(CoeffReturnType), 0, 0);
+ }
+
+ template<int LoadMode>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const {
+ return internal::ploadt<PacketReturnType, LoadMode>(m_result + index);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar* data() const { return m_result; }
+
+ protected:
+ // Prevent assignment
+ TensorContractionEvaluatorBase& operator = (const TensorContractionEvaluatorBase&);
+ Dimensions m_dimensions;
+
+ contract_t m_k_strides;
+ contract_t m_left_contracting_strides;
+ contract_t m_right_contracting_strides;
+
+ bool m_lhs_inner_dim_contiguous;
+ bool m_rhs_inner_dim_contiguous;
+ bool m_rhs_inner_dim_reordered;
+
+ left_nocontract_t m_i_strides;
+ right_nocontract_t m_j_strides;
+ left_nocontract_t m_left_nocontract_strides;
+ right_nocontract_t m_right_nocontract_strides;
+
+ Index m_i_size;
+ Index m_j_size;
+ Index m_k_size;
+
+ TensorEvaluator<EvalLeftArgType, Device> m_leftImpl;
+ TensorEvaluator<EvalRightArgType, Device> m_rightImpl;
+ const Device& m_device;
+ Scalar* m_result;
+};
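+
+// For orientation, a minimal usage sketch of the contraction this evaluator
+// computes (the shapes here are made up for the example):
+//
+//   Eigen::Tensor<float, 2> a(30, 7), b(7, 20);
+//   a.setRandom(); b.setRandom();
+//   Eigen::array<Eigen::IndexPair<int>, 1> dims = {Eigen::IndexPair<int>(1, 0)};
+//   Eigen::Tensor<float, 2> c = a.contract(b, dims);  // 30 x 20 matrix product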
+
+
+// evaluator for default device
+template<typename Indices, typename LeftArgType, typename RightArgType, typename Device>
+struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType>, Device> :
+ public TensorContractionEvaluatorBase<
+ TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType>, Device> > {
+ typedef TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType>, Device> Self;
+ typedef TensorContractionEvaluatorBase<Self> Base;
+
+ typedef TensorContractionOp<Indices, LeftArgType, RightArgType> XprType;
+ typedef typename internal::remove_const<typename XprType::Scalar>::type Scalar;
+ typedef typename XprType::Index Index;
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+ typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
+
+ enum {
+ Layout = TensorEvaluator<LeftArgType, Device>::Layout
+ };
+
+ // Most of the code is assuming that both input tensors are ColMajor. If the
+ // inputs are RowMajor, we will "cheat" by swapping the LHS and RHS:
+ // If we want to compute A * B = C, where A is LHS and B is RHS, the code
+ // will pretend B is LHS and A is RHS.
+ typedef typename internal::conditional<
+ static_cast<int>(Layout) == static_cast<int>(ColMajor), LeftArgType, RightArgType>::type EvalLeftArgType;
+ typedef typename internal::conditional<
+ static_cast<int>(Layout) == static_cast<int>(ColMajor), RightArgType, LeftArgType>::type EvalRightArgType;
+
+ static const int LDims =
+ internal::array_size<typename TensorEvaluator<EvalLeftArgType, Device>::Dimensions>::value;
+ static const int RDims =
+ internal::array_size<typename TensorEvaluator<EvalRightArgType, Device>::Dimensions>::value;
+ static const int ContractDims = internal::array_size<Indices>::value;
+
+ typedef array<Index, ContractDims> contract_t;
+ typedef array<Index, LDims - ContractDims> left_nocontract_t;
+ typedef array<Index, RDims - ContractDims> right_nocontract_t;
+
+ static const int NumDims = LDims + RDims - 2 * ContractDims;
+
+ // Could we use NumDimensions here?
+ typedef DSizes<Index, NumDims> Dimensions;
+
+ EIGEN_DEVICE_FUNC TensorEvaluator(const XprType& op, const Device& device) :
+ Base(op, device) { }
+
+ template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
+ EIGEN_DEVICE_FUNC void evalProduct(Scalar* buffer) const {
+ if (this->m_j_size == 1) {
+ this->template evalGemv<lhs_inner_dim_contiguous, rhs_inner_dim_contiguous, rhs_inner_dim_reordered, Alignment>(buffer);
+ return;
+ }
+
+ this->template evalGemm<lhs_inner_dim_contiguous, rhs_inner_dim_contiguous, rhs_inner_dim_reordered, Alignment>(buffer);
+ }
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_H
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionBlocking.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionBlocking.h
new file mode 100644
index 000000000..5cf7b4f71
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionBlocking.h
@@ -0,0 +1,56 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_BLOCKING_H
+#define EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_BLOCKING_H
+
+
+namespace Eigen {
+namespace internal {
+
+enum {
+ ShardByRow = 0,
+ ShardByCol = 1
+};
+
+
+// Default Blocking Strategy
+template <typename LhsMapper, typename RhsMapper, typename Index, int ShardingType=ShardByCol>
+class TensorContractionBlocking {
+ public:
+
+ typedef typename LhsMapper::Scalar LhsScalar;
+ typedef typename RhsMapper::Scalar RhsScalar;
+
+ EIGEN_DEVICE_FUNC TensorContractionBlocking(Index k, Index m, Index n, Index num_threads = 1) :
+ kc_(k), mc_(m), nc_(n)
+ {
+ if (ShardingType == ShardByCol) {
+ computeProductBlockingSizes<LhsScalar, RhsScalar, 1>(kc_, mc_, nc_, num_threads);
+ }
+ else {
+ computeProductBlockingSizes<LhsScalar, RhsScalar, 1>(kc_, nc_, mc_, num_threads);
+ }
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Index kc() const { return kc_; }
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Index mc() const { return mc_; }
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Index nc() const { return nc_; }
+
+ private:
+ Index kc_;
+ Index mc_;
+ Index nc_;
+};
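+
+// A hedged usage sketch: the contraction evaluators instantiate this class to
+// pick cache-friendly panel sizes before packing, along the lines of
+//   TensorContractionBlocking<LhsMapper, RhsMapper, Index, ShardByCol>
+//       blocking(k, m, n, /*num_threads=*/1);
+//   const Index kc = blocking.kc();  // depth of the packed panels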
+
+
+} // end namespace internal
+} // end namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_BLOCKING_H
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionCuda.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionCuda.h
new file mode 100644
index 000000000..d65dbb40f
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionCuda.h
@@ -0,0 +1,1391 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014-2015 Benoit Steiner <benoit.steiner.goog@gmail.com>
+// Copyright (C) 2015 Navdeep Jaitly <ndjaitly@google.com>
+// Copyright (C) 2014 Eric Martin <eric@ericmart.in>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_CUDA_H
+#define EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_CUDA_H
+
+#if defined(EIGEN_USE_GPU) && defined(__CUDACC__)
+
+namespace Eigen {
+
+template<typename Scalar, typename Index, typename LhsMapper,
+ typename RhsMapper, typename OutputMapper, bool needs_edge_check>
+__device__ EIGEN_STRONG_INLINE void
+EigenContractionKernelInternal(const LhsMapper lhs, const RhsMapper rhs,
+ const OutputMapper output, Scalar* lhs_shmem, Scalar* rhs_shmem,
+ const Index m_size, const Index n_size, const Index k_size) {
+
+ const Index m_block_idx = blockIdx.x;
+ const Index n_block_idx = blockIdx.y;
+
+ const Index base_m = 64 * m_block_idx;
+ const Index base_n = 64 * n_block_idx;
+
+ // declare and initialize 64 registers for output 8x8 block
+
+ // prefetch registers
+ Scalar lhs_pf0;
+ Scalar lhs_pf1;
+ Scalar lhs_pf2;
+ Scalar lhs_pf3;
+ Scalar lhs_pf4;
+ Scalar lhs_pf5;
+ Scalar lhs_pf6;
+ Scalar lhs_pf7;
+
+ Scalar rhs_pf0;
+ Scalar rhs_pf1;
+ Scalar rhs_pf2;
+ Scalar rhs_pf3;
+ Scalar rhs_pf4;
+ Scalar rhs_pf5;
+ Scalar rhs_pf6;
+ Scalar rhs_pf7;
+
+  // shared memory is formatted as
+  // (contract idx in block, nocontract idx in block, block idx)
+  // where block idx is column major. This transposition limits the number of
+  // bank conflicts when reading the LHS. The core idea is that since the
+  // contracting index is shared by both sides, it should live in threadIdx.x.
+
+ // On the LHS, we pad each row inside of each block with an extra element. This makes
+ // each block 8 rows of 9 elements, which is 72 elements. This gives no bank conflicts
+ // on writes and very few 2-way conflicts on reads. There is an 8x8 grid of these blocks.
+
+ // On the RHS we just add 8 padding elements to the end of each block. This gives no bank
+ // conflicts on writes and also none on reads.
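+
+  // Worked example of the resulting layout: thread (x=1, y=2, z=3) stores its
+  // first LHS value at 2*72 + 1*9 + 3 = 156 and its first RHS value at
+  // 2*72 + 3*8 + 1 = 169, matching the storage indices computed below.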
+
+ // storage indices
+ const Index lhs_store_idx_base = threadIdx.y * 72 + threadIdx.x * 9 + threadIdx.z;
+ const Index rhs_store_idx_base = threadIdx.y * 72 + threadIdx.z * 8 + threadIdx.x;
+
+ const Index lhs_store_idx_0 = lhs_store_idx_base + 576 * 0;
+ const Index lhs_store_idx_1 = lhs_store_idx_base + 576 * 1;
+ const Index lhs_store_idx_2 = lhs_store_idx_base + 576 * 2;
+ const Index lhs_store_idx_3 = lhs_store_idx_base + 576 * 3;
+ const Index lhs_store_idx_4 = lhs_store_idx_base + 576 * 4;
+ const Index lhs_store_idx_5 = lhs_store_idx_base + 576 * 5;
+ const Index lhs_store_idx_6 = lhs_store_idx_base + 576 * 6;
+ const Index lhs_store_idx_7 = lhs_store_idx_base + 576 * 7;
+
+ const Index rhs_store_idx_0 = rhs_store_idx_base + 576 * 0;
+ const Index rhs_store_idx_1 = rhs_store_idx_base + 576 * 1;
+ const Index rhs_store_idx_2 = rhs_store_idx_base + 576 * 2;
+ const Index rhs_store_idx_3 = rhs_store_idx_base + 576 * 3;
+ const Index rhs_store_idx_4 = rhs_store_idx_base + 576 * 4;
+ const Index rhs_store_idx_5 = rhs_store_idx_base + 576 * 5;
+ const Index rhs_store_idx_6 = rhs_store_idx_base + 576 * 6;
+ const Index rhs_store_idx_7 = rhs_store_idx_base + 576 * 7;
+
+ // in the loading code, the following variables are important:
+ // threadIdx.x: the vertical position in an 8x8 block
+ // threadIdx.y: the vertical index of the 8x8 block in the grid
+ // threadIdx.z: the horizontal position in an 8x8 block
+ // k: the horizontal index of the 8x8 block in the grid
+ //
+  // The k parameter is implicit (it was the loop counter for a loop that went
+  // from 0 to 7, but that loop is now unrolled in the code below).
+
+ const Index load_idx_vert = threadIdx.x + 8 * threadIdx.y;
+ const Index lhs_vert = base_m + load_idx_vert;
+
+#define prefetchIntoRegisters(base_k) \
+ { \
+ lhs_pf0 = conv(0); \
+ lhs_pf1 = conv(0); \
+ lhs_pf2 = conv(0); \
+ lhs_pf3 = conv(0); \
+ lhs_pf4 = conv(0); \
+ lhs_pf5 = conv(0); \
+ lhs_pf6 = conv(0); \
+ lhs_pf7 = conv(0); \
+ \
+ rhs_pf0 = conv(0); \
+ rhs_pf1 = conv(0); \
+ rhs_pf2 = conv(0); \
+ rhs_pf3 = conv(0); \
+ rhs_pf4 = conv(0); \
+ rhs_pf5 = conv(0); \
+ rhs_pf6 = conv(0); \
+ rhs_pf7 = conv(0); \
+ \
+ if (!needs_edge_check || lhs_vert < m_size) { \
+ const Index lhs_horiz_0 = base_k + threadIdx.z + 0 * 8; \
+ const Index lhs_horiz_1 = base_k + threadIdx.z + 1 * 8; \
+ const Index lhs_horiz_2 = base_k + threadIdx.z + 2 * 8; \
+ const Index lhs_horiz_3 = base_k + threadIdx.z + 3 * 8; \
+ const Index lhs_horiz_4 = base_k + threadIdx.z + 4 * 8; \
+ const Index lhs_horiz_5 = base_k + threadIdx.z + 5 * 8; \
+ const Index lhs_horiz_6 = base_k + threadIdx.z + 6 * 8; \
+ const Index lhs_horiz_7 = base_k + threadIdx.z + 7 * 8; \
+ \
+ if (!needs_edge_check || lhs_horiz_7 < k_size) { \
+ lhs_pf0 = lhs(lhs_vert, lhs_horiz_0); \
+ lhs_pf1 = lhs(lhs_vert, lhs_horiz_1); \
+ lhs_pf2 = lhs(lhs_vert, lhs_horiz_2); \
+ lhs_pf3 = lhs(lhs_vert, lhs_horiz_3); \
+ lhs_pf4 = lhs(lhs_vert, lhs_horiz_4); \
+ lhs_pf5 = lhs(lhs_vert, lhs_horiz_5); \
+ lhs_pf6 = lhs(lhs_vert, lhs_horiz_6); \
+ lhs_pf7 = lhs(lhs_vert, lhs_horiz_7); \
+ } else if (lhs_horiz_6 < k_size) { \
+ lhs_pf0 = lhs(lhs_vert, lhs_horiz_0); \
+ lhs_pf1 = lhs(lhs_vert, lhs_horiz_1); \
+ lhs_pf2 = lhs(lhs_vert, lhs_horiz_2); \
+ lhs_pf3 = lhs(lhs_vert, lhs_horiz_3); \
+ lhs_pf4 = lhs(lhs_vert, lhs_horiz_4); \
+ lhs_pf5 = lhs(lhs_vert, lhs_horiz_5); \
+ lhs_pf6 = lhs(lhs_vert, lhs_horiz_6); \
+ } else if (lhs_horiz_5 < k_size) { \
+ lhs_pf0 = lhs(lhs_vert, lhs_horiz_0); \
+ lhs_pf1 = lhs(lhs_vert, lhs_horiz_1); \
+ lhs_pf2 = lhs(lhs_vert, lhs_horiz_2); \
+ lhs_pf3 = lhs(lhs_vert, lhs_horiz_3); \
+ lhs_pf4 = lhs(lhs_vert, lhs_horiz_4); \
+ lhs_pf5 = lhs(lhs_vert, lhs_horiz_5); \
+ } else if (lhs_horiz_4 < k_size) { \
+ lhs_pf0 = lhs(lhs_vert, lhs_horiz_0); \
+ lhs_pf1 = lhs(lhs_vert, lhs_horiz_1); \
+ lhs_pf2 = lhs(lhs_vert, lhs_horiz_2); \
+ lhs_pf3 = lhs(lhs_vert, lhs_horiz_3); \
+ lhs_pf4 = lhs(lhs_vert, lhs_horiz_4); \
+ } else if (lhs_horiz_3 < k_size) { \
+ lhs_pf0 = lhs(lhs_vert, lhs_horiz_0); \
+ lhs_pf1 = lhs(lhs_vert, lhs_horiz_1); \
+ lhs_pf2 = lhs(lhs_vert, lhs_horiz_2); \
+ lhs_pf3 = lhs(lhs_vert, lhs_horiz_3); \
+ } else if (lhs_horiz_2 < k_size) { \
+ lhs_pf0 = lhs(lhs_vert, lhs_horiz_0); \
+ lhs_pf1 = lhs(lhs_vert, lhs_horiz_1); \
+ lhs_pf2 = lhs(lhs_vert, lhs_horiz_2); \
+ } else if (lhs_horiz_1 < k_size) { \
+ lhs_pf0 = lhs(lhs_vert, lhs_horiz_0); \
+ lhs_pf1 = lhs(lhs_vert, lhs_horiz_1); \
+ } else if (lhs_horiz_0 < k_size) { \
+ lhs_pf0 = lhs(lhs_vert, lhs_horiz_0); \
+ } \
+ } \
+ \
+ const Index rhs_vert = base_k + load_idx_vert; \
+ if (!needs_edge_check || rhs_vert < k_size) { \
+ const Index rhs_horiz_0 = base_n + threadIdx.z + 0 * 8; \
+ const Index rhs_horiz_1 = base_n + threadIdx.z + 1 * 8; \
+ const Index rhs_horiz_2 = base_n + threadIdx.z + 2 * 8; \
+ const Index rhs_horiz_3 = base_n + threadIdx.z + 3 * 8; \
+ const Index rhs_horiz_4 = base_n + threadIdx.z + 4 * 8; \
+ const Index rhs_horiz_5 = base_n + threadIdx.z + 5 * 8; \
+ const Index rhs_horiz_6 = base_n + threadIdx.z + 6 * 8; \
+ const Index rhs_horiz_7 = base_n + threadIdx.z + 7 * 8; \
+ \
+ if (rhs_horiz_7 < n_size) { \
+ rhs_pf0 = rhs(rhs_vert, rhs_horiz_0); \
+ rhs_pf1 = rhs(rhs_vert, rhs_horiz_1); \
+ rhs_pf2 = rhs(rhs_vert, rhs_horiz_2); \
+ rhs_pf3 = rhs(rhs_vert, rhs_horiz_3); \
+ rhs_pf4 = rhs(rhs_vert, rhs_horiz_4); \
+ rhs_pf5 = rhs(rhs_vert, rhs_horiz_5); \
+ rhs_pf6 = rhs(rhs_vert, rhs_horiz_6); \
+ rhs_pf7 = rhs(rhs_vert, rhs_horiz_7); \
+ } else if (rhs_horiz_6 < n_size) { \
+ rhs_pf0 = rhs(rhs_vert, rhs_horiz_0); \
+ rhs_pf1 = rhs(rhs_vert, rhs_horiz_1); \
+ rhs_pf2 = rhs(rhs_vert, rhs_horiz_2); \
+ rhs_pf3 = rhs(rhs_vert, rhs_horiz_3); \
+ rhs_pf4 = rhs(rhs_vert, rhs_horiz_4); \
+ rhs_pf5 = rhs(rhs_vert, rhs_horiz_5); \
+ rhs_pf6 = rhs(rhs_vert, rhs_horiz_6); \
+ } else if (rhs_horiz_5 < n_size) { \
+ rhs_pf0 = rhs(rhs_vert, rhs_horiz_0); \
+ rhs_pf1 = rhs(rhs_vert, rhs_horiz_1); \
+ rhs_pf2 = rhs(rhs_vert, rhs_horiz_2); \
+ rhs_pf3 = rhs(rhs_vert, rhs_horiz_3); \
+ rhs_pf4 = rhs(rhs_vert, rhs_horiz_4); \
+ rhs_pf5 = rhs(rhs_vert, rhs_horiz_5); \
+ } else if (rhs_horiz_4 < n_size) { \
+ rhs_pf0 = rhs(rhs_vert, rhs_horiz_0); \
+ rhs_pf1 = rhs(rhs_vert, rhs_horiz_1); \
+ rhs_pf2 = rhs(rhs_vert, rhs_horiz_2); \
+ rhs_pf3 = rhs(rhs_vert, rhs_horiz_3); \
+ rhs_pf4 = rhs(rhs_vert, rhs_horiz_4); \
+ } else if (rhs_horiz_3 < n_size) { \
+ rhs_pf0 = rhs(rhs_vert, rhs_horiz_0); \
+ rhs_pf1 = rhs(rhs_vert, rhs_horiz_1); \
+ rhs_pf2 = rhs(rhs_vert, rhs_horiz_2); \
+ rhs_pf3 = rhs(rhs_vert, rhs_horiz_3); \
+ } else if (rhs_horiz_2 < n_size) { \
+ rhs_pf0 = rhs(rhs_vert, rhs_horiz_0); \
+ rhs_pf1 = rhs(rhs_vert, rhs_horiz_1); \
+ rhs_pf2 = rhs(rhs_vert, rhs_horiz_2); \
+ } else if (rhs_horiz_1 < n_size) { \
+ rhs_pf0 = rhs(rhs_vert, rhs_horiz_0); \
+ rhs_pf1 = rhs(rhs_vert, rhs_horiz_1); \
+ } else if (rhs_horiz_0 < n_size) { \
+ rhs_pf0 = rhs(rhs_vert, rhs_horiz_0); \
+ } \
+ } \
+ } \
+
+#define writeRegToShmem(_) \
+ lhs_shmem[lhs_store_idx_0] = lhs_pf0; \
+ rhs_shmem[rhs_store_idx_0] = rhs_pf0; \
+ \
+ lhs_shmem[lhs_store_idx_1] = lhs_pf1; \
+ rhs_shmem[rhs_store_idx_1] = rhs_pf1; \
+ \
+ lhs_shmem[lhs_store_idx_2] = lhs_pf2; \
+ rhs_shmem[rhs_store_idx_2] = rhs_pf2; \
+ \
+ lhs_shmem[lhs_store_idx_3] = lhs_pf3; \
+ rhs_shmem[rhs_store_idx_3] = rhs_pf3; \
+ \
+ lhs_shmem[lhs_store_idx_4] = lhs_pf4; \
+ rhs_shmem[rhs_store_idx_4] = rhs_pf4; \
+ \
+ lhs_shmem[lhs_store_idx_5] = lhs_pf5; \
+ rhs_shmem[rhs_store_idx_5] = rhs_pf5; \
+ \
+ lhs_shmem[lhs_store_idx_6] = lhs_pf6; \
+ rhs_shmem[rhs_store_idx_6] = rhs_pf6; \
+ \
+ lhs_shmem[lhs_store_idx_7] = lhs_pf7; \
+ rhs_shmem[rhs_store_idx_7] = rhs_pf7; \
+
+ // declare and initialize result array
+#define res(i, j) _res_##i##j
+#define initResultRow(i) \
+ Scalar res(i, 0) = conv(0); \
+ Scalar res(i, 1) = conv(0); \
+ Scalar res(i, 2) = conv(0); \
+ Scalar res(i, 3) = conv(0); \
+ Scalar res(i, 4) = conv(0); \
+ Scalar res(i, 5) = conv(0); \
+ Scalar res(i, 6) = conv(0); \
+ Scalar res(i, 7) = conv(0); \
+
+ internal::scalar_cast_op<int, Scalar> conv;
+ initResultRow(0);
+ initResultRow(1);
+ initResultRow(2);
+ initResultRow(3);
+ initResultRow(4);
+ initResultRow(5);
+ initResultRow(6);
+ initResultRow(7);
+#undef initResultRow
+
+ for (Index base_k = 0; base_k < k_size; base_k += 64) {
+    // wait for previous iteration to finish with shmem. Despite common sense,
+    // the code is a bit faster with this here than at the bottom of the loop
+ __syncthreads();
+
+ prefetchIntoRegisters(base_k);
+ writeRegToShmem();
+
+ #undef prefetchIntoRegisters
+ #undef writeRegToShmem
+
+ // wait for shared mem packing to be done before starting computation
+ __syncthreads();
+
+ // compute 8x8 matrix product by outer product. This involves packing one column
+ // of LHS and one row of RHS into registers (takes 16 registers).
+
+#define lcol(i) _lcol##i
+ Scalar lcol(0);
+ Scalar lcol(1);
+ Scalar lcol(2);
+ Scalar lcol(3);
+ Scalar lcol(4);
+ Scalar lcol(5);
+ Scalar lcol(6);
+ Scalar lcol(7);
+
+#define rrow(j) _rrow##j
+ Scalar rrow(0);
+ Scalar rrow(1);
+ Scalar rrow(2);
+ Scalar rrow(3);
+ Scalar rrow(4);
+ Scalar rrow(5);
+ Scalar rrow(6);
+ Scalar rrow(7);
+
+ // Now x corresponds to k, y to m, and z to n
+ const Scalar* lhs_block = &lhs_shmem[threadIdx.x + 9 * threadIdx.y];
+ const Scalar* rhs_block = &rhs_shmem[threadIdx.x + 8 * threadIdx.z];
+
+#define lhs_element(i, j) lhs_block[72 * ((i) + 8 * (j))]
+#define rhs_element(i, j) rhs_block[72 * ((i) + 8 * (j))]
+
+#define loadData(i, j) \
+ lcol(0) = lhs_element(0, j); \
+ rrow(0) = rhs_element(i, 0); \
+ lcol(1) = lhs_element(1, j); \
+ rrow(1) = rhs_element(i, 1); \
+ lcol(2) = lhs_element(2, j); \
+ rrow(2) = rhs_element(i, 2); \
+ lcol(3) = lhs_element(3, j); \
+ rrow(3) = rhs_element(i, 3); \
+ lcol(4) = lhs_element(4, j); \
+ rrow(4) = rhs_element(i, 4); \
+ lcol(5) = lhs_element(5, j); \
+ rrow(5) = rhs_element(i, 5); \
+ lcol(6) = lhs_element(6, j); \
+ rrow(6) = rhs_element(i, 6); \
+ lcol(7) = lhs_element(7, j); \
+ rrow(7) = rhs_element(i, 7); \
+
+#define computeCol(j) \
+ res(0, j) += lcol(0) * rrow(j); \
+ res(1, j) += lcol(1) * rrow(j); \
+ res(2, j) += lcol(2) * rrow(j); \
+ res(3, j) += lcol(3) * rrow(j); \
+ res(4, j) += lcol(4) * rrow(j); \
+ res(5, j) += lcol(5) * rrow(j); \
+ res(6, j) += lcol(6) * rrow(j); \
+ res(7, j) += lcol(7) * rrow(j); \
+
+#define computePass(i) \
+ loadData(i, i); \
+ \
+ computeCol(0); \
+ computeCol(1); \
+ computeCol(2); \
+ computeCol(3); \
+ computeCol(4); \
+ computeCol(5); \
+ computeCol(6); \
+ computeCol(7); \
+
+ computePass(0);
+ computePass(1);
+ computePass(2);
+ computePass(3);
+ computePass(4);
+ computePass(5);
+ computePass(6);
+ computePass(7);
+
+#undef lcol
+#undef rrow
+#undef lhs_element
+#undef rhs_element
+#undef loadData
+#undef computeCol
+#undef computePass
+ } // end loop over k
+
+  // we've now iterated over all of the large (i.e. width-64) k blocks and
+  // accumulated results in registers. At this point thread (x, y, z) contains
+  // the sum, across all big k blocks, of the product of the little k block at
+  // index (x, y) with the block at index (y, z). To compute the final output,
+  // we need to reduce across the 8 threads over y by summation.
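+  //
+  // Concretely: __shfl_xor with masks 1, 2 and 4 is a butterfly reduction
+  // over the 8 lanes that differ only in threadIdx.y; after the three rounds
+  // below every participating lane holds the full sum (e.g. lane y=3
+  // exchanges with lanes 2, 1 and 7 along the way).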
+#define shuffleInc(i, j, mask) res(i, j) += __shfl_xor(res(i, j), mask)
+
+#define reduceRow(i, mask) \
+ shuffleInc(i, 0, mask); \
+ shuffleInc(i, 1, mask); \
+ shuffleInc(i, 2, mask); \
+ shuffleInc(i, 3, mask); \
+ shuffleInc(i, 4, mask); \
+ shuffleInc(i, 5, mask); \
+ shuffleInc(i, 6, mask); \
+ shuffleInc(i, 7, mask); \
+
+#define reduceMatrix(mask) \
+ reduceRow(0, mask); \
+ reduceRow(1, mask); \
+ reduceRow(2, mask); \
+ reduceRow(3, mask); \
+ reduceRow(4, mask); \
+ reduceRow(5, mask); \
+ reduceRow(6, mask); \
+ reduceRow(7, mask); \
+
+ // actually perform the reduction, now each thread of index (_, y, z)
+ // contains the correct values in its registers that belong in the output
+ // block
+ reduceMatrix(1);
+ reduceMatrix(2);
+ reduceMatrix(4);
+
+#undef shuffleInc
+#undef reduceRow
+#undef reduceMatrix
+
+  // now we need to copy the 64 values into main memory. We can't split work
+  // among threads because all variables are in registers. There are two ways
+  // to do this:
+ // (1) have 1 thread do 64 writes from registers into global memory
+ // (2) have 1 thread do 64 writes into shared memory, and then 8 threads
+ // each do 8 writes into global memory. We can just overwrite the shared
+ // memory from the problem we just solved.
+ // (2) is slightly faster than (1) due to less branching and more ILP
+
+  // TODO: this won't yield much gain, but we could just use the currently
+  // unused shared mem and then we wouldn't have to sync
+ // wait for shared mem to be out of use
+ __syncthreads();
+
+#define writeResultShmem(i, j) \
+ lhs_shmem[i + 8 * threadIdx.y + 64 * threadIdx.z + 512 * j] = res(i, j); \
+
+#define writeRow(i) \
+ writeResultShmem(i, 0); \
+ writeResultShmem(i, 1); \
+ writeResultShmem(i, 2); \
+ writeResultShmem(i, 3); \
+ writeResultShmem(i, 4); \
+ writeResultShmem(i, 5); \
+ writeResultShmem(i, 6); \
+ writeResultShmem(i, 7); \
+
+ if (threadIdx.x == 0) {
+ writeRow(0);
+ writeRow(1);
+ writeRow(2);
+ writeRow(3);
+ writeRow(4);
+ writeRow(5);
+ writeRow(6);
+ writeRow(7);
+ }
+#undef writeResultShmem
+#undef writeRow
+
+ const int max_i_write = numext::mini((int)((m_size - base_m - threadIdx.y + 7) / 8), 8);
+ const int max_j_write = numext::mini((int)((n_size - base_n - threadIdx.z + 7) / 8), 8);
+
+ if (threadIdx.x < max_i_write) {
+ if (max_j_write == 8) {
+      // TODO: can I trade bank conflicts for coalesced writes?
+ Scalar val0 = lhs_shmem[threadIdx.x + 8 * threadIdx.y + 64 * threadIdx.z + 512 * 0];
+ Scalar val1 = lhs_shmem[threadIdx.x + 8 * threadIdx.y + 64 * threadIdx.z + 512 * 1];
+ Scalar val2 = lhs_shmem[threadIdx.x + 8 * threadIdx.y + 64 * threadIdx.z + 512 * 2];
+ Scalar val3 = lhs_shmem[threadIdx.x + 8 * threadIdx.y + 64 * threadIdx.z + 512 * 3];
+ Scalar val4 = lhs_shmem[threadIdx.x + 8 * threadIdx.y + 64 * threadIdx.z + 512 * 4];
+ Scalar val5 = lhs_shmem[threadIdx.x + 8 * threadIdx.y + 64 * threadIdx.z + 512 * 5];
+ Scalar val6 = lhs_shmem[threadIdx.x + 8 * threadIdx.y + 64 * threadIdx.z + 512 * 6];
+ Scalar val7 = lhs_shmem[threadIdx.x + 8 * threadIdx.y + 64 * threadIdx.z + 512 * 7];
+
+ output(base_m + threadIdx.y + 8 * threadIdx.x, base_n + threadIdx.z + 8 * 0) = val0;
+ output(base_m + threadIdx.y + 8 * threadIdx.x, base_n + threadIdx.z + 8 * 1) = val1;
+ output(base_m + threadIdx.y + 8 * threadIdx.x, base_n + threadIdx.z + 8 * 2) = val2;
+ output(base_m + threadIdx.y + 8 * threadIdx.x, base_n + threadIdx.z + 8 * 3) = val3;
+ output(base_m + threadIdx.y + 8 * threadIdx.x, base_n + threadIdx.z + 8 * 4) = val4;
+ output(base_m + threadIdx.y + 8 * threadIdx.x, base_n + threadIdx.z + 8 * 5) = val5;
+ output(base_m + threadIdx.y + 8 * threadIdx.x, base_n + threadIdx.z + 8 * 6) = val6;
+ output(base_m + threadIdx.y + 8 * threadIdx.x, base_n + threadIdx.z + 8 * 7) = val7;
+ } else {
+#pragma unroll 7
+ for (int j = 0; j < max_j_write; j++) {
+ Scalar val = lhs_shmem[threadIdx.x + 8 * threadIdx.y + 64 * threadIdx.z + 512 * j];
+ output(base_m + threadIdx.y + 8 * threadIdx.x, base_n + threadIdx.z + 8 * j) = val;
+ }
+ }
+ }
+#undef res
+}
+
+
+template<typename Scalar, typename Index, typename LhsMapper,
+ typename RhsMapper, typename OutputMapper>
+__global__ void
+__launch_bounds__(512)
+EigenContractionKernel(const LhsMapper lhs, const RhsMapper rhs,
+ const OutputMapper output,
+ const Index m_size, const Index n_size, const Index k_size) {
+ __shared__ Scalar lhs_shmem[72 * 64];
+ __shared__ Scalar rhs_shmem[72 * 64];
+
+ const Index m_block_idx = blockIdx.x;
+ const Index n_block_idx = blockIdx.y;
+
+ const Index base_m = 64 * m_block_idx;
+ const Index base_n = 64 * n_block_idx;
+
+ if (base_m + 63 < m_size && base_n + 63 < n_size) {
+ EigenContractionKernelInternal<Scalar, Index, LhsMapper, RhsMapper, OutputMapper, false>(lhs, rhs, output, lhs_shmem, rhs_shmem, m_size, n_size, k_size);
+ } else {
+ EigenContractionKernelInternal<Scalar, Index, LhsMapper, RhsMapper, OutputMapper, true>(lhs, rhs, output, lhs_shmem, rhs_shmem, m_size, n_size, k_size);
+ }
+}
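+
+// A hedged launch sketch (the real launch lives in the GPU evaluator's
+// evalTo, further below): each block owns a 64x64 output tile and an 8x8x8
+// thread arrangement, so a plausible configuration is
+//   dim3 block(8, 8, 8);  // 512 threads, matching __launch_bounds__(512)
+//   dim3 grid((m_size + 63) / 64, (n_size + 63) / 64);
+//   EigenContractionKernel<Scalar, Index, LhsMapper, RhsMapper, OutputMapper>
+//       <<<grid, block>>>(lhs, rhs, output, m_size, n_size, k_size);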
+
+
+template<typename Index, typename LhsMapper,
+ typename RhsMapper, typename OutputMapper, bool CHECK_LHS_BOUNDARY,
+ bool CHECK_RHS_BOUNDARY>
+__device__ EIGEN_STRONG_INLINE void
+EigenFloatContractionKernelInternal16x16(const LhsMapper lhs, const RhsMapper rhs,
+ const OutputMapper output, float2 lhs_shmem2[][16],
+ float2 rhs_shmem2[][8], const Index m_size,
+ const Index n_size, const Index k_size,
+ const Index base_m, const Index base_n) {
+ typedef float Scalar;
+
+ // prefetch registers
+ float4 lhs_pf0, rhs_pf0;
+
+ float4 results[4];
+ for (int i=0; i < 4; i++) {
+ results[i].x = results[i].y = results[i].z = results[i].w = 0;
+ }
+
+
+#define prefetch_lhs(reg, row, col) \
+ if (!CHECK_LHS_BOUNDARY) { \
+ if (col < k_size) { \
+ reg =lhs.loadPacket<Unaligned>(row, col); \
+ } \
+ } else { \
+ if (col < k_size) { \
+ if (row + 3 < m_size) { \
+ reg =lhs.loadPacket<Unaligned>(row, col); \
+ } else if (row + 2 < m_size) { \
+ reg.x =lhs(row + 0, col); \
+ reg.y =lhs(row + 1, col); \
+ reg.z =lhs(row + 2, col); \
+ } else if (row + 1 < m_size) { \
+ reg.x =lhs(row + 0, col); \
+ reg.y =lhs(row + 1, col); \
+ } else if (row < m_size) { \
+ reg.x =lhs(row + 0, col); \
+ } \
+ } \
+ } \
+
+
+ Index lhs_vert = base_m+threadIdx.x*4;
+
+ for (Index k = 0; k < k_size; k += 16) {
+ lhs_pf0 = internal::pset1<float4>(0);
+ rhs_pf0 = internal::pset1<float4>(0);
+
+ Index lhs_horiz = threadIdx.y+k;
+ prefetch_lhs(lhs_pf0, lhs_vert, lhs_horiz)
+
+ Index rhs_vert = k+(threadIdx.x%4)*4;
+ Index rhs_horiz0 = (threadIdx.x>>2)+threadIdx.y*4+base_n;
+
+ if (!CHECK_RHS_BOUNDARY) {
+ if ((rhs_vert + 3) < k_size) {
+        // no RHS column bound check needed here; only guard the k index
+ rhs_pf0 = rhs.loadPacket<Unaligned>(rhs_vert, rhs_horiz0);
+ } else if (rhs_vert + 2 < k_size) {
+        // no RHS column bound check needed here; only guard the k index
+ rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
+ rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0);
+ rhs_pf0.z = rhs(rhs_vert + 2, rhs_horiz0);
+ } else if (rhs_vert + 1 < k_size) {
+ rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
+ rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0);
+ } else if (rhs_vert < k_size) {
+ rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
+ }
+ } else {
+ if (rhs_horiz0 < n_size) {
+ if ((rhs_vert + 3) < k_size) {
+ rhs_pf0 = rhs.loadPacket<Unaligned>(rhs_vert, rhs_horiz0);
+ } else if ((rhs_vert + 2) < k_size) {
+ rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
+ rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0);
+ rhs_pf0.z = rhs(rhs_vert + 2, rhs_horiz0);
+ } else if ((rhs_vert + 1) < k_size) {
+ rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
+ rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0);
+ } else if (rhs_vert < k_size) {
+ rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
+ }
+ }
+ }
+    float x1, x2;
+    // the following could be done as a single bitwise operation... some day.
+    if ((threadIdx.x % 8) < 4) {
+ x1 = rhs_pf0.y;
+ x2 = rhs_pf0.w;
+ } else {
+ x1 = rhs_pf0.x;
+ x2 = rhs_pf0.z;
+ }
+ x1 = __shfl_xor(x1, 4);
+ x2 = __shfl_xor(x2, 4);
+    if ((threadIdx.x % 8) < 4) {
+ rhs_pf0.y = x1;
+ rhs_pf0.w = x2;
+ } else {
+ rhs_pf0.x = x1;
+ rhs_pf0.z = x2;
+ }
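+    // In effect, lanes i and i^4 swap two float4 components so that each
+    // group of 8 lanes ends up holding feature values in the order the
+    // shared-memory layout described below expects (a hedged reading of the
+    // shuffle above).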
+
+ // We have 64 features.
+ // Row 0 -> times (0, 4, 8, 12, 1, 5, 9, 13) for features 0, 1.
+ // Row 1 -> times (0, 4, 8, 12, 1, 5, 9, 13) for features 2, 3.
+ // ...
+ // Row 31 -> times (0, 4, 8, 12, 1, 5, 9, 13) for features 62, 63
+ // Row 32 -> times (2, 6, 10, 14, 3, 7, 11, 15) for features 0, 1
+ // ...
+ rhs_shmem2[(threadIdx.x>>3)+ threadIdx.y*2][threadIdx.x%8] = make_float2(rhs_pf0.x, rhs_pf0.y);
+ rhs_shmem2[(threadIdx.x>>3)+ threadIdx.y*2+32][threadIdx.x%8] = make_float2(rhs_pf0.z, rhs_pf0.w);
+
+ // Row 0 (time 0) -> features (0, 1), (4, 5), .. (28, 29), (32, 33), .. (60, 61)
+ // Row 1 (time 1) -> features (0, 1), (4, 5), .. (28, 29), (32, 33), .. (60, 61)
+ // ...
+ // Row 15 (time 15) -> features (0, 1), (4, 5), .. (28, 29), (32, 33), .. (60, 61)
+ // Row 16 (time 0) -> features (2, 3), (6, 7), .. (30, 31), (34, 35), .. (62, 63)
+ // ...
+
+ lhs_shmem2[threadIdx.y][threadIdx.x] = make_float2(lhs_pf0.x, lhs_pf0.y);
+ lhs_shmem2[threadIdx.y+16][threadIdx.x] = make_float2(lhs_pf0.z, lhs_pf0.w);
+
+
+#define add_vals(fl1, fl2, fr1, fr2)\
+ results[0].x += fl1.x * fr1.x;\
+ results[0].y += fl1.y * fr1.x;\
+ results[0].z += fl2.x * fr1.x;\
+ results[0].w += fl2.y * fr1.x;\
+\
+ results[1].x += fl1.x * fr1.y;\
+ results[1].y += fl1.y * fr1.y;\
+ results[1].z += fl2.x * fr1.y;\
+ results[1].w += fl2.y * fr1.y;\
+\
+ results[2].x += fl1.x * fr2.x;\
+ results[2].y += fl1.y * fr2.x;\
+ results[2].z += fl2.x * fr2.x;\
+ results[2].w += fl2.y * fr2.x;\
+\
+ results[3].x += fl1.x * fr2.y;\
+ results[3].y += fl1.y * fr2.y;\
+ results[3].z += fl2.x * fr2.y;\
+ results[3].w += fl2.y * fr2.y;\
+
+ __syncthreads();
+
+ // Do the multiplies.
+ #pragma unroll
+ for (int koff = 0; koff < 16; koff ++) {
+ // 32 x threads.
+ float2 fl1 = lhs_shmem2[koff][threadIdx.x];
+ float2 fl2 = lhs_shmem2[koff + 16][threadIdx.x];
+
+ int start_feature = threadIdx.y * 4;
+ float2 fr1 = rhs_shmem2[(start_feature>>1) + 32*((koff%4)/2)][koff/4 + (koff%2)*4];
+ float2 fr2 = rhs_shmem2[(start_feature>>1) + 1 + 32*((koff%4)/2)][koff/4 + (koff%2)*4];
+
+ add_vals(fl1, fl2, fr1, fr2)
+ }
+ __syncthreads();
+ }
+
+#undef prefetch_lhs
+#undef add_vals
+
+ Index horiz_base = threadIdx.y*4+base_n;
+ if (!CHECK_LHS_BOUNDARY && !CHECK_RHS_BOUNDARY) {
+ for (int i = 0; i < 4; i++) {
+ output(lhs_vert, horiz_base + i) = results[i].x;
+ output(lhs_vert + 1, horiz_base + i) = results[i].y;
+ output(lhs_vert + 2, horiz_base + i) = results[i].z;
+ output(lhs_vert + 3, horiz_base + i) = results[i].w;
+ }
+ } else if (!CHECK_RHS_BOUNDARY) {
+ // CHECK LHS
+ if (lhs_vert + 3 < m_size) {
+ for (int i = 0; i < 4; i++) {
+ output(lhs_vert, horiz_base + i) = results[i].x;
+ output(lhs_vert + 1, horiz_base + i) = results[i].y;
+ output(lhs_vert + 2, horiz_base + i) = results[i].z;
+ output(lhs_vert + 3, horiz_base + i) = results[i].w;
+ }
+ } else if (lhs_vert + 2 < m_size) {
+ for (int i = 0; i < 4; i++) {
+ output(lhs_vert, horiz_base + i) = results[i].x;
+ output(lhs_vert + 1, horiz_base + i) = results[i].y;
+ output(lhs_vert + 2, horiz_base + i) = results[i].z;
+ }
+ } else if (lhs_vert + 1 < m_size) {
+ for (int i = 0; i < 4; i++) {
+ output(lhs_vert, horiz_base + i) = results[i].x;
+ output(lhs_vert + 1, horiz_base + i) = results[i].y;
+ }
+ } else if (lhs_vert < m_size) {
+ for (int i = 0; i < 4; i++) {
+ output(lhs_vert, horiz_base + i) = results[i].x;
+ }
+ }
+ } else if (!CHECK_LHS_BOUNDARY) {
+ // CHECK RHS
+ for (int i = 0; i < 4; i++) {
+ if (horiz_base+i < n_size) {
+ output(lhs_vert, horiz_base + i) = results[i].x;
+ output(lhs_vert + 1, horiz_base + i) = results[i].y;
+ output(lhs_vert + 2, horiz_base + i) = results[i].z;
+ output(lhs_vert + 3, horiz_base + i) = results[i].w;
+ }
+ }
+ } else {
+ // CHECK both boundaries.
+ for (int i = 0; i < 4; i++) {
+ if (horiz_base+i < n_size) {
+ if (lhs_vert < m_size)
+ output(lhs_vert, horiz_base + i) = results[i].x;
+ if (lhs_vert + 1 < m_size)
+ output(lhs_vert + 1, horiz_base + i) = results[i].y;
+ if (lhs_vert + 2 < m_size)
+ output(lhs_vert + 2, horiz_base + i) = results[i].z;
+ if (lhs_vert + 3 < m_size)
+ output(lhs_vert + 3, horiz_base + i) = results[i].w;
+ }
+ }
+ }
+}
+
+
+template<typename Index, typename LhsMapper,
+ typename RhsMapper, typename OutputMapper, bool CHECK_LHS_BOUNDARY,
+ bool CHECK_RHS_BOUNDARY>
+__device__ EIGEN_STRONG_INLINE void
+EigenFloatContractionKernelInternal(const LhsMapper lhs, const RhsMapper rhs,
+ const OutputMapper output, float2 lhs_shmem2[][32],
+ float2 rhs_shmem2[][8], const Index m_size,
+ const Index n_size, const Index k_size,
+ const Index base_m, const Index base_n) {
+ typedef float Scalar;
+
+ // prefetch registers
+ float4 lhs_pf0, lhs_pf1, lhs_pf2, lhs_pf3;
+ float4 rhs_pf0, rhs_pf1;
+
+ float4 results[8];
+ for (int i=0; i < 8; i++) {
+ results[i].x = results[i].y = results[i].z = results[i].w = 0;
+ }
+
+
+ Index lhs_vert = base_m+threadIdx.x*4+(threadIdx.y%4)*32;
+ for (Index k = 0; k < k_size; k += 32) {
+ lhs_pf0 = internal::pset1<float4>(0);
+ lhs_pf1 = internal::pset1<float4>(0);
+ lhs_pf2 = internal::pset1<float4>(0);
+ lhs_pf3 = internal::pset1<float4>(0);
+
+ rhs_pf0 = internal::pset1<float4>(0);
+ rhs_pf1 = internal::pset1<float4>(0);
+
+ if (!CHECK_LHS_BOUNDARY) {
+ if ((threadIdx.y/4+k+24) < k_size) {
+ lhs_pf0 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k));
+ lhs_pf1 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+8));
+ lhs_pf2 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+16));
+ lhs_pf3 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+24));
+ } else if ((threadIdx.y/4+k+16) < k_size) {
+ lhs_pf0 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k));
+ lhs_pf1 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+8));
+ lhs_pf2 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+16));
+ } else if ((threadIdx.y/4+k+8) < k_size) {
+ lhs_pf0 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k));
+ lhs_pf1 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+8));
+ } else if ((threadIdx.y/4+k) < k_size) {
+ lhs_pf0 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k));
+ }
+ } else {
+ // just CHECK_LHS_BOUNDARY
+ if (lhs_vert + 3 < m_size) {
+ if ((threadIdx.y/4+k+24) < k_size) {
+ lhs_pf0 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k));
+ lhs_pf1 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+8));
+ lhs_pf2 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+16));
+ lhs_pf3 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+24));
+ } else if ((threadIdx.y/4+k+16) < k_size) {
+ lhs_pf0 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k));
+ lhs_pf1 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+8));
+ lhs_pf2 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+16));
+ } else if ((threadIdx.y/4+k+8) < k_size) {
+ lhs_pf0 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k));
+ lhs_pf1 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+8));
+ } else if ((threadIdx.y/4+k) < k_size) {
+ lhs_pf0 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k));
+ }
+ } else if (lhs_vert + 2 < m_size) {
+ if ((threadIdx.y/4+k+24) < k_size) {
+ lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k));
+ lhs_pf0.y =lhs(lhs_vert + 1, (threadIdx.y/4+k));
+ lhs_pf0.z =lhs(lhs_vert + 2, (threadIdx.y/4+k));
+ lhs_pf1.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+8));
+ lhs_pf1.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+8));
+ lhs_pf1.z =lhs(lhs_vert + 2, (threadIdx.y/4+k+8));
+ lhs_pf2.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+16));
+ lhs_pf2.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+16));
+ lhs_pf2.z =lhs(lhs_vert + 2, (threadIdx.y/4+k+16));
+ lhs_pf3.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+24));
+ lhs_pf3.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+24));
+ lhs_pf3.z =lhs(lhs_vert + 2, (threadIdx.y/4+k+24));
+ } else if ((threadIdx.y/4+k+16) < k_size) {
+ lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k));
+ lhs_pf0.y =lhs(lhs_vert + 1, (threadIdx.y/4+k));
+ lhs_pf0.z =lhs(lhs_vert + 2, (threadIdx.y/4+k));
+ lhs_pf1.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+8));
+ lhs_pf1.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+8));
+ lhs_pf1.z =lhs(lhs_vert + 2, (threadIdx.y/4+k+8));
+ lhs_pf2.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+16));
+ lhs_pf2.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+16));
+ lhs_pf2.z =lhs(lhs_vert + 2, (threadIdx.y/4+k+16));
+ } else if ((threadIdx.y/4+k+8) < k_size) {
+ lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k));
+ lhs_pf0.y =lhs(lhs_vert + 1, (threadIdx.y/4+k));
+ lhs_pf0.z =lhs(lhs_vert + 2, (threadIdx.y/4+k));
+ lhs_pf1.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+8));
+ lhs_pf1.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+8));
+ lhs_pf1.z =lhs(lhs_vert + 2, (threadIdx.y/4+k+8));
+ } else if ((threadIdx.y/4+k) < k_size) {
+ lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k));
+ lhs_pf0.y =lhs(lhs_vert + 1, (threadIdx.y/4+k));
+ lhs_pf0.z =lhs(lhs_vert + 2, (threadIdx.y/4+k));
+ }
+ } else if (lhs_vert + 1 < m_size) {
+ if ((threadIdx.y/4+k+24) < k_size) {
+ lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k));
+ lhs_pf0.y =lhs(lhs_vert + 1, (threadIdx.y/4+k));
+ lhs_pf1.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+8));
+ lhs_pf1.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+8));
+ lhs_pf2.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+16));
+ lhs_pf2.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+16));
+ lhs_pf3.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+24));
+ lhs_pf3.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+24));
+ } else if ((threadIdx.y/4+k+16) < k_size) {
+ lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k));
+ lhs_pf0.y =lhs(lhs_vert + 1, (threadIdx.y/4+k));
+ lhs_pf1.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+8));
+ lhs_pf1.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+8));
+ lhs_pf2.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+16));
+ lhs_pf2.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+16));
+ } else if ((threadIdx.y/4+k+8) < k_size) {
+ lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k));
+ lhs_pf0.y =lhs(lhs_vert + 1, (threadIdx.y/4+k));
+ lhs_pf1.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+8));
+ lhs_pf1.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+8));
+ } else if ((threadIdx.y/4+k) < k_size) {
+ lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k));
+ lhs_pf0.y =lhs(lhs_vert + 1, (threadIdx.y/4+k));
+ }
+ } else if (lhs_vert < m_size) {
+ if ((threadIdx.y/4+k+24) < k_size) {
+ lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k));
+ lhs_pf1.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+8));
+ lhs_pf2.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+16));
+ lhs_pf3.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+24));
+ } else if ((threadIdx.y/4+k+16) < k_size) {
+ lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k));
+ lhs_pf1.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+8));
+ lhs_pf2.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+16));
+ } else if ((threadIdx.y/4+k+8) < k_size) {
+ lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k));
+ lhs_pf1.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+8));
+ } else if ((threadIdx.y/4+k) < k_size) {
+ lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k));
+ }
+ }
+ }
+ __syncthreads();
+ Index rhs_vert = k+threadIdx.x*4;
+ Index rhs_horiz0 = threadIdx.y*2+base_n;
+ Index rhs_horiz1 = threadIdx.y*2+1+base_n;
+ if (!CHECK_RHS_BOUNDARY) {
+ if ((rhs_vert + 3) < k_size) {
+        // no RHS column bound check needed here; only guard the k index
+ rhs_pf0 = rhs.loadPacket<Unaligned>(rhs_vert, rhs_horiz0);
+ rhs_pf1 = rhs.loadPacket<Unaligned>(rhs_vert, rhs_horiz1);
+ } else if (rhs_vert + 2 < k_size) {
+        // no RHS column bound check needed here; only guard the k index
+ rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
+ rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0);
+ rhs_pf0.z = rhs(rhs_vert + 2, rhs_horiz0);
+ rhs_pf1.x = rhs(rhs_vert, rhs_horiz1);
+ rhs_pf1.y = rhs(rhs_vert + 1, rhs_horiz1);
+ rhs_pf1.z = rhs(rhs_vert + 2, rhs_horiz1);
+ } else if (rhs_vert + 1 < k_size) {
+ rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
+ rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0);
+ rhs_pf1.x = rhs(rhs_vert, rhs_horiz1);
+ rhs_pf1.y = rhs(rhs_vert + 1, rhs_horiz1);
+ } else if (rhs_vert < k_size) {
+ rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
+ rhs_pf1.x = rhs(rhs_vert, rhs_horiz1);
+ }
+ } else {
+ if (rhs_horiz1 < n_size) {
+ if ((rhs_vert + 3) < k_size) {
+ // just CHECK_RHS_BOUNDARY
+ rhs_pf0 = rhs.loadPacket<Unaligned>(rhs_vert, rhs_horiz0);
+ rhs_pf1 = rhs.loadPacket<Unaligned>(rhs_vert, rhs_horiz1);
+ } else if (rhs_vert + 2 < k_size) {
+ // just CHECK_RHS_BOUNDARY
+ rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
+ rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0);
+ rhs_pf0.z = rhs(rhs_vert + 2, rhs_horiz0);
+ rhs_pf1.x = rhs(rhs_vert, rhs_horiz1);
+ rhs_pf1.y = rhs(rhs_vert + 1, rhs_horiz1);
+ rhs_pf1.z = rhs(rhs_vert + 2, rhs_horiz1);
+        } else if (rhs_vert + 1 < k_size) {
+ rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
+ rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0);
+ rhs_pf1.x = rhs(rhs_vert, rhs_horiz1);
+ rhs_pf1.y = rhs(rhs_vert + 1, rhs_horiz1);
+        } else if (rhs_vert < k_size) {
+ rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
+ rhs_pf1.x = rhs(rhs_vert, rhs_horiz1);
+ }
+ } else if (rhs_horiz0 < n_size) {
+ if ((rhs_vert + 3) < k_size) {
+ // just CHECK_RHS_BOUNDARY
+ rhs_pf0 = rhs.loadPacket<Unaligned>(rhs_vert, rhs_horiz0);
+ } else if ((rhs_vert + 2) < k_size) {
+ // just CHECK_RHS_BOUNDARY
+ rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
+ rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0);
+ rhs_pf0.z = rhs(rhs_vert + 2, rhs_horiz0);
+ } else if ((rhs_vert + 1) < k_size) {
+ rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
+ rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0);
+ } else if (rhs_vert < k_size) {
+ rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
+ }
+ }
+ }
+ __syncthreads();
+ // Loaded. Do computation
+ // Row 0 -> times (0, 4, 8, .. 28) for features 0, 1.
+ // Row 1 -> times (0, 4, 8, .. 28) for features 2, 3.
+ // ..
+ // Row 31 -> times (0, 4, 8, .. 28) for features 62, 63
+ rhs_shmem2[threadIdx.y][threadIdx.x] = make_float2(rhs_pf0.x, rhs_pf1.x);
+ // Row 32 -> times (1, 5, 9, .. 29) for features 0, 1.
+ // Row 33 -> times (1, 5, 9, .. 29) for features 2, 3.
+ // ..
+ rhs_shmem2[threadIdx.y+32][threadIdx.x] = make_float2(rhs_pf0.y, rhs_pf1.y);
+ // Row 64 -> times (2, 6, 10, .. 30) for features 0, 1.
+ // Row 65 -> times (2, 6, 10, .. 30) for features 2, 3.
+ rhs_shmem2[threadIdx.y+64][threadIdx.x] = make_float2(rhs_pf0.z, rhs_pf1.z);
+ // Row 96 -> times (3, 7, 11, .. 31) for features 0, 1.
+ // Row 97 -> times (3, 7, 11, .. 31) for features 2, 3.
+ rhs_shmem2[threadIdx.y+96][threadIdx.x] = make_float2(rhs_pf0.w, rhs_pf1.w);
+
+ // LHS.
+ // Row 0 (time 0) -> features (0, 1), (4, 5), .. (28, 29), (32, 33), .. (60, 61) .. (124, 125)
+ // Row 1 (time 1) -> features (0, 1), (4, 5), .. (28, 29), (32, 33), .. (60, 61) .. (124, 125)
+ // ...
+ // Row 8 (time 0) -> features (2, 3), (6, 7), .. (30, 31), (34, 35), .. (62, 63) .. (126, 127)
+ // Row 15 (time 7) -> features (2, 3), (6, 7), .. (30, 31), (34, 35), .. (62, 63) .. (126, 127)
+
+
+#define add_vals(a_feat1, a_feat2, f1, f2, f3, f4)\
+ results[0].x += a_feat1.x * f1.x;\
+ results[1].x += a_feat1.x * f1.y;\
+ results[2].x += a_feat1.x * f2.x;\
+ results[3].x += a_feat1.x * f2.y;\
+ results[4].x += a_feat1.x * f3.x;\
+ results[5].x += a_feat1.x * f3.y;\
+ results[6].x += a_feat1.x * f4.x;\
+ results[7].x += a_feat1.x * f4.y;\
+\
+ results[0].y += a_feat1.y * f1.x;\
+ results[1].y += a_feat1.y * f1.y;\
+ results[2].y += a_feat1.y * f2.x;\
+ results[3].y += a_feat1.y * f2.y;\
+ results[4].y += a_feat1.y * f3.x;\
+ results[5].y += a_feat1.y * f3.y;\
+ results[6].y += a_feat1.y * f4.x;\
+ results[7].y += a_feat1.y * f4.y;\
+\
+ results[0].z += a_feat2.x * f1.x;\
+ results[1].z += a_feat2.x * f1.y;\
+ results[2].z += a_feat2.x * f2.x;\
+ results[3].z += a_feat2.x * f2.y;\
+ results[4].z += a_feat2.x * f3.x;\
+ results[5].z += a_feat2.x * f3.y;\
+ results[6].z += a_feat2.x * f4.x;\
+ results[7].z += a_feat2.x * f4.y;\
+\
+ results[0].w += a_feat2.y * f1.x;\
+ results[1].w += a_feat2.y * f1.y;\
+ results[2].w += a_feat2.y * f2.x;\
+ results[3].w += a_feat2.y * f2.y;\
+ results[4].w += a_feat2.y * f3.x;\
+ results[5].w += a_feat2.y * f3.y;\
+ results[6].w += a_feat2.y * f4.x;\
+ results[7].w += a_feat2.y * f4.y;\
+
+ lhs_shmem2[threadIdx.y/4][threadIdx.x+(threadIdx.y%4)*8] = make_float2(lhs_pf0.x, lhs_pf0.y);
+ lhs_shmem2[threadIdx.y/4+8][threadIdx.x+(threadIdx.y%4)*8] = make_float2(lhs_pf1.x, lhs_pf1.y);
+ lhs_shmem2[threadIdx.y/4+16][threadIdx.x+(threadIdx.y%4)*8] = make_float2(lhs_pf2.x, lhs_pf2.y);
+ lhs_shmem2[threadIdx.y/4+24][threadIdx.x+(threadIdx.y%4)*8] = make_float2(lhs_pf3.x, lhs_pf3.y);
+
+ lhs_shmem2[threadIdx.y/4 + 32][threadIdx.x+(threadIdx.y%4)*8] = make_float2(lhs_pf0.z, lhs_pf0.w);
+ lhs_shmem2[threadIdx.y/4 + 40][threadIdx.x+(threadIdx.y%4)*8] = make_float2(lhs_pf1.z, lhs_pf1.w);
+ lhs_shmem2[threadIdx.y/4 + 48][threadIdx.x+(threadIdx.y%4)*8] = make_float2(lhs_pf2.z, lhs_pf2.w);
+ lhs_shmem2[threadIdx.y/4 + 56][threadIdx.x+(threadIdx.y%4)*8] = make_float2(lhs_pf3.z, lhs_pf3.w);
+
+ __syncthreads();
+
+ // Do the multiplies.
+ #pragma unroll
+ for (int koff = 0; koff < 32; koff ++) {
+ float2 a3 = lhs_shmem2[koff][threadIdx.x + (threadIdx.y % 4) * 8];
+ float2 a4 = lhs_shmem2[koff + 32][threadIdx.x + (threadIdx.y % 4) * 8];
+
+      // the first feature is at (threadIdx.y/4) * 8; the eight features span
+      // start_feature .. start_feature + 7.
+ int start_feature = (threadIdx.y / 4) * 8;
+
+ float2 br1 = rhs_shmem2[start_feature/2 + (koff % 4) * 32][koff/4];
+ float2 br2 = rhs_shmem2[start_feature/2 + 1 + (koff % 4) * 32][koff/4];
+ float2 br3 = rhs_shmem2[start_feature/2 + 2 + (koff % 4) * 32][koff/4];
+ float2 br4 = rhs_shmem2[start_feature/2 + 3 + (koff % 4) * 32][koff/4];
+
+ add_vals(a3, a4, br1, br2, br3, br4)
+ }
+ __syncthreads();
+ } // end loop over k
+
+
+ __syncthreads();
+ Index horiz_base = (threadIdx.y/4)*8+base_n;
+ if (!CHECK_LHS_BOUNDARY && !CHECK_RHS_BOUNDARY) {
+ for (int i = 0; i < 8; i++) {
+ output(lhs_vert, horiz_base + i) = results[i].x;
+ output(lhs_vert + 1, horiz_base + i) = results[i].y;
+ output(lhs_vert + 2, horiz_base + i) = results[i].z;
+ output(lhs_vert + 3, horiz_base + i) = results[i].w;
+ }
+ } else if (!CHECK_RHS_BOUNDARY) {
+ if (lhs_vert + 3 < m_size) {
+ for (int i = 0; i < 8; i++) {
+ output(lhs_vert, horiz_base + i) = results[i].x;
+ output(lhs_vert + 1, horiz_base + i) = results[i].y;
+ output(lhs_vert + 2, horiz_base + i) = results[i].z;
+ output(lhs_vert + 3, horiz_base + i) = results[i].w;
+ }
+ } else if (lhs_vert + 2 < m_size) {
+ for (int i = 0; i < 8; i++) {
+ output(lhs_vert, horiz_base + i) = results[i].x;
+ output(lhs_vert + 1, horiz_base + i) = results[i].y;
+ output(lhs_vert + 2, horiz_base + i) = results[i].z;
+ }
+ } else if (lhs_vert + 1 < m_size) {
+ for (int i = 0; i < 8; i++) {
+ output(lhs_vert, horiz_base + i) = results[i].x;
+ output(lhs_vert + 1, horiz_base + i) = results[i].y;
+ }
+ } else if (lhs_vert < m_size) {
+ for (int i = 0; i < 8; i++) {
+ output(lhs_vert, horiz_base + i) = results[i].x;
+ }
+ }
+ } else if (!CHECK_LHS_BOUNDARY) {
+    // CHECK RHS boundary
+ for (int i = 0; i < 8; i++) {
+ if (horiz_base + i < n_size) {
+ output(lhs_vert, horiz_base + i) = results[i].x;
+ output(lhs_vert + 1, horiz_base + i) = results[i].y;
+ output(lhs_vert + 2, horiz_base + i) = results[i].z;
+ output(lhs_vert + 3, horiz_base + i) = results[i].w;
+ }
+ }
+ } else {
+ // CHECK both boundaries.
+ for (int i = 0; i < 8; i++) {
+ if (horiz_base + i < n_size) {
+ if (lhs_vert < m_size)
+ output(lhs_vert, horiz_base + i) = results[i].x;
+ if (lhs_vert + 1 < m_size)
+ output(lhs_vert + 1, horiz_base + i) = results[i].y;
+ if (lhs_vert + 2 < m_size)
+ output(lhs_vert + 2, horiz_base + i) = results[i].z;
+ if (lhs_vert + 3 < m_size)
+ output(lhs_vert + 3, horiz_base + i) = results[i].w;
+ }
+ }
+ }
+}
+
+
+template<typename Index, typename LhsMapper,
+ typename RhsMapper, typename OutputMapper>
+__global__ void
+__launch_bounds__(256)
+EigenFloatContractionKernel(const LhsMapper lhs, const RhsMapper rhs,
+ const OutputMapper output,
+ const Index m_size, const Index n_size, const Index k_size) {
+ __shared__ float2 lhs_shmem[64*32];
+ __shared__ float2 rhs_shmem[128*8];
+
+ typedef float2 LHS_MEM[64][32];
+ typedef float2 RHS_MEM[128][8];
+
+ typedef float2 LHS_MEM16x16[32][16];
+ typedef float2 RHS_MEM16x16[64][8];
+
+ const Index m_block_idx = blockIdx.x;
+ const Index n_block_idx = blockIdx.y;
+
+ const Index base_m = 128 * m_block_idx;
+ const Index base_n = 64 * n_block_idx;
+
+ bool check_rhs = (base_n + 63) >= n_size;
+ bool check_lhs128 = (base_m + 127) >= m_size;
+
+ if (!check_rhs) {
+ if (!check_lhs128) {
+ // >= 128 rows left
+ EigenFloatContractionKernelInternal<Index, LhsMapper, RhsMapper, OutputMapper, false, false>(
+ lhs, rhs, output, *((LHS_MEM *) lhs_shmem), *((RHS_MEM *) rhs_shmem), m_size, n_size, k_size, base_m, base_n);
+ } else {
+ EigenFloatContractionKernelInternal<Index, LhsMapper, RhsMapper, OutputMapper, true, false>(
+ lhs, rhs, output, *((LHS_MEM *) lhs_shmem), *((RHS_MEM *) rhs_shmem), m_size, n_size, k_size, base_m, base_n);
+ }
+ } else {
+ if (!check_lhs128) {
+ // >= 128 rows left
+ EigenFloatContractionKernelInternal<Index, LhsMapper, RhsMapper, OutputMapper, false, true>(
+ lhs, rhs, output, *((LHS_MEM *) lhs_shmem), *((RHS_MEM *) rhs_shmem), m_size, n_size, k_size, base_m, base_n);
+ } else {
+ EigenFloatContractionKernelInternal<Index, LhsMapper, RhsMapper, OutputMapper, true, true>(
+ lhs, rhs, output, *((LHS_MEM *) lhs_shmem), *((RHS_MEM *) rhs_shmem), m_size, n_size, k_size, base_m, base_n);
+ }
+ }
+}
+
+template<typename Index, typename LhsMapper,
+ typename RhsMapper, typename OutputMapper>
+__global__ void
+__launch_bounds__(256)
+EigenFloatContractionKernel16x16(const LhsMapper lhs, const RhsMapper rhs,
+ const OutputMapper output,
+ const Index m_size, const Index n_size, const Index k_size) {
+ __shared__ float2 lhs_shmem[32][16];
+ __shared__ float2 rhs_shmem[64][8];
+
+ const Index m_block_idx = blockIdx.x;
+ const Index n_block_idx = blockIdx.y;
+
+ const Index base_m = 64 * m_block_idx;
+ const Index base_n = 64 * n_block_idx;
+
+ if (base_m + 63 < m_size) {
+ if (base_n + 63 < n_size) {
+ EigenFloatContractionKernelInternal16x16<Index, LhsMapper, RhsMapper, OutputMapper, false, false>(lhs, rhs, output, lhs_shmem, rhs_shmem, m_size, n_size, k_size, base_m, base_n);
+ } else {
+ EigenFloatContractionKernelInternal16x16<Index, LhsMapper, RhsMapper, OutputMapper, false, true>(lhs, rhs, output, lhs_shmem, rhs_shmem, m_size, n_size, k_size, base_m, base_n);
+ }
+ } else {
+ if (base_n + 63 < n_size) {
+ EigenFloatContractionKernelInternal16x16<Index, LhsMapper, RhsMapper, OutputMapper, true, false>(lhs, rhs, output, lhs_shmem, rhs_shmem, m_size, n_size, k_size, base_m, base_n);
+ } else {
+ EigenFloatContractionKernelInternal16x16<Index, LhsMapper, RhsMapper, OutputMapper, true, true>(lhs, rhs, output, lhs_shmem, rhs_shmem, m_size, n_size, k_size, base_m, base_n);
+ }
+ }
+}
+
+
+template<typename Indices, typename LeftArgType, typename RightArgType>
+struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType>, GpuDevice> :
+ public TensorContractionEvaluatorBase<TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType>, GpuDevice> > {
+
+ typedef GpuDevice Device;
+
+ typedef TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType>, Device> Self;
+ typedef TensorContractionEvaluatorBase<Self> Base;
+
+ typedef TensorContractionOp<Indices, LeftArgType, RightArgType> XprType;
+ typedef typename internal::remove_const<typename XprType::Scalar>::type Scalar;
+ typedef typename XprType::Index Index;
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+ typedef typename PacketType<CoeffReturnType, GpuDevice>::type PacketReturnType;
+
+ enum {
+ Layout = TensorEvaluator<LeftArgType, Device>::Layout,
+ };
+
+ // Most of the code is assuming that both input tensors are ColMajor. If the
+ // inputs are RowMajor, we will "cheat" by swapping the LHS and RHS:
+ // If we want to compute A * B = C, where A is LHS and B is RHS, the code
+ // will pretend B is LHS and A is RHS.
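+ // (This works because a RowMajor expression viewed through ColMajor strides
+ // is its transpose, and C = A * B is equivalent to C^T = B^T * A^T.)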
+ typedef typename internal::conditional<
+ static_cast<int>(Layout) == static_cast<int>(ColMajor), LeftArgType, RightArgType>::type EvalLeftArgType;
+ typedef typename internal::conditional<
+ static_cast<int>(Layout) == static_cast<int>(ColMajor), RightArgType, LeftArgType>::type EvalRightArgType;
+
+ static const int LDims =
+ internal::array_size<typename TensorEvaluator<EvalLeftArgType, Device>::Dimensions>::value;
+ static const int RDims =
+ internal::array_size<typename TensorEvaluator<EvalRightArgType, Device>::Dimensions>::value;
+ static const int ContractDims = internal::array_size<Indices>::value;
+
+ typedef array<Index, LDims> left_dim_mapper_t;
+ typedef array<Index, RDims> right_dim_mapper_t;
+
+ typedef array<Index, ContractDims> contract_t;
+ typedef array<Index, LDims - ContractDims> left_nocontract_t;
+ typedef array<Index, RDims - ContractDims> right_nocontract_t;
+
+ static const int NumDims = LDims + RDims - 2 * ContractDims;
+
+ typedef DSizes<Index, NumDims> Dimensions;
+
+ // typedefs needed in evalTo
+ typedef typename internal::remove_const<typename EvalLeftArgType::Scalar>::type LhsScalar;
+ typedef typename internal::remove_const<typename EvalRightArgType::Scalar>::type RhsScalar;
+
+ typedef TensorEvaluator<EvalLeftArgType, Device> LeftEvaluator;
+ typedef TensorEvaluator<EvalRightArgType, Device> RightEvaluator;
+
+ typedef typename LeftEvaluator::Dimensions LeftDimensions;
+ typedef typename RightEvaluator::Dimensions RightDimensions;
+
+ EIGEN_DEVICE_FUNC TensorEvaluator(const XprType& op, const Device& device) :
+ Base(op, device) {}
+
+ // We need to redefine this method to make nvcc happy
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* data) {
+ this->m_leftImpl.evalSubExprsIfNeeded(NULL);
+ this->m_rightImpl.evalSubExprsIfNeeded(NULL);
+ if (data) {
+ evalTo(data);
+ return false;
+ } else {
+ this->m_result = static_cast<Scalar *>(this->m_device.allocate(this->dimensions().TotalSize() * sizeof(Scalar)));
+ evalTo(this->m_result);
+ return true;
+ }
+ }
+
+ void evalTo(Scalar* buffer) const {
+ if (this->m_lhs_inner_dim_contiguous) {
+ if (this->m_rhs_inner_dim_contiguous) {
+ if (this->m_rhs_inner_dim_reordered) {
+ evalTyped<true, true, true, Unaligned>(buffer);
+ }
+ else {
+ evalTyped<true, true, false, Unaligned>(buffer);
+ }
+ }
+ else {
+ if (this->m_rhs_inner_dim_reordered) {
+ evalTyped<true, false, true, Unaligned>(buffer);
+ }
+ else {
+ evalTyped<true, false, false, Unaligned>(buffer);
+ }
+ }
+ }
+ else {
+ if (this->m_rhs_inner_dim_contiguous) {
+ if (this->m_rhs_inner_dim_reordered) {
+ evalTyped<false, true, true, Unaligned>(buffer);
+ }
+ else {
+ evalTyped<false, true, false, Unaligned>(buffer);
+ }
+ }
+ else {
+ if (this->m_rhs_inner_dim_reordered) {
+ evalTyped<false, false, true, Unaligned>(buffer);
+ }
+ else {
+ evalTyped<false, false, false, Unaligned>(buffer);
+ }
+ }
+ }
+ }
+
+ template <typename LhsScalar, typename RhsScalar, typename Index, typename LhsMapper, typename RhsMapper, typename OutputMapper> struct LaunchKernels {
+ static void Run(const LhsMapper& lhs, const RhsMapper& rhs, const OutputMapper& output, Index m, Index n, Index k, const GpuDevice& device) {
+ const Index m_blocks = (m + 63) / 64;
+ const Index n_blocks = (n + 63) / 64;
+ const dim3 num_blocks(m_blocks, n_blocks, 1);
+ const dim3 block_size(8, 8, 8);
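+ // Each block computes one 64x64 tile of the output; the (m + 63) / 64 style
+ // rounding above grows the grid so partial tiles at the edges are covered.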
+ LAUNCH_CUDA_KERNEL((EigenContractionKernel<Scalar, Index, LhsMapper, RhsMapper, OutputMapper>), num_blocks, block_size, 0, device, lhs, rhs, output, m, n, k);
+ }
+ };
+
+ template <typename Index, typename LhsMapper, typename RhsMapper, typename OutputMapper> struct LaunchKernels<float, float, Index, LhsMapper, RhsMapper, OutputMapper> {
+ static void Run(const LhsMapper& lhs, const RhsMapper& rhs, const OutputMapper& output, Index m, Index n, Index k, const GpuDevice& device) {
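+ // Heuristic dispatch (empirically tuned): small outputs go to the
+ // 16x16-thread kernel that covers 64x64 tiles, larger ones to the
+ // 8x32-thread kernel covering 128x64 tiles per block.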
+ if (m < 768 || n < 768) {
+ const Index m_blocks = (m + 63) / 64;
+ const Index n_blocks = (n + 63) / 64;
+ const dim3 num_blocks(m_blocks, n_blocks, 1);
+ const dim3 block_size(16, 16, 1);
+ LAUNCH_CUDA_KERNEL((EigenFloatContractionKernel16x16<Index, LhsMapper, RhsMapper, OutputMapper>), num_blocks, block_size, 0, device, lhs, rhs, output, m, n, k);
+ } else {
+ const Index m_blocks = (m + 127) / 128;
+ const Index n_blocks = (n + 63) / 64;
+ const dim3 num_blocks(m_blocks, n_blocks, 1);
+ const dim3 block_size(8, 32, 1);
+ LAUNCH_CUDA_KERNEL((EigenFloatContractionKernel<Index, LhsMapper, RhsMapper, OutputMapper>), num_blocks, block_size, 0, device, lhs, rhs, output, m, n, k);
+ }
+ }
+ };
+
+ template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
+ void evalTyped(Scalar* buffer) const {
+ // columns in left side, rows in right side
+ const Index k = this->m_k_size;
+ EIGEN_UNUSED_VARIABLE(k)
+
+ // rows in left side
+ const Index m = this->m_i_size;
+
+ // columns in right side
+ const Index n = this->m_j_size;
+
+ // zero out the result buffer (which must be of size at least m * n * sizeof(Scalar))
+ this->m_device.memset(buffer, 0, m * n * sizeof(Scalar));
+
+ typedef internal::TensorContractionInputMapper<LhsScalar, Index, internal::Lhs,
+ LeftEvaluator, left_nocontract_t,
+ contract_t, 4,
+ lhs_inner_dim_contiguous,
+ false, Unaligned> LhsMapper;
+
+ typedef internal::TensorContractionInputMapper<RhsScalar, Index, internal::Rhs,
+ RightEvaluator, right_nocontract_t,
+ contract_t, 4,
+ rhs_inner_dim_contiguous,
+ rhs_inner_dim_reordered, Unaligned> RhsMapper;
+
+ typedef internal::blas_data_mapper<Scalar, Index, ColMajor> OutputMapper;
+
+
+ // initialize data mappers
+ LhsMapper lhs(this->m_leftImpl, this->m_left_nocontract_strides, this->m_i_strides,
+ this->m_left_contracting_strides, this->m_k_strides);
+
+ RhsMapper rhs(this->m_rightImpl, this->m_right_nocontract_strides, this->m_j_strides,
+ this->m_right_contracting_strides, this->m_k_strides);
+
+ OutputMapper output(buffer, m);
+
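+ // The shared-memory tiles hold float2 values (8 bytes each), so an
+ // eight-byte bank configuration helps the kernels avoid bank conflicts.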
+ setCudaSharedMemConfig(cudaSharedMemBankSizeEightByte);
+ LaunchKernels<LhsScalar, RhsScalar, Index, LhsMapper, RhsMapper, OutputMapper>::Run(lhs, rhs, output, m, n, k, this->m_device);
+ }
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_USE_GPU and __CUDACC__
+#endif // EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_CUDA_H
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionMapper.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionMapper.h
new file mode 100644
index 000000000..9b2cb3ff6
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionMapper.h
@@ -0,0 +1,467 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_MAPPER_H
+#define EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_MAPPER_H
+
+namespace Eigen {
+
+namespace internal {
+
+enum {
+ Rhs = 0,
+ Lhs = 1
+};
+
+/*
+ * Implementation of the Eigen blas_data_mapper class for tensors.
+ */
+
+template <typename Tensor, bool HasRawAccess> struct CoeffLoader {
+ enum {
+ DirectOffsets = false
+ };
+
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE CoeffLoader(const Tensor& tensor) : m_tensor(tensor) { }
+
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void offsetBuffer(typename Tensor::Index) {
+ eigen_assert(false && "unsupported");
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE typename Tensor::Scalar coeff(typename Tensor::Index index) const { return m_tensor.coeff(index); }
+
+ template<int LoadMode> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ typename Tensor::PacketReturnType packet(typename Tensor::Index index) const
+ {
+ return m_tensor.template packet<LoadMode>(index);
+ }
+
+
+ private:
+ const Tensor m_tensor;
+};
+
+template <typename Tensor> struct CoeffLoader<Tensor, true> {
+ enum {
+ DirectOffsets = true
+ };
+
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE CoeffLoader(const Tensor& tensor) : m_data(tensor.data()) {}
+
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void offsetBuffer(typename Tensor::Index offset) {
+ m_data += offset;
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE typename Tensor::Scalar coeff(typename Tensor::Index index) const { return loadConstant(m_data+index); }
+
+ template<int LoadMode> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ typename Tensor::PacketReturnType packet(typename Tensor::Index index) const
+ {
+ return internal::ploadt_ro<typename Tensor::PacketReturnType, LoadMode>(m_data + index);
+ }
+ private:
+ typedef typename Tensor::Scalar Scalar;
+ const Scalar* m_data;
+};
+
+template<typename Scalar, typename Index, int side,
+ typename Tensor,
+ typename nocontract_t, typename contract_t,
+ int packet_size, bool inner_dim_contiguous, int Alignment>
+class SimpleTensorContractionMapper {
+ public:
+ EIGEN_DEVICE_FUNC
+ SimpleTensorContractionMapper(const Tensor& tensor,
+ const nocontract_t& nocontract_strides,
+ const nocontract_t& ij_strides,
+ const contract_t& contract_strides,
+ const contract_t& k_strides) :
+ m_tensor(tensor),
+ m_nocontract_strides(nocontract_strides),
+ m_ij_strides(ij_strides),
+ m_contract_strides(contract_strides),
+ m_k_strides(k_strides) { }
+
+ enum {
+ DirectOffsets = CoeffLoader<Tensor, Tensor::RawAccess>::DirectOffsets
+ };
+
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void offsetBuffer(typename Tensor::Index offset) {
+ m_tensor.offsetBuffer(offset);
+ }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE void prefetch(Index /*i*/) { }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Scalar operator()(Index row) const {
+ // column major assumption
+ return operator()(row, 0);
+ }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Scalar operator()(Index row, Index col) const {
+ return m_tensor.coeff(computeIndex(row, col));
+ }
+
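+ // computeIndex maps a (row, col) coordinate of the logical contraction
+ // matrix to a linear index into the underlying tensor: the non-contracting
+ // coordinate (row for Lhs, col for Rhs) is peeled apart digit by digit via
+ // m_ij_strides (a mixed-radix decomposition) and recombined with
+ // m_nocontract_strides; the contracting coordinate is treated the same way
+ // via m_k_strides and m_contract_strides.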
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Index computeIndex(Index row, Index col) const {
+ const bool left = (side == Lhs);
+ Index nocontract_val = left ? row : col;
+ Index linidx = 0;
+ for (int i = static_cast<int>(array_size<nocontract_t>::value) - 1; i > 0; i--) {
+ const Index idx = nocontract_val / m_ij_strides[i];
+ linidx += idx * m_nocontract_strides[i];
+ nocontract_val -= idx * m_ij_strides[i];
+ }
+ if (array_size<typename Tensor::Dimensions>::value > array_size<contract_t>::value) {
+ if (side == Lhs && inner_dim_contiguous) {
+ eigen_assert(m_nocontract_strides[0] == 1);
+ linidx += nocontract_val;
+ } else {
+ linidx += nocontract_val * m_nocontract_strides[0];
+ }
+ }
+
+ Index contract_val = left ? col : row;
+ if (array_size<contract_t>::value > 0) {
+ for (int i = static_cast<int>(array_size<contract_t>::value) - 1; i > 0; i--) {
+ const Index idx = contract_val / m_k_strides[i];
+ linidx += idx * m_contract_strides[i];
+ contract_val -= idx * m_k_strides[i];
+ }
+
+ if (side == Rhs && inner_dim_contiguous) {
+ eigen_assert(m_contract_strides[0] == 1);
+ linidx += contract_val;
+ } else {
+ linidx += contract_val * m_contract_strides[0];
+ }
+ }
+
+ return linidx;
+ }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE IndexPair<Index> computeIndexPair(Index row, Index col, const Index distance) const {
+ const bool left = (side == Lhs);
+ Index nocontract_val[2] = {left ? row : col, left ? row + distance : col};
+ Index linidx[2] = {0, 0};
+ if (array_size<typename Tensor::Dimensions>::value > array_size<contract_t>::value) {
+ for (int i = static_cast<int>(array_size<nocontract_t>::value) - 1; i > 0; i--) {
+ const Index idx0 = nocontract_val[0] / m_ij_strides[i];
+ const Index idx1 = nocontract_val[1] / m_ij_strides[i];
+ linidx[0] += idx0 * m_nocontract_strides[i];
+ linidx[1] += idx1 * m_nocontract_strides[i];
+ nocontract_val[0] -= idx0 * m_ij_strides[i];
+ nocontract_val[1] -= idx1 * m_ij_strides[i];
+ }
+ if (side == Lhs && inner_dim_contiguous) {
+ eigen_assert(m_nocontract_strides[0] == 1);
+ linidx[0] += nocontract_val[0];
+ linidx[1] += nocontract_val[1];
+ } else {
+ linidx[0] += nocontract_val[0] * m_nocontract_strides[0];
+ linidx[1] += nocontract_val[1] * m_nocontract_strides[0];
+ }
+ }
+
+ Index contract_val[2] = {left ? col : row, left ? col : row + distance};
+ if (array_size<contract_t>::value > 0) {
+ for (int i = static_cast<int>(array_size<contract_t>::value) - 1; i > 0; i--) {
+ const Index idx0 = contract_val[0] / m_k_strides[i];
+ const Index idx1 = contract_val[1] / m_k_strides[i];
+ linidx[0] += idx0 * m_contract_strides[i];
+ linidx[1] += idx1 * m_contract_strides[i];
+ contract_val[0] -= idx0 * m_k_strides[i];
+ contract_val[1] -= idx1 * m_k_strides[i];
+ }
+
+ if (side == Rhs && inner_dim_contiguous) {
+ eigen_assert(m_contract_strides[0] == 1);
+ linidx[0] += contract_val[0];
+ linidx[1] += contract_val[1];
+ } else {
+ linidx[0] += contract_val[0] * m_contract_strides[0];
+ linidx[1] += contract_val[1] * m_contract_strides[0];
+ }
+ }
+ return IndexPair<Index>(linidx[0], linidx[1]);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Index firstAligned(Index size) const {
+ // Only claim alignment when we can compute the actual stride (i.e. when we're
+ // dealing with the lhs and inner_dim_contiguous), because the matrix-vector
+ // product relies on the stride when dealing with aligned inputs.
+ return (Alignment == Aligned) && (side == Lhs) && inner_dim_contiguous ? 0 : size;
+ }
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Index stride() const {
+ return ((side == Lhs) && inner_dim_contiguous && array_size<contract_t>::value > 0) ? m_contract_strides[0] : 1;
+ }
+
+ protected:
+ CoeffLoader<Tensor, Tensor::RawAccess> m_tensor;
+ const nocontract_t m_nocontract_strides;
+ const nocontract_t m_ij_strides;
+ const contract_t m_contract_strides;
+ const contract_t m_k_strides;
+};
+
+
+template<typename Scalar, typename Index, int side,
+ typename Tensor,
+ typename nocontract_t, typename contract_t,
+ int packet_size, bool inner_dim_contiguous,
+ bool inner_dim_reordered, int Alignment>
+class BaseTensorContractionMapper : public SimpleTensorContractionMapper<Scalar, Index, side, Tensor, nocontract_t, contract_t, packet_size, inner_dim_contiguous, Alignment>
+{
+ public:
+ typedef SimpleTensorContractionMapper<Scalar, Index, side, Tensor, nocontract_t, contract_t, packet_size, inner_dim_contiguous, Alignment> ParentMapper;
+
+ EIGEN_DEVICE_FUNC
+ BaseTensorContractionMapper(const Tensor& tensor,
+ const nocontract_t& nocontract_strides,
+ const nocontract_t& ij_strides,
+ const contract_t& contract_strides,
+ const contract_t& k_strides) :
+ ParentMapper(tensor, nocontract_strides, ij_strides, contract_strides, k_strides) { }
+
+ typedef typename Tensor::PacketReturnType Packet;
+ typedef typename unpacket_traits<Packet>::half HalfPacket;
+
+ template <int AlignmentType>
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Packet loadPacket(Index i, Index j) const {
+ // whole method makes column major assumption
+
+ // don't need to add offsets for now (because operator handles that)
+ // current code assumes packet size must be a multiple of 2
+ EIGEN_STATIC_ASSERT(packet_size % 2 == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
+
+ if (Tensor::PacketAccess && inner_dim_contiguous && !inner_dim_reordered) {
+ const Index index = this->computeIndex(i, j);
+ eigen_assert(this->computeIndex(i+packet_size-1, j) == index + packet_size-1);
+ return this->m_tensor.template packet<AlignmentType>(index);
+ }
+
+ const IndexPair<Index> indexPair = this->computeIndexPair(i, j, packet_size - 1);
+ const Index first = indexPair.first;
+ const Index last = indexPair.second;
+
+ // We can always do optimized packet reads from the left hand side right now,
+ // because the vertical matrix dimension on the left hand side is never
+ // contracting. On the right hand side we need to check whether the contracting
+ // dimensions may have been shuffled first.
+ if (Tensor::PacketAccess &&
+ (side == Lhs || internal::array_size<contract_t>::value <= 1 || !inner_dim_reordered) &&
+ (last - first) == (packet_size - 1)) {
+
+ return this->m_tensor.template packet<AlignmentType>(first);
+ }
+
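+ // Slow path: gather the coefficients pairwise into an aligned scratch
+ // buffer and load that as a packet. computeIndexPair returns the linear
+ // indices of two coefficients whose row coordinates differ by `distance`.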
+ EIGEN_ALIGN_MAX Scalar data[packet_size];
+
+ data[0] = this->m_tensor.coeff(first);
+ for (Index k = 1; k < packet_size - 1; k += 2) {
+ const IndexPair<Index> internal_pair = this->computeIndexPair(i + k, j, 1);
+ data[k] = this->m_tensor.coeff(internal_pair.first);
+ data[k + 1] = this->m_tensor.coeff(internal_pair.second);
+ }
+ data[packet_size - 1] = this->m_tensor.coeff(last);
+
+ return pload<Packet>(data);
+ }
+
+ template <int AlignmentType>
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE HalfPacket loadHalfPacket(Index i, Index j) const {
+ // whole method makes column major assumption
+
+ // don't need to add offsets for now (because operator handles that)
+ const Index half_packet_size = unpacket_traits<HalfPacket>::size;
+ if (half_packet_size == packet_size) {
+ return loadPacket<AlignmentType>(i, j);
+ }
+ EIGEN_ALIGN_MAX Scalar data[half_packet_size];
+ for (Index k = 0; k < half_packet_size; k++) {
+ data[k] = operator()(i + k, j);
+ }
+ return pload<HalfPacket>(data);
+ }
+};
+
+
+template<typename Scalar, typename Index, int side,
+ typename Tensor,
+ typename nocontract_t, typename contract_t,
+ bool inner_dim_contiguous,
+ bool inner_dim_reordered, int Alignment>
+class BaseTensorContractionMapper<Scalar, Index, side, Tensor, nocontract_t, contract_t, 1, inner_dim_contiguous, inner_dim_reordered, Alignment> : public SimpleTensorContractionMapper<Scalar, Index, side, Tensor, nocontract_t, contract_t, 1, inner_dim_contiguous, Alignment>
+{
+ public:
+ typedef SimpleTensorContractionMapper<Scalar, Index, side, Tensor, nocontract_t, contract_t, 1, inner_dim_contiguous, Alignment> ParentMapper;
+
+ EIGEN_DEVICE_FUNC
+ BaseTensorContractionMapper(const Tensor& tensor,
+ const nocontract_t& nocontract_strides,
+ const nocontract_t& ij_strides,
+ const contract_t& contract_strides,
+ const contract_t& k_strides) :
+ ParentMapper(tensor, nocontract_strides, ij_strides, contract_strides, k_strides) { }
+
+ typedef typename Tensor::PacketReturnType Packet;
+ template <int> EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Packet loadPacket(Index i, Index j) const {
+ EIGEN_ALIGN_MAX Scalar data[1];
+ data[0] = this->m_tensor.coeff(this->computeIndex(i, j));
+ return pload<typename Tensor::PacketReturnType>(data);
+ }
+ template <int> EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Packet loadHalfPacket(Index i, Index j) const {
+ return loadPacket(i, j);
+ }
+};
+
+
+template<typename Scalar, typename Index, int side,
+ typename Tensor,
+ typename nocontract_t, typename contract_t,
+ int packet_size,
+ bool inner_dim_contiguous, bool inner_dim_reordered, int Alignment>
+class TensorContractionSubMapper {
+ public:
+ typedef typename Tensor::PacketReturnType Packet;
+ typedef typename unpacket_traits<Packet>::half HalfPacket;
+
+ typedef BaseTensorContractionMapper<Scalar, Index, side, Tensor, nocontract_t, contract_t, packet_size, inner_dim_contiguous, inner_dim_reordered, Alignment> ParentMapper;
+ typedef TensorContractionSubMapper<Scalar, Index, side, Tensor, nocontract_t, contract_t, packet_size, inner_dim_contiguous, inner_dim_reordered, Alignment> Self;
+ typedef Self LinearMapper;
+
+ enum {
+ // We can use direct offsets iff the parent mapper supports them and we can compute the strides.
+ // TODO: we should also enable direct offsets for the Rhs case.
+ UseDirectOffsets = ParentMapper::DirectOffsets && (side == Lhs) && inner_dim_contiguous && (array_size<contract_t>::value > 0)
+ };
+
+ EIGEN_DEVICE_FUNC TensorContractionSubMapper(const ParentMapper& base_mapper, Index vert_offset, Index horiz_offset)
+ : m_base_mapper(base_mapper), m_vert_offset(vert_offset), m_horiz_offset(horiz_offset) {
+ // Bake the offsets into the buffer used by the base mapper whenever possible. This avoids the need to recompute
+ // this offset every time we attempt to access a coefficient.
+ if (UseDirectOffsets) {
+ Index stride = m_base_mapper.stride();
+ m_base_mapper.offsetBuffer(vert_offset + horiz_offset * stride);
+ }
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Scalar operator()(Index i) const {
+ if (UseDirectOffsets) {
+ return m_base_mapper(i, 0);
+ }
+ return m_base_mapper(i + m_vert_offset, m_horiz_offset);
+ }
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Scalar operator()(Index i, Index j) const {
+ if (UseDirectOffsets) {
+ return m_base_mapper(i, j);
+ }
+ return m_base_mapper(i + m_vert_offset, j + m_horiz_offset);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet loadPacket(Index i) const {
+ if (UseDirectOffsets) {
+ return m_base_mapper.template loadPacket<Alignment>(i, 0);
+ }
+ return m_base_mapper.template loadPacket<Alignment>(i + m_vert_offset, m_horiz_offset);
+ }
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet loadPacket(Index i, Index j) const {
+ if (UseDirectOffsets) {
+ return m_base_mapper.template loadPacket<Alignment>(i, j);
+ }
+ return m_base_mapper.template loadPacket<Alignment>(i + m_vert_offset, j + m_horiz_offset);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE HalfPacket loadHalfPacket(Index i) const {
+ if (UseDirectOffsets) {
+ return m_base_mapper.template loadHalfPacket<Alignment>(i, 0);
+ }
+ return m_base_mapper.template loadHalfPacket<Alignment>(i + m_vert_offset, m_horiz_offset);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void storePacket(Index i, Packet p) const {
+ if (UseDirectOffsets) {
+ m_base_mapper.storePacket(i, 0, p);
+ return; // Without this early return the packet would be stored twice.
+ }
+ m_base_mapper.storePacket(i + m_vert_offset, m_horiz_offset, p);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE LinearMapper getLinearMapper(Index i, Index j) const {
+ if (UseDirectOffsets) {
+ return LinearMapper(m_base_mapper, i, j);
+ }
+ return LinearMapper(m_base_mapper, i + m_vert_offset, j + m_horiz_offset);
+ }
+
+ template <typename PacketT, int AlignmentType>
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE PacketT load(Index i) const {
+ EIGEN_STATIC_ASSERT((internal::is_same<PacketT, Packet>::value), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ const int ActualAlignment = (AlignmentType == Aligned) && (Alignment == Aligned) ? Aligned : Unaligned;
+ if (UseDirectOffsets) {
+ return m_base_mapper.template loadPacket<ActualAlignment>(i, 0);
+ }
+ return m_base_mapper.template loadPacket<ActualAlignment>(i + m_vert_offset, m_horiz_offset);
+ }
+
+ template <typename Packet>
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool aligned(Index) const {
+ return false;
+ }
+
+ private:
+ ParentMapper m_base_mapper;
+ const Index m_vert_offset;
+ const Index m_horiz_offset;
+};
+
+
+template<typename Scalar_, typename Index, int side,
+ typename Tensor,
+ typename nocontract_t, typename contract_t,
+ int packet_size,
+ bool inner_dim_contiguous, bool inner_dim_reordered, int Alignment>
+class TensorContractionInputMapper
+ : public BaseTensorContractionMapper<Scalar_, Index, side, Tensor, nocontract_t, contract_t, packet_size, inner_dim_contiguous, inner_dim_reordered, Alignment> {
+
+ public:
+ typedef Scalar_ Scalar;
+ typedef BaseTensorContractionMapper<Scalar, Index, side, Tensor, nocontract_t, contract_t, packet_size, inner_dim_contiguous, inner_dim_reordered, Alignment> Base;
+ typedef TensorContractionSubMapper<Scalar, Index, side, Tensor, nocontract_t, contract_t, packet_size, inner_dim_contiguous, inner_dim_reordered, Alignment> SubMapper;
+ typedef SubMapper VectorMapper;
+
+ EIGEN_DEVICE_FUNC TensorContractionInputMapper(const Tensor& tensor,
+ const nocontract_t& nocontract_strides,
+ const nocontract_t& ij_strides,
+ const contract_t& contract_strides,
+ const contract_t& k_strides)
+ : Base(tensor, nocontract_strides, ij_strides, contract_strides, k_strides) { }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE SubMapper getSubMapper(Index i, Index j) const {
+ return SubMapper(*this, i, j);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE VectorMapper getVectorMapper(Index i, Index j) const {
+ return VectorMapper(*this, i, j);
+ }
+};
+
+
+
+} // end namespace internal
+} // end namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_MAPPER_H
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
new file mode 100644
index 000000000..ee16cde9b
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
@@ -0,0 +1,1052 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_THREAD_POOL_H
+#define EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_THREAD_POOL_H
+
+// evaluator for thread pool device
+#ifdef EIGEN_USE_THREADS
+
+namespace Eigen {
+
+#ifdef EIGEN_USE_SIMPLE_THREAD_POOL
+namespace internal {
+
+template<typename LhsScalar, typename LhsMapper, typename Index>
+struct packLhsArg {
+ LhsScalar* blockA;
+ const LhsMapper& lhs;
+ const Index m_start;
+ const Index k_start;
+ const Index mc;
+ const Index kc;
+};
+
+template<typename LhsScalar, typename RhsScalar, typename RhsMapper, typename OutputMapper, typename Index>
+struct packRhsAndKernelArg {
+ const MaxSizeVector<LhsScalar*>* blockAs;
+ RhsScalar* blockB;
+ const RhsMapper& rhs;
+ OutputMapper& output;
+ const Index m;
+ const Index k;
+ const Index n;
+ const Index mc;
+ const Index kc;
+ const Index nc;
+ const Index num_threads;
+ const Index num_blockAs;
+ const Index max_m;
+ const Index k_block_idx;
+ const Index m_block_idx;
+ const Index n_block_idx;
+ const Index m_blocks;
+ const Index n_blocks;
+ MaxSizeVector<Notification*>* kernel_notifications;
+ const MaxSizeVector<Notification*>* lhs_notifications;
+ const bool need_to_pack;
+};
+
+} // end namespace internal
+#endif // EIGEN_USE_SIMPLE_THREAD_POOL
+
+template<typename Indices, typename LeftArgType, typename RightArgType>
+struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType>, ThreadPoolDevice> :
+ public TensorContractionEvaluatorBase<TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType>, ThreadPoolDevice> > {
+
+ typedef ThreadPoolDevice Device;
+
+ typedef TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType>, Device> Self;
+ typedef TensorContractionEvaluatorBase<Self> Base;
+
+ typedef TensorContractionOp<Indices, LeftArgType, RightArgType> XprType;
+ typedef typename internal::remove_const<typename XprType::Scalar>::type Scalar;
+ typedef typename XprType::Index Index;
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+ typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
+
+ enum {
+ Layout = TensorEvaluator<LeftArgType, Device>::Layout,
+ };
+
+ // Most of the code is assuming that both input tensors are ColMajor. If the
+ // inputs are RowMajor, we will "cheat" by swapping the LHS and RHS:
+ // If we want to compute A * B = C, where A is LHS and B is RHS, the code
+ // will pretend B is LHS and A is RHS.
+ typedef typename internal::conditional<
+ static_cast<int>(Layout) == static_cast<int>(ColMajor), LeftArgType, RightArgType>::type EvalLeftArgType;
+ typedef typename internal::conditional<
+ static_cast<int>(Layout) == static_cast<int>(ColMajor), RightArgType, LeftArgType>::type EvalRightArgType;
+
+ static const int LDims =
+ internal::array_size<typename TensorEvaluator<EvalLeftArgType, Device>::Dimensions>::value;
+ static const int RDims =
+ internal::array_size<typename TensorEvaluator<EvalRightArgType, Device>::Dimensions>::value;
+ static const int ContractDims = internal::array_size<Indices>::value;
+
+ typedef array<Index, LDims> left_dim_mapper_t;
+ typedef array<Index, RDims> right_dim_mapper_t;
+
+ typedef array<Index, ContractDims> contract_t;
+ typedef array<Index, LDims - ContractDims> left_nocontract_t;
+ typedef array<Index, RDims - ContractDims> right_nocontract_t;
+
+ static const int NumDims = LDims + RDims - 2 * ContractDims;
+
+ typedef DSizes<Index, NumDims> Dimensions;
+
+ // typedefs needed in evalTo
+ typedef typename internal::remove_const<typename EvalLeftArgType::Scalar>::type LhsScalar;
+ typedef typename internal::remove_const<typename EvalRightArgType::Scalar>::type RhsScalar;
+ typedef typename internal::gebp_traits<LhsScalar, RhsScalar> Traits;
+
+ typedef TensorEvaluator<EvalLeftArgType, Device> LeftEvaluator;
+ typedef TensorEvaluator<EvalRightArgType, Device> RightEvaluator;
+
+ TensorEvaluator(const XprType& op, const Device& device) :
+ Base(op, device) {}
+
+#ifndef EIGEN_USE_SIMPLE_THREAD_POOL
+ template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous,
+ bool rhs_inner_dim_reordered, int Alignment>
+ void evalProduct(Scalar* buffer) const {
+ typedef
+ typename internal::remove_const<typename EvalLeftArgType::Scalar>::type
+ LhsScalar;
+ typedef
+ typename internal::remove_const<typename EvalRightArgType::Scalar>::type
+ RhsScalar;
+ typedef typename internal::gebp_traits<LhsScalar, RhsScalar> Traits;
+ typedef TensorEvaluator<EvalLeftArgType, Device> LeftEvaluator;
+ typedef TensorEvaluator<EvalRightArgType, Device> RightEvaluator;
+ typedef internal::TensorContractionInputMapper<
+ LhsScalar, Index, internal::Lhs, LeftEvaluator, left_nocontract_t,
+ contract_t, internal::packet_traits<LhsScalar>::size,
+ lhs_inner_dim_contiguous, false, Unaligned>
+ LhsMapper;
+ typedef internal::TensorContractionInputMapper<
+ RhsScalar, Index, internal::Rhs, RightEvaluator, right_nocontract_t,
+ contract_t, internal::packet_traits<RhsScalar>::size,
+ rhs_inner_dim_contiguous, rhs_inner_dim_reordered, Unaligned>
+ RhsMapper;
+ typedef internal::blas_data_mapper<Scalar, Index, ColMajor> OutputMapper;
+ typedef internal::gemm_pack_lhs<LhsScalar, Index,
+ typename LhsMapper::SubMapper, Traits::mr,
+ Traits::LhsProgress, ColMajor>
+ LhsPacker;
+ typedef internal::gemm_pack_rhs<
+ RhsScalar, Index, typename RhsMapper::SubMapper, Traits::nr, ColMajor>
+ RhsPacker;
+ typedef internal::gebp_kernel<LhsScalar, RhsScalar, Index, OutputMapper,
+ Traits::mr, Traits::nr, false, false>
+ GebpKernel;
+
+ const Index m = this->m_i_size;
+ const Index n = this->m_j_size;
+ const Index k = this->m_k_size;
+ if (m == 0 || n == 0 || k == 0) return;
+
+ // Compute a set of algorithm parameters:
+ // - kernel block sizes (bm, bn, bk)
+ // - task grain sizes (number of kernels executed per task: gm, gn)
+ // - number of threads
+ // - sharding by row/column
+ // - parallel packing or first lhs then rhs
+ // and some derived parameters:
+ // - number of tasks (nm, nn, nk)
+ // - number of kernels (nm0, nn0)
+ // Unfortunately, all these parameters are tightly interdependent.
+ // So in some cases we first compute approximate values, then compute other
+ // values based on these approximations and then refine the approximations.
+
+ // There are lots of heuristics here. There is some reasoning behind them,
+ // but ultimately they are just tuned on contraction benchmarks for
+ // different input configurations, thread counts and instruction sets.
+ // So feel free to question any of them.
+
+ // Compute whether we want to shard by row or by column.
+ // This is a first approximation; it will be refined later. Since we don't
+ // know the number of threads yet we use 2, because what we are most
+ // interested in at this point is whether it makes sense to use
+ // parallelization at all.
+ bool shard_by_col = shardByCol(m, n, 2);
+
+ // First approximation of kernel blocking sizes.
+ // Again, we don't know the number of threads yet, so we use 2.
+ Index bm, bn, bk;
+ if (shard_by_col) {
+ internal::TensorContractionBlocking<LhsMapper, RhsMapper, Index,
+ internal::ShardByCol>
+ blocking(k, m, n, 2);
+ bm = blocking.mc();
+ bn = blocking.nc();
+ bk = blocking.kc();
+ } else {
+ internal::TensorContractionBlocking<LhsMapper, RhsMapper, Index,
+ internal::ShardByRow>
+ blocking(k, m, n, 2);
+ bm = blocking.mc();
+ bn = blocking.nc();
+ bk = blocking.kc();
+ }
+
+ // Compute optimal number of threads.
+ // Note: we use bk instead of k here because we are interested in the amount
+ // of _parallelizable_ computations, and computations are not parallelizable
+ // across the k dimension.
+ const TensorOpCost cost =
+ contractionCost(m, n, bm, bn, bk, shard_by_col, false);
+ int num_threads = TensorCostModel<ThreadPoolDevice>::numThreads(
+ static_cast<double>(n) * m, cost, this->m_device.numThreads());
+
+ // TODO(dvyukov): this is a stop-gap to prevent regressions while the cost
+ // model is not tuned. Remove this when the cost model is tuned.
+ if (n == 1) num_threads = 1;
+
+ if (num_threads == 1) {
+ // The single-threaded algorithm should be faster in this case.
+ if (n == 1)
+ this->template evalGemv<lhs_inner_dim_contiguous,
+ rhs_inner_dim_contiguous,
+ rhs_inner_dim_reordered, Alignment>(buffer);
+ else
+ this->template evalGemm<lhs_inner_dim_contiguous,
+ rhs_inner_dim_contiguous,
+ rhs_inner_dim_reordered, Alignment>(buffer);
+ return;
+ }
+
+ // Now that we know number of threads, recalculate sharding and blocking.
+ shard_by_col = shardByCol(m, n, num_threads);
+ if (shard_by_col) {
+ internal::TensorContractionBlocking<LhsMapper, RhsMapper, Index,
+ internal::ShardByCol>
+ blocking(k, m, n, num_threads);
+ bm = blocking.mc();
+ bn = blocking.nc();
+ bk = blocking.kc();
+ } else {
+ internal::TensorContractionBlocking<LhsMapper, RhsMapper, Index,
+ internal::ShardByRow>
+ blocking(k, m, n, num_threads);
+ bm = blocking.mc();
+ bn = blocking.nc();
+ bk = blocking.kc();
+ }
+
+ // Number of kernels for each dimension.
+ Index nm0 = divup(m, bm);
+ Index nn0 = divup(n, bn);
+ Index nk = divup(k, bk);
+
+ // Calculate task grain size (number of kernels executed per task).
+ // This task size coarsening serves two purposes:
+ // 1. It reduces per-task overheads including synchronization overheads.
+ // 2. It lets us use caches better (reuse the same packed rhs in several
+ // consecutive kernels).
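+ // For illustration: with nm0 = 12 kernels along m and a grain of gm = 3,
+ // divup(12, 3) = 4 tasks are issued along that dimension.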
+ Index gm = 1;
+ Index gn = 1;
+ // If we are sharding by column, then we prefer to reduce rows first.
+ if (shard_by_col) {
+ gm = coarsenM(m, n, bm, bn, bk, gn, num_threads, shard_by_col);
+ gn = coarsenN(m, n, bm, bn, bk, gm, num_threads, shard_by_col);
+ } else {
+ gn = coarsenN(m, n, bm, bn, bk, gm, num_threads, shard_by_col);
+ gm = coarsenM(m, n, bm, bn, bk, gn, num_threads, shard_by_col);
+ }
+ // Number of tasks in each dimension.
+ Index nm = divup(nm0, gm);
+ Index nn = divup(nn0, gn);
+
+ // Last but not least, decide whether we want to issue both lhs and rhs
+ // packing in parallel, or issue lhs packing first and then issue rhs
+ // packing when lhs packing completes (for !shard_by_col lhs and rhs are
+ // swapped). Parallel packing allows more parallelism (for both packing and
+ // kernels), while sequential packing provides better locality (once a
+ // thread finishes rhs packing it proceeds to kernels with that rhs).
+ // First, we are interested in parallel packing if there are few tasks.
+ bool parallel_pack = num_threads >= nm * nn;
+ // Also do parallel packing if all data fits into L2$.
+ if (m * bk * Index(sizeof(LhsScalar)) + n * bk * Index(sizeof(RhsScalar)) <=
+ l2CacheSize() * num_threads)
+ parallel_pack = true;
+ // But don't do it if we will use each rhs only once. Locality seems to be
+ // more important in this case.
+ if ((shard_by_col ? nm : nn) == 1) parallel_pack = false;
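+ // e.g. with num_threads = 8 but only nm * nn = 4 tasks per slice, packing
+ // in parallel helps keep all threads busy at the start of a slice.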
+
+ LhsMapper lhs(this->m_leftImpl, this->m_left_nocontract_strides,
+ this->m_i_strides, this->m_left_contracting_strides,
+ this->m_k_strides);
+
+ RhsMapper rhs(this->m_rightImpl, this->m_right_nocontract_strides,
+ this->m_j_strides, this->m_right_contracting_strides,
+ this->m_k_strides);
+
+ Context<LhsPacker, RhsPacker, GebpKernel, LhsMapper, RhsMapper,
+ OutputMapper>(this->m_device, num_threads, lhs, rhs, buffer, m, n,
+ k, bm, bn, bk, nm, nn, nk, gm, gn, nm0, nn0,
+ shard_by_col, parallel_pack)
+ .run();
+ }
+
+ // Context coordinates a single parallel gemm operation.
+ template <typename LhsPacker, typename RhsPacker, typename GebpKernel,
+ typename LhsMapper, typename RhsMapper, typename OutputMapper>
+ class Context {
+ public:
+ Context(const Device& device, int num_threads, LhsMapper& lhs,
+ RhsMapper& rhs, Scalar* buffer, Index tm, Index tn, Index tk, Index bm,
+ Index bn, Index bk, Index nm, Index nn, Index nk, Index gm,
+ Index gn, Index nm0, Index nn0, bool shard_by_col,
+ bool parallel_pack)
+ : device_(device),
+ lhs_(lhs),
+ rhs_(rhs),
+ buffer_(buffer),
+ output_(buffer, tm),
+ num_threads_(num_threads),
+ shard_by_col_(shard_by_col),
+ parallel_pack_(parallel_pack),
+ m_(tm),
+ n_(tn),
+ k_(tk),
+ bm_(bm),
+ bn_(bn),
+ bk_(bk),
+ nm_(nm),
+ nn_(nn),
+ nk_(nk),
+ gm_(gm),
+ gn_(gn),
+ nm0_(nm0),
+ nn0_(nn0)
+ {
+ for (Index x = 0; x < P; x++) {
+ // Normal number of notifications for k slice switch is
+ // nm_ + nn_ + nm_ * nn_. However, the first P - 1 slices will receive only
+ // nm_ + nn_ notifications, because they will not receive notifications
+ // from preceding kernels.
+ state_switch_[x] =
+ x == 0
+ ? 1
+ : (parallel_pack_ ? nn_ + nm_ : (shard_by_col_ ? nn_ : nm_)) +
+ (x == P - 1 ? nm_ * nn_ : 0);
+ state_packing_ready_[x] =
+ parallel_pack_ ? 0 : (shard_by_col_ ? nm_ : nn_);
+ state_kernel_[x] = new std::atomic<uint8_t>*[nm_];
+ for (Index m = 0; m < nm_; m++) {
+ state_kernel_[x][m] = new std::atomic<uint8_t>[nn_];
+ // Kernels generally receive 3 notifications (previous kernel + 2
+ // packing), but the first slice won't get notifications from previous
+ // kernels.
+ for (Index n = 0; n < nn_; n++)
+ state_kernel_[x][m][n].store(
+ (x == 0 ? 0 : 1) + (parallel_pack_ ? 2 : 1),
+ std::memory_order_relaxed);
+ }
+ }
+
+ // Allocate memory for packed rhs/lhs matrices.
+ size_t align = numext::maxi(EIGEN_MAX_ALIGN_BYTES, 1);
+ size_t lhs_size =
+ divup<size_t>(bm_ * bk_ * sizeof(LhsScalar), align) * align;
+ size_t rhs_size =
+ divup<size_t>(bn_ * bk_ * sizeof(RhsScalar), align) * align;
+ packed_mem_ = static_cast<char*>(internal::aligned_malloc(
+ (nm0_ * lhs_size + nn0_ * rhs_size) * std::min<size_t>(nk_, P - 1)));
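+ // lhs_size and rhs_size were rounded up to multiples of `align` above, so
+ // carving the arena with these strides keeps every block pointer aligned.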
+ char* mem = static_cast<char*>(packed_mem_);
+ for (Index x = 0; x < numext::mini<Index>(nk_, P - 1); x++) {
+ packed_lhs_[x].resize(nm0_);
+ for (Index m = 0; m < nm0_; m++) {
+ packed_lhs_[x][m] = reinterpret_cast<LhsScalar*>(mem);
+ mem += lhs_size;
+ }
+ packed_rhs_[x].resize(nn0_);
+ for (Index n = 0; n < nn0_; n++) {
+ packed_rhs_[x][n] = reinterpret_cast<RhsScalar*>(mem);
+ mem += rhs_size;
+ }
+ }
+ }
+
+ ~Context() {
+ for (Index x = 0; x < P; x++) {
+ for (Index m = 0; m < nm_; m++) delete[] state_kernel_[x][m];
+ delete[] state_kernel_[x];
+ }
+ internal::aligned_free(packed_mem_);
+ }
+
+ void run() {
+ // Kick off packing of the first slice.
+ signal_switch(0, 1);
+ // Wait for overall completion.
+ // TODO(dvyukov): this wait can lead to deadlock.
+ // If nthreads contractions are concurrently submitted from worker
+ // threads, this wait will block all worker threads and the system will
+ // deadlock.
+ done_.Wait();
+ }
+
+ private:
+ Notification done_;
+ const Device& device_;
+ LhsMapper& lhs_;
+ RhsMapper& rhs_;
+ Scalar* const buffer_;
+ OutputMapper output_;
+ const int num_threads_;
+ const bool shard_by_col_;
+ const bool parallel_pack_;
+ // Matrix sizes.
+ const Index m_;
+ const Index n_;
+ const Index k_;
+ // Block sizes.
+ const Index bm_;
+ const Index bn_;
+ const Index bk_;
+ // Number of tasks.
+ const Index nm_;
+ const Index nn_;
+ const Index nk_;
+ // Task grain sizes (number of kernels executed per task).
+ const Index gm_;
+ const Index gn_;
+ // Number of blocks (this is different from nm_/nn_ because of task size
+ // coarsening).
+ const Index nm0_;
+ const Index nn0_;
+
+ // Parallelization strategy.
+ //
+ // Blocks related to the same k block can run in parallel because they write
+ // to different output blocks. So we parallelize within k slices, this
+ // gives us parallelism level of m x n. Before we can start any kernels
+ // related to k-th slice, we need to issue m lhs packing tasks and n rhs
+ // packing tasks.
+ //
+ // However, there is a bottleneck when we are finishing kernels for k-th
+ // slice (at the very end there is only 1 runnable kernel). To mitigate this
+ // bottleneck we allow kernels from k-th and k+1-th slices to run in
+ // parallel. Note that (m, n, k) and (m, n, k+1) kernels write to the same
+ // output block, so they must not run in parallel.
+ //
+ // This gives us the following dependency graph.
+ // On each k slice we have m x n kernel tasks, m lhs packing tasks and n rhs
+ // packing tasks.
+ // Kernel (m, n, k) can start when:
+ // - kernel (m, n, k-1) has finished
+ // - lhs packing (m, k) has finished
+ // - rhs packing (n, k) has finished
+ // Lhs/rhs packing can start when:
+ // - all k-1 packing has finished (artificially imposed to limit the amount
+ // of parallel packing)
+ //
+ // On top of that we limit runnable tasks to two consecutive k slices.
+ // This is done to limit amount of memory we need for packed lhs/rhs
+ // (for each k slice we need m*bk + n*bk memory in packed_lhs_/packed_rhs_).
+ //
+ // state_switch_ tracks when we are ready to switch to the next k slice.
+ // state_kernel_[m][n] tracks when we are ready to kick off kernel (m, n).
+ // These variables roll over 3 consecutive k slices: the first two are being
+ // actively executed, plus one to track completion of kernels in the second
+ // slice.
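+ //
+ // Concretely, a slice switch normally waits on nm_ + nn_ packing
+ // completions plus nm_ * nn_ kernel completions (fewer when packing is
+ // sequential; see the state_switch_ reset in signal_switch below).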
+ static const Index P = 3;
+ void* packed_mem_;
+ std::vector<LhsScalar*> packed_lhs_[P - 1];
+ std::vector<RhsScalar*> packed_rhs_[P - 1];
+ std::atomic<uint8_t>** state_kernel_[P];
+ // state_switch_ is frequently modified by worker threads, while other
+ // fields are read-only after constructor. Let's move it to a separate cache
+ // line to reduce cache-coherency traffic.
+ char pad_[128];
+ std::atomic<Index> state_packing_ready_[P];
+ std::atomic<Index> state_switch_[P];
+
+ void pack_lhs(Index m, Index k) {
+ const Index mend = m * gm_ + gm(m);
+ for (Index m1 = m * gm_; m1 < mend; m1++)
+ LhsPacker()(packed_lhs_[k % (P - 1)][m1],
+ lhs_.getSubMapper(m1 * bm_, k * bk_), bk(k), bm(m1));
+
+ if (!parallel_pack_ && shard_by_col_) {
+ signal_packing(k);
+ } else {
+ signal_switch(k + 1);
+ for (Index n = nn_ - 1; n >= 0; n--) signal_kernel(m, n, k, n == 0);
+ }
+ }
+
+ void pack_rhs(Index n, Index k) {
+ const Index nend = n * gn_ + gn(n);
+ for (Index n1 = n * gn_; n1 < nend; n1++) {
+ if (k == 0) {
+ // Zero the output memory in parallel.
+ // On a 10000x2x10000 mm, zeroing can easily take half of the time.
+ // Zero the (m x bn) output slab. Safe to do here because all kernels
+ // that will write to this memory depend on completion of this task.
+ // Note: don't call device_.memset() here. device_.memset() blocks on
+ // thread pool worker thread, which can lead to underutilization and
+ // deadlocks.
+ memset(buffer_ + n1 * bn_ * m_, 0, bn(n1) * m_ * sizeof(Scalar));
+ }
+ RhsPacker()(packed_rhs_[k % (P - 1)][n1],
+ rhs_.getSubMapper(k * bk_, n1 * bn_), bk(k), bn(n1));
+ }
+
+ if (parallel_pack_ || shard_by_col_) {
+ signal_switch(k + 1);
+ for (Index m = nm_ - 1; m >= 0; m--) signal_kernel(m, n, k, m == 0);
+ } else {
+ signal_packing(k);
+ }
+ }
+
+ void kernel(Index m, Index n, Index k) {
+ // Note: order of iteration matters here. Iteration over m is innermost
+ // because we want to reuse the same packed rhs in consecutive tasks
+ // (rhs fits into L2$ while lhs only into L3$).
+ const Index nend = n * gn_ + gn(n);
+ const Index mend = m * gm_ + gm(m);
+ if (shard_by_col_) {
+ for (Index n1 = n * gn_; n1 < nend; n1++) {
+ for (Index m1 = m * gm_; m1 < mend; m1++)
+ GebpKernel()(output_.getSubMapper(m1 * bm_, n1 * bn_),
+ packed_lhs_[k % (P - 1)][m1],
+ packed_rhs_[k % (P - 1)][n1], bm(m1), bk(k), bn(n1),
+ Scalar(1), -1, -1, 0, 0);
+ }
+ } else {
+ for (Index m1 = m * gm_; m1 < mend; m1++)
+ for (Index n1 = n * gn_; n1 < nend; n1++) {
+ GebpKernel()(output_.getSubMapper(m1 * bm_, n1 * bn_),
+ packed_lhs_[k % (P - 1)][m1],
+ packed_rhs_[k % (P - 1)][n1], bm(m1), bk(k), bn(n1),
+ Scalar(1), -1, -1, 0, 0);
+ }
+ }
+ signal_kernel(m, n, k + 1, false);
+ signal_switch(k + 2);
+ }
+
+ void signal_packing(Index k) {
+ eigen_assert(!parallel_pack_);
+ Index s = state_packing_ready_[k % P].fetch_sub(1);
+ eigen_assert(s > 0);
+ if (s != 1) return;
+ state_packing_ready_[k % P] = shard_by_col_ ? nm_ : nn_;
+ enqueue_packing(k, shard_by_col_);
+ }
+
+ void signal_kernel(Index m, Index n, Index k, bool sync) {
+ std::atomic<uint8_t>* state = &state_kernel_[k % P][m][n];
+ Index s = state->load();
+ eigen_assert(s > 0);
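+ // If s == 1 we are the last notifier, so skip the atomic decrement and
+ // run the kernel; otherwise decrement, and bail out unless that decrement
+ // consumed the final notification.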
+ if (s != 1 && state->fetch_sub(1) != 1) return;
+ state->store(parallel_pack_ ? 3 : 2, std::memory_order_relaxed);
+ if (sync)
+ kernel(m, n, k);
+ else
+ device_.enqueueNoNotification([=]() { kernel(m, n, k); });
+ }
+
+ void signal_switch(Index k, Index v = 1) {
+ Index s = state_switch_[k % P].fetch_sub(v);
+ eigen_assert(s >= v);
+ if (s != v) return;
+
+ // Ready to switch to the next k slice.
+ // Reset counter for the next iteration.
+ state_switch_[k % P] =
+ (parallel_pack_ ? nm_ + nn_ : (shard_by_col_ ? nn_ : nm_)) +
+ nm_ * nn_;
+ if (k < nk_) {
+ // Issue lhs/rhs packing. Their completion will in turn kick off
+ // kernels.
+ if (parallel_pack_) {
+ enqueue_packing(k, !shard_by_col_);
+ enqueue_packing(k, shard_by_col_);
+ } else if (shard_by_col_) {
+ enqueue_packing(k, false);
+ } else {
+ enqueue_packing(k, true);
+ }
+
+ // Termination handling.
+ // Because kernel completion signals k + 2 switch, we need to finish nk
+ // + 2 slices without issuing any tasks on nk + 1 slice. So here we
+ // pretend that all nk + 1 packing tasks just finish instantly, so that
+ // the nk + 2 switch only waits for completion of the nk kernels.
+ } else if (k == nk_) {
+ signal_switch(k + 1,
+ parallel_pack_ ? nm_ + nn_ : (shard_by_col_ ? nn_ : nm_));
+ } else {
+ done_.Notify();
+ }
+ }
+
+ // Enqueue all rhs/lhs packing for k-th slice.
+ void enqueue_packing(Index k, bool rhs) {
+ enqueue_packing_helper(0, rhs ? nn_ : nm_, k, rhs);
+ }
+
+ void enqueue_packing_helper(Index start, Index end, Index k, bool rhs) {
+ if (end - start == 1) {
+ if (rhs)
+ pack_rhs(start, k);
+ else
+ pack_lhs(start, k);
+ } else {
+ Index mid = (start + end) / 2;
+ device_.enqueueNoNotification(
+ [=]() { enqueue_packing_helper(mid, end, k, rhs); });
+ device_.enqueueNoNotification(
+ [=]() { enqueue_packing_helper(start, mid, k, rhs); });
+ }
+ }
+
+ // Block sizes with accounting for potentially incomplete last block.
+ Index bm(Index m) const { return m + 1 < nm0_ ? bm_ : m_ + bm_ - bm_ * nm0_; }
+ Index bn(Index n) const { return n + 1 < nn0_ ? bn_ : n_ + bn_ - bn_ * nn0_; }
+ Index bk(Index k) const { return k + 1 < nk_ ? bk_ : k_ + bk_ - bk_ * nk_; }
+ // Task grain sizes accounting for potentially incomplete last task.
+ Index gm(Index m) const { return m + 1 < nm_ ? gm_ : nm0_ + gm_ - gm_ * nm_; }
+ Index gn(Index n) const { return n + 1 < nn_ ? gn_ : nn0_ + gn_ - gn_ * nn_; }
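+ // e.g. with m_ = 100, bm_ = 32 and nm0_ = 4, the last block gets
+ // bm(3) = 100 + 32 - 32 * 4 = 4 rows; the grain-size accessors behave the
+ // same way over kernel counts.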
+
+ Context(const Context&) = delete;
+ void operator=(const Context&) = delete;
+ };
+
+ // Decide whether we want to shard m x n contraction by columns or by rows.
+ static bool shardByCol(Index m, Index n, Index num_threads) {
+ // Note: we are comparing both n and m against Traits::nr; this is not
+ // a mistake. We are trying to figure out how both n and m will fit into
+ // the main sharding dimension.
+
+ // Sharding by column is the default
+ // ... unless there is enough data for vectorization over rows
+ if (m / num_threads >= Traits::nr &&
+ // and not enough data for vectorization over columns
+ (n / num_threads < Traits::nr ||
+ // ... or barely enough data for vectorization over columns,
+ // but it is not evenly divisible across threads
+ (n / num_threads < 4 * Traits::nr &&
+ (n % (num_threads * Traits::nr)) != 0 &&
+ // ... and it is evenly divisible across threads for rows
+ ((m % (num_threads * Traits::nr)) == 0 ||
+ // ... or it is not evenly divisible for both dimensions but
+ // there is much more data over rows so that corner effects are
+ // mitigated.
+ (m / n >= 6)))))
+ return false;
+ // Alternatively, shard by row if the matrix is substantially elongated
+ // over the row dimension.
+ if (n / num_threads < 16 * Traits::nr && m > n * 32) return false;
+ return true;
+ }
+
+ Index coarsenM(Index m, Index n, Index bm, Index bn, Index bk, Index gn,
+ int num_threads, bool shard_by_col) const {
+ Index gm = 1;
+ Index gm1 = 1;
+ Index nm0 = divup(m, bm);
+ Index nm1 = nm0;
+ for (;;) {
+ // Find the next candidate for the m grain size. It needs to result in a
+ // different number of blocks. E.g. if we have 10 kernels, we want to try
+ // 5 and 10, but not 6, 7, 8 and 9.
+ while (gm1 <= nm0 && nm1 == divup(nm0, gm1)) gm1++;
+ if (gm1 > nm0) break;
+ // Check the candidate.
+ int res = checkGrain(m, n, bm, bn, bk, gm1, gn, gm, gn, num_threads,
+ shard_by_col);
+ if (res < 0) break;
+ nm1 = divup(nm0, gm1);
+ if (res == 0) continue;
+ // Commit new grain size.
+ gm = gm1;
+ }
+ return gm;
+ }
+
+ Index coarsenN(Index m, Index n, Index bm, Index bn, Index bk, Index gm,
+ int num_threads, bool shard_by_col) const {
+ Index gn = 1;
+ Index gn1 = 1;
+ Index nn0 = divup(n, bn);
+ Index nn1 = nn0;
+ for (;;) {
+ while (gn1 <= nn0 && nn1 == divup(nn0, gn1)) gn1++;
+ if (gn1 > nn0) break;
+ int res = checkGrain(m, n, bm, bn, bk, gm, gn1, gm, gn, num_threads,
+ shard_by_col);
+ if (res < 0) break;
+ nn1 = divup(nn0, gn1);
+ if (res == 0) continue;
+ gn = gn1;
+ }
+ return gn;
+ }
+
+ // checkGrain checks whether grain (gm, gn) is suitable and is better than
+ // (oldgm, oldgn).
+ int checkGrain(Index m, Index n, Index bm, Index bn, Index bk, Index gm,
+ Index gn, Index oldgm, Index oldgn, int num_threads,
+ bool shard_by_col) const {
+ const TensorOpCost cost =
+ contractionCost(bm * gm, bn * gn, bm, bn, bk, shard_by_col, true);
+ double taskSize = TensorCostModel<ThreadPoolDevice>::taskSize(
+ static_cast<double>(bm) * gm * bn * gn, cost);
+ // If the task is too small, then we accept it regardless of anything
+ // else; otherwise synchronization overheads will dominate.
+ if (taskSize < 1) return 1;
+ // If it is too large, then we reject it and all larger tasks.
+ if (taskSize > 2) return -1;
+ // Now we are in presumably good task size range.
+ // The main deciding factor here is parallelism. Consider that we have 12
+ // kernels and 4 threads. Grains of 2, 3 and 4 all yield good task sizes.
+ // But 2/4 yield 6/3 tasks, which gives us parallelism of 0.75 (at most 3/4
+ // of cores will be busy). While grain size 3 gives us 4 tasks, which gives
+ // us parallelism of 1 (we can load all cores).
+ Index nm0 = divup(m, bm);
+ Index nn0 = divup(n, bn);
+ Index new_tasks = divup(nm0, gm) * divup(nn0, gn);
+ double new_parallelism = static_cast<double>(new_tasks) /
+ (divup<int>(new_tasks, num_threads) * num_threads);
+ Index old_tasks = divup(nm0, oldgm) * divup(nn0, oldgn);
+ double old_parallelism = static_cast<double>(old_tasks) /
+ (divup<int>(old_tasks, num_threads) * num_threads);
+ if (new_parallelism > old_parallelism || new_parallelism == 1) return 1;
+ return 0;
+ }
+
+#else // EIGEN_USE_SIMPLE_THREAD_POOL
+
+ template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
+ void evalProduct(Scalar* buffer) const {
+ if (this->m_j_size == 1) {
+ this->template evalGemv<lhs_inner_dim_contiguous, rhs_inner_dim_contiguous, rhs_inner_dim_reordered, Alignment>(buffer);
+ return;
+ }
+
+ evalGemm<lhs_inner_dim_contiguous, rhs_inner_dim_contiguous, rhs_inner_dim_reordered, Alignment>(buffer);
+ }
+
+ template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
+ void evalGemm(Scalar* buffer) const {
+ // columns in left side, rows in right side
+ const Index k = this->m_k_size;
+
+ // rows in left side
+ const Index m = this->m_i_size;
+
+ // columns in right side
+ const Index n = this->m_j_size;
+
+ // zero out the result buffer (which must be of size at least m * n * sizeof(Scalar))
+ this->m_device.memset(buffer, 0, m * n * sizeof(Scalar));
+
+
+ const int lhs_packet_size = internal::unpacket_traits<typename LeftEvaluator::PacketReturnType>::size;
+ const int rhs_packet_size = internal::unpacket_traits<typename RightEvaluator::PacketReturnType>::size;
+
+ typedef internal::TensorContractionInputMapper<LhsScalar, Index, internal::Lhs,
+ LeftEvaluator, left_nocontract_t,
+ contract_t, lhs_packet_size,
+ lhs_inner_dim_contiguous,
+ false, Unaligned> LhsMapper;
+
+ typedef internal::TensorContractionInputMapper<RhsScalar, Index, internal::Rhs,
+ RightEvaluator, right_nocontract_t,
+ contract_t, rhs_packet_size,
+ rhs_inner_dim_contiguous,
+ rhs_inner_dim_reordered, Unaligned> RhsMapper;
+
+ typedef internal::blas_data_mapper<Scalar, Index, ColMajor> OutputMapper;
+
+ // TODO: packing could be faster sometimes if we supported row major tensor mappers
+ typedef internal::gemm_pack_lhs<LhsScalar, Index, typename LhsMapper::SubMapper, Traits::mr,
+ Traits::LhsProgress, ColMajor> LhsPacker;
+ typedef internal::gemm_pack_rhs<RhsScalar, Index, typename RhsMapper::SubMapper, Traits::nr, ColMajor> RhsPacker;
+
+ // TODO: replace false, false with conjugate values?
+ typedef internal::gebp_kernel<LhsScalar, RhsScalar, Index, OutputMapper,
+ Traits::mr, Traits::nr, false, false> GebpKernel;
+
+ typedef internal::packLhsArg<LhsScalar, LhsMapper, Index> packLArg;
+ typedef internal::packRhsAndKernelArg<LhsScalar, RhsScalar, RhsMapper, OutputMapper, Index> packRKArg;
+
+ // initialize data mappers
+ LhsMapper lhs(this->m_leftImpl, this->m_left_nocontract_strides, this->m_i_strides,
+ this->m_left_contracting_strides, this->m_k_strides);
+
+ RhsMapper rhs(this->m_rightImpl, this->m_right_nocontract_strides, this->m_j_strides,
+ this->m_right_contracting_strides, this->m_k_strides);
+
+ OutputMapper output(buffer, m);
+
+ // compute block sizes (which depend on number of threads)
+ const Index num_threads = this->m_device.numThreads();
+ internal::TensorContractionBlocking<LhsMapper, RhsMapper, Index, internal::ShardByCol> blocking(k, m, n, num_threads);
+ Index mc = blocking.mc();
+ Index nc = blocking.nc();
+ Index kc = blocking.kc();
+ eigen_assert(mc <= m);
+ eigen_assert(nc <= n);
+ eigen_assert(kc <= k);
+
+#define CEIL_DIV(a, b) (((a) + (b) - 1) / (b))
+ const Index k_blocks = CEIL_DIV(k, kc);
+ const Index n_blocks = CEIL_DIV(n, nc);
+ const Index m_blocks = CEIL_DIV(m, mc);
+ const Index sizeA = mc * kc;
+ const Index sizeB = kc * nc;
+
+ /* cout << "m: " << m << " n: " << n << " k: " << k << endl;
+ cout << "mc: " << mc << " nc: " << nc << " kc: " << kc << endl;
+ cout << "m_blocks: " << m_blocks << " n_blocks: " << n_blocks << " k_blocks: " << k_blocks << endl;
+ cout << "num threads: " << num_threads << endl;
+ */
+
+ // note: m_device.allocate should return 16 byte aligned pointers, but if blockA and blockB
+ // aren't 16 byte aligned segfaults will happen due to SIMD instructions
+    // note: You can get away with allocating just a single blockA and offsets
+    // and meet the alignment requirements with the assumption that
+    // (Traits::mr * sizeof(ResScalar)) % 16 == 0
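+    // (Hypothetical illustration: with Traits::mr == 8 and a 4-byte LhsScalar,
+    // 8 * 4 = 32 bytes is a multiple of 16, so offsets that are multiples of
+    // Traits::mr elements would preserve 16-byte alignment.)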
+ const Index numBlockAs = numext::mini(num_threads, m_blocks);
+ MaxSizeVector<LhsScalar *> blockAs(num_threads);
+ for (int i = 0; i < num_threads; i++) {
+ blockAs.push_back(static_cast<LhsScalar *>(this->m_device.allocate(sizeA * sizeof(LhsScalar))));
+ }
+
+ // To circumvent alignment issues, I'm just going to separately allocate the memory for each thread
+ // TODO: is this too much memory to allocate? This simplifies coding a lot, but is wasteful.
+ // Other options: (1) reuse memory when a thread finishes. con: tricky
+ // (2) allocate block B memory in each thread. con: overhead
+ MaxSizeVector<RhsScalar *> blockBs(n_blocks);
+ for (int i = 0; i < n_blocks; i++) {
+ blockBs.push_back(static_cast<RhsScalar *>(this->m_device.allocate(sizeB * sizeof(RhsScalar))));
+ }
+
+ // lhs_notifications starts with all null Notifications
+ MaxSizeVector<Notification*> lhs_notifications(num_threads, nullptr);
+
+ // this should really be numBlockAs * n_blocks;
+ const Index num_kernel_notifications = num_threads * n_blocks;
+ MaxSizeVector<Notification*> kernel_notifications(num_kernel_notifications,
+ nullptr);
+
+ for (Index k_block_idx = 0; k_block_idx < k_blocks; k_block_idx++) {
+ const Index k_start = k_block_idx * kc;
+ // make sure we don't overshoot right edge of left matrix
+ const Index actual_kc = numext::mini(k_start + kc, k) - k_start;
+
+ for (Index m_block_idx = 0; m_block_idx < m_blocks; m_block_idx += numBlockAs) {
+ const Index num_blocks = numext::mini(m_blocks-m_block_idx, numBlockAs);
+
+ for (Index mt_block_idx = m_block_idx; mt_block_idx < m_block_idx+num_blocks; mt_block_idx++) {
+ const Index m_start = mt_block_idx * mc;
+ const Index actual_mc = numext::mini(m_start + mc, m) - m_start;
+ eigen_assert(actual_mc > 0);
+
+ Index blockAId = (k_block_idx * m_blocks + mt_block_idx) % num_threads;
+
+ for (int i = 0; i < n_blocks; ++i) {
+ Index notification_id = (blockAId * n_blocks + i);
+ // Wait for any current kernels using this slot to complete
+ // before using it.
+ if (kernel_notifications[notification_id]) {
+ wait_until_ready(kernel_notifications[notification_id]);
+ delete kernel_notifications[notification_id];
+ }
+ kernel_notifications[notification_id] = new Notification();
+ }
+ const packLArg arg = {
+ blockAs[blockAId], // blockA
+ lhs, // lhs
+ m_start, // m
+ k_start, // k
+ actual_mc, // mc
+ actual_kc, // kc
+ };
+
+ // Delete any existing notification since we may be
+ // replacing it. The algorithm should ensure that there are
+ // no existing waiters on this notification.
+ delete lhs_notifications[blockAId];
+ lhs_notifications[blockAId] =
+ this->m_device.enqueue(&Self::packLhs<packLArg, LhsPacker>, arg);
+ }
+
+ // now start kernels.
+ const Index m_base_start = m_block_idx * mc;
+ const bool need_to_pack = m_block_idx == 0;
+
+ for (Index n_block_idx = 0; n_block_idx < n_blocks; n_block_idx++) {
+ const Index n_start = n_block_idx * nc;
+ const Index actual_nc = numext::mini(n_start + nc, n) - n_start;
+
+        // First make sure the previous kernels are all done before overwriting
+        // rhs. Also wait if we're going to start a new k. In both cases
+        // need_to_pack is true.
+ if (need_to_pack) {
+ for (Index i = num_blocks; i < num_threads; ++i) {
+ Index blockAId = (k_block_idx * m_blocks + i + m_block_idx) % num_threads;
+ Index future_id = (blockAId * n_blocks + n_block_idx);
+ wait_until_ready(kernel_notifications[future_id]);
+ }
+ }
+
+ packRKArg arg = {
+ &blockAs, // blockA
+ blockBs[n_block_idx], // blockB
+ rhs, // rhs
+ output, // output
+ m_base_start, // m
+ k_start, // k
+ n_start, // n
+ mc, // mc
+ actual_kc, // kc
+ actual_nc, // nc
+ num_threads,
+ numBlockAs,
+ m,
+ k_block_idx,
+ m_block_idx,
+ n_block_idx, // n_block_idx
+ m_blocks, // m_blocks
+ n_blocks, // n_blocks
+ &kernel_notifications, // kernel notifications
+ &lhs_notifications, // lhs notifications
+ need_to_pack, // need_to_pack
+ };
+
+ // We asynchronously kick off this function, which ends up
+ // notifying the appropriate kernel_notifications objects,
+ // which this thread waits on before exiting.
+ this->m_device.enqueueNoNotification(&Self::packRhsAndKernel<packRKArg, RhsPacker, GebpKernel>, arg);
+ }
+ }
+ }
+
+ // Make sure all the kernels are done.
+ for (size_t i = 0; i < kernel_notifications.size(); ++i) {
+ wait_until_ready(kernel_notifications[i]);
+ delete kernel_notifications[i];
+ }
+
+ // No need to wait for lhs notifications since they should have
+ // already been waited on. Just clean them up.
+ for (size_t i = 0; i < lhs_notifications.size(); ++i) {
+ delete lhs_notifications[i];
+ }
+
+ // deallocate all of the memory for both A and B's
+ for (size_t i = 0; i < blockAs.size(); i++) {
+ this->m_device.deallocate(blockAs[i]);
+ }
+ for (size_t i = 0; i < blockBs.size(); i++) {
+ this->m_device.deallocate(blockBs[i]);
+ }
+
+#undef CEIL_DIV
+ }
+
+ /*
+ * Packs a LHS block of size (mt, kc) starting at lhs(m, k). Before packing
+ * the LHS block, check that all of the kernels that worked on the same
+ * mt_block_idx in the previous m_block are done.
+ */
+ template <typename packLArg, typename LhsPacker>
+ static void packLhs(const packLArg arg) {
+ // perform actual packing
+ LhsPacker pack_lhs;
+ pack_lhs(arg.blockA, arg.lhs.getSubMapper(arg.m_start, arg.k_start), arg.kc, arg.mc);
+ }
+
+  /*
+   * Packs a RHS block of size (kc, nc) starting at (k, n) after checking that
+   * all kernels in the previous block are done.
+   * Then, for each LHS notification, we wait on it and call GEBP on the area
+   * packed under that notification (which starts at
+   * blockA + future_idx * mt * kc) on the LHS, together with the fully packed
+   * RHS block.
+   * The output of this GEBP is written to output(m + i * mt, n).
+   */
+ template <typename packRKArg, typename RhsPacker, typename GebpKernel>
+ static void packRhsAndKernel(packRKArg arg) {
+ if (arg.need_to_pack) {
+ RhsPacker pack_rhs;
+ pack_rhs(arg.blockB, arg.rhs.getSubMapper(arg.k, arg.n), arg.kc, arg.nc);
+ }
+
+ GebpKernel gebp;
+ for (Index mt_block_idx = 0; mt_block_idx < arg.num_blockAs; mt_block_idx++) {
+ const Index m_base_start = arg.m + arg.mc*mt_block_idx;
+ if (m_base_start < arg.max_m) {
+ Index blockAId = (arg.k_block_idx * arg.m_blocks + mt_block_idx + arg.m_block_idx) % arg.num_threads;
+ wait_until_ready((*arg.lhs_notifications)[blockAId]);
+ const Index actual_mc = numext::mini(m_base_start + arg.mc, arg.max_m) - m_base_start;
+ gebp(arg.output.getSubMapper(m_base_start, arg.n),
+ (*arg.blockAs)[blockAId], arg.blockB,
+ actual_mc, arg.kc, arg.nc, Scalar(1), -1, -1, 0, 0);
+
+ // Notify that the kernel is done.
+ const Index set_idx = blockAId * arg.n_blocks + arg.n_block_idx;
+ (*arg.kernel_notifications)[set_idx]->Notify();
+ }
+ }
+ }
+#endif // EIGEN_USE_SIMPLE_THREAD_POOL
+
+ TensorOpCost contractionCost(Index m, Index n, Index bm, Index bn, Index bk,
+ bool shard_by_col, bool prepacked) const {
+ const int packed_size = std::min<int>(PacketType<LhsScalar, Device>::size,
+ PacketType<RhsScalar, Device>::size);
+ const int output_packet_size = internal::unpacket_traits<PacketReturnType>::size;
+ const double kd = static_cast<double>(bk);
+    // Peak VFMA bandwidth is 0.5. However, if we don't have enough data for
+    // vectorization the bandwidth drops. The 4.0 and 2.0 bandwidth values were
+    // determined experimentally.
+    double computeBandwidth =
+        bk == 1 ? 4.0
+                : ((shard_by_col ? bn : bm) < Traits::nr ||
+                   (shard_by_col ? bm : bn) < Traits::mr)
+                      ? 2.0
+                      : 0.5;
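+    // E.g. when sharding by columns, a block with bn >= Traits::nr and
+    // bm >= Traits::mr is charged the peak 0.5 cycles per element, while a
+    // degenerate block with bn < Traits::nr is charged 2.0 instead.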
+#ifndef EIGEN_VECTORIZE_FMA
+    // The bandwidth of all of VFMA/MULPS/ADDPS is 0.5 on the latest Intel
+    // processors. However, for MULPS/ADDPS we have a dependent sequence of 2
+    // such instructions, so the overall bandwidth is 1.0.
+ if (computeBandwidth == 0.5) computeBandwidth = 1.0;
+#endif
+ // Computations.
+ TensorOpCost cost = TensorOpCost(0, 0, kd * computeBandwidth, true, packed_size);
+ // Output stores.
+ cost += TensorOpCost(0, sizeof(CoeffReturnType), 0, true, output_packet_size);
+ if (prepacked) {
+ // Packing and kernels are executed in different tasks. When we calculate
+ // task grain size we look only at kernel cost assuming that kernel
+ // is more expensive than packing.
+ return cost;
+ }
+ // Lhs/rhs loads + computations.
+ TensorOpCost lhsCost = this->m_leftImpl.costPerCoeff(true) * (kd / n);
+ TensorOpCost rhsCost = this->m_rightImpl.costPerCoeff(true) * (kd / m);
+ // Lhs packing memory cost does not contribute considerably to overall
+ // execution time because lhs is prefetched early and accessed sequentially.
+ if (shard_by_col)
+ lhsCost.dropMemoryCost();
+ else
+ rhsCost.dropMemoryCost();
+ return cost + lhsCost + rhsCost;
+ }
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_USE_THREADS
+#endif // EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_THREAD_POOL_H
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorConversion.h b/unsupported/Eigen/CXX11/src/Tensor/TensorConversion.h
new file mode 100644
index 000000000..860a6949a
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorConversion.h
@@ -0,0 +1,279 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2015 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_CONVERSION_H
+#define EIGEN_CXX11_TENSOR_TENSOR_CONVERSION_H
+
+namespace Eigen {
+
+/** \class TensorConversionOp
+ * \ingroup CXX11_Tensor_Module
+ *
+ * \brief Tensor conversion class. This class makes it possible to vectorize
+ * type casting operations when the number of scalars per packet in the source
+ * and the destination types differ.
+ */
+namespace internal {
+template<typename TargetType, typename XprType>
+struct traits<TensorConversionOp<TargetType, XprType> >
+{
+  // The scalar type of the converted expression is the conversion's target
+  // type.
+  typedef TargetType Scalar;
+ typedef typename traits<XprType>::StorageKind StorageKind;
+ typedef typename traits<XprType>::Index Index;
+ typedef typename XprType::Nested Nested;
+ typedef typename remove_reference<Nested>::type _Nested;
+ static const int NumDimensions = traits<XprType>::NumDimensions;
+ static const int Layout = traits<XprType>::Layout;
+ enum { Flags = 0 };
+};
+
+template<typename TargetType, typename XprType>
+struct eval<TensorConversionOp<TargetType, XprType>, Eigen::Dense>
+{
+ typedef const TensorConversionOp<TargetType, XprType>& type;
+};
+
+template<typename TargetType, typename XprType>
+struct nested<TensorConversionOp<TargetType, XprType>, 1, typename eval<TensorConversionOp<TargetType, XprType> >::type>
+{
+ typedef TensorConversionOp<TargetType, XprType> type;
+};
+
+} // end namespace internal
+
+
+template <typename TensorEvaluator, typename SrcPacket, typename TgtPacket, int SrcCoeffRatio, int TgtCoeffRatio>
+struct PacketConverter {
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ PacketConverter(const TensorEvaluator& impl)
+ : m_impl(impl) {}
+
+ template<int LoadMode, typename Index>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TgtPacket packet(Index index) const {
+ return internal::pcast<SrcPacket, TgtPacket>(m_impl.template packet<LoadMode>(index));
+ }
+
+ private:
+ const TensorEvaluator& m_impl;
+};
+
+
+template <typename TensorEvaluator, typename SrcPacket, typename TgtPacket>
+struct PacketConverter<TensorEvaluator, SrcPacket, TgtPacket, 2, 1> {
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ PacketConverter(const TensorEvaluator& impl)
+ : m_impl(impl) {}
+
+ template<int LoadMode, typename Index>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TgtPacket packet(Index index) const {
+ const int SrcPacketSize = internal::unpacket_traits<SrcPacket>::size;
+
+ SrcPacket src1 = m_impl.template packet<LoadMode>(index);
+ SrcPacket src2 = m_impl.template packet<LoadMode>(index + SrcPacketSize);
+ TgtPacket result = internal::pcast<SrcPacket, TgtPacket>(src1, src2);
+ return result;
+ }
+
+ private:
+ const TensorEvaluator& m_impl;
+};
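+// Illustrative instance (assuming SSE packets): casting double to float maps
+// SrcPacket = Packet2d and TgtPacket = Packet4f, so SrcCoeffRatio == 2 and the
+// specialization above fuses two Packet2d loads into a single Packet4f.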
+
+template <typename TensorEvaluator, typename SrcPacket, typename TgtPacket>
+struct PacketConverter<TensorEvaluator, SrcPacket, TgtPacket, 4, 1> {
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ PacketConverter(const TensorEvaluator& impl)
+ : m_impl(impl) {}
+
+ template<int LoadMode, typename Index>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TgtPacket packet(Index index) const {
+ const int SrcPacketSize = internal::unpacket_traits<SrcPacket>::size;
+
+ SrcPacket src1 = m_impl.template packet<LoadMode>(index);
+ SrcPacket src2 = m_impl.template packet<LoadMode>(index + SrcPacketSize);
+ SrcPacket src3 = m_impl.template packet<LoadMode>(index + 2 * SrcPacketSize);
+ SrcPacket src4 = m_impl.template packet<LoadMode>(index + 3 * SrcPacketSize);
+ TgtPacket result = internal::pcast<SrcPacket, TgtPacket>(src1, src2, src3, src4);
+ return result;
+ }
+
+ private:
+ const TensorEvaluator& m_impl;
+};
+
+template <typename TensorEvaluator, typename SrcPacket, typename TgtPacket>
+struct PacketConverter<TensorEvaluator, SrcPacket, TgtPacket, 1, 2> {
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ PacketConverter(const TensorEvaluator& impl)
+ : m_impl(impl), m_maxIndex(impl.dimensions().TotalSize()) {}
+
+ template<int LoadMode, typename Index>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TgtPacket packet(Index index) const {
+ const int SrcPacketSize = internal::unpacket_traits<SrcPacket>::size;
+ // Only call m_impl.packet() when we have direct access to the underlying data. This
+ // ensures that we don't compute the subexpression twice. We may however load some
+ // coefficients twice, but in practice this doesn't negatively impact performance.
+ if (m_impl.data() && (index + SrcPacketSize < m_maxIndex)) {
+ // Force unaligned memory loads since we can't ensure alignment anymore
+ return internal::pcast<SrcPacket, TgtPacket>(m_impl.template packet<Unaligned>(index));
+ } else {
+ const int TgtPacketSize = internal::unpacket_traits<TgtPacket>::size;
+ typedef typename internal::unpacket_traits<SrcPacket>::type SrcType;
+ typedef typename internal::unpacket_traits<TgtPacket>::type TgtType;
+ internal::scalar_cast_op<SrcType, TgtType> converter;
+ EIGEN_ALIGN_MAX typename internal::unpacket_traits<TgtPacket>::type values[TgtPacketSize];
+ for (int i = 0; i < TgtPacketSize; ++i) {
+ values[i] = converter(m_impl.coeff(index+i));
+ }
+ TgtPacket rslt = internal::pload<TgtPacket>(values);
+ return rslt;
+ }
+ }
+
+ private:
+ const TensorEvaluator& m_impl;
+ const typename TensorEvaluator::Index m_maxIndex;
+};
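+// Illustrative instance (assuming SSE packets): casting float to double maps
+// SrcPacket = Packet4f and TgtPacket = Packet2d (TgtCoeffRatio == 2). Each
+// Packet4f load yields one Packet2d covering half of its lanes, so successive
+// calls reload overlapping data, and indices near the end of the buffer take
+// the scalar fallback path above.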
+
+template<typename TargetType, typename XprType>
+class TensorConversionOp : public TensorBase<TensorConversionOp<TargetType, XprType>, ReadOnlyAccessors>
+{
+ public:
+ typedef typename internal::traits<TensorConversionOp>::Scalar Scalar;
+ typedef typename internal::traits<TensorConversionOp>::StorageKind StorageKind;
+ typedef typename internal::traits<TensorConversionOp>::Index Index;
+ typedef typename internal::nested<TensorConversionOp>::type Nested;
+ typedef Scalar CoeffReturnType;
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorConversionOp(const XprType& xpr)
+ : m_xpr(xpr) {}
+
+ EIGEN_DEVICE_FUNC
+ const typename internal::remove_all<typename XprType::Nested>::type&
+ expression() const { return m_xpr; }
+
+ protected:
+ typename XprType::Nested m_xpr;
+};
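+// A minimal usage sketch (illustrative, not part of this file): conversion ops
+// are normally built via TensorBase::cast<NewType>(), e.g.
+//   Eigen::Tensor<float, 2> a(3, 4);
+//   a.setRandom();
+//   Eigen::Tensor<double, 2> b = a.cast<double>();  // wraps a TensorConversionOp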
+
+template <bool SameType, typename Eval, typename Scalar> struct ConversionSubExprEval {
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool run(Eval& impl, Scalar*) {
+ impl.evalSubExprsIfNeeded(NULL);
+ return true;
+ }
+};
+
+template <typename Eval, typename Scalar> struct ConversionSubExprEval<true, Eval, Scalar> {
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool run(Eval& impl, Scalar* data) {
+ return impl.evalSubExprsIfNeeded(data);
+ }
+};
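+// When the source and destination scalar types are identical the cast is a
+// no-op, so the specialization above lets the sub-expression evaluate directly
+// into the destination buffer; the generic version evaluates the sub-expression
+// into its own storage and converts coefficient by coefficient on access.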
+
+
+// Eval as rvalue
+template<typename TargetType, typename ArgType, typename Device>
+struct TensorEvaluator<const TensorConversionOp<TargetType, ArgType>, Device>
+{
+ typedef TensorConversionOp<TargetType, ArgType> XprType;
+ typedef typename XprType::Index Index;
+ typedef typename TensorEvaluator<ArgType, Device>::Dimensions Dimensions;
+ typedef TargetType Scalar;
+ typedef TargetType CoeffReturnType;
+ typedef typename internal::remove_all<typename internal::traits<ArgType>::Scalar>::type SrcType;
+ typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
+ typedef typename PacketType<SrcType, Device>::type PacketSourceType;
+ static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
+
+ enum {
+ IsAligned = false,
+ PacketAccess = true,
+ Layout = TensorEvaluator<ArgType, Device>::Layout,
+ RawAccess = false
+ };
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
+ : m_impl(op.expression(), device)
+ {
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_impl.dimensions(); }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* data)
+ {
+ return ConversionSubExprEval<internal::is_same<TargetType, SrcType>::value, TensorEvaluator<ArgType, Device>, Scalar>::run(m_impl, data);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup()
+ {
+ m_impl.cleanup();
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
+ {
+ internal::scalar_cast_op<SrcType, TargetType> converter;
+ return converter(m_impl.coeff(index));
+ }
+
+ template<int LoadMode>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
+ {
+ const bool Vectorizable = TensorEvaluator<ArgType, Device>::PacketAccess &
+ internal::type_casting_traits<SrcType, TargetType>::VectorizedCast;
+ return PacketConv<LoadMode, Vectorizable>::run(m_impl, index);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
+ costPerCoeff(bool vectorized) const {
+ const double cast_cost = TensorOpCost::CastCost<SrcType, TargetType>();
+ if (vectorized) {
+ const double SrcCoeffRatio =
+ internal::type_casting_traits<SrcType, TargetType>::SrcCoeffRatio;
+ const double TgtCoeffRatio =
+ internal::type_casting_traits<SrcType, TargetType>::TgtCoeffRatio;
+ return m_impl.costPerCoeff(vectorized) * (SrcCoeffRatio / PacketSize) +
+ TensorOpCost(0, 0, TgtCoeffRatio * (cast_cost / PacketSize));
+ } else {
+ return m_impl.costPerCoeff(vectorized) + TensorOpCost(0, 0, cast_cost);
+ }
+ }
+
+ EIGEN_DEVICE_FUNC Scalar* data() const { return NULL; }
+
+ protected:
+ template <int LoadMode, bool ActuallyVectorize>
+ struct PacketConv {
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType run(const TensorEvaluator<ArgType, Device>& impl, Index index) {
+ internal::scalar_cast_op<SrcType, TargetType> converter;
+ EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize];
+ for (int i = 0; i < PacketSize; ++i) {
+ values[i] = converter(impl.coeff(index+i));
+ }
+ PacketReturnType rslt = internal::pload<PacketReturnType>(values);
+ return rslt;
+ }
+ };
+
+ template <int LoadMode>
+ struct PacketConv<LoadMode, true> {
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType run(const TensorEvaluator<ArgType, Device>& impl, Index index) {
+ const int SrcCoeffRatio = internal::type_casting_traits<SrcType, TargetType>::SrcCoeffRatio;
+ const int TgtCoeffRatio = internal::type_casting_traits<SrcType, TargetType>::TgtCoeffRatio;
+ PacketConverter<TensorEvaluator<ArgType, Device>, PacketSourceType, PacketReturnType,
+ SrcCoeffRatio, TgtCoeffRatio> converter(impl);
+ return converter.template packet<LoadMode>(index);
+ }
+ };
+
+ TensorEvaluator<ArgType, Device> m_impl;
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_CONVERSION_H
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorConvolution.h b/unsupported/Eigen/CXX11/src/Tensor/TensorConvolution.h
new file mode 100644
index 000000000..abdf742c6
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorConvolution.h
@@ -0,0 +1,1104 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_CONVOLUTION_H
+#define EIGEN_CXX11_TENSOR_TENSOR_CONVOLUTION_H
+
+namespace Eigen {
+
+/** \class TensorConvolution
+ * \ingroup CXX11_Tensor_Module
+ *
+ * \brief Tensor convolution class.
+ *
+ * Evaluates the convolution of an input tensor with a kernel along a given
+ * list of dimensions.
+ */
+namespace internal {
+
+template <typename Index, typename InputDims, int NumKernelDims, int Layout>
+class IndexMapper {
+ public:
+ IndexMapper(const InputDims& input_dims, const array<Index, NumKernelDims>& kernel_dims,
+ const array<Index, NumKernelDims>& indices) {
+
+ array<Index, NumDims> dimensions = input_dims;
+ for (int i = 0; i < NumKernelDims; ++i) {
+ const Index index = indices[i];
+ const Index input_dim = input_dims[index];
+ const Index kernel_dim = kernel_dims[i];
+ const Index result_dim = input_dim - kernel_dim + 1;
+ dimensions[index] = result_dim;
+ }
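+    // E.g. convolving a dimension of size 10 with a kernel of size 3 yields a
+    // "valid" convolution of size 10 - 3 + 1 = 8 along that dimension.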
+
+ array<Index, NumDims> inputStrides;
+ array<Index, NumDims> outputStrides;
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ inputStrides[0] = 1;
+ outputStrides[0] = 1;
+ for (int i = 1; i < NumDims; ++i) {
+ inputStrides[i] = inputStrides[i-1] * input_dims[i-1];
+ outputStrides[i] = outputStrides[i-1] * dimensions[i-1];
+ }
+ } else {
+ inputStrides[NumDims - 1] = 1;
+ outputStrides[NumDims - 1] = 1;
+ for (int i = static_cast<int>(NumDims) - 2; i >= 0; --i) {
+ inputStrides[i] = inputStrides[i + 1] * input_dims[i + 1];
+ outputStrides[i] = outputStrides[i + 1] * dimensions[i + 1];
+ }
+ }
+
+ array<Index, NumDims> cudaInputDimensions;
+ array<Index, NumDims> cudaOutputDimensions;
+ array<Index, NumDims> tmp = dimensions;
+ array<Index, NumDims> ordering;
+ const size_t offset = static_cast<int>(Layout) == static_cast<int>(ColMajor)
+ ? 0
+ : NumDims - NumKernelDims;
+ for (int i = 0; i < NumKernelDims; ++i) {
+ const Index index = i + offset;
+ ordering[index] = indices[i];
+ tmp[indices[i]] = -1;
+ cudaInputDimensions[index] = input_dims[indices[i]];
+ cudaOutputDimensions[index] = dimensions[indices[i]];
+ }
+
+ int written = static_cast<int>(Layout) == static_cast<int>(ColMajor)
+ ? NumKernelDims
+ : 0;
+ for (int i = 0; i < NumDims; ++i) {
+ if (tmp[i] >= 0) {
+ ordering[written] = i;
+ cudaInputDimensions[written] = input_dims[i];
+ cudaOutputDimensions[written] = dimensions[i];
+ ++written;
+ }
+ }
+
+ for (int i = 0; i < NumDims; ++i) {
+ m_inputStrides[i] = inputStrides[ordering[i]];
+ m_outputStrides[i] = outputStrides[ordering[i]];
+ }
+
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ for (int i = 0; i < NumDims; ++i) {
+ if (i > NumKernelDims) {
+ m_cudaInputStrides[i] =
+ m_cudaInputStrides[i - 1] * cudaInputDimensions[i - 1];
+ m_cudaOutputStrides[i] =
+ m_cudaOutputStrides[i - 1] * cudaOutputDimensions[i - 1];
+ } else {
+ m_cudaInputStrides[i] = 1;
+ m_cudaOutputStrides[i] = 1;
+ }
+ }
+ } else {
+ for (int i = NumDims - 1; i >= 0; --i) {
+ if (i + 1 < offset) {
+ m_cudaInputStrides[i] =
+ m_cudaInputStrides[i + 1] * cudaInputDimensions[i + 1];
+ m_cudaOutputStrides[i] =
+ m_cudaOutputStrides[i + 1] * cudaOutputDimensions[i + 1];
+ } else {
+ m_cudaInputStrides[i] = 1;
+ m_cudaOutputStrides[i] = 1;
+ }
+ }
+ }
+ }
+
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Index mapCudaInputPlaneToTensorInputOffset(Index p) const {
+ Index inputIndex = 0;
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ for (int d = NumDims - 1; d > NumKernelDims; --d) {
+ const Index idx = p / m_cudaInputStrides[d];
+ inputIndex += idx * m_inputStrides[d];
+ p -= idx * m_cudaInputStrides[d];
+ }
+ inputIndex += p * m_inputStrides[NumKernelDims];
+ } else {
+ std::ptrdiff_t limit = 0;
+ if (NumKernelDims < NumDims) {
+ limit = NumDims - NumKernelDims - 1;
+ }
+ for (int d = 0; d < limit; ++d) {
+ const Index idx = p / m_cudaInputStrides[d];
+ inputIndex += idx * m_inputStrides[d];
+ p -= idx * m_cudaInputStrides[d];
+ }
+ inputIndex += p * m_inputStrides[limit];
+ }
+ return inputIndex;
+ }
+
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Index mapCudaOutputPlaneToTensorOutputOffset(Index p) const {
+ Index outputIndex = 0;
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ for (int d = NumDims - 1; d > NumKernelDims; --d) {
+ const Index idx = p / m_cudaOutputStrides[d];
+ outputIndex += idx * m_outputStrides[d];
+ p -= idx * m_cudaOutputStrides[d];
+ }
+ outputIndex += p * m_outputStrides[NumKernelDims];
+ } else {
+ std::ptrdiff_t limit = 0;
+ if (NumKernelDims < NumDims) {
+ limit = NumDims - NumKernelDims - 1;
+ }
+ for (int d = 0; d < limit; ++d) {
+ const Index idx = p / m_cudaOutputStrides[d];
+ outputIndex += idx * m_outputStrides[d];
+ p -= idx * m_cudaOutputStrides[d];
+ }
+ outputIndex += p * m_outputStrides[limit];
+ }
+ return outputIndex;
+ }
+
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Index mapCudaInputKernelToTensorInputOffset(Index i) const {
+ const size_t offset = static_cast<int>(Layout) == static_cast<int>(ColMajor)
+ ? 0
+ : NumDims - NumKernelDims;
+ return i * m_inputStrides[offset];
+ }
+
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Index mapCudaOutputKernelToTensorOutputOffset(Index i) const {
+ const size_t offset = static_cast<int>(Layout) == static_cast<int>(ColMajor)
+ ? 0
+ : NumDims - NumKernelDims;
+ return i * m_outputStrides[offset];
+ }
+
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Index mapCudaInputKernelToTensorInputOffset(Index i, Index j) const {
+ const size_t offset = static_cast<int>(Layout) == static_cast<int>(ColMajor)
+ ? 0
+ : NumDims - NumKernelDims;
+ return i * m_inputStrides[offset] + j * m_inputStrides[offset + 1];
+ }
+
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Index mapCudaOutputKernelToTensorOutputOffset(Index i, Index j) const {
+ const size_t offset = static_cast<int>(Layout) == static_cast<int>(ColMajor)
+ ? 0
+ : NumDims - NumKernelDims;
+ return i * m_outputStrides[offset] + j * m_outputStrides[offset + 1];
+ }
+
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Index mapCudaInputKernelToTensorInputOffset(Index i, Index j, Index k) const {
+ const size_t offset = static_cast<int>(Layout) == static_cast<int>(ColMajor)
+ ? 0
+ : NumDims - NumKernelDims;
+ return i * m_inputStrides[offset] + j * m_inputStrides[offset + 1] +
+ k * m_inputStrides[offset + 2];
+ }
+
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Index mapCudaOutputKernelToTensorOutputOffset(Index i, Index j, Index k) const {
+ const size_t offset = static_cast<int>(Layout) == static_cast<int>(ColMajor)
+ ? 0
+ : NumDims - NumKernelDims;
+ return i * m_outputStrides[offset] + j * m_outputStrides[offset + 1] +
+ k * m_outputStrides[offset + 2];
+ }
+
+ private:
+ static const int NumDims = internal::array_size<InputDims>::value;
+ array<Index, NumDims> m_inputStrides;
+ array<Index, NumDims> m_outputStrides;
+ array<Index, NumDims> m_cudaInputStrides;
+ array<Index, NumDims> m_cudaOutputStrides;
+};
+
+
+
+template<typename Dimensions, typename InputXprType, typename KernelXprType>
+struct traits<TensorConvolutionOp<Dimensions, InputXprType, KernelXprType> >
+{
+ // Type promotion to handle the case where the types of the lhs and the rhs are different.
+ typedef typename promote_storage_type<typename InputXprType::Scalar,
+ typename KernelXprType::Scalar>::ret Scalar;
+ typedef typename promote_storage_type<typename traits<InputXprType>::StorageKind,
+ typename traits<KernelXprType>::StorageKind>::ret StorageKind;
+ typedef typename promote_index_type<typename traits<InputXprType>::Index,
+ typename traits<KernelXprType>::Index>::type Index;
+ typedef typename InputXprType::Nested LhsNested;
+ typedef typename KernelXprType::Nested RhsNested;
+ typedef typename remove_reference<LhsNested>::type _LhsNested;
+ typedef typename remove_reference<RhsNested>::type _RhsNested;
+ static const int NumDimensions = traits<InputXprType>::NumDimensions;
+ static const int Layout = traits<InputXprType>::Layout;
+
+ enum {
+ Flags = 0
+ };
+};
+
+template<typename Dimensions, typename InputXprType, typename KernelXprType>
+struct eval<TensorConvolutionOp<Dimensions, InputXprType, KernelXprType>, Eigen::Dense>
+{
+ typedef const TensorConvolutionOp<Dimensions, InputXprType, KernelXprType>& type;
+};
+
+template<typename Dimensions, typename InputXprType, typename KernelXprType>
+struct nested<TensorConvolutionOp<Dimensions, InputXprType, KernelXprType>, 1, typename eval<TensorConvolutionOp<Dimensions, InputXprType, KernelXprType> >::type>
+{
+ typedef TensorConvolutionOp<Dimensions, InputXprType, KernelXprType> type;
+};
+
+} // end namespace internal
+
+
+
+template<typename Indices, typename InputXprType, typename KernelXprType>
+class TensorConvolutionOp : public TensorBase<TensorConvolutionOp<Indices, InputXprType, KernelXprType>, ReadOnlyAccessors>
+{
+ public:
+ typedef typename Eigen::internal::traits<TensorConvolutionOp>::Scalar Scalar;
+ typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
+ typedef typename internal::promote_storage_type<typename InputXprType::CoeffReturnType,
+ typename KernelXprType::CoeffReturnType>::ret CoeffReturnType;
+ typedef typename Eigen::internal::nested<TensorConvolutionOp>::type Nested;
+ typedef typename Eigen::internal::traits<TensorConvolutionOp>::StorageKind StorageKind;
+ typedef typename Eigen::internal::traits<TensorConvolutionOp>::Index Index;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorConvolutionOp(const InputXprType& input, const KernelXprType& kernel, const Indices& dims)
+ : m_input_xpr(input), m_kernel_xpr(kernel), m_indices(dims) {}
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const Indices& indices() const { return m_indices; }
+
+ /** \returns the nested expressions */
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const typename internal::remove_all<typename InputXprType::Nested>::type&
+ inputExpression() const { return m_input_xpr; }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const typename internal::remove_all<typename KernelXprType::Nested>::type&
+ kernelExpression() const { return m_kernel_xpr; }
+
+ protected:
+ typename InputXprType::Nested m_input_xpr;
+ typename KernelXprType::Nested m_kernel_xpr;
+ const Indices m_indices;
+};
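+// A minimal usage sketch (illustrative, not part of this file): convolution
+// ops are normally built via TensorBase::convolve(), e.g.
+//   Eigen::Tensor<float, 2> input(10, 10), kernel(3, 3);
+//   input.setRandom(); kernel.setRandom();
+//   Eigen::array<ptrdiff_t, 2> dims({0, 1});  // convolve along both dimensions
+//   Eigen::Tensor<float, 2> out = input.convolve(kernel, dims);  // 8x8 result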
+
+
+template<typename Indices, typename InputArgType, typename KernelArgType, typename Device>
+struct TensorEvaluator<const TensorConvolutionOp<Indices, InputArgType, KernelArgType>, Device>
+{
+ typedef TensorConvolutionOp<Indices, InputArgType, KernelArgType> XprType;
+
+ static const int NumDims = internal::array_size<typename TensorEvaluator<InputArgType, Device>::Dimensions>::value;
+ static const int NumKernelDims = internal::array_size<Indices>::value;
+ typedef typename XprType::Index Index;
+ typedef DSizes<Index, NumDims> Dimensions;
+
+ typedef typename XprType::Scalar Scalar;
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+ typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
+ static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
+
+ enum {
+ IsAligned = TensorEvaluator<InputArgType, Device>::IsAligned & TensorEvaluator<KernelArgType, Device>::IsAligned,
+ PacketAccess = TensorEvaluator<InputArgType, Device>::PacketAccess & TensorEvaluator<KernelArgType, Device>::PacketAccess,
+ Layout = TensorEvaluator<InputArgType, Device>::Layout,
+ CoordAccess = false, // to be implemented
+ RawAccess = false
+ };
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
+ : m_inputImpl(op.inputExpression(), device), m_kernelImpl(op.kernelExpression(), device), m_kernelArg(op.kernelExpression()), m_kernel(NULL), m_local_kernel(false), m_device(device)
+ {
+ EIGEN_STATIC_ASSERT((static_cast<int>(TensorEvaluator<InputArgType, Device>::Layout) == static_cast<int>(TensorEvaluator<KernelArgType, Device>::Layout)), YOU_MADE_A_PROGRAMMING_MISTAKE);
+
+ const typename TensorEvaluator<InputArgType, Device>::Dimensions& input_dims = m_inputImpl.dimensions();
+ const typename TensorEvaluator<KernelArgType, Device>::Dimensions& kernel_dims = m_kernelImpl.dimensions();
+
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ m_inputStride[0] = 1;
+ for (int i = 1; i < NumDims; ++i) {
+ m_inputStride[i] = m_inputStride[i - 1] * input_dims[i - 1];
+ }
+ } else {
+ m_inputStride[NumDims - 1] = 1;
+ for (int i = NumDims - 2; i >= 0; --i) {
+ m_inputStride[i] = m_inputStride[i + 1] * input_dims[i + 1];
+ }
+ }
+
+ m_dimensions = m_inputImpl.dimensions();
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ for (int i = 0; i < NumKernelDims; ++i) {
+ const Index index = op.indices()[i];
+ const Index input_dim = input_dims[index];
+ const Index kernel_dim = kernel_dims[i];
+ const Index result_dim = input_dim - kernel_dim + 1;
+ m_dimensions[index] = result_dim;
+ if (i > 0) {
+ m_kernelStride[i] = m_kernelStride[i - 1] * kernel_dims[i - 1];
+ } else {
+ m_kernelStride[0] = 1;
+ }
+ m_indexStride[i] = m_inputStride[index];
+ }
+
+ m_outputStride[0] = 1;
+ for (int i = 1; i < NumDims; ++i) {
+ m_outputStride[i] = m_outputStride[i - 1] * m_dimensions[i - 1];
+ }
+ } else {
+ for (int i = NumKernelDims - 1; i >= 0; --i) {
+ const Index index = op.indices()[i];
+ const Index input_dim = input_dims[index];
+ const Index kernel_dim = kernel_dims[i];
+ const Index result_dim = input_dim - kernel_dim + 1;
+ m_dimensions[index] = result_dim;
+ if (i < NumKernelDims - 1) {
+ m_kernelStride[i] = m_kernelStride[i + 1] * kernel_dims[i + 1];
+ } else {
+ m_kernelStride[NumKernelDims - 1] = 1;
+ }
+ m_indexStride[i] = m_inputStride[index];
+ }
+
+ m_outputStride[NumDims - 1] = 1;
+ for (int i = NumDims - 2; i >= 0; --i) {
+ m_outputStride[i] = m_outputStride[i + 1] * m_dimensions[i + 1];
+ }
+ }
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar*) {
+ m_inputImpl.evalSubExprsIfNeeded(NULL);
+ preloadKernel();
+ return true;
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
+ m_inputImpl.cleanup();
+ if (m_local_kernel) {
+ m_device.deallocate((void*)m_kernel);
+ m_local_kernel = false;
+ }
+ m_kernel = NULL;
+ }
+
+ void evalTo(typename XprType::Scalar* buffer) {
+ evalSubExprsIfNeeded(NULL);
+ for (int i = 0; i < dimensions().TotalSize(); ++i) {
+ buffer[i] += coeff(i);
+ }
+ cleanup();
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
+ {
+ CoeffReturnType result = CoeffReturnType(0);
+ convolve(firstInput(index), 0, NumKernelDims-1, result);
+ return result;
+ }
+
+ template<int LoadMode>
+ EIGEN_DEVICE_FUNC PacketReturnType packet(const Index index) const
+ {
+ Index indices[2] = {index, index+PacketSize-1};
+ Index startInputs[2] = {0, 0};
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ for (int i = NumDims - 1; i > 0; --i) {
+ const Index idx0 = indices[0] / m_outputStride[i];
+ const Index idx1 = indices[1] / m_outputStride[i];
+ startInputs[0] += idx0 * m_inputStride[i];
+ startInputs[1] += idx1 * m_inputStride[i];
+ indices[0] -= idx0 * m_outputStride[i];
+ indices[1] -= idx1 * m_outputStride[i];
+ }
+ } else {
+ for (int i = 0; i < NumDims - 1; ++i) {
+ const Index idx0 = indices[0] / m_outputStride[i];
+ const Index idx1 = indices[1] / m_outputStride[i];
+ startInputs[0] += idx0 * m_inputStride[i];
+ startInputs[1] += idx1 * m_inputStride[i];
+ indices[0] -= idx0 * m_outputStride[i];
+ indices[1] -= idx1 * m_outputStride[i];
+ }
+ }
+ startInputs[0] += indices[0];
+ startInputs[1] += indices[1];
+
+ if (startInputs[1]-startInputs[0] == PacketSize-1) {
+ PacketReturnType result = internal::pset1<PacketReturnType>(0);
+ convolvePacket(startInputs[0], 0, NumKernelDims-1, result);
+ return result;
+ } else {
+ EIGEN_ALIGN_MAX Scalar data[PacketSize];
+ data[0] = Scalar(0);
+ convolve(startInputs[0], 0, NumKernelDims-1, data[0]);
+ for (int i = 1; i < PacketSize-1; ++i) {
+ data[i] = Scalar(0);
+ convolve(firstInput(index+i), 0, NumKernelDims-1, data[i]);
+ }
+ data[PacketSize-1] = Scalar(0);
+ convolve(startInputs[1], 0, NumKernelDims-1, data[PacketSize-1]);
+ return internal::pload<PacketReturnType>(data);
+ }
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
+ costPerCoeff(bool vectorized) const {
+ const double kernel_size = m_kernelImpl.dimensions().TotalSize();
+ // We ignore the use of fused multiply-add.
+ const double convolve_compute_cost =
+ TensorOpCost::AddCost<Scalar>() + TensorOpCost::MulCost<Scalar>();
+ const double firstIndex_compute_cost =
+ NumDims *
+ (2 * TensorOpCost::AddCost<Index>() + 2 * TensorOpCost::MulCost<Index>() +
+ TensorOpCost::DivCost<Index>());
+ return TensorOpCost(0, 0, firstIndex_compute_cost, vectorized, PacketSize) +
+ kernel_size * (m_inputImpl.costPerCoeff(vectorized) +
+ m_kernelImpl.costPerCoeff(vectorized) +
+ TensorOpCost(0, 0, convolve_compute_cost, vectorized,
+ PacketSize));
+ }
+
+ EIGEN_DEVICE_FUNC Scalar* data() const { return NULL; }
+
+ private:
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index firstInput(Index index) const {
+ Index startInput = 0;
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ for (int i = NumDims - 1; i > 0; --i) {
+ const Index idx = index / m_outputStride[i];
+ startInput += idx * m_inputStride[i];
+ index -= idx * m_outputStride[i];
+ }
+ } else {
+ for (int i = 0; i < NumDims - 1; ++i) {
+ const Index idx = index / m_outputStride[i];
+ startInput += idx * m_inputStride[i];
+ index -= idx * m_outputStride[i];
+ }
+ }
+ startInput += index;
+ return startInput;
+ }
+
+ EIGEN_DEVICE_FUNC void convolve(Index firstIndex, Index firstKernel, int DimIndex, CoeffReturnType& accum) const {
+ for (int j = 0; j < m_kernelImpl.dimensions()[DimIndex]; ++j) {
+ const Index input = firstIndex + j * m_indexStride[DimIndex];
+ const Index kernel = firstKernel + j * m_kernelStride[DimIndex];
+ if (DimIndex > 0) {
+ convolve(input, kernel, DimIndex-1, accum);
+ } else {
+ accum += m_inputImpl.coeff(input) * m_kernel[kernel];
+ }
+ }
+ }
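+  // For a 2-D kernel the recursion above expands to the familiar nested loops
+  // (sketch):
+  //   for (j1 = 0; j1 < kernel_dims[1]; ++j1)
+  //     for (j0 = 0; j0 < kernel_dims[0]; ++j0)
+  //       accum += m_inputImpl.coeff(first + j0 * m_indexStride[0] +
+  //                                  j1 * m_indexStride[1]) *
+  //                m_kernel[j0 * m_kernelStride[0] + j1 * m_kernelStride[1]];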
+
+ template <typename Packet>
+ EIGEN_DEVICE_FUNC void convolvePacket(Index firstIndex, Index firstKernel, int DimIndex, Packet& accum) const {
+ for (int j = 0; j < m_kernelImpl.dimensions()[DimIndex]; ++j) {
+ const Index input = firstIndex + j * m_indexStride[DimIndex];
+ const Index kernel = firstKernel + j * m_kernelStride[DimIndex];
+ if (DimIndex > 0) {
+ convolvePacket(input, kernel, DimIndex-1, accum);
+ } else {
+ accum = internal::pmadd<Packet>(m_inputImpl.template packet<Unaligned>(input), internal::pset1<Packet>(m_kernel[kernel]), accum);
+ }
+ }
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void preloadKernel() {
+ // Don't make a local copy of the kernel unless we have to (i.e. it's an
+ // expression that needs to be evaluated)
+ const Scalar* in_place = m_kernelImpl.data();
+ if (in_place) {
+ m_kernel = in_place;
+ m_local_kernel = false;
+ } else {
+ size_t kernel_sz = m_kernelImpl.dimensions().TotalSize() * sizeof(Scalar);
+ Scalar* local = (Scalar*)m_device.allocate(kernel_sz);
+ typedef TensorEvalToOp<const KernelArgType> EvalTo;
+ EvalTo evalToTmp(local, m_kernelArg);
+ const bool PacketAccess = internal::IsVectorizable<Device, KernelArgType>::value;
+ internal::TensorExecutor<const EvalTo, Device, PacketAccess>::run(evalToTmp, m_device);
+
+ m_kernel = local;
+ m_local_kernel = true;
+ }
+ }
+
+ array<Index, NumDims> m_inputStride;
+ array<Index, NumDims> m_outputStride;
+
+ array<Index, NumKernelDims> m_indexStride;
+ array<Index, NumKernelDims> m_kernelStride;
+ TensorEvaluator<InputArgType, Device> m_inputImpl;
+ TensorEvaluator<KernelArgType, Device> m_kernelImpl;
+ Dimensions m_dimensions;
+
+ KernelArgType m_kernelArg;
+ const Scalar* m_kernel;
+ bool m_local_kernel;
+ const Device& m_device;
+};
+
+
+
+
+// Use an optimized implementation of the evaluation code for GPUs whenever possible.
+#if defined(EIGEN_USE_GPU) && defined(__CUDACC__)
+
+template <int StaticKernelSize>
+struct GetKernelSize {
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int operator() (const int /*kernelSize*/) const {
+ return StaticKernelSize;
+ }
+};
+template <>
+struct GetKernelSize<Dynamic> {
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int operator() (const int kernelSize) const {
+ return kernelSize;
+ }
+};
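+// With a static kernel size, the convolution loop bounds in the kernels below
+// become compile-time constants, so the #pragma unroll hints can fully unroll
+// the inner loops; the Dynamic specialization simply forwards the runtime size.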
+
+template <typename InputEvaluator, typename Index, typename InputDims,
+ int StaticKernelSize>
+__global__ void EigenConvolutionKernel1D(
+ InputEvaluator eval,
+ const internal::IndexMapper<Index, InputDims, 1, InputEvaluator::Layout>
+ indexMapper,
+ const float* __restrict kernel, const int numPlanes, const int numX,
+ const int maxX, const int kernelSize, float* buffer) {
+ extern __shared__ float s[];
+
+ const int first_x = blockIdx.x * maxX;
+ const int last_x = (first_x + maxX < numX ? first_x + maxX : numX) - 1;
+ const int num_x_input = last_x - first_x + GetKernelSize<StaticKernelSize>()(kernelSize);
+ const int num_x_output = last_x - first_x + 1;
+
+ const int first_plane = blockIdx.y * blockDim.y;
+ const int plane_stride = blockDim.y * gridDim.y;
+
+ for (int p = first_plane + threadIdx.y; p < numPlanes; p += plane_stride) {
+ // Load inputs to shared memory
+ const int plane_input_offset = indexMapper.mapCudaInputPlaneToTensorInputOffset(p);
+ const int plane_kernel_offset = threadIdx.y * num_x_input;
+ #pragma unroll
+ for (int i = threadIdx.x; i < num_x_input; i += blockDim.x) {
+ const int tensor_index = plane_input_offset + indexMapper.mapCudaInputKernelToTensorInputOffset(i+first_x);
+ s[i + plane_kernel_offset] = eval.coeff(tensor_index);
+ }
+
+ __syncthreads();
+
+ // Compute the convolution
+ const int plane_output_offset = indexMapper.mapCudaOutputPlaneToTensorOutputOffset(p);
+
+ #pragma unroll
+ for (int i = threadIdx.x; i < num_x_output; i += blockDim.x) {
+ const int kernel_offset = plane_kernel_offset + i;
+ float result = 0.0f;
+ #pragma unroll
+ for (int k = 0; k < GetKernelSize<StaticKernelSize>()(kernelSize); ++k) {
+ result += s[k + kernel_offset] * kernel[k];
+ }
+ const int tensor_index = plane_output_offset + indexMapper.mapCudaOutputKernelToTensorOutputOffset(i+first_x);
+ buffer[tensor_index] = result;
+ }
+ __syncthreads();
+ }
+}
+
+template <typename InputEvaluator, typename Index, typename InputDims,
+ int StaticKernelSizeX, int StaticKernelSizeY>
+__global__ void EigenConvolutionKernel2D(
+ InputEvaluator eval,
+ const internal::IndexMapper<Index, InputDims, 2, InputEvaluator::Layout>
+ indexMapper,
+ const float* __restrict kernel, const int numPlanes, const int numX,
+ const int maxX, const int numY, const int maxY, const int kernelSizeX,
+ const int kernelSizeY, float* buffer) {
+ extern __shared__ float s[];
+
+ const int first_x = blockIdx.x * maxX;
+ const int last_x = (first_x + maxX < numX ? first_x + maxX : numX) - 1;
+ const int num_x_input = last_x - first_x + GetKernelSize<StaticKernelSizeX>()(kernelSizeX);
+ const int num_x_output = last_x - first_x + 1;
+
+ const int first_y = blockIdx.y * maxY;
+ const int last_y = (first_y + maxY < numY ? first_y + maxY : numY) - 1;
+ const int num_y_input = last_y - first_y + GetKernelSize<StaticKernelSizeY>()(kernelSizeY);
+ const int num_y_output = last_y - first_y + 1;
+
+ const int first_plane = blockIdx.z * blockDim.z;
+ const int plane_stride = blockDim.z * gridDim.z;
+
+ for (int p = first_plane + threadIdx.z; p < numPlanes; p += plane_stride) {
+
+ const int plane_input_offset = indexMapper.mapCudaInputPlaneToTensorInputOffset(p);
+ const int plane_kernel_offset = threadIdx.z * num_y_input;
+
+ // Load inputs to shared memory
+ #pragma unroll
+ for (int j = threadIdx.y; j < num_y_input; j += blockDim.y) {
+ const int input_offset = num_x_input * (j + plane_kernel_offset);
+ #pragma unroll
+ for (int i = threadIdx.x; i < num_x_input; i += blockDim.x) {
+ const int tensor_index = plane_input_offset + indexMapper.mapCudaInputKernelToTensorInputOffset(i+first_x, j+first_y);
+ s[i + input_offset] = eval.coeff(tensor_index);
+ }
+ }
+
+ __syncthreads();
+
+ // Convolution
+ const int plane_output_offset = indexMapper.mapCudaOutputPlaneToTensorOutputOffset(p);
+
+ #pragma unroll
+ for (int j = threadIdx.y; j < num_y_output; j += blockDim.y) {
+ #pragma unroll
+ for (int i = threadIdx.x; i < num_x_output; i += blockDim.x) {
+ float result = 0.0f;
+ #pragma unroll
+ for (int l = 0; l < GetKernelSize<StaticKernelSizeY>()(kernelSizeY); ++l) {
+ const int kernel_offset = kernelSizeX * l;
+ const int input_offset = i + num_x_input * (j + l + plane_kernel_offset);
+ #pragma unroll
+ for (int k = 0; k < GetKernelSize<StaticKernelSizeX>()(kernelSizeX); ++k) {
+ result += s[k + input_offset] * kernel[k + kernel_offset];
+ }
+ }
+ const int tensor_index = plane_output_offset + indexMapper.mapCudaOutputKernelToTensorOutputOffset(i+first_x, j+first_y);
+ buffer[tensor_index] = result;
+ }
+ }
+
+ __syncthreads();
+ }
+}
+
+template <typename InputEvaluator, typename Index, typename InputDims>
+__global__ void EigenConvolutionKernel3D(
+ InputEvaluator eval,
+ const internal::IndexMapper<Index, InputDims, 3, InputEvaluator::Layout>
+ indexMapper,
+ const float* __restrict kernel, const size_t numPlanes, const size_t numX,
+ const size_t maxX, const size_t numY, const size_t maxY, const size_t numZ,
+ const size_t maxZ, const size_t kernelSizeX, const size_t kernelSizeY,
+ const size_t kernelSizeZ, float* buffer) {
+ extern __shared__ float s[];
+
+ // Load inputs to shared memory
+ const int first_x = blockIdx.x * maxX;
+ const int last_x = (first_x + maxX < numX ? first_x + maxX : numX) - 1;
+ const int num_x_input = last_x - first_x + kernelSizeX;
+
+ const int first_y = blockIdx.y * maxY;
+ const int last_y = (first_y + maxY < numY ? first_y + maxY : numY) - 1;
+ const int num_y_input = last_y - first_y + kernelSizeY;
+
+ const int first_z = blockIdx.z * maxZ;
+ const int last_z = (first_z + maxZ < numZ ? first_z + maxZ : numZ) - 1;
+ const int num_z_input = last_z - first_z + kernelSizeZ;
+
+ for (int p = 0; p < numPlanes; ++p) {
+
+ const int plane_input_offset = indexMapper.mapCudaInputPlaneToTensorInputOffset(p);
+ const int plane_kernel_offset = 0;
+
+ for (int k = threadIdx.z; k < num_z_input; k += blockDim.z) {
+ for (int j = threadIdx.y; j < num_y_input; j += blockDim.y) {
+ for (int i = threadIdx.x; i < num_x_input; i += blockDim.x) {
+ const int tensor_index = plane_input_offset + indexMapper.mapCudaInputKernelToTensorInputOffset(i+first_x, j+first_y, k+first_z);
+ s[i + num_x_input * (j + num_y_input * (k + plane_kernel_offset))] = eval.coeff(tensor_index);
+ }
+ }
+ }
+
+ __syncthreads();
+
+ // Convolution
+ const int num_z_output = last_z - first_z + 1;
+ const int num_y_output = last_y - first_y + 1;
+ const int num_x_output = last_x - first_x + 1;
+ const int plane_output_offset = indexMapper.mapCudaOutputPlaneToTensorOutputOffset(p);
+
+ for (int k = threadIdx.z; k < num_z_output; k += blockDim.z) {
+ for (int j = threadIdx.y; j < num_y_output; j += blockDim.y) {
+ for (int i = threadIdx.x; i < num_x_output; i += blockDim.x) {
+ float result = 0.0f;
+ for (int n = 0; n < kernelSizeZ; ++n) {
+ for (int m = 0; m < kernelSizeY; ++m) {
+ for (int l = 0; l < kernelSizeX; ++l) {
+ result += s[i + l + num_x_input * (j + m + num_y_input * (k + n + plane_kernel_offset))] * kernel[l + kernelSizeX * (m + kernelSizeY * n)];
+ }
+ }
+ }
+ const int tensor_index = plane_output_offset + indexMapper.mapCudaOutputKernelToTensorOutputOffset(i+first_x, j+first_y, k+first_z);
+ buffer[tensor_index] = result;
+ }
+ }
+ }
+ __syncthreads();
+ }
+}
+
+
+
+template<typename Indices, typename InputArgType, typename KernelArgType>
+struct TensorEvaluator<const TensorConvolutionOp<Indices, InputArgType, KernelArgType>, GpuDevice>
+{
+ typedef TensorConvolutionOp<Indices, InputArgType, KernelArgType> XprType;
+
+ static const int NumDims = internal::array_size<typename TensorEvaluator<InputArgType, GpuDevice>::Dimensions>::value;
+ static const int NumKernelDims = internal::array_size<Indices>::value;
+ typedef typename XprType::Index Index;
+ typedef DSizes<Index, NumDims> Dimensions;
+ typedef typename TensorEvaluator<KernelArgType, GpuDevice>::Dimensions KernelDimensions;
+
+ enum {
+ IsAligned = TensorEvaluator<InputArgType, GpuDevice>::IsAligned & TensorEvaluator<KernelArgType, GpuDevice>::IsAligned,
+ PacketAccess = false,
+ Layout = TensorEvaluator<InputArgType, GpuDevice>::Layout,
+ CoordAccess = false, // to be implemented
+ RawAccess = false
+ };
+
+ EIGEN_DEVICE_FUNC TensorEvaluator(const XprType& op, const GpuDevice& device)
+ : m_inputImpl(op.inputExpression(), device), m_kernelArg(op.kernelExpression()), m_kernelImpl(op.kernelExpression(), device), m_indices(op.indices()), m_buf(NULL), m_kernel(NULL), m_local_kernel(false), m_device(device)
+ {
+ EIGEN_STATIC_ASSERT((static_cast<int>(TensorEvaluator<InputArgType, GpuDevice>::Layout) == static_cast<int>(TensorEvaluator<KernelArgType, GpuDevice>::Layout)), YOU_MADE_A_PROGRAMMING_MISTAKE);
+
+ const typename TensorEvaluator<InputArgType, GpuDevice>::Dimensions& input_dims = m_inputImpl.dimensions();
+ const typename TensorEvaluator<KernelArgType, GpuDevice>::Dimensions& kernel_dims = m_kernelImpl.dimensions();
+
+ m_dimensions = m_inputImpl.dimensions();
+ for (int i = 0; i < NumKernelDims; ++i) {
+ const Index index = op.indices()[i];
+ const Index input_dim = input_dims[index];
+ const Index kernel_dim = kernel_dims[i];
+ const Index result_dim = input_dim - kernel_dim + 1;
+ m_dimensions[index] = result_dim;
+ }
+ }
+
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+ typedef typename PacketType<CoeffReturnType, GpuDevice>::type PacketReturnType;
+ typedef typename InputArgType::Scalar Scalar;
+ static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
+
+ EIGEN_DEVICE_FUNC const Dimensions& dimensions() const { return m_dimensions; }
+
+ EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* data) {
+ preloadKernel();
+ m_inputImpl.evalSubExprsIfNeeded(NULL);
+ if (data) {
+ executeEval(data);
+ return false;
+ } else {
+ m_buf = (Scalar*)m_device.allocate(dimensions().TotalSize() * sizeof(Scalar));
+ executeEval(m_buf);
+ return true;
+ }
+ }
+
+ EIGEN_STRONG_INLINE void cleanup() {
+ m_inputImpl.cleanup();
+ if (m_buf) {
+ m_device.deallocate(m_buf);
+ m_buf = NULL;
+ }
+ if (m_local_kernel) {
+ m_device.deallocate((void*)m_kernel);
+ m_local_kernel = false;
+ }
+ m_kernel = NULL;
+ }
+
+ EIGEN_STRONG_INLINE void preloadKernel() {
+ // Don't make a local copy of the kernel unless we have to (i.e. it's an
+ // expression that needs to be evaluated)
+ const Scalar* in_place = m_kernelImpl.data();
+ if (in_place) {
+ m_kernel = in_place;
+ m_local_kernel = false;
+ } else {
+ size_t kernel_sz = m_kernelImpl.dimensions().TotalSize() * sizeof(Scalar);
+ Scalar* local = (Scalar*)m_device.allocate(kernel_sz);
+ typedef TensorEvalToOp<const KernelArgType> EvalTo;
+ EvalTo evalToTmp(local, m_kernelArg);
+ const bool PacketAccess = internal::IsVectorizable<GpuDevice, KernelArgType>::value;
+ internal::TensorExecutor<const EvalTo, GpuDevice, PacketAccess>::run(evalToTmp, m_device);
+
+ m_kernel = local;
+ m_local_kernel = true;
+ }
+ }
+
+ static unsigned int ceil(unsigned int num, unsigned int denom) {
+ const unsigned int rounded_toward_zero = num / denom;
+ if (num > rounded_toward_zero * denom) {
+ return rounded_toward_zero + 1;
+ }
+ return rounded_toward_zero;
+ }
+
+ void executeEval(Scalar* data) const {
+ typedef typename TensorEvaluator<InputArgType, GpuDevice>::Dimensions InputDims;
+
+ const int maxSharedMem = m_device.sharedMemPerBlock();
+ const int maxThreadsPerBlock = m_device.maxCudaThreadsPerBlock();
+ const int maxBlocksPerProcessor = m_device.maxCudaThreadsPerMultiProcessor() / maxThreadsPerBlock;
+ const int numMultiProcessors = m_device.getNumCudaMultiProcessors();
+ const int warpSize = 32;
+
+ switch (NumKernelDims) {
+ case 1: {
+ const int kernel_size = m_kernelImpl.dimensions().TotalSize();
+
+ const int numX = dimensions()[m_indices[0]];
+ const int numP = dimensions().TotalSize() / numX;
+ int maxX;
+ dim3 block_size;
+
+ const int single_stride_dim =
+ static_cast<int>(Layout) == static_cast<int>(ColMajor)
+ ? 0
+ : m_inputImpl.dimensions().rank() - 1;
+ if (m_indices[0] == single_stride_dim) {
+          // Maximize the reuse
+ const int inner_dim = ((maxSharedMem / (sizeof(Scalar)) - kernel_size + 1 + 31) / 32) * 32;
+ maxX = numext::mini<int>(inner_dim, numX);
+ const int maxP = numext::mini<int>(maxSharedMem / ((kernel_size - 1 + maxX) * sizeof(Scalar)), numP);
+ block_size.x = numext::mini(maxThreadsPerBlock, maxX);
+ block_size.y = numext::mini<int>(maxThreadsPerBlock / block_size.x, maxP);
+ }
+ else {
+          // Read as much as possible along the innermost dimension, that is
+          // the plane
+ const int inner_dim = maxSharedMem / ((warpSize + kernel_size) * sizeof(Scalar));
+ const int maxP = numext::mini<int>(inner_dim, numP);
+ maxX = numext::mini<int>(maxSharedMem / (inner_dim * sizeof(Scalar)) - kernel_size + 1, numX);
+
+ block_size.x = numext::mini(warpSize, maxX);
+ block_size.y = numext::mini<int>(maxThreadsPerBlock/block_size.x, maxP);
+ }
+
+ const int shared_mem = block_size.y * (maxX + kernel_size - 1) * sizeof(Scalar);
+ assert(shared_mem <= maxSharedMem);
+
+ const int num_x_blocks = ceil(numX, maxX);
+ const int blocksPerProcessor = numext::mini(maxBlocksPerProcessor, maxSharedMem / shared_mem);
+ const int num_y_blocks = ceil(numMultiProcessors * blocksPerProcessor, num_x_blocks);
+
+ dim3 num_blocks(num_x_blocks, numext::mini<int>(num_y_blocks, ceil(numP, block_size.y)));
+
+
+ //cout << "launching 1D kernel with block_size.x: " << block_size.x << " block_size.y: " << block_size.y << " num_blocks.x: " << num_blocks.x << " num_blocks.y: " << num_blocks.y << " maxX: " << maxX << " shared_mem: " << shared_mem << " in stream " << m_device.stream() << endl;
+
+ const array<Index, 1> indices(m_indices[0]);
+ const array<Index, 1> kernel_dims(m_kernelImpl.dimensions()[0]);
+ internal::IndexMapper<Index, InputDims, 1, Layout> indexMapper(
+ m_inputImpl.dimensions(), kernel_dims, indices);
+ switch(kernel_size) {
+ case 4: {
+ LAUNCH_CUDA_KERNEL((EigenConvolutionKernel1D<TensorEvaluator<InputArgType, GpuDevice>, Index, InputDims, 4>), num_blocks, block_size, shared_mem, m_device, m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, 4, data);
+ break;
+ }
+ case 7: {
+ LAUNCH_CUDA_KERNEL((EigenConvolutionKernel1D<TensorEvaluator<InputArgType, GpuDevice>, Index, InputDims, 7>), num_blocks, block_size, shared_mem, m_device, m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, 7, data);
+ break;
+ }
+ default: {
+ LAUNCH_CUDA_KERNEL((EigenConvolutionKernel1D<TensorEvaluator<InputArgType, GpuDevice>, Index, InputDims, Dynamic>), num_blocks, block_size, shared_mem, m_device, m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, kernel_size, data);
+ }
+ }
+ break;
+ }
+
+ case 2: {
+ const int idxX =
+ static_cast<int>(Layout) == static_cast<int>(ColMajor) ? 0 : 1;
+ const int idxY =
+ static_cast<int>(Layout) == static_cast<int>(ColMajor) ? 1 : 0;
+ const int kernel_size_x = m_kernelImpl.dimensions()[idxX];
+ const int kernel_size_y = m_kernelImpl.dimensions()[idxY];
+
+ const int numX = dimensions()[m_indices[idxX]];
+ const int numY = dimensions()[m_indices[idxY]];
+ const int numP = dimensions().TotalSize() / (numX*numY);
+
+ const float scaling_factor = sqrtf(static_cast<float>(maxSharedMem) / (sizeof(Scalar) * kernel_size_y * kernel_size_x));
+
+ // Snap maxX to warp size
+ int inner_dim = ((static_cast<int>(scaling_factor * kernel_size_x) - kernel_size_x + 1 + 32) / 32) * 32;
+ const int maxX = numext::mini<int>(inner_dim, numX);
+ const int maxY = numext::mini<int>(maxSharedMem / (sizeof(Scalar) * (maxX + kernel_size_x - 1)) - kernel_size_y + 1, numY);
+ const int maxP = numext::mini<int>(maxSharedMem / ((kernel_size_x - 1 + maxX) * (kernel_size_y - 1 + maxY) * sizeof(Scalar)), numP);
+
+ dim3 block_size;
+ block_size.x = numext::mini(1024, maxX);
+ block_size.y = numext::mini<int>(1024/block_size.x, maxY);
+ block_size.z = numext::mini<int>(1024/(block_size.x*block_size.y), maxP);
+
+ const int shared_mem = block_size.z * (maxX + kernel_size_x - 1) * (maxY + kernel_size_y - 1) * sizeof(Scalar);
+ assert(shared_mem <= maxSharedMem);
+
+ const int num_x_blocks = ceil(numX, maxX);
+ const int num_y_blocks = ceil(numY, maxY);
+ const int blocksPerProcessor = numext::mini(maxBlocksPerProcessor, maxSharedMem / shared_mem);
+ const int num_z_blocks = ceil(numMultiProcessors * blocksPerProcessor, num_x_blocks * num_y_blocks);
+
+ dim3 num_blocks(num_x_blocks, num_y_blocks, numext::mini<int>(num_z_blocks, ceil(numP, block_size.z)));
+
+
+ //cout << "launching 2D kernel with block_size.x: " << block_size.x << " block_size.y: " << block_size.y << " block_size.z: " << block_size.z << " num_blocks.x: " << num_blocks.x << " num_blocks.y: " << num_blocks.y << " num_blocks.z: " << num_blocks.z << " maxX: " << maxX << " maxY: " << maxY << " maxP: " << maxP << " shared_mem: " << shared_mem << " in stream " << m_device.stream() << endl;
+
+ const array<Index, 2> indices(m_indices[idxX], m_indices[idxY]);
+ const array<Index, 2> kernel_dims(m_kernelImpl.dimensions()[idxX],
+ m_kernelImpl.dimensions()[idxY]);
+ internal::IndexMapper<Index, InputDims, 2, Layout> indexMapper(
+ m_inputImpl.dimensions(), kernel_dims, indices);
+ switch (kernel_size_x) {
+ case 4: {
+ switch (kernel_size_y) {
+ case 7: {
+ LAUNCH_CUDA_KERNEL((EigenConvolutionKernel2D<TensorEvaluator<InputArgType, GpuDevice>, Index, InputDims, 4, 7>), num_blocks, block_size, shared_mem, m_device, m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, numY, maxY, 4, 7, data);
+ break;
+ }
+ default: {
+ LAUNCH_CUDA_KERNEL((EigenConvolutionKernel2D<TensorEvaluator<InputArgType, GpuDevice>, Index, InputDims, 4, Dynamic>), num_blocks, block_size, shared_mem, m_device, m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, numY, maxY, 4, kernel_size_y, data);
+ break;
+ }
+ }
+ break;
+ }
+ case 7: {
+ switch (kernel_size_y) {
+ case 4: {
+ LAUNCH_CUDA_KERNEL((EigenConvolutionKernel2D<TensorEvaluator<InputArgType, GpuDevice>, Index, InputDims, 7, 4>), num_blocks, block_size, shared_mem, m_device, m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, numY, maxY, 7, 4, data);
+ break;
+ }
+ default: {
+ LAUNCH_CUDA_KERNEL((EigenConvolutionKernel2D<TensorEvaluator<InputArgType, GpuDevice>, Index, InputDims, 7, Dynamic>), num_blocks, block_size, shared_mem, m_device, m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, numY, maxY, 7, kernel_size_y, data);
+ break;
+ }
+ }
+ break;
+ }
+ default: {
+ LAUNCH_CUDA_KERNEL((EigenConvolutionKernel2D<TensorEvaluator<InputArgType, GpuDevice>, Index, InputDims, Dynamic, Dynamic>), num_blocks, block_size, shared_mem, m_device, m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, numY, maxY, kernel_size_x, kernel_size_y, data);
+ break;
+ }
+ }
+ break;
+ }
+
+ case 3: {
+ const int idxX =
+ static_cast<int>(Layout) == static_cast<int>(ColMajor) ? 0 : 2;
+ const int idxY =
+ static_cast<int>(Layout) == static_cast<int>(ColMajor) ? 1 : 1;
+ const int idxZ =
+ static_cast<int>(Layout) == static_cast<int>(ColMajor) ? 2 : 0;
+
+ const int kernel_size_x = m_kernelImpl.dimensions()[idxX];
+ const int kernel_size_y = m_kernelImpl.dimensions()[idxY];
+ const int kernel_size_z = m_kernelImpl.dimensions()[idxZ];
+
+ const int numX = dimensions()[m_indices[idxX]];
+ const int numY = dimensions()[m_indices[idxY]];
+ const int numZ = dimensions()[m_indices[idxZ]];
+ const int numP = dimensions().TotalSize() / (numX*numY*numZ);
+
+ const int maxX = numext::mini<int>(128, numext::mini<int>(maxSharedMem / (sizeof(Scalar) * kernel_size_y * kernel_size_z) - kernel_size_x + 1, numX));
+ const int maxY = numext::mini<int>(128, numext::mini<int>(maxSharedMem / (sizeof(Scalar) * (maxX + kernel_size_x - 1) * kernel_size_z) - kernel_size_y + 1, numY));
+ const int maxZ = numext::mini<int>(128, numext::mini<int>(maxSharedMem / (sizeof(Scalar) * (maxX + kernel_size_x - 1) * (maxY + kernel_size_y - 1)) - kernel_size_z + 1, numZ));
+
+ dim3 block_size;
+ block_size.x = numext::mini(32, maxX);
+ block_size.y = numext::mini(32, maxY);
+ block_size.z = numext::mini<int>(1024/(block_size.x*block_size.y), maxZ);
+ dim3 num_blocks(ceil(numX, maxX), ceil(numY, maxY), ceil(numZ, maxZ));
+
+ const int shared_mem = (maxX + kernel_size_x - 1) * (maxY + kernel_size_y - 1) * (maxZ + kernel_size_z - 1) * sizeof(Scalar);
+ assert(shared_mem <= maxSharedMem);
+
+ //cout << "launching 3D kernel with block_size.x: " << block_size.x << " block_size.y: " << block_size.y << " block_size.z: " << block_size.z << " num_blocks.x: " << num_blocks.x << " num_blocks.y: " << num_blocks.y << " num_blocks.z: " << num_blocks.z << " shared_mem: " << shared_mem << " in stream " << m_device.stream() << endl;
+ const array<Index, 3> indices(m_indices[idxX], m_indices[idxY],
+ m_indices[idxZ]);
+ const array<Index, 3> kernel_dims(m_kernelImpl.dimensions()[idxX],
+ m_kernelImpl.dimensions()[idxY],
+ m_kernelImpl.dimensions()[idxZ]);
+ internal::IndexMapper<Index, InputDims, 3, Layout> indexMapper(
+ m_inputImpl.dimensions(), kernel_dims, indices);
+
+ LAUNCH_CUDA_KERNEL((EigenConvolutionKernel3D<TensorEvaluator<InputArgType, GpuDevice>, Index, InputDims>), num_blocks, block_size, shared_mem, m_device, m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, numY, maxY, numZ, maxZ, kernel_size_x, kernel_size_y, kernel_size_z, data);
+ break;
+ }
+
+ default: {
+ EIGEN_STATIC_ASSERT((NumKernelDims >= 1 && NumKernelDims <= 3), THIS_METHOD_IS_ONLY_FOR_OBJECTS_OF_A_SPECIFIC_SIZE);
+ }
+ }
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
+ {
+ eigen_assert(m_buf);
+ eigen_assert(index < m_dimensions.TotalSize());
+ return m_buf[index];
+ }
+
+ template<int LoadMode>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(const Index index) const
+ {
+ eigen_assert(m_buf);
+ eigen_assert(index < m_dimensions.TotalSize());
+ return internal::ploadt<PacketReturnType, LoadMode>(m_buf+index);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
+ costPerCoeff(bool vectorized) const {
+ // TODO(rmlarsen): FIXME: For now, this is just a copy of the CPU cost
+ // model.
+ const double kernel_size = m_kernelImpl.dimensions().TotalSize();
+ // We ignore the use of fused multiply-add.
+ const double convolve_compute_cost =
+ TensorOpCost::AddCost<Scalar>() + TensorOpCost::MulCost<Scalar>();
+ const double firstIndex_compute_cost =
+ NumDims *
+ (2 * TensorOpCost::AddCost<Index>() + 2 * TensorOpCost::MulCost<Index>() +
+ TensorOpCost::DivCost<Index>());
+ return TensorOpCost(0, 0, firstIndex_compute_cost, vectorized, PacketSize) +
+ kernel_size * (m_inputImpl.costPerCoeff(vectorized) +
+ m_kernelImpl.costPerCoeff(vectorized) +
+ TensorOpCost(0, 0, convolve_compute_cost, vectorized,
+ PacketSize));
+ }
+
+ private:
+ // No assignment (copies are needed by the kernels)
+ TensorEvaluator& operator = (const TensorEvaluator&);
+
+ TensorEvaluator<InputArgType, GpuDevice> m_inputImpl;
+ TensorEvaluator<KernelArgType, GpuDevice> m_kernelImpl;
+ KernelArgType m_kernelArg;
+ Indices m_indices;
+ Dimensions m_dimensions;
+ Scalar* m_buf;
+ const Scalar* m_kernel;
+ bool m_local_kernel;
+
+ const GpuDevice& m_device;
+};
+#endif
+
+
+} // end namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_CONVOLUTION_H
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorCostModel.h b/unsupported/Eigen/CXX11/src/Tensor/TensorCostModel.h
new file mode 100644
index 000000000..83c449cf1
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorCostModel.h
@@ -0,0 +1,212 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2016 Rasmus Munk Larsen <rmlarsen@google.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_COST_MODEL_H
+#define EIGEN_CXX11_TENSOR_TENSOR_COST_MODEL_H
+
+namespace Eigen {
+
+/** \class TensorOpCost
+ * \ingroup CXX11_Tensor_Module
+ *
+ * \brief A cost model used to limit the number of threads used for evaluating
+ * tensor expressions.
+ *
+ */
+
+// Class storing the cost of evaluating a tensor expression in terms of the
+// estimated number of operand bytes loaded, bytes stored, and compute cycles
+// required.
+class TensorOpCost {
+ public:
+ // TODO(rmlarsen): Fix the scalar op costs in Eigen proper. Even a simple
+ // model based on minimal reciprocal throughput numbers from Intel or
+ // Agner Fog's tables would be better than what is there now.
+ template <typename ArgType>
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int MulCost() {
+ return internal::functor_traits<
+ internal::scalar_product_op<ArgType, ArgType> >::Cost;
+ }
+ template <typename ArgType>
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int AddCost() {
+ return internal::functor_traits<internal::scalar_sum_op<ArgType> >::Cost;
+ }
+ template <typename ArgType>
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int DivCost() {
+ return internal::functor_traits<
+ internal::scalar_quotient_op<ArgType, ArgType> >::Cost;
+ }
+ template <typename ArgType>
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int ModCost() {
+ return internal::functor_traits<internal::scalar_mod_op<ArgType> >::Cost;
+ }
+ template <typename SrcType, typename TargetType>
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int CastCost() {
+ return internal::functor_traits<
+ internal::scalar_cast_op<SrcType, TargetType> >::Cost;
+ }
+
+ EIGEN_DEVICE_FUNC
+ TensorOpCost() : bytes_loaded_(0), bytes_stored_(0), compute_cycles_(0) {}
+ EIGEN_DEVICE_FUNC
+ TensorOpCost(double bytes_loaded, double bytes_stored, double compute_cycles)
+ : bytes_loaded_(bytes_loaded),
+ bytes_stored_(bytes_stored),
+ compute_cycles_(compute_cycles) {}
+
+ EIGEN_DEVICE_FUNC
+ TensorOpCost(double bytes_loaded, double bytes_stored, double compute_cycles,
+ bool vectorized, double packet_size)
+ : bytes_loaded_(bytes_loaded),
+ bytes_stored_(bytes_stored),
+ compute_cycles_(vectorized ? compute_cycles / packet_size
+ : compute_cycles) {
+ eigen_assert(bytes_loaded >= 0 && (numext::isfinite)(bytes_loaded));
+ eigen_assert(bytes_stored >= 0 && (numext::isfinite)(bytes_stored));
+ eigen_assert(compute_cycles >= 0 && (numext::isfinite)(compute_cycles));
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double bytes_loaded() const {
+ return bytes_loaded_;
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double bytes_stored() const {
+ return bytes_stored_;
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double compute_cycles() const {
+ return compute_cycles_;
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double total_cost(
+ double load_cost, double store_cost, double compute_cost) const {
+ return load_cost * bytes_loaded_ + store_cost * bytes_stored_ +
+ compute_cost * compute_cycles_;
+ }
+
+ // Drop memory access component. Intended for cases when memory accesses are
+ // sequential or are completely masked by computations.
+ EIGEN_DEVICE_FUNC void dropMemoryCost() {
+ bytes_loaded_ = 0;
+ bytes_stored_ = 0;
+ }
+
+ // TODO(rmlarsen): Define min in terms of total cost, not elementwise.
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost cwiseMin(
+ const TensorOpCost& rhs) const {
+ double bytes_loaded = numext::mini(bytes_loaded_, rhs.bytes_loaded());
+ double bytes_stored = numext::mini(bytes_stored_, rhs.bytes_stored());
+ double compute_cycles = numext::mini(compute_cycles_, rhs.compute_cycles());
+ return TensorOpCost(bytes_loaded, bytes_stored, compute_cycles);
+ }
+
+ // TODO(rmlarsen): Define max in terms of total cost, not elementwise.
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost cwiseMax(
+ const TensorOpCost& rhs) const {
+ double bytes_loaded = numext::maxi(bytes_loaded_, rhs.bytes_loaded());
+ double bytes_stored = numext::maxi(bytes_stored_, rhs.bytes_stored());
+ double compute_cycles = numext::maxi(compute_cycles_, rhs.compute_cycles());
+ return TensorOpCost(bytes_loaded, bytes_stored, compute_cycles);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost& operator+=(
+ const TensorOpCost& rhs) {
+ bytes_loaded_ += rhs.bytes_loaded();
+ bytes_stored_ += rhs.bytes_stored();
+ compute_cycles_ += rhs.compute_cycles();
+ return *this;
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost& operator*=(double rhs) {
+ bytes_loaded_ *= rhs;
+ bytes_stored_ *= rhs;
+ compute_cycles_ *= rhs;
+ return *this;
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE friend TensorOpCost operator+(
+ TensorOpCost lhs, const TensorOpCost& rhs) {
+ lhs += rhs;
+ return lhs;
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE friend TensorOpCost operator*(
+ TensorOpCost lhs, double rhs) {
+ lhs *= rhs;
+ return lhs;
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE friend TensorOpCost operator*(
+ double lhs, TensorOpCost rhs) {
+ rhs *= lhs;
+ return rhs;
+ }
+
+ friend std::ostream& operator<<(std::ostream& os, const TensorOpCost& tc) {
+ return os << "[bytes_loaded = " << tc.bytes_loaded()
+ << ", bytes_stored = " << tc.bytes_stored()
+ << ", compute_cycles = " << tc.compute_cycles() << "]";
+ }
+
+ private:
+ double bytes_loaded_;
+ double bytes_stored_;
+ double compute_cycles_;
+};
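+
+// A minimal usage sketch (not part of the API above; the constants are
+// hypothetical): TensorOpCost values compose additively and scale linearly.
+//   TensorOpCost c1(/*bytes_loaded=*/8, /*bytes_stored=*/4, /*compute_cycles=*/2);
+//   TensorOpCost c2(/*bytes_loaded=*/16, /*bytes_stored=*/0, /*compute_cycles=*/6);
+//   TensorOpCost total = c1 + 2.0 * c2;  // 40 bytes loaded, 4 stored, 14 cycles
+//   double cycles = total.total_cost(/*load_cost=*/0.17, /*store_cost=*/0.17,
+//                                    /*compute_cost=*/1.0);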
+
+// TODO(rmlarsen): Implement a policy that chooses an "optimal" number of threads
+// in [1:max_threads] instead of just switching multi-threading off for small
+// work units.
+template <typename Device>
+class TensorCostModel {
+ public:
+ // Scaling from Eigen compute cost to device cycles.
+ static const int kDeviceCyclesPerComputeCycle = 1;
+
+ // Costs in device cycles.
+ static const int kStartupCycles = 100000;
+ static const int kPerThreadCycles = 100000;
+ static const int kTaskSize = 40000;
+
+ // Returns the number of threads in [1:max_threads] to use for
+ // evaluating an expression with the given output size and cost per
+ // coefficient.
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int numThreads(
+ double output_size, const TensorOpCost& cost_per_coeff, int max_threads) {
+ double cost = totalCost(output_size, cost_per_coeff);
+ int threads = (cost - kStartupCycles) / kPerThreadCycles + 0.9;
+ return numext::mini(max_threads, numext::maxi(1, threads));
+ }
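+  // Worked example with the constants above (hypothetical workload): a total
+  // cost of 500000 device cycles yields (500000 - 100000) / 100000 + 0.9 =
+  // 4.9, which truncates to 4 threads before clamping to [1, max_threads].
+  // The +0.9 term rounds the thread count up once the remainder exceeds
+  // 10% of kPerThreadCycles.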
+
+  // taskSize assesses the parallel task size.
+  // A value of 1.0 means an ideal parallel task size. Values < 1.0 mean that
+  // task granularity needs to be increased to mitigate parallelization
+  // overheads.
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double taskSize(
+ double output_size, const TensorOpCost& cost_per_coeff) {
+ return totalCost(output_size, cost_per_coeff) / kTaskSize;
+ }
+
+ private:
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double totalCost(
+ double output_size, const TensorOpCost& cost_per_coeff) {
+ // Cost of memory fetches from L2 cache. 64 is typical cache line size.
+ // 11 is L2 cache latency on Haswell.
+ // We don't know whether data is in L1, L2 or L3. But we are most interested
+ // in single-threaded computational time around 100us-10ms (smaller time
+    // is too small for parallelization, larger time is not interesting
+ // either because we are probably using all available threads already).
+ // And for the target time range, L2 seems to be what matters. Data set
+ // fitting into L1 is too small to take noticeable time. Data set fitting
+ // only into L3 presumably will take more than 10ms to load and process.
+ const double kLoadCycles = 1.0 / 64 * 11;
+ const double kStoreCycles = 1.0 / 64 * 11;
+ // Scaling from Eigen compute cost to device cycles.
+ return output_size *
+ cost_per_coeff.total_cost(kLoadCycles, kStoreCycles,
+ kDeviceCyclesPerComputeCycle);
+ }
+};
+
+} // namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_COST_MODEL_H
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorCustomOp.h b/unsupported/Eigen/CXX11/src/Tensor/TensorCustomOp.h
new file mode 100644
index 000000000..e020d076f
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorCustomOp.h
@@ -0,0 +1,313 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_CUSTOM_OP_H
+#define EIGEN_CXX11_TENSOR_TENSOR_CUSTOM_OP_H
+
+namespace Eigen {
+
+/** \class TensorCustomUnaryOp
+ * \ingroup CXX11_Tensor_Module
+ *
+ * \brief Tensor expression for a user-defined unary operation.
+ *
+ *
+ */
+namespace internal {
+template<typename CustomUnaryFunc, typename XprType>
+struct traits<TensorCustomUnaryOp<CustomUnaryFunc, XprType> >
+{
+ typedef typename XprType::Scalar Scalar;
+ typedef typename XprType::StorageKind StorageKind;
+ typedef typename XprType::Index Index;
+ typedef typename XprType::Nested Nested;
+ typedef typename remove_reference<Nested>::type _Nested;
+ static const int NumDimensions = traits<XprType>::NumDimensions;
+ static const int Layout = traits<XprType>::Layout;
+};
+
+template<typename CustomUnaryFunc, typename XprType>
+struct eval<TensorCustomUnaryOp<CustomUnaryFunc, XprType>, Eigen::Dense>
+{
+ typedef const TensorCustomUnaryOp<CustomUnaryFunc, XprType>& type;
+};
+
+template<typename CustomUnaryFunc, typename XprType>
+struct nested<TensorCustomUnaryOp<CustomUnaryFunc, XprType> >
+{
+ typedef TensorCustomUnaryOp<CustomUnaryFunc, XprType> type;
+};
+
+} // end namespace internal
+
+
+
+template<typename CustomUnaryFunc, typename XprType>
+class TensorCustomUnaryOp : public TensorBase<TensorCustomUnaryOp<CustomUnaryFunc, XprType>, ReadOnlyAccessors>
+{
+ public:
+ typedef typename internal::traits<TensorCustomUnaryOp>::Scalar Scalar;
+ typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+ typedef typename internal::nested<TensorCustomUnaryOp>::type Nested;
+ typedef typename internal::traits<TensorCustomUnaryOp>::StorageKind StorageKind;
+ typedef typename internal::traits<TensorCustomUnaryOp>::Index Index;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorCustomUnaryOp(const XprType& expr, const CustomUnaryFunc& func)
+ : m_expr(expr), m_func(func) {}
+
+ EIGEN_DEVICE_FUNC
+ const CustomUnaryFunc& func() const { return m_func; }
+
+ EIGEN_DEVICE_FUNC
+ const typename internal::remove_all<typename XprType::Nested>::type&
+ expression() const { return m_expr; }
+
+ protected:
+ typename XprType::Nested m_expr;
+ const CustomUnaryFunc m_func;
+};
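+
+// A hedged sketch (not shipped with this patch) of the interface a
+// CustomUnaryFunc is expected to provide: dimensions() maps the input
+// expression to the output dimensions, and eval() writes the result into the
+// provided output on the given device. The functor below simply copies its
+// input:
+//   struct CopyFunc {
+//     template <typename Input>
+//     DSizes<DenseIndex, 2> dimensions(const Input& input) const {
+//       return input.dimensions();
+//     }
+//     template <typename Input, typename Output, typename Device>
+//     void eval(const Input& input, Output& output, const Device& device) const {
+//       output.device(device) = input;
+//     }
+//   };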
+
+
+// Eval as rvalue
+template<typename CustomUnaryFunc, typename XprType, typename Device>
+struct TensorEvaluator<const TensorCustomUnaryOp<CustomUnaryFunc, XprType>, Device>
+{
+ typedef TensorCustomUnaryOp<CustomUnaryFunc, XprType> ArgType;
+ typedef typename internal::traits<ArgType>::Index Index;
+ static const int NumDims = internal::traits<ArgType>::NumDimensions;
+ typedef DSizes<Index, NumDims> Dimensions;
+ typedef typename internal::remove_const<typename ArgType::Scalar>::type Scalar;
+ typedef typename internal::remove_const<typename XprType::CoeffReturnType>::type CoeffReturnType;
+ typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
+ static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
+
+ enum {
+ IsAligned = false,
+ PacketAccess = (internal::packet_traits<Scalar>::size > 1),
+ BlockAccess = false,
+ Layout = TensorEvaluator<XprType, Device>::Layout,
+ CoordAccess = false, // to be implemented
+ RawAccess = false
+ };
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const ArgType& op, const Device& device)
+ : m_op(op), m_device(device), m_result(NULL)
+ {
+ m_dimensions = op.func().dimensions(op.expression());
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType* data) {
+ if (data) {
+ evalTo(data);
+ return false;
+ } else {
+ m_result = static_cast<CoeffReturnType*>(
+ m_device.allocate(dimensions().TotalSize() * sizeof(Scalar)));
+ evalTo(m_result);
+ return true;
+ }
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
+ if (m_result != NULL) {
+ m_device.deallocate(m_result);
+ m_result = NULL;
+ }
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const {
+ return m_result[index];
+ }
+
+ template<int LoadMode>
+ EIGEN_DEVICE_FUNC PacketReturnType packet(Index index) const {
+ return internal::ploadt<PacketReturnType, LoadMode>(m_result + index);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
+ // TODO(rmlarsen): Extend CustomOp API to return its cost estimate.
+ return TensorOpCost(sizeof(CoeffReturnType), 0, 0, vectorized, PacketSize);
+ }
+
+ EIGEN_DEVICE_FUNC CoeffReturnType* data() const { return m_result; }
+
+ protected:
+ EIGEN_DEVICE_FUNC void evalTo(Scalar* data) {
+ TensorMap<Tensor<CoeffReturnType, NumDims, Layout, Index> > result(
+ data, m_dimensions);
+ m_op.func().eval(m_op.expression(), result, m_device);
+ }
+
+ Dimensions m_dimensions;
+ const ArgType m_op;
+ const Device& m_device;
+ CoeffReturnType* m_result;
+};
+
+
+
+/** \class TensorCustomBinaryOp
+ * \ingroup CXX11_Tensor_Module
+ *
+ * \brief Tensor expression for a user-defined binary operation.
+ *
+ *
+ */
+namespace internal {
+template<typename CustomBinaryFunc, typename LhsXprType, typename RhsXprType>
+struct traits<TensorCustomBinaryOp<CustomBinaryFunc, LhsXprType, RhsXprType> >
+{
+ typedef typename internal::promote_storage_type<typename LhsXprType::Scalar,
+ typename RhsXprType::Scalar>::ret Scalar;
+ typedef typename internal::promote_storage_type<typename LhsXprType::CoeffReturnType,
+ typename RhsXprType::CoeffReturnType>::ret CoeffReturnType;
+ typedef typename promote_storage_type<typename traits<LhsXprType>::StorageKind,
+ typename traits<RhsXprType>::StorageKind>::ret StorageKind;
+ typedef typename promote_index_type<typename traits<LhsXprType>::Index,
+ typename traits<RhsXprType>::Index>::type Index;
+ typedef typename LhsXprType::Nested LhsNested;
+ typedef typename RhsXprType::Nested RhsNested;
+ typedef typename remove_reference<LhsNested>::type _LhsNested;
+ typedef typename remove_reference<RhsNested>::type _RhsNested;
+ static const int NumDimensions = traits<LhsXprType>::NumDimensions;
+ static const int Layout = traits<LhsXprType>::Layout;
+};
+
+template<typename CustomBinaryFunc, typename LhsXprType, typename RhsXprType>
+struct eval<TensorCustomBinaryOp<CustomBinaryFunc, LhsXprType, RhsXprType>, Eigen::Dense>
+{
+ typedef const TensorCustomBinaryOp<CustomBinaryFunc, LhsXprType, RhsXprType>& type;
+};
+
+template<typename CustomBinaryFunc, typename LhsXprType, typename RhsXprType>
+struct nested<TensorCustomBinaryOp<CustomBinaryFunc, LhsXprType, RhsXprType> >
+{
+ typedef TensorCustomBinaryOp<CustomBinaryFunc, LhsXprType, RhsXprType> type;
+};
+
+} // end namespace internal
+
+
+
+template<typename CustomBinaryFunc, typename LhsXprType, typename RhsXprType>
+class TensorCustomBinaryOp : public TensorBase<TensorCustomBinaryOp<CustomBinaryFunc, LhsXprType, RhsXprType>, ReadOnlyAccessors>
+{
+ public:
+ typedef typename internal::traits<TensorCustomBinaryOp>::Scalar Scalar;
+ typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
+ typedef typename internal::traits<TensorCustomBinaryOp>::CoeffReturnType CoeffReturnType;
+ typedef typename internal::nested<TensorCustomBinaryOp>::type Nested;
+ typedef typename internal::traits<TensorCustomBinaryOp>::StorageKind StorageKind;
+ typedef typename internal::traits<TensorCustomBinaryOp>::Index Index;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorCustomBinaryOp(const LhsXprType& lhs, const RhsXprType& rhs, const CustomBinaryFunc& func)
+ : m_lhs_xpr(lhs), m_rhs_xpr(rhs), m_func(func) {}
+
+ EIGEN_DEVICE_FUNC
+ const CustomBinaryFunc& func() const { return m_func; }
+
+ EIGEN_DEVICE_FUNC
+ const typename internal::remove_all<typename LhsXprType::Nested>::type&
+ lhsExpression() const { return m_lhs_xpr; }
+
+ EIGEN_DEVICE_FUNC
+ const typename internal::remove_all<typename RhsXprType::Nested>::type&
+ rhsExpression() const { return m_rhs_xpr; }
+
+ protected:
+ typename LhsXprType::Nested m_lhs_xpr;
+ typename RhsXprType::Nested m_rhs_xpr;
+ const CustomBinaryFunc m_func;
+};
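+
+// The CustomBinaryFunc interface mirrors the unary sketch above (hypothetical
+// example): dimensions() receives both expressions, and eval() receives both
+// inputs plus the output:
+//   struct SumFunc {
+//     template <typename Lhs, typename Rhs>
+//     DSizes<DenseIndex, 2> dimensions(const Lhs& lhs, const Rhs&) const {
+//       return lhs.dimensions();
+//     }
+//     template <typename Lhs, typename Rhs, typename Output, typename Device>
+//     void eval(const Lhs& lhs, const Rhs& rhs, Output& out, const Device& d) const {
+//       out.device(d) = lhs + rhs;
+//     }
+//   };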
+
+
+// Eval as rvalue
+template<typename CustomBinaryFunc, typename LhsXprType, typename RhsXprType, typename Device>
+struct TensorEvaluator<const TensorCustomBinaryOp<CustomBinaryFunc, LhsXprType, RhsXprType>, Device>
+{
+ typedef TensorCustomBinaryOp<CustomBinaryFunc, LhsXprType, RhsXprType> XprType;
+ typedef typename internal::traits<XprType>::Index Index;
+ static const int NumDims = internal::traits<XprType>::NumDimensions;
+ typedef DSizes<Index, NumDims> Dimensions;
+ typedef typename XprType::Scalar Scalar;
+ typedef typename internal::remove_const<typename XprType::CoeffReturnType>::type CoeffReturnType;
+ typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
+ static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
+
+ enum {
+ IsAligned = false,
+ PacketAccess = (internal::packet_traits<Scalar>::size > 1),
+ BlockAccess = false,
+ Layout = TensorEvaluator<LhsXprType, Device>::Layout,
+ CoordAccess = false, // to be implemented
+ RawAccess = false
+ };
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
+ : m_op(op), m_device(device), m_result(NULL)
+ {
+ m_dimensions = op.func().dimensions(op.lhsExpression(), op.rhsExpression());
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType* data) {
+ if (data) {
+ evalTo(data);
+ return false;
+ } else {
+ m_result = static_cast<Scalar *>(m_device.allocate(dimensions().TotalSize() * sizeof(Scalar)));
+ evalTo(m_result);
+ return true;
+ }
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
+ if (m_result != NULL) {
+ m_device.deallocate(m_result);
+ m_result = NULL;
+ }
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const {
+ return m_result[index];
+ }
+
+ template<int LoadMode>
+ EIGEN_DEVICE_FUNC PacketReturnType packet(Index index) const {
+ return internal::ploadt<PacketReturnType, LoadMode>(m_result + index);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
+ // TODO(rmlarsen): Extend CustomOp API to return its cost estimate.
+ return TensorOpCost(sizeof(CoeffReturnType), 0, 0, vectorized, PacketSize);
+ }
+
+ EIGEN_DEVICE_FUNC CoeffReturnType* data() const { return m_result; }
+
+ protected:
+ EIGEN_DEVICE_FUNC void evalTo(Scalar* data) {
+ TensorMap<Tensor<Scalar, NumDims, Layout> > result(data, m_dimensions);
+ m_op.func().eval(m_op.lhsExpression(), m_op.rhsExpression(), result, m_device);
+ }
+
+ Dimensions m_dimensions;
+ const XprType m_op;
+ const Device& m_device;
+ CoeffReturnType* m_result;
+};
+
+
+} // end namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_CUSTOM_OP_H
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorDevice.h b/unsupported/Eigen/CXX11/src/Tensor/TensorDevice.h
new file mode 100644
index 000000000..29e50a3b2
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorDevice.h
@@ -0,0 +1,68 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_DEVICE_H
+#define EIGEN_CXX11_TENSOR_TENSOR_DEVICE_H
+
+namespace Eigen {
+
+/** \class TensorDevice
+ * \ingroup CXX11_Tensor_Module
+ *
+ * \brief Pseudo expression providing an operator = that will evaluate its argument
+ * on the specified computing 'device' (GPU, thread pool, ...)
+ *
+ * Example:
+ * C.device(EIGEN_GPU) = A + B;
+ *
+ * Todo: operator *= and /=.
+ */
+
+template <typename ExpressionType, typename DeviceType> class TensorDevice {
+ public:
+ TensorDevice(const DeviceType& device, ExpressionType& expression) : m_device(device), m_expression(expression) {}
+
+ template<typename OtherDerived>
+ EIGEN_STRONG_INLINE TensorDevice& operator=(const OtherDerived& other) {
+ typedef TensorAssignOp<ExpressionType, const OtherDerived> Assign;
+ Assign assign(m_expression, other);
+ internal::TensorExecutor<const Assign, DeviceType>::run(assign, m_device);
+ return *this;
+ }
+
+ template<typename OtherDerived>
+ EIGEN_STRONG_INLINE TensorDevice& operator+=(const OtherDerived& other) {
+ typedef typename OtherDerived::Scalar Scalar;
+ typedef TensorCwiseBinaryOp<internal::scalar_sum_op<Scalar>, const ExpressionType, const OtherDerived> Sum;
+ Sum sum(m_expression, other);
+ typedef TensorAssignOp<ExpressionType, const Sum> Assign;
+ Assign assign(m_expression, sum);
+ internal::TensorExecutor<const Assign, DeviceType>::run(assign, m_device);
+ return *this;
+ }
+
+ template<typename OtherDerived>
+ EIGEN_STRONG_INLINE TensorDevice& operator-=(const OtherDerived& other) {
+ typedef typename OtherDerived::Scalar Scalar;
+ typedef TensorCwiseBinaryOp<internal::scalar_difference_op<Scalar>, const ExpressionType, const OtherDerived> Difference;
+ Difference difference(m_expression, other);
+ typedef TensorAssignOp<ExpressionType, const Difference> Assign;
+ Assign assign(m_expression, difference);
+ internal::TensorExecutor<const Assign, DeviceType>::run(assign, m_device);
+ return *this;
+ }
+
+ protected:
+ const DeviceType& m_device;
+ ExpressionType& m_expression;
+};
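+
+// Usage sketch (assuming a ThreadPoolDevice built as in
+// TensorDeviceThreadPool.h): assigning through .device() routes evaluation
+// through the corresponding TensorExecutor.
+//   Eigen::ThreadPool pool(4);
+//   Eigen::ThreadPoolDevice dev(&pool, 4);
+//   c.device(dev) = a + b;   // evaluated on 4 threads
+//   c.device(dev) += a;      // compound assignment, same device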
+
+} // end namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_DEVICE_H
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceCuda.h b/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceCuda.h
new file mode 100644
index 000000000..4f5767bc7
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceCuda.h
@@ -0,0 +1,337 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#if defined(EIGEN_USE_GPU) && !defined(EIGEN_CXX11_TENSOR_TENSOR_DEVICE_CUDA_H)
+#define EIGEN_CXX11_TENSOR_TENSOR_DEVICE_CUDA_H
+
+namespace Eigen {
+
+static const int kCudaScratchSize = 1024;
+
+// This defines the interface through which GpuDevice accesses the
+// underlying CUDA streams.
+class StreamInterface {
+ public:
+ virtual ~StreamInterface() {}
+
+ virtual const cudaStream_t& stream() const = 0;
+ virtual const cudaDeviceProp& deviceProperties() const = 0;
+
+ // Allocate memory on the actual device where the computation will run
+ virtual void* allocate(size_t num_bytes) const = 0;
+ virtual void deallocate(void* buffer) const = 0;
+
+ // Return a scratchpad buffer of size 1k
+ virtual void* scratchpad() const = 0;
+
+  // Return a semaphore. The semaphore is initialized to 0, and each kernel
+  // using it is responsible for resetting it to 0 upon completion, which
+  // maintains the invariant that the semaphore is always equal to 0 when a
+  // kernel starts.
+ virtual unsigned int* semaphore() const = 0;
+};
+
+static cudaDeviceProp* m_deviceProperties;
+static bool m_devicePropInitialized = false;
+
+static void initializeDeviceProp() {
+ if (!m_devicePropInitialized) {
+    // Attempts to ensure proper behavior in the case of multiple threads
+    // calling this function simultaneously. This would be trivial to
+    // implement if we could use std::mutex, but unfortunately mutexes don't
+    // compile with nvcc, so we resort to atomics and thread fences instead.
+    // Note that if the caller uses a compiler that doesn't support C++11 we
+    // can't ensure that the initialization is thread-safe.
+#if __cplusplus >= 201103L
+ static std::atomic<bool> first(true);
+ if (first.exchange(false)) {
+#else
+ static bool first = true;
+ if (first) {
+ first = false;
+#endif
+ // We're the first thread to reach this point.
+ int num_devices;
+ cudaError_t status = cudaGetDeviceCount(&num_devices);
+ if (status != cudaSuccess) {
+ std::cerr << "Failed to get the number of CUDA devices: "
+ << cudaGetErrorString(status)
+ << std::endl;
+ assert(status == cudaSuccess);
+ }
+ m_deviceProperties = new cudaDeviceProp[num_devices];
+ for (int i = 0; i < num_devices; ++i) {
+ status = cudaGetDeviceProperties(&m_deviceProperties[i], i);
+ if (status != cudaSuccess) {
+ std::cerr << "Failed to initialize CUDA device #"
+ << i
+ << ": "
+ << cudaGetErrorString(status)
+ << std::endl;
+ assert(status == cudaSuccess);
+ }
+ }
+
+#if __cplusplus >= 201103L
+ std::atomic_thread_fence(std::memory_order_release);
+#endif
+ m_devicePropInitialized = true;
+ } else {
+    // Wait for the other thread to initialize the properties.
+ while (!m_devicePropInitialized) {
+#if __cplusplus >= 201103L
+ std::atomic_thread_fence(std::memory_order_acquire);
+#endif
+ sleep(1);
+ }
+ }
+ }
+}
+
+static const cudaStream_t default_stream = cudaStreamDefault;
+
+class CudaStreamDevice : public StreamInterface {
+ public:
+ // Use the default stream on the current device
+ CudaStreamDevice() : stream_(&default_stream), scratch_(NULL), semaphore_(NULL) {
+ cudaGetDevice(&device_);
+ initializeDeviceProp();
+ }
+ // Use the default stream on the specified device
+ CudaStreamDevice(int device) : stream_(&default_stream), device_(device), scratch_(NULL), semaphore_(NULL) {
+ initializeDeviceProp();
+ }
+  // Use the specified stream. Note that it's the caller's
+  // responsibility to ensure that the stream can run on
+  // the specified device. If no device is specified the code
+  // assumes that the stream is associated with the current GPU device.
+ CudaStreamDevice(const cudaStream_t* stream, int device = -1)
+ : stream_(stream), device_(device), scratch_(NULL), semaphore_(NULL) {
+ if (device < 0) {
+ cudaGetDevice(&device_);
+ } else {
+ int num_devices;
+ cudaError_t err = cudaGetDeviceCount(&num_devices);
+ EIGEN_UNUSED_VARIABLE(err)
+ assert(err == cudaSuccess);
+ assert(device < num_devices);
+ device_ = device;
+ }
+ initializeDeviceProp();
+ }
+
+ virtual ~CudaStreamDevice() {
+ if (scratch_) {
+ deallocate(scratch_);
+ }
+ }
+
+ const cudaStream_t& stream() const { return *stream_; }
+ const cudaDeviceProp& deviceProperties() const {
+ return m_deviceProperties[device_];
+ }
+ virtual void* allocate(size_t num_bytes) const {
+ cudaError_t err = cudaSetDevice(device_);
+ EIGEN_UNUSED_VARIABLE(err)
+ assert(err == cudaSuccess);
+ void* result;
+ err = cudaMalloc(&result, num_bytes);
+ assert(err == cudaSuccess);
+ assert(result != NULL);
+ return result;
+ }
+ virtual void deallocate(void* buffer) const {
+ cudaError_t err = cudaSetDevice(device_);
+ EIGEN_UNUSED_VARIABLE(err)
+ assert(err == cudaSuccess);
+ assert(buffer != NULL);
+ err = cudaFree(buffer);
+ assert(err == cudaSuccess);
+ }
+
+ virtual void* scratchpad() const {
+ if (scratch_ == NULL) {
+ scratch_ = allocate(kCudaScratchSize + sizeof(unsigned int));
+ }
+ return scratch_;
+ }
+
+ virtual unsigned int* semaphore() const {
+ if (semaphore_ == NULL) {
+ char* scratch = static_cast<char*>(scratchpad()) + kCudaScratchSize;
+ semaphore_ = reinterpret_cast<unsigned int*>(scratch);
+ cudaError_t err = cudaMemsetAsync(semaphore_, 0, sizeof(unsigned int), *stream_);
+ EIGEN_UNUSED_VARIABLE(err)
+ assert(err == cudaSuccess);
+ }
+ return semaphore_;
+ }
+
+ private:
+ const cudaStream_t* stream_;
+ int device_;
+ mutable void* scratch_;
+ mutable unsigned int* semaphore_;
+};
+
+struct GpuDevice {
+ // The StreamInterface is not owned: the caller is
+ // responsible for its initialization and eventual destruction.
+ explicit GpuDevice(const StreamInterface* stream) : stream_(stream), max_blocks_(INT_MAX) {
+ eigen_assert(stream);
+ }
+ explicit GpuDevice(const StreamInterface* stream, int num_blocks) : stream_(stream), max_blocks_(num_blocks) {
+ eigen_assert(stream);
+ }
+ // TODO(bsteiner): This is an internal API, we should not expose it.
+ EIGEN_STRONG_INLINE const cudaStream_t& stream() const {
+ return stream_->stream();
+ }
+
+ EIGEN_STRONG_INLINE void* allocate(size_t num_bytes) const {
+ return stream_->allocate(num_bytes);
+ }
+
+ EIGEN_STRONG_INLINE void deallocate(void* buffer) const {
+ stream_->deallocate(buffer);
+ }
+
+ EIGEN_STRONG_INLINE void* scratchpad() const {
+ return stream_->scratchpad();
+ }
+
+ EIGEN_STRONG_INLINE unsigned int* semaphore() const {
+ return stream_->semaphore();
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpy(void* dst, const void* src, size_t n) const {
+#ifndef __CUDA_ARCH__
+ cudaError_t err = cudaMemcpyAsync(dst, src, n, cudaMemcpyDeviceToDevice,
+ stream_->stream());
+ EIGEN_UNUSED_VARIABLE(err)
+ assert(err == cudaSuccess);
+#else
+ eigen_assert(false && "The default device should be used instead to generate kernel code");
+#endif
+ }
+
+ EIGEN_STRONG_INLINE void memcpyHostToDevice(void* dst, const void* src, size_t n) const {
+ cudaError_t err =
+ cudaMemcpyAsync(dst, src, n, cudaMemcpyHostToDevice, stream_->stream());
+ EIGEN_UNUSED_VARIABLE(err)
+ assert(err == cudaSuccess);
+ }
+
+ EIGEN_STRONG_INLINE void memcpyDeviceToHost(void* dst, const void* src, size_t n) const {
+ cudaError_t err =
+ cudaMemcpyAsync(dst, src, n, cudaMemcpyDeviceToHost, stream_->stream());
+ EIGEN_UNUSED_VARIABLE(err)
+ assert(err == cudaSuccess);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memset(void* buffer, int c, size_t n) const {
+#ifndef __CUDA_ARCH__
+ cudaError_t err = cudaMemsetAsync(buffer, c, n, stream_->stream());
+ EIGEN_UNUSED_VARIABLE(err)
+ assert(err == cudaSuccess);
+#else
+ eigen_assert(false && "The default device should be used instead to generate kernel code");
+#endif
+ }
+
+ EIGEN_STRONG_INLINE size_t numThreads() const {
+ // FIXME
+ return 32;
+ }
+
+ EIGEN_STRONG_INLINE size_t firstLevelCacheSize() const {
+ // FIXME
+ return 48*1024;
+ }
+
+ EIGEN_STRONG_INLINE size_t lastLevelCacheSize() const {
+    // We won't try to take advantage of the L2 cache for the time being, and
+    // there is no L3 cache on CUDA devices.
+ return firstLevelCacheSize();
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void synchronize() const {
+#if defined(__CUDACC__) && !defined(__CUDA_ARCH__)
+ cudaError_t err = cudaStreamSynchronize(stream_->stream());
+ if (err != cudaSuccess) {
+ std::cerr << "Error detected in CUDA stream: "
+ << cudaGetErrorString(err)
+ << std::endl;
+ assert(err == cudaSuccess);
+ }
+#else
+ assert(false && "The default device should be used instead to generate kernel code");
+#endif
+ }
+
+ EIGEN_STRONG_INLINE int getNumCudaMultiProcessors() const {
+ return stream_->deviceProperties().multiProcessorCount;
+ }
+ EIGEN_STRONG_INLINE int maxCudaThreadsPerBlock() const {
+ return stream_->deviceProperties().maxThreadsPerBlock;
+ }
+ EIGEN_STRONG_INLINE int maxCudaThreadsPerMultiProcessor() const {
+ return stream_->deviceProperties().maxThreadsPerMultiProcessor;
+ }
+ EIGEN_STRONG_INLINE int sharedMemPerBlock() const {
+ return stream_->deviceProperties().sharedMemPerBlock;
+ }
+ EIGEN_STRONG_INLINE int majorDeviceVersion() const {
+ return stream_->deviceProperties().major;
+ }
+ EIGEN_STRONG_INLINE int minorDeviceVersion() const {
+ return stream_->deviceProperties().minor;
+ }
+
+ EIGEN_STRONG_INLINE int maxBlocks() const {
+ return max_blocks_;
+ }
+
+ // This function checks if the CUDA runtime recorded an error for the
+ // underlying stream device.
+ inline bool ok() const {
+#ifdef __CUDACC__
+ cudaError_t error = cudaStreamQuery(stream_->stream());
+ return (error == cudaSuccess) || (error == cudaErrorNotReady);
+#else
+ return false;
+#endif
+ }
+
+ private:
+ const StreamInterface* stream_;
+ int max_blocks_;
+};
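+
+// Typical setup (sketch; error checking omitted): wrap a CUDA stream in a
+// CudaStreamDevice and hand it to GpuDevice. The StreamInterface must outlive
+// the GpuDevice.
+//   cudaStream_t stream;
+//   cudaStreamCreate(&stream);
+//   Eigen::CudaStreamDevice stream_device(&stream);
+//   Eigen::GpuDevice gpu_device(&stream_device);
+//   c.device(gpu_device) = a + b;  // kernels are launched on 'stream'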
+
+#define LAUNCH_CUDA_KERNEL(kernel, gridsize, blocksize, sharedmem, device, ...) \
+ (kernel) <<< (gridsize), (blocksize), (sharedmem), (device).stream() >>> (__VA_ARGS__); \
+ assert(cudaGetLastError() == cudaSuccess);
+
+
+// FIXME: Should be device and kernel specific.
+#ifdef __CUDACC__
+static EIGEN_DEVICE_FUNC inline void setCudaSharedMemConfig(cudaSharedMemConfig config) {
+#ifndef __CUDA_ARCH__
+ cudaError_t status = cudaDeviceSetSharedMemConfig(config);
+ EIGEN_UNUSED_VARIABLE(status)
+ assert(status == cudaSuccess);
+#else
+ EIGEN_UNUSED_VARIABLE(config)
+#endif
+}
+#endif
+
+} // end namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_DEVICE_CUDA_H
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceDefault.h b/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceDefault.h
new file mode 100644
index 000000000..9d141395b
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceDefault.h
@@ -0,0 +1,81 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_DEVICE_DEFAULT_H
+#define EIGEN_CXX11_TENSOR_TENSOR_DEVICE_DEFAULT_H
+
+
+namespace Eigen {
+
+// Default device for the machine (typically a single cpu core)
+struct DefaultDevice {
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void* allocate(size_t num_bytes) const {
+ return internal::aligned_malloc(num_bytes);
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void deallocate(void* buffer) const {
+ internal::aligned_free(buffer);
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpy(void* dst, const void* src, size_t n) const {
+ ::memcpy(dst, src, n);
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpyHostToDevice(void* dst, const void* src, size_t n) const {
+ memcpy(dst, src, n);
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpyDeviceToHost(void* dst, const void* src, size_t n) const {
+ memcpy(dst, src, n);
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memset(void* buffer, int c, size_t n) const {
+ ::memset(buffer, c, n);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE size_t numThreads() const {
+#ifndef __CUDA_ARCH__
+ // Running on the host CPU
+ return 1;
+#else
+ // Running on a CUDA device
+ return 32;
+#endif
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE size_t firstLevelCacheSize() const {
+#ifndef __CUDA_ARCH__
+ // Running on the host CPU
+ return l1CacheSize();
+#else
+ // Running on a CUDA device, return the amount of shared memory available.
+ return 48*1024;
+#endif
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE size_t lastLevelCacheSize() const {
+#ifndef __CUDA_ARCH__
+ // Running single threaded on the host CPU
+ return l3CacheSize();
+#else
+ // Running on a CUDA device
+ return firstLevelCacheSize();
+#endif
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int majorDeviceVersion() const {
+#ifndef __CUDA_ARCH__
+ // Running single threaded on the host CPU
+ // Should return an enum that encodes the ISA supported by the CPU
+ return 1;
+#else
+ // Running on a CUDA device
+ return __CUDA_ARCH__ / 100;
+#endif
+ }
+};
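+
+// DefaultDevice is what plain tensor assignments use implicitly, so the two
+// statements below are equivalent (sketch):
+//   c = a + b;
+//   c.device(Eigen::DefaultDevice()) = a + b;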
+
+} // namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_DEVICE_DEFAULT_H
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h b/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h
new file mode 100644
index 000000000..7c039890e
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h
@@ -0,0 +1,122 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Mehdi Goli Codeplay Software Ltd.
+// Ralph Potter Codeplay Software Ltd.
+// Luke Iwanski Codeplay Software Ltd.
+// Contact: <eigen@codeplay.com>
+// Copyright (C) 2016 Benoit Steiner <benoit.steiner.goog@gmail.com>
+
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#if defined(EIGEN_USE_SYCL) && !defined(EIGEN_CXX11_TENSOR_TENSOR_DEVICE_SYCL_H)
+#define EIGEN_CXX11_TENSOR_TENSOR_DEVICE_SYCL_H
+
+namespace Eigen {
+struct SyclDevice {
+ /// class members
+ /// sycl queue
+ mutable cl::sycl::queue m_queue;
+ /// std::map is the container used to make sure that we create only one buffer
+  /// per pointer. The lifespan of the buffer is tied to the lifespan of the SyclDevice.
+  /// If a non-read-only pointer needs to be accessed on the host, it must be deallocated manually.
+ mutable std::map<const void *, std::shared_ptr<void>> buffer_map;
+  /// Create the device from the given device selector.
+ template<typename dev_Selector> SyclDevice(dev_Selector s)
+ :
+#ifdef EIGEN_EXCEPTIONS
+ m_queue(cl::sycl::queue(s, [=](cl::sycl::exception_list l) {
+ for (const auto& e : l) {
+ try {
+ std::rethrow_exception(e);
+ } catch (cl::sycl::exception e) {
+ std::cout << e.what() << std::endl;
+ }
+ }
+ }))
+#else
+ m_queue(cl::sycl::queue(s))
+#endif
+ {}
+ // destructor
+ ~SyclDevice() { deallocate_all(); }
+
+ template <typename T> void deallocate(T *p) const {
+ auto it = buffer_map.find(p);
+ if (it != buffer_map.end()) {
+ buffer_map.erase(it);
+ internal::aligned_free(p);
+ }
+ }
+ void deallocate_all() const {
+ std::map<const void *, std::shared_ptr<void>>::iterator it=buffer_map.begin();
+ while (it!=buffer_map.end()) {
+ auto p=it->first;
+ buffer_map.erase(it);
+ internal::aligned_free(const_cast<void*>(p));
+ it=buffer_map.begin();
+ }
+ buffer_map.clear();
+ }
+
+  /// Creation of a sycl accessor for a buffer. This function first tries to find
+  /// the buffer in the buffer_map. If found, it gets the accessor from it; if not,
+  /// the function adds an entry by creating a sycl buffer for that particular pointer.
+ template <cl::sycl::access::mode AcMd, typename T> inline cl::sycl::accessor<T, 1, AcMd, cl::sycl::access::target::global_buffer>
+ get_sycl_accessor(size_t num_bytes, cl::sycl::handler &cgh, const T * ptr) const {
+ return (get_sycl_buffer<T>(num_bytes, ptr)->template get_access<AcMd, cl::sycl::access::target::global_buffer>(cgh));
+ }
+
+ template<typename T> inline std::pair<std::map<const void *, std::shared_ptr<void>>::iterator,bool> add_sycl_buffer(const T *ptr, size_t num_bytes) const {
+ using Type = cl::sycl::buffer<T, 1>;
+ std::pair<std::map<const void *, std::shared_ptr<void>>::iterator,bool> ret = buffer_map.insert(std::pair<const void *, std::shared_ptr<void>>(ptr, std::shared_ptr<void>(new Type(cl::sycl::range<1>(num_bytes)),
+ [](void *dataMem) { delete static_cast<Type*>(dataMem); })));
+ (static_cast<Type*>(buffer_map.at(ptr).get()))->set_final_data(nullptr);
+ return ret;
+ }
+
+ template <typename T> inline cl::sycl::buffer<T, 1>* get_sycl_buffer(size_t num_bytes,const T * ptr) const {
+ return static_cast<cl::sycl::buffer<T, 1>*>(add_sycl_buffer(ptr, num_bytes).first->second.get());
+ }
+
+ /// allocating memory on the cpu
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void *allocate(size_t) const {
+ return internal::aligned_malloc(8);
+ }
+
+ // some runtime conditions that can be applied here
+ bool isDeviceSuitable() const { return true; }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpy(void *dst, const void *src, size_t n) const {
+ ::memcpy(dst, src, n);
+ }
+
+ template<typename T> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpyHostToDevice(T *dst, const T *src, size_t n) const {
+ auto host_acc= (static_cast<cl::sycl::buffer<T, 1>*>(add_sycl_buffer(dst, n).first->second.get()))-> template get_access<cl::sycl::access::mode::discard_write, cl::sycl::access::target::host_buffer>();
+ memcpy(host_acc.get_pointer(), src, n);
+ }
+  /// With the current implementation of sycl, the data is copied twice from device to host. This will be fixed soon.
+ template<typename T> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpyDeviceToHost(T *dst, const T *src, size_t n) const {
+ auto it = buffer_map.find(src);
+ if (it != buffer_map.end()) {
+ auto host_acc= (static_cast<cl::sycl::buffer<T, 1>*>(it->second.get()))-> template get_access<cl::sycl::access::mode::read, cl::sycl::access::target::host_buffer>();
+ memcpy(dst,host_acc.get_pointer(), n);
+ } else{
+      eigen_assert(false && "no device memory found. The memory might have been destroyed before use");
+ }
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memset(void *buffer, int c, size_t n) const {
+ ::memset(buffer, c, n);
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int majorDeviceVersion() const {
+ return 1;
+ }
+};
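+
+// Usage sketch (the selector name is illustrative, and expression support was
+// still experimental at this point): construct the device from a sycl device
+// selector and evaluate through it.
+//   cl::sycl::gpu_selector selector;
+//   Eigen::SyclDevice sycl_device(selector);
+//   c.device(sycl_device) = a + b;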
+
+} // end namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_DEVICE_SYCL_H
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceThreadPool.h b/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceThreadPool.h
new file mode 100644
index 000000000..069680a11
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceThreadPool.h
@@ -0,0 +1,279 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#if defined(EIGEN_USE_THREADS) && !defined(EIGEN_CXX11_TENSOR_TENSOR_DEVICE_THREAD_POOL_H)
+#define EIGEN_CXX11_TENSOR_TENSOR_DEVICE_THREAD_POOL_H
+
+namespace Eigen {
+
+// Use the NonBlockingThreadPool by default; define EIGEN_USE_SIMPLE_THREAD_POOL
+// to fall back to the older SimpleThreadPool implementation.
+#ifndef EIGEN_USE_SIMPLE_THREAD_POOL
+template <typename Env> using ThreadPoolTempl = NonBlockingThreadPoolTempl<Env>;
+typedef NonBlockingThreadPool ThreadPool;
+#else
+template <typename Env> using ThreadPoolTempl = SimpleThreadPoolTempl<Env>;
+typedef SimpleThreadPool ThreadPool;
+#endif
+
+
+// Barrier is an object that allows one or more threads to wait until
+// Notify has been called a specified number of times.
+class Barrier {
+ public:
+ Barrier(unsigned int count) : state_(count << 1), notified_(false) {
+ eigen_assert(((count << 1) >> 1) == count);
+ }
+ ~Barrier() {
+ eigen_assert((state_>>1) == 0);
+ }
+
+ void Notify() {
+ unsigned int v = state_.fetch_sub(2, std::memory_order_acq_rel) - 2;
+ if (v != 1) {
+ eigen_assert(((v + 2) & ~1) != 0);
+ return; // either count has not dropped to 0, or waiter is not waiting
+ }
+ std::unique_lock<std::mutex> l(mu_);
+ eigen_assert(!notified_);
+ notified_ = true;
+ cv_.notify_all();
+ }
+
+ void Wait() {
+ unsigned int v = state_.fetch_or(1, std::memory_order_acq_rel);
+ if ((v >> 1) == 0) return;
+ std::unique_lock<std::mutex> l(mu_);
+ while (!notified_) {
+ cv_.wait(l);
+ }
+ }
+
+ private:
+ std::mutex mu_;
+ std::condition_variable cv_;
+ std::atomic<unsigned int> state_; // low bit is waiter flag
+ bool notified_;
+};
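+
+// Intended protocol (sketch): construct with the number of expected Notify()
+// calls, have each worker call Notify() once, and have the waiter block in
+// Wait().
+//   Barrier barrier(2);
+//   pool.Schedule([&]() { work_a(); barrier.Notify(); });
+//   pool.Schedule([&]() { work_b(); barrier.Notify(); });
+//   barrier.Wait();  // returns once both workers have notified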
+
+
+// Notification is an object that allows a user to wait for another
+// thread to signal that an event has occurred.
+//
+// Multiple threads can wait on the same Notification object,
+// but only one caller must call Notify() on the object.
+struct Notification : Barrier {
+  Notification() : Barrier(1) {}
+};
+
+
+// Runs an arbitrary function and then calls Notify() on the passed in
+// Notification.
+template <typename Function, typename... Args> struct FunctionWrapperWithNotification
+{
+ static void run(Notification* n, Function f, Args... args) {
+ f(args...);
+ if (n) {
+ n->Notify();
+ }
+ }
+};
+
+template <typename Function, typename... Args> struct FunctionWrapperWithBarrier
+{
+ static void run(Barrier* b, Function f, Args... args) {
+ f(args...);
+ if (b) {
+ b->Notify();
+ }
+ }
+};
+
+template <typename SyncType>
+static EIGEN_STRONG_INLINE void wait_until_ready(SyncType* n) {
+ if (n) {
+ n->Wait();
+ }
+}
+
+
+// Build a thread pool device on top of an existing pool of threads.
+struct ThreadPoolDevice {
+ // The ownership of the thread pool remains with the caller.
+ ThreadPoolDevice(ThreadPoolInterface* pool, int num_cores) : pool_(pool), num_threads_(num_cores) { }
+
+ EIGEN_STRONG_INLINE void* allocate(size_t num_bytes) const {
+ return internal::aligned_malloc(num_bytes);
+ }
+
+ EIGEN_STRONG_INLINE void deallocate(void* buffer) const {
+ internal::aligned_free(buffer);
+ }
+
+ EIGEN_STRONG_INLINE void memcpy(void* dst, const void* src, size_t n) const {
+ ::memcpy(dst, src, n);
+ }
+ EIGEN_STRONG_INLINE void memcpyHostToDevice(void* dst, const void* src, size_t n) const {
+ memcpy(dst, src, n);
+ }
+ EIGEN_STRONG_INLINE void memcpyDeviceToHost(void* dst, const void* src, size_t n) const {
+ memcpy(dst, src, n);
+ }
+
+ EIGEN_STRONG_INLINE void memset(void* buffer, int c, size_t n) const {
+ ::memset(buffer, c, n);
+ }
+
+ EIGEN_STRONG_INLINE int numThreads() const {
+ return num_threads_;
+ }
+
+ EIGEN_STRONG_INLINE size_t firstLevelCacheSize() const {
+ return l1CacheSize();
+ }
+
+ EIGEN_STRONG_INLINE size_t lastLevelCacheSize() const {
+ // The l3 cache size is shared between all the cores.
+ return l3CacheSize() / num_threads_;
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int majorDeviceVersion() const {
+ // Should return an enum that encodes the ISA supported by the CPU
+ return 1;
+ }
+
+ template <class Function, class... Args>
+ EIGEN_STRONG_INLINE Notification* enqueue(Function&& f, Args&&... args) const {
+ Notification* n = new Notification();
+ pool_->Schedule(std::bind(&FunctionWrapperWithNotification<Function, Args...>::run, n, f, args...));
+ return n;
+ }
+
+ template <class Function, class... Args>
+ EIGEN_STRONG_INLINE void enqueue_with_barrier(Barrier* b,
+ Function&& f,
+ Args&&... args) const {
+ pool_->Schedule(std::bind(
+ &FunctionWrapperWithBarrier<Function, Args...>::run, b, f, args...));
+ }
+
+ template <class Function, class... Args>
+ EIGEN_STRONG_INLINE void enqueueNoNotification(Function&& f, Args&&... args) const {
+ pool_->Schedule(std::bind(f, args...));
+ }
+
+ // Returns a logical thread index between 0 and pool_->NumThreads() - 1 if
+ // called from one of the threads in pool_. Returns -1 otherwise.
+ EIGEN_STRONG_INLINE int currentThreadId() const {
+ return pool_->CurrentThreadId();
+ }
+
+  // parallelFor executes f over the index range [0, n) in parallel and waits
+  // for completion. f accepts a half-open interval [first, last).
+  // The block size is chosen based on the iteration cost and the resulting
+  // parallel efficiency. If block_align is not nullptr, it is called to round
+  // up the block size.
+ void parallelFor(Index n, const TensorOpCost& cost,
+ std::function<Index(Index)> block_align,
+ std::function<void(Index, Index)> f) const {
+ typedef TensorCostModel<ThreadPoolDevice> CostModel;
+ if (n <= 1 || numThreads() == 1 ||
+ CostModel::numThreads(n, cost, static_cast<int>(numThreads())) == 1) {
+ f(0, n);
+ return;
+ }
+
+    // Calculate block size based on (1) the iteration cost and (2) parallel
+    // efficiency. We want blocks to be neither too small (to mitigate
+    // parallelization overheads) nor too large (to mitigate tail effects and
+    // potential load imbalance), and we also want the number of blocks to
+    // divide evenly across threads.
+
+ double block_size_f = 1.0 / CostModel::taskSize(1, cost);
+ Index block_size = numext::mini(n, numext::maxi<Index>(1, block_size_f));
+ const Index max_block_size =
+ numext::mini(n, numext::maxi<Index>(1, 2 * block_size_f));
+ if (block_align) {
+ Index new_block_size = block_align(block_size);
+ eigen_assert(new_block_size >= block_size);
+ block_size = numext::mini(n, new_block_size);
+ }
+ Index block_count = divup(n, block_size);
+    // Calculate parallel efficiency as the fraction of total CPU time used
+    // for computations:
+ double max_efficiency =
+ static_cast<double>(block_count) /
+ (divup<int>(block_count, numThreads()) * numThreads());
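+    // For example (illustrative numbers, not part of this patch): with
+    // block_count = 10 and numThreads() = 4, the last "round" of work runs
+    // only 2 blocks on 4 threads, so max_efficiency = 10 / (divup(10, 4) * 4)
+    // = 10 / 12, roughly 0.83.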
+ // Now try to increase block size up to max_block_size as long as it
+ // doesn't decrease parallel efficiency.
+ for (Index prev_block_count = block_count; prev_block_count > 1;) {
+      // This is the next block size that divides the range into a smaller
+      // number of blocks than prev_block_count.
+ Index coarser_block_size = divup(n, prev_block_count - 1);
+ if (block_align) {
+ Index new_block_size = block_align(coarser_block_size);
+ eigen_assert(new_block_size >= coarser_block_size);
+ coarser_block_size = numext::mini(n, new_block_size);
+ }
+ if (coarser_block_size > max_block_size) {
+ break; // Reached max block size. Stop.
+ }
+ // Recalculate parallel efficiency.
+ const Index coarser_block_count = divup(n, coarser_block_size);
+ eigen_assert(coarser_block_count < prev_block_count);
+ prev_block_count = coarser_block_count;
+ const double coarser_efficiency =
+ static_cast<double>(coarser_block_count) /
+ (divup<int>(coarser_block_count, numThreads()) * numThreads());
+ if (coarser_efficiency + 0.01 >= max_efficiency) {
+ // Taking it.
+ block_size = coarser_block_size;
+ block_count = coarser_block_count;
+ if (max_efficiency < coarser_efficiency) {
+ max_efficiency = coarser_efficiency;
+ }
+ }
+ }
+
+    // Recursively divide the range into halves until we reach block_size.
+    // The division code rounds mid up to a multiple of block_size, so we are
+    // guaranteed to get block_count leaves that do the actual computations.
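+    // For example (illustrative): with n = 10 and block_size = 3, handleRange
+    // splits [0, 10) into [0, 6) and [6, 10), and those into the leaves
+    // [0, 3), [3, 6), [6, 9) and [9, 10) -- exactly block_count =
+    // divup(10, 3) = 4 blocks.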
+ Barrier barrier(static_cast<unsigned int>(block_count));
+ std::function<void(Index, Index)> handleRange;
+ handleRange = [=, &handleRange, &barrier, &f](Index first, Index last) {
+ if (last - first <= block_size) {
+ // Single block or less, execute directly.
+ f(first, last);
+ barrier.Notify();
+ return;
+ }
+ // Split into halves and submit to the pool.
+ Index mid = first + divup((last - first) / 2, block_size) * block_size;
+ pool_->Schedule([=, &handleRange]() { handleRange(mid, last); });
+ pool_->Schedule([=, &handleRange]() { handleRange(first, mid); });
+ };
+ handleRange(0, n);
+ barrier.Wait();
+ }
+
+ // Convenience wrapper for parallelFor that does not align blocks.
+ void parallelFor(Index n, const TensorOpCost& cost,
+ std::function<void(Index, Index)> f) const {
+ parallelFor(n, cost, nullptr, std::move(f));
+ }
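+  // A minimal usage sketch (illustrative only; the cost figures below are
+  // assumptions, not something this patch prescribes):
+  //
+  //   ThreadPoolDevice device(&pool, num_threads);
+  //   TensorOpCost cost(/*bytes_loaded=*/sizeof(float),
+  //                     /*bytes_stored=*/sizeof(float),
+  //                     /*compute_cycles=*/1);
+  //   device.parallelFor(n, cost, [&](Index first, Index last) {
+  //     for (Index i = first; i < last; ++i) out[i] = 2.0f * in[i];
+  //   });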
+
+ private:
+ ThreadPoolInterface* pool_;
+ int num_threads_;
+};
+
+
+} // end namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_DEVICE_THREAD_POOL_H
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorDimensionList.h b/unsupported/Eigen/CXX11/src/Tensor/TensorDimensionList.h
new file mode 100644
index 000000000..1a30e45fb
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorDimensionList.h
@@ -0,0 +1,236 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2015 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_DIMENSION_LIST_H
+#define EIGEN_CXX11_TENSOR_TENSOR_DIMENSION_LIST_H
+
+namespace Eigen {
+
+/** \internal
+ *
+ * \class TensorDimensionList
+ * \ingroup CXX11_Tensor_Module
+ *
+ * \brief Special case of tensor index list used to list all the dimensions of a tensor of rank n.
+ *
+ * \sa Tensor
+ */
+
+template <typename Index, std::size_t Rank> struct DimensionList {
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
+ const Index operator[] (const Index i) const { return i; }
+};
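+// Reading of the code above (not an addition to the API): for a rank-3
+// tensor, DimensionList<Index, 3> behaves like the index list {0, 1, 2},
+// since operator[] simply returns its argument. It is therefore a
+// zero-storage stand-in for "all dimensions of the tensor", e.g. when
+// reducing over every dimension.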
+
+namespace internal {
+
+template<typename Index, std::size_t Rank> struct array_size<DimensionList<Index, Rank> > {
+ static const size_t value = Rank;
+};
+template<typename Index, std::size_t Rank> struct array_size<const DimensionList<Index, Rank> > {
+ static const size_t value = Rank;
+};
+
+template<DenseIndex n, typename Index, std::size_t Rank> const Index array_get(DimensionList<Index, Rank>&) {
+ return n;
+}
+template<DenseIndex n, typename Index, std::size_t Rank> const Index array_get(const DimensionList<Index, Rank>&) {
+ return n;
+}
+
+
+#if EIGEN_HAS_CONSTEXPR
+template <typename Index, std::size_t Rank>
+struct index_known_statically_impl<DimensionList<Index, Rank> > {
+ EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex) {
+ return true;
+ }
+};
+template <typename Index, std::size_t Rank>
+struct index_known_statically_impl<const DimensionList<Index, Rank> > {
+ EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex) {
+ return true;
+ }
+};
+
+template <typename Index, std::size_t Rank>
+struct all_indices_known_statically_impl<DimensionList<Index, Rank> > {
+ EIGEN_DEVICE_FUNC static constexpr bool run() {
+ return true;
+ }
+};
+template <typename Index, std::size_t Rank>
+struct all_indices_known_statically_impl<const DimensionList<Index, Rank> > {
+ EIGEN_DEVICE_FUNC static constexpr bool run() {
+ return true;
+ }
+};
+
+template <typename Index, std::size_t Rank>
+struct indices_statically_known_to_increase_impl<DimensionList<Index, Rank> > {
+ EIGEN_DEVICE_FUNC static constexpr bool run() {
+ return true;
+ }
+};
+template <typename Index, std::size_t Rank>
+struct indices_statically_known_to_increase_impl<const DimensionList<Index, Rank> > {
+ EIGEN_DEVICE_FUNC static constexpr bool run() {
+ return true;
+ }
+};
+
+template <typename Index, std::size_t Rank>
+struct index_statically_eq_impl<DimensionList<Index, Rank> > {
+ static constexpr bool run(const DenseIndex i, const DenseIndex value) {
+ return i == value;
+ }
+};
+template <typename Index, std::size_t Rank>
+struct index_statically_eq_impl<const DimensionList<Index, Rank> > {
+ EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) {
+ return i == value;
+ }
+};
+
+template <typename Index, std::size_t Rank>
+struct index_statically_ne_impl<DimensionList<Index, Rank> > {
+ EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) {
+ return i != value;
+ }
+};
+template <typename Index, std::size_t Rank>
+struct index_statically_ne_impl<const DimensionList<Index, Rank> > {
+ static constexpr bool run(const DenseIndex i, const DenseIndex value) {
+ return i != value;
+ }
+};
+
+template <typename Index, std::size_t Rank>
+struct index_statically_gt_impl<DimensionList<Index, Rank> > {
+ EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) {
+ return i > value;
+ }
+};
+template <typename Index, std::size_t Rank>
+struct index_statically_gt_impl<const DimensionList<Index, Rank> > {
+ EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) {
+ return i > value;
+ }
+};
+
+template <typename Index, std::size_t Rank>
+struct index_statically_lt_impl<DimensionList<Index, Rank> > {
+ EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) {
+ return i < value;
+ }
+};
+template <typename Index, std::size_t Rank>
+struct index_statically_lt_impl<const DimensionList<Index, Rank> > {
+ EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) {
+ return i < value;
+ }
+};
+
+#else
+template <typename Index, std::size_t Rank>
+struct index_known_statically_impl<DimensionList<Index, Rank> > {
+ EIGEN_DEVICE_FUNC static EIGEN_ALWAYS_INLINE bool run(const DenseIndex) {
+ return true;
+ }
+};
+template <typename Index, std::size_t Rank>
+struct index_known_statically_impl<const DimensionList<Index, Rank> > {
+ EIGEN_DEVICE_FUNC static EIGEN_ALWAYS_INLINE bool run(const DenseIndex) {
+ return true;
+ }
+};
+
+template <typename Index, std::size_t Rank>
+struct all_indices_known_statically_impl<DimensionList<Index, Rank> > {
+ EIGEN_DEVICE_FUNC static EIGEN_ALWAYS_INLINE bool run() {
+ return true;
+ }
+};
+template <typename Index, std::size_t Rank>
+struct all_indices_known_statically_impl<const DimensionList<Index, Rank> > {
+ EIGEN_DEVICE_FUNC static EIGEN_ALWAYS_INLINE bool run() {
+ return true;
+ }
+};
+
+template <typename Index, std::size_t Rank>
+struct indices_statically_known_to_increase_impl<DimensionList<Index, Rank> > {
+ static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run() {
+ return true;
+ }
+};
+template <typename Index, std::size_t Rank>
+struct indices_statically_known_to_increase_impl<const DimensionList<Index, Rank> > {
+ static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run() {
+ return true;
+ }
+};
+
+template <typename Index, std::size_t Rank>
+struct index_statically_eq_impl<DimensionList<Index, Rank> > {
+ static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(const DenseIndex, const DenseIndex) {
+ return false;
+ }
+};
+template <typename Index, std::size_t Rank>
+struct index_statically_eq_impl<const DimensionList<Index, Rank> > {
+ static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(const DenseIndex, const DenseIndex) {
+ return false;
+ }
+};
+
+template <typename Index, std::size_t Rank>
+struct index_statically_ne_impl<DimensionList<Index, Rank> > {
+  static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(const DenseIndex, const DenseIndex) {
+ return false;
+ }
+};
+template <typename Index, std::size_t Rank>
+struct index_statically_ne_impl<const DimensionList<Index, Rank> > {
+ static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(const DenseIndex, const DenseIndex) {
+ return false;
+ }
+};
+
+template <typename Index, std::size_t Rank>
+struct index_statically_gt_impl<DimensionList<Index, Rank> > {
+ static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(const DenseIndex, const DenseIndex) {
+ return false;
+ }
+};
+template <typename Index, std::size_t Rank>
+struct index_statically_gt_impl<const DimensionList<Index, Rank> > {
+ static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(const DenseIndex, const DenseIndex) {
+ return false;
+ }
+};
+
+template <typename Index, std::size_t Rank>
+struct index_statically_lt_impl<DimensionList<Index, Rank> > {
+ static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(const DenseIndex, const DenseIndex) {
+ return false;
+ }
+};
+template <typename Index, std::size_t Rank>
+struct index_statically_lt_impl<const DimensionList<Index, Rank> > {
+ static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(const DenseIndex, const DenseIndex) {
+ return false;
+ }
+};
+#endif
+
+} // end namespace internal
+} // end namespace Eigen
+
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_DIMENSION_LIST_H
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorDimensions.h b/unsupported/Eigen/CXX11/src/Tensor/TensorDimensions.h
new file mode 100644
index 000000000..b24cdebf1
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorDimensions.h
@@ -0,0 +1,428 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_DIMENSIONS_H
+#define EIGEN_CXX11_TENSOR_TENSOR_DIMENSIONS_H
+
+
+namespace Eigen {
+
+/** \internal
+ *
+ * \class TensorDimensions
+ * \ingroup CXX11_Tensor_Module
+ *
+ * \brief Set of classes used to encode and store the dimensions of a Tensor.
+ *
+ * The Sizes class encodes, as part of the type, both the number of dimensions
+ * and the size of each dimension. It uses no storage space since everything
+ * is known at compile time.
+ * The DSizes class is its dynamic sibling: the number of dimensions is known
+ * at compile time, but the sizes are set at run time.
+ *
+ * \sa Tensor
+ */
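+// A minimal usage sketch (illustrative, not part of this patch):
+//
+//   Sizes<2, 3, 4> s;                  // rank and extents fixed at compile time
+//   s.TotalSize();                     // == 24, derived from the type alone
+//   DSizes<DenseIndex, 3> d(2, 3, 4);  // rank fixed, extents set at run time
+//   d.TotalSize();                     // == 24, computed from the stored values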
+
+// Boilerplate code
+namespace internal {
+
+template<std::size_t n, typename Dimension> struct dget {
+ static const std::size_t value = get<n, Dimension>::value;
+};
+
+
+template<typename Index, std::size_t NumIndices, std::size_t n, bool RowMajor>
+struct fixed_size_tensor_index_linearization_helper
+{
+ template <typename Dimensions> EIGEN_DEVICE_FUNC
+ static inline Index run(array<Index, NumIndices> const& indices,
+ const Dimensions& dimensions)
+ {
+ return array_get<RowMajor ? n - 1 : (NumIndices - n)>(indices) +
+ dget<RowMajor ? n - 1 : (NumIndices - n), Dimensions>::value *
+ fixed_size_tensor_index_linearization_helper<Index, NumIndices, n - 1, RowMajor>::run(indices, dimensions);
+ }
+};
+
+template<typename Index, std::size_t NumIndices, bool RowMajor>
+struct fixed_size_tensor_index_linearization_helper<Index, NumIndices, 0, RowMajor>
+{
+ template <typename Dimensions> EIGEN_DEVICE_FUNC
+ static inline Index run(array<Index, NumIndices> const&, const Dimensions&)
+ {
+ return 0;
+ }
+};
+
+template<typename Index, std::size_t n>
+struct fixed_size_tensor_index_extraction_helper
+{
+ template <typename Dimensions> EIGEN_DEVICE_FUNC
+ static inline Index run(const Index index,
+ const Dimensions& dimensions)
+ {
+ const Index mult = (index == n-1) ? 1 : 0;
+ return array_get<n-1>(dimensions) * mult +
+ fixed_size_tensor_index_extraction_helper<Index, n - 1>::run(index, dimensions);
+ }
+};
+
+template<typename Index>
+struct fixed_size_tensor_index_extraction_helper<Index, 0>
+{
+ template <typename Dimensions> EIGEN_DEVICE_FUNC
+ static inline Index run(const Index,
+ const Dimensions&)
+ {
+ return 0;
+ }
+};
+
+} // end namespace internal
+
+
+// Fixed size
+#ifndef EIGEN_EMULATE_CXX11_META_H
+template <typename std::ptrdiff_t... Indices>
+struct Sizes : internal::numeric_list<std::ptrdiff_t, Indices...> {
+ typedef internal::numeric_list<std::ptrdiff_t, Indices...> Base;
+ static const std::ptrdiff_t total_size = internal::arg_prod(Indices...);
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::ptrdiff_t rank() const {
+ return Base::count;
+ }
+
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::ptrdiff_t TotalSize() {
+ return internal::arg_prod(Indices...);
+ }
+
+ EIGEN_DEVICE_FUNC Sizes() { }
+ template <typename DenseIndex>
+ explicit EIGEN_DEVICE_FUNC Sizes(const array<DenseIndex, Base::count>& /*indices*/) {
+ // todo: add assertion
+ }
+#if EIGEN_HAS_VARIADIC_TEMPLATES
+ template <typename... DenseIndex> EIGEN_DEVICE_FUNC Sizes(DenseIndex...) { }
+ explicit EIGEN_DEVICE_FUNC Sizes(std::initializer_list<std::ptrdiff_t> /*l*/) {
+ // todo: add assertion
+ }
+#endif
+
+ template <typename T> Sizes& operator = (const T& /*other*/) {
+ // add assertion failure if the size of other is different
+ return *this;
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::ptrdiff_t operator[] (const std::size_t index) const {
+ return internal::fixed_size_tensor_index_extraction_helper<std::ptrdiff_t, Base::count>::run(index, *this);
+ }
+
+ template <typename DenseIndex> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ size_t IndexOfColMajor(const array<DenseIndex, Base::count>& indices) const {
+ return internal::fixed_size_tensor_index_linearization_helper<DenseIndex, Base::count, Base::count, false>::run(indices, *static_cast<const Base*>(this));
+ }
+ template <typename DenseIndex> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ size_t IndexOfRowMajor(const array<DenseIndex, Base::count>& indices) const {
+ return internal::fixed_size_tensor_index_linearization_helper<DenseIndex, Base::count, Base::count, true>::run(indices, *static_cast<const Base*>(this));
+ }
+};
+
+namespace internal {
+template <typename std::ptrdiff_t... Indices>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::ptrdiff_t array_prod(const Sizes<Indices...>&) {
+ return Sizes<Indices...>::total_size;
+}
+}
+
+#else
+
+template <std::size_t n>
+struct non_zero_size {
+ typedef internal::type2val<std::size_t, n> type;
+};
+template <>
+struct non_zero_size<0> {
+ typedef internal::null_type type;
+};
+
+template <std::size_t V1=0, std::size_t V2=0, std::size_t V3=0, std::size_t V4=0, std::size_t V5=0> struct Sizes {
+ typedef typename internal::make_type_list<typename non_zero_size<V1>::type, typename non_zero_size<V2>::type, typename non_zero_size<V3>::type, typename non_zero_size<V4>::type, typename non_zero_size<V5>::type >::type Base;
+ static const size_t count = Base::count;
+ static const std::size_t total_size = internal::arg_prod<Base>::value;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE size_t rank() const {
+ return count;
+ }
+
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE size_t TotalSize() {
+ return internal::arg_prod<Base>::value;
+ }
+
+ Sizes() { }
+ template <typename DenseIndex>
+ explicit Sizes(const array<DenseIndex, Base::count>& /*indices*/) {
+ // todo: add assertion
+ }
+ template <typename T> Sizes& operator = (const T& /*other*/) {
+ // add assertion failure if the size of other is different
+ return *this;
+ }
+
+#if EIGEN_HAS_VARIADIC_TEMPLATES
+ template <typename... DenseIndex> Sizes(DenseIndex... /*indices*/) { }
+ explicit Sizes(std::initializer_list<std::size_t>) {
+ // todo: add assertion
+ }
+#else
+ EIGEN_DEVICE_FUNC explicit Sizes(const DenseIndex) {
+ }
+ EIGEN_DEVICE_FUNC Sizes(const DenseIndex, const DenseIndex) {
+ }
+ EIGEN_DEVICE_FUNC Sizes(const DenseIndex, const DenseIndex, const DenseIndex) {
+ }
+ EIGEN_DEVICE_FUNC Sizes(const DenseIndex, const DenseIndex, const DenseIndex, const DenseIndex) {
+ }
+ EIGEN_DEVICE_FUNC Sizes(const DenseIndex, const DenseIndex, const DenseIndex, const DenseIndex, const DenseIndex) {
+ }
+#endif
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DenseIndex operator[] (const int index) const {
+ switch (index) {
+ case 0:
+ return internal::get<0, Base>::value;
+ case 1:
+ return internal::get<1, Base>::value;
+ case 2:
+ return internal::get<2, Base>::value;
+ case 3:
+ return internal::get<3, Base>::value;
+ case 4:
+ return internal::get<4, Base>::value;
+ default:
+ eigen_assert(false && "index overflow");
+ return static_cast<DenseIndex>(-1);
+ }
+ }
+
+ template <typename DenseIndex> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ size_t IndexOfColMajor(const array<DenseIndex, Base::count>& indices) const {
+ return internal::fixed_size_tensor_index_linearization_helper<DenseIndex, Base::count, Base::count, false>::run(indices, *reinterpret_cast<const Base*>(this));
+ }
+ template <typename DenseIndex> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ size_t IndexOfRowMajor(const array<DenseIndex, Base::count>& indices) const {
+ return internal::fixed_size_tensor_index_linearization_helper<DenseIndex, Base::count, Base::count, true>::run(indices, *reinterpret_cast<const Base*>(this));
+ }
+};
+
+namespace internal {
+template <std::size_t V1, std::size_t V2, std::size_t V3, std::size_t V4, std::size_t V5>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::size_t array_prod(const Sizes<V1, V2, V3, V4, V5>&) {
+ return Sizes<V1, V2, V3, V4, V5>::total_size;
+}
+}
+
+#endif
+
+// Boilerplate
+namespace internal {
+template<typename Index, std::size_t NumIndices, std::size_t n, bool RowMajor>
+struct tensor_index_linearization_helper
+{
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Index run(array<Index, NumIndices> const& indices, array<Index, NumIndices> const& dimensions)
+ {
+ return array_get<RowMajor ? n : (NumIndices - n - 1)>(indices) +
+ array_get<RowMajor ? n : (NumIndices - n - 1)>(dimensions) *
+ tensor_index_linearization_helper<Index, NumIndices, n - 1, RowMajor>::run(indices, dimensions);
+ }
+};
+
+template<typename Index, std::size_t NumIndices, bool RowMajor>
+struct tensor_index_linearization_helper<Index, NumIndices, 0, RowMajor>
+{
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Index run(array<Index, NumIndices> const& indices, array<Index, NumIndices> const&)
+ {
+ return array_get<RowMajor ? 0 : NumIndices - 1>(indices);
+ }
+};
+} // end namespace internal
+
+
+
+// Dynamic size
+template <typename DenseIndex, int NumDims>
+struct DSizes : array<DenseIndex, NumDims> {
+ typedef array<DenseIndex, NumDims> Base;
+ static const int count = NumDims;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE size_t rank() const {
+ return NumDims;
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DenseIndex TotalSize() const {
+ return (NumDims == 0) ? 1 : internal::array_prod(*static_cast<const Base*>(this));
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DSizes() {
+ for (int i = 0 ; i < NumDims; ++i) {
+ (*this)[i] = 0;
+ }
+ }
+ EIGEN_DEVICE_FUNC explicit DSizes(const array<DenseIndex, NumDims>& a) : Base(a) { }
+
+ EIGEN_DEVICE_FUNC explicit DSizes(const DenseIndex i0) {
+ eigen_assert(NumDims == 1);
+ (*this)[0] = i0;
+ }
+
+#if EIGEN_HAS_VARIADIC_TEMPLATES
+ template<typename... IndexTypes> EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE explicit DSizes(DenseIndex firstDimension, DenseIndex secondDimension, IndexTypes... otherDimensions) : Base({{firstDimension, secondDimension, otherDimensions...}}) {
+ EIGEN_STATIC_ASSERT(sizeof...(otherDimensions) + 2 == NumDims, YOU_MADE_A_PROGRAMMING_MISTAKE)
+ }
+#else
+ EIGEN_DEVICE_FUNC DSizes(const DenseIndex i0, const DenseIndex i1) {
+ eigen_assert(NumDims == 2);
+ (*this)[0] = i0;
+ (*this)[1] = i1;
+ }
+ EIGEN_DEVICE_FUNC DSizes(const DenseIndex i0, const DenseIndex i1, const DenseIndex i2) {
+ eigen_assert(NumDims == 3);
+ (*this)[0] = i0;
+ (*this)[1] = i1;
+ (*this)[2] = i2;
+ }
+ EIGEN_DEVICE_FUNC DSizes(const DenseIndex i0, const DenseIndex i1, const DenseIndex i2, const DenseIndex i3) {
+ eigen_assert(NumDims == 4);
+ (*this)[0] = i0;
+ (*this)[1] = i1;
+ (*this)[2] = i2;
+ (*this)[3] = i3;
+ }
+ EIGEN_DEVICE_FUNC DSizes(const DenseIndex i0, const DenseIndex i1, const DenseIndex i2, const DenseIndex i3, const DenseIndex i4) {
+ eigen_assert(NumDims == 5);
+ (*this)[0] = i0;
+ (*this)[1] = i1;
+ (*this)[2] = i2;
+ (*this)[3] = i3;
+ (*this)[4] = i4;
+ }
+#endif
+
+ EIGEN_DEVICE_FUNC DSizes& operator = (const array<DenseIndex, NumDims>& other) {
+ *static_cast<Base*>(this) = other;
+ return *this;
+ }
+
+ // A constexpr would be so much better here
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DenseIndex IndexOfColMajor(const array<DenseIndex, NumDims>& indices) const {
+ return internal::tensor_index_linearization_helper<DenseIndex, NumDims, NumDims - 1, false>::run(indices, *static_cast<const Base*>(this));
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DenseIndex IndexOfRowMajor(const array<DenseIndex, NumDims>& indices) const {
+ return internal::tensor_index_linearization_helper<DenseIndex, NumDims, NumDims - 1, true>::run(indices, *static_cast<const Base*>(this));
+ }
+};
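+// Worked example (illustrative): for dimensions (2, 3, 4) and indices
+// (1, 2, 3), IndexOfColMajor computes 1 + 2 * (2 + 3 * 3) = 23; each
+// dimension's stride is the product of all preceding dimension sizes.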
+
+
+
+
+// Boilerplate
+namespace internal {
+template<typename Index, std::size_t NumIndices, std::size_t n, bool RowMajor>
+struct tensor_vsize_index_linearization_helper
+{
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Index run(array<Index, NumIndices> const& indices, std::vector<DenseIndex> const& dimensions)
+ {
+ return array_get<RowMajor ? n : (NumIndices - n - 1)>(indices) +
+ array_get<RowMajor ? n : (NumIndices - n - 1)>(dimensions) *
+ tensor_vsize_index_linearization_helper<Index, NumIndices, n - 1, RowMajor>::run(indices, dimensions);
+ }
+};
+
+template<typename Index, std::size_t NumIndices, bool RowMajor>
+struct tensor_vsize_index_linearization_helper<Index, NumIndices, 0, RowMajor>
+{
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Index run(array<Index, NumIndices> const& indices, std::vector<DenseIndex> const&)
+ {
+ return array_get<RowMajor ? 0 : NumIndices - 1>(indices);
+ }
+};
+} // end namespace internal
+
+
+namespace internal {
+
+template <typename DenseIndex, int NumDims> struct array_size<const DSizes<DenseIndex, NumDims> > {
+ static const size_t value = NumDims;
+};
+template <typename DenseIndex, int NumDims> struct array_size<DSizes<DenseIndex, NumDims> > {
+ static const size_t value = NumDims;
+};
+#ifndef EIGEN_EMULATE_CXX11_META_H
+template <typename std::ptrdiff_t... Indices> struct array_size<const Sizes<Indices...> > {
+  static const std::ptrdiff_t value = Sizes<Indices...>::count;
+};
+template <typename std::ptrdiff_t... Indices> struct array_size<Sizes<Indices...> > {
+  static const std::ptrdiff_t value = Sizes<Indices...>::count;
+};
+template <std::ptrdiff_t n, typename std::ptrdiff_t... Indices> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::ptrdiff_t array_get(const Sizes<Indices...>&) {
+  return get<n, internal::numeric_list<std::ptrdiff_t, Indices...> >::value;
+}
+template <std::ptrdiff_t n> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::ptrdiff_t array_get(const Sizes<>&) {
+ eigen_assert(false && "should never be called");
+ return -1;
+}
+#else
+template <std::size_t V1, std::size_t V2, std::size_t V3, std::size_t V4, std::size_t V5> struct array_size<const Sizes<V1,V2,V3,V4,V5> > {
+ static const size_t value = Sizes<V1,V2,V3,V4,V5>::count;
+};
+template <std::size_t V1, std::size_t V2, std::size_t V3, std::size_t V4, std::size_t V5> struct array_size<Sizes<V1,V2,V3,V4,V5> > {
+ static const size_t value = Sizes<V1,V2,V3,V4,V5>::count;
+};
+template <std::size_t n, std::size_t V1, std::size_t V2, std::size_t V3, std::size_t V4, std::size_t V5> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::size_t array_get(const Sizes<V1,V2,V3,V4,V5>&) {
+ return get<n, typename Sizes<V1,V2,V3,V4,V5>::Base>::value;
+}
+
+#endif
+
+
+template <typename Dims1, typename Dims2, size_t n, size_t m>
+struct sizes_match_below_dim {
+ static EIGEN_DEVICE_FUNC inline bool run(Dims1&, Dims2&) {
+ return false;
+ }
+};
+template <typename Dims1, typename Dims2, size_t n>
+struct sizes_match_below_dim<Dims1, Dims2, n, n> {
+ static EIGEN_DEVICE_FUNC inline bool run(Dims1& dims1, Dims2& dims2) {
+ return (array_get<n-1>(dims1) == array_get<n-1>(dims2)) &
+ sizes_match_below_dim<Dims1, Dims2, n-1, n-1>::run(dims1, dims2);
+ }
+};
+template <typename Dims1, typename Dims2>
+struct sizes_match_below_dim<Dims1, Dims2, 0, 0> {
+ static EIGEN_DEVICE_FUNC inline bool run(Dims1&, Dims2&) {
+ return true;
+ }
+};
+
+} // end namespace internal
+
+
+template <typename Dims1, typename Dims2>
+EIGEN_DEVICE_FUNC bool dimensions_match(Dims1& dims1, Dims2& dims2) {
+ return internal::sizes_match_below_dim<Dims1, Dims2, internal::array_size<Dims1>::value, internal::array_size<Dims2>::value>::run(dims1, dims2);
+}
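+// Example (illustrative): for a and b of type DSizes<DenseIndex, 2> both
+// holding (2, 3), dimensions_match(a, b) returns true; differing ranks or
+// extents yield false rather than triggering an assertion.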
+
+} // end namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_DIMENSIONS_H
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorEvalTo.h b/unsupported/Eigen/CXX11/src/Tensor/TensorEvalTo.h
new file mode 100644
index 000000000..06987132b
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorEvalTo.h
@@ -0,0 +1,181 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_EVAL_TO_H
+#define EIGEN_CXX11_TENSOR_TENSOR_EVAL_TO_H
+
+namespace Eigen {
+
+/** \class TensorEvalToOp
+ * \ingroup CXX11_Tensor_Module
+ *
+ * \brief Expression node that evaluates its argument into a caller-provided
+ * buffer.
+ *
+ */
+namespace internal {
+template<typename XprType, template <class> class MakePointer_>
+struct traits<TensorEvalToOp<XprType, MakePointer_> >
+{
+  // Forward the traits of the nested expression.
+ typedef typename XprType::Scalar Scalar;
+ typedef traits<XprType> XprTraits;
+ typedef typename XprTraits::StorageKind StorageKind;
+ typedef typename XprTraits::Index Index;
+ typedef typename XprType::Nested Nested;
+ typedef typename remove_reference<Nested>::type _Nested;
+ static const int NumDimensions = XprTraits::NumDimensions;
+ static const int Layout = XprTraits::Layout;
+
+ enum {
+ Flags = 0
+ };
+ template <class T>
+ struct MakePointer {
+ // Intermediate typedef to workaround MSVC issue.
+ typedef MakePointer_<T> MakePointerT;
+ typedef typename MakePointerT::Type Type;
+ };
+};
+
+template<typename XprType, template <class> class MakePointer_>
+struct eval<TensorEvalToOp<XprType, MakePointer_>, Eigen::Dense>
+{
+ typedef const TensorEvalToOp<XprType, MakePointer_>& type;
+};
+
+template<typename XprType, template <class> class MakePointer_>
+struct nested<TensorEvalToOp<XprType, MakePointer_>, 1, typename eval<TensorEvalToOp<XprType, MakePointer_> >::type>
+{
+ typedef TensorEvalToOp<XprType, MakePointer_> type;
+};
+
+} // end namespace internal
+
+
+
+
+template<typename XprType, template <class> class MakePointer_>
+class TensorEvalToOp : public TensorBase<TensorEvalToOp<XprType, MakePointer_>, ReadOnlyAccessors>
+{
+ public:
+ typedef typename Eigen::internal::traits<TensorEvalToOp>::Scalar Scalar;
+ typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
+ typedef typename internal::remove_const<typename XprType::CoeffReturnType>::type CoeffReturnType;
+ typedef typename MakePointer_<CoeffReturnType>::Type PointerType;
+ typedef typename Eigen::internal::nested<TensorEvalToOp>::type Nested;
+ typedef typename Eigen::internal::traits<TensorEvalToOp>::StorageKind StorageKind;
+ typedef typename Eigen::internal::traits<TensorEvalToOp>::Index Index;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvalToOp(PointerType buffer, const XprType& expr)
+ : m_xpr(expr), m_buffer(buffer) {}
+
+ EIGEN_DEVICE_FUNC
+ const typename internal::remove_all<typename XprType::Nested>::type&
+ expression() const { return m_xpr; }
+
+ EIGEN_DEVICE_FUNC PointerType buffer() const { return m_buffer; }
+
+ protected:
+ typename XprType::Nested m_xpr;
+ PointerType m_buffer;
+};
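+// Conceptually, TensorEvalToOp(buffer, expr) represents the assignment
+// "buffer = expr": the evaluator below streams every coefficient (or packet)
+// of expr straight into the caller-provided buffer, so intermediate results
+// can be materialized without the expression tree allocating memory itself.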
+
+
+
+template<typename ArgType, typename Device, template <class> class MakePointer_>
+struct TensorEvaluator<const TensorEvalToOp<ArgType, MakePointer_>, Device>
+{
+ typedef TensorEvalToOp<ArgType, MakePointer_> XprType;
+ typedef typename ArgType::Scalar Scalar;
+ typedef typename TensorEvaluator<ArgType, Device>::Dimensions Dimensions;
+ typedef typename XprType::Index Index;
+ typedef typename internal::remove_const<typename XprType::CoeffReturnType>::type CoeffReturnType;
+ typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
+ static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
+
+ enum {
+ IsAligned = TensorEvaluator<ArgType, Device>::IsAligned,
+ PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
+ Layout = TensorEvaluator<ArgType, Device>::Layout,
+ CoordAccess = false, // to be implemented
+ RawAccess = true
+ };
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
+ : m_impl(op.expression(), device), m_device(device),
+ m_buffer(op.buffer()), m_op(op), m_expression(op.expression())
+ { }
+
+ // Used for accessor extraction in SYCL Managed TensorMap:
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const XprType& op() const {
+ return m_op;
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ~TensorEvaluator() {
+ }
+
+ typedef typename internal::traits<const TensorEvalToOp<ArgType, MakePointer_> >::template MakePointer<CoeffReturnType>::Type DevicePointer;
+ EIGEN_DEVICE_FUNC const Dimensions& dimensions() const { return m_impl.dimensions(); }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(DevicePointer scalar) {
+ EIGEN_UNUSED_VARIABLE(scalar);
+ eigen_assert(scalar == NULL);
+ return m_impl.evalSubExprsIfNeeded(m_buffer);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalScalar(Index i) {
+ m_buffer[i] = m_impl.coeff(i);
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalPacket(Index i) {
+ internal::pstoret<CoeffReturnType, PacketReturnType, Aligned>(m_buffer + i, m_impl.template packet<TensorEvaluator<ArgType, Device>::IsAligned ? Aligned : Unaligned>(i));
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
+ m_impl.cleanup();
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
+ {
+ return m_buffer[index];
+ }
+
+ template<int LoadMode>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
+ {
+ return internal::ploadt<PacketReturnType, LoadMode>(m_buffer + index);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
+ // We assume that evalPacket or evalScalar is called to perform the
+ // assignment and account for the cost of the write here.
+ return m_impl.costPerCoeff(vectorized) +
+ TensorOpCost(0, sizeof(CoeffReturnType), 0, vectorized, PacketSize);
+ }
+
+ EIGEN_DEVICE_FUNC DevicePointer data() const { return m_buffer; }
+ ArgType expression() const { return m_expression; }
+
+ /// required by sycl in order to extract the accessor
+ const TensorEvaluator<ArgType, Device>& impl() const { return m_impl; }
+ /// added for sycl in order to construct the buffer from the sycl device
+  const Device& device() const { return m_device; }
+
+ private:
+ TensorEvaluator<ArgType, Device> m_impl;
+ const Device& m_device;
+ DevicePointer m_buffer;
+ const XprType& m_op;
+ const ArgType m_expression;
+};
+
+
+} // end namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_EVAL_TO_H
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorEvaluator.h b/unsupported/Eigen/CXX11/src/Tensor/TensorEvaluator.h
new file mode 100644
index 000000000..834ce07df
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorEvaluator.h
@@ -0,0 +1,633 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_EVALUATOR_H
+#define EIGEN_CXX11_TENSOR_TENSOR_EVALUATOR_H
+
+namespace Eigen {
+
+/** \class TensorEvaluator
+ * \ingroup CXX11_Tensor_Module
+ *
+ * \brief The tensor evaluator classes.
+ *
+ * These classes are responsible for the evaluation of the tensor expression.
+ *
+ * TODO: add support for more types of expressions, in particular expressions
+ * leading to lvalues (slicing, reshaping, etc...)
+ */
+
+// Generic evaluator
+template<typename Derived, typename Device>
+struct TensorEvaluator
+{
+ typedef typename Derived::Index Index;
+ typedef typename Derived::Scalar Scalar;
+ typedef typename Derived::Scalar CoeffReturnType;
+ typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
+ typedef typename Derived::Dimensions Dimensions;
+
+ // NumDimensions is -1 for variable dim tensors
+ static const int NumCoords = internal::traits<Derived>::NumDimensions > 0 ?
+ internal::traits<Derived>::NumDimensions : 0;
+
+ enum {
+ IsAligned = Derived::IsAligned,
+ PacketAccess = (internal::unpacket_traits<PacketReturnType>::size > 1),
+ Layout = Derived::Layout,
+ CoordAccess = NumCoords > 0,
+ RawAccess = true
+ };
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const Derived& m, const Device& device)
+ : m_data(const_cast<typename internal::traits<Derived>::template MakePointer<Scalar>::Type>(m.data())), m_dims(m.dimensions()), m_device(device), m_impl(m)
+ { }
+
+ // Used for accessor extraction in SYCL Managed TensorMap:
+ const Derived& derived() const { return m_impl; }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dims; }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType* dest) {
+ if (dest) {
+ m_device.memcpy((void*)dest, m_data, sizeof(Scalar) * m_dims.TotalSize());
+ return false;
+ }
+ return true;
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() { }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const {
+ eigen_assert(m_data);
+ return m_data[index];
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) {
+ eigen_assert(m_data);
+ return m_data[index];
+ }
+
+ template<int LoadMode> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ PacketReturnType packet(Index index) const
+ {
+ return internal::ploadt<PacketReturnType, LoadMode>(m_data + index);
+ }
+
+ template <int StoreMode> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ void writePacket(Index index, const PacketReturnType& x)
+ {
+ return internal::pstoret<Scalar, PacketReturnType, StoreMode>(m_data + index, x);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(const array<DenseIndex, NumCoords>& coords) const {
+ eigen_assert(m_data);
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ return m_data[m_dims.IndexOfColMajor(coords)];
+ } else {
+ return m_data[m_dims.IndexOfRowMajor(coords)];
+ }
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(const array<DenseIndex, NumCoords>& coords) {
+ eigen_assert(m_data);
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ return m_data[m_dims.IndexOfColMajor(coords)];
+ } else {
+ return m_data[m_dims.IndexOfRowMajor(coords)];
+ }
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
+ return TensorOpCost(sizeof(CoeffReturnType), 0, 0, vectorized,
+ internal::unpacket_traits<PacketReturnType>::size);
+ }
+
+ EIGEN_DEVICE_FUNC typename internal::traits<Derived>::template MakePointer<Scalar>::Type data() const { return m_data; }
+
+ /// required by sycl in order to construct sycl buffer from raw pointer
+  const Device& device() const { return m_device; }
+
+ protected:
+ typename internal::traits<Derived>::template MakePointer<Scalar>::Type m_data;
+ Dimensions m_dims;
+ const Device& m_device;
+ const Derived& m_impl;
+};
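+// This generic evaluator backs concrete, materialized tensors such as Tensor
+// and TensorMap: coeff() and coeffRef() index m_data directly, and
+// evalSubExprsIfNeeded() can satisfy an evaluation request with a plain
+// memcpy because the data already lives in memory.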
+
+namespace {
+template <typename T> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
+T loadConstant(const T* address) {
+ return *address;
+}
+// Use the texture cache on CUDA devices whenever possible
+#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 350
+template <> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
+float loadConstant(const float* address) {
+ return __ldg(address);
+}
+template <> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
+double loadConstant(const double* address) {
+ return __ldg(address);
+}
+template <> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
+Eigen::half loadConstant(const Eigen::half* address) {
+ return Eigen::half(half_impl::raw_uint16_to_half(__ldg(&address->x)));
+}
+#endif
+}
+
+
+// Default evaluator for rvalues
+template<typename Derived, typename Device>
+struct TensorEvaluator<const Derived, Device>
+{
+ typedef typename Derived::Index Index;
+ typedef typename Derived::Scalar Scalar;
+ typedef typename Derived::Scalar CoeffReturnType;
+ typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
+ typedef typename Derived::Dimensions Dimensions;
+
+ // NumDimensions is -1 for variable dim tensors
+ static const int NumCoords = internal::traits<Derived>::NumDimensions > 0 ?
+ internal::traits<Derived>::NumDimensions : 0;
+
+ enum {
+ IsAligned = Derived::IsAligned,
+ PacketAccess = (internal::unpacket_traits<PacketReturnType>::size > 1),
+ Layout = Derived::Layout,
+ CoordAccess = NumCoords > 0,
+ RawAccess = true
+ };
+
+ // Used for accessor extraction in SYCL Managed TensorMap:
+ const Derived& derived() const { return m_impl; }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const Derived& m, const Device& device)
+ : m_data(m.data()), m_dims(m.dimensions()), m_device(device), m_impl(m)
+ { }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dims; }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType* data) {
+ if (!NumTraits<typename internal::remove_const<Scalar>::type>::RequireInitialization && data) {
+ m_device.memcpy((void*)data, m_data, m_dims.TotalSize() * sizeof(Scalar));
+ return false;
+ }
+ return true;
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() { }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const {
+ eigen_assert(m_data);
+ return loadConstant(m_data+index);
+ }
+
+ template<int LoadMode> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ PacketReturnType packet(Index index) const
+ {
+ return internal::ploadt_ro<PacketReturnType, LoadMode>(m_data + index);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(const array<DenseIndex, NumCoords>& coords) const {
+ eigen_assert(m_data);
+ const Index index = (static_cast<int>(Layout) == static_cast<int>(ColMajor)) ? m_dims.IndexOfColMajor(coords)
+ : m_dims.IndexOfRowMajor(coords);
+ return loadConstant(m_data+index);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
+ return TensorOpCost(sizeof(CoeffReturnType), 0, 0, vectorized,
+ internal::unpacket_traits<PacketReturnType>::size);
+ }
+
+ EIGEN_DEVICE_FUNC typename internal::traits<Derived>::template MakePointer<const Scalar>::Type data() const { return m_data; }
+
+ /// added for sycl in order to construct the buffer from the sycl device
+  const Device& device() const { return m_device; }
+
+ protected:
+ typename internal::traits<Derived>::template MakePointer<const Scalar>::Type m_data;
+ Dimensions m_dims;
+ const Device& m_device;
+ const Derived& m_impl;
+};
+
+
+
+
+// -------------------- CwiseNullaryOp --------------------
+
+template<typename NullaryOp, typename ArgType, typename Device>
+struct TensorEvaluator<const TensorCwiseNullaryOp<NullaryOp, ArgType>, Device>
+{
+ typedef TensorCwiseNullaryOp<NullaryOp, ArgType> XprType;
+
+ enum {
+ IsAligned = true,
+ PacketAccess = internal::functor_traits<NullaryOp>::PacketAccess,
+ Layout = TensorEvaluator<ArgType, Device>::Layout,
+ CoordAccess = false, // to be implemented
+ RawAccess = false
+ };
+
+ EIGEN_DEVICE_FUNC
+ TensorEvaluator(const XprType& op, const Device& device)
+ : m_functor(op.functor()), m_argImpl(op.nestedExpression(), device), m_wrapper()
+ { }
+
+ typedef typename XprType::Index Index;
+ typedef typename XprType::Scalar Scalar;
+ typedef typename internal::traits<XprType>::Scalar CoeffReturnType;
+ typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
+ static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
+ typedef typename TensorEvaluator<ArgType, Device>::Dimensions Dimensions;
+
+ EIGEN_DEVICE_FUNC const Dimensions& dimensions() const { return m_argImpl.dimensions(); }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType*) { return true; }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() { }
+
+ EIGEN_DEVICE_FUNC CoeffReturnType coeff(Index index) const
+ {
+ return m_wrapper(m_functor, index);
+ }
+
+ template<int LoadMode>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
+ {
+ return m_wrapper.template packetOp<PacketReturnType, Index>(m_functor, index);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
+ costPerCoeff(bool vectorized) const {
+ return TensorOpCost(sizeof(CoeffReturnType), 0, 0, vectorized,
+ internal::unpacket_traits<PacketReturnType>::size);
+ }
+
+ EIGEN_DEVICE_FUNC CoeffReturnType* data() const { return NULL; }
+
+ /// required by sycl in order to extract the accessor
+ const TensorEvaluator<ArgType, Device>& impl() const { return m_argImpl; }
+ /// required by sycl in order to extract the accessor
+ NullaryOp functor() const { return m_functor; }
+
+
+ private:
+ const NullaryOp m_functor;
+ TensorEvaluator<ArgType, Device> m_argImpl;
+ const internal::nullary_wrapper<CoeffReturnType,NullaryOp> m_wrapper;
+};
+
+
+
+// -------------------- CwiseUnaryOp --------------------
+
+template<typename UnaryOp, typename ArgType, typename Device>
+struct TensorEvaluator<const TensorCwiseUnaryOp<UnaryOp, ArgType>, Device>
+{
+ typedef TensorCwiseUnaryOp<UnaryOp, ArgType> XprType;
+
+ enum {
+ IsAligned = TensorEvaluator<ArgType, Device>::IsAligned,
+ PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess & internal::functor_traits<UnaryOp>::PacketAccess,
+ Layout = TensorEvaluator<ArgType, Device>::Layout,
+ CoordAccess = false, // to be implemented
+ RawAccess = false
+ };
+
+ EIGEN_DEVICE_FUNC TensorEvaluator(const XprType& op, const Device& device)
+ : m_functor(op.functor()),
+ m_argImpl(op.nestedExpression(), device)
+ { }
+
+ typedef typename XprType::Index Index;
+ typedef typename XprType::Scalar Scalar;
+ typedef typename internal::traits<XprType>::Scalar CoeffReturnType;
+ typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
+ static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
+ typedef typename TensorEvaluator<ArgType, Device>::Dimensions Dimensions;
+
+ EIGEN_DEVICE_FUNC const Dimensions& dimensions() const { return m_argImpl.dimensions(); }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar*) {
+ m_argImpl.evalSubExprsIfNeeded(NULL);
+ return true;
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
+ m_argImpl.cleanup();
+ }
+
+ EIGEN_DEVICE_FUNC CoeffReturnType coeff(Index index) const
+ {
+ return m_functor(m_argImpl.coeff(index));
+ }
+
+ template<int LoadMode>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
+ {
+ return m_functor.packetOp(m_argImpl.template packet<LoadMode>(index));
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
+ const double functor_cost = internal::functor_traits<UnaryOp>::Cost;
+ return m_argImpl.costPerCoeff(vectorized) +
+ TensorOpCost(0, 0, functor_cost, vectorized, PacketSize);
+ }
+
+ EIGEN_DEVICE_FUNC CoeffReturnType* data() const { return NULL; }
+
+ /// required by sycl in order to extract the accessor
+ const TensorEvaluator<ArgType, Device> & impl() const { return m_argImpl; }
+ /// added for sycl in order to construct the buffer from sycl device
+ UnaryOp functor() const { return m_functor; }
+
+
+ private:
+ const UnaryOp m_functor;
+ TensorEvaluator<ArgType, Device> m_argImpl;
+};
+
+
+// -------------------- CwiseBinaryOp --------------------
+
+template<typename BinaryOp, typename LeftArgType, typename RightArgType, typename Device>
+struct TensorEvaluator<const TensorCwiseBinaryOp<BinaryOp, LeftArgType, RightArgType>, Device>
+{
+ typedef TensorCwiseBinaryOp<BinaryOp, LeftArgType, RightArgType> XprType;
+
+ enum {
+ IsAligned = TensorEvaluator<LeftArgType, Device>::IsAligned & TensorEvaluator<RightArgType, Device>::IsAligned,
+ PacketAccess = TensorEvaluator<LeftArgType, Device>::PacketAccess & TensorEvaluator<RightArgType, Device>::PacketAccess &
+ internal::functor_traits<BinaryOp>::PacketAccess,
+ Layout = TensorEvaluator<LeftArgType, Device>::Layout,
+ CoordAccess = false, // to be implemented
+ RawAccess = false
+ };
+
+ EIGEN_DEVICE_FUNC TensorEvaluator(const XprType& op, const Device& device)
+ : m_functor(op.functor()),
+ m_leftImpl(op.lhsExpression(), device),
+ m_rightImpl(op.rhsExpression(), device)
+ {
+ EIGEN_STATIC_ASSERT((static_cast<int>(TensorEvaluator<LeftArgType, Device>::Layout) == static_cast<int>(TensorEvaluator<RightArgType, Device>::Layout) || internal::traits<XprType>::NumDimensions <= 1), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ eigen_assert(dimensions_match(m_leftImpl.dimensions(), m_rightImpl.dimensions()));
+ }
+
+ typedef typename XprType::Index Index;
+ typedef typename XprType::Scalar Scalar;
+ typedef typename internal::traits<XprType>::Scalar CoeffReturnType;
+ typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
+ static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
+ typedef typename TensorEvaluator<LeftArgType, Device>::Dimensions Dimensions;
+
+ EIGEN_DEVICE_FUNC const Dimensions& dimensions() const
+ {
+    // TODO: use the right impl instead if its dimensions are known at compile time.
+ return m_leftImpl.dimensions();
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType*) {
+ m_leftImpl.evalSubExprsIfNeeded(NULL);
+ m_rightImpl.evalSubExprsIfNeeded(NULL);
+ return true;
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
+ m_leftImpl.cleanup();
+ m_rightImpl.cleanup();
+ }
+
+ EIGEN_DEVICE_FUNC CoeffReturnType coeff(Index index) const
+ {
+ return m_functor(m_leftImpl.coeff(index), m_rightImpl.coeff(index));
+ }
+ template<int LoadMode>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
+ {
+ return m_functor.packetOp(m_leftImpl.template packet<LoadMode>(index), m_rightImpl.template packet<LoadMode>(index));
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
+ costPerCoeff(bool vectorized) const {
+ const double functor_cost = internal::functor_traits<BinaryOp>::Cost;
+ return m_leftImpl.costPerCoeff(vectorized) +
+ m_rightImpl.costPerCoeff(vectorized) +
+ TensorOpCost(0, 0, functor_cost, vectorized, PacketSize);
+ }
+
+ EIGEN_DEVICE_FUNC CoeffReturnType* data() const { return NULL; }
+ /// required by sycl in order to extract the accessor
+ const TensorEvaluator<LeftArgType, Device>& left_impl() const { return m_leftImpl; }
+ /// required by sycl in order to extract the accessor
+ const TensorEvaluator<RightArgType, Device>& right_impl() const { return m_rightImpl; }
+ /// required by sycl in order to extract the accessor
+ BinaryOp functor() const { return m_functor; }
+
+ private:
+ const BinaryOp m_functor;
+ TensorEvaluator<LeftArgType, Device> m_leftImpl;
+ TensorEvaluator<RightArgType, Device> m_rightImpl;
+};
+
+// -------------------- CwiseTernaryOp --------------------
+
+template<typename TernaryOp, typename Arg1Type, typename Arg2Type, typename Arg3Type, typename Device>
+struct TensorEvaluator<const TensorCwiseTernaryOp<TernaryOp, Arg1Type, Arg2Type, Arg3Type>, Device>
+{
+ typedef TensorCwiseTernaryOp<TernaryOp, Arg1Type, Arg2Type, Arg3Type> XprType;
+
+ enum {
+ IsAligned = TensorEvaluator<Arg1Type, Device>::IsAligned & TensorEvaluator<Arg2Type, Device>::IsAligned & TensorEvaluator<Arg3Type, Device>::IsAligned,
+ PacketAccess = TensorEvaluator<Arg1Type, Device>::PacketAccess & TensorEvaluator<Arg2Type, Device>::PacketAccess & TensorEvaluator<Arg3Type, Device>::PacketAccess &
+ internal::functor_traits<TernaryOp>::PacketAccess,
+ Layout = TensorEvaluator<Arg1Type, Device>::Layout,
+ CoordAccess = false, // to be implemented
+ RawAccess = false
+ };
+
+ EIGEN_DEVICE_FUNC TensorEvaluator(const XprType& op, const Device& device)
+ : m_functor(op.functor()),
+ m_arg1Impl(op.arg1Expression(), device),
+ m_arg2Impl(op.arg2Expression(), device),
+ m_arg3Impl(op.arg3Expression(), device)
+ {
+ EIGEN_STATIC_ASSERT((static_cast<int>(TensorEvaluator<Arg1Type, Device>::Layout) == static_cast<int>(TensorEvaluator<Arg3Type, Device>::Layout) || internal::traits<XprType>::NumDimensions <= 1), YOU_MADE_A_PROGRAMMING_MISTAKE);
+
+ EIGEN_STATIC_ASSERT((internal::is_same<typename internal::traits<Arg1Type>::StorageKind,
+ typename internal::traits<Arg2Type>::StorageKind>::value),
+ STORAGE_KIND_MUST_MATCH)
+ EIGEN_STATIC_ASSERT((internal::is_same<typename internal::traits<Arg1Type>::StorageKind,
+ typename internal::traits<Arg3Type>::StorageKind>::value),
+ STORAGE_KIND_MUST_MATCH)
+ EIGEN_STATIC_ASSERT((internal::is_same<typename internal::traits<Arg1Type>::Index,
+ typename internal::traits<Arg2Type>::Index>::value),
+ STORAGE_INDEX_MUST_MATCH)
+ EIGEN_STATIC_ASSERT((internal::is_same<typename internal::traits<Arg1Type>::Index,
+ typename internal::traits<Arg3Type>::Index>::value),
+ STORAGE_INDEX_MUST_MATCH)
+
+ eigen_assert(dimensions_match(m_arg1Impl.dimensions(), m_arg2Impl.dimensions()) && dimensions_match(m_arg1Impl.dimensions(), m_arg3Impl.dimensions()));
+ }
+
+ typedef typename XprType::Index Index;
+ typedef typename XprType::Scalar Scalar;
+ typedef typename internal::traits<XprType>::Scalar CoeffReturnType;
+ typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
+ static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
+ typedef typename TensorEvaluator<Arg1Type, Device>::Dimensions Dimensions;
+
+ EIGEN_DEVICE_FUNC const Dimensions& dimensions() const
+ {
+ // TODO: use arg2 or arg3 dimensions if they are known at compile time.
+ return m_arg1Impl.dimensions();
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType*) {
+ m_arg1Impl.evalSubExprsIfNeeded(NULL);
+ m_arg2Impl.evalSubExprsIfNeeded(NULL);
+ m_arg3Impl.evalSubExprsIfNeeded(NULL);
+ return true;
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
+ m_arg1Impl.cleanup();
+ m_arg2Impl.cleanup();
+ m_arg3Impl.cleanup();
+ }
+
+ EIGEN_DEVICE_FUNC CoeffReturnType coeff(Index index) const
+ {
+ return m_functor(m_arg1Impl.coeff(index), m_arg2Impl.coeff(index), m_arg3Impl.coeff(index));
+ }
+ template<int LoadMode>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
+ {
+ return m_functor.packetOp(m_arg1Impl.template packet<LoadMode>(index),
+ m_arg2Impl.template packet<LoadMode>(index),
+ m_arg3Impl.template packet<LoadMode>(index));
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
+ costPerCoeff(bool vectorized) const {
+ const double functor_cost = internal::functor_traits<TernaryOp>::Cost;
+ return m_arg1Impl.costPerCoeff(vectorized) +
+ m_arg2Impl.costPerCoeff(vectorized) +
+ m_arg3Impl.costPerCoeff(vectorized) +
+ TensorOpCost(0, 0, functor_cost, vectorized, PacketSize);
+ }
+
+ EIGEN_DEVICE_FUNC CoeffReturnType* data() const { return NULL; }
+
+ /// required by sycl in order to extract the accessor
+ const TensorEvaluator<Arg1Type, Device> & arg1Impl() const { return m_arg1Impl; }
+ /// required by sycl in order to extract the accessor
+ const TensorEvaluator<Arg2Type, Device>& arg2Impl() const { return m_arg2Impl; }
+ /// required by sycl in order to extract the accessor
+ const TensorEvaluator<Arg3Type, Device>& arg3Impl() const { return m_arg3Impl; }
+
+ private:
+ const TernaryOp m_functor;
+ TensorEvaluator<Arg1Type, Device> m_arg1Impl;
+ TensorEvaluator<Arg2Type, Device> m_arg2Impl;
+ TensorEvaluator<Arg3Type, Device> m_arg3Impl;
+};
+
+
+// -------------------- SelectOp --------------------
+
+template<typename IfArgType, typename ThenArgType, typename ElseArgType, typename Device>
+struct TensorEvaluator<const TensorSelectOp<IfArgType, ThenArgType, ElseArgType>, Device>
+{
+ typedef TensorSelectOp<IfArgType, ThenArgType, ElseArgType> XprType;
+ typedef typename XprType::Scalar Scalar;
+
+ enum {
+ IsAligned = TensorEvaluator<ThenArgType, Device>::IsAligned & TensorEvaluator<ElseArgType, Device>::IsAligned,
+ PacketAccess = TensorEvaluator<ThenArgType, Device>::PacketAccess & TensorEvaluator<ElseArgType, Device>::PacketAccess &
+ internal::packet_traits<Scalar>::HasBlend,
+ Layout = TensorEvaluator<IfArgType, Device>::Layout,
+ CoordAccess = false, // to be implemented
+ RawAccess = false
+ };
+
+ EIGEN_DEVICE_FUNC TensorEvaluator(const XprType& op, const Device& device)
+ : m_condImpl(op.ifExpression(), device),
+ m_thenImpl(op.thenExpression(), device),
+ m_elseImpl(op.elseExpression(), device)
+ {
+ EIGEN_STATIC_ASSERT((static_cast<int>(TensorEvaluator<IfArgType, Device>::Layout) == static_cast<int>(TensorEvaluator<ThenArgType, Device>::Layout)), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT((static_cast<int>(TensorEvaluator<IfArgType, Device>::Layout) == static_cast<int>(TensorEvaluator<ElseArgType, Device>::Layout)), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ eigen_assert(dimensions_match(m_condImpl.dimensions(), m_thenImpl.dimensions()));
+ eigen_assert(dimensions_match(m_thenImpl.dimensions(), m_elseImpl.dimensions()));
+ }
+
+ typedef typename XprType::Index Index;
+ typedef typename internal::traits<XprType>::Scalar CoeffReturnType;
+ typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
+ static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
+ typedef typename TensorEvaluator<IfArgType, Device>::Dimensions Dimensions;
+
+ EIGEN_DEVICE_FUNC const Dimensions& dimensions() const
+ {
+ // TODO: use then or else impl instead if they happen to be known at compile time.
+ return m_condImpl.dimensions();
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType*) {
+ m_condImpl.evalSubExprsIfNeeded(NULL);
+ m_thenImpl.evalSubExprsIfNeeded(NULL);
+ m_elseImpl.evalSubExprsIfNeeded(NULL);
+ return true;
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
+ m_condImpl.cleanup();
+ m_thenImpl.cleanup();
+ m_elseImpl.cleanup();
+ }
+
+ EIGEN_DEVICE_FUNC CoeffReturnType coeff(Index index) const
+ {
+ return m_condImpl.coeff(index) ? m_thenImpl.coeff(index) : m_elseImpl.coeff(index);
+ }
+ template<int LoadMode>
+ EIGEN_DEVICE_FUNC PacketReturnType packet(Index index) const
+ {
+ internal::Selector<PacketSize> select;
+ for (Index i = 0; i < PacketSize; ++i) {
+ select.select[i] = m_condImpl.coeff(index+i);
+ }
+ return internal::pblend(select,
+ m_thenImpl.template packet<LoadMode>(index),
+ m_elseImpl.template packet<LoadMode>(index));
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
+ costPerCoeff(bool vectorized) const {
+ return m_condImpl.costPerCoeff(vectorized) +
+ m_thenImpl.costPerCoeff(vectorized)
+ .cwiseMax(m_elseImpl.costPerCoeff(vectorized));
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType* data() const { return NULL; }
+  /// required by SYCL in order to extract the accessor
+  const TensorEvaluator<IfArgType, Device>& cond_impl() const { return m_condImpl; }
+  /// required by SYCL in order to extract the accessor
+  const TensorEvaluator<ThenArgType, Device>& then_impl() const { return m_thenImpl; }
+  /// required by SYCL in order to extract the accessor
+  const TensorEvaluator<ElseArgType, Device>& else_impl() const { return m_elseImpl; }
+
+ private:
+ TensorEvaluator<IfArgType, Device> m_condImpl;
+ TensorEvaluator<ThenArgType, Device> m_thenImpl;
+ TensorEvaluator<ElseArgType, Device> m_elseImpl;
+};
+
+
+} // end namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_EVALUATOR_H
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h b/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h
new file mode 100644
index 000000000..f01d77c0a
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h
@@ -0,0 +1,288 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_EXECUTOR_H
+#define EIGEN_CXX11_TENSOR_TENSOR_EXECUTOR_H
+
+namespace Eigen {
+
+/** \class TensorExecutor
+ * \ingroup CXX11_Tensor_Module
+ *
+ * \brief The tensor executor class.
+ *
+ * This class is responsible for launching the evaluation of the expression on
+ * the specified computing device.
+ */
+namespace internal {
+
+// Default strategy: the expression is evaluated with a single cpu thread.
+template<typename Expression, typename Device, bool Vectorizable>
+class TensorExecutor
+{
+ public:
+ typedef typename Expression::Index Index;
+ EIGEN_DEVICE_FUNC
+ static inline void run(const Expression& expr, const Device& device = Device())
+ {
+ TensorEvaluator<Expression, Device> evaluator(expr, device);
+ const bool needs_assign = evaluator.evalSubExprsIfNeeded(NULL);
+ if (needs_assign)
+ {
+ const Index size = array_prod(evaluator.dimensions());
+ for (Index i = 0; i < size; ++i) {
+ evaluator.evalScalar(i);
+ }
+ }
+ evaluator.cleanup();
+ }
+};
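+
+// Usage sketch (illustrative): user code does not invoke the executor
+// directly; a plain tensor assignment reaches it through a TensorAssignOp.
+//
+//   Eigen::Tensor<float, 2> a(64, 64), b(64, 64);
+//   b.setRandom();
+//   a = b * 2.0f;  // builds an Assign expression, then runs
+//                  // TensorExecutor<const Assign, DefaultDevice, ...>::run()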
+
+
+template<typename Expression>
+class TensorExecutor<Expression, DefaultDevice, true>
+{
+ public:
+ typedef typename Expression::Index Index;
+ EIGEN_DEVICE_FUNC
+ static inline void run(const Expression& expr, const DefaultDevice& device = DefaultDevice())
+ {
+ TensorEvaluator<Expression, DefaultDevice> evaluator(expr, device);
+ const bool needs_assign = evaluator.evalSubExprsIfNeeded(NULL);
+ if (needs_assign)
+ {
+ const Index size = array_prod(evaluator.dimensions());
+ const int PacketSize = unpacket_traits<typename TensorEvaluator<Expression, DefaultDevice>::PacketReturnType>::size;
+ // Give the compiler a strong hint to unroll the loop. But don't insist
+ // on unrolling, because if the function is expensive the compiler should not
+ // unroll the loop at the expense of inlining.
+ const Index UnrolledSize = (size / (4 * PacketSize)) * 4 * PacketSize;
+ for (Index i = 0; i < UnrolledSize; i += 4*PacketSize) {
+ for (Index j = 0; j < 4; j++) {
+ evaluator.evalPacket(i + j * PacketSize);
+ }
+ }
+ const Index VectorizedSize = (size / PacketSize) * PacketSize;
+ for (Index i = UnrolledSize; i < VectorizedSize; i += PacketSize) {
+ evaluator.evalPacket(i);
+ }
+ for (Index i = VectorizedSize; i < size; ++i) {
+ evaluator.evalScalar(i);
+ }
+ }
+ evaluator.cleanup();
+ }
+};
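+
+// Worked example of the three-stage loop above (values are illustrative):
+// with size = 103 and PacketSize = 4, UnrolledSize = (103 / 16) * 16 = 96,
+// so the unrolled loop covers [0, 96) in steps of 16; VectorizedSize =
+// (103 / 4) * 4 = 100, so a single plain packet covers [96, 100); the
+// remaining coefficients 100..102 are handled by the scalar loop.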
+
+
+
+// Multicore strategy: the index space is partitioned and each partition is executed on a single core
+#ifdef EIGEN_USE_THREADS
+template <typename Evaluator, typename Index, bool Vectorizable>
+struct EvalRange {
+ static void run(Evaluator* evaluator_in, const Index first, const Index last) {
+ Evaluator evaluator = *evaluator_in;
+ eigen_assert(last >= first);
+ for (Index i = first; i < last; ++i) {
+ evaluator.evalScalar(i);
+ }
+ }
+
+ static Index alignBlockSize(Index size) {
+ return size;
+ }
+};
+
+template <typename Evaluator, typename Index>
+struct EvalRange<Evaluator, Index, true> {
+ static const int PacketSize = unpacket_traits<typename Evaluator::PacketReturnType>::size;
+
+ static void run(Evaluator* evaluator_in, const Index first, const Index last) {
+ Evaluator evaluator = *evaluator_in;
+ eigen_assert(last >= first);
+ Index i = first;
+ if (last - first >= PacketSize) {
+ eigen_assert(first % PacketSize == 0);
+ Index last_chunk_offset = last - 4 * PacketSize;
+ // Give the compiler a strong hint to unroll the loop. But don't insist
+ // on unrolling, because if the function is expensive the compiler should not
+ // unroll the loop at the expense of inlining.
+ for (; i <= last_chunk_offset; i += 4*PacketSize) {
+ for (Index j = 0; j < 4; j++) {
+ evaluator.evalPacket(i + j * PacketSize);
+ }
+ }
+ last_chunk_offset = last - PacketSize;
+ for (; i <= last_chunk_offset; i += PacketSize) {
+ evaluator.evalPacket(i);
+ }
+ }
+ for (; i < last; ++i) {
+ evaluator.evalScalar(i);
+ }
+ }
+
+ static Index alignBlockSize(Index size) {
+ // Align block size to packet size and account for unrolling in run above.
+ if (size >= 16 * PacketSize) {
+ return (size + 4 * PacketSize - 1) & ~(4 * PacketSize - 1);
+ }
+ // Aligning to 4 * PacketSize would increase block size by more than 25%.
+ return (size + PacketSize - 1) & ~(PacketSize - 1);
+ }
+};
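+
+// alignBlockSize example (illustrative): with PacketSize = 4, a requested
+// block size of 100 (>= 16 * PacketSize) is rounded up to a multiple of
+// 4 * PacketSize: (100 + 15) & ~15 = 112. A smaller request of 30 is only
+// rounded up to a multiple of PacketSize: (30 + 3) & ~3 = 32.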
+
+template <typename Expression, bool Vectorizable>
+class TensorExecutor<Expression, ThreadPoolDevice, Vectorizable> {
+ public:
+ typedef typename Expression::Index Index;
+ static inline void run(const Expression& expr, const ThreadPoolDevice& device)
+ {
+ typedef TensorEvaluator<Expression, ThreadPoolDevice> Evaluator;
+ Evaluator evaluator(expr, device);
+ const bool needs_assign = evaluator.evalSubExprsIfNeeded(NULL);
+ if (needs_assign)
+ {
+ const Index size = array_prod(evaluator.dimensions());
+#if !defined(EIGEN_USE_SIMPLE_THREAD_POOL)
+ device.parallelFor(size, evaluator.costPerCoeff(Vectorizable),
+ EvalRange<Evaluator, Index, Vectorizable>::alignBlockSize,
+ [&evaluator](Index first, Index last) {
+ EvalRange<Evaluator, Index, Vectorizable>::run(&evaluator, first, last);
+ });
+#else
+ size_t num_threads = device.numThreads();
+ if (num_threads > 1) {
+ num_threads = TensorCostModel<ThreadPoolDevice>::numThreads(
+ size, evaluator.costPerCoeff(Vectorizable), num_threads);
+ }
+ if (num_threads == 1) {
+ EvalRange<Evaluator, Index, Vectorizable>::run(&evaluator, 0, size);
+ } else {
+ const Index PacketSize = Vectorizable ? unpacket_traits<typename Evaluator::PacketReturnType>::size : 1;
+ Index blocksz = std::ceil<Index>(static_cast<float>(size)/num_threads) + PacketSize - 1;
+ const Index blocksize = numext::maxi<Index>(PacketSize, (blocksz - (blocksz % PacketSize)));
+ const Index numblocks = size / blocksize;
+
+ Barrier barrier(numblocks);
+ for (int i = 0; i < numblocks; ++i) {
+ device.enqueue_with_barrier(
+ &barrier, &EvalRange<Evaluator, Index, Vectorizable>::run,
+ &evaluator, i * blocksize, (i + 1) * blocksize);
+ }
+ if (numblocks * blocksize < size) {
+ EvalRange<Evaluator, Index, Vectorizable>::run(
+ &evaluator, numblocks * blocksize, size);
+ }
+ barrier.Wait();
+ }
+#endif // !defined(EIGEN_USE_SIMPLE_THREAD_POOL)
+ }
+ evaluator.cleanup();
+ }
+};
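+
+// Minimal usage sketch (illustrative; assumes EIGEN_USE_THREADS was defined
+// before including the Tensor module):
+//
+//   Eigen::ThreadPool pool(4);
+//   Eigen::ThreadPoolDevice dev(&pool, 4 /* number of cores */);
+//   Eigen::Tensor<float, 1> in(1 << 20), out(1 << 20);
+//   in.setRandom();
+//   out.device(dev) = in + in;  // dispatched to the executor above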
+#endif // EIGEN_USE_THREADS
+
+
+// GPU: the evaluation of the expression is offloaded to a GPU.
+#if defined(EIGEN_USE_GPU)
+
+template <typename Expression, bool Vectorizable>
+class TensorExecutor<Expression, GpuDevice, Vectorizable> {
+ public:
+ typedef typename Expression::Index Index;
+ static void run(const Expression& expr, const GpuDevice& device);
+};
+
+
+#if defined(__CUDACC__)
+template <typename Evaluator, typename Index, bool Vectorizable>
+struct EigenMetaKernelEval {
+ static __device__ EIGEN_ALWAYS_INLINE
+ void run(Evaluator& eval, Index first, Index last, Index step_size) {
+ for (Index i = first; i < last; i += step_size) {
+ eval.evalScalar(i);
+ }
+ }
+};
+
+template <typename Evaluator, typename Index>
+struct EigenMetaKernelEval<Evaluator, Index, true> {
+ static __device__ EIGEN_ALWAYS_INLINE
+ void run(Evaluator& eval, Index first, Index last, Index step_size) {
+ const Index PacketSize = unpacket_traits<typename Evaluator::PacketReturnType>::size;
+ const Index vectorized_size = (last / PacketSize) * PacketSize;
+ const Index vectorized_step_size = step_size * PacketSize;
+
+ // Use the vector path
+ for (Index i = first * PacketSize; i < vectorized_size;
+ i += vectorized_step_size) {
+ eval.evalPacket(i);
+ }
+ for (Index i = vectorized_size + first; i < last; i += step_size) {
+ eval.evalScalar(i);
+ }
+ }
+};
+
+template <typename Evaluator, typename Index>
+__global__ void
+__launch_bounds__(1024)
+EigenMetaKernel(Evaluator eval, Index size) {
+
+ const Index first_index = blockIdx.x * blockDim.x + threadIdx.x;
+ const Index step_size = blockDim.x * gridDim.x;
+
+ const bool vectorizable = Evaluator::PacketAccess & Evaluator::IsAligned;
+ EigenMetaKernelEval<Evaluator, Index, vectorizable>::run(eval, first_index, size, step_size);
+}
+
+/*static*/
+template <typename Expression, bool Vectorizable>
+inline void TensorExecutor<Expression, GpuDevice, Vectorizable>::run(
+ const Expression& expr, const GpuDevice& device) {
+ TensorEvaluator<Expression, GpuDevice> evaluator(expr, device);
+ const bool needs_assign = evaluator.evalSubExprsIfNeeded(NULL);
+ if (needs_assign) {
+ const int block_size = device.maxCudaThreadsPerBlock();
+ const int max_blocks = device.getNumCudaMultiProcessors() *
+ device.maxCudaThreadsPerMultiProcessor() / block_size;
+ const Index size = array_prod(evaluator.dimensions());
+    // Create at least one block to ensure we won't crash when TensorFlow calls with tensors of size 0.
+ const int num_blocks = numext::maxi<int>(numext::mini<int>(max_blocks, divup<int>(size, block_size)), 1);
+
+ LAUNCH_CUDA_KERNEL(
+ (EigenMetaKernel<TensorEvaluator<Expression, GpuDevice>, Index>),
+ num_blocks, block_size, 0, device, evaluator, size);
+ }
+ evaluator.cleanup();
+}
+
+#endif // __CUDACC__
+#endif // EIGEN_USE_GPU
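+
+// GPU usage sketch (illustrative; assumes EIGEN_USE_GPU and an nvcc
+// translation unit):
+//
+//   Eigen::CudaStreamDevice stream;
+//   Eigen::GpuDevice dev(&stream);
+//   out.device(dev) = in.square();  // launches EigenMetaKernel on the stream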
+
+// SYCL Executor policy
+#ifdef EIGEN_USE_SYCL
+
+template <typename Expression, bool Vectorizable>
+class TensorExecutor<Expression, SyclDevice, Vectorizable> {
+public:
+ static inline void run(const Expression &expr, const SyclDevice &device) {
+ // call TensorSYCL module
+ TensorSycl::run(expr, device);
+ }
+};
+
+#endif
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_EXECUTOR_H
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorExpr.h b/unsupported/Eigen/CXX11/src/Tensor/TensorExpr.h
new file mode 100644
index 000000000..85dfc7a69
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorExpr.h
@@ -0,0 +1,371 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_EXPR_H
+#define EIGEN_CXX11_TENSOR_TENSOR_EXPR_H
+
+namespace Eigen {
+
+/** \class TensorExpr
+ * \ingroup CXX11_Tensor_Module
+ *
+ * \brief Tensor expression classes.
+ *
+ * The TensorCwiseNullaryOp class applies a nullary operator to an expression.
+ * This is typically used to generate constants.
+ *
+ * The TensorCwiseUnaryOp class represents an expression where a unary operator
+ * (e.g. cwiseSqrt) is applied to an expression.
+ *
+ * The TensorCwiseBinaryOp class represents an expression where a binary
+ * operator (e.g. addition) is applied to an lhs and an rhs expression.
+ *
+ */
+namespace internal {
+template<typename NullaryOp, typename XprType>
+struct traits<TensorCwiseNullaryOp<NullaryOp, XprType> >
+ : traits<XprType>
+{
+ typedef traits<XprType> XprTraits;
+ typedef typename XprType::Scalar Scalar;
+ typedef typename XprType::Nested XprTypeNested;
+ typedef typename remove_reference<XprTypeNested>::type _XprTypeNested;
+ static const int NumDimensions = XprTraits::NumDimensions;
+ static const int Layout = XprTraits::Layout;
+
+ enum {
+ Flags = 0
+ };
+};
+
+} // end namespace internal
+
+
+
+template<typename NullaryOp, typename XprType>
+class TensorCwiseNullaryOp : public TensorBase<TensorCwiseNullaryOp<NullaryOp, XprType>, ReadOnlyAccessors>
+{
+ public:
+ typedef typename Eigen::internal::traits<TensorCwiseNullaryOp>::Scalar Scalar;
+ typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+ typedef TensorCwiseNullaryOp<NullaryOp, XprType> Nested;
+ typedef typename Eigen::internal::traits<TensorCwiseNullaryOp>::StorageKind StorageKind;
+ typedef typename Eigen::internal::traits<TensorCwiseNullaryOp>::Index Index;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorCwiseNullaryOp(const XprType& xpr, const NullaryOp& func = NullaryOp())
+ : m_xpr(xpr), m_functor(func) {}
+
+ EIGEN_DEVICE_FUNC
+ const typename internal::remove_all<typename XprType::Nested>::type&
+ nestedExpression() const { return m_xpr; }
+
+ EIGEN_DEVICE_FUNC
+ const NullaryOp& functor() const { return m_functor; }
+
+ protected:
+ typename XprType::Nested m_xpr;
+ const NullaryOp m_functor;
+};
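+
+// Nullary expressions are what the constant / generator helpers return
+// (illustrative sketch):
+//
+//   Eigen::Tensor<float, 2> t(3, 3);
+//   t = t.constant(1.0f);  // TensorCwiseNullaryOp over a scalar_constant_op
+//   t = t.random();        // TensorCwiseNullaryOp over a random generator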
+
+
+
+namespace internal {
+template<typename UnaryOp, typename XprType>
+struct traits<TensorCwiseUnaryOp<UnaryOp, XprType> >
+ : traits<XprType>
+{
+ // TODO(phli): Add InputScalar, InputPacket. Check references to
+ // current Scalar/Packet to see if the intent is Input or Output.
+ typedef typename result_of<UnaryOp(typename XprType::Scalar)>::type Scalar;
+ typedef traits<XprType> XprTraits;
+ typedef typename XprType::Nested XprTypeNested;
+ typedef typename remove_reference<XprTypeNested>::type _XprTypeNested;
+ static const int NumDimensions = XprTraits::NumDimensions;
+ static const int Layout = XprTraits::Layout;
+};
+
+template<typename UnaryOp, typename XprType>
+struct eval<TensorCwiseUnaryOp<UnaryOp, XprType>, Eigen::Dense>
+{
+ typedef const TensorCwiseUnaryOp<UnaryOp, XprType>& type;
+};
+
+template<typename UnaryOp, typename XprType>
+struct nested<TensorCwiseUnaryOp<UnaryOp, XprType>, 1, typename eval<TensorCwiseUnaryOp<UnaryOp, XprType> >::type>
+{
+ typedef TensorCwiseUnaryOp<UnaryOp, XprType> type;
+};
+
+} // end namespace internal
+
+
+
+template<typename UnaryOp, typename XprType>
+class TensorCwiseUnaryOp : public TensorBase<TensorCwiseUnaryOp<UnaryOp, XprType>, ReadOnlyAccessors>
+{
+ public:
+ // TODO(phli): Add InputScalar, InputPacket. Check references to
+ // current Scalar/Packet to see if the intent is Input or Output.
+ typedef typename Eigen::internal::traits<TensorCwiseUnaryOp>::Scalar Scalar;
+ typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
+ typedef Scalar CoeffReturnType;
+ typedef typename Eigen::internal::nested<TensorCwiseUnaryOp>::type Nested;
+ typedef typename Eigen::internal::traits<TensorCwiseUnaryOp>::StorageKind StorageKind;
+ typedef typename Eigen::internal::traits<TensorCwiseUnaryOp>::Index Index;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorCwiseUnaryOp(const XprType& xpr, const UnaryOp& func = UnaryOp())
+ : m_xpr(xpr), m_functor(func) {}
+
+ EIGEN_DEVICE_FUNC
+ const UnaryOp& functor() const { return m_functor; }
+
+ /** \returns the nested expression */
+ EIGEN_DEVICE_FUNC
+ const typename internal::remove_all<typename XprType::Nested>::type&
+ nestedExpression() const { return m_xpr; }
+
+ protected:
+ typename XprType::Nested m_xpr;
+ const UnaryOp m_functor;
+};
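+
+// Unary expressions are produced by the coefficient-wise methods
+// (illustrative sketch):
+//
+//   Eigen::Tensor<float, 1> a(8), b(8);
+//   a.setRandom();
+//   b = a.abs().sqrt();  // nested TensorCwiseUnaryOp expressions; nothing is
+//                        // evaluated until the assignment runs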
+
+
+namespace internal {
+template<typename BinaryOp, typename LhsXprType, typename RhsXprType>
+struct traits<TensorCwiseBinaryOp<BinaryOp, LhsXprType, RhsXprType> >
+{
+ // Type promotion to handle the case where the types of the lhs and the rhs
+ // are different.
+ // TODO(phli): Add Lhs/RhsScalar, Lhs/RhsPacket. Check references to
+ // current Scalar/Packet to see if the intent is Inputs or Output.
+ typedef typename result_of<
+ BinaryOp(typename LhsXprType::Scalar,
+ typename RhsXprType::Scalar)>::type Scalar;
+ typedef traits<LhsXprType> XprTraits;
+ typedef typename promote_storage_type<
+ typename traits<LhsXprType>::StorageKind,
+ typename traits<RhsXprType>::StorageKind>::ret StorageKind;
+ typedef typename promote_index_type<
+ typename traits<LhsXprType>::Index,
+ typename traits<RhsXprType>::Index>::type Index;
+ typedef typename LhsXprType::Nested LhsNested;
+ typedef typename RhsXprType::Nested RhsNested;
+ typedef typename remove_reference<LhsNested>::type _LhsNested;
+ typedef typename remove_reference<RhsNested>::type _RhsNested;
+ static const int NumDimensions = XprTraits::NumDimensions;
+ static const int Layout = XprTraits::Layout;
+
+ enum {
+ Flags = 0
+ };
+};
+
+template<typename BinaryOp, typename LhsXprType, typename RhsXprType>
+struct eval<TensorCwiseBinaryOp<BinaryOp, LhsXprType, RhsXprType>, Eigen::Dense>
+{
+ typedef const TensorCwiseBinaryOp<BinaryOp, LhsXprType, RhsXprType>& type;
+};
+
+template<typename BinaryOp, typename LhsXprType, typename RhsXprType>
+struct nested<TensorCwiseBinaryOp<BinaryOp, LhsXprType, RhsXprType>, 1, typename eval<TensorCwiseBinaryOp<BinaryOp, LhsXprType, RhsXprType> >::type>
+{
+ typedef TensorCwiseBinaryOp<BinaryOp, LhsXprType, RhsXprType> type;
+};
+
+} // end namespace internal
+
+
+
+template<typename BinaryOp, typename LhsXprType, typename RhsXprType>
+class TensorCwiseBinaryOp : public TensorBase<TensorCwiseBinaryOp<BinaryOp, LhsXprType, RhsXprType>, ReadOnlyAccessors>
+{
+ public:
+ // TODO(phli): Add Lhs/RhsScalar, Lhs/RhsPacket. Check references to
+ // current Scalar/Packet to see if the intent is Inputs or Output.
+ typedef typename Eigen::internal::traits<TensorCwiseBinaryOp>::Scalar Scalar;
+ typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
+ typedef Scalar CoeffReturnType;
+ typedef typename Eigen::internal::nested<TensorCwiseBinaryOp>::type Nested;
+ typedef typename Eigen::internal::traits<TensorCwiseBinaryOp>::StorageKind StorageKind;
+ typedef typename Eigen::internal::traits<TensorCwiseBinaryOp>::Index Index;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorCwiseBinaryOp(const LhsXprType& lhs, const RhsXprType& rhs, const BinaryOp& func = BinaryOp())
+ : m_lhs_xpr(lhs), m_rhs_xpr(rhs), m_functor(func) {}
+
+ EIGEN_DEVICE_FUNC
+ const BinaryOp& functor() const { return m_functor; }
+
+ /** \returns the nested expressions */
+ EIGEN_DEVICE_FUNC
+ const typename internal::remove_all<typename LhsXprType::Nested>::type&
+ lhsExpression() const { return m_lhs_xpr; }
+
+ EIGEN_DEVICE_FUNC
+ const typename internal::remove_all<typename RhsXprType::Nested>::type&
+ rhsExpression() const { return m_rhs_xpr; }
+
+ protected:
+ typename LhsXprType::Nested m_lhs_xpr;
+ typename RhsXprType::Nested m_rhs_xpr;
+ const BinaryOp m_functor;
+};
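+
+// Binary expressions come from the usual operators (illustrative sketch):
+//
+//   Eigen::Tensor<float, 2> a(4, 4), b(4, 4), c(4, 4);
+//   a.setRandom(); b.setRandom();
+//   c = a + b * b;  // a TensorCwiseBinaryOp<scalar_sum_op, ...> whose rhs is
+//                   // itself a TensorCwiseBinaryOp<scalar_product_op, ...>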
+
+
+namespace internal {
+template<typename TernaryOp, typename Arg1XprType, typename Arg2XprType, typename Arg3XprType>
+struct traits<TensorCwiseTernaryOp<TernaryOp, Arg1XprType, Arg2XprType, Arg3XprType> >
+{
+ // Type promotion to handle the case where the types of the args are different.
+ typedef typename result_of<
+ TernaryOp(typename Arg1XprType::Scalar,
+ typename Arg2XprType::Scalar,
+ typename Arg3XprType::Scalar)>::type Scalar;
+ typedef traits<Arg1XprType> XprTraits;
+ typedef typename traits<Arg1XprType>::StorageKind StorageKind;
+ typedef typename traits<Arg1XprType>::Index Index;
+ typedef typename Arg1XprType::Nested Arg1Nested;
+ typedef typename Arg2XprType::Nested Arg2Nested;
+ typedef typename Arg3XprType::Nested Arg3Nested;
+ typedef typename remove_reference<Arg1Nested>::type _Arg1Nested;
+ typedef typename remove_reference<Arg2Nested>::type _Arg2Nested;
+ typedef typename remove_reference<Arg3Nested>::type _Arg3Nested;
+ static const int NumDimensions = XprTraits::NumDimensions;
+ static const int Layout = XprTraits::Layout;
+
+ enum {
+ Flags = 0
+ };
+};
+
+template<typename TernaryOp, typename Arg1XprType, typename Arg2XprType, typename Arg3XprType>
+struct eval<TensorCwiseTernaryOp<TernaryOp, Arg1XprType, Arg2XprType, Arg3XprType>, Eigen::Dense>
+{
+ typedef const TensorCwiseTernaryOp<TernaryOp, Arg1XprType, Arg2XprType, Arg3XprType>& type;
+};
+
+template<typename TernaryOp, typename Arg1XprType, typename Arg2XprType, typename Arg3XprType>
+struct nested<TensorCwiseTernaryOp<TernaryOp, Arg1XprType, Arg2XprType, Arg3XprType>, 1, typename eval<TensorCwiseTernaryOp<TernaryOp, Arg1XprType, Arg2XprType, Arg3XprType> >::type>
+{
+ typedef TensorCwiseTernaryOp<TernaryOp, Arg1XprType, Arg2XprType, Arg3XprType> type;
+};
+
+} // end namespace internal
+
+
+
+template<typename TernaryOp, typename Arg1XprType, typename Arg2XprType, typename Arg3XprType>
+class TensorCwiseTernaryOp : public TensorBase<TensorCwiseTernaryOp<TernaryOp, Arg1XprType, Arg2XprType, Arg3XprType>, ReadOnlyAccessors>
+{
+ public:
+ typedef typename Eigen::internal::traits<TensorCwiseTernaryOp>::Scalar Scalar;
+ typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
+ typedef Scalar CoeffReturnType;
+ typedef typename Eigen::internal::nested<TensorCwiseTernaryOp>::type Nested;
+ typedef typename Eigen::internal::traits<TensorCwiseTernaryOp>::StorageKind StorageKind;
+ typedef typename Eigen::internal::traits<TensorCwiseTernaryOp>::Index Index;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorCwiseTernaryOp(const Arg1XprType& arg1, const Arg2XprType& arg2, const Arg3XprType& arg3, const TernaryOp& func = TernaryOp())
+ : m_arg1_xpr(arg1), m_arg2_xpr(arg2), m_arg3_xpr(arg3), m_functor(func) {}
+
+ EIGEN_DEVICE_FUNC
+ const TernaryOp& functor() const { return m_functor; }
+
+ /** \returns the nested expressions */
+ EIGEN_DEVICE_FUNC
+ const typename internal::remove_all<typename Arg1XprType::Nested>::type&
+ arg1Expression() const { return m_arg1_xpr; }
+
+ EIGEN_DEVICE_FUNC
+ const typename internal::remove_all<typename Arg2XprType::Nested>::type&
+ arg2Expression() const { return m_arg2_xpr; }
+
+ EIGEN_DEVICE_FUNC
+ const typename internal::remove_all<typename Arg3XprType::Nested>::type&
+ arg3Expression() const { return m_arg3_xpr; }
+
+ protected:
+ typename Arg1XprType::Nested m_arg1_xpr;
+ typename Arg2XprType::Nested m_arg2_xpr;
+ typename Arg3XprType::Nested m_arg3_xpr;
+ const TernaryOp m_functor;
+};
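+
+// Ternary expressions are rarer; a representative producer (illustrative
+// sketch) is the three-argument betainc() from TensorGlobalFunctions.h:
+//
+//   Eigen::Tensor<float, 1> a(8), b(8), x(8);
+//   a.setConstant(2.f); b.setConstant(3.f); x.setConstant(0.5f);
+//   Eigen::Tensor<float, 1> r = Eigen::betainc(a, b, x);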
+
+
+namespace internal {
+template<typename IfXprType, typename ThenXprType, typename ElseXprType>
+struct traits<TensorSelectOp<IfXprType, ThenXprType, ElseXprType> >
+ : traits<ThenXprType>
+{
+ typedef typename traits<ThenXprType>::Scalar Scalar;
+ typedef traits<ThenXprType> XprTraits;
+ typedef typename promote_storage_type<typename traits<ThenXprType>::StorageKind,
+ typename traits<ElseXprType>::StorageKind>::ret StorageKind;
+ typedef typename promote_index_type<typename traits<ElseXprType>::Index,
+ typename traits<ThenXprType>::Index>::type Index;
+ typedef typename IfXprType::Nested IfNested;
+ typedef typename ThenXprType::Nested ThenNested;
+ typedef typename ElseXprType::Nested ElseNested;
+ static const int NumDimensions = XprTraits::NumDimensions;
+ static const int Layout = XprTraits::Layout;
+};
+
+template<typename IfXprType, typename ThenXprType, typename ElseXprType>
+struct eval<TensorSelectOp<IfXprType, ThenXprType, ElseXprType>, Eigen::Dense>
+{
+ typedef const TensorSelectOp<IfXprType, ThenXprType, ElseXprType>& type;
+};
+
+template<typename IfXprType, typename ThenXprType, typename ElseXprType>
+struct nested<TensorSelectOp<IfXprType, ThenXprType, ElseXprType>, 1, typename eval<TensorSelectOp<IfXprType, ThenXprType, ElseXprType> >::type>
+{
+ typedef TensorSelectOp<IfXprType, ThenXprType, ElseXprType> type;
+};
+
+} // end namespace internal
+
+
+template<typename IfXprType, typename ThenXprType, typename ElseXprType>
+class TensorSelectOp : public TensorBase<TensorSelectOp<IfXprType, ThenXprType, ElseXprType>, ReadOnlyAccessors>
+{
+ public:
+ typedef typename Eigen::internal::traits<TensorSelectOp>::Scalar Scalar;
+ typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
+ typedef typename internal::promote_storage_type<typename ThenXprType::CoeffReturnType,
+ typename ElseXprType::CoeffReturnType>::ret CoeffReturnType;
+ typedef typename Eigen::internal::nested<TensorSelectOp>::type Nested;
+ typedef typename Eigen::internal::traits<TensorSelectOp>::StorageKind StorageKind;
+ typedef typename Eigen::internal::traits<TensorSelectOp>::Index Index;
+
+ EIGEN_DEVICE_FUNC
+ TensorSelectOp(const IfXprType& a_condition,
+ const ThenXprType& a_then,
+ const ElseXprType& a_else)
+ : m_condition(a_condition), m_then(a_then), m_else(a_else)
+ { }
+
+ EIGEN_DEVICE_FUNC
+ const IfXprType& ifExpression() const { return m_condition; }
+
+ EIGEN_DEVICE_FUNC
+ const ThenXprType& thenExpression() const { return m_then; }
+
+ EIGEN_DEVICE_FUNC
+ const ElseXprType& elseExpression() const { return m_else; }
+
+ protected:
+ typename IfXprType::Nested m_condition;
+ typename ThenXprType::Nested m_then;
+ typename ElseXprType::Nested m_else;
+};
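+
+// Select expressions implement the coefficient-wise conditional; e.g. a ReLU
+// (illustrative sketch):
+//
+//   Eigen::Tensor<float, 1> x(16), y(16);
+//   x.setRandom();
+//   y = (x > x.constant(0.f)).select(x, x.constant(0.f));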
+
+
+} // end namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_EXPR_H
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorFFT.h b/unsupported/Eigen/CXX11/src/Tensor/TensorFFT.h
new file mode 100644
index 000000000..08eb5595a
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorFFT.h
@@ -0,0 +1,651 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2015 Jianwei Cui <thucjw@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_FFT_H
+#define EIGEN_CXX11_TENSOR_TENSOR_FFT_H
+
+// This code requires the ability to initialize arrays of constant
+// values directly inside a class.
+#if __cplusplus >= 201103L || EIGEN_COMP_MSVC >= 1900
+
+namespace Eigen {
+
+/** \class TensorFFT
+ * \ingroup CXX11_Tensor_Module
+ *
+ * \brief Tensor FFT class.
+ *
+ * TODO:
+ * Vectorize the Cooley-Tukey and the Bluestein algorithms
+ * Add support for multithreaded evaluation
+ * Improve the performance on GPU
+ */
+
+template <bool NeedUpgrade> struct MakeComplex {
+ template <typename T>
+ EIGEN_DEVICE_FUNC
+ T operator() (const T& val) const { return val; }
+};
+
+template <> struct MakeComplex<true> {
+ template <typename T>
+ EIGEN_DEVICE_FUNC
+ std::complex<T> operator() (const T& val) const { return std::complex<T>(val, 0); }
+};
+
+template <> struct MakeComplex<false> {
+ template <typename T>
+ EIGEN_DEVICE_FUNC
+ std::complex<T> operator() (const std::complex<T>& val) const { return val; }
+};
+
+template <int ResultType> struct PartOf {
+ template <typename T> T operator() (const T& val) const { return val; }
+};
+
+template <> struct PartOf<RealPart> {
+ template <typename T> T operator() (const std::complex<T>& val) const { return val.real(); }
+};
+
+template <> struct PartOf<ImagPart> {
+ template <typename T> T operator() (const std::complex<T>& val) const { return val.imag(); }
+};
+
+namespace internal {
+template <typename FFT, typename XprType, int FFTResultType, int FFTDir>
+struct traits<TensorFFTOp<FFT, XprType, FFTResultType, FFTDir> > : public traits<XprType> {
+ typedef traits<XprType> XprTraits;
+ typedef typename NumTraits<typename XprTraits::Scalar>::Real RealScalar;
+ typedef typename std::complex<RealScalar> ComplexScalar;
+ typedef typename XprTraits::Scalar InputScalar;
+ typedef typename conditional<FFTResultType == RealPart || FFTResultType == ImagPart, RealScalar, ComplexScalar>::type OutputScalar;
+ typedef typename XprTraits::StorageKind StorageKind;
+ typedef typename XprTraits::Index Index;
+ typedef typename XprType::Nested Nested;
+ typedef typename remove_reference<Nested>::type _Nested;
+ static const int NumDimensions = XprTraits::NumDimensions;
+ static const int Layout = XprTraits::Layout;
+};
+
+template <typename FFT, typename XprType, int FFTResultType, int FFTDirection>
+struct eval<TensorFFTOp<FFT, XprType, FFTResultType, FFTDirection>, Eigen::Dense> {
+ typedef const TensorFFTOp<FFT, XprType, FFTResultType, FFTDirection>& type;
+};
+
+template <typename FFT, typename XprType, int FFTResultType, int FFTDirection>
+struct nested<TensorFFTOp<FFT, XprType, FFTResultType, FFTDirection>, 1, typename eval<TensorFFTOp<FFT, XprType, FFTResultType, FFTDirection> >::type> {
+ typedef TensorFFTOp<FFT, XprType, FFTResultType, FFTDirection> type;
+};
+
+} // end namespace internal
+
+template <typename FFT, typename XprType, int FFTResultType, int FFTDir>
+class TensorFFTOp : public TensorBase<TensorFFTOp<FFT, XprType, FFTResultType, FFTDir>, ReadOnlyAccessors> {
+ public:
+ typedef typename Eigen::internal::traits<TensorFFTOp>::Scalar Scalar;
+ typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
+ typedef typename std::complex<RealScalar> ComplexScalar;
+ typedef typename internal::conditional<FFTResultType == RealPart || FFTResultType == ImagPart, RealScalar, ComplexScalar>::type OutputScalar;
+ typedef OutputScalar CoeffReturnType;
+ typedef typename Eigen::internal::nested<TensorFFTOp>::type Nested;
+ typedef typename Eigen::internal::traits<TensorFFTOp>::StorageKind StorageKind;
+ typedef typename Eigen::internal::traits<TensorFFTOp>::Index Index;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorFFTOp(const XprType& expr, const FFT& fft)
+ : m_xpr(expr), m_fft(fft) {}
+
+ EIGEN_DEVICE_FUNC
+ const FFT& fft() const { return m_fft; }
+
+ EIGEN_DEVICE_FUNC
+ const typename internal::remove_all<typename XprType::Nested>::type& expression() const {
+ return m_xpr;
+ }
+
+ protected:
+ typename XprType::Nested m_xpr;
+ const FFT m_fft;
+};
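+
+// Typical entry point (illustrative sketch; in a dependent context the
+// `template` keyword is required before fft):
+//
+//   Eigen::Tensor<float, 2> input(32, 64);
+//   input.setRandom();
+//   Eigen::array<int, 1> dims = {{1}};  // transform along dimension 1
+//   Eigen::Tensor<std::complex<float>, 2> spectrum =
+//       input.fft<Eigen::BothParts, Eigen::FFT_FORWARD>(dims);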
+
+// Eval as rvalue
+template <typename FFT, typename ArgType, typename Device, int FFTResultType, int FFTDir>
+struct TensorEvaluator<const TensorFFTOp<FFT, ArgType, FFTResultType, FFTDir>, Device> {
+ typedef TensorFFTOp<FFT, ArgType, FFTResultType, FFTDir> XprType;
+ typedef typename XprType::Index Index;
+ static const int NumDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value;
+ typedef DSizes<Index, NumDims> Dimensions;
+ typedef typename XprType::Scalar Scalar;
+ typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
+ typedef typename std::complex<RealScalar> ComplexScalar;
+ typedef typename TensorEvaluator<ArgType, Device>::Dimensions InputDimensions;
+ typedef internal::traits<XprType> XprTraits;
+ typedef typename XprTraits::Scalar InputScalar;
+ typedef typename internal::conditional<FFTResultType == RealPart || FFTResultType == ImagPart, RealScalar, ComplexScalar>::type OutputScalar;
+ typedef OutputScalar CoeffReturnType;
+ typedef typename PacketType<OutputScalar, Device>::type PacketReturnType;
+ static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
+
+ enum {
+ IsAligned = false,
+ PacketAccess = true,
+ BlockAccess = false,
+ Layout = TensorEvaluator<ArgType, Device>::Layout,
+ CoordAccess = false,
+ RawAccess = false
+ };
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) : m_fft(op.fft()), m_impl(op.expression(), device), m_data(NULL), m_device(device) {
+ const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
+ for (int i = 0; i < NumDims; ++i) {
+ eigen_assert(input_dims[i] > 0);
+ m_dimensions[i] = input_dims[i];
+ }
+
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ m_strides[0] = 1;
+ for (int i = 1; i < NumDims; ++i) {
+ m_strides[i] = m_strides[i - 1] * m_dimensions[i - 1];
+ }
+ } else {
+ m_strides[NumDims - 1] = 1;
+ for (int i = NumDims - 2; i >= 0; --i) {
+ m_strides[i] = m_strides[i + 1] * m_dimensions[i + 1];
+ }
+ }
+ m_size = m_dimensions.TotalSize();
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const {
+ return m_dimensions;
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(OutputScalar* data) {
+ m_impl.evalSubExprsIfNeeded(NULL);
+ if (data) {
+ evalToBuf(data);
+ return false;
+ } else {
+ m_data = (CoeffReturnType*)m_device.allocate(sizeof(CoeffReturnType) * m_size);
+ evalToBuf(m_data);
+ return true;
+ }
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
+ if (m_data) {
+ m_device.deallocate(m_data);
+ m_data = NULL;
+ }
+ m_impl.cleanup();
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE CoeffReturnType coeff(Index index) const {
+ return m_data[index];
+ }
+
+ template <int LoadMode>
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE PacketReturnType
+ packet(Index index) const {
+ return internal::ploadt<PacketReturnType, LoadMode>(m_data + index);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
+ costPerCoeff(bool vectorized) const {
+ return TensorOpCost(sizeof(CoeffReturnType), 0, 0, vectorized, PacketSize);
+ }
+
+ EIGEN_DEVICE_FUNC Scalar* data() const { return m_data; }
+
+
+ private:
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalToBuf(OutputScalar* data) {
+ const bool write_to_out = internal::is_same<OutputScalar, ComplexScalar>::value;
+ ComplexScalar* buf = write_to_out ? (ComplexScalar*)data : (ComplexScalar*)m_device.allocate(sizeof(ComplexScalar) * m_size);
+
+ for (Index i = 0; i < m_size; ++i) {
+ buf[i] = MakeComplex<internal::is_same<InputScalar, RealScalar>::value>()(m_impl.coeff(i));
+ }
+
+ for (size_t i = 0; i < m_fft.size(); ++i) {
+ Index dim = m_fft[i];
+ eigen_assert(dim >= 0 && dim < NumDims);
+ Index line_len = m_dimensions[dim];
+ eigen_assert(line_len >= 1);
+ ComplexScalar* line_buf = (ComplexScalar*)m_device.allocate(sizeof(ComplexScalar) * line_len);
+ const bool is_power_of_two = isPowerOfTwo(line_len);
+ const Index good_composite = is_power_of_two ? 0 : findGoodComposite(line_len);
+ const Index log_len = is_power_of_two ? getLog2(line_len) : getLog2(good_composite);
+
+ ComplexScalar* a = is_power_of_two ? NULL : (ComplexScalar*)m_device.allocate(sizeof(ComplexScalar) * good_composite);
+ ComplexScalar* b = is_power_of_two ? NULL : (ComplexScalar*)m_device.allocate(sizeof(ComplexScalar) * good_composite);
+ ComplexScalar* pos_j_base_powered = is_power_of_two ? NULL : (ComplexScalar*)m_device.allocate(sizeof(ComplexScalar) * (line_len + 1));
+ if (!is_power_of_two) {
+ // Compute twiddle factors
+ // t_n = exp(sqrt(-1) * pi * n^2 / line_len)
+ // for n = 0, 1,..., line_len-1.
+ // For n > 2 we use the recurrence t_n = t_{n-1}^2 / t_{n-2} * t_1^2
+ pos_j_base_powered[0] = ComplexScalar(1, 0);
+ if (line_len > 1) {
+ const RealScalar pi_over_len(EIGEN_PI / line_len);
+ const ComplexScalar pos_j_base = ComplexScalar(
+ std::cos(pi_over_len), std::sin(pi_over_len));
+ pos_j_base_powered[1] = pos_j_base;
+ if (line_len > 2) {
+ const ComplexScalar pos_j_base_sq = pos_j_base * pos_j_base;
+ for (int j = 2; j < line_len + 1; ++j) {
+ pos_j_base_powered[j] = pos_j_base_powered[j - 1] *
+ pos_j_base_powered[j - 1] /
+ pos_j_base_powered[j - 2] * pos_j_base_sq;
+ }
+ }
+ }
+ }
+
+ for (Index partial_index = 0; partial_index < m_size / line_len; ++partial_index) {
+ const Index base_offset = getBaseOffsetFromIndex(partial_index, dim);
+
+ // get data into line_buf
+ const Index stride = m_strides[dim];
+ if (stride == 1) {
+ memcpy(line_buf, &buf[base_offset], line_len*sizeof(ComplexScalar));
+ } else {
+ Index offset = base_offset;
+ for (int j = 0; j < line_len; ++j, offset += stride) {
+ line_buf[j] = buf[offset];
+ }
+ }
+
+        // process the line
+ if (is_power_of_two) {
+ processDataLineCooleyTukey(line_buf, line_len, log_len);
+ }
+ else {
+ processDataLineBluestein(line_buf, line_len, good_composite, log_len, a, b, pos_j_base_powered);
+ }
+
+ // write back
+ if (FFTDir == FFT_FORWARD && stride == 1) {
+ memcpy(&buf[base_offset], line_buf, line_len*sizeof(ComplexScalar));
+ } else {
+ Index offset = base_offset;
+ const ComplexScalar div_factor = ComplexScalar(1.0 / line_len, 0);
+ for (int j = 0; j < line_len; ++j, offset += stride) {
+ buf[offset] = (FFTDir == FFT_FORWARD) ? line_buf[j] : line_buf[j] * div_factor;
+ }
+ }
+ }
+ m_device.deallocate(line_buf);
+ if (!is_power_of_two) {
+ m_device.deallocate(a);
+ m_device.deallocate(b);
+ m_device.deallocate(pos_j_base_powered);
+ }
+ }
+
+ if(!write_to_out) {
+ for (Index i = 0; i < m_size; ++i) {
+ data[i] = PartOf<FFTResultType>()(buf[i]);
+ }
+ m_device.deallocate(buf);
+ }
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static bool isPowerOfTwo(Index x) {
+ eigen_assert(x > 0);
+ return !(x & (x - 1));
+ }
+
+ // The composite number for padding, used in Bluestein's FFT algorithm
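+  // (the loop below returns the smallest power of two that is >= 2 * n - 1;
+  // e.g. a line of length n = 5 is padded to 16, the first power of two >= 9)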
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static Index findGoodComposite(Index n) {
+ Index i = 2;
+ while (i < 2 * n - 1) i *= 2;
+ return i;
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static Index getLog2(Index m) {
+ Index log2m = 0;
+ while (m >>= 1) log2m++;
+ return log2m;
+ }
+
+  // Calls the Cooley-Tukey algorithm directly; the data length must be a power of 2
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void processDataLineCooleyTukey(ComplexScalar* line_buf, Index line_len, Index log_len) {
+ eigen_assert(isPowerOfTwo(line_len));
+ scramble_FFT(line_buf, line_len);
+ compute_1D_Butterfly<FFTDir>(line_buf, line_len, log_len);
+ }
+
+  // Calls Bluestein's FFT algorithm; m is a good composite number no smaller than (2 * n - 1), used as the padding length
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void processDataLineBluestein(ComplexScalar* line_buf, Index line_len, Index good_composite, Index log_len, ComplexScalar* a, ComplexScalar* b, const ComplexScalar* pos_j_base_powered) {
+ Index n = line_len;
+ Index m = good_composite;
+ ComplexScalar* data = line_buf;
+
+ for (Index i = 0; i < n; ++i) {
+ if(FFTDir == FFT_FORWARD) {
+ a[i] = data[i] * numext::conj(pos_j_base_powered[i]);
+ }
+ else {
+ a[i] = data[i] * pos_j_base_powered[i];
+ }
+ }
+ for (Index i = n; i < m; ++i) {
+ a[i] = ComplexScalar(0, 0);
+ }
+
+ for (Index i = 0; i < n; ++i) {
+ if(FFTDir == FFT_FORWARD) {
+ b[i] = pos_j_base_powered[i];
+ }
+ else {
+ b[i] = numext::conj(pos_j_base_powered[i]);
+ }
+ }
+ for (Index i = n; i < m - n; ++i) {
+ b[i] = ComplexScalar(0, 0);
+ }
+ for (Index i = m - n; i < m; ++i) {
+ if(FFTDir == FFT_FORWARD) {
+ b[i] = pos_j_base_powered[m-i];
+ }
+ else {
+ b[i] = numext::conj(pos_j_base_powered[m-i]);
+ }
+ }
+
+ scramble_FFT(a, m);
+ compute_1D_Butterfly<FFT_FORWARD>(a, m, log_len);
+
+ scramble_FFT(b, m);
+ compute_1D_Butterfly<FFT_FORWARD>(b, m, log_len);
+
+ for (Index i = 0; i < m; ++i) {
+ a[i] *= b[i];
+ }
+
+ scramble_FFT(a, m);
+ compute_1D_Butterfly<FFT_REVERSE>(a, m, log_len);
+
+    // Do the scaling after the inverse FFT
+ for (Index i = 0; i < m; ++i) {
+ a[i] /= m;
+ }
+
+ for (Index i = 0; i < n; ++i) {
+ if(FFTDir == FFT_FORWARD) {
+ data[i] = a[i] * numext::conj(pos_j_base_powered[i]);
+ }
+ else {
+ data[i] = a[i] * pos_j_base_powered[i];
+ }
+ }
+ }
+
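+  // Bit-reversal permutation: after this call, data[i] holds the element
+  // whose original index is the bit-reverse of i (n must be a power of two).
+  // Example for n = 8: {x0,x1,x2,x3,x4,x5,x6,x7} -> {x0,x4,x2,x6,x1,x5,x3,x7}.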
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static void scramble_FFT(ComplexScalar* data, Index n) {
+ eigen_assert(isPowerOfTwo(n));
+ Index j = 1;
+ for (Index i = 1; i < n; ++i){
+ if (j > i) {
+ std::swap(data[j-1], data[i-1]);
+ }
+ Index m = n >> 1;
+ while (m >= 2 && j > m) {
+ j -= m;
+ m >>= 1;
+ }
+ j += m;
+ }
+ }
+
+ template <int Dir>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void butterfly_2(ComplexScalar* data) {
+ ComplexScalar tmp = data[1];
+ data[1] = data[0] - data[1];
+ data[0] += tmp;
+ }
+
+ template <int Dir>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void butterfly_4(ComplexScalar* data) {
+ ComplexScalar tmp[4];
+ tmp[0] = data[0] + data[1];
+ tmp[1] = data[0] - data[1];
+ tmp[2] = data[2] + data[3];
+ if (Dir == FFT_FORWARD) {
+ tmp[3] = ComplexScalar(0.0, -1.0) * (data[2] - data[3]);
+ } else {
+ tmp[3] = ComplexScalar(0.0, 1.0) * (data[2] - data[3]);
+ }
+ data[0] = tmp[0] + tmp[2];
+ data[1] = tmp[1] + tmp[3];
+ data[2] = tmp[0] - tmp[2];
+ data[3] = tmp[1] - tmp[3];
+ }
+
+ template <int Dir>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void butterfly_8(ComplexScalar* data) {
+ ComplexScalar tmp_1[8];
+ ComplexScalar tmp_2[8];
+
+ tmp_1[0] = data[0] + data[1];
+ tmp_1[1] = data[0] - data[1];
+ tmp_1[2] = data[2] + data[3];
+ if (Dir == FFT_FORWARD) {
+ tmp_1[3] = (data[2] - data[3]) * ComplexScalar(0, -1);
+ } else {
+ tmp_1[3] = (data[2] - data[3]) * ComplexScalar(0, 1);
+ }
+ tmp_1[4] = data[4] + data[5];
+ tmp_1[5] = data[4] - data[5];
+ tmp_1[6] = data[6] + data[7];
+ if (Dir == FFT_FORWARD) {
+ tmp_1[7] = (data[6] - data[7]) * ComplexScalar(0, -1);
+ } else {
+ tmp_1[7] = (data[6] - data[7]) * ComplexScalar(0, 1);
+ }
+ tmp_2[0] = tmp_1[0] + tmp_1[2];
+ tmp_2[1] = tmp_1[1] + tmp_1[3];
+ tmp_2[2] = tmp_1[0] - tmp_1[2];
+ tmp_2[3] = tmp_1[1] - tmp_1[3];
+ tmp_2[4] = tmp_1[4] + tmp_1[6];
+// SQRT2DIV2 = sqrt(2)/2
+#define SQRT2DIV2 0.7071067811865476
+ if (Dir == FFT_FORWARD) {
+ tmp_2[5] = (tmp_1[5] + tmp_1[7]) * ComplexScalar(SQRT2DIV2, -SQRT2DIV2);
+ tmp_2[6] = (tmp_1[4] - tmp_1[6]) * ComplexScalar(0, -1);
+ tmp_2[7] = (tmp_1[5] - tmp_1[7]) * ComplexScalar(-SQRT2DIV2, -SQRT2DIV2);
+ } else {
+ tmp_2[5] = (tmp_1[5] + tmp_1[7]) * ComplexScalar(SQRT2DIV2, SQRT2DIV2);
+ tmp_2[6] = (tmp_1[4] - tmp_1[6]) * ComplexScalar(0, 1);
+ tmp_2[7] = (tmp_1[5] - tmp_1[7]) * ComplexScalar(-SQRT2DIV2, SQRT2DIV2);
+ }
+ data[0] = tmp_2[0] + tmp_2[4];
+ data[1] = tmp_2[1] + tmp_2[5];
+ data[2] = tmp_2[2] + tmp_2[6];
+ data[3] = tmp_2[3] + tmp_2[7];
+ data[4] = tmp_2[0] - tmp_2[4];
+ data[5] = tmp_2[1] - tmp_2[5];
+ data[6] = tmp_2[2] - tmp_2[6];
+ data[7] = tmp_2[3] - tmp_2[7];
+ }
+
+ template <int Dir>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void butterfly_1D_merge(
+ ComplexScalar* data, Index n, Index n_power_of_2) {
+ // Original code:
+ // RealScalar wtemp = std::sin(M_PI/n);
+ // RealScalar wpi = -std::sin(2 * M_PI/n);
+ const RealScalar wtemp = m_sin_PI_div_n_LUT[n_power_of_2];
+ const RealScalar wpi = (Dir == FFT_FORWARD)
+ ? m_minus_sin_2_PI_div_n_LUT[n_power_of_2]
+ : -m_minus_sin_2_PI_div_n_LUT[n_power_of_2];
+
+ const ComplexScalar wp(wtemp, wpi);
+ const ComplexScalar wp_one = wp + ComplexScalar(1, 0);
+ const ComplexScalar wp_one_2 = wp_one * wp_one;
+ const ComplexScalar wp_one_3 = wp_one_2 * wp_one;
+ const ComplexScalar wp_one_4 = wp_one_3 * wp_one;
+ const Index n2 = n / 2;
+ ComplexScalar w(1.0, 0.0);
+ for (Index i = 0; i < n2; i += 4) {
+ ComplexScalar temp0(data[i + n2] * w);
+ ComplexScalar temp1(data[i + 1 + n2] * w * wp_one);
+ ComplexScalar temp2(data[i + 2 + n2] * w * wp_one_2);
+ ComplexScalar temp3(data[i + 3 + n2] * w * wp_one_3);
+ w = w * wp_one_4;
+
+ data[i + n2] = data[i] - temp0;
+ data[i] += temp0;
+
+ data[i + 1 + n2] = data[i + 1] - temp1;
+ data[i + 1] += temp1;
+
+ data[i + 2 + n2] = data[i + 2] - temp2;
+ data[i + 2] += temp2;
+
+ data[i + 3 + n2] = data[i + 3] - temp3;
+ data[i + 3] += temp3;
+ }
+ }
+
+ template <int Dir>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void compute_1D_Butterfly(
+ ComplexScalar* data, Index n, Index n_power_of_2) {
+ eigen_assert(isPowerOfTwo(n));
+ if (n > 8) {
+ compute_1D_Butterfly<Dir>(data, n / 2, n_power_of_2 - 1);
+ compute_1D_Butterfly<Dir>(data + n / 2, n / 2, n_power_of_2 - 1);
+ butterfly_1D_merge<Dir>(data, n, n_power_of_2);
+ } else if (n == 8) {
+ butterfly_8<Dir>(data);
+ } else if (n == 4) {
+ butterfly_4<Dir>(data);
+ } else if (n == 2) {
+ butterfly_2<Dir>(data);
+ }
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index getBaseOffsetFromIndex(Index index, Index omitted_dim) const {
+ Index result = 0;
+
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ for (int i = NumDims - 1; i > omitted_dim; --i) {
+ const Index partial_m_stride = m_strides[i] / m_dimensions[omitted_dim];
+ const Index idx = index / partial_m_stride;
+ index -= idx * partial_m_stride;
+ result += idx * m_strides[i];
+ }
+ result += index;
+ }
+ else {
+ for (Index i = 0; i < omitted_dim; ++i) {
+ const Index partial_m_stride = m_strides[i] / m_dimensions[omitted_dim];
+ const Index idx = index / partial_m_stride;
+ index -= idx * partial_m_stride;
+ result += idx * m_strides[i];
+ }
+ result += index;
+ }
+    // The coordinate along omitted_dim is intentionally left undetermined at this step.
+ return result;
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index getIndexFromOffset(Index base, Index omitted_dim, Index offset) const {
+    Index result = base + offset * m_strides[omitted_dim];
+ return result;
+ }
+
+ protected:
+ Index m_size;
+ const FFT& m_fft;
+ Dimensions m_dimensions;
+ array<Index, NumDims> m_strides;
+ TensorEvaluator<ArgType, Device> m_impl;
+ CoeffReturnType* m_data;
+ const Device& m_device;
+
+ // This will support a maximum FFT size of 2^32 for each dimension
+  // m_sin_PI_div_n_LUT[i] = -2 * pow(sin(M_PI / pow(2, i)), 2);
+ const RealScalar m_sin_PI_div_n_LUT[32] = {
+ RealScalar(0.0),
+ RealScalar(-2),
+ RealScalar(-0.999999999999999),
+ RealScalar(-0.292893218813453),
+ RealScalar(-0.0761204674887130),
+ RealScalar(-0.0192147195967696),
+ RealScalar(-0.00481527332780311),
+ RealScalar(-0.00120454379482761),
+ RealScalar(-3.01181303795779e-04),
+ RealScalar(-7.52981608554592e-05),
+ RealScalar(-1.88247173988574e-05),
+ RealScalar(-4.70619042382852e-06),
+ RealScalar(-1.17654829809007e-06),
+ RealScalar(-2.94137117780840e-07),
+ RealScalar(-7.35342821488550e-08),
+ RealScalar(-1.83835707061916e-08),
+ RealScalar(-4.59589268710903e-09),
+ RealScalar(-1.14897317243732e-09),
+ RealScalar(-2.87243293150586e-10),
+    RealScalar(-7.18108232902250e-11),
+ RealScalar(-1.79527058227174e-11),
+ RealScalar(-4.48817645568941e-12),
+ RealScalar(-1.12204411392298e-12),
+ RealScalar(-2.80511028480785e-13),
+ RealScalar(-7.01277571201985e-14),
+ RealScalar(-1.75319392800498e-14),
+ RealScalar(-4.38298482001247e-15),
+ RealScalar(-1.09574620500312e-15),
+ RealScalar(-2.73936551250781e-16),
+ RealScalar(-6.84841378126949e-17),
+ RealScalar(-1.71210344531737e-17),
+ RealScalar(-4.28025861329343e-18)
+ };
+
+ // m_minus_sin_2_PI_div_n_LUT[i] = -std::sin(2 * M_PI / std::pow(2,i));
+ const RealScalar m_minus_sin_2_PI_div_n_LUT[32] = {
+ RealScalar(0.0),
+ RealScalar(0.0),
+ RealScalar(-1.00000000000000e+00),
+ RealScalar(-7.07106781186547e-01),
+ RealScalar(-3.82683432365090e-01),
+ RealScalar(-1.95090322016128e-01),
+ RealScalar(-9.80171403295606e-02),
+ RealScalar(-4.90676743274180e-02),
+ RealScalar(-2.45412285229123e-02),
+ RealScalar(-1.22715382857199e-02),
+ RealScalar(-6.13588464915448e-03),
+ RealScalar(-3.06795676296598e-03),
+ RealScalar(-1.53398018628477e-03),
+ RealScalar(-7.66990318742704e-04),
+ RealScalar(-3.83495187571396e-04),
+ RealScalar(-1.91747597310703e-04),
+ RealScalar(-9.58737990959773e-05),
+ RealScalar(-4.79368996030669e-05),
+ RealScalar(-2.39684498084182e-05),
+ RealScalar(-1.19842249050697e-05),
+ RealScalar(-5.99211245264243e-06),
+ RealScalar(-2.99605622633466e-06),
+ RealScalar(-1.49802811316901e-06),
+ RealScalar(-7.49014056584716e-07),
+ RealScalar(-3.74507028292384e-07),
+ RealScalar(-1.87253514146195e-07),
+ RealScalar(-9.36267570730981e-08),
+ RealScalar(-4.68133785365491e-08),
+ RealScalar(-2.34066892682746e-08),
+ RealScalar(-1.17033446341373e-08),
+ RealScalar(-5.85167231706864e-09),
+ RealScalar(-2.92583615853432e-09)
+ };
+};
+
+} // end namespace Eigen
+
+#endif // __cplusplus >= 201103L || EIGEN_COMP_MSVC >= 1900
+
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_FFT_H
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorFixedSize.h b/unsupported/Eigen/CXX11/src/Tensor/TensorFixedSize.h
new file mode 100644
index 000000000..fcee5f60d
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorFixedSize.h
@@ -0,0 +1,389 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_FIXED_SIZE_H
+#define EIGEN_CXX11_TENSOR_TENSOR_FIXED_SIZE_H
+
+namespace Eigen {
+
+/** \class TensorFixedSize
+ * \ingroup CXX11_Tensor_Module
+ *
+ * \brief The fixed-size version of the tensor class.
+ *
+ * The fixed-size equivalent of
+ *   Eigen::Tensor<float, 3> t(3, 5, 7);
+ * is
+ *   Eigen::TensorFixedSize<float, Sizes<3,5,7>> t;
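+ *
+ * A slightly fuller sketch (illustrative; the dimensions are compile-time
+ * constants, so the storage needs no dynamic allocation):
+ *   Eigen::TensorFixedSize<float, Eigen::Sizes<3, 5, 7>> t;
+ *   t.setZero();
+ *   t(1, 2, 3) = 4.f;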
+ */
+
+template<typename Scalar_, typename Dimensions_, int Options_, typename IndexType>
+class TensorFixedSize : public TensorBase<TensorFixedSize<Scalar_, Dimensions_, Options_, IndexType> >
+{
+ public:
+ typedef TensorFixedSize<Scalar_, Dimensions_, Options_, IndexType> Self;
+ typedef TensorBase<TensorFixedSize<Scalar_, Dimensions_, Options_, IndexType> > Base;
+ typedef typename Eigen::internal::nested<Self>::type Nested;
+ typedef typename internal::traits<Self>::StorageKind StorageKind;
+ typedef typename internal::traits<Self>::Index Index;
+ typedef Scalar_ Scalar;
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+ typedef typename Base::CoeffReturnType CoeffReturnType;
+
+ static const int Options = Options_;
+
+ enum {
+ IsAligned = bool(EIGEN_MAX_ALIGN_BYTES>0),
+ Layout = Options_ & RowMajor ? RowMajor : ColMajor,
+ CoordAccess = true,
+ RawAccess = true
+ };
+
+ typedef Dimensions_ Dimensions;
+ static const std::size_t NumIndices = Dimensions::count;
+
+ protected:
+ TensorStorage<Scalar, Dimensions, Options> m_storage;
+
+ public:
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rank() const { return NumIndices; }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index dimension(std::size_t n) const { return m_storage.dimensions()[n]; }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_storage.dimensions(); }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index size() const { return m_storage.size(); }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar *data() { return m_storage.data(); }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar *data() const { return m_storage.data(); }
+
+ // This makes EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
+ // work, because that uses base().coeffRef() - and we don't yet
+ // implement a similar class hierarchy
+ inline Self& base() { return *this; }
+ inline const Self& base() const { return *this; }
+
+#if EIGEN_HAS_VARIADIC_TEMPLATES
+ template<typename... IndexTypes>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff(Index firstIndex, IndexTypes... otherIndices) const
+ {
+ // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
+ EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
+ return coeff(array<Index, NumIndices>{{firstIndex, otherIndices...}});
+ }
+#endif
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const Scalar& coeff(const array<Index, NumIndices>& indices) const
+ {
+ eigen_internal_assert(checkIndexRange(indices));
+ return m_storage.data()[linearizedIndex(indices)];
+ }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const Scalar& coeff(Index index) const
+ {
+ eigen_internal_assert(index >= 0 && index < size());
+ return m_storage.data()[index];
+ }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const Scalar& coeff() const
+ {
+ EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
+ return m_storage.data()[0];
+ }
+
+
+#if EIGEN_HAS_VARIADIC_TEMPLATES
+ template<typename... IndexTypes>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index firstIndex, IndexTypes... otherIndices)
+ {
+ // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
+ EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
+ return coeffRef(array<Index, NumIndices>{{firstIndex, otherIndices...}});
+ }
+#endif
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Scalar& coeffRef(const array<Index, NumIndices>& indices)
+ {
+ eigen_internal_assert(checkIndexRange(indices));
+ return m_storage.data()[linearizedIndex(indices)];
+ }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Scalar& coeffRef(Index index)
+ {
+ eigen_internal_assert(index >= 0 && index < size());
+ return m_storage.data()[index];
+ }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Scalar& coeffRef()
+ {
+ EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
+ return m_storage.data()[0];
+ }
+
+#if EIGEN_HAS_VARIADIC_TEMPLATES
+ template<typename... IndexTypes>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()(Index firstIndex, IndexTypes... otherIndices) const
+ {
+ // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
+ EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
+ return this->operator()(array<Index, NumIndices>{{firstIndex, otherIndices...}});
+ }
+#else
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1) const
+ {
+ if (Options&RowMajor) {
+ const Index index = i1 + i0 * m_storage.dimensions()[1];
+ return m_storage.data()[index];
+ } else {
+ const Index index = i0 + i1 * m_storage.dimensions()[0];
+ return m_storage.data()[index];
+ }
+ }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2) const
+ {
+ if (Options&RowMajor) {
+ const Index index = i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0);
+ return m_storage.data()[index];
+ } else {
+ const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * i2);
+ return m_storage.data()[index];
+ }
+ }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2, Index i3) const
+ {
+ if (Options&RowMajor) {
+ const Index index = i3 + m_storage.dimensions()[3] * (i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0));
+ return m_storage.data()[index];
+ } else {
+ const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * (i2 + m_storage.dimensions()[2] * i3));
+ return m_storage.data()[index];
+ }
+ }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2, Index i3, Index i4) const
+ {
+ if (Options&RowMajor) {
+ const Index index = i4 + m_storage.dimensions()[4] * (i3 + m_storage.dimensions()[3] * (i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0)));
+ return m_storage.data()[index];
+ } else {
+ const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * (i2 + m_storage.dimensions()[2] * (i3 + m_storage.dimensions()[3] * i4)));
+ return m_storage.data()[index];
+ }
+ }
+#endif
+
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const Scalar& operator()(const array<Index, NumIndices>& indices) const
+ {
+ eigen_assert(checkIndexRange(indices));
+ return coeff(indices);
+ }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const Scalar& operator()(Index index) const
+ {
+ eigen_internal_assert(index >= 0 && index < size());
+ return coeff(index);
+ }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const Scalar& operator()() const
+ {
+ EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
+ return coeff();
+ }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const Scalar& operator[](Index index) const
+ {
+ // The bracket operator is only for vectors, use the parenthesis operator instead.
+ EIGEN_STATIC_ASSERT(NumIndices == 1, YOU_MADE_A_PROGRAMMING_MISTAKE);
+ return coeff(index);
+ }
+
+#if EIGEN_HAS_VARIADIC_TEMPLATES
+ template<typename... IndexTypes>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()(Index firstIndex, IndexTypes... otherIndices)
+ {
+ // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
+ EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
+ return operator()(array<Index, NumIndices>{{firstIndex, otherIndices...}});
+ }
+#else
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1)
+ {
+ if (Options&RowMajor) {
+ const Index index = i1 + i0 * m_storage.dimensions()[1];
+ return m_storage.data()[index];
+ } else {
+ const Index index = i0 + i1 * m_storage.dimensions()[0];
+ return m_storage.data()[index];
+ }
+ }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2)
+ {
+ if (Options&RowMajor) {
+ const Index index = i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0);
+ return m_storage.data()[index];
+ } else {
+ const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * i2);
+ return m_storage.data()[index];
+ }
+ }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2, Index i3)
+ {
+ if (Options&RowMajor) {
+ const Index index = i3 + m_storage.dimensions()[3] * (i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0));
+ return m_storage.data()[index];
+ } else {
+ const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * (i2 + m_storage.dimensions()[2] * i3));
+ return m_storage.data()[index];
+ }
+ }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2, Index i3, Index i4)
+ {
+ if (Options&RowMajor) {
+ const Index index = i4 + m_storage.dimensions()[4] * (i3 + m_storage.dimensions()[3] * (i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0)));
+ return m_storage.data()[index];
+ } else {
+ const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * (i2 + m_storage.dimensions()[2] * (i3 + m_storage.dimensions()[3] * i4)));
+ return m_storage.data()[index];
+ }
+ }
+#endif
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Scalar& operator()(const array<Index, NumIndices>& indices)
+ {
+ eigen_assert(checkIndexRange(indices));
+ return coeffRef(indices);
+ }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Scalar& operator()(Index index)
+ {
+ eigen_assert(index >= 0 && index < size());
+ return coeffRef(index);
+ }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Scalar& operator()()
+ {
+ EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
+ return coeffRef();
+ }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Scalar& operator[](Index index)
+ {
+    // The bracket operator is only for vectors, use the parenthesis operator instead.
+    EIGEN_STATIC_ASSERT(NumIndices == 1, YOU_MADE_A_PROGRAMMING_MISTAKE);
+ return coeffRef(index);
+ }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE TensorFixedSize()
+ : m_storage()
+ {
+ }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE TensorFixedSize(const Self& other)
+ : m_storage(other.m_storage)
+ {
+ }
+
+#if EIGEN_HAS_RVALUE_REFERENCES
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorFixedSize(Self&& other)
+ : m_storage(other.m_storage)
+ {
+ }
+#endif
+
+ template<typename OtherDerived>
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE TensorFixedSize(const TensorBase<OtherDerived, ReadOnlyAccessors>& other)
+ {
+ typedef TensorAssignOp<TensorFixedSize, const OtherDerived> Assign;
+ Assign assign(*this, other.derived());
+ internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
+ }
+ template<typename OtherDerived>
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE TensorFixedSize(const TensorBase<OtherDerived, WriteAccessors>& other)
+ {
+ typedef TensorAssignOp<TensorFixedSize, const OtherDerived> Assign;
+ Assign assign(*this, other.derived());
+ internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
+ }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE TensorFixedSize& operator=(const TensorFixedSize& other)
+ {
+ // FIXME: check that the dimensions of other match the dimensions of *this.
+ // Unfortunately this isn't possible yet when the rhs is an expression.
+ typedef TensorAssignOp<Self, const TensorFixedSize> Assign;
+ Assign assign(*this, other);
+ internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
+ return *this;
+ }
+ template<typename OtherDerived>
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE TensorFixedSize& operator=(const OtherDerived& other)
+ {
+ // FIXME: check that the dimensions of other match the dimensions of *this.
+ // Unfortunately this isn't possible yet when the rhs is an expression.
+ typedef TensorAssignOp<Self, const OtherDerived> Assign;
+ Assign assign(*this, other);
+ internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
+ return *this;
+ }
+
+ protected:
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE bool checkIndexRange(const array<Index, NumIndices>& /*indices*/) const
+ {
+ using internal::array_apply_and_reduce;
+ using internal::array_zip_and_reduce;
+ using internal::greater_equal_zero_op;
+ using internal::logical_and_op;
+ using internal::lesser_op;
+
+ return true;
+ // check whether the indices are all >= 0
+ /* array_apply_and_reduce<logical_and_op, greater_equal_zero_op>(indices) &&
+ // check whether the indices fit in the dimensions
+ array_zip_and_reduce<logical_and_op, lesser_op>(indices, m_storage.dimensions());*/
+ }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Index linearizedIndex(const array<Index, NumIndices>& indices) const
+ {
+ if (Options&RowMajor) {
+ return m_storage.dimensions().IndexOfRowMajor(indices);
+ } else {
+ return m_storage.dimensions().IndexOfColMajor(indices);
+ }
+ }
+};
+
+
+} // end namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_FIXED_SIZE_H
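A minimal usage sketch of the class above, assuming only the public
unsupported Tensor header; the 2x3 shape is an illustrative choice:

    #include <unsupported/Eigen/CXX11/Tensor>
    #include <iostream>

    int main() {
      // Rank-2 float tensor whose 2x3 shape is fixed at compile time,
      // so the storage can live on the stack.
      Eigen::TensorFixedSize<float, Eigen::Sizes<2, 3> > t;
      t.setZero();
      t(0, 0) = 1.0f;  // the rank-checked operator() defined above
      t(1, 2) = 6.0f;
      std::cout << t(1, 2) << "\n";  // prints 6
      return 0;
    }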
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorForcedEval.h b/unsupported/Eigen/CXX11/src/Tensor/TensorForcedEval.h
new file mode 100644
index 000000000..bbd5eb374
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorForcedEval.h
@@ -0,0 +1,167 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_FORCED_EVAL_H
+#define EIGEN_CXX11_TENSOR_TENSOR_FORCED_EVAL_H
+
+namespace Eigen {
+
+/** \class TensorForcedEval
+ * \ingroup CXX11_Tensor_Module
+ *
+ * \brief Tensor forced evaluation class.
+ *
+ * Forces its argument expression to be evaluated into a temporary buffer
+ * that the rest of the expression tree can then read from directly.
+ */
+/// The template <class> class MakePointer_ parameter is added to convert the host pointer to the device pointer.
+/// It is needed because T* is not allowed for our device compiler.
+/// To reuse the same evaluator functions, that type has to be converted to our pointer T,
+/// which is done through the MakePointer_ class. By default the Type in MakePointer_<T> is T*,
+/// so adding this defaulted parameter converts the type without breaking any existing code.
+namespace internal {
+template<typename XprType, template <class> class MakePointer_>
+struct traits<TensorForcedEvalOp<XprType, MakePointer_> >
+{
+ // Type promotion to handle the case where the types of the lhs and the rhs are different.
+ typedef typename XprType::Scalar Scalar;
+ typedef traits<XprType> XprTraits;
+ typedef typename traits<XprType>::StorageKind StorageKind;
+ typedef typename traits<XprType>::Index Index;
+ typedef typename XprType::Nested Nested;
+ typedef typename remove_reference<Nested>::type _Nested;
+ static const int NumDimensions = XprTraits::NumDimensions;
+ static const int Layout = XprTraits::Layout;
+
+ enum {
+ Flags = 0
+ };
+ template <class T> struct MakePointer {
+    // Intermediate typedef to work around an MSVC issue.
+ typedef MakePointer_<T> MakePointerT;
+ typedef typename MakePointerT::Type Type;
+ };
+};
+
+template<typename XprType, template <class> class MakePointer_>
+struct eval<TensorForcedEvalOp<XprType, MakePointer_>, Eigen::Dense>
+{
+ typedef const TensorForcedEvalOp<XprType, MakePointer_>& type;
+};
+
+template<typename XprType, template <class> class MakePointer_>
+struct nested<TensorForcedEvalOp<XprType, MakePointer_>, 1, typename eval<TensorForcedEvalOp<XprType, MakePointer_> >::type>
+{
+ typedef TensorForcedEvalOp<XprType, MakePointer_> type;
+};
+
+} // end namespace internal
+
+
+
+template<typename XprType, template <class> class MakePointer_>
+class TensorForcedEvalOp : public TensorBase<TensorForcedEvalOp<XprType, MakePointer_>, ReadOnlyAccessors>
+{
+ public:
+ typedef typename Eigen::internal::traits<TensorForcedEvalOp>::Scalar Scalar;
+ typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
+ typedef typename internal::remove_const<typename XprType::CoeffReturnType>::type CoeffReturnType;
+ typedef typename Eigen::internal::nested<TensorForcedEvalOp>::type Nested;
+ typedef typename Eigen::internal::traits<TensorForcedEvalOp>::StorageKind StorageKind;
+ typedef typename Eigen::internal::traits<TensorForcedEvalOp>::Index Index;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorForcedEvalOp(const XprType& expr)
+ : m_xpr(expr) {}
+
+ EIGEN_DEVICE_FUNC
+ const typename internal::remove_all<typename XprType::Nested>::type&
+ expression() const { return m_xpr; }
+
+ protected:
+ typename XprType::Nested m_xpr;
+};
+
+
+template<typename ArgType, typename Device, template <class> class MakePointer_>
+struct TensorEvaluator<const TensorForcedEvalOp<ArgType, MakePointer_>, Device>
+{
+ typedef TensorForcedEvalOp<ArgType, MakePointer_> XprType;
+ typedef typename ArgType::Scalar Scalar;
+ typedef typename TensorEvaluator<ArgType, Device>::Dimensions Dimensions;
+ typedef typename XprType::Index Index;
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+ typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
+ static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
+
+ enum {
+ IsAligned = true,
+ PacketAccess = (PacketSize > 1),
+ Layout = TensorEvaluator<ArgType, Device>::Layout,
+ RawAccess = true
+ };
+
+ EIGEN_DEVICE_FUNC TensorEvaluator(const XprType& op, const Device& device)
+      /// the expression is also stored in m_op for the sycl backend
+ : m_impl(op.expression(), device), m_op(op.expression()), m_device(device), m_buffer(NULL)
+ { }
+
+ EIGEN_DEVICE_FUNC const Dimensions& dimensions() const { return m_impl.dimensions(); }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType*) {
+ const Index numValues = internal::array_prod(m_impl.dimensions());
+ m_buffer = (CoeffReturnType*)m_device.allocate(numValues * sizeof(CoeffReturnType));
+    // Initialize the memory in case we're dealing with non-POD types.
+ if (NumTraits<CoeffReturnType>::RequireInitialization) {
+ for (Index i = 0; i < numValues; ++i) {
+ new(m_buffer+i) CoeffReturnType();
+ }
+ }
+ typedef TensorEvalToOp< const typename internal::remove_const<ArgType>::type > EvalTo;
+ EvalTo evalToTmp(m_buffer, m_op);
+ const bool PacketAccess = internal::IsVectorizable<Device, const ArgType>::value;
+ internal::TensorExecutor<const EvalTo, typename internal::remove_const<Device>::type, PacketAccess>::run(evalToTmp, m_device);
+ return true;
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
+ m_device.deallocate(m_buffer);
+ m_buffer = NULL;
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
+ {
+ return m_buffer[index];
+ }
+
+ template<int LoadMode>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
+ {
+ return internal::ploadt<PacketReturnType, LoadMode>(m_buffer + index);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
+ return TensorOpCost(sizeof(CoeffReturnType), 0, 0, vectorized, PacketSize);
+ }
+
+ EIGEN_DEVICE_FUNC typename MakePointer<Scalar>::Type data() const { return m_buffer; }
+
+ /// required by sycl in order to extract the sycl accessor
+ const TensorEvaluator<ArgType, Device>& impl() { return m_impl; }
+ /// used by sycl in order to build the sycl buffer
+  const Device& device() const { return m_device; }
+ private:
+ TensorEvaluator<ArgType, Device> m_impl;
+ const ArgType m_op;
+ const Device& m_device;
+ typename MakePointer<CoeffReturnType>::Type m_buffer;
+};
+
+
+} // end namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_FORCED_EVAL_H
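A minimal sketch of when forced evaluation helps, assuming the usual
TensorBase::eval() entry point that builds this op:

    #include <unsupported/Eigen/CXX11/Tensor>

    Eigen::Tensor<float, 2> squared_sum(const Eigen::Tensor<float, 2>& a,
                                        const Eigen::Tensor<float, 2>& b) {
      // Without eval() the sum would be recomputed for each coefficient of
      // the product; eval() materializes it once into the temporary buffer
      // allocated by the evaluator above.
      auto sum = (a + b).eval();
      Eigen::Tensor<float, 2> result = sum * sum;
      return result;
    }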
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h b/unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h
new file mode 100644
index 000000000..52b803d7f
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h
@@ -0,0 +1,109 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_FORWARD_DECLARATIONS_H
+#define EIGEN_CXX11_TENSOR_TENSOR_FORWARD_DECLARATIONS_H
+
+namespace Eigen {
+
+// The MakePointer class is used as a container for the address space of the
+// pointer on the host and on the device. On the host side it generates the T*
+// pointer, and when EIGEN_USE_SYCL is used it constructs a buffer with a
+// map_allocator to T* m_data on the host. It is always called on the device.
+// The specialisation of MakePointer that creates the sycl buffer with
+// map_allocator is provided by the SYCL backend.
+template<typename T> struct MakePointer {
+ typedef T* Type;
+};
+
+template<typename PlainObjectType, int Options_ = Unaligned, template <class> class MakePointer_ = MakePointer> class TensorMap;
+template<typename Scalar_, int NumIndices_, int Options_ = 0, typename IndexType = DenseIndex> class Tensor;
+template<typename Scalar_, typename Dimensions, int Options_ = 0, typename IndexType = DenseIndex> class TensorFixedSize;
+template<typename PlainObjectType> class TensorRef;
+template<typename Derived, int AccessLevel> class TensorBase;
+
+template<typename NullaryOp, typename PlainObjectType> class TensorCwiseNullaryOp;
+template<typename UnaryOp, typename XprType> class TensorCwiseUnaryOp;
+template<typename BinaryOp, typename LeftXprType, typename RightXprType> class TensorCwiseBinaryOp;
+template<typename TernaryOp, typename Arg1XprType, typename Arg2XprType, typename Arg3XprType> class TensorCwiseTernaryOp;
+template<typename IfXprType, typename ThenXprType, typename ElseXprType> class TensorSelectOp;
+template<typename Op, typename Dims, typename XprType, template <class> class MakePointer_ = MakePointer > class TensorReductionOp;
+template<typename XprType> class TensorIndexTupleOp;
+template<typename ReduceOp, typename Dims, typename XprType> class TensorTupleReducerOp;
+template<typename Axis, typename LeftXprType, typename RightXprType> class TensorConcatenationOp;
+template<typename Dimensions, typename LeftXprType, typename RightXprType> class TensorContractionOp;
+template<typename TargetType, typename XprType> class TensorConversionOp;
+template<typename Dimensions, typename InputXprType, typename KernelXprType> class TensorConvolutionOp;
+template<typename FFT, typename XprType, int FFTDataType, int FFTDirection> class TensorFFTOp;
+template<typename PatchDim, typename XprType> class TensorPatchOp;
+template<DenseIndex Rows, DenseIndex Cols, typename XprType> class TensorImagePatchOp;
+template<DenseIndex Planes, DenseIndex Rows, DenseIndex Cols, typename XprType> class TensorVolumePatchOp;
+template<typename Broadcast, typename XprType> class TensorBroadcastingOp;
+template<DenseIndex DimId, typename XprType> class TensorChippingOp;
+template<typename NewDimensions, typename XprType> class TensorReshapingOp;
+template<typename XprType> class TensorLayoutSwapOp;
+template<typename StartIndices, typename Sizes, typename XprType> class TensorSlicingOp;
+template<typename ReverseDimensions, typename XprType> class TensorReverseOp;
+template<typename PaddingDimensions, typename XprType> class TensorPaddingOp;
+template<typename Shuffle, typename XprType> class TensorShufflingOp;
+template<typename Strides, typename XprType> class TensorStridingOp;
+template<typename StartIndices, typename StopIndices, typename Strides, typename XprType> class TensorStridingSlicingOp;
+template<typename Strides, typename XprType> class TensorInflationOp;
+template<typename Generator, typename XprType> class TensorGeneratorOp;
+template<typename LeftXprType, typename RightXprType> class TensorAssignOp;
+template<typename Op, typename XprType> class TensorScanOp;
+
+template<typename CustomUnaryFunc, typename XprType> class TensorCustomUnaryOp;
+template<typename CustomBinaryFunc, typename LhsXprType, typename RhsXprType> class TensorCustomBinaryOp;
+
+template<typename XprType, template <class> class MakePointer_ = MakePointer> class TensorEvalToOp;
+template<typename XprType, template <class> class MakePointer_ = MakePointer> class TensorForcedEvalOp;
+
+template<typename ExpressionType, typename DeviceType> class TensorDevice;
+template<typename Derived, typename Device> struct TensorEvaluator;
+
+struct DefaultDevice;
+struct ThreadPoolDevice;
+struct GpuDevice;
+struct SyclDevice;
+
+enum FFTResultType {
+ RealPart = 0,
+ ImagPart = 1,
+ BothParts = 2
+};
+
+enum FFTDirection {
+ FFT_FORWARD = 0,
+ FFT_REVERSE = 1
+};
+
+
+namespace internal {
+
+template <typename Device, typename Expression>
+struct IsVectorizable {
+ static const bool value = TensorEvaluator<Expression, Device>::PacketAccess;
+};
+
+template <typename Expression>
+struct IsVectorizable<GpuDevice, Expression> {
+ static const bool value = TensorEvaluator<Expression, GpuDevice>::PacketAccess &&
+ TensorEvaluator<Expression, GpuDevice>::IsAligned;
+};
+
+template <typename Expression, typename Device,
+ bool Vectorizable = IsVectorizable<Device, Expression>::value>
+class TensorExecutor;
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_FORWARD_DECLARATIONS_H
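A hypothetical sketch of the MakePointer_ customization point declared
above; MyDevicePtr is an assumption standing in for a backend-provided
pointer maker, not an Eigen type:

    #include <unsupported/Eigen/CXX11/Tensor>

    // A backend would put its device-side handle in Type; the default
    // MakePointer above simply uses T*.
    template <typename T>
    struct MyDevicePtr {
      typedef T* Type;
    };

    // The pointer maker is passed as the MakePointer_ template template
    // argument of TensorMap, matching the forward declaration above.
    typedef Eigen::TensorMap<Eigen::Tensor<float, 2>, Eigen::Unaligned,
                             MyDevicePtr> DeviceMappedTensor;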
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorFunctors.h b/unsupported/Eigen/CXX11/src/Tensor/TensorFunctors.h
new file mode 100644
index 000000000..d73f6dc68
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorFunctors.h
@@ -0,0 +1,489 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_FUNCTORS_H
+#define EIGEN_CXX11_TENSOR_TENSOR_FUNCTORS_H
+
+namespace Eigen {
+namespace internal {
+
+
+/** \internal
+ * \brief Template functor to compute the modulo between an array and a scalar.
+ */
+template <typename Scalar>
+struct scalar_mod_op {
+ EIGEN_DEVICE_FUNC scalar_mod_op(const Scalar& divisor) : m_divisor(divisor) {}
+ EIGEN_DEVICE_FUNC inline Scalar operator() (const Scalar& a) const { return a % m_divisor; }
+ const Scalar m_divisor;
+};
+template <typename Scalar>
+struct functor_traits<scalar_mod_op<Scalar> >
+{ enum { Cost = scalar_div_cost<Scalar,false>::value, PacketAccess = false }; };
+
+
+/** \internal
+ * \brief Template functor to compute the modulo between 2 arrays.
+ */
+template <typename Scalar>
+struct scalar_mod2_op {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_mod2_op);
+ EIGEN_DEVICE_FUNC inline Scalar operator() (const Scalar& a, const Scalar& b) const { return a % b; }
+};
+template <typename Scalar>
+struct functor_traits<scalar_mod2_op<Scalar> >
+{ enum { Cost = scalar_div_cost<Scalar,false>::value, PacketAccess = false }; };
+
+template <typename Scalar>
+struct scalar_fmod_op {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_fmod_op);
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar
+ operator()(const Scalar& a, const Scalar& b) const {
+ return numext::fmod(a, b);
+ }
+};
+template <typename Scalar>
+struct functor_traits<scalar_fmod_op<Scalar> > {
+ enum { Cost = 13, // Reciprocal throughput of FPREM on Haswell.
+ PacketAccess = false };
+};
+
+
+/** \internal
+ * \brief Template functor to compute the sigmoid of a scalar
+ * \sa class CwiseUnaryOp, ArrayBase::sigmoid()
+ */
+template <typename T>
+struct scalar_sigmoid_op {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_sigmoid_op)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T operator()(const T& x) const {
+ const T one = T(1);
+ return one / (one + numext::exp(-x));
+ }
+
+ template <typename Packet> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Packet packetOp(const Packet& x) const {
+ const Packet one = pset1<Packet>(T(1));
+ return pdiv(one, padd(one, pexp(pnegate(x))));
+ }
+};
+
+template <typename T>
+struct functor_traits<scalar_sigmoid_op<T> > {
+ enum {
+ Cost = NumTraits<T>::AddCost * 2 + NumTraits<T>::MulCost * 6,
+ PacketAccess = packet_traits<T>::HasAdd && packet_traits<T>::HasDiv &&
+ packet_traits<T>::HasNegate && packet_traits<T>::HasExp
+ };
+};
+
+
+template<typename Reducer, typename Device>
+struct reducer_traits {
+ enum {
+ Cost = 1,
+ PacketAccess = false
+ };
+};
+
+// Standard reduction functors
+template <typename T> struct SumReducer
+{
+ static const bool PacketAccess = packet_traits<T>::HasAdd;
+ static const bool IsStateful = false;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const T t, T* accum) const {
+ internal::scalar_sum_op<T> sum_op;
+ *accum = sum_op(*accum, t);
+ }
+ template <typename Packet>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reducePacket(const Packet& p, Packet* accum) const {
+ (*accum) = padd<Packet>(*accum, p);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T initialize() const {
+ internal::scalar_cast_op<int, T> conv;
+ return conv(0);
+ }
+ template <typename Packet>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet initializePacket() const {
+ return pset1<Packet>(initialize());
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalize(const T accum) const {
+ return accum;
+ }
+ template <typename Packet>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet finalizePacket(const Packet& vaccum) const {
+ return vaccum;
+ }
+ template <typename Packet>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalizeBoth(const T saccum, const Packet& vaccum) const {
+ internal::scalar_sum_op<T> sum_op;
+ return sum_op(saccum, predux(vaccum));
+ }
+};
+
+template <typename T, typename Device>
+struct reducer_traits<SumReducer<T>, Device> {
+ enum {
+ Cost = NumTraits<T>::AddCost,
+ PacketAccess = PacketType<T, Device>::HasAdd
+ };
+};
+
+
+template <typename T> struct MeanReducer
+{
+ static const bool PacketAccess = packet_traits<T>::HasAdd && !NumTraits<T>::IsInteger;
+ static const bool IsStateful = true;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ MeanReducer() : scalarCount_(0), packetCount_(0) { }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const T t, T* accum) {
+ internal::scalar_sum_op<T> sum_op;
+ *accum = sum_op(*accum, t);
+ scalarCount_++;
+ }
+ template <typename Packet>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reducePacket(const Packet& p, Packet* accum) {
+ (*accum) = padd<Packet>(*accum, p);
+ packetCount_++;
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T initialize() const {
+ internal::scalar_cast_op<int, T> conv;
+ return conv(0);
+ }
+ template <typename Packet>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet initializePacket() const {
+ return pset1<Packet>(initialize());
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalize(const T accum) const {
+ return accum / scalarCount_;
+ }
+ template <typename Packet>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet finalizePacket(const Packet& vaccum) const {
+ return pdiv(vaccum, pset1<Packet>(packetCount_));
+ }
+ template <typename Packet>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalizeBoth(const T saccum, const Packet& vaccum) const {
+ internal::scalar_sum_op<T> sum_op;
+ return sum_op(saccum, predux(vaccum)) / (scalarCount_ + packetCount_ * unpacket_traits<Packet>::size);
+ }
+
+ protected:
+ DenseIndex scalarCount_;
+ DenseIndex packetCount_;
+};
+
+template <typename T, typename Device>
+struct reducer_traits<MeanReducer<T>, Device> {
+ enum {
+ Cost = NumTraits<T>::AddCost,
+ PacketAccess = PacketType<T, Device>::HasAdd
+ };
+};
+
+
+template <typename T, bool IsMax = true, bool IsInteger = true>
+struct MinMaxBottomValue {
+ EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE T bottom_value() {
+ return Eigen::NumTraits<T>::lowest();
+ }
+};
+template <typename T>
+struct MinMaxBottomValue<T, true, false> {
+ EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE T bottom_value() {
+ return -Eigen::NumTraits<T>::infinity();
+ }
+};
+template <typename T>
+struct MinMaxBottomValue<T, false, true> {
+ EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE T bottom_value() {
+ return Eigen::NumTraits<T>::highest();
+ }
+};
+template <typename T>
+struct MinMaxBottomValue<T, false, false> {
+ EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE T bottom_value() {
+ return Eigen::NumTraits<T>::infinity();
+ }
+};
+
+
+template <typename T> struct MaxReducer
+{
+ static const bool PacketAccess = packet_traits<T>::HasMax;
+ static const bool IsStateful = false;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const T t, T* accum) const {
+ if (t > *accum) { *accum = t; }
+ }
+ template <typename Packet>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reducePacket(const Packet& p, Packet* accum) const {
+ (*accum) = pmax<Packet>(*accum, p);
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T initialize() const {
+ return MinMaxBottomValue<T, true, Eigen::NumTraits<T>::IsInteger>::bottom_value();
+ }
+ template <typename Packet>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet initializePacket() const {
+ return pset1<Packet>(initialize());
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalize(const T accum) const {
+ return accum;
+ }
+ template <typename Packet>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet finalizePacket(const Packet& vaccum) const {
+ return vaccum;
+ }
+ template <typename Packet>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalizeBoth(const T saccum, const Packet& vaccum) const {
+ return numext::maxi(saccum, predux_max(vaccum));
+ }
+};
+
+template <typename T, typename Device>
+struct reducer_traits<MaxReducer<T>, Device> {
+ enum {
+ Cost = NumTraits<T>::AddCost,
+ PacketAccess = PacketType<T, Device>::HasMax
+ };
+};
+
+
+template <typename T> struct MinReducer
+{
+ static const bool PacketAccess = packet_traits<T>::HasMin;
+ static const bool IsStateful = false;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const T t, T* accum) const {
+ if (t < *accum) { *accum = t; }
+ }
+ template <typename Packet>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reducePacket(const Packet& p, Packet* accum) const {
+ (*accum) = pmin<Packet>(*accum, p);
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T initialize() const {
+ return MinMaxBottomValue<T, false, Eigen::NumTraits<T>::IsInteger>::bottom_value();
+ }
+ template <typename Packet>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet initializePacket() const {
+ return pset1<Packet>(initialize());
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalize(const T accum) const {
+ return accum;
+ }
+ template <typename Packet>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet finalizePacket(const Packet& vaccum) const {
+ return vaccum;
+ }
+ template <typename Packet>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalizeBoth(const T saccum, const Packet& vaccum) const {
+ return numext::mini(saccum, predux_min(vaccum));
+ }
+};
+
+template <typename T, typename Device>
+struct reducer_traits<MinReducer<T>, Device> {
+ enum {
+ Cost = NumTraits<T>::AddCost,
+ PacketAccess = PacketType<T, Device>::HasMin
+ };
+};
+
+
+template <typename T> struct ProdReducer
+{
+ static const bool PacketAccess = packet_traits<T>::HasMul;
+ static const bool IsStateful = false;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const T t, T* accum) const {
+ internal::scalar_product_op<T> prod_op;
+ (*accum) = prod_op(*accum, t);
+ }
+ template <typename Packet>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reducePacket(const Packet& p, Packet* accum) const {
+ (*accum) = pmul<Packet>(*accum, p);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T initialize() const {
+ internal::scalar_cast_op<int, T> conv;
+ return conv(1);
+ }
+ template <typename Packet>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet initializePacket() const {
+ return pset1<Packet>(initialize());
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalize(const T accum) const {
+ return accum;
+ }
+ template <typename Packet>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet finalizePacket(const Packet& vaccum) const {
+ return vaccum;
+ }
+ template <typename Packet>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalizeBoth(const T saccum, const Packet& vaccum) const {
+ internal::scalar_product_op<T> prod_op;
+ return prod_op(saccum, predux_mul(vaccum));
+ }
+};
+
+template <typename T, typename Device>
+struct reducer_traits<ProdReducer<T>, Device> {
+ enum {
+ Cost = NumTraits<T>::MulCost,
+ PacketAccess = PacketType<T, Device>::HasMul
+ };
+};
+
+
+struct AndReducer
+{
+ static const bool PacketAccess = false;
+ static const bool IsStateful = false;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(bool t, bool* accum) const {
+ *accum = *accum && t;
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool initialize() const {
+ return true;
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool finalize(bool accum) const {
+ return accum;
+ }
+};
+
+template <typename Device>
+struct reducer_traits<AndReducer, Device> {
+ enum {
+ Cost = 1,
+ PacketAccess = false
+ };
+};
+
+
+struct OrReducer {
+ static const bool PacketAccess = false;
+ static const bool IsStateful = false;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(bool t, bool* accum) const {
+ *accum = *accum || t;
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool initialize() const {
+ return false;
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool finalize(bool accum) const {
+ return accum;
+ }
+};
+
+template <typename Device>
+struct reducer_traits<OrReducer, Device> {
+ enum {
+ Cost = 1,
+ PacketAccess = false
+ };
+};
+
+
+// Argmin/Argmax reducers
+template <typename T> struct ArgMaxTupleReducer
+{
+ static const bool PacketAccess = false;
+ static const bool IsStateful = false;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const T t, T* accum) const {
+ if (t.second > accum->second) { *accum = t; }
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T initialize() const {
+ return T(0, NumTraits<typename T::second_type>::lowest());
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalize(const T& accum) const {
+ return accum;
+ }
+};
+
+template <typename T, typename Device>
+struct reducer_traits<ArgMaxTupleReducer<T>, Device> {
+ enum {
+ Cost = NumTraits<T>::AddCost,
+ PacketAccess = false
+ };
+};
+
+
+template <typename T> struct ArgMinTupleReducer
+{
+ static const bool PacketAccess = false;
+ static const bool IsStateful = false;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const T& t, T* accum) const {
+ if (t.second < accum->second) { *accum = t; }
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T initialize() const {
+ return T(0, NumTraits<typename T::second_type>::highest());
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalize(const T& accum) const {
+ return accum;
+ }
+};
+
+template <typename T, typename Device>
+struct reducer_traits<ArgMinTupleReducer<T>, Device> {
+ enum {
+ Cost = NumTraits<T>::AddCost,
+ PacketAccess = false
+ };
+};
+
+
+template <typename T, typename Index, size_t NumDims>
+class GaussianGenerator {
+ public:
+ static const bool PacketAccess = false;
+
+ EIGEN_DEVICE_FUNC GaussianGenerator(const array<T, NumDims>& means,
+ const array<T, NumDims>& std_devs)
+ : m_means(means)
+ {
+ for (size_t i = 0; i < NumDims; ++i) {
+ m_two_sigmas[i] = std_devs[i] * std_devs[i] * 2;
+ }
+ }
+
+ EIGEN_DEVICE_FUNC T operator()(const array<Index, NumDims>& coordinates) const {
+ T tmp = T(0);
+ for (size_t i = 0; i < NumDims; ++i) {
+ T offset = coordinates[i] - m_means[i];
+ tmp += offset * offset / m_two_sigmas[i];
+ }
+ return numext::exp(-tmp);
+ }
+
+ private:
+ array<T, NumDims> m_means;
+ array<T, NumDims> m_two_sigmas;
+};
+
+template <typename T, typename Index, size_t NumDims>
+struct functor_traits<GaussianGenerator<T, Index, NumDims> > {
+ enum {
+ Cost = NumDims * (2 * NumTraits<T>::AddCost + NumTraits<T>::MulCost +
+ functor_traits<scalar_quotient_op<T, T> >::Cost) +
+ functor_traits<scalar_exp_op<T> >::Cost,
+ PacketAccess = GaussianGenerator<T, Index, NumDims>::PacketAccess
+ };
+};
+
+} // end namespace internal
+} // end namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_FUNCTORS_H
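A small sketch driving one of the reducers above by hand, to make the
shared initialize()/reduce()/finalize() protocol explicit:

    #include <unsupported/Eigen/CXX11/Tensor>
    #include <iostream>

    int main() {
      Eigen::internal::SumReducer<float> reducer;
      float acc = reducer.initialize();   // identity element, 0 for sums
      const float values[4] = {1.f, 2.f, 3.f, 4.f};
      for (int i = 0; i < 4; ++i) {
        reducer.reduce(values[i], &acc);  // fold one scalar into the accumulator
      }
      std::cout << reducer.finalize(acc) << "\n";  // prints 10
      return 0;
    }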
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorGenerator.h b/unsupported/Eigen/CXX11/src/Tensor/TensorGenerator.h
new file mode 100644
index 000000000..eb1d4934e
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorGenerator.h
@@ -0,0 +1,185 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2015 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_GENERATOR_H
+#define EIGEN_CXX11_TENSOR_TENSOR_GENERATOR_H
+
+namespace Eigen {
+
+/** \class TensorGenerator
+ * \ingroup CXX11_Tensor_Module
+ *
+ * \brief Tensor generator class.
+ *
+ *
+ */
+namespace internal {
+template<typename Generator, typename XprType>
+struct traits<TensorGeneratorOp<Generator, XprType> > : public traits<XprType>
+{
+ typedef typename XprType::Scalar Scalar;
+ typedef traits<XprType> XprTraits;
+ typedef typename XprTraits::StorageKind StorageKind;
+ typedef typename XprTraits::Index Index;
+ typedef typename XprType::Nested Nested;
+ typedef typename remove_reference<Nested>::type _Nested;
+ static const int NumDimensions = XprTraits::NumDimensions;
+ static const int Layout = XprTraits::Layout;
+};
+
+template<typename Generator, typename XprType>
+struct eval<TensorGeneratorOp<Generator, XprType>, Eigen::Dense>
+{
+ typedef const TensorGeneratorOp<Generator, XprType>& type;
+};
+
+template<typename Generator, typename XprType>
+struct nested<TensorGeneratorOp<Generator, XprType>, 1, typename eval<TensorGeneratorOp<Generator, XprType> >::type>
+{
+ typedef TensorGeneratorOp<Generator, XprType> type;
+};
+
+} // end namespace internal
+
+
+
+template<typename Generator, typename XprType>
+class TensorGeneratorOp : public TensorBase<TensorGeneratorOp<Generator, XprType>, ReadOnlyAccessors>
+{
+ public:
+ typedef typename Eigen::internal::traits<TensorGeneratorOp>::Scalar Scalar;
+ typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+ typedef typename Eigen::internal::nested<TensorGeneratorOp>::type Nested;
+ typedef typename Eigen::internal::traits<TensorGeneratorOp>::StorageKind StorageKind;
+ typedef typename Eigen::internal::traits<TensorGeneratorOp>::Index Index;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorGeneratorOp(const XprType& expr, const Generator& generator)
+ : m_xpr(expr), m_generator(generator) {}
+
+ EIGEN_DEVICE_FUNC
+ const Generator& generator() const { return m_generator; }
+
+ EIGEN_DEVICE_FUNC
+ const typename internal::remove_all<typename XprType::Nested>::type&
+ expression() const { return m_xpr; }
+
+ protected:
+ typename XprType::Nested m_xpr;
+ const Generator m_generator;
+};
+
+
+// Eval as rvalue
+template<typename Generator, typename ArgType, typename Device>
+struct TensorEvaluator<const TensorGeneratorOp<Generator, ArgType>, Device>
+{
+ typedef TensorGeneratorOp<Generator, ArgType> XprType;
+ typedef typename XprType::Index Index;
+ typedef typename TensorEvaluator<ArgType, Device>::Dimensions Dimensions;
+ static const int NumDims = internal::array_size<Dimensions>::value;
+ typedef typename XprType::Scalar Scalar;
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+ typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
+ enum {
+ IsAligned = false,
+ PacketAccess = (internal::unpacket_traits<PacketReturnType>::size > 1),
+ BlockAccess = false,
+ Layout = TensorEvaluator<ArgType, Device>::Layout,
+ CoordAccess = false, // to be implemented
+ RawAccess = false
+ };
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
+ : m_generator(op.generator())
+ {
+ TensorEvaluator<ArgType, Device> impl(op.expression(), device);
+ m_dimensions = impl.dimensions();
+
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ m_strides[0] = 1;
+ for (int i = 1; i < NumDims; ++i) {
+ m_strides[i] = m_strides[i - 1] * m_dimensions[i - 1];
+ }
+ } else {
+ m_strides[NumDims - 1] = 1;
+ for (int i = NumDims - 2; i >= 0; --i) {
+ m_strides[i] = m_strides[i + 1] * m_dimensions[i + 1];
+ }
+ }
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* /*data*/) {
+ return true;
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
+ {
+ array<Index, NumDims> coords;
+ extract_coordinates(index, coords);
+ return m_generator(coords);
+ }
+
+ template<int LoadMode>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
+ {
+ const int packetSize = internal::unpacket_traits<PacketReturnType>::size;
+ EIGEN_STATIC_ASSERT((packetSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
+ eigen_assert(index+packetSize-1 < dimensions().TotalSize());
+
+ EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[packetSize];
+ for (int i = 0; i < packetSize; ++i) {
+ values[i] = coeff(index+i);
+ }
+ PacketReturnType rslt = internal::pload<PacketReturnType>(values);
+ return rslt;
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
+ costPerCoeff(bool) const {
+ // TODO(rmlarsen): This is just a placeholder. Define interface to make
+ // generators return their cost.
+ return TensorOpCost(0, 0, TensorOpCost::AddCost<Scalar>() +
+ TensorOpCost::MulCost<Scalar>());
+ }
+
+ EIGEN_DEVICE_FUNC Scalar* data() const { return NULL; }
+
+ protected:
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ void extract_coordinates(Index index, array<Index, NumDims>& coords) const {
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ for (int i = NumDims - 1; i > 0; --i) {
+ const Index idx = index / m_strides[i];
+ index -= idx * m_strides[i];
+ coords[i] = idx;
+ }
+ coords[0] = index;
+ } else {
+ for (int i = 0; i < NumDims - 1; ++i) {
+ const Index idx = index / m_strides[i];
+ index -= idx * m_strides[i];
+ coords[i] = idx;
+ }
+ coords[NumDims-1] = index;
+ }
+ }
+
+ Dimensions m_dimensions;
+ array<Index, NumDims> m_strides;
+ Generator m_generator;
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_GENERATOR_H
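A usage sketch combining the GaussianGenerator functor defined earlier with
the generate() entry point that builds this op; the window parameters below
are illustrative:

    #include <unsupported/Eigen/CXX11/Tensor>

    Eigen::Tensor<float, 2> gaussian_window(int rows, int cols) {
      Eigen::Tensor<float, 2> shape_only(rows, cols);  // only its shape is used
      Eigen::array<float, 2> means = {{rows / 2.f, cols / 2.f}};
      Eigen::array<float, 2> std_devs = {{rows / 4.f, cols / 4.f}};
      Eigen::internal::GaussianGenerator<float, Eigen::DenseIndex, 2>
          gauss(means, std_devs);
      // Each coefficient is produced by calling the generator with the
      // coordinates recovered by extract_coordinates() above.
      Eigen::Tensor<float, 2> result = shape_only.generate(gauss);
      return result;
    }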
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorGlobalFunctions.h b/unsupported/Eigen/CXX11/src/Tensor/TensorGlobalFunctions.h
new file mode 100644
index 000000000..665b861cf
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorGlobalFunctions.h
@@ -0,0 +1,33 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2016 Eugene Brevdo <ebrevdo@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_GLOBAL_FUNCTIONS_H
+#define EIGEN_CXX11_TENSOR_TENSOR_GLOBAL_FUNCTIONS_H
+
+namespace Eigen {
+
+/** \cpp11 \returns an expression of the coefficient-wise betainc(\a x, \a a, \a b) to the given tensors.
+ *
+ * This function computes the regularized incomplete beta function (integral).
+ *
+ */
+template <typename ADerived, typename BDerived, typename XDerived>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const
+ TensorCwiseTernaryOp<internal::scalar_betainc_op<typename XDerived::Scalar>,
+ const ADerived, const BDerived, const XDerived>
+ betainc(const ADerived& a, const BDerived& b, const XDerived& x) {
+ return TensorCwiseTernaryOp<
+ internal::scalar_betainc_op<typename XDerived::Scalar>, const ADerived,
+ const BDerived, const XDerived>(
+ a, b, x, internal::scalar_betainc_op<typename XDerived::Scalar>());
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_GLOBAL_FUNCTIONS_H
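A usage sketch of the global function above; all three arguments must have
the same dimensions, and the scalar type needs special-function support:

    #include <unsupported/Eigen/CXX11/Tensor>

    Eigen::Tensor<float, 1> regularized_beta(const Eigen::Tensor<float, 1>& a,
                                             const Eigen::Tensor<float, 1>& b,
                                             const Eigen::Tensor<float, 1>& x) {
      // Coefficient-wise regularized incomplete beta function I_x(a, b).
      Eigen::Tensor<float, 1> result = Eigen::betainc(a, b, x);
      return result;
    }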
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorIO.h b/unsupported/Eigen/CXX11/src/Tensor/TensorIO.h
new file mode 100644
index 000000000..a901c5dd4
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorIO.h
@@ -0,0 +1,79 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_IO_H
+#define EIGEN_CXX11_TENSOR_TENSOR_IO_H
+
+namespace Eigen {
+
+namespace internal {
+
+// Print the tensor as a 2d matrix
+template <typename Tensor, int Rank>
+struct TensorPrinter {
+ static void run (std::ostream& os, const Tensor& tensor) {
+ typedef typename internal::remove_const<typename Tensor::Scalar>::type Scalar;
+ typedef typename Tensor::Index Index;
+ const Index total_size = internal::array_prod(tensor.dimensions());
+ if (total_size > 0) {
+ const Index first_dim = Eigen::internal::array_get<0>(tensor.dimensions());
+ static const int layout = Tensor::Layout;
+ Map<const Array<Scalar, Dynamic, Dynamic, layout> > matrix(const_cast<Scalar*>(tensor.data()), first_dim, total_size/first_dim);
+ os << matrix;
+ }
+ }
+};
+
+
+// Print the tensor as a vector
+template <typename Tensor>
+struct TensorPrinter<Tensor, 1> {
+ static void run (std::ostream& os, const Tensor& tensor) {
+ typedef typename internal::remove_const<typename Tensor::Scalar>::type Scalar;
+ typedef typename Tensor::Index Index;
+ const Index total_size = internal::array_prod(tensor.dimensions());
+ if (total_size > 0) {
+ Map<const Array<Scalar, Dynamic, 1> > array(const_cast<Scalar*>(tensor.data()), total_size);
+ os << array;
+ }
+ }
+};
+
+
+// Print the tensor as a scalar
+template <typename Tensor>
+struct TensorPrinter<Tensor, 0> {
+ static void run (std::ostream& os, const Tensor& tensor) {
+ os << tensor.coeff(0);
+ }
+};
+} // end namespace internal
+
+template <typename T>
+std::ostream& operator << (std::ostream& os, const TensorBase<T, ReadOnlyAccessors>& expr) {
+ typedef TensorEvaluator<const TensorForcedEvalOp<const T>, DefaultDevice> Evaluator;
+ typedef typename Evaluator::Dimensions Dimensions;
+
+ // Evaluate the expression if needed
+ TensorForcedEvalOp<const T> eval = expr.eval();
+ Evaluator tensor(eval, DefaultDevice());
+ tensor.evalSubExprsIfNeeded(NULL);
+
+ // Print the result
+ static const int rank = internal::array_size<Dimensions>::value;
+ internal::TensorPrinter<Evaluator, rank>::run(os, tensor);
+
+ // Cleanup.
+ tensor.cleanup();
+ return os;
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_IO_H
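A usage sketch of the stream operator above; setValues is assumed to be
available (it requires variadic-template support):

    #include <unsupported/Eigen/CXX11/Tensor>
    #include <iostream>

    int main() {
      Eigen::Tensor<int, 2> t(2, 3);
      t.setValues({{1, 2, 3}, {4, 5, 6}});
      // Rank >= 2 tensors are flattened to a 2d matrix for display, as the
      // printers above document; rank 1 prints as a vector, rank 0 as a scalar.
      std::cout << t << "\n";
      return 0;
    }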
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorImagePatch.h b/unsupported/Eigen/CXX11/src/Tensor/TensorImagePatch.h
new file mode 100644
index 000000000..566856ed2
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorImagePatch.h
@@ -0,0 +1,509 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_IMAGE_PATCH_H
+#define EIGEN_CXX11_TENSOR_TENSOR_IMAGE_PATCH_H
+
+namespace Eigen {
+
+/** \class TensorImagePatch
+ * \ingroup CXX11_Tensor_Module
+ *
+ * \brief Patch extraction specialized for image processing.
+ * This assumes that the input has at least 3 dimensions ordered as follows:
+ * 1st dimension: channels (of size d)
+ * 2nd dimension: rows (of size r)
+ * 3rd dimension: columns (of size c)
+ * There can be additional dimensions such as time (for video) or batch (for
+ * bulk processing) after the first 3.
+ * Calling the image patch code with patch_rows and patch_cols is equivalent
+ * to calling the regular patch extraction code with parameters d, patch_rows,
+ * patch_cols, and 1 for all the additional dimensions.
+ */
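A usage sketch of the description above, assuming the extract_image_patches()
entry point on TensorBase that builds this op with its default stride and
padding arguments:

    // image: (channels, rows, cols) in ColMajor, as described above.
    // patches: (channels, patch_rows, patch_cols, number of patches).
    Eigen::Tensor<float, 4> patches_3x3(const Eigen::Tensor<float, 3>& image) {
      return image.extract_image_patches(3, 3);
    }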
+namespace internal {
+template<DenseIndex Rows, DenseIndex Cols, typename XprType>
+struct traits<TensorImagePatchOp<Rows, Cols, XprType> > : public traits<XprType>
+{
+ typedef typename internal::remove_const<typename XprType::Scalar>::type Scalar;
+ typedef traits<XprType> XprTraits;
+ typedef typename XprTraits::StorageKind StorageKind;
+ typedef typename XprTraits::Index Index;
+ typedef typename XprType::Nested Nested;
+ typedef typename remove_reference<Nested>::type _Nested;
+ static const int NumDimensions = XprTraits::NumDimensions + 1;
+ static const int Layout = XprTraits::Layout;
+};
+
+template<DenseIndex Rows, DenseIndex Cols, typename XprType>
+struct eval<TensorImagePatchOp<Rows, Cols, XprType>, Eigen::Dense>
+{
+ typedef const TensorImagePatchOp<Rows, Cols, XprType>& type;
+};
+
+template<DenseIndex Rows, DenseIndex Cols, typename XprType>
+struct nested<TensorImagePatchOp<Rows, Cols, XprType>, 1, typename eval<TensorImagePatchOp<Rows, Cols, XprType> >::type>
+{
+ typedef TensorImagePatchOp<Rows, Cols, XprType> type;
+};
+
+} // end namespace internal
+
+template<DenseIndex Rows, DenseIndex Cols, typename XprType>
+class TensorImagePatchOp : public TensorBase<TensorImagePatchOp<Rows, Cols, XprType>, ReadOnlyAccessors>
+{
+ public:
+ typedef typename Eigen::internal::traits<TensorImagePatchOp>::Scalar Scalar;
+ typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+ typedef typename Eigen::internal::nested<TensorImagePatchOp>::type Nested;
+ typedef typename Eigen::internal::traits<TensorImagePatchOp>::StorageKind StorageKind;
+ typedef typename Eigen::internal::traits<TensorImagePatchOp>::Index Index;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorImagePatchOp(const XprType& expr, DenseIndex patch_rows, DenseIndex patch_cols,
+ DenseIndex row_strides, DenseIndex col_strides,
+ DenseIndex in_row_strides, DenseIndex in_col_strides,
+ DenseIndex row_inflate_strides, DenseIndex col_inflate_strides,
+ PaddingType padding_type, Scalar padding_value)
+ : m_xpr(expr), m_patch_rows(patch_rows), m_patch_cols(patch_cols),
+ m_row_strides(row_strides), m_col_strides(col_strides),
+ m_in_row_strides(in_row_strides), m_in_col_strides(in_col_strides),
+ m_row_inflate_strides(row_inflate_strides), m_col_inflate_strides(col_inflate_strides),
+ m_padding_explicit(false), m_padding_top(0), m_padding_bottom(0), m_padding_left(0), m_padding_right(0),
+ m_padding_type(padding_type), m_padding_value(padding_value) {}
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorImagePatchOp(const XprType& expr, DenseIndex patch_rows, DenseIndex patch_cols,
+ DenseIndex row_strides, DenseIndex col_strides,
+ DenseIndex in_row_strides, DenseIndex in_col_strides,
+ DenseIndex row_inflate_strides, DenseIndex col_inflate_strides,
+ DenseIndex padding_top, DenseIndex padding_bottom,
+ DenseIndex padding_left, DenseIndex padding_right,
+ Scalar padding_value)
+ : m_xpr(expr), m_patch_rows(patch_rows), m_patch_cols(patch_cols),
+ m_row_strides(row_strides), m_col_strides(col_strides),
+ m_in_row_strides(in_row_strides), m_in_col_strides(in_col_strides),
+ m_row_inflate_strides(row_inflate_strides), m_col_inflate_strides(col_inflate_strides),
+ m_padding_explicit(true), m_padding_top(padding_top), m_padding_bottom(padding_bottom),
+ m_padding_left(padding_left), m_padding_right(padding_right),
+ m_padding_type(PADDING_VALID), m_padding_value(padding_value) {}
+
+ EIGEN_DEVICE_FUNC
+ DenseIndex patch_rows() const { return m_patch_rows; }
+ EIGEN_DEVICE_FUNC
+ DenseIndex patch_cols() const { return m_patch_cols; }
+ EIGEN_DEVICE_FUNC
+ DenseIndex row_strides() const { return m_row_strides; }
+ EIGEN_DEVICE_FUNC
+ DenseIndex col_strides() const { return m_col_strides; }
+ EIGEN_DEVICE_FUNC
+ DenseIndex in_row_strides() const { return m_in_row_strides; }
+ EIGEN_DEVICE_FUNC
+ DenseIndex in_col_strides() const { return m_in_col_strides; }
+ EIGEN_DEVICE_FUNC
+ DenseIndex row_inflate_strides() const { return m_row_inflate_strides; }
+ EIGEN_DEVICE_FUNC
+ DenseIndex col_inflate_strides() const { return m_col_inflate_strides; }
+ EIGEN_DEVICE_FUNC
+ bool padding_explicit() const { return m_padding_explicit; }
+ EIGEN_DEVICE_FUNC
+ DenseIndex padding_top() const { return m_padding_top; }
+ EIGEN_DEVICE_FUNC
+ DenseIndex padding_bottom() const { return m_padding_bottom; }
+ EIGEN_DEVICE_FUNC
+ DenseIndex padding_left() const { return m_padding_left; }
+ EIGEN_DEVICE_FUNC
+ DenseIndex padding_right() const { return m_padding_right; }
+ EIGEN_DEVICE_FUNC
+ PaddingType padding_type() const { return m_padding_type; }
+ EIGEN_DEVICE_FUNC
+ Scalar padding_value() const { return m_padding_value; }
+
+ EIGEN_DEVICE_FUNC
+ const typename internal::remove_all<typename XprType::Nested>::type&
+ expression() const { return m_xpr; }
+
+ protected:
+ typename XprType::Nested m_xpr;
+ const DenseIndex m_patch_rows;
+ const DenseIndex m_patch_cols;
+ const DenseIndex m_row_strides;
+ const DenseIndex m_col_strides;
+ const DenseIndex m_in_row_strides;
+ const DenseIndex m_in_col_strides;
+ const DenseIndex m_row_inflate_strides;
+ const DenseIndex m_col_inflate_strides;
+ const bool m_padding_explicit;
+ const DenseIndex m_padding_top;
+ const DenseIndex m_padding_bottom;
+ const DenseIndex m_padding_left;
+ const DenseIndex m_padding_right;
+ const PaddingType m_padding_type;
+ const Scalar m_padding_value;
+};
+
+// Eval as rvalue
+template<DenseIndex Rows, DenseIndex Cols, typename ArgType, typename Device>
+struct TensorEvaluator<const TensorImagePatchOp<Rows, Cols, ArgType>, Device>
+{
+ typedef TensorImagePatchOp<Rows, Cols, ArgType> XprType;
+ typedef typename XprType::Index Index;
+ static const int NumInputDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value;
+ static const int NumDims = NumInputDims + 1;
+ typedef DSizes<Index, NumDims> Dimensions;
+ typedef typename internal::remove_const<typename XprType::Scalar>::type Scalar;
+ typedef TensorEvaluator<const TensorImagePatchOp<Rows, Cols, ArgType>,
+ Device> Self;
+ typedef TensorEvaluator<ArgType, Device> Impl;
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+ typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
+ static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
+
+ enum {
+ IsAligned = false,
+ PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
+ Layout = TensorEvaluator<ArgType, Device>::Layout,
+ CoordAccess = false,
+ RawAccess = false
+ };
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
+ : m_impl(op.expression(), device)
+ {
+ EIGEN_STATIC_ASSERT((NumDims >= 4), YOU_MADE_A_PROGRAMMING_MISTAKE);
+
+ m_paddingValue = op.padding_value();
+
+ const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
+
+ // Caches a few variables.
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ m_inputDepth = input_dims[0];
+ m_inputRows = input_dims[1];
+ m_inputCols = input_dims[2];
+ } else {
+ m_inputDepth = input_dims[NumInputDims-1];
+ m_inputRows = input_dims[NumInputDims-2];
+ m_inputCols = input_dims[NumInputDims-3];
+ }
+
+ m_row_strides = op.row_strides();
+ m_col_strides = op.col_strides();
+
+ // Input strides and effective input/patch size
+ m_in_row_strides = op.in_row_strides();
+ m_in_col_strides = op.in_col_strides();
+ m_row_inflate_strides = op.row_inflate_strides();
+ m_col_inflate_strides = op.col_inflate_strides();
+ // The "effective" input rows and input cols are the input rows and cols
+ // after inflating them with zeros.
+    // For example, a 2x3 matrix with row_inflate_strides and
+    // col_inflate_strides of 2:
+    //   A B C
+    //   D E F
+    //
+    // is inflated into the following 3 x 5 matrix:
+ //
+ // A . B . C
+ // . . . . .
+ // D . E . F
+
+ m_input_rows_eff = (m_inputRows - 1) * m_row_inflate_strides + 1;
+ m_input_cols_eff = (m_inputCols - 1) * m_col_inflate_strides + 1;
+ m_patch_rows_eff = op.patch_rows() + (op.patch_rows() - 1) * (m_in_row_strides - 1);
+ m_patch_cols_eff = op.patch_cols() + (op.patch_cols() - 1) * (m_in_col_strides - 1);
+
+ if (op.padding_explicit()) {
+ m_outputRows = numext::ceil((m_input_rows_eff + op.padding_top() + op.padding_bottom() - m_patch_rows_eff + 1.f) / static_cast<float>(m_row_strides));
+ m_outputCols = numext::ceil((m_input_cols_eff + op.padding_left() + op.padding_right() - m_patch_cols_eff + 1.f) / static_cast<float>(m_col_strides));
+ m_rowPaddingTop = op.padding_top();
+ m_colPaddingLeft = op.padding_left();
+ } else {
+ // Computing padding from the type
+ switch (op.padding_type()) {
+ case PADDING_VALID:
+ m_outputRows = numext::ceil((m_input_rows_eff - m_patch_rows_eff + 1.f) / static_cast<float>(m_row_strides));
+ m_outputCols = numext::ceil((m_input_cols_eff - m_patch_cols_eff + 1.f) / static_cast<float>(m_col_strides));
+ // Calculate the padding
+ m_rowPaddingTop = numext::maxi<Index>(0, ((m_outputRows - 1) * m_row_strides + m_patch_rows_eff - m_input_rows_eff) / 2);
+ m_colPaddingLeft = numext::maxi<Index>(0, ((m_outputCols - 1) * m_col_strides + m_patch_cols_eff - m_input_cols_eff) / 2);
+ break;
+ case PADDING_SAME:
+ m_outputRows = numext::ceil(m_input_rows_eff / static_cast<float>(m_row_strides));
+ m_outputCols = numext::ceil(m_input_cols_eff / static_cast<float>(m_col_strides));
+ // Calculate the padding
+ m_rowPaddingTop = ((m_outputRows - 1) * m_row_strides + m_patch_rows_eff - m_input_rows_eff) / 2;
+ m_colPaddingLeft = ((m_outputCols - 1) * m_col_strides + m_patch_cols_eff - m_input_cols_eff) / 2;
+ break;
+ default:
+ eigen_assert(false && "unexpected padding");
+ }
+ }
+ eigen_assert(m_outputRows > 0);
+ eigen_assert(m_outputCols > 0);
+
+ // Dimensions for result of extraction.
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ // ColMajor
+ // 0: depth
+ // 1: patch_rows
+ // 2: patch_cols
+ // 3: number of patches
+ // 4 and beyond: anything else (such as batch).
+ m_dimensions[0] = input_dims[0];
+ m_dimensions[1] = op.patch_rows();
+ m_dimensions[2] = op.patch_cols();
+ m_dimensions[3] = m_outputRows * m_outputCols;
+ for (int i = 4; i < NumDims; ++i) {
+ m_dimensions[i] = input_dims[i-1];
+ }
+ } else {
+ // RowMajor
+ // NumDims-1: depth
+ // NumDims-2: patch_rows
+ // NumDims-3: patch_cols
+ // NumDims-4: number of patches
+ // NumDims-5 and beyond: anything else (such as batch).
+ m_dimensions[NumDims-1] = input_dims[NumInputDims-1];
+ m_dimensions[NumDims-2] = op.patch_rows();
+ m_dimensions[NumDims-3] = op.patch_cols();
+ m_dimensions[NumDims-4] = m_outputRows * m_outputCols;
+ for (int i = NumDims-5; i >= 0; --i) {
+ m_dimensions[i] = input_dims[i];
+ }
+ }
+
+ // Strides for moving the patch in various dimensions.
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ m_colStride = m_dimensions[1];
+ m_patchStride = m_colStride * m_dimensions[2] * m_dimensions[0];
+ m_otherStride = m_patchStride * m_dimensions[3];
+ } else {
+ m_colStride = m_dimensions[NumDims-2];
+ m_patchStride = m_colStride * m_dimensions[NumDims-3] * m_dimensions[NumDims-1];
+ m_otherStride = m_patchStride * m_dimensions[NumDims-4];
+ }
+
+ // Strides for navigating through the input tensor.
+ m_rowInputStride = m_inputDepth;
+ m_colInputStride = m_inputDepth * m_inputRows;
+ m_patchInputStride = m_inputDepth * m_inputRows * m_inputCols;
+
+ // Fast representations of different variables.
+ m_fastOtherStride = internal::TensorIntDivisor<Index>(m_otherStride);
+ m_fastPatchStride = internal::TensorIntDivisor<Index>(m_patchStride);
+ m_fastColStride = internal::TensorIntDivisor<Index>(m_colStride);
+ m_fastInflateRowStride = internal::TensorIntDivisor<Index>(m_row_inflate_strides);
+ m_fastInflateColStride = internal::TensorIntDivisor<Index>(m_col_inflate_strides);
+ m_fastInputColsEff = internal::TensorIntDivisor<Index>(m_input_cols_eff);
+
+ // Fast divisors for the number of output rows and the output depth.
+ m_fastOutputRows = internal::TensorIntDivisor<Index>(m_outputRows);
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ m_fastOutputDepth = internal::TensorIntDivisor<Index>(m_dimensions[0]);
+ } else {
+ m_fastOutputDepth = internal::TensorIntDivisor<Index>(m_dimensions[NumDims-1]);
+ }
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* /*data*/) {
+ m_impl.evalSubExprsIfNeeded(NULL);
+ return true;
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
+ m_impl.cleanup();
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
+ {
+ // Patch index corresponding to the passed in index.
+ const Index patchIndex = index / m_fastPatchStride;
+ // Find the offset of the element with respect to the location of the first element.
+ const Index patchOffset = (index - patchIndex * m_patchStride) / m_fastOutputDepth;
+
+ // Other ways to index this element.
+ const Index otherIndex = (NumDims == 4) ? 0 : index / m_fastOtherStride;
+ const Index patch2DIndex = (NumDims == 4) ? patchIndex : (index - otherIndex * m_otherStride) / m_fastPatchStride;
+
+ // Calculate the column index in the original input tensor.
+ const Index colIndex = patch2DIndex / m_fastOutputRows;
+ const Index colOffset = patchOffset / m_fastColStride;
+ const Index inputCol = colIndex * m_col_strides + colOffset * m_in_col_strides - m_colPaddingLeft;
+ const Index origInputCol = (m_col_inflate_strides == 1) ? inputCol : ((inputCol >= 0) ? (inputCol / m_fastInflateColStride) : 0);
+ if (inputCol < 0 || inputCol >= m_input_cols_eff ||
+ ((m_col_inflate_strides != 1) && (inputCol != origInputCol * m_col_inflate_strides))) {
+ return Scalar(m_paddingValue);
+ }
+
+ // Calculate the row index in the original input tensor.
+ const Index rowIndex = patch2DIndex - colIndex * m_outputRows;
+ const Index rowOffset = patchOffset - colOffset * m_colStride;
+ const Index inputRow = rowIndex * m_row_strides + rowOffset * m_in_row_strides - m_rowPaddingTop;
+ const Index origInputRow = (m_row_inflate_strides == 1) ? inputRow : ((inputRow >= 0) ? (inputRow / m_fastInflateRowStride) : 0);
+ if (inputRow < 0 || inputRow >= m_input_rows_eff ||
+ ((m_row_inflate_strides != 1) && (inputRow != origInputRow * m_row_inflate_strides))) {
+ return Scalar(m_paddingValue);
+ }
+
+ const int depth_index = static_cast<int>(Layout) == static_cast<int>(ColMajor) ? 0 : NumDims - 1;
+ const Index depth = index - (index / m_fastOutputDepth) * m_dimensions[depth_index];
+
+ const Index inputIndex = depth + origInputRow * m_rowInputStride + origInputCol * m_colInputStride + otherIndex * m_patchInputStride;
+ return m_impl.coeff(inputIndex);
+ }
+
+ template<int LoadMode>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
+ {
+ EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
+ eigen_assert(index+PacketSize-1 < dimensions().TotalSize());
+
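+ // With non-unit in-plane sampling or inflation strides, consecutive output
+ // coefficients are no longer contiguous in the input, so fall back to
+ // loading them one at a time.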
+ if (m_in_row_strides != 1 || m_in_col_strides != 1 || m_row_inflate_strides != 1 || m_col_inflate_strides != 1) {
+ return packetWithPossibleZero(index);
+ }
+
+ const Index indices[2] = {index, index + PacketSize - 1};
+ const Index patchIndex = indices[0] / m_fastPatchStride;
+ if (patchIndex != indices[1] / m_fastPatchStride) {
+ return packetWithPossibleZero(index);
+ }
+ const Index otherIndex = (NumDims == 4) ? 0 : indices[0] / m_fastOtherStride;
+ eigen_assert(otherIndex == indices[1] / m_fastOtherStride);
+
+ // Find the offset of the element with respect to the location of the first element.
+ const Index patchOffsets[2] = {(indices[0] - patchIndex * m_patchStride) / m_fastOutputDepth,
+ (indices[1] - patchIndex * m_patchStride) / m_fastOutputDepth};
+
+ const Index patch2DIndex = (NumDims == 4) ? patchIndex : (indices[0] - otherIndex * m_otherStride) / m_fastPatchStride;
+ eigen_assert(patch2DIndex == (indices[1] - otherIndex * m_otherStride) / m_fastPatchStride);
+
+ const Index colIndex = patch2DIndex / m_fastOutputRows;
+ const Index colOffsets[2] = {patchOffsets[0] / m_fastColStride, patchOffsets[1] / m_fastColStride};
+
+ // Calculate col indices in the original input tensor.
+ const Index inputCols[2] = {colIndex * m_col_strides + colOffsets[0] -
+ m_colPaddingLeft, colIndex * m_col_strides + colOffsets[1] - m_colPaddingLeft};
+ if (inputCols[1] < 0 || inputCols[0] >= m_inputCols) {
+ return internal::pset1<PacketReturnType>(Scalar(m_paddingValue));
+ }
+
+ if (inputCols[0] == inputCols[1]) {
+ const Index rowIndex = patch2DIndex - colIndex * m_outputRows;
+ const Index rowOffsets[2] = {patchOffsets[0] - colOffsets[0]*m_colStride, patchOffsets[1] - colOffsets[1]*m_colStride};
+ eigen_assert(rowOffsets[0] <= rowOffsets[1]);
+ // Calculate the row indices in the original input tensor.
+ const Index inputRows[2] = {rowIndex * m_row_strides + rowOffsets[0] -
+ m_rowPaddingTop, rowIndex * m_row_strides + rowOffsets[1] - m_rowPaddingTop};
+
+ if (inputRows[1] < 0 || inputRows[0] >= m_inputRows) {
+ return internal::pset1<PacketReturnType>(Scalar(m_paddingValue));
+ }
+
+ if (inputRows[0] >= 0 && inputRows[1] < m_inputRows) {
+ // no padding
+ const int depth_index = static_cast<int>(Layout) == static_cast<int>(ColMajor) ? 0 : NumDims - 1;
+ const Index depth = index - (index / m_fastOutputDepth) * m_dimensions[depth_index];
+ const Index inputIndex = depth + inputRows[0] * m_rowInputStride + inputCols[0] * m_colInputStride + otherIndex * m_patchInputStride;
+ return m_impl.template packet<Unaligned>(inputIndex);
+ }
+ }
+
+ return packetWithPossibleZero(index);
+ }
+
+ EIGEN_DEVICE_FUNC Scalar* data() const { return NULL; }
+
+ const TensorEvaluator<ArgType, Device>& impl() const { return m_impl; }
+
+ Index rowPaddingTop() const { return m_rowPaddingTop; }
+ Index colPaddingLeft() const { return m_colPaddingLeft; }
+ Index outputRows() const { return m_outputRows; }
+ Index outputCols() const { return m_outputCols; }
+ Index userRowStride() const { return m_row_strides; }
+ Index userColStride() const { return m_col_strides; }
+ Index userInRowStride() const { return m_in_row_strides; }
+ Index userInColStride() const { return m_in_col_strides; }
+ Index rowInflateStride() const { return m_row_inflate_strides; }
+ Index colInflateStride() const { return m_col_inflate_strides; }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
+ costPerCoeff(bool vectorized) const {
+ // We conservatively estimate the cost for the code path where the computed
+ // index is inside the original image and
+ // TensorEvaluator<ArgType, Device>::CoordAccess is false.
+ const double compute_cost = 3 * TensorOpCost::DivCost<Index>() +
+ 6 * TensorOpCost::MulCost<Index>() +
+ 8 * TensorOpCost::AddCost<Index>();
+ return m_impl.costPerCoeff(vectorized) +
+ TensorOpCost(0, 0, compute_cost, vectorized, PacketSize);
+ }
+
+ protected:
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packetWithPossibleZero(Index index) const
+ {
+ EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize];
+ for (int i = 0; i < PacketSize; ++i) {
+ values[i] = coeff(index+i);
+ }
+ PacketReturnType rslt = internal::pload<PacketReturnType>(values);
+ return rslt;
+ }
+
+ Dimensions m_dimensions;
+
+ Index m_otherStride;
+ Index m_patchStride;
+ Index m_colStride;
+ Index m_row_strides;
+ Index m_col_strides;
+
+ Index m_in_row_strides;
+ Index m_in_col_strides;
+ Index m_row_inflate_strides;
+ Index m_col_inflate_strides;
+
+ Index m_input_rows_eff;
+ Index m_input_cols_eff;
+ Index m_patch_rows_eff;
+ Index m_patch_cols_eff;
+
+ internal::TensorIntDivisor<Index> m_fastOtherStride;
+ internal::TensorIntDivisor<Index> m_fastPatchStride;
+ internal::TensorIntDivisor<Index> m_fastColStride;
+ internal::TensorIntDivisor<Index> m_fastInflateRowStride;
+ internal::TensorIntDivisor<Index> m_fastInflateColStride;
+ internal::TensorIntDivisor<Index> m_fastInputColsEff;
+
+ Index m_rowInputStride;
+ Index m_colInputStride;
+ Index m_patchInputStride;
+
+ Index m_inputDepth;
+ Index m_inputRows;
+ Index m_inputCols;
+
+ Index m_outputRows;
+ Index m_outputCols;
+
+ Index m_rowPaddingTop;
+ Index m_colPaddingLeft;
+
+ internal::TensorIntDivisor<Index> m_fastOutputRows;
+ internal::TensorIntDivisor<Index> m_fastOutputDepth;
+
+ Scalar m_paddingValue;
+
+ TensorEvaluator<ArgType, Device> m_impl;
+};
+
+
+} // end namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_IMAGE_PATCH_H
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorIndexList.h b/unsupported/Eigen/CXX11/src/Tensor/TensorIndexList.h
new file mode 100644
index 000000000..3209fecd3
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorIndexList.h
@@ -0,0 +1,725 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_INDEX_LIST_H
+#define EIGEN_CXX11_TENSOR_TENSOR_INDEX_LIST_H
+
+
+#if EIGEN_HAS_CONSTEXPR && EIGEN_HAS_VARIADIC_TEMPLATES
+
+#define EIGEN_HAS_INDEX_LIST
+
+namespace Eigen {
+
+/** \internal
+ *
+ * \class TensorIndexList
+ * \ingroup CXX11_Tensor_Module
+ *
+ * \brief Set of classes used to encode a set of Tensor dimensions/indices.
+ *
+ * The indices in the list can be known at compile time or at runtime. A mix
+ * of static and dynamic indices can also be provided if needed. The tensor
+ * code will attempt to take advantage of the indices that are known at
+ * compile time to optimize the code it generates.
+ *
+ * This functionality requires a C++11-compliant compiler. If your compiler
+ * is older, you need to use arrays of indices instead.
+ *
+ * Several examples are provided in the cxx11_tensor_index_list.cpp file.
+ *
+ * \sa Tensor
+ */
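+//
+// A minimal usage sketch (not among the original examples; see the test file
+// mentioned above for canonical ones). A type2index entry is pinned at
+// compile time, a plain integer entry stays dynamic:
+//
+//   Eigen::IndexList<Eigen::type2index<0>, int> dims;
+//   dims.set(1, 5);  // entry 0 is statically 0, entry 1 is 5 at runtime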
+
+template <DenseIndex n>
+struct type2index {
+ static const DenseIndex value = n;
+ EIGEN_DEVICE_FUNC constexpr operator DenseIndex() const { return n; }
+ EIGEN_DEVICE_FUNC void set(DenseIndex val) {
+ eigen_assert(val == n);
+ }
+};
+
+// This can be used with IndexPairList to get compile-time constant pairs,
+// such as IndexPairList<type2indexpair<1,2>, type2indexpair<3,4>>().
+template <DenseIndex f, DenseIndex s>
+struct type2indexpair {
+ static const DenseIndex first = f;
+ static const DenseIndex second = s;
+
+ constexpr EIGEN_DEVICE_FUNC operator IndexPair<DenseIndex>() const {
+ return IndexPair<DenseIndex>(f, s);
+ }
+
+ EIGEN_DEVICE_FUNC void set(const IndexPair<DenseIndex>& val) {
+ eigen_assert(val.first == f);
+ eigen_assert(val.second == s);
+ }
+};
+
+
+template<DenseIndex n> struct NumTraits<type2index<n> >
+{
+ typedef DenseIndex Real;
+ enum {
+ IsComplex = 0,
+ RequireInitialization = false,
+ ReadCost = 1,
+ AddCost = 1,
+ MulCost = 1
+ };
+
+ EIGEN_DEVICE_FUNC static inline Real epsilon() { return 0; }
+ EIGEN_DEVICE_FUNC static inline Real dummy_precision() { return 0; }
+ EIGEN_DEVICE_FUNC static inline Real highest() { return n; }
+ EIGEN_DEVICE_FUNC static inline Real lowest() { return n; }
+};
+
+namespace internal {
+template <typename T>
+EIGEN_DEVICE_FUNC void update_value(T& val, DenseIndex new_val) {
+ val = new_val;
+}
+template <DenseIndex n>
+EIGEN_DEVICE_FUNC void update_value(type2index<n>& val, DenseIndex new_val) {
+ val.set(new_val);
+}
+
+template <typename T>
+EIGEN_DEVICE_FUNC void update_value(T& val, IndexPair<DenseIndex> new_val) {
+ val = new_val;
+}
+template <DenseIndex f, DenseIndex s>
+EIGEN_DEVICE_FUNC void update_value(type2indexpair<f, s>& val, IndexPair<DenseIndex> new_val) {
+ val.set(new_val);
+}
+
+
+template <typename T>
+struct is_compile_time_constant {
+ static constexpr bool value = false;
+};
+
+template <DenseIndex idx>
+struct is_compile_time_constant<type2index<idx> > {
+ static constexpr bool value = true;
+};
+template <DenseIndex idx>
+struct is_compile_time_constant<const type2index<idx> > {
+ static constexpr bool value = true;
+};
+template <DenseIndex idx>
+struct is_compile_time_constant<type2index<idx>& > {
+ static constexpr bool value = true;
+};
+template <DenseIndex idx>
+struct is_compile_time_constant<const type2index<idx>& > {
+ static constexpr bool value = true;
+};
+
+template <DenseIndex f, DenseIndex s>
+struct is_compile_time_constant<type2indexpair<f, s> > {
+ static constexpr bool value = true;
+};
+template <DenseIndex f, DenseIndex s>
+struct is_compile_time_constant<const type2indexpair<f, s> > {
+ static constexpr bool value = true;
+};
+template <DenseIndex f, DenseIndex s>
+struct is_compile_time_constant<type2indexpair<f, s>& > {
+ static constexpr bool value = true;
+};
+template <DenseIndex f, DenseIndex s>
+struct is_compile_time_constant<const type2indexpair<f, s>& > {
+ static constexpr bool value = true;
+};
+
+
+template<typename... T>
+struct IndexTuple;
+
+template<typename T, typename... O>
+struct IndexTuple<T, O...> {
+ EIGEN_DEVICE_FUNC constexpr IndexTuple() : head(), others() { }
+ EIGEN_DEVICE_FUNC constexpr IndexTuple(const T& v, const O... o) : head(v), others(o...) { }
+
+ constexpr static int count = 1 + sizeof...(O);
+ T head;
+ IndexTuple<O...> others;
+ typedef T Head;
+ typedef IndexTuple<O...> Other;
+};
+
+template<typename T>
+struct IndexTuple<T> {
+ EIGEN_DEVICE_FUNC constexpr IndexTuple() : head() { }
+ EIGEN_DEVICE_FUNC constexpr IndexTuple(const T& v) : head(v) { }
+
+ constexpr static int count = 1;
+ T head;
+ typedef T Head;
+};
+
+
+template<int N, typename... T>
+struct IndexTupleExtractor;
+
+template<int N, typename T, typename... O>
+struct IndexTupleExtractor<N, T, O...> {
+
+ typedef typename IndexTupleExtractor<N-1, O...>::ValType ValType;
+
+ EIGEN_DEVICE_FUNC static constexpr ValType& get_val(IndexTuple<T, O...>& val) {
+ return IndexTupleExtractor<N-1, O...>::get_val(val.others);
+ }
+
+ EIGEN_DEVICE_FUNC static constexpr const ValType& get_val(const IndexTuple<T, O...>& val) {
+ return IndexTupleExtractor<N-1, O...>::get_val(val.others);
+ }
+ template <typename V>
+ EIGEN_DEVICE_FUNC static void set_val(IndexTuple<T, O...>& val, V& new_val) {
+ IndexTupleExtractor<N-1, O...>::set_val(val.others, new_val);
+ }
+
+};
+
+template<typename T, typename... O>
+struct IndexTupleExtractor<0, T, O...> {
+
+ typedef T ValType;
+
+ EIGEN_DEVICE_FUNC static constexpr ValType& get_val(IndexTuple<T, O...>& val) {
+ return val.head;
+ }
+ EIGEN_DEVICE_FUNC static constexpr const ValType& get_val(const IndexTuple<T, O...>& val) {
+ return val.head;
+ }
+ template <typename V>
+ EIGEN_DEVICE_FUNC static void set_val(IndexTuple<T, O...>& val, V& new_val) {
+ val.head = new_val;
+ }
+};
+
+
+
+template <int N, typename T, typename... O>
+EIGEN_DEVICE_FUNC constexpr typename IndexTupleExtractor<N, T, O...>::ValType& array_get(IndexTuple<T, O...>& tuple) {
+ return IndexTupleExtractor<N, T, O...>::get_val(tuple);
+}
+template <int N, typename T, typename... O>
+EIGEN_DEVICE_FUNC constexpr const typename IndexTupleExtractor<N, T, O...>::ValType& array_get(const IndexTuple<T, O...>& tuple) {
+ return IndexTupleExtractor<N, T, O...>::get_val(tuple);
+}
+template <typename T, typename... O>
+struct array_size<IndexTuple<T, O...> > {
+ static const size_t value = IndexTuple<T, O...>::count;
+};
+template <typename T, typename... O>
+struct array_size<const IndexTuple<T, O...> > {
+ static const size_t value = IndexTuple<T, O...>::count;
+};
+
+
+
+
+template <DenseIndex Idx, typename ValueT>
+struct tuple_coeff {
+ template <typename... T>
+ EIGEN_DEVICE_FUNC static constexpr ValueT get(const DenseIndex i, const IndexTuple<T...>& t) {
+ // return array_get<Idx>(t) * (i == Idx) + tuple_coeff<Idx-1>::get(i, t) * (i != Idx);
+ return (i == Idx ? array_get<Idx>(t) : tuple_coeff<Idx-1, ValueT>::get(i, t));
+ }
+ template <typename... T>
+ EIGEN_DEVICE_FUNC static void set(const DenseIndex i, IndexTuple<T...>& t, const ValueT& value) {
+ if (i == Idx) {
+ update_value(array_get<Idx>(t), value);
+ } else {
+ tuple_coeff<Idx-1, ValueT>::set(i, t, value);
+ }
+ }
+
+ template <typename... T>
+ EIGEN_DEVICE_FUNC static constexpr bool value_known_statically(const DenseIndex i, const IndexTuple<T...>& t) {
+ return ((i == Idx) & is_compile_time_constant<typename IndexTupleExtractor<Idx, T...>::ValType>::value) ||
+ tuple_coeff<Idx-1, ValueT>::value_known_statically(i, t);
+ }
+
+ template <typename... T>
+ EIGEN_DEVICE_FUNC static constexpr bool values_up_to_known_statically(const IndexTuple<T...>& t) {
+ return is_compile_time_constant<typename IndexTupleExtractor<Idx, T...>::ValType>::value &&
+ tuple_coeff<Idx-1, ValueT>::values_up_to_known_statically(t);
+ }
+
+ template <typename... T>
+ EIGEN_DEVICE_FUNC static constexpr bool values_up_to_statically_known_to_increase(const IndexTuple<T...>& t) {
+ return is_compile_time_constant<typename IndexTupleExtractor<Idx, T...>::ValType>::value &&
+ is_compile_time_constant<typename IndexTupleExtractor<Idx-1, T...>::ValType>::value &&
+ array_get<Idx>(t) > array_get<Idx-1>(t) &&
+ tuple_coeff<Idx-1, ValueT>::values_up_to_statically_known_to_increase(t);
+ }
+};
+
+template <typename ValueT>
+struct tuple_coeff<0, ValueT> {
+ template <typename... T>
+ EIGEN_DEVICE_FUNC static constexpr ValueT get(const DenseIndex /*i*/, const IndexTuple<T...>& t) {
+ // eigen_assert (i == 0); // gcc fails to compile assertions in constexpr
+ return array_get<0>(t)/* * (i == 0)*/;
+ }
+ template <typename... T>
+ EIGEN_DEVICE_FUNC static void set(const DenseIndex i, IndexTuple<T...>& t, const ValueT value) {
+ eigen_assert (i == 0);
+ update_value(array_get<0>(t), value);
+ }
+ template <typename... T>
+ EIGEN_DEVICE_FUNC static constexpr bool value_known_statically(const DenseIndex i, const IndexTuple<T...>&) {
+ return is_compile_time_constant<typename IndexTupleExtractor<0, T...>::ValType>::value & (i == 0);
+ }
+
+ template <typename... T>
+ EIGEN_DEVICE_FUNC static constexpr bool values_up_to_known_statically(const IndexTuple<T...>&) {
+ return is_compile_time_constant<typename IndexTupleExtractor<0, T...>::ValType>::value;
+ }
+
+ template <typename... T>
+ EIGEN_DEVICE_FUNC static constexpr bool values_up_to_statically_known_to_increase(const IndexTuple<T...>&) {
+ return true;
+ }
+};
+} // namespace internal
+
+
+
+template<typename FirstType, typename... OtherTypes>
+struct IndexList : internal::IndexTuple<FirstType, OtherTypes...> {
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC constexpr DenseIndex operator[] (const DenseIndex i) const {
+ return internal::tuple_coeff<internal::array_size<internal::IndexTuple<FirstType, OtherTypes...> >::value-1, DenseIndex>::get(i, *this);
+ }
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC constexpr DenseIndex get(const DenseIndex i) const {
+ return internal::tuple_coeff<internal::array_size<internal::IndexTuple<FirstType, OtherTypes...> >::value-1, DenseIndex>::get(i, *this);
+ }
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC void set(const DenseIndex i, const DenseIndex value) {
+ return internal::tuple_coeff<internal::array_size<internal::IndexTuple<FirstType, OtherTypes...> >::value-1, DenseIndex>::set(i, *this, value);
+ }
+
+ EIGEN_DEVICE_FUNC constexpr IndexList(const internal::IndexTuple<FirstType, OtherTypes...>& other) : internal::IndexTuple<FirstType, OtherTypes...>(other) { }
+ EIGEN_DEVICE_FUNC constexpr IndexList(FirstType& first, OtherTypes... other) : internal::IndexTuple<FirstType, OtherTypes...>(first, other...) { }
+ EIGEN_DEVICE_FUNC constexpr IndexList() : internal::IndexTuple<FirstType, OtherTypes...>() { }
+
+ EIGEN_DEVICE_FUNC constexpr bool value_known_statically(const DenseIndex i) const {
+ return internal::tuple_coeff<internal::array_size<internal::IndexTuple<FirstType, OtherTypes...> >::value-1, DenseIndex>::value_known_statically(i, *this);
+ }
+ EIGEN_DEVICE_FUNC constexpr bool all_values_known_statically() const {
+ return internal::tuple_coeff<internal::array_size<internal::IndexTuple<FirstType, OtherTypes...> >::value-1, DenseIndex>::values_up_to_known_statically(*this);
+ }
+
+ EIGEN_DEVICE_FUNC constexpr bool values_statically_known_to_increase() const {
+ return internal::tuple_coeff<internal::array_size<internal::IndexTuple<FirstType, OtherTypes...> >::value-1, DenseIndex>::values_up_to_statically_known_to_increase(*this);
+ }
+};
+
+
+template<typename FirstType, typename... OtherTypes>
+constexpr IndexList<FirstType, OtherTypes...> make_index_list(FirstType val1, OtherTypes... other_vals) {
+ return IndexList<FirstType, OtherTypes...>(val1, other_vals...);
+}
+
+
+template<typename FirstType, typename... OtherTypes>
+struct IndexPairList : internal::IndexTuple<FirstType, OtherTypes...> {
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC constexpr IndexPair<DenseIndex> operator[] (const DenseIndex i) const {
+ return internal::tuple_coeff<internal::array_size<internal::IndexTuple<FirstType, OtherTypes...> >::value-1, IndexPair<DenseIndex>>::get(i, *this);
+ }
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC void set(const DenseIndex i, const IndexPair<DenseIndex> value) {
+ return internal::tuple_coeff<internal::array_size<internal::IndexTuple<FirstType, OtherTypes...>>::value-1, IndexPair<DenseIndex> >::set(i, *this, value);
+ }
+
+ EIGEN_DEVICE_FUNC constexpr IndexPairList(const internal::IndexTuple<FirstType, OtherTypes...>& other) : internal::IndexTuple<FirstType, OtherTypes...>(other) { }
+ EIGEN_DEVICE_FUNC constexpr IndexPairList() : internal::IndexTuple<FirstType, OtherTypes...>() { }
+
+ EIGEN_DEVICE_FUNC constexpr bool value_known_statically(const DenseIndex i) const {
+ return internal::tuple_coeff<internal::array_size<internal::IndexTuple<FirstType, OtherTypes...> >::value-1, DenseIndex>::value_known_statically(i, *this);
+ }
+};
+
+namespace internal {
+
+template<typename FirstType, typename... OtherTypes> size_t array_prod(const IndexList<FirstType, OtherTypes...>& sizes) {
+ size_t result = 1;
+ for (int i = 0; i < array_size<IndexList<FirstType, OtherTypes...> >::value; ++i) {
+ result *= sizes[i];
+ }
+ return result;
+}
+
+template<typename FirstType, typename... OtherTypes> struct array_size<IndexList<FirstType, OtherTypes...> > {
+ static const size_t value = array_size<IndexTuple<FirstType, OtherTypes...> >::value;
+};
+template<typename FirstType, typename... OtherTypes> struct array_size<const IndexList<FirstType, OtherTypes...> > {
+ static const size_t value = array_size<IndexTuple<FirstType, OtherTypes...> >::value;
+};
+
+template<typename FirstType, typename... OtherTypes> struct array_size<IndexPairList<FirstType, OtherTypes...> > {
+ static const size_t value = std::tuple_size<std::tuple<FirstType, OtherTypes...> >::value;
+};
+template<typename FirstType, typename... OtherTypes> struct array_size<const IndexPairList<FirstType, OtherTypes...> > {
+ static const size_t value = std::tuple_size<std::tuple<FirstType, OtherTypes...> >::value;
+};
+
+template<DenseIndex N, typename FirstType, typename... OtherTypes> EIGEN_DEVICE_FUNC constexpr DenseIndex array_get(IndexList<FirstType, OtherTypes...>& a) {
+ return IndexTupleExtractor<N, FirstType, OtherTypes...>::get_val(a);
+}
+template<DenseIndex N, typename FirstType, typename... OtherTypes> EIGEN_DEVICE_FUNC constexpr DenseIndex array_get(const IndexList<FirstType, OtherTypes...>& a) {
+ return IndexTupleExtractor<N, FirstType, OtherTypes...>::get_val(a);
+}
+
+template <typename T>
+struct index_known_statically_impl {
+ EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex) {
+ return false;
+ }
+};
+
+template <typename FirstType, typename... OtherTypes>
+struct index_known_statically_impl<IndexList<FirstType, OtherTypes...> > {
+ EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i) {
+ return IndexList<FirstType, OtherTypes...>().value_known_statically(i);
+ }
+};
+
+template <typename FirstType, typename... OtherTypes>
+struct index_known_statically_impl<const IndexList<FirstType, OtherTypes...> > {
+ EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i) {
+ return IndexList<FirstType, OtherTypes...>().value_known_statically(i);
+ }
+};
+
+
+template <typename T>
+struct all_indices_known_statically_impl {
+ static constexpr bool run() {
+ return false;
+ }
+};
+
+template <typename FirstType, typename... OtherTypes>
+struct all_indices_known_statically_impl<IndexList<FirstType, OtherTypes...> > {
+ EIGEN_DEVICE_FUNC static constexpr bool run() {
+ return IndexList<FirstType, OtherTypes...>().all_values_known_statically();
+ }
+};
+
+template <typename FirstType, typename... OtherTypes>
+struct all_indices_known_statically_impl<const IndexList<FirstType, OtherTypes...> > {
+ EIGEN_DEVICE_FUNC static constexpr bool run() {
+ return IndexList<FirstType, OtherTypes...>().all_values_known_statically();
+ }
+};
+
+
+template <typename T>
+struct indices_statically_known_to_increase_impl {
+ EIGEN_DEVICE_FUNC static constexpr bool run() {
+ return false;
+ }
+};
+
+template <typename FirstType, typename... OtherTypes>
+struct indices_statically_known_to_increase_impl<IndexList<FirstType, OtherTypes...> > {
+ EIGEN_DEVICE_FUNC static constexpr bool run() {
+ return Eigen::IndexList<FirstType, OtherTypes...>().values_statically_known_to_increase();
+ }
+};
+
+template <typename FirstType, typename... OtherTypes>
+struct indices_statically_known_to_increase_impl<const IndexList<FirstType, OtherTypes...> > {
+ EIGEN_DEVICE_FUNC static constexpr bool run() {
+ return Eigen::IndexList<FirstType, OtherTypes...>().values_statically_known_to_increase();
+ }
+};
+
+
+template <typename Tx>
+struct index_statically_eq_impl {
+ EIGEN_DEVICE_FUNC static constexpr bool run(DenseIndex, DenseIndex) {
+ return false;
+ }
+};
+
+template <typename FirstType, typename... OtherTypes>
+struct index_statically_eq_impl<IndexList<FirstType, OtherTypes...> > {
+ EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) {
+ return IndexList<FirstType, OtherTypes...>().value_known_statically(i) &
+ (IndexList<FirstType, OtherTypes...>().get(i) == value);
+ }
+};
+
+template <typename FirstType, typename... OtherTypes>
+struct index_statically_eq_impl<const IndexList<FirstType, OtherTypes...> > {
+ EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) {
+ return IndexList<FirstType, OtherTypes...>().value_known_statically(i) &
+ (IndexList<FirstType, OtherTypes...>().get(i) == value);
+ }
+};
+
+
+template <typename T>
+struct index_statically_ne_impl {
+ EIGEN_DEVICE_FUNC static constexpr bool run(DenseIndex, DenseIndex) {
+ return false;
+ }
+};
+
+template <typename FirstType, typename... OtherTypes>
+struct index_statically_ne_impl<IndexList<FirstType, OtherTypes...> > {
+ EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) {
+ return IndexList<FirstType, OtherTypes...>().value_known_statically(i) &
+ (IndexList<FirstType, OtherTypes...>().get(i) != value);
+ }
+};
+
+template <typename FirstType, typename... OtherTypes>
+struct index_statically_ne_impl<const IndexList<FirstType, OtherTypes...> > {
+ EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) {
+ return IndexList<FirstType, OtherTypes...>().value_known_statically(i) &
+ (IndexList<FirstType, OtherTypes...>().get(i) != value);
+ }
+};
+
+
+template <typename T>
+struct index_statically_gt_impl {
+ EIGEN_DEVICE_FUNC static constexpr bool run(DenseIndex, DenseIndex) {
+ return false;
+ }
+};
+
+template <typename FirstType, typename... OtherTypes>
+struct index_statically_gt_impl<IndexList<FirstType, OtherTypes...> > {
+ EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) {
+ return IndexList<FirstType, OtherTypes...>().value_known_statically(i) &
+ (IndexList<FirstType, OtherTypes...>().get(i) > value);
+ }
+};
+
+template <typename FirstType, typename... OtherTypes>
+struct index_statically_gt_impl<const IndexList<FirstType, OtherTypes...> > {
+ EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) {
+ return IndexList<FirstType, OtherTypes...>().value_known_statically(i) &
+ (IndexList<FirstType, OtherTypes...>().get(i) > value);
+ }
+};
+
+
+
+template <typename T>
+struct index_statically_lt_impl {
+ EIGEN_DEVICE_FUNC static constexpr bool run(DenseIndex, DenseIndex) {
+ return false;
+ }
+};
+
+template <typename FirstType, typename... OtherTypes>
+struct index_statically_lt_impl<IndexList<FirstType, OtherTypes...> > {
+ EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) {
+ return IndexList<FirstType, OtherTypes...>().value_known_statically(i) &
+ (IndexList<FirstType, OtherTypes...>().get(i) < value);
+ }
+};
+
+template <typename FirstType, typename... OtherTypes>
+struct index_statically_lt_impl<const IndexList<FirstType, OtherTypes...> > {
+ EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) {
+ return IndexList<FirstType, OtherTypes...>().value_known_statically(i) &
+ (IndexList<FirstType, OtherTypes...>().get(i) < value);
+ }
+};
+
+
+
+template <typename Tx>
+struct index_pair_first_statically_eq_impl {
+ EIGEN_DEVICE_FUNC static constexpr bool run(DenseIndex, DenseIndex) {
+ return false;
+ }
+};
+
+template <typename FirstType, typename... OtherTypes>
+struct index_pair_first_statically_eq_impl<IndexPairList<FirstType, OtherTypes...> > {
+ EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) {
+ return IndexPairList<FirstType, OtherTypes...>().value_known_statically(i) &
+ (IndexPairList<FirstType, OtherTypes...>().operator[](i).first == value);
+ }
+};
+
+template <typename FirstType, typename... OtherTypes>
+struct index_pair_first_statically_eq_impl<const IndexPairList<FirstType, OtherTypes...> > {
+ EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) {
+ return IndexPairList<FirstType, OtherTypes...>().value_known_statically(i) &
+ (IndexPairList<FirstType, OtherTypes...>().operator[](i).first == value);
+ }
+};
+
+
+
+template <typename Tx>
+struct index_pair_second_statically_eq_impl {
+ EIGEN_DEVICE_FUNC static constexpr bool run(DenseIndex, DenseIndex) {
+ return false;
+ }
+};
+
+template <typename FirstType, typename... OtherTypes>
+struct index_pair_second_statically_eq_impl<IndexPairList<FirstType, OtherTypes...> > {
+ EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) {
+ return IndexPairList<FirstType, OtherTypes...>().value_known_statically(i) &
+ (IndexPairList<FirstType, OtherTypes...>().operator[](i).second == value);
+ }
+};
+
+template <typename FirstType, typename... OtherTypes>
+struct index_pair_second_statically_eq_impl<const IndexPairList<FirstType, OtherTypes...> > {
+ EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) {
+ return IndexPairList<FirstType, OtherTypes...>().value_known_statically(i) &
+ (IndexPairList<FirstType, OtherTypes...>().operator[](i).second == value);
+ }
+};
+
+
+} // end namespace internal
+} // end namespace Eigen
+
+#else
+
+namespace Eigen {
+namespace internal {
+
+template <typename T>
+struct index_known_statically_impl {
+ static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(const DenseIndex) {
+ return false;
+ }
+};
+
+template <typename T>
+struct all_indices_known_statically_impl {
+ static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run() {
+ return false;
+ }
+};
+
+template <typename T>
+struct indices_statically_known_to_increase_impl {
+ static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run() {
+ return false;
+ }
+};
+
+template <typename T>
+struct index_statically_eq_impl {
+ static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(DenseIndex, DenseIndex) {
+ return false;
+ }
+};
+
+template <typename T>
+struct index_statically_ne_impl {
+ static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(DenseIndex, DenseIndex) {
+ return false;
+ }
+};
+
+template <typename T>
+struct index_statically_gt_impl {
+ static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(DenseIndex, DenseIndex) {
+ return false;
+ }
+};
+
+template <typename T>
+struct index_statically_lt_impl {
+ static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(DenseIndex, DenseIndex) {
+ return false;
+ }
+};
+
+template <typename Tx>
+struct index_pair_first_statically_eq_impl {
+ static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(DenseIndex, DenseIndex) {
+ return false;
+ }
+};
+
+template <typename Tx>
+struct index_pair_second_statically_eq_impl {
+ static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(DenseIndex, DenseIndex) {
+ return false;
+ }
+};
+
+
+
+} // end namespace internal
+} // end namespace Eigen
+
+#endif
+
+
+namespace Eigen {
+namespace internal {
+template <typename T>
+static EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bool index_known_statically(DenseIndex i) {
+ return index_known_statically_impl<T>::run(i);
+}
+
+template <typename T>
+static EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bool all_indices_known_statically() {
+ return all_indices_known_statically_impl<T>::run();
+}
+
+template <typename T>
+static EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bool indices_statically_known_to_increase() {
+ return indices_statically_known_to_increase_impl<T>::run();
+}
+
+template <typename T>
+static EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bool index_statically_eq(DenseIndex i, DenseIndex value) {
+ return index_statically_eq_impl<T>::run(i, value);
+}
+
+template <typename T>
+static EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bool index_statically_ne(DenseIndex i, DenseIndex value) {
+ return index_statically_ne_impl<T>::run(i, value);
+}
+
+template <typename T>
+static EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bool index_statically_gt(DenseIndex i, DenseIndex value) {
+ return index_statically_gt_impl<T>::run(i, value);
+}
+
+template <typename T>
+static EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bool index_statically_lt(DenseIndex i, DenseIndex value) {
+ return index_statically_lt_impl<T>::run(i, value);
+}
+
+template <typename T>
+static EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bool index_pair_first_statically_eq(DenseIndex i, DenseIndex value) {
+ return index_pair_first_statically_eq_impl<T>::run(i, value);
+}
+
+template <typename T>
+static EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bool index_pair_second_statically_eq(DenseIndex i, DenseIndex value) {
+ return index_pair_second_statically_eq_impl<T>::run(i, value);
+}
+
+} // end namespace internal
+} // end namespace Eigen
+
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_INDEX_LIST_H
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorInflation.h b/unsupported/Eigen/CXX11/src/Tensor/TensorInflation.h
new file mode 100644
index 000000000..f391fb9ee
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorInflation.h
@@ -0,0 +1,229 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2015 Ke Yang <yangke@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_INFLATION_H
+#define EIGEN_CXX11_TENSOR_TENSOR_INFLATION_H
+
+namespace Eigen {
+
+/** \class TensorInflation
+ * \ingroup CXX11_Tensor_Module
+ *
+ * \brief Tensor inflation class.
+ *
+ * Inflates a tensor by inserting zero-filled gaps between its coefficients:
+ * along each dimension, a size of d becomes (d - 1) * stride + 1, and
+ * coefficients that fall into the gaps evaluate to zero.
+ */
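+//
+// A short sketch (assuming the usual inflate() entry point on tensor
+// expressions; values are illustrative):
+//
+//   Eigen::Tensor<float, 1> t(3);
+//   t.setValues({1.f, 2.f, 3.f});
+//   Eigen::array<Eigen::DenseIndex, 1> strides{{2}};
+//   Eigen::Tensor<float, 1> r = t.inflate(strides);  // [1, 0, 2, 0, 3]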
+namespace internal {
+template<typename Strides, typename XprType>
+struct traits<TensorInflationOp<Strides, XprType> > : public traits<XprType>
+{
+ typedef typename XprType::Scalar Scalar;
+ typedef traits<XprType> XprTraits;
+ typedef typename XprTraits::StorageKind StorageKind;
+ typedef typename XprTraits::Index Index;
+ typedef typename XprType::Nested Nested;
+ typedef typename remove_reference<Nested>::type _Nested;
+ static const int NumDimensions = XprTraits::NumDimensions;
+ static const int Layout = XprTraits::Layout;
+};
+
+template<typename Strides, typename XprType>
+struct eval<TensorInflationOp<Strides, XprType>, Eigen::Dense>
+{
+ typedef const TensorInflationOp<Strides, XprType>& type;
+};
+
+template<typename Strides, typename XprType>
+struct nested<TensorInflationOp<Strides, XprType>, 1, typename eval<TensorInflationOp<Strides, XprType> >::type>
+{
+ typedef TensorInflationOp<Strides, XprType> type;
+};
+
+} // end namespace internal
+
+template<typename Strides, typename XprType>
+class TensorInflationOp : public TensorBase<TensorInflationOp<Strides, XprType>, ReadOnlyAccessors>
+{
+ public:
+ typedef typename Eigen::internal::traits<TensorInflationOp>::Scalar Scalar;
+ typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+ typedef typename Eigen::internal::nested<TensorInflationOp>::type Nested;
+ typedef typename Eigen::internal::traits<TensorInflationOp>::StorageKind StorageKind;
+ typedef typename Eigen::internal::traits<TensorInflationOp>::Index Index;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorInflationOp(const XprType& expr, const Strides& strides)
+ : m_xpr(expr), m_strides(strides) {}
+
+ EIGEN_DEVICE_FUNC
+ const Strides& strides() const { return m_strides; }
+
+ EIGEN_DEVICE_FUNC
+ const typename internal::remove_all<typename XprType::Nested>::type&
+ expression() const { return m_xpr; }
+
+ protected:
+ typename XprType::Nested m_xpr;
+ const Strides m_strides;
+};
+
+// Eval as rvalue
+template<typename Strides, typename ArgType, typename Device>
+struct TensorEvaluator<const TensorInflationOp<Strides, ArgType>, Device>
+{
+ typedef TensorInflationOp<Strides, ArgType> XprType;
+ typedef typename XprType::Index Index;
+ static const int NumDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value;
+ typedef DSizes<Index, NumDims> Dimensions;
+ typedef typename XprType::Scalar Scalar;
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+ typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
+ static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
+
+ enum {
+ IsAligned = /*TensorEvaluator<ArgType, Device>::IsAligned*/ false,
+ PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
+ BlockAccess = false,
+ Layout = TensorEvaluator<ArgType, Device>::Layout,
+ CoordAccess = false, // to be implemented
+ RawAccess = false
+ };
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
+ : m_impl(op.expression(), device), m_strides(op.strides())
+ {
+ m_dimensions = m_impl.dimensions();
+ // Expand each dimension to the inflated dimension.
+ for (int i = 0; i < NumDims; ++i) {
+ m_dimensions[i] = (m_dimensions[i] - 1) * op.strides()[i] + 1;
+ }
+
+ // Remember the strides for fast division.
+ for (int i = 0; i < NumDims; ++i) {
+ m_fastStrides[i] = internal::TensorIntDivisor<Index>(m_strides[i]);
+ }
+
+ const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ m_outputStrides[0] = 1;
+ m_inputStrides[0] = 1;
+ for (int i = 1; i < NumDims; ++i) {
+ m_outputStrides[i] = m_outputStrides[i-1] * m_dimensions[i-1];
+ m_inputStrides[i] = m_inputStrides[i-1] * input_dims[i-1];
+ }
+ } else { // RowMajor
+ m_outputStrides[NumDims-1] = 1;
+ m_inputStrides[NumDims-1] = 1;
+ for (int i = NumDims - 2; i >= 0; --i) {
+ m_outputStrides[i] = m_outputStrides[i+1] * m_dimensions[i+1];
+ m_inputStrides[i] = m_inputStrides[i+1] * input_dims[i+1];
+ }
+ }
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* /*data*/) {
+ m_impl.evalSubExprsIfNeeded(NULL);
+ return true;
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
+ m_impl.cleanup();
+ }
+
+ // Computes the input index given the output index. Returns true if the output
+ // index doesn't fall into a hole.
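+ // E.g. with a stride of 2 along a dimension, output index 3 falls between
+ // two input coefficients (a hole), while output index 4 maps back to input
+ // index 2.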
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool getInputIndex(Index index, Index* inputIndex) const
+ {
+ eigen_assert(index < dimensions().TotalSize());
+ *inputIndex = 0;
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ for (int i = NumDims - 1; i > 0; --i) {
+ const Index idx = index / m_outputStrides[i];
+ if (idx != idx / m_fastStrides[i] * m_strides[i]) {
+ return false;
+ }
+ *inputIndex += idx / m_strides[i] * m_inputStrides[i];
+ index -= idx * m_outputStrides[i];
+ }
+ if (index != index / m_fastStrides[0] * m_strides[0]) {
+ return false;
+ }
+ *inputIndex += index / m_strides[0];
+ return true;
+ } else {
+ for (int i = 0; i < NumDims - 1; ++i) {
+ const Index idx = index / m_outputStrides[i];
+ if (idx != idx / m_fastStrides[i] * m_strides[i]) {
+ return false;
+ }
+ *inputIndex += idx / m_strides[i] * m_inputStrides[i];
+ index -= idx * m_outputStrides[i];
+ }
+ if (index != index / m_fastStrides[NumDims-1] * m_strides[NumDims-1]) {
+ return false;
+ }
+ *inputIndex += index / m_strides[NumDims - 1];
+ }
+ return true;
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
+ {
+ Index inputIndex = 0;
+ if (getInputIndex(index, &inputIndex)) {
+ return m_impl.coeff(inputIndex);
+ } else {
+ return Scalar(0);
+ }
+ }
+
+ // TODO(yangke): optimize this function so that we can detect and produce
+ // all-zero packets
+ template<int LoadMode>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
+ {
+ EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
+ eigen_assert(index+PacketSize-1 < dimensions().TotalSize());
+
+ EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize];
+ for (int i = 0; i < PacketSize; ++i) {
+ values[i] = coeff(index+i);
+ }
+ PacketReturnType rslt = internal::pload<PacketReturnType>(values);
+ return rslt;
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
+ const double compute_cost = NumDims * (3 * TensorOpCost::DivCost<Index>() +
+ 3 * TensorOpCost::MulCost<Index>() +
+ 2 * TensorOpCost::AddCost<Index>());
+ const double input_size = m_impl.dimensions().TotalSize();
+ const double output_size = m_dimensions.TotalSize();
+ if (output_size == 0)
+ return TensorOpCost();
+ return m_impl.costPerCoeff(vectorized) +
+ TensorOpCost(sizeof(CoeffReturnType) * input_size / output_size, 0,
+ compute_cost, vectorized, PacketSize);
+ }
+
+ EIGEN_DEVICE_FUNC Scalar* data() const { return NULL; }
+
+ protected:
+ Dimensions m_dimensions;
+ array<Index, NumDims> m_outputStrides;
+ array<Index, NumDims> m_inputStrides;
+ TensorEvaluator<ArgType, Device> m_impl;
+ const Strides m_strides;
+ array<internal::TensorIntDivisor<Index>, NumDims> m_fastStrides;
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_INFLATION_H
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorInitializer.h b/unsupported/Eigen/CXX11/src/Tensor/TensorInitializer.h
new file mode 100644
index 000000000..33edc49e3
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorInitializer.h
@@ -0,0 +1,82 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_INITIALIZER_H
+#define EIGEN_CXX11_TENSOR_TENSOR_INITIALIZER_H
+
+#if EIGEN_HAS_VARIADIC_TEMPLATES
+
+#include <initializer_list>
+
+namespace Eigen {
+
+/** \class TensorInitializer
+ * \ingroup CXX11_Tensor_Module
+ *
+ * \brief Helper template to initialize Tensors from std::initializer_lists.
+ */
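+//
+// This machinery backs calls such as setValues() on tensors; a minimal
+// sketch (nested braces, one level per rank):
+//
+//   Eigen::Tensor<int, 2> t(2, 3);
+//   t.setValues({{1, 2, 3}, {4, 5, 6}});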
+namespace internal {
+
+template <typename Derived, int N>
+struct Initializer {
+ typedef std::initializer_list<
+ typename Initializer<Derived, N - 1>::InitList> InitList;
+
+ static void run(TensorEvaluator<Derived, DefaultDevice>& tensor,
+ Eigen::array<typename traits<Derived>::Index, traits<Derived>::NumDimensions>* indices,
+ const InitList& vals) {
+ int i = 0;
+ for (auto v : vals) {
+ (*indices)[traits<Derived>::NumDimensions - N] = i++;
+ Initializer<Derived, N - 1>::run(tensor, indices, v);
+ }
+ }
+};
+
+template <typename Derived>
+struct Initializer<Derived, 1> {
+ typedef std::initializer_list<typename traits<Derived>::Scalar> InitList;
+
+ static void run(TensorEvaluator<Derived, DefaultDevice>& tensor,
+ Eigen::array<typename traits<Derived>::Index, traits<Derived>::NumDimensions>* indices,
+ const InitList& vals) {
+ int i = 0;
+ // There is likely a faster way to do that than iterating.
+ for (auto v : vals) {
+ (*indices)[traits<Derived>::NumDimensions - 1] = i++;
+ tensor.coeffRef(*indices) = v;
+ }
+ }
+};
+
+template <typename Derived>
+struct Initializer<Derived, 0> {
+ typedef typename traits<Derived>::Scalar InitList;
+
+ static void run(TensorEvaluator<Derived, DefaultDevice>& tensor,
+ Eigen::array<typename traits<Derived>::Index, traits<Derived>::NumDimensions>*,
+ const InitList& v) {
+ tensor.coeffRef(0) = v;
+ }
+};
+
+
+template <typename Derived, int N>
+void initialize_tensor(TensorEvaluator<Derived, DefaultDevice>& tensor,
+ const typename Initializer<Derived, traits<Derived>::NumDimensions>::InitList& vals) {
+ Eigen::array<typename traits<Derived>::Index, traits<Derived>::NumDimensions> indices;
+ Initializer<Derived, traits<Derived>::NumDimensions>::run(tensor, &indices, vals);
+}
+
+} // namespace internal
+} // namespace Eigen
+
+#endif // EIGEN_HAS_VARIADIC_TEMPLATES
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_INITIALIZER_H
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorIntDiv.h b/unsupported/Eigen/CXX11/src/Tensor/TensorIntDiv.h
new file mode 100644
index 000000000..ede3939c2
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorIntDiv.h
@@ -0,0 +1,253 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_INTDIV_H
+#define EIGEN_CXX11_TENSOR_TENSOR_INTDIV_H
+
+
+namespace Eigen {
+
+/** \internal
+ *
+ * \class TensorIntDiv
+ * \ingroup CXX11_Tensor_Module
+ *
+ * \brief Fast integer division by a constant.
+ *
+ * See the paper from Granlund and Montgomery for explanation.
+ * (at http://dx.doi.org/10.1145/773473.178249)
+ *
+ * \sa Tensor
+ */
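+//
+// A minimal usage sketch (using the TensorIntDivisor type and the operator/
+// overload defined below):
+//
+//   const internal::TensorIntDivisor<int> fast_div(7);  // precompute once
+//   int q = 42 / fast_div;  // multiply + shift; same result as 42 / 7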
+
+namespace internal {
+
+namespace {
+
+ // Note: result is undefined if val == 0
+ template <typename T>
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
+ typename internal::enable_if<sizeof(T)==4,int>::type count_leading_zeros(const T val)
+ {
+#ifdef __CUDA_ARCH__
+ return __clz(val);
+#elif EIGEN_COMP_MSVC
+ unsigned long index;
+ _BitScanReverse(&index, val);
+ return 31 - index;
+#else
+ EIGEN_STATIC_ASSERT(sizeof(unsigned long long) == 8, YOU_MADE_A_PROGRAMMING_MISTAKE);
+ return __builtin_clz(static_cast<uint32_t>(val));
+#endif
+ }
+
+ template <typename T>
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
+ typename internal::enable_if<sizeof(T)==8,int>::type count_leading_zeros(const T val)
+ {
+#ifdef __CUDA_ARCH__
+ return __clzll(val);
+#elif EIGEN_COMP_MSVC && EIGEN_ARCH_x86_64
+ unsigned long index;
+ _BitScanReverse64(&index, val);
+ return 63 - index;
+#elif EIGEN_COMP_MSVC
+ // MSVC's _BitScanReverse64 is not available for 32-bit builds.
+ unsigned int lo = (unsigned int)(val&0xffffffff);
+ unsigned int hi = (unsigned int)((val>>32)&0xffffffff);
+ int n;
+ if(hi==0)
+ n = 32 + count_leading_zeros<unsigned int>(lo);
+ else
+ n = count_leading_zeros<unsigned int>(hi);
+ return n;
+#else
+ EIGEN_STATIC_ASSERT(sizeof(unsigned long long) == 8, YOU_MADE_A_PROGRAMMING_MISTAKE);
+ return __builtin_clzll(static_cast<uint64_t>(val));
+#endif
+ }
+
+ template <typename T>
+ struct UnsignedTraits {
+ typedef typename conditional<sizeof(T) == 8, uint64_t, uint32_t>::type type;
+ };
+
+ template <typename T>
+ struct DividerTraits {
+ typedef typename UnsignedTraits<T>::type type;
+ static const int N = sizeof(T) * 8;
+ };
+
+ template <typename T>
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE uint32_t muluh(const uint32_t a, const T b) {
+#if defined(__CUDA_ARCH__)
+ return __umulhi(a, b);
+#else
+ return (static_cast<uint64_t>(a) * b) >> 32;
+#endif
+ }
+
+ template <typename T>
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE uint64_t muluh(const uint64_t a, const T b) {
+#if defined(__CUDA_ARCH__)
+ return __umul64hi(a, b);
+#elif defined(__SIZEOF_INT128__)
+ __uint128_t v = static_cast<__uint128_t>(a) * static_cast<__uint128_t>(b);
+ return static_cast<uint64_t>(v >> 64);
+#else
+ return (TensorUInt128<static_val<0>, uint64_t>(a) * TensorUInt128<static_val<0>, uint64_t>(b)).upper();
+#endif
+ }
+
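+ // DividerHelper computes the Granlund-Montgomery magic multiplier
+ // M = 2^(N+log_div) / d - 2^N + 1; divide() below then recovers
+ // floor(n/d) from the upper half of n*M with two shifts.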
+ template <int N, typename T>
+ struct DividerHelper {
+ static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE uint32_t computeMultiplier(const int log_div, const T divider) {
+ EIGEN_STATIC_ASSERT(N == 32, YOU_MADE_A_PROGRAMMING_MISTAKE);
+ return static_cast<uint32_t>((static_cast<uint64_t>(1) << (N+log_div)) / divider - (static_cast<uint64_t>(1) << N) + 1);
+ }
+ };
+
+ template <typename T>
+ struct DividerHelper<64, T> {
+ static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE uint64_t computeMultiplier(const int log_div, const T divider) {
+#if defined(__SIZEOF_INT128__) && !defined(__CUDA_ARCH__)
+ return static_cast<uint64_t>((static_cast<__uint128_t>(1) << (64+log_div)) / static_cast<__uint128_t>(divider) - (static_cast<__uint128_t>(1) << 64) + 1);
+#else
+ const uint64_t shift = 1ULL << log_div;
+ TensorUInt128<uint64_t, uint64_t> result = TensorUInt128<uint64_t, static_val<0> >(shift, 0) / TensorUInt128<static_val<0>, uint64_t>(divider)
+ - TensorUInt128<static_val<1>, static_val<0> >(1, 0)
+ + TensorUInt128<static_val<0>, static_val<1> >(1);
+ return static_cast<uint64_t>(result);
+#endif
+ }
+ };
+}
+
+
+template <typename T, bool div_gt_one = false>
+struct TensorIntDivisor {
+ public:
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorIntDivisor() {
+ multiplier = 0;
+ shift1 = 0;
+ shift2 = 0;
+ }
+
+ // Must have 0 < divider < 2^31. This is relaxed to
+ // 0 < divider < 2^63 when using 64-bit indices on platforms that support
+ // the __uint128_t type.
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorIntDivisor(const T divider) {
+ const int N = DividerTraits<T>::N;
+ eigen_assert(static_cast<typename UnsignedTraits<T>::type>(divider) < NumTraits<UnsignedType>::highest()/2);
+ eigen_assert(divider > 0);
+
+ // fast log2
+ const int leading_zeros = count_leading_zeros(static_cast<UnsignedType>(divider));
+ int log_div = N - leading_zeros;
+ // if divider is a power of two then log_div is 1 more than it should be.
+ if ((static_cast<typename UnsignedTraits<T>::type>(1) << (log_div-1)) == static_cast<typename UnsignedTraits<T>::type>(divider))
+ log_div--;
+
+ multiplier = DividerHelper<N, T>::computeMultiplier(log_div, divider);
+ shift1 = log_div > 1 ? 1 : log_div;
+ shift2 = log_div > 1 ? log_div-1 : 0;
+ }
+
+ // Must have 0 <= numerator. On platforms that don't support the __uint128_t
+ // type, numerator should also be less than 2^32-1.
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T divide(const T numerator) const {
+ eigen_assert(static_cast<typename UnsignedTraits<T>::type>(numerator) < NumTraits<UnsignedType>::highest()/2);
+ //eigen_assert(numerator >= 0); // this is implicitly asserted by the line above
+
+ UnsignedType t1 = muluh(multiplier, numerator);
+ UnsignedType t = (static_cast<UnsignedType>(numerator) - t1) >> shift1;
+ return (t1 + t) >> shift2;
+ }
+
+ private:
+ typedef typename DividerTraits<T>::type UnsignedType;
+ UnsignedType multiplier;
+ int32_t shift1;
+ int32_t shift2;
+};
+
+
+// Optimized version for signed 32-bit integers.
+// Derived from Hacker's Delight.
+// Only works for divisors strictly greater than one.
+template <>
+class TensorIntDivisor<int32_t, true> {
+ public:
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorIntDivisor() {
+ magic = 0;
+ shift = 0;
+ }
+ // Must have 2 <= divider
+ EIGEN_DEVICE_FUNC TensorIntDivisor(int32_t divider) {
+ eigen_assert(divider >= 2);
+ calcMagic(divider);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE int divide(const int32_t n) const {
+#ifdef __CUDA_ARCH__
+ return (__umulhi(magic, n) >> shift);
+#else
+ uint64_t v = static_cast<uint64_t>(magic) * static_cast<uint64_t>(n);
+ return (static_cast<uint32_t>(v >> 32) >> shift);
+#endif
+ }
+
+private:
+ // Compute the magic numbers. See Hacker's Delight section 10 for an
+ // in-depth explanation.
+ EIGEN_DEVICE_FUNC void calcMagic(int32_t d) {
+ const unsigned two31 = 0x80000000; // 2**31.
+ unsigned ad = d;
+ unsigned t = two31 + (ad >> 31);
+ unsigned anc = t - 1 - t%ad; // Absolute value of nc.
+ int p = 31; // Init. p.
+ unsigned q1 = two31/anc; // Init. q1 = 2**p/|nc|.
+ unsigned r1 = two31 - q1*anc; // Init. r1 = rem(2**p, |nc|).
+ unsigned q2 = two31/ad; // Init. q2 = 2**p/|d|.
+ unsigned r2 = two31 - q2*ad; // Init. r2 = rem(2**p, |d|).
+ unsigned delta = 0;
+ do {
+ p = p + 1;
+ q1 = 2*q1; // Update q1 = 2**p/|nc|.
+ r1 = 2*r1; // Update r1 = rem(2**p, |nc|).
+ if (r1 >= anc) { // (Must be an unsigned
+ q1 = q1 + 1; // comparison here).
+ r1 = r1 - anc;}
+ q2 = 2*q2; // Update q2 = 2**p/|d|.
+ r2 = 2*r2; // Update r2 = rem(2**p, |d|).
+ if (r2 >= ad) { // (Must be an unsigned
+ q2 = q2 + 1; // comparison here).
+ r2 = r2 - ad;}
+ delta = ad - r2;
+ } while (q1 < delta || (q1 == delta && r1 == 0));
+
+ magic = (unsigned)(q2 + 1);
+ shift = p - 32;
+ }
+
+ uint32_t magic;
+ int32_t shift;
+};
+
+
+template <typename T, bool div_gt_one>
+static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T operator / (const T& numerator, const TensorIntDivisor<T, div_gt_one>& divisor) {
+ return divisor.divide(numerator);
+}
+
+
+} // end namespace internal
+} // end namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_INTDIV_H
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorLayoutSwap.h b/unsupported/Eigen/CXX11/src/Tensor/TensorLayoutSwap.h
new file mode 100644
index 000000000..cd0109ef4
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorLayoutSwap.h
@@ -0,0 +1,209 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_LAYOUT_SWAP_H
+#define EIGEN_CXX11_TENSOR_TENSOR_LAYOUT_SWAP_H
+
+namespace Eigen {
+
+/** \class TensorLayoutSwap
+ * \ingroup CXX11_Tensor_Module
+ *
+ * \brief Swap the layout from col-major to row-major, or row-major
+ * to col-major, and invert the order of the dimensions.
+ *
+ * Beware: the dimensions are reversed by this operation. If you want to
+ * preserve the ordering of the dimensions, you need to combine this
+ * operation with a shuffle.
+ *
+ * \example:
+ * Tensor<float, 2, ColMajor> input(2, 4);
+ * Tensor<float, 2, RowMajor> output = input.swap_layout();
+ * eigen_assert(output.dimension(0) == 4);
+ * eigen_assert(output.dimension(1) == 2);
+ *
+ * array<int, 2> shuffle(1, 0);
+ * output = input.swap_layout().shuffle(shuffle);
+ * eigen_assert(output.dimension(0) == 2);
+ * eigen_assert(output.dimension(1) == 4);
+ *
+ */
+namespace internal {
+template<typename XprType>
+struct traits<TensorLayoutSwapOp<XprType> > : public traits<XprType>
+{
+ typedef typename XprType::Scalar Scalar;
+ typedef traits<XprType> XprTraits;
+ typedef typename XprTraits::StorageKind StorageKind;
+ typedef typename XprTraits::Index Index;
+ typedef typename XprType::Nested Nested;
+ typedef typename remove_reference<Nested>::type _Nested;
+ static const int NumDimensions = traits<XprType>::NumDimensions;
+ static const int Layout = (traits<XprType>::Layout == ColMajor) ? RowMajor : ColMajor;
+};
+
+template<typename XprType>
+struct eval<TensorLayoutSwapOp<XprType>, Eigen::Dense>
+{
+ typedef const TensorLayoutSwapOp<XprType>& type;
+};
+
+template<typename XprType>
+struct nested<TensorLayoutSwapOp<XprType>, 1, typename eval<TensorLayoutSwapOp<XprType> >::type>
+{
+ typedef TensorLayoutSwapOp<XprType> type;
+};
+
+} // end namespace internal
+
+
+
+template<typename XprType>
+class TensorLayoutSwapOp : public TensorBase<TensorLayoutSwapOp<XprType>, WriteAccessors>
+{
+ public:
+ typedef typename Eigen::internal::traits<TensorLayoutSwapOp>::Scalar Scalar;
+ typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
+ typedef typename internal::remove_const<typename XprType::CoeffReturnType>::type CoeffReturnType;
+ typedef typename Eigen::internal::nested<TensorLayoutSwapOp>::type Nested;
+ typedef typename Eigen::internal::traits<TensorLayoutSwapOp>::StorageKind StorageKind;
+ typedef typename Eigen::internal::traits<TensorLayoutSwapOp>::Index Index;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorLayoutSwapOp(const XprType& expr)
+ : m_xpr(expr) {}
+
+ EIGEN_DEVICE_FUNC
+ const typename internal::remove_all<typename XprType::Nested>::type&
+ expression() const { return m_xpr; }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE TensorLayoutSwapOp& operator = (const TensorLayoutSwapOp& other)
+ {
+ typedef TensorAssignOp<TensorLayoutSwapOp, const TensorLayoutSwapOp> Assign;
+ Assign assign(*this, other);
+ internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
+ return *this;
+ }
+
+ template<typename OtherDerived>
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE TensorLayoutSwapOp& operator = (const OtherDerived& other)
+ {
+ typedef TensorAssignOp<TensorLayoutSwapOp, const OtherDerived> Assign;
+ Assign assign(*this, other);
+ internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
+ return *this;
+ }
+
+ protected:
+ typename XprType::Nested m_xpr;
+};
+
+
+// Eval as rvalue
+template<typename ArgType, typename Device>
+struct TensorEvaluator<const TensorLayoutSwapOp<ArgType>, Device>
+{
+ typedef TensorLayoutSwapOp<ArgType> XprType;
+ typedef typename XprType::Index Index;
+ static const int NumDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value;
+ typedef DSizes<Index, NumDims> Dimensions;
+
+ enum {
+ IsAligned = TensorEvaluator<ArgType, Device>::IsAligned,
+ PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
+ Layout = (static_cast<int>(TensorEvaluator<ArgType, Device>::Layout) == static_cast<int>(ColMajor)) ? RowMajor : ColMajor,
+ CoordAccess = false, // to be implemented
+ RawAccess = TensorEvaluator<ArgType, Device>::RawAccess
+ };
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
+ : m_impl(op.expression(), device)
+ {
+ for(int i = 0; i < NumDims; ++i) {
+ m_dimensions[i] = m_impl.dimensions()[NumDims-1-i];
+ }
+ }
+
+ typedef typename XprType::Scalar Scalar;
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+ typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType* data) {
+ return m_impl.evalSubExprsIfNeeded(data);
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
+ m_impl.cleanup();
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
+ {
+ return m_impl.coeff(index);
+ }
+
+ template<int LoadMode>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
+ {
+ return m_impl.template packet<LoadMode>(index);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
+ return m_impl.costPerCoeff(vectorized);
+ }
+
+ EIGEN_DEVICE_FUNC Scalar* data() const { return m_impl.data(); }
+
+ const TensorEvaluator<ArgType, Device>& impl() const { return m_impl; }
+
+ protected:
+ TensorEvaluator<ArgType, Device> m_impl;
+ Dimensions m_dimensions;
+};
+
+
+// Eval as lvalue
+template<typename ArgType, typename Device>
+ struct TensorEvaluator<TensorLayoutSwapOp<ArgType>, Device>
+ : public TensorEvaluator<const TensorLayoutSwapOp<ArgType>, Device>
+{
+ typedef TensorEvaluator<const TensorLayoutSwapOp<ArgType>, Device> Base;
+ typedef TensorLayoutSwapOp<ArgType> XprType;
+
+ enum {
+ IsAligned = TensorEvaluator<ArgType, Device>::IsAligned,
+ PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
+ Layout = (static_cast<int>(TensorEvaluator<ArgType, Device>::Layout) == static_cast<int>(ColMajor)) ? RowMajor : ColMajor,
+ CoordAccess = false // to be implemented
+ };
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
+ : Base(op, device)
+ { }
+
+ typedef typename XprType::Index Index;
+ typedef typename XprType::Scalar Scalar;
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+ typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType& coeffRef(Index index)
+ {
+ return this->m_impl.coeffRef(index);
+ }
+ template <int StoreMode> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ void writePacket(Index index, const PacketReturnType& x)
+ {
+ this->m_impl.template writePacket<StoreMode>(index, x);
+ }
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_LAYOUT_SWAP_H
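To make the doc-comment example above concrete, here is a self-contained sketch (hypothetical usage code, not part of the patch) showing both the bare layout swap, which reverses the dimensions, and the swap-plus-shuffle combination that preserves them:

```cpp
#include <unsupported/Eigen/CXX11/Tensor>

int main() {
  Eigen::Tensor<float, 2, Eigen::ColMajor> input(2, 4);
  input.setRandom();

  // Bare layout swap: the result is row-major with dimensions (4, 2).
  Eigen::Tensor<float, 2, Eigen::RowMajor> swapped = input.swap_layout();

  // Swap combined with a shuffle: row-major with the original (2, 4) shape.
  Eigen::array<int, 2> shuffle{{1, 0}};
  Eigen::Tensor<float, 2, Eigen::RowMajor> output =
      input.swap_layout().shuffle(shuffle);
  return 0;
}
```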
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorMacros.h b/unsupported/Eigen/CXX11/src/Tensor/TensorMacros.h
new file mode 100644
index 000000000..ee0078bbc
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorMacros.h
@@ -0,0 +1,54 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2015 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_META_MACROS_H
+#define EIGEN_CXX11_TENSOR_TENSOR_META_MACROS_H
+
+
+/** Use this macro for SFINAE-based overload selection in templated functions.
+ *
+ * template<typename T,
+ * typename std::enable_if< isBanana<T>::value , int >::type = 0
+ * >
+ * void foo(){}
+ *
+ * becomes:
+ *
+ * template<typename T,
+ * EIGEN_SFINAE_ENABLE_IF( isBanana<T>::value )
+ * >
+ * void foo(){}
+ */
+
+// SFINAE requires variadic templates
+#ifndef __CUDACC__
+#if EIGEN_HAS_VARIADIC_TEMPLATES
+ // SFINAE doesn't work for gcc <= 4.7
+ #ifdef EIGEN_COMP_GNUC
+ #if EIGEN_GNUC_AT_LEAST(4,8)
+ #define EIGEN_HAS_SFINAE
+ #endif
+ #else
+ #define EIGEN_HAS_SFINAE
+ #endif
+#endif
+#endif
+
+#define EIGEN_SFINAE_ENABLE_IF( __condition__ ) \
+ typename internal::enable_if< ( __condition__ ) , int >::type = 0
+
+
+#if EIGEN_HAS_CONSTEXPR
+#define EIGEN_CONSTEXPR constexpr
+#else
+#define EIGEN_CONSTEXPR
+#endif
+
+
+#endif
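A brief sketch of the macro in use (hypothetical, not part of the patch; `is_integral` is assumed to be the trait from Eigen's internal meta utilities, and the function must live inside namespace Eigen because the macro expands to `internal::enable_if`):

```cpp
namespace Eigen {

#ifdef EIGEN_HAS_SFINAE
// Participates in overload resolution only when T is an integral type;
// for any other T the overload is silently removed from the candidate set.
template <typename T,
          EIGEN_SFINAE_ENABLE_IF(internal::is_integral<T>::value)>
void process_index(T) { /* integral-only path */ }
#endif

} // namespace Eigen
```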
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorMap.h b/unsupported/Eigen/CXX11/src/Tensor/TensorMap.h
new file mode 100644
index 000000000..a8e55757e
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorMap.h
@@ -0,0 +1,321 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_MAP_H
+#define EIGEN_CXX11_TENSOR_TENSOR_MAP_H
+
+namespace Eigen {
+
+/** \class TensorMap
+ * \ingroup CXX11_Tensor_Module
+ *
+ * \brief A tensor expression mapping an existing array of data.
+ *
+ */
+/// The template <class> class MakePointer_ parameter converts a host pointer type into the
+/// corresponding device pointer type, because some device compilers do not accept raw T*
+/// pointers. Mapping the pointer type through MakePointer_ lets the same Evaluator functions
+/// work on both host and device. Since MakePointer_<T>::Type defaults to T*, adding this
+/// parameter does not break any existing code.
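For reference, the default mapping (declared alongside the forward declarations in TensorForwardDeclarations.h) is essentially the identity to a raw pointer; a sketch:

```cpp
// Default MakePointer: on the host, Type is just a plain T*, which is why
// existing code keeps compiling unchanged. Device backends substitute their
// own pointer wrapper in place of this template argument.
template <typename T> struct MakePointer {
  typedef T* Type;
};
```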
+template<typename PlainObjectType, int Options_, template <class> class MakePointer_> class TensorMap : public TensorBase<TensorMap<PlainObjectType, Options_, MakePointer_> >
+{
+ public:
+ typedef TensorMap<PlainObjectType, Options_, MakePointer_> Self;
+ typedef typename PlainObjectType::Base Base;
+ typedef typename Eigen::internal::nested<Self>::type Nested;
+ typedef typename internal::traits<PlainObjectType>::StorageKind StorageKind;
+ typedef typename internal::traits<PlainObjectType>::Index Index;
+ typedef typename internal::traits<PlainObjectType>::Scalar Scalar;
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+ typedef typename Base::CoeffReturnType CoeffReturnType;
+
+ /* typedef typename internal::conditional<
+ bool(internal::is_lvalue<PlainObjectType>::value),
+ Scalar *,
+ const Scalar *>::type
+ PointerType;*/
+ typedef typename MakePointer_<Scalar>::Type PointerType;
+ typedef PointerType PointerArgType;
+
+ static const int Options = Options_;
+
+ static const Index NumIndices = PlainObjectType::NumIndices;
+ typedef typename PlainObjectType::Dimensions Dimensions;
+
+ enum {
+ IsAligned = ((int(Options_)&Aligned)==Aligned),
+ Layout = PlainObjectType::Layout,
+ CoordAccess = true,
+ RawAccess = true
+ };
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE TensorMap(PointerArgType dataPtr) : m_data(dataPtr), m_dimensions() {
+ // A tensor can only be constructed from a bare pointer if its rank is 0
+ // (or dynamic), since no dimensions are supplied here.
+ EIGEN_STATIC_ASSERT((0 == NumIndices || NumIndices == Dynamic), YOU_MADE_A_PROGRAMMING_MISTAKE)
+ }
+
+#if EIGEN_HAS_VARIADIC_TEMPLATES
+ template<typename... IndexTypes> EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE TensorMap(PointerArgType dataPtr, Index firstDimension, IndexTypes... otherDimensions) : m_data(dataPtr), m_dimensions(firstDimension, otherDimensions...) {
+ // The number of dimensions used to construct a tensor must be equal to the rank of the tensor.
+ EIGEN_STATIC_ASSERT((sizeof...(otherDimensions) + 1 == NumIndices || NumIndices == Dynamic), YOU_MADE_A_PROGRAMMING_MISTAKE)
+ }
+#else
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE TensorMap(PointerArgType dataPtr, Index firstDimension) : m_data(dataPtr), m_dimensions(firstDimension) {
+ // The number of dimensions used to construct a tensor must be equal to the rank of the tensor.
+ EIGEN_STATIC_ASSERT((1 == NumIndices || NumIndices == Dynamic), YOU_MADE_A_PROGRAMMING_MISTAKE)
+ }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE TensorMap(PointerArgType dataPtr, Index dim1, Index dim2) : m_data(dataPtr), m_dimensions(dim1, dim2) {
+ EIGEN_STATIC_ASSERT(2 == NumIndices || NumIndices == Dynamic, YOU_MADE_A_PROGRAMMING_MISTAKE)
+ }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE TensorMap(PointerArgType dataPtr, Index dim1, Index dim2, Index dim3) : m_data(dataPtr), m_dimensions(dim1, dim2, dim3) {
+ EIGEN_STATIC_ASSERT(3 == NumIndices || NumIndices == Dynamic, YOU_MADE_A_PROGRAMMING_MISTAKE)
+ }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE TensorMap(PointerArgType dataPtr, Index dim1, Index dim2, Index dim3, Index dim4) : m_data(dataPtr), m_dimensions(dim1, dim2, dim3, dim4) {
+ EIGEN_STATIC_ASSERT(4 == NumIndices || NumIndices == Dynamic, YOU_MADE_A_PROGRAMMING_MISTAKE)
+ }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE TensorMap(PointerArgType dataPtr, Index dim1, Index dim2, Index dim3, Index dim4, Index dim5) : m_data(dataPtr), m_dimensions(dim1, dim2, dim3, dim4, dim5) {
+ EIGEN_STATIC_ASSERT(5 == NumIndices || NumIndices == Dynamic, YOU_MADE_A_PROGRAMMING_MISTAKE)
+ }
+#endif
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorMap(PointerArgType dataPtr, const array<Index, NumIndices>& dimensions)
+ : m_data(dataPtr), m_dimensions(dimensions)
+ { }
+
+ template <typename Dimensions>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorMap(PointerArgType dataPtr, const Dimensions& dimensions)
+ : m_data(dataPtr), m_dimensions(dimensions)
+ { }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorMap(PlainObjectType& tensor)
+ : m_data(tensor.data()), m_dimensions(tensor.dimensions())
+ { }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Index rank() const { return m_dimensions.rank(); }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Index dimension(Index n) const { return m_dimensions[n]; }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Index size() const { return m_dimensions.TotalSize(); }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE PointerType data() { return m_data; }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const PointerType data() const { return m_data; }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const Scalar& operator()(const array<Index, NumIndices>& indices) const
+ {
+ // eigen_assert(checkIndexRange(indices));
+ if (PlainObjectType::Options&RowMajor) {
+ const Index index = m_dimensions.IndexOfRowMajor(indices);
+ return m_data[index];
+ } else {
+ const Index index = m_dimensions.IndexOfColMajor(indices);
+ return m_data[index];
+ }
+ }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const Scalar& operator()() const
+ {
+ EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE)
+ return m_data[0];
+ }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const Scalar& operator()(Index index) const
+ {
+ eigen_internal_assert(index >= 0 && index < size());
+ return m_data[index];
+ }
+
+#if EIGEN_HAS_VARIADIC_TEMPLATES
+ template<typename... IndexTypes> EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const Scalar& operator()(Index firstIndex, Index secondIndex, IndexTypes... otherIndices) const
+ {
+ EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
+ if (PlainObjectType::Options&RowMajor) {
+ const Index index = m_dimensions.IndexOfRowMajor(array<Index, NumIndices>{{firstIndex, secondIndex, otherIndices...}});
+ return m_data[index];
+ } else {
+ const Index index = m_dimensions.IndexOfColMajor(array<Index, NumIndices>{{firstIndex, secondIndex, otherIndices...}});
+ return m_data[index];
+ }
+ }
+#else
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1) const
+ {
+ if (PlainObjectType::Options&RowMajor) {
+ const Index index = i1 + i0 * m_dimensions[1];
+ return m_data[index];
+ } else {
+ const Index index = i0 + i1 * m_dimensions[0];
+ return m_data[index];
+ }
+ }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2) const
+ {
+ if (PlainObjectType::Options&RowMajor) {
+ const Index index = i2 + m_dimensions[2] * (i1 + m_dimensions[1] * i0);
+ return m_data[index];
+ } else {
+ const Index index = i0 + m_dimensions[0] * (i1 + m_dimensions[1] * i2);
+ return m_data[index];
+ }
+ }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2, Index i3) const
+ {
+ if (PlainObjectType::Options&RowMajor) {
+ const Index index = i3 + m_dimensions[3] * (i2 + m_dimensions[2] * (i1 + m_dimensions[1] * i0));
+ return m_data[index];
+ } else {
+ const Index index = i0 + m_dimensions[0] * (i1 + m_dimensions[1] * (i2 + m_dimensions[2] * i3));
+ return m_data[index];
+ }
+ }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2, Index i3, Index i4) const
+ {
+ if (PlainObjectType::Options&RowMajor) {
+ const Index index = i4 + m_dimensions[4] * (i3 + m_dimensions[3] * (i2 + m_dimensions[2] * (i1 + m_dimensions[1] * i0)));
+ return m_data[index];
+ } else {
+ const Index index = i0 + m_dimensions[0] * (i1 + m_dimensions[1] * (i2 + m_dimensions[2] * (i3 + m_dimensions[3] * i4)));
+ return m_data[index];
+ }
+ }
+#endif
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Scalar& operator()(const array<Index, NumIndices>& indices)
+ {
+ // eigen_assert(checkIndexRange(indices));
+ if (PlainObjectType::Options&RowMajor) {
+ const Index index = m_dimensions.IndexOfRowMajor(indices);
+ return m_data[index];
+ } else {
+ const Index index = m_dimensions.IndexOfColMajor(indices);
+ return m_data[index];
+ }
+ }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Scalar& operator()()
+ {
+ EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE)
+ return m_data[0];
+ }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Scalar& operator()(Index index)
+ {
+ eigen_internal_assert(index >= 0 && index < size());
+ return m_data[index];
+ }
+
+#if EIGEN_HAS_VARIADIC_TEMPLATES
+ template<typename... IndexTypes> EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Scalar& operator()(Index firstIndex, Index secondIndex, IndexTypes... otherIndices)
+ {
+ static_assert(sizeof...(otherIndices) + 2 == NumIndices || NumIndices == Dynamic, "Number of indices used to access a tensor coefficient must be equal to the rank of the tensor.");
+ const std::size_t NumDims = sizeof...(otherIndices) + 2;
+ if (PlainObjectType::Options&RowMajor) {
+ const Index index = m_dimensions.IndexOfRowMajor(array<Index, NumDims>{{firstIndex, secondIndex, otherIndices...}});
+ return m_data[index];
+ } else {
+ const Index index = m_dimensions.IndexOfColMajor(array<Index, NumDims>{{firstIndex, secondIndex, otherIndices...}});
+ return m_data[index];
+ }
+ }
+#else
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1)
+ {
+ if (PlainObjectType::Options&RowMajor) {
+ const Index index = i1 + i0 * m_dimensions[1];
+ return m_data[index];
+ } else {
+ const Index index = i0 + i1 * m_dimensions[0];
+ return m_data[index];
+ }
+ }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2)
+ {
+ if (PlainObjectType::Options&RowMajor) {
+ const Index index = i2 + m_dimensions[2] * (i1 + m_dimensions[1] * i0);
+ return m_data[index];
+ } else {
+ const Index index = i0 + m_dimensions[0] * (i1 + m_dimensions[1] * i2);
+ return m_data[index];
+ }
+ }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2, Index i3)
+ {
+ if (PlainObjectType::Options&RowMajor) {
+ const Index index = i3 + m_dimensions[3] * (i2 + m_dimensions[2] * (i1 + m_dimensions[1] * i0));
+ return m_data[index];
+ } else {
+ const Index index = i0 + m_dimensions[0] * (i1 + m_dimensions[1] * (i2 + m_dimensions[2] * i3));
+ return m_data[index];
+ }
+ }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2, Index i3, Index i4)
+ {
+ if (PlainObjectType::Options&RowMajor) {
+ const Index index = i4 + m_dimensions[4] * (i3 + m_dimensions[3] * (i2 + m_dimensions[2] * (i1 + m_dimensions[1] * i0)));
+ return m_data[index];
+ } else {
+ const Index index = i0 + m_dimensions[0] * (i1 + m_dimensions[1] * (i2 + m_dimensions[2] * (i3 + m_dimensions[3] * i4)));
+ return m_data[index];
+ }
+ }
+#endif
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Self& operator=(const Self& other)
+ {
+ typedef TensorAssignOp<Self, const Self> Assign;
+ Assign assign(*this, other);
+ internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
+ return *this;
+ }
+
+ template<typename OtherDerived>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Self& operator=(const OtherDerived& other)
+ {
+ typedef TensorAssignOp<Self, const OtherDerived> Assign;
+ Assign assign(*this, other);
+ internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
+ return *this;
+ }
+
+ private:
+ typename MakePointer_<Scalar>::Type m_data;
+ Dimensions m_dimensions;
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_MAP_H
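A short usage sketch (hypothetical, not part of the patch): TensorMap wraps an existing buffer without copying, so writes through the map are visible in the underlying storage.

```cpp
#include <unsupported/Eigen/CXX11/Tensor>

int main() {
  float storage[6] = {0, 1, 2, 3, 4, 5};

  // View the buffer as a 2x3 tensor; rank and scalar type come from the
  // PlainObjectType argument, the dimensions from the constructor.
  Eigen::TensorMap<Eigen::Tensor<float, 2> > t(storage, 2, 3);

  // Column-major by default: (1, 2) maps to linear index 1 + 2*2 = 5.
  t(1, 2) = 42.0f;
  return storage[5] == 42.0f ? 0 : 1;
}
```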
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorMeta.h b/unsupported/Eigen/CXX11/src/Tensor/TensorMeta.h
new file mode 100644
index 000000000..615559d44
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorMeta.h
@@ -0,0 +1,218 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2015 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_META_H
+#define EIGEN_CXX11_TENSOR_TENSOR_META_H
+
+namespace Eigen {
+
+template<bool cond> struct Cond {};
+
+template<typename T1, typename T2> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
+const T1& choose(Cond<true>, const T1& first, const T2&) {
+ return first;
+}
+
+template<typename T1, typename T2> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
+const T2& choose(Cond<false>, const T1&, const T2& second) {
+ return second;
+}
+
+
+template <typename T, typename X, typename Y>
+EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
+T divup(const X x, const Y y) {
+ return static_cast<T>((x + y - 1) / y);
+}
+
+template <typename T>
+EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
+T divup(const T x, const T y) {
+ return static_cast<T>((x + y - 1) / y);
+}
+
+template <size_t n> struct max_n_1 {
+ static const size_t size = n;
+};
+template <> struct max_n_1<0> {
+ static const size_t size = 1;
+};
+
+
+// Default packet types
+template <typename Scalar, typename Device>
+struct PacketType : internal::packet_traits<Scalar> {
+ typedef typename internal::packet_traits<Scalar>::type type;
+};
+
+// For CUDA packet types when using a GpuDevice
+#if defined(EIGEN_USE_GPU) && defined(__CUDACC__) && defined(EIGEN_HAS_CUDA_FP16)
+template <>
+struct PacketType<half, GpuDevice> {
+ typedef half2 type;
+ static const int size = 2;
+ enum {
+ HasAdd = 1,
+ HasSub = 1,
+ HasMul = 1,
+ HasNegate = 1,
+ HasAbs = 1,
+ HasArg = 0,
+ HasAbs2 = 0,
+ HasMin = 1,
+ HasMax = 1,
+ HasConj = 0,
+ HasSetLinear = 0,
+ HasBlend = 0,
+
+ HasDiv = 1,
+ HasSqrt = 1,
+ HasRsqrt = 1,
+ HasExp = 1,
+ HasLog = 1,
+ HasLog1p = 0,
+ HasLog10 = 0,
+ HasPow = 1
+ };
+};
+#endif
+
+#if defined(EIGEN_USE_SYCL)
+template <typename T>
+ struct PacketType<T, SyclDevice> {
+ typedef T type;
+ static const int size = 1;
+ enum {
+ HasAdd = 0,
+ HasSub = 0,
+ HasMul = 0,
+ HasNegate = 0,
+ HasAbs = 0,
+ HasArg = 0,
+ HasAbs2 = 0,
+ HasMin = 0,
+ HasMax = 0,
+ HasConj = 0,
+ HasSetLinear = 0,
+ HasBlend = 0
+ };
+};
+#endif
+
+
+// Tuple mimics std::pair, but also works in device code (e.g. when compiled by nvcc).
+template <typename U, typename V> struct Tuple {
+ public:
+ U first;
+ V second;
+
+ typedef U first_type;
+ typedef V second_type;
+
+ EIGEN_CONSTEXPR EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Tuple() : first(), second() {}
+
+ EIGEN_CONSTEXPR EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Tuple(const U& f, const V& s) : first(f), second(s) {}
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Tuple& operator= (const Tuple& rhs) {
+ if (&rhs == this) return *this;
+ first = rhs.first;
+ second = rhs.second;
+ return *this;
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ void swap(Tuple& rhs) {
+ using numext::swap;
+ swap(first, rhs.first);
+ swap(second, rhs.second);
+ }
+};
+
+template <typename U, typename V>
+EIGEN_CONSTEXPR EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+bool operator==(const Tuple<U, V>& x, const Tuple<U, V>& y) {
+ return (x.first == y.first && x.second == y.second);
+}
+
+template <typename U, typename V>
+EIGEN_CONSTEXPR EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+bool operator!=(const Tuple<U, V>& x, const Tuple<U, V>& y) {
+ return !(x == y);
+}
+
+
+// std::pair can't be used on CUDA devices, so IndexPair provides a minimal substitute.
+template <typename Idx> struct IndexPair {
+ EIGEN_CONSTEXPR EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE IndexPair() : first(0), second(0) {}
+ EIGEN_CONSTEXPR EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE IndexPair(Idx f, Idx s) : first(f), second(s) {}
+
+ EIGEN_DEVICE_FUNC void set(IndexPair<Idx> val) {
+ first = val.first;
+ second = val.second;
+ }
+
+ Idx first;
+ Idx second;
+};
+
+
+#ifdef EIGEN_HAS_SFINAE
+namespace internal {
+
+ template<typename IndexType, Index... Is>
+ EIGEN_CONSTEXPR EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ array<Index, sizeof...(Is)> customIndices2Array(IndexType& idx, numeric_list<Index, Is...>) {
+ return { idx[Is]... };
+ }
+ template<typename IndexType>
+ EIGEN_CONSTEXPR EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ array<Index, 0> customIndices2Array(IndexType&, numeric_list<Index>) {
+ return array<Index, 0>();
+ }
+
+ /** Make an array (for index/dimensions) out of a custom index */
+ template<typename Index, std::size_t NumIndices, typename IndexType>
+ EIGEN_CONSTEXPR EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ array<Index, NumIndices> customIndices2Array(IndexType& idx) {
+ return customIndices2Array(idx, typename gen_numeric_list<Index, NumIndices>::type{});
+ }
+
+
+ template <typename B, typename D>
+ struct is_base_of
+ {
+
+ typedef char (&yes)[1];
+ typedef char (&no)[2];
+
+ template <typename BB, typename DD>
+ struct Host
+ {
+ operator BB*() const;
+ operator DD*();
+ };
+
+ template<typename T>
+ static yes check(D*, T);
+ static no check(B*, int);
+
+ static const bool value = sizeof(check(Host<B,D>(), int())) == sizeof(yes);
+ };
+
+}
+#endif
+
+
+
+} // namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_META_H
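Of the helpers above, `divup` is the one that appears throughout the evaluators: it is ceiling division, used to count how many fixed-size blocks cover a range. A reference sketch (hypothetical, not part of the patch; note that `(x + y - 1) / y` assumes `x + y` does not overflow):

```cpp
#include <cassert>

// Mirror of divup's arithmetic, for illustration only.
long divup_ref(long x, long y) { return (x + y - 1) / y; }

int main() {
  assert(divup_ref(10, 5) == 2); // exact multiple
  assert(divup_ref(11, 5) == 3); // a partial block rounds up
  assert(divup_ref(1, 5) == 1);  // a lone element still needs one block
  return 0;
}
```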
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorMorphing.h b/unsupported/Eigen/CXX11/src/Tensor/TensorMorphing.h
new file mode 100644
index 000000000..d34f1e328
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorMorphing.h
@@ -0,0 +1,888 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_MORPHING_H
+#define EIGEN_CXX11_TENSOR_TENSOR_MORPHING_H
+
+namespace Eigen {
+
+/** \class TensorReshaping
+ * \ingroup CXX11_Tensor_Module
+ *
+ * \brief Tensor reshaping class. Reinterprets the coefficients of an
+ * expression under a new set of dimensions; the total number of
+ * coefficients must remain unchanged.
+ */
+namespace internal {
+template<typename NewDimensions, typename XprType>
+struct traits<TensorReshapingOp<NewDimensions, XprType> > : public traits<XprType>
+{
+ typedef typename XprType::Scalar Scalar;
+ typedef traits<XprType> XprTraits;
+ typedef typename XprTraits::StorageKind StorageKind;
+ typedef typename XprTraits::Index Index;
+ typedef typename XprType::Nested Nested;
+ typedef typename remove_reference<Nested>::type _Nested;
+ static const int NumDimensions = array_size<NewDimensions>::value;
+ static const int Layout = XprTraits::Layout;
+};
+
+template<typename NewDimensions, typename XprType>
+struct eval<TensorReshapingOp<NewDimensions, XprType>, Eigen::Dense>
+{
+ typedef const TensorReshapingOp<NewDimensions, XprType>& type;
+};
+
+template<typename NewDimensions, typename XprType>
+struct nested<TensorReshapingOp<NewDimensions, XprType>, 1, typename eval<TensorReshapingOp<NewDimensions, XprType> >::type>
+{
+ typedef TensorReshapingOp<NewDimensions, XprType> type;
+};
+
+} // end namespace internal
+
+
+
+template<typename NewDimensions, typename XprType>
+class TensorReshapingOp : public TensorBase<TensorReshapingOp<NewDimensions, XprType>, WriteAccessors>
+{
+ public:
+ typedef typename Eigen::internal::traits<TensorReshapingOp>::Scalar Scalar;
+ typedef typename internal::remove_const<typename XprType::CoeffReturnType>::type CoeffReturnType;
+ typedef typename Eigen::internal::nested<TensorReshapingOp>::type Nested;
+ typedef typename Eigen::internal::traits<TensorReshapingOp>::StorageKind StorageKind;
+ typedef typename Eigen::internal::traits<TensorReshapingOp>::Index Index;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorReshapingOp(const XprType& expr, const NewDimensions& dims)
+ : m_xpr(expr), m_dims(dims) {}
+
+ EIGEN_DEVICE_FUNC
+ const NewDimensions& dimensions() const { return m_dims; }
+
+ EIGEN_DEVICE_FUNC
+ const typename internal::remove_all<typename XprType::Nested>::type&
+ expression() const { return m_xpr; }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE TensorReshapingOp& operator = (const TensorReshapingOp& other)
+ {
+ typedef TensorAssignOp<TensorReshapingOp, const TensorReshapingOp> Assign;
+ Assign assign(*this, other);
+ internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
+ return *this;
+ }
+
+ template<typename OtherDerived>
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE TensorReshapingOp& operator = (const OtherDerived& other)
+ {
+ typedef TensorAssignOp<TensorReshapingOp, const OtherDerived> Assign;
+ Assign assign(*this, other);
+ internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
+ return *this;
+ }
+
+ protected:
+ typename XprType::Nested m_xpr;
+ const NewDimensions m_dims;
+};
+
+
+// Eval as rvalue
+template<typename NewDimensions, typename ArgType, typename Device>
+struct TensorEvaluator<const TensorReshapingOp<NewDimensions, ArgType>, Device>
+{
+ typedef TensorReshapingOp<NewDimensions, ArgType> XprType;
+ typedef NewDimensions Dimensions;
+
+ enum {
+ IsAligned = TensorEvaluator<ArgType, Device>::IsAligned,
+ PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
+ Layout = TensorEvaluator<ArgType, Device>::Layout,
+ CoordAccess = false, // to be implemented
+ RawAccess = TensorEvaluator<ArgType, Device>::RawAccess
+ };
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
+ : m_impl(op.expression(), device), m_dimensions(op.dimensions())
+ {
+ // The total size of the reshaped tensor must be equal to the total size
+ // of the input tensor.
+ eigen_assert(internal::array_prod(m_impl.dimensions()) == internal::array_prod(op.dimensions()));
+ }
+
+ typedef typename XprType::Index Index;
+ typedef typename XprType::Scalar Scalar;
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+ typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType* data) {
+ return m_impl.evalSubExprsIfNeeded(data);
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
+ m_impl.cleanup();
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
+ {
+ return m_impl.coeff(index);
+ }
+
+ template<int LoadMode>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
+ {
+ return m_impl.template packet<LoadMode>(index);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
+ return m_impl.costPerCoeff(vectorized);
+ }
+
+ EIGEN_DEVICE_FUNC Scalar* data() const { return const_cast<Scalar*>(m_impl.data()); }
+
+ EIGEN_DEVICE_FUNC const TensorEvaluator<ArgType, Device>& impl() const { return m_impl; }
+
+ protected:
+ TensorEvaluator<ArgType, Device> m_impl;
+ NewDimensions m_dimensions;
+};
+
+
+// Eval as lvalue
+template<typename NewDimensions, typename ArgType, typename Device>
+ struct TensorEvaluator<TensorReshapingOp<NewDimensions, ArgType>, Device>
+ : public TensorEvaluator<const TensorReshapingOp<NewDimensions, ArgType>, Device>
+
+{
+ typedef TensorEvaluator<const TensorReshapingOp<NewDimensions, ArgType>, Device> Base;
+ typedef TensorReshapingOp<NewDimensions, ArgType> XprType;
+ typedef NewDimensions Dimensions;
+
+ enum {
+ IsAligned = TensorEvaluator<ArgType, Device>::IsAligned,
+ PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
+ Layout = TensorEvaluator<ArgType, Device>::Layout,
+ CoordAccess = false, // to be implemented
+ RawAccess = TensorEvaluator<ArgType, Device>::RawAccess
+ };
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
+ : Base(op, device)
+ { }
+
+ typedef typename XprType::Index Index;
+ typedef typename XprType::Scalar Scalar;
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+ typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType& coeffRef(Index index)
+ {
+ return this->m_impl.coeffRef(index);
+ }
+ template <int StoreMode> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ void writePacket(Index index, const PacketReturnType& x)
+ {
+ this->m_impl.template writePacket<StoreMode>(index, x);
+ }
+};
+
+
+/** \class TensorSlicing
+ * \ingroup CXX11_Tensor_Module
+ *
+ * \brief Tensor slicing class. Extracts a rectangular sub-tensor defined by
+ * per-dimension start indices and sizes.
+ */
+namespace internal {
+template<typename StartIndices, typename Sizes, typename XprType>
+struct traits<TensorSlicingOp<StartIndices, Sizes, XprType> > : public traits<XprType>
+{
+ typedef typename XprType::Scalar Scalar;
+ typedef traits<XprType> XprTraits;
+ typedef typename XprTraits::StorageKind StorageKind;
+ typedef typename XprTraits::Index Index;
+ typedef typename XprType::Nested Nested;
+ typedef typename remove_reference<Nested>::type _Nested;
+ static const int NumDimensions = array_size<StartIndices>::value;
+ static const int Layout = XprTraits::Layout;
+};
+
+template<typename StartIndices, typename Sizes, typename XprType>
+struct eval<TensorSlicingOp<StartIndices, Sizes, XprType>, Eigen::Dense>
+{
+ typedef const TensorSlicingOp<StartIndices, Sizes, XprType>& type;
+};
+
+template<typename StartIndices, typename Sizes, typename XprType>
+struct nested<TensorSlicingOp<StartIndices, Sizes, XprType>, 1, typename eval<TensorSlicingOp<StartIndices, Sizes, XprType> >::type>
+{
+ typedef TensorSlicingOp<StartIndices, Sizes, XprType> type;
+};
+
+} // end namespace internal
+
+
+
+template<typename StartIndices, typename Sizes, typename XprType>
+class TensorSlicingOp : public TensorBase<TensorSlicingOp<StartIndices, Sizes, XprType> >
+{
+ public:
+ typedef typename Eigen::internal::traits<TensorSlicingOp>::Scalar Scalar;
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+ typedef typename Eigen::internal::nested<TensorSlicingOp>::type Nested;
+ typedef typename Eigen::internal::traits<TensorSlicingOp>::StorageKind StorageKind;
+ typedef typename Eigen::internal::traits<TensorSlicingOp>::Index Index;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorSlicingOp(const XprType& expr, const StartIndices& indices, const Sizes& sizes)
+ : m_xpr(expr), m_indices(indices), m_sizes(sizes) {}
+
+ EIGEN_DEVICE_FUNC
+ const StartIndices& startIndices() const { return m_indices; }
+ EIGEN_DEVICE_FUNC
+ const Sizes& sizes() const { return m_sizes; }
+
+ EIGEN_DEVICE_FUNC
+ const typename internal::remove_all<typename XprType::Nested>::type&
+ expression() const { return m_xpr; }
+
+ template<typename OtherDerived>
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE TensorSlicingOp& operator = (const OtherDerived& other)
+ {
+ typedef TensorAssignOp<TensorSlicingOp, const OtherDerived> Assign;
+ Assign assign(*this, other);
+ internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
+ return *this;
+ }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE TensorSlicingOp& operator = (const TensorSlicingOp& other)
+ {
+ typedef TensorAssignOp<TensorSlicingOp, const TensorSlicingOp> Assign;
+ Assign assign(*this, other);
+ internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
+ return *this;
+ }
+
+
+ protected:
+ typename XprType::Nested m_xpr;
+ const StartIndices m_indices;
+ const Sizes m_sizes;
+};
+
+
+// FIXME: figure out the exact threshold
+namespace {
+template <typename Index, typename Device> struct MemcpyTriggerForSlicing {
+ EIGEN_DEVICE_FUNC MemcpyTriggerForSlicing(const Device& device) : threshold_(2 * device.numThreads()) { }
+ EIGEN_DEVICE_FUNC bool operator ()(Index val) const { return val > threshold_; }
+
+ private:
+ Index threshold_;
+};
+
+// It is very expensive to start the memcpy kernel on GPU: we therefore only
+// use it for large copies.
+#ifdef EIGEN_USE_GPU
+template <typename Index> struct MemcpyTriggerForSlicing<Index, GpuDevice> {
+ EIGEN_DEVICE_FUNC MemcpyTriggerForSlicing(const GpuDevice&) { }
+ EIGEN_DEVICE_FUNC bool operator ()(Index val) const { return val > 4*1024*1024; }
+};
+#endif
+}
+
+// Eval as rvalue
+template<typename StartIndices, typename Sizes, typename ArgType, typename Device>
+struct TensorEvaluator<const TensorSlicingOp<StartIndices, Sizes, ArgType>, Device>
+{
+ typedef TensorSlicingOp<StartIndices, Sizes, ArgType> XprType;
+ static const int NumDims = internal::array_size<Sizes>::value;
+
+ enum {
+ // Alignment can't be guaranteed at compile time since it depends on the
+ // slice offsets and sizes.
+ IsAligned = /*TensorEvaluator<ArgType, Device>::IsAligned*/false,
+ PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
+ Layout = TensorEvaluator<ArgType, Device>::Layout,
+ CoordAccess = false,
+ RawAccess = false
+ };
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
+ : m_impl(op.expression(), device), m_device(device), m_dimensions(op.sizes()), m_offsets(op.startIndices())
+ {
+ for (std::size_t i = 0; i < internal::array_size<Dimensions>::value; ++i) {
+ eigen_assert(m_impl.dimensions()[i] >= op.sizes()[i] + op.startIndices()[i]);
+ }
+
+ const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
+ const Sizes& output_dims = op.sizes();
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ m_inputStrides[0] = 1;
+ for (int i = 1; i < NumDims; ++i) {
+ m_inputStrides[i] = m_inputStrides[i-1] * input_dims[i-1];
+ }
+
+ // Don't initialize m_fastOutputStrides[0] since it won't ever be accessed.
+ m_outputStrides[0] = 1;
+ for (int i = 1; i < NumDims; ++i) {
+ m_outputStrides[i] = m_outputStrides[i-1] * output_dims[i-1];
+ m_fastOutputStrides[i] = internal::TensorIntDivisor<Index>(m_outputStrides[i]);
+ }
+ } else {
+ m_inputStrides[NumDims-1] = 1;
+ for (int i = NumDims - 2; i >= 0; --i) {
+ m_inputStrides[i] = m_inputStrides[i+1] * input_dims[i+1];
+ }
+
+ // Don't initialize m_fastOutputStrides[NumDims-1] since it won't ever be accessed.
+ m_outputStrides[NumDims-1] = 1;
+ for (int i = NumDims - 2; i >= 0; --i) {
+ m_outputStrides[i] = m_outputStrides[i+1] * output_dims[i+1];
+ m_fastOutputStrides[i] = internal::TensorIntDivisor<Index>(m_outputStrides[i]);
+ }
+ }
+ }
+
+ typedef typename XprType::Index Index;
+ typedef typename XprType::Scalar Scalar;
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+ typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
+ typedef Sizes Dimensions;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
+
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType* data) {
+ m_impl.evalSubExprsIfNeeded(NULL);
+ if (!NumTraits<typename internal::remove_const<Scalar>::type>::RequireInitialization && data && m_impl.data()) {
+ Index contiguous_values = 1;
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ for (int i = 0; i < NumDims; ++i) {
+ contiguous_values *= dimensions()[i];
+ if (dimensions()[i] != m_impl.dimensions()[i]) {
+ break;
+ }
+ }
+ } else {
+ for (int i = NumDims-1; i >= 0; --i) {
+ contiguous_values *= dimensions()[i];
+ if (dimensions()[i] != m_impl.dimensions()[i]) {
+ break;
+ }
+ }
+ }
+ // Use memcpy if it's going to be faster than using the regular evaluation.
+ const MemcpyTriggerForSlicing<Index, Device> trigger(m_device);
+ if (trigger(contiguous_values)) {
+ Scalar* src = (Scalar*)m_impl.data();
+ for (int i = 0; i < internal::array_prod(dimensions()); i += contiguous_values) {
+ Index offset = srcCoeff(i);
+ m_device.memcpy((void*)(data+i), src+offset, contiguous_values * sizeof(Scalar));
+ }
+ return false;
+ }
+ }
+ return true;
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
+ m_impl.cleanup();
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
+ {
+ return m_impl.coeff(srcCoeff(index));
+ }
+
+ template<int LoadMode>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
+ {
+ const int packetSize = internal::unpacket_traits<PacketReturnType>::size;
+ EIGEN_STATIC_ASSERT((packetSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
+ eigen_assert(index+packetSize-1 < internal::array_prod(dimensions()));
+
+ Index inputIndices[] = {0, 0};
+ Index indices[] = {index, index + packetSize - 1};
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ for (int i = NumDims - 1; i > 0; --i) {
+ const Index idx0 = indices[0] / m_fastOutputStrides[i];
+ const Index idx1 = indices[1] / m_fastOutputStrides[i];
+ inputIndices[0] += (idx0 + m_offsets[i]) * m_inputStrides[i];
+ inputIndices[1] += (idx1 + m_offsets[i]) * m_inputStrides[i];
+ indices[0] -= idx0 * m_outputStrides[i];
+ indices[1] -= idx1 * m_outputStrides[i];
+ }
+ inputIndices[0] += (indices[0] + m_offsets[0]);
+ inputIndices[1] += (indices[1] + m_offsets[0]);
+ } else {
+ for (int i = 0; i < NumDims - 1; ++i) {
+ const Index idx0 = indices[0] / m_fastOutputStrides[i];
+ const Index idx1 = indices[1] / m_fastOutputStrides[i];
+ inputIndices[0] += (idx0 + m_offsets[i]) * m_inputStrides[i];
+ inputIndices[1] += (idx1 + m_offsets[i]) * m_inputStrides[i];
+ indices[0] -= idx0 * m_outputStrides[i];
+ indices[1] -= idx1 * m_outputStrides[i];
+ }
+ inputIndices[0] += (indices[0] + m_offsets[NumDims-1]);
+ inputIndices[1] += (indices[1] + m_offsets[NumDims-1]);
+ }
+ if (inputIndices[1] - inputIndices[0] == packetSize - 1) {
+ PacketReturnType rslt = m_impl.template packet<Unaligned>(inputIndices[0]);
+ return rslt;
+ }
+ else {
+ EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[packetSize];
+ values[0] = m_impl.coeff(inputIndices[0]);
+ values[packetSize-1] = m_impl.coeff(inputIndices[1]);
+ for (int i = 1; i < packetSize-1; ++i) {
+ values[i] = coeff(index+i);
+ }
+ PacketReturnType rslt = internal::pload<PacketReturnType>(values);
+ return rslt;
+ }
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
+ return m_impl.costPerCoeff(vectorized) + TensorOpCost(0, 0, NumDims);
+ }
+
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar* data() const {
+ Scalar* result = m_impl.data();
+ if (result) {
+ Index offset = 0;
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ for (int i = 0; i < NumDims; ++i) {
+ if (m_dimensions[i] != m_impl.dimensions()[i]) {
+ offset += m_offsets[i] * m_inputStrides[i];
+ for (int j = i+1; j < NumDims; ++j) {
+ if (m_dimensions[j] > 1) {
+ return NULL;
+ }
+ offset += m_offsets[j] * m_inputStrides[j];
+ }
+ break;
+ }
+ }
+ } else {
+ for (int i = NumDims - 1; i >= 0; --i) {
+ if (m_dimensions[i] != m_impl.dimensions()[i]) {
+ offset += m_offsets[i] * m_inputStrides[i];
+ for (int j = i-1; j >= 0; --j) {
+ if (m_dimensions[j] > 1) {
+ return NULL;
+ }
+ offset += m_offsets[j] * m_inputStrides[j];
+ }
+ break;
+ }
+ }
+ }
+ return result + offset;
+ }
+ return NULL;
+ }
+
+ protected:
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index srcCoeff(Index index) const
+ {
+ Index inputIndex = 0;
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ for (int i = NumDims - 1; i > 0; --i) {
+ const Index idx = index / m_fastOutputStrides[i];
+ inputIndex += (idx + m_offsets[i]) * m_inputStrides[i];
+ index -= idx * m_outputStrides[i];
+ }
+ inputIndex += (index + m_offsets[0]);
+ } else {
+ for (int i = 0; i < NumDims - 1; ++i) {
+ const Index idx = index / m_fastOutputStrides[i];
+ inputIndex += (idx + m_offsets[i]) * m_inputStrides[i];
+ index -= idx * m_outputStrides[i];
+ }
+ inputIndex += (index + m_offsets[NumDims-1]);
+ }
+ return inputIndex;
+ }
+
+ array<Index, NumDims> m_outputStrides;
+ array<internal::TensorIntDivisor<Index>, NumDims> m_fastOutputStrides;
+ array<Index, NumDims> m_inputStrides;
+ TensorEvaluator<ArgType, Device> m_impl;
+ const Device& m_device;
+ Dimensions m_dimensions;
+ const StartIndices m_offsets;
+};
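The srcCoeff() helper above is the heart of the slice evaluator: it peels one output coordinate per dimension off the linear index, shifts it by the slice offset, and re-linearizes with the input strides. A scalar walk-through of the column-major branch (hypothetical illustration code, not part of the patch):

```cpp
#include <cassert>

int main() {
  // A 2x2 slice starting at (1, 1) of a 4x5 column-major tensor.
  const int inputDim0 = 4;
  const int offsets[2] = {1, 1};
  const int outputStrides[2] = {1, 2};      // {1, outputDim0}
  const int inputStrides[2] = {1, inputDim0};

  int index = 0;      // first element of the slice
  int inputIndex = 0;
  // Peel the outermost coordinate first, exactly as the ColMajor loop does.
  const int idx1 = index / outputStrides[1];
  inputIndex += (idx1 + offsets[1]) * inputStrides[1];
  index -= idx1 * outputStrides[1];
  // Innermost dimension: only the offset remains to be added.
  inputIndex += index + offsets[0];

  // Slice element (0,0) is input coordinate (1,1): linear index 1 + 1*4 = 5.
  assert(inputIndex == 5);
  return 0;
}
```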
+
+
+// Eval as lvalue
+template<typename StartIndices, typename Sizes, typename ArgType, typename Device>
+struct TensorEvaluator<TensorSlicingOp<StartIndices, Sizes, ArgType>, Device>
+ : public TensorEvaluator<const TensorSlicingOp<StartIndices, Sizes, ArgType>, Device>
+{
+ typedef TensorEvaluator<const TensorSlicingOp<StartIndices, Sizes, ArgType>, Device> Base;
+ typedef TensorSlicingOp<StartIndices, Sizes, ArgType> XprType;
+ static const int NumDims = internal::array_size<Sizes>::value;
+
+ enum {
+ IsAligned = /*TensorEvaluator<ArgType, Device>::IsAligned*/false,
+ PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
+ Layout = TensorEvaluator<ArgType, Device>::Layout,
+ CoordAccess = false,
+ RawAccess = false
+ };
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
+ : Base(op, device)
+ { }
+
+ typedef typename XprType::Index Index;
+ typedef typename XprType::Scalar Scalar;
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+ typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
+ typedef Sizes Dimensions;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType& coeffRef(Index index)
+ {
+ return this->m_impl.coeffRef(this->srcCoeff(index));
+ }
+
+ template <int StoreMode> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ void writePacket(Index index, const PacketReturnType& x)
+ {
+ const int packetSize = internal::unpacket_traits<PacketReturnType>::size;
+ Index inputIndices[] = {0, 0};
+ Index indices[] = {index, index + packetSize - 1};
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ for (int i = NumDims - 1; i > 0; --i) {
+ const Index idx0 = indices[0] / this->m_fastOutputStrides[i];
+ const Index idx1 = indices[1] / this->m_fastOutputStrides[i];
+ inputIndices[0] += (idx0 + this->m_offsets[i]) * this->m_inputStrides[i];
+ inputIndices[1] += (idx1 + this->m_offsets[i]) * this->m_inputStrides[i];
+ indices[0] -= idx0 * this->m_outputStrides[i];
+ indices[1] -= idx1 * this->m_outputStrides[i];
+ }
+ inputIndices[0] += (indices[0] + this->m_offsets[0]);
+ inputIndices[1] += (indices[1] + this->m_offsets[0]);
+ } else {
+ for (int i = 0; i < NumDims - 1; ++i) {
+ const Index idx0 = indices[0] / this->m_fastOutputStrides[i];
+ const Index idx1 = indices[1] / this->m_fastOutputStrides[i];
+ inputIndices[0] += (idx0 + this->m_offsets[i]) * this->m_inputStrides[i];
+ inputIndices[1] += (idx1 + this->m_offsets[i]) * this->m_inputStrides[i];
+ indices[0] -= idx0 * this->m_outputStrides[i];
+ indices[1] -= idx1 * this->m_outputStrides[i];
+ }
+ inputIndices[0] += (indices[0] + this->m_offsets[NumDims-1]);
+ inputIndices[1] += (indices[1] + this->m_offsets[NumDims-1]);
+ }
+ if (inputIndices[1] - inputIndices[0] == packetSize - 1) {
+ this->m_impl.template writePacket<StoreMode>(inputIndices[0], x);
+ }
+ else {
+ EIGEN_ALIGN_MAX CoeffReturnType values[packetSize];
+ internal::pstore<CoeffReturnType, PacketReturnType>(values, x);
+ this->m_impl.coeffRef(inputIndices[0]) = values[0];
+ this->m_impl.coeffRef(inputIndices[1]) = values[packetSize-1];
+ for (int i = 1; i < packetSize-1; ++i) {
+ this->coeffRef(index+i) = values[i];
+ }
+ }
+ }
+};
+
+
+
+namespace internal {
+template<typename StartIndices, typename StopIndices, typename Strides, typename XprType>
+struct traits<TensorStridingSlicingOp<StartIndices, StopIndices, Strides, XprType> > : public traits<XprType>
+{
+ typedef typename XprType::Scalar Scalar;
+ typedef traits<XprType> XprTraits;
+ typedef typename XprTraits::StorageKind StorageKind;
+ typedef typename XprTraits::Index Index;
+ typedef typename XprType::Nested Nested;
+ typedef typename remove_reference<Nested>::type _Nested;
+ static const int NumDimensions = array_size<StartIndices>::value;
+ static const int Layout = XprTraits::Layout;
+};
+
+template<typename StartIndices, typename StopIndices, typename Strides, typename XprType>
+struct eval<TensorStridingSlicingOp<StartIndices, StopIndices, Strides, XprType>, Eigen::Dense>
+{
+ typedef const TensorStridingSlicingOp<StartIndices, StopIndices, Strides, XprType>& type;
+};
+
+template<typename StartIndices, typename StopIndices, typename Strides, typename XprType>
+struct nested<TensorStridingSlicingOp<StartIndices, StopIndices, Strides, XprType>, 1, typename eval<TensorStridingSlicingOp<StartIndices, StopIndices, Strides, XprType> >::type>
+{
+ typedef TensorStridingSlicingOp<StartIndices, StopIndices, Strides, XprType> type;
+};
+
+} // end namespace internal
+
+
+template<typename StartIndices, typename StopIndices, typename Strides, typename XprType>
+class TensorStridingSlicingOp : public TensorBase<TensorStridingSlicingOp<StartIndices, StopIndices, Strides, XprType> >
+{
+ public:
+ typedef typename internal::traits<TensorStridingSlicingOp>::Scalar Scalar;
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+ typedef typename internal::nested<TensorStridingSlicingOp>::type Nested;
+ typedef typename internal::traits<TensorStridingSlicingOp>::StorageKind StorageKind;
+ typedef typename internal::traits<TensorStridingSlicingOp>::Index Index;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorStridingSlicingOp(
+ const XprType& expr, const StartIndices& startIndices,
+ const StopIndices& stopIndices, const Strides& strides)
+ : m_xpr(expr), m_startIndices(startIndices), m_stopIndices(stopIndices),
+ m_strides(strides) {}
+
+ EIGEN_DEVICE_FUNC
+ const StartIndices& startIndices() const { return m_startIndices; }
+ EIGEN_DEVICE_FUNC
+ const StopIndices& stopIndices() const { return m_stopIndices; }
+ EIGEN_DEVICE_FUNC
+ const Strides& strides() const { return m_strides; }
+
+ EIGEN_DEVICE_FUNC
+ const typename internal::remove_all<typename XprType::Nested>::type&
+ expression() const { return m_xpr; }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE TensorStridingSlicingOp& operator = (const TensorStridingSlicingOp& other)
+ {
+ typedef TensorAssignOp<TensorStridingSlicingOp, const TensorStridingSlicingOp> Assign;
+ Assign assign(*this, other);
+ internal::TensorExecutor<const Assign, DefaultDevice>::run(
+ assign, DefaultDevice());
+ return *this;
+ }
+
+ template<typename OtherDerived>
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE TensorStridingSlicingOp& operator = (const OtherDerived& other)
+ {
+ typedef TensorAssignOp<TensorStridingSlicingOp, const OtherDerived> Assign;
+ Assign assign(*this, other);
+ internal::TensorExecutor<const Assign, DefaultDevice>::run(
+ assign, DefaultDevice());
+ return *this;
+ }
+
+ protected:
+ typename XprType::Nested m_xpr;
+ const StartIndices m_startIndices;
+ const StopIndices m_stopIndices;
+ const Strides m_strides;
+};
+
+// Eval as rvalue
+template<typename StartIndices, typename StopIndices, typename Strides, typename ArgType, typename Device>
+struct TensorEvaluator<const TensorStridingSlicingOp<StartIndices, StopIndices, Strides, ArgType>, Device>
+{
+ typedef TensorStridingSlicingOp<StartIndices, StopIndices, Strides, ArgType> XprType;
+ static const int NumDims = internal::array_size<Strides>::value;
+
+ enum {
+ // Alignment can't be guaranteed at compile time since it depends on the
+ // slice offsets and sizes.
+ IsAligned = false,
+ PacketAccess = false,
+ BlockAccess = false,
+ Layout = TensorEvaluator<ArgType, Device>::Layout,
+ RawAccess = false
+ };
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
+ : m_impl(op.expression(), device), m_device(device), m_strides(op.strides())
+ {
+ // Handle degenerate intervals by gracefully clamping and allowing m_dimensions to be zero
+ DSizes<Index,NumDims> startIndicesClamped, stopIndicesClamped;
+ for (size_t i = 0; i < internal::array_size<Dimensions>::value; ++i) {
+ eigen_assert(m_strides[i] != 0 && "0 stride is invalid");
+ if (m_strides[i] > 0) {
+ startIndicesClamped[i] = clamp(op.startIndices()[i], 0, m_impl.dimensions()[i]);
+ stopIndicesClamped[i] = clamp(op.stopIndices()[i], 0, m_impl.dimensions()[i]);
+ } else {
+ /* implies m_strides[i] < 0 by the assert above */
+ startIndicesClamped[i] = clamp(op.startIndices()[i], -1, m_impl.dimensions()[i] - 1);
+ stopIndicesClamped[i] = clamp(op.stopIndices()[i], -1, m_impl.dimensions()[i] - 1);
+ }
+ m_startIndices[i] = startIndicesClamped[i];
+ }
+
+ const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
+
+ // check for degenerate intervals and compute output tensor shape
+ bool degenerate = false;
+ for (int i = 0; i < NumDims; ++i) {
+ Index interval = stopIndicesClamped[i] - startIndicesClamped[i];
+ if (interval == 0 || ((interval < 0) != (m_strides[i] < 0))) {
+ m_dimensions[i] = 0;
+ degenerate = true;
+ } else {
+ m_dimensions[i] = interval / m_strides[i]
+ + (interval % m_strides[i] != 0 ? 1 : 0);
+ eigen_assert(m_dimensions[i] >= 0);
+ }
+ }
+ Strides output_dims = m_dimensions;
+
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ m_inputStrides[0] = m_strides[0];
+ m_offsets[0] = startIndicesClamped[0];
+ Index previousDimProduct = 1;
+ for (int i = 1; i < NumDims; ++i) {
+ previousDimProduct *= input_dims[i-1];
+ m_inputStrides[i] = previousDimProduct * m_strides[i];
+ m_offsets[i] = startIndicesClamped[i] * previousDimProduct;
+ }
+
+ // Don't initialize m_fastOutputStrides[0] since it won't ever be accessed.
+ m_outputStrides[0] = 1;
+ for (int i = 1; i < NumDims; ++i) {
+ m_outputStrides[i] = m_outputStrides[i-1] * output_dims[i-1];
+ // NOTE: if tensor is degenerate, we send 1 to prevent TensorIntDivisor constructor crash
+ m_fastOutputStrides[i] = internal::TensorIntDivisor<Index>(degenerate ? 1 : m_outputStrides[i]);
+ }
+ } else {
+ m_inputStrides[NumDims-1] = m_strides[NumDims-1];
+ m_offsets[NumDims-1] = startIndicesClamped[NumDims-1];
+ Index previousDimProduct = 1;
+ for (int i = NumDims - 2; i >= 0; --i) {
+ previousDimProduct *= input_dims[i+1];
+ m_inputStrides[i] = previousDimProduct * m_strides[i];
+ m_offsets[i] = startIndicesClamped[i] * previousDimProduct;
+ }
+
+ m_outputStrides[NumDims-1] = 1;
+ for (int i = NumDims - 2; i >= 0; --i) {
+ m_outputStrides[i] = m_outputStrides[i+1] * output_dims[i+1];
+ // NOTE: if tensor is degenerate, we send 1 to prevent TensorIntDivisor constructor crash
+ m_fastOutputStrides[i] = internal::TensorIntDivisor<Index>(degenerate ? 1 : m_outputStrides[i]);
+ }
+ }
+ m_block_total_size_max = numext::maxi(static_cast<std::size_t>(1),
+ device.lastLevelCacheSize() /
+ sizeof(Scalar));
+ }
+
+ typedef typename XprType::Index Index;
+ typedef typename XprType::Scalar Scalar;
+ typedef typename internal::remove_const<Scalar>::type ScalarNonConst;
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+ typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
+ typedef Strides Dimensions;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
+
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType*) {
+ m_impl.evalSubExprsIfNeeded(NULL);
+ return true;
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
+ m_impl.cleanup();
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
+ {
+ return m_impl.coeff(srcCoeff(index));
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
+ return m_impl.costPerCoeff(vectorized) + TensorOpCost(0, 0, NumDims);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar* data() const {
+ return NULL;
+ }
+
+ protected:
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index srcCoeff(Index index) const
+ {
+ Index inputIndex = 0;
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ for (int i = NumDims - 1; i >= 0; --i) {
+ const Index idx = index / m_fastOutputStrides[i];
+ inputIndex += idx * m_inputStrides[i] + m_offsets[i];
+ index -= idx * m_outputStrides[i];
+ }
+ } else {
+ for (int i = 0; i < NumDims; ++i) {
+ const Index idx = index / m_fastOutputStrides[i];
+ inputIndex += idx * m_inputStrides[i] + m_offsets[i];
+ index -= idx * m_outputStrides[i];
+ }
+ }
+ return inputIndex;
+ }
+
+ static EIGEN_STRONG_INLINE Index clamp(Index value, Index min, Index max) {
+ return numext::maxi(min, numext::mini(max,value));
+ }
+
+ array<Index, NumDims> m_outputStrides;
+ array<internal::TensorIntDivisor<Index>, NumDims> m_fastOutputStrides;
+ array<Index, NumDims> m_inputStrides;
+ TensorEvaluator<ArgType, Device> m_impl;
+ const Device& m_device;
+ DSizes<Index, NumDims> m_startIndices; // clamped startIndices
+ DSizes<Index, NumDims> m_dimensions;
+ DSizes<Index, NumDims> m_offsets; // offset in a flattened shape
+ const Strides m_strides;
+ std::size_t m_block_total_size_max;
+};
+
+// Eval as lvalue
+template<typename StartIndices, typename StopIndices, typename Strides, typename ArgType, typename Device>
+struct TensorEvaluator<TensorStridingSlicingOp<StartIndices, StopIndices, Strides, ArgType>, Device>
+ : public TensorEvaluator<const TensorStridingSlicingOp<StartIndices, StopIndices, Strides, ArgType>, Device>
+{
+ typedef TensorEvaluator<const TensorStridingSlicingOp<StartIndices, StopIndices, Strides, ArgType>, Device> Base;
+ typedef TensorStridingSlicingOp<StartIndices, StopIndices, Strides, ArgType> XprType;
+ static const int NumDims = internal::array_size<Strides>::value;
+
+ enum {
+ IsAligned = false,
+ PacketAccess = false,
+ BlockAccess = false,
+ Layout = TensorEvaluator<ArgType, Device>::Layout,
+ CoordAccess = TensorEvaluator<ArgType, Device>::CoordAccess,
+ RawAccess = false
+ };
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
+ : Base(op, device)
+ { }
+
+ typedef typename XprType::Index Index;
+ typedef typename XprType::Scalar Scalar;
+ typedef typename internal::remove_const<Scalar>::type ScalarNonConst;
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+ typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
+ typedef Strides Dimensions;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType& coeffRef(Index index)
+ {
+ return this->m_impl.coeffRef(this->srcCoeff(index));
+ }
+};
+
+
+} // end namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_MORPHING_H
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorPadding.h b/unsupported/Eigen/CXX11/src/Tensor/TensorPadding.h
new file mode 100644
index 000000000..647bcf108
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorPadding.h
@@ -0,0 +1,397 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_PADDING_H
+#define EIGEN_CXX11_TENSOR_TENSOR_PADDING_H
+
+namespace Eigen {
+
+/** \class TensorPadding
+ * \ingroup CXX11_Tensor_Module
+ *
+ * \brief Tensor padding class.
+ * At the moment only padding with a constant value is supported.
+ *
+ */
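+// Illustrative usage (a sketch of the pad() helper on TensorBase): pad a 2D
+// tensor with one extra row and column of zeros on each side.
+//   Eigen::Tensor<float, 2> input(3, 4);
+//   input.setRandom();
+//   Eigen::array<std::pair<int, int>, 2> paddings;
+//   paddings[0] = std::make_pair(1, 1);  // before/after padding for dimension 0
+//   paddings[1] = std::make_pair(1, 1);  // before/after padding for dimension 1
+//   Eigen::Tensor<float, 2> padded = input.pad(paddings);  // 5 x 6, border is 0
+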
+namespace internal {
+template<typename PaddingDimensions, typename XprType>
+struct traits<TensorPaddingOp<PaddingDimensions, XprType> > : public traits<XprType>
+{
+ typedef typename XprType::Scalar Scalar;
+ typedef traits<XprType> XprTraits;
+ typedef typename XprTraits::StorageKind StorageKind;
+ typedef typename XprTraits::Index Index;
+ typedef typename XprType::Nested Nested;
+ typedef typename remove_reference<Nested>::type _Nested;
+ static const int NumDimensions = XprTraits::NumDimensions;
+ static const int Layout = XprTraits::Layout;
+};
+
+template<typename PaddingDimensions, typename XprType>
+struct eval<TensorPaddingOp<PaddingDimensions, XprType>, Eigen::Dense>
+{
+ typedef const TensorPaddingOp<PaddingDimensions, XprType>& type;
+};
+
+template<typename PaddingDimensions, typename XprType>
+struct nested<TensorPaddingOp<PaddingDimensions, XprType>, 1, typename eval<TensorPaddingOp<PaddingDimensions, XprType> >::type>
+{
+ typedef TensorPaddingOp<PaddingDimensions, XprType> type;
+};
+
+} // end namespace internal
+
+
+
+template<typename PaddingDimensions, typename XprType>
+class TensorPaddingOp : public TensorBase<TensorPaddingOp<PaddingDimensions, XprType>, ReadOnlyAccessors>
+{
+ public:
+ typedef typename Eigen::internal::traits<TensorPaddingOp>::Scalar Scalar;
+ typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+ typedef typename Eigen::internal::nested<TensorPaddingOp>::type Nested;
+ typedef typename Eigen::internal::traits<TensorPaddingOp>::StorageKind StorageKind;
+ typedef typename Eigen::internal::traits<TensorPaddingOp>::Index Index;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorPaddingOp(const XprType& expr, const PaddingDimensions& padding_dims, const Scalar padding_value)
+ : m_xpr(expr), m_padding_dims(padding_dims), m_padding_value(padding_value) {}
+
+ EIGEN_DEVICE_FUNC
+ const PaddingDimensions& padding() const { return m_padding_dims; }
+ EIGEN_DEVICE_FUNC
+ Scalar padding_value() const { return m_padding_value; }
+
+ EIGEN_DEVICE_FUNC
+ const typename internal::remove_all<typename XprType::Nested>::type&
+ expression() const { return m_xpr; }
+
+ protected:
+ typename XprType::Nested m_xpr;
+ const PaddingDimensions m_padding_dims;
+ const Scalar m_padding_value;
+};
+
+
+// Eval as rvalue
+template<typename PaddingDimensions, typename ArgType, typename Device>
+struct TensorEvaluator<const TensorPaddingOp<PaddingDimensions, ArgType>, Device>
+{
+ typedef TensorPaddingOp<PaddingDimensions, ArgType> XprType;
+ typedef typename XprType::Index Index;
+ static const int NumDims = internal::array_size<PaddingDimensions>::value;
+ typedef DSizes<Index, NumDims> Dimensions;
+ typedef typename XprType::Scalar Scalar;
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+ typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
+ static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
+
+ enum {
+ IsAligned = true,
+ PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
+ Layout = TensorEvaluator<ArgType, Device>::Layout,
+ CoordAccess = true,
+ RawAccess = false
+ };
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
+ : m_impl(op.expression(), device), m_padding(op.padding()), m_paddingValue(op.padding_value())
+ {
+ // The padding op doesn't change the rank of the tensor. Directly padding a scalar would lead
+ // to a vector, which doesn't make sense. Instead one should reshape the scalar into a vector
+ // of 1 element first and then pad.
+ EIGEN_STATIC_ASSERT((NumDims > 0), YOU_MADE_A_PROGRAMMING_MISTAKE);
+
+ // Compute dimensions
+ m_dimensions = m_impl.dimensions();
+ for (int i = 0; i < NumDims; ++i) {
+ m_dimensions[i] += m_padding[i].first + m_padding[i].second;
+ }
+ const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ m_inputStrides[0] = 1;
+ m_outputStrides[0] = 1;
+ for (int i = 1; i < NumDims; ++i) {
+ m_inputStrides[i] = m_inputStrides[i-1] * input_dims[i-1];
+ m_outputStrides[i] = m_outputStrides[i-1] * m_dimensions[i-1];
+ }
+ m_outputStrides[NumDims] = m_outputStrides[NumDims-1] * m_dimensions[NumDims-1];
+ } else {
+ m_inputStrides[NumDims - 1] = 1;
+ m_outputStrides[NumDims] = 1;
+ for (int i = NumDims - 2; i >= 0; --i) {
+ m_inputStrides[i] = m_inputStrides[i+1] * input_dims[i+1];
+ m_outputStrides[i+1] = m_outputStrides[i+2] * m_dimensions[i+1];
+ }
+ m_outputStrides[0] = m_outputStrides[1] * m_dimensions[0];
+ }
+ }
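+
+ // Note: m_outputStrides holds NumDims+1 entries; the extra slot stores the
+ // total padded size at the outer end, which the packet paths below use as
+ // the upper bound of the outermost padding zone.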
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar*) {
+ m_impl.evalSubExprsIfNeeded(NULL);
+ return true;
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
+ m_impl.cleanup();
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
+ {
+ eigen_assert(index < dimensions().TotalSize());
+ Index inputIndex = 0;
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ for (int i = NumDims - 1; i > 0; --i) {
+ const Index idx = index / m_outputStrides[i];
+ if (isPaddingAtIndexForDim(idx, i)) {
+ return m_paddingValue;
+ }
+ inputIndex += (idx - m_padding[i].first) * m_inputStrides[i];
+ index -= idx * m_outputStrides[i];
+ }
+ if (isPaddingAtIndexForDim(index, 0)) {
+ return m_paddingValue;
+ }
+ inputIndex += (index - m_padding[0].first);
+ } else {
+ for (int i = 0; i < NumDims - 1; ++i) {
+ const Index idx = index / m_outputStrides[i+1];
+ if (isPaddingAtIndexForDim(idx, i)) {
+ return m_paddingValue;
+ }
+ inputIndex += (idx - m_padding[i].first) * m_inputStrides[i];
+ index -= idx * m_outputStrides[i+1];
+ }
+ if (isPaddingAtIndexForDim(index, NumDims-1)) {
+ return m_paddingValue;
+ }
+ inputIndex += (index - m_padding[NumDims-1].first);
+ }
+ return m_impl.coeff(inputIndex);
+ }
+
+ template<int LoadMode>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
+ {
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ return packetColMajor(index);
+ }
+ return packetRowMajor(index);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
+ TensorOpCost cost = m_impl.costPerCoeff(vectorized);
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ for (int i = 0; i < NumDims; ++i)
+ updateCostPerDimension(cost, i, i == 0);
+ } else {
+ for (int i = NumDims - 1; i >= 0; --i)
+ updateCostPerDimension(cost, i, i == NumDims - 1);
+ }
+ return cost;
+ }
+
+ EIGEN_DEVICE_FUNC Scalar* data() const { return NULL; }
+
+ private:
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool isPaddingAtIndexForDim(
+ Index index, int dim_index) const {
+#if defined(EIGEN_HAS_INDEX_LIST)
+ return (!internal::index_pair_first_statically_eq<PaddingDimensions>(dim_index, 0) &&
+ index < m_padding[dim_index].first) ||
+ (!internal::index_pair_second_statically_eq<PaddingDimensions>(dim_index, 0) &&
+ index >= m_dimensions[dim_index] - m_padding[dim_index].second);
+#else
+ return (index < m_padding[dim_index].first) ||
+ (index >= m_dimensions[dim_index] - m_padding[dim_index].second);
+#endif
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool isLeftPaddingCompileTimeZero(
+ int dim_index) const {
+#if defined(EIGEN_HAS_INDEX_LIST)
+ return internal::index_pair_first_statically_eq<PaddingDimensions>(dim_index, 0);
+#else
+ EIGEN_UNUSED_VARIABLE(dim_index);
+ return false;
+#endif
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool isRightPaddingCompileTimeZero(
+ int dim_index) const {
+#if defined(EIGEN_HAS_INDEX_LIST)
+ return internal::index_pair_second_statically_eq<PaddingDimensions>(dim_index, 0);
+#else
+ EIGEN_UNUSED_VARIABLE(dim_index);
+ return false;
+#endif
+ }
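+
+ // When EIGEN_HAS_INDEX_LIST is available and a padding amount is known to be
+ // zero at compile time, the corresponding checks in coeff() and the packet
+ // paths fold away, so fully interior dimensions are read without padding tests.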
+
+
+ void updateCostPerDimension(TensorOpCost& cost, int i, bool first) const {
+ const double in = static_cast<double>(m_impl.dimensions()[i]);
+ const double out = in + m_padding[i].first + m_padding[i].second;
+ if (out == 0)
+ return;
+ const double reduction = in / out;
+ cost *= reduction;
+ if (first) {
+ cost += TensorOpCost(0, 0, 2 * TensorOpCost::AddCost<Index>() +
+ reduction * (1 * TensorOpCost::AddCost<Index>()));
+ } else {
+ cost += TensorOpCost(0, 0, 2 * TensorOpCost::AddCost<Index>() +
+ 2 * TensorOpCost::MulCost<Index>() +
+ reduction * (2 * TensorOpCost::MulCost<Index>() +
+ 1 * TensorOpCost::DivCost<Index>()));
+ }
+ }
+
+ protected:
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packetColMajor(Index index) const
+ {
+ EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
+ eigen_assert(index+PacketSize-1 < dimensions().TotalSize());
+
+ const Index initialIndex = index;
+ Index inputIndex = 0;
+ for (int i = NumDims - 1; i > 0; --i) {
+ const Index first = index;
+ const Index last = index + PacketSize - 1;
+ const Index lastPaddedLeft = m_padding[i].first * m_outputStrides[i];
+ const Index firstPaddedRight = (m_dimensions[i] - m_padding[i].second) * m_outputStrides[i];
+ const Index lastPaddedRight = m_outputStrides[i+1];
+
+ if (!isLeftPaddingCompileTimeZero(i) && last < lastPaddedLeft) {
+ // all the coefficients are in the padding zone.
+ return internal::pset1<PacketReturnType>(m_paddingValue);
+ }
+ else if (!isRightPaddingCompileTimeZero(i) && first >= firstPaddedRight && last < lastPaddedRight) {
+ // all the coefficients are in the padding zone.
+ return internal::pset1<PacketReturnType>(m_paddingValue);
+ }
+ else if ((isLeftPaddingCompileTimeZero(i) && isRightPaddingCompileTimeZero(i)) || (first >= lastPaddedLeft && last < firstPaddedRight)) {
+ // all the coefficients are between the two padding zones.
+ const Index idx = index / m_outputStrides[i];
+ inputIndex += (idx - m_padding[i].first) * m_inputStrides[i];
+ index -= idx * m_outputStrides[i];
+ }
+ else {
+ // Every other case
+ return packetWithPossibleZero(initialIndex);
+ }
+ }
+
+ const Index last = index + PacketSize - 1;
+ const Index first = index;
+ const Index lastPaddedLeft = m_padding[0].first;
+ const Index firstPaddedRight = (m_dimensions[0] - m_padding[0].second);
+ const Index lastPaddedRight = m_outputStrides[1];
+
+ if (!isLeftPaddingCompileTimeZero(0) && last < lastPaddedLeft) {
+ // all the coefficients are in the padding zone.
+ return internal::pset1<PacketReturnType>(m_paddingValue);
+ }
+ else if (!isRightPaddingCompileTimeZero(0) && first >= firstPaddedRight && last < lastPaddedRight) {
+ // all the coefficients are in the padding zone.
+ return internal::pset1<PacketReturnType>(m_paddingValue);
+ }
+ else if ((isLeftPaddingCompileTimeZero(0) && isRightPaddingCompileTimeZero(0)) || (first >= lastPaddedLeft && last < firstPaddedRight)) {
+ // all the coefficients are between the two padding zones.
+ inputIndex += (index - m_padding[0].first);
+ return m_impl.template packet<Unaligned>(inputIndex);
+ }
+ // Every other case
+ return packetWithPossibleZero(initialIndex);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packetRowMajor(Index index) const
+ {
+ EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
+ eigen_assert(index+PacketSize-1 < dimensions().TotalSize());
+
+ const Index initialIndex = index;
+ Index inputIndex = 0;
+
+ for (int i = 0; i < NumDims - 1; ++i) {
+ const Index first = index;
+ const Index last = index + PacketSize - 1;
+ const Index lastPaddedLeft = m_padding[i].first * m_outputStrides[i+1];
+ const Index firstPaddedRight = (m_dimensions[i] - m_padding[i].second) * m_outputStrides[i+1];
+ const Index lastPaddedRight = m_outputStrides[i];
+
+ if (!isLeftPaddingCompileTimeZero(i) && last < lastPaddedLeft) {
+ // all the coefficients are in the padding zone.
+ return internal::pset1<PacketReturnType>(m_paddingValue);
+ }
+ else if (!isRightPaddingCompileTimeZero(i) && first >= firstPaddedRight && last < lastPaddedRight) {
+ // all the coefficients are in the padding zone.
+ return internal::pset1<PacketReturnType>(m_paddingValue);
+ }
+ else if ((isLeftPaddingCompileTimeZero(i) && isRightPaddingCompileTimeZero(i)) || (first >= lastPaddedLeft && last < firstPaddedRight)) {
+ // all the coefficients are between the two padding zones.
+ const Index idx = index / m_outputStrides[i+1];
+ inputIndex += (idx - m_padding[i].first) * m_inputStrides[i];
+ index -= idx * m_outputStrides[i+1];
+ }
+ else {
+ // Every other case
+ return packetWithPossibleZero(initialIndex);
+ }
+ }
+
+ const Index last = index + PacketSize - 1;
+ const Index first = index;
+ const Index lastPaddedLeft = m_padding[NumDims-1].first;
+ const Index firstPaddedRight = (m_dimensions[NumDims-1] - m_padding[NumDims-1].second);
+ const Index lastPaddedRight = m_outputStrides[NumDims-1];
+
+ if (!isLeftPaddingCompileTimeZero(NumDims-1) && last < lastPaddedLeft) {
+ // all the coefficients are in the padding zone.
+ return internal::pset1<PacketReturnType>(m_paddingValue);
+ }
+ else if (!isRightPaddingCompileTimeZero(NumDims-1) && first >= firstPaddedRight && last < lastPaddedRight) {
+ // all the coefficients are in the padding zone.
+ return internal::pset1<PacketReturnType>(m_paddingValue);
+ }
+ else if ((isLeftPaddingCompileTimeZero(NumDims-1) && isRightPaddingCompileTimeZero(NumDims-1)) || (first >= lastPaddedLeft && last < firstPaddedRight)) {
+ // all the coefficients are between the two padding zones.
+ inputIndex += (index - m_padding[NumDims-1].first);
+ return m_impl.template packet<Unaligned>(inputIndex);
+ }
+ // Every other case
+ return packetWithPossibleZero(initialIndex);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packetWithPossibleZero(Index index) const
+ {
+ EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize];
+ for (int i = 0; i < PacketSize; ++i) {
+ values[i] = coeff(index+i);
+ }
+ PacketReturnType rslt = internal::pload<PacketReturnType>(values);
+ return rslt;
+ }
+
+ Dimensions m_dimensions;
+ array<Index, NumDims+1> m_outputStrides;
+ array<Index, NumDims> m_inputStrides;
+ TensorEvaluator<ArgType, Device> m_impl;
+ PaddingDimensions m_padding;
+
+ Scalar m_paddingValue;
+};
+
+
+
+
+} // end namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_PADDING_H
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorPatch.h b/unsupported/Eigen/CXX11/src/Tensor/TensorPatch.h
new file mode 100644
index 000000000..886a254f6
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorPatch.h
@@ -0,0 +1,269 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_PATCH_H
+#define EIGEN_CXX11_TENSOR_TENSOR_PATCH_H
+
+namespace Eigen {
+
+/** \class TensorPatch
+ * \ingroup CXX11_Tensor_Module
+ *
+ * \brief Tensor patch class.
+ * Extracts every patch of the specified dimensions from the input tensor;
+ * the result has one extra dimension that indexes the extracted patches.
+ *
+ */
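+// Illustrative usage (a sketch of the extract_patches() helper on TensorBase):
+// extract all 2x2 patches of a 3x3 tensor; in ColMajor the last dimension
+// indexes the (3-2+1)*(3-2+1) = 4 patches.
+//   Eigen::Tensor<float, 2> t(3, 3);
+//   t.setRandom();
+//   Eigen::array<ptrdiff_t, 2> patch_dims({2, 2});
+//   Eigen::Tensor<float, 3> patches = t.extract_patches(patch_dims);  // 2 x 2 x 4
+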
+namespace internal {
+template<typename PatchDim, typename XprType>
+struct traits<TensorPatchOp<PatchDim, XprType> > : public traits<XprType>
+{
+ typedef typename XprType::Scalar Scalar;
+ typedef traits<XprType> XprTraits;
+ typedef typename XprTraits::StorageKind StorageKind;
+ typedef typename XprTraits::Index Index;
+ typedef typename XprType::Nested Nested;
+ typedef typename remove_reference<Nested>::type _Nested;
+ static const int NumDimensions = XprTraits::NumDimensions + 1;
+ static const int Layout = XprTraits::Layout;
+};
+
+template<typename PatchDim, typename XprType>
+struct eval<TensorPatchOp<PatchDim, XprType>, Eigen::Dense>
+{
+ typedef const TensorPatchOp<PatchDim, XprType>& type;
+};
+
+template<typename PatchDim, typename XprType>
+struct nested<TensorPatchOp<PatchDim, XprType>, 1, typename eval<TensorPatchOp<PatchDim, XprType> >::type>
+{
+ typedef TensorPatchOp<PatchDim, XprType> type;
+};
+
+} // end namespace internal
+
+
+
+template<typename PatchDim, typename XprType>
+class TensorPatchOp : public TensorBase<TensorPatchOp<PatchDim, XprType>, ReadOnlyAccessors>
+{
+ public:
+ typedef typename Eigen::internal::traits<TensorPatchOp>::Scalar Scalar;
+ typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+ typedef typename Eigen::internal::nested<TensorPatchOp>::type Nested;
+ typedef typename Eigen::internal::traits<TensorPatchOp>::StorageKind StorageKind;
+ typedef typename Eigen::internal::traits<TensorPatchOp>::Index Index;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorPatchOp(const XprType& expr, const PatchDim& patch_dims)
+ : m_xpr(expr), m_patch_dims(patch_dims) {}
+
+ EIGEN_DEVICE_FUNC
+ const PatchDim& patch_dims() const { return m_patch_dims; }
+
+ EIGEN_DEVICE_FUNC
+ const typename internal::remove_all<typename XprType::Nested>::type&
+ expression() const { return m_xpr; }
+
+ protected:
+ typename XprType::Nested m_xpr;
+ const PatchDim m_patch_dims;
+};
+
+
+// Eval as rvalue
+template<typename PatchDim, typename ArgType, typename Device>
+struct TensorEvaluator<const TensorPatchOp<PatchDim, ArgType>, Device>
+{
+ typedef TensorPatchOp<PatchDim, ArgType> XprType;
+ typedef typename XprType::Index Index;
+ static const int NumDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value + 1;
+ typedef DSizes<Index, NumDims> Dimensions;
+ typedef typename XprType::Scalar Scalar;
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+ typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
+ static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
+
+
+ enum {
+ IsAligned = false,
+ PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
+ Layout = TensorEvaluator<ArgType, Device>::Layout,
+ CoordAccess = false,
+ RawAccess = false
+ };
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
+ : m_impl(op.expression(), device)
+ {
+ Index num_patches = 1;
+ const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
+ const PatchDim& patch_dims = op.patch_dims();
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ for (int i = 0; i < NumDims-1; ++i) {
+ m_dimensions[i] = patch_dims[i];
+ num_patches *= (input_dims[i] - patch_dims[i] + 1);
+ }
+ m_dimensions[NumDims-1] = num_patches;
+
+ m_inputStrides[0] = 1;
+ m_patchStrides[0] = 1;
+ for (int i = 1; i < NumDims-1; ++i) {
+ m_inputStrides[i] = m_inputStrides[i-1] * input_dims[i-1];
+ m_patchStrides[i] = m_patchStrides[i-1] * (input_dims[i-1] - patch_dims[i-1] + 1);
+ }
+ m_outputStrides[0] = 1;
+ for (int i = 1; i < NumDims; ++i) {
+ m_outputStrides[i] = m_outputStrides[i-1] * m_dimensions[i-1];
+ }
+ } else {
+ for (int i = 0; i < NumDims-1; ++i) {
+ m_dimensions[i+1] = patch_dims[i];
+ num_patches *= (input_dims[i] - patch_dims[i] + 1);
+ }
+ m_dimensions[0] = num_patches;
+
+ m_inputStrides[NumDims-2] = 1;
+ m_patchStrides[NumDims-2] = 1;
+ for (int i = NumDims-3; i >= 0; --i) {
+ m_inputStrides[i] = m_inputStrides[i+1] * input_dims[i+1];
+ m_patchStrides[i] = m_patchStrides[i+1] * (input_dims[i+1] - patch_dims[i+1] + 1);
+ }
+ m_outputStrides[NumDims-1] = 1;
+ for (int i = NumDims-2; i >= 0; --i) {
+ m_outputStrides[i] = m_outputStrides[i+1] * m_dimensions[i+1];
+ }
+ }
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* /*data*/) {
+ m_impl.evalSubExprsIfNeeded(NULL);
+ return true;
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
+ m_impl.cleanup();
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
+ {
+ Index output_stride_index = (static_cast<int>(Layout) == static_cast<int>(ColMajor)) ? NumDims - 1 : 0;
+ // Find the location of the first element of the patch.
+ Index patchIndex = index / m_outputStrides[output_stride_index];
+ // Find the offset of the element wrt the location of the first element.
+ Index patchOffset = index - patchIndex * m_outputStrides[output_stride_index];
+ Index inputIndex = 0;
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ for (int i = NumDims - 2; i > 0; --i) {
+ const Index patchIdx = patchIndex / m_patchStrides[i];
+ patchIndex -= patchIdx * m_patchStrides[i];
+ const Index offsetIdx = patchOffset / m_outputStrides[i];
+ patchOffset -= offsetIdx * m_outputStrides[i];
+ inputIndex += (patchIdx + offsetIdx) * m_inputStrides[i];
+ }
+ } else {
+ for (int i = 0; i < NumDims - 2; ++i) {
+ const Index patchIdx = patchIndex / m_patchStrides[i];
+ patchIndex -= patchIdx * m_patchStrides[i];
+ const Index offsetIdx = patchOffset / m_outputStrides[i+1];
+ patchOffset -= offsetIdx * m_outputStrides[i+1];
+ inputIndex += (patchIdx + offsetIdx) * m_inputStrides[i];
+ }
+ }
+ inputIndex += (patchIndex + patchOffset);
+ return m_impl.coeff(inputIndex);
+ }
+
+ template<int LoadMode>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
+ {
+ EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
+ eigen_assert(index+PacketSize-1 < dimensions().TotalSize());
+
+ Index output_stride_index = (static_cast<int>(Layout) == static_cast<int>(ColMajor)) ? NumDims - 1 : 0;
+ Index indices[2] = {index, index + PacketSize - 1};
+ Index patchIndices[2] = {indices[0] / m_outputStrides[output_stride_index],
+ indices[1] / m_outputStrides[output_stride_index]};
+ Index patchOffsets[2] = {indices[0] - patchIndices[0] * m_outputStrides[output_stride_index],
+ indices[1] - patchIndices[1] * m_outputStrides[output_stride_index]};
+
+ Index inputIndices[2] = {0, 0};
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ for (int i = NumDims - 2; i > 0; --i) {
+ const Index patchIdx[2] = {patchIndices[0] / m_patchStrides[i],
+ patchIndices[1] / m_patchStrides[i]};
+ patchIndices[0] -= patchIdx[0] * m_patchStrides[i];
+ patchIndices[1] -= patchIdx[1] * m_patchStrides[i];
+
+ const Index offsetIdx[2] = {patchOffsets[0] / m_outputStrides[i],
+ patchOffsets[1] / m_outputStrides[i]};
+ patchOffsets[0] -= offsetIdx[0] * m_outputStrides[i];
+ patchOffsets[1] -= offsetIdx[1] * m_outputStrides[i];
+
+ inputIndices[0] += (patchIdx[0] + offsetIdx[0]) * m_inputStrides[i];
+ inputIndices[1] += (patchIdx[1] + offsetIdx[1]) * m_inputStrides[i];
+ }
+ } else {
+ for (int i = 0; i < NumDims - 2; ++i) {
+ const Index patchIdx[2] = {patchIndices[0] / m_patchStrides[i],
+ patchIndices[1] / m_patchStrides[i]};
+ patchIndices[0] -= patchIdx[0] * m_patchStrides[i];
+ patchIndices[1] -= patchIdx[1] * m_patchStrides[i];
+
+ const Index offsetIdx[2] = {patchOffsets[0] / m_outputStrides[i+1],
+ patchOffsets[1] / m_outputStrides[i+1]};
+ patchOffsets[0] -= offsetIdx[0] * m_outputStrides[i+1];
+ patchOffsets[1] -= offsetIdx[1] * m_outputStrides[i+1];
+
+ inputIndices[0] += (patchIdx[0] + offsetIdx[0]) * m_inputStrides[i];
+ inputIndices[1] += (patchIdx[1] + offsetIdx[1]) * m_inputStrides[i];
+ }
+ }
+ inputIndices[0] += (patchIndices[0] + patchOffsets[0]);
+ inputIndices[1] += (patchIndices[1] + patchOffsets[1]);
+
+ if (inputIndices[1] - inputIndices[0] == PacketSize - 1) {
+ PacketReturnType rslt = m_impl.template packet<Unaligned>(inputIndices[0]);
+ return rslt;
+ }
+ else {
+ EIGEN_ALIGN_MAX CoeffReturnType values[PacketSize];
+ values[0] = m_impl.coeff(inputIndices[0]);
+ values[PacketSize-1] = m_impl.coeff(inputIndices[1]);
+ for (int i = 1; i < PacketSize-1; ++i) {
+ values[i] = coeff(index+i);
+ }
+ PacketReturnType rslt = internal::pload<PacketReturnType>(values);
+ return rslt;
+ }
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
+ const double compute_cost = NumDims * (TensorOpCost::DivCost<Index>() +
+ TensorOpCost::MulCost<Index>() +
+ 2 * TensorOpCost::AddCost<Index>());
+ return m_impl.costPerCoeff(vectorized) +
+ TensorOpCost(0, 0, compute_cost, vectorized, PacketSize);
+ }
+
+ EIGEN_DEVICE_FUNC Scalar* data() const { return NULL; }
+
+ protected:
+ Dimensions m_dimensions;
+ array<Index, NumDims> m_outputStrides;
+ array<Index, NumDims-1> m_inputStrides;
+ array<Index, NumDims-1> m_patchStrides;
+
+ TensorEvaluator<ArgType, Device> m_impl;
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_PATCH_H
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorRandom.h b/unsupported/Eigen/CXX11/src/Tensor/TensorRandom.h
new file mode 100644
index 000000000..1655a813e
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorRandom.h
@@ -0,0 +1,276 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2016 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_RANDOM_H
+#define EIGEN_CXX11_TENSOR_TENSOR_RANDOM_H
+
+namespace Eigen {
+namespace internal {
+
+namespace {
+
+EIGEN_DEVICE_FUNC uint64_t get_random_seed() {
+#ifdef __CUDA_ARCH__
+ // We don't support 3d kernels since we currently only use 1d and
+ // 2d kernels.
+ assert(threadIdx.z == 0);
+ return clock64() +
+ blockIdx.x * blockDim.x + threadIdx.x +
+ gridDim.x * blockDim.x * (blockIdx.y * blockDim.y + threadIdx.y);
+
+#elif defined _WIN32
+ // Use the current time as a baseline.
+ SYSTEMTIME st;
+ GetSystemTime(&st);
+ int time = st.wSecond + 1000 * st.wMilliseconds;
+ // Mix in a random number to make sure that we get different seeds if
+ // we try to generate seeds faster than the clock resolution.
+ // We need 2 random values since the generator only generates 16 bits at
+ // a time (https://msdn.microsoft.com/en-us/library/398ax69y.aspx)
+ int rnd1 = ::rand();
+ int rnd2 = ::rand();
+ uint64_t rnd = (rnd1 | rnd2 << 16) ^ time;
+ return rnd;
+
+#elif defined __APPLE__
+ // Same approach as for win32, except that the random number generator
+ // is better (see https://developer.apple.com/legacy/library/documentation/Darwin/Reference/ManPages/man3/random.3.html#//apple_ref/doc/man/3/random).
+ uint64_t rnd = ::random() ^ mach_absolute_time();
+ return rnd;
+
+#else
+ // Augment the current time with pseudo random number generation
+ // to ensure that we get different seeds if we try to generate seeds
+ // faster than the clock resolution.
+ timespec ts;
+ clock_gettime(CLOCK_REALTIME, &ts);
+ uint64_t rnd = ::random() ^ ts.tv_nsec;
+ return rnd;
+#endif
+}
+
+static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE unsigned PCG_XSH_RS_generator(uint64_t* state) {
+ // TODO: Unify with the implementation in the non blocking thread pool.
+ uint64_t current = *state;
+ // Update the internal state
+ *state = current * 6364136223846793005ULL + 0xda3e39cb94b95bdbULL;
+ // Generate the random output (using the PCG-XSH-RS scheme)
+ return static_cast<unsigned>((current ^ (current >> 22)) >> (22 + (current >> 61)));
+}
+
+static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE uint64_t PCG_XSH_RS_state(uint64_t seed) {
+ seed = seed ? seed : get_random_seed();
+ return seed * 6364136223846793005ULL + 0xda3e39cb94b95bdbULL;
+}
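+
+// Note: 6364136223846793005ULL is the multiplier of Knuth's MMIX LCG. The
+// XSH-RS output function above xorshifts the high bits into the low ones and
+// then applies a state-dependent random shift, as described in the PCG paper
+// (http://www.pcg-random.org).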
+
+} // namespace
+
+
+template <typename T> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+T RandomToTypeUniform(uint64_t* state) {
+ unsigned rnd = PCG_XSH_RS_generator(state);
+ return static_cast<T>(rnd);
+}
+
+
+template <> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+Eigen::half RandomToTypeUniform<Eigen::half>(uint64_t* state) {
+ Eigen::half result;
+ // Generate 10 random bits for the mantissa
+ unsigned rnd = PCG_XSH_RS_generator(state);
+ result.x = static_cast<uint16_t>(rnd & 0x3ffu);
+ // Set the (biased) exponent to 15, making the value uniform in [1, 2)
+ result.x |= (static_cast<uint16_t>(15) << 10);
+ // Shift the result into [0, 1)
+ return result - Eigen::half(1.0f);
+}
+
+
+template <> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+float RandomToTypeUniform<float>(uint64_t* state) {
+ typedef union {
+ uint32_t raw;
+ float fp;
+ } internal;
+ internal result;
+ // Generate 23 random bits for the mantissa
+ const unsigned rnd = PCG_XSH_RS_generator(state);
+ result.raw = rnd & 0x7fffffu;
+ // Set the (biased) exponent to 127, making the value uniform in [1, 2)
+ result.raw |= (static_cast<uint32_t>(127) << 23);
+ // Shift the result into [0, 1)
+ return result.fp - 1.0f;
+}
+
+template <> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+double RandomToTypeUniform<double>(uint64_t* state) {
+ typedef union {
+ uint64_t raw;
+ double dp;
+ } internal;
+ internal result;
+ result.raw = 0;
+ // Generate 52 random bits for the mantissa
+ // First generate the upper 20 bits
+ unsigned rnd1 = PCG_XSH_RS_generator(state) & 0xfffffu;
+ // Then generate the lower 32 bits
+ unsigned rnd2 = PCG_XSH_RS_generator(state);
+ result.raw = (static_cast<uint64_t>(rnd1) << 32) | rnd2;
+ // Set the (biased) exponent to 1023, making the value uniform in [1, 2)
+ result.raw |= (static_cast<uint64_t>(1023) << 52);
+ // Shift the result into [0, 1)
+ return result.dp - 1.0;
+}
+
+template <> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+std::complex<float> RandomToTypeUniform<std::complex<float> >(uint64_t* state) {
+ return std::complex<float>(RandomToTypeUniform<float>(state),
+ RandomToTypeUniform<float>(state));
+}
+template <> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+std::complex<double> RandomToTypeUniform<std::complex<double> >(uint64_t* state) {
+ return std::complex<double>(RandomToTypeUniform<double>(state),
+ RandomToTypeUniform<double>(state));
+}
+
+template <typename T> class UniformRandomGenerator {
+ public:
+ static const bool PacketAccess = true;
+
+ // Uses the given "seed" if non-zero, otherwise uses a random seed.
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE UniformRandomGenerator(
+ uint64_t seed = 0) {
+ m_state = PCG_XSH_RS_state(seed);
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE UniformRandomGenerator(
+ const UniformRandomGenerator& other) {
+ m_state = other.m_state;
+ }
+
+ template<typename Index> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ T operator()(Index i) const {
+ uint64_t local_state = m_state + i;
+ T result = RandomToTypeUniform<T>(&local_state);
+ m_state = local_state;
+ return result;
+ }
+
+ template<typename Packet, typename Index> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Packet packetOp(Index i) const {
+ const int packetSize = internal::unpacket_traits<Packet>::size;
+ EIGEN_ALIGN_MAX T values[packetSize];
+ uint64_t local_state = m_state + i;
+ for (int j = 0; j < packetSize; ++j) {
+ values[j] = RandomToTypeUniform<T>(&local_state);
+ }
+ m_state = local_state;
+ return internal::pload<Packet>(values);
+ }
+
+ private:
+ mutable uint64_t m_state;
+};
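+
+// Illustrative usage (a sketch): the generator plugs into the templated
+// setRandom() member of writable tensor expressions, e.g.
+//   Eigen::Tensor<float, 2> t(3, 3);
+//   t.setRandom<Eigen::internal::UniformRandomGenerator<float> >();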
+
+template <typename Scalar>
+struct functor_traits<UniformRandomGenerator<Scalar> > {
+ enum {
+ // Rough estimate for floating point, multiplied by ceil(sizeof(T) / sizeof(float)).
+ Cost = 12 * NumTraits<Scalar>::AddCost *
+ ((sizeof(Scalar) + sizeof(float) - 1) / sizeof(float)),
+ PacketAccess = UniformRandomGenerator<Scalar>::PacketAccess
+ };
+};
+
+
+
+template <typename T> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+T RandomToTypeNormal(uint64_t* state) {
+ // Use the ratio-of-uniforms method to generate numbers following a normal
+ // distribution. See for example Numerical Recipes, chapter 7.3.9, for the
+ // details.
+ T u, v, q;
+ do {
+ u = RandomToTypeUniform<T>(state);
+ v = T(1.7156) * (RandomToTypeUniform<T>(state) - T(0.5));
+ const T x = u - T(0.449871);
+ const T y = numext::abs(v) + T(0.386595);
+ q = x*x + y * (T(0.196)*y - T(0.25472)*x);
+ } while (q > T(0.27597) &&
+ (q > T(0.27846) || v*v > T(-4) * numext::log(u) * u*u));
+
+ return v/u;
+}
+
+template <> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+std::complex<float> RandomToTypeNormal<std::complex<float> >(uint64_t* state) {
+ return std::complex<float>(RandomToTypeNormal<float>(state),
+ RandomToTypeNormal<float>(state));
+}
+template <> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+std::complex<double> RandomToTypeNormal<std::complex<double> >(uint64_t* state) {
+ return std::complex<double>(RandomToTypeNormal<double>(state),
+ RandomToTypeNormal<double>(state));
+}
+
+
+template <typename T> class NormalRandomGenerator {
+ public:
+ static const bool PacketAccess = true;
+
+ // Uses the given "seed" if non-zero, otherwise uses a random seed.
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE NormalRandomGenerator(uint64_t seed = 0) {
+ m_state = PCG_XSH_RS_state(seed);
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE NormalRandomGenerator(
+ const NormalRandomGenerator& other) {
+ m_state = other.m_state;
+ }
+
+ template<typename Index> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ T operator()(Index i) const {
+ uint64_t local_state = m_state + i;
+ T result = RandomToTypeNormal<T>(&local_state);
+ m_state = local_state;
+ return result;
+ }
+
+ template<typename Packet, typename Index> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Packet packetOp(Index i) const {
+ const int packetSize = internal::unpacket_traits<Packet>::size;
+ EIGEN_ALIGN_MAX T values[packetSize];
+ uint64_t local_state = m_state + i;
+ for (int j = 0; j < packetSize; ++j) {
+ values[j] = RandomToTypeNormal<T>(&local_state);
+ }
+ m_state = local_state;
+ return internal::pload<Packet>(values);
+ }
+
+ private:
+ mutable uint64_t m_state;
+};
+
+
+template <typename Scalar>
+struct functor_traits<NormalRandomGenerator<Scalar> > {
+ enum {
+ // On average we need to generate about 3 random numbers per output value,
+ // plus roughly 15 multiplies, 8 adds and 1.5 logarithms.
+ Cost = 3 * functor_traits<UniformRandomGenerator<Scalar> >::Cost +
+ 15 * NumTraits<Scalar>::MulCost + 8 * NumTraits<Scalar>::AddCost +
+ 3 * functor_traits<scalar_log_op<Scalar> >::Cost / 2,
+ PacketAccess = NormalRandomGenerator<Scalar>::PacketAccess
+ };
+};
+
+
+} // end namespace internal
+} // end namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_RANDOM_H
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h b/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h
new file mode 100644
index 000000000..41d0d0022
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h
@@ -0,0 +1,781 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+// Copyright (C) 2016 Mehdi Goli, Codeplay Software Ltd <eigen@codeplay.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_REDUCTION_H
+#define EIGEN_CXX11_TENSOR_TENSOR_REDUCTION_H
+
+namespace Eigen {
+
+/** \class TensorReduction
+ * \ingroup CXX11_Tensor_Module
+ *
+ * \brief Tensor reduction class.
+ *
+ */
+
+namespace internal {
+ template<typename Op, typename Dims, typename XprType,template <class> class MakePointer_ >
+ struct traits<TensorReductionOp<Op, Dims, XprType, MakePointer_> >
+ : traits<XprType>
+{
+ typedef traits<XprType> XprTraits;
+ typedef typename XprTraits::Scalar Scalar;
+ typedef typename XprTraits::StorageKind StorageKind;
+ typedef typename XprTraits::Index Index;
+ typedef typename XprType::Nested Nested;
+ static const int NumDimensions = XprTraits::NumDimensions - array_size<Dims>::value;
+ static const int Layout = XprTraits::Layout;
+
+ template <class T> struct MakePointer {
+ // Intermediate typedef to work around an MSVC issue.
+ typedef MakePointer_<T> MakePointerT;
+ typedef typename MakePointerT::Type Type;
+ };
+};
+
+template<typename Op, typename Dims, typename XprType, template <class> class MakePointer_>
+struct eval<TensorReductionOp<Op, Dims, XprType, MakePointer_>, Eigen::Dense>
+{
+ typedef const TensorReductionOp<Op, Dims, XprType, MakePointer_>& type;
+};
+
+template<typename Op, typename Dims, typename XprType, template <class> class MakePointer_>
+struct nested<TensorReductionOp<Op, Dims, XprType, MakePointer_>, 1, typename eval<TensorReductionOp<Op, Dims, XprType, MakePointer_> >::type>
+{
+ typedef TensorReductionOp<Op, Dims, XprType, MakePointer_> type;
+};
+
+
+template <typename OutputDims> struct DimInitializer {
+ template <typename InputDims, typename ReducedDims> EIGEN_DEVICE_FUNC
+ static void run(const InputDims& input_dims,
+ const array<bool, internal::array_size<InputDims>::value>& reduced,
+ OutputDims* output_dims, ReducedDims* reduced_dims) {
+ const int NumInputDims = internal::array_size<InputDims>::value;
+ int outputIndex = 0;
+ int reduceIndex = 0;
+ for (int i = 0; i < NumInputDims; ++i) {
+ if (reduced[i]) {
+ (*reduced_dims)[reduceIndex] = input_dims[i];
+ ++reduceIndex;
+ } else {
+ (*output_dims)[outputIndex] = input_dims[i];
+ ++outputIndex;
+ }
+ }
+ }
+};
+
+template <> struct DimInitializer<Sizes<> > {
+ template <typename InputDims, typename Index, size_t Rank> EIGEN_DEVICE_FUNC
+ static void run(const InputDims& input_dims, const array<bool, Rank>&,
+ Sizes<>*, array<Index, Rank>* reduced_dims) {
+ const int NumInputDims = internal::array_size<InputDims>::value;
+ for (int i = 0; i < NumInputDims; ++i) {
+ (*reduced_dims)[i] = input_dims[i];
+ }
+ }
+};
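+
+// The Sizes<> specialization covers full reductions: every input dimension is
+// reduced and the output is a rank-0 tensor, so only reduced_dims needs to be
+// filled in.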
+
+
+template <typename ReducedDims, int NumTensorDims, int Layout>
+struct are_inner_most_dims {
+ static const bool value = false;
+};
+template <typename ReducedDims, int NumTensorDims, int Layout>
+struct preserve_inner_most_dims {
+ static const bool value = false;
+};
+
+#if EIGEN_HAS_CONSTEXPR && EIGEN_HAS_VARIADIC_TEMPLATES
+template <typename ReducedDims, int NumTensorDims>
+struct are_inner_most_dims<ReducedDims, NumTensorDims, ColMajor>{
+ static const bool tmp1 = indices_statically_known_to_increase<ReducedDims>();
+ static const bool tmp2 = index_statically_eq<ReducedDims>(0, 0);
+ static const bool tmp3 = index_statically_eq<ReducedDims>(array_size<ReducedDims>::value-1, array_size<ReducedDims>::value-1);
+ static const bool value = tmp1 & tmp2 & tmp3;
+};
+template <typename ReducedDims, int NumTensorDims>
+struct are_inner_most_dims<ReducedDims, NumTensorDims, RowMajor>{
+ static const bool tmp1 = indices_statically_known_to_increase<ReducedDims>();
+ static const bool tmp2 = index_statically_eq<ReducedDims>(0, NumTensorDims - array_size<ReducedDims>::value);
+ static const bool tmp3 = index_statically_eq<ReducedDims>(array_size<ReducedDims>::value - 1, NumTensorDims - 1);
+ static const bool value = tmp1 & tmp2 & tmp3;
+
+};
+template <typename ReducedDims, int NumTensorDims>
+struct preserve_inner_most_dims<ReducedDims, NumTensorDims, ColMajor>{
+ static const bool tmp1 = indices_statically_known_to_increase<ReducedDims>();
+ static const bool tmp2 = index_statically_gt<ReducedDims>(0, 0);
+ static const bool value = tmp1 & tmp2;
+
+};
+template <typename ReducedDims, int NumTensorDims>
+struct preserve_inner_most_dims<ReducedDims, NumTensorDims, RowMajor>{
+ static const bool tmp1 = indices_statically_known_to_increase<ReducedDims>();
+ static const bool tmp2 = index_statically_lt<ReducedDims>(array_size<ReducedDims>::value - 1, NumTensorDims - 1);
+ static const bool value = tmp1 & tmp2;
+};
+#endif
+
+
+template <int DimIndex, typename Self, typename Op>
+struct GenericDimReducer {
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const Self& self, typename Self::Index firstIndex, Op& reducer, typename Self::CoeffReturnType* accum) {
+ EIGEN_STATIC_ASSERT((DimIndex > 0), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ for (int j = 0; j < self.m_reducedDims[DimIndex]; ++j) {
+ const typename Self::Index input = firstIndex + j * self.m_reducedStrides[DimIndex];
+ GenericDimReducer<DimIndex-1, Self, Op>::reduce(self, input, reducer, accum);
+ }
+ }
+};
+template <typename Self, typename Op>
+struct GenericDimReducer<0, Self, Op> {
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const Self& self, typename Self::Index firstIndex, Op& reducer, typename Self::CoeffReturnType* accum) {
+ for (int j = 0; j < self.m_reducedDims[0]; ++j) {
+ const typename Self::Index input = firstIndex + j * self.m_reducedStrides[0];
+ reducer.reduce(self.m_impl.coeff(input), accum);
+ }
+ }
+};
+template <typename Self, typename Op>
+struct GenericDimReducer<-1, Self, Op> {
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const Self& self, typename Self::Index index, Op& reducer, typename Self::CoeffReturnType* accum) {
+ reducer.reduce(self.m_impl.coeff(index), accum);
+ }
+};
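+
+// The specializations above unroll one nested loop per reduced dimension at
+// compile time: DimIndex counts down to the innermost reduced dimension (0),
+// while the -1 specialization handles the degenerate case where there are no
+// reduced dimensions left and the coefficient is accumulated directly.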
+
+template <typename Self, typename Op, bool Vectorizable = (Self::InputPacketAccess & Op::PacketAccess)>
+struct InnerMostDimReducer {
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename Self::CoeffReturnType reduce(const Self& self, typename Self::Index firstIndex, typename Self::Index numValuesToReduce, Op& reducer) {
+ typename Self::CoeffReturnType accum = reducer.initialize();
+ for (typename Self::Index j = 0; j < numValuesToReduce; ++j) {
+ reducer.reduce(self.m_impl.coeff(firstIndex + j), &accum);
+ }
+ return reducer.finalize(accum);
+ }
+};
+
+template <typename Self, typename Op>
+struct InnerMostDimReducer<Self, Op, true> {
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename Self::CoeffReturnType reduce(const Self& self, typename Self::Index firstIndex, typename Self::Index numValuesToReduce, Op& reducer) {
+ const int packetSize = internal::unpacket_traits<typename Self::PacketReturnType>::size;
+ const typename Self::Index VectorizedSize = (numValuesToReduce / packetSize) * packetSize;
+ typename Self::PacketReturnType p = reducer.template initializePacket<typename Self::PacketReturnType>();
+ for (typename Self::Index j = 0; j < VectorizedSize; j += packetSize) {
+ reducer.reducePacket(self.m_impl.template packet<Unaligned>(firstIndex + j), &p);
+ }
+ typename Self::CoeffReturnType accum = reducer.initialize();
+ for (typename Self::Index j = VectorizedSize; j < numValuesToReduce; ++j) {
+ reducer.reduce(self.m_impl.coeff(firstIndex + j), &accum);
+ }
+ return reducer.finalizeBoth(accum, p);
+ }
+};
+
+template <int DimIndex, typename Self, typename Op, bool vectorizable = (Self::InputPacketAccess & Op::PacketAccess)>
+struct InnerMostDimPreserver {
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const Self&, typename Self::Index, Op&, typename Self::PacketReturnType*) {
+ eigen_assert(false && "should never be called");
+ }
+};
+
+template <int DimIndex, typename Self, typename Op>
+struct InnerMostDimPreserver<DimIndex, Self, Op, true> {
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const Self& self, typename Self::Index firstIndex, Op& reducer, typename Self::PacketReturnType* accum) {
+ EIGEN_STATIC_ASSERT((DimIndex > 0), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ for (typename Self::Index j = 0; j < self.m_reducedDims[DimIndex]; ++j) {
+ const typename Self::Index input = firstIndex + j * self.m_reducedStrides[DimIndex];
+ InnerMostDimPreserver<DimIndex-1, Self, Op>::reduce(self, input, reducer, accum);
+ }
+ }
+};
+
+template <typename Self, typename Op>
+struct InnerMostDimPreserver<0, Self, Op, true> {
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const Self& self, typename Self::Index firstIndex, Op& reducer, typename Self::PacketReturnType* accum) {
+ for (typename Self::Index j = 0; j < self.m_reducedDims[0]; ++j) {
+ const typename Self::Index input = firstIndex + j * self.m_reducedStrides[0];
+ reducer.reducePacket(self.m_impl.template packet<Unaligned>(input), accum);
+ }
+ }
+};
+template <typename Self, typename Op>
+struct InnerMostDimPreserver<-1, Self, Op, true> {
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const Self&, typename Self::Index, Op&, typename Self::PacketReturnType*) {
+ eigen_assert(false && "should never be called");
+ }
+};
+
+// Default full reducer
+template <typename Self, typename Op, typename Device, bool Vectorizable = (Self::InputPacketAccess & Op::PacketAccess)>
+struct FullReducer {
+ static const bool HasOptimizedImplementation = false;
+
+ static EIGEN_DEVICE_FUNC void run(const Self& self, Op& reducer, const Device&, typename Self::CoeffReturnType* output) {
+ const typename Self::Index num_coeffs = array_prod(self.m_impl.dimensions());
+ *output = InnerMostDimReducer<Self, Op, Vectorizable>::reduce(self, 0, num_coeffs, reducer);
+ }
+};
+
+
+#ifdef EIGEN_USE_THREADS
+// Multithreaded full reducers
+template <typename Self, typename Op,
+ bool Vectorizable = (Self::InputPacketAccess & Op::PacketAccess)>
+struct FullReducerShard {
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(const Self& self, typename Self::Index firstIndex,
+ typename Self::Index numValuesToReduce, Op& reducer,
+ typename Self::CoeffReturnType* output) {
+ *output = InnerMostDimReducer<Self, Op, Vectorizable>::reduce(
+ self, firstIndex, numValuesToReduce, reducer);
+ }
+};
+
+// Multithreaded full reducer
+template <typename Self, typename Op, bool Vectorizable>
+struct FullReducer<Self, Op, ThreadPoolDevice, Vectorizable> {
+ static const bool HasOptimizedImplementation = !Op::IsStateful;
+ static const int PacketSize =
+ unpacket_traits<typename Self::PacketReturnType>::size;
+
+ // Launch one reducer per thread and accumulate the results.
+ static void run(const Self& self, Op& reducer, const ThreadPoolDevice& device,
+ typename Self::CoeffReturnType* output) {
+ typedef typename Self::Index Index;
+ const Index num_coeffs = array_prod(self.m_impl.dimensions());
+ if (num_coeffs == 0) {
+ *output = reducer.finalize(reducer.initialize());
+ return;
+ }
+ const TensorOpCost cost =
+ self.m_impl.costPerCoeff(Vectorizable) +
+ TensorOpCost(0, 0, internal::functor_traits<Op>::Cost, Vectorizable,
+ PacketSize);
+ const int num_threads = TensorCostModel<ThreadPoolDevice>::numThreads(
+ num_coeffs, cost, device.numThreads());
+ if (num_threads == 1) {
+ *output =
+ InnerMostDimReducer<Self, Op, Vectorizable>::reduce(self, 0, num_coeffs, reducer);
+ return;
+ }
+ const Index blocksize =
+ std::floor<Index>(static_cast<float>(num_coeffs) / num_threads);
+ const Index numblocks = blocksize > 0 ? num_coeffs / blocksize : 0;
+ eigen_assert(num_coeffs >= numblocks * blocksize);
+
+ Barrier barrier(internal::convert_index<unsigned int>(numblocks));
+ MaxSizeVector<typename Self::CoeffReturnType> shards(numblocks, reducer.initialize());
+ for (Index i = 0; i < numblocks; ++i) {
+ device.enqueue_with_barrier(&barrier, &FullReducerShard<Self, Op, Vectorizable>::run,
+ self, i * blocksize, blocksize, reducer,
+ &shards[i]);
+ }
+ typename Self::CoeffReturnType finalShard;
+ if (numblocks * blocksize < num_coeffs) {
+ finalShard = InnerMostDimReducer<Self, Op, Vectorizable>::reduce(
+ self, numblocks * blocksize, num_coeffs - numblocks * blocksize,
+ reducer);
+ } else {
+ finalShard = reducer.initialize();
+ }
+ barrier.Wait();
+
+ for (Index i = 0; i < numblocks; ++i) {
+ reducer.reduce(shards[i], &finalShard);
+ }
+ *output = reducer.finalize(finalShard);
+ }
+};
+
+#endif
+
+
+// Default inner reducer
+template <typename Self, typename Op, typename Device>
+struct InnerReducer {
+ static const bool HasOptimizedImplementation = false;
+
+ EIGEN_DEVICE_FUNC static bool run(const Self&, Op&, const Device&, typename Self::CoeffReturnType*, typename Self::Index, typename Self::Index) {
+ eigen_assert(false && "Not implemented");
+ return true;
+ }
+};
+
+// Default outer reducer
+template <typename Self, typename Op, typename Device>
+struct OuterReducer {
+ static const bool HasOptimizedImplementation = false;
+
+ EIGEN_DEVICE_FUNC static bool run(const Self&, Op&, const Device&, typename Self::CoeffReturnType*, typename Self::Index, typename Self::Index) {
+ eigen_assert(false && "Not implemented");
+ return true;
+ }
+};
+
+
+#if defined(EIGEN_USE_GPU) && defined(__CUDACC__)
+template <int B, int N, typename S, typename R, typename I>
+__global__ void FullReductionKernel(R, const S, I, typename S::CoeffReturnType*, unsigned int*);
+
+
+#ifdef EIGEN_HAS_CUDA_FP16
+template <typename S, typename R, typename I>
+__global__ void ReductionInitFullReduxKernelHalfFloat(R, const S, I, half2*);
+template <int B, int N, typename S, typename R, typename I>
+__global__ void FullReductionKernelHalfFloat(R, const S, I, half*, half2*);
+template <int NPT, typename S, typename R, typename I>
+__global__ void InnerReductionKernelHalfFloat(R, const S, I, I, half*);
+
+#endif
+
+template <int NPT, typename S, typename R, typename I>
+__global__ void InnerReductionKernel(R, const S, I, I, typename S::CoeffReturnType*);
+
+template <int NPT, typename S, typename R, typename I>
+__global__ void OuterReductionKernel(R, const S, I, I, typename S::CoeffReturnType*);
+#endif
+
+} // end namespace internal
+
+
+template <typename Op, typename Dims, typename XprType, template <class> class MakePointer_>
+class TensorReductionOp : public TensorBase<TensorReductionOp<Op, Dims, XprType, MakePointer_>, ReadOnlyAccessors> {
+ public:
+ typedef typename Eigen::internal::traits<TensorReductionOp>::Scalar Scalar;
+ typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
+ typedef typename internal::remove_const<typename XprType::CoeffReturnType>::type CoeffReturnType;
+ typedef typename Eigen::internal::nested<TensorReductionOp>::type Nested;
+ typedef typename Eigen::internal::traits<TensorReductionOp>::StorageKind StorageKind;
+ typedef typename Eigen::internal::traits<TensorReductionOp>::Index Index;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ TensorReductionOp(const XprType& expr, const Dims& dims) : m_expr(expr), m_dims(dims)
+ { }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ TensorReductionOp(const XprType& expr, const Dims& dims, const Op& reducer) : m_expr(expr), m_dims(dims), m_reducer(reducer)
+ { }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const XprType& expression() const { return m_expr; }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const Dims& dims() const { return m_dims; }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const Op& reducer() const { return m_reducer; }
+
+ protected:
+ typename XprType::Nested m_expr;
+ const Dims m_dims;
+ const Op m_reducer;
+};
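+
+// Illustrative usage (a sketch): reduction expressions are normally built
+// through the TensorBase helpers, e.g. summing over dimensions 1 and 3 of a
+// 4D tensor:
+//   Eigen::Tensor<float, 4> t(2, 3, 4, 5);
+//   t.setRandom();
+//   Eigen::array<int, 2> dims({1, 3});
+//   Eigen::Tensor<float, 2> s = t.sum(dims);  // s has dimensions (2, 4)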
+
+
+// Eval as rvalue
+template<typename Op, typename Dims, typename ArgType, template <class> class MakePointer_, typename Device>
+struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>, Device>
+{
+ typedef TensorReductionOp<Op, Dims, ArgType, MakePointer_> XprType;
+ typedef typename XprType::Index Index;
+ typedef ArgType ChildType;
+ typedef typename TensorEvaluator<ArgType, Device>::Dimensions InputDimensions;
+ static const int NumInputDims = internal::array_size<InputDimensions>::value;
+ static const int NumReducedDims = internal::array_size<Dims>::value;
+ static const int NumOutputDims = NumInputDims - NumReducedDims;
+ typedef typename internal::conditional<NumOutputDims==0, Sizes<>, DSizes<Index, NumOutputDims> >::type Dimensions;
+ typedef typename XprType::Scalar Scalar;
+ typedef TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>, Device> Self;
+ static const bool InputPacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess;
+ typedef typename internal::remove_const<typename XprType::CoeffReturnType>::type CoeffReturnType;
+ typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
+ static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
+
+ enum {
+ IsAligned = false,
+ PacketAccess = Self::InputPacketAccess && Op::PacketAccess,
+ Layout = TensorEvaluator<ArgType, Device>::Layout,
+ CoordAccess = false, // to be implemented
+ RawAccess = false
+ };
+
+ static const bool ReducingInnerMostDims = internal::are_inner_most_dims<Dims, NumInputDims, Layout>::value;
+ static const bool PreservingInnerMostDims = internal::preserve_inner_most_dims<Dims, NumInputDims, Layout>::value;
+ static const bool RunningFullReduction = (NumOutputDims==0);
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
+ : m_impl(op.expression(), device), m_reducer(op.reducer()), m_result(NULL), m_device(device), m_xpr_dims(op.dims())
+ {
+ EIGEN_STATIC_ASSERT((NumInputDims >= NumReducedDims), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT((!ReducingInnerMostDims | !PreservingInnerMostDims | (NumReducedDims == NumInputDims)),
+ YOU_MADE_A_PROGRAMMING_MISTAKE);
+
+ // Build the bitmap indicating if an input dimension is reduced or not.
+ for (int i = 0; i < NumInputDims; ++i) {
+ m_reduced[i] = false;
+ }
+ for (int i = 0; i < NumReducedDims; ++i) {
+ eigen_assert(op.dims()[i] >= 0);
+ eigen_assert(op.dims()[i] < NumInputDims);
+ m_reduced[op.dims()[i]] = true;
+ }
+
+ const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
+ internal::DimInitializer<Dimensions>::run(input_dims, m_reduced, &m_dimensions, &m_reducedDims);
+
+ // Precompute output strides.
+ if (NumOutputDims > 0) {
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ m_outputStrides[0] = 1;
+ for (int i = 1; i < NumOutputDims; ++i) {
+ m_outputStrides[i] = m_outputStrides[i - 1] * m_dimensions[i - 1];
+ }
+ } else {
+ m_outputStrides.back() = 1;
+ for (int i = NumOutputDims - 2; i >= 0; --i) {
+ m_outputStrides[i] = m_outputStrides[i + 1] * m_dimensions[i + 1];
+ }
+ }
+ }
+
+ // Precompute input strides.
+ if (NumInputDims > 0) {
+ array<Index, NumInputDims> input_strides;
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ input_strides[0] = 1;
+ for (int i = 1; i < NumInputDims; ++i) {
+ input_strides[i] = input_strides[i-1] * input_dims[i-1];
+ }
+ } else {
+ input_strides.back() = 1;
+ for (int i = NumInputDims - 2; i >= 0; --i) {
+ input_strides[i] = input_strides[i + 1] * input_dims[i + 1];
+ }
+ }
+
+ int outputIndex = 0;
+ int reduceIndex = 0;
+ for (int i = 0; i < NumInputDims; ++i) {
+ if (m_reduced[i]) {
+ m_reducedStrides[reduceIndex] = input_strides[i];
+ ++reduceIndex;
+ } else {
+ m_preservedStrides[outputIndex] = input_strides[i];
+ ++outputIndex;
+ }
+ }
+ }
+
+ // Special case for full reductions
+ if (NumOutputDims == 0) {
+ m_preservedStrides[0] = internal::array_prod(input_dims);
+ }
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
+
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool evalSubExprsIfNeeded(typename MakePointer_<CoeffReturnType>::Type data) {
+ m_impl.evalSubExprsIfNeeded(NULL);
+
+ // Use the FullReducer if possible.
+ if ((RunningFullReduction && RunningOnSycl) || (RunningFullReduction &&
+ internal::FullReducer<Self, Op, Device>::HasOptimizedImplementation &&
+ ((RunningOnGPU && (m_device.majorDeviceVersion() >= 3)) ||
+ !RunningOnGPU))) {
+ bool need_assign = false;
+ if (!data) {
+ m_result = static_cast<CoeffReturnType*>(m_device.allocate(sizeof(CoeffReturnType)));
+ data = m_result;
+ need_assign = true;
+ }
+ Op reducer(m_reducer);
+ internal::FullReducer<Self, Op, Device>::run(*this, reducer, m_device, data);
+ return need_assign;
+ }
+    else if (RunningOnSycl) {
+ const Index num_values_to_reduce = internal::array_prod(m_reducedDims);
+ const Index num_coeffs_to_preserve = internal::array_prod(m_dimensions);
+ if (!data) {
+ data = static_cast<CoeffReturnType*>(m_device.allocate(sizeof(CoeffReturnType) * num_coeffs_to_preserve));
+ m_result = data;
+ }
+ Op reducer(m_reducer);
+ internal::InnerReducer<Self, Op, Device>::run(*this, reducer, m_device, data, num_values_to_reduce, num_coeffs_to_preserve);
+ return (m_result != NULL);
+ }
+
+ // Attempt to use an optimized reduction.
+ else if (RunningOnGPU && (m_device.majorDeviceVersion() >= 3)) {
+ bool reducing_inner_dims = true;
+ for (int i = 0; i < NumReducedDims; ++i) {
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ reducing_inner_dims &= m_reduced[i];
+ } else {
+ reducing_inner_dims &= m_reduced[NumInputDims - 1 - i];
+ }
+ }
+ if (internal::InnerReducer<Self, Op, Device>::HasOptimizedImplementation &&
+ (reducing_inner_dims || ReducingInnerMostDims)) {
+ const Index num_values_to_reduce = internal::array_prod(m_reducedDims);
+ const Index num_coeffs_to_preserve = internal::array_prod(m_dimensions);
+ if (!data) {
+ if (num_coeffs_to_preserve < 1024 && num_values_to_reduce > num_coeffs_to_preserve && num_values_to_reduce > 128) {
+ data = static_cast<CoeffReturnType*>(m_device.allocate(sizeof(CoeffReturnType) * num_coeffs_to_preserve));
+ m_result = data;
+ }
+ else {
+ return true;
+ }
+ }
+ Op reducer(m_reducer);
+ if (internal::InnerReducer<Self, Op, Device>::run(*this, reducer, m_device, data, num_values_to_reduce, num_coeffs_to_preserve)) {
+ if (m_result) {
+ m_device.deallocate(m_result);
+ m_result = NULL;
+ }
+ return true;
+ } else {
+ return (m_result != NULL);
+ }
+ }
+
+ bool preserving_inner_dims = true;
+ for (int i = 0; i < NumReducedDims; ++i) {
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ preserving_inner_dims &= m_reduced[NumInputDims - 1 - i];
+ } else {
+ preserving_inner_dims &= m_reduced[i];
+ }
+ }
+ if (internal::OuterReducer<Self, Op, Device>::HasOptimizedImplementation &&
+ preserving_inner_dims) {
+ const Index num_values_to_reduce = internal::array_prod(m_reducedDims);
+ const Index num_coeffs_to_preserve = internal::array_prod(m_dimensions);
+ if (!data) {
+ if (num_coeffs_to_preserve < 1024 && num_values_to_reduce > num_coeffs_to_preserve && num_values_to_reduce > 32) {
+ data = static_cast<CoeffReturnType*>(m_device.allocate(sizeof(CoeffReturnType) * num_coeffs_to_preserve));
+ m_result = data;
+ }
+ else {
+ return true;
+ }
+ }
+ Op reducer(m_reducer);
+ if (internal::OuterReducer<Self, Op, Device>::run(*this, reducer, m_device, data, num_values_to_reduce, num_coeffs_to_preserve)) {
+ if (m_result) {
+ m_device.deallocate(m_result);
+ m_result = NULL;
+ }
+ return true;
+ } else {
+ return (m_result != NULL);
+ }
+ }
+ }
+ return true;
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
+ m_impl.cleanup();
+ if (m_result) {
+ m_device.deallocate(m_result);
+ m_result = NULL;
+ }
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
+ {
+ if ((RunningOnSycl || RunningFullReduction || RunningOnGPU) && m_result) {
+ return *(m_result + index);
+ }
+ Op reducer(m_reducer);
+ if (ReducingInnerMostDims || RunningFullReduction) {
+ const Index num_values_to_reduce =
+ (static_cast<int>(Layout) == static_cast<int>(ColMajor)) ? m_preservedStrides[0] : m_preservedStrides[NumPreservedStrides - 1];
+ return internal::InnerMostDimReducer<Self, Op>::reduce(*this, firstInput(index),
+ num_values_to_reduce, reducer);
+ } else {
+ typename Self::CoeffReturnType accum = reducer.initialize();
+ internal::GenericDimReducer<NumReducedDims-1, Self, Op>::reduce(*this, firstInput(index), reducer, &accum);
+ return reducer.finalize(accum);
+ }
+ }
+
+ // TODO(bsteiner): provide a more efficient implementation.
+ template<int LoadMode>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
+ {
+ EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
+ eigen_assert(index + PacketSize - 1 < Index(internal::array_prod(dimensions())));
+
+ if (RunningOnGPU && m_result) {
+ return internal::pload<PacketReturnType>(m_result + index);
+ }
+
+ EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize];
+ if (ReducingInnerMostDims) {
+ const Index num_values_to_reduce =
+ (static_cast<int>(Layout) == static_cast<int>(ColMajor)) ? m_preservedStrides[0] : m_preservedStrides[NumPreservedStrides - 1];
+ const Index firstIndex = firstInput(index);
+ for (Index i = 0; i < PacketSize; ++i) {
+ Op reducer(m_reducer);
+ values[i] = internal::InnerMostDimReducer<Self, Op>::reduce(*this, firstIndex + i * num_values_to_reduce,
+ num_values_to_reduce, reducer);
+ }
+ } else if (PreservingInnerMostDims) {
+ const Index firstIndex = firstInput(index);
+ const int innermost_dim = (static_cast<int>(Layout) == static_cast<int>(ColMajor)) ? 0 : NumOutputDims - 1;
+      // TBD: extend this to the n innermost dimensions that we preserve.
+ if (((firstIndex % m_dimensions[innermost_dim]) + PacketSize - 1) < m_dimensions[innermost_dim]) {
+ Op reducer(m_reducer);
+ typename Self::PacketReturnType accum = reducer.template initializePacket<typename Self::PacketReturnType>();
+ internal::InnerMostDimPreserver<NumReducedDims-1, Self, Op>::reduce(*this, firstIndex, reducer, &accum);
+ return reducer.finalizePacket(accum);
+ } else {
+ for (int i = 0; i < PacketSize; ++i) {
+ values[i] = coeff(index + i);
+ }
+ }
+ } else {
+ for (int i = 0; i < PacketSize; ++i) {
+ values[i] = coeff(index + i);
+ }
+ }
+ PacketReturnType rslt = internal::pload<PacketReturnType>(values);
+ return rslt;
+ }
+
+ // Must be called after evalSubExprsIfNeeded().
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
+ if (RunningFullReduction && m_result) {
+ return TensorOpCost(sizeof(CoeffReturnType), 0, 0, vectorized, PacketSize);
+ } else {
+ const Index num_values_to_reduce = internal::array_prod(m_reducedDims);
+ const double compute_cost = num_values_to_reduce * internal::functor_traits<Op>::Cost;
+ return m_impl.costPerCoeff(vectorized) * num_values_to_reduce +
+ TensorOpCost(0, 0, compute_cost, vectorized, PacketSize);
+ }
+ }
+
+ EIGEN_DEVICE_FUNC typename MakePointer_<Scalar>::Type data() const { return m_result; }
+ /// required by sycl in order to extract the accessor
+ const TensorEvaluator<ArgType, Device>& impl() const { return m_impl; }
+ /// added for sycl in order to construct the buffer from the sycl device
+  const Device& device() const { return m_device; }
+  /// added for sycl in order to re-construct the reduction eval on the device for the sub-kernel
+  const Dims& xprDims() const { return m_xpr_dims; }
+
+
+ private:
+ template <int, typename, typename> friend struct internal::GenericDimReducer;
+ template <typename, typename, bool> friend struct internal::InnerMostDimReducer;
+ template <int, typename, typename, bool> friend struct internal::InnerMostDimPreserver;
+ template <typename S, typename O, typename D, bool V> friend struct internal::FullReducer;
+#ifdef EIGEN_USE_THREADS
+ template <typename S, typename O, bool V> friend struct internal::FullReducerShard;
+#endif
+#if defined(EIGEN_USE_GPU) && defined(__CUDACC__)
+ template <int B, int N, typename S, typename R, typename I> friend void internal::FullReductionKernel(R, const S, I, typename S::CoeffReturnType*, unsigned int*);
+#ifdef EIGEN_HAS_CUDA_FP16
+ template <typename S, typename R, typename I> friend void internal::ReductionInitFullReduxKernelHalfFloat(R, const S, I, half2*);
+ template <int B, int N, typename S, typename R, typename I> friend void internal::FullReductionKernelHalfFloat(R, const S, I, half*, half2*);
+ template <int NPT, typename S, typename R, typename I> friend void internal::InnerReductionKernelHalfFloat(R, const S, I, I, half*);
+#endif
+ template <int NPT, typename S, typename R, typename I> friend void internal::InnerReductionKernel(R, const S, I, I, typename S::CoeffReturnType*);
+
+ template <int NPT, typename S, typename R, typename I> friend void internal::OuterReductionKernel(R, const S, I, I, typename S::CoeffReturnType*);
+#endif
+
+ template <typename S, typename O, typename D> friend struct internal::InnerReducer;
+
+ // Returns the Index in the input tensor of the first value that needs to be
+ // used to compute the reduction at output index "index".
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index firstInput(Index index) const {
+ if (ReducingInnerMostDims) {
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ return index * m_preservedStrides[0];
+ } else {
+ return index * m_preservedStrides[NumPreservedStrides - 1];
+ }
+ }
+ // TBD: optimize the case where we preserve the innermost dimensions.
+ Index startInput = 0;
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ for (int i = NumOutputDims - 1; i > 0; --i) {
+ // This is index_i in the output tensor.
+ const Index idx = index / m_outputStrides[i];
+ startInput += idx * m_preservedStrides[i];
+ index -= idx * m_outputStrides[i];
+ }
+ if (PreservingInnerMostDims) {
+ eigen_assert(m_preservedStrides[0] == 1);
+ startInput += index;
+ } else {
+ startInput += index * m_preservedStrides[0];
+ }
+ } else {
+ for (int i = 0; i < NumOutputDims - 1; ++i) {
+ // This is index_i in the output tensor.
+ const Index idx = index / m_outputStrides[i];
+ startInput += idx * m_preservedStrides[i];
+ index -= idx * m_outputStrides[i];
+ }
+ if (PreservingInnerMostDims) {
+ eigen_assert(m_preservedStrides[NumPreservedStrides - 1] == 1);
+ startInput += index;
+ } else {
+ startInput += index * m_preservedStrides[NumPreservedStrides - 1];
+ }
+ }
+ return startInput;
+ }
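+
+  // Worked example (illustrative): for a ColMajor 4x5x6 input reduced over dim 1, the output
+  // is 4x6, m_outputStrides is {1, 4} and m_preservedStrides is {1, 20}. Output index
+  // n = i0 + 4*i2 then maps to input index i0 + 20*i2, i.e. element (i0, 0, i2), the first of
+  // the 5 values reduced into output element (i0, i2).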
+
+ // Bitmap indicating if an input dimension is reduced or not.
+ array<bool, NumInputDims> m_reduced;
+ // Dimensions of the output of the operation.
+ Dimensions m_dimensions;
+ // Precomputed strides for the output tensor.
+ array<Index, NumOutputDims> m_outputStrides;
+ // Subset of strides of the input tensor for the non-reduced dimensions.
+ // Indexed by output dimensions.
+ static const int NumPreservedStrides = max_n_1<NumOutputDims>::size;
+ array<Index, NumPreservedStrides> m_preservedStrides;
+
+ // Subset of strides of the input tensor for the reduced dimensions.
+ // Indexed by reduced dimensions.
+ array<Index, NumReducedDims> m_reducedStrides;
+ // Size of the input dimensions that are reduced.
+ // Indexed by reduced dimensions.
+ array<Index, NumReducedDims> m_reducedDims;
+
+ // Evaluator for the input expression.
+ TensorEvaluator<ArgType, Device> m_impl;
+
+ // Operation to apply for computing the reduction.
+ Op m_reducer;
+
+ // For full reductions
+#if defined(EIGEN_USE_GPU) && defined(__CUDACC__)
+ static const bool RunningOnGPU = internal::is_same<Device, Eigen::GpuDevice>::value;
+ static const bool RunningOnSycl = false;
+#elif defined(EIGEN_USE_SYCL)
+  static const bool RunningOnSycl = internal::is_same<typename internal::remove_all<Device>::type, Eigen::SyclDevice>::value;
+  static const bool RunningOnGPU = false;
+#else
+ static const bool RunningOnGPU = false;
+ static const bool RunningOnSycl = false;
+#endif
+ typename MakePointer_<CoeffReturnType>::Type m_result;
+
+ const Device& m_device;
+ const Dims& m_xpr_dims;
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_REDUCTION_H
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorReductionCuda.h b/unsupported/Eigen/CXX11/src/Tensor/TensorReductionCuda.h
new file mode 100644
index 000000000..65638b6a8
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorReductionCuda.h
@@ -0,0 +1,750 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_REDUCTION_CUDA_H
+#define EIGEN_CXX11_TENSOR_TENSOR_REDUCTION_CUDA_H
+
+namespace Eigen {
+namespace internal {
+
+
+#if defined(EIGEN_USE_GPU) && defined(__CUDACC__)
+// Full reducers for GPU, don't vectorize for now
+
+// Reducer function that enables multiple cuda threads to safely accumulate at the same
+// output address. It basically reads the current value of the output variable, and
+// attempts to update it with the new value. If in the meantime another cuda thread
+// has updated the content of the output address, it tries again.
+template <typename T, typename R>
+__device__ EIGEN_ALWAYS_INLINE void atomicReduce(T* output, T accum, R& reducer) {
+#if __CUDA_ARCH__ >= 300
+  if (sizeof(T) == 4) {
+ unsigned int oldval = *reinterpret_cast<unsigned int*>(output);
+ unsigned int newval = oldval;
+ reducer.reduce(accum, reinterpret_cast<T*>(&newval));
+ if (newval == oldval) {
+ return;
+ }
+ unsigned int readback;
+ while ((readback = atomicCAS((unsigned int*)output, oldval, newval)) != oldval) {
+ oldval = readback;
+ newval = oldval;
+ reducer.reduce(accum, reinterpret_cast<T*>(&newval));
+ if (newval == oldval) {
+ return;
+ }
+ }
+ }
+ else if (sizeof(T) == 8) {
+ unsigned long long oldval = *reinterpret_cast<unsigned long long*>(output);
+ unsigned long long newval = oldval;
+ reducer.reduce(accum, reinterpret_cast<T*>(&newval));
+ if (newval == oldval) {
+ return;
+ }
+ unsigned long long readback;
+ while ((readback = atomicCAS((unsigned long long*)output, oldval, newval)) != oldval) {
+ oldval = readback;
+ newval = oldval;
+ reducer.reduce(accum, reinterpret_cast<T*>(&newval));
+ if (newval == oldval) {
+ return;
+ }
+ }
+ }
+ else {
+ assert(0 && "Wordsize not supported");
+ }
+#else
+ assert(0 && "Shouldn't be called on unsupported device");
+#endif
+}
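+
+// Usage sketch (illustrative): a warp leader folds its partial result into the shared
+// output, as the reduction kernels below do:
+//   SumReducer<float> reducer;
+//   if ((threadIdx.x & (warpSize - 1)) == 0) atomicReduce(output, partial, reducer);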
+
+// We extend atomicExch to support extra data types
+template <typename Type>
+__device__ inline Type atomicExchCustom(Type* address, Type val) {
+ return atomicExch(address, val);
+}
+
+template <>
+__device__ inline double atomicExchCustom(double* address, double val) {
+ unsigned long long int* address_as_ull = reinterpret_cast<unsigned long long int*>(address);
+ return __longlong_as_double(atomicExch(address_as_ull, __double_as_longlong(val)));
+}
+
+#ifdef EIGEN_HAS_CUDA_FP16
+template <template <typename T> class R>
+__device__ inline void atomicReduce(half2* output, half2 accum, R<half>& reducer) {
+ unsigned int oldval = *reinterpret_cast<unsigned int*>(output);
+ unsigned int newval = oldval;
+ reducer.reducePacket(accum, reinterpret_cast<half2*>(&newval));
+ if (newval == oldval) {
+ return;
+ }
+ unsigned int readback;
+ while ((readback = atomicCAS((unsigned int*)output, oldval, newval)) != oldval) {
+ oldval = readback;
+ newval = oldval;
+ reducer.reducePacket(accum, reinterpret_cast<half2*>(&newval));
+ if (newval == oldval) {
+ return;
+ }
+ }
+}
+#endif
+
+template <>
+__device__ inline void atomicReduce(float* output, float accum, SumReducer<float>&) {
+#if __CUDA_ARCH__ >= 300
+ atomicAdd(output, accum);
+#else
+ assert(0 && "Shouldn't be called on unsupported device");
+#endif
+}
+
+
+template <typename CoeffType, typename Index>
+__global__ void ReductionInitKernel(const CoeffType val, Index num_preserved_coeffs, CoeffType* output) {
+ const Index thread_id = blockIdx.x * blockDim.x + threadIdx.x;
+ const Index num_threads = blockDim.x * gridDim.x;
+ for (Index i = thread_id; i < num_preserved_coeffs; i += num_threads) {
+ output[i] = val;
+ }
+}
+
+
+template <int BlockSize, int NumPerThread, typename Self,
+ typename Reducer, typename Index>
+__global__ void FullReductionKernel(Reducer reducer, const Self input, Index num_coeffs,
+ typename Self::CoeffReturnType* output, unsigned int* semaphore) {
+#if __CUDA_ARCH__ >= 300
+ // Initialize the output value
+ const Index first_index = blockIdx.x * BlockSize * NumPerThread + threadIdx.x;
+ if (gridDim.x == 1) {
+ if (first_index == 0) {
+ *output = reducer.initialize();
+ }
+ }
+ else {
+ if (threadIdx.x == 0) {
+ unsigned int block = atomicCAS(semaphore, 0u, 1u);
+ if (block == 0) {
+ // We're the first block to run, initialize the output value
+ atomicExchCustom(output, reducer.initialize());
+ __threadfence();
+ atomicExch(semaphore, 2u);
+ }
+ else {
+ // Wait for the first block to initialize the output value.
+ // Use atomicCAS here to ensure that the reads aren't cached
+ unsigned int val;
+ do {
+ val = atomicCAS(semaphore, 2u, 2u);
+ }
+ while (val < 2u);
+ }
+ }
+ }
+
+ __syncthreads();
+
+ eigen_assert(gridDim.x == 1 || *semaphore >= 2u);
+
+ typename Self::CoeffReturnType accum = reducer.initialize();
+ Index max_iter = numext::mini<Index>(num_coeffs - first_index, NumPerThread*BlockSize);
+ for (Index i = 0; i < max_iter; i+=BlockSize) {
+ const Index index = first_index + i;
+ eigen_assert(index < num_coeffs);
+ typename Self::CoeffReturnType val = input.m_impl.coeff(index);
+ reducer.reduce(val, &accum);
+ }
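+  // With the launch parameters used by FullReductionLauncher (block_size = 256,
+  // num_per_thread = 128), each thread above accumulates at most 128 coefficients,
+  // spaced BlockSize elements apart.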
+
+#pragma unroll
+ for (int offset = warpSize/2; offset > 0; offset /= 2) {
+ reducer.reduce(__shfl_down(accum, offset, warpSize), &accum);
+ }
+
+ if ((threadIdx.x & (warpSize - 1)) == 0) {
+ atomicReduce(output, accum, reducer);
+ }
+
+ if (gridDim.x > 1 && threadIdx.x == 0) {
+ // Let the last block reset the semaphore
+ atomicInc(semaphore, gridDim.x + 1);
+ }
+#else
+ assert(0 && "Shouldn't be called on unsupported device");
+#endif
+}
+
+
+#ifdef EIGEN_HAS_CUDA_FP16
+template <typename Self,
+ typename Reducer, typename Index>
+__global__ void ReductionInitFullReduxKernelHalfFloat(Reducer reducer, const Self input, Index num_coeffs, half2* scratch) {
+ eigen_assert(blockDim.x == 1);
+ eigen_assert(gridDim.x == 1);
+ if (num_coeffs % 2 != 0) {
+ half last = input.m_impl.coeff(num_coeffs-1);
+ *scratch = __halves2half2(last, reducer.initialize());
+ } else {
+ *scratch = reducer.template initializePacket<half2>();
+ }
+}
+
+template <typename Self,
+ typename Reducer, typename Index>
+__global__ void ReductionInitKernelHalfFloat(Reducer reducer, const Self input, Index num_coeffs, half* output) {
+ const Index thread_id = blockIdx.x * blockDim.x + threadIdx.x;
+ const Index num_threads = blockDim.x * gridDim.x;
+ const Index num_packets = num_coeffs / 2;
+ for (Index i = thread_id; i < num_packets; i += num_threads) {
+ ((half2*)output)[i] = reducer.template initializePacket<half2>();
+ }
+
+ if (thread_id == 0 && num_coeffs % 2 != 0) {
+ output[num_coeffs-1] = reducer.initialize();
+ }
+}
+
+template <int BlockSize, int NumPerThread, typename Self,
+ typename Reducer, typename Index>
+__global__ void FullReductionKernelHalfFloat(Reducer reducer, const Self input, Index num_coeffs,
+ half* output, half2* scratch) {
+ eigen_assert(NumPerThread % 2 == 0);
+
+ const Index first_index = blockIdx.x * BlockSize * NumPerThread + 2*threadIdx.x;
+
+ // Initialize the output value if it wasn't initialized by the ReductionInitKernel
+ if (gridDim.x == 1 && first_index == 0) {
+ if (num_coeffs % 2 != 0) {
+ half last = input.m_impl.coeff(num_coeffs-1);
+ *scratch = __halves2half2(last, reducer.initialize());
+ } else {
+ *scratch = reducer.template initializePacket<half2>();
+ }
+ __syncthreads();
+ }
+
+ half2 accum = reducer.template initializePacket<half2>();
+ const Index max_iter = numext::mini<Index>((num_coeffs - first_index) / 2, NumPerThread*BlockSize / 2);
+ for (Index i = 0; i < max_iter; i += BlockSize) {
+ const Index index = first_index + 2*i;
+ eigen_assert(index + 1 < num_coeffs);
+ half2 val = input.m_impl.template packet<Unaligned>(index);
+ reducer.reducePacket(val, &accum);
+ }
+
+#pragma unroll
+ for (int offset = warpSize/2; offset > 0; offset /= 2) {
+ reducer.reducePacket(__shfl_down(accum, offset, warpSize), &accum);
+ }
+
+ if ((threadIdx.x & (warpSize - 1)) == 0) {
+ atomicReduce(scratch, accum, reducer);
+ }
+
+ __syncthreads();
+
+ if (gridDim.x == 1 && first_index == 0) {
+ half tmp = __low2half(*scratch);
+ reducer.reduce(__high2half(*scratch), &tmp);
+ *output = tmp;
+ }
+}
+
+template <typename Op>
+__global__ void ReductionCleanupKernelHalfFloat(Op& reducer, half* output, half2* scratch) {
+  eigen_assert(threadIdx.x == 0);  // the cleanup kernel is launched with a single thread
+ half tmp = __low2half(*scratch);
+ reducer.reduce(__high2half(*scratch), &tmp);
+ *output = tmp;
+}
+
+#endif
+
+template <typename Self, typename Op, typename OutputType, bool PacketAccess, typename Enabled = void>
+struct FullReductionLauncher {
+ static void run(const Self&, Op&, const GpuDevice&, OutputType*, typename Self::Index) {
+ assert(false && "Should only be called on doubles, floats and half floats");
+ }
+};
+
+// Specialization for float and double
+template <typename Self, typename Op, typename OutputType, bool PacketAccess>
+struct FullReductionLauncher<
+ Self, Op, OutputType, PacketAccess,
+ typename internal::enable_if<
+ internal::is_same<float, OutputType>::value ||
+ internal::is_same<double, OutputType>::value,
+ void>::type> {
+ static void run(const Self& self, Op& reducer, const GpuDevice& device, OutputType* output, typename Self::Index num_coeffs) {
+ typedef typename Self::Index Index;
+ typedef typename Self::CoeffReturnType Scalar;
+ const int block_size = 256;
+ const int num_per_thread = 128;
+ const int num_blocks = divup<int>(num_coeffs, block_size * num_per_thread);
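+    // e.g. (illustrative) num_coeffs = 1 << 20 yields num_blocks = divup(1048576, 256 * 128) = 32.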
+
+ unsigned int* semaphore = NULL;
+ if (num_blocks > 1) {
+ semaphore = device.semaphore();
+ }
+
+ LAUNCH_CUDA_KERNEL((FullReductionKernel<block_size, num_per_thread, Self, Op, Index>),
+ num_blocks, block_size, 0, device, reducer, self, num_coeffs, output, semaphore);
+ }
+};
+
+#ifdef EIGEN_HAS_CUDA_FP16
+template <typename Self, typename Op>
+struct FullReductionLauncher<Self, Op, Eigen::half, false> {
+ static void run(const Self&, Op&, const GpuDevice&, half*, typename Self::Index) {
+ assert(false && "Should not be called since there is no packet accessor");
+ }
+};
+
+template <typename Self, typename Op>
+struct FullReductionLauncher<Self, Op, Eigen::half, true> {
+ static void run(const Self& self, Op& reducer, const GpuDevice& device, half* output, typename Self::Index num_coeffs) {
+ typedef typename Self::Index Index;
+
+ const int block_size = 256;
+ const int num_per_thread = 128;
+ const int num_blocks = divup<int>(num_coeffs, block_size * num_per_thread);
+ half2* scratch = static_cast<half2*>(device.scratchpad());
+
+ if (num_blocks > 1) {
+      // We initialize the output and the scratchpad outside the reduction kernel when we can't be sure that there
+      // won't be race conditions between multiple thread blocks.
+ LAUNCH_CUDA_KERNEL((ReductionInitFullReduxKernelHalfFloat<Self, Op, Index>),
+ 1, 1, 0, device, reducer, self, num_coeffs, scratch);
+ }
+
+ LAUNCH_CUDA_KERNEL((FullReductionKernelHalfFloat<block_size, num_per_thread, Self, Op, Index>),
+ num_blocks, block_size, 0, device, reducer, self, num_coeffs, output, scratch);
+
+ if (num_blocks > 1) {
+ LAUNCH_CUDA_KERNEL((ReductionCleanupKernelHalfFloat<Op>),
+ 1, 1, 0, device, reducer, output, scratch);
+ }
+ }
+};
+#endif
+
+
+template <typename Self, typename Op, bool Vectorizable>
+struct FullReducer<Self, Op, GpuDevice, Vectorizable> {
+  // Unfortunately nvidia doesn't handle exotic types such as complex well,
+  // so we reduce the scope of the optimized version of the code to the simple cases
+  // of doubles, floats and half floats
+#ifdef EIGEN_HAS_CUDA_FP16
+ static const bool HasOptimizedImplementation = !Op::IsStateful &&
+ (internal::is_same<typename Self::CoeffReturnType, float>::value ||
+ internal::is_same<typename Self::CoeffReturnType, double>::value ||
+ (internal::is_same<typename Self::CoeffReturnType, Eigen::half>::value && reducer_traits<Op, GpuDevice>::PacketAccess));
+#else
+ static const bool HasOptimizedImplementation = !Op::IsStateful &&
+ (internal::is_same<typename Self::CoeffReturnType, float>::value ||
+ internal::is_same<typename Self::CoeffReturnType, double>::value);
+#endif
+
+ template <typename OutputType>
+ static void run(const Self& self, Op& reducer, const GpuDevice& device, OutputType* output) {
+ assert(HasOptimizedImplementation && "Should only be called on doubles, floats or half floats");
+ const Index num_coeffs = array_prod(self.m_impl.dimensions());
+ // Don't crash when we're called with an input tensor of size 0.
+ if (num_coeffs == 0) {
+ return;
+ }
+
+ FullReductionLauncher<Self, Op, OutputType, reducer_traits<Op, GpuDevice>::PacketAccess>::run(self, reducer, device, output, num_coeffs);
+ }
+};
+
+
+template <int NumPerThread, typename Self,
+ typename Reducer, typename Index>
+__global__ void InnerReductionKernel(Reducer reducer, const Self input, Index num_coeffs_to_reduce, Index num_preserved_coeffs,
+ typename Self::CoeffReturnType* output) {
+#if __CUDA_ARCH__ >= 300
+ typedef typename Self::CoeffReturnType Type;
+ eigen_assert(blockDim.y == 1);
+ eigen_assert(blockDim.z == 1);
+ eigen_assert(gridDim.y == 1);
+ eigen_assert(gridDim.z == 1);
+
+ const int unroll_times = 16;
+ eigen_assert(NumPerThread % unroll_times == 0);
+
+ const Index input_col_blocks = divup<Index>(num_coeffs_to_reduce, blockDim.x * NumPerThread);
+ const Index num_input_blocks = input_col_blocks * num_preserved_coeffs;
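+  // e.g. (illustrative) with blockDim.x = 256, NumPerThread = 128, num_coeffs_to_reduce = 100000
+  // and num_preserved_coeffs = 10: input_col_blocks = divup(100000, 32768) = 4, giving 40 input blocks.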
+
+ const Index num_threads = blockDim.x * gridDim.x;
+ const Index thread_id = blockIdx.x * blockDim.x + threadIdx.x;
+
+ // Initialize the output values if they weren't initialized by the ReductionInitKernel
+ if (gridDim.x == 1) {
+ for (Index i = thread_id; i < num_preserved_coeffs; i += num_threads) {
+ output[i] = reducer.initialize();
+ }
+ __syncthreads();
+ }
+
+ for (Index i = blockIdx.x; i < num_input_blocks; i += gridDim.x) {
+ const Index row = i / input_col_blocks;
+
+ if (row < num_preserved_coeffs) {
+ const Index col_block = i % input_col_blocks;
+ const Index col_begin = col_block * blockDim.x * NumPerThread + threadIdx.x;
+
+ Type reduced_val = reducer.initialize();
+
+ for (Index j = 0; j < NumPerThread; j += unroll_times) {
+ const Index last_col = col_begin + blockDim.x * (j + unroll_times - 1);
+ if (last_col >= num_coeffs_to_reduce) {
+ for (Index col = col_begin + blockDim.x * j; col < num_coeffs_to_reduce; col += blockDim.x) {
+ const Type val = input.m_impl.coeff(row * num_coeffs_to_reduce + col);
+ reducer.reduce(val, &reduced_val);
+ }
+ break;
+ } else {
+ // Faster version of the loop with no branches after unrolling.
+#pragma unroll
+ for (int k = 0; k < unroll_times; ++k) {
+ const Index col = col_begin + blockDim.x * (j + k);
+ reducer.reduce(input.m_impl.coeff(row * num_coeffs_to_reduce + col), &reduced_val);
+ }
+ }
+ }
+
+#pragma unroll
+ for (int offset = warpSize/2; offset > 0; offset /= 2) {
+ reducer.reduce(__shfl_down(reduced_val, offset), &reduced_val);
+ }
+
+ if ((threadIdx.x & (warpSize - 1)) == 0) {
+ atomicReduce(&(output[row]), reduced_val, reducer);
+ }
+ }
+ }
+#else
+ assert(0 && "Shouldn't be called on unsupported device");
+#endif
+}
+
+#ifdef EIGEN_HAS_CUDA_FP16
+
+template <int NumPerThread, typename Self,
+ typename Reducer, typename Index>
+__global__ void InnerReductionKernelHalfFloat(Reducer reducer, const Self input, Index num_coeffs_to_reduce, Index num_preserved_coeffs,
+ half* output) {
+ eigen_assert(blockDim.y == 1);
+ eigen_assert(blockDim.z == 1);
+ eigen_assert(gridDim.y == 1);
+ eigen_assert(gridDim.z == 1);
+
+ const int unroll_times = 16;
+ eigen_assert(NumPerThread % unroll_times == 0);
+ eigen_assert(unroll_times % 2 == 0);
+
+ const Index input_col_blocks = divup<Index>(num_coeffs_to_reduce, blockDim.x * NumPerThread * 2);
+ const Index num_input_blocks = divup<Index>(input_col_blocks * num_preserved_coeffs, 2);
+
+ const Index num_threads = blockDim.x * gridDim.x;
+ const Index thread_id = blockIdx.x * blockDim.x + threadIdx.x;
+
+ // Initialize the output values if they weren't initialized by the ReductionInitKernel
+ if (gridDim.x == 1) {
+ Index i = 2*thread_id;
+ for (; i + 1 < num_preserved_coeffs; i += 2*num_threads) {
+ half* loc = output + i;
+ *((half2*)loc) = reducer.template initializePacket<half2>();
+ }
+ if (i < num_preserved_coeffs) {
+ output[i] = reducer.initialize();
+ }
+ __syncthreads();
+ }
+
+ for (Index i = blockIdx.x; i < num_input_blocks; i += gridDim.x) {
+ const Index row = 2 * (i / input_col_blocks);
+
+ if (row + 1 < num_preserved_coeffs) {
+ const Index col_block = i % input_col_blocks;
+ const Index col_begin = 2 * (col_block * blockDim.x * NumPerThread + threadIdx.x);
+
+ half2 reduced_val1 = reducer.template initializePacket<half2>();
+ half2 reduced_val2 = reducer.template initializePacket<half2>();
+
+ for (Index j = 0; j < NumPerThread; j += unroll_times) {
+ const Index last_col = col_begin + blockDim.x * (j + unroll_times - 1) * 2;
+ if (last_col >= num_coeffs_to_reduce) {
+ Index col = col_begin + blockDim.x * j;
+ for (; col + 1 < num_coeffs_to_reduce; col += blockDim.x) {
+ const half2 val1 = input.m_impl.template packet<Unaligned>(row * num_coeffs_to_reduce + col);
+ reducer.reducePacket(val1, &reduced_val1);
+ const half2 val2 = input.m_impl.template packet<Unaligned>((row+1) * num_coeffs_to_reduce + col);
+ reducer.reducePacket(val2, &reduced_val2);
+ }
+ if (col < num_coeffs_to_reduce) {
+            // Peel off the last column when the number of columns is odd.
+ const half last1 = input.m_impl.coeff(row * num_coeffs_to_reduce + col);
+ const half2 val1 = __halves2half2(last1, reducer.initialize());
+ reducer.reducePacket(val1, &reduced_val1);
+ const half last2 = input.m_impl.coeff((row+1) * num_coeffs_to_reduce + col);
+ const half2 val2 = __halves2half2(last2, reducer.initialize());
+ reducer.reducePacket(val2, &reduced_val2);
+ }
+ break;
+ } else {
+ // Faster version of the loop with no branches after unrolling.
+#pragma unroll
+ for (int k = 0; k < unroll_times; ++k) {
+ const Index col = col_begin + blockDim.x * (j + k) * 2;
+ reducer.reducePacket(input.m_impl.template packet<Unaligned>(row * num_coeffs_to_reduce + col), &reduced_val1);
+ reducer.reducePacket(input.m_impl.template packet<Unaligned>((row + 1)* num_coeffs_to_reduce + col), &reduced_val2);
+ }
+ }
+ }
+
+#pragma unroll
+ for (int offset = warpSize/2; offset > 0; offset /= 2) {
+ reducer.reducePacket(__shfl_down(reduced_val1, offset, warpSize), &reduced_val1);
+ reducer.reducePacket(__shfl_down(reduced_val2, offset, warpSize), &reduced_val2);
+ }
+
+ half val1 = __low2half(reduced_val1);
+ reducer.reduce(__high2half(reduced_val1), &val1);
+ half val2 = __low2half(reduced_val2);
+ reducer.reduce(__high2half(reduced_val2), &val2);
+ half2 val = __halves2half2(val1, val2);
+
+ if ((threadIdx.x & (warpSize - 1)) == 0) {
+ half* loc = output + row;
+ atomicReduce((half2*)loc, val, reducer);
+ }
+ }
+ }
+}
+
+#endif
+
+template <typename Self, typename Op, typename OutputType, bool PacketAccess, typename Enabled = void>
+struct InnerReductionLauncher {
+ static EIGEN_DEVICE_FUNC bool run(const Self&, Op&, const GpuDevice&, OutputType*, typename Self::Index, typename Self::Index) {
+ assert(false && "Should only be called to reduce doubles, floats and half floats on a gpu device");
+ return true;
+ }
+};
+
+// Specialization for float and double
+template <typename Self, typename Op, typename OutputType, bool PacketAccess>
+struct InnerReductionLauncher<
+ Self, Op, OutputType, PacketAccess,
+ typename internal::enable_if<
+ internal::is_same<float, OutputType>::value ||
+ internal::is_same<double, OutputType>::value,
+ void>::type> {
+ static bool run(const Self& self, Op& reducer, const GpuDevice& device, OutputType* output, typename Self::Index num_coeffs_to_reduce, typename Self::Index num_preserved_vals) {
+ typedef typename Self::Index Index;
+
+ const Index num_coeffs = num_coeffs_to_reduce * num_preserved_vals;
+ const int block_size = 256;
+ const int num_per_thread = 128;
+ const int dyn_blocks = divup<int>(num_coeffs, block_size * num_per_thread);
+ const int max_blocks = device.getNumCudaMultiProcessors() *
+ device.maxCudaThreadsPerMultiProcessor() / block_size;
+ const int num_blocks = numext::mini<int>(max_blocks, dyn_blocks);
+
+ if (num_blocks > 1) {
+      // We initialize the outputs outside the reduction kernel when we can't be sure that there
+      // won't be race conditions between multiple thread blocks.
+ const int dyn_blocks = divup<int>(num_preserved_vals, 1024);
+ const int max_blocks = device.getNumCudaMultiProcessors() *
+ device.maxCudaThreadsPerMultiProcessor() / 1024;
+ const int num_blocks = numext::mini<int>(max_blocks, dyn_blocks);
+ LAUNCH_CUDA_KERNEL((ReductionInitKernel<OutputType, Index>),
+ num_blocks, 1024, 0, device, reducer.initialize(),
+ num_preserved_vals, output);
+ }
+
+ LAUNCH_CUDA_KERNEL((InnerReductionKernel<num_per_thread, Self, Op, Index>),
+ num_blocks, block_size, 0, device, reducer, self, num_coeffs_to_reduce, num_preserved_vals, output);
+
+ return false;
+ }
+};
+
+#ifdef EIGEN_HAS_CUDA_FP16
+template <typename Self, typename Op>
+struct InnerReductionLauncher<Self, Op, Eigen::half, false> {
+ static bool run(const Self&, Op&, const GpuDevice&, half*, typename Self::Index, typename Self::Index) {
+ assert(false && "Should not be called since there is no packet accessor");
+ return true;
+ }
+};
+
+template <typename Self, typename Op>
+struct InnerReductionLauncher<Self, Op, Eigen::half, true> {
+ static bool run(const Self& self, Op& reducer, const GpuDevice& device, half* output, typename Self::Index num_coeffs_to_reduce, typename Self::Index num_preserved_vals) {
+ typedef typename Self::Index Index;
+
+ if (num_preserved_vals % 2 != 0) {
+ // Not supported yet, revert to the slower code path
+ return true;
+ }
+
+ const Index num_coeffs = num_coeffs_to_reduce * num_preserved_vals;
+ const int block_size = /*256*/128;
+ const int num_per_thread = /*128*/64;
+ const int dyn_blocks = divup<int>(num_coeffs, block_size * num_per_thread);
+ const int max_blocks = device.getNumCudaMultiProcessors() *
+ device.maxCudaThreadsPerMultiProcessor() / block_size;
+ const int num_blocks = numext::mini<int>(max_blocks, dyn_blocks);
+
+ if (num_blocks > 1) {
+      // We initialize the outputs outside the reduction kernel when we can't be sure that there
+      // won't be race conditions between multiple thread blocks.
+ const int dyn_blocks = divup<int>(num_preserved_vals, 1024);
+ const int max_blocks = device.getNumCudaMultiProcessors() *
+ device.maxCudaThreadsPerMultiProcessor() / 1024;
+ const int num_blocks = numext::mini<int>(max_blocks, dyn_blocks);
+ LAUNCH_CUDA_KERNEL((ReductionInitKernelHalfFloat<Self, Op, Index>),
+ 1, 1, 0, device, reducer, self, num_preserved_vals, output);
+ }
+
+ LAUNCH_CUDA_KERNEL((InnerReductionKernelHalfFloat<num_per_thread, Self, Op, Index>),
+ num_blocks, block_size, 0, device, reducer, self, num_coeffs_to_reduce, num_preserved_vals, output);
+
+ return false;
+ }
+};
+#endif
+
+
+template <typename Self, typename Op>
+struct InnerReducer<Self, Op, GpuDevice> {
+  // Unfortunately nvidia doesn't handle exotic types such as complex well,
+  // so we reduce the scope of the optimized version of the code to the simple cases
+  // of floats, doubles and half floats.
+#ifdef EIGEN_HAS_CUDA_FP16
+ static const bool HasOptimizedImplementation = !Op::IsStateful &&
+ (internal::is_same<typename Self::CoeffReturnType, float>::value ||
+ internal::is_same<typename Self::CoeffReturnType, double>::value ||
+ (internal::is_same<typename Self::CoeffReturnType, Eigen::half>::value && reducer_traits<Op, GpuDevice>::PacketAccess));
+#else
+ static const bool HasOptimizedImplementation = !Op::IsStateful &&
+ (internal::is_same<typename Self::CoeffReturnType, float>::value ||
+ internal::is_same<typename Self::CoeffReturnType, double>::value);
+#endif
+
+ template <typename OutputType>
+ static bool run(const Self& self, Op& reducer, const GpuDevice& device, OutputType* output, typename Self::Index num_coeffs_to_reduce, typename Self::Index num_preserved_vals) {
+ assert(HasOptimizedImplementation && "Should only be called on doubles, floats or half floats");
+ const Index num_coeffs = array_prod(self.m_impl.dimensions());
+ // Don't crash when we're called with an input tensor of size 0.
+ if (num_coeffs == 0) {
+ return true;
+ }
+ // It's faster to use the usual code.
+ if (num_coeffs_to_reduce <= 128) {
+ return true;
+ }
+
+ return InnerReductionLauncher<Self, Op, OutputType, reducer_traits<Op, GpuDevice>::PacketAccess>::run(self, reducer, device, output, num_coeffs_to_reduce, num_preserved_vals);
+ }
+};
+
+template <int NumPerThread, typename Self,
+ typename Reducer, typename Index>
+__global__ void OuterReductionKernel(Reducer reducer, const Self input, Index num_coeffs_to_reduce, Index num_preserved_coeffs,
+ typename Self::CoeffReturnType* output) {
+ const Index num_threads = blockDim.x * gridDim.x;
+ const Index thread_id = blockIdx.x * blockDim.x + threadIdx.x;
+ // Initialize the output values if they weren't initialized by the ReductionInitKernel
+ if (gridDim.x == 1) {
+ for (Index i = thread_id; i < num_preserved_coeffs; i += num_threads) {
+ output[i] = reducer.initialize();
+ }
+ __syncthreads();
+ }
+
+ // Do the reduction.
+ const Index max_iter = num_preserved_coeffs * divup<Index>(num_coeffs_to_reduce, NumPerThread);
+ for (Index i = thread_id; i < max_iter; i += num_threads) {
+ const Index input_col = i % num_preserved_coeffs;
+ const Index input_row = (i / num_preserved_coeffs) * NumPerThread;
+ typename Self::CoeffReturnType reduced_val = reducer.initialize();
+ const Index max_row = numext::mini(input_row + NumPerThread, num_coeffs_to_reduce);
+ for (Index j = input_row; j < max_row; j++) {
+ typename Self::CoeffReturnType val = input.m_impl.coeff(j * num_preserved_coeffs + input_col);
+ reducer.reduce(val, &reduced_val);
+ }
+ atomicReduce(&(output[input_col]), reduced_val, reducer);
+ }
+}
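+// Illustrative mapping for the kernel above: with num_preserved_coeffs = 8 and NumPerThread = 16,
+// loop iteration i = 13 reduces rows 16..31 (clamped to num_coeffs_to_reduce) of
+// column 13 % 8 = 5 into output[5].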
+
+
+template <typename Self, typename Op>
+struct OuterReducer<Self, Op, GpuDevice> {
+  // Unfortunately nvidia doesn't handle exotic types such as complex well,
+  // so we reduce the scope of the optimized version of the code to the simple cases
+  // of floats and doubles.
+ static const bool HasOptimizedImplementation = !Op::IsStateful &&
+ (internal::is_same<typename Self::CoeffReturnType, float>::value ||
+ internal::is_same<typename Self::CoeffReturnType, double>::value);
+ template <typename Device, typename OutputType>
+ static EIGEN_DEVICE_FUNC bool run(const Self&, Op&, const Device&, OutputType*, typename Self::Index, typename Self::Index) {
+ assert(false && "Should only be called to reduce doubles or floats on a gpu device");
+ return true;
+ }
+
+ static bool run(const Self& self, Op& reducer, const GpuDevice& device, float* output, typename Self::Index num_coeffs_to_reduce, typename Self::Index num_preserved_vals) {
+ typedef typename Self::Index Index;
+
+ // It's faster to use the usual code.
+ if (num_coeffs_to_reduce <= 32) {
+ return true;
+ }
+
+ const Index num_coeffs = num_coeffs_to_reduce * num_preserved_vals;
+ const int block_size = 256;
+ const int num_per_thread = 16;
+ const int dyn_blocks = divup<int>(num_coeffs, block_size * num_per_thread);
+ const int max_blocks = device.getNumCudaMultiProcessors() *
+ device.maxCudaThreadsPerMultiProcessor() / block_size;
+ const int num_blocks = numext::mini<int>(max_blocks, dyn_blocks);
+
+ if (num_blocks > 1) {
+      // We initialize the outputs outside the reduction kernel when we can't be sure that there
+      // won't be race conditions between multiple thread blocks.
+ const int dyn_blocks = divup<int>(num_preserved_vals, 1024);
+ const int max_blocks = device.getNumCudaMultiProcessors() *
+ device.maxCudaThreadsPerMultiProcessor() / 1024;
+ const int num_blocks = numext::mini<int>(max_blocks, dyn_blocks);
+ LAUNCH_CUDA_KERNEL((ReductionInitKernel<float, Index>),
+ num_blocks, 1024, 0, device, reducer.initialize(),
+ num_preserved_vals, output);
+ }
+
+ LAUNCH_CUDA_KERNEL((OuterReductionKernel<num_per_thread, Self, Op, Index>),
+ num_blocks, block_size, 0, device, reducer, self, num_coeffs_to_reduce, num_preserved_vals, output);
+
+ return false;
+ }
+};
+
+#endif
+
+
+} // end namespace internal
+} // end namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_REDUCTION_CUDA_H
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorReductionSycl.h b/unsupported/Eigen/CXX11/src/Tensor/TensorReductionSycl.h
new file mode 100644
index 000000000..3daecb045
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorReductionSycl.h
@@ -0,0 +1,242 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Mehdi Goli Codeplay Software Ltd.
+// Ralph Potter Codeplay Software Ltd.
+// Luke Iwanski Codeplay Software Ltd.
+// Contact: <eigen@codeplay.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+/*****************************************************************
+ * TensorReductionSycl.h
+ *
+ * \brief:
+ *  This file implements the reduction operations (full and partial
+ *  reductions) for SYCL devices.
+ *
+*****************************************************************/
+
+#ifndef UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSOR_REDUCTION_SYCL_HPP
+#define UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSOR_REDUCTION_SYCL_HPP
+
+namespace Eigen {
+namespace internal {
+
+template<typename CoeffReturnType, typename KernelName> struct syclGenericBufferReducer{
+template<typename BufferTOut, typename BufferTIn>
+static void run(BufferTOut* bufOut, BufferTIn& bufI, const Eigen::SyclDevice& dev, size_t length, size_t local){
+ do {
+ auto f = [length, local, bufOut, &bufI](cl::sycl::handler& h) mutable {
+ cl::sycl::nd_range<1> r{cl::sycl::range<1>{std::max(length, local)},
+ cl::sycl::range<1>{std::min(length, local)}};
+ /* Two accessors are used: one to the buffer that is being reduced,
+ * and a second to local memory, used to store intermediate data. */
+ auto aI =
+ bufI.template get_access<cl::sycl::access::mode::read_write>(h);
+ auto aOut =
+ bufOut->template get_access<cl::sycl::access::mode::discard_write>(h);
+ cl::sycl::accessor<CoeffReturnType, 1, cl::sycl::access::mode::read_write,
+ cl::sycl::access::target::local>
+ scratch(cl::sycl::range<1>(local), h);
+
+ /* The parallel_for invocation chosen is the variant with an nd_item
+ * parameter, since the code requires barriers for correctness. */
+ h.parallel_for<KernelName>(
+ r, [aOut, aI, scratch, local, length](cl::sycl::nd_item<1> id) {
+ size_t globalid = id.get_global(0);
+ size_t localid = id.get_local(0);
+ /* All threads collectively read from global memory into local.
+ * The barrier ensures all threads' IO is resolved before
+ * execution continues (strictly speaking, all threads within
+ * a single work-group - there is no co-ordination between
+ * work-groups, only work-items). */
+ if (globalid < length) {
+ scratch[localid] = aI[globalid];
+ }
+ id.barrier(cl::sycl::access::fence_space::local_space);
+
+ /* Apply the reduction operation between the current local
+ * id and the one on the other half of the vector. */
+ if (globalid < length) {
+ int min = (length < local) ? length : local;
+ for (size_t offset = min / 2; offset > 0; offset /= 2) {
+ if (localid < offset) {
+ scratch[localid] += scratch[localid + offset];
+ }
+ id.barrier(cl::sycl::access::fence_space::local_space);
+ }
+ /* The final result will be stored in local id 0. */
+ if (localid == 0) {
+ aI[id.get_group(0)] = scratch[localid];
+                if ((length <= local) && globalid == 0) {
+                  aOut[globalid] = scratch[localid];
+                }
+ }
+ }
+ });
+ };
+ dev.m_queue.submit(f);
+ dev.m_queue.throw_asynchronous();
+
+ /* At this point, you could queue::wait_and_throw() to ensure that
+ * errors are caught quickly. However, this would likely impact
+ * performance negatively. */
+ length = length / local;
+
+ } while (length > 1);
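+  // Illustrative trace (length = 1024, local = 256): the first pass reduces 1024 values to 4
+  // partial results (length becomes 1024 / 256 = 4); the second pass reduces those 4 values and,
+  // since length <= local, writes the final result to the output buffer.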
+}
+
+};
+
+/// For now let's start with a full reducer.
+/// Self is useless here because during expression construction we treat the reduction as a leaf node.
+/// We take the child of the reduction, build an expression from it, and apply the full reducer function to it. FullReducer applies the
+/// reduction operation to the child of the reduction. Once that is done the reduction itself is an empty shell that can be thrown away and treated as
+/// a leaf node.
+template <typename Self, typename Op, bool Vectorizable>
+struct FullReducer<Self, Op, const Eigen::SyclDevice, Vectorizable> {
+
+ typedef typename Self::CoeffReturnType CoeffReturnType;
+ static const bool HasOptimizedImplementation = false;
+
+  static void run(const Self& self, Op& reducer, const Eigen::SyclDevice& dev, CoeffReturnType* output) {
+    typedef const typename Self::ChildType HostExpr; /// this is the child of the reduction
+    typedef typename TensorSycl::internal::createPlaceHolderExpression<HostExpr>::Type PlaceHolderExpr;
+    auto functors = TensorSycl::internal::extractFunctors(self.impl());
+    int red_factor = 256; /// initial reduction factor. If the size is less than red_factor we only create one thread.
+    size_t inputSize = self.impl().dimensions().TotalSize();
+    size_t rng = inputSize / red_factor; // the initial number of threads is the input size divided by red_factor
+    size_t remaining = inputSize % red_factor;
+    if (rng == 0) {
+      red_factor = 1;
+    }
+    size_t tileSize = dev.m_queue.get_device().template get_info<cl::sycl::info::device::max_work_group_size>() / 2;
+    size_t GRange = std::max((size_t)1, rng);
+
+  // round the global range up to the next power of 2 for the recursive reduction
+ GRange--;
+ GRange |= GRange >> 1;
+ GRange |= GRange >> 2;
+ GRange |= GRange >> 4;
+ GRange |= GRange >> 8;
+ GRange |= GRange >> 16;
+#if __x86_64__ || __ppc64__ || _WIN64
+ GRange |= GRange >> 32;
+#endif
+ GRange++;
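+  // e.g. (illustrative) rng = 300 is rounded up to GRange = 512, the next power of 2.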
+ size_t outTileSize = tileSize;
+  /// if GRange is smaller than the tile size we shrink the tile size to GRange; in that case a single kernel is enough to reduce everything to one value.
+  if (GRange < outTileSize) outTileSize = GRange;
+  // get the final output buffer; it can be written to directly since there is no need for a separate assignment
+  auto out_buffer = dev.template get_sycl_buffer<typename Eigen::internal::remove_all<CoeffReturnType>::type>(self.dimensions().TotalSize(), output);
+  /// create the temporary global memory for calculating the reduction.
+  /// It is used to collect the values reduced by each work-group, as we don't have a global barrier on the GPU. Once they are saved we can
+  /// recursively apply the reduction to them in order to reduce the whole tensor.
+  auto temp_global_buffer = cl::sycl::buffer<CoeffReturnType, 1>(cl::sycl::range<1>(GRange));
+  typedef typename Eigen::internal::remove_all<decltype(self.xprDims())>::type Dims;
+  Dims dims = self.xprDims();
+  Op functor = reducer;
+ dev.m_queue.submit([&](cl::sycl::handler &cgh) {
+ // create a tuple of accessors from Evaluator
+ auto tuple_of_accessors = TensorSycl::internal::createTupleOfAccessors(cgh, self.impl());
+ auto tmp_global_accessor = temp_global_buffer. template get_access<cl::sycl::access::mode::read_write, cl::sycl::access::target::global_buffer>(cgh);
+
+ cgh.parallel_for<PlaceHolderExpr>( cl::sycl::nd_range<1>(cl::sycl::range<1>(GRange), cl::sycl::range<1>(outTileSize)), [=](cl::sycl::nd_item<1> itemID) {
+ typedef typename TensorSycl::internal::ConvertToDeviceExpression<const HostExpr>::Type DevExpr;
+ auto device_expr = TensorSycl::internal::createDeviceExpression<DevExpr, PlaceHolderExpr>(functors, tuple_of_accessors);
+        /// The reduction cannot be captured automatically through our device conversion recursion, because a reduction has two behaviours:
+        /// the first is when it is used as the root to launch the sub-kernel, and the second is when it is treated as a leaf node that passes the
+        /// calculated result to its parent kernel. The latter is detected automatically by our device expression generator; the former is created here.
+        const auto device_self_expr = TensorReductionOp<Op, Dims, decltype(device_expr.expr), MakeGlobalPointer>(device_expr.expr, dims, functor);
+        /// This is the evaluator for device_self_expr. It is essentially the same as the self that was passed to the run function; the difference is
+        /// that the device_evaluator is detectable and recognisable on the device.
+        auto device_self_evaluator = Eigen::TensorEvaluator<decltype(device_self_expr), Eigen::DefaultDevice>(device_self_expr, Eigen::DefaultDevice());
+        /// const cast added as a naive solution to work around the qualifier-drop error
+        auto globalid = itemID.get_global_linear_id();
+
+        if (globalid < rng)
+          tmp_global_accessor.get_pointer()[globalid] = InnerMostDimReducer<decltype(device_self_evaluator), Op, false>::reduce(device_self_evaluator, red_factor * globalid, red_factor, const_cast<Op&>(functor));
+        else
+          tmp_global_accessor.get_pointer()[globalid] = static_cast<CoeffReturnType>(0);
+
+        if (remaining != 0 && globalid == 0)
+          // this adds the remainder of the input when its size is not divisible by red_factor.
+          tmp_global_accessor.get_pointer()[globalid] += InnerMostDimReducer<decltype(device_self_evaluator), Op, false>::reduce(device_self_evaluator, red_factor * rng, remaining, const_cast<Op&>(functor));
+ });
+ });
+ dev.m_queue.throw_asynchronous();
+
+  /// This is used to recursively reduce the temporary values down to a single element.
+  syclGenericBufferReducer<CoeffReturnType, HostExpr>::run(out_buffer, temp_global_buffer, dev, GRange, outTileSize);
+ }
+
+};
+
+template <typename Self, typename Op>
+struct InnerReducer<Self, Op, const Eigen::SyclDevice> {
+
+ typedef typename Self::CoeffReturnType CoeffReturnType;
+ static const bool HasOptimizedImplementation = false;
+
+ static bool run(const Self& self, Op& reducer, const Eigen::SyclDevice& dev, CoeffReturnType* output, typename Self::Index , typename Self::Index num_coeffs_to_preserve) {
+    typedef const typename Self::ChildType HostExpr; /// this is the child of the reduction
+ typedef typename TensorSycl::internal::createPlaceHolderExpression<HostExpr>::Type PlaceHolderExpr;
+ auto functors = TensorSycl::internal::extractFunctors(self.impl());
+
+    size_t tileSize = dev.m_queue.get_device().template get_info<cl::sycl::info::device::max_work_group_size>() / 2;
+
+    size_t GRange = num_coeffs_to_preserve;
+    if (tileSize > GRange) tileSize = GRange;
+    else if (GRange > tileSize) {
+      size_t xMode = GRange % tileSize;
+      if (xMode != 0) GRange += (tileSize - xMode);
+    }
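+    // e.g. (illustrative) num_coeffs_to_preserve = 1000 with tileSize = 128 gives xMode = 104,
+    // so GRange is padded to 1024, a multiple of the work-group size.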
+ typedef typename Eigen::internal::remove_all<decltype(self.xprDims())>::type Dims;
+    Dims dims = self.xprDims();
+ Op functor = reducer;
+
+ dev.m_queue.submit([&](cl::sycl::handler &cgh) {
+ // create a tuple of accessors from Evaluator
+ auto tuple_of_accessors = TensorSycl::internal::createTupleOfAccessors(cgh, self.impl());
+ auto output_accessor = dev.template get_sycl_accessor<cl::sycl::access::mode::discard_write>(num_coeffs_to_preserve,cgh, output);
+
+ cgh.parallel_for<Self>( cl::sycl::nd_range<1>(cl::sycl::range<1>(GRange), cl::sycl::range<1>(tileSize)), [=](cl::sycl::nd_item<1> itemID) {
+ typedef typename TensorSycl::internal::ConvertToDeviceExpression<const HostExpr>::Type DevExpr;
+ auto device_expr = TensorSycl::internal::createDeviceExpression<DevExpr, PlaceHolderExpr>(functors, tuple_of_accessors);
+        /// The reduction cannot be captured automatically through our device conversion recursion, because a reduction has two behaviours:
+        /// the first is when it is used as the root to launch the sub-kernel, and the second is when it is treated as a leaf node that passes the
+        /// calculated result to its parent kernel. The latter is detected automatically by our device expression generator; the former is created here.
+        const auto device_self_expr = TensorReductionOp<Op, Dims, decltype(device_expr.expr), MakeGlobalPointer>(device_expr.expr, dims, functor);
+        /// This is the evaluator for device_self_expr. It is essentially the same as the self that was passed to the run function; the difference is
+        /// that the device_evaluator is detectable and recognisable on the device.
+        typedef Eigen::TensorEvaluator<decltype(device_self_expr), Eigen::DefaultDevice> DeviceSelf;
+        auto device_self_evaluator = Eigen::TensorEvaluator<decltype(device_self_expr), Eigen::DefaultDevice>(device_self_expr, Eigen::DefaultDevice());
+        /// const cast added as a naive solution to work around the qualifier-drop error
+        auto globalid = itemID.get_global_linear_id();
+        if (globalid < static_cast<size_t>(num_coeffs_to_preserve)) {
+          typename DeviceSelf::CoeffReturnType accum = functor.initialize();
+          GenericDimReducer<DeviceSelf::NumReducedDims - 1, DeviceSelf, Op>::reduce(device_self_evaluator, device_self_evaluator.firstInput(globalid), const_cast<Op&>(functor), &accum);
+          output_accessor.get_pointer()[globalid] = functor.finalize(accum);
+        }
+ });
+ });
+ dev.m_queue.throw_asynchronous();
+ return false;
+ }
+};
+
+} // end namespace internal
+} // namespace Eigen
+
+#endif // UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSOR_REDUCTION_SYCL_HPP
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorRef.h b/unsupported/Eigen/CXX11/src/Tensor/TensorRef.h
new file mode 100644
index 000000000..99245f778
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorRef.h
@@ -0,0 +1,429 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_REF_H
+#define EIGEN_CXX11_TENSOR_TENSOR_REF_H
+
+namespace Eigen {
+
+namespace internal {
+
+template <typename Dimensions, typename Scalar>
+class TensorLazyBaseEvaluator {
+ public:
+ TensorLazyBaseEvaluator() : m_refcount(0) { }
+ virtual ~TensorLazyBaseEvaluator() { }
+
+ EIGEN_DEVICE_FUNC virtual const Dimensions& dimensions() const = 0;
+ EIGEN_DEVICE_FUNC virtual const Scalar* data() const = 0;
+
+ EIGEN_DEVICE_FUNC virtual const Scalar coeff(DenseIndex index) const = 0;
+ EIGEN_DEVICE_FUNC virtual Scalar& coeffRef(DenseIndex index) = 0;
+
+ void incrRefCount() { ++m_refcount; }
+ void decrRefCount() { --m_refcount; }
+ int refCount() const { return m_refcount; }
+
+ private:
+  // No copy, no assignment.
+ TensorLazyBaseEvaluator(const TensorLazyBaseEvaluator& other);
+ TensorLazyBaseEvaluator& operator = (const TensorLazyBaseEvaluator& other);
+
+ int m_refcount;
+};
+
+
+template <typename Dimensions, typename Expr, typename Device>
+class TensorLazyEvaluatorReadOnly : public TensorLazyBaseEvaluator<Dimensions, typename TensorEvaluator<Expr, Device>::Scalar> {
+ public:
+ // typedef typename TensorEvaluator<Expr, Device>::Dimensions Dimensions;
+ typedef typename TensorEvaluator<Expr, Device>::Scalar Scalar;
+
+ TensorLazyEvaluatorReadOnly(const Expr& expr, const Device& device) : m_impl(expr, device), m_dummy(Scalar(0)) {
+ m_dims = m_impl.dimensions();
+ m_impl.evalSubExprsIfNeeded(NULL);
+ }
+ virtual ~TensorLazyEvaluatorReadOnly() {
+ m_impl.cleanup();
+ }
+
+ EIGEN_DEVICE_FUNC virtual const Dimensions& dimensions() const {
+ return m_dims;
+ }
+ EIGEN_DEVICE_FUNC virtual const Scalar* data() const {
+ return m_impl.data();
+ }
+
+ EIGEN_DEVICE_FUNC virtual const Scalar coeff(DenseIndex index) const {
+ return m_impl.coeff(index);
+ }
+ EIGEN_DEVICE_FUNC virtual Scalar& coeffRef(DenseIndex /*index*/) {
+    eigen_assert(false && "can't reference the coefficient of an rvalue");
+    return m_dummy;
+  }
+
+ protected:
+ TensorEvaluator<Expr, Device> m_impl;
+ Dimensions m_dims;
+ Scalar m_dummy;
+};
+
+template <typename Dimensions, typename Expr, typename Device>
+class TensorLazyEvaluatorWritable : public TensorLazyEvaluatorReadOnly<Dimensions, Expr, Device> {
+ public:
+ typedef TensorLazyEvaluatorReadOnly<Dimensions, Expr, Device> Base;
+ typedef typename Base::Scalar Scalar;
+
+ TensorLazyEvaluatorWritable(const Expr& expr, const Device& device) : Base(expr, device) {
+ }
+ virtual ~TensorLazyEvaluatorWritable() {
+ }
+
+ EIGEN_DEVICE_FUNC virtual Scalar& coeffRef(DenseIndex index) {
+ return this->m_impl.coeffRef(index);
+ }
+};
+
+template <typename Dimensions, typename Expr, typename Device>
+class TensorLazyEvaluator : public internal::conditional<bool(internal::is_lvalue<Expr>::value),
+ TensorLazyEvaluatorWritable<Dimensions, Expr, Device>,
+ TensorLazyEvaluatorReadOnly<Dimensions, const Expr, Device> >::type {
+ public:
+ typedef typename internal::conditional<bool(internal::is_lvalue<Expr>::value),
+ TensorLazyEvaluatorWritable<Dimensions, Expr, Device>,
+ TensorLazyEvaluatorReadOnly<Dimensions, const Expr, Device> >::type Base;
+ typedef typename Base::Scalar Scalar;
+
+ TensorLazyEvaluator(const Expr& expr, const Device& device) : Base(expr, device) {
+ }
+ virtual ~TensorLazyEvaluator() {
+ }
+};
+
+} // namespace internal
+
+
+/** \class TensorRef
+ * \ingroup CXX11_Tensor_Module
+ *
+ * \brief A reference to a tensor expression.
+ * The expression is evaluated lazily: coefficients are computed on demand,
+ * as far as the wrapped expression allows.
+ *
+ */
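+//
+// A minimal usage sketch (illustrative, based on the accessors declared in
+// this file): wrapping a coefficient-wise expression in a TensorRef defers
+// its evaluation, so no bulk computation happens up front and only the
+// coefficients that are actually read get computed.
+//
+//   Eigen::Tensor<float, 3> input(3, 5, 7);
+//   input.setRandom();
+//   Eigen::TensorRef<Eigen::Tensor<float, 3> > ref = input.exp();  // nothing computed yet
+//   float v = ref(0, 1, 2);  // evaluates exp() for this coefficient only
+//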
+template<typename PlainObjectType> class TensorRef : public TensorBase<TensorRef<PlainObjectType> >
+{
+ public:
+ typedef TensorRef<PlainObjectType> Self;
+ typedef typename PlainObjectType::Base Base;
+ typedef typename Eigen::internal::nested<Self>::type Nested;
+ typedef typename internal::traits<PlainObjectType>::StorageKind StorageKind;
+ typedef typename internal::traits<PlainObjectType>::Index Index;
+ typedef typename internal::traits<PlainObjectType>::Scalar Scalar;
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+ typedef typename Base::CoeffReturnType CoeffReturnType;
+ typedef Scalar* PointerType;
+ typedef PointerType PointerArgType;
+
+ static const Index NumIndices = PlainObjectType::NumIndices;
+ typedef typename PlainObjectType::Dimensions Dimensions;
+
+ enum {
+ IsAligned = false,
+ PacketAccess = false,
+ Layout = PlainObjectType::Layout,
+ CoordAccess = false, // to be implemented
+ RawAccess = false
+ };
+
+ EIGEN_STRONG_INLINE TensorRef() : m_evaluator(NULL) {
+ }
+
+ template <typename Expression>
+ EIGEN_STRONG_INLINE TensorRef(const Expression& expr) : m_evaluator(new internal::TensorLazyEvaluator<Dimensions, Expression, DefaultDevice>(expr, DefaultDevice())) {
+ m_evaluator->incrRefCount();
+ }
+
+ template <typename Expression>
+ EIGEN_STRONG_INLINE TensorRef& operator = (const Expression& expr) {
+ unrefEvaluator();
+ m_evaluator = new internal::TensorLazyEvaluator<Dimensions, Expression, DefaultDevice>(expr, DefaultDevice());
+ m_evaluator->incrRefCount();
+ return *this;
+ }
+
+ ~TensorRef() {
+ unrefEvaluator();
+ }
+
+ TensorRef(const TensorRef& other) : m_evaluator(other.m_evaluator) {
+ eigen_assert(m_evaluator->refCount() > 0);
+ m_evaluator->incrRefCount();
+ }
+
+ TensorRef& operator = (const TensorRef& other) {
+ if (this != &other) {
+ unrefEvaluator();
+ m_evaluator = other.m_evaluator;
+ eigen_assert(m_evaluator->refCount() > 0);
+ m_evaluator->incrRefCount();
+ }
+ return *this;
+ }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Index rank() const { return m_evaluator->dimensions().size(); }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Index dimension(Index n) const { return m_evaluator->dimensions()[n]; }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_evaluator->dimensions(); }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Index size() const { return m_evaluator->dimensions().TotalSize(); }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const Scalar* data() const { return m_evaluator->data(); }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const Scalar operator()(Index index) const
+ {
+ return m_evaluator->coeff(index);
+ }
+
+#if EIGEN_HAS_VARIADIC_TEMPLATES
+ template<typename... IndexTypes> EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const Scalar operator()(Index firstIndex, IndexTypes... otherIndices) const
+ {
+ const std::size_t num_indices = (sizeof...(otherIndices) + 1);
+ const array<Index, num_indices> indices{{firstIndex, otherIndices...}};
+ return coeff(indices);
+ }
+ template<typename... IndexTypes> EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Scalar& coeffRef(Index firstIndex, IndexTypes... otherIndices)
+ {
+ const std::size_t num_indices = (sizeof...(otherIndices) + 1);
+ const array<Index, num_indices> indices{{firstIndex, otherIndices...}};
+ return coeffRef(indices);
+ }
+#else
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const Scalar operator()(Index i0, Index i1) const
+ {
+ array<Index, 2> indices;
+ indices[0] = i0;
+ indices[1] = i1;
+ return coeff(indices);
+ }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const Scalar operator()(Index i0, Index i1, Index i2) const
+ {
+ array<Index, 3> indices;
+ indices[0] = i0;
+ indices[1] = i1;
+ indices[2] = i2;
+ return coeff(indices);
+ }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const Scalar operator()(Index i0, Index i1, Index i2, Index i3) const
+ {
+ array<Index, 4> indices;
+ indices[0] = i0;
+ indices[1] = i1;
+ indices[2] = i2;
+ indices[3] = i3;
+ return coeff(indices);
+ }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const Scalar operator()(Index i0, Index i1, Index i2, Index i3, Index i4) const
+ {
+ array<Index, 5> indices;
+ indices[0] = i0;
+ indices[1] = i1;
+ indices[2] = i2;
+ indices[3] = i3;
+ indices[4] = i4;
+ return coeff(indices);
+ }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Scalar& coeffRef(Index i0, Index i1)
+ {
+ array<Index, 2> indices;
+ indices[0] = i0;
+ indices[1] = i1;
+ return coeffRef(indices);
+ }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Scalar& coeffRef(Index i0, Index i1, Index i2)
+ {
+ array<Index, 3> indices;
+ indices[0] = i0;
+ indices[1] = i1;
+ indices[2] = i2;
+ return coeffRef(indices);
+ }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Scalar& coeffRef(Index i0, Index i1, Index i2, Index i3)
+ {
+ array<Index, 4> indices;
+ indices[0] = i0;
+ indices[1] = i1;
+ indices[2] = i2;
+ indices[3] = i3;
+ return coeffRef(indices);
+ }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Scalar& coeffRef(Index i0, Index i1, Index i2, Index i3, Index i4)
+ {
+ array<Index, 5> indices;
+ indices[0] = i0;
+ indices[1] = i1;
+ indices[2] = i2;
+ indices[3] = i3;
+ indices[4] = i4;
+ return coeffRef(indices);
+ }
+#endif
+
+ template <std::size_t NumIndices> EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const Scalar coeff(const array<Index, NumIndices>& indices) const
+ {
+ const Dimensions& dims = this->dimensions();
+ Index index = 0;
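+ // Fold the multi-index into a flat offset: the innermost (fastest varying)
+ // dimension is the last one for RowMajor and the first one for ColMajor.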
+ if (PlainObjectType::Options & RowMajor) {
+ index += indices[0];
+ for (size_t i = 1; i < NumIndices; ++i) {
+ index = index * dims[i] + indices[i];
+ }
+ } else {
+ index += indices[NumIndices-1];
+ for (int i = NumIndices-2; i >= 0; --i) {
+ index = index * dims[i] + indices[i];
+ }
+ }
+ return m_evaluator->coeff(index);
+ }
+ template <std::size_t NumIndices> EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Scalar& coeffRef(const array<Index, NumIndices>& indices)
+ {
+ const Dimensions& dims = this->dimensions();
+ Index index = 0;
+ if (PlainObjectType::Options & RowMajor) {
+ index += indices[0];
+ for (size_t i = 1; i < NumIndices; ++i) {
+ index = index * dims[i] + indices[i];
+ }
+ } else {
+ index += indices[NumIndices-1];
+ for (int i = NumIndices-2; i >= 0; --i) {
+ index = index * dims[i] + indices[i];
+ }
+ }
+ return m_evaluator->coeffRef(index);
+ }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const Scalar coeff(Index index) const
+ {
+ return m_evaluator->coeff(index);
+ }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Scalar& coeffRef(Index index)
+ {
+ return m_evaluator->coeffRef(index);
+ }
+
+ private:
+ EIGEN_STRONG_INLINE void unrefEvaluator() {
+ if (m_evaluator) {
+ m_evaluator->decrRefCount();
+ if (m_evaluator->refCount() == 0) {
+ delete m_evaluator;
+ }
+ }
+ }
+
+ internal::TensorLazyBaseEvaluator<Dimensions, Scalar>* m_evaluator;
+};
+
+
+// evaluator for rvalues
+template<typename Derived, typename Device>
+struct TensorEvaluator<const TensorRef<Derived>, Device>
+{
+ typedef typename Derived::Index Index;
+ typedef typename Derived::Scalar Scalar;
+ typedef typename Derived::Scalar CoeffReturnType;
+ typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
+ typedef typename Derived::Dimensions Dimensions;
+
+ enum {
+ IsAligned = false,
+ PacketAccess = false,
+ Layout = TensorRef<Derived>::Layout,
+ CoordAccess = false, // to be implemented
+ RawAccess = false
+ };
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const TensorRef<Derived>& m, const Device&)
+ : m_ref(m)
+ { }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_ref.dimensions(); }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar*) {
+ return true;
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() { }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const {
+ return m_ref.coeff(index);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) {
+ return m_ref.coeffRef(index);
+ }
+
+ EIGEN_DEVICE_FUNC Scalar* data() const { return m_ref.data(); }
+
+ protected:
+ TensorRef<Derived> m_ref;
+};
+
+
+// evaluator for lvalues
+template<typename Derived, typename Device>
+struct TensorEvaluator<TensorRef<Derived>, Device> : public TensorEvaluator<const TensorRef<Derived>, Device>
+{
+ typedef typename Derived::Index Index;
+ typedef typename Derived::Scalar Scalar;
+ typedef typename Derived::Scalar CoeffReturnType;
+ typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
+ typedef typename Derived::Dimensions Dimensions;
+
+ typedef TensorEvaluator<const TensorRef<Derived>, Device> Base;
+
+ enum {
+ IsAligned = false,
+ PacketAccess = false,
+ RawAccess = false
+ };
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(TensorRef<Derived>& m, const Device& d) : Base(m, d)
+ { }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) {
+ return this->m_ref.coeffRef(index);
+ }
+};
+
+
+
+} // end namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_REF_H
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorReverse.h b/unsupported/Eigen/CXX11/src/Tensor/TensorReverse.h
new file mode 100644
index 000000000..14e392e36
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorReverse.h
@@ -0,0 +1,288 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Navdeep Jaitly <ndjaitly@google.com>
+// Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_REVERSE_H
+#define EIGEN_CXX11_TENSOR_TENSOR_REVERSE_H
+namespace Eigen {
+
+/** \class TensorReverse
+ * \ingroup CXX11_Tensor_Module
+ *
+ * \brief Tensor reverse class.
+ * Reverses the order of the coefficients along a chosen subset of dimensions.
+ *
+ */
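+//
+// Illustrative example (a sketch using the TensorBase::reverse entry point):
+// reversing a 2x3 tensor along its first dimension only; the boolean array
+// selects which dimensions to flip.
+//
+//   Eigen::Tensor<int, 2> t(2, 3);
+//   t.setValues({{0, 1, 2}, {3, 4, 5}});
+//   Eigen::array<bool, 2> rev{{true, false}};
+//   Eigen::Tensor<int, 2> r = t.reverse(rev);  // {{3, 4, 5}, {0, 1, 2}}
+//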
+namespace internal {
+template<typename ReverseDimensions, typename XprType>
+struct traits<TensorReverseOp<ReverseDimensions,
+ XprType> > : public traits<XprType>
+{
+ typedef typename XprType::Scalar Scalar;
+ typedef traits<XprType> XprTraits;
+ typedef typename XprTraits::StorageKind StorageKind;
+ typedef typename XprTraits::Index Index;
+ typedef typename XprType::Nested Nested;
+ typedef typename remove_reference<Nested>::type _Nested;
+ static const int NumDimensions = XprTraits::NumDimensions;
+ static const int Layout = XprTraits::Layout;
+};
+
+template<typename ReverseDimensions, typename XprType>
+struct eval<TensorReverseOp<ReverseDimensions, XprType>, Eigen::Dense>
+{
+ typedef const TensorReverseOp<ReverseDimensions, XprType>& type;
+};
+
+template<typename ReverseDimensions, typename XprType>
+struct nested<TensorReverseOp<ReverseDimensions, XprType>, 1,
+ typename eval<TensorReverseOp<ReverseDimensions, XprType> >::type>
+{
+ typedef TensorReverseOp<ReverseDimensions, XprType> type;
+};
+
+} // end namespace internal
+
+template<typename ReverseDimensions, typename XprType>
+class TensorReverseOp : public TensorBase<TensorReverseOp<ReverseDimensions,
+ XprType>, WriteAccessors>
+{
+ public:
+ typedef typename Eigen::internal::traits<TensorReverseOp>::Scalar Scalar;
+ typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+ typedef typename Eigen::internal::nested<TensorReverseOp>::type Nested;
+ typedef typename Eigen::internal::traits<TensorReverseOp>::StorageKind
+ StorageKind;
+ typedef typename Eigen::internal::traits<TensorReverseOp>::Index Index;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorReverseOp(
+ const XprType& expr, const ReverseDimensions& reverse_dims)
+ : m_xpr(expr), m_reverse_dims(reverse_dims) { }
+
+ EIGEN_DEVICE_FUNC
+ const ReverseDimensions& reverse() const { return m_reverse_dims; }
+
+ EIGEN_DEVICE_FUNC
+ const typename internal::remove_all<typename XprType::Nested>::type&
+ expression() const { return m_xpr; }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE TensorReverseOp& operator = (const TensorReverseOp& other)
+ {
+ typedef TensorAssignOp<TensorReverseOp, const TensorReverseOp> Assign;
+ Assign assign(*this, other);
+ internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
+ return *this;
+ }
+
+ template<typename OtherDerived>
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE TensorReverseOp& operator = (const OtherDerived& other)
+ {
+ typedef TensorAssignOp<TensorReverseOp, const OtherDerived> Assign;
+ Assign assign(*this, other);
+ internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
+ return *this;
+ }
+
+ protected:
+ typename XprType::Nested m_xpr;
+ const ReverseDimensions m_reverse_dims;
+};
+
+// Eval as rvalue
+template<typename ReverseDimensions, typename ArgType, typename Device>
+struct TensorEvaluator<const TensorReverseOp<ReverseDimensions, ArgType>, Device>
+{
+ typedef TensorReverseOp<ReverseDimensions, ArgType> XprType;
+ typedef typename XprType::Index Index;
+ static const int NumDims = internal::array_size<ReverseDimensions>::value;
+ typedef DSizes<Index, NumDims> Dimensions;
+ typedef typename XprType::Scalar Scalar;
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+ typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
+ static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
+
+ enum {
+ IsAligned = false,
+ PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
+ Layout = TensorEvaluator<ArgType, Device>::Layout,
+ CoordAccess = false, // to be implemented
+ RawAccess = false
+ };
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op,
+ const Device& device)
+ : m_impl(op.expression(), device), m_reverse(op.reverse())
+ {
+ // Reversing a scalar isn't supported yet. It would be a no-op anyway.
+ EIGEN_STATIC_ASSERT((NumDims > 0), YOU_MADE_A_PROGRAMMING_MISTAKE);
+
+ // Compute strides
+ m_dimensions = m_impl.dimensions();
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ m_strides[0] = 1;
+ for (int i = 1; i < NumDims; ++i) {
+ m_strides[i] = m_strides[i-1] * m_dimensions[i-1];
+ }
+ } else {
+ m_strides[NumDims-1] = 1;
+ for (int i = NumDims - 2; i >= 0; --i) {
+ m_strides[i] = m_strides[i+1] * m_dimensions[i+1];
+ }
+ }
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const Dimensions& dimensions() const { return m_dimensions; }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar*) {
+ m_impl.evalSubExprsIfNeeded(NULL);
+ return true;
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
+ m_impl.cleanup();
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index reverseIndex(
+ Index index) const {
+ eigen_assert(index < dimensions().TotalSize());
+ Index inputIndex = 0;
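+ // Decompose the output index into per-dimension coordinates using the
+ // strides, and flip coordinate i to (m_dimensions[i] - 1 - idx) whenever
+ // dimension i is marked for reversal.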
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ for (int i = NumDims - 1; i > 0; --i) {
+ Index idx = index / m_strides[i];
+ index -= idx * m_strides[i];
+ if (m_reverse[i]) {
+ idx = m_dimensions[i] - idx - 1;
+ }
+ inputIndex += idx * m_strides[i];
+ }
+ if (m_reverse[0]) {
+ inputIndex += (m_dimensions[0] - index - 1);
+ } else {
+ inputIndex += index;
+ }
+ } else {
+ for (int i = 0; i < NumDims - 1; ++i) {
+ Index idx = index / m_strides[i];
+ index -= idx * m_strides[i];
+ if (m_reverse[i]) {
+ idx = m_dimensions[i] - idx - 1;
+ }
+ inputIndex += idx * m_strides[i];
+ }
+ if (m_reverse[NumDims-1]) {
+ inputIndex += (m_dimensions[NumDims-1] - index - 1);
+ } else {
+ inputIndex += index;
+ }
+ }
+ return inputIndex;
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(
+ Index index) const {
+ return m_impl.coeff(reverseIndex(index));
+ }
+
+ template<int LoadMode>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ PacketReturnType packet(Index index) const
+ {
+ EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
+ eigen_assert(index+PacketSize-1 < dimensions().TotalSize());
+
+ // TODO(ndjaitly): write a better packing routine that uses
+ // local structure.
+ EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type
+ values[PacketSize];
+ for (int i = 0; i < PacketSize; ++i) {
+ values[i] = coeff(index+i);
+ }
+ PacketReturnType rslt = internal::pload<PacketReturnType>(values);
+ return rslt;
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
+ double compute_cost = NumDims * (2 * TensorOpCost::AddCost<Index>() +
+ 2 * TensorOpCost::MulCost<Index>() +
+ TensorOpCost::DivCost<Index>());
+ for (int i = 0; i < NumDims; ++i) {
+ if (m_reverse[i]) {
+ compute_cost += 2 * TensorOpCost::AddCost<Index>();
+ }
+ }
+ return m_impl.costPerCoeff(vectorized) +
+ TensorOpCost(0, 0, compute_cost, false /* vectorized */, PacketSize);
+ }
+
+ EIGEN_DEVICE_FUNC Scalar* data() const { return NULL; }
+
+ protected:
+ Dimensions m_dimensions;
+ array<Index, NumDims> m_strides;
+ TensorEvaluator<ArgType, Device> m_impl;
+ ReverseDimensions m_reverse;
+};
+
+// Eval as lvalue
+
+template <typename ReverseDimensions, typename ArgType, typename Device>
+struct TensorEvaluator<TensorReverseOp<ReverseDimensions, ArgType>, Device>
+ : public TensorEvaluator<const TensorReverseOp<ReverseDimensions, ArgType>,
+ Device> {
+ typedef TensorEvaluator<const TensorReverseOp<ReverseDimensions, ArgType>,
+ Device> Base;
+ typedef TensorReverseOp<ReverseDimensions, ArgType> XprType;
+ typedef typename XprType::Index Index;
+ static const int NumDims = internal::array_size<ReverseDimensions>::value;
+ typedef DSizes<Index, NumDims> Dimensions;
+
+ enum {
+ IsAligned = false,
+ PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
+ Layout = TensorEvaluator<ArgType, Device>::Layout,
+ CoordAccess = false, // to be implemented
+ RawAccess = false
+ };
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op,
+ const Device& device)
+ : Base(op, device) {}
+
+ typedef typename XprType::Scalar Scalar;
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+ typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
+ static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const Dimensions& dimensions() const { return this->m_dimensions; }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) {
+ return this->m_impl.coeffRef(this->reverseIndex(index));
+ }
+
+ template <int StoreMode> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ void writePacket(Index index, const PacketReturnType& x) {
+ EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
+ eigen_assert(index+PacketSize-1 < dimensions().TotalSize());
+
+ // This code is pilfered from TensorMorphing.h
+ EIGEN_ALIGN_MAX CoeffReturnType values[PacketSize];
+ internal::pstore<CoeffReturnType, PacketReturnType>(values, x);
+ for (int i = 0; i < PacketSize; ++i) {
+ this->coeffRef(index+i) = values[i];
+ }
+ }
+
+};
+
+
+} // end namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_REVERSE_H
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorScan.h b/unsupported/Eigen/CXX11/src/Tensor/TensorScan.h
new file mode 100644
index 000000000..8501466ce
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorScan.h
@@ -0,0 +1,287 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2016 Igor Babuschkin <igor@babuschk.in>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_SCAN_H
+#define EIGEN_CXX11_TENSOR_TENSOR_SCAN_H
+
+namespace Eigen {
+
+namespace internal {
+
+template <typename Op, typename XprType>
+struct traits<TensorScanOp<Op, XprType> >
+ : public traits<XprType> {
+ typedef typename XprType::Scalar Scalar;
+ typedef traits<XprType> XprTraits;
+ typedef typename XprTraits::StorageKind StorageKind;
+ typedef typename XprType::Nested Nested;
+ typedef typename remove_reference<Nested>::type _Nested;
+ static const int NumDimensions = XprTraits::NumDimensions;
+ static const int Layout = XprTraits::Layout;
+};
+
+template<typename Op, typename XprType>
+struct eval<TensorScanOp<Op, XprType>, Eigen::Dense>
+{
+ typedef const TensorScanOp<Op, XprType>& type;
+};
+
+template<typename Op, typename XprType>
+struct nested<TensorScanOp<Op, XprType>, 1,
+ typename eval<TensorScanOp<Op, XprType> >::type>
+{
+ typedef TensorScanOp<Op, XprType> type;
+};
+} // end namespace internal
+
+/** \class TensorScan
+ * \ingroup CXX11_Tensor_Module
+ *
+ * \brief Tensor scan class.
+ * Computes a running (cumulative) reduction, such as a cumulative sum or
+ * product, along a single axis of the input expression.
+ */
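+//
+// Illustrative use through the TensorBase entry points (e.g. cumsum), which
+// build a TensorScanOp under the hood:
+//
+//   Eigen::Tensor<float, 2> t(2, 3);
+//   t.setValues({{1, 2, 3}, {4, 5, 6}});
+//   Eigen::Tensor<float, 2> s = t.cumsum(0);  // {{1, 2, 3}, {5, 7, 9}}
+//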
+template <typename Op, typename XprType>
+class TensorScanOp
+ : public TensorBase<TensorScanOp<Op, XprType>, ReadOnlyAccessors> {
+public:
+ typedef typename Eigen::internal::traits<TensorScanOp>::Scalar Scalar;
+ typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+ typedef typename Eigen::internal::nested<TensorScanOp>::type Nested;
+ typedef typename Eigen::internal::traits<TensorScanOp>::StorageKind StorageKind;
+ typedef typename Eigen::internal::traits<TensorScanOp>::Index Index;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorScanOp(
+ const XprType& expr, const Index& axis, bool exclusive = false, const Op& op = Op())
+ : m_expr(expr), m_axis(axis), m_accumulator(op), m_exclusive(exclusive) {}
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const Index axis() const { return m_axis; }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const XprType& expression() const { return m_expr; }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const Op accumulator() const { return m_accumulator; }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ bool exclusive() const { return m_exclusive; }
+
+protected:
+ typename XprType::Nested m_expr;
+ const Index m_axis;
+ const Op m_accumulator;
+ const bool m_exclusive;
+};
+
+template <typename Self, typename Reducer, typename Device>
+struct ScanLauncher;
+
+// Eval as rvalue
+template <typename Op, typename ArgType, typename Device>
+struct TensorEvaluator<const TensorScanOp<Op, ArgType>, Device> {
+
+ typedef TensorScanOp<Op, ArgType> XprType;
+ typedef typename XprType::Index Index;
+ static const int NumDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value;
+ typedef DSizes<Index, NumDims> Dimensions;
+ typedef typename internal::remove_const<typename XprType::Scalar>::type Scalar;
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+ typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
+ typedef TensorEvaluator<const TensorScanOp<Op, ArgType>, Device> Self;
+
+ enum {
+ IsAligned = false,
+ PacketAccess = (internal::unpacket_traits<PacketReturnType>::size > 1),
+ BlockAccess = false,
+ Layout = TensorEvaluator<ArgType, Device>::Layout,
+ CoordAccess = false,
+ RawAccess = true
+ };
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op,
+ const Device& device)
+ : m_impl(op.expression(), device),
+ m_device(device),
+ m_exclusive(op.exclusive()),
+ m_accumulator(op.accumulator()),
+ m_size(m_impl.dimensions()[op.axis()]),
+ m_stride(1),
+ m_output(NULL) {
+
+ // Accumulating a scalar isn't supported.
+ EIGEN_STATIC_ASSERT((NumDims > 0), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ eigen_assert(op.axis() >= 0 && op.axis() < NumDims);
+
+ // Compute stride of scan axis
+ const Dimensions& dims = m_impl.dimensions();
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ for (int i = 0; i < op.axis(); ++i) {
+ m_stride = m_stride * dims[i];
+ }
+ } else {
+ for (int i = NumDims - 1; i > op.axis(); --i) {
+ m_stride = m_stride * dims[i];
+ }
+ }
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const {
+ return m_impl.dimensions();
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Index& stride() const {
+ return m_stride;
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Index& size() const {
+ return m_size;
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Op& accumulator() const {
+ return m_accumulator;
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool exclusive() const {
+ return m_exclusive;
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const TensorEvaluator<ArgType, Device>& inner() const {
+ return m_impl;
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Device& device() const {
+ return m_device;
+ }
+
+ EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* data) {
+ m_impl.evalSubExprsIfNeeded(NULL);
+ ScanLauncher<Self, Op, Device> launcher;
+ if (data) {
+ launcher(*this, data);
+ return false;
+ }
+
+ const Index total_size = internal::array_prod(dimensions());
+ m_output = static_cast<CoeffReturnType*>(m_device.allocate(total_size * sizeof(Scalar)));
+ launcher(*this, m_output);
+ return true;
+ }
+
+ template<int LoadMode>
+ EIGEN_DEVICE_FUNC PacketReturnType packet(Index index) const {
+ return internal::ploadt<PacketReturnType, LoadMode>(m_output + index);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType* data() const
+ {
+ return m_output;
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
+ {
+ return m_output[index];
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool) const {
+ return TensorOpCost(sizeof(CoeffReturnType), 0, 0);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
+ if (m_output != NULL) {
+ m_device.deallocate(m_output);
+ m_output = NULL;
+ }
+ m_impl.cleanup();
+ }
+
+protected:
+ TensorEvaluator<ArgType, Device> m_impl;
+ const Device& m_device;
+ const bool m_exclusive;
+ Op m_accumulator;
+ const Index m_size;
+ Index m_stride;
+ CoeffReturnType* m_output;
+};
+
+// CPU implementation of scan
+// TODO(ibab) This single-threaded implementation should be parallelized,
+// at least by running multiple scans at the same time.
+template <typename Self, typename Reducer, typename Device>
+struct ScanLauncher {
+ void operator()(Self& self, typename Self::CoeffReturnType *data) {
+ Index total_size = internal::array_prod(self.dimensions());
+
+ // We fix the index along the scan axis to 0 and perform a
+ // scan per remaining entry. The iteration is split into two nested
+ // loops to avoid an integer division by keeping track of each idx1 and idx2.
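+ // For instance, scanning axis 1 of a ColMajor 2x3 tensor gives
+ // stride() == 2 and size() == 3: idx1 only takes the value 0, idx2 takes
+ // 0 and 1, and each scan then visits the offsets {o, o + 2, o + 4}.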
+ for (Index idx1 = 0; idx1 < total_size; idx1 += self.stride() * self.size()) {
+ for (Index idx2 = 0; idx2 < self.stride(); idx2++) {
+ // Calculate the starting offset for the scan
+ Index offset = idx1 + idx2;
+
+ // Compute the scan along the axis, starting at the calculated offset
+ typename Self::CoeffReturnType accum = self.accumulator().initialize();
+ for (Index idx3 = 0; idx3 < self.size(); idx3++) {
+ Index curr = offset + idx3 * self.stride();
+
+ if (self.exclusive()) {
+ data[curr] = self.accumulator().finalize(accum);
+ self.accumulator().reduce(self.inner().coeff(curr), &accum);
+ } else {
+ self.accumulator().reduce(self.inner().coeff(curr), &accum);
+ data[curr] = self.accumulator().finalize(accum);
+ }
+ }
+ }
+ }
+ }
+};
+
+#if defined(EIGEN_USE_GPU) && defined(__CUDACC__)
+
+// GPU implementation of scan
+// TODO(ibab) This placeholder implementation performs multiple scans in
+// parallel, but it would be better to use a parallel scan algorithm and
+// optimize memory access.
+template <typename Self, typename Reducer>
+__global__ void ScanKernel(Self self, Index total_size, typename Self::CoeffReturnType* data) {
+ // Compute offset as in the CPU version
+ Index val = threadIdx.x + blockIdx.x * blockDim.x;
+ Index offset = (val / self.stride()) * self.stride() * self.size() + val % self.stride();
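+ // Each thread therefore owns one scan line: val enumerates the (idx1, idx2)
+ // pairs of the CPU version, and offset is the first element of that line
+ // along the scan axis.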
+
+ if (offset + (self.size() - 1) * self.stride() < total_size) {
+ // Compute the scan along the axis, starting at the calculated offset
+ typename Self::CoeffReturnType accum = self.accumulator().initialize();
+ for (Index idx = 0; idx < self.size(); idx++) {
+ Index curr = offset + idx * self.stride();
+ if (self.exclusive()) {
+ data[curr] = self.accumulator().finalize(accum);
+ self.accumulator().reduce(self.inner().coeff(curr), &accum);
+ } else {
+ self.accumulator().reduce(self.inner().coeff(curr), &accum);
+ data[curr] = self.accumulator().finalize(accum);
+ }
+ }
+ }
+ __syncthreads();
+}
+
+template <typename Self, typename Reducer>
+struct ScanLauncher<Self, Reducer, GpuDevice> {
+ void operator()(const Self& self, typename Self::CoeffReturnType* data) {
+ Index total_size = internal::array_prod(self.dimensions());
+ Index num_blocks = (total_size / self.size() + 63) / 64;
+ Index block_size = 64;
+ LAUNCH_CUDA_KERNEL((ScanKernel<Self, Reducer>), num_blocks, block_size, 0, self.device(), self, total_size, data);
+ }
+};
+#endif // EIGEN_USE_GPU && __CUDACC__
+
+} // end namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_SCAN_H
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorShuffling.h b/unsupported/Eigen/CXX11/src/Tensor/TensorShuffling.h
new file mode 100644
index 000000000..113c060e3
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorShuffling.h
@@ -0,0 +1,264 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_SHUFFLING_H
+#define EIGEN_CXX11_TENSOR_TENSOR_SHUFFLING_H
+
+namespace Eigen {
+
+/** \class TensorShuffling
+ * \ingroup CXX11_Tensor_Module
+ *
+ * \brief Tensor shuffling class.
+ * Applies a permutation to the dimensions of a tensor.
+ */
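+//
+// Illustrative example (a sketch using the TensorBase::shuffle entry point):
+// permuting the two dimensions of a rank-2 tensor amounts to a transpose.
+//
+//   Eigen::Tensor<float, 2> t(2, 3);
+//   Eigen::array<int, 2> perm{{1, 0}};
+//   Eigen::Tensor<float, 2> tt = t.shuffle(perm);  // dimensions (3, 2)
+//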
+namespace internal {
+template<typename Shuffle, typename XprType>
+struct traits<TensorShufflingOp<Shuffle, XprType> > : public traits<XprType>
+{
+ typedef typename XprType::Scalar Scalar;
+ typedef traits<XprType> XprTraits;
+ typedef typename XprTraits::StorageKind StorageKind;
+ typedef typename XprTraits::Index Index;
+ typedef typename XprType::Nested Nested;
+ typedef typename remove_reference<Nested>::type _Nested;
+ static const int NumDimensions = XprTraits::NumDimensions;
+ static const int Layout = XprTraits::Layout;
+};
+
+template<typename Shuffle, typename XprType>
+struct eval<TensorShufflingOp<Shuffle, XprType>, Eigen::Dense>
+{
+ typedef const TensorShufflingOp<Shuffle, XprType>& type;
+};
+
+template<typename Shuffle, typename XprType>
+struct nested<TensorShufflingOp<Shuffle, XprType>, 1, typename eval<TensorShufflingOp<Shuffle, XprType> >::type>
+{
+ typedef TensorShufflingOp<Shuffle, XprType> type;
+};
+
+} // end namespace internal
+
+
+
+template<typename Shuffle, typename XprType>
+class TensorShufflingOp : public TensorBase<TensorShufflingOp<Shuffle, XprType> >
+{
+ public:
+ typedef typename Eigen::internal::traits<TensorShufflingOp>::Scalar Scalar;
+ typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+ typedef typename Eigen::internal::nested<TensorShufflingOp>::type Nested;
+ typedef typename Eigen::internal::traits<TensorShufflingOp>::StorageKind StorageKind;
+ typedef typename Eigen::internal::traits<TensorShufflingOp>::Index Index;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorShufflingOp(const XprType& expr, const Shuffle& shuffle)
+ : m_xpr(expr), m_shuffle(shuffle) {}
+
+ EIGEN_DEVICE_FUNC
+ const Shuffle& shufflePermutation() const { return m_shuffle; }
+
+ EIGEN_DEVICE_FUNC
+ const typename internal::remove_all<typename XprType::Nested>::type&
+ expression() const { return m_xpr; }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE TensorShufflingOp& operator = (const TensorShufflingOp& other)
+ {
+ typedef TensorAssignOp<TensorShufflingOp, const TensorShufflingOp> Assign;
+ Assign assign(*this, other);
+ internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
+ return *this;
+ }
+
+ template<typename OtherDerived>
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE TensorShufflingOp& operator = (const OtherDerived& other)
+ {
+ typedef TensorAssignOp<TensorShufflingOp, const OtherDerived> Assign;
+ Assign assign(*this, other);
+ internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
+ return *this;
+ }
+
+ protected:
+ typename XprType::Nested m_xpr;
+ const Shuffle m_shuffle;
+};
+
+
+// Eval as rvalue
+template<typename Shuffle, typename ArgType, typename Device>
+struct TensorEvaluator<const TensorShufflingOp<Shuffle, ArgType>, Device>
+{
+ typedef TensorShufflingOp<Shuffle, ArgType> XprType;
+ typedef typename XprType::Index Index;
+ static const int NumDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value;
+ typedef DSizes<Index, NumDims> Dimensions;
+ typedef typename XprType::Scalar Scalar;
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+ typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
+ static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
+
+ enum {
+ IsAligned = false,
+ PacketAccess = (internal::packet_traits<Scalar>::size > 1),
+ Layout = TensorEvaluator<ArgType, Device>::Layout,
+ CoordAccess = false, // to be implemented
+ RawAccess = false
+ };
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
+ : m_impl(op.expression(), device)
+ {
+ const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
+ const Shuffle& shuffle = op.shufflePermutation();
+ for (int i = 0; i < NumDims; ++i) {
+ m_dimensions[i] = input_dims[shuffle[i]];
+ }
+
+ array<Index, NumDims> inputStrides;
+
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ inputStrides[0] = 1;
+ m_outputStrides[0] = 1;
+ for (int i = 1; i < NumDims; ++i) {
+ inputStrides[i] = inputStrides[i - 1] * input_dims[i - 1];
+ m_outputStrides[i] = m_outputStrides[i - 1] * m_dimensions[i - 1];
+ }
+ } else {
+ inputStrides[NumDims - 1] = 1;
+ m_outputStrides[NumDims - 1] = 1;
+ for (int i = NumDims - 2; i >= 0; --i) {
+ inputStrides[i] = inputStrides[i + 1] * input_dims[i + 1];
+ m_outputStrides[i] = m_outputStrides[i + 1] * m_dimensions[i + 1];
+ }
+ }
+
+ for (int i = 0; i < NumDims; ++i) {
+ m_inputStrides[i] = inputStrides[shuffle[i]];
+ }
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* /*data*/) {
+ m_impl.evalSubExprsIfNeeded(NULL);
+ return true;
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
+ m_impl.cleanup();
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
+ {
+ return m_impl.coeff(srcCoeff(index));
+ }
+
+ template<int LoadMode>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
+ {
+ EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
+ eigen_assert(index+PacketSize-1 < dimensions().TotalSize());
+
+ EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize];
+ for (int i = 0; i < PacketSize; ++i) {
+ values[i] = coeff(index+i);
+ }
+ PacketReturnType rslt = internal::pload<PacketReturnType>(values);
+ return rslt;
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
+ const double compute_cost = NumDims * (2 * TensorOpCost::AddCost<Index>() +
+ 2 * TensorOpCost::MulCost<Index>() +
+ TensorOpCost::DivCost<Index>());
+ return m_impl.costPerCoeff(vectorized) +
+ TensorOpCost(0, 0, compute_cost, false /* vectorized */, PacketSize);
+ }
+
+ EIGEN_DEVICE_FUNC Scalar* data() const { return NULL; }
+
+ protected:
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index srcCoeff(Index index) const {
+ Index inputIndex = 0;
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ for (int i = NumDims - 1; i > 0; --i) {
+ const Index idx = index / m_outputStrides[i];
+ inputIndex += idx * m_inputStrides[i];
+ index -= idx * m_outputStrides[i];
+ }
+ return inputIndex + index * m_inputStrides[0];
+ } else {
+ for (int i = 0; i < NumDims - 1; ++i) {
+ const Index idx = index / m_outputStrides[i];
+ inputIndex += idx * m_inputStrides[i];
+ index -= idx * m_outputStrides[i];
+ }
+ return inputIndex + index * m_inputStrides[NumDims - 1];
+ }
+ }
+
+ Dimensions m_dimensions;
+ array<Index, NumDims> m_outputStrides;
+ array<Index, NumDims> m_inputStrides;
+ TensorEvaluator<ArgType, Device> m_impl;
+};
+
+
+// Eval as lvalue
+template<typename Shuffle, typename ArgType, typename Device>
+struct TensorEvaluator<TensorShufflingOp<Shuffle, ArgType>, Device>
+ : public TensorEvaluator<const TensorShufflingOp<Shuffle, ArgType>, Device>
+{
+ typedef TensorEvaluator<const TensorShufflingOp<Shuffle, ArgType>, Device> Base;
+
+ typedef TensorShufflingOp<Shuffle, ArgType> XprType;
+ typedef typename XprType::Index Index;
+ static const int NumDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value;
+ typedef DSizes<Index, NumDims> Dimensions;
+ typedef typename XprType::Scalar Scalar;
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+ typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
+ static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
+
+ enum {
+ IsAligned = false,
+ PacketAccess = (internal::packet_traits<Scalar>::size > 1),
+ RawAccess = false
+ };
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
+ : Base(op, device)
+ { }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType& coeffRef(Index index)
+ {
+ return this->m_impl.coeffRef(this->srcCoeff(index));
+ }
+
+ template <int StoreMode> EIGEN_STRONG_INLINE
+ void writePacket(Index index, const PacketReturnType& x)
+ {
+ EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
+
+ EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize];
+ internal::pstore<CoeffReturnType, PacketReturnType>(values, x);
+ for (int i = 0; i < PacketSize; ++i) {
+ this->coeffRef(index+i) = values[i];
+ }
+ }
+};
+
+
+} // end namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_SHUFFLING_H
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorStorage.h b/unsupported/Eigen/CXX11/src/Tensor/TensorStorage.h
new file mode 100644
index 000000000..2854a4a17
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorStorage.h
@@ -0,0 +1,146 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2013 Christian Seiler <christian@iwakd.de>
+// Copyright (C) 2014-2015 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSORSTORAGE_H
+#define EIGEN_CXX11_TENSOR_TENSORSTORAGE_H
+
+#ifdef EIGEN_TENSOR_STORAGE_CTOR_PLUGIN
+ #define EIGEN_INTERNAL_TENSOR_STORAGE_CTOR_PLUGIN EIGEN_TENSOR_STORAGE_CTOR_PLUGIN;
+#else
+ #define EIGEN_INTERNAL_TENSOR_STORAGE_CTOR_PLUGIN
+#endif
+
+namespace Eigen {
+
+/** \internal
+ *
+ * \class TensorStorage
+ * \ingroup CXX11_Tensor_Module
+ *
+ * \brief Stores the data of a tensor
+ *
+ * This class stores the data of fixed-size, dynamic-size or mixed tensors
+ * as compactly as possible.
+ *
+ * \sa Tensor
+ */
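+//
+// Internal usage sketch (illustrative): the dynamic specialization below owns
+// a heap allocation sized to the product of its dimensions, e.g.
+//
+//   TensorStorage<float, DSizes<DenseIndex, 2>, 0> storage(6, {{2, 3}});
+//   float* p = storage.data();  // 6 floats, aligned according to Options_
+//
+// while the fixed-size specialization stores its coefficients inline and
+// performs no heap allocation at all.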
+template<typename T, typename Dimensions, int Options_> class TensorStorage;
+
+
+// Pure fixed-size storage
+template<typename T, int Options_, typename FixedDimensions>
+class TensorStorage<T, FixedDimensions, Options_>
+{
+ private:
+ static const std::size_t Size = FixedDimensions::total_size;
+
+ // Allocate an array of size at least one to prevent compiler warnings.
+ static const std::size_t MinSize = max_n_1<Size>::size;
+ EIGEN_ALIGN_MAX T m_data[MinSize];
+
+ FixedDimensions m_dimensions;
+
+ public:
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE TensorStorage() {
+ }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE T *data() { return m_data; }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const T *data() const { return m_data; }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const FixedDimensions& dimensions() const { return m_dimensions; }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE DenseIndex size() const { return m_dimensions.TotalSize(); }
+};
+
+
+// pure dynamic
+template<typename T, int Options_, typename IndexType, int NumIndices_>
+class TensorStorage<T, DSizes<IndexType, NumIndices_>, Options_>
+{
+ public:
+ typedef IndexType Index;
+ typedef DSizes<IndexType, NumIndices_> Dimensions;
+ typedef TensorStorage<T, DSizes<IndexType, NumIndices_>, Options_> Self;
+
+ EIGEN_DEVICE_FUNC TensorStorage() : m_data(0), m_dimensions() {
+ if (NumIndices_ == 0) {
+ m_data = internal::conditional_aligned_new_auto<T,(Options_&DontAlign)==0>(1);
+ }
+ }
+ EIGEN_DEVICE_FUNC TensorStorage(internal::constructor_without_unaligned_array_assert)
+ : m_data(0), m_dimensions(internal::template repeat<NumIndices_, Index>(0)) {}
+ EIGEN_DEVICE_FUNC TensorStorage(Index size, const array<Index, NumIndices_>& dimensions)
+ : m_data(internal::conditional_aligned_new_auto<T,(Options_&DontAlign)==0>(size)), m_dimensions(dimensions)
+ { EIGEN_INTERNAL_TENSOR_STORAGE_CTOR_PLUGIN }
+
+#if EIGEN_HAS_VARIADIC_TEMPLATES
+ template <typename... DenseIndex>
+ EIGEN_DEVICE_FUNC TensorStorage(DenseIndex... indices) : m_dimensions(indices...) {
+ m_data = internal::conditional_aligned_new_auto<T,(Options_&DontAlign)==0>(internal::array_prod(m_dimensions));
+ }
+#endif
+
+ EIGEN_DEVICE_FUNC TensorStorage(const Self& other)
+ : m_data(internal::conditional_aligned_new_auto<T,(Options_&DontAlign)==0>(internal::array_prod(other.m_dimensions)))
+ , m_dimensions(other.m_dimensions)
+ {
+ internal::smart_copy(other.m_data, other.m_data+internal::array_prod(other.m_dimensions), m_data);
+ }
+ EIGEN_DEVICE_FUNC Self& operator=(const Self& other)
+ {
+ if (this != &other) {
+ Self tmp(other);
+ this->swap(tmp);
+ }
+ return *this;
+ }
+
+ EIGEN_DEVICE_FUNC ~TensorStorage() { internal::conditional_aligned_delete_auto<T,(Options_&DontAlign)==0>(m_data, internal::array_prod(m_dimensions)); }
+ EIGEN_DEVICE_FUNC void swap(Self& other)
+ { numext::swap(m_data,other.m_data); numext::swap(m_dimensions,other.m_dimensions); }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const {return m_dimensions;}
+
+ EIGEN_DEVICE_FUNC void resize(Index size, const array<Index, NumIndices_>& nbDimensions)
+ {
+ const Index currentSz = internal::array_prod(m_dimensions);
+ if(size != currentSz)
+ {
+ internal::conditional_aligned_delete_auto<T,(Options_&DontAlign)==0>(m_data, currentSz);
+ if (size)
+ m_data = internal::conditional_aligned_new_auto<T,(Options_&DontAlign)==0>(size);
+ else if (NumIndices_ == 0) {
+ m_data = internal::conditional_aligned_new_auto<T,(Options_&DontAlign)==0>(1);
+ }
+ else
+ m_data = 0;
+ EIGEN_INTERNAL_TENSOR_STORAGE_CTOR_PLUGIN
+ }
+ m_dimensions = nbDimensions;
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T *data() { return m_data; }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const T *data() const { return m_data; }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index size() const { return m_dimensions.TotalSize(); }
+
+ private:
+ T *m_data;
+ Dimensions m_dimensions;
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSORSTORAGE_H
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorStriding.h b/unsupported/Eigen/CXX11/src/Tensor/TensorStriding.h
new file mode 100644
index 000000000..6c35bfdb6
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorStriding.h
@@ -0,0 +1,338 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_STRIDING_H
+#define EIGEN_CXX11_TENSOR_TENSOR_STRIDING_H
+
+namespace Eigen {
+
+/** \class TensorStriding
+ * \ingroup CXX11_Tensor_Module
+ *
+ * \brief Tensor striding class.
+ * Samples a tensor with a fixed stride along each dimension.
+ */
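+//
+// Illustrative example (a sketch using the TensorBase::stride entry point):
+// keeping every 2nd row and every 4th column of a 6x8 tensor. Each output
+// dimension is ceil(input_dim / stride).
+//
+//   Eigen::Tensor<float, 2> t(6, 8);
+//   Eigen::array<Eigen::DenseIndex, 2> strides{{2, 4}};
+//   Eigen::Tensor<float, 2> s = t.stride(strides);  // dimensions (3, 2)
+//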
+namespace internal {
+template<typename Strides, typename XprType>
+struct traits<TensorStridingOp<Strides, XprType> > : public traits<XprType>
+{
+ typedef typename XprType::Scalar Scalar;
+ typedef traits<XprType> XprTraits;
+ typedef typename XprTraits::StorageKind StorageKind;
+ typedef typename XprTraits::Index Index;
+ typedef typename XprType::Nested Nested;
+ typedef typename remove_reference<Nested>::type _Nested;
+ static const int NumDimensions = XprTraits::NumDimensions;
+ static const int Layout = XprTraits::Layout;
+};
+
+template<typename Strides, typename XprType>
+struct eval<TensorStridingOp<Strides, XprType>, Eigen::Dense>
+{
+ typedef const TensorStridingOp<Strides, XprType>& type;
+};
+
+template<typename Strides, typename XprType>
+struct nested<TensorStridingOp<Strides, XprType>, 1, typename eval<TensorStridingOp<Strides, XprType> >::type>
+{
+ typedef TensorStridingOp<Strides, XprType> type;
+};
+
+} // end namespace internal
+
+
+
+template<typename Strides, typename XprType>
+class TensorStridingOp : public TensorBase<TensorStridingOp<Strides, XprType> >
+{
+ public:
+ typedef typename Eigen::internal::traits<TensorStridingOp>::Scalar Scalar;
+ typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+ typedef typename Eigen::internal::nested<TensorStridingOp>::type Nested;
+ typedef typename Eigen::internal::traits<TensorStridingOp>::StorageKind StorageKind;
+ typedef typename Eigen::internal::traits<TensorStridingOp>::Index Index;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorStridingOp(const XprType& expr, const Strides& dims)
+ : m_xpr(expr), m_dims(dims) {}
+
+ EIGEN_DEVICE_FUNC
+ const Strides& strides() const { return m_dims; }
+
+ EIGEN_DEVICE_FUNC
+ const typename internal::remove_all<typename XprType::Nested>::type&
+ expression() const { return m_xpr; }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE TensorStridingOp& operator = (const TensorStridingOp& other)
+ {
+ typedef TensorAssignOp<TensorStridingOp, const TensorStridingOp> Assign;
+ Assign assign(*this, other);
+ internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
+ return *this;
+ }
+
+ template<typename OtherDerived>
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE TensorStridingOp& operator = (const OtherDerived& other)
+ {
+ typedef TensorAssignOp<TensorStridingOp, const OtherDerived> Assign;
+ Assign assign(*this, other);
+ internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
+ return *this;
+ }
+
+ protected:
+ typename XprType::Nested m_xpr;
+ const Strides m_dims;
+};
+
+
+// Eval as rvalue
+template<typename Strides, typename ArgType, typename Device>
+struct TensorEvaluator<const TensorStridingOp<Strides, ArgType>, Device>
+{
+ typedef TensorStridingOp<Strides, ArgType> XprType;
+ typedef typename XprType::Index Index;
+ static const int NumDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value;
+ typedef DSizes<Index, NumDims> Dimensions;
+ typedef typename XprType::Scalar Scalar;
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+ typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
+ static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
+
+ enum {
+ IsAligned = /*TensorEvaluator<ArgType, Device>::IsAligned*/false,
+ PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
+ Layout = TensorEvaluator<ArgType, Device>::Layout,
+ CoordAccess = false, // to be implemented
+ RawAccess = false
+ };
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
+ : m_impl(op.expression(), device)
+ {
+ m_dimensions = m_impl.dimensions();
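+ // Each output dimension is ceil(input_dim / stride): the evaluator samples
+ // one coefficient every op.strides()[i] steps along dimension i.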
+ for (int i = 0; i < NumDims; ++i) {
+ m_dimensions[i] = ceilf(static_cast<float>(m_dimensions[i]) / op.strides()[i]);
+ }
+
+ const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ m_outputStrides[0] = 1;
+ m_inputStrides[0] = 1;
+ for (int i = 1; i < NumDims; ++i) {
+ m_outputStrides[i] = m_outputStrides[i-1] * m_dimensions[i-1];
+ m_inputStrides[i] = m_inputStrides[i-1] * input_dims[i-1];
+ m_inputStrides[i-1] *= op.strides()[i-1];
+ }
+ m_inputStrides[NumDims-1] *= op.strides()[NumDims-1];
+ } else { // RowMajor
+ m_outputStrides[NumDims-1] = 1;
+ m_inputStrides[NumDims-1] = 1;
+ for (int i = NumDims - 2; i >= 0; --i) {
+ m_outputStrides[i] = m_outputStrides[i+1] * m_dimensions[i+1];
+ m_inputStrides[i] = m_inputStrides[i+1] * input_dims[i+1];
+ m_inputStrides[i+1] *= op.strides()[i+1];
+ }
+ m_inputStrides[0] *= op.strides()[0];
+ }
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* /*data*/) {
+ m_impl.evalSubExprsIfNeeded(NULL);
+ return true;
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
+ m_impl.cleanup();
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
+ {
+ return m_impl.coeff(srcCoeff(index));
+ }
+
+ template<int LoadMode>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
+ {
+ EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
+ eigen_assert(index+PacketSize-1 < dimensions().TotalSize());
+
+ Index inputIndices[] = {0, 0};
+ Index indices[] = {index, index + PacketSize - 1};
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ for (int i = NumDims - 1; i > 0; --i) {
+ const Index idx0 = indices[0] / m_outputStrides[i];
+ const Index idx1 = indices[1] / m_outputStrides[i];
+ inputIndices[0] += idx0 * m_inputStrides[i];
+ inputIndices[1] += idx1 * m_inputStrides[i];
+ indices[0] -= idx0 * m_outputStrides[i];
+ indices[1] -= idx1 * m_outputStrides[i];
+ }
+ inputIndices[0] += indices[0] * m_inputStrides[0];
+ inputIndices[1] += indices[1] * m_inputStrides[0];
+ } else { // RowMajor
+ for (int i = 0; i < NumDims - 1; ++i) {
+ const Index idx0 = indices[0] / m_outputStrides[i];
+ const Index idx1 = indices[1] / m_outputStrides[i];
+ inputIndices[0] += idx0 * m_inputStrides[i];
+ inputIndices[1] += idx1 * m_inputStrides[i];
+ indices[0] -= idx0 * m_outputStrides[i];
+ indices[1] -= idx1 * m_outputStrides[i];
+ }
+ inputIndices[0] += indices[0] * m_inputStrides[NumDims-1];
+ inputIndices[1] += indices[1] * m_inputStrides[NumDims-1];
+ }
+ if (inputIndices[1] - inputIndices[0] == PacketSize - 1) {
+ PacketReturnType rslt = m_impl.template packet<Unaligned>(inputIndices[0]);
+ return rslt;
+ }
+ else {
+ EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize];
+ values[0] = m_impl.coeff(inputIndices[0]);
+ values[PacketSize-1] = m_impl.coeff(inputIndices[1]);
+ for (int i = 1; i < PacketSize-1; ++i) {
+ values[i] = coeff(index+i);
+ }
+ PacketReturnType rslt = internal::pload<PacketReturnType>(values);
+ return rslt;
+ }
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
+ double compute_cost = (NumDims - 1) * (TensorOpCost::AddCost<Index>() +
+ TensorOpCost::MulCost<Index>() +
+ TensorOpCost::DivCost<Index>()) +
+ TensorOpCost::MulCost<Index>();
+ if (vectorized) {
+ compute_cost *= 2; // packet() computes two indices
+ }
+ const int innerDim = (static_cast<int>(Layout) == static_cast<int>(ColMajor)) ? 0 : (NumDims - 1);
+ return m_impl.costPerCoeff(vectorized && m_inputStrides[innerDim] == 1) +
+ // Computation is not vectorized per se, but it is done once per packet.
+ TensorOpCost(0, 0, compute_cost, vectorized, PacketSize);
+ }
+
+ EIGEN_DEVICE_FUNC Scalar* data() const { return NULL; }
+
+ protected:
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index srcCoeff(Index index) const
+ {
+ Index inputIndex = 0;
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ for (int i = NumDims - 1; i > 0; --i) {
+ const Index idx = index / m_outputStrides[i];
+ inputIndex += idx * m_inputStrides[i];
+ index -= idx * m_outputStrides[i];
+ }
+ inputIndex += index * m_inputStrides[0];
+ } else { // RowMajor
+ for (int i = 0; i < NumDims - 1; ++i) {
+ const Index idx = index / m_outputStrides[i];
+ inputIndex += idx * m_inputStrides[i];
+ index -= idx * m_outputStrides[i];
+ }
+ inputIndex += index * m_inputStrides[NumDims-1];
+ }
+ return inputIndex;
+ }
+
+ Dimensions m_dimensions;
+ array<Index, NumDims> m_outputStrides;
+ array<Index, NumDims> m_inputStrides;
+ TensorEvaluator<ArgType, Device> m_impl;
+};
+
+
+// Eval as lvalue
+template<typename Strides, typename ArgType, typename Device>
+struct TensorEvaluator<TensorStridingOp<Strides, ArgType>, Device>
+ : public TensorEvaluator<const TensorStridingOp<Strides, ArgType>, Device>
+{
+ typedef TensorStridingOp<Strides, ArgType> XprType;
+ typedef TensorEvaluator<const XprType, Device> Base;
+ // typedef typename XprType::Index Index;
+ static const int NumDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value;
+ // typedef DSizes<Index, NumDims> Dimensions;
+
+ enum {
+ IsAligned = /*TensorEvaluator<ArgType, Device>::IsAligned*/false,
+ PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
+ Layout = TensorEvaluator<ArgType, Device>::Layout,
+ CoordAccess = false, // to be implemented
+ RawAccess = false
+ };
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
+ : Base(op, device) { }
+
+ typedef typename XprType::Index Index;
+ typedef typename XprType::Scalar Scalar;
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+ typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
+ static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index)
+ {
+ return this->m_impl.coeffRef(this->srcCoeff(index));
+ }
+
+ template <int StoreMode> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ void writePacket(Index index, const PacketReturnType& x)
+ {
+ EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
+ eigen_assert(index+PacketSize-1 < this->dimensions().TotalSize());
+
+ Index inputIndices[] = {0, 0};
+ Index indices[] = {index, index + PacketSize - 1};
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ for (int i = NumDims - 1; i > 0; --i) {
+ const Index idx0 = indices[0] / this->m_outputStrides[i];
+ const Index idx1 = indices[1] / this->m_outputStrides[i];
+ inputIndices[0] += idx0 * this->m_inputStrides[i];
+ inputIndices[1] += idx1 * this->m_inputStrides[i];
+ indices[0] -= idx0 * this->m_outputStrides[i];
+ indices[1] -= idx1 * this->m_outputStrides[i];
+ }
+ inputIndices[0] += indices[0] * this->m_inputStrides[0];
+ inputIndices[1] += indices[1] * this->m_inputStrides[0];
+ } else { // RowMajor
+ for (int i = 0; i < NumDims - 1; ++i) {
+ const Index idx0 = indices[0] / this->m_outputStrides[i];
+ const Index idx1 = indices[1] / this->m_outputStrides[i];
+ inputIndices[0] += idx0 * this->m_inputStrides[i];
+ inputIndices[1] += idx1 * this->m_inputStrides[i];
+ indices[0] -= idx0 * this->m_outputStrides[i];
+ indices[1] -= idx1 * this->m_outputStrides[i];
+ }
+ inputIndices[0] += indices[0] * this->m_inputStrides[NumDims-1];
+ inputIndices[1] += indices[1] * this->m_inputStrides[NumDims-1];
+ }
+ if (inputIndices[1] - inputIndices[0] == PacketSize - 1) {
+ this->m_impl.template writePacket<Unaligned>(inputIndices[0], x);
+ }
+ else {
+ EIGEN_ALIGN_MAX Scalar values[PacketSize];
+ internal::pstore<Scalar, PacketReturnType>(values, x);
+ this->m_impl.coeffRef(inputIndices[0]) = values[0];
+ this->m_impl.coeffRef(inputIndices[1]) = values[PacketSize-1];
+ for (int i = 1; i < PacketSize-1; ++i) {
+ this->coeffRef(index+i) = values[i];
+ }
+ }
+ }
+};
+
+
+} // end namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_STRIDING_H
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorSycl.h b/unsupported/Eigen/CXX11/src/Tensor/TensorSycl.h
new file mode 100644
index 000000000..bb8800d45
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorSycl.h
@@ -0,0 +1,82 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Mehdi Goli Codeplay Software Ltd.
+// Ralph Potter Codeplay Software Ltd.
+// Luke Iwanski Codeplay Software Ltd.
+// Contact: eigen@codeplay.com
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// General include header of SYCL target for Tensor Module
+#ifndef UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSORSYCL_H
+#define UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSORSYCL_H
+
+#ifdef EIGEN_USE_SYCL
+
+// global pointer used to set the global address-space attribute on a type
+template <class T>
+struct MakeGlobalPointer {
+ typedef typename cl::sycl::global_ptr<T>::pointer_t Type;
+};
+
+// local pointer used to set the local address-space attribute on a type
+template <class T>
+struct MakeLocalPointer {
+ typedef typename cl::sycl::local_ptr<T>::pointer_t Type;
+};
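+
+// For instance (illustrative): MakeGlobalPointer<float>::Type resolves to
+// cl::sycl::global_ptr<float>::pointer_t, roughly a float* tagged with the
+// global address space; MakeLocalPointer is the local-memory analogue.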
+
+
+namespace Eigen {
+namespace TensorSycl {
+namespace internal {
+
+/// This struct is used for special expression nodes with no operation (for example TensorAssignOp and TensorSelectOp).
+struct NoOP;
+
+template<bool IsConst, typename T> struct GetType{
+ typedef const T Type;
+};
+template<typename T> struct GetType<false, T>{
+ typedef T Type;
+};
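+// For instance (illustrative): GetType<true, T>::Type is const T, while
+// GetType<false, T>::Type is plain T.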
+
+} // namespace internal
+} // namespace TensorSycl
+} // namespace Eigen
+
+// tuple construction
+#include "TensorSyclTuple.h"
+
+// counts the number of leaf nodes at compile time
+#include "TensorSyclLeafCount.h"
+
+// The PlaceHolder takes the actual expression and replaces the actual data
+// in it with a placeholder. It uses the same pre-order expression tree
+// traversal as the leaf count in order to give the right access number to
+// each node in the expression.
+#include "TensorSyclPlaceHolderExpr.h"
+
+// creation of an accessor tuple from a tuple of SYCL buffers
+#include "TensorSyclExtractAccessor.h"
+
+// this is used to change the address space type in tensor map for GPU
+#include "TensorSyclConvertToDeviceExpression.h"
+
+// this is used to extract the functors
+#include "TensorSyclExtractFunctors.h"
+
+// this is used to construct the expression on the device, re-creating the
+// TensorMap leaves with device pointers
+#include "TensorSyclExprConstructor.h"
+
+// this is used for extracting the tensor reduction
+#include "TensorReductionSycl.h"
+
+// kernel execution using fusion
+#include "TensorSyclRun.h"
+
+#endif // end of EIGEN_USE_SYCL
+#endif // UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSORSYCL_H
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclConvertToDeviceExpression.h b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclConvertToDeviceExpression.h
new file mode 100644
index 000000000..8729c86ee
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclConvertToDeviceExpression.h
@@ -0,0 +1,121 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Mehdi Goli Codeplay Software Ltd.
+// Ralph Potter Codeplay Software Ltd.
+// Luke Iwanski Codeplay Software Ltd.
+// Contact: <eigen@codeplay.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+/*****************************************************************
+ * TensorSyclConvertToDeviceExpression.h
+ *
+ * \brief:
+ * Conversion from host pointer to device pointer
+ * inside leaf nodes of the expression.
+ *
+*****************************************************************/
+
+#ifndef UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSORSYCL_CONVERT_TO_DEVICE_EXPRESSION_HPP
+#define UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSORSYCL_CONVERT_TO_DEVICE_EXPRESSION_HPP
+
+namespace Eigen {
+namespace TensorSycl {
+namespace internal {
+
+/// \struct ConvertToDeviceExpression
+/// \brief This struct is used to convert the MakePointer in the host expression
+/// to the MakeGlobalPointer for the device expression, for the leaf nodes that
+/// contain the pointer. This is needed because the address space of the
+/// pointer T* is different on the host and the device.
+template <typename Expr>
+struct ConvertToDeviceExpression;
+
+template<template<class...> class NonOpCategory, bool IsConst, typename... Args>
+struct NonOpConversion{
+ typedef typename GetType<IsConst, NonOpCategory<typename ConvertToDeviceExpression<Args>::Type...> >::Type Type;
+};
+
+
+template<template<class, template <class> class > class NonOpCategory, bool IsConst, typename Args>
+struct DeviceConvertor{
+ typedef typename GetType<IsConst, NonOpCategory<typename ConvertToDeviceExpression<Args>::Type, MakeGlobalPointer> >::Type Type;
+};
+
+/// specialisation of the \ref ConvertToDeviceExpression struct when the node
+/// type is TensorMap
+#define TENSORMAPCONVERT(CVQual)\
+template <typename Scalar_, int Options_, int Options2_, int NumIndices_, typename IndexType_, template <class> class MakePointer_>\
+struct ConvertToDeviceExpression<CVQual TensorMap<Tensor<Scalar_, NumIndices_, Options_, IndexType_>, Options2_, MakePointer_> > {\
+ typedef CVQual TensorMap<Tensor<Scalar_, NumIndices_, Options_, IndexType_>, Options2_, MakeGlobalPointer> Type;\
+};
+
+TENSORMAPCONVERT(const)
+TENSORMAPCONVERT()
+#undef TENSORMAPCONVERT
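+
+// Sketch of the effect (illustrative): a host-side leaf such as
+//   const TensorMap<Tensor<float, 2>, 0, MakePointer>
+// becomes
+//   const TensorMap<Tensor<float, 2>, 0, MakeGlobalPointer>
+// so that the embedded pointer carries the device global address space.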
+
+/// specialisation of the \ref ConvertToDeviceExpression struct when the node
+/// type is TensorCwiseNullaryOp, TensorCwiseUnaryOp, TensorCwiseBinaryOp, TensorCwiseTernaryOp, TensorBroadcastingOp
+#define CATEGORYCONVERT(CVQual)\
+template <template<class, class...> class Category, typename OP, typename... subExprs>\
+struct ConvertToDeviceExpression<CVQual Category<OP, subExprs...> > {\
+ typedef CVQual Category<OP, typename ConvertToDeviceExpression<subExprs>::Type... > Type;\
+};
+CATEGORYCONVERT(const)
+CATEGORYCONVERT()
+#undef CATEGORYCONVERT
+
+
+/// specialisation of the \ref ConvertToDeviceExpression struct when the node
+/// type is TensorCwiseSelectOp
+#define SELECTOPCONVERT(CVQual, Res)\
+template <typename IfExpr, typename ThenExpr, typename ElseExpr>\
+struct ConvertToDeviceExpression<CVQual TensorSelectOp<IfExpr, ThenExpr, ElseExpr> >\
+: NonOpConversion<TensorSelectOp, Res, IfExpr, ThenExpr, ElseExpr> {};
+SELECTOPCONVERT(const, true)
+SELECTOPCONVERT(, false)
+#undef SELECTOPCONVERT
+
+/// specialisation of the \ref ConvertToDeviceExpression struct when the node
+/// type is TensorAssignOp
+#define ASSIGNCONVERT(CVQual, Res)\
+template <typename LHSExpr, typename RHSExpr>\
+struct ConvertToDeviceExpression<CVQual TensorAssignOp<LHSExpr, RHSExpr> >\
+: NonOpConversion<TensorAssignOp, Res, LHSExpr, RHSExpr>{};
+
+ASSIGNCONVERT(const, true)
+ASSIGNCONVERT(, false)
+#undef ASSIGNCONVERT
+
+/// specialisation of the \ref ConvertToDeviceExpression struct when the node
+/// type is either TensorForcedEvalOp or TensorEvalToOp
+#define KERNELBROKERCONVERT(CVQual, Res, ExprNode)\
+template <typename Expr>\
+struct ConvertToDeviceExpression<CVQual ExprNode<Expr> > \
+: DeviceConvertor<ExprNode, Res, Expr>{};
+
+KERNELBROKERCONVERT(const, true, TensorForcedEvalOp)
+KERNELBROKERCONVERT(, false, TensorForcedEvalOp)
+KERNELBROKERCONVERT(const, true, TensorEvalToOp)
+KERNELBROKERCONVERT(, false, TensorEvalToOp)
+#undef KERNELBROKERCONVERT
+
+/// specialisation of the \ref ConvertToDeviceExpression struct when the node type is TensorReductionOp
+#define KERNELBROKERCONVERTREDUCTION(CVQual)\
+template <typename OP, typename Dim, typename subExpr, template <class> class MakePointer_>\
+struct ConvertToDeviceExpression<CVQual TensorReductionOp<OP, Dim, subExpr, MakePointer_> > {\
+ typedef CVQual TensorReductionOp<OP, Dim, typename ConvertToDeviceExpression<subExpr>::Type, MakeGlobalPointer> Type;\
+};
+
+KERNELBROKERCONVERTREDUCTION(const)
+KERNELBROKERCONVERTREDUCTION()
+#undef KERNELBROKERCONVERTREDUCTION
+
+} // namespace internal
+} // namespace TensorSycl
+} // namespace Eigen
+
+#endif // UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSORSYCL_CONVERT_TO_DEVICE_EXPRESSION_HPP
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExprConstructor.h b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExprConstructor.h
new file mode 100644
index 000000000..7ed3a3a56
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExprConstructor.h
@@ -0,0 +1,239 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Mehdi Goli Codeplay Software Ltd.
+// Ralph Potter Codeplay Software Ltd.
+// Luke Iwanski Codeplay Software Ltd.
+// Contact: <eigen@codeplay.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+/*****************************************************************
+ * TensorSyclExprConstructor.h
+ *
+ * \brief:
+ * This file re-creates an expression on the SYCL device in order
+ * to use the original tensor evaluator.
+ *
+*****************************************************************/
+
+#ifndef UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSORSYCL_EXPR_CONSTRUCTOR_HPP
+#define UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSORSYCL_EXPR_CONSTRUCTOR_HPP
+
+namespace Eigen {
+namespace TensorSycl {
+namespace internal {
+/// This class is used by EvalToOp in order to create an lhs expression which
+/// is a pointer obtained from an accessor on a device-only buffer.
+template <typename PtrType, size_t N, typename... Params>
+struct EvalToLHSConstructor {
+ PtrType expr;
+ EvalToLHSConstructor(const utility::tuple::Tuple<Params...> &t): expr((&(*(utility::tuple::get<N>(t).get_pointer())))) {}
+};
+
+/// \struct ExprConstructor is used to reconstruct the expression on the device
+/// and to recreate it with MakeGlobalPointer, which carries the device address
+/// space for the TensorMap pointers used in the eval function.
+/// It receives the original expression type, the functor of the node, the
+/// tuple of accessors, and the device expression type, and re-instantiates
+/// the expression tree for the device.
+template <typename OrigExpr, typename IndexExpr, typename... Params>
+struct ExprConstructor;
+
+/// specialisation of the \ref ExprConstructor struct when the node type is
+/// TensorMap
+#define TENSORMAP(CVQual)\
+template <typename Scalar_, int Options_, int Options2_, int Options3_, int NumIndices_, typename IndexType_,\
+template <class> class MakePointer_, size_t N, typename... Params>\
+struct ExprConstructor< CVQual TensorMap<Tensor<Scalar_, NumIndices_, Options_, IndexType_>, Options2_, MakeGlobalPointer>,\
+CVQual PlaceHolder<CVQual TensorMap<Tensor<Scalar_, NumIndices_, Options_, IndexType_>, Options3_, MakePointer_>, N>, Params...>{\
+ typedef CVQual TensorMap<Tensor<Scalar_, NumIndices_, Options_, IndexType_>, Options2_, MakeGlobalPointer> Type;\
+ Type expr;\
+ template <typename FuncDetector>\
+ ExprConstructor(FuncDetector &fd, const utility::tuple::Tuple<Params...> &t)\
+ : expr(Type((&(*(utility::tuple::get<N>(t).get_pointer()))), fd.dimensions())) {}\
+};
+
+TENSORMAP(const)
+TENSORMAP()
+#undef TENSORMAP
+
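+/// specialisation of the \ref ExprConstructor struct when the node type is
+/// TensorCwiseNullaryOp, TensorCwiseUnaryOp or TensorBroadcastingOp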
+#define UNARYCATEGORY(CVQual)\
+template <template<class, class> class UnaryCategory, typename OP, typename OrigRHSExpr, typename RHSExpr, typename... Params>\
+struct ExprConstructor<CVQual UnaryCategory<OP, OrigRHSExpr>, CVQual UnaryCategory<OP, RHSExpr>, Params...> {\
+ typedef ExprConstructor<OrigRHSExpr, RHSExpr, Params...> my_type;\
+ my_type rhsExpr;\
+ typedef CVQual UnaryCategory<OP, typename my_type::Type> Type;\
+ Type expr;\
+ template <typename FuncDetector>\
+ ExprConstructor(FuncDetector &funcD, const utility::tuple::Tuple<Params...> &t)\
+ : rhsExpr(funcD.rhsExpr, t), expr(rhsExpr.expr, funcD.func) {}\
+};
+
+UNARYCATEGORY(const)
+UNARYCATEGORY()
+#undef UNARYCATEGORY
+
+/// specialisation of the \ref ExprConstructor struct when the node type is
+/// TensorCwiseBinaryOp
+#define BINARYCATEGORY(CVQual)\
+template <template<class, class, class> class BinaryCategory, typename OP, typename OrigLHSExpr, typename OrigRHSExpr, typename LHSExpr,\
+typename RHSExpr, typename... Params>\
+struct ExprConstructor<CVQual BinaryCategory<OP, OrigLHSExpr, OrigRHSExpr>, CVQual BinaryCategory<OP, LHSExpr, RHSExpr>, Params...> {\
+ typedef ExprConstructor<OrigLHSExpr, LHSExpr, Params...> my_left_type;\
+ typedef ExprConstructor<OrigRHSExpr, RHSExpr, Params...> my_right_type;\
+ typedef CVQual BinaryCategory<OP, typename my_left_type::Type, typename my_right_type::Type> Type;\
+ my_left_type lhsExpr;\
+ my_right_type rhsExpr;\
+ Type expr;\
+ template <typename FuncDetector>\
+ ExprConstructor(FuncDetector &funcD, const utility::tuple::Tuple<Params...> &t)\
+ : lhsExpr(funcD.lhsExpr, t),rhsExpr(funcD.rhsExpr, t), expr(lhsExpr.expr, rhsExpr.expr, funcD.func) {}\
+};
+
+BINARYCATEGORY(const)
+BINARYCATEGORY()
+#undef BINARYCATEGORY
+
+/// specialisation of the \ref ExprConstructor struct when the node type is
+/// TensorCwiseTernaryOp
+#define TERNARYCATEGORY(CVQual)\
+template <template <class, class, class, class> class TernaryCategory, typename OP, typename OrigArg1Expr, typename OrigArg2Expr,typename OrigArg3Expr,\
+typename Arg1Expr, typename Arg2Expr, typename Arg3Expr, typename... Params>\
+struct ExprConstructor<CVQual TernaryCategory<OP, OrigArg1Expr, OrigArg2Expr, OrigArg3Expr>, CVQual TernaryCategory<OP, Arg1Expr, Arg2Expr, Arg3Expr>, Params...> {\
+ typedef ExprConstructor<OrigArg1Expr, Arg1Expr, Params...> my_arg1_type;\
+ typedef ExprConstructor<OrigArg2Expr, Arg2Expr, Params...> my_arg2_type;\
+ typedef ExprConstructor<OrigArg3Expr, Arg3Expr, Params...> my_arg3_type;\
+ typedef CVQual TernaryCategory<OP, typename my_arg1_type::Type, typename my_arg2_type::Type, typename my_arg3_type::Type> Type;\
+ my_arg1_type arg1Expr;\
+ my_arg2_type arg2Expr;\
+ my_arg3_type arg3Expr;\
+ Type expr;\
+ template <typename FuncDetector>\
+ ExprConstructor(FuncDetector &funcD,const utility::tuple::Tuple<Params...> &t)\
+ : arg1Expr(funcD.arg1Expr, t), arg2Expr(funcD.arg2Expr, t), arg3Expr(funcD.arg3Expr, t), expr(arg1Expr.expr, arg2Expr.expr, arg3Expr.expr, funcD.func) {}\
+};
+
+TERNARYCATEGORY(const)
+TERNARYCATEGORY()
+#undef TERNARYCATEGORY
+
+/// specialisation of the \ref ExprConstructor struct when the node type is
+/// TensorCwiseSelectOp
+#define SELECTOP(CVQual)\
+template <typename OrigIfExpr, typename OrigThenExpr, typename OrigElseExpr, typename IfExpr, typename ThenExpr, typename ElseExpr, typename... Params>\
+struct ExprConstructor< CVQual TensorSelectOp<OrigIfExpr, OrigThenExpr, OrigElseExpr>, CVQual TensorSelectOp<IfExpr, ThenExpr, ElseExpr>, Params...> {\
+ typedef ExprConstructor<OrigIfExpr, IfExpr, Params...> my_if_type;\
+ typedef ExprConstructor<OrigThenExpr, ThenExpr, Params...> my_then_type;\
+ typedef ExprConstructor<OrigElseExpr, ElseExpr, Params...> my_else_type;\
+ typedef CVQual TensorSelectOp<typename my_if_type::Type, typename my_then_type::Type, typename my_else_type::Type> Type;\
+ my_if_type ifExpr;\
+ my_then_type thenExpr;\
+ my_else_type elseExpr;\
+ Type expr;\
+ template <typename FuncDetector>\
+ ExprConstructor(FuncDetector &funcD, const utility::tuple::Tuple<Params...> &t)\
+ : ifExpr(funcD.ifExpr, t), thenExpr(funcD.thenExpr, t), elseExpr(funcD.elseExpr, t), expr(ifExpr.expr, thenExpr.expr, elseExpr.expr) {}\
+};
+
+SELECTOP(const)
+SELECTOP()
+#undef SELECTOP
+
+/// specialisation of the \ref ExprConstructor struct when the node type is
+/// const TensorAssignOp
+#define ASSIGN(CVQual)\
+template <typename OrigLHSExpr, typename OrigRHSExpr, typename LHSExpr, typename RHSExpr, typename... Params>\
+struct ExprConstructor<CVQual TensorAssignOp<OrigLHSExpr, OrigRHSExpr>, CVQual TensorAssignOp<LHSExpr, RHSExpr>, Params...> {\
+ typedef ExprConstructor<OrigLHSExpr, LHSExpr, Params...> my_left_type;\
+ typedef ExprConstructor<OrigRHSExpr, RHSExpr, Params...> my_right_type;\
+ typedef CVQual TensorAssignOp<typename my_left_type::Type, typename my_right_type::Type> Type;\
+ my_left_type lhsExpr;\
+ my_right_type rhsExpr;\
+ Type expr;\
+ template <typename FuncDetector>\
+ ExprConstructor(FuncDetector &funcD, const utility::tuple::Tuple<Params...> &t)\
+ : lhsExpr(funcD.lhsExpr, t), rhsExpr(funcD.rhsExpr, t), expr(lhsExpr.expr, rhsExpr.expr) {}\
+};
+
+ASSIGN(const)
+ASSIGN()
+#undef ASSIGN
+
+/// specialisation of the \ref ExprConstructor struct when the node type is
+/// TensorEvalToOp
+#define EVALTO(CVQual)\
+template <typename OrigExpr, typename Expr, typename... Params>\
+struct ExprConstructor<CVQual TensorEvalToOp<OrigExpr, MakeGlobalPointer>, CVQual TensorEvalToOp<Expr>, Params...> {\
+ typedef ExprConstructor<OrigExpr, Expr, Params...> my_expr_type;\
+ typedef typename TensorEvalToOp<OrigExpr, MakeGlobalPointer>::PointerType my_buffer_type;\
+ typedef CVQual TensorEvalToOp<typename my_expr_type::Type, MakeGlobalPointer> Type;\
+ my_expr_type nestedExpression;\
+ EvalToLHSConstructor<my_buffer_type, 0, Params...> buffer;\
+ Type expr;\
+ template <typename FuncDetector>\
+ ExprConstructor(FuncDetector &funcD, const utility::tuple::Tuple<Params...> &t)\
+ : nestedExpression(funcD.rhsExpr, t), buffer(t), expr(buffer.expr, nestedExpression.expr) {}\
+};
+
+EVALTO(const)
+EVALTO()
+#undef EVALTO
+
+/// specialisation of the \ref ExprConstructor struct when the node type is
+/// TensorForcedEvalOp
+#define FORCEDEVAL(CVQual)\
+template <typename OrigExpr, typename DevExpr, size_t N, typename... Params>\
+struct ExprConstructor<CVQual TensorForcedEvalOp<OrigExpr, MakeGlobalPointer>,\
+CVQual PlaceHolder<CVQual TensorForcedEvalOp<DevExpr>, N>, Params...> {\
+ typedef CVQual TensorMap<Tensor<typename TensorForcedEvalOp<DevExpr, MakeGlobalPointer>::Scalar,\
+ TensorForcedEvalOp<DevExpr, MakeGlobalPointer>::NumDimensions, 0, typename TensorForcedEvalOp<DevExpr>::Index>, 0, MakeGlobalPointer> Type;\
+ Type expr;\
+ template <typename FuncDetector>\
+ ExprConstructor(FuncDetector &fd, const utility::tuple::Tuple<Params...> &t)\
+ : expr(Type((&(*(utility::tuple::get<N>(t).get_pointer()))), fd.dimensions())) {}\
+};
+
+FORCEDEVAL(const)
+FORCEDEVAL()
+#undef FORCEDEVAL
+
+template <bool Conds, size_t X, size_t Y> struct ValueCondition {
+  static const size_t Res = X;
+};
+template <size_t X, size_t Y> struct ValueCondition<false, X, Y> {
+  static const size_t Res = Y;
+};
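+// For example (illustrative): ValueCondition<true, 1, 4>::Res is 1 and
+// ValueCondition<false, 1, 4>::Res is 4; below it clamps a zero-dimensional
+// reduction result to a one-dimensional TensorMap.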
+
+/// specialisation of the \ref ExprConstructor struct when the node type is TensorReductionOp
+#define SYCLREDUCTIONEXPR(CVQual)\
+template <typename OP, typename Dim, typename OrigExpr, typename DevExpr, size_t N, typename... Params>\
+struct ExprConstructor<CVQual TensorReductionOp<OP, Dim, OrigExpr, MakeGlobalPointer>,\
+CVQual PlaceHolder<CVQual TensorReductionOp<OP, Dim, DevExpr>, N>, Params...> {\
+ static const size_t NumIndices= ValueCondition< TensorReductionOp<OP, Dim, DevExpr, MakeGlobalPointer>::NumDimensions==0, 1, TensorReductionOp<OP, Dim, DevExpr, MakeGlobalPointer>::NumDimensions >::Res;\
+ typedef CVQual TensorMap<Tensor<typename TensorReductionOp<OP, Dim, DevExpr, MakeGlobalPointer>::Scalar,\
+ NumIndices, 0, typename TensorReductionOp<OP, Dim, DevExpr>::Index>, 0, MakeGlobalPointer> Type;\
+ Type expr;\
+ template <typename FuncDetector>\
+ ExprConstructor(FuncDetector &fd, const utility::tuple::Tuple<Params...> &t)\
+ : expr(Type((&(*(utility::tuple::get<N>(t).get_pointer()))), fd.dimensions())) {}\
+};
+
+SYCLREDUCTIONEXPR(const)
+SYCLREDUCTIONEXPR()
+#undef SYCLREDUCTIONEXPR
+
+/// template deduction for \ref ExprConstructor struct
+template <typename OrigExpr, typename IndexExpr, typename FuncD, typename... Params>
+auto createDeviceExpression(FuncD &funcD, const utility::tuple::Tuple<Params...> &t)
+ -> decltype(ExprConstructor<OrigExpr, IndexExpr, Params...>(funcD, t)) {
+ return ExprConstructor<OrigExpr, IndexExpr, Params...>(funcD, t);
+}
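+
+// Minimal usage sketch (illustrative), assuming `funcD` was produced by
+// extractFunctors and `t` by createTupleOfAccessors for the same expression:
+//   auto device_expr = createDeviceExpression<DevExpr, PlaceHolderExpr>(funcD, t);
+//   // device_expr.expr can then be evaluated by a TensorEvaluator on the device.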
+
+} // namespace internal
+} // namespace TensorSycl
+} // namespace Eigen
+
+
+#endif // UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSORSYCL_EXPR_CONSTRUCTOR_HPP
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractAccessor.h b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractAccessor.h
new file mode 100644
index 000000000..b1da6858e
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractAccessor.h
@@ -0,0 +1,204 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Mehdi Goli Codeplay Software Ltd.
+// Ralph Potter Codeplay Software Ltd.
+// Luke Iwanski Codeplay Software Ltd.
+// Contact: <eigen@codeplay.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+/*****************************************************************
+ * TensorSyclExtractAccessor.h
+ *
+ * \brief:
+ * ExtractAccessor takes the PlaceHolder expression and the tuple of SYCL
+ * buffers as input. Using pre-order tree traversal, ExtractAccessor
+ * recursively calls itself for its children in the expression tree. A
+ * leaf node in the PlaceHolder expression is nothing but a container that
+ * preserves the order of the actual data in the tuple of SYCL buffers. By
+ * invoking the extract accessor for PlaceHolder<N>, an accessor is created
+ * for the Nth buffer in the tuple of buffers and added as the Nth element
+ * of the tuple of accessors, preserving the order of the data in the
+ * expression tree.
+ *
+ * These are the specialisations of the extract accessor method for the
+ * different operation types in the PlaceHolder expression.
+ *
+*****************************************************************/
+
+#ifndef UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSORSYCL_EXTRACT_ACCESSOR_HPP
+#define UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSORSYCL_EXTRACT_ACCESSOR_HPP
+
+namespace Eigen {
+namespace TensorSycl {
+namespace internal {
+/// \struct ExtractAccessor: The Extract Accessor class is used to extract the
+/// accessor from a buffer.
+/// Depending on the type of the leaf node we get either a read accessor or a
+/// read_write accessor.
+template <typename Evaluator>
+struct ExtractAccessor;
+
+struct AccessorConstructor{
+ template<typename Arg> static inline auto getTuple(cl::sycl::handler& cgh, Arg eval)
+ -> decltype(ExtractAccessor<Arg>::getTuple(cgh, eval)) {
+ return ExtractAccessor<Arg>::getTuple(cgh, eval);
+ }
+
+ template<typename Arg1, typename Arg2> static inline auto getTuple(cl::sycl::handler& cgh, Arg1 eval1, Arg2 eval2)
+ -> decltype(utility::tuple::append(ExtractAccessor<Arg1>::getTuple(cgh, eval1), ExtractAccessor<Arg2>::getTuple(cgh, eval2))) {
+ return utility::tuple::append(ExtractAccessor<Arg1>::getTuple(cgh, eval1), ExtractAccessor<Arg2>::getTuple(cgh, eval2));
+ }
+ template<typename Arg1, typename Arg2, typename Arg3> static inline auto getTuple(cl::sycl::handler& cgh, Arg1 eval1 , Arg2 eval2 , Arg3 eval3)
+ -> decltype(utility::tuple::append(ExtractAccessor<Arg1>::getTuple(cgh, eval1),utility::tuple::append(ExtractAccessor<Arg2>::getTuple(cgh, eval2), ExtractAccessor<Arg3>::getTuple(cgh, eval3)))) {
+ return utility::tuple::append(ExtractAccessor<Arg1>::getTuple(cgh, eval1),utility::tuple::append(ExtractAccessor<Arg2>::getTuple(cgh, eval2), ExtractAccessor<Arg3>::getTuple(cgh, eval3)));
+ }
+ template< cl::sycl::access::mode AcM, typename Arg> static inline auto getAccessor(cl::sycl::handler& cgh, Arg eval)
+ -> decltype(utility::tuple::make_tuple( eval.device().template get_sycl_accessor<AcM,
+ typename Eigen::internal::remove_all<typename Arg::CoeffReturnType>::type>(eval.dimensions().TotalSize(), cgh,eval.data()))){
+ return utility::tuple::make_tuple(eval.device().template get_sycl_accessor<AcM, typename Eigen::internal::remove_all<typename Arg::CoeffReturnType>::type>(eval.dimensions().TotalSize(), cgh,eval.data()));
+ }
+};
+
+/// specialisation of the \ref ExtractAccessor struct when the node type is
+/// const TensorCwiseNullaryOp, const TensorCwiseUnaryOp and const TensorBroadcastingOp
+template <template<class, class> class UnaryCategory, typename OP, typename RHSExpr, typename Dev>
+struct ExtractAccessor<TensorEvaluator<const UnaryCategory<OP, RHSExpr>, Dev> > {
+ static inline auto getTuple(cl::sycl::handler& cgh, const TensorEvaluator<const UnaryCategory<OP, RHSExpr>, Dev> eval)
+ -> decltype(AccessorConstructor::getTuple(cgh, eval.impl())){
+ return AccessorConstructor::getTuple(cgh, eval.impl());
+ }
+};
+
+/// specialisation of the \ref ExtractAccessor struct when the node type is TensorCwiseNullaryOp, TensorCwiseUnaryOp and TensorBroadcastingOp
+template <template<class, class> class UnaryCategory, typename OP, typename RHSExpr, typename Dev>
+struct ExtractAccessor<TensorEvaluator<UnaryCategory<OP, RHSExpr>, Dev> >
+: ExtractAccessor<TensorEvaluator<const UnaryCategory<OP, RHSExpr>, Dev> > {};
+
+/// specialisation of the \ref ExtractAccessor struct when the node type is const TensorCwiseBinaryOp
+template <template<class, class, class> class BinaryCategory, typename OP, typename LHSExpr, typename RHSExpr, typename Dev>
+struct ExtractAccessor<TensorEvaluator<const BinaryCategory<OP, LHSExpr, RHSExpr>, Dev> > {
+ static inline auto getTuple(cl::sycl::handler& cgh, const TensorEvaluator<const BinaryCategory<OP, LHSExpr, RHSExpr>, Dev> eval)
+ -> decltype(AccessorConstructor::getTuple(cgh, eval.left_impl(), eval.right_impl())){
+ return AccessorConstructor::getTuple(cgh, eval.left_impl(), eval.right_impl());
+ }
+};
+/// specialisation of the \ref ExtractAccessor struct when the node type is TensorCwiseBinaryOp
+template <template<class, class, class> class BinaryCategory, typename OP, typename LHSExpr, typename RHSExpr, typename Dev>
+struct ExtractAccessor<TensorEvaluator<BinaryCategory<OP, LHSExpr, RHSExpr>, Dev> >
+: ExtractAccessor<TensorEvaluator<const BinaryCategory<OP, LHSExpr, RHSExpr>, Dev> >{};
+
+/// specialisation of the \ref ExtractAccessor struct when the node type is
+/// const TensorCwiseTernaryOp
+template <template<class, class, class, class> class TernaryCategory, typename OP, typename Arg1Expr, typename Arg2Expr, typename Arg3Expr, typename Dev>
+struct ExtractAccessor<TensorEvaluator<const TernaryCategory<OP, Arg1Expr, Arg2Expr, Arg3Expr>, Dev> > {
+ static inline auto getTuple(cl::sycl::handler& cgh, const TensorEvaluator<const TernaryCategory<OP, Arg1Expr, Arg2Expr, Arg3Expr>, Dev> eval)
+ -> decltype(AccessorConstructor::getTuple(cgh, eval.arg1Impl(), eval.arg2Impl(), eval.arg3Impl())){
+ return AccessorConstructor::getTuple(cgh, eval.arg1Impl(), eval.arg2Impl(), eval.arg3Impl());
+ }
+};
+
+/// specialisation of the \ref ExtractAccessor struct when the node type is TensorCwiseTernaryOp
+template <template<class, class, class, class> class TernaryCategory, typename OP, typename Arg1Expr, typename Arg2Expr, typename Arg3Expr, typename Dev>
+struct ExtractAccessor<TensorEvaluator<TernaryCategory<OP, Arg1Expr, Arg2Expr, Arg3Expr>, Dev> >
+: ExtractAccessor<TensorEvaluator<const TernaryCategory<OP, Arg1Expr, Arg2Expr, Arg3Expr>, Dev> >{};
+
+/// specialisation of the \ref ExtractAccessor struct when the node type is
+/// const TensorCwiseSelectOp. This is a special case where there is no OP
+template <typename IfExpr, typename ThenExpr, typename ElseExpr, typename Dev>
+struct ExtractAccessor<TensorEvaluator<const TensorSelectOp<IfExpr, ThenExpr, ElseExpr>, Dev> > {
+ static inline auto getTuple(cl::sycl::handler& cgh, const TensorEvaluator<const TensorSelectOp<IfExpr, ThenExpr, ElseExpr>, Dev> eval)
+ -> decltype(AccessorConstructor::getTuple(cgh, eval.cond_impl(), eval.then_impl(), eval.else_impl())){
+ return AccessorConstructor::getTuple(cgh, eval.cond_impl(), eval.then_impl(), eval.else_impl());
+ }
+};
+
+/// specialisation of the \ref ExtractAccessor struct when the node type is
+/// TensorCwiseSelectOp. This is a special case where there is no OP
+template <typename IfExpr, typename ThenExpr, typename ElseExpr, typename Dev>
+struct ExtractAccessor<TensorEvaluator<TensorSelectOp<IfExpr, ThenExpr, ElseExpr>, Dev> >
+: ExtractAccessor<TensorEvaluator<const TensorSelectOp<IfExpr, ThenExpr, ElseExpr>, Dev> >{};
+
+/// specialisation of the \ref ExtractAccessor struct when the node type is const TensorAssignOp
+template <typename LHSExpr, typename RHSExpr, typename Dev>
+struct ExtractAccessor<TensorEvaluator<const TensorAssignOp<LHSExpr, RHSExpr>, Dev> > {
+ static inline auto getTuple(cl::sycl::handler& cgh, const TensorEvaluator<const TensorAssignOp<LHSExpr, RHSExpr>, Dev> eval)
+ -> decltype(AccessorConstructor::getTuple(cgh, eval.left_impl(), eval.right_impl())){
+ return AccessorConstructor::getTuple(cgh, eval.left_impl(), eval.right_impl());
+ }
+};
+
+/// specialisation of the \ref ExtractAccessor struct when the node type is TensorAssignOp
+template <typename LHSExpr, typename RHSExpr, typename Dev>
+struct ExtractAccessor<TensorEvaluator<TensorAssignOp<LHSExpr, RHSExpr>, Dev> >
+: ExtractAccessor<TensorEvaluator<const TensorAssignOp<LHSExpr, RHSExpr>, Dev> >{};
+
+/// specialisation of the \ref ExtractAccessor struct when the node type is TensorMap (const and non-const)
+#define TENSORMAPEXPR(CVQual, ACCType)\
+template <typename PlainObjectType, int Options_, typename Dev>\
+struct ExtractAccessor<TensorEvaluator<CVQual TensorMap<PlainObjectType, Options_>, Dev> > {\
+ static inline auto getTuple(cl::sycl::handler& cgh,const TensorEvaluator<CVQual TensorMap<PlainObjectType, Options_>, Dev> eval)\
+ -> decltype(AccessorConstructor::template getAccessor<ACCType>(cgh, eval)){\
+ return AccessorConstructor::template getAccessor<ACCType>(cgh, eval);\
+ }\
+};
+TENSORMAPEXPR(const, cl::sycl::access::mode::read)
+TENSORMAPEXPR(, cl::sycl::access::mode::read_write)
+#undef TENSORMAPEXPR
+
+/// specialisation of the \ref ExtractAccessor struct when the node type is const TensorForcedEvalOp
+template <typename Expr, typename Dev>
+struct ExtractAccessor<TensorEvaluator<const TensorForcedEvalOp<Expr>, Dev> > {
+ static inline auto getTuple(cl::sycl::handler& cgh, const TensorEvaluator<const TensorForcedEvalOp<Expr>, Dev> eval)
+ -> decltype(AccessorConstructor::template getAccessor<cl::sycl::access::mode::read>(cgh, eval)){
+ return AccessorConstructor::template getAccessor<cl::sycl::access::mode::read>(cgh, eval);
+ }
+};
+
+/// specialisation of the \ref ExtractAccessor struct when the node type is TensorForcedEvalOp
+template <typename Expr, typename Dev>
+struct ExtractAccessor<TensorEvaluator<TensorForcedEvalOp<Expr>, Dev> >
+: ExtractAccessor<TensorEvaluator<const TensorForcedEvalOp<Expr>, Dev> >{};
+
+/// specialisation of the \ref ExtractAccessor struct when the node type is const TensorEvalToOp
+template <typename Expr, typename Dev>
+struct ExtractAccessor<TensorEvaluator<const TensorEvalToOp<Expr>, Dev> > {
+ static inline auto getTuple(cl::sycl::handler& cgh,const TensorEvaluator<const TensorEvalToOp<Expr>, Dev> eval)
+ -> decltype(utility::tuple::append(AccessorConstructor::template getAccessor<cl::sycl::access::mode::write>(cgh, eval), AccessorConstructor::getTuple(cgh, eval.impl()))){
+ return utility::tuple::append(AccessorConstructor::template getAccessor<cl::sycl::access::mode::write>(cgh, eval), AccessorConstructor::getTuple(cgh, eval.impl()));
+ }
+};
+
+/// specialisation of the \ref ExtractAccessor struct when the node type is TensorEvalToOp
+template <typename Expr, typename Dev>
+struct ExtractAccessor<TensorEvaluator<TensorEvalToOp<Expr>, Dev> >
+: ExtractAccessor<TensorEvaluator<const TensorEvalToOp<Expr>, Dev> >{};
+
+/// specialisation of the \ref ExtractAccessor struct when the node type is const TensorReductionOp
+template <typename OP, typename Dim, typename Expr, typename Dev>
+struct ExtractAccessor<TensorEvaluator<const TensorReductionOp<OP, Dim, Expr>, Dev> > {
+ static inline auto getTuple(cl::sycl::handler& cgh, const TensorEvaluator<const TensorReductionOp<OP, Dim, Expr>, Dev> eval)
+ -> decltype(AccessorConstructor::template getAccessor<cl::sycl::access::mode::read>(cgh, eval)){
+ return AccessorConstructor::template getAccessor<cl::sycl::access::mode::read>(cgh, eval);
+ }
+};
+
+/// specialisation of the \ref ExtractAccessor struct when the node type is TensorReductionOp
+template <typename OP, typename Dim, typename Expr, typename Dev>
+struct ExtractAccessor<TensorEvaluator<TensorReductionOp<OP, Dim, Expr>, Dev> >
+: ExtractAccessor<TensorEvaluator<const TensorReductionOp<OP, Dim, Expr>, Dev> >{};
+
+/// template deduction for \ref ExtractAccessor
+template <typename Evaluator>
+auto createTupleOfAccessors(cl::sycl::handler& cgh, const Evaluator& expr)
+-> decltype(ExtractAccessor<Evaluator>::getTuple(cgh, expr)) {
+ return ExtractAccessor<Evaluator>::getTuple(cgh, expr);
+}
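+
+// Usage sketch (illustrative): inside a command group,
+//   auto tuple_of_accessors = createTupleOfAccessors<decltype(evaluator)>(cgh, evaluator);
+// yields one SYCL accessor per leaf, in the same order as the PlaceHolder
+// numbering of the expression tree.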
+
+} // namespace internal
+} // namespace TensorSycl
+} // namespace Eigen
+#endif // UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSORSYCL_EXTRACT_ACCESSOR_HPP
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractFunctors.h b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractFunctors.h
new file mode 100644
index 000000000..427125343
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractFunctors.h
@@ -0,0 +1,177 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Mehdi Goli Codeplay Software Ltd.
+// Ralph Potter Codeplay Software Ltd.
+// Luke Iwanski Codeplay Software Ltd.
+// Contact: <eigen@codeplay.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+/*****************************************************************
+ * TensorSyclExtractFunctors.h
+ *
+ * \brief:
+ * Used to extract all the functors allocated to each node of the expression
+ * tree.
+ *
+*****************************************************************/
+
+#ifndef UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSORSYCL_EXTRACT_FUNCTORS_HPP
+#define UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSORSYCL_EXTRACT_FUNCTORS_HPP
+
+namespace Eigen {
+namespace TensorSycl {
+namespace internal {
+/// \struct FunctorExtractor: This struct is used to extract the functors
+/// constructed on the host side, to pack them and reuse them when
+/// reconstructing the expression on the device.
+/// We have to do this because Eigen functors are not stateless, so we cannot
+/// re-instantiate them on the device; the instantiated functors constructed on
+/// the host must be passed to the device.
+// This struct is used for leafNode (TensorMap) and nodes behaving like leafNode (TensorForcedEval).
+template <typename Evaluator> struct FunctorExtractor{
+ typedef typename Evaluator::Dimensions Dimensions;
+ const Dimensions m_dimensions;
+ const Dimensions& dimensions() const { return m_dimensions; }
+ FunctorExtractor(const Evaluator& expr)
+ : m_dimensions(expr.dimensions()) {}
+
+};
+
+/// specialisation of the \ref FunctorExtractor struct when the node type is
+/// const TensorCwiseNullaryOp, const TensorCwiseUnaryOp, and const TensorBroadcastingOp
+template <template <class, class> class UnaryCategory, typename OP, typename RHSExpr, typename Dev>
+struct FunctorExtractor<TensorEvaluator<const UnaryCategory<OP, RHSExpr>, Dev> > {
+ FunctorExtractor<TensorEvaluator<RHSExpr, Dev> > rhsExpr;
+ OP func;
+ FunctorExtractor(const TensorEvaluator<const UnaryCategory<OP, RHSExpr>, Dev>& expr)
+ : rhsExpr(expr.impl()), func(expr.functor()) {}
+};
+/// specialisation of the \ref FunctorExtractor struct when the node type is
+/// TensorCwiseNullaryOp, TensorCwiseUnaryOp, and TensorBroadcastingOp
+template <template <class, class> class UnaryCategory, typename OP, typename RHSExpr, typename Dev>
+struct FunctorExtractor<TensorEvaluator<UnaryCategory<OP, RHSExpr>, Dev> >
+: FunctorExtractor<TensorEvaluator<const UnaryCategory<OP, RHSExpr>, Dev> >{};
+
+/// specialisation of the \ref FunctorExtractor struct when the node type is
+/// const TensorCwiseBinaryOp
+template <template<class, class, class> class BinaryCategory, typename OP, typename LHSExpr, typename RHSExpr, typename Dev>
+struct FunctorExtractor<TensorEvaluator<const BinaryCategory<OP, LHSExpr, RHSExpr>, Dev> > {
+ FunctorExtractor<TensorEvaluator<LHSExpr, Dev> > lhsExpr;
+ FunctorExtractor<TensorEvaluator<RHSExpr, Dev> > rhsExpr;
+ OP func;
+ FunctorExtractor(const TensorEvaluator<const BinaryCategory<OP, LHSExpr, RHSExpr>, Dev>& expr)
+ : lhsExpr(expr.left_impl()),rhsExpr(expr.right_impl()),func(expr.functor()) {}
+};
+
+/// specialisation of the \ref FunctorExtractor struct when the node type is
+/// TensorCwiseBinaryOp
+template <template <class, class, class> class BinaryCategory, typename OP, typename LHSExpr, typename RHSExpr, typename Dev>
+struct FunctorExtractor<TensorEvaluator<BinaryCategory<OP, LHSExpr, RHSExpr>, Dev> >
+: FunctorExtractor<TensorEvaluator<const BinaryCategory<OP, LHSExpr, RHSExpr>, Dev> >{};
+
+/// specialisation of the \ref FunctorExtractor struct when the node type is
+/// const TensorCwiseTernaryOp
+template <template <class, class, class, class> class TernaryCategory, typename OP, typename Arg1Expr, typename Arg2Expr, typename Arg3Expr,typename Dev>
+struct FunctorExtractor<TensorEvaluator<const TernaryCategory<OP, Arg1Expr, Arg2Expr, Arg3Expr>, Dev> > {
+ FunctorExtractor<TensorEvaluator<Arg1Expr, Dev> > arg1Expr;
+ FunctorExtractor<TensorEvaluator<Arg2Expr, Dev> > arg2Expr;
+ FunctorExtractor<TensorEvaluator<Arg3Expr, Dev> > arg3Expr;
+ OP func;
+ FunctorExtractor(const TensorEvaluator<const TernaryCategory<OP, Arg1Expr, Arg2Expr, Arg3Expr>, Dev>& expr)
+ : arg1Expr(expr.arg1Impl()), arg2Expr(expr.arg2Impl()), arg3Expr(expr.arg3Impl()), func(expr.functor()) {}
+};
+
+/// specialisation of the \ref FunctorExtractor struct when the node type is
+/// TensorCwiseTernaryOp
+template <template <class, class, class, class> class TernaryCategory, typename OP, typename Arg1Expr, typename Arg2Expr, typename Arg3Expr, typename Dev>
+struct FunctorExtractor<TensorEvaluator< TernaryCategory<OP, Arg1Expr, Arg2Expr, Arg3Expr>, Dev> >
+:FunctorExtractor<TensorEvaluator<const TernaryCategory<OP, Arg1Expr, Arg2Expr, Arg3Expr>, Dev> >{};
+
+/// specialisation of the \ref FunctorExtractor struct when the node type is
+/// const TensorCwiseSelectOp. This is a specialisation without OP so it has to be separated.
+template <typename IfExpr, typename ThenExpr, typename ElseExpr, typename Dev>
+struct FunctorExtractor< TensorEvaluator<const TensorSelectOp<IfExpr, ThenExpr, ElseExpr>, Dev> > {
+ FunctorExtractor<TensorEvaluator<IfExpr, Dev> > ifExpr;
+ FunctorExtractor<TensorEvaluator<ThenExpr, Dev> > thenExpr;
+ FunctorExtractor<TensorEvaluator<ElseExpr, Dev> > elseExpr;
+ FunctorExtractor(const TensorEvaluator<const TensorSelectOp<IfExpr, ThenExpr, ElseExpr>, Dev>& expr)
+ : ifExpr(expr.cond_impl()), thenExpr(expr.then_impl()), elseExpr(expr.else_impl()) {}
+};
+
+/// specialisation of the \ref FunctorExtractor struct when the node type is
+/// TensorCwiseSelectOp. This is a specialisation without OP so it has to be separated.
+template <typename IfExpr, typename ThenExpr, typename ElseExpr, typename Dev>
+struct FunctorExtractor<TensorEvaluator<TensorSelectOp<IfExpr, ThenExpr, ElseExpr>, Dev> >
+:FunctorExtractor< TensorEvaluator<const TensorSelectOp<IfExpr, ThenExpr, ElseExpr>, Dev> > {};
+
+/// specialisation of the \ref FunctorExtractor struct when the node type is
+/// const TensorAssignOp. This is a specialisation without OP so it has to be separated.
+template <typename LHSExpr, typename RHSExpr, typename Dev>
+struct FunctorExtractor<TensorEvaluator<const TensorAssignOp<LHSExpr, RHSExpr>, Dev> > {
+ FunctorExtractor<TensorEvaluator<LHSExpr, Dev> > lhsExpr;
+ FunctorExtractor<TensorEvaluator<RHSExpr, Dev> > rhsExpr;
+ FunctorExtractor(const TensorEvaluator<const TensorAssignOp<LHSExpr, RHSExpr>, Dev>& expr)
+ : lhsExpr(expr.left_impl()), rhsExpr(expr.right_impl()) {}
+};
+
+/// specialisation of the \ref FunctorExtractor struct when the node type is
+/// TensorAssignOp. This is a specialisation without OP so it has to be separated.
+template <typename LHSExpr, typename RHSExpr, typename Dev>
+struct FunctorExtractor<TensorEvaluator<TensorAssignOp<LHSExpr, RHSExpr>, Dev> >
+:FunctorExtractor<TensorEvaluator<const TensorAssignOp<LHSExpr, RHSExpr>, Dev> >{};
+
+
+/// specialisation of the \ref FunctorExtractor struct when the node type is
+/// const TensorEvalToOp. This is a specialisation without OP so it has to be separated.
+template <typename RHSExpr, typename Dev>
+struct FunctorExtractor<TensorEvaluator<const TensorEvalToOp<RHSExpr>, Dev> > {
+ FunctorExtractor<TensorEvaluator<RHSExpr, Dev> > rhsExpr;
+ FunctorExtractor(const TensorEvaluator<const TensorEvalToOp<RHSExpr>, Dev>& expr)
+ : rhsExpr(expr.impl()) {}
+};
+
+/// specialisation of the \ref FunctorExtractor struct when the node type is
+/// TensorEvalToOp. This is a specialisation without OP so it has to be separated.
+template <typename RHSExpr, typename Dev>
+struct FunctorExtractor<TensorEvaluator<TensorEvalToOp<RHSExpr>, Dev> >
+: FunctorExtractor<TensorEvaluator<const TensorEvalToOp<RHSExpr>, Dev> > {};
+
+template<typename Dim, size_t NumOutputDim> struct DimConstr {
+ template<typename InDim>
+ static inline Dim getDim(InDim dims) { return dims; }
+};
+
+template<typename Dim> struct DimConstr<Dim, 0> {
+ template<typename InDim>
+ static inline Dim getDim(InDim dims) { return Dim(dims.TotalSize()); }
+};
+
+template<typename Op, typename Dims, typename ArgType, template <class> class MakePointer_, typename Device>
+struct FunctorExtractor<TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>, Device>>{
+ typedef TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>, Device> Evaluator;
+ typedef typename Eigen::internal::conditional<Evaluator::NumOutputDims==0, DSizes<typename Evaluator::Index, 1>, typename Evaluator::Dimensions >::type Dimensions;
+ const Dimensions m_dimensions;
+ const Dimensions& dimensions() const { return m_dimensions; }
+ FunctorExtractor(const TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>, Device>& expr)
+ : m_dimensions(DimConstr<Dimensions, Evaluator::NumOutputDims>::getDim(expr.dimensions())) {}
+};
+
+
+template<typename Op, typename Dims, typename ArgType, template <class> class MakePointer_, typename Device>
+struct FunctorExtractor<TensorEvaluator<TensorReductionOp<Op, Dims, ArgType, MakePointer_>, Device>>
+: FunctorExtractor<TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>, Device>>{};
+/// template deduction function for FunctorExtractor
+template <typename Evaluator>
+auto inline extractFunctors(const Evaluator& evaluator)-> FunctorExtractor<Evaluator> {
+ return FunctorExtractor<Evaluator>(evaluator);
+}
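+
+// Usage sketch (illustrative): given a host-side evaluator,
+//   auto functors = extractFunctors(evaluator);
+// packs the functors (and, for leaf-like nodes, the dimensions) so they can be
+// copied by value into the SYCL kernel lambda.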
+} // namespace internal
+} // namespace TensorSycl
+} // namespace Eigen
+
+#endif // UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSORSYCL_EXTRACT_FUNCTORS_HPP
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclLeafCount.h b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclLeafCount.h
new file mode 100644
index 000000000..25d1fac9b
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclLeafCount.h
@@ -0,0 +1,114 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Mehdi Goli Codeplay Software Ltd.
+// Ralph Potter Codeplay Software Ltd.
+// Luke Iwanski Codeplay Software Ltd.
+// Contact: <eigen@codeplay.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+/*****************************************************************
+ * TensorSyclLeafCount.h
+ *
+ * \brief:
+ * The leaf count uses a pre-order expression tree traversal in order to
+ * count the number of leaf nodes in the expression.
+ *
+*****************************************************************/
+
+#ifndef UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSORSYCL_LEAF_COUNT_HPP
+#define UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSORSYCL_LEAF_COUNT_HPP
+
+namespace Eigen {
+namespace TensorSycl {
+namespace internal {
+/// \brief LeafCount is used to count the terminal (leaf) nodes. The total
+/// number of leaf nodes is used by MakePlaceHolderExprHelper to find the order
+/// of each leaf node in an expression tree at compile time.
+template <typename Expr>
+struct LeafCount;
+
+template<typename... Args> struct CategoryCount;
+
+template<> struct CategoryCount<>
+{
+ static const size_t Count =0;
+};
+
+template<typename Arg, typename... Args>
+struct CategoryCount<Arg,Args...>{
+ static const size_t Count = LeafCount<Arg>::Count + CategoryCount<Args...>::Count;
+};
+
+/// specialisation of the \ref LeafCount struct when the node type is const TensorMap
+template <typename PlainObjectType, int Options_, template <class> class MakePointer_>
+struct LeafCount<const TensorMap<PlainObjectType, Options_, MakePointer_> > {
+ static const size_t Count =1;
+};
+
+/// specialisation of the \ref LeafCount struct when the node type is TensorMap
+template <typename PlainObjectType, int Options_, template <class> class MakePointer_>
+struct LeafCount<TensorMap<PlainObjectType, Options_, MakePointer_> > :LeafCount<const TensorMap<PlainObjectType, Options_, MakePointer_> >{};
+
+// const TensorCwiseUnaryOp, const TensorCwiseNullaryOp, const TensorCwiseBinaryOp, const TensorCwiseTernaryOp, and const TensorBroadcastingOp
+template <template <class, class...> class CategoryExpr, typename OP, typename... RHSExpr>
+struct LeafCount<const CategoryExpr<OP, RHSExpr...> >: CategoryCount<RHSExpr...> {};
+// TensorCwiseUnaryOp, TensorCwiseNullaryOp, TensorCwiseBinaryOp, TensorCwiseTernaryOp, and TensorBroadcastingOp
+template <template <class, class...> class CategoryExpr, typename OP, typename... RHSExpr>
+struct LeafCount<CategoryExpr<OP, RHSExpr...> > :LeafCount<const CategoryExpr<OP, RHSExpr...> >{};
+
+/// specialisation of the \ref LeafCount struct when the node type is const TensorSelectOp. This is an exception since there is no OP.
+template <typename IfExpr, typename ThenExpr, typename ElseExpr>
+struct LeafCount<const TensorSelectOp<IfExpr, ThenExpr, ElseExpr> > : CategoryCount<IfExpr, ThenExpr, ElseExpr> {};
+/// specialisation of the \ref LeafCount struct when the node type is TensorSelectOp
+template <typename IfExpr, typename ThenExpr, typename ElseExpr>
+struct LeafCount<TensorSelectOp<IfExpr, ThenExpr, ElseExpr> >: LeafCount<const TensorSelectOp<IfExpr, ThenExpr, ElseExpr> > {};
+
+
+/// specialisation of the \ref LeafCount struct when the node type is const TensorAssignOp
+template <typename LHSExpr, typename RHSExpr>
+struct LeafCount<const TensorAssignOp<LHSExpr, RHSExpr> >: CategoryCount<LHSExpr,RHSExpr> {};
+
+/// specialisation of the \ref LeafCount struct when the node type is
+/// TensorAssignOp. This is an exception: it is not the same as the unary case.
+template <typename LHSExpr, typename RHSExpr>
+struct LeafCount<TensorAssignOp<LHSExpr, RHSExpr> > :LeafCount<const TensorAssignOp<LHSExpr, RHSExpr> >{};
+
+/// specialisation of the \ref LeafCount struct when the node type is const TensorForcedEvalOp
+template <typename Expr>
+struct LeafCount<const TensorForcedEvalOp<Expr> > {
+ static const size_t Count =1;
+};
+
+/// specialisation of the \ref LeafCount struct when the node type is TensorForcedEvalOp
+template <typename Expr>
+struct LeafCount<TensorForcedEvalOp<Expr> >: LeafCount<const TensorForcedEvalOp<Expr> > {};
+
+/// specialisation of the \ref LeafCount struct when the node type is const TensorEvalToOp
+template <typename Expr>
+struct LeafCount<const TensorEvalToOp<Expr> > {
+ static const size_t Count = 1 + CategoryCount<Expr>::Count;
+};
+
+/// specialisation of the \ref LeafCount struct when the node type is const TensorReductionOp
+template <typename OP, typename Dim, typename Expr>
+struct LeafCount<const TensorReductionOp<OP, Dim, Expr> > {
+ static const size_t Count =1;
+};
+
+/// specialisation of the \ref LeafCount struct when the node type is TensorReductionOp
+template <typename OP, typename Dim, typename Expr>
+struct LeafCount<TensorReductionOp<OP, Dim, Expr> >: LeafCount<const TensorReductionOp<OP, Dim, Expr> >{};
+
+/// specialisation of the \ref LeafCount struct when the node type is TensorEvalToOp
+template <typename Expr>
+struct LeafCount<TensorEvalToOp<Expr> >: LeafCount<const TensorEvalToOp<Expr> >{};
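+
+// For example (illustrative): for an assignment A = B + C where A, B and C
+// are TensorMaps, LeafCount over the TensorAssignOp yields 3, one per
+// TensorMap leaf.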
+
+} // namespace internal
+} // namespace TensorSycl
+} // namespace Eigen
+
+#endif // UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSORSYCL_LEAF_COUNT_HPP
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclPlaceHolderExpr.h b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclPlaceHolderExpr.h
new file mode 100644
index 000000000..d4c250c6d
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclPlaceHolderExpr.h
@@ -0,0 +1,181 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Mehdi Goli Codeplay Software Ltd.
+// Ralph Potter Codeplay Software Ltd.
+// Luke Iwanski Codeplay Software Ltd.
+// Contact: <eigen@codeplay.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+/*****************************************************************
+ * TensorSyclPlaceHolderExpr.h
+ *
+ * \brief:
+ * This is the specialisation of the placeholder expression based on the
+ * operation type
+ *
+*****************************************************************/
+
+#ifndef UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSORSYCL_PLACEHOLDER_EXPR_HPP
+#define UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSORSYCL_PLACEHOLDER_EXPR_HPP
+
+namespace Eigen {
+namespace TensorSycl {
+namespace internal {
+
+/// \struct PlaceHolder
+/// \brief PlaceHolder is used to replace the \ref TensorMap in the expression
+/// tree.
+/// PlaceHolder contains the order of the leaf node in the expression tree.
+template <typename Scalar, size_t N>
+struct PlaceHolder {
+ static constexpr size_t I = N;
+ typedef Scalar Type;
+};
+
+/// \struct PlaceHolderExpression
+/// \brief It is used to create the PlaceHolder expression. The PlaceHolder
+/// expression is a copy of the expression type in which each TensorMap has
+/// been replaced with a PlaceHolder.
+template <typename Expr, size_t N>
+struct PlaceHolderExpression;
+
+template<size_t N, typename... Args>
+struct CalculateIndex;
+
+template<size_t N, typename Arg>
+struct CalculateIndex<N, Arg>{
+ typedef typename PlaceHolderExpression<Arg, N>::Type ArgType;
+ typedef utility::tuple::Tuple<ArgType> ArgsTuple;
+};
+
+template<size_t N, typename Arg1, typename Arg2>
+struct CalculateIndex<N, Arg1, Arg2>{
+ static const size_t Arg2LeafCount = LeafCount<Arg2>::Count;
+ typedef typename PlaceHolderExpression<Arg1, N - Arg2LeafCount>::Type Arg1Type;
+ typedef typename PlaceHolderExpression<Arg2, N>::Type Arg2Type;
+ typedef utility::tuple::Tuple<Arg1Type, Arg2Type> ArgsTuple;
+};
+
+template<size_t N, typename Arg1, typename Arg2, typename Arg3>
+struct CalculateIndex<N, Arg1, Arg2, Arg3> {
+ static const size_t Arg3LeafCount = LeafCount<Arg3>::Count;
+ static const size_t Arg2LeafCount = LeafCount<Arg2>::Count;
+ typedef typename PlaceHolderExpression<Arg1, N - Arg3LeafCount - Arg2LeafCount>::Type Arg1Type;
+ typedef typename PlaceHolderExpression<Arg2, N - Arg3LeafCount>::Type Arg2Type;
+ typedef typename PlaceHolderExpression<Arg3, N>::Type Arg3Type;
+ typedef utility::tuple::Tuple<Arg1Type, Arg2Type, Arg3Type> ArgsTuple;
+};
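+
+// Numbering sketch (illustrative): CalculateIndex hands the highest index N to
+// its rightmost argument and decrements by the leaf counts of the arguments to
+// the right, so the leaves end up numbered 0..N from left to right.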
+
+template<template<class...> class Category , class OP, class TPL>
+struct CategoryHelper;
+
+template<template<class...> class Category , class OP, class ...T >
+struct CategoryHelper<Category, OP, utility::tuple::Tuple<T...> > {
+ typedef Category<OP, T... > Type;
+};
+
+template<template<class...> class Category , class ...T >
+struct CategoryHelper<Category, NoOP, utility::tuple::Tuple<T...> > {
+ typedef Category<T... > Type;
+};
+
+/// specialisation of the \ref PlaceHolderExpression when the node is
+/// TensorCwiseNullaryOp, TensorCwiseUnaryOp, TensorBroadcastingOp, TensorCwiseBinaryOp, TensorCwiseTernaryOp
+#define OPEXPRCATEGORY(CVQual)\
+template <template <class, class... > class Category, typename OP, typename... SubExpr, size_t N>\
+struct PlaceHolderExpression<CVQual Category<OP, SubExpr...>, N>{\
+ typedef CVQual typename CategoryHelper<Category, OP, typename CalculateIndex<N, SubExpr...>::ArgsTuple>::Type Type;\
+};
+
+OPEXPRCATEGORY(const)
+OPEXPRCATEGORY()
+#undef OPEXPRCATEGORY
+
+/// specialisation of the \ref PlaceHolderExpression when the node is
+/// TensorCwiseSelectOp
+#define SELECTEXPR(CVQual)\
+template <typename IfExpr, typename ThenExpr, typename ElseExpr, size_t N>\
+struct PlaceHolderExpression<CVQual TensorSelectOp<IfExpr, ThenExpr, ElseExpr>, N> {\
+ typedef CVQual typename CategoryHelper<TensorSelectOp, NoOP, typename CalculateIndex<N, IfExpr, ThenExpr, ElseExpr>::ArgsTuple>::Type Type;\
+};
+
+SELECTEXPR(const)
+SELECTEXPR()
+#undef SELECTEXPR
+
+/// specialisation of the \ref PlaceHolderExpression when the node is
+/// TensorAssignOp
+#define ASSIGNEXPR(CVQual)\
+template <typename LHSExpr, typename RHSExpr, size_t N>\
+struct PlaceHolderExpression<CVQual TensorAssignOp<LHSExpr, RHSExpr>, N> {\
+ typedef CVQual typename CategoryHelper<TensorAssignOp, NoOP, typename CalculateIndex<N, LHSExpr, RHSExpr>::ArgsTuple>::Type Type;\
+};
+
+ASSIGNEXPR(const)
+ASSIGNEXPR()
+#undef ASSIGNEXPR
+
+/// specialisation of the \ref PlaceHolderExpression when the node is
+/// TensorMap
+#define TENSORMAPEXPR(CVQual)\
+template <typename Scalar_, int Options_, int Options2_, int NumIndices_, typename IndexType_, template <class> class MakePointer_, size_t N>\
+struct PlaceHolderExpression< CVQual TensorMap< Tensor<Scalar_, NumIndices_, Options_, IndexType_>, Options2_, MakePointer_>, N> {\
+ typedef CVQual PlaceHolder<CVQual TensorMap<Tensor<Scalar_, NumIndices_, Options_, IndexType_>, Options2_, MakePointer_>, N> Type;\
+};
+
+TENSORMAPEXPR(const)
+TENSORMAPEXPR()
+#undef TENSORMAPEXPR
+
+/// specialisation of the \ref PlaceHolderExpression when the node is
+/// TensorForcedEvalOp
+#define FORCEDEVAL(CVQual)\
+template <typename Expr, size_t N>\
+struct PlaceHolderExpression<CVQual TensorForcedEvalOp<Expr>, N> {\
+ typedef CVQual PlaceHolder<CVQual TensorForcedEvalOp<Expr>, N> Type;\
+};
+
+FORCEDEVAL(const)
+FORCEDEVAL()
+#undef FORCEDEVAL
+
+/// specialisation of the \ref PlaceHolderExpression when the node is
+/// TensorEvalToOp
+#define EVALTO(CVQual)\
+template <typename Expr, size_t N>\
+struct PlaceHolderExpression<CVQual TensorEvalToOp<Expr>, N> {\
+ typedef CVQual TensorEvalToOp<typename CalculateIndex <N, Expr>::ArgType> Type;\
+};
+
+EVALTO(const)
+EVALTO()
+#undef EVALTO
+
+
+/// specialisation of the \ref PlaceHolderExpression when the node is
+/// TensorReductionOp
+#define SYCLREDUCTION(CVQual)\
+template <typename OP, typename Dims, typename Expr, size_t N>\
+struct PlaceHolderExpression<CVQual TensorReductionOp<OP, Dims, Expr>, N>{\
+ typedef CVQual PlaceHolder<CVQual TensorReductionOp<OP, Dims,Expr>, N> Type;\
+};
+SYCLREDUCTION(const)
+SYCLREDUCTION()
+#undef SYCLREDUCTION
+
+/// template deduction for \ref PlaceHolderExpression struct
+template <typename Expr>
+struct createPlaceHolderExpression {
+ static const size_t TotalLeaves = LeafCount<Expr>::Count;
+ typedef typename PlaceHolderExpression<Expr, TotalLeaves - 1>::Type Type;
+};
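+
+// For example (illustrative): for A = B + C with three TensorMap leaves,
+// TotalLeaves is 3 and the leaves become PlaceHolder<..., 0> (A),
+// PlaceHolder<..., 1> (B) and PlaceHolder<..., 2> (C).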
+
+} // namespace internal
+} // namespace TensorSycl
+} // namespace Eigen
+
+#endif // UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSORSYCL_PLACEHOLDER_EXPR_HPP
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclRun.h b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclRun.h
new file mode 100644
index 000000000..7914b6fad
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclRun.h
@@ -0,0 +1,70 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Mehdi Goli Codeplay Software Ltd.
+// Ralph Potter Codeplay Software Ltd.
+// Luke Iwanski Codeplay Software Ltd.
+// Cummins Chris PhD student at The University of Edinburgh.
+// Contact: <eigen@codeplay.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+/*****************************************************************
+ * TensorSyclRun.h
+ *
+ * \brief:
+ * Schedule_kernel invokes a specialised version of the kernel struct. The
+ * specialisation is based on the data dimension in the SYCL buffer.
+ *
+*****************************************************************/
+
+#ifndef UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSORSYCL_SYCLRUN_HPP
+#define UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSORSYCL_SYCLRUN_HPP
+
+namespace Eigen {
+namespace TensorSycl {
+/// The run function in TensorSycl converts the expression tree to a
+/// buffer-based expression tree,
+/// creates the expression tree for the device with accessors to buffers,
+/// and constructs the kernel and submits it to the SYCL queue.
+template <typename Expr, typename Dev>
+void run(Expr &expr, Dev &dev) {
+ Eigen::TensorEvaluator<Expr, Dev> evaluator(expr, dev);
+ const bool needs_assign = evaluator.evalSubExprsIfNeeded(NULL);
+ if (needs_assign) {
+ typedef typename internal::createPlaceHolderExpression<Expr>::Type PlaceHolderExpr;
+ auto functors = internal::extractFunctors(evaluator);
+
+ size_t tileSize = dev.m_queue.get_device().template get_info<cl::sycl::info::device::max_work_group_size>() / 2;
+ dev.m_queue.submit([&](cl::sycl::handler &cgh) {
+
+ // create a tuple of accessors from Evaluator
+ auto tuple_of_accessors = internal::createTupleOfAccessors<decltype(evaluator)>(cgh, evaluator);
+ const auto range = utility::tuple::get<0>(tuple_of_accessors).get_range()[0];
+ size_t GRange = range;
+ if (tileSize > GRange) tileSize = GRange;
+ else if (GRange > tileSize) {
+ size_t xMode = GRange % tileSize;
+ if (xMode != 0) GRange += (tileSize - xMode);
+ }
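+ // e.g. with range = 1000 on a device whose max work-group size is 512,
+ // tileSize = 256 and GRange is rounded up to 1024 so that the nd_range
+ // divides evenly; the bounds check inside the kernel below masks the
+ // excess work-items.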
+ // run the kernel
+ cgh.parallel_for<PlaceHolderExpr>(cl::sycl::nd_range<1>(cl::sycl::range<1>(GRange), cl::sycl::range<1>(tileSize)), [=](cl::sycl::nd_item<1> itemID) {
+ typedef typename internal::ConvertToDeviceExpression<Expr>::Type DevExpr;
+ auto device_expr = internal::createDeviceExpression<DevExpr, PlaceHolderExpr>(functors, tuple_of_accessors);
+ auto device_evaluator = Eigen::TensorEvaluator<decltype(device_expr.expr), Eigen::DefaultDevice>(device_expr.expr, Eigen::DefaultDevice());
+ if (itemID.get_global_linear_id() < range) {
+ device_evaluator.evalScalar(static_cast<int>(itemID.get_global_linear_id()));
+ }
+ });
+ });
+ dev.m_queue.throw_asynchronous();
+ }
+
+ evaluator.cleanup();
+}
+} // namespace TensorSycl
+} // namespace Eigen
+
+#endif // UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSORSYCL_SYCLRUN_HPP
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclTuple.h b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclTuple.h
new file mode 100644
index 000000000..063b027e8
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclTuple.h
@@ -0,0 +1,234 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Mehdi Goli Codeplay Software Ltd.
+// Ralph Potter Codeplay Software Ltd.
+// Luke Iwanski Codeplay Software Ltd.
+// Contact: <eigen@codeplay.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+/*****************************************************************
+ * TensorSyclTuple.h
+ *
+ * \brief:
+ * Minimal implementation of std::tuple that can be used inside a SYCL kernel.
+ *
+*****************************************************************/
+
+#ifndef UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSORSYCL_TUPLE_HPP
+#define UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSORSYCL_TUPLE_HPP
+namespace utility {
+namespace tuple {
+/// \struct StaticIf
+/// \brief The StaticIf struct is used to statically choose the type based on the
+/// condition.
+template <bool, typename T = void> struct StaticIf;
+/// \brief specialisation of the \ref StaticIf when the condition is true
+template <typename T>
+struct StaticIf<true, T> {
+ typedef T type;
+};
+
+/// \struct Tuple
+/// \brief A fixed-size collection of heterogeneous values.
+/// \tparam Ts... the types of the elements that the tuple stores; an empty
+/// list is supported.
+template <class... Ts>
+struct Tuple {};
+
+/// \brief specialisation of the \ref Tuple class when the tuple has at least
+/// one element.
+/// \tparam T : the type of the first element in the tuple.
+/// \tparam Ts... the rest of the elements in the tuple. Ts... can be empty.
+template <class T, class... Ts>
+struct Tuple<T, Ts...> {
+ Tuple(T t, Ts... ts) : head(t), tail(ts...) {}
+ T head;
+ Tuple<Ts...> tail;
+};
+
+/// \struct ElemTypeHolder
+/// \brief ElemTypeHolder is used to determine the type of the element at a
+/// given index inside the tuple
+/// \tparam size_t the index of the element inside the tuple
+/// \tparam class the tuple class
+template <size_t, class>
+struct ElemTypeHolder;
+
+/// \brief specialisation of the \ref ElemTypeHolder class for index 0: the
+/// type is the type of the first element in the tuple.
+template <class T, class... Ts>
+struct ElemTypeHolder<0, Tuple<T, Ts...> > {
+ typedef T type;
+};
+
+/// \brief specialisation of the \ref ElemTypeHolder class for a non-zero
+/// index k. It recursively peels off the first element type until k
+/// reaches 0.
+/// \tparam T : the type of the first element in the tuple.
+/// \tparam Ts... the rest of the elements in the tuple. Ts... can be empty.
+/// \tparam k the index of the requested element in the tuple
+template <size_t k, class T, class... Ts>
+struct ElemTypeHolder<k, Tuple<T, Ts...> > {
+ typedef typename ElemTypeHolder<k - 1, Tuple<Ts...> >::type type;
+};
+
+/// get
+/// \brief Extracts the first element from the tuple.
+/// k==0 selects the first element of the tuple. The tuple cannot be empty.
+/// \tparam Ts... are the types of the elements in the tuple.
+/// \param t is the tuple whose contents to extract
+/// \return a reference to the first element, of type
+/// ElemTypeHolder<0, Tuple<Ts...> >::type
+
+#define TERMINATE_CONDS_TUPLE_GET(CVQual) \
+template <size_t k, class... Ts> \
+typename StaticIf<k == 0, CVQual typename ElemTypeHolder<0, Tuple<Ts...> >::type &>::type \
+get(CVQual Tuple<Ts...> &t) { \
+ static_assert(sizeof...(Ts) != 0, "The requested index is bigger than the size of the tuple"); \
+ return t.head; \
+}
+
+TERMINATE_CONDS_TUPLE_GET(const)
+TERMINATE_CONDS_TUPLE_GET()
+#undef TERMINATE_CONDS_TUPLE_GET
+/// get
+/// \brief Extracts the k-th element from the tuple.
+/// \tparam k is an integer value in [0, sizeof...(Types)).
+/// \tparam T is the type of the first element in the tuple.
+/// \tparam Ts... are the types of the remaining elements in the tuple.
+/// \param t is the tuple whose contents to extract
+/// \return a reference to the k-th element, of type
+/// ElemTypeHolder<k, Tuple<T, Ts...> >::type
+#define RECURSIVE_TUPLE_GET(CVQual) \
+template <size_t k, class T, class... Ts> \
+typename StaticIf<k != 0, CVQual typename ElemTypeHolder<k, Tuple<T, Ts...> >::type &>::type \
+get(CVQual Tuple<T, Ts...> &t) { \
+ return utility::tuple::get<k - 1>(t.tail); \
+}
+RECURSIVE_TUPLE_GET(const)
+RECURSIVE_TUPLE_GET()
+#undef RECURSIVE_TUPLE_GET
+
+/// make_tuple
+/// \brief Creates a tuple object, deducing the target type from the types of
+/// arguments.
+/// \tparam Args the type of the arguments to construct the tuple from
+/// \param args zero or more arguments to construct the tuple from
+/// \return Tuple<Args...>
+template <typename... Args>
+Tuple<Args...> make_tuple(Args... args) {
+ return Tuple<Args...>(args...);
+}
+
+/// size
+/// \brief Provides access to the number of elements in a tuple as a
+/// compile-time constant expression.
+/// \tparam Args the type of the arguments to construct the tuple from
+/// \return size_t
+template <typename... Args>
+static constexpr size_t size(Tuple<Args...> &) {
+ return sizeof...(Args);
+}
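+
+// A brief usage sketch of the utilities above (values are illustrative):
+//   auto t = utility::tuple::make_tuple(1, 2.5, 'c'); // Tuple<int, double, char>
+//   double d = utility::tuple::get<1>(t);             // d == 2.5
+//   size_t n = utility::tuple::size(t);               // n == 3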
+
+/// \struct IndexList
+/// \brief A compile-time list of indices used to address the elements of a tuple
+/// \tparam Is... a list of indices in [0, sizeof...(tuple elements))
+template <size_t... Is>
+struct IndexList {};
+
+/// \struct RangeBuilder
+/// \brief Collects internal details for generating index ranges [MIN, MAX)
+/// Declare primary template for index range builder
+/// \tparam MIN is the starting index in the tuple
+/// \tparam N represents sizeof...(elements) - sizeof...(Is)
+/// \tparam Is... is the list of indices generated so far
+template <size_t MIN, size_t N, size_t... Is>
+struct RangeBuilder;
+
+/// \brief base step: specialisation of the \ref RangeBuilder for the case
+/// N==MIN. At this point Is... holds the full range [MIN, MAX).
+/// \tparam MIN is the starting index of the tuple
+/// \tparam Is... is the generated list of indices
+template <size_t MIN, size_t... Is>
+struct RangeBuilder<MIN, MIN, Is...> {
+ typedef IndexList<Is...> type;
+};
+
+/// Induction step: specialisation of the RangeBuilder class for N!=MIN.
+/// It recursively decrements N by one, prepending one index to the Is...
+/// list, until N==MIN.
+/// \tparam MIN is the starting index in the tuple
+/// \tparam N represents sizeof...(elements) - sizeof...(Is)
+/// \tparam Is... is the list of indices generated so far
+template <size_t MIN, size_t N, size_t... Is>
+struct RangeBuilder : public RangeBuilder<MIN, N - 1, N - 1, Is...> {};
+
+/// \brief IndexRange that returns a [MIN, MAX) index range
+/// \tparam MIN is the starting index in the tuple
+/// \tparam MAX is the size of the tuple
+template <size_t MIN, size_t MAX>
+struct IndexRange: RangeBuilder<MIN, MAX>::type {};
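+
+// For example, IndexRange<0, 3> expands as RangeBuilder<0, 3>
+// -> RangeBuilder<0, 2, 2> -> RangeBuilder<0, 1, 1, 2>
+// -> RangeBuilder<0, 0, 0, 1, 2>, whose nested type is IndexList<0, 1, 2>.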
+
+/// append_base
+/// \brief unpacks the elements of the input tuple t and creates a new tuple
+/// by adding element a at the end of it.
+/// \tparam Args... the types of the elements inside the tuple t
+/// \tparam T the type of the new element to be added at the end of the tuple
+/// \tparam I... the list of indices in [0, sizeof...(t))
+/// \param t the tuple to which we want to append a.
+/// \param a the new element to be added to the tuple
+/// \return Tuple<Args..., T>
+template <typename... Args, typename T, size_t... I>
+Tuple<Args..., T> append_base(Tuple<Args...> t, T a, IndexList<I...>) {
+ return utility::tuple::make_tuple(get<I>(t)..., a);
+}
+
+/// append
+/// \brief the deduction function for \ref append_base that automatically
+/// generates the \ref IndexRange
+/// \tparam Args... the types of the elements inside the tuple t
+/// \tparam T the type of the new element to be added at the end of the tuple
+/// \param t the tuple to which we want to append a.
+/// \param a the new element to be added to the tuple
+/// \return Tuple<Args..., T>
+template <typename... Args, typename T>
+Tuple<Args..., T> append(Tuple<Args...> t, T a) {
+ return utility::tuple::append_base(t, a, IndexRange<0, sizeof...(Args)>());
+}
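+// e.g. append(make_tuple(1, 'a'), 2.0) returns a Tuple<int, char, double>
+// holding (1, 'a', 2.0).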
+
+/// append_base
+/// \brief specialisation of \ref append_base for concatenating tuple t2 at
+/// the end of tuple t1. Here we unpack both tuples, generate the IndexRange
+/// for each of them, and create an output tuple that contains the elements
+/// of both t1 and t2.
+/// \tparam Args1... the types of the elements inside the tuple t1
+/// \tparam Args2... the types of the elements inside the tuple t2
+/// \tparam I1... the list of indices in [0, sizeof...(t1))
+/// \tparam I2... the list of indices in [0, sizeof...(t2))
+/// \param t1 is the tuple to which we want to append t2.
+/// \param t2 is the tuple that is going to be appended to t1.
+/// \return Tuple<Args1..., Args2...>
+template <typename... Args1, typename... Args2, size_t... I1, size_t... I2>
+Tuple<Args1..., Args2...> append_base(Tuple<Args1...> t1, Tuple<Args2...> t2, IndexList<I1...>, IndexList<I2...>) {
+ return utility::tuple::make_tuple(get<I1>(t1)..., get<I2>(t2)...);
+}
+
+/// append
+/// \brief deduction function for \ref append_base when appending tuple t2 to
+/// tuple t1. In this case the \ref IndexRange for both tuples is generated
+/// automatically.
+/// \tparam Args1... the types of the elements inside the tuple t1
+/// \tparam Args2... the types of the elements inside the tuple t2
+/// \param t1 is the tuple to which we want to append t2.
+/// \param t2 is the tuple that is going to be appended to t1.
+/// \return Tuple<Args1..., Args2...>
+template <typename... Args1, typename... Args2>
+Tuple<Args1..., Args2...> append(Tuple<Args1...> t1, Tuple<Args2...> t2) {
+ return utility::tuple::append_base(t1, t2, IndexRange<0, sizeof...(Args1)>(), IndexRange<0, sizeof...(Args2)>());
+}
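+// e.g. append(make_tuple(1, 'a'), make_tuple(2.0f)) returns a
+// Tuple<int, char, float> holding (1, 'a', 2.0f).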
+} // tuple
+} // utility
+#endif // UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSORSYCL_TUPLE_HPP
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorTraits.h b/unsupported/Eigen/CXX11/src/Tensor/TensorTraits.h
new file mode 100644
index 000000000..ffcf8b00f
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorTraits.h
@@ -0,0 +1,272 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_TRAITS_H
+#define EIGEN_CXX11_TENSOR_TENSOR_TRAITS_H
+
+namespace Eigen {
+namespace internal {
+
+
+template<typename Scalar, int Options>
+class compute_tensor_flags
+{
+ enum {
+ is_dynamic_size_storage = 1,
+
+ is_aligned =
+ (
+ ((Options&DontAlign)==0) && (
+#if EIGEN_MAX_STATIC_ALIGN_BYTES>0
+ (!is_dynamic_size_storage)
+#else
+ 0
+#endif
+ |
+#if EIGEN_MAX_ALIGN_BYTES>0
+ is_dynamic_size_storage
+#else
+ 0
+#endif
+ )
+ ),
+ packet_access_bit = packet_traits<Scalar>::Vectorizable && is_aligned ? PacketAccessBit : 0
+ };
+
+ public:
+ enum { ret = packet_access_bit };
+};
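+
+// Illustrative reading of the logic above (not an exhaustive case analysis):
+// with the default Options and a Scalar for which
+// packet_traits<Scalar>::Vectorizable holds, is_aligned is true on builds
+// where EIGEN_MAX_ALIGN_BYTES > 0 (dynamically allocated storage is then
+// aligned), so ret == PacketAccessBit; passing DontAlign clears it.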
+
+
+template<typename Scalar_, int NumIndices_, int Options_, typename IndexType_>
+struct traits<Tensor<Scalar_, NumIndices_, Options_, IndexType_> >
+{
+ typedef Scalar_ Scalar;
+ typedef Dense StorageKind;
+ typedef IndexType_ Index;
+ static const int NumDimensions = NumIndices_;
+ static const int Layout = Options_ & RowMajor ? RowMajor : ColMajor;
+ enum {
+ Options = Options_,
+ Flags = compute_tensor_flags<Scalar_, Options_>::ret | (is_const<Scalar_>::value ? 0 : LvalueBit)
+ };
+ template <typename T> struct MakePointer {
+ typedef T* Type;
+ };
+};
+
+
+template<typename Scalar_, typename Dimensions, int Options_, typename IndexType_>
+struct traits<TensorFixedSize<Scalar_, Dimensions, Options_, IndexType_> >
+{
+ typedef Scalar_ Scalar;
+ typedef Dense StorageKind;
+ typedef IndexType_ Index;
+ static const int NumDimensions = array_size<Dimensions>::value;
+ static const int Layout = Options_ & RowMajor ? RowMajor : ColMajor;
+ enum {
+ Options = Options_,
+ Flags = compute_tensor_flags<Scalar_, Options_>::ret | (is_const<Scalar_>::value ? 0: LvalueBit)
+ };
+ template <typename T> struct MakePointer {
+ typedef T* Type;
+ };
+};
+
+
+template<typename PlainObjectType, int Options_, template <class> class MakePointer_>
+struct traits<TensorMap<PlainObjectType, Options_, MakePointer_> >
+ : public traits<PlainObjectType>
+{
+ typedef traits<PlainObjectType> BaseTraits;
+ typedef typename BaseTraits::Scalar Scalar;
+ typedef typename BaseTraits::StorageKind StorageKind;
+ typedef typename BaseTraits::Index Index;
+ static const int NumDimensions = BaseTraits::NumDimensions;
+ static const int Layout = BaseTraits::Layout;
+ enum {
+ Options = Options_,
+ Flags = BaseTraits::Flags
+ };
+ template <class T> struct MakePointer {
+ // Intermediate typedef to workaround MSVC issue.
+ typedef MakePointer_<T> MakePointerT;
+ typedef typename MakePointerT::Type Type;
+ };
+};
+
+template<typename PlainObjectType>
+struct traits<TensorRef<PlainObjectType> >
+ : public traits<PlainObjectType>
+{
+ typedef traits<PlainObjectType> BaseTraits;
+ typedef typename BaseTraits::Scalar Scalar;
+ typedef typename BaseTraits::StorageKind StorageKind;
+ typedef typename BaseTraits::Index Index;
+ static const int NumDimensions = BaseTraits::NumDimensions;
+ static const int Layout = BaseTraits::Layout;
+ enum {
+ Options = BaseTraits::Options,
+ Flags = BaseTraits::Flags
+ };
+};
+
+
+template<typename _Scalar, int NumIndices_, int Options, typename IndexType_>
+struct eval<Tensor<_Scalar, NumIndices_, Options, IndexType_>, Eigen::Dense>
+{
+ typedef const Tensor<_Scalar, NumIndices_, Options, IndexType_>& type;
+};
+
+template<typename _Scalar, int NumIndices_, int Options, typename IndexType_>
+struct eval<const Tensor<_Scalar, NumIndices_, Options, IndexType_>, Eigen::Dense>
+{
+ typedef const Tensor<_Scalar, NumIndices_, Options, IndexType_>& type;
+};
+
+template<typename Scalar_, typename Dimensions, int Options, typename IndexType_>
+struct eval<TensorFixedSize<Scalar_, Dimensions, Options, IndexType_>, Eigen::Dense>
+{
+ typedef const TensorFixedSize<Scalar_, Dimensions, Options, IndexType_>& type;
+};
+
+template<typename Scalar_, typename Dimensions, int Options, typename IndexType_>
+struct eval<const TensorFixedSize<Scalar_, Dimensions, Options, IndexType_>, Eigen::Dense>
+{
+ typedef const TensorFixedSize<Scalar_, Dimensions, Options, IndexType_>& type;
+};
+
+template<typename PlainObjectType, int Options, template <class> class MakePointer>
+struct eval<TensorMap<PlainObjectType, Options, MakePointer>, Eigen::Dense>
+{
+ typedef const TensorMap<PlainObjectType, Options, MakePointer>& type;
+};
+
+template<typename PlainObjectType, int Options, template <class> class MakePointer>
+struct eval<const TensorMap<PlainObjectType, Options, MakePointer>, Eigen::Dense>
+{
+ typedef const TensorMap<PlainObjectType, Options, MakePointer>& type;
+};
+
+template<typename PlainObjectType>
+struct eval<TensorRef<PlainObjectType>, Eigen::Dense>
+{
+ typedef const TensorRef<PlainObjectType>& type;
+};
+
+template<typename PlainObjectType>
+struct eval<const TensorRef<PlainObjectType>, Eigen::Dense>
+{
+ typedef const TensorRef<PlainObjectType>& type;
+};
+
+// TODO nested<> does not exist anymore in Eigen/Core, and it thus has to be removed in favor of ref_selector.
+template<typename T, int n=1, typename PlainObject = void> struct nested
+{
+ typedef typename ref_selector<T>::type type;
+};
+
+template <typename Scalar_, int NumIndices_, int Options_, typename IndexType_>
+struct nested<Tensor<Scalar_, NumIndices_, Options_, IndexType_> >
+{
+ typedef const Tensor<Scalar_, NumIndices_, Options_, IndexType_>& type;
+};
+
+template <typename Scalar_, int NumIndices_, int Options_, typename IndexType_>
+struct nested<const Tensor<Scalar_, NumIndices_, Options_, IndexType_> >
+{
+ typedef const Tensor<Scalar_, NumIndices_, Options_, IndexType_>& type;
+};
+
+template <typename Scalar_, typename Dimensions, int Options, typename IndexType_>
+struct nested<TensorFixedSize<Scalar_, Dimensions, Options, IndexType_> >
+{
+ typedef const TensorFixedSize<Scalar_, Dimensions, Options, IndexType_>& type;
+};
+
+template <typename Scalar_, typename Dimensions, int Options, typename IndexType_>
+struct nested<const TensorFixedSize<Scalar_, Dimensions, Options, IndexType_> >
+{
+ typedef const TensorFixedSize<Scalar_, Dimensions, Options, IndexType_>& type;
+};
+
+
+template <typename PlainObjectType, int Options, template <class> class MakePointer>
+struct nested<TensorMap<PlainObjectType, Options, MakePointer> >
+{
+ typedef const TensorMap<PlainObjectType, Options, MakePointer>& type;
+};
+
+template <typename PlainObjectType, int Options, template <class> class MakePointer>
+struct nested<const TensorMap<PlainObjectType, Options, MakePointer> >
+{
+ typedef const TensorMap<PlainObjectType, Options, MakePointer>& type;
+};
+
+template <typename PlainObjectType>
+struct nested<TensorRef<PlainObjectType> >
+{
+ typedef const TensorRef<PlainObjectType>& type;
+};
+
+template <typename PlainObjectType>
+struct nested<const TensorRef<PlainObjectType> >
+{
+ typedef const TensorRef<PlainObjectType>& type;
+};
+
+} // end namespace internal
+
+// Convolutional layers take in an input tensor of shape (D, R, C, B), or (D, C,
+// R, B), and convolve it with a set of filters, which can also be presented as
+// a tensor (D, K, K, M), where M is the number of filters, K is the filter
+// size, and each 3-dimensional tensor of size (D, K, K) is a filter. For
+// simplicity we assume that we always use square filters (which is usually the
+// case in images), hence the two Ks in the tensor dimension. It also takes in
+// a few additional parameters:
+// Stride (S): The convolution stride is the offset between locations where we
+// apply the filters. A larger stride means that the output will be
+// spatially smaller.
+// Padding (P): The padding we apply to the input tensor along the R and C
+// dimensions. This is usually used to make sure that the spatial
+// dimensions of the output match our intention.
+//
+// Two types of padding are often used:
+// SAME: The pad value is computed so that the output will have size
+// ceil(R/S) and ceil(C/S).
+// VALID: no padding is carried out.
+// When we do padding, the padded values at the padded locations are usually
+// zero.
+//
+// The output dimensions for convolution, when given all the parameters above,
+// are as follows:
+// When Padding = SAME: the output size is (B, R', C', M), where
+// R' = ceil(float(R) / float(S))
+// C' = ceil(float(C) / float(S))
+// where ceil is the ceiling function. The input tensor is padded with 0 as
+// needed. The number of padded rows and columns are computed as:
+// Pr = ((R' - 1) * S + K - R) / 2
+// Pc = ((C' - 1) * S + K - C) / 2
+// when the stride is 1, we have the simplified case R'=R, C'=C, Pr=Pc=(K-1)/2.
+// This is where SAME comes from: the output has the same size as the input.
+// When Padding = VALID: the output size is computed as
+// R' = ceil(float(R - K + 1) / float(S))
+// C' = ceil(float(C - K + 1) / float(S))
+// and the number of padded rows and columns are computed in the same way as in
+// the SAME case.
+// When the stride is 1, we have the simplified case R'=R-K+1, C'=C-K+1, Pr=0,
+// Pc=0.
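+//
+// Worked example: for R = C = 5, K = 3, S = 1:
+//   SAME:  R' = C' = 5 and Pr = Pc = ((5 - 1) * 1 + 3 - 5) / 2 = 1, i.e. a
+//          one-element zero border, matching the (K-1)/2 formula above.
+//   VALID: R' = C' = ceil((5 - 3 + 1) / 1) = 3, with no padding.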
+typedef enum {
+ PADDING_VALID = 1,
+ PADDING_SAME = 2
+} PaddingType;
+
+} // end namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_TRAITS_H
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorUInt128.h b/unsupported/Eigen/CXX11/src/Tensor/TensorUInt128.h
new file mode 100644
index 000000000..3523e7c94
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorUInt128.h
@@ -0,0 +1,248 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2015 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_UINT128_H
+#define EIGEN_CXX11_TENSOR_TENSOR_UINT128_H
+
+namespace Eigen {
+namespace internal {
+
+
+template <uint64_t n>
+struct static_val {
+ static const uint64_t value = n;
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE operator uint64_t() const { return n; }
+
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE static_val() { }
+
+ template <typename T>
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE static_val(const T& v) {
+ eigen_assert(v == n);
+ }
+};
+
+
+template <typename HIGH = uint64_t, typename LOW = uint64_t>
+struct TensorUInt128
+{
+ HIGH high;
+ LOW low;
+
+ template<typename OTHER_HIGH, typename OTHER_LOW>
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
+ TensorUInt128(const TensorUInt128<OTHER_HIGH, OTHER_LOW>& other) : high(other.high), low(other.low) {
+ EIGEN_STATIC_ASSERT(sizeof(OTHER_HIGH) <= sizeof(HIGH), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT(sizeof(OTHER_LOW) <= sizeof(LOW), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ }
+
+ template<typename OTHER_HIGH, typename OTHER_LOW>
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
+ TensorUInt128& operator = (const TensorUInt128<OTHER_HIGH, OTHER_LOW>& other) {
+ EIGEN_STATIC_ASSERT(sizeof(OTHER_HIGH) <= sizeof(HIGH), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT(sizeof(OTHER_LOW) <= sizeof(LOW), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ high = other.high;
+ low = other.low;
+ return *this;
+ }
+
+ template<typename T>
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
+ explicit TensorUInt128(const T& x) : high(0), low(x) {
+ eigen_assert((static_cast<typename conditional<sizeof(T) == 8, uint64_t, uint32_t>::type>(x) <= NumTraits<uint64_t>::highest()));
+ eigen_assert(x >= 0);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
+ TensorUInt128(HIGH y, LOW x) : high(y), low(x) { }
+
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE operator LOW() const {
+ return low;
+ }
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE LOW lower() const {
+ return low;
+ }
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE HIGH upper() const {
+ return high;
+ }
+};
+
+
+template <typename HL, typename LL, typename HR, typename LR>
+EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
+bool operator == (const TensorUInt128<HL, LL>& lhs, const TensorUInt128<HR, LR>& rhs)
+{
+ return (lhs.high == rhs.high) & (lhs.low == rhs.low);
+}
+
+template <typename HL, typename LL, typename HR, typename LR>
+EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
+bool operator != (const TensorUInt128<HL, LL>& lhs, const TensorUInt128<HR, LR>& rhs)
+{
+ return (lhs.high != rhs.high) | (lhs.low != rhs.low);
+}
+
+template <typename HL, typename LL, typename HR, typename LR>
+EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
+bool operator >= (const TensorUInt128<HL, LL>& lhs, const TensorUInt128<HR, LR>& rhs)
+{
+ if (lhs.high != rhs.high) {
+ return lhs.high > rhs.high;
+ }
+ return lhs.low >= rhs.low;
+}
+
+template <typename HL, typename LL, typename HR, typename LR>
+EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
+bool operator < (const TensorUInt128<HL, LL>& lhs, const TensorUInt128<HR, LR>& rhs)
+{
+ if (lhs.high != rhs.high) {
+ return lhs.high < rhs.high;
+ }
+ return lhs.low < rhs.low;
+}
+
+template <typename HL, typename LL, typename HR, typename LR>
+EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
+TensorUInt128<uint64_t, uint64_t> operator + (const TensorUInt128<HL, LL>& lhs, const TensorUInt128<HR, LR>& rhs)
+{
+ TensorUInt128<uint64_t, uint64_t> result(lhs.high + rhs.high, lhs.low + rhs.low);
+ if (result.low < rhs.low) {
+ result.high += 1;
+ }
+ return result;
+}
+
+template <typename HL, typename LL, typename HR, typename LR>
+EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
+TensorUInt128<uint64_t, uint64_t> operator - (const TensorUInt128<HL, LL>& lhs, const TensorUInt128<HR, LR>& rhs)
+{
+ TensorUInt128<uint64_t, uint64_t> result(lhs.high - rhs.high, lhs.low - rhs.low);
+ if (result.low > lhs.low) {
+ result.high -= 1;
+ }
+ return result;
+}
+
+
+template <typename HL, typename LL, typename HR, typename LR>
+static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+TensorUInt128<uint64_t, uint64_t> operator * (const TensorUInt128<HL, LL>& lhs, const TensorUInt128<HR, LR>& rhs)
+{
+ // Split each 128-bit integer into 4 32-bit integers, and then do the
+ // multiplications by hand as follows:
+ // lhs a b c d
+ // rhs e f g h
+ // -----------
+ // ah bh ch dh
+ // bg cg dg
+ // cf df
+ // de
+ // The result is stored in two 64-bit integers, high and low.
+
+ const uint64_t LOW = 0x00000000FFFFFFFFLL;
+ const uint64_t HIGH = 0xFFFFFFFF00000000LL;
+
+ uint64_t d = lhs.low & LOW;
+ uint64_t c = (lhs.low & HIGH) >> 32LL;
+ uint64_t b = lhs.high & LOW;
+ uint64_t a = (lhs.high & HIGH) >> 32LL;
+
+ uint64_t h = rhs.low & LOW;
+ uint64_t g = (rhs.low & HIGH) >> 32LL;
+ uint64_t f = rhs.high & LOW;
+ uint64_t e = (rhs.high & HIGH) >> 32LL;
+
+ // Compute the low 32 bits of low
+ uint64_t acc = d * h;
+ uint64_t low = acc & LOW;
+ // Compute the high 32 bits of low. Add a carry every time we wrap around
+ acc >>= 32LL;
+ uint64_t carry = 0;
+ uint64_t acc2 = acc + c * h;
+ if (acc2 < acc) {
+ carry++;
+ }
+ acc = acc2 + d * g;
+ if (acc < acc2) {
+ carry++;
+ }
+ low |= (acc << 32LL);
+
+ // Carry forward the high bits of acc to initiate the computation of the
+ // low 32 bits of high
+ acc2 = (acc >> 32LL) | (carry << 32LL);
+ carry = 0;
+
+ acc = acc2 + b * h;
+ if (acc < acc2) {
+ carry++;
+ }
+ acc2 = acc + c * g;
+ if (acc2 < acc) {
+ carry++;
+ }
+ acc = acc2 + d * f;
+ if (acc < acc2) {
+ carry++;
+ }
+ uint64_t high = acc & LOW;
+
+ // Start to compute the high 32 bits of high.
+ acc2 = (acc >> 32LL) | (carry << 32LL);
+
+ acc = acc2 + a * h;
+ acc2 = acc + b * g;
+ acc = acc2 + c * f;
+ acc2 = acc + d * e;
+ high |= (acc2 << 32LL);
+
+ return TensorUInt128<uint64_t, uint64_t>(high, low);
+}
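+
+// Sanity-check sketch (illustrative): (2^32 + 1) * (2^32 + 2) equals
+// 2^64 + 3*2^32 + 2, so multiplying TensorUInt128<>(0, (1ull << 32) + 1)
+// by TensorUInt128<>(0, (1ull << 32) + 2) yields high == 1 and
+// low == 3 * (1ull << 32) + 2.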
+
+template <typename HL, typename LL, typename HR, typename LR>
+static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+TensorUInt128<uint64_t, uint64_t> operator / (const TensorUInt128<HL, LL>& lhs, const TensorUInt128<HR, LR>& rhs)
+{
+ if (rhs == TensorUInt128<static_val<0>, static_val<1> >(1)) {
+ return TensorUInt128<uint64_t, uint64_t>(lhs.high, lhs.low);
+ } else if (lhs < rhs) {
+ return TensorUInt128<uint64_t, uint64_t>(0);
+ } else {
+ // calculate the biggest power of 2 times rhs that's less than or equal to lhs
+ TensorUInt128<uint64_t, uint64_t> power2(1);
+ TensorUInt128<uint64_t, uint64_t> d(rhs);
+ TensorUInt128<uint64_t, uint64_t> tmp(lhs - d);
+ while (lhs >= d) {
+ tmp = tmp - d;
+ d = d + d;
+ power2 = power2 + power2;
+ }
+
+ tmp = TensorUInt128<uint64_t, uint64_t>(lhs.high, lhs.low);
+ TensorUInt128<uint64_t, uint64_t> result(0);
+ while (power2 != TensorUInt128<static_val<0>, static_val<0> >(0)) {
+ if (tmp >= d) {
+ tmp = tmp - d;
+ result = result + power2;
+ }
+ // Shift right
+ power2 = TensorUInt128<uint64_t, uint64_t>(power2.high >> 1, (power2.low >> 1) | (power2.high << 63));
+ d = TensorUInt128<uint64_t, uint64_t>(d.high >> 1, (d.low >> 1) | (d.high << 63));
+ }
+
+ return result;
+ }
+}
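+
+// e.g. TensorUInt128<>(1, 10) / TensorUInt128<>(0, 8), i.e. (2^64 + 10) / 8,
+// returns 2^61 + 1: the loops above perform binary long division, truncating
+// the quotient toward zero (the remainder, 2 here, is discarded).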
+
+
+} // namespace internal
+} // namespace Eigen
+
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_UINT128_H
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorVolumePatch.h b/unsupported/Eigen/CXX11/src/Tensor/TensorVolumePatch.h
new file mode 100644
index 000000000..0ca2cac84
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorVolumePatch.h
@@ -0,0 +1,608 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_VOLUME_PATCH_H
+#define EIGEN_CXX11_TENSOR_TENSOR_VOLUME_PATCH_H
+
+namespace Eigen {
+
+/** \class TensorVolumePatch
+ * \ingroup CXX11_Tensor_Module
+ *
+ * \brief Patch extraction specialized for processing of volumetric data.
+ * This assumes that the input has at least 4 dimensions ordered as follows:
+ * - channels
+ * - planes
+ * - rows
+ * - columns
+ * - (optional) additional dimensions such as time or batch size.
+ * Calling the volume patch code with patch_planes, patch_rows, and patch_cols
+ * is equivalent to calling the regular patch extraction code with parameters
+ * d, patch_planes, patch_rows, patch_cols, and 1 for all the additional
+ * dimensions.
+ */
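+// A usage sketch (assuming the extract_volume_patches() helper on TensorBase,
+// which constructs this op): for a ColMajor input of shape
+// (depth, planes, rows, cols, batch), e.g.
+//   Tensor<float, 5> input(4, 8, 16, 16, 2);
+//   auto patches = input.extract_volume_patches(2, 3, 3);
+// the result has shape (4, 2, 3, 3, number_of_patches, 2): depth, the three
+// patch dimensions, the patch count, and the preserved batch dimension.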
+namespace internal {
+template<DenseIndex Planes, DenseIndex Rows, DenseIndex Cols, typename XprType>
+struct traits<TensorVolumePatchOp<Planes, Rows, Cols, XprType> > : public traits<XprType>
+{
+ typedef typename internal::remove_const<typename XprType::Scalar>::type Scalar;
+ typedef traits<XprType> XprTraits;
+ typedef typename XprTraits::StorageKind StorageKind;
+ typedef typename XprTraits::Index Index;
+ typedef typename XprType::Nested Nested;
+ typedef typename remove_reference<Nested>::type _Nested;
+ static const int NumDimensions = XprTraits::NumDimensions + 1;
+ static const int Layout = XprTraits::Layout;
+};
+
+template<DenseIndex Planes, DenseIndex Rows, DenseIndex Cols, typename XprType>
+struct eval<TensorVolumePatchOp<Planes, Rows, Cols, XprType>, Eigen::Dense>
+{
+ typedef const TensorVolumePatchOp<Planes, Rows, Cols, XprType>& type;
+};
+
+template<DenseIndex Planes, DenseIndex Rows, DenseIndex Cols, typename XprType>
+struct nested<TensorVolumePatchOp<Planes, Rows, Cols, XprType>, 1, typename eval<TensorVolumePatchOp<Planes, Rows, Cols, XprType> >::type>
+{
+ typedef TensorVolumePatchOp<Planes, Rows, Cols, XprType> type;
+};
+
+} // end namespace internal
+
+template<DenseIndex Planes, DenseIndex Rows, DenseIndex Cols, typename XprType>
+class TensorVolumePatchOp : public TensorBase<TensorVolumePatchOp<Planes, Rows, Cols, XprType>, ReadOnlyAccessors>
+{
+ public:
+ typedef typename Eigen::internal::traits<TensorVolumePatchOp>::Scalar Scalar;
+ typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+ typedef typename Eigen::internal::nested<TensorVolumePatchOp>::type Nested;
+ typedef typename Eigen::internal::traits<TensorVolumePatchOp>::StorageKind StorageKind;
+ typedef typename Eigen::internal::traits<TensorVolumePatchOp>::Index Index;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorVolumePatchOp(const XprType& expr, DenseIndex patch_planes, DenseIndex patch_rows, DenseIndex patch_cols,
+ DenseIndex plane_strides, DenseIndex row_strides, DenseIndex col_strides,
+ DenseIndex in_plane_strides, DenseIndex in_row_strides, DenseIndex in_col_strides,
+ DenseIndex plane_inflate_strides, DenseIndex row_inflate_strides, DenseIndex col_inflate_strides,
+ PaddingType padding_type, Scalar padding_value)
+ : m_xpr(expr), m_patch_planes(patch_planes), m_patch_rows(patch_rows), m_patch_cols(patch_cols),
+ m_plane_strides(plane_strides), m_row_strides(row_strides), m_col_strides(col_strides),
+ m_in_plane_strides(in_plane_strides), m_in_row_strides(in_row_strides), m_in_col_strides(in_col_strides),
+ m_plane_inflate_strides(plane_inflate_strides), m_row_inflate_strides(row_inflate_strides), m_col_inflate_strides(col_inflate_strides),
+ m_padding_explicit(false), m_padding_top_z(0), m_padding_bottom_z(0), m_padding_top(0), m_padding_bottom(0), m_padding_left(0), m_padding_right(0),
+ m_padding_type(padding_type), m_padding_value(padding_value) {}
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorVolumePatchOp(const XprType& expr, DenseIndex patch_planes, DenseIndex patch_rows, DenseIndex patch_cols,
+ DenseIndex plane_strides, DenseIndex row_strides, DenseIndex col_strides,
+ DenseIndex in_plane_strides, DenseIndex in_row_strides, DenseIndex in_col_strides,
+ DenseIndex plane_inflate_strides, DenseIndex row_inflate_strides, DenseIndex col_inflate_strides,
+ DenseIndex padding_top_z, DenseIndex padding_bottom_z,
+ DenseIndex padding_top, DenseIndex padding_bottom,
+ DenseIndex padding_left, DenseIndex padding_right,
+ Scalar padding_value)
+ : m_xpr(expr), m_patch_planes(patch_planes), m_patch_rows(patch_rows), m_patch_cols(patch_cols),
+ m_plane_strides(plane_strides), m_row_strides(row_strides), m_col_strides(col_strides),
+ m_in_plane_strides(in_plane_strides), m_in_row_strides(in_row_strides), m_in_col_strides(in_col_strides),
+ m_plane_inflate_strides(plane_inflate_strides), m_row_inflate_strides(row_inflate_strides), m_col_inflate_strides(col_inflate_strides),
+ m_padding_explicit(true), m_padding_top_z(padding_top_z), m_padding_bottom_z(padding_bottom_z), m_padding_top(padding_top), m_padding_bottom(padding_bottom),
+ m_padding_left(padding_left), m_padding_right(padding_right),
+ m_padding_type(PADDING_VALID), m_padding_value(padding_value) {}
+
+ EIGEN_DEVICE_FUNC
+ DenseIndex patch_planes() const { return m_patch_planes; }
+ EIGEN_DEVICE_FUNC
+ DenseIndex patch_rows() const { return m_patch_rows; }
+ EIGEN_DEVICE_FUNC
+ DenseIndex patch_cols() const { return m_patch_cols; }
+ EIGEN_DEVICE_FUNC
+ DenseIndex plane_strides() const { return m_plane_strides; }
+ EIGEN_DEVICE_FUNC
+ DenseIndex row_strides() const { return m_row_strides; }
+ EIGEN_DEVICE_FUNC
+ DenseIndex col_strides() const { return m_col_strides; }
+ EIGEN_DEVICE_FUNC
+ DenseIndex in_plane_strides() const { return m_in_plane_strides; }
+ EIGEN_DEVICE_FUNC
+ DenseIndex in_row_strides() const { return m_in_row_strides; }
+ EIGEN_DEVICE_FUNC
+ DenseIndex in_col_strides() const { return m_in_col_strides; }
+ EIGEN_DEVICE_FUNC
+ DenseIndex plane_inflate_strides() const { return m_plane_inflate_strides; }
+ EIGEN_DEVICE_FUNC
+ DenseIndex row_inflate_strides() const { return m_row_inflate_strides; }
+ EIGEN_DEVICE_FUNC
+ DenseIndex col_inflate_strides() const { return m_col_inflate_strides; }
+ EIGEN_DEVICE_FUNC
+ bool padding_explicit() const { return m_padding_explicit; }
+ EIGEN_DEVICE_FUNC
+ DenseIndex padding_top_z() const { return m_padding_top_z; }
+ EIGEN_DEVICE_FUNC
+ DenseIndex padding_bottom_z() const { return m_padding_bottom_z; }
+ EIGEN_DEVICE_FUNC
+ DenseIndex padding_top() const { return m_padding_top; }
+ EIGEN_DEVICE_FUNC
+ DenseIndex padding_bottom() const { return m_padding_bottom; }
+ EIGEN_DEVICE_FUNC
+ DenseIndex padding_left() const { return m_padding_left; }
+ EIGEN_DEVICE_FUNC
+ DenseIndex padding_right() const { return m_padding_right; }
+ EIGEN_DEVICE_FUNC
+ PaddingType padding_type() const { return m_padding_type; }
+ EIGEN_DEVICE_FUNC
+ Scalar padding_value() const { return m_padding_value; }
+
+ EIGEN_DEVICE_FUNC
+ const typename internal::remove_all<typename XprType::Nested>::type&
+ expression() const { return m_xpr; }
+
+ protected:
+ typename XprType::Nested m_xpr;
+ const DenseIndex m_patch_planes;
+ const DenseIndex m_patch_rows;
+ const DenseIndex m_patch_cols;
+ const DenseIndex m_plane_strides;
+ const DenseIndex m_row_strides;
+ const DenseIndex m_col_strides;
+ const DenseIndex m_in_plane_strides;
+ const DenseIndex m_in_row_strides;
+ const DenseIndex m_in_col_strides;
+ const DenseIndex m_plane_inflate_strides;
+ const DenseIndex m_row_inflate_strides;
+ const DenseIndex m_col_inflate_strides;
+ const bool m_padding_explicit;
+ const DenseIndex m_padding_top_z;
+ const DenseIndex m_padding_bottom_z;
+ const DenseIndex m_padding_top;
+ const DenseIndex m_padding_bottom;
+ const DenseIndex m_padding_left;
+ const DenseIndex m_padding_right;
+ const PaddingType m_padding_type;
+ const Scalar m_padding_value;
+};
+
+
+// Eval as rvalue
+template<DenseIndex Planes, DenseIndex Rows, DenseIndex Cols, typename ArgType, typename Device>
+struct TensorEvaluator<const TensorVolumePatchOp<Planes, Rows, Cols, ArgType>, Device>
+{
+ typedef TensorVolumePatchOp<Planes, Rows, Cols, ArgType> XprType;
+ typedef typename XprType::Index Index;
+ static const int NumInputDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value;
+ static const int NumDims = NumInputDims + 1;
+ typedef DSizes<Index, NumDims> Dimensions;
+ typedef typename internal::remove_const<typename XprType::Scalar>::type Scalar;
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+ typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
+ static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
+
+ enum {
+ IsAligned = false,
+ PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
+ BlockAccess = false,
+ Layout = TensorEvaluator<ArgType, Device>::Layout,
+ CoordAccess = false,
+ RawAccess = false
+ };
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
+ : m_impl(op.expression(), device)
+ {
+ EIGEN_STATIC_ASSERT((NumDims >= 5), YOU_MADE_A_PROGRAMMING_MISTAKE);
+
+ m_paddingValue = op.padding_value();
+
+ const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
+
+ // Cache a few variables.
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ m_inputDepth = input_dims[0];
+ m_inputPlanes = input_dims[1];
+ m_inputRows = input_dims[2];
+ m_inputCols = input_dims[3];
+ } else {
+ m_inputDepth = input_dims[NumInputDims-1];
+ m_inputPlanes = input_dims[NumInputDims-2];
+ m_inputRows = input_dims[NumInputDims-3];
+ m_inputCols = input_dims[NumInputDims-4];
+ }
+
+ m_plane_strides = op.plane_strides();
+ m_row_strides = op.row_strides();
+ m_col_strides = op.col_strides();
+
+ // Input strides and effective input/patch size
+ m_in_plane_strides = op.in_plane_strides();
+ m_in_row_strides = op.in_row_strides();
+ m_in_col_strides = op.in_col_strides();
+ m_plane_inflate_strides = op.plane_inflate_strides();
+ m_row_inflate_strides = op.row_inflate_strides();
+ m_col_inflate_strides = op.col_inflate_strides();
+
+ // The "effective" spatial size after inflating data with zeros.
+ m_input_planes_eff = (m_inputPlanes - 1) * m_plane_inflate_strides + 1;
+ m_input_rows_eff = (m_inputRows - 1) * m_row_inflate_strides + 1;
+ m_input_cols_eff = (m_inputCols - 1) * m_col_inflate_strides + 1;
+ m_patch_planes_eff = op.patch_planes() + (op.patch_planes() - 1) * (m_in_plane_strides - 1);
+ m_patch_rows_eff = op.patch_rows() + (op.patch_rows() - 1) * (m_in_row_strides - 1);
+ m_patch_cols_eff = op.patch_cols() + (op.patch_cols() - 1) * (m_in_col_strides - 1);
+
+ if (op.padding_explicit()) {
+ m_outputPlanes = numext::ceil((m_input_planes_eff + op.padding_top_z() + op.padding_bottom_z() - m_patch_planes_eff + 1.f) / static_cast<float>(m_plane_strides));
+ m_outputRows = numext::ceil((m_input_rows_eff + op.padding_top() + op.padding_bottom() - m_patch_rows_eff + 1.f) / static_cast<float>(m_row_strides));
+ m_outputCols = numext::ceil((m_input_cols_eff + op.padding_left() + op.padding_right() - m_patch_cols_eff + 1.f) / static_cast<float>(m_col_strides));
+ m_planePaddingTop = op.padding_top_z();
+ m_rowPaddingTop = op.padding_top();
+ m_colPaddingLeft = op.padding_left();
+ } else {
+ // Computing padding from the type
+ switch (op.padding_type()) {
+ case PADDING_VALID:
+ m_outputPlanes = numext::ceil((m_input_planes_eff - m_patch_planes_eff + 1.f) / static_cast<float>(m_plane_strides));
+ m_outputRows = numext::ceil((m_input_rows_eff - m_patch_rows_eff + 1.f) / static_cast<float>(m_row_strides));
+ m_outputCols = numext::ceil((m_input_cols_eff - m_patch_cols_eff + 1.f) / static_cast<float>(m_col_strides));
+ m_planePaddingTop = 0;
+ m_rowPaddingTop = 0;
+ m_colPaddingLeft = 0;
+ break;
+ case PADDING_SAME: {
+ m_outputPlanes = numext::ceil(m_input_planes_eff / static_cast<float>(m_plane_strides));
+ m_outputRows = numext::ceil(m_input_rows_eff / static_cast<float>(m_row_strides));
+ m_outputCols = numext::ceil(m_input_cols_eff / static_cast<float>(m_col_strides));
+ const Index dz = m_outputPlanes * m_plane_strides + m_patch_planes_eff - 1 - m_input_planes_eff;
+ const Index dy = m_outputRows * m_row_strides + m_patch_rows_eff - 1 - m_input_rows_eff;
+ const Index dx = m_outputCols * m_col_strides + m_patch_cols_eff - 1 - m_input_cols_eff;
+ m_planePaddingTop = dz - dz / 2;
+ m_rowPaddingTop = dy - dy / 2;
+ m_colPaddingLeft = dx - dx / 2;
+ break;
+ }
+ default:
+ eigen_assert(false && "unexpected padding");
+ }
+ }
+ eigen_assert(m_outputRows > 0);
+ eigen_assert(m_outputCols > 0);
+ eigen_assert(m_outputPlanes > 0);
+
+ // Dimensions for result of extraction.
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ // ColMajor
+ // 0: depth
+ // 1: patch_planes
+ // 2: patch_rows
+ // 3: patch_cols
+ // 4: number of patches
+ // 5 and beyond: anything else (such as batch).
+ m_dimensions[0] = input_dims[0];
+ m_dimensions[1] = op.patch_planes();
+ m_dimensions[2] = op.patch_rows();
+ m_dimensions[3] = op.patch_cols();
+ m_dimensions[4] = m_outputPlanes * m_outputRows * m_outputCols;
+ for (int i = 5; i < NumDims; ++i) {
+ m_dimensions[i] = input_dims[i-1];
+ }
+ } else {
+ // RowMajor
+ // NumDims-1: depth
+ // NumDims-2: patch_planes
+ // NumDims-3: patch_rows
+ // NumDims-4: patch_cols
+ // NumDims-5: number of patches
+ // NumDims-6 and beyond: anything else (such as batch).
+ m_dimensions[NumDims-1] = input_dims[NumInputDims-1];
+ m_dimensions[NumDims-2] = op.patch_planes();
+ m_dimensions[NumDims-3] = op.patch_rows();
+ m_dimensions[NumDims-4] = op.patch_cols();
+ m_dimensions[NumDims-5] = m_outputPlanes * m_outputRows * m_outputCols;
+ for (int i = NumDims-6; i >= 0; --i) {
+ m_dimensions[i] = input_dims[i];
+ }
+ }
+
+ // Strides for the output tensor.
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ m_rowStride = m_dimensions[1];
+ m_colStride = m_dimensions[2] * m_rowStride;
+ m_patchStride = m_colStride * m_dimensions[3] * m_dimensions[0];
+ m_otherStride = m_patchStride * m_dimensions[4];
+ } else {
+ m_rowStride = m_dimensions[NumDims-2];
+ m_colStride = m_dimensions[NumDims-3] * m_rowStride;
+ m_patchStride = m_colStride * m_dimensions[NumDims-4] * m_dimensions[NumDims-1];
+ m_otherStride = m_patchStride * m_dimensions[NumDims-5];
+ }
+
+ // Strides for navigating through the input tensor.
+ m_planeInputStride = m_inputDepth;
+ m_rowInputStride = m_inputDepth * m_inputPlanes;
+ m_colInputStride = m_inputDepth * m_inputRows * m_inputPlanes;
+ m_otherInputStride = m_inputDepth * m_inputRows * m_inputCols * m_inputPlanes;
+
+ m_outputPlanesRows = m_outputPlanes * m_outputRows;
+
+ // Fast representations of different variables.
+ m_fastOtherStride = internal::TensorIntDivisor<Index>(m_otherStride);
+ m_fastPatchStride = internal::TensorIntDivisor<Index>(m_patchStride);
+ m_fastColStride = internal::TensorIntDivisor<Index>(m_colStride);
+ m_fastRowStride = internal::TensorIntDivisor<Index>(m_rowStride);
+ m_fastInputRowStride = internal::TensorIntDivisor<Index>(m_row_inflate_strides);
+ m_fastInputColStride = internal::TensorIntDivisor<Index>(m_col_inflate_strides);
+ m_fastInputPlaneStride = internal::TensorIntDivisor<Index>(m_plane_inflate_strides);
+ m_fastInputColsEff = internal::TensorIntDivisor<Index>(m_input_cols_eff);
+ m_fastOutputPlanes = internal::TensorIntDivisor<Index>(m_outputPlanes);
+ m_fastOutputPlanesRows = internal::TensorIntDivisor<Index>(m_outputPlanesRows);
+
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ m_fastOutputDepth = internal::TensorIntDivisor<Index>(m_dimensions[0]);
+ } else {
+ m_fastOutputDepth = internal::TensorIntDivisor<Index>(m_dimensions[NumDims-1]);
+ }
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* /*data*/) {
+ m_impl.evalSubExprsIfNeeded(NULL);
+ return true;
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
+ m_impl.cleanup();
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
+ {
+ // Patch index corresponding to the passed in index.
+ const Index patchIndex = index / m_fastPatchStride;
+
+ // Spatial offset within the patch. This has to be translated into 3D
+ // coordinates within the patch.
+ const Index patchOffset = (index - patchIndex * m_patchStride) / m_fastOutputDepth;
+
+ // Batch, etc.
+ const Index otherIndex = (NumDims == 5) ? 0 : index / m_fastOtherStride;
+ const Index patch3DIndex = (NumDims == 5) ? patchIndex : (index - otherIndex * m_otherStride) / m_fastPatchStride;
+
+ // Calculate the column index in the original input tensor.
+ const Index colIndex = patch3DIndex / m_fastOutputPlanesRows;
+ const Index colOffset = patchOffset / m_fastColStride;
+ const Index inputCol = colIndex * m_col_strides + colOffset * m_in_col_strides - m_colPaddingLeft;
+ const Index origInputCol = (m_col_inflate_strides == 1) ? inputCol : ((inputCol >= 0) ? (inputCol / m_fastInputColStride) : 0);
+ if (inputCol < 0 || inputCol >= m_input_cols_eff ||
+ ((m_col_inflate_strides != 1) && (inputCol != origInputCol * m_col_inflate_strides))) {
+ return Scalar(m_paddingValue);
+ }
+
+ // Calculate row index in the original input tensor.
+ const Index rowIndex = (patch3DIndex - colIndex * m_outputPlanesRows) / m_fastOutputPlanes;
+ const Index rowOffset = (patchOffset - colOffset * m_colStride) / m_fastRowStride;
+ const Index inputRow = rowIndex * m_row_strides + rowOffset * m_in_row_strides - m_rowPaddingTop;
+ const Index origInputRow = (m_row_inflate_strides == 1) ? inputRow : ((inputRow >= 0) ? (inputRow / m_fastInputRowStride) : 0);
+ if (inputRow < 0 || inputRow >= m_input_rows_eff ||
+ ((m_row_inflate_strides != 1) && (inputRow != origInputRow * m_row_inflate_strides))) {
+ return Scalar(m_paddingValue);
+ }
+
+ // Calculate plane index in the original input tensor.
+ const Index planeIndex = (patch3DIndex - m_outputPlanes * (colIndex * m_outputRows + rowIndex));
+ const Index planeOffset = patchOffset - colOffset * m_colStride - rowOffset * m_rowStride;
+ const Index inputPlane = planeIndex * m_plane_strides + planeOffset * m_in_plane_strides - m_planePaddingTop;
+ const Index origInputPlane = (m_plane_inflate_strides == 1) ? inputPlane : ((inputPlane >= 0) ? (inputPlane / m_fastInputPlaneStride) : 0);
+ if (inputPlane < 0 || inputPlane >= m_input_planes_eff ||
+ ((m_plane_inflate_strides != 1) && (inputPlane != origInputPlane * m_plane_inflate_strides))) {
+ return Scalar(m_paddingValue);
+ }
+
+ const int depth_index = static_cast<int>(Layout) == static_cast<int>(ColMajor) ? 0 : NumDims - 1;
+ const Index depth = index - (index / m_fastOutputDepth) * m_dimensions[depth_index];
+
+ const Index inputIndex = depth +
+ origInputRow * m_rowInputStride +
+ origInputCol * m_colInputStride +
+ origInputPlane * m_planeInputStride +
+ otherIndex * m_otherInputStride;
+
+ return m_impl.coeff(inputIndex);
+ }
+
+ template<int LoadMode>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
+ {
+ EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
+ eigen_assert(index+PacketSize-1 < dimensions().TotalSize());
+
+ if (m_in_row_strides != 1 || m_in_col_strides != 1 || m_row_inflate_strides != 1 || m_col_inflate_strides != 1 ||
+ m_in_plane_strides != 1 || m_plane_inflate_strides != 1) {
+ return packetWithPossibleZero(index);
+ }
+
+ const Index indices[2] = {index, index + PacketSize - 1};
+ const Index patchIndex = indices[0] / m_fastPatchStride;
+ if (patchIndex != indices[1] / m_fastPatchStride) {
+ return packetWithPossibleZero(index);
+ }
+ const Index otherIndex = (NumDims == 5) ? 0 : indices[0] / m_fastOtherStride;
+ eigen_assert(otherIndex == indices[1] / m_fastOtherStride);
+
+ // Find the offset of the element wrt the location of the first element.
+ const Index patchOffsets[2] = {(indices[0] - patchIndex * m_patchStride) / m_fastOutputDepth,
+ (indices[1] - patchIndex * m_patchStride) / m_fastOutputDepth};
+
+ const Index patch3DIndex = (NumDims == 5) ? patchIndex : (indices[0] - otherIndex * m_otherStride) / m_fastPatchStride;
+ eigen_assert(patch3DIndex == (indices[1] - otherIndex * m_otherStride) / m_fastPatchStride);
+
+ const Index colIndex = patch3DIndex / m_fastOutputPlanesRows;
+ const Index colOffsets[2] = {
+ patchOffsets[0] / m_fastColStride,
+ patchOffsets[1] / m_fastColStride};
+
+ // Calculate col indices in the original input tensor.
+ const Index inputCols[2] = {
+ colIndex * m_col_strides + colOffsets[0] - m_colPaddingLeft,
+ colIndex * m_col_strides + colOffsets[1] - m_colPaddingLeft};
+ if (inputCols[1] < 0 || inputCols[0] >= m_inputCols) {
+ return internal::pset1<PacketReturnType>(Scalar(m_paddingValue));
+ }
+
+ if (inputCols[0] != inputCols[1]) {
+ return packetWithPossibleZero(index);
+ }
+
+ const Index rowIndex = (patch3DIndex - colIndex * m_outputPlanesRows) / m_fastOutputPlanes;
+ const Index rowOffsets[2] = {
+ (patchOffsets[0] - colOffsets[0] * m_colStride) / m_fastRowStride,
+ (patchOffsets[1] - colOffsets[1] * m_colStride) / m_fastRowStride};
+ eigen_assert(rowOffsets[0] <= rowOffsets[1]);
+ // Calculate row indices in the original input tensor.
+ const Index inputRows[2] = {
+ rowIndex * m_row_strides + rowOffsets[0] - m_rowPaddingTop,
+ rowIndex * m_row_strides + rowOffsets[1] - m_rowPaddingTop};
+
+ if (inputRows[1] < 0 || inputRows[0] >= m_inputRows) {
+ return internal::pset1<PacketReturnType>(Scalar(m_paddingValue));
+ }
+
+ if (inputRows[0] != inputRows[1]) {
+ return packetWithPossibleZero(index);
+ }
+
+ const Index planeIndex = (patch3DIndex - m_outputPlanes * (colIndex * m_outputRows + rowIndex));
+ const Index planeOffsets[2] = {
+ patchOffsets[0] - colOffsets[0] * m_colStride - rowOffsets[0] * m_rowStride,
+ patchOffsets[1] - colOffsets[1] * m_colStride - rowOffsets[1] * m_rowStride};
+ eigen_assert(planeOffsets[0] <= planeOffsets[1]);
+ const Index inputPlanes[2] = {
+ planeIndex * m_plane_strides + planeOffsets[0] - m_planePaddingTop,
+ planeIndex * m_plane_strides + planeOffsets[1] - m_planePaddingTop};
+
+ if (inputPlanes[1] < 0 || inputPlanes[0] >= m_inputPlanes) {
+ return internal::pset1<PacketReturnType>(Scalar(m_paddingValue));
+ }
+
+ if (inputPlanes[0] >= 0 && inputPlanes[1] < m_inputPlanes) {
+ // no padding
+ const int depth_index = static_cast<int>(Layout) == static_cast<int>(ColMajor) ? 0 : NumDims - 1;
+ const Index depth = index - (index / m_fastOutputDepth) * m_dimensions[depth_index];
+ const Index inputIndex = depth +
+ inputRows[0] * m_rowInputStride +
+ inputCols[0] * m_colInputStride +
+ m_planeInputStride * inputPlanes[0] +
+ otherIndex * m_otherInputStride;
+ return m_impl.template packet<Unaligned>(inputIndex);
+ }
+
+ return packetWithPossibleZero(index);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
+ costPerCoeff(bool vectorized) const {
+ const double compute_cost =
+ 10 * TensorOpCost::DivCost<Index>() + 21 * TensorOpCost::MulCost<Index>() +
+ 8 * TensorOpCost::AddCost<Index>();
+ return TensorOpCost(0, 0, compute_cost, vectorized, PacketSize);
+ }
+
+ EIGEN_DEVICE_FUNC Scalar* data() const { return NULL; }
+
+ const TensorEvaluator<ArgType, Device>& impl() const { return m_impl; }
+
+ Index planePaddingTop() const { return m_planePaddingTop; }
+ Index rowPaddingTop() const { return m_rowPaddingTop; }
+ Index colPaddingLeft() const { return m_colPaddingLeft; }
+ Index outputPlanes() const { return m_outputPlanes; }
+ Index outputRows() const { return m_outputRows; }
+ Index outputCols() const { return m_outputCols; }
+ Index userPlaneStride() const { return m_plane_strides; }
+ Index userRowStride() const { return m_row_strides; }
+ Index userColStride() const { return m_col_strides; }
+ Index userInPlaneStride() const { return m_in_plane_strides; }
+ Index userInRowStride() const { return m_in_row_strides; }
+ Index userInColStride() const { return m_in_col_strides; }
+ Index planeInflateStride() const { return m_plane_inflate_strides; }
+ Index rowInflateStride() const { return m_row_inflate_strides; }
+ Index colInflateStride() const { return m_col_inflate_strides; }
+
+ protected:
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packetWithPossibleZero(Index index) const
+ {
+ EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize];
+ for (int i = 0; i < PacketSize; ++i) {
+ values[i] = coeff(index+i);
+ }
+ PacketReturnType rslt = internal::pload<PacketReturnType>(values);
+ return rslt;
+ }
+
+ Dimensions m_dimensions;
+
+ // Parameters passed to the constructor.
+ Index m_plane_strides;
+ Index m_row_strides;
+ Index m_col_strides;
+
+ Index m_outputPlanes;
+ Index m_outputRows;
+ Index m_outputCols;
+
+ Index m_planePaddingTop;
+ Index m_rowPaddingTop;
+ Index m_colPaddingLeft;
+
+ Index m_in_plane_strides;
+ Index m_in_row_strides;
+ Index m_in_col_strides;
+
+ Index m_plane_inflate_strides;
+ Index m_row_inflate_strides;
+ Index m_col_inflate_strides;
+
+ // Cached input size.
+ Index m_inputDepth;
+ Index m_inputPlanes;
+ Index m_inputRows;
+ Index m_inputCols;
+
+ // Other cached variables.
+ Index m_outputPlanesRows;
+
+ // Effective input/patch post-inflation size.
+ Index m_input_planes_eff;
+ Index m_input_rows_eff;
+ Index m_input_cols_eff;
+ Index m_patch_planes_eff;
+ Index m_patch_rows_eff;
+ Index m_patch_cols_eff;
+
+ // Strides for the output tensor.
+ Index m_otherStride;
+ Index m_patchStride;
+ Index m_rowStride;
+ Index m_colStride;
+
+ // Strides for the input tensor.
+ Index m_planeInputStride;
+ Index m_rowInputStride;
+ Index m_colInputStride;
+ Index m_otherInputStride;
+
+ internal::TensorIntDivisor<Index> m_fastOtherStride;
+ internal::TensorIntDivisor<Index> m_fastPatchStride;
+ internal::TensorIntDivisor<Index> m_fastColStride;
+ internal::TensorIntDivisor<Index> m_fastRowStride;
+ internal::TensorIntDivisor<Index> m_fastInputPlaneStride;
+ internal::TensorIntDivisor<Index> m_fastInputRowStride;
+ internal::TensorIntDivisor<Index> m_fastInputColStride;
+ internal::TensorIntDivisor<Index> m_fastInputColsEff;
+ internal::TensorIntDivisor<Index> m_fastOutputPlanesRows;
+ internal::TensorIntDivisor<Index> m_fastOutputPlanes;
+ internal::TensorIntDivisor<Index> m_fastOutputDepth;
+
+ Scalar m_paddingValue;
+
+ TensorEvaluator<ArgType, Device> m_impl;
+};
+
+
+} // end namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_VOLUME_PATCH_H
diff --git a/unsupported/Eigen/CXX11/src/TensorSymmetry/DynamicSymmetry.h b/unsupported/Eigen/CXX11/src/TensorSymmetry/DynamicSymmetry.h
new file mode 100644
index 000000000..bc4f2025f
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/TensorSymmetry/DynamicSymmetry.h
@@ -0,0 +1,293 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2013 Christian Seiler <christian@iwakd.de>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSORSYMMETRY_DYNAMICSYMMETRY_H
+#define EIGEN_CXX11_TENSORSYMMETRY_DYNAMICSYMMETRY_H
+
+namespace Eigen {
+
+class DynamicSGroup
+{
+ public:
+ inline explicit DynamicSGroup() : m_numIndices(1), m_elements(), m_generators(), m_globalFlags(0) { m_elements.push_back(ge(Generator(0, 0, 0))); }
+ inline DynamicSGroup(const DynamicSGroup& o) : m_numIndices(o.m_numIndices), m_elements(o.m_elements), m_generators(o.m_generators), m_globalFlags(o.m_globalFlags) { }
+ inline DynamicSGroup(DynamicSGroup&& o) : m_numIndices(o.m_numIndices), m_elements(), m_generators(o.m_generators), m_globalFlags(o.m_globalFlags) { std::swap(m_elements, o.m_elements); }
+ inline DynamicSGroup& operator=(const DynamicSGroup& o) { m_numIndices = o.m_numIndices; m_elements = o.m_elements; m_generators = o.m_generators; m_globalFlags = o.m_globalFlags; return *this; }
+ inline DynamicSGroup& operator=(DynamicSGroup&& o) { m_numIndices = o.m_numIndices; std::swap(m_elements, o.m_elements); m_generators = o.m_generators; m_globalFlags = o.m_globalFlags; return *this; }
+
+ void add(int one, int two, int flags = 0);
+
+ template<typename Gen_>
+ inline void add(Gen_) { add(Gen_::One, Gen_::Two, Gen_::Flags); }
+ inline void addSymmetry(int one, int two) { add(one, two, 0); }
+ inline void addAntiSymmetry(int one, int two) { add(one, two, NegationFlag); }
+ inline void addHermiticity(int one, int two) { add(one, two, ConjugationFlag); }
+ inline void addAntiHermiticity(int one, int two) { add(one, two, NegationFlag | ConjugationFlag); }
+
+ template<typename Op, typename RV, typename Index, std::size_t N, typename... Args>
+ inline RV apply(const std::array<Index, N>& idx, RV initial, Args&&... args) const
+ {
+    eigen_assert(N >= m_numIndices && "Can only apply symmetry group to objects that have at least the required number of indices.");
+ for (std::size_t i = 0; i < size(); i++)
+ initial = Op::run(h_permute(i, idx, typename internal::gen_numeric_list<int, N>::type()), m_elements[i].flags, initial, std::forward<Args>(args)...);
+ return initial;
+ }
+
+ template<typename Op, typename RV, typename Index, typename... Args>
+ inline RV apply(const std::vector<Index>& idx, RV initial, Args&&... args) const
+ {
+    eigen_assert(idx.size() >= m_numIndices && "Can only apply symmetry group to objects that have at least the required number of indices.");
+ for (std::size_t i = 0; i < size(); i++)
+ initial = Op::run(h_permute(i, idx), m_elements[i].flags, initial, std::forward<Args>(args)...);
+ return initial;
+ }
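+
+    // Example Op (a minimal sketch): any type exposing a static run() that
+    // takes the permuted indices, the group element's flags and the value
+    // accumulated so far can be plugged into apply(). This one merely counts
+    // the group's elements:
+    //
+    //   struct CountElements {
+    //     template<typename Index, std::size_t N>
+    //     static int run(const std::array<Index, N>&, int /*flags*/, int count)
+    //     { return count + 1; }
+    //   };
+    //   // sgroup.apply<CountElements, int>(indices, 0) yields sgroup.size()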
+
+ inline int globalFlags() const { return m_globalFlags; }
+ inline std::size_t size() const { return m_elements.size(); }
+
+ template<typename Tensor_, typename... IndexTypes>
+ inline internal::tensor_symmetry_value_setter<Tensor_, DynamicSGroup> operator()(Tensor_& tensor, typename Tensor_::Index firstIndex, IndexTypes... otherIndices) const
+ {
+ static_assert(sizeof...(otherIndices) + 1 == Tensor_::NumIndices, "Number of indices used to access a tensor coefficient must be equal to the rank of the tensor.");
+ return operator()(tensor, std::array<typename Tensor_::Index, Tensor_::NumIndices>{{firstIndex, otherIndices...}});
+ }
+
+ template<typename Tensor_>
+ inline internal::tensor_symmetry_value_setter<Tensor_, DynamicSGroup> operator()(Tensor_& tensor, std::array<typename Tensor_::Index, Tensor_::NumIndices> const& indices) const
+ {
+ return internal::tensor_symmetry_value_setter<Tensor_, DynamicSGroup>(tensor, *this, indices);
+ }
+ private:
+ struct GroupElement {
+ std::vector<int> representation;
+ int flags;
+ bool isId() const
+ {
+ for (std::size_t i = 0; i < representation.size(); i++)
+ if (i != (size_t)representation[i])
+ return false;
+ return true;
+ }
+ };
+ struct Generator {
+ int one;
+ int two;
+ int flags;
+ constexpr inline Generator(int one_, int two_, int flags_) : one(one_), two(two_), flags(flags_) {}
+ };
+
+ std::size_t m_numIndices;
+ std::vector<GroupElement> m_elements;
+ std::vector<Generator> m_generators;
+ int m_globalFlags;
+
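+  // Apply the permutation of the which-th group element to idx; trailing
+  // indices beyond the group's rank are passed through unchanged.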
+ template<typename Index, std::size_t N, int... n>
+ inline std::array<Index, N> h_permute(std::size_t which, const std::array<Index, N>& idx, internal::numeric_list<int, n...>) const
+ {
+ return std::array<Index, N>{{ idx[n >= m_numIndices ? n : m_elements[which].representation[n]]... }};
+ }
+
+ template<typename Index>
+ inline std::vector<Index> h_permute(std::size_t which, std::vector<Index> idx) const
+ {
+ std::vector<Index> result;
+ result.reserve(idx.size());
+ for (auto k : m_elements[which].representation)
+ result.push_back(idx[k]);
+ for (std::size_t i = m_numIndices; i < idx.size(); i++)
+ result.push_back(idx[i]);
+ return result;
+ }
+
+ inline GroupElement ge(Generator const& g) const
+ {
+ GroupElement result;
+ result.representation.reserve(m_numIndices);
+ result.flags = g.flags;
+ for (std::size_t k = 0; k < m_numIndices; k++) {
+ if (k == (std::size_t)g.one)
+ result.representation.push_back(g.two);
+ else if (k == (std::size_t)g.two)
+ result.representation.push_back(g.one);
+ else
+ result.representation.push_back(int(k));
+ }
+ return result;
+ }
+
+ GroupElement mul(GroupElement, GroupElement) const;
+ inline GroupElement mul(Generator g1, GroupElement g2) const
+ {
+ return mul(ge(g1), g2);
+ }
+
+ inline GroupElement mul(GroupElement g1, Generator g2) const
+ {
+ return mul(g1, ge(g2));
+ }
+
+ inline GroupElement mul(Generator g1, Generator g2) const
+ {
+ return mul(ge(g1), ge(g2));
+ }
+
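+  // Returns the XOR of the stored and probed flags if an element with the
+  // same permutation already exists in the group (0 for an exact duplicate),
+  // or -1 if the element is new.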
+ inline int findElement(GroupElement e) const
+ {
+ for (auto ee : m_elements) {
+ if (ee.representation == e.representation)
+ return ee.flags ^ e.flags;
+ }
+ return -1;
+ }
+
+ void updateGlobalFlags(int flagDiffOfSameGenerator);
+};
+
+// dynamic symmetry group that auto-adds the template parameters in the constructor
+template<typename... Gen>
+class DynamicSGroupFromTemplateArgs : public DynamicSGroup
+{
+ public:
+ inline DynamicSGroupFromTemplateArgs() : DynamicSGroup()
+ {
+ add_all(internal::type_list<Gen...>());
+ }
+ inline DynamicSGroupFromTemplateArgs(DynamicSGroupFromTemplateArgs const& other) : DynamicSGroup(other) { }
+ inline DynamicSGroupFromTemplateArgs(DynamicSGroupFromTemplateArgs&& other) : DynamicSGroup(other) { }
+ inline DynamicSGroupFromTemplateArgs<Gen...>& operator=(const DynamicSGroupFromTemplateArgs<Gen...>& o) { DynamicSGroup::operator=(o); return *this; }
+ inline DynamicSGroupFromTemplateArgs<Gen...>& operator=(DynamicSGroupFromTemplateArgs<Gen...>&& o) { DynamicSGroup::operator=(o); return *this; }
+
+ private:
+ template<typename Gen1, typename... GenNext>
+ inline void add_all(internal::type_list<Gen1, GenNext...>)
+ {
+ add(Gen1());
+ add_all(internal::type_list<GenNext...>());
+ }
+
+ inline void add_all(internal::type_list<>)
+ {
+ }
+};
+
+inline DynamicSGroup::GroupElement DynamicSGroup::mul(GroupElement g1, GroupElement g2) const
+{
+ eigen_internal_assert(g1.representation.size() == m_numIndices);
+ eigen_internal_assert(g2.representation.size() == m_numIndices);
+
+ GroupElement result;
+ result.representation.reserve(m_numIndices);
+ for (std::size_t i = 0; i < m_numIndices; i++) {
+ int v = g2.representation[g1.representation[i]];
+ eigen_assert(v >= 0);
+ result.representation.push_back(v);
+ }
+ result.flags = g1.flags ^ g2.flags;
+ return result;
+}
+
+inline void DynamicSGroup::add(int one, int two, int flags)
+{
+ eigen_assert(one >= 0);
+ eigen_assert(two >= 0);
+ eigen_assert(one != two);
+
+ if ((std::size_t)one >= m_numIndices || (std::size_t)two >= m_numIndices) {
+    std::size_t newNumIndices = static_cast<std::size_t>((one > two) ? one : two) + 1;
+ for (auto& gelem : m_elements) {
+ gelem.representation.reserve(newNumIndices);
+ for (std::size_t i = m_numIndices; i < newNumIndices; i++)
+ gelem.representation.push_back(i);
+ }
+ m_numIndices = newNumIndices;
+ }
+
+ Generator g{one, two, flags};
+ GroupElement e = ge(g);
+
+ /* special case for first generator */
+ if (m_elements.size() == 1) {
+ while (!e.isId()) {
+ m_elements.push_back(e);
+ e = mul(e, g);
+ }
+
+ if (e.flags > 0)
+ updateGlobalFlags(e.flags);
+
+ // only add in case we didn't have identity
+ if (m_elements.size() > 1)
+ m_generators.push_back(g);
+ return;
+ }
+
+ int p = findElement(e);
+ if (p >= 0) {
+ updateGlobalFlags(p);
+ return;
+ }
+
+ std::size_t coset_order = m_elements.size();
+ m_elements.push_back(e);
+ for (std::size_t i = 1; i < coset_order; i++)
+ m_elements.push_back(mul(m_elements[i], e));
+ m_generators.push_back(g);
+
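+  // Dimino's algorithm: keep multiplying coset representatives with all known
+  // generators, adding the resulting cosets, until no new elements appear.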
+ std::size_t coset_rep = coset_order;
+ do {
+ for (auto g : m_generators) {
+ e = mul(m_elements[coset_rep], g);
+ p = findElement(e);
+ if (p < 0) {
+ // element not yet in group
+ m_elements.push_back(e);
+ for (std::size_t i = 1; i < coset_order; i++)
+ m_elements.push_back(mul(m_elements[i], e));
+ } else if (p > 0) {
+ updateGlobalFlags(p);
+ }
+ }
+ coset_rep += coset_order;
+ } while (coset_rep < m_elements.size());
+}
+
+inline void DynamicSGroup::updateGlobalFlags(int flagDiffOfSameGenerator)
+{
+ switch (flagDiffOfSameGenerator) {
+ case 0:
+ default:
+ // nothing happened
+ break;
+ case NegationFlag:
+      // every element is its own negative => whole tensor is zero
+ m_globalFlags |= GlobalZeroFlag;
+ break;
+ case ConjugationFlag:
+      // every element is its own conjugate => whole tensor is real
+ m_globalFlags |= GlobalRealFlag;
+ break;
+ case (NegationFlag | ConjugationFlag):
+      // every element is its own negative conjugate => whole tensor is imaginary
+ m_globalFlags |= GlobalImagFlag;
+ break;
+ /* NOTE:
+ * since GlobalZeroFlag == GlobalRealFlag | GlobalImagFlag, if one generator
+ * causes the tensor to be real and the next one to be imaginary, this will
+ * trivially give the correct result
+ */
+ }
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_CXX11_TENSORSYMMETRY_DYNAMICSYMMETRY_H
+
+/*
+ * kate: space-indent on; indent-width 2; mixedindent off; indent-mode cstyle;
+ */
diff --git a/unsupported/Eigen/CXX11/src/TensorSymmetry/StaticSymmetry.h b/unsupported/Eigen/CXX11/src/TensorSymmetry/StaticSymmetry.h
new file mode 100644
index 000000000..942293bd7
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/TensorSymmetry/StaticSymmetry.h
@@ -0,0 +1,236 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2013 Christian Seiler <christian@iwakd.de>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSORSYMMETRY_STATICSYMMETRY_H
+#define EIGEN_CXX11_TENSORSYMMETRY_STATICSYMMETRY_H
+
+namespace Eigen {
+
+namespace internal {
+
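+/** \internal Apply an index permutation that is fixed at compile time to a
+ * run-time array of indices. */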
+template<typename list> struct tensor_static_symgroup_permutate;
+
+template<int... nn>
+struct tensor_static_symgroup_permutate<numeric_list<int, nn...>>
+{
+ constexpr static std::size_t N = sizeof...(nn);
+
+ template<typename T>
+ constexpr static inline std::array<T, N> run(const std::array<T, N>& indices)
+ {
+ return {{indices[nn]...}};
+ }
+};
+
+template<typename indices_, int flags_>
+struct tensor_static_symgroup_element
+{
+ typedef indices_ indices;
+ constexpr static int flags = flags_;
+};
+
+template<typename Gen, int N>
+struct tensor_static_symgroup_element_ctor
+{
+ typedef tensor_static_symgroup_element<
+ typename gen_numeric_list_swapped_pair<int, N, Gen::One, Gen::Two>::type,
+ Gen::Flags
+ > type;
+};
+
+template<int N>
+struct tensor_static_symgroup_identity_ctor
+{
+ typedef tensor_static_symgroup_element<
+ typename gen_numeric_list<int, N>::type,
+ 0
+ > type;
+};
+
+template<typename iib>
+struct tensor_static_symgroup_multiply_helper
+{
+ template<int... iia>
+ constexpr static inline numeric_list<int, get<iia, iib>::value...> helper(numeric_list<int, iia...>) {
+ return numeric_list<int, get<iia, iib>::value...>();
+ }
+};
+
+template<typename A, typename B>
+struct tensor_static_symgroup_multiply
+{
+ private:
+ typedef typename A::indices iia;
+ typedef typename B::indices iib;
+ constexpr static int ffa = A::flags;
+ constexpr static int ffb = B::flags;
+
+ public:
+ static_assert(iia::count == iib::count, "Cannot multiply symmetry elements with different number of indices.");
+
+ typedef tensor_static_symgroup_element<
+ decltype(tensor_static_symgroup_multiply_helper<iib>::helper(iia())),
+ ffa ^ ffb
+ > type;
+};
+
+template<typename A, typename B>
+struct tensor_static_symgroup_equality
+{
+ typedef typename A::indices iia;
+ typedef typename B::indices iib;
+ constexpr static int ffa = A::flags;
+ constexpr static int ffb = B::flags;
+ static_assert(iia::count == iib::count, "Cannot compare symmetry elements with different number of indices.");
+
+ constexpr static bool value = is_same<iia, iib>::value;
+
+ private:
+  /* if the two elements are identical, this must be zero, or else the tensor
+   * is forced to be purely real, purely imaginary or even identically zero
+ */
+ constexpr static int flags_cmp_ = ffa ^ ffb;
+
+  /* either the elements are not equal, in which case we don't care whether
+   * the flags match, or they are equal, and then we have to check the flags
+ */
+ constexpr static bool is_zero = value && flags_cmp_ == NegationFlag;
+ constexpr static bool is_real = value && flags_cmp_ == ConjugationFlag;
+ constexpr static bool is_imag = value && flags_cmp_ == (NegationFlag | ConjugationFlag);
+
+ public:
+ constexpr static int global_flags =
+ (is_real ? GlobalRealFlag : 0) |
+ (is_imag ? GlobalImagFlag : 0) |
+ (is_zero ? GlobalZeroFlag : 0);
+};
+
+template<std::size_t NumIndices, typename... Gen>
+struct tensor_static_symgroup
+{
+ typedef StaticSGroup<Gen...> type;
+ constexpr static std::size_t size = type::static_size;
+};
+
+template<typename Index, std::size_t N, int... ii, int... jj>
+constexpr static inline std::array<Index, N> tensor_static_symgroup_index_permute(std::array<Index, N> idx, internal::numeric_list<int, ii...>, internal::numeric_list<int, jj...>)
+{
+ return {{ idx[ii]..., idx[jj]... }};
+}
+
+template<typename Index, int... ii>
+static inline std::vector<Index> tensor_static_symgroup_index_permute(std::vector<Index> idx, internal::numeric_list<int, ii...>)
+{
+ std::vector<Index> result{{ idx[ii]... }};
+ std::size_t target_size = idx.size();
+ for (std::size_t i = result.size(); i < target_size; i++)
+ result.push_back(idx[i]);
+ return result;
+}
+
+template<typename T> struct tensor_static_symgroup_do_apply;
+
+template<typename first, typename... next>
+struct tensor_static_symgroup_do_apply<internal::type_list<first, next...>>
+{
+ template<typename Op, typename RV, std::size_t SGNumIndices, typename Index, std::size_t NumIndices, typename... Args>
+ static inline RV run(const std::array<Index, NumIndices>& idx, RV initial, Args&&... args)
+ {
+ static_assert(NumIndices >= SGNumIndices, "Can only apply symmetry group to objects that have at least the required amount of indices.");
+ typedef typename internal::gen_numeric_list<int, NumIndices - SGNumIndices, SGNumIndices>::type remaining_indices;
+ initial = Op::run(tensor_static_symgroup_index_permute(idx, typename first::indices(), remaining_indices()), first::flags, initial, std::forward<Args>(args)...);
+ return tensor_static_symgroup_do_apply<internal::type_list<next...>>::template run<Op, RV, SGNumIndices>(idx, initial, args...);
+ }
+
+ template<typename Op, typename RV, std::size_t SGNumIndices, typename Index, typename... Args>
+ static inline RV run(const std::vector<Index>& idx, RV initial, Args&&... args)
+ {
+ eigen_assert(idx.size() >= SGNumIndices && "Can only apply symmetry group to objects that have at least the required amount of indices.");
+ initial = Op::run(tensor_static_symgroup_index_permute(idx, typename first::indices()), first::flags, initial, std::forward<Args>(args)...);
+ return tensor_static_symgroup_do_apply<internal::type_list<next...>>::template run<Op, RV, SGNumIndices>(idx, initial, args...);
+ }
+};
+
+template<EIGEN_TPL_PP_SPEC_HACK_DEF(typename, empty)>
+struct tensor_static_symgroup_do_apply<internal::type_list<EIGEN_TPL_PP_SPEC_HACK_USE(empty)>>
+{
+ template<typename Op, typename RV, std::size_t SGNumIndices, typename Index, std::size_t NumIndices, typename... Args>
+ static inline RV run(const std::array<Index, NumIndices>&, RV initial, Args&&...)
+ {
+ // do nothing
+ return initial;
+ }
+
+ template<typename Op, typename RV, std::size_t SGNumIndices, typename Index, typename... Args>
+ static inline RV run(const std::vector<Index>&, RV initial, Args&&...)
+ {
+ // do nothing
+ return initial;
+ }
+};
+
+} // end namespace internal
+
+template<typename... Gen>
+class StaticSGroup
+{
+ constexpr static std::size_t NumIndices = internal::tensor_symmetry_num_indices<Gen...>::value;
+ typedef internal::group_theory::enumerate_group_elements<
+ internal::tensor_static_symgroup_multiply,
+ internal::tensor_static_symgroup_equality,
+ typename internal::tensor_static_symgroup_identity_ctor<NumIndices>::type,
+ internal::type_list<typename internal::tensor_static_symgroup_element_ctor<Gen, NumIndices>::type...>
+ > group_elements;
+ typedef typename group_elements::type ge;
+ public:
+ constexpr inline StaticSGroup() {}
+ constexpr inline StaticSGroup(const StaticSGroup<Gen...>&) {}
+ constexpr inline StaticSGroup(StaticSGroup<Gen...>&&) {}
+
+ template<typename Op, typename RV, typename Index, std::size_t N, typename... Args>
+ static inline RV apply(const std::array<Index, N>& idx, RV initial, Args&&... args)
+ {
+ return internal::tensor_static_symgroup_do_apply<ge>::template run<Op, RV, NumIndices>(idx, initial, args...);
+ }
+
+ template<typename Op, typename RV, typename Index, typename... Args>
+ static inline RV apply(const std::vector<Index>& idx, RV initial, Args&&... args)
+ {
+ eigen_assert(idx.size() == NumIndices);
+ return internal::tensor_static_symgroup_do_apply<ge>::template run<Op, RV, NumIndices>(idx, initial, args...);
+ }
+
+ constexpr static std::size_t static_size = ge::count;
+
+ constexpr static inline std::size_t size() {
+ return ge::count;
+ }
+ constexpr static inline int globalFlags() { return group_elements::global_flags; }
+
+ template<typename Tensor_, typename... IndexTypes>
+ inline internal::tensor_symmetry_value_setter<Tensor_, StaticSGroup<Gen...>> operator()(Tensor_& tensor, typename Tensor_::Index firstIndex, IndexTypes... otherIndices) const
+ {
+ static_assert(sizeof...(otherIndices) + 1 == Tensor_::NumIndices, "Number of indices used to access a tensor coefficient must be equal to the rank of the tensor.");
+ return operator()(tensor, std::array<typename Tensor_::Index, Tensor_::NumIndices>{{firstIndex, otherIndices...}});
+ }
+
+ template<typename Tensor_>
+ inline internal::tensor_symmetry_value_setter<Tensor_, StaticSGroup<Gen...>> operator()(Tensor_& tensor, std::array<typename Tensor_::Index, Tensor_::NumIndices> const& indices) const
+ {
+ return internal::tensor_symmetry_value_setter<Tensor_, StaticSGroup<Gen...>>(tensor, *this, indices);
+ }
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_CXX11_TENSORSYMMETRY_STATICSYMMETRY_H
+
+/*
+ * kate: space-indent on; indent-width 2; mixedindent off; indent-mode cstyle;
+ */
diff --git a/unsupported/Eigen/CXX11/src/TensorSymmetry/Symmetry.h b/unsupported/Eigen/CXX11/src/TensorSymmetry/Symmetry.h
new file mode 100644
index 000000000..879d6cd77
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/TensorSymmetry/Symmetry.h
@@ -0,0 +1,338 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2013 Christian Seiler <christian@iwakd.de>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSORSYMMETRY_SYMMETRY_H
+#define EIGEN_CXX11_TENSORSYMMETRY_SYMMETRY_H
+
+namespace Eigen {
+
+enum {
+ NegationFlag = 0x01,
+ ConjugationFlag = 0x02
+};
+
+enum {
+ GlobalRealFlag = 0x01,
+ GlobalImagFlag = 0x02,
+ GlobalZeroFlag = 0x03
+};
+
+namespace internal {
+
+template<std::size_t NumIndices, typename... Sym> struct tensor_symmetry_pre_analysis;
+template<std::size_t NumIndices, typename... Sym> struct tensor_static_symgroup;
+template<bool instantiate, std::size_t NumIndices, typename... Sym> struct tensor_static_symgroup_if;
+template<typename Tensor_> struct tensor_symmetry_calculate_flags;
+template<typename Tensor_> struct tensor_symmetry_assign_value;
+template<typename... Sym> struct tensor_symmetry_num_indices;
+
+} // end namespace internal
+
+template<int One_, int Two_>
+struct Symmetry
+{
+ static_assert(One_ != Two_, "Symmetries must cover distinct indices.");
+ constexpr static int One = One_;
+ constexpr static int Two = Two_;
+ constexpr static int Flags = 0;
+};
+
+template<int One_, int Two_>
+struct AntiSymmetry
+{
+ static_assert(One_ != Two_, "Symmetries must cover distinct indices.");
+ constexpr static int One = One_;
+ constexpr static int Two = Two_;
+ constexpr static int Flags = NegationFlag;
+};
+
+template<int One_, int Two_>
+struct Hermiticity
+{
+ static_assert(One_ != Two_, "Symmetries must cover distinct indices.");
+ constexpr static int One = One_;
+ constexpr static int Two = Two_;
+ constexpr static int Flags = ConjugationFlag;
+};
+
+template<int One_, int Two_>
+struct AntiHermiticity
+{
+ static_assert(One_ != Two_, "Symmetries must cover distinct indices.");
+ constexpr static int One = One_;
+ constexpr static int Two = Two_;
+ constexpr static int Flags = ConjugationFlag | NegationFlag;
+};
+
+/** \class DynamicSGroup
+ * \ingroup TensorSymmetry_Module
+ *
+ * \brief Dynamic symmetry group
+ *
+ * The %DynamicSGroup class represents a symmetry group that need not be known at
+ * compile time. It is useful for supporting arbitrary run-time definable
+ * symmetries of tensors, but it is also instantiated for compile-time symmetry
+ * groups that would either be too large for the compiler to reasonably
+ * generate (using templates to calculate the group at compile time is very
+ * inefficient), or large enough that unrolling the coefficient-assignment
+ * loop no longer makes sense.
+ */
+class DynamicSGroup;
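+
+// Example (an illustrative sketch; the tensor type and extents are arbitrary):
+//
+//   Eigen::Tensor<double, 4> t(3, 3, 3, 3);
+//   DynamicSGroup sgroup;
+//   sgroup.addSymmetry(0, 1);     // t(i,j,k,l) ==  t(j,i,k,l)
+//   sgroup.addAntiSymmetry(2, 3); // t(i,j,k,l) == -t(i,j,l,k)
+//   sgroup(t, 0, 1, 2, 0) = 42.0; // also writes all symmetry-related entries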
+
+/** \internal
+ *
+ * \class DynamicSGroupFromTemplateArgs
+ * \ingroup TensorSymmetry_Module
+ *
+ * \brief Dynamic symmetry group, initialized from template arguments
+ *
+ * This class is a child class of DynamicSGroup. It uses the template arguments
+ * specified to initialize itself.
+ */
+template<typename... Gen>
+class DynamicSGroupFromTemplateArgs;
+
+/** \class StaticSGroup
+ * \ingroup TensorSymmetry_Module
+ *
+ * \brief Static symmetry group
+ *
+ * This class represents a symmetry group that is known and resolved completely
+ * at compile time. Ideally, no run-time penalty is incurred compared to the
+ * manual unrolling of the symmetry.
+ *
+ * <b><i>CAUTION:</i></b>
+ *
+ * Do not use this class directly for large symmetry groups. The compiler
+ * may run into a limit, segfault, or at the very least take a very,
+ * very, very long time to compile the code. Use the SGroup class instead
+ * if you want a static group. That class contains logic that will
+ * automatically select the DynamicSGroup class instead if the symmetry
+ * group becomes too large. (In that case, unrolling may not even be
+ * beneficial.)
+ */
+template<typename... Gen>
+class StaticSGroup;
+
+/** \class SGroup
+ * \ingroup TensorSymmetry_Module
+ *
+ * \brief Symmetry group, initialized from template arguments
+ *
+ * This class represents a symmetry group whose generators are already
+ * known at compile time. It may or may not be resolved at compile time,
+ * depending on the estimated size of the group.
+ *
+ * \sa StaticSGroup
+ * \sa DynamicSGroup
+ */
+template<typename... Gen>
+class SGroup : public internal::tensor_symmetry_pre_analysis<internal::tensor_symmetry_num_indices<Gen...>::value, Gen...>::root_type
+{
+ public:
+ constexpr static std::size_t NumIndices = internal::tensor_symmetry_num_indices<Gen...>::value;
+ typedef typename internal::tensor_symmetry_pre_analysis<NumIndices, Gen...>::root_type Base;
+
+ // make standard constructors + assignment operators public
+ inline SGroup() : Base() { }
+ inline SGroup(const SGroup<Gen...>& other) : Base(other) { }
+ inline SGroup(SGroup<Gen...>&& other) : Base(other) { }
+ inline SGroup<Gen...>& operator=(const SGroup<Gen...>& other) { Base::operator=(other); return *this; }
+ inline SGroup<Gen...>& operator=(SGroup<Gen...>&& other) { Base::operator=(other); return *this; }
+
+ // all else is defined in the base class
+};
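+
+// Example (sketch): SGroup picks a static or dynamic representation
+// automatically, so it is the recommended entry point:
+//
+//   SGroup<Symmetry<0, 1>, AntiSymmetry<2, 3>> sgroup;
+//   sgroup(t, 0, 1, 2, 0) = 42.0; // t as in the DynamicSGroup example above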
+
+namespace internal {
+
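+/** \internal Smallest number of indices that covers all given generators:
+ * one plus the largest index mentioned by any generator, and at least 1. */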
+template<typename... Sym> struct tensor_symmetry_num_indices
+{
+ constexpr static std::size_t value = 1;
+};
+
+template<int One_, int Two_, typename... Sym> struct tensor_symmetry_num_indices<Symmetry<One_, Two_>, Sym...>
+{
+private:
+ constexpr static std::size_t One = static_cast<std::size_t>(One_);
+ constexpr static std::size_t Two = static_cast<std::size_t>(Two_);
+ constexpr static std::size_t Three = tensor_symmetry_num_indices<Sym...>::value;
+
+ // don't use std::max, since it's not constexpr until C++14...
+ constexpr static std::size_t maxOneTwoPlusOne = ((One > Two) ? One : Two) + 1;
+public:
+ constexpr static std::size_t value = (maxOneTwoPlusOne > Three) ? maxOneTwoPlusOne : Three;
+};
+
+template<int One_, int Two_, typename... Sym> struct tensor_symmetry_num_indices<AntiSymmetry<One_, Two_>, Sym...>
+ : public tensor_symmetry_num_indices<Symmetry<One_, Two_>, Sym...> {};
+template<int One_, int Two_, typename... Sym> struct tensor_symmetry_num_indices<Hermiticity<One_, Two_>, Sym...>
+ : public tensor_symmetry_num_indices<Symmetry<One_, Two_>, Sym...> {};
+template<int One_, int Two_, typename... Sym> struct tensor_symmetry_num_indices<AntiHermiticity<One_, Two_>, Sym...>
+ : public tensor_symmetry_num_indices<Symmetry<One_, Two_>, Sym...> {};
+
+/** \internal
+ *
+ * \class tensor_symmetry_pre_analysis
+ * \ingroup TensorSymmetry_Module
+ *
+ * \brief Pre-select whether to use a static or dynamic symmetry group
+ *
+ * When a symmetry group could in principle be determined at compile time,
+ * this template implements the logic whether to actually do that or whether
+ * to rather defer that to runtime.
+ *
+ * The logic is as follows:
+ * <dl>
+ * <dt><b>No generators (trivial symmetry):</b></dt>
+ * <dd>Use a trivial static group. Ideally, this has no performance impact
+ * compared to not using symmetry at all. In practice, this might not
+ * be the case.</dd>
+ * <dt><b>More than 4 generators:</b></dt>
+ *  <dd>Calculate the group at run time; it is likely far too large for the
+ *      compiler to be able to properly generate it in realistic time.</dd>
+ * <dt><b>Up to and including 4 generators:</b></dt>
+ * <dd>Actually enumerate all group elements, but then check how many there
+ * are. If there are more than 16, it is unlikely that unrolling the
+ * loop (as is done in the static compile-time case) is sensible, so
+ * use a dynamic group instead. If there are at most 16 elements, actually
+ * use that static group. Note that the largest group with 4 generators
+ * still compiles with reasonable resources.</dd>
+ * </dl>
+ *
+ * Note: Example compile-time performance with g++-4.6 on an Intel Core i5-3470
+ * with 16 GiB RAM (all generators non-redundant and the subgroups don't
+ * factorize):
+ *
+ * # Generators -O0 -ggdb -O2
+ * -------------------------------------------------------------------
+ * 1 0.5 s / 250 MiB 0.45s / 230 MiB
+ * 2 0.5 s / 260 MiB 0.5 s / 250 MiB
+ * 3 0.65s / 310 MiB 0.62s / 310 MiB
+ * 4 2.2 s / 860 MiB 1.7 s / 770 MiB
+ * 5 130 s / 13000 MiB 120 s / 11000 MiB
+ *
+ * Everything is still very efficient up to 4 generators; beyond that, the
+ * memory and CPU requirements become unreasonable. Thus we only instantiate
+ * the template group theory logic if the number of generators supplied is 4 or
+ * lower, otherwise this will be forced to be done during runtime, where the
+ * algorithm is reasonably fast.
+ */
+template<std::size_t NumIndices>
+struct tensor_symmetry_pre_analysis<NumIndices>
+{
+ typedef StaticSGroup<> root_type;
+};
+
+template<std::size_t NumIndices, typename Gen_, typename... Gens_>
+struct tensor_symmetry_pre_analysis<NumIndices, Gen_, Gens_...>
+{
+ constexpr static std::size_t max_static_generators = 4;
+ constexpr static std::size_t max_static_elements = 16;
+ typedef tensor_static_symgroup_if<(sizeof...(Gens_) + 1 <= max_static_generators), NumIndices, Gen_, Gens_...> helper;
+ constexpr static std::size_t possible_size = helper::size;
+
+ typedef typename conditional<
+ possible_size == 0 || possible_size >= max_static_elements,
+ DynamicSGroupFromTemplateArgs<Gen_, Gens_...>,
+ typename helper::type
+ >::type root_type;
+};
+
+template<bool instantiate, std::size_t NumIndices, typename... Gens>
+struct tensor_static_symgroup_if
+{
+ constexpr static std::size_t size = 0;
+ typedef void type;
+};
+
+template<std::size_t NumIndices, typename... Gens>
+struct tensor_static_symgroup_if<true, NumIndices, Gens...> : tensor_static_symgroup<NumIndices, Gens...> {};
+
+template<typename Tensor_>
+struct tensor_symmetry_assign_value
+{
+ typedef typename Tensor_::Index Index;
+ typedef typename Tensor_::Scalar Scalar;
+ constexpr static std::size_t NumIndices = Tensor_::NumIndices;
+
+ static inline int run(const std::array<Index, NumIndices>& transformed_indices, int transformation_flags, int dummy, Tensor_& tensor, const Scalar& value_)
+ {
+ Scalar value(value_);
+ if (transformation_flags & ConjugationFlag)
+ value = numext::conj(value);
+ if (transformation_flags & NegationFlag)
+ value = -value;
+ tensor.coeffRef(transformed_indices) = value;
+ return dummy;
+ }
+};
+
+template<typename Tensor_>
+struct tensor_symmetry_calculate_flags
+{
+ typedef typename Tensor_::Index Index;
+ constexpr static std::size_t NumIndices = Tensor_::NumIndices;
+
+ static inline int run(const std::array<Index, NumIndices>& transformed_indices, int transform_flags, int current_flags, const std::array<Index, NumIndices>& orig_indices)
+ {
+ if (transformed_indices == orig_indices) {
+      if ((transform_flags & (ConjugationFlag | NegationFlag)) == (ConjugationFlag | NegationFlag))
+ return current_flags | GlobalImagFlag; // anti-hermitian diagonal
+ else if (transform_flags & ConjugationFlag)
+ return current_flags | GlobalRealFlag; // hermitian diagonal
+ else if (transform_flags & NegationFlag)
+ return current_flags | GlobalZeroFlag; // anti-symmetric diagonal
+ }
+ return current_flags;
+ }
+};
+
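+/** \internal
+ * Proxy object returned by the symmetry groups' operator(). Assigning a
+ * scalar to it writes the value (negated and/or conjugated according to each
+ * group element's flags) to every coefficient that the symmetry group relates
+ * to the given indices.
+ */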
+template<typename Tensor_, typename Symmetry_, int Flags = 0>
+class tensor_symmetry_value_setter
+{
+ public:
+ typedef typename Tensor_::Index Index;
+ typedef typename Tensor_::Scalar Scalar;
+ constexpr static std::size_t NumIndices = Tensor_::NumIndices;
+
+ inline tensor_symmetry_value_setter(Tensor_& tensor, Symmetry_ const& symmetry, std::array<Index, NumIndices> const& indices)
+ : m_tensor(tensor), m_symmetry(symmetry), m_indices(indices) { }
+
+ inline tensor_symmetry_value_setter<Tensor_, Symmetry_, Flags>& operator=(Scalar const& value)
+ {
+ doAssign(value);
+ return *this;
+ }
+ private:
+ Tensor_& m_tensor;
+ Symmetry_ m_symmetry;
+ std::array<Index, NumIndices> m_indices;
+
+ inline void doAssign(Scalar const& value)
+ {
+ #ifdef EIGEN_TENSOR_SYMMETRY_CHECK_VALUES
+ int value_flags = m_symmetry.template apply<internal::tensor_symmetry_calculate_flags<Tensor_>, int>(m_indices, m_symmetry.globalFlags(), m_indices);
+ if (value_flags & GlobalRealFlag)
+ eigen_assert(numext::imag(value) == 0);
+ if (value_flags & GlobalImagFlag)
+ eigen_assert(numext::real(value) == 0);
+ #endif
+ m_symmetry.template apply<internal::tensor_symmetry_assign_value<Tensor_>, int>(m_indices, 0, m_tensor, value);
+ }
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_CXX11_TENSORSYMMETRY_SYMMETRY_H
+
+/*
+ * kate: space-indent on; indent-width 2; mixedindent off; indent-mode cstyle;
+ */
diff --git a/unsupported/Eigen/CXX11/src/TensorSymmetry/util/TemplateGroupTheory.h b/unsupported/Eigen/CXX11/src/TensorSymmetry/util/TemplateGroupTheory.h
new file mode 100644
index 000000000..0fe0b7c46
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/TensorSymmetry/util/TemplateGroupTheory.h
@@ -0,0 +1,666 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2013 Christian Seiler <christian@iwakd.de>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSORSYMMETRY_TEMPLATEGROUPTHEORY_H
+#define EIGEN_CXX11_TENSORSYMMETRY_TEMPLATEGROUPTHEORY_H
+
+namespace Eigen {
+
+namespace internal {
+
+namespace group_theory {
+
+/** \internal
+ * \file CXX11/Tensor/util/TemplateGroupTheory.h
+ * This file contains C++ templates that implement group theory algorithms.
+ *
+ * The algorithms allow for a compile-time analysis of finite groups.
+ *
+ * Currently only Dimino's algorithm is implemented, which returns a list
+ * of all elements in a group given a set of (possibly redundant) generators.
+ * (One could also do that with the so-called orbital algorithm, but that
+ * is much more expensive and usually has no advantages.)
+ */
+
+/**********************************************************************
+ * "Ok kid, here is where it gets complicated."
+ * - Amelia Pond in the "Doctor Who" episode
+ * "The Big Bang"
+ *
+ * Dimino's algorithm
+ * ==================
+ *
+ * The following is Dimino's algorithm in sequential form:
+ *
+ * Input: identity element, list of generators, equality check,
+ * multiplication operation
+ * Output: list of group elements
+ *
+ * 1. add identity element
+ * 2. remove identities from list of generators
+ * 3. add all powers of first generator that aren't the
+ * identity element
+ * 4. go through all remaining generators:
+ * a. if generator is already in the list of elements
+ * -> do nothing
+ * b. otherwise
+ * i. remember current # of elements
+ * (i.e. the size of the current subgroup)
+ * ii. add all current elements (which includes
+ * the identity) each multiplied from right
+ * with the current generator to the group
+ * iii. add all remaining cosets that are generated
+ * by products of the new generator with itself
+ * and all other generators seen so far
+ *
+ * In functional form, this is implemented as a long set of recursive
+ * templates that have a complicated relationship.
+ *
+ * The main interface for Dimino's algorithm is the template
+ * enumerate_group_elements. All lists are implemented as variadic
+ * type_list<typename...> and numeric_list<typename = int, int...>
+ * templates.
+ *
+ * 'Calling' templates is usually done via typedefs.
+ *
+ * This implementation extends the basic algorithm: each group element
+ * has a set of flags associated with it. Multiplication of two group elements
+ * with each other results in a group element whose flags are the
+ * XOR of the flags of the previous elements. Each time the algorithm
+ * notices that a group element it just calculated is already in the
+ * list of current elements, the flags of both will be compared and
+ * added to the so-called 'global flags' of the group.
+ *
+ * The rationale behind this extension is that this allows not only
+ * for the description of symmetries between tensor indices, but
+ * also allows for the description of hermiticity, antisymmetry and
+ * antihermiticity. Negation and conjugation are each a specific bit
+ * in the flags value, and if two different ways of reaching a group
+ * element lead to two different flags, this poses a constraint on
+ * the allowed values of the resulting tensor. For example, if a
+ * group element is reached both with and without the conjugation
+ * flag, it is clear that the resulting tensor has to be real.
+ *
+ * Note that this flag mechanism is quite generic and may have other
+ * uses beyond tensor properties.
+ *
+ * IMPORTANT:
+ * This algorithm assumes the group to be finite. If you try to
+ * run it with a group that's infinite, the algorithm will only
+ * terminate once you hit a compiler limit (max template depth).
+ * Also note that trying to use this implementation to create a
+ * very large group will probably either make you hit the same
+ * limit, cause the compiler to segfault or at the very least
+ * take a *really* long time (hours, days, weeks - sic!) to
+ * compile. It is not recommended to plug in more than 4
+ * generators, unless they are independent of each other.
+ */
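+
+/* A run-time counterpart of this algorithm, operating on std::vector-based
+ * permutation representations instead of type lists, is implemented in
+ * DynamicSGroup::add() (see TensorSymmetry/DynamicSymmetry.h).
+ */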
+
+/** \internal
+ *
+ * \class strip_identities
+ * \ingroup CXX11_TensorSymmetry_Module
+ *
+ * \brief Cleanse a list of group elements of the identity element
+ *
+ * This template is used to make a first pass through all initial
+ * generators of Dimino's algorithm and remove the identity
+ * elements.
+ *
+ * \sa enumerate_group_elements
+ */
+template<template<typename, typename> class Equality, typename id, typename L> struct strip_identities;
+
+template<
+ template<typename, typename> class Equality,
+ typename id,
+ typename t,
+ typename... ts
+>
+struct strip_identities<Equality, id, type_list<t, ts...>>
+{
+ typedef typename conditional<
+ Equality<id, t>::value,
+ typename strip_identities<Equality, id, type_list<ts...>>::type,
+ typename concat<type_list<t>, typename strip_identities<Equality, id, type_list<ts...>>::type>::type
+ >::type type;
+ constexpr static int global_flags = Equality<id, t>::global_flags | strip_identities<Equality, id, type_list<ts...>>::global_flags;
+};
+
+template<
+ template<typename, typename> class Equality,
+ typename id
+ EIGEN_TPL_PP_SPEC_HACK_DEFC(typename, ts)
+>
+struct strip_identities<Equality, id, type_list<EIGEN_TPL_PP_SPEC_HACK_USE(ts)>>
+{
+ typedef type_list<> type;
+ constexpr static int global_flags = 0;
+};
+
+/** \internal
+ *
+ * \class dimino_first_step_elements_helper
+ * \ingroup CXX11_TensorSymmetry_Module
+ *
+ * \brief Recursive template that adds powers of the first generator to the list of group elements
+ *
+ * This template calls itself recursively to add powers of the first
+ * generator to the list of group elements. It stops if it reaches
+ * the identity element again.
+ *
+ * \sa enumerate_group_elements, dimino_first_step_elements
+ */
+template<
+ template<typename, typename> class Multiply,
+ template<typename, typename> class Equality,
+ typename id,
+ typename g,
+ typename current_element,
+ typename elements,
+ bool dont_add_current_element // = false
+>
+struct dimino_first_step_elements_helper :
+ public dimino_first_step_elements_helper<
+ Multiply,
+ Equality,
+ id,
+ g,
+ typename Multiply<current_element, g>::type,
+ typename concat<elements, type_list<current_element>>::type,
+ Equality<typename Multiply<current_element, g>::type, id>::value
+ > {};
+
+template<
+ template<typename, typename> class Multiply,
+ template<typename, typename> class Equality,
+ typename id,
+ typename g,
+ typename current_element,
+ typename elements
+>
+struct dimino_first_step_elements_helper<Multiply, Equality, id, g, current_element, elements, true>
+{
+ typedef elements type;
+ constexpr static int global_flags = Equality<current_element, id>::global_flags;
+};
+
+/** \internal
+ *
+ * \class dimino_first_step_elements
+ * \ingroup CXX11_TensorSymmetry_Module
+ *
+ * \brief Add all powers of the first generator to the list of group elements
+ *
+ * This template takes the first non-identity generator and generates the initial
+ * list of elements which consists of all powers of that generator. For a group
+ * with just one generator, the whole group is enumerated after this step.
+ *
+ * \sa enumerate_group_elements
+ */
+template<
+ template<typename, typename> class Multiply,
+ template<typename, typename> class Equality,
+ typename id,
+ typename generators
+>
+struct dimino_first_step_elements
+{
+ typedef typename get<0, generators>::type first_generator;
+ typedef typename skip<1, generators>::type next_generators;
+ typedef type_list<first_generator> generators_done;
+
+ typedef dimino_first_step_elements_helper<
+ Multiply,
+ Equality,
+ id,
+ first_generator,
+ first_generator,
+ type_list<id>,
+ false
+ > helper;
+ typedef typename helper::type type;
+ constexpr static int global_flags = helper::global_flags;
+};
+
+/** \internal
+ *
+ * \class dimino_get_coset_elements
+ * \ingroup CXX11_TensorSymmetry_Module
+ *
+ * \brief Generate all elements of a specific coset
+ *
+ * This template generates all the elements of a specific coset by
+ * multiplying all elements in the given subgroup with the new
+ * coset representative. Note that the first element of the
+ * subgroup is always the identity element, so the first element of
+ * the result of this template is going to be the coset
+ * representative itself.
+ *
+ * Note that this template accepts an additional boolean parameter
+ * that specifies whether to actually generate the coset (true) or
+ * just return an empty list (false).
+ *
+ * \sa enumerate_group_elements, dimino_add_cosets_for_rep
+ */
+template<
+ template<typename, typename> class Multiply,
+ typename sub_group_elements,
+ typename new_coset_rep,
+ bool generate_coset // = true
+>
+struct dimino_get_coset_elements
+{
+ typedef typename apply_op_from_right<Multiply, new_coset_rep, sub_group_elements>::type type;
+};
+
+template<
+ template<typename, typename> class Multiply,
+ typename sub_group_elements,
+ typename new_coset_rep
+>
+struct dimino_get_coset_elements<Multiply, sub_group_elements, new_coset_rep, false>
+{
+ typedef type_list<> type;
+};
+
+/** \internal
+ *
+ * \class dimino_add_cosets_for_rep
+ * \ingroup CXX11_TensorSymmetry_Module
+ *
+ * \brief Recursive template for adding coset spaces
+ *
+ * This template multiplies the coset representative with a generator
+ * from the list of previous generators. If the new element is not in
+ * the group already, it adds the corresponding coset. Finally it
+ * proceeds to call itself with the next generator from the list.
+ *
+ * \sa enumerate_group_elements, dimino_add_all_coset_spaces
+ */
+template<
+ template<typename, typename> class Multiply,
+ template<typename, typename> class Equality,
+ typename id,
+ typename sub_group_elements,
+ typename elements,
+ typename generators,
+ typename rep_element,
+ int sub_group_size
+>
+struct dimino_add_cosets_for_rep;
+
+template<
+ template<typename, typename> class Multiply,
+ template<typename, typename> class Equality,
+ typename id,
+ typename sub_group_elements,
+ typename elements,
+ typename g,
+ typename... gs,
+ typename rep_element,
+ int sub_group_size
+>
+struct dimino_add_cosets_for_rep<Multiply, Equality, id, sub_group_elements, elements, type_list<g, gs...>, rep_element, sub_group_size>
+{
+ typedef typename Multiply<rep_element, g>::type new_coset_rep;
+ typedef contained_in_list_gf<Equality, new_coset_rep, elements> _cil;
+ constexpr static bool add_coset = !_cil::value;
+
+ typedef typename dimino_get_coset_elements<
+ Multiply,
+ sub_group_elements,
+ new_coset_rep,
+ add_coset
+ >::type coset_elements;
+
+ typedef dimino_add_cosets_for_rep<
+ Multiply,
+ Equality,
+ id,
+ sub_group_elements,
+ typename concat<elements, coset_elements>::type,
+ type_list<gs...>,
+ rep_element,
+ sub_group_size
+ > _helper;
+
+ typedef typename _helper::type type;
+ constexpr static int global_flags = _cil::global_flags | _helper::global_flags;
+
+ /* Note that we don't have to update global flags here, since
+ * we will only add these elements if they are not part of
+ * the group already. But that only happens if the coset rep
+ * is not already in the group, so the check for the coset rep
+ * will catch this.
+ */
+};
+
+template<
+ template<typename, typename> class Multiply,
+ template<typename, typename> class Equality,
+ typename id,
+ typename sub_group_elements,
+ typename elements
+ EIGEN_TPL_PP_SPEC_HACK_DEFC(typename, empty),
+ typename rep_element,
+ int sub_group_size
+>
+struct dimino_add_cosets_for_rep<Multiply, Equality, id, sub_group_elements, elements, type_list<EIGEN_TPL_PP_SPEC_HACK_USE(empty)>, rep_element, sub_group_size>
+{
+ typedef elements type;
+ constexpr static int global_flags = 0;
+};
+
+/** \internal
+ *
+ * \class dimino_add_all_coset_spaces
+ * \ingroup CXX11_TensorSymmetry_Module
+ *
+ * \brief Recursive template for adding all coset spaces for a new generator
+ *
+ * This template tries to go through the list of generators (with
+ * the help of the dimino_add_cosets_for_rep template) as long as
+ * it still finds elements that are not part of the group and add
+ * the corresponding cosets.
+ *
+ * \sa enumerate_group_elements, dimino_add_cosets_for_rep
+ */
+template<
+ template<typename, typename> class Multiply,
+ template<typename, typename> class Equality,
+ typename id,
+ typename sub_group_elements,
+ typename elements,
+ typename generators,
+ int sub_group_size,
+ int rep_pos,
+ bool stop_condition // = false
+>
+struct dimino_add_all_coset_spaces
+{
+ typedef typename get<rep_pos, elements>::type rep_element;
+ typedef dimino_add_cosets_for_rep<
+ Multiply,
+ Equality,
+ id,
+ sub_group_elements,
+ elements,
+ generators,
+ rep_element,
+ sub_group_elements::count
+ > _ac4r;
+ typedef typename _ac4r::type new_elements;
+
+ constexpr static int new_rep_pos = rep_pos + sub_group_elements::count;
+ constexpr static bool new_stop_condition = new_rep_pos >= new_elements::count;
+
+ typedef dimino_add_all_coset_spaces<
+ Multiply,
+ Equality,
+ id,
+ sub_group_elements,
+ new_elements,
+ generators,
+ sub_group_size,
+ new_rep_pos,
+ new_stop_condition
+ > _helper;
+
+ typedef typename _helper::type type;
+ constexpr static int global_flags = _helper::global_flags | _ac4r::global_flags;
+};
+
+template<
+ template<typename, typename> class Multiply,
+ template<typename, typename> class Equality,
+ typename id,
+ typename sub_group_elements,
+ typename elements,
+ typename generators,
+ int sub_group_size,
+ int rep_pos
+>
+struct dimino_add_all_coset_spaces<Multiply, Equality, id, sub_group_elements, elements, generators, sub_group_size, rep_pos, true>
+{
+ typedef elements type;
+ constexpr static int global_flags = 0;
+};
+
+/** \internal
+ *
+ * \class dimino_add_generator
+ * \ingroup CXX11_TensorSymmetry_Module
+ *
+ * \brief Enlarge the group by adding a new generator.
+ *
+ * It accepts a boolean parameter that determines if the generator is redundant,
+ * i.e. was already seen in the group. In that case, it reduces to a no-op.
+ *
+ * \sa enumerate_group_elements, dimino_add_all_coset_spaces
+ */
+template<
+ template<typename, typename> class Multiply,
+ template<typename, typename> class Equality,
+ typename id,
+ typename elements,
+ typename generators_done,
+ typename current_generator,
+ bool redundant // = false
+>
+struct dimino_add_generator
+{
+ /* this template is only called if the generator is not redundant
+ * => all elements of the group multiplied with the new generator
+ * are going to be new elements of the most trivial coset space
+ */
+ typedef typename apply_op_from_right<Multiply, current_generator, elements>::type multiplied_elements;
+ typedef typename concat<elements, multiplied_elements>::type new_elements;
+
+ constexpr static int rep_pos = elements::count;
+
+ typedef dimino_add_all_coset_spaces<
+ Multiply,
+ Equality,
+ id,
+ elements, // elements of previous subgroup
+ new_elements,
+ typename concat<generators_done, type_list<current_generator>>::type,
+ elements::count, // size of previous subgroup
+ rep_pos,
+ false // don't stop (because rep_pos >= new_elements::count is always false at this point)
+ > _helper;
+ typedef typename _helper::type type;
+ constexpr static int global_flags = _helper::global_flags;
+};
+
+template<
+ template<typename, typename> class Multiply,
+ template<typename, typename> class Equality,
+ typename id,
+ typename elements,
+ typename generators_done,
+ typename current_generator
+>
+struct dimino_add_generator<Multiply, Equality, id, elements, generators_done, current_generator, true>
+{
+ // redundant case
+ typedef elements type;
+ constexpr static int global_flags = 0;
+};
+
+/** \internal
+ *
+ * \class dimino_add_remaining_generators
+ * \ingroup CXX11_TensorSymmetry_Module
+ *
+ * \brief Recursive template that adds all remaining generators to a group
+ *
+ * Loop through the list of generators that remain and successively
+ * add them to the group.
+ *
+ * \sa enumerate_group_elements, dimino_add_generator
+ */
+template<
+ template<typename, typename> class Multiply,
+ template<typename, typename> class Equality,
+ typename id,
+ typename generators_done,
+ typename remaining_generators,
+ typename elements
+>
+struct dimino_add_remaining_generators
+{
+ typedef typename get<0, remaining_generators>::type first_generator;
+ typedef typename skip<1, remaining_generators>::type next_generators;
+
+ typedef contained_in_list_gf<Equality, first_generator, elements> _cil;
+
+ typedef dimino_add_generator<
+ Multiply,
+ Equality,
+ id,
+ elements,
+ generators_done,
+ first_generator,
+ _cil::value
+ > _helper;
+
+ typedef typename _helper::type new_elements;
+
+ typedef dimino_add_remaining_generators<
+ Multiply,
+ Equality,
+ id,
+ typename concat<generators_done, type_list<first_generator>>::type,
+ next_generators,
+ new_elements
+ > _next_iter;
+
+ typedef typename _next_iter::type type;
+ constexpr static int global_flags =
+ _cil::global_flags |
+ _helper::global_flags |
+ _next_iter::global_flags;
+};
+
+template<
+ template<typename, typename> class Multiply,
+ template<typename, typename> class Equality,
+ typename id,
+ typename generators_done,
+ typename elements
+>
+struct dimino_add_remaining_generators<Multiply, Equality, id, generators_done, type_list<>, elements>
+{
+ typedef elements type;
+ constexpr static int global_flags = 0;
+};
+
+/** \internal
+ *
+ * \class enumerate_group_elements_noid
+ * \ingroup CXX11_TensorSymmetry_Module
+ *
+ * \brief Helper template that implements group element enumeration
+ *
+ * This is a helper template that implements the actual enumeration
+ * of group elements. This has been split so that the list of
+ * generators can be cleansed of the identity element before
+ * performing the actual operation.
+ *
+ * \sa enumerate_group_elements
+ */
+template<
+ template<typename, typename> class Multiply,
+ template<typename, typename> class Equality,
+ typename id,
+ typename generators,
+ int initial_global_flags = 0
+>
+struct enumerate_group_elements_noid
+{
+ typedef dimino_first_step_elements<Multiply, Equality, id, generators> first_step;
+ typedef typename first_step::type first_step_elements;
+
+ typedef dimino_add_remaining_generators<
+ Multiply,
+ Equality,
+ id,
+ typename first_step::generators_done,
+ typename first_step::next_generators, // remaining_generators
+ typename first_step::type // first_step elements
+ > _helper;
+
+ typedef typename _helper::type type;
+ constexpr static int global_flags =
+ initial_global_flags |
+ first_step::global_flags |
+ _helper::global_flags;
+};
+
+// in case when no generators are specified
+template<
+ template<typename, typename> class Multiply,
+ template<typename, typename> class Equality,
+ typename id,
+ int initial_global_flags
+>
+struct enumerate_group_elements_noid<Multiply, Equality, id, type_list<>, initial_global_flags>
+{
+ typedef type_list<id> type;
+ constexpr static int global_flags = initial_global_flags;
+};
+
+/** \internal
+ *
+ * \class enumerate_group_elements
+ * \ingroup CXX11_TensorSymmetry_Module
+ *
+ * \brief Enumerate all elements in a finite group
+ *
+ * This template enumerates all elements in a finite group. It accepts
+ * the following template parameters:
+ *
+ * \tparam Multiply The multiplication operation that multiplies two group elements
+ * with each other.
+ * \tparam Equality The equality check operation that checks if two group elements
+ * are equal to another.
+ * \tparam id The identity element
+ * \tparam _generators A list of (possibly redundant) generators of the group
+ */
+template<
+ template<typename, typename> class Multiply,
+ template<typename, typename> class Equality,
+ typename id,
+ typename _generators
+>
+struct enumerate_group_elements
+ : public enumerate_group_elements_noid<
+ Multiply,
+ Equality,
+ id,
+ typename strip_identities<Equality, id, _generators>::type,
+ strip_identities<Equality, id, _generators>::global_flags
+ >
+{
+};
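+
+// Note: StaticSGroup (TensorSymmetry/StaticSymmetry.h) instantiates this
+// template with tensor_static_symgroup_multiply and
+// tensor_static_symgroup_equality as the Multiply and Equality operations.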
+
+} // end namespace group_theory
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_CXX11_TENSORSYMMETRY_TEMPLATEGROUPTHEORY_H
+
+/*
+ * kate: space-indent on; indent-width 2; mixedindent off; indent-mode cstyle;
+ */
diff --git a/unsupported/Eigen/CXX11/src/ThreadPool/EventCount.h b/unsupported/Eigen/CXX11/src/ThreadPool/EventCount.h
new file mode 100644
index 000000000..71d55552d
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/ThreadPool/EventCount.h
@@ -0,0 +1,233 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2016 Dmitry Vyukov <dvyukov@google.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_THREADPOOL_EVENTCOUNT_H_
+#define EIGEN_CXX11_THREADPOOL_EVENTCOUNT_H_
+
+namespace Eigen {
+
+// EventCount allows waiting for arbitrary predicates in non-blocking
+// algorithms. Think of a condition variable, but the wait predicate does not
+// need to be protected by a mutex. Usage:
+// Waiting thread does:
+//
+// if (predicate)
+// return act();
+// EventCount::Waiter& w = waiters[my_index];
+// ec.Prewait(&w);
+// if (predicate) {
+// ec.CancelWait(&w);
+// return act();
+// }
+// ec.CommitWait(&w);
+//
+// Notifying thread does:
+//
+// predicate = true;
+// ec.Notify(true);
+//
+// Notify is cheap if there are no waiting threads. Prewait/CommitWait are not
+// cheap, but they are executed only if the preceding predicate check has
+// failed.
+//
+// Algorithm outline:
+// There are two main variables: the predicate (managed by the user) and state_.
+// The operation closely resembles Dekker's mutual exclusion algorithm:
+// https://en.wikipedia.org/wiki/Dekker%27s_algorithm
+// The waiting thread sets state_ then checks the predicate; the notifying
+// thread sets the predicate then checks state_. Due to the seq_cst fences
+// between these operations it is guaranteed that either the waiter sees the
+// predicate change and doesn't block, or the notifying thread sees the state_
+// change and unblocks the waiter, or both. It cannot happen that both threads
+// miss each other's changes, which would lead to deadlock.
+class EventCount {
+ public:
+ class Waiter;
+
+ EventCount(MaxSizeVector<Waiter>& waiters) : waiters_(waiters) {
+ eigen_assert(waiters.size() < (1 << kWaiterBits) - 1);
+ // Initialize epoch to something close to overflow to test overflow.
+ state_ = kStackMask | (kEpochMask - kEpochInc * waiters.size() * 2);
+ }
+
+ ~EventCount() {
+ // Ensure there are no waiters.
+ eigen_assert((state_.load() & (kStackMask | kWaiterMask)) == kStackMask);
+ }
+
+ // Prewait prepares for waiting.
+ // After calling this function the thread must re-check the wait predicate
+ // and call either CancelWait or CommitWait passing the same Waiter object.
+ void Prewait(Waiter* w) {
+ w->epoch = state_.fetch_add(kWaiterInc, std::memory_order_relaxed);
+ std::atomic_thread_fence(std::memory_order_seq_cst);
+ }
+
+ // CommitWait commits waiting.
+ void CommitWait(Waiter* w) {
+ w->state = Waiter::kNotSignaled;
+ // Modification epoch of this waiter.
+ uint64_t epoch =
+ (w->epoch & kEpochMask) +
+ (((w->epoch & kWaiterMask) >> kWaiterShift) << kEpochShift);
+ uint64_t state = state_.load(std::memory_order_seq_cst);
+ for (;;) {
+ if (int64_t((state & kEpochMask) - epoch) < 0) {
+        // The preceding waiter has not decided on its fate. Wait until it
+ // calls either CancelWait or CommitWait, or is notified.
+ EIGEN_THREAD_YIELD();
+ state = state_.load(std::memory_order_seq_cst);
+ continue;
+ }
+ // We've already been notified.
+ if (int64_t((state & kEpochMask) - epoch) > 0) return;
+ // Remove this thread from prewait counter and add it to the waiter list.
+ eigen_assert((state & kWaiterMask) != 0);
+ uint64_t newstate = state - kWaiterInc + kEpochInc;
+ newstate = (newstate & ~kStackMask) | (w - &waiters_[0]);
+ if ((state & kStackMask) == kStackMask)
+ w->next.store(nullptr, std::memory_order_relaxed);
+ else
+ w->next.store(&waiters_[state & kStackMask], std::memory_order_relaxed);
+ if (state_.compare_exchange_weak(state, newstate,
+ std::memory_order_release))
+ break;
+ }
+ Park(w);
+ }
+
+ // CancelWait cancels effects of the previous Prewait call.
+ void CancelWait(Waiter* w) {
+ uint64_t epoch =
+ (w->epoch & kEpochMask) +
+ (((w->epoch & kWaiterMask) >> kWaiterShift) << kEpochShift);
+ uint64_t state = state_.load(std::memory_order_relaxed);
+ for (;;) {
+ if (int64_t((state & kEpochMask) - epoch) < 0) {
+ // The preceding waiter has not decided on its fate. Wait until it
+ // calls either CancelWait or CommitWait, or is notified.
+ EIGEN_THREAD_YIELD();
+ state = state_.load(std::memory_order_relaxed);
+ continue;
+ }
+ // We've already been notified.
+ if (int64_t((state & kEpochMask) - epoch) > 0) return;
+ // Remove this thread from prewait counter.
+ eigen_assert((state & kWaiterMask) != 0);
+ if (state_.compare_exchange_weak(state, state - kWaiterInc + kEpochInc,
+ std::memory_order_relaxed))
+ return;
+ }
+ }
+
+ // Notify wakes one or all waiting threads.
+ // Must be called after changing the associated wait predicate.
+ void Notify(bool all) {
+ std::atomic_thread_fence(std::memory_order_seq_cst);
+ uint64_t state = state_.load(std::memory_order_acquire);
+ for (;;) {
+ // Easy case: no waiters.
+ if ((state & kStackMask) == kStackMask && (state & kWaiterMask) == 0)
+ return;
+ uint64_t waiters = (state & kWaiterMask) >> kWaiterShift;
+ uint64_t newstate;
+ if (all) {
+ // Reset prewait counter and empty wait list.
+ newstate = (state & kEpochMask) + (kEpochInc * waiters) + kStackMask;
+ } else if (waiters) {
+ // There is a thread in pre-wait state, unblock it.
+ newstate = state + kEpochInc - kWaiterInc;
+ } else {
+ // Pop a waiter from list and unpark it.
+ Waiter* w = &waiters_[state & kStackMask];
+ Waiter* wnext = w->next.load(std::memory_order_relaxed);
+ uint64_t next = kStackMask;
+ if (wnext != nullptr) next = wnext - &waiters_[0];
+ // Note: we don't add kEpochInc here. An ABA problem on the lock-free
+ // stack can't happen because a waiter is re-pushed onto the stack only
+ // after it was in the pre-wait state, which inevitably leads to an epoch
+ // increment.
+ newstate = (state & kEpochMask) + next;
+ }
+ if (state_.compare_exchange_weak(state, newstate,
+ std::memory_order_acquire)) {
+ if (!all && waiters) return; // unblocked pre-wait thread
+ if ((state & kStackMask) == kStackMask) return;
+ Waiter* w = &waiters_[state & kStackMask];
+ if (!all) w->next.store(nullptr, std::memory_order_relaxed);
+ Unpark(w);
+ return;
+ }
+ }
+ }
+
+ class Waiter {
+ friend class EventCount;
+ // Align to 128 byte boundary to prevent false sharing with other Waiter
+ // objects in the same vector.
+ EIGEN_ALIGN_TO_BOUNDARY(128) std::atomic<Waiter*> next;
+ std::mutex mu;
+ std::condition_variable cv;
+ uint64_t epoch;
+ unsigned state;
+ enum {
+ kNotSignaled,
+ kWaiting,
+ kSignaled,
+ };
+ };
+
+ private:
+ // State_ layout:
+ // - the low kStackBits bits are a stack of waiters that committed to wait.
+ // - the next kWaiterBits bits are the count of waiters in prewait state.
+ // - the next kEpochBits bits are a modification counter.
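+ // (Illustrative note, not from the original: state_ can thus be pictured as
+ // [ epoch:32 | prewait count:16 | stack top index:16 ], where an all-ones
+ // stack field (kStackMask) denotes an empty stack.)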
+ static const uint64_t kStackBits = 16;
+ static const uint64_t kStackMask = (1ull << kStackBits) - 1;
+ static const uint64_t kWaiterBits = 16;
+ static const uint64_t kWaiterShift = 16;
+ static const uint64_t kWaiterMask = ((1ull << kWaiterBits) - 1)
+ << kWaiterShift;
+ static const uint64_t kWaiterInc = 1ull << kWaiterBits;
+ static const uint64_t kEpochBits = 32;
+ static const uint64_t kEpochShift = 32;
+ static const uint64_t kEpochMask = ((1ull << kEpochBits) - 1) << kEpochShift;
+ static const uint64_t kEpochInc = 1ull << kEpochShift;
+ std::atomic<uint64_t> state_;
+ MaxSizeVector<Waiter>& waiters_;
+
+ void Park(Waiter* w) {
+ std::unique_lock<std::mutex> lock(w->mu);
+ while (w->state != Waiter::kSignaled) {
+ w->state = Waiter::kWaiting;
+ w->cv.wait(lock);
+ }
+ }
+
+ void Unpark(Waiter* waiters) {
+ Waiter* next = nullptr;
+ for (Waiter* w = waiters; w; w = next) {
+ next = w->next.load(std::memory_order_relaxed);
+ unsigned state;
+ {
+ std::unique_lock<std::mutex> lock(w->mu);
+ state = w->state;
+ w->state = Waiter::kSignaled;
+ }
+ // Avoid notifying if it wasn't waiting.
+ if (state == Waiter::kWaiting) w->cv.notify_one();
+ }
+ }
+
+ EventCount(const EventCount&) = delete;
+ void operator=(const EventCount&) = delete;
+};
+
+} // namespace Eigen
+
+#endif // EIGEN_CXX11_THREADPOOL_EVENTCOUNT_H_
diff --git a/unsupported/Eigen/CXX11/src/ThreadPool/NonBlockingThreadPool.h b/unsupported/Eigen/CXX11/src/ThreadPool/NonBlockingThreadPool.h
new file mode 100644
index 000000000..354bce52a
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/ThreadPool/NonBlockingThreadPool.h
@@ -0,0 +1,274 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2016 Dmitry Vyukov <dvyukov@google.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_THREADPOOL_NONBLOCKING_THREAD_POOL_H
+#define EIGEN_CXX11_THREADPOOL_NONBLOCKING_THREAD_POOL_H
+
+
+namespace Eigen {
+
+template <typename Environment>
+class NonBlockingThreadPoolTempl : public Eigen::ThreadPoolInterface {
+ public:
+ typedef typename Environment::Task Task;
+ typedef RunQueue<Task, 1024> Queue;
+
+ NonBlockingThreadPoolTempl(int num_threads, Environment env = Environment())
+ : env_(env),
+ threads_(num_threads),
+ queues_(num_threads),
+ coprimes_(num_threads),
+ waiters_(num_threads),
+ blocked_(0),
+ spinning_(0),
+ done_(false),
+ ec_(waiters_) {
+ waiters_.resize(num_threads);
+
+ // Calculate coprimes of num_threads.
+ // Coprimes are used for a random walk over all threads in Steal
+ // and NonEmptyQueueIndex. Iteration is based on the fact that if we take
+ // a walk starting at thread index t and calculate num_threads - 1 subsequent
+ // indices as (t + coprime) % num_threads, we will cover all threads without
+ // repetitions (effectively getting a pseudo-random permutation of thread
+ // indices).
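+ // For example (illustrative, not from the original source): with
+ // num_threads = 4, the loop below yields coprimes_ = {1, 3}; a walk starting
+ // at t = 2 with coprime 3 visits 2, 1, 0, 3, i.e. every thread exactly once.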
+ for (int i = 1; i <= num_threads; i++) {
+ unsigned a = i;
+ unsigned b = num_threads;
+ // If GCD(a, b) == 1, then a and b are coprimes.
+ while (b != 0) {
+ unsigned tmp = a;
+ a = b;
+ b = tmp % b;
+ }
+ if (a == 1) {
+ coprimes_.push_back(i);
+ }
+ }
+ for (int i = 0; i < num_threads; i++) {
+ queues_.push_back(new Queue());
+ }
+ for (int i = 0; i < num_threads; i++) {
+ threads_.push_back(env_.CreateThread([this, i]() { WorkerLoop(i); }));
+ }
+ }
+
+ ~NonBlockingThreadPoolTempl() {
+ done_ = true;
+ // Now if all threads block without work, they will start exiting.
+ // But note that threads can continue to work arbitrarily long,
+ // block, submit new work, unblock and otherwise live a full life.
+ ec_.Notify(true);
+
+ // Join threads explicitly to avoid destruction order issues.
+ for (size_t i = 0; i < threads_.size(); i++) delete threads_[i];
+ for (size_t i = 0; i < queues_.size(); i++) delete queues_[i];
+ }
+
+ void Schedule(std::function<void()> fn) {
+ Task t = env_.CreateTask(std::move(fn));
+ PerThread* pt = GetPerThread();
+ if (pt->pool == this) {
+ // Worker thread of this pool, push onto the thread's queue.
+ Queue* q = queues_[pt->thread_id];
+ t = q->PushFront(std::move(t));
+ } else {
+ // A free-standing thread (or worker of another pool), push onto a random
+ // queue.
+ Queue* q = queues_[Rand(&pt->rand) % queues_.size()];
+ t = q->PushBack(std::move(t));
+ }
+ // Note: below we touch this after making t available to worker threads.
+ // Strictly speaking, this can lead to a racy use-after-free. Consider that
+ // Schedule is called from a thread that is neither the main thread nor a
+ // worker thread of this pool. Then, execution of t directly or indirectly
+ // completes the overall computation, which in turn leads to destruction of
+ // this. We expect that such a scenario is prevented by the program, that is,
+ // this is kept alive while any thread can potentially be inside Schedule.
+ if (!t.f)
+ ec_.Notify(false);
+ else
+ env_.ExecuteTask(t); // Push failed, execute directly.
+ }
+
+ int NumThreads() const final {
+ return static_cast<int>(threads_.size());
+ }
+
+ int CurrentThreadId() const final {
+ const PerThread* pt =
+ const_cast<NonBlockingThreadPoolTempl*>(this)->GetPerThread();
+ if (pt->pool == this) {
+ return pt->thread_id;
+ } else {
+ return -1;
+ }
+ }
+
+ private:
+ typedef typename Environment::EnvThread Thread;
+
+ struct PerThread {
+ constexpr PerThread() : pool(NULL), rand(0), thread_id(-1) { }
+ NonBlockingThreadPoolTempl* pool; // Parent pool, or null for normal threads.
+ uint64_t rand; // Random generator state.
+ int thread_id; // Worker thread index in pool.
+ };
+
+ Environment env_;
+ MaxSizeVector<Thread*> threads_;
+ MaxSizeVector<Queue*> queues_;
+ MaxSizeVector<unsigned> coprimes_;
+ MaxSizeVector<EventCount::Waiter> waiters_;
+ std::atomic<unsigned> blocked_;
+ std::atomic<bool> spinning_;
+ std::atomic<bool> done_;
+ EventCount ec_;
+
+ // Main worker thread loop.
+ void WorkerLoop(int thread_id) {
+ PerThread* pt = GetPerThread();
+ pt->pool = this;
+ pt->rand = std::hash<std::thread::id>()(std::this_thread::get_id());
+ pt->thread_id = thread_id;
+ Queue* q = queues_[thread_id];
+ EventCount::Waiter* waiter = &waiters_[thread_id];
+ for (;;) {
+ Task t = q->PopFront();
+ if (!t.f) {
+ t = Steal();
+ if (!t.f) {
+ // Leave one thread spinning. This reduces latency.
+ // TODO(dvyukov): 1000 iterations is based on a fair dice roll, tune it.
+ // Also, the time it takes to attempt to steal work 1000 times depends
+ // on the size of the thread pool. However, the speed at which the user
+ // of the thread pool submits tasks is independent of the size of the
+ // pool. Consider a time-based limit instead.
+ if (!spinning_ && !spinning_.exchange(true)) {
+ for (int i = 0; i < 1000 && !t.f; i++) {
+ t = Steal();
+ }
+ spinning_ = false;
+ }
+ if (!t.f) {
+ if (!WaitForWork(waiter, &t)) {
+ return;
+ }
+ }
+ }
+ }
+ if (t.f) {
+ env_.ExecuteTask(t);
+ }
+ }
+ }
+
+ // Steal tries to steal work from other worker threads in best-effort manner.
+ Task Steal() {
+ PerThread* pt = GetPerThread();
+ const size_t size = queues_.size();
+ unsigned r = Rand(&pt->rand);
+ unsigned inc = coprimes_[r % coprimes_.size()];
+ unsigned victim = r % size;
+ for (unsigned i = 0; i < size; i++) {
+ Task t = queues_[victim]->PopBack();
+ if (t.f) {
+ return t;
+ }
+ victim += inc;
+ if (victim >= size) {
+ victim -= size;
+ }
+ }
+ return Task();
+ }
+
+ // WaitForWork blocks until new work is available (returns true), or if it is
+ // time to exit (returns false). Can optionally return a task to execute in t
+ // (in such a case t.f != nullptr on return).
+ bool WaitForWork(EventCount::Waiter* waiter, Task* t) {
+ eigen_assert(!t->f);
+ // We already did best-effort emptiness check in Steal, so prepare for
+ // blocking.
+ ec_.Prewait(waiter);
+ // Now do a reliable emptiness check.
+ int victim = NonEmptyQueueIndex();
+ if (victim != -1) {
+ ec_.CancelWait(waiter);
+ *t = queues_[victim]->PopBack();
+ return true;
+ }
+ // The number of blocked threads is used as the termination condition.
+ // If we are shutting down and all worker threads are blocked without work,
+ // then we are done.
+ blocked_++;
+ if (done_ && blocked_ == threads_.size()) {
+ ec_.CancelWait(waiter);
+ // Almost done, but we need to re-check the queues.
+ // Consider that all queues are empty and all worker threads are preempted
+ // right after incrementing blocked_ above. Now a free-standing thread
+ // submits work and calls the destructor (which sets done_). If we don't
+ // re-check the queues, we will exit leaving the work unexecuted.
+ if (NonEmptyQueueIndex() != -1) {
+ // Note: we must not pop from queues before we decrement blocked_,
+ // otherwise the following scenario is possible. Consider that instead
+ // of checking for emptiness we popped the only element from queues.
+ // Now other worker threads can start exiting, which is bad if the
+ // work item submits other work. So we just check emptiness here,
+ // which ensures that all worker threads exit at the same time.
+ blocked_--;
+ return true;
+ }
+ // Reached stable termination state.
+ ec_.Notify(true);
+ return false;
+ }
+ ec_.CommitWait(waiter);
+ blocked_--;
+ return true;
+ }
+
+ int NonEmptyQueueIndex() {
+ PerThread* pt = GetPerThread();
+ const size_t size = queues_.size();
+ unsigned r = Rand(&pt->rand);
+ unsigned inc = coprimes_[r % coprimes_.size()];
+ unsigned victim = r % size;
+ for (unsigned i = 0; i < size; i++) {
+ if (!queues_[victim]->Empty()) {
+ return victim;
+ }
+ victim += inc;
+ if (victim >= size) {
+ victim -= size;
+ }
+ }
+ return -1;
+ }
+
+ static EIGEN_STRONG_INLINE PerThread* GetPerThread() {
+ EIGEN_THREAD_LOCAL PerThread per_thread_;
+ PerThread* pt = &per_thread_;
+ return pt;
+ }
+
+ static EIGEN_STRONG_INLINE unsigned Rand(uint64_t* state) {
+ uint64_t current = *state;
+ // Update the internal state
+ *state = current * 6364136223846793005ULL + 0xda3e39cb94b95bdbULL;
+ // Generate the random output (using the PCG-XSH-RS scheme)
+ return static_cast<unsigned>((current ^ (current >> 22)) >> (22 + (current >> 61)));
+ }
+};
+
+typedef NonBlockingThreadPoolTempl<StlThreadEnvironment> NonBlockingThreadPool;
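+
+// A minimal usage sketch (illustrative only, not part of the original source):
+//
+//   Eigen::NonBlockingThreadPool pool(4);        // 4 worker threads
+//   pool.Schedule([]() { /* do some work */ });
+//   // The destructor signals shutdown and joins the worker threads.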
+
+} // namespace Eigen
+
+#endif // EIGEN_CXX11_THREADPOOL_NONBLOCKING_THREAD_POOL_H
diff --git a/unsupported/Eigen/CXX11/src/ThreadPool/RunQueue.h b/unsupported/Eigen/CXX11/src/ThreadPool/RunQueue.h
new file mode 100644
index 000000000..05ed76cbe
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/ThreadPool/RunQueue.h
@@ -0,0 +1,210 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2016 Dmitry Vyukov <dvyukov@google.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_THREADPOOL_RUNQUEUE_H_
+#define EIGEN_CXX11_THREADPOOL_RUNQUEUE_H_
+
+
+namespace Eigen {
+
+// RunQueue is a fixed-size, partially non-blocking deque of Work items.
+// Operations on the front of the queue must be done by a single thread (the
+// owner); operations on the back can be done by multiple threads concurrently.
+//
+// Algorithm outline:
+// All remote threads operating on the back of the queue are serialized by a
+// mutex. This ensures that at most two threads access state: the owner and
+// one remote thread (Size aside). The algorithm ensures that the occupied
+// region of the underlying array is logically contiguous (it can wrap around,
+// but has no stray occupied elements). The owner operates on one end of this
+// region, the remote thread on the other end. Synchronization between these
+// threads (potential consumption of the last element and take-up of the last
+// empty element) happens by means of a state variable in each element. States
+// are: empty, busy (in the process of insertion or removal) and ready.
+// Threads claim elements (empty->busy and ready->busy transitions) by means
+// of a CAS operation. The finishing transitions (busy->empty and busy->ready)
+// are done with a plain store, as the element is exclusively owned by the
+// current thread.
+//
+// Note: we could permit only pointers as elements, then we would not need a
+// separate state variable, as a null/non-null pointer value would serve as
+// state, but that would require malloc/free per operation for large, complex
+// values (and this is designed to store std::function<void()>).
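+//
+// A minimal usage sketch (illustrative, not part of the original source):
+//
+//   RunQueue<std::function<void()>, 1024> q;
+//   q.PushFront([]() { /* work */ });       // owner thread only
+//   std::function<void()> w = q.PopBack();  // any thread, may fail spuriously
+//   if (w) w();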
+template <typename Work, unsigned kSize>
+class RunQueue {
+ public:
+ RunQueue() : front_(0), back_(0) {
+ // require power-of-two for fast masking
+ eigen_assert((kSize & (kSize - 1)) == 0);
+ eigen_assert(kSize > 2); // why would you do this?
+ eigen_assert(kSize <= (64 << 10)); // leave enough space for counter
+ for (unsigned i = 0; i < kSize; i++)
+ array_[i].state.store(kEmpty, std::memory_order_relaxed);
+ }
+
+ ~RunQueue() { eigen_assert(Size() == 0); }
+
+ // PushFront inserts w at the beginning of the queue.
+ // If queue is full returns w, otherwise returns default-constructed Work.
+ Work PushFront(Work w) {
+ unsigned front = front_.load(std::memory_order_relaxed);
+ Elem* e = &array_[front & kMask];
+ uint8_t s = e->state.load(std::memory_order_relaxed);
+ if (s != kEmpty ||
+ !e->state.compare_exchange_strong(s, kBusy, std::memory_order_acquire))
+ return w;
+ front_.store(front + 1 + (kSize << 1), std::memory_order_relaxed);
+ e->w = std::move(w);
+ e->state.store(kReady, std::memory_order_release);
+ return Work();
+ }
+
+ // PopFront removes and returns the first element in the queue.
+ // If the queue was empty returns default-constructed Work.
+ Work PopFront() {
+ unsigned front = front_.load(std::memory_order_relaxed);
+ Elem* e = &array_[(front - 1) & kMask];
+ uint8_t s = e->state.load(std::memory_order_relaxed);
+ if (s != kReady ||
+ !e->state.compare_exchange_strong(s, kBusy, std::memory_order_acquire))
+ return Work();
+ Work w = std::move(e->w);
+ e->state.store(kEmpty, std::memory_order_release);
+ front = ((front - 1) & kMask2) | (front & ~kMask2);
+ front_.store(front, std::memory_order_relaxed);
+ return w;
+ }
+
+ // PushBack adds w at the end of the queue.
+ // If queue is full returns w, otherwise returns default-constructed Work.
+ Work PushBack(Work w) {
+ std::unique_lock<std::mutex> lock(mutex_);
+ unsigned back = back_.load(std::memory_order_relaxed);
+ Elem* e = &array_[(back - 1) & kMask];
+ uint8_t s = e->state.load(std::memory_order_relaxed);
+ if (s != kEmpty ||
+ !e->state.compare_exchange_strong(s, kBusy, std::memory_order_acquire))
+ return w;
+ back = ((back - 1) & kMask2) | (back & ~kMask2);
+ back_.store(back, std::memory_order_relaxed);
+ e->w = std::move(w);
+ e->state.store(kReady, std::memory_order_release);
+ return Work();
+ }
+
+ // PopBack removes and returns the last element in the queue.
+ // Can fail spuriously.
+ Work PopBack() {
+ if (Empty()) return Work();
+ std::unique_lock<std::mutex> lock(mutex_, std::try_to_lock);
+ if (!lock) return Work();
+ unsigned back = back_.load(std::memory_order_relaxed);
+ Elem* e = &array_[back & kMask];
+ uint8_t s = e->state.load(std::memory_order_relaxed);
+ if (s != kReady ||
+ !e->state.compare_exchange_strong(s, kBusy, std::memory_order_acquire))
+ return Work();
+ Work w = std::move(e->w);
+ e->state.store(kEmpty, std::memory_order_release);
+ back_.store(back + 1 + (kSize << 1), std::memory_order_relaxed);
+ return w;
+ }
+
+ // PopBackHalf removes and returns up to half of the last elements in the
+ // queue. Returns the number of elements removed, but can also fail
+ // spuriously.
+ unsigned PopBackHalf(std::vector<Work>* result) {
+ if (Empty()) return 0;
+ std::unique_lock<std::mutex> lock(mutex_, std::try_to_lock);
+ if (!lock) return 0;
+ unsigned back = back_.load(std::memory_order_relaxed);
+ unsigned size = Size();
+ unsigned mid = back;
+ if (size > 1) mid = back + (size - 1) / 2;
+ unsigned n = 0;
+ unsigned start = 0;
+ for (; static_cast<int>(mid - back) >= 0; mid--) {
+ Elem* e = &array_[mid & kMask];
+ uint8_t s = e->state.load(std::memory_order_relaxed);
+ if (n == 0) {
+ if (s != kReady ||
+ !e->state.compare_exchange_strong(s, kBusy,
+ std::memory_order_acquire))
+ continue;
+ start = mid;
+ } else {
+ // Note: no need to store the intermediate kBusy state, we exclusively
+ // own these elements.
+ eigen_assert(s == kReady);
+ }
+ result->push_back(std::move(e->w));
+ e->state.store(kEmpty, std::memory_order_release);
+ n++;
+ }
+ if (n != 0)
+ back_.store(start + 1 + (kSize << 1), std::memory_order_relaxed);
+ return n;
+ }
+
+ // Size returns current queue size.
+ // Can be called by any thread at any time.
+ unsigned Size() const {
+ // Emptiness plays a critical role in thread pool blocking. So we go to
+ // great effort not to produce false positives (claiming a non-empty queue
+ // to be empty).
+ for (;;) {
+ // Capture a consistent snapshot of front/back.
+ unsigned front = front_.load(std::memory_order_acquire);
+ unsigned back = back_.load(std::memory_order_acquire);
+ unsigned front1 = front_.load(std::memory_order_relaxed);
+ if (front != front1) continue;
+ int size = (front & kMask2) - (back & kMask2);
+ // Fix overflow.
+ if (size < 0) size += 2 * kSize;
+ // Order of modification in push/pop is crafted to make the queue look
+ // larger than it is during concurrent modifications. E.g. pop can
+ // decrement size before the corresponding push has incremented it.
+ // So the computed size can be up to kSize + 1, fix it.
+ if (size > static_cast<int>(kSize)) size = kSize;
+ return size;
+ }
+ }
+
+ // Empty tests whether container is empty.
+ // Can be called by any thread at any time.
+ bool Empty() const { return Size() == 0; }
+
+ private:
+ static const unsigned kMask = kSize - 1;
+ static const unsigned kMask2 = (kSize << 1) - 1;
+ struct Elem {
+ std::atomic<uint8_t> state;
+ Work w;
+ };
+ enum {
+ kEmpty,
+ kBusy,
+ kReady,
+ };
+ std::mutex mutex_;
+ // The low log(kSize) + 1 bits in front_ and back_ contain the rolling index
+ // of the front/back, respectively. The remaining bits contain modification
+ // counters that are incremented on Push operations. This allows us to (1)
+ // distinguish between empty and full conditions (if we used only log(kSize)
+ // bits for the position, these conditions would be indistinguishable); (2)
+ // obtain a consistent snapshot of front_/back_ for the Size operation using
+ // the modification counters.
+ std::atomic<unsigned> front_;
+ std::atomic<unsigned> back_;
+ Elem array_[kSize];
+
+ RunQueue(const RunQueue&) = delete;
+ void operator=(const RunQueue&) = delete;
+};
+
+} // namespace Eigen
+
+#endif // EIGEN_CXX11_THREADPOOL_RUNQUEUE_H_
diff --git a/unsupported/Eigen/CXX11/src/ThreadPool/SimpleThreadPool.h b/unsupported/Eigen/CXX11/src/ThreadPool/SimpleThreadPool.h
new file mode 100644
index 000000000..e75d0f467
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/ThreadPool/SimpleThreadPool.h
@@ -0,0 +1,154 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_THREADPOOL_SIMPLE_THREAD_POOL_H
+#define EIGEN_CXX11_THREADPOOL_SIMPLE_THREAD_POOL_H
+
+namespace Eigen {
+
+// The implementation of the ThreadPool type ensures that the Schedule method
+// runs the functions it is provided in FIFO order when the scheduling is done
+// by a single thread.
+// Environment provides a way to create threads and also allows intercepting
+// task submission and execution.
+template <typename Environment>
+class SimpleThreadPoolTempl : public ThreadPoolInterface {
+ public:
+ // Construct a pool that contains "num_threads" threads.
+ explicit SimpleThreadPoolTempl(int num_threads, Environment env = Environment())
+ : env_(env), threads_(num_threads), waiters_(num_threads) {
+ for (int i = 0; i < num_threads; i++) {
+ threads_.push_back(env.CreateThread([this, i]() { WorkerLoop(i); }));
+ }
+ }
+
+ // Wait until all scheduled work has finished and then destroy the
+ // set of threads.
+ ~SimpleThreadPoolTempl() {
+ {
+ // Wait for all work to get done.
+ std::unique_lock<std::mutex> l(mu_);
+ while (!pending_.empty()) {
+ empty_.wait(l);
+ }
+ exiting_ = true;
+
+ // Wakeup all waiters.
+ for (auto w : waiters_) {
+ w->ready = true;
+ w->task.f = nullptr;
+ w->cv.notify_one();
+ }
+ }
+
+ // Wait for threads to finish.
+ for (auto t : threads_) {
+ delete t;
+ }
+ }
+
+ // Schedule fn() for execution in the pool of threads. The functions are
+ // executed in the order in which they are scheduled.
+ void Schedule(std::function<void()> fn) final {
+ Task t = env_.CreateTask(std::move(fn));
+ std::unique_lock<std::mutex> l(mu_);
+ if (waiters_.empty()) {
+ pending_.push_back(std::move(t));
+ } else {
+ Waiter* w = waiters_.back();
+ waiters_.pop_back();
+ w->ready = true;
+ w->task = std::move(t);
+ w->cv.notify_one();
+ }
+ }
+
+ int NumThreads() const final {
+ return static_cast<int>(threads_.size());
+ }
+
+ int CurrentThreadId() const final {
+ const PerThread* pt = this->GetPerThread();
+ if (pt->pool == this) {
+ return pt->thread_id;
+ } else {
+ return -1;
+ }
+ }
+
+ protected:
+ void WorkerLoop(int thread_id) {
+ std::unique_lock<std::mutex> l(mu_);
+ PerThread* pt = GetPerThread();
+ pt->pool = this;
+ pt->thread_id = thread_id;
+ Waiter w;
+ Task t;
+ while (!exiting_) {
+ if (pending_.empty()) {
+ // Wait for work to be assigned to me
+ w.ready = false;
+ waiters_.push_back(&w);
+ while (!w.ready) {
+ w.cv.wait(l);
+ }
+ t = w.task;
+ w.task.f = nullptr;
+ } else {
+ // Pick up pending work
+ t = std::move(pending_.front());
+ pending_.pop_front();
+ if (pending_.empty()) {
+ empty_.notify_all();
+ }
+ }
+ if (t.f) {
+ mu_.unlock();
+ env_.ExecuteTask(t);
+ t.f = nullptr;
+ mu_.lock();
+ }
+ }
+ }
+
+ private:
+ typedef typename Environment::Task Task;
+ typedef typename Environment::EnvThread Thread;
+
+ struct Waiter {
+ std::condition_variable cv;
+ Task task;
+ bool ready;
+ };
+
+ struct PerThread {
+ constexpr PerThread() : pool(NULL), thread_id(-1) { }
+ SimpleThreadPoolTempl* pool; // Parent pool, or null for normal threads.
+ int thread_id; // Worker thread index in pool.
+ };
+
+ Environment env_;
+ std::mutex mu_;
+ MaxSizeVector<Thread*> threads_; // All threads
+ MaxSizeVector<Waiter*> waiters_; // Stack of waiting threads.
+ std::deque<Task> pending_; // Queue of pending work
+ std::condition_variable empty_; // Signaled on pending_.empty()
+ bool exiting_ = false;
+
+ PerThread* GetPerThread() const {
+ EIGEN_THREAD_LOCAL PerThread per_thread;
+ return &per_thread;
+ }
+};
+
+typedef SimpleThreadPoolTempl<StlThreadEnvironment> SimpleThreadPool;
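+
+// A minimal usage sketch (illustrative only, not part of the original source):
+//
+//   Eigen::SimpleThreadPool pool(2);
+//   pool.Schedule([]() { /* task body */ });
+//   // The destructor waits until all scheduled work has finished.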
+
+} // namespace Eigen
+
+#endif // EIGEN_CXX11_THREADPOOL_SIMPLE_THREAD_POOL_H
diff --git a/unsupported/Eigen/CXX11/src/ThreadPool/ThreadEnvironment.h b/unsupported/Eigen/CXX11/src/ThreadPool/ThreadEnvironment.h
new file mode 100644
index 000000000..399f95cc1
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/ThreadPool/ThreadEnvironment.h
@@ -0,0 +1,38 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_THREADPOOL_THREAD_ENVIRONMENT_H
+#define EIGEN_CXX11_THREADPOOL_THREAD_ENVIRONMENT_H
+
+namespace Eigen {
+
+struct StlThreadEnvironment {
+ struct Task {
+ std::function<void()> f;
+ };
+
+ // The EnvThread constructor must start the thread;
+ // the destructor must join it.
+ class EnvThread {
+ public:
+ EnvThread(std::function<void()> f) : thr_(std::move(f)) {}
+ ~EnvThread() { thr_.join(); }
+
+ private:
+ std::thread thr_;
+ };
+
+ EnvThread* CreateThread(std::function<void()> f) { return new EnvThread(std::move(f)); }
+ Task CreateTask(std::function<void()> f) { return Task{std::move(f)}; }
+ void ExecuteTask(const Task& t) { t.f(); }
+};
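+
+// Since the pools take Environment as a template parameter rather than a
+// virtual interface, a custom environment can intercept task execution simply
+// by providing the same members. A hypothetical sketch (TracingEnvironment is
+// not part of the original source):
+//
+//   struct TracingEnvironment : StlThreadEnvironment {
+//     void ExecuteTask(const Task& t) {
+//       // ... record start timestamp ...
+//       t.f();
+//       // ... record end timestamp ...
+//     }
+//   };
+//
+//   Eigen::NonBlockingThreadPoolTempl<TracingEnvironment> pool(4);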
+
+} // namespace Eigen
+
+#endif // EIGEN_CXX11_THREADPOOL_THREAD_ENVIRONMENT_H
diff --git a/unsupported/Eigen/CXX11/src/ThreadPool/ThreadLocal.h b/unsupported/Eigen/CXX11/src/ThreadPool/ThreadLocal.h
new file mode 100644
index 000000000..cfa221732
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/ThreadPool/ThreadLocal.h
@@ -0,0 +1,22 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2016 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_THREADPOOL_THREAD_LOCAL_H
+#define EIGEN_CXX11_THREADPOOL_THREAD_LOCAL_H
+
+// Try to come up with a portable implementation of thread local variables
+#if EIGEN_COMP_GNUC && EIGEN_GNUC_AT_MOST(4, 7)
+#define EIGEN_THREAD_LOCAL static __thread
+#elif EIGEN_COMP_CLANG
+#define EIGEN_THREAD_LOCAL static __thread
+#else
+#define EIGEN_THREAD_LOCAL static thread_local
+#endif
+
+#endif // EIGEN_CXX11_THREADPOOL_THREAD_LOCAL_H
diff --git a/unsupported/Eigen/CXX11/src/ThreadPool/ThreadPoolInterface.h b/unsupported/Eigen/CXX11/src/ThreadPool/ThreadPoolInterface.h
new file mode 100644
index 000000000..a65ee97c9
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/ThreadPool/ThreadPoolInterface.h
@@ -0,0 +1,33 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_THREADPOOL_THREAD_POOL_INTERFACE_H
+#define EIGEN_CXX11_THREADPOOL_THREAD_POOL_INTERFACE_H
+
+namespace Eigen {
+
+// This defines an interface that ThreadPoolDevice can take to use
+// custom thread pools underneath.
+class ThreadPoolInterface {
+ public:
+ virtual void Schedule(std::function<void()> fn) = 0;
+
+ // Returns the number of threads in the pool.
+ virtual int NumThreads() const = 0;
+
+ // Returns a logical thread index between 0 and NumThreads() - 1 if called
+ // from one of the threads in the pool. Returns -1 otherwise.
+ virtual int CurrentThreadId() const = 0;
+
+ virtual ~ThreadPoolInterface() {}
+};
+
+} // namespace Eigen
+
+#endif // EIGEN_CXX11_THREADPOOL_THREAD_POOL_INTERFACE_H
diff --git a/unsupported/Eigen/CXX11/src/ThreadPool/ThreadYield.h b/unsupported/Eigen/CXX11/src/ThreadPool/ThreadYield.h
new file mode 100644
index 000000000..a859c7ba3
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/ThreadPool/ThreadYield.h
@@ -0,0 +1,20 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2016 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_THREADPOOL_THREAD_YIELD_H
+#define EIGEN_CXX11_THREADPOOL_THREAD_YIELD_H
+
+// Try to come up with a portable way to yield
+#if EIGEN_COMP_GNUC && EIGEN_GNUC_AT_MOST(4, 7)
+#define EIGEN_THREAD_YIELD() sched_yield()
+#else
+#define EIGEN_THREAD_YIELD() std::this_thread::yield()
+#endif
+
+#endif // EIGEN_CXX11_THREADPOOL_THREAD_YIELD_H
diff --git a/unsupported/Eigen/CXX11/src/util/CXX11Meta.h b/unsupported/Eigen/CXX11/src/util/CXX11Meta.h
new file mode 100644
index 000000000..ec27eddb8
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/util/CXX11Meta.h
@@ -0,0 +1,542 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2013 Christian Seiler <christian@iwakd.de>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11META_H
+#define EIGEN_CXX11META_H
+
+#include <vector>
+#include "EmulateArray.h"
+
+// Emulate the cxx11 functionality that we need if the compiler doesn't support it.
+// Visual studio 2015 doesn't advertise itself as cxx11 compliant, although it
+// supports enough of the standard for our needs.
+#if __cplusplus > 199711L || EIGEN_COMP_MSVC >= 1900
+
+#include "CXX11Workarounds.h"
+
+namespace Eigen {
+
+namespace internal {
+
+/** \internal
+ * \file CXX11/util/CXX11Meta.h
+ * This file contains generic metaprogramming classes which are not specifically related to Eigen.
+ * This file expands upon Core/util/Meta.h and adds support for C++11 specific features.
+ */
+
+template<typename... tt>
+struct type_list { constexpr static int count = sizeof...(tt); };
+
+template<typename t, typename... tt>
+struct type_list<t, tt...> { constexpr static int count = sizeof...(tt) + 1; typedef t first_type; };
+
+template<typename T, T... nn>
+struct numeric_list { constexpr static std::size_t count = sizeof...(nn); };
+
+template<typename T, T n, T... nn>
+struct numeric_list<T, n, nn...> { constexpr static std::size_t count = sizeof...(nn) + 1; constexpr static T first_value = n; };
+
+/* numeric list constructors
+ *
+ * equivalencies:
+ * constructor result
+ * typename gen_numeric_list<int, 5>::type numeric_list<int, 0,1,2,3,4>
+ * typename gen_numeric_list_reversed<int, 5>::type numeric_list<int, 4,3,2,1,0>
+ * typename gen_numeric_list_swapped_pair<int, 5,1,2>::type numeric_list<int, 0,2,1,3,4>
+ * typename gen_numeric_list_repeated<int, 0, 5>::type numeric_list<int, 0,0,0,0,0>
+ */
+
+template<typename T, std::size_t n, T start = 0, T... ii> struct gen_numeric_list : gen_numeric_list<T, n-1, start, start + n-1, ii...> {};
+template<typename T, T start, T... ii> struct gen_numeric_list<T, 0, start, ii...> { typedef numeric_list<T, ii...> type; };
+
+template<typename T, std::size_t n, T start = 0, T... ii> struct gen_numeric_list_reversed : gen_numeric_list_reversed<T, n-1, start, ii..., start + n-1> {};
+template<typename T, T start, T... ii> struct gen_numeric_list_reversed<T, 0, start, ii...> { typedef numeric_list<T, ii...> type; };
+
+template<typename T, std::size_t n, T a, T b, T start = 0, T... ii> struct gen_numeric_list_swapped_pair : gen_numeric_list_swapped_pair<T, n-1, a, b, start, (start + n-1) == a ? b : ((start + n-1) == b ? a : (start + n-1)), ii...> {};
+template<typename T, T a, T b, T start, T... ii> struct gen_numeric_list_swapped_pair<T, 0, a, b, start, ii...> { typedef numeric_list<T, ii...> type; };
+
+template<typename T, std::size_t n, T V, T... nn> struct gen_numeric_list_repeated : gen_numeric_list_repeated<T, n-1, V, V, nn...> {};
+template<typename T, T V, T... nn> struct gen_numeric_list_repeated<T, 0, V, nn...> { typedef numeric_list<T, nn...> type; };
+
+/* list manipulation: concatenate */
+
+template<class a, class b> struct concat;
+
+template<typename... as, typename... bs> struct concat<type_list<as...>, type_list<bs...>> { typedef type_list<as..., bs...> type; };
+template<typename T, T... as, T... bs> struct concat<numeric_list<T, as...>, numeric_list<T, bs...> > { typedef numeric_list<T, as..., bs...> type; };
+
+template<typename... p> struct mconcat;
+template<typename a> struct mconcat<a> { typedef a type; };
+template<typename a, typename b> struct mconcat<a, b> : concat<a, b> {};
+template<typename a, typename b, typename... cs> struct mconcat<a, b, cs...> : concat<a, typename mconcat<b, cs...>::type> {};
+
+/* list manipulation: extract slices */
+
+template<int n, typename x> struct take;
+template<int n, typename a, typename... as> struct take<n, type_list<a, as...>> : concat<type_list<a>, typename take<n-1, type_list<as...>>::type> {};
+template<int n> struct take<n, type_list<>> { typedef type_list<> type; };
+template<typename a, typename... as> struct take<0, type_list<a, as...>> { typedef type_list<> type; };
+template<> struct take<0, type_list<>> { typedef type_list<> type; };
+
+template<typename T, int n, T a, T... as> struct take<n, numeric_list<T, a, as...>> : concat<numeric_list<T, a>, typename take<n-1, numeric_list<T, as...>>::type> {};
+template<typename T, int n> struct take<n, numeric_list<T>> { typedef numeric_list<T> type; };
+template<typename T, T a, T... as> struct take<0, numeric_list<T, a, as...>> { typedef numeric_list<T> type; };
+template<typename T> struct take<0, numeric_list<T>> { typedef numeric_list<T> type; };
+
+template<typename T, int n, T... ii> struct h_skip_helper_numeric;
+template<typename T, int n, T i, T... ii> struct h_skip_helper_numeric<T, n, i, ii...> : h_skip_helper_numeric<T, n-1, ii...> {};
+template<typename T, T i, T... ii> struct h_skip_helper_numeric<T, 0, i, ii...> { typedef numeric_list<T, i, ii...> type; };
+template<typename T, int n> struct h_skip_helper_numeric<T, n> { typedef numeric_list<T> type; };
+template<typename T> struct h_skip_helper_numeric<T, 0> { typedef numeric_list<T> type; };
+
+template<int n, typename... tt> struct h_skip_helper_type;
+template<int n, typename t, typename... tt> struct h_skip_helper_type<n, t, tt...> : h_skip_helper_type<n-1, tt...> {};
+template<typename t, typename... tt> struct h_skip_helper_type<0, t, tt...> { typedef type_list<t, tt...> type; };
+template<int n> struct h_skip_helper_type<n> { typedef type_list<> type; };
+template<> struct h_skip_helper_type<0> { typedef type_list<> type; };
+
+template<int n>
+struct h_skip {
+ template<typename T, T... ii>
+ constexpr static inline typename h_skip_helper_numeric<T, n, ii...>::type helper(numeric_list<T, ii...>) { return typename h_skip_helper_numeric<T, n, ii...>::type(); }
+ template<typename... tt>
+ constexpr static inline typename h_skip_helper_type<n, tt...>::type helper(type_list<tt...>) { return typename h_skip_helper_type<n, tt...>::type(); }
+};
+
+template<int n, typename a> struct skip { typedef decltype(h_skip<n>::helper(a())) type; };
+
+template<int start, int count, typename a> struct slice : take<count, typename skip<start, a>::type> {};
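+
+/* for example (illustrative, not from the original):
+ *   typename concat<numeric_list<int, 0, 1>, numeric_list<int, 2, 3>>::type
+ *     is numeric_list<int, 0, 1, 2, 3>
+ *   typename slice<1, 2, numeric_list<int, 0, 1, 2, 3>>::type
+ *     is numeric_list<int, 1, 2>
+ */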
+
+/* list manipulation: retrieve single element from list */
+
+template<int n, typename x> struct get;
+
+template<int n, typename a, typename... as> struct get<n, type_list<a, as...>> : get<n-1, type_list<as...>> {};
+template<typename a, typename... as> struct get<0, type_list<a, as...>> { typedef a type; };
+
+template<typename T, int n, T a, T... as> struct get<n, numeric_list<T, a, as...>> : get<n-1, numeric_list<T, as...>> {};
+template<typename T, T a, T... as> struct get<0, numeric_list<T, a, as...>> { constexpr static T value = a; };
+
+/* always get type, regardless of dummy; good for parameter pack expansion */
+
+template<typename T, T dummy, typename t> struct id_numeric { typedef t type; };
+template<typename dummy, typename t> struct id_type { typedef t type; };
+
+/* equality checking, flagged version */
+
+template<typename a, typename b> struct is_same_gf : is_same<a, b> { constexpr static int global_flags = 0; };
+
+/* apply_op to list */
+
+template<
+ bool from_left, // false
+ template<typename, typename> class op,
+ typename additional_param,
+ typename... values
+>
+struct h_apply_op_helper { typedef type_list<typename op<values, additional_param>::type...> type; };
+template<
+ template<typename, typename> class op,
+ typename additional_param,
+ typename... values
+>
+struct h_apply_op_helper<true, op, additional_param, values...> { typedef type_list<typename op<additional_param, values>::type...> type; };
+
+template<
+ bool from_left,
+ template<typename, typename> class op,
+ typename additional_param
+>
+struct h_apply_op
+{
+ template<typename... values>
+ constexpr static typename h_apply_op_helper<from_left, op, additional_param, values...>::type helper(type_list<values...>)
+ { return typename h_apply_op_helper<from_left, op, additional_param, values...>::type(); }
+};
+
+template<
+ template<typename, typename> class op,
+ typename additional_param,
+ typename a
+>
+struct apply_op_from_left { typedef decltype(h_apply_op<true, op, additional_param>::helper(a())) type; };
+
+template<
+ template<typename, typename> class op,
+ typename additional_param,
+ typename a
+>
+struct apply_op_from_right { typedef decltype(h_apply_op<false, op, additional_param>::helper(a())) type; };
+
+/* see if an element is in a list */
+
+template<
+ template<typename, typename> class test,
+ typename check_against,
+ typename h_list,
+ bool last_check_positive = false
+>
+struct contained_in_list;
+
+template<
+ template<typename, typename> class test,
+ typename check_against,
+ typename h_list
+>
+struct contained_in_list<test, check_against, h_list, true>
+{
+ constexpr static bool value = true;
+};
+
+template<
+ template<typename, typename> class test,
+ typename check_against,
+ typename a,
+ typename... as
+>
+struct contained_in_list<test, check_against, type_list<a, as...>, false> : contained_in_list<test, check_against, type_list<as...>, test<check_against, a>::value> {};
+
+template<
+ template<typename, typename> class test,
+ typename check_against
+ EIGEN_TPL_PP_SPEC_HACK_DEFC(typename, empty)
+>
+struct contained_in_list<test, check_against, type_list<EIGEN_TPL_PP_SPEC_HACK_USE(empty)>, false> { constexpr static bool value = false; };
+
+/* see if an element is in a list and check for global flags */
+
+template<
+ template<typename, typename> class test,
+ typename check_against,
+ typename h_list,
+ int default_flags = 0,
+ bool last_check_positive = false,
+ int last_check_flags = default_flags
+>
+struct contained_in_list_gf;
+
+template<
+ template<typename, typename> class test,
+ typename check_against,
+ typename h_list,
+ int default_flags,
+ int last_check_flags
+>
+struct contained_in_list_gf<test, check_against, h_list, default_flags, true, last_check_flags>
+{
+ constexpr static bool value = true;
+ constexpr static int global_flags = last_check_flags;
+};
+
+template<
+ template<typename, typename> class test,
+ typename check_against,
+ typename a,
+ typename... as,
+ int default_flags,
+ int last_check_flags
+>
+struct contained_in_list_gf<test, check_against, type_list<a, as...>, default_flags, false, last_check_flags> : contained_in_list_gf<test, check_against, type_list<as...>, default_flags, test<check_against, a>::value, test<check_against, a>::global_flags> {};
+
+template<
+ template<typename, typename> class test,
+ typename check_against
+ EIGEN_TPL_PP_SPEC_HACK_DEFC(typename, empty),
+ int default_flags,
+ int last_check_flags
+>
+struct contained_in_list_gf<test, check_against, type_list<EIGEN_TPL_PP_SPEC_HACK_USE(empty)>, default_flags, false, last_check_flags> { constexpr static bool value = false; constexpr static int global_flags = default_flags; };
+
+/* generic reductions */
+
+template<
+ typename Reducer,
+ typename... Ts
+> struct reduce;
+
+template<
+ typename Reducer
+> struct reduce<Reducer>
+{
+ constexpr static inline int run() { return Reducer::Identity; }
+};
+
+template<
+ typename Reducer,
+ typename A
+> struct reduce<Reducer, A>
+{
+ constexpr static inline A run(A a) { return a; }
+};
+
+template<
+ typename Reducer,
+ typename A,
+ typename... Ts
+> struct reduce<Reducer, A, Ts...>
+{
+ constexpr static inline auto run(A a, Ts... ts) -> decltype(Reducer::run(a, reduce<Reducer, Ts...>::run(ts...))) {
+ return Reducer::run(a, reduce<Reducer, Ts...>::run(ts...));
+ }
+};
+
+/* generic binary operations */
+
+struct sum_op {
+ template<typename A, typename B> EIGEN_DEVICE_FUNC constexpr static inline auto run(A a, B b) -> decltype(a + b) { return a + b; }
+ static constexpr int Identity = 0;
+};
+struct product_op {
+ template<typename A, typename B> EIGEN_DEVICE_FUNC constexpr static inline auto run(A a, B b) -> decltype(a * b) { return a * b; }
+ static constexpr int Identity = 1;
+};
+
+struct logical_and_op { template<typename A, typename B> constexpr static inline auto run(A a, B b) -> decltype(a && b) { return a && b; } };
+struct logical_or_op { template<typename A, typename B> constexpr static inline auto run(A a, B b) -> decltype(a || b) { return a || b; } };
+
+struct equal_op { template<typename A, typename B> constexpr static inline auto run(A a, B b) -> decltype(a == b) { return a == b; } };
+struct not_equal_op { template<typename A, typename B> constexpr static inline auto run(A a, B b) -> decltype(a != b) { return a != b; } };
+struct lesser_op { template<typename A, typename B> constexpr static inline auto run(A a, B b) -> decltype(a < b) { return a < b; } };
+struct lesser_equal_op { template<typename A, typename B> constexpr static inline auto run(A a, B b) -> decltype(a <= b) { return a <= b; } };
+struct greater_op { template<typename A, typename B> constexpr static inline auto run(A a, B b) -> decltype(a > b) { return a > b; } };
+struct greater_equal_op { template<typename A, typename B> constexpr static inline auto run(A a, B b) -> decltype(a >= b) { return a >= b; } };
+
+/* generic unary operations */
+
+struct not_op { template<typename A> constexpr static inline auto run(A a) -> decltype(!a) { return !a; } };
+struct negation_op { template<typename A> constexpr static inline auto run(A a) -> decltype(-a) { return -a; } };
+struct greater_equal_zero_op { template<typename A> constexpr static inline auto run(A a) -> decltype(a >= 0) { return a >= 0; } };
+
+
+/* reductions for lists */
+
+// using auto -> return value spec makes ICC 13.0 and 13.1 crash here, so we
+// have to hack it together in front... (13.0 doesn't work with
+// array_prod/array_reduce/... anyway, but 13.1 does...)
+template<typename... Ts>
+constexpr inline decltype(reduce<product_op, Ts...>::run((*((Ts*)0))...)) arg_prod(Ts... ts)
+{
+ return reduce<product_op, Ts...>::run(ts...);
+}
+
+template<typename... Ts>
+constexpr inline decltype(reduce<sum_op, Ts...>::run((*((Ts*)0))...)) arg_sum(Ts... ts)
+{
+ return reduce<sum_op, Ts...>::run(ts...);
+}
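+
+// For example (illustrative): arg_prod(2, 3, 4) == 24, arg_sum(1, 2, 3) == 6.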
+
+/* reverse arrays */
+
+template<typename Array, int... n>
+constexpr inline Array h_array_reverse(Array arr, numeric_list<int, n...>)
+{
+ return {{array_get<sizeof...(n) - n - 1>(arr)...}};
+}
+
+template<typename T, std::size_t N>
+constexpr inline array<T, N> array_reverse(array<T, N> arr)
+{
+ return h_array_reverse(arr, typename gen_numeric_list<int, N>::type());
+}
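+
+// For example (illustrative): array_reverse(array<int, 3>{{1, 2, 3}}) yields
+// {{3, 2, 1}}.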
+
+
+/* generic array reductions */
+
+// can't reuse standard reduce() interface above because Intel's Compiler
+// *really* doesn't like it, so we just reimplement the stuff
+// (start from N - 1 and work down to 0 because specialization for
+// n == N - 1 also doesn't work in Intel's compiler, so it goes into
+// an infinite loop)
+template<typename Reducer, typename T, std::size_t N, std::size_t n = N - 1>
+struct h_array_reduce {
+ EIGEN_DEVICE_FUNC constexpr static inline auto run(array<T, N> arr, T identity) -> decltype(Reducer::run(h_array_reduce<Reducer, T, N, n - 1>::run(arr, identity), array_get<n>(arr)))
+ {
+ return Reducer::run(h_array_reduce<Reducer, T, N, n - 1>::run(arr, identity), array_get<n>(arr));
+ }
+};
+
+template<typename Reducer, typename T, std::size_t N>
+struct h_array_reduce<Reducer, T, N, 0>
+{
+ EIGEN_DEVICE_FUNC constexpr static inline T run(const array<T, N>& arr, T)
+ {
+ return array_get<0>(arr);
+ }
+};
+
+template<typename Reducer, typename T>
+struct h_array_reduce<Reducer, T, 0>
+{
+ EIGEN_DEVICE_FUNC constexpr static inline T run(const array<T, 0>&, T identity)
+ {
+ return identity;
+ }
+};
+
+template<typename Reducer, typename T, std::size_t N>
+EIGEN_DEVICE_FUNC constexpr inline auto array_reduce(const array<T, N>& arr, T identity) -> decltype(h_array_reduce<Reducer, T, N>::run(arr, identity))
+{
+ return h_array_reduce<Reducer, T, N>::run(arr, identity);
+}
+
+/* standard array reductions */
+
+template<typename T, std::size_t N>
+EIGEN_DEVICE_FUNC constexpr inline auto array_sum(const array<T, N>& arr) -> decltype(array_reduce<sum_op, T, N>(arr, static_cast<T>(0)))
+{
+ return array_reduce<sum_op, T, N>(arr, static_cast<T>(0));
+}
+
+template<typename T, std::size_t N>
+EIGEN_DEVICE_FUNC constexpr inline auto array_prod(const array<T, N>& arr) -> decltype(array_reduce<product_op, T, N>(arr, static_cast<T>(1)))
+{
+ return array_reduce<product_op, T, N>(arr, static_cast<T>(1));
+}
+
+template<typename t>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE t array_prod(const std::vector<t>& a) {
+ eigen_assert(a.size() > 0);
+ t prod = 1;
+ for (size_t i = 0; i < a.size(); ++i) { prod *= a[i]; }
+ return prod;
+}
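+
+// For example (illustrative): array_prod of {2, 3, 4} is 24. Note that the
+// std::vector overload asserts on an empty vector instead of returning 1.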
+
+/* zip an array */
+
+template<typename Op, typename A, typename B, std::size_t N, int... n>
+constexpr inline array<decltype(Op::run(A(), B())),N> h_array_zip(array<A, N> a, array<B, N> b, numeric_list<int, n...>)
+{
+ return array<decltype(Op::run(A(), B())),N>{{ Op::run(array_get<n>(a), array_get<n>(b))... }};
+}
+
+template<typename Op, typename A, typename B, std::size_t N>
+constexpr inline array<decltype(Op::run(A(), B())),N> array_zip(array<A, N> a, array<B, N> b)
+{
+ return h_array_zip<Op>(a, b, typename gen_numeric_list<int, N>::type());
+}
+
+/* zip an array and reduce the result */
+
+template<typename Reducer, typename Op, typename A, typename B, std::size_t N, int... n>
+constexpr inline auto h_array_zip_and_reduce(array<A, N> a, array<B, N> b, numeric_list<int, n...>) -> decltype(reduce<Reducer, typename id_numeric<int,n,decltype(Op::run(A(), B()))>::type...>::run(Op::run(array_get<n>(a), array_get<n>(b))...))
+{
+ return reduce<Reducer, typename id_numeric<int,n,decltype(Op::run(A(), B()))>::type...>::run(Op::run(array_get<n>(a), array_get<n>(b))...);
+}
+
+template<typename Reducer, typename Op, typename A, typename B, std::size_t N>
+constexpr inline auto array_zip_and_reduce(array<A, N> a, array<B, N> b) -> decltype(h_array_zip_and_reduce<Reducer, Op, A, B, N>(a, b, typename gen_numeric_list<int, N>::type()))
+{
+ return h_array_zip_and_reduce<Reducer, Op, A, B, N>(a, b, typename gen_numeric_list<int, N>::type());
+}
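+
+// For example (illustrative): array_zip_and_reduce<sum_op, product_op>(a, b)
+// multiplies a and b element-wise and sums the results, i.e. it computes the
+// dot product of a and b.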
+
+/* apply stuff to an array */
+
+template<typename Op, typename A, std::size_t N, int... n>
+constexpr inline array<decltype(Op::run(A())),N> h_array_apply(array<A, N> a, numeric_list<int, n...>)
+{
+ return array<decltype(Op::run(A())),N>{{ Op::run(array_get<n>(a))... }};
+}
+
+template<typename Op, typename A, std::size_t N>
+constexpr inline array<decltype(Op::run(A())),N> array_apply(array<A, N> a)
+{
+ return h_array_apply<Op>(a, typename gen_numeric_list<int, N>::type());
+}
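+
+// For example (illustrative): array_apply<negation_op>(array<int, 2>{{1, -2}})
+// yields {{-1, 2}}.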
+
+/* apply stuff to an array and reduce */
+
+template<typename Reducer, typename Op, typename A, std::size_t N, int... n>
+constexpr inline auto h_array_apply_and_reduce(array<A, N> arr, numeric_list<int, n...>) -> decltype(reduce<Reducer, typename id_numeric<int,n,decltype(Op::run(A()))>::type...>::run(Op::run(array_get<n>(arr))...))
+{
+ return reduce<Reducer, typename id_numeric<int,n,decltype(Op::run(A()))>::type...>::run(Op::run(array_get<n>(arr))...);
+}
+
+template<typename Reducer, typename Op, typename A, std::size_t N>
+constexpr inline auto array_apply_and_reduce(array<A, N> a) -> decltype(h_array_apply_and_reduce<Reducer, Op, A, N>(a, typename gen_numeric_list<int, N>::type()))
+{
+ return h_array_apply_and_reduce<Reducer, Op, A, N>(a, typename gen_numeric_list<int, N>::type());
+}
+
+/* repeat a value n times (and make an array out of it)
+ * usage:
+ *   array<int, 16> a = repeat<16>(42);
+ */
+
+template<int n>
+struct h_repeat
+{
+ template<typename t, int... ii>
+ constexpr static inline array<t, n> run(t v, numeric_list<int, ii...>)
+ {
+ return {{ typename id_numeric<int, ii, t>::type(v)... }};
+ }
+};
+
+template<int n, typename t>
+constexpr array<t, n> repeat(t v) { return h_repeat<n>::run(v, typename gen_numeric_list<int, n>::type()); }
+
+/* instantiate a class by a C-style array */
+template<class InstType, typename ArrType, std::size_t N, bool Reverse, typename... Ps>
+struct h_instantiate_by_c_array;
+
+template<class InstType, typename ArrType, std::size_t N, typename... Ps>
+struct h_instantiate_by_c_array<InstType, ArrType, N, false, Ps...>
+{
+ static InstType run(ArrType* arr, Ps... args)
+ {
+ return h_instantiate_by_c_array<InstType, ArrType, N - 1, false, Ps..., ArrType>::run(arr + 1, args..., arr[0]);
+ }
+};
+
+template<class InstType, typename ArrType, std::size_t N, typename... Ps>
+struct h_instantiate_by_c_array<InstType, ArrType, N, true, Ps...>
+{
+ static InstType run(ArrType* arr, Ps... args)
+ {
+    return h_instantiate_by_c_array<InstType, ArrType, N - 1, true, ArrType, Ps...>::run(arr + 1, arr[0], args...);
+ }
+};
+
+template<class InstType, typename ArrType, typename... Ps>
+struct h_instantiate_by_c_array<InstType, ArrType, 0, false, Ps...>
+{
+ static InstType run(ArrType* arr, Ps... args)
+ {
+ (void)arr;
+ return InstType(args...);
+ }
+};
+
+template<class InstType, typename ArrType, typename... Ps>
+struct h_instantiate_by_c_array<InstType, ArrType, 0, true, Ps...>
+{
+ static InstType run(ArrType* arr, Ps... args)
+ {
+ (void)arr;
+ return InstType(args...);
+ }
+};
+
+template<class InstType, typename ArrType, std::size_t N, bool Reverse = false>
+InstType instantiate_by_c_array(ArrType* arr)
+{
+ return h_instantiate_by_c_array<InstType, ArrType, N, Reverse>::run(arr);
+}
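+
+// For example (illustrative): with int arr[2] = {1, 2},
+// instantiate_by_c_array<std::pair<int, int>, int, 2>(arr) constructs
+// std::pair<int, int>(1, 2), and with Reverse = true the arguments are
+// passed in reverse order.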
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#else // Non C++11, fallback to emulation mode
+
+#include "EmulateCXX11Meta.h"
+
+#endif
+
+#endif // EIGEN_CXX11META_H
diff --git a/unsupported/Eigen/CXX11/src/util/CXX11Workarounds.h b/unsupported/Eigen/CXX11/src/util/CXX11Workarounds.h
new file mode 100644
index 000000000..fe4d22803
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/util/CXX11Workarounds.h
@@ -0,0 +1,88 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2013 Christian Seiler <christian@iwakd.de>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11WORKAROUNDS_H
+#define EIGEN_CXX11WORKAROUNDS_H
+
+/* COMPATIBILITY CHECKS
+ * (so users of compilers that are too old get some realistic error messages)
+ */
+#if defined(__INTEL_COMPILER) && (__INTEL_COMPILER < 1310)
+#error Intel Compiler only supports required C++ features since version 13.1.
+// note that most stuff in principle works with 13.0 but when combining
+// some features, at some point 13.0 will just fail with an internal assertion
+#elif defined(__GNUC__) && !defined(__clang__) && !defined(__INTEL_COMPILER) && (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 6))
+// G++ < 4.6 by default will continue processing the source files - even if we use #error to make
+// it error out. For this reason, we use the pragma to make sure G++ aborts at the first error
+// it sees. Unfortunately, that is still not our #error directive, but at least the output is
+// short enough that the user has a chance to see that the compiler version is not sufficient for
+// the funky template mojo we use.
+#pragma GCC diagnostic error "-Wfatal-errors"
+#error GNU C++ Compiler (g++) only supports required C++ features since version 4.6.
+#endif
+
+/* Check that the compiler at least claims to support C++11. It might not be sufficient
+ * because the compiler may not implement it correctly, but at least we'll know.
+ * On the other hand, Visual Studio still doesn't claim to support C++11 although it's
+ * compliant enough for our purpose.
+ */
+#if (__cplusplus <= 199711L) && (EIGEN_COMP_MSVC < 1900)
+#if defined(__GNUC__) && !defined(__clang__) && !defined(__INTEL_COMPILER)
+#pragma GCC diagnostic error "-Wfatal-errors"
+#endif
+#error This library needs at least a C++11 compliant compiler. If you use g++/clang, please enable the -std=c++11 compiler flag. (-std=c++0x on older versions.)
+#endif
+
+namespace Eigen {
+
+namespace internal {
+
+/* std::get is only constexpr in C++14, not yet in C++11
+ */
+
+
+template<std::size_t I, class T> constexpr inline T& array_get(std::vector<T>& a) { return a[I]; }
+template<std::size_t I, class T> constexpr inline T&& array_get(std::vector<T>&& a) { return a[I]; }
+template<std::size_t I, class T> constexpr inline T const& array_get(std::vector<T> const& a) { return a[I]; }
+
+/* Suppose you have a template of the form
+ * template<typename T> struct X;
+ * And you want to specialize it in such a way:
+ * template<typename S1, typename... SN> struct X<Foo<S1, SN...>> { ::: };
+ * template<> struct X<Foo<>> { ::: };
+ * This will work in Intel's compiler 13.0, but only to some extent in g++ 4.6, since
+ * g++ can only match templates called with parameter packs if the number of template
+ * arguments is not a fixed size (so inside the first specialization, referencing
+ * X<Foo<SN...>> will fail in g++). On the other hand, g++ will accept the following:
+ * template<typename... S> struct X<Foo<S...>> { ::: };
+ * as an additional (!) specialization, which will then only match the empty case.
+ * But Intel's compiler 13.0 won't accept that, it will only accept the empty syntax,
+ * so we have to create a workaround for this.
+ */
+#if defined(__GNUC__) && !defined(__INTEL_COMPILER)
+#define EIGEN_TPL_PP_SPEC_HACK_DEF(mt, n) mt... n
+#define EIGEN_TPL_PP_SPEC_HACK_DEFC(mt, n) , EIGEN_TPL_PP_SPEC_HACK_DEF(mt, n)
+#define EIGEN_TPL_PP_SPEC_HACK_USE(n) n...
+#define EIGEN_TPL_PP_SPEC_HACK_USEC(n) , n...
+#else
+#define EIGEN_TPL_PP_SPEC_HACK_DEF(mt, n)
+#define EIGEN_TPL_PP_SPEC_HACK_DEFC(mt, n)
+#define EIGEN_TPL_PP_SPEC_HACK_USE(n)
+#define EIGEN_TPL_PP_SPEC_HACK_USEC(n)
+#endif
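+
+// A minimal usage sketch (with a hypothetical template X over a hypothetical
+// container Foo): on g++ the DEF/USE macros expand to a trailing parameter
+// pack, producing the extra empty-pack specialization it requires, while on
+// other compilers they expand to nothing, producing the plain empty syntax:
+//   template<typename T> struct X;
+//   template<typename S1, typename... SN> struct X<Foo<S1, SN...>> { /* ... */ };
+//   template<EIGEN_TPL_PP_SPEC_HACK_DEF(typename, SN)>
+//   struct X<Foo<EIGEN_TPL_PP_SPEC_HACK_USE(SN)>> { /* empty case */ };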
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_CXX11WORKAROUNDS_H
+
+/*
+ * kate: space-indent on; indent-width 2; mixedindent off; indent-mode cstyle;
+ */
diff --git a/unsupported/Eigen/CXX11/src/util/EmulateArray.h b/unsupported/Eigen/CXX11/src/util/EmulateArray.h
new file mode 100644
index 000000000..30d3ebcff
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/util/EmulateArray.h
@@ -0,0 +1,267 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_EMULATE_ARRAY_H
+#define EIGEN_EMULATE_ARRAY_H
+
+
+
+// The array class is only available starting with C++11. Emulate our own here
+// if needed. Beware, MSVC still doesn't advertise itself as a C++11 compiler!
+// Moreover, CUDA doesn't support the STL containers, so we use our own instead.
+#if (__cplusplus <= 199711L && EIGEN_COMP_MSVC < 1900) || defined(__CUDACC__) || defined(EIGEN_AVOID_STL_ARRAY)
+
+namespace Eigen {
+template <typename T, size_t n> class array {
+ public:
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE T& operator[] (size_t index) { return values[index]; }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const T& operator[] (size_t index) const { return values[index]; }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE T& front() { return values[0]; }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const T& front() const { return values[0]; }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE T& back() { return values[n-1]; }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const T& back() const { return values[n-1]; }
+
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
+ static std::size_t size() { return n; }
+
+ T values[n];
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE array() { }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE array(const T& v) {
+ EIGEN_STATIC_ASSERT(n==1, YOU_MADE_A_PROGRAMMING_MISTAKE)
+ values[0] = v;
+ }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE array(const T& v1, const T& v2) {
+ EIGEN_STATIC_ASSERT(n==2, YOU_MADE_A_PROGRAMMING_MISTAKE)
+ values[0] = v1;
+ values[1] = v2;
+ }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE array(const T& v1, const T& v2, const T& v3) {
+ EIGEN_STATIC_ASSERT(n==3, YOU_MADE_A_PROGRAMMING_MISTAKE)
+ values[0] = v1;
+ values[1] = v2;
+ values[2] = v3;
+ }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE array(const T& v1, const T& v2, const T& v3,
+ const T& v4) {
+ EIGEN_STATIC_ASSERT(n==4, YOU_MADE_A_PROGRAMMING_MISTAKE)
+ values[0] = v1;
+ values[1] = v2;
+ values[2] = v3;
+ values[3] = v4;
+ }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE array(const T& v1, const T& v2, const T& v3, const T& v4,
+ const T& v5) {
+ EIGEN_STATIC_ASSERT(n==5, YOU_MADE_A_PROGRAMMING_MISTAKE)
+ values[0] = v1;
+ values[1] = v2;
+ values[2] = v3;
+ values[3] = v4;
+ values[4] = v5;
+ }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE array(const T& v1, const T& v2, const T& v3, const T& v4,
+ const T& v5, const T& v6) {
+ EIGEN_STATIC_ASSERT(n==6, YOU_MADE_A_PROGRAMMING_MISTAKE)
+ values[0] = v1;
+ values[1] = v2;
+ values[2] = v3;
+ values[3] = v4;
+ values[4] = v5;
+ values[5] = v6;
+ }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE array(const T& v1, const T& v2, const T& v3, const T& v4,
+ const T& v5, const T& v6, const T& v7) {
+ EIGEN_STATIC_ASSERT(n==7, YOU_MADE_A_PROGRAMMING_MISTAKE)
+ values[0] = v1;
+ values[1] = v2;
+ values[2] = v3;
+ values[3] = v4;
+ values[4] = v5;
+ values[5] = v6;
+ values[6] = v7;
+ }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE array(
+ const T& v1, const T& v2, const T& v3, const T& v4,
+ const T& v5, const T& v6, const T& v7, const T& v8) {
+ EIGEN_STATIC_ASSERT(n==8, YOU_MADE_A_PROGRAMMING_MISTAKE)
+ values[0] = v1;
+ values[1] = v2;
+ values[2] = v3;
+ values[3] = v4;
+ values[4] = v5;
+ values[5] = v6;
+ values[6] = v7;
+ values[7] = v8;
+ }
+
+#if EIGEN_HAS_VARIADIC_TEMPLATES
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE array(std::initializer_list<T> l) {
+ eigen_assert(l.size() == n);
+ internal::smart_copy(l.begin(), l.end(), values);
+ }
+#endif
+};
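+
+// Usage sketch (illustrative only): the emulated class mimics the small
+// subset of std::array that the tensor code relies on, e.g.
+//   Eigen::array<int, 3> dims(4, 5, 6); // up-to-8-argument constructors
+//   int first = dims.front();           // 4
+//   std::size_t rank = dims.size();     // 3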
+
+
+// Specialize array for zero size
+template <typename T> class array<T, 0> {
+ public:
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE T& operator[] (size_t) {
+ eigen_assert(false && "Can't index a zero size array");
+ return dummy;
+ }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const T& operator[] (size_t) const {
+ eigen_assert(false && "Can't index a zero size array");
+ return dummy;
+ }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE T& front() {
+ eigen_assert(false && "Can't index a zero size array");
+ return dummy;
+ }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const T& front() const {
+ eigen_assert(false && "Can't index a zero size array");
+ return dummy;
+ }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE T& back() {
+ eigen_assert(false && "Can't index a zero size array");
+ return dummy;
+ }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const T& back() const {
+ eigen_assert(false && "Can't index a zero size array");
+ return dummy;
+ }
+
+ static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE std::size_t size() { return 0; }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE array() : dummy() { }
+
+#if EIGEN_HAS_VARIADIC_TEMPLATES
+ EIGEN_DEVICE_FUNC array(std::initializer_list<T> l) : dummy() {
+ eigen_assert(l.size() == 0);
+ }
+#endif
+
+ private:
+ T dummy;
+};
+
+// Comparison operator
+// Todo: implement !=, <, <=, >, and >=
+template<class T, std::size_t N>
+EIGEN_DEVICE_FUNC bool operator==(const array<T,N>& lhs, const array<T,N>& rhs) {
+ for (std::size_t i = 0; i < N; ++i) {
+ if (lhs[i] != rhs[i]) {
+ return false;
+ }
+ }
+ return true;
+}
+
+
+namespace internal {
+template<std::size_t I, class T, std::size_t N>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T& array_get(array<T,N>& a) {
+ return a[I];
+}
+template<std::size_t I, class T, std::size_t N>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const T& array_get(const array<T,N>& a) {
+ return a[I];
+}
+
+template <typename T> struct array_size;
+template<class T, std::size_t N> struct array_size<array<T,N> > {
+ static const size_t value = N;
+};
+template <typename T> struct array_size;
+template<class T, std::size_t N> struct array_size<array<T,N>& > {
+ static const size_t value = N;
+};
+template <typename T> struct array_size;
+template<class T, std::size_t N> struct array_size<const array<T,N> > {
+ static const size_t value = N;
+};
+template <typename T> struct array_size;
+template<class T, std::size_t N> struct array_size<const array<T,N>& > {
+ static const size_t value = N;
+};
+
+} // end namespace internal
+} // end namespace Eigen
+
+#else
+
+// The compiler supports C++11, and we're not targeting CUDA: use std::array as Eigen::array
+#include <array>
+namespace Eigen {
+
+template <typename T, std::size_t N> using array = std::array<T, N>;
+
+namespace internal {
+/* std::get is only constexpr in C++14, not yet in C++11
+ * - libstdc++ from version 4.7 onwards has it nevertheless,
+ * so use that
+ * - libstdc++ older versions: use _M_instance directly
+ * - libc++ all versions so far: use __elems_ directly
+ * - all other libs: use std::get to be portable, but
+ * this may not be constexpr
+ */
+#if defined(__GLIBCXX__) && __GLIBCXX__ < 20120322
+#define STD_GET_ARR_HACK a._M_instance[I]
+#elif defined(_LIBCPP_VERSION)
+#define STD_GET_ARR_HACK a.__elems_[I]
+#else
+#define STD_GET_ARR_HACK std::template get<I, T, N>(a)
+#endif
+
+template<std::size_t I, class T, std::size_t N> constexpr inline T& array_get(std::array<T,N>& a) { return (T&) STD_GET_ARR_HACK; }
+template<std::size_t I, class T, std::size_t N> constexpr inline T&& array_get(std::array<T,N>&& a) { return (T&&) STD_GET_ARR_HACK; }
+template<std::size_t I, class T, std::size_t N> constexpr inline T const& array_get(std::array<T,N> const& a) { return (T const&) STD_GET_ARR_HACK; }
+
+#undef STD_GET_ARR_HACK
+
+template <typename T> struct array_size;
+template<class T, std::size_t N> struct array_size<const std::array<T,N> > {
+ static const size_t value = N;
+};
+template <typename T> struct array_size;
+template<class T, std::size_t N> struct array_size<std::array<T,N> > {
+ static const size_t value = N;
+};
+} // end namespace internal
+} // end namespace Eigen
+
+#endif
+
+#endif // EIGEN_EMULATE_ARRAY_H
diff --git a/unsupported/Eigen/CXX11/src/util/EmulateCXX11Meta.h b/unsupported/Eigen/CXX11/src/util/EmulateCXX11Meta.h
new file mode 100644
index 000000000..f3aa1b144
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/util/EmulateCXX11Meta.h
@@ -0,0 +1,311 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_EMULATE_CXX11_META_H
+#define EIGEN_EMULATE_CXX11_META_H
+
+
+
+namespace Eigen {
+
+namespace internal {
+
+/** \internal
+ * \file CXX11/util/EmulateCXX11Meta.h
+ * This file emulates a subset of the functionality provided by CXX11Meta.h for
+ * compilers that don't yet support C++11, such as nvcc.
+ */
+
+struct empty_list { static const std::size_t count = 0; };
+
+template<typename T, typename Tail=empty_list> struct type_list {
+ typedef T HeadType;
+ typedef Tail TailType;
+ static const T head;
+ static const Tail tail;
+ static const std::size_t count = 1 + Tail::count;
+};
+
+struct null_type { };
+
+template<typename T1 = null_type, typename T2 = null_type, typename T3 = null_type,
+ typename T4 = null_type, typename T5 = null_type, typename T6 = null_type,
+ typename T7 = null_type, typename T8 = null_type>
+struct make_type_list {
+ typedef typename make_type_list<T2, T3, T4, T5, T6, T7, T8>::type tailresult;
+
+ typedef type_list<T1, tailresult> type;
+};
+
+template<> struct make_type_list<> {
+ typedef empty_list type;
+};
+
+
+template <std::size_t index, class TList> struct get_type;
+
+template <class Head, class Tail>
+struct get_type<0, type_list<Head, Tail> >
+{
+ typedef Head type;
+};
+
+template <std::size_t i, class Head, class Tail>
+struct get_type<i, type_list<Head, Tail> >
+{
+ typedef typename get_type<i-1, Tail>::type type;
+};
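+
+// Sketch: make_type_list<int, float>::type expands to
+// type_list<int, type_list<float, empty_list> > (count == 2), and
+// get_type<1, make_type_list<int, float>::type>::type is float.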
+
+
+/* numeric list */
+template <typename T, T n>
+struct type2val {
+ typedef T type;
+ static const T value = n;
+};
+
+
+template<typename T, size_t n, T V> struct gen_numeric_list_repeated;
+
+template<typename T, T V> struct gen_numeric_list_repeated<T, 1, V> {
+ typedef typename make_type_list<type2val<T, V> >::type type;
+};
+
+template<typename T, T V> struct gen_numeric_list_repeated<T, 2, V> {
+ typedef typename make_type_list<type2val<T, V>, type2val<T, V> >::type type;
+};
+
+template<typename T, T V> struct gen_numeric_list_repeated<T, 3, V> {
+ typedef typename make_type_list<type2val<T, V>, type2val<T, V>, type2val<T, V> >::type type;
+};
+
+template<typename T, T V> struct gen_numeric_list_repeated<T, 4, V> {
+ typedef typename make_type_list<type2val<T, V>, type2val<T, V>, type2val<T, V>, type2val<T, V> >::type type;
+};
+
+template<typename T, T V> struct gen_numeric_list_repeated<T, 5, V> {
+ typedef typename make_type_list<type2val<T, V>, type2val<T, V>, type2val<T, V>, type2val<T, V>, type2val<T, V> >::type type;
+};
+
+template<typename T, T V> struct gen_numeric_list_repeated<T, 6, V> {
+ typedef typename make_type_list<type2val<T, V>, type2val<T, V>, type2val<T, V>,
+ type2val<T, V>, type2val<T, V>, type2val<T, V> >::type type;
+};
+
+template<typename T, T V> struct gen_numeric_list_repeated<T, 7, V> {
+ typedef typename make_type_list<type2val<T, V>, type2val<T, V>, type2val<T, V>,
+ type2val<T, V>, type2val<T, V>, type2val<T, V>,
+ type2val<T, V> >::type type;
+};
+
+template<typename T, T V> struct gen_numeric_list_repeated<T, 8, V> {
+ typedef typename make_type_list<type2val<T, V>, type2val<T, V>, type2val<T, V>,
+ type2val<T, V>, type2val<T, V>, type2val<T, V>,
+ type2val<T, V>, type2val<T, V> >::type type;
+};
+
+
+template <std::size_t index, class NList> struct get;
+
+template <std::size_t i>
+struct get<i, empty_list>
+{
+ get() { eigen_assert(false && "index overflow"); }
+ typedef void type;
+ static const char value = '\0';
+};
+
+template <std::size_t i, class Head>
+struct get<i, type_list<Head, empty_list> >
+{
+ get() { eigen_assert(false && "index overflow"); }
+ typedef void type;
+ static const char value = '\0';
+};
+
+template <class Head>
+struct get<0, type_list<Head, empty_list> >
+{
+ typedef typename Head::type type;
+ static const type value = Head::value;
+};
+
+template <class Head, class Tail>
+struct get<0, type_list<Head, Tail> >
+{
+ typedef typename Head::type type;
+ static const type value = Head::value;
+};
+
+template <std::size_t i, class Head, class Tail>
+struct get<i, type_list<Head, Tail> >
+{
+  typedef typename get<i-1, Tail>::type type;
+ static const type value = get<i-1, Tail>::value;
+};
+
+
+template <class NList> struct arg_prod {
+ static const typename NList::HeadType::type value = get<0, NList>::value * arg_prod<typename NList::TailType>::value;
+};
+template <> struct arg_prod<empty_list> {
+ static const int value = 1;
+};
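+
+// Sketch: gen_numeric_list_repeated<int, 3, 2>::type encodes (2, 2, 2), so
+// get<1, ...>::value is 2 and arg_prod<...>::value is 2*2*2 == 8, all at
+// compile time, e.g.
+//   typedef gen_numeric_list_repeated<int, 3, 2>::type Twos;
+//   EIGEN_STATIC_ASSERT(arg_prod<Twos>::value == 8, YOU_MADE_A_PROGRAMMING_MISTAKE)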
+
+
+template<int n, typename t>
+array<t, n> repeat(t v) {
+  array<t, n> res; // avoid shadowing the array template with a local name
+  res.fill(v);
+  return res;
+}
+
+template<std::size_t I, class Head, class Tail>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename Head::type array_get(type_list<Head, Tail>&) {
+ return get<I, type_list<Head, Tail> >::value;
+}
+template<std::size_t I, class Head, class Tail>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename Head::type array_get(const type_list<Head, Tail>&) {
+ return get<I, type_list<Head, Tail> >::value;
+}
+
+template <class NList>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename NList::HeadType::type array_prod(const NList&) {
+ return arg_prod<NList>::value;
+}
+
+template<typename t, std::size_t n>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE t array_prod(const array<t, n>& a) {
+ t prod = 1;
+ for (size_t i = 0; i < n; ++i) { prod *= a[i]; }
+ return prod;
+}
+template<typename t>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE t array_prod(const array<t, 0>& /*a*/) {
+ return 0;
+}
+
+template<typename t>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE t array_prod(const std::vector<t>& a) {
+ eigen_assert(a.size() > 0);
+ t prod = 1;
+ for (size_t i = 0; i < a.size(); ++i) { prod *= a[i]; }
+ return prod;
+}
+
+
+template<std::size_t I, class T>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T& array_get(std::vector<T>& a) {
+ return a[I];
+}
+template<std::size_t I, class T>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const T& array_get(const std::vector<T>& a) {
+ return a[I];
+}
+
+struct sum_op {
+  // Return the operand type rather than bool so the result is not truncated.
+  template<typename A, typename B> static inline A run(A a, B b) { return a + b; }
+};
+struct product_op {
+  template<typename A, typename B> static inline A run(A a, B b) { return a * b; }
+};
+
+struct logical_and_op {
+ template<typename A, typename B> static inline bool run(A a, B b) { return a && b; }
+};
+struct logical_or_op {
+ template<typename A, typename B> static inline bool run(A a, B b) { return a || b; }
+};
+
+struct equal_op {
+ template<typename A, typename B> static inline bool run(A a, B b) { return a == b; }
+};
+struct not_equal_op {
+ template<typename A, typename B> static inline bool run(A a, B b) { return a != b; }
+};
+struct lesser_op {
+ template<typename A, typename B> static inline bool run(A a, B b) { return a < b; }
+};
+struct lesser_equal_op {
+ template<typename A, typename B> static inline bool run(A a, B b) { return a <= b; }
+};
+
+struct greater_op {
+ template<typename A, typename B> static inline bool run(A a, B b) { return a > b; }
+};
+struct greater_equal_op {
+ template<typename A, typename B> static inline bool run(A a, B b) { return a >= b; }
+};
+
+struct not_op {
+ template<typename A> static inline bool run(A a) { return !a; }
+};
+struct negation_op {
+  template<typename A> static inline A run(A a) { return -a; }
+};
+struct greater_equal_zero_op {
+ template<typename A> static inline bool run(A a) { return a >= 0; }
+};
+
+
+template<typename Reducer, typename Op, typename A, std::size_t N>
+struct ArrayApplyAndReduce {
+ static inline bool run(const array<A, N>& a) {
+ EIGEN_STATIC_ASSERT(N >= 2, YOU_MADE_A_PROGRAMMING_MISTAKE);
+ bool result = Reducer::run(Op::run(a[0]), Op::run(a[1]));
+ for (size_t i = 2; i < N; ++i) {
+ result = Reducer::run(result, Op::run(a[i]));
+ }
+ return result;
+ }
+};
+
+template<typename Reducer, typename Op, typename A>
+struct ArrayApplyAndReduce<Reducer, Op, A, 1> {
+ static inline bool run(const array<A, 1>& a) {
+ return Op::run(a[0]);
+ }
+};
+
+template<typename Reducer, typename Op, typename A, std::size_t N>
+inline bool array_apply_and_reduce(const array<A, N>& a) {
+ return ArrayApplyAndReduce<Reducer, Op, A, N>::run(a);
+}
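+
+// Sketch (illustrative only): checking that every entry of an index array is
+// non-negative is an apply-and-reduce with greater_equal_zero_op and
+// logical_and_op:
+//   array<int, 3> idx(0, 2, 5);
+//   bool ok = array_apply_and_reduce<logical_and_op, greater_equal_zero_op>(idx);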
+
+template<typename Reducer, typename Op, typename A, typename B, std::size_t N>
+struct ArrayZipAndReduce {
+ static inline bool run(const array<A, N>& a, const array<B, N>& b) {
+ EIGEN_STATIC_ASSERT(N >= 2, YOU_MADE_A_PROGRAMMING_MISTAKE);
+ bool result = Reducer::run(Op::run(a[0], b[0]), Op::run(a[1], b[1]));
+ for (size_t i = 2; i < N; ++i) {
+ result = Reducer::run(result, Op::run(a[i], b[i]));
+ }
+ return result;
+ }
+};
+
+template<typename Reducer, typename Op, typename A, typename B>
+struct ArrayZipAndReduce<Reducer, Op, A, B, 1> {
+ static inline bool run(const array<A, 1>& a, const array<B, 1>& b) {
+ return Op::run(a[0], b[0]);
+ }
+};
+
+template<typename Reducer, typename Op, typename A, typename B, std::size_t N>
+inline bool array_zip_and_reduce(const array<A, N>& a, const array<B, N>& b) {
+ return ArrayZipAndReduce<Reducer, Op, A, B, N>::run(a, b);
+}
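+
+// Sketch (illustrative only): element-wise equality of two arrays is a zip
+// with equal_op reduced by logical_and_op:
+//   array<int, 2> a(1, 2), b(1, 2);
+//   bool eq = array_zip_and_reduce<logical_and_op, equal_op>(a, b); // true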
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+
+
+#endif // EIGEN_EMULATE_CXX11_META_H
diff --git a/unsupported/Eigen/CXX11/src/util/MaxSizeVector.h b/unsupported/Eigen/CXX11/src/util/MaxSizeVector.h
new file mode 100644
index 000000000..4bc3dd1ba
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/util/MaxSizeVector.h
@@ -0,0 +1,141 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_FIXEDSIZEVECTOR_H
+#define EIGEN_FIXEDSIZEVECTOR_H
+
+namespace Eigen {
+
+/** \class MaxSizeVector
+ * \ingroup Core
+ *
+ * \brief The MaxSizeVector class.
+ *
+ * The %MaxSizeVector provides a subset of std::vector functionality.
+ *
+ * The goal is to provide basic std::vector operations when using
+ * std::vector is not an option (e.g. on GPU or when compiling using
+ * FMA/AVX, as this can cause either compilation failures or illegal
+ * instruction failures).
+ *
+ * Beware: The constructors are not API compatible with those of
+ * std::vector.
+ */
+template <typename T>
+class MaxSizeVector {
+ public:
+  // Construct a new MaxSizeVector with capacity n. All n slots are
+  // default-constructed, but size() starts at 0.
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ explicit MaxSizeVector(size_t n)
+ : reserve_(n), size_(0),
+ data_(static_cast<T*>(internal::aligned_malloc(n * sizeof(T)))) {
+ for (size_t i = 0; i < n; ++i) { new (&data_[i]) T; }
+ }
+
+ // Construct a new MaxSizeVector, reserve and resize to n.
+ // Copy the init value to all elements.
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ MaxSizeVector(size_t n, const T& init)
+ : reserve_(n), size_(n),
+ data_(static_cast<T*>(internal::aligned_malloc(n * sizeof(T)))) {
+ for (size_t i = 0; i < n; ++i) { new (&data_[i]) T(init); }
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ ~MaxSizeVector() {
+ for (size_t i = 0; i < size_; ++i) {
+ data_[i].~T();
+ }
+ internal::aligned_free(data_);
+ }
+
+ void resize(size_t n) {
+ eigen_assert(n <= reserve_);
+ for (size_t i = size_; i < n; ++i) {
+ new (&data_[i]) T;
+ }
+ for (size_t i = n; i < size_; ++i) {
+ data_[i].~T();
+ }
+ size_ = n;
+ }
+
+ // Append new elements (up to reserved size).
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ void push_back(const T& t) {
+ eigen_assert(size_ < reserve_);
+ data_[size_++] = t;
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const T& operator[] (size_t i) const {
+ eigen_assert(i < size_);
+ return data_[i];
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ T& operator[] (size_t i) {
+ eigen_assert(i < size_);
+ return data_[i];
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ T& back() {
+ eigen_assert(size_ > 0);
+ return data_[size_ - 1];
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const T& back() const {
+ eigen_assert(size_ > 0);
+ return data_[size_ - 1];
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ void pop_back() {
+ // NOTE: This does not destroy the value at the end the way
+ // std::vector's version of pop_back() does. That happens when
+ // the Vector is destroyed.
+ eigen_assert(size_ > 0);
+ size_--;
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ size_t size() const { return size_; }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ bool empty() const { return size_ == 0; }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ T* data() { return data_; }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const T* data() const { return data_; }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ T* begin() { return data_; }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ T* end() { return data_ + size_; }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const T* begin() const { return data_; }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const T* end() const { return data_ + size_; }
+
+ private:
+ size_t reserve_;
+ size_t size_;
+ T* data_;
+};
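+
+// Usage sketch (illustrative only): capacity is fixed at construction and
+// push_back() asserts instead of reallocating:
+//   MaxSizeVector<int> v(4); // room for at most 4 elements
+//   v.push_back(10);
+//   v.push_back(20);
+//   int last = v.back(); // 20
+//   v.pop_back();        // size() == 1; the element is not destroyed here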
+
+} // namespace Eigen
+
+#endif // EIGEN_FIXEDSIZEVECTOR_H
diff --git a/unsupported/Eigen/EulerAngles b/unsupported/Eigen/EulerAngles
new file mode 100644
index 000000000..521fa3f76
--- /dev/null
+++ b/unsupported/Eigen/EulerAngles
@@ -0,0 +1,43 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2015 Tal Hadad <tal_hd@hotmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_EULERANGLES_MODULE_H
+#define EIGEN_EULERANGLES_MODULE_H
+
+
+#include "Eigen/Core"
+#include "Eigen/Geometry"
+
+#include "Eigen/src/Core/util/DisableStupidWarnings.h"
+
+namespace Eigen {
+
+/**
+ * \defgroup EulerAngles_Module EulerAngles module
+ * \brief This module provides generic Euler angle rotations.
+ *
+ * Euler angles are a way to represent 3D rotation.
+ *
+ * In order to use this module in your code, include this header:
+ * \code
+ * #include <unsupported/Eigen/EulerAngles>
+ * \endcode
+ *
+ * See \ref EulerAngles for more information.
+ *
+ */
+
+}
+
+#include "src/EulerAngles/EulerSystem.h"
+#include "src/EulerAngles/EulerAngles.h"
+
+#include "Eigen/src/Core/util/ReenableStupidWarnings.h"
+
+#endif // EIGEN_EULERANGLES_MODULE_H
diff --git a/unsupported/Eigen/IterativeSolvers b/unsupported/Eigen/IterativeSolvers
index aa15403db..31e880bdc 100644
--- a/unsupported/Eigen/IterativeSolvers
+++ b/unsupported/Eigen/IterativeSolvers
@@ -24,9 +24,6 @@
*/
//@{
-#include "../../Eigen/src/misc/Solve.h"
-#include "../../Eigen/src/misc/SparseSolve.h"
-
#ifndef EIGEN_MPL2_ONLY
#include "src/IterativeSolvers/IterationController.h"
#include "src/IterativeSolvers/ConstrainedConjGrad.h"
@@ -36,7 +33,7 @@
#include "../../Eigen/Jacobi"
#include "../../Eigen/Householder"
#include "src/IterativeSolvers/GMRES.h"
-#include "src/IterativeSolvers/IncompleteCholesky.h"
+#include "src/IterativeSolvers/DGMRES.h"
//#include "src/IterativeSolvers/SSORPreconditioner.h"
#include "src/IterativeSolvers/MINRES.h"
diff --git a/unsupported/Eigen/KroneckerProduct b/unsupported/Eigen/KroneckerProduct
index c932c06a6..5f5afb8cf 100644
--- a/unsupported/Eigen/KroneckerProduct
+++ b/unsupported/Eigen/KroneckerProduct
@@ -13,6 +13,8 @@
#include "../../Eigen/src/Core/util/DisableStupidWarnings.h"
+#include "../../Eigen/src/SparseCore/SparseUtil.h"
+
namespace Eigen {
/**
diff --git a/unsupported/Eigen/MPRealSupport b/unsupported/Eigen/MPRealSupport
index d4b03647d..7f0b70c63 100644
--- a/unsupported/Eigen/MPRealSupport
+++ b/unsupported/Eigen/MPRealSupport
@@ -27,6 +27,8 @@ namespace Eigen {
* via the <a href="http://www.holoborodko.com/pavel/mpfr">MPFR C++</a>
* library which itself is built upon <a href="http://www.mpfr.org/">MPFR</a>/<a href="http://gmplib.org/">GMP</a>.
*
+ * \warning MPFR C++ is licensed under the GPL.
+ *
* You can find a copy of MPFR C++ that is known to be compatible in the unsupported/test/mpreal folder.
*
* Here is an example:
@@ -65,30 +67,35 @@ int main()
IsSigned = 1,
IsComplex = 0,
RequireInitialization = 1,
- ReadCost = 10,
- AddCost = 10,
- MulCost = 40
+ ReadCost = HugeCost,
+ AddCost = HugeCost,
+ MulCost = HugeCost
};
typedef mpfr::mpreal Real;
typedef mpfr::mpreal NonInteger;
- inline static Real highest (long Precision = mpfr::mpreal::get_default_prec()) { return mpfr::maxval(Precision); }
- inline static Real lowest (long Precision = mpfr::mpreal::get_default_prec()) { return -mpfr::maxval(Precision); }
+ static inline Real highest (long Precision = mpfr::mpreal::get_default_prec()) { return mpfr::maxval(Precision); }
+ static inline Real lowest (long Precision = mpfr::mpreal::get_default_prec()) { return -mpfr::maxval(Precision); }
// Constants
- inline static Real Pi (long Precision = mpfr::mpreal::get_default_prec()) { return mpfr::const_pi(Precision); }
- inline static Real Euler (long Precision = mpfr::mpreal::get_default_prec()) { return mpfr::const_euler(Precision); }
- inline static Real Log2 (long Precision = mpfr::mpreal::get_default_prec()) { return mpfr::const_log2(Precision); }
- inline static Real Catalan (long Precision = mpfr::mpreal::get_default_prec()) { return mpfr::const_catalan(Precision); }
-
- inline static Real epsilon (long Precision = mpfr::mpreal::get_default_prec()) { return mpfr::machine_epsilon(Precision); }
- inline static Real epsilon (const Real& x) { return mpfr::machine_epsilon(x); }
-
- inline static Real dummy_precision()
- {
- unsigned int weak_prec = ((mpfr::mpreal::get_default_prec()-1) * 90) / 100;
- return mpfr::machine_epsilon(weak_prec);
+ static inline Real Pi (long Precision = mpfr::mpreal::get_default_prec()) { return mpfr::const_pi(Precision); }
+ static inline Real Euler (long Precision = mpfr::mpreal::get_default_prec()) { return mpfr::const_euler(Precision); }
+ static inline Real Log2 (long Precision = mpfr::mpreal::get_default_prec()) { return mpfr::const_log2(Precision); }
+ static inline Real Catalan (long Precision = mpfr::mpreal::get_default_prec()) { return mpfr::const_catalan(Precision); }
+
+ static inline Real epsilon (long Precision = mpfr::mpreal::get_default_prec()) { return mpfr::machine_epsilon(Precision); }
+ static inline Real epsilon (const Real& x) { return mpfr::machine_epsilon(x); }
+
+#ifdef MPREAL_HAVE_DYNAMIC_STD_NUMERIC_LIMITS
+ static inline int digits10 (long Precision = mpfr::mpreal::get_default_prec()) { return std::numeric_limits<Real>::digits10(Precision); }
+ static inline int digits10 (const Real& x) { return std::numeric_limits<Real>::digits10(x); }
+#endif
+
+ static inline Real dummy_precision()
+ {
+ mpfr_prec_t weak_prec = ((mpfr::mpreal::get_default_prec()-1) * 90) / 100;
+ return mpfr::machine_epsilon(weak_prec);
}
};
@@ -139,64 +146,63 @@ int main()
public:
typedef mpfr::mpreal ResScalar;
enum {
- nr = 2, // must be 2 for proper packing...
+ Vectorizable = false,
+ LhsPacketSize = 1,
+ RhsPacketSize = 1,
+ ResPacketSize = 1,
+ NumberOfRegisters = 1,
+ nr = 1,
mr = 1,
- WorkSpaceFactor = nr,
LhsProgress = 1,
RhsProgress = 1
};
+ typedef ResScalar LhsPacket;
+ typedef ResScalar RhsPacket;
+ typedef ResScalar ResPacket;
+
};
- template<typename Index, int mr, int nr, bool ConjugateLhs, bool ConjugateRhs>
- struct gebp_kernel<mpfr::mpreal,mpfr::mpreal,Index,mr,nr,ConjugateLhs,ConjugateRhs>
+
+
+ template<typename Index, typename DataMapper, bool ConjugateLhs, bool ConjugateRhs>
+ struct gebp_kernel<mpfr::mpreal,mpfr::mpreal,Index,DataMapper,1,1,ConjugateLhs,ConjugateRhs>
{
typedef mpfr::mpreal mpreal;
EIGEN_DONT_INLINE
- void operator()(mpreal* res, Index resStride, const mpreal* blockA, const mpreal* blockB, Index rows, Index depth, Index cols, mpreal alpha,
- Index strideA=-1, Index strideB=-1, Index offsetA=0, Index offsetB=0, mpreal* /*unpackedB*/ = 0)
+ void operator()(const DataMapper& res, const mpreal* blockA, const mpreal* blockB,
+ Index rows, Index depth, Index cols, const mpreal& alpha,
+ Index strideA=-1, Index strideB=-1, Index offsetA=0, Index offsetB=0)
{
- mpreal acc1, acc2, tmp;
-
+ if(rows==0 || cols==0 || depth==0)
+ return;
+
+ mpreal acc1(0,mpfr_get_prec(blockA[0].mpfr_srcptr())),
+ tmp (0,mpfr_get_prec(blockA[0].mpfr_srcptr()));
+
if(strideA==-1) strideA = depth;
if(strideB==-1) strideB = depth;
- for(Index j=0; j<cols; j+=nr)
+ for(Index i=0; i<rows; ++i)
{
- Index actual_nr = (std::min<Index>)(nr,cols-j);
- mpreal *C1 = res + j*resStride;
- mpreal *C2 = res + (j+1)*resStride;
- for(Index i=0; i<rows; i++)
+ for(Index j=0; j<cols; ++j)
{
- mpreal *B = const_cast<mpreal*>(blockB) + j*strideB + offsetB*actual_nr;
- mpreal *A = const_cast<mpreal*>(blockA) + i*strideA + offsetA;
+ const mpreal *A = blockA + i*strideA + offsetA;
+ const mpreal *B = blockB + j*strideB + offsetB;
+
acc1 = 0;
- acc2 = 0;
for(Index k=0; k<depth; k++)
{
- mpfr_mul(tmp.mpfr_ptr(), A[k].mpfr_ptr(), B[0].mpfr_ptr(), mpreal::get_default_rnd());
+ mpfr_mul(tmp.mpfr_ptr(), A[k].mpfr_srcptr(), B[k].mpfr_srcptr(), mpreal::get_default_rnd());
mpfr_add(acc1.mpfr_ptr(), acc1.mpfr_ptr(), tmp.mpfr_ptr(), mpreal::get_default_rnd());
-
- if(actual_nr==2) {
- mpfr_mul(tmp.mpfr_ptr(), A[k].mpfr_ptr(), B[1].mpfr_ptr(), mpreal::get_default_rnd());
- mpfr_add(acc2.mpfr_ptr(), acc2.mpfr_ptr(), tmp.mpfr_ptr(), mpreal::get_default_rnd());
- }
-
- B+=actual_nr;
}
- mpfr_mul(acc1.mpfr_ptr(), acc1.mpfr_ptr(), alpha.mpfr_ptr(), mpreal::get_default_rnd());
- mpfr_add(C1[i].mpfr_ptr(), C1[i].mpfr_ptr(), acc1.mpfr_ptr(), mpreal::get_default_rnd());
-
- if(actual_nr==2) {
- mpfr_mul(acc2.mpfr_ptr(), acc2.mpfr_ptr(), alpha.mpfr_ptr(), mpreal::get_default_rnd());
- mpfr_add(C2[i].mpfr_ptr(), C2[i].mpfr_ptr(), acc2.mpfr_ptr(), mpreal::get_default_rnd());
- }
+ mpfr_mul(acc1.mpfr_ptr(), acc1.mpfr_srcptr(), alpha.mpfr_srcptr(), mpreal::get_default_rnd());
+ mpfr_add(res(i,j).mpfr_ptr(), res(i,j).mpfr_srcptr(), acc1.mpfr_srcptr(), mpreal::get_default_rnd());
}
}
}
};
-
} // end namespace internal
}
diff --git a/unsupported/Eigen/MatrixFunctions b/unsupported/Eigen/MatrixFunctions
index 0991817d5..0320606c1 100644
--- a/unsupported/Eigen/MatrixFunctions
+++ b/unsupported/Eigen/MatrixFunctions
@@ -13,8 +13,6 @@
#include <cfloat>
#include <list>
-#include <functional>
-#include <iterator>
#include <Eigen/Core>
#include <Eigen/LU>
@@ -84,7 +82,9 @@ const MatrixFunctionReturnValue<Derived> MatrixBase<Derived>::cos() const
\param[in] M a square matrix.
\returns expression representing \f$ \cos(M) \f$.
-This function calls \ref matrixbase_matrixfunction "matrixFunction()" with StdStemFunctions::cos().
+This function computes the matrix cosine. Use ArrayBase::cos() for computing the entry-wise cosine.
+
+The implementation calls \ref matrixbase_matrixfunction "matrixFunction()" with StdStemFunctions::cos().
\sa \ref matrixbase_sin "sin()" for an example.
@@ -125,6 +125,9 @@ differential equations: the solution of \f$ y' = My \f$ with the
initial condition \f$ y(0) = y_0 \f$ is given by
\f$ y(t) = \exp(M) y_0 \f$.
+The matrix exponential is different from applying the exp function to all the entries in the matrix.
+Use ArrayBase::exp() if you want to do the latter.
+
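+An illustrative sketch of the difference:
+\code
+Eigen::MatrixXd A(2,2);
+A << 0, 1,
+     0, 0;
+Eigen::MatrixXd m = A.exp();          // matrix exponential: [[1, 1], [0, 1]]
+Eigen::ArrayXXd e = A.array().exp();  // entry-wise:         [[1, e], [1, 1]]
+\endcode
+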
The cost of the computation is approximately \f$ 20 n^3 \f$ for
matrices of size \f$ n \f$. The number 20 depends weakly on the
norm of the matrix.
@@ -179,6 +182,9 @@ the scalar logarithm, the equation \f$ \exp(X) = M \f$ may have
multiple solutions; this function returns a matrix whose eigenvalues
have imaginary part in the interval \f$ (-\pi,\pi] \f$.
+The matrix logarithm is different from applying the log function to all the entries in the matrix.
+Use ArrayBase::log() if you want to do the latter.
+
In the real case, the matrix \f$ M \f$ should be invertible and
it should have no eigenvalues which are real and negative (pairs of
complex conjugate eigenvalues are allowed). In the complex case, it
@@ -230,22 +236,66 @@ const MatrixPowerReturnValue<Derived> MatrixBase<Derived>::pow(RealScalar p) con
\endcode
\param[in] M base of the matrix power, should be a square matrix.
-\param[in] p exponent of the matrix power, should be real.
+\param[in] p exponent of the matrix power.
The matrix power \f$ M^p \f$ is defined as \f$ \exp(p \log(M)) \f$,
where exp denotes the matrix exponential, and log denotes the matrix
-logarithm.
+logarithm. This is different from raising all the entries in the matrix
+to the p-th power. Use ArrayBase::pow() if you want to do the latter.
-The matrix \f$ M \f$ should meet the conditions to be an argument of
-matrix logarithm. If \p p is not of the real scalar type of \p M, it
-is casted into the real scalar type of \p M.
+If \p p is complex, the scalar type of \p M should be the type of \p
+p . \f$ M^p \f$ simply evaluates into \f$ \exp(p \log(M)) \f$.
+Therefore, the matrix \f$ M \f$ should meet the conditions to be an
+argument of matrix logarithm.
-This function computes the matrix power using the Schur-Pad&eacute;
+If \p p is real, it is cast into the real scalar type of \p M. Then
+this function computes the matrix power using the Schur-Pad&eacute;
algorithm as implemented by class MatrixPower. The exponent is split
into integral part and fractional part, where the fractional part is
in the interval \f$ (-1, 1) \f$. The main diagonal and the first
super-diagonal are directly computed.
+If \p M is singular with a semisimple zero eigenvalue and \p p is
+positive, the Schur factor \f$ T \f$ is reordered with Givens
+rotations, i.e.
+
+\f[ T = \left[ \begin{array}{cc}
+ T_1 & T_2 \\
+ 0 & 0
+ \end{array} \right] \f]
+
+where \f$ T_1 \f$ is invertible. Then \f$ T^p \f$ is given by
+
+\f[ T^p = \left[ \begin{array}{cc}
+             T_1^p & T_1^{-1} T_1^p T_2 \\
+             0     & 0
+          \end{array} \right]. \f]
+
+\warning The fractional power of a matrix with a non-semisimple zero
+eigenvalue is not well-defined. We introduce an assertion failure
+to guard against inaccurate results, e.g. \code
+#include <unsupported/Eigen/MatrixFunctions>
+#include <iostream>
+
+int main()
+{
+ Eigen::Matrix4d A;
+ A << 0, 0, 2, 3,
+ 0, 0, 4, 5,
+ 0, 0, 6, 7,
+ 0, 0, 8, 9;
+ std::cout << A.pow(0.37) << std::endl;
+
+ // The 1 makes eigenvalue 0 non-semisimple.
+ A.coeffRef(0, 1) = 1;
+
+ // This fails if EIGEN_NO_DEBUG is undefined.
+ std::cout << A.pow(0.37) << std::endl;
+
+ return 0;
+}
+\endcode
+
Details of the algorithm can be found in: Nicholas J. Higham and
Lijing Lin, "A Schur-Pad&eacute; algorithm for fractional powers of a
matrix," <em>SIAM J. %Matrix Anal. Applic.</em>,
@@ -350,7 +400,9 @@ const MatrixFunctionReturnValue<Derived> MatrixBase<Derived>::sin() const
\param[in] M a square matrix.
\returns expression representing \f$ \sin(M) \f$.
-This function calls \ref matrixbase_matrixfunction "matrixFunction()" with StdStemFunctions::sin().
+This function computes the matrix sine. Use ArrayBase::sin() for computing the entry-wise sine.
+
+The implementation calls \ref matrixbase_matrixfunction "matrixFunction()" with StdStemFunctions::sin().
Example: \include MatrixSine.cpp
Output: \verbinclude MatrixSine.out
@@ -387,7 +439,9 @@ const MatrixSquareRootReturnValue<Derived> MatrixBase<Derived>::sqrt() const
The matrix square root of \f$ M \f$ is the matrix \f$ M^{1/2} \f$
whose square is the original matrix; so if \f$ S = M^{1/2} \f$ then
-\f$ S^2 = M \f$.
+\f$ S^2 = M \f$. This is different from taking the square root of all
+the entries in the matrix; use ArrayBase::sqrt() if you want to do the
+latter.
In the <b>real case</b>, the matrix \f$ M \f$ should be invertible and
it should have no eigenvalues which are real and negative (pairs of
diff --git a/unsupported/Eigen/OpenGLSupport b/unsupported/Eigen/OpenGLSupport
index e2769449c..87f50947d 100644
--- a/unsupported/Eigen/OpenGLSupport
+++ b/unsupported/Eigen/OpenGLSupport
@@ -51,7 +51,7 @@ namespace internal {
typename Scalar = typename XprType::Scalar, \
int Rows = XprType::RowsAtCompileTime, \
int Cols = XprType::ColsAtCompileTime, \
- bool IsGLCompatible = bool(XprType::Flags&LinearAccessBit) \
+ bool IsGLCompatible = bool(internal::evaluator<XprType>::Flags&LinearAccessBit) \
&& bool(XprType::Flags&DirectAccessBit) \
&& (XprType::IsVectorAtCompileTime || (XprType::Flags&RowMajorBit)==0)> \
struct EIGEN_CAT(EIGEN_CAT(gl_,FUNC),_impl); \
@@ -180,11 +180,11 @@ template<typename Scalar> void glLoadMatrix(const Transform<Scalar,3,AffineCompa
inline void glRotate(const Rotation2D<float>& rot)
{
- glRotatef(rot.angle()*180.f/float(M_PI), 0.f, 0.f, 1.f);
+ glRotatef(rot.angle()*180.f/float(EIGEN_PI), 0.f, 0.f, 1.f);
}
inline void glRotate(const Rotation2D<double>& rot)
{
- glRotated(rot.angle()*180.0/M_PI, 0.0, 0.0, 1.0);
+ glRotated(rot.angle()*180.0/EIGEN_PI, 0.0, 0.0, 1.0);
}
template<typename Derived> void glRotate(const RotationBase<Derived,3>& rot)
@@ -203,7 +203,7 @@ namespace internal {
typename Scalar = typename XprType::Scalar, \
int Rows = XprType::RowsAtCompileTime, \
int Cols = XprType::ColsAtCompileTime, \
- bool IsGLCompatible = bool(XprType::Flags&LinearAccessBit) \
+ bool IsGLCompatible = bool(internal::evaluator<XprType>::Flags&LinearAccessBit) \
&& bool(XprType::Flags&DirectAccessBit) \
&& (XprType::IsVectorAtCompileTime || (XprType::Flags&RowMajorBit)==0)> \
struct EIGEN_CAT(EIGEN_CAT(gl_,FUNC),_impl); \
@@ -276,12 +276,12 @@ EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glUniform,GLint,const,float, 4,4,Matrix
#ifdef GL_VERSION_2_1
-static void glUniformMatrix2x3fv_ei(GLint loc, const float* v) { glUniformMatrix2x3fv(loc,1,false,v); }
-static void glUniformMatrix3x2fv_ei(GLint loc, const float* v) { glUniformMatrix3x2fv(loc,1,false,v); }
-static void glUniformMatrix2x4fv_ei(GLint loc, const float* v) { glUniformMatrix2x4fv(loc,1,false,v); }
-static void glUniformMatrix4x2fv_ei(GLint loc, const float* v) { glUniformMatrix4x2fv(loc,1,false,v); }
-static void glUniformMatrix3x4fv_ei(GLint loc, const float* v) { glUniformMatrix3x4fv(loc,1,false,v); }
-static void glUniformMatrix4x3fv_ei(GLint loc, const float* v) { glUniformMatrix4x3fv(loc,1,false,v); }
+inline void glUniformMatrix2x3fv_ei(GLint loc, const float* v) { glUniformMatrix2x3fv(loc,1,false,v); }
+inline void glUniformMatrix3x2fv_ei(GLint loc, const float* v) { glUniformMatrix3x2fv(loc,1,false,v); }
+inline void glUniformMatrix2x4fv_ei(GLint loc, const float* v) { glUniformMatrix2x4fv(loc,1,false,v); }
+inline void glUniformMatrix4x2fv_ei(GLint loc, const float* v) { glUniformMatrix4x2fv(loc,1,false,v); }
+inline void glUniformMatrix3x4fv_ei(GLint loc, const float* v) { glUniformMatrix3x4fv(loc,1,false,v); }
+inline void glUniformMatrix4x3fv_ei(GLint loc, const float* v) { glUniformMatrix4x3fv(loc,1,false,v); }
EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glUniform,GLint,const,float, 2,3,Matrix2x3fv_ei)
EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glUniform,GLint,const,float, 3,2,Matrix3x2fv_ei)
diff --git a/unsupported/Eigen/SVD b/unsupported/Eigen/SVD
deleted file mode 100644
index 7cc059280..000000000
--- a/unsupported/Eigen/SVD
+++ /dev/null
@@ -1,39 +0,0 @@
-#ifndef EIGEN_SVD_MODULE_H
-#define EIGEN_SVD_MODULE_H
-
-#include <Eigen/QR>
-#include <Eigen/Householder>
-#include <Eigen/Jacobi>
-
-#include "../../Eigen/src/Core/util/DisableStupidWarnings.h"
-
-/** \defgroup SVD_Module SVD module
- *
- *
- *
- * This module provides SVD decomposition for matrices (both real and complex).
- * This decomposition is accessible via the following MatrixBase method:
- * - MatrixBase::jacobiSvd()
- *
- * \code
- * #include <Eigen/SVD>
- * \endcode
- */
-
-#include "../../Eigen/src/misc/Solve.h"
-#include "../../Eigen/src/SVD/UpperBidiagonalization.h"
-#include "src/SVD/SVDBase.h"
-#include "src/SVD/JacobiSVD.h"
-#include "src/SVD/BDCSVD.h"
-#if defined(EIGEN_USE_LAPACKE) && !defined(EIGEN_USE_LAPACKE_STRICT)
-#include "../../Eigen/src/SVD/JacobiSVD_MKL.h"
-#endif
-
-#ifdef EIGEN2_SUPPORT
-#include "../../Eigen/src/Eigen2Support/SVD.h"
-#endif
-
-#include "../../Eigen/src/Core/util/ReenableStupidWarnings.h"
-
-#endif // EIGEN_SVD_MODULE_H
-/* vim: set filetype=cpp et sw=2 ts=2 ai: */
diff --git a/unsupported/Eigen/SparseExtra b/unsupported/Eigen/SparseExtra
index b5597902a..819cffa27 100644
--- a/unsupported/Eigen/SparseExtra
+++ b/unsupported/Eigen/SparseExtra
@@ -37,9 +37,6 @@
*/
-#include "../../Eigen/src/misc/Solve.h"
-#include "../../Eigen/src/misc/SparseSolve.h"
-
#include "src/SparseExtra/DynamicSparseMatrix.h"
#include "src/SparseExtra/BlockOfDynamicSparseMatrix.h"
#include "src/SparseExtra/RandomSetter.h"
diff --git a/unsupported/Eigen/SpecialFunctions b/unsupported/Eigen/SpecialFunctions
new file mode 100644
index 000000000..a2ad4925e
--- /dev/null
+++ b/unsupported/Eigen/SpecialFunctions
@@ -0,0 +1,63 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2016 Gael Guennebaud <g.gael@free.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPECIALFUNCTIONS_MODULE
+#define EIGEN_SPECIALFUNCTIONS_MODULE
+
+#include <math.h>
+
+#include "../../Eigen/Core"
+
+#include "../../Eigen/src/Core/util/DisableStupidWarnings.h"
+
+namespace Eigen {
+
+/**
+ * \defgroup SpecialFunctions_Module Special math functions module
+ *
+ * This module features additional coefficient-wise math functions available
+ * within the numext:: namespace for the scalar versions, and as methods and/or free
+ * functions of Array. Those include:
+ *
+ * - erf
+ * - erfc
+ * - lgamma
+ * - igamma
+ * - igammac
+ * - digamma
+ * - polygamma
+ * - zeta
+ * - betainc
+ *
+ * \code
+ * #include <unsupported/Eigen/SpecialFunctions>
+ * \endcode
+ */
+//@{
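+
+// Usage sketch (illustrative only): once this header is included the listed
+// functions are available coefficient-wise on arrays, e.g.
+//   Eigen::ArrayXd x(3); x << 0.5, 1.0, 2.0;
+//   Eigen::ArrayXd y = x.lgamma(); // coefficient-wise log-gamma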
+
+}
+
+#include "src/SpecialFunctions/SpecialFunctionsImpl.h"
+#include "src/SpecialFunctions/SpecialFunctionsPacketMath.h"
+#include "src/SpecialFunctions/SpecialFunctionsHalf.h"
+#include "src/SpecialFunctions/SpecialFunctionsFunctors.h"
+#include "src/SpecialFunctions/SpecialFunctionsArrayAPI.h"
+
+#if defined EIGEN_VECTORIZE_CUDA
+ #include "src/SpecialFunctions/arch/CUDA/CudaSpecialFunctions.h"
+#endif
+
+namespace Eigen {
+//@}
+}
+
+
+#include "../../Eigen/src/Core/util/ReenableStupidWarnings.h"
+
+#endif // EIGEN_SPECIALFUNCTIONS_MODULE
diff --git a/unsupported/Eigen/src/AutoDiff/AutoDiffJacobian.h b/unsupported/Eigen/src/AutoDiff/AutoDiffJacobian.h
index 1a61e3367..33b6c393f 100644
--- a/unsupported/Eigen/src/AutoDiff/AutoDiffJacobian.h
+++ b/unsupported/Eigen/src/AutoDiff/AutoDiffJacobian.h
@@ -20,37 +20,60 @@ public:
AutoDiffJacobian(const Functor& f) : Functor(f) {}
// forward constructors
+#if EIGEN_HAS_VARIADIC_TEMPLATES
+ template<typename... T>
+ AutoDiffJacobian(const T& ...Values) : Functor(Values...) {}
+#else
template<typename T0>
AutoDiffJacobian(const T0& a0) : Functor(a0) {}
template<typename T0, typename T1>
AutoDiffJacobian(const T0& a0, const T1& a1) : Functor(a0, a1) {}
template<typename T0, typename T1, typename T2>
AutoDiffJacobian(const T0& a0, const T1& a1, const T2& a2) : Functor(a0, a1, a2) {}
+#endif
+
+ typedef typename Functor::InputType InputType;
+ typedef typename Functor::ValueType ValueType;
+ typedef typename ValueType::Scalar Scalar;
enum {
- InputsAtCompileTime = Functor::InputsAtCompileTime,
- ValuesAtCompileTime = Functor::ValuesAtCompileTime
+ InputsAtCompileTime = InputType::RowsAtCompileTime,
+ ValuesAtCompileTime = ValueType::RowsAtCompileTime
};
- typedef typename Functor::InputType InputType;
- typedef typename Functor::ValueType ValueType;
- typedef typename Functor::JacobianType JacobianType;
- typedef typename JacobianType::Scalar Scalar;
+ typedef Matrix<Scalar, ValuesAtCompileTime, InputsAtCompileTime> JacobianType;
typedef typename JacobianType::Index Index;
- typedef Matrix<Scalar,InputsAtCompileTime,1> DerivativeType;
+ typedef Matrix<Scalar, InputsAtCompileTime, 1> DerivativeType;
typedef AutoDiffScalar<DerivativeType> ActiveScalar;
-
typedef Matrix<ActiveScalar, InputsAtCompileTime, 1> ActiveInput;
typedef Matrix<ActiveScalar, ValuesAtCompileTime, 1> ActiveValue;
+#if EIGEN_HAS_VARIADIC_TEMPLATES
+ // Some compilers don't accept variadic parameters after a default parameter,
+ // i.e., we can't just write _jac=0 but we need to overload operator():
+ EIGEN_STRONG_INLINE
+ void operator() (const InputType& x, ValueType* v) const
+ {
+ this->operator()(x, v, 0);
+ }
+ template<typename... ParamsType>
+ void operator() (const InputType& x, ValueType* v, JacobianType* _jac,
+ const ParamsType&... Params) const
+#else
void operator() (const InputType& x, ValueType* v, JacobianType* _jac=0) const
+#endif
{
eigen_assert(v!=0);
+
if (!_jac)
{
+#if EIGEN_HAS_VARIADIC_TEMPLATES
+ Functor::operator()(x, v, Params...);
+#else
Functor::operator()(x, v);
+#endif
return;
}
@@ -61,12 +84,16 @@ public:
if(InputsAtCompileTime==Dynamic)
for (Index j=0; j<jac.rows(); j++)
- av[j].derivatives().resize(this->inputs());
+ av[j].derivatives().resize(x.rows());
for (Index i=0; i<jac.cols(); i++)
- ax[i].derivatives() = DerivativeType::Unit(this->inputs(),i);
+ ax[i].derivatives() = DerivativeType::Unit(x.rows(),i);
+#if EIGEN_HAS_VARIADIC_TEMPLATES
+ Functor::operator()(ax, &av, Params...);
+#else
Functor::operator()(ax, &av);
+#endif
for (Index i=0; i<jac.rows(); i++)
{
@@ -74,8 +101,6 @@ public:
jac.row(i) = av[i].derivatives();
}
}
-protected:
-
};
}
diff --git a/unsupported/Eigen/src/AutoDiff/AutoDiffScalar.h b/unsupported/Eigen/src/AutoDiff/AutoDiffScalar.h
index 8d42e69b9..50fedf6ac 100644..100755
--- a/unsupported/Eigen/src/AutoDiff/AutoDiffScalar.h
+++ b/unsupported/Eigen/src/AutoDiff/AutoDiffScalar.h
@@ -30,6 +30,13 @@ template<typename _DerType, bool Enable> struct auto_diff_special_op;
} // end namespace internal
+template<typename _DerType> class AutoDiffScalar;
+
+template<typename NewDerType>
+inline AutoDiffScalar<NewDerType> MakeAutoDiffScalar(const typename NewDerType::Scalar& value, const NewDerType &der) {
+ return AutoDiffScalar<NewDerType>(value,der);
+}
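+
+// Sketch (assumes C++11 `auto` for brevity): MakeAutoDiffScalar deduces the
+// derivative expression type from its second argument, e.g.
+//   Eigen::Vector2d der(1.0, 0.0);
+//   auto x = Eigen::MakeAutoDiffScalar(3.0, der); // value 3, derivatives (1,0)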
+
/** \class AutoDiffScalar
* \brief A scalar type replacement with automatic differentiation capability
*
@@ -60,7 +67,7 @@ template<typename _DerType>
class AutoDiffScalar
: public internal::auto_diff_special_op
<_DerType, !internal::is_same<typename internal::traits<typename internal::remove_all<_DerType>::type>::Scalar,
- typename NumTraits<typename internal::traits<typename internal::remove_all<_DerType>::type>::Scalar>::Real>::value>
+ typename NumTraits<typename internal::traits<typename internal::remove_all<_DerType>::type>::Scalar>::Real>::value>
{
public:
typedef internal::auto_diff_special_op
@@ -99,7 +106,11 @@ class AutoDiffScalar
{}
template<typename OtherDerType>
- AutoDiffScalar(const AutoDiffScalar<OtherDerType>& other)
+ AutoDiffScalar(const AutoDiffScalar<OtherDerType>& other
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+ , typename internal::enable_if<internal::is_same<Scalar, typename internal::traits<typename internal::remove_all<OtherDerType>::type>::Scalar>::value,void*>::type = 0
+#endif
+ )
: m_value(other.value()), m_derivatives(other.derivatives())
{}
@@ -127,6 +138,14 @@ class AutoDiffScalar
return *this;
}
+ inline AutoDiffScalar& operator=(const Scalar& other)
+ {
+ m_value = other;
+ if(m_derivatives.size()>0)
+ m_derivatives.setZero();
+ return *this;
+ }
+
// inline operator const Scalar& () const { return m_value; }
// inline operator Scalar& () { return m_value; }
@@ -245,20 +264,16 @@ class AutoDiffScalar
-m_derivatives);
}
- inline const AutoDiffScalar<CwiseUnaryOp<internal::scalar_multiple_op<Scalar>, const DerType> >
+ inline const AutoDiffScalar<EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(DerType,Scalar,product) >
operator*(const Scalar& other) const
{
- return AutoDiffScalar<CwiseUnaryOp<internal::scalar_multiple_op<Scalar>, const DerType> >(
- m_value * other,
- (m_derivatives * other));
+ return MakeAutoDiffScalar(m_value * other, m_derivatives * other);
}
- friend inline const AutoDiffScalar<CwiseUnaryOp<internal::scalar_multiple_op<Scalar>, const DerType> >
+ friend inline const AutoDiffScalar<EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(DerType,Scalar,product) >
operator*(const Scalar& other, const AutoDiffScalar& a)
{
- return AutoDiffScalar<CwiseUnaryOp<internal::scalar_multiple_op<Scalar>, const DerType> >(
- a.value() * other,
- a.derivatives() * other);
+ return MakeAutoDiffScalar(a.value() * other, a.derivatives() * other);
}
// inline const AutoDiffScalar<typename CwiseUnaryOp<internal::scalar_multiple_op<Real>, DerType>::Type >
@@ -277,20 +292,16 @@ class AutoDiffScalar
// a.derivatives() * other);
// }
- inline const AutoDiffScalar<CwiseUnaryOp<internal::scalar_multiple_op<Scalar>, const DerType> >
+ inline const AutoDiffScalar<EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(DerType,Scalar,product) >
operator/(const Scalar& other) const
{
- return AutoDiffScalar<CwiseUnaryOp<internal::scalar_multiple_op<Scalar>, const DerType> >(
- m_value / other,
- (m_derivatives * (Scalar(1)/other)));
+ return MakeAutoDiffScalar(m_value / other, (m_derivatives * (Scalar(1)/other)));
}
- friend inline const AutoDiffScalar<CwiseUnaryOp<internal::scalar_multiple_op<Scalar>, const DerType> >
+ friend inline const AutoDiffScalar<EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(DerType,Scalar,product) >
operator/(const Scalar& other, const AutoDiffScalar& a)
{
- return AutoDiffScalar<CwiseUnaryOp<internal::scalar_multiple_op<Scalar>, const DerType> >(
- other / a.value(),
- a.derivatives() * (Scalar(-other) / (a.value()*a.value())));
+ return MakeAutoDiffScalar(other / a.value(), a.derivatives() * (Scalar(-other) / (a.value()*a.value())));
}
// inline const AutoDiffScalar<typename CwiseUnaryOp<internal::scalar_multiple_op<Real>, DerType>::Type >
@@ -310,34 +321,29 @@ class AutoDiffScalar
// }
template<typename OtherDerType>
- inline const AutoDiffScalar<CwiseUnaryOp<internal::scalar_multiple_op<Scalar>,
- const CwiseBinaryOp<internal::scalar_difference_op<Scalar>,
- const CwiseUnaryOp<internal::scalar_multiple_op<Scalar>, const DerType>,
- const CwiseUnaryOp<internal::scalar_multiple_op<Scalar>, const typename internal::remove_all<OtherDerType>::type > > > >
+ inline const AutoDiffScalar<EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(
+ CwiseBinaryOp<internal::scalar_difference_op<Scalar> EIGEN_COMMA
+ const EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(DerType,Scalar,product) EIGEN_COMMA
+ const EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(typename internal::remove_all<OtherDerType>::type,Scalar,product) >,Scalar,product) >
operator/(const AutoDiffScalar<OtherDerType>& other) const
{
internal::make_coherent(m_derivatives, other.derivatives());
- return AutoDiffScalar<CwiseUnaryOp<internal::scalar_multiple_op<Scalar>,
- const CwiseBinaryOp<internal::scalar_difference_op<Scalar>,
- const CwiseUnaryOp<internal::scalar_multiple_op<Scalar>, const DerType>,
- const CwiseUnaryOp<internal::scalar_multiple_op<Scalar>, const typename internal::remove_all<OtherDerType>::type > > > >(
+ return MakeAutoDiffScalar(
m_value / other.value(),
- ((m_derivatives * other.value()) - (m_value * other.derivatives()))
+ ((m_derivatives * other.value()) - (other.derivatives() * m_value))
* (Scalar(1)/(other.value()*other.value())));
}
template<typename OtherDerType>
inline const AutoDiffScalar<CwiseBinaryOp<internal::scalar_sum_op<Scalar>,
- const CwiseUnaryOp<internal::scalar_multiple_op<Scalar>, const DerType>,
- const CwiseUnaryOp<internal::scalar_multiple_op<Scalar>, const typename internal::remove_all<OtherDerType>::type> > >
+ const EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(DerType,Scalar,product),
+ const EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(typename internal::remove_all<OtherDerType>::type,Scalar,product) > >
operator*(const AutoDiffScalar<OtherDerType>& other) const
{
internal::make_coherent(m_derivatives, other.derivatives());
- return AutoDiffScalar<const CwiseBinaryOp<internal::scalar_sum_op<Scalar>,
- const CwiseUnaryOp<internal::scalar_multiple_op<Scalar>, const DerType>,
- const CwiseUnaryOp<internal::scalar_multiple_op<Scalar>, const typename internal::remove_all<OtherDerType>::type > > >(
+ return MakeAutoDiffScalar(
m_value * other.value(),
- (m_derivatives * other.value()) + (m_value * other.derivatives()));
+ (m_derivatives * other.value()) + (other.derivatives() * m_value));
}
inline AutoDiffScalar& operator*=(const Scalar& other)
@@ -414,18 +420,18 @@ struct auto_diff_special_op<_DerType, true>
}
- inline const AutoDiffScalar<typename CwiseUnaryOp<scalar_multiple2_op<Scalar,Real>, DerType>::Type >
+ inline const AutoDiffScalar<typename CwiseUnaryOp<bind2nd_op<scalar_product_op<Scalar,Real> >, DerType>::Type >
operator*(const Real& other) const
{
- return AutoDiffScalar<typename CwiseUnaryOp<scalar_multiple2_op<Scalar,Real>, DerType>::Type >(
+ return AutoDiffScalar<typename CwiseUnaryOp<bind2nd_op<scalar_product_op<Scalar,Real> >, DerType>::Type >(
derived().value() * other,
derived().derivatives() * other);
}
- friend inline const AutoDiffScalar<typename CwiseUnaryOp<scalar_multiple2_op<Scalar,Real>, DerType>::Type >
+ friend inline const AutoDiffScalar<typename CwiseUnaryOp<bind1st_op<scalar_product_op<Real,Scalar> >, DerType>::Type >
operator*(const Real& other, const AutoDiffScalar<_DerType>& a)
{
- return AutoDiffScalar<typename CwiseUnaryOp<scalar_multiple2_op<Scalar,Real>, DerType>::Type >(
+ return AutoDiffScalar<typename CwiseUnaryOp<bind1st_op<scalar_product_op<Real,Scalar> >, DerType>::Type >(
a.value() * other,
a.derivatives() * other);
}
@@ -489,43 +495,44 @@ struct make_coherent_impl<Matrix<A_Scalar, A_Rows, A_Cols, A_Options, A_MaxRows,
}
};
-template<typename A_Scalar, int A_Rows, int A_Cols, int A_Options, int A_MaxRows, int A_MaxCols>
-struct scalar_product_traits<Matrix<A_Scalar, A_Rows, A_Cols, A_Options, A_MaxRows, A_MaxCols>,A_Scalar>
-{
- enum { Defined = 1 };
- typedef Matrix<A_Scalar, A_Rows, A_Cols, A_Options, A_MaxRows, A_MaxCols> ReturnType;
-};
-
-template<typename A_Scalar, int A_Rows, int A_Cols, int A_Options, int A_MaxRows, int A_MaxCols>
-struct scalar_product_traits<A_Scalar, Matrix<A_Scalar, A_Rows, A_Cols, A_Options, A_MaxRows, A_MaxCols> >
-{
- enum { Defined = 1 };
- typedef Matrix<A_Scalar, A_Rows, A_Cols, A_Options, A_MaxRows, A_MaxCols> ReturnType;
-};
+} // end namespace internal
-template<typename DerType>
-struct scalar_product_traits<AutoDiffScalar<DerType>,typename DerType::Scalar>
+template<typename DerType, typename BinOp>
+struct ScalarBinaryOpTraits<AutoDiffScalar<DerType>,typename DerType::Scalar,BinOp>
{
- enum { Defined = 1 };
typedef AutoDiffScalar<DerType> ReturnType;
};
-template<typename DerType>
-struct scalar_product_traits<typename DerType::Scalar,AutoDiffScalar<DerType> >
+template<typename DerType, typename BinOp>
+struct ScalarBinaryOpTraits<typename DerType::Scalar,AutoDiffScalar<DerType>, BinOp>
{
- enum { Defined = 1 };
typedef AutoDiffScalar<DerType> ReturnType;
};
-} // end namespace internal
+
+// The following is an attempt to let Eigen know about expression templates, but that's more tricky!
+
+// template<typename DerType, typename BinOp>
+// struct ScalarBinaryOpTraits<AutoDiffScalar<DerType>,AutoDiffScalar<DerType>, BinOp>
+// {
+// enum { Defined = 1 };
+// typedef AutoDiffScalar<typename DerType::PlainObject> ReturnType;
+// };
+//
+// template<typename DerType1,typename DerType2, typename BinOp>
+// struct ScalarBinaryOpTraits<AutoDiffScalar<DerType1>,AutoDiffScalar<DerType2>, BinOp>
+// {
+// enum { Defined = 1 };//internal::is_same<typename DerType1::Scalar,typename DerType2::Scalar>::value };
+// typedef AutoDiffScalar<typename DerType1::PlainObject> ReturnType;
+// };
#define EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(FUNC,CODE) \
template<typename DerType> \
- inline const Eigen::AutoDiffScalar<Eigen::CwiseUnaryOp<Eigen::internal::scalar_multiple_op<typename Eigen::internal::traits<typename Eigen::internal::remove_all<DerType>::type>::Scalar>, const typename Eigen::internal::remove_all<DerType>::type> > \
+ inline const Eigen::AutoDiffScalar< \
+ EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(typename Eigen::internal::remove_all<DerType>::type, typename Eigen::internal::traits<typename Eigen::internal::remove_all<DerType>::type>::Scalar, product) > \
FUNC(const Eigen::AutoDiffScalar<DerType>& x) { \
using namespace Eigen; \
- typedef typename Eigen::internal::traits<typename Eigen::internal::remove_all<DerType>::type>::Scalar Scalar; \
- typedef AutoDiffScalar<CwiseUnaryOp<Eigen::internal::scalar_multiple_op<Scalar>, const typename Eigen::internal::remove_all<DerType>::type> > ReturnType; \
+ EIGEN_UNUSED typedef typename Eigen::internal::traits<typename Eigen::internal::remove_all<DerType>::type>::Scalar Scalar; \
CODE; \
}
@@ -536,75 +543,92 @@ inline const AutoDiffScalar<DerType>& real(const AutoDiffScalar<DerType>& x) {
template<typename DerType>
inline typename DerType::Scalar imag(const AutoDiffScalar<DerType>&) { return 0.; }
template<typename DerType, typename T>
-inline AutoDiffScalar<DerType> (min)(const AutoDiffScalar<DerType>& x, const T& y) { return (x <= y ? x : y); }
+inline AutoDiffScalar<typename Eigen::internal::remove_all<DerType>::type::PlainObject> (min)(const AutoDiffScalar<DerType>& x, const T& y) {
+ typedef AutoDiffScalar<typename Eigen::internal::remove_all<DerType>::type::PlainObject> ADS;
+ return (x <= y ? ADS(x) : ADS(y));
+}
template<typename DerType, typename T>
-inline AutoDiffScalar<DerType> (max)(const AutoDiffScalar<DerType>& x, const T& y) { return (x >= y ? x : y); }
+inline AutoDiffScalar<typename Eigen::internal::remove_all<DerType>::type::PlainObject> (max)(const AutoDiffScalar<DerType>& x, const T& y) {
+ typedef AutoDiffScalar<typename Eigen::internal::remove_all<DerType>::type::PlainObject> ADS;
+ return (x >= y ? ADS(x) : ADS(y));
+}
template<typename DerType, typename T>
-inline AutoDiffScalar<DerType> (min)(const T& x, const AutoDiffScalar<DerType>& y) { return (x < y ? x : y); }
+inline AutoDiffScalar<typename Eigen::internal::remove_all<DerType>::type::PlainObject> (min)(const T& x, const AutoDiffScalar<DerType>& y) {
+ typedef AutoDiffScalar<typename Eigen::internal::remove_all<DerType>::type::PlainObject> ADS;
+ return (x < y ? ADS(x) : ADS(y));
+}
template<typename DerType, typename T>
-inline AutoDiffScalar<DerType> (max)(const T& x, const AutoDiffScalar<DerType>& y) { return (x > y ? x : y); }
+inline AutoDiffScalar<typename Eigen::internal::remove_all<DerType>::type::PlainObject> (max)(const T& x, const AutoDiffScalar<DerType>& y) {
+ typedef AutoDiffScalar<typename Eigen::internal::remove_all<DerType>::type::PlainObject> ADS;
+ return (x > y ? ADS(x) : ADS(y));
+}
+template<typename DerType>
+inline AutoDiffScalar<typename Eigen::internal::remove_all<DerType>::type::PlainObject> (min)(const AutoDiffScalar<DerType>& x, const AutoDiffScalar<DerType>& y) {
+ return (x.value() < y.value() ? x : y);
+}
+template<typename DerType>
+inline AutoDiffScalar<typename Eigen::internal::remove_all<DerType>::type::PlainObject> (max)(const AutoDiffScalar<DerType>& x, const AutoDiffScalar<DerType>& y) {
+ return (x.value() >= y.value() ? x : y);
+}
+
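Returning a plain-object AutoDiffScalar from these min/max overloads gives both branches of the conditional expression the same concrete type; a minimal sketch of the resulting behaviour (illustrative only):

void minMaxSketch()
{
  typedef Eigen::AutoDiffScalar<Eigen::VectorXd> AD;
  AD x(2.0, Eigen::VectorXd::Ones(1));  // value 2, derivative 1
  AD lo = (min)(x, 5.0);  // x wins: value 2, its derivative is preserved
  AD hi = (max)(x, 5.0);  // the plain constant wins: value 5, no derivative information
}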
EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(abs,
using std::abs;
- return ReturnType(abs(x.value()), x.derivatives() * (x.value()<0 ? -1 : 1) );)
+ return Eigen::MakeAutoDiffScalar(abs(x.value()), x.derivatives() * (x.value()<0 ? -1 : 1) );)
EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(abs2,
using numext::abs2;
- return ReturnType(abs2(x.value()), x.derivatives() * (Scalar(2)*x.value()));)
+ return Eigen::MakeAutoDiffScalar(abs2(x.value()), x.derivatives() * (Scalar(2)*x.value()));)
EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(sqrt,
using std::sqrt;
Scalar sqrtx = sqrt(x.value());
- return ReturnType(sqrtx,x.derivatives() * (Scalar(0.5) / sqrtx));)
+ return Eigen::MakeAutoDiffScalar(sqrtx,x.derivatives() * (Scalar(0.5) / sqrtx));)
EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(cos,
using std::cos;
using std::sin;
- return ReturnType(cos(x.value()), x.derivatives() * (-sin(x.value())));)
+ return Eigen::MakeAutoDiffScalar(cos(x.value()), x.derivatives() * (-sin(x.value())));)
EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(sin,
using std::sin;
using std::cos;
- return ReturnType(sin(x.value()),x.derivatives() * cos(x.value()));)
+ return Eigen::MakeAutoDiffScalar(sin(x.value()),x.derivatives() * cos(x.value()));)
EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(exp,
using std::exp;
Scalar expx = exp(x.value());
- return ReturnType(expx,x.derivatives() * expx);)
+ return Eigen::MakeAutoDiffScalar(expx,x.derivatives() * expx);)
EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(log,
using std::log;
- return ReturnType(log(x.value()),x.derivatives() * (Scalar(1)/x.value()));)
+ return Eigen::MakeAutoDiffScalar(log(x.value()),x.derivatives() * (Scalar(1)/x.value()));)
template<typename DerType>
-inline const Eigen::AutoDiffScalar<Eigen::CwiseUnaryOp<Eigen::internal::scalar_multiple_op<typename Eigen::internal::traits<DerType>::Scalar>, const DerType> >
-pow(const Eigen::AutoDiffScalar<DerType>& x, typename Eigen::internal::traits<DerType>::Scalar y)
+inline const Eigen::AutoDiffScalar<
+EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(typename internal::remove_all<DerType>::type,typename internal::traits<typename internal::remove_all<DerType>::type>::Scalar,product) >
+pow(const Eigen::AutoDiffScalar<DerType> &x, const typename internal::traits<typename internal::remove_all<DerType>::type>::Scalar &y)
{
using namespace Eigen;
- typedef typename Eigen::internal::traits<DerType>::Scalar Scalar;
- return AutoDiffScalar<CwiseUnaryOp<Eigen::internal::scalar_multiple_op<Scalar>, const DerType> >(
- std::pow(x.value(),y),
- x.derivatives() * (y * std::pow(x.value(),y-1)));
+ using std::pow;
+ return Eigen::MakeAutoDiffScalar(pow(x.value(),y), x.derivatives() * (y * pow(x.value(),y-1)));
}
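The rewritten pow still implements d(x^y)/dx = y * x^(y-1); a quick numeric check (illustrative only):

void powSketch()
{
  typedef Eigen::AutoDiffScalar<Eigen::VectorXd> AD;
  AD x(3.0, Eigen::VectorXd::Ones(1));
  AD p = pow(x, 2.0);  // p.value() == 9; p.derivatives()(0) == 2 * 3 == 6
}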
template<typename DerTypeA,typename DerTypeB>
-inline const AutoDiffScalar<Matrix<typename internal::traits<DerTypeA>::Scalar,Dynamic,1> >
+inline const AutoDiffScalar<Matrix<typename internal::traits<typename internal::remove_all<DerTypeA>::type>::Scalar,Dynamic,1> >
atan2(const AutoDiffScalar<DerTypeA>& a, const AutoDiffScalar<DerTypeB>& b)
{
using std::atan2;
- using std::max;
- typedef typename internal::traits<DerTypeA>::Scalar Scalar;
+ typedef typename internal::traits<typename internal::remove_all<DerTypeA>::type>::Scalar Scalar;
typedef AutoDiffScalar<Matrix<Scalar,Dynamic,1> > PlainADS;
PlainADS ret;
ret.value() = atan2(a.value(), b.value());
- Scalar tmp2 = a.value() * a.value();
- Scalar tmp3 = b.value() * b.value();
- Scalar tmp4 = tmp3/(tmp2+tmp3);
+ Scalar squared_hypot = a.value() * a.value() + b.value() * b.value();
- if (tmp4!=0)
- ret.derivatives() = (a.derivatives() * b.value() - a.value() * b.derivatives()) * (tmp2+tmp3);
+ // if (squared_hypot==0) the derivative is undefined and the following results in a NaN:
+ ret.derivatives() = (a.derivatives() * b.value() - a.value() * b.derivatives()) / squared_hypot;
return ret;
}
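The simplified expression above is the standard derivative d atan2(a,b) = (da*b - a*db) / (a^2 + b^2); a quick numeric check (illustrative only):

void atan2Sketch()
{
  typedef Eigen::AutoDiffScalar<Eigen::Vector2d> AD;
  AD a(1.0, 2, 0), b(1.0, 2, 1);
  AD r = atan2(a, b);  // r.value() == pi/4
  // r.derivatives() == (b, -a) / (a*a + b*b) evaluated == (0.5, -0.5)
}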
@@ -612,26 +636,44 @@ atan2(const AutoDiffScalar<DerTypeA>& a, const AutoDiffScalar<DerTypeB>& b)
EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(tan,
using std::tan;
using std::cos;
- return ReturnType(tan(x.value()),x.derivatives() * (Scalar(1)/numext::abs2(cos(x.value()))));)
+ return Eigen::MakeAutoDiffScalar(tan(x.value()),x.derivatives() * (Scalar(1)/numext::abs2(cos(x.value()))));)
EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(asin,
using std::sqrt;
using std::asin;
- return ReturnType(asin(x.value()),x.derivatives() * (Scalar(1)/sqrt(1-numext::abs2(x.value()))));)
+ return Eigen::MakeAutoDiffScalar(asin(x.value()),x.derivatives() * (Scalar(1)/sqrt(1-numext::abs2(x.value()))));)
EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(acos,
using std::sqrt;
using std::acos;
- return ReturnType(acos(x.value()),x.derivatives() * (Scalar(-1)/sqrt(1-numext::abs2(x.value()))));)
+ return Eigen::MakeAutoDiffScalar(acos(x.value()),x.derivatives() * (Scalar(-1)/sqrt(1-numext::abs2(x.value()))));)
+
+EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(tanh,
+ using std::cosh;
+ using std::tanh;
+ return Eigen::MakeAutoDiffScalar(tanh(x.value()),x.derivatives() * (Scalar(1)/numext::abs2(cosh(x.value()))));)
+
+EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(sinh,
+ using std::sinh;
+ using std::cosh;
+ return Eigen::MakeAutoDiffScalar(sinh(x.value()),x.derivatives() * cosh(x.value()));)
+
+EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(cosh,
+ using std::sinh;
+ using std::cosh;
+ return Eigen::MakeAutoDiffScalar(cosh(x.value()),x.derivatives() * sinh(x.value()));)
#undef EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY
template<typename DerType> struct NumTraits<AutoDiffScalar<DerType> >
- : NumTraits< typename NumTraits<typename DerType::Scalar>::Real >
+ : NumTraits< typename NumTraits<typename internal::remove_all<DerType>::type::Scalar>::Real >
{
- typedef AutoDiffScalar<Matrix<typename NumTraits<typename DerType::Scalar>::Real,DerType::RowsAtCompileTime,DerType::ColsAtCompileTime> > Real;
+ typedef typename internal::remove_all<DerType>::type DerTypeCleaned;
+ typedef AutoDiffScalar<Matrix<typename NumTraits<typename DerTypeCleaned::Scalar>::Real,DerTypeCleaned::RowsAtCompileTime,DerTypeCleaned::ColsAtCompileTime,
+ 0, DerTypeCleaned::MaxRowsAtCompileTime, DerTypeCleaned::MaxColsAtCompileTime> > Real;
typedef AutoDiffScalar<DerType> NonInteger;
- typedef AutoDiffScalar<DerType>& Nested;
+ typedef AutoDiffScalar<DerType> Nested;
+ typedef typename NumTraits<typename DerTypeCleaned::Scalar>::Literal Literal;
enum{
RequireInitialization = 1
};
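With DerType cleaned through remove_all, the NumTraits specialization also applies when DerType is a reference or expression type; a small sketch of what generic code can now query (illustrative only):

void numTraitsSketch()
{
  typedef Eigen::AutoDiffScalar<Eigen::VectorXd> AD;
  // Real is itself an AutoDiffScalar over a plain matrix of the real scalar type.
  typedef Eigen::NumTraits<AD>::Real RealAD;
  RealAD r(0.0);  // needs explicit initialization (RequireInitialization == 1)
}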
diff --git a/unsupported/Eigen/src/AutoDiff/CMakeLists.txt b/unsupported/Eigen/src/AutoDiff/CMakeLists.txt
deleted file mode 100644
index ad91fd9c4..000000000
--- a/unsupported/Eigen/src/AutoDiff/CMakeLists.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-FILE(GLOB Eigen_AutoDiff_SRCS "*.h")
-
-INSTALL(FILES
- ${Eigen_AutoDiff_SRCS}
- DESTINATION ${INCLUDE_INSTALL_DIR}/unsupported/Eigen/src/AutoDiff COMPONENT Devel
- )
diff --git a/unsupported/Eigen/src/BVH/CMakeLists.txt b/unsupported/Eigen/src/BVH/CMakeLists.txt
deleted file mode 100644
index b377d865c..000000000
--- a/unsupported/Eigen/src/BVH/CMakeLists.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-FILE(GLOB Eigen_BVH_SRCS "*.h")
-
-INSTALL(FILES
- ${Eigen_BVH_SRCS}
- DESTINATION ${INCLUDE_INSTALL_DIR}/unsupported/Eigen/src/BVH COMPONENT Devel
- )
diff --git a/unsupported/Eigen/src/CMakeLists.txt b/unsupported/Eigen/src/CMakeLists.txt
deleted file mode 100644
index 125c43fdf..000000000
--- a/unsupported/Eigen/src/CMakeLists.txt
+++ /dev/null
@@ -1,14 +0,0 @@
-ADD_SUBDIRECTORY(AutoDiff)
-ADD_SUBDIRECTORY(BVH)
-ADD_SUBDIRECTORY(FFT)
-ADD_SUBDIRECTORY(IterativeSolvers)
-ADD_SUBDIRECTORY(KroneckerProduct)
-ADD_SUBDIRECTORY(LevenbergMarquardt)
-ADD_SUBDIRECTORY(MatrixFunctions)
-ADD_SUBDIRECTORY(MoreVectorization)
-ADD_SUBDIRECTORY(NonLinearOptimization)
-ADD_SUBDIRECTORY(NumericalDiff)
-ADD_SUBDIRECTORY(Polynomials)
-ADD_SUBDIRECTORY(Skyline)
-ADD_SUBDIRECTORY(SparseExtra)
-ADD_SUBDIRECTORY(Splines)
diff --git a/unsupported/Eigen/src/Eigenvalues/ArpackSelfAdjointEigenSolver.h b/unsupported/Eigen/src/Eigenvalues/ArpackSelfAdjointEigenSolver.h
index 3b6a69aff..866a8a460 100644
--- a/unsupported/Eigen/src/Eigenvalues/ArpackSelfAdjointEigenSolver.h
+++ b/unsupported/Eigen/src/Eigenvalues/ArpackSelfAdjointEigenSolver.h
@@ -628,15 +628,15 @@ ArpackGeneralizedSelfAdjointEigenSolver<MatrixType, MatrixSolver, BisSPD>&
m_info = Success;
}
- delete select;
+ delete[] select;
}
- delete v;
- delete iparam;
- delete ipntr;
- delete workd;
- delete workl;
- delete resid;
+ delete[] v;
+ delete[] iparam;
+ delete[] ipntr;
+ delete[] workd;
+ delete[] workl;
+ delete[] resid;
m_isInitialized = true;
diff --git a/unsupported/Eigen/src/EulerAngles/CMakeLists.txt b/unsupported/Eigen/src/EulerAngles/CMakeLists.txt
new file mode 100644
index 000000000..40af550e8
--- /dev/null
+++ b/unsupported/Eigen/src/EulerAngles/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_EulerAngles_SRCS "*.h")
+
+INSTALL(FILES
+ ${Eigen_EulerAngles_SRCS}
+ DESTINATION ${INCLUDE_INSTALL_DIR}/unsupported/Eigen/src/EulerAngles COMPONENT Devel
+ )
diff --git a/unsupported/Eigen/src/EulerAngles/EulerAngles.h b/unsupported/Eigen/src/EulerAngles/EulerAngles.h
new file mode 100644
index 000000000..13a0da1ab
--- /dev/null
+++ b/unsupported/Eigen/src/EulerAngles/EulerAngles.h
@@ -0,0 +1,386 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2015 Tal Hadad <tal_hd@hotmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_EULERANGLESCLASS_H // TODO: Fix previous "EIGEN_EULERANGLES_H" definition?
+#define EIGEN_EULERANGLESCLASS_H
+
+namespace Eigen
+{
+ /*template<typename Other,
+ int OtherRows=Other::RowsAtCompileTime,
+ int OtherCols=Other::ColsAtCompileTime>
+ struct ei_eulerangles_assign_impl;*/
+
+ /** \class EulerAngles
+ *
+ * \ingroup EulerAngles_Module
+ *
+ * \brief Represents a rotation in a 3-dimensional space as three Euler angles.
+ *
+ * An Euler rotation is a sequence of three rotations, by three angles, about three fixed axes defined by the EulerSystem given as a template parameter.
+ *
+ * Here is how intrinsic Euler angles work:
+ * - first, rotate the axis system about the alpha axis by angle alpha
+ * - then, rotate the axis system about the beta axis (as rotated in the first stage) by angle beta
+ * - finally, rotate the axis system about the gamma axis (as rotated in the two stages above) by angle gamma
+ *
+ * \note For simplicity, this class supports only intrinsic Euler angles;
+ * see EulerSystem for how to easily emulate extrinsic systems.
+ *
+ * ### Rotation representation and conversions ###
+ *
+ * It has been proved (see the Wikipedia link below) that every rotation can be represented
+ * by Euler angles, but the representation is not unique (unlike, e.g., rotation matrices).
+ * Therefore, you can convert to and from all of Eigen's rotation types
+ * (including rotation matrices, which by Eigen's design are not called "rotations").
+ *
+ * Euler angles are typically used for:
+ * - a convenient, human-readable representation of rotation, especially in interactive GUIs.
+ * - gimbal systems and robotics.
+ * - an efficient encoding (i.e. only 3 floats) of rotation for network protocols.
+ *
+ * However, Euler angles are slow compared to quaternions or matrices,
+ * because their mathematical definition is unnatural, even though it is simple for humans.
+ * To overcome this, this class provides easy conversion from the math-friendly representation
+ * to the human-friendly representation, and vice versa.
+ *
+ * All the user needs to do is a safe, simple C++ type conversion;
+ * this class takes care of the math.
+ * Additionally, some axis-related computation is done at compile time.
+ *
+ * #### Euler angles ranges in conversions ####
+ *
+ * When converting from some rotation to Euler angles, there are several ways to control
+ * the resulting angle ranges.
+ *
+ * #### implicit ranges ####
+ * When using implicit ranges, all angles are guaranteed to be in the range [-PI, +PI],
+ * unless you convert from some other Euler angles.
+ * In that case, the range is __undefined__ (it might even be less than -PI or greater than +2*PI).
+ * \sa EulerAngles(const MatrixBase<Derived>&)
+ * \sa EulerAngles(const RotationBase<Derived, 3>&)
+ *
+ * #### explicit ranges ####
+ * When using explicit ranges, all angles are guaranteed to be in the range you choose.
+ * Each Boolean range parameter asks whether you prefer the positive range or not:
+ * - _true_ - force the range between [0, +2*PI]
+ * - _false_ - force the range between [-PI, +PI]
+ *
+ * ##### compile-time ranges #####
+ * Use these when the ranges are known at compile time and you prefer to pass them
+ * as template parameters. (e.g. for performance)
+ * \sa FromRotation()
+ *
+ * ##### run-time ranges #####
+ * Run-time ranges are also supported.
+ * \sa EulerAngles(const MatrixBase<Derived>&, bool, bool, bool)
+ * \sa EulerAngles(const RotationBase<Derived, 3>&, bool, bool, bool)
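+ * For example, a minimal illustrative sketch (the input rotation is arbitrary):
+ * \code
+ * Matrix3f m = AngleAxisf(1.0f, Vector3f::UnitZ()).toRotationMatrix();
+ * // alpha forced into [0, 2*PI]; beta and gamma stay in [-PI, +PI]
+ * EulerAnglesZYXf e(m, true, false, false);
+ * \endcode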
+ *
+ * ### Convenient user typedefs ###
+ *
+ * Convenient typedefs for EulerAngles exist for float and double scalars,
+ * in the form EulerAngles{A}{B}{C}{scalar},
+ * e.g. \ref EulerAnglesXYZd, \ref EulerAnglesZYZf.
+ *
+ * Convenient typedefs exist only for positive-axes {+x,+y,+z} Euler systems.
+ * If you need negative axes {-x,-y,-z}, it is recommended to create your own typedef
+ * with a name that conveys what you need.
+ *
+ * ### Example ###
+ *
+ * \include EulerAngles.cpp
+ * Output: \verbinclude EulerAngles.out
+ *
+ * ### Additional reading ###
+ *
+ * If you want to get a better idea of how Euler systems work in Eigen, see EulerSystem.
+ *
+ * More information about Euler angles: https://en.wikipedia.org/wiki/Euler_angles
+ *
+ * \tparam _Scalar the scalar type, i.e., the type of the angles.
+ *
+ * \tparam _System the EulerSystem to use, which represents the axes of rotation.
+ */
+ template <typename _Scalar, class _System>
+ class EulerAngles : public RotationBase<EulerAngles<_Scalar, _System>, 3>
+ {
+ public:
+ /** the scalar type of the angles */
+ typedef _Scalar Scalar;
+
+ /** the EulerSystem to use, which represents the axes of rotation. */
+ typedef _System System;
+
+ typedef Matrix<Scalar,3,3> Matrix3; /*!< the equivalent rotation matrix type */
+ typedef Matrix<Scalar,3,1> Vector3; /*!< the equivalent 3-dimensional vector type */
+ typedef Quaternion<Scalar> QuaternionType; /*!< the equivalent quaternion type */
+ typedef AngleAxis<Scalar> AngleAxisType; /*!< the equivalent angle-axis type */
+
+ /** \returns the axis vector of the first (alpha) rotation */
+ static Vector3 AlphaAxisVector() {
+ const Vector3& u = Vector3::Unit(System::AlphaAxisAbs - 1);
+ return System::IsAlphaOpposite ? -u : u;
+ }
+
+ /** \returns the axis vector of the second (beta) rotation */
+ static Vector3 BetaAxisVector() {
+ const Vector3& u = Vector3::Unit(System::BetaAxisAbs - 1);
+ return System::IsBetaOpposite ? -u : u;
+ }
+
+ /** \returns the axis vector of the third (gamma) rotation */
+ static Vector3 GammaAxisVector() {
+ const Vector3& u = Vector3::Unit(System::GammaAxisAbs - 1);
+ return System::IsGammaOpposite ? -u : u;
+ }
+
+ private:
+ Vector3 m_angles;
+
+ public:
+ /** Default constructor without initialization. */
+ EulerAngles() {}
+ /** Constructs and initializes the Euler angles (\p alpha, \p beta, \p gamma). */
+ EulerAngles(const Scalar& alpha, const Scalar& beta, const Scalar& gamma) :
+ m_angles(alpha, beta, gamma) {}
+
+ /** Constructs and initializes Euler angles from a 3x3 rotation matrix \p m.
+ *
+ * \note All angles will be in the range [-PI, PI].
+ */
+ template<typename Derived>
+ EulerAngles(const MatrixBase<Derived>& m) { *this = m; }
+
+ /** Constructs and initializes Euler angles from a 3x3 rotation matrix \p m,
+ * with options to choose the requested range for each angle.
+ *
+ * If positive range is true, then the specified angle will be in the range [0, +2*PI].
+ * Otherwise, the specified angle will be in the range [-PI, +PI].
+ *
+ * \param m The 3x3 rotation matrix to convert
+ * \param positiveRangeAlpha If true, alpha will be in [0, 2*PI]. Otherwise, in [-PI, +PI].
+ * \param positiveRangeBeta If true, beta will be in [0, 2*PI]. Otherwise, in [-PI, +PI].
+ * \param positiveRangeGamma If true, gamma will be in [0, 2*PI]. Otherwise, in [-PI, +PI].
+ */
+ template<typename Derived>
+ EulerAngles(
+ const MatrixBase<Derived>& m,
+ bool positiveRangeAlpha,
+ bool positiveRangeBeta,
+ bool positiveRangeGamma) {
+
+ System::CalcEulerAngles(*this, m, positiveRangeAlpha, positiveRangeBeta, positiveRangeGamma);
+ }
+
+ /** Constructs and initializes Euler angles from a rotation \p rot.
+ *
+ * \note All angles will be in the range [-PI, PI], unless \p rot is an EulerAngles.
+ * If \p rot is an EulerAngles, the resulting range is __undefined__.
+ * (Use the other constructors here to enforce ranges if that effect is desired.)
+ */
+ template<typename Derived>
+ EulerAngles(const RotationBase<Derived, 3>& rot) { *this = rot; }
+
+ /** Constructs and initializes Euler angles from a rotation \p rot,
+ * with options to choose the requested range for each angle.
+ *
+ * If positive range is true, then the specified angle will be in the range [0, +2*PI].
+ * Otherwise, the specified angle will be in the range [-PI, +PI].
+ *
+ * \param rot The rotation to convert
+ * \param positiveRangeAlpha If true, alpha will be in [0, 2*PI]. Otherwise, in [-PI, +PI].
+ * \param positiveRangeBeta If true, beta will be in [0, 2*PI]. Otherwise, in [-PI, +PI].
+ * \param positiveRangeGamma If true, gamma will be in [0, 2*PI]. Otherwise, in [-PI, +PI].
+ */
+ template<typename Derived>
+ EulerAngles(
+ const RotationBase<Derived, 3>& rot,
+ bool positiveRangeAlpha,
+ bool positiveRangeBeta,
+ bool positiveRangeGamma) {
+
+ System::CalcEulerAngles(*this, rot.toRotationMatrix(), positiveRangeAlpha, positiveRangeBeta, positiveRangeGamma);
+ }
+
+ /** \returns The angle values stored in a vector (alpha, beta, gamma). */
+ const Vector3& angles() const { return m_angles; }
+ /** \returns A read-write reference to the angle values stored in a vector (alpha, beta, gamma). */
+ Vector3& angles() { return m_angles; }
+
+ /** \returns The value of the first angle. */
+ Scalar alpha() const { return m_angles[0]; }
+ /** \returns A read-write reference to the first angle. */
+ Scalar& alpha() { return m_angles[0]; }
+
+ /** \returns The value of the second angle. */
+ Scalar beta() const { return m_angles[1]; }
+ /** \returns A read-write reference to the second angle. */
+ Scalar& beta() { return m_angles[1]; }
+
+ /** \returns The value of the third angle. */
+ Scalar gamma() const { return m_angles[2]; }
+ /** \returns A read-write reference to the third angle. */
+ Scalar& gamma() { return m_angles[2]; }
+
+ /** \returns The Euler angles rotation inverse (which is the same as the negative),
+ * (-alpha, -beta, -gamma).
+ */
+ EulerAngles inverse() const
+ {
+ EulerAngles res;
+ res.m_angles = -m_angles;
+ return res;
+ }
+
+ /** \returns The Euler angles rotation negative (which is the same as the inverse),
+ * (-alpha, -beta, -gamma).
+ */
+ EulerAngles operator -() const
+ {
+ return inverse();
+ }
+
+ /** Constructs and initializes Euler angles from a 3x3 rotation matrix \p m,
+ * with options to choose the requested range for each angle (__compile-time only__).
+ *
+ * If positive range is true, then the specified angle will be in the range [0, +2*PI].
+ * Otherwise, the specified angle will be in the range [-PI, +PI].
+ *
+ * \param m The 3x3 rotation matrix to convert
+ * \tparam PositiveRangeAlpha If true, alpha will be in [0, 2*PI]. Otherwise, in [-PI, +PI].
+ * \tparam PositiveRangeBeta If true, beta will be in [0, 2*PI]. Otherwise, in [-PI, +PI].
+ * \tparam PositiveRangeGamma If true, gamma will be in [0, 2*PI]. Otherwise, in [-PI, +PI].
+ */
+ template<
+ bool PositiveRangeAlpha,
+ bool PositiveRangeBeta,
+ bool PositiveRangeGamma,
+ typename Derived>
+ static EulerAngles FromRotation(const MatrixBase<Derived>& m)
+ {
+ EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(Derived, 3, 3)
+
+ EulerAngles e;
+ System::template CalcEulerAngles<
+ PositiveRangeAlpha, PositiveRangeBeta, PositiveRangeGamma, _Scalar>(e, m);
+ return e;
+ }
+
+ /** Constructs and initializes Euler angles from a rotation \p rot,
+ * with options to choose the requested range for each angle (__compile-time only__).
+ *
+ * If positive range is true, then the specified angle will be in the range [0, +2*PI].
+ * Otherwise, the specified angle will be in the range [-PI, +PI].
+ *
+ * \param rot The rotation to convert
+ * \tparam PositiveRangeAlpha If true, alpha will be in [0, 2*PI]. Otherwise, in [-PI, +PI].
+ * \tparam PositiveRangeBeta If true, beta will be in [0, 2*PI]. Otherwise, in [-PI, +PI].
+ * \tparam PositiveRangeGamma If true, gamma will be in [0, 2*PI]. Otherwise, in [-PI, +PI].
+ */
+ template<
+ bool PositiveRangeAlpha,
+ bool PositiveRangeBeta,
+ bool PositiveRangeGamma,
+ typename Derived>
+ static EulerAngles FromRotation(const RotationBase<Derived, 3>& rot)
+ {
+ return FromRotation<PositiveRangeAlpha, PositiveRangeBeta, PositiveRangeGamma>(rot.toRotationMatrix());
+ }
+
+ /*EulerAngles& fromQuaternion(const QuaternionType& q)
+ {
+ // TODO: Implement it in a faster way for quaternions
+ // According to http://www.euclideanspace.com/maths/geometry/rotations/conversions/quaternionToEuler/
+ // we can compute only the needed matrix cells and then convert to euler angles. (see ZYX example below)
+ // Currently we compute all matrix cells from quaternion.
+
+ // Special case only for ZYX
+ //Scalar y2 = q.y() * q.y();
+ //m_angles[0] = std::atan2(2*(q.w()*q.z() + q.x()*q.y()), (1 - 2*(y2 + q.z()*q.z())));
+ //m_angles[1] = std::asin( 2*(q.w()*q.y() - q.z()*q.x()));
+ //m_angles[2] = std::atan2(2*(q.w()*q.x() + q.y()*q.z()), (1 - 2*(q.x()*q.x() + y2)));
+ }*/
+
+ /** Set \c *this from a rotation matrix (i.e. a pure orthogonal matrix with determinant +1). */
+ template<typename Derived>
+ EulerAngles& operator=(const MatrixBase<Derived>& m) {
+ EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(Derived, 3, 3)
+
+ System::CalcEulerAngles(*this, m);
+ return *this;
+ }
+
+ // TODO: Assign and construct from another EulerAngles (with different system)
+
+ /** Set \c *this from a rotation. */
+ template<typename Derived>
+ EulerAngles& operator=(const RotationBase<Derived, 3>& rot) {
+ System::CalcEulerAngles(*this, rot.toRotationMatrix());
+ return *this;
+ }
+
+ // TODO: Support isApprox function
+
+ /** \returns an equivalent 3x3 rotation matrix. */
+ Matrix3 toRotationMatrix() const
+ {
+ return static_cast<QuaternionType>(*this).toRotationMatrix();
+ }
+
+ /** Convert the Euler angles to a quaternion. */
+ operator QuaternionType() const
+ {
+ return
+ AngleAxisType(alpha(), AlphaAxisVector()) *
+ AngleAxisType(beta(), BetaAxisVector()) *
+ AngleAxisType(gamma(), GammaAxisVector());
+ }
+
+ friend std::ostream& operator<<(std::ostream& s, const EulerAngles<Scalar, System>& eulerAngles)
+ {
+ s << eulerAngles.angles().transpose();
+ return s;
+ }
+ };
+
+#define EIGEN_EULER_ANGLES_SINGLE_TYPEDEF(AXES, SCALAR_TYPE, SCALAR_POSTFIX) \
+ /** \ingroup EulerAngles_Module */ \
+ typedef EulerAngles<SCALAR_TYPE, EulerSystem##AXES> EulerAngles##AXES##SCALAR_POSTFIX;
+
+#define EIGEN_EULER_ANGLES_TYPEDEFS(SCALAR_TYPE, SCALAR_POSTFIX) \
+ EIGEN_EULER_ANGLES_SINGLE_TYPEDEF(XYZ, SCALAR_TYPE, SCALAR_POSTFIX) \
+ EIGEN_EULER_ANGLES_SINGLE_TYPEDEF(XYX, SCALAR_TYPE, SCALAR_POSTFIX) \
+ EIGEN_EULER_ANGLES_SINGLE_TYPEDEF(XZY, SCALAR_TYPE, SCALAR_POSTFIX) \
+ EIGEN_EULER_ANGLES_SINGLE_TYPEDEF(XZX, SCALAR_TYPE, SCALAR_POSTFIX) \
+ \
+ EIGEN_EULER_ANGLES_SINGLE_TYPEDEF(YZX, SCALAR_TYPE, SCALAR_POSTFIX) \
+ EIGEN_EULER_ANGLES_SINGLE_TYPEDEF(YZY, SCALAR_TYPE, SCALAR_POSTFIX) \
+ EIGEN_EULER_ANGLES_SINGLE_TYPEDEF(YXZ, SCALAR_TYPE, SCALAR_POSTFIX) \
+ EIGEN_EULER_ANGLES_SINGLE_TYPEDEF(YXY, SCALAR_TYPE, SCALAR_POSTFIX) \
+ \
+ EIGEN_EULER_ANGLES_SINGLE_TYPEDEF(ZXY, SCALAR_TYPE, SCALAR_POSTFIX) \
+ EIGEN_EULER_ANGLES_SINGLE_TYPEDEF(ZXZ, SCALAR_TYPE, SCALAR_POSTFIX) \
+ EIGEN_EULER_ANGLES_SINGLE_TYPEDEF(ZYX, SCALAR_TYPE, SCALAR_POSTFIX) \
+ EIGEN_EULER_ANGLES_SINGLE_TYPEDEF(ZYZ, SCALAR_TYPE, SCALAR_POSTFIX)
+
+EIGEN_EULER_ANGLES_TYPEDEFS(float, f)
+EIGEN_EULER_ANGLES_TYPEDEFS(double, d)
+
+ namespace internal
+ {
+ template<typename _Scalar, class _System>
+ struct traits<EulerAngles<_Scalar, _System> >
+ {
+ typedef _Scalar Scalar;
+ };
+ }
+
+}
+
+#endif // EIGEN_EULERANGLESCLASS_H
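For orientation, here is how the class added above is typically used, as a minimal illustrative sketch (it assumes the unsupported EulerAngles module header added by this patch, and uses the EulerAnglesZYZd typedef defined above):

#include <unsupported/Eigen/EulerAngles>  // assumed module header from this patch
using namespace Eigen;

int main()
{
  EulerAnglesZYZd e(0.1, 0.2, 0.3);    // alpha, beta, gamma
  Quaterniond q = e;                   // via the QuaternionType conversion operator
  EulerAnglesZYZd e2(q);               // angles recovered, each in [-PI, +PI]
  Matrix3d R = e2.toRotationMatrix();  // equivalent 3x3 rotation matrix
  (void)R;
  return 0;
}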
diff --git a/unsupported/Eigen/src/EulerAngles/EulerSystem.h b/unsupported/Eigen/src/EulerAngles/EulerSystem.h
new file mode 100644
index 000000000..98f9f647d
--- /dev/null
+++ b/unsupported/Eigen/src/EulerAngles/EulerSystem.h
@@ -0,0 +1,326 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2015 Tal Hadad <tal_hd@hotmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_EULERSYSTEM_H
+#define EIGEN_EULERSYSTEM_H
+
+namespace Eigen
+{
+ // Forward declarations
+ template <typename _Scalar, class _System>
+ class EulerAngles;
+
+ namespace internal
+ {
+ // TODO: Check if this already exists elsewhere in the API
+ template <int Num, bool IsPositive = (Num > 0)>
+ struct Abs
+ {
+ enum { value = Num };
+ };
+
+ template <int Num>
+ struct Abs<Num, false>
+ {
+ enum { value = -Num };
+ };
+
+ template <int Axis>
+ struct IsValidAxis
+ {
+ enum { value = Axis != 0 && Abs<Axis>::value <= 3 };
+ };
+ }
+
+ #define EIGEN_EULER_ANGLES_CLASS_STATIC_ASSERT(COND,MSG) typedef char static_assertion_##MSG[(COND)?1:-1]
+
+ /** \brief Representation of a fixed signed rotation axis for EulerSystem.
+ *
+ * \ingroup EulerAngles_Module
+ *
+ * Values here represent:
+ * - The axis of the rotation: X, Y or Z.
+ * - The sign (i.e. direction of the rotation along the axis): positive (+) or negative (-)
+ *
+ * Therefore, this could express all the axes {+X,+Y,+Z,-X,-Y,-Z}
+ *
+ * For positive axis, use +EULER_{axis}, and for negative axis use -EULER_{axis}.
+ */
+ enum EulerAxis
+ {
+ EULER_X = 1, /*!< the X axis */
+ EULER_Y = 2, /*!< the Y axis */
+ EULER_Z = 3 /*!< the Z axis */
+ };
+
+ /** \class EulerSystem
+ *
+ * \ingroup EulerAngles_Module
+ *
+ * \brief Represents a fixed Euler rotation system.
+ *
+ * The goal of this meta-class is to represent the Euler system at compile time, for EulerAngles.
+ *
+ * You can use this class for two things:
+ * - Build an Euler system, and then pass it as a template parameter to EulerAngles.
+ * - Query some compile-time data about an Euler system. (e.g. whether it's Tait-Bryan)
+ *
+ * An Euler rotation is a sequence of three rotations about fixed axes. (see \ref EulerAngles)
+ * This meta-class stores those signed axes as compile-time constants. (see \ref EulerAxis)
+ *
+ * ### Types of Euler systems ###
+ *
+ * All and only the valid 3-dimensional Euler rotations over the standard
+ * signed axes {+X,+Y,+Z,-X,-Y,-Z} are supported:
+ * - all axes X, Y, Z in every valid order (see below for which orders are valid)
+ * - rotation about each axis in both the positive and the negative direction.
+ * - both Tait-Bryan and proper/classic Euler angles (i.e. the opposite).
+ *
+ * Since EulerSystem supports both positive and negative directions,
+ * you may know this rotation distinction under other names:
+ * - _right handed_ or _left handed_
+ * - _counterclockwise_ or _clockwise_
+ *
+ * Notice that not all axis combinations are valid; invalid ones trigger a static assertion.
+ * The same unsigned axis can't appear in two neighboring positions, e.g. {X,X,Y} is invalid.
+ * This yields two and only two classes:
+ * - _Tait-Bryan_ - all unsigned axes are distinct, e.g. {X,Y,Z}
+ * - _proper/classic Euler angles_ - the first and the third unsigned axes are equal,
+ * and the second is different, e.g. {X,Y,X}
+ *
+ * ### Intrinsic vs extrinsic Euler systems ###
+ *
+ * Only intrinsic Euler systems are supported, for simplicity.
+ * If you want to use extrinsic Euler systems,
+ * just use the equivalent intrinsic system with the opposite order of axes and angles.
+ * I.e. axes (A,B,C) become (C,B,A), and angles (a,b,c) become (c,b,a).
+ *
+ * ### Convenient user typedefs ###
+ *
+ * Convenient typedefs for EulerSystem exist (only for positive axes Euler systems),
+ * in the form EulerSystem{A}{B}{C}, e.g. \ref EulerSystemXYZ.
+ *
+ * ### Additional reading ###
+ *
+ * More information about Euler angles: https://en.wikipedia.org/wiki/Euler_angles
+ *
+ * \tparam _AlphaAxis the first fixed EulerAxis
+ *
+ * \tparam _BetaAxis the second fixed EulerAxis
+ *
+ * \tparam _GammaAxis the third fixed EulerAxis
+ */
+ template <int _AlphaAxis, int _BetaAxis, int _GammaAxis>
+ class EulerSystem
+ {
+ public:
+ // It's defined this way and not as an enum, because I think
+ // enums are not guaranteed to support negative numbers
+
+ /** The first rotation axis */
+ static const int AlphaAxis = _AlphaAxis;
+
+ /** The second rotation axis */
+ static const int BetaAxis = _BetaAxis;
+
+ /** The third rotation axis */
+ static const int GammaAxis = _GammaAxis;
+
+ enum
+ {
+ AlphaAxisAbs = internal::Abs<AlphaAxis>::value, /*!< the first rotation axis unsigned */
+ BetaAxisAbs = internal::Abs<BetaAxis>::value, /*!< the second rotation axis unsigned */
+ GammaAxisAbs = internal::Abs<GammaAxis>::value, /*!< the third rotation axis unsigned */
+
+ IsAlphaOpposite = (AlphaAxis < 0) ? 1 : 0, /*!< whether the alpha axis is negative */
+ IsBetaOpposite = (BetaAxis < 0) ? 1 : 0, /*!< whether the beta axis is negative */
+ IsGammaOpposite = (GammaAxis < 0) ? 1 : 0, /*!< whether the gamma axis is negative */
+
+ IsOdd = ((AlphaAxisAbs)%3 == (BetaAxisAbs - 1)%3) ? 0 : 1, /*!< whether the Euler system is odd */
+ IsEven = IsOdd ? 0 : 1, /*!< whether the Euler system is even */
+
+ IsTaitBryan = ((unsigned)AlphaAxisAbs != (unsigned)GammaAxisAbs) ? 1 : 0 /*!< whether the Euler system is Tait-Bryan */
+ };
+
+ private:
+
+ EIGEN_EULER_ANGLES_CLASS_STATIC_ASSERT(internal::IsValidAxis<AlphaAxis>::value,
+ ALPHA_AXIS_IS_INVALID);
+
+ EIGEN_EULER_ANGLES_CLASS_STATIC_ASSERT(internal::IsValidAxis<BetaAxis>::value,
+ BETA_AXIS_IS_INVALID);
+
+ EIGEN_EULER_ANGLES_CLASS_STATIC_ASSERT(internal::IsValidAxis<GammaAxis>::value,
+ GAMMA_AXIS_IS_INVALID);
+
+ EIGEN_EULER_ANGLES_CLASS_STATIC_ASSERT((unsigned)AlphaAxisAbs != (unsigned)BetaAxisAbs,
+ ALPHA_AXIS_CANT_BE_EQUAL_TO_BETA_AXIS);
+
+ EIGEN_EULER_ANGLES_CLASS_STATIC_ASSERT((unsigned)BetaAxisAbs != (unsigned)GammaAxisAbs,
+ BETA_AXIS_CANT_BE_EQUAL_TO_GAMMA_AXIS);
+
+ enum
+ {
+ // I, J, K are the pivot index permutation of the rotation matrix that matches this Euler system.
+ // They are used in this class's converters.
+ // They are always different from each other, and their possible values are 0, 1, or 2.
+ I = AlphaAxisAbs - 1,
+ J = (AlphaAxisAbs - 1 + 1 + IsOdd)%3,
+ K = (AlphaAxisAbs - 1 + 2 - IsOdd)%3
+ };
+
+ // TODO: Take the @mat parameter in a form that avoids double evaluation.
+ template <typename Derived>
+ static void CalcEulerAngles_imp(Matrix<typename MatrixBase<Derived>::Scalar, 3, 1>& res, const MatrixBase<Derived>& mat, internal::true_type /*isTaitBryan*/)
+ {
+ using std::atan2;
+ using std::sin;
+ using std::cos;
+
+ typedef typename Derived::Scalar Scalar;
+ typedef Matrix<Scalar,2,1> Vector2;
+
+ res[0] = atan2(mat(J,K), mat(K,K));
+ Scalar c2 = Vector2(mat(I,I), mat(I,J)).norm();
+ if((IsOdd && res[0]<Scalar(0)) || ((!IsOdd) && res[0]>Scalar(0))) {
+ if(res[0] > Scalar(0)) {
+ res[0] -= Scalar(EIGEN_PI);
+ }
+ else {
+ res[0] += Scalar(EIGEN_PI);
+ }
+ res[1] = atan2(-mat(I,K), -c2);
+ }
+ else
+ res[1] = atan2(-mat(I,K), c2);
+ Scalar s1 = sin(res[0]);
+ Scalar c1 = cos(res[0]);
+ res[2] = atan2(s1*mat(K,I)-c1*mat(J,I), c1*mat(J,J) - s1 * mat(K,J));
+ }
+
+ template <typename Derived>
+ static void CalcEulerAngles_imp(Matrix<typename MatrixBase<Derived>::Scalar,3,1>& res, const MatrixBase<Derived>& mat, internal::false_type /*isTaitBryan*/)
+ {
+ using std::atan2;
+ using std::sin;
+ using std::cos;
+
+ typedef typename Derived::Scalar Scalar;
+ typedef Matrix<Scalar,2,1> Vector2;
+
+ res[0] = atan2(mat(J,I), mat(K,I));
+ if((IsOdd && res[0]<Scalar(0)) || ((!IsOdd) && res[0]>Scalar(0)))
+ {
+ if(res[0] > Scalar(0)) {
+ res[0] -= Scalar(EIGEN_PI);
+ }
+ else {
+ res[0] += Scalar(EIGEN_PI);
+ }
+ Scalar s2 = Vector2(mat(J,I), mat(K,I)).norm();
+ res[1] = -atan2(s2, mat(I,I));
+ }
+ else
+ {
+ Scalar s2 = Vector2(mat(J,I), mat(K,I)).norm();
+ res[1] = atan2(s2, mat(I,I));
+ }
+
+ // With a=(0,1,0), we have i=0; j=1; k=2, and after computing the first two angles,
+ // we can compute their respective rotation, and apply its inverse to M. Since the result must
+ // be a rotation around x, we have:
+ //
+ // c2 s1.s2 c1.s2 1 0 0
+ // 0 c1 -s1 * M = 0 c3 s3
+ // -s2 s1.c2 c1.c2 0 -s3 c3
+ //
+ // Thus: m11.c1 - m21.s1 = c3 & m12.c1 - m22.s1 = s3
+
+ Scalar s1 = sin(res[0]);
+ Scalar c1 = cos(res[0]);
+ res[2] = atan2(c1*mat(J,K)-s1*mat(K,K), c1*mat(J,J) - s1 * mat(K,J));
+ }
+
+ template<typename Scalar>
+ static void CalcEulerAngles(
+ EulerAngles<Scalar, EulerSystem>& res,
+ const typename EulerAngles<Scalar, EulerSystem>::Matrix3& mat)
+ {
+ CalcEulerAngles(res, mat, false, false, false);
+ }
+
+ template<
+ bool PositiveRangeAlpha,
+ bool PositiveRangeBeta,
+ bool PositiveRangeGamma,
+ typename Scalar>
+ static void CalcEulerAngles(
+ EulerAngles<Scalar, EulerSystem>& res,
+ const typename EulerAngles<Scalar, EulerSystem>::Matrix3& mat)
+ {
+ CalcEulerAngles(res, mat, PositiveRangeAlpha, PositiveRangeBeta, PositiveRangeGamma);
+ }
+
+ template<typename Scalar>
+ static void CalcEulerAngles(
+ EulerAngles<Scalar, EulerSystem>& res,
+ const typename EulerAngles<Scalar, EulerSystem>::Matrix3& mat,
+ bool PositiveRangeAlpha,
+ bool PositiveRangeBeta,
+ bool PositiveRangeGamma)
+ {
+ CalcEulerAngles_imp(
+ res.angles(), mat,
+ typename internal::conditional<IsTaitBryan, internal::true_type, internal::false_type>::type());
+
+ if (IsAlphaOpposite == IsOdd)
+ res.alpha() = -res.alpha();
+
+ if (IsBetaOpposite == IsOdd)
+ res.beta() = -res.beta();
+
+ if (IsGammaOpposite == IsOdd)
+ res.gamma() = -res.gamma();
+
+ // Saturate results to the requested range
+ if (PositiveRangeAlpha && (res.alpha() < 0))
+ res.alpha() += Scalar(2 * EIGEN_PI);
+
+ if (PositiveRangeBeta && (res.beta() < 0))
+ res.beta() += Scalar(2 * EIGEN_PI);
+
+ if (PositiveRangeGamma && (res.gamma() < 0))
+ res.gamma() += Scalar(2 * EIGEN_PI);
+ }
+
+ template <typename _Scalar, class _System>
+ friend class Eigen::EulerAngles;
+ };
+
+#define EIGEN_EULER_SYSTEM_TYPEDEF(A, B, C) \
+ /** \ingroup EulerAngles_Module */ \
+ typedef EulerSystem<EULER_##A, EULER_##B, EULER_##C> EulerSystem##A##B##C;
+
+ EIGEN_EULER_SYSTEM_TYPEDEF(X,Y,Z)
+ EIGEN_EULER_SYSTEM_TYPEDEF(X,Y,X)
+ EIGEN_EULER_SYSTEM_TYPEDEF(X,Z,Y)
+ EIGEN_EULER_SYSTEM_TYPEDEF(X,Z,X)
+
+ EIGEN_EULER_SYSTEM_TYPEDEF(Y,Z,X)
+ EIGEN_EULER_SYSTEM_TYPEDEF(Y,Z,Y)
+ EIGEN_EULER_SYSTEM_TYPEDEF(Y,X,Z)
+ EIGEN_EULER_SYSTEM_TYPEDEF(Y,X,Y)
+
+ EIGEN_EULER_SYSTEM_TYPEDEF(Z,X,Y)
+ EIGEN_EULER_SYSTEM_TYPEDEF(Z,X,Z)
+ EIGEN_EULER_SYSTEM_TYPEDEF(Z,Y,X)
+ EIGEN_EULER_SYSTEM_TYPEDEF(Z,Y,Z)
+}
+
+#endif // EIGEN_EULERSYSTEM_H
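As the EulerAngles documentation recommends for negative axes, a custom system is just another typedef; a hypothetical sketch (the typedef names are made up):

// Rotate about -X, then +Y, then +Z. 'NegXYZ' is a hypothetical descriptive name.
typedef Eigen::EulerSystem<-Eigen::EULER_X, Eigen::EULER_Y, Eigen::EULER_Z> EulerSystemNegXYZ;
typedef Eigen::EulerAngles<double, EulerSystemNegXYZ> EulerAnglesNegXYZd;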
diff --git a/unsupported/Eigen/src/FFT/CMakeLists.txt b/unsupported/Eigen/src/FFT/CMakeLists.txt
deleted file mode 100644
index edcffcb18..000000000
--- a/unsupported/Eigen/src/FFT/CMakeLists.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-FILE(GLOB Eigen_FFT_SRCS "*.h")
-
-INSTALL(FILES
- ${Eigen_FFT_SRCS}
- DESTINATION ${INCLUDE_INSTALL_DIR}/unsupported/Eigen/src/FFT COMPONENT Devel
- )
diff --git a/unsupported/Eigen/src/IterativeSolvers/CMakeLists.txt b/unsupported/Eigen/src/IterativeSolvers/CMakeLists.txt
deleted file mode 100644
index 7986afc5e..000000000
--- a/unsupported/Eigen/src/IterativeSolvers/CMakeLists.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-FILE(GLOB Eigen_IterativeSolvers_SRCS "*.h")
-
-INSTALL(FILES
- ${Eigen_IterativeSolvers_SRCS}
- DESTINATION ${INCLUDE_INSTALL_DIR}/unsupported/Eigen/src/IterativeSolvers COMPONENT Devel
- )
diff --git a/unsupported/Eigen/src/IterativeSolvers/DGMRES.h b/unsupported/Eigen/src/IterativeSolvers/DGMRES.h
index 9fcc8a8d9..bae04fc30 100644
--- a/unsupported/Eigen/src/IterativeSolvers/DGMRES.h
+++ b/unsupported/Eigen/src/IterativeSolvers/DGMRES.h
@@ -40,7 +40,6 @@ void sortWithPermutation (VectorType& vec, IndexType& perm, typename IndexType::
{
eigen_assert(vec.size() == perm.size());
typedef typename IndexType::Scalar Index;
- typedef typename VectorType::Scalar Scalar;
bool flag;
for (Index k = 0; k < ncut; k++)
{
@@ -84,6 +83,8 @@ void sortWithPermutation (VectorType& vec, IndexType& perm, typename IndexType::
* x = solver.solve(b);
* \endcode
*
+ * DGMRES can also be used in a matrix-free context; see the following \link MatrixfreeSolverExample example \endlink.
+ *
* References :
* [1] D. NUENTSA WAKAM and F. PACULL, Memory Efficient Hybrid
* Algebraic Solvers for Linear Systems Arising from Compressible
@@ -101,16 +102,18 @@ template< typename _MatrixType, typename _Preconditioner>
class DGMRES : public IterativeSolverBase<DGMRES<_MatrixType,_Preconditioner> >
{
typedef IterativeSolverBase<DGMRES> Base;
- using Base::mp_matrix;
+ using Base::matrix;
using Base::m_error;
using Base::m_iterations;
using Base::m_info;
using Base::m_isInitialized;
using Base::m_tolerance;
public:
+ using Base::_solve_impl;
typedef _MatrixType MatrixType;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::Index Index;
+ typedef typename MatrixType::StorageIndex StorageIndex;
typedef typename MatrixType::RealScalar RealScalar;
typedef _Preconditioner Preconditioner;
typedef Matrix<Scalar,Dynamic,Dynamic> DenseMatrix;
@@ -133,30 +136,14 @@ class DGMRES : public IterativeSolverBase<DGMRES<_MatrixType,_Preconditioner> >
* this class becomes invalid. Call compute() to update it with the new
* matrix A, or modify a copy of A.
*/
- DGMRES(const MatrixType& A) : Base(A),m_restart(30),m_neig(0),m_r(0),m_maxNeig(5),m_isDeflAllocated(false),m_isDeflInitialized(false)
- {}
+ template<typename MatrixDerived>
+ explicit DGMRES(const EigenBase<MatrixDerived>& A) : Base(A.derived()), m_restart(30),m_neig(0),m_r(0),m_maxNeig(5),m_isDeflAllocated(false),m_isDeflInitialized(false) {}
~DGMRES() {}
- /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A
- * \a x0 as an initial solution.
- *
- * \sa compute()
- */
- template<typename Rhs,typename Guess>
- inline const internal::solve_retval_with_guess<DGMRES, Rhs, Guess>
- solveWithGuess(const MatrixBase<Rhs>& b, const Guess& x0) const
- {
- eigen_assert(m_isInitialized && "DGMRES is not initialized.");
- eigen_assert(Base::rows()==b.rows()
- && "DGMRES::solve(): invalid number of rows of the right hand side matrix b");
- return internal::solve_retval_with_guess
- <DGMRES, Rhs, Guess>(*this, b.derived(), x0);
- }
-
/** \internal */
template<typename Rhs,typename Dest>
- void _solveWithGuess(const Rhs& b, Dest& x) const
+ void _solve_with_guess_impl(const Rhs& b, Dest& x) const
{
bool failed = false;
for(int j=0; j<b.cols(); ++j)
@@ -165,7 +152,7 @@ class DGMRES : public IterativeSolverBase<DGMRES<_MatrixType,_Preconditioner> >
m_error = Base::m_tolerance;
typename Dest::ColXpr xj(x,j);
- dgmres(*mp_matrix, b.col(j), xj, Base::m_preconditioner);
+ dgmres(matrix(), b.col(j), xj, Base::m_preconditioner);
}
m_info = failed ? NumericalIssue
: m_error <= Base::m_tolerance ? Success
@@ -175,10 +162,10 @@ class DGMRES : public IterativeSolverBase<DGMRES<_MatrixType,_Preconditioner> >
/** \internal */
template<typename Rhs,typename Dest>
- void _solve(const Rhs& b, Dest& x) const
+ void _solve_impl(const Rhs& b, MatrixBase<Dest>& x) const
{
x = b;
- _solveWithGuess(b,x);
+ _solve_with_guess_impl(b,x.derived());
}
/**
* Get the restart value
@@ -217,7 +204,7 @@ class DGMRES : public IterativeSolverBase<DGMRES<_MatrixType,_Preconditioner> >
template<typename Dest>
int dgmresCycle(const MatrixType& mat, const Preconditioner& precond, Dest& x, DenseVector& r0, RealScalar& beta, const RealScalar& normRhs, int& nbIts) const;
// Compute data to use for deflation
- int dgmresComputeDeflationData(const MatrixType& mat, const Preconditioner& precond, const Index& it, Index& neig) const;
+ int dgmresComputeDeflationData(const MatrixType& mat, const Preconditioner& precond, const Index& it, StorageIndex& neig) const;
// Apply deflation to a vector
template<typename RhsType, typename DestType>
int dgmresApplyDeflation(const RhsType& In, DestType& Out) const;
@@ -233,7 +220,7 @@ class DGMRES : public IterativeSolverBase<DGMRES<_MatrixType,_Preconditioner> >
mutable DenseMatrix m_MU; // matrix operator applied to m_U (for next cycles)
mutable DenseMatrix m_T; /* T=U^T*M^{-1}*A*U */
mutable PartialPivLU<DenseMatrix> m_luT; // LU factorization of m_T
- mutable int m_neig; //Number of eigenvalues to extract at each restart
+ mutable StorageIndex m_neig; //Number of eigenvalues to extract at each restart
mutable int m_r; // Current number of deflated eigenvalues, size of m_U
mutable int m_maxNeig; // Maximum number of eigenvalues to deflate
mutable RealScalar m_lambdaN; //Modulus of the largest eigenvalue of A
@@ -353,7 +340,7 @@ int DGMRES<_MatrixType, _Preconditioner>::dgmresCycle(const MatrixType& mat, con
beta = std::abs(g(it+1));
m_error = beta/normRhs;
- std::cerr << nbIts << " Relative Residual Norm " << m_error << std::endl;
+ // std::cerr << nbIts << " Relative Residual Norm " << m_error << std::endl;
it++; nbIts++;
if (m_error < m_tolerance)
@@ -431,7 +418,7 @@ inline typename DGMRES<_MatrixType, _Preconditioner>::ComplexVector DGMRES<_Matr
}
template< typename _MatrixType, typename _Preconditioner>
-int DGMRES<_MatrixType, _Preconditioner>::dgmresComputeDeflationData(const MatrixType& mat, const Preconditioner& precond, const Index& it, Index& neig) const
+int DGMRES<_MatrixType, _Preconditioner>::dgmresComputeDeflationData(const MatrixType& mat, const Preconditioner& precond, const Index& it, StorageIndex& neig) const
{
// First, find the Schur form of the Hessenberg matrix H
typename internal::conditional<NumTraits<Scalar>::IsComplex, ComplexSchur<DenseMatrix>, RealSchur<DenseMatrix> >::type schurofH;
@@ -441,7 +428,7 @@ int DGMRES<_MatrixType, _Preconditioner>::dgmresComputeDeflationData(const Matri
schurofH.computeFromHessenberg(m_Hes.topLeftCorner(it,it), matrixQ, computeU);
ComplexVector eig(it);
- Matrix<Index,Dynamic,1>perm(it);
+ Matrix<StorageIndex,Dynamic,1>perm(it);
eig = this->schurValues(schurofH);
// Reorder the absolute values of Schur values
@@ -522,21 +509,5 @@ int DGMRES<_MatrixType, _Preconditioner>::dgmresApplyDeflation(const RhsType &x,
return 0;
}
-namespace internal {
-
- template<typename _MatrixType, typename _Preconditioner, typename Rhs>
-struct solve_retval<DGMRES<_MatrixType, _Preconditioner>, Rhs>
- : solve_retval_base<DGMRES<_MatrixType, _Preconditioner>, Rhs>
-{
- typedef DGMRES<_MatrixType, _Preconditioner> Dec;
- EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
-
- template<typename Dest> void evalTo(Dest& dst) const
- {
- dec()._solve(rhs(),dst);
- }
-};
-} // end namespace internal
-
} // end namespace Eigen
#endif
diff --git a/unsupported/Eigen/src/IterativeSolvers/GMRES.h b/unsupported/Eigen/src/IterativeSolvers/GMRES.h
index 7ba13afd2..5a82b0df6 100644
--- a/unsupported/Eigen/src/IterativeSolvers/GMRES.h
+++ b/unsupported/Eigen/src/IterativeSolvers/GMRES.h
@@ -11,193 +11,197 @@
#ifndef EIGEN_GMRES_H
#define EIGEN_GMRES_H
-namespace Eigen {
+namespace Eigen {
namespace internal {
/**
- * Generalized Minimal Residual Algorithm based on the
- * Arnoldi algorithm implemented with Householder reflections.
- *
- * Parameters:
- * \param mat matrix of linear system of equations
- * \param Rhs right hand side vector of linear system of equations
- * \param x on input: initial guess, on output: solution
- * \param precond preconditioner used
- * \param iters on input: maximum number of iterations to perform
- * on output: number of iterations performed
- * \param restart number of iterations for a restart
- * \param tol_error on input: residual tolerance
- * on output: residuum achieved
- *
- * \sa IterativeMethods::bicgstab()
- *
- *
- * For references, please see:
- *
- * Saad, Y. and Schultz, M. H.
- * GMRES: A Generalized Minimal Residual Algorithm for Solving Nonsymmetric Linear Systems.
- * SIAM J.Sci.Stat.Comp. 7, 1986, pp. 856 - 869.
- *
- * Saad, Y.
- * Iterative Methods for Sparse Linear Systems.
- * Society for Industrial and Applied Mathematics, Philadelphia, 2003.
- *
- * Walker, H. F.
- * Implementations of the GMRES method.
- * Comput.Phys.Comm. 53, 1989, pp. 311 - 320.
- *
- * Walker, H. F.
- * Implementation of the GMRES Method using Householder Transformations.
- * SIAM J.Sci.Stat.Comp. 9, 1988, pp. 152 - 163.
- *
- */
+* Generalized Minimal Residual Algorithm based on the
+* Arnoldi algorithm implemented with Householder reflections.
+*
+* Parameters:
+* \param mat matrix of linear system of equations
+* \param Rhs right hand side vector of linear system of equations
+* \param x on input: initial guess, on output: solution
+* \param precond preconditioner used
+* \param iters on input: maximum number of iterations to perform
+* on output: number of iterations performed
+* \param restart number of iterations for a restart
+* \param tol_error on input: relative residual tolerance
+* on output: residuum achieved
+*
+* \sa IterativeMethods::bicgstab()
+*
+*
+* For references, please see:
+*
+* Saad, Y. and Schultz, M. H.
+* GMRES: A Generalized Minimal Residual Algorithm for Solving Nonsymmetric Linear Systems.
+* SIAM J.Sci.Stat.Comp. 7, 1986, pp. 856 - 869.
+*
+* Saad, Y.
+* Iterative Methods for Sparse Linear Systems.
+* Society for Industrial and Applied Mathematics, Philadelphia, 2003.
+*
+* Walker, H. F.
+* Implementations of the GMRES method.
+* Comput.Phys.Comm. 53, 1989, pp. 311 - 320.
+*
+* Walker, H. F.
+* Implementation of the GMRES Method using Householder Transformations.
+* SIAM J.Sci.Stat.Comp. 9, 1988, pp. 152 - 163.
+*
+*/
template<typename MatrixType, typename Rhs, typename Dest, typename Preconditioner>
bool gmres(const MatrixType & mat, const Rhs & rhs, Dest & x, const Preconditioner & precond,
- int &iters, const int &restart, typename Dest::RealScalar & tol_error) {
+ Index &iters, const Index &restart, typename Dest::RealScalar & tol_error) {
- using std::sqrt;
- using std::abs;
+ using std::sqrt;
+ using std::abs;
- typedef typename Dest::RealScalar RealScalar;
- typedef typename Dest::Scalar Scalar;
- typedef Matrix < Scalar, Dynamic, 1 > VectorType;
- typedef Matrix < Scalar, Dynamic, Dynamic > FMatrixType;
+ typedef typename Dest::RealScalar RealScalar;
+ typedef typename Dest::Scalar Scalar;
+ typedef Matrix < Scalar, Dynamic, 1 > VectorType;
+ typedef Matrix < Scalar, Dynamic, Dynamic, ColMajor> FMatrixType;
- RealScalar tol = tol_error;
- const int maxIters = iters;
- iters = 0;
+ RealScalar tol = tol_error;
+ const Index maxIters = iters;
+ iters = 0;
- const int m = mat.rows();
+ const Index m = mat.rows();
- VectorType p0 = rhs - mat*x;
- VectorType r0 = precond.solve(p0);
-
- // is initial guess already good enough?
- if(abs(r0.norm()) < tol) {
- return true;
- }
+ // residual and preconditioned residual
+ VectorType p0 = rhs - mat*x;
+ VectorType r0 = precond.solve(p0);
- VectorType w = VectorType::Zero(restart + 1);
+ const RealScalar r0Norm = r0.norm();
- FMatrixType H = FMatrixType::Zero(m, restart + 1); // Hessenberg matrix
- VectorType tau = VectorType::Zero(restart + 1);
- std::vector < JacobiRotation < Scalar > > G(restart);
-
- // generate first Householder vector
- VectorType e(m-1);
- RealScalar beta;
- r0.makeHouseholder(e, tau.coeffRef(0), beta);
- w(0)=(Scalar) beta;
- H.bottomLeftCorner(m - 1, 1) = e;
-
- for (int k = 1; k <= restart; ++k) {
-
- ++iters;
-
- VectorType v = VectorType::Unit(m, k - 1), workspace(m);
-
- // apply Householder reflections H_{1} ... H_{k-1} to v
- for (int i = k - 1; i >= 0; --i) {
- v.tail(m - i).applyHouseholderOnTheLeft(H.col(i).tail(m - i - 1), tau.coeffRef(i), workspace.data());
- }
-
- // apply matrix M to v: v = mat * v;
- VectorType t=mat*v;
- v=precond.solve(t);
-
- // apply Householder reflections H_{k-1} ... H_{1} to v
- for (int i = 0; i < k; ++i) {
- v.tail(m - i).applyHouseholderOnTheLeft(H.col(i).tail(m - i - 1), tau.coeffRef(i), workspace.data());
- }
-
- if (v.tail(m - k).norm() != 0.0) {
-
- if (k <= restart) {
-
- // generate new Householder vector
- VectorType e(m - k - 1);
- RealScalar beta;
- v.tail(m - k).makeHouseholder(e, tau.coeffRef(k), beta);
- H.col(k).tail(m - k - 1) = e;
-
- // apply Householder reflection H_{k} to v
- v.tail(m - k).applyHouseholderOnTheLeft(H.col(k).tail(m - k - 1), tau.coeffRef(k), workspace.data());
-
- }
- }
-
- if (k > 1) {
- for (int i = 0; i < k - 1; ++i) {
- // apply old Givens rotations to v
- v.applyOnTheLeft(i, i + 1, G[i].adjoint());
- }
- }
-
- if (k<m && v(k) != (Scalar) 0) {
- // determine next Givens rotation
- G[k - 1].makeGivens(v(k - 1), v(k));
-
- // apply Givens rotation to v and w
- v.applyOnTheLeft(k - 1, k, G[k - 1].adjoint());
- w.applyOnTheLeft(k - 1, k, G[k - 1].adjoint());
-
- }
-
- // insert coefficients into upper matrix triangle
- H.col(k - 1).head(k) = v.head(k);
-
- bool stop=(k==m || abs(w(k)) < tol || iters == maxIters);
+ // is initial guess already good enough?
+ if(r0Norm == 0)
+ {
+ tol_error = 0;
+ return true;
+ }
- if (stop || k == restart) {
+ // storage for Hessenberg matrix and Householder data
+ FMatrixType H = FMatrixType::Zero(m, restart + 1);
+ VectorType w = VectorType::Zero(restart + 1);
+ VectorType tau = VectorType::Zero(restart + 1);
- // solve upper triangular system
- VectorType y = w.head(k);
- H.topLeftCorner(k, k).template triangularView < Eigen::Upper > ().solveInPlace(y);
+ // storage for Jacobi rotations
+ std::vector < JacobiRotation < Scalar > > G(restart);
+
+ // storage for temporaries
+ VectorType t(m), v(m), workspace(m), x_new(m);
+
+ // generate first Householder vector
+ Ref<VectorType> H0_tail = H.col(0).tail(m - 1);
+ RealScalar beta;
+ r0.makeHouseholder(H0_tail, tau.coeffRef(0), beta);
+ w(0) = Scalar(beta);
+
+ for (Index k = 1; k <= restart; ++k)
+ {
+ ++iters;
- // use Horner-like scheme to calculate solution vector
- VectorType x_new = y(k - 1) * VectorType::Unit(m, k - 1);
+ v = VectorType::Unit(m, k - 1);
- // apply Householder reflection H_{k} to x_new
- x_new.tail(m - k + 1).applyHouseholderOnTheLeft(H.col(k - 1).tail(m - k), tau.coeffRef(k - 1), workspace.data());
+ // apply Householder reflections H_{1} ... H_{k-1} to v
+ // TODO: use a HouseholderSequence
+ for (Index i = k - 1; i >= 0; --i) {
+ v.tail(m - i).applyHouseholderOnTheLeft(H.col(i).tail(m - i - 1), tau.coeffRef(i), workspace.data());
+ }
- for (int i = k - 2; i >= 0; --i) {
- x_new += y(i) * VectorType::Unit(m, i);
- // apply Householder reflection H_{i} to x_new
- x_new.tail(m - i).applyHouseholderOnTheLeft(H.col(i).tail(m - i - 1), tau.coeffRef(i), workspace.data());
- }
+ // apply matrix M to v: v = mat * v;
+ t.noalias() = mat * v;
+ v = precond.solve(t);
- x += x_new;
+ // apply Householder reflections H_{k-1} ... H_{1} to v
+ // TODO: use a HouseholderSequence
+ for (Index i = 0; i < k; ++i) {
+ v.tail(m - i).applyHouseholderOnTheLeft(H.col(i).tail(m - i - 1), tau.coeffRef(i), workspace.data());
+ }
- if (stop) {
- return true;
- } else {
- k=0;
+ if (v.tail(m - k).norm() != 0.0)
+ {
+ if (k <= restart)
+ {
+ // generate new Householder vector
+ Ref<VectorType> Hk_tail = H.col(k).tail(m - k - 1);
+ v.tail(m - k).makeHouseholder(Hk_tail, tau.coeffRef(k), beta);
+
+ // apply Householder reflection H_{k} to v
+ v.tail(m - k).applyHouseholderOnTheLeft(Hk_tail, tau.coeffRef(k), workspace.data());
+ }
+ }
- // reset data for a restart r0 = rhs - mat * x;
- VectorType p0=mat*x;
- VectorType p1=precond.solve(p0);
- r0 = rhs - p1;
-// r0_sqnorm = r0.squaredNorm();
- w = VectorType::Zero(restart + 1);
- H = FMatrixType::Zero(m, restart + 1);
- tau = VectorType::Zero(restart + 1);
+ if (k > 1)
+ {
+ for (Index i = 0; i < k - 1; ++i)
+ {
+ // apply old Givens rotations to v
+ v.applyOnTheLeft(i, i + 1, G[i].adjoint());
+ }
+ }
- // generate first Householder vector
- RealScalar beta;
- r0.makeHouseholder(e, tau.coeffRef(0), beta);
- w(0)=(Scalar) beta;
- H.bottomLeftCorner(m - 1, 1) = e;
+ if (k<m && v(k) != (Scalar) 0)
+ {
+ // determine next Givens rotation
+ G[k - 1].makeGivens(v(k - 1), v(k));
- }
+ // apply Givens rotation to v and w
+ v.applyOnTheLeft(k - 1, k, G[k - 1].adjoint());
+ w.applyOnTheLeft(k - 1, k, G[k - 1].adjoint());
+ }
- }
+ // insert coefficients into upper matrix triangle
+ H.col(k-1).head(k) = v.head(k);
+ tol_error = abs(w(k)) / r0Norm;
+ bool stop = (k==m || tol_error < tol || iters == maxIters);
+ if (stop || k == restart)
+ {
+ // solve upper triangular system
+ Ref<VectorType> y = w.head(k);
+ H.topLeftCorner(k, k).template triangularView <Upper>().solveInPlace(y);
+
+ // use Horner-like scheme to calculate solution vector
+ x_new.setZero();
+ for (Index i = k - 1; i >= 0; --i)
+ {
+ x_new(i) += y(i);
+ // apply Householder reflection H_{i} to x_new
+ x_new.tail(m - i).applyHouseholderOnTheLeft(H.col(i).tail(m - i - 1), tau.coeffRef(i), workspace.data());
+ }
+
+ x += x_new;
+
+ if(stop)
+ {
+ return true;
+ }
+ else
+ {
+ k=0;
+
+ // reset data for restart
+ p0.noalias() = rhs - mat*x;
+ r0 = precond.solve(p0);
+
+ // clear Hessenberg matrix and Householder data
+ H.setZero();
+ w.setZero();
+ tau.setZero();
+
+ // generate first Householder vector
+ r0.makeHouseholder(H0_tail, tau.coeffRef(0), beta);
+ w(0) = Scalar(beta);
+ }
+ }
+ }
- }
-
- return false;
+ return false;
}
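The least-squares update above eliminates each new subdiagonal entry of the Hessenberg column with a Givens rotation (G[k-1].makeGivens followed by applyOnTheLeft). A standalone sketch of that single step, using only the main library and made-up values:

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::VectorXd v(2);
  v << 3.0, 4.0;
  Eigen::JacobiRotation<double> G;
  G.makeGivens(v(0), v(1));             // choose c,s so the rotation annihilates the second entry
  v.applyOnTheLeft(0, 1, G.adjoint());  // v becomes (5, 0): the 2-norm moves into v(0)
  std::cout << v.transpose() << std::endl;
  return 0;
}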
@@ -230,7 +234,7 @@ struct traits<GMRES<_MatrixType,_Preconditioner> >
* The maximal number of iterations and tolerance value can be controlled via the setMaxIterations()
* and setTolerance() methods. The defaults are the size of the problem for the maximal number of iterations
* and NumTraits<Scalar>::epsilon() for the tolerance.
- *
+ *
 * This class can be used in the same way as the direct solver classes. Here is a typical usage example:
* \code
* int n = 10000;
@@ -244,29 +248,31 @@ struct traits<GMRES<_MatrixType,_Preconditioner> >
* // update b, and solve again
* x = solver.solve(b);
* \endcode
- *
+ *
* By default the iterations start with x=0 as an initial guess of the solution.
* One can control the start using the solveWithGuess() method.
*
+ * GMRES can also be used in a matrix-free context, see the following \link MatrixfreeSolverExample example \endlink.
+ *
* \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner
*/
template< typename _MatrixType, typename _Preconditioner>
class GMRES : public IterativeSolverBase<GMRES<_MatrixType,_Preconditioner> >
{
typedef IterativeSolverBase<GMRES> Base;
- using Base::mp_matrix;
+ using Base::matrix;
using Base::m_error;
using Base::m_iterations;
using Base::m_info;
using Base::m_isInitialized;
-
+
private:
- int m_restart;
-
+ Index m_restart;
+
public:
+ using Base::_solve_impl;
typedef _MatrixType MatrixType;
typedef typename MatrixType::Scalar Scalar;
- typedef typename MatrixType::Index Index;
typedef typename MatrixType::RealScalar RealScalar;
typedef _Preconditioner Preconditioner;
@@ -276,95 +282,62 @@ public:
GMRES() : Base(), m_restart(30) {}
/** Initialize the solver with matrix \a A for further \c Ax=b solving.
- *
+ *
* This constructor is a shortcut for the default constructor followed
* by a call to compute().
- *
+ *
* \warning this class stores a reference to the matrix A as well as some
* precomputed values that depend on it. Therefore, if \a A is changed
* this class becomes invalid. Call compute() to update it with the new
* matrix A, or modify a copy of A.
*/
- GMRES(const MatrixType& A) : Base(A), m_restart(30) {}
+ template<typename MatrixDerived>
+ explicit GMRES(const EigenBase<MatrixDerived>& A) : Base(A.derived()), m_restart(30) {}
~GMRES() {}
-
+
    /** Get the number of iterations after which a restart is performed.
*/
- int get_restart() { return m_restart; }
-
+ Index get_restart() { return m_restart; }
+
    /** Set the number of iterations after which a restart is performed.
    * \param restart number of iterations for a restart, default is 30.
*/
- void set_restart(const int restart) { m_restart=restart; }
-
- /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A
- * \a x0 as an initial solution.
- *
- * \sa compute()
- */
- template<typename Rhs,typename Guess>
- inline const internal::solve_retval_with_guess<GMRES, Rhs, Guess>
- solveWithGuess(const MatrixBase<Rhs>& b, const Guess& x0) const
- {
- eigen_assert(m_isInitialized && "GMRES is not initialized.");
- eigen_assert(Base::rows()==b.rows()
- && "GMRES::solve(): invalid number of rows of the right hand side matrix b");
- return internal::solve_retval_with_guess
- <GMRES, Rhs, Guess>(*this, b.derived(), x0);
- }
-
+ void set_restart(const Index restart) { m_restart=restart; }
+
/** \internal */
template<typename Rhs,typename Dest>
- void _solveWithGuess(const Rhs& b, Dest& x) const
- {
+ void _solve_with_guess_impl(const Rhs& b, Dest& x) const
+ {
bool failed = false;
- for(int j=0; j<b.cols(); ++j)
+ for(Index j=0; j<b.cols(); ++j)
{
m_iterations = Base::maxIterations();
m_error = Base::m_tolerance;
-
+
typename Dest::ColXpr xj(x,j);
- if(!internal::gmres(*mp_matrix, b.col(j), xj, Base::m_preconditioner, m_iterations, m_restart, m_error))
+ if(!internal::gmres(matrix(), b.col(j), xj, Base::m_preconditioner, m_iterations, m_restart, m_error))
failed = true;
}
m_info = failed ? NumericalIssue
- : m_error <= Base::m_tolerance ? Success
- : NoConvergence;
+ : m_error <= Base::m_tolerance ? Success
+ : NoConvergence;
m_isInitialized = true;
}
/** \internal */
template<typename Rhs,typename Dest>
- void _solve(const Rhs& b, Dest& x) const
+ void _solve_impl(const Rhs& b, MatrixBase<Dest> &x) const
{
x = b;
if(x.squaredNorm() == 0) return; // Check Zero right hand side
- _solveWithGuess(b,x);
+ _solve_with_guess_impl(b,x.derived());
}
protected:
};
-
-namespace internal {
-
- template<typename _MatrixType, typename _Preconditioner, typename Rhs>
-struct solve_retval<GMRES<_MatrixType, _Preconditioner>, Rhs>
- : solve_retval_base<GMRES<_MatrixType, _Preconditioner>, Rhs>
-{
- typedef GMRES<_MatrixType, _Preconditioner> Dec;
- EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
-
- template<typename Dest> void evalTo(Dest& dst) const
- {
- dec()._solve(rhs(),dst);
- }
-};
-
-} // end namespace internal
-
} // end namespace Eigen
#endif // EIGEN_GMRES_H
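Taken together, the changes above port GMRES to the new IterativeSolverBase interface while keeping the restart knob. A minimal usage sketch of the resulting class (the helper name is hypothetical, not part of the patch):

#include <Eigen/Sparse>
#include <unsupported/Eigen/IterativeSolvers>

Eigen::VectorXd solveWithRestart(const Eigen::SparseMatrix<double>& A,
                                 const Eigen::VectorXd& b)
{
  Eigen::GMRES<Eigen::SparseMatrix<double> > solver(A);
  solver.set_restart(50);      // restart the Householder-Arnoldi process every 50 iterations
  solver.setTolerance(1e-8);   // relative tolerance, matched against tol_error in gmres()
  return solver.solve(b);
}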
diff --git a/unsupported/Eigen/src/IterativeSolvers/IncompleteCholesky.h b/unsupported/Eigen/src/IterativeSolvers/IncompleteCholesky.h
deleted file mode 100644
index 661c1f2e0..000000000
--- a/unsupported/Eigen/src/IterativeSolvers/IncompleteCholesky.h
+++ /dev/null
@@ -1,278 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_INCOMPLETE_CHOlESKY_H
-#define EIGEN_INCOMPLETE_CHOlESKY_H
-#include "Eigen/src/IterativeLinearSolvers/IncompleteLUT.h"
-#include <Eigen/OrderingMethods>
-#include <list>
-
-namespace Eigen {
-/**
- * \brief Modified Incomplete Cholesky with dual threshold
- *
- * References : C-J. Lin and J. J. Moré, Incomplete Cholesky Factorizations with
- * Limited memory, SIAM J. Sci. Comput. 21(1), pp. 24-45, 1999
- *
- * \tparam _MatrixType The type of the sparse matrix. It should be a symmetric
- * matrix. It is advised to give a row-oriented sparse matrix
- * \tparam _UpLo The triangular part of the matrix to reference.
- * \tparam _OrderingType
- */
-
-template <typename Scalar, int _UpLo = Lower, typename _OrderingType = NaturalOrdering<int> >
-class IncompleteCholesky : internal::noncopyable
-{
- public:
- typedef SparseMatrix<Scalar,ColMajor> MatrixType;
- typedef _OrderingType OrderingType;
- typedef typename MatrixType::RealScalar RealScalar;
- typedef typename MatrixType::Index Index;
- typedef PermutationMatrix<Dynamic, Dynamic, Index> PermutationType;
- typedef Matrix<Scalar,Dynamic,1> ScalarType;
- typedef Matrix<Index,Dynamic, 1> IndexType;
- typedef std::vector<std::list<Index> > VectorList;
- enum { UpLo = _UpLo };
- public:
- IncompleteCholesky() : m_shift(1),m_factorizationIsOk(false) {}
- IncompleteCholesky(const MatrixType& matrix) : m_shift(1),m_factorizationIsOk(false)
- {
- compute(matrix);
- }
-
- Index rows() const { return m_L.rows(); }
-
- Index cols() const { return m_L.cols(); }
-
-
- /** \brief Reports whether previous computation was successful.
- *
- * \returns \c Success if computation was succesful,
- * \c NumericalIssue if the matrix appears to be negative.
- */
- ComputationInfo info() const
- {
- eigen_assert(m_isInitialized && "IncompleteLLT is not initialized.");
- return m_info;
- }
-
- /**
- * \brief Set the initial shift parameter
- */
- void setShift( Scalar shift) { m_shift = shift; }
-
- /**
- * \brief Computes the fill reducing permutation vector.
- */
- template<typename MatrixType>
- void analyzePattern(const MatrixType& mat)
- {
- OrderingType ord;
- ord(mat.template selfadjointView<UpLo>(), m_perm);
- m_analysisIsOk = true;
- }
-
- template<typename MatrixType>
- void factorize(const MatrixType& amat);
-
- template<typename MatrixType>
- void compute (const MatrixType& matrix)
- {
- analyzePattern(matrix);
- factorize(matrix);
- }
-
- template<typename Rhs, typename Dest>
- void _solve(const Rhs& b, Dest& x) const
- {
- eigen_assert(m_factorizationIsOk && "factorize() should be called first");
- if (m_perm.rows() == b.rows())
- x = m_perm.inverse() * b;
- else
- x = b;
- x = m_scal.asDiagonal() * x;
- x = m_L.template triangularView<UnitLower>().solve(x);
- x = m_L.adjoint().template triangularView<Upper>().solve(x);
- if (m_perm.rows() == b.rows())
- x = m_perm * x;
- x = m_scal.asDiagonal() * x;
- }
- template<typename Rhs> inline const internal::solve_retval<IncompleteCholesky, Rhs>
- solve(const MatrixBase<Rhs>& b) const
- {
- eigen_assert(m_factorizationIsOk && "IncompleteLLT did not succeed");
- eigen_assert(m_isInitialized && "IncompleteLLT is not initialized.");
- eigen_assert(cols()==b.rows()
- && "IncompleteLLT::solve(): invalid number of rows of the right hand side matrix b");
- return internal::solve_retval<IncompleteCholesky, Rhs>(*this, b.derived());
- }
- protected:
- SparseMatrix<Scalar,ColMajor> m_L; // The lower part stored in CSC
- ScalarType m_scal; // The vector for scaling the matrix
- Scalar m_shift; //The initial shift parameter
- bool m_analysisIsOk;
- bool m_factorizationIsOk;
- bool m_isInitialized;
- ComputationInfo m_info;
- PermutationType m_perm;
-
- private:
- template <typename IdxType, typename SclType>
- inline void updateList(const IdxType& colPtr, IdxType& rowIdx, SclType& vals, const Index& col, const Index& jk, IndexType& firstElt, VectorList& listCol);
-};
-
-template<typename Scalar, int _UpLo, typename OrderingType>
-template<typename _MatrixType>
-void IncompleteCholesky<Scalar,_UpLo, OrderingType>::factorize(const _MatrixType& mat)
-{
- using std::sqrt;
- using std::min;
- eigen_assert(m_analysisIsOk && "analyzePattern() should be called first");
-
- // Dropping strategies : Keep only the p largest elements per column, where p is the number of elements in the column of the original matrix. Other strategies will be added
-
- // Apply the fill-reducing permutation computed in analyzePattern()
- if (m_perm.rows() == mat.rows() ) // To detect the null permutation
- m_L.template selfadjointView<Lower>() = mat.template selfadjointView<_UpLo>().twistedBy(m_perm);
- else
- m_L.template selfadjointView<Lower>() = mat.template selfadjointView<_UpLo>();
-
- Index n = m_L.cols();
- Index nnz = m_L.nonZeros();
- Map<ScalarType> vals(m_L.valuePtr(), nnz); //values
- Map<IndexType> rowIdx(m_L.innerIndexPtr(), nnz); //Row indices
- Map<IndexType> colPtr( m_L.outerIndexPtr(), n+1); // Pointer to the beginning of each row
- IndexType firstElt(n-1); // for each j, points to the next entry in vals that will be used in the factorization
- VectorList listCol(n); // listCol(j) is a linked list of columns to update column j
- ScalarType curCol(n); // Store a nonzero values in each column
- IndexType irow(n); // Row indices of nonzero elements in each column
-
-
- // Computes the scaling factors
- m_scal.resize(n);
- for (int j = 0; j < n; j++)
- {
- m_scal(j) = m_L.col(j).norm();
- m_scal(j) = sqrt(m_scal(j));
- }
- // Scale and compute the shift for the matrix
- Scalar mindiag = vals[0];
- for (int j = 0; j < n; j++){
- for (int k = colPtr[j]; k < colPtr[j+1]; k++)
- vals[k] /= (m_scal(j) * m_scal(rowIdx[k]));
- mindiag = (min)(vals[colPtr[j]], mindiag);
- }
-
- if(mindiag < Scalar(0.)) m_shift = m_shift - mindiag;
- // Apply the shift to the diagonal elements of the matrix
- for (int j = 0; j < n; j++)
- vals[colPtr[j]] += m_shift;
- // jki version of the Cholesky factorization
- for (int j=0; j < n; ++j)
- {
- //Left-looking factorize the column j
- // First, load the jth column into curCol
- Scalar diag = vals[colPtr[j]]; // It is assumed that only the lower part is stored
- curCol.setZero();
- irow.setLinSpaced(n,0,n-1);
- for (int i = colPtr[j] + 1; i < colPtr[j+1]; i++)
- {
- curCol(rowIdx[i]) = vals[i];
- irow(rowIdx[i]) = rowIdx[i];
- }
- std::list<int>::iterator k;
- // Browse all previous columns that will update column j
- for(k = listCol[j].begin(); k != listCol[j].end(); k++)
- {
- int jk = firstElt(*k); // First element to use in the column
- jk += 1;
- for (int i = jk; i < colPtr[*k+1]; i++)
- {
- curCol(rowIdx[i]) -= vals[i] * vals[jk] ;
- }
- updateList(colPtr,rowIdx,vals, *k, jk, firstElt, listCol);
- }
-
- // Scale the current column
- if(RealScalar(diag) <= 0)
- {
- std::cerr << "\nNegative diagonal during Incomplete factorization... "<< j << "\n";
- m_info = NumericalIssue;
- return;
- }
- RealScalar rdiag = sqrt(RealScalar(diag));
- vals[colPtr[j]] = rdiag;
- for (int i = j+1; i < n; i++)
- {
- //Scale
- curCol(i) /= rdiag;
- //Update the remaining diagonals with curCol
- vals[colPtr[i]] -= curCol(i) * curCol(i);
- }
- // Select the largest p elements
- // p is the original number of elements in the column (without the diagonal)
- int p = colPtr[j+1] - colPtr[j] - 1 ;
- internal::QuickSplit(curCol, irow, p);
- // Insert the largest p elements in the matrix
- int cpt = 0;
- for (int i = colPtr[j]+1; i < colPtr[j+1]; i++)
- {
- vals[i] = curCol(cpt);
- rowIdx[i] = irow(cpt);
- cpt ++;
- }
- // Get the first smallest row index and put it after the diagonal element
- Index jk = colPtr(j)+1;
- updateList(colPtr,rowIdx,vals,j,jk,firstElt,listCol);
- }
- m_factorizationIsOk = true;
- m_isInitialized = true;
- m_info = Success;
-}
-
-template<typename Scalar, int _UpLo, typename OrderingType>
-template <typename IdxType, typename SclType>
-inline void IncompleteCholesky<Scalar,_UpLo, OrderingType>::updateList(const IdxType& colPtr, IdxType& rowIdx, SclType& vals, const Index& col, const Index& jk, IndexType& firstElt, VectorList& listCol)
-{
- if (jk < colPtr(col+1) )
- {
- Index p = colPtr(col+1) - jk;
- Index minpos;
- rowIdx.segment(jk,p).minCoeff(&minpos);
- minpos += jk;
- if (rowIdx(minpos) != rowIdx(jk))
- {
- //Swap
- std::swap(rowIdx(jk),rowIdx(minpos));
- std::swap(vals(jk),vals(minpos));
- }
- firstElt(col) = jk;
- listCol[rowIdx(jk)].push_back(col);
- }
-}
-namespace internal {
-
-template<typename _Scalar, int _UpLo, typename OrderingType, typename Rhs>
-struct solve_retval<IncompleteCholesky<_Scalar, _UpLo, OrderingType>, Rhs>
- : solve_retval_base<IncompleteCholesky<_Scalar, _UpLo, OrderingType>, Rhs>
-{
- typedef IncompleteCholesky<_Scalar, _UpLo, OrderingType> Dec;
- EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
-
- template<typename Dest> void evalTo(Dest& dst) const
- {
- dec()._solve(rhs(),dst);
- }
-};
-
-} // end namespace internal
-
-} // end namespace Eigen
-
-#endif
diff --git a/unsupported/Eigen/src/IterativeSolvers/IncompleteLU.h b/unsupported/Eigen/src/IterativeSolvers/IncompleteLU.h
index 67e780181..7d08c3515 100644
--- a/unsupported/Eigen/src/IterativeSolvers/IncompleteLU.h
+++ b/unsupported/Eigen/src/IterativeSolvers/IncompleteLU.h
@@ -13,8 +13,12 @@
namespace Eigen {
template <typename _Scalar>
-class IncompleteLU
+class IncompleteLU : public SparseSolverBase<IncompleteLU<_Scalar> >
{
+ protected:
+ typedef SparseSolverBase<IncompleteLU<_Scalar> > Base;
+ using Base::m_isInitialized;
+
typedef _Scalar Scalar;
typedef Matrix<Scalar,Dynamic,1> Vector;
typedef typename Vector::Index Index;
@@ -23,10 +27,10 @@ class IncompleteLU
public:
typedef Matrix<Scalar,Dynamic,Dynamic> MatrixType;
- IncompleteLU() : m_isInitialized(false) {}
+ IncompleteLU() {}
template<typename MatrixType>
- IncompleteLU(const MatrixType& mat) : m_isInitialized(false)
+ IncompleteLU(const MatrixType& mat)
{
compute(mat);
}
@@ -71,43 +75,16 @@ class IncompleteLU
}
template<typename Rhs, typename Dest>
- void _solve(const Rhs& b, Dest& x) const
+ void _solve_impl(const Rhs& b, Dest& x) const
{
x = m_lu.template triangularView<UnitLower>().solve(b);
x = m_lu.template triangularView<Upper>().solve(x);
}
- template<typename Rhs> inline const internal::solve_retval<IncompleteLU, Rhs>
- solve(const MatrixBase<Rhs>& b) const
- {
- eigen_assert(m_isInitialized && "IncompleteLU is not initialized.");
- eigen_assert(cols()==b.rows()
- && "IncompleteLU::solve(): invalid number of rows of the right hand side matrix b");
- return internal::solve_retval<IncompleteLU, Rhs>(*this, b.derived());
- }
-
protected:
FactorType m_lu;
- bool m_isInitialized;
-};
-
-namespace internal {
-
-template<typename _MatrixType, typename Rhs>
-struct solve_retval<IncompleteLU<_MatrixType>, Rhs>
- : solve_retval_base<IncompleteLU<_MatrixType>, Rhs>
-{
- typedef IncompleteLU<_MatrixType> Dec;
- EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
-
- template<typename Dest> void evalTo(Dest& dst) const
- {
- dec()._solve(rhs(),dst);
- }
};
-} // end namespace internal
-
} // end namespace Eigen
#endif // EIGEN_INCOMPLETE_LU_H
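With IncompleteLU now deriving from SparseSolverBase, solve() is inherited rather than hand-rolled, and the class can be dropped in as the preconditioner template argument of the iterative solvers patched above. A sketch under that assumption (helper name hypothetical):

#include <Eigen/Sparse>
#include <unsupported/Eigen/IterativeSolvers>

typedef Eigen::SparseMatrix<double> SpMat;

Eigen::VectorXd solveWithILU(const SpMat& A, const Eigen::VectorXd& b)
{
  Eigen::GMRES<SpMat, Eigen::IncompleteLU<double> > solver(A);  // ILU replaces the default diagonal preconditioner
  return solver.solve(b);
}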
diff --git a/unsupported/Eigen/src/IterativeSolvers/MINRES.h b/unsupported/Eigen/src/IterativeSolvers/MINRES.h
index 30f26aa50..256990c1a 100644
--- a/unsupported/Eigen/src/IterativeSolvers/MINRES.h
+++ b/unsupported/Eigen/src/IterativeSolvers/MINRES.h
@@ -2,7 +2,7 @@
// for linear algebra.
//
// Copyright (C) 2012 Giacomo Po <gpo@ucla.edu>
-// Copyright (C) 2011 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2011-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@@ -29,7 +29,7 @@ namespace Eigen {
template<typename MatrixType, typename Rhs, typename Dest, typename Preconditioner>
EIGEN_DONT_INLINE
void minres(const MatrixType& mat, const Rhs& rhs, Dest& x,
- const Preconditioner& precond, int& iters,
+ const Preconditioner& precond, Index& iters,
typename Dest::RealScalar& tol_error)
{
using std::sqrt;
@@ -48,8 +48,8 @@ namespace Eigen {
}
// initialize
- const int maxIters(iters); // initialize maxIters to iters
- const int N(mat.cols()); // the size of the matrix
+ const Index maxIters(iters); // initialize maxIters to iters
+ const Index N(mat.cols()); // the size of the matrix
const RealScalar threshold2(tol_error*tol_error*rhsNorm2); // convergence threshold (compared to residualNorm2)
// Initialize preconditioned Lanczos
@@ -144,7 +144,6 @@ namespace Eigen {
template< typename _MatrixType, int _UpLo=Lower,
typename _Preconditioner = IdentityPreconditioner>
-// typename _Preconditioner = IdentityPreconditioner<typename _MatrixType::Scalar> > // preconditioner must be positive definite
class MINRES;
namespace internal {
@@ -166,8 +165,8 @@ namespace Eigen {
* The vectors x and b can be either dense or sparse.
*
* \tparam _MatrixType the type of the sparse matrix A, can be a dense or a sparse matrix.
- * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower
- * or Upper. Default is Lower.
+ * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower,
+ * Upper, or Lower|Upper, in which case the full matrix entries will be considered. Default is Lower.
* \tparam _Preconditioner the type of the preconditioner. Default is DiagonalPreconditioner
*
* The maximal number of iterations and tolerance value can be controlled via the setMaxIterations()
@@ -192,6 +191,8 @@ namespace Eigen {
* By default the iterations start with x=0 as an initial guess of the solution.
* One can control the start using the solveWithGuess() method.
*
+ * MINRES can also be used in a matrix-free context, see the following \link MatrixfreeSolverExample example \endlink.
+ *
* \sa class ConjugateGradient, BiCGSTAB, SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner
*/
template< typename _MatrixType, int _UpLo, typename _Preconditioner>
@@ -199,15 +200,15 @@ namespace Eigen {
{
typedef IterativeSolverBase<MINRES> Base;
- using Base::mp_matrix;
+ using Base::matrix;
using Base::m_error;
using Base::m_iterations;
using Base::m_info;
using Base::m_isInitialized;
public:
+ using Base::_solve_impl;
typedef _MatrixType MatrixType;
typedef typename MatrixType::Scalar Scalar;
- typedef typename MatrixType::Index Index;
typedef typename MatrixType::RealScalar RealScalar;
typedef _Preconditioner Preconditioner;
@@ -228,46 +229,41 @@ namespace Eigen {
* this class becomes invalid. Call compute() to update it with the new
* matrix A, or modify a copy of A.
*/
- MINRES(const MatrixType& A) : Base(A) {}
+ template<typename MatrixDerived>
+ explicit MINRES(const EigenBase<MatrixDerived>& A) : Base(A.derived()) {}
/** Destructor. */
~MINRES(){}
-
- /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A
- * \a x0 as an initial solution.
- *
- * \sa compute()
- */
- template<typename Rhs,typename Guess>
- inline const internal::solve_retval_with_guess<MINRES, Rhs, Guess>
- solveWithGuess(const MatrixBase<Rhs>& b, const Guess& x0) const
- {
- eigen_assert(m_isInitialized && "MINRES is not initialized.");
- eigen_assert(Base::rows()==b.rows()
- && "MINRES::solve(): invalid number of rows of the right hand side matrix b");
- return internal::solve_retval_with_guess
- <MINRES, Rhs, Guess>(*this, b.derived(), x0);
- }
-
+
/** \internal */
template<typename Rhs,typename Dest>
- void _solveWithGuess(const Rhs& b, Dest& x) const
+ void _solve_with_guess_impl(const Rhs& b, Dest& x) const
{
+ typedef typename Base::MatrixWrapper MatrixWrapper;
+ typedef typename Base::ActualMatrixType ActualMatrixType;
+ enum {
+ TransposeInput = (!MatrixWrapper::MatrixFree)
+ && (UpLo==(Lower|Upper))
+ && (!MatrixType::IsRowMajor)
+ && (!NumTraits<Scalar>::IsComplex)
+ };
+ typedef typename internal::conditional<TransposeInput,Transpose<const ActualMatrixType>, ActualMatrixType const&>::type RowMajorWrapper;
+ EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(MatrixWrapper::MatrixFree,UpLo==(Lower|Upper)),MATRIX_FREE_CONJUGATE_GRADIENT_IS_COMPATIBLE_WITH_UPPER_UNION_LOWER_MODE_ONLY);
typedef typename internal::conditional<UpLo==(Lower|Upper),
- const MatrixType&,
- SparseSelfAdjointView<const MatrixType, UpLo>
- >::type MatrixWrapperType;
-
+ RowMajorWrapper,
+ typename MatrixWrapper::template ConstSelfAdjointViewReturnType<UpLo>::Type
+ >::type SelfAdjointWrapper;
+
m_iterations = Base::maxIterations();
m_error = Base::m_tolerance;
-
+ RowMajorWrapper row_mat(matrix());
for(int j=0; j<b.cols(); ++j)
{
m_iterations = Base::maxIterations();
m_error = Base::m_tolerance;
typename Dest::ColXpr xj(x,j);
- internal::minres(MatrixWrapperType(*mp_matrix), b.col(j), xj,
+ internal::minres(SelfAdjointWrapper(row_mat), b.col(j), xj,
Base::m_preconditioner, m_iterations, m_error);
}
@@ -277,33 +273,16 @@ namespace Eigen {
/** \internal */
template<typename Rhs,typename Dest>
- void _solve(const Rhs& b, Dest& x) const
+ void _solve_impl(const Rhs& b, MatrixBase<Dest> &x) const
{
x.setZero();
- _solveWithGuess(b,x);
+ _solve_with_guess_impl(b,x.derived());
}
protected:
};
-
- namespace internal {
-
- template<typename _MatrixType, int _UpLo, typename _Preconditioner, typename Rhs>
- struct solve_retval<MINRES<_MatrixType,_UpLo,_Preconditioner>, Rhs>
- : solve_retval_base<MINRES<_MatrixType,_UpLo,_Preconditioner>, Rhs>
- {
- typedef MINRES<_MatrixType,_UpLo,_Preconditioner> Dec;
- EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
-
- template<typename Dest> void evalTo(Dest& dst) const
- {
- dec()._solve(rhs(),dst);
- }
- };
-
- } // end namespace internal
-
+
} // end namespace Eigen
#endif // EIGEN_MINRES_H
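The reworked wrapper also documents the Lower|Upper mode, the one required for matrix-free use per the static assertion above. A sketch with a symmetric matrix whose both triangles are stored (helper name hypothetical):

#include <Eigen/Sparse>
#include <unsupported/Eigen/IterativeSolvers>

Eigen::VectorXd solveMinres(const Eigen::SparseMatrix<double>& A,  // symmetric, fully stored
                            const Eigen::VectorXd& b)
{
  Eigen::MINRES<Eigen::SparseMatrix<double>, Eigen::Lower|Eigen::Upper> solver(A);
  return solver.solve(b);
}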
diff --git a/unsupported/Eigen/src/IterativeSolvers/Scaling.h b/unsupported/Eigen/src/IterativeSolvers/Scaling.h
index 4fd439202..d113e6e90 100644
--- a/unsupported/Eigen/src/IterativeSolvers/Scaling.h
+++ b/unsupported/Eigen/src/IterativeSolvers/Scaling.h
@@ -9,6 +9,9 @@
#ifndef EIGEN_ITERSCALING_H
#define EIGEN_ITERSCALING_H
+
+namespace Eigen {
+
/**
* \ingroup IterativeSolvers_Module
* \brief iterative scaling algorithm to equilibrate rows and column norms in matrices
@@ -41,8 +44,6 @@
*
* \sa \ref IncompleteLUT
*/
-namespace Eigen {
-using std::abs;
template<typename _MatrixType>
class IterScaling
{
@@ -71,6 +72,7 @@ class IterScaling
*/
void compute (const MatrixType& mat)
{
+ using std::abs;
int m = mat.rows();
int n = mat.cols();
     eigen_assert((m>0 && m == n) && "Please give a non-empty matrix");
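For context, IterScaling is meant to be wrapped around an external solver: equilibrate A in place, scale b the same way, solve, then unscale x. A sketch following the computeRef/LeftScaling/RightScaling interface of this file, with SparseLU standing in for any solver (helper name hypothetical):

#include <Eigen/Sparse>
#include <Eigen/SparseLU>
#include <unsupported/Eigen/IterativeSolvers>

void equilibratedSolve(Eigen::SparseMatrix<double>& A, Eigen::VectorXd& b, Eigen::VectorXd& x)
{
  Eigen::IterScaling<Eigen::SparseMatrix<double> > scal;
  scal.computeRef(A);                       // A is replaced by D1 * A * D2 in place
  b = scal.LeftScaling().cwiseProduct(b);   // scale the right-hand side accordingly
  Eigen::SparseLU<Eigen::SparseMatrix<double> > lu(A);
  x = lu.solve(b);
  x = scal.RightScaling().cwiseProduct(x);  // undo the column scaling on the solution
}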
diff --git a/unsupported/Eigen/src/KroneckerProduct/CMakeLists.txt b/unsupported/Eigen/src/KroneckerProduct/CMakeLists.txt
deleted file mode 100644
index 4daefebee..000000000
--- a/unsupported/Eigen/src/KroneckerProduct/CMakeLists.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-FILE(GLOB Eigen_KroneckerProduct_SRCS "*.h")
-
-INSTALL(FILES
- ${Eigen_KroneckerProduct_SRCS}
- DESTINATION ${INCLUDE_INSTALL_DIR}/unsupported/Eigen/src/KroneckerProduct COMPONENT Devel
- )
diff --git a/unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h b/unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h
index 532896c3b..582fa8512 100644
--- a/unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h
+++ b/unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h
@@ -12,58 +12,93 @@
#ifndef KRONECKER_TENSOR_PRODUCT_H
#define KRONECKER_TENSOR_PRODUCT_H
-namespace Eigen {
-
-template<typename Scalar, int Options, typename Index> class SparseMatrix;
+namespace Eigen {
/*!
- * \brief Kronecker tensor product helper class for dense matrices
+ * \ingroup KroneckerProduct_Module
*
- * This class is the return value of kroneckerProduct(MatrixBase,
- * MatrixBase). Use the function rather than construct this class
- * directly to avoid specifying template prarameters.
+ * \brief The base class of the dense and sparse Kronecker product classes.
*
- * \tparam Lhs Type of the left-hand side, a matrix expression.
- * \tparam Rhs Type of the rignt-hand side, a matrix expression.
+ * \tparam Derived is the derived type.
*/
-template<typename Lhs, typename Rhs>
-class KroneckerProduct : public ReturnByValue<KroneckerProduct<Lhs,Rhs> >
+template<typename Derived>
+class KroneckerProductBase : public ReturnByValue<Derived>
{
private:
- typedef ReturnByValue<KroneckerProduct> Base;
- typedef typename Base::Scalar Scalar;
- typedef typename Base::Index Index;
+ typedef typename internal::traits<Derived> Traits;
+ typedef typename Traits::Scalar Scalar;
+
+ protected:
+ typedef typename Traits::Lhs Lhs;
+ typedef typename Traits::Rhs Rhs;
public:
/*! \brief Constructor. */
- KroneckerProduct(const Lhs& A, const Rhs& B)
+ KroneckerProductBase(const Lhs& A, const Rhs& B)
: m_A(A), m_B(B)
{}
- /*! \brief Evaluate the Kronecker tensor product. */
- template<typename Dest> void evalTo(Dest& dst) const;
-
inline Index rows() const { return m_A.rows() * m_B.rows(); }
inline Index cols() const { return m_A.cols() * m_B.cols(); }
+ /*!
+ * This overrides ReturnByValue::coeff because this function is
+ * efficient enough.
+ */
Scalar coeff(Index row, Index col) const
{
return m_A.coeff(row / m_B.rows(), col / m_B.cols()) *
m_B.coeff(row % m_B.rows(), col % m_B.cols());
}
+ /*!
+ * This overrides ReturnByValue::coeff because this function is
+ * efficient enough.
+ */
Scalar coeff(Index i) const
{
- EIGEN_STATIC_ASSERT_VECTOR_ONLY(KroneckerProduct);
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);
return m_A.coeff(i / m_A.size()) * m_B.coeff(i % m_A.size());
}
- private:
+ protected:
typename Lhs::Nested m_A;
typename Rhs::Nested m_B;
};
/*!
+ * \ingroup KroneckerProduct_Module
+ *
+ * \brief Kronecker tensor product helper class for dense matrices
+ *
+ * This class is the return value of kroneckerProduct(MatrixBase,
+ * MatrixBase). Use the function rather than construct this class
+ * directly to avoid specifying template parameters.
+ *
+ * \tparam Lhs Type of the left-hand side, a matrix expression.
+ * \tparam Rhs Type of the right-hand side, a matrix expression.
+ */
+template<typename Lhs, typename Rhs>
+class KroneckerProduct : public KroneckerProductBase<KroneckerProduct<Lhs,Rhs> >
+{
+ private:
+ typedef KroneckerProductBase<KroneckerProduct> Base;
+ using Base::m_A;
+ using Base::m_B;
+
+ public:
+ /*! \brief Constructor. */
+ KroneckerProduct(const Lhs& A, const Rhs& B)
+ : Base(A, B)
+ {}
+
+ /*! \brief Evaluate the Kronecker tensor product. */
+ template<typename Dest> void evalTo(Dest& dst) const;
+};
+
+/*!
+ * \ingroup KroneckerProduct_Module
+ *
* \brief Kronecker tensor product helper class for sparse matrices
*
* If at least one of the operands is a sparse matrix expression,
@@ -77,34 +112,21 @@ class KroneckerProduct : public ReturnByValue<KroneckerProduct<Lhs,Rhs> >
 * \tparam Rhs Type of the right-hand side, a matrix expression.
*/
template<typename Lhs, typename Rhs>
-class KroneckerProductSparse : public EigenBase<KroneckerProductSparse<Lhs,Rhs> >
+class KroneckerProductSparse : public KroneckerProductBase<KroneckerProductSparse<Lhs,Rhs> >
{
private:
- typedef typename internal::traits<KroneckerProductSparse>::Index Index;
+ typedef KroneckerProductBase<KroneckerProductSparse> Base;
+ using Base::m_A;
+ using Base::m_B;
public:
/*! \brief Constructor. */
KroneckerProductSparse(const Lhs& A, const Rhs& B)
- : m_A(A), m_B(B)
+ : Base(A, B)
{}
/*! \brief Evaluate the Kronecker tensor product. */
template<typename Dest> void evalTo(Dest& dst) const;
-
- inline Index rows() const { return m_A.rows() * m_B.rows(); }
- inline Index cols() const { return m_A.cols() * m_B.cols(); }
-
- template<typename Scalar, int Options, typename Index>
- operator SparseMatrix<Scalar, Options, Index>()
- {
- SparseMatrix<Scalar, Options, Index> result;
- evalTo(result.derived());
- return result;
- }
-
- private:
- typename Lhs::Nested m_A;
- typename Rhs::Nested m_B;
};
template<typename Lhs, typename Rhs>
@@ -124,22 +146,49 @@ template<typename Lhs, typename Rhs>
template<typename Dest>
void KroneckerProductSparse<Lhs,Rhs>::evalTo(Dest& dst) const
{
- const Index Br = m_B.rows(),
- Bc = m_B.cols();
- dst.resize(rows(),cols());
+ Index Br = m_B.rows(), Bc = m_B.cols();
+ dst.resize(this->rows(), this->cols());
dst.resizeNonZeros(0);
- dst.reserve(m_A.nonZeros() * m_B.nonZeros());
+
+ // 1 - evaluate the operands if needed:
+ typedef typename internal::nested_eval<Lhs,Dynamic>::type Lhs1;
+ typedef typename internal::remove_all<Lhs1>::type Lhs1Cleaned;
+ const Lhs1 lhs1(m_A);
+ typedef typename internal::nested_eval<Rhs,Dynamic>::type Rhs1;
+ typedef typename internal::remove_all<Rhs1>::type Rhs1Cleaned;
+ const Rhs1 rhs1(m_B);
+
+ // 2 - construct respective iterators
+ typedef Eigen::InnerIterator<Lhs1Cleaned> LhsInnerIterator;
+ typedef Eigen::InnerIterator<Rhs1Cleaned> RhsInnerIterator;
+
+ // compute the number of non-zeros per inner vector of dst
+ {
+ // TODO VectorXi is not necessarily big enough!
+ VectorXi nnzA = VectorXi::Zero(Dest::IsRowMajor ? m_A.rows() : m_A.cols());
+ for (Index kA=0; kA < m_A.outerSize(); ++kA)
+ for (LhsInnerIterator itA(lhs1,kA); itA; ++itA)
+ nnzA(Dest::IsRowMajor ? itA.row() : itA.col())++;
+
+ VectorXi nnzB = VectorXi::Zero(Dest::IsRowMajor ? m_B.rows() : m_B.cols());
+ for (Index kB=0; kB < m_B.outerSize(); ++kB)
+ for (RhsInnerIterator itB(rhs1,kB); itB; ++itB)
+ nnzB(Dest::IsRowMajor ? itB.row() : itB.col())++;
+
+ Matrix<int,Dynamic,Dynamic,ColMajor> nnzAB = nnzB * nnzA.transpose();
+ dst.reserve(VectorXi::Map(nnzAB.data(), nnzAB.size()));
+ }
for (Index kA=0; kA < m_A.outerSize(); ++kA)
{
for (Index kB=0; kB < m_B.outerSize(); ++kB)
{
- for (typename Lhs::InnerIterator itA(m_A,kA); itA; ++itA)
+ for (LhsInnerIterator itA(lhs1,kA); itA; ++itA)
{
- for (typename Rhs::InnerIterator itB(m_B,kB); itB; ++itB)
+ for (RhsInnerIterator itB(rhs1,kB); itB; ++itB)
{
- const Index i = itA.row() * Br + itB.row(),
- j = itA.col() * Bc + itB.col();
+ Index i = itA.row() * Br + itB.row(),
+ j = itA.col() * Bc + itB.col();
dst.insert(i,j) = itA.value() * itB.value();
}
}
@@ -154,14 +203,14 @@ struct traits<KroneckerProduct<_Lhs,_Rhs> >
{
typedef typename remove_all<_Lhs>::type Lhs;
typedef typename remove_all<_Rhs>::type Rhs;
- typedef typename scalar_product_traits<typename Lhs::Scalar, typename Rhs::Scalar>::ReturnType Scalar;
+ typedef typename ScalarBinaryOpTraits<typename Lhs::Scalar, typename Rhs::Scalar>::ReturnType Scalar;
+ typedef typename promote_index_type<typename Lhs::StorageIndex, typename Rhs::StorageIndex>::type StorageIndex;
enum {
Rows = size_at_compile_time<traits<Lhs>::RowsAtCompileTime, traits<Rhs>::RowsAtCompileTime>::ret,
Cols = size_at_compile_time<traits<Lhs>::ColsAtCompileTime, traits<Rhs>::ColsAtCompileTime>::ret,
MaxRows = size_at_compile_time<traits<Lhs>::MaxRowsAtCompileTime, traits<Rhs>::MaxRowsAtCompileTime>::ret,
- MaxCols = size_at_compile_time<traits<Lhs>::MaxColsAtCompileTime, traits<Rhs>::MaxColsAtCompileTime>::ret,
- CoeffReadCost = Lhs::CoeffReadCost + Rhs::CoeffReadCost + NumTraits<Scalar>::MulCost
+ MaxCols = size_at_compile_time<traits<Lhs>::MaxColsAtCompileTime, traits<Rhs>::MaxColsAtCompileTime>::ret
};
typedef Matrix<Scalar,Rows,Cols> ReturnType;
@@ -173,9 +222,9 @@ struct traits<KroneckerProductSparse<_Lhs,_Rhs> >
typedef MatrixXpr XprKind;
typedef typename remove_all<_Lhs>::type Lhs;
typedef typename remove_all<_Rhs>::type Rhs;
- typedef typename scalar_product_traits<typename Lhs::Scalar, typename Rhs::Scalar>::ReturnType Scalar;
- typedef typename promote_storage_type<typename traits<Lhs>::StorageKind, typename traits<Rhs>::StorageKind>::ret StorageKind;
- typedef typename promote_index_type<typename Lhs::Index, typename Rhs::Index>::type Index;
+ typedef typename ScalarBinaryOpTraits<typename Lhs::Scalar, typename Rhs::Scalar>::ReturnType Scalar;
+ typedef typename cwise_promote_storage_type<typename traits<Lhs>::StorageKind, typename traits<Rhs>::StorageKind, scalar_product_op<typename Lhs::Scalar, typename Rhs::Scalar> >::ret StorageKind;
+ typedef typename promote_index_type<typename Lhs::StorageIndex, typename Rhs::StorageIndex>::type StorageIndex;
enum {
LhsFlags = Lhs::Flags,
@@ -190,9 +239,11 @@ struct traits<KroneckerProductSparse<_Lhs,_Rhs> >
RemovedBits = ~(EvalToRowMajor ? 0 : RowMajorBit),
Flags = ((LhsFlags | RhsFlags) & HereditaryBits & RemovedBits)
- | EvalBeforeNestingBit | EvalBeforeAssigningBit,
- CoeffReadCost = Dynamic
+ | EvalBeforeNestingBit,
+ CoeffReadCost = HugeCost
};
+
+ typedef SparseMatrix<Scalar, 0, StorageIndex> ReturnType;
};
} // end namespace internal
@@ -228,6 +279,16 @@ KroneckerProduct<A,B> kroneckerProduct(const MatrixBase<A>& a, const MatrixBase<
* Computes Kronecker tensor product of two matrices, at least one of
* which is sparse
*
+ * \warning If you want to replace a matrix by its Kronecker product
+ * with some matrix, do \b NOT do this:
+ * \code
+ * A = kroneckerProduct(A,B); // bug!!! caused by aliasing effect
+ * \endcode
+ * instead, use eval() to work around this:
+ * \code
+ * A = kroneckerProduct(A,B).eval();
+ * \endcode
+ *
* \param a Dense/sparse matrix a
* \param b Dense/sparse matrix b
* \return Kronecker tensor product of a and b, stored in a sparse
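A worked instance of the coeff() indexing defined in KroneckerProductBase above: each entry of the product is A(row / B.rows(), col / B.cols()) * B(row % B.rows(), col % B.cols()), i.e. every A(i,j) expands into the block A(i,j)*B.

#include <Eigen/Dense>
#include <unsupported/Eigen/KroneckerProduct>
#include <iostream>

int main()
{
  Eigen::Matrix2d A, B;
  A << 1, 2,
       3, 4;
  B << 0, 1,
       1, 0;
  Eigen::Matrix4d C = Eigen::kroneckerProduct(A, B);  // C(i,j) = A(i/2, j/2) * B(i%2, j%2)
  std::cout << C << std::endl;
  return 0;
}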
diff --git a/unsupported/Eigen/src/LevenbergMarquardt/CMakeLists.txt b/unsupported/Eigen/src/LevenbergMarquardt/CMakeLists.txt
deleted file mode 100644
index d9690854d..000000000
--- a/unsupported/Eigen/src/LevenbergMarquardt/CMakeLists.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-FILE(GLOB Eigen_LevenbergMarquardt_SRCS "*.h")
-
-INSTALL(FILES
- ${Eigen_LevenbergMarquardt_SRCS}
- DESTINATION ${INCLUDE_INSTALL_DIR}/unsupported/Eigen/src/LevenbergMarquardt COMPONENT Devel
- )
diff --git a/unsupported/Eigen/src/LevenbergMarquardt/LMcovar.h b/unsupported/Eigen/src/LevenbergMarquardt/LMcovar.h
index 32d3ad518..b75bea25f 100644
--- a/unsupported/Eigen/src/LevenbergMarquardt/LMcovar.h
+++ b/unsupported/Eigen/src/LevenbergMarquardt/LMcovar.h
@@ -23,7 +23,6 @@ void covar(
Scalar tol = std::sqrt(NumTraits<Scalar>::epsilon()) )
{
using std::abs;
- typedef DenseIndex Index;
/* Local variables */
Index i, j, k, l, ii, jj;
bool sing;
diff --git a/unsupported/Eigen/src/LevenbergMarquardt/LMpar.h b/unsupported/Eigen/src/LevenbergMarquardt/LMpar.h
index 9532042d9..9a4836547 100644
--- a/unsupported/Eigen/src/LevenbergMarquardt/LMpar.h
+++ b/unsupported/Eigen/src/LevenbergMarquardt/LMpar.h
@@ -30,7 +30,7 @@ namespace internal {
using std::abs;
typedef typename QRSolver::MatrixType MatrixType;
typedef typename QRSolver::Scalar Scalar;
- typedef typename QRSolver::Index Index;
+// typedef typename QRSolver::StorageIndex StorageIndex;
/* Local variables */
Index j;
diff --git a/unsupported/Eigen/src/LevenbergMarquardt/LMqrsolv.h b/unsupported/Eigen/src/LevenbergMarquardt/LMqrsolv.h
index f5290dee4..ae9d793b1 100644
--- a/unsupported/Eigen/src/LevenbergMarquardt/LMqrsolv.h
+++ b/unsupported/Eigen/src/LevenbergMarquardt/LMqrsolv.h
@@ -19,18 +19,17 @@ namespace Eigen {
namespace internal {
-template <typename Scalar,int Rows, int Cols, typename Index>
+template <typename Scalar,int Rows, int Cols, typename PermIndex>
void lmqrsolv(
Matrix<Scalar,Rows,Cols> &s,
- const PermutationMatrix<Dynamic,Dynamic,Index> &iPerm,
+ const PermutationMatrix<Dynamic,Dynamic,PermIndex> &iPerm,
const Matrix<Scalar,Dynamic,1> &diag,
const Matrix<Scalar,Dynamic,1> &qtb,
Matrix<Scalar,Dynamic,1> &x,
Matrix<Scalar,Dynamic,1> &sdiag)
{
-
/* Local variables */
- Index i, j, k, l;
+ Index i, j, k;
Scalar temp;
Index n = s.cols();
Matrix<Scalar,Dynamic,1> wa(n);
@@ -52,7 +51,7 @@ void lmqrsolv(
/* prepare the row of d to be eliminated, locating the */
/* diagonal element using p from the qr factorization. */
- l = iPerm.indices()(j);
+ const PermIndex l = iPerm.indices()(j);
if (diag[l] == 0.)
break;
sdiag.tail(n-j).setZero();
diff --git a/unsupported/Eigen/src/LevenbergMarquardt/LevenbergMarquardt.h b/unsupported/Eigen/src/LevenbergMarquardt/LevenbergMarquardt.h
index 51dd1d3c4..995427978 100644
--- a/unsupported/Eigen/src/LevenbergMarquardt/LevenbergMarquardt.h
+++ b/unsupported/Eigen/src/LevenbergMarquardt/LevenbergMarquardt.h
@@ -115,8 +115,7 @@ class LevenbergMarquardt : internal::no_assignment_operator
typedef typename FunctorType::JacobianType JacobianType;
typedef typename JacobianType::Scalar Scalar;
typedef typename JacobianType::RealScalar RealScalar;
- typedef typename JacobianType::Index Index;
- typedef typename QRSolver::Index PermIndex;
+ typedef typename QRSolver::StorageIndex PermIndex;
typedef Matrix<Scalar,Dynamic,1> FVectorType;
typedef PermutationMatrix<Dynamic,Dynamic> PermutationType;
public:
@@ -144,11 +143,13 @@ class LevenbergMarquardt : internal::no_assignment_operator
/** Sets the default parameters */
void resetParameters()
- {
+ {
+ using std::sqrt;
+
m_factor = 100.;
m_maxfev = 400;
- m_ftol = std::sqrt(NumTraits<RealScalar>::epsilon());
- m_xtol = std::sqrt(NumTraits<RealScalar>::epsilon());
+ m_ftol = sqrt(NumTraits<RealScalar>::epsilon());
+ m_xtol = sqrt(NumTraits<RealScalar>::epsilon());
m_gtol = 0. ;
m_epsfcn = 0. ;
}
@@ -174,6 +175,24 @@ class LevenbergMarquardt : internal::no_assignment_operator
/** Use an external Scaling. If set to true, pass a nonzero diagonal to diag() */
void setExternalScaling(bool value) {m_useExternalScaling = value; }
+ /** \returns the tolerance for the norm of the solution vector */
+ RealScalar xtol() const {return m_xtol; }
+
+ /** \returns the tolerance for the norm of the vector function */
+ RealScalar ftol() const {return m_ftol; }
+
+ /** \returns the tolerance for the norm of the gradient of the error vector */
+ RealScalar gtol() const {return m_gtol; }
+
+ /** \returns the step bound for the diagonal shift */
+ RealScalar factor() const {return m_factor; }
+
+ /** \returns the error precision */
+ RealScalar epsilon() const {return m_epsfcn; }
+
+ /** \returns the maximum number of function evaluation */
+ Index maxfev() const {return m_maxfev; }
+
/** \returns a reference to the diagonal of the jacobian */
FVectorType& diag() {return m_diag; }
@@ -285,7 +304,7 @@ LevenbergMarquardt<FunctorType>::minimizeInit(FVectorType &x)
// m_fjac.reserve(VectorXi::Constant(n,5)); // FIXME Find a better alternative
if (!m_useExternalScaling)
m_diag.resize(n);
- eigen_assert( (!m_useExternalScaling || m_diag.size()==n) || "When m_useExternalScaling is set, the caller must provide a valid 'm_diag'");
+ eigen_assert( (!m_useExternalScaling || m_diag.size()==n) && "When m_useExternalScaling is set, the caller must provide a valid 'm_diag'");
m_qtf.resize(n);
/* Function Body */
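The `||` to `&&` change in the eigen_assert above is a real bug fix: a string literal decays to a non-null pointer, so `cond || "msg"` is always true and the assertion can never fire. A minimal illustration:

#include <cassert>

int main()
{
  assert(true  && "this form checks the condition and keeps the message text");
  assert(false || "this form always passes, whatever the condition");  // vacuous, like the old code
  return 0;
}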
diff --git a/unsupported/Eigen/src/MatrixFunctions/CMakeLists.txt b/unsupported/Eigen/src/MatrixFunctions/CMakeLists.txt
deleted file mode 100644
index cdde64d2c..000000000
--- a/unsupported/Eigen/src/MatrixFunctions/CMakeLists.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-FILE(GLOB Eigen_MatrixFunctions_SRCS "*.h")
-
-INSTALL(FILES
- ${Eigen_MatrixFunctions_SRCS}
- DESTINATION ${INCLUDE_INSTALL_DIR}/unsupported/Eigen/src/MatrixFunctions COMPONENT Devel
- )
diff --git a/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h b/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h
index 6825a7882..bb6d9e1fe 100644
--- a/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h
+++ b/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h
@@ -1,8 +1,8 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2009, 2010 Jitse Niesen <jitse@maths.leeds.ac.uk>
-// Copyright (C) 2011 Chen-Pang He <jdh8@ms63.hinet.net>
+// Copyright (C) 2009, 2010, 2013 Jitse Niesen <jitse@maths.leeds.ac.uk>
+// Copyright (C) 2011, 2013 Chen-Pang He <jdh8@ms63.hinet.net>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@@ -14,388 +14,374 @@
#include "StemFunction.h"
namespace Eigen {
+namespace internal {
-/** \ingroup MatrixFunctions_Module
- * \brief Class for computing the matrix exponential.
- * \tparam MatrixType type of the argument of the exponential,
- * expected to be an instantiation of the Matrix class template.
- */
-template <typename MatrixType>
-class MatrixExponential {
-
- public:
+/** \brief Scaling operator.
+ *
+ * This struct is used by CwiseUnaryOp to scale a matrix by \f$ 2^{-s} \f$.
+ */
+template <typename RealScalar>
+struct MatrixExponentialScalingOp
+{
+ /** \brief Constructor.
+ *
+ * \param[in] squarings The integer \f$ s \f$ in this document.
+ */
+ MatrixExponentialScalingOp(int squarings) : m_squarings(squarings) { }
+
+
+ /** \brief Scale a matrix coefficient.
+ *
+ * \param[in,out] x The scalar to be scaled, becoming \f$ 2^{-s} x \f$.
+ */
+ inline const RealScalar operator() (const RealScalar& x) const
+ {
+ using std::ldexp;
+ return ldexp(x, -m_squarings);
+ }
- /** \brief Constructor.
- *
- * The class stores a reference to \p M, so it should not be
- * changed (or destroyed) before compute() is called.
- *
- * \param[in] M matrix whose exponential is to be computed.
- */
- MatrixExponential(const MatrixType &M);
+ typedef std::complex<RealScalar> ComplexScalar;
- /** \brief Computes the matrix exponential.
- *
- * \param[out] result the matrix exponential of \p M in the constructor.
- */
- template <typename ResultType>
- void compute(ResultType &result);
+ /** \brief Scale a matrix coefficient.
+ *
+ * \param[in,out] x The scalar to be scaled, becoming \f$ 2^{-s} x \f$.
+ */
+ inline const ComplexScalar operator() (const ComplexScalar& x) const
+ {
+ using std::ldexp;
+ return ComplexScalar(ldexp(x.real(), -m_squarings), ldexp(x.imag(), -m_squarings));
+ }
private:
-
- // Prevent copying
- MatrixExponential(const MatrixExponential&);
- MatrixExponential& operator=(const MatrixExponential&);
-
- /** \brief Compute the (3,3)-Pad&eacute; approximant to the exponential.
- *
- * After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Pad&eacute;
- * approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$.
- *
- * \param[in] A Argument of matrix exponential
- */
- void pade3(const MatrixType &A);
-
- /** \brief Compute the (5,5)-Pad&eacute; approximant to the exponential.
- *
- * After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Pad&eacute;
- * approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$.
- *
- * \param[in] A Argument of matrix exponential
- */
- void pade5(const MatrixType &A);
-
- /** \brief Compute the (7,7)-Pad&eacute; approximant to the exponential.
- *
- * After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Pad&eacute;
- * approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$.
- *
- * \param[in] A Argument of matrix exponential
- */
- void pade7(const MatrixType &A);
-
- /** \brief Compute the (9,9)-Pad&eacute; approximant to the exponential.
- *
- * After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Pad&eacute;
- * approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$.
- *
- * \param[in] A Argument of matrix exponential
- */
- void pade9(const MatrixType &A);
-
- /** \brief Compute the (13,13)-Pad&eacute; approximant to the exponential.
- *
- * After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Pad&eacute;
- * approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$.
- *
- * \param[in] A Argument of matrix exponential
- */
- void pade13(const MatrixType &A);
-
- /** \brief Compute the (17,17)-Pad&eacute; approximant to the exponential.
- *
- * After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Pad&eacute;
- * approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$.
- *
- * This function activates only if your long double is double-double or quadruple.
- *
- * \param[in] A Argument of matrix exponential
- */
- void pade17(const MatrixType &A);
-
- /** \brief Compute Pad&eacute; approximant to the exponential.
- *
- * Computes \c m_U, \c m_V and \c m_squarings such that
- * \f$ (V+U)(V-U)^{-1} \f$ is a Pad&eacute; of
- * \f$ \exp(2^{-\mbox{squarings}}M) \f$ around \f$ M = 0 \f$. The
- * degree of the Pad&eacute; approximant and the value of
- * squarings are chosen such that the approximation error is no
- * more than the round-off error.
- *
- * The argument of this function should correspond with the (real
- * part of) the entries of \c m_M. It is used to select the
- * correct implementation using overloading.
- */
- void computeUV(double);
-
- /** \brief Compute Pad&eacute; approximant to the exponential.
- *
- * \sa computeUV(double);
- */
- void computeUV(float);
-
- /** \brief Compute Pad&eacute; approximant to the exponential.
- *
- * \sa computeUV(double);
- */
- void computeUV(long double);
-
- typedef typename internal::traits<MatrixType>::Scalar Scalar;
- typedef typename NumTraits<Scalar>::Real RealScalar;
- typedef typename std::complex<RealScalar> ComplexScalar;
-
- /** \brief Reference to matrix whose exponential is to be computed. */
- typename internal::nested<MatrixType>::type m_M;
-
- /** \brief Odd-degree terms in numerator of Pad&eacute; approximant. */
- MatrixType m_U;
-
- /** \brief Even-degree terms in numerator of Pad&eacute; approximant. */
- MatrixType m_V;
-
- /** \brief Used for temporary storage. */
- MatrixType m_tmp1;
-
- /** \brief Used for temporary storage. */
- MatrixType m_tmp2;
-
- /** \brief Identity matrix of the same size as \c m_M. */
- MatrixType m_Id;
-
- /** \brief Number of squarings required in the last step. */
int m_squarings;
-
- /** \brief L1 norm of m_M. */
- RealScalar m_l1norm;
};
-template <typename MatrixType>
-MatrixExponential<MatrixType>::MatrixExponential(const MatrixType &M) :
- m_M(M),
- m_U(M.rows(),M.cols()),
- m_V(M.rows(),M.cols()),
- m_tmp1(M.rows(),M.cols()),
- m_tmp2(M.rows(),M.cols()),
- m_Id(MatrixType::Identity(M.rows(), M.cols())),
- m_squarings(0),
- m_l1norm(M.cwiseAbs().colwise().sum().maxCoeff())
-{
- /* empty body */
-}
-
-template <typename MatrixType>
-template <typename ResultType>
-void MatrixExponential<MatrixType>::compute(ResultType &result)
+/** \brief Compute the (3,3)-Pad&eacute; approximant to the exponential.
+ *
+ * After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Pad&eacute;
+ * approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$.
+ */
+template <typename MatA, typename MatU, typename MatV>
+void matrix_exp_pade3(const MatA& A, MatU& U, MatV& V)
{
-#if LDBL_MANT_DIG > 112 // rarely happens
- if(sizeof(RealScalar) > 14) {
- result = m_M.matrixFunction(StdStemFunctions<ComplexScalar>::exp);
- return;
- }
-#endif
- computeUV(RealScalar());
- m_tmp1 = m_U + m_V; // numerator of Pade approximant
- m_tmp2 = -m_U + m_V; // denominator of Pade approximant
- result = m_tmp2.partialPivLu().solve(m_tmp1);
- for (int i=0; i<m_squarings; i++)
- result *= result; // undo scaling by repeated squaring
+ typedef typename MatA::PlainObject MatrixType;
+ typedef typename NumTraits<typename traits<MatA>::Scalar>::Real RealScalar;
+ const RealScalar b[] = {120.L, 60.L, 12.L, 1.L};
+ const MatrixType A2 = A * A;
+ const MatrixType tmp = b[3] * A2 + b[1] * MatrixType::Identity(A.rows(), A.cols());
+ U.noalias() = A * tmp;
+ V = b[2] * A2 + b[0] * MatrixType::Identity(A.rows(), A.cols());
}
-template <typename MatrixType>
-EIGEN_STRONG_INLINE void MatrixExponential<MatrixType>::pade3(const MatrixType &A)
+/** \brief Compute the (5,5)-Pad&eacute; approximant to the exponential.
+ *
+ * After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Pad&eacute;
+ * approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$.
+ */
+template <typename MatA, typename MatU, typename MatV>
+void matrix_exp_pade5(const MatA& A, MatU& U, MatV& V)
{
- const RealScalar b[] = {120., 60., 12., 1.};
- m_tmp1.noalias() = A * A;
- m_tmp2 = b[3]*m_tmp1 + b[1]*m_Id;
- m_U.noalias() = A * m_tmp2;
- m_V = b[2]*m_tmp1 + b[0]*m_Id;
+ typedef typename MatA::PlainObject MatrixType;
+ typedef typename NumTraits<typename traits<MatrixType>::Scalar>::Real RealScalar;
+ const RealScalar b[] = {30240.L, 15120.L, 3360.L, 420.L, 30.L, 1.L};
+ const MatrixType A2 = A * A;
+ const MatrixType A4 = A2 * A2;
+ const MatrixType tmp = b[5] * A4 + b[3] * A2 + b[1] * MatrixType::Identity(A.rows(), A.cols());
+ U.noalias() = A * tmp;
+ V = b[4] * A4 + b[2] * A2 + b[0] * MatrixType::Identity(A.rows(), A.cols());
}
-template <typename MatrixType>
-EIGEN_STRONG_INLINE void MatrixExponential<MatrixType>::pade5(const MatrixType &A)
+/** \brief Compute the (7,7)-Pad&eacute; approximant to the exponential.
+ *
+ * After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Pad&eacute;
+ * approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$.
+ */
+template <typename MatA, typename MatU, typename MatV>
+void matrix_exp_pade7(const MatA& A, MatU& U, MatV& V)
{
- const RealScalar b[] = {30240., 15120., 3360., 420., 30., 1.};
- MatrixType A2 = A * A;
- m_tmp1.noalias() = A2 * A2;
- m_tmp2 = b[5]*m_tmp1 + b[3]*A2 + b[1]*m_Id;
- m_U.noalias() = A * m_tmp2;
- m_V = b[4]*m_tmp1 + b[2]*A2 + b[0]*m_Id;
-}
+ typedef typename MatA::PlainObject MatrixType;
+ typedef typename NumTraits<typename traits<MatrixType>::Scalar>::Real RealScalar;
+ const RealScalar b[] = {17297280.L, 8648640.L, 1995840.L, 277200.L, 25200.L, 1512.L, 56.L, 1.L};
+ const MatrixType A2 = A * A;
+ const MatrixType A4 = A2 * A2;
+ const MatrixType A6 = A4 * A2;
+ const MatrixType tmp = b[7] * A6 + b[5] * A4 + b[3] * A2
+ + b[1] * MatrixType::Identity(A.rows(), A.cols());
+ U.noalias() = A * tmp;
+ V = b[6] * A6 + b[4] * A4 + b[2] * A2 + b[0] * MatrixType::Identity(A.rows(), A.cols());
-template <typename MatrixType>
-EIGEN_STRONG_INLINE void MatrixExponential<MatrixType>::pade7(const MatrixType &A)
-{
- const RealScalar b[] = {17297280., 8648640., 1995840., 277200., 25200., 1512., 56., 1.};
- MatrixType A2 = A * A;
- MatrixType A4 = A2 * A2;
- m_tmp1.noalias() = A4 * A2;
- m_tmp2 = b[7]*m_tmp1 + b[5]*A4 + b[3]*A2 + b[1]*m_Id;
- m_U.noalias() = A * m_tmp2;
- m_V = b[6]*m_tmp1 + b[4]*A4 + b[2]*A2 + b[0]*m_Id;
}
-template <typename MatrixType>
-EIGEN_STRONG_INLINE void MatrixExponential<MatrixType>::pade9(const MatrixType &A)
+/** \brief Compute the (9,9)-Pad&eacute; approximant to the exponential.
+ *
+ * After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Pad&eacute;
+ * approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$.
+ */
+template <typename MatA, typename MatU, typename MatV>
+void matrix_exp_pade9(const MatA& A, MatU& U, MatV& V)
{
- const RealScalar b[] = {17643225600., 8821612800., 2075673600., 302702400., 30270240.,
- 2162160., 110880., 3960., 90., 1.};
- MatrixType A2 = A * A;
- MatrixType A4 = A2 * A2;
- MatrixType A6 = A4 * A2;
- m_tmp1.noalias() = A6 * A2;
- m_tmp2 = b[9]*m_tmp1 + b[7]*A6 + b[5]*A4 + b[3]*A2 + b[1]*m_Id;
- m_U.noalias() = A * m_tmp2;
- m_V = b[8]*m_tmp1 + b[6]*A6 + b[4]*A4 + b[2]*A2 + b[0]*m_Id;
+ typedef typename MatA::PlainObject MatrixType;
+ typedef typename NumTraits<typename traits<MatrixType>::Scalar>::Real RealScalar;
+ const RealScalar b[] = {17643225600.L, 8821612800.L, 2075673600.L, 302702400.L, 30270240.L,
+ 2162160.L, 110880.L, 3960.L, 90.L, 1.L};
+ const MatrixType A2 = A * A;
+ const MatrixType A4 = A2 * A2;
+ const MatrixType A6 = A4 * A2;
+ const MatrixType A8 = A6 * A2;
+ const MatrixType tmp = b[9] * A8 + b[7] * A6 + b[5] * A4 + b[3] * A2
+ + b[1] * MatrixType::Identity(A.rows(), A.cols());
+ U.noalias() = A * tmp;
+ V = b[8] * A8 + b[6] * A6 + b[4] * A4 + b[2] * A2 + b[0] * MatrixType::Identity(A.rows(), A.cols());
}
-template <typename MatrixType>
-EIGEN_STRONG_INLINE void MatrixExponential<MatrixType>::pade13(const MatrixType &A)
+/** \brief Compute the (13,13)-Pad&eacute; approximant to the exponential.
+ *
+ * After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Pad&eacute;
+ * approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$.
+ */
+template <typename MatA, typename MatU, typename MatV>
+void matrix_exp_pade13(const MatA& A, MatU& U, MatV& V)
{
- const RealScalar b[] = {64764752532480000., 32382376266240000., 7771770303897600.,
- 1187353796428800., 129060195264000., 10559470521600., 670442572800.,
- 33522128640., 1323241920., 40840800., 960960., 16380., 182., 1.};
- MatrixType A2 = A * A;
- MatrixType A4 = A2 * A2;
- m_tmp1.noalias() = A4 * A2;
- m_V = b[13]*m_tmp1 + b[11]*A4 + b[9]*A2; // used for temporary storage
- m_tmp2.noalias() = m_tmp1 * m_V;
- m_tmp2 += b[7]*m_tmp1 + b[5]*A4 + b[3]*A2 + b[1]*m_Id;
- m_U.noalias() = A * m_tmp2;
- m_tmp2 = b[12]*m_tmp1 + b[10]*A4 + b[8]*A2;
- m_V.noalias() = m_tmp1 * m_tmp2;
- m_V += b[6]*m_tmp1 + b[4]*A4 + b[2]*A2 + b[0]*m_Id;
+ typedef typename MatA::PlainObject MatrixType;
+ typedef typename NumTraits<typename traits<MatrixType>::Scalar>::Real RealScalar;
+ const RealScalar b[] = {64764752532480000.L, 32382376266240000.L, 7771770303897600.L,
+ 1187353796428800.L, 129060195264000.L, 10559470521600.L, 670442572800.L,
+ 33522128640.L, 1323241920.L, 40840800.L, 960960.L, 16380.L, 182.L, 1.L};
+ const MatrixType A2 = A * A;
+ const MatrixType A4 = A2 * A2;
+ const MatrixType A6 = A4 * A2;
+ V = b[13] * A6 + b[11] * A4 + b[9] * A2; // used for temporary storage
+ MatrixType tmp = A6 * V;
+ tmp += b[7] * A6 + b[5] * A4 + b[3] * A2 + b[1] * MatrixType::Identity(A.rows(), A.cols());
+ U.noalias() = A * tmp;
+ tmp = b[12] * A6 + b[10] * A4 + b[8] * A2;
+ V.noalias() = A6 * tmp;
+ V += b[6] * A6 + b[4] * A4 + b[2] * A2 + b[0] * MatrixType::Identity(A.rows(), A.cols());
}
+/** \brief Compute the (17,17)-Pad&eacute; approximant to the exponential.
+ *
+ * After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Pad&eacute;
+ * approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$.
+ *
+ * This function activates only if your long double is double-double or quadruple.
+ */
#if LDBL_MANT_DIG > 64
-template <typename MatrixType>
-EIGEN_STRONG_INLINE void MatrixExponential<MatrixType>::pade17(const MatrixType &A)
+template <typename MatA, typename MatU, typename MatV>
+void matrix_exp_pade17(const MatA& A, MatU& U, MatV& V)
{
+ typedef typename MatA::PlainObject MatrixType;
+ typedef typename NumTraits<typename traits<MatrixType>::Scalar>::Real RealScalar;
const RealScalar b[] = {830034394580628357120000.L, 415017197290314178560000.L,
- 100610229646136770560000.L, 15720348382208870400000.L,
- 1774878043152614400000.L, 153822763739893248000.L, 10608466464820224000.L,
- 595373117923584000.L, 27563570274240000.L, 1060137318240000.L,
- 33924394183680.L, 899510451840.L, 19554575040.L, 341863200.L, 4651200.L,
- 46512.L, 306.L, 1.L};
- MatrixType A2 = A * A;
- MatrixType A4 = A2 * A2;
- MatrixType A6 = A4 * A2;
- m_tmp1.noalias() = A4 * A4;
- m_V = b[17]*m_tmp1 + b[15]*A6 + b[13]*A4 + b[11]*A2; // used for temporary storage
- m_tmp2.noalias() = m_tmp1 * m_V;
- m_tmp2 += b[9]*m_tmp1 + b[7]*A6 + b[5]*A4 + b[3]*A2 + b[1]*m_Id;
- m_U.noalias() = A * m_tmp2;
- m_tmp2 = b[16]*m_tmp1 + b[14]*A6 + b[12]*A4 + b[10]*A2;
- m_V.noalias() = m_tmp1 * m_tmp2;
- m_V += b[8]*m_tmp1 + b[6]*A6 + b[4]*A4 + b[2]*A2 + b[0]*m_Id;
+ 100610229646136770560000.L, 15720348382208870400000.L,
+ 1774878043152614400000.L, 153822763739893248000.L, 10608466464820224000.L,
+ 595373117923584000.L, 27563570274240000.L, 1060137318240000.L,
+ 33924394183680.L, 899510451840.L, 19554575040.L, 341863200.L, 4651200.L,
+ 46512.L, 306.L, 1.L};
+ const MatrixType A2 = A * A;
+ const MatrixType A4 = A2 * A2;
+ const MatrixType A6 = A4 * A2;
+ const MatrixType A8 = A4 * A4;
+ V = b[17] * A8 + b[15] * A6 + b[13] * A4 + b[11] * A2; // used for temporary storage
+ MatrixType tmp = A8 * V;
+ tmp += b[9] * A8 + b[7] * A6 + b[5] * A4 + b[3] * A2
+ + b[1] * MatrixType::Identity(A.rows(), A.cols());
+ U.noalias() = A * tmp;
+ tmp = b[16] * A8 + b[14] * A6 + b[12] * A4 + b[10] * A2;
+ V.noalias() = tmp * A8;
+ V += b[8] * A8 + b[6] * A6 + b[4] * A4 + b[2] * A2
+ + b[0] * MatrixType::Identity(A.rows(), A.cols());
}
#endif
+template <typename MatrixType, typename RealScalar = typename NumTraits<typename traits<MatrixType>::Scalar>::Real>
+struct matrix_exp_computeUV
+{
+ /** \brief Compute Pad&eacute; approximant to the exponential.
+ *
+ * Computes \c U, \c V and \c squarings such that \f$ (V+U)(V-U)^{-1} \f$ is a Pad&eacute;
+ * approximant of \f$ \exp(2^{-\mbox{squarings}}M) \f$ around \f$ M = 0 \f$, where \f$ M \f$
+ * denotes the matrix \c arg. The degree of the Pad&eacute; approximant and the value of squarings
+ * are chosen such that the approximation error is no more than the round-off error.
+ */
+ static void run(const MatrixType& arg, MatrixType& U, MatrixType& V, int& squarings);
+};
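To make the choice of squarings concrete, a worked example with illustrative numbers: in double precision the threshold maxnorm is 5.371920351148152 (see the specialization below). If \f$ \|M\|_1 = 20 \f$, then l1norm / maxnorm is about 3.72 = 0.93 * 2^2, so frexp() stores the binary exponent 2 in squarings; the (13,13) approximant is then applied to \f$ 2^{-2} M \f$, whose 1-norm of about 5 is just below maxnorm, and the result is squared twice afterwards.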
+
template <typename MatrixType>
-void MatrixExponential<MatrixType>::computeUV(float)
+struct matrix_exp_computeUV<MatrixType, float>
{
- using std::frexp;
- using std::pow;
- if (m_l1norm < 4.258730016922831e-001) {
- pade3(m_M);
- } else if (m_l1norm < 1.880152677804762e+000) {
- pade5(m_M);
- } else {
- const float maxnorm = 3.925724783138660f;
- frexp(m_l1norm / maxnorm, &m_squarings);
- if (m_squarings < 0) m_squarings = 0;
- MatrixType A = m_M / pow(Scalar(2), m_squarings);
- pade7(A);
+ template <typename ArgType>
+ static void run(const ArgType& arg, MatrixType& U, MatrixType& V, int& squarings)
+ {
+ using std::frexp;
+ using std::pow;
+ const float l1norm = arg.cwiseAbs().colwise().sum().maxCoeff();
+ squarings = 0;
+ if (l1norm < 4.258730016922831e-001f) {
+ matrix_exp_pade3(arg, U, V);
+ } else if (l1norm < 1.880152677804762e+000f) {
+ matrix_exp_pade5(arg, U, V);
+ } else {
+ const float maxnorm = 3.925724783138660f;
+ frexp(l1norm / maxnorm, &squarings);
+ if (squarings < 0) squarings = 0;
+ MatrixType A = arg.unaryExpr(MatrixExponentialScalingOp<float>(squarings));
+ matrix_exp_pade7(A, U, V);
+ }
}
-}
+};
template <typename MatrixType>
-void MatrixExponential<MatrixType>::computeUV(double)
+struct matrix_exp_computeUV<MatrixType, double>
{
- using std::frexp;
- using std::pow;
- if (m_l1norm < 1.495585217958292e-002) {
- pade3(m_M);
- } else if (m_l1norm < 2.539398330063230e-001) {
- pade5(m_M);
- } else if (m_l1norm < 9.504178996162932e-001) {
- pade7(m_M);
- } else if (m_l1norm < 2.097847961257068e+000) {
- pade9(m_M);
- } else {
- const double maxnorm = 5.371920351148152;
- frexp(m_l1norm / maxnorm, &m_squarings);
- if (m_squarings < 0) m_squarings = 0;
- MatrixType A = m_M / pow(Scalar(2), m_squarings);
- pade13(A);
+ template <typename ArgType>
+ static void run(const ArgType& arg, MatrixType& U, MatrixType& V, int& squarings)
+ {
+ using std::frexp;
+ using std::pow;
+ const double l1norm = arg.cwiseAbs().colwise().sum().maxCoeff();
+ squarings = 0;
+ if (l1norm < 1.495585217958292e-002) {
+ matrix_exp_pade3(arg, U, V);
+ } else if (l1norm < 2.539398330063230e-001) {
+ matrix_exp_pade5(arg, U, V);
+ } else if (l1norm < 9.504178996162932e-001) {
+ matrix_exp_pade7(arg, U, V);
+ } else if (l1norm < 2.097847961257068e+000) {
+ matrix_exp_pade9(arg, U, V);
+ } else {
+ const double maxnorm = 5.371920351148152;
+ frexp(l1norm / maxnorm, &squarings);
+ if (squarings < 0) squarings = 0;
+ MatrixType A = arg.unaryExpr(MatrixExponentialScalingOp<double>(squarings));
+ matrix_exp_pade13(A, U, V);
+ }
}
-}
-
+};
+
template <typename MatrixType>
-void MatrixExponential<MatrixType>::computeUV(long double)
+struct matrix_exp_computeUV<MatrixType, long double>
{
- using std::frexp;
- using std::pow;
+ template <typename ArgType>
+ static void run(const ArgType& arg, MatrixType& U, MatrixType& V, int& squarings)
+ {
#if LDBL_MANT_DIG == 53 // double precision
- computeUV(double());
-#elif LDBL_MANT_DIG <= 64 // extended precision
- if (m_l1norm < 4.1968497232266989671e-003L) {
- pade3(m_M);
- } else if (m_l1norm < 1.1848116734693823091e-001L) {
- pade5(m_M);
- } else if (m_l1norm < 5.5170388480686700274e-001L) {
- pade7(m_M);
- } else if (m_l1norm < 1.3759868875587845383e+000L) {
- pade9(m_M);
- } else {
- const long double maxnorm = 4.0246098906697353063L;
- frexp(m_l1norm / maxnorm, &m_squarings);
- if (m_squarings < 0) m_squarings = 0;
- MatrixType A = m_M / pow(Scalar(2), m_squarings);
- pade13(A);
- }
+ matrix_exp_computeUV<MatrixType, double>::run(arg, U, V, squarings);
+
+#else
+
+ using std::frexp;
+ using std::pow;
+ const long double l1norm = arg.cwiseAbs().colwise().sum().maxCoeff();
+ squarings = 0;
+
+#if LDBL_MANT_DIG <= 64 // extended precision
+
+ if (l1norm < 4.1968497232266989671e-003L) {
+ matrix_exp_pade3(arg, U, V);
+ } else if (l1norm < 1.1848116734693823091e-001L) {
+ matrix_exp_pade5(arg, U, V);
+ } else if (l1norm < 5.5170388480686700274e-001L) {
+ matrix_exp_pade7(arg, U, V);
+ } else if (l1norm < 1.3759868875587845383e+000L) {
+ matrix_exp_pade9(arg, U, V);
+ } else {
+ const long double maxnorm = 4.0246098906697353063L;
+ frexp(l1norm / maxnorm, &squarings);
+ if (squarings < 0) squarings = 0;
+ MatrixType A = arg.unaryExpr(MatrixExponentialScalingOp<long double>(squarings));
+ matrix_exp_pade13(A, U, V);
+ }
+
#elif LDBL_MANT_DIG <= 106 // double-double
- if (m_l1norm < 3.2787892205607026992947488108213e-005L) {
- pade3(m_M);
- } else if (m_l1norm < 6.4467025060072760084130906076332e-003L) {
- pade5(m_M);
- } else if (m_l1norm < 6.8988028496595374751374122881143e-002L) {
- pade7(m_M);
- } else if (m_l1norm < 2.7339737518502231741495857201670e-001L) {
- pade9(m_M);
- } else if (m_l1norm < 1.3203382096514474905666448850278e+000L) {
- pade13(m_M);
- } else {
- const long double maxnorm = 3.2579440895405400856599663723517L;
- frexp(m_l1norm / maxnorm, &m_squarings);
- if (m_squarings < 0) m_squarings = 0;
- MatrixType A = m_M / pow(Scalar(2), m_squarings);
- pade17(A);
- }
+
+ if (l1norm < 3.2787892205607026992947488108213e-005L) {
+ matrix_exp_pade3(arg, U, V);
+ } else if (l1norm < 6.4467025060072760084130906076332e-003L) {
+ matrix_exp_pade5(arg, U, V);
+ } else if (l1norm < 6.8988028496595374751374122881143e-002L) {
+ matrix_exp_pade7(arg, U, V);
+ } else if (l1norm < 2.7339737518502231741495857201670e-001L) {
+ matrix_exp_pade9(arg, U, V);
+ } else if (l1norm < 1.3203382096514474905666448850278e+000L) {
+ matrix_exp_pade13(arg, U, V);
+ } else {
+ const long double maxnorm = 3.2579440895405400856599663723517L;
+ frexp(l1norm / maxnorm, &squarings);
+ if (squarings < 0) squarings = 0;
+ MatrixType A = arg.unaryExpr(MatrixExponentialScalingOp<long double>(squarings));
+ matrix_exp_pade17(A, U, V);
+ }
+
#elif LDBL_MANT_DIG <= 112 // quadruple precision
- if (m_l1norm < 1.639394610288918690547467954466970e-005L) {
- pade3(m_M);
- } else if (m_l1norm < 4.253237712165275566025884344433009e-003L) {
- pade5(m_M);
- } else if (m_l1norm < 5.125804063165764409885122032933142e-002L) {
- pade7(m_M);
- } else if (m_l1norm < 2.170000765161155195453205651889853e-001L) {
- pade9(m_M);
- } else if (m_l1norm < 1.125358383453143065081397882891878e+000L) {
- pade13(m_M);
- } else {
- const long double maxnorm = 2.884233277829519311757165057717815L;
- frexp(m_l1norm / maxnorm, &m_squarings);
- if (m_squarings < 0) m_squarings = 0;
- MatrixType A = m_M / pow(Scalar(2), m_squarings);
- pade17(A);
- }
+
+ if (l1norm < 1.639394610288918690547467954466970e-005L) {
+ matrix_exp_pade3(arg, U, V);
+ } else if (l1norm < 4.253237712165275566025884344433009e-003L) {
+ matrix_exp_pade5(arg, U, V);
+ } else if (l1norm < 5.125804063165764409885122032933142e-002L) {
+ matrix_exp_pade7(arg, U, V);
+ } else if (l1norm < 2.170000765161155195453205651889853e-001L) {
+ matrix_exp_pade9(arg, U, V);
+ } else if (l1norm < 1.125358383453143065081397882891878e+000L) {
+ matrix_exp_pade13(arg, U, V);
+ } else {
+ const long double maxnorm = 2.884233277829519311757165057717815L;
+ frexp(l1norm / maxnorm, &squarings);
+ if (squarings < 0) squarings = 0;
+ MatrixType A = arg.unaryExpr(MatrixExponentialScalingOp<long double>(squarings));
+ matrix_exp_pade17(A, U, V);
+ }
+
#else
- // this case should be handled in compute()
- eigen_assert(false && "Bug in MatrixExponential");
+
+ // this case should be handled in compute()
+ eigen_assert(false && "Bug in MatrixExponential");
+
+#endif
#endif // LDBL_MANT_DIG
+ }
+};
+
+
+/* Computes the matrix exponential
+ *
+ * \param arg argument of matrix exponential (should be plain object)
+ * \param result variable in which result will be stored
+ */
+template <typename ArgType, typename ResultType>
+void matrix_exp_compute(const ArgType& arg, ResultType &result)
+{
+ typedef typename ArgType::PlainObject MatrixType;
+#if LDBL_MANT_DIG > 112 // rarely happens
+ typedef typename traits<MatrixType>::Scalar Scalar;
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+ typedef std::complex<RealScalar> ComplexScalar;
+ if (sizeof(RealScalar) > 14) {
+ result = arg.matrixFunction(internal::stem_function_exp<ComplexScalar>);
+ return;
+ }
+#endif
+ MatrixType U, V;
+ int squarings;
+ matrix_exp_computeUV<MatrixType>::run(arg, U, V, squarings); // Pade approximant is (U+V) / (-U+V)
+ MatrixType numer = U + V;
+ MatrixType denom = -U + V;
+ result = denom.partialPivLu().solve(numer);
+ for (int i=0; i<squarings; i++)
+ result *= result; // undo scaling by repeated squaring
}
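The final loop relies on the scaling-and-squaring identity \f$ \exp(M) = \bigl(\exp(2^{-s} M)\bigr)^{2^s} \f$: computeUV evaluates the Padé approximant on the scaled matrix \f$ 2^{-s} M \f$, and squaring the result s times recovers \f$ \exp(M) \f$.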
+} // end namespace Eigen::internal
+
/** \ingroup MatrixFunctions_Module
*
* \brief Proxy for the matrix exponential of some matrix (expression).
*
* \tparam Derived Type of the argument to the matrix exponential.
*
- * This class holds the argument to the matrix exponential until it
- * is assigned or evaluated for some other reason (so the argument
- * should not be changed in the meantime). It is the return type of
- * MatrixBase::exp() and most of the time this is the only way it is
- * used.
+ * This class holds the argument to the matrix exponential until it is assigned or evaluated for
+ * some other reason (so the argument should not be changed in the meantime). It is the return type
+ * of MatrixBase::exp() and most of the time this is the only way it is used.
*/
template<typename Derived> struct MatrixExponentialReturnValue
: public ReturnByValue<MatrixExponentialReturnValue<Derived> >
@@ -404,31 +390,26 @@ template<typename Derived> struct MatrixExponentialReturnValue
public:
/** \brief Constructor.
*
- * \param[in] src %Matrix (expression) forming the argument of the
- * matrix exponential.
+ * \param src %Matrix (expression) forming the argument of the matrix exponential.
*/
MatrixExponentialReturnValue(const Derived& src) : m_src(src) { }
/** \brief Compute the matrix exponential.
*
- * \param[out] result the matrix exponential of \p src in the
- * constructor.
+ * \param result the matrix exponential of the matrix \p src passed to the constructor.
*/
template <typename ResultType>
inline void evalTo(ResultType& result) const
{
- const typename Derived::PlainObject srcEvaluated = m_src.eval();
- MatrixExponential<typename Derived::PlainObject> me(srcEvaluated);
- me.compute(result);
+ const typename internal::nested_eval<Derived, 10>::type tmp(m_src);
+ internal::matrix_exp_compute(tmp, result);
}
Index rows() const { return m_src.rows(); }
Index cols() const { return m_src.cols(); }
protected:
- const Derived& m_src;
- private:
- MatrixExponentialReturnValue& operator=(const MatrixExponentialReturnValue&);
+ const typename internal::ref_selector<Derived>::type m_src;
};
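As a usage sketch (not part of the patch), the proxy is normally never named explicitly; it is created by MatrixBase::exp() and evaluated on assignment:

#include <iostream>
#include <unsupported/Eigen/MatrixFunctions>

int main()
{
  Eigen::MatrixXd A(2, 2);
  A << 0, 1,
      -1, 0;                       // generator of a rotation
  Eigen::MatrixXd expA = A.exp();  // assignment triggers evalTo(), i.e. matrix_exp_compute()
  std::cout << expA << "\n";       // approximately [cos(1) sin(1); -sin(1) cos(1)]
}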
namespace internal {
diff --git a/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h b/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h
index 7d426640c..db2449d02 100644
--- a/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h
+++ b/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2009-2011 Jitse Niesen <jitse@maths.leeds.ac.uk>
+// Copyright (C) 2009-2011, 2013 Jitse Niesen <jitse@maths.leeds.ac.uk>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@@ -11,398 +11,245 @@
#define EIGEN_MATRIX_FUNCTION
#include "StemFunction.h"
-#include "MatrixFunctionAtomic.h"
namespace Eigen {
+namespace internal {
+
+/** \brief Maximum distance allowed between eigenvalues to be considered "close". */
+static const float matrix_function_separation = 0.1f;
+
/** \ingroup MatrixFunctions_Module
- * \brief Class for computing matrix functions.
- * \tparam MatrixType type of the argument of the matrix function,
- * expected to be an instantiation of the Matrix class template.
- * \tparam AtomicType type for computing matrix function of atomic blocks.
- * \tparam IsComplex used internally to select correct specialization.
+ * \class MatrixFunctionAtomic
+ * \brief Helper class for computing matrix functions of atomic matrices.
*
- * This class implements the Schur-Parlett algorithm for computing matrix functions. The spectrum of the
- * matrix is divided in clustered of eigenvalues that lies close together. This class delegates the
- * computation of the matrix function on every block corresponding to these clusters to an object of type
- * \p AtomicType and uses these results to compute the matrix function of the whole matrix. The class
- * \p AtomicType should have a \p compute() member function for computing the matrix function of a block.
- *
- * \sa class MatrixFunctionAtomic, class MatrixLogarithmAtomic
+ * Here, an atomic matrix is a triangular matrix whose diagonal entries are close to each other.
*/
-template <typename MatrixType,
- typename AtomicType,
- int IsComplex = NumTraits<typename internal::traits<MatrixType>::Scalar>::IsComplex>
-class MatrixFunction
-{
+template <typename MatrixType>
+class MatrixFunctionAtomic
+{
public:
- /** \brief Constructor.
- *
- * \param[in] A argument of matrix function, should be a square matrix.
- * \param[in] atomic class for computing matrix function of atomic blocks.
- *
- * The class stores references to \p A and \p atomic, so they should not be
- * changed (or destroyed) before compute() is called.
- */
- MatrixFunction(const MatrixType& A, AtomicType& atomic);
-
- /** \brief Compute the matrix function.
- *
- * \param[out] result the function \p f applied to \p A, as
- * specified in the constructor.
- *
- * See MatrixBase::matrixFunction() for details on how this computation
- * is implemented.
- */
- template <typename ResultType>
- void compute(ResultType &result);
-};
-
-
-/** \internal \ingroup MatrixFunctions_Module
- * \brief Partial specialization of MatrixFunction for real matrices
- */
-template <typename MatrixType, typename AtomicType>
-class MatrixFunction<MatrixType, AtomicType, 0>
-{
- private:
-
- typedef internal::traits<MatrixType> Traits;
- typedef typename Traits::Scalar Scalar;
- static const int Rows = Traits::RowsAtCompileTime;
- static const int Cols = Traits::ColsAtCompileTime;
- static const int Options = MatrixType::Options;
- static const int MaxRows = Traits::MaxRowsAtCompileTime;
- static const int MaxCols = Traits::MaxColsAtCompileTime;
-
- typedef std::complex<Scalar> ComplexScalar;
- typedef Matrix<ComplexScalar, Rows, Cols, Options, MaxRows, MaxCols> ComplexMatrix;
-
- public:
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename stem_function<Scalar>::type StemFunction;
- /** \brief Constructor.
- *
- * \param[in] A argument of matrix function, should be a square matrix.
- * \param[in] atomic class for computing matrix function of atomic blocks.
+ /** \brief Constructor
+ * \param[in] f matrix function to compute.
*/
- MatrixFunction(const MatrixType& A, AtomicType& atomic) : m_A(A), m_atomic(atomic) { }
+ MatrixFunctionAtomic(StemFunction f) : m_f(f) { }
- /** \brief Compute the matrix function.
- *
- * \param[out] result the function \p f applied to \p A, as
- * specified in the constructor.
- *
- * This function converts the real matrix \c A to a complex matrix,
- * uses MatrixFunction<MatrixType,1> and then converts the result back to
- * a real matrix.
+ /** \brief Compute matrix function of atomic matrix
+ * \param[in] A argument of matrix function, should be upper triangular and atomic
+ * \returns f(A), the matrix function evaluated at the given matrix
*/
- template <typename ResultType>
- void compute(ResultType& result)
- {
- ComplexMatrix CA = m_A.template cast<ComplexScalar>();
- ComplexMatrix Cresult;
- MatrixFunction<ComplexMatrix, AtomicType> mf(CA, m_atomic);
- mf.compute(Cresult);
- result = Cresult.real();
- }
-
- private:
- typename internal::nested<MatrixType>::type m_A; /**< \brief Reference to argument of matrix function. */
- AtomicType& m_atomic; /**< \brief Class for computing matrix function of atomic blocks. */
-
- MatrixFunction& operator=(const MatrixFunction&);
-};
-
-
-/** \internal \ingroup MatrixFunctions_Module
- * \brief Partial specialization of MatrixFunction for complex matrices
- */
-template <typename MatrixType, typename AtomicType>
-class MatrixFunction<MatrixType, AtomicType, 1>
-{
- private:
-
- typedef internal::traits<MatrixType> Traits;
- typedef typename MatrixType::Scalar Scalar;
- typedef typename MatrixType::Index Index;
- static const int RowsAtCompileTime = Traits::RowsAtCompileTime;
- static const int ColsAtCompileTime = Traits::ColsAtCompileTime;
- static const int Options = MatrixType::Options;
- typedef typename NumTraits<Scalar>::Real RealScalar;
- typedef Matrix<Scalar, Traits::RowsAtCompileTime, 1> VectorType;
- typedef Matrix<Index, Traits::RowsAtCompileTime, 1> IntVectorType;
- typedef Matrix<Index, Dynamic, 1> DynamicIntVectorType;
- typedef std::list<Scalar> Cluster;
- typedef std::list<Cluster> ListOfClusters;
- typedef Matrix<Scalar, Dynamic, Dynamic, Options, RowsAtCompileTime, ColsAtCompileTime> DynMatrixType;
-
- public:
-
- MatrixFunction(const MatrixType& A, AtomicType& atomic);
- template <typename ResultType> void compute(ResultType& result);
+ MatrixType compute(const MatrixType& A);
private:
-
- void computeSchurDecomposition();
- void partitionEigenvalues();
- typename ListOfClusters::iterator findCluster(Scalar key);
- void computeClusterSize();
- void computeBlockStart();
- void constructPermutation();
- void permuteSchur();
- void swapEntriesInSchur(Index index);
- void computeBlockAtomic();
- Block<MatrixType> block(MatrixType& A, Index i, Index j);
- void computeOffDiagonal();
- DynMatrixType solveTriangularSylvester(const DynMatrixType& A, const DynMatrixType& B, const DynMatrixType& C);
-
- typename internal::nested<MatrixType>::type m_A; /**< \brief Reference to argument of matrix function. */
- AtomicType& m_atomic; /**< \brief Class for computing matrix function of atomic blocks. */
- MatrixType m_T; /**< \brief Triangular part of Schur decomposition */
- MatrixType m_U; /**< \brief Unitary part of Schur decomposition */
- MatrixType m_fT; /**< \brief %Matrix function applied to #m_T */
- ListOfClusters m_clusters; /**< \brief Partition of eigenvalues into clusters of ei'vals "close" to each other */
- DynamicIntVectorType m_eivalToCluster; /**< \brief m_eivalToCluster[i] = j means i-th ei'val is in j-th cluster */
- DynamicIntVectorType m_clusterSize; /**< \brief Number of eigenvalues in each clusters */
- DynamicIntVectorType m_blockStart; /**< \brief Row index at which block corresponding to i-th cluster starts */
- IntVectorType m_permutation; /**< \brief Permutation which groups ei'vals in the same cluster together */
-
- /** \brief Maximum distance allowed between eigenvalues to be considered "close".
- *
- * This is morally a \c static \c const \c Scalar, but only
- * integers can be static constant class members in C++. The
- * separation constant is set to 0.1, a value taken from the
- * paper by Davies and Higham. */
- static const RealScalar separation() { return static_cast<RealScalar>(0.1); }
-
- MatrixFunction& operator=(const MatrixFunction&);
+ StemFunction* m_f;
};
-/** \brief Constructor.
- *
- * \param[in] A argument of matrix function, should be a square matrix.
- * \param[in] atomic class for computing matrix function of atomic blocks.
- */
-template <typename MatrixType, typename AtomicType>
-MatrixFunction<MatrixType,AtomicType,1>::MatrixFunction(const MatrixType& A, AtomicType& atomic)
- : m_A(A), m_atomic(atomic)
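+/** \brief Compute the constant \c mu used in MatrixFunctionAtomic to decide whether the Taylor series has converged. */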
+template <typename MatrixType>
+typename NumTraits<typename MatrixType::Scalar>::Real matrix_function_compute_mu(const MatrixType& A)
{
- /* empty body */
+ typedef typename plain_col_type<MatrixType>::type VectorType;
+ typename MatrixType::Index rows = A.rows();
+ const MatrixType N = MatrixType::Identity(rows, rows) - A;
+ VectorType e = VectorType::Ones(rows);
+ N.template triangularView<Upper>().solveInPlace(e);
+ return e.cwiseAbs().maxCoeff();
}
-/** \brief Compute the matrix function.
- *
- * \param[out] result the function \p f applied to \p A, as
- * specified in the constructor.
- */
-template <typename MatrixType, typename AtomicType>
-template <typename ResultType>
-void MatrixFunction<MatrixType,AtomicType,1>::compute(ResultType& result)
+template <typename MatrixType>
+MatrixType MatrixFunctionAtomic<MatrixType>::compute(const MatrixType& A)
{
- computeSchurDecomposition();
- partitionEigenvalues();
- computeClusterSize();
- computeBlockStart();
- constructPermutation();
- permuteSchur();
- computeBlockAtomic();
- computeOffDiagonal();
- result = m_U * (m_fT.template triangularView<Upper>() * m_U.adjoint());
+ // TODO: Use that A is upper triangular
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+ typedef typename MatrixType::Index Index;
+ Index rows = A.rows();
+ Scalar avgEival = A.trace() / Scalar(RealScalar(rows));
+ MatrixType Ashifted = A - avgEival * MatrixType::Identity(rows, rows);
+ RealScalar mu = matrix_function_compute_mu(Ashifted);
+ MatrixType F = m_f(avgEival, 0) * MatrixType::Identity(rows, rows);
+ MatrixType P = Ashifted;
+ MatrixType Fincr;
+ for (Index s = 1; s < 1.1 * rows + 10; s++) { // upper limit is fairly arbitrary
+ Fincr = m_f(avgEival, static_cast<int>(s)) * P;
+ F += Fincr;
+ P = Scalar(RealScalar(1.0/(s + 1))) * P * Ashifted;
+
+ // test whether Taylor series converged
+ const RealScalar F_norm = F.cwiseAbs().rowwise().sum().maxCoeff();
+ const RealScalar Fincr_norm = Fincr.cwiseAbs().rowwise().sum().maxCoeff();
+ if (Fincr_norm < NumTraits<Scalar>::epsilon() * F_norm) {
+ RealScalar delta = 0;
+ RealScalar rfactorial = 1;
+ for (Index r = 0; r < rows; r++) {
+ RealScalar mx = 0;
+ for (Index i = 0; i < rows; i++)
+ mx = (std::max)(mx, std::abs(m_f(Ashifted(i, i) + avgEival, static_cast<int>(s+r))));
+ if (r != 0)
+ rfactorial *= RealScalar(r);
+ delta = (std::max)(delta, mx / rfactorial);
+ }
+ const RealScalar P_norm = P.cwiseAbs().rowwise().sum().maxCoeff();
+ if (mu * delta * P_norm < NumTraits<Scalar>::epsilon() * F_norm) // series converged
+ break;
+ }
+ }
+ return F;
}
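The loop above evaluates the Taylor expansion \f$ f(A) = \sum_{s \geq 0} \frac{f^{(s)}(\sigma)}{s!} (A - \sigma I)^s \f$ around the mean eigenvalue \f$ \sigma \f$ (avgEival); after step s, P holds \f$ (A - \sigma I)^s / s! \f$, and the Davies-Higham bound involving mu, delta and the norm of P decides when the series can be truncated.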
-/** \brief Store the Schur decomposition of #m_A in #m_T and #m_U */
-template <typename MatrixType, typename AtomicType>
-void MatrixFunction<MatrixType,AtomicType,1>::computeSchurDecomposition()
+/** \brief Find cluster in \p clusters containing some value
+ * \param[in] key Value to find
+ * \returns Iterator to cluster containing \p key, or \c clusters.end() if no cluster in \p clusters
+ * contains \p key.
+ */
+template <typename Index, typename ListOfClusters>
+typename ListOfClusters::iterator matrix_function_find_cluster(Index key, ListOfClusters& clusters)
{
- const ComplexSchur<MatrixType> schurOfA(m_A);
- m_T = schurOfA.matrixT();
- m_U = schurOfA.matrixU();
+ typename std::list<Index>::iterator j;
+ for (typename ListOfClusters::iterator i = clusters.begin(); i != clusters.end(); ++i) {
+ j = std::find(i->begin(), i->end(), key);
+ if (j != i->end())
+ return i;
+ }
+ return clusters.end();
}
/** \brief Partition eigenvalues in clusters of ei'vals close to each other
*
- * This function computes #m_clusters. This is a partition of the
- * eigenvalues of #m_T in clusters, such that
- * # Any eigenvalue in a certain cluster is at most separation() away
- * from another eigenvalue in the same cluster.
- * # The distance between two eigenvalues in different clusters is
- * more than separation().
- * The implementation follows Algorithm 4.1 in the paper of Davies
- * and Higham.
+ * \param[in] eivals Eigenvalues
+ * \param[out] clusters Resulting partition of eigenvalues
+ *
+ * The partition satisfies the following two properties:
+ * # Any eigenvalue in a given cluster is at most matrix_function_separation away from another eigenvalue
+ * in the same cluster.
+ * # The distance between two eigenvalues in different clusters is more than matrix_function_separation.
+ * The implementation follows Algorithm 4.1 in the paper of Davies and Higham.
*/
-template <typename MatrixType, typename AtomicType>
-void MatrixFunction<MatrixType,AtomicType,1>::partitionEigenvalues()
+template <typename EivalsType, typename Cluster>
+void matrix_function_partition_eigenvalues(const EivalsType& eivals, std::list<Cluster>& clusters)
{
- using std::abs;
- const Index rows = m_T.rows();
- VectorType diag = m_T.diagonal(); // contains eigenvalues of A
-
- for (Index i=0; i<rows; ++i) {
- // Find set containing diag(i), adding a new set if necessary
- typename ListOfClusters::iterator qi = findCluster(diag(i));
- if (qi == m_clusters.end()) {
+ typedef typename EivalsType::Index Index;
+ typedef typename EivalsType::RealScalar RealScalar;
+ for (Index i=0; i<eivals.rows(); ++i) {
+ // Find cluster containing i-th ei'val, adding a new cluster if necessary
+ typename std::list<Cluster>::iterator qi = matrix_function_find_cluster(i, clusters);
+ if (qi == clusters.end()) {
Cluster l;
- l.push_back(diag(i));
- m_clusters.push_back(l);
- qi = m_clusters.end();
+ l.push_back(i);
+ clusters.push_back(l);
+ qi = clusters.end();
--qi;
}
// Look for other element to add to the set
- for (Index j=i+1; j<rows; ++j) {
- if (abs(diag(j) - diag(i)) <= separation() && std::find(qi->begin(), qi->end(), diag(j)) == qi->end()) {
- typename ListOfClusters::iterator qj = findCluster(diag(j));
- if (qj == m_clusters.end()) {
- qi->push_back(diag(j));
+ for (Index j=i+1; j<eivals.rows(); ++j) {
+ if (abs(eivals(j) - eivals(i)) <= RealScalar(matrix_function_separation)
+ && std::find(qi->begin(), qi->end(), j) == qi->end()) {
+ typename std::list<Cluster>::iterator qj = matrix_function_find_cluster(j, clusters);
+ if (qj == clusters.end()) {
+ qi->push_back(j);
} else {
qi->insert(qi->end(), qj->begin(), qj->end());
- m_clusters.erase(qj);
+ clusters.erase(qj);
}
}
}
}
}
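For example, suppose the diagonal of T carries the eigenvalues (1.00, 1.07, 3.00): indices 0 and 1 are merged into one cluster because |1.07 - 1.00| <= 0.1, while index 2, more than 0.1 away from both, forms a cluster of its own (values chosen for illustration).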
-/** \brief Find cluster in #m_clusters containing some value
- * \param[in] key Value to find
- * \returns Iterator to cluster containing \c key, or
- * \c m_clusters.end() if no cluster in m_clusters contains \c key.
- */
-template <typename MatrixType, typename AtomicType>
-typename MatrixFunction<MatrixType,AtomicType,1>::ListOfClusters::iterator MatrixFunction<MatrixType,AtomicType,1>::findCluster(Scalar key)
+/** \brief Compute size of each cluster given a partitioning */
+template <typename ListOfClusters, typename Index>
+void matrix_function_compute_cluster_size(const ListOfClusters& clusters, Matrix<Index, Dynamic, 1>& clusterSize)
{
- typename Cluster::iterator j;
- for (typename ListOfClusters::iterator i = m_clusters.begin(); i != m_clusters.end(); ++i) {
- j = std::find(i->begin(), i->end(), key);
- if (j != i->end())
- return i;
+ const Index numClusters = static_cast<Index>(clusters.size());
+ clusterSize.setZero(numClusters);
+ Index clusterIndex = 0;
+ for (typename ListOfClusters::const_iterator cluster = clusters.begin(); cluster != clusters.end(); ++cluster) {
+ clusterSize[clusterIndex] = cluster->size();
+ ++clusterIndex;
}
- return m_clusters.end();
}
-/** \brief Compute #m_clusterSize and #m_eivalToCluster using #m_clusters */
-template <typename MatrixType, typename AtomicType>
-void MatrixFunction<MatrixType,AtomicType,1>::computeClusterSize()
+/** \brief Compute start of each block using clusterSize */
+template <typename VectorType>
+void matrix_function_compute_block_start(const VectorType& clusterSize, VectorType& blockStart)
{
- const Index rows = m_T.rows();
- VectorType diag = m_T.diagonal();
- const Index numClusters = static_cast<Index>(m_clusters.size());
+ blockStart.resize(clusterSize.rows());
+ blockStart(0) = 0;
+ for (typename VectorType::Index i = 1; i < clusterSize.rows(); i++) {
+ blockStart(i) = blockStart(i-1) + clusterSize(i-1);
+ }
+}
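For instance, cluster sizes (2, 3, 1) produce the block starts (0, 2, 5): each block begins where the previous one ends.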
- m_clusterSize.setZero(numClusters);
- m_eivalToCluster.resize(rows);
+/** \brief Compute mapping of eigenvalue indices to cluster indices */
+template <typename EivalsType, typename ListOfClusters, typename VectorType>
+void matrix_function_compute_map(const EivalsType& eivals, const ListOfClusters& clusters, VectorType& eivalToCluster)
+{
+ typedef typename EivalsType::Index Index;
+ eivalToCluster.resize(eivals.rows());
Index clusterIndex = 0;
- for (typename ListOfClusters::const_iterator cluster = m_clusters.begin(); cluster != m_clusters.end(); ++cluster) {
- for (Index i = 0; i < diag.rows(); ++i) {
- if (std::find(cluster->begin(), cluster->end(), diag(i)) != cluster->end()) {
- ++m_clusterSize[clusterIndex];
- m_eivalToCluster[i] = clusterIndex;
+ for (typename ListOfClusters::const_iterator cluster = clusters.begin(); cluster != clusters.end(); ++cluster) {
+ for (Index i = 0; i < eivals.rows(); ++i) {
+ if (std::find(cluster->begin(), cluster->end(), i) != cluster->end()) {
+ eivalToCluster[i] = clusterIndex;
}
}
++clusterIndex;
}
}
-/** \brief Compute #m_blockStart using #m_clusterSize */
-template <typename MatrixType, typename AtomicType>
-void MatrixFunction<MatrixType,AtomicType,1>::computeBlockStart()
-{
- m_blockStart.resize(m_clusterSize.rows());
- m_blockStart(0) = 0;
- for (Index i = 1; i < m_clusterSize.rows(); i++) {
- m_blockStart(i) = m_blockStart(i-1) + m_clusterSize(i-1);
- }
-}
-
-/** \brief Compute #m_permutation using #m_eivalToCluster and #m_blockStart */
-template <typename MatrixType, typename AtomicType>
-void MatrixFunction<MatrixType,AtomicType,1>::constructPermutation()
+/** \brief Compute permutation which groups ei'vals in same cluster together */
+template <typename DynVectorType, typename VectorType>
+void matrix_function_compute_permutation(const DynVectorType& blockStart, const DynVectorType& eivalToCluster, VectorType& permutation)
{
- DynamicIntVectorType indexNextEntry = m_blockStart;
- m_permutation.resize(m_T.rows());
- for (Index i = 0; i < m_T.rows(); i++) {
- Index cluster = m_eivalToCluster[i];
- m_permutation[i] = indexNextEntry[cluster];
+ typedef typename VectorType::Index Index;
+ DynVectorType indexNextEntry = blockStart;
+ permutation.resize(eivalToCluster.rows());
+ for (Index i = 0; i < eivalToCluster.rows(); i++) {
+ Index cluster = eivalToCluster[i];
+ permutation[i] = indexNextEntry[cluster];
++indexNextEntry[cluster];
}
}
-/** \brief Permute Schur decomposition in #m_U and #m_T according to #m_permutation */
-template <typename MatrixType, typename AtomicType>
-void MatrixFunction<MatrixType,AtomicType,1>::permuteSchur()
+/** \brief Permute Schur decomposition in U and T according to permutation */
+template <typename VectorType, typename MatrixType>
+void matrix_function_permute_schur(VectorType& permutation, MatrixType& U, MatrixType& T)
{
- IntVectorType p = m_permutation;
- for (Index i = 0; i < p.rows() - 1; i++) {
+ typedef typename VectorType::Index Index;
+ for (Index i = 0; i < permutation.rows() - 1; i++) {
Index j;
- for (j = i; j < p.rows(); j++) {
- if (p(j) == i) break;
+ for (j = i; j < permutation.rows(); j++) {
+ if (permutation(j) == i) break;
}
- eigen_assert(p(j) == i);
+ eigen_assert(permutation(j) == i);
for (Index k = j-1; k >= i; k--) {
- swapEntriesInSchur(k);
- std::swap(p.coeffRef(k), p.coeffRef(k+1));
+ JacobiRotation<typename MatrixType::Scalar> rotation;
+ rotation.makeGivens(T(k, k+1), T(k+1, k+1) - T(k, k));
+ T.applyOnTheLeft(k, k+1, rotation.adjoint());
+ T.applyOnTheRight(k, k+1, rotation);
+ U.applyOnTheRight(k, k+1, rotation);
+ std::swap(permutation.coeffRef(k), permutation.coeffRef(k+1));
}
}
}
-/** \brief Swap rows \a index and \a index+1 in Schur decomposition in #m_U and #m_T */
-template <typename MatrixType, typename AtomicType>
-void MatrixFunction<MatrixType,AtomicType,1>::swapEntriesInSchur(Index index)
-{
- JacobiRotation<Scalar> rotation;
- rotation.makeGivens(m_T(index, index+1), m_T(index+1, index+1) - m_T(index, index));
- m_T.applyOnTheLeft(index, index+1, rotation.adjoint());
- m_T.applyOnTheRight(index, index+1, rotation);
- m_U.applyOnTheRight(index, index+1, rotation);
-}
-
-/** \brief Compute block diagonal part of #m_fT.
- *
- * This routine computes the matrix function applied to the block diagonal part of #m_T, with the blocking
- * given by #m_blockStart. The matrix function of each diagonal block is computed by #m_atomic. The
- * off-diagonal parts of #m_fT are set to zero.
- */
-template <typename MatrixType, typename AtomicType>
-void MatrixFunction<MatrixType,AtomicType,1>::computeBlockAtomic()
-{
- m_fT.resize(m_T.rows(), m_T.cols());
- m_fT.setZero();
- for (Index i = 0; i < m_clusterSize.rows(); ++i) {
- block(m_fT, i, i) = m_atomic.compute(block(m_T, i, i));
- }
-}
-
-/** \brief Return block of matrix according to blocking given by #m_blockStart */
-template <typename MatrixType, typename AtomicType>
-Block<MatrixType> MatrixFunction<MatrixType,AtomicType,1>::block(MatrixType& A, Index i, Index j)
-{
- return A.block(m_blockStart(i), m_blockStart(j), m_clusterSize(i), m_clusterSize(j));
-}
-
-/** \brief Compute part of #m_fT above block diagonal.
+/** \brief Compute block diagonal part of matrix function.
*
- * This routine assumes that the block diagonal part of #m_fT (which
- * equals the matrix function applied to #m_T) has already been computed and computes
- * the part above the block diagonal. The part below the diagonal is
- * zero, because #m_T is upper triangular.
+ * This routine computes the matrix function applied to the block diagonal part of \p T (which should be
+ * upper triangular), with the blocking given by \p blockStart and \p clusterSize. The matrix function of
+ * each diagonal block is computed by \p atomic. The off-diagonal parts of \p fT are set to zero.
*/
-template <typename MatrixType, typename AtomicType>
-void MatrixFunction<MatrixType,AtomicType,1>::computeOffDiagonal()
+template <typename MatrixType, typename AtomicType, typename VectorType>
+void matrix_function_compute_block_atomic(const MatrixType& T, AtomicType& atomic, const VectorType& blockStart, const VectorType& clusterSize, MatrixType& fT)
{
- for (Index diagIndex = 1; diagIndex < m_clusterSize.rows(); diagIndex++) {
- for (Index blockIndex = 0; blockIndex < m_clusterSize.rows() - diagIndex; blockIndex++) {
- // compute (blockIndex, blockIndex+diagIndex) block
- DynMatrixType A = block(m_T, blockIndex, blockIndex);
- DynMatrixType B = -block(m_T, blockIndex+diagIndex, blockIndex+diagIndex);
- DynMatrixType C = block(m_fT, blockIndex, blockIndex) * block(m_T, blockIndex, blockIndex+diagIndex);
- C -= block(m_T, blockIndex, blockIndex+diagIndex) * block(m_fT, blockIndex+diagIndex, blockIndex+diagIndex);
- for (Index k = blockIndex + 1; k < blockIndex + diagIndex; k++) {
- C += block(m_fT, blockIndex, k) * block(m_T, k, blockIndex+diagIndex);
- C -= block(m_T, blockIndex, k) * block(m_fT, k, blockIndex+diagIndex);
- }
- block(m_fT, blockIndex, blockIndex+diagIndex) = solveTriangularSylvester(A, B, C);
- }
+ fT.setZero(T.rows(), T.cols());
+ for (typename VectorType::Index i = 0; i < clusterSize.rows(); ++i) {
+ fT.block(blockStart(i), blockStart(i), clusterSize(i), clusterSize(i))
+ = atomic.compute(T.block(blockStart(i), blockStart(i), clusterSize(i), clusterSize(i)));
}
}
@@ -414,8 +261,8 @@ void MatrixFunction<MatrixType,AtomicType,1>::computeOffDiagonal()
*
* \returns the solution X.
*
- * If A is m-by-m and B is n-by-n, then both C and X are m-by-n.
- * The (i,j)-th component of the Sylvester equation is
+ * If A is m-by-m and B is n-by-n, then both C and X are m-by-n. The (i,j)-th component of the Sylvester
+ * equation is
* \f[
* \sum_{k=i}^m A_{ik} X_{kj} + \sum_{k=1}^j X_{ik} B_{kj} = C_{ij}.
* \f]
@@ -424,16 +271,12 @@ void MatrixFunction<MatrixType,AtomicType,1>::computeOffDiagonal()
* X_{ij} = \frac{1}{A_{ii} + B_{jj}} \Bigl( C_{ij}
* - \sum_{k=i+1}^m A_{ik} X_{kj} - \sum_{k=1}^{j-1} X_{ik} B_{kj} \Bigr).
* \f]
- * It is assumed that A and B are such that the numerator is never
- * zero (otherwise the Sylvester equation does not have a unique
- * solution). In that case, these equations can be evaluated in the
- * order \f$ i=m,\ldots,1 \f$ and \f$ j=1,\ldots,n \f$.
+ * It is assumed that A and B are such that the numerator is never zero (otherwise the Sylvester equation
+ * does not have a unique solution). In that case, these equations can be evaluated in the order
+ * \f$ i=m,\ldots,1 \f$ and \f$ j=1,\ldots,n \f$.
*/
-template <typename MatrixType, typename AtomicType>
-typename MatrixFunction<MatrixType,AtomicType,1>::DynMatrixType MatrixFunction<MatrixType,AtomicType,1>::solveTriangularSylvester(
- const DynMatrixType& A,
- const DynMatrixType& B,
- const DynMatrixType& C)
+template <typename MatrixType>
+MatrixType matrix_function_solve_triangular_sylvester(const MatrixType& A, const MatrixType& B, const MatrixType& C)
{
eigen_assert(A.rows() == A.cols());
eigen_assert(A.isUpperTriangular());
@@ -442,9 +285,12 @@ typename MatrixFunction<MatrixType,AtomicType,1>::DynMatrixType MatrixFunction<M
eigen_assert(C.rows() == A.rows());
eigen_assert(C.cols() == B.rows());
+ typedef typename MatrixType::Index Index;
+ typedef typename MatrixType::Scalar Scalar;
+
Index m = A.rows();
Index n = B.rows();
- DynMatrixType X(m, n);
+ MatrixType X(m, n);
for (Index i = m - 1; i >= 0; --i) {
for (Index j = 0; j < n; ++j) {
@@ -473,66 +319,210 @@ typename MatrixFunction<MatrixType,AtomicType,1>::DynMatrixType MatrixFunction<M
return X;
}
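A minimal sketch of exercising the solver with illustrative values; it calls the internal function directly, which ordinary user code would not do:

#include <unsupported/Eigen/MatrixFunctions>

int main()
{
  Eigen::MatrixXd A(2, 2), B(1, 1), C(2, 1);
  A << 2, 1,
       0, 3;      // upper triangular
  B << 1;
  C << 1,
       2;
  // X solves A*X + X*B = C by back-substitution; here X = (1/6, 1/2)^T
  Eigen::MatrixXd X = Eigen::internal::matrix_function_solve_triangular_sylvester(A, B, C);
}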
+/** \brief Compute part of matrix function above block diagonal.
+ *
+ * This routine completes the computation of \p fT, the matrix function applied to the triangular
+ * matrix \p T. It assumes that the block diagonal part of \p fT has already been computed. The part below
+ * the diagonal is zero, because \p T is upper triangular.
+ */
+template <typename MatrixType, typename VectorType>
+void matrix_function_compute_above_diagonal(const MatrixType& T, const VectorType& blockStart, const VectorType& clusterSize, MatrixType& fT)
+{
+ typedef internal::traits<MatrixType> Traits;
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::Index Index;
+ static const int RowsAtCompileTime = Traits::RowsAtCompileTime;
+ static const int ColsAtCompileTime = Traits::ColsAtCompileTime;
+ static const int Options = MatrixType::Options;
+ typedef Matrix<Scalar, Dynamic, Dynamic, Options, RowsAtCompileTime, ColsAtCompileTime> DynMatrixType;
+
+ for (Index k = 1; k < clusterSize.rows(); k++) {
+ for (Index i = 0; i < clusterSize.rows() - k; i++) {
+ // compute (i, i+k) block
+ DynMatrixType A = T.block(blockStart(i), blockStart(i), clusterSize(i), clusterSize(i));
+ DynMatrixType B = -T.block(blockStart(i+k), blockStart(i+k), clusterSize(i+k), clusterSize(i+k));
+ DynMatrixType C = fT.block(blockStart(i), blockStart(i), clusterSize(i), clusterSize(i))
+ * T.block(blockStart(i), blockStart(i+k), clusterSize(i), clusterSize(i+k));
+ C -= T.block(blockStart(i), blockStart(i+k), clusterSize(i), clusterSize(i+k))
+ * fT.block(blockStart(i+k), blockStart(i+k), clusterSize(i+k), clusterSize(i+k));
+ for (Index m = i + 1; m < i + k; m++) {
+ C += fT.block(blockStart(i), blockStart(m), clusterSize(i), clusterSize(m))
+ * T.block(blockStart(m), blockStart(i+k), clusterSize(m), clusterSize(i+k));
+ C -= T.block(blockStart(i), blockStart(m), clusterSize(i), clusterSize(m))
+ * fT.block(blockStart(m), blockStart(i+k), clusterSize(m), clusterSize(i+k));
+ }
+ fT.block(blockStart(i), blockStart(i+k), clusterSize(i), clusterSize(i+k))
+ = matrix_function_solve_triangular_sylvester(A, B, C);
+ }
+ }
+}
+
+/** \ingroup MatrixFunctions_Module
+ * \brief Class for computing matrix functions.
+ * \tparam MatrixType type of the argument of the matrix function,
+ * expected to be an instantiation of the Matrix class template.
+ * \tparam AtomicType type for computing matrix function of atomic blocks (a template parameter of run()).
+ * \tparam IsComplex used internally to select correct specialization.
+ *
+ * This class implements the Schur-Parlett algorithm for computing matrix functions. The spectrum of the
+ * matrix is divided into clusters of eigenvalues that lie close together. The computation of the matrix
+ * function on every block corresponding to these clusters is delegated to an object of type
+ * \p AtomicType, and these results are used to compute the matrix function of the whole matrix. The class
+ * \p AtomicType should have a \p compute() member function for computing the matrix function of a block.
+ *
+ * \sa class MatrixFunctionAtomic, class MatrixLogarithmAtomic
+ */
+template <typename MatrixType, int IsComplex = NumTraits<typename internal::traits<MatrixType>::Scalar>::IsComplex>
+struct matrix_function_compute
+{
+ /** \brief Compute the matrix function.
+ *
+ * \param[in] A argument of matrix function, should be a square matrix.
+ * \param[in] atomic class for computing matrix function of atomic blocks.
+ * \param[out] result the matrix function applied to \p A, with the diagonal blocks
+ * computed by \p atomic.
+ *
+ * See MatrixBase::matrixFunction() for details on how this computation
+ * is implemented.
+ */
+ template <typename AtomicType, typename ResultType>
+ static void run(const MatrixType& A, AtomicType& atomic, ResultType &result);
+};
+
+/** \internal \ingroup MatrixFunctions_Module
+ * \brief Partial specialization of matrix_function_compute for real matrices
+ *
+ * This specialization converts the real matrix to a complex matrix, computes the matrix function of that
+ * matrix, and then converts the result back to a real matrix.
+ */
+template <typename MatrixType>
+struct matrix_function_compute<MatrixType, 0>
+{
+ template <typename AtomicType, typename ResultType>
+ static void run(const MatrixType& A, AtomicType& atomic, ResultType &result)
+ {
+ typedef internal::traits<MatrixType> Traits;
+ typedef typename Traits::Scalar Scalar;
+ static const int Rows = Traits::RowsAtCompileTime, Cols = Traits::ColsAtCompileTime;
+ static const int MaxRows = Traits::MaxRowsAtCompileTime, MaxCols = Traits::MaxColsAtCompileTime;
+
+ typedef std::complex<Scalar> ComplexScalar;
+ typedef Matrix<ComplexScalar, Rows, Cols, 0, MaxRows, MaxCols> ComplexMatrix;
+
+ ComplexMatrix CA = A.template cast<ComplexScalar>();
+ ComplexMatrix Cresult;
+ matrix_function_compute<ComplexMatrix>::run(CA, atomic, Cresult);
+ result = Cresult.real();
+ }
+};
+
+/** \internal \ingroup MatrixFunctions_Module
+ * \brief Partial specialization of matrix_function_compute for complex matrices
+ */
+template <typename MatrixType>
+struct matrix_function_compute<MatrixType, 1>
+{
+ template <typename AtomicType, typename ResultType>
+ static void run(const MatrixType& A, AtomicType& atomic, ResultType &result)
+ {
+ typedef internal::traits<MatrixType> Traits;
+ typedef typename MatrixType::Index Index;
+
+ // compute Schur decomposition of A
+ const ComplexSchur<MatrixType> schurOfA(A);
+ MatrixType T = schurOfA.matrixT();
+ MatrixType U = schurOfA.matrixU();
+
+ // partition eigenvalues into clusters of ei'vals "close" to each other
+ std::list<std::list<Index> > clusters;
+ matrix_function_partition_eigenvalues(T.diagonal(), clusters);
+
+ // compute size of each cluster
+ Matrix<Index, Dynamic, 1> clusterSize;
+ matrix_function_compute_cluster_size(clusters, clusterSize);
+
+ // blockStart[i] is row index at which block corresponding to i-th cluster starts
+ Matrix<Index, Dynamic, 1> blockStart;
+ matrix_function_compute_block_start(clusterSize, blockStart);
+
+ // compute map so that eivalToCluster[i] = j means that i-th ei'val is in j-th cluster
+ Matrix<Index, Dynamic, 1> eivalToCluster;
+ matrix_function_compute_map(T.diagonal(), clusters, eivalToCluster);
+
+ // compute permutation which groups ei'vals in same cluster together
+ Matrix<Index, Traits::RowsAtCompileTime, 1> permutation;
+ matrix_function_compute_permutation(blockStart, eivalToCluster, permutation);
+
+ // permute Schur decomposition
+ matrix_function_permute_schur(permutation, U, T);
+
+ // compute result
+ MatrixType fT; // matrix function applied to T
+ matrix_function_compute_block_atomic(T, atomic, blockStart, clusterSize, fT);
+ matrix_function_compute_above_diagonal(T, blockStart, clusterSize, fT);
+ result = U * (fT.template triangularView<Upper>() * U.adjoint());
+ }
+};
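In short, the complex specialization implements the Parlett recurrence on the Schur form: with \f$ A = U T U^* \f$, it computes \f$ f(A) = U f(T) U^* \f$, where the diagonal blocks of \f$ f(T) \f$ come from the atomic solver and the off-diagonal blocks from the Sylvester equations above.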
+
+} // end of namespace internal
+
/** \ingroup MatrixFunctions_Module
*
* \brief Proxy for the matrix function of some matrix (expression).
*
* \tparam Derived Type of the argument to the matrix function.
*
- * This class holds the argument to the matrix function until it is
- * assigned or evaluated for some other reason (so the argument
- * should not be changed in the meantime). It is the return type of
- * matrixBase::matrixFunction() and related functions and most of the
- * time this is the only way it is used.
+ * This class holds the argument to the matrix function until it is assigned or evaluated for some other
+ * reason (so the argument should not be changed in the meantime). It is the return type of
+ * MatrixBase::matrixFunction() and related functions, and most of the time this is the only way it is used.
*/
template<typename Derived> class MatrixFunctionReturnValue
: public ReturnByValue<MatrixFunctionReturnValue<Derived> >
{
public:
-
typedef typename Derived::Scalar Scalar;
typedef typename Derived::Index Index;
typedef typename internal::stem_function<Scalar>::type StemFunction;
- /** \brief Constructor.
+ protected:
+ typedef typename internal::ref_selector<Derived>::type DerivedNested;
+
+ public:
+
+ /** \brief Constructor.
*
- * \param[in] A %Matrix (expression) forming the argument of the
- * matrix function.
+ * \param[in] A %Matrix (expression) forming the argument of the matrix function.
* \param[in] f Stem function for matrix function under consideration.
*/
MatrixFunctionReturnValue(const Derived& A, StemFunction f) : m_A(A), m_f(f) { }
/** \brief Compute the matrix function.
*
- * \param[out] result \p f applied to \p A, where \p f and \p A
- * are as in the constructor.
+ * \param[out] result \p f applied to \p A, where \p f and \p A are as in the constructor.
*/
template <typename ResultType>
inline void evalTo(ResultType& result) const
{
- typedef typename Derived::PlainObject PlainObject;
- typedef internal::traits<PlainObject> Traits;
+ typedef typename internal::nested_eval<Derived, 10>::type NestedEvalType;
+ typedef typename internal::remove_all<NestedEvalType>::type NestedEvalTypeClean;
+ typedef internal::traits<NestedEvalTypeClean> Traits;
static const int RowsAtCompileTime = Traits::RowsAtCompileTime;
static const int ColsAtCompileTime = Traits::ColsAtCompileTime;
- static const int Options = PlainObject::Options;
typedef std::complex<typename NumTraits<Scalar>::Real> ComplexScalar;
- typedef Matrix<ComplexScalar, Dynamic, Dynamic, Options, RowsAtCompileTime, ColsAtCompileTime> DynMatrixType;
- typedef MatrixFunctionAtomic<DynMatrixType> AtomicType;
+ typedef Matrix<ComplexScalar, Dynamic, Dynamic, 0, RowsAtCompileTime, ColsAtCompileTime> DynMatrixType;
+
+ typedef internal::MatrixFunctionAtomic<DynMatrixType> AtomicType;
AtomicType atomic(m_f);
- const PlainObject Aevaluated = m_A.eval();
- MatrixFunction<PlainObject, AtomicType> mf(Aevaluated, atomic);
- mf.compute(result);
+ internal::matrix_function_compute<NestedEvalTypeClean>::run(m_A, atomic, result);
}
Index rows() const { return m_A.rows(); }
Index cols() const { return m_A.cols(); }
private:
- typename internal::nested<Derived>::type m_A;
+ const DerivedNested m_A;
StemFunction *m_f;
-
- MatrixFunctionReturnValue& operator=(const MatrixFunctionReturnValue&);
};
namespace internal {
@@ -559,7 +549,7 @@ const MatrixFunctionReturnValue<Derived> MatrixBase<Derived>::sin() const
{
eigen_assert(rows() == cols());
typedef typename internal::stem_function<Scalar>::ComplexScalar ComplexScalar;
- return MatrixFunctionReturnValue<Derived>(derived(), StdStemFunctions<ComplexScalar>::sin);
+ return MatrixFunctionReturnValue<Derived>(derived(), internal::stem_function_sin<ComplexScalar>);
}
template <typename Derived>
@@ -567,7 +557,7 @@ const MatrixFunctionReturnValue<Derived> MatrixBase<Derived>::cos() const
{
eigen_assert(rows() == cols());
typedef typename internal::stem_function<Scalar>::ComplexScalar ComplexScalar;
- return MatrixFunctionReturnValue<Derived>(derived(), StdStemFunctions<ComplexScalar>::cos);
+ return MatrixFunctionReturnValue<Derived>(derived(), internal::stem_function_cos<ComplexScalar>);
}
template <typename Derived>
@@ -575,7 +565,7 @@ const MatrixFunctionReturnValue<Derived> MatrixBase<Derived>::sinh() const
{
eigen_assert(rows() == cols());
typedef typename internal::stem_function<Scalar>::ComplexScalar ComplexScalar;
- return MatrixFunctionReturnValue<Derived>(derived(), StdStemFunctions<ComplexScalar>::sinh);
+ return MatrixFunctionReturnValue<Derived>(derived(), internal::stem_function_sinh<ComplexScalar>);
}
template <typename Derived>
@@ -583,7 +573,7 @@ const MatrixFunctionReturnValue<Derived> MatrixBase<Derived>::cosh() const
{
eigen_assert(rows() == cols());
typedef typename internal::stem_function<Scalar>::ComplexScalar ComplexScalar;
- return MatrixFunctionReturnValue<Derived>(derived(), StdStemFunctions<ComplexScalar>::cosh);
+ return MatrixFunctionReturnValue<Derived>(derived(), internal::stem_function_cosh<ComplexScalar>);
}
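A usage sketch for these entry points (not part of the patch); the identity check is only a sanity test:

#include <iostream>
#include <unsupported/Eigen/MatrixFunctions>

int main()
{
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(3, 3);
  Eigen::MatrixXd sinA = A.sin();  // returns a MatrixFunctionReturnValue proxy, evaluated on assignment
  Eigen::MatrixXd cosA = A.cos();
  // sin^2(A) + cos^2(A) = I carries over from the scalar identity
  std::cout << (sinA * sinA + cosA * cosA).isApprox(Eigen::MatrixXd::Identity(3, 3)) << "\n";
}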
} // end namespace Eigen
diff --git a/unsupported/Eigen/src/MatrixFunctions/MatrixFunctionAtomic.h b/unsupported/Eigen/src/MatrixFunctions/MatrixFunctionAtomic.h
deleted file mode 100644
index efe332c48..000000000
--- a/unsupported/Eigen/src/MatrixFunctions/MatrixFunctionAtomic.h
+++ /dev/null
@@ -1,131 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2009 Jitse Niesen <jitse@maths.leeds.ac.uk>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_MATRIX_FUNCTION_ATOMIC
-#define EIGEN_MATRIX_FUNCTION_ATOMIC
-
-namespace Eigen {
-
-/** \ingroup MatrixFunctions_Module
- * \class MatrixFunctionAtomic
- * \brief Helper class for computing matrix functions of atomic matrices.
- *
- * \internal
- * Here, an atomic matrix is a triangular matrix whose diagonal
- * entries are close to each other.
- */
-template <typename MatrixType>
-class MatrixFunctionAtomic
-{
- public:
-
- typedef typename MatrixType::Scalar Scalar;
- typedef typename MatrixType::Index Index;
- typedef typename NumTraits<Scalar>::Real RealScalar;
- typedef typename internal::stem_function<Scalar>::type StemFunction;
- typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> VectorType;
-
- /** \brief Constructor
- * \param[in] f matrix function to compute.
- */
- MatrixFunctionAtomic(StemFunction f) : m_f(f) { }
-
- /** \brief Compute matrix function of atomic matrix
- * \param[in] A argument of matrix function, should be upper triangular and atomic
- * \returns f(A), the matrix function evaluated at the given matrix
- */
- MatrixType compute(const MatrixType& A);
-
- private:
-
- // Prevent copying
- MatrixFunctionAtomic(const MatrixFunctionAtomic&);
- MatrixFunctionAtomic& operator=(const MatrixFunctionAtomic&);
-
- void computeMu();
- bool taylorConverged(Index s, const MatrixType& F, const MatrixType& Fincr, const MatrixType& P);
-
- /** \brief Pointer to scalar function */
- StemFunction* m_f;
-
- /** \brief Size of matrix function */
- Index m_Arows;
-
- /** \brief Mean of eigenvalues */
- Scalar m_avgEival;
-
- /** \brief Argument shifted by mean of eigenvalues */
- MatrixType m_Ashifted;
-
- /** \brief Constant used to determine whether Taylor series has converged */
- RealScalar m_mu;
-};
-
-template <typename MatrixType>
-MatrixType MatrixFunctionAtomic<MatrixType>::compute(const MatrixType& A)
-{
- // TODO: Use that A is upper triangular
- m_Arows = A.rows();
- m_avgEival = A.trace() / Scalar(RealScalar(m_Arows));
- m_Ashifted = A - m_avgEival * MatrixType::Identity(m_Arows, m_Arows);
- computeMu();
- MatrixType F = m_f(m_avgEival, 0) * MatrixType::Identity(m_Arows, m_Arows);
- MatrixType P = m_Ashifted;
- MatrixType Fincr;
- for (Index s = 1; s < 1.1 * m_Arows + 10; s++) { // upper limit is fairly arbitrary
- Fincr = m_f(m_avgEival, static_cast<int>(s)) * P;
- F += Fincr;
- P = Scalar(RealScalar(1.0/(s + 1))) * P * m_Ashifted;
- if (taylorConverged(s, F, Fincr, P)) {
- return F;
- }
- }
- eigen_assert("Taylor series does not converge" && 0);
- return F;
-}
-
-/** \brief Compute \c m_mu. */
-template <typename MatrixType>
-void MatrixFunctionAtomic<MatrixType>::computeMu()
-{
- const MatrixType N = MatrixType::Identity(m_Arows, m_Arows) - m_Ashifted;
- VectorType e = VectorType::Ones(m_Arows);
- N.template triangularView<Upper>().solveInPlace(e);
- m_mu = e.cwiseAbs().maxCoeff();
-}
-
-/** \brief Determine whether Taylor series has converged */
-template <typename MatrixType>
-bool MatrixFunctionAtomic<MatrixType>::taylorConverged(Index s, const MatrixType& F,
- const MatrixType& Fincr, const MatrixType& P)
-{
- const Index n = F.rows();
- const RealScalar F_norm = F.cwiseAbs().rowwise().sum().maxCoeff();
- const RealScalar Fincr_norm = Fincr.cwiseAbs().rowwise().sum().maxCoeff();
- if (Fincr_norm < NumTraits<Scalar>::epsilon() * F_norm) {
- RealScalar delta = 0;
- RealScalar rfactorial = 1;
- for (Index r = 0; r < n; r++) {
- RealScalar mx = 0;
- for (Index i = 0; i < n; i++)
- mx = (std::max)(mx, std::abs(m_f(m_Ashifted(i, i) + m_avgEival, static_cast<int>(s+r))));
- if (r != 0)
- rfactorial *= RealScalar(r);
- delta = (std::max)(delta, mx / rfactorial);
- }
- const RealScalar P_norm = P.cwiseAbs().rowwise().sum().maxCoeff();
- if (m_mu * delta * P_norm < NumTraits<Scalar>::epsilon() * F_norm)
- return true;
- }
- return false;
-}
-
-} // end namespace Eigen
-
-#endif // EIGEN_MATRIX_FUNCTION_ATOMIC
diff --git a/unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h b/unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h
index c744fc05f..1acfbed9e 100644
--- a/unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h
+++ b/unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2011 Jitse Niesen <jitse@maths.leeds.ac.uk>
+// Copyright (C) 2011, 2013 Jitse Niesen <jitse@maths.leeds.ac.uk>
// Copyright (C) 2011 Chen-Pang He <jdh8@ms63.hinet.net>
//
// This Source Code Form is subject to the terms of the Mozilla
@@ -11,91 +11,33 @@
#ifndef EIGEN_MATRIX_LOGARITHM
#define EIGEN_MATRIX_LOGARITHM
-#ifndef M_PI
-#define M_PI 3.141592653589793238462643383279503L
-#endif
-
namespace Eigen {
-/** \ingroup MatrixFunctions_Module
- * \class MatrixLogarithmAtomic
- * \brief Helper class for computing matrix logarithm of atomic matrices.
- *
- * \internal
- * Here, an atomic matrix is a triangular matrix whose diagonal
- * entries are close to each other.
- *
- * \sa class MatrixFunctionAtomic, MatrixBase::log()
- */
-template <typename MatrixType>
-class MatrixLogarithmAtomic
-{
-public:
-
- typedef typename MatrixType::Scalar Scalar;
- // typedef typename MatrixType::Index Index;
- typedef typename NumTraits<Scalar>::Real RealScalar;
- // typedef typename internal::stem_function<Scalar>::type StemFunction;
- // typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> VectorType;
-
- /** \brief Constructor. */
- MatrixLogarithmAtomic() { }
-
- /** \brief Compute matrix logarithm of atomic matrix
- * \param[in] A argument of matrix logarithm, should be upper triangular and atomic
- * \returns The logarithm of \p A.
- */
- MatrixType compute(const MatrixType& A);
-
-private:
+namespace internal {
- void compute2x2(const MatrixType& A, MatrixType& result);
- void computeBig(const MatrixType& A, MatrixType& result);
- int getPadeDegree(float normTminusI);
- int getPadeDegree(double normTminusI);
- int getPadeDegree(long double normTminusI);
- void computePade(MatrixType& result, const MatrixType& T, int degree);
- void computePade3(MatrixType& result, const MatrixType& T);
- void computePade4(MatrixType& result, const MatrixType& T);
- void computePade5(MatrixType& result, const MatrixType& T);
- void computePade6(MatrixType& result, const MatrixType& T);
- void computePade7(MatrixType& result, const MatrixType& T);
- void computePade8(MatrixType& result, const MatrixType& T);
- void computePade9(MatrixType& result, const MatrixType& T);
- void computePade10(MatrixType& result, const MatrixType& T);
- void computePade11(MatrixType& result, const MatrixType& T);
-
- static const int minPadeDegree = 3;
- static const int maxPadeDegree = std::numeric_limits<RealScalar>::digits<= 24? 5: // single precision
- std::numeric_limits<RealScalar>::digits<= 53? 7: // double precision
- std::numeric_limits<RealScalar>::digits<= 64? 8: // extended precision
- std::numeric_limits<RealScalar>::digits<=106? 10: // double-double
- 11; // quadruple precision
-
- // Prevent copying
- MatrixLogarithmAtomic(const MatrixLogarithmAtomic&);
- MatrixLogarithmAtomic& operator=(const MatrixLogarithmAtomic&);
+template <typename Scalar>
+struct matrix_log_min_pade_degree
+{
+ static const int value = 3;
};
-/** \brief Compute logarithm of triangular matrix with clustered eigenvalues. */
-template <typename MatrixType>
-MatrixType MatrixLogarithmAtomic<MatrixType>::compute(const MatrixType& A)
+template <typename Scalar>
+struct matrix_log_max_pade_degree
{
- using std::log;
- MatrixType result(A.rows(), A.rows());
- if (A.rows() == 1)
- result(0,0) = log(A(0,0));
- else if (A.rows() == 2)
- compute2x2(A, result);
- else
- computeBig(A, result);
- return result;
-}
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+ static const int value = std::numeric_limits<RealScalar>::digits<= 24? 5: // single precision
+ std::numeric_limits<RealScalar>::digits<= 53? 7: // double precision
+ std::numeric_limits<RealScalar>::digits<= 64? 8: // extended precision
+ std::numeric_limits<RealScalar>::digits<=106? 10: // double-double
+ 11; // quadruple precision
+};
/** \brief Compute logarithm of 2x2 triangular matrix. */
template <typename MatrixType>
-void MatrixLogarithmAtomic<MatrixType>::compute2x2(const MatrixType& A, MatrixType& result)
+void matrix_log_compute_2x2(const MatrixType& A, MatrixType& result)
{
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::RealScalar RealScalar;
using std::abs;
using std::ceil;
using std::imag;
@@ -108,59 +50,31 @@ void MatrixLogarithmAtomic<MatrixType>::compute2x2(const MatrixType& A, MatrixTy
result(1,0) = Scalar(0);
result(1,1) = logA11;
- if (A(0,0) == A(1,1)) {
+ Scalar y = A(1,1) - A(0,0);
+ if (y==Scalar(0))
+ {
result(0,1) = A(0,1) / A(0,0);
- } else if ((abs(A(0,0)) < 0.5*abs(A(1,1))) || (abs(A(0,0)) > 2*abs(A(1,1)))) {
- result(0,1) = A(0,1) * (logA11 - logA00) / (A(1,1) - A(0,0));
- } else {
- // computation in previous branch is inaccurate if A(1,1) \approx A(0,0)
- int unwindingNumber = static_cast<int>(ceil((imag(logA11 - logA00) - M_PI) / (2*M_PI)));
- Scalar y = A(1,1) - A(0,0), x = A(1,1) + A(0,0);
- result(0,1) = A(0,1) * (Scalar(2) * numext::atanh2(y,x) + Scalar(0,2*M_PI*unwindingNumber)) / y;
}
-}
-
-/** \brief Compute logarithm of triangular matrices with size > 2.
- * \details This uses a inverse scale-and-square algorithm. */
-template <typename MatrixType>
-void MatrixLogarithmAtomic<MatrixType>::computeBig(const MatrixType& A, MatrixType& result)
-{
- using std::pow;
- int numberOfSquareRoots = 0;
- int numberOfExtraSquareRoots = 0;
- int degree;
- MatrixType T = A, sqrtT;
- const RealScalar maxNormForPade = maxPadeDegree<= 5? 5.3149729967117310e-1: // single precision
- maxPadeDegree<= 7? 2.6429608311114350e-1: // double precision
- maxPadeDegree<= 8? 2.32777776523703892094e-1L: // extended precision
- maxPadeDegree<=10? 1.05026503471351080481093652651105e-1L: // double-double
- 1.1880960220216759245467951592883642e-1L; // quadruple precision
-
- while (true) {
- RealScalar normTminusI = (T - MatrixType::Identity(T.rows(), T.rows())).cwiseAbs().colwise().sum().maxCoeff();
- if (normTminusI < maxNormForPade) {
- degree = getPadeDegree(normTminusI);
- int degree2 = getPadeDegree(normTminusI / RealScalar(2));
- if ((degree - degree2 <= 1) || (numberOfExtraSquareRoots == 1))
- break;
- ++numberOfExtraSquareRoots;
- }
- MatrixSquareRootTriangular<MatrixType>(T).compute(sqrtT);
- T = sqrtT.template triangularView<Upper>();
- ++numberOfSquareRoots;
+ else if ((abs(A(0,0)) < RealScalar(0.5)*abs(A(1,1))) || (abs(A(0,0)) > 2*abs(A(1,1))))
+ {
+ result(0,1) = A(0,1) * (logA11 - logA00) / y;
+ }
+ else
+ {
+ // computation in previous branch is inaccurate if A(1,1) \approx A(0,0)
+ int unwindingNumber = static_cast<int>(ceil((imag(logA11 - logA00) - RealScalar(EIGEN_PI)) / RealScalar(2*EIGEN_PI)));
+ result(0,1) = A(0,1) * (numext::log1p(y/A(0,0)) + Scalar(0,2*EIGEN_PI*unwindingNumber)) / y;
}
-
- computePade(result, T, degree);
- result *= pow(RealScalar(2), numberOfSquareRoots);
}
/* \brief Get suitable degree for Pade approximation. (specialized for RealScalar = float) */
-template <typename MatrixType>
-int MatrixLogarithmAtomic<MatrixType>::getPadeDegree(float normTminusI)
+inline int matrix_log_get_pade_degree(float normTminusI)
{
const float maxNormForPade[] = { 2.5111573934555054e-1 /* degree = 3 */ , 4.0535837411880493e-1,
5.3149729967117310e-1 };
- int degree = 3;
+ const int minPadeDegree = matrix_log_min_pade_degree<float>::value;
+ const int maxPadeDegree = matrix_log_max_pade_degree<float>::value;
+ int degree = minPadeDegree;
for (; degree <= maxPadeDegree; ++degree)
if (normTminusI <= maxNormForPade[degree - minPadeDegree])
break;
@@ -168,12 +82,13 @@ int MatrixLogarithmAtomic<MatrixType>::getPadeDegree(float normTminusI)
}
/* \brief Get suitable degree for Pade approximation. (specialized for RealScalar = double) */
-template <typename MatrixType>
-int MatrixLogarithmAtomic<MatrixType>::getPadeDegree(double normTminusI)
+inline int matrix_log_get_pade_degree(double normTminusI)
{
const double maxNormForPade[] = { 1.6206284795015624e-2 /* degree = 3 */ , 5.3873532631381171e-2,
1.1352802267628681e-1, 1.8662860613541288e-1, 2.642960831111435e-1 };
- int degree = 3;
+ const int minPadeDegree = matrix_log_min_pade_degree<double>::value;
+ const int maxPadeDegree = matrix_log_max_pade_degree<double>::value;
+ int degree = minPadeDegree;
for (; degree <= maxPadeDegree; ++degree)
if (normTminusI <= maxNormForPade[degree - minPadeDegree])
break;
@@ -181,8 +96,7 @@ int MatrixLogarithmAtomic<MatrixType>::getPadeDegree(double normTminusI)
}
/* \brief Get suitable degree for Pade approximation. (specialized for RealScalar = long double) */
-template <typename MatrixType>
-int MatrixLogarithmAtomic<MatrixType>::getPadeDegree(long double normTminusI)
+inline int matrix_log_get_pade_degree(long double normTminusI)
{
#if LDBL_MANT_DIG == 53 // double precision
const long double maxNormForPade[] = { 1.6206284795015624e-2L /* degree = 3 */ , 5.3873532631381171e-2L,
@@ -204,7 +118,9 @@ int MatrixLogarithmAtomic<MatrixType>::getPadeDegree(long double normTminusI)
3.6688019729653446926585242192447447e-2L, 5.9290962294020186998954055264528393e-2L,
8.6998436081634343903250580992127677e-2L, 1.1880960220216759245467951592883642e-1L };
#endif
- int degree = 3;
+ const int minPadeDegree = matrix_log_min_pade_degree<long double>::value;
+ const int maxPadeDegree = matrix_log_max_pade_degree<long double>::value;
+ int degree = minPadeDegree;
for (; degree <= maxPadeDegree; ++degree)
if (normTminusI <= maxNormForPade[degree - minPadeDegree])
break;
@@ -213,197 +129,168 @@ int MatrixLogarithmAtomic<MatrixType>::getPadeDegree(long double normTminusI)
/* \brief Compute Pade approximation to matrix logarithm */
template <typename MatrixType>
-void MatrixLogarithmAtomic<MatrixType>::computePade(MatrixType& result, const MatrixType& T, int degree)
+void matrix_log_compute_pade(MatrixType& result, const MatrixType& T, int degree)
{
- switch (degree) {
- case 3: computePade3(result, T); break;
- case 4: computePade4(result, T); break;
- case 5: computePade5(result, T); break;
- case 6: computePade6(result, T); break;
- case 7: computePade7(result, T); break;
- case 8: computePade8(result, T); break;
- case 9: computePade9(result, T); break;
- case 10: computePade10(result, T); break;
- case 11: computePade11(result, T); break;
- default: assert(false); // should never happen
- }
-}
+ typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
+ const int minPadeDegree = 3;
+ const int maxPadeDegree = 11;
+ assert(degree >= minPadeDegree && degree <= maxPadeDegree);
+
+ const RealScalar nodes[][maxPadeDegree] = {
+ { 0.1127016653792583114820734600217600L, 0.5000000000000000000000000000000000L, // degree 3
+ 0.8872983346207416885179265399782400L },
+ { 0.0694318442029737123880267555535953L, 0.3300094782075718675986671204483777L, // degree 4
+ 0.6699905217924281324013328795516223L, 0.9305681557970262876119732444464048L },
+ { 0.0469100770306680036011865608503035L, 0.2307653449471584544818427896498956L, // degree 5
+ 0.5000000000000000000000000000000000L, 0.7692346550528415455181572103501044L,
+ 0.9530899229693319963988134391496965L },
+ { 0.0337652428984239860938492227530027L, 0.1693953067668677431693002024900473L, // degree 6
+ 0.3806904069584015456847491391596440L, 0.6193095930415984543152508608403560L,
+ 0.8306046932331322568306997975099527L, 0.9662347571015760139061507772469973L },
+ { 0.0254460438286207377369051579760744L, 0.1292344072003027800680676133596058L, // degree 7
+ 0.2970774243113014165466967939615193L, 0.5000000000000000000000000000000000L,
+ 0.7029225756886985834533032060384807L, 0.8707655927996972199319323866403942L,
+ 0.9745539561713792622630948420239256L },
+ { 0.0198550717512318841582195657152635L, 0.1016667612931866302042230317620848L, // degree 8
+ 0.2372337950418355070911304754053768L, 0.4082826787521750975302619288199080L,
+ 0.5917173212478249024697380711800920L, 0.7627662049581644929088695245946232L,
+ 0.8983332387068133697957769682379152L, 0.9801449282487681158417804342847365L },
+ { 0.0159198802461869550822118985481636L, 0.0819844463366821028502851059651326L, // degree 9
+ 0.1933142836497048013456489803292629L, 0.3378732882980955354807309926783317L,
+ 0.5000000000000000000000000000000000L, 0.6621267117019044645192690073216683L,
+ 0.8066857163502951986543510196707371L, 0.9180155536633178971497148940348674L,
+ 0.9840801197538130449177881014518364L },
+ { 0.0130467357414141399610179939577740L, 0.0674683166555077446339516557882535L, // degree 10
+ 0.1602952158504877968828363174425632L, 0.2833023029353764046003670284171079L,
+ 0.4255628305091843945575869994351400L, 0.5744371694908156054424130005648600L,
+ 0.7166976970646235953996329715828921L, 0.8397047841495122031171636825574368L,
+ 0.9325316833444922553660483442117465L, 0.9869532642585858600389820060422260L },
+ { 0.0108856709269715035980309994385713L, 0.0564687001159523504624211153480364L, // degree 11
+ 0.1349239972129753379532918739844233L, 0.2404519353965940920371371652706952L,
+ 0.3652284220238275138342340072995692L, 0.5000000000000000000000000000000000L,
+ 0.6347715779761724861657659927004308L, 0.7595480646034059079628628347293048L,
+ 0.8650760027870246620467081260155767L, 0.9435312998840476495375788846519636L,
+ 0.9891143290730284964019690005614287L } };
+
+ const RealScalar weights[][maxPadeDegree] = {
+ { 0.2777777777777777777777777777777778L, 0.4444444444444444444444444444444444L, // degree 3
+ 0.2777777777777777777777777777777778L },
+ { 0.1739274225687269286865319746109997L, 0.3260725774312730713134680253890003L, // degree 4
+ 0.3260725774312730713134680253890003L, 0.1739274225687269286865319746109997L },
+ { 0.1184634425280945437571320203599587L, 0.2393143352496832340206457574178191L, // degree 5
+ 0.2844444444444444444444444444444444L, 0.2393143352496832340206457574178191L,
+ 0.1184634425280945437571320203599587L },
+ { 0.0856622461895851725201480710863665L, 0.1803807865240693037849167569188581L, // degree 6
+ 0.2339569672863455236949351719947755L, 0.2339569672863455236949351719947755L,
+ 0.1803807865240693037849167569188581L, 0.0856622461895851725201480710863665L },
+ { 0.0647424830844348466353057163395410L, 0.1398526957446383339507338857118898L, // degree 7
+ 0.1909150252525594724751848877444876L, 0.2089795918367346938775510204081633L,
+ 0.1909150252525594724751848877444876L, 0.1398526957446383339507338857118898L,
+ 0.0647424830844348466353057163395410L },
+ { 0.0506142681451881295762656771549811L, 0.1111905172266872352721779972131204L, // degree 8
+ 0.1568533229389436436689811009933007L, 0.1813418916891809914825752246385978L,
+ 0.1813418916891809914825752246385978L, 0.1568533229389436436689811009933007L,
+ 0.1111905172266872352721779972131204L, 0.0506142681451881295762656771549811L },
+ { 0.0406371941807872059859460790552618L, 0.0903240803474287020292360156214564L, // degree 9
+ 0.1303053482014677311593714347093164L, 0.1561735385200014200343152032922218L,
+ 0.1651196775006298815822625346434870L, 0.1561735385200014200343152032922218L,
+ 0.1303053482014677311593714347093164L, 0.0903240803474287020292360156214564L,
+ 0.0406371941807872059859460790552618L },
+ { 0.0333356721543440687967844049466659L, 0.0747256745752902965728881698288487L, // degree 10
+ 0.1095431812579910219977674671140816L, 0.1346333596549981775456134607847347L,
+ 0.1477621123573764350869464973256692L, 0.1477621123573764350869464973256692L,
+ 0.1346333596549981775456134607847347L, 0.1095431812579910219977674671140816L,
+ 0.0747256745752902965728881698288487L, 0.0333356721543440687967844049466659L },
+ { 0.0278342835580868332413768602212743L, 0.0627901847324523123173471496119701L, // degree 11
+ 0.0931451054638671257130488207158280L, 0.1165968822959952399592618524215876L,
+ 0.1314022722551233310903444349452546L, 0.1364625433889503153572417641681711L,
+ 0.1314022722551233310903444349452546L, 0.1165968822959952399592618524215876L,
+ 0.0931451054638671257130488207158280L, 0.0627901847324523123173471496119701L,
+ 0.0278342835580868332413768602212743L } };
-template <typename MatrixType>
-void MatrixLogarithmAtomic<MatrixType>::computePade3(MatrixType& result, const MatrixType& T)
-{
- const int degree = 3;
- const RealScalar nodes[] = { 0.1127016653792583114820734600217600L, 0.5000000000000000000000000000000000L,
- 0.8872983346207416885179265399782400L };
- const RealScalar weights[] = { 0.2777777777777777777777777777777778L, 0.4444444444444444444444444444444444L,
- 0.2777777777777777777777777777777778L };
- eigen_assert(degree <= maxPadeDegree);
MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
result.setZero(T.rows(), T.rows());
- for (int k = 0; k < degree; ++k)
- result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI)
- .template triangularView<Upper>().solve(TminusI);
-}
+ for (int k = 0; k < degree; ++k) {
+ RealScalar weight = weights[degree-minPadeDegree][k];
+ RealScalar node = nodes[degree-minPadeDegree][k];
+ result += weight * (MatrixType::Identity(T.rows(), T.rows()) + node * TminusI)
+ .template triangularView<Upper>().solve(TminusI);
+ }
+}
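
The loop above is the partial-fraction form of the diagonal Padé approximant of the logarithm: in scalar terms, \f$ \log(1+x) \approx \sum_k w_k \, x / (1 + x_k x) \f$ with \f$ x_k, w_k \f$ the Gauss-Legendre nodes and weights on \f$ [0,1] \f$ tabulated above; each term of the matrix version costs one triangular solve. A scalar sketch (plain C++, not Eigen API) using the degree-3 row of the tables:

    #include <cmath>
    #include <cstdio>

    int main()
    {
      const double nodes[3]   = { 0.1127016653792583, 0.5, 0.8872983346207417 };
      const double weights[3] = { 0.2777777777777778, 0.4444444444444444,
                                  0.2777777777777778 };
      const double x = 0.2;                           // plays the role of T - I
      double r = 0;
      for (int k = 0; k < 3; ++k)
        r += weights[k] * x / (1 + nodes[k] * x);     // scalar form of the solve step
      std::printf("%.9f vs %.9f\n", r, std::log1p(x)); // both ~0.1823216
      return 0;
    }
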
+/** \brief Compute logarithm of triangular matrices with size > 2.
+ * \details This uses an inverse scale-and-square algorithm. */
template <typename MatrixType>
-void MatrixLogarithmAtomic<MatrixType>::computePade4(MatrixType& result, const MatrixType& T)
+void matrix_log_compute_big(const MatrixType& A, MatrixType& result)
{
- const int degree = 4;
- const RealScalar nodes[] = { 0.0694318442029737123880267555535953L, 0.3300094782075718675986671204483777L,
- 0.6699905217924281324013328795516223L, 0.9305681557970262876119732444464048L };
- const RealScalar weights[] = { 0.1739274225687269286865319746109997L, 0.3260725774312730713134680253890003L,
- 0.3260725774312730713134680253890003L, 0.1739274225687269286865319746109997L };
- eigen_assert(degree <= maxPadeDegree);
- MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
- result.setZero(T.rows(), T.rows());
- for (int k = 0; k < degree; ++k)
- result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI)
- .template triangularView<Upper>().solve(TminusI);
-}
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+ using std::pow;
-template <typename MatrixType>
-void MatrixLogarithmAtomic<MatrixType>::computePade5(MatrixType& result, const MatrixType& T)
-{
- const int degree = 5;
- const RealScalar nodes[] = { 0.0469100770306680036011865608503035L, 0.2307653449471584544818427896498956L,
- 0.5000000000000000000000000000000000L, 0.7692346550528415455181572103501044L,
- 0.9530899229693319963988134391496965L };
- const RealScalar weights[] = { 0.1184634425280945437571320203599587L, 0.2393143352496832340206457574178191L,
- 0.2844444444444444444444444444444444L, 0.2393143352496832340206457574178191L,
- 0.1184634425280945437571320203599587L };
- eigen_assert(degree <= maxPadeDegree);
- MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
- result.setZero(T.rows(), T.rows());
- for (int k = 0; k < degree; ++k)
- result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI)
- .template triangularView<Upper>().solve(TminusI);
-}
+ int numberOfSquareRoots = 0;
+ int numberOfExtraSquareRoots = 0;
+ int degree;
+ MatrixType T = A, sqrtT;
-template <typename MatrixType>
-void MatrixLogarithmAtomic<MatrixType>::computePade6(MatrixType& result, const MatrixType& T)
-{
- const int degree = 6;
- const RealScalar nodes[] = { 0.0337652428984239860938492227530027L, 0.1693953067668677431693002024900473L,
- 0.3806904069584015456847491391596440L, 0.6193095930415984543152508608403560L,
- 0.8306046932331322568306997975099527L, 0.9662347571015760139061507772469973L };
- const RealScalar weights[] = { 0.0856622461895851725201480710863665L, 0.1803807865240693037849167569188581L,
- 0.2339569672863455236949351719947755L, 0.2339569672863455236949351719947755L,
- 0.1803807865240693037849167569188581L, 0.0856622461895851725201480710863665L };
- eigen_assert(degree <= maxPadeDegree);
- MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
- result.setZero(T.rows(), T.rows());
- for (int k = 0; k < degree; ++k)
- result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI)
- .template triangularView<Upper>().solve(TminusI);
-}
+ int maxPadeDegree = matrix_log_max_pade_degree<Scalar>::value;
+ const RealScalar maxNormForPade = maxPadeDegree<= 5? 5.3149729967117310e-1L: // single precision
+ maxPadeDegree<= 7? 2.6429608311114350e-1L: // double precision
+ maxPadeDegree<= 8? 2.32777776523703892094e-1L: // extended precision
+ maxPadeDegree<=10? 1.05026503471351080481093652651105e-1L: // double-double
+ 1.1880960220216759245467951592883642e-1L; // quadruple precision
-template <typename MatrixType>
-void MatrixLogarithmAtomic<MatrixType>::computePade7(MatrixType& result, const MatrixType& T)
-{
- const int degree = 7;
- const RealScalar nodes[] = { 0.0254460438286207377369051579760744L, 0.1292344072003027800680676133596058L,
- 0.2970774243113014165466967939615193L, 0.5000000000000000000000000000000000L,
- 0.7029225756886985834533032060384807L, 0.8707655927996972199319323866403942L,
- 0.9745539561713792622630948420239256L };
- const RealScalar weights[] = { 0.0647424830844348466353057163395410L, 0.1398526957446383339507338857118898L,
- 0.1909150252525594724751848877444876L, 0.2089795918367346938775510204081633L,
- 0.1909150252525594724751848877444876L, 0.1398526957446383339507338857118898L,
- 0.0647424830844348466353057163395410L };
- eigen_assert(degree <= maxPadeDegree);
- MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
- result.setZero(T.rows(), T.rows());
- for (int k = 0; k < degree; ++k)
- result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI)
- .template triangularView<Upper>().solve(TminusI);
-}
+ while (true) {
+ RealScalar normTminusI = (T - MatrixType::Identity(T.rows(), T.rows())).cwiseAbs().colwise().sum().maxCoeff();
+ if (normTminusI < maxNormForPade) {
+ degree = matrix_log_get_pade_degree(normTminusI);
+ int degree2 = matrix_log_get_pade_degree(normTminusI / RealScalar(2));
+ if ((degree - degree2 <= 1) || (numberOfExtraSquareRoots == 1))
+ break;
+ ++numberOfExtraSquareRoots;
+ }
+ matrix_sqrt_triangular(T, sqrtT);
+ T = sqrtT.template triangularView<Upper>();
+ ++numberOfSquareRoots;
+ }
-template <typename MatrixType>
-void MatrixLogarithmAtomic<MatrixType>::computePade8(MatrixType& result, const MatrixType& T)
-{
- const int degree = 8;
- const RealScalar nodes[] = { 0.0198550717512318841582195657152635L, 0.1016667612931866302042230317620848L,
- 0.2372337950418355070911304754053768L, 0.4082826787521750975302619288199080L,
- 0.5917173212478249024697380711800920L, 0.7627662049581644929088695245946232L,
- 0.8983332387068133697957769682379152L, 0.9801449282487681158417804342847365L };
- const RealScalar weights[] = { 0.0506142681451881295762656771549811L, 0.1111905172266872352721779972131204L,
- 0.1568533229389436436689811009933007L, 0.1813418916891809914825752246385978L,
- 0.1813418916891809914825752246385978L, 0.1568533229389436436689811009933007L,
- 0.1111905172266872352721779972131204L, 0.0506142681451881295762656771549811L };
- eigen_assert(degree <= maxPadeDegree);
- MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
- result.setZero(T.rows(), T.rows());
- for (int k = 0; k < degree; ++k)
- result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI)
- .template triangularView<Upper>().solve(TminusI);
+ matrix_log_compute_pade(result, T, degree);
+ result *= pow(RealScalar(2), numberOfSquareRoots);
}
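
The driver above is inverse scaling-and-squaring: take square roots of \f$ T \f$ until \f$ \|T - I\| \f$ is within the Padé bound, then undo them via \f$ \log A = 2^s \log A^{1/2^s} \f$. A scalar sketch of the control flow (plain C++; log1p stands in for the Padé step):

    #include <cmath>
    #include <cstdio>

    int main()
    {
      double t = 10.0;                 // plays the role of the triangular T
      int s = 0;
      while (std::abs(t - 1) > 0.25) { // analogue of normTminusI < maxNormForPade
        t = std::sqrt(t);              // analogue of matrix_sqrt_triangular
        ++s;
      }
      double r = std::log1p(t - 1);    // stand-in for matrix_log_compute_pade
      r = std::ldexp(r, s);            // log(10) recovered as 2^s * log(10^(1/2^s))
      std::printf("%.12f vs %.12f\n", r, std::log(10.0));
      return 0;
    }
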
+/** \ingroup MatrixFunctions_Module
+ * \class MatrixLogarithmAtomic
+ * \brief Helper class for computing matrix logarithm of atomic matrices.
+ *
+ * Here, an atomic matrix is a triangular matrix whose diagonal entries are close to each other.
+ *
+ * \sa class MatrixFunctionAtomic, MatrixBase::log()
+ */
template <typename MatrixType>
-void MatrixLogarithmAtomic<MatrixType>::computePade9(MatrixType& result, const MatrixType& T)
+class MatrixLogarithmAtomic
{
- const int degree = 9;
- const RealScalar nodes[] = { 0.0159198802461869550822118985481636L, 0.0819844463366821028502851059651326L,
- 0.1933142836497048013456489803292629L, 0.3378732882980955354807309926783317L,
- 0.5000000000000000000000000000000000L, 0.6621267117019044645192690073216683L,
- 0.8066857163502951986543510196707371L, 0.9180155536633178971497148940348674L,
- 0.9840801197538130449177881014518364L };
- const RealScalar weights[] = { 0.0406371941807872059859460790552618L, 0.0903240803474287020292360156214564L,
- 0.1303053482014677311593714347093164L, 0.1561735385200014200343152032922218L,
- 0.1651196775006298815822625346434870L, 0.1561735385200014200343152032922218L,
- 0.1303053482014677311593714347093164L, 0.0903240803474287020292360156214564L,
- 0.0406371941807872059859460790552618L };
- eigen_assert(degree <= maxPadeDegree);
- MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
- result.setZero(T.rows(), T.rows());
- for (int k = 0; k < degree; ++k)
- result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI)
- .template triangularView<Upper>().solve(TminusI);
-}
+public:
+ /** \brief Compute matrix logarithm of atomic matrix
+ * \param[in] A argument of matrix logarithm, should be upper triangular and atomic
+ * \returns The logarithm of \p A.
+ */
+ MatrixType compute(const MatrixType& A);
+};
template <typename MatrixType>
-void MatrixLogarithmAtomic<MatrixType>::computePade10(MatrixType& result, const MatrixType& T)
+MatrixType MatrixLogarithmAtomic<MatrixType>::compute(const MatrixType& A)
{
- const int degree = 10;
- const RealScalar nodes[] = { 0.0130467357414141399610179939577740L, 0.0674683166555077446339516557882535L,
- 0.1602952158504877968828363174425632L, 0.2833023029353764046003670284171079L,
- 0.4255628305091843945575869994351400L, 0.5744371694908156054424130005648600L,
- 0.7166976970646235953996329715828921L, 0.8397047841495122031171636825574368L,
- 0.9325316833444922553660483442117465L, 0.9869532642585858600389820060422260L };
- const RealScalar weights[] = { 0.0333356721543440687967844049466659L, 0.0747256745752902965728881698288487L,
- 0.1095431812579910219977674671140816L, 0.1346333596549981775456134607847347L,
- 0.1477621123573764350869464973256692L, 0.1477621123573764350869464973256692L,
- 0.1346333596549981775456134607847347L, 0.1095431812579910219977674671140816L,
- 0.0747256745752902965728881698288487L, 0.0333356721543440687967844049466659L };
- eigen_assert(degree <= maxPadeDegree);
- MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
- result.setZero(T.rows(), T.rows());
- for (int k = 0; k < degree; ++k)
- result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI)
- .template triangularView<Upper>().solve(TminusI);
+ using std::log;
+ MatrixType result(A.rows(), A.rows());
+ if (A.rows() == 1)
+ result(0,0) = log(A(0,0));
+ else if (A.rows() == 2)
+ matrix_log_compute_2x2(A, result);
+ else
+ matrix_log_compute_big(A, result);
+ return result;
}
-template <typename MatrixType>
-void MatrixLogarithmAtomic<MatrixType>::computePade11(MatrixType& result, const MatrixType& T)
-{
- const int degree = 11;
- const RealScalar nodes[] = { 0.0108856709269715035980309994385713L, 0.0564687001159523504624211153480364L,
- 0.1349239972129753379532918739844233L, 0.2404519353965940920371371652706952L,
- 0.3652284220238275138342340072995692L, 0.5000000000000000000000000000000000L,
- 0.6347715779761724861657659927004308L, 0.7595480646034059079628628347293048L,
- 0.8650760027870246620467081260155767L, 0.9435312998840476495375788846519636L,
- 0.9891143290730284964019690005614287L };
- const RealScalar weights[] = { 0.0278342835580868332413768602212743L, 0.0627901847324523123173471496119701L,
- 0.0931451054638671257130488207158280L, 0.1165968822959952399592618524215876L,
- 0.1314022722551233310903444349452546L, 0.1364625433889503153572417641681711L,
- 0.1314022722551233310903444349452546L, 0.1165968822959952399592618524215876L,
- 0.0931451054638671257130488207158280L, 0.0627901847324523123173471496119701L,
- 0.0278342835580868332413768602212743L };
- eigen_assert(degree <= maxPadeDegree);
- MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
- result.setZero(T.rows(), T.rows());
- for (int k = 0; k < degree; ++k)
- result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI)
- .template triangularView<Upper>().solve(TminusI);
-}
+} // end of namespace internal
/** \ingroup MatrixFunctions_Module
*
@@ -421,15 +308,19 @@ template<typename Derived> class MatrixLogarithmReturnValue
: public ReturnByValue<MatrixLogarithmReturnValue<Derived> >
{
public:
-
typedef typename Derived::Scalar Scalar;
typedef typename Derived::Index Index;
+protected:
+ typedef typename internal::ref_selector<Derived>::type DerivedNested;
+
+public:
+
/** \brief Constructor.
*
* \param[in] A %Matrix (expression) forming the argument of the matrix logarithm.
*/
- MatrixLogarithmReturnValue(const Derived& A) : m_A(A) { }
+ explicit MatrixLogarithmReturnValue(const Derived& A) : m_A(A) { }
/** \brief Compute the matrix logarithm.
*
@@ -438,28 +329,24 @@ public:
template <typename ResultType>
inline void evalTo(ResultType& result) const
{
- typedef typename Derived::PlainObject PlainObject;
- typedef internal::traits<PlainObject> Traits;
+ typedef typename internal::nested_eval<Derived, 10>::type DerivedEvalType;
+ typedef typename internal::remove_all<DerivedEvalType>::type DerivedEvalTypeClean;
+ typedef internal::traits<DerivedEvalTypeClean> Traits;
static const int RowsAtCompileTime = Traits::RowsAtCompileTime;
static const int ColsAtCompileTime = Traits::ColsAtCompileTime;
- static const int Options = PlainObject::Options;
typedef std::complex<typename NumTraits<Scalar>::Real> ComplexScalar;
- typedef Matrix<ComplexScalar, Dynamic, Dynamic, Options, RowsAtCompileTime, ColsAtCompileTime> DynMatrixType;
- typedef MatrixLogarithmAtomic<DynMatrixType> AtomicType;
+ typedef Matrix<ComplexScalar, Dynamic, Dynamic, 0, RowsAtCompileTime, ColsAtCompileTime> DynMatrixType;
+ typedef internal::MatrixLogarithmAtomic<DynMatrixType> AtomicType;
AtomicType atomic;
- const PlainObject Aevaluated = m_A.eval();
- MatrixFunction<PlainObject, AtomicType> mf(Aevaluated, atomic);
- mf.compute(result);
+ internal::matrix_function_compute<DerivedEvalTypeClean>::run(m_A, atomic, result);
}
Index rows() const { return m_A.rows(); }
Index cols() const { return m_A.cols(); }
private:
- typename internal::nested<Derived>::type m_A;
-
- MatrixLogarithmReturnValue& operator=(const MatrixLogarithmReturnValue&);
+ const DerivedNested m_A;
};
namespace internal {
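
A usage sketch of the user-facing entry point, which now evaluates through internal::matrix_function_compute (a minimal sketch; assumes the usual unsupported header):

    #include <unsupported/Eigen/MatrixFunctions>
    #include <iostream>

    int main()
    {
      Eigen::MatrixXd A(2,2);
      A << 2, 1,
           0, 2;                    // no eigenvalues on the closed negative real axis
      Eigen::MatrixXd L = A.log();  // MatrixLogarithmReturnValue evaluates here
      std::cout << L.exp() << "\n"; // exp(log(A)) recovers A up to rounding
      return 0;
    }
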
diff --git a/unsupported/Eigen/src/MatrixFunctions/MatrixPower.h b/unsupported/Eigen/src/MatrixFunctions/MatrixPower.h
index 78a307e96..ebc433d89 100644
--- a/unsupported/Eigen/src/MatrixFunctions/MatrixPower.h
+++ b/unsupported/Eigen/src/MatrixFunctions/MatrixPower.h
@@ -14,16 +14,48 @@ namespace Eigen {
template<typename MatrixType> class MatrixPower;
+/**
+ * \ingroup MatrixFunctions_Module
+ *
+ * \brief Proxy for the matrix power of some matrix.
+ *
+ * \tparam MatrixType type of the base, a matrix.
+ *
+ * This class holds the arguments to the matrix power until it is
+ * assigned or evaluated for some other reason (so the argument
+ * should not be changed in the meantime). It is the return type of
+ * MatrixPower::operator() and related functions and most of the
+ * time this is the only way it is used.
+ */
+/* TODO This class is only used by MatrixPower, so it should be nested
+ * into MatrixPower, like MatrixPower::ReturnValue. However, my
+ * compiler complained about an unused template parameter in the
+ * following declaration in namespace internal.
+ *
+ * template<typename MatrixType>
+ * struct traits<MatrixPower<MatrixType>::ReturnValue>;
+ */
template<typename MatrixType>
-class MatrixPowerRetval : public ReturnByValue< MatrixPowerRetval<MatrixType> >
+class MatrixPowerParenthesesReturnValue : public ReturnByValue< MatrixPowerParenthesesReturnValue<MatrixType> >
{
public:
typedef typename MatrixType::RealScalar RealScalar;
typedef typename MatrixType::Index Index;
- MatrixPowerRetval(MatrixPower<MatrixType>& pow, RealScalar p) : m_pow(pow), m_p(p)
+ /**
+ * \brief Constructor.
+ *
+ * \param[in] pow %MatrixPower storing the base.
+ * \param[in] p scalar, the exponent of the matrix power.
+ */
+ MatrixPowerParenthesesReturnValue(MatrixPower<MatrixType>& pow, RealScalar p) : m_pow(pow), m_p(p)
{ }
+ /**
+ * \brief Compute the matrix power.
+ *
+ * \param[out] res \f$ A^p \f$ where A and p are specified in the constructor.
+ */
template<typename ResultType>
inline void evalTo(ResultType& res) const
{ m_pow.compute(res, m_p); }
@@ -34,11 +66,25 @@ class MatrixPowerRetval : public ReturnByValue< MatrixPowerRetval<MatrixType> >
private:
MatrixPower<MatrixType>& m_pow;
const RealScalar m_p;
- MatrixPowerRetval& operator=(const MatrixPowerRetval&);
};
+/**
+ * \ingroup MatrixFunctions_Module
+ *
+ * \brief Class for computing matrix powers.
+ *
+ * \tparam MatrixType type of the base, expected to be an instantiation
+ * of the Matrix class template.
+ *
+ * This class is capable of computing triangular real/complex matrices
+ * raised to a power in the interval \f$ (-1, 1) \f$.
+ *
+ * \note Currently this class is only used by MatrixPower. One may
+ * insist that this be nested into MatrixPower. This class is here to
+ * facilitate future development of triangular matrix functions.
+ */
template<typename MatrixType>
-class MatrixPowerAtomic
+class MatrixPowerAtomic : internal::noncopyable
{
private:
enum {
@@ -49,14 +95,14 @@ class MatrixPowerAtomic
typedef typename MatrixType::RealScalar RealScalar;
typedef std::complex<RealScalar> ComplexScalar;
typedef typename MatrixType::Index Index;
- typedef Array<Scalar, RowsAtCompileTime, 1, ColMajor, MaxRowsAtCompileTime> ArrayType;
+ typedef Block<MatrixType,Dynamic,Dynamic> ResultType;
const MatrixType& m_A;
RealScalar m_p;
- void computePade(int degree, const MatrixType& IminusT, MatrixType& res) const;
- void compute2x2(MatrixType& res, RealScalar p) const;
- void computeBig(MatrixType& res) const;
+ void computePade(int degree, const MatrixType& IminusT, ResultType& res) const;
+ void compute2x2(ResultType& res, RealScalar p) const;
+ void computeBig(ResultType& res) const;
static int getPadeDegree(float normIminusT);
static int getPadeDegree(double normIminusT);
static int getPadeDegree(long double normIminusT);
@@ -64,24 +110,45 @@ class MatrixPowerAtomic
static RealScalar computeSuperDiag(RealScalar, RealScalar, RealScalar p);
public:
+ /**
+ * \brief Constructor.
+ *
+ * \param[in] T the base of the matrix power.
+ * \param[in] p the exponent of the matrix power, should be in
+ * \f$ (-1, 1) \f$.
+ *
+ * The class stores a reference to T, so it should not be changed
+ * (or destroyed) before evaluation. Only the upper triangular
+ * part of T is read.
+ */
MatrixPowerAtomic(const MatrixType& T, RealScalar p);
- void compute(MatrixType& res) const;
+
+ /**
+ * \brief Compute the matrix power.
+ *
+ * \param[out] res \f$ A^p \f$ where A and p are specified in the
+ * constructor.
+ */
+ void compute(ResultType& res) const;
};
template<typename MatrixType>
MatrixPowerAtomic<MatrixType>::MatrixPowerAtomic(const MatrixType& T, RealScalar p) :
m_A(T), m_p(p)
-{ eigen_assert(T.rows() == T.cols()); }
+{
+ eigen_assert(T.rows() == T.cols());
+ eigen_assert(p > -1 && p < 1);
+}
template<typename MatrixType>
-void MatrixPowerAtomic<MatrixType>::compute(MatrixType& res) const
+void MatrixPowerAtomic<MatrixType>::compute(ResultType& res) const
{
- res.resizeLike(m_A);
+ using std::pow;
switch (m_A.rows()) {
case 0:
break;
case 1:
- res(0,0) = std::pow(m_A(0,0), m_p);
+ res(0,0) = pow(m_A(0,0), m_p);
break;
case 2:
compute2x2(res, m_p);
@@ -92,24 +159,24 @@ void MatrixPowerAtomic<MatrixType>::compute(MatrixType& res) const
}
template<typename MatrixType>
-void MatrixPowerAtomic<MatrixType>::computePade(int degree, const MatrixType& IminusT, MatrixType& res) const
+void MatrixPowerAtomic<MatrixType>::computePade(int degree, const MatrixType& IminusT, ResultType& res) const
{
- int i = degree<<1;
- res = (m_p-degree) / ((i-1)<<1) * IminusT;
+ int i = 2*degree;
+ res = (m_p-degree) / (2*i-2) * IminusT;
+
for (--i; i; --i) {
res = (MatrixType::Identity(IminusT.rows(), IminusT.cols()) + res).template triangularView<Upper>()
- .solve((i==1 ? -m_p : i&1 ? (-m_p-(i>>1))/(i<<1) : (m_p-(i>>1))/((i-1)<<1)) * IminusT).eval();
+ .solve((i==1 ? -m_p : i&1 ? (-m_p-i/2)/(2*i) : (m_p-i/2)/(2*i-2)) * IminusT).eval();
}
res += MatrixType::Identity(IminusT.rows(), IminusT.cols());
}
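
In exact arithmetic the loop above evaluates the Gauss continued fraction of \f$ (1-x)^p \f$ from the bottom up, one triangular solve per coefficient. A scalar sketch (plain C++, not Eigen API; IminusT becomes the scalar x):

    #include <cmath>
    #include <cstdio>

    int main()
    {
      const double p = 0.5, x = 0.1;   // exponent and "I - T"
      const int degree = 3;
      int i = 2 * degree;
      double res = (p - degree) / (2*i - 2) * x;
      for (--i; i; --i) {
        const double c = (i == 1) ? -p
                       : (i & 1)  ? (-p - i/2) / (2*i)      // odd i (integer division)
                                  : ( p - i/2) / (2*i - 2); // even i
        res = c * x / (1 + res);       // scalar form of the triangular solve
      }
      res += 1;
      std::printf("%.9f vs %.9f\n", res, std::pow(1 - x, p)); // both ~0.948683298
      return 0;
    }
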
// This function assumes that res has the correct size (see bug 614)
template<typename MatrixType>
-void MatrixPowerAtomic<MatrixType>::compute2x2(MatrixType& res, RealScalar p) const
+void MatrixPowerAtomic<MatrixType>::compute2x2(ResultType& res, RealScalar p) const
{
using std::abs;
using std::pow;
-
res.coeffRef(0,0) = pow(m_A.coeff(0,0), p);
for (Index i=1; i < m_A.cols(); ++i) {
@@ -125,32 +192,20 @@ void MatrixPowerAtomic<MatrixType>::compute2x2(MatrixType& res, RealScalar p) co
}
template<typename MatrixType>
-void MatrixPowerAtomic<MatrixType>::computeBig(MatrixType& res) const
+void MatrixPowerAtomic<MatrixType>::computeBig(ResultType& res) const
{
+ using std::ldexp;
const int digits = std::numeric_limits<RealScalar>::digits;
- const RealScalar maxNormForPade = digits <= 24? 4.3386528e-1f: // sigle precision
- digits <= 53? 2.789358995219730e-1: // double precision
- digits <= 64? 2.4471944416607995472e-1L: // extended precision
- digits <= 106? 1.1016843812851143391275867258512e-1L: // double-double
- 9.134603732914548552537150753385375e-2L; // quadruple precision
+ const RealScalar maxNormForPade = digits <= 24? 4.3386528e-1L // single precision
+ : digits <= 53? 2.789358995219730e-1L // double precision
+ : digits <= 64? 2.4471944416607995472e-1L // extended precision
+ : digits <= 106? 1.1016843812851143391275867258512e-1L // double-double
+ : 9.134603732914548552537150753385375e-2L; // quadruple precision
MatrixType IminusT, sqrtT, T = m_A.template triangularView<Upper>();
RealScalar normIminusT;
int degree, degree2, numberOfSquareRoots = 0;
bool hasExtraSquareRoot = false;
- /* FIXME
- * For singular T, norm(I - T) >= 1 but maxNormForPade < 1, leads to infinite
- * loop. We should move 0 eigenvalues to bottom right corner. We need not
- * worry about tiny values (e.g. 1e-300) because they will reach 1 if
- * repetitively sqrt'ed.
- *
- * If the 0 eigenvalues are semisimple, they can form a 0 matrix at the
- * bottom right corner.
- *
- * [ T A ]^p [ T^p (T^-1 T^p A) ]
- * [ ] = [ ]
- * [ 0 0 ] [ 0 0 ]
- */
for (Index i=0; i < m_A.cols(); ++i)
eigen_assert(m_A(i,i) != RealScalar(0));
@@ -164,14 +219,14 @@ void MatrixPowerAtomic<MatrixType>::computeBig(MatrixType& res) const
break;
hasExtraSquareRoot = true;
}
- MatrixSquareRootTriangular<MatrixType>(T).compute(sqrtT);
+ matrix_sqrt_triangular(T, sqrtT);
T = sqrtT.template triangularView<Upper>();
++numberOfSquareRoots;
}
computePade(degree, IminusT, res);
for (; numberOfSquareRoots; --numberOfSquareRoots) {
- compute2x2(res, std::ldexp(m_p, -numberOfSquareRoots));
+ compute2x2(res, ldexp(m_p, -numberOfSquareRoots));
res = res.template triangularView<Upper>() * res;
}
compute2x2(res, m_p);
@@ -209,7 +264,7 @@ inline int MatrixPowerAtomic<MatrixType>::getPadeDegree(long double normIminusT)
1.999045567181744e-1L, 2.789358995219730e-1L };
#elif LDBL_MANT_DIG <= 64
const int maxPadeDegree = 8;
- const double maxNormForPade[] = { 6.3854693117491799460e-3L /* degree = 3 */ , 2.6394893435456973676e-2L,
+ const long double maxNormForPade[] = { 6.3854693117491799460e-3L /* degree = 3 */ , 2.6394893435456973676e-2L,
6.4216043030404063729e-2L, 1.1701165502926694307e-1L, 1.7904284231268670284e-1L, 2.4471944416607995472e-1L };
#elif LDBL_MANT_DIG <= 106
const int maxPadeDegree = 10;
@@ -236,19 +291,28 @@ template<typename MatrixType>
inline typename MatrixPowerAtomic<MatrixType>::ComplexScalar
MatrixPowerAtomic<MatrixType>::computeSuperDiag(const ComplexScalar& curr, const ComplexScalar& prev, RealScalar p)
{
- ComplexScalar logCurr = std::log(curr);
- ComplexScalar logPrev = std::log(prev);
- int unwindingNumber = std::ceil((numext::imag(logCurr - logPrev) - M_PI) / (2*M_PI));
- ComplexScalar w = numext::atanh2(curr - prev, curr + prev) + ComplexScalar(0, M_PI*unwindingNumber);
- return RealScalar(2) * std::exp(RealScalar(0.5) * p * (logCurr + logPrev)) * std::sinh(p * w) / (curr - prev);
+ using std::ceil;
+ using std::exp;
+ using std::log;
+ using std::sinh;
+
+ ComplexScalar logCurr = log(curr);
+ ComplexScalar logPrev = log(prev);
+ int unwindingNumber = ceil((numext::imag(logCurr - logPrev) - RealScalar(EIGEN_PI)) / RealScalar(2*EIGEN_PI));
+ ComplexScalar w = numext::log1p((curr-prev)/prev)/RealScalar(2) + ComplexScalar(0, EIGEN_PI*unwindingNumber);
+ return RealScalar(2) * exp(RealScalar(0.5) * p * (logCurr + logPrev)) * sinh(p * w) / (curr - prev);
}
template<typename MatrixType>
inline typename MatrixPowerAtomic<MatrixType>::RealScalar
MatrixPowerAtomic<MatrixType>::computeSuperDiag(RealScalar curr, RealScalar prev, RealScalar p)
{
- RealScalar w = numext::atanh2(curr - prev, curr + prev);
- return 2 * std::exp(p * (std::log(curr) + std::log(prev)) / 2) * std::sinh(p * w) / (curr - prev);
+ using std::exp;
+ using std::log;
+ using std::sinh;
+
+ RealScalar w = numext::log1p((curr-prev)/prev)/RealScalar(2);
+ return 2 * exp(p * (log(curr) + log(prev)) / 2) * sinh(p * w) / (curr - prev);
}
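
Both overloads rely on the identity \f$ \lambda_2^p - \lambda_1^p = 2 \, e^{p(\log\lambda_1 + \log\lambda_2)/2} \sinh(p w) \f$ with \f$ w = (\log\lambda_2 - \log\lambda_1)/2 = \tfrac{1}{2}\log\bigl(1 + (\lambda_2 - \lambda_1)/\lambda_1\bigr) \f$, which evaluates the divided difference \f$ (\lambda_2^p - \lambda_1^p)/(\lambda_2 - \lambda_1) \f$ without cancellation when the eigenvalues are close. A scalar check (plain C++):

    #include <cmath>
    #include <cstdio>

    int main()
    {
      const double c = 1.0 + 1e-9, q = 1.0, p = 0.5;  // nearly equal eigenvalues
      const double w = std::log1p((c - q) / q) / 2;
      const double stable = 2 * std::exp(p * (std::log(c) + std::log(q)) / 2)
                              * std::sinh(p * w) / (c - q);
      const double naive  = (std::pow(c, p) - std::pow(q, p)) / (c - q);
      std::printf("%.15g vs %.15g\n", stable, naive); // both near 0.5, naive is noisy
      return 0;
    }
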
/**
@@ -271,15 +335,9 @@ MatrixPowerAtomic<MatrixType>::computeSuperDiag(RealScalar curr, RealScalar prev
* Output: \verbinclude MatrixPower_optimal.out
*/
template<typename MatrixType>
-class MatrixPower
+class MatrixPower : internal::noncopyable
{
private:
- enum {
- RowsAtCompileTime = MatrixType::RowsAtCompileTime,
- ColsAtCompileTime = MatrixType::ColsAtCompileTime,
- MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
- MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
- };
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef typename MatrixType::Index Index;
@@ -293,7 +351,11 @@ class MatrixPower
* The class stores a reference to A, so it should not be changed
* (or destroyed) before evaluation.
*/
- explicit MatrixPower(const MatrixType& A) : m_A(A), m_conditionNumber(0)
+ explicit MatrixPower(const MatrixType& A) :
+ m_A(A),
+ m_conditionNumber(0),
+ m_rank(A.cols()),
+ m_nulls(0)
{ eigen_assert(A.rows() == A.cols()); }
/**
@@ -303,8 +365,8 @@ class MatrixPower
* \return The expression \f$ A^p \f$, where A is specified in the
* constructor.
*/
- const MatrixPowerRetval<MatrixType> operator()(RealScalar p)
- { return MatrixPowerRetval<MatrixType>(*this, p); }
+ const MatrixPowerParenthesesReturnValue<MatrixType> operator()(RealScalar p)
+ { return MatrixPowerParenthesesReturnValue<MatrixType>(*this, p); }
/**
* \brief Compute the matrix power.
@@ -321,21 +383,54 @@ class MatrixPower
private:
typedef std::complex<RealScalar> ComplexScalar;
- typedef Matrix<ComplexScalar, RowsAtCompileTime, ColsAtCompileTime, MatrixType::Options,
- MaxRowsAtCompileTime, MaxColsAtCompileTime> ComplexMatrix;
+ typedef Matrix<ComplexScalar, Dynamic, Dynamic, 0,
+ MatrixType::RowsAtCompileTime, MatrixType::ColsAtCompileTime> ComplexMatrix;
+ /** \brief Reference to the base of matrix power. */
typename MatrixType::Nested m_A;
+
+ /** \brief Temporary storage. */
MatrixType m_tmp;
- ComplexMatrix m_T, m_U, m_fT;
+
+ /** \brief Store the result of Schur decomposition. */
+ ComplexMatrix m_T, m_U;
+
+ /** \brief Store fractional power of m_T. */
+ ComplexMatrix m_fT;
+
+ /**
+ * \brief Condition number of m_A.
+ *
+ * It is initialized as 0 to avoid performing unnecessary Schur
+ * decomposition, which is the bottleneck.
+ */
RealScalar m_conditionNumber;
- RealScalar modfAndInit(RealScalar, RealScalar*);
+ /** \brief Rank of m_A. */
+ Index m_rank;
+
+ /** \brief Rank deficiency of m_A. */
+ Index m_nulls;
+
+ /**
+ * \brief Split p into integral part and fractional part.
+ *
+ * \param[in] p The exponent.
+ * \param[out] p The fractional part in the interval \f$ (-1, 1) \f$.
+ * \param[out] intpart The integral part.
+ *
+ * initialize() is called only if the fractional part is nonzero.
+ */
+ void split(RealScalar& p, RealScalar& intpart);
+
+ /** \brief Perform Schur decomposition for fractional power. */
+ void initialize();
template<typename ResultType>
- void computeIntPower(ResultType&, RealScalar);
+ void computeIntPower(ResultType& res, RealScalar p);
template<typename ResultType>
- void computeFracPower(ResultType&, RealScalar);
+ void computeFracPower(ResultType& res, RealScalar p);
template<int Rows, int Cols, int Options, int MaxRows, int MaxCols>
static void revertSchur(
@@ -354,59 +449,102 @@ template<typename MatrixType>
template<typename ResultType>
void MatrixPower<MatrixType>::compute(ResultType& res, RealScalar p)
{
+ using std::pow;
switch (cols()) {
case 0:
break;
case 1:
- res(0,0) = std::pow(m_A.coeff(0,0), p);
+ res(0,0) = pow(m_A.coeff(0,0), p);
break;
default:
- RealScalar intpart, x = modfAndInit(p, &intpart);
+ RealScalar intpart;
+ split(p, intpart);
+
+ res = MatrixType::Identity(rows(), cols());
computeIntPower(res, intpart);
- computeFracPower(res, x);
+ if (p) computeFracPower(res, p);
}
}
template<typename MatrixType>
-typename MatrixPower<MatrixType>::RealScalar
-MatrixPower<MatrixType>::modfAndInit(RealScalar x, RealScalar* intpart)
+void MatrixPower<MatrixType>::split(RealScalar& p, RealScalar& intpart)
{
- typedef Array<RealScalar, RowsAtCompileTime, 1, ColMajor, MaxRowsAtCompileTime> RealArray;
+ using std::floor;
+ using std::pow;
- *intpart = std::floor(x);
- RealScalar res = x - *intpart;
+ intpart = floor(p);
+ p -= intpart;
- if (!m_conditionNumber && res) {
- const ComplexSchur<MatrixType> schurOfA(m_A);
- m_T = schurOfA.matrixT();
- m_U = schurOfA.matrixU();
-
- const RealArray absTdiag = m_T.diagonal().array().abs();
- m_conditionNumber = absTdiag.maxCoeff() / absTdiag.minCoeff();
+ // Perform Schur decomposition if it is not yet performed and the power is
+ // not an integer.
+ if (!m_conditionNumber && p)
+ initialize();
+
+ // Choose the more stable of intpart = floor(p) and intpart = ceil(p).
+ if (p > RealScalar(0.5) && p > (1-p) * pow(m_conditionNumber, p)) {
+ --p;
+ ++intpart;
+ }
+}
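
The final test picks whichever of \f$ \lfloor p \rfloor \f$ and \f$ \lceil p \rceil \f$ leaves the better-behaved fractional part. For example, with \f$ p = 1.7 \f$ the floor step gives intpart = 1 and fractional part 0.7; if \f$ \kappa(A) = 1 \f$ then \f$ (1 - 0.7)\,\kappa^{0.7} = 0.3 < 0.7 \f$, so the split moves to intpart = 2 with fractional part \f$ -0.3 \f$, while for \f$ \kappa(A) = 10 \f$ one has \f$ 0.3 \cdot 10^{0.7} \approx 1.5 > 0.7 \f$ and the pair \f$ (1, 0.7) \f$ is kept.
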
+
+template<typename MatrixType>
+void MatrixPower<MatrixType>::initialize()
+{
+ const ComplexSchur<MatrixType> schurOfA(m_A);
+ JacobiRotation<ComplexScalar> rot;
+ ComplexScalar eigenvalue;
+
+ m_fT.resizeLike(m_A);
+ m_T = schurOfA.matrixT();
+ m_U = schurOfA.matrixU();
+ m_conditionNumber = m_T.diagonal().array().abs().maxCoeff() / m_T.diagonal().array().abs().minCoeff();
+
+ // Move zero eigenvalues to the bottom right corner.
+ for (Index i = cols()-1; i>=0; --i) {
+ if (m_rank <= 2)
+ return;
+ if (m_T.coeff(i,i) == RealScalar(0)) {
+ for (Index j=i+1; j < m_rank; ++j) {
+ eigenvalue = m_T.coeff(j,j);
+ rot.makeGivens(m_T.coeff(j-1,j), eigenvalue);
+ m_T.applyOnTheRight(j-1, j, rot);
+ m_T.applyOnTheLeft(j-1, j, rot.adjoint());
+ m_T.coeffRef(j-1,j-1) = eigenvalue;
+ m_T.coeffRef(j,j) = RealScalar(0);
+ m_U.applyOnTheRight(j-1, j, rot);
+ }
+ --m_rank;
+ }
}
- if (res>RealScalar(0.5) && res>(1-res)*std::pow(m_conditionNumber, res)) {
- --res;
- ++*intpart;
+ m_nulls = rows() - m_rank;
+ if (m_nulls) {
+ eigen_assert(m_T.bottomRightCorner(m_nulls, m_nulls).isZero()
+ && "Base of matrix power should be invertible or with a semisimple zero eigenvalue.");
+ m_fT.bottomRows(m_nulls).fill(RealScalar(0));
}
- return res;
}
template<typename MatrixType>
template<typename ResultType>
void MatrixPower<MatrixType>::computeIntPower(ResultType& res, RealScalar p)
{
- RealScalar pp = std::abs(p);
+ using std::abs;
+ using std::fmod;
+ RealScalar pp = abs(p);
- if (p<0) m_tmp = m_A.inverse();
- else m_tmp = m_A;
+ if (p<0)
+ m_tmp = m_A.inverse();
+ else
+ m_tmp = m_A;
- res = MatrixType::Identity(rows(), cols());
- while (pp >= 1) {
- if (std::fmod(pp, 2) >= 1)
+ while (true) {
+ if (fmod(pp, 2) >= 1)
res = m_tmp * res;
- m_tmp *= m_tmp;
pp /= 2;
+ if (pp < 1)
+ break;
+ m_tmp *= m_tmp;
}
}
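
computeIntPower is plain square-and-multiply driven by fmod(pp, 2); the restructured loop tests pp before squaring, so the final, unused squaring of m_tmp is skipped. A scalar sketch (plain C++):

    #include <cmath>
    #include <cstdio>

    int main()
    {
      double base = 3.0, res = 1.0;   // res starts as the identity
      double pp = 13;                 // |integer exponent|, carried as a real number
      while (true) {
        if (std::fmod(pp, 2) >= 1)    // current low bit set: multiply in
          res = base * res;
        pp /= 2;
        if (pp < 1)
          break;                      // done before the last (useless) squaring
        base *= base;
      }
      std::printf("%.0f vs %.0f\n", res, std::pow(3.0, 13)); // 1594323
      return 0;
    }
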
@@ -414,12 +552,17 @@ template<typename MatrixType>
template<typename ResultType>
void MatrixPower<MatrixType>::computeFracPower(ResultType& res, RealScalar p)
{
- if (p) {
- eigen_assert(m_conditionNumber);
- MatrixPowerAtomic<ComplexMatrix>(m_T, p).compute(m_fT);
- revertSchur(m_tmp, m_fT, m_U);
- res = m_tmp * res;
+ Block<ComplexMatrix,Dynamic,Dynamic> blockTp(m_fT, 0, 0, m_rank, m_rank);
+ eigen_assert(m_conditionNumber);
+ eigen_assert(m_rank + m_nulls == rows());
+
+ MatrixPowerAtomic<ComplexMatrix>(m_T.topLeftCorner(m_rank, m_rank), p).compute(blockTp);
+ if (m_nulls) {
+ m_fT.topRightCorner(m_rank, m_nulls) = m_T.topLeftCorner(m_rank, m_rank).template triangularView<Upper>()
+ .solve(blockTp * m_T.topRightCorner(m_rank, m_nulls));
}
+ revertSchur(m_tmp, m_fT, m_U);
+ res = m_tmp * res;
}
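
The topRightCorner solve implements the deflation identity behind the removed FIXME: once initialize() has moved the semisimple zero eigenvalues to the bottom-right corner,

\f[
  \begin{bmatrix} T & A \\ 0 & 0 \end{bmatrix}^p
  =
  \begin{bmatrix} T^p & T^{-1} T^p A \\ 0 & 0 \end{bmatrix},
\f]

so only the invertible leading \f$ \mathtt{m\_rank} \times \mathtt{m\_rank} \f$ block needs a fractional power, and the coupling block follows from one triangular solve.
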
template<typename MatrixType>
@@ -463,7 +606,7 @@ class MatrixPowerReturnValue : public ReturnByValue< MatrixPowerReturnValue<Deri
* \brief Constructor.
*
* \param[in] A %Matrix (expression), the base of the matrix power.
- * \param[in] p scalar, the exponent of the matrix power.
+ * \param[in] p real scalar, the exponent of the matrix power.
*/
MatrixPowerReturnValue(const Derived& A, RealScalar p) : m_A(A), m_p(p)
{ }
@@ -484,25 +627,83 @@ class MatrixPowerReturnValue : public ReturnByValue< MatrixPowerReturnValue<Deri
private:
const Derived& m_A;
const RealScalar m_p;
- MatrixPowerReturnValue& operator=(const MatrixPowerReturnValue&);
+};
+
+/**
+ * \ingroup MatrixFunctions_Module
+ *
+ * \brief Proxy for the matrix power of some matrix (expression).
+ *
+ * \tparam Derived type of the base, a matrix (expression).
+ *
+ * This class holds the arguments to the matrix power until it is
+ * assigned or evaluated for some other reason (so the argument
+ * should not be changed in the meantime). It is the return type of
+ * MatrixBase::pow() and related functions and most of the
+ * time this is the only way it is used.
+ */
+template<typename Derived>
+class MatrixComplexPowerReturnValue : public ReturnByValue< MatrixComplexPowerReturnValue<Derived> >
+{
+ public:
+ typedef typename Derived::PlainObject PlainObject;
+ typedef typename std::complex<typename Derived::RealScalar> ComplexScalar;
+ typedef typename Derived::Index Index;
+
+ /**
+ * \brief Constructor.
+ *
+ * \param[in] A %Matrix (expression), the base of the matrix power.
+ * \param[in] p complex scalar, the exponent of the matrix power.
+ */
+ MatrixComplexPowerReturnValue(const Derived& A, const ComplexScalar& p) : m_A(A), m_p(p)
+ { }
+
+ /**
+ * \brief Compute the matrix power.
+ *
+ * Because \p p is complex, \f$ A^p \f$ is simply evaluated as \f$
+ * \exp(p \log(A)) \f$.
+ *
+ * \param[out] result \f$ A^p \f$ where \p A and \p p are as in the
+ * constructor.
+ */
+ template<typename ResultType>
+ inline void evalTo(ResultType& res) const
+ { res = (m_p * m_A.log()).exp(); }
+
+ Index rows() const { return m_A.rows(); }
+ Index cols() const { return m_A.cols(); }
+
+ private:
+ const Derived& m_A;
+ const ComplexScalar m_p;
};
namespace internal {
template<typename MatrixPowerType>
-struct traits< MatrixPowerRetval<MatrixPowerType> >
+struct traits< MatrixPowerParenthesesReturnValue<MatrixPowerType> >
{ typedef typename MatrixPowerType::PlainObject ReturnType; };
template<typename Derived>
struct traits< MatrixPowerReturnValue<Derived> >
{ typedef typename Derived::PlainObject ReturnType; };
+template<typename Derived>
+struct traits< MatrixComplexPowerReturnValue<Derived> >
+{ typedef typename Derived::PlainObject ReturnType; };
+
}
template<typename Derived>
const MatrixPowerReturnValue<Derived> MatrixBase<Derived>::pow(const RealScalar& p) const
{ return MatrixPowerReturnValue<Derived>(derived(), p); }
+template<typename Derived>
+const MatrixComplexPowerReturnValue<Derived> MatrixBase<Derived>::pow(const std::complex<RealScalar>& p) const
+{ return MatrixComplexPowerReturnValue<Derived>(derived(), p); }
+
} // namespace Eigen
#endif // EIGEN_MATRIX_POWER
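
A usage sketch for these entry points (a minimal sketch; assumes the usual unsupported header):

    #include <unsupported/Eigen/MatrixFunctions>
    #include <complex>
    #include <iostream>

    int main()
    {
      Eigen::Matrix2d A;
      A << 2, 1,
           0, 2;

      // One-off power through MatrixBase::pow() (MatrixPowerReturnValue):
      Eigen::Matrix2d B = A.pow(0.5);   // principal square root of A
      std::cout << B * B << "\n\n";     // recovers A up to rounding

      // Several powers of the same base: MatrixPower caches the Schur form.
      Eigen::MatrixPower<Eigen::Matrix2d> Apow(A);
      Eigen::Matrix2d C = Apow(0.3), D = Apow(0.7);
      std::cout << C * D << "\n";       // also recovers A
      return 0;
    }
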
diff --git a/unsupported/Eigen/src/MatrixFunctions/MatrixSquareRoot.h b/unsupported/Eigen/src/MatrixFunctions/MatrixSquareRoot.h
index b48ea9d46..afd88ec4d 100644
--- a/unsupported/Eigen/src/MatrixFunctions/MatrixSquareRoot.h
+++ b/unsupported/Eigen/src/MatrixFunctions/MatrixSquareRoot.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2011 Jitse Niesen <jitse@maths.leeds.ac.uk>
+// Copyright (C) 2011, 2013 Jitse Niesen <jitse@maths.leeds.ac.uk>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@@ -12,133 +12,16 @@
namespace Eigen {
-/** \ingroup MatrixFunctions_Module
- * \brief Class for computing matrix square roots of upper quasi-triangular matrices.
- * \tparam MatrixType type of the argument of the matrix square root,
- * expected to be an instantiation of the Matrix class template.
- *
- * This class computes the square root of the upper quasi-triangular
- * matrix stored in the upper Hessenberg part of the matrix passed to
- * the constructor.
- *
- * \sa MatrixSquareRoot, MatrixSquareRootTriangular
- */
-template <typename MatrixType>
-class MatrixSquareRootQuasiTriangular
-{
- public:
-
- /** \brief Constructor.
- *
- * \param[in] A upper quasi-triangular matrix whose square root
- * is to be computed.
- *
- * The class stores a reference to \p A, so it should not be
- * changed (or destroyed) before compute() is called.
- */
- MatrixSquareRootQuasiTriangular(const MatrixType& A)
- : m_A(A)
- {
- eigen_assert(A.rows() == A.cols());
- }
-
- /** \brief Compute the matrix square root
- *
- * \param[out] result square root of \p A, as specified in the constructor.
- *
- * Only the upper Hessenberg part of \p result is updated, the
- * rest is not touched. See MatrixBase::sqrt() for details on
- * how this computation is implemented.
- */
- template <typename ResultType> void compute(ResultType &result);
-
- private:
- typedef typename MatrixType::Index Index;
- typedef typename MatrixType::Scalar Scalar;
-
- void computeDiagonalPartOfSqrt(MatrixType& sqrtT, const MatrixType& T);
- void computeOffDiagonalPartOfSqrt(MatrixType& sqrtT, const MatrixType& T);
- void compute2x2diagonalBlock(MatrixType& sqrtT, const MatrixType& T, typename MatrixType::Index i);
- void compute1x1offDiagonalBlock(MatrixType& sqrtT, const MatrixType& T,
- typename MatrixType::Index i, typename MatrixType::Index j);
- void compute1x2offDiagonalBlock(MatrixType& sqrtT, const MatrixType& T,
- typename MatrixType::Index i, typename MatrixType::Index j);
- void compute2x1offDiagonalBlock(MatrixType& sqrtT, const MatrixType& T,
- typename MatrixType::Index i, typename MatrixType::Index j);
- void compute2x2offDiagonalBlock(MatrixType& sqrtT, const MatrixType& T,
- typename MatrixType::Index i, typename MatrixType::Index j);
-
- template <typename SmallMatrixType>
- static void solveAuxiliaryEquation(SmallMatrixType& X, const SmallMatrixType& A,
- const SmallMatrixType& B, const SmallMatrixType& C);
-
- const MatrixType& m_A;
-};
-
-template <typename MatrixType>
-template <typename ResultType>
-void MatrixSquareRootQuasiTriangular<MatrixType>::compute(ResultType &result)
-{
- result.resize(m_A.rows(), m_A.cols());
- computeDiagonalPartOfSqrt(result, m_A);
- computeOffDiagonalPartOfSqrt(result, m_A);
-}
-
-// pre: T is quasi-upper-triangular and sqrtT is a zero matrix of the same size
-// post: the diagonal blocks of sqrtT are the square roots of the diagonal blocks of T
-template <typename MatrixType>
-void MatrixSquareRootQuasiTriangular<MatrixType>::computeDiagonalPartOfSqrt(MatrixType& sqrtT,
- const MatrixType& T)
-{
- using std::sqrt;
- const Index size = m_A.rows();
- for (Index i = 0; i < size; i++) {
- if (i == size - 1 || T.coeff(i+1, i) == 0) {
- eigen_assert(T(i,i) >= 0);
- sqrtT.coeffRef(i,i) = sqrt(T.coeff(i,i));
- }
- else {
- compute2x2diagonalBlock(sqrtT, T, i);
- ++i;
- }
- }
-}
-
-// pre: T is quasi-upper-triangular and diagonal blocks of sqrtT are square root of diagonal blocks of T.
-// post: sqrtT is the square root of T.
-template <typename MatrixType>
-void MatrixSquareRootQuasiTriangular<MatrixType>::computeOffDiagonalPartOfSqrt(MatrixType& sqrtT,
- const MatrixType& T)
-{
- const Index size = m_A.rows();
- for (Index j = 1; j < size; j++) {
- if (T.coeff(j, j-1) != 0) // if T(j-1:j, j-1:j) is a 2-by-2 block
- continue;
- for (Index i = j-1; i >= 0; i--) {
- if (i > 0 && T.coeff(i, i-1) != 0) // if T(i-1:i, i-1:i) is a 2-by-2 block
- continue;
- bool iBlockIs2x2 = (i < size - 1) && (T.coeff(i+1, i) != 0);
- bool jBlockIs2x2 = (j < size - 1) && (T.coeff(j+1, j) != 0);
- if (iBlockIs2x2 && jBlockIs2x2)
- compute2x2offDiagonalBlock(sqrtT, T, i, j);
- else if (iBlockIs2x2 && !jBlockIs2x2)
- compute2x1offDiagonalBlock(sqrtT, T, i, j);
- else if (!iBlockIs2x2 && jBlockIs2x2)
- compute1x2offDiagonalBlock(sqrtT, T, i, j);
- else if (!iBlockIs2x2 && !jBlockIs2x2)
- compute1x1offDiagonalBlock(sqrtT, T, i, j);
- }
- }
-}
+namespace internal {
// pre: T.block(i,i,2,2) has complex conjugate eigenvalues
// post: sqrtT.block(i,i,2,2) is square root of T.block(i,i,2,2)
-template <typename MatrixType>
-void MatrixSquareRootQuasiTriangular<MatrixType>
- ::compute2x2diagonalBlock(MatrixType& sqrtT, const MatrixType& T, typename MatrixType::Index i)
+template <typename MatrixType, typename ResultType>
+void matrix_sqrt_quasi_triangular_2x2_diagonal_block(const MatrixType& T, typename MatrixType::Index i, ResultType& sqrtT)
{
// TODO: This case (2-by-2 blocks with complex conjugate eigenvalues) is probably hidden somewhere
// in EigenSolver. If we expose it, we could call it directly from here.
+ typedef typename traits<MatrixType>::Scalar Scalar;
Matrix<Scalar,2,2> block = T.template block<2,2>(i,i);
EigenSolver<Matrix<Scalar,2,2> > es(block);
sqrtT.template block<2,2>(i,i)
@@ -148,21 +31,19 @@ void MatrixSquareRootQuasiTriangular<MatrixType>
// pre: block structure of T is such that (i,j) is a 1x1 block,
// all blocks of sqrtT to left of and below (i,j) are correct
// post: sqrtT(i,j) has the correct value
-template <typename MatrixType>
-void MatrixSquareRootQuasiTriangular<MatrixType>
- ::compute1x1offDiagonalBlock(MatrixType& sqrtT, const MatrixType& T,
- typename MatrixType::Index i, typename MatrixType::Index j)
+template <typename MatrixType, typename ResultType>
+void matrix_sqrt_quasi_triangular_1x1_off_diagonal_block(const MatrixType& T, typename MatrixType::Index i, typename MatrixType::Index j, ResultType& sqrtT)
{
+ typedef typename traits<MatrixType>::Scalar Scalar;
Scalar tmp = (sqrtT.row(i).segment(i+1,j-i-1) * sqrtT.col(j).segment(i+1,j-i-1)).value();
sqrtT.coeffRef(i,j) = (T.coeff(i,j) - tmp) / (sqrtT.coeff(i,i) + sqrtT.coeff(j,j));
}
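
For reference, the update above is just the (i,j) entry of sqrtT * sqrtT = T written out (a hand-derived sketch, not part of the patch):

    sqrtT(i,i) * x + x * sqrtT(j,j) = T(i,j) - sum_{i<k<j} sqrtT(i,k) * sqrtT(k,j)

so the unknown x = sqrtT(i,j) follows from a single division, exactly as the code computes it.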
// similar to compute1x1offDiagonalBlock()
-template <typename MatrixType>
-void MatrixSquareRootQuasiTriangular<MatrixType>
- ::compute1x2offDiagonalBlock(MatrixType& sqrtT, const MatrixType& T,
- typename MatrixType::Index i, typename MatrixType::Index j)
+template <typename MatrixType, typename ResultType>
+void matrix_sqrt_quasi_triangular_1x2_off_diagonal_block(const MatrixType& T, typename MatrixType::Index i, typename MatrixType::Index j, ResultType& sqrtT)
{
+ typedef typename traits<MatrixType>::Scalar Scalar;
Matrix<Scalar,1,2> rhs = T.template block<1,2>(i,j);
if (j-i > 1)
rhs -= sqrtT.block(i, i+1, 1, j-i-1) * sqrtT.block(i+1, j, j-i-1, 2);
@@ -172,11 +53,10 @@ void MatrixSquareRootQuasiTriangular<MatrixType>
}
// similar to compute1x1offDiagonalBlock()
-template <typename MatrixType>
-void MatrixSquareRootQuasiTriangular<MatrixType>
- ::compute2x1offDiagonalBlock(MatrixType& sqrtT, const MatrixType& T,
- typename MatrixType::Index i, typename MatrixType::Index j)
+template <typename MatrixType, typename ResultType>
+void matrix_sqrt_quasi_triangular_2x1_off_diagonal_block(const MatrixType& T, typename MatrixType::Index i, typename MatrixType::Index j, ResultType& sqrtT)
{
+ typedef typename traits<MatrixType>::Scalar Scalar;
Matrix<Scalar,2,1> rhs = T.template block<2,1>(i,j);
if (j-i > 2)
rhs -= sqrtT.block(i, i+2, 2, j-i-2) * sqrtT.block(i+2, j, j-i-2, 1);
@@ -185,32 +65,11 @@ void MatrixSquareRootQuasiTriangular<MatrixType>
sqrtT.template block<2,1>(i,j) = A.fullPivLu().solve(rhs);
}
-// similar to compute1x1offDiagonalBlock()
-template <typename MatrixType>
-void MatrixSquareRootQuasiTriangular<MatrixType>
- ::compute2x2offDiagonalBlock(MatrixType& sqrtT, const MatrixType& T,
- typename MatrixType::Index i, typename MatrixType::Index j)
-{
- Matrix<Scalar,2,2> A = sqrtT.template block<2,2>(i,i);
- Matrix<Scalar,2,2> B = sqrtT.template block<2,2>(j,j);
- Matrix<Scalar,2,2> C = T.template block<2,2>(i,j);
- if (j-i > 2)
- C -= sqrtT.block(i, i+2, 2, j-i-2) * sqrtT.block(i+2, j, j-i-2, 2);
- Matrix<Scalar,2,2> X;
- solveAuxiliaryEquation(X, A, B, C);
- sqrtT.template block<2,2>(i,j) = X;
-}
-
// solves the equation A X + X B = C where all matrices are 2-by-2
template <typename MatrixType>
-template <typename SmallMatrixType>
-void MatrixSquareRootQuasiTriangular<MatrixType>
- ::solveAuxiliaryEquation(SmallMatrixType& X, const SmallMatrixType& A,
- const SmallMatrixType& B, const SmallMatrixType& C)
+void matrix_sqrt_quasi_triangular_solve_auxiliary_equation(MatrixType& X, const MatrixType& A, const MatrixType& B, const MatrixType& C)
{
- EIGEN_STATIC_ASSERT((internal::is_same<SmallMatrixType, Matrix<Scalar,2,2> >::value),
- EIGEN_INTERNAL_ERROR_PLEASE_FILE_A_BUG_REPORT);
-
+ typedef typename traits<MatrixType>::Scalar Scalar;
Matrix<Scalar,4,4> coeffMatrix = Matrix<Scalar,4,4>::Zero();
coeffMatrix.coeffRef(0,0) = A.coeff(0,0) + B.coeff(0,0);
coeffMatrix.coeffRef(1,1) = A.coeff(0,0) + B.coeff(1,1);
@@ -224,13 +83,13 @@ void MatrixSquareRootQuasiTriangular<MatrixType>
coeffMatrix.coeffRef(2,3) = B.coeff(1,0);
coeffMatrix.coeffRef(3,1) = A.coeff(1,0);
coeffMatrix.coeffRef(3,2) = B.coeff(0,1);
-
+
Matrix<Scalar,4,1> rhs;
rhs.coeffRef(0) = C.coeff(0,0);
rhs.coeffRef(1) = C.coeff(0,1);
rhs.coeffRef(2) = C.coeff(1,0);
rhs.coeffRef(3) = C.coeff(1,1);
-
+
Matrix<Scalar,4,1> result;
result = coeffMatrix.fullPivLu().solve(rhs);
@@ -240,165 +99,208 @@ void MatrixSquareRootQuasiTriangular<MatrixType>
X.coeffRef(1,1) = result.coeff(3);
}
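
One way to read the 4x4 system above (a sketch, not from the patch): with the unknowns stacked row-wise as x = (X(0,0), X(0,1), X(1,0), X(1,1)), equating the entries of A X + X B = C gives the Kronecker sum

    (A ⊗ I2 + I2 ⊗ B^T) x = (C(0,0), C(0,1), C(1,0), C(1,1))^T

and coeffMatrix is exactly this 4x4 Kronecker sum, so the full-pivot LU solve recovers X.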
+// similar to matrix_sqrt_quasi_triangular_1x1_off_diagonal_block()

+template <typename MatrixType, typename ResultType>
+void matrix_sqrt_quasi_triangular_2x2_off_diagonal_block(const MatrixType& T, typename MatrixType::Index i, typename MatrixType::Index j, ResultType& sqrtT)
+{
+ typedef typename traits<MatrixType>::Scalar Scalar;
+ Matrix<Scalar,2,2> A = sqrtT.template block<2,2>(i,i);
+ Matrix<Scalar,2,2> B = sqrtT.template block<2,2>(j,j);
+ Matrix<Scalar,2,2> C = T.template block<2,2>(i,j);
+ if (j-i > 2)
+ C -= sqrtT.block(i, i+2, 2, j-i-2) * sqrtT.block(i+2, j, j-i-2, 2);
+ Matrix<Scalar,2,2> X;
+ matrix_sqrt_quasi_triangular_solve_auxiliary_equation(X, A, B, C);
+ sqrtT.template block<2,2>(i,j) = X;
+}
+
+// pre: T is quasi-upper-triangular and sqrtT is a zero matrix of the same size
+// post: the diagonal blocks of sqrtT are the square roots of the diagonal blocks of T
+template <typename MatrixType, typename ResultType>
+void matrix_sqrt_quasi_triangular_diagonal(const MatrixType& T, ResultType& sqrtT)
+{
+ using std::sqrt;
+ typedef typename MatrixType::Index Index;
+ const Index size = T.rows();
+ for (Index i = 0; i < size; i++) {
+ if (i == size - 1 || T.coeff(i+1, i) == 0) {
+ eigen_assert(T(i,i) >= 0);
+ sqrtT.coeffRef(i,i) = sqrt(T.coeff(i,i));
+ }
+ else {
+ matrix_sqrt_quasi_triangular_2x2_diagonal_block(T, i, sqrtT);
+ ++i;
+ }
+ }
+}
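
A quick sanity check for the 2x2 branch (hand-worked, not part of the patch): the block [[0, 1], [-1, 0]] has the conjugate eigenvalue pair ±i, and

    S = (1/sqrt(2)) * [[1, 1], [-1, 1]],   S * S = [[0, 1], [-1, 0]]

so a real square root exists precisely because the eigenvalues of such a block come in complex conjugate pairs.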
+
+// pre: T is quasi-upper-triangular and diagonal blocks of sqrtT are square root of diagonal blocks of T.
+// post: sqrtT is the square root of T.
+template <typename MatrixType, typename ResultType>
+void matrix_sqrt_quasi_triangular_off_diagonal(const MatrixType& T, ResultType& sqrtT)
+{
+ typedef typename MatrixType::Index Index;
+ const Index size = T.rows();
+ for (Index j = 1; j < size; j++) {
+ if (T.coeff(j, j-1) != 0) // if T(j-1:j, j-1:j) is a 2-by-2 block
+ continue;
+ for (Index i = j-1; i >= 0; i--) {
+ if (i > 0 && T.coeff(i, i-1) != 0) // if T(i-1:i, i-1:i) is a 2-by-2 block
+ continue;
+ bool iBlockIs2x2 = (i < size - 1) && (T.coeff(i+1, i) != 0);
+ bool jBlockIs2x2 = (j < size - 1) && (T.coeff(j+1, j) != 0);
+ if (iBlockIs2x2 && jBlockIs2x2)
+ matrix_sqrt_quasi_triangular_2x2_off_diagonal_block(T, i, j, sqrtT);
+ else if (iBlockIs2x2 && !jBlockIs2x2)
+ matrix_sqrt_quasi_triangular_2x1_off_diagonal_block(T, i, j, sqrtT);
+ else if (!iBlockIs2x2 && jBlockIs2x2)
+ matrix_sqrt_quasi_triangular_1x2_off_diagonal_block(T, i, j, sqrtT);
+ else if (!iBlockIs2x2 && !jBlockIs2x2)
+ matrix_sqrt_quasi_triangular_1x1_off_diagonal_block(T, i, j, sqrtT);
+ }
+ }
+}
+
+} // end of namespace internal
/** \ingroup MatrixFunctions_Module
- * \brief Class for computing matrix square roots of upper triangular matrices.
- * \tparam MatrixType type of the argument of the matrix square root,
+ * \brief Compute matrix square root of quasi-triangular matrix.
+ *
+ * \tparam MatrixType type of \p arg, the argument of matrix square root,
* expected to be an instantiation of the Matrix class template.
+ * \tparam ResultType type of \p result, where result is to be stored.
+ * \param[in] arg argument of matrix square root.
+ * \param[out] result matrix square root of upper Hessenberg part of \p arg.
*
- * This class computes the square root of the upper triangular matrix
- * stored in the upper triangular part (including the diagonal) of
- * the matrix passed to the constructor.
+ * This function computes the square root of the upper quasi-triangular matrix stored in the upper
+ * Hessenberg part of \p arg. Only the upper Hessenberg part of \p result is updated; the rest is
+ * not touched. See MatrixBase::sqrt() for details on how this computation is implemented.
*
* \sa MatrixSquareRoot, MatrixSquareRootQuasiTriangular
*/
-template <typename MatrixType>
-class MatrixSquareRootTriangular
+template <typename MatrixType, typename ResultType>
+void matrix_sqrt_quasi_triangular(const MatrixType &arg, ResultType &result)
{
- public:
- MatrixSquareRootTriangular(const MatrixType& A)
- : m_A(A)
- {
- eigen_assert(A.rows() == A.cols());
- }
-
- /** \brief Compute the matrix square root
- *
- * \param[out] result square root of \p A, as specified in the constructor.
- *
- * Only the upper triangular part (including the diagonal) of
- * \p result is updated, the rest is not touched. See
- * MatrixBase::sqrt() for details on how this computation is
- * implemented.
- */
- template <typename ResultType> void compute(ResultType &result);
+ eigen_assert(arg.rows() == arg.cols());
+ result.resize(arg.rows(), arg.cols());
+ internal::matrix_sqrt_quasi_triangular_diagonal(arg, result);
+ internal::matrix_sqrt_quasi_triangular_off_diagonal(arg, result);
+}
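
A minimal usage sketch of the new free function (hypothetical matrix values; assumes the MatrixFunctions module header exposes it, as in this patch):

    #include <iostream>
    #include <Eigen/Dense>
    #include <unsupported/Eigen/MatrixFunctions>

    int main()
    {
      Eigen::MatrixXd A(3, 3);
      A << 4, 1, 0,
           0, 5, 1,
           1, 0, 6;

      // Reduce to real Schur form, take the square root of the
      // quasi-triangular factor, then transform back.
      Eigen::RealSchur<Eigen::MatrixXd> schur(A);
      Eigen::MatrixXd sqrtT = Eigen::MatrixXd::Zero(3, 3);  // zero matrix, per the pre-condition
      Eigen::matrix_sqrt_quasi_triangular(schur.matrixT(), sqrtT);
      Eigen::MatrixXd S = schur.matrixU() * sqrtT * schur.matrixU().transpose();
      std::cout << (S * S - A).norm() << "\n";  // ~0
    }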
- private:
- const MatrixType& m_A;
-};
-template <typename MatrixType>
-template <typename ResultType>
-void MatrixSquareRootTriangular<MatrixType>::compute(ResultType &result)
+/** \ingroup MatrixFunctions_Module
+ * \brief Compute matrix square root of triangular matrix.
+ *
+ * \tparam MatrixType type of \p arg, the argument of matrix square root,
+ * expected to be an instantiation of the Matrix class template.
+ * \tparam ResultType type of \p result, where result is to be stored.
+ * \param[in] arg argument of matrix square root.
+ * \param[out] result matrix square root of upper triangular part of \p arg.
+ *
+ * Only the upper triangular part (including the diagonal) of \p result is updated; the rest is not
+ * touched. See MatrixBase::sqrt() for details on how this computation is implemented.
+ *
+ * \sa matrix_sqrt_quasi_triangular, MatrixBase::sqrt()
+ */
+template <typename MatrixType, typename ResultType>
+void matrix_sqrt_triangular(const MatrixType &arg, ResultType &result)
{
using std::sqrt;
+ typedef typename MatrixType::Index Index;
+ typedef typename MatrixType::Scalar Scalar;
- // Compute square root of m_A and store it in upper triangular part of result
+ eigen_assert(arg.rows() == arg.cols());
+
+ // Compute square root of arg and store it in upper triangular part of result
// This uses that the square root of triangular matrices can be computed directly.
- result.resize(m_A.rows(), m_A.cols());
- typedef typename MatrixType::Index Index;
- for (Index i = 0; i < m_A.rows(); i++) {
- result.coeffRef(i,i) = sqrt(m_A.coeff(i,i));
+ result.resize(arg.rows(), arg.cols());
+ for (Index i = 0; i < arg.rows(); i++) {
+ result.coeffRef(i,i) = sqrt(arg.coeff(i,i));
}
- for (Index j = 1; j < m_A.cols(); j++) {
+ for (Index j = 1; j < arg.cols(); j++) {
for (Index i = j-1; i >= 0; i--) {
- typedef typename MatrixType::Scalar Scalar;
// if i = j-1, then segment has length 0 so tmp = 0
Scalar tmp = (result.row(i).segment(i+1,j-i-1) * result.col(j).segment(i+1,j-i-1)).value();
// denominator may be zero if original matrix is singular
- result.coeffRef(i,j) = (m_A.coeff(i,j) - tmp) / (result.coeff(i,i) + result.coeff(j,j));
+ result.coeffRef(i,j) = (arg.coeff(i,j) - tmp) / (result.coeff(i,i) + result.coeff(j,j));
}
}
}
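
A hand-checkable instance of the triangular recurrence (a sketch; the values are made up):

    #include <iostream>
    #include <Eigen/Dense>
    #include <unsupported/Eigen/MatrixFunctions>

    int main()
    {
      Eigen::Matrix2d arg;
      arg << 4, 2,
             0, 9;
      Eigen::Matrix2d s = Eigen::Matrix2d::Zero();  // lower part is never written
      Eigen::matrix_sqrt_triangular(arg, s);
      // By the recurrence: s(0,0) = 2, s(1,1) = 3,
      // s(0,1) = (2 - 0) / (2 + 3) = 0.4, and s*s reproduces arg.
      std::cout << s << "\n";
    }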
+namespace internal {
+
/** \ingroup MatrixFunctions_Module
- * \brief Class for computing matrix square roots of general matrices.
+ * \brief Helper struct for computing matrix square roots of general matrices.
* \tparam MatrixType type of the argument of the matrix square root,
* expected to be an instantiation of the Matrix class template.
*
* \sa MatrixSquareRootTriangular, MatrixSquareRootQuasiTriangular, MatrixBase::sqrt()
*/
template <typename MatrixType, int IsComplex = NumTraits<typename internal::traits<MatrixType>::Scalar>::IsComplex>
-class MatrixSquareRoot
+struct matrix_sqrt_compute
{
- public:
-
- /** \brief Constructor.
- *
- * \param[in] A matrix whose square root is to be computed.
- *
- * The class stores a reference to \p A, so it should not be
- * changed (or destroyed) before compute() is called.
- */
- MatrixSquareRoot(const MatrixType& A);
-
- /** \brief Compute the matrix square root
- *
- * \param[out] result square root of \p A, as specified in the constructor.
- *
- * See MatrixBase::sqrt() for details on how this computation is
- * implemented.
- */
- template <typename ResultType> void compute(ResultType &result);
+ /** \brief Compute the matrix square root
+ *
+ * \param[in] arg matrix whose square root is to be computed.
+ * \param[out] result square root of \p arg.
+ *
+ * See MatrixBase::sqrt() for details on how this computation is implemented.
+ */
+ template <typename ResultType> static void run(const MatrixType &arg, ResultType &result);
};
// ********** Partial specialization for real matrices **********
template <typename MatrixType>
-class MatrixSquareRoot<MatrixType, 0>
+struct matrix_sqrt_compute<MatrixType, 0>
{
- public:
-
- MatrixSquareRoot(const MatrixType& A)
- : m_A(A)
- {
- eigen_assert(A.rows() == A.cols());
- }
-
- template <typename ResultType> void compute(ResultType &result)
- {
- // Compute Schur decomposition of m_A
- const RealSchur<MatrixType> schurOfA(m_A);
- const MatrixType& T = schurOfA.matrixT();
- const MatrixType& U = schurOfA.matrixU();
-
- // Compute square root of T
- MatrixType sqrtT = MatrixType::Zero(m_A.rows(), m_A.cols());
- MatrixSquareRootQuasiTriangular<MatrixType>(T).compute(sqrtT);
+ template <typename ResultType>
+ static void run(const MatrixType &arg, ResultType &result)
+ {
+ eigen_assert(arg.rows() == arg.cols());
+
+ // Compute Schur decomposition of arg
+ const RealSchur<MatrixType> schurOfA(arg);
+ const MatrixType& T = schurOfA.matrixT();
+ const MatrixType& U = schurOfA.matrixU();
- // Compute square root of m_A
- result = U * sqrtT * U.adjoint();
- }
+ // Compute square root of T
+ MatrixType sqrtT = MatrixType::Zero(arg.rows(), arg.cols());
+ matrix_sqrt_quasi_triangular(T, sqrtT);
- private:
- const MatrixType& m_A;
+ // Compute square root of arg
+ result = U * sqrtT * U.adjoint();
+ }
};
// ********** Partial specialization for complex matrices **********
template <typename MatrixType>
-class MatrixSquareRoot<MatrixType, 1>
+struct matrix_sqrt_compute<MatrixType, 1>
{
- public:
-
- MatrixSquareRoot(const MatrixType& A)
- : m_A(A)
- {
- eigen_assert(A.rows() == A.cols());
- }
-
- template <typename ResultType> void compute(ResultType &result)
- {
- // Compute Schur decomposition of m_A
- const ComplexSchur<MatrixType> schurOfA(m_A);
- const MatrixType& T = schurOfA.matrixT();
- const MatrixType& U = schurOfA.matrixU();
+ template <typename ResultType>
+ static void run(const MatrixType &arg, ResultType &result)
+ {
+ eigen_assert(arg.rows() == arg.cols());
+
+ // Compute Schur decomposition of arg
+ const ComplexSchur<MatrixType> schurOfA(arg);
+ const MatrixType& T = schurOfA.matrixT();
+ const MatrixType& U = schurOfA.matrixU();
- // Compute square root of T
- MatrixType sqrtT;
- MatrixSquareRootTriangular<MatrixType>(T).compute(sqrtT);
+ // Compute square root of T
+ MatrixType sqrtT;
+ matrix_sqrt_triangular(T, sqrtT);
- // Compute square root of m_A
- result = U * (sqrtT.template triangularView<Upper>() * U.adjoint());
- }
-
- private:
- const MatrixType& m_A;
+ // Compute square root of arg
+ result = U * (sqrtT.template triangularView<Upper>() * U.adjoint());
+ }
};
+} // end namespace internal
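
The IsComplex default template argument makes the real/complex dispatch automatic; a sketch of calling the helper directly (internal API, normally reached through MatrixBase::sqrt()):

    #include <Eigen/Dense>
    #include <unsupported/Eigen/MatrixFunctions>

    int main()
    {
      // Real scalar: IsComplex == 0, so the RealSchur specialization runs.
      Eigen::MatrixXd Ar = Eigen::MatrixXd::Identity(3, 3) * 4.0;
      Eigen::MatrixXd Sr;
      Eigen::internal::matrix_sqrt_compute<Eigen::MatrixXd>::run(Ar, Sr);   // Sr ~ 2*I

      // Complex scalar: IsComplex == 1, so the ComplexSchur specialization runs.
      Eigen::MatrixXcd Ac = Eigen::MatrixXcd::Identity(3, 3) * 4.0;
      Eigen::MatrixXcd Sc;
      Eigen::internal::matrix_sqrt_compute<Eigen::MatrixXcd>::run(Ac, Sc);
    }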
/** \ingroup MatrixFunctions_Module
*
@@ -415,14 +317,17 @@ class MatrixSquareRoot<MatrixType, 1>
template<typename Derived> class MatrixSquareRootReturnValue
: public ReturnByValue<MatrixSquareRootReturnValue<Derived> >
{
+ protected:
typedef typename Derived::Index Index;
+ typedef typename internal::ref_selector<Derived>::type DerivedNested;
+
public:
/** \brief Constructor.
*
* \param[in] src %Matrix (expression) forming the argument of the
* matrix square root.
*/
- MatrixSquareRootReturnValue(const Derived& src) : m_src(src) { }
+ explicit MatrixSquareRootReturnValue(const Derived& src) : m_src(src) { }
/** \brief Compute the matrix square root.
*
@@ -432,18 +337,17 @@ template<typename Derived> class MatrixSquareRootReturnValue
template <typename ResultType>
inline void evalTo(ResultType& result) const
{
- const typename Derived::PlainObject srcEvaluated = m_src.eval();
- MatrixSquareRoot<typename Derived::PlainObject> me(srcEvaluated);
- me.compute(result);
+ typedef typename internal::nested_eval<Derived, 10>::type DerivedEvalType;
+ typedef typename internal::remove_all<DerivedEvalType>::type DerivedEvalTypeClean;
+ DerivedEvalType tmp(m_src);
+ internal::matrix_sqrt_compute<DerivedEvalTypeClean>::run(tmp, result);
}
Index rows() const { return m_src.rows(); }
Index cols() const { return m_src.cols(); }
protected:
- const Derived& m_src;
- private:
- MatrixSquareRootReturnValue& operator=(const MatrixSquareRootReturnValue&);
+ const DerivedNested m_src;
};
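
In user code all of this is reached through MatrixBase::sqrt(), which returns the expression object above; evalTo() runs only when the expression is assigned (a sketch, not part of the patch):

    #include <Eigen/Dense>
    #include <unsupported/Eigen/MatrixFunctions>

    int main()
    {
      Eigen::MatrixXd A(2, 2);
      A << 4, 1,
           0, 9;
      // A.sqrt() builds a MatrixSquareRootReturnValue; the square root is
      // actually computed when it is assigned to S.
      Eigen::MatrixXd S = A.sqrt();
    }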
namespace internal {
diff --git a/unsupported/Eigen/src/MatrixFunctions/StemFunction.h b/unsupported/Eigen/src/MatrixFunctions/StemFunction.h
index 724e55c1d..7604df903 100644
--- a/unsupported/Eigen/src/MatrixFunctions/StemFunction.h
+++ b/unsupported/Eigen/src/MatrixFunctions/StemFunction.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2010 Jitse Niesen <jitse@maths.leeds.ac.uk>
+// Copyright (C) 2010, 2013 Jitse Niesen <jitse@maths.leeds.ac.uk>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@@ -12,93 +12,105 @@
namespace Eigen {
-/** \ingroup MatrixFunctions_Module
- * \brief Stem functions corresponding to standard mathematical functions.
- */
+namespace internal {
+
+/** \brief The exponential function (and its derivatives). */
template <typename Scalar>
-class StdStemFunctions
+Scalar stem_function_exp(Scalar x, int)
{
- public:
+ using std::exp;
+ return exp(x);
+}
- /** \brief The exponential function (and its derivatives). */
- static Scalar exp(Scalar x, int)
- {
- return std::exp(x);
- }
+/** \brief Cosine (and its derivatives). */
+template <typename Scalar>
+Scalar stem_function_cos(Scalar x, int n)
+{
+ using std::cos;
+ using std::sin;
+ Scalar res;
- /** \brief Cosine (and its derivatives). */
- static Scalar cos(Scalar x, int n)
- {
- Scalar res;
- switch (n % 4) {
- case 0:
- res = std::cos(x);
- break;
- case 1:
- res = -std::sin(x);
- break;
- case 2:
- res = -std::cos(x);
- break;
- case 3:
- res = std::sin(x);
- break;
- }
- return res;
- }
+ // unqualified calls let the using declarations above enable ADL for custom scalar types
+ switch (n % 4) {
+ case 0:
+ res = cos(x);
+ break;
+ case 1:
+ res = -sin(x);
+ break;
+ case 2:
+ res = -cos(x);
+ break;
+ case 3:
+ res = sin(x);
+ break;
+ }
+ return res;
+}
+
+/** \brief Sine (and its derivatives). */
+template <typename Scalar>
+Scalar stem_function_sin(Scalar x, int n)
+{
+ using std::cos;
+ using std::sin;
+ Scalar res;
- /** \brief Sine (and its derivatives). */
- static Scalar sin(Scalar x, int n)
- {
- Scalar res;
- switch (n % 4) {
- case 0:
- res = std::sin(x);
- break;
- case 1:
- res = std::cos(x);
- break;
- case 2:
- res = -std::sin(x);
- break;
- case 3:
- res = -std::cos(x);
- break;
- }
- return res;
- }
+ switch (n % 4) {
+ case 0:
+ res = sin(x);
+ break;
+ case 1:
+ res = cos(x);
+ break;
+ case 2:
+ res = -sin(x);
+ break;
+ case 3:
+ res = -cos(x);
+ break;
+ }
+ return res;
+}
- /** \brief Hyperbolic cosine (and its derivatives). */
- static Scalar cosh(Scalar x, int n)
- {
- Scalar res;
- switch (n % 2) {
- case 0:
- res = std::cosh(x);
- break;
- case 1:
- res = std::sinh(x);
- break;
- }
- return res;
- }
+/** \brief Hyperbolic cosine (and its derivatives). */
+template <typename Scalar>
+Scalar stem_function_cosh(Scalar x, int n)
+{
+ using std::cosh;
+ using std::sinh;
+ Scalar res;
+
+ switch (n % 2) {
+ case 0:
+ res = cosh(x);
+ break;
+ case 1:
+ res = sinh(x);
+ break;
+ }
+ return res;
+}
- /** \brief Hyperbolic sine (and its derivatives). */
- static Scalar sinh(Scalar x, int n)
- {
- Scalar res;
- switch (n % 2) {
- case 0:
- res = std::sinh(x);
- break;
- case 1:
- res = std::cosh(x);
- break;
- }
- return res;
- }
+/** \brief Hyperbolic sine (and its derivatives). */
+template <typename Scalar>
+Scalar stem_function_sinh(Scalar x, int n)
+{
+ using std::cosh;
+ using std::sinh;
+ Scalar res;
+
+ switch (n % 2) {
+ case 0:
+ res = sinh(x);
+ break;
+ case 1:
+ res = cosh(x);
+ break;
+ }
+ return res;
+}
-}; // end of class StdStemFunctions
+} // end namespace internal
} // end namespace Eigen
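
The int parameter of each stem function selects the derivative order, e.g. stem_function_cos(x, n) is the n-th derivative of cos at x; a sketch of the period-4 cycle (not part of the patch):

    #include <cassert>
    #include <cmath>
    #include <unsupported/Eigen/MatrixFunctions>

    int main()
    {
      double x = 0.3;
      // Derivatives of cos cycle with period 4: cos, -sin, -cos, sin, cos, ...
      assert(Eigen::internal::stem_function_cos(x, 0) == std::cos(x));
      assert(Eigen::internal::stem_function_cos(x, 1) == -std::sin(x));
      assert(Eigen::internal::stem_function_cos(x, 5) == -std::sin(x));  // 5 % 4 == 1
    }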
diff --git a/unsupported/Eigen/src/MoreVectorization/CMakeLists.txt b/unsupported/Eigen/src/MoreVectorization/CMakeLists.txt
deleted file mode 100644
index 1b887cc8e..000000000
--- a/unsupported/Eigen/src/MoreVectorization/CMakeLists.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-FILE(GLOB Eigen_MoreVectorization_SRCS "*.h")
-
-INSTALL(FILES
- ${Eigen_MoreVectorization_SRCS}
- DESTINATION ${INCLUDE_INSTALL_DIR}/unsupported/Eigen/src/MoreVectorization COMPONENT Devel
- )
diff --git a/unsupported/Eigen/src/NonLinearOptimization/CMakeLists.txt b/unsupported/Eigen/src/NonLinearOptimization/CMakeLists.txt
deleted file mode 100644
index 9322ddadf..000000000
--- a/unsupported/Eigen/src/NonLinearOptimization/CMakeLists.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-FILE(GLOB Eigen_NonLinearOptimization_SRCS "*.h")
-
-INSTALL(FILES
- ${Eigen_NonLinearOptimization_SRCS}
- DESTINATION ${INCLUDE_INSTALL_DIR}/unsupported/Eigen/src/NonLinearOptimization COMPONENT Devel
- )
diff --git a/unsupported/Eigen/src/NonLinearOptimization/HybridNonLinearSolver.h b/unsupported/Eigen/src/NonLinearOptimization/HybridNonLinearSolver.h
index b8ba6ddcb..8fe3ed86b 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/HybridNonLinearSolver.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/HybridNonLinearSolver.h
@@ -150,7 +150,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveInit(FVectorType &x)
fjac.resize(n, n);
if (!useExternalScaling)
diag.resize(n);
- eigen_assert( (!useExternalScaling || diag.size()==n) || "When useExternalScaling is set, the caller must provide a valid 'diag'");
+ eigen_assert( (!useExternalScaling || diag.size()==n) && "When useExternalScaling is set, the caller must provide a valid 'diag'");
/* Function Body */
nfev = 0;
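
The || to && change matters because a string literal converts to a non-null pointer, i.e. to true: cond || "msg" can never fail, so the old assertion was a no-op. The usual idiom, as a generic illustration (not Eigen code):

    #include <cassert>

    void check(int n)
    {
      // Always passes, even for n < 0: the string literal makes the OR true.
      // assert(n >= 0 || "n must be non-negative");

      // Fires exactly when n < 0, and the message appears in the diagnostic.
      assert(n >= 0 && "n must be non-negative");
    }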
@@ -390,7 +390,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiffInit(FVectorType &
fvec.resize(n);
if (!useExternalScaling)
diag.resize(n);
- eigen_assert( (!useExternalScaling || diag.size()==n) || "When useExternalScaling is set, the caller must provide a valid 'diag'");
+ eigen_assert( (!useExternalScaling || diag.size()==n) && "When useExternalScaling is set, the caller must provide a valid 'diag'");
/* Function Body */
nfev = 0;
diff --git a/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h b/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h
index bfeb26fc9..fe3b79ca7 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h
@@ -45,18 +45,24 @@ namespace LevenbergMarquardtSpace {
template<typename FunctorType, typename Scalar=double>
class LevenbergMarquardt
{
+ static Scalar sqrt_epsilon()
+ {
+ using std::sqrt;
+ return sqrt(NumTraits<Scalar>::epsilon());
+ }
+
public:
LevenbergMarquardt(FunctorType &_functor)
: functor(_functor) { nfev = njev = iter = 0; fnorm = gnorm = 0.; useExternalScaling=false; }
typedef DenseIndex Index;
-
+
struct Parameters {
Parameters()
: factor(Scalar(100.))
, maxfev(400)
- , ftol(std::sqrt(NumTraits<Scalar>::epsilon()))
- , xtol(std::sqrt(NumTraits<Scalar>::epsilon()))
+ , ftol(sqrt_epsilon())
+ , xtol(sqrt_epsilon())
, gtol(Scalar(0.))
, epsfcn(Scalar(0.)) {}
Scalar factor;
@@ -72,7 +78,7 @@ public:
LevenbergMarquardtSpace::Status lmder1(
FVectorType &x,
- const Scalar tol = std::sqrt(NumTraits<Scalar>::epsilon())
+ const Scalar tol = sqrt_epsilon()
);
LevenbergMarquardtSpace::Status minimize(FVectorType &x);
@@ -83,12 +89,12 @@ public:
FunctorType &functor,
FVectorType &x,
Index *nfev,
- const Scalar tol = std::sqrt(NumTraits<Scalar>::epsilon())
+ const Scalar tol = sqrt_epsilon()
);
LevenbergMarquardtSpace::Status lmstr1(
FVectorType &x,
- const Scalar tol = std::sqrt(NumTraits<Scalar>::epsilon())
+ const Scalar tol = sqrt_epsilon()
);
LevenbergMarquardtSpace::Status minimizeOptimumStorage(FVectorType &x);
@@ -109,6 +115,7 @@ public:
Scalar lm_param(void) { return par; }
private:
+
FunctorType &functor;
Index n;
Index m;
@@ -172,7 +179,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeInit(FVectorType &x)
fjac.resize(m, n);
if (!useExternalScaling)
diag.resize(n);
- eigen_assert( (!useExternalScaling || diag.size()==n) || "When useExternalScaling is set, the caller must provide a valid 'diag'");
+ eigen_assert( (!useExternalScaling || diag.size()==n) && "When useExternalScaling is set, the caller must provide a valid 'diag'");
qtf.resize(n);
/* Function Body */
@@ -208,7 +215,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOneStep(FVectorType &x)
{
using std::abs;
using std::sqrt;
-
+
eigen_assert(x.size()==n); // check the caller is not cheating us
/* calculate the jacobian matrix. */
@@ -391,7 +398,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageInit(FVectorType
fjac.resize(n, n);
if (!useExternalScaling)
diag.resize(n);
- eigen_assert( (!useExternalScaling || diag.size()==n) || "When useExternalScaling is set, the caller must provide a valid 'diag'");
+ eigen_assert( (!useExternalScaling || diag.size()==n) && "When useExternalScaling is set, the caller must provide a valid 'diag'");
qtf.resize(n);
/* Function Body */
diff --git a/unsupported/Eigen/src/NumericalDiff/CMakeLists.txt b/unsupported/Eigen/src/NumericalDiff/CMakeLists.txt
deleted file mode 100644
index 1199aca2f..000000000
--- a/unsupported/Eigen/src/NumericalDiff/CMakeLists.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-FILE(GLOB Eigen_NumericalDiff_SRCS "*.h")
-
-INSTALL(FILES
- ${Eigen_NumericalDiff_SRCS}
- DESTINATION ${INCLUDE_INSTALL_DIR}/unsupported/Eigen/src/NumericalDiff COMPONENT Devel
- )
diff --git a/unsupported/Eigen/src/Polynomials/CMakeLists.txt b/unsupported/Eigen/src/Polynomials/CMakeLists.txt
deleted file mode 100644
index 51f13f3cb..000000000
--- a/unsupported/Eigen/src/Polynomials/CMakeLists.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-FILE(GLOB Eigen_Polynomials_SRCS "*.h")
-
-INSTALL(FILES
- ${Eigen_Polynomials_SRCS}
- DESTINATION ${INCLUDE_INSTALL_DIR}/unsupported/Eigen/src/Polynomials COMPONENT Devel
- )
diff --git a/unsupported/Eigen/src/Polynomials/PolynomialSolver.h b/unsupported/Eigen/src/Polynomials/PolynomialSolver.h
index cd5c04bbf..03198ec8e 100644
--- a/unsupported/Eigen/src/Polynomials/PolynomialSolver.h
+++ b/unsupported/Eigen/src/Polynomials/PolynomialSolver.h
@@ -41,7 +41,7 @@ class PolynomialSolverBase
protected:
template< typename OtherPolynomial >
inline void setPolynomial( const OtherPolynomial& poly ){
- m_roots.resize(poly.size()); }
+ m_roots.resize(poly.size()-1); }
public:
template< typename OtherPolynomial >
@@ -316,7 +316,7 @@ class PolynomialSolverBase
* - real roots with greatest, smallest absolute real value.
* - greatest, smallest real roots.
*
- * WARNING: this polynomial solver is experimental, part of the unsuported Eigen modules.
+ * WARNING: this polynomial solver is experimental, part of the unsupported Eigen modules.
*
*
* Currently a QR algorithm is used to compute the eigenvalues of the companion matrix of
@@ -345,10 +345,19 @@ class PolynomialSolver : public PolynomialSolverBase<_Scalar,_Deg>
void compute( const OtherPolynomial& poly )
{
eigen_assert( Scalar(0) != poly[poly.size()-1] );
- internal::companion<Scalar,_Deg> companion( poly );
- companion.balance();
- m_eigenSolver.compute( companion.denseMatrix() );
- m_roots = m_eigenSolver.eigenvalues();
+ eigen_assert( poly.size() > 1 );
+ if(poly.size() > 2 )
+ {
+ internal::companion<Scalar,_Deg> companion( poly );
+ companion.balance();
+ m_eigenSolver.compute( companion.denseMatrix() );
+ m_roots = m_eigenSolver.eigenvalues();
+ }
+ else if(poly.size() == 2)
+ {
+ m_roots.resize(1);
+ m_roots[0] = -poly[0]/poly[1];
+ }
}
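
With the linear special case added, compute() now handles any degree >= 1; coefficients run from the constant term upward (a usage sketch, values made up):

    #include <iostream>
    #include <Eigen/Dense>
    #include <unsupported/Eigen/Polynomials>

    int main()
    {
      // p(x) = -2 + 0*x + 1*x^2, roots +-sqrt(2): takes the companion-matrix path.
      Eigen::Vector3d quad(-2.0, 0.0, 1.0);
      Eigen::PolynomialSolver<double, 2> qsolver(quad);
      std::cout << qsolver.roots().transpose() << "\n";

      // p(x) = 3 + 2*x: takes the new degree-1 branch, single root -3/2.
      Eigen::Vector2d lin(3.0, 2.0);
      Eigen::PolynomialSolver<double, 1> lsolver(lin);
      std::cout << lsolver.roots()(0) << "\n";
    }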
public:
@@ -376,10 +385,18 @@ class PolynomialSolver<_Scalar,1> : public PolynomialSolverBase<_Scalar,1>
template< typename OtherPolynomial >
void compute( const OtherPolynomial& poly )
{
- eigen_assert( Scalar(0) != poly[poly.size()-1] );
- m_roots[0] = -poly[0]/poly[poly.size()-1];
+ eigen_assert( poly.size() == 2 );
+ eigen_assert( Scalar(0) != poly[1] );
+ m_roots[0] = -poly[0]/poly[1];
}
+ public:
+ template< typename OtherPolynomial >
+ inline PolynomialSolver( const OtherPolynomial& poly ){
+ compute( poly ); }
+
+ inline PolynomialSolver(){}
+
protected:
using PS_Base::m_roots;
};
diff --git a/unsupported/Eigen/src/Polynomials/PolynomialUtils.h b/unsupported/Eigen/src/Polynomials/PolynomialUtils.h
index 2bb8bc84a..40ba65b7e 100644
--- a/unsupported/Eigen/src/Polynomials/PolynomialUtils.h
+++ b/unsupported/Eigen/src/Polynomials/PolynomialUtils.h
@@ -56,7 +56,7 @@ T poly_eval( const Polynomials& poly, const T& x )
for( DenseIndex i=1; i<poly.size(); ++i ){
val = val*inv_x + poly[i]; }
- return std::pow(x,(T)(poly.size()-1)) * val;
+ return numext::pow(x,(T)(poly.size()-1)) * val;
}
}
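
For |x| > 1 the branch shown evaluates the reversed coefficients by Horner's rule in 1/x and then rescales by x^(n-1), which is the power numext::pow now supplies; a quick check (a sketch, not part of the patch):

    #include <iostream>
    #include <Eigen/Dense>
    #include <unsupported/Eigen/Polynomials>

    int main()
    {
      // p(x) = 1 + 2*x + 3*x^2, so p(2) = 1 + 4 + 12 = 17
      Eigen::Vector3d coeffs(1.0, 2.0, 3.0);
      std::cout << Eigen::poly_eval(coeffs, 2.0) << "\n";  // 17
    }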
diff --git a/unsupported/Eigen/src/SVD/BDCSVD.h b/unsupported/Eigen/src/SVD/BDCSVD.h
deleted file mode 100644
index 11d4882e4..000000000
--- a/unsupported/Eigen/src/SVD/BDCSVD.h
+++ /dev/null
@@ -1,748 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// We used the "A Divide-And-Conquer Algorithm for the Bidiagonal SVD"
-// research report written by Ming Gu and Stanley C.Eisenstat
-// The code variable names correspond to the names they used in their
-// report
-//
-// Copyright (C) 2013 Gauthier Brun <brun.gauthier@gmail.com>
-// Copyright (C) 2013 Nicolas Carre <nicolas.carre@ensimag.fr>
-// Copyright (C) 2013 Jean Ceccato <jean.ceccato@ensimag.fr>
-// Copyright (C) 2013 Pierre Zoppitelli <pierre.zoppitelli@ensimag.fr>
-//
-// Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_BDCSVD_H
-#define EIGEN_BDCSVD_H
-
-#define EPSILON 0.0000000000000001
-
-#define ALGOSWAP 32
-
-namespace Eigen {
-/** \ingroup SVD_Module
- *
- *
- * \class BDCSVD
- *
- * \brief class Bidiagonal Divide and Conquer SVD
- *
- * \param MatrixType the type of the matrix of which we are computing the SVD decomposition
- * We plan to have a very similar interface to JacobiSVD on this class.
- * It should be used to speed up the calcul of SVD for big matrices.
- */
-template<typename _MatrixType>
-class BDCSVD : public SVDBase<_MatrixType>
-{
- typedef SVDBase<_MatrixType> Base;
-
-public:
- using Base::rows;
- using Base::cols;
-
- typedef _MatrixType MatrixType;
- typedef typename MatrixType::Scalar Scalar;
- typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
- typedef typename MatrixType::Index Index;
- enum {
- RowsAtCompileTime = MatrixType::RowsAtCompileTime,
- ColsAtCompileTime = MatrixType::ColsAtCompileTime,
- DiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime, ColsAtCompileTime),
- MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
- MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
- MaxDiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(MaxRowsAtCompileTime, MaxColsAtCompileTime),
- MatrixOptions = MatrixType::Options
- };
-
- typedef Matrix<Scalar, RowsAtCompileTime, RowsAtCompileTime,
- MatrixOptions, MaxRowsAtCompileTime, MaxRowsAtCompileTime>
- MatrixUType;
- typedef Matrix<Scalar, ColsAtCompileTime, ColsAtCompileTime,
- MatrixOptions, MaxColsAtCompileTime, MaxColsAtCompileTime>
- MatrixVType;
- typedef typename internal::plain_diag_type<MatrixType, RealScalar>::type SingularValuesType;
- typedef typename internal::plain_row_type<MatrixType>::type RowType;
- typedef typename internal::plain_col_type<MatrixType>::type ColType;
- typedef Matrix<Scalar, Dynamic, Dynamic> MatrixX;
- typedef Matrix<RealScalar, Dynamic, Dynamic> MatrixXr;
- typedef Matrix<RealScalar, Dynamic, 1> VectorType;
-
- /** \brief Default Constructor.
- *
- * The default constructor is useful in cases in which the user intends to
- * perform decompositions via BDCSVD::compute(const MatrixType&).
- */
- BDCSVD()
- : SVDBase<_MatrixType>::SVDBase(),
- algoswap(ALGOSWAP)
- {}
-
-
- /** \brief Default Constructor with memory preallocation
- *
- * Like the default constructor but with preallocation of the internal data
- * according to the specified problem size.
- * \sa BDCSVD()
- */
- BDCSVD(Index rows, Index cols, unsigned int computationOptions = 0)
- : SVDBase<_MatrixType>::SVDBase(),
- algoswap(ALGOSWAP)
- {
- allocate(rows, cols, computationOptions);
- }
-
- /** \brief Constructor performing the decomposition of given matrix.
- *
- * \param matrix the matrix to decompose
- * \param computationOptions optional parameter allowing to specify if you want full or thin U or V unitaries to be computed.
- * By default, none is computed. This is a bit - field, the possible bits are #ComputeFullU, #ComputeThinU,
- * #ComputeFullV, #ComputeThinV.
- *
- * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not
- * available with the (non - default) FullPivHouseholderQR preconditioner.
- */
- BDCSVD(const MatrixType& matrix, unsigned int computationOptions = 0)
- : SVDBase<_MatrixType>::SVDBase(),
- algoswap(ALGOSWAP)
- {
- compute(matrix, computationOptions);
- }
-
- ~BDCSVD()
- {
- }
- /** \brief Method performing the decomposition of given matrix using custom options.
- *
- * \param matrix the matrix to decompose
- * \param computationOptions optional parameter allowing to specify if you want full or thin U or V unitaries to be computed.
- * By default, none is computed. This is a bit - field, the possible bits are #ComputeFullU, #ComputeThinU,
- * #ComputeFullV, #ComputeThinV.
- *
- * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not
- * available with the (non - default) FullPivHouseholderQR preconditioner.
- */
- SVDBase<MatrixType>& compute(const MatrixType& matrix, unsigned int computationOptions);
-
- /** \brief Method performing the decomposition of given matrix using current options.
- *
- * \param matrix the matrix to decompose
- *
- * This method uses the current \a computationOptions, as already passed to the constructor or to compute(const MatrixType&, unsigned int).
- */
- SVDBase<MatrixType>& compute(const MatrixType& matrix)
- {
- return compute(matrix, this->m_computationOptions);
- }
-
- void setSwitchSize(int s)
- {
- eigen_assert(s>3 && "BDCSVD the size of the algo switch has to be greater than 4");
- algoswap = s;
- }
-
-
- /** \returns a (least squares) solution of \f$ A x = b \f$ using the current SVD decomposition of A.
- *
- * \param b the right - hand - side of the equation to solve.
- *
- * \note Solving requires both U and V to be computed. Thin U and V are enough, there is no need for full U or V.
- *
- * \note SVD solving is implicitly least - squares. Thus, this method serves both purposes of exact solving and least - squares solving.
- * In other words, the returned solution is guaranteed to minimize the Euclidean norm \f$ \Vert A x - b \Vert \f$.
- */
- template<typename Rhs>
- inline const internal::solve_retval<BDCSVD, Rhs>
- solve(const MatrixBase<Rhs>& b) const
- {
- eigen_assert(this->m_isInitialized && "BDCSVD is not initialized.");
- eigen_assert(SVDBase<_MatrixType>::computeU() && SVDBase<_MatrixType>::computeV() &&
- "BDCSVD::solve() requires both unitaries U and V to be computed (thin unitaries suffice).");
- return internal::solve_retval<BDCSVD, Rhs>(*this, b.derived());
- }
-
-
- const MatrixUType& matrixU() const
- {
- eigen_assert(this->m_isInitialized && "SVD is not initialized.");
- if (isTranspose){
- eigen_assert(this->computeV() && "This SVD decomposition didn't compute U. Did you ask for it?");
- return this->m_matrixV;
- }
- else
- {
- eigen_assert(this->computeU() && "This SVD decomposition didn't compute U. Did you ask for it?");
- return this->m_matrixU;
- }
-
- }
-
-
- const MatrixVType& matrixV() const
- {
- eigen_assert(this->m_isInitialized && "SVD is not initialized.");
- if (isTranspose){
- eigen_assert(this->computeU() && "This SVD decomposition didn't compute V. Did you ask for it?");
- return this->m_matrixU;
- }
- else
- {
- eigen_assert(this->computeV() && "This SVD decomposition didn't compute V. Did you ask for it?");
- return this->m_matrixV;
- }
- }
-
-private:
- void allocate(Index rows, Index cols, unsigned int computationOptions);
- void divide (Index firstCol, Index lastCol, Index firstRowW,
- Index firstColW, Index shift);
- void deflation43(Index firstCol, Index shift, Index i, Index size);
- void deflation44(Index firstColu , Index firstColm, Index firstRowW, Index firstColW, Index i, Index j, Index size);
- void deflation(Index firstCol, Index lastCol, Index k, Index firstRowW, Index firstColW, Index shift);
- void copyUV(MatrixXr naiveU, MatrixXr naiveV, MatrixX householderU, MatrixX houseHolderV);
-
-protected:
- MatrixXr m_naiveU, m_naiveV;
- MatrixXr m_computed;
- Index nRec;
- int algoswap;
- bool isTranspose, compU, compV;
-
-}; //end class BDCSVD
-
-
-// Methode to allocate ans initialize matrix and attributs
-template<typename MatrixType>
-void BDCSVD<MatrixType>::allocate(Index rows, Index cols, unsigned int computationOptions)
-{
- isTranspose = (cols > rows);
- if (SVDBase<MatrixType>::allocate(rows, cols, computationOptions)) return;
- m_computed = MatrixXr::Zero(this->m_diagSize + 1, this->m_diagSize );
- if (isTranspose){
- compU = this->computeU();
- compV = this->computeV();
- }
- else
- {
- compV = this->computeU();
- compU = this->computeV();
- }
- if (compU) m_naiveU = MatrixXr::Zero(this->m_diagSize + 1, this->m_diagSize + 1 );
- else m_naiveU = MatrixXr::Zero(2, this->m_diagSize + 1 );
-
- if (compV) m_naiveV = MatrixXr::Zero(this->m_diagSize, this->m_diagSize);
-
-
- //should be changed for a cleaner implementation
- if (isTranspose){
- bool aux;
- if (this->computeU()||this->computeV()){
- aux = this->m_computeFullU;
- this->m_computeFullU = this->m_computeFullV;
- this->m_computeFullV = aux;
- aux = this->m_computeThinU;
- this->m_computeThinU = this->m_computeThinV;
- this->m_computeThinV = aux;
- }
- }
-}// end allocate
-
-// Methode which compute the BDCSVD for the int
-template<>
-SVDBase<Matrix<int, Dynamic, Dynamic> >&
-BDCSVD<Matrix<int, Dynamic, Dynamic> >::compute(const MatrixType& matrix, unsigned int computationOptions) {
- allocate(matrix.rows(), matrix.cols(), computationOptions);
- this->m_nonzeroSingularValues = 0;
- m_computed = Matrix<int, Dynamic, Dynamic>::Zero(rows(), cols());
- for (int i=0; i<this->m_diagSize; i++) {
- this->m_singularValues.coeffRef(i) = 0;
- }
- if (this->m_computeFullU) this->m_matrixU = Matrix<int, Dynamic, Dynamic>::Zero(rows(), rows());
- if (this->m_computeFullV) this->m_matrixV = Matrix<int, Dynamic, Dynamic>::Zero(cols(), cols());
- this->m_isInitialized = true;
- return *this;
-}
-
-
-// Methode which compute the BDCSVD
-template<typename MatrixType>
-SVDBase<MatrixType>&
-BDCSVD<MatrixType>::compute(const MatrixType& matrix, unsigned int computationOptions)
-{
- allocate(matrix.rows(), matrix.cols(), computationOptions);
- using std::abs;
-
- //**** step 1 Bidiagonalization isTranspose = (matrix.cols()>matrix.rows()) ;
- MatrixType copy;
- if (isTranspose) copy = matrix.adjoint();
- else copy = matrix;
-
- internal::UpperBidiagonalization<MatrixX > bid(copy);
-
- //**** step 2 Divide
- // this is ugly and has to be redone (care of complex cast)
- MatrixXr temp;
- temp = bid.bidiagonal().toDenseMatrix().transpose();
- m_computed.setZero();
- for (int i=0; i<this->m_diagSize - 1; i++) {
- m_computed(i, i) = temp(i, i);
- m_computed(i + 1, i) = temp(i + 1, i);
- }
- m_computed(this->m_diagSize - 1, this->m_diagSize - 1) = temp(this->m_diagSize - 1, this->m_diagSize - 1);
- divide(0, this->m_diagSize - 1, 0, 0, 0);
-
- //**** step 3 copy
- for (int i=0; i<this->m_diagSize; i++) {
- RealScalar a = abs(m_computed.coeff(i, i));
- this->m_singularValues.coeffRef(i) = a;
- if (a == 0){
- this->m_nonzeroSingularValues = i;
- break;
- }
- else if (i == this->m_diagSize - 1)
- {
- this->m_nonzeroSingularValues = i + 1;
- break;
- }
- }
- copyUV(m_naiveV, m_naiveU, bid.householderU(), bid.householderV());
- this->m_isInitialized = true;
- return *this;
-}// end compute
-
-
-template<typename MatrixType>
-void BDCSVD<MatrixType>::copyUV(MatrixXr naiveU, MatrixXr naiveV, MatrixX householderU, MatrixX householderV){
- if (this->computeU()){
- MatrixX temp = MatrixX::Zero(naiveU.rows(), naiveU.cols());
- temp.real() = naiveU;
- if (this->m_computeThinU){
- this->m_matrixU = MatrixX::Identity(householderU.cols(), this->m_nonzeroSingularValues );
- this->m_matrixU.block(0, 0, this->m_diagSize, this->m_nonzeroSingularValues) =
- temp.block(0, 0, this->m_diagSize, this->m_nonzeroSingularValues);
- this->m_matrixU = householderU * this->m_matrixU ;
- }
- else
- {
- this->m_matrixU = MatrixX::Identity(householderU.cols(), householderU.cols());
- this->m_matrixU.block(0, 0, this->m_diagSize, this->m_diagSize) = temp.block(0, 0, this->m_diagSize, this->m_diagSize);
- this->m_matrixU = householderU * this->m_matrixU ;
- }
- }
- if (this->computeV()){
- MatrixX temp = MatrixX::Zero(naiveV.rows(), naiveV.cols());
- temp.real() = naiveV;
- if (this->m_computeThinV){
- this->m_matrixV = MatrixX::Identity(householderV.cols(),this->m_nonzeroSingularValues );
- this->m_matrixV.block(0, 0, this->m_nonzeroSingularValues, this->m_nonzeroSingularValues) =
- temp.block(0, 0, this->m_nonzeroSingularValues, this->m_nonzeroSingularValues);
- this->m_matrixV = householderV * this->m_matrixV ;
- }
- else
- {
- this->m_matrixV = MatrixX::Identity(householderV.cols(), householderV.cols());
- this->m_matrixV.block(0, 0, this->m_diagSize, this->m_diagSize) = temp.block(0, 0, this->m_diagSize, this->m_diagSize);
- this->m_matrixV = householderV * this->m_matrixV;
- }
- }
-}
-
-// The divide algorithm is done "in place", we are always working on subsets of the same matrix. The divide methods takes as argument the
-// place of the submatrix we are currently working on.
-
-//@param firstCol : The Index of the first column of the submatrix of m_computed and for m_naiveU;
-//@param lastCol : The Index of the last column of the submatrix of m_computed and for m_naiveU;
-// lastCol + 1 - firstCol is the size of the submatrix.
-//@param firstRowW : The Index of the first row of the matrix W that we are to change. (see the reference paper section 1 for more information on W)
-//@param firstRowW : Same as firstRowW with the column.
-//@param shift : Each time one takes the left submatrix, one must add 1 to the shift. Why? Because! We actually want the last column of the U submatrix
-// to become the first column (*coeff) and to shift all the other columns to the right. There are more details on the reference paper.
-template<typename MatrixType>
-void BDCSVD<MatrixType>::divide (Index firstCol, Index lastCol, Index firstRowW,
- Index firstColW, Index shift)
-{
- // requires nbRows = nbCols + 1;
- using std::pow;
- using std::sqrt;
- using std::abs;
- const Index n = lastCol - firstCol + 1;
- const Index k = n/2;
- RealScalar alphaK;
- RealScalar betaK;
- RealScalar r0;
- RealScalar lambda, phi, c0, s0;
- MatrixXr l, f;
- // We use the other algorithm which is more efficient for small
- // matrices.
- if (n < algoswap){
- JacobiSVD<MatrixXr> b(m_computed.block(firstCol, firstCol, n + 1, n),
- ComputeFullU | (ComputeFullV * compV)) ;
- if (compU) m_naiveU.block(firstCol, firstCol, n + 1, n + 1).real() << b.matrixU();
- else
- {
- m_naiveU.row(0).segment(firstCol, n + 1).real() << b.matrixU().row(0);
- m_naiveU.row(1).segment(firstCol, n + 1).real() << b.matrixU().row(n);
- }
- if (compV) m_naiveV.block(firstRowW, firstColW, n, n).real() << b.matrixV();
- m_computed.block(firstCol + shift, firstCol + shift, n + 1, n).setZero();
- for (int i=0; i<n; i++)
- {
- m_computed(firstCol + shift + i, firstCol + shift +i) = b.singularValues().coeffRef(i);
- }
- return;
- }
- // We use the divide and conquer algorithm
- alphaK = m_computed(firstCol + k, firstCol + k);
- betaK = m_computed(firstCol + k + 1, firstCol + k);
- // The divide must be done in that order in order to have good results. Divide change the data inside the submatrices
- // and the divide of the right submatrice reads one column of the left submatrice. That's why we need to treat the
- // right submatrix before the left one.
- divide(k + 1 + firstCol, lastCol, k + 1 + firstRowW, k + 1 + firstColW, shift);
- divide(firstCol, k - 1 + firstCol, firstRowW, firstColW + 1, shift + 1);
- if (compU)
- {
- lambda = m_naiveU(firstCol + k, firstCol + k);
- phi = m_naiveU(firstCol + k + 1, lastCol + 1);
- }
- else
- {
- lambda = m_naiveU(1, firstCol + k);
- phi = m_naiveU(0, lastCol + 1);
- }
- r0 = sqrt((abs(alphaK * lambda) * abs(alphaK * lambda))
- + abs(betaK * phi) * abs(betaK * phi));
- if (compU)
- {
- l = m_naiveU.row(firstCol + k).segment(firstCol, k);
- f = m_naiveU.row(firstCol + k + 1).segment(firstCol + k + 1, n - k - 1);
- }
- else
- {
- l = m_naiveU.row(1).segment(firstCol, k);
- f = m_naiveU.row(0).segment(firstCol + k + 1, n - k - 1);
- }
- if (compV) m_naiveV(firstRowW+k, firstColW) = 1;
- if (r0 == 0)
- {
- c0 = 1;
- s0 = 0;
- }
- else
- {
- c0 = alphaK * lambda / r0;
- s0 = betaK * phi / r0;
- }
- if (compU)
- {
- MatrixXr q1 (m_naiveU.col(firstCol + k).segment(firstCol, k + 1));
- // we shiftW Q1 to the right
- for (Index i = firstCol + k - 1; i >= firstCol; i--)
- {
- m_naiveU.col(i + 1).segment(firstCol, k + 1) << m_naiveU.col(i).segment(firstCol, k + 1);
- }
- // we shift q1 at the left with a factor c0
- m_naiveU.col(firstCol).segment( firstCol, k + 1) << (q1 * c0);
- // last column = q1 * - s0
- m_naiveU.col(lastCol + 1).segment(firstCol, k + 1) << (q1 * ( - s0));
- // first column = q2 * s0
- m_naiveU.col(firstCol).segment(firstCol + k + 1, n - k) <<
- m_naiveU.col(lastCol + 1).segment(firstCol + k + 1, n - k) *s0;
- // q2 *= c0
- m_naiveU.col(lastCol + 1).segment(firstCol + k + 1, n - k) *= c0;
- }
- else
- {
- RealScalar q1 = (m_naiveU(0, firstCol + k));
- // we shift Q1 to the right
- for (Index i = firstCol + k - 1; i >= firstCol; i--)
- {
- m_naiveU(0, i + 1) = m_naiveU(0, i);
- }
- // we shift q1 at the left with a factor c0
- m_naiveU(0, firstCol) = (q1 * c0);
- // last column = q1 * - s0
- m_naiveU(0, lastCol + 1) = (q1 * ( - s0));
- // first column = q2 * s0
- m_naiveU(1, firstCol) = m_naiveU(1, lastCol + 1) *s0;
- // q2 *= c0
- m_naiveU(1, lastCol + 1) *= c0;
- m_naiveU.row(1).segment(firstCol + 1, k).setZero();
- m_naiveU.row(0).segment(firstCol + k + 1, n - k - 1).setZero();
- }
- m_computed(firstCol + shift, firstCol + shift) = r0;
- m_computed.col(firstCol + shift).segment(firstCol + shift + 1, k) << alphaK * l.transpose().real();
- m_computed.col(firstCol + shift).segment(firstCol + shift + k + 1, n - k - 1) << betaK * f.transpose().real();
-
-
- // the line below do the deflation of the matrix for the third part of the algorithm
- // Here the deflation is commented because the third part of the algorithm is not implemented
- // the third part of the algorithm is a fast SVD on the matrix m_computed which works thanks to the deflation
-
- deflation(firstCol, lastCol, k, firstRowW, firstColW, shift);
-
- // Third part of the algorithm, since the real third part of the algorithm is not implemeted we use a JacobiSVD
- JacobiSVD<MatrixXr> res= JacobiSVD<MatrixXr>(m_computed.block(firstCol + shift, firstCol +shift, n + 1, n),
- ComputeFullU | (ComputeFullV * compV)) ;
- if (compU) m_naiveU.block(firstCol, firstCol, n + 1, n + 1) *= res.matrixU();
- else m_naiveU.block(0, firstCol, 2, n + 1) *= res.matrixU();
-
- if (compV) m_naiveV.block(firstRowW, firstColW, n, n) *= res.matrixV();
- m_computed.block(firstCol + shift, firstCol + shift, n, n) << MatrixXr::Zero(n, n);
- for (int i=0; i<n; i++)
- m_computed(firstCol + shift + i, firstCol + shift +i) = res.singularValues().coeffRef(i);
- // end of the third part
-
-
-}// end divide
-
-
-// page 12_13
-// i >= 1, di almost null and zi non null.
-// We use a rotation to zero out zi applied to the left of M
-template <typename MatrixType>
-void BDCSVD<MatrixType>::deflation43(Index firstCol, Index shift, Index i, Index size){
- using std::abs;
- using std::sqrt;
- using std::pow;
- RealScalar c = m_computed(firstCol + shift, firstCol + shift);
- RealScalar s = m_computed(i, firstCol + shift);
- RealScalar r = sqrt(pow(abs(c), 2) + pow(abs(s), 2));
- if (r == 0){
- m_computed(i, i)=0;
- return;
- }
- c/=r;
- s/=r;
- m_computed(firstCol + shift, firstCol + shift) = r;
- m_computed(i, firstCol + shift) = 0;
- m_computed(i, i) = 0;
- if (compU){
- m_naiveU.col(firstCol).segment(firstCol,size) =
- c * m_naiveU.col(firstCol).segment(firstCol, size) -
- s * m_naiveU.col(i).segment(firstCol, size) ;
-
- m_naiveU.col(i).segment(firstCol, size) =
- (c + s*s/c) * m_naiveU.col(i).segment(firstCol, size) +
- (s/c) * m_naiveU.col(firstCol).segment(firstCol,size);
- }
-}// end deflation 43
-
-
-// page 13
-// i,j >= 1, i != j and |di - dj| < epsilon * norm2(M)
-// We apply two rotations to have zj = 0;
-template <typename MatrixType>
-void BDCSVD<MatrixType>::deflation44(Index firstColu , Index firstColm, Index firstRowW, Index firstColW, Index i, Index j, Index size){
- using std::abs;
- using std::sqrt;
- using std::conj;
- using std::pow;
- RealScalar c = m_computed(firstColm, firstColm + j - 1);
- RealScalar s = m_computed(firstColm, firstColm + i - 1);
- RealScalar r = sqrt(pow(abs(c), 2) + pow(abs(s), 2));
- if (r==0){
- m_computed(firstColm + i, firstColm + i) = m_computed(firstColm + j, firstColm + j);
- return;
- }
- c/=r;
- s/=r;
- m_computed(firstColm + i, firstColm) = r;
- m_computed(firstColm + i, firstColm + i) = m_computed(firstColm + j, firstColm + j);
- m_computed(firstColm + j, firstColm) = 0;
- if (compU){
- m_naiveU.col(firstColu + i).segment(firstColu, size) =
- c * m_naiveU.col(firstColu + i).segment(firstColu, size) -
- s * m_naiveU.col(firstColu + j).segment(firstColu, size) ;
-
- m_naiveU.col(firstColu + j).segment(firstColu, size) =
- (c + s*s/c) * m_naiveU.col(firstColu + j).segment(firstColu, size) +
- (s/c) * m_naiveU.col(firstColu + i).segment(firstColu, size);
- }
- if (compV){
- m_naiveV.col(firstColW + i).segment(firstRowW, size - 1) =
- c * m_naiveV.col(firstColW + i).segment(firstRowW, size - 1) +
- s * m_naiveV.col(firstColW + j).segment(firstRowW, size - 1) ;
-
- m_naiveV.col(firstColW + j).segment(firstRowW, size - 1) =
- (c + s*s/c) * m_naiveV.col(firstColW + j).segment(firstRowW, size - 1) -
- (s/c) * m_naiveV.col(firstColW + i).segment(firstRowW, size - 1);
- }
-}// end deflation 44
-
-
-
-template <typename MatrixType>
-void BDCSVD<MatrixType>::deflation(Index firstCol, Index lastCol, Index k, Index firstRowW, Index firstColW, Index shift){
- //condition 4.1
- RealScalar EPS = EPSILON * (std::max<RealScalar>(m_computed(firstCol + shift + 1, firstCol + shift + 1), m_computed(firstCol + k, firstCol + k)));
- const Index length = lastCol + 1 - firstCol;
- if (m_computed(firstCol + shift, firstCol + shift) < EPS){
- m_computed(firstCol + shift, firstCol + shift) = EPS;
- }
- //condition 4.2
- for (Index i=firstCol + shift + 1;i<=lastCol + shift;i++){
- if (std::abs(m_computed(i, firstCol + shift)) < EPS){
- m_computed(i, firstCol + shift) = 0;
- }
- }
-
- //condition 4.3
- for (Index i=firstCol + shift + 1;i<=lastCol + shift; i++){
- if (m_computed(i, i) < EPS){
- deflation43(firstCol, shift, i, length);
- }
- }
-
- //condition 4.4
-
- Index i=firstCol + shift + 1, j=firstCol + shift + k + 1;
- //we stock the final place of each line
- Index *permutation = new Index[length];
-
- for (Index p =1; p < length; p++) {
- if (i> firstCol + shift + k){
- permutation[p] = j;
- j++;
- } else if (j> lastCol + shift)
- {
- permutation[p] = i;
- i++;
- }
- else
- {
- if (m_computed(i, i) < m_computed(j, j)){
- permutation[p] = j;
- j++;
- }
- else
- {
- permutation[p] = i;
- i++;
- }
- }
- }
- //we do the permutation
- RealScalar aux;
- //we stock the current index of each col
- //and the column of each index
- Index *realInd = new Index[length];
- Index *realCol = new Index[length];
- for (int pos = 0; pos< length; pos++){
- realCol[pos] = pos + firstCol + shift;
- realInd[pos] = pos;
- }
- const Index Zero = firstCol + shift;
- VectorType temp;
- for (int i = 1; i < length - 1; i++){
- const Index I = i + Zero;
- const Index realI = realInd[i];
- const Index j = permutation[length - i] - Zero;
- const Index J = realCol[j];
-
- //diag displace
- aux = m_computed(I, I);
- m_computed(I, I) = m_computed(J, J);
- m_computed(J, J) = aux;
-
- //firstrow displace
- aux = m_computed(I, Zero);
- m_computed(I, Zero) = m_computed(J, Zero);
- m_computed(J, Zero) = aux;
-
- // change columns
- if (compU) {
- temp = m_naiveU.col(I - shift).segment(firstCol, length + 1);
- m_naiveU.col(I - shift).segment(firstCol, length + 1) <<
- m_naiveU.col(J - shift).segment(firstCol, length + 1);
- m_naiveU.col(J - shift).segment(firstCol, length + 1) << temp;
- }
- else
- {
- temp = m_naiveU.col(I - shift).segment(0, 2);
- m_naiveU.col(I - shift).segment(0, 2) <<
- m_naiveU.col(J - shift).segment(0, 2);
- m_naiveU.col(J - shift).segment(0, 2) << temp;
- }
- if (compV) {
- const Index CWI = I + firstColW - Zero;
- const Index CWJ = J + firstColW - Zero;
- temp = m_naiveV.col(CWI).segment(firstRowW, length);
- m_naiveV.col(CWI).segment(firstRowW, length) << m_naiveV.col(CWJ).segment(firstRowW, length);
- m_naiveV.col(CWJ).segment(firstRowW, length) << temp;
- }
-
- //update real pos
- realCol[realI] = J;
- realCol[j] = I;
- realInd[J - Zero] = realI;
- realInd[I - Zero] = j;
- }
- for (Index i = firstCol + shift + 1; i<lastCol + shift;i++){
- if ((m_computed(i + 1, i + 1) - m_computed(i, i)) < EPS){
- deflation44(firstCol ,
- firstCol + shift,
- firstRowW,
- firstColW,
- i - Zero,
- i + 1 - Zero,
- length);
- }
- }
- delete [] permutation;
- delete [] realInd;
- delete [] realCol;
-
-}//end deflation
-
-
-namespace internal{
-
-template<typename _MatrixType, typename Rhs>
-struct solve_retval<BDCSVD<_MatrixType>, Rhs>
- : solve_retval_base<BDCSVD<_MatrixType>, Rhs>
-{
- typedef BDCSVD<_MatrixType> BDCSVDType;
- EIGEN_MAKE_SOLVE_HELPERS(BDCSVDType, Rhs)
-
- template<typename Dest> void evalTo(Dest& dst) const
- {
- eigen_assert(rhs().rows() == dec().rows());
- // A = U S V^*
- // So A^{ - 1} = V S^{ - 1} U^*
- Index diagSize = (std::min)(dec().rows(), dec().cols());
- typename BDCSVDType::SingularValuesType invertedSingVals(diagSize);
- Index nonzeroSingVals = dec().nonzeroSingularValues();
- invertedSingVals.head(nonzeroSingVals) = dec().singularValues().head(nonzeroSingVals).array().inverse();
- invertedSingVals.tail(diagSize - nonzeroSingVals).setZero();
-
- dst = dec().matrixV().leftCols(diagSize)
- * invertedSingVals.asDiagonal()
- * dec().matrixU().leftCols(diagSize).adjoint()
- * rhs();
- return;
- }
-};
-
-} //end namespace internal
-
- /** \svd_module
- *
- * \return the singular value decomposition of \c *this computed by
- * BDC Algorithm
- *
- * \sa class BDCSVD
- */
-/*
-template<typename Derived>
-BDCSVD<typename MatrixBase<Derived>::PlainObject>
-MatrixBase<Derived>::bdcSvd(unsigned int computationOptions) const
-{
- return BDCSVD<PlainObject>(*this, computationOptions);
-}
-*/
-
-} // end namespace Eigen
-
-#endif
diff --git a/unsupported/Eigen/src/SVD/CMakeLists.txt b/unsupported/Eigen/src/SVD/CMakeLists.txt
deleted file mode 100644
index b40baf092..000000000
--- a/unsupported/Eigen/src/SVD/CMakeLists.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-FILE(GLOB Eigen_SVD_SRCS "*.h")
-
-INSTALL(FILES
- ${Eigen_SVD_SRCS}
- DESTINATION ${INCLUDE_INSTALL_DIR}unsupported/Eigen/src/SVD COMPONENT Devel
- )
diff --git a/unsupported/Eigen/src/SVD/JacobiSVD.h b/unsupported/Eigen/src/SVD/JacobiSVD.h
deleted file mode 100644
index 02fac409e..000000000
--- a/unsupported/Eigen/src/SVD/JacobiSVD.h
+++ /dev/null
@@ -1,782 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2009-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_JACOBISVD_H
-#define EIGEN_JACOBISVD_H
-
-namespace Eigen {
-
-namespace internal {
-// forward declaration (needed by ICC)
-// the empty body is required by MSVC
-template<typename MatrixType, int QRPreconditioner,
- bool IsComplex = NumTraits<typename MatrixType::Scalar>::IsComplex>
-struct svd_precondition_2x2_block_to_be_real {};
-
-/*** QR preconditioners (R-SVD)
- ***
- *** Their role is to reduce the problem of computing the SVD to the case of a square matrix.
- *** This approach, known as R-SVD, is an optimization for rectangular-enough matrices, and is a requirement for
- *** JacobiSVD which by itself is only able to work on square matrices.
- ***/
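-
-// The R-SVD idea in a nutshell (illustrative sketch): for a tall matrix A
-// (rows > cols), factor A = Q R (thin QR) with R square upper triangular,
-// compute the SVD of the small square factor, R = U' S V^*, and recover
-// A = (Q U') S V^*.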
-
-enum { PreconditionIfMoreColsThanRows, PreconditionIfMoreRowsThanCols };
-
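-// Compile-time dispatch helper: 'a' (resp. 'b') is true when the fixed sizes
-// guarantee cols <= rows (resp. rows <= cols), so the corresponding
-// preconditioning case can never occur; 'ret' is then false (as it is with
-// NoQRPreconditioner), and the no-op specialization below is selected.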
-template<typename MatrixType, int QRPreconditioner, int Case>
-struct qr_preconditioner_should_do_anything
-{
- enum { a = MatrixType::RowsAtCompileTime != Dynamic &&
- MatrixType::ColsAtCompileTime != Dynamic &&
- MatrixType::ColsAtCompileTime <= MatrixType::RowsAtCompileTime,
- b = MatrixType::RowsAtCompileTime != Dynamic &&
- MatrixType::ColsAtCompileTime != Dynamic &&
- MatrixType::RowsAtCompileTime <= MatrixType::ColsAtCompileTime,
- ret = !( (QRPreconditioner == NoQRPreconditioner) ||
- (Case == PreconditionIfMoreColsThanRows && bool(a)) ||
- (Case == PreconditionIfMoreRowsThanCols && bool(b)) )
- };
-};
-
-template<typename MatrixType, int QRPreconditioner, int Case,
- bool DoAnything = qr_preconditioner_should_do_anything<MatrixType, QRPreconditioner, Case>::ret
-> struct qr_preconditioner_impl {};
-
-template<typename MatrixType, int QRPreconditioner, int Case>
-class qr_preconditioner_impl<MatrixType, QRPreconditioner, Case, false>
-{
-public:
- typedef typename MatrixType::Index Index;
- void allocate(const JacobiSVD<MatrixType, QRPreconditioner>&) {}
- bool run(JacobiSVD<MatrixType, QRPreconditioner>&, const MatrixType&)
- {
- return false;
- }
-};
-
-/*** preconditioner using FullPivHouseholderQR ***/
-
-template<typename MatrixType>
-class qr_preconditioner_impl<MatrixType, FullPivHouseholderQRPreconditioner, PreconditionIfMoreRowsThanCols, true>
-{
-public:
- typedef typename MatrixType::Index Index;
- typedef typename MatrixType::Scalar Scalar;
- enum
- {
- RowsAtCompileTime = MatrixType::RowsAtCompileTime,
- MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime
- };
- typedef Matrix<Scalar, 1, RowsAtCompileTime, RowMajor, 1, MaxRowsAtCompileTime> WorkspaceType;
-
- void allocate(const JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner>& svd)
- {
- if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols())
- {
- m_qr.~QRType();
- ::new (&m_qr) QRType(svd.rows(), svd.cols());
- }
- if (svd.m_computeFullU) m_workspace.resize(svd.rows());
- }
-
- bool run(JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner>& svd, const MatrixType& matrix)
- {
- if(matrix.rows() > matrix.cols())
- {
- m_qr.compute(matrix);
- svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView<Upper>();
- if(svd.m_computeFullU) m_qr.matrixQ().evalTo(svd.m_matrixU, m_workspace);
- if(svd.computeV()) svd.m_matrixV = m_qr.colsPermutation();
- return true;
- }
- return false;
- }
-private:
- typedef FullPivHouseholderQR<MatrixType> QRType;
- QRType m_qr;
- WorkspaceType m_workspace;
-};
-
-template<typename MatrixType>
-class qr_preconditioner_impl<MatrixType, FullPivHouseholderQRPreconditioner, PreconditionIfMoreColsThanRows, true>
-{
-public:
- typedef typename MatrixType::Index Index;
- typedef typename MatrixType::Scalar Scalar;
- enum
- {
- RowsAtCompileTime = MatrixType::RowsAtCompileTime,
- ColsAtCompileTime = MatrixType::ColsAtCompileTime,
- MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
- MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
- Options = MatrixType::Options
- };
- typedef Matrix<Scalar, ColsAtCompileTime, RowsAtCompileTime, Options, MaxColsAtCompileTime, MaxRowsAtCompileTime>
- TransposeTypeWithSameStorageOrder;
-
- void allocate(const JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner>& svd)
- {
- if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols())
- {
- m_qr.~QRType();
- ::new (&m_qr) QRType(svd.cols(), svd.rows());
- }
- m_adjoint.resize(svd.cols(), svd.rows());
- if (svd.m_computeFullV) m_workspace.resize(svd.cols());
- }
-
- bool run(JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner>& svd, const MatrixType& matrix)
- {
- if(matrix.cols() > matrix.rows())
- {
- m_adjoint = matrix.adjoint();
- m_qr.compute(m_adjoint);
- svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView<Upper>().adjoint();
- if(svd.m_computeFullV) m_qr.matrixQ().evalTo(svd.m_matrixV, m_workspace);
- if(svd.computeU()) svd.m_matrixU = m_qr.colsPermutation();
- return true;
- }
- else return false;
- }
-private:
- typedef FullPivHouseholderQR<TransposeTypeWithSameStorageOrder> QRType;
- QRType m_qr;
- TransposeTypeWithSameStorageOrder m_adjoint;
- typename internal::plain_row_type<MatrixType>::type m_workspace;
-};
-
-/*** preconditioner using ColPivHouseholderQR ***/
-
-template<typename MatrixType>
-class qr_preconditioner_impl<MatrixType, ColPivHouseholderQRPreconditioner, PreconditionIfMoreRowsThanCols, true>
-{
-public:
- typedef typename MatrixType::Index Index;
-
- void allocate(const JacobiSVD<MatrixType, ColPivHouseholderQRPreconditioner>& svd)
- {
- if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols())
- {
- m_qr.~QRType();
- ::new (&m_qr) QRType(svd.rows(), svd.cols());
- }
- if (svd.m_computeFullU) m_workspace.resize(svd.rows());
- else if (svd.m_computeThinU) m_workspace.resize(svd.cols());
- }
-
- bool run(JacobiSVD<MatrixType, ColPivHouseholderQRPreconditioner>& svd, const MatrixType& matrix)
- {
- if(matrix.rows() > matrix.cols())
- {
- m_qr.compute(matrix);
- svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView<Upper>();
- if(svd.m_computeFullU) m_qr.householderQ().evalTo(svd.m_matrixU, m_workspace);
- else if(svd.m_computeThinU)
- {
- svd.m_matrixU.setIdentity(matrix.rows(), matrix.cols());
- m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixU, m_workspace);
- }
- if(svd.computeV()) svd.m_matrixV = m_qr.colsPermutation();
- return true;
- }
- return false;
- }
-
-private:
- typedef ColPivHouseholderQR<MatrixType> QRType;
- QRType m_qr;
- typename internal::plain_col_type<MatrixType>::type m_workspace;
-};
-
-template<typename MatrixType>
-class qr_preconditioner_impl<MatrixType, ColPivHouseholderQRPreconditioner, PreconditionIfMoreColsThanRows, true>
-{
-public:
- typedef typename MatrixType::Index Index;
- typedef typename MatrixType::Scalar Scalar;
- enum
- {
- RowsAtCompileTime = MatrixType::RowsAtCompileTime,
- ColsAtCompileTime = MatrixType::ColsAtCompileTime,
- MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
- MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
- Options = MatrixType::Options
- };
-
- typedef Matrix<Scalar, ColsAtCompileTime, RowsAtCompileTime, Options, MaxColsAtCompileTime, MaxRowsAtCompileTime>
- TransposeTypeWithSameStorageOrder;
-
- void allocate(const JacobiSVD<MatrixType, ColPivHouseholderQRPreconditioner>& svd)
- {
- if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols())
- {
- m_qr.~QRType();
- ::new (&m_qr) QRType(svd.cols(), svd.rows());
- }
- if (svd.m_computeFullV) m_workspace.resize(svd.cols());
- else if (svd.m_computeThinV) m_workspace.resize(svd.rows());
- m_adjoint.resize(svd.cols(), svd.rows());
- }
-
- bool run(JacobiSVD<MatrixType, ColPivHouseholderQRPreconditioner>& svd, const MatrixType& matrix)
- {
- if(matrix.cols() > matrix.rows())
- {
- m_adjoint = matrix.adjoint();
- m_qr.compute(m_adjoint);
-
- svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView<Upper>().adjoint();
- if(svd.m_computeFullV) m_qr.householderQ().evalTo(svd.m_matrixV, m_workspace);
- else if(svd.m_computeThinV)
- {
- svd.m_matrixV.setIdentity(matrix.cols(), matrix.rows());
- m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixV, m_workspace);
- }
- if(svd.computeU()) svd.m_matrixU = m_qr.colsPermutation();
- return true;
- }
- else return false;
- }
-
-private:
- typedef ColPivHouseholderQR<TransposeTypeWithSameStorageOrder> QRType;
- QRType m_qr;
- TransposeTypeWithSameStorageOrder m_adjoint;
- typename internal::plain_row_type<MatrixType>::type m_workspace;
-};
-
-/*** preconditioner using HouseholderQR ***/
-
-template<typename MatrixType>
-class qr_preconditioner_impl<MatrixType, HouseholderQRPreconditioner, PreconditionIfMoreRowsThanCols, true>
-{
-public:
- typedef typename MatrixType::Index Index;
-
- void allocate(const JacobiSVD<MatrixType, HouseholderQRPreconditioner>& svd)
- {
- if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols())
- {
- m_qr.~QRType();
- ::new (&m_qr) QRType(svd.rows(), svd.cols());
- }
- if (svd.m_computeFullU) m_workspace.resize(svd.rows());
- else if (svd.m_computeThinU) m_workspace.resize(svd.cols());
- }
-
- bool run(JacobiSVD<MatrixType, HouseholderQRPreconditioner>& svd, const MatrixType& matrix)
- {
- if(matrix.rows() > matrix.cols())
- {
- m_qr.compute(matrix);
- svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView<Upper>();
- if(svd.m_computeFullU) m_qr.householderQ().evalTo(svd.m_matrixU, m_workspace);
- else if(svd.m_computeThinU)
- {
- svd.m_matrixU.setIdentity(matrix.rows(), matrix.cols());
- m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixU, m_workspace);
- }
- if(svd.computeV()) svd.m_matrixV.setIdentity(matrix.cols(), matrix.cols());
- return true;
- }
- return false;
- }
-private:
- typedef HouseholderQR<MatrixType> QRType;
- QRType m_qr;
- typename internal::plain_col_type<MatrixType>::type m_workspace;
-};
-
-template<typename MatrixType>
-class qr_preconditioner_impl<MatrixType, HouseholderQRPreconditioner, PreconditionIfMoreColsThanRows, true>
-{
-public:
- typedef typename MatrixType::Index Index;
- typedef typename MatrixType::Scalar Scalar;
- enum
- {
- RowsAtCompileTime = MatrixType::RowsAtCompileTime,
- ColsAtCompileTime = MatrixType::ColsAtCompileTime,
- MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
- MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
- Options = MatrixType::Options
- };
-
- typedef Matrix<Scalar, ColsAtCompileTime, RowsAtCompileTime, Options, MaxColsAtCompileTime, MaxRowsAtCompileTime>
- TransposeTypeWithSameStorageOrder;
-
- void allocate(const JacobiSVD<MatrixType, HouseholderQRPreconditioner>& svd)
- {
- if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols())
- {
- m_qr.~QRType();
- ::new (&m_qr) QRType(svd.cols(), svd.rows());
- }
- if (svd.m_computeFullV) m_workspace.resize(svd.cols());
- else if (svd.m_computeThinV) m_workspace.resize(svd.rows());
- m_adjoint.resize(svd.cols(), svd.rows());
- }
-
- bool run(JacobiSVD<MatrixType, HouseholderQRPreconditioner>& svd, const MatrixType& matrix)
- {
- if(matrix.cols() > matrix.rows())
- {
- m_adjoint = matrix.adjoint();
- m_qr.compute(m_adjoint);
-
- svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView<Upper>().adjoint();
- if(svd.m_computeFullV) m_qr.householderQ().evalTo(svd.m_matrixV, m_workspace);
- else if(svd.m_computeThinV)
- {
- svd.m_matrixV.setIdentity(matrix.cols(), matrix.rows());
- m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixV, m_workspace);
- }
- if(svd.computeU()) svd.m_matrixU.setIdentity(matrix.rows(), matrix.rows());
- return true;
- }
- else return false;
- }
-
-private:
- typedef HouseholderQR<TransposeTypeWithSameStorageOrder> QRType;
- QRType m_qr;
- TransposeTypeWithSameStorageOrder m_adjoint;
- typename internal::plain_row_type<MatrixType>::type m_workspace;
-};
-
-/*** 2x2 SVD implementation
- ***
- *** JacobiSVD proceeds by solving a series of 2x2 SVD subproblems
- ***/
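-
-// One elementary step (illustrative sketch): for a pair of indices (p,q), the
-// 2x2 block [ a_pp, a_pq ; a_qp, a_qq ] of the work matrix is made diagonal by
-// two Jacobi rotations, work <- J_left * work * J_right, and the rotations are
-// accumulated into the columns of U and V respectively.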
-
-template<typename MatrixType, int QRPreconditioner>
-struct svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner, false>
-{
- typedef JacobiSVD<MatrixType, QRPreconditioner> SVD;
- typedef typename SVD::Index Index;
- static void run(typename SVD::WorkMatrixType&, SVD&, Index, Index) {}
-};
-
-template<typename MatrixType, int QRPreconditioner>
-struct svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner, true>
-{
- typedef JacobiSVD<MatrixType, QRPreconditioner> SVD;
- typedef typename MatrixType::Scalar Scalar;
- typedef typename MatrixType::RealScalar RealScalar;
- typedef typename SVD::Index Index;
- static void run(typename SVD::WorkMatrixType& work_matrix, SVD& svd, Index p, Index q)
- {
- using std::sqrt;
- Scalar z;
- JacobiRotation<Scalar> rot;
- RealScalar n = sqrt(numext::abs2(work_matrix.coeff(p,p)) + numext::abs2(work_matrix.coeff(q,p)));
- if(n==0)
- {
- z = abs(work_matrix.coeff(p,q)) / work_matrix.coeff(p,q);
- work_matrix.row(p) *= z;
- if(svd.computeU()) svd.m_matrixU.col(p) *= conj(z);
- z = abs(work_matrix.coeff(q,q)) / work_matrix.coeff(q,q);
- work_matrix.row(q) *= z;
- if(svd.computeU()) svd.m_matrixU.col(q) *= conj(z);
- }
- else
- {
- rot.c() = conj(work_matrix.coeff(p,p)) / n;
- rot.s() = work_matrix.coeff(q,p) / n;
- work_matrix.applyOnTheLeft(p,q,rot);
- if(svd.computeU()) svd.m_matrixU.applyOnTheRight(p,q,rot.adjoint());
- if(work_matrix.coeff(p,q) != Scalar(0))
- {
- Scalar z = abs(work_matrix.coeff(p,q)) / work_matrix.coeff(p,q);
- work_matrix.col(q) *= z;
- if(svd.computeV()) svd.m_matrixV.col(q) *= z;
- }
- if(work_matrix.coeff(q,q) != Scalar(0))
- {
- z = abs(work_matrix.coeff(q,q)) / work_matrix.coeff(q,q);
- work_matrix.row(q) *= z;
- if(svd.computeU()) svd.m_matrixU.col(q) *= conj(z);
- }
- }
- }
-};
-
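-// Computes rotations (j_left, j_right) such that applying them on the left and
-// right of the real 2x2 block at (p,q) yields a diagonal matrix. Sketch: rot1
-// is applied on the left to make the block symmetric (t = m00 + m11 is the
-// trace, d = m10 - m01 the antisymmetric part), then makeJacobi diagonalizes
-// the symmetric result, and j_left = rot1 * j_right^T.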
-template<typename MatrixType, typename RealScalar, typename Index>
-void real_2x2_jacobi_svd(const MatrixType& matrix, Index p, Index q,
- JacobiRotation<RealScalar> *j_left,
- JacobiRotation<RealScalar> *j_right)
-{
- using std::sqrt;
- Matrix<RealScalar,2,2> m;
- m << numext::real(matrix.coeff(p,p)), numext::real(matrix.coeff(p,q)),
- numext::real(matrix.coeff(q,p)), numext::real(matrix.coeff(q,q));
- JacobiRotation<RealScalar> rot1;
- RealScalar t = m.coeff(0,0) + m.coeff(1,1);
- RealScalar d = m.coeff(1,0) - m.coeff(0,1);
- if(t == RealScalar(0))
- {
- rot1.c() = RealScalar(0);
- rot1.s() = d > RealScalar(0) ? RealScalar(1) : RealScalar(-1);
- }
- else
- {
- RealScalar u = d / t;
- rot1.c() = RealScalar(1) / sqrt(RealScalar(1) + numext::abs2(u));
- rot1.s() = rot1.c() * u;
- }
- m.applyOnTheLeft(0,1,rot1);
- j_right->makeJacobi(m,0,1);
- *j_left = rot1 * j_right->transpose();
-}
-
-} // end namespace internal
-
-/** \ingroup SVD_Module
- *
- *
- * \class JacobiSVD
- *
- * \brief Two-sided Jacobi SVD decomposition of a rectangular matrix
- *
- * \param MatrixType the type of the matrix of which we are computing the SVD decomposition
- * \param QRPreconditioner this optional parameter specifies the type of QR decomposition that will be used internally
- * for the R-SVD step for non-square matrices. See the discussion of possible values below.
- *
- * The SVD decomposition factors any n-by-p matrix \a A as a product
- * \f[ A = U S V^* \f]
- * where \a U is a n-by-n unitary, \a V is a p-by-p unitary, and \a S is a n-by-p real positive matrix which is zero outside of its main diagonal;
- * the diagonal entries of S are known as the \em singular \em values of \a A and the columns of \a U and \a V are known as the left
- * and right \em singular \em vectors of \a A respectively.
- *
- * Singular values are always sorted in decreasing order.
- *
- * This JacobiSVD decomposition computes only the singular values by default. If you want \a U or \a V, you need to ask for them explicitly.
- *
- * You can ask for only \em thin \a U or \a V to be computed, meaning the following. In case of a rectangular n-by-p matrix, letting \a m be the
- * smaller value among \a n and \a p, there are only \a m singular vectors; the remaining columns of \a U and \a V do not correspond to actual
- * singular vectors. Asking for \em thin \a U or \a V means asking for only their \a m first columns to be formed. So \a U is then a n-by-m matrix,
- * and \a V is then a p-by-m matrix. Notice that thin \a U and \a V are all you need for (least squares) solving.
- *
- * Here's an example demonstrating basic usage:
- * \include JacobiSVD_basic.cpp
- * Output: \verbinclude JacobiSVD_basic.out
- *
- * This JacobiSVD class is a two-sided Jacobi R-SVD decomposition, ensuring optimal reliability and accuracy. The downside is that it's slower than
- * bidiagonalizing SVD algorithms for large square matrices; however its complexity is still \f$ O(n^2p) \f$ where \a n is the smaller dimension and
- * \a p is the greater dimension, meaning that it is still of the same order of complexity as the faster bidiagonalizing R-SVD algorithms.
- * In particular, like any R-SVD, it takes advantage of non-squareness in that its complexity is only linear in the greater dimension.
- *
- * If the input matrix has inf or nan coefficients, the result of the computation is undefined, but the computation is guaranteed to
- * terminate in finite (and reasonable) time.
- *
- * The possible values for QRPreconditioner are:
- * \li ColPivHouseholderQRPreconditioner is the default. In practice it's very safe. It uses column-pivoting QR.
- * \li FullPivHouseholderQRPreconditioner is the safest and slowest. It uses full-pivoting QR.
- * Contrary to the other QRs, it doesn't allow computing thin unitaries.
- * \li HouseholderQRPreconditioner is the fastest, but less safe and accurate than the pivoting variants. It uses non-pivoting QR.
- * This is very similar in safety and accuracy to the bidiagonalization process used by bidiagonalizing SVD algorithms (since bidiagonalization
- * is inherently non-pivoting). However the resulting SVD is still more reliable than bidiagonalizing SVDs because the Jacobi-based iterative
- * process is more reliable than the optimized bidiagonal SVD iterations.
- * \li NoQRPreconditioner disables the QR preconditioner entirely. This is useful if you know that you will only be computing
- * JacobiSVD decompositions of square matrices. Non-square matrices require a QR preconditioner. Using this option will result in
- * faster compilation and smaller executable code. It won't significantly speed up computation, since JacobiSVD is always checking
- * if QR preconditioning is needed before applying it anyway.
- *
- * \sa MatrixBase::jacobiSvd()
- */
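-// Usage sketch (illustrative; assumes the standard Eigen typedefs MatrixXf and VectorXf):
-//   MatrixXf A = MatrixXf::Random(5, 3);
-//   JacobiSVD<MatrixXf> svd(A, ComputeThinU | ComputeThinV);
-//   VectorXf b = VectorXf::Random(5);
-//   VectorXf x = svd.solve(b);           // least-squares solution of A x = b
-//   // svd.singularValues() holds the singular values in decreasing order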
-template<typename _MatrixType, int QRPreconditioner>
-class JacobiSVD : public SVDBase<_MatrixType>
-{
- public:
-
- typedef _MatrixType MatrixType;
- typedef typename MatrixType::Scalar Scalar;
- typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
- typedef typename MatrixType::Index Index;
- enum {
- RowsAtCompileTime = MatrixType::RowsAtCompileTime,
- ColsAtCompileTime = MatrixType::ColsAtCompileTime,
- DiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime,ColsAtCompileTime),
- MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
- MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
- MaxDiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(MaxRowsAtCompileTime,MaxColsAtCompileTime),
- MatrixOptions = MatrixType::Options
- };
-
- typedef Matrix<Scalar, RowsAtCompileTime, RowsAtCompileTime,
- MatrixOptions, MaxRowsAtCompileTime, MaxRowsAtCompileTime>
- MatrixUType;
- typedef Matrix<Scalar, ColsAtCompileTime, ColsAtCompileTime,
- MatrixOptions, MaxColsAtCompileTime, MaxColsAtCompileTime>
- MatrixVType;
- typedef typename internal::plain_diag_type<MatrixType, RealScalar>::type SingularValuesType;
- typedef typename internal::plain_row_type<MatrixType>::type RowType;
- typedef typename internal::plain_col_type<MatrixType>::type ColType;
- typedef Matrix<Scalar, DiagSizeAtCompileTime, DiagSizeAtCompileTime,
- MatrixOptions, MaxDiagSizeAtCompileTime, MaxDiagSizeAtCompileTime>
- WorkMatrixType;
-
- /** \brief Default Constructor.
- *
- * The default constructor is useful in cases in which the user intends to
- * perform decompositions via JacobiSVD::compute(const MatrixType&).
- */
- JacobiSVD()
- : SVDBase<_MatrixType>::SVDBase()
- {}
-
-
- /** \brief Default Constructor with memory preallocation
- *
- * Like the default constructor but with preallocation of the internal data
- * according to the specified problem size.
- * \sa JacobiSVD()
- */
- JacobiSVD(Index rows, Index cols, unsigned int computationOptions = 0)
- : SVDBase<_MatrixType>::SVDBase()
- {
- allocate(rows, cols, computationOptions);
- }
-
- /** \brief Constructor performing the decomposition of given matrix.
- *
- * \param matrix the matrix to decompose
- * \param computationOptions optional parameter specifying whether full or thin U or V unitaries should be computed.
- * By default, none is computed. This is a bit-field; the possible bits are #ComputeFullU, #ComputeThinU,
- * #ComputeFullV, #ComputeThinV.
- *
- * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They are also not
- * available with the (non-default) FullPivHouseholderQR preconditioner.
- */
- JacobiSVD(const MatrixType& matrix, unsigned int computationOptions = 0)
- : SVDBase<_MatrixType>::SVDBase()
- {
- compute(matrix, computationOptions);
- }
-
- /** \brief Method performing the decomposition of given matrix using custom options.
- *
- * \param matrix the matrix to decompose
- * \param computationOptions optional parameter specifying whether full or thin U or V unitaries should be computed.
- * By default, none is computed. This is a bit-field; the possible bits are #ComputeFullU, #ComputeThinU,
- * #ComputeFullV, #ComputeThinV.
- *
- * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They are also not
- * available with the (non-default) FullPivHouseholderQR preconditioner.
- */
- SVDBase<MatrixType>& compute(const MatrixType& matrix, unsigned int computationOptions);
-
- /** \brief Method performing the decomposition of given matrix using current options.
- *
- * \param matrix the matrix to decompose
- *
- * This method uses the current \a computationOptions, as already passed to the constructor or to compute(const MatrixType&, unsigned int).
- */
- SVDBase<MatrixType>& compute(const MatrixType& matrix)
- {
- return compute(matrix, this->m_computationOptions);
- }
-
- /** \returns a (least squares) solution of \f$ A x = b \f$ using the current SVD decomposition of A.
- *
- * \param b the right-hand-side of the equation to solve.
- *
- * \note Solving requires both U and V to be computed. Thin U and V are enough, there is no need for full U or V.
- *
- * \note SVD solving is implicitly least-squares. Thus, this method serves both purposes of exact solving and least-squares solving.
- * In other words, the returned solution is guaranteed to minimize the Euclidean norm \f$ \Vert A x - b \Vert \f$.
- */
- template<typename Rhs>
- inline const internal::solve_retval<JacobiSVD, Rhs>
- solve(const MatrixBase<Rhs>& b) const
- {
- eigen_assert(this->m_isInitialized && "JacobiSVD is not initialized.");
- eigen_assert(SVDBase<MatrixType>::computeU() && SVDBase<MatrixType>::computeV() && "JacobiSVD::solve() requires both unitaries U and V to be computed (thin unitaries suffice).");
- return internal::solve_retval<JacobiSVD, Rhs>(*this, b.derived());
- }
-
-
-
- private:
- void allocate(Index rows, Index cols, unsigned int computationOptions);
-
- protected:
- WorkMatrixType m_workMatrix;
-
- template<typename __MatrixType, int _QRPreconditioner, bool _IsComplex>
- friend struct internal::svd_precondition_2x2_block_to_be_real;
- template<typename __MatrixType, int _QRPreconditioner, int _Case, bool _DoAnything>
- friend struct internal::qr_preconditioner_impl;
-
- internal::qr_preconditioner_impl<MatrixType, QRPreconditioner, internal::PreconditionIfMoreColsThanRows> m_qr_precond_morecols;
- internal::qr_preconditioner_impl<MatrixType, QRPreconditioner, internal::PreconditionIfMoreRowsThanCols> m_qr_precond_morerows;
-};
-
-template<typename MatrixType, int QRPreconditioner>
-void JacobiSVD<MatrixType, QRPreconditioner>::allocate(Index rows, Index cols, unsigned int computationOptions)
-{
- if (SVDBase<MatrixType>::allocate(rows, cols, computationOptions)) return;
-
- if (QRPreconditioner == FullPivHouseholderQRPreconditioner)
- {
- eigen_assert(!(this->m_computeThinU || this->m_computeThinV) &&
- "JacobiSVD: can't compute thin U or thin V with the FullPivHouseholderQR preconditioner. "
- "Use the ColPivHouseholderQR preconditioner instead.");
- }
-
- m_workMatrix.resize(this->m_diagSize, this->m_diagSize);
-
- if(this->m_cols>this->m_rows) m_qr_precond_morecols.allocate(*this);
- if(this->m_rows>this->m_cols) m_qr_precond_morerows.allocate(*this);
-}
-
-template<typename MatrixType, int QRPreconditioner>
-SVDBase<MatrixType>&
-JacobiSVD<MatrixType, QRPreconditioner>::compute(const MatrixType& matrix, unsigned int computationOptions)
-{
- using std::abs;
- allocate(matrix.rows(), matrix.cols(), computationOptions);
-
- // currently we stop when we reach precision 2*epsilon as the last bit of precision can require an unreasonable number of iterations,
- // only worsening the precision of U and V as we accumulate more rotations
- const RealScalar precision = RealScalar(2) * NumTraits<Scalar>::epsilon();
-
- // limit for very small denormal numbers to be considered zero in order to avoid infinite loops (see bug 286)
- const RealScalar considerAsZero = RealScalar(2) * std::numeric_limits<RealScalar>::denorm_min();
-
- /*** step 1. The R-SVD step: we use a QR decomposition to reduce to the case of a square matrix */
-
- if(!m_qr_precond_morecols.run(*this, matrix) && !m_qr_precond_morerows.run(*this, matrix))
- {
- m_workMatrix = matrix.block(0,0,this->m_diagSize,this->m_diagSize);
- if(this->m_computeFullU) this->m_matrixU.setIdentity(this->m_rows,this->m_rows);
- if(this->m_computeThinU) this->m_matrixU.setIdentity(this->m_rows,this->m_diagSize);
- if(this->m_computeFullV) this->m_matrixV.setIdentity(this->m_cols,this->m_cols);
- if(this->m_computeThinV) this->m_matrixV.setIdentity(this->m_cols, this->m_diagSize);
- }
-
- /*** step 2. The main Jacobi SVD iteration. ***/
-
- bool finished = false;
- while(!finished)
- {
- finished = true;
-
- // do a sweep: for all index pairs (p,q), perform SVD of the corresponding 2x2 sub-matrix
-
- for(Index p = 1; p < this->m_diagSize; ++p)
- {
- for(Index q = 0; q < p; ++q)
- {
- // if this 2x2 sub-matrix is not diagonal already...
- // notice that this comparison will evaluate to false if any NaN is involved, ensuring that NaN's don't
- // keep us iterating forever. Similarly, small denormal numbers are considered zero.
- using std::max;
- RealScalar threshold = (max)(considerAsZero, precision * (max)(abs(m_workMatrix.coeff(p,p)),
- abs(m_workMatrix.coeff(q,q))));
- if((max)(abs(m_workMatrix.coeff(p,q)),abs(m_workMatrix.coeff(q,p))) > threshold)
- {
- finished = false;
-
- // perform SVD decomposition of 2x2 sub-matrix corresponding to indices p,q to make it diagonal
- internal::svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner>::run(m_workMatrix, *this, p, q);
- JacobiRotation<RealScalar> j_left, j_right;
- internal::real_2x2_jacobi_svd(m_workMatrix, p, q, &j_left, &j_right);
-
- // accumulate resulting Jacobi rotations
- m_workMatrix.applyOnTheLeft(p,q,j_left);
- if(SVDBase<MatrixType>::computeU()) this->m_matrixU.applyOnTheRight(p,q,j_left.transpose());
-
- m_workMatrix.applyOnTheRight(p,q,j_right);
- if(SVDBase<MatrixType>::computeV()) this->m_matrixV.applyOnTheRight(p,q,j_right);
- }
- }
- }
- }
-
- /*** step 3. The work matrix is now diagonal, so ensure it's positive so its diagonal entries are the singular values ***/
-
- for(Index i = 0; i < this->m_diagSize; ++i)
- {
- RealScalar a = abs(m_workMatrix.coeff(i,i));
- this->m_singularValues.coeffRef(i) = a;
- if(SVDBase<MatrixType>::computeU() && (a!=RealScalar(0))) this->m_matrixU.col(i) *= this->m_workMatrix.coeff(i,i)/a;
- }
-
- /*** step 4. Sort singular values in descending order and compute the number of nonzero singular values ***/
-
- this->m_nonzeroSingularValues = this->m_diagSize;
- for(Index i = 0; i < this->m_diagSize; i++)
- {
- Index pos;
- RealScalar maxRemainingSingularValue = this->m_singularValues.tail(this->m_diagSize-i).maxCoeff(&pos);
- if(maxRemainingSingularValue == RealScalar(0))
- {
- this->m_nonzeroSingularValues = i;
- break;
- }
- if(pos)
- {
- pos += i;
- std::swap(this->m_singularValues.coeffRef(i), this->m_singularValues.coeffRef(pos));
- if(SVDBase<MatrixType>::computeU()) this->m_matrixU.col(pos).swap(this->m_matrixU.col(i));
- if(SVDBase<MatrixType>::computeV()) this->m_matrixV.col(pos).swap(this->m_matrixV.col(i));
- }
- }
-
- this->m_isInitialized = true;
- return *this;
-}
-
-namespace internal {
-template<typename _MatrixType, int QRPreconditioner, typename Rhs>
-struct solve_retval<JacobiSVD<_MatrixType, QRPreconditioner>, Rhs>
- : solve_retval_base<JacobiSVD<_MatrixType, QRPreconditioner>, Rhs>
-{
- typedef JacobiSVD<_MatrixType, QRPreconditioner> JacobiSVDType;
- EIGEN_MAKE_SOLVE_HELPERS(JacobiSVDType,Rhs)
-
- template<typename Dest> void evalTo(Dest& dst) const
- {
- eigen_assert(rhs().rows() == dec().rows());
-
- // A = U S V^*
- // So A^{-1} = V S^{-1} U^*
-
- Index diagSize = (std::min)(dec().rows(), dec().cols());
- typename JacobiSVDType::SingularValuesType invertedSingVals(diagSize);
-
- Index nonzeroSingVals = dec().nonzeroSingularValues();
- invertedSingVals.head(nonzeroSingVals) = dec().singularValues().head(nonzeroSingVals).array().inverse();
- invertedSingVals.tail(diagSize - nonzeroSingVals).setZero();
-
- dst = dec().matrixV().leftCols(diagSize)
- * invertedSingVals.asDiagonal()
- * dec().matrixU().leftCols(diagSize).adjoint()
- * rhs();
- }
-};
-} // end namespace internal
-
-/** \svd_module
- *
- * \return the singular value decomposition of \c *this computed by two-sided
- * Jacobi transformations.
- *
- * \sa class JacobiSVD
- */
-template<typename Derived>
-JacobiSVD<typename MatrixBase<Derived>::PlainObject>
-MatrixBase<Derived>::jacobiSvd(unsigned int computationOptions) const
-{
- return JacobiSVD<PlainObject>(*this, computationOptions);
-}
-
-} // end namespace Eigen
-
-#endif // EIGEN_JACOBISVD_H
diff --git a/unsupported/Eigen/src/SVD/SVDBase.h b/unsupported/Eigen/src/SVD/SVDBase.h
deleted file mode 100644
index fd8af3b8c..000000000
--- a/unsupported/Eigen/src/SVD/SVDBase.h
+++ /dev/null
@@ -1,236 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2009-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
-//
-// Copyright (C) 2013 Gauthier Brun <brun.gauthier@gmail.com>
-// Copyright (C) 2013 Nicolas Carre <nicolas.carre@ensimag.fr>
-// Copyright (C) 2013 Jean Ceccato <jean.ceccato@ensimag.fr>
-// Copyright (C) 2013 Pierre Zoppitelli <pierre.zoppitelli@ensimag.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_SVD_H
-#define EIGEN_SVD_H
-
-namespace Eigen {
-/** \ingroup SVD_Module
- *
- *
- * \class SVDBase
- *
- * \brief Base class of the SVD algorithm classes
- *
- * \param MatrixType the type of the matrix of which we are computing the SVD decomposition
- *
- * The SVD decomposition factors any n-by-p matrix \a A as a product
- * \f[ A = U S V^* \f]
- * where \a U is a n-by-n unitary, \a V is a p-by-p unitary, and \a S is a n-by-p real positive matrix which is zero outside of its main diagonal;
- * the diagonal entries of S are known as the \em singular \em values of \a A and the columns of \a U and \a V are known as the left
- * and right \em singular \em vectors of \a A respectively.
- *
- * Singular values are always sorted in decreasing order.
- *
- *
- * You can ask for only \em thin \a U or \a V to be computed, meaning the following. In case of a rectangular n-by-p matrix, letting \a m be the
- * smaller value among \a n and \a p, there are only \a m singular vectors; the remaining columns of \a U and \a V do not correspond to actual
- * singular vectors. Asking for \em thin \a U or \a V means asking for only their \a m first columns to be formed. So \a U is then a n-by-m matrix,
- * and \a V is then a p-by-m matrix. Notice that thin \a U and \a V are all you need for (least squares) solving.
- *
- * If the input matrix has inf or nan coefficients, the result of the computation is undefined, but the computation is guaranteed to
- * terminate in finite (and reasonable) time.
- * \sa MatrixBase::genericSvd()
- */
-template<typename _MatrixType>
-class SVDBase
-{
-
-public:
- typedef _MatrixType MatrixType;
- typedef typename MatrixType::Scalar Scalar;
- typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
- typedef typename MatrixType::Index Index;
- enum {
- RowsAtCompileTime = MatrixType::RowsAtCompileTime,
- ColsAtCompileTime = MatrixType::ColsAtCompileTime,
- DiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime,ColsAtCompileTime),
- MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
- MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
- MaxDiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(MaxRowsAtCompileTime,MaxColsAtCompileTime),
- MatrixOptions = MatrixType::Options
- };
-
- typedef Matrix<Scalar, RowsAtCompileTime, RowsAtCompileTime,
- MatrixOptions, MaxRowsAtCompileTime, MaxRowsAtCompileTime>
- MatrixUType;
- typedef Matrix<Scalar, ColsAtCompileTime, ColsAtCompileTime,
- MatrixOptions, MaxColsAtCompileTime, MaxColsAtCompileTime>
- MatrixVType;
- typedef typename internal::plain_diag_type<MatrixType, RealScalar>::type SingularValuesType;
- typedef typename internal::plain_row_type<MatrixType>::type RowType;
- typedef typename internal::plain_col_type<MatrixType>::type ColType;
- typedef Matrix<Scalar, DiagSizeAtCompileTime, DiagSizeAtCompileTime,
- MatrixOptions, MaxDiagSizeAtCompileTime, MaxDiagSizeAtCompileTime>
- WorkMatrixType;
-
-
-
-
- /** \brief Method performing the decomposition of given matrix using custom options.
- *
- * \param matrix the matrix to decompose
- * \param computationOptions optional parameter specifying whether full or thin U or V unitaries should be computed.
- * By default, none is computed. This is a bit-field; the possible bits are #ComputeFullU, #ComputeThinU,
- * #ComputeFullV, #ComputeThinV.
- *
- * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They are also not
- * available with the (non-default) FullPivHouseholderQR preconditioner.
- */
- SVDBase& compute(const MatrixType& matrix, unsigned int computationOptions);
-
- /** \brief Method performing the decomposition of given matrix using current options.
- *
- * \param matrix the matrix to decompose
- *
- * This method uses the current \a computationOptions, as already passed to the constructor or to compute(const MatrixType&, unsigned int).
- */
- //virtual SVDBase& compute(const MatrixType& matrix) = 0;
- SVDBase& compute(const MatrixType& matrix);
-
- /** \returns the \a U matrix.
- *
- * For the SVDBase decomposition of a n-by-p matrix, letting \a m be the minimum of \a n and \a p,
- * the U matrix is n-by-n if you asked for #ComputeFullU, and is n-by-m if you asked for #ComputeThinU.
- *
- * The \a m first columns of \a U are the left singular vectors of the matrix being decomposed.
- *
- * This method asserts that you asked for \a U to be computed.
- */
- const MatrixUType& matrixU() const
- {
- eigen_assert(m_isInitialized && "SVD is not initialized.");
- eigen_assert(computeU() && "This SVD decomposition didn't compute U. Did you ask for it?");
- return m_matrixU;
- }
-
- /** \returns the \a V matrix.
- *
- * For the SVD decomposition of a n-by-p matrix, letting \a m be the minimum of \a n and \a p,
- * the V matrix is p-by-p if you asked for #ComputeFullV, and is p-by-m if you asked for ComputeThinV.
- *
- * The \a m first columns of \a V are the right singular vectors of the matrix being decomposed.
- *
- * This method asserts that you asked for \a V to be computed.
- */
- const MatrixVType& matrixV() const
- {
- eigen_assert(m_isInitialized && "SVD is not initialized.");
- eigen_assert(computeV() && "This SVD decomposition didn't compute V. Did you ask for it?");
- return m_matrixV;
- }
-
- /** \returns the vector of singular values.
- *
- * For the SVD decomposition of a n-by-p matrix, letting \a m be the minimum of \a n and \a p, the
- * returned vector has size \a m. Singular values are always sorted in decreasing order.
- */
- const SingularValuesType& singularValues() const
- {
- eigen_assert(m_isInitialized && "SVD is not initialized.");
- return m_singularValues;
- }
-
-
-
- /** \returns the number of singular values that are not exactly 0 */
- Index nonzeroSingularValues() const
- {
- eigen_assert(m_isInitialized && "SVD is not initialized.");
- return m_nonzeroSingularValues;
- }
-
-
- /** \returns true if \a U (full or thin) is asked for in this SVD decomposition */
- inline bool computeU() const { return m_computeFullU || m_computeThinU; }
- /** \returns true if \a V (full or thin) is asked for in this SVD decomposition */
- inline bool computeV() const { return m_computeFullV || m_computeThinV; }
-
-
- inline Index rows() const { return m_rows; }
- inline Index cols() const { return m_cols; }
-
-
-protected:
- // return true if already allocated
- bool allocate(Index rows, Index cols, unsigned int computationOptions);
-
- MatrixUType m_matrixU;
- MatrixVType m_matrixV;
- SingularValuesType m_singularValues;
- bool m_isInitialized, m_isAllocated;
- bool m_computeFullU, m_computeThinU;
- bool m_computeFullV, m_computeThinV;
- unsigned int m_computationOptions;
- Index m_nonzeroSingularValues, m_rows, m_cols, m_diagSize;
-
-
- /** \brief Default Constructor.
- *
- * Default constructor of SVDBase
- */
- SVDBase()
- : m_isInitialized(false),
- m_isAllocated(false),
- m_computationOptions(0),
- m_rows(-1), m_cols(-1)
- {}
-
-
-};
-
-
-template<typename MatrixType>
-bool SVDBase<MatrixType>::allocate(Index rows, Index cols, unsigned int computationOptions)
-{
- eigen_assert(rows >= 0 && cols >= 0);
-
- if (m_isAllocated &&
- rows == m_rows &&
- cols == m_cols &&
- computationOptions == m_computationOptions)
- {
- return true;
- }
-
- m_rows = rows;
- m_cols = cols;
- m_isInitialized = false;
- m_isAllocated = true;
- m_computationOptions = computationOptions;
- m_computeFullU = (computationOptions & ComputeFullU) != 0;
- m_computeThinU = (computationOptions & ComputeThinU) != 0;
- m_computeFullV = (computationOptions & ComputeFullV) != 0;
- m_computeThinV = (computationOptions & ComputeThinV) != 0;
- eigen_assert(!(m_computeFullU && m_computeThinU) && "SVDBase: you can't ask for both full and thin U");
- eigen_assert(!(m_computeFullV && m_computeThinV) && "SVDBase: you can't ask for both full and thin V");
- eigen_assert(EIGEN_IMPLIES(m_computeThinU || m_computeThinV, MatrixType::ColsAtCompileTime==Dynamic) &&
- "SVDBase: thin U and V are only available when your matrix has a dynamic number of columns.");
-
- m_diagSize = (std::min)(m_rows, m_cols);
- m_singularValues.resize(m_diagSize);
- if(RowsAtCompileTime==Dynamic)
- m_matrixU.resize(m_rows, m_computeFullU ? m_rows
- : m_computeThinU ? m_diagSize
- : 0);
- if(ColsAtCompileTime==Dynamic)
- m_matrixV.resize(m_cols, m_computeFullV ? m_cols
- : m_computeThinV ? m_diagSize
- : 0);
-
- return false;
-}
-
-}// end namespace
-
-#endif // EIGEN_SVD_H
diff --git a/unsupported/Eigen/src/SVD/TODOBdcsvd.txt b/unsupported/Eigen/src/SVD/TODOBdcsvd.txt
deleted file mode 100644
index 0bc9a46e6..000000000
--- a/unsupported/Eigen/src/SVD/TODOBdcsvd.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-TO DO LIST
-
-
-
-(optional optimization) - do all the allocations in the allocate part
- - support static matrices
- - return an error at compile time when using integer matrices (int, long, std::complex<int>, ...)
-
-to finish the algorithm:
- - implement the last part of the algorithm as described in the reference paper.
- You may find more information on that part in this paper
-
- - replace the call to JacobiSVD at the end of the divide algorithm, just after the call to
- deflation.
-
-(suggested step by step resolution)
- 0) comment out the call to Jacobi in the last part of the divide method, and everything right after,
- until the end of the method. The commented code can serve as a guideline for steps 3), 4) and 6)
- 1) solve the secular equation (characteristic equation) on the values that are nonzero (zi!=0 and di!=0) after the deflation,
- which should be uncommented in the divide method
- 2) remember the singular values that are already computed (zi=0)
- 3) place the singular values found into m_computed at the right positions (together with the ones found in step 2)),
- in decreasing order
- 4) set the first column to zero (except the first element) in m_computed
- 5) compute all the singular vectors when CompV is set to true, and only the left vectors when
- CompV is set to false
- 6) multiply naiveU and naiveV on the right by the matrices found; only naiveU when CompV is set to
- false. /!\ if CompU is false, naiveU has only 2 rows
- 7) delete everything commented out in step 0)
diff --git a/unsupported/Eigen/src/SVD/doneInBDCSVD.txt b/unsupported/Eigen/src/SVD/doneInBDCSVD.txt
deleted file mode 100644
index 8563ddab8..000000000
--- a/unsupported/Eigen/src/SVD/doneInBDCSVD.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-This unsupported package provides a divide-and-conquer algorithm to compute the SVD.
-
-The implementation follows as closely as possible the following reference paper:
-http://www.cs.yale.edu/publications/techreports/tr933.pdf
-
-The code documentation uses the same variable names as the reference paper. The code, deflation included, is
-working, but there are a few things that could be optimised, as explained in TODOBdcsvd.txt.
-
-In the code, comments mark the line where the third step of the algorithm would go, so one can simply add a call
-to a function implementing the last part of the algorithm without needing any knowledge of the part we implemented.
-
-In TODOBdcsvd.txt we explain the main difficulty of the last part and suggest a reference paper to help solve it.
-
-The implementation has trouble with fixed-size matrices.
-
-In the current implementation, it returns zero matrices when asked to compute the SVD of an int matrix.
-
-
-Paper for the third part:
-http://www.stat.uchicago.edu/~lekheng/courses/302/classics/greengard-rokhlin.pdf
-
diff --git a/unsupported/Eigen/src/Skyline/CMakeLists.txt b/unsupported/Eigen/src/Skyline/CMakeLists.txt
deleted file mode 100644
index 3bf1b0dd4..000000000
--- a/unsupported/Eigen/src/Skyline/CMakeLists.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-FILE(GLOB Eigen_Skyline_SRCS "*.h")
-
-INSTALL(FILES
- ${Eigen_Skyline_SRCS}
- DESTINATION ${INCLUDE_INSTALL_DIR}/unsupported/Eigen/src/Skyline COMPONENT Devel
- )
diff --git a/unsupported/Eigen/src/Skyline/SkylineProduct.h b/unsupported/Eigen/src/Skyline/SkylineProduct.h
index 1ddf455e2..d9eb814c1 100644
--- a/unsupported/Eigen/src/Skyline/SkylineProduct.h
+++ b/unsupported/Eigen/src/Skyline/SkylineProduct.h
@@ -14,8 +14,8 @@ namespace Eigen {
template<typename Lhs, typename Rhs, int ProductMode>
struct SkylineProductReturnType {
- typedef const typename internal::nested<Lhs, Rhs::RowsAtCompileTime>::type LhsNested;
- typedef const typename internal::nested<Rhs, Lhs::RowsAtCompileTime>::type RhsNested;
+ typedef const typename internal::nested_eval<Lhs, Rhs::RowsAtCompileTime>::type LhsNested;
+ typedef const typename internal::nested_eval<Rhs, Lhs::RowsAtCompileTime>::type RhsNested;
typedef SkylineProduct<LhsNested, RhsNested, ProductMode> Type;
};
@@ -49,7 +49,7 @@ struct internal::traits<SkylineProduct<LhsNested, RhsNested, ProductMode> > {
| EvalBeforeAssigningBit
| EvalBeforeNestingBit,
- CoeffReadCost = Dynamic
+ CoeffReadCost = HugeCost
};
typedef typename internal::conditional<ResultIsSkyline,
diff --git a/unsupported/Eigen/src/SparseExtra/BlockSparseMatrix.h b/unsupported/Eigen/src/SparseExtra/BlockSparseMatrix.h
new file mode 100644
index 000000000..0e8350a7d
--- /dev/null
+++ b/unsupported/Eigen/src/SparseExtra/BlockSparseMatrix.h
@@ -0,0 +1,1079 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2013 Desire Nuentsa <desire.nuentsa_wakam@inria.fr>
+// Copyright (C) 2013 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSEBLOCKMATRIX_H
+#define EIGEN_SPARSEBLOCKMATRIX_H
+
+namespace Eigen {
+/** \ingroup SparseCore_Module
+ *
+ * \class BlockSparseMatrix
+ *
+ * \brief A versatile sparse matrix representation where each element is a block
+ *
+ * This class provides routines to manipulate block sparse matrices stored in a
+ * BSR-like representation. There are two main types:
+ *
+ * 1. All blocks have the same number of rows and columns, called block size
+ * in the following. In this case, if this block size is known at compile time,
+ * it can be given as a template parameter like
+ * \code
+ * BlockSparseMatrix<Scalar, 3, ColMajor> bmat(b_rows, b_cols);
+ * \endcode
+ * Here, bmat is a b_rows x b_cols block sparse matrix
+ * where each coefficient is a 3x3 dense matrix.
+ * If the block size is fixed but will be given at runtime,
+ * \code
+ * BlockSparseMatrix<Scalar, Dynamic, ColMajor> bmat(b_rows, b_cols);
+ * bmat.setBlockSize(block_size);
+ * \endcode
+ *
+ * 2. The second case is for variable-block sparse matrices.
+ * Here each block has its own dimensions. The only restriction is that all the blocks
+ * in a row (resp. a column) should have the same number of rows (resp. of columns).
+ * It is thus required in this case to describe the layout of the matrix by calling
+ * setBlockLayout(rowBlocks, colBlocks).
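+ * For instance (an illustrative sketch, where rowBlocks and colBlocks are
+ * hypothetical vectors holding the per-block row and column counts):
+ * \code
+ * BlockSparseMatrix<Scalar, Dynamic, ColMajor> bmat(b_rows, b_cols);
+ * bmat.setBlockLayout(rowBlocks, colBlocks);
+ * \endcode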
+ *
+ * In either case, the matrix can be filled by calling setFromTriplets().
+ * A regular sparse matrix can be converted to a block sparse matrix and vice versa.
+ * The block layout must be described beforehand by calling either
+ * setBlockSize() for fixed-size blocks or setBlockLayout() for variable-size blocks.
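+ * A possible filling pattern (a hypothetical sketch; i, j and block_ij are
+ * placeholders, and any iterator range whose elements expose row(), col() and
+ * a block-valued value() works the same way):
+ * \code
+ * std::vector<Eigen::Triplet<MatrixXd> > tripletList;
+ * tripletList.push_back(Eigen::Triplet<MatrixXd>(i, j, block_ij));
+ * bmat.setFromTriplets(tripletList.begin(), tripletList.end());
+ * \endcode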
+ *
+ * \tparam _Scalar The Scalar type
+ * \tparam _BlockAtCompileTime The block layout option. It takes the following values:
+ * Dynamic : the block size is known only at runtime
+ * a positive integer : the block size is fixed and known at compile time
+ */
+template<typename _Scalar, int _BlockAtCompileTime=Dynamic, int _Options=ColMajor, typename _StorageIndex=int> class BlockSparseMatrix;
+
+template<typename BlockSparseMatrixT> class BlockSparseMatrixView;
+
+namespace internal {
+template<typename _Scalar, int _BlockAtCompileTime, int _Options, typename _Index>
+struct traits<BlockSparseMatrix<_Scalar,_BlockAtCompileTime,_Options, _Index> >
+{
+ typedef _Scalar Scalar;
+ typedef _Index Index;
+ typedef Sparse StorageKind; // FIXME Where is it used ??
+ typedef MatrixXpr XprKind;
+ enum {
+ RowsAtCompileTime = Dynamic,
+ ColsAtCompileTime = Dynamic,
+ MaxRowsAtCompileTime = Dynamic,
+ MaxColsAtCompileTime = Dynamic,
+ BlockSize = _BlockAtCompileTime,
+ Flags = _Options | NestByRefBit | LvalueBit,
+ CoeffReadCost = NumTraits<Scalar>::ReadCost,
+ SupportedAccessPatterns = InnerRandomAccessPattern
+ };
+};
+template<typename BlockSparseMatrixT>
+struct traits<BlockSparseMatrixView<BlockSparseMatrixT> >
+{
+ typedef Ref<Matrix<typename BlockSparseMatrixT::Scalar, BlockSparseMatrixT::BlockSize, BlockSparseMatrixT::BlockSize> > Scalar;
+ typedef Ref<Matrix<typename BlockSparseMatrixT::RealScalar, BlockSparseMatrixT::BlockSize, BlockSparseMatrixT::BlockSize> > RealScalar;
+
+};
+
+// Function object to sort a triplet list
+template<typename Iterator, bool IsColMajor>
+struct TripletComp
+{
+ typedef typename Iterator::value_type Triplet;
+ bool operator()(const Triplet& a, const Triplet& b)
+ { if(IsColMajor)
+ return ((a.col() == b.col() && a.row() < b.row()) || (a.col() < b.col()));
+ else
+ return ((a.row() == b.row() && a.col() < b.col()) || (a.row() < b.row()));
+ }
+};
+} // end namespace internal
+
+
+/* Proxy to view the block sparse matrix as a regular sparse matrix */
+template<typename BlockSparseMatrixT>
+class BlockSparseMatrixView : public SparseMatrixBase<BlockSparseMatrixT>
+{
+ public:
+ typedef Ref<typename BlockSparseMatrixT::BlockScalar> Scalar;
+ typedef Ref<typename BlockSparseMatrixT::BlockRealScalar> RealScalar;
+ typedef typename BlockSparseMatrixT::Index Index;
+ typedef BlockSparseMatrixT Nested;
+ enum {
+ Flags = BlockSparseMatrixT::Options,
+ Options = BlockSparseMatrixT::Options,
+ RowsAtCompileTime = BlockSparseMatrixT::RowsAtCompileTime,
+ ColsAtCompileTime = BlockSparseMatrixT::ColsAtCompileTime,
+ MaxColsAtCompileTime = BlockSparseMatrixT::MaxColsAtCompileTime,
+ MaxRowsAtCompileTime = BlockSparseMatrixT::MaxRowsAtCompileTime
+ };
+ public:
+ BlockSparseMatrixView(const BlockSparseMatrixT& spblockmat)
+ : m_spblockmat(spblockmat)
+ {}
+
+ Index outerSize() const
+ {
+ return (Flags&RowMajorBit) == 1 ? this->rows() : this->cols();
+ }
+ Index cols() const
+ {
+ return m_spblockmat.blockCols();
+ }
+ Index rows() const
+ {
+ return m_spblockmat.blockRows();
+ }
+ Scalar coeff(Index row, Index col)
+ {
+ return m_spblockmat.coeff(row, col);
+ }
+ Scalar coeffRef(Index row, Index col)
+ {
+ return m_spblockmat.coeffRef(row, col);
+ }
+ // Wrapper to iterate over all blocks
+ class InnerIterator : public BlockSparseMatrixT::BlockInnerIterator
+ {
+ public:
+ InnerIterator(const BlockSparseMatrixView& mat, Index outer)
+ : BlockSparseMatrixT::BlockInnerIterator(mat.m_spblockmat, outer)
+ {}
+
+ };
+
+ protected:
+ const BlockSparseMatrixT& m_spblockmat;
+};
+
+// Proxy to view a regular vector as a block vector
+template<typename BlockSparseMatrixT, typename VectorType>
+class BlockVectorView
+{
+ public:
+ enum {
+ BlockSize = BlockSparseMatrixT::BlockSize,
+ ColsAtCompileTime = VectorType::ColsAtCompileTime,
+ RowsAtCompileTime = VectorType::RowsAtCompileTime,
+ Flags = VectorType::Flags
+ };
+ typedef Ref<const Matrix<typename BlockSparseMatrixT::Scalar, (RowsAtCompileTime==1)? 1 : BlockSize, (ColsAtCompileTime==1)? 1 : BlockSize> > Scalar;
+ typedef typename BlockSparseMatrixT::Index Index;
+ public:
+ BlockVectorView(const BlockSparseMatrixT& spblockmat, const VectorType& vec)
+ : m_spblockmat(spblockmat),m_vec(vec)
+ { }
+ inline Index cols() const
+ {
+ return m_vec.cols();
+ }
+ inline Index size() const
+ {
+ return m_spblockmat.blockRows();
+ }
+ inline Scalar coeff(Index bi) const
+ {
+ Index startRow = m_spblockmat.blockRowsIndex(bi);
+ Index rowSize = m_spblockmat.blockRowsIndex(bi+1) - startRow;
+ return m_vec.middleRows(startRow, rowSize);
+ }
+ inline Scalar coeff(Index bi, Index j) const
+ {
+ Index startRow = m_spblockmat.blockRowsIndex(bi);
+ Index rowSize = m_spblockmat.blockRowsIndex(bi+1) - startRow;
+ return m_vec.block(startRow, j, rowSize, 1);
+ }
+ protected:
+ const BlockSparseMatrixT& m_spblockmat;
+ const VectorType& m_vec;
+};
+
+template<typename BlockSparseMatrixT, typename VectorType> class BlockVectorReturn;
+
+
+// Proxy to view a regular vector as a writable block vector
+template<typename BlockSparseMatrixT, typename VectorType>
+class BlockVectorReturn
+{
+ public:
+ enum {
+ ColsAtCompileTime = VectorType::ColsAtCompileTime,
+ RowsAtCompileTime = VectorType::RowsAtCompileTime,
+ Flags = VectorType::Flags
+ };
+ typedef Ref<Matrix<typename VectorType::Scalar, RowsAtCompileTime, ColsAtCompileTime> > Scalar;
+ typedef typename BlockSparseMatrixT::Index Index;
+ public:
+ BlockVectorReturn(const BlockSparseMatrixT& spblockmat, VectorType& vec)
+ : m_spblockmat(spblockmat),m_vec(vec)
+ { }
+ inline Index size() const
+ {
+ return m_spblockmat.blockRows();
+ }
+ inline Scalar coeffRef(Index bi)
+ {
+ Index startRow = m_spblockmat.blockRowsIndex(bi);
+ Index rowSize = m_spblockmat.blockRowsIndex(bi+1) - startRow;
+ return m_vec.middleRows(startRow, rowSize);
+ }
+ inline Scalar coeffRef(Index bi, Index j)
+ {
+ Index startRow = m_spblockmat.blockRowsIndex(bi);
+ Index rowSize = m_spblockmat.blockRowsIndex(bi+1) - startRow;
+ return m_vec.block(startRow, j, rowSize, 1);
+ }
+
+ protected:
+ const BlockSparseMatrixT& m_spblockmat;
+ VectorType& m_vec;
+};
+
+// Block version of the sparse dense product
+template<typename Lhs, typename Rhs>
+class BlockSparseTimeDenseProduct;
+
+namespace internal {
+
+template<typename BlockSparseMatrixT, typename VecType>
+struct traits<BlockSparseTimeDenseProduct<BlockSparseMatrixT, VecType> >
+{
+ typedef Dense StorageKind;
+ typedef MatrixXpr XprKind;
+ typedef typename BlockSparseMatrixT::Scalar Scalar;
+ typedef typename BlockSparseMatrixT::Index Index;
+ enum {
+ RowsAtCompileTime = Dynamic,
+ ColsAtCompileTime = Dynamic,
+ MaxRowsAtCompileTime = Dynamic,
+ MaxColsAtCompileTime = Dynamic,
+ Flags = 0,
+ CoeffReadCost = internal::traits<BlockSparseMatrixT>::CoeffReadCost
+ };
+};
+} // end namespace internal
+
+template<typename Lhs, typename Rhs>
+class BlockSparseTimeDenseProduct
+ : public ProductBase<BlockSparseTimeDenseProduct<Lhs,Rhs>, Lhs, Rhs>
+{
+ public:
+ EIGEN_PRODUCT_PUBLIC_INTERFACE(BlockSparseTimeDenseProduct)
+
+ BlockSparseTimeDenseProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
+ {}
+
+ template<typename Dest> void scaleAndAddTo(Dest& dest, const typename Rhs::Scalar& alpha) const
+ {
+ BlockVectorReturn<Lhs,Dest> tmpDest(m_lhs, dest);
+ internal::sparse_time_dense_product( BlockSparseMatrixView<Lhs>(m_lhs), BlockVectorView<Lhs, Rhs>(m_lhs, m_rhs), tmpDest, alpha);
+ }
+
+ private:
+ BlockSparseTimeDenseProduct& operator=(const BlockSparseTimeDenseProduct&);
+};
+
+template<typename _Scalar, int _BlockAtCompileTime, int _Options, typename _StorageIndex>
+class BlockSparseMatrix : public SparseMatrixBase<BlockSparseMatrix<_Scalar,_BlockAtCompileTime, _Options,_StorageIndex> >
+{
+ public:
+ typedef _Scalar Scalar;
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+ typedef _StorageIndex StorageIndex;
+ typedef typename internal::ref_selector<BlockSparseMatrix<_Scalar, _BlockAtCompileTime, _Options, _StorageIndex> >::type Nested;
+
+ enum {
+ Options = _Options,
+ Flags = Options,
+ BlockSize=_BlockAtCompileTime,
+ RowsAtCompileTime = Dynamic,
+ ColsAtCompileTime = Dynamic,
+ MaxRowsAtCompileTime = Dynamic,
+ MaxColsAtCompileTime = Dynamic,
+ IsVectorAtCompileTime = 0,
+ IsColMajor = Flags&RowMajorBit ? 0 : 1
+ };
+ typedef Matrix<Scalar, _BlockAtCompileTime, _BlockAtCompileTime,IsColMajor ? ColMajor : RowMajor> BlockScalar;
+ typedef Matrix<RealScalar, _BlockAtCompileTime, _BlockAtCompileTime,IsColMajor ? ColMajor : RowMajor> BlockRealScalar;
+ typedef typename internal::conditional<_BlockAtCompileTime==Dynamic, Scalar, BlockScalar>::type BlockScalarReturnType;
+ typedef BlockSparseMatrix<Scalar, BlockSize, IsColMajor ? ColMajor : RowMajor, StorageIndex> PlainObject;
+ public:
+ // Default constructor
+ BlockSparseMatrix()
+ : m_innerBSize(0),m_outerBSize(0),m_innerOffset(0),m_outerOffset(0),
+ m_nonzerosblocks(0),m_values(0),m_blockPtr(0),m_indices(0),
+ m_outerIndex(0),m_blockSize(BlockSize)
+ { }
+
+
+ /**
+ * \brief Construct and resize
+ *
+ */
+ BlockSparseMatrix(Index brow, Index bcol)
+ : m_innerBSize(IsColMajor ? brow : bcol),
+ m_outerBSize(IsColMajor ? bcol : brow),
+ m_innerOffset(0),m_outerOffset(0),m_nonzerosblocks(0),
+ m_values(0),m_blockPtr(0),m_indices(0),
+ m_outerIndex(0),m_blockSize(BlockSize)
+ { }
+
+ /**
+ * \brief Copy-constructor
+ */
+ BlockSparseMatrix(const BlockSparseMatrix& other)
+ : m_innerBSize(other.m_innerBSize),m_outerBSize(other.m_outerBSize),
+ m_innerOffset(0),m_outerOffset(0),
+ m_nonzerosblocks(other.m_nonzerosblocks),m_nonzeros(other.m_nonzeros),
+ m_blockPtr(0),m_blockSize(other.m_blockSize)
+ {
+ // should we allow copying between variable-size blocks and fixed-size blocks ??
+ eigen_assert(m_blockSize == BlockSize && " CANNOT COPY BETWEEN FIXED-SIZE AND VARIABLE-SIZE BLOCKS");
+
+ // Allocate the internal arrays before copying into them
+ m_indices = new StorageIndex[m_nonzerosblocks+1];
+ m_outerIndex = new StorageIndex[m_outerBSize+1];
+ m_values = new Scalar[m_nonzeros];
+
+ if(m_blockSize == Dynamic)
+ {
+ // The offset and block-pointer arrays only exist for variable-size blocks
+ m_innerOffset = new StorageIndex[m_innerBSize+1];
+ m_outerOffset = new StorageIndex[m_outerBSize+1];
+ m_blockPtr = new StorageIndex[m_nonzerosblocks+1];
+ std::copy(other.m_innerOffset, other.m_innerOffset+m_innerBSize+1, m_innerOffset);
+ std::copy(other.m_outerOffset, other.m_outerOffset+m_outerBSize+1, m_outerOffset);
+ std::copy(other.m_blockPtr, other.m_blockPtr+m_nonzerosblocks+1, m_blockPtr);
+ }
+
+ std::copy(other.m_values, other.m_values+m_nonzeros, m_values);
+ std::copy(other.m_indices, other.m_indices+m_nonzerosblocks, m_indices);
+ std::copy(other.m_outerIndex, other.m_outerIndex+m_outerBSize+1, m_outerIndex);
+ }
+
+ friend void swap(BlockSparseMatrix& first, BlockSparseMatrix& second)
+ {
+ std::swap(first.m_innerBSize, second.m_innerBSize);
+ std::swap(first.m_outerBSize, second.m_outerBSize);
+ std::swap(first.m_innerOffset, second.m_innerOffset);
+ std::swap(first.m_outerOffset, second.m_outerOffset);
+ std::swap(first.m_nonzerosblocks, second.m_nonzerosblocks);
+ std::swap(first.m_nonzeros, second.m_nonzeros);
+ std::swap(first.m_values, second.m_values);
+ std::swap(first.m_blockPtr, second.m_blockPtr);
+ std::swap(first.m_indices, second.m_indices);
+ std::swap(first.m_outerIndex, second.m_outerIndex);
+ std::swap(first.m_blockSize, second.m_blockSize);
+ }
+
+ BlockSparseMatrix& operator=(BlockSparseMatrix other)
+ {
+ // Copy-and-swap idiom: avoids leaking data if an exception is thrown
+ swap(*this, other);
+ return *this;
+ }
+
+ // Destructor
+ ~BlockSparseMatrix()
+ {
+ delete[] m_outerIndex;
+ delete[] m_innerOffset;
+ delete[] m_outerOffset;
+ delete[] m_indices;
+ delete[] m_blockPtr;
+ delete[] m_values;
+ }
+
+
+ /**
+ * \brief Constructor from a sparse matrix
+ *
+ */
+ template<typename MatrixType>
+ inline BlockSparseMatrix(const MatrixType& spmat) : m_blockSize(BlockSize)
+ {
+ EIGEN_STATIC_ASSERT((BlockSize != Dynamic), THIS_METHOD_IS_ONLY_FOR_FIXED_SIZE);
+
+ *this = spmat;
+ }
+
+ /**
+ * \brief Assignment from a sparse matrix with the same storage order
+ *
+ * Convert from a sparse matrix to block sparse matrix.
+ * \warning Before calling this function, it is necessary to call
+ * either setBlockLayout() (matrices with variable-size blocks)
+ * or setBlockSize() (for fixed-size blocks).
+ */
+ template<typename MatrixType>
+ inline BlockSparseMatrix& operator=(const MatrixType& spmat)
+ {
+ eigen_assert((m_innerBSize != 0 && m_outerBSize != 0)
+ && "Trying to assign to a zero-size matrix, call resize() first");
+ eigen_assert(((MatrixType::Options&RowMajorBit) != IsColMajor) && "Wrong storage order");
+ typedef SparseMatrix<bool,MatrixType::Options,typename MatrixType::Index> MatrixPatternType;
+ MatrixPatternType blockPattern(blockRows(), blockCols());
+ m_nonzeros = 0;
+
+ // First, compute the number of nonzero blocks and their locations
+ for(StorageIndex bj = 0; bj < m_outerBSize; ++bj)
+ {
+ // Browse each outer block and compute the structure
+ std::vector<bool> nzblocksFlag(m_innerBSize,false); // Record the existing blocks
+ blockPattern.startVec(bj);
+ for(StorageIndex j = blockOuterIndex(bj); j < blockOuterIndex(bj+1); ++j)
+ {
+ typename MatrixType::InnerIterator it_spmat(spmat, j);
+ for(; it_spmat; ++it_spmat)
+ {
+ StorageIndex bi = innerToBlock(it_spmat.index()); // Index of the current nonzero block
+ if(!nzblocksFlag[bi])
+ {
+ // Save the index of this nonzero block
+ nzblocksFlag[bi] = true;
+ blockPattern.insertBackByOuterInnerUnordered(bj, bi) = true;
+ // Compute the total number of nonzeros (including explicit zeros in blocks)
+ m_nonzeros += blockOuterSize(bj) * blockInnerSize(bi);
+ }
+ }
+ } // end current outer block
+ }
+ blockPattern.finalize();
+
+ // Allocate the internal arrays
+ setBlockStructure(blockPattern);
+
+ for(StorageIndex nz = 0; nz < m_nonzeros; ++nz) m_values[nz] = Scalar(0);
+ for(StorageIndex bj = 0; bj < m_outerBSize; ++bj)
+ {
+ // Now copy the values
+ for(StorageIndex j = blockOuterIndex(bj); j < blockOuterIndex(bj+1); ++j)
+ {
+ // Browse the outer block column by column (for column-major matrices)
+ typename MatrixType::InnerIterator it_spmat(spmat, j);
+ for(; it_spmat; ++it_spmat)
+ {
+ StorageIndex idx = 0; // Position of this block in the column block
+ StorageIndex bi = innerToBlock(it_spmat.index()); // Index of the current nonzero block
+ // Go to the inner block where this element belongs to
+ while(bi > m_indices[m_outerIndex[bj]+idx]) ++idx; // Not expensive for ordered blocks
+ StorageIndex idxVal;// Get the right position in the array of values for this element
+ if(m_blockSize == Dynamic)
+ {
+ // Offset from all blocks before ...
+ idxVal = m_blockPtr[m_outerIndex[bj]+idx];
+ // ... and offset inside the block
+ idxVal += (j - blockOuterIndex(bj)) * blockOuterSize(bj) + it_spmat.index() - m_innerOffset[bi];
+ }
+ else
+ {
+ // All blocks before
+ idxVal = (m_outerIndex[bj] + idx) * m_blockSize * m_blockSize;
+ // inside the block
+ idxVal += (j - blockOuterIndex(bj)) * m_blockSize + (it_spmat.index()%m_blockSize);
+ }
+ // Insert the value
+ m_values[idxVal] = it_spmat.value();
+ } // end of this column
+ } // end of this block
+ } // end of this outer block
+
+ return *this;
+ }
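+
+ /* Illustrative usage sketch (names and sizes are assumptions, not part of
+ * the class documentation): converting an 8x8 SparseMatrix into a 4x4
+ * grid of fixed-size 2x2 blocks.
+ *
+ *   SparseMatrix<double> A(8,8);           // ... fill A ...
+ *   BlockSparseMatrix<double,2> B(4,4);    // 4x4 grid of 2x2 blocks
+ *   B.setBlockSize(2);
+ *   B = A;                                 // invokes this operator=
+ */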
+
+ /**
+ * \brief Set the nonzero block pattern of the matrix
+ *
+ * Given a sparse matrix describing the nonzero block pattern,
+ * this function prepares the internal pointers for values.
+ * After calling this function, any *nonzero* block (bi, bj) can be set
+ * with a simple call to coeffRef(bi,bj).
+ *
+ *
+ * \warning Before calling this function, it is necessary to call
+ * either setBlockLayout() (matrices with variable-size blocks)
+ * or setBlockSize() (for fixed-size blocks).
+ *
+ * \param blockPattern Sparse matrix of boolean elements describing the block structure
+ *
+ * \sa setBlockLayout() \sa setBlockSize()
+ */
+ template<typename MatrixType>
+ void setBlockStructure(const MatrixType& blockPattern)
+ {
+ resize(blockPattern.rows(), blockPattern.cols());
+ reserve(blockPattern.nonZeros());
+
+ // Browse the block pattern and set up the various pointers
+ m_outerIndex[0] = 0;
+ if(m_blockSize == Dynamic) m_blockPtr[0] = 0;
+ for(StorageIndex nz = 0; nz < m_nonzeros; ++nz) m_values[nz] = Scalar(0);
+ for(StorageIndex bj = 0; bj < m_outerBSize; ++bj)
+ {
+ //Browse each outer block
+
+ //First, copy and save the indices of nonzero blocks
+ //FIXME : find a way to avoid this ...
+ std::vector<int> nzBlockIdx;
+ typename MatrixType::InnerIterator it(blockPattern, bj);
+ for(; it; ++it)
+ {
+ nzBlockIdx.push_back(it.index());
+ }
+ std::sort(nzBlockIdx.begin(), nzBlockIdx.end());
+
+ // Now, fill block indices and, for variable-size blocks, pointers to blocks
+ for(StorageIndex idx = 0; idx < static_cast<StorageIndex>(nzBlockIdx.size()); ++idx)
+ {
+ StorageIndex offset = m_outerIndex[bj]+idx; // offset in m_indices
+ m_indices[offset] = nzBlockIdx[idx];
+ if(m_blockSize == Dynamic)
+ m_blockPtr[offset+1] = m_blockPtr[offset] + blockInnerSize(nzBlockIdx[idx]) * blockOuterSize(bj);
+ // No block pointer is needed for fixed-size blocks: offsets are implicit
+ }
+ // Save the pointer to the next outer block
+ m_outerIndex[bj+1] = m_outerIndex[bj] + nzBlockIdx.size();
+ }
+ }
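+
+ /* Illustrative sketch (assumed pattern): declare which blocks are nonzero,
+ * then fill them through coeffRef(). Continues the fixed-size setup above.
+ *
+ *   SparseMatrix<bool> pattern(4,4);
+ *   pattern.insert(0,0) = true;
+ *   pattern.insert(2,1) = true;
+ *   B.setBlockStructure(pattern);
+ *   B.coeffRef(0,0) = Matrix2d::Identity();
+ */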
+
+ /**
+ * \brief Set the number of rows and columns blocks
+ */
+ inline void resize(Index brow, Index bcol)
+ {
+ m_innerBSize = IsColMajor ? brow : bcol;
+ m_outerBSize = IsColMajor ? bcol : brow;
+ }
+
+ /**
+ * \brief set the block size at runtime for fixed-size block layout
+ *
+ * Call this only for fixed-size blocks
+ */
+ inline void setBlockSize(Index blockSize)
+ {
+ m_blockSize = blockSize;
+ }
+
+ /**
+ * \brief Set the row and column block layouts
+ *
+ * This function sets the size of each row and column block.
+ * It should therefore be used only for matrices with variable-size blocks.
+ * \param rowBlocks : Number of rows per row block
+ * \param colBlocks : Number of columns per column block
+ * \sa resize(), setBlockSize()
+ */
+ inline void setBlockLayout(const VectorXi& rowBlocks, const VectorXi& colBlocks)
+ {
+ const VectorXi& innerBlocks = IsColMajor ? rowBlocks : colBlocks;
+ const VectorXi& outerBlocks = IsColMajor ? colBlocks : rowBlocks;
+ eigen_assert(m_innerBSize == innerBlocks.size() && "CHECK THE NUMBER OF ROW OR COLUMN BLOCKS");
+ eigen_assert(m_outerBSize == outerBlocks.size() && "CHECK THE NUMBER OF ROW OR COLUMN BLOCKS");
+ m_outerBSize = outerBlocks.size();
+ // Starting index of each block: cumulative sums of the block sizes
+ m_innerOffset = new StorageIndex[m_innerBSize+1];
+ m_outerOffset = new StorageIndex[m_outerBSize+1];
+ m_innerOffset[0] = 0;
+ m_outerOffset[0] = 0;
+ std::partial_sum(&innerBlocks[0], &innerBlocks[m_innerBSize-1]+1, &m_innerOffset[1]);
+ std::partial_sum(&outerBlocks[0], &outerBlocks[m_outerBSize-1]+1, &m_outerOffset[1]);
+
+ // Compute the total number of nonzeros
+ m_nonzeros = 0;
+ for(StorageIndex bj = 0; bj < m_outerBSize; ++bj)
+ for(StorageIndex bi = 0; bi < m_innerBSize; ++bi)
+ m_nonzeros += outerBlocks[bj] * innerBlocks[bi];
+
+ }
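+
+ /* Illustrative sketch (assumed sizes): a 6x6 matrix cut into a 3x3 grid of
+ * variable-size blocks of 2, 3 and 1 rows/columns; resize() comes first.
+ *
+ *   BlockSparseMatrix<double,Dynamic> B;
+ *   B.resize(3,3);                          // 3x3 grid of blocks
+ *   VectorXi sizes(3); sizes << 2, 3, 1;
+ *   B.setBlockLayout(sizes, sizes);
+ */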
+
+ /**
+ * \brief Allocate the internal array of pointers to blocks and their inner indices
+ *
+ * \note For fixed-size blocks, call setBlockSize() to set the block size;
+ * for variable-size blocks, call setBlockLayout() before using this function.
+ *
+ * \param nonzerosblocks Number of nonzero blocks. The total number of nonzeros
+ * is computed in setBlockLayout() for variable-size blocks.
+ * \sa setBlockSize()
+ */
+ inline void reserve(const Index nonzerosblocks)
+ {
+ eigen_assert((m_innerBSize != 0 && m_outerBSize != 0) &&
+ "TRYING TO RESERVE ZERO-SIZE MATRICES, CALL resize() first");
+
+ //FIXME Should free if already allocated
+ m_outerIndex = new StorageIndex[m_outerBSize+1];
+
+ m_nonzerosblocks = nonzerosblocks;
+ if(m_blockSize != Dynamic)
+ {
+ m_nonzeros = nonzerosblocks * (m_blockSize * m_blockSize);
+ m_blockPtr = 0;
+ }
+ else
+ {
+ // m_nonzeros is already computed in setBlockLayout()
+ m_blockPtr = new StorageIndex[m_nonzerosblocks+1];
+ }
+ m_indices = new StorageIndex[m_nonzerosblocks+1];
+ m_values = new Scalar[m_nonzeros];
+ }
+
+
+ /**
+ * \brief Fill values in a matrix from a triplet list.
+ *
+ * Each triplet item has a block stored in an Eigen dense matrix.
+ * The InputIterator class should provide the functions row(), col() and value()
+ *
+ * \note For fixed-size blocks, call setBlockSize() before this function.
+ *
+ * FIXME Do not accept duplicates
+ */
+ template<typename InputIterator>
+ void setFromTriplets(const InputIterator& begin, const InputIterator& end)
+ {
+ eigen_assert((m_innerBSize!=0 && m_outerBSize !=0) && "ZERO BLOCKS, PLEASE CALL resize() before");
+
+ /* First, sort the triplet list
+ * FIXME This can be unnecessarily expensive since only the inner indices have to be sorted
+ * The best approach is like in SparseMatrix::setFromTriplets()
+ */
+ internal::TripletComp<InputIterator, IsColMajor> tripletcomp;
+ std::sort(begin, end, tripletcomp);
+
+ /* Count the number of rows and column blocks,
+ * and the number of nonzero blocks per outer dimension
+ */
+ VectorXi rowBlocks(m_innerBSize); // Size of each block row
+ VectorXi colBlocks(m_outerBSize); // Size of each block column
+ rowBlocks.setZero(); colBlocks.setZero();
+ VectorXi nzblock_outer(m_outerBSize); // Number of nz blocks per outer vector
+ VectorXi nz_outer(m_outerBSize); // Number of nz per outer vector...for variable-size blocks
+ nzblock_outer.setZero();
+ nz_outer.setZero();
+ for(InputIterator it(begin); it !=end; ++it)
+ {
+ eigen_assert(it->row() >= 0 && it->row() < this->blockRows() && it->col() >= 0 && it->col() < this->blockCols());
+ eigen_assert((it->value().rows() == it->value().cols() && (it->value().rows() == m_blockSize))
+ || (m_blockSize == Dynamic));
+
+ if(m_blockSize == Dynamic)
+ {
+ eigen_assert((rowBlocks[it->row()] == 0 || rowBlocks[it->row()] == it->value().rows()) &&
+ "NON CORRESPONDING SIZES FOR ROW BLOCKS");
+ eigen_assert((colBlocks[it->col()] == 0 || colBlocks[it->col()] == it->value().cols()) &&
+ "NON CORRESPONDING SIZES FOR COLUMN BLOCKS");
+ rowBlocks[it->row()] =it->value().rows();
+ colBlocks[it->col()] = it->value().cols();
+ }
+ nz_outer(IsColMajor ? it->col() : it->row()) += it->value().rows() * it->value().cols();
+ nzblock_outer(IsColMajor ? it->col() : it->row())++;
+ }
+ // Allocate member arrays
+ if(m_blockSize == Dynamic) setBlockLayout(rowBlocks, colBlocks);
+ StorageIndex nzblocks = nzblock_outer.sum();
+ reserve(nzblocks);
+
+ // Temporary markers
+ VectorXi block_id(m_outerBSize); // To be used as a block marker during insertion
+
+ // Setup outer index pointers and markers
+ m_outerIndex[0] = 0;
+ if (m_blockSize == Dynamic) m_blockPtr[0] = 0;
+ for(StorageIndex bj = 0; bj < m_outerBSize; ++bj)
+ {
+ m_outerIndex[bj+1] = m_outerIndex[bj] + nzblock_outer(bj);
+ block_id(bj) = m_outerIndex[bj];
+ if(m_blockSize==Dynamic)
+ {
+ m_blockPtr[m_outerIndex[bj+1]] = m_blockPtr[m_outerIndex[bj]] + nz_outer(bj);
+ }
+ }
+
+ // Fill the matrix
+ for(InputIterator it(begin); it!=end; ++it)
+ {
+ StorageIndex outer = IsColMajor ? it->col() : it->row();
+ StorageIndex inner = IsColMajor ? it->row() : it->col();
+ m_indices[block_id(outer)] = inner;
+ StorageIndex block_size = it->value().rows()*it->value().cols();
+ StorageIndex nz_marker = blockPtr(block_id[outer]);
+ memcpy(&(m_values[nz_marker]), it->value().data(), block_size * sizeof(Scalar));
+ if(m_blockSize == Dynamic)
+ {
+ m_blockPtr[block_id(outer)+1] = m_blockPtr[block_id(outer)] + block_size;
+ }
+ block_id(outer)++;
+ }
+
+ // An alternative when the outer indices are sorted...no need to use an array of markers
+// for(Index bcol = 0; bcol < m_outerBSize; ++bcol)
+// {
+// Index id = 0, id_nz = 0, id_nzblock = 0;
+// for(InputIterator it(begin); it!=end; ++it)
+// {
+// while (id<bcol) // one pass should do the job unless there are empty columns
+// {
+// id++;
+// m_outerIndex[id+1]=m_outerIndex[id];
+// }
+// m_outerIndex[id+1] += 1;
+// m_indices[id_nzblock]=brow;
+// Index block_size = it->value().rows()*it->value().cols();
+// m_blockPtr[id_nzblock+1] = m_blockPtr[id_nzblock] + block_size;
+// id_nzblock++;
+// memcpy(&(m_values[id_nz]),it->value().data(), block_size*sizeof(Scalar));
+// id_nz += block_size;
+// }
+// while(id < m_outerBSize-1) // Empty columns at the end
+// {
+// id++;
+// m_outerIndex[id+1]=m_outerIndex[id];
+// }
+// }
+ }
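+
+ /* Illustrative sketch (Triplet<MatrixXd> is an assumption: any type with
+ * row(), col() and a dense value() works):
+ *
+ *   std::vector<Triplet<MatrixXd> > blocks;
+ *   blocks.push_back(Triplet<MatrixXd>(0, 0, MatrixXd::Identity(2,2)));
+ *   blocks.push_back(Triplet<MatrixXd>(1, 2, MatrixXd::Ones(2,2)));
+ *   B.setFromTriplets(blocks.begin(), blocks.end());
+ */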
+
+
+ /**
+ * \returns the number of rows
+ */
+ inline Index rows() const
+ {
+// return blockRows();
+ return (IsColMajor ? innerSize() : outerSize());
+ }
+
+ /**
+ * \returns the number of cols
+ */
+ inline Index cols() const
+ {
+// return blockCols();
+ return (IsColMajor ? outerSize() : innerSize());
+ }
+
+ inline Index innerSize() const
+ {
+ if(m_blockSize == Dynamic) return m_innerOffset[m_innerBSize];
+ else return (m_innerBSize * m_blockSize) ;
+ }
+
+ inline Index outerSize() const
+ {
+ if(m_blockSize == Dynamic) return m_outerOffset[m_outerBSize];
+ else return (m_outerBSize * m_blockSize) ;
+ }
+ /** \returns the number of rows grouped by blocks */
+ inline Index blockRows() const
+ {
+ return (IsColMajor ? m_innerBSize : m_outerBSize);
+ }
+ /** \returns the number of columns grouped by blocks */
+ inline Index blockCols() const
+ {
+ return (IsColMajor ? m_outerBSize : m_innerBSize);
+ }
+
+ inline Index outerBlocks() const { return m_outerBSize; }
+ inline Index innerBlocks() const { return m_innerBSize; }
+
+ /** \returns the index of the block containing the outer index \a outer */
+ inline Index outerToBlock(Index outer) const
+ {
+ eigen_assert(outer < outerSize() && "OUTER INDEX OUT OF BOUNDS");
+
+ if(m_blockSize != Dynamic)
+ return (outer / m_blockSize); // Integer division
+
+ StorageIndex b_outer = 0;
+ while(m_outerOffset[b_outer] <= outer) ++b_outer;
+ return b_outer - 1;
+ }
+ /** \returns the index of the block containing the inner index \a inner */
+ inline Index innerToBlock(Index inner) const
+ {
+ eigen_assert(inner < innerSize() && "INNER INDEX OUT OF BOUNDS");
+
+ if(m_blockSize != Dynamic)
+ return (inner / m_blockSize); // Integer division
+
+ StorageIndex b_inner = 0;
+ while(m_innerOffset[b_inner] <= inner) ++b_inner;
+ return b_inner - 1;
+ }
+
+ /**
+ *\returns a reference to the (i,j) block as an Eigen Dense Matrix
+ */
+ Ref<BlockScalar> coeffRef(Index brow, Index bcol)
+ {
+ eigen_assert(brow < blockRows() && "BLOCK ROW INDEX OUT OF BOUNDS");
+ eigen_assert(bcol < blockCols() && "BLOCK COLUMN INDEX OUT OF BOUNDS");
+
+ StorageIndex rsize = IsColMajor ? blockInnerSize(brow): blockOuterSize(bcol);
+ StorageIndex csize = IsColMajor ? blockOuterSize(bcol) : blockInnerSize(brow);
+ StorageIndex inner = IsColMajor ? brow : bcol;
+ StorageIndex outer = IsColMajor ? bcol : brow;
+ StorageIndex offset = m_outerIndex[outer];
+ while(offset < m_outerIndex[outer+1] && m_indices[offset] != inner)
+ offset++;
+ if(m_indices[offset] == inner)
+ {
+ return Map<BlockScalar>(&(m_values[blockPtr(offset)]), rsize, csize);
+ }
+ else
+ {
+ //FIXME the block does not exist, insert it!
+ eigen_assert(false && "DYNAMIC INSERTION IS NOT YET SUPPORTED");
+ }
+ }
+
+ /**
+ * \returns the value of the (i,j) block as an Eigen Dense Matrix
+ */
+ Map<const BlockScalar> coeff(Index brow, Index bcol) const
+ {
+ eigen_assert(brow < blockRows() && "BLOCK ROW INDEX OUT OF BOUNDS");
+ eigen_assert(bcol < blockCols() && "BLOCK COLUMN OUT OF BOUNDS");
+
+ StorageIndex rsize = IsColMajor ? blockInnerSize(brow): blockOuterSize(bcol);
+ StorageIndex csize = IsColMajor ? blockOuterSize(bcol) : blockInnerSize(brow);
+ StorageIndex inner = IsColMajor ? brow : bcol;
+ StorageIndex outer = IsColMajor ? bcol : brow;
+ StorageIndex offset = m_outerIndex[outer];
+ while(offset < m_outerIndex[outer+1] && m_indices[offset] != inner) offset++;
+ if(m_indices[offset] == inner)
+ {
+ return Map<const BlockScalar> (&(m_values[blockPtr(offset)]), rsize, csize);
+ }
+ else
+// return BlockScalar::Zero(rsize, csize);
+ eigen_assert(false && "NOT YET SUPPORTED");
+ }
+
+ // Block Matrix times vector product
+ template<typename VecType>
+ BlockSparseTimeDenseProduct<BlockSparseMatrix, VecType> operator*(const VecType& rhs) const
+ {
+ return BlockSparseTimeDenseProduct<BlockSparseMatrix, VecType>(*this, rhs);
+ }
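+
+ /* Illustrative sketch: the returned product expression is evaluated on
+ * assignment to a dense vector.
+ *
+ *   VectorXd x = VectorXd::Ones(B.cols());
+ *   VectorXd y = B * x;   // BlockSparseTimeDenseProduct evaluated into y
+ */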
+
+ /** \returns the number of nonzero blocks */
+ inline Index nonZerosBlocks() const { return m_nonzerosblocks; }
+ /** \returns the total number of nonzero elements, including any explicit zeros stored within blocks */
+ inline Index nonZeros() const { return m_nonzeros; }
+
+ inline BlockScalarReturnType *valuePtr() {return reinterpret_cast<BlockScalarReturnType *>(m_values);}
+// inline Scalar *valuePtr(){ return m_values; }
+ inline StorageIndex *innerIndexPtr() {return m_indices; }
+ inline const StorageIndex *innerIndexPtr() const {return m_indices; }
+ inline StorageIndex *outerIndexPtr() {return m_outerIndex; }
+ inline const StorageIndex* outerIndexPtr() const {return m_outerIndex; }
+
+ /** \brief for compatibility purposes with the SparseMatrix class */
+ inline bool isCompressed() const {return true;}
+ /**
+ * \returns the starting index of the \a bi -th row block
+ */
+ inline Index blockRowsIndex(Index bi) const
+ {
+ return IsColMajor ? blockInnerIndex(bi) : blockOuterIndex(bi);
+ }
+
+ /**
+ * \returns the starting index of the \a bj -th column block
+ */
+ inline Index blockColsIndex(Index bj) const
+ {
+ return IsColMajor ? blockOuterIndex(bj) : blockInnerIndex(bj);
+ }
+
+ inline Index blockOuterIndex(Index bj) const
+ {
+ return (m_blockSize == Dynamic) ? m_outerOffset[bj] : (bj * m_blockSize);
+ }
+ inline Index blockInnerIndex(Index bi) const
+ {
+ return (m_blockSize == Dynamic) ? m_innerOffset[bi] : (bi * m_blockSize);
+ }
+
+ // Not needed ???
+ inline Index blockInnerSize(Index bi) const
+ {
+ return (m_blockSize == Dynamic) ? (m_innerOffset[bi+1] - m_innerOffset[bi]) : m_blockSize;
+ }
+ inline Index blockOuterSize(Index bj) const
+ {
+ return (m_blockSize == Dynamic) ? (m_outerOffset[bj+1]- m_outerOffset[bj]) : m_blockSize;
+ }
+
+ /**
+ * \brief Browse the matrix by outer index
+ */
+ class InnerIterator; // Browse column by column
+
+ /**
+ * \brief Browse the matrix by block outer index
+ */
+ class BlockInnerIterator; // Browse block by block
+
+ friend std::ostream & operator << (std::ostream & s, const BlockSparseMatrix& m)
+ {
+ for (StorageIndex j = 0; j < m.outerBlocks(); ++j)
+ {
+ BlockInnerIterator itb(m, j);
+ for(; itb; ++itb)
+ {
+ s << "("<<itb.row() << ", " << itb.col() << ")\n";
+ s << itb.value() <<"\n";
+ }
+ }
+ s << std::endl;
+ return s;
+ }
+
+ /**
+ * \returns the starting position of the block \a id in the array of values
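+ * For fixed-size blocks this is simply id * BlockSize * BlockSize (e.g. the
+ * block with id 3 in a matrix of 2x2 blocks starts at offset 12); for
+ * variable-size blocks the offset is looked up in m_blockPtr.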
+ */
+ Index blockPtr(Index id) const
+ {
+ if(m_blockSize == Dynamic) return m_blockPtr[id];
+ else return id * m_blockSize * m_blockSize;
+ //return blockDynIdx(id, typename internal::conditional<(BlockSize==Dynamic), internal::true_type, internal::false_type>::type());
+ }
+
+
+ protected:
+// inline Index blockDynIdx(Index id, internal::true_type) const
+// {
+// return m_blockPtr[id];
+// }
+// inline Index blockDynIdx(Index id, internal::false_type) const
+// {
+// return id * BlockSize * BlockSize;
+// }
+
+ // To be implemented
+ // Insert a block at a particular location... need to make a room for that
+ Map<BlockScalar> insert(Index brow, Index bcol);
+
+ Index m_innerBSize; // Number of block rows
+ Index m_outerBSize; // Number of block columns
+ StorageIndex *m_innerOffset; // Starting index of each inner block (size m_innerBSize+1)
+ StorageIndex *m_outerOffset; // Starting index of each outer block (size m_outerBSize+1)
+ Index m_nonzerosblocks; // Total number of nonzero blocks (at most m_innerBSize x m_outerBSize)
+ Index m_nonzeros; // Total number of nonzero elements
+ Scalar *m_values; // Values, stored block column after block column (size m_nonzeros)
+ StorageIndex *m_blockPtr; // Pointer to the beginning of each block in m_values (size m_nonzerosblocks+1); null for fixed-size blocks
+ StorageIndex *m_indices; // Inner block indices (size m_nonzerosblocks)
+ StorageIndex *m_outerIndex; // Starting pointer of each block column in m_indices (size m_outerBSize+1)
+ Index m_blockSize; // Size of a block for fixed-size blocks, otherwise Dynamic
+};
+
+template<typename _Scalar, int _BlockAtCompileTime, int _Options, typename _StorageIndex>
+class BlockSparseMatrix<_Scalar, _BlockAtCompileTime, _Options, _StorageIndex>::BlockInnerIterator
+{
+ public:
+
+ enum{
+ Flags = _Options
+ };
+
+ BlockInnerIterator(const BlockSparseMatrix& mat, const Index outer)
+ : m_mat(mat),m_outer(outer),
+ m_id(mat.m_outerIndex[outer]),
+ m_end(mat.m_outerIndex[outer+1])
+ {
+ }
+
+ inline BlockInnerIterator& operator++() {m_id++; return *this; }
+
+ inline const Map<const BlockScalar> value() const
+ {
+ return Map<const BlockScalar>(&(m_mat.m_values[m_mat.blockPtr(m_id)]),
+ rows(),cols());
+ }
+ inline Map<BlockScalar> valueRef()
+ {
+ return Map<BlockScalar>(&(m_mat.m_values[m_mat.blockPtr(m_id)]),
+ rows(),cols());
+ }
+ // Block inner index
+ inline Index index() const {return m_mat.m_indices[m_id]; }
+ inline Index outer() const { return m_outer; }
+ // block row index
+ inline Index row() const {return index(); }
+ // block column index
+ inline Index col() const {return outer(); }
+ // FIXME Number of rows in the current block
+ inline Index rows() const { return (m_mat.m_blockSize==Dynamic) ? (m_mat.m_innerOffset[index()+1] - m_mat.m_innerOffset[index()]) : m_mat.m_blockSize; }
+ // Number of columns in the current block ...
+ inline Index cols() const { return (m_mat.m_blockSize==Dynamic) ? (m_mat.m_outerOffset[m_outer+1]-m_mat.m_outerOffset[m_outer]) : m_mat.m_blockSize;}
+ inline operator bool() const { return (m_id < m_end); }
+
+ protected:
+ const BlockSparseMatrix<_Scalar, _BlockAtCompileTime, _Options, StorageIndex>& m_mat;
+ const Index m_outer;
+ Index m_id;
+ Index m_end;
+};
+
+template<typename _Scalar, int _BlockAtCompileTime, int _Options, typename _StorageIndex>
+class BlockSparseMatrix<_Scalar, _BlockAtCompileTime, _Options, _StorageIndex>::InnerIterator
+{
+ public:
+ InnerIterator(const BlockSparseMatrix& mat, Index outer)
+ : m_mat(mat),m_outerB(mat.outerToBlock(outer)),m_outer(outer),
+ itb(mat, mat.outerToBlock(outer)),
+ m_offset(outer - mat.blockOuterIndex(m_outerB))
+ {
+ if (itb)
+ {
+ m_id = m_mat.blockInnerIndex(itb.index());
+ m_start = m_id;
+ m_end = m_mat.blockInnerIndex(itb.index()+1);
+ }
+ }
+ inline InnerIterator& operator++()
+ {
+ m_id++;
+ if (m_id >= m_end)
+ {
+ ++itb;
+ if (itb)
+ {
+ m_id = m_mat.blockInnerIndex(itb.index());
+ m_start = m_id;
+ m_end = m_mat.blockInnerIndex(itb.index()+1);
+ }
+ }
+ return *this;
+ }
+ inline const Scalar& value() const
+ {
+ return itb.value().coeff(m_id - m_start, m_offset);
+ }
+ inline Scalar& valueRef()
+ {
+ return itb.valueRef().coeffRef(m_id - m_start, m_offset);
+ }
+ inline Index index() const { return m_id; }
+ inline Index outer() const {return m_outer; }
+ inline Index col() const {return outer(); }
+ inline Index row() const { return index();}
+ inline operator bool() const
+ {
+ return itb;
+ }
+ protected:
+ const BlockSparseMatrix& m_mat;
+ const Index m_outer;
+ const Index m_outerB;
+ BlockInnerIterator itb; // Iterator through the blocks
+ const Index m_offset; // Position of this column in the block
+ Index m_start; // starting inner index of this block
+ Index m_id; // current inner index in the block
+ Index m_end; // starting inner index of the next block
+
+};
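+
+/* Illustrative sketch: scalar-wise traversal mirrors the SparseMatrix idiom
+ * (B is an assumed, already filled BlockSparseMatrix<double,2>).
+ *
+ *   for(Index j = 0; j < B.outerSize(); ++j)
+ *     for(BlockSparseMatrix<double,2>::InnerIterator it(B, j); it; ++it)
+ *       std::cout << it.row() << " " << it.col() << " " << it.value() << "\n";
+ */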
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSEBLOCKMATRIX_H
diff --git a/unsupported/Eigen/src/SparseExtra/CMakeLists.txt b/unsupported/Eigen/src/SparseExtra/CMakeLists.txt
deleted file mode 100644
index 7ea32ca5e..000000000
--- a/unsupported/Eigen/src/SparseExtra/CMakeLists.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-FILE(GLOB Eigen_SparseExtra_SRCS "*.h")
-
-INSTALL(FILES
- ${Eigen_SparseExtra_SRCS}
- DESTINATION ${INCLUDE_INSTALL_DIR}/unsupported/Eigen/src/SparseExtra COMPONENT Devel
- )
diff --git a/unsupported/Eigen/src/SparseExtra/DynamicSparseMatrix.h b/unsupported/Eigen/src/SparseExtra/DynamicSparseMatrix.h
index dec16df28..037a13f86 100644
--- a/unsupported/Eigen/src/SparseExtra/DynamicSparseMatrix.h
+++ b/unsupported/Eigen/src/SparseExtra/DynamicSparseMatrix.h
@@ -33,11 +33,11 @@ namespace Eigen {
*/
namespace internal {
-template<typename _Scalar, int _Options, typename _Index>
-struct traits<DynamicSparseMatrix<_Scalar, _Options, _Index> >
+template<typename _Scalar, int _Options, typename _StorageIndex>
+struct traits<DynamicSparseMatrix<_Scalar, _Options, _StorageIndex> >
{
typedef _Scalar Scalar;
- typedef _Index Index;
+ typedef _StorageIndex StorageIndex;
typedef Sparse StorageKind;
typedef MatrixXpr XprKind;
enum {
@@ -52,10 +52,12 @@ struct traits<DynamicSparseMatrix<_Scalar, _Options, _Index> >
};
}
-template<typename _Scalar, int _Options, typename _Index>
+template<typename _Scalar, int _Options, typename _StorageIndex>
class DynamicSparseMatrix
- : public SparseMatrixBase<DynamicSparseMatrix<_Scalar, _Options, _Index> >
+ : public SparseMatrixBase<DynamicSparseMatrix<_Scalar, _Options, _StorageIndex> >
{
+ typedef SparseMatrixBase<DynamicSparseMatrix> Base;
+ using Base::convert_index;
public:
EIGEN_SPARSE_PUBLIC_INTERFACE(DynamicSparseMatrix)
// FIXME: why are these operator already alvailable ???
@@ -70,21 +72,21 @@ template<typename _Scalar, int _Options, typename _Index>
protected:
- typedef DynamicSparseMatrix<Scalar,(Flags&~RowMajorBit)|(IsRowMajor?RowMajorBit:0)> TransposedSparseMatrix;
+ typedef DynamicSparseMatrix<Scalar,(Flags&~RowMajorBit)|(IsRowMajor?RowMajorBit:0), StorageIndex> TransposedSparseMatrix;
Index m_innerSize;
- std::vector<internal::CompressedStorage<Scalar,Index> > m_data;
+ std::vector<internal::CompressedStorage<Scalar,StorageIndex> > m_data;
public:
inline Index rows() const { return IsRowMajor ? outerSize() : m_innerSize; }
inline Index cols() const { return IsRowMajor ? m_innerSize : outerSize(); }
inline Index innerSize() const { return m_innerSize; }
- inline Index outerSize() const { return static_cast<Index>(m_data.size()); }
+ inline Index outerSize() const { return convert_index(m_data.size()); }
inline Index innerNonZeros(Index j) const { return m_data[j].size(); }
- std::vector<internal::CompressedStorage<Scalar,Index> >& _data() { return m_data; }
- const std::vector<internal::CompressedStorage<Scalar,Index> >& _data() const { return m_data; }
+ std::vector<internal::CompressedStorage<Scalar,StorageIndex> >& _data() { return m_data; }
+ const std::vector<internal::CompressedStorage<Scalar,StorageIndex> >& _data() const { return m_data; }
/** \returns the coefficient value at given position \a row, \a col
* This operation involes a log(rho*outer_size) binary search.
@@ -121,7 +123,7 @@ template<typename _Scalar, int _Options, typename _Index>
{
Index res = 0;
for (Index j=0; j<outerSize(); ++j)
- res += static_cast<Index>(m_data[j].size());
+ res += m_data[j].size();
return res;
}
@@ -197,7 +199,7 @@ template<typename _Scalar, int _Options, typename _Index>
void resize(Index rows, Index cols)
{
const Index outerSize = IsRowMajor ? rows : cols;
- m_innerSize = IsRowMajor ? cols : rows;
+ m_innerSize = convert_index(IsRowMajor ? cols : rows);
setZero();
if (Index(m_data.size()) != outerSize)
{
@@ -320,10 +322,10 @@ template<typename _Scalar, int _Options, typename _Index>
# endif
};
-template<typename Scalar, int _Options, typename _Index>
-class DynamicSparseMatrix<Scalar,_Options,_Index>::InnerIterator : public SparseVector<Scalar,_Options,_Index>::InnerIterator
+template<typename Scalar, int _Options, typename _StorageIndex>
+class DynamicSparseMatrix<Scalar,_Options,_StorageIndex>::InnerIterator : public SparseVector<Scalar,_Options,_StorageIndex>::InnerIterator
{
- typedef typename SparseVector<Scalar,_Options,_Index>::InnerIterator Base;
+ typedef typename SparseVector<Scalar,_Options,_StorageIndex>::InnerIterator Base;
public:
InnerIterator(const DynamicSparseMatrix& mat, Index outer)
: Base(mat.m_data[outer]), m_outer(outer)
@@ -331,15 +333,16 @@ class DynamicSparseMatrix<Scalar,_Options,_Index>::InnerIterator : public Sparse
inline Index row() const { return IsRowMajor ? m_outer : Base::index(); }
inline Index col() const { return IsRowMajor ? Base::index() : m_outer; }
+ inline Index outer() const { return m_outer; }
protected:
const Index m_outer;
};
-template<typename Scalar, int _Options, typename _Index>
-class DynamicSparseMatrix<Scalar,_Options,_Index>::ReverseInnerIterator : public SparseVector<Scalar,_Options,_Index>::ReverseInnerIterator
+template<typename Scalar, int _Options, typename _StorageIndex>
+class DynamicSparseMatrix<Scalar,_Options,_StorageIndex>::ReverseInnerIterator : public SparseVector<Scalar,_Options,_StorageIndex>::ReverseInnerIterator
{
- typedef typename SparseVector<Scalar,_Options,_Index>::ReverseInnerIterator Base;
+ typedef typename SparseVector<Scalar,_Options,_StorageIndex>::ReverseInnerIterator Base;
public:
ReverseInnerIterator(const DynamicSparseMatrix& mat, Index outer)
: Base(mat.m_data[outer]), m_outer(outer)
@@ -347,11 +350,43 @@ class DynamicSparseMatrix<Scalar,_Options,_Index>::ReverseInnerIterator : public
inline Index row() const { return IsRowMajor ? m_outer : Base::index(); }
inline Index col() const { return IsRowMajor ? Base::index() : m_outer; }
+ inline Index outer() const { return m_outer; }
protected:
const Index m_outer;
};
+namespace internal {
+
+template<typename _Scalar, int _Options, typename _StorageIndex>
+struct evaluator<DynamicSparseMatrix<_Scalar,_Options,_StorageIndex> >
+ : evaluator_base<DynamicSparseMatrix<_Scalar,_Options,_StorageIndex> >
+{
+ typedef _Scalar Scalar;
+ typedef DynamicSparseMatrix<_Scalar,_Options,_StorageIndex> SparseMatrixType;
+ typedef typename SparseMatrixType::InnerIterator InnerIterator;
+ typedef typename SparseMatrixType::ReverseInnerIterator ReverseInnerIterator;
+
+ enum {
+ CoeffReadCost = NumTraits<_Scalar>::ReadCost,
+ Flags = SparseMatrixType::Flags
+ };
+
+ evaluator() : m_matrix(0) {}
+ evaluator(const SparseMatrixType &mat) : m_matrix(&mat) {}
+
+ operator SparseMatrixType&() { return m_matrix->const_cast_derived(); }
+ operator const SparseMatrixType&() const { return *m_matrix; }
+
+ Scalar coeff(Index row, Index col) const { return m_matrix->coeff(row,col); }
+
+ Index nonZerosEstimate() const { return m_matrix->nonZeros(); }
+
+ const SparseMatrixType *m_matrix;
+};
+
+} // end namespace internal
+
} // end namespace Eigen
#endif // EIGEN_DYNAMIC_SPARSEMATRIX_H
diff --git a/unsupported/Eigen/src/SparseExtra/MarketIO.h b/unsupported/Eigen/src/SparseExtra/MarketIO.h
index 7aafce928..cdc14f86e 100644
--- a/unsupported/Eigen/src/SparseExtra/MarketIO.h
+++ b/unsupported/Eigen/src/SparseExtra/MarketIO.h
@@ -18,7 +18,7 @@ namespace Eigen {
namespace internal
{
template <typename Scalar>
- inline bool GetMarketLine (std::stringstream& line, int& M, int& N, int& i, int& j, Scalar& value)
+ inline bool GetMarketLine (std::stringstream& line, Index& M, Index& N, Index& i, Index& j, Scalar& value)
{
line >> i >> j >> value;
i--;
@@ -31,7 +31,7 @@ namespace internal
return false;
}
template <typename Scalar>
- inline bool GetMarketLine (std::stringstream& line, int& M, int& N, int& i, int& j, std::complex<Scalar>& value)
+ inline bool GetMarketLine (std::stringstream& line, Index& M, Index& N, Index& i, Index& j, std::complex<Scalar>& value)
{
Scalar valR, valI;
line >> i >> j >> valR >> valI;
@@ -133,6 +133,7 @@ template<typename SparseMatrixType>
bool loadMarket(SparseMatrixType& mat, const std::string& filename)
{
typedef typename SparseMatrixType::Scalar Scalar;
+ typedef typename SparseMatrixType::Index Index;
std::ifstream input(filename.c_str(),std::ios::in);
if(!input)
return false;
@@ -142,11 +143,11 @@ bool loadMarket(SparseMatrixType& mat, const std::string& filename)
bool readsizes = false;
- typedef Triplet<Scalar,int> T;
+ typedef Triplet<Scalar,Index> T;
std::vector<T> elements;
- int M(-1), N(-1), NNZ(-1);
- int count = 0;
+ Index M(-1), N(-1), NNZ(-1);
+ Index count = 0;
while(input.getline(buffer, maxBuffersize))
{
// skip comments
@@ -162,14 +163,14 @@ bool loadMarket(SparseMatrixType& mat, const std::string& filename)
if(M > 0 && N > 0 && NNZ > 0)
{
readsizes = true;
- std::cout << "sizes: " << M << "," << N << "," << NNZ << "\n";
+ //std::cout << "sizes: " << M << "," << N << "," << NNZ << "\n";
mat.resize(M,N);
mat.reserve(NNZ);
}
}
else
{
- int i(-1), j(-1);
+ Index i(-1), j(-1);
Scalar value;
if( internal::GetMarketLine(line, M, N, i, j, value) )
{
@@ -238,9 +239,9 @@ bool saveMarket(const SparseMatrixType& mat, const std::string& filename, int sy
for(int j=0; j<mat.outerSize(); ++j)
for(typename SparseMatrixType::InnerIterator it(mat,j); it; ++it)
{
- ++ count;
- internal::PutMatrixElt(it.value(), it.row()+1, it.col()+1, out);
- // out << it.row()+1 << " " << it.col()+1 << " " << it.value() << "\n";
+ ++ count;
+ internal::PutMatrixElt(it.value(), it.row()+1, it.col()+1, out);
+ // out << it.row()+1 << " " << it.col()+1 << " " << it.value() << "\n";
}
out.close();
return true;
diff --git a/unsupported/Eigen/src/SparseExtra/MatrixMarketIterator.h b/unsupported/Eigen/src/SparseExtra/MatrixMarketIterator.h
index bf13cf21f..02916ea6f 100644
--- a/unsupported/Eigen/src/SparseExtra/MatrixMarketIterator.h
+++ b/unsupported/Eigen/src/SparseExtra/MatrixMarketIterator.h
@@ -41,20 +41,18 @@ enum {
template <typename Scalar>
class MatrixMarketIterator
{
+ typedef typename NumTraits<Scalar>::Real RealScalar;
public:
typedef Matrix<Scalar,Dynamic,1> VectorType;
typedef SparseMatrix<Scalar,ColMajor> MatrixType;
public:
- MatrixMarketIterator(const std::string folder):m_sym(0),m_isvalid(false),m_matIsLoaded(false),m_hasRhs(false),m_hasrefX(false),m_folder(folder)
+ MatrixMarketIterator(const std::string &folder)
+ : m_sym(0), m_isvalid(false), m_matIsLoaded(false), m_hasRhs(false), m_hasrefX(false), m_folder(folder)
{
m_folder_id = opendir(folder.c_str());
- if (!m_folder_id){
- m_isvalid = false;
- std::cerr << "The provided Matrix folder could not be opened \n\n";
- abort();
- }
- Getnextvalidmatrix();
+ if(m_folder_id)
+ Getnextvalidmatrix();
}
~MatrixMarketIterator()
@@ -81,16 +79,30 @@ class MatrixMarketIterator
std::string matrix_file = m_folder + "/" + m_matname + ".mtx";
if ( !loadMarket(m_mat, matrix_file))
{
+ std::cerr << "Warning loadMarket failed when loading \"" << matrix_file << "\"" << std::endl;
m_matIsLoaded = false;
return m_mat;
}
m_matIsLoaded = true;
-
+
if (m_sym != NonSymmetric)
- { // Store the upper part of the matrix. It is needed by the solvers dealing with nonsymmetric matrices ??
- MatrixType B;
- B = m_mat;
- m_mat = B.template selfadjointView<Lower>();
+ {
+ // Check whether we need to restore a full matrix:
+ RealScalar diag_norm = m_mat.diagonal().norm();
+ RealScalar lower_norm = m_mat.template triangularView<Lower>().norm();
+ RealScalar upper_norm = m_mat.template triangularView<Upper>().norm();
+ if(lower_norm>diag_norm && upper_norm==diag_norm)
+ {
+ // only the lower part is stored
+ MatrixType tmp(m_mat);
+ m_mat = tmp.template selfadjointView<Lower>();
+ }
+ else if(upper_norm>diag_norm && lower_norm==diag_norm)
+ {
+ // only the upper part is stored
+ MatrixType tmp(m_mat);
+ m_mat = tmp.template selfadjointView<Upper>();
+ }
}
return m_mat;
}
@@ -143,6 +155,8 @@ class MatrixMarketIterator
m_refX.resize(m_mat.cols());
m_hasrefX = loadMarketVector(m_refX, lhs_file);
}
+ else
+ m_refX.resize(0);
return m_refX;
}
@@ -150,8 +164,9 @@ class MatrixMarketIterator
inline int sym() { return m_sym; }
- inline bool hasRhs() {return m_hasRhs; }
- inline bool hasrefX() {return m_hasrefX; }
+ bool hasRhs() {return m_hasRhs; }
+ bool hasrefX() {return m_hasrefX; }
+ bool isFolderValid() { return bool(m_folder_id); }
protected:
diff --git a/unsupported/Eigen/src/SparseExtra/RandomSetter.h b/unsupported/Eigen/src/SparseExtra/RandomSetter.h
index dee1708e7..ee97299af 100644
--- a/unsupported/Eigen/src/SparseExtra/RandomSetter.h
+++ b/unsupported/Eigen/src/SparseExtra/RandomSetter.h
@@ -95,10 +95,10 @@ template<typename Scalar> struct GoogleSparseHashMapTraits
*
* \brief The RandomSetter is a wrapper object allowing to set/update a sparse matrix with random access
*
- * \param SparseMatrixType the type of the sparse matrix we are updating
- * \param MapTraits a traits class representing the map implementation used for the temporary sparse storage.
+ * \tparam SparseMatrixType the type of the sparse matrix we are updating
+ * \tparam MapTraits a traits class representing the map implementation used for the temporary sparse storage.
* Its default value depends on the system.
- * \param OuterPacketBits defines the number of rows (or columns) manage by a single map object
+ * \tparam OuterPacketBits defines the number of rows (or columns) manage by a single map object
* as a power of two exponent.
*
* This class temporarily represents a sparse matrix object using a generic map implementation allowing for
@@ -154,7 +154,7 @@ template<typename SparseMatrixType,
class RandomSetter
{
typedef typename SparseMatrixType::Scalar Scalar;
- typedef typename SparseMatrixType::Index Index;
+ typedef typename SparseMatrixType::StorageIndex StorageIndex;
struct ScalarWrapper
{
@@ -296,7 +296,7 @@ class RandomSetter
const Index inner = SetterRowMajor ? col : row;
const Index outerMajor = outer >> OuterPacketBits; // index of the packet/map
const Index outerMinor = outer & OuterPacketMask; // index of the inner vector in the packet
- const KeyType key = (KeyType(outerMinor)<<m_keyBitsOffset) | inner;
+ const KeyType key = internal::convert_index<KeyType>((outerMinor<<m_keyBitsOffset) | inner);
return m_hashmaps[outerMajor][key].value;
}
diff --git a/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsArrayAPI.h b/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsArrayAPI.h
new file mode 100644
index 000000000..ed415db99
--- /dev/null
+++ b/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsArrayAPI.h
@@ -0,0 +1,124 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2016 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+#ifndef EIGEN_SPECIALFUNCTIONS_ARRAYAPI_H
+#define EIGEN_SPECIALFUNCTIONS_ARRAYAPI_H
+
+namespace Eigen {
+
+/** \cpp11 \returns an expression of the coefficient-wise igamma(\a a, \a x) applied to the given arrays.
+ *
+ * This function computes the coefficient-wise incomplete gamma function.
+ *
+ * \note This function supports only float and double scalar types in c++11 mode. To support other scalar types,
+ * or float/double in non c++11 mode, the user has to provide implementations of igamma(T,T) for any scalar
+ * type T to be supported.
+ *
+ * \sa Eigen::igammac(), Eigen::lgamma()
+ */
+template<typename Derived,typename ExponentDerived>
+inline const Eigen::CwiseBinaryOp<Eigen::internal::scalar_igamma_op<typename Derived::Scalar>, const Derived, const ExponentDerived>
+igamma(const Eigen::ArrayBase<Derived>& a, const Eigen::ArrayBase<ExponentDerived>& x)
+{
+ return Eigen::CwiseBinaryOp<Eigen::internal::scalar_igamma_op<typename Derived::Scalar>, const Derived, const ExponentDerived>(
+ a.derived(),
+ x.derived()
+ );
+}
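+
+/* Illustrative usage (array values are assumptions):
+ *
+ *   ArrayXd a = ArrayXd::LinSpaced(5, 0.5, 2.5);
+ *   ArrayXd x = ArrayXd::Constant(5, 1.0);
+ *   ArrayXd p = Eigen::igamma(a, x);   // regularized lower incomplete gamma
+ */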
+
+/** \cpp11 \returns an expression of the coefficient-wise igammac(\a a, \a x) applied to the given arrays.
+ *
+ * This function computes the coefficient-wise complementary incomplete gamma function.
+ *
+ * \note This function supports only float and double scalar types in c++11 mode. To support other scalar types,
+ * or float/double in non c++11 mode, the user has to provide implementations of igammac(T,T) for any scalar
+ * type T to be supported.
+ *
+ * \sa Eigen::igamma(), Eigen::lgamma()
+ */
+template<typename Derived,typename ExponentDerived>
+inline const Eigen::CwiseBinaryOp<Eigen::internal::scalar_igammac_op<typename Derived::Scalar>, const Derived, const ExponentDerived>
+igammac(const Eigen::ArrayBase<Derived>& a, const Eigen::ArrayBase<ExponentDerived>& x)
+{
+ return Eigen::CwiseBinaryOp<Eigen::internal::scalar_igammac_op<typename Derived::Scalar>, const Derived, const ExponentDerived>(
+ a.derived(),
+ x.derived()
+ );
+}
+
+/** \cpp11 \returns an expression of the coefficient-wise polygamma(\a n, \a x) applied to the given arrays.
+ *
+ * It returns the \a n -th derivative of the digamma (psi) function evaluated at \c x.
+ *
+ * \note This function supports only float and double scalar types in c++11 mode. To support other scalar types,
+ * or float/double in non c++11 mode, the user has to provide implementations of polygamma(T,T) for any scalar
+ * type T to be supported.
+ *
+ * \sa Eigen::digamma()
+ */
+// * \warning Be careful with the order of the parameters: x.polygamma(n) is equivalent to polygamma(n,x)
+// * \sa ArrayBase::polygamma()
+template<typename DerivedN,typename DerivedX>
+inline const Eigen::CwiseBinaryOp<Eigen::internal::scalar_polygamma_op<typename DerivedX::Scalar>, const DerivedN, const DerivedX>
+polygamma(const Eigen::ArrayBase<DerivedN>& n, const Eigen::ArrayBase<DerivedX>& x)
+{
+ return Eigen::CwiseBinaryOp<Eigen::internal::scalar_polygamma_op<typename DerivedX::Scalar>, const DerivedN, const DerivedX>(
+ n.derived(),
+ x.derived()
+ );
+}
+
+/** \cpp11 \returns an expression of the coefficient-wise betainc(\a x, \a a, \a b) applied to the given arrays.
+ *
+ * This function computes the regularized incomplete beta function (integral).
+ *
+ * \note This function supports only float and double scalar types in c++11 mode. To support other scalar types,
+ * or float/double in non c++11 mode, the user has to provide implementations of betainc(T,T,T) for any scalar
+ * type T to be supported.
+ *
+ * \sa Eigen::betainc(), Eigen::lgamma()
+ */
+template<typename ArgADerived, typename ArgBDerived, typename ArgXDerived>
+inline const Eigen::CwiseTernaryOp<Eigen::internal::scalar_betainc_op<typename ArgXDerived::Scalar>, const ArgADerived, const ArgBDerived, const ArgXDerived>
+betainc(const Eigen::ArrayBase<ArgADerived>& a, const Eigen::ArrayBase<ArgBDerived>& b, const Eigen::ArrayBase<ArgXDerived>& x)
+{
+ return Eigen::CwiseTernaryOp<Eigen::internal::scalar_betainc_op<typename ArgXDerived::Scalar>, const ArgADerived, const ArgBDerived, const ArgXDerived>(
+ a.derived(),
+ b.derived(),
+ x.derived()
+ );
+}
+
+
+/** \returns an expression of the coefficient-wise zeta(\a x, \a q) applied to the given arrays.
+ *
+ * It returns the Riemann zeta function of two arguments \a x and \a q:
+ *
+ * \param x is the exponent, it must be > 1
+ * \param q is the shift, it must be > 0
+ *
+ * \note This function supports only float and double scalar types. To support other scalar types, the user has
+ * to provide implementations of zeta(T,T) for any scalar type T to be supported.
+ *
+ * \sa ArrayBase::zeta()
+ */
+template<typename DerivedX,typename DerivedQ>
+inline const Eigen::CwiseBinaryOp<Eigen::internal::scalar_zeta_op<typename DerivedX::Scalar>, const DerivedX, const DerivedQ>
+zeta(const Eigen::ArrayBase<DerivedX>& x, const Eigen::ArrayBase<DerivedQ>& q)
+{
+ return Eigen::CwiseBinaryOp<Eigen::internal::scalar_zeta_op<typename DerivedX::Scalar>, const DerivedX, const DerivedQ>(
+ x.derived(),
+ q.derived()
+ );
+}
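+
+/* Illustrative check: zeta(x, 1) reduces to the ordinary Riemann zeta function,
+ * so every entry below should be close to pi^2/6.
+ *
+ *   ArrayXd x = ArrayXd::Constant(3, 2.0);
+ *   ArrayXd q = ArrayXd::Constant(3, 1.0);
+ *   ArrayXd z = Eigen::zeta(x, q);
+ */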
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPECIALFUNCTIONS_ARRAYAPI_H
diff --git a/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsFunctors.h b/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsFunctors.h
new file mode 100644
index 000000000..d8f2363be
--- /dev/null
+++ b/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsFunctors.h
@@ -0,0 +1,236 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2016 Eugene Brevdo <ebrevdo@gmail.com>
+// Copyright (C) 2016 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPECIALFUNCTIONS_FUNCTORS_H
+#define EIGEN_SPECIALFUNCTIONS_FUNCTORS_H
+
+namespace Eigen {
+
+namespace internal {
+
+
+/** \internal
+ * \brief Template functor to compute the incomplete gamma function igamma(a, x)
+ *
+ * \sa class CwiseBinaryOp, Cwise::igamma
+ */
+template<typename Scalar> struct scalar_igamma_op : binary_op_base<Scalar,Scalar>
+{
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_igamma_op)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& x) const {
+ using numext::igamma; return igamma(a, x);
+ }
+ template<typename Packet>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& x) const {
+ return internal::pigamma(a, x);
+ }
+};
+template<typename Scalar>
+struct functor_traits<scalar_igamma_op<Scalar> > {
+ enum {
+ // Guesstimate
+ Cost = 20 * NumTraits<Scalar>::MulCost + 10 * NumTraits<Scalar>::AddCost,
+ PacketAccess = packet_traits<Scalar>::HasIGamma
+ };
+};
+
+
+/** \internal
+ * \brief Template functor to compute the complementary incomplete gamma function igammac(a, x)
+ *
+ * \sa class CwiseBinaryOp, Cwise::igammac
+ */
+template<typename Scalar> struct scalar_igammac_op : binary_op_base<Scalar,Scalar>
+{
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_igammac_op)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& x) const {
+ using numext::igammac; return igammac(a, x);
+ }
+ template<typename Packet>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& x) const
+ {
+ return internal::pigammac(a, x);
+ }
+};
+template<typename Scalar>
+struct functor_traits<scalar_igammac_op<Scalar> > {
+ enum {
+ // Guesstimate
+ Cost = 20 * NumTraits<Scalar>::MulCost + 10 * NumTraits<Scalar>::AddCost,
+ PacketAccess = packet_traits<Scalar>::HasIGammac
+ };
+};
+
+
+/** \internal
+ * \brief Template functor to compute the incomplete beta integral betainc(a, b, x)
+ *
+ */
+template<typename Scalar> struct scalar_betainc_op {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_betainc_op)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& x, const Scalar& a, const Scalar& b) const {
+ using numext::betainc; return betainc(x, a, b);
+ }
+ template<typename Packet>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& x, const Packet& a, const Packet& b) const
+ {
+ return internal::pbetainc(x, a, b);
+ }
+};
+template<typename Scalar>
+struct functor_traits<scalar_betainc_op<Scalar> > {
+ enum {
+ // Guesstimate
+ Cost = 400 * NumTraits<Scalar>::MulCost + 400 * NumTraits<Scalar>::AddCost,
+ PacketAccess = packet_traits<Scalar>::HasBetaInc
+ };
+};
+
+
+/** \internal
+ * \brief Template functor to compute the natural log of the absolute
+ * value of Gamma of a scalar
+ * \sa class CwiseUnaryOp, Cwise::lgamma()
+ */
+template<typename Scalar> struct scalar_lgamma_op {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_lgamma_op)
+ EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const {
+ using numext::lgamma; return lgamma(a);
+ }
+ typedef typename packet_traits<Scalar>::type Packet;
+ EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::plgamma(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_lgamma_op<Scalar> >
+{
+ enum {
+ // Guesstimate
+ Cost = 10 * NumTraits<Scalar>::MulCost + 5 * NumTraits<Scalar>::AddCost,
+ PacketAccess = packet_traits<Scalar>::HasLGamma
+ };
+};
+
+/** \internal
+ * \brief Template functor to compute psi, the derivative of lgamma of a scalar.
+ * \sa class CwiseUnaryOp, Cwise::digamma()
+ */
+template<typename Scalar> struct scalar_digamma_op {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_digamma_op)
+ EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const {
+ using numext::digamma; return digamma(a);
+ }
+ typedef typename packet_traits<Scalar>::type Packet;
+ EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::pdigamma(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_digamma_op<Scalar> >
+{
+ enum {
+ // Guesstimate
+ Cost = 10 * NumTraits<Scalar>::MulCost + 5 * NumTraits<Scalar>::AddCost,
+ PacketAccess = packet_traits<Scalar>::HasDiGamma
+ };
+};
+
+/** \internal
+ * \brief Template functor to compute the Riemann Zeta function of two arguments.
+ * \sa class CwiseUnaryOp, Cwise::zeta()
+ */
+template<typename Scalar> struct scalar_zeta_op {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_zeta_op)
+ EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& x, const Scalar& q) const {
+ using numext::zeta; return zeta(x, q);
+ }
+ typedef typename packet_traits<Scalar>::type Packet;
+ EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& x, const Packet& q) const { return internal::pzeta(x, q); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_zeta_op<Scalar> >
+{
+ enum {
+ // Guesstimate
+ Cost = 10 * NumTraits<Scalar>::MulCost + 5 * NumTraits<Scalar>::AddCost,
+ PacketAccess = packet_traits<Scalar>::HasZeta
+ };
+};
+
+/** \internal
+ * \brief Template functor to compute the polygamma function.
+ * \sa class CwiseUnaryOp, Cwise::polygamma()
+ */
+template<typename Scalar> struct scalar_polygamma_op {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_polygamma_op)
+ EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& n, const Scalar& x) const {
+ using numext::polygamma; return polygamma(n, x);
+ }
+ typedef typename packet_traits<Scalar>::type Packet;
+ EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& n, const Packet& x) const { return internal::ppolygamma(n, x); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_polygamma_op<Scalar> >
+{
+ enum {
+ // Guesstimate
+ Cost = 10 * NumTraits<Scalar>::MulCost + 5 * NumTraits<Scalar>::AddCost,
+ PacketAccess = packet_traits<Scalar>::HasPolygamma
+ };
+};
+
+/** \internal
+ * \brief Template functor to compute the Gauss error function of a
+ * scalar
+ * \sa class CwiseUnaryOp, Cwise::erf()
+ */
+template<typename Scalar> struct scalar_erf_op {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_erf_op)
+ EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const {
+ using numext::erf; return erf(a);
+ }
+ typedef typename packet_traits<Scalar>::type Packet;
+ EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::perf(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_erf_op<Scalar> >
+{
+ enum {
+ // Guesstimate
+ Cost = 10 * NumTraits<Scalar>::MulCost + 5 * NumTraits<Scalar>::AddCost,
+ PacketAccess = packet_traits<Scalar>::HasErf
+ };
+};
+
+/** \internal
+ * \brief Template functor to compute the Complementary Error Function
+ * of a scalar
+ * \sa class CwiseUnaryOp, Cwise::erfc()
+ */
+template<typename Scalar> struct scalar_erfc_op {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_erfc_op)
+ EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const {
+ using numext::erfc; return erfc(a);
+ }
+ typedef typename packet_traits<Scalar>::type Packet;
+ EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::perfc(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_erfc_op<Scalar> >
+{
+ enum {
+ // Guesstimate
+ Cost = 10 * NumTraits<Scalar>::MulCost + 5 * NumTraits<Scalar>::AddCost,
+ PacketAccess = packet_traits<Scalar>::HasErfc
+ };
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPECIALFUNCTIONS_FUNCTORS_H
diff --git a/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsHalf.h b/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsHalf.h
new file mode 100644
index 000000000..553bcda6a
--- /dev/null
+++ b/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsHalf.h
@@ -0,0 +1,47 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPECIALFUNCTIONS_HALF_H
+#define EIGEN_SPECIALFUNCTIONS_HALF_H
+
+namespace Eigen {
+namespace numext {
+
+#if EIGEN_HAS_C99_MATH
+template<> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half lgamma(const Eigen::half& a) {
+ return Eigen::half(Eigen::numext::lgamma(static_cast<float>(a)));
+}
+template<> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half digamma(const Eigen::half& a) {
+ return Eigen::half(Eigen::numext::digamma(static_cast<float>(a)));
+}
+template<> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half zeta(const Eigen::half& x, const Eigen::half& q) {
+ return Eigen::half(Eigen::numext::zeta(static_cast<float>(x), static_cast<float>(q)));
+}
+template<> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half polygamma(const Eigen::half& n, const Eigen::half& x) {
+ return Eigen::half(Eigen::numext::polygamma(static_cast<float>(n), static_cast<float>(x)));
+}
+template<> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half erf(const Eigen::half& a) {
+ return Eigen::half(Eigen::numext::erf(static_cast<float>(a)));
+}
+template<> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half erfc(const Eigen::half& a) {
+ return Eigen::half(Eigen::numext::erfc(static_cast<float>(a)));
+}
+template<> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half igamma(const Eigen::half& a, const Eigen::half& x) {
+ return Eigen::half(Eigen::numext::igamma(static_cast<float>(a), static_cast<float>(x)));
+}
+template<> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half igammac(const Eigen::half& a, const Eigen::half& x) {
+ return Eigen::half(Eigen::numext::igammac(static_cast<float>(a), static_cast<float>(x)));
+}
+template<> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half betainc(const Eigen::half& a, const Eigen::half& b, const Eigen::half& x) {
+ return Eigen::half(Eigen::numext::betainc(static_cast<float>(a), static_cast<float>(b), static_cast<float>(x)));
+}
+#endif
+
+} // end namespace numext
+} // end namespace Eigen
+
+#endif // EIGEN_SPECIALFUNCTIONS_HALF_H
diff --git a/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsImpl.h b/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsImpl.h
new file mode 100644
index 000000000..f524d7137
--- /dev/null
+++ b/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsImpl.h
@@ -0,0 +1,1565 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2015 Eugene Brevdo <ebrevdo@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPECIAL_FUNCTIONS_H
+#define EIGEN_SPECIAL_FUNCTIONS_H
+
+namespace Eigen {
+namespace internal {
+
+// Parts of this code are based on the Cephes Math Library.
+//
+// Cephes Math Library Release 2.8: June, 2000
+// Copyright 1984, 1987, 1992, 2000 by Stephen L. Moshier
+//
+// Permission has been kindly provided by the original author
+// to incorporate the Cephes software into the Eigen codebase:
+//
+// From: Stephen Moshier
+// To: Eugene Brevdo
+// Subject: Re: Permission to wrap several cephes functions in Eigen
+//
+// Hello Eugene,
+//
+// Thank you for writing.
+//
+// If your licensing is similar to BSD, the formal way that has been
+// handled is simply to add a statement to the effect that you are incorporating
+// the Cephes software by permission of the author.
+//
+// Good luck with your project,
+// Steve
+
+namespace cephes {
+
+/* polevl (modified for Eigen)
+ *
+ * Evaluate polynomial
+ *
+ *
+ *
+ * SYNOPSIS:
+ *
+ * int N;
+ * Scalar x, y, coef[N+1];
+ *
+ * y = polevl<decltype(x), N>( x, coef);
+ *
+ *
+ *
+ * DESCRIPTION:
+ *
+ * Evaluates polynomial of degree N:
+ *
+ *    y = C0 + C1 x + C2 x^2 + ... + CN x^N
+ *
+ * Coefficients are stored in reverse order:
+ *
+ *    coef[0] = CN, ..., coef[N] = C0.
+ *
+ * The function p1evl() (not ported here) assumes that the leading
+ * coefficient coef[N] = 1.0 and is omitted from the array. Its calling
+ * arguments are otherwise the same as polevl().
+ *
+ *
+ * The Eigen implementation is templatized. For best speed, store
+ * coef as a const array (constexpr), e.g.
+ *
+ * const double coef[] = {1.0, 2.0, 3.0, ...};
+ *
+ */
+template <typename Scalar, int N>
+struct polevl {
+ EIGEN_DEVICE_FUNC
+ static EIGEN_STRONG_INLINE Scalar run(const Scalar x, const Scalar coef[]) {
+ EIGEN_STATIC_ASSERT((N > 0), YOU_MADE_A_PROGRAMMING_MISTAKE);
+
+ return polevl<Scalar, N - 1>::run(x, coef) * x + coef[N];
+ }
+};
+
+template <typename Scalar>
+struct polevl<Scalar, 0> {
+ EIGEN_DEVICE_FUNC
+ static EIGEN_STRONG_INLINE Scalar run(const Scalar, const Scalar coef[]) {
+ return coef[0];
+ }
+};
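+
+/* Illustrative sketch (hypothetical values): evaluating p(x) = 4 + 3x + 2x^2
+ * with the recursion above; coefficients are stored highest degree first:
+ * \code
+ * const float coef[] = {2.0f, 3.0f, 4.0f};      // C2, C1, C0
+ * float y = polevl<float, 2>::run(0.5f, coef);  // (2*0.5 + 3)*0.5 + 4 = 6.0f
+ * \endcode
+ */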
+
+} // end namespace cephes
+
+/****************************************************************************
+ * Implementation of lgamma, requires C++11/C99 *
+ ****************************************************************************/
+
+template <typename Scalar>
+struct lgamma_impl {
+ EIGEN_DEVICE_FUNC
+ static EIGEN_STRONG_INLINE Scalar run(const Scalar) {
+ EIGEN_STATIC_ASSERT((internal::is_same<Scalar, Scalar>::value == false),
+ THIS_TYPE_IS_NOT_SUPPORTED);
+ return Scalar(0);
+ }
+};
+
+template <typename Scalar>
+struct lgamma_retval {
+ typedef Scalar type;
+};
+
+#if EIGEN_HAS_C99_MATH
+template <>
+struct lgamma_impl<float> {
+ EIGEN_DEVICE_FUNC
+ static EIGEN_STRONG_INLINE float run(float x) {
+#if !defined(__CUDA_ARCH__) && (defined(_BSD_SOURCE) || defined(_SVID_SOURCE)) && !defined(__APPLE__)
+ int signgam;
+ return ::lgammaf_r(x, &signgam);
+#else
+ return ::lgammaf(x);
+#endif
+ }
+};
+
+template <>
+struct lgamma_impl<double> {
+ EIGEN_DEVICE_FUNC
+ static EIGEN_STRONG_INLINE double run(double x) {
+#if !defined(__CUDA_ARCH__) && (defined(_BSD_SOURCE) || defined(_SVID_SOURCE)) && !defined(__APPLE__)
+ int signgam;
+ return ::lgamma_r(x, &signgam);
+#else
+ return ::lgamma(x);
+#endif
+ }
+};
+#endif
+
+/****************************************************************************
+ * Implementation of digamma (psi), based on Cephes *
+ ****************************************************************************/
+
+template <typename Scalar>
+struct digamma_retval {
+ typedef Scalar type;
+};
+
+/*
+ *
+ * Polynomial evaluation helper for the Psi (digamma) function.
+ *
+ * digamma_impl_maybe_poly::run(s) evaluates the asymptotic Psi expansion for
+ * input Scalar s, assuming s is above 10.0.
+ *
+ * If s is above a certain threshold for the given Scalar type, zero
+ * is returned. Otherwise the polynomial is evaluated with enough
+ * coefficients for results matching Scalar machine precision.
+ *
+ *
+ */
+template <typename Scalar>
+struct digamma_impl_maybe_poly {
+ EIGEN_DEVICE_FUNC
+ static EIGEN_STRONG_INLINE Scalar run(const Scalar) {
+ EIGEN_STATIC_ASSERT((internal::is_same<Scalar, Scalar>::value == false),
+ THIS_TYPE_IS_NOT_SUPPORTED);
+ return Scalar(0);
+ }
+};
+
+
+template <>
+struct digamma_impl_maybe_poly<float> {
+ EIGEN_DEVICE_FUNC
+ static EIGEN_STRONG_INLINE float run(const float s) {
+ const float A[] = {
+ -4.16666666666666666667E-3f,
+ 3.96825396825396825397E-3f,
+ -8.33333333333333333333E-3f,
+ 8.33333333333333333333E-2f
+ };
+
+ float z;
+ if (s < 1.0e8f) {
+ z = 1.0f / (s * s);
+ return z * cephes::polevl<float, 3>::run(z, A);
+ } else return 0.0f;
+ }
+};
+
+template <>
+struct digamma_impl_maybe_poly<double> {
+ EIGEN_DEVICE_FUNC
+ static EIGEN_STRONG_INLINE double run(const double s) {
+ const double A[] = {
+ 8.33333333333333333333E-2,
+ -2.10927960927960927961E-2,
+ 7.57575757575757575758E-3,
+ -4.16666666666666666667E-3,
+ 3.96825396825396825397E-3,
+ -8.33333333333333333333E-3,
+ 8.33333333333333333333E-2
+ };
+
+ double z;
+ if (s < 1.0e17) {
+ z = 1.0 / (s * s);
+ return z * cephes::polevl<double, 6>::run(z, A);
+ }
+ else return 0.0;
+ }
+};
+
+template <typename Scalar>
+struct digamma_impl {
+ EIGEN_DEVICE_FUNC
+ static Scalar run(Scalar x) {
+ /*
+ *
+ * Psi (digamma) function (modified for Eigen)
+ *
+ *
+ * SYNOPSIS:
+ *
+ * double x, y, psi();
+ *
+ * y = psi( x );
+ *
+ *
+ * DESCRIPTION:
+ *
+ *    psi(x) = d/dx ln(gamma(x))
+ *
+ * is the logarithmic derivative of the gamma function.
+ * For integer x,
+ *
+ *    psi(n) = -EUL + sum_{k=1}^{n-1} 1/k.
+ *
+ * If x is negative, it is transformed to a positive argument by the
+ * reflection formula psi(1-x) = psi(x) + pi cot(pi x).
+ * For general positive x, the argument is made greater than 10
+ * using the recurrence psi(x+1) = psi(x) + 1/x.
+ * Then the following asymptotic expansion is applied:
+ *
+ *    psi(x) = log(x) - 1/(2x) - sum_{k=1}^{inf} B_{2k} / (2k x^{2k})
+ *
+ * where the B2k are Bernoulli numbers.
+ *
+ * ACCURACY (double):
+ * Relative error (except absolute when |psi| < 1):
+ * arithmetic domain # trials peak rms
+ * IEEE 0,30 30000 1.3e-15 1.4e-16
+ * IEEE -30,0 40000 1.5e-15 2.2e-16
+ *
+ * ACCURACY (float):
+ * Absolute error, relative when |psi| > 1:
+ * arithmetic domain # trials peak rms
+ * IEEE -33,0 30000 8.2e-7 1.2e-7
+ * IEEE 0,33 100000 7.3e-7 7.7e-8
+ *
+ * ERROR MESSAGES:
+ * message condition value returned
+ * psi singularity x integer <=0 INFINITY
+ */
+
+ Scalar p, q, nz, s, w, y;
+ bool negative = false;
+
+ const Scalar maxnum = NumTraits<Scalar>::infinity();
+ const Scalar m_pi = Scalar(EIGEN_PI);
+
+ const Scalar zero = Scalar(0);
+ const Scalar one = Scalar(1);
+ const Scalar half = Scalar(0.5);
+ nz = zero;
+
+ if (x <= zero) {
+ negative = true;
+ q = x;
+ p = numext::floor(q);
+ if (p == q) {
+ return maxnum;
+ }
+ /* Remove the zeros of tan(m_pi x)
+ * by subtracting the nearest integer from x
+ */
+ nz = q - p;
+ if (nz != half) {
+ if (nz > half) {
+ p += one;
+ nz = q - p;
+ }
+ nz = m_pi / numext::tan(m_pi * nz);
+ }
+ else {
+ nz = zero;
+ }
+ x = one - x;
+ }
+
+ /* use the recurrence psi(x+1) = psi(x) + 1/x. */
+ s = x;
+ w = zero;
+ while (s < Scalar(10)) {
+ w += one / s;
+ s += one;
+ }
+
+ y = digamma_impl_maybe_poly<Scalar>::run(s);
+
+ y = numext::log(s) - (half / s) - y - w;
+
+ return (negative) ? y - nz : y;
+ }
+};
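+
+/* Illustrative sketch: psi(1) is minus Euler's constant, and the recurrence
+ * used above implies psi(2) = psi(1) + 1:
+ * \code
+ * double psi1 = digamma_impl<double>::run(1.0);  // approx -0.57721566
+ * double psi2 = digamma_impl<double>::run(2.0);  // approx  0.42278434 == psi1 + 1.0
+ * \endcode
+ */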
+
+/****************************************************************************
+ * Implementation of erf, requires C++11/C99 *
+ ****************************************************************************/
+
+template <typename Scalar>
+struct erf_impl {
+ EIGEN_DEVICE_FUNC
+ static EIGEN_STRONG_INLINE Scalar run(const Scalar) {
+ EIGEN_STATIC_ASSERT((internal::is_same<Scalar, Scalar>::value == false),
+ THIS_TYPE_IS_NOT_SUPPORTED);
+ return Scalar(0);
+ }
+};
+
+template <typename Scalar>
+struct erf_retval {
+ typedef Scalar type;
+};
+
+#if EIGEN_HAS_C99_MATH
+template <>
+struct erf_impl<float> {
+ EIGEN_DEVICE_FUNC
+ static EIGEN_STRONG_INLINE float run(float x) { return ::erff(x); }
+};
+
+template <>
+struct erf_impl<double> {
+ EIGEN_DEVICE_FUNC
+ static EIGEN_STRONG_INLINE double run(double x) { return ::erf(x); }
+};
+#endif // EIGEN_HAS_C99_MATH
+
+/***************************************************************************
+* Implementation of erfc, requires C++11/C99 *
+****************************************************************************/
+
+template <typename Scalar>
+struct erfc_impl {
+ EIGEN_DEVICE_FUNC
+ static EIGEN_STRONG_INLINE Scalar run(const Scalar) {
+ EIGEN_STATIC_ASSERT((internal::is_same<Scalar, Scalar>::value == false),
+ THIS_TYPE_IS_NOT_SUPPORTED);
+ return Scalar(0);
+ }
+};
+
+template <typename Scalar>
+struct erfc_retval {
+ typedef Scalar type;
+};
+
+#if EIGEN_HAS_C99_MATH
+template <>
+struct erfc_impl<float> {
+ EIGEN_DEVICE_FUNC
+ static EIGEN_STRONG_INLINE float run(const float x) { return ::erfcf(x); }
+};
+
+template <>
+struct erfc_impl<double> {
+ EIGEN_DEVICE_FUNC
+ static EIGEN_STRONG_INLINE double run(const double x) { return ::erfc(x); }
+};
+#endif // EIGEN_HAS_C99_MATH
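+
+/* Illustrative sketch: erf and erfc are complementary, so
+ * erf(x) + erfc(x) == 1 for all x, up to rounding:
+ * \code
+ * float e  = erf_impl<float>::run(0.5f);   // approx 0.52049988f
+ * float ec = erfc_impl<float>::run(0.5f);  // approx 0.47950012f
+ * \endcode
+ */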
+
+/**************************************************************************************************************
+ * Implementation of igammac (complemented incomplete gamma integral), based on Cephes but requires C++11/C99 *
+ **************************************************************************************************************/
+
+template <typename Scalar>
+struct igammac_retval {
+ typedef Scalar type;
+};
+
+// NOTE: cephes_helper is also used to implement zeta
+template <typename Scalar>
+struct cephes_helper {
+ EIGEN_DEVICE_FUNC
+ static EIGEN_STRONG_INLINE Scalar machep() { assert(false && "machep not supported for this type"); return 0.0; }
+ EIGEN_DEVICE_FUNC
+ static EIGEN_STRONG_INLINE Scalar big() { assert(false && "big not supported for this type"); return 0.0; }
+ EIGEN_DEVICE_FUNC
+ static EIGEN_STRONG_INLINE Scalar biginv() { assert(false && "biginv not supported for this type"); return 0.0; }
+};
+
+template <>
+struct cephes_helper<float> {
+ EIGEN_DEVICE_FUNC
+ static EIGEN_STRONG_INLINE float machep() {
+ return NumTraits<float>::epsilon() / 2; // 1.0 - machep == 1.0
+ }
+ EIGEN_DEVICE_FUNC
+ static EIGEN_STRONG_INLINE float big() {
+ // use epsneg (1.0 - epsneg == 1.0)
+ return 1.0f / (NumTraits<float>::epsilon() / 2);
+ }
+ EIGEN_DEVICE_FUNC
+ static EIGEN_STRONG_INLINE float biginv() {
+ // epsneg
+ return machep();
+ }
+};
+
+template <>
+struct cephes_helper<double> {
+ EIGEN_DEVICE_FUNC
+ static EIGEN_STRONG_INLINE double machep() {
+ return NumTraits<double>::epsilon() / 2; // 1.0 - machep == 1.0
+ }
+ EIGEN_DEVICE_FUNC
+ static EIGEN_STRONG_INLINE double big() {
+ return 1.0 / NumTraits<double>::epsilon();
+ }
+ EIGEN_DEVICE_FUNC
+ static EIGEN_STRONG_INLINE double biginv() {
+ // biginv == 1/big, i.e. epsilon
+ return NumTraits<double>::epsilon();
+ }
+};
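+
+/* Illustrative sketch: these constants drive the convergence and rescaling
+ * tests of the continued fractions below. For double:
+ * \code
+ * double m  = cephes_helper<double>::machep();  // 2^-53, half the machine epsilon
+ * double bg = cephes_helper<double>::big();     // 2^52, i.e. 1/epsilon
+ * double bi = cephes_helper<double>::biginv();  // 2^-52, i.e. epsilon
+ * \endcode
+ */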
+
+#if !EIGEN_HAS_C99_MATH
+
+template <typename Scalar>
+struct igammac_impl {
+ EIGEN_DEVICE_FUNC
+ static Scalar run(Scalar a, Scalar x) {
+ EIGEN_STATIC_ASSERT((internal::is_same<Scalar, Scalar>::value == false),
+ THIS_TYPE_IS_NOT_SUPPORTED);
+ return Scalar(0);
+ }
+};
+
+#else
+
+template <typename Scalar> struct igamma_impl; // predeclare igamma_impl
+
+template <typename Scalar>
+struct igammac_impl {
+ EIGEN_DEVICE_FUNC
+ static Scalar run(Scalar a, Scalar x) {
+ /* igamc()
+ *
+ * Incomplete gamma integral (modified for Eigen)
+ *
+ *
+ *
+ * SYNOPSIS:
+ *
+ * double a, x, y, igamc();
+ *
+ * y = igamc( a, x );
+ *
+ * DESCRIPTION:
+ *
+ * The function is defined by
+ *
+ *
+ *    igamc(a,x) = 1 - igam(a,x)
+ *
+ *               = (1/gamma(a)) * integral_{x}^{inf} e^(-t) t^(a-1) dt.
+ *
+ *
+ * In this implementation both arguments must be positive.
+ * The integral is evaluated by either a power series or
+ * continued fraction expansion, depending on the relative
+ * values of a and x.
+ *
+ * ACCURACY (float):
+ *
+ * Relative error:
+ * arithmetic domain # trials peak rms
+ * IEEE 0,30 30000 7.8e-6 5.9e-7
+ *
+ *
+ * ACCURACY (double):
+ *
+ * Tested at random a, x.
+ * a x Relative error:
+ * arithmetic domain domain # trials peak rms
+ * IEEE 0.5,100 0,100 200000 1.9e-14 1.7e-15
+ * IEEE 0.01,0.5 0,100 200000 1.4e-13 1.6e-15
+ *
+ */
+ /*
+ Cephes Math Library Release 2.2: June, 1992
+ Copyright 1985, 1987, 1992 by Stephen L. Moshier
+ Direct inquiries to 30 Frost Street, Cambridge, MA 02140
+ */
+ const Scalar zero = 0;
+ const Scalar one = 1;
+ const Scalar nan = NumTraits<Scalar>::quiet_NaN();
+
+ if ((x < zero) || (a <= zero)) {
+ // domain error
+ return nan;
+ }
+
+ if ((x < one) || (x < a)) {
+ /* The checks above ensure that we meet the preconditions for
+ * igamma_impl::Impl(), so call it, rather than igamma_impl::run().
+ * Calling run() would also work, but in that case the compiler may not be
+ * able to prove that igammac_impl::run and igamma_impl::run are not
+ * mutually recursive. This leads to worse code, particularly on
+ * platforms like nvptx, where recursion is allowed only begrudgingly.
+ */
+ return (one - igamma_impl<Scalar>::Impl(a, x));
+ }
+
+ return Impl(a, x);
+ }
+
+ private:
+ /* igamma_impl calls igammac_impl::Impl. */
+ friend struct igamma_impl<Scalar>;
+
+ /* Actually computes igamc(a, x).
+ *
+ * Preconditions:
+ * a > 0
+ * x >= 1
+ * x >= a
+ */
+ EIGEN_DEVICE_FUNC static Scalar Impl(Scalar a, Scalar x) {
+ const Scalar zero = 0;
+ const Scalar one = 1;
+ const Scalar two = 2;
+ const Scalar machep = cephes_helper<Scalar>::machep();
+ const Scalar maxlog = numext::log(NumTraits<Scalar>::highest());
+ const Scalar big = cephes_helper<Scalar>::big();
+ const Scalar biginv = cephes_helper<Scalar>::biginv();
+ const Scalar inf = NumTraits<Scalar>::infinity();
+
+ Scalar ans, ax, c, yc, r, t, y, z;
+ Scalar pk, pkm1, pkm2, qk, qkm1, qkm2;
+
+ if (x == inf) return zero; // std::isinf crashes on CUDA
+
+ /* Compute x**a * exp(-x) / gamma(a) */
+ ax = a * numext::log(x) - x - lgamma_impl<Scalar>::run(a);
+ if (ax < -maxlog) { // underflow
+ return zero;
+ }
+ ax = numext::exp(ax);
+
+ // continued fraction
+ y = one - a;
+ z = x + y + one;
+ c = zero;
+ pkm2 = one;
+ qkm2 = x;
+ pkm1 = x + one;
+ qkm1 = z * x;
+ ans = pkm1 / qkm1;
+
+ while (true) {
+ c += one;
+ y += one;
+ z += two;
+ yc = y * c;
+ pk = pkm1 * z - pkm2 * yc;
+ qk = qkm1 * z - qkm2 * yc;
+ if (qk != zero) {
+ r = pk / qk;
+ t = numext::abs((ans - r) / r);
+ ans = r;
+ } else {
+ t = one;
+ }
+ pkm2 = pkm1;
+ pkm1 = pk;
+ qkm2 = qkm1;
+ qkm1 = qk;
+ if (numext::abs(pk) > big) {
+ pkm2 *= biginv;
+ pkm1 *= biginv;
+ qkm2 *= biginv;
+ qkm1 *= biginv;
+ }
+ if (t <= machep) {
+ break;
+ }
+ }
+
+ return (ans * ax);
+ }
+};
+
+#endif // EIGEN_HAS_C99_MATH
+
+/************************************************************************************************
+ * Implementation of igamma (incomplete gamma integral), based on Cephes but requires C++11/C99 *
+ ************************************************************************************************/
+
+template <typename Scalar>
+struct igamma_retval {
+ typedef Scalar type;
+};
+
+#if !EIGEN_HAS_C99_MATH
+
+template <typename Scalar>
+struct igamma_impl {
+ EIGEN_DEVICE_FUNC
+ static EIGEN_STRONG_INLINE Scalar run(Scalar a, Scalar x) {
+ EIGEN_STATIC_ASSERT((internal::is_same<Scalar, Scalar>::value == false),
+ THIS_TYPE_IS_NOT_SUPPORTED);
+ return Scalar(0);
+ }
+};
+
+#else
+
+template <typename Scalar>
+struct igamma_impl {
+ EIGEN_DEVICE_FUNC
+ static Scalar run(Scalar a, Scalar x) {
+ /* igam()
+ * Incomplete gamma integral
+ *
+ *
+ *
+ * SYNOPSIS:
+ *
+ * double a, x, y, igam();
+ *
+ * y = igam( a, x );
+ *
+ * DESCRIPTION:
+ *
+ * The function is defined by
+ *
+ *    igam(a,x) = (1/gamma(a)) * integral_{0}^{x} e^(-t) t^(a-1) dt.
+ *
+ *
+ * In this implementation both arguments must be positive.
+ * The integral is evaluated by either a power series or
+ * continued fraction expansion, depending on the relative
+ * values of a and x.
+ *
+ * ACCURACY (double):
+ *
+ * Relative error:
+ * arithmetic domain # trials peak rms
+ * IEEE 0,30 200000 3.6e-14 2.9e-15
+ * IEEE 0,100 300000 9.9e-14 1.5e-14
+ *
+ *
+ * ACCURACY (float):
+ *
+ * Relative error:
+ * arithmetic domain # trials peak rms
+ * IEEE 0,30 20000 7.8e-6 5.9e-7
+ *
+ */
+ /*
+ Cephes Math Library Release 2.2: June, 1992
+ Copyright 1985, 1987, 1992 by Stephen L. Moshier
+ Direct inquiries to 30 Frost Street, Cambridge, MA 02140
+ */
+
+
+ /* left tail of incomplete gamma function:
+ *
+ *    x^a e^(-x) sum_{k=0}^{inf} x^k / gamma(a+k+1)
+ */
+ const Scalar zero = 0;
+ const Scalar one = 1;
+ const Scalar nan = NumTraits<Scalar>::quiet_NaN();
+
+ if (x == zero) return zero;
+
+ if ((x < zero) || (a <= zero)) { // domain error
+ return nan;
+ }
+
+ if ((x > one) && (x > a)) {
+ /* The checks above ensure that we meet the preconditions for
+ * igammac_impl::Impl(), so call it, rather than igammac_impl::run().
+ * Calling run() would also work, but in that case the compiler may not be
+ * able to prove that igammac_impl::run and igamma_impl::run are not
+ * mutually recursive. This leads to worse code, particularly on
+ * platforms like nvptx, where recursion is allowed only begrudgingly.
+ */
+ return (one - igammac_impl<Scalar>::Impl(a, x));
+ }
+
+ return Impl(a, x);
+ }
+
+ private:
+ /* igammac_impl calls igamma_impl::Impl. */
+ friend struct igammac_impl<Scalar>;
+
+ /* Actually computes igam(a, x).
+ *
+ * Preconditions:
+ * x > 0
+ * a > 0
+ * !(x > 1 && x > a)
+ */
+ EIGEN_DEVICE_FUNC static Scalar Impl(Scalar a, Scalar x) {
+ const Scalar zero = 0;
+ const Scalar one = 1;
+ const Scalar machep = cephes_helper<Scalar>::machep();
+ const Scalar maxlog = numext::log(NumTraits<Scalar>::highest());
+
+ Scalar ans, ax, c, r;
+
+ /* Compute x**a * exp(-x) / gamma(a) */
+ ax = a * numext::log(x) - x - lgamma_impl<Scalar>::run(a);
+ if (ax < -maxlog) {
+ // underflow
+ return zero;
+ }
+ ax = numext::exp(ax);
+
+ /* power series */
+ r = a;
+ c = one;
+ ans = one;
+
+ while (true) {
+ r += one;
+ c *= x/r;
+ ans += c;
+ if (c/ans <= machep) {
+ break;
+ }
+ }
+
+ return (ans * ax / a);
+ }
+};
+
+#endif // EIGEN_HAS_C99_MATH
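+
+/* Illustrative sketch: igam and igamc are complementary tails of the
+ * normalized incomplete gamma integral, so for valid arguments the two
+ * results sum to one:
+ * \code
+ * double p = igamma_impl<double>::run(2.0, 3.0);   // approx 0.8008517
+ * double q = igammac_impl<double>::run(2.0, 3.0);  // approx 0.1991483, p + q == 1
+ * \endcode
+ */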
+
+/*****************************************************************************
+ * Implementation of Riemann zeta function of two arguments, based on Cephes *
+ *****************************************************************************/
+
+template <typename Scalar>
+struct zeta_retval {
+ typedef Scalar type;
+};
+
+template <typename Scalar>
+struct zeta_impl_series {
+ EIGEN_DEVICE_FUNC
+ static EIGEN_STRONG_INLINE Scalar run(const Scalar) {
+ EIGEN_STATIC_ASSERT((internal::is_same<Scalar, Scalar>::value == false),
+ THIS_TYPE_IS_NOT_SUPPORTED);
+ return Scalar(0);
+ }
+};
+
+template <>
+struct zeta_impl_series<float> {
+ EIGEN_DEVICE_FUNC
+ static EIGEN_STRONG_INLINE bool run(float& a, float& b, float& s, const float x, const float machep) {
+ int i = 0;
+ while(i < 9)
+ {
+ i += 1;
+ a += 1.0f;
+ b = numext::pow( a, -x );
+ s += b;
+ if( numext::abs(b/s) < machep )
+ return true;
+ }
+
+ // Not converged yet; the caller continues with the Euler-Maclaurin tail.
+ return false;
+ }
+};
+
+template <>
+struct zeta_impl_series<double> {
+ EIGEN_DEVICE_FUNC
+ static EIGEN_STRONG_INLINE bool run(double& a, double& b, double& s, const double x, const double machep) {
+ int i = 0;
+ while( (i < 9) || (a <= 9.0) )
+ {
+ i += 1;
+ a += 1.0;
+ b = numext::pow( a, -x );
+ s += b;
+ if( numext::abs(b/s) < machep )
+ return true;
+ }
+
+ // Not converged yet; the caller continues with the Euler-Maclaurin tail.
+ return false;
+ }
+};
+
+template <typename Scalar>
+struct zeta_impl {
+ EIGEN_DEVICE_FUNC
+ static Scalar run(Scalar x, Scalar q) {
+ /* zeta.c
+ *
+ * Riemann zeta function of two arguments
+ *
+ *
+ *
+ * SYNOPSIS:
+ *
+ * double x, q, y, zeta();
+ *
+ * y = zeta( x, q );
+ *
+ *
+ *
+ * DESCRIPTION:
+ *
+ *
+ *
+ *    zeta(x,q) = sum_{k=0}^{inf} (k+q)^(-x)
+ *
+ * where x > 1 and q is not a negative integer or zero.
+ * The Euler-Maclaurin summation formula is used to obtain
+ * the expansion
+ *
+ *    zeta(x,q) = sum_{k=1}^{n} (k+q)^(-x)
+ *
+ *                + (n+q)^(1-x)/(x-1) - 1/(2(n+q)^x)
+ *
+ *                + sum_{j=1}^{inf} B_{2j} x(x+1)...(x+2j) / ( (2j)! (n+q)^(x+2j+1) )
+ *
+ * where the B2j are Bernoulli numbers. Note that (see zetac.c)
+ * zeta(x,1) = zetac(x) + 1.
+ *
+ *
+ *
+ * ACCURACY:
+ *
+ * Relative error for single precision:
+ * arithmetic domain # trials peak rms
+ * IEEE 0,25 10000 6.9e-7 1.0e-7
+ *
+ * Large arguments may produce underflow in powf(), in which
+ * case the results are inaccurate.
+ *
+ * REFERENCE:
+ *
+ * Gradshteyn, I. S., and I. M. Ryzhik, Tables of Integrals,
+ * Series, and Products, p. 1073; Academic Press, 1980.
+ *
+ */
+
+ int i;
+ Scalar p, r, a, b, k, s, t, w;
+
+ const Scalar A[] = {
+ Scalar(12.0),
+ Scalar(-720.0),
+ Scalar(30240.0),
+ Scalar(-1209600.0),
+ Scalar(47900160.0),
+ Scalar(-1.8924375803183791606e9), /*1.307674368e12/691*/
+ Scalar(7.47242496e10),
+ Scalar(-2.950130727918164224e12), /*1.067062284288e16/3617*/
+ Scalar(1.1646782814350067249e14), /*5.109094217170944e18/43867*/
+ Scalar(-4.5979787224074726105e15), /*8.028576626982912e20/174611*/
+ Scalar(1.8152105401943546773e17), /*1.5511210043330985984e23/854513*/
+ Scalar(-7.1661652561756670113e18) /*1.6938241367317436694528e27/236364091*/
+ };
+
+ const Scalar maxnum = NumTraits<Scalar>::infinity();
+ const Scalar zero = 0.0, half = 0.5, one = 1.0;
+ const Scalar machep = cephes_helper<Scalar>::machep();
+ const Scalar nan = NumTraits<Scalar>::quiet_NaN();
+
+ if( x == one )
+ return maxnum;
+
+ if( x < one )
+ {
+ return nan;
+ }
+
+ if( q <= zero )
+ {
+ if(q == numext::floor(q))
+ {
+ return maxnum;
+ }
+ p = x;
+ r = numext::floor(p);
+ if (p != r)
+ return nan;
+ }
+
+ /* Permit negative q but continue sum until n+q > +9 .
+ * This case should be handled by a reflection formula.
+ * If q<0 and x is an integer, there is a relation to
+ * the polygamma function.
+ */
+ s = numext::pow( q, -x );
+ a = q;
+ b = zero;
+ // Run the summation in a helper function that is specific to the floating precision
+ if (zeta_impl_series<Scalar>::run(a, b, s, x, machep)) {
+ return s;
+ }
+
+ w = a;
+ s += b*w/(x-one);
+ s -= half * b;
+ a = one;
+ k = zero;
+ for( i=0; i<12; i++ )
+ {
+ a *= x + k;
+ b /= w;
+ t = a*b/A[i];
+ s = s + t;
+ t = numext::abs(t/s);
+ if( t < machep ) {
+ break;
+ }
+ k += one;
+ a *= x + k;
+ b /= w;
+ k += one;
+ }
+ return s;
+ }
+};
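+
+/* Illustrative sketch: for q = 1 the Hurwitz zeta function reduces to the
+ * Riemann zeta function, e.g. zeta(2,1) = pi^2/6:
+ * \code
+ * double z = zeta_impl<double>::run(2.0, 1.0);  // approx 1.6449341
+ * \endcode
+ */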
+
+/****************************************************************************
+ * Implementation of polygamma function, requires C++11/C99 *
+ ****************************************************************************/
+
+template <typename Scalar>
+struct polygamma_retval {
+ typedef Scalar type;
+};
+
+#if !EIGEN_HAS_C99_MATH
+
+template <typename Scalar>
+struct polygamma_impl {
+ EIGEN_DEVICE_FUNC
+ static EIGEN_STRONG_INLINE Scalar run(Scalar n, Scalar x) {
+ EIGEN_STATIC_ASSERT((internal::is_same<Scalar, Scalar>::value == false),
+ THIS_TYPE_IS_NOT_SUPPORTED);
+ return Scalar(0);
+ }
+};
+
+#else
+
+template <typename Scalar>
+struct polygamma_impl {
+ EIGEN_DEVICE_FUNC
+ static Scalar run(Scalar n, Scalar x) {
+ Scalar zero = 0.0, one = 1.0;
+ Scalar nplus = n + one;
+ const Scalar nan = NumTraits<Scalar>::quiet_NaN();
+
+ // Check that n is an integer
+ if (numext::floor(n) != n) {
+ return nan;
+ }
+ // Just return the digamma function for n = 0
+ else if (n == zero) {
+ return digamma_impl<Scalar>::run(x);
+ }
+ // Use the same implementation as scipy
+ else {
+ Scalar factorial = numext::exp(lgamma_impl<Scalar>::run(nplus));
+ return numext::pow(-one, nplus) * factorial * zeta_impl<Scalar>::run(nplus, x);
+ }
+ }
+};
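+
+/* Illustrative sketch: the identity used above is
+ * psi^(n)(x) = (-1)^(n+1) n! zeta(n+1, x), so the trigamma at 1 is pi^2/6:
+ * \code
+ * double t = polygamma_impl<double>::run(1.0, 1.0);  // approx 1.6449341
+ * \endcode
+ */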
+
+#endif // EIGEN_HAS_C99_MATH
+
+/************************************************************************************************
+ * Implementation of betainc (incomplete beta integral), based on Cephes but requires C++11/C99 *
+ ************************************************************************************************/
+
+template <typename Scalar>
+struct betainc_retval {
+ typedef Scalar type;
+};
+
+#if !EIGEN_HAS_C99_MATH
+
+template <typename Scalar>
+struct betainc_impl {
+ EIGEN_DEVICE_FUNC
+ static EIGEN_STRONG_INLINE Scalar run(Scalar a, Scalar b, Scalar x) {
+ EIGEN_STATIC_ASSERT((internal::is_same<Scalar, Scalar>::value == false),
+ THIS_TYPE_IS_NOT_SUPPORTED);
+ return Scalar(0);
+ }
+};
+
+#else
+
+template <typename Scalar>
+struct betainc_impl {
+ EIGEN_DEVICE_FUNC
+ static EIGEN_STRONG_INLINE Scalar run(Scalar, Scalar, Scalar) {
+ /* betaincf.c
+ *
+ * Incomplete beta integral
+ *
+ *
+ * SYNOPSIS:
+ *
+ * float a, b, x, y, betaincf();
+ *
+ * y = betaincf( a, b, x );
+ *
+ *
+ * DESCRIPTION:
+ *
+ * Returns incomplete beta integral of the arguments, evaluated
+ * from zero to x. The function is defined as
+ *
+ *    (gamma(a+b) / (gamma(a) gamma(b))) * integral_{0}^{x} t^(a-1) (1-t)^(b-1) dt.
+ *
+ * The domain of definition is 0 <= x <= 1. In this
+ * implementation a and b are restricted to positive values.
+ * The integral from x to 1 may be obtained by the symmetry
+ * relation
+ *
+ * 1 - betainc( a, b, x ) = betainc( b, a, 1-x ).
+ *
+ * The integral is evaluated by a continued fraction expansion.
+ * If a < 1, the function calls itself recursively after a
+ * transformation to increase a to a+1.
+ *
+ * ACCURACY (float):
+ *
+ * Tested at random points (a,b,x) with a and b in the indicated
+ * interval and x between 0 and 1.
+ *
+ * arithmetic domain # trials peak rms
+ * Relative error:
+ * IEEE 0,30 10000 3.7e-5 5.1e-6
+ * IEEE 0,100 10000 1.7e-4 2.5e-5
+ * The useful domain for relative error is limited by underflow
+ * of the single precision exponential function.
+ * Absolute error:
+ * IEEE 0,30 100000 2.2e-5 9.6e-7
+ * IEEE 0,100 10000 6.5e-5 3.7e-6
+ *
+ * Larger errors may occur for extreme ratios of a and b.
+ *
+ * ACCURACY (double):
+ * arithmetic domain # trials peak rms
+ * IEEE 0,5 10000 6.9e-15 4.5e-16
+ * IEEE 0,85 250000 2.2e-13 1.7e-14
+ * IEEE 0,1000 30000 5.3e-12 6.3e-13
+ * IEEE 0,10000 250000 9.3e-11 7.1e-12
+ * IEEE 0,100000 10000 8.7e-10 4.8e-11
+ * Outputs smaller than the IEEE gradual underflow threshold
+ * were excluded from these statistics.
+ *
+ * ERROR MESSAGES:
+ * message condition value returned
+ * incbet domain x<0, x>1 nan
+ * incbet underflow nan
+ */
+
+ EIGEN_STATIC_ASSERT((internal::is_same<Scalar, Scalar>::value == false),
+ THIS_TYPE_IS_NOT_SUPPORTED);
+ return Scalar(0);
+ }
+};
+
+/* Continued fraction expansion #1 for incomplete beta integral (small_branch = True)
+ * Continued fraction expansion #2 for incomplete beta integral (small_branch = False)
+ */
+template <typename Scalar>
+struct incbeta_cfe {
+ EIGEN_DEVICE_FUNC
+ static EIGEN_STRONG_INLINE Scalar run(Scalar a, Scalar b, Scalar x, bool small_branch) {
+ EIGEN_STATIC_ASSERT((internal::is_same<Scalar, float>::value ||
+ internal::is_same<Scalar, double>::value),
+ THIS_TYPE_IS_NOT_SUPPORTED);
+ const Scalar big = cephes_helper<Scalar>::big();
+ const Scalar machep = cephes_helper<Scalar>::machep();
+ const Scalar biginv = cephes_helper<Scalar>::biginv();
+
+ const Scalar zero = 0;
+ const Scalar one = 1;
+ const Scalar two = 2;
+
+ Scalar xk, pk, pkm1, pkm2, qk, qkm1, qkm2;
+ Scalar k1, k2, k3, k4, k5, k6, k7, k8, k26update;
+ Scalar ans;
+ int n;
+
+ const int num_iters = (internal::is_same<Scalar, float>::value) ? 100 : 300;
+ const Scalar thresh =
+ (internal::is_same<Scalar, float>::value) ? machep : Scalar(3) * machep;
+ Scalar r = (internal::is_same<Scalar, float>::value) ? zero : one;
+
+ if (small_branch) {
+ k1 = a;
+ k2 = a + b;
+ k3 = a;
+ k4 = a + one;
+ k5 = one;
+ k6 = b - one;
+ k7 = k4;
+ k8 = a + two;
+ k26update = one;
+ } else {
+ k1 = a;
+ k2 = b - one;
+ k3 = a;
+ k4 = a + one;
+ k5 = one;
+ k6 = a + b;
+ k7 = a + one;
+ k8 = a + two;
+ k26update = -one;
+ x = x / (one - x);
+ }
+
+ pkm2 = zero;
+ qkm2 = one;
+ pkm1 = one;
+ qkm1 = one;
+ ans = one;
+ n = 0;
+
+ do {
+ xk = -(x * k1 * k2) / (k3 * k4);
+ pk = pkm1 + pkm2 * xk;
+ qk = qkm1 + qkm2 * xk;
+ pkm2 = pkm1;
+ pkm1 = pk;
+ qkm2 = qkm1;
+ qkm1 = qk;
+
+ xk = (x * k5 * k6) / (k7 * k8);
+ pk = pkm1 + pkm2 * xk;
+ qk = qkm1 + qkm2 * xk;
+ pkm2 = pkm1;
+ pkm1 = pk;
+ qkm2 = qkm1;
+ qkm1 = qk;
+
+ if (qk != zero) {
+ r = pk / qk;
+ if (numext::abs(ans - r) < numext::abs(r) * thresh) {
+ return r;
+ }
+ ans = r;
+ }
+
+ k1 += one;
+ k2 += k26update;
+ k3 += two;
+ k4 += two;
+ k5 += one;
+ k6 -= k26update;
+ k7 += two;
+ k8 += two;
+
+ if ((numext::abs(qk) + numext::abs(pk)) > big) {
+ pkm2 *= biginv;
+ pkm1 *= biginv;
+ qkm2 *= biginv;
+ qkm1 *= biginv;
+ }
+ if ((numext::abs(qk) < biginv) || (numext::abs(pk) < biginv)) {
+ pkm2 *= big;
+ pkm1 *= big;
+ qkm2 *= big;
+ qkm1 *= big;
+ }
+ } while (++n < num_iters);
+
+ return ans;
+ }
+};
+
+/* Helper functions depending on the Scalar type */
+template <typename Scalar>
+struct betainc_helper {};
+
+template <>
+struct betainc_helper<float> {
+ /* Core implementation; assumes a > 1.0 */
+ EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE float incbsa(float aa, float bb,
+ float xx) {
+ float ans, a, b, t, x, onemx;
+ bool reversed_a_b = false;
+
+ onemx = 1.0f - xx;
+
+ /* see if x is greater than the mean */
+ if (xx > (aa / (aa + bb))) {
+ reversed_a_b = true;
+ a = bb;
+ b = aa;
+ t = xx;
+ x = onemx;
+ } else {
+ a = aa;
+ b = bb;
+ t = onemx;
+ x = xx;
+ }
+
+ /* Choose expansion for optimal convergence */
+ if (b > 10.0f) {
+ if (numext::abs(b * x / a) < 0.3f) {
+ t = betainc_helper<float>::incbps(a, b, x);
+ if (reversed_a_b) t = 1.0f - t;
+ return t;
+ }
+ }
+
+ ans = x * (a + b - 2.0f) / (a - 1.0f);
+ if (ans < 1.0f) {
+ ans = incbeta_cfe<float>::run(a, b, x, true /* small_branch */);
+ t = b * numext::log(t);
+ } else {
+ ans = incbeta_cfe<float>::run(a, b, x, false /* small_branch */);
+ t = (b - 1.0f) * numext::log(t);
+ }
+
+ t += a * numext::log(x) + lgamma_impl<float>::run(a + b) -
+ lgamma_impl<float>::run(a) - lgamma_impl<float>::run(b);
+ t += numext::log(ans / a);
+ t = numext::exp(t);
+
+ if (reversed_a_b) t = 1.0f - t;
+ return t;
+ }
+
+ EIGEN_DEVICE_FUNC
+ static EIGEN_STRONG_INLINE float incbps(float a, float b, float x) {
+ float t, u, y, s;
+ const float machep = cephes_helper<float>::machep();
+
+ y = a * numext::log(x) + (b - 1.0f) * numext::log1p(-x) - numext::log(a);
+ y -= lgamma_impl<float>::run(a) + lgamma_impl<float>::run(b);
+ y += lgamma_impl<float>::run(a + b);
+
+ t = x / (1.0f - x);
+ s = 0.0f;
+ u = 1.0f;
+ do {
+ b -= 1.0f;
+ if (b == 0.0f) {
+ break;
+ }
+ a += 1.0f;
+ u *= t * b / a;
+ s += u;
+ } while (numext::abs(u) > machep);
+
+ return numext::exp(y) * (1.0f + s);
+ }
+};
+
+template <>
+struct betainc_impl<float> {
+ EIGEN_DEVICE_FUNC
+ static float run(float a, float b, float x) {
+ const float nan = NumTraits<float>::quiet_NaN();
+ float ans, t;
+
+ if (a <= 0.0f) return nan;
+ if (b <= 0.0f) return nan;
+ if ((x <= 0.0f) || (x >= 1.0f)) {
+ if (x == 0.0f) return 0.0f;
+ if (x == 1.0f) return 1.0f;
+ // mtherr("betaincf", DOMAIN);
+ return nan;
+ }
+
+ /* transformation for small aa */
+ if (a <= 1.0f) {
+ ans = betainc_helper<float>::incbsa(a + 1.0f, b, x);
+ t = a * numext::log(x) + b * numext::log1p(-x) +
+ lgamma_impl<float>::run(a + b) - lgamma_impl<float>::run(a + 1.0f) -
+ lgamma_impl<float>::run(b);
+ return (ans + numext::exp(t));
+ } else {
+ return betainc_helper<float>::incbsa(a, b, x);
+ }
+ }
+};
+
+template <>
+struct betainc_helper<double> {
+ EIGEN_DEVICE_FUNC
+ static EIGEN_STRONG_INLINE double incbps(double a, double b, double x) {
+ const double machep = cephes_helper<double>::machep();
+
+ double s, t, u, v, n, t1, z, ai;
+
+ ai = 1.0 / a;
+ u = (1.0 - b) * x;
+ v = u / (a + 1.0);
+ t1 = v;
+ t = u;
+ n = 2.0;
+ s = 0.0;
+ z = machep * ai;
+ while (numext::abs(v) > z) {
+ u = (n - b) * x / n;
+ t *= u;
+ v = t / (a + n);
+ s += v;
+ n += 1.0;
+ }
+ s += t1;
+ s += ai;
+
+ u = a * numext::log(x);
+ // TODO: gamma() is not directly implemented in Eigen.
+ /*
+ if ((a + b) < maxgam && numext::abs(u) < maxlog) {
+ t = gamma(a + b) / (gamma(a) * gamma(b));
+ s = s * t * pow(x, a);
+ } else {
+ */
+ t = lgamma_impl<double>::run(a + b) - lgamma_impl<double>::run(a) -
+ lgamma_impl<double>::run(b) + u + numext::log(s);
+ return s = numext::exp(t);
+ }
+};
+
+template <>
+struct betainc_impl<double> {
+ EIGEN_DEVICE_FUNC
+ static double run(double aa, double bb, double xx) {
+ const double nan = NumTraits<double>::quiet_NaN();
+ const double machep = cephes_helper<double>::machep();
+ // const double maxgam = 171.624376956302725;
+
+ double a, b, t, x, xc, w, y;
+ bool reversed_a_b = false;
+
+ if (aa <= 0.0 || bb <= 0.0) {
+ return nan; // goto domerr;
+ }
+
+ if ((xx <= 0.0) || (xx >= 1.0)) {
+ if (xx == 0.0) return (0.0);
+ if (xx == 1.0) return (1.0);
+ // mtherr("incbet", DOMAIN);
+ return nan;
+ }
+
+ if ((bb * xx) <= 1.0 && xx <= 0.95) {
+ return betainc_helper<double>::incbps(aa, bb, xx);
+ }
+
+ w = 1.0 - xx;
+
+ /* Reverse a and b if x is greater than the mean. */
+ if (xx > (aa / (aa + bb))) {
+ reversed_a_b = true;
+ a = bb;
+ b = aa;
+ xc = xx;
+ x = w;
+ } else {
+ a = aa;
+ b = bb;
+ xc = w;
+ x = xx;
+ }
+
+ if (reversed_a_b && (b * x) <= 1.0 && x <= 0.95) {
+ t = betainc_helper<double>::incbps(a, b, x);
+ if (t <= machep) {
+ t = 1.0 - machep;
+ } else {
+ t = 1.0 - t;
+ }
+ return t;
+ }
+
+ /* Choose expansion for better convergence. */
+ y = x * (a + b - 2.0) - (a - 1.0);
+ if (y < 0.0) {
+ w = incbeta_cfe<double>::run(a, b, x, true /* small_branch */);
+ } else {
+ w = incbeta_cfe<double>::run(a, b, x, false /* small_branch */) / xc;
+ }
+
+ /* Multiply w by the factor
+ x^a (1-x)^b gamma(a+b) / ( a gamma(a) gamma(b) ). */
+
+ y = a * numext::log(x);
+ t = b * numext::log(xc);
+ // TODO: gamma is not directly implemented in Eigen.
+ /*
+ if ((a + b) < maxgam && numext::abs(y) < maxlog && numext::abs(t) < maxlog)
+ {
+ t = pow(xc, b);
+ t *= pow(x, a);
+ t /= a;
+ t *= w;
+ t *= gamma(a + b) / (gamma(a) * gamma(b));
+ } else {
+ */
+ /* Resort to logarithms. */
+ y += t + lgamma_impl<double>::run(a + b) - lgamma_impl<double>::run(a) -
+ lgamma_impl<double>::run(b);
+ y += numext::log(w / a);
+ t = numext::exp(y);
+
+ /* } */
+ // done:
+
+ if (reversed_a_b) {
+ if (t <= machep) {
+ t = 1.0 - machep;
+ } else {
+ t = 1.0 - t;
+ }
+ }
+ return t;
+ }
+};
+
+#endif // EIGEN_HAS_C99_MATH
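+
+/* Illustrative sketch (hypothetical values): the symmetry relation quoted in
+ * the description, 1 - betainc(a, b, x) == betainc(b, a, 1-x), can be
+ * verified directly:
+ * \code
+ * double t1 = betainc_impl<double>::run(2.0, 3.0, 0.4);  // approx 0.5248
+ * double t2 = betainc_impl<double>::run(3.0, 2.0, 0.6);  // approx 0.4752 == 1 - t1
+ * \endcode
+ */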
+
+} // end namespace internal
+
+namespace numext {
+
+template <typename Scalar>
+EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(lgamma, Scalar)
+ lgamma(const Scalar& x) {
+ return EIGEN_MATHFUNC_IMPL(lgamma, Scalar)::run(x);
+}
+
+template <typename Scalar>
+EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(digamma, Scalar)
+ digamma(const Scalar& x) {
+ return EIGEN_MATHFUNC_IMPL(digamma, Scalar)::run(x);
+}
+
+template <typename Scalar>
+EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(zeta, Scalar)
+zeta(const Scalar& x, const Scalar& q) {
+ return EIGEN_MATHFUNC_IMPL(zeta, Scalar)::run(x, q);
+}
+
+template <typename Scalar>
+EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(polygamma, Scalar)
+polygamma(const Scalar& n, const Scalar& x) {
+ return EIGEN_MATHFUNC_IMPL(polygamma, Scalar)::run(n, x);
+}
+
+template <typename Scalar>
+EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(erf, Scalar)
+ erf(const Scalar& x) {
+ return EIGEN_MATHFUNC_IMPL(erf, Scalar)::run(x);
+}
+
+template <typename Scalar>
+EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(erfc, Scalar)
+ erfc(const Scalar& x) {
+ return EIGEN_MATHFUNC_IMPL(erfc, Scalar)::run(x);
+}
+
+template <typename Scalar>
+EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(igamma, Scalar)
+ igamma(const Scalar& a, const Scalar& x) {
+ return EIGEN_MATHFUNC_IMPL(igamma, Scalar)::run(a, x);
+}
+
+template <typename Scalar>
+EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(igammac, Scalar)
+ igammac(const Scalar& a, const Scalar& x) {
+ return EIGEN_MATHFUNC_IMPL(igammac, Scalar)::run(a, x);
+}
+
+template <typename Scalar>
+EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(betainc, Scalar)
+ betainc(const Scalar& a, const Scalar& b, const Scalar& x) {
+ return EIGEN_MATHFUNC_IMPL(betainc, Scalar)::run(a, b, x);
+}
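+
+/* Illustrative sketch (assuming this header is reached through
+ * <unsupported/Eigen/SpecialFunctions>): the numext wrappers are the public
+ * scalar entry points on which the packet and half overloads build:
+ * \code
+ * double g = Eigen::numext::lgamma(4.0);       // log(3!), approx 1.7917595
+ * double p = Eigen::numext::igamma(2.0, 3.0);  // lower incomplete gamma
+ * \endcode
+ */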
+
+} // end namespace numext
+
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPECIAL_FUNCTIONS_H
diff --git a/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsPacketMath.h b/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsPacketMath.h
new file mode 100644
index 000000000..46d60d323
--- /dev/null
+++ b/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsPacketMath.h
@@ -0,0 +1,58 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2016 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPECIALFUNCTIONS_PACKETMATH_H
+#define EIGEN_SPECIALFUNCTIONS_PACKETMATH_H
+
+namespace Eigen {
+
+namespace internal {
+
+/** \internal \returns the ln(|gamma(\a a)|) (coeff-wise) */
+template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+Packet plgamma(const Packet& a) { using numext::lgamma; return lgamma(a); }
+
+/** \internal \returns the derivative of lgamma, psi(\a a) (coeff-wise) */
+template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+Packet pdigamma(const Packet& a) { using numext::digamma; return digamma(a); }
+
+/** \internal \returns the zeta function of two arguments (coeff-wise) */
+template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+Packet pzeta(const Packet& x, const Packet& q) { using numext::zeta; return zeta(x, q); }
+
+/** \internal \returns the polygamma function (coeff-wise) */
+template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+Packet ppolygamma(const Packet& n, const Packet& x) { using numext::polygamma; return polygamma(n, x); }
+
+/** \internal \returns the erf(\a a) (coeff-wise) */
+template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+Packet perf(const Packet& a) { using numext::erf; return erf(a); }
+
+/** \internal \returns the erfc(\a a) (coeff-wise) */
+template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+Packet perfc(const Packet& a) { using numext::erfc; return erfc(a); }
+
+/** \internal \returns the incomplete gamma function igamma(\a a, \a x) */
+template<typename Packet> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+Packet pigamma(const Packet& a, const Packet& x) { using numext::igamma; return igamma(a, x); }
+
+/** \internal \returns the complementary incomplete gamma function igammac(\a a, \a x) */
+template<typename Packet> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+Packet pigammac(const Packet& a, const Packet& x) { using numext::igammac; return igammac(a, x); }
+
+/** \internal \returns the incomplete beta integral betainc(\a a, \a b, \a x) */
+template<typename Packet> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+Packet pbetainc(const Packet& a, const Packet& b,const Packet& x) { using numext::betainc; return betainc(a, b, x); }
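+
+/* Illustrative sketch: the generic definitions above forward to numext, so
+ * they already work for "packets" of size one, i.e. plain scalars;
+ * vectorized packet types override them in architecture-specific headers:
+ * \code
+ * float d = Eigen::internal::pdigamma(1.0f);       // approx -0.5772157f
+ * float p = Eigen::internal::pigamma(2.0f, 3.0f);  // approx  0.8008517f
+ * \endcode
+ */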
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPECIALFUNCTIONS_PACKETMATH_H
+
diff --git a/unsupported/Eigen/src/SpecialFunctions/arch/CUDA/CudaSpecialFunctions.h b/unsupported/Eigen/src/SpecialFunctions/arch/CUDA/CudaSpecialFunctions.h
new file mode 100644
index 000000000..ec4fa8448
--- /dev/null
+++ b/unsupported/Eigen/src/SpecialFunctions/arch/CUDA/CudaSpecialFunctions.h
@@ -0,0 +1,165 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CUDA_SPECIALFUNCTIONS_H
+#define EIGEN_CUDA_SPECIALFUNCTIONS_H
+
+namespace Eigen {
+
+namespace internal {
+
+// Make sure this is only available when targeting a GPU: we don't want to
+// introduce conflicts between these packet_traits definitions and the ones
+// we'll use on the host side (SSE, AVX, ...)
+#if defined(__CUDACC__) && defined(EIGEN_USE_GPU)
+
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+float4 plgamma<float4>(const float4& a)
+{
+ return make_float4(lgammaf(a.x), lgammaf(a.y), lgammaf(a.z), lgammaf(a.w));
+}
+
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+double2 plgamma<double2>(const double2& a)
+{
+ using numext::lgamma;
+ return make_double2(lgamma(a.x), lgamma(a.y));
+}
+
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+float4 pdigamma<float4>(const float4& a)
+{
+ using numext::digamma;
+ return make_float4(digamma(a.x), digamma(a.y), digamma(a.z), digamma(a.w));
+}
+
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+double2 pdigamma<double2>(const double2& a)
+{
+ using numext::digamma;
+ return make_double2(digamma(a.x), digamma(a.y));
+}
+
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+float4 pzeta<float4>(const float4& x, const float4& q)
+{
+ using numext::zeta;
+ return make_float4(zeta(x.x, q.x), zeta(x.y, q.y), zeta(x.z, q.z), zeta(x.w, q.w));
+}
+
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+double2 pzeta<double2>(const double2& x, const double2& q)
+{
+ using numext::zeta;
+ return make_double2(zeta(x.x, q.x), zeta(x.y, q.y));
+}
+
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+float4 ppolygamma<float4>(const float4& n, const float4& x)
+{
+ using numext::polygamma;
+ return make_float4(polygamma(n.x, x.x), polygamma(n.y, x.y), polygamma(n.z, x.z), polygamma(n.w, x.w));
+}
+
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+double2 ppolygamma<double2>(const double2& n, const double2& x)
+{
+ using numext::polygamma;
+ return make_double2(polygamma(n.x, x.x), polygamma(n.y, x.y));
+}
+
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+float4 perf<float4>(const float4& a)
+{
+ return make_float4(erff(a.x), erff(a.y), erff(a.z), erff(a.w));
+}
+
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+double2 perf<double2>(const double2& a)
+{
+ using numext::erf;
+ return make_double2(erf(a.x), erf(a.y));
+}
+
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+float4 perfc<float4>(const float4& a)
+{
+ using numext::erfc;
+ return make_float4(erfc(a.x), erfc(a.y), erfc(a.z), erfc(a.w));
+}
+
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+double2 perfc<double2>(const double2& a)
+{
+ using numext::erfc;
+ return make_double2(erfc(a.x), erfc(a.y));
+}
+
+
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+float4 pigamma<float4>(const float4& a, const float4& x)
+{
+ using numext::igamma;
+ return make_float4(
+ igamma(a.x, x.x),
+ igamma(a.y, x.y),
+ igamma(a.z, x.z),
+ igamma(a.w, x.w));
+}
+
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+double2 pigamma<double2>(const double2& a, const double2& x)
+{
+ using numext::igamma;
+ return make_double2(igamma(a.x, x.x), igamma(a.y, x.y));
+}
+
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+float4 pigammac<float4>(const float4& a, const float4& x)
+{
+ using numext::igammac;
+ return make_float4(
+ igammac(a.x, x.x),
+ igammac(a.y, x.y),
+ igammac(a.z, x.z),
+ igammac(a.w, x.w));
+}
+
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+double2 pigammac<double2>(const double2& a, const double2& x)
+{
+ using numext::igammac;
+ return make_double2(igammac(a.x, x.x), igammac(a.y, x.y));
+}
+
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+float4 pbetainc<float4>(const float4& a, const float4& b, const float4& x)
+{
+ using numext::betainc;
+ return make_float4(
+ betainc(a.x, b.x, x.x),
+ betainc(a.y, b.y, x.y),
+ betainc(a.z, b.z, x.z),
+ betainc(a.w, b.w, x.w));
+}
+
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+double2 pbetainc<double2>(const double2& a, const double2& b, const double2& x)
+{
+ using numext::betainc;
+ return make_double2(betainc(a.x, b.x, x.x), betainc(a.y, b.y, x.y));
+}
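+
+/* Illustrative sketch (hypothetical kernel): inside device code these
+ * specializations process one CUDA vector type as a single packet:
+ * \code
+ * __global__ void erf4(const float4* in, float4* out, int n) {
+ *   int i = blockIdx.x * blockDim.x + threadIdx.x;
+ *   if (i < n) out[i] = Eigen::internal::perf<float4>(in[i]);
+ * }
+ * \endcode
+ */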
+
+#endif
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_CUDA_SPECIALFUNCTIONS_H
diff --git a/unsupported/Eigen/src/Splines/CMakeLists.txt b/unsupported/Eigen/src/Splines/CMakeLists.txt
deleted file mode 100644
index 55c6271e9..000000000
--- a/unsupported/Eigen/src/Splines/CMakeLists.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-FILE(GLOB Eigen_Splines_SRCS "*.h")
-
-INSTALL(FILES
- ${Eigen_Splines_SRCS}
- DESTINATION ${INCLUDE_INSTALL_DIR}/unsupported/Eigen/src/Splines COMPONENT Devel
- )
diff --git a/unsupported/Eigen/src/Splines/Spline.h b/unsupported/Eigen/src/Splines/Spline.h
index 771f10432..627f6e482 100644
--- a/unsupported/Eigen/src/Splines/Spline.h
+++ b/unsupported/Eigen/src/Splines/Spline.h
@@ -44,9 +44,15 @@ namespace Eigen
/** \brief The data type used to store knot vectors. */
typedef typename SplineTraits<Spline>::KnotVectorType KnotVectorType;
+
+ /** \brief The data type used to store parameter vectors. */
+ typedef typename SplineTraits<Spline>::ParameterVectorType ParameterVectorType;
/** \brief The data type used to store non-zero basis functions. */
typedef typename SplineTraits<Spline>::BasisVectorType BasisVectorType;
+
+ /** \brief The data type used to store the values of the basis function derivatives. */
+ typedef typename SplineTraits<Spline>::BasisDerivativeType BasisDerivativeType;
/** \brief The data type representing the spline's control points. */
typedef typename SplineTraits<Spline>::ControlPointVectorType ControlPointVectorType;
@@ -57,7 +63,7 @@ namespace Eigen
**/
Spline()
: m_knots(1, (Degree==Dynamic ? 2 : 2*Degree+2))
- , m_ctrls(ControlPointVectorType::Zero(2,(Degree==Dynamic ? 1 : Degree+1)))
+ , m_ctrls(ControlPointVectorType::Zero(Dimension,(Degree==Dynamic ? 1 : Degree+1)))
{
// in theory this code can go to the initializer list but it will get pretty
// much unreadable ...
@@ -88,7 +94,7 @@ namespace Eigen
const KnotVectorType& knots() const { return m_knots; }
/**
- * \brief Returns the knots of the underlying spline.
+ * \brief Returns the control points of the underlying spline.
**/
const ControlPointVectorType& ctrls() const { return m_ctrls; }
@@ -203,10 +209,25 @@ namespace Eigen
**/
static BasisVectorType BasisFunctions(Scalar u, DenseIndex degree, const KnotVectorType& knots);
+ /**
+ * \copydoc Spline::basisFunctionDerivatives
+ * \param degree The degree of the underlying spline
+ * \param knots The underlying spline's knot vector.
+ **/
+ static BasisDerivativeType BasisFunctionDerivatives(
+ const Scalar u, const DenseIndex order, const DenseIndex degree, const KnotVectorType& knots);
private:
KnotVectorType m_knots; /*!< Knot vector. */
ControlPointVectorType m_ctrls; /*!< Control points. */
+
+ template <typename DerivativeType>
+ static void BasisFunctionDerivativesImpl(
+ const typename Spline<_Scalar, _Dim, _Degree>::Scalar u,
+ const DenseIndex order,
+ const DenseIndex p,
+ const typename Spline<_Scalar, _Dim, _Degree>::KnotVectorType& U,
+ DerivativeType& N_);
};
template <typename _Scalar, int _Dim, int _Degree>
@@ -345,20 +366,24 @@ namespace Eigen
}
/* --------------------------------------------------------------------------------------------- */
-
- template <typename SplineType, typename DerivativeType>
- void basisFunctionDerivativesImpl(const SplineType& spline, typename SplineType::Scalar u, DenseIndex order, DerivativeType& N_)
+
+
+ template <typename _Scalar, int _Dim, int _Degree>
+ template <typename DerivativeType>
+ void Spline<_Scalar, _Dim, _Degree>::BasisFunctionDerivativesImpl(
+ const typename Spline<_Scalar, _Dim, _Degree>::Scalar u,
+ const DenseIndex order,
+ const DenseIndex p,
+ const typename Spline<_Scalar, _Dim, _Degree>::KnotVectorType& U,
+ DerivativeType& N_)
{
+ typedef Spline<_Scalar, _Dim, _Degree> SplineType;
enum { Order = SplineTraits<SplineType>::OrderAtCompileTime };
typedef typename SplineTraits<SplineType>::Scalar Scalar;
typedef typename SplineTraits<SplineType>::BasisVectorType BasisVectorType;
- typedef typename SplineTraits<SplineType>::KnotVectorType KnotVectorType;
-
- const KnotVectorType& U = spline.knots();
-
- const DenseIndex p = spline.degree();
- const DenseIndex span = spline.span(u);
+
+ const DenseIndex span = SplineType::Span(u, p, U);
const DenseIndex n = (std::min)(p, order);
@@ -369,7 +394,7 @@ namespace Eigen
Matrix<Scalar,Order,Order> ndu(p+1,p+1);
- double saved, temp;
+ Scalar saved, temp; // FIXME These were double instead of Scalar. Was there a reason for that?
ndu(0,0) = 1.0;
@@ -408,7 +433,7 @@ namespace Eigen
// Compute the k-th derivative
for (DenseIndex k=1; k<=static_cast<DenseIndex>(n); ++k)
{
- double d = 0.0;
+ Scalar d = 0.0;
DenseIndex rk,pk,j1,j2;
rk = r-k; pk = p-k;
@@ -446,7 +471,7 @@ namespace Eigen
r = p;
for (DenseIndex k=1; k<=static_cast<DenseIndex>(n); ++k)
{
- for (DenseIndex j=p; j>=0; --j) N_(k,j) *= r;
+ for (j=p; j>=0; --j) N_(k,j) *= r;
r *= p-k;
}
}
@@ -455,8 +480,8 @@ namespace Eigen
typename SplineTraits< Spline<_Scalar, _Dim, _Degree> >::BasisDerivativeType
Spline<_Scalar, _Dim, _Degree>::basisFunctionDerivatives(Scalar u, DenseIndex order) const
{
- typename SplineTraits< Spline >::BasisDerivativeType der;
- basisFunctionDerivativesImpl(*this, u, order, der);
+ typename SplineTraits<Spline<_Scalar, _Dim, _Degree> >::BasisDerivativeType der;
+ BasisFunctionDerivativesImpl(u, order, degree(), knots(), der);
return der;
}
@@ -465,8 +490,21 @@ namespace Eigen
typename SplineTraits< Spline<_Scalar, _Dim, _Degree>, DerivativeOrder >::BasisDerivativeType
Spline<_Scalar, _Dim, _Degree>::basisFunctionDerivatives(Scalar u, DenseIndex order) const
{
- typename SplineTraits< Spline, DerivativeOrder >::BasisDerivativeType der;
- basisFunctionDerivativesImpl(*this, u, order, der);
+ typename SplineTraits< Spline<_Scalar, _Dim, _Degree>, DerivativeOrder >::BasisDerivativeType der;
+ BasisFunctionDerivativesImpl(u, order, degree(), knots(), der);
+ return der;
+ }
+
+ template <typename _Scalar, int _Dim, int _Degree>
+ typename SplineTraits<Spline<_Scalar, _Dim, _Degree> >::BasisDerivativeType
+ Spline<_Scalar, _Dim, _Degree>::BasisFunctionDerivatives(
+ const typename Spline<_Scalar, _Dim, _Degree>::Scalar u,
+ const DenseIndex order,
+ const DenseIndex degree,
+ const typename Spline<_Scalar, _Dim, _Degree>::KnotVectorType& knots)
+ {
+ typename SplineTraits<Spline>::BasisDerivativeType der;
+ BasisFunctionDerivativesImpl(u, order, degree, knots, der);
return der;
}
}
diff --git a/unsupported/Eigen/src/Splines/SplineFitting.h b/unsupported/Eigen/src/Splines/SplineFitting.h
index 0265d532c..c761a9b3d 100644
--- a/unsupported/Eigen/src/Splines/SplineFitting.h
+++ b/unsupported/Eigen/src/Splines/SplineFitting.h
@@ -10,10 +10,14 @@
#ifndef EIGEN_SPLINE_FITTING_H
#define EIGEN_SPLINE_FITTING_H
+#include <algorithm>
+#include <functional>
#include <numeric>
+#include <vector>
#include "SplineFwd.h"
+#include <Eigen/LU>
#include <Eigen/QR>
namespace Eigen
@@ -50,6 +54,129 @@ namespace Eigen
}
/**
+ * \brief Computes knot averages when derivative constraints are present.
+ * Note that this is a technical interpretation of the referenced article
+ * since the algorithm contained therein is incorrect as written.
+ * \ingroup Splines_Module
+ *
+ * \param[in] parameters The parameters at which the interpolation B-Spline
+ * will intersect the given interpolation points. The parameters
+ * are assumed to be a non-decreasing sequence.
+ * \param[in] degree The degree of the interpolating B-Spline. This must be
+ * greater than zero.
+ * \param[in] derivativeIndices The indices corresponding to parameters at
+ * which there are derivative constraints. The indices are assumed
+ * to be a non-decreasing sequence.
+ * \param[out] knots The calculated knot vector. These will be returned as a
+ * non-decreasing sequence
+ *
+ * \sa Les A. Piegl, Khairan Rajab, Volha Smarodzinana. 2008.
+ * Curve interpolation with directional constraints for engineering design.
+ * Engineering with Computers
+ **/
+ template <typename KnotVectorType, typename ParameterVectorType, typename IndexArray>
+ void KnotAveragingWithDerivatives(const ParameterVectorType& parameters,
+ const unsigned int degree,
+ const IndexArray& derivativeIndices,
+ KnotVectorType& knots)
+ {
+ typedef typename ParameterVectorType::Scalar Scalar;
+
+ DenseIndex numParameters = parameters.size();
+ DenseIndex numDerivatives = derivativeIndices.size();
+
+ if (numDerivatives < 1)
+ {
+ KnotAveraging(parameters, degree, knots);
+ return;
+ }
+
+ DenseIndex startIndex;
+ DenseIndex endIndex;
+
+ DenseIndex numInternalDerivatives = numDerivatives;
+
+ if (derivativeIndices[0] == 0)
+ {
+ startIndex = 0;
+ --numInternalDerivatives;
+ }
+ else
+ {
+ startIndex = 1;
+ }
+ if (derivativeIndices[numDerivatives - 1] == numParameters - 1)
+ {
+ endIndex = numParameters - degree;
+ --numInternalDerivatives;
+ }
+ else
+ {
+ endIndex = numParameters - degree - 1;
+ }
+
+ // There are (endIndex - startIndex + 1) knots obtained from the averaging
+ // and 2 for the first and last parameters.
+ DenseIndex numAverageKnots = endIndex - startIndex + 3;
+ KnotVectorType averageKnots(numAverageKnots);
+ averageKnots[0] = parameters[0];
+
+ int newKnotIndex = 0;
+ for (DenseIndex i = startIndex; i <= endIndex; ++i)
+ averageKnots[++newKnotIndex] = parameters.segment(i, degree).mean();
+ averageKnots[++newKnotIndex] = parameters[numParameters - 1];
+
+ newKnotIndex = -1;
+
+ ParameterVectorType temporaryParameters(numParameters + 1);
+ KnotVectorType derivativeKnots(numInternalDerivatives);
+ for (DenseIndex i = 0; i < numAverageKnots - 1; ++i)
+ {
+ temporaryParameters[0] = averageKnots[i];
+ ParameterVectorType parameterIndices(numParameters);
+ int temporaryParameterIndex = 1;
+ for (DenseIndex j = 0; j < numParameters; ++j)
+ {
+ Scalar parameter = parameters[j];
+ if (parameter >= averageKnots[i] && parameter < averageKnots[i + 1])
+ {
+ parameterIndices[temporaryParameterIndex] = j;
+ temporaryParameters[temporaryParameterIndex++] = parameter;
+ }
+ }
+ temporaryParameters[temporaryParameterIndex] = averageKnots[i + 1];
+
+ for (int j = 0; j <= temporaryParameterIndex - 2; ++j)
+ {
+ for (DenseIndex k = 0; k < derivativeIndices.size(); ++k)
+ {
+ if (parameterIndices[j + 1] == derivativeIndices[k]
+ && parameterIndices[j + 1] != 0
+ && parameterIndices[j + 1] != numParameters - 1)
+ {
+ derivativeKnots[++newKnotIndex] = temporaryParameters.segment(j, 3).mean();
+ break;
+ }
+ }
+ }
+ }
+
+ KnotVectorType temporaryKnots(averageKnots.size() + derivativeKnots.size());
+
+ std::merge(averageKnots.data(), averageKnots.data() + averageKnots.size(),
+ derivativeKnots.data(), derivativeKnots.data() + derivativeKnots.size(),
+ temporaryKnots.data());
+
+ // Number of knots (one for each point and derivative) plus spline order.
+ DenseIndex numKnots = numParameters + numDerivatives + degree + 1;
+ knots.resize(numKnots);
+
+ knots.head(degree).fill(temporaryKnots[0]);
+ knots.tail(degree).fill(temporaryKnots.template tail<1>()[0]);
+ knots.segment(degree, temporaryKnots.size()) = temporaryKnots;
+ }
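+
+ /* Illustrative sketch (hypothetical values): a knot vector for a cubic fit
+ * with derivative constraints at both endpoints:
+ * \code
+ * Eigen::VectorXd parameters(5);
+ * parameters << 0.0, 0.25, 0.5, 0.75, 1.0;
+ * Eigen::VectorXi derivativeIndices(2);
+ * derivativeIndices << 0, 4;  // constraints at the first and last parameter
+ * Eigen::VectorXd knots;
+ * KnotAveragingWithDerivatives(parameters, 3, derivativeIndices, knots);
+ * // knots.size() == 5 + 2 + 3 + 1 == 11, clamped at both ends
+ * \endcode
+ */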
+
+ /**
* \brief Computes chord length parameters which are required for spline interpolation.
* \ingroup Splines_Module
*
@@ -86,6 +213,7 @@ namespace Eigen
struct SplineFitting
{
typedef typename SplineType::KnotVectorType KnotVectorType;
+ typedef typename SplineType::ParameterVectorType ParameterVectorType;
/**
* \brief Fits an interpolating Spline to the given data points.
@@ -109,6 +237,52 @@ namespace Eigen
**/
template <typename PointArrayType>
static SplineType Interpolate(const PointArrayType& pts, DenseIndex degree, const KnotVectorType& knot_parameters);
+
+ /**
+ * \brief Fits an interpolating spline to the given data points and
+ * derivatives.
+ *
+ * \param points The points for which an interpolating spline will be computed.
+ * \param derivatives The desired derivatives of the interpolating spline at interpolation
+ * points.
+ * \param derivativeIndices An array indicating which point each derivative belongs to. This
+ * must be the same size as @a derivatives.
+ * \param degree The degree of the interpolating spline.
+ *
+ * \returns A spline interpolating @a points with @a derivatives at those points.
+ *
+ * \sa Les A. Piegl, Khairan Rajab, Volha Smarodzinana. 2008.
+ * Curve interpolation with directional constraints for engineering design.
+ * Engineering with Computers
+ **/
+ template <typename PointArrayType, typename IndexArray>
+ static SplineType InterpolateWithDerivatives(const PointArrayType& points,
+ const PointArrayType& derivatives,
+ const IndexArray& derivativeIndices,
+ const unsigned int degree);
+
+ /**
+ * \brief Fits an interpolating spline to the given data points and derivatives.
+ *
+ * \param points The points for which an interpolating spline will be computed.
+ * \param derivatives The desired derivatives of the interpolating spline at interpolation points.
+ * \param derivativeIndices An array indicating which point each derivative belongs to. This
+ * must be the same size as @a derivatives.
+ * \param degree The degree of the interpolating spline.
+ * \param parameters The parameters corresponding to the interpolation points.
+ *
+ * \returns A spline interpolating @a points with @a derivatives at those points.
+ *
+ * \sa Les A. Piegl, Khairan Rajab, Volha Smarodzinana. 2008.
+ * Curve interpolation with directional constraints for engineering design.
+ * Engineering with Computers
+ */
+ template <typename PointArrayType, typename IndexArray>
+ static SplineType InterpolateWithDerivatives(const PointArrayType& points,
+ const PointArrayType& derivatives,
+ const IndexArray& derivativeIndices,
+ const unsigned int degree,
+ const ParameterVectorType& parameters);
};
template <typename SplineType>
@@ -151,6 +325,106 @@ namespace Eigen
ChordLengths(pts, chord_lengths);
return Interpolate(pts, degree, chord_lengths);
}
+
+ template <typename SplineType>
+ template <typename PointArrayType, typename IndexArray>
+ SplineType
+ SplineFitting<SplineType>::InterpolateWithDerivatives(const PointArrayType& points,
+ const PointArrayType& derivatives,
+ const IndexArray& derivativeIndices,
+ const unsigned int degree,
+ const ParameterVectorType& parameters)
+ {
+ typedef typename SplineType::KnotVectorType::Scalar Scalar;
+ typedef typename SplineType::ControlPointVectorType ControlPointVectorType;
+
+ typedef Matrix<Scalar, Dynamic, Dynamic> MatrixType;
+
+ const DenseIndex n = points.cols() + derivatives.cols();
+
+ KnotVectorType knots;
+
+ KnotAveragingWithDerivatives(parameters, degree, derivativeIndices, knots);
+
+    // Fill the linear system A x = b.
+ MatrixType A = MatrixType::Zero(n, n);
+
+ // Use these dimensions for quicker populating, then transpose for solving.
+ MatrixType b(points.rows(), n);
+
+ DenseIndex startRow;
+ DenseIndex derivativeStart;
+
+ // End derivatives.
+ if (derivativeIndices[0] == 0)
+ {
+ A.template block<1, 2>(1, 0) << -1, 1;
+
+ Scalar y = (knots(degree + 1) - knots(0)) / degree;
+ b.col(1) = y*derivatives.col(0);
+
+ startRow = 2;
+ derivativeStart = 1;
+ }
+ else
+ {
+ startRow = 1;
+ derivativeStart = 0;
+ }
+ if (derivativeIndices[derivatives.cols() - 1] == points.cols() - 1)
+ {
+ A.template block<1, 2>(n - 2, n - 2) << -1, 1;
+
+ Scalar y = (knots(knots.size() - 1) - knots(knots.size() - (degree + 2))) / degree;
+ b.col(b.cols() - 2) = y*derivatives.col(derivatives.cols() - 1);
+ }
+
+ DenseIndex row = startRow;
+ DenseIndex derivativeIndex = derivativeStart;
+ for (DenseIndex i = 1; i < parameters.size() - 1; ++i)
+ {
+ const DenseIndex span = SplineType::Span(parameters[i], degree, knots);
+
+ if (derivativeIndices[derivativeIndex] == i)
+ {
+ A.block(row, span - degree, 2, degree + 1)
+ = SplineType::BasisFunctionDerivatives(parameters[i], 1, degree, knots);
+
+ b.col(row++) = points.col(i);
+ b.col(row++) = derivatives.col(derivativeIndex++);
+ }
+ else
+ {
+ A.row(row++).segment(span - degree, degree + 1)
+ = SplineType::BasisFunctions(parameters[i], degree, knots);
+ }
+ }
+ b.col(0) = points.col(0);
+ b.col(b.cols() - 1) = points.col(points.cols() - 1);
+ A(0,0) = 1;
+ A(n - 1, n - 1) = 1;
+
+ // Solve
+ FullPivLU<MatrixType> lu(A);
+ ControlPointVectorType controlPoints = lu.solve(MatrixType(b.transpose())).transpose();
+
+ SplineType spline(knots, controlPoints);
+
+ return spline;
+ }
+
+ template <typename SplineType>
+ template <typename PointArrayType, typename IndexArray>
+ SplineType
+ SplineFitting<SplineType>::InterpolateWithDerivatives(const PointArrayType& points,
+ const PointArrayType& derivatives,
+ const IndexArray& derivativeIndices,
+ const unsigned int degree)
+ {
+ ParameterVectorType parameters;
+ ChordLengths(points, parameters);
+ return InterpolateWithDerivatives(points, derivatives, derivativeIndices, degree, parameters);
+ }
}
#endif // EIGEN_SPLINE_FITTING_H
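For reviewers, a minimal usage sketch of the new overload follows. The 2-D instantiation and the data values are illustrative only and not part of the patch; the in-tree splines test exercises the same API (chord-length parameters are computed internally by the parameterless overload):

    #include <unsupported/Eigen/Splines>
    #include <iostream>

    int main()
    {
      typedef Eigen::Spline<double, 2> Spline2d;

      // Five sample points of a planar curve (one point per column).
      Eigen::Array<double, 2, Eigen::Dynamic> points(2, 5);
      points << 0, 1, 2,  3, 4,
                0, 1, 0, -1, 0;

      // One desired first derivative per point; derivativeIndices states which
      // point each column of `derivatives` belongs to (same size, as documented).
      Eigen::Array<double, 2, Eigen::Dynamic> derivatives(2, 5);
      derivatives << 1, 1,  1, 1, 1,
                     1, 0, -1, 0, 1;
      Eigen::VectorXd derivativeIndices = Eigen::VectorXd::LinSpaced(5, 0, 4);

      const Spline2d spline = Eigen::SplineFitting<Spline2d>::InterpolateWithDerivatives(
          points, derivatives, derivativeIndices, 3 /* degree */);

      std::cout << spline(0.5).transpose() << std::endl;  // evaluate mid-curve
      return 0;
    }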
diff --git a/unsupported/Eigen/src/Splines/SplineFwd.h b/unsupported/Eigen/src/Splines/SplineFwd.h
index 49db8d35d..0a95fbf3e 100644
--- a/unsupported/Eigen/src/Splines/SplineFwd.h
+++ b/unsupported/Eigen/src/Splines/SplineFwd.h
@@ -31,6 +31,8 @@ namespace Eigen
enum { OrderAtCompileTime = _Degree==Dynamic ? Dynamic : _Degree+1 /*!< The spline curve's order at compile-time. */ };
enum { NumOfDerivativesAtCompileTime = OrderAtCompileTime /*!< The number of derivatives defined for the current spline. */ };
+
+ enum { DerivativeMemoryLayout = Dimension==1 ? RowMajor : ColMajor /*!< The derivative type's memory layout. */ };
/** \brief The data type used to store non-zero basis functions. */
typedef Array<Scalar,1,OrderAtCompileTime> BasisVectorType;
@@ -39,13 +41,16 @@ namespace Eigen
typedef Array<Scalar,Dynamic,Dynamic,RowMajor,NumOfDerivativesAtCompileTime,OrderAtCompileTime> BasisDerivativeType;
/** \brief The data type used to store the spline's derivative values. */
- typedef Array<Scalar,Dimension,Dynamic,ColMajor,Dimension,NumOfDerivativesAtCompileTime> DerivativeType;
+ typedef Array<Scalar,Dimension,Dynamic,DerivativeMemoryLayout,Dimension,NumOfDerivativesAtCompileTime> DerivativeType;
/** \brief The point type the spline is representing. */
typedef Array<Scalar,Dimension,1> PointType;
/** \brief The data type used to store knot vectors. */
typedef Array<Scalar,1,Dynamic> KnotVectorType;
+
+ /** \brief The data type used to store parameter vectors. */
+ typedef Array<Scalar,1,Dynamic> ParameterVectorType;
/** \brief The data type representing the spline's control points. */
typedef Array<Scalar,Dimension,Dynamic> ControlPointVectorType;
@@ -62,12 +67,14 @@ namespace Eigen
{
enum { OrderAtCompileTime = _Degree==Dynamic ? Dynamic : _Degree+1 /*!< The spline curve's order at compile-time. */ };
enum { NumOfDerivativesAtCompileTime = _DerivativeOrder==Dynamic ? Dynamic : _DerivativeOrder+1 /*!< The number of derivatives defined for the current spline. */ };
+
+ enum { DerivativeMemoryLayout = _Dim==1 ? RowMajor : ColMajor /*!< The derivative type's memory layout. */ };
/** \brief The data type used to store the values of the basis function derivatives. */
typedef Array<_Scalar,Dynamic,Dynamic,RowMajor,NumOfDerivativesAtCompileTime,OrderAtCompileTime> BasisDerivativeType;
/** \brief The data type used to store the spline's derivative values. */
- typedef Array<_Scalar,_Dim,Dynamic,ColMajor,_Dim,NumOfDerivativesAtCompileTime> DerivativeType;
+ typedef Array<_Scalar,_Dim,Dynamic,DerivativeMemoryLayout,_Dim,NumOfDerivativesAtCompileTime> DerivativeType;
};
/** \brief 2D float B-spline with dynamic degree. */
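A note on the DerivativeMemoryLayout change above: Eigen statically rejects column-major expressions with a single row, so for 1-D splines the 1 x Dynamic DerivativeType must be row-major. A minimal sketch of the failure mode (variable names are illustrative):

    #include <Eigen/Core>

    int main()
    {
      // Rejected at compile time (Eigen's INVALID_MATRIX_TEMPLATE_PARAMETERS
      // static assertion): a dynamic single-row array must not be column-major.
      // This is exactly what DerivativeType degenerated to when Dimension == 1.
      // Eigen::Array<double, 1, Eigen::Dynamic, Eigen::ColMajor> broken(1, 4);

      // Fine: single-row arrays must be row-major, hence the conditional layout.
      Eigen::Array<double, 1, Eigen::Dynamic, Eigen::RowMajor> ok(1, 4);
      return ok.size() == 4 ? 0 : 1;
    }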
diff --git a/unsupported/doc/Overview.dox b/unsupported/doc/Overview.dox
index d048377df..45464a545 100644
--- a/unsupported/doc/Overview.dox
+++ b/unsupported/doc/Overview.dox
@@ -1,14 +1,15 @@
+/// \brief Namespace containing all symbols from the %Eigen library.
namespace Eigen {
-/** \mainpage Eigen's unsupported modules
+/** \mainpage %Eigen's unsupported modules
-This is the API documentation for Eigen's unsupported modules.
+This is the API documentation for %Eigen's unsupported modules.
These modules are contributions from various users. They are provided "as is", without any support.
Click on the \e Modules tab at the top of this page to get a list of all unsupported modules.
-Don't miss the <a href="..//index.html">official Eigen documentation</a>.
+Don't miss the <a href="../index.html">official Eigen documentation</a>.
*/
@@ -18,8 +19,10 @@ Don't miss the <a href="..//index.html">official Eigen documentation</a>.
The unsupported modules are contributions from various users. They are
provided "as is", without any support. Nevertheless, some of them are
-subject to be included in Eigen in the future.
+subject to be included in %Eigen in the future.
*/
+/// \internal \brief Namespace containing low-level routines from the %Eigen library.
+namespace internal {}
}
diff --git a/unsupported/doc/examples/BVH_Example.cpp b/unsupported/doc/examples/BVH_Example.cpp
index 6b6fac075..afb0c94c2 100644
--- a/unsupported/doc/examples/BVH_Example.cpp
+++ b/unsupported/doc/examples/BVH_Example.cpp
@@ -6,9 +6,7 @@ using namespace Eigen;
typedef AlignedBox<double, 2> Box2d;
namespace Eigen {
- namespace internal {
- Box2d bounding_box(const Vector2d &v) { return Box2d(v, v); } //compute the bounding box of a single point
- }
+ Box2d bounding_box(const Vector2d &v) { return Box2d(v, v); } //compute the bounding box of a single point
}
struct PointPointMinimizer //how to compute squared distances between points and rectangles
diff --git a/unsupported/doc/examples/EulerAngles.cpp b/unsupported/doc/examples/EulerAngles.cpp
new file mode 100644
index 000000000..1ef6aee18
--- /dev/null
+++ b/unsupported/doc/examples/EulerAngles.cpp
@@ -0,0 +1,46 @@
+#include <unsupported/Eigen/EulerAngles>
+#include <iostream>
+
+using namespace Eigen;
+
+int main()
+{
+  // A common Euler system used by many armies around the world,
+  // where the first angle is the azimuth (the angle from north -
+  // the same angle that is shown on a compass),
+  // the second is the elevation (the angle from the horizon),
+  // and the third is the roll (the angle between the horizontal body
+  // direction and the ground plane).
+  // Remember that we're using radians here!
+ typedef EulerSystem<-EULER_Z, EULER_Y, EULER_X> MyArmySystem;
+ typedef EulerAngles<double, MyArmySystem> MyArmyAngles;
+
+ MyArmyAngles vehicleAngles(
+ 3.14/*PI*/ / 2, /* heading to east, notice that this angle is counter-clockwise */
+ -0.3, /* going down from a mountain */
+ 0.1); /* slightly rolled to the right */
+
+  // An Euler-angle representation that our plane uses.
+ EulerAnglesZYZd planeAngles(0.78474, 0.5271, -0.513794);
+
+ MyArmyAngles planeAnglesInMyArmyAngles = MyArmyAngles::FromRotation<true, false, false>(planeAngles);
+
+ std::cout << "vehicle angles(MyArmy): " << vehicleAngles << std::endl;
+ std::cout << "plane angles(ZYZ): " << planeAngles << std::endl;
+ std::cout << "plane angles(MyArmy): " << planeAnglesInMyArmyAngles << std::endl;
+
+  // Now let's rotate the plane a little bit.
+ std::cout << "==========================================================\n";
+ std::cout << "rotating plane now!\n";
+ std::cout << "==========================================================\n";
+
+ Quaterniond planeRotated = AngleAxisd(-0.342, Vector3d::UnitY()) * planeAngles;
+
+ planeAngles = planeRotated;
+ planeAnglesInMyArmyAngles = MyArmyAngles::FromRotation<true, false, false>(planeRotated);
+
+ std::cout << "new plane angles(ZYZ): " << planeAngles << std::endl;
+ std::cout << "new plane angles(MyArmy): " << planeAnglesInMyArmyAngles << std::endl;
+
+ return 0;
+}
diff --git a/unsupported/test/CMakeLists.txt b/unsupported/test/CMakeLists.txt
index 2e4cfdb2e..b5fa1c845 100644
--- a/unsupported/test/CMakeLists.txt
+++ b/unsupported/test/CMakeLists.txt
@@ -1,10 +1,26 @@
+# Generate the split-test header file only if it does not yet exist,
+# in order to prevent a rebuild every time CMake is configured.
+if(NOT EXISTS ${CMAKE_CURRENT_BINARY_DIR}/split_test_helper.h)
+ file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/split_test_helper.h "")
+ foreach(i RANGE 1 999)
+ file(APPEND ${CMAKE_CURRENT_BINARY_DIR}/split_test_helper.h
+ "#ifdef EIGEN_TEST_PART_${i}\n"
+ "#define CALL_SUBTEST_${i}(FUNC) CALL_SUBTEST(FUNC)\n"
+ "#else\n"
+ "#define CALL_SUBTEST_${i}(FUNC)\n"
+ "#endif\n\n"
+ )
+ endforeach()
+endif()
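+# For reference, iteration i == 1 of the loop above appends the following
+# block to split_test_helper.h:
+#
+#   #ifdef EIGEN_TEST_PART_1
+#   #define CALL_SUBTEST_1(FUNC) CALL_SUBTEST(FUNC)
+#   #else
+#   #define CALL_SUBTEST_1(FUNC)
+#   #endif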
set_property(GLOBAL PROPERTY EIGEN_CURRENT_SUBPROJECT "Unsupported")
add_custom_target(BuildUnsupported)
-include_directories(../../test ../../unsupported ../../Eigen
+include_directories(../../test ../../unsupported ../../Eigen
${CMAKE_CURRENT_BINARY_DIR}/../../test)
+find_package (Threads)
+
find_package(GoogleHash)
if(GOOGLEHASH_FOUND)
add_definitions("-DEIGEN_GOOGLEHASH_SUPPORT")
@@ -28,22 +44,30 @@ endif(ADOLC_FOUND)
ei_add_test(NonLinearOptimization)
ei_add_test(NumericalDiff)
+ei_add_test(autodiff_scalar)
ei_add_test(autodiff)
+
+if (NOT CMAKE_CXX_COMPILER MATCHES "clang\\+\\+$")
ei_add_test(BVH)
+endif()
+
ei_add_test(matrix_exponential)
ei_add_test(matrix_function)
ei_add_test(matrix_power)
ei_add_test(matrix_square_root)
ei_add_test(alignedvector3)
+
ei_add_test(FFT)
+ei_add_test(EulerAngles)
+
find_package(MPFR 2.3.0)
find_package(GMP)
-if(MPFR_FOUND)
+if(MPFR_FOUND AND EIGEN_COMPILER_SUPPORT_CXX11)
include_directories(${MPFR_INCLUDES} ./mpreal)
ei_add_property(EIGEN_TESTED_BACKENDS "MPFR C++, ")
set(EIGEN_MPFR_TEST_LIBRARIES ${MPFR_LIBRARIES} ${GMP_LIBRARIES})
- ei_add_test(mpreal_support "" "${EIGEN_MPFR_TEST_LIBRARIES}" )
+ ei_add_test(mpreal_support "-std=c++11" "${EIGEN_MPFR_TEST_LIBRARIES}" )
else()
ei_add_property(EIGEN_MISSING_BACKENDS "MPFR C++, ")
endif()
@@ -82,9 +106,152 @@ endif()
ei_add_test(polynomialsolver)
ei_add_test(polynomialutils)
-ei_add_test(kronecker_product)
ei_add_test(splines)
ei_add_test(gmres)
ei_add_test(minres)
ei_add_test(levenberg_marquardt)
-ei_add_test(bdcsvd)
+ei_add_test(kronecker_product)
+ei_add_test(special_functions)
+
+# TODO: The following test names are prefixed with the cxx11 string because,
+# historically, the tests depended on C++11. This is no longer the case, so we
+# ought to rename them.
+# FIXME: Old versions of MSVC fail to compile this code, so we simply disable
+# these tests when using Visual Studio. We should make the check stricter so
+# that the tests are enabled for newer versions of MSVC.
+if (NOT CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+ei_add_test(cxx11_tensor_dimension)
+ei_add_test(cxx11_tensor_map)
+ei_add_test(cxx11_tensor_assign)
+ei_add_test(cxx11_tensor_comparisons)
+ei_add_test(cxx11_tensor_forced_eval)
+ei_add_test(cxx11_tensor_math)
+ei_add_test(cxx11_tensor_const)
+ei_add_test(cxx11_tensor_intdiv)
+ei_add_test(cxx11_tensor_casts)
+ei_add_test(cxx11_tensor_empty)
+ei_add_test(cxx11_tensor_sugar)
+ei_add_test(cxx11_tensor_roundings)
+ei_add_test(cxx11_tensor_layout_swap)
+ei_add_test(cxx11_tensor_io)
+if("${CMAKE_SIZEOF_VOID_P}" EQUAL "8")
+  # This test requires __uint128_t, which is only available on 64-bit systems
+ ei_add_test(cxx11_tensor_uint128)
+endif()
+endif()
+
+if(EIGEN_TEST_CXX11)
+ if(EIGEN_TEST_SYCL)
+ ei_add_test_sycl(cxx11_tensor_sycl "-std=c++11")
+ ei_add_test_sycl(cxx11_tensor_forced_eval_sycl "-std=c++11")
+ ei_add_test_sycl(cxx11_tensor_broadcast_sycl "-std=c++11")
+ ei_add_test_sycl(cxx11_tensor_device_sycl "-std=c++11")
+ ei_add_test_sycl(cxx11_tensor_reduction_sycl "-std=c++11")
+ endif(EIGEN_TEST_SYCL)
+  # It should be safe to always run these tests as there is some fallback code for
+  # older compilers that don't support C++11.
+ set(CMAKE_CXX_STANDARD 11)
+
+ ei_add_test(cxx11_eventcount "-pthread" "${CMAKE_THREAD_LIBS_INIT}")
+ ei_add_test(cxx11_runqueue "-pthread" "${CMAKE_THREAD_LIBS_INIT}")
+ ei_add_test(cxx11_non_blocking_thread_pool "-pthread" "${CMAKE_THREAD_LIBS_INIT}")
+
+ ei_add_test(cxx11_meta)
+ ei_add_test(cxx11_tensor_simple)
+# ei_add_test(cxx11_tensor_symmetry)
+ ei_add_test(cxx11_tensor_index_list)
+ ei_add_test(cxx11_tensor_mixed_indices)
+ ei_add_test(cxx11_tensor_contraction)
+ ei_add_test(cxx11_tensor_convolution)
+ ei_add_test(cxx11_tensor_expr)
+ ei_add_test(cxx11_tensor_fixed_size)
+ ei_add_test(cxx11_tensor_of_const_values)
+ ei_add_test(cxx11_tensor_of_complex)
+ ei_add_test(cxx11_tensor_of_strings)
+ ei_add_test(cxx11_tensor_lvalue)
+ ei_add_test(cxx11_tensor_broadcasting)
+ ei_add_test(cxx11_tensor_chipping)
+ ei_add_test(cxx11_tensor_concatenation)
+ ei_add_test(cxx11_tensor_inflation)
+ ei_add_test(cxx11_tensor_morphing)
+ ei_add_test(cxx11_tensor_padding)
+ ei_add_test(cxx11_tensor_patch)
+ ei_add_test(cxx11_tensor_image_patch)
+ ei_add_test(cxx11_tensor_volume_patch)
+ ei_add_test(cxx11_tensor_reduction)
+ ei_add_test(cxx11_tensor_argmax)
+ ei_add_test(cxx11_tensor_shuffling)
+ ei_add_test(cxx11_tensor_striding)
+ ei_add_test(cxx11_tensor_notification "-pthread" "${CMAKE_THREAD_LIBS_INIT}")
+ ei_add_test(cxx11_tensor_thread_pool "-pthread" "${CMAKE_THREAD_LIBS_INIT}")
+ ei_add_test(cxx11_tensor_ref)
+ ei_add_test(cxx11_tensor_random)
+ ei_add_test(cxx11_tensor_generator)
+ ei_add_test(cxx11_tensor_custom_op)
+ ei_add_test(cxx11_tensor_custom_index)
+ ei_add_test(cxx11_tensor_fft)
+ ei_add_test(cxx11_tensor_ifft)
+ ei_add_test(cxx11_tensor_scan)
+
+endif()
+
+# These tests need nvcc
+find_package(CUDA 7.0)
+if(CUDA_FOUND AND EIGEN_TEST_CUDA)
+ # Make sure to compile without the -pedantic, -Wundef, -Wnon-virtual-dtor
+ # and -fno-check-new flags since they trigger thousands of compilation warnings
+ # in the CUDA runtime
+  # Also remove -ansi, which is incompatible with -std=c++11.
+ string(REPLACE "-pedantic" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
+ string(REPLACE "-Wundef" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
+ string(REPLACE "-Wnon-virtual-dtor" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
+ string(REPLACE "-fno-check-new" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
+ string(REPLACE "-ansi" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
+
+ message(STATUS "Flags used to compile cuda code: " ${CMAKE_CXX_FLAGS})
+
+ if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang")
+ set(CUDA_NVCC_FLAGS "-ccbin ${CMAKE_C_COMPILER}" CACHE STRING "nvcc flags" FORCE)
+ endif()
+ if(EIGEN_TEST_CUDA_CLANG)
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 --cuda-gpu-arch=sm_${EIGEN_CUDA_COMPUTE_ARCH}")
+ endif()
+
+ set(EIGEN_CUDA_RELAXED_CONSTEXPR "--expt-relaxed-constexpr")
+ if (${CUDA_VERSION} STREQUAL "7.0")
+ set(EIGEN_CUDA_RELAXED_CONSTEXPR "--relaxed-constexpr")
+ endif()
+
+ if( (NOT EIGEN_TEST_CXX11) OR (CMAKE_VERSION VERSION_LESS 3.3))
+ set(EIGEN_CUDA_CXX11_FLAG "-std=c++11")
+ else()
+ # otherwise the flag has already been added because of the above set(CMAKE_CXX_STANDARD 11)
+ set(EIGEN_CUDA_CXX11_FLAG "")
+ endif()
+
+ set(CUDA_NVCC_FLAGS "${EIGEN_CUDA_CXX11_FLAG} ${EIGEN_CUDA_RELAXED_CONSTEXPR} -arch compute_${EIGEN_CUDA_COMPUTE_ARCH} -Xcudafe \"--display_error_number\" ${CUDA_NVCC_FLAGS}")
+ cuda_include_directories("${CMAKE_CURRENT_BINARY_DIR}" "${CUDA_TOOLKIT_ROOT_DIR}/include")
+ set(EIGEN_ADD_TEST_FILENAME_EXTENSION "cu")
+
+ ei_add_test(cxx11_tensor_complex_cuda)
+ ei_add_test(cxx11_tensor_complex_cwise_ops_cuda)
+ ei_add_test(cxx11_tensor_reduction_cuda)
+ ei_add_test(cxx11_tensor_argmax_cuda)
+ ei_add_test(cxx11_tensor_cast_float16_cuda)
+ ei_add_test(cxx11_tensor_scan_cuda)
+
+ # Contractions require arch 3.0 or higher
+ if (${EIGEN_CUDA_COMPUTE_ARCH} GREATER 29)
+ ei_add_test(cxx11_tensor_device)
+ ei_add_test(cxx11_tensor_cuda)
+ ei_add_test(cxx11_tensor_contract_cuda)
+ ei_add_test(cxx11_tensor_of_float16_cuda)
+ endif()
+
+ # The random number generation code requires arch 3.5 or greater.
+ if (${EIGEN_CUDA_COMPUTE_ARCH} GREATER 34)
+ ei_add_test(cxx11_tensor_random_cuda)
+ endif()
+
+
+ unset(EIGEN_ADD_TEST_FILENAME_EXTENSION)
+endif()
diff --git a/unsupported/test/EulerAngles.cpp b/unsupported/test/EulerAngles.cpp
new file mode 100644
index 000000000..a8cb52864
--- /dev/null
+++ b/unsupported/test/EulerAngles.cpp
@@ -0,0 +1,208 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2015 Tal Hadad <tal_hd@hotmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+
+#include <unsupported/Eigen/EulerAngles>
+
+using namespace Eigen;
+
+template<typename EulerSystem, typename Scalar>
+void verify_euler_ranged(const Matrix<Scalar,3,1>& ea,
+ bool positiveRangeAlpha, bool positiveRangeBeta, bool positiveRangeGamma)
+{
+ typedef EulerAngles<Scalar, EulerSystem> EulerAnglesType;
+ typedef Matrix<Scalar,3,3> Matrix3;
+ typedef Matrix<Scalar,3,1> Vector3;
+ typedef Quaternion<Scalar> QuaternionType;
+ typedef AngleAxis<Scalar> AngleAxisType;
+ using std::abs;
+
+ Scalar alphaRangeStart, alphaRangeEnd;
+ Scalar betaRangeStart, betaRangeEnd;
+ Scalar gammaRangeStart, gammaRangeEnd;
+
+ if (positiveRangeAlpha)
+ {
+ alphaRangeStart = Scalar(0);
+ alphaRangeEnd = Scalar(2 * EIGEN_PI);
+ }
+ else
+ {
+ alphaRangeStart = -Scalar(EIGEN_PI);
+ alphaRangeEnd = Scalar(EIGEN_PI);
+ }
+
+ if (positiveRangeBeta)
+ {
+ betaRangeStart = Scalar(0);
+ betaRangeEnd = Scalar(2 * EIGEN_PI);
+ }
+ else
+ {
+ betaRangeStart = -Scalar(EIGEN_PI);
+ betaRangeEnd = Scalar(EIGEN_PI);
+ }
+
+ if (positiveRangeGamma)
+ {
+ gammaRangeStart = Scalar(0);
+ gammaRangeEnd = Scalar(2 * EIGEN_PI);
+ }
+ else
+ {
+ gammaRangeStart = -Scalar(EIGEN_PI);
+ gammaRangeEnd = Scalar(EIGEN_PI);
+ }
+
+ const int i = EulerSystem::AlphaAxisAbs - 1;
+ const int j = EulerSystem::BetaAxisAbs - 1;
+ const int k = EulerSystem::GammaAxisAbs - 1;
+
+ const int iFactor = EulerSystem::IsAlphaOpposite ? -1 : 1;
+ const int jFactor = EulerSystem::IsBetaOpposite ? -1 : 1;
+ const int kFactor = EulerSystem::IsGammaOpposite ? -1 : 1;
+
+ const Vector3 I = EulerAnglesType::AlphaAxisVector();
+ const Vector3 J = EulerAnglesType::BetaAxisVector();
+ const Vector3 K = EulerAnglesType::GammaAxisVector();
+
+ EulerAnglesType e(ea[0], ea[1], ea[2]);
+
+ Matrix3 m(e);
+ Vector3 eabis = EulerAnglesType(m, positiveRangeAlpha, positiveRangeBeta, positiveRangeGamma).angles();
+
+  // Check that eabis is in range
+ VERIFY(alphaRangeStart <= eabis[0] && eabis[0] <= alphaRangeEnd);
+ VERIFY(betaRangeStart <= eabis[1] && eabis[1] <= betaRangeEnd);
+ VERIFY(gammaRangeStart <= eabis[2] && eabis[2] <= gammaRangeEnd);
+
+ Vector3 eabis2 = m.eulerAngles(i, j, k);
+
+ // Invert the relevant axes
+ eabis2[0] *= iFactor;
+ eabis2[1] *= jFactor;
+ eabis2[2] *= kFactor;
+
+  // Wrap the angles into the requested positive range
+ if (positiveRangeAlpha && (eabis2[0] < 0))
+ eabis2[0] += Scalar(2 * EIGEN_PI);
+ if (positiveRangeBeta && (eabis2[1] < 0))
+ eabis2[1] += Scalar(2 * EIGEN_PI);
+ if (positiveRangeGamma && (eabis2[2] < 0))
+ eabis2[2] += Scalar(2 * EIGEN_PI);
+
+  VERIFY_IS_APPROX(eabis, eabis2);// Verify that our estimate matches what m.eulerAngles() returns
+
+ Matrix3 mbis(AngleAxisType(eabis[0], I) * AngleAxisType(eabis[1], J) * AngleAxisType(eabis[2], K));
+ VERIFY_IS_APPROX(m, mbis);
+
+  // Tests that are only relevant when no positive range is requested
+ if (!(positiveRangeAlpha || positiveRangeBeta || positiveRangeGamma))
+ {
+    /* If I==K and ea[1]==0, then there is no unique solution. */
+    /* The same remark applies when I!=K and |ea[1]| is close to pi/2. */
+ if( (i!=k || ea[1]!=0) && (i==k || !internal::isApprox(abs(ea[1]),Scalar(EIGEN_PI/2),test_precision<Scalar>())) )
+ VERIFY((ea-eabis).norm() <= test_precision<Scalar>());
+
+ // approx_or_less_than does not work for 0
+ VERIFY(0 < eabis[0] || test_isMuchSmallerThan(eabis[0], Scalar(1)));
+ }
+
+ // Quaternions
+ QuaternionType q(e);
+ eabis = EulerAnglesType(q, positiveRangeAlpha, positiveRangeBeta, positiveRangeGamma).angles();
+  VERIFY_IS_APPROX(eabis, eabis2);// Verify that the Euler angles are still the same
+}
+
+template<typename EulerSystem, typename Scalar>
+void verify_euler(const Matrix<Scalar,3,1>& ea)
+{
+ verify_euler_ranged<EulerSystem>(ea, false, false, false);
+ verify_euler_ranged<EulerSystem>(ea, false, false, true);
+ verify_euler_ranged<EulerSystem>(ea, false, true, false);
+ verify_euler_ranged<EulerSystem>(ea, false, true, true);
+ verify_euler_ranged<EulerSystem>(ea, true, false, false);
+ verify_euler_ranged<EulerSystem>(ea, true, false, true);
+ verify_euler_ranged<EulerSystem>(ea, true, true, false);
+ verify_euler_ranged<EulerSystem>(ea, true, true, true);
+}
+
+template<typename Scalar> void check_all_var(const Matrix<Scalar,3,1>& ea)
+{
+ verify_euler<EulerSystemXYZ>(ea);
+ verify_euler<EulerSystemXYX>(ea);
+ verify_euler<EulerSystemXZY>(ea);
+ verify_euler<EulerSystemXZX>(ea);
+
+ verify_euler<EulerSystemYZX>(ea);
+ verify_euler<EulerSystemYZY>(ea);
+ verify_euler<EulerSystemYXZ>(ea);
+ verify_euler<EulerSystemYXY>(ea);
+
+ verify_euler<EulerSystemZXY>(ea);
+ verify_euler<EulerSystemZXZ>(ea);
+ verify_euler<EulerSystemZYX>(ea);
+ verify_euler<EulerSystemZYZ>(ea);
+}
+
+template<typename Scalar> void eulerangles()
+{
+ typedef Matrix<Scalar,3,3> Matrix3;
+ typedef Matrix<Scalar,3,1> Vector3;
+ typedef Array<Scalar,3,1> Array3;
+ typedef Quaternion<Scalar> Quaternionx;
+ typedef AngleAxis<Scalar> AngleAxisType;
+
+ Scalar a = internal::random<Scalar>(-Scalar(EIGEN_PI), Scalar(EIGEN_PI));
+ Quaternionx q1;
+ q1 = AngleAxisType(a, Vector3::Random().normalized());
+ Matrix3 m;
+ m = q1;
+
+ Vector3 ea = m.eulerAngles(0,1,2);
+ check_all_var(ea);
+ ea = m.eulerAngles(0,1,0);
+ check_all_var(ea);
+
+ // Check with purely random Quaternion:
+ q1.coeffs() = Quaternionx::Coefficients::Random().normalized();
+ m = q1;
+ ea = m.eulerAngles(0,1,2);
+ check_all_var(ea);
+ ea = m.eulerAngles(0,1,0);
+ check_all_var(ea);
+
+ // Check with random angles in range [0:pi]x[-pi:pi]x[-pi:pi].
+ ea = (Array3::Random() + Array3(1,0,0))*Scalar(EIGEN_PI)*Array3(0.5,1,1);
+ check_all_var(ea);
+
+ ea[2] = ea[0] = internal::random<Scalar>(0,Scalar(EIGEN_PI));
+ check_all_var(ea);
+
+ ea[0] = ea[1] = internal::random<Scalar>(0,Scalar(EIGEN_PI));
+ check_all_var(ea);
+
+ ea[1] = 0;
+ check_all_var(ea);
+
+ ea.head(2).setZero();
+ check_all_var(ea);
+
+ ea.setZero();
+ check_all_var(ea);
+}
+
+void test_EulerAngles()
+{
+ for(int i = 0; i < g_repeat; i++) {
+ CALL_SUBTEST_1( eulerangles<float>() );
+ CALL_SUBTEST_2( eulerangles<double>() );
+ }
+}
diff --git a/unsupported/test/FFTW.cpp b/unsupported/test/FFTW.cpp
index d3718e2d2..8b7528fb7 100644
--- a/unsupported/test/FFTW.cpp
+++ b/unsupported/test/FFTW.cpp
@@ -18,11 +18,11 @@ using namespace Eigen;
template < typename T>
-complex<long double> promote(complex<T> x) { return complex<long double>(x.real(),x.imag()); }
+complex<long double> promote(complex<T> x) { return complex<long double>((long double)x.real(),(long double)x.imag()); }
-complex<long double> promote(float x) { return complex<long double>( x); }
-complex<long double> promote(double x) { return complex<long double>( x); }
-complex<long double> promote(long double x) { return complex<long double>( x); }
+complex<long double> promote(float x) { return complex<long double>((long double)x); }
+complex<long double> promote(double x) { return complex<long double>((long double)x); }
+complex<long double> promote(long double x) { return complex<long double>((long double)x); }
template <typename VT1,typename VT2>
@@ -33,7 +33,7 @@ complex<long double> promote(long double x) { return complex<long double>( x);
long double pi = acos((long double)-1 );
for (size_t k0=0;k0<(size_t)fftbuf.size();++k0) {
complex<long double> acc = 0;
- long double phinc = -2.*k0* pi / timebuf.size();
+ long double phinc = (long double)(-2.)*k0* pi / timebuf.size();
for (size_t k1=0;k1<(size_t)timebuf.size();++k1) {
acc += promote( timebuf[k1] ) * exp( complex<long double>(0,k1*phinc) );
}
@@ -54,8 +54,8 @@ complex<long double> promote(long double x) { return complex<long double>( x);
long double difpower=0;
size_t n = (min)( buf1.size(),buf2.size() );
for (size_t k=0;k<n;++k) {
- totalpower += (numext::abs2( buf1[k] ) + numext::abs2(buf2[k]) )/2.;
- difpower += numext::abs2(buf1[k] - buf2[k]);
+ totalpower += (long double)((numext::abs2( buf1[k] ) + numext::abs2(buf2[k]) )/2);
+ difpower += (long double)(numext::abs2(buf1[k] - buf2[k]));
}
return sqrt(difpower/totalpower);
}
@@ -93,19 +93,19 @@ void test_scalar_generic(int nfft)
fft.SetFlag(fft.HalfSpectrum );
fft.fwd( freqBuf,tbuf);
VERIFY((size_t)freqBuf.size() == (size_t)( (nfft>>1)+1) );
- VERIFY( fft_rmse(freqBuf,tbuf) < test_precision<T>() );// gross check
+ VERIFY( T(fft_rmse(freqBuf,tbuf)) < test_precision<T>() );// gross check
fft.ClearFlag(fft.HalfSpectrum );
fft.fwd( freqBuf,tbuf);
VERIFY( (size_t)freqBuf.size() == (size_t)nfft);
- VERIFY( fft_rmse(freqBuf,tbuf) < test_precision<T>() );// gross check
+ VERIFY( T(fft_rmse(freqBuf,tbuf)) < test_precision<T>() );// gross check
if (nfft&1)
return; // odd FFTs get the wrong size inverse FFT
ScalarVector tbuf2;
fft.inv( tbuf2 , freqBuf);
- VERIFY( dif_rmse(tbuf,tbuf2) < test_precision<T>() );// gross check
+ VERIFY( T(dif_rmse(tbuf,tbuf2)) < test_precision<T>() );// gross check
// verify that the Unscaled flag takes effect
@@ -121,12 +121,12 @@ void test_scalar_generic(int nfft)
//for (size_t i=0;i<(size_t) tbuf.size();++i)
// cout << "freqBuf=" << freqBuf[i] << " in2=" << tbuf3[i] << " - in=" << tbuf[i] << " => " << (tbuf3[i] - tbuf[i] ) << endl;
- VERIFY( dif_rmse(tbuf,tbuf3) < test_precision<T>() );// gross check
+ VERIFY( T(dif_rmse(tbuf,tbuf3)) < test_precision<T>() );// gross check
// verify that ClearFlag works
fft.ClearFlag(fft.Unscaled);
fft.inv( tbuf2 , freqBuf);
- VERIFY( dif_rmse(tbuf,tbuf2) < test_precision<T>() );// gross check
+ VERIFY( T(dif_rmse(tbuf,tbuf2)) < test_precision<T>() );// gross check
}
template <typename T>
@@ -152,10 +152,10 @@ void test_complex_generic(int nfft)
inbuf[k]= Complex( (T)(rand()/(double)RAND_MAX - .5), (T)(rand()/(double)RAND_MAX - .5) );
fft.fwd( outbuf , inbuf);
- VERIFY( fft_rmse(outbuf,inbuf) < test_precision<T>() );// gross check
+ VERIFY( T(fft_rmse(outbuf,inbuf)) < test_precision<T>() );// gross check
fft.inv( buf3 , outbuf);
- VERIFY( dif_rmse(inbuf,buf3) < test_precision<T>() );// gross check
+ VERIFY( T(dif_rmse(inbuf,buf3)) < test_precision<T>() );// gross check
// verify that the Unscaled flag takes effect
ComplexVector buf4;
@@ -163,12 +163,12 @@ void test_complex_generic(int nfft)
fft.inv( buf4 , outbuf);
for (int k=0;k<nfft;++k)
buf4[k] *= T(1./nfft);
- VERIFY( dif_rmse(inbuf,buf4) < test_precision<T>() );// gross check
+ VERIFY( T(dif_rmse(inbuf,buf4)) < test_precision<T>() );// gross check
// verify that ClearFlag works
fft.ClearFlag(fft.Unscaled);
fft.inv( buf3 , outbuf);
- VERIFY( dif_rmse(inbuf,buf3) < test_precision<T>() );// gross check
+ VERIFY( T(dif_rmse(inbuf,buf3)) < test_precision<T>() );// gross check
}
template <typename T>
diff --git a/unsupported/test/NonLinearOptimization.cpp b/unsupported/test/NonLinearOptimization.cpp
index d7376b0f5..1d682dd83 100644
--- a/unsupported/test/NonLinearOptimization.cpp
+++ b/unsupported/test/NonLinearOptimization.cpp
@@ -12,7 +12,8 @@
// It is intended to be done for this test only.
#include <Eigen/src/Core/util/DisableStupidWarnings.h>
-using std::sqrt;
+// tolerance for checking the number of iterations
+#define LM_EVAL_COUNT_TOL 4/3
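+// Note: the unparenthesized 4/3 still yields a real tolerance below, because
+// "lm.nfev < 602 * LM_EVAL_COUNT_TOL" expands to "lm.nfev < 602 * 4 / 3",
+// which evaluates left-to-right to 802 rather than 602 * (4/3) == 602.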
int fcn_chkder(const VectorXd &x, VectorXd &fvec, MatrixXd &fjac, int iflag)
{
@@ -246,9 +247,9 @@ struct hybrj_functor : Functor<double>
int operator()(const VectorXd &x, VectorXd &fvec)
{
double temp, temp1, temp2;
- const int n = x.size();
+ const VectorXd::Index n = x.size();
assert(fvec.size()==n);
- for (int k = 0; k < n; k++)
+ for (VectorXd::Index k = 0; k < n; k++)
{
temp = (3. - 2.*x[k])*x[k];
temp1 = 0.;
@@ -261,12 +262,12 @@ struct hybrj_functor : Functor<double>
}
int df(const VectorXd &x, MatrixXd &fjac)
{
- const int n = x.size();
+ const VectorXd::Index n = x.size();
assert(fjac.rows()==n);
assert(fjac.cols()==n);
- for (int k = 0; k < n; k++)
+ for (VectorXd::Index k = 0; k < n; k++)
{
- for (int j = 0; j < n; j++)
+ for (VectorXd::Index j = 0; j < n; j++)
fjac(k,j) = 0.;
fjac(k,k) = 3.- 4.*x[k];
if (k) fjac(k,k-1) = -1.;
@@ -351,10 +352,10 @@ struct hybrd_functor : Functor<double>
int operator()(const VectorXd &x, VectorXd &fvec) const
{
double temp, temp1, temp2;
- const int n = x.size();
+ const VectorXd::Index n = x.size();
assert(fvec.size()==n);
- for (int k=0; k < n; k++)
+ for (VectorXd::Index k=0; k < n; k++)
{
temp = (3. - 2.*x[k])*x[k];
temp1 = 0.;
@@ -455,7 +456,7 @@ struct lmstr_functor : Functor<double>
assert(jac_row.size()==x.size());
double tmp1, tmp2, tmp3, tmp4;
- int i = rownb-2;
+ VectorXd::Index i = rownb-2;
tmp1 = i+1;
tmp2 = 16 - i - 1;
tmp3 = (i>=8)? tmp2 : tmp1;
@@ -1022,7 +1023,9 @@ void testNistLanczos1(void)
VERIFY_IS_EQUAL(lm.nfev, 79);
VERIFY_IS_EQUAL(lm.njev, 72);
// check norm^2
- VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 1.430899764097e-25); // should be 1.4307867721E-25, but nist results are on 128-bit floats
+ std::cout.precision(30);
+ std::cout << lm.fvec.squaredNorm() << "\n";
+ VERIFY(lm.fvec.squaredNorm() <= 1.4307867721E-25);
// check x
VERIFY_IS_APPROX(x[0], 9.5100000027E-02);
VERIFY_IS_APPROX(x[1], 1.0000000001E+00);
@@ -1043,7 +1046,7 @@ void testNistLanczos1(void)
VERIFY_IS_EQUAL(lm.nfev, 9);
VERIFY_IS_EQUAL(lm.njev, 8);
// check norm^2
- VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 1.428595533845e-25); // should be 1.4307867721E-25, but nist results are on 128-bit floats
+ VERIFY(lm.fvec.squaredNorm() <= 1.4307867721E-25);
// check x
VERIFY_IS_APPROX(x[0], 9.5100000027E-02);
VERIFY_IS_APPROX(x[1], 1.0000000001E+00);
@@ -1262,8 +1265,8 @@ void testNistBoxBOD(void)
// check return value
VERIFY_IS_EQUAL(info, 1);
- VERIFY_IS_EQUAL(lm.nfev, 31);
- VERIFY_IS_EQUAL(lm.njev, 25);
+ VERIFY(lm.nfev < 31); // 31
+ VERIFY(lm.njev < 25); // 25
// check norm^2
VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 1.1680088766E+03);
// check x
@@ -1342,10 +1345,6 @@ void testNistMGH17(void)
lm.parameters.maxfev = 1000;
info = lm.minimize(x);
- // check return value
- VERIFY_IS_EQUAL(info, 2);
- VERIFY_IS_EQUAL(lm.nfev, 602 );
- VERIFY_IS_EQUAL(lm.njev, 545 );
// check norm^2
VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 5.4648946975E-05);
// check x
@@ -1354,6 +1353,15 @@ void testNistMGH17(void)
VERIFY_IS_APPROX(x[2], -1.4646871366E+00);
VERIFY_IS_APPROX(x[3], 1.2867534640E-02);
VERIFY_IS_APPROX(x[4], 2.2122699662E-02);
+
+ // check return value
+ VERIFY_IS_EQUAL(info, 2);
+ ++g_test_level;
+ VERIFY_IS_EQUAL(lm.nfev, 602); // 602
+ VERIFY_IS_EQUAL(lm.njev, 545); // 545
+ --g_test_level;
+ VERIFY(lm.nfev < 602 * LM_EVAL_COUNT_TOL);
+ VERIFY(lm.njev < 545 * LM_EVAL_COUNT_TOL);
/*
* Second try
@@ -1832,8 +1840,8 @@ void test_NonLinearOptimization()
// NIST tests, level of difficulty = "Average"
CALL_SUBTEST/*_5*/(testNistHahn1());
CALL_SUBTEST/*_6*/(testNistMisra1d());
-// CALL_SUBTEST/*_7*/(testNistMGH17());
-// CALL_SUBTEST/*_8*/(testNistLanczos1());
+ CALL_SUBTEST/*_7*/(testNistMGH17());
+ CALL_SUBTEST/*_8*/(testNistLanczos1());
// // NIST tests, level of difficulty = "Higher"
CALL_SUBTEST/*_9*/(testNistRat42());
diff --git a/unsupported/test/alignedvector3.cpp b/unsupported/test/alignedvector3.cpp
index fc2bc2135..252cb1d3f 100644
--- a/unsupported/test/alignedvector3.cpp
+++ b/unsupported/test/alignedvector3.cpp
@@ -10,6 +10,16 @@
#include "main.h"
#include <unsupported/Eigen/AlignedVector3>
+namespace Eigen {
+
+template<typename T,typename Derived>
+T test_relative_error(const AlignedVector3<T> &a, const MatrixBase<Derived> &b)
+{
+ return test_relative_error(a.coeffs().template head<3>(), b);
+}
+
+}
+
template<typename Scalar>
void alignedvector3()
{
@@ -19,8 +29,8 @@ void alignedvector3()
typedef Matrix<Scalar,3,3> Mat33;
typedef AlignedVector3<Scalar> FastType;
RefType r1(RefType::Random()), r2(RefType::Random()), r3(RefType::Random()),
- r4(RefType::Random()), r5(RefType::Random()), r6(RefType::Random());
- FastType f1(r1), f2(r2), f3(r3), f4(r4), f5(r5), f6(r6);
+ r4(RefType::Random()), r5(RefType::Random());
+ FastType f1(r1), f2(r2), f3(r3), f4(r4), f5(r5);
Mat33 m1(Mat33::Random());
VERIFY_IS_APPROX(f1,r1);
@@ -49,6 +59,21 @@ void alignedvector3()
f2.normalize();
r2.normalize();
VERIFY_IS_APPROX(f2,r2);
+
+ {
+ FastType f6 = RefType::Zero();
+ FastType f7 = FastType::Zero();
+ VERIFY_IS_APPROX(f6,f7);
+ f6 = r4+r1;
+ VERIFY_IS_APPROX(f6,r4+r1);
+ f6 -= Scalar(2)*r4;
+ VERIFY_IS_APPROX(f6,r1-r4);
+ }
+
+ std::stringstream ss1, ss2;
+ ss1 << f1;
+ ss2 << r1;
+ VERIFY(ss1.str()==ss2.str());
}
void test_alignedvector3()
diff --git a/unsupported/test/autodiff.cpp b/unsupported/test/autodiff.cpp
index 087e7c542..85743137e 100644
--- a/unsupported/test/autodiff.cpp
+++ b/unsupported/test/autodiff.cpp
@@ -16,7 +16,8 @@ EIGEN_DONT_INLINE Scalar foo(const Scalar& x, const Scalar& y)
using namespace std;
// return x+std::sin(y);
EIGEN_ASM_COMMENT("mybegin");
- return static_cast<Scalar>(x*2 - pow(x,2) + 2*sqrt(y*y) - 4 * sin(x) + 2 * cos(y) - exp(-0.5*x*x));
+ // pow(float, int) promotes to pow(double, double)
+ return x*2 - 1 + static_cast<Scalar>(pow(1+x,2)) + 2*sqrt(y*y+0) - 4 * sin(0+x) + 2 * cos(y+0) - exp(Scalar(-0.5)*x*x+0);
//return x+2*y*x;//x*2 -std::pow(x,2);//(2*y/x);// - y*2;
EIGEN_ASM_COMMENT("myend");
}
@@ -104,6 +105,89 @@ struct TestFunc1
}
};
+
+#if EIGEN_HAS_VARIADIC_TEMPLATES
+/* Test functor for the C++11 features. */
+template <typename Scalar>
+struct integratorFunctor
+{
+ typedef Matrix<Scalar, 2, 1> InputType;
+ typedef Matrix<Scalar, 2, 1> ValueType;
+
+ /*
+ * Implementation starts here.
+ */
+ integratorFunctor(const Scalar gain) : _gain(gain) {}
+ integratorFunctor(const integratorFunctor& f) : _gain(f._gain) {}
+ const Scalar _gain;
+
+ template <typename T1, typename T2>
+ void operator() (const T1 &input, T2 *output, const Scalar dt) const
+ {
+ T2 &o = *output;
+
+ /* Integrator to test the AD. */
+ o[0] = input[0] + input[1] * dt * _gain;
+ o[1] = input[1] * _gain;
+ }
+
+ /* Only needed for the test */
+ template <typename T1, typename T2, typename T3>
+ void operator() (const T1 &input, T2 *output, T3 *jacobian, const Scalar dt) const
+ {
+ T2 &o = *output;
+
+ /* Integrator to test the AD. */
+ o[0] = input[0] + input[1] * dt * _gain;
+ o[1] = input[1] * _gain;
+
+ if (jacobian)
+ {
+ T3 &j = *jacobian;
+
+ j(0, 0) = 1;
+ j(0, 1) = dt * _gain;
+ j(1, 0) = 0;
+ j(1, 1) = _gain;
+ }
+ }
+
+};
+
+template<typename Func> void forward_jacobian_cpp11(const Func& f)
+{
+ typedef typename Func::ValueType::Scalar Scalar;
+ typedef typename Func::ValueType ValueType;
+ typedef typename Func::InputType InputType;
+ typedef typename AutoDiffJacobian<Func>::JacobianType JacobianType;
+
+ InputType x = InputType::Random(InputType::RowsAtCompileTime);
+ ValueType y, yref;
+ JacobianType j, jref;
+
+ const Scalar dt = internal::random<double>();
+
+ jref.setZero();
+ yref.setZero();
+ f(x, &yref, &jref, dt);
+
+ //std::cerr << "y, yref, jref: " << "\n";
+ //std::cerr << y.transpose() << "\n\n";
+ //std::cerr << yref << "\n\n";
+ //std::cerr << jref << "\n\n";
+
+ AutoDiffJacobian<Func> autoj(f);
+ autoj(x, &y, &j, dt);
+
+ //std::cerr << "y j (via autodiff): " << "\n";
+ //std::cerr << y.transpose() << "\n\n";
+ //std::cerr << j << "\n\n";
+
+ VERIFY_IS_APPROX(y, yref);
+ VERIFY_IS_APPROX(j, jref);
+}
+#endif
+
template<typename Func> void forward_jacobian(const Func& f)
{
typename Func::InputType x = Func::InputType::Random(f.inputs());
@@ -127,8 +211,8 @@ template<typename Func> void forward_jacobian(const Func& f)
VERIFY_IS_APPROX(j, jref);
}
-
// TODO also check actual derivatives!
+template <int>
void test_autodiff_scalar()
{
Vector2f p = Vector2f::Random();
@@ -139,7 +223,9 @@ void test_autodiff_scalar()
VERIFY_IS_APPROX(res.value(), foo(p.x(),p.y()));
}
+
// TODO also check actual derivatives!
+template <int>
void test_autodiff_vector()
{
Vector2f p = Vector2f::Random();
@@ -148,11 +234,12 @@ void test_autodiff_vector()
VectorAD ap = p.cast<AD>();
ap.x().derivatives() = Vector2f::UnitX();
ap.y().derivatives() = Vector2f::UnitY();
-
+
AD res = foo<VectorAD>(ap);
VERIFY_IS_APPROX(res.value(), foo(p));
}
+template <int>
void test_autodiff_jacobian()
{
CALL_SUBTEST(( forward_jacobian(TestFunc1<double,2,2>()) ));
@@ -160,14 +247,121 @@ void test_autodiff_jacobian()
CALL_SUBTEST(( forward_jacobian(TestFunc1<double,3,2>()) ));
CALL_SUBTEST(( forward_jacobian(TestFunc1<double,3,3>()) ));
CALL_SUBTEST(( forward_jacobian(TestFunc1<double>(3,3)) ));
+#if EIGEN_HAS_VARIADIC_TEMPLATES
+ CALL_SUBTEST(( forward_jacobian_cpp11(integratorFunctor<double>(10)) ));
+#endif
+}
+
+
+template <int>
+void test_autodiff_hessian()
+{
+ typedef AutoDiffScalar<VectorXd> AD;
+ typedef Matrix<AD,Eigen::Dynamic,1> VectorAD;
+ typedef AutoDiffScalar<VectorAD> ADD;
+ typedef Matrix<ADD,Eigen::Dynamic,1> VectorADD;
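+  // Nesting AutoDiffScalar yields second derivatives: the outer scalar's
+  // derivative entries are themselves AutoDiffScalars, so
+  // y.derivatives()(i).derivatives()(j) holds d^2 y / (dx_i dx_j).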
+ VectorADD x(2);
+ double s1 = internal::random<double>(), s2 = internal::random<double>(), s3 = internal::random<double>(), s4 = internal::random<double>();
+ x(0).value()=s1;
+ x(1).value()=s2;
+
+  // Set unit vectors for the derivative directions (partial derivatives of the input vector)
+ x(0).derivatives().resize(2);
+ x(0).derivatives().setZero();
+ x(0).derivatives()(0)= 1;
+ x(1).derivatives().resize(2);
+ x(1).derivatives().setZero();
+ x(1).derivatives()(1)=1;
+
+  // Repeat the partial derivatives for the inner AutoDiffScalar
+ x(0).value().derivatives() = VectorXd::Unit(2,0);
+ x(1).value().derivatives() = VectorXd::Unit(2,1);
+
+  // Set the Hessian matrix to zero
+ for(int idx=0; idx<2; idx++) {
+ x(0).derivatives()(idx).derivatives() = VectorXd::Zero(2);
+ x(1).derivatives()(idx).derivatives() = VectorXd::Zero(2);
+ }
+
+ ADD y = sin(AD(s3)*x(0) + AD(s4)*x(1));
+
+ VERIFY_IS_APPROX(y.value().derivatives()(0), y.derivatives()(0).value());
+ VERIFY_IS_APPROX(y.value().derivatives()(1), y.derivatives()(1).value());
+ VERIFY_IS_APPROX(y.value().derivatives()(0), s3*std::cos(s1*s3+s2*s4));
+ VERIFY_IS_APPROX(y.value().derivatives()(1), s4*std::cos(s1*s3+s2*s4));
+ VERIFY_IS_APPROX(y.derivatives()(0).derivatives(), -std::sin(s1*s3+s2*s4)*Vector2d(s3*s3,s4*s3));
+ VERIFY_IS_APPROX(y.derivatives()(1).derivatives(), -std::sin(s1*s3+s2*s4)*Vector2d(s3*s4,s4*s4));
+
+ ADD z = x(0)*x(1);
+ VERIFY_IS_APPROX(z.derivatives()(0).derivatives(), Vector2d(0,1));
+ VERIFY_IS_APPROX(z.derivatives()(1).derivatives(), Vector2d(1,0));
+}
+
+double bug_1222() {
+ typedef Eigen::AutoDiffScalar<Eigen::Vector3d> AD;
+ const double _cv1_3 = 1.0;
+ const AD chi_3 = 1.0;
+  // This line used to fail because operator+ returns an ADS<DerType&>, which cannot then be converted to an ADS<DerType>
+ const AD denom = chi_3 + _cv1_3;
+ return denom.value();
+}
+
+double bug_1223() {
+ using std::min;
+ typedef Eigen::AutoDiffScalar<Eigen::Vector3d> AD;
+
+ const double _cv1_3 = 1.0;
+ const AD chi_3 = 1.0;
+ const AD denom = 1.0;
+
+  // This used to fail because the implementation of min attempts to construct
+  // an ADS<DerType&> via the constructor AutoDiffScalar(const Real& value),
+  // without initializing m_derivatives (which is a reference in this case)
+ #define EIGEN_TEST_SPACE
+ const AD t = min EIGEN_TEST_SPACE (denom / chi_3, 1.0);
+
+ const AD t2 = min EIGEN_TEST_SPACE (denom / (chi_3 * _cv1_3), 1.0);
+
+ return t.value() + t2.value();
+}
+
+// regression test for some compilation issues with specializations of ScalarBinaryOpTraits
+void bug_1260() {
+ Matrix4d A;
+ Vector4d v;
+ A*v;
+}
+
+// check a compilation issue with numext::max
+double bug_1261() {
+ typedef AutoDiffScalar<Matrix2d> AD;
+ typedef Matrix<AD,2,1> VectorAD;
+
+ VectorAD v;
+ const AD maxVal = v.maxCoeff();
+ const AD minVal = v.minCoeff();
+ return maxVal.value() + minVal.value();
+}
+
+double bug_1264() {
+ typedef AutoDiffScalar<Vector2d> AD;
+ const AD s;
+ const Matrix<AD, 3, 1> v1;
+ const Matrix<AD, 3, 1> v2 = (s + 3.0) * v1;
+ return v2(0).value();
}
void test_autodiff()
{
for(int i = 0; i < g_repeat; i++) {
- CALL_SUBTEST_1( test_autodiff_scalar() );
- CALL_SUBTEST_2( test_autodiff_vector() );
- CALL_SUBTEST_3( test_autodiff_jacobian() );
+ CALL_SUBTEST_1( test_autodiff_scalar<1>() );
+ CALL_SUBTEST_2( test_autodiff_vector<1>() );
+ CALL_SUBTEST_3( test_autodiff_jacobian<1>() );
+ CALL_SUBTEST_4( test_autodiff_hessian<1>() );
}
+
+ bug_1222();
+ bug_1223();
+ bug_1260();
+ bug_1261();
}
diff --git a/unsupported/test/autodiff_scalar.cpp b/unsupported/test/autodiff_scalar.cpp
new file mode 100644
index 000000000..4df2f5c57
--- /dev/null
+++ b/unsupported/test/autodiff_scalar.cpp
@@ -0,0 +1,83 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2013 Christoph Hertzberg <chtz@informatik.uni-bremen.de>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+#include <unsupported/Eigen/AutoDiff>
+
+/*
+ * In this file, scalar derivatives are tested for correctness.
+ * TODO add more tests!
+ */
+
+template<typename Scalar> void check_atan2()
+{
+ typedef Matrix<Scalar, 1, 1> Deriv1;
+ typedef AutoDiffScalar<Deriv1> AD;
+
+ AD x(internal::random<Scalar>(-3.0, 3.0), Deriv1::UnitX());
+
+ using std::exp;
+ Scalar r = exp(internal::random<Scalar>(-10, 10));
+
+ AD s = sin(x), c = cos(x);
+ AD res = atan2(r*s, r*c);
+
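+  // For r > 0, atan2(r*sin(x), r*cos(x)) recovers x itself, since x is drawn
+  // from (-3, 3) and thus lies inside atan2's principal range (-pi, pi];
+  // moreover d/dx atan2(sin x, cos x) = 1, so the derivative must match too.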
+ VERIFY_IS_APPROX(res.value(), x.value());
+ VERIFY_IS_APPROX(res.derivatives(), x.derivatives());
+
+ res = atan2(r*s+0, r*c+0);
+ VERIFY_IS_APPROX(res.value(), x.value());
+ VERIFY_IS_APPROX(res.derivatives(), x.derivatives());
+}
+
+template<typename Scalar> void check_hyperbolic_functions()
+{
+ using std::sinh;
+ using std::cosh;
+ using std::tanh;
+ typedef Matrix<Scalar, 1, 1> Deriv1;
+ typedef AutoDiffScalar<Deriv1> AD;
+ Deriv1 p = Deriv1::Random();
+ AD val(p.x(),Deriv1::UnitX());
+
+ Scalar cosh_px = std::cosh(p.x());
+ AD res1 = tanh(val);
+ VERIFY_IS_APPROX(res1.value(), std::tanh(p.x()));
+ VERIFY_IS_APPROX(res1.derivatives().x(), Scalar(1.0) / (cosh_px * cosh_px));
+
+ AD res2 = sinh(val);
+ VERIFY_IS_APPROX(res2.value(), std::sinh(p.x()));
+ VERIFY_IS_APPROX(res2.derivatives().x(), cosh_px);
+
+ AD res3 = cosh(val);
+ VERIFY_IS_APPROX(res3.value(), cosh_px);
+ VERIFY_IS_APPROX(res3.derivatives().x(), std::sinh(p.x()));
+
+  // Check against precomputed reference values at x = 1/3:
+  // tanh'(x) = 1/cosh^2(x), sinh'(x) = cosh(x), cosh'(x) = sinh(x).
+ const Scalar sample_point = Scalar(1) / Scalar(3);
+ val = AD(sample_point,Deriv1::UnitX());
+ res1 = tanh(val);
+ VERIFY_IS_APPROX(res1.derivatives().x(), Scalar(0.896629559604914));
+
+ res2 = sinh(val);
+ VERIFY_IS_APPROX(res2.derivatives().x(), Scalar(1.056071867829939));
+
+ res3 = cosh(val);
+ VERIFY_IS_APPROX(res3.derivatives().x(), Scalar(0.339540557256150));
+}
+
+void test_autodiff_scalar()
+{
+ for(int i = 0; i < g_repeat; i++) {
+ CALL_SUBTEST_1( check_atan2<float>() );
+ CALL_SUBTEST_2( check_atan2<double>() );
+ CALL_SUBTEST_3( check_hyperbolic_functions<float>() );
+ CALL_SUBTEST_4( check_hyperbolic_functions<double>() );
+ }
+}
diff --git a/unsupported/test/bdcsvd.cpp b/unsupported/test/bdcsvd.cpp
deleted file mode 100644
index 115a649b0..000000000
--- a/unsupported/test/bdcsvd.cpp
+++ /dev/null
@@ -1,213 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2013 Gauthier Brun <brun.gauthier@gmail.com>
-// Copyright (C) 2013 Nicolas Carre <nicolas.carre@ensimag.fr>
-// Copyright (C) 2013 Jean Ceccato <jean.ceccato@ensimag.fr>
-// Copyright (C) 2013 Pierre Zoppitelli <pierre.zoppitelli@ensimag.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/
-
-#include "svd_common.h"
-#include <iostream>
-#include <Eigen/LU>
-
-// check if "svd" is the good image of "m"
-template<typename MatrixType>
-void bdcsvd_check_full(const MatrixType& m, const BDCSVD<MatrixType>& svd)
-{
- svd_check_full< MatrixType, BDCSVD< MatrixType > >(m, svd);
-}
-
-// Compare to a reference value
-template<typename MatrixType>
-void bdcsvd_compare_to_full(const MatrixType& m,
- unsigned int computationOptions,
- const BDCSVD<MatrixType>& referenceSvd)
-{
- svd_compare_to_full< MatrixType, BDCSVD< MatrixType > >(m, computationOptions, referenceSvd);
-} // end bdcsvd_compare_to_full
-
-
-template<typename MatrixType>
-void bdcsvd_solve(const MatrixType& m, unsigned int computationOptions)
-{
- svd_solve< MatrixType, BDCSVD< MatrixType > >(m, computationOptions);
-} // end template bdcsvd_solve
-
-
-// test the computations options
-template<typename MatrixType>
-void bdcsvd_test_all_computation_options(const MatrixType& m)
-{
- BDCSVD<MatrixType> fullSvd(m, ComputeFullU|ComputeFullV);
- svd_test_computation_options_1< MatrixType, BDCSVD< MatrixType > >(m, fullSvd);
- svd_test_computation_options_2< MatrixType, BDCSVD< MatrixType > >(m, fullSvd);
-} // end bdcsvd_test_all_computation_options
-
-
-// Call a test with all the computations options
-template<typename MatrixType>
-void bdcsvd(const MatrixType& a = MatrixType(), bool pickrandom = true)
-{
- MatrixType m = pickrandom ? MatrixType::Random(a.rows(), a.cols()) : a;
- bdcsvd_test_all_computation_options<MatrixType>(m);
-} // end template bdcsvd
-
-
-// verify assert
-template<typename MatrixType>
-void bdcsvd_verify_assert(const MatrixType& m)
-{
- svd_verify_assert< MatrixType, BDCSVD< MatrixType > >(m);
-}// end template bdcsvd_verify_assert
-
-
-// test weird values
-template<typename MatrixType>
-void bdcsvd_inf_nan()
-{
- svd_inf_nan< MatrixType, BDCSVD< MatrixType > >();
-}// end template bdcsvd_inf_nan
-
-
-
-void bdcsvd_preallocate()
-{
- svd_preallocate< BDCSVD< MatrixXf > >();
-} // end bdcsvd_preallocate
-
-
-// compare the Singular values returned with Jacobi and Bdc
-template<typename MatrixType>
-void compare_bdc_jacobi(const MatrixType& a = MatrixType(), unsigned int computationOptions = 0)
-{
- std::cout << "debut compare" << std::endl;
- MatrixType m = MatrixType::Random(a.rows(), a.cols());
- BDCSVD<MatrixType> bdc_svd(m);
- JacobiSVD<MatrixType> jacobi_svd(m);
- VERIFY_IS_APPROX(bdc_svd.singularValues(), jacobi_svd.singularValues());
- if(computationOptions & ComputeFullU)
- VERIFY_IS_APPROX(bdc_svd.matrixU(), jacobi_svd.matrixU());
- if(computationOptions & ComputeThinU)
- VERIFY_IS_APPROX(bdc_svd.matrixU(), jacobi_svd.matrixU());
- if(computationOptions & ComputeFullV)
- VERIFY_IS_APPROX(bdc_svd.matrixV(), jacobi_svd.matrixV());
- if(computationOptions & ComputeThinV)
- VERIFY_IS_APPROX(bdc_svd.matrixV(), jacobi_svd.matrixV());
- std::cout << "fin compare" << std::endl;
-} // end template compare_bdc_jacobi
-
-
-// call the tests
-void test_bdcsvd()
-{
- // test of Dynamic defined Matrix (42, 42) of float
- CALL_SUBTEST_11(( bdcsvd_verify_assert<Matrix<float,Dynamic,Dynamic> >
- (Matrix<float,Dynamic,Dynamic>(42,42)) ));
- CALL_SUBTEST_11(( compare_bdc_jacobi<Matrix<float,Dynamic,Dynamic> >
- (Matrix<float,Dynamic,Dynamic>(42,42), 0) ));
- CALL_SUBTEST_11(( bdcsvd<Matrix<float,Dynamic,Dynamic> >
- (Matrix<float,Dynamic,Dynamic>(42,42)) ));
-
- // test of Dynamic defined Matrix (50, 50) of double
- CALL_SUBTEST_13(( bdcsvd_verify_assert<Matrix<double,Dynamic,Dynamic> >
- (Matrix<double,Dynamic,Dynamic>(50,50)) ));
- CALL_SUBTEST_13(( compare_bdc_jacobi<Matrix<double,Dynamic,Dynamic> >
- (Matrix<double,Dynamic,Dynamic>(50,50), 0) ));
- CALL_SUBTEST_13(( bdcsvd<Matrix<double,Dynamic,Dynamic> >
- (Matrix<double,Dynamic,Dynamic>(50, 50)) ));
-
- // test of Dynamic defined Matrix (22, 22) of complex double
- CALL_SUBTEST_14(( bdcsvd_verify_assert<Matrix<std::complex<double>,Dynamic,Dynamic> >
- (Matrix<std::complex<double>,Dynamic,Dynamic>(22,22)) ));
- CALL_SUBTEST_14(( compare_bdc_jacobi<Matrix<std::complex<double>,Dynamic,Dynamic> >
- (Matrix<std::complex<double>, Dynamic, Dynamic> (22,22), 0) ));
- CALL_SUBTEST_14(( bdcsvd<Matrix<std::complex<double>,Dynamic,Dynamic> >
- (Matrix<std::complex<double>,Dynamic,Dynamic>(22, 22)) ));
-
- // test of Dynamic defined Matrix (10, 10) of int
- //CALL_SUBTEST_15(( bdcsvd_verify_assert<Matrix<int,Dynamic,Dynamic> >
- // (Matrix<int,Dynamic,Dynamic>(10,10)) ));
- //CALL_SUBTEST_15(( compare_bdc_jacobi<Matrix<int,Dynamic,Dynamic> >
- // (Matrix<int,Dynamic,Dynamic>(10,10), 0) ));
- //CALL_SUBTEST_15(( bdcsvd<Matrix<int,Dynamic,Dynamic> >
- // (Matrix<int,Dynamic,Dynamic>(10, 10)) ));
-
-
- // test of Dynamic defined Matrix (8, 6) of double
-
- CALL_SUBTEST_16(( bdcsvd_verify_assert<Matrix<double,Dynamic,Dynamic> >
- (Matrix<double,Dynamic,Dynamic>(8,6)) ));
- CALL_SUBTEST_16(( compare_bdc_jacobi<Matrix<double,Dynamic,Dynamic> >
- (Matrix<double,Dynamic,Dynamic>(8, 6), 0) ));
- CALL_SUBTEST_16(( bdcsvd<Matrix<double,Dynamic,Dynamic> >
- (Matrix<double,Dynamic,Dynamic>(8, 6)) ));
-
-
-
- // test of Dynamic defined Matrix (36, 12) of float
- CALL_SUBTEST_17(( compare_bdc_jacobi<Matrix<float,Dynamic,Dynamic> >
- (Matrix<float,Dynamic,Dynamic>(36, 12), 0) ));
- CALL_SUBTEST_17(( bdcsvd<Matrix<float,Dynamic,Dynamic> >
- (Matrix<float,Dynamic,Dynamic>(36, 12)) ));
-
- // test of Dynamic defined Matrix (5, 8) of double
- CALL_SUBTEST_18(( compare_bdc_jacobi<Matrix<double,Dynamic,Dynamic> >
- (Matrix<double,Dynamic,Dynamic>(5, 8), 0) ));
- CALL_SUBTEST_18(( bdcsvd<Matrix<double,Dynamic,Dynamic> >
- (Matrix<double,Dynamic,Dynamic>(5, 8)) ));
-
-
- // non regression tests
- CALL_SUBTEST_3(( bdcsvd_verify_assert(Matrix3f()) ));
- CALL_SUBTEST_4(( bdcsvd_verify_assert(Matrix4d()) ));
- CALL_SUBTEST_7(( bdcsvd_verify_assert(MatrixXf(10,12)) ));
- CALL_SUBTEST_8(( bdcsvd_verify_assert(MatrixXcd(7,5)) ));
-
- // SUBTESTS 1 and 2 on specifics matrix
- for(int i = 0; i < g_repeat; i++) {
- Matrix2cd m;
- m << 0, 1,
- 0, 1;
- CALL_SUBTEST_1(( bdcsvd(m, false) ));
- m << 1, 0,
- 1, 0;
- CALL_SUBTEST_1(( bdcsvd(m, false) ));
-
- Matrix2d n;
- n << 0, 0,
- 0, 0;
- CALL_SUBTEST_2(( bdcsvd(n, false) ));
- n << 0, 0,
- 0, 1;
- CALL_SUBTEST_2(( bdcsvd(n, false) ));
-
- // Statics matrix don't work with BDSVD yet
- // bdc algo on a random 3x3 float matrix
- // CALL_SUBTEST_3(( bdcsvd<Matrix3f>() ));
- // bdc algo on a random 4x4 double matrix
- // CALL_SUBTEST_4(( bdcsvd<Matrix4d>() ));
- // bdc algo on a random 3x5 float matrix
- // CALL_SUBTEST_5(( bdcsvd<Matrix<float,3,5> >() ));
-
- int r = internal::random<int>(1, 30),
- c = internal::random<int>(1, 30);
- CALL_SUBTEST_7(( bdcsvd<MatrixXf>(MatrixXf(r,c)) ));
- CALL_SUBTEST_8(( bdcsvd<MatrixXcd>(MatrixXcd(r,c)) ));
- (void) r;
- (void) c;
-
- // Test on inf/nan matrix
- CALL_SUBTEST_7( bdcsvd_inf_nan<MatrixXf>() );
- }
-
- CALL_SUBTEST_7(( bdcsvd<MatrixXf>(MatrixXf(internal::random<int>(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/2), internal::random<int>(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/2))) ));
- CALL_SUBTEST_8(( bdcsvd<MatrixXcd>(MatrixXcd(internal::random<int>(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/3), internal::random<int>(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/3))) ));
-
- // Test problem size constructors
- CALL_SUBTEST_7( BDCSVD<MatrixXf>(10,10) );
-
-} // end test_bdcsvd
diff --git a/unsupported/test/cxx11_eventcount.cpp b/unsupported/test/cxx11_eventcount.cpp
new file mode 100644
index 000000000..3b598bf42
--- /dev/null
+++ b/unsupported/test/cxx11_eventcount.cpp
@@ -0,0 +1,142 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2016 Dmitry Vyukov <dvyukov@google.com>
+// Copyright (C) 2016 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#define EIGEN_USE_THREADS
+#include "main.h"
+#include <Eigen/CXX11/ThreadPool>
+
+// Visual Studio doesn't provide a rand_r() function since its
+// implementation of rand() is already thread-safe.
+int rand_reentrant(unsigned int* s) {
+#if EIGEN_COMP_MSVC_STRICT  // always defined by Eigen (possibly to 0), so test its value
+ EIGEN_UNUSED_VARIABLE(s);
+ return rand();
+#else
+ return rand_r(s);
+#endif
+}
+
+static void test_basic_eventcount()
+{
+ MaxSizeVector<EventCount::Waiter> waiters(1);
+ waiters.resize(1);
+ EventCount ec(waiters);
+ EventCount::Waiter& w = waiters[0];
+ ec.Notify(false);
+ ec.Prewait(&w);
+ ec.Notify(true);
+ ec.CommitWait(&w);
+ ec.Prewait(&w);
+ ec.CancelWait(&w);
+}
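+// The calls above follow the intended waiter protocol: Prewait(), then
+// re-check the wait predicate (here, queue emptiness), then either
+// CommitWait() to block or CancelWait() if work arrived in between. The
+// stress test below exercises exactly this pattern from consumer threads.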
+
+// Fake bounded counter-based queue.
+struct TestQueue {
+ std::atomic<int> val_;
+ static const int kQueueSize = 10;
+
+ TestQueue() : val_() {}
+
+ ~TestQueue() { VERIFY_IS_EQUAL(val_.load(), 0); }
+
+ bool Push() {
+ int val = val_.load(std::memory_order_relaxed);
+ for (;;) {
+ VERIFY_GE(val, 0);
+ VERIFY_LE(val, kQueueSize);
+ if (val == kQueueSize) return false;
+ if (val_.compare_exchange_weak(val, val + 1, std::memory_order_relaxed))
+ return true;
+ }
+ }
+
+ bool Pop() {
+ int val = val_.load(std::memory_order_relaxed);
+ for (;;) {
+ VERIFY_GE(val, 0);
+ VERIFY_LE(val, kQueueSize);
+ if (val == 0) return false;
+ if (val_.compare_exchange_weak(val, val - 1, std::memory_order_relaxed))
+ return true;
+ }
+ }
+
+ bool Empty() { return val_.load(std::memory_order_relaxed) == 0; }
+};
+
+const int TestQueue::kQueueSize;
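+// kQueueSize may be odr-used by the VERIFY_GE/VERIFY_LE checks above, so
+// pre-C++17 it needs this namespace-scope definition in addition to the
+// in-class initializer.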
+
+// A number of producers send messages to a set of consumers using a set of
+// fake queues. Ensure that it does not crash, that consumers don't deadlock,
+// and that the numbers of blocked and unblocked threads match.
+static void test_stress_eventcount()
+{
+ const int kThreads = std::thread::hardware_concurrency();
+ static const int kEvents = 1 << 16;
+ static const int kQueues = 10;
+
+ MaxSizeVector<EventCount::Waiter> waiters(kThreads);
+ waiters.resize(kThreads);
+ EventCount ec(waiters);
+ TestQueue queues[kQueues];
+
+ std::vector<std::unique_ptr<std::thread>> producers;
+ for (int i = 0; i < kThreads; i++) {
+ producers.emplace_back(new std::thread([&ec, &queues]() {
+ unsigned int rnd = static_cast<unsigned int>(std::hash<std::thread::id>()(std::this_thread::get_id()));
+ for (int j = 0; j < kEvents; j++) {
+ unsigned idx = rand_reentrant(&rnd) % kQueues;
+ if (queues[idx].Push()) {
+ ec.Notify(false);
+ continue;
+ }
+ EIGEN_THREAD_YIELD();
+ j--;
+ }
+ }));
+ }
+
+ std::vector<std::unique_ptr<std::thread>> consumers;
+ for (int i = 0; i < kThreads; i++) {
+ consumers.emplace_back(new std::thread([&ec, &queues, &waiters, i]() {
+ EventCount::Waiter& w = waiters[i];
+ unsigned int rnd = static_cast<unsigned int>(std::hash<std::thread::id>()(std::this_thread::get_id()));
+ for (int j = 0; j < kEvents; j++) {
+ unsigned idx = rand_reentrant(&rnd) % kQueues;
+ if (queues[idx].Pop()) continue;
+ j--;
+ ec.Prewait(&w);
+ bool empty = true;
+ for (int q = 0; q < kQueues; q++) {
+ if (!queues[q].Empty()) {
+ empty = false;
+ break;
+ }
+ }
+ if (!empty) {
+ ec.CancelWait(&w);
+ continue;
+ }
+ ec.CommitWait(&w);
+ }
+ }));
+ }
+
+ for (int i = 0; i < kThreads; i++) {
+ producers[i]->join();
+ consumers[i]->join();
+ }
+}
+
+void test_cxx11_eventcount()
+{
+ CALL_SUBTEST(test_basic_eventcount());
+ CALL_SUBTEST(test_stress_eventcount());
+}
diff --git a/unsupported/test/cxx11_meta.cpp b/unsupported/test/cxx11_meta.cpp
new file mode 100644
index 000000000..8911c59d8
--- /dev/null
+++ b/unsupported/test/cxx11_meta.cpp
@@ -0,0 +1,357 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2013 Christian Seiler <christian@iwakd.de>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+
+#include <array>
+#include <Eigen/CXX11/src/util/CXX11Meta.h>
+
+using Eigen::internal::is_same;
+using Eigen::internal::type_list;
+using Eigen::internal::numeric_list;
+using Eigen::internal::gen_numeric_list;
+using Eigen::internal::gen_numeric_list_reversed;
+using Eigen::internal::gen_numeric_list_swapped_pair;
+using Eigen::internal::gen_numeric_list_repeated;
+using Eigen::internal::concat;
+using Eigen::internal::mconcat;
+using Eigen::internal::take;
+using Eigen::internal::skip;
+using Eigen::internal::slice;
+using Eigen::internal::get;
+using Eigen::internal::id_numeric;
+using Eigen::internal::id_type;
+using Eigen::internal::is_same_gf;
+using Eigen::internal::apply_op_from_left;
+using Eigen::internal::apply_op_from_right;
+using Eigen::internal::contained_in_list;
+using Eigen::internal::contained_in_list_gf;
+using Eigen::internal::arg_prod;
+using Eigen::internal::arg_sum;
+using Eigen::internal::sum_op;
+using Eigen::internal::product_op;
+using Eigen::internal::array_reverse;
+using Eigen::internal::array_sum;
+using Eigen::internal::array_prod;
+using Eigen::internal::array_reduce;
+using Eigen::internal::array_zip;
+using Eigen::internal::array_zip_and_reduce;
+using Eigen::internal::array_apply;
+using Eigen::internal::array_apply_and_reduce;
+using Eigen::internal::repeat;
+using Eigen::internal::instantiate_by_c_array;
+
+struct dummy_a {};
+struct dummy_b {};
+struct dummy_c {};
+struct dummy_d {};
+struct dummy_e {};
+
+// dummy operation for testing apply
+template<typename A, typename B> struct dummy_op;
+template<> struct dummy_op<dummy_a, dummy_b> { typedef dummy_c type; };
+template<> struct dummy_op<dummy_b, dummy_a> { typedef dummy_d type; };
+template<> struct dummy_op<dummy_b, dummy_c> { typedef dummy_a type; };
+template<> struct dummy_op<dummy_c, dummy_b> { typedef dummy_d type; };
+template<> struct dummy_op<dummy_c, dummy_a> { typedef dummy_b type; };
+template<> struct dummy_op<dummy_a, dummy_c> { typedef dummy_d type; };
+template<> struct dummy_op<dummy_a, dummy_a> { typedef dummy_e type; };
+template<> struct dummy_op<dummy_b, dummy_b> { typedef dummy_e type; };
+template<> struct dummy_op<dummy_c, dummy_c> { typedef dummy_e type; };
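+
+// Note that dummy_op is deliberately non-commutative (dummy_op<dummy_a,
+// dummy_b> is dummy_c while dummy_op<dummy_b, dummy_a> is dummy_d), which
+// lets the apply_op tests below tell left application apart from right
+// application.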
+
+template<typename A, typename B> struct dummy_test { constexpr static bool value = false; constexpr static int global_flags = 0; };
+template<> struct dummy_test<dummy_a, dummy_a> { constexpr static bool value = true; constexpr static int global_flags = 1; };
+template<> struct dummy_test<dummy_b, dummy_b> { constexpr static bool value = true; constexpr static int global_flags = 2; };
+template<> struct dummy_test<dummy_c, dummy_c> { constexpr static bool value = true; constexpr static int global_flags = 4; };
+
+struct times2_op { template<typename A> static A run(A v) { return v * 2; } };
+
+struct dummy_inst
+{
+ int c;
+
+ dummy_inst() : c(0) {}
+ explicit dummy_inst(int) : c(1) {}
+ dummy_inst(int, int) : c(2) {}
+ dummy_inst(int, int, int) : c(3) {}
+ dummy_inst(int, int, int, int) : c(4) {}
+ dummy_inst(int, int, int, int, int) : c(5) {}
+};
+
+static void test_gen_numeric_list()
+{
+ VERIFY((is_same<typename gen_numeric_list<int, 0>::type, numeric_list<int>>::value));
+ VERIFY((is_same<typename gen_numeric_list<int, 1>::type, numeric_list<int, 0>>::value));
+ VERIFY((is_same<typename gen_numeric_list<int, 2>::type, numeric_list<int, 0, 1>>::value));
+ VERIFY((is_same<typename gen_numeric_list<int, 5>::type, numeric_list<int, 0, 1, 2, 3, 4>>::value));
+ VERIFY((is_same<typename gen_numeric_list<int, 10>::type, numeric_list<int, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9>>::value));
+
+ VERIFY((is_same<typename gen_numeric_list<int, 0, 42>::type, numeric_list<int>>::value));
+ VERIFY((is_same<typename gen_numeric_list<int, 1, 42>::type, numeric_list<int, 42>>::value));
+ VERIFY((is_same<typename gen_numeric_list<int, 2, 42>::type, numeric_list<int, 42, 43>>::value));
+ VERIFY((is_same<typename gen_numeric_list<int, 5, 42>::type, numeric_list<int, 42, 43, 44, 45, 46>>::value));
+ VERIFY((is_same<typename gen_numeric_list<int, 10, 42>::type, numeric_list<int, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51>>::value));
+
+ VERIFY((is_same<typename gen_numeric_list_reversed<int, 0>::type, numeric_list<int>>::value));
+ VERIFY((is_same<typename gen_numeric_list_reversed<int, 1>::type, numeric_list<int, 0>>::value));
+ VERIFY((is_same<typename gen_numeric_list_reversed<int, 2>::type, numeric_list<int, 1, 0>>::value));
+ VERIFY((is_same<typename gen_numeric_list_reversed<int, 5>::type, numeric_list<int, 4, 3, 2, 1, 0>>::value));
+ VERIFY((is_same<typename gen_numeric_list_reversed<int, 10>::type, numeric_list<int, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0>>::value));
+
+ VERIFY((is_same<typename gen_numeric_list_reversed<int, 0, 42>::type, numeric_list<int>>::value));
+ VERIFY((is_same<typename gen_numeric_list_reversed<int, 1, 42>::type, numeric_list<int, 42>>::value));
+ VERIFY((is_same<typename gen_numeric_list_reversed<int, 2, 42>::type, numeric_list<int, 43, 42>>::value));
+ VERIFY((is_same<typename gen_numeric_list_reversed<int, 5, 42>::type, numeric_list<int, 46, 45, 44, 43, 42>>::value));
+ VERIFY((is_same<typename gen_numeric_list_reversed<int, 10, 42>::type, numeric_list<int, 51, 50, 49, 48, 47, 46, 45, 44, 43, 42>>::value));
+
+ VERIFY((is_same<typename gen_numeric_list_swapped_pair<int, 0, 2, 3>::type, numeric_list<int>>::value));
+ VERIFY((is_same<typename gen_numeric_list_swapped_pair<int, 1, 2, 3>::type, numeric_list<int, 0>>::value));
+ VERIFY((is_same<typename gen_numeric_list_swapped_pair<int, 2, 2, 3>::type, numeric_list<int, 0, 1>>::value));
+ VERIFY((is_same<typename gen_numeric_list_swapped_pair<int, 5, 2, 3>::type, numeric_list<int, 0, 1, 3, 2, 4>>::value));
+ VERIFY((is_same<typename gen_numeric_list_swapped_pair<int, 10, 2, 3>::type, numeric_list<int, 0, 1, 3, 2, 4, 5, 6, 7, 8, 9>>::value));
+
+ VERIFY((is_same<typename gen_numeric_list_swapped_pair<int, 0, 44, 45, 42>::type, numeric_list<int>>::value));
+ VERIFY((is_same<typename gen_numeric_list_swapped_pair<int, 1, 44, 45, 42>::type, numeric_list<int, 42>>::value));
+ VERIFY((is_same<typename gen_numeric_list_swapped_pair<int, 2, 44, 45, 42>::type, numeric_list<int, 42, 43>>::value));
+ VERIFY((is_same<typename gen_numeric_list_swapped_pair<int, 5, 44, 45, 42>::type, numeric_list<int, 42, 43, 45, 44, 46>>::value));
+ VERIFY((is_same<typename gen_numeric_list_swapped_pair<int, 10, 44, 45, 42>::type, numeric_list<int, 42, 43, 45, 44, 46, 47, 48, 49, 50, 51>>::value));
+
+ VERIFY((is_same<typename gen_numeric_list_repeated<int, 0, 0>::type, numeric_list<int>>::value));
+ VERIFY((is_same<typename gen_numeric_list_repeated<int, 1, 0>::type, numeric_list<int, 0>>::value));
+ VERIFY((is_same<typename gen_numeric_list_repeated<int, 2, 0>::type, numeric_list<int, 0, 0>>::value));
+ VERIFY((is_same<typename gen_numeric_list_repeated<int, 5, 0>::type, numeric_list<int, 0, 0, 0, 0, 0>>::value));
+ VERIFY((is_same<typename gen_numeric_list_repeated<int, 10, 0>::type, numeric_list<int, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0>>::value));
+}
+
+static void test_concat()
+{
+ VERIFY((is_same<typename concat<type_list<dummy_a, dummy_a>, type_list<>>::type, type_list<dummy_a, dummy_a>>::value));
+ VERIFY((is_same<typename concat<type_list<>, type_list<dummy_a, dummy_a>>::type, type_list<dummy_a, dummy_a>>::value));
+ VERIFY((is_same<typename concat<type_list<dummy_a, dummy_a>, type_list<dummy_a, dummy_a>>::type, type_list<dummy_a, dummy_a, dummy_a, dummy_a>>::value));
+ VERIFY((is_same<typename concat<type_list<dummy_a, dummy_a>, type_list<dummy_b, dummy_c>>::type, type_list<dummy_a, dummy_a, dummy_b, dummy_c>>::value));
+ VERIFY((is_same<typename concat<type_list<dummy_a>, type_list<dummy_b, dummy_c>>::type, type_list<dummy_a, dummy_b, dummy_c>>::value));
+
+ VERIFY((is_same<typename concat<numeric_list<int, 0, 0>, numeric_list<int>>::type, numeric_list<int, 0, 0>>::value));
+ VERIFY((is_same<typename concat<numeric_list<int>, numeric_list<int, 0, 0>>::type, numeric_list<int, 0, 0>>::value));
+ VERIFY((is_same<typename concat<numeric_list<int, 0, 0>, numeric_list<int, 0, 0>>::type, numeric_list<int, 0, 0, 0, 0>>::value));
+ VERIFY((is_same<typename concat<numeric_list<int, 0, 0>, numeric_list<int, 1, 2>>::type, numeric_list<int, 0, 0, 1, 2>>::value));
+ VERIFY((is_same<typename concat<numeric_list<int, 0>, numeric_list<int, 1, 2>>::type, numeric_list<int, 0, 1, 2>>::value));
+
+ VERIFY((is_same<typename mconcat<type_list<dummy_a>>::type, type_list<dummy_a>>::value));
+ VERIFY((is_same<typename mconcat<type_list<dummy_a>, type_list<dummy_b>>::type, type_list<dummy_a, dummy_b>>::value));
+ VERIFY((is_same<typename mconcat<type_list<dummy_a>, type_list<dummy_b>, type_list<dummy_c>>::type, type_list<dummy_a, dummy_b, dummy_c>>::value));
+ VERIFY((is_same<typename mconcat<type_list<dummy_a>, type_list<dummy_b, dummy_c>>::type, type_list<dummy_a, dummy_b, dummy_c>>::value));
+ VERIFY((is_same<typename mconcat<type_list<dummy_a, dummy_b>, type_list<dummy_c>>::type, type_list<dummy_a, dummy_b, dummy_c>>::value));
+
+ VERIFY((is_same<typename mconcat<numeric_list<int, 0>>::type, numeric_list<int, 0>>::value));
+ VERIFY((is_same<typename mconcat<numeric_list<int, 0>, numeric_list<int, 1>>::type, numeric_list<int, 0, 1>>::value));
+ VERIFY((is_same<typename mconcat<numeric_list<int, 0>, numeric_list<int, 1>, numeric_list<int, 2>>::type, numeric_list<int, 0, 1, 2>>::value));
+ VERIFY((is_same<typename mconcat<numeric_list<int, 0>, numeric_list<int, 1, 2>>::type, numeric_list<int, 0, 1, 2>>::value));
+ VERIFY((is_same<typename mconcat<numeric_list<int, 0, 1>, numeric_list<int, 2>>::type, numeric_list<int, 0, 1, 2>>::value));
+}
+
+static void test_slice()
+{
+ typedef type_list<dummy_a, dummy_a, dummy_b, dummy_b, dummy_c, dummy_c> tl;
+ typedef numeric_list<int, 0, 1, 2, 3, 4, 5> il;
+
+ VERIFY((is_same<typename take<0, tl>::type, type_list<>>::value));
+ VERIFY((is_same<typename take<1, tl>::type, type_list<dummy_a>>::value));
+ VERIFY((is_same<typename take<2, tl>::type, type_list<dummy_a, dummy_a>>::value));
+ VERIFY((is_same<typename take<3, tl>::type, type_list<dummy_a, dummy_a, dummy_b>>::value));
+ VERIFY((is_same<typename take<4, tl>::type, type_list<dummy_a, dummy_a, dummy_b, dummy_b>>::value));
+ VERIFY((is_same<typename take<5, tl>::type, type_list<dummy_a, dummy_a, dummy_b, dummy_b, dummy_c>>::value));
+ VERIFY((is_same<typename take<6, tl>::type, type_list<dummy_a, dummy_a, dummy_b, dummy_b, dummy_c, dummy_c>>::value));
+
+ VERIFY((is_same<typename take<0, il>::type, numeric_list<int>>::value));
+ VERIFY((is_same<typename take<1, il>::type, numeric_list<int, 0>>::value));
+ VERIFY((is_same<typename take<2, il>::type, numeric_list<int, 0, 1>>::value));
+ VERIFY((is_same<typename take<3, il>::type, numeric_list<int, 0, 1, 2>>::value));
+ VERIFY((is_same<typename take<4, il>::type, numeric_list<int, 0, 1, 2, 3>>::value));
+ VERIFY((is_same<typename take<5, il>::type, numeric_list<int, 0, 1, 2, 3, 4>>::value));
+ VERIFY((is_same<typename take<6, il>::type, numeric_list<int, 0, 1, 2, 3, 4, 5>>::value));
+
+ VERIFY((is_same<typename skip<0, tl>::type, type_list<dummy_a, dummy_a, dummy_b, dummy_b, dummy_c, dummy_c>>::value));
+ VERIFY((is_same<typename skip<1, tl>::type, type_list<dummy_a, dummy_b, dummy_b, dummy_c, dummy_c>>::value));
+ VERIFY((is_same<typename skip<2, tl>::type, type_list<dummy_b, dummy_b, dummy_c, dummy_c>>::value));
+ VERIFY((is_same<typename skip<3, tl>::type, type_list<dummy_b, dummy_c, dummy_c>>::value));
+ VERIFY((is_same<typename skip<4, tl>::type, type_list<dummy_c, dummy_c>>::value));
+ VERIFY((is_same<typename skip<5, tl>::type, type_list<dummy_c>>::value));
+ VERIFY((is_same<typename skip<6, tl>::type, type_list<>>::value));
+
+ VERIFY((is_same<typename skip<0, il>::type, numeric_list<int, 0, 1, 2, 3, 4, 5>>::value));
+ VERIFY((is_same<typename skip<1, il>::type, numeric_list<int, 1, 2, 3, 4, 5>>::value));
+ VERIFY((is_same<typename skip<2, il>::type, numeric_list<int, 2, 3, 4, 5>>::value));
+ VERIFY((is_same<typename skip<3, il>::type, numeric_list<int, 3, 4, 5>>::value));
+ VERIFY((is_same<typename skip<4, il>::type, numeric_list<int, 4, 5>>::value));
+ VERIFY((is_same<typename skip<5, il>::type, numeric_list<int, 5>>::value));
+ VERIFY((is_same<typename skip<6, il>::type, numeric_list<int>>::value));
+
+ VERIFY((is_same<typename slice<0, 3, tl>::type, typename take<3, tl>::type>::value));
+ VERIFY((is_same<typename slice<0, 3, il>::type, typename take<3, il>::type>::value));
+ VERIFY((is_same<typename slice<1, 3, tl>::type, type_list<dummy_a, dummy_b, dummy_b>>::value));
+ VERIFY((is_same<typename slice<1, 3, il>::type, numeric_list<int, 1, 2, 3>>::value));
+}
+
+static void test_get()
+{
+ typedef type_list<dummy_a, dummy_a, dummy_b, dummy_b, dummy_c, dummy_c> tl;
+ typedef numeric_list<int, 4, 8, 15, 16, 23, 42> il;
+
+ VERIFY((is_same<typename get<0, tl>::type, dummy_a>::value));
+ VERIFY((is_same<typename get<1, tl>::type, dummy_a>::value));
+ VERIFY((is_same<typename get<2, tl>::type, dummy_b>::value));
+ VERIFY((is_same<typename get<3, tl>::type, dummy_b>::value));
+ VERIFY((is_same<typename get<4, tl>::type, dummy_c>::value));
+ VERIFY((is_same<typename get<5, tl>::type, dummy_c>::value));
+
+ VERIFY_IS_EQUAL(((int)get<0, il>::value), 4);
+ VERIFY_IS_EQUAL(((int)get<1, il>::value), 8);
+ VERIFY_IS_EQUAL(((int)get<2, il>::value), 15);
+ VERIFY_IS_EQUAL(((int)get<3, il>::value), 16);
+ VERIFY_IS_EQUAL(((int)get<4, il>::value), 23);
+ VERIFY_IS_EQUAL(((int)get<5, il>::value), 42);
+}
+
+static void test_id_helper(dummy_a a, dummy_a b, dummy_a c)
+{
+ (void)a;
+ (void)b;
+ (void)c;
+}
+
+template<int... ii>
+static void test_id_numeric()
+{
+ test_id_helper(typename id_numeric<int, ii, dummy_a>::type()...);
+}
+
+template<typename... tt>
+static void test_id_type()
+{
+ test_id_helper(typename id_type<tt, dummy_a>::type()...);
+}
+
+static void test_id()
+{
+  // Don't call VERIFY here; just assume it works if it compiles
+  // (otherwise the compiler will complain that it can't find the function).
+ test_id_numeric<1, 4, 6>();
+ test_id_type<dummy_a, dummy_b, dummy_c>();
+}
+
+static void test_is_same_gf()
+{
+ VERIFY((!is_same_gf<dummy_a, dummy_b>::value));
+ VERIFY((!!is_same_gf<dummy_a, dummy_a>::value));
+ VERIFY_IS_EQUAL((!!is_same_gf<dummy_a, dummy_b>::global_flags), false);
+ VERIFY_IS_EQUAL((!!is_same_gf<dummy_a, dummy_a>::global_flags), false);
+}
+
+static void test_apply_op()
+{
+ typedef type_list<dummy_a, dummy_b, dummy_c> tl;
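+  // With the dummy_op table above: apply_op_from_left computes
+  // dummy_op<dummy_a, X> for each X in tl, i.e. (e, c, d), while
+  // apply_op_from_right computes dummy_op<X, dummy_a>, i.e. (e, d, b).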
+ VERIFY((!!is_same<typename apply_op_from_left<dummy_op, dummy_a, tl>::type, type_list<dummy_e, dummy_c, dummy_d>>::value));
+ VERIFY((!!is_same<typename apply_op_from_right<dummy_op, dummy_a, tl>::type, type_list<dummy_e, dummy_d, dummy_b>>::value));
+}
+
+static void test_contained_in_list()
+{
+ typedef type_list<dummy_a, dummy_b, dummy_c> tl;
+
+ VERIFY((!!contained_in_list<is_same, dummy_a, tl>::value));
+ VERIFY((!!contained_in_list<is_same, dummy_b, tl>::value));
+ VERIFY((!!contained_in_list<is_same, dummy_c, tl>::value));
+ VERIFY((!contained_in_list<is_same, dummy_d, tl>::value));
+ VERIFY((!contained_in_list<is_same, dummy_e, tl>::value));
+
+ VERIFY((!!contained_in_list_gf<dummy_test, dummy_a, tl>::value));
+ VERIFY((!!contained_in_list_gf<dummy_test, dummy_b, tl>::value));
+ VERIFY((!!contained_in_list_gf<dummy_test, dummy_c, tl>::value));
+ VERIFY((!contained_in_list_gf<dummy_test, dummy_d, tl>::value));
+ VERIFY((!contained_in_list_gf<dummy_test, dummy_e, tl>::value));
+
+ VERIFY_IS_EQUAL(((int)contained_in_list_gf<dummy_test, dummy_a, tl>::global_flags), 1);
+ VERIFY_IS_EQUAL(((int)contained_in_list_gf<dummy_test, dummy_b, tl>::global_flags), 2);
+ VERIFY_IS_EQUAL(((int)contained_in_list_gf<dummy_test, dummy_c, tl>::global_flags), 4);
+ VERIFY_IS_EQUAL(((int)contained_in_list_gf<dummy_test, dummy_d, tl>::global_flags), 0);
+ VERIFY_IS_EQUAL(((int)contained_in_list_gf<dummy_test, dummy_e, tl>::global_flags), 0);
+}
+
+static void test_arg_reductions()
+{
+ VERIFY_IS_EQUAL(arg_sum(1,2,3,4), 10);
+ VERIFY_IS_EQUAL(arg_prod(1,2,3,4), 24);
+ VERIFY_IS_APPROX(arg_sum(0.5, 2, 5), 7.5);
+ VERIFY_IS_APPROX(arg_prod(0.5, 2, 5), 5.0);
+}
+
+static void test_array_reverse_and_reduce()
+{
+ array<int, 6> a{{4, 8, 15, 16, 23, 42}};
+ array<int, 6> b{{42, 23, 16, 15, 8, 4}};
+
+ // there is no operator<< for std::array, so VERIFY_IS_EQUAL will
+ // not compile
+ VERIFY((array_reverse(a) == b));
+ VERIFY((array_reverse(b) == a));
+ VERIFY_IS_EQUAL((array_sum(a)), 108);
+ VERIFY_IS_EQUAL((array_sum(b)), 108);
+ VERIFY_IS_EQUAL((array_prod(a)), 7418880);
+ VERIFY_IS_EQUAL((array_prod(b)), 7418880);
+}
+
+static void test_array_zip_and_apply()
+{
+ array<int, 6> a{{4, 8, 15, 16, 23, 42}};
+ array<int, 6> b{{0, 1, 2, 3, 4, 5}};
+ array<int, 6> c{{4, 9, 17, 19, 27, 47}};
+ array<int, 6> d{{0, 8, 30, 48, 92, 210}};
+ array<int, 6> e{{0, 2, 4, 6, 8, 10}};
+
+ VERIFY((array_zip<sum_op>(a, b) == c));
+ VERIFY((array_zip<product_op>(a, b) == d));
+ VERIFY((array_apply<times2_op>(b) == e));
+ VERIFY_IS_EQUAL((array_apply_and_reduce<sum_op, times2_op>(a)), 216);
+ VERIFY_IS_EQUAL((array_apply_and_reduce<sum_op, times2_op>(b)), 30);
+ VERIFY_IS_EQUAL((array_zip_and_reduce<product_op, sum_op>(a, b)), 14755932);
+ VERIFY_IS_EQUAL((array_zip_and_reduce<sum_op, product_op>(a, b)), 388);
+}
+
+static void test_array_misc()
+{
+ array<int, 3> a3{{1, 1, 1}};
+ array<int, 6> a6{{2, 2, 2, 2, 2, 2}};
+ VERIFY((repeat<3, int>(1) == a3));
+ VERIFY((repeat<6, int>(2) == a6));
+
+ int data[5] = { 0, 1, 2, 3, 4 };
+ VERIFY_IS_EQUAL((instantiate_by_c_array<dummy_inst, int, 0>(data).c), 0);
+ VERIFY_IS_EQUAL((instantiate_by_c_array<dummy_inst, int, 1>(data).c), 1);
+ VERIFY_IS_EQUAL((instantiate_by_c_array<dummy_inst, int, 2>(data).c), 2);
+ VERIFY_IS_EQUAL((instantiate_by_c_array<dummy_inst, int, 3>(data).c), 3);
+ VERIFY_IS_EQUAL((instantiate_by_c_array<dummy_inst, int, 4>(data).c), 4);
+ VERIFY_IS_EQUAL((instantiate_by_c_array<dummy_inst, int, 5>(data).c), 5);
+}
+
+void test_cxx11_meta()
+{
+ CALL_SUBTEST(test_gen_numeric_list());
+ CALL_SUBTEST(test_concat());
+ CALL_SUBTEST(test_slice());
+ CALL_SUBTEST(test_get());
+ CALL_SUBTEST(test_id());
+ CALL_SUBTEST(test_is_same_gf());
+ CALL_SUBTEST(test_apply_op());
+ CALL_SUBTEST(test_contained_in_list());
+ CALL_SUBTEST(test_arg_reductions());
+ CALL_SUBTEST(test_array_reverse_and_reduce());
+ CALL_SUBTEST(test_array_zip_and_apply());
+ CALL_SUBTEST(test_array_misc());
+}
diff --git a/unsupported/test/cxx11_non_blocking_thread_pool.cpp b/unsupported/test/cxx11_non_blocking_thread_pool.cpp
new file mode 100644
index 000000000..5f9bb938b
--- /dev/null
+++ b/unsupported/test/cxx11_non_blocking_thread_pool.cpp
@@ -0,0 +1,107 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2016 Dmitry Vyukov <dvyukov@google.com>
+// Copyright (C) 2016 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#define EIGEN_USE_THREADS
+#include "main.h"
+#include "Eigen/CXX11/ThreadPool"
+
+static void test_create_destroy_empty_pool()
+{
+  // Just create and destroy the pool. This spins up and tears down the
+  // worker threads. Ensure there are no issues in that logic.
+ for (int i = 0; i < 16; ++i) {
+ NonBlockingThreadPool tp(i);
+ }
+}
+
+
+static void test_parallelism()
+{
+  // Test that we never fail to match available tasks with idle threads.
+ const int kThreads = 16; // code below expects that this is a multiple of 4
+ NonBlockingThreadPool tp(kThreads);
+ VERIFY_IS_EQUAL(tp.NumThreads(), kThreads);
+ VERIFY_IS_EQUAL(tp.CurrentThreadId(), -1);
+ for (int iter = 0; iter < 100; ++iter) {
+ std::atomic<int> running(0);
+ std::atomic<int> done(0);
+ std::atomic<int> phase(0);
+ // Schedule kThreads tasks and ensure that they all are running.
+ for (int i = 0; i < kThreads; ++i) {
+ tp.Schedule([&]() {
+ const int thread_id = tp.CurrentThreadId();
+ VERIFY_GE(thread_id, 0);
+ VERIFY_LE(thread_id, kThreads - 1);
+ running++;
+ while (phase < 1) {
+ }
+ done++;
+ });
+ }
+ while (running != kThreads) {
+ }
+ running = 0;
+ phase = 1;
+ // Now, while the previous tasks exit, schedule another kThreads tasks and
+ // ensure that they are running.
+ for (int i = 0; i < kThreads; ++i) {
+ tp.Schedule([&, i]() {
+ running++;
+ while (phase < 2) {
+ }
+        // Once all tasks are running, half of the tasks exit, a quarter keep
+        // running, and a quarter schedule another 2 tasks each. Concurrently,
+        // the main thread schedules another quarter of the tasks. This gives
+        // us another kThreads tasks, and we ensure that they all are running.
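+        // (kThreads is a multiple of 4, so kThreads/4 survivors +
+        // 2*(kThreads/4) tasks scheduled here + kThreads/4 tasks from the
+        // main thread add up to kThreads running tasks again.)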
+ if (i < kThreads / 2) {
+ } else if (i < 3 * kThreads / 4) {
+ running++;
+ while (phase < 3) {
+ }
+ done++;
+ } else {
+ for (int j = 0; j < 2; ++j) {
+ tp.Schedule([&]() {
+ running++;
+ while (phase < 3) {
+ }
+ done++;
+ });
+ }
+ }
+ done++;
+ });
+ }
+ while (running != kThreads) {
+ }
+ running = 0;
+ phase = 2;
+ for (int i = 0; i < kThreads / 4; ++i) {
+ tp.Schedule([&]() {
+ running++;
+ while (phase < 3) {
+ }
+ done++;
+ });
+ }
+ while (running != kThreads) {
+ }
+ phase = 3;
+ while (done != 3 * kThreads) {
+ }
+ }
+}
+
+void test_cxx11_non_blocking_thread_pool()
+{
+ CALL_SUBTEST(test_create_destroy_empty_pool());
+ CALL_SUBTEST(test_parallelism());
+}
diff --git a/unsupported/test/cxx11_runqueue.cpp b/unsupported/test/cxx11_runqueue.cpp
new file mode 100644
index 000000000..91f690114
--- /dev/null
+++ b/unsupported/test/cxx11_runqueue.cpp
@@ -0,0 +1,235 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2016 Dmitry Vyukov <dvyukov@google.com>
+// Copyright (C) 2016 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#define EIGEN_USE_THREADS
+#include <cstdlib>
+#include "main.h"
+#include <Eigen/CXX11/ThreadPool>
+
+
+// Visual Studio doesn't implement a rand_r() function, since its
+// implementation of rand() is already thread-safe.
+int rand_reentrant(unsigned int* s) {
+#ifdef EIGEN_COMP_MSVC_STRICT
+ EIGEN_UNUSED_VARIABLE(s);
+ return rand();
+#else
+ return rand_r(s);
+#endif
+}
+
+void test_basic_runqueue()
+{
+ RunQueue<int, 4> q;
+ // Check empty state.
+ VERIFY(q.Empty());
+ VERIFY_IS_EQUAL(0u, q.Size());
+ VERIFY_IS_EQUAL(0, q.PopFront());
+ std::vector<int> stolen;
+ VERIFY_IS_EQUAL(0u, q.PopBackHalf(&stolen));
+ VERIFY_IS_EQUAL(0u, stolen.size());
+ // Push one front, pop one front.
+ VERIFY_IS_EQUAL(0, q.PushFront(1));
+ VERIFY_IS_EQUAL(1u, q.Size());
+ VERIFY_IS_EQUAL(1, q.PopFront());
+ VERIFY_IS_EQUAL(0u, q.Size());
+ // Push front to overflow.
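+  // (PushFront/PushBack return 0 on success and hand the rejected element
+  // back when the queue is full, which is what the checks below rely on.)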
+ VERIFY_IS_EQUAL(0, q.PushFront(2));
+ VERIFY_IS_EQUAL(1u, q.Size());
+ VERIFY_IS_EQUAL(0, q.PushFront(3));
+ VERIFY_IS_EQUAL(2u, q.Size());
+ VERIFY_IS_EQUAL(0, q.PushFront(4));
+ VERIFY_IS_EQUAL(3u, q.Size());
+ VERIFY_IS_EQUAL(0, q.PushFront(5));
+ VERIFY_IS_EQUAL(4u, q.Size());
+ VERIFY_IS_EQUAL(6, q.PushFront(6));
+ VERIFY_IS_EQUAL(4u, q.Size());
+ VERIFY_IS_EQUAL(5, q.PopFront());
+ VERIFY_IS_EQUAL(3u, q.Size());
+ VERIFY_IS_EQUAL(4, q.PopFront());
+ VERIFY_IS_EQUAL(2u, q.Size());
+ VERIFY_IS_EQUAL(3, q.PopFront());
+ VERIFY_IS_EQUAL(1u, q.Size());
+ VERIFY_IS_EQUAL(2, q.PopFront());
+ VERIFY_IS_EQUAL(0u, q.Size());
+ VERIFY_IS_EQUAL(0, q.PopFront());
+ // Push one back, pop one back.
+ VERIFY_IS_EQUAL(0, q.PushBack(7));
+ VERIFY_IS_EQUAL(1u, q.Size());
+ VERIFY_IS_EQUAL(1u, q.PopBackHalf(&stolen));
+ VERIFY_IS_EQUAL(1u, stolen.size());
+ VERIFY_IS_EQUAL(7, stolen[0]);
+ VERIFY_IS_EQUAL(0u, q.Size());
+ stolen.clear();
+ // Push back to overflow.
+ VERIFY_IS_EQUAL(0, q.PushBack(8));
+ VERIFY_IS_EQUAL(1u, q.Size());
+ VERIFY_IS_EQUAL(0, q.PushBack(9));
+ VERIFY_IS_EQUAL(2u, q.Size());
+ VERIFY_IS_EQUAL(0, q.PushBack(10));
+ VERIFY_IS_EQUAL(3u, q.Size());
+ VERIFY_IS_EQUAL(0, q.PushBack(11));
+ VERIFY_IS_EQUAL(4u, q.Size());
+ VERIFY_IS_EQUAL(12, q.PushBack(12));
+ VERIFY_IS_EQUAL(4u, q.Size());
+ // Pop back in halves.
+ VERIFY_IS_EQUAL(2u, q.PopBackHalf(&stolen));
+ VERIFY_IS_EQUAL(2u, stolen.size());
+ VERIFY_IS_EQUAL(10, stolen[0]);
+ VERIFY_IS_EQUAL(11, stolen[1]);
+ VERIFY_IS_EQUAL(2u, q.Size());
+ stolen.clear();
+ VERIFY_IS_EQUAL(1u, q.PopBackHalf(&stolen));
+ VERIFY_IS_EQUAL(1u, stolen.size());
+ VERIFY_IS_EQUAL(9, stolen[0]);
+ VERIFY_IS_EQUAL(1u, q.Size());
+ stolen.clear();
+ VERIFY_IS_EQUAL(1u, q.PopBackHalf(&stolen));
+ VERIFY_IS_EQUAL(1u, stolen.size());
+ VERIFY_IS_EQUAL(8, stolen[0]);
+ stolen.clear();
+ VERIFY_IS_EQUAL(0u, q.PopBackHalf(&stolen));
+ VERIFY_IS_EQUAL(0u, stolen.size());
+ // Empty again.
+ VERIFY(q.Empty());
+ VERIFY_IS_EQUAL(0u, q.Size());
+ VERIFY_IS_EQUAL(0, q.PushFront(1));
+ VERIFY_IS_EQUAL(0, q.PushFront(2));
+ VERIFY_IS_EQUAL(0, q.PushFront(3));
+ VERIFY_IS_EQUAL(1, q.PopBack());
+ VERIFY_IS_EQUAL(2, q.PopBack());
+ VERIFY_IS_EQUAL(3, q.PopBack());
+ VERIFY(q.Empty());
+ VERIFY_IS_EQUAL(0u, q.Size());
+}
+
+// test_empty_runqueue checks that the queue is never claimed to be empty when
+// it is in fact not. The emptiness property is a crucial part of the thread
+// pool blocking scheme, so we go to great lengths to ensure it holds. We
+// create a queue with 1 element, then repeatedly push 1 element (either front
+// or back, at random) and pop 1 element (either front or back, at random). The
+// queue therefore always contains at least 1 element, but otherwise changes
+// chaotically. Another thread constantly tests that the queue is not claimed
+// to be empty.
+void test_empty_runqueue()
+{
+ RunQueue<int, 4> q;
+ q.PushFront(1);
+ std::atomic<bool> done(false);
+ std::thread mutator([&q, &done]() {
+ unsigned rnd = 0;
+ std::vector<int> stolen;
+ for (int i = 0; i < 1 << 18; i++) {
+ if (rand_reentrant(&rnd) % 2)
+ VERIFY_IS_EQUAL(0, q.PushFront(1));
+ else
+ VERIFY_IS_EQUAL(0, q.PushBack(1));
+ if (rand_reentrant(&rnd) % 2)
+ VERIFY_IS_EQUAL(1, q.PopFront());
+ else {
+ for (;;) {
+ if (q.PopBackHalf(&stolen) == 1) {
+ stolen.clear();
+ break;
+ }
+ VERIFY_IS_EQUAL(0u, stolen.size());
+ }
+ }
+ }
+ done = true;
+ });
+ while (!done) {
+ VERIFY(!q.Empty());
+ int size = q.Size();
+ VERIFY_GE(size, 1);
+ VERIFY_LE(size, 2);
+ }
+ VERIFY_IS_EQUAL(1, q.PopFront());
+ mutator.join();
+}
+
+// test_stress_runqueue is a chaotic randomized test.
+// One thread (the owner) calls PushFront/PopFront, while the other threads
+// call PushBack/PopBackHalf. Ensure that we don't crash or deadlock and that
+// all sanity checks pass.
+void test_stress_runqueue()
+{
+ static const int kEvents = 1 << 18;
+ RunQueue<int, 8> q;
+ std::atomic<int> total(0);
+ std::vector<std::unique_ptr<std::thread>> threads;
+ threads.emplace_back(new std::thread([&q, &total]() {
+ int sum = 0;
+ int pushed = 1;
+ int popped = 1;
+ while (pushed < kEvents || popped < kEvents) {
+ if (pushed < kEvents) {
+ if (q.PushFront(pushed) == 0) {
+ sum += pushed;
+ pushed++;
+ }
+ }
+ if (popped < kEvents) {
+ int v = q.PopFront();
+ if (v != 0) {
+ sum -= v;
+ popped++;
+ }
+ }
+ }
+ total += sum;
+ }));
+ for (int i = 0; i < 2; i++) {
+ threads.emplace_back(new std::thread([&q, &total]() {
+ int sum = 0;
+ for (int j = 1; j < kEvents; j++) {
+ if (q.PushBack(j) == 0) {
+ sum += j;
+ continue;
+ }
+ EIGEN_THREAD_YIELD();
+ j--;
+ }
+ total += sum;
+ }));
+ threads.emplace_back(new std::thread([&q, &total]() {
+ int sum = 0;
+ std::vector<int> stolen;
+ for (int j = 1; j < kEvents;) {
+ if (q.PopBackHalf(&stolen) == 0) {
+ EIGEN_THREAD_YIELD();
+ continue;
+ }
+ while (stolen.size() && j < kEvents) {
+ int v = stolen.back();
+ stolen.pop_back();
+ VERIFY_IS_NOT_EQUAL(v, 0);
+ sum += v;
+ j++;
+ }
+ }
+ while (stolen.size()) {
+ int v = stolen.back();
+ stolen.pop_back();
+ VERIFY_IS_NOT_EQUAL(v, 0);
+ while ((v = q.PushBack(v)) != 0) EIGEN_THREAD_YIELD();
+ }
+ total -= sum;
+ }));
+ }
+ for (size_t i = 0; i < threads.size(); i++) threads[i]->join();
+ VERIFY(q.Empty());
+ VERIFY(total.load() == 0);
+}
+
+void test_cxx11_runqueue()
+{
+ CALL_SUBTEST_1(test_basic_runqueue());
+ CALL_SUBTEST_2(test_empty_runqueue());
+ CALL_SUBTEST_3(test_stress_runqueue());
+}
diff --git a/unsupported/test/cxx11_tensor_argmax.cpp b/unsupported/test/cxx11_tensor_argmax.cpp
new file mode 100644
index 000000000..037767270
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_argmax.cpp
@@ -0,0 +1,294 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2015 Eugene Brevdo <ebrevdo@google.com>
+// Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+
+#include <Eigen/CXX11/Tensor>
+
+using Eigen::Tensor;
+using Eigen::array;
+using Eigen::Tuple;
+
+template <int DataLayout>
+static void test_simple_index_tuples()
+{
+ Tensor<float, 4, DataLayout> tensor(2,3,5,7);
+ tensor.setRandom();
+ tensor = (tensor + tensor.constant(0.5)).log();
+
+ Tensor<Tuple<DenseIndex, float>, 4, DataLayout> index_tuples(2,3,5,7);
+ index_tuples = tensor.index_tuples();
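+  // index_tuples() pairs each linear coefficient index with its value, so
+  // coefficient n becomes Tuple(n, tensor.coeff(n)); the loop below checks
+  // exactly that.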
+
+ for (DenseIndex n = 0; n < 2*3*5*7; ++n) {
+ const Tuple<DenseIndex, float>& v = index_tuples.coeff(n);
+ VERIFY_IS_EQUAL(v.first, n);
+ VERIFY_IS_EQUAL(v.second, tensor.coeff(n));
+ }
+}
+
+template <int DataLayout>
+static void test_index_tuples_dim()
+{
+ Tensor<float, 4, DataLayout> tensor(2,3,5,7);
+ tensor.setRandom();
+ tensor = (tensor + tensor.constant(0.5)).log();
+
+ Tensor<Tuple<DenseIndex, float>, 4, DataLayout> index_tuples(2,3,5,7);
+
+ index_tuples = tensor.index_tuples();
+
+ for (Eigen::DenseIndex n = 0; n < tensor.size(); ++n) {
+    const Tuple<DenseIndex, float>& v = index_tuples(n);
+ VERIFY_IS_EQUAL(v.first, n);
+ VERIFY_IS_EQUAL(v.second, tensor(n));
+ }
+}
+
+template <int DataLayout>
+static void test_argmax_tuple_reducer()
+{
+ Tensor<float, 4, DataLayout> tensor(2,3,5,7);
+ tensor.setRandom();
+ tensor = (tensor + tensor.constant(0.5)).log();
+
+ Tensor<Tuple<DenseIndex, float>, 4, DataLayout> index_tuples(2,3,5,7);
+ index_tuples = tensor.index_tuples();
+
+ Tensor<Tuple<DenseIndex, float>, 0, DataLayout> reduced;
+ DimensionList<DenseIndex, 4> dims;
+ reduced = index_tuples.reduce(
+ dims, internal::ArgMaxTupleReducer<Tuple<DenseIndex, float> >());
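+  // Reducing over all four dimensions with ArgMaxTupleReducer leaves a
+  // rank-0 tensor holding the (linear index, value) pair of the maximum,
+  // so its .second must match tensor.maximum().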
+
+ Tensor<float, 0, DataLayout> maxi = tensor.maximum();
+
+ VERIFY_IS_EQUAL(maxi(), reduced(0).second);
+
+ array<DenseIndex, 3> reduce_dims;
+ for (int d = 0; d < 3; ++d) reduce_dims[d] = d;
+ Tensor<Tuple<DenseIndex, float>, 1, DataLayout> reduced_by_dims(7);
+ reduced_by_dims = index_tuples.reduce(
+ reduce_dims, internal::ArgMaxTupleReducer<Tuple<DenseIndex, float> >());
+
+ Tensor<float, 1, DataLayout> max_by_dims = tensor.maximum(reduce_dims);
+
+ for (int l = 0; l < 7; ++l) {
+ VERIFY_IS_EQUAL(max_by_dims(l), reduced_by_dims(l).second);
+ }
+}
+
+template <int DataLayout>
+static void test_argmin_tuple_reducer()
+{
+ Tensor<float, 4, DataLayout> tensor(2,3,5,7);
+ tensor.setRandom();
+ tensor = (tensor + tensor.constant(0.5)).log();
+
+ Tensor<Tuple<DenseIndex, float>, 4, DataLayout> index_tuples(2,3,5,7);
+ index_tuples = tensor.index_tuples();
+
+ Tensor<Tuple<DenseIndex, float>, 0, DataLayout> reduced;
+ DimensionList<DenseIndex, 4> dims;
+ reduced = index_tuples.reduce(
+ dims, internal::ArgMinTupleReducer<Tuple<DenseIndex, float> >());
+
+ Tensor<float, 0, DataLayout> mini = tensor.minimum();
+
+ VERIFY_IS_EQUAL(mini(), reduced(0).second);
+
+ array<DenseIndex, 3> reduce_dims;
+ for (int d = 0; d < 3; ++d) reduce_dims[d] = d;
+ Tensor<Tuple<DenseIndex, float>, 1, DataLayout> reduced_by_dims(7);
+ reduced_by_dims = index_tuples.reduce(
+ reduce_dims, internal::ArgMinTupleReducer<Tuple<DenseIndex, float> >());
+
+ Tensor<float, 1, DataLayout> min_by_dims = tensor.minimum(reduce_dims);
+
+ for (int l = 0; l < 7; ++l) {
+ VERIFY_IS_EQUAL(min_by_dims(l), reduced_by_dims(l).second);
+ }
+}
+
+template <int DataLayout>
+static void test_simple_argmax()
+{
+ Tensor<float, 4, DataLayout> tensor(2,3,5,7);
+ tensor.setRandom();
+ tensor = (tensor + tensor.constant(0.5)).log();
+ tensor(0,0,0,0) = 10.0;
+
+ Tensor<DenseIndex, 0, DataLayout> tensor_argmax;
+
+ tensor_argmax = tensor.argmax();
+
+ VERIFY_IS_EQUAL(tensor_argmax(0), 0);
+
+ tensor(1,2,4,6) = 20.0;
+
+ tensor_argmax = tensor.argmax();
+
+ VERIFY_IS_EQUAL(tensor_argmax(0), 2*3*5*7 - 1);
+}
+
+template <int DataLayout>
+static void test_simple_argmin()
+{
+ Tensor<float, 4, DataLayout> tensor(2,3,5,7);
+ tensor.setRandom();
+ tensor = (tensor + tensor.constant(0.5)).log();
+ tensor(0,0,0,0) = -10.0;
+
+ Tensor<DenseIndex, 0, DataLayout> tensor_argmin;
+
+ tensor_argmin = tensor.argmin();
+
+ VERIFY_IS_EQUAL(tensor_argmin(0), 0);
+
+ tensor(1,2,4,6) = -20.0;
+
+ tensor_argmin = tensor.argmin();
+
+ VERIFY_IS_EQUAL(tensor_argmin(0), 2*3*5*7 - 1);
+}
+
+template <int DataLayout>
+static void test_argmax_dim()
+{
+ Tensor<float, 4, DataLayout> tensor(2,3,5,7);
+ std::vector<int> dims {2, 3, 5, 7};
+
+ for (int dim = 0; dim < 4; ++dim) {
+ tensor.setRandom();
+ tensor = (tensor + tensor.constant(0.5)).log();
+
+ Tensor<DenseIndex, 3, DataLayout> tensor_argmax;
+ array<DenseIndex, 4> ix;
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 5; ++k) {
+ for (int l = 0; l < 7; ++l) {
+ ix[0] = i; ix[1] = j; ix[2] = k; ix[3] = l;
+ if (ix[dim] != 0) continue;
+ // suppose dim == 1, then for all i, k, l, set tensor(i, 0, k, l) = 10.0
+ tensor(ix) = 10.0;
+ }
+ }
+ }
+ }
+
+ tensor_argmax = tensor.argmax(dim);
+
+ VERIFY_IS_EQUAL(tensor_argmax.size(),
+ ptrdiff_t(2*3*5*7 / tensor.dimension(dim)));
+ for (ptrdiff_t n = 0; n < tensor_argmax.size(); ++n) {
+ // Expect max to be in the first index of the reduced dimension
+ VERIFY_IS_EQUAL(tensor_argmax.data()[n], 0);
+ }
+
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 5; ++k) {
+ for (int l = 0; l < 7; ++l) {
+ ix[0] = i; ix[1] = j; ix[2] = k; ix[3] = l;
+ if (ix[dim] != tensor.dimension(dim) - 1) continue;
+ // suppose dim == 1, then for all i, k, l, set tensor(i, 2, k, l) = 20.0
+ tensor(ix) = 20.0;
+ }
+ }
+ }
+ }
+
+ tensor_argmax = tensor.argmax(dim);
+
+ VERIFY_IS_EQUAL(tensor_argmax.size(),
+ ptrdiff_t(2*3*5*7 / tensor.dimension(dim)));
+ for (ptrdiff_t n = 0; n < tensor_argmax.size(); ++n) {
+ // Expect max to be in the last index of the reduced dimension
+ VERIFY_IS_EQUAL(tensor_argmax.data()[n], tensor.dimension(dim) - 1);
+ }
+ }
+}
+
+template <int DataLayout>
+static void test_argmin_dim()
+{
+ Tensor<float, 4, DataLayout> tensor(2,3,5,7);
+ std::vector<int> dims {2, 3, 5, 7};
+
+ for (int dim = 0; dim < 4; ++dim) {
+ tensor.setRandom();
+ tensor = (tensor + tensor.constant(0.5)).log();
+
+ Tensor<DenseIndex, 3, DataLayout> tensor_argmin;
+ array<DenseIndex, 4> ix;
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 5; ++k) {
+ for (int l = 0; l < 7; ++l) {
+ ix[0] = i; ix[1] = j; ix[2] = k; ix[3] = l;
+ if (ix[dim] != 0) continue;
+ // suppose dim == 1, then for all i, k, l, set tensor(i, 0, k, l) = -10.0
+ tensor(ix) = -10.0;
+ }
+ }
+ }
+ }
+
+ tensor_argmin = tensor.argmin(dim);
+
+ VERIFY_IS_EQUAL(tensor_argmin.size(),
+ ptrdiff_t(2*3*5*7 / tensor.dimension(dim)));
+ for (ptrdiff_t n = 0; n < tensor_argmin.size(); ++n) {
+ // Expect min to be in the first index of the reduced dimension
+ VERIFY_IS_EQUAL(tensor_argmin.data()[n], 0);
+ }
+
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 5; ++k) {
+ for (int l = 0; l < 7; ++l) {
+ ix[0] = i; ix[1] = j; ix[2] = k; ix[3] = l;
+ if (ix[dim] != tensor.dimension(dim) - 1) continue;
+ // suppose dim == 1, then for all i, k, l, set tensor(i, 2, k, l) = -20.0
+ tensor(ix) = -20.0;
+ }
+ }
+ }
+ }
+
+ tensor_argmin = tensor.argmin(dim);
+
+ VERIFY_IS_EQUAL(tensor_argmin.size(),
+ ptrdiff_t(2*3*5*7 / tensor.dimension(dim)));
+ for (ptrdiff_t n = 0; n < tensor_argmin.size(); ++n) {
+ // Expect min to be in the last index of the reduced dimension
+ VERIFY_IS_EQUAL(tensor_argmin.data()[n], tensor.dimension(dim) - 1);
+ }
+ }
+}
+
+void test_cxx11_tensor_argmax()
+{
+ CALL_SUBTEST(test_simple_index_tuples<RowMajor>());
+ CALL_SUBTEST(test_simple_index_tuples<ColMajor>());
+ CALL_SUBTEST(test_index_tuples_dim<RowMajor>());
+ CALL_SUBTEST(test_index_tuples_dim<ColMajor>());
+ CALL_SUBTEST(test_argmax_tuple_reducer<RowMajor>());
+ CALL_SUBTEST(test_argmax_tuple_reducer<ColMajor>());
+ CALL_SUBTEST(test_argmin_tuple_reducer<RowMajor>());
+ CALL_SUBTEST(test_argmin_tuple_reducer<ColMajor>());
+ CALL_SUBTEST(test_simple_argmax<RowMajor>());
+ CALL_SUBTEST(test_simple_argmax<ColMajor>());
+ CALL_SUBTEST(test_simple_argmin<RowMajor>());
+ CALL_SUBTEST(test_simple_argmin<ColMajor>());
+ CALL_SUBTEST(test_argmax_dim<RowMajor>());
+ CALL_SUBTEST(test_argmax_dim<ColMajor>());
+ CALL_SUBTEST(test_argmin_dim<RowMajor>());
+ CALL_SUBTEST(test_argmin_dim<ColMajor>());
+}
diff --git a/unsupported/test/cxx11_tensor_argmax_cuda.cu b/unsupported/test/cxx11_tensor_argmax_cuda.cu
new file mode 100644
index 000000000..653443dc5
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_argmax_cuda.cu
@@ -0,0 +1,254 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+#define EIGEN_TEST_NO_LONGDOUBLE
+#define EIGEN_TEST_FUNC cxx11_tensor_argmax_cuda
+#define EIGEN_USE_GPU
+
+#if defined __CUDACC_VER__ && __CUDACC_VER__ >= 70500
+#include <cuda_fp16.h>
+#endif
+#include "main.h"
+#include <unsupported/Eigen/CXX11/Tensor>
+
+using Eigen::Tensor;
+
+template <int Layout>
+void test_cuda_simple_argmax()
+{
+ Tensor<double, 3, Layout> in(Eigen::array<DenseIndex, 3>(72,53,97));
+ Tensor<DenseIndex, 1, Layout> out_max(Eigen::array<DenseIndex, 1>(1));
+ Tensor<DenseIndex, 1, Layout> out_min(Eigen::array<DenseIndex, 1>(1));
+ in.setRandom();
+ in *= in.constant(100.0);
+ in(0, 0, 0) = -1000.0;
+ in(71, 52, 96) = 1000.0;
+
+ std::size_t in_bytes = in.size() * sizeof(double);
+ std::size_t out_bytes = out_max.size() * sizeof(DenseIndex);
+
+ double* d_in;
+ DenseIndex* d_out_max;
+ DenseIndex* d_out_min;
+ cudaMalloc((void**)(&d_in), in_bytes);
+ cudaMalloc((void**)(&d_out_max), out_bytes);
+ cudaMalloc((void**)(&d_out_min), out_bytes);
+
+ cudaMemcpy(d_in, in.data(), in_bytes, cudaMemcpyHostToDevice);
+
+ Eigen::CudaStreamDevice stream;
+ Eigen::GpuDevice gpu_device(&stream);
+
+ Eigen::TensorMap<Eigen::Tensor<double, 3, Layout>, Aligned > gpu_in(d_in, Eigen::array<DenseIndex, 3>(72,53,97));
+ Eigen::TensorMap<Eigen::Tensor<DenseIndex, 1, Layout>, Aligned > gpu_out_max(d_out_max, Eigen::array<DenseIndex, 1>(1));
+ Eigen::TensorMap<Eigen::Tensor<DenseIndex, 1, Layout>, Aligned > gpu_out_min(d_out_min, Eigen::array<DenseIndex, 1>(1));
+
+ gpu_out_max.device(gpu_device) = gpu_in.argmax();
+ gpu_out_min.device(gpu_device) = gpu_in.argmin();
+
+ assert(cudaMemcpyAsync(out_max.data(), d_out_max, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
+ assert(cudaMemcpyAsync(out_min.data(), d_out_min, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
+ assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
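+  // The async copies are queued on the same stream that ran the argmax and
+  // argmin expressions, so synchronizing the stream makes the results
+  // visible on the host.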
+
+ VERIFY_IS_EQUAL(out_max(Eigen::array<DenseIndex, 1>(0)), 72*53*97 - 1);
+ VERIFY_IS_EQUAL(out_min(Eigen::array<DenseIndex, 1>(0)), 0);
+
+ cudaFree(d_in);
+ cudaFree(d_out_max);
+ cudaFree(d_out_min);
+}
+
+template <int DataLayout>
+void test_cuda_argmax_dim()
+{
+ Tensor<float, 4, DataLayout> tensor(2,3,5,7);
+ std::vector<int> dims;
+ dims.push_back(2); dims.push_back(3); dims.push_back(5); dims.push_back(7);
+
+ for (int dim = 0; dim < 4; ++dim) {
+ tensor.setRandom();
+ tensor = (tensor + tensor.constant(0.5)).log();
+
+ array<DenseIndex, 3> out_shape;
+ for (int d = 0; d < 3; ++d) out_shape[d] = (d < dim) ? dims[d] : dims[d+1];
+
+ Tensor<DenseIndex, 3, DataLayout> tensor_arg(out_shape);
+
+ array<DenseIndex, 4> ix;
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 5; ++k) {
+ for (int l = 0; l < 7; ++l) {
+ ix[0] = i; ix[1] = j; ix[2] = k; ix[3] = l;
+ if (ix[dim] != 0) continue;
+ // suppose dim == 1, then for all i, k, l, set tensor(i, 0, k, l) = 10.0
+ tensor(ix) = 10.0;
+ }
+ }
+ }
+ }
+
+ std::size_t in_bytes = tensor.size() * sizeof(float);
+ std::size_t out_bytes = tensor_arg.size() * sizeof(DenseIndex);
+
+ float* d_in;
+ DenseIndex* d_out;
+ cudaMalloc((void**)(&d_in), in_bytes);
+ cudaMalloc((void**)(&d_out), out_bytes);
+
+ cudaMemcpy(d_in, tensor.data(), in_bytes, cudaMemcpyHostToDevice);
+
+ Eigen::CudaStreamDevice stream;
+ Eigen::GpuDevice gpu_device(&stream);
+
+ Eigen::TensorMap<Eigen::Tensor<float, 4, DataLayout>, Aligned > gpu_in(d_in, Eigen::array<DenseIndex, 4>(2, 3, 5, 7));
+ Eigen::TensorMap<Eigen::Tensor<DenseIndex, 3, DataLayout>, Aligned > gpu_out(d_out, out_shape);
+
+ gpu_out.device(gpu_device) = gpu_in.argmax(dim);
+
+ assert(cudaMemcpyAsync(tensor_arg.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
+ assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
+
+ VERIFY_IS_EQUAL(tensor_arg.size(),
+ size_t(2*3*5*7 / tensor.dimension(dim)));
+
+ for (DenseIndex n = 0; n < tensor_arg.size(); ++n) {
+ // Expect max to be in the first index of the reduced dimension
+ VERIFY_IS_EQUAL(tensor_arg.data()[n], 0);
+ }
+
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 5; ++k) {
+ for (int l = 0; l < 7; ++l) {
+ ix[0] = i; ix[1] = j; ix[2] = k; ix[3] = l;
+ if (ix[dim] != tensor.dimension(dim) - 1) continue;
+ // suppose dim == 1, then for all i, k, l, set tensor(i, 2, k, l) = 20.0
+ tensor(ix) = 20.0;
+ }
+ }
+ }
+ }
+
+ cudaMemcpy(d_in, tensor.data(), in_bytes, cudaMemcpyHostToDevice);
+
+ gpu_out.device(gpu_device) = gpu_in.argmax(dim);
+
+ assert(cudaMemcpyAsync(tensor_arg.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
+ assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
+
+ for (DenseIndex n = 0; n < tensor_arg.size(); ++n) {
+ // Expect max to be in the last index of the reduced dimension
+ VERIFY_IS_EQUAL(tensor_arg.data()[n], tensor.dimension(dim) - 1);
+ }
+
+ cudaFree(d_in);
+ cudaFree(d_out);
+ }
+}
+
+template <int DataLayout>
+void test_cuda_argmin_dim()
+{
+ Tensor<float, 4, DataLayout> tensor(2,3,5,7);
+ std::vector<int> dims;
+ dims.push_back(2); dims.push_back(3); dims.push_back(5); dims.push_back(7);
+
+ for (int dim = 0; dim < 4; ++dim) {
+ tensor.setRandom();
+ tensor = (tensor + tensor.constant(0.5)).log();
+
+ array<DenseIndex, 3> out_shape;
+ for (int d = 0; d < 3; ++d) out_shape[d] = (d < dim) ? dims[d] : dims[d+1];
+
+ Tensor<DenseIndex, 3, DataLayout> tensor_arg(out_shape);
+
+ array<DenseIndex, 4> ix;
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 5; ++k) {
+ for (int l = 0; l < 7; ++l) {
+ ix[0] = i; ix[1] = j; ix[2] = k; ix[3] = l;
+ if (ix[dim] != 0) continue;
+          // suppose dim == 1, then for all i, k, l, set tensor(i, 0, k, l) = -10.0
+ tensor(ix) = -10.0;
+ }
+ }
+ }
+ }
+
+ std::size_t in_bytes = tensor.size() * sizeof(float);
+ std::size_t out_bytes = tensor_arg.size() * sizeof(DenseIndex);
+
+ float* d_in;
+ DenseIndex* d_out;
+ cudaMalloc((void**)(&d_in), in_bytes);
+ cudaMalloc((void**)(&d_out), out_bytes);
+
+ cudaMemcpy(d_in, tensor.data(), in_bytes, cudaMemcpyHostToDevice);
+
+ Eigen::CudaStreamDevice stream;
+ Eigen::GpuDevice gpu_device(&stream);
+
+ Eigen::TensorMap<Eigen::Tensor<float, 4, DataLayout>, Aligned > gpu_in(d_in, Eigen::array<DenseIndex, 4>(2, 3, 5, 7));
+ Eigen::TensorMap<Eigen::Tensor<DenseIndex, 3, DataLayout>, Aligned > gpu_out(d_out, out_shape);
+
+ gpu_out.device(gpu_device) = gpu_in.argmin(dim);
+
+ assert(cudaMemcpyAsync(tensor_arg.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
+ assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
+
+ VERIFY_IS_EQUAL(tensor_arg.size(),
+ 2*3*5*7 / tensor.dimension(dim));
+
+ for (DenseIndex n = 0; n < tensor_arg.size(); ++n) {
+ // Expect min to be in the first index of the reduced dimension
+ VERIFY_IS_EQUAL(tensor_arg.data()[n], 0);
+ }
+
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 5; ++k) {
+ for (int l = 0; l < 7; ++l) {
+ ix[0] = i; ix[1] = j; ix[2] = k; ix[3] = l;
+ if (ix[dim] != tensor.dimension(dim) - 1) continue;
+          // suppose dim == 1, then for all i, k, l, set tensor(i, 2, k, l) = -20.0
+ tensor(ix) = -20.0;
+ }
+ }
+ }
+ }
+
+ cudaMemcpy(d_in, tensor.data(), in_bytes, cudaMemcpyHostToDevice);
+
+ gpu_out.device(gpu_device) = gpu_in.argmin(dim);
+
+ assert(cudaMemcpyAsync(tensor_arg.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
+ assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
+
+ for (DenseIndex n = 0; n < tensor_arg.size(); ++n) {
+    // Expect min to be in the last index of the reduced dimension
+ VERIFY_IS_EQUAL(tensor_arg.data()[n], tensor.dimension(dim) - 1);
+ }
+
+ cudaFree(d_in);
+ cudaFree(d_out);
+ }
+}
+
+void test_cxx11_tensor_argmax_cuda()
+{
+ CALL_SUBTEST_1(test_cuda_simple_argmax<RowMajor>());
+ CALL_SUBTEST_1(test_cuda_simple_argmax<ColMajor>());
+ CALL_SUBTEST_2(test_cuda_argmax_dim<RowMajor>());
+ CALL_SUBTEST_2(test_cuda_argmax_dim<ColMajor>());
+ CALL_SUBTEST_3(test_cuda_argmin_dim<RowMajor>());
+ CALL_SUBTEST_3(test_cuda_argmin_dim<ColMajor>());
+}
diff --git a/unsupported/test/cxx11_tensor_assign.cpp b/unsupported/test/cxx11_tensor_assign.cpp
new file mode 100644
index 000000000..8fe85d83c
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_assign.cpp
@@ -0,0 +1,370 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+
+#include <Eigen/CXX11/Tensor>
+
+using Eigen::Tensor;
+using Eigen::RowMajor;
+
+static void test_1d()
+{
+ Tensor<int, 1> vec1(6);
+ Tensor<int, 1, RowMajor> vec2(6);
+ vec1(0) = 4; vec2(0) = 0;
+ vec1(1) = 8; vec2(1) = 1;
+ vec1(2) = 15; vec2(2) = 2;
+ vec1(3) = 16; vec2(3) = 3;
+ vec1(4) = 23; vec2(4) = 4;
+ vec1(5) = 42; vec2(5) = 5;
+
+ int col_major[6];
+ int row_major[6];
+ memset(col_major, 0, 6*sizeof(int));
+ memset(row_major, 0, 6*sizeof(int));
+ TensorMap<Tensor<int, 1> > vec3(col_major, 6);
+ TensorMap<Tensor<int, 1, RowMajor> > vec4(row_major, 6);
+
+ vec3 = vec1;
+ vec4 = vec2;
+
+ VERIFY_IS_EQUAL(vec3(0), 4);
+ VERIFY_IS_EQUAL(vec3(1), 8);
+ VERIFY_IS_EQUAL(vec3(2), 15);
+ VERIFY_IS_EQUAL(vec3(3), 16);
+ VERIFY_IS_EQUAL(vec3(4), 23);
+ VERIFY_IS_EQUAL(vec3(5), 42);
+
+ VERIFY_IS_EQUAL(vec4(0), 0);
+ VERIFY_IS_EQUAL(vec4(1), 1);
+ VERIFY_IS_EQUAL(vec4(2), 2);
+ VERIFY_IS_EQUAL(vec4(3), 3);
+ VERIFY_IS_EQUAL(vec4(4), 4);
+ VERIFY_IS_EQUAL(vec4(5), 5);
+
+ vec1.setZero();
+ vec2.setZero();
+ vec1 = vec3;
+ vec2 = vec4;
+
+ VERIFY_IS_EQUAL(vec1(0), 4);
+ VERIFY_IS_EQUAL(vec1(1), 8);
+ VERIFY_IS_EQUAL(vec1(2), 15);
+ VERIFY_IS_EQUAL(vec1(3), 16);
+ VERIFY_IS_EQUAL(vec1(4), 23);
+ VERIFY_IS_EQUAL(vec1(5), 42);
+
+ VERIFY_IS_EQUAL(vec2(0), 0);
+ VERIFY_IS_EQUAL(vec2(1), 1);
+ VERIFY_IS_EQUAL(vec2(2), 2);
+ VERIFY_IS_EQUAL(vec2(3), 3);
+ VERIFY_IS_EQUAL(vec2(4), 4);
+ VERIFY_IS_EQUAL(vec2(5), 5);
+}
+
+static void test_2d()
+{
+ Tensor<int, 2> mat1(2,3);
+ Tensor<int, 2, RowMajor> mat2(2,3);
+
+ mat1(0,0) = 0;
+ mat1(0,1) = 1;
+ mat1(0,2) = 2;
+ mat1(1,0) = 3;
+ mat1(1,1) = 4;
+ mat1(1,2) = 5;
+
+ mat2(0,0) = 0;
+ mat2(0,1) = 1;
+ mat2(0,2) = 2;
+ mat2(1,0) = 3;
+ mat2(1,1) = 4;
+ mat2(1,2) = 5;
+
+ int col_major[6];
+ int row_major[6];
+ memset(col_major, 0, 6*sizeof(int));
+ memset(row_major, 0, 6*sizeof(int));
+  TensorMap<Tensor<int, 2> > mat3(col_major, 2, 3);
+  TensorMap<Tensor<int, 2, RowMajor> > mat4(row_major, 2, 3);
+
+ mat3 = mat1;
+ mat4 = mat2;
+
+ VERIFY_IS_EQUAL(mat3(0,0), 0);
+ VERIFY_IS_EQUAL(mat3(0,1), 1);
+ VERIFY_IS_EQUAL(mat3(0,2), 2);
+ VERIFY_IS_EQUAL(mat3(1,0), 3);
+ VERIFY_IS_EQUAL(mat3(1,1), 4);
+ VERIFY_IS_EQUAL(mat3(1,2), 5);
+
+ VERIFY_IS_EQUAL(mat4(0,0), 0);
+ VERIFY_IS_EQUAL(mat4(0,1), 1);
+ VERIFY_IS_EQUAL(mat4(0,2), 2);
+ VERIFY_IS_EQUAL(mat4(1,0), 3);
+ VERIFY_IS_EQUAL(mat4(1,1), 4);
+ VERIFY_IS_EQUAL(mat4(1,2), 5);
+
+ mat1.setZero();
+ mat2.setZero();
+ mat1 = mat3;
+ mat2 = mat4;
+
+ VERIFY_IS_EQUAL(mat1(0,0), 0);
+ VERIFY_IS_EQUAL(mat1(0,1), 1);
+ VERIFY_IS_EQUAL(mat1(0,2), 2);
+ VERIFY_IS_EQUAL(mat1(1,0), 3);
+ VERIFY_IS_EQUAL(mat1(1,1), 4);
+ VERIFY_IS_EQUAL(mat1(1,2), 5);
+
+ VERIFY_IS_EQUAL(mat2(0,0), 0);
+ VERIFY_IS_EQUAL(mat2(0,1), 1);
+ VERIFY_IS_EQUAL(mat2(0,2), 2);
+ VERIFY_IS_EQUAL(mat2(1,0), 3);
+ VERIFY_IS_EQUAL(mat2(1,1), 4);
+ VERIFY_IS_EQUAL(mat2(1,2), 5);
+}
+
+static void test_3d()
+{
+ Tensor<int, 3> mat1(2,3,7);
+ Tensor<int, 3, RowMajor> mat2(2,3,7);
+
+ int val = 0;
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 7; ++k) {
+ mat1(i,j,k) = val;
+ mat2(i,j,k) = val;
+ val++;
+ }
+ }
+ }
+
+ int col_major[2*3*7];
+ int row_major[2*3*7];
+ memset(col_major, 0, 2*3*7*sizeof(int));
+ memset(row_major, 0, 2*3*7*sizeof(int));
+ TensorMap<Tensor<int, 3> > mat3(col_major, 2, 3, 7);
+ TensorMap<Tensor<int, 3, RowMajor> > mat4(row_major, 2, 3, 7);
+
+ mat3 = mat1;
+ mat4 = mat2;
+
+ val = 0;
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 7; ++k) {
+ VERIFY_IS_EQUAL(mat3(i,j,k), val);
+ VERIFY_IS_EQUAL(mat4(i,j,k), val);
+ val++;
+ }
+ }
+ }
+
+ mat1.setZero();
+ mat2.setZero();
+ mat1 = mat3;
+ mat2 = mat4;
+
+ val = 0;
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 7; ++k) {
+ VERIFY_IS_EQUAL(mat1(i,j,k), val);
+ VERIFY_IS_EQUAL(mat2(i,j,k), val);
+ val++;
+ }
+ }
+ }
+}
+
+static void test_same_type()
+{
+ Tensor<int, 1> orig_tensor(5);
+ Tensor<int, 1> dest_tensor(5);
+ orig_tensor.setRandom();
+ dest_tensor.setRandom();
+ int* orig_data = orig_tensor.data();
+ int* dest_data = dest_tensor.data();
+ dest_tensor = orig_tensor;
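+  // Assigning between tensors of the same type and size must reuse the
+  // destination's existing buffer; the data() pointer checks below verify
+  // that no reallocation took place.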
+ VERIFY_IS_EQUAL(orig_tensor.data(), orig_data);
+ VERIFY_IS_EQUAL(dest_tensor.data(), dest_data);
+ for (int i = 0; i < 5; ++i) {
+ VERIFY_IS_EQUAL(dest_tensor(i), orig_tensor(i));
+ }
+
+ TensorFixedSize<int, Sizes<5> > orig_array;
+ TensorFixedSize<int, Sizes<5> > dest_array;
+ orig_array.setRandom();
+ dest_array.setRandom();
+ orig_data = orig_array.data();
+ dest_data = dest_array.data();
+ dest_array = orig_array;
+ VERIFY_IS_EQUAL(orig_array.data(), orig_data);
+ VERIFY_IS_EQUAL(dest_array.data(), dest_data);
+ for (int i = 0; i < 5; ++i) {
+ VERIFY_IS_EQUAL(dest_array(i), orig_array(i));
+ }
+
+ int orig[5] = {1, 2, 3, 4, 5};
+ int dest[5] = {6, 7, 8, 9, 10};
+ TensorMap<Tensor<int, 1> > orig_map(orig, 5);
+ TensorMap<Tensor<int, 1> > dest_map(dest, 5);
+ orig_data = orig_map.data();
+ dest_data = dest_map.data();
+ dest_map = orig_map;
+ VERIFY_IS_EQUAL(orig_map.data(), orig_data);
+ VERIFY_IS_EQUAL(dest_map.data(), dest_data);
+ for (int i = 0; i < 5; ++i) {
+ VERIFY_IS_EQUAL(dest[i], i+1);
+ }
+}
+
+static void test_auto_resize()
+{
+ Tensor<int, 1> tensor1;
+ Tensor<int, 1> tensor2(3);
+ Tensor<int, 1> tensor3(5);
+ Tensor<int, 1> tensor4(7);
+
+ Tensor<int, 1> new_tensor(5);
+ new_tensor.setRandom();
+
+ tensor1 = tensor2 = tensor3 = tensor4 = new_tensor;
+
+ VERIFY_IS_EQUAL(tensor1.dimension(0), new_tensor.dimension(0));
+ VERIFY_IS_EQUAL(tensor2.dimension(0), new_tensor.dimension(0));
+ VERIFY_IS_EQUAL(tensor3.dimension(0), new_tensor.dimension(0));
+ VERIFY_IS_EQUAL(tensor4.dimension(0), new_tensor.dimension(0));
+ for (int i = 0; i < new_tensor.dimension(0); ++i) {
+ VERIFY_IS_EQUAL(tensor1(i), new_tensor(i));
+ VERIFY_IS_EQUAL(tensor2(i), new_tensor(i));
+ VERIFY_IS_EQUAL(tensor3(i), new_tensor(i));
+ VERIFY_IS_EQUAL(tensor4(i), new_tensor(i));
+ }
+}
+
+
+static void test_compound_assign()
+{
+ Tensor<int, 1> start_tensor(10);
+ Tensor<int, 1> offset_tensor(10);
+ start_tensor.setRandom();
+ offset_tensor.setRandom();
+
+ Tensor<int, 1> tensor = start_tensor;
+ tensor += offset_tensor;
+ for (int i = 0; i < 10; ++i) {
+ VERIFY_IS_EQUAL(tensor(i), start_tensor(i) + offset_tensor(i));
+ }
+
+ tensor = start_tensor;
+ tensor -= offset_tensor;
+ for (int i = 0; i < 10; ++i) {
+ VERIFY_IS_EQUAL(tensor(i), start_tensor(i) - offset_tensor(i));
+ }
+
+ tensor = start_tensor;
+ tensor *= offset_tensor;
+ for (int i = 0; i < 10; ++i) {
+ VERIFY_IS_EQUAL(tensor(i), start_tensor(i) * offset_tensor(i));
+ }
+
+ tensor = start_tensor;
+ tensor /= offset_tensor;
+ for (int i = 0; i < 10; ++i) {
+ VERIFY_IS_EQUAL(tensor(i), start_tensor(i) / offset_tensor(i));
+ }
+}
+
+static void test_std_initializers_tensor() {
+#if EIGEN_HAS_VARIADIC_TEMPLATES
+ Tensor<int, 1> a(3);
+ a.setValues({0, 1, 2});
+ VERIFY_IS_EQUAL(a(0), 0);
+ VERIFY_IS_EQUAL(a(1), 1);
+ VERIFY_IS_EQUAL(a(2), 2);
+
+ // It fills the top-left slice.
+ a.setValues({10, 20});
+ VERIFY_IS_EQUAL(a(0), 10);
+ VERIFY_IS_EQUAL(a(1), 20);
+ VERIFY_IS_EQUAL(a(2), 2);
+
+ // Chaining.
+ Tensor<int, 1> a2(3);
+ a2 = a.setValues({100, 200, 300});
+ VERIFY_IS_EQUAL(a(0), 100);
+ VERIFY_IS_EQUAL(a(1), 200);
+ VERIFY_IS_EQUAL(a(2), 300);
+ VERIFY_IS_EQUAL(a2(0), 100);
+ VERIFY_IS_EQUAL(a2(1), 200);
+ VERIFY_IS_EQUAL(a2(2), 300);
+
+ Tensor<int, 2> b(2, 3);
+ b.setValues({{0, 1, 2}, {3, 4, 5}});
+ VERIFY_IS_EQUAL(b(0, 0), 0);
+ VERIFY_IS_EQUAL(b(0, 1), 1);
+ VERIFY_IS_EQUAL(b(0, 2), 2);
+ VERIFY_IS_EQUAL(b(1, 0), 3);
+ VERIFY_IS_EQUAL(b(1, 1), 4);
+ VERIFY_IS_EQUAL(b(1, 2), 5);
+
+ // It fills the top-left slice.
+ b.setValues({{10, 20}, {30}});
+ VERIFY_IS_EQUAL(b(0, 0), 10);
+ VERIFY_IS_EQUAL(b(0, 1), 20);
+ VERIFY_IS_EQUAL(b(0, 2), 2);
+ VERIFY_IS_EQUAL(b(1, 0), 30);
+ VERIFY_IS_EQUAL(b(1, 1), 4);
+ VERIFY_IS_EQUAL(b(1, 2), 5);
+
+ Eigen::Tensor<int, 3> c(3, 2, 4);
+ c.setValues({{{0, 1, 2, 3}, {4, 5, 6, 7}},
+ {{10, 11, 12, 13}, {14, 15, 16, 17}},
+ {{20, 21, 22, 23}, {24, 25, 26, 27}}});
+ VERIFY_IS_EQUAL(c(0, 0, 0), 0);
+ VERIFY_IS_EQUAL(c(0, 0, 1), 1);
+ VERIFY_IS_EQUAL(c(0, 0, 2), 2);
+ VERIFY_IS_EQUAL(c(0, 0, 3), 3);
+ VERIFY_IS_EQUAL(c(0, 1, 0), 4);
+ VERIFY_IS_EQUAL(c(0, 1, 1), 5);
+ VERIFY_IS_EQUAL(c(0, 1, 2), 6);
+ VERIFY_IS_EQUAL(c(0, 1, 3), 7);
+ VERIFY_IS_EQUAL(c(1, 0, 0), 10);
+ VERIFY_IS_EQUAL(c(1, 0, 1), 11);
+ VERIFY_IS_EQUAL(c(1, 0, 2), 12);
+ VERIFY_IS_EQUAL(c(1, 0, 3), 13);
+ VERIFY_IS_EQUAL(c(1, 1, 0), 14);
+ VERIFY_IS_EQUAL(c(1, 1, 1), 15);
+ VERIFY_IS_EQUAL(c(1, 1, 2), 16);
+ VERIFY_IS_EQUAL(c(1, 1, 3), 17);
+ VERIFY_IS_EQUAL(c(2, 0, 0), 20);
+ VERIFY_IS_EQUAL(c(2, 0, 1), 21);
+ VERIFY_IS_EQUAL(c(2, 0, 2), 22);
+ VERIFY_IS_EQUAL(c(2, 0, 3), 23);
+ VERIFY_IS_EQUAL(c(2, 1, 0), 24);
+ VERIFY_IS_EQUAL(c(2, 1, 1), 25);
+ VERIFY_IS_EQUAL(c(2, 1, 2), 26);
+ VERIFY_IS_EQUAL(c(2, 1, 3), 27);
+#endif // EIGEN_HAS_VARIADIC_TEMPLATES
+}
+
+void test_cxx11_tensor_assign()
+{
+ CALL_SUBTEST(test_1d());
+ CALL_SUBTEST(test_2d());
+ CALL_SUBTEST(test_3d());
+ CALL_SUBTEST(test_same_type());
+ CALL_SUBTEST(test_auto_resize());
+ CALL_SUBTEST(test_compound_assign());
+ CALL_SUBTEST(test_std_initializers_tensor());
+}
diff --git a/unsupported/test/cxx11_tensor_broadcast_sycl.cpp b/unsupported/test/cxx11_tensor_broadcast_sycl.cpp
new file mode 100644
index 000000000..7201bfe37
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_broadcast_sycl.cpp
@@ -0,0 +1,74 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2016
+// Mehdi Goli Codeplay Software Ltd.
+// Ralph Potter Codeplay Software Ltd.
+// Luke Iwanski Codeplay Software Ltd.
+// Contact: <eigen@codeplay.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#define EIGEN_TEST_NO_LONGDOUBLE
+#define EIGEN_TEST_NO_COMPLEX
+#define EIGEN_TEST_FUNC cxx11_tensor_broadcast_sycl
+#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int
+#define EIGEN_USE_SYCL
+
+#include "main.h"
+#include <unsupported/Eigen/CXX11/Tensor>
+
+using Eigen::array;
+using Eigen::SyclDevice;
+using Eigen::Tensor;
+using Eigen::TensorMap;
+
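+// Follows the usual device-offload pattern: allocate buffers through the
+// SyclDevice, wrap them in TensorMaps, evaluate the broadcast expression on
+// the device, and copy the result back to the host for verification.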
+static void test_broadcast_sycl(const Eigen::SyclDevice &sycl_device) {
+ // BROADCAST test:
+ array<int, 4> in_range = {{2, 3, 5, 7}};
+ array<int, 4> broadcasts = {{2, 3, 1, 4}};
+ array<int, 4> out_range; // = in_range * broadcasts
+ for (size_t i = 0; i < out_range.size(); ++i)
+ out_range[i] = in_range[i] * broadcasts[i];
+
+ Tensor<float, 4> input(in_range);
+ Tensor<float, 4> out(out_range);
+
+ for (size_t i = 0; i < in_range.size(); ++i)
+ VERIFY_IS_EQUAL(out.dimension(i), out_range[i]);
+
+
+ for (int i = 0; i < input.size(); ++i)
+ input(i) = static_cast<float>(i);
+
+ float * gpu_in_data = static_cast<float*>(sycl_device.allocate(input.dimensions().TotalSize()*sizeof(float)));
+ float * gpu_out_data = static_cast<float*>(sycl_device.allocate(out.dimensions().TotalSize()*sizeof(float)));
+
+ TensorMap<Tensor<float, 4>> gpu_in(gpu_in_data, in_range);
+ TensorMap<Tensor<float, 4>> gpu_out(gpu_out_data, out_range);
+ sycl_device.memcpyHostToDevice(gpu_in_data, input.data(),(input.dimensions().TotalSize())*sizeof(float));
+ gpu_out.device(sycl_device) = gpu_in.broadcast(broadcasts);
+ sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.dimensions().TotalSize())*sizeof(float));
+
+ for (int i = 0; i < 4; ++i) {
+ for (int j = 0; j < 9; ++j) {
+ for (int k = 0; k < 5; ++k) {
+ for (int l = 0; l < 28; ++l) {
+ VERIFY_IS_APPROX(input(i%2,j%3,k%5,l%7), out(i,j,k,l));
+ }
+ }
+ }
+ }
+ printf("Broadcast Test Passed\n");
+ sycl_device.deallocate(gpu_in_data);
+ sycl_device.deallocate(gpu_out_data);
+}
+
+void test_cxx11_tensor_broadcast_sycl() {
+ cl::sycl::gpu_selector s;
+ Eigen::SyclDevice sycl_device(s);
+ CALL_SUBTEST(test_broadcast_sycl(sycl_device));
+}
diff --git a/unsupported/test/cxx11_tensor_broadcasting.cpp b/unsupported/test/cxx11_tensor_broadcasting.cpp
new file mode 100644
index 000000000..5c0ea5889
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_broadcasting.cpp
@@ -0,0 +1,194 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+
+#include <Eigen/CXX11/Tensor>
+
+using Eigen::Tensor;
+
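+// broadcast(b) tiles the input b[d] times along each dimension d, so a
+// (2,3,5,7) tensor broadcast by (2,3,1,4) yields a (4,9,5,28) result whose
+// element (i,j,k,l) equals input(i%2, j%3, k%5, l%7).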
+template <int DataLayout>
+static void test_simple_broadcasting()
+{
+ Tensor<float, 4, DataLayout> tensor(2,3,5,7);
+ tensor.setRandom();
+ array<ptrdiff_t, 4> broadcasts;
+ broadcasts[0] = 1;
+ broadcasts[1] = 1;
+ broadcasts[2] = 1;
+ broadcasts[3] = 1;
+
+ Tensor<float, 4, DataLayout> no_broadcast;
+ no_broadcast = tensor.broadcast(broadcasts);
+
+ VERIFY_IS_EQUAL(no_broadcast.dimension(0), 2);
+ VERIFY_IS_EQUAL(no_broadcast.dimension(1), 3);
+ VERIFY_IS_EQUAL(no_broadcast.dimension(2), 5);
+ VERIFY_IS_EQUAL(no_broadcast.dimension(3), 7);
+
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 5; ++k) {
+ for (int l = 0; l < 7; ++l) {
+ VERIFY_IS_EQUAL(tensor(i,j,k,l), no_broadcast(i,j,k,l));
+ }
+ }
+ }
+ }
+
+ broadcasts[0] = 2;
+ broadcasts[1] = 3;
+ broadcasts[2] = 1;
+ broadcasts[3] = 4;
+ Tensor<float, 4, DataLayout> broadcast;
+ broadcast = tensor.broadcast(broadcasts);
+
+ VERIFY_IS_EQUAL(broadcast.dimension(0), 4);
+ VERIFY_IS_EQUAL(broadcast.dimension(1), 9);
+ VERIFY_IS_EQUAL(broadcast.dimension(2), 5);
+ VERIFY_IS_EQUAL(broadcast.dimension(3), 28);
+
+ for (int i = 0; i < 4; ++i) {
+ for (int j = 0; j < 9; ++j) {
+ for (int k = 0; k < 5; ++k) {
+ for (int l = 0; l < 28; ++l) {
+ VERIFY_IS_EQUAL(tensor(i%2,j%3,k%5,l%7), broadcast(i,j,k,l));
+ }
+ }
+ }
+ }
+}
+
+
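+// Runs once with an inner dimension of 8 (packet-friendly) and once with 11,
+// which also exercises the unaligned remainder of the vectorized path.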
+template <int DataLayout>
+static void test_vectorized_broadcasting()
+{
+ Tensor<float, 3, DataLayout> tensor(8,3,5);
+ tensor.setRandom();
+ array<ptrdiff_t, 3> broadcasts;
+ broadcasts[0] = 2;
+ broadcasts[1] = 3;
+ broadcasts[2] = 4;
+
+ Tensor<float, 3, DataLayout> broadcast;
+ broadcast = tensor.broadcast(broadcasts);
+
+ VERIFY_IS_EQUAL(broadcast.dimension(0), 16);
+ VERIFY_IS_EQUAL(broadcast.dimension(1), 9);
+ VERIFY_IS_EQUAL(broadcast.dimension(2), 20);
+
+ for (int i = 0; i < 16; ++i) {
+ for (int j = 0; j < 9; ++j) {
+ for (int k = 0; k < 20; ++k) {
+ VERIFY_IS_EQUAL(tensor(i%8,j%3,k%5), broadcast(i,j,k));
+ }
+ }
+ }
+
+ tensor.resize(11,3,5);
+ tensor.setRandom();
+ broadcast = tensor.broadcast(broadcasts);
+
+ VERIFY_IS_EQUAL(broadcast.dimension(0), 22);
+ VERIFY_IS_EQUAL(broadcast.dimension(1), 9);
+ VERIFY_IS_EQUAL(broadcast.dimension(2), 20);
+
+ for (int i = 0; i < 22; ++i) {
+ for (int j = 0; j < 9; ++j) {
+ for (int k = 0; k < 20; ++k) {
+ VERIFY_IS_EQUAL(tensor(i%11,j%3,k%5), broadcast(i,j,k));
+ }
+ }
+ }
+}
+
+
+template <int DataLayout>
+static void test_static_broadcasting()
+{
+ Tensor<float, 3, DataLayout> tensor(8,3,5);
+ tensor.setRandom();
+
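+  // With constexpr support the broadcast factors can be carried in the type
+  // via Eigen::IndexList, making them compile-time constants; otherwise a
+  // runtime Eigen::array is used.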
+#if EIGEN_HAS_CONSTEXPR
+ Eigen::IndexList<Eigen::type2index<2>, Eigen::type2index<3>, Eigen::type2index<4>> broadcasts;
+#else
+ Eigen::array<int, 3> broadcasts;
+ broadcasts[0] = 2;
+ broadcasts[1] = 3;
+ broadcasts[2] = 4;
+#endif
+
+ Tensor<float, 3, DataLayout> broadcast;
+ broadcast = tensor.broadcast(broadcasts);
+
+ VERIFY_IS_EQUAL(broadcast.dimension(0), 16);
+ VERIFY_IS_EQUAL(broadcast.dimension(1), 9);
+ VERIFY_IS_EQUAL(broadcast.dimension(2), 20);
+
+ for (int i = 0; i < 16; ++i) {
+ for (int j = 0; j < 9; ++j) {
+ for (int k = 0; k < 20; ++k) {
+ VERIFY_IS_EQUAL(tensor(i%8,j%3,k%5), broadcast(i,j,k));
+ }
+ }
+ }
+
+ tensor.resize(11,3,5);
+ tensor.setRandom();
+ broadcast = tensor.broadcast(broadcasts);
+
+ VERIFY_IS_EQUAL(broadcast.dimension(0), 22);
+ VERIFY_IS_EQUAL(broadcast.dimension(1), 9);
+ VERIFY_IS_EQUAL(broadcast.dimension(2), 20);
+
+ for (int i = 0; i < 22; ++i) {
+ for (int j = 0; j < 9; ++j) {
+ for (int k = 0; k < 20; ++k) {
+ VERIFY_IS_EQUAL(tensor(i%11,j%3,k%5), broadcast(i,j,k));
+ }
+ }
+ }
+}
+
+
+template <int DataLayout>
+static void test_fixed_size_broadcasting()
+{
+ // Need to add a [] operator to the Size class for this to work
+#if 0
+ Tensor<float, 1, DataLayout> t1(10);
+ t1.setRandom();
+ TensorFixedSize<float, Sizes<1>, DataLayout> t2;
+ t2 = t2.constant(20.0f);
+
+ Tensor<float, 1, DataLayout> t3 = t1 + t2.broadcast(Eigen::array<int, 1>{{10}});
+ for (int i = 0; i < 10; ++i) {
+ VERIFY_IS_APPROX(t3(i), t1(i) + t2(0));
+ }
+
+ TensorMap<TensorFixedSize<float, Sizes<1>, DataLayout> > t4(t2.data(), {{1}});
+ Tensor<float, 1, DataLayout> t5 = t1 + t4.broadcast(Eigen::array<int, 1>{{10}});
+ for (int i = 0; i < 10; ++i) {
+ VERIFY_IS_APPROX(t5(i), t1(i) + t2(0));
+ }
+#endif
+}
+
+
+void test_cxx11_tensor_broadcasting()
+{
+ CALL_SUBTEST(test_simple_broadcasting<ColMajor>());
+ CALL_SUBTEST(test_simple_broadcasting<RowMajor>());
+ CALL_SUBTEST(test_vectorized_broadcasting<ColMajor>());
+ CALL_SUBTEST(test_vectorized_broadcasting<RowMajor>());
+ CALL_SUBTEST(test_static_broadcasting<ColMajor>());
+ CALL_SUBTEST(test_static_broadcasting<RowMajor>());
+ CALL_SUBTEST(test_fixed_size_broadcasting<ColMajor>());
+ CALL_SUBTEST(test_fixed_size_broadcasting<RowMajor>());
+}
diff --git a/unsupported/test/cxx11_tensor_cast_float16_cuda.cu b/unsupported/test/cxx11_tensor_cast_float16_cuda.cu
new file mode 100644
index 000000000..88c233994
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_cast_float16_cuda.cu
@@ -0,0 +1,82 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2016 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#define EIGEN_TEST_NO_LONGDOUBLE
+#define EIGEN_TEST_NO_COMPLEX
+#define EIGEN_TEST_FUNC cxx11_tensor_cast_float16_cuda
+#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int
+#define EIGEN_USE_GPU
+
+#if defined __CUDACC_VER__ && __CUDACC_VER__ >= 70500
+#include <cuda_fp16.h>
+#endif
+#include "main.h"
+#include <unsupported/Eigen/CXX11/Tensor>
+
+using Eigen::Tensor;
+
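+// Round-trips float -> half -> float on the GPU; the result should match the
+// original values to within half precision, hence VERIFY_IS_APPROX.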
+void test_cuda_conversion() {
+ Eigen::CudaStreamDevice stream;
+ Eigen::GpuDevice gpu_device(&stream);
+ int num_elem = 101;
+
+ Tensor<float, 1> floats(num_elem);
+ floats.setRandom();
+
+ float* d_float = (float*)gpu_device.allocate(num_elem * sizeof(float));
+ Eigen::half* d_half = (Eigen::half*)gpu_device.allocate(num_elem * sizeof(Eigen::half));
+ float* d_conv = (float*)gpu_device.allocate(num_elem * sizeof(float));
+
+ Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_float(
+ d_float, num_elem);
+ Eigen::TensorMap<Eigen::Tensor<Eigen::half, 1>, Eigen::Aligned> gpu_half(
+ d_half, num_elem);
+ Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_conv(
+ d_conv, num_elem);
+
+ gpu_device.memcpyHostToDevice(d_float, floats.data(), num_elem*sizeof(float));
+
+ gpu_half.device(gpu_device) = gpu_float.cast<Eigen::half>();
+ gpu_conv.device(gpu_device) = gpu_half.cast<float>();
+
+ Tensor<float, 1> initial(num_elem);
+ Tensor<float, 1> final(num_elem);
+ gpu_device.memcpyDeviceToHost(initial.data(), d_float, num_elem*sizeof(float));
+ gpu_device.memcpyDeviceToHost(final.data(), d_conv, num_elem*sizeof(float));
+ gpu_device.synchronize();
+
+ for (int i = 0; i < num_elem; ++i) {
+ VERIFY_IS_APPROX(initial(i), final(i));
+ }
+
+ gpu_device.deallocate(d_float);
+ gpu_device.deallocate(d_half);
+ gpu_device.deallocate(d_conv);
+}
+
+
+void test_fallback_conversion() {
+ int num_elem = 101;
+ Tensor<float, 1> floats(num_elem);
+ floats.setRandom();
+
+ Eigen::Tensor<Eigen::half, 1> halfs = floats.cast<Eigen::half>();
+ Eigen::Tensor<float, 1> conv = halfs.cast<float>();
+
+ for (int i = 0; i < num_elem; ++i) {
+ VERIFY_IS_APPROX(floats(i), conv(i));
+ }
+}
+
+
+void test_cxx11_tensor_cast_float16_cuda()
+{
+ CALL_SUBTEST(test_cuda_conversion());
+ CALL_SUBTEST(test_fallback_conversion());
+}
diff --git a/unsupported/test/cxx11_tensor_casts.cpp b/unsupported/test/cxx11_tensor_casts.cpp
new file mode 100644
index 000000000..3c6d0d2ff
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_casts.cpp
@@ -0,0 +1,115 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+
+#include <Eigen/CXX11/Tensor>
+
+using Eigen::Tensor;
+using Eigen::array;
+
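+// cast<T>() converts every coefficient with the semantics of static_cast<T>;
+// each test below verifies this element by element for a different pair of
+// source and destination types.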
+static void test_simple_cast()
+{
+ Tensor<float, 2> ftensor(20,30);
+ ftensor = ftensor.random() * 100.f;
+ Tensor<char, 2> chartensor(20,30);
+ chartensor.setRandom();
+ Tensor<std::complex<float>, 2> cplextensor(20,30);
+ cplextensor.setRandom();
+
+ chartensor = ftensor.cast<char>();
+ cplextensor = ftensor.cast<std::complex<float> >();
+
+ for (int i = 0; i < 20; ++i) {
+ for (int j = 0; j < 30; ++j) {
+ VERIFY_IS_EQUAL(chartensor(i,j), static_cast<char>(ftensor(i,j)));
+ VERIFY_IS_EQUAL(cplextensor(i,j), static_cast<std::complex<float> >(ftensor(i,j)));
+ }
+ }
+}
+
+
+static void test_vectorized_cast()
+{
+ Tensor<int, 2> itensor(20,30);
+ itensor = itensor.random() / 1000;
+ Tensor<float, 2> ftensor(20,30);
+ ftensor.setRandom();
+ Tensor<double, 2> dtensor(20,30);
+ dtensor.setRandom();
+
+ ftensor = itensor.cast<float>();
+ dtensor = itensor.cast<double>();
+
+ for (int i = 0; i < 20; ++i) {
+ for (int j = 0; j < 30; ++j) {
+ VERIFY_IS_EQUAL(itensor(i,j), static_cast<int>(ftensor(i,j)));
+ VERIFY_IS_EQUAL(dtensor(i,j), static_cast<double>(ftensor(i,j)));
+ }
+ }
+}
+
+
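+// Float-to-int conversion truncates toward zero, matching static_cast<int>;
+// the inputs are scaled so all values stay well inside the int range.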
+static void test_float_to_int_cast()
+{
+ Tensor<float, 2> ftensor(20,30);
+ ftensor = ftensor.random() * 1000.0f;
+ Tensor<double, 2> dtensor(20,30);
+ dtensor = dtensor.random() * 1000.0;
+
+ Tensor<int, 2> i1tensor = ftensor.cast<int>();
+ Tensor<int, 2> i2tensor = dtensor.cast<int>();
+
+ for (int i = 0; i < 20; ++i) {
+ for (int j = 0; j < 30; ++j) {
+ VERIFY_IS_EQUAL(i1tensor(i,j), static_cast<int>(ftensor(i,j)));
+ VERIFY_IS_EQUAL(i2tensor(i,j), static_cast<int>(dtensor(i,j)));
+ }
+ }
+}
+
+
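+// double -> float discards precision, so the round-trip comparison below
+// uses VERIFY_IS_APPROX rather than exact equality.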
+static void test_big_to_small_type_cast()
+{
+ Tensor<double, 2> dtensor(20, 30);
+ dtensor.setRandom();
+ Tensor<float, 2> ftensor(20, 30);
+ ftensor = dtensor.cast<float>();
+
+ for (int i = 0; i < 20; ++i) {
+ for (int j = 0; j < 30; ++j) {
+ VERIFY_IS_APPROX(dtensor(i,j), static_cast<double>(ftensor(i,j)));
+ }
+ }
+}
+
+
+static void test_small_to_big_type_cast()
+{
+ Tensor<float, 2> ftensor(20, 30);
+ ftensor.setRandom();
+ Tensor<double, 2> dtensor(20, 30);
+ dtensor = ftensor.cast<double>();
+
+ for (int i = 0; i < 20; ++i) {
+ for (int j = 0; j < 30; ++j) {
+ VERIFY_IS_APPROX(dtensor(i,j), static_cast<double>(ftensor(i,j)));
+ }
+ }
+}
+
+
+void test_cxx11_tensor_casts()
+{
+ CALL_SUBTEST(test_simple_cast());
+ CALL_SUBTEST(test_vectorized_cast());
+ CALL_SUBTEST(test_float_to_int_cast());
+ CALL_SUBTEST(test_big_to_small_type_cast());
+ CALL_SUBTEST(test_small_to_big_type_cast());
+}
diff --git a/unsupported/test/cxx11_tensor_chipping.cpp b/unsupported/test/cxx11_tensor_chipping.cpp
new file mode 100644
index 000000000..1832dec8b
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_chipping.cpp
@@ -0,0 +1,425 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+
+#include <Eigen/CXX11/Tensor>
+
+using Eigen::Tensor;
+
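+// chip<D>(i) (or the dynamic form chip(i, D)) fixes dimension D at offset i
+// and produces a view of rank NumDims-1: for a (2,3,5,7,11) tensor,
+// chip<0>(1) is the (3,5,7,11) slice at index 1 of the first dimension.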
+template<int DataLayout>
+static void test_simple_chip()
+{
+ Tensor<float, 5, DataLayout> tensor(2,3,5,7,11);
+ tensor.setRandom();
+
+ Tensor<float, 4, DataLayout> chip1;
+ chip1 = tensor.template chip<0>(1);
+
+ VERIFY_IS_EQUAL(chip1.dimension(0), 3);
+ VERIFY_IS_EQUAL(chip1.dimension(1), 5);
+ VERIFY_IS_EQUAL(chip1.dimension(2), 7);
+ VERIFY_IS_EQUAL(chip1.dimension(3), 11);
+
+ for (int i = 0; i < 3; ++i) {
+ for (int j = 0; j < 5; ++j) {
+ for (int k = 0; k < 7; ++k) {
+ for (int l = 0; l < 11; ++l) {
+ VERIFY_IS_EQUAL(chip1(i,j,k,l), tensor(1,i,j,k,l));
+ }
+ }
+ }
+ }
+
+ Tensor<float, 4, DataLayout> chip2 = tensor.template chip<1>(1);
+ VERIFY_IS_EQUAL(chip2.dimension(0), 2);
+ VERIFY_IS_EQUAL(chip2.dimension(1), 5);
+ VERIFY_IS_EQUAL(chip2.dimension(2), 7);
+ VERIFY_IS_EQUAL(chip2.dimension(3), 11);
+ for (int i = 0; i < 2; ++i) {
+    for (int j = 0; j < 5; ++j) {
+ for (int k = 0; k < 7; ++k) {
+ for (int l = 0; l < 11; ++l) {
+ VERIFY_IS_EQUAL(chip2(i,j,k,l), tensor(i,1,j,k,l));
+ }
+ }
+ }
+ }
+
+ Tensor<float, 4, DataLayout> chip3 = tensor.template chip<2>(2);
+ VERIFY_IS_EQUAL(chip3.dimension(0), 2);
+ VERIFY_IS_EQUAL(chip3.dimension(1), 3);
+ VERIFY_IS_EQUAL(chip3.dimension(2), 7);
+ VERIFY_IS_EQUAL(chip3.dimension(3), 11);
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 7; ++k) {
+ for (int l = 0; l < 11; ++l) {
+ VERIFY_IS_EQUAL(chip3(i,j,k,l), tensor(i,j,2,k,l));
+ }
+ }
+ }
+ }
+
+ Tensor<float, 4, DataLayout> chip4(tensor.template chip<3>(5));
+ VERIFY_IS_EQUAL(chip4.dimension(0), 2);
+ VERIFY_IS_EQUAL(chip4.dimension(1), 3);
+ VERIFY_IS_EQUAL(chip4.dimension(2), 5);
+ VERIFY_IS_EQUAL(chip4.dimension(3), 11);
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 5; ++k) {
+        for (int l = 0; l < 11; ++l) {
+ VERIFY_IS_EQUAL(chip4(i,j,k,l), tensor(i,j,k,5,l));
+ }
+ }
+ }
+ }
+
+ Tensor<float, 4, DataLayout> chip5(tensor.template chip<4>(7));
+ VERIFY_IS_EQUAL(chip5.dimension(0), 2);
+ VERIFY_IS_EQUAL(chip5.dimension(1), 3);
+ VERIFY_IS_EQUAL(chip5.dimension(2), 5);
+ VERIFY_IS_EQUAL(chip5.dimension(3), 7);
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 5; ++k) {
+ for (int l = 0; l < 7; ++l) {
+ VERIFY_IS_EQUAL(chip5(i,j,k,l), tensor(i,j,k,l,7));
+ }
+ }
+ }
+ }
+}
+
+template<int DataLayout>
+static void test_dynamic_chip()
+{
+ Tensor<float, 5, DataLayout> tensor(2,3,5,7,11);
+ tensor.setRandom();
+
+ Tensor<float, 4, DataLayout> chip1;
+ chip1 = tensor.chip(1, 0);
+ VERIFY_IS_EQUAL(chip1.dimension(0), 3);
+ VERIFY_IS_EQUAL(chip1.dimension(1), 5);
+ VERIFY_IS_EQUAL(chip1.dimension(2), 7);
+ VERIFY_IS_EQUAL(chip1.dimension(3), 11);
+ for (int i = 0; i < 3; ++i) {
+ for (int j = 0; j < 5; ++j) {
+ for (int k = 0; k < 7; ++k) {
+ for (int l = 0; l < 11; ++l) {
+ VERIFY_IS_EQUAL(chip1(i,j,k,l), tensor(1,i,j,k,l));
+ }
+ }
+ }
+ }
+
+ Tensor<float, 4, DataLayout> chip2 = tensor.chip(1, 1);
+ VERIFY_IS_EQUAL(chip2.dimension(0), 2);
+ VERIFY_IS_EQUAL(chip2.dimension(1), 5);
+ VERIFY_IS_EQUAL(chip2.dimension(2), 7);
+ VERIFY_IS_EQUAL(chip2.dimension(3), 11);
+ for (int i = 0; i < 2; ++i) {
+    for (int j = 0; j < 5; ++j) {
+ for (int k = 0; k < 7; ++k) {
+ for (int l = 0; l < 11; ++l) {
+ VERIFY_IS_EQUAL(chip2(i,j,k,l), tensor(i,1,j,k,l));
+ }
+ }
+ }
+ }
+
+ Tensor<float, 4, DataLayout> chip3 = tensor.chip(2, 2);
+ VERIFY_IS_EQUAL(chip3.dimension(0), 2);
+ VERIFY_IS_EQUAL(chip3.dimension(1), 3);
+ VERIFY_IS_EQUAL(chip3.dimension(2), 7);
+ VERIFY_IS_EQUAL(chip3.dimension(3), 11);
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 7; ++k) {
+ for (int l = 0; l < 11; ++l) {
+ VERIFY_IS_EQUAL(chip3(i,j,k,l), tensor(i,j,2,k,l));
+ }
+ }
+ }
+ }
+
+ Tensor<float, 4, DataLayout> chip4(tensor.chip(5, 3));
+ VERIFY_IS_EQUAL(chip4.dimension(0), 2);
+ VERIFY_IS_EQUAL(chip4.dimension(1), 3);
+ VERIFY_IS_EQUAL(chip4.dimension(2), 5);
+ VERIFY_IS_EQUAL(chip4.dimension(3), 11);
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 5; ++k) {
+        for (int l = 0; l < 11; ++l) {
+ VERIFY_IS_EQUAL(chip4(i,j,k,l), tensor(i,j,k,5,l));
+ }
+ }
+ }
+ }
+
+ Tensor<float, 4, DataLayout> chip5(tensor.chip(7, 4));
+ VERIFY_IS_EQUAL(chip5.dimension(0), 2);
+ VERIFY_IS_EQUAL(chip5.dimension(1), 3);
+ VERIFY_IS_EQUAL(chip5.dimension(2), 5);
+ VERIFY_IS_EQUAL(chip5.dimension(3), 7);
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 5; ++k) {
+ for (int l = 0; l < 7; ++l) {
+ VERIFY_IS_EQUAL(chip5(i,j,k,l), tensor(i,j,k,l,7));
+ }
+ }
+ }
+ }
+}
+
+template<int DataLayout>
+static void test_chip_in_expr() {
+ Tensor<float, 5, DataLayout> input1(2,3,5,7,11);
+ input1.setRandom();
+ Tensor<float, 4, DataLayout> input2(3,5,7,11);
+ input2.setRandom();
+
+ Tensor<float, 4, DataLayout> result = input1.template chip<0>(0) + input2;
+ for (int i = 0; i < 3; ++i) {
+ for (int j = 0; j < 5; ++j) {
+ for (int k = 0; k < 7; ++k) {
+ for (int l = 0; l < 11; ++l) {
+ float expected = input1(0,i,j,k,l) + input2(i,j,k,l);
+ VERIFY_IS_EQUAL(result(i,j,k,l), expected);
+ }
+ }
+ }
+ }
+
+ Tensor<float, 3, DataLayout> input3(3,7,11);
+ input3.setRandom();
+ Tensor<float, 3, DataLayout> result2 = input1.template chip<0>(0).template chip<1>(2) + input3;
+ for (int i = 0; i < 3; ++i) {
+ for (int j = 0; j < 7; ++j) {
+ for (int k = 0; k < 11; ++k) {
+ float expected = input1(0,i,2,j,k) + input3(i,j,k);
+ VERIFY_IS_EQUAL(result2(i,j,k), expected);
+ }
+ }
+ }
+}
+
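+// chip() is also writable: assigning to tensor.chip<D>(i) overwrites just
+// the selected slice and leaves every other coefficient untouched.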
+template<int DataLayout>
+static void test_chip_as_lvalue()
+{
+ Tensor<float, 5, DataLayout> input1(2,3,5,7,11);
+ input1.setRandom();
+
+ Tensor<float, 4, DataLayout> input2(3,5,7,11);
+ input2.setRandom();
+ Tensor<float, 5, DataLayout> tensor = input1;
+ tensor.template chip<0>(1) = input2;
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 5; ++k) {
+ for (int l = 0; l < 7; ++l) {
+ for (int m = 0; m < 11; ++m) {
+ if (i != 1) {
+ VERIFY_IS_EQUAL(tensor(i,j,k,l,m), input1(i,j,k,l,m));
+ } else {
+ VERIFY_IS_EQUAL(tensor(i,j,k,l,m), input2(j,k,l,m));
+ }
+ }
+ }
+ }
+ }
+ }
+
+ Tensor<float, 4, DataLayout> input3(2,5,7,11);
+ input3.setRandom();
+ tensor = input1;
+ tensor.template chip<1>(1) = input3;
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 5; ++k) {
+ for (int l = 0; l < 7; ++l) {
+ for (int m = 0; m < 11; ++m) {
+ if (j != 1) {
+ VERIFY_IS_EQUAL(tensor(i,j,k,l,m), input1(i,j,k,l,m));
+ } else {
+ VERIFY_IS_EQUAL(tensor(i,j,k,l,m), input3(i,k,l,m));
+ }
+ }
+ }
+ }
+ }
+ }
+
+ Tensor<float, 4, DataLayout> input4(2,3,7,11);
+ input4.setRandom();
+ tensor = input1;
+ tensor.template chip<2>(3) = input4;
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 5; ++k) {
+ for (int l = 0; l < 7; ++l) {
+ for (int m = 0; m < 11; ++m) {
+ if (k != 3) {
+ VERIFY_IS_EQUAL(tensor(i,j,k,l,m), input1(i,j,k,l,m));
+ } else {
+ VERIFY_IS_EQUAL(tensor(i,j,k,l,m), input4(i,j,l,m));
+ }
+ }
+ }
+ }
+ }
+ }
+
+ Tensor<float, 4, DataLayout> input5(2,3,5,11);
+ input5.setRandom();
+ tensor = input1;
+ tensor.template chip<3>(4) = input5;
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 5; ++k) {
+ for (int l = 0; l < 7; ++l) {
+ for (int m = 0; m < 11; ++m) {
+ if (l != 4) {
+ VERIFY_IS_EQUAL(tensor(i,j,k,l,m), input1(i,j,k,l,m));
+ } else {
+ VERIFY_IS_EQUAL(tensor(i,j,k,l,m), input5(i,j,k,m));
+ }
+ }
+ }
+ }
+ }
+ }
+
+ Tensor<float, 4, DataLayout> input6(2,3,5,7);
+ input6.setRandom();
+ tensor = input1;
+ tensor.template chip<4>(5) = input6;
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 5; ++k) {
+ for (int l = 0; l < 7; ++l) {
+ for (int m = 0; m < 11; ++m) {
+ if (m != 5) {
+ VERIFY_IS_EQUAL(tensor(i,j,k,l,m), input1(i,j,k,l,m));
+ } else {
+ VERIFY_IS_EQUAL(tensor(i,j,k,l,m), input6(i,j,k,l));
+ }
+ }
+ }
+ }
+ }
+ }
+
+ Tensor<float, 5, DataLayout> input7(2,3,5,7,11);
+ input7.setRandom();
+ tensor = input1;
+ tensor.chip(0, 0) = input7.chip(0, 0);
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 5; ++k) {
+ for (int l = 0; l < 7; ++l) {
+ for (int m = 0; m < 11; ++m) {
+ if (i != 0) {
+ VERIFY_IS_EQUAL(tensor(i,j,k,l,m), input1(i,j,k,l,m));
+ } else {
+ VERIFY_IS_EQUAL(tensor(i,j,k,l,m), input7(i,j,k,l,m));
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
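+// Only a chip along the outermost storage dimension (the last dimension for
+// ColMajor, the first for RowMajor) is contiguous in memory; only then does
+// the evaluator expose a non-null raw data() pointer.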
+static void test_chip_raw_data_col_major()
+{
+ Tensor<float, 5, ColMajor> tensor(2,3,5,7,11);
+ tensor.setRandom();
+
+ typedef TensorEvaluator<decltype(tensor.chip<4>(3)), DefaultDevice> Evaluator4;
+ auto chip = Evaluator4(tensor.chip<4>(3), DefaultDevice());
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 5; ++k) {
+ for (int l = 0; l < 7; ++l) {
+ int chip_index = i + 2 * (j + 3 * (k + 5 * l));
+ VERIFY_IS_EQUAL(chip.data()[chip_index], tensor(i,j,k,l,3));
+ }
+ }
+ }
+ }
+
+ typedef TensorEvaluator<decltype(tensor.chip<0>(0)), DefaultDevice> Evaluator0;
+ auto chip0 = Evaluator0(tensor.chip<0>(0), DefaultDevice());
+ VERIFY_IS_EQUAL(chip0.data(), static_cast<float*>(0));
+
+ typedef TensorEvaluator<decltype(tensor.chip<1>(0)), DefaultDevice> Evaluator1;
+ auto chip1 = Evaluator1(tensor.chip<1>(0), DefaultDevice());
+ VERIFY_IS_EQUAL(chip1.data(), static_cast<float*>(0));
+
+ typedef TensorEvaluator<decltype(tensor.chip<2>(0)), DefaultDevice> Evaluator2;
+ auto chip2 = Evaluator2(tensor.chip<2>(0), DefaultDevice());
+ VERIFY_IS_EQUAL(chip2.data(), static_cast<float*>(0));
+
+ typedef TensorEvaluator<decltype(tensor.chip<3>(0)), DefaultDevice> Evaluator3;
+ auto chip3 = Evaluator3(tensor.chip<3>(0), DefaultDevice());
+ VERIFY_IS_EQUAL(chip3.data(), static_cast<float*>(0));
+}
+
+static void test_chip_raw_data_row_major()
+{
+ Tensor<float, 5, RowMajor> tensor(11,7,5,3,2);
+ tensor.setRandom();
+
+ typedef TensorEvaluator<decltype(tensor.chip<0>(3)), DefaultDevice> Evaluator0;
+ auto chip = Evaluator0(tensor.chip<0>(3), DefaultDevice());
+ for (int i = 0; i < 7; ++i) {
+ for (int j = 0; j < 5; ++j) {
+ for (int k = 0; k < 3; ++k) {
+ for (int l = 0; l < 2; ++l) {
+ int chip_index = l + 2 * (k + 3 * (j + 5 * i));
+ VERIFY_IS_EQUAL(chip.data()[chip_index], tensor(3,i,j,k,l));
+ }
+ }
+ }
+ }
+
+ typedef TensorEvaluator<decltype(tensor.chip<1>(0)), DefaultDevice> Evaluator1;
+ auto chip1 = Evaluator1(tensor.chip<1>(0), DefaultDevice());
+ VERIFY_IS_EQUAL(chip1.data(), static_cast<float*>(0));
+
+ typedef TensorEvaluator<decltype(tensor.chip<2>(0)), DefaultDevice> Evaluator2;
+ auto chip2 = Evaluator2(tensor.chip<2>(0), DefaultDevice());
+ VERIFY_IS_EQUAL(chip2.data(), static_cast<float*>(0));
+
+ typedef TensorEvaluator<decltype(tensor.chip<3>(0)), DefaultDevice> Evaluator3;
+ auto chip3 = Evaluator3(tensor.chip<3>(0), DefaultDevice());
+ VERIFY_IS_EQUAL(chip3.data(), static_cast<float*>(0));
+
+ typedef TensorEvaluator<decltype(tensor.chip<4>(0)), DefaultDevice> Evaluator4;
+ auto chip4 = Evaluator4(tensor.chip<4>(0), DefaultDevice());
+ VERIFY_IS_EQUAL(chip4.data(), static_cast<float*>(0));
+}
+
+void test_cxx11_tensor_chipping()
+{
+ CALL_SUBTEST(test_simple_chip<ColMajor>());
+ CALL_SUBTEST(test_simple_chip<RowMajor>());
+ CALL_SUBTEST(test_dynamic_chip<ColMajor>());
+ CALL_SUBTEST(test_dynamic_chip<RowMajor>());
+ CALL_SUBTEST(test_chip_in_expr<ColMajor>());
+ CALL_SUBTEST(test_chip_in_expr<RowMajor>());
+ CALL_SUBTEST(test_chip_as_lvalue<ColMajor>());
+ CALL_SUBTEST(test_chip_as_lvalue<RowMajor>());
+ CALL_SUBTEST(test_chip_raw_data_col_major());
+ CALL_SUBTEST(test_chip_raw_data_row_major());
+}
diff --git a/unsupported/test/cxx11_tensor_comparisons.cpp b/unsupported/test/cxx11_tensor_comparisons.cpp
new file mode 100644
index 000000000..b1ff8aecb
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_comparisons.cpp
@@ -0,0 +1,84 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+
+#include <Eigen/CXX11/Tensor>
+
+using Eigen::Tensor;
+using Eigen::RowMajor;
+
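+// Comparison operators on tensors act coefficient-wise and yield bool
+// tensors of the same shape.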
+static void test_orderings()
+{
+ Tensor<float, 3> mat1(2,3,7);
+ Tensor<float, 3> mat2(2,3,7);
+ Tensor<bool, 3> lt(2,3,7);
+ Tensor<bool, 3> le(2,3,7);
+ Tensor<bool, 3> gt(2,3,7);
+ Tensor<bool, 3> ge(2,3,7);
+
+ mat1.setRandom();
+ mat2.setRandom();
+
+ lt = mat1 < mat2;
+ le = mat1 <= mat2;
+ gt = mat1 > mat2;
+ ge = mat1 >= mat2;
+
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 7; ++k) {
+ VERIFY_IS_EQUAL(lt(i,j,k), mat1(i,j,k) < mat2(i,j,k));
+ VERIFY_IS_EQUAL(le(i,j,k), mat1(i,j,k) <= mat2(i,j,k));
+ VERIFY_IS_EQUAL(gt(i,j,k), mat1(i,j,k) > mat2(i,j,k));
+ VERIFY_IS_EQUAL(ge(i,j,k), mat1(i,j,k) >= mat2(i,j,k));
+ }
+ }
+ }
+}
+
+
+static void test_equality()
+{
+ Tensor<float, 3> mat1(2,3,7);
+ Tensor<float, 3> mat2(2,3,7);
+
+ mat1.setRandom();
+ mat2.setRandom();
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 7; ++k) {
+ if (internal::random<bool>()) {
+ mat2(i,j,k) = mat1(i,j,k);
+ }
+ }
+ }
+ }
+
+ Tensor<bool, 3> eq(2,3,7);
+ Tensor<bool, 3> ne(2,3,7);
+ eq = (mat1 == mat2);
+ ne = (mat1 != mat2);
+
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 7; ++k) {
+ VERIFY_IS_EQUAL(eq(i,j,k), mat1(i,j,k) == mat2(i,j,k));
+ VERIFY_IS_EQUAL(ne(i,j,k), mat1(i,j,k) != mat2(i,j,k));
+ }
+ }
+ }
+}
+
+
+void test_cxx11_tensor_comparisons()
+{
+ CALL_SUBTEST(test_orderings());
+ CALL_SUBTEST(test_equality());
+}
diff --git a/unsupported/test/cxx11_tensor_complex_cuda.cu b/unsupported/test/cxx11_tensor_complex_cuda.cu
new file mode 100644
index 000000000..d4e111f5d
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_complex_cuda.cu
@@ -0,0 +1,153 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2016 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#define EIGEN_TEST_NO_LONGDOUBLE
+#define EIGEN_TEST_FUNC cxx11_tensor_complex
+#define EIGEN_USE_GPU
+
+#if defined __CUDACC_VER__ && __CUDACC_VER__ >= 70500
+#include <cuda_fp16.h>
+#endif
+#include "main.h"
+#include <unsupported/Eigen/CXX11/Tensor>
+
+using Eigen::Tensor;
+
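+// Evaluates a nullary constant() expression and a coefficient-wise abs() on
+// the device; results come back via async copies on the device stream, which
+// is synchronized before the checks run.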
+void test_cuda_nullary() {
+ Tensor<std::complex<float>, 1, 0, int> in1(2);
+ Tensor<std::complex<float>, 1, 0, int> in2(2);
+ in1.setRandom();
+ in2.setRandom();
+
+ std::size_t float_bytes = in1.size() * sizeof(float);
+ std::size_t complex_bytes = in1.size() * sizeof(std::complex<float>);
+
+ std::complex<float>* d_in1;
+ std::complex<float>* d_in2;
+ float* d_out2;
+ cudaMalloc((void**)(&d_in1), complex_bytes);
+ cudaMalloc((void**)(&d_in2), complex_bytes);
+ cudaMalloc((void**)(&d_out2), float_bytes);
+ cudaMemcpy(d_in1, in1.data(), complex_bytes, cudaMemcpyHostToDevice);
+ cudaMemcpy(d_in2, in2.data(), complex_bytes, cudaMemcpyHostToDevice);
+
+ Eigen::CudaStreamDevice stream;
+ Eigen::GpuDevice gpu_device(&stream);
+
+ Eigen::TensorMap<Eigen::Tensor<std::complex<float>, 1, 0, int>, Eigen::Aligned> gpu_in1(
+ d_in1, 2);
+ Eigen::TensorMap<Eigen::Tensor<std::complex<float>, 1, 0, int>, Eigen::Aligned> gpu_in2(
+ d_in2, 2);
+ Eigen::TensorMap<Eigen::Tensor<float, 1, 0, int>, Eigen::Aligned> gpu_out2(
+ d_out2, 2);
+
+ gpu_in1.device(gpu_device) = gpu_in1.constant(std::complex<float>(3.14f, 2.7f));
+ gpu_out2.device(gpu_device) = gpu_in2.abs();
+
+ Tensor<std::complex<float>, 1, 0, int> new1(2);
+ Tensor<float, 1, 0, int> new2(2);
+
+ assert(cudaMemcpyAsync(new1.data(), d_in1, complex_bytes, cudaMemcpyDeviceToHost,
+ gpu_device.stream()) == cudaSuccess);
+ assert(cudaMemcpyAsync(new2.data(), d_out2, float_bytes, cudaMemcpyDeviceToHost,
+ gpu_device.stream()) == cudaSuccess);
+
+ assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
+
+ for (int i = 0; i < 2; ++i) {
+ VERIFY_IS_APPROX(new1(i), std::complex<float>(3.14f, 2.7f));
+ VERIFY_IS_APPROX(new2(i), std::abs(in2(i)));
+ }
+
+ cudaFree(d_in1);
+ cudaFree(d_in2);
+ cudaFree(d_out2);
+}
+
+
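+// A full reduction evaluated on the device writes its result into a rank-0
+// tensor; the same reduction computed on the host is the reference.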
+static void test_cuda_sum_reductions() {
+
+ Eigen::CudaStreamDevice stream;
+ Eigen::GpuDevice gpu_device(&stream);
+
+ const int num_rows = internal::random<int>(1024, 5*1024);
+ const int num_cols = internal::random<int>(1024, 5*1024);
+
+ Tensor<std::complex<float>, 2> in(num_rows, num_cols);
+ in.setRandom();
+
+ Tensor<std::complex<float>, 0> full_redux;
+ full_redux = in.sum();
+
+ std::size_t in_bytes = in.size() * sizeof(std::complex<float>);
+ std::size_t out_bytes = full_redux.size() * sizeof(std::complex<float>);
+ std::complex<float>* gpu_in_ptr = static_cast<std::complex<float>*>(gpu_device.allocate(in_bytes));
+ std::complex<float>* gpu_out_ptr = static_cast<std::complex<float>*>(gpu_device.allocate(out_bytes));
+ gpu_device.memcpyHostToDevice(gpu_in_ptr, in.data(), in_bytes);
+
+ TensorMap<Tensor<std::complex<float>, 2> > in_gpu(gpu_in_ptr, num_rows, num_cols);
+ TensorMap<Tensor<std::complex<float>, 0> > out_gpu(gpu_out_ptr);
+
+ out_gpu.device(gpu_device) = in_gpu.sum();
+
+ Tensor<std::complex<float>, 0> full_redux_gpu;
+ gpu_device.memcpyDeviceToHost(full_redux_gpu.data(), gpu_out_ptr, out_bytes);
+ gpu_device.synchronize();
+
+ // Check that the CPU and GPU reductions return the same result.
+ VERIFY_IS_APPROX(full_redux(), full_redux_gpu());
+
+ gpu_device.deallocate(gpu_in_ptr);
+ gpu_device.deallocate(gpu_out_ptr);
+}
+
+
+static void test_cuda_product_reductions() {
+
+ Eigen::CudaStreamDevice stream;
+ Eigen::GpuDevice gpu_device(&stream);
+
+ const int num_rows = internal::random<int>(1024, 5*1024);
+ const int num_cols = internal::random<int>(1024, 5*1024);
+
+ Tensor<std::complex<float>, 2> in(num_rows, num_cols);
+ in.setRandom();
+
+ Tensor<std::complex<float>, 0> full_redux;
+ full_redux = in.prod();
+
+ std::size_t in_bytes = in.size() * sizeof(std::complex<float>);
+ std::size_t out_bytes = full_redux.size() * sizeof(std::complex<float>);
+ std::complex<float>* gpu_in_ptr = static_cast<std::complex<float>*>(gpu_device.allocate(in_bytes));
+ std::complex<float>* gpu_out_ptr = static_cast<std::complex<float>*>(gpu_device.allocate(out_bytes));
+ gpu_device.memcpyHostToDevice(gpu_in_ptr, in.data(), in_bytes);
+
+ TensorMap<Tensor<std::complex<float>, 2> > in_gpu(gpu_in_ptr, num_rows, num_cols);
+ TensorMap<Tensor<std::complex<float>, 0> > out_gpu(gpu_out_ptr);
+
+ out_gpu.device(gpu_device) = in_gpu.prod();
+
+ Tensor<std::complex<float>, 0> full_redux_gpu;
+ gpu_device.memcpyDeviceToHost(full_redux_gpu.data(), gpu_out_ptr, out_bytes);
+ gpu_device.synchronize();
+
+ // Check that the CPU and GPU reductions return the same result.
+ VERIFY_IS_APPROX(full_redux(), full_redux_gpu());
+
+ gpu_device.deallocate(gpu_in_ptr);
+ gpu_device.deallocate(gpu_out_ptr);
+}
+
+
+void test_cxx11_tensor_complex()
+{
+ CALL_SUBTEST(test_cuda_nullary());
+ CALL_SUBTEST(test_cuda_sum_reductions());
+ CALL_SUBTEST(test_cuda_product_reductions());
+}
diff --git a/unsupported/test/cxx11_tensor_complex_cwise_ops_cuda.cu b/unsupported/test/cxx11_tensor_complex_cwise_ops_cuda.cu
new file mode 100644
index 000000000..2baf5eaad
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_complex_cwise_ops_cuda.cu
@@ -0,0 +1,97 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2016 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#define EIGEN_TEST_NO_LONGDOUBLE
+#define EIGEN_TEST_FUNC cxx11_tensor_complex_cwise_ops
+#define EIGEN_USE_GPU
+
+#if defined __CUDACC_VER__ && __CUDACC_VER__ >= 70500
+#include <cuda_fp16.h>
+#endif
+#include "main.h"
+#include <unsupported/Eigen/CXX11/Tensor>
+
+using Eigen::Tensor;
+
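+// Runs each complex coefficient-wise operator on constant device tensors and
+// checks the result against the corresponding host-side scalar expression.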
+template<typename T>
+void test_cuda_complex_cwise_ops() {
+ const int kNumItems = 2;
+ std::size_t complex_bytes = kNumItems * sizeof(std::complex<T>);
+
+ std::complex<T>* d_in1;
+ std::complex<T>* d_in2;
+ std::complex<T>* d_out;
+ cudaMalloc((void**)(&d_in1), complex_bytes);
+ cudaMalloc((void**)(&d_in2), complex_bytes);
+ cudaMalloc((void**)(&d_out), complex_bytes);
+
+ Eigen::CudaStreamDevice stream;
+ Eigen::GpuDevice gpu_device(&stream);
+
+ Eigen::TensorMap<Eigen::Tensor<std::complex<T>, 1, 0, int>, Eigen::Aligned> gpu_in1(
+ d_in1, kNumItems);
+ Eigen::TensorMap<Eigen::Tensor<std::complex<T>, 1, 0, int>, Eigen::Aligned> gpu_in2(
+ d_in2, kNumItems);
+ Eigen::TensorMap<Eigen::Tensor<std::complex<T>, 1, 0, int>, Eigen::Aligned> gpu_out(
+ d_out, kNumItems);
+
+ const std::complex<T> a(3.14f, 2.7f);
+ const std::complex<T> b(-10.6f, 1.4f);
+
+ gpu_in1.device(gpu_device) = gpu_in1.constant(a);
+ gpu_in2.device(gpu_device) = gpu_in2.constant(b);
+
+ enum CwiseOp {
+ Add = 0,
+ Sub,
+ Mul,
+ Div
+ };
+
+ Tensor<std::complex<T>, 1, 0, int> actual(kNumItems);
+ for (int op = Add; op <= Div; op++) {
+ std::complex<T> expected;
+ switch (static_cast<CwiseOp>(op)) {
+ case Add:
+ gpu_out.device(gpu_device) = gpu_in1 + gpu_in2;
+ expected = a + b;
+ break;
+ case Sub:
+ gpu_out.device(gpu_device) = gpu_in1 - gpu_in2;
+ expected = a - b;
+ break;
+ case Mul:
+ gpu_out.device(gpu_device) = gpu_in1 * gpu_in2;
+ expected = a * b;
+ break;
+ case Div:
+ gpu_out.device(gpu_device) = gpu_in1 / gpu_in2;
+ expected = a / b;
+ break;
+ }
+ assert(cudaMemcpyAsync(actual.data(), d_out, complex_bytes, cudaMemcpyDeviceToHost,
+ gpu_device.stream()) == cudaSuccess);
+ assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
+
+ for (int i = 0; i < kNumItems; ++i) {
+ VERIFY_IS_APPROX(actual(i), expected);
+ }
+ }
+
+ cudaFree(d_in1);
+ cudaFree(d_in2);
+ cudaFree(d_out);
+}
+
+
+void test_cxx11_tensor_complex_cwise_ops()
+{
+ CALL_SUBTEST(test_cuda_complex_cwise_ops<float>());
+ CALL_SUBTEST(test_cuda_complex_cwise_ops<double>());
+}
diff --git a/unsupported/test/cxx11_tensor_concatenation.cpp b/unsupported/test/cxx11_tensor_concatenation.cpp
new file mode 100644
index 000000000..03ef12e63
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_concatenation.cpp
@@ -0,0 +1,137 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+
+#include <Eigen/CXX11/Tensor>
+
+using Eigen::Tensor;
+
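+// concatenate(other, axis) requires both operands to have the same rank and
+// matching sizes in every non-axis dimension; the axis dimension of the
+// result is the sum of the operands' axis dimensions.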
+template<int DataLayout>
+static void test_dimension_failures()
+{
+ Tensor<int, 3, DataLayout> left(2, 3, 1);
+ Tensor<int, 3, DataLayout> right(3, 3, 1);
+ left.setRandom();
+ right.setRandom();
+
+ // Okay; other dimensions are equal.
+ Tensor<int, 3, DataLayout> concatenation = left.concatenate(right, 0);
+
+ // Dimension mismatches.
+ VERIFY_RAISES_ASSERT(concatenation = left.concatenate(right, 1));
+ VERIFY_RAISES_ASSERT(concatenation = left.concatenate(right, 2));
+
+  // Axis >= NumDims or < 0.
+ VERIFY_RAISES_ASSERT(concatenation = left.concatenate(right, 3));
+ VERIFY_RAISES_ASSERT(concatenation = left.concatenate(right, -1));
+}
+
+template<int DataLayout>
+static void test_static_dimension_failure()
+{
+ Tensor<int, 2, DataLayout> left(2, 3);
+ Tensor<int, 3, DataLayout> right(2, 3, 1);
+
+#ifdef CXX11_TENSOR_CONCATENATION_STATIC_DIMENSION_FAILURE
+ // Technically compatible, but we static assert that the inputs have same
+ // NumDims.
+ Tensor<int, 3, DataLayout> concatenation = left.concatenate(right, 0);
+#endif
+
+ // This can be worked around in this case.
+ Tensor<int, 3, DataLayout> concatenation = left
+ .reshape(Tensor<int, 3>::Dimensions(2, 3, 1))
+ .concatenate(right, 0);
+ Tensor<int, 2, DataLayout> alternative = left
+ .concatenate(right.reshape(Tensor<int, 2>::Dimensions{{{2, 3}}}), 0);
+}
+
+template<int DataLayout>
+static void test_simple_concatenation()
+{
+ Tensor<int, 3, DataLayout> left(2, 3, 1);
+ Tensor<int, 3, DataLayout> right(2, 3, 1);
+ left.setRandom();
+ right.setRandom();
+
+ Tensor<int, 3, DataLayout> concatenation = left.concatenate(right, 0);
+ VERIFY_IS_EQUAL(concatenation.dimension(0), 4);
+ VERIFY_IS_EQUAL(concatenation.dimension(1), 3);
+ VERIFY_IS_EQUAL(concatenation.dimension(2), 1);
+ for (int j = 0; j < 3; ++j) {
+ for (int i = 0; i < 2; ++i) {
+ VERIFY_IS_EQUAL(concatenation(i, j, 0), left(i, j, 0));
+ }
+ for (int i = 2; i < 4; ++i) {
+ VERIFY_IS_EQUAL(concatenation(i, j, 0), right(i - 2, j, 0));
+ }
+ }
+
+ concatenation = left.concatenate(right, 1);
+ VERIFY_IS_EQUAL(concatenation.dimension(0), 2);
+ VERIFY_IS_EQUAL(concatenation.dimension(1), 6);
+ VERIFY_IS_EQUAL(concatenation.dimension(2), 1);
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ VERIFY_IS_EQUAL(concatenation(i, j, 0), left(i, j, 0));
+ }
+ for (int j = 3; j < 6; ++j) {
+ VERIFY_IS_EQUAL(concatenation(i, j, 0), right(i, j - 3, 0));
+ }
+ }
+
+ concatenation = left.concatenate(right, 2);
+ VERIFY_IS_EQUAL(concatenation.dimension(0), 2);
+ VERIFY_IS_EQUAL(concatenation.dimension(1), 3);
+ VERIFY_IS_EQUAL(concatenation.dimension(2), 2);
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ VERIFY_IS_EQUAL(concatenation(i, j, 0), left(i, j, 0));
+ VERIFY_IS_EQUAL(concatenation(i, j, 1), right(i, j, 0));
+ }
+ }
+}
+
+
+// TODO(phli): Add test once we have a real vectorized implementation.
+// static void test_vectorized_concatenation() {}
+
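+// A concatenation view is also writable: assigning to t1.concatenate(t2, 0)
+// scatters the right-hand side back into the two underlying tensors.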
+static void test_concatenation_as_lvalue()
+{
+ Tensor<int, 2> t1(2, 3);
+ Tensor<int, 2> t2(2, 3);
+ t1.setRandom();
+ t2.setRandom();
+
+ Tensor<int, 2> result(4, 3);
+ result.setRandom();
+ t1.concatenate(t2, 0) = result;
+
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ VERIFY_IS_EQUAL(t1(i, j), result(i, j));
+ VERIFY_IS_EQUAL(t2(i, j), result(i+2, j));
+ }
+ }
+}
+
+
+void test_cxx11_tensor_concatenation()
+{
+ CALL_SUBTEST(test_dimension_failures<ColMajor>());
+ CALL_SUBTEST(test_dimension_failures<RowMajor>());
+ CALL_SUBTEST(test_static_dimension_failure<ColMajor>());
+ CALL_SUBTEST(test_static_dimension_failure<RowMajor>());
+ CALL_SUBTEST(test_simple_concatenation<ColMajor>());
+ CALL_SUBTEST(test_simple_concatenation<RowMajor>());
+ // CALL_SUBTEST(test_vectorized_concatenation());
+ CALL_SUBTEST(test_concatenation_as_lvalue());
+}
diff --git a/unsupported/test/cxx11_tensor_const.cpp b/unsupported/test/cxx11_tensor_const.cpp
new file mode 100644
index 000000000..ad9c9da39
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_const.cpp
@@ -0,0 +1,62 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+
+#include <Eigen/CXX11/Tensor>
+using Eigen::Tensor;
+
+
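+// Tensors of const scalars and const TensorMaps are read-only: they can
+// appear on the right-hand side of assignments and expressions (including
+// chip()) but cannot be written to.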
+static void test_simple_assign()
+{
+ Tensor<int, 3> random(2,3,7);
+ random.setRandom();
+
+ TensorMap<Tensor<const int, 3> > constant(random.data(), 2, 3, 7);
+ Tensor<int, 3> result(2,3,7);
+ result = constant;
+
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 7; ++k) {
+ VERIFY_IS_EQUAL((result(i,j,k)), random(i,j,k));
+ }
+ }
+ }
+}
+
+
+static void test_assign_of_const_tensor()
+{
+ Tensor<int, 3> random(2,3,7);
+ random.setRandom();
+
+ TensorMap<Tensor<const int, 3> > constant1(random.data(), 2, 3, 7);
+ TensorMap<const Tensor<int, 3> > constant2(random.data(), 2, 3, 7);
+ const TensorMap<Tensor<int, 3> > constant3(random.data(), 2, 3, 7);
+
+ Tensor<int, 2> result1 = constant1.chip(0, 2);
+ Tensor<int, 2> result2 = constant2.chip(0, 2);
+ Tensor<int, 2> result3 = constant3.chip(0, 2);
+
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ VERIFY_IS_EQUAL((result1(i,j)), random(i,j,0));
+ VERIFY_IS_EQUAL((result2(i,j)), random(i,j,0));
+ VERIFY_IS_EQUAL((result3(i,j)), random(i,j,0));
+ }
+ }
+}
+
+
+void test_cxx11_tensor_const()
+{
+ CALL_SUBTEST(test_simple_assign());
+ CALL_SUBTEST(test_assign_of_const_tensor());
+}
diff --git a/unsupported/test/cxx11_tensor_contract_cuda.cu b/unsupported/test/cxx11_tensor_contract_cuda.cu
new file mode 100644
index 000000000..dd68430ce
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_contract_cuda.cu
@@ -0,0 +1,216 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+// Copyright (C) 2014 Navdeep Jaitly <ndjaitly@google.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#define EIGEN_TEST_NO_LONGDOUBLE
+#define EIGEN_TEST_NO_COMPLEX
+#define EIGEN_TEST_FUNC cxx11_tensor_cuda
+#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int
+#define EIGEN_USE_GPU
+
+#if defined __CUDACC_VER__ && __CUDACC_VER__ >= 70500
+#include <cuda_fp16.h>
+#endif
+#include "main.h"
+#include <unsupported/Eigen/CXX11/Tensor>
+
+using Eigen::Tensor;
+typedef Tensor<float, 1>::DimensionPair DimPair;
+
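+// Each device contraction is checked against the host result, accepting
+// either absolute or relative agreement to within 1e-4.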
+template<int DataLayout>
+void test_cuda_contraction(int m_size, int k_size, int n_size)
+{
+ std::cout << "Testing for (" << m_size << "," << k_size << "," << n_size << ")" << std::endl;
+  // For the larger problem sizes the output has more elements than
+  // 30 * 1024, the number of threads resident across the blocks of a
+  // 15 SM GK110 GPU, so each thread computes several coefficients.
+ Tensor<float, 2, DataLayout> t_left(m_size, k_size);
+ Tensor<float, 2, DataLayout> t_right(k_size, n_size);
+ Tensor<float, 2, DataLayout> t_result(m_size, n_size);
+ Tensor<float, 2, DataLayout> t_result_gpu(m_size, n_size);
+ Eigen::array<DimPair, 1> dims(DimPair(1, 0));
+
+ t_left.setRandom();
+ t_right.setRandom();
+
+ std::size_t t_left_bytes = t_left.size() * sizeof(float);
+ std::size_t t_right_bytes = t_right.size() * sizeof(float);
+ std::size_t t_result_bytes = t_result.size() * sizeof(float);
+
+ float* d_t_left;
+ float* d_t_right;
+ float* d_t_result;
+
+ cudaMalloc((void**)(&d_t_left), t_left_bytes);
+ cudaMalloc((void**)(&d_t_right), t_right_bytes);
+ cudaMalloc((void**)(&d_t_result), t_result_bytes);
+
+ cudaMemcpy(d_t_left, t_left.data(), t_left_bytes, cudaMemcpyHostToDevice);
+ cudaMemcpy(d_t_right, t_right.data(), t_right_bytes, cudaMemcpyHostToDevice);
+
+ Eigen::CudaStreamDevice stream;
+ Eigen::GpuDevice gpu_device(&stream);
+
+ Eigen::TensorMap<Eigen::Tensor<float, 2, DataLayout> >
+ gpu_t_left(d_t_left, Eigen::array<int, 2>(m_size, k_size));
+ Eigen::TensorMap<Eigen::Tensor<float, 2, DataLayout> >
+ gpu_t_right(d_t_right, Eigen::array<int, 2>(k_size, n_size));
+ Eigen::TensorMap<Eigen::Tensor<float, 2, DataLayout> >
+ gpu_t_result(d_t_result, Eigen::array<int, 2>(m_size, n_size));
+
+
+ gpu_t_result.device(gpu_device) = gpu_t_left.contract(gpu_t_right, dims);
+ t_result = t_left.contract(t_right, dims);
+
+ cudaMemcpy(t_result_gpu.data(), d_t_result, t_result_bytes, cudaMemcpyDeviceToHost);
+ for (DenseIndex i = 0; i < t_result.size(); i++) {
+ if (fabs(t_result(i) - t_result_gpu(i)) < 1e-4f) {
+ continue;
+ }
+ if (Eigen::internal::isApprox(t_result(i), t_result_gpu(i), 1e-4f)) {
+ continue;
+ }
+ std::cout << "mismatch detected at index " << i << ": " << t_result(i)
+ << " vs " << t_result_gpu(i) << std::endl;
+ assert(false);
+ }
+
+ cudaFree((void*)d_t_left);
+ cudaFree((void*)d_t_right);
+ cudaFree((void*)d_t_result);
+}
+
+
+template<int DataLayout>
+void test_scalar(int m_size, int k_size, int n_size)
+{
+ std::cout << "Testing for (" << m_size << "," << k_size << "," << n_size << ")" << std::endl;
+  // Contracting over both dimensions of each operand reduces the whole
+  // expression to a single scalar coefficient.
+ Tensor<float, 2, DataLayout> t_left(m_size, k_size);
+ Tensor<float, 2, DataLayout> t_right(k_size, n_size);
+ Tensor<float, 0, DataLayout> t_result;
+ Tensor<float, 0, DataLayout> t_result_gpu;
+ Eigen::array<DimPair, 2> dims(DimPair(0, 0), DimPair(1, 1));
+
+ t_left.setRandom();
+ t_right.setRandom();
+
+ std::size_t t_left_bytes = t_left.size() * sizeof(float);
+ std::size_t t_right_bytes = t_right.size() * sizeof(float);
+ std::size_t t_result_bytes = sizeof(float);
+
+ float* d_t_left;
+ float* d_t_right;
+ float* d_t_result;
+
+ cudaMalloc((void**)(&d_t_left), t_left_bytes);
+ cudaMalloc((void**)(&d_t_right), t_right_bytes);
+ cudaMalloc((void**)(&d_t_result), t_result_bytes);
+
+ cudaMemcpy(d_t_left, t_left.data(), t_left_bytes, cudaMemcpyHostToDevice);
+ cudaMemcpy(d_t_right, t_right.data(), t_right_bytes, cudaMemcpyHostToDevice);
+
+ Eigen::CudaStreamDevice stream;
+ Eigen::GpuDevice gpu_device(&stream);
+
+ Eigen::TensorMap<Eigen::Tensor<float, 2, DataLayout> >
+ gpu_t_left(d_t_left, m_size, k_size);
+ Eigen::TensorMap<Eigen::Tensor<float, 2, DataLayout> >
+ gpu_t_right(d_t_right, k_size, n_size);
+ Eigen::TensorMap<Eigen::Tensor<float, 0, DataLayout> >
+ gpu_t_result(d_t_result);
+
+ gpu_t_result.device(gpu_device) = gpu_t_left.contract(gpu_t_right, dims);
+ t_result = t_left.contract(t_right, dims);
+
+ cudaMemcpy(t_result_gpu.data(), d_t_result, t_result_bytes, cudaMemcpyDeviceToHost);
+ if (fabs(t_result() - t_result_gpu()) > 1e-4f &&
+ !Eigen::internal::isApprox(t_result(), t_result_gpu(), 1e-4f)) {
+ std::cout << "mismatch detected: " << t_result()
+ << " vs " << t_result_gpu() << std::endl;
+ assert(false);
+ }
+
+ cudaFree((void*)d_t_left);
+ cudaFree((void*)d_t_right);
+ cudaFree((void*)d_t_result);
+}
+
+
+template<int DataLayout>
+void test_cuda_contraction_m() {
+ for (int k = 32; k < 256; k++) {
+ test_cuda_contraction<ColMajor>(k, 128, 128);
+ test_cuda_contraction<RowMajor>(k, 128, 128);
+ }
+}
+
+template<int DataLayout>
+void test_cuda_contraction_k() {
+ for (int k = 32; k < 256; k++) {
+ test_cuda_contraction<ColMajor>(128, k, 128);
+ test_cuda_contraction<RowMajor>(128, k, 128);
+ }
+}
+
+template<int DataLayout>
+void test_cuda_contraction_n() {
+ for (int k = 32; k < 256; k++) {
+ test_cuda_contraction<ColMajor>(128, 128, k);
+ test_cuda_contraction<RowMajor>(128, 128, k);
+ }
+}
+
+
+template<int DataLayout>
+void test_cuda_contraction_sizes() {
+ int m_sizes[] = { 31, 39, 63, 64, 65,
+ 127, 129, 255, 257 , 511,
+ 512, 513, 1023, 1024, 1025};
+
+ int n_sizes[] = { 31, 39, 63, 64, 65,
+ 127, 129, 255, 257, 511,
+ 512, 513, 1023, 1024, 1025};
+
+ int k_sizes[] = { 31, 39, 63, 64, 65,
+ 95, 96, 127, 129, 255,
+ 257, 511, 512, 513, 1023,
+ 1024, 1025};
+
+ for (int i = 0; i < 15; i++) {
+ for (int j = 0; j < 15; j++) {
+ for (int k = 0; k < 17; k++) {
+        test_cuda_contraction<DataLayout>(m_sizes[i], k_sizes[k], n_sizes[j]);
+ }
+ }
+ }
+}
+
+void test_cxx11_tensor_cuda()
+{
+ CALL_SUBTEST_1(test_cuda_contraction<ColMajor>(128, 128, 128));
+ CALL_SUBTEST_1(test_cuda_contraction<RowMajor>(128, 128, 128));
+
+ CALL_SUBTEST_1(test_scalar<ColMajor>(128, 128, 128));
+ CALL_SUBTEST_1(test_scalar<RowMajor>(128, 128, 128));
+
+ CALL_SUBTEST_2(test_cuda_contraction_m<ColMajor>());
+ CALL_SUBTEST_3(test_cuda_contraction_m<RowMajor>());
+
+ CALL_SUBTEST_4(test_cuda_contraction_k<ColMajor>());
+ CALL_SUBTEST_5(test_cuda_contraction_k<RowMajor>());
+
+ CALL_SUBTEST_6(test_cuda_contraction_n<ColMajor>());
+ CALL_SUBTEST_7(test_cuda_contraction_n<RowMajor>());
+
+ CALL_SUBTEST_8(test_cuda_contraction_sizes<ColMajor>());
+ CALL_SUBTEST_9(test_cuda_contraction_sizes<RowMajor>());
+}
diff --git a/unsupported/test/cxx11_tensor_contraction.cpp b/unsupported/test/cxx11_tensor_contraction.cpp
new file mode 100644
index 000000000..ace97057f
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_contraction.cpp
@@ -0,0 +1,545 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+
+#include <Eigen/CXX11/Tensor>
+
+using Eigen::DefaultDevice;
+using Eigen::Tensor;
+
+typedef Tensor<float, 1>::DimensionPair DimPair;
+
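+// A contraction is specified by index pairs: DimPair(a, b) sums over
+// dimension a of the left operand and dimension b of the right operand.
+// The evaluator tests below check each single-pair case against
+// hand-written sums.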
+template<int DataLayout>
+static void test_evals()
+{
+ Tensor<float, 2, DataLayout> mat1(2, 3);
+ Tensor<float, 2, DataLayout> mat2(2, 3);
+ Tensor<float, 2, DataLayout> mat3(3, 2);
+
+ mat1.setRandom();
+ mat2.setRandom();
+ mat3.setRandom();
+
+ Tensor<float, 2, DataLayout> mat4(3,3);
+ mat4.setZero();
+ Eigen::array<DimPair, 1> dims3 = {{DimPair(0, 0)}};
+ typedef TensorEvaluator<decltype(mat1.contract(mat2, dims3)), DefaultDevice> Evaluator;
+ Evaluator eval(mat1.contract(mat2, dims3), DefaultDevice());
+ eval.evalTo(mat4.data());
+ EIGEN_STATIC_ASSERT(Evaluator::NumDims==2ul, YOU_MADE_A_PROGRAMMING_MISTAKE);
+ VERIFY_IS_EQUAL(eval.dimensions()[0], 3);
+ VERIFY_IS_EQUAL(eval.dimensions()[1], 3);
+
+ VERIFY_IS_APPROX(mat4(0,0), mat1(0,0)*mat2(0,0) + mat1(1,0)*mat2(1,0));
+ VERIFY_IS_APPROX(mat4(0,1), mat1(0,0)*mat2(0,1) + mat1(1,0)*mat2(1,1));
+ VERIFY_IS_APPROX(mat4(0,2), mat1(0,0)*mat2(0,2) + mat1(1,0)*mat2(1,2));
+ VERIFY_IS_APPROX(mat4(1,0), mat1(0,1)*mat2(0,0) + mat1(1,1)*mat2(1,0));
+ VERIFY_IS_APPROX(mat4(1,1), mat1(0,1)*mat2(0,1) + mat1(1,1)*mat2(1,1));
+ VERIFY_IS_APPROX(mat4(1,2), mat1(0,1)*mat2(0,2) + mat1(1,1)*mat2(1,2));
+ VERIFY_IS_APPROX(mat4(2,0), mat1(0,2)*mat2(0,0) + mat1(1,2)*mat2(1,0));
+ VERIFY_IS_APPROX(mat4(2,1), mat1(0,2)*mat2(0,1) + mat1(1,2)*mat2(1,1));
+ VERIFY_IS_APPROX(mat4(2,2), mat1(0,2)*mat2(0,2) + mat1(1,2)*mat2(1,2));
+
+ Tensor<float, 2, DataLayout> mat5(2,2);
+ mat5.setZero();
+ Eigen::array<DimPair, 1> dims4 = {{DimPair(1, 1)}};
+ typedef TensorEvaluator<decltype(mat1.contract(mat2, dims4)), DefaultDevice> Evaluator2;
+ Evaluator2 eval2(mat1.contract(mat2, dims4), DefaultDevice());
+ eval2.evalTo(mat5.data());
+ EIGEN_STATIC_ASSERT(Evaluator2::NumDims==2ul, YOU_MADE_A_PROGRAMMING_MISTAKE);
+ VERIFY_IS_EQUAL(eval2.dimensions()[0], 2);
+ VERIFY_IS_EQUAL(eval2.dimensions()[1], 2);
+
+ VERIFY_IS_APPROX(mat5(0,0), mat1(0,0)*mat2(0,0) + mat1(0,1)*mat2(0,1) + mat1(0,2)*mat2(0,2));
+ VERIFY_IS_APPROX(mat5(0,1), mat1(0,0)*mat2(1,0) + mat1(0,1)*mat2(1,1) + mat1(0,2)*mat2(1,2));
+ VERIFY_IS_APPROX(mat5(1,0), mat1(1,0)*mat2(0,0) + mat1(1,1)*mat2(0,1) + mat1(1,2)*mat2(0,2));
+ VERIFY_IS_APPROX(mat5(1,1), mat1(1,0)*mat2(1,0) + mat1(1,1)*mat2(1,1) + mat1(1,2)*mat2(1,2));
+
+ Tensor<float, 2, DataLayout> mat6(2,2);
+ mat6.setZero();
+ Eigen::array<DimPair, 1> dims6 = {{DimPair(1, 0)}};
+ typedef TensorEvaluator<decltype(mat1.contract(mat3, dims6)), DefaultDevice> Evaluator3;
+ Evaluator3 eval3(mat1.contract(mat3, dims6), DefaultDevice());
+ eval3.evalTo(mat6.data());
+ EIGEN_STATIC_ASSERT(Evaluator3::NumDims==2ul, YOU_MADE_A_PROGRAMMING_MISTAKE);
+ VERIFY_IS_EQUAL(eval3.dimensions()[0], 2);
+ VERIFY_IS_EQUAL(eval3.dimensions()[1], 2);
+
+ VERIFY_IS_APPROX(mat6(0,0), mat1(0,0)*mat3(0,0) + mat1(0,1)*mat3(1,0) + mat1(0,2)*mat3(2,0));
+ VERIFY_IS_APPROX(mat6(0,1), mat1(0,0)*mat3(0,1) + mat1(0,1)*mat3(1,1) + mat1(0,2)*mat3(2,1));
+ VERIFY_IS_APPROX(mat6(1,0), mat1(1,0)*mat3(0,0) + mat1(1,1)*mat3(1,0) + mat1(1,2)*mat3(2,0));
+ VERIFY_IS_APPROX(mat6(1,1), mat1(1,0)*mat3(0,1) + mat1(1,1)*mat3(1,1) + mat1(1,2)*mat3(2,1));
+}
+
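+// Contracting over every dimension of both operands yields a rank-0 tensor;
+// for two vectors this is simply their dot product.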
+template<int DataLayout>
+static void test_scalar()
+{
+ Tensor<float, 1, DataLayout> vec1({6});
+ Tensor<float, 1, DataLayout> vec2({6});
+
+ vec1.setRandom();
+ vec2.setRandom();
+
+ Eigen::array<DimPair, 1> dims = {{DimPair(0, 0)}};
+ Tensor<float, 0, DataLayout> scalar = vec1.contract(vec2, dims);
+
+ float expected = 0.0f;
+ for (int i = 0; i < 6; ++i) {
+ expected += vec1(i) * vec2(i);
+ }
+ VERIFY_IS_APPROX(scalar(), expected);
+}
+
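+// Several index pairs can be contracted at once; each pair removes one
+// dimension from each operand.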
+template<int DataLayout>
+static void test_multidims()
+{
+ Tensor<float, 3, DataLayout> mat1(2, 2, 2);
+ Tensor<float, 4, DataLayout> mat2(2, 2, 2, 2);
+
+ mat1.setRandom();
+ mat2.setRandom();
+
+ Tensor<float, 3, DataLayout> mat3(2, 2, 2);
+ mat3.setZero();
+ Eigen::array<DimPair, 2> dims = {{DimPair(1, 2), DimPair(2, 3)}};
+ typedef TensorEvaluator<decltype(mat1.contract(mat2, dims)), DefaultDevice> Evaluator;
+ Evaluator eval(mat1.contract(mat2, dims), DefaultDevice());
+ eval.evalTo(mat3.data());
+ EIGEN_STATIC_ASSERT(Evaluator::NumDims==3ul, YOU_MADE_A_PROGRAMMING_MISTAKE);
+ VERIFY_IS_EQUAL(eval.dimensions()[0], 2);
+ VERIFY_IS_EQUAL(eval.dimensions()[1], 2);
+ VERIFY_IS_EQUAL(eval.dimensions()[2], 2);
+
+ VERIFY_IS_APPROX(mat3(0,0,0), mat1(0,0,0)*mat2(0,0,0,0) + mat1(0,1,0)*mat2(0,0,1,0) +
+ mat1(0,0,1)*mat2(0,0,0,1) + mat1(0,1,1)*mat2(0,0,1,1));
+ VERIFY_IS_APPROX(mat3(0,0,1), mat1(0,0,0)*mat2(0,1,0,0) + mat1(0,1,0)*mat2(0,1,1,0) +
+ mat1(0,0,1)*mat2(0,1,0,1) + mat1(0,1,1)*mat2(0,1,1,1));
+ VERIFY_IS_APPROX(mat3(0,1,0), mat1(0,0,0)*mat2(1,0,0,0) + mat1(0,1,0)*mat2(1,0,1,0) +
+ mat1(0,0,1)*mat2(1,0,0,1) + mat1(0,1,1)*mat2(1,0,1,1));
+ VERIFY_IS_APPROX(mat3(0,1,1), mat1(0,0,0)*mat2(1,1,0,0) + mat1(0,1,0)*mat2(1,1,1,0) +
+ mat1(0,0,1)*mat2(1,1,0,1) + mat1(0,1,1)*mat2(1,1,1,1));
+ VERIFY_IS_APPROX(mat3(1,0,0), mat1(1,0,0)*mat2(0,0,0,0) + mat1(1,1,0)*mat2(0,0,1,0) +
+ mat1(1,0,1)*mat2(0,0,0,1) + mat1(1,1,1)*mat2(0,0,1,1));
+ VERIFY_IS_APPROX(mat3(1,0,1), mat1(1,0,0)*mat2(0,1,0,0) + mat1(1,1,0)*mat2(0,1,1,0) +
+ mat1(1,0,1)*mat2(0,1,0,1) + mat1(1,1,1)*mat2(0,1,1,1));
+ VERIFY_IS_APPROX(mat3(1,1,0), mat1(1,0,0)*mat2(1,0,0,0) + mat1(1,1,0)*mat2(1,0,1,0) +
+ mat1(1,0,1)*mat2(1,0,0,1) + mat1(1,1,1)*mat2(1,0,1,1));
+ VERIFY_IS_APPROX(mat3(1,1,1), mat1(1,0,0)*mat2(1,1,0,0) + mat1(1,1,0)*mat2(1,1,1,0) +
+ mat1(1,0,1)*mat2(1,1,0,1) + mat1(1,1,1)*mat2(1,1,1,1));
+
+ Tensor<float, 2, DataLayout> mat4(2, 2);
+ Tensor<float, 3, DataLayout> mat5(2, 2, 2);
+
+ mat4.setRandom();
+ mat5.setRandom();
+
+ Tensor<float, 1, DataLayout> mat6(2);
+ mat6.setZero();
+ Eigen::array<DimPair, 2> dims2({{DimPair(0, 1), DimPair(1, 0)}});
+ typedef TensorEvaluator<decltype(mat4.contract(mat5, dims2)), DefaultDevice> Evaluator2;
+ Evaluator2 eval2(mat4.contract(mat5, dims2), DefaultDevice());
+ eval2.evalTo(mat6.data());
+ EIGEN_STATIC_ASSERT(Evaluator2::NumDims==1ul, YOU_MADE_A_PROGRAMMING_MISTAKE);
+ VERIFY_IS_EQUAL(eval2.dimensions()[0], 2);
+
+ VERIFY_IS_APPROX(mat6(0), mat4(0,0)*mat5(0,0,0) + mat4(1,0)*mat5(0,1,0) +
+ mat4(0,1)*mat5(1,0,0) + mat4(1,1)*mat5(1,1,0));
+ VERIFY_IS_APPROX(mat6(1), mat4(0,0)*mat5(0,0,1) + mat4(1,0)*mat5(0,1,1) +
+ mat4(0,1)*mat5(1,0,1) + mat4(1,1)*mat5(1,1,1));
+}
+
+template<int DataLayout>
+static void test_holes() {
+ Tensor<float, 4, DataLayout> t1(2, 5, 7, 3);
+ Tensor<float, 5, DataLayout> t2(2, 7, 11, 13, 3);
+ t1.setRandom();
+ t2.setRandom();
+
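+ // The contracted pairs (dims 0 and 3 of t1, dims 0 and 4 of t2) are not
+ // adjacent, leaving "holes" between the surviving dimensions.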
+ Eigen::array<DimPair, 2> dims = {{DimPair(0, 0), DimPair(3, 4)}};
+ Tensor<float, 5, DataLayout> result = t1.contract(t2, dims);
+ VERIFY_IS_EQUAL(result.dimension(0), 5);
+ VERIFY_IS_EQUAL(result.dimension(1), 7);
+ VERIFY_IS_EQUAL(result.dimension(2), 7);
+ VERIFY_IS_EQUAL(result.dimension(3), 11);
+ VERIFY_IS_EQUAL(result.dimension(4), 13);
+
+ // Verify every entry of the 5x7x7x11x13 result.
+ for (int i = 0; i < 5; ++i) {
+ for (int j = 0; j < 7; ++j) {
+ for (int k = 0; k < 7; ++k) {
+ for (int l = 0; l < 11; ++l) {
+ for (int m = 0; m < 13; ++m) {
+ VERIFY_IS_APPROX(result(i, j, k, l, m),
+ t1(0, i, j, 0) * t2(0, k, l, m, 0) +
+ t1(1, i, j, 0) * t2(1, k, l, m, 0) +
+ t1(0, i, j, 1) * t2(0, k, l, m, 1) +
+ t1(1, i, j, 1) * t2(1, k, l, m, 1) +
+ t1(0, i, j, 2) * t2(0, k, l, m, 2) +
+ t1(1, i, j, 2) * t2(1, k, l, m, 2));
+ }
+ }
+ }
+ }
+ }
+}
+
+template<int DataLayout>
+static void test_full_redux()
+{
+ Tensor<float, 2, DataLayout> t1(2, 2);
+ Tensor<float, 3, DataLayout> t2(2, 2, 2);
+ t1.setRandom();
+ t2.setRandom();
+
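+ // t1 is fully reduced: both of its dimensions are contracted against the
+ // first two dimensions of t2, leaving a rank-1 result.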
+ Eigen::array<DimPair, 2> dims = {{DimPair(0, 0), DimPair(1, 1)}};
+ Tensor<float, 1, DataLayout> result = t1.contract(t2, dims);
+ VERIFY_IS_EQUAL(result.dimension(0), 2);
+ VERIFY_IS_APPROX(result(0), t1(0, 0) * t2(0, 0, 0) + t1(1, 0) * t2(1, 0, 0)
+ + t1(0, 1) * t2(0, 1, 0) + t1(1, 1) * t2(1, 1, 0));
+ VERIFY_IS_APPROX(result(1), t1(0, 0) * t2(0, 0, 1) + t1(1, 0) * t2(1, 0, 1)
+ + t1(0, 1) * t2(0, 1, 1) + t1(1, 1) * t2(1, 1, 1));
+
+ dims[0] = DimPair(1, 0);
+ dims[1] = DimPair(2, 1);
+ result = t2.contract(t1, dims);
+ VERIFY_IS_EQUAL(result.dimension(0), 2);
+ VERIFY_IS_APPROX(result(0), t1(0, 0) * t2(0, 0, 0) + t1(1, 0) * t2(0, 1, 0)
+ + t1(0, 1) * t2(0, 0, 1) + t1(1, 1) * t2(0, 1, 1));
+ VERIFY_IS_APPROX(result(1), t1(0, 0) * t2(1, 0, 0) + t1(1, 0) * t2(1, 1, 0)
+ + t1(0, 1) * t2(1, 0, 1) + t1(1, 1) * t2(1, 1, 1));
+}
+
+template<int DataLayout>
+static void test_contraction_of_contraction()
+{
+ Tensor<float, 2, DataLayout> t1(2, 2);
+ Tensor<float, 2, DataLayout> t2(2, 2);
+ Tensor<float, 2, DataLayout> t3(2, 2);
+ Tensor<float, 2, DataLayout> t4(2, 2);
+ t1.setRandom();
+ t2.setRandom();
+ t3.setRandom();
+ t4.setRandom();
+
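+ // Contraction expressions can themselves be used as contraction operands;
+ // the whole composite expression is only evaluated when assigned to 'result'.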
+ Eigen::array<DimPair, 1> dims = {{DimPair(1, 0)}};
+ auto contract1 = t1.contract(t2, dims);
+ auto diff = t3 - contract1;
+ auto contract2 = t1.contract(t4, dims);
+ Tensor<float, 2, DataLayout> result = contract2.contract(diff, dims);
+
+ VERIFY_IS_EQUAL(result.dimension(0), 2);
+ VERIFY_IS_EQUAL(result.dimension(1), 2);
+
+ Eigen::Map<Eigen::Matrix<float, Dynamic, Dynamic, DataLayout>>
+ m1(t1.data(), 2, 2), m2(t2.data(), 2, 2), m3(t3.data(), 2, 2),
+ m4(t4.data(), 2, 2);
+ Eigen::Matrix<float, Dynamic, Dynamic, DataLayout>
+ expected = (m1 * m4) * (m3 - m1 * m2);
+
+ VERIFY_IS_APPROX(result(0, 0), expected(0, 0));
+ VERIFY_IS_APPROX(result(0, 1), expected(0, 1));
+ VERIFY_IS_APPROX(result(1, 0), expected(1, 0));
+ VERIFY_IS_APPROX(result(1, 1), expected(1, 1));
+}
+
+template<int DataLayout>
+static void test_expr()
+{
+ Tensor<float, 2, DataLayout> mat1(2, 3);
+ Tensor<float, 2, DataLayout> mat2(3, 2);
+ mat1.setRandom();
+ mat2.setRandom();
+
+ Tensor<float, 2, DataLayout> mat3(2,2);
+
+ Eigen::array<DimPair, 1> dims = {{DimPair(1, 0)}};
+ mat3 = mat1.contract(mat2, dims);
+
+ VERIFY_IS_APPROX(mat3(0,0), mat1(0,0)*mat2(0,0) + mat1(0,1)*mat2(1,0) + mat1(0,2)*mat2(2,0));
+ VERIFY_IS_APPROX(mat3(0,1), mat1(0,0)*mat2(0,1) + mat1(0,1)*mat2(1,1) + mat1(0,2)*mat2(2,1));
+ VERIFY_IS_APPROX(mat3(1,0), mat1(1,0)*mat2(0,0) + mat1(1,1)*mat2(1,0) + mat1(1,2)*mat2(2,0));
+ VERIFY_IS_APPROX(mat3(1,1), mat1(1,0)*mat2(0,1) + mat1(1,1)*mat2(1,1) + mat1(1,2)*mat2(2,1));
+}
+
+template<int DataLayout>
+static void test_out_of_order_contraction()
+{
+ Tensor<float, 3, DataLayout> mat1(2, 2, 2);
+ Tensor<float, 3, DataLayout> mat2(2, 2, 2);
+
+ mat1.setRandom();
+ mat2.setRandom();
+
+ Tensor<float, 2, DataLayout> mat3(2, 2);
+
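+ // The dimension pairs need not be listed in index order: both orderings
+ // below must produce the same result.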
+ Eigen::array<DimPair, 2> dims = {{DimPair(2, 0), DimPair(0, 2)}};
+ mat3 = mat1.contract(mat2, dims);
+
+ VERIFY_IS_APPROX(mat3(0, 0),
+ mat1(0,0,0)*mat2(0,0,0) + mat1(1,0,0)*mat2(0,0,1) +
+ mat1(0,0,1)*mat2(1,0,0) + mat1(1,0,1)*mat2(1,0,1));
+ VERIFY_IS_APPROX(mat3(1, 0),
+ mat1(0,1,0)*mat2(0,0,0) + mat1(1,1,0)*mat2(0,0,1) +
+ mat1(0,1,1)*mat2(1,0,0) + mat1(1,1,1)*mat2(1,0,1));
+ VERIFY_IS_APPROX(mat3(0, 1),
+ mat1(0,0,0)*mat2(0,1,0) + mat1(1,0,0)*mat2(0,1,1) +
+ mat1(0,0,1)*mat2(1,1,0) + mat1(1,0,1)*mat2(1,1,1));
+ VERIFY_IS_APPROX(mat3(1, 1),
+ mat1(0,1,0)*mat2(0,1,0) + mat1(1,1,0)*mat2(0,1,1) +
+ mat1(0,1,1)*mat2(1,1,0) + mat1(1,1,1)*mat2(1,1,1));
+
+ Eigen::array<DimPair, 2> dims2 = {{DimPair(0, 2), DimPair(2, 0)}};
+ mat3 = mat1.contract(mat2, dims2);
+
+ VERIFY_IS_APPROX(mat3(0, 0),
+ mat1(0,0,0)*mat2(0,0,0) + mat1(1,0,0)*mat2(0,0,1) +
+ mat1(0,0,1)*mat2(1,0,0) + mat1(1,0,1)*mat2(1,0,1));
+ VERIFY_IS_APPROX(mat3(1, 0),
+ mat1(0,1,0)*mat2(0,0,0) + mat1(1,1,0)*mat2(0,0,1) +
+ mat1(0,1,1)*mat2(1,0,0) + mat1(1,1,1)*mat2(1,0,1));
+ VERIFY_IS_APPROX(mat3(0, 1),
+ mat1(0,0,0)*mat2(0,1,0) + mat1(1,0,0)*mat2(0,1,1) +
+ mat1(0,0,1)*mat2(1,1,0) + mat1(1,0,1)*mat2(1,1,1));
+ VERIFY_IS_APPROX(mat3(1, 1),
+ mat1(0,1,0)*mat2(0,1,0) + mat1(1,1,0)*mat2(0,1,1) +
+ mat1(0,1,1)*mat2(1,1,0) + mat1(1,1,1)*mat2(1,1,1));
+}
+
+template<int DataLayout>
+static void test_consistency()
+{
+ // Tests that computing the contraction in either operand order yields
+ // results that are transposes of each other, i.e. (A*B)^T = B^T * A^T.
+
+ Tensor<float, 3, DataLayout> mat1(4, 3, 5);
+ Tensor<float, 5, DataLayout> mat2(3, 2, 1, 5, 4);
+ mat1.setRandom();
+ mat2.setRandom();
+
+ Tensor<float, 4, DataLayout> mat3(5, 2, 1, 5);
+ Tensor<float, 4, DataLayout> mat4(2, 1, 5, 5);
+
+ // contract on dimensions of size 4 and 3
+ Eigen::array<DimPair, 2> dims1 = {{DimPair(0, 4), DimPair(1, 0)}};
+ Eigen::array<DimPair, 2> dims2 = {{DimPair(4, 0), DimPair(0, 1)}};
+
+ mat3 = mat1.contract(mat2, dims1);
+ mat4 = mat2.contract(mat1, dims2);
+
+ // check that these are equal except for ordering of dimensions
+ if (DataLayout == ColMajor) {
+ for (size_t i = 0; i < 5; i++) {
+ for (size_t j = 0; j < 10; j++) {
+ VERIFY_IS_APPROX(mat3.data()[i + 5 * j], mat4.data()[j + 10 * i]);
+ }
+ }
+ } else {
+ // Row major
+ for (size_t i = 0; i < 5; i++) {
+ for (size_t j = 0; j < 10; j++) {
+ VERIFY_IS_APPROX(mat3.data()[10 * i + j], mat4.data()[i + 5 * j]);
+ }
+ }
+ }
+}
+
+template<int DataLayout>
+static void test_large_contraction()
+{
+ Tensor<float, 4, DataLayout> t_left(30, 50, 8, 31);
+ Tensor<float, 5, DataLayout> t_right(8, 31, 7, 20, 10);
+ Tensor<float, 5, DataLayout> t_result(30, 50, 7, 20, 10);
+
+ t_left.setRandom();
+ t_right.setRandom();
+
+ // Add a little offset so that the results won't be close to zero.
+ t_left += t_left.constant(1.0f);
+ t_right += t_right.constant(1.0f);
+
+ typedef Map<Eigen::Matrix<float, Dynamic, Dynamic, DataLayout>> MapXf;
+ MapXf m_left(t_left.data(), 1500, 248);
+ MapXf m_right(t_right.data(), 248, 1400);
+ Eigen::Matrix<float, Dynamic, Dynamic, DataLayout> m_result(1500, 1400);
+
+ // this contraction should be equivalent to a single matrix multiplication
+ Eigen::array<DimPair, 2> dims = {{DimPair(2, 0), DimPair(3, 1)}};
+
+ // compute results by separate methods
+ t_result = t_left.contract(t_right, dims);
+ m_result = m_left * m_right;
+
+ for (int i = 0; i < t_result.dimensions().TotalSize(); i++) {
+ VERIFY(&t_result.data()[i] != &m_result.data()[i]);
+ VERIFY_IS_APPROX(t_result.data()[i], m_result.data()[i]);
+ }
+}
+
+template<int DataLayout>
+static void test_matrix_vector()
+{
+ Tensor<float, 2, DataLayout> t_left(30, 50);
+ Tensor<float, 1, DataLayout> t_right(50);
+ Tensor<float, 1, DataLayout> t_result(30);
+
+ t_left.setRandom();
+ t_right.setRandom();
+
+ typedef Map<Eigen::Matrix<float, Dynamic, Dynamic, DataLayout>> MapXf;
+ MapXf m_left(t_left.data(), 30, 50);
+ MapXf m_right(t_right.data(), 50, 1);
+ Eigen::Matrix<float, Dynamic, Dynamic, DataLayout> m_result(30, 1);
+
+ // this contraction should be equivalent to a single matrix multiplication
+ Eigen::array<DimPair, 1> dims{{DimPair(1, 0)}};
+
+ // compute results by separate methods
+ t_result = t_left.contract(t_right, dims);
+ m_result = m_left * m_right;
+
+ for (int i = 0; i < t_result.dimensions().TotalSize(); i++) {
+ VERIFY(internal::isApprox(t_result(i), m_result(i, 0), 1));
+ }
+}
+
+
+template<int DataLayout>
+static void test_tensor_vector()
+{
+ Tensor<float, 3, DataLayout> t_left(7, 13, 17);
+ Tensor<float, 2, DataLayout> t_right(1, 7);
+
+ t_left.setRandom();
+ t_right.setRandom();
+
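+ // Contracts dimension 0 of the rank-3 tensor (size 7) against dimension 1
+ // of the 1x7 tensor, leaving a 13x17x1 result.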
+ typedef typename Tensor<float, 1, DataLayout>::DimensionPair DimensionPair;
+ Eigen::array<DimensionPair, 1> dim_pair01{{{0, 1}}};
+ Tensor<float, 3, DataLayout> t_result = t_left.contract(t_right, dim_pair01);
+
+ typedef Map<Eigen::Matrix<float, Dynamic, Dynamic, DataLayout>> MapXf;
+ MapXf m_left(t_left.data(), 7, 13*17);
+ MapXf m_right(t_right.data(), 1, 7);
+ Eigen::Matrix<float, Dynamic, Dynamic, DataLayout> m_result = m_left.transpose() * m_right.transpose();
+
+ for (int i = 0; i < t_result.dimensions().TotalSize(); i++) {
+ VERIFY(internal::isApprox(t_result(i), m_result(i, 0), 1));
+ }
+}
+
+
+template<int DataLayout>
+static void test_small_blocking_factors()
+{
+ Tensor<float, 4, DataLayout> t_left(30, 5, 3, 31);
+ Tensor<float, 5, DataLayout> t_right(3, 31, 7, 20, 1);
+ t_left.setRandom();
+ t_right.setRandom();
+
+ // Add a little offset so that the results won't be close to zero.
+ t_left += t_left.constant(1.0f);
+ t_right += t_right.constant(1.0f);
+
+ // Force the cache sizes, which results in smaller blocking factors.
+ Eigen::setCpuCacheSizes(896, 1920, 2944);
+
+ // this contraction should be equivalent to a single matrix multiplication
+ Eigen::array<DimPair, 2> dims = {{DimPair(2, 0), DimPair(3, 1)}};
+ Tensor<float, 5, DataLayout> t_result;
+ t_result = t_left.contract(t_right, dims);
+
+ // compute the result using a simple Eigen matrix product
+ Map<Eigen::Matrix<float, Dynamic, Dynamic, DataLayout>> m_left(t_left.data(), 150, 93);
+ Map<Eigen::Matrix<float, Dynamic, Dynamic, DataLayout>> m_right(t_right.data(), 93, 140);
+ Eigen::Matrix<float, Dynamic, Dynamic, DataLayout> m_result = m_left * m_right;
+
+ for (int i = 0; i < t_result.dimensions().TotalSize(); i++) {
+ VERIFY_IS_APPROX(t_result.data()[i], m_result.data()[i]);
+ }
+}
+
+template<int DataLayout>
+static void test_tensor_product()
+{
+ Tensor<float, 2, DataLayout> mat1(2, 3);
+ Tensor<float, 2, DataLayout> mat2(4, 1);
+ mat1.setRandom();
+ mat2.setRandom();
+
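+ // An empty list of contraction pairs yields the tensor (outer) product:
+ // each entry of the rank-4 result is the product of one entry of each input.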
+ Tensor<float, 4, DataLayout> result = mat1.contract(mat2, Eigen::array<DimPair, 0>{{}});
+
+ VERIFY_IS_EQUAL(result.dimension(0), 2);
+ VERIFY_IS_EQUAL(result.dimension(1), 3);
+ VERIFY_IS_EQUAL(result.dimension(2), 4);
+ VERIFY_IS_EQUAL(result.dimension(3), 1);
+ for (int i = 0; i < result.dimension(0); ++i) {
+ for (int j = 0; j < result.dimension(1); ++j) {
+ for (int k = 0; k < result.dimension(2); ++k) {
+ for (int l = 0; l < result.dimension(3); ++l) {
+ VERIFY_IS_APPROX(result(i, j, k, l), mat1(i, j) * mat2(k, l) );
+ }
+ }
+ }
+ }
+}
+
+
+template<int DataLayout>
+static void test_const_inputs()
+{
+ Tensor<float, 2, DataLayout> in1(2, 3);
+ Tensor<float, 2, DataLayout> in2(3, 2);
+ in1.setRandom();
+ in2.setRandom();
+
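+ // Map the inputs through TensorMap<Tensor<const float, ...>> to check that
+ // contraction accepts read-only operands.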
+ TensorMap<Tensor<const float, 2, DataLayout> > mat1(in1.data(), 2, 3);
+ TensorMap<Tensor<const float, 2, DataLayout> > mat2(in2.data(), 3, 2);
+ Tensor<float, 2, DataLayout> mat3(2,2);
+
+ Eigen::array<DimPair, 1> dims = {{DimPair(1, 0)}};
+ mat3 = mat1.contract(mat2, dims);
+
+ VERIFY_IS_APPROX(mat3(0,0), mat1(0,0)*mat2(0,0) + mat1(0,1)*mat2(1,0) + mat1(0,2)*mat2(2,0));
+ VERIFY_IS_APPROX(mat3(0,1), mat1(0,0)*mat2(0,1) + mat1(0,1)*mat2(1,1) + mat1(0,2)*mat2(2,1));
+ VERIFY_IS_APPROX(mat3(1,0), mat1(1,0)*mat2(0,0) + mat1(1,1)*mat2(1,0) + mat1(1,2)*mat2(2,0));
+ VERIFY_IS_APPROX(mat3(1,1), mat1(1,0)*mat2(0,1) + mat1(1,1)*mat2(1,1) + mat1(1,2)*mat2(2,1));
+}
+
+void test_cxx11_tensor_contraction()
+{
+ CALL_SUBTEST(test_evals<ColMajor>());
+ CALL_SUBTEST(test_evals<RowMajor>());
+ CALL_SUBTEST(test_scalar<ColMajor>());
+ CALL_SUBTEST(test_scalar<RowMajor>());
+ CALL_SUBTEST(test_multidims<ColMajor>());
+ CALL_SUBTEST(test_multidims<RowMajor>());
+ CALL_SUBTEST(test_holes<ColMajor>());
+ CALL_SUBTEST(test_holes<RowMajor>());
+ CALL_SUBTEST(test_full_redux<ColMajor>());
+ CALL_SUBTEST(test_full_redux<RowMajor>());
+ CALL_SUBTEST(test_contraction_of_contraction<ColMajor>());
+ CALL_SUBTEST(test_contraction_of_contraction<RowMajor>());
+ CALL_SUBTEST(test_expr<ColMajor>());
+ CALL_SUBTEST(test_expr<RowMajor>());
+ CALL_SUBTEST(test_out_of_order_contraction<ColMajor>());
+ CALL_SUBTEST(test_out_of_order_contraction<RowMajor>());
+ CALL_SUBTEST(test_consistency<ColMajor>());
+ CALL_SUBTEST(test_consistency<RowMajor>());
+ CALL_SUBTEST(test_large_contraction<ColMajor>());
+ CALL_SUBTEST(test_large_contraction<RowMajor>());
+ CALL_SUBTEST(test_matrix_vector<ColMajor>());
+ CALL_SUBTEST(test_matrix_vector<RowMajor>());
+ CALL_SUBTEST(test_tensor_vector<ColMajor>());
+ CALL_SUBTEST(test_tensor_vector<RowMajor>());
+ CALL_SUBTEST(test_small_blocking_factors<ColMajor>());
+ CALL_SUBTEST(test_small_blocking_factors<RowMajor>());
+ CALL_SUBTEST(test_tensor_product<ColMajor>());
+ CALL_SUBTEST(test_tensor_product<RowMajor>());
+ CALL_SUBTEST(test_const_inputs<ColMajor>());
+ CALL_SUBTEST(test_const_inputs<RowMajor>());
+}
diff --git a/unsupported/test/cxx11_tensor_convolution.cpp b/unsupported/test/cxx11_tensor_convolution.cpp
new file mode 100644
index 000000000..e3d4675eb
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_convolution.cpp
@@ -0,0 +1,149 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+
+#include <Eigen/CXX11/Tensor>
+
+using Eigen::Tensor;
+using Eigen::DefaultDevice;
+
+template <int DataLayout>
+static void test_evals()
+{
+ Tensor<float, 2, DataLayout> input(3, 3);
+ Tensor<float, 1, DataLayout> kernel(2);
+
+ input.setRandom();
+ kernel.setRandom();
+
+ Tensor<float, 2, DataLayout> result(2,3);
+ result.setZero();
+ Eigen::array<Tensor<float, 2>::Index, 1> dims3{{0}};
+
+ typedef TensorEvaluator<decltype(input.convolve(kernel, dims3)), DefaultDevice> Evaluator;
+ Evaluator eval(input.convolve(kernel, dims3), DefaultDevice());
+ eval.evalTo(result.data());
+ EIGEN_STATIC_ASSERT(Evaluator::NumDims==2ul, YOU_MADE_A_PROGRAMMING_MISTAKE);
+ VERIFY_IS_EQUAL(eval.dimensions()[0], 2);
+ VERIFY_IS_EQUAL(eval.dimensions()[1], 3);
+
+ VERIFY_IS_APPROX(result(0,0), input(0,0)*kernel(0) + input(1,0)*kernel(1)); // index 0
+ VERIFY_IS_APPROX(result(0,1), input(0,1)*kernel(0) + input(1,1)*kernel(1)); // index 2
+ VERIFY_IS_APPROX(result(0,2), input(0,2)*kernel(0) + input(1,2)*kernel(1)); // index 4
+ VERIFY_IS_APPROX(result(1,0), input(1,0)*kernel(0) + input(2,0)*kernel(1)); // index 1
+ VERIFY_IS_APPROX(result(1,1), input(1,1)*kernel(0) + input(2,1)*kernel(1)); // index 3
+ VERIFY_IS_APPROX(result(1,2), input(1,2)*kernel(0) + input(2,2)*kernel(1)); // index 5
+}
+
+template <int DataLayout>
+static void test_expr()
+{
+ Tensor<float, 2, DataLayout> input(3, 3);
+ Tensor<float, 2, DataLayout> kernel(2, 2);
+ input.setRandom();
+ kernel.setRandom();
+
+ Tensor<float, 2, DataLayout> result(2,2);
+ Eigen::array<ptrdiff_t, 2> dims;
+ dims[0] = 0;
+ dims[1] = 1;
+ result = input.convolve(kernel, dims);
+
+ VERIFY_IS_APPROX(result(0,0), input(0,0)*kernel(0,0) + input(0,1)*kernel(0,1) +
+ input(1,0)*kernel(1,0) + input(1,1)*kernel(1,1));
+ VERIFY_IS_APPROX(result(0,1), input(0,1)*kernel(0,0) + input(0,2)*kernel(0,1) +
+ input(1,1)*kernel(1,0) + input(1,2)*kernel(1,1));
+ VERIFY_IS_APPROX(result(1,0), input(1,0)*kernel(0,0) + input(1,1)*kernel(0,1) +
+ input(2,0)*kernel(1,0) + input(2,1)*kernel(1,1));
+ VERIFY_IS_APPROX(result(1,1), input(1,1)*kernel(0,0) + input(1,2)*kernel(0,1) +
+ input(2,1)*kernel(1,0) + input(2,2)*kernel(1,1));
+}
+
+template <int DataLayout>
+static void test_modes() {
+ Tensor<float, 1, DataLayout> input(3);
+ Tensor<float, 1, DataLayout> kernel(3);
+ input(0) = 1.0f;
+ input(1) = 2.0f;
+ input(2) = 3.0f;
+ kernel(0) = 0.5f;
+ kernel(1) = 1.0f;
+ kernel(2) = 0.0f;
+
+ Eigen::array<ptrdiff_t, 1> dims;
+ dims[0] = 0;
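+ // For a 1D convolution the output length is
+ // input + pad_left + pad_right - kernel_size + 1.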
+ Eigen::array<std::pair<ptrdiff_t, ptrdiff_t>, 1> padding;
+
+ // Emulate VALID mode (as defined in
+ // http://docs.scipy.org/doc/numpy/reference/generated/numpy.convolve.html).
+ padding[0] = std::make_pair(0, 0);
+ Tensor<float, 1, DataLayout> valid(1);
+ valid = input.pad(padding).convolve(kernel, dims);
+ VERIFY_IS_EQUAL(valid.dimension(0), 1);
+ VERIFY_IS_APPROX(valid(0), 2.5f);
+
+ // Emulate SAME mode (as defined in
+ // http://docs.scipy.org/doc/numpy/reference/generated/numpy.convolve.html).
+ padding[0] = std::make_pair(1, 1);
+ Tensor<float, 1, DataLayout> same(3);
+ same = input.pad(padding).convolve(kernel, dims);
+ VERIFY_IS_EQUAL(same.dimension(0), 3);
+ VERIFY_IS_APPROX(same(0), 1.0f);
+ VERIFY_IS_APPROX(same(1), 2.5f);
+ VERIFY_IS_APPROX(same(2), 4.0f);
+
+ // Emulate FULL mode (as defined in
+ // http://docs.scipy.org/doc/numpy/reference/generated/numpy.convolve.html).
+ padding[0] = std::make_pair(2, 2);
+ Tensor<float, 1, DataLayout> full(5);
+ full = input.pad(padding).convolve(kernel, dims);
+ VERIFY_IS_EQUAL(full.dimension(0), 5);
+ VERIFY_IS_APPROX(full(0), 0.0f);
+ VERIFY_IS_APPROX(full(1), 1.0f);
+ VERIFY_IS_APPROX(full(2), 2.5f);
+ VERIFY_IS_APPROX(full(3), 4.0f);
+ VERIFY_IS_APPROX(full(4), 1.5f);
+}
+
+template <int DataLayout>
+static void test_strides() {
+ Tensor<float, 1, DataLayout> input(13);
+ Tensor<float, 1, DataLayout> kernel(3);
+ input.setRandom();
+ kernel.setRandom();
+
+ Eigen::array<ptrdiff_t, 1> dims;
+ dims[0] = 0;
+ Eigen::array<ptrdiff_t, 1> stride_of_3;
+ stride_of_3[0] = 3;
+ Eigen::array<ptrdiff_t, 1> stride_of_2;
+ stride_of_2[0] = 2;
+
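+ // input.stride(3) keeps every 3rd sample (5 of the 13), the convolution
+ // then produces 3 values, and the output stride of 2 keeps 2 of them.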
+ Tensor<float, 1, DataLayout> result;
+ result = input.stride(stride_of_3).convolve(kernel, dims).stride(stride_of_2);
+
+ VERIFY_IS_EQUAL(result.dimension(0), 2);
+ VERIFY_IS_APPROX(result(0), (input(0)*kernel(0) + input(3)*kernel(1) +
+ input(6)*kernel(2)));
+ VERIFY_IS_APPROX(result(1), (input(6)*kernel(0) + input(9)*kernel(1) +
+ input(12)*kernel(2)));
+}
+
+void test_cxx11_tensor_convolution()
+{
+ CALL_SUBTEST(test_evals<ColMajor>());
+ CALL_SUBTEST(test_evals<RowMajor>());
+ CALL_SUBTEST(test_expr<ColMajor>());
+ CALL_SUBTEST(test_expr<RowMajor>());
+ CALL_SUBTEST(test_modes<ColMajor>());
+ CALL_SUBTEST(test_modes<RowMajor>());
+ CALL_SUBTEST(test_strides<ColMajor>());
+ CALL_SUBTEST(test_strides<RowMajor>());
+}
diff --git a/unsupported/test/cxx11_tensor_cuda.cu b/unsupported/test/cxx11_tensor_cuda.cu
new file mode 100644
index 000000000..0ba9d52e9
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_cuda.cu
@@ -0,0 +1,1287 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#define EIGEN_TEST_NO_LONGDOUBLE
+#define EIGEN_TEST_NO_COMPLEX
+#define EIGEN_TEST_FUNC cxx11_tensor_cuda
+#define EIGEN_USE_GPU
+
+#if defined __CUDACC_VER__ && __CUDACC_VER__ >= 70500
+#include <cuda_fp16.h>
+#endif
+#include "main.h"
+#include <unsupported/Eigen/CXX11/Tensor>
+
+using Eigen::Tensor;
+
+void test_cuda_nullary() {
+ Tensor<float, 1, 0, int> in1(2);
+ Tensor<float, 1, 0, int> in2(2);
+ in1.setRandom();
+ in2.setRandom();
+
+ std::size_t tensor_bytes = in1.size() * sizeof(float);
+
+ float* d_in1;
+ float* d_in2;
+ cudaMalloc((void**)(&d_in1), tensor_bytes);
+ cudaMalloc((void**)(&d_in2), tensor_bytes);
+ cudaMemcpy(d_in1, in1.data(), tensor_bytes, cudaMemcpyHostToDevice);
+ cudaMemcpy(d_in2, in2.data(), tensor_bytes, cudaMemcpyHostToDevice);
+
+ Eigen::CudaStreamDevice stream;
+ Eigen::GpuDevice gpu_device(&stream);
+
+ Eigen::TensorMap<Eigen::Tensor<float, 1, 0, int>, Eigen::Aligned> gpu_in1(
+ d_in1, 2);
+ Eigen::TensorMap<Eigen::Tensor<float, 1, 0, int>, Eigen::Aligned> gpu_in2(
+ d_in2, 2);
+
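+ // Nullary expressions such as constant() and random() generate their
+ // values directly on the device; no input data is read.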
+ gpu_in1.device(gpu_device) = gpu_in1.constant(3.14f);
+ gpu_in2.device(gpu_device) = gpu_in2.random();
+
+ Tensor<float, 1, 0, int> new1(2);
+ Tensor<float, 1, 0, int> new2(2);
+
+ assert(cudaMemcpyAsync(new1.data(), d_in1, tensor_bytes, cudaMemcpyDeviceToHost,
+ gpu_device.stream()) == cudaSuccess);
+ assert(cudaMemcpyAsync(new2.data(), d_in2, tensor_bytes, cudaMemcpyDeviceToHost,
+ gpu_device.stream()) == cudaSuccess);
+
+ assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
+
+ for (int i = 0; i < 2; ++i) {
+ VERIFY_IS_APPROX(new1(i), 3.14f);
+ VERIFY_IS_NOT_EQUAL(new2(i), in2(i));
+ }
+
+ cudaFree(d_in1);
+ cudaFree(d_in2);
+}
+
+void test_cuda_elementwise_small() {
+ Tensor<float, 1> in1(Eigen::array<Eigen::DenseIndex, 1>(2));
+ Tensor<float, 1> in2(Eigen::array<Eigen::DenseIndex, 1>(2));
+ Tensor<float, 1> out(Eigen::array<Eigen::DenseIndex, 1>(2));
+ in1.setRandom();
+ in2.setRandom();
+
+ std::size_t in1_bytes = in1.size() * sizeof(float);
+ std::size_t in2_bytes = in2.size() * sizeof(float);
+ std::size_t out_bytes = out.size() * sizeof(float);
+
+ float* d_in1;
+ float* d_in2;
+ float* d_out;
+ cudaMalloc((void**)(&d_in1), in1_bytes);
+ cudaMalloc((void**)(&d_in2), in2_bytes);
+ cudaMalloc((void**)(&d_out), out_bytes);
+
+ cudaMemcpy(d_in1, in1.data(), in1_bytes, cudaMemcpyHostToDevice);
+ cudaMemcpy(d_in2, in2.data(), in2_bytes, cudaMemcpyHostToDevice);
+
+ Eigen::CudaStreamDevice stream;
+ Eigen::GpuDevice gpu_device(&stream);
+
+ Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_in1(
+ d_in1, Eigen::array<Eigen::DenseIndex, 1>(2));
+ Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_in2(
+ d_in2, Eigen::array<Eigen::DenseIndex, 1>(2));
+ Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_out(
+ d_out, Eigen::array<Eigen::DenseIndex, 1>(2));
+
+ gpu_out.device(gpu_device) = gpu_in1 + gpu_in2;
+
+ assert(cudaMemcpyAsync(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost,
+ gpu_device.stream()) == cudaSuccess);
+ assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
+
+ for (int i = 0; i < 2; ++i) {
+ VERIFY_IS_APPROX(
+ out(Eigen::array<Eigen::DenseIndex, 1>(i)),
+ in1(Eigen::array<Eigen::DenseIndex, 1>(i)) + in2(Eigen::array<Eigen::DenseIndex, 1>(i)));
+ }
+
+ cudaFree(d_in1);
+ cudaFree(d_in2);
+ cudaFree(d_out);
+}
+
+void test_cuda_elementwise()
+{
+ Tensor<float, 3> in1(Eigen::array<Eigen::DenseIndex, 3>(72,53,97));
+ Tensor<float, 3> in2(Eigen::array<Eigen::DenseIndex, 3>(72,53,97));
+ Tensor<float, 3> in3(Eigen::array<Eigen::DenseIndex, 3>(72,53,97));
+ Tensor<float, 3> out(Eigen::array<Eigen::DenseIndex, 3>(72,53,97));
+ in1.setRandom();
+ in2.setRandom();
+ in3.setRandom();
+
+ std::size_t in1_bytes = in1.size() * sizeof(float);
+ std::size_t in2_bytes = in2.size() * sizeof(float);
+ std::size_t in3_bytes = in3.size() * sizeof(float);
+ std::size_t out_bytes = out.size() * sizeof(float);
+
+ float* d_in1;
+ float* d_in2;
+ float* d_in3;
+ float* d_out;
+ cudaMalloc((void**)(&d_in1), in1_bytes);
+ cudaMalloc((void**)(&d_in2), in2_bytes);
+ cudaMalloc((void**)(&d_in3), in3_bytes);
+ cudaMalloc((void**)(&d_out), out_bytes);
+
+ cudaMemcpy(d_in1, in1.data(), in1_bytes, cudaMemcpyHostToDevice);
+ cudaMemcpy(d_in2, in2.data(), in2_bytes, cudaMemcpyHostToDevice);
+ cudaMemcpy(d_in3, in3.data(), in3_bytes, cudaMemcpyHostToDevice);
+
+ Eigen::CudaStreamDevice stream;
+ Eigen::GpuDevice gpu_device(&stream);
+
+ Eigen::TensorMap<Eigen::Tensor<float, 3> > gpu_in1(d_in1, Eigen::array<Eigen::DenseIndex, 3>(72,53,97));
+ Eigen::TensorMap<Eigen::Tensor<float, 3> > gpu_in2(d_in2, Eigen::array<Eigen::DenseIndex, 3>(72,53,97));
+ Eigen::TensorMap<Eigen::Tensor<float, 3> > gpu_in3(d_in3, Eigen::array<Eigen::DenseIndex, 3>(72,53,97));
+ Eigen::TensorMap<Eigen::Tensor<float, 3> > gpu_out(d_out, Eigen::array<Eigen::DenseIndex, 3>(72,53,97));
+
+ gpu_out.device(gpu_device) = gpu_in1 + gpu_in2 * gpu_in3;
+
+ assert(cudaMemcpyAsync(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
+ assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
+
+ for (int i = 0; i < 72; ++i) {
+ for (int j = 0; j < 53; ++j) {
+ for (int k = 0; k < 97; ++k) {
+ VERIFY_IS_APPROX(out(Eigen::array<Eigen::DenseIndex, 3>(i,j,k)),
+ in1(Eigen::array<Eigen::DenseIndex, 3>(i,j,k)) +
+ in2(Eigen::array<Eigen::DenseIndex, 3>(i,j,k)) *
+ in3(Eigen::array<Eigen::DenseIndex, 3>(i,j,k)));
+ }
+ }
+ }
+
+ cudaFree(d_in1);
+ cudaFree(d_in2);
+ cudaFree(d_in3);
+ cudaFree(d_out);
+}
+
+void test_cuda_props() {
+ Tensor<float, 1> in1(200);
+ Tensor<bool, 1> out(200);
+ in1.setRandom();
+
+ std::size_t in1_bytes = in1.size() * sizeof(float);
+ std::size_t out_bytes = out.size() * sizeof(bool);
+
+ float* d_in1;
+ bool* d_out;
+ cudaMalloc((void**)(&d_in1), in1_bytes);
+ cudaMalloc((void**)(&d_out), out_bytes);
+
+ cudaMemcpy(d_in1, in1.data(), in1_bytes, cudaMemcpyHostToDevice);
+
+ Eigen::CudaStreamDevice stream;
+ Eigen::GpuDevice gpu_device(&stream);
+
+ Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_in1(
+ d_in1, 200);
+ Eigen::TensorMap<Eigen::Tensor<bool, 1>, Eigen::Aligned> gpu_out(
+ d_out, 200);
+
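+ // The extra parentheses keep 'isnan' from being expanded as a macro on
+ // platforms whose <math.h> defines one.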
+ gpu_out.device(gpu_device) = (gpu_in1.isnan)();
+
+ assert(cudaMemcpyAsync(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost,
+ gpu_device.stream()) == cudaSuccess);
+ assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
+
+ for (int i = 0; i < 200; ++i) {
+ VERIFY_IS_EQUAL(out(i), (std::isnan)(in1(i)));
+ }
+
+ cudaFree(d_in1);
+ cudaFree(d_out);
+}
+
+void test_cuda_reduction()
+{
+ Tensor<float, 4> in1(72,53,97,113);
+ Tensor<float, 2> out(72,97);
+ in1.setRandom();
+
+ std::size_t in1_bytes = in1.size() * sizeof(float);
+ std::size_t out_bytes = out.size() * sizeof(float);
+
+ float* d_in1;
+ float* d_out;
+ cudaMalloc((void**)(&d_in1), in1_bytes);
+ cudaMalloc((void**)(&d_out), out_bytes);
+
+ cudaMemcpy(d_in1, in1.data(), in1_bytes, cudaMemcpyHostToDevice);
+
+ Eigen::CudaStreamDevice stream;
+ Eigen::GpuDevice gpu_device(&stream);
+
+ Eigen::TensorMap<Eigen::Tensor<float, 4> > gpu_in1(d_in1, 72,53,97,113);
+ Eigen::TensorMap<Eigen::Tensor<float, 2> > gpu_out(d_out, 72,97);
+
+ array<Eigen::DenseIndex, 2> reduction_axis;
+ reduction_axis[0] = 1;
+ reduction_axis[1] = 3;
+
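+ // Taking the maximum over dimensions 1 and 3 reduces the 72x53x97x113
+ // input to a 72x97 output.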
+ gpu_out.device(gpu_device) = gpu_in1.maximum(reduction_axis);
+
+ assert(cudaMemcpyAsync(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
+ assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
+
+ for (int i = 0; i < 72; ++i) {
+ for (int j = 0; j < 97; ++j) {
+ float expected = 0;
+ for (int k = 0; k < 53; ++k) {
+ for (int l = 0; l < 113; ++l) {
+ expected =
+ std::max<float>(expected, in1(i, k, j, l));
+ }
+ }
+ VERIFY_IS_APPROX(out(i,j), expected);
+ }
+ }
+
+ cudaFree(d_in1);
+ cudaFree(d_out);
+}
+
+template<int DataLayout>
+void test_cuda_contraction()
+{
+ // With these dimensions the output has 300 * 140 elements, which is more
+ // than the 30 * 1024 threads that can be resident at once on a 15-SM
+ // GK110 GPU.
+ Tensor<float, 4, DataLayout> t_left(6, 50, 3, 31);
+ Tensor<float, 5, DataLayout> t_right(Eigen::array<Eigen::DenseIndex, 5>(3, 31, 7, 20, 1));
+ Tensor<float, 5, DataLayout> t_result(Eigen::array<Eigen::DenseIndex, 5>(6, 50, 7, 20, 1));
+
+ t_left.setRandom();
+ t_right.setRandom();
+
+ std::size_t t_left_bytes = t_left.size() * sizeof(float);
+ std::size_t t_right_bytes = t_right.size() * sizeof(float);
+ std::size_t t_result_bytes = t_result.size() * sizeof(float);
+
+ float* d_t_left;
+ float* d_t_right;
+ float* d_t_result;
+
+ cudaMalloc((void**)(&d_t_left), t_left_bytes);
+ cudaMalloc((void**)(&d_t_right), t_right_bytes);
+ cudaMalloc((void**)(&d_t_result), t_result_bytes);
+
+ cudaMemcpy(d_t_left, t_left.data(), t_left_bytes, cudaMemcpyHostToDevice);
+ cudaMemcpy(d_t_right, t_right.data(), t_right_bytes, cudaMemcpyHostToDevice);
+
+ Eigen::CudaStreamDevice stream;
+ Eigen::GpuDevice gpu_device(&stream);
+
+ Eigen::TensorMap<Eigen::Tensor<float, 4, DataLayout> > gpu_t_left(d_t_left, 6, 50, 3, 31);
+ Eigen::TensorMap<Eigen::Tensor<float, 5, DataLayout> > gpu_t_right(d_t_right, 3, 31, 7, 20, 1);
+ Eigen::TensorMap<Eigen::Tensor<float, 5, DataLayout> > gpu_t_result(d_t_result, 6, 50, 7, 20, 1);
+
+ typedef Eigen::Map<Eigen::Matrix<float, Dynamic, Dynamic, DataLayout> > MapXf;
+ MapXf m_left(t_left.data(), 300, 93);
+ MapXf m_right(t_right.data(), 93, 140);
+ Eigen::Matrix<float, Dynamic, Dynamic, DataLayout> m_result(300, 140);
+
+ typedef Tensor<float, 1>::DimensionPair DimPair;
+ Eigen::array<DimPair, 2> dims;
+ dims[0] = DimPair(2, 0);
+ dims[1] = DimPair(3, 1);
+
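+ // The reference is computed on the host with a plain matrix product; the
+ // tensor contraction itself runs on the GPU.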
+ m_result = m_left * m_right;
+ gpu_t_result.device(gpu_device) = gpu_t_left.contract(gpu_t_right, dims);
+
+ cudaMemcpy(t_result.data(), d_t_result, t_result_bytes, cudaMemcpyDeviceToHost);
+
+ for (DenseIndex i = 0; i < t_result.size(); i++) {
+ if (fabs(t_result.data()[i] - m_result.data()[i]) >= 1e-4f) {
+ std::cout << "mismatch detected at index " << i << ": " << t_result.data()[i] << " vs " << m_result.data()[i] << std::endl;
+ assert(false);
+ }
+ }
+
+ cudaFree(d_t_left);
+ cudaFree(d_t_right);
+ cudaFree(d_t_result);
+}
+
+template<int DataLayout>
+void test_cuda_convolution_1d()
+{
+ Tensor<float, 4, DataLayout> input(74,37,11,137);
+ Tensor<float, 1, DataLayout> kernel(4);
+ Tensor<float, 4, DataLayout> out(74,34,11,137);
+ input = input.constant(10.0f) + input.random();
+ kernel = kernel.constant(7.0f) + kernel.random();
+
+ std::size_t input_bytes = input.size() * sizeof(float);
+ std::size_t kernel_bytes = kernel.size() * sizeof(float);
+ std::size_t out_bytes = out.size() * sizeof(float);
+
+ float* d_input;
+ float* d_kernel;
+ float* d_out;
+ cudaMalloc((void**)(&d_input), input_bytes);
+ cudaMalloc((void**)(&d_kernel), kernel_bytes);
+ cudaMalloc((void**)(&d_out), out_bytes);
+
+ cudaMemcpy(d_input, input.data(), input_bytes, cudaMemcpyHostToDevice);
+ cudaMemcpy(d_kernel, kernel.data(), kernel_bytes, cudaMemcpyHostToDevice);
+
+ Eigen::CudaStreamDevice stream;
+ Eigen::GpuDevice gpu_device(&stream);
+
+ Eigen::TensorMap<Eigen::Tensor<float, 4, DataLayout> > gpu_input(d_input, 74,37,11,137);
+ Eigen::TensorMap<Eigen::Tensor<float, 1, DataLayout> > gpu_kernel(d_kernel, 4);
+ Eigen::TensorMap<Eigen::Tensor<float, 4, DataLayout> > gpu_out(d_out, 74,34,11,137);
+
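+ // Convolving along dimension 1 (size 37) with a kernel of size 4 shrinks
+ // it to 37 - 4 + 1 = 34.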
+ Eigen::array<Eigen::DenseIndex, 1> dims(1);
+ gpu_out.device(gpu_device) = gpu_input.convolve(gpu_kernel, dims);
+
+ assert(cudaMemcpyAsync(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
+ assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
+
+ for (int i = 0; i < 74; ++i) {
+ for (int j = 0; j < 34; ++j) {
+ for (int k = 0; k < 11; ++k) {
+ for (int l = 0; l < 137; ++l) {
+ const float result = out(i,j,k,l);
+ const float expected = input(i,j+0,k,l) * kernel(0) + input(i,j+1,k,l) * kernel(1) +
+ input(i,j+2,k,l) * kernel(2) + input(i,j+3,k,l) * kernel(3);
+ VERIFY_IS_APPROX(result, expected);
+ }
+ }
+ }
+ }
+
+ cudaFree(d_input);
+ cudaFree(d_kernel);
+ cudaFree(d_out);
+}
+
+void test_cuda_convolution_inner_dim_col_major_1d()
+{
+ Tensor<float, 4, ColMajor> input(74,9,11,7);
+ Tensor<float, 1, ColMajor> kernel(4);
+ Tensor<float, 4, ColMajor> out(71,9,11,7);
+ input = input.constant(10.0f) + input.random();
+ kernel = kernel.constant(7.0f) + kernel.random();
+
+ std::size_t input_bytes = input.size() * sizeof(float);
+ std::size_t kernel_bytes = kernel.size() * sizeof(float);
+ std::size_t out_bytes = out.size() * sizeof(float);
+
+ float* d_input;
+ float* d_kernel;
+ float* d_out;
+ cudaMalloc((void**)(&d_input), input_bytes);
+ cudaMalloc((void**)(&d_kernel), kernel_bytes);
+ cudaMalloc((void**)(&d_out), out_bytes);
+
+ cudaMemcpy(d_input, input.data(), input_bytes, cudaMemcpyHostToDevice);
+ cudaMemcpy(d_kernel, kernel.data(), kernel_bytes, cudaMemcpyHostToDevice);
+
+ Eigen::CudaStreamDevice stream;
+ Eigen::GpuDevice gpu_device(&stream);
+
+ Eigen::TensorMap<Eigen::Tensor<float, 4, ColMajor> > gpu_input(d_input,74,9,11,7);
+ Eigen::TensorMap<Eigen::Tensor<float, 1, ColMajor> > gpu_kernel(d_kernel,4);
+ Eigen::TensorMap<Eigen::Tensor<float, 4, ColMajor> > gpu_out(d_out,71,9,11,7);
+
+ Eigen::array<Eigen::DenseIndex, 1> dims(0);
+ gpu_out.device(gpu_device) = gpu_input.convolve(gpu_kernel, dims);
+
+ assert(cudaMemcpyAsync(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
+ assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
+
+ for (int i = 0; i < 71; ++i) {
+ for (int j = 0; j < 9; ++j) {
+ for (int k = 0; k < 11; ++k) {
+ for (int l = 0; l < 7; ++l) {
+ const float result = out(i,j,k,l);
+ const float expected = input(i+0,j,k,l) * kernel(0) + input(i+1,j,k,l) * kernel(1) +
+ input(i+2,j,k,l) * kernel(2) + input(i+3,j,k,l) * kernel(3);
+ VERIFY_IS_APPROX(result, expected);
+ }
+ }
+ }
+ }
+
+ cudaFree(d_input);
+ cudaFree(d_kernel);
+ cudaFree(d_out);
+}
+
+void test_cuda_convolution_inner_dim_row_major_1d()
+{
+ Tensor<float, 4, RowMajor> input(7,9,11,74);
+ Tensor<float, 1, RowMajor> kernel(4);
+ Tensor<float, 4, RowMajor> out(7,9,11,71);
+ input = input.constant(10.0f) + input.random();
+ kernel = kernel.constant(7.0f) + kernel.random();
+
+ std::size_t input_bytes = input.size() * sizeof(float);
+ std::size_t kernel_bytes = kernel.size() * sizeof(float);
+ std::size_t out_bytes = out.size() * sizeof(float);
+
+ float* d_input;
+ float* d_kernel;
+ float* d_out;
+ cudaMalloc((void**)(&d_input), input_bytes);
+ cudaMalloc((void**)(&d_kernel), kernel_bytes);
+ cudaMalloc((void**)(&d_out), out_bytes);
+
+ cudaMemcpy(d_input, input.data(), input_bytes, cudaMemcpyHostToDevice);
+ cudaMemcpy(d_kernel, kernel.data(), kernel_bytes, cudaMemcpyHostToDevice);
+
+ Eigen::CudaStreamDevice stream;
+ Eigen::GpuDevice gpu_device(&stream);
+
+ Eigen::TensorMap<Eigen::Tensor<float, 4, RowMajor> > gpu_input(d_input, 7,9,11,74);
+ Eigen::TensorMap<Eigen::Tensor<float, 1, RowMajor> > gpu_kernel(d_kernel, 4);
+ Eigen::TensorMap<Eigen::Tensor<float, 4, RowMajor> > gpu_out(d_out, 7,9,11,71);
+
+ Eigen::array<Eigen::DenseIndex, 1> dims(3);
+ gpu_out.device(gpu_device) = gpu_input.convolve(gpu_kernel, dims);
+
+ assert(cudaMemcpyAsync(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
+ assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
+
+ for (int i = 0; i < 7; ++i) {
+ for (int j = 0; j < 9; ++j) {
+ for (int k = 0; k < 11; ++k) {
+ for (int l = 0; l < 71; ++l) {
+ const float result = out(i,j,k,l);
+ const float expected = input(i,j,k,l+0) * kernel(0) + input(i,j,k,l+1) * kernel(1) +
+ input(i,j,k,l+2) * kernel(2) + input(i,j,k,l+3) * kernel(3);
+ VERIFY_IS_APPROX(result, expected);
+ }
+ }
+ }
+ }
+
+ cudaFree(d_input);
+ cudaFree(d_kernel);
+ cudaFree(d_out);
+}
+
+template<int DataLayout>
+void test_cuda_convolution_2d()
+{
+ Tensor<float, 4, DataLayout> input(74,37,11,137);
+ Tensor<float, 2, DataLayout> kernel(3,4);
+ Tensor<float, 4, DataLayout> out(74,35,8,137);
+ input = input.constant(10.0f) + input.random();
+ kernel = kernel.constant(7.0f) + kernel.random();
+
+ std::size_t input_bytes = input.size() * sizeof(float);
+ std::size_t kernel_bytes = kernel.size() * sizeof(float);
+ std::size_t out_bytes = out.size() * sizeof(float);
+
+ float* d_input;
+ float* d_kernel;
+ float* d_out;
+ cudaMalloc((void**)(&d_input), input_bytes);
+ cudaMalloc((void**)(&d_kernel), kernel_bytes);
+ cudaMalloc((void**)(&d_out), out_bytes);
+
+ cudaMemcpy(d_input, input.data(), input_bytes, cudaMemcpyHostToDevice);
+ cudaMemcpy(d_kernel, kernel.data(), kernel_bytes, cudaMemcpyHostToDevice);
+
+ Eigen::CudaStreamDevice stream;
+ Eigen::GpuDevice gpu_device(&stream);
+
+ Eigen::TensorMap<Eigen::Tensor<float, 4, DataLayout> > gpu_input(d_input,74,37,11,137);
+ Eigen::TensorMap<Eigen::Tensor<float, 2, DataLayout> > gpu_kernel(d_kernel,3,4);
+ Eigen::TensorMap<Eigen::Tensor<float, 4, DataLayout> > gpu_out(d_out,74,35,8,137);
+
+ Eigen::array<Eigen::DenseIndex, 2> dims(1,2);
+ gpu_out.device(gpu_device) = gpu_input.convolve(gpu_kernel, dims);
+
+ assert(cudaMemcpyAsync(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
+ assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
+
+ for (int i = 0; i < 74; ++i) {
+ for (int j = 0; j < 35; ++j) {
+ for (int k = 0; k < 8; ++k) {
+ for (int l = 0; l < 137; ++l) {
+ const float result = out(i,j,k,l);
+ const float expected = input(i,j+0,k+0,l) * kernel(0,0) +
+ input(i,j+1,k+0,l) * kernel(1,0) +
+ input(i,j+2,k+0,l) * kernel(2,0) +
+ input(i,j+0,k+1,l) * kernel(0,1) +
+ input(i,j+1,k+1,l) * kernel(1,1) +
+ input(i,j+2,k+1,l) * kernel(2,1) +
+ input(i,j+0,k+2,l) * kernel(0,2) +
+ input(i,j+1,k+2,l) * kernel(1,2) +
+ input(i,j+2,k+2,l) * kernel(2,2) +
+ input(i,j+0,k+3,l) * kernel(0,3) +
+ input(i,j+1,k+3,l) * kernel(1,3) +
+ input(i,j+2,k+3,l) * kernel(2,3);
+ VERIFY_IS_APPROX(result, expected);
+ }
+ }
+ }
+ }
+
+ cudaFree(d_input);
+ cudaFree(d_kernel);
+ cudaFree(d_out);
+}
+
+template<int DataLayout>
+void test_cuda_convolution_3d()
+{
+ Tensor<float, 5, DataLayout> input(Eigen::array<Eigen::DenseIndex, 5>(74,37,11,137,17));
+ Tensor<float, 3, DataLayout> kernel(3,4,2);
+ Tensor<float, 5, DataLayout> out(Eigen::array<Eigen::DenseIndex, 5>(74,35,8,136,17));
+ input = input.constant(10.0f) + input.random();
+ kernel = kernel.constant(7.0f) + kernel.random();
+
+ std::size_t input_bytes = input.size() * sizeof(float);
+ std::size_t kernel_bytes = kernel.size() * sizeof(float);
+ std::size_t out_bytes = out.size() * sizeof(float);
+
+ float* d_input;
+ float* d_kernel;
+ float* d_out;
+ cudaMalloc((void**)(&d_input), input_bytes);
+ cudaMalloc((void**)(&d_kernel), kernel_bytes);
+ cudaMalloc((void**)(&d_out), out_bytes);
+
+ cudaMemcpy(d_input, input.data(), input_bytes, cudaMemcpyHostToDevice);
+ cudaMemcpy(d_kernel, kernel.data(), kernel_bytes, cudaMemcpyHostToDevice);
+
+ Eigen::CudaStreamDevice stream;
+ Eigen::GpuDevice gpu_device(&stream);
+
+ Eigen::TensorMap<Eigen::Tensor<float, 5, DataLayout> > gpu_input(d_input,74,37,11,137,17);
+ Eigen::TensorMap<Eigen::Tensor<float, 3, DataLayout> > gpu_kernel(d_kernel,3,4,2);
+ Eigen::TensorMap<Eigen::Tensor<float, 5, DataLayout> > gpu_out(d_out,74,35,8,136,17);
+
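+ // Convolving dimensions 1, 2 and 3 with the 3x4x2 kernel shrinks them to
+ // 37-3+1 = 35, 11-4+1 = 8 and 137-2+1 = 136.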
+ Eigen::array<Eigen::DenseIndex, 3> dims(1,2,3);
+ gpu_out.device(gpu_device) = gpu_input.convolve(gpu_kernel, dims);
+
+ assert(cudaMemcpyAsync(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
+ assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
+
+ for (int i = 0; i < 74; ++i) {
+ for (int j = 0; j < 35; ++j) {
+ for (int k = 0; k < 8; ++k) {
+ for (int l = 0; l < 136; ++l) {
+ for (int m = 0; m < 17; ++m) {
+ const float result = out(i,j,k,l,m);
+ const float expected = input(i,j+0,k+0,l+0,m) * kernel(0,0,0) +
+ input(i,j+1,k+0,l+0,m) * kernel(1,0,0) +
+ input(i,j+2,k+0,l+0,m) * kernel(2,0,0) +
+ input(i,j+0,k+1,l+0,m) * kernel(0,1,0) +
+ input(i,j+1,k+1,l+0,m) * kernel(1,1,0) +
+ input(i,j+2,k+1,l+0,m) * kernel(2,1,0) +
+ input(i,j+0,k+2,l+0,m) * kernel(0,2,0) +
+ input(i,j+1,k+2,l+0,m) * kernel(1,2,0) +
+ input(i,j+2,k+2,l+0,m) * kernel(2,2,0) +
+ input(i,j+0,k+3,l+0,m) * kernel(0,3,0) +
+ input(i,j+1,k+3,l+0,m) * kernel(1,3,0) +
+ input(i,j+2,k+3,l+0,m) * kernel(2,3,0) +
+ input(i,j+0,k+0,l+1,m) * kernel(0,0,1) +
+ input(i,j+1,k+0,l+1,m) * kernel(1,0,1) +
+ input(i,j+2,k+0,l+1,m) * kernel(2,0,1) +
+ input(i,j+0,k+1,l+1,m) * kernel(0,1,1) +
+ input(i,j+1,k+1,l+1,m) * kernel(1,1,1) +
+ input(i,j+2,k+1,l+1,m) * kernel(2,1,1) +
+ input(i,j+0,k+2,l+1,m) * kernel(0,2,1) +
+ input(i,j+1,k+2,l+1,m) * kernel(1,2,1) +
+ input(i,j+2,k+2,l+1,m) * kernel(2,2,1) +
+ input(i,j+0,k+3,l+1,m) * kernel(0,3,1) +
+ input(i,j+1,k+3,l+1,m) * kernel(1,3,1) +
+ input(i,j+2,k+3,l+1,m) * kernel(2,3,1);
+ VERIFY_IS_APPROX(result, expected);
+ }
+ }
+ }
+ }
+ }
+
+ cudaFree(d_input);
+ cudaFree(d_kernel);
+ cudaFree(d_out);
+}
+
+
+template <typename Scalar>
+void test_cuda_lgamma(const Scalar stddev)
+{
+ Tensor<Scalar, 2> in(72,97);
+ in.setRandom();
+ in *= in.constant(stddev);
+ Tensor<Scalar, 2> out(72,97);
+ out.setZero();
+
+ std::size_t bytes = in.size() * sizeof(Scalar);
+
+ Scalar* d_in;
+ Scalar* d_out;
+ cudaMalloc((void**)(&d_in), bytes);
+ cudaMalloc((void**)(&d_out), bytes);
+
+ cudaMemcpy(d_in, in.data(), bytes, cudaMemcpyHostToDevice);
+
+ Eigen::CudaStreamDevice stream;
+ Eigen::GpuDevice gpu_device(&stream);
+
+ Eigen::TensorMap<Eigen::Tensor<Scalar, 2> > gpu_in(d_in, 72, 97);
+ Eigen::TensorMap<Eigen::Tensor<Scalar, 2> > gpu_out(d_out, 72, 97);
+
+ gpu_out.device(gpu_device) = gpu_in.lgamma();
+
+ assert(cudaMemcpyAsync(out.data(), d_out, bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
+ assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
+
+ for (int i = 0; i < 72; ++i) {
+ for (int j = 0; j < 97; ++j) {
+ VERIFY_IS_APPROX(out(i,j), (std::lgamma)(in(i,j)));
+ }
+ }
+
+ cudaFree(d_in);
+ cudaFree(d_out);
+}
+
+template <typename Scalar>
+void test_cuda_digamma()
+{
+ Tensor<Scalar, 1> in(7);
+ Tensor<Scalar, 1> out(7);
+ Tensor<Scalar, 1> expected_out(7);
+ out.setZero();
+
+ in(0) = Scalar(1);
+ in(1) = Scalar(1.5);
+ in(2) = Scalar(4);
+ in(3) = Scalar(-10.5);
+ in(4) = Scalar(10000.5);
+ in(5) = Scalar(0);
+ in(6) = Scalar(-1);
+
+ expected_out(0) = Scalar(-0.5772156649015329);
+ expected_out(1) = Scalar(0.03648997397857645);
+ expected_out(2) = Scalar(1.2561176684318);
+ expected_out(3) = Scalar(2.398239129535781);
+ expected_out(4) = Scalar(9.210340372392849);
+ expected_out(5) = std::numeric_limits<Scalar>::infinity();
+ expected_out(6) = std::numeric_limits<Scalar>::infinity();
+
+ std::size_t bytes = in.size() * sizeof(Scalar);
+
+ Scalar* d_in;
+ Scalar* d_out;
+ cudaMalloc((void**)(&d_in), bytes);
+ cudaMalloc((void**)(&d_out), bytes);
+
+ cudaMemcpy(d_in, in.data(), bytes, cudaMemcpyHostToDevice);
+
+ Eigen::CudaStreamDevice stream;
+ Eigen::GpuDevice gpu_device(&stream);
+
+ Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_in(d_in, 7);
+ Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_out(d_out, 7);
+
+ gpu_out.device(gpu_device) = gpu_in.digamma();
+
+ assert(cudaMemcpyAsync(out.data(), d_out, bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
+ assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
+
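+ // digamma has poles at zero and at the negative integers, so the last two
+ // entries are compared exactly against infinity rather than approximately.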
+ for (int i = 0; i < 5; ++i) {
+ VERIFY_IS_APPROX(out(i), expected_out(i));
+ }
+ for (int i = 5; i < 7; ++i) {
+ VERIFY_IS_EQUAL(out(i), expected_out(i));
+ }
+
+ cudaFree(d_in);
+ cudaFree(d_out);
+}
+
+template <typename Scalar>
+void test_cuda_zeta()
+{
+ Tensor<Scalar, 1> in_x(6);
+ Tensor<Scalar, 1> in_q(6);
+ Tensor<Scalar, 1> out(6);
+ Tensor<Scalar, 1> expected_out(6);
+ out.setZero();
+
+ in_x(0) = Scalar(1);
+ in_x(1) = Scalar(1.5);
+ in_x(2) = Scalar(4);
+ in_x(3) = Scalar(-10.5);
+ in_x(4) = Scalar(10000.5);
+ in_x(5) = Scalar(3);
+
+ in_q(0) = Scalar(1.2345);
+ in_q(1) = Scalar(2);
+ in_q(2) = Scalar(1.5);
+ in_q(3) = Scalar(3);
+ in_q(4) = Scalar(1.0001);
+ in_q(5) = Scalar(-2.5);
+
+ expected_out(0) = std::numeric_limits<Scalar>::infinity();
+ expected_out(1) = Scalar(1.61237534869);
+ expected_out(2) = Scalar(0.234848505667);
+ expected_out(3) = Scalar(1.03086757337e-5);
+ expected_out(4) = Scalar(0.367879440865);
+ expected_out(5) = Scalar(0.054102025820864097);
+
+ std::size_t bytes = in_x.size() * sizeof(Scalar);
+
+ Scalar* d_in_x;
+ Scalar* d_in_q;
+ Scalar* d_out;
+ cudaMalloc((void**)(&d_in_x), bytes);
+ cudaMalloc((void**)(&d_in_q), bytes);
+ cudaMalloc((void**)(&d_out), bytes);
+
+ cudaMemcpy(d_in_x, in_x.data(), bytes, cudaMemcpyHostToDevice);
+ cudaMemcpy(d_in_q, in_q.data(), bytes, cudaMemcpyHostToDevice);
+
+ Eigen::CudaStreamDevice stream;
+ Eigen::GpuDevice gpu_device(&stream);
+
+ Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_in_x(d_in_x, 6);
+ Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_in_q(d_in_q, 6);
+ Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_out(d_out, 6);
+
+ gpu_out.device(gpu_device) = gpu_in_x.zeta(gpu_in_q);
+
+ assert(cudaMemcpyAsync(out.data(), d_out, bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
+ assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
+
+ // zeta has a pole at x == 1, so out(0) must be +infinity; x = -10.5 is
+ // outside the domain handled by the implementation, so out(3) is nan and
+ // expected_out(3) is left unused.
+ VERIFY_IS_EQUAL(out(0), expected_out(0));
+ VERIFY((std::isnan)(out(3)));
+
+ for (int i = 1; i < 6; ++i) {
+ if (i != 3) {
+ VERIFY_IS_APPROX(out(i), expected_out(i));
+ }
+ }
+
+ cudaFree(d_in_x);
+ cudaFree(d_in_q);
+ cudaFree(d_out);
+}
+
+template <typename Scalar>
+void test_cuda_polygamma()
+{
+ Tensor<Scalar, 1> in_x(7);
+ Tensor<Scalar, 1> in_n(7);
+ Tensor<Scalar, 1> out(7);
+ Tensor<Scalar, 1> expected_out(7);
+ out.setZero();
+
+ in_n(0) = Scalar(1);
+ in_n(1) = Scalar(1);
+ in_n(2) = Scalar(1);
+ in_n(3) = Scalar(17);
+ in_n(4) = Scalar(31);
+ in_n(5) = Scalar(28);
+ in_n(6) = Scalar(8);
+
+ in_x(0) = Scalar(2);
+ in_x(1) = Scalar(3);
+ in_x(2) = Scalar(25.5);
+ in_x(3) = Scalar(4.7);
+ in_x(4) = Scalar(11.8);
+ in_x(5) = Scalar(17.7);
+ in_x(6) = Scalar(30.2);
+
+ expected_out(0) = Scalar(0.644934066848);
+ expected_out(1) = Scalar(0.394934066848);
+ expected_out(2) = Scalar(0.0399946696496);
+ expected_out(3) = Scalar(293.334565435);
+ expected_out(4) = Scalar(0.445487887616);
+ expected_out(5) = Scalar(-2.47810300902e-07);
+ expected_out(6) = Scalar(-8.29668781082e-09);
+
+ std::size_t bytes = in_x.size() * sizeof(Scalar);
+
+ Scalar* d_in_x;
+ Scalar* d_in_n;
+ Scalar* d_out;
+ cudaMalloc((void**)(&d_in_x), bytes);
+ cudaMalloc((void**)(&d_in_n), bytes);
+ cudaMalloc((void**)(&d_out), bytes);
+
+ cudaMemcpy(d_in_x, in_x.data(), bytes, cudaMemcpyHostToDevice);
+ cudaMemcpy(d_in_n, in_n.data(), bytes, cudaMemcpyHostToDevice);
+
+ Eigen::CudaStreamDevice stream;
+ Eigen::GpuDevice gpu_device(&stream);
+
+ Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_in_x(d_in_x, 7);
+ Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_in_n(d_in_n, 7);
+ Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_out(d_out, 7);
+
+ gpu_out.device(gpu_device) = gpu_in_n.polygamma(gpu_in_x);
+
+ assert(cudaMemcpyAsync(out.data(), d_out, bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
+ assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
+
+ for (int i = 0; i < 7; ++i) {
+ VERIFY_IS_APPROX(out(i), expected_out(i));
+ }
+
+ cudaFree(d_in_x);
+ cudaFree(d_in_n);
+ cudaFree(d_out);
+}
+
+template <typename Scalar>
+void test_cuda_igamma()
+{
+ Tensor<Scalar, 2> a(6, 6);
+ Tensor<Scalar, 2> x(6, 6);
+ Tensor<Scalar, 2> out(6, 6);
+ out.setZero();
+
+ Scalar a_s[] = {Scalar(0), Scalar(1), Scalar(1.5), Scalar(4), Scalar(0.0001), Scalar(1000.5)};
+ Scalar x_s[] = {Scalar(0), Scalar(1), Scalar(1.5), Scalar(4), Scalar(0.0001), Scalar(1000.5)};
+
+ for (int i = 0; i < 6; ++i) {
+ for (int j = 0; j < 6; ++j) {
+ a(i, j) = a_s[i];
+ x(i, j) = x_s[j];
+ }
+ }
+
+ Scalar nan = std::numeric_limits<Scalar>::quiet_NaN();
+ Scalar igamma_s[][6] = {{0.0, nan, nan, nan, nan, nan},
+ {0.0, 0.6321205588285578, 0.7768698398515702,
+ 0.9816843611112658, 9.999500016666262e-05, 1.0},
+ {0.0, 0.4275932955291202, 0.608374823728911,
+ 0.9539882943107686, 7.522076445089201e-07, 1.0},
+ {0.0, 0.01898815687615381, 0.06564245437845008,
+ 0.5665298796332909, 4.166333347221828e-18, 1.0},
+ {0.0, 0.9999780593618628, 0.9999899967080838,
+ 0.9999996219837988, 0.9991370418689945, 1.0},
+ {0.0, 0.0, 0.0, 0.0, 0.0, 0.5042041932513908}};
+
+ std::size_t bytes = a.size() * sizeof(Scalar);
+
+ Scalar* d_a;
+ Scalar* d_x;
+ Scalar* d_out;
+ assert(cudaMalloc((void**)(&d_a), bytes) == cudaSuccess);
+ assert(cudaMalloc((void**)(&d_x), bytes) == cudaSuccess);
+ assert(cudaMalloc((void**)(&d_out), bytes) == cudaSuccess);
+
+ cudaMemcpy(d_a, a.data(), bytes, cudaMemcpyHostToDevice);
+ cudaMemcpy(d_x, x.data(), bytes, cudaMemcpyHostToDevice);
+
+ Eigen::CudaStreamDevice stream;
+ Eigen::GpuDevice gpu_device(&stream);
+
+ Eigen::TensorMap<Eigen::Tensor<Scalar, 2> > gpu_a(d_a, 6, 6);
+ Eigen::TensorMap<Eigen::Tensor<Scalar, 2> > gpu_x(d_x, 6, 6);
+ Eigen::TensorMap<Eigen::Tensor<Scalar, 2> > gpu_out(d_out, 6, 6);
+
+ gpu_out.device(gpu_device) = gpu_a.igamma(gpu_x);
+
+ assert(cudaMemcpyAsync(out.data(), d_out, bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
+ assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
+
+ for (int i = 0; i < 6; ++i) {
+ for (int j = 0; j < 6; ++j) {
+ if ((std::isnan)(igamma_s[i][j])) {
+ VERIFY((std::isnan)(out(i, j)));
+ } else {
+ VERIFY_IS_APPROX(out(i, j), igamma_s[i][j]);
+ }
+ }
+ }
+
+ cudaFree(d_a);
+ cudaFree(d_x);
+ cudaFree(d_out);
+}
+
+template <typename Scalar>
+void test_cuda_igammac()
+{
+ Tensor<Scalar, 2> a(6, 6);
+ Tensor<Scalar, 2> x(6, 6);
+ Tensor<Scalar, 2> out(6, 6);
+ out.setZero();
+
+ Scalar a_s[] = {Scalar(0), Scalar(1), Scalar(1.5), Scalar(4), Scalar(0.0001), Scalar(1000.5)};
+ Scalar x_s[] = {Scalar(0), Scalar(1), Scalar(1.5), Scalar(4), Scalar(0.0001), Scalar(1000.5)};
+
+ for (int i = 0; i < 6; ++i) {
+ for (int j = 0; j < 6; ++j) {
+ a(i, j) = a_s[i];
+ x(i, j) = x_s[j];
+ }
+ }
+
+ Scalar nan = std::numeric_limits<Scalar>::quiet_NaN();
+ Scalar igammac_s[][6] = {{nan, nan, nan, nan, nan, nan},
+ {1.0, 0.36787944117144233, 0.22313016014842982,
+ 0.018315638888734182, 0.9999000049998333, 0.0},
+ {1.0, 0.5724067044708798, 0.3916251762710878,
+ 0.04601170568923136, 0.9999992477923555, 0.0},
+ {1.0, 0.9810118431238462, 0.9343575456215499,
+ 0.4334701203667089, 1.0, 0.0},
+ {1.0, 2.1940638138146658e-05, 1.0003291916285e-05,
+ 3.7801620118431334e-07, 0.0008629581310054535,
+ 0.0},
+ {1.0, 1.0, 1.0, 1.0, 1.0, 0.49579580674813944}};
+
+ std::size_t bytes = a.size() * sizeof(Scalar);
+
+ Scalar* d_a;
+ Scalar* d_x;
+ Scalar* d_out;
+ cudaMalloc((void**)(&d_a), bytes);
+ cudaMalloc((void**)(&d_x), bytes);
+ cudaMalloc((void**)(&d_out), bytes);
+
+ cudaMemcpy(d_a, a.data(), bytes, cudaMemcpyHostToDevice);
+ cudaMemcpy(d_x, x.data(), bytes, cudaMemcpyHostToDevice);
+
+ Eigen::CudaStreamDevice stream;
+ Eigen::GpuDevice gpu_device(&stream);
+
+ Eigen::TensorMap<Eigen::Tensor<Scalar, 2> > gpu_a(d_a, 6, 6);
+ Eigen::TensorMap<Eigen::Tensor<Scalar, 2> > gpu_x(d_x, 6, 6);
+ Eigen::TensorMap<Eigen::Tensor<Scalar, 2> > gpu_out(d_out, 6, 6);
+
+ gpu_out.device(gpu_device) = gpu_a.igammac(gpu_x);
+
+ assert(cudaMemcpyAsync(out.data(), d_out, bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
+ assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
+
+ for (int i = 0; i < 6; ++i) {
+ for (int j = 0; j < 6; ++j) {
+ if ((std::isnan)(igammac_s[i][j])) {
+ VERIFY((std::isnan)(out(i, j)));
+ } else {
+ VERIFY_IS_APPROX(out(i, j), igammac_s[i][j]);
+ }
+ }
+ }
+
+ cudaFree(d_a);
+ cudaFree(d_x);
+ cudaFree(d_out);
+}
+
+template <typename Scalar>
+void test_cuda_erf(const Scalar stddev)
+{
+ Tensor<Scalar, 2> in(72,97);
+ in.setRandom();
+ in *= in.constant(stddev);
+ Tensor<Scalar, 2> out(72,97);
+ out.setZero();
+
+ std::size_t bytes = in.size() * sizeof(Scalar);
+
+ Scalar* d_in;
+ Scalar* d_out;
+ assert(cudaMalloc((void**)(&d_in), bytes) == cudaSuccess);
+ assert(cudaMalloc((void**)(&d_out), bytes) == cudaSuccess);
+
+ cudaMemcpy(d_in, in.data(), bytes, cudaMemcpyHostToDevice);
+
+ Eigen::CudaStreamDevice stream;
+ Eigen::GpuDevice gpu_device(&stream);
+
+ Eigen::TensorMap<Eigen::Tensor<Scalar, 2> > gpu_in(d_in, 72, 97);
+ Eigen::TensorMap<Eigen::Tensor<Scalar, 2> > gpu_out(d_out, 72, 97);
+
+ gpu_out.device(gpu_device) = gpu_in.erf();
+
+ assert(cudaMemcpyAsync(out.data(), d_out, bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
+ assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
+
+ for (int i = 0; i < 72; ++i) {
+ for (int j = 0; j < 97; ++j) {
+ VERIFY_IS_APPROX(out(i,j), (std::erf)(in(i,j)));
+ }
+ }
+
+ cudaFree(d_in);
+ cudaFree(d_out);
+}
+
+template <typename Scalar>
+void test_cuda_erfc(const Scalar stddev)
+{
+ Tensor<Scalar, 2> in(72,97);
+ in.setRandom();
+ in *= in.constant(stddev);
+ Tensor<Scalar, 2> out(72,97);
+ out.setZero();
+
+ std::size_t bytes = in.size() * sizeof(Scalar);
+
+ Scalar* d_in;
+ Scalar* d_out;
+ cudaMalloc((void**)(&d_in), bytes);
+ cudaMalloc((void**)(&d_out), bytes);
+
+ cudaMemcpy(d_in, in.data(), bytes, cudaMemcpyHostToDevice);
+
+ Eigen::CudaStreamDevice stream;
+ Eigen::GpuDevice gpu_device(&stream);
+
+ Eigen::TensorMap<Eigen::Tensor<Scalar, 2> > gpu_in(d_in, 72, 97);
+ Eigen::TensorMap<Eigen::Tensor<Scalar, 2> > gpu_out(d_out, 72, 97);
+
+ gpu_out.device(gpu_device) = gpu_in.erfc();
+
+ assert(cudaMemcpyAsync(out.data(), d_out, bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
+ assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
+
+ for (int i = 0; i < 72; ++i) {
+ for (int j = 0; j < 97; ++j) {
+ VERIFY_IS_APPROX(out(i,j), (std::erfc)(in(i,j)));
+ }
+ }
+
+ cudaFree(d_in);
+ cudaFree(d_out);
+}
+
+template <typename Scalar>
+void test_cuda_betainc()
+{
+ Tensor<Scalar, 1> in_x(125);
+ Tensor<Scalar, 1> in_a(125);
+ Tensor<Scalar, 1> in_b(125);
+ Tensor<Scalar, 1> out(125);
+ Tensor<Scalar, 1> expected_out(125);
+ out.setZero();
+
+ Scalar nan = std::numeric_limits<Scalar>::quiet_NaN();
+
+ Array<Scalar, 1, Dynamic> x(125);
+ Array<Scalar, 1, Dynamic> a(125);
+ Array<Scalar, 1, Dynamic> b(125);
+ Array<Scalar, 1, Dynamic> v(125);
+
+ a << 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.03062277660168379, 0.03062277660168379, 0.03062277660168379,
+ 0.03062277660168379, 0.03062277660168379, 0.03062277660168379,
+ 0.03062277660168379, 0.03062277660168379, 0.03062277660168379,
+ 0.03062277660168379, 0.03062277660168379, 0.03062277660168379,
+ 0.03062277660168379, 0.03062277660168379, 0.03062277660168379,
+ 0.03062277660168379, 0.03062277660168379, 0.03062277660168379,
+ 0.03062277660168379, 0.03062277660168379, 0.03062277660168379,
+ 0.03062277660168379, 0.03062277660168379, 0.03062277660168379,
+ 0.03062277660168379, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999,
+ 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999,
+ 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 31.62177660168379,
+ 31.62177660168379, 31.62177660168379, 31.62177660168379,
+ 31.62177660168379, 31.62177660168379, 31.62177660168379,
+ 31.62177660168379, 31.62177660168379, 31.62177660168379,
+ 31.62177660168379, 31.62177660168379, 31.62177660168379,
+ 31.62177660168379, 31.62177660168379, 31.62177660168379,
+ 31.62177660168379, 31.62177660168379, 31.62177660168379,
+ 31.62177660168379, 31.62177660168379, 31.62177660168379,
+ 31.62177660168379, 31.62177660168379, 31.62177660168379, 999.999, 999.999,
+ 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999,
+ 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999,
+ 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999;
+
+ b << 0.0, 0.0, 0.0, 0.0, 0.0, 0.03062277660168379, 0.03062277660168379,
+ 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.999,
+ 0.999, 0.999, 0.999, 0.999, 31.62177660168379, 31.62177660168379,
+ 31.62177660168379, 31.62177660168379, 31.62177660168379, 999.999, 999.999,
+ 999.999, 999.999, 999.999, 0.0, 0.0, 0.0, 0.0, 0.0, 0.03062277660168379,
+ 0.03062277660168379, 0.03062277660168379, 0.03062277660168379,
+ 0.03062277660168379, 0.999, 0.999, 0.999, 0.999, 0.999, 31.62177660168379,
+ 31.62177660168379, 31.62177660168379, 31.62177660168379,
+ 31.62177660168379, 999.999, 999.999, 999.999, 999.999, 999.999, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.03062277660168379, 0.03062277660168379,
+ 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.999,
+ 0.999, 0.999, 0.999, 0.999, 31.62177660168379, 31.62177660168379,
+ 31.62177660168379, 31.62177660168379, 31.62177660168379, 999.999, 999.999,
+ 999.999, 999.999, 999.999, 0.0, 0.0, 0.0, 0.0, 0.0, 0.03062277660168379,
+ 0.03062277660168379, 0.03062277660168379, 0.03062277660168379,
+ 0.03062277660168379, 0.999, 0.999, 0.999, 0.999, 0.999, 31.62177660168379,
+ 31.62177660168379, 31.62177660168379, 31.62177660168379,
+ 31.62177660168379, 999.999, 999.999, 999.999, 999.999, 999.999, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.03062277660168379, 0.03062277660168379,
+ 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.999,
+ 0.999, 0.999, 0.999, 0.999, 31.62177660168379, 31.62177660168379,
+ 31.62177660168379, 31.62177660168379, 31.62177660168379, 999.999, 999.999,
+ 999.999, 999.999, 999.999;
+
+ x << -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8,
+ 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5,
+ 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2,
+ 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1,
+ 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1,
+ -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8,
+ 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5,
+ 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2,
+ 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1;
+
+ v << nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan,
+ nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan,
+ nan, nan, 0.47972119876364683, 0.5, 0.5202788012363533, nan, nan,
+ 0.9518683957740043, 0.9789663010413743, 0.9931729188073435, nan, nan,
+ 0.999995949033062, 0.9999999999993698, 0.9999999999999999, nan, nan,
+ 0.9999999999999999, 0.9999999999999999, 0.9999999999999999, nan, nan, nan,
+ nan, nan, nan, nan, 0.006827081192655869, 0.0210336989586256,
+ 0.04813160422599567, nan, nan, 0.20014344256217678, 0.5000000000000001,
+ 0.7998565574378232, nan, nan, 0.9991401428435834, 0.999999999698403,
+ 0.9999999999999999, nan, nan, 0.9999999999999999, 0.9999999999999999,
+ 0.9999999999999999, nan, nan, nan, nan, nan, nan, nan,
+ 1.0646600232370887e-25, 6.301722877826246e-13, 4.050966937974938e-06, nan,
+ nan, 7.864342668429763e-23, 3.015969667594166e-10, 0.0008598571564165444,
+ nan, nan, 6.031987710123844e-08, 0.5000000000000007, 0.9999999396801229,
+ nan, nan, 0.9999999999999999, 0.9999999999999999, 0.9999999999999999, nan,
+ nan, nan, nan, nan, nan, nan, 0.0, 7.029920380986636e-306,
+ 2.2450728208591345e-101, nan, nan, 0.0, 9.275871147869727e-302,
+ 1.2232913026152827e-97, nan, nan, 0.0, 3.0891393081932924e-252,
+ 2.9303043666183996e-60, nan, nan, 2.248913486879199e-196,
+ 0.5000000000004947, 0.9999999999999999, nan;
+
+ for (int i = 0; i < 125; ++i) {
+ in_x(i) = x(i);
+ in_a(i) = a(i);
+ in_b(i) = b(i);
+ expected_out(i) = v(i);
+ }
+
+ std::size_t bytes = in_x.size() * sizeof(Scalar);
+
+ Scalar* d_in_x;
+ Scalar* d_in_a;
+ Scalar* d_in_b;
+ Scalar* d_out;
+ cudaMalloc((void**)(&d_in_x), bytes);
+ cudaMalloc((void**)(&d_in_a), bytes);
+ cudaMalloc((void**)(&d_in_b), bytes);
+ cudaMalloc((void**)(&d_out), bytes);
+
+ cudaMemcpy(d_in_x, in_x.data(), bytes, cudaMemcpyHostToDevice);
+ cudaMemcpy(d_in_a, in_a.data(), bytes, cudaMemcpyHostToDevice);
+ cudaMemcpy(d_in_b, in_b.data(), bytes, cudaMemcpyHostToDevice);
+
+ Eigen::CudaStreamDevice stream;
+ Eigen::GpuDevice gpu_device(&stream);
+
+ Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_in_x(d_in_x, 125);
+ Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_in_a(d_in_a, 125);
+ Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_in_b(d_in_b, 125);
+ Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_out(d_out, 125);
+
+ gpu_out.device(gpu_device) = betainc(gpu_in_a, gpu_in_b, gpu_in_x);
+
+ assert(cudaMemcpyAsync(out.data(), d_out, bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
+ assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
+
+ for (int i = 1; i < 125; ++i) { // note: the first grid point (i == 0) is left unchecked
+ if ((std::isnan)(expected_out(i))) {
+ VERIFY((std::isnan)(out(i)));
+ } else {
+ VERIFY_IS_APPROX(out(i), expected_out(i));
+ }
+ }
+
+ cudaFree(d_in_x);
+ cudaFree(d_in_a);
+ cudaFree(d_in_b);
+ cudaFree(d_out);
+}
+
+
+void test_cxx11_tensor_cuda()
+{
+ CALL_SUBTEST_1(test_cuda_nullary());
+ CALL_SUBTEST_1(test_cuda_elementwise_small());
+ CALL_SUBTEST_1(test_cuda_elementwise());
+ CALL_SUBTEST_1(test_cuda_props());
+ CALL_SUBTEST_1(test_cuda_reduction());
+ CALL_SUBTEST_2(test_cuda_contraction<ColMajor>());
+ CALL_SUBTEST_2(test_cuda_contraction<RowMajor>());
+ CALL_SUBTEST_3(test_cuda_convolution_1d<ColMajor>());
+ CALL_SUBTEST_3(test_cuda_convolution_1d<RowMajor>());
+ CALL_SUBTEST_3(test_cuda_convolution_inner_dim_col_major_1d());
+ CALL_SUBTEST_3(test_cuda_convolution_inner_dim_row_major_1d());
+ CALL_SUBTEST_3(test_cuda_convolution_2d<ColMajor>());
+ CALL_SUBTEST_3(test_cuda_convolution_2d<RowMajor>());
+ CALL_SUBTEST_3(test_cuda_convolution_3d<ColMajor>());
+ CALL_SUBTEST_3(test_cuda_convolution_3d<RowMajor>());
+
+#if __cplusplus > 199711L
+ // std::erf, std::erfc, and so on were only added in C++11. We use them
+ // as a golden reference to validate the results produced by Eigen. Therefore
+ // we can only run these tests with a C++11 compiler.
+ CALL_SUBTEST_4(test_cuda_lgamma<float>(1.0f));
+ CALL_SUBTEST_4(test_cuda_lgamma<float>(100.0f));
+ CALL_SUBTEST_4(test_cuda_lgamma<float>(0.01f));
+ CALL_SUBTEST_4(test_cuda_lgamma<float>(0.001f));
+
+ CALL_SUBTEST_4(test_cuda_lgamma<double>(1.0));
+ CALL_SUBTEST_4(test_cuda_lgamma<double>(100.0));
+ CALL_SUBTEST_4(test_cuda_lgamma<double>(0.01));
+ CALL_SUBTEST_4(test_cuda_lgamma<double>(0.001));
+
+ CALL_SUBTEST_4(test_cuda_erf<float>(1.0f));
+ CALL_SUBTEST_4(test_cuda_erf<float>(100.0f));
+ CALL_SUBTEST_4(test_cuda_erf<float>(0.01f));
+ CALL_SUBTEST_4(test_cuda_erf<float>(0.001f));
+
+ CALL_SUBTEST_4(test_cuda_erfc<float>(1.0f));
+ // CALL_SUBTEST(test_cuda_erfc<float>(100.0f));
+ CALL_SUBTEST_4(test_cuda_erfc<float>(5.0f)); // CUDA erfc lacks precision for large inputs
+ CALL_SUBTEST_4(test_cuda_erfc<float>(0.01f));
+ CALL_SUBTEST_4(test_cuda_erfc<float>(0.001f));
+
+ CALL_SUBTEST_4(test_cuda_erf<double>(1.0));
+ CALL_SUBTEST_4(test_cuda_erf<double>(100.0));
+ CALL_SUBTEST_4(test_cuda_erf<double>(0.01));
+ CALL_SUBTEST_4(test_cuda_erf<double>(0.001));
+
+ CALL_SUBTEST_4(test_cuda_erfc<double>(1.0));
+ // CALL_SUBTEST(test_cuda_erfc<double>(100.0));
+ CALL_SUBTEST_4(test_cuda_erfc<double>(5.0)); // CUDA erfc lacks precision for large inputs
+ CALL_SUBTEST_4(test_cuda_erfc<double>(0.01));
+ CALL_SUBTEST_4(test_cuda_erfc<double>(0.001));
+
+ CALL_SUBTEST_5(test_cuda_digamma<float>());
+ CALL_SUBTEST_5(test_cuda_digamma<double>());
+
+ CALL_SUBTEST_5(test_cuda_polygamma<float>());
+ CALL_SUBTEST_5(test_cuda_polygamma<double>());
+
+ CALL_SUBTEST_5(test_cuda_zeta<float>());
+ CALL_SUBTEST_5(test_cuda_zeta<double>());
+
+ CALL_SUBTEST_5(test_cuda_igamma<float>());
+ CALL_SUBTEST_5(test_cuda_igammac<float>());
+
+ CALL_SUBTEST_5(test_cuda_igamma<double>());
+ CALL_SUBTEST_5(test_cuda_igammac<double>());
+
+ CALL_SUBTEST_6(test_cuda_betainc<float>());
+ CALL_SUBTEST_6(test_cuda_betainc<double>());
+#endif
+}
diff --git a/unsupported/test/cxx11_tensor_custom_index.cpp b/unsupported/test/cxx11_tensor_custom_index.cpp
new file mode 100644
index 000000000..4528cc176
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_custom_index.cpp
@@ -0,0 +1,100 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2015 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+#include <limits>
+#include <map>
+
+#include <Eigen/Dense>
+#include <Eigen/CXX11/Tensor>
+
+using Eigen::Tensor;
+
+
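+// When SFINAE support is available, tensor coefficients can be addressed with
+// any custom index type that can be indexed over the tensor rank (e.g. a
+// std::map, a Matrix, or an initializer list), as the tests below show.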
+template <int DataLayout>
+static void test_map_as_index()
+{
+#ifdef EIGEN_HAS_SFINAE
+ Tensor<float, 4, DataLayout> tensor(2, 3, 5, 7);
+ tensor.setRandom();
+
+ using NormalIndex = DSizes<ptrdiff_t, 4>;
+ using CustomIndex = std::map<ptrdiff_t, ptrdiff_t>;
+ CustomIndex coeffC;
+ coeffC[0] = 1;
+ coeffC[1] = 2;
+ coeffC[2] = 4;
+ coeffC[3] = 1;
+ NormalIndex coeff(1,2,4,1);
+
+ VERIFY_IS_EQUAL(tensor.coeff(coeffC), tensor.coeff(coeff));
+ VERIFY_IS_EQUAL(tensor.coeffRef(coeffC), tensor.coeffRef(coeff));
+#endif
+}
+
+
+template <int DataLayout>
+static void test_matrix_as_index()
+{
+#ifdef EIGEN_HAS_SFINAE
+ Tensor<float, 4, DataLayout> tensor(2, 3, 5, 7);
+ tensor.setRandom();
+
+ using NormalIndex = DSizes<ptrdiff_t, 4>;
+ using CustomIndex = Matrix<unsigned int, 4, 1>;
+ CustomIndex coeffC(1,2,4,1);
+ NormalIndex coeff(1,2,4,1);
+
+ VERIFY_IS_EQUAL(tensor.coeff(coeffC), tensor.coeff(coeff));
+ VERIFY_IS_EQUAL(tensor.coeffRef(coeffC), tensor.coeffRef(coeff));
+#endif
+}
+
+
+template <int DataLayout>
+static void test_varlist_as_index()
+{
+#ifdef EIGEN_HAS_SFINAE
+ Tensor<float, 4, DataLayout> tensor(2, 3, 5, 7);
+ tensor.setRandom();
+
+ DSizes<ptrdiff_t, 4> coeff(1,2,4,1);
+
+ VERIFY_IS_EQUAL(tensor.coeff({1,2,4,1}), tensor.coeff(coeff));
+ VERIFY_IS_EQUAL(tensor.coeffRef({1,2,4,1}), tensor.coeffRef(coeff));
+#endif
+}
+
+
+template <int DataLayout>
+static void test_sizes_as_index()
+{
+#ifdef EIGEN_HAS_SFINAE
+ Tensor<float, 4, DataLayout> tensor(2, 3, 5, 7);
+ tensor.setRandom();
+
+ DSizes<ptrdiff_t, 4> coeff(1,2,4,1);
+ Sizes<1,2,4,1> coeffC;
+
+ VERIFY_IS_EQUAL(tensor.coeff(coeffC), tensor.coeff(coeff));
+ VERIFY_IS_EQUAL(tensor.coeffRef(coeffC), tensor.coeffRef(coeff));
+#endif
+}
+
+
+void test_cxx11_tensor_custom_index() {
+ test_map_as_index<ColMajor>();
+ test_map_as_index<RowMajor>();
+ test_matrix_as_index<ColMajor>();
+ test_matrix_as_index<RowMajor>();
+ test_varlist_as_index<ColMajor>();
+ test_varlist_as_index<RowMajor>();
+ test_sizes_as_index<ColMajor>();
+ test_sizes_as_index<RowMajor>();
+}
diff --git a/unsupported/test/cxx11_tensor_custom_op.cpp b/unsupported/test/cxx11_tensor_custom_op.cpp
new file mode 100644
index 000000000..8baa477cc
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_custom_op.cpp
@@ -0,0 +1,111 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+
+#include <Eigen/CXX11/Tensor>
+
+using Eigen::Tensor;
+
+
+struct InsertZeros {
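+ // Custom unary op that doubles every dimension, writing the input at even
+ // coordinates and zeros on the stride-2 grid offset by (1,1).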
+ DSizes<DenseIndex, 2> dimensions(const Tensor<float, 2>& input) const {
+ DSizes<DenseIndex, 2> result;
+ result[0] = input.dimension(0) * 2;
+ result[1] = input.dimension(1) * 2;
+ return result;
+ }
+
+ template <typename Output, typename Device>
+ void eval(const Tensor<float, 2>& input, Output& output, const Device& device) const
+ {
+ array<DenseIndex, 2> strides;
+ strides[0] = 2;
+ strides[1] = 2;
+ output.stride(strides).device(device) = input;
+
+ Eigen::DSizes<DenseIndex, 2> offsets(1,1);
+ Eigen::DSizes<DenseIndex, 2> extents(output.dimension(0)-1, output.dimension(1)-1);
+ output.slice(offsets, extents).stride(strides).device(device) = input.constant(0.0f);
+ }
+};
+
+static void test_custom_unary_op()
+{
+ Tensor<float, 2> tensor(3,5);
+ tensor.setRandom();
+
+ Tensor<float, 2> result = tensor.customOp(InsertZeros());
+ VERIFY_IS_EQUAL(result.dimension(0), 6);
+ VERIFY_IS_EQUAL(result.dimension(1), 10);
+
+ for (int i = 0; i < 6; i+=2) {
+ for (int j = 0; j < 10; j+=2) {
+ VERIFY_IS_EQUAL(result(i, j), tensor(i/2, j/2));
+ }
+ }
+ for (int i = 1; i < 6; i+=2) {
+ for (int j = 1; j < 10; j+=2) {
+ VERIFY_IS_EQUAL(result(i, j), 0);
+ }
+ }
+}
+
+
+struct BatchMatMul {
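+ // Custom binary op that treats the last dimension as a batch index and
+ // contracts the matching 2-D slices: output[...,i] = input1[...,i] * input2[...,i].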
+ DSizes<DenseIndex, 3> dimensions(const Tensor<float, 3>& input1, const Tensor<float, 3>& input2) const {
+ DSizes<DenseIndex, 3> result;
+ result[0] = input1.dimension(0);
+ result[1] = input2.dimension(1);
+ result[2] = input2.dimension(2);
+ return result;
+ }
+
+ template <typename Output, typename Device>
+ void eval(const Tensor<float, 3>& input1, const Tensor<float, 3>& input2,
+ Output& output, const Device& device) const
+ {
+ typedef Tensor<float, 3>::DimensionPair DimPair;
+ array<DimPair, 1> dims;
+ dims[0] = DimPair(1, 0);
+ for (int i = 0; i < output.dimension(2); ++i) {
+ output.template chip<2>(i).device(device) = input1.chip<2>(i).contract(input2.chip<2>(i), dims);
+ }
+ }
+};
+
+
+static void test_custom_binary_op()
+{
+ Tensor<float, 3> tensor1(2,3,5);
+ tensor1.setRandom();
+ Tensor<float, 3> tensor2(3,7,5);
+ tensor2.setRandom();
+
+ Tensor<float, 3> result = tensor1.customOp(tensor2, BatchMatMul());
+ for (int i = 0; i < 5; ++i) {
+ typedef Tensor<float, 3>::DimensionPair DimPair;
+ array<DimPair, 1> dims;
+ dims[0] = DimPair(1, 0);
+ Tensor<float, 2> reference = tensor1.chip<2>(i).contract(tensor2.chip<2>(i), dims);
+ TensorRef<Tensor<float, 2> > val = result.chip<2>(i);
+ for (int j = 0; j < 2; ++j) {
+ for (int k = 0; k < 7; ++k) {
+ VERIFY_IS_APPROX(val(j, k), reference(j, k));
+ }
+ }
+ }
+}
+
+
+void test_cxx11_tensor_custom_op()
+{
+ CALL_SUBTEST(test_custom_unary_op());
+ CALL_SUBTEST(test_custom_binary_op());
+}
diff --git a/unsupported/test/cxx11_tensor_device.cu b/unsupported/test/cxx11_tensor_device.cu
new file mode 100644
index 000000000..fde20ddf2
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_device.cu
@@ -0,0 +1,390 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#define EIGEN_TEST_NO_LONGDOUBLE
+#define EIGEN_TEST_NO_COMPLEX
+#define EIGEN_TEST_FUNC cxx11_tensor_device
+#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int
+#define EIGEN_USE_GPU
+
+#if defined __CUDACC_VER__ && __CUDACC_VER__ >= 70500
+#include <cuda_fp16.h>
+#endif
+#include "main.h"
+#include <unsupported/Eigen/CXX11/Tensor>
+
+using Eigen::Tensor;
+using Eigen::RowMajor;
+
+// Context for evaluation on cpu
+struct CPUContext {
+ CPUContext(const Eigen::Tensor<float, 3>& in1, Eigen::Tensor<float, 3>& in2,
+            Eigen::Tensor<float, 3>& out)
+     : in1_(in1), in2_(in2), out_(out),
+       kernel_1d_(2), kernel_2d_(2,2), kernel_3d_(2,2,2) {
+ kernel_1d_(0) = 3.14f;
+ kernel_1d_(1) = 2.7f;
+
+ kernel_2d_(0,0) = 3.14f;
+ kernel_2d_(1,0) = 2.7f;
+ kernel_2d_(0,1) = 0.2f;
+ kernel_2d_(1,1) = 7.0f;
+
+ kernel_3d_(0,0,0) = 3.14f;
+ kernel_3d_(0,1,0) = 2.7f;
+ kernel_3d_(0,0,1) = 0.2f;
+ kernel_3d_(0,1,1) = 7.0f;
+ kernel_3d_(1,0,0) = -1.0f;
+ kernel_3d_(1,1,0) = -0.3f;
+ kernel_3d_(1,0,1) = -0.7f;
+ kernel_3d_(1,1,1) = -0.5f;
+ }
+
+ const Eigen::DefaultDevice& device() const { return cpu_device_; }
+
+ const Eigen::Tensor<float, 3>& in1() const { return in1_; }
+ const Eigen::Tensor<float, 3>& in2() const { return in2_; }
+ Eigen::Tensor<float, 3>& out() { return out_; }
+ const Eigen::Tensor<float, 1>& kernel1d() const { return kernel_1d_; }
+ const Eigen::Tensor<float, 2>& kernel2d() const { return kernel_2d_; }
+ const Eigen::Tensor<float, 3>& kernel3d() const { return kernel_3d_; }
+
+ private:
+ const Eigen::Tensor<float, 3>& in1_;
+ const Eigen::Tensor<float, 3>& in2_;
+ Eigen::Tensor<float, 3>& out_;
+
+ Eigen::Tensor<float, 1> kernel_1d_;
+ Eigen::Tensor<float, 2> kernel_2d_;
+ Eigen::Tensor<float, 3> kernel_3d_;
+
+ Eigen::DefaultDevice cpu_device_;
+};
+
+
+// Context for evaluation on GPU
+struct GPUContext {
+ GPUContext(const Eigen::TensorMap<Eigen::Tensor<float, 3> >& in1,
+            Eigen::TensorMap<Eigen::Tensor<float, 3> >& in2,
+            Eigen::TensorMap<Eigen::Tensor<float, 3> >& out)
+     : in1_(in1), in2_(in2), out_(out), gpu_device_(&stream_) {
+ assert(cudaMalloc((void**)(&kernel_1d_), 2*sizeof(float)) == cudaSuccess);
+ float kernel_1d_val[] = {3.14f, 2.7f};
+ assert(cudaMemcpy(kernel_1d_, kernel_1d_val, 2*sizeof(float), cudaMemcpyHostToDevice) == cudaSuccess);
+
+ assert(cudaMalloc((void**)(&kernel_2d_), 4*sizeof(float)) == cudaSuccess);
+ float kernel_2d_val[] = {3.14f, 2.7f, 0.2f, 7.0f};
+ assert(cudaMemcpy(kernel_2d_, kernel_2d_val, 4*sizeof(float), cudaMemcpyHostToDevice) == cudaSuccess);
+
+ assert(cudaMalloc((void**)(&kernel_3d_), 8*sizeof(float)) == cudaSuccess);
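+ // Same coefficients as CPUContext::kernel_3d_, flattened in column-major order.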
+ float kernel_3d_val[] = {3.14f, -1.0f, 2.7f, -0.3f, 0.2f, -0.7f, 7.0f, -0.5f};
+ assert(cudaMemcpy(kernel_3d_, kernel_3d_val, 8*sizeof(float), cudaMemcpyHostToDevice) == cudaSuccess);
+ }
+ ~GPUContext() {
+ assert(cudaFree(kernel_1d_) == cudaSuccess);
+ assert(cudaFree(kernel_2d_) == cudaSuccess);
+ assert(cudaFree(kernel_3d_) == cudaSuccess);
+ }
+
+ const Eigen::GpuDevice& device() const { return gpu_device_; }
+
+ const Eigen::TensorMap<Eigen::Tensor<float, 3> >& in1() const { return in1_; }
+ const Eigen::TensorMap<Eigen::Tensor<float, 3> >& in2() const { return in2_; }
+ Eigen::TensorMap<Eigen::Tensor<float, 3> >& out() { return out_; }
+ Eigen::TensorMap<Eigen::Tensor<float, 1> > kernel1d() const { return Eigen::TensorMap<Eigen::Tensor<float, 1> >(kernel_1d_, 2); }
+ Eigen::TensorMap<Eigen::Tensor<float, 2> > kernel2d() const { return Eigen::TensorMap<Eigen::Tensor<float, 2> >(kernel_2d_, 2, 2); }
+ Eigen::TensorMap<Eigen::Tensor<float, 3> > kernel3d() const { return Eigen::TensorMap<Eigen::Tensor<float, 3> >(kernel_3d_, 2, 2, 2); }
+
+ private:
+ const Eigen::TensorMap<Eigen::Tensor<float, 3> >& in1_;
+ const Eigen::TensorMap<Eigen::Tensor<float, 3> >& in2_;
+ Eigen::TensorMap<Eigen::Tensor<float, 3> >& out_;
+
+ float* kernel_1d_;
+ float* kernel_2d_;
+ float* kernel_3d_;
+
+ Eigen::CudaStreamDevice stream_;
+ Eigen::GpuDevice gpu_device_;
+};
+
+
+// The actual expression to evaluate
+template <typename Context>
+void test_contextual_eval(Context* context)
+{
+ context->out().device(context->device()) = context->in1() + context->in2() * 3.14f + context->in1().constant(2.718f);
+}
+
+template <typename Context>
+void test_forced_contextual_eval(Context* context)
+{
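+ // The explicit .eval() forces the sum into a temporary before the rest of
+ // the expression is evaluated.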
+ context->out().device(context->device()) = (context->in1() + context->in2()).eval() * 3.14f + context->in1().constant(2.718f);
+}
+
+template <typename Context>
+void test_compound_assignment(Context* context)
+{
+ context->out().device(context->device()) = context->in1().constant(2.718f);
+ context->out().device(context->device()) += context->in1() + context->in2() * 3.14f;
+}
+
+
+template <typename Context>
+void test_contraction(Context* context)
+{
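+ // Contract over dimensions 1 and 2 of both inputs, yielding a 40x40 result
+ // that is written into the top-left corner of the reshaped output.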
+ Eigen::array<std::pair<int, int>, 2> dims;
+ dims[0] = std::make_pair(1, 1);
+ dims[1] = std::make_pair(2, 2);
+
+ Eigen::array<int, 2> shape(40, 50*70);
+
+ Eigen::DSizes<int, 2> indices(0,0);
+ Eigen::DSizes<int, 2> sizes(40,40);
+
+ context->out().reshape(shape).slice(indices, sizes).device(context->device()) = context->in1().contract(context->in2(), dims);
+}
+
+
+template <typename Context>
+void test_1d_convolution(Context* context)
+{
+ Eigen::DSizes<int, 3> indices(0,0,0);
+ Eigen::DSizes<int, 3> sizes(40,49,70);
+
+ Eigen::array<int, 1> dims(1);
+ context->out().slice(indices, sizes).device(context->device()) = context->in1().convolve(context->kernel1d(), dims);
+}
+
+template <typename Context>
+void test_2d_convolution(Context* context)
+{
+ Eigen::DSizes<int, 3> indices(0,0,0);
+ Eigen::DSizes<int, 3> sizes(40,49,69);
+
+ Eigen::array<int, 2> dims(1,2);
+ context->out().slice(indices, sizes).device(context->device()) = context->in1().convolve(context->kernel2d(), dims);
+}
+
+template <typename Context>
+void test_3d_convolution(Context* context)
+{
+ Eigen::DSizes<int, 3> indices(0,0,0);
+ Eigen::DSizes<int, 3> sizes(39,49,69);
+
+ Eigen::array<int, 3> dims(0,1,2);
+ context->out().slice(indices, sizes).device(context->device()) = context->in1().convolve(context->kernel3d(), dims);
+}
+
+
+void test_cpu() {
+ Eigen::Tensor<float, 3> in1(40,50,70);
+ Eigen::Tensor<float, 3> in2(40,50,70);
+ Eigen::Tensor<float, 3> out(40,50,70);
+
+ in1 = in1.random() + in1.constant(10.0f);
+ in2 = in2.random() + in2.constant(10.0f);
+
+ CPUContext context(in1, in2, out);
+ test_contextual_eval(&context);
+ for (int i = 0; i < 40; ++i) {
+ for (int j = 0; j < 50; ++j) {
+ for (int k = 0; k < 70; ++k) {
+ VERIFY_IS_APPROX(out(i,j,k), in1(i,j,k) + in2(i,j,k) * 3.14f + 2.718f);
+ }
+ }
+ }
+
+ test_forced_contextual_eval(&context);
+ for (int i = 0; i < 40; ++i) {
+ for (int j = 0; j < 50; ++j) {
+ for (int k = 0; k < 70; ++k) {
+ VERIFY_IS_APPROX(out(i,j,k), (in1(i,j,k) + in2(i,j,k)) * 3.14f + 2.718f);
+ }
+ }
+ }
+
+ test_compound_assignment(&context);
+ for (int i = 0; i < 40; ++i) {
+ for (int j = 0; j < 50; ++j) {
+ for (int k = 0; k < 70; ++k) {
+ VERIFY_IS_APPROX(out(i,j,k), in1(i,j,k) + in2(i,j,k) * 3.14f + 2.718f);
+ }
+ }
+ }
+
+ test_contraction(&context);
+ for (int i = 0; i < 40; ++i) {
+ for (int j = 0; j < 40; ++j) {
+ const float result = out(i,j,0);
+ float expected = 0;
+ for (int k = 0; k < 50; ++k) {
+ for (int l = 0; l < 70; ++l) {
+ expected += in1(i, k, l) * in2(j, k, l);
+ }
+ }
+ VERIFY_IS_APPROX(expected, result);
+ }
+ }
+
+ test_1d_convolution(&context);
+ for (int i = 0; i < 40; ++i) {
+ for (int j = 0; j < 49; ++j) {
+ for (int k = 0; k < 70; ++k) {
+ VERIFY_IS_APPROX(out(i,j,k), (in1(i,j,k) * 3.14f + in1(i,j+1,k) * 2.7f));
+ }
+ }
+ }
+
+ test_2d_convolution(&context);
+ for (int i = 0; i < 40; ++i) {
+ for (int j = 0; j < 49; ++j) {
+ for (int k = 0; k < 69; ++k) {
+ const float result = out(i,j,k);
+ const float expected = (in1(i,j,k) * 3.14f + in1(i,j+1,k) * 2.7f) +
+ (in1(i,j,k+1) * 0.2f + in1(i,j+1,k+1) * 7.0f);
+ if (fabs(expected) < 1e-4f && fabs(result) < 1e-4f) {
+ continue;
+ }
+ VERIFY_IS_APPROX(expected, result);
+ }
+ }
+ }
+
+ test_3d_convolution(&context);
+ for (int i = 0; i < 39; ++i) {
+ for (int j = 0; j < 49; ++j) {
+ for (int k = 0; k < 69; ++k) {
+ const float result = out(i,j,k);
+ const float expected = (in1(i,j,k) * 3.14f + in1(i,j+1,k) * 2.7f +
+ in1(i,j,k+1) * 0.2f + in1(i,j+1,k+1) * 7.0f) +
+ (in1(i+1,j,k) * -1.0f + in1(i+1,j+1,k) * -0.3f +
+ in1(i+1,j,k+1) * -0.7f + in1(i+1,j+1,k+1) * -0.5f);
+ if (fabs(expected) < 1e-4f && fabs(result) < 1e-4f) {
+ continue;
+ }
+ VERIFY_IS_APPROX(expected, result);
+ }
+ }
+ }
+}
+
+void test_gpu() {
+ Eigen::Tensor<float, 3> in1(40,50,70);
+ Eigen::Tensor<float, 3> in2(40,50,70);
+ Eigen::Tensor<float, 3> out(40,50,70);
+ in1 = in1.random() + in1.constant(10.0f);
+ in2 = in2.random() + in2.constant(10.0f);
+
+ std::size_t in1_bytes = in1.size() * sizeof(float);
+ std::size_t in2_bytes = in2.size() * sizeof(float);
+ std::size_t out_bytes = out.size() * sizeof(float);
+
+ float* d_in1;
+ float* d_in2;
+ float* d_out;
+ cudaMalloc((void**)(&d_in1), in1_bytes);
+ cudaMalloc((void**)(&d_in2), in2_bytes);
+ cudaMalloc((void**)(&d_out), out_bytes);
+
+ cudaMemcpy(d_in1, in1.data(), in1_bytes, cudaMemcpyHostToDevice);
+ cudaMemcpy(d_in2, in2.data(), in2_bytes, cudaMemcpyHostToDevice);
+
+ Eigen::TensorMap<Eigen::Tensor<float, 3> > gpu_in1(d_in1, 40,50,70);
+ Eigen::TensorMap<Eigen::Tensor<float, 3> > gpu_in2(d_in2, 40,50,70);
+ Eigen::TensorMap<Eigen::Tensor<float, 3> > gpu_out(d_out, 40,50,70);
+
+ GPUContext context(gpu_in1, gpu_in2, gpu_out);
+ test_contextual_eval(&context);
+ assert(cudaMemcpy(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost) == cudaSuccess);
+ for (int i = 0; i < 40; ++i) {
+ for (int j = 0; j < 50; ++j) {
+ for (int k = 0; k < 70; ++k) {
+ VERIFY_IS_APPROX(out(i,j,k), in1(i,j,k) + in2(i,j,k) * 3.14f + 2.718f);
+ }
+ }
+ }
+
+ test_forced_contextual_eval(&context);
+ assert(cudaMemcpy(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost) == cudaSuccess);
+ for (int i = 0; i < 40; ++i) {
+ for (int j = 0; j < 50; ++j) {
+ for (int k = 0; k < 70; ++k) {
+ VERIFY_IS_APPROX(out(i,j,k), (in1(i,j,k) + in2(i,j,k)) * 3.14f + 2.718f);
+ }
+ }
+ }
+
+ test_compound_assignment(&context);
+ assert(cudaMemcpy(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost) == cudaSuccess);
+ for (int i = 0; i < 40; ++i) {
+ for (int j = 0; j < 50; ++j) {
+ for (int k = 0; k < 70; ++k) {
+ VERIFY_IS_APPROX(out(i,j,k), in1(i,j,k) + in2(i,j,k) * 3.14f + 2.718f);
+ }
+ }
+ }
+
+ test_contraction(&context);
+ assert(cudaMemcpy(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost) == cudaSuccess);
+ for (int i = 0; i < 40; ++i) {
+ for (int j = 0; j < 40; ++j) {
+ const float result = out(i,j,0);
+ float expected = 0;
+ for (int k = 0; k < 50; ++k) {
+ for (int l = 0; l < 70; ++l) {
+ expected += in1(i, k, l) * in2(j, k, l);
+ }
+ }
+ VERIFY_IS_APPROX(expected, result);
+ }
+ }
+
+ test_1d_convolution(&context);
+ assert(cudaMemcpyAsync(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, context.device().stream()) == cudaSuccess);
+ assert(cudaStreamSynchronize(context.device().stream()) == cudaSuccess);
+ for (int i = 0; i < 40; ++i) {
+ for (int j = 0; j < 49; ++j) {
+ for (int k = 0; k < 70; ++k) {
+ VERIFY_IS_APPROX(out(i,j,k), (in1(i,j,k) * 3.14f + in1(i,j+1,k) * 2.7f));
+ }
+ }
+ }
+
+ test_2d_convolution(&context);
+ assert(cudaMemcpyAsync(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, context.device().stream()) == cudaSuccess);
+ assert(cudaStreamSynchronize(context.device().stream()) == cudaSuccess);
+ for (int i = 0; i < 40; ++i) {
+ for (int j = 0; j < 49; ++j) {
+ for (int k = 0; k < 69; ++k) {
+ const float result = out(i,j,k);
+ const float expected = (in1(i,j,k) * 3.14f + in1(i,j+1,k) * 2.7f +
+ in1(i,j,k+1) * 0.2f + in1(i,j+1,k+1) * 7.0f);
+ VERIFY_IS_APPROX(expected, result);
+ }
+ }
+ }
+
+ test_3d_convolution(&context);
+ assert(cudaMemcpyAsync(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, context.device().stream()) == cudaSuccess);
+ assert(cudaStreamSynchronize(context.device().stream()) == cudaSuccess);
+ for (int i = 0; i < 39; ++i) {
+ for (int j = 0; j < 49; ++j) {
+ for (int k = 0; k < 69; ++k) {
+ const float result = out(i,j,k);
+ const float expected = (in1(i,j,k) * 3.14f + in1(i,j+1,k) * 2.7f +
+ in1(i,j,k+1) * 0.2f + in1(i,j+1,k+1) * 7.0f +
+ in1(i+1,j,k) * -1.0f + in1(i+1,j+1,k) * -0.3f +
+ in1(i+1,j,k+1) * -0.7f + in1(i+1,j+1,k+1) * -0.5f);
+ VERIFY_IS_APPROX(expected, result);
+ }
+ }
+ }
+}
+
+
+void test_cxx11_tensor_device()
+{
+ CALL_SUBTEST_1(test_cpu());
+ CALL_SUBTEST_2(test_gpu());
+}
diff --git a/unsupported/test/cxx11_tensor_device_sycl.cpp b/unsupported/test/cxx11_tensor_device_sycl.cpp
new file mode 100644
index 000000000..7f79753c5
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_device_sycl.cpp
@@ -0,0 +1,31 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2016
+// Mehdi Goli Codeplay Software Ltd.
+// Ralph Potter Codeplay Software Ltd.
+// Luke Iwanski Codeplay Software Ltd.
+// Contact: <eigen@codeplay.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#define EIGEN_TEST_NO_LONGDOUBLE
+#define EIGEN_TEST_NO_COMPLEX
+#define EIGEN_TEST_FUNC cxx11_tensor_device_sycl
+#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int
+#define EIGEN_USE_SYCL
+
+#include "main.h"
+#include <unsupported/Eigen/CXX11/Tensor>
+
+void test_device_sycl(const Eigen::SyclDevice &sycl_device) {
+ std::cout << "Hello from ComputeCpp: the requested device exists and its name is: "
+           << sycl_device.m_queue.get_device().template get_info<cl::sycl::info::device::name>()
+           << std::endl;
+}
+void test_cxx11_tensor_device_sycl() {
+ cl::sycl::gpu_selector s;
+ Eigen::SyclDevice sycl_device(s);
+ CALL_SUBTEST(test_device_sycl(sycl_device));
+}
diff --git a/unsupported/test/cxx11_tensor_dimension.cpp b/unsupported/test/cxx11_tensor_dimension.cpp
new file mode 100644
index 000000000..16f168ed4
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_dimension.cpp
@@ -0,0 +1,69 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+
+#include <Eigen/CXX11/Tensor>
+
+using Eigen::Tensor;
+
+
+static void test_dynamic_size()
+{
+ Eigen::DSizes<int, 3> dimensions(2,3,7);
+
+ VERIFY_IS_EQUAL((int)Eigen::internal::array_get<0>(dimensions), 2);
+ VERIFY_IS_EQUAL((int)Eigen::internal::array_get<1>(dimensions), 3);
+ VERIFY_IS_EQUAL((int)Eigen::internal::array_get<2>(dimensions), 7);
+ VERIFY_IS_EQUAL((int)dimensions.TotalSize(), 2*3*7);
+ VERIFY_IS_EQUAL((int)dimensions[0], 2);
+ VERIFY_IS_EQUAL((int)dimensions[1], 3);
+ VERIFY_IS_EQUAL((int)dimensions[2], 7);
+}
+
+static void test_fixed_size()
+{
+ Eigen::Sizes<2,3,7> dimensions;
+
+ VERIFY_IS_EQUAL((int)Eigen::internal::array_get<0>(dimensions), 2);
+ VERIFY_IS_EQUAL((int)Eigen::internal::array_get<1>(dimensions), 3);
+ VERIFY_IS_EQUAL((int)Eigen::internal::array_get<2>(dimensions), 7);
+ VERIFY_IS_EQUAL((int)dimensions.TotalSize(), 2*3*7);
+}
+
+static void test_match()
+{
+ Eigen::DSizes<unsigned int, 3> dyn((unsigned int)2,(unsigned int)3,(unsigned int)7);
+ Eigen::Sizes<2,3,7> stat;
+ VERIFY_IS_EQUAL(Eigen::dimensions_match(dyn, stat), true);
+
+ Eigen::DSizes<int, 3> dyn1(2,3,7);
+ Eigen::DSizes<int, 2> dyn2(2,3);
+ VERIFY_IS_EQUAL(Eigen::dimensions_match(dyn1, dyn2), false);
+}
+
+static void test_rank_zero()
+{
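+ // A rank-0 size list describes a scalar: no dimensions, but a total size of 1.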
+ Eigen::Sizes<> scalar;
+ VERIFY_IS_EQUAL((int)scalar.TotalSize(), 1);
+ VERIFY_IS_EQUAL((int)scalar.rank(), 0);
+ VERIFY_IS_EQUAL((int)internal::array_prod(scalar), 1);
+
+ Eigen::DSizes<ptrdiff_t, 0> dscalar;
+ VERIFY_IS_EQUAL((int)dscalar.TotalSize(), 1);
+ VERIFY_IS_EQUAL((int)dscalar.rank(), 0);
+}
+
+void test_cxx11_tensor_dimension()
+{
+ CALL_SUBTEST(test_dynamic_size());
+ CALL_SUBTEST(test_fixed_size());
+ CALL_SUBTEST(test_match());
+ CALL_SUBTEST(test_rank_zero());
+}
diff --git a/unsupported/test/cxx11_tensor_empty.cpp b/unsupported/test/cxx11_tensor_empty.cpp
new file mode 100644
index 000000000..d7eea42d7
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_empty.cpp
@@ -0,0 +1,40 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2015 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+
+#include <Eigen/CXX11/Tensor>
+
+
+static void test_empty_tensor()
+{
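+ // Copying and assigning default-constructed (zero-size) tensors must not crash.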
+ Tensor<float, 2> source;
+ Tensor<float, 2> tgt1 = source;
+ Tensor<float, 2> tgt2(source);
+ Tensor<float, 2> tgt3;
+ tgt3 = tgt1;
+ tgt3 = tgt2;
+}
+
+static void test_empty_fixed_size_tensor()
+{
+ TensorFixedSize<float, Sizes<0> > source;
+ TensorFixedSize<float, Sizes<0> > tgt1 = source;
+ TensorFixedSize<float, Sizes<0> > tgt2(source);
+ TensorFixedSize<float, Sizes<0> > tgt3;
+ tgt3 = tgt1;
+ tgt3 = tgt2;
+}
+
+
+void test_cxx11_tensor_empty()
+{
+ CALL_SUBTEST(test_empty_tensor());
+ CALL_SUBTEST(test_empty_fixed_size_tensor());
+}
diff --git a/unsupported/test/cxx11_tensor_expr.cpp b/unsupported/test/cxx11_tensor_expr.cpp
new file mode 100644
index 000000000..77e24cb67
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_expr.cpp
@@ -0,0 +1,314 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+
+#include <Eigen/CXX11/Tensor>
+
+using Eigen::Tensor;
+using Eigen::RowMajor;
+
+static void test_1d()
+{
+ Tensor<float, 1> vec1(6);
+ Tensor<float, 1, RowMajor> vec2(6);
+
+ vec1(0) = 4.0; vec2(0) = 0.0;
+ vec1(1) = 8.0; vec2(1) = 1.0;
+ vec1(2) = 15.0; vec2(2) = 2.0;
+ vec1(3) = 16.0; vec2(3) = 3.0;
+ vec1(4) = 23.0; vec2(4) = 4.0;
+ vec1(5) = 42.0; vec2(5) = 5.0;
+
+ float data3[6];
+ TensorMap<Tensor<float, 1>> vec3(data3, 6);
+ vec3 = vec1.sqrt();
+ float data4[6];
+ TensorMap<Tensor<float, 1, RowMajor>> vec4(data4, 6);
+ vec4 = vec2.square();
+ float data5[6];
+ TensorMap<Tensor<float, 1, RowMajor>> vec5(data5, 6);
+ vec5 = vec2.cube();
+
+ VERIFY_IS_APPROX(vec3(0), sqrtf(4.0));
+ VERIFY_IS_APPROX(vec3(1), sqrtf(8.0));
+ VERIFY_IS_APPROX(vec3(2), sqrtf(15.0));
+ VERIFY_IS_APPROX(vec3(3), sqrtf(16.0));
+ VERIFY_IS_APPROX(vec3(4), sqrtf(23.0));
+ VERIFY_IS_APPROX(vec3(5), sqrtf(42.0));
+
+ VERIFY_IS_APPROX(vec4(0), 0.0f);
+ VERIFY_IS_APPROX(vec4(1), 1.0f);
+ VERIFY_IS_APPROX(vec4(2), 2.0f * 2.0f);
+ VERIFY_IS_APPROX(vec4(3), 3.0f * 3.0f);
+ VERIFY_IS_APPROX(vec4(4), 4.0f * 4.0f);
+ VERIFY_IS_APPROX(vec4(5), 5.0f * 5.0f);
+
+ VERIFY_IS_APPROX(vec5(0), 0.0f);
+ VERIFY_IS_APPROX(vec5(1), 1.0f);
+ VERIFY_IS_APPROX(vec5(2), 2.0f * 2.0f * 2.0f);
+ VERIFY_IS_APPROX(vec5(3), 3.0f * 3.0f * 3.0f);
+ VERIFY_IS_APPROX(vec5(4), 4.0f * 4.0f * 4.0f);
+ VERIFY_IS_APPROX(vec5(5), 5.0f * 5.0f * 5.0f);
+
+ vec3 = vec1 + vec2;
+ VERIFY_IS_APPROX(vec3(0), 4.0f + 0.0f);
+ VERIFY_IS_APPROX(vec3(1), 8.0f + 1.0f);
+ VERIFY_IS_APPROX(vec3(2), 15.0f + 2.0f);
+ VERIFY_IS_APPROX(vec3(3), 16.0f + 3.0f);
+ VERIFY_IS_APPROX(vec3(4), 23.0f + 4.0f);
+ VERIFY_IS_APPROX(vec3(5), 42.0f + 5.0f);
+}
+
+static void test_2d()
+{
+ float data1[6];
+ TensorMap<Tensor<float, 2>> mat1(data1, 2, 3);
+ float data2[6];
+ TensorMap<Tensor<float, 2, RowMajor>> mat2(data2, 2, 3);
+
+ mat1(0,0) = 0.0;
+ mat1(0,1) = 1.0;
+ mat1(0,2) = 2.0;
+ mat1(1,0) = 3.0;
+ mat1(1,1) = 4.0;
+ mat1(1,2) = 5.0;
+
+ mat2(0,0) = -0.0;
+ mat2(0,1) = -1.0;
+ mat2(0,2) = -2.0;
+ mat2(1,0) = -3.0;
+ mat2(1,1) = -4.0;
+ mat2(1,2) = -5.0;
+
+ Tensor<float, 2> mat3(2,3);
+ Tensor<float, 2, RowMajor> mat4(2,3);
+ mat3 = mat1.abs();
+ mat4 = mat2.abs();
+
+ VERIFY_IS_APPROX(mat3(0,0), 0.0f);
+ VERIFY_IS_APPROX(mat3(0,1), 1.0f);
+ VERIFY_IS_APPROX(mat3(0,2), 2.0f);
+ VERIFY_IS_APPROX(mat3(1,0), 3.0f);
+ VERIFY_IS_APPROX(mat3(1,1), 4.0f);
+ VERIFY_IS_APPROX(mat3(1,2), 5.0f);
+
+ VERIFY_IS_APPROX(mat4(0,0), 0.0f);
+ VERIFY_IS_APPROX(mat4(0,1), 1.0f);
+ VERIFY_IS_APPROX(mat4(0,2), 2.0f);
+ VERIFY_IS_APPROX(mat4(1,0), 3.0f);
+ VERIFY_IS_APPROX(mat4(1,1), 4.0f);
+ VERIFY_IS_APPROX(mat4(1,2), 5.0f);
+}
+
+static void test_3d()
+{
+ Tensor<float, 3> mat1(2,3,7);
+ Tensor<float, 3, RowMajor> mat2(2,3,7);
+
+ float val = 1.0f;
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 7; ++k) {
+ mat1(i,j,k) = val;
+ mat2(i,j,k) = val;
+ val += 1.0f;
+ }
+ }
+ }
+
+ Tensor<float, 3> mat3(2,3,7);
+ mat3 = mat1 + mat1;
+ Tensor<float, 3, RowMajor> mat4(2,3,7);
+ mat4 = mat2 * 3.14f;
+ Tensor<float, 3> mat5(2,3,7);
+ mat5 = mat1.inverse().log();
+ Tensor<float, 3, RowMajor> mat6(2,3,7);
+ mat6 = mat2.pow(0.5f) * 3.14f;
+ Tensor<float, 3> mat7(2,3,7);
+ mat7 = mat1.cwiseMax(mat5 * 2.0f).exp();
+ Tensor<float, 3, RowMajor> mat8(2,3,7);
+ mat8 = (-mat2).exp() * 3.14f;
+ Tensor<float, 3, RowMajor> mat9(2,3,7);
+ mat9 = mat2 + 3.14f;
+ Tensor<float, 3, RowMajor> mat10(2,3,7);
+ mat10 = mat2 - 3.14f;
+ Tensor<float, 3, RowMajor> mat11(2,3,7);
+ mat11 = mat2 / 3.14f;
+
+ val = 1.0f;
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 7; ++k) {
+ VERIFY_IS_APPROX(mat3(i,j,k), val + val);
+ VERIFY_IS_APPROX(mat4(i,j,k), val * 3.14f);
+ VERIFY_IS_APPROX(mat5(i,j,k), logf(1.0f/val));
+ VERIFY_IS_APPROX(mat6(i,j,k), sqrtf(val) * 3.14f);
+ VERIFY_IS_APPROX(mat7(i,j,k), expf((std::max)(val, mat5(i,j,k) * 2.0f)));
+ VERIFY_IS_APPROX(mat8(i,j,k), expf(-val) * 3.14f);
+ VERIFY_IS_APPROX(mat9(i,j,k), val + 3.14f);
+ VERIFY_IS_APPROX(mat10(i,j,k), val - 3.14f);
+ VERIFY_IS_APPROX(mat11(i,j,k), val / 3.14f);
+ val += 1.0f;
+ }
+ }
+ }
+}
+
+static void test_constants()
+{
+ Tensor<float, 3> mat1(2,3,7);
+ Tensor<float, 3> mat2(2,3,7);
+ Tensor<float, 3> mat3(2,3,7);
+
+ float val = 1.0f;
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 7; ++k) {
+ mat1(i,j,k) = val;
+ val += 1.0f;
+ }
+ }
+ }
+ mat2 = mat1.constant(3.14f);
+ mat3 = mat1.cwiseMax(7.3f).exp();
+
+ val = 1.0f;
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 7; ++k) {
+ VERIFY_IS_APPROX(mat2(i,j,k), 3.14f);
+ VERIFY_IS_APPROX(mat3(i,j,k), expf((std::max)(val, 7.3f)));
+ val += 1.0f;
+ }
+ }
+ }
+}
+
+static void test_boolean()
+{
+ Tensor<int, 1> vec(6);
+ std::copy_n(std::begin({0, 1, 2, 3, 4, 5}), 6, vec.data());
+
+ // Test ||.
+ Tensor<bool, 1> bool1 = vec < vec.constant(1) || vec > vec.constant(4);
+ VERIFY_IS_EQUAL(bool1[0], true);
+ VERIFY_IS_EQUAL(bool1[1], false);
+ VERIFY_IS_EQUAL(bool1[2], false);
+ VERIFY_IS_EQUAL(bool1[3], false);
+ VERIFY_IS_EQUAL(bool1[4], false);
+ VERIFY_IS_EQUAL(bool1[5], true);
+
+ // Test &&, including cast of operand vec.
+ Tensor<bool, 1> bool2 = vec.cast<bool>() && vec < vec.constant(4);
+ VERIFY_IS_EQUAL(bool2[0], false);
+ VERIFY_IS_EQUAL(bool2[1], true);
+ VERIFY_IS_EQUAL(bool2[2], true);
+ VERIFY_IS_EQUAL(bool2[3], true);
+ VERIFY_IS_EQUAL(bool2[4], false);
+ VERIFY_IS_EQUAL(bool2[5], false);
+
+ // Compilation tests:
+ // Assign Tensor<bool> from the results of casts and comparisons; this
+ // verifies that CoeffReturnType is set to the bool return type of the
+ // unary and binary ops involved.
+ Tensor<bool, 1> bool3 = vec.cast<bool>() && bool2;
+ bool3 = vec < vec.constant(4) && bool2;
+}
+
+static void test_functors()
+{
+ Tensor<float, 3> mat1(2,3,7);
+ Tensor<float, 3> mat2(2,3,7);
+ Tensor<float, 3> mat3(2,3,7);
+
+ float val = 1.0f;
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 7; ++k) {
+ mat1(i,j,k) = val;
+ val += 1.0f;
+ }
+ }
+ }
+ mat2 = mat1.inverse().unaryExpr(&asinf);
+ mat3 = mat1.unaryExpr(&tanhf);
+
+ val = 1.0f;
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 7; ++k) {
+ VERIFY_IS_APPROX(mat2(i,j,k), asinf(1.0f / mat1(i,j,k)));
+ VERIFY_IS_APPROX(mat3(i,j,k), tanhf(mat1(i,j,k)));
+ val += 1.0f;
+ }
+ }
+ }
+}
+
+static void test_type_casting()
+{
+ Tensor<bool, 3> mat1(2,3,7);
+ Tensor<float, 3> mat2(2,3,7);
+ Tensor<double, 3> mat3(2,3,7);
+ mat1.setRandom();
+ mat2.setRandom();
+
+ mat3 = mat1.cast<double>();
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 7; ++k) {
+ VERIFY_IS_APPROX(mat3(i,j,k), mat1(i,j,k) ? 1.0 : 0.0);
+ }
+ }
+ }
+
+ mat3 = mat2.cast<double>();
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 7; ++k) {
+ VERIFY_IS_APPROX(mat3(i,j,k), static_cast<double>(mat2(i,j,k)));
+ }
+ }
+ }
+}
+
+static void test_select()
+{
+ Tensor<float, 3> selector(2,3,7);
+ Tensor<float, 3> mat1(2,3,7);
+ Tensor<float, 3> mat2(2,3,7);
+ Tensor<float, 3> result(2,3,7);
+
+ selector.setRandom();
+ mat1.setRandom();
+ mat2.setRandom();
+ result = (selector > selector.constant(0.5f)).select(mat1, mat2);
+
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 7; ++k) {
+ VERIFY_IS_APPROX(result(i,j,k), (selector(i,j,k) > 0.5f) ? mat1(i,j,k) : mat2(i,j,k));
+ }
+ }
+ }
+}
+
+
+void test_cxx11_tensor_expr()
+{
+ CALL_SUBTEST(test_1d());
+ CALL_SUBTEST(test_2d());
+ CALL_SUBTEST(test_3d());
+ CALL_SUBTEST(test_constants());
+ CALL_SUBTEST(test_boolean());
+ CALL_SUBTEST(test_functors());
+ CALL_SUBTEST(test_type_casting());
+ CALL_SUBTEST(test_select());
+}
diff --git a/unsupported/test/cxx11_tensor_fft.cpp b/unsupported/test/cxx11_tensor_fft.cpp
new file mode 100644
index 000000000..2f14ebc62
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_fft.cpp
@@ -0,0 +1,273 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Jianwei Cui <thucjw@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+#include <Eigen/CXX11/Tensor>
+
+using Eigen::Tensor;
+
+template <int DataLayout>
+static void test_fft_2D_golden() {
+ Tensor<float, 2, DataLayout> input(2, 3);
+ input(0, 0) = 1;
+ input(0, 1) = 2;
+ input(0, 2) = 3;
+ input(1, 0) = 4;
+ input(1, 1) = 5;
+ input(1, 2) = 6;
+
+ array<ptrdiff_t, 2> fft;
+ fft[0] = 0;
+ fft[1] = 1;
+
+ Tensor<std::complex<float>, 2, DataLayout> output = input.template fft<Eigen::BothParts, Eigen::FFT_FORWARD>(fft);
+
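+ // The golden values are the unnormalized 2-D DFT:
+ //   X(k1,k2) = sum_{n1,n2} x(n1,n2) * exp(-2*pi*i*(k1*n1/2 + k2*n2/3))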
+ std::complex<float> output_golden[6]; // in ColMajor order
+ output_golden[0] = std::complex<float>(21, 0);
+ output_golden[1] = std::complex<float>(-9, 0);
+ output_golden[2] = std::complex<float>(-3, 1.73205);
+ output_golden[3] = std::complex<float>(0, 0);
+ output_golden[4] = std::complex<float>(-3, -1.73205);
+ output_golden[5] = std::complex<float>(0, 0);
+
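+ // Adding a constant offset keeps VERIFY_IS_APPROX's relative-error check
+ // away from comparisons against exact zeros.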
+ std::complex<float> c_offset = std::complex<float>(1.0, 1.0);
+
+ if (DataLayout == ColMajor) {
+ VERIFY_IS_APPROX(output(0) + c_offset, output_golden[0] + c_offset);
+ VERIFY_IS_APPROX(output(1) + c_offset, output_golden[1] + c_offset);
+ VERIFY_IS_APPROX(output(2) + c_offset, output_golden[2] + c_offset);
+ VERIFY_IS_APPROX(output(3) + c_offset, output_golden[3] + c_offset);
+ VERIFY_IS_APPROX(output(4) + c_offset, output_golden[4] + c_offset);
+ VERIFY_IS_APPROX(output(5) + c_offset, output_golden[5] + c_offset);
+ }
+ else {
+ VERIFY_IS_APPROX(output(0) + c_offset, output_golden[0] + c_offset);
+ VERIFY_IS_APPROX(output(1) + c_offset, output_golden[2] + c_offset);
+ VERIFY_IS_APPROX(output(2) + c_offset, output_golden[4] + c_offset);
+ VERIFY_IS_APPROX(output(3) + c_offset, output_golden[1] + c_offset);
+ VERIFY_IS_APPROX(output(4) + c_offset, output_golden[3] + c_offset);
+ VERIFY_IS_APPROX(output(5) + c_offset, output_golden[5] + c_offset);
+ }
+}
+
+static void test_fft_complex_input_golden() {
+ Tensor<std::complex<float>, 1, ColMajor> input(5);
+ input(0) = std::complex<float>(1, 1);
+ input(1) = std::complex<float>(2, 2);
+ input(2) = std::complex<float>(3, 3);
+ input(3) = std::complex<float>(4, 4);
+ input(4) = std::complex<float>(5, 5);
+
+ array<ptrdiff_t, 1> fft;
+ fft[0] = 0;
+
+ Tensor<std::complex<float>, 1, ColMajor> forward_output_both_parts = input.fft<BothParts, FFT_FORWARD>(fft);
+ Tensor<std::complex<float>, 1, ColMajor> reverse_output_both_parts = input.fft<BothParts, FFT_REVERSE>(fft);
+
+ Tensor<float, 1, ColMajor> forward_output_real_part = input.fft<RealPart, FFT_FORWARD>(fft);
+ Tensor<float, 1, ColMajor> reverse_output_real_part = input.fft<RealPart, FFT_REVERSE>(fft);
+
+ Tensor<float, 1, ColMajor> forward_output_imag_part = input.fft<ImagPart, FFT_FORWARD>(fft);
+ Tensor<float, 1, ColMajor> reverse_output_imag_part = input.fft<ImagPart, FFT_REVERSE>(fft);
+
+ VERIFY_IS_EQUAL(forward_output_both_parts.dimension(0), input.dimension(0));
+ VERIFY_IS_EQUAL(reverse_output_both_parts.dimension(0), input.dimension(0));
+
+ VERIFY_IS_EQUAL(forward_output_real_part.dimension(0), input.dimension(0));
+ VERIFY_IS_EQUAL(reverse_output_real_part.dimension(0), input.dimension(0));
+
+ VERIFY_IS_EQUAL(forward_output_imag_part.dimension(0), input.dimension(0));
+ VERIFY_IS_EQUAL(reverse_output_imag_part.dimension(0), input.dimension(0));
+
+ std::complex<float> forward_golden_result[5];
+ std::complex<float> reverse_golden_result[5];
+
+ forward_golden_result[0] = std::complex<float>(15.000000000000000, +15.000000000000000);
+ forward_golden_result[1] = std::complex<float>(-5.940954801177935, +0.940954801177934);
+ forward_golden_result[2] = std::complex<float>(-3.312299240582266, -1.687700759417735);
+ forward_golden_result[3] = std::complex<float>(-1.687700759417735, -3.312299240582266);
+ forward_golden_result[4] = std::complex<float>( 0.940954801177934, -5.940954801177935);
+
+ reverse_golden_result[0] = std::complex<float>( 3.000000000000000, + 3.000000000000000);
+ reverse_golden_result[1] = std::complex<float>( 0.188190960235587, - 1.188190960235587);
+ reverse_golden_result[2] = std::complex<float>(-0.337540151883547, - 0.662459848116453);
+ reverse_golden_result[3] = std::complex<float>(-0.662459848116453, - 0.337540151883547);
+ reverse_golden_result[4] = std::complex<float>(-1.188190960235587, + 0.188190960235587);
+
+ for(int i = 0; i < 5; ++i) {
+ VERIFY_IS_APPROX(forward_output_both_parts(i), forward_golden_result[i]);
+ VERIFY_IS_APPROX(forward_output_real_part(i), forward_golden_result[i].real());
+ VERIFY_IS_APPROX(forward_output_imag_part(i), forward_golden_result[i].imag());
+ }
+
+ for(int i = 0; i < 5; ++i) {
+ VERIFY_IS_APPROX(reverse_output_both_parts(i), reverse_golden_result[i]);
+ VERIFY_IS_APPROX(reverse_output_real_part(i), reverse_golden_result[i].real());
+ VERIFY_IS_APPROX(reverse_output_imag_part(i), reverse_golden_result[i].imag());
+ }
+}
+
+static void test_fft_real_input_golden() {
+ Tensor<float, 1, ColMajor> input(5);
+ input(0) = 1.0;
+ input(1) = 2.0;
+ input(2) = 3.0;
+ input(3) = 4.0;
+ input(4) = 5.0;
+
+ array<ptrdiff_t, 1> fft;
+ fft[0] = 0;
+
+ Tensor<std::complex<float>, 1, ColMajor> forward_output_both_parts = input.fft<BothParts, FFT_FORWARD>(fft);
+ Tensor<std::complex<float>, 1, ColMajor> reverse_output_both_parts = input.fft<BothParts, FFT_REVERSE>(fft);
+
+ Tensor<float, 1, ColMajor> forward_output_real_part = input.fft<RealPart, FFT_FORWARD>(fft);
+ Tensor<float, 1, ColMajor> reverse_output_real_part = input.fft<RealPart, FFT_REVERSE>(fft);
+
+ Tensor<float, 1, ColMajor> forward_output_imag_part = input.fft<ImagPart, FFT_FORWARD>(fft);
+ Tensor<float, 1, ColMajor> reverse_output_imag_part = input.fft<ImagPart, FFT_REVERSE>(fft);
+
+ VERIFY_IS_EQUAL(forward_output_both_parts.dimension(0), input.dimension(0));
+ VERIFY_IS_EQUAL(reverse_output_both_parts.dimension(0), input.dimension(0));
+
+ VERIFY_IS_EQUAL(forward_output_real_part.dimension(0), input.dimension(0));
+ VERIFY_IS_EQUAL(reverse_output_real_part.dimension(0), input.dimension(0));
+
+ VERIFY_IS_EQUAL(forward_output_imag_part.dimension(0), input.dimension(0));
+ VERIFY_IS_EQUAL(reverse_output_imag_part.dimension(0), input.dimension(0));
+
+ std::complex<float> forward_golden_result[5];
+ std::complex<float> reverse_golden_result[5];
+
+
+ forward_golden_result[0] = std::complex<float>( 15, 0);
+ forward_golden_result[1] = std::complex<float>(-2.5, +3.44095480117793);
+ forward_golden_result[2] = std::complex<float>(-2.5, +0.81229924058227);
+ forward_golden_result[3] = std::complex<float>(-2.5, -0.81229924058227);
+ forward_golden_result[4] = std::complex<float>(-2.5, -3.44095480117793);
+
+ reverse_golden_result[0] = std::complex<float>( 3.0, 0);
+ reverse_golden_result[1] = std::complex<float>(-0.5, -0.688190960235587);
+ reverse_golden_result[2] = std::complex<float>(-0.5, -0.162459848116453);
+ reverse_golden_result[3] = std::complex<float>(-0.5, +0.162459848116453);
+ reverse_golden_result[4] = std::complex<float>(-0.5, +0.688190960235587);
+
+ std::complex<float> c_offset(1.0, 1.0);
+ float r_offset = 1.0;
+
+ for(int i = 0; i < 5; ++i) {
+ VERIFY_IS_APPROX(forward_output_both_parts(i) + c_offset, forward_golden_result[i] + c_offset);
+ VERIFY_IS_APPROX(forward_output_real_part(i) + r_offset, forward_golden_result[i].real() + r_offset);
+ VERIFY_IS_APPROX(forward_output_imag_part(i) + r_offset, forward_golden_result[i].imag() + r_offset);
+ }
+
+ for(int i = 0; i < 5; ++i) {
+ VERIFY_IS_APPROX(reverse_output_both_parts(i) + c_offset, reverse_golden_result[i] + c_offset);
+ VERIFY_IS_APPROX(reverse_output_real_part(i) + r_offset, reverse_golden_result[i].real() + r_offset);
+ VERIFY_IS_APPROX(reverse_output_imag_part(i) + r_offset, reverse_golden_result[i].imag() + r_offset);
+ }
+}
+
+
+template <int DataLayout, typename RealScalar, bool isComplexInput, int FFTResultType, int FFTDirection, int TensorRank>
+static void test_fft_real_input_energy() {
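+ // Parseval-style check: an unnormalized forward FFT multiplies the total
+ // energy by the number of samples, and the reverse transform divides by it.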
+
+ Eigen::DSizes<ptrdiff_t, TensorRank> dimensions;
+ ptrdiff_t total_size = 1;
+ for (int i = 0; i < TensorRank; ++i) {
+ dimensions[i] = rand() % 20 + 1;
+ total_size *= dimensions[i];
+ }
+ const DSizes<ptrdiff_t, TensorRank> arr = dimensions;
+
+ typedef typename internal::conditional<isComplexInput == true, std::complex<RealScalar>, RealScalar>::type InputScalar;
+
+ Tensor<InputScalar, TensorRank, DataLayout> input;
+ input.resize(arr);
+ input.setRandom();
+
+ array<ptrdiff_t, TensorRank> fft;
+ for (int i = 0; i < TensorRank; ++i) {
+ fft[i] = i;
+ }
+
+ typedef typename internal::conditional<FFTResultType == Eigen::BothParts, std::complex<RealScalar>, RealScalar>::type OutputScalar;
+ Tensor<OutputScalar, TensorRank, DataLayout> output;
+ output = input.template fft<FFTResultType, FFTDirection>(fft);
+
+ for (int i = 0; i < TensorRank; ++i) {
+ VERIFY_IS_EQUAL(output.dimension(i), input.dimension(i));
+ }
+
+ RealScalar energy_original = 0.0;
+ RealScalar energy_after_fft = 0.0;
+
+ for (int i = 0; i < total_size; ++i) {
+ energy_original += numext::abs2(input(i));
+ }
+
+ for (int i = 0; i < total_size; ++i) {
+ energy_after_fft += numext::abs2(output(i));
+ }
+
+ if(FFTDirection == FFT_FORWARD) {
+ VERIFY_IS_APPROX(energy_original, energy_after_fft / total_size);
+ }
+ else {
+ VERIFY_IS_APPROX(energy_original, energy_after_fft * total_size);
+ }
+}
+
+void test_cxx11_tensor_fft() {
+ test_fft_complex_input_golden();
+ test_fft_real_input_golden();
+
+ test_fft_2D_golden<ColMajor>();
+ test_fft_2D_golden<RowMajor>();
+
+ test_fft_real_input_energy<ColMajor, float, true, Eigen::BothParts, FFT_FORWARD, 1>();
+ test_fft_real_input_energy<ColMajor, double, true, Eigen::BothParts, FFT_FORWARD, 1>();
+ test_fft_real_input_energy<ColMajor, float, false, Eigen::BothParts, FFT_FORWARD, 1>();
+ test_fft_real_input_energy<ColMajor, double, false, Eigen::BothParts, FFT_FORWARD, 1>();
+
+ test_fft_real_input_energy<ColMajor, float, true, Eigen::BothParts, FFT_FORWARD, 2>();
+ test_fft_real_input_energy<ColMajor, double, true, Eigen::BothParts, FFT_FORWARD, 2>();
+ test_fft_real_input_energy<ColMajor, float, false, Eigen::BothParts, FFT_FORWARD, 2>();
+ test_fft_real_input_energy<ColMajor, double, false, Eigen::BothParts, FFT_FORWARD, 2>();
+
+ test_fft_real_input_energy<ColMajor, float, true, Eigen::BothParts, FFT_FORWARD, 3>();
+ test_fft_real_input_energy<ColMajor, double, true, Eigen::BothParts, FFT_FORWARD, 3>();
+ test_fft_real_input_energy<ColMajor, float, false, Eigen::BothParts, FFT_FORWARD, 3>();
+ test_fft_real_input_energy<ColMajor, double, false, Eigen::BothParts, FFT_FORWARD, 3>();
+
+ test_fft_real_input_energy<ColMajor, float, true, Eigen::BothParts, FFT_FORWARD, 4>();
+ test_fft_real_input_energy<ColMajor, double, true, Eigen::BothParts, FFT_FORWARD, 4>();
+ test_fft_real_input_energy<ColMajor, float, false, Eigen::BothParts, FFT_FORWARD, 4>();
+ test_fft_real_input_energy<ColMajor, double, false, Eigen::BothParts, FFT_FORWARD, 4>();
+
+ test_fft_real_input_energy<RowMajor, float, true, Eigen::BothParts, FFT_FORWARD, 1>();
+ test_fft_real_input_energy<RowMajor, double, true, Eigen::BothParts, FFT_FORWARD, 1>();
+ test_fft_real_input_energy<RowMajor, float, false, Eigen::BothParts, FFT_FORWARD, 1>();
+ test_fft_real_input_energy<RowMajor, double, false, Eigen::BothParts, FFT_FORWARD, 1>();
+
+ test_fft_real_input_energy<RowMajor, float, true, Eigen::BothParts, FFT_FORWARD, 2>();
+ test_fft_real_input_energy<RowMajor, double, true, Eigen::BothParts, FFT_FORWARD, 2>();
+ test_fft_real_input_energy<RowMajor, float, false, Eigen::BothParts, FFT_FORWARD, 2>();
+ test_fft_real_input_energy<RowMajor, double, false, Eigen::BothParts, FFT_FORWARD, 2>();
+
+ test_fft_real_input_energy<RowMajor, float, true, Eigen::BothParts, FFT_FORWARD, 3>();
+ test_fft_real_input_energy<RowMajor, double, true, Eigen::BothParts, FFT_FORWARD, 3>();
+ test_fft_real_input_energy<RowMajor, float, false, Eigen::BothParts, FFT_FORWARD, 3>();
+ test_fft_real_input_energy<RowMajor, double, false, Eigen::BothParts, FFT_FORWARD, 3>();
+
+ test_fft_real_input_energy<RowMajor, float, true, Eigen::BothParts, FFT_FORWARD, 4>();
+ test_fft_real_input_energy<RowMajor, double, true, Eigen::BothParts, FFT_FORWARD, 4>();
+ test_fft_real_input_energy<RowMajor, float, false, Eigen::BothParts, FFT_FORWARD, 4>();
+ test_fft_real_input_energy<RowMajor, double, false, Eigen::BothParts, FFT_FORWARD, 4>();
+}
diff --git a/unsupported/test/cxx11_tensor_fixed_size.cpp b/unsupported/test/cxx11_tensor_fixed_size.cpp
new file mode 100644
index 000000000..4c660de65
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_fixed_size.cpp
@@ -0,0 +1,261 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+
+#include <Eigen/CXX11/Tensor>
+
+using Eigen::Tensor;
+using Eigen::RowMajor;
+
+
+static void test_0d()
+{
+ TensorFixedSize<float, Sizes<> > scalar1;
+ TensorFixedSize<float, Sizes<>, RowMajor> scalar2;
+ VERIFY_IS_EQUAL(scalar1.rank(), 0);
+ VERIFY_IS_EQUAL(scalar1.size(), 1);
+ VERIFY_IS_EQUAL(array_prod(scalar1.dimensions()), 1);
+
+ scalar1() = 7.0;
+ scalar2() = 13.0;
+
+ // Test against shallow copy.
+ TensorFixedSize<float, Sizes<> > copy = scalar1;
+ VERIFY_IS_NOT_EQUAL(scalar1.data(), copy.data());
+ VERIFY_IS_APPROX(scalar1(), copy());
+ copy = scalar1;
+ VERIFY_IS_NOT_EQUAL(scalar1.data(), copy.data());
+ VERIFY_IS_APPROX(scalar1(), copy());
+
+ TensorFixedSize<float, Sizes<> > scalar3 = scalar1.sqrt();
+ TensorFixedSize<float, Sizes<>, RowMajor> scalar4 = scalar2.sqrt();
+ VERIFY_IS_EQUAL(scalar3.rank(), 0);
+ VERIFY_IS_APPROX(scalar3(), sqrtf(7.0));
+ VERIFY_IS_APPROX(scalar4(), sqrtf(13.0));
+
+ scalar3 = scalar1 + scalar2;
+ VERIFY_IS_APPROX(scalar3(), 7.0f + 13.0f);
+}
+
+static void test_1d()
+{
+ TensorFixedSize<float, Sizes<6> > vec1;
+ TensorFixedSize<float, Sizes<6>, RowMajor> vec2;
+
+ VERIFY_IS_EQUAL((vec1.size()), 6);
+ // VERIFY_IS_EQUAL((vec1.dimensions()[0]), 6);
+ // VERIFY_IS_EQUAL((vec1.dimension(0)), 6);
+
+ vec1(0) = 4.0; vec2(0) = 0.0;
+ vec1(1) = 8.0; vec2(1) = 1.0;
+ vec1(2) = 15.0; vec2(2) = 2.0;
+ vec1(3) = 16.0; vec2(3) = 3.0;
+ vec1(4) = 23.0; vec2(4) = 4.0;
+ vec1(5) = 42.0; vec2(5) = 5.0;
+
+ // Test against shallow copy.
+ TensorFixedSize<float, Sizes<6> > copy = vec1;
+ VERIFY_IS_NOT_EQUAL(vec1.data(), copy.data());
+ for (int i = 0; i < 6; ++i) {
+ VERIFY_IS_APPROX(vec1(i), copy(i));
+ }
+ copy = vec1;
+ VERIFY_IS_NOT_EQUAL(vec1.data(), copy.data());
+ for (int i = 0; i < 6; ++i) {
+ VERIFY_IS_APPROX(vec1(i), copy(i));
+ }
+
+ TensorFixedSize<float, Sizes<6> > vec3 = vec1.sqrt();
+ TensorFixedSize<float, Sizes<6>, RowMajor> vec4 = vec2.sqrt();
+
+ VERIFY_IS_EQUAL((vec3.size()), 6);
+ VERIFY_IS_EQUAL(vec3.rank(), 1);
+ // VERIFY_IS_EQUAL((vec3.dimensions()[0]), 6);
+ // VERIFY_IS_EQUAL((vec3.dimension(0)), 6);
+
+ VERIFY_IS_APPROX(vec3(0), sqrtf(4.0));
+ VERIFY_IS_APPROX(vec3(1), sqrtf(8.0));
+ VERIFY_IS_APPROX(vec3(2), sqrtf(15.0));
+ VERIFY_IS_APPROX(vec3(3), sqrtf(16.0));
+ VERIFY_IS_APPROX(vec3(4), sqrtf(23.0));
+ VERIFY_IS_APPROX(vec3(5), sqrtf(42.0));
+
+ VERIFY_IS_APPROX(vec4(0), sqrtf(0.0));
+ VERIFY_IS_APPROX(vec4(1), sqrtf(1.0));
+ VERIFY_IS_APPROX(vec4(2), sqrtf(2.0));
+ VERIFY_IS_APPROX(vec4(3), sqrtf(3.0));
+ VERIFY_IS_APPROX(vec4(4), sqrtf(4.0));
+ VERIFY_IS_APPROX(vec4(5), sqrtf(5.0));
+
+ vec3 = vec1 + vec2;
+ VERIFY_IS_APPROX(vec3(0), 4.0f + 0.0f);
+ VERIFY_IS_APPROX(vec3(1), 8.0f + 1.0f);
+ VERIFY_IS_APPROX(vec3(2), 15.0f + 2.0f);
+ VERIFY_IS_APPROX(vec3(3), 16.0f + 3.0f);
+ VERIFY_IS_APPROX(vec3(4), 23.0f + 4.0f);
+ VERIFY_IS_APPROX(vec3(5), 42.0f + 5.0f);
+}
+
+static void test_tensor_map()
+{
+ TensorFixedSize<float, Sizes<6> > vec1;
+ TensorFixedSize<float, Sizes<6>, RowMajor> vec2;
+
+ vec1(0) = 4.0; vec2(0) = 0.0;
+ vec1(1) = 8.0; vec2(1) = 1.0;
+ vec1(2) = 15.0; vec2(2) = 2.0;
+ vec1(3) = 16.0; vec2(3) = 3.0;
+ vec1(4) = 23.0; vec2(4) = 4.0;
+ vec1(5) = 42.0; vec2(5) = 5.0;
+
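+  // vec3 is a TensorMap over caller-owned storage (data3); evaluating the
+  // expression below writes the result straight into that buffer.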
+ float data3[6];
+ TensorMap<TensorFixedSize<float, Sizes<6> > > vec3(data3, 6);
+ vec3 = vec1.sqrt() + vec2;
+
+ VERIFY_IS_APPROX(vec3(0), sqrtf(4.0));
+ VERIFY_IS_APPROX(vec3(1), sqrtf(8.0) + 1.0f);
+ VERIFY_IS_APPROX(vec3(2), sqrtf(15.0) + 2.0f);
+ VERIFY_IS_APPROX(vec3(3), sqrtf(16.0) + 3.0f);
+ VERIFY_IS_APPROX(vec3(4), sqrtf(23.0) + 4.0f);
+ VERIFY_IS_APPROX(vec3(5), sqrtf(42.0) + 5.0f);
+}
+
+static void test_2d()
+{
+ float data1[6];
+ TensorMap<TensorFixedSize<float, Sizes<2, 3> > > mat1(data1,2,3);
+ float data2[6];
+ TensorMap<TensorFixedSize<float, Sizes<2, 3>, RowMajor> > mat2(data2,2,3);
+
+ VERIFY_IS_EQUAL((mat1.size()), 2*3);
+ VERIFY_IS_EQUAL(mat1.rank(), 2);
+ // VERIFY_IS_EQUAL((mat1.dimension(0)), 2);
+ // VERIFY_IS_EQUAL((mat1.dimension(1)), 3);
+
+ mat1(0,0) = 0.0;
+ mat1(0,1) = 1.0;
+ mat1(0,2) = 2.0;
+ mat1(1,0) = 3.0;
+ mat1(1,1) = 4.0;
+ mat1(1,2) = 5.0;
+
+ mat2(0,0) = -0.0;
+ mat2(0,1) = -1.0;
+ mat2(0,2) = -2.0;
+ mat2(1,0) = -3.0;
+ mat2(1,1) = -4.0;
+ mat2(1,2) = -5.0;
+
+ TensorFixedSize<float, Sizes<2, 3> > mat3;
+ TensorFixedSize<float, Sizes<2, 3>, RowMajor> mat4;
+ mat3 = mat1.abs();
+ mat4 = mat2.abs();
+
+ VERIFY_IS_EQUAL((mat3.size()), 2*3);
+ // VERIFY_IS_EQUAL((mat3.dimension(0)), 2);
+ // VERIFY_IS_EQUAL((mat3.dimension(1)), 3);
+
+ VERIFY_IS_APPROX(mat3(0,0), 0.0f);
+ VERIFY_IS_APPROX(mat3(0,1), 1.0f);
+ VERIFY_IS_APPROX(mat3(0,2), 2.0f);
+ VERIFY_IS_APPROX(mat3(1,0), 3.0f);
+ VERIFY_IS_APPROX(mat3(1,1), 4.0f);
+ VERIFY_IS_APPROX(mat3(1,2), 5.0f);
+
+ VERIFY_IS_APPROX(mat4(0,0), 0.0f);
+ VERIFY_IS_APPROX(mat4(0,1), 1.0f);
+ VERIFY_IS_APPROX(mat4(0,2), 2.0f);
+ VERIFY_IS_APPROX(mat4(1,0), 3.0f);
+ VERIFY_IS_APPROX(mat4(1,1), 4.0f);
+ VERIFY_IS_APPROX(mat4(1,2), 5.0f);
+}
+
+static void test_3d()
+{
+ TensorFixedSize<float, Sizes<2, 3, 7> > mat1;
+ TensorFixedSize<float, Sizes<2, 3, 7>, RowMajor> mat2;
+
+ VERIFY_IS_EQUAL((mat1.size()), 2*3*7);
+ VERIFY_IS_EQUAL(mat1.rank(), 3);
+ // VERIFY_IS_EQUAL((mat1.dimension(0)), 2);
+ // VERIFY_IS_EQUAL((mat1.dimension(1)), 3);
+ // VERIFY_IS_EQUAL((mat1.dimension(2)), 7);
+
+ float val = 0.0f;
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 7; ++k) {
+ mat1(i,j,k) = val;
+ mat2(i,j,k) = val;
+ val += 1.0f;
+ }
+ }
+ }
+
+ TensorFixedSize<float, Sizes<2, 3, 7> > mat3;
+ mat3 = mat1.sqrt();
+ TensorFixedSize<float, Sizes<2, 3, 7>, RowMajor> mat4;
+ mat4 = mat2.sqrt();
+
+ VERIFY_IS_EQUAL((mat3.size()), 2*3*7);
+ // VERIFY_IS_EQUAL((mat3.dimension(0)), 2);
+ // VERIFY_IS_EQUAL((mat3.dimension(1)), 3);
+ // VERIFY_IS_EQUAL((mat3.dimension(2)), 7);
+
+
+ val = 0.0f;
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 7; ++k) {
+ VERIFY_IS_APPROX(mat3(i,j,k), sqrtf(val));
+ VERIFY_IS_APPROX(mat4(i,j,k), sqrtf(val));
+ val += 1.0f;
+ }
+ }
+ }
+}
+
+
+static void test_array()
+{
+ TensorFixedSize<float, Sizes<2, 3, 7> > mat1;
+ float val = 0.0f;
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 7; ++k) {
+ mat1(i,j,k) = val;
+ val += 1.0f;
+ }
+ }
+ }
+
+ TensorFixedSize<float, Sizes<2, 3, 7> > mat3;
+ mat3 = mat1.pow(3.5f);
+
+ val = 0.0f;
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 7; ++k) {
+ VERIFY_IS_APPROX(mat3(i,j,k), powf(val, 3.5f));
+ val += 1.0f;
+ }
+ }
+ }
+}
+
+void test_cxx11_tensor_fixed_size()
+{
+ CALL_SUBTEST(test_0d());
+ CALL_SUBTEST(test_1d());
+ CALL_SUBTEST(test_tensor_map());
+ CALL_SUBTEST(test_2d());
+ CALL_SUBTEST(test_3d());
+ CALL_SUBTEST(test_array());
+}
diff --git a/unsupported/test/cxx11_tensor_forced_eval.cpp b/unsupported/test/cxx11_tensor_forced_eval.cpp
new file mode 100644
index 000000000..45d7345e9
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_forced_eval.cpp
@@ -0,0 +1,79 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+
+#include <Eigen/Core>
+#include <Eigen/CXX11/Tensor>
+
+using Eigen::MatrixXf;
+using Eigen::Tensor;
+
+static void test_simple()
+{
+ MatrixXf m1(3,3);
+ MatrixXf m2(3,3);
+ m1.setRandom();
+ m2.setRandom();
+
+ TensorMap<Tensor<float, 2> > mat1(m1.data(), 3,3);
+ TensorMap<Tensor<float, 2> > mat2(m2.data(), 3,3);
+
+ Tensor<float, 2> mat3(3,3);
+ mat3 = mat1;
+
+ typedef Tensor<float, 1>::DimensionPair DimPair;
+ Eigen::array<DimPair, 1> dims;
+ dims[0] = DimPair(1, 0);
+
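+  // mat3 appears on both sides of the assignment, so .eval() forces the
+  // contraction into a temporary first; otherwise the assignment would
+  // overwrite its own input while it is still being read.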
+ mat3 = mat3.contract(mat2, dims).eval();
+
+ VERIFY_IS_APPROX(mat3(0, 0), (m1*m2).eval()(0,0));
+ VERIFY_IS_APPROX(mat3(0, 1), (m1*m2).eval()(0,1));
+ VERIFY_IS_APPROX(mat3(0, 2), (m1*m2).eval()(0,2));
+ VERIFY_IS_APPROX(mat3(1, 0), (m1*m2).eval()(1,0));
+ VERIFY_IS_APPROX(mat3(1, 1), (m1*m2).eval()(1,1));
+ VERIFY_IS_APPROX(mat3(1, 2), (m1*m2).eval()(1,2));
+ VERIFY_IS_APPROX(mat3(2, 0), (m1*m2).eval()(2,0));
+ VERIFY_IS_APPROX(mat3(2, 1), (m1*m2).eval()(2,1));
+ VERIFY_IS_APPROX(mat3(2, 2), (m1*m2).eval()(2,2));
+}
+
+
+static void test_const()
+{
+ MatrixXf input(3,3);
+ input.setRandom();
+ MatrixXf output = input;
+ output.rowwise() -= input.colwise().maxCoeff();
+
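+  // Reduce over dimension 0 (the per-column maximum), reshape the result to
+  // (1, 3) and broadcast it back to (3, 3); .eval() materializes the
+  // reduction so it is computed once rather than per output coefficient.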
+ Eigen::array<int, 1> depth_dim;
+ depth_dim[0] = 0;
+ Tensor<float, 2>::Dimensions dims2d;
+ dims2d[0] = 1;
+ dims2d[1] = 3;
+ Eigen::array<int, 2> bcast;
+ bcast[0] = 3;
+ bcast[1] = 1;
+ const TensorMap<Tensor<const float, 2> > input_tensor(input.data(), 3, 3);
+  Tensor<float, 2> output_tensor = (input_tensor - input_tensor.maximum(depth_dim).eval().reshape(dims2d).broadcast(bcast));
+
+ for (int i = 0; i < 3; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ VERIFY_IS_APPROX(output(i, j), output_tensor(i, j));
+ }
+ }
+}
+
+
+void test_cxx11_tensor_forced_eval()
+{
+ CALL_SUBTEST(test_simple());
+ CALL_SUBTEST(test_const());
+}
diff --git a/unsupported/test/cxx11_tensor_forced_eval_sycl.cpp b/unsupported/test/cxx11_tensor_forced_eval_sycl.cpp
new file mode 100644
index 000000000..5690da723
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_forced_eval_sycl.cpp
@@ -0,0 +1,70 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2016
+// Mehdi Goli Codeplay Software Ltd.
+// Ralph Potter Codeplay Software Ltd.
+// Luke Iwanski Codeplay Software Ltd.
+// Contact: <eigen@codeplay.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#define EIGEN_TEST_NO_LONGDOUBLE
+#define EIGEN_TEST_NO_COMPLEX
+#define EIGEN_TEST_FUNC cxx11_tensor_forced_eval_sycl
+#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int
+#define EIGEN_USE_SYCL
+
+#include "main.h"
+#include <unsupported/Eigen/CXX11/Tensor>
+
+using Eigen::Tensor;
+
+void test_forced_eval_sycl(const Eigen::SyclDevice &sycl_device) {
+
+ int sizeDim1 = 100;
+ int sizeDim2 = 200;
+ int sizeDim3 = 200;
+ Eigen::array<int, 3> tensorRange = {{sizeDim1, sizeDim2, sizeDim3}};
+ Eigen::Tensor<float, 3> in1(tensorRange);
+ Eigen::Tensor<float, 3> in2(tensorRange);
+ Eigen::Tensor<float, 3> out(tensorRange);
+
+ float * gpu_in1_data = static_cast<float*>(sycl_device.allocate(in1.dimensions().TotalSize()*sizeof(float)));
+ float * gpu_in2_data = static_cast<float*>(sycl_device.allocate(in2.dimensions().TotalSize()*sizeof(float)));
+ float * gpu_out_data = static_cast<float*>(sycl_device.allocate(out.dimensions().TotalSize()*sizeof(float)));
+
+ in1 = in1.random() + in1.constant(10.0f);
+ in2 = in2.random() + in2.constant(10.0f);
+
+  // Create TensorMaps over the raw device buffers.
+ Eigen::TensorMap<Eigen::Tensor<float, 3>> gpu_in1(gpu_in1_data, tensorRange);
+ Eigen::TensorMap<Eigen::Tensor<float, 3>> gpu_in2(gpu_in2_data, tensorRange);
+ Eigen::TensorMap<Eigen::Tensor<float, 3>> gpu_out(gpu_out_data, tensorRange);
+  sycl_device.memcpyHostToDevice(gpu_in1_data, in1.data(), (in1.dimensions().TotalSize())*sizeof(float));
+  sycl_device.memcpyHostToDevice(gpu_in2_data, in2.data(), (in2.dimensions().TotalSize())*sizeof(float));
+ /// c=(a+b)*b
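+  // .eval() forces the intermediate (a+b) into a device-side temporary
+  // before the multiply, exercising the forced-eval path on SYCL.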
+  gpu_out.device(sycl_device) = (gpu_in1 + gpu_in2).eval() * gpu_in2;
+  sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data, (out.dimensions().TotalSize())*sizeof(float));
+ for (int i = 0; i < sizeDim1; ++i) {
+ for (int j = 0; j < sizeDim2; ++j) {
+ for (int k = 0; k < sizeDim3; ++k) {
+ VERIFY_IS_APPROX(out(i, j, k),
+ (in1(i, j, k) + in2(i, j, k)) * in2(i, j, k));
+ }
+ }
+ }
+ printf("(a+b)*b Test Passed\n");
+ sycl_device.deallocate(gpu_in1_data);
+ sycl_device.deallocate(gpu_in2_data);
+ sycl_device.deallocate(gpu_out_data);
+
+}
+
+void test_cxx11_tensor_forced_eval_sycl() {
+ cl::sycl::gpu_selector s;
+ Eigen::SyclDevice sycl_device(s);
+ CALL_SUBTEST(test_forced_eval_sycl(sycl_device));
+}
diff --git a/unsupported/test/cxx11_tensor_generator.cpp b/unsupported/test/cxx11_tensor_generator.cpp
new file mode 100644
index 000000000..dcb928714
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_generator.cpp
@@ -0,0 +1,91 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2015 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+
+#include <Eigen/CXX11/Tensor>
+
+struct Generator1D {
+ Generator1D() { }
+
+ float operator()(const array<Eigen::DenseIndex, 1>& coordinates) const {
+ return coordinates[0];
+ }
+};
+
+template <int DataLayout>
+static void test_1D()
+{
+  Tensor<float, 1, DataLayout> vec(6);
+  Tensor<float, 1, DataLayout> result = vec.generate(Generator1D());
+
+ for (int i = 0; i < 6; ++i) {
+ VERIFY_IS_EQUAL(result(i), i);
+ }
+}
+
+
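+// Affine generator mapping (i, j) to 3*i + 11*j; the coefficients are
+// presumably chosen so that distinct coordinates of this small tensor
+// yield distinct values.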
+struct Generator2D {
+ Generator2D() { }
+
+ float operator()(const array<Eigen::DenseIndex, 2>& coordinates) const {
+ return 3 * coordinates[0] + 11 * coordinates[1];
+ }
+};
+
+template <int DataLayout>
+static void test_2D()
+{
+  Tensor<float, 2, DataLayout> matrix(5, 7);
+  Tensor<float, 2, DataLayout> result = matrix.generate(Generator2D());
+
+ for (int i = 0; i < 5; ++i) {
+    for (int j = 0; j < 7; ++j) {
+ VERIFY_IS_EQUAL(result(i, j), 3*i + 11*j);
+ }
+ }
+}
+
+
+template <int DataLayout>
+static void test_gaussian()
+{
+ int rows = 32;
+ int cols = 48;
+ array<float, 2> means;
+ means[0] = rows / 2.0f;
+ means[1] = cols / 2.0f;
+ array<float, 2> std_devs;
+ std_devs[0] = 3.14f;
+ std_devs[1] = 2.7f;
+ internal::GaussianGenerator<float, Eigen::DenseIndex, 2> gaussian_gen(means, std_devs);
+
+  Tensor<float, 2, DataLayout> matrix(rows, cols);
+  Tensor<float, 2, DataLayout> result = matrix.generate(gaussian_gen);
+
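+  // GaussianGenerator evaluates exp(-sum_k (x_k - mean_k)^2 / (2*sigma_k^2));
+  // the loop below recomputes that value by hand for every coordinate.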
+ for (int i = 0; i < rows; ++i) {
+ for (int j = 0; j < cols; ++j) {
+ float g_rows = powf(rows/2.0f - i, 2) / (3.14f * 3.14f) * 0.5f;
+ float g_cols = powf(cols/2.0f - j, 2) / (2.7f * 2.7f) * 0.5f;
+ float gaussian = expf(-g_rows - g_cols);
+ VERIFY_IS_EQUAL(result(i, j), gaussian);
+ }
+ }
+}
+
+
+void test_cxx11_tensor_generator()
+{
+ CALL_SUBTEST(test_1D<ColMajor>());
+ CALL_SUBTEST(test_1D<RowMajor>());
+ CALL_SUBTEST(test_2D<ColMajor>());
+ CALL_SUBTEST(test_2D<RowMajor>());
+ CALL_SUBTEST(test_gaussian<ColMajor>());
+ CALL_SUBTEST(test_gaussian<RowMajor>());
+}
diff --git a/unsupported/test/cxx11_tensor_ifft.cpp b/unsupported/test/cxx11_tensor_ifft.cpp
new file mode 100644
index 000000000..5fd88fa6c
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_ifft.cpp
@@ -0,0 +1,154 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Jianwei Cui <thucjw@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+#include <complex>
+#include <cmath>
+#include <Eigen/CXX11/Tensor>
+
+using Eigen::Tensor;
+
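+// Round-trip invariant: FFT_FORWARD followed by FFT_REVERSE along the same
+// axes reproduces the input, since the reverse transform applies the 1/N
+// normalization.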
+template <int DataLayout>
+static void test_1D_fft_ifft_invariant(int sequence_length) {
+ Tensor<double, 1, DataLayout> tensor(sequence_length);
+ tensor.setRandom();
+
+ array<int, 1> fft;
+ fft[0] = 0;
+
+ Tensor<std::complex<double>, 1, DataLayout> tensor_after_fft;
+ Tensor<std::complex<double>, 1, DataLayout> tensor_after_fft_ifft;
+
+ tensor_after_fft = tensor.template fft<Eigen::BothParts, Eigen::FFT_FORWARD>(fft);
+ tensor_after_fft_ifft = tensor_after_fft.template fft<Eigen::BothParts, Eigen::FFT_REVERSE>(fft);
+
+ VERIFY_IS_EQUAL(tensor_after_fft.dimension(0), sequence_length);
+ VERIFY_IS_EQUAL(tensor_after_fft_ifft.dimension(0), sequence_length);
+
+ for (int i = 0; i < sequence_length; ++i) {
+ VERIFY_IS_APPROX(static_cast<float>(tensor(i)), static_cast<float>(std::real(tensor_after_fft_ifft(i))));
+ }
+}
+
+template <int DataLayout>
+static void test_2D_fft_ifft_invariant(int dim0, int dim1) {
+ Tensor<double, 2, DataLayout> tensor(dim0, dim1);
+ tensor.setRandom();
+
+ array<int, 2> fft;
+ fft[0] = 0;
+ fft[1] = 1;
+
+ Tensor<std::complex<double>, 2, DataLayout> tensor_after_fft;
+ Tensor<std::complex<double>, 2, DataLayout> tensor_after_fft_ifft;
+
+ tensor_after_fft = tensor.template fft<Eigen::BothParts, Eigen::FFT_FORWARD>(fft);
+ tensor_after_fft_ifft = tensor_after_fft.template fft<Eigen::BothParts, Eigen::FFT_REVERSE>(fft);
+
+ VERIFY_IS_EQUAL(tensor_after_fft.dimension(0), dim0);
+ VERIFY_IS_EQUAL(tensor_after_fft.dimension(1), dim1);
+ VERIFY_IS_EQUAL(tensor_after_fft_ifft.dimension(0), dim0);
+ VERIFY_IS_EQUAL(tensor_after_fft_ifft.dimension(1), dim1);
+
+ for (int i = 0; i < dim0; ++i) {
+ for (int j = 0; j < dim1; ++j) {
+ //std::cout << "[" << i << "][" << j << "]" << " Original data: " << tensor(i,j) << " Transformed data:" << tensor_after_fft_ifft(i,j) << std::endl;
+ VERIFY_IS_APPROX(static_cast<float>(tensor(i,j)), static_cast<float>(std::real(tensor_after_fft_ifft(i,j))));
+ }
+ }
+}
+
+template <int DataLayout>
+static void test_3D_fft_ifft_invariant(int dim0, int dim1, int dim2) {
+ Tensor<double, 3, DataLayout> tensor(dim0, dim1, dim2);
+ tensor.setRandom();
+
+ array<int, 3> fft;
+ fft[0] = 0;
+ fft[1] = 1;
+ fft[2] = 2;
+
+ Tensor<std::complex<double>, 3, DataLayout> tensor_after_fft;
+ Tensor<std::complex<double>, 3, DataLayout> tensor_after_fft_ifft;
+
+ tensor_after_fft = tensor.template fft<Eigen::BothParts, Eigen::FFT_FORWARD>(fft);
+ tensor_after_fft_ifft = tensor_after_fft.template fft<Eigen::BothParts, Eigen::FFT_REVERSE>(fft);
+
+ VERIFY_IS_EQUAL(tensor_after_fft.dimension(0), dim0);
+ VERIFY_IS_EQUAL(tensor_after_fft.dimension(1), dim1);
+ VERIFY_IS_EQUAL(tensor_after_fft.dimension(2), dim2);
+ VERIFY_IS_EQUAL(tensor_after_fft_ifft.dimension(0), dim0);
+ VERIFY_IS_EQUAL(tensor_after_fft_ifft.dimension(1), dim1);
+ VERIFY_IS_EQUAL(tensor_after_fft_ifft.dimension(2), dim2);
+
+ for (int i = 0; i < dim0; ++i) {
+ for (int j = 0; j < dim1; ++j) {
+ for (int k = 0; k < dim2; ++k) {
+ VERIFY_IS_APPROX(static_cast<float>(tensor(i,j,k)), static_cast<float>(std::real(tensor_after_fft_ifft(i,j,k))));
+ }
+ }
+ }
+}
+
+template <int DataLayout>
+static void test_sub_fft_ifft_invariant(int dim0, int dim1, int dim2, int dim3) {
+ Tensor<double, 4, DataLayout> tensor(dim0, dim1, dim2, dim3);
+ tensor.setRandom();
+
+ array<int, 2> fft;
+ fft[0] = 2;
+ fft[1] = 0;
+
+ Tensor<std::complex<double>, 4, DataLayout> tensor_after_fft;
+ Tensor<double, 4, DataLayout> tensor_after_fft_ifft;
+
+ tensor_after_fft = tensor.template fft<Eigen::BothParts, Eigen::FFT_FORWARD>(fft);
+ tensor_after_fft_ifft = tensor_after_fft.template fft<Eigen::RealPart, Eigen::FFT_REVERSE>(fft);
+
+ VERIFY_IS_EQUAL(tensor_after_fft.dimension(0), dim0);
+ VERIFY_IS_EQUAL(tensor_after_fft.dimension(1), dim1);
+ VERIFY_IS_EQUAL(tensor_after_fft.dimension(2), dim2);
+ VERIFY_IS_EQUAL(tensor_after_fft.dimension(3), dim3);
+ VERIFY_IS_EQUAL(tensor_after_fft_ifft.dimension(0), dim0);
+ VERIFY_IS_EQUAL(tensor_after_fft_ifft.dimension(1), dim1);
+ VERIFY_IS_EQUAL(tensor_after_fft_ifft.dimension(2), dim2);
+ VERIFY_IS_EQUAL(tensor_after_fft_ifft.dimension(3), dim3);
+
+ for (int i = 0; i < dim0; ++i) {
+ for (int j = 0; j < dim1; ++j) {
+ for (int k = 0; k < dim2; ++k) {
+ for (int l = 0; l < dim3; ++l) {
+ VERIFY_IS_APPROX(static_cast<float>(tensor(i,j,k,l)), static_cast<float>(tensor_after_fft_ifft(i,j,k,l)));
+ }
+ }
+ }
+ }
+}
+
+void test_cxx11_tensor_ifft() {
+ CALL_SUBTEST(test_1D_fft_ifft_invariant<ColMajor>(4));
+ CALL_SUBTEST(test_1D_fft_ifft_invariant<ColMajor>(16));
+ CALL_SUBTEST(test_1D_fft_ifft_invariant<ColMajor>(32));
+ CALL_SUBTEST(test_1D_fft_ifft_invariant<ColMajor>(1024*1024));
+
+ CALL_SUBTEST(test_2D_fft_ifft_invariant<ColMajor>(4,4));
+ CALL_SUBTEST(test_2D_fft_ifft_invariant<ColMajor>(8,16));
+ CALL_SUBTEST(test_2D_fft_ifft_invariant<ColMajor>(16,32));
+ CALL_SUBTEST(test_2D_fft_ifft_invariant<ColMajor>(1024,1024));
+
+ CALL_SUBTEST(test_3D_fft_ifft_invariant<ColMajor>(4,4,4));
+ CALL_SUBTEST(test_3D_fft_ifft_invariant<ColMajor>(8,16,32));
+ CALL_SUBTEST(test_3D_fft_ifft_invariant<ColMajor>(16,4,8));
+ CALL_SUBTEST(test_3D_fft_ifft_invariant<ColMajor>(256,256,256));
+
+ CALL_SUBTEST(test_sub_fft_ifft_invariant<ColMajor>(4,4,4,4));
+ CALL_SUBTEST(test_sub_fft_ifft_invariant<ColMajor>(8,16,32,64));
+ CALL_SUBTEST(test_sub_fft_ifft_invariant<ColMajor>(16,4,8,12));
+ CALL_SUBTEST(test_sub_fft_ifft_invariant<ColMajor>(64,64,64,64));
+}
diff --git a/unsupported/test/cxx11_tensor_image_patch.cpp b/unsupported/test/cxx11_tensor_image_patch.cpp
new file mode 100644
index 000000000..475c59651
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_image_patch.cpp
@@ -0,0 +1,757 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+
+#include <Eigen/CXX11/Tensor>
+
+using Eigen::Tensor;
+
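+// For a ColMajor input shaped (depth, rows, cols, batch),
+// extract_image_patches returns (depth, patch_rows, patch_cols, num_patches,
+// batch); a RowMajor result uses the reverse ordering. The tests below check
+// both layouts against each other.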
+void test_simple_patch()
+{
+ Tensor<float, 4> tensor(2,3,5,7);
+ tensor.setRandom();
+ Tensor<float, 4, RowMajor> tensor_row_major = tensor.swap_layout();
+ VERIFY_IS_EQUAL(tensor.dimension(0), tensor_row_major.dimension(3));
+ VERIFY_IS_EQUAL(tensor.dimension(1), tensor_row_major.dimension(2));
+ VERIFY_IS_EQUAL(tensor.dimension(2), tensor_row_major.dimension(1));
+ VERIFY_IS_EQUAL(tensor.dimension(3), tensor_row_major.dimension(0));
+
+ // Single pixel patch: ColMajor
+ Tensor<float, 5> single_pixel_patch;
+ single_pixel_patch = tensor.extract_image_patches(1, 1);
+ VERIFY_IS_EQUAL(single_pixel_patch.dimension(0), 2);
+ VERIFY_IS_EQUAL(single_pixel_patch.dimension(1), 1);
+ VERIFY_IS_EQUAL(single_pixel_patch.dimension(2), 1);
+ VERIFY_IS_EQUAL(single_pixel_patch.dimension(3), 3*5);
+ VERIFY_IS_EQUAL(single_pixel_patch.dimension(4), 7);
+
+ // Single pixel patch: RowMajor
+ Tensor<float, 5, RowMajor> single_pixel_patch_row_major;
+ single_pixel_patch_row_major = tensor_row_major.extract_image_patches(1, 1);
+ VERIFY_IS_EQUAL(single_pixel_patch_row_major.dimension(0), 7);
+ VERIFY_IS_EQUAL(single_pixel_patch_row_major.dimension(1), 3*5);
+ VERIFY_IS_EQUAL(single_pixel_patch_row_major.dimension(2), 1);
+ VERIFY_IS_EQUAL(single_pixel_patch_row_major.dimension(3), 1);
+ VERIFY_IS_EQUAL(single_pixel_patch_row_major.dimension(4), 2);
+
+ for (int i = 0; i < tensor.size(); ++i) {
+ // ColMajor
+ if (tensor.data()[i] != single_pixel_patch.data()[i]) {
+ std::cout << "Mismatch detected at index " << i << " : "
+ << tensor.data()[i] << " vs " << single_pixel_patch.data()[i]
+ << std::endl;
+ }
+ VERIFY_IS_EQUAL(single_pixel_patch.data()[i], tensor.data()[i]);
+ // RowMajor
+ if (tensor_row_major.data()[i] != single_pixel_patch_row_major.data()[i]) {
+ std::cout << "Mismatch detected at index " << i << " : "
+ << tensor.data()[i] << " vs "
+ << single_pixel_patch_row_major.data()[i] << std::endl;
+ }
+ VERIFY_IS_EQUAL(single_pixel_patch_row_major.data()[i],
+ tensor_row_major.data()[i]);
+ VERIFY_IS_EQUAL(tensor.data()[i], tensor_row_major.data()[i]);
+ VERIFY_IS_EQUAL(single_pixel_patch.data()[i],
+ single_pixel_patch_row_major.data()[i]);
+ }
+
+ // Entire image patch: ColMajor
+ Tensor<float, 5> entire_image_patch;
+ entire_image_patch = tensor.extract_image_patches(3, 5);
+ VERIFY_IS_EQUAL(entire_image_patch.dimension(0), 2);
+ VERIFY_IS_EQUAL(entire_image_patch.dimension(1), 3);
+ VERIFY_IS_EQUAL(entire_image_patch.dimension(2), 5);
+ VERIFY_IS_EQUAL(entire_image_patch.dimension(3), 3*5);
+ VERIFY_IS_EQUAL(entire_image_patch.dimension(4), 7);
+
+ // Entire image patch: RowMajor
+ Tensor<float, 5, RowMajor> entire_image_patch_row_major;
+ entire_image_patch_row_major = tensor_row_major.extract_image_patches(3, 5);
+ VERIFY_IS_EQUAL(entire_image_patch_row_major.dimension(0), 7);
+ VERIFY_IS_EQUAL(entire_image_patch_row_major.dimension(1), 3*5);
+ VERIFY_IS_EQUAL(entire_image_patch_row_major.dimension(2), 5);
+ VERIFY_IS_EQUAL(entire_image_patch_row_major.dimension(3), 3);
+ VERIFY_IS_EQUAL(entire_image_patch_row_major.dimension(4), 2);
+
+ for (int i = 0; i < 3; ++i) {
+ for (int j = 0; j < 5; ++j) {
+ int patchId = i+3*j;
+ for (int r = 0; r < 3; ++r) {
+ for (int c = 0; c < 5; ++c) {
+ for (int d = 0; d < 2; ++d) {
+ for (int b = 0; b < 7; ++b) {
+ float expected = 0.0f;
+ float expected_row_major = 0.0f;
+ if (r-1+i >= 0 && c-2+j >= 0 && r-1+i < 3 && c-2+j < 5) {
+ expected = tensor(d, r-1+i, c-2+j, b);
+ expected_row_major = tensor_row_major(b, c-2+j, r-1+i, d);
+ }
+ // ColMajor
+ if (entire_image_patch(d, r, c, patchId, b) != expected) {
+ std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl;
+ }
+ VERIFY_IS_EQUAL(entire_image_patch(d, r, c, patchId, b), expected);
+ // RowMajor
+ if (entire_image_patch_row_major(b, patchId, c, r, d) !=
+ expected_row_major) {
+ std::cout << "Mismatch detected at index i=" << i << " j=" << j
+ << " r=" << r << " c=" << c << " d=" << d << " b=" << b
+ << std::endl;
+ }
+ VERIFY_IS_EQUAL(entire_image_patch_row_major(b, patchId, c, r, d),
+ expected_row_major);
+ // Check that ColMajor and RowMajor agree.
+ VERIFY_IS_EQUAL(expected, expected_row_major);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // 2D patch: ColMajor
+ Tensor<float, 5> twod_patch;
+ twod_patch = tensor.extract_image_patches(2, 2);
+ VERIFY_IS_EQUAL(twod_patch.dimension(0), 2);
+ VERIFY_IS_EQUAL(twod_patch.dimension(1), 2);
+ VERIFY_IS_EQUAL(twod_patch.dimension(2), 2);
+ VERIFY_IS_EQUAL(twod_patch.dimension(3), 3*5);
+ VERIFY_IS_EQUAL(twod_patch.dimension(4), 7);
+
+ // 2D patch: RowMajor
+ Tensor<float, 5, RowMajor> twod_patch_row_major;
+ twod_patch_row_major = tensor_row_major.extract_image_patches(2, 2);
+ VERIFY_IS_EQUAL(twod_patch_row_major.dimension(0), 7);
+ VERIFY_IS_EQUAL(twod_patch_row_major.dimension(1), 3*5);
+ VERIFY_IS_EQUAL(twod_patch_row_major.dimension(2), 2);
+ VERIFY_IS_EQUAL(twod_patch_row_major.dimension(3), 2);
+ VERIFY_IS_EQUAL(twod_patch_row_major.dimension(4), 2);
+
+
+ // Based on the calculation described in TensorTraits.h, padding happens to be 0.
+ int row_padding = 0;
+ int col_padding = 0;
+ int stride = 1;
+
+ for (int i = 0; i < 3; ++i) {
+ for (int j = 0; j < 5; ++j) {
+ int patchId = i+3*j;
+ for (int r = 0; r < 2; ++r) {
+ for (int c = 0; c < 2; ++c) {
+ for (int d = 0; d < 2; ++d) {
+ for (int b = 0; b < 7; ++b) {
+ float expected = 0.0f;
+ float expected_row_major = 0.0f;
+ int row_offset = r*stride + i - row_padding;
+ int col_offset = c*stride + j - col_padding;
+ // ColMajor
+ if (row_offset >= 0 && col_offset >= 0 && row_offset < tensor.dimension(1) && col_offset < tensor.dimension(2)) {
+ expected = tensor(d, row_offset, col_offset, b);
+ }
+ if (twod_patch(d, r, c, patchId, b) != expected) {
+ std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl;
+ }
+ VERIFY_IS_EQUAL(twod_patch(d, r, c, patchId, b), expected);
+
+ // RowMajor
+ if (row_offset >= 0 && col_offset >= 0 && row_offset < tensor_row_major.dimension(2) && col_offset < tensor_row_major.dimension(1)) {
+ expected_row_major = tensor_row_major(b, col_offset, row_offset, d);
+
+ }
+ if (twod_patch_row_major(b, patchId, c, r, d) != expected_row_major) {
+ std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl;
+ }
+ VERIFY_IS_EQUAL(twod_patch_row_major(b, patchId, c, r, d), expected_row_major);
+ // Check that ColMajor and RowMajor agree.
+ VERIFY_IS_EQUAL(expected, expected_row_major);
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+// Verifies VALID padding (no padding) with incrementing values.
+void test_patch_padding_valid()
+{
+ int input_depth = 3;
+ int input_rows = 3;
+ int input_cols = 3;
+ int input_batches = 1;
+  int ksize = 2;  // Patch rows and cols passed to extract_image_patches().
+  int stride = 2; // Only equal row and col strides are supported.
+ Tensor<float, 4> tensor(input_depth, input_rows, input_cols, input_batches);
+ // Initializes tensor with incrementing numbers.
+ for (int i = 0; i < tensor.size(); ++i) {
+ tensor.data()[i] = i + 1;
+ }
+ // ColMajor
+ Tensor<float, 5> result = tensor.extract_image_patches(ksize, ksize, stride, stride, 1, 1, PADDING_VALID);
+
+ VERIFY_IS_EQUAL(result.dimension(0), input_depth); // depth
+ VERIFY_IS_EQUAL(result.dimension(1), ksize); // kernel rows
+ VERIFY_IS_EQUAL(result.dimension(2), ksize); // kernel cols
+ VERIFY_IS_EQUAL(result.dimension(3), 1); // number of patches
+ VERIFY_IS_EQUAL(result.dimension(4), input_batches); // number of batches
+
+ // RowMajor
+ Tensor<float, 4, RowMajor> tensor_row_major = tensor.swap_layout();
+ VERIFY_IS_EQUAL(tensor.dimension(0), tensor_row_major.dimension(3));
+ VERIFY_IS_EQUAL(tensor.dimension(1), tensor_row_major.dimension(2));
+ VERIFY_IS_EQUAL(tensor.dimension(2), tensor_row_major.dimension(1));
+ VERIFY_IS_EQUAL(tensor.dimension(3), tensor_row_major.dimension(0));
+
+ Tensor<float, 5, RowMajor> result_row_major = tensor_row_major.extract_image_patches(ksize, ksize, stride, stride, 1, 1, PADDING_VALID);
+ VERIFY_IS_EQUAL(result.dimension(0), result_row_major.dimension(4));
+ VERIFY_IS_EQUAL(result.dimension(1), result_row_major.dimension(3));
+ VERIFY_IS_EQUAL(result.dimension(2), result_row_major.dimension(2));
+ VERIFY_IS_EQUAL(result.dimension(3), result_row_major.dimension(1));
+ VERIFY_IS_EQUAL(result.dimension(4), result_row_major.dimension(0));
+
+ // No padding is carried out.
+ int row_padding = 0;
+ int col_padding = 0;
+
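+  // Visit every patch start position that fits entirely inside the input.
+  // Patches are enumerated with the row position varying fastest, so
+  // patchId = rowPatchIdx + numRowPatches * colPatchIdx.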
+  for (int i = 0; i + ksize <= input_rows; i += stride) { // patch row starts
+    for (int j = 0; j + ksize <= input_cols; j += stride) { // patch col starts
+      int patchId = i/stride + ((input_rows - ksize)/stride + 1) * (j/stride);
+ for (int r = 0; r < ksize; ++r) { // patch rows
+ for (int c = 0; c < ksize; ++c) { // patch cols
+ for (int d = 0; d < input_depth; ++d) { // depth
+ for (int b = 0; b < input_batches; ++b) { // batch
+ float expected = 0.0f;
+ float expected_row_major = 0.0f;
+ int row_offset = r + i - row_padding;
+ int col_offset = c + j - col_padding;
+ if (row_offset >= 0 && col_offset >= 0 && row_offset < input_rows && col_offset < input_cols) {
+ expected = tensor(d, row_offset, col_offset, b);
+ expected_row_major = tensor_row_major(b, col_offset, row_offset, d);
+ }
+ // ColMajor
+ if (result(d, r, c, patchId, b) != expected) {
+ std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl;
+ }
+ VERIFY_IS_EQUAL(result(d, r, c, patchId, b), expected);
+ // RowMajor
+ if (result_row_major(b, patchId, c, r, d) != expected_row_major) {
+ std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl;
+ }
+ VERIFY_IS_EQUAL(result_row_major(b, patchId, c, r, d), expected_row_major);
+ // Check that ColMajor and RowMajor agree.
+ VERIFY_IS_EQUAL(expected, expected_row_major);
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+// Verifies VALID padding (no padding) with the same value.
+void test_patch_padding_valid_same_value()
+{
+ int input_depth = 1;
+ int input_rows = 5;
+ int input_cols = 5;
+ int input_batches = 2;
+  int ksize = 3;  // Patch rows and cols passed to extract_image_patches().
+  int stride = 2; // Only equal row and col strides are supported.
+ // ColMajor
+ Tensor<float, 4> tensor(input_depth, input_rows, input_cols, input_batches);
+ tensor = tensor.constant(11.0f);
+ Tensor<float, 5> result = tensor.extract_image_patches(ksize, ksize, stride, stride, 1, 1, PADDING_VALID);
+
+ VERIFY_IS_EQUAL(result.dimension(0), input_depth); // depth
+ VERIFY_IS_EQUAL(result.dimension(1), ksize); // kernel rows
+ VERIFY_IS_EQUAL(result.dimension(2), ksize); // kernel cols
+ VERIFY_IS_EQUAL(result.dimension(3), 4); // number of patches
+ VERIFY_IS_EQUAL(result.dimension(4), input_batches); // number of batches
+
+ // RowMajor
+ Tensor<float, 4, RowMajor> tensor_row_major = tensor.swap_layout();
+ VERIFY_IS_EQUAL(tensor.dimension(0), tensor_row_major.dimension(3));
+ VERIFY_IS_EQUAL(tensor.dimension(1), tensor_row_major.dimension(2));
+ VERIFY_IS_EQUAL(tensor.dimension(2), tensor_row_major.dimension(1));
+ VERIFY_IS_EQUAL(tensor.dimension(3), tensor_row_major.dimension(0));
+
+ Tensor<float, 5, RowMajor> result_row_major = tensor_row_major.extract_image_patches(ksize, ksize, stride, stride, 1, 1, PADDING_VALID);
+ VERIFY_IS_EQUAL(result.dimension(0), result_row_major.dimension(4));
+ VERIFY_IS_EQUAL(result.dimension(1), result_row_major.dimension(3));
+ VERIFY_IS_EQUAL(result.dimension(2), result_row_major.dimension(2));
+ VERIFY_IS_EQUAL(result.dimension(3), result_row_major.dimension(1));
+ VERIFY_IS_EQUAL(result.dimension(4), result_row_major.dimension(0));
+
+ // No padding is carried out.
+ int row_padding = 0;
+ int col_padding = 0;
+
+  for (int i = 0; i + ksize <= input_rows; i += stride) { // patch row starts
+    for (int j = 0; j + ksize <= input_cols; j += stride) { // patch col starts
+      int patchId = i/stride + ((input_rows - ksize)/stride + 1) * (j/stride);
+ for (int r = 0; r < ksize; ++r) { // patch rows
+ for (int c = 0; c < ksize; ++c) { // patch cols
+ for (int d = 0; d < input_depth; ++d) { // depth
+ for (int b = 0; b < input_batches; ++b) { // batch
+ float expected = 0.0f;
+ float expected_row_major = 0.0f;
+ int row_offset = r + i - row_padding;
+ int col_offset = c + j - col_padding;
+ if (row_offset >= 0 && col_offset >= 0 && row_offset < input_rows && col_offset < input_cols) {
+ expected = tensor(d, row_offset, col_offset, b);
+ expected_row_major = tensor_row_major(b, col_offset, row_offset, d);
+ }
+ // ColMajor
+ if (result(d, r, c, patchId, b) != expected) {
+ std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl;
+ }
+ VERIFY_IS_EQUAL(result(d, r, c, patchId, b), expected);
+ // RowMajor
+ if (result_row_major(b, patchId, c, r, d) != expected_row_major) {
+ std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl;
+ }
+ VERIFY_IS_EQUAL(result_row_major(b, patchId, c, r, d), expected_row_major);
+ // Check that ColMajor and RowMajor agree.
+ VERIFY_IS_EQUAL(expected, expected_row_major);
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+// Verifies SAME padding.
+void test_patch_padding_same()
+{
+ int input_depth = 3;
+ int input_rows = 4;
+ int input_cols = 2;
+ int input_batches = 1;
+  int ksize = 2;  // Patch rows and cols passed to extract_image_patches().
+  int stride = 2; // Only equal row and col strides are supported.
+ // ColMajor
+ Tensor<float, 4> tensor(input_depth, input_rows, input_cols, input_batches);
+ // Initializes tensor with incrementing numbers.
+ for (int i = 0; i < tensor.size(); ++i) {
+ tensor.data()[i] = i + 1;
+ }
+ Tensor<float, 5> result = tensor.extract_image_patches(ksize, ksize, stride, stride, PADDING_SAME);
+
+ VERIFY_IS_EQUAL(result.dimension(0), input_depth); // depth
+ VERIFY_IS_EQUAL(result.dimension(1), ksize); // kernel rows
+ VERIFY_IS_EQUAL(result.dimension(2), ksize); // kernel cols
+ VERIFY_IS_EQUAL(result.dimension(3), 2); // number of patches
+ VERIFY_IS_EQUAL(result.dimension(4), input_batches); // number of batches
+
+ // RowMajor
+ Tensor<float, 4, RowMajor> tensor_row_major = tensor.swap_layout();
+ VERIFY_IS_EQUAL(tensor.dimension(0), tensor_row_major.dimension(3));
+ VERIFY_IS_EQUAL(tensor.dimension(1), tensor_row_major.dimension(2));
+ VERIFY_IS_EQUAL(tensor.dimension(2), tensor_row_major.dimension(1));
+ VERIFY_IS_EQUAL(tensor.dimension(3), tensor_row_major.dimension(0));
+
+ Tensor<float, 5, RowMajor> result_row_major = tensor_row_major.extract_image_patches(ksize, ksize, stride, stride, PADDING_SAME);
+ VERIFY_IS_EQUAL(result.dimension(0), result_row_major.dimension(4));
+ VERIFY_IS_EQUAL(result.dimension(1), result_row_major.dimension(3));
+ VERIFY_IS_EQUAL(result.dimension(2), result_row_major.dimension(2));
+ VERIFY_IS_EQUAL(result.dimension(3), result_row_major.dimension(1));
+ VERIFY_IS_EQUAL(result.dimension(4), result_row_major.dimension(0));
+
+ // Based on the calculation described in TensorTraits.h, padding happens to be
+ // 0.
+ int row_padding = 0;
+ int col_padding = 0;
+
+  for (int i = 0; i + ksize <= input_rows; i += stride) { // patch row starts
+    for (int j = 0; j + ksize <= input_cols; j += stride) { // patch col starts
+      int patchId = i/stride + ((input_rows - ksize)/stride + 1) * (j/stride);
+ for (int r = 0; r < ksize; ++r) { // patch rows
+ for (int c = 0; c < ksize; ++c) { // patch cols
+ for (int d = 0; d < input_depth; ++d) { // depth
+ for (int b = 0; b < input_batches; ++b) { // batch
+ float expected = 0.0f;
+ float expected_row_major = 0.0f;
+            int row_offset = r + i - row_padding;
+            int col_offset = c + j - col_padding;
+ if (row_offset >= 0 && col_offset >= 0 && row_offset < input_rows && col_offset < input_cols) {
+ expected = tensor(d, row_offset, col_offset, b);
+ expected_row_major = tensor_row_major(b, col_offset, row_offset, d);
+ }
+ // ColMajor
+ if (result(d, r, c, patchId, b) != expected) {
+ std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl;
+ }
+ VERIFY_IS_EQUAL(result(d, r, c, patchId, b), expected);
+ // RowMajor
+ if (result_row_major(b, patchId, c, r, d) != expected_row_major) {
+ std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl;
+ }
+ VERIFY_IS_EQUAL(result_row_major(b, patchId, c, r, d), expected_row_major);
+ // Check that ColMajor and RowMajor agree.
+ VERIFY_IS_EQUAL(expected, expected_row_major);
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+void test_patch_no_extra_dim()
+{
+ Tensor<float, 3> tensor(2,3,5);
+ tensor.setRandom();
+ Tensor<float, 3, RowMajor> tensor_row_major = tensor.swap_layout();
+ VERIFY_IS_EQUAL(tensor.dimension(0), tensor_row_major.dimension(2));
+ VERIFY_IS_EQUAL(tensor.dimension(1), tensor_row_major.dimension(1));
+ VERIFY_IS_EQUAL(tensor.dimension(2), tensor_row_major.dimension(0));
+
+ // Single pixel patch: ColMajor
+ Tensor<float, 4> single_pixel_patch;
+ single_pixel_patch = tensor.extract_image_patches(1, 1);
+ VERIFY_IS_EQUAL(single_pixel_patch.dimension(0), 2);
+ VERIFY_IS_EQUAL(single_pixel_patch.dimension(1), 1);
+ VERIFY_IS_EQUAL(single_pixel_patch.dimension(2), 1);
+ VERIFY_IS_EQUAL(single_pixel_patch.dimension(3), 3*5);
+
+ // Single pixel patch: RowMajor
+ Tensor<float, 4, RowMajor> single_pixel_patch_row_major;
+ single_pixel_patch_row_major = tensor_row_major.extract_image_patches(1, 1);
+ VERIFY_IS_EQUAL(single_pixel_patch_row_major.dimension(0), 3*5);
+ VERIFY_IS_EQUAL(single_pixel_patch_row_major.dimension(1), 1);
+ VERIFY_IS_EQUAL(single_pixel_patch_row_major.dimension(2), 1);
+ VERIFY_IS_EQUAL(single_pixel_patch_row_major.dimension(3), 2);
+
+ for (int i = 0; i < tensor.size(); ++i) {
+ // ColMajor
+ if (tensor.data()[i] != single_pixel_patch.data()[i]) {
+ std::cout << "Mismatch detected at index " << i << " : " << tensor.data()[i] << " vs " << single_pixel_patch.data()[i] << std::endl;
+ }
+ VERIFY_IS_EQUAL(single_pixel_patch.data()[i], tensor.data()[i]);
+ // RowMajor
+ if (tensor_row_major.data()[i] != single_pixel_patch_row_major.data()[i]) {
+ std::cout << "Mismatch detected at index " << i << " : "
+ << tensor.data()[i] << " vs "
+ << single_pixel_patch_row_major.data()[i] << std::endl;
+ }
+ VERIFY_IS_EQUAL(single_pixel_patch_row_major.data()[i],
+ tensor_row_major.data()[i]);
+ VERIFY_IS_EQUAL(tensor.data()[i], tensor_row_major.data()[i]);
+ VERIFY_IS_EQUAL(single_pixel_patch.data()[i],
+ single_pixel_patch_row_major.data()[i]);
+ }
+
+ // Entire image patch: ColMajor
+ Tensor<float, 4> entire_image_patch;
+ entire_image_patch = tensor.extract_image_patches(3, 5);
+ VERIFY_IS_EQUAL(entire_image_patch.dimension(0), 2);
+ VERIFY_IS_EQUAL(entire_image_patch.dimension(1), 3);
+ VERIFY_IS_EQUAL(entire_image_patch.dimension(2), 5);
+ VERIFY_IS_EQUAL(entire_image_patch.dimension(3), 3*5);
+
+ // Entire image patch: RowMajor
+ Tensor<float, 4, RowMajor> entire_image_patch_row_major;
+ entire_image_patch_row_major = tensor_row_major.extract_image_patches(3, 5);
+ VERIFY_IS_EQUAL(entire_image_patch_row_major.dimension(0), 3*5);
+ VERIFY_IS_EQUAL(entire_image_patch_row_major.dimension(1), 5);
+ VERIFY_IS_EQUAL(entire_image_patch_row_major.dimension(2), 3);
+ VERIFY_IS_EQUAL(entire_image_patch_row_major.dimension(3), 2);
+
+ for (int i = 0; i < 3; ++i) {
+ for (int j = 0; j < 5; ++j) {
+ int patchId = i+3*j;
+ for (int r = 0; r < 3; ++r) {
+ for (int c = 0; c < 5; ++c) {
+ for (int d = 0; d < 2; ++d) {
+ float expected = 0.0f;
+ float expected_row_major = 0.0f;
+ if (r-1+i >= 0 && c-2+j >= 0 && r-1+i < 3 && c-2+j < 5) {
+ expected = tensor(d, r-1+i, c-2+j);
+ expected_row_major = tensor_row_major(c-2+j, r-1+i, d);
+ }
+ // ColMajor
+ if (entire_image_patch(d, r, c, patchId) != expected) {
+ std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << std::endl;
+ }
+ VERIFY_IS_EQUAL(entire_image_patch(d, r, c, patchId), expected);
+ // RowMajor
+ if (entire_image_patch_row_major(patchId, c, r, d) !=
+ expected_row_major) {
+ std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << std::endl;
+ }
+ VERIFY_IS_EQUAL(entire_image_patch_row_major(patchId, c, r, d),
+ expected_row_major);
+ // Check that ColMajor and RowMajor agree.
+ VERIFY_IS_EQUAL(expected, expected_row_major);
+ }
+ }
+ }
+ }
+ }
+
+ // 2D patch: ColMajor
+ Tensor<float, 4> twod_patch;
+ twod_patch = tensor.extract_image_patches(2, 2);
+ VERIFY_IS_EQUAL(twod_patch.dimension(0), 2);
+ VERIFY_IS_EQUAL(twod_patch.dimension(1), 2);
+ VERIFY_IS_EQUAL(twod_patch.dimension(2), 2);
+ VERIFY_IS_EQUAL(twod_patch.dimension(3), 3*5);
+
+ // 2D patch: RowMajor
+ Tensor<float, 4, RowMajor> twod_patch_row_major;
+ twod_patch_row_major = tensor_row_major.extract_image_patches(2, 2);
+ VERIFY_IS_EQUAL(twod_patch_row_major.dimension(0), 3*5);
+ VERIFY_IS_EQUAL(twod_patch_row_major.dimension(1), 2);
+ VERIFY_IS_EQUAL(twod_patch_row_major.dimension(2), 2);
+ VERIFY_IS_EQUAL(twod_patch_row_major.dimension(3), 2);
+
+ // Based on the calculation described in TensorTraits.h, padding happens to be 0.
+ int row_padding = 0;
+ int col_padding = 0;
+ int stride = 1;
+
+ for (int i = 0; i < 3; ++i) {
+ for (int j = 0; j < 5; ++j) {
+ int patchId = i+3*j;
+ for (int r = 0; r < 2; ++r) {
+ for (int c = 0; c < 2; ++c) {
+ for (int d = 0; d < 2; ++d) {
+ float expected = 0.0f;
+ float expected_row_major = 0.0f;
+ int row_offset = r*stride + i - row_padding;
+ int col_offset = c*stride + j - col_padding;
+ // ColMajor
+ if (row_offset >= 0 && col_offset >= 0 && row_offset < tensor.dimension(1) && col_offset < tensor.dimension(2)) {
+ expected = tensor(d, row_offset, col_offset);
+ }
+ if (twod_patch(d, r, c, patchId) != expected) {
+ std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << std::endl;
+ }
+ VERIFY_IS_EQUAL(twod_patch(d, r, c, patchId), expected);
+ // RowMajor
+ if (row_offset >= 0 && col_offset >= 0 && row_offset < tensor_row_major.dimension(1) && col_offset < tensor_row_major.dimension(0)) {
+ expected_row_major = tensor_row_major(col_offset, row_offset, d);
+ }
+ if (twod_patch_row_major(patchId, c, r, d) != expected_row_major) {
+ std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << std::endl;
+ }
+ VERIFY_IS_EQUAL(twod_patch_row_major(patchId, c, r, d), expected_row_major);
+ // Check that ColMajor and RowMajor agree.
+ VERIFY_IS_EQUAL(expected, expected_row_major);
+ }
+ }
+ }
+ }
+ }
+}
+
+void test_imagenet_patches()
+{
+ // Test the code on typical configurations used by the 'imagenet' benchmarks at
+ // https://github.com/soumith/convnet-benchmarks
+ // ColMajor
+ Tensor<float, 4> l_in(3, 128, 128, 16);
+ l_in.setRandom();
+ Tensor<float, 5> l_out = l_in.extract_image_patches(11, 11);
+ VERIFY_IS_EQUAL(l_out.dimension(0), 3);
+ VERIFY_IS_EQUAL(l_out.dimension(1), 11);
+ VERIFY_IS_EQUAL(l_out.dimension(2), 11);
+ VERIFY_IS_EQUAL(l_out.dimension(3), 128*128);
+ VERIFY_IS_EQUAL(l_out.dimension(4), 16);
+
+ // RowMajor
+ Tensor<float, 5, RowMajor> l_out_row_major = l_in.swap_layout().extract_image_patches(11, 11);
+ VERIFY_IS_EQUAL(l_out_row_major.dimension(0), 16);
+ VERIFY_IS_EQUAL(l_out_row_major.dimension(1), 128*128);
+ VERIFY_IS_EQUAL(l_out_row_major.dimension(2), 11);
+ VERIFY_IS_EQUAL(l_out_row_major.dimension(3), 11);
+ VERIFY_IS_EQUAL(l_out_row_major.dimension(4), 3);
+
+ for (int b = 0; b < 16; ++b) {
+ for (int i = 0; i < 128; ++i) {
+ for (int j = 0; j < 128; ++j) {
+ int patchId = i+128*j;
+ for (int c = 0; c < 11; ++c) {
+ for (int r = 0; r < 11; ++r) {
+ for (int d = 0; d < 3; ++d) {
+ float expected = 0.0f;
+ if (r-5+i >= 0 && c-5+j >= 0 && r-5+i < 128 && c-5+j < 128) {
+ expected = l_in(d, r-5+i, c-5+j, b);
+ }
+ // ColMajor
+ if (l_out(d, r, c, patchId, b) != expected) {
+ std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl;
+ }
+ VERIFY_IS_EQUAL(l_out(d, r, c, patchId, b), expected);
+ // RowMajor
+ if (l_out_row_major(b, patchId, c, r, d) !=
+ expected) {
+ std::cout << "Mismatch detected at index i=" << i << " j=" << j
+ << " r=" << r << " c=" << c << " d=" << d << " b=" << b
+ << std::endl;
+ }
+ VERIFY_IS_EQUAL(l_out_row_major(b, patchId, c, r, d),
+ expected);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // ColMajor
+ l_in.resize(16, 64, 64, 32);
+ l_in.setRandom();
+ l_out = l_in.extract_image_patches(9, 9);
+ VERIFY_IS_EQUAL(l_out.dimension(0), 16);
+ VERIFY_IS_EQUAL(l_out.dimension(1), 9);
+ VERIFY_IS_EQUAL(l_out.dimension(2), 9);
+ VERIFY_IS_EQUAL(l_out.dimension(3), 64*64);
+ VERIFY_IS_EQUAL(l_out.dimension(4), 32);
+
+ // RowMajor
+ l_out_row_major = l_in.swap_layout().extract_image_patches(9, 9);
+ VERIFY_IS_EQUAL(l_out_row_major.dimension(0), 32);
+ VERIFY_IS_EQUAL(l_out_row_major.dimension(1), 64*64);
+ VERIFY_IS_EQUAL(l_out_row_major.dimension(2), 9);
+ VERIFY_IS_EQUAL(l_out_row_major.dimension(3), 9);
+ VERIFY_IS_EQUAL(l_out_row_major.dimension(4), 16);
+
+ for (int b = 0; b < 32; ++b) {
+ for (int i = 0; i < 64; ++i) {
+ for (int j = 0; j < 64; ++j) {
+ int patchId = i+64*j;
+ for (int c = 0; c < 9; ++c) {
+ for (int r = 0; r < 9; ++r) {
+ for (int d = 0; d < 16; ++d) {
+ float expected = 0.0f;
+ if (r-4+i >= 0 && c-4+j >= 0 && r-4+i < 64 && c-4+j < 64) {
+ expected = l_in(d, r-4+i, c-4+j, b);
+ }
+ // ColMajor
+ if (l_out(d, r, c, patchId, b) != expected) {
+ std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl;
+ }
+ VERIFY_IS_EQUAL(l_out(d, r, c, patchId, b), expected);
+ // RowMajor
+ if (l_out_row_major(b, patchId, c, r, d) != expected) {
+ std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl;
+ }
+ VERIFY_IS_EQUAL(l_out_row_major(b, patchId, c, r, d), expected);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // ColMajor
+ l_in.resize(32, 16, 16, 32);
+ l_in.setRandom();
+ l_out = l_in.extract_image_patches(7, 7);
+ VERIFY_IS_EQUAL(l_out.dimension(0), 32);
+ VERIFY_IS_EQUAL(l_out.dimension(1), 7);
+ VERIFY_IS_EQUAL(l_out.dimension(2), 7);
+ VERIFY_IS_EQUAL(l_out.dimension(3), 16*16);
+ VERIFY_IS_EQUAL(l_out.dimension(4), 32);
+
+ // RowMajor
+ l_out_row_major = l_in.swap_layout().extract_image_patches(7, 7);
+ VERIFY_IS_EQUAL(l_out_row_major.dimension(0), 32);
+ VERIFY_IS_EQUAL(l_out_row_major.dimension(1), 16*16);
+ VERIFY_IS_EQUAL(l_out_row_major.dimension(2), 7);
+ VERIFY_IS_EQUAL(l_out_row_major.dimension(3), 7);
+ VERIFY_IS_EQUAL(l_out_row_major.dimension(4), 32);
+
+ for (int b = 0; b < 32; ++b) {
+ for (int i = 0; i < 16; ++i) {
+ for (int j = 0; j < 16; ++j) {
+ int patchId = i+16*j;
+ for (int c = 0; c < 7; ++c) {
+ for (int r = 0; r < 7; ++r) {
+ for (int d = 0; d < 32; ++d) {
+ float expected = 0.0f;
+ if (r-3+i >= 0 && c-3+j >= 0 && r-3+i < 16 && c-3+j < 16) {
+ expected = l_in(d, r-3+i, c-3+j, b);
+ }
+ // ColMajor
+ if (l_out(d, r, c, patchId, b) != expected) {
+ std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl;
+ }
+ VERIFY_IS_EQUAL(l_out(d, r, c, patchId, b), expected);
+ // RowMajor
+ if (l_out_row_major(b, patchId, c, r, d) != expected) {
+ std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl;
+ }
+ VERIFY_IS_EQUAL(l_out_row_major(b, patchId, c, r, d), expected);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // ColMajor
+ l_in.resize(64, 13, 13, 32);
+ l_in.setRandom();
+ l_out = l_in.extract_image_patches(3, 3);
+ VERIFY_IS_EQUAL(l_out.dimension(0), 64);
+ VERIFY_IS_EQUAL(l_out.dimension(1), 3);
+ VERIFY_IS_EQUAL(l_out.dimension(2), 3);
+ VERIFY_IS_EQUAL(l_out.dimension(3), 13*13);
+ VERIFY_IS_EQUAL(l_out.dimension(4), 32);
+
+ // RowMajor
+ l_out_row_major = l_in.swap_layout().extract_image_patches(3, 3);
+ VERIFY_IS_EQUAL(l_out_row_major.dimension(0), 32);
+ VERIFY_IS_EQUAL(l_out_row_major.dimension(1), 13*13);
+ VERIFY_IS_EQUAL(l_out_row_major.dimension(2), 3);
+ VERIFY_IS_EQUAL(l_out_row_major.dimension(3), 3);
+ VERIFY_IS_EQUAL(l_out_row_major.dimension(4), 64);
+
+ for (int b = 0; b < 32; ++b) {
+ for (int i = 0; i < 13; ++i) {
+ for (int j = 0; j < 13; ++j) {
+ int patchId = i+13*j;
+ for (int c = 0; c < 3; ++c) {
+ for (int r = 0; r < 3; ++r) {
+ for (int d = 0; d < 64; ++d) {
+ float expected = 0.0f;
+ if (r-1+i >= 0 && c-1+j >= 0 && r-1+i < 13 && c-1+j < 13) {
+ expected = l_in(d, r-1+i, c-1+j, b);
+ }
+ // ColMajor
+ if (l_out(d, r, c, patchId, b) != expected) {
+ std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl;
+ }
+ VERIFY_IS_EQUAL(l_out(d, r, c, patchId, b), expected);
+ // RowMajor
+ if (l_out_row_major(b, patchId, c, r, d) != expected) {
+ std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl;
+ }
+ VERIFY_IS_EQUAL(l_out_row_major(b, patchId, c, r, d), expected);
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+void test_cxx11_tensor_image_patch()
+{
+ CALL_SUBTEST_1(test_simple_patch());
+ CALL_SUBTEST_2(test_patch_no_extra_dim());
+ CALL_SUBTEST_3(test_patch_padding_valid());
+ CALL_SUBTEST_4(test_patch_padding_valid_same_value());
+ CALL_SUBTEST_5(test_patch_padding_same());
+ CALL_SUBTEST_6(test_imagenet_patches());
+}
diff --git a/unsupported/test/cxx11_tensor_index_list.cpp b/unsupported/test/cxx11_tensor_index_list.cpp
new file mode 100644
index 000000000..4cf5df666
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_index_list.cpp
@@ -0,0 +1,386 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+
+#include <Eigen/CXX11/Tensor>
+
+#ifdef EIGEN_HAS_INDEX_LIST
+
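+// IndexList carries its index values in the type, so the constexpr list
+// built by make_index_list() can be queried at compile time, as the static
+// asserts below verify.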
+static void test_static_index_list()
+{
+ Tensor<float, 4> tensor(2,3,5,7);
+ tensor.setRandom();
+
+ constexpr auto reduction_axis = make_index_list(0, 1, 2);
+ VERIFY_IS_EQUAL(internal::array_get<0>(reduction_axis), 0);
+ VERIFY_IS_EQUAL(internal::array_get<1>(reduction_axis), 1);
+ VERIFY_IS_EQUAL(internal::array_get<2>(reduction_axis), 2);
+ VERIFY_IS_EQUAL(static_cast<DenseIndex>(reduction_axis[0]), 0);
+ VERIFY_IS_EQUAL(static_cast<DenseIndex>(reduction_axis[1]), 1);
+ VERIFY_IS_EQUAL(static_cast<DenseIndex>(reduction_axis[2]), 2);
+
+ EIGEN_STATIC_ASSERT((internal::array_get<0>(reduction_axis) == 0), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT((internal::array_get<1>(reduction_axis) == 1), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT((internal::array_get<2>(reduction_axis) == 2), YOU_MADE_A_PROGRAMMING_MISTAKE);
+
+ Tensor<float, 1> result = tensor.sum(reduction_axis);
+ for (int i = 0; i < result.size(); ++i) {
+ float expected = 0.0f;
+ for (int j = 0; j < 2; ++j) {
+ for (int k = 0; k < 3; ++k) {
+ for (int l = 0; l < 5; ++l) {
+ expected += tensor(j,k,l,i);
+ }
+ }
+ }
+ VERIFY_IS_APPROX(result(i), expected);
+ }
+}
+
+
+static void test_type2index_list()
+{
+ Tensor<float, 5> tensor(2,3,5,7,11);
+ tensor.setRandom();
+ tensor += tensor.constant(10.0f);
+
+ typedef Eigen::IndexList<Eigen::type2index<0>> Dims0;
+ typedef Eigen::IndexList<Eigen::type2index<0>, Eigen::type2index<1>> Dims1;
+ typedef Eigen::IndexList<Eigen::type2index<0>, Eigen::type2index<1>, Eigen::type2index<2>> Dims2;
+ typedef Eigen::IndexList<Eigen::type2index<0>, Eigen::type2index<1>, Eigen::type2index<2>, Eigen::type2index<3>> Dims3;
+ typedef Eigen::IndexList<Eigen::type2index<0>, Eigen::type2index<1>, Eigen::type2index<2>, Eigen::type2index<3>, Eigen::type2index<4>> Dims4;
+
+#if 0
+ EIGEN_STATIC_ASSERT((internal::indices_statically_known_to_increase<Dims0>() == true), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT((internal::indices_statically_known_to_increase<Dims1>() == true), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT((internal::indices_statically_known_to_increase<Dims2>() == true), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT((internal::indices_statically_known_to_increase<Dims3>() == true), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT((internal::indices_statically_known_to_increase<Dims4>() == true), YOU_MADE_A_PROGRAMMING_MISTAKE);
+#endif
+
+ EIGEN_STATIC_ASSERT((internal::are_inner_most_dims<Dims0, 1, ColMajor>::value == true), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT((internal::are_inner_most_dims<Dims1, 2, ColMajor>::value == true), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT((internal::are_inner_most_dims<Dims2, 3, ColMajor>::value == true), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT((internal::are_inner_most_dims<Dims3, 4, ColMajor>::value == true), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT((internal::are_inner_most_dims<Dims4, 5, ColMajor>::value == true), YOU_MADE_A_PROGRAMMING_MISTAKE);
+
+ EIGEN_STATIC_ASSERT((internal::are_inner_most_dims<Dims0, 1, RowMajor>::value == true), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT((internal::are_inner_most_dims<Dims1, 2, RowMajor>::value == true), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT((internal::are_inner_most_dims<Dims2, 3, RowMajor>::value == true), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT((internal::are_inner_most_dims<Dims3, 4, RowMajor>::value == true), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT((internal::are_inner_most_dims<Dims4, 5, RowMajor>::value == true), YOU_MADE_A_PROGRAMMING_MISTAKE);
+
+ const Dims0 reduction_axis0;
+ Tensor<float, 4> result0 = tensor.sum(reduction_axis0);
+ for (int m = 0; m < 11; ++m) {
+ for (int l = 0; l < 7; ++l) {
+ for (int k = 0; k < 5; ++k) {
+ for (int j = 0; j < 3; ++j) {
+ float expected = 0.0f;
+ for (int i = 0; i < 2; ++i) {
+ expected += tensor(i,j,k,l,m);
+ }
+ VERIFY_IS_APPROX(result0(j,k,l,m), expected);
+ }
+ }
+ }
+ }
+
+ const Dims1 reduction_axis1;
+ Tensor<float, 3> result1 = tensor.sum(reduction_axis1);
+ for (int m = 0; m < 11; ++m) {
+ for (int l = 0; l < 7; ++l) {
+ for (int k = 0; k < 5; ++k) {
+ float expected = 0.0f;
+ for (int j = 0; j < 3; ++j) {
+ for (int i = 0; i < 2; ++i) {
+ expected += tensor(i,j,k,l,m);
+ }
+ }
+ VERIFY_IS_APPROX(result1(k,l,m), expected);
+ }
+ }
+ }
+
+ const Dims2 reduction_axis2;
+ Tensor<float, 2> result2 = tensor.sum(reduction_axis2);
+ for (int m = 0; m < 11; ++m) {
+ for (int l = 0; l < 7; ++l) {
+ float expected = 0.0f;
+ for (int k = 0; k < 5; ++k) {
+ for (int j = 0; j < 3; ++j) {
+ for (int i = 0; i < 2; ++i) {
+ expected += tensor(i,j,k,l,m);
+ }
+ }
+ }
+ VERIFY_IS_APPROX(result2(l,m), expected);
+ }
+ }
+
+ const Dims3 reduction_axis3;
+ Tensor<float, 1> result3 = tensor.sum(reduction_axis3);
+ for (int m = 0; m < 11; ++m) {
+ float expected = 0.0f;
+ for (int l = 0; l < 7; ++l) {
+ for (int k = 0; k < 5; ++k) {
+ for (int j = 0; j < 3; ++j) {
+ for (int i = 0; i < 2; ++i) {
+ expected += tensor(i,j,k,l,m);
+ }
+ }
+ }
+ }
+ VERIFY_IS_APPROX(result3(m), expected);
+ }
+
+ const Dims4 reduction_axis4;
+ Tensor<float, 0> result4 = tensor.sum(reduction_axis4);
+ float expected = 0.0f;
+ for (int m = 0; m < 11; ++m) {
+ for (int l = 0; l < 7; ++l) {
+ for (int k = 0; k < 5; ++k) {
+ for (int j = 0; j < 3; ++j) {
+ for (int i = 0; i < 2; ++i) {
+ expected += tensor(i,j,k,l,m);
+ }
+ }
+ }
+ }
+ }
+ VERIFY_IS_APPROX(result4(), expected);
+}
+
+
+static void test_type2indexpair_list()
+{
+ Tensor<float, 5> tensor(2,3,5,7,11);
+ tensor.setRandom();
+ tensor += tensor.constant(10.0f);
+
+ typedef Eigen::IndexPairList<Eigen::type2indexpair<0,10>> Dims0;
+ typedef Eigen::IndexPairList<Eigen::type2indexpair<0,10>, Eigen::type2indexpair<1,11>, Eigen::type2indexpair<2,12>> Dims2_a;
+ typedef Eigen::IndexPairList<Eigen::type2indexpair<0,10>, Eigen::IndexPair<DenseIndex>, Eigen::type2indexpair<2,12>> Dims2_b;
+ typedef Eigen::IndexPairList<Eigen::IndexPair<DenseIndex>, Eigen::type2indexpair<1,11>, Eigen::IndexPair<DenseIndex>> Dims2_c;
+
+ Dims0 d0;
+ Dims2_a d2_a;
+
+ Dims2_b d2_b;
+ d2_b.set(1, Eigen::IndexPair<DenseIndex>(1,11));
+
+ Dims2_c d2_c;
+  d2_c.set(0, Eigen::IndexPair<DenseIndex>(0,10));
+  d2_c.set(1, Eigen::IndexPair<DenseIndex>(1,11)); // index 1 is a compile-time pair; it can only be set to its static value.
+ d2_c.set(2, Eigen::IndexPair<DenseIndex>(2,12));
+
+ VERIFY_IS_EQUAL(d2_a[0].first, 0);
+ VERIFY_IS_EQUAL(d2_a[0].second, 10);
+ VERIFY_IS_EQUAL(d2_a[1].first, 1);
+ VERIFY_IS_EQUAL(d2_a[1].second, 11);
+ VERIFY_IS_EQUAL(d2_a[2].first, 2);
+ VERIFY_IS_EQUAL(d2_a[2].second, 12);
+
+ VERIFY_IS_EQUAL(d2_b[0].first, 0);
+ VERIFY_IS_EQUAL(d2_b[0].second, 10);
+ VERIFY_IS_EQUAL(d2_b[1].first, 1);
+ VERIFY_IS_EQUAL(d2_b[1].second, 11);
+ VERIFY_IS_EQUAL(d2_b[2].first, 2);
+ VERIFY_IS_EQUAL(d2_b[2].second, 12);
+
+ VERIFY_IS_EQUAL(d2_c[0].first, 0);
+ VERIFY_IS_EQUAL(d2_c[0].second, 10);
+ VERIFY_IS_EQUAL(d2_c[1].first, 1);
+ VERIFY_IS_EQUAL(d2_c[1].second, 11);
+ VERIFY_IS_EQUAL(d2_c[2].first, 2);
+ VERIFY_IS_EQUAL(d2_c[2].second, 12);
+
+ EIGEN_STATIC_ASSERT((d2_a.value_known_statically(0) == true), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT((d2_a.value_known_statically(1) == true), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT((d2_a.value_known_statically(2) == true), YOU_MADE_A_PROGRAMMING_MISTAKE);
+
+ EIGEN_STATIC_ASSERT((d2_b.value_known_statically(0) == true), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT((d2_b.value_known_statically(1) == false), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT((d2_b.value_known_statically(2) == true), YOU_MADE_A_PROGRAMMING_MISTAKE);
+
+ EIGEN_STATIC_ASSERT((d2_c.value_known_statically(0) == false), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT((d2_c.value_known_statically(1) == true), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT((d2_c.value_known_statically(2) == false), YOU_MADE_A_PROGRAMMING_MISTAKE);
+
+ EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_first_statically_eq<Dims0>(0, 0) == true), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_first_statically_eq<Dims0>(0, 1) == false), YOU_MADE_A_PROGRAMMING_MISTAKE);
+
+ EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_first_statically_eq<Dims2_a>(0, 0) == true), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_first_statically_eq<Dims2_a>(0, 1) == false), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_first_statically_eq<Dims2_a>(1, 1) == true), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_first_statically_eq<Dims2_a>(1, 2) == false), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_first_statically_eq<Dims2_a>(2, 2) == true), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_first_statically_eq<Dims2_a>(2, 3) == false), YOU_MADE_A_PROGRAMMING_MISTAKE);
+
+ EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_first_statically_eq<Dims2_b>(0, 0) == true), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_first_statically_eq<Dims2_b>(0, 1) == false), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_first_statically_eq<Dims2_b>(1, 1) == false), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_first_statically_eq<Dims2_b>(1, 2) == false), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_first_statically_eq<Dims2_b>(2, 2) == true), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_first_statically_eq<Dims2_b>(2, 3) == false), YOU_MADE_A_PROGRAMMING_MISTAKE);
+
+ EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_first_statically_eq<Dims2_c>(0, 0) == false), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_first_statically_eq<Dims2_c>(0, 1) == false), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_first_statically_eq<Dims2_c>(1, 1) == true), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_first_statically_eq<Dims2_c>(1, 2) == false), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_first_statically_eq<Dims2_c>(2, 2) == false), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_first_statically_eq<Dims2_c>(2, 3) == false), YOU_MADE_A_PROGRAMMING_MISTAKE);
+
+ EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_second_statically_eq<Dims0>(0, 10) == true), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_second_statically_eq<Dims0>(0, 11) == false), YOU_MADE_A_PROGRAMMING_MISTAKE);
+
+ EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_second_statically_eq<Dims2_a>(0, 10) == true), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_second_statically_eq<Dims2_a>(0, 11) == false), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_second_statically_eq<Dims2_a>(1, 11) == true), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_second_statically_eq<Dims2_a>(1, 12) == false), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_second_statically_eq<Dims2_a>(2, 12) == true), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_second_statically_eq<Dims2_a>(2, 13) == false), YOU_MADE_A_PROGRAMMING_MISTAKE);
+
+ EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_second_statically_eq<Dims2_b>(0, 10) == true), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_second_statically_eq<Dims2_b>(0, 11) == false), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_second_statically_eq<Dims2_b>(1, 11) == false), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_second_statically_eq<Dims2_b>(1, 12) == false), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_second_statically_eq<Dims2_b>(2, 12) == true), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_second_statically_eq<Dims2_b>(2, 13) == false), YOU_MADE_A_PROGRAMMING_MISTAKE);
+
+ EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_second_statically_eq<Dims2_c>(0, 10) == false), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_second_statically_eq<Dims2_c>(0, 11) == false), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_second_statically_eq<Dims2_c>(1, 11) == true), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_second_statically_eq<Dims2_c>(1, 12) == false), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_second_statically_eq<Dims2_c>(2, 12) == false), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_second_statically_eq<Dims2_c>(2, 13) == false), YOU_MADE_A_PROGRAMMING_MISTAKE);
+}
+
+
+static void test_dynamic_index_list()
+{
+ Tensor<float, 4> tensor(2,3,5,7);
+ tensor.setRandom();
+
+ int dim1 = 2;
+ int dim2 = 1;
+ int dim3 = 0;
+
+ auto reduction_axis = make_index_list(dim1, dim2, dim3);
+
+ VERIFY_IS_EQUAL(internal::array_get<0>(reduction_axis), 2);
+ VERIFY_IS_EQUAL(internal::array_get<1>(reduction_axis), 1);
+ VERIFY_IS_EQUAL(internal::array_get<2>(reduction_axis), 0);
+ VERIFY_IS_EQUAL(static_cast<DenseIndex>(reduction_axis[0]), 2);
+ VERIFY_IS_EQUAL(static_cast<DenseIndex>(reduction_axis[1]), 1);
+ VERIFY_IS_EQUAL(static_cast<DenseIndex>(reduction_axis[2]), 0);
+
+ Tensor<float, 1> result = tensor.sum(reduction_axis);
+ for (int i = 0; i < result.size(); ++i) {
+ float expected = 0.0f;
+ for (int j = 0; j < 2; ++j) {
+ for (int k = 0; k < 3; ++k) {
+ for (int l = 0; l < 5; ++l) {
+ expected += tensor(j,k,l,i);
+ }
+ }
+ }
+ VERIFY_IS_APPROX(result(i), expected);
+ }
+}
+
+static void test_mixed_index_list()
+{
+ Tensor<float, 4> tensor(2,3,5,7);
+ tensor.setRandom();
+
+ int dim2 = 1;
+ int dim4 = 3;
+
+ auto reduction_axis = make_index_list(0, dim2, 2, dim4);
+
+ VERIFY_IS_EQUAL(internal::array_get<0>(reduction_axis), 0);
+ VERIFY_IS_EQUAL(internal::array_get<1>(reduction_axis), 1);
+ VERIFY_IS_EQUAL(internal::array_get<2>(reduction_axis), 2);
+ VERIFY_IS_EQUAL(internal::array_get<3>(reduction_axis), 3);
+ VERIFY_IS_EQUAL(static_cast<DenseIndex>(reduction_axis[0]), 0);
+ VERIFY_IS_EQUAL(static_cast<DenseIndex>(reduction_axis[1]), 1);
+ VERIFY_IS_EQUAL(static_cast<DenseIndex>(reduction_axis[2]), 2);
+ VERIFY_IS_EQUAL(static_cast<DenseIndex>(reduction_axis[3]), 3);
+
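+  // The same reduction expressed three ways: the fully dynamic list above, a
+  // list mixing compile-time and runtime indices, and a fully static list.
+  // All three must produce the same sum.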
+ typedef IndexList<type2index<0>, int, type2index<2>, int> ReductionIndices;
+ ReductionIndices reduction_indices;
+ reduction_indices.set(1, 1);
+ reduction_indices.set(3, 3);
+ EIGEN_STATIC_ASSERT((internal::array_get<0>(reduction_indices) == 0), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT((internal::array_get<2>(reduction_indices) == 2), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT((internal::index_known_statically<ReductionIndices>(0) == true), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT((internal::index_known_statically<ReductionIndices>(2) == true), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT((internal::index_statically_eq<ReductionIndices>(0, 0) == true), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT((internal::index_statically_eq<ReductionIndices>(2, 2) == true), YOU_MADE_A_PROGRAMMING_MISTAKE);
+#if 0
+ EIGEN_STATIC_ASSERT((internal::all_indices_known_statically<ReductionIndices>() == false), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT((internal::indices_statically_known_to_increase<ReductionIndices>() == false), YOU_MADE_A_PROGRAMMING_MISTAKE);
+#endif
+
+ typedef IndexList<type2index<0>, type2index<1>, type2index<2>, type2index<3>> ReductionList;
+ ReductionList reduction_list;
+ EIGEN_STATIC_ASSERT((internal::index_statically_eq<ReductionList>(0, 0) == true), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT((internal::index_statically_eq<ReductionList>(1, 1) == true), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT((internal::index_statically_eq<ReductionList>(2, 2) == true), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT((internal::index_statically_eq<ReductionList>(3, 3) == true), YOU_MADE_A_PROGRAMMING_MISTAKE);
+#if 0
+ EIGEN_STATIC_ASSERT((internal::all_indices_known_statically<ReductionList>() == true), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT((internal::indices_statically_known_to_increase<ReductionList>() == true), YOU_MADE_A_PROGRAMMING_MISTAKE);
+#endif
+
+ Tensor<float, 0> result1 = tensor.sum(reduction_axis);
+ Tensor<float, 0> result2 = tensor.sum(reduction_indices);
+ Tensor<float, 0> result3 = tensor.sum(reduction_list);
+
+ float expected = 0.0f;
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 5; ++k) {
+ for (int l = 0; l < 7; ++l) {
+ expected += tensor(i,j,k,l);
+ }
+ }
+ }
+ }
+ VERIFY_IS_APPROX(result1(), expected);
+ VERIFY_IS_APPROX(result2(), expected);
+ VERIFY_IS_APPROX(result3(), expected);
+}
+
+
+static void test_dim_check()
+{
+ Eigen::IndexList<Eigen::type2index<1>, int> dim1;
+ dim1.set(1, 2);
+ Eigen::IndexList<Eigen::type2index<1>, int> dim2;
+ dim2.set(1, 2);
+ VERIFY(dimensions_match(dim1, dim2));
+}
+
+
+#endif
+
+void test_cxx11_tensor_index_list()
+{
+#ifdef EIGEN_HAS_INDEX_LIST
+ CALL_SUBTEST(test_static_index_list());
+ CALL_SUBTEST(test_type2index_list());
+ CALL_SUBTEST(test_type2indexpair_list());
+ CALL_SUBTEST(test_dynamic_index_list());
+ CALL_SUBTEST(test_mixed_index_list());
+ CALL_SUBTEST(test_dim_check());
+#endif
+}
diff --git a/unsupported/test/cxx11_tensor_inflation.cpp b/unsupported/test/cxx11_tensor_inflation.cpp
new file mode 100644
index 000000000..4997935e9
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_inflation.cpp
@@ -0,0 +1,81 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2015 Ke Yang <yangke@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+
+#include <Eigen/CXX11/Tensor>
+
+using Eigen::Tensor;
+
+template<int DataLayout>
+static void test_simple_inflation()
+{
+ Tensor<float, 4, DataLayout> tensor(2,3,5,7);
+ tensor.setRandom();
+ array<ptrdiff_t, 4> strides;
+
+ strides[0] = 1;
+ strides[1] = 1;
+ strides[2] = 1;
+ strides[3] = 1;
+
+ Tensor<float, 4, DataLayout> no_stride;
+ no_stride = tensor.inflate(strides);
+
+ VERIFY_IS_EQUAL(no_stride.dimension(0), 2);
+ VERIFY_IS_EQUAL(no_stride.dimension(1), 3);
+ VERIFY_IS_EQUAL(no_stride.dimension(2), 5);
+ VERIFY_IS_EQUAL(no_stride.dimension(3), 7);
+
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 5; ++k) {
+ for (int l = 0; l < 7; ++l) {
+ VERIFY_IS_EQUAL(tensor(i,j,k,l), no_stride(i,j,k,l));
+ }
+ }
+ }
+ }
+
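+  // Inflating with stride s expands a dimension of size n to (n-1)*s + 1,
+  // keeping the original values s apart and zero-filling the gaps:
+  // (2-1)*2+1 = 3, (3-1)*4+1 = 9, (5-1)*2+1 = 9, (7-1)*3+1 = 19.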
+ strides[0] = 2;
+ strides[1] = 4;
+ strides[2] = 2;
+ strides[3] = 3;
+ Tensor<float, 4, DataLayout> inflated;
+ inflated = tensor.inflate(strides);
+
+ VERIFY_IS_EQUAL(inflated.dimension(0), 3);
+ VERIFY_IS_EQUAL(inflated.dimension(1), 9);
+ VERIFY_IS_EQUAL(inflated.dimension(2), 9);
+ VERIFY_IS_EQUAL(inflated.dimension(3), 19);
+
+ for (int i = 0; i < 3; ++i) {
+ for (int j = 0; j < 9; ++j) {
+ for (int k = 0; k < 9; ++k) {
+ for (int l = 0; l < 19; ++l) {
+ if (i % 2 == 0 &&
+ j % 4 == 0 &&
+ k % 2 == 0 &&
+ l % 3 == 0) {
+ VERIFY_IS_EQUAL(inflated(i,j,k,l),
+ tensor(i/2, j/4, k/2, l/3));
+ } else {
+ VERIFY_IS_EQUAL(0, inflated(i,j,k,l));
+ }
+ }
+ }
+ }
+ }
+}
+
+void test_cxx11_tensor_inflation()
+{
+ CALL_SUBTEST(test_simple_inflation<ColMajor>());
+ CALL_SUBTEST(test_simple_inflation<RowMajor>());
+}
diff --git a/unsupported/test/cxx11_tensor_intdiv.cpp b/unsupported/test/cxx11_tensor_intdiv.cpp
new file mode 100644
index 000000000..8e2b70b75
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_intdiv.cpp
@@ -0,0 +1,147 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014-2015 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+
+#include <Eigen/CXX11/Tensor>
+
+
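+// TensorIntDivisor trades an integer division by a fixed divisor for a
+// cheaper multiply-and-shift sequence computed once at construction. Each
+// test below checks this fast path against the compiler's native division.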
+void test_signed_32bit()
+{
+ // Divide by one
+ const Eigen::internal::TensorIntDivisor<int32_t, false> div_by_one(1);
+
+ for (int32_t j = 0; j < 25000; ++j) {
+ const int32_t fast_div = j / div_by_one;
+ const int32_t slow_div = j / 1;
+ VERIFY_IS_EQUAL(fast_div, slow_div);
+ }
+
+ // Standard divide by 2 or more
+ for (int32_t i = 2; i < 25000; ++i) {
+ const Eigen::internal::TensorIntDivisor<int32_t, false> div(i);
+
+ for (int32_t j = 0; j < 25000; ++j) {
+ const int32_t fast_div = j / div;
+ const int32_t slow_div = j / i;
+ VERIFY_IS_EQUAL(fast_div, slow_div);
+ }
+ }
+
+ // Optimized divide by 2 or more
+ for (int32_t i = 2; i < 25000; ++i) {
+ const Eigen::internal::TensorIntDivisor<int32_t, true> div(i);
+
+ for (int32_t j = 0; j < 25000; ++j) {
+ const int32_t fast_div = j / div;
+ const int32_t slow_div = j / i;
+ VERIFY_IS_EQUAL(fast_div, slow_div);
+ }
+ }
+}
+
+
+void test_unsigned_32bit()
+{
+ for (uint32_t i = 1; i < 25000; ++i) {
+ const Eigen::internal::TensorIntDivisor<uint32_t> div(i);
+
+ for (uint32_t j = 0; j < 25000; ++j) {
+ const uint32_t fast_div = j / div;
+ const uint32_t slow_div = j / i;
+ VERIFY_IS_EQUAL(fast_div, slow_div);
+ }
+ }
+}
+
+
+void test_signed_64bit()
+{
+ for (int64_t i = 1; i < 25000; ++i) {
+ const Eigen::internal::TensorIntDivisor<int64_t> div(i);
+
+ for (int64_t j = 0; j < 25000; ++j) {
+ const int64_t fast_div = j / div;
+ const int64_t slow_div = j / i;
+ VERIFY_IS_EQUAL(fast_div, slow_div);
+ }
+ }
+}
+
+
+void test_unsigned_64bit()
+{
+ for (uint64_t i = 1; i < 25000; ++i) {
+ const Eigen::internal::TensorIntDivisor<uint64_t> div(i);
+
+ for (uint64_t j = 0; j < 25000; ++j) {
+ const uint64_t fast_div = j / div;
+ const uint64_t slow_div = j / i;
+ VERIFY_IS_EQUAL(fast_div, slow_div);
+ }
+ }
+}
+
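+// Divisors and numerators at and around powers of two exercise the boundary
+// cases of the multiplier computation.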
+void test_powers_32bit() {
+ for (int expon = 1; expon < 31; expon++) {
+ int32_t div = (1 << expon);
+ for (int num_expon = 0; num_expon < 32; num_expon++) {
+ int32_t start_num = (1 << num_expon) - 100;
+ int32_t end_num = (1 << num_expon) + 100;
+ if (start_num < 0)
+ start_num = 0;
+ for (int32_t num = start_num; num < end_num; num++) {
+        Eigen::internal::TensorIntDivisor<int32_t> divider(div);
+ int32_t result = num/div;
+ int32_t result_op = divider.divide(num);
+ VERIFY_IS_EQUAL(result_op, result);
+ }
+ }
+ }
+}
+
+void test_powers_64bit() {
+ for (int expon = 0; expon < 63; expon++) {
+ int64_t div = (1ull << expon);
+ for (int num_expon = 0; num_expon < 63; num_expon++) {
+ int64_t start_num = (1ull << num_expon) - 10;
+ int64_t end_num = (1ull << num_expon) + 10;
+ if (start_num < 0)
+ start_num = 0;
+ for (int64_t num = start_num; num < end_num; num++) {
+ Eigen::internal::TensorIntDivisor<int64_t> divider(div);
+ int64_t result = num/div;
+ int64_t result_op = divider.divide(num);
+ VERIFY_IS_EQUAL(result_op, result);
+ }
+ }
+ }
+}
+
+void test_specific() {
+ // A particular combination that was previously failing
+ int64_t div = 209715200;
+ int64_t num = 3238002688ll;
+ Eigen::internal::TensorIntDivisor<int64_t> divider(div);
+ int64_t result = num/div;
+ int64_t result_op = divider.divide(num);
+ VERIFY_IS_EQUAL(result, result_op);
+}
+
+void test_cxx11_tensor_intdiv()
+{
+ CALL_SUBTEST_1(test_signed_32bit());
+ CALL_SUBTEST_2(test_unsigned_32bit());
+ CALL_SUBTEST_3(test_signed_64bit());
+ CALL_SUBTEST_4(test_unsigned_64bit());
+ CALL_SUBTEST_5(test_powers_32bit());
+ CALL_SUBTEST_6(test_powers_64bit());
+ CALL_SUBTEST_7(test_specific());
+}
diff --git a/unsupported/test/cxx11_tensor_io.cpp b/unsupported/test/cxx11_tensor_io.cpp
new file mode 100644
index 000000000..489960529
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_io.cpp
@@ -0,0 +1,136 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+#include <sstream>
+#include <string>
+#include <Eigen/CXX11/Tensor>
+
+
+template<int DataLayout>
+static void test_output_0d()
+{
+ Tensor<int, 0, DataLayout> tensor;
+ tensor() = 123;
+
+ std::stringstream os;
+ os << tensor;
+
+ std::string expected("123");
+ VERIFY_IS_EQUAL(std::string(os.str()), expected);
+}
+
+
+template<int DataLayout>
+static void test_output_1d()
+{
+ Tensor<int, 1, DataLayout> tensor(5);
+ for (int i = 0; i < 5; ++i) {
+ tensor(i) = i;
+ }
+
+ std::stringstream os;
+ os << tensor;
+
+ std::string expected("0\n1\n2\n3\n4");
+ VERIFY_IS_EQUAL(std::string(os.str()), expected);
+
+ Eigen::Tensor<double,1,DataLayout> empty_tensor(0);
+ std::stringstream empty_os;
+ empty_os << empty_tensor;
+ std::string empty_string;
+ VERIFY_IS_EQUAL(std::string(empty_os.str()), empty_string);
+}
+
+
+template<int DataLayout>
+static void test_output_2d()
+{
+ Tensor<int, 2, DataLayout> tensor(5, 3);
+ for (int i = 0; i < 5; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ tensor(i, j) = i*j;
+ }
+ }
+
+ std::stringstream os;
+ os << tensor;
+
+ std::string expected("0 0 0\n0 1 2\n0 2 4\n0 3 6\n0 4 8");
+ VERIFY_IS_EQUAL(std::string(os.str()), expected);
+}
+
+
+template<int DataLayout>
+static void test_output_expr()
+{
+ Tensor<int, 1, DataLayout> tensor1(5);
+ Tensor<int, 1, DataLayout> tensor2(5);
+ for (int i = 0; i < 5; ++i) {
+ tensor1(i) = i;
+ tensor2(i) = 7;
+ }
+
+ std::stringstream os;
+ os << tensor1 + tensor2;
+
+ std::string expected(" 7\n 8\n 9\n10\n11");
+ VERIFY_IS_EQUAL(std::string(os.str()), expected);
+}
+
+
+template<int DataLayout>
+static void test_output_string()
+{
+ Tensor<std::string, 2, DataLayout> tensor(5, 3);
+ tensor.setConstant(std::string("foo"));
+
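+  // Print once straight to std::cout as a smoke test, then capture the same
+  // output in a stringstream for verification.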
+ std::cout << tensor << std::endl;
+
+ std::stringstream os;
+ os << tensor;
+
+ std::string expected("foo foo foo\nfoo foo foo\nfoo foo foo\nfoo foo foo\nfoo foo foo");
+ VERIFY_IS_EQUAL(std::string(os.str()), expected);
+}
+
+
+template<int DataLayout>
+static void test_output_const()
+{
+ Tensor<int, 1, DataLayout> tensor(5);
+ for (int i = 0; i < 5; ++i) {
+ tensor(i) = i;
+ }
+
+ TensorMap<Tensor<const int, 1, DataLayout> > tensor_map(tensor.data(), 5);
+
+ std::stringstream os;
+ os << tensor_map;
+
+ std::string expected("0\n1\n2\n3\n4");
+ VERIFY_IS_EQUAL(std::string(os.str()), expected);
+}
+
+
+void test_cxx11_tensor_io()
+{
+ CALL_SUBTEST(test_output_0d<ColMajor>());
+ CALL_SUBTEST(test_output_0d<RowMajor>());
+ CALL_SUBTEST(test_output_1d<ColMajor>());
+ CALL_SUBTEST(test_output_1d<RowMajor>());
+ CALL_SUBTEST(test_output_2d<ColMajor>());
+ CALL_SUBTEST(test_output_2d<RowMajor>());
+ CALL_SUBTEST(test_output_expr<ColMajor>());
+ CALL_SUBTEST(test_output_expr<RowMajor>());
+ CALL_SUBTEST(test_output_string<ColMajor>());
+ CALL_SUBTEST(test_output_string<RowMajor>());
+ CALL_SUBTEST(test_output_const<ColMajor>());
+ CALL_SUBTEST(test_output_const<RowMajor>());
+}
diff --git a/unsupported/test/cxx11_tensor_layout_swap.cpp b/unsupported/test/cxx11_tensor_layout_swap.cpp
new file mode 100644
index 000000000..ae297a9da
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_layout_swap.cpp
@@ -0,0 +1,61 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+
+#include <Eigen/CXX11/Tensor>
+
+using Eigen::Tensor;
+
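+// swap_layout() reinterprets the same data with the opposite storage order:
+// the dimensions come out reversed, so element (i,j,k) of the ColMajor
+// tensor corresponds to (k,j,i) of its RowMajor view.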
+static void test_simple_swap()
+{
+ Tensor<float, 3, ColMajor> tensor(2,3,7);
+ tensor.setRandom();
+
+ Tensor<float, 3, RowMajor> tensor2 = tensor.swap_layout();
+ VERIFY_IS_EQUAL(tensor.dimension(0), tensor2.dimension(2));
+ VERIFY_IS_EQUAL(tensor.dimension(1), tensor2.dimension(1));
+ VERIFY_IS_EQUAL(tensor.dimension(2), tensor2.dimension(0));
+
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 7; ++k) {
+ VERIFY_IS_EQUAL(tensor(i,j,k), tensor2(k,j,i));
+ }
+ }
+ }
+}
+
+
+static void test_swap_as_lvalue()
+{
+ Tensor<float, 3, ColMajor> tensor(2,3,7);
+ tensor.setRandom();
+
+ Tensor<float, 3, RowMajor> tensor2(7,3,2);
+ tensor2.swap_layout() = tensor;
+ VERIFY_IS_EQUAL(tensor.dimension(0), tensor2.dimension(2));
+ VERIFY_IS_EQUAL(tensor.dimension(1), tensor2.dimension(1));
+ VERIFY_IS_EQUAL(tensor.dimension(2), tensor2.dimension(0));
+
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 7; ++k) {
+ VERIFY_IS_EQUAL(tensor(i,j,k), tensor2(k,j,i));
+ }
+ }
+ }
+}
+
+
+void test_cxx11_tensor_layout_swap()
+{
+ CALL_SUBTEST(test_simple_swap());
+ CALL_SUBTEST(test_swap_as_lvalue());
+}
diff --git a/unsupported/test/cxx11_tensor_lvalue.cpp b/unsupported/test/cxx11_tensor_lvalue.cpp
new file mode 100644
index 000000000..071f5b406
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_lvalue.cpp
@@ -0,0 +1,42 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+
+#include <Eigen/CXX11/Tensor>
+
+using Eigen::Tensor;
+using Eigen::RowMajor;
+
+
+static void test_compound_assignment()
+{
+ Tensor<float, 3> mat1(2,3,7);
+ Tensor<float, 3> mat2(2,3,7);
+ Tensor<float, 3> mat3(2,3,7);
+
+ mat1.setRandom();
+ mat2.setRandom();
+ mat3 = mat1;
+ mat3 += mat2;
+
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 7; ++k) {
+ VERIFY_IS_APPROX(mat3(i,j,k), mat1(i,j,k) + mat2(i,j,k));
+ }
+ }
+ }
+}
+
+
+void test_cxx11_tensor_lvalue()
+{
+ CALL_SUBTEST(test_compound_assignment());
+}
diff --git a/unsupported/test/cxx11_tensor_map.cpp b/unsupported/test/cxx11_tensor_map.cpp
new file mode 100644
index 000000000..3db0ee7c0
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_map.cpp
@@ -0,0 +1,277 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+
+#include <Eigen/CXX11/Tensor>
+
+using Eigen::Tensor;
+using Eigen::RowMajor;
+
+static void test_0d()
+{
+ Tensor<int, 0> scalar1;
+ Tensor<int, 0, RowMajor> scalar2;
+
+ TensorMap<Tensor<const int, 0> > scalar3(scalar1.data());
+ TensorMap<Tensor<const int, 0, RowMajor> > scalar4(scalar2.data());
+
+ scalar1() = 7;
+ scalar2() = 13;
+
+ VERIFY_IS_EQUAL(scalar1.rank(), 0);
+ VERIFY_IS_EQUAL(scalar1.size(), 1);
+
+ VERIFY_IS_EQUAL(scalar3(), 7);
+ VERIFY_IS_EQUAL(scalar4(), 13);
+}
+
+static void test_1d()
+{
+ Tensor<int, 1> vec1(6);
+ Tensor<int, 1, RowMajor> vec2(6);
+
+ TensorMap<Tensor<const int, 1> > vec3(vec1.data(), 6);
+ TensorMap<Tensor<const int, 1, RowMajor> > vec4(vec2.data(), 6);
+
+ vec1(0) = 4; vec2(0) = 0;
+ vec1(1) = 8; vec2(1) = 1;
+ vec1(2) = 15; vec2(2) = 2;
+ vec1(3) = 16; vec2(3) = 3;
+ vec1(4) = 23; vec2(4) = 4;
+ vec1(5) = 42; vec2(5) = 5;
+
+ VERIFY_IS_EQUAL(vec1.rank(), 1);
+ VERIFY_IS_EQUAL(vec1.size(), 6);
+ VERIFY_IS_EQUAL(vec1.dimension(0), 6);
+
+ VERIFY_IS_EQUAL(vec3(0), 4);
+ VERIFY_IS_EQUAL(vec3(1), 8);
+ VERIFY_IS_EQUAL(vec3(2), 15);
+ VERIFY_IS_EQUAL(vec3(3), 16);
+ VERIFY_IS_EQUAL(vec3(4), 23);
+ VERIFY_IS_EQUAL(vec3(5), 42);
+
+ VERIFY_IS_EQUAL(vec4(0), 0);
+ VERIFY_IS_EQUAL(vec4(1), 1);
+ VERIFY_IS_EQUAL(vec4(2), 2);
+ VERIFY_IS_EQUAL(vec4(3), 3);
+ VERIFY_IS_EQUAL(vec4(4), 4);
+ VERIFY_IS_EQUAL(vec4(5), 5);
+}
+
+static void test_2d()
+{
+ Tensor<int, 2> mat1(2,3);
+ Tensor<int, 2, RowMajor> mat2(2,3);
+
+ mat1(0,0) = 0;
+ mat1(0,1) = 1;
+ mat1(0,2) = 2;
+ mat1(1,0) = 3;
+ mat1(1,1) = 4;
+ mat1(1,2) = 5;
+
+ mat2(0,0) = 0;
+ mat2(0,1) = 1;
+ mat2(0,2) = 2;
+ mat2(1,0) = 3;
+ mat2(1,1) = 4;
+ mat2(1,2) = 5;
+
+ TensorMap<Tensor<const int, 2> > mat3(mat1.data(), 2, 3);
+ TensorMap<Tensor<const int, 2, RowMajor> > mat4(mat2.data(), 2, 3);
+
+ VERIFY_IS_EQUAL(mat3.rank(), 2);
+ VERIFY_IS_EQUAL(mat3.size(), 6);
+ VERIFY_IS_EQUAL(mat3.dimension(0), 2);
+ VERIFY_IS_EQUAL(mat3.dimension(1), 3);
+
+ VERIFY_IS_EQUAL(mat4.rank(), 2);
+ VERIFY_IS_EQUAL(mat4.size(), 6);
+ VERIFY_IS_EQUAL(mat4.dimension(0), 2);
+ VERIFY_IS_EQUAL(mat4.dimension(1), 3);
+
+ VERIFY_IS_EQUAL(mat3(0,0), 0);
+ VERIFY_IS_EQUAL(mat3(0,1), 1);
+ VERIFY_IS_EQUAL(mat3(0,2), 2);
+ VERIFY_IS_EQUAL(mat3(1,0), 3);
+ VERIFY_IS_EQUAL(mat3(1,1), 4);
+ VERIFY_IS_EQUAL(mat3(1,2), 5);
+
+ VERIFY_IS_EQUAL(mat4(0,0), 0);
+ VERIFY_IS_EQUAL(mat4(0,1), 1);
+ VERIFY_IS_EQUAL(mat4(0,2), 2);
+ VERIFY_IS_EQUAL(mat4(1,0), 3);
+ VERIFY_IS_EQUAL(mat4(1,1), 4);
+ VERIFY_IS_EQUAL(mat4(1,2), 5);
+}
+
+static void test_3d()
+{
+ Tensor<int, 3> mat1(2,3,7);
+ Tensor<int, 3, RowMajor> mat2(2,3,7);
+
+ int val = 0;
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 7; ++k) {
+ mat1(i,j,k) = val;
+ mat2(i,j,k) = val;
+ val++;
+ }
+ }
+ }
+
+ TensorMap<Tensor<const int, 3> > mat3(mat1.data(), 2, 3, 7);
+ TensorMap<Tensor<const int, 3, RowMajor> > mat4(mat2.data(), 2, 3, 7);
+
+ VERIFY_IS_EQUAL(mat3.rank(), 3);
+ VERIFY_IS_EQUAL(mat3.size(), 2*3*7);
+ VERIFY_IS_EQUAL(mat3.dimension(0), 2);
+ VERIFY_IS_EQUAL(mat3.dimension(1), 3);
+ VERIFY_IS_EQUAL(mat3.dimension(2), 7);
+
+ VERIFY_IS_EQUAL(mat4.rank(), 3);
+ VERIFY_IS_EQUAL(mat4.size(), 2*3*7);
+ VERIFY_IS_EQUAL(mat4.dimension(0), 2);
+ VERIFY_IS_EQUAL(mat4.dimension(1), 3);
+ VERIFY_IS_EQUAL(mat4.dimension(2), 7);
+
+ val = 0;
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 7; ++k) {
+ VERIFY_IS_EQUAL(mat3(i,j,k), val);
+ VERIFY_IS_EQUAL(mat4(i,j,k), val);
+ val++;
+ }
+ }
+ }
+}
+
+
+static void test_from_tensor()
+{
+ Tensor<int, 3> mat1(2,3,7);
+ Tensor<int, 3, RowMajor> mat2(2,3,7);
+
+ int val = 0;
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 7; ++k) {
+ mat1(i,j,k) = val;
+ mat2(i,j,k) = val;
+ val++;
+ }
+ }
+ }
+
+ TensorMap<Tensor<int, 3> > mat3(mat1);
+ TensorMap<Tensor<int, 3, RowMajor> > mat4(mat2);
+
+ VERIFY_IS_EQUAL(mat3.rank(), 3);
+ VERIFY_IS_EQUAL(mat3.size(), 2*3*7);
+ VERIFY_IS_EQUAL(mat3.dimension(0), 2);
+ VERIFY_IS_EQUAL(mat3.dimension(1), 3);
+ VERIFY_IS_EQUAL(mat3.dimension(2), 7);
+
+ VERIFY_IS_EQUAL(mat4.rank(), 3);
+ VERIFY_IS_EQUAL(mat4.size(), 2*3*7);
+ VERIFY_IS_EQUAL(mat4.dimension(0), 2);
+ VERIFY_IS_EQUAL(mat4.dimension(1), 3);
+ VERIFY_IS_EQUAL(mat4.dimension(2), 7);
+
+ val = 0;
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 7; ++k) {
+ VERIFY_IS_EQUAL(mat3(i,j,k), val);
+ VERIFY_IS_EQUAL(mat4(i,j,k), val);
+ val++;
+ }
+ }
+ }
+
+ TensorFixedSize<int, Sizes<2,3,7> > mat5;
+
+ val = 0;
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 7; ++k) {
+ array<ptrdiff_t, 3> coords;
+ coords[0] = i;
+ coords[1] = j;
+ coords[2] = k;
+ mat5(coords) = val;
+ val++;
+ }
+ }
+ }
+
+ TensorMap<TensorFixedSize<int, Sizes<2,3,7> > > mat6(mat5);
+
+ VERIFY_IS_EQUAL(mat6.rank(), 3);
+ VERIFY_IS_EQUAL(mat6.size(), 2*3*7);
+ VERIFY_IS_EQUAL(mat6.dimension(0), 2);
+ VERIFY_IS_EQUAL(mat6.dimension(1), 3);
+ VERIFY_IS_EQUAL(mat6.dimension(2), 7);
+
+ val = 0;
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 7; ++k) {
+ VERIFY_IS_EQUAL(mat6(i,j,k), val);
+ val++;
+ }
+ }
+ }
+}
+
+
+static int f(const TensorMap<Tensor<int, 3> >& tensor) {
+ // Size<0> empty;
+ EIGEN_STATIC_ASSERT((internal::array_size<Sizes<> >::value == 0), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT((internal::array_size<DSizes<int, 0> >::value == 0), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ Tensor<int, 0> result = tensor.sum();
+ return result();
+}
+
+static void test_casting()
+{
+ Tensor<int, 3> tensor(2,3,7);
+
+ int val = 0;
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 7; ++k) {
+ tensor(i,j,k) = val;
+ val++;
+ }
+ }
+ }
+
+ TensorMap<Tensor<int, 3> > map(tensor);
+ int sum1 = f(map);
+ int sum2 = f(tensor);
+
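+  // The 2*3*7 = 42 entries hold the values 0..41, which sum to 41*42/2 = 861.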
+ VERIFY_IS_EQUAL(sum1, sum2);
+ VERIFY_IS_EQUAL(sum1, 861);
+}
+
+void test_cxx11_tensor_map()
+{
+ CALL_SUBTEST(test_0d());
+ CALL_SUBTEST(test_1d());
+ CALL_SUBTEST(test_2d());
+ CALL_SUBTEST(test_3d());
+
+ CALL_SUBTEST(test_from_tensor());
+ CALL_SUBTEST(test_casting());
+}
diff --git a/unsupported/test/cxx11_tensor_math.cpp b/unsupported/test/cxx11_tensor_math.cpp
new file mode 100644
index 000000000..61c742a16
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_math.cpp
@@ -0,0 +1,46 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2015 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+
+#include <Eigen/CXX11/Tensor>
+
+using Eigen::Tensor;
+using Eigen::RowMajor;
+
+static void test_tanh()
+{
+ Tensor<float, 1> vec1(6);
+ vec1.setRandom();
+
+ Tensor<float, 1> vec2 = vec1.tanh();
+
+ for (int i = 0; i < 6; ++i) {
+ VERIFY_IS_APPROX(vec2(i), tanhf(vec1(i)));
+ }
+}
+
+static void test_sigmoid()
+{
+ Tensor<float, 1> vec1(6);
+ vec1.setRandom();
+
+ Tensor<float, 1> vec2 = vec1.sigmoid();
+
+ for (int i = 0; i < 6; ++i) {
+ VERIFY_IS_APPROX(vec2(i), 1.0f / (1.0f + std::exp(-vec1(i))));
+ }
+}
+
+
+void test_cxx11_tensor_math()
+{
+ CALL_SUBTEST(test_tanh());
+ CALL_SUBTEST(test_sigmoid());
+}
diff --git a/unsupported/test/cxx11_tensor_mixed_indices.cpp b/unsupported/test/cxx11_tensor_mixed_indices.cpp
new file mode 100644
index 000000000..4fba6fdd1
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_mixed_indices.cpp
@@ -0,0 +1,53 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+
+#include <Eigen/CXX11/Tensor>
+
+
+static void test_simple()
+{
+ Tensor<float, 1, ColMajor> vec1(6);
+ Tensor<float, 1, ColMajor, int> vec2(6);
+
+ vec1(0) = 4.0; vec2(0) = 0.0;
+ vec1(1) = 8.0; vec2(1) = 1.0;
+ vec1(2) = 15.0; vec2(2) = 2.0;
+ vec1(3) = 16.0; vec2(3) = 3.0;
+ vec1(4) = 23.0; vec2(4) = 4.0;
+ vec1(5) = 42.0; vec2(5) = 5.0;
+
+ float data3[6];
+ TensorMap<Tensor<float, 1, ColMajor>> vec3(data3, 6);
+ vec3 = vec1.sqrt();
+ float data4[6];
+ TensorMap<Tensor<float, 1, ColMajor, int>> vec4(data4, 6);
+ vec4 = vec2.square();
+
+ VERIFY_IS_APPROX(vec3(0), sqrtf(4.0));
+ VERIFY_IS_APPROX(vec3(1), sqrtf(8.0));
+ VERIFY_IS_APPROX(vec3(2), sqrtf(15.0));
+ VERIFY_IS_APPROX(vec3(3), sqrtf(16.0));
+ VERIFY_IS_APPROX(vec3(4), sqrtf(23.0));
+ VERIFY_IS_APPROX(vec3(5), sqrtf(42.0));
+
+ VERIFY_IS_APPROX(vec4(0), 0.0f);
+ VERIFY_IS_APPROX(vec4(1), 1.0f);
+ VERIFY_IS_APPROX(vec4(2), 2.0f * 2.0f);
+ VERIFY_IS_APPROX(vec4(3), 3.0f * 3.0f);
+ VERIFY_IS_APPROX(vec4(4), 4.0f * 4.0f);
+ VERIFY_IS_APPROX(vec4(5), 5.0f * 5.0f);
+}
+
+
+void test_cxx11_tensor_mixed_indices()
+{
+ CALL_SUBTEST(test_simple());
+}
diff --git a/unsupported/test/cxx11_tensor_morphing.cpp b/unsupported/test/cxx11_tensor_morphing.cpp
new file mode 100644
index 000000000..f7de43110
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_morphing.cpp
@@ -0,0 +1,485 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+
+#include <Eigen/CXX11/Tensor>
+
+using Eigen::Tensor;
+
+template<typename>
+static void test_simple_reshape()
+{
+ Tensor<float, 5> tensor1(2,3,1,7,1);
+ tensor1.setRandom();
+
+ Tensor<float, 3> tensor2(2,3,7);
+ Tensor<float, 2> tensor3(6,7);
+ Tensor<float, 2> tensor4(2,21);
+
+ Tensor<float, 3>::Dimensions dim1(2,3,7);
+ tensor2 = tensor1.reshape(dim1);
+ Tensor<float, 2>::Dimensions dim2(6,7);
+ tensor3 = tensor1.reshape(dim2);
+ Tensor<float, 2>::Dimensions dim3(2,21);
+ tensor4 = tensor1.reshape(dim1).reshape(dim3);
+
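+  // Reshaping preserves the ColMajor linear order, so element (i,j,0,k,0) of
+  // the rank-5 tensor maps to (i+2*j, k) in the 6x7 view and to (i, j+3*k)
+  // in the 2x21 view.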
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 7; ++k) {
+ VERIFY_IS_EQUAL(tensor1(i,j,0,k,0), tensor2(i,j,k));
+ VERIFY_IS_EQUAL(tensor1(i,j,0,k,0), tensor3(i+2*j,k));
+ VERIFY_IS_EQUAL(tensor1(i,j,0,k,0), tensor4(i,j+3*k));
+ }
+ }
+ }
+}
+
+template<typename>
+static void test_reshape_in_expr() {
+ MatrixXf m1(2,3*5*7*11);
+ MatrixXf m2(3*5*7*11,13);
+ m1.setRandom();
+ m2.setRandom();
+ MatrixXf m3 = m1 * m2;
+
+ TensorMap<Tensor<float, 5>> tensor1(m1.data(), 2,3,5,7,11);
+ TensorMap<Tensor<float, 5>> tensor2(m2.data(), 3,5,7,11,13);
+ Tensor<float, 2>::Dimensions newDims1(2,3*5*7*11);
+ Tensor<float, 2>::Dimensions newDims2(3*5*7*11,13);
+ typedef Tensor<float, 1>::DimensionPair DimPair;
+ array<DimPair, 1> contract_along{{DimPair(1, 0)}};
+ Tensor<float, 2> tensor3(2,13);
+ tensor3 = tensor1.reshape(newDims1).contract(tensor2.reshape(newDims2), contract_along);
+
+ Map<MatrixXf> res(tensor3.data(), 2, 13);
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 13; ++j) {
+ VERIFY_IS_APPROX(res(i,j), m3(i,j));
+ }
+ }
+}
+
+template<typename>
+static void test_reshape_as_lvalue()
+{
+ Tensor<float, 3> tensor(2,3,7);
+ tensor.setRandom();
+
+ Tensor<float, 2> tensor2d(6,7);
+ Tensor<float, 3>::Dimensions dim(2,3,7);
+ tensor2d.reshape(dim) = tensor;
+
+ float scratch[2*3*1*7*1];
+ TensorMap<Tensor<float, 5>> tensor5d(scratch, 2,3,1,7,1);
+ tensor5d.reshape(dim).device(Eigen::DefaultDevice()) = tensor;
+
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 7; ++k) {
+ VERIFY_IS_EQUAL(tensor2d(i+2*j,k), tensor(i,j,k));
+ VERIFY_IS_EQUAL(tensor5d(i,j,0,k,0), tensor(i,j,k));
+ }
+ }
+ }
+}
+
+template<int DataLayout>
+static void test_simple_slice()
+{
+ Tensor<float, 5, DataLayout> tensor(2,3,5,7,11);
+ tensor.setRandom();
+
+ Tensor<float, 5, DataLayout> slice1(1,1,1,1,1);
+ Eigen::DSizes<ptrdiff_t, 5> indices(1,2,3,4,5);
+ Eigen::DSizes<ptrdiff_t, 5> sizes(1,1,1,1,1);
+ slice1 = tensor.slice(indices, sizes);
+ VERIFY_IS_EQUAL(slice1(0,0,0,0,0), tensor(1,2,3,4,5));
+
+ Tensor<float, 5, DataLayout> slice2(1,1,2,2,3);
+ Eigen::DSizes<ptrdiff_t, 5> indices2(1,1,3,4,5);
+ Eigen::DSizes<ptrdiff_t, 5> sizes2(1,1,2,2,3);
+ slice2 = tensor.slice(indices2, sizes2);
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 2; ++j) {
+ for (int k = 0; k < 3; ++k) {
+ VERIFY_IS_EQUAL(slice2(0,0,i,j,k), tensor(1,1,3+i,4+j,5+k));
+ }
+ }
+ }
+}
+
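+// Slicing must also work on tensors of const elements; routing the slice
+// expression through a TensorRef lets it be evaluated without a writable
+// copy.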
+template<typename=void>
+static void test_const_slice()
+{
+ const float b[1] = {42};
+ TensorMap<Tensor<const float, 1> > m(b, 1);
+ DSizes<DenseIndex, 1> offsets;
+ offsets[0] = 0;
+ TensorRef<Tensor<const float, 1> > slice_ref(m.slice(offsets, m.dimensions()));
+ VERIFY_IS_EQUAL(slice_ref(0), 42);
+}
+
+template<int DataLayout>
+static void test_slice_in_expr() {
+ typedef Matrix<float, Dynamic, Dynamic, DataLayout> Mtx;
+ Mtx m1(7,7);
+ Mtx m2(3,3);
+ m1.setRandom();
+ m2.setRandom();
+
+ Mtx m3 = m1.block(1, 2, 3, 3) * m2.block(0, 2, 3, 1);
+
+ TensorMap<Tensor<float, 2, DataLayout>> tensor1(m1.data(), 7, 7);
+ TensorMap<Tensor<float, 2, DataLayout>> tensor2(m2.data(), 3, 3);
+ Tensor<float, 2, DataLayout> tensor3(3,1);
+ typedef Tensor<float, 1>::DimensionPair DimPair;
+ array<DimPair, 1> contract_along{{DimPair(1, 0)}};
+
+ Eigen::DSizes<ptrdiff_t, 2> indices1(1,2);
+ Eigen::DSizes<ptrdiff_t, 2> sizes1(3,3);
+ Eigen::DSizes<ptrdiff_t, 2> indices2(0,2);
+ Eigen::DSizes<ptrdiff_t, 2> sizes2(3,1);
+ tensor3 = tensor1.slice(indices1, sizes1).contract(tensor2.slice(indices2, sizes2), contract_along);
+
+ Map<Mtx> res(tensor3.data(), 3, 1);
+ for (int i = 0; i < 3; ++i) {
+ for (int j = 0; j < 1; ++j) {
+ VERIFY_IS_APPROX(res(i,j), m3(i,j));
+ }
+ }
+
+ // Take an arbitrary slice of an arbitrarily sized tensor.
+ TensorMap<Tensor<const float, 2, DataLayout>> tensor4(m1.data(), 7, 7);
+ Tensor<float, 1, DataLayout> tensor6 = tensor4.reshape(DSizes<ptrdiff_t, 1>(7*7)).exp().slice(DSizes<ptrdiff_t, 1>(0), DSizes<ptrdiff_t, 1>(35));
+ for (int i = 0; i < 35; ++i) {
+ VERIFY_IS_APPROX(tensor6(i), expf(tensor4.data()[i]));
+ }
+}
+
+template<int DataLayout>
+static void test_slice_as_lvalue()
+{
+ Tensor<float, 3, DataLayout> tensor1(2,2,7);
+ tensor1.setRandom();
+ Tensor<float, 3, DataLayout> tensor2(2,2,7);
+ tensor2.setRandom();
+ Tensor<float, 3, DataLayout> tensor3(4,3,5);
+ tensor3.setRandom();
+ Tensor<float, 3, DataLayout> tensor4(4,3,2);
+ tensor4.setRandom();
+ Tensor<float, 3, DataLayout> tensor5(10,13,12);
+ tensor5.setRandom();
+
+ Tensor<float, 3, DataLayout> result(4,5,7);
+ Eigen::DSizes<ptrdiff_t, 3> sizes12(2,2,7);
+ Eigen::DSizes<ptrdiff_t, 3> first_slice(0,0,0);
+ result.slice(first_slice, sizes12) = tensor1;
+ Eigen::DSizes<ptrdiff_t, 3> second_slice(2,0,0);
+ result.slice(second_slice, sizes12).device(Eigen::DefaultDevice()) = tensor2;
+
+ Eigen::DSizes<ptrdiff_t, 3> sizes3(4,3,5);
+ Eigen::DSizes<ptrdiff_t, 3> third_slice(0,2,0);
+ result.slice(third_slice, sizes3) = tensor3;
+
+ Eigen::DSizes<ptrdiff_t, 3> sizes4(4,3,2);
+ Eigen::DSizes<ptrdiff_t, 3> fourth_slice(0,2,5);
+ result.slice(fourth_slice, sizes4) = tensor4;
+
+ for (int j = 0; j < 2; ++j) {
+ for (int k = 0; k < 7; ++k) {
+ for (int i = 0; i < 2; ++i) {
+ VERIFY_IS_EQUAL(result(i,j,k), tensor1(i,j,k));
+ VERIFY_IS_EQUAL(result(i+2,j,k), tensor2(i,j,k));
+ }
+ }
+ }
+ for (int i = 0; i < 4; ++i) {
+ for (int j = 2; j < 5; ++j) {
+ for (int k = 0; k < 5; ++k) {
+ VERIFY_IS_EQUAL(result(i,j,k), tensor3(i,j-2,k));
+ }
+ for (int k = 5; k < 7; ++k) {
+ VERIFY_IS_EQUAL(result(i,j,k), tensor4(i,j-2,k-5));
+ }
+ }
+ }
+
+ Eigen::DSizes<ptrdiff_t, 3> sizes5(4,5,7);
+ Eigen::DSizes<ptrdiff_t, 3> fifth_slice(0,0,0);
+ result.slice(fifth_slice, sizes5) = tensor5.slice(fifth_slice, sizes5);
+ for (int i = 0; i < 4; ++i) {
+    for (int j = 0; j < 5; ++j) {
+ for (int k = 0; k < 7; ++k) {
+ VERIFY_IS_EQUAL(result(i,j,k), tensor5(i,j,k));
+ }
+ }
+ }
+}
+
+template<int DataLayout>
+static void test_slice_raw_data()
+{
+ Tensor<float, 4, DataLayout> tensor(3,5,7,11);
+ tensor.setRandom();
+
+ Eigen::DSizes<ptrdiff_t, 4> offsets(1,2,3,4);
+ Eigen::DSizes<ptrdiff_t, 4> extents(1,1,1,1);
+ typedef TensorEvaluator<decltype(tensor.slice(offsets, extents)), DefaultDevice> SliceEvaluator;
+ auto slice1 = SliceEvaluator(tensor.slice(offsets, extents), DefaultDevice());
+ VERIFY_IS_EQUAL(slice1.dimensions().TotalSize(), 1);
+ VERIFY_IS_EQUAL(slice1.data()[0], tensor(1,2,3,4));
+
+ if (DataLayout == ColMajor) {
+ extents = Eigen::DSizes<ptrdiff_t, 4>(2,1,1,1);
+ auto slice2 = SliceEvaluator(tensor.slice(offsets, extents), DefaultDevice());
+ VERIFY_IS_EQUAL(slice2.dimensions().TotalSize(), 2);
+ VERIFY_IS_EQUAL(slice2.data()[0], tensor(1,2,3,4));
+ VERIFY_IS_EQUAL(slice2.data()[1], tensor(2,2,3,4));
+ } else {
+ extents = Eigen::DSizes<ptrdiff_t, 4>(1,1,1,2);
+ auto slice2 = SliceEvaluator(tensor.slice(offsets, extents), DefaultDevice());
+ VERIFY_IS_EQUAL(slice2.dimensions().TotalSize(), 2);
+ VERIFY_IS_EQUAL(slice2.data()[0], tensor(1,2,3,4));
+ VERIFY_IS_EQUAL(slice2.data()[1], tensor(1,2,3,5));
+ }
+
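+  // A slice that is not contiguous in the underlying buffer cannot expose
+  // raw data, so data() is expected to return null here.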
+ extents = Eigen::DSizes<ptrdiff_t, 4>(1,2,1,1);
+ auto slice3 = SliceEvaluator(tensor.slice(offsets, extents), DefaultDevice());
+ VERIFY_IS_EQUAL(slice3.dimensions().TotalSize(), 2);
+ VERIFY_IS_EQUAL(slice3.data(), static_cast<float*>(0));
+
+ if (DataLayout == ColMajor) {
+ offsets = Eigen::DSizes<ptrdiff_t, 4>(0,2,3,4);
+ extents = Eigen::DSizes<ptrdiff_t, 4>(3,2,1,1);
+ auto slice4 = SliceEvaluator(tensor.slice(offsets, extents), DefaultDevice());
+ VERIFY_IS_EQUAL(slice4.dimensions().TotalSize(), 6);
+ for (int i = 0; i < 3; ++i) {
+ for (int j = 0; j < 2; ++j) {
+ VERIFY_IS_EQUAL(slice4.data()[i+3*j], tensor(i,2+j,3,4));
+ }
+ }
+ } else {
+ offsets = Eigen::DSizes<ptrdiff_t, 4>(1,2,3,0);
+ extents = Eigen::DSizes<ptrdiff_t, 4>(1,1,2,11);
+ auto slice4 = SliceEvaluator(tensor.slice(offsets, extents), DefaultDevice());
+ VERIFY_IS_EQUAL(slice4.dimensions().TotalSize(), 22);
+ for (int l = 0; l < 11; ++l) {
+ for (int k = 0; k < 2; ++k) {
+ VERIFY_IS_EQUAL(slice4.data()[l+11*k], tensor(1,2,3+k,l));
+ }
+ }
+ }
+
+ if (DataLayout == ColMajor) {
+ offsets = Eigen::DSizes<ptrdiff_t, 4>(0,0,0,4);
+ extents = Eigen::DSizes<ptrdiff_t, 4>(3,5,7,2);
+ auto slice5 = SliceEvaluator(tensor.slice(offsets, extents), DefaultDevice());
+ VERIFY_IS_EQUAL(slice5.dimensions().TotalSize(), 210);
+ for (int i = 0; i < 3; ++i) {
+ for (int j = 0; j < 5; ++j) {
+ for (int k = 0; k < 7; ++k) {
+ for (int l = 0; l < 2; ++l) {
+ int slice_index = i + 3 * (j + 5 * (k + 7 * l));
+ VERIFY_IS_EQUAL(slice5.data()[slice_index], tensor(i,j,k,l+4));
+ }
+ }
+ }
+ }
+ } else {
+ offsets = Eigen::DSizes<ptrdiff_t, 4>(1,0,0,0);
+ extents = Eigen::DSizes<ptrdiff_t, 4>(2,5,7,11);
+ auto slice5 = SliceEvaluator(tensor.slice(offsets, extents), DefaultDevice());
+ VERIFY_IS_EQUAL(slice5.dimensions().TotalSize(), 770);
+ for (int l = 0; l < 11; ++l) {
+ for (int k = 0; k < 7; ++k) {
+ for (int j = 0; j < 5; ++j) {
+ for (int i = 0; i < 2; ++i) {
+ int slice_index = l + 11 * (k + 7 * (j + 5 * i));
+ VERIFY_IS_EQUAL(slice5.data()[slice_index], tensor(i+1,j,k,l));
+ }
+ }
+ }
+ }
+  }
+
+ offsets = Eigen::DSizes<ptrdiff_t, 4>(0,0,0,0);
+ extents = Eigen::DSizes<ptrdiff_t, 4>(3,5,7,11);
+ auto slice6 = SliceEvaluator(tensor.slice(offsets, extents), DefaultDevice());
+ VERIFY_IS_EQUAL(slice6.dimensions().TotalSize(), 3*5*7*11);
+ VERIFY_IS_EQUAL(slice6.data(), tensor.data());
+}
+
+
+template<int DataLayout>
+static void test_strided_slice()
+{
+ typedef Tensor<float, 5, DataLayout> Tensor5f;
+ typedef Eigen::DSizes<Eigen::DenseIndex, 5> Index5;
+ typedef Tensor<float, 2, DataLayout> Tensor2f;
+ typedef Eigen::DSizes<Eigen::DenseIndex, 2> Index2;
+ Tensor<float, 5, DataLayout> tensor(2,3,5,7,11);
+ Tensor<float, 2, DataLayout> tensor2(7,11);
+ tensor.setRandom();
+ tensor2.setRandom();
+
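+  // Negative strides enumerate from indicesStart down toward indicesStop, so
+  // slice(j,k) below picks up tensor2(5-2*j, 7-k).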
+ if (true) {
+ Tensor2f slice(2,3);
+ Index2 strides(-2,-1);
+ Index2 indicesStart(5,7);
+ Index2 indicesStop(0,4);
+ slice = tensor2.stridedSlice(indicesStart, indicesStop, strides);
+ for (int j = 0; j < 2; ++j) {
+ for (int k = 0; k < 3; ++k) {
+ VERIFY_IS_EQUAL(slice(j,k), tensor2(5-2*j,7-k));
+ }
+ }
+ }
+
+ if(true) {
+ Tensor2f slice(0,1);
+ Index2 strides(1,1);
+ Index2 indicesStart(5,4);
+ Index2 indicesStop(5,5);
+ slice = tensor2.stridedSlice(indicesStart, indicesStop, strides);
+ }
+
+  if(true) { // test clamped degenerate intervals
+ Tensor2f slice(7,11);
+ Index2 strides(1,-1);
+ Index2 indicesStart(-3,20); // should become 0,10
+    Index2 indicesStop(20,-11); // should become 7,-1
+ slice = tensor2.stridedSlice(indicesStart, indicesStop, strides);
+ for (int j = 0; j < 7; ++j) {
+ for (int k = 0; k < 11; ++k) {
+ VERIFY_IS_EQUAL(slice(j,k), tensor2(j,10-k));
+ }
+ }
+ }
+
+ if(true) {
+ Tensor5f slice1(1,1,1,1,1);
+ Eigen::DSizes<Eigen::DenseIndex, 5> indicesStart(1, 2, 3, 4, 5);
+ Eigen::DSizes<Eigen::DenseIndex, 5> indicesStop(2, 3, 4, 5, 6);
+ Eigen::DSizes<Eigen::DenseIndex, 5> strides(1, 1, 1, 1, 1);
+ slice1 = tensor.stridedSlice(indicesStart, indicesStop, strides);
+ VERIFY_IS_EQUAL(slice1(0,0,0,0,0), tensor(1,2,3,4,5));
+ }
+
+ if(true) {
+ Tensor5f slice(1,1,2,2,3);
+ Index5 start(1, 1, 3, 4, 5);
+ Index5 stop(2, 2, 5, 6, 8);
+ Index5 strides(1, 1, 1, 1, 1);
+ slice = tensor.stridedSlice(start, stop, strides);
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 2; ++j) {
+ for (int k = 0; k < 3; ++k) {
+ VERIFY_IS_EQUAL(slice(0,0,i,j,k), tensor(1,1,3+i,4+j,5+k));
+ }
+ }
+ }
+ }
+
+ if(true) {
+ Tensor5f slice(1,1,2,2,3);
+ Index5 strides3(1, 1, -2, 1, -1);
+ Index5 indices3Start(1, 1, 4, 4, 7);
+ Index5 indices3Stop(2, 2, 0, 6, 4);
+ slice = tensor.stridedSlice(indices3Start, indices3Stop, strides3);
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 2; ++j) {
+ for (int k = 0; k < 3; ++k) {
+ VERIFY_IS_EQUAL(slice(0,0,i,j,k), tensor(1,1,4-2*i,4+j,7-k));
+ }
+ }
+ }
+ }
+
+ if(false) { // tests degenerate interval
+ Tensor5f slice(1,1,2,2,3);
+ Index5 strides3(1, 1, 2, 1, 1);
+ Index5 indices3Start(1, 1, 4, 4, 7);
+ Index5 indices3Stop(2, 2, 0, 6, 4);
+ slice = tensor.stridedSlice(indices3Start, indices3Stop, strides3);
+ }
+}
+
+template<int DataLayout>
+static void test_strided_slice_write()
+{
+ typedef Tensor<float, 2, DataLayout> Tensor2f;
+ typedef Eigen::DSizes<Eigen::DenseIndex, 2> Index2;
+
+  Tensor<float, 2, DataLayout> tensor(7,11), tensor2(7,11);
+  tensor.setRandom();
+  tensor2 = tensor;
+ Tensor2f slice(2,3);
+
+ slice.setRandom();
+
+ Index2 strides(1,1);
+ Index2 indicesStart(3,4);
+ Index2 indicesStop(5,7);
+ Index2 lengths(2,3);
+
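+  // With unit strides, writing through stridedSlice must be exactly
+  // equivalent to writing through a plain slice over the same region.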
+  tensor.slice(indicesStart, lengths) = slice;
+  tensor2.stridedSlice(indicesStart, indicesStop, strides) = slice;
+
+  for (int i = 0; i < 7; ++i) {
+    for (int j = 0; j < 11; ++j) {
+      VERIFY_IS_EQUAL(tensor(i,j), tensor2(i,j));
+    }
+  }
+}
+
+
+template<int DataLayout>
+static void test_composition()
+{
+ Eigen::Tensor<float, 2, DataLayout> matrix(7, 11);
+ matrix.setRandom();
+
+ const DSizes<ptrdiff_t, 3> newDims(1, 1, 11);
+ Eigen::Tensor<float, 3, DataLayout> tensor =
+ matrix.slice(DSizes<ptrdiff_t, 2>(2, 0), DSizes<ptrdiff_t, 2>(1, 11)).reshape(newDims);
+
+ VERIFY_IS_EQUAL(tensor.dimensions().TotalSize(), 11);
+ VERIFY_IS_EQUAL(tensor.dimension(0), 1);
+ VERIFY_IS_EQUAL(tensor.dimension(1), 1);
+ VERIFY_IS_EQUAL(tensor.dimension(2), 11);
+ for (int i = 0; i < 11; ++i) {
+ VERIFY_IS_EQUAL(tensor(0,0,i), matrix(2,i));
+ }
+}
+
+
+void test_cxx11_tensor_morphing()
+{
+ CALL_SUBTEST_1(test_simple_reshape<void>());
+ CALL_SUBTEST_1(test_reshape_in_expr<void>());
+ CALL_SUBTEST_1(test_reshape_as_lvalue<void>());
+
+ CALL_SUBTEST_1(test_simple_slice<ColMajor>());
+ CALL_SUBTEST_1(test_simple_slice<RowMajor>());
+ CALL_SUBTEST_1(test_const_slice());
+ CALL_SUBTEST_2(test_slice_in_expr<ColMajor>());
+ CALL_SUBTEST_3(test_slice_in_expr<RowMajor>());
+ CALL_SUBTEST_4(test_slice_as_lvalue<ColMajor>());
+ CALL_SUBTEST_4(test_slice_as_lvalue<RowMajor>());
+ CALL_SUBTEST_5(test_slice_raw_data<ColMajor>());
+ CALL_SUBTEST_5(test_slice_raw_data<RowMajor>());
+
+ CALL_SUBTEST_6(test_strided_slice_write<ColMajor>());
+ CALL_SUBTEST_6(test_strided_slice<ColMajor>());
+ CALL_SUBTEST_6(test_strided_slice_write<RowMajor>());
+ CALL_SUBTEST_6(test_strided_slice<RowMajor>());
+
+ CALL_SUBTEST_7(test_composition<ColMajor>());
+ CALL_SUBTEST_7(test_composition<RowMajor>());
+}
diff --git a/unsupported/test/cxx11_tensor_notification.cpp b/unsupported/test/cxx11_tensor_notification.cpp
new file mode 100644
index 000000000..c946007b8
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_notification.cpp
@@ -0,0 +1,81 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2015 Vijay Vasudevan <vrv@google.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#define EIGEN_USE_THREADS
+
+#include <stdlib.h>
+#include "main.h"
+#include <Eigen/CXX11/Tensor>
+
+#if EIGEN_OS_WIN || EIGEN_OS_WIN64
+#include <windows.h>
+void sleep(int seconds) {
+ Sleep(seconds*1000);
+}
+#else
+#include <unistd.h>
+#endif
+
+
+namespace {
+
+void WaitAndAdd(Eigen::Notification* n, int* counter) {
+ n->Wait();
+ *counter = *counter + 1;
+}
+
+} // namespace
+
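+// Schedule one waiter, give it time to block on Wait(), then check that
+// Notify() releases it. The fixed sleeps are a coarse but simple way to
+// order the checks.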
+static void test_notification_single()
+{
+ ThreadPool thread_pool(1);
+
+ int counter = 0;
+ Eigen::Notification n;
+ std::function<void()> func = std::bind(&WaitAndAdd, &n, &counter);
+ thread_pool.Schedule(func);
+ sleep(1);
+
+ // The thread should be waiting for the notification.
+ VERIFY_IS_EQUAL(counter, 0);
+
+ // Unblock the thread
+ n.Notify();
+
+ sleep(1);
+
+ // Verify the counter has been incremented
+ VERIFY_IS_EQUAL(counter, 1);
+}
+
+// Like test_notification_single() but enqueues multiple threads to
+// validate that all threads get notified by Notify().
+static void test_notification_multiple()
+{
+ ThreadPool thread_pool(1);
+
+ int counter = 0;
+ Eigen::Notification n;
+ std::function<void()> func = std::bind(&WaitAndAdd, &n, &counter);
+ thread_pool.Schedule(func);
+ thread_pool.Schedule(func);
+ thread_pool.Schedule(func);
+ thread_pool.Schedule(func);
+ sleep(1);
+ VERIFY_IS_EQUAL(counter, 0);
+ n.Notify();
+ sleep(1);
+ VERIFY_IS_EQUAL(counter, 4);
+}
+
+void test_cxx11_tensor_notification()
+{
+ CALL_SUBTEST(test_notification_single());
+ CALL_SUBTEST(test_notification_multiple());
+}
diff --git a/unsupported/test/cxx11_tensor_of_complex.cpp b/unsupported/test/cxx11_tensor_of_complex.cpp
new file mode 100644
index 000000000..e9d1b2d3c
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_of_complex.cpp
@@ -0,0 +1,103 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+
+#include <Eigen/CXX11/Tensor>
+
+using Eigen::Tensor;
+using Eigen::TensorMap;
+
+
+
+static void test_additions()
+{
+ Tensor<std::complex<float>, 1> data1(3);
+ Tensor<std::complex<float>, 1> data2(3);
+ for (int i = 0; i < 3; ++i) {
+ data1(i) = std::complex<float>(i, -i);
+ data2(i) = std::complex<float>(i, 7 * i);
+ }
+
+ Tensor<std::complex<float>, 1> sum = data1 + data2;
+ for (int i = 0; i < 3; ++i) {
+ VERIFY_IS_EQUAL(sum(i), std::complex<float>(2*i, 6*i));
+ }
+}
+
+
+static void test_abs()
+{
+ Tensor<std::complex<float>, 1> data1(3);
+ Tensor<std::complex<double>, 1> data2(3);
+ data1.setRandom();
+ data2.setRandom();
+
+ Tensor<float, 1> abs1 = data1.abs();
+ Tensor<double, 1> abs2 = data2.abs();
+ for (int i = 0; i < 3; ++i) {
+ VERIFY_IS_APPROX(abs1(i), std::abs(data1(i)));
+ VERIFY_IS_APPROX(abs2(i), std::abs(data2(i)));
+ }
+}
+
+
+static void test_conjugate()
+{
+ Tensor<std::complex<float>, 1> data1(3);
+ Tensor<std::complex<double>, 1> data2(3);
+ Tensor<int, 1> data3(3);
+ data1.setRandom();
+ data2.setRandom();
+ data3.setRandom();
+
+ Tensor<std::complex<float>, 1> conj1 = data1.conjugate();
+ Tensor<std::complex<double>, 1> conj2 = data2.conjugate();
+ Tensor<int, 1> conj3 = data3.conjugate();
+ for (int i = 0; i < 3; ++i) {
+ VERIFY_IS_APPROX(conj1(i), std::conj(data1(i)));
+ VERIFY_IS_APPROX(conj2(i), std::conj(data2(i)));
+ VERIFY_IS_APPROX(conj3(i), data3(i));
+ }
+}
+
+static void test_contractions()
+{
+ Tensor<std::complex<float>, 4> t_left(30, 50, 8, 31);
+ Tensor<std::complex<float>, 5> t_right(8, 31, 7, 20, 10);
+ Tensor<std::complex<float>, 5> t_result(30, 50, 7, 20, 10);
+
+ t_left.setRandom();
+ t_right.setRandom();
+
+ typedef Map<Matrix<std::complex<float>, Dynamic, Dynamic>> MapXcf;
+ MapXcf m_left(t_left.data(), 1500, 248);
+ MapXcf m_right(t_right.data(), 248, 1400);
+ Matrix<std::complex<float>, Dynamic, Dynamic> m_result(1500, 1400);
+
+ // This contraction should be equivalent to a regular matrix multiplication
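+  // of the flattened operands: (30*50) x (8*31) times (8*31) x (7*20*10).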
+ typedef Tensor<float, 1>::DimensionPair DimPair;
+ Eigen::array<DimPair, 2> dims;
+ dims[0] = DimPair(2, 0);
+ dims[1] = DimPair(3, 1);
+ t_result = t_left.contract(t_right, dims);
+ m_result = m_left * m_right;
+ for (int i = 0; i < t_result.dimensions().TotalSize(); i++) {
+ VERIFY_IS_APPROX(t_result.data()[i], m_result.data()[i]);
+ }
+}
+
+
+void test_cxx11_tensor_of_complex()
+{
+ CALL_SUBTEST(test_additions());
+ CALL_SUBTEST(test_abs());
+ CALL_SUBTEST(test_conjugate());
+ CALL_SUBTEST(test_contractions());
+}
diff --git a/unsupported/test/cxx11_tensor_of_const_values.cpp b/unsupported/test/cxx11_tensor_of_const_values.cpp
new file mode 100644
index 000000000..f179a0c21
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_of_const_values.cpp
@@ -0,0 +1,105 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+
+#include <Eigen/CXX11/Tensor>
+
+using Eigen::Tensor;
+using Eigen::RowMajor;
+
+static void test_assign()
+{
+ float data1[6];
+ TensorMap<Tensor<const float, 2>> mat1(data1, 2, 3);
+ float data2[6];
+ const TensorMap<Tensor<float, 2>> mat2(data2, 2, 3);
+
+ for (int i = 0; i < 6; ++i) {
+ data1[i] = i;
+ data2[i] = -i;
+ }
+
+ Tensor<float, 2> rslt1;
+ rslt1 = mat1;
+ Tensor<float, 2> rslt2;
+ rslt2 = mat2;
+
+ Tensor<float, 2> rslt3 = mat1;
+ Tensor<float, 2> rslt4 = mat2;
+
+ Tensor<float, 2> rslt5(mat1);
+ Tensor<float, 2> rslt6(mat2);
+
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ VERIFY_IS_APPROX(rslt1(i,j), static_cast<float>(i + 2*j));
+ VERIFY_IS_APPROX(rslt2(i,j), static_cast<float>(-i - 2*j));
+ VERIFY_IS_APPROX(rslt3(i,j), static_cast<float>(i + 2*j));
+ VERIFY_IS_APPROX(rslt4(i,j), static_cast<float>(-i - 2*j));
+ VERIFY_IS_APPROX(rslt5(i,j), static_cast<float>(i + 2*j));
+ VERIFY_IS_APPROX(rslt6(i,j), static_cast<float>(-i - 2*j));
+ }
+ }
+}
+
+
+static void test_plus()
+{
+ float data1[6];
+ TensorMap<Tensor<const float, 2>> mat1(data1, 2, 3);
+ float data2[6];
+ TensorMap<Tensor<float, 2>> mat2(data2, 2, 3);
+
+ for (int i = 0; i < 6; ++i) {
+ data1[i] = i;
+ data2[i] = -i;
+ }
+
+ Tensor<float, 2> sum1;
+ sum1 = mat1 + mat2;
+ Tensor<float, 2> sum2;
+ sum2 = mat2 + mat1;
+
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ VERIFY_IS_APPROX(sum1(i,j), 0.0f);
+ VERIFY_IS_APPROX(sum2(i,j), 0.0f);
+ }
+ }
+}
+
+
+static void test_plus_equal()
+{
+ float data1[6];
+ TensorMap<Tensor<const float, 2>> mat1(data1, 2, 3);
+ float data2[6];
+ TensorMap<Tensor<float, 2>> mat2(data2, 2, 3);
+
+ for (int i = 0; i < 6; ++i) {
+ data1[i] = i;
+ data2[i] = -i;
+ }
+ mat2 += mat1;
+
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ VERIFY_IS_APPROX(mat2(i,j), 0.0f);
+ }
+ }
+}
+
+
+void test_cxx11_tensor_of_const_values()
+{
+ CALL_SUBTEST(test_assign());
+ CALL_SUBTEST(test_plus());
+ CALL_SUBTEST(test_plus_equal());
+}
diff --git a/unsupported/test/cxx11_tensor_of_float16_cuda.cu b/unsupported/test/cxx11_tensor_of_float16_cuda.cu
new file mode 100644
index 000000000..2f86980a2
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_of_float16_cuda.cu
@@ -0,0 +1,494 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2016 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#define EIGEN_TEST_NO_LONGDOUBLE
+#define EIGEN_TEST_NO_COMPLEX
+#define EIGEN_TEST_FUNC cxx11_tensor_of_float16_cuda
+#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int
+#define EIGEN_USE_GPU
+
+#if defined __CUDACC_VER__ && __CUDACC_VER__ >= 70500
+#include <cuda_fp16.h>
+#endif
+#include "main.h"
+#include <unsupported/Eigen/CXX11/Tensor>
+
+using Eigen::Tensor;
+
+template<typename>
+void test_cuda_numext() {
+ Eigen::CudaStreamDevice stream;
+ Eigen::GpuDevice gpu_device(&stream);
+ int num_elem = 101;
+
+ float* d_float = (float*)gpu_device.allocate(num_elem * sizeof(float));
+ bool* d_res_half = (bool*)gpu_device.allocate(num_elem * sizeof(bool));
+ bool* d_res_float = (bool*)gpu_device.allocate(num_elem * sizeof(bool));
+
+ Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_float(
+ d_float, num_elem);
+ Eigen::TensorMap<Eigen::Tensor<bool, 1>, Eigen::Aligned> gpu_res_half(
+ d_res_half, num_elem);
+ Eigen::TensorMap<Eigen::Tensor<bool, 1>, Eigen::Aligned> gpu_res_float(
+ d_res_float, num_elem);
+
+ gpu_float.device(gpu_device) = gpu_float.random() - gpu_float.constant(0.5f);
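+  // The inputs are all finite, so the isnan() check must agree between the
+  // float path and the half path for every element.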
+ gpu_res_float.device(gpu_device) = gpu_float.unaryExpr(Eigen::internal::scalar_isnan_op<float>());
+ gpu_res_half.device(gpu_device) = gpu_float.cast<Eigen::half>().unaryExpr(Eigen::internal::scalar_isnan_op<Eigen::half>());
+
+ Tensor<bool, 1> half_prec(num_elem);
+ Tensor<bool, 1> full_prec(num_elem);
+ gpu_device.memcpyDeviceToHost(half_prec.data(), d_res_half, num_elem*sizeof(bool));
+ gpu_device.memcpyDeviceToHost(full_prec.data(), d_res_float, num_elem*sizeof(bool));
+ gpu_device.synchronize();
+
+ for (int i = 0; i < num_elem; ++i) {
+ std::cout << "Checking numext " << i << std::endl;
+ VERIFY_IS_EQUAL(full_prec(i), half_prec(i));
+ }
+
+ gpu_device.deallocate(d_float);
+ gpu_device.deallocate(d_res_half);
+ gpu_device.deallocate(d_res_float);
+}
+
+
+#ifdef EIGEN_HAS_CUDA_FP16
+
+template<typename>
+void test_cuda_conversion() {
+ Eigen::CudaStreamDevice stream;
+ Eigen::GpuDevice gpu_device(&stream);
+ int num_elem = 101;
+
+ float* d_float = (float*)gpu_device.allocate(num_elem * sizeof(float));
+ Eigen::half* d_half = (Eigen::half*)gpu_device.allocate(num_elem * sizeof(Eigen::half));
+ float* d_conv = (float*)gpu_device.allocate(num_elem * sizeof(float));
+
+ Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_float(
+ d_float, num_elem);
+ Eigen::TensorMap<Eigen::Tensor<Eigen::half, 1>, Eigen::Aligned> gpu_half(
+ d_half, num_elem);
+ Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_conv(
+ d_conv, num_elem);
+
+ gpu_float.device(gpu_device) = gpu_float.random();
+ gpu_half.device(gpu_device) = gpu_float.cast<Eigen::half>();
+ gpu_conv.device(gpu_device) = gpu_half.cast<float>();
+
+ Tensor<float, 1> initial(num_elem);
+ Tensor<float, 1> final(num_elem);
+ gpu_device.memcpyDeviceToHost(initial.data(), d_float, num_elem*sizeof(float));
+  gpu_device.memcpyDeviceToHost(final.data(), d_conv, num_elem*sizeof(float));
+  gpu_device.synchronize();
+
+ for (int i = 0; i < num_elem; ++i) {
+ VERIFY_IS_APPROX(initial(i), final(i));
+ }
+
+ gpu_device.deallocate(d_float);
+ gpu_device.deallocate(d_half);
+ gpu_device.deallocate(d_conv);
+}
+
+template<typename>
+void test_cuda_unary() {
+ Eigen::CudaStreamDevice stream;
+ Eigen::GpuDevice gpu_device(&stream);
+ int num_elem = 101;
+
+ float* d_float = (float*)gpu_device.allocate(num_elem * sizeof(float));
+ float* d_res_half = (float*)gpu_device.allocate(num_elem * sizeof(float));
+ float* d_res_float = (float*)gpu_device.allocate(num_elem * sizeof(float));
+
+ Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_float(
+ d_float, num_elem);
+ Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_res_half(
+ d_res_half, num_elem);
+ Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_res_float(
+ d_res_float, num_elem);
+
+ gpu_float.device(gpu_device) = gpu_float.random() - gpu_float.constant(0.5f);
+ gpu_res_float.device(gpu_device) = gpu_float.abs();
+ gpu_res_half.device(gpu_device) = gpu_float.cast<Eigen::half>().abs().cast<float>();
+
+ Tensor<float, 1> half_prec(num_elem);
+ Tensor<float, 1> full_prec(num_elem);
+ gpu_device.memcpyDeviceToHost(half_prec.data(), d_res_half, num_elem*sizeof(float));
+ gpu_device.memcpyDeviceToHost(full_prec.data(), d_res_float, num_elem*sizeof(float));
+ gpu_device.synchronize();
+
+ for (int i = 0; i < num_elem; ++i) {
+ std::cout << "Checking unary " << i << std::endl;
+ VERIFY_IS_APPROX(full_prec(i), half_prec(i));
+ }
+
+ gpu_device.deallocate(d_float);
+ gpu_device.deallocate(d_res_half);
+ gpu_device.deallocate(d_res_float);
+}
+
+template<typename>
+void test_cuda_elementwise() {
+ Eigen::CudaStreamDevice stream;
+ Eigen::GpuDevice gpu_device(&stream);
+ int num_elem = 101;
+
+ float* d_float1 = (float*)gpu_device.allocate(num_elem * sizeof(float));
+ float* d_float2 = (float*)gpu_device.allocate(num_elem * sizeof(float));
+ float* d_res_half = (float*)gpu_device.allocate(num_elem * sizeof(float));
+ float* d_res_float = (float*)gpu_device.allocate(num_elem * sizeof(float));
+
+ Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_float1(
+ d_float1, num_elem);
+ Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_float2(
+ d_float2, num_elem);
+ Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_res_half(
+ d_res_half, num_elem);
+ Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_res_float(
+ d_res_float, num_elem);
+
+ gpu_float1.device(gpu_device) = gpu_float1.random();
+ gpu_float2.device(gpu_device) = gpu_float2.random();
+ gpu_res_float.device(gpu_device) = (gpu_float1 + gpu_float2) * gpu_float1;
+ gpu_res_half.device(gpu_device) = ((gpu_float1.cast<Eigen::half>() + gpu_float2.cast<Eigen::half>()) * gpu_float1.cast<Eigen::half>()).cast<float>();
+
+ Tensor<float, 1> half_prec(num_elem);
+ Tensor<float, 1> full_prec(num_elem);
+ gpu_device.memcpyDeviceToHost(half_prec.data(), d_res_half, num_elem*sizeof(float));
+ gpu_device.memcpyDeviceToHost(full_prec.data(), d_res_float, num_elem*sizeof(float));
+ gpu_device.synchronize();
+
+ for (int i = 0; i < num_elem; ++i) {
+ std::cout << "Checking elemwise " << i << ": full prec = " << full_prec(i) << " vs half prec = " << half_prec(i) << std::endl;
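+    // Round both results to half precision before comparing, since the
+    // half-precision computation cannot be more accurate than that.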
+ VERIFY_IS_APPROX(static_cast<Eigen::half>(full_prec(i)), static_cast<Eigen::half>(half_prec(i)));
+ }
+
+ gpu_device.deallocate(d_float1);
+ gpu_device.deallocate(d_float2);
+ gpu_device.deallocate(d_res_half);
+ gpu_device.deallocate(d_res_float);
+}
+
+template<typename>
+void test_cuda_transcendental() {
+ Eigen::CudaStreamDevice stream;
+ Eigen::GpuDevice gpu_device(&stream);
+ int num_elem = 101;
+
+ float* d_float1 = (float*)gpu_device.allocate(num_elem * sizeof(float));
+ float* d_float2 = (float*)gpu_device.allocate(num_elem * sizeof(float));
+ float* d_float3 = (float*)gpu_device.allocate(num_elem * sizeof(float));
+ Eigen::half* d_res1_half = (Eigen::half*)gpu_device.allocate(num_elem * sizeof(Eigen::half));
+ Eigen::half* d_res1_float = (Eigen::half*)gpu_device.allocate(num_elem * sizeof(Eigen::half));
+ Eigen::half* d_res2_half = (Eigen::half*)gpu_device.allocate(num_elem * sizeof(Eigen::half));
+ Eigen::half* d_res2_float = (Eigen::half*)gpu_device.allocate(num_elem * sizeof(Eigen::half));
+ Eigen::half* d_res3_half = (Eigen::half*)gpu_device.allocate(num_elem * sizeof(Eigen::half));
+ Eigen::half* d_res3_float = (Eigen::half*)gpu_device.allocate(num_elem * sizeof(Eigen::half));
+
+ Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_float1(d_float1, num_elem);
+ Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_float2(d_float2, num_elem);
+ Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_float3(d_float3, num_elem);
+ Eigen::TensorMap<Eigen::Tensor<Eigen::half, 1>, Eigen::Aligned> gpu_res1_half(d_res1_half, num_elem);
+ Eigen::TensorMap<Eigen::Tensor<Eigen::half, 1>, Eigen::Aligned> gpu_res1_float(d_res1_float, num_elem);
+ Eigen::TensorMap<Eigen::Tensor<Eigen::half, 1>, Eigen::Aligned> gpu_res2_half(d_res2_half, num_elem);
+ Eigen::TensorMap<Eigen::Tensor<Eigen::half, 1>, Eigen::Aligned> gpu_res2_float(d_res2_float, num_elem);
+ Eigen::TensorMap<Eigen::Tensor<Eigen::half, 1>, Eigen::Aligned> gpu_res3_half(d_res3_half, num_elem);
+ Eigen::TensorMap<Eigen::Tensor<Eigen::half, 1>, Eigen::Aligned> gpu_res3_float(d_res3_float, num_elem);
+
+ gpu_float1.device(gpu_device) = gpu_float1.random() - gpu_float1.constant(0.5f);
+ gpu_float2.device(gpu_device) = gpu_float2.random() + gpu_float1.constant(0.5f);
+ gpu_float3.device(gpu_device) = gpu_float3.random();
+ gpu_res1_float.device(gpu_device) = gpu_float1.exp().cast<Eigen::half>();
+ gpu_res2_float.device(gpu_device) = gpu_float2.log().cast<Eigen::half>();
+ gpu_res3_float.device(gpu_device) = gpu_float3.log1p().cast<Eigen::half>();
+
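+  // Repeat the same functions entirely in half precision, reusing the
+  // result buffers to hold the casted inputs.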
+ gpu_res1_half.device(gpu_device) = gpu_float1.cast<Eigen::half>();
+ gpu_res1_half.device(gpu_device) = gpu_res1_half.exp();
+
+ gpu_res2_half.device(gpu_device) = gpu_float2.cast<Eigen::half>();
+ gpu_res2_half.device(gpu_device) = gpu_res2_half.log();
+
+ gpu_res3_half.device(gpu_device) = gpu_float3.cast<Eigen::half>();
+ gpu_res3_half.device(gpu_device) = gpu_res3_half.log1p();
+
+ Tensor<float, 1> input1(num_elem);
+ Tensor<Eigen::half, 1> half_prec1(num_elem);
+ Tensor<Eigen::half, 1> full_prec1(num_elem);
+ Tensor<float, 1> input2(num_elem);
+ Tensor<Eigen::half, 1> half_prec2(num_elem);
+ Tensor<Eigen::half, 1> full_prec2(num_elem);
+ Tensor<float, 1> input3(num_elem);
+ Tensor<Eigen::half, 1> half_prec3(num_elem);
+ Tensor<Eigen::half, 1> full_prec3(num_elem);
+ gpu_device.memcpyDeviceToHost(input1.data(), d_float1, num_elem*sizeof(float));
+ gpu_device.memcpyDeviceToHost(input2.data(), d_float2, num_elem*sizeof(float));
+ gpu_device.memcpyDeviceToHost(input3.data(), d_float3, num_elem*sizeof(float));
+ gpu_device.memcpyDeviceToHost(half_prec1.data(), d_res1_half, num_elem*sizeof(Eigen::half));
+ gpu_device.memcpyDeviceToHost(full_prec1.data(), d_res1_float, num_elem*sizeof(Eigen::half));
+ gpu_device.memcpyDeviceToHost(half_prec2.data(), d_res2_half, num_elem*sizeof(Eigen::half));
+ gpu_device.memcpyDeviceToHost(full_prec2.data(), d_res2_float, num_elem*sizeof(Eigen::half));
+ gpu_device.memcpyDeviceToHost(half_prec3.data(), d_res3_half, num_elem*sizeof(Eigen::half));
+ gpu_device.memcpyDeviceToHost(full_prec3.data(), d_res3_float, num_elem*sizeof(Eigen::half));
+ gpu_device.synchronize();
+
+ for (int i = 0; i < num_elem; ++i) {
+ std::cout << "Checking elemwise exp " << i << " input = " << input1(i) << " full = " << full_prec1(i) << " half = " << half_prec1(i) << std::endl;
+ VERIFY_IS_APPROX(full_prec1(i), half_prec1(i));
+ }
+ for (int i = 0; i < num_elem; ++i) {
+ std::cout << "Checking elemwise log " << i << " input = " << input2(i) << " full = " << full_prec2(i) << " half = " << half_prec2(i) << std::endl;
+    if (std::abs(input2(i)-1.f) < 0.05f)  // log lacks accuracy near 1
+ VERIFY_IS_APPROX(full_prec2(i)+Eigen::half(0.1f), half_prec2(i)+Eigen::half(0.1f));
+ else
+ VERIFY_IS_APPROX(full_prec2(i), half_prec2(i));
+ }
+ for (int i = 0; i < num_elem; ++i) {
+    std::cout << "Checking elemwise log1p " << i << " input = " << input3(i) << " full = " << full_prec3(i) << " half = " << half_prec3(i) << std::endl;
+ VERIFY_IS_APPROX(full_prec3(i), half_prec3(i));
+ }
+ gpu_device.deallocate(d_float1);
+ gpu_device.deallocate(d_float2);
+ gpu_device.deallocate(d_float3);
+ gpu_device.deallocate(d_res1_half);
+ gpu_device.deallocate(d_res1_float);
+ gpu_device.deallocate(d_res2_half);
+ gpu_device.deallocate(d_res2_float);
+ gpu_device.deallocate(d_res3_float);
+ gpu_device.deallocate(d_res3_half);
+}
+
+template<typename>
+void test_cuda_contractions() {
+ Eigen::CudaStreamDevice stream;
+ Eigen::GpuDevice gpu_device(&stream);
+ int rows = 23;
+ int cols = 23;
+ int num_elem = rows*cols;
+
+ float* d_float1 = (float*)gpu_device.allocate(num_elem * sizeof(float));
+ float* d_float2 = (float*)gpu_device.allocate(num_elem * sizeof(float));
+ Eigen::half* d_res_half = (Eigen::half*)gpu_device.allocate(num_elem * sizeof(Eigen::half));
+ Eigen::half* d_res_float = (Eigen::half*)gpu_device.allocate(num_elem * sizeof(Eigen::half));
+
+ Eigen::TensorMap<Eigen::Tensor<float, 2>, Eigen::Aligned> gpu_float1(
+ d_float1, rows, cols);
+ Eigen::TensorMap<Eigen::Tensor<float, 2>, Eigen::Aligned> gpu_float2(
+ d_float2, rows, cols);
+ Eigen::TensorMap<Eigen::Tensor<Eigen::half, 2>, Eigen::Aligned> gpu_res_half(
+ d_res_half, rows, cols);
+ Eigen::TensorMap<Eigen::Tensor<Eigen::half, 2>, Eigen::Aligned> gpu_res_float(
+ d_res_float, rows, cols);
+
+ gpu_float1.device(gpu_device) = gpu_float1.random() - gpu_float1.constant(0.5f);
+ gpu_float2.device(gpu_device) = gpu_float2.random() - gpu_float2.constant(0.5f);
+
+ typedef Tensor<float, 2>::DimensionPair DimPair;
+ Eigen::array<DimPair, 1> dims(DimPair(1, 0));
+ gpu_res_float.device(gpu_device) = gpu_float1.contract(gpu_float2, dims).cast<Eigen::half>();
+ gpu_res_half.device(gpu_device) = gpu_float1.cast<Eigen::half>().contract(gpu_float2.cast<Eigen::half>(), dims);
+
+ Tensor<Eigen::half, 2> half_prec(rows, cols);
+ Tensor<Eigen::half, 2> full_prec(rows, cols);
+ gpu_device.memcpyDeviceToHost(half_prec.data(), d_res_half, num_elem*sizeof(Eigen::half));
+ gpu_device.memcpyDeviceToHost(full_prec.data(), d_res_float, num_elem*sizeof(Eigen::half));
+ gpu_device.synchronize();
+
+ for (int i = 0; i < rows; ++i) {
+ for (int j = 0; j < cols; ++j) {
+      std::cout << "Checking contract " << i << " " << j << ": " << full_prec(i, j) << " vs " << half_prec(i, j) << std::endl;
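+      // Half-precision accumulation over the contraction drifts, so only
+      // report mismatches larger than 1e-2.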
+ if (numext::abs(full_prec(i, j) - half_prec(i, j)) > Eigen::half(1e-2f)) {
+ VERIFY_IS_APPROX(full_prec(i, j), half_prec(i, j));
+ }
+ }
+ }
+
+ gpu_device.deallocate(d_float1);
+ gpu_device.deallocate(d_float2);
+ gpu_device.deallocate(d_res_half);
+ gpu_device.deallocate(d_res_float);
+}
+
+template<typename>
+void test_cuda_reductions(int size1, int size2, int redux) {
+
+ std::cout << "Reducing " << size1 << " by " << size2
+ << " tensor along dim " << redux << std::endl;
+
+ Eigen::CudaStreamDevice stream;
+ Eigen::GpuDevice gpu_device(&stream);
+ int num_elem = size1*size2;
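+  // Reducing along dimension 1 leaves size1 outputs; along dimension 0, size2.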
+ int result_size = (redux == 1 ? size1 : size2);
+
+ float* d_float1 = (float*)gpu_device.allocate(num_elem * sizeof(float));
+ float* d_float2 = (float*)gpu_device.allocate(num_elem * sizeof(float));
+ Eigen::half* d_res_half = (Eigen::half*)gpu_device.allocate(result_size * sizeof(Eigen::half));
+ Eigen::half* d_res_float = (Eigen::half*)gpu_device.allocate(result_size * sizeof(Eigen::half));
+
+ Eigen::TensorMap<Eigen::Tensor<float, 2>, Eigen::Aligned> gpu_float1(
+ d_float1, size1, size2);
+ Eigen::TensorMap<Eigen::Tensor<float, 2>, Eigen::Aligned> gpu_float2(
+ d_float2, size1, size2);
+ Eigen::TensorMap<Eigen::Tensor<Eigen::half, 1>, Eigen::Aligned> gpu_res_half(
+ d_res_half, result_size);
+ Eigen::TensorMap<Eigen::Tensor<Eigen::half, 1>, Eigen::Aligned> gpu_res_float(
+ d_res_float, result_size);
+
+ gpu_float1.device(gpu_device) = gpu_float1.random() * 2.0f;
+ gpu_float2.device(gpu_device) = gpu_float2.random() * 2.0f;
+
+ Eigen::array<int, 1> redux_dim = {{redux}};
+ gpu_res_float.device(gpu_device) = gpu_float1.sum(redux_dim).cast<Eigen::half>();
+ gpu_res_half.device(gpu_device) = gpu_float1.cast<Eigen::half>().sum(redux_dim);
+
+ Tensor<Eigen::half, 1> half_prec(result_size);
+ Tensor<Eigen::half, 1> full_prec(result_size);
+ gpu_device.memcpyDeviceToHost(half_prec.data(), d_res_half, result_size*sizeof(Eigen::half));
+ gpu_device.memcpyDeviceToHost(full_prec.data(), d_res_float, result_size*sizeof(Eigen::half));
+ gpu_device.synchronize();
+
+ for (int i = 0; i < result_size; ++i) {
+ std::cout << "EXPECTED " << full_prec(i) << " GOT " << half_prec(i) << std::endl;
+ VERIFY_IS_APPROX(full_prec(i), half_prec(i));
+ }
+
+ gpu_device.deallocate(d_float1);
+ gpu_device.deallocate(d_float2);
+ gpu_device.deallocate(d_res_half);
+ gpu_device.deallocate(d_res_float);
+}
+
+template<typename>
+void test_cuda_reductions() {
+ test_cuda_reductions<void>(13, 13, 0);
+ test_cuda_reductions<void>(13, 13, 1);
+
+ test_cuda_reductions<void>(35, 36, 0);
+ test_cuda_reductions<void>(35, 36, 1);
+
+ test_cuda_reductions<void>(36, 35, 0);
+ test_cuda_reductions<void>(36, 35, 1);
+}
+
+template<typename>
+void test_cuda_full_reductions() {
+ Eigen::CudaStreamDevice stream;
+ Eigen::GpuDevice gpu_device(&stream);
+ int size = 13;
+ int num_elem = size*size;
+
+ float* d_float1 = (float*)gpu_device.allocate(num_elem * sizeof(float));
+ float* d_float2 = (float*)gpu_device.allocate(num_elem * sizeof(float));
+ Eigen::half* d_res_half = (Eigen::half*)gpu_device.allocate(1 * sizeof(Eigen::half));
+ Eigen::half* d_res_float = (Eigen::half*)gpu_device.allocate(1 * sizeof(Eigen::half));
+
+ Eigen::TensorMap<Eigen::Tensor<float, 2>, Eigen::Aligned> gpu_float1(
+ d_float1, size, size);
+ Eigen::TensorMap<Eigen::Tensor<float, 2>, Eigen::Aligned> gpu_float2(
+ d_float2, size, size);
+ Eigen::TensorMap<Eigen::Tensor<Eigen::half, 0>, Eigen::Aligned> gpu_res_half(
+ d_res_half);
+ Eigen::TensorMap<Eigen::Tensor<Eigen::half, 0>, Eigen::Aligned> gpu_res_float(
+ d_res_float);
+
+ gpu_float1.device(gpu_device) = gpu_float1.random();
+ gpu_float2.device(gpu_device) = gpu_float2.random();
+
+ gpu_res_float.device(gpu_device) = gpu_float1.sum().cast<Eigen::half>();
+ gpu_res_half.device(gpu_device) = gpu_float1.cast<Eigen::half>().sum();
+
+ Tensor<Eigen::half, 0> half_prec;
+ Tensor<Eigen::half, 0> full_prec;
+ gpu_device.memcpyDeviceToHost(half_prec.data(), d_res_half, sizeof(Eigen::half));
+ gpu_device.memcpyDeviceToHost(full_prec.data(), d_res_float, sizeof(Eigen::half));
+ gpu_device.synchronize();
+
+ VERIFY_IS_APPROX(full_prec(), half_prec());
+
+ gpu_res_float.device(gpu_device) = gpu_float1.maximum().cast<Eigen::half>();
+ gpu_res_half.device(gpu_device) = gpu_float1.cast<Eigen::half>().maximum();
+ gpu_device.memcpyDeviceToHost(half_prec.data(), d_res_half, sizeof(Eigen::half));
+ gpu_device.memcpyDeviceToHost(full_prec.data(), d_res_float, sizeof(Eigen::half));
+ gpu_device.synchronize();
+
+ VERIFY_IS_APPROX(full_prec(), half_prec());
+
+ gpu_device.deallocate(d_float1);
+ gpu_device.deallocate(d_float2);
+ gpu_device.deallocate(d_res_half);
+ gpu_device.deallocate(d_res_float);
+}
+
+template<typename>
+void test_cuda_forced_evals() {
+
+ Eigen::CudaStreamDevice stream;
+ Eigen::GpuDevice gpu_device(&stream);
+ int num_elem = 101;
+
+ float* d_float = (float*)gpu_device.allocate(num_elem * sizeof(float));
+ float* d_res_half1 = (float*)gpu_device.allocate(num_elem * sizeof(float));
+ float* d_res_half2 = (float*)gpu_device.allocate(num_elem * sizeof(float));
+ float* d_res_float = (float*)gpu_device.allocate(num_elem * sizeof(float));
+
+ Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_float(
+ d_float, num_elem);
+ Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_res_half1(
+ d_res_half1, num_elem);
+ Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Unaligned> gpu_res_half2(
+ d_res_half2, num_elem);
+ Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_res_float(
+ d_res_float, num_elem);
+
+ Eigen::array<int, 1> no_bcast;
+ no_bcast[0] = 1;
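+  // Broadcasting by a factor of 1 leaves the values unchanged, but still
+  // routes the forced evaluation through the broadcasting code path.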
+
+ gpu_float.device(gpu_device) = gpu_float.random() - gpu_float.constant(0.5f);
+ gpu_res_float.device(gpu_device) = gpu_float.abs();
+ gpu_res_half1.device(gpu_device) = gpu_float.cast<Eigen::half>().abs().eval().cast<float>();
+ gpu_res_half2.device(gpu_device) = gpu_float.cast<Eigen::half>().abs().broadcast(no_bcast).eval().cast<float>();
+
+ Tensor<float, 1> half_prec1(num_elem);
+ Tensor<float, 1> half_prec2(num_elem);
+ Tensor<float, 1> full_prec(num_elem);
+ gpu_device.memcpyDeviceToHost(half_prec1.data(), d_res_half1, num_elem*sizeof(float));
+  gpu_device.memcpyDeviceToHost(half_prec2.data(), d_res_half2, num_elem*sizeof(float));
+ gpu_device.memcpyDeviceToHost(full_prec.data(), d_res_float, num_elem*sizeof(float));
+ gpu_device.synchronize();
+
+ for (int i = 0; i < num_elem; ++i) {
+    std::cout << "Checking forced eval " << i << ": " << full_prec(i) << " vs " << half_prec1(i) << " vs " << half_prec2(i) << std::endl;
+ VERIFY_IS_APPROX(full_prec(i), half_prec1(i));
+ VERIFY_IS_APPROX(full_prec(i), half_prec2(i));
+ }
+
+ gpu_device.deallocate(d_float);
+ gpu_device.deallocate(d_res_half1);
+ gpu_device.deallocate(d_res_half2);
+ gpu_device.deallocate(d_res_float);
+}
+#endif
+
+
+void test_cxx11_tensor_of_float16_cuda()
+{
+ CALL_SUBTEST_1(test_cuda_numext<void>());
+
+#ifdef EIGEN_HAS_CUDA_FP16
+ CALL_SUBTEST_1(test_cuda_conversion<void>());
+ CALL_SUBTEST_1(test_cuda_unary<void>());
+ CALL_SUBTEST_1(test_cuda_elementwise<void>());
+  CALL_SUBTEST_1(test_cuda_transcendental<void>());
+ CALL_SUBTEST_2(test_cuda_contractions<void>());
+ CALL_SUBTEST_3(test_cuda_reductions<void>());
+ CALL_SUBTEST_4(test_cuda_full_reductions<void>());
+ CALL_SUBTEST_5(test_cuda_forced_evals<void>());
+#else
+  std::cout << "Half floats are not supported by this version of CUDA: skipping the test" << std::endl;
+#endif
+}
diff --git a/unsupported/test/cxx11_tensor_of_strings.cpp b/unsupported/test/cxx11_tensor_of_strings.cpp
new file mode 100644
index 000000000..4ef9aed91
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_of_strings.cpp
@@ -0,0 +1,152 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+
+#include <Eigen/CXX11/Tensor>
+
+using Eigen::Tensor;
+using Eigen::TensorMap;
+
+static void test_assign()
+{
+ std::string data1[6];
+ TensorMap<Tensor<std::string, 2>> mat1(data1, 2, 3);
+ std::string data2[6];
+ const TensorMap<Tensor<const std::string, 2>> mat2(data2, 2, 3);
+
+ for (int i = 0; i < 6; ++i) {
+ std::ostringstream s1;
+ s1 << "abc" << i*3;
+ data1[i] = s1.str();
+ std::ostringstream s2;
+ s2 << "def" << i*5;
+ data2[i] = s2.str();
+ }
+
+ Tensor<std::string, 2> rslt1;
+ rslt1 = mat1;
+ Tensor<std::string, 2> rslt2;
+ rslt2 = mat2;
+
+ Tensor<std::string, 2> rslt3 = mat1;
+ Tensor<std::string, 2> rslt4 = mat2;
+
+ Tensor<std::string, 2> rslt5(mat1);
+ Tensor<std::string, 2> rslt6(mat2);
+
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ VERIFY_IS_EQUAL(rslt1(i,j), data1[i+2*j]);
+ VERIFY_IS_EQUAL(rslt2(i,j), data2[i+2*j]);
+ VERIFY_IS_EQUAL(rslt3(i,j), data1[i+2*j]);
+ VERIFY_IS_EQUAL(rslt4(i,j), data2[i+2*j]);
+ VERIFY_IS_EQUAL(rslt5(i,j), data1[i+2*j]);
+ VERIFY_IS_EQUAL(rslt6(i,j), data2[i+2*j]);
+ }
+ }
+}
+
+
+static void test_concat()
+{
+ Tensor<std::string, 2> t1(2, 3);
+ Tensor<std::string, 2> t2(2, 3);
+
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ std::ostringstream s1;
+ s1 << "abc" << i + j*2;
+ t1(i, j) = s1.str();
+ std::ostringstream s2;
+ s2 << "def" << i*5 + j*32;
+ t2(i, j) = s2.str();
+ }
+ }
+
+ Tensor<std::string, 2> result = t1.concatenate(t2, 1);
+ VERIFY_IS_EQUAL(result.dimension(0), 2);
+ VERIFY_IS_EQUAL(result.dimension(1), 6);
+
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ VERIFY_IS_EQUAL(result(i, j), t1(i, j));
+ VERIFY_IS_EQUAL(result(i, j+3), t2(i, j));
+ }
+ }
+}
+
+
+static void test_slices()
+{
+ Tensor<std::string, 2> data(2, 6);
+ for (int i = 0; i < 2; ++i) {
+    for (int j = 0; j < 6; ++j) {
+ std::ostringstream s1;
+ s1 << "abc" << i + j*2;
+ data(i, j) = s1.str();
+ }
+ }
+
+ const Eigen::DSizes<ptrdiff_t, 2> half_size(2, 3);
+ const Eigen::DSizes<ptrdiff_t, 2> first_half(0, 0);
+ const Eigen::DSizes<ptrdiff_t, 2> second_half(0, 3);
+
+ Tensor<std::string, 2> t1 = data.slice(first_half, half_size);
+ Tensor<std::string, 2> t2 = data.slice(second_half, half_size);
+
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ VERIFY_IS_EQUAL(data(i, j), t1(i, j));
+ VERIFY_IS_EQUAL(data(i, j+3), t2(i, j));
+ }
+ }
+}
+
+
+static void test_additions()
+{
+ Tensor<std::string, 1> data1(3);
+ Tensor<std::string, 1> data2(3);
+ for (int i = 0; i < 3; ++i) {
+ data1(i) = "abc";
+ std::ostringstream s1;
+ s1 << i;
+ data2(i) = s1.str();
+ }
+
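+  // For std::string, the elementwise sum concatenates the strings.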
+ Tensor<std::string, 1> sum = data1 + data2;
+ for (int i = 0; i < 3; ++i) {
+ std::ostringstream concat;
+ concat << "abc" << i;
+ std::string expected = concat.str();
+ VERIFY_IS_EQUAL(sum(i), expected);
+ }
+}
+
+
+static void test_initialization()
+{
+ Tensor<std::string, 2> a(2, 3);
+ a.setConstant(std::string("foo"));
+ for (int i = 0; i < 2*3; ++i) {
+ VERIFY_IS_EQUAL(a(i), std::string("foo"));
+ }
+}
+
+
+void test_cxx11_tensor_of_strings()
+{
+ // Beware: none of this is likely to ever work on a GPU.
+ CALL_SUBTEST(test_assign());
+ CALL_SUBTEST(test_concat());
+ CALL_SUBTEST(test_slices());
+ CALL_SUBTEST(test_additions());
+ CALL_SUBTEST(test_initialization());
+}
diff --git a/unsupported/test/cxx11_tensor_padding.cpp b/unsupported/test/cxx11_tensor_padding.cpp
new file mode 100644
index 000000000..ffa19896e
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_padding.cpp
@@ -0,0 +1,93 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+
+#include <Eigen/CXX11/Tensor>
+
+using Eigen::Tensor;
+
+template<int DataLayout>
+static void test_simple_padding()
+{
+ Tensor<float, 4, DataLayout> tensor(2,3,5,7);
+ tensor.setRandom();
+
+ array<std::pair<ptrdiff_t, ptrdiff_t>, 4> paddings;
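+  // Each pair specifies how much padding to add (before, after) along the
+  // corresponding dimension.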
+ paddings[0] = std::make_pair(0, 0);
+ paddings[1] = std::make_pair(2, 1);
+ paddings[2] = std::make_pair(3, 4);
+ paddings[3] = std::make_pair(0, 0);
+
+ Tensor<float, 4, DataLayout> padded;
+ padded = tensor.pad(paddings);
+
+ VERIFY_IS_EQUAL(padded.dimension(0), 2+0);
+ VERIFY_IS_EQUAL(padded.dimension(1), 3+3);
+ VERIFY_IS_EQUAL(padded.dimension(2), 5+7);
+ VERIFY_IS_EQUAL(padded.dimension(3), 7+0);
+
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 6; ++j) {
+ for (int k = 0; k < 12; ++k) {
+ for (int l = 0; l < 7; ++l) {
+ if (j >= 2 && j < 5 && k >= 3 && k < 8) {
+ VERIFY_IS_EQUAL(padded(i,j,k,l), tensor(i,j-2,k-3,l));
+ } else {
+ VERIFY_IS_EQUAL(padded(i,j,k,l), 0.0f);
+ }
+ }
+ }
+ }
+ }
+}
+
+template<int DataLayout>
+static void test_padded_expr()
+{
+ Tensor<float, 4, DataLayout> tensor(2,3,5,7);
+ tensor.setRandom();
+
+ array<std::pair<ptrdiff_t, ptrdiff_t>, 4> paddings;
+ paddings[0] = std::make_pair(0, 0);
+ paddings[1] = std::make_pair(2, 1);
+ paddings[2] = std::make_pair(3, 4);
+ paddings[3] = std::make_pair(0, 0);
+
+ Eigen::DSizes<ptrdiff_t, 2> reshape_dims;
+ reshape_dims[0] = 12;
+ reshape_dims[1] = 84;
+
+ Tensor<float, 2, DataLayout> result;
+ result = tensor.pad(paddings).reshape(reshape_dims);
+
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 6; ++j) {
+ for (int k = 0; k < 12; ++k) {
+ for (int l = 0; l < 7; ++l) {
+ const float result_value = DataLayout == ColMajor ?
+ result(i+2*j,k+12*l) : result(j+6*i,l+7*k);
+ if (j >= 2 && j < 5 && k >= 3 && k < 8) {
+ VERIFY_IS_EQUAL(result_value, tensor(i,j-2,k-3,l));
+ } else {
+ VERIFY_IS_EQUAL(result_value, 0.0f);
+ }
+ }
+ }
+ }
+ }
+}
+
+void test_cxx11_tensor_padding()
+{
+ CALL_SUBTEST(test_simple_padding<ColMajor>());
+ CALL_SUBTEST(test_simple_padding<RowMajor>());
+ CALL_SUBTEST(test_padded_expr<ColMajor>());
+ CALL_SUBTEST(test_padded_expr<RowMajor>());
+}
diff --git a/unsupported/test/cxx11_tensor_patch.cpp b/unsupported/test/cxx11_tensor_patch.cpp
new file mode 100644
index 000000000..434359730
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_patch.cpp
@@ -0,0 +1,172 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+
+#include <Eigen/CXX11/Tensor>
+
+using Eigen::Tensor;
+
+template<int DataLayout>
+static void test_simple_patch()
+{
+ Tensor<float, 4, DataLayout> tensor(2,3,5,7);
+ tensor.setRandom();
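+  // extract_patches() keeps the input rank and adds one dimension indexing
+  // the patches: the last dimension for ColMajor, the first for RowMajor.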
+ array<ptrdiff_t, 4> patch_dims;
+
+ patch_dims[0] = 1;
+ patch_dims[1] = 1;
+ patch_dims[2] = 1;
+ patch_dims[3] = 1;
+
+ Tensor<float, 5, DataLayout> no_patch;
+ no_patch = tensor.extract_patches(patch_dims);
+
+ if (DataLayout == ColMajor) {
+ VERIFY_IS_EQUAL(no_patch.dimension(0), 1);
+ VERIFY_IS_EQUAL(no_patch.dimension(1), 1);
+ VERIFY_IS_EQUAL(no_patch.dimension(2), 1);
+ VERIFY_IS_EQUAL(no_patch.dimension(3), 1);
+ VERIFY_IS_EQUAL(no_patch.dimension(4), tensor.size());
+ } else {
+ VERIFY_IS_EQUAL(no_patch.dimension(0), tensor.size());
+ VERIFY_IS_EQUAL(no_patch.dimension(1), 1);
+ VERIFY_IS_EQUAL(no_patch.dimension(2), 1);
+ VERIFY_IS_EQUAL(no_patch.dimension(3), 1);
+ VERIFY_IS_EQUAL(no_patch.dimension(4), 1);
+ }
+
+ for (int i = 0; i < tensor.size(); ++i) {
+ VERIFY_IS_EQUAL(tensor.data()[i], no_patch.data()[i]);
+ }
+
+ patch_dims[0] = 2;
+ patch_dims[1] = 3;
+ patch_dims[2] = 5;
+ patch_dims[3] = 7;
+ Tensor<float, 5, DataLayout> single_patch;
+ single_patch = tensor.extract_patches(patch_dims);
+
+ if (DataLayout == ColMajor) {
+ VERIFY_IS_EQUAL(single_patch.dimension(0), 2);
+ VERIFY_IS_EQUAL(single_patch.dimension(1), 3);
+ VERIFY_IS_EQUAL(single_patch.dimension(2), 5);
+ VERIFY_IS_EQUAL(single_patch.dimension(3), 7);
+ VERIFY_IS_EQUAL(single_patch.dimension(4), 1);
+ } else {
+ VERIFY_IS_EQUAL(single_patch.dimension(0), 1);
+ VERIFY_IS_EQUAL(single_patch.dimension(1), 2);
+ VERIFY_IS_EQUAL(single_patch.dimension(2), 3);
+ VERIFY_IS_EQUAL(single_patch.dimension(3), 5);
+ VERIFY_IS_EQUAL(single_patch.dimension(4), 7);
+ }
+
+ for (int i = 0; i < tensor.size(); ++i) {
+ VERIFY_IS_EQUAL(tensor.data()[i], single_patch.data()[i]);
+ }
+
+ patch_dims[0] = 1;
+ patch_dims[1] = 2;
+ patch_dims[2] = 2;
+ patch_dims[3] = 1;
+ Tensor<float, 5, DataLayout> twod_patch;
+ twod_patch = tensor.extract_patches(patch_dims);
+
+ if (DataLayout == ColMajor) {
+ VERIFY_IS_EQUAL(twod_patch.dimension(0), 1);
+ VERIFY_IS_EQUAL(twod_patch.dimension(1), 2);
+ VERIFY_IS_EQUAL(twod_patch.dimension(2), 2);
+ VERIFY_IS_EQUAL(twod_patch.dimension(3), 1);
+ VERIFY_IS_EQUAL(twod_patch.dimension(4), 2*2*4*7);
+ } else {
+ VERIFY_IS_EQUAL(twod_patch.dimension(0), 2*2*4*7);
+ VERIFY_IS_EQUAL(twod_patch.dimension(1), 1);
+ VERIFY_IS_EQUAL(twod_patch.dimension(2), 2);
+ VERIFY_IS_EQUAL(twod_patch.dimension(3), 2);
+ VERIFY_IS_EQUAL(twod_patch.dimension(4), 1);
+ }
+
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 2; ++j) {
+ for (int k = 0; k < 4; ++k) {
+ for (int l = 0; l < 7; ++l) {
+ int patch_loc;
+ if (DataLayout == ColMajor) {
+ patch_loc = i + 2 * (j + 2 * (k + 4 * l));
+ } else {
+ patch_loc = l + 7 * (k + 4 * (j + 2 * i));
+ }
+ for (int x = 0; x < 2; ++x) {
+ for (int y = 0; y < 2; ++y) {
+ if (DataLayout == ColMajor) {
+ VERIFY_IS_EQUAL(tensor(i,j+x,k+y,l), twod_patch(0,x,y,0,patch_loc));
+ } else {
+ VERIFY_IS_EQUAL(tensor(i,j+x,k+y,l), twod_patch(patch_loc,0,x,y,0));
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ patch_dims[0] = 1;
+ patch_dims[1] = 2;
+ patch_dims[2] = 3;
+ patch_dims[3] = 5;
+ Tensor<float, 5, DataLayout> threed_patch;
+ threed_patch = tensor.extract_patches(patch_dims);
+
+ if (DataLayout == ColMajor) {
+ VERIFY_IS_EQUAL(threed_patch.dimension(0), 1);
+ VERIFY_IS_EQUAL(threed_patch.dimension(1), 2);
+ VERIFY_IS_EQUAL(threed_patch.dimension(2), 3);
+ VERIFY_IS_EQUAL(threed_patch.dimension(3), 5);
+ VERIFY_IS_EQUAL(threed_patch.dimension(4), 2*2*3*3);
+ } else {
+ VERIFY_IS_EQUAL(threed_patch.dimension(0), 2*2*3*3);
+ VERIFY_IS_EQUAL(threed_patch.dimension(1), 1);
+ VERIFY_IS_EQUAL(threed_patch.dimension(2), 2);
+ VERIFY_IS_EQUAL(threed_patch.dimension(3), 3);
+ VERIFY_IS_EQUAL(threed_patch.dimension(4), 5);
+ }
+
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 2; ++j) {
+ for (int k = 0; k < 3; ++k) {
+ for (int l = 0; l < 3; ++l) {
+ int patch_loc;
+ if (DataLayout == ColMajor) {
+ patch_loc = i + 2 * (j + 2 * (k + 3 * l));
+ } else {
+ patch_loc = l + 3 * (k + 3 * (j + 2 * i));
+ }
+ for (int x = 0; x < 2; ++x) {
+ for (int y = 0; y < 3; ++y) {
+ for (int z = 0; z < 5; ++z) {
+ if (DataLayout == ColMajor) {
+ VERIFY_IS_EQUAL(tensor(i,j+x,k+y,l+z), threed_patch(0,x,y,z,patch_loc));
+ } else {
+ VERIFY_IS_EQUAL(tensor(i,j+x,k+y,l+z), threed_patch(patch_loc,0,x,y,z));
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+void test_cxx11_tensor_patch()
+{
+ CALL_SUBTEST(test_simple_patch<ColMajor>());
+ CALL_SUBTEST(test_simple_patch<RowMajor>());
+}
diff --git a/unsupported/test/cxx11_tensor_random.cpp b/unsupported/test/cxx11_tensor_random.cpp
new file mode 100644
index 000000000..0f3dc5787
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_random.cpp
@@ -0,0 +1,78 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+
+#include <Eigen/CXX11/Tensor>
+
+static void test_default()
+{
+ Tensor<float, 1> vec(6);
+ vec.setRandom();
+
+ // Fixme: we should check that the generated numbers follow a uniform
+ // distribution instead.
+ for (int i = 1; i < 6; ++i) {
+ VERIFY_IS_NOT_EQUAL(vec(i), vec(i-1));
+ }
+}
+
+static void test_normal()
+{
+ Tensor<float, 1> vec(6);
+ vec.setRandom<Eigen::internal::NormalRandomGenerator<float>>();
+
+ // Fixme: we should check that the generated numbers follow a gaussian
+ // distribution instead.
+ for (int i = 1; i < 6; ++i) {
+ VERIFY_IS_NOT_EQUAL(vec(i), vec(i-1));
+ }
+}
+
+
+struct MyGenerator {
+ MyGenerator() { }
+ MyGenerator(const MyGenerator&) { }
+
+ // Return a random value to be used. "element_location" is the
+  // location of the entry to set in the tensor; it can typically
+ // be ignored.
+ int operator()(Eigen::DenseIndex element_location, Eigen::DenseIndex /*unused*/ = 0) const {
+ return static_cast<int>(3 * element_location);
+ }
+
+ // Same as above but generates several numbers at a time.
+ internal::packet_traits<int>::type packetOp(
+ Eigen::DenseIndex packet_location, Eigen::DenseIndex /*unused*/ = 0) const {
+ const int packetSize = internal::packet_traits<int>::size;
+ EIGEN_ALIGN_MAX int values[packetSize];
+ for (int i = 0; i < packetSize; ++i) {
+ values[i] = static_cast<int>(3 * (packet_location + i));
+ }
+ return internal::pload<typename internal::packet_traits<int>::type>(values);
+ }
+};
+
+
+static void test_custom()
+{
+ Tensor<int, 1> vec(6);
+ vec.setRandom<MyGenerator>();
+
+ for (int i = 0; i < 6; ++i) {
+ VERIFY_IS_EQUAL(vec(i), 3*i);
+ }
+}
+
+void test_cxx11_tensor_random()
+{
+ CALL_SUBTEST(test_default());
+ CALL_SUBTEST(test_normal());
+ CALL_SUBTEST(test_custom());
+}
diff --git a/unsupported/test/cxx11_tensor_random_cuda.cu b/unsupported/test/cxx11_tensor_random_cuda.cu
new file mode 100644
index 000000000..b3be199e1
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_random_cuda.cu
@@ -0,0 +1,88 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#define EIGEN_TEST_NO_LONGDOUBLE
+#define EIGEN_TEST_NO_COMPLEX
+#define EIGEN_TEST_FUNC cxx11_tensor_random_cuda
+#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int
+#define EIGEN_USE_GPU
+
+#if defined __CUDACC_VER__ && __CUDACC_VER__ >= 70500
+#include <cuda_fp16.h>
+#endif
+#include "main.h"
+#include <Eigen/CXX11/Tensor>
+
+
+void test_cuda_random_uniform()
+{
+ Tensor<float, 2> out(72,97);
+ out.setZero();
+
+ std::size_t out_bytes = out.size() * sizeof(float);
+
+ float* d_out;
+ cudaMalloc((void**)(&d_out), out_bytes);
+
+ Eigen::CudaStreamDevice stream;
+ Eigen::GpuDevice gpu_device(&stream);
+
+ Eigen::TensorMap<Eigen::Tensor<float, 2> > gpu_out(d_out, 72,97);
+
+ gpu_out.device(gpu_device) = gpu_out.random();
+
+ assert(cudaMemcpyAsync(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
+ assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
+
+  // For now we just check that the code doesn't crash.
+ // TODO: come up with a valid test of randomness
+}
+
+
+void test_cuda_random_normal()
+{
+ Tensor<float, 2> out(72,97);
+ out.setZero();
+
+ std::size_t out_bytes = out.size() * sizeof(float);
+
+ float* d_out;
+ cudaMalloc((void**)(&d_out), out_bytes);
+
+ Eigen::CudaStreamDevice stream;
+ Eigen::GpuDevice gpu_device(&stream);
+
+ Eigen::TensorMap<Eigen::Tensor<float, 2> > gpu_out(d_out, 72,97);
+
+ Eigen::internal::NormalRandomGenerator<float> gen(true);
+ gpu_out.device(gpu_device) = gpu_out.random(gen);
+
+ assert(cudaMemcpyAsync(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
+ assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
+}
+
+static void test_complex()
+{
+ Tensor<std::complex<float>, 1> vec(6);
+ vec.setRandom();
+
+ // Fixme: we should check that the generated numbers follow a uniform
+ // distribution instead.
+ for (int i = 1; i < 6; ++i) {
+ VERIFY_IS_NOT_EQUAL(vec(i), vec(i-1));
+ }
+}
+
+
+void test_cxx11_tensor_random_cuda()
+{
+ CALL_SUBTEST(test_cuda_random_uniform());
+ CALL_SUBTEST(test_cuda_random_normal());
+ CALL_SUBTEST(test_complex());
+}
diff --git a/unsupported/test/cxx11_tensor_reduction.cpp b/unsupported/test/cxx11_tensor_reduction.cpp
new file mode 100644
index 000000000..1490ec3da
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_reduction.cpp
@@ -0,0 +1,508 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+#include <limits>
+#include <numeric>
+#include <Eigen/CXX11/Tensor>
+
+using Eigen::Tensor;
+
+template <int DataLayout>
+static void test_trivial_reductions() {
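+  // Reducing over an empty set of axes must leave the tensor unchanged.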
+ {
+ Tensor<float, 0, DataLayout> tensor;
+ tensor.setRandom();
+ array<ptrdiff_t, 0> reduction_axis;
+
+ Tensor<float, 0, DataLayout> result = tensor.sum(reduction_axis);
+ VERIFY_IS_EQUAL(result(), tensor());
+ }
+
+ {
+ Tensor<float, 1, DataLayout> tensor(7);
+ tensor.setRandom();
+ array<ptrdiff_t, 0> reduction_axis;
+
+ Tensor<float, 1, DataLayout> result = tensor.sum(reduction_axis);
+ VERIFY_IS_EQUAL(result.dimension(0), 7);
+ for (int i = 0; i < 7; ++i) {
+ VERIFY_IS_EQUAL(result(i), tensor(i));
+ }
+ }
+
+ {
+ Tensor<float, 2, DataLayout> tensor(2, 3);
+ tensor.setRandom();
+ array<ptrdiff_t, 0> reduction_axis;
+
+ Tensor<float, 2, DataLayout> result = tensor.sum(reduction_axis);
+ VERIFY_IS_EQUAL(result.dimension(0), 2);
+ VERIFY_IS_EQUAL(result.dimension(1), 3);
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ VERIFY_IS_EQUAL(result(i, j), tensor(i, j));
+ }
+ }
+ }
+}
+
+template <int DataLayout>
+static void test_simple_reductions() {
+ Tensor<float, 4, DataLayout> tensor(2, 3, 5, 7);
+ tensor.setRandom();
+ array<ptrdiff_t, 2> reduction_axis2;
+ reduction_axis2[0] = 1;
+ reduction_axis2[1] = 3;
+
+ Tensor<float, 2, DataLayout> result = tensor.sum(reduction_axis2);
+ VERIFY_IS_EQUAL(result.dimension(0), 2);
+ VERIFY_IS_EQUAL(result.dimension(1), 5);
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 5; ++j) {
+ float sum = 0.0f;
+ for (int k = 0; k < 3; ++k) {
+ for (int l = 0; l < 7; ++l) {
+ sum += tensor(i, k, j, l);
+ }
+ }
+ VERIFY_IS_APPROX(result(i, j), sum);
+ }
+ }
+
+ {
+ Tensor<float, 0, DataLayout> sum1 = tensor.sum();
+ VERIFY_IS_EQUAL(sum1.rank(), 0);
+
+ array<ptrdiff_t, 4> reduction_axis4;
+ reduction_axis4[0] = 0;
+ reduction_axis4[1] = 1;
+ reduction_axis4[2] = 2;
+ reduction_axis4[3] = 3;
+ Tensor<float, 0, DataLayout> sum2 = tensor.sum(reduction_axis4);
+ VERIFY_IS_EQUAL(sum2.rank(), 0);
+
+ VERIFY_IS_APPROX(sum1(), sum2());
+ }
+
+ reduction_axis2[0] = 0;
+ reduction_axis2[1] = 2;
+ result = tensor.prod(reduction_axis2);
+ VERIFY_IS_EQUAL(result.dimension(0), 3);
+ VERIFY_IS_EQUAL(result.dimension(1), 7);
+ for (int i = 0; i < 3; ++i) {
+ for (int j = 0; j < 7; ++j) {
+ float prod = 1.0f;
+ for (int k = 0; k < 2; ++k) {
+ for (int l = 0; l < 5; ++l) {
+ prod *= tensor(k, i, l, j);
+ }
+ }
+ VERIFY_IS_APPROX(result(i, j), prod);
+ }
+ }
+
+ {
+ Tensor<float, 0, DataLayout> prod1 = tensor.prod();
+ VERIFY_IS_EQUAL(prod1.rank(), 0);
+
+ array<ptrdiff_t, 4> reduction_axis4;
+ reduction_axis4[0] = 0;
+ reduction_axis4[1] = 1;
+ reduction_axis4[2] = 2;
+ reduction_axis4[3] = 3;
+ Tensor<float, 0, DataLayout> prod2 = tensor.prod(reduction_axis4);
+ VERIFY_IS_EQUAL(prod2.rank(), 0);
+
+ VERIFY_IS_APPROX(prod1(), prod2());
+ }
+
+ reduction_axis2[0] = 0;
+ reduction_axis2[1] = 2;
+ result = tensor.maximum(reduction_axis2);
+ VERIFY_IS_EQUAL(result.dimension(0), 3);
+ VERIFY_IS_EQUAL(result.dimension(1), 7);
+ for (int i = 0; i < 3; ++i) {
+ for (int j = 0; j < 7; ++j) {
+ float max_val = std::numeric_limits<float>::lowest();
+ for (int k = 0; k < 2; ++k) {
+ for (int l = 0; l < 5; ++l) {
+ max_val = (std::max)(max_val, tensor(k, i, l, j));
+ }
+ }
+ VERIFY_IS_APPROX(result(i, j), max_val);
+ }
+ }
+
+ {
+ Tensor<float, 0, DataLayout> max1 = tensor.maximum();
+ VERIFY_IS_EQUAL(max1.rank(), 0);
+
+ array<ptrdiff_t, 4> reduction_axis4;
+ reduction_axis4[0] = 0;
+ reduction_axis4[1] = 1;
+ reduction_axis4[2] = 2;
+ reduction_axis4[3] = 3;
+ Tensor<float, 0, DataLayout> max2 = tensor.maximum(reduction_axis4);
+ VERIFY_IS_EQUAL(max2.rank(), 0);
+
+ VERIFY_IS_APPROX(max1(), max2());
+ }
+
+ reduction_axis2[0] = 0;
+ reduction_axis2[1] = 1;
+ result = tensor.minimum(reduction_axis2);
+ VERIFY_IS_EQUAL(result.dimension(0), 5);
+ VERIFY_IS_EQUAL(result.dimension(1), 7);
+ for (int i = 0; i < 5; ++i) {
+ for (int j = 0; j < 7; ++j) {
+ float min_val = (std::numeric_limits<float>::max)();
+ for (int k = 0; k < 2; ++k) {
+ for (int l = 0; l < 3; ++l) {
+ min_val = (std::min)(min_val, tensor(k, l, i, j));
+ }
+ }
+ VERIFY_IS_APPROX(result(i, j), min_val);
+ }
+ }
+
+ {
+ Tensor<float, 0, DataLayout> min1 = tensor.minimum();
+ VERIFY_IS_EQUAL(min1.rank(), 0);
+
+ array<ptrdiff_t, 4> reduction_axis4;
+ reduction_axis4[0] = 0;
+ reduction_axis4[1] = 1;
+ reduction_axis4[2] = 2;
+ reduction_axis4[3] = 3;
+ Tensor<float, 0, DataLayout> min2 = tensor.minimum(reduction_axis4);
+ VERIFY_IS_EQUAL(min2.rank(), 0);
+
+ VERIFY_IS_APPROX(min1(), min2());
+ }
+
+ reduction_axis2[0] = 0;
+ reduction_axis2[1] = 1;
+ result = tensor.mean(reduction_axis2);
+ VERIFY_IS_EQUAL(result.dimension(0), 5);
+ VERIFY_IS_EQUAL(result.dimension(1), 7);
+ for (int i = 0; i < 5; ++i) {
+ for (int j = 0; j < 7; ++j) {
+ float sum = 0.0f;
+ int count = 0;
+ for (int k = 0; k < 2; ++k) {
+ for (int l = 0; l < 3; ++l) {
+ sum += tensor(k, l, i, j);
+ ++count;
+ }
+ }
+ VERIFY_IS_APPROX(result(i, j), sum / count);
+ }
+ }
+
+ {
+ Tensor<float, 0, DataLayout> mean1 = tensor.mean();
+ VERIFY_IS_EQUAL(mean1.rank(), 0);
+
+ array<ptrdiff_t, 4> reduction_axis4;
+ reduction_axis4[0] = 0;
+ reduction_axis4[1] = 1;
+ reduction_axis4[2] = 2;
+ reduction_axis4[3] = 3;
+ Tensor<float, 0, DataLayout> mean2 = tensor.mean(reduction_axis4);
+ VERIFY_IS_EQUAL(mean2.rank(), 0);
+
+ VERIFY_IS_APPROX(mean1(), mean2());
+ }
+
+ {
+ Tensor<int, 1> ints(10);
+ std::iota(ints.data(), ints.data() + ints.dimension(0), 0);
+
+ TensorFixedSize<bool, Sizes<> > all;
+ all = ints.all();
+ VERIFY(!all());
+ all = (ints >= ints.constant(0)).all();
+ VERIFY(all());
+
+ TensorFixedSize<bool, Sizes<> > any;
+ any = (ints > ints.constant(10)).any();
+ VERIFY(!any());
+ any = (ints < ints.constant(1)).any();
+ VERIFY(any());
+ }
+}
+
+
+template <int DataLayout>
+static void test_reductions_in_expr() {
+ Tensor<float, 4, DataLayout> tensor(2, 3, 5, 7);
+ tensor.setRandom();
+ array<ptrdiff_t, 2> reduction_axis2;
+ reduction_axis2[0] = 1;
+ reduction_axis2[1] = 3;
+
+ Tensor<float, 2, DataLayout> result(2, 5);
+ result = result.constant(1.0f) - tensor.sum(reduction_axis2);
+ VERIFY_IS_EQUAL(result.dimension(0), 2);
+ VERIFY_IS_EQUAL(result.dimension(1), 5);
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 5; ++j) {
+ float sum = 0.0f;
+ for (int k = 0; k < 3; ++k) {
+ for (int l = 0; l < 7; ++l) {
+ sum += tensor(i, k, j, l);
+ }
+ }
+ VERIFY_IS_APPROX(result(i, j), 1.0f - sum);
+ }
+ }
+}
+
+
+template <int DataLayout>
+static void test_full_reductions() {
+ Tensor<float, 2, DataLayout> tensor(2, 3);
+ tensor.setRandom();
+ array<ptrdiff_t, 2> reduction_axis;
+ reduction_axis[0] = 0;
+ reduction_axis[1] = 1;
+
+ Tensor<float, 0, DataLayout> result = tensor.sum(reduction_axis);
+ VERIFY_IS_EQUAL(result.rank(), 0);
+
+ float sum = 0.0f;
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ sum += tensor(i, j);
+ }
+ }
+ VERIFY_IS_APPROX(result(0), sum);
+
+ result = tensor.square().sum(reduction_axis).sqrt();
+ VERIFY_IS_EQUAL(result.rank(), 0);
+
+ sum = 0.0f;
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ sum += tensor(i, j) * tensor(i, j);
+ }
+ }
+ VERIFY_IS_APPROX(result(), sqrtf(sum));
+}
+
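+// A user-defined reducer must provide initialize(), reduce() and finalize(),
+// plus a PacketAccess flag; this one computes 1 / (offset + sum of squares).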
+struct UserReducer {
+ static const bool PacketAccess = false;
+ UserReducer(float offset) : offset_(offset) {}
+ void reduce(const float val, float* accum) { *accum += val * val; }
+ float initialize() const { return 0; }
+ float finalize(const float accum) const { return 1.0f / (accum + offset_); }
+
+ private:
+ const float offset_;
+};
+
+template <int DataLayout>
+static void test_user_defined_reductions() {
+ Tensor<float, 2, DataLayout> tensor(5, 7);
+ tensor.setRandom();
+ array<ptrdiff_t, 1> reduction_axis;
+ reduction_axis[0] = 1;
+
+ UserReducer reducer(10.0f);
+ Tensor<float, 1, DataLayout> result = tensor.reduce(reduction_axis, reducer);
+ VERIFY_IS_EQUAL(result.dimension(0), 5);
+ for (int i = 0; i < 5; ++i) {
+ float expected = 10.0f;
+ for (int j = 0; j < 7; ++j) {
+ expected += tensor(i, j) * tensor(i, j);
+ }
+ expected = 1.0f / expected;
+ VERIFY_IS_APPROX(result(i), expected);
+ }
+}
+
+template <int DataLayout>
+static void test_tensor_maps() {
+ int inputs[2 * 3 * 5 * 7];
+ TensorMap<Tensor<int, 4, DataLayout> > tensor_map(inputs, 2, 3, 5, 7);
+ TensorMap<Tensor<const int, 4, DataLayout> > tensor_map_const(inputs, 2, 3, 5,
+ 7);
+ const TensorMap<Tensor<const int, 4, DataLayout> > tensor_map_const_const(
+ inputs, 2, 3, 5, 7);
+
+ tensor_map.setRandom();
+ array<ptrdiff_t, 2> reduction_axis;
+ reduction_axis[0] = 1;
+ reduction_axis[1] = 3;
+
+ Tensor<int, 2, DataLayout> result = tensor_map.sum(reduction_axis);
+ Tensor<int, 2, DataLayout> result2 = tensor_map_const.sum(reduction_axis);
+ Tensor<int, 2, DataLayout> result3 =
+ tensor_map_const_const.sum(reduction_axis);
+
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 5; ++j) {
+ int sum = 0;
+ for (int k = 0; k < 3; ++k) {
+ for (int l = 0; l < 7; ++l) {
+ sum += tensor_map(i, k, j, l);
+ }
+ }
+ VERIFY_IS_EQUAL(result(i, j), sum);
+ VERIFY_IS_EQUAL(result2(i, j), sum);
+ VERIFY_IS_EQUAL(result3(i, j), sum);
+ }
+ }
+}
+
+template <int DataLayout>
+static void test_static_dims() {
+ Tensor<float, 4, DataLayout> in(72, 53, 97, 113);
+ Tensor<float, 2, DataLayout> out(72, 97);
+ in.setRandom();
+
+#if !EIGEN_HAS_CONSTEXPR
+ array<int, 2> reduction_axis;
+ reduction_axis[0] = 1;
+ reduction_axis[1] = 3;
+#else
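+  // With constexpr support the reduction axes are encoded at compile time.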
+ Eigen::IndexList<Eigen::type2index<1>, Eigen::type2index<3> > reduction_axis;
+#endif
+
+ out = in.maximum(reduction_axis);
+
+ for (int i = 0; i < 72; ++i) {
+ for (int j = 0; j < 97; ++j) {
+ float expected = -1e10f;
+ for (int k = 0; k < 53; ++k) {
+ for (int l = 0; l < 113; ++l) {
+ expected = (std::max)(expected, in(i, k, j, l));
+ }
+ }
+ VERIFY_IS_APPROX(out(i, j), expected);
+ }
+ }
+}
+
+template <int DataLayout>
+static void test_innermost_last_dims() {
+ Tensor<float, 4, DataLayout> in(72, 53, 97, 113);
+ Tensor<float, 2, DataLayout> out(97, 113);
+ in.setRandom();
+
+// Reduce on the innermost dimensions.
+#if !EIGEN_HAS_CONSTEXPR
+ array<int, 2> reduction_axis;
+ reduction_axis[0] = 0;
+ reduction_axis[1] = 1;
+#else
+ // This triggers the use of packets for ColMajor.
+ Eigen::IndexList<Eigen::type2index<0>, Eigen::type2index<1> > reduction_axis;
+#endif
+
+ out = in.maximum(reduction_axis);
+
+ for (int i = 0; i < 97; ++i) {
+ for (int j = 0; j < 113; ++j) {
+ float expected = -1e10f;
+ for (int k = 0; k < 53; ++k) {
+ for (int l = 0; l < 72; ++l) {
+ expected = (std::max)(expected, in(l, k, i, j));
+ }
+ }
+ VERIFY_IS_APPROX(out(i, j), expected);
+ }
+ }
+}
+
+template <int DataLayout>
+static void test_innermost_first_dims() {
+ Tensor<float, 4, DataLayout> in(72, 53, 97, 113);
+ Tensor<float, 2, DataLayout> out(72, 53);
+ in.setRandom();
+
+// Reduce on the innermost dimensions.
+#if !EIGEN_HAS_CONSTEXPR
+ array<int, 2> reduction_axis;
+ reduction_axis[0] = 2;
+ reduction_axis[1] = 3;
+#else
+ // This triggers the use of packets for RowMajor.
+ Eigen::IndexList<Eigen::type2index<2>, Eigen::type2index<3>> reduction_axis;
+#endif
+
+ out = in.maximum(reduction_axis);
+
+ for (int i = 0; i < 72; ++i) {
+ for (int j = 0; j < 53; ++j) {
+ float expected = -1e10f;
+ for (int k = 0; k < 97; ++k) {
+ for (int l = 0; l < 113; ++l) {
+ expected = (std::max)(expected, in(i, j, k, l));
+ }
+ }
+ VERIFY_IS_APPROX(out(i, j), expected);
+ }
+ }
+}
+
+template <int DataLayout>
+static void test_reduce_middle_dims() {
+ Tensor<float, 4, DataLayout> in(72, 53, 97, 113);
+ Tensor<float, 2, DataLayout> out(72, 53);
+ in.setRandom();
+
+// Reduce on the middle dimensions.
+#if !EIGEN_HAS_CONSTEXPR
+ array<int, 2> reduction_axis;
+ reduction_axis[0] = 1;
+ reduction_axis[1] = 2;
+#else
+ // This triggers the use of packets for RowMajor.
+ Eigen::IndexList<Eigen::type2index<1>, Eigen::type2index<2>> reduction_axis;
+#endif
+
+ out = in.maximum(reduction_axis);
+
+ for (int i = 0; i < 72; ++i) {
+ for (int j = 0; j < 113; ++j) {
+ float expected = -1e10f;
+ for (int k = 0; k < 53; ++k) {
+ for (int l = 0; l < 97; ++l) {
+ expected = (std::max)(expected, in(i, k, l, j));
+ }
+ }
+ VERIFY_IS_APPROX(out(i, j), expected);
+ }
+ }
+}
+
+void test_cxx11_tensor_reduction() {
+ CALL_SUBTEST(test_trivial_reductions<ColMajor>());
+ CALL_SUBTEST(test_trivial_reductions<RowMajor>());
+ CALL_SUBTEST(test_simple_reductions<ColMajor>());
+ CALL_SUBTEST(test_simple_reductions<RowMajor>());
+ CALL_SUBTEST(test_reductions_in_expr<ColMajor>());
+ CALL_SUBTEST(test_reductions_in_expr<RowMajor>());
+ CALL_SUBTEST(test_full_reductions<ColMajor>());
+ CALL_SUBTEST(test_full_reductions<RowMajor>());
+ CALL_SUBTEST(test_user_defined_reductions<ColMajor>());
+ CALL_SUBTEST(test_user_defined_reductions<RowMajor>());
+ CALL_SUBTEST(test_tensor_maps<ColMajor>());
+ CALL_SUBTEST(test_tensor_maps<RowMajor>());
+ CALL_SUBTEST(test_static_dims<ColMajor>());
+ CALL_SUBTEST(test_static_dims<RowMajor>());
+ CALL_SUBTEST(test_innermost_last_dims<ColMajor>());
+ CALL_SUBTEST(test_innermost_last_dims<RowMajor>());
+ CALL_SUBTEST(test_innermost_first_dims<ColMajor>());
+ CALL_SUBTEST(test_innermost_first_dims<RowMajor>());
+ CALL_SUBTEST(test_reduce_middle_dims<ColMajor>());
+ CALL_SUBTEST(test_reduce_middle_dims<RowMajor>());
+}
diff --git a/unsupported/test/cxx11_tensor_reduction_cuda.cu b/unsupported/test/cxx11_tensor_reduction_cuda.cu
new file mode 100644
index 000000000..6858b43a7
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_reduction_cuda.cu
@@ -0,0 +1,157 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2015 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#define EIGEN_TEST_NO_LONGDOUBLE
+#define EIGEN_TEST_NO_COMPLEX
+#define EIGEN_TEST_FUNC cxx11_tensor_reduction_cuda
+#define EIGEN_USE_GPU
+
+#if defined __CUDACC_VER__ && __CUDACC_VER__ >= 70500
+#include <cuda_fp16.h>
+#endif
+#include "main.h"
+#include <unsupported/Eigen/CXX11/Tensor>
+
+
+template<typename Type, int DataLayout>
+static void test_full_reductions() {
+
+ Eigen::CudaStreamDevice stream;
+ Eigen::GpuDevice gpu_device(&stream);
+
+ const int num_rows = internal::random<int>(1024, 5*1024);
+ const int num_cols = internal::random<int>(1024, 5*1024);
+
+ Tensor<Type, 2, DataLayout> in(num_rows, num_cols);
+ in.setRandom();
+
+ Tensor<Type, 0, DataLayout> full_redux;
+ full_redux = in.sum();
+
+ std::size_t in_bytes = in.size() * sizeof(Type);
+ std::size_t out_bytes = full_redux.size() * sizeof(Type);
+ Type* gpu_in_ptr = static_cast<Type*>(gpu_device.allocate(in_bytes));
+ Type* gpu_out_ptr = static_cast<Type*>(gpu_device.allocate(out_bytes));
+ gpu_device.memcpyHostToDevice(gpu_in_ptr, in.data(), in_bytes);
+
+ TensorMap<Tensor<Type, 2, DataLayout> > in_gpu(gpu_in_ptr, num_rows, num_cols);
+ TensorMap<Tensor<Type, 0, DataLayout> > out_gpu(gpu_out_ptr);
+
+ out_gpu.device(gpu_device) = in_gpu.sum();
+
+ Tensor<Type, 0, DataLayout> full_redux_gpu;
+ gpu_device.memcpyDeviceToHost(full_redux_gpu.data(), gpu_out_ptr, out_bytes);
+ gpu_device.synchronize();
+
+ // Check that the CPU and GPU reductions return the same result.
+ VERIFY_IS_APPROX(full_redux(), full_redux_gpu());
+
+ gpu_device.deallocate(gpu_in_ptr);
+ gpu_device.deallocate(gpu_out_ptr);
+}
+
+template<typename Type, int DataLayout>
+static void test_first_dim_reductions() {
+ int dim_x = 33;
+ int dim_y = 1;
+ int dim_z = 128;
+
+ Tensor<Type, 3, DataLayout> in(dim_x, dim_y, dim_z);
+ in.setRandom();
+
+ Eigen::array<int, 1> red_axis;
+ red_axis[0] = 0;
+ Tensor<Type, 2, DataLayout> redux = in.sum(red_axis);
+
+ // Create device
+ Eigen::CudaStreamDevice stream;
+ Eigen::GpuDevice dev(&stream);
+
+ // Create data(T)
+ Type* in_data = (Type*)dev.allocate(dim_x*dim_y*dim_z*sizeof(Type));
+ Type* out_data = (Type*)dev.allocate(dim_z*dim_y*sizeof(Type));
+ Eigen::TensorMap<Eigen::Tensor<Type, 3, DataLayout> > gpu_in(in_data, dim_x, dim_y, dim_z);
+ Eigen::TensorMap<Eigen::Tensor<Type, 2, DataLayout> > gpu_out(out_data, dim_y, dim_z);
+
+ // Perform operation
+ dev.memcpyHostToDevice(in_data, in.data(), in.size()*sizeof(Type));
+ gpu_out.device(dev) = gpu_in.sum(red_axis);
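+  // Run the reduction a second time, accumulating into the output, so the
+  // GPU result should be exactly twice the CPU reference.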
+ gpu_out.device(dev) += gpu_in.sum(red_axis);
+ Tensor<Type, 2, DataLayout> redux_gpu(dim_y, dim_z);
+ dev.memcpyDeviceToHost(redux_gpu.data(), out_data, gpu_out.size()*sizeof(Type));
+ dev.synchronize();
+
+ // Check that the CPU and GPU reductions return the same result.
+ for (int i = 0; i < gpu_out.size(); ++i) {
+ VERIFY_IS_APPROX(2*redux(i), redux_gpu(i));
+ }
+
+ dev.deallocate(in_data);
+ dev.deallocate(out_data);
+}
+
+template<typename Type, int DataLayout>
+static void test_last_dim_reductions() {
+ int dim_x = 128;
+ int dim_y = 1;
+ int dim_z = 33;
+
+ Tensor<Type, 3, DataLayout> in(dim_x, dim_y, dim_z);
+ in.setRandom();
+
+ Eigen::array<int, 1> red_axis;
+ red_axis[0] = 2;
+ Tensor<Type, 2, DataLayout> redux = in.sum(red_axis);
+
+ // Create device
+ Eigen::CudaStreamDevice stream;
+ Eigen::GpuDevice dev(&stream);
+
+ // Create data
+ Type* in_data = (Type*)dev.allocate(dim_x*dim_y*dim_z*sizeof(Type));
+ Type* out_data = (Type*)dev.allocate(dim_x*dim_y*sizeof(Type));
+ Eigen::TensorMap<Eigen::Tensor<Type, 3, DataLayout> > gpu_in(in_data, dim_x, dim_y, dim_z);
+ Eigen::TensorMap<Eigen::Tensor<Type, 2, DataLayout> > gpu_out(out_data, dim_x, dim_y);
+
+ // Perform operation
+ dev.memcpyHostToDevice(in_data, in.data(), in.size()*sizeof(Type));
+ gpu_out.device(dev) = gpu_in.sum(red_axis);
+ gpu_out.device(dev) += gpu_in.sum(red_axis);
+ Tensor<Type, 2, DataLayout> redux_gpu(dim_x, dim_y);
+ dev.memcpyDeviceToHost(redux_gpu.data(), out_data, gpu_out.size()*sizeof(Type));
+ dev.synchronize();
+
+ // The GPU evaluated the sum twice (= then +=), so expect twice the CPU result.
+ for (int i = 0; i < gpu_out.size(); ++i) {
+ VERIFY_IS_APPROX(2*redux(i), redux_gpu(i));
+ }
+
+ dev.deallocate(in_data);
+ dev.deallocate(out_data);
+}
+
+
+void test_cxx11_tensor_reduction_cuda() {
+ CALL_SUBTEST_1((test_full_reductions<float, ColMajor>()));
+ CALL_SUBTEST_1((test_full_reductions<double, ColMajor>()));
+ CALL_SUBTEST_2((test_full_reductions<float, RowMajor>()));
+ CALL_SUBTEST_2((test_full_reductions<double, RowMajor>()));
+
+ CALL_SUBTEST_3((test_first_dim_reductions<float, ColMajor>()));
+ CALL_SUBTEST_3((test_first_dim_reductions<double, ColMajor>()));
+ CALL_SUBTEST_4((test_first_dim_reductions<float, RowMajor>()));
+// Outer reductions of doubles aren't supported just yet.
+// CALL_SUBTEST_4((test_first_dim_reductions<double, RowMajor>()));
+
+ CALL_SUBTEST_5((test_last_dim_reductions<float, ColMajor>()));
+// Outer reductions of doubles aren't supported just yet.
+// CALL_SUBTEST_5((test_last_dim_reductions<double, ColMajor>()));
+ CALL_SUBTEST_6((test_last_dim_reductions<float, RowMajor>()));
+ CALL_SUBTEST_6((test_last_dim_reductions<double, RowMajor>()));
+}
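
Each CUDA reduction test above follows the same round trip: compute a reference on the host, copy the input to the device, evaluate the identical expression through a device object, copy the result back, and compare. The sketch below distills that pattern on the host-only DefaultDevice; it assumes nothing beyond the public Tensor API, and the size and tolerance are illustrative.

```cpp
#include <unsupported/Eigen/CXX11/Tensor>
#include <cassert>
#include <cmath>

int main() {
  Eigen::Tensor<float, 2> in(64, 64);
  in.setRandom();

  // Reference reduction, evaluated eagerly on the host.
  Eigen::Tensor<float, 0> ref = in.sum();

  // The same expression routed through an explicit device object,
  // mirroring out_gpu.device(gpu_device) = in_gpu.sum() above.
  Eigen::DefaultDevice device;
  Eigen::Tensor<float, 0> out;
  out.device(device) = in.sum();

  // Rank-0 analogue of VERIFY_IS_APPROX, with a mixed absolute/relative
  // tolerance so a near-zero sum does not fail spuriously.
  assert(std::fabs(out() - ref()) <= 1e-3f * (1.0f + std::fabs(ref())));
  return 0;
}
```
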
diff --git a/unsupported/test/cxx11_tensor_reduction_sycl.cpp b/unsupported/test/cxx11_tensor_reduction_sycl.cpp
new file mode 100644
index 000000000..a9ef82907
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_reduction_sycl.cpp
@@ -0,0 +1,138 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2015
+// Mehdi Goli Codeplay Software Ltd.
+// Ralph Potter Codeplay Software Ltd.
+// Luke Iwanski Codeplay Software Ltd.
+// Contact: <eigen@codeplay.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#define EIGEN_TEST_NO_LONGDOUBLE
+#define EIGEN_TEST_NO_COMPLEX
+#define EIGEN_TEST_FUNC cxx11_tensor_reduction_sycl
+#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int
+#define EIGEN_USE_SYCL
+
+#include "main.h"
+#include <unsupported/Eigen/CXX11/Tensor>
+
+
+
+static void test_full_reductions_sycl(const Eigen::SyclDevice& sycl_device) {
+
+ const int num_rows = 452;
+ const int num_cols = 765;
+ array<int, 2> tensorRange = {{num_rows, num_cols}};
+
+ Tensor<float, 2> in(tensorRange);
+ Tensor<float, 0> full_redux;
+ Tensor<float, 0> full_redux_gpu;
+
+ in.setRandom();
+
+ full_redux = in.sum();
+
+ float* gpu_in_data = static_cast<float*>(sycl_device.allocate(in.dimensions().TotalSize()*sizeof(float)));
+ float* gpu_out_data = static_cast<float*>(sycl_device.allocate(sizeof(float)));
+
+ TensorMap<Tensor<float, 2> > in_gpu(gpu_in_data, tensorRange);
+ TensorMap<Tensor<float, 0> > out_gpu(gpu_out_data);
+
+ sycl_device.memcpyHostToDevice(gpu_in_data, in.data(),(in.dimensions().TotalSize())*sizeof(float));
+ out_gpu.device(sycl_device) = in_gpu.sum();
+ sycl_device.memcpyDeviceToHost(full_redux_gpu.data(), gpu_out_data, sizeof(float));
+ // Check that the CPU and GPU reductions return the same result.
+ VERIFY_IS_APPROX(full_redux_gpu(), full_redux());
+
+ sycl_device.deallocate(gpu_in_data);
+ sycl_device.deallocate(gpu_out_data);
+}
+
+static void test_first_dim_reductions_sycl(const Eigen::SyclDevice& sycl_device) {
+
+ int dim_x = 145;
+ int dim_y = 1;
+ int dim_z = 67;
+
+ array<int, 3> tensorRange = {{dim_x, dim_y, dim_z}};
+ Eigen::array<int, 1> red_axis;
+ red_axis[0] = 0;
+ array<int, 2> reduced_tensorRange = {{dim_y, dim_z}};
+
+ Tensor<float, 3> in(tensorRange);
+ Tensor<float, 2> redux(reduced_tensorRange);
+ Tensor<float, 2> redux_gpu(reduced_tensorRange);
+
+ in.setRandom();
+
+ redux = in.sum(red_axis);
+
+ float* gpu_in_data = static_cast<float*>(sycl_device.allocate(in.dimensions().TotalSize()*sizeof(float)));
+ float* gpu_out_data = static_cast<float*>(sycl_device.allocate(redux_gpu.dimensions().TotalSize()*sizeof(float)));
+
+ TensorMap<Tensor<float, 3> > in_gpu(gpu_in_data, tensorRange);
+ TensorMap<Tensor<float, 2> > out_gpu(gpu_out_data, reduced_tensorRange);
+
+ sycl_device.memcpyHostToDevice(gpu_in_data, in.data(),(in.dimensions().TotalSize())*sizeof(float));
+ out_gpu.device(sycl_device) = in_gpu.sum(red_axis);
+ sycl_device.memcpyDeviceToHost(redux_gpu.data(), gpu_out_data, redux_gpu.dimensions().TotalSize()*sizeof(float));
+
+ // Check that the CPU and GPU reductions return the same result.
+ for(int j=0; j<reduced_tensorRange[0]; j++ )
+ for(int k=0; k<reduced_tensorRange[1]; k++ )
+ VERIFY_IS_APPROX(redux_gpu(j,k), redux(j,k));
+
+ sycl_device.deallocate(gpu_in_data);
+ sycl_device.deallocate(gpu_out_data);
+}
+
+static void test_last_dim_reductions_sycl(const Eigen::SyclDevice &sycl_device) {
+
+ int dim_x = 567;
+ int dim_y = 1;
+ int dim_z = 47;
+
+ array<int, 3> tensorRange = {{dim_x, dim_y, dim_z}};
+ Eigen::array<int, 1> red_axis;
+ red_axis[0] = 2;
+ array<int, 2> reduced_tensorRange = {{dim_x, dim_y}};
+
+ Tensor<float, 3> in(tensorRange);
+ Tensor<float, 2> redux(reduced_tensorRange);
+ Tensor<float, 2> redux_gpu(reduced_tensorRange);
+
+ in.setRandom();
+
+ redux = in.sum(red_axis);
+
+ float* gpu_in_data = static_cast<float*>(sycl_device.allocate(in.dimensions().TotalSize()*sizeof(float)));
+ float* gpu_out_data = static_cast<float*>(sycl_device.allocate(redux_gpu.dimensions().TotalSize()*sizeof(float)));
+
+ TensorMap<Tensor<float, 3> > in_gpu(gpu_in_data, tensorRange);
+ TensorMap<Tensor<float, 2> > out_gpu(gpu_out_data, reduced_tensorRange);
+
+ sycl_device.memcpyHostToDevice(gpu_in_data, in.data(),(in.dimensions().TotalSize())*sizeof(float));
+ out_gpu.device(sycl_device) = in_gpu.sum(red_axis);
+ sycl_device.memcpyDeviceToHost(redux_gpu.data(), gpu_out_data, redux_gpu.dimensions().TotalSize()*sizeof(float));
+ // Check that the CPU and GPU reductions return the same result.
+ for(int j=0; j<reduced_tensorRange[0]; j++ )
+ for(int k=0; k<reduced_tensorRange[1]; k++ )
+ VERIFY_IS_APPROX(redux_gpu(j,k), redux(j,k));
+
+ sycl_device.deallocate(gpu_in_data);
+ sycl_device.deallocate(gpu_out_data);
+
+}
+
+void test_cxx11_tensor_reduction_sycl() {
+ cl::sycl::gpu_selector s;
+ Eigen::SyclDevice sycl_device(s);
+ CALL_SUBTEST((test_full_reductions_sycl(sycl_device)));
+ CALL_SUBTEST((test_first_dim_reductions_sycl(sycl_device)));
+ CALL_SUBTEST((test_last_dim_reductions_sycl(sycl_device)));
+
+}
diff --git a/unsupported/test/cxx11_tensor_ref.cpp b/unsupported/test/cxx11_tensor_ref.cpp
new file mode 100644
index 000000000..c8f105e3d
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_ref.cpp
@@ -0,0 +1,248 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+
+#include <Eigen/CXX11/Tensor>
+
+using Eigen::Tensor;
+using Eigen::RowMajor;
+
+static void test_simple_lvalue_ref()
+{
+ Tensor<int, 1> input(6);
+ input.setRandom();
+
+ TensorRef<Tensor<int, 1>> ref3(input);
+ TensorRef<Tensor<int, 1>> ref4 = input;
+
+ VERIFY_IS_EQUAL(ref3.data(), input.data());
+ VERIFY_IS_EQUAL(ref4.data(), input.data());
+
+ for (int i = 0; i < 6; ++i) {
+ VERIFY_IS_EQUAL(ref3(i), input(i));
+ VERIFY_IS_EQUAL(ref4(i), input(i));
+ }
+
+ for (int i = 0; i < 6; ++i) {
+ ref3.coeffRef(i) = i;
+ }
+ for (int i = 0; i < 6; ++i) {
+ VERIFY_IS_EQUAL(input(i), i);
+ }
+ for (int i = 0; i < 6; ++i) {
+ ref4.coeffRef(i) = -i * 2;
+ }
+ for (int i = 0; i < 6; ++i) {
+ VERIFY_IS_EQUAL(input(i), -i*2);
+ }
+}
+
+
+static void test_simple_rvalue_ref()
+{
+ Tensor<int, 1> input1(6);
+ input1.setRandom();
+ Tensor<int, 1> input2(6);
+ input2.setRandom();
+
+ TensorRef<Tensor<int, 1>> ref3(input1 + input2);
+ TensorRef<Tensor<int, 1>> ref4 = input1 + input2;
+
+ VERIFY_IS_NOT_EQUAL(ref3.data(), input1.data());
+ VERIFY_IS_NOT_EQUAL(ref4.data(), input1.data());
+ VERIFY_IS_NOT_EQUAL(ref3.data(), input2.data());
+ VERIFY_IS_NOT_EQUAL(ref4.data(), input2.data());
+
+ for (int i = 0; i < 6; ++i) {
+ VERIFY_IS_EQUAL(ref3(i), input1(i) + input2(i));
+ VERIFY_IS_EQUAL(ref4(i), input1(i) + input2(i));
+ }
+}
+
+
+static void test_multiple_dims()
+{
+ Tensor<float, 3> input(3,5,7);
+ input.setRandom();
+
+ TensorRef<Tensor<float, 3>> ref(input);
+ VERIFY_IS_EQUAL(ref.data(), input.data());
+ VERIFY_IS_EQUAL(ref.dimension(0), 3);
+ VERIFY_IS_EQUAL(ref.dimension(1), 5);
+ VERIFY_IS_EQUAL(ref.dimension(2), 7);
+
+ for (int i = 0; i < 3; ++i) {
+ for (int j = 0; j < 5; ++j) {
+ for (int k = 0; k < 7; ++k) {
+ VERIFY_IS_EQUAL(ref(i,j,k), input(i,j,k));
+ }
+ }
+ }
+}
+
+
+static void test_slice()
+{
+ Tensor<float, 5> tensor(2,3,5,7,11);
+ tensor.setRandom();
+
+ Eigen::DSizes<ptrdiff_t, 5> indices(1,2,3,4,5);
+ Eigen::DSizes<ptrdiff_t, 5> sizes(1,1,1,1,1);
+ TensorRef<Tensor<float, 5>> slice = tensor.slice(indices, sizes);
+ VERIFY_IS_EQUAL(slice(0,0,0,0,0), tensor(1,2,3,4,5));
+
+ Eigen::DSizes<ptrdiff_t, 5> indices2(1,1,3,4,5);
+ Eigen::DSizes<ptrdiff_t, 5> sizes2(1,1,2,2,3);
+ slice = tensor.slice(indices2, sizes2);
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 2; ++j) {
+ for (int k = 0; k < 3; ++k) {
+ VERIFY_IS_EQUAL(slice(0,0,i,j,k), tensor(1,1,3+i,4+j,5+k));
+ }
+ }
+ }
+
+ Eigen::DSizes<ptrdiff_t, 5> indices3(0,0,0,0,0);
+ Eigen::DSizes<ptrdiff_t, 5> sizes3(2,3,1,1,1);
+ slice = tensor.slice(indices3, sizes3);
+ VERIFY_IS_EQUAL(slice.data(), tensor.data());
+}
+
+
+static void test_ref_of_ref()
+{
+ Tensor<float, 3> input(3,5,7);
+ input.setRandom();
+
+ TensorRef<Tensor<float, 3>> ref(input);
+ TensorRef<Tensor<float, 3>> ref_of_ref(ref);
+ TensorRef<Tensor<float, 3>> ref_of_ref2;
+ ref_of_ref2 = ref;
+
+ VERIFY_IS_EQUAL(ref_of_ref.data(), input.data());
+ VERIFY_IS_EQUAL(ref_of_ref.dimension(0), 3);
+ VERIFY_IS_EQUAL(ref_of_ref.dimension(1), 5);
+ VERIFY_IS_EQUAL(ref_of_ref.dimension(2), 7);
+
+ VERIFY_IS_EQUAL(ref_of_ref2.data(), input.data());
+ VERIFY_IS_EQUAL(ref_of_ref2.dimension(0), 3);
+ VERIFY_IS_EQUAL(ref_of_ref2.dimension(1), 5);
+ VERIFY_IS_EQUAL(ref_of_ref2.dimension(2), 7);
+
+ for (int i = 0; i < 3; ++i) {
+ for (int j = 0; j < 5; ++j) {
+ for (int k = 0; k < 7; ++k) {
+ VERIFY_IS_EQUAL(ref_of_ref(i,j,k), input(i,j,k));
+ VERIFY_IS_EQUAL(ref_of_ref2(i,j,k), input(i,j,k));
+ }
+ }
+ }
+}
+
+
+static void test_ref_in_expr()
+{
+ Tensor<float, 3> input(3,5,7);
+ input.setRandom();
+ TensorRef<Tensor<float, 3>> input_ref(input);
+
+ Tensor<float, 3> result(3,5,7);
+ result.setRandom();
+ TensorRef<Tensor<float, 3>> result_ref(result);
+
+ Tensor<float, 3> bias(3,5,7);
+ bias.setRandom();
+
+ result_ref = input_ref + bias;
+ for (int i = 0; i < 3; ++i) {
+ for (int j = 0; j < 5; ++j) {
+ for (int k = 0; k < 7; ++k) {
+ VERIFY_IS_EQUAL(result_ref(i,j,k), input(i,j,k) + bias(i,j,k));
+ VERIFY_IS_NOT_EQUAL(result(i,j,k), input(i,j,k) + bias(i,j,k));
+ }
+ }
+ }
+
+ result = result_ref;
+ for (int i = 0; i < 3; ++i) {
+ for (int j = 0; j < 5; ++j) {
+ for (int k = 0; k < 7; ++k) {
+ VERIFY_IS_EQUAL(result(i,j,k), input(i,j,k) + bias(i,j,k));
+ }
+ }
+ }
+}
+
+
+static void test_coeff_ref()
+{
+ Tensor<float, 5> tensor(2,3,5,7,11);
+ tensor.setRandom();
+ Tensor<float, 5> original = tensor;
+
+ TensorRef<Tensor<float, 4>> slice = tensor.chip(7, 4);
+ slice.coeffRef(0, 0, 0, 0) = 1.0f;
+ slice.coeffRef(1, 0, 0, 0) += 2.0f;
+
+ VERIFY_IS_EQUAL(tensor(0,0,0,0,7), 1.0f);
+ VERIFY_IS_EQUAL(tensor(1,0,0,0,7), original(1,0,0,0,7) + 2.0f);
+}
+
+
+static void test_nested_ops_with_ref()
+{
+ Tensor<float, 4> t(2, 3, 5, 7);
+ t.setRandom();
+ TensorMap<Tensor<const float, 4> > m(t.data(), 2, 3, 5, 7);
+ array<std::pair<ptrdiff_t, ptrdiff_t>, 4> paddings;
+ paddings[0] = std::make_pair(0, 0);
+ paddings[1] = std::make_pair(2, 1);
+ paddings[2] = std::make_pair(3, 4);
+ paddings[3] = std::make_pair(0, 0);
+ DSizes<Eigen::DenseIndex, 4> shuffle_dims(0, 1, 2, 3);
+ TensorRef<Tensor<const float, 4> > ref(m.pad(paddings));
+ array<std::pair<ptrdiff_t, ptrdiff_t>, 4> trivial;
+ trivial[0] = std::make_pair(0, 0);
+ trivial[1] = std::make_pair(0, 0);
+ trivial[2] = std::make_pair(0, 0);
+ trivial[3] = std::make_pair(0, 0);
+ Tensor<float, 4> padded = ref.shuffle(shuffle_dims).pad(trivial);
+ VERIFY_IS_EQUAL(padded.dimension(0), 2+0);
+ VERIFY_IS_EQUAL(padded.dimension(1), 3+3);
+ VERIFY_IS_EQUAL(padded.dimension(2), 5+7);
+ VERIFY_IS_EQUAL(padded.dimension(3), 7+0);
+
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 6; ++j) {
+ for (int k = 0; k < 12; ++k) {
+ for (int l = 0; l < 7; ++l) {
+ if (j >= 2 && j < 5 && k >= 3 && k < 8) {
+ VERIFY_IS_EQUAL(padded(i,j,k,l), t(i,j-2,k-3,l));
+ } else {
+ VERIFY_IS_EQUAL(padded(i,j,k,l), 0.0f);
+ }
+ }
+ }
+ }
+ }
+}
+
+
+void test_cxx11_tensor_ref()
+{
+ CALL_SUBTEST(test_simple_lvalue_ref());
+ CALL_SUBTEST(test_simple_rvalue_ref());
+ CALL_SUBTEST(test_multiple_dims());
+ CALL_SUBTEST(test_slice());
+ CALL_SUBTEST(test_ref_of_ref());
+ CALL_SUBTEST(test_ref_in_expr());
+ CALL_SUBTEST(test_coeff_ref());
+ CALL_SUBTEST(test_nested_ops_with_ref());
+}
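
The lvalue and rvalue tests above pin down the two behaviours of TensorRef: a ref built from a concrete tensor aliases that tensor's storage, while a ref built from an expression evaluates it into a buffer the ref owns, which is why the data() pointers differ. A minimal sketch of that contract, using only the API exercised above:

```cpp
#include <unsupported/Eigen/CXX11/Tensor>
#include <cassert>

int main() {
  Eigen::Tensor<int, 1> a(4), b(4);
  a.setConstant(1);
  b.setConstant(2);

  // Lvalue: the ref aliases a's storage, so writes through it land in a.
  Eigen::TensorRef<Eigen::Tensor<int, 1> > ra(a);
  ra.coeffRef(0) = 42;
  assert(a(0) == 42);

  // Rvalue: a + b is evaluated into storage owned by the ref, so its
  // data pointer matches neither operand.
  Eigen::TensorRef<Eigen::Tensor<int, 1> > rsum(a + b);
  assert(rsum.data() != a.data() && rsum.data() != b.data());
  assert(rsum(1) == a(1) + b(1));
  return 0;
}
```
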
diff --git a/unsupported/test/cxx11_tensor_reverse.cpp b/unsupported/test/cxx11_tensor_reverse.cpp
new file mode 100644
index 000000000..b35b8d29e
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_reverse.cpp
@@ -0,0 +1,190 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Navdeep Jaitly <ndjaitly@google.com> and
+// Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+
+#include <Eigen/CXX11/Tensor>
+
+using Eigen::Tensor;
+using Eigen::array;
+
+template <int DataLayout>
+static void test_simple_reverse()
+{
+ Tensor<float, 4, DataLayout> tensor(2,3,5,7);
+ tensor.setRandom();
+
+ array<bool, 4> dim_rev;
+ dim_rev[0] = false;
+ dim_rev[1] = true;
+ dim_rev[2] = true;
+ dim_rev[3] = false;
+
+ Tensor<float, 4, DataLayout> reversed_tensor;
+ reversed_tensor = tensor.reverse(dim_rev);
+
+ VERIFY_IS_EQUAL(reversed_tensor.dimension(0), 2);
+ VERIFY_IS_EQUAL(reversed_tensor.dimension(1), 3);
+ VERIFY_IS_EQUAL(reversed_tensor.dimension(2), 5);
+ VERIFY_IS_EQUAL(reversed_tensor.dimension(3), 7);
+
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 5; ++k) {
+ for (int l = 0; l < 7; ++l) {
+ VERIFY_IS_EQUAL(tensor(i,j,k,l), reversed_tensor(i,2-j,4-k,l));
+ }
+ }
+ }
+ }
+
+ dim_rev[0] = true;
+ dim_rev[1] = false;
+ dim_rev[2] = false;
+ dim_rev[3] = false;
+
+ reversed_tensor = tensor.reverse(dim_rev);
+
+ VERIFY_IS_EQUAL(reversed_tensor.dimension(0), 2);
+ VERIFY_IS_EQUAL(reversed_tensor.dimension(1), 3);
+ VERIFY_IS_EQUAL(reversed_tensor.dimension(2), 5);
+ VERIFY_IS_EQUAL(reversed_tensor.dimension(3), 7);
+
+
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 5; ++k) {
+ for (int l = 0; l < 7; ++l) {
+ VERIFY_IS_EQUAL(tensor(i,j,k,l), reversed_tensor(1-i,j,k,l));
+ }
+ }
+ }
+ }
+
+ dim_rev[0] = true;
+ dim_rev[1] = false;
+ dim_rev[2] = false;
+ dim_rev[3] = true;
+
+ reversed_tensor = tensor.reverse(dim_rev);
+
+ VERIFY_IS_EQUAL(reversed_tensor.dimension(0), 2);
+ VERIFY_IS_EQUAL(reversed_tensor.dimension(1), 3);
+ VERIFY_IS_EQUAL(reversed_tensor.dimension(2), 5);
+ VERIFY_IS_EQUAL(reversed_tensor.dimension(3), 7);
+
+
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 5; ++k) {
+ for (int l = 0; l < 7; ++l) {
+ VERIFY_IS_EQUAL(tensor(i,j,k,l), reversed_tensor(1-i,j,k,6-l));
+ }
+ }
+ }
+ }
+}
+
+
+template <int DataLayout>
+static void test_expr_reverse(bool LValue)
+{
+ Tensor<float, 4, DataLayout> tensor(2,3,5,7);
+ tensor.setRandom();
+
+ array<bool, 4> dim_rev;
+ dim_rev[0] = false;
+ dim_rev[1] = true;
+ dim_rev[2] = false;
+ dim_rev[3] = true;
+
+ Tensor<float, 4, DataLayout> expected(2, 3, 5, 7);
+ if (LValue) {
+ expected.reverse(dim_rev) = tensor;
+ } else {
+ expected = tensor.reverse(dim_rev);
+ }
+
+ Tensor<float, 4, DataLayout> result(2,3,5,7);
+
+ array<ptrdiff_t, 4> src_slice_dim;
+ src_slice_dim[0] = 2;
+ src_slice_dim[1] = 3;
+ src_slice_dim[2] = 1;
+ src_slice_dim[3] = 7;
+ array<ptrdiff_t, 4> src_slice_start;
+ src_slice_start[0] = 0;
+ src_slice_start[1] = 0;
+ src_slice_start[2] = 0;
+ src_slice_start[3] = 0;
+ array<ptrdiff_t, 4> dst_slice_dim = src_slice_dim;
+ array<ptrdiff_t, 4> dst_slice_start = src_slice_start;
+
+ for (int i = 0; i < 5; ++i) {
+ if (LValue) {
+ result.slice(dst_slice_start, dst_slice_dim).reverse(dim_rev) =
+ tensor.slice(src_slice_start, src_slice_dim);
+ } else {
+ result.slice(dst_slice_start, dst_slice_dim) =
+ tensor.slice(src_slice_start, src_slice_dim).reverse(dim_rev);
+ }
+ src_slice_start[2] += 1;
+ dst_slice_start[2] += 1;
+ }
+
+ VERIFY_IS_EQUAL(result.dimension(0), 2);
+ VERIFY_IS_EQUAL(result.dimension(1), 3);
+ VERIFY_IS_EQUAL(result.dimension(2), 5);
+ VERIFY_IS_EQUAL(result.dimension(3), 7);
+
+ for (int i = 0; i < expected.dimension(0); ++i) {
+ for (int j = 0; j < expected.dimension(1); ++j) {
+ for (int k = 0; k < expected.dimension(2); ++k) {
+ for (int l = 0; l < expected.dimension(3); ++l) {
+ VERIFY_IS_EQUAL(result(i,j,k,l), expected(i,j,k,l));
+ }
+ }
+ }
+ }
+
+ dst_slice_start[2] = 0;
+ result.setRandom();
+ for (int i = 0; i < 5; ++i) {
+ if (LValue) {
+ result.slice(dst_slice_start, dst_slice_dim).reverse(dim_rev) =
+ tensor.slice(dst_slice_start, dst_slice_dim);
+ } else {
+ result.slice(dst_slice_start, dst_slice_dim) =
+ tensor.reverse(dim_rev).slice(dst_slice_start, dst_slice_dim);
+ }
+ dst_slice_start[2] += 1;
+ }
+
+ for (int i = 0; i < expected.dimension(0); ++i) {
+ for (int j = 0; j < expected.dimension(1); ++j) {
+ for (int k = 0; k < expected.dimension(2); ++k) {
+ for (int l = 0; l < expected.dimension(3); ++l) {
+ VERIFY_IS_EQUAL(result(i,j,k,l), expected(i,j,k,l));
+ }
+ }
+ }
+ }
+}
+
+
+void test_cxx11_tensor_reverse()
+{
+ CALL_SUBTEST(test_simple_reverse<ColMajor>());
+ CALL_SUBTEST(test_simple_reverse<RowMajor>());
+ CALL_SUBTEST(test_expr_reverse<ColMajor>(true));
+ CALL_SUBTEST(test_expr_reverse<RowMajor>(true));
+ CALL_SUBTEST(test_expr_reverse<ColMajor>(false));
+ CALL_SUBTEST(test_expr_reverse<RowMajor>(false));
+}
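
Every verification loop above encodes the same index map: along a reversed dimension of size n, output index i reads input index n - 1 - i, and dimensions whose flag is false pass through unchanged. A rank-2 sketch of that rule, assuming only the reverse() call tested above:

```cpp
#include <unsupported/Eigen/CXX11/Tensor>
#include <cassert>

int main() {
  Eigen::Tensor<float, 2> t(3, 4);
  t.setRandom();

  Eigen::array<bool, 2> rev{{true, false}};  // reverse dim 0 only
  Eigen::Tensor<float, 2> r = t.reverse(rev);

  for (int i = 0; i < 3; ++i)
    for (int j = 0; j < 4; ++j)
      assert(r(i, j) == t(3 - 1 - i, j));  // n0 - 1 - i along dim 0
  return 0;
}
```
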
diff --git a/unsupported/test/cxx11_tensor_roundings.cpp b/unsupported/test/cxx11_tensor_roundings.cpp
new file mode 100644
index 000000000..2c26151ab
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_roundings.cpp
@@ -0,0 +1,62 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2016 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+
+#include <Eigen/CXX11/Tensor>
+
+
+static void test_float_rounding()
+{
+ Tensor<float, 2> ftensor(20,30);
+ ftensor = ftensor.random() * 100.f;
+
+ Tensor<float, 2> result = ftensor.round();
+
+ for (int i = 0; i < 20; ++i) {
+ for (int j = 0; j < 30; ++j) {
+ VERIFY_IS_EQUAL(result(i,j), numext::round(ftensor(i,j)));
+ }
+ }
+}
+
+static void test_float_flooring()
+{
+ Tensor<float, 2> ftensor(20,30);
+ ftensor = ftensor.random() * 100.f;
+
+ Tensor<float, 2> result = ftensor.floor();
+
+ for (int i = 0; i < 20; ++i) {
+ for (int j = 0; j < 30; ++j) {
+ VERIFY_IS_EQUAL(result(i,j), numext::floor(ftensor(i,j)));
+ }
+ }
+}
+
+static void test_float_ceiling()
+{
+ Tensor<float, 2> ftensor(20,30);
+ ftensor = ftensor.random() * 100.f;
+
+ Tensor<float, 2> result = ftensor.ceil();
+
+ for (int i = 0; i < 20; ++i) {
+ for (int j = 0; j < 30; ++j) {
+ VERIFY_IS_EQUAL(result(i,j), numext::ceil(ftensor(i,j)));
+ }
+ }
+}
+
+void test_cxx11_tensor_roundings()
+{
+ CALL_SUBTEST(test_float_rounding());
+ CALL_SUBTEST(test_float_ceiling());
+ CALL_SUBTEST(test_float_flooring());
+}
diff --git a/unsupported/test/cxx11_tensor_scan.cpp b/unsupported/test/cxx11_tensor_scan.cpp
new file mode 100644
index 000000000..af59aa3ef
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_scan.cpp
@@ -0,0 +1,110 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2016 Igor Babuschkin <igor@babuschk.in>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+#include <limits>
+#include <numeric>
+#include <Eigen/CXX11/Tensor>
+
+using Eigen::Tensor;
+
+template <int DataLayout, typename Type=float, bool Exclusive = false>
+static void test_1d_scan()
+{
+ int size = 50;
+ Tensor<Type, 1, DataLayout> tensor(size);
+ tensor.setRandom();
+ Tensor<Type, 1, DataLayout> result = tensor.cumsum(0, Exclusive);
+
+ VERIFY_IS_EQUAL(tensor.dimension(0), result.dimension(0));
+
+ Type accum = 0;
+ for (int i = 0; i < size; i++) {
+ if (Exclusive) {
+ VERIFY_IS_EQUAL(result(i), accum);
+ accum += tensor(i);
+ } else {
+ accum += tensor(i);
+ VERIFY_IS_EQUAL(result(i), accum);
+ }
+ }
+
+ accum = 1;
+ result = tensor.cumprod(0, Exclusive);
+ for (int i = 0; i < size; i++) {
+ if (Exclusive) {
+ VERIFY_IS_EQUAL(result(i), accum);
+ accum *= tensor(i);
+ } else {
+ accum *= tensor(i);
+ VERIFY_IS_EQUAL(result(i), accum);
+ }
+ }
+}
+
+template <int DataLayout, typename Type=float>
+static void test_4d_scan()
+{
+ int size = 5;
+ Tensor<Type, 4, DataLayout> tensor(size, size, size, size);
+ tensor.setRandom();
+
+ Tensor<Type, 4, DataLayout> result(size, size, size, size);
+
+ result = tensor.cumsum(0);
+ Type accum = 0;
+ for (int i = 0; i < size; i++) {
+ accum += tensor(i, 1, 2, 3);
+ VERIFY_IS_EQUAL(result(i, 1, 2, 3), accum);
+ }
+ result = tensor.cumsum(1);
+ accum = 0;
+ for (int i = 0; i < size; i++) {
+ accum += tensor(1, i, 2, 3);
+ VERIFY_IS_EQUAL(result(1, i, 2, 3), accum);
+ }
+ result = tensor.cumsum(2);
+ accum = 0;
+ for (int i = 0; i < size; i++) {
+ accum += tensor(1, 2, i, 3);
+ VERIFY_IS_EQUAL(result(1, 2, i, 3), accum);
+ }
+ result = tensor.cumsum(3);
+ accum = 0;
+ for (int i = 0; i < size; i++) {
+ accum += tensor(1, 2, 3, i);
+ VERIFY_IS_EQUAL(result(1, 2, 3, i), accum);
+ }
+}
+
+template <int DataLayout>
+static void test_tensor_maps() {
+ int inputs[20];
+ TensorMap<Tensor<int, 1, DataLayout> > tensor_map(inputs, 20);
+ tensor_map.setRandom();
+
+ Tensor<int, 1, DataLayout> result = tensor_map.cumsum(0);
+
+ int accum = 0;
+ for (int i = 0; i < 20; ++i) {
+ accum += tensor_map(i);
+ VERIFY_IS_EQUAL(result(i), accum);
+ }
+}
+
+void test_cxx11_tensor_scan() {
+ CALL_SUBTEST((test_1d_scan<ColMajor, float, true>()));
+ CALL_SUBTEST((test_1d_scan<ColMajor, float, false>()));
+ CALL_SUBTEST((test_1d_scan<RowMajor, float, true>()));
+ CALL_SUBTEST((test_1d_scan<RowMajor, float, false>()));
+ CALL_SUBTEST(test_4d_scan<ColMajor>());
+ CALL_SUBTEST(test_4d_scan<RowMajor>());
+ CALL_SUBTEST(test_tensor_maps<ColMajor>());
+ CALL_SUBTEST(test_tensor_maps<RowMajor>());
+}
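
The Exclusive flag above decides whether result(i) includes tensor(i): the inclusive cumsum of {1, 2, 3} is {1, 3, 6}, while the exclusive variant shifts it to {0, 1, 3}. A minimal sketch, assuming the cumsum(axis, exclusive) overload the test already uses:

```cpp
#include <unsupported/Eigen/CXX11/Tensor>
#include <cassert>

int main() {
  Eigen::Tensor<int, 1> t(3);
  t(0) = 1; t(1) = 2; t(2) = 3;

  Eigen::Tensor<int, 1> inc = t.cumsum(0);        // {1, 3, 6}
  Eigen::Tensor<int, 1> exc = t.cumsum(0, true);  // {0, 1, 3}

  assert(inc(2) == 6);
  assert(exc(0) == 0 && exc(2) == 3);
  return 0;
}
```
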
diff --git a/unsupported/test/cxx11_tensor_scan_cuda.cu b/unsupported/test/cxx11_tensor_scan_cuda.cu
new file mode 100644
index 000000000..5f146f3c9
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_scan_cuda.cu
@@ -0,0 +1,79 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2016 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#define EIGEN_TEST_NO_LONGDOUBLE
+#define EIGEN_TEST_NO_COMPLEX
+#define EIGEN_TEST_FUNC cxx11_tensor_scan_cuda
+#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int
+#define EIGEN_USE_GPU
+
+#if defined __CUDACC_VER__ && __CUDACC_VER__ >= 70500
+#include <cuda_fp16.h>
+#endif
+#include "main.h"
+#include <unsupported/Eigen/CXX11/Tensor>
+
+using Eigen::Tensor;
+typedef Tensor<float, 1>::DimensionPair DimPair;
+
+template<int DataLayout>
+void test_cuda_cumsum(int m_size, int k_size, int n_size)
+{
+ std::cout << "Testing for (" << m_size << "," << k_size << "," << n_size << ")" << std::endl;
+ Tensor<float, 3, DataLayout> t_input(m_size, k_size, n_size);
+ Tensor<float, 3, DataLayout> t_result(m_size, k_size, n_size);
+ Tensor<float, 3, DataLayout> t_result_gpu(m_size, k_size, n_size);
+
+ t_input.setRandom();
+
+ std::size_t t_input_bytes = t_input.size() * sizeof(float);
+ std::size_t t_result_bytes = t_result.size() * sizeof(float);
+
+ float* d_t_input;
+ float* d_t_result;
+
+ cudaMalloc((void**)(&d_t_input), t_input_bytes);
+ cudaMalloc((void**)(&d_t_result), t_result_bytes);
+
+ cudaMemcpy(d_t_input, t_input.data(), t_input_bytes, cudaMemcpyHostToDevice);
+
+ Eigen::CudaStreamDevice stream;
+ Eigen::GpuDevice gpu_device(&stream);
+
+ Eigen::TensorMap<Eigen::Tensor<float, 3, DataLayout> >
+ gpu_t_input(d_t_input, Eigen::array<int, 3>(m_size, k_size, n_size));
+ Eigen::TensorMap<Eigen::Tensor<float, 3, DataLayout> >
+ gpu_t_result(d_t_result, Eigen::array<int, 3>(m_size, k_size, n_size));
+
+ gpu_t_result.device(gpu_device) = gpu_t_input.cumsum(1);
+ t_result = t_input.cumsum(1);
+
+ cudaMemcpy(t_result_gpu.data(), d_t_result, t_result_bytes, cudaMemcpyDeviceToHost);
+ for (DenseIndex i = 0; i < t_result.size(); i++) {
+ if (fabs(t_result(i) - t_result_gpu(i)) < 1e-4f) {
+ continue;
+ }
+ if (Eigen::internal::isApprox(t_result(i), t_result_gpu(i), 1e-4f)) {
+ continue;
+ }
+ std::cout << "mismatch detected at index " << i << ": " << t_result(i)
+ << " vs " << t_result_gpu(i) << std::endl;
+ assert(false);
+ }
+
+ cudaFree((void*)d_t_input);
+ cudaFree((void*)d_t_result);
+}
+
+
+void test_cxx11_tensor_scan_cuda()
+{
+ CALL_SUBTEST_1(test_cuda_cumsum<ColMajor>(128, 128, 128));
+ CALL_SUBTEST_2(test_cuda_cumsum<RowMajor>(128, 128, 128));
+}
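
The mismatch loop above accepts a coefficient if it passes either an absolute 1e-4 test or Eigen's relative isApprox test: a cumsum over 128 entries accumulates rounding error, and a purely relative check would be too strict for partial sums near zero. The standalone sketch below shows such a combined criterion; the helper name and thresholds are illustrative, not Eigen's actual implementation.

```cpp
#include <algorithm>
#include <cassert>
#include <cmath>

// Hypothetical helper: accept if the difference is small in absolute
// terms or small relative to the larger magnitude.
static bool approx_equal(float a, float b, float tol = 1e-4f) {
  const float diff = std::fabs(a - b);
  return diff < tol || diff < tol * std::max(std::fabs(a), std::fabs(b));
}

int main() {
  assert(approx_equal(0.0f, 5e-5f));        // near zero: absolute branch
  assert(approx_equal(1000.0f, 1000.05f));  // large values: relative branch
  return 0;
}
```
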
diff --git a/unsupported/test/cxx11_tensor_shuffling.cpp b/unsupported/test/cxx11_tensor_shuffling.cpp
new file mode 100644
index 000000000..d11444a14
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_shuffling.cpp
@@ -0,0 +1,228 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+
+#include <Eigen/CXX11/Tensor>
+
+using Eigen::Tensor;
+using Eigen::array;
+
+template <int DataLayout>
+static void test_simple_shuffling()
+{
+ Tensor<float, 4, DataLayout> tensor(2,3,5,7);
+ tensor.setRandom();
+ array<ptrdiff_t, 4> shuffles;
+ shuffles[0] = 0;
+ shuffles[1] = 1;
+ shuffles[2] = 2;
+ shuffles[3] = 3;
+
+ Tensor<float, 4, DataLayout> no_shuffle;
+ no_shuffle = tensor.shuffle(shuffles);
+
+ VERIFY_IS_EQUAL(no_shuffle.dimension(0), 2);
+ VERIFY_IS_EQUAL(no_shuffle.dimension(1), 3);
+ VERIFY_IS_EQUAL(no_shuffle.dimension(2), 5);
+ VERIFY_IS_EQUAL(no_shuffle.dimension(3), 7);
+
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 5; ++k) {
+ for (int l = 0; l < 7; ++l) {
+ VERIFY_IS_EQUAL(tensor(i,j,k,l), no_shuffle(i,j,k,l));
+ }
+ }
+ }
+ }
+
+ shuffles[0] = 2;
+ shuffles[1] = 3;
+ shuffles[2] = 1;
+ shuffles[3] = 0;
+ Tensor<float, 4, DataLayout> shuffle;
+ shuffle = tensor.shuffle(shuffles);
+
+ VERIFY_IS_EQUAL(shuffle.dimension(0), 5);
+ VERIFY_IS_EQUAL(shuffle.dimension(1), 7);
+ VERIFY_IS_EQUAL(shuffle.dimension(2), 3);
+ VERIFY_IS_EQUAL(shuffle.dimension(3), 2);
+
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 5; ++k) {
+ for (int l = 0; l < 7; ++l) {
+ VERIFY_IS_EQUAL(tensor(i,j,k,l), shuffle(k,l,j,i));
+ }
+ }
+ }
+ }
+}
+
+
+template <int DataLayout>
+static void test_expr_shuffling()
+{
+ Tensor<float, 4, DataLayout> tensor(2,3,5,7);
+ tensor.setRandom();
+
+ array<ptrdiff_t, 4> shuffles;
+ shuffles[0] = 2;
+ shuffles[1] = 3;
+ shuffles[2] = 1;
+ shuffles[3] = 0;
+ Tensor<float, 4, DataLayout> expected;
+ expected = tensor.shuffle(shuffles);
+
+ Tensor<float, 4, DataLayout> result(5,7,3,2);
+
+ array<int, 4> src_slice_dim{{2,3,1,7}};
+ array<int, 4> src_slice_start{{0,0,0,0}};
+ array<int, 4> dst_slice_dim{{1,7,3,2}};
+ array<int, 4> dst_slice_start{{0,0,0,0}};
+
+ for (int i = 0; i < 5; ++i) {
+ result.slice(dst_slice_start, dst_slice_dim) =
+ tensor.slice(src_slice_start, src_slice_dim).shuffle(shuffles);
+ src_slice_start[2] += 1;
+ dst_slice_start[0] += 1;
+ }
+
+ VERIFY_IS_EQUAL(result.dimension(0), 5);
+ VERIFY_IS_EQUAL(result.dimension(1), 7);
+ VERIFY_IS_EQUAL(result.dimension(2), 3);
+ VERIFY_IS_EQUAL(result.dimension(3), 2);
+
+ for (int i = 0; i < expected.dimension(0); ++i) {
+ for (int j = 0; j < expected.dimension(1); ++j) {
+ for (int k = 0; k < expected.dimension(2); ++k) {
+ for (int l = 0; l < expected.dimension(3); ++l) {
+ VERIFY_IS_EQUAL(result(i,j,k,l), expected(i,j,k,l));
+ }
+ }
+ }
+ }
+
+ dst_slice_start[0] = 0;
+ result.setRandom();
+ for (int i = 0; i < 5; ++i) {
+ result.slice(dst_slice_start, dst_slice_dim) =
+ tensor.shuffle(shuffles).slice(dst_slice_start, dst_slice_dim);
+ dst_slice_start[0] += 1;
+ }
+
+ for (int i = 0; i < expected.dimension(0); ++i) {
+ for (int j = 0; j < expected.dimension(1); ++j) {
+ for (int k = 0; k < expected.dimension(2); ++k) {
+ for (int l = 0; l < expected.dimension(3); ++l) {
+ VERIFY_IS_EQUAL(result(i,j,k,l), expected(i,j,k,l));
+ }
+ }
+ }
+ }
+}
+
+
+template <int DataLayout>
+static void test_shuffling_as_value()
+{
+ Tensor<float, 4, DataLayout> tensor(2,3,5,7);
+ tensor.setRandom();
+ array<ptrdiff_t, 4> shuffles;
+ shuffles[2] = 0;
+ shuffles[3] = 1;
+ shuffles[1] = 2;
+ shuffles[0] = 3;
+ Tensor<float, 4, DataLayout> shuffle(5,7,3,2);
+ shuffle.shuffle(shuffles) = tensor;
+
+ VERIFY_IS_EQUAL(shuffle.dimension(0), 5);
+ VERIFY_IS_EQUAL(shuffle.dimension(1), 7);
+ VERIFY_IS_EQUAL(shuffle.dimension(2), 3);
+ VERIFY_IS_EQUAL(shuffle.dimension(3), 2);
+
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 5; ++k) {
+ for (int l = 0; l < 7; ++l) {
+ VERIFY_IS_EQUAL(tensor(i,j,k,l), shuffle(k,l,j,i));
+ }
+ }
+ }
+ }
+
+ array<ptrdiff_t, 4> no_shuffle;
+ no_shuffle[0] = 0;
+ no_shuffle[1] = 1;
+ no_shuffle[2] = 2;
+ no_shuffle[3] = 3;
+ Tensor<float, 4, DataLayout> shuffle2(5,7,3,2);
+ shuffle2.shuffle(shuffles) = tensor.shuffle(no_shuffle);
+ for (int i = 0; i < 5; ++i) {
+ for (int j = 0; j < 7; ++j) {
+ for (int k = 0; k < 3; ++k) {
+ for (int l = 0; l < 2; ++l) {
+ VERIFY_IS_EQUAL(shuffle2(i,j,k,l), shuffle(i,j,k,l));
+ }
+ }
+ }
+ }
+}
+
+
+template <int DataLayout>
+static void test_shuffle_unshuffle()
+{
+ Tensor<float, 4, DataLayout> tensor(2,3,5,7);
+ tensor.setRandom();
+
+ // Choose a random permutation via Fisher-Yates, recording its inverse as we go.
+ array<ptrdiff_t, 4> shuffles;
+ for (int i = 0; i < 4; ++i) {
+ shuffles[i] = i;
+ }
+ array<ptrdiff_t, 4> shuffles_inverse;
+ for (int i = 0; i < 4; ++i) {
+ const ptrdiff_t index = internal::random<ptrdiff_t>(i, 3);
+ shuffles_inverse[shuffles[index]] = i;
+ std::swap(shuffles[i], shuffles[index]);
+ }
+
+ Tensor<float, 4, DataLayout> shuffle;
+ shuffle = tensor.shuffle(shuffles).shuffle(shuffles_inverse);
+
+ VERIFY_IS_EQUAL(shuffle.dimension(0), 2);
+ VERIFY_IS_EQUAL(shuffle.dimension(1), 3);
+ VERIFY_IS_EQUAL(shuffle.dimension(2), 5);
+ VERIFY_IS_EQUAL(shuffle.dimension(3), 7);
+
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 5; ++k) {
+ for (int l = 0; l < 7; ++l) {
+ VERIFY_IS_EQUAL(tensor(i,j,k,l), shuffle(i,j,k,l));
+ }
+ }
+ }
+ }
+}
+
+
+void test_cxx11_tensor_shuffling()
+{
+ CALL_SUBTEST(test_simple_shuffling<ColMajor>());
+ CALL_SUBTEST(test_simple_shuffling<RowMajor>());
+ CALL_SUBTEST(test_expr_shuffling<ColMajor>());
+ CALL_SUBTEST(test_expr_shuffling<RowMajor>());
+ CALL_SUBTEST(test_shuffling_as_value<ColMajor>());
+ CALL_SUBTEST(test_shuffling_as_value<RowMajor>());
+ CALL_SUBTEST(test_shuffle_unshuffle<ColMajor>());
+ CALL_SUBTEST(test_shuffle_unshuffle<RowMajor>());
+}
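
Both the dimension and the coefficient checks above follow from a single rule: shuffle(s) makes output dimension i equal to input dimension s[i], so an output index in position k is routed to input dimension s[k]. That is why, with s = {2,3,1,0}, tensor(i,j,k,l) equals shuffle(k,l,j,i). A rank-3 sketch of the same routing:

```cpp
#include <unsupported/Eigen/CXX11/Tensor>
#include <cassert>

int main() {
  Eigen::Tensor<float, 3> t(2, 3, 4);
  t.setRandom();

  // Output dim i takes input dim s[i]: dims become (4, 2, 3).
  Eigen::array<ptrdiff_t, 3> s{{2, 0, 1}};
  Eigen::Tensor<float, 3> out = t.shuffle(s);
  assert(out.dimension(0) == 4 && out.dimension(1) == 2 && out.dimension(2) == 3);

  // out(i, j, k) == t(j, k, i): the output index in position k feeds
  // input dimension s[k].
  for (int i = 0; i < 4; ++i)
    for (int j = 0; j < 2; ++j)
      for (int k = 0; k < 3; ++k)
        assert(out(i, j, k) == t(j, k, i));
  return 0;
}
```
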
diff --git a/unsupported/test/cxx11_tensor_simple.cpp b/unsupported/test/cxx11_tensor_simple.cpp
new file mode 100644
index 000000000..5a0d339ef
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_simple.cpp
@@ -0,0 +1,327 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2013 Christian Seiler <christian@iwakd.de>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+
+#include <Eigen/CXX11/Tensor>
+
+using Eigen::Tensor;
+using Eigen::RowMajor;
+
+static void test_0d()
+{
+ Tensor<int, 0> scalar1;
+ Tensor<int, 0, RowMajor> scalar2;
+ Tensor<int, 0> scalar3;
+ Tensor<int, 0, RowMajor> scalar4;
+
+ scalar3.resize();
+ scalar4.resize();
+
+ scalar1() = 7;
+ scalar2() = 13;
+ scalar3.setValues(17);
+ scalar4.setZero();
+
+ VERIFY_IS_EQUAL(scalar1.rank(), 0);
+ VERIFY_IS_EQUAL(scalar1.size(), 1);
+
+ VERIFY_IS_EQUAL(scalar1(), 7);
+ VERIFY_IS_EQUAL(scalar2(), 13);
+ VERIFY_IS_EQUAL(scalar3(), 17);
+ VERIFY_IS_EQUAL(scalar4(), 0);
+
+ Tensor<int, 0> scalar5(scalar1);
+
+ VERIFY_IS_EQUAL(scalar5(), 7);
+ VERIFY_IS_EQUAL(scalar5.data()[0], 7);
+}
+
+static void test_1d()
+{
+ Tensor<int, 1> vec1(6);
+ Tensor<int, 1, RowMajor> vec2(6);
+ Tensor<int, 1> vec3;
+ Tensor<int, 1, RowMajor> vec4;
+
+ vec3.resize(6);
+ vec4.resize(6);
+
+ vec1(0) = 4; vec2(0) = 0; vec3(0) = 5;
+ vec1(1) = 8; vec2(1) = 1; vec3(1) = 4;
+ vec1(2) = 15; vec2(2) = 2; vec3(2) = 3;
+ vec1(3) = 16; vec2(3) = 3; vec3(3) = 2;
+ vec1(4) = 23; vec2(4) = 4; vec3(4) = 1;
+ vec1(5) = 42; vec2(5) = 5; vec3(5) = 0;
+ vec4.setZero();
+
+ VERIFY_IS_EQUAL((vec1.rank()), 1);
+ VERIFY_IS_EQUAL((vec1.size()), 6);
+ VERIFY_IS_EQUAL((vec1.dimensions()[0]), 6);
+
+ VERIFY_IS_EQUAL((vec1[0]), 4);
+ VERIFY_IS_EQUAL((vec1[1]), 8);
+ VERIFY_IS_EQUAL((vec1[2]), 15);
+ VERIFY_IS_EQUAL((vec1[3]), 16);
+ VERIFY_IS_EQUAL((vec1[4]), 23);
+ VERIFY_IS_EQUAL((vec1[5]), 42);
+
+ VERIFY_IS_EQUAL((vec2[0]), 0);
+ VERIFY_IS_EQUAL((vec2[1]), 1);
+ VERIFY_IS_EQUAL((vec2[2]), 2);
+ VERIFY_IS_EQUAL((vec2[3]), 3);
+ VERIFY_IS_EQUAL((vec2[4]), 4);
+ VERIFY_IS_EQUAL((vec2[5]), 5);
+
+ VERIFY_IS_EQUAL((vec3[0]), 5);
+ VERIFY_IS_EQUAL((vec3[1]), 4);
+ VERIFY_IS_EQUAL((vec3[2]), 3);
+ VERIFY_IS_EQUAL((vec3[3]), 2);
+ VERIFY_IS_EQUAL((vec3[4]), 1);
+ VERIFY_IS_EQUAL((vec3[5]), 0);
+
+ VERIFY_IS_EQUAL((vec4[0]), 0);
+ VERIFY_IS_EQUAL((vec4[1]), 0);
+ VERIFY_IS_EQUAL((vec4[2]), 0);
+ VERIFY_IS_EQUAL((vec4[3]), 0);
+ VERIFY_IS_EQUAL((vec4[4]), 0);
+ VERIFY_IS_EQUAL((vec4[5]), 0);
+
+ Tensor<int, 1> vec5(vec1);
+
+ VERIFY_IS_EQUAL((vec5(0)), 4);
+ VERIFY_IS_EQUAL((vec5(1)), 8);
+ VERIFY_IS_EQUAL((vec5(2)), 15);
+ VERIFY_IS_EQUAL((vec5(3)), 16);
+ VERIFY_IS_EQUAL((vec5(4)), 23);
+ VERIFY_IS_EQUAL((vec5(5)), 42);
+
+ VERIFY_IS_EQUAL((vec5.data()[0]), 4);
+ VERIFY_IS_EQUAL((vec5.data()[1]), 8);
+ VERIFY_IS_EQUAL((vec5.data()[2]), 15);
+ VERIFY_IS_EQUAL((vec5.data()[3]), 16);
+ VERIFY_IS_EQUAL((vec5.data()[4]), 23);
+ VERIFY_IS_EQUAL((vec5.data()[5]), 42);
+}
+
+static void test_2d()
+{
+ Tensor<int, 2> mat1(2,3);
+ Tensor<int, 2, RowMajor> mat2(2,3);
+
+ mat1(0,0) = 0;
+ mat1(0,1) = 1;
+ mat1(0,2) = 2;
+ mat1(1,0) = 3;
+ mat1(1,1) = 4;
+ mat1(1,2) = 5;
+
+ mat2(0,0) = 0;
+ mat2(0,1) = 1;
+ mat2(0,2) = 2;
+ mat2(1,0) = 3;
+ mat2(1,1) = 4;
+ mat2(1,2) = 5;
+
+ VERIFY_IS_EQUAL((mat1.rank()), 2);
+ VERIFY_IS_EQUAL((mat1.size()), 6);
+ VERIFY_IS_EQUAL((mat1.dimensions()[0]), 2);
+ VERIFY_IS_EQUAL((mat1.dimensions()[1]), 3);
+
+ VERIFY_IS_EQUAL((mat2.rank()), 2);
+ VERIFY_IS_EQUAL((mat2.size()), 6);
+ VERIFY_IS_EQUAL((mat2.dimensions()[0]), 2);
+ VERIFY_IS_EQUAL((mat2.dimensions()[1]), 3);
+
+ VERIFY_IS_EQUAL((mat1.data()[0]), 0);
+ VERIFY_IS_EQUAL((mat1.data()[1]), 3);
+ VERIFY_IS_EQUAL((mat1.data()[2]), 1);
+ VERIFY_IS_EQUAL((mat1.data()[3]), 4);
+ VERIFY_IS_EQUAL((mat1.data()[4]), 2);
+ VERIFY_IS_EQUAL((mat1.data()[5]), 5);
+
+ VERIFY_IS_EQUAL((mat2.data()[0]), 0);
+ VERIFY_IS_EQUAL((mat2.data()[1]), 1);
+ VERIFY_IS_EQUAL((mat2.data()[2]), 2);
+ VERIFY_IS_EQUAL((mat2.data()[3]), 3);
+ VERIFY_IS_EQUAL((mat2.data()[4]), 4);
+ VERIFY_IS_EQUAL((mat2.data()[5]), 5);
+}
+
+static void test_3d()
+{
+ Tensor<int, 3> epsilon(3,3,3);
+ epsilon.setZero();
+ epsilon(0,1,2) = epsilon(2,0,1) = epsilon(1,2,0) = 1;
+ epsilon(2,1,0) = epsilon(0,2,1) = epsilon(1,0,2) = -1;
+
+ VERIFY_IS_EQUAL((epsilon.size()), 27);
+ VERIFY_IS_EQUAL((epsilon.dimensions()[0]), 3);
+ VERIFY_IS_EQUAL((epsilon.dimensions()[1]), 3);
+ VERIFY_IS_EQUAL((epsilon.dimensions()[2]), 3);
+
+ VERIFY_IS_EQUAL((epsilon(0,0,0)), 0);
+ VERIFY_IS_EQUAL((epsilon(0,0,1)), 0);
+ VERIFY_IS_EQUAL((epsilon(0,0,2)), 0);
+ VERIFY_IS_EQUAL((epsilon(0,1,0)), 0);
+ VERIFY_IS_EQUAL((epsilon(0,1,1)), 0);
+ VERIFY_IS_EQUAL((epsilon(0,2,0)), 0);
+ VERIFY_IS_EQUAL((epsilon(0,2,2)), 0);
+ VERIFY_IS_EQUAL((epsilon(1,0,0)), 0);
+ VERIFY_IS_EQUAL((epsilon(1,0,1)), 0);
+ VERIFY_IS_EQUAL((epsilon(1,1,0)), 0);
+ VERIFY_IS_EQUAL((epsilon(1,1,1)), 0);
+ VERIFY_IS_EQUAL((epsilon(1,1,2)), 0);
+ VERIFY_IS_EQUAL((epsilon(1,2,1)), 0);
+ VERIFY_IS_EQUAL((epsilon(1,2,2)), 0);
+ VERIFY_IS_EQUAL((epsilon(2,0,0)), 0);
+ VERIFY_IS_EQUAL((epsilon(2,0,2)), 0);
+ VERIFY_IS_EQUAL((epsilon(2,1,1)), 0);
+ VERIFY_IS_EQUAL((epsilon(2,1,2)), 0);
+ VERIFY_IS_EQUAL((epsilon(2,2,0)), 0);
+ VERIFY_IS_EQUAL((epsilon(2,2,1)), 0);
+ VERIFY_IS_EQUAL((epsilon(2,2,2)), 0);
+
+ VERIFY_IS_EQUAL((epsilon(0,1,2)), 1);
+ VERIFY_IS_EQUAL((epsilon(2,0,1)), 1);
+ VERIFY_IS_EQUAL((epsilon(1,2,0)), 1);
+ VERIFY_IS_EQUAL((epsilon(2,1,0)), -1);
+ VERIFY_IS_EQUAL((epsilon(0,2,1)), -1);
+ VERIFY_IS_EQUAL((epsilon(1,0,2)), -1);
+
+ array<Eigen::DenseIndex, 3> dims;
+ dims[0] = 2;
+ dims[1] = 3;
+ dims[2] = 4;
+ Tensor<int, 3> t1(dims);
+ Tensor<int, 3, RowMajor> t2(dims);
+
+ VERIFY_IS_EQUAL((t1.size()), 24);
+ VERIFY_IS_EQUAL((t1.dimensions()[0]), 2);
+ VERIFY_IS_EQUAL((t1.dimensions()[1]), 3);
+ VERIFY_IS_EQUAL((t1.dimensions()[2]), 4);
+
+ VERIFY_IS_EQUAL((t2.size()), 24);
+ VERIFY_IS_EQUAL((t2.dimensions()[0]), 2);
+ VERIFY_IS_EQUAL((t2.dimensions()[1]), 3);
+ VERIFY_IS_EQUAL((t2.dimensions()[2]), 4);
+
+ for (int i = 0; i < 2; i++) {
+ for (int j = 0; j < 3; j++) {
+ for (int k = 0; k < 4; k++) {
+ t1(i, j, k) = 100 * i + 10 * j + k;
+ t2(i, j, k) = 100 * i + 10 * j + k;
+ }
+ }
+ }
+
+ VERIFY_IS_EQUAL((t1.data()[0]), 0);
+ VERIFY_IS_EQUAL((t1.data()[1]), 100);
+ VERIFY_IS_EQUAL((t1.data()[2]), 10);
+ VERIFY_IS_EQUAL((t1.data()[3]), 110);
+ VERIFY_IS_EQUAL((t1.data()[4]), 20);
+ VERIFY_IS_EQUAL((t1.data()[5]), 120);
+ VERIFY_IS_EQUAL((t1.data()[6]), 1);
+ VERIFY_IS_EQUAL((t1.data()[7]), 101);
+ VERIFY_IS_EQUAL((t1.data()[8]), 11);
+ VERIFY_IS_EQUAL((t1.data()[9]), 111);
+ VERIFY_IS_EQUAL((t1.data()[10]), 21);
+ VERIFY_IS_EQUAL((t1.data()[11]), 121);
+ VERIFY_IS_EQUAL((t1.data()[12]), 2);
+ VERIFY_IS_EQUAL((t1.data()[13]), 102);
+ VERIFY_IS_EQUAL((t1.data()[14]), 12);
+ VERIFY_IS_EQUAL((t1.data()[15]), 112);
+ VERIFY_IS_EQUAL((t1.data()[16]), 22);
+ VERIFY_IS_EQUAL((t1.data()[17]), 122);
+ VERIFY_IS_EQUAL((t1.data()[18]), 3);
+ VERIFY_IS_EQUAL((t1.data()[19]), 103);
+ VERIFY_IS_EQUAL((t1.data()[20]), 13);
+ VERIFY_IS_EQUAL((t1.data()[21]), 113);
+ VERIFY_IS_EQUAL((t1.data()[22]), 23);
+ VERIFY_IS_EQUAL((t1.data()[23]), 123);
+
+ VERIFY_IS_EQUAL((t2.data()[0]), 0);
+ VERIFY_IS_EQUAL((t2.data()[1]), 1);
+ VERIFY_IS_EQUAL((t2.data()[2]), 2);
+ VERIFY_IS_EQUAL((t2.data()[3]), 3);
+ VERIFY_IS_EQUAL((t2.data()[4]), 10);
+ VERIFY_IS_EQUAL((t2.data()[5]), 11);
+ VERIFY_IS_EQUAL((t2.data()[6]), 12);
+ VERIFY_IS_EQUAL((t2.data()[7]), 13);
+ VERIFY_IS_EQUAL((t2.data()[8]), 20);
+ VERIFY_IS_EQUAL((t2.data()[9]), 21);
+ VERIFY_IS_EQUAL((t2.data()[10]), 22);
+ VERIFY_IS_EQUAL((t2.data()[11]), 23);
+ VERIFY_IS_EQUAL((t2.data()[12]), 100);
+ VERIFY_IS_EQUAL((t2.data()[13]), 101);
+ VERIFY_IS_EQUAL((t2.data()[14]), 102);
+ VERIFY_IS_EQUAL((t2.data()[15]), 103);
+ VERIFY_IS_EQUAL((t2.data()[16]), 110);
+ VERIFY_IS_EQUAL((t2.data()[17]), 111);
+ VERIFY_IS_EQUAL((t2.data()[18]), 112);
+ VERIFY_IS_EQUAL((t2.data()[19]), 113);
+ VERIFY_IS_EQUAL((t2.data()[20]), 120);
+ VERIFY_IS_EQUAL((t2.data()[21]), 121);
+ VERIFY_IS_EQUAL((t2.data()[22]), 122);
+ VERIFY_IS_EQUAL((t2.data()[23]), 123);
+}
+
+static void test_simple_assign()
+{
+ Tensor<int, 3> epsilon(3,3,3);
+ epsilon.setZero();
+ epsilon(0,1,2) = epsilon(2,0,1) = epsilon(1,2,0) = 1;
+ epsilon(2,1,0) = epsilon(0,2,1) = epsilon(1,0,2) = -1;
+
+ Tensor<int, 3> e2(3,3,3);
+ e2.setZero();
+ VERIFY_IS_EQUAL((e2(1,2,0)), 0);
+
+ e2 = epsilon;
+ VERIFY_IS_EQUAL((e2(1,2,0)), 1);
+ VERIFY_IS_EQUAL((e2(0,1,2)), 1);
+ VERIFY_IS_EQUAL((e2(2,0,1)), 1);
+ VERIFY_IS_EQUAL((e2(2,1,0)), -1);
+ VERIFY_IS_EQUAL((e2(0,2,1)), -1);
+ VERIFY_IS_EQUAL((e2(1,0,2)), -1);
+}
+
+static void test_resize()
+{
+ Tensor<int, 3> epsilon;
+ epsilon.resize(2,3,7);
+ VERIFY_IS_EQUAL(epsilon.dimension(0), 2);
+ VERIFY_IS_EQUAL(epsilon.dimension(1), 3);
+ VERIFY_IS_EQUAL(epsilon.dimension(2), 7);
+ VERIFY_IS_EQUAL(epsilon.size(), 2*3*7);
+
+ const int* old_data = epsilon.data();
+ epsilon.resize(3,2,7);
+ VERIFY_IS_EQUAL(epsilon.dimension(0), 3);
+ VERIFY_IS_EQUAL(epsilon.dimension(1), 2);
+ VERIFY_IS_EQUAL(epsilon.dimension(2), 7);
+ VERIFY_IS_EQUAL(epsilon.size(), 2*3*7);
+ VERIFY_IS_EQUAL(epsilon.data(), old_data);
+
+ epsilon.resize(3,5,7);
+ VERIFY_IS_EQUAL(epsilon.dimension(0), 3);
+ VERIFY_IS_EQUAL(epsilon.dimension(1), 5);
+ VERIFY_IS_EQUAL(epsilon.dimension(2), 7);
+ VERIFY_IS_EQUAL(epsilon.size(), 3*5*7);
+}
+
+void test_cxx11_tensor_simple()
+{
+ CALL_SUBTEST(test_0d());
+ CALL_SUBTEST(test_1d());
+ CALL_SUBTEST(test_2d());
+ CALL_SUBTEST(test_3d());
+ CALL_SUBTEST(test_simple_assign());
+ CALL_SUBTEST(test_resize());
+}
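
The data()[] expectations above pin down the two memory layouts: the default ColMajor linearizes (i, j, k) as i + d0 * (j + d1 * k), first index fastest, while RowMajor mirrors it with the last index fastest. A sketch checking the ColMajor formula against the same 100*i + 10*j + k fill used above:

```cpp
#include <unsupported/Eigen/CXX11/Tensor>
#include <cassert>

int main() {
  const int d0 = 2, d1 = 3, d2 = 4;
  Eigen::Tensor<int, 3> t(d0, d1, d2);  // ColMajor by default
  for (int i = 0; i < d0; ++i)
    for (int j = 0; j < d1; ++j)
      for (int k = 0; k < d2; ++k)
        t(i, j, k) = 100 * i + 10 * j + k;

  // Column-major linearization: the first index varies fastest.
  for (int i = 0; i < d0; ++i)
    for (int j = 0; j < d1; ++j)
      for (int k = 0; k < d2; ++k)
        assert(t.data()[i + d0 * (j + d1 * k)] == t(i, j, k));
  return 0;
}
```
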
diff --git a/unsupported/test/cxx11_tensor_striding.cpp b/unsupported/test/cxx11_tensor_striding.cpp
new file mode 100644
index 000000000..935b908cc
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_striding.cpp
@@ -0,0 +1,119 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+
+#include <Eigen/CXX11/Tensor>
+
+using Eigen::Tensor;
+
+template<int DataLayout>
+static void test_simple_striding()
+{
+ Tensor<float, 4, DataLayout> tensor(2,3,5,7);
+ tensor.setRandom();
+ array<ptrdiff_t, 4> strides;
+ strides[0] = 1;
+ strides[1] = 1;
+ strides[2] = 1;
+ strides[3] = 1;
+
+ Tensor<float, 4, DataLayout> no_stride;
+ no_stride = tensor.stride(strides);
+
+ VERIFY_IS_EQUAL(no_stride.dimension(0), 2);
+ VERIFY_IS_EQUAL(no_stride.dimension(1), 3);
+ VERIFY_IS_EQUAL(no_stride.dimension(2), 5);
+ VERIFY_IS_EQUAL(no_stride.dimension(3), 7);
+
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 5; ++k) {
+ for (int l = 0; l < 7; ++l) {
+ VERIFY_IS_EQUAL(tensor(i,j,k,l), no_stride(i,j,k,l));
+ }
+ }
+ }
+ }
+
+ strides[0] = 2;
+ strides[1] = 4;
+ strides[2] = 2;
+ strides[3] = 3;
+ Tensor<float, 4, DataLayout> stride;
+ stride = tensor.stride(strides);
+
+ VERIFY_IS_EQUAL(stride.dimension(0), 1);
+ VERIFY_IS_EQUAL(stride.dimension(1), 1);
+ VERIFY_IS_EQUAL(stride.dimension(2), 3);
+ VERIFY_IS_EQUAL(stride.dimension(3), 3);
+
+ for (int i = 0; i < 1; ++i) {
+ for (int j = 0; j < 1; ++j) {
+ for (int k = 0; k < 3; ++k) {
+ for (int l = 0; l < 3; ++l) {
+ VERIFY_IS_EQUAL(tensor(2*i,4*j,2*k,3*l), stride(i,j,k,l));
+ }
+ }
+ }
+ }
+}
+
+
+template<int DataLayout>
+static void test_striding_as_lvalue()
+{
+ Tensor<float, 4, DataLayout> tensor(2,3,5,7);
+ tensor.setRandom();
+ array<ptrdiff_t, 4> strides;
+ strides[0] = 2;
+ strides[1] = 4;
+ strides[2] = 2;
+ strides[3] = 3;
+
+ Tensor<float, 4, DataLayout> result(3, 12, 10, 21);
+ result.stride(strides) = tensor;
+
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 5; ++k) {
+ for (int l = 0; l < 7; ++l) {
+ VERIFY_IS_EQUAL(tensor(i,j,k,l), result(2*i,4*j,2*k,3*l));
+ }
+ }
+ }
+ }
+
+ array<ptrdiff_t, 4> no_strides;
+ no_strides[0] = 1;
+ no_strides[1] = 1;
+ no_strides[2] = 1;
+ no_strides[3] = 1;
+ Tensor<float, 4, DataLayout> result2(3, 12, 10, 21);
+ result2.stride(strides) = tensor.stride(no_strides);
+
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 5; ++k) {
+ for (int l = 0; l < 7; ++l) {
+ VERIFY_IS_EQUAL(tensor(i,j,k,l), result2(2*i,4*j,2*k,3*l));
+ }
+ }
+ }
+ }
+}
+
+
+void test_cxx11_tensor_striding()
+{
+ CALL_SUBTEST(test_simple_striding<ColMajor>());
+ CALL_SUBTEST(test_simple_striding<RowMajor>());
+ CALL_SUBTEST(test_striding_as_lvalue<ColMajor>());
+ CALL_SUBTEST(test_striding_as_lvalue<RowMajor>());
+}
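
Both striding tests rely on one mapping: stride(s) keeps every s[d]-th coefficient along dimension d, so output(i0, i1, ...) reads input(s0*i0, s1*i1, ...) and the output extent along d is ceil(n_d / s_d). That is how the (2,3,5,7) tensor with strides (2,4,2,3) produces a (1,1,3,3) result. A rank-2 sketch:

```cpp
#include <unsupported/Eigen/CXX11/Tensor>
#include <cassert>

int main() {
  Eigen::Tensor<float, 2> t(5, 7);
  t.setRandom();

  Eigen::array<ptrdiff_t, 2> strides{{2, 3}};
  Eigen::Tensor<float, 2> s = t.stride(strides);

  assert(s.dimension(0) == 3 && s.dimension(1) == 3);  // ceil(5/2), ceil(7/3)
  for (int i = 0; i < 3; ++i)
    for (int j = 0; j < 3; ++j)
      assert(s(i, j) == t(2 * i, 3 * j));
  return 0;
}
```
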
diff --git a/unsupported/test/cxx11_tensor_sugar.cpp b/unsupported/test/cxx11_tensor_sugar.cpp
new file mode 100644
index 000000000..2f56eb495
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_sugar.cpp
@@ -0,0 +1,81 @@
+#include "main.h"
+
+#include <Eigen/CXX11/Tensor>
+
+using Eigen::Tensor;
+using Eigen::RowMajor;
+
+static void test_comparison_sugar() {
+ // we already trust comparisons between tensors; here we simply check
+ // that the sugared versions do the same thing as the explicit ones
+ Tensor<int, 3> t(6, 7, 5);
+
+ t.setRandom();
+ // make sure we have at least one value == 0
+ t(0,0,0) = 0;
+
+ Tensor<bool,0> b;
+
+#define TEST_TENSOR_EQUAL(e1, e2) \
+ b = ((e1) == (e2)).all(); \
+ VERIFY(b())
+
+#define TEST_OP(op) TEST_TENSOR_EQUAL(t op 0, t op t.constant(0))
+
+ TEST_OP(==);
+ TEST_OP(!=);
+ TEST_OP(<=);
+ TEST_OP(>=);
+ TEST_OP(<);
+ TEST_OP(>);
+#undef TEST_OP
+#undef TEST_TENSOR_EQUAL
+}
+
+
+static void test_scalar_sugar_add_mul() {
+ Tensor<float, 3> A(6, 7, 5);
+ Tensor<float, 3> B(6, 7, 5);
+ A.setRandom();
+ B.setRandom();
+
+ const float alpha = 0.43f;
+ const float beta = 0.21f;
+ const float gamma = 0.14f;
+
+ Tensor<float, 3> R = A.constant(gamma) + A * A.constant(alpha) + B * B.constant(beta);
+ Tensor<float, 3> S = A * alpha + B * beta + gamma;
+ Tensor<float, 3> T = gamma + alpha * A + beta * B;
+
+ for (int i = 0; i < 6*7*5; ++i) {
+ VERIFY_IS_APPROX(R(i), S(i));
+ VERIFY_IS_APPROX(R(i), T(i));
+ }
+}
+
+static void test_scalar_sugar_sub_div() {
+ Tensor<float, 3> A(6, 7, 5);
+ Tensor<float, 3> B(6, 7, 5);
+ A.setRandom();
+ B.setRandom();
+
+ const float alpha = 0.43f;
+ const float beta = 0.21f;
+ const float gamma = 0.14f;
+ const float delta = 0.32f;
+
+ Tensor<float, 3> R = A.constant(gamma) - A / A.constant(alpha)
+ - B.constant(beta) / B - A.constant(delta);
+ Tensor<float, 3> S = gamma - A / alpha - beta / B - delta;
+
+ for (int i = 0; i < 6*7*5; ++i) {
+ VERIFY_IS_APPROX(R(i), S(i));
+ }
+}
+
+void test_cxx11_tensor_sugar()
+{
+ CALL_SUBTEST(test_comparison_sugar());
+ CALL_SUBTEST(test_scalar_sugar_add_mul());
+ CALL_SUBTEST(test_scalar_sugar_sub_div());
+}
diff --git a/unsupported/test/cxx11_tensor_sycl.cpp b/unsupported/test/cxx11_tensor_sycl.cpp
new file mode 100644
index 000000000..6a9c33422
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_sycl.cpp
@@ -0,0 +1,159 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2016
+// Mehdi Goli Codeplay Software Ltd.
+// Ralph Potter Codeplay Software Ltd.
+// Luke Iwanski Codeplay Software Ltd.
+// Contact: <eigen@codeplay.com>
+// Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+#define EIGEN_TEST_NO_LONGDOUBLE
+#define EIGEN_TEST_NO_COMPLEX
+#define EIGEN_TEST_FUNC cxx11_tensor_sycl
+#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int
+#define EIGEN_USE_SYCL
+
+#include "main.h"
+#include <unsupported/Eigen/CXX11/Tensor>
+
+using Eigen::array;
+using Eigen::SyclDevice;
+using Eigen::Tensor;
+using Eigen::TensorMap;
+
+void test_sycl_cpu(const Eigen::SyclDevice &sycl_device) {
+
+ int sizeDim1 = 100;
+ int sizeDim2 = 100;
+ int sizeDim3 = 100;
+ array<int, 3> tensorRange = {{sizeDim1, sizeDim2, sizeDim3}};
+ Tensor<float, 3> in1(tensorRange);
+ Tensor<float, 3> in2(tensorRange);
+ Tensor<float, 3> in3(tensorRange);
+ Tensor<float, 3> out(tensorRange);
+
+ in2 = in2.random();
+ in3 = in3.random();
+
+ float * gpu_in1_data = static_cast<float*>(sycl_device.allocate(in1.dimensions().TotalSize()*sizeof(float)));
+ float * gpu_in2_data = static_cast<float*>(sycl_device.allocate(in2.dimensions().TotalSize()*sizeof(float)));
+ float * gpu_in3_data = static_cast<float*>(sycl_device.allocate(in3.dimensions().TotalSize()*sizeof(float)));
+ float * gpu_out_data = static_cast<float*>(sycl_device.allocate(out.dimensions().TotalSize()*sizeof(float)));
+
+ TensorMap<Tensor<float, 3>> gpu_in1(gpu_in1_data, tensorRange);
+ TensorMap<Tensor<float, 3>> gpu_in2(gpu_in2_data, tensorRange);
+ TensorMap<Tensor<float, 3>> gpu_in3(gpu_in3_data, tensorRange);
+ TensorMap<Tensor<float, 3>> gpu_out(gpu_out_data, tensorRange);
+
+ /// a=1.2f
+ gpu_in1.device(sycl_device) = gpu_in1.constant(1.2f);
+ sycl_device.memcpyDeviceToHost(in1.data(), gpu_in1_data ,(in1.dimensions().TotalSize())*sizeof(float));
+ for (int i = 0; i < sizeDim1; ++i) {
+ for (int j = 0; j < sizeDim2; ++j) {
+ for (int k = 0; k < sizeDim3; ++k) {
+ VERIFY_IS_APPROX(in1(i,j,k), 1.2f);
+ }
+ }
+ }
+ printf("a=1.2f Test passed\n");
+
+ /// a=b*1.2f
+ gpu_out.device(sycl_device) = gpu_in1 * 1.2f;
+ sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data ,(out.dimensions().TotalSize())*sizeof(float));
+ for (int i = 0; i < sizeDim1; ++i) {
+ for (int j = 0; j < sizeDim2; ++j) {
+ for (int k = 0; k < sizeDim3; ++k) {
+ VERIFY_IS_APPROX(out(i,j,k),
+ in1(i,j,k) * 1.2f);
+ }
+ }
+ }
+ printf("a=b*1.2f Test Passed\n");
+
+ /// c=a*b
+ sycl_device.memcpyHostToDevice(gpu_in2_data, in2.data(),(in2.dimensions().TotalSize())*sizeof(float));
+ gpu_out.device(sycl_device) = gpu_in1 * gpu_in2;
+ sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.dimensions().TotalSize())*sizeof(float));
+ for (int i = 0; i < sizeDim1; ++i) {
+ for (int j = 0; j < sizeDim2; ++j) {
+ for (int k = 0; k < sizeDim3; ++k) {
+ VERIFY_IS_APPROX(out(i,j,k),
+ in1(i,j,k) *
+ in2(i,j,k));
+ }
+ }
+ }
+ printf("c=a*b Test Passed\n");
+
+ /// c=a+b
+ gpu_out.device(sycl_device) = gpu_in1 + gpu_in2;
+ sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.dimensions().TotalSize())*sizeof(float));
+ for (int i = 0; i < sizeDim1; ++i) {
+ for (int j = 0; j < sizeDim2; ++j) {
+ for (int k = 0; k < sizeDim3; ++k) {
+ VERIFY_IS_APPROX(out(i,j,k),
+ in1(i,j,k) +
+ in2(i,j,k));
+ }
+ }
+ }
+ printf("c=a+b Test Passed\n");
+
+ /// c=a*a
+ gpu_out.device(sycl_device) = gpu_in1 * gpu_in1;
+ sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.dimensions().TotalSize())*sizeof(float));
+ for (int i = 0; i < sizeDim1; ++i) {
+ for (int j = 0; j < sizeDim2; ++j) {
+ for (int k = 0; k < sizeDim3; ++k) {
+ VERIFY_IS_APPROX(out(i,j,k),
+ in1(i,j,k) *
+ in1(i,j,k));
+ }
+ }
+ }
+ printf("c= a*a Test Passed\n");
+
+ //a*3.14f + b*2.7f
+ gpu_out.device(sycl_device) = gpu_in1 * gpu_in1.constant(3.14f) + gpu_in2 * gpu_in2.constant(2.7f);
+ sycl_device.memcpyDeviceToHost(out.data(),gpu_out_data,(out.dimensions().TotalSize())*sizeof(float));
+ for (int i = 0; i < sizeDim1; ++i) {
+ for (int j = 0; j < sizeDim2; ++j) {
+ for (int k = 0; k < sizeDim3; ++k) {
+ VERIFY_IS_APPROX(out(i,j,k),
+ in1(i,j,k) * 3.14f
+ + in2(i,j,k) * 2.7f);
+ }
+ }
+ }
+ printf("a*3.14f + b*2.7f Test Passed\n");
+
+ ///d= (a>0.5? b:c)
+ sycl_device.memcpyHostToDevice(gpu_in3_data, in3.data(),(in3.dimensions().TotalSize())*sizeof(float));
+ gpu_out.device(sycl_device) = (gpu_in1 > gpu_in1.constant(0.5f)).select(gpu_in2, gpu_in3);
+ sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.dimensions().TotalSize())*sizeof(float));
+ for (int i = 0; i < sizeDim1; ++i) {
+ for (int j = 0; j < sizeDim2; ++j) {
+ for (int k = 0; k < sizeDim3; ++k) {
+ VERIFY_IS_APPROX(out(i, j, k), (in1(i, j, k) > 0.5f)
+ ? in2(i, j, k)
+ : in3(i, j, k));
+ }
+ }
+ }
+ printf("d= (a>0.5? b:c) Test Passed\n");
+ sycl_device.deallocate(gpu_in1_data);
+ sycl_device.deallocate(gpu_in2_data);
+ sycl_device.deallocate(gpu_in3_data);
+ sycl_device.deallocate(gpu_out_data);
+}
+void test_cxx11_tensor_sycl() {
+ cl::sycl::gpu_selector s;
+ Eigen::SyclDevice sycl_device(s);
+ CALL_SUBTEST(test_sycl_cpu(sycl_device));
+}
diff --git a/unsupported/test/cxx11_tensor_symmetry.cpp b/unsupported/test/cxx11_tensor_symmetry.cpp
new file mode 100644
index 000000000..d680e9b3b
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_symmetry.cpp
@@ -0,0 +1,818 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2013 Christian Seiler <christian@iwakd.de>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+
+#include <Eigen/CXX11/Tensor>
+#include <Eigen/CXX11/TensorSymmetry>
+
+#include <map>
+#include <set>
+
+using Eigen::Tensor;
+using Eigen::SGroup;
+using Eigen::DynamicSGroup;
+using Eigen::StaticSGroup;
+using Eigen::Symmetry;
+using Eigen::AntiSymmetry;
+using Eigen::Hermiticity;
+using Eigen::AntiHermiticity;
+
+using Eigen::NegationFlag;
+using Eigen::ConjugationFlag;
+using Eigen::GlobalZeroFlag;
+using Eigen::GlobalRealFlag;
+using Eigen::GlobalImagFlag;
+
+// helper function to determine whether the compiler instantiated a static
+// or dynamic symmetry group
+template<typename... Sym>
+bool isDynGroup(StaticSGroup<Sym...> const& dummy)
+{
+ (void)dummy;
+ return false;
+}
+
+bool isDynGroup(DynamicSGroup const& dummy)
+{
+ (void)dummy;
+ return true;
+}
+
+// helper class for checking that the symmetry groups are correct
+struct checkIdx {
+ template<typename ArrType>
+ static inline int doCheck_(ArrType e, int flags, int dummy, std::set<uint64_t>& found, std::map<uint64_t, int> const& expected)
+ {
+ // pack the index tuple into a base-10 integer (e.g. (2,1,0,3,4,5,6)
+ // becomes 2103456) so it can be looked up in the expected map below
+ uint64_t value = e[0];
+ for (std::size_t i = 1; i < e.size(); i++)
+ value = value * 10 + e[i];
+
+ // we want to make sure that we find each element
+ auto it = expected.find(value);
+ VERIFY((it != expected.end()));
+ VERIFY_IS_EQUAL(it->second, flags);
+
+ // we want to make sure we only have each element once;
+ // set::insert returns true for the second part of the pair
+ // if the element was really inserted and not already there
+ auto p = found.insert(value);
+ VERIFY((p.second));
+
+ return dummy;
+ }
+
+ static inline int run(std::vector<int> e, int flags, int dummy, std::set<uint64_t>& found, std::map<uint64_t, int> const& expected)
+ {
+ return doCheck_(e, flags, dummy, found, expected);
+ }
+
+ template<std::size_t N>
+ static inline int run(std::array<int, N> e, int flags, int dummy, std::set<uint64_t>& found, std::map<uint64_t, int> const& expected)
+ {
+ return doCheck_(e, flags, dummy, found, expected);
+ }
+};
+
+static void test_symgroups_static()
+{
+ std::array<int, 7> identity{{0,1,2,3,4,5,6}};
+
+ // Simple static symmetry group
+ StaticSGroup<
+ AntiSymmetry<0,1>,
+ Hermiticity<0,2>
+ > group;
+
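+ // the two generators are transpositions of indices in {0,1,2} and
+ // together generate all 3! = 6 permutations of them; each element carries
+ // the combined negation/conjugation flags recorded in the expected map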
+ std::set<uint64_t> found;
+ std::map<uint64_t, int> expected;
+ expected[ 123456] = 0;
+ expected[1023456] = NegationFlag;
+ expected[2103456] = ConjugationFlag;
+ expected[1203456] = ConjugationFlag | NegationFlag;
+ expected[2013456] = ConjugationFlag | NegationFlag;
+ expected[ 213456] = ConjugationFlag;
+
+ VERIFY_IS_EQUAL(group.size(), 6u);
+ VERIFY_IS_EQUAL(group.globalFlags(), GlobalImagFlag);
+ group.apply<checkIdx, int>(identity, 0, found, expected);
+ VERIFY_IS_EQUAL(found.size(), 6u);
+}
+
+static void test_symgroups_dynamic()
+{
+ std::vector<int> identity;
+ for (int i = 0; i <= 6; i++)
+ identity.push_back(i);
+
+ // Simple dynamic symmetry group
+ DynamicSGroup group;
+ group.add(0,1,NegationFlag);
+ group.add(0,2,ConjugationFlag);
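+ // these are the run-time equivalents of the AntiSymmetry<0,1> and
+ // Hermiticity<0,2> generators used in the static test above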
+
+ VERIFY_IS_EQUAL(group.size(), 6u);
+ VERIFY_IS_EQUAL(group.globalFlags(), GlobalImagFlag);
+
+ std::set<uint64_t> found;
+ std::map<uint64_t, int> expected;
+ expected[ 123456] = 0;
+ expected[1023456] = NegationFlag;
+ expected[2103456] = ConjugationFlag;
+ expected[1203456] = ConjugationFlag | NegationFlag;
+ expected[2013456] = ConjugationFlag | NegationFlag;
+ expected[ 213456] = ConjugationFlag;
+
+ VERIFY_IS_EQUAL(group.size(), 6u);
+ VERIFY_IS_EQUAL(group.globalFlags(), GlobalImagFlag);
+ group.apply<checkIdx, int>(identity, 0, found, expected);
+ VERIFY_IS_EQUAL(found.size(), 6u);
+}
+
+static void test_symgroups_selection()
+{
+ std::array<int, 7> identity7{{0,1,2,3,4,5,6}};
+ std::array<int, 10> identity10{{0,1,2,3,4,5,6,7,8,9}};
+
+ {
+ // Do the same test as in test_symgroups_static but
+ // require selection via SGroup
+ SGroup<
+ AntiSymmetry<0,1>,
+ Hermiticity<0,2>
+ > group;
+
+ std::set<uint64_t> found;
+ std::map<uint64_t, int> expected;
+ expected[ 123456] = 0;
+ expected[1023456] = NegationFlag;
+ expected[2103456] = ConjugationFlag;
+ expected[1203456] = ConjugationFlag | NegationFlag;
+ expected[2013456] = ConjugationFlag | NegationFlag;
+ expected[ 213456] = ConjugationFlag;
+
+ VERIFY(!isDynGroup(group));
+ VERIFY_IS_EQUAL(group.size(), 6u);
+ VERIFY_IS_EQUAL(group.globalFlags(), GlobalImagFlag);
+ group.apply<checkIdx, int>(identity7, 0, found, expected);
+ VERIFY_IS_EQUAL(found.size(), 6u);
+ }
+
+ {
+ // simple factorizing group: 5 generators, 2^5 = 32 elements.
+ // SGroup's selection should make this dynamic, although a static
+ // group of this size could still be generated reasonably
+ SGroup<
+ Symmetry<0,1>,
+ Symmetry<2,3>,
+ Symmetry<4,5>,
+ Symmetry<6,7>,
+ Symmetry<8,9>
+ > group;
+
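+ // the five transpositions act on disjoint index pairs, so they commute;
+ // every group element is an independent choice of whether to swap (0,1),
+ // (2,3), (4,5), (6,7) and (8,9), giving the 2^5 = 32 keys below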
+ std::set<uint64_t> found;
+ std::map<uint64_t, int> expected;
+ expected[ 123456789] = 0; expected[ 123456798] = 0; expected[ 123457689] = 0; expected[ 123457698] = 0;
+ expected[ 123546789] = 0; expected[ 123546798] = 0; expected[ 123547689] = 0; expected[ 123547698] = 0;
+ expected[ 132456789] = 0; expected[ 132456798] = 0; expected[ 132457689] = 0; expected[ 132457698] = 0;
+ expected[ 132546789] = 0; expected[ 132546798] = 0; expected[ 132547689] = 0; expected[ 132547698] = 0;
+ expected[1023456789] = 0; expected[1023456798] = 0; expected[1023457689] = 0; expected[1023457698] = 0;
+ expected[1023546789] = 0; expected[1023546798] = 0; expected[1023547689] = 0; expected[1023547698] = 0;
+ expected[1032456789] = 0; expected[1032456798] = 0; expected[1032457689] = 0; expected[1032457698] = 0;
+ expected[1032546789] = 0; expected[1032546798] = 0; expected[1032547689] = 0; expected[1032547698] = 0;
+
+ VERIFY(isDynGroup(group));
+ VERIFY_IS_EQUAL(group.size(), 32u);
+ VERIFY_IS_EQUAL(group.globalFlags(), 0);
+ group.apply<checkIdx, int>(identity10, 0, found, expected);
+ VERIFY_IS_EQUAL(found.size(), 32u);
+
+ // now verify that we could also generate a static group
+ // with these generators
+ found.clear();
+ StaticSGroup<
+ Symmetry<0,1>,
+ Symmetry<2,3>,
+ Symmetry<4,5>,
+ Symmetry<6,7>,
+ Symmetry<8,9>
+ > group_static;
+ VERIFY_IS_EQUAL(group_static.size(), 32u);
+ VERIFY_IS_EQUAL(group_static.globalFlags(), 0);
+ group_static.apply<checkIdx, int>(identity10, 0, found, expected);
+ VERIFY_IS_EQUAL(found.size(), 32u);
+ }
+
+ {
+ // try to create a HUGE group
+ SGroup<
+ Symmetry<0,1>,
+ Symmetry<1,2>,
+ Symmetry<2,3>,
+ Symmetry<3,4>,
+ Symmetry<4,5>,
+ Symmetry<5,6>
+ > group;
+
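+ // the adjacent transpositions (0 1), (1 2), ..., (5 6) generate the full
+ // symmetric group S7 on indices 0..6, hence the 7! = 5040 elements below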
+ std::set<uint64_t> found;
+ uint64_t pre_expected[5040] = {
+ 123456, 1023456, 213456, 2013456, 1203456, 2103456, 132456, 1032456, 312456, 3012456, 1302456, 3102456,
+ 231456, 2031456, 321456, 3021456, 2301456, 3201456, 1230456, 2130456, 1320456, 3120456, 2310456, 3210456,
+ 124356, 1024356, 214356, 2014356, 1204356, 2104356, 142356, 1042356, 412356, 4012356, 1402356, 4102356,
+ 241356, 2041356, 421356, 4021356, 2401356, 4201356, 1240356, 2140356, 1420356, 4120356, 2410356, 4210356,
+ 134256, 1034256, 314256, 3014256, 1304256, 3104256, 143256, 1043256, 413256, 4013256, 1403256, 4103256,
+ 341256, 3041256, 431256, 4031256, 3401256, 4301256, 1340256, 3140256, 1430256, 4130256, 3410256, 4310256,
+ 234156, 2034156, 324156, 3024156, 2304156, 3204156, 243156, 2043156, 423156, 4023156, 2403156, 4203156,
+ 342156, 3042156, 432156, 4032156, 3402156, 4302156, 2340156, 3240156, 2430156, 4230156, 3420156, 4320156,
+ 1234056, 2134056, 1324056, 3124056, 2314056, 3214056, 1243056, 2143056, 1423056, 4123056, 2413056, 4213056,
+ 1342056, 3142056, 1432056, 4132056, 3412056, 4312056, 2341056, 3241056, 2431056, 4231056, 3421056, 4321056,
+ 123546, 1023546, 213546, 2013546, 1203546, 2103546, 132546, 1032546, 312546, 3012546, 1302546, 3102546,
+ 231546, 2031546, 321546, 3021546, 2301546, 3201546, 1230546, 2130546, 1320546, 3120546, 2310546, 3210546,
+ 125346, 1025346, 215346, 2015346, 1205346, 2105346, 152346, 1052346, 512346, 5012346, 1502346, 5102346,
+ 251346, 2051346, 521346, 5021346, 2501346, 5201346, 1250346, 2150346, 1520346, 5120346, 2510346, 5210346,
+ 135246, 1035246, 315246, 3015246, 1305246, 3105246, 153246, 1053246, 513246, 5013246, 1503246, 5103246,
+ 351246, 3051246, 531246, 5031246, 3501246, 5301246, 1350246, 3150246, 1530246, 5130246, 3510246, 5310246,
+ 235146, 2035146, 325146, 3025146, 2305146, 3205146, 253146, 2053146, 523146, 5023146, 2503146, 5203146,
+ 352146, 3052146, 532146, 5032146, 3502146, 5302146, 2350146, 3250146, 2530146, 5230146, 3520146, 5320146,
+ 1235046, 2135046, 1325046, 3125046, 2315046, 3215046, 1253046, 2153046, 1523046, 5123046, 2513046, 5213046,
+ 1352046, 3152046, 1532046, 5132046, 3512046, 5312046, 2351046, 3251046, 2531046, 5231046, 3521046, 5321046,
+ 124536, 1024536, 214536, 2014536, 1204536, 2104536, 142536, 1042536, 412536, 4012536, 1402536, 4102536,
+ 241536, 2041536, 421536, 4021536, 2401536, 4201536, 1240536, 2140536, 1420536, 4120536, 2410536, 4210536,
+ 125436, 1025436, 215436, 2015436, 1205436, 2105436, 152436, 1052436, 512436, 5012436, 1502436, 5102436,
+ 251436, 2051436, 521436, 5021436, 2501436, 5201436, 1250436, 2150436, 1520436, 5120436, 2510436, 5210436,
+ 145236, 1045236, 415236, 4015236, 1405236, 4105236, 154236, 1054236, 514236, 5014236, 1504236, 5104236,
+ 451236, 4051236, 541236, 5041236, 4501236, 5401236, 1450236, 4150236, 1540236, 5140236, 4510236, 5410236,
+ 245136, 2045136, 425136, 4025136, 2405136, 4205136, 254136, 2054136, 524136, 5024136, 2504136, 5204136,
+ 452136, 4052136, 542136, 5042136, 4502136, 5402136, 2450136, 4250136, 2540136, 5240136, 4520136, 5420136,
+ 1245036, 2145036, 1425036, 4125036, 2415036, 4215036, 1254036, 2154036, 1524036, 5124036, 2514036, 5214036,
+ 1452036, 4152036, 1542036, 5142036, 4512036, 5412036, 2451036, 4251036, 2541036, 5241036, 4521036, 5421036,
+ 134526, 1034526, 314526, 3014526, 1304526, 3104526, 143526, 1043526, 413526, 4013526, 1403526, 4103526,
+ 341526, 3041526, 431526, 4031526, 3401526, 4301526, 1340526, 3140526, 1430526, 4130526, 3410526, 4310526,
+ 135426, 1035426, 315426, 3015426, 1305426, 3105426, 153426, 1053426, 513426, 5013426, 1503426, 5103426,
+ 351426, 3051426, 531426, 5031426, 3501426, 5301426, 1350426, 3150426, 1530426, 5130426, 3510426, 5310426,
+ 145326, 1045326, 415326, 4015326, 1405326, 4105326, 154326, 1054326, 514326, 5014326, 1504326, 5104326,
+ 451326, 4051326, 541326, 5041326, 4501326, 5401326, 1450326, 4150326, 1540326, 5140326, 4510326, 5410326,
+ 345126, 3045126, 435126, 4035126, 3405126, 4305126, 354126, 3054126, 534126, 5034126, 3504126, 5304126,
+ 453126, 4053126, 543126, 5043126, 4503126, 5403126, 3450126, 4350126, 3540126, 5340126, 4530126, 5430126,
+ 1345026, 3145026, 1435026, 4135026, 3415026, 4315026, 1354026, 3154026, 1534026, 5134026, 3514026, 5314026,
+ 1453026, 4153026, 1543026, 5143026, 4513026, 5413026, 3451026, 4351026, 3541026, 5341026, 4531026, 5431026,
+ 234516, 2034516, 324516, 3024516, 2304516, 3204516, 243516, 2043516, 423516, 4023516, 2403516, 4203516,
+ 342516, 3042516, 432516, 4032516, 3402516, 4302516, 2340516, 3240516, 2430516, 4230516, 3420516, 4320516,
+ 235416, 2035416, 325416, 3025416, 2305416, 3205416, 253416, 2053416, 523416, 5023416, 2503416, 5203416,
+ 352416, 3052416, 532416, 5032416, 3502416, 5302416, 2350416, 3250416, 2530416, 5230416, 3520416, 5320416,
+ 245316, 2045316, 425316, 4025316, 2405316, 4205316, 254316, 2054316, 524316, 5024316, 2504316, 5204316,
+ 452316, 4052316, 542316, 5042316, 4502316, 5402316, 2450316, 4250316, 2540316, 5240316, 4520316, 5420316,
+ 345216, 3045216, 435216, 4035216, 3405216, 4305216, 354216, 3054216, 534216, 5034216, 3504216, 5304216,
+ 453216, 4053216, 543216, 5043216, 4503216, 5403216, 3450216, 4350216, 3540216, 5340216, 4530216, 5430216,
+ 2345016, 3245016, 2435016, 4235016, 3425016, 4325016, 2354016, 3254016, 2534016, 5234016, 3524016, 5324016,
+ 2453016, 4253016, 2543016, 5243016, 4523016, 5423016, 3452016, 4352016, 3542016, 5342016, 4532016, 5432016,
+ 1234506, 2134506, 1324506, 3124506, 2314506, 3214506, 1243506, 2143506, 1423506, 4123506, 2413506, 4213506,
+ 1342506, 3142506, 1432506, 4132506, 3412506, 4312506, 2341506, 3241506, 2431506, 4231506, 3421506, 4321506,
+ 1235406, 2135406, 1325406, 3125406, 2315406, 3215406, 1253406, 2153406, 1523406, 5123406, 2513406, 5213406,
+ 1352406, 3152406, 1532406, 5132406, 3512406, 5312406, 2351406, 3251406, 2531406, 5231406, 3521406, 5321406,
+ 1245306, 2145306, 1425306, 4125306, 2415306, 4215306, 1254306, 2154306, 1524306, 5124306, 2514306, 5214306,
+ 1452306, 4152306, 1542306, 5142306, 4512306, 5412306, 2451306, 4251306, 2541306, 5241306, 4521306, 5421306,
+ 1345206, 3145206, 1435206, 4135206, 3415206, 4315206, 1354206, 3154206, 1534206, 5134206, 3514206, 5314206,
+ 1453206, 4153206, 1543206, 5143206, 4513206, 5413206, 3451206, 4351206, 3541206, 5341206, 4531206, 5431206,
+ 2345106, 3245106, 2435106, 4235106, 3425106, 4325106, 2354106, 3254106, 2534106, 5234106, 3524106, 5324106,
+ 2453106, 4253106, 2543106, 5243106, 4523106, 5423106, 3452106, 4352106, 3542106, 5342106, 4532106, 5432106,
+ 123465, 1023465, 213465, 2013465, 1203465, 2103465, 132465, 1032465, 312465, 3012465, 1302465, 3102465,
+ 231465, 2031465, 321465, 3021465, 2301465, 3201465, 1230465, 2130465, 1320465, 3120465, 2310465, 3210465,
+ 124365, 1024365, 214365, 2014365, 1204365, 2104365, 142365, 1042365, 412365, 4012365, 1402365, 4102365,
+ 241365, 2041365, 421365, 4021365, 2401365, 4201365, 1240365, 2140365, 1420365, 4120365, 2410365, 4210365,
+ 134265, 1034265, 314265, 3014265, 1304265, 3104265, 143265, 1043265, 413265, 4013265, 1403265, 4103265,
+ 341265, 3041265, 431265, 4031265, 3401265, 4301265, 1340265, 3140265, 1430265, 4130265, 3410265, 4310265,
+ 234165, 2034165, 324165, 3024165, 2304165, 3204165, 243165, 2043165, 423165, 4023165, 2403165, 4203165,
+ 342165, 3042165, 432165, 4032165, 3402165, 4302165, 2340165, 3240165, 2430165, 4230165, 3420165, 4320165,
+ 1234065, 2134065, 1324065, 3124065, 2314065, 3214065, 1243065, 2143065, 1423065, 4123065, 2413065, 4213065,
+ 1342065, 3142065, 1432065, 4132065, 3412065, 4312065, 2341065, 3241065, 2431065, 4231065, 3421065, 4321065,
+ 123645, 1023645, 213645, 2013645, 1203645, 2103645, 132645, 1032645, 312645, 3012645, 1302645, 3102645,
+ 231645, 2031645, 321645, 3021645, 2301645, 3201645, 1230645, 2130645, 1320645, 3120645, 2310645, 3210645,
+ 126345, 1026345, 216345, 2016345, 1206345, 2106345, 162345, 1062345, 612345, 6012345, 1602345, 6102345,
+ 261345, 2061345, 621345, 6021345, 2601345, 6201345, 1260345, 2160345, 1620345, 6120345, 2610345, 6210345,
+ 136245, 1036245, 316245, 3016245, 1306245, 3106245, 163245, 1063245, 613245, 6013245, 1603245, 6103245,
+ 361245, 3061245, 631245, 6031245, 3601245, 6301245, 1360245, 3160245, 1630245, 6130245, 3610245, 6310245,
+ 236145, 2036145, 326145, 3026145, 2306145, 3206145, 263145, 2063145, 623145, 6023145, 2603145, 6203145,
+ 362145, 3062145, 632145, 6032145, 3602145, 6302145, 2360145, 3260145, 2630145, 6230145, 3620145, 6320145,
+ 1236045, 2136045, 1326045, 3126045, 2316045, 3216045, 1263045, 2163045, 1623045, 6123045, 2613045, 6213045,
+ 1362045, 3162045, 1632045, 6132045, 3612045, 6312045, 2361045, 3261045, 2631045, 6231045, 3621045, 6321045,
+ 124635, 1024635, 214635, 2014635, 1204635, 2104635, 142635, 1042635, 412635, 4012635, 1402635, 4102635,
+ 241635, 2041635, 421635, 4021635, 2401635, 4201635, 1240635, 2140635, 1420635, 4120635, 2410635, 4210635,
+ 126435, 1026435, 216435, 2016435, 1206435, 2106435, 162435, 1062435, 612435, 6012435, 1602435, 6102435,
+ 261435, 2061435, 621435, 6021435, 2601435, 6201435, 1260435, 2160435, 1620435, 6120435, 2610435, 6210435,
+ 146235, 1046235, 416235, 4016235, 1406235, 4106235, 164235, 1064235, 614235, 6014235, 1604235, 6104235,
+ 461235, 4061235, 641235, 6041235, 4601235, 6401235, 1460235, 4160235, 1640235, 6140235, 4610235, 6410235,
+ 246135, 2046135, 426135, 4026135, 2406135, 4206135, 264135, 2064135, 624135, 6024135, 2604135, 6204135,
+ 462135, 4062135, 642135, 6042135, 4602135, 6402135, 2460135, 4260135, 2640135, 6240135, 4620135, 6420135,
+ 1246035, 2146035, 1426035, 4126035, 2416035, 4216035, 1264035, 2164035, 1624035, 6124035, 2614035, 6214035,
+ 1462035, 4162035, 1642035, 6142035, 4612035, 6412035, 2461035, 4261035, 2641035, 6241035, 4621035, 6421035,
+ 134625, 1034625, 314625, 3014625, 1304625, 3104625, 143625, 1043625, 413625, 4013625, 1403625, 4103625,
+ 341625, 3041625, 431625, 4031625, 3401625, 4301625, 1340625, 3140625, 1430625, 4130625, 3410625, 4310625,
+ 136425, 1036425, 316425, 3016425, 1306425, 3106425, 163425, 1063425, 613425, 6013425, 1603425, 6103425,
+ 361425, 3061425, 631425, 6031425, 3601425, 6301425, 1360425, 3160425, 1630425, 6130425, 3610425, 6310425,
+ 146325, 1046325, 416325, 4016325, 1406325, 4106325, 164325, 1064325, 614325, 6014325, 1604325, 6104325,
+ 461325, 4061325, 641325, 6041325, 4601325, 6401325, 1460325, 4160325, 1640325, 6140325, 4610325, 6410325,
+ 346125, 3046125, 436125, 4036125, 3406125, 4306125, 364125, 3064125, 634125, 6034125, 3604125, 6304125,
+ 463125, 4063125, 643125, 6043125, 4603125, 6403125, 3460125, 4360125, 3640125, 6340125, 4630125, 6430125,
+ 1346025, 3146025, 1436025, 4136025, 3416025, 4316025, 1364025, 3164025, 1634025, 6134025, 3614025, 6314025,
+ 1463025, 4163025, 1643025, 6143025, 4613025, 6413025, 3461025, 4361025, 3641025, 6341025, 4631025, 6431025,
+ 234615, 2034615, 324615, 3024615, 2304615, 3204615, 243615, 2043615, 423615, 4023615, 2403615, 4203615,
+ 342615, 3042615, 432615, 4032615, 3402615, 4302615, 2340615, 3240615, 2430615, 4230615, 3420615, 4320615,
+ 236415, 2036415, 326415, 3026415, 2306415, 3206415, 263415, 2063415, 623415, 6023415, 2603415, 6203415,
+ 362415, 3062415, 632415, 6032415, 3602415, 6302415, 2360415, 3260415, 2630415, 6230415, 3620415, 6320415,
+ 246315, 2046315, 426315, 4026315, 2406315, 4206315, 264315, 2064315, 624315, 6024315, 2604315, 6204315,
+ 462315, 4062315, 642315, 6042315, 4602315, 6402315, 2460315, 4260315, 2640315, 6240315, 4620315, 6420315,
+ 346215, 3046215, 436215, 4036215, 3406215, 4306215, 364215, 3064215, 634215, 6034215, 3604215, 6304215,
+ 463215, 4063215, 643215, 6043215, 4603215, 6403215, 3460215, 4360215, 3640215, 6340215, 4630215, 6430215,
+ 2346015, 3246015, 2436015, 4236015, 3426015, 4326015, 2364015, 3264015, 2634015, 6234015, 3624015, 6324015,
+ 2463015, 4263015, 2643015, 6243015, 4623015, 6423015, 3462015, 4362015, 3642015, 6342015, 4632015, 6432015,
+ 1234605, 2134605, 1324605, 3124605, 2314605, 3214605, 1243605, 2143605, 1423605, 4123605, 2413605, 4213605,
+ 1342605, 3142605, 1432605, 4132605, 3412605, 4312605, 2341605, 3241605, 2431605, 4231605, 3421605, 4321605,
+ 1236405, 2136405, 1326405, 3126405, 2316405, 3216405, 1263405, 2163405, 1623405, 6123405, 2613405, 6213405,
+ 1362405, 3162405, 1632405, 6132405, 3612405, 6312405, 2361405, 3261405, 2631405, 6231405, 3621405, 6321405,
+ 1246305, 2146305, 1426305, 4126305, 2416305, 4216305, 1264305, 2164305, 1624305, 6124305, 2614305, 6214305,
+ 1462305, 4162305, 1642305, 6142305, 4612305, 6412305, 2461305, 4261305, 2641305, 6241305, 4621305, 6421305,
+ 1346205, 3146205, 1436205, 4136205, 3416205, 4316205, 1364205, 3164205, 1634205, 6134205, 3614205, 6314205,
+ 1463205, 4163205, 1643205, 6143205, 4613205, 6413205, 3461205, 4361205, 3641205, 6341205, 4631205, 6431205,
+ 2346105, 3246105, 2436105, 4236105, 3426105, 4326105, 2364105, 3264105, 2634105, 6234105, 3624105, 6324105,
+ 2463105, 4263105, 2643105, 6243105, 4623105, 6423105, 3462105, 4362105, 3642105, 6342105, 4632105, 6432105,
+ 123564, 1023564, 213564, 2013564, 1203564, 2103564, 132564, 1032564, 312564, 3012564, 1302564, 3102564,
+ 231564, 2031564, 321564, 3021564, 2301564, 3201564, 1230564, 2130564, 1320564, 3120564, 2310564, 3210564,
+ 125364, 1025364, 215364, 2015364, 1205364, 2105364, 152364, 1052364, 512364, 5012364, 1502364, 5102364,
+ 251364, 2051364, 521364, 5021364, 2501364, 5201364, 1250364, 2150364, 1520364, 5120364, 2510364, 5210364,
+ 135264, 1035264, 315264, 3015264, 1305264, 3105264, 153264, 1053264, 513264, 5013264, 1503264, 5103264,
+ 351264, 3051264, 531264, 5031264, 3501264, 5301264, 1350264, 3150264, 1530264, 5130264, 3510264, 5310264,
+ 235164, 2035164, 325164, 3025164, 2305164, 3205164, 253164, 2053164, 523164, 5023164, 2503164, 5203164,
+ 352164, 3052164, 532164, 5032164, 3502164, 5302164, 2350164, 3250164, 2530164, 5230164, 3520164, 5320164,
+ 1235064, 2135064, 1325064, 3125064, 2315064, 3215064, 1253064, 2153064, 1523064, 5123064, 2513064, 5213064,
+ 1352064, 3152064, 1532064, 5132064, 3512064, 5312064, 2351064, 3251064, 2531064, 5231064, 3521064, 5321064,
+ 123654, 1023654, 213654, 2013654, 1203654, 2103654, 132654, 1032654, 312654, 3012654, 1302654, 3102654,
+ 231654, 2031654, 321654, 3021654, 2301654, 3201654, 1230654, 2130654, 1320654, 3120654, 2310654, 3210654,
+ 126354, 1026354, 216354, 2016354, 1206354, 2106354, 162354, 1062354, 612354, 6012354, 1602354, 6102354,
+ 261354, 2061354, 621354, 6021354, 2601354, 6201354, 1260354, 2160354, 1620354, 6120354, 2610354, 6210354,
+ 136254, 1036254, 316254, 3016254, 1306254, 3106254, 163254, 1063254, 613254, 6013254, 1603254, 6103254,
+ 361254, 3061254, 631254, 6031254, 3601254, 6301254, 1360254, 3160254, 1630254, 6130254, 3610254, 6310254,
+ 236154, 2036154, 326154, 3026154, 2306154, 3206154, 263154, 2063154, 623154, 6023154, 2603154, 6203154,
+ 362154, 3062154, 632154, 6032154, 3602154, 6302154, 2360154, 3260154, 2630154, 6230154, 3620154, 6320154,
+ 1236054, 2136054, 1326054, 3126054, 2316054, 3216054, 1263054, 2163054, 1623054, 6123054, 2613054, 6213054,
+ 1362054, 3162054, 1632054, 6132054, 3612054, 6312054, 2361054, 3261054, 2631054, 6231054, 3621054, 6321054,
+ 125634, 1025634, 215634, 2015634, 1205634, 2105634, 152634, 1052634, 512634, 5012634, 1502634, 5102634,
+ 251634, 2051634, 521634, 5021634, 2501634, 5201634, 1250634, 2150634, 1520634, 5120634, 2510634, 5210634,
+ 126534, 1026534, 216534, 2016534, 1206534, 2106534, 162534, 1062534, 612534, 6012534, 1602534, 6102534,
+ 261534, 2061534, 621534, 6021534, 2601534, 6201534, 1260534, 2160534, 1620534, 6120534, 2610534, 6210534,
+ 156234, 1056234, 516234, 5016234, 1506234, 5106234, 165234, 1065234, 615234, 6015234, 1605234, 6105234,
+ 561234, 5061234, 651234, 6051234, 5601234, 6501234, 1560234, 5160234, 1650234, 6150234, 5610234, 6510234,
+ 256134, 2056134, 526134, 5026134, 2506134, 5206134, 265134, 2065134, 625134, 6025134, 2605134, 6205134,
+ 562134, 5062134, 652134, 6052134, 5602134, 6502134, 2560134, 5260134, 2650134, 6250134, 5620134, 6520134,
+ 1256034, 2156034, 1526034, 5126034, 2516034, 5216034, 1265034, 2165034, 1625034, 6125034, 2615034, 6215034,
+ 1562034, 5162034, 1652034, 6152034, 5612034, 6512034, 2561034, 5261034, 2651034, 6251034, 5621034, 6521034,
+ 135624, 1035624, 315624, 3015624, 1305624, 3105624, 153624, 1053624, 513624, 5013624, 1503624, 5103624,
+ 351624, 3051624, 531624, 5031624, 3501624, 5301624, 1350624, 3150624, 1530624, 5130624, 3510624, 5310624,
+ 136524, 1036524, 316524, 3016524, 1306524, 3106524, 163524, 1063524, 613524, 6013524, 1603524, 6103524,
+ 361524, 3061524, 631524, 6031524, 3601524, 6301524, 1360524, 3160524, 1630524, 6130524, 3610524, 6310524,
+ 156324, 1056324, 516324, 5016324, 1506324, 5106324, 165324, 1065324, 615324, 6015324, 1605324, 6105324,
+ 561324, 5061324, 651324, 6051324, 5601324, 6501324, 1560324, 5160324, 1650324, 6150324, 5610324, 6510324,
+ 356124, 3056124, 536124, 5036124, 3506124, 5306124, 365124, 3065124, 635124, 6035124, 3605124, 6305124,
+ 563124, 5063124, 653124, 6053124, 5603124, 6503124, 3560124, 5360124, 3650124, 6350124, 5630124, 6530124,
+ 1356024, 3156024, 1536024, 5136024, 3516024, 5316024, 1365024, 3165024, 1635024, 6135024, 3615024, 6315024,
+ 1563024, 5163024, 1653024, 6153024, 5613024, 6513024, 3561024, 5361024, 3651024, 6351024, 5631024, 6531024,
+ 235614, 2035614, 325614, 3025614, 2305614, 3205614, 253614, 2053614, 523614, 5023614, 2503614, 5203614,
+ 352614, 3052614, 532614, 5032614, 3502614, 5302614, 2350614, 3250614, 2530614, 5230614, 3520614, 5320614,
+ 236514, 2036514, 326514, 3026514, 2306514, 3206514, 263514, 2063514, 623514, 6023514, 2603514, 6203514,
+ 362514, 3062514, 632514, 6032514, 3602514, 6302514, 2360514, 3260514, 2630514, 6230514, 3620514, 6320514,
+ 256314, 2056314, 526314, 5026314, 2506314, 5206314, 265314, 2065314, 625314, 6025314, 2605314, 6205314,
+ 562314, 5062314, 652314, 6052314, 5602314, 6502314, 2560314, 5260314, 2650314, 6250314, 5620314, 6520314,
+ 356214, 3056214, 536214, 5036214, 3506214, 5306214, 365214, 3065214, 635214, 6035214, 3605214, 6305214,
+ 563214, 5063214, 653214, 6053214, 5603214, 6503214, 3560214, 5360214, 3650214, 6350214, 5630214, 6530214,
+ 2356014, 3256014, 2536014, 5236014, 3526014, 5326014, 2365014, 3265014, 2635014, 6235014, 3625014, 6325014,
+ 2563014, 5263014, 2653014, 6253014, 5623014, 6523014, 3562014, 5362014, 3652014, 6352014, 5632014, 6532014,
+ 1235604, 2135604, 1325604, 3125604, 2315604, 3215604, 1253604, 2153604, 1523604, 5123604, 2513604, 5213604,
+ 1352604, 3152604, 1532604, 5132604, 3512604, 5312604, 2351604, 3251604, 2531604, 5231604, 3521604, 5321604,
+ 1236504, 2136504, 1326504, 3126504, 2316504, 3216504, 1263504, 2163504, 1623504, 6123504, 2613504, 6213504,
+ 1362504, 3162504, 1632504, 6132504, 3612504, 6312504, 2361504, 3261504, 2631504, 6231504, 3621504, 6321504,
+ 1256304, 2156304, 1526304, 5126304, 2516304, 5216304, 1265304, 2165304, 1625304, 6125304, 2615304, 6215304,
+ 1562304, 5162304, 1652304, 6152304, 5612304, 6512304, 2561304, 5261304, 2651304, 6251304, 5621304, 6521304,
+ 1356204, 3156204, 1536204, 5136204, 3516204, 5316204, 1365204, 3165204, 1635204, 6135204, 3615204, 6315204,
+ 1563204, 5163204, 1653204, 6153204, 5613204, 6513204, 3561204, 5361204, 3651204, 6351204, 5631204, 6531204,
+ 2356104, 3256104, 2536104, 5236104, 3526104, 5326104, 2365104, 3265104, 2635104, 6235104, 3625104, 6325104,
+ 2563104, 5263104, 2653104, 6253104, 5623104, 6523104, 3562104, 5362104, 3652104, 6352104, 5632104, 6532104,
+ 124563, 1024563, 214563, 2014563, 1204563, 2104563, 142563, 1042563, 412563, 4012563, 1402563, 4102563,
+ 241563, 2041563, 421563, 4021563, 2401563, 4201563, 1240563, 2140563, 1420563, 4120563, 2410563, 4210563,
+ 125463, 1025463, 215463, 2015463, 1205463, 2105463, 152463, 1052463, 512463, 5012463, 1502463, 5102463,
+ 251463, 2051463, 521463, 5021463, 2501463, 5201463, 1250463, 2150463, 1520463, 5120463, 2510463, 5210463,
+ 145263, 1045263, 415263, 4015263, 1405263, 4105263, 154263, 1054263, 514263, 5014263, 1504263, 5104263,
+ 451263, 4051263, 541263, 5041263, 4501263, 5401263, 1450263, 4150263, 1540263, 5140263, 4510263, 5410263,
+ 245163, 2045163, 425163, 4025163, 2405163, 4205163, 254163, 2054163, 524163, 5024163, 2504163, 5204163,
+ 452163, 4052163, 542163, 5042163, 4502163, 5402163, 2450163, 4250163, 2540163, 5240163, 4520163, 5420163,
+ 1245063, 2145063, 1425063, 4125063, 2415063, 4215063, 1254063, 2154063, 1524063, 5124063, 2514063, 5214063,
+ 1452063, 4152063, 1542063, 5142063, 4512063, 5412063, 2451063, 4251063, 2541063, 5241063, 4521063, 5421063,
+ 124653, 1024653, 214653, 2014653, 1204653, 2104653, 142653, 1042653, 412653, 4012653, 1402653, 4102653,
+ 241653, 2041653, 421653, 4021653, 2401653, 4201653, 1240653, 2140653, 1420653, 4120653, 2410653, 4210653,
+ 126453, 1026453, 216453, 2016453, 1206453, 2106453, 162453, 1062453, 612453, 6012453, 1602453, 6102453,
+ 261453, 2061453, 621453, 6021453, 2601453, 6201453, 1260453, 2160453, 1620453, 6120453, 2610453, 6210453,
+ 146253, 1046253, 416253, 4016253, 1406253, 4106253, 164253, 1064253, 614253, 6014253, 1604253, 6104253,
+ 461253, 4061253, 641253, 6041253, 4601253, 6401253, 1460253, 4160253, 1640253, 6140253, 4610253, 6410253,
+ 246153, 2046153, 426153, 4026153, 2406153, 4206153, 264153, 2064153, 624153, 6024153, 2604153, 6204153,
+ 462153, 4062153, 642153, 6042153, 4602153, 6402153, 2460153, 4260153, 2640153, 6240153, 4620153, 6420153,
+ 1246053, 2146053, 1426053, 4126053, 2416053, 4216053, 1264053, 2164053, 1624053, 6124053, 2614053, 6214053,
+ 1462053, 4162053, 1642053, 6142053, 4612053, 6412053, 2461053, 4261053, 2641053, 6241053, 4621053, 6421053,
+ 125643, 1025643, 215643, 2015643, 1205643, 2105643, 152643, 1052643, 512643, 5012643, 1502643, 5102643,
+ 251643, 2051643, 521643, 5021643, 2501643, 5201643, 1250643, 2150643, 1520643, 5120643, 2510643, 5210643,
+ 126543, 1026543, 216543, 2016543, 1206543, 2106543, 162543, 1062543, 612543, 6012543, 1602543, 6102543,
+ 261543, 2061543, 621543, 6021543, 2601543, 6201543, 1260543, 2160543, 1620543, 6120543, 2610543, 6210543,
+ 156243, 1056243, 516243, 5016243, 1506243, 5106243, 165243, 1065243, 615243, 6015243, 1605243, 6105243,
+ 561243, 5061243, 651243, 6051243, 5601243, 6501243, 1560243, 5160243, 1650243, 6150243, 5610243, 6510243,
+ 256143, 2056143, 526143, 5026143, 2506143, 5206143, 265143, 2065143, 625143, 6025143, 2605143, 6205143,
+ 562143, 5062143, 652143, 6052143, 5602143, 6502143, 2560143, 5260143, 2650143, 6250143, 5620143, 6520143,
+ 1256043, 2156043, 1526043, 5126043, 2516043, 5216043, 1265043, 2165043, 1625043, 6125043, 2615043, 6215043,
+ 1562043, 5162043, 1652043, 6152043, 5612043, 6512043, 2561043, 5261043, 2651043, 6251043, 5621043, 6521043,
+ 145623, 1045623, 415623, 4015623, 1405623, 4105623, 154623, 1054623, 514623, 5014623, 1504623, 5104623,
+ 451623, 4051623, 541623, 5041623, 4501623, 5401623, 1450623, 4150623, 1540623, 5140623, 4510623, 5410623,
+ 146523, 1046523, 416523, 4016523, 1406523, 4106523, 164523, 1064523, 614523, 6014523, 1604523, 6104523,
+ 461523, 4061523, 641523, 6041523, 4601523, 6401523, 1460523, 4160523, 1640523, 6140523, 4610523, 6410523,
+ 156423, 1056423, 516423, 5016423, 1506423, 5106423, 165423, 1065423, 615423, 6015423, 1605423, 6105423,
+ 561423, 5061423, 651423, 6051423, 5601423, 6501423, 1560423, 5160423, 1650423, 6150423, 5610423, 6510423,
+ 456123, 4056123, 546123, 5046123, 4506123, 5406123, 465123, 4065123, 645123, 6045123, 4605123, 6405123,
+ 564123, 5064123, 654123, 6054123, 5604123, 6504123, 4560123, 5460123, 4650123, 6450123, 5640123, 6540123,
+ 1456023, 4156023, 1546023, 5146023, 4516023, 5416023, 1465023, 4165023, 1645023, 6145023, 4615023, 6415023,
+ 1564023, 5164023, 1654023, 6154023, 5614023, 6514023, 4561023, 5461023, 4651023, 6451023, 5641023, 6541023,
+ 245613, 2045613, 425613, 4025613, 2405613, 4205613, 254613, 2054613, 524613, 5024613, 2504613, 5204613,
+ 452613, 4052613, 542613, 5042613, 4502613, 5402613, 2450613, 4250613, 2540613, 5240613, 4520613, 5420613,
+ 246513, 2046513, 426513, 4026513, 2406513, 4206513, 264513, 2064513, 624513, 6024513, 2604513, 6204513,
+ 462513, 4062513, 642513, 6042513, 4602513, 6402513, 2460513, 4260513, 2640513, 6240513, 4620513, 6420513,
+ 256413, 2056413, 526413, 5026413, 2506413, 5206413, 265413, 2065413, 625413, 6025413, 2605413, 6205413,
+ 562413, 5062413, 652413, 6052413, 5602413, 6502413, 2560413, 5260413, 2650413, 6250413, 5620413, 6520413,
+ 456213, 4056213, 546213, 5046213, 4506213, 5406213, 465213, 4065213, 645213, 6045213, 4605213, 6405213,
+ 564213, 5064213, 654213, 6054213, 5604213, 6504213, 4560213, 5460213, 4650213, 6450213, 5640213, 6540213,
+ 2456013, 4256013, 2546013, 5246013, 4526013, 5426013, 2465013, 4265013, 2645013, 6245013, 4625013, 6425013,
+ 2564013, 5264013, 2654013, 6254013, 5624013, 6524013, 4562013, 5462013, 4652013, 6452013, 5642013, 6542013,
+ 1245603, 2145603, 1425603, 4125603, 2415603, 4215603, 1254603, 2154603, 1524603, 5124603, 2514603, 5214603,
+ 1452603, 4152603, 1542603, 5142603, 4512603, 5412603, 2451603, 4251603, 2541603, 5241603, 4521603, 5421603,
+ 1246503, 2146503, 1426503, 4126503, 2416503, 4216503, 1264503, 2164503, 1624503, 6124503, 2614503, 6214503,
+ 1462503, 4162503, 1642503, 6142503, 4612503, 6412503, 2461503, 4261503, 2641503, 6241503, 4621503, 6421503,
+ 1256403, 2156403, 1526403, 5126403, 2516403, 5216403, 1265403, 2165403, 1625403, 6125403, 2615403, 6215403,
+ 1562403, 5162403, 1652403, 6152403, 5612403, 6512403, 2561403, 5261403, 2651403, 6251403, 5621403, 6521403,
+ 1456203, 4156203, 1546203, 5146203, 4516203, 5416203, 1465203, 4165203, 1645203, 6145203, 4615203, 6415203,
+ 1564203, 5164203, 1654203, 6154203, 5614203, 6514203, 4561203, 5461203, 4651203, 6451203, 5641203, 6541203,
+ 2456103, 4256103, 2546103, 5246103, 4526103, 5426103, 2465103, 4265103, 2645103, 6245103, 4625103, 6425103,
+ 2564103, 5264103, 2654103, 6254103, 5624103, 6524103, 4562103, 5462103, 4652103, 6452103, 5642103, 6542103,
+ 134562, 1034562, 314562, 3014562, 1304562, 3104562, 143562, 1043562, 413562, 4013562, 1403562, 4103562,
+ 341562, 3041562, 431562, 4031562, 3401562, 4301562, 1340562, 3140562, 1430562, 4130562, 3410562, 4310562,
+ 135462, 1035462, 315462, 3015462, 1305462, 3105462, 153462, 1053462, 513462, 5013462, 1503462, 5103462,
+ 351462, 3051462, 531462, 5031462, 3501462, 5301462, 1350462, 3150462, 1530462, 5130462, 3510462, 5310462,
+ 145362, 1045362, 415362, 4015362, 1405362, 4105362, 154362, 1054362, 514362, 5014362, 1504362, 5104362,
+ 451362, 4051362, 541362, 5041362, 4501362, 5401362, 1450362, 4150362, 1540362, 5140362, 4510362, 5410362,
+ 345162, 3045162, 435162, 4035162, 3405162, 4305162, 354162, 3054162, 534162, 5034162, 3504162, 5304162,
+ 453162, 4053162, 543162, 5043162, 4503162, 5403162, 3450162, 4350162, 3540162, 5340162, 4530162, 5430162,
+ 1345062, 3145062, 1435062, 4135062, 3415062, 4315062, 1354062, 3154062, 1534062, 5134062, 3514062, 5314062,
+ 1453062, 4153062, 1543062, 5143062, 4513062, 5413062, 3451062, 4351062, 3541062, 5341062, 4531062, 5431062,
+ 134652, 1034652, 314652, 3014652, 1304652, 3104652, 143652, 1043652, 413652, 4013652, 1403652, 4103652,
+ 341652, 3041652, 431652, 4031652, 3401652, 4301652, 1340652, 3140652, 1430652, 4130652, 3410652, 4310652,
+ 136452, 1036452, 316452, 3016452, 1306452, 3106452, 163452, 1063452, 613452, 6013452, 1603452, 6103452,
+ 361452, 3061452, 631452, 6031452, 3601452, 6301452, 1360452, 3160452, 1630452, 6130452, 3610452, 6310452,
+ 146352, 1046352, 416352, 4016352, 1406352, 4106352, 164352, 1064352, 614352, 6014352, 1604352, 6104352,
+ 461352, 4061352, 641352, 6041352, 4601352, 6401352, 1460352, 4160352, 1640352, 6140352, 4610352, 6410352,
+ 346152, 3046152, 436152, 4036152, 3406152, 4306152, 364152, 3064152, 634152, 6034152, 3604152, 6304152,
+ 463152, 4063152, 643152, 6043152, 4603152, 6403152, 3460152, 4360152, 3640152, 6340152, 4630152, 6430152,
+ 1346052, 3146052, 1436052, 4136052, 3416052, 4316052, 1364052, 3164052, 1634052, 6134052, 3614052, 6314052,
+ 1463052, 4163052, 1643052, 6143052, 4613052, 6413052, 3461052, 4361052, 3641052, 6341052, 4631052, 6431052,
+ 135642, 1035642, 315642, 3015642, 1305642, 3105642, 153642, 1053642, 513642, 5013642, 1503642, 5103642,
+ 351642, 3051642, 531642, 5031642, 3501642, 5301642, 1350642, 3150642, 1530642, 5130642, 3510642, 5310642,
+ 136542, 1036542, 316542, 3016542, 1306542, 3106542, 163542, 1063542, 613542, 6013542, 1603542, 6103542,
+ 361542, 3061542, 631542, 6031542, 3601542, 6301542, 1360542, 3160542, 1630542, 6130542, 3610542, 6310542,
+ 156342, 1056342, 516342, 5016342, 1506342, 5106342, 165342, 1065342, 615342, 6015342, 1605342, 6105342,
+ 561342, 5061342, 651342, 6051342, 5601342, 6501342, 1560342, 5160342, 1650342, 6150342, 5610342, 6510342,
+ 356142, 3056142, 536142, 5036142, 3506142, 5306142, 365142, 3065142, 635142, 6035142, 3605142, 6305142,
+ 563142, 5063142, 653142, 6053142, 5603142, 6503142, 3560142, 5360142, 3650142, 6350142, 5630142, 6530142,
+ 1356042, 3156042, 1536042, 5136042, 3516042, 5316042, 1365042, 3165042, 1635042, 6135042, 3615042, 6315042,
+ 1563042, 5163042, 1653042, 6153042, 5613042, 6513042, 3561042, 5361042, 3651042, 6351042, 5631042, 6531042,
+ 145632, 1045632, 415632, 4015632, 1405632, 4105632, 154632, 1054632, 514632, 5014632, 1504632, 5104632,
+ 451632, 4051632, 541632, 5041632, 4501632, 5401632, 1450632, 4150632, 1540632, 5140632, 4510632, 5410632,
+ 146532, 1046532, 416532, 4016532, 1406532, 4106532, 164532, 1064532, 614532, 6014532, 1604532, 6104532,
+ 461532, 4061532, 641532, 6041532, 4601532, 6401532, 1460532, 4160532, 1640532, 6140532, 4610532, 6410532,
+ 156432, 1056432, 516432, 5016432, 1506432, 5106432, 165432, 1065432, 615432, 6015432, 1605432, 6105432,
+ 561432, 5061432, 651432, 6051432, 5601432, 6501432, 1560432, 5160432, 1650432, 6150432, 5610432, 6510432,
+ 456132, 4056132, 546132, 5046132, 4506132, 5406132, 465132, 4065132, 645132, 6045132, 4605132, 6405132,
+ 564132, 5064132, 654132, 6054132, 5604132, 6504132, 4560132, 5460132, 4650132, 6450132, 5640132, 6540132,
+ 1456032, 4156032, 1546032, 5146032, 4516032, 5416032, 1465032, 4165032, 1645032, 6145032, 4615032, 6415032,
+ 1564032, 5164032, 1654032, 6154032, 5614032, 6514032, 4561032, 5461032, 4651032, 6451032, 5641032, 6541032,
+ 345612, 3045612, 435612, 4035612, 3405612, 4305612, 354612, 3054612, 534612, 5034612, 3504612, 5304612,
+ 453612, 4053612, 543612, 5043612, 4503612, 5403612, 3450612, 4350612, 3540612, 5340612, 4530612, 5430612,
+ 346512, 3046512, 436512, 4036512, 3406512, 4306512, 364512, 3064512, 634512, 6034512, 3604512, 6304512,
+ 463512, 4063512, 643512, 6043512, 4603512, 6403512, 3460512, 4360512, 3640512, 6340512, 4630512, 6430512,
+ 356412, 3056412, 536412, 5036412, 3506412, 5306412, 365412, 3065412, 635412, 6035412, 3605412, 6305412,
+ 563412, 5063412, 653412, 6053412, 5603412, 6503412, 3560412, 5360412, 3650412, 6350412, 5630412, 6530412,
+ 456312, 4056312, 546312, 5046312, 4506312, 5406312, 465312, 4065312, 645312, 6045312, 4605312, 6405312,
+ 564312, 5064312, 654312, 6054312, 5604312, 6504312, 4560312, 5460312, 4650312, 6450312, 5640312, 6540312,
+ 3456012, 4356012, 3546012, 5346012, 4536012, 5436012, 3465012, 4365012, 3645012, 6345012, 4635012, 6435012,
+ 3564012, 5364012, 3654012, 6354012, 5634012, 6534012, 4563012, 5463012, 4653012, 6453012, 5643012, 6543012,
+ 1345602, 3145602, 1435602, 4135602, 3415602, 4315602, 1354602, 3154602, 1534602, 5134602, 3514602, 5314602,
+ 1453602, 4153602, 1543602, 5143602, 4513602, 5413602, 3451602, 4351602, 3541602, 5341602, 4531602, 5431602,
+ 1346502, 3146502, 1436502, 4136502, 3416502, 4316502, 1364502, 3164502, 1634502, 6134502, 3614502, 6314502,
+ 1463502, 4163502, 1643502, 6143502, 4613502, 6413502, 3461502, 4361502, 3641502, 6341502, 4631502, 6431502,
+ 1356402, 3156402, 1536402, 5136402, 3516402, 5316402, 1365402, 3165402, 1635402, 6135402, 3615402, 6315402,
+ 1563402, 5163402, 1653402, 6153402, 5613402, 6513402, 3561402, 5361402, 3651402, 6351402, 5631402, 6531402,
+ 1456302, 4156302, 1546302, 5146302, 4516302, 5416302, 1465302, 4165302, 1645302, 6145302, 4615302, 6415302,
+ 1564302, 5164302, 1654302, 6154302, 5614302, 6514302, 4561302, 5461302, 4651302, 6451302, 5641302, 6541302,
+ 3456102, 4356102, 3546102, 5346102, 4536102, 5436102, 3465102, 4365102, 3645102, 6345102, 4635102, 6435102,
+ 3564102, 5364102, 3654102, 6354102, 5634102, 6534102, 4563102, 5463102, 4653102, 6453102, 5643102, 6543102,
+ 234561, 2034561, 324561, 3024561, 2304561, 3204561, 243561, 2043561, 423561, 4023561, 2403561, 4203561,
+ 342561, 3042561, 432561, 4032561, 3402561, 4302561, 2340561, 3240561, 2430561, 4230561, 3420561, 4320561,
+ 235461, 2035461, 325461, 3025461, 2305461, 3205461, 253461, 2053461, 523461, 5023461, 2503461, 5203461,
+ 352461, 3052461, 532461, 5032461, 3502461, 5302461, 2350461, 3250461, 2530461, 5230461, 3520461, 5320461,
+ 245361, 2045361, 425361, 4025361, 2405361, 4205361, 254361, 2054361, 524361, 5024361, 2504361, 5204361,
+ 452361, 4052361, 542361, 5042361, 4502361, 5402361, 2450361, 4250361, 2540361, 5240361, 4520361, 5420361,
+ 345261, 3045261, 435261, 4035261, 3405261, 4305261, 354261, 3054261, 534261, 5034261, 3504261, 5304261,
+ 453261, 4053261, 543261, 5043261, 4503261, 5403261, 3450261, 4350261, 3540261, 5340261, 4530261, 5430261,
+ 2345061, 3245061, 2435061, 4235061, 3425061, 4325061, 2354061, 3254061, 2534061, 5234061, 3524061, 5324061,
+ 2453061, 4253061, 2543061, 5243061, 4523061, 5423061, 3452061, 4352061, 3542061, 5342061, 4532061, 5432061,
+ 234651, 2034651, 324651, 3024651, 2304651, 3204651, 243651, 2043651, 423651, 4023651, 2403651, 4203651,
+ 342651, 3042651, 432651, 4032651, 3402651, 4302651, 2340651, 3240651, 2430651, 4230651, 3420651, 4320651,
+ 236451, 2036451, 326451, 3026451, 2306451, 3206451, 263451, 2063451, 623451, 6023451, 2603451, 6203451,
+ 362451, 3062451, 632451, 6032451, 3602451, 6302451, 2360451, 3260451, 2630451, 6230451, 3620451, 6320451,
+ 246351, 2046351, 426351, 4026351, 2406351, 4206351, 264351, 2064351, 624351, 6024351, 2604351, 6204351,
+ 462351, 4062351, 642351, 6042351, 4602351, 6402351, 2460351, 4260351, 2640351, 6240351, 4620351, 6420351,
+ 346251, 3046251, 436251, 4036251, 3406251, 4306251, 364251, 3064251, 634251, 6034251, 3604251, 6304251,
+ 463251, 4063251, 643251, 6043251, 4603251, 6403251, 3460251, 4360251, 3640251, 6340251, 4630251, 6430251,
+ 2346051, 3246051, 2436051, 4236051, 3426051, 4326051, 2364051, 3264051, 2634051, 6234051, 3624051, 6324051,
+ 2463051, 4263051, 2643051, 6243051, 4623051, 6423051, 3462051, 4362051, 3642051, 6342051, 4632051, 6432051,
+ 235641, 2035641, 325641, 3025641, 2305641, 3205641, 253641, 2053641, 523641, 5023641, 2503641, 5203641,
+ 352641, 3052641, 532641, 5032641, 3502641, 5302641, 2350641, 3250641, 2530641, 5230641, 3520641, 5320641,
+ 236541, 2036541, 326541, 3026541, 2306541, 3206541, 263541, 2063541, 623541, 6023541, 2603541, 6203541,
+ 362541, 3062541, 632541, 6032541, 3602541, 6302541, 2360541, 3260541, 2630541, 6230541, 3620541, 6320541,
+ 256341, 2056341, 526341, 5026341, 2506341, 5206341, 265341, 2065341, 625341, 6025341, 2605341, 6205341,
+ 562341, 5062341, 652341, 6052341, 5602341, 6502341, 2560341, 5260341, 2650341, 6250341, 5620341, 6520341,
+ 356241, 3056241, 536241, 5036241, 3506241, 5306241, 365241, 3065241, 635241, 6035241, 3605241, 6305241,
+ 563241, 5063241, 653241, 6053241, 5603241, 6503241, 3560241, 5360241, 3650241, 6350241, 5630241, 6530241,
+ 2356041, 3256041, 2536041, 5236041, 3526041, 5326041, 2365041, 3265041, 2635041, 6235041, 3625041, 6325041,
+ 2563041, 5263041, 2653041, 6253041, 5623041, 6523041, 3562041, 5362041, 3652041, 6352041, 5632041, 6532041,
+ 245631, 2045631, 425631, 4025631, 2405631, 4205631, 254631, 2054631, 524631, 5024631, 2504631, 5204631,
+ 452631, 4052631, 542631, 5042631, 4502631, 5402631, 2450631, 4250631, 2540631, 5240631, 4520631, 5420631,
+ 246531, 2046531, 426531, 4026531, 2406531, 4206531, 264531, 2064531, 624531, 6024531, 2604531, 6204531,
+ 462531, 4062531, 642531, 6042531, 4602531, 6402531, 2460531, 4260531, 2640531, 6240531, 4620531, 6420531,
+ 256431, 2056431, 526431, 5026431, 2506431, 5206431, 265431, 2065431, 625431, 6025431, 2605431, 6205431,
+ 562431, 5062431, 652431, 6052431, 5602431, 6502431, 2560431, 5260431, 2650431, 6250431, 5620431, 6520431,
+ 456231, 4056231, 546231, 5046231, 4506231, 5406231, 465231, 4065231, 645231, 6045231, 4605231, 6405231,
+ 564231, 5064231, 654231, 6054231, 5604231, 6504231, 4560231, 5460231, 4650231, 6450231, 5640231, 6540231,
+ 2456031, 4256031, 2546031, 5246031, 4526031, 5426031, 2465031, 4265031, 2645031, 6245031, 4625031, 6425031,
+ 2564031, 5264031, 2654031, 6254031, 5624031, 6524031, 4562031, 5462031, 4652031, 6452031, 5642031, 6542031,
+ 345621, 3045621, 435621, 4035621, 3405621, 4305621, 354621, 3054621, 534621, 5034621, 3504621, 5304621,
+ 453621, 4053621, 543621, 5043621, 4503621, 5403621, 3450621, 4350621, 3540621, 5340621, 4530621, 5430621,
+ 346521, 3046521, 436521, 4036521, 3406521, 4306521, 364521, 3064521, 634521, 6034521, 3604521, 6304521,
+ 463521, 4063521, 643521, 6043521, 4603521, 6403521, 3460521, 4360521, 3640521, 6340521, 4630521, 6430521,
+ 356421, 3056421, 536421, 5036421, 3506421, 5306421, 365421, 3065421, 635421, 6035421, 3605421, 6305421,
+ 563421, 5063421, 653421, 6053421, 5603421, 6503421, 3560421, 5360421, 3650421, 6350421, 5630421, 6530421,
+ 456321, 4056321, 546321, 5046321, 4506321, 5406321, 465321, 4065321, 645321, 6045321, 4605321, 6405321,
+ 564321, 5064321, 654321, 6054321, 5604321, 6504321, 4560321, 5460321, 4650321, 6450321, 5640321, 6540321,
+ 3456021, 4356021, 3546021, 5346021, 4536021, 5436021, 3465021, 4365021, 3645021, 6345021, 4635021, 6435021,
+ 3564021, 5364021, 3654021, 6354021, 5634021, 6534021, 4563021, 5463021, 4653021, 6453021, 5643021, 6543021,
+ 2345601, 3245601, 2435601, 4235601, 3425601, 4325601, 2354601, 3254601, 2534601, 5234601, 3524601, 5324601,
+ 2453601, 4253601, 2543601, 5243601, 4523601, 5423601, 3452601, 4352601, 3542601, 5342601, 4532601, 5432601,
+ 2346501, 3246501, 2436501, 4236501, 3426501, 4326501, 2364501, 3264501, 2634501, 6234501, 3624501, 6324501,
+ 2463501, 4263501, 2643501, 6243501, 4623501, 6423501, 3462501, 4362501, 3642501, 6342501, 4632501, 6432501,
+ 2356401, 3256401, 2536401, 5236401, 3526401, 5326401, 2365401, 3265401, 2635401, 6235401, 3625401, 6325401,
+ 2563401, 5263401, 2653401, 6253401, 5623401, 6523401, 3562401, 5362401, 3652401, 6352401, 5632401, 6532401,
+ 2456301, 4256301, 2546301, 5246301, 4526301, 5426301, 2465301, 4265301, 2645301, 6245301, 4625301, 6425301,
+ 2564301, 5264301, 2654301, 6254301, 5624301, 6524301, 4562301, 5462301, 4652301, 6452301, 5642301, 6542301,
+ 3456201, 4356201, 3546201, 5346201, 4536201, 5436201, 3465201, 4365201, 3645201, 6345201, 4635201, 6435201,
+ 3564201, 5364201, 3654201, 6354201, 5634201, 6534201, 4563201, 5463201, 4653201, 6453201, 5643201, 6543201,
+ 1234560, 2134560, 1324560, 3124560, 2314560, 3214560, 1243560, 2143560, 1423560, 4123560, 2413560, 4213560,
+ 1342560, 3142560, 1432560, 4132560, 3412560, 4312560, 2341560, 3241560, 2431560, 4231560, 3421560, 4321560,
+ 1235460, 2135460, 1325460, 3125460, 2315460, 3215460, 1253460, 2153460, 1523460, 5123460, 2513460, 5213460,
+ 1352460, 3152460, 1532460, 5132460, 3512460, 5312460, 2351460, 3251460, 2531460, 5231460, 3521460, 5321460,
+ 1245360, 2145360, 1425360, 4125360, 2415360, 4215360, 1254360, 2154360, 1524360, 5124360, 2514360, 5214360,
+ 1452360, 4152360, 1542360, 5142360, 4512360, 5412360, 2451360, 4251360, 2541360, 5241360, 4521360, 5421360,
+ 1345260, 3145260, 1435260, 4135260, 3415260, 4315260, 1354260, 3154260, 1534260, 5134260, 3514260, 5314260,
+ 1453260, 4153260, 1543260, 5143260, 4513260, 5413260, 3451260, 4351260, 3541260, 5341260, 4531260, 5431260,
+ 2345160, 3245160, 2435160, 4235160, 3425160, 4325160, 2354160, 3254160, 2534160, 5234160, 3524160, 5324160,
+ 2453160, 4253160, 2543160, 5243160, 4523160, 5423160, 3452160, 4352160, 3542160, 5342160, 4532160, 5432160,
+ 1234650, 2134650, 1324650, 3124650, 2314650, 3214650, 1243650, 2143650, 1423650, 4123650, 2413650, 4213650,
+ 1342650, 3142650, 1432650, 4132650, 3412650, 4312650, 2341650, 3241650, 2431650, 4231650, 3421650, 4321650,
+ 1236450, 2136450, 1326450, 3126450, 2316450, 3216450, 1263450, 2163450, 1623450, 6123450, 2613450, 6213450,
+ 1362450, 3162450, 1632450, 6132450, 3612450, 6312450, 2361450, 3261450, 2631450, 6231450, 3621450, 6321450,
+ 1246350, 2146350, 1426350, 4126350, 2416350, 4216350, 1264350, 2164350, 1624350, 6124350, 2614350, 6214350,
+ 1462350, 4162350, 1642350, 6142350, 4612350, 6412350, 2461350, 4261350, 2641350, 6241350, 4621350, 6421350,
+ 1346250, 3146250, 1436250, 4136250, 3416250, 4316250, 1364250, 3164250, 1634250, 6134250, 3614250, 6314250,
+ 1463250, 4163250, 1643250, 6143250, 4613250, 6413250, 3461250, 4361250, 3641250, 6341250, 4631250, 6431250,
+ 2346150, 3246150, 2436150, 4236150, 3426150, 4326150, 2364150, 3264150, 2634150, 6234150, 3624150, 6324150,
+ 2463150, 4263150, 2643150, 6243150, 4623150, 6423150, 3462150, 4362150, 3642150, 6342150, 4632150, 6432150,
+ 1235640, 2135640, 1325640, 3125640, 2315640, 3215640, 1253640, 2153640, 1523640, 5123640, 2513640, 5213640,
+ 1352640, 3152640, 1532640, 5132640, 3512640, 5312640, 2351640, 3251640, 2531640, 5231640, 3521640, 5321640,
+ 1236540, 2136540, 1326540, 3126540, 2316540, 3216540, 1263540, 2163540, 1623540, 6123540, 2613540, 6213540,
+ 1362540, 3162540, 1632540, 6132540, 3612540, 6312540, 2361540, 3261540, 2631540, 6231540, 3621540, 6321540,
+ 1256340, 2156340, 1526340, 5126340, 2516340, 5216340, 1265340, 2165340, 1625340, 6125340, 2615340, 6215340,
+ 1562340, 5162340, 1652340, 6152340, 5612340, 6512340, 2561340, 5261340, 2651340, 6251340, 5621340, 6521340,
+ 1356240, 3156240, 1536240, 5136240, 3516240, 5316240, 1365240, 3165240, 1635240, 6135240, 3615240, 6315240,
+ 1563240, 5163240, 1653240, 6153240, 5613240, 6513240, 3561240, 5361240, 3651240, 6351240, 5631240, 6531240,
+ 2356140, 3256140, 2536140, 5236140, 3526140, 5326140, 2365140, 3265140, 2635140, 6235140, 3625140, 6325140,
+ 2563140, 5263140, 2653140, 6253140, 5623140, 6523140, 3562140, 5362140, 3652140, 6352140, 5632140, 6532140,
+ 1245630, 2145630, 1425630, 4125630, 2415630, 4215630, 1254630, 2154630, 1524630, 5124630, 2514630, 5214630,
+ 1452630, 4152630, 1542630, 5142630, 4512630, 5412630, 2451630, 4251630, 2541630, 5241630, 4521630, 5421630,
+ 1246530, 2146530, 1426530, 4126530, 2416530, 4216530, 1264530, 2164530, 1624530, 6124530, 2614530, 6214530,
+ 1462530, 4162530, 1642530, 6142530, 4612530, 6412530, 2461530, 4261530, 2641530, 6241530, 4621530, 6421530,
+ 1256430, 2156430, 1526430, 5126430, 2516430, 5216430, 1265430, 2165430, 1625430, 6125430, 2615430, 6215430,
+ 1562430, 5162430, 1652430, 6152430, 5612430, 6512430, 2561430, 5261430, 2651430, 6251430, 5621430, 6521430,
+ 1456230, 4156230, 1546230, 5146230, 4516230, 5416230, 1465230, 4165230, 1645230, 6145230, 4615230, 6415230,
+ 1564230, 5164230, 1654230, 6154230, 5614230, 6514230, 4561230, 5461230, 4651230, 6451230, 5641230, 6541230,
+ 2456130, 4256130, 2546130, 5246130, 4526130, 5426130, 2465130, 4265130, 2645130, 6245130, 4625130, 6425130,
+ 2564130, 5264130, 2654130, 6254130, 5624130, 6524130, 4562130, 5462130, 4652130, 6452130, 5642130, 6542130,
+ 1345620, 3145620, 1435620, 4135620, 3415620, 4315620, 1354620, 3154620, 1534620, 5134620, 3514620, 5314620,
+ 1453620, 4153620, 1543620, 5143620, 4513620, 5413620, 3451620, 4351620, 3541620, 5341620, 4531620, 5431620,
+ 1346520, 3146520, 1436520, 4136520, 3416520, 4316520, 1364520, 3164520, 1634520, 6134520, 3614520, 6314520,
+ 1463520, 4163520, 1643520, 6143520, 4613520, 6413520, 3461520, 4361520, 3641520, 6341520, 4631520, 6431520,
+ 1356420, 3156420, 1536420, 5136420, 3516420, 5316420, 1365420, 3165420, 1635420, 6135420, 3615420, 6315420,
+ 1563420, 5163420, 1653420, 6153420, 5613420, 6513420, 3561420, 5361420, 3651420, 6351420, 5631420, 6531420,
+ 1456320, 4156320, 1546320, 5146320, 4516320, 5416320, 1465320, 4165320, 1645320, 6145320, 4615320, 6415320,
+ 1564320, 5164320, 1654320, 6154320, 5614320, 6514320, 4561320, 5461320, 4651320, 6451320, 5641320, 6541320,
+ 3456120, 4356120, 3546120, 5346120, 4536120, 5436120, 3465120, 4365120, 3645120, 6345120, 4635120, 6435120,
+ 3564120, 5364120, 3654120, 6354120, 5634120, 6534120, 4563120, 5463120, 4653120, 6453120, 5643120, 6543120,
+ 2345610, 3245610, 2435610, 4235610, 3425610, 4325610, 2354610, 3254610, 2534610, 5234610, 3524610, 5324610,
+ 2453610, 4253610, 2543610, 5243610, 4523610, 5423610, 3452610, 4352610, 3542610, 5342610, 4532610, 5432610,
+ 2346510, 3246510, 2436510, 4236510, 3426510, 4326510, 2364510, 3264510, 2634510, 6234510, 3624510, 6324510,
+ 2463510, 4263510, 2643510, 6243510, 4623510, 6423510, 3462510, 4362510, 3642510, 6342510, 4632510, 6432510,
+ 2356410, 3256410, 2536410, 5236410, 3526410, 5326410, 2365410, 3265410, 2635410, 6235410, 3625410, 6325410,
+ 2563410, 5263410, 2653410, 6253410, 5623410, 6523410, 3562410, 5362410, 3652410, 6352410, 5632410, 6532410,
+ 2456310, 4256310, 2546310, 5246310, 4526310, 5426310, 2465310, 4265310, 2645310, 6245310, 4625310, 6425310,
+ 2564310, 5264310, 2654310, 6254310, 5624310, 6524310, 4562310, 5462310, 4652310, 6452310, 5642310, 6542310,
+ 3456210, 4356210, 3546210, 5346210, 4536210, 5436210, 3465210, 4365210, 3645210, 6345210, 4635210, 6435210,
+ 3564210, 5364210, 3654210, 6354210, 5634210, 6534210, 4563210, 5463210, 4653210, 6453210, 5643210, 6543210
+ };
+ std::map<uint64_t, int> expected;
+ for (std::size_t i = 0; i < 5040; i++)
+ expected[pre_expected[i]] = 0; // flags are 0, everything is symmetric here
+
+ VERIFY(isDynGroup(group));
+ VERIFY_IS_EQUAL(group.size(), 5040u);
+ VERIFY_IS_EQUAL(group.globalFlags(), 0);
+ group.apply<checkIdx, int>(identity7, 0, found, expected);
+ VERIFY_IS_EQUAL(found.size(), 5040u);
+ }
+}
+
+static void test_tensor_epsilon()
+{
+ SGroup<AntiSymmetry<0,1>, AntiSymmetry<1,2>> sym;
+ Tensor<int, 3> epsilon(3,3,3);
+
+ epsilon.setZero();
+ sym(epsilon, 0, 1, 2) = 1;
+
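+ // -(j-i)*(k-j)*(i-k)/2 is a closed form for the 3-D Levi-Civita symbol
+ // on indices in {0,1,2}: +1 for even permutations of (0,1,2), -1 for odd
+ // ones, and 0 whenever two indices coincide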
+ for (int i = 0; i < 3; i++) {
+ for (int j = 0; j < 3; j++) {
+ for (int k = 0; k < 3; k++) {
+ VERIFY_IS_EQUAL((epsilon(i,j,k)), (- (j - i) * (k - j) * (i - k) / 2) );
+ }
+ }
+ }
+}
+
+static void test_tensor_sym()
+{
+ SGroup<Symmetry<0,1>, Symmetry<2,3>> sym;
+ Tensor<int, 4> t(10,10,10,10);
+
+ t.setZero();
+
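+ // writing only the canonical entries (i >= j, k >= l) is enough: the
+ // symmetry group mirrors each assignment to all equivalent index tuples,
+ // and (i + j) * (k + l) is invariant under both swaps, so the full loop
+ // below can check every entry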
+ for (int l = 0; l < 10; l++) {
+ for (int k = l; k < 10; k++) {
+ for (int j = 0; j < 10; j++) {
+ for (int i = j; i < 10; i++) {
+ sym(t, i, j, k, l) = (i + j) * (k + l);
+ }
+ }
+ }
+ }
+
+ for (int l = 0; l < 10; l++) {
+ for (int k = 0; k < 10; k++) {
+ for (int j = 0; j < 10; j++) {
+ for (int i = 0; i < 10; i++) {
+ VERIFY_IS_EQUAL((t(i, j, k, l)), ((i + j) * (k + l)));
+ }
+ }
+ }
+ }
+}
+
+static void test_tensor_asym()
+{
+ SGroup<AntiSymmetry<0,1>, AntiSymmetry<2,3>> sym;
+ Tensor<int, 4> t(10,10,10,10);
+
+ t.setZero();
+
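+ // only strictly ordered entries (i > j, k > l) are written; antisymmetry
+ // mirrors them with a sign flip per swap and forces any entry with i == j
+ // or k == l to remain zero, which the checks below verify case by case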
+ for (int l = 0; l < 10; l++) {
+ for (int k = l + 1; k < 10; k++) {
+ for (int j = 0; j < 10; j++) {
+ for (int i = j + 1; i < 10; i++) {
+ sym(t, i, j, k, l) = ((i * j) + (k * l));
+ }
+ }
+ }
+ }
+
+ for (int l = 0; l < 10; l++) {
+ for (int k = 0; k < 10; k++) {
+ for (int j = 0; j < 10; j++) {
+ for (int i = 0; i < 10; i++) {
+ if (i < j && k < l)
+ VERIFY_IS_EQUAL((t(i, j, k, l)), (((i * j) + (k * l))));
+ else if (i > j && k > l)
+ VERIFY_IS_EQUAL((t(i, j, k, l)), (((i * j) + (k * l))));
+ else if (i < j && k > l)
+ VERIFY_IS_EQUAL((t(i, j, k, l)), (- ((i * j) + (k * l))));
+ else if (i > j && k < l)
+ VERIFY_IS_EQUAL((t(i, j, k, l)), (- ((i * j) + (k * l))));
+ else
+ VERIFY_IS_EQUAL((t(i, j, k, l)), 0);
+ }
+ }
+ }
+ }
+}
+
+static void test_tensor_dynsym()
+{
+ DynamicSGroup sym;
+ sym.addSymmetry(0,1);
+ sym.addSymmetry(2,3);
+ Tensor<int, 4> t(10,10,10,10);
+
+ t.setZero();
+
+ for (int l = 0; l < 10; l++) {
+ for (int k = l; k < 10; k++) {
+ for (int j = 0; j < 10; j++) {
+ for (int i = j; i < 10; i++) {
+ sym(t, i, j, k, l) = (i + j) * (k + l);
+ }
+ }
+ }
+ }
+
+ for (int l = 0; l < 10; l++) {
+ for (int k = 0; k < 10; k++) {
+ for (int j = 0; j < 10; j++) {
+ for (int i = 0; i < 10; i++) {
+ VERIFY_IS_EQUAL((t(i, j, k, l)), ((i + j) * (k + l)));
+ }
+ }
+ }
+ }
+}
+
+static void test_tensor_randacc()
+{
+ SGroup<Symmetry<0,1>, Symmetry<2,3>> sym;
+ Tensor<int, 4> t(10,10,10,10);
+
+ t.setZero();
+
+ // set elements one million times; with random indices that is enough
+ // to touch essentially the entire tensor
+ for (int n = 0; n < 1000000; n++) {
+ int i = rand() % 10;
+ int j = rand() % 10;
+ int k = rand() % 10;
+ int l = rand() % 10;
+ // only write through indices in canonical order (i >= j, k >= l)
+ if (i < j)
+ std::swap(i, j);
+ if (k < l)
+ std::swap(k, l);
+ sym(t, i, j, k, l) = (i + j) * (k + l);
+ }
+
+ for (int l = 0; l < 10; l++) {
+ for (int k = 0; k < 10; k++) {
+ for (int j = 0; j < 10; j++) {
+ for (int i = 0; i < 10; i++) {
+ VERIFY_IS_EQUAL((t(i, j, k, l)), ((i + j) * (k + l)));
+ }
+ }
+ }
+ }
+}
+
+void test_cxx11_tensor_symmetry()
+{
+ CALL_SUBTEST(test_symgroups_static());
+ CALL_SUBTEST(test_symgroups_dynamic());
+ CALL_SUBTEST(test_symgroups_selection());
+ CALL_SUBTEST(test_tensor_epsilon());
+ CALL_SUBTEST(test_tensor_sym());
+ CALL_SUBTEST(test_tensor_asym());
+ CALL_SUBTEST(test_tensor_dynsym());
+ CALL_SUBTEST(test_tensor_randacc());
+}
+
+/*
+ * kate: space-indent on; indent-width 2; mixedindent off; indent-mode cstyle;
+ */
diff --git a/unsupported/test/cxx11_tensor_thread_pool.cpp b/unsupported/test/cxx11_tensor_thread_pool.cpp
new file mode 100644
index 000000000..2ef665f30
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_thread_pool.cpp
@@ -0,0 +1,373 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#define EIGEN_USE_THREADS
+
+
+#include "main.h"
+#include <iostream>
+#include <Eigen/CXX11/Tensor>
+
+using Eigen::Tensor;
+
+
+void test_multithread_elementwise()
+{
+ Tensor<float, 3> in1(2,3,7);
+ Tensor<float, 3> in2(2,3,7);
+ Tensor<float, 3> out(2,3,7);
+
+ in1.setRandom();
+ in2.setRandom();
+
+ Eigen::ThreadPool tp(internal::random<int>(3, 11));
+ Eigen::ThreadPoolDevice thread_pool_device(&tp, internal::random<int>(3, 11));
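+ // note: the pool size and the device's advertised thread count are drawn
+ // independently and may disagree; presumably this exercises the scheduler
+ // under mismatched configurations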
+ out.device(thread_pool_device) = in1 + in2 * 3.14f;
+
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 7; ++k) {
+ VERIFY_IS_APPROX(out(i,j,k), in1(i,j,k) + in2(i,j,k) * 3.14f);
+ }
+ }
+ }
+}
+
+
+void test_multithread_compound_assignment()
+{
+ Tensor<float, 3> in1(2,3,7);
+ Tensor<float, 3> in2(2,3,7);
+ Tensor<float, 3> out(2,3,7);
+
+ in1.setRandom();
+ in2.setRandom();
+
+ Eigen::ThreadPool tp(internal::random<int>(3, 11));
+ Eigen::ThreadPoolDevice thread_pool_device(&tp, internal::random<int>(3, 11));
+ out.device(thread_pool_device) = in1;
+ out.device(thread_pool_device) += in2 * 3.14f;
+
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 7; ++k) {
+ VERIFY_IS_APPROX(out(i,j,k), in1(i,j,k) + in2(i,j,k) * 3.14f);
+ }
+ }
+ }
+}
+
+template<int DataLayout>
+void test_multithread_contraction()
+{
+ Tensor<float, 4, DataLayout> t_left(30, 50, 37, 31);
+ Tensor<float, 5, DataLayout> t_right(37, 31, 70, 2, 10);
+ Tensor<float, 5, DataLayout> t_result(30, 50, 70, 2, 10);
+
+ t_left.setRandom();
+ t_right.setRandom();
+
+ // this contraction should be equivalent to a single matrix multiplication
+ typedef Tensor<float, 1>::DimensionPair DimPair;
+ Eigen::array<DimPair, 2> dims({{DimPair(2, 0), DimPair(3, 1)}});
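+ // contracting dims (2,3) of t_left with dims (0,1) of t_right collapses
+ // both tensors to matrices: (30*50) x (37*31) times (37*31) x (70*2*10),
+ // i.e. the 1500 x 1147 and 1147 x 1400 maps below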
+
+ typedef Map<Matrix<float, Dynamic, Dynamic, DataLayout>> MapXf;
+ MapXf m_left(t_left.data(), 1500, 1147);
+ MapXf m_right(t_right.data(), 1147, 1400);
+ Matrix<float, Dynamic, Dynamic, DataLayout> m_result(1500, 1400);
+
+ Eigen::ThreadPool tp(4);
+ Eigen::ThreadPoolDevice thread_pool_device(&tp, 4);
+
+ // compute results by separate methods
+ t_result.device(thread_pool_device) = t_left.contract(t_right, dims);
+ m_result = m_left * m_right;
+
+ for (ptrdiff_t i = 0; i < t_result.size(); i++) {
+ VERIFY(&t_result.data()[i] != &m_result.data()[i]);
+ if (fabsf(t_result(i) - m_result(i)) < 1e-4f) {
+ continue;
+ }
+ if (Eigen::internal::isApprox(t_result(i), m_result(i), 1e-4f)) {
+ continue;
+ }
+ std::cout << "mismatch detected at index " << i << ": " << t_result(i)
+ << " vs " << m_result(i) << std::endl;
+ assert(false);
+ }
+}
+
+template<int DataLayout>
+void test_contraction_corner_cases()
+{
+ Tensor<float, 2, DataLayout> t_left(32, 500);
+ Tensor<float, 2, DataLayout> t_right(32, 28*28);
+ Tensor<float, 2, DataLayout> t_result(500, 28*28);
+
+ t_left = (t_left.constant(-0.5f) + t_left.random()) * 2.0f;
+ t_right = (t_right.constant(-0.6f) + t_right.random()) * 2.0f;
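+  // Seed the output with NaNs so any coefficient the contraction fails to write
+  // trips the isnan assertion below.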
+ t_result = t_result.constant(NAN);
+
+ // this contraction should be equivalent to a single matrix multiplication
+ typedef Tensor<float, 1>::DimensionPair DimPair;
+ Eigen::array<DimPair, 1> dims{{DimPair(0, 0)}};
+
+ typedef Map<Matrix<float, Dynamic, Dynamic, DataLayout>> MapXf;
+ MapXf m_left(t_left.data(), 32, 500);
+ MapXf m_right(t_right.data(), 32, 28*28);
+ Matrix<float, Dynamic, Dynamic, DataLayout> m_result(500, 28*28);
+
+ Eigen::ThreadPool tp(12);
+ Eigen::ThreadPoolDevice thread_pool_device(&tp, 12);
+
+ // compute results by separate methods
+ t_result.device(thread_pool_device) = t_left.contract(t_right, dims);
+ m_result = m_left.transpose() * m_right;
+
+ for (ptrdiff_t i = 0; i < t_result.size(); i++) {
+ assert(!(numext::isnan)(t_result.data()[i]));
+ if (fabsf(t_result.data()[i] - m_result.data()[i]) >= 1e-4f) {
+ std::cout << "mismatch detected at index " << i << " : " << t_result.data()[i] << " vs " << m_result.data()[i] << std::endl;
+ assert(false);
+ }
+ }
+
+ t_left.resize(32, 1);
+ t_left = (t_left.constant(-0.5f) + t_left.random()) * 2.0f;
+ t_result.resize (1, 28*28);
+ t_result = t_result.constant(NAN);
+ t_result.device(thread_pool_device) = t_left.contract(t_right, dims);
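+  // resize() reallocated t_left's buffer, so re-seat the Map with placement new
+  // (the idiom Eigen documents for re-binding a Map).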
+ new(&m_left) MapXf(t_left.data(), 32, 1);
+ m_result = m_left.transpose() * m_right;
+ for (ptrdiff_t i = 0; i < t_result.size(); i++) {
+ assert(!(numext::isnan)(t_result.data()[i]));
+ if (fabsf(t_result.data()[i] - m_result.data()[i]) >= 1e-4f) {
+ std::cout << "mismatch detected: " << t_result.data()[i] << " vs " << m_result.data()[i] << std::endl;
+ assert(false);
+ }
+ }
+
+ t_left.resize(32, 500);
+ t_right.resize(32, 4);
+ t_left = (t_left.constant(-0.5f) + t_left.random()) * 2.0f;
+ t_right = (t_right.constant(-0.6f) + t_right.random()) * 2.0f;
+ t_result.resize (500, 4);
+ t_result = t_result.constant(NAN);
+ t_result.device(thread_pool_device) = t_left.contract(t_right, dims);
+ new(&m_left) MapXf(t_left.data(), 32, 500);
+ new(&m_right) MapXf(t_right.data(), 32, 4);
+ m_result = m_left.transpose() * m_right;
+ for (ptrdiff_t i = 0; i < t_result.size(); i++) {
+ assert(!(numext::isnan)(t_result.data()[i]));
+ if (fabsf(t_result.data()[i] - m_result.data()[i]) >= 1e-4f) {
+ std::cout << "mismatch detected: " << t_result.data()[i] << " vs " << m_result.data()[i] << std::endl;
+ assert(false);
+ }
+ }
+
+ t_left.resize(32, 1);
+ t_right.resize(32, 4);
+ t_left = (t_left.constant(-0.5f) + t_left.random()) * 2.0f;
+ t_right = (t_right.constant(-0.6f) + t_right.random()) * 2.0f;
+ t_result.resize (1, 4);
+ t_result = t_result.constant(NAN);
+ t_result.device(thread_pool_device) = t_left.contract(t_right, dims);
+ new(&m_left) MapXf(t_left.data(), 32, 1);
+ new(&m_right) MapXf(t_right.data(), 32, 4);
+ m_result = m_left.transpose() * m_right;
+ for (ptrdiff_t i = 0; i < t_result.size(); i++) {
+ assert(!(numext::isnan)(t_result.data()[i]));
+ if (fabsf(t_result.data()[i] - m_result.data()[i]) >= 1e-4f) {
+ std::cout << "mismatch detected: " << t_result.data()[i] << " vs " << m_result.data()[i] << std::endl;
+ assert(false);
+ }
+ }
+}
+
+template<int DataLayout>
+void test_multithread_contraction_agrees_with_singlethread() {
+ int contract_size = internal::random<int>(1, 5000);
+
+ Tensor<float, 3, DataLayout> left(internal::random<int>(1, 80),
+ contract_size,
+ internal::random<int>(1, 100));
+
+ Tensor<float, 4, DataLayout> right(internal::random<int>(1, 25),
+ internal::random<int>(1, 37),
+ contract_size,
+ internal::random<int>(1, 51));
+
+ left.setRandom();
+ right.setRandom();
+
+  // shift the values away from zero so the relative-precision checks below stay meaningful
+ left += left.constant(1.5f);
+ right += right.constant(1.5f);
+
+ typedef Tensor<float, 1>::DimensionPair DimPair;
+ Eigen::array<DimPair, 1> dims({{DimPair(1, 2)}});
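+  // Contracting one index pair of a rank-3 and a rank-4 tensor yields rank 3 + 4 - 2 = 5.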
+
+ Eigen::ThreadPool tp(internal::random<int>(2, 11));
+ Eigen::ThreadPoolDevice thread_pool_device(&tp, internal::random<int>(2, 11));
+
+ Tensor<float, 5, DataLayout> st_result;
+ st_result = left.contract(right, dims);
+
+ Tensor<float, 5, DataLayout> tp_result(st_result.dimensions());
+ tp_result.device(thread_pool_device) = left.contract(right, dims);
+
+ VERIFY(dimensions_match(st_result.dimensions(), tp_result.dimensions()));
+ for (ptrdiff_t i = 0; i < st_result.size(); i++) {
+    // Skip the relative check when the difference is tiny: VERIFY_IS_APPROX uses
+    // relative precision and fails spuriously when both values are close to zero.
+ if (numext::abs(st_result.data()[i] - tp_result.data()[i]) >= 1e-4f) {
+ VERIFY_IS_APPROX(st_result.data()[i], tp_result.data()[i]);
+ }
+ }
+}
+
+
+template<int DataLayout>
+void test_full_contraction() {
+ int contract_size1 = internal::random<int>(1, 500);
+ int contract_size2 = internal::random<int>(1, 500);
+
+ Tensor<float, 2, DataLayout> left(contract_size1,
+ contract_size2);
+ Tensor<float, 2, DataLayout> right(contract_size1,
+ contract_size2);
+ left.setRandom();
+ right.setRandom();
+
+  // shift the values away from zero so the relative-precision checks below stay meaningful
+ left += left.constant(1.5f);
+ right += right.constant(1.5f);
+
+ typedef Tensor<float, 2>::DimensionPair DimPair;
+ Eigen::array<DimPair, 2> dims({{DimPair(0, 0), DimPair(1, 1)}});
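+  // Contracting both index pairs leaves a rank-0 tensor, i.e. a single scalar.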
+
+ Eigen::ThreadPool tp(internal::random<int>(2, 11));
+ Eigen::ThreadPoolDevice thread_pool_device(&tp, internal::random<int>(2, 11));
+
+ Tensor<float, 0, DataLayout> st_result;
+ st_result = left.contract(right, dims);
+
+ Tensor<float, 0, DataLayout> tp_result;
+ tp_result.device(thread_pool_device) = left.contract(right, dims);
+
+ VERIFY(dimensions_match(st_result.dimensions(), tp_result.dimensions()));
+  // Skip the relative check when the difference is tiny: VERIFY_IS_APPROX uses
+  // relative precision and fails spuriously when both values are close to zero.
+ if (numext::abs(st_result() - tp_result()) >= 1e-4f) {
+ VERIFY_IS_APPROX(st_result(), tp_result());
+ }
+}
+
+template<int DataLayout>
+void test_multithreaded_reductions() {
+ const int num_threads = internal::random<int>(3, 11);
+ ThreadPool thread_pool(num_threads);
+ Eigen::ThreadPoolDevice thread_pool_device(&thread_pool, num_threads);
+
+ const int num_rows = internal::random<int>(13, 732);
+ const int num_cols = internal::random<int>(13, 732);
+ Tensor<float, 2, DataLayout> t1(num_rows, num_cols);
+ t1.setRandom();
+
+ Tensor<float, 0, DataLayout> full_redux;
+ full_redux = t1.sum();
+
+ Tensor<float, 0, DataLayout> full_redux_tp;
+ full_redux_tp.device(thread_pool_device) = t1.sum();
+
+ // Check that the single threaded and the multi threaded reductions return
+ // the same result.
+ VERIFY_IS_APPROX(full_redux(), full_redux_tp());
+}
+
+
+void test_memcpy() {
+
+ for (int i = 0; i < 5; ++i) {
+ const int num_threads = internal::random<int>(3, 11);
+ Eigen::ThreadPool tp(num_threads);
+ Eigen::ThreadPoolDevice thread_pool_device(&tp, num_threads);
+
+ const int size = internal::random<int>(13, 7632);
+ Tensor<float, 1> t1(size);
+ t1.setRandom();
+ std::vector<float> result(size);
+ thread_pool_device.memcpy(&result[0], t1.data(), size*sizeof(float));
+ for (int j = 0; j < size; j++) {
+ VERIFY_IS_EQUAL(t1(j), result[j]);
+ }
+ }
+}
+
+
+void test_multithread_random()
+{
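+  // Pure smoke test: exercises the multithreaded random-generation path; the
+  // output values themselves are not checked.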
+ Eigen::ThreadPool tp(2);
+ Eigen::ThreadPoolDevice device(&tp, 2);
+ Tensor<float, 1> t(1 << 20);
+ t.device(device) = t.random<Eigen::internal::NormalRandomGenerator<float>>();
+}
+
+template<int DataLayout>
+void test_multithread_shuffle()
+{
+ Tensor<float, 4, DataLayout> tensor(17,5,7,11);
+ tensor.setRandom();
+
+ const int num_threads = internal::random<int>(2, 11);
+ ThreadPool threads(num_threads);
+ Eigen::ThreadPoolDevice device(&threads, num_threads);
+
+ Tensor<float, 4, DataLayout> shuffle(7,5,11,17);
+ array<ptrdiff_t, 4> shuffles = {{2,1,3,0}};
+ shuffle.device(device) = tensor.shuffle(shuffles);
+
+ for (int i = 0; i < 17; ++i) {
+ for (int j = 0; j < 5; ++j) {
+ for (int k = 0; k < 7; ++k) {
+ for (int l = 0; l < 11; ++l) {
+ VERIFY_IS_EQUAL(tensor(i,j,k,l), shuffle(k,j,l,i));
+ }
+ }
+ }
+ }
+}
+
+
+void test_cxx11_tensor_thread_pool()
+{
+ CALL_SUBTEST_1(test_multithread_elementwise());
+ CALL_SUBTEST_1(test_multithread_compound_assignment());
+
+ CALL_SUBTEST_2(test_multithread_contraction<ColMajor>());
+ CALL_SUBTEST_2(test_multithread_contraction<RowMajor>());
+
+ CALL_SUBTEST_3(test_multithread_contraction_agrees_with_singlethread<ColMajor>());
+ CALL_SUBTEST_3(test_multithread_contraction_agrees_with_singlethread<RowMajor>());
+
+ // Exercise various cases that have been problematic in the past.
+ CALL_SUBTEST_4(test_contraction_corner_cases<ColMajor>());
+ CALL_SUBTEST_4(test_contraction_corner_cases<RowMajor>());
+
+ CALL_SUBTEST_4(test_full_contraction<ColMajor>());
+ CALL_SUBTEST_4(test_full_contraction<RowMajor>());
+
+ CALL_SUBTEST_5(test_multithreaded_reductions<ColMajor>());
+ CALL_SUBTEST_5(test_multithreaded_reductions<RowMajor>());
+
+ CALL_SUBTEST_6(test_memcpy());
+ CALL_SUBTEST_6(test_multithread_random());
+ CALL_SUBTEST_6(test_multithread_shuffle<ColMajor>());
+ CALL_SUBTEST_6(test_multithread_shuffle<RowMajor>());
+}
diff --git a/unsupported/test/cxx11_tensor_uint128.cpp b/unsupported/test/cxx11_tensor_uint128.cpp
new file mode 100644
index 000000000..d2a1e8673
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_uint128.cpp
@@ -0,0 +1,160 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2015 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+
+#include <Eigen/CXX11/Tensor>
+
+
+#if EIGEN_COMP_MSVC
+#define EIGEN_NO_INT128
+#else
+typedef __uint128_t uint128_t;
+#endif
+
+// Only run the test on compilers that support 128-bit integers natively
+#ifndef EIGEN_NO_INT128
+
+using Eigen::internal::TensorUInt128;
+using Eigen::internal::static_val;
+
+void VERIFY_EQUAL(TensorUInt128<uint64_t, uint64_t> actual, uint128_t expected) {
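+  // Compare the emulated 128-bit value limb by limb against the compiler-native __uint128_t.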
+ bool matchl = actual.lower() == static_cast<uint64_t>(expected);
+ bool matchh = actual.upper() == static_cast<uint64_t>(expected >> 64);
+ if (!matchl || !matchh) {
+ const char* testname = g_test_stack.back().c_str();
+ std::cerr << "Test " << testname << " failed in " << __FILE__
+ << " (" << __LINE__ << ")"
+ << std::endl;
+ abort();
+ }
+}
+
+
+void test_add() {
+ uint64_t incr = internal::random<uint64_t>(1, 9999999999);
+ for (uint64_t i1 = 0; i1 < 100; ++i1) {
+ for (uint64_t i2 = 1; i2 < 100 * incr; i2 += incr) {
+ TensorUInt128<uint64_t, uint64_t> i(i1, i2);
+ uint128_t a = (static_cast<uint128_t>(i1) << 64) + static_cast<uint128_t>(i2);
+ for (uint64_t j1 = 0; j1 < 100; ++j1) {
+ for (uint64_t j2 = 1; j2 < 100 * incr; j2 += incr) {
+ TensorUInt128<uint64_t, uint64_t> j(j1, j2);
+ uint128_t b = (static_cast<uint128_t>(j1) << 64) + static_cast<uint128_t>(j2);
+ TensorUInt128<uint64_t, uint64_t> actual = i + j;
+ uint128_t expected = a + b;
+ VERIFY_EQUAL(actual, expected);
+ }
+ }
+ }
+ }
+}
+
+void test_sub() {
+ uint64_t incr = internal::random<uint64_t>(1, 9999999999);
+ for (uint64_t i1 = 0; i1 < 100; ++i1) {
+ for (uint64_t i2 = 1; i2 < 100 * incr; i2 += incr) {
+ TensorUInt128<uint64_t, uint64_t> i(i1, i2);
+ uint128_t a = (static_cast<uint128_t>(i1) << 64) + static_cast<uint128_t>(i2);
+ for (uint64_t j1 = 0; j1 < 100; ++j1) {
+ for (uint64_t j2 = 1; j2 < 100 * incr; j2 += incr) {
+ TensorUInt128<uint64_t, uint64_t> j(j1, j2);
+ uint128_t b = (static_cast<uint128_t>(j1) << 64) + static_cast<uint128_t>(j2);
+ TensorUInt128<uint64_t, uint64_t> actual = i - j;
+ uint128_t expected = a - b;
+ VERIFY_EQUAL(actual, expected);
+ }
+ }
+ }
+ }
+}
+
+void test_mul() {
+ uint64_t incr = internal::random<uint64_t>(1, 9999999999);
+ for (uint64_t i1 = 0; i1 < 100; ++i1) {
+ for (uint64_t i2 = 1; i2 < 100 * incr; i2 += incr) {
+ TensorUInt128<uint64_t, uint64_t> i(i1, i2);
+ uint128_t a = (static_cast<uint128_t>(i1) << 64) + static_cast<uint128_t>(i2);
+ for (uint64_t j1 = 0; j1 < 100; ++j1) {
+ for (uint64_t j2 = 1; j2 < 100 * incr; j2 += incr) {
+ TensorUInt128<uint64_t, uint64_t> j(j1, j2);
+ uint128_t b = (static_cast<uint128_t>(j1) << 64) + static_cast<uint128_t>(j2);
+ TensorUInt128<uint64_t, uint64_t> actual = i * j;
+ uint128_t expected = a * b;
+ VERIFY_EQUAL(actual, expected);
+ }
+ }
+ }
+ }
+}
+
+void test_div() {
+ uint64_t incr = internal::random<uint64_t>(1, 9999999999);
+ for (uint64_t i1 = 0; i1 < 100; ++i1) {
+ for (uint64_t i2 = 1; i2 < 100 * incr; i2 += incr) {
+ TensorUInt128<uint64_t, uint64_t> i(i1, i2);
+ uint128_t a = (static_cast<uint128_t>(i1) << 64) + static_cast<uint128_t>(i2);
+ for (uint64_t j1 = 0; j1 < 100; ++j1) {
+ for (uint64_t j2 = 1; j2 < 100 * incr; j2 += incr) {
+ TensorUInt128<uint64_t, uint64_t> j(j1, j2);
+ uint128_t b = (static_cast<uint128_t>(j1) << 64) + static_cast<uint128_t>(j2);
+ TensorUInt128<uint64_t, uint64_t> actual = i / j;
+ uint128_t expected = a / b;
+ VERIFY_EQUAL(actual, expected);
+ }
+ }
+ }
+ }
+}
+
+void test_misc1() {
+ uint64_t incr = internal::random<uint64_t>(1, 9999999999);
+ for (uint64_t i2 = 1; i2 < 100 * incr; i2 += incr) {
+ TensorUInt128<static_val<0>, uint64_t> i(0, i2);
+ uint128_t a = static_cast<uint128_t>(i2);
+ for (uint64_t j2 = 1; j2 < 100 * incr; j2 += incr) {
+ TensorUInt128<static_val<0>, uint64_t> j(0, j2);
+ uint128_t b = static_cast<uint128_t>(j2);
+ uint64_t actual = (i * j).upper();
+ uint64_t expected = (a * b) >> 64;
+ VERIFY_IS_EQUAL(actual, expected);
+ }
+ }
+}
+
+void test_misc2() {
+ int64_t incr = internal::random<int64_t>(1, 100);
+ for (int64_t log_div = 0; log_div < 63; ++log_div) {
+ for (int64_t divider = 1; divider <= 1000000 * incr; divider += incr) {
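+      // Presumably mirrors the magic-number computation used for fast integer division
+      // (cf. TensorIntDiv.h): floor(2^(64+log_div) / divider) - 2^64 + 1, truncated to 64 bits.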
+ uint64_t expected = (static_cast<uint128_t>(1) << (64+log_div)) / static_cast<uint128_t>(divider) - (static_cast<uint128_t>(1) << 64) + 1;
+ uint64_t shift = 1ULL << log_div;
+
+ TensorUInt128<uint64_t, uint64_t> result = (TensorUInt128<uint64_t, static_val<0> >(shift, 0) / TensorUInt128<static_val<0>, uint64_t>(divider) - TensorUInt128<static_val<1>, static_val<0> >(1, 0) + TensorUInt128<static_val<0>, static_val<1> >(1));
+ uint64_t actual = static_cast<uint64_t>(result);
+ VERIFY_IS_EQUAL(actual, expected);
+ }
+ }
+}
+#endif
+
+
+void test_cxx11_tensor_uint128()
+{
+#ifdef EIGEN_NO_INT128
+  // Skip the test on compilers that don't support 128-bit integers natively
+ return;
+#else
+ CALL_SUBTEST_1(test_add());
+ CALL_SUBTEST_2(test_sub());
+ CALL_SUBTEST_3(test_mul());
+ CALL_SUBTEST_4(test_div());
+ CALL_SUBTEST_5(test_misc1());
+ CALL_SUBTEST_6(test_misc2());
+#endif
+}
diff --git a/unsupported/test/cxx11_tensor_volume_patch.cpp b/unsupported/test/cxx11_tensor_volume_patch.cpp
new file mode 100644
index 000000000..ca6840f3b
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_volume_patch.cpp
@@ -0,0 +1,112 @@
+#include "main.h"
+
+#include <Eigen/CXX11/Tensor>
+
+using Eigen::Tensor;
+
+static void test_single_voxel_patch()
+{
+ Tensor<float, 5> tensor(4,2,3,5,7);
+ tensor.setRandom();
+ Tensor<float, 5, RowMajor> tensor_row_major = tensor.swap_layout();
+
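+  // Extracting 1x1x1 patches yields one patch per voxel, so the patch data must
+  // match the source tensor coefficient for coefficient in either layout.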
+ Tensor<float, 6> single_voxel_patch;
+ single_voxel_patch = tensor.extract_volume_patches(1, 1, 1);
+ VERIFY_IS_EQUAL(single_voxel_patch.dimension(0), 4);
+ VERIFY_IS_EQUAL(single_voxel_patch.dimension(1), 1);
+ VERIFY_IS_EQUAL(single_voxel_patch.dimension(2), 1);
+ VERIFY_IS_EQUAL(single_voxel_patch.dimension(3), 1);
+ VERIFY_IS_EQUAL(single_voxel_patch.dimension(4), 2 * 3 * 5);
+ VERIFY_IS_EQUAL(single_voxel_patch.dimension(5), 7);
+
+ Tensor<float, 6, RowMajor> single_voxel_patch_row_major;
+ single_voxel_patch_row_major = tensor_row_major.extract_volume_patches(1, 1, 1);
+ VERIFY_IS_EQUAL(single_voxel_patch_row_major.dimension(0), 7);
+ VERIFY_IS_EQUAL(single_voxel_patch_row_major.dimension(1), 2 * 3 * 5);
+ VERIFY_IS_EQUAL(single_voxel_patch_row_major.dimension(2), 1);
+ VERIFY_IS_EQUAL(single_voxel_patch_row_major.dimension(3), 1);
+ VERIFY_IS_EQUAL(single_voxel_patch_row_major.dimension(4), 1);
+ VERIFY_IS_EQUAL(single_voxel_patch_row_major.dimension(5), 4);
+
+ for (int i = 0; i < tensor.size(); ++i) {
+ VERIFY_IS_EQUAL(tensor.data()[i], single_voxel_patch.data()[i]);
+ VERIFY_IS_EQUAL(tensor_row_major.data()[i], single_voxel_patch_row_major.data()[i]);
+ VERIFY_IS_EQUAL(tensor.data()[i], tensor_row_major.data()[i]);
+ }
+}
+
+
+static void test_entire_volume_patch()
+{
+ const int depth = 4;
+ const int patch_z = 2;
+ const int patch_y = 3;
+ const int patch_x = 5;
+ const int batch = 7;
+
+ Tensor<float, 5> tensor(depth, patch_z, patch_y, patch_x, batch);
+ tensor.setRandom();
+ Tensor<float, 5, RowMajor> tensor_row_major = tensor.swap_layout();
+
+ Tensor<float, 6> entire_volume_patch;
+ entire_volume_patch = tensor.extract_volume_patches(patch_z, patch_y, patch_x);
+ VERIFY_IS_EQUAL(entire_volume_patch.dimension(0), depth);
+ VERIFY_IS_EQUAL(entire_volume_patch.dimension(1), patch_z);
+ VERIFY_IS_EQUAL(entire_volume_patch.dimension(2), patch_y);
+ VERIFY_IS_EQUAL(entire_volume_patch.dimension(3), patch_x);
+ VERIFY_IS_EQUAL(entire_volume_patch.dimension(4), patch_z * patch_y * patch_x);
+ VERIFY_IS_EQUAL(entire_volume_patch.dimension(5), batch);
+
+ Tensor<float, 6, RowMajor> entire_volume_patch_row_major;
+ entire_volume_patch_row_major = tensor_row_major.extract_volume_patches(patch_z, patch_y, patch_x);
+ VERIFY_IS_EQUAL(entire_volume_patch_row_major.dimension(0), batch);
+ VERIFY_IS_EQUAL(entire_volume_patch_row_major.dimension(1), patch_z * patch_y * patch_x);
+ VERIFY_IS_EQUAL(entire_volume_patch_row_major.dimension(2), patch_x);
+ VERIFY_IS_EQUAL(entire_volume_patch_row_major.dimension(3), patch_y);
+ VERIFY_IS_EQUAL(entire_volume_patch_row_major.dimension(4), patch_z);
+ VERIFY_IS_EQUAL(entire_volume_patch_row_major.dimension(5), depth);
+
+ const int dz = patch_z - 1;
+ const int dy = patch_y - 1;
+ const int dx = patch_x - 1;
+
+ const int forward_pad_z = dz - dz / 2;
+ const int forward_pad_y = dy - dy / 2;
+ const int forward_pad_x = dx - dx / 2;
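+  // SAME-style padding: the leading (pre-volume) padding takes the ceiling half,
+  // d - d/2, which keeps each patch centered on its source voxel.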
+
+ for (int pz = 0; pz < patch_z; pz++) {
+ for (int py = 0; py < patch_y; py++) {
+ for (int px = 0; px < patch_x; px++) {
+ const int patchId = pz + patch_z * (py + px * patch_y);
+ for (int z = 0; z < patch_z; z++) {
+ for (int y = 0; y < patch_y; y++) {
+ for (int x = 0; x < patch_x; x++) {
+ for (int b = 0; b < batch; b++) {
+ for (int d = 0; d < depth; d++) {
+ float expected = 0.0f;
+ float expected_row_major = 0.0f;
+ const int eff_z = z - forward_pad_z + pz;
+ const int eff_y = y - forward_pad_y + py;
+ const int eff_x = x - forward_pad_x + px;
+ if (eff_z >= 0 && eff_y >= 0 && eff_x >= 0 &&
+ eff_z < patch_z && eff_y < patch_y && eff_x < patch_x) {
+ expected = tensor(d, eff_z, eff_y, eff_x, b);
+ expected_row_major = tensor_row_major(b, eff_x, eff_y, eff_z, d);
+ }
+ VERIFY_IS_EQUAL(entire_volume_patch(d, z, y, x, patchId, b), expected);
+ VERIFY_IS_EQUAL(entire_volume_patch_row_major(b, patchId, x, y, z, d), expected_row_major);
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+void test_cxx11_tensor_volume_patch()
+{
+ CALL_SUBTEST(test_single_voxel_patch());
+ CALL_SUBTEST(test_entire_volume_patch());
+}
diff --git a/unsupported/test/forward_adolc.cpp b/unsupported/test/forward_adolc.cpp
index d4baafe62..866db8e86 100644
--- a/unsupported/test/forward_adolc.cpp
+++ b/unsupported/test/forward_adolc.cpp
@@ -13,8 +13,6 @@
#define NUMBER_DIRECTIONS 16
#include <unsupported/Eigen/AdolcForward>
-int adtl::ADOLC_numDir;
-
template<typename Vector>
EIGEN_DONT_INLINE typename Vector::Scalar foo(const Vector& p)
{
@@ -123,7 +121,7 @@ template<typename Func> void adolc_forward_jacobian(const Func& f)
void test_forward_adolc()
{
- adtl::ADOLC_numDir = NUMBER_DIRECTIONS;
+ adtl::setNumDir(NUMBER_DIRECTIONS);
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST(( adolc_forward_jacobian(TestFunc1<double,2,2>()) ));
diff --git a/unsupported/test/jacobisvd.cpp b/unsupported/test/jacobisvd.cpp
deleted file mode 100644
index b4e884eee..000000000
--- a/unsupported/test/jacobisvd.cpp
+++ /dev/null
@@ -1,198 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
-// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#include "svd_common.h"
-
-template<typename MatrixType, int QRPreconditioner>
-void jacobisvd_check_full(const MatrixType& m, const JacobiSVD<MatrixType, QRPreconditioner>& svd)
-{
- svd_check_full<MatrixType, JacobiSVD<MatrixType, QRPreconditioner > >(m, svd);
-}
-
-template<typename MatrixType, int QRPreconditioner>
-void jacobisvd_compare_to_full(const MatrixType& m,
- unsigned int computationOptions,
- const JacobiSVD<MatrixType, QRPreconditioner>& referenceSvd)
-{
- svd_compare_to_full<MatrixType, JacobiSVD<MatrixType, QRPreconditioner> >(m, computationOptions, referenceSvd);
-}
-
-
-template<typename MatrixType, int QRPreconditioner>
-void jacobisvd_solve(const MatrixType& m, unsigned int computationOptions)
-{
- svd_solve< MatrixType, JacobiSVD< MatrixType, QRPreconditioner > >(m, computationOptions);
-}
-
-
-
-template<typename MatrixType, int QRPreconditioner>
-void jacobisvd_test_all_computation_options(const MatrixType& m)
-{
-
- if (QRPreconditioner == NoQRPreconditioner && m.rows() != m.cols())
- return;
-
- JacobiSVD< MatrixType, QRPreconditioner > fullSvd(m, ComputeFullU|ComputeFullV);
- svd_test_computation_options_1< MatrixType, JacobiSVD< MatrixType, QRPreconditioner > >(m, fullSvd);
-
- if(QRPreconditioner == FullPivHouseholderQRPreconditioner)
- return;
- svd_test_computation_options_2< MatrixType, JacobiSVD< MatrixType, QRPreconditioner > >(m, fullSvd);
-
-}
-
-template<typename MatrixType>
-void jacobisvd(const MatrixType& a = MatrixType(), bool pickrandom = true)
-{
- MatrixType m = pickrandom ? MatrixType::Random(a.rows(), a.cols()) : a;
-
- jacobisvd_test_all_computation_options<MatrixType, FullPivHouseholderQRPreconditioner>(m);
- jacobisvd_test_all_computation_options<MatrixType, ColPivHouseholderQRPreconditioner>(m);
- jacobisvd_test_all_computation_options<MatrixType, HouseholderQRPreconditioner>(m);
- jacobisvd_test_all_computation_options<MatrixType, NoQRPreconditioner>(m);
-}
-
-
-template<typename MatrixType>
-void jacobisvd_verify_assert(const MatrixType& m)
-{
-
- svd_verify_assert<MatrixType, JacobiSVD< MatrixType > >(m);
-
- typedef typename MatrixType::Index Index;
- Index rows = m.rows();
- Index cols = m.cols();
-
- enum {
- RowsAtCompileTime = MatrixType::RowsAtCompileTime,
- ColsAtCompileTime = MatrixType::ColsAtCompileTime
- };
-
- MatrixType a = MatrixType::Zero(rows, cols);
- a.setZero();
-
- if (ColsAtCompileTime == Dynamic)
- {
- JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner> svd_fullqr;
- VERIFY_RAISES_ASSERT(svd_fullqr.compute(a, ComputeFullU|ComputeThinV))
- VERIFY_RAISES_ASSERT(svd_fullqr.compute(a, ComputeThinU|ComputeThinV))
- VERIFY_RAISES_ASSERT(svd_fullqr.compute(a, ComputeThinU|ComputeFullV))
- }
-}
-
-template<typename MatrixType>
-void jacobisvd_method()
-{
- enum { Size = MatrixType::RowsAtCompileTime };
- typedef typename MatrixType::RealScalar RealScalar;
- typedef Matrix<RealScalar, Size, 1> RealVecType;
- MatrixType m = MatrixType::Identity();
- VERIFY_IS_APPROX(m.jacobiSvd().singularValues(), RealVecType::Ones());
- VERIFY_RAISES_ASSERT(m.jacobiSvd().matrixU());
- VERIFY_RAISES_ASSERT(m.jacobiSvd().matrixV());
- VERIFY_IS_APPROX(m.jacobiSvd(ComputeFullU|ComputeFullV).solve(m), m);
-}
-
-
-
-template<typename MatrixType>
-void jacobisvd_inf_nan()
-{
- svd_inf_nan<MatrixType, JacobiSVD< MatrixType > >();
-}
-
-
-// Regression test for bug 286: JacobiSVD loops indefinitely with some
-// matrices containing denormal numbers.
-void jacobisvd_bug286()
-{
-#if defined __INTEL_COMPILER
-// shut up warning #239: floating point underflow
-#pragma warning push
-#pragma warning disable 239
-#endif
- Matrix2d M;
- M << -7.90884e-313, -4.94e-324,
- 0, 5.60844e-313;
-#if defined __INTEL_COMPILER
-#pragma warning pop
-#endif
- JacobiSVD<Matrix2d> svd;
- svd.compute(M); // just check we don't loop indefinitely
-}
-
-
-void jacobisvd_preallocate()
-{
- svd_preallocate< JacobiSVD <MatrixXf> >();
-}
-
-void test_jacobisvd()
-{
- CALL_SUBTEST_11(( jacobisvd<Matrix<double,Dynamic,Dynamic> >
- (Matrix<double,Dynamic,Dynamic>(16, 6)) ));
-
- CALL_SUBTEST_3(( jacobisvd_verify_assert(Matrix3f()) ));
- CALL_SUBTEST_4(( jacobisvd_verify_assert(Matrix4d()) ));
- CALL_SUBTEST_7(( jacobisvd_verify_assert(MatrixXf(10,12)) ));
- CALL_SUBTEST_8(( jacobisvd_verify_assert(MatrixXcd(7,5)) ));
-
- for(int i = 0; i < g_repeat; i++) {
- Matrix2cd m;
- m << 0, 1,
- 0, 1;
- CALL_SUBTEST_1(( jacobisvd(m, false) ));
- m << 1, 0,
- 1, 0;
- CALL_SUBTEST_1(( jacobisvd(m, false) ));
-
- Matrix2d n;
- n << 0, 0,
- 0, 0;
- CALL_SUBTEST_2(( jacobisvd(n, false) ));
- n << 0, 0,
- 0, 1;
- CALL_SUBTEST_2(( jacobisvd(n, false) ));
-
- CALL_SUBTEST_3(( jacobisvd<Matrix3f>() ));
- CALL_SUBTEST_4(( jacobisvd<Matrix4d>() ));
- CALL_SUBTEST_5(( jacobisvd<Matrix<float,3,5> >() ));
- CALL_SUBTEST_6(( jacobisvd<Matrix<double,Dynamic,2> >(Matrix<double,Dynamic,2>(10,2)) ));
-
- int r = internal::random<int>(1, 30),
- c = internal::random<int>(1, 30);
- CALL_SUBTEST_7(( jacobisvd<MatrixXf>(MatrixXf(r,c)) ));
- CALL_SUBTEST_8(( jacobisvd<MatrixXcd>(MatrixXcd(r,c)) ));
- (void) r;
- (void) c;
-
- // Test on inf/nan matrix
- CALL_SUBTEST_7( jacobisvd_inf_nan<MatrixXf>() );
- }
-
- CALL_SUBTEST_7(( jacobisvd<MatrixXf>(MatrixXf(internal::random<int>(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/2), internal::random<int>(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/2))) ));
- CALL_SUBTEST_8(( jacobisvd<MatrixXcd>(MatrixXcd(internal::random<int>(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/3), internal::random<int>(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/3))) ));
-
-
- // test matrixbase method
- CALL_SUBTEST_1(( jacobisvd_method<Matrix2cd>() ));
- CALL_SUBTEST_3(( jacobisvd_method<Matrix3f>() ));
-
-
- // Test problem size constructors
- CALL_SUBTEST_7( JacobiSVD<MatrixXf>(10,10) );
-
- // Check that preallocation avoids subsequent mallocs
- CALL_SUBTEST_9( jacobisvd_preallocate() );
-
- // Regression check for bug 286
- CALL_SUBTEST_2( jacobisvd_bug286() );
-}
diff --git a/unsupported/test/kronecker_product.cpp b/unsupported/test/kronecker_product.cpp
index 8ddc6ec28..e770049e5 100644
--- a/unsupported/test/kronecker_product.cpp
+++ b/unsupported/test/kronecker_product.cpp
@@ -9,12 +9,12 @@
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+#ifdef EIGEN_TEST_PART_1
#include "sparse.h"
#include <Eigen/SparseExtra>
#include <Eigen/KroneckerProduct>
-
template<typename MatrixType>
void check_dimension(const MatrixType& ab, const int rows, const int cols)
{
@@ -107,31 +107,34 @@ void test_kronecker_product()
SparseMatrix<double,RowMajor> SM_row_a(SM_a), SM_row_b(SM_b);
- // test kroneckerProduct(DM_block,DM,DM_fixedSize)
+ // test DM_fixedSize = kroneckerProduct(DM_block,DM)
Matrix<double, 6, 6> DM_fix_ab = kroneckerProduct(DM_a.topLeftCorner<2,3>(),DM_b);
CALL_SUBTEST(check_kronecker_product(DM_fix_ab));
+ CALL_SUBTEST(check_kronecker_product(kroneckerProduct(DM_a.topLeftCorner<2,3>(),DM_b)));
for(int i=0;i<DM_fix_ab.rows();++i)
for(int j=0;j<DM_fix_ab.cols();++j)
VERIFY_IS_APPROX(kroneckerProduct(DM_a,DM_b).coeff(i,j), DM_fix_ab(i,j));
- // test kroneckerProduct(DM,DM,DM_block)
+ // test DM_block = kroneckerProduct(DM,DM)
MatrixXd DM_block_ab(10,15);
DM_block_ab.block<6,6>(2,5) = kroneckerProduct(DM_a,DM_b);
CALL_SUBTEST(check_kronecker_product(DM_block_ab.block<6,6>(2,5)));
- // test kroneckerProduct(DM,DM,DM)
+ // test DM = kroneckerProduct(DM,DM)
MatrixXd DM_ab = kroneckerProduct(DM_a,DM_b);
CALL_SUBTEST(check_kronecker_product(DM_ab));
+ CALL_SUBTEST(check_kronecker_product(kroneckerProduct(DM_a,DM_b)));
- // test kroneckerProduct(SM,DM,SM)
+ // test SM = kroneckerProduct(SM,DM)
SparseMatrix<double> SM_ab = kroneckerProduct(SM_a,DM_b);
CALL_SUBTEST(check_kronecker_product(SM_ab));
SparseMatrix<double,RowMajor> SM_ab2 = kroneckerProduct(SM_a,DM_b);
CALL_SUBTEST(check_kronecker_product(SM_ab2));
+ CALL_SUBTEST(check_kronecker_product(kroneckerProduct(SM_a,DM_b)));
- // test kroneckerProduct(DM,SM,SM)
+ // test SM = kroneckerProduct(DM,SM)
SM_ab.setZero();
SM_ab.insert(0,0)=37.0;
SM_ab = kroneckerProduct(DM_a,SM_b);
@@ -140,8 +143,9 @@ void test_kronecker_product()
SM_ab2.insert(0,0)=37.0;
SM_ab2 = kroneckerProduct(DM_a,SM_b);
CALL_SUBTEST(check_kronecker_product(SM_ab2));
+ CALL_SUBTEST(check_kronecker_product(kroneckerProduct(DM_a,SM_b)));
- // test kroneckerProduct(SM,SM,SM)
+ // test SM = kroneckerProduct(SM,SM)
SM_ab.resize(2,33);
SM_ab.insert(0,0)=37.0;
SM_ab = kroneckerProduct(SM_a,SM_b);
@@ -150,8 +154,9 @@ void test_kronecker_product()
SM_ab2.insert(0,0)=37.0;
SM_ab2 = kroneckerProduct(SM_a,SM_b);
CALL_SUBTEST(check_kronecker_product(SM_ab2));
+ CALL_SUBTEST(check_kronecker_product(kroneckerProduct(SM_a,SM_b)));
- // test kroneckerProduct(SM,SM,SM) with sparse pattern
+ // test SM = kroneckerProduct(SM,SM) with sparse pattern
SM_a.resize(4,5);
SM_b.resize(3,2);
SM_a.resizeNonZeros(0);
@@ -169,7 +174,7 @@ void test_kronecker_product()
SM_ab = kroneckerProduct(SM_a,SM_b);
CALL_SUBTEST(check_sparse_kronecker_product(SM_ab));
- // test dimension of result of kroneckerProduct(DM,DM,DM)
+ // test dimension of result of DM = kroneckerProduct(DM,DM)
MatrixXd DM_a2(2,1);
MatrixXd DM_b2(5,4);
MatrixXd DM_ab2 = kroneckerProduct(DM_a2,DM_b2);
@@ -178,4 +183,70 @@ void test_kronecker_product()
DM_b2.resize(4,8);
DM_ab2 = kroneckerProduct(DM_a2,DM_b2);
CALL_SUBTEST(check_dimension(DM_ab2,10*4,9*8));
+
+ for(int i = 0; i < g_repeat; i++)
+ {
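+    // Randomized cross-check: every sparse/dense operand combination of
+    // kroneckerProduct must agree with the dense reference product.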
+ double density = Eigen::internal::random<double>(0.01,0.5);
+ int ra = Eigen::internal::random<int>(1,50);
+ int ca = Eigen::internal::random<int>(1,50);
+ int rb = Eigen::internal::random<int>(1,50);
+ int cb = Eigen::internal::random<int>(1,50);
+ SparseMatrix<float,ColMajor> sA(ra,ca), sB(rb,cb), sC;
+ SparseMatrix<float,RowMajor> sC2;
+ MatrixXf dA(ra,ca), dB(rb,cb), dC;
+ initSparse(density, dA, sA);
+ initSparse(density, dB, sB);
+
+ sC = kroneckerProduct(sA,sB);
+ dC = kroneckerProduct(dA,dB);
+ VERIFY_IS_APPROX(MatrixXf(sC),dC);
+
+ sC = kroneckerProduct(sA.transpose(),sB);
+ dC = kroneckerProduct(dA.transpose(),dB);
+ VERIFY_IS_APPROX(MatrixXf(sC),dC);
+
+ sC = kroneckerProduct(sA.transpose(),sB.transpose());
+ dC = kroneckerProduct(dA.transpose(),dB.transpose());
+ VERIFY_IS_APPROX(MatrixXf(sC),dC);
+
+ sC = kroneckerProduct(sA,sB.transpose());
+ dC = kroneckerProduct(dA,dB.transpose());
+ VERIFY_IS_APPROX(MatrixXf(sC),dC);
+
+ sC2 = kroneckerProduct(sA,sB);
+ dC = kroneckerProduct(dA,dB);
+ VERIFY_IS_APPROX(MatrixXf(sC2),dC);
+
+ sC2 = kroneckerProduct(dA,sB);
+ dC = kroneckerProduct(dA,dB);
+ VERIFY_IS_APPROX(MatrixXf(sC2),dC);
+
+ sC2 = kroneckerProduct(sA,dB);
+ dC = kroneckerProduct(dA,dB);
+ VERIFY_IS_APPROX(MatrixXf(sC2),dC);
+
+ sC2 = kroneckerProduct(2*sA,sB);
+ dC = kroneckerProduct(2*dA,dB);
+ VERIFY_IS_APPROX(MatrixXf(sC2),dC);
+ }
+}
+
+#endif
+
+#ifdef EIGEN_TEST_PART_2
+
+// check that a purely dense kroneckerProduct compiles and runs without the Sparse module
+
+#include "main.h"
+#include <Eigen/KroneckerProduct>
+
+void test_kronecker_product()
+{
+ MatrixXd a(2,2), b(3,3), c;
+ a.setRandom();
+ b.setRandom();
+ c = kroneckerProduct(a,b);
+ VERIFY_IS_APPROX(c.block(3,3,3,3), a(1,1)*b);
}
+
+#endif
diff --git a/unsupported/test/levenberg_marquardt.cpp b/unsupported/test/levenberg_marquardt.cpp
index 04464727d..64f168c16 100644
--- a/unsupported/test/levenberg_marquardt.cpp
+++ b/unsupported/test/levenberg_marquardt.cpp
@@ -9,6 +9,9 @@
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+// FIXME: These tests all check against hard-coded reference values. Ideally, parameters and start estimates should be randomized.
+
+
#include <stdio.h>
#include "main.h"
@@ -20,6 +23,9 @@
using std::sqrt;
+// tolerance for checking the number of iterations; written as 4/3 on purpose:
+// used as `N * LM_EVAL_COUNT_TOL`, it evaluates as (N*4)/3 in integer arithmetic
+#define LM_EVAL_COUNT_TOL 4/3
+
struct lmder_functor : DenseFunctor<double>
{
lmder_functor(void): DenseFunctor<double>(3,15) {}
@@ -275,7 +281,7 @@ const double chwirut2_functor::m_y[54] = { 92.9000E0 ,57.1000E0 ,31.0500E0 ,11.5
void testNistChwirut2(void)
{
const int n=3;
- int info;
+ LevenbergMarquardtSpace::Status info;
VectorXd x(n);
@@ -610,7 +616,7 @@ const double lanczos1_functor::y[24] = { 2.513400000000E+00 ,2.044333373291E+00
void testNistLanczos1(void)
{
const int n=6;
- int info;
+ LevenbergMarquardtSpace::Status info;
VectorXd x(n);
@@ -624,11 +630,11 @@ void testNistLanczos1(void)
info = lm.minimize(x);
// check return value
- VERIFY_IS_EQUAL(info, 2);
+ VERIFY_IS_EQUAL(info, LevenbergMarquardtSpace::RelativeErrorTooSmall);
VERIFY_IS_EQUAL(lm.nfev(), 79);
VERIFY_IS_EQUAL(lm.njev(), 72);
// check norm^2
-// VERIFY_IS_APPROX(lm.fvec().squaredNorm(), 1.430899764097e-25); // should be 1.4307867721E-25, but nist results are on 128-bit floats
+ VERIFY(lm.fvec().squaredNorm() <= 1.4307867721E-25);
// check x
VERIFY_IS_APPROX(x[0], 9.5100000027E-02);
VERIFY_IS_APPROX(x[1], 1.0000000001E+00);
@@ -645,11 +651,11 @@ void testNistLanczos1(void)
info = lm.minimize(x);
// check return value
- VERIFY_IS_EQUAL(info, 2);
+ VERIFY_IS_EQUAL(info, LevenbergMarquardtSpace::RelativeErrorTooSmall);
VERIFY_IS_EQUAL(lm.nfev(), 9);
VERIFY_IS_EQUAL(lm.njev(), 8);
// check norm^2
-// VERIFY_IS_APPROX(lm.fvec().squaredNorm(), 1.428595533845e-25); // should be 1.4307867721E-25, but nist results are on 128-bit floats
+ VERIFY(lm.fvec().squaredNorm() <= 1.4307867721E-25);
// check x
VERIFY_IS_APPROX(x[0], 9.5100000027E-02);
VERIFY_IS_APPROX(x[1], 1.0000000001E+00);
@@ -696,7 +702,7 @@ const double rat42_functor::y[9] = { 8.930E0 ,10.800E0 ,18.590E0 ,22.330E0 ,39.3
void testNistRat42(void)
{
const int n=3;
- int info;
+ LevenbergMarquardtSpace::Status info;
VectorXd x(n);
@@ -710,7 +716,7 @@ void testNistRat42(void)
info = lm.minimize(x);
// check return value
- VERIFY_IS_EQUAL(info, 1);
+ VERIFY_IS_EQUAL(info, LevenbergMarquardtSpace::RelativeReductionTooSmall);
VERIFY_IS_EQUAL(lm.nfev(), 10);
VERIFY_IS_EQUAL(lm.njev(), 8);
// check norm^2
@@ -728,7 +734,7 @@ void testNistRat42(void)
info = lm.minimize(x);
// check return value
- VERIFY_IS_EQUAL(info, 1);
+ VERIFY_IS_EQUAL(info, LevenbergMarquardtSpace::RelativeReductionTooSmall);
VERIFY_IS_EQUAL(lm.nfev(), 6);
VERIFY_IS_EQUAL(lm.njev(), 5);
// check norm^2
@@ -774,7 +780,7 @@ const double MGH10_functor::y[16] = { 3.478000E+04, 2.861000E+04, 2.365000E+04,
void testNistMGH10(void)
{
const int n=3;
- int info;
+ LevenbergMarquardtSpace::Status info;
VectorXd x(n);
@@ -786,17 +792,26 @@ void testNistMGH10(void)
MGH10_functor functor;
LevenbergMarquardt<MGH10_functor> lm(functor);
info = lm.minimize(x);
+ ++g_test_level;
+ VERIFY_IS_EQUAL(info, LevenbergMarquardtSpace::RelativeReductionTooSmall);
+ --g_test_level;
+ // was: VERIFY_IS_EQUAL(info, 1);
- // check return value
- VERIFY_IS_EQUAL(info, 1);
- VERIFY_IS_EQUAL(lm.nfev(), 284 );
- VERIFY_IS_EQUAL(lm.njev(), 249 );
// check norm^2
VERIFY_IS_APPROX(lm.fvec().squaredNorm(), 8.7945855171E+01);
// check x
VERIFY_IS_APPROX(x[0], 5.6096364710E-03);
VERIFY_IS_APPROX(x[1], 6.1813463463E+03);
VERIFY_IS_APPROX(x[2], 3.4522363462E+02);
+
+ // check return value
+
+ ++g_test_level;
+ VERIFY_IS_EQUAL(lm.nfev(), 284 );
+ VERIFY_IS_EQUAL(lm.njev(), 249 );
+ --g_test_level;
+ VERIFY(lm.nfev() < 284 * LM_EVAL_COUNT_TOL);
+ VERIFY(lm.njev() < 249 * LM_EVAL_COUNT_TOL);
/*
* Second try
@@ -804,17 +819,25 @@ void testNistMGH10(void)
x<< 0.02, 4000., 250.;
// do the computation
info = lm.minimize(x);
+ ++g_test_level;
+ VERIFY_IS_EQUAL(info, LevenbergMarquardtSpace::RelativeReductionTooSmall);
+ // was: VERIFY_IS_EQUAL(info, 1);
+ --g_test_level;
- // check return value
- VERIFY_IS_EQUAL(info, 1);
- VERIFY_IS_EQUAL(lm.nfev(), 126);
- VERIFY_IS_EQUAL(lm.njev(), 116);
// check norm^2
VERIFY_IS_APPROX(lm.fvec().squaredNorm(), 8.7945855171E+01);
// check x
VERIFY_IS_APPROX(x[0], 5.6096364710E-03);
VERIFY_IS_APPROX(x[1], 6.1813463463E+03);
VERIFY_IS_APPROX(x[2], 3.4522363462E+02);
+
+ // check return value
+ ++g_test_level;
+ VERIFY_IS_EQUAL(lm.nfev(), 126);
+ VERIFY_IS_EQUAL(lm.njev(), 116);
+ --g_test_level;
+ VERIFY(lm.nfev() < 126 * LM_EVAL_COUNT_TOL);
+ VERIFY(lm.njev() < 116 * LM_EVAL_COUNT_TOL);
}
@@ -866,15 +889,16 @@ void testNistBoxBOD(void)
lm.setFactor(10);
info = lm.minimize(x);
- // check return value
- VERIFY_IS_EQUAL(info, 1);
- VERIFY_IS_EQUAL(lm.nfev(), 31);
- VERIFY_IS_EQUAL(lm.njev(), 25);
// check norm^2
VERIFY_IS_APPROX(lm.fvec().squaredNorm(), 1.1680088766E+03);
// check x
VERIFY_IS_APPROX(x[0], 2.1380940889E+02);
VERIFY_IS_APPROX(x[1], 5.4723748542E-01);
+
+ // check return value
+ VERIFY_IS_EQUAL(info, 1);
+ VERIFY(lm.nfev() < 31); // 31
+ VERIFY(lm.njev() < 25); // 25
/*
* Second try
@@ -888,8 +912,12 @@ void testNistBoxBOD(void)
// check return value
VERIFY_IS_EQUAL(info, 1);
- VERIFY_IS_EQUAL(lm.nfev(), 15 );
- VERIFY_IS_EQUAL(lm.njev(), 14 );
+ ++g_test_level;
+ VERIFY_IS_EQUAL(lm.nfev(), 16 );
+ VERIFY_IS_EQUAL(lm.njev(), 15 );
+ --g_test_level;
+ VERIFY(lm.nfev() < 16 * LM_EVAL_COUNT_TOL);
+ VERIFY(lm.njev() < 15 * LM_EVAL_COUNT_TOL);
// check norm^2
VERIFY_IS_APPROX(lm.fvec().squaredNorm(), 1.1680088766E+03);
// check x
@@ -948,10 +976,6 @@ void testNistMGH17(void)
lm.setMaxfev(1000);
info = lm.minimize(x);
- // check return value
-// VERIFY_IS_EQUAL(info, 2); //FIXME Use (lm.info() == Success)
-// VERIFY_IS_EQUAL(lm.nfev(), 602 );
- VERIFY_IS_EQUAL(lm.njev(), 545 );
// check norm^2
VERIFY_IS_APPROX(lm.fvec().squaredNorm(), 5.4648946975E-05);
// check x
@@ -960,6 +984,11 @@ void testNistMGH17(void)
VERIFY_IS_APPROX(x[2], -1.4646871366E+00);
VERIFY_IS_APPROX(x[3], 1.2867534640E-02);
VERIFY_IS_APPROX(x[4], 2.2122699662E-02);
+
+ // check return value
+// VERIFY_IS_EQUAL(info, 2); //FIXME Use (lm.info() == Success)
+ VERIFY(lm.nfev() < 700 ); // 602
+ VERIFY(lm.njev() < 600 ); // 545
/*
* Second try
@@ -1035,10 +1064,6 @@ void testNistMGH09(void)
lm.setMaxfev(1000);
info = lm.minimize(x);
- // check return value
- VERIFY_IS_EQUAL(info, 1);
- VERIFY_IS_EQUAL(lm.nfev(), 490 );
- VERIFY_IS_EQUAL(lm.njev(), 376 );
// check norm^2
VERIFY_IS_APPROX(lm.fvec().squaredNorm(), 3.0750560385E-04);
// check x
@@ -1046,6 +1071,10 @@ void testNistMGH09(void)
VERIFY_IS_APPROX(x[1], 0.19126423573); // should be 1.9128232873E-01
VERIFY_IS_APPROX(x[2], 0.12305309914); // should be 1.2305650693E-01
VERIFY_IS_APPROX(x[3], 0.13605395375); // should be 1.3606233068E-01
+ // check return value
+ VERIFY_IS_EQUAL(info, 1);
+ VERIFY(lm.nfev() < 510 ); // 490
+ VERIFY(lm.njev() < 400 ); // 376
/*
* Second try
diff --git a/unsupported/test/matrix_function.cpp b/unsupported/test/matrix_function.cpp
index 3c76cfb65..7c9b68a3c 100644
--- a/unsupported/test/matrix_function.cpp
+++ b/unsupported/test/matrix_function.cpp
@@ -102,7 +102,7 @@ void testMatrixExponential(const MatrixType& A)
typedef typename NumTraits<Scalar>::Real RealScalar;
typedef std::complex<RealScalar> ComplexScalar;
- VERIFY_IS_APPROX(A.exp(), A.matrixFunction(StdStemFunctions<ComplexScalar>::exp));
+ VERIFY_IS_APPROX(A.exp(), A.matrixFunction(internal::stem_function_exp<ComplexScalar>));
}
template<typename MatrixType>
@@ -113,8 +113,8 @@ void testMatrixLogarithm(const MatrixType& A)
MatrixType scaledA;
RealScalar maxImagPartOfSpectrum = A.eigenvalues().imag().cwiseAbs().maxCoeff();
- if (maxImagPartOfSpectrum >= 0.9 * M_PI)
- scaledA = A * 0.9 * M_PI / maxImagPartOfSpectrum;
+ if (maxImagPartOfSpectrum >= RealScalar(0.9L * EIGEN_PI))
+ scaledA = A * RealScalar(0.9L * EIGEN_PI) / maxImagPartOfSpectrum;
else
scaledA = A;
diff --git a/unsupported/test/matrix_functions.h b/unsupported/test/matrix_functions.h
index 5817caef6..4e2636404 100644
--- a/unsupported/test/matrix_functions.h
+++ b/unsupported/test/matrix_functions.h
@@ -10,27 +10,47 @@
#include "main.h"
#include <unsupported/Eigen/MatrixFunctions>
+// For complex matrices, any matrix is fine.
+template<typename MatrixType, int IsComplex = NumTraits<typename internal::traits<MatrixType>::Scalar>::IsComplex>
+struct processTriangularMatrix
+{
+ static void run(MatrixType&, MatrixType&, const MatrixType&)
+ { }
+};
+
+// For real matrices, make sure none of the eigenvalues are negative.
+template<typename MatrixType>
+struct processTriangularMatrix<MatrixType,0>
+{
+ static void run(MatrixType& m, MatrixType& T, const MatrixType& U)
+ {
+ const Index size = m.cols();
+
+ for (Index i=0; i < size; ++i) {
+ if (i == size - 1 || T.coeff(i+1,i) == 0)
+ T.coeffRef(i,i) = std::abs(T.coeff(i,i));
+ else
+ ++i;
+ }
+ m = U * T * U.transpose();
+ }
+};
+
template <typename MatrixType, int IsComplex = NumTraits<typename internal::traits<MatrixType>::Scalar>::IsComplex>
struct generateTestMatrix;
-// for real matrices, make sure none of the eigenvalues are negative
template <typename MatrixType>
struct generateTestMatrix<MatrixType,0>
{
static void run(MatrixType& result, typename MatrixType::Index size)
{
- MatrixType mat = MatrixType::Random(size, size);
- EigenSolver<MatrixType> es(mat);
- typename EigenSolver<MatrixType>::EigenvalueType eivals = es.eigenvalues();
- for (typename MatrixType::Index i = 0; i < size; ++i) {
- if (eivals(i).imag() == 0 && eivals(i).real() < 0)
- eivals(i) = -eivals(i);
- }
- result = (es.eigenvectors() * eivals.asDiagonal() * es.eigenvectors().inverse()).real();
+ result = MatrixType::Random(size, size);
+ RealSchur<MatrixType> schur(result);
+ MatrixType T = schur.matrixT();
+ processTriangularMatrix<MatrixType>::run(result, T, schur.matrixU());
}
};
-// for complex matrices, any matrix is fine
template <typename MatrixType>
struct generateTestMatrix<MatrixType,1>
{
@@ -41,7 +61,7 @@ struct generateTestMatrix<MatrixType,1>
};
template <typename Derived, typename OtherDerived>
-double relerr(const MatrixBase<Derived>& A, const MatrixBase<OtherDerived>& B)
+typename Derived::RealScalar relerr(const MatrixBase<Derived>& A, const MatrixBase<OtherDerived>& B)
{
return std::sqrt((A - B).cwiseAbs2().sum() / (std::min)(A.cwiseAbs2().sum(), B.cwiseAbs2().sum()));
}
diff --git a/unsupported/test/matrix_power.cpp b/unsupported/test/matrix_power.cpp
index b9d513b45..7ccfacfdf 100644
--- a/unsupported/test/matrix_power.cpp
+++ b/unsupported/test/matrix_power.cpp
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2012 Chen-Pang He <jdh8@ms63.hinet.net>
+// Copyright (C) 2012, 2013 Chen-Pang He <jdh8@ms63.hinet.net>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@@ -9,35 +9,8 @@
#include "matrix_functions.h"
-template <typename MatrixType, int IsComplex = NumTraits<typename MatrixType::Scalar>::IsComplex>
-struct generateTriangularMatrix;
-
-// for real matrices, make sure none of the eigenvalues are negative
-template <typename MatrixType>
-struct generateTriangularMatrix<MatrixType,0>
-{
- static void run(MatrixType& result, typename MatrixType::Index size)
- {
- result.resize(size, size);
- result.template triangularView<Upper>() = MatrixType::Random(size, size);
- for (typename MatrixType::Index i = 0; i < size; ++i)
- result.coeffRef(i,i) = std::abs(result.coeff(i,i));
- }
-};
-
-// for complex matrices, any matrix is fine
-template <typename MatrixType>
-struct generateTriangularMatrix<MatrixType,1>
-{
- static void run(MatrixType& result, typename MatrixType::Index size)
- {
- result.resize(size, size);
- result.template triangularView<Upper>() = MatrixType::Random(size, size);
- }
-};
-
template<typename T>
-void test2dRotation(double tol)
+void test2dRotation(const T& tol)
{
Matrix<T,2,2> A, B, C;
T angle, c, s;
@@ -46,19 +19,19 @@ void test2dRotation(double tol)
MatrixPower<Matrix<T,2,2> > Apow(A);
for (int i=0; i<=20; ++i) {
- angle = pow(10, (i-10) / 5.);
+ angle = std::pow(T(10), (i-10) / T(5.));
c = std::cos(angle);
s = std::sin(angle);
B << c, s, -s, c;
- C = Apow(std::ldexp(angle,1) / M_PI);
+ C = Apow(std::ldexp(angle,1) / T(EIGEN_PI));
std::cout << "test2dRotation: i = " << i << " error powerm = " << relerr(C,B) << '\n';
- VERIFY(C.isApprox(B, static_cast<T>(tol)));
+ VERIFY(C.isApprox(B, tol));
}
}
template<typename T>
-void test2dHyperbolicRotation(double tol)
+void test2dHyperbolicRotation(const T& tol)
{
Matrix<std::complex<T>,2,2> A, B, C;
T angle, ch = std::cosh((T)1);
@@ -75,12 +48,26 @@ void test2dHyperbolicRotation(double tol)
C = Apow(angle);
std::cout << "test2dHyperbolicRotation: i = " << i << " error powerm = " << relerr(C,B) << '\n';
- VERIFY(C.isApprox(B, static_cast<T>(tol)));
+ VERIFY(C.isApprox(B, tol));
+ }
+}
+
+template<typename T>
+void test3dRotation(const T& tol)
+{
+ Matrix<T,3,1> v;
+ T angle;
+
+ for (int i=0; i<=20; ++i) {
+ v = Matrix<T,3,1>::Random();
+ v.normalize();
+ angle = std::pow(T(10), (i-10) / T(5.));
+ VERIFY(AngleAxis<T>(angle, v).matrix().isApprox(AngleAxis<T>(1,v).matrix().pow(angle), tol));
}
}
template<typename MatrixType>
-void testExponentLaws(const MatrixType& m, double tol)
+void testGeneral(const MatrixType& m, const typename MatrixType::RealScalar& tol)
{
typedef typename MatrixType::RealScalar RealScalar;
MatrixType m1, m2, m3, m4, m5;
@@ -97,37 +84,121 @@ void testExponentLaws(const MatrixType& m, double tol)
m4 = mpow(x+y);
m5.noalias() = m2 * m3;
- VERIFY(m4.isApprox(m5, static_cast<RealScalar>(tol)));
+ VERIFY(m4.isApprox(m5, tol));
m4 = mpow(x*y);
m5 = m2.pow(y);
- VERIFY(m4.isApprox(m5, static_cast<RealScalar>(tol)));
+ VERIFY(m4.isApprox(m5, tol));
m4 = (std::abs(x) * m1).pow(y);
m5 = std::pow(std::abs(x), y) * m3;
- VERIFY(m4.isApprox(m5, static_cast<RealScalar>(tol)));
+ VERIFY(m4.isApprox(m5, tol));
+ }
+}
+
+template<typename MatrixType>
+void testSingular(const MatrixType& m_const, const typename MatrixType::RealScalar& tol)
+{
+  // Pass by const reference and cast the constness away to avoid MSVC errors
+  // with aligned (fixed-size vectorizable) data types.
+ MatrixType& m = const_cast<MatrixType&>(m_const);
+
+ const int IsComplex = NumTraits<typename internal::traits<MatrixType>::Scalar>::IsComplex;
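+  // ComplexSchur yields a genuinely triangular T, so results can be formed through a
+  // TriangularView; RealSchur is only quasi-triangular, hence the plain-matrix fallback.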
+ typedef typename internal::conditional<IsComplex, TriangularView<MatrixType,Upper>, const MatrixType&>::type TriangularType;
+ typename internal::conditional< IsComplex, ComplexSchur<MatrixType>, RealSchur<MatrixType> >::type schur;
+ MatrixType T;
+
+ for (int i=0; i < g_repeat; ++i) {
+ m.setRandom();
+ m.col(0).fill(0);
+
+ schur.compute(m);
+ T = schur.matrixT();
+ const MatrixType& U = schur.matrixU();
+ processTriangularMatrix<MatrixType>::run(m, T, U);
+ MatrixPower<MatrixType> mpow(m);
+
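+    // Each sqrt halves the exponent, so mpow(0.5), mpow(0.25) and mpow(0.125) must
+    // match U * sqrt^k(T) * U^* for k = 1, 2, 3.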
+ T = T.sqrt();
+ VERIFY(mpow(0.5L).isApprox(U * (TriangularType(T) * U.adjoint()), tol));
+
+ T = T.sqrt();
+ VERIFY(mpow(0.25L).isApprox(U * (TriangularType(T) * U.adjoint()), tol));
+
+ T = T.sqrt();
+ VERIFY(mpow(0.125L).isApprox(U * (TriangularType(T) * U.adjoint()), tol));
+ }
+}
+
+template<typename MatrixType>
+void testLogThenExp(const MatrixType& m_const, const typename MatrixType::RealScalar& tol)
+{
+  // Pass by const reference and cast the constness away to avoid MSVC errors
+  // with aligned (fixed-size vectorizable) data types.
+ MatrixType& m = const_cast<MatrixType&>(m_const);
+
+ typedef typename MatrixType::Scalar Scalar;
+ Scalar x;
+
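+  // Checks the identity m^x = exp(x * log(m)) on matrices generated so that no
+  // real eigenvalue is negative (see generateTestMatrix).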
+ for (int i=0; i < g_repeat; ++i) {
+ generateTestMatrix<MatrixType>::run(m, m.rows());
+ x = internal::random<Scalar>();
+ VERIFY(m.pow(x).isApprox((x * m.log()).exp(), tol));
}
}
typedef Matrix<double,3,3,RowMajor> Matrix3dRowMajor;
+typedef Matrix<long double,3,3> Matrix3e;
typedef Matrix<long double,Dynamic,Dynamic> MatrixXe;
void test_matrix_power()
{
CALL_SUBTEST_2(test2dRotation<double>(1e-13));
CALL_SUBTEST_1(test2dRotation<float>(2e-5)); // was 1e-5, relaxed for clang 2.8 / linux / x86-64
- CALL_SUBTEST_9(test2dRotation<long double>(1e-13));
+ CALL_SUBTEST_9(test2dRotation<long double>(1e-13L));
CALL_SUBTEST_2(test2dHyperbolicRotation<double>(1e-14));
CALL_SUBTEST_1(test2dHyperbolicRotation<float>(1e-5));
- CALL_SUBTEST_9(test2dHyperbolicRotation<long double>(1e-14));
-
- CALL_SUBTEST_2(testExponentLaws(Matrix2d(), 1e-13));
- CALL_SUBTEST_7(testExponentLaws(Matrix3dRowMajor(), 1e-13));
- CALL_SUBTEST_3(testExponentLaws(Matrix4cd(), 1e-13));
- CALL_SUBTEST_4(testExponentLaws(MatrixXd(8,8), 2e-12));
- CALL_SUBTEST_1(testExponentLaws(Matrix2f(), 1e-4));
- CALL_SUBTEST_5(testExponentLaws(Matrix3cf(), 1e-4));
- CALL_SUBTEST_8(testExponentLaws(Matrix4f(), 1e-4));
- CALL_SUBTEST_6(testExponentLaws(MatrixXf(2,2), 1e-3)); // see bug 614
- CALL_SUBTEST_9(testExponentLaws(MatrixXe(7,7), 1e-13));
+ CALL_SUBTEST_9(test2dHyperbolicRotation<long double>(1e-14L));
+
+ CALL_SUBTEST_10(test3dRotation<double>(1e-13));
+ CALL_SUBTEST_11(test3dRotation<float>(1e-5));
+ CALL_SUBTEST_12(test3dRotation<long double>(1e-13L));
+
+ CALL_SUBTEST_2(testGeneral(Matrix2d(), 1e-13));
+ CALL_SUBTEST_7(testGeneral(Matrix3dRowMajor(), 1e-13));
+ CALL_SUBTEST_3(testGeneral(Matrix4cd(), 1e-13));
+ CALL_SUBTEST_4(testGeneral(MatrixXd(8,8), 2e-12));
+ CALL_SUBTEST_1(testGeneral(Matrix2f(), 1e-4));
+ CALL_SUBTEST_5(testGeneral(Matrix3cf(), 1e-4));
+ CALL_SUBTEST_8(testGeneral(Matrix4f(), 1e-4));
+ CALL_SUBTEST_6(testGeneral(MatrixXf(2,2), 1e-3)); // see bug 614
+ CALL_SUBTEST_9(testGeneral(MatrixXe(7,7), 1e-13L));
+ CALL_SUBTEST_10(testGeneral(Matrix3d(), 1e-13));
+ CALL_SUBTEST_11(testGeneral(Matrix3f(), 1e-4));
+ CALL_SUBTEST_12(testGeneral(Matrix3e(), 1e-13L));
+
+ CALL_SUBTEST_2(testSingular(Matrix2d(), 1e-13));
+ CALL_SUBTEST_7(testSingular(Matrix3dRowMajor(), 1e-13));
+ CALL_SUBTEST_3(testSingular(Matrix4cd(), 1e-13));
+ CALL_SUBTEST_4(testSingular(MatrixXd(8,8), 2e-12));
+ CALL_SUBTEST_1(testSingular(Matrix2f(), 1e-4));
+ CALL_SUBTEST_5(testSingular(Matrix3cf(), 1e-4));
+ CALL_SUBTEST_8(testSingular(Matrix4f(), 1e-4));
+ CALL_SUBTEST_6(testSingular(MatrixXf(2,2), 1e-3));
+ CALL_SUBTEST_9(testSingular(MatrixXe(7,7), 1e-13L));
+ CALL_SUBTEST_10(testSingular(Matrix3d(), 1e-13));
+ CALL_SUBTEST_11(testSingular(Matrix3f(), 1e-4));
+ CALL_SUBTEST_12(testSingular(Matrix3e(), 1e-13L));
+
+ CALL_SUBTEST_2(testLogThenExp(Matrix2d(), 1e-13));
+ CALL_SUBTEST_7(testLogThenExp(Matrix3dRowMajor(), 1e-13));
+ CALL_SUBTEST_3(testLogThenExp(Matrix4cd(), 1e-13));
+ CALL_SUBTEST_4(testLogThenExp(MatrixXd(8,8), 2e-12));
+ CALL_SUBTEST_1(testLogThenExp(Matrix2f(), 1e-4));
+ CALL_SUBTEST_5(testLogThenExp(Matrix3cf(), 1e-4));
+ CALL_SUBTEST_8(testLogThenExp(Matrix4f(), 1e-4));
+ CALL_SUBTEST_6(testLogThenExp(MatrixXf(2,2), 1e-3));
+ CALL_SUBTEST_9(testLogThenExp(MatrixXe(7,7), 1e-13L));
+ CALL_SUBTEST_10(testLogThenExp(Matrix3d(), 1e-13));
+ CALL_SUBTEST_11(testLogThenExp(Matrix3f(), 1e-4));
+ CALL_SUBTEST_12(testLogThenExp(Matrix3e(), 1e-13L));
}
diff --git a/unsupported/test/minres.cpp b/unsupported/test/minres.cpp
index 509ebe09a..8b300b78a 100644
--- a/unsupported/test/minres.cpp
+++ b/unsupported/test/minres.cpp
@@ -1,8 +1,8 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2011 Gael Guennebaud <g.gael@free.fr>
// Copyright (C) 2012 Giacomo Po <gpo@ucla.edu>
+// Copyright (C) 2011 Gael Guennebaud <g.gael@free.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@@ -14,21 +14,14 @@
template<typename T> void test_minres_T()
{
- MINRES<SparseMatrix<T>, Lower|Upper, DiagonalPreconditioner<T> > minres_colmajor_diag;
+ // Identity preconditioner
MINRES<SparseMatrix<T>, Lower, IdentityPreconditioner > minres_colmajor_lower_I;
MINRES<SparseMatrix<T>, Upper, IdentityPreconditioner > minres_colmajor_upper_I;
-// MINRES<SparseMatrix<T>, Lower, IncompleteLUT<T> > minres_colmajor_ilut;
- //minres<SparseMatrix<T>, SSORPreconditioner<T> > minres_colmajor_ssor;
-
-
-// CALL_SUBTEST( check_sparse_square_solving(minres_colmajor_diag) );
- // CALL_SUBTEST( check_sparse_square_solving(minres_colmajor_ilut) );
- //CALL_SUBTEST( check_sparse_square_solving(minres_colmajor_ssor) );
// Diagonal preconditioner
MINRES<SparseMatrix<T>, Lower, DiagonalPreconditioner<T> > minres_colmajor_lower_diag;
MINRES<SparseMatrix<T>, Upper, DiagonalPreconditioner<T> > minres_colmajor_upper_diag;
- MINRES<SparseMatrix<T>, Upper|Lower, DiagonalPreconditioner<T> > minres_colmajor_uplo_diag;
+ MINRES<SparseMatrix<T>, Lower|Upper, DiagonalPreconditioner<T> > minres_colmajor_uplo_diag;
// call tests for SPD matrix
CALL_SUBTEST( check_sparse_spd_solving(minres_colmajor_lower_I) );
@@ -36,14 +29,16 @@ template<typename T> void test_minres_T()
CALL_SUBTEST( check_sparse_spd_solving(minres_colmajor_lower_diag) );
CALL_SUBTEST( check_sparse_spd_solving(minres_colmajor_upper_diag) );
-// CALL_SUBTEST( check_sparse_spd_solving(minres_colmajor_uplo_diag) );
+ CALL_SUBTEST( check_sparse_spd_solving(minres_colmajor_uplo_diag) );
// TO DO: symmetric semi-definite matrix
// TO DO: symmetric indefinite matrix
+
}
void test_minres()
{
CALL_SUBTEST_1(test_minres_T<double>());
-// CALL_SUBTEST_2(test_minres_T<std::complex<double> >());
+// CALL_SUBTEST_2(test_minres_T<std::complex<double> >());
+
}
diff --git a/unsupported/test/mpreal/mpreal.h b/unsupported/test/mpreal/mpreal.h
index 7d6f4e79f..8404f1ff8 100644
--- a/unsupported/test/mpreal/mpreal.h
+++ b/unsupported/test/mpreal/mpreal.h
@@ -1,33 +1,34 @@
/*
- MPFR C++: Multi-precision floating point number class for C++.
+ MPFR C++: Multi-precision floating point number class for C++.
Based on MPFR library: http://mpfr.org
Project homepage: http://www.holoborodko.com/pavel/mpfr
Contact e-mail: pavel@holoborodko.com
- Copyright (c) 2008-2014 Pavel Holoborodko
+ Copyright (c) 2008-2015 Pavel Holoborodko
Contributors:
- Dmitriy Gubanov, Konstantin Holoborodko, Brian Gladman,
- Helmut Jarausch, Fokko Beekhof, Ulrich Mutze, Heinz van Saanen,
- Pere Constans, Peter van Hoof, Gael Guennebaud, Tsai Chia Cheng,
+ Dmitriy Gubanov, Konstantin Holoborodko, Brian Gladman,
+ Helmut Jarausch, Fokko Beekhof, Ulrich Mutze, Heinz van Saanen,
+ Pere Constans, Peter van Hoof, Gael Guennebaud, Tsai Chia Cheng,
Alexei Zubanov, Jauhien Piatlicki, Victor Berger, John Westwood,
- Petr Aleksandrov, Orion Poplawski, Charles Karney.
+ Petr Aleksandrov, Orion Poplawski, Charles Karney, Arash Partow,
+ Rodney James, Jorge Leitao.
Licensing:
(A) MPFR C++ is under GNU General Public License ("GPL").
-
- (B) Non-free licenses may also be purchased from the author, for users who
+
+ (B) Non-free licenses may also be purchased from the author, for users who
do not want their programs protected by the GPL.
- The non-free licenses are for users that wish to use MPFR C++ in
- their products but are unwilling to release their software
- under the GPL (which would require them to release source code
+ The non-free licenses are for users that wish to use MPFR C++ in
+ their products but are unwilling to release their software
+ under the GPL (which would require them to release source code
and allow free redistribution).
Such users can purchase an unlimited-use license from the author.
Contact us for more details.
-
+
GNU General Public License ("GPL") copyright permissions statement:
**************************************************************************
This program is free software: you can redistribute it and/or modify
@@ -55,10 +56,10 @@
#include <cmath>
#include <cstring>
#include <limits>
+#include <complex>
+#include <algorithm>
// Options
-// FIXME HAVE_INT64_SUPPORT leads to clashes with long int and int64_t on some systems.
-//#define MPREAL_HAVE_INT64_SUPPORT // Enable int64_t support if possible. Available only for MSVC 2010 & GCC.
#define MPREAL_HAVE_MSVC_DEBUGVIEW // Enable Debugger Visualizer for "Debug" builds in MSVC.
#define MPREAL_HAVE_DYNAMIC_STD_NUMERIC_LIMITS // Enable extended std::numeric_limits<mpfr::mpreal> specialization.
// Meaning that "digits", "round_style" and similar members are defined as functions, not constants.
@@ -66,19 +67,17 @@
// Library version
#define MPREAL_VERSION_MAJOR 3
-#define MPREAL_VERSION_MINOR 5
-#define MPREAL_VERSION_PATCHLEVEL 9
-#define MPREAL_VERSION_STRING "3.5.9"
+#define MPREAL_VERSION_MINOR 6
+#define MPREAL_VERSION_PATCHLEVEL 2
+#define MPREAL_VERSION_STRING "3.6.2"
// Detect compiler using signatures from http://predef.sourceforge.net/
-#if defined(__GNUC__) && defined(__INTEL_COMPILER)
- #define IsInf(x) isinf(x) // Intel ICC compiler on Linux
-
-#elif defined(_MSC_VER) // Microsoft Visual C++
- #define IsInf(x) (!_finite(x))
-
+#if defined(__GNUC__)
+ #define IsInf(x) (isinf)(x) // GNU C++/Intel ICC compiler on Linux
+#elif defined(_MSC_VER) // Microsoft Visual C++
+ #define IsInf(x) (!_finite(x))
#else
- #define IsInf(x) std::isinf(x) // GNU C/C++ (and/or other compilers), just hope for C99 conformance
+ #define IsInf(x) (std::isinf)(x) // GNU C/C++ (and/or other compilers), just hope for C99 conformance
#endif
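The parenthesized calls above are deliberate: wrapping the function name in parentheses prevents a function-like macro named isinf (which a C99 <math.h> may define) from being expanded, so the real function is called. A minimal sketch of the idiom:

    #include <cmath>

    bool is_infinite(double x)
    {
        // (std::isinf)(x): the parentheses around the name stop the
        // preprocessor from treating a possible function-like macro
        // 'isinf' as a macro invocation; the actual function is used.
        return (std::isinf)(x);
    }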
// A Clang feature extension to determine compiler features.
@@ -93,54 +92,27 @@
#define MPREAL_HAVE_MOVE_SUPPORT
- // Use fields in mpfr_t structure to check if it was initialized / set dummy initialization
+ // Use fields in mpfr_t structure to check if it was initialized / set dummy initialization
#define mpfr_is_initialized(x) (0 != (x)->_mpfr_d)
#define mpfr_set_uninitialized(x) ((x)->_mpfr_d = 0 )
#endif
-// Detect support for explicit converters.
+// Detect support for explicit converters.
#if (__has_feature(cxx_explicit_conversions) || \
- defined(__GXX_EXPERIMENTAL_CXX0X__) || __cplusplus >= 201103L || \
- (defined(_MSC_VER) && _MSC_VER >= 1800))
+ (defined(__GXX_EXPERIMENTAL_CXX0X__) && __GNUC_MINOR__ >= 5) || __cplusplus >= 201103L || \
+ (defined(_MSC_VER) && _MSC_VER >= 1800))
#define MPREAL_HAVE_EXPLICIT_CONVERTERS
#endif
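When MPREAL_HAVE_EXPLICIT_CONVERTERS is defined, the class declares C++11 explicit conversion operators, which fire only on an explicit request and never silently. A short sketch of the call-site difference, assuming mpreal.h is on the include path:

    #include "mpreal.h"   // assumed on the include path

    void converter_demo()
    {
        mpfr::mpreal x = 3.14;
        double d = static_cast<double>(x);  // OK: conversion requested explicitly
        // double e = x;                    // ill-formed: operator double() is explicit
        (void)d;
    }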
-// Detect available 64-bit capabilities
-#if defined(MPREAL_HAVE_INT64_SUPPORT)
-
- #define MPFR_USE_INTMAX_T // Should be defined before mpfr.h
-
- #if defined(_MSC_VER) // MSVC + Windows
- #if (_MSC_VER >= 1600)
- #include <stdint.h> // <stdint.h> is available only in msvc2010!
-
- #else // MPFR relies on intmax_t which is available only in msvc2010
- #undef MPREAL_HAVE_INT64_SUPPORT // Besides, MPFR & MPIR have to be compiled with msvc2010
- #undef MPFR_USE_INTMAX_T // Since we cannot detect this, disable x64 by default
- // Someone should change this manually if needed.
- #endif
-
- #elif defined (__GNUC__) && defined(__linux__)
- #if defined(__amd64__) || defined(__amd64) || defined(__x86_64__) || defined(__x86_64) || defined(__ia64) || defined(__itanium__) || defined(_M_IA64) || defined (__PPC64__)
- #undef MPREAL_HAVE_INT64_SUPPORT // Remove all shaman dances for x64 builds since
- #undef MPFR_USE_INTMAX_T // GCC already supports x64 as of "long int" is 64-bit integer, nothing left to do
- #else
- #include <stdint.h> // use int64_t, uint64_t otherwise
- #endif
-
- #else
- #include <stdint.h> // rely on int64_t, uint64_t in all other cases, Mac OSX, etc.
- #endif
-
-#endif
+#define MPFR_USE_INTMAX_T // Enable 64-bit integer types - should be defined before mpfr.h
#if defined(MPREAL_HAVE_MSVC_DEBUGVIEW) && defined(_MSC_VER) && defined(_DEBUG)
#define MPREAL_MSVC_DEBUGVIEW_CODE DebugView = toString();
#define MPREAL_MSVC_DEBUGVIEW_DATA std::string DebugView;
#else
- #define MPREAL_MSVC_DEBUGVIEW_CODE
- #define MPREAL_MSVC_DEBUGVIEW_DATA
+ #define MPREAL_MSVC_DEBUGVIEW_CODE
+ #define MPREAL_MSVC_DEBUGVIEW_DATA
#endif
#include <mpfr.h>
@@ -150,9 +122,15 @@
#endif
// Less important options
-#define MPREAL_DOUBLE_BITS_OVERFLOW -1 // Triggers overflow exception during conversion to double if mpreal
+#define MPREAL_DOUBLE_BITS_OVERFLOW -1 // Triggers overflow exception during conversion to double if mpreal
// cannot fit in MPREAL_DOUBLE_BITS_OVERFLOW bits
// = -1 disables overflow checks (default)
+
+// Fast replacement for mpfr_set_zero(x, +1):
+// (a) uses low-level data members, might not be compatible with new versions of MPFR
+// (b) sign is not set; to set it, add (x)->_mpfr_sign = 1;
+#define mpfr_set_zero_fast(x) ((x)->_mpfr_exp = __MPFR_EXP_ZERO)
+
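mpfr_set_zero_fast pokes MPFR's internal exponent field directly, as the comment warns, so it may break with future MPFR layouts. For reference, a sketch of the portable equivalents it bypasses:

    #include <mpfr.h>

    void zero_demo()
    {
        mpfr_t x;
        mpfr_init2(x, 53);
    #if (MPFR_VERSION >= MPFR_VERSION_NUM(3,0,0))
        mpfr_set_zero(x, +1);          // portable, MPFR >= 3.0
    #else
        mpfr_set_ui(x, 0, GMP_RNDN);   // portable on older MPFR
    #endif
        mpfr_clear(x);
    }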
#if defined(__GNUC__)
#define MPREAL_PERMISSIVE_EXPR __extension__
#else
@@ -164,9 +142,9 @@ namespace mpfr {
class mpreal {
private:
mpfr_t mp;
-
+
public:
-
+
// Get default rounding mode & precision
inline static mp_rnd_t get_default_rnd() { return (mp_rnd_t)(mpfr_get_default_rounding_mode()); }
inline static mp_prec_t get_default_prec() { return mpfr_get_default_prec(); }
@@ -174,29 +152,26 @@ public:
// Constructors && type conversions
mpreal();
mpreal(const mpreal& u);
- mpreal(const mpf_t u);
- mpreal(const mpz_t u, mp_prec_t prec = mpreal::get_default_prec(), mp_rnd_t mode = mpreal::get_default_rnd());
- mpreal(const mpq_t u, mp_prec_t prec = mpreal::get_default_prec(), mp_rnd_t mode = mpreal::get_default_rnd());
- mpreal(const double u, mp_prec_t prec = mpreal::get_default_prec(), mp_rnd_t mode = mpreal::get_default_rnd());
- mpreal(const long double u, mp_prec_t prec = mpreal::get_default_prec(), mp_rnd_t mode = mpreal::get_default_rnd());
- mpreal(const unsigned long int u, mp_prec_t prec = mpreal::get_default_prec(), mp_rnd_t mode = mpreal::get_default_rnd());
- mpreal(const unsigned int u, mp_prec_t prec = mpreal::get_default_prec(), mp_rnd_t mode = mpreal::get_default_rnd());
- mpreal(const long int u, mp_prec_t prec = mpreal::get_default_prec(), mp_rnd_t mode = mpreal::get_default_rnd());
- mpreal(const int u, mp_prec_t prec = mpreal::get_default_prec(), mp_rnd_t mode = mpreal::get_default_rnd());
-
- // Construct mpreal from mpfr_t structure.
- // shared = true allows to avoid deep copy, so that mpreal and 'u' share the same data & pointers.
- mpreal(const mpfr_t u, bool shared = false);
+ mpreal(const mpf_t u);
+ mpreal(const mpz_t u, mp_prec_t prec = mpreal::get_default_prec(), mp_rnd_t mode = mpreal::get_default_rnd());
+ mpreal(const mpq_t u, mp_prec_t prec = mpreal::get_default_prec(), mp_rnd_t mode = mpreal::get_default_rnd());
+ mpreal(const double u, mp_prec_t prec = mpreal::get_default_prec(), mp_rnd_t mode = mpreal::get_default_rnd());
+ mpreal(const long double u, mp_prec_t prec = mpreal::get_default_prec(), mp_rnd_t mode = mpreal::get_default_rnd());
+ mpreal(const unsigned long long int u, mp_prec_t prec = mpreal::get_default_prec(), mp_rnd_t mode = mpreal::get_default_rnd());
+ mpreal(const long long int u, mp_prec_t prec = mpreal::get_default_prec(), mp_rnd_t mode = mpreal::get_default_rnd());
+ mpreal(const unsigned long int u, mp_prec_t prec = mpreal::get_default_prec(), mp_rnd_t mode = mpreal::get_default_rnd());
+ mpreal(const unsigned int u, mp_prec_t prec = mpreal::get_default_prec(), mp_rnd_t mode = mpreal::get_default_rnd());
+ mpreal(const long int u, mp_prec_t prec = mpreal::get_default_prec(), mp_rnd_t mode = mpreal::get_default_rnd());
+ mpreal(const int u, mp_prec_t prec = mpreal::get_default_prec(), mp_rnd_t mode = mpreal::get_default_rnd());
-#if defined (MPREAL_HAVE_INT64_SUPPORT)
- mpreal(const uint64_t u, mp_prec_t prec = mpreal::get_default_prec(), mp_rnd_t mode = mpreal::get_default_rnd());
- mpreal(const int64_t u, mp_prec_t prec = mpreal::get_default_prec(), mp_rnd_t mode = mpreal::get_default_rnd());
-#endif
+ // Construct mpreal from mpfr_t structure.
+    // shared = true allows one to avoid a deep copy, so that mpreal and 'u' share the same data & pointers.
+ mpreal(const mpfr_t u, bool shared = false);
mpreal(const char* s, mp_prec_t prec = mpreal::get_default_prec(), int base = 10, mp_rnd_t mode = mpreal::get_default_rnd());
mpreal(const std::string& s, mp_prec_t prec = mpreal::get_default_prec(), int base = 10, mp_rnd_t mode = mpreal::get_default_rnd());
- ~mpreal();
+ ~mpreal();
#ifdef MPREAL_HAVE_MOVE_SUPPORT
mpreal& operator=(mpreal&& v);
@@ -205,7 +180,7 @@ public:
// Operations
// =
- // +, -, *, /, ++, --, <<, >>
+ // +, -, *, /, ++, --, <<, >>
// *=, +=, -=, /=,
// <, >, ==, <=, >=
@@ -215,13 +190,16 @@ public:
mpreal& operator=(const mpz_t v);
mpreal& operator=(const mpq_t v);
mpreal& operator=(const long double v);
- mpreal& operator=(const double v);
+ mpreal& operator=(const double v);
mpreal& operator=(const unsigned long int v);
+ mpreal& operator=(const unsigned long long int v);
+ mpreal& operator=(const long long int v);
mpreal& operator=(const unsigned int v);
mpreal& operator=(const long int v);
mpreal& operator=(const int v);
mpreal& operator=(const char* s);
mpreal& operator=(const std::string& s);
+ template <typename real_t> mpreal& operator= (const std::complex<real_t>& z);
// +
mpreal& operator+=(const mpreal& v);
@@ -235,20 +213,18 @@ public:
mpreal& operator+=(const long int u);
mpreal& operator+=(const int u);
-#if defined (MPREAL_HAVE_INT64_SUPPORT)
- mpreal& operator+=(const int64_t u);
- mpreal& operator+=(const uint64_t u);
- mpreal& operator-=(const int64_t u);
- mpreal& operator-=(const uint64_t u);
- mpreal& operator*=(const int64_t u);
- mpreal& operator*=(const uint64_t u);
- mpreal& operator/=(const int64_t u);
- mpreal& operator/=(const uint64_t u);
-#endif
+ mpreal& operator+=(const long long int u);
+ mpreal& operator+=(const unsigned long long int u);
+ mpreal& operator-=(const long long int u);
+ mpreal& operator-=(const unsigned long long int u);
+ mpreal& operator*=(const long long int u);
+ mpreal& operator*=(const unsigned long long int u);
+ mpreal& operator/=(const long long int u);
+ mpreal& operator/=(const unsigned long long int u);
const mpreal operator+() const;
mpreal& operator++ ();
- const mpreal operator++ (int);
+ const mpreal operator++ (int);
// -
mpreal& operator-=(const mpreal& v);
@@ -266,7 +242,7 @@ public:
friend const mpreal operator-(const long int b, const mpreal& a);
friend const mpreal operator-(const int b, const mpreal& a);
friend const mpreal operator-(const double b, const mpreal& a);
- mpreal& operator-- ();
+ mpreal& operator-- ();
const mpreal operator-- (int);
// *
@@ -279,7 +255,7 @@ public:
mpreal& operator*=(const unsigned int v);
mpreal& operator*=(const long int v);
mpreal& operator*=(const int v);
-
+
// /
mpreal& operator/=(const mpreal& v);
mpreal& operator/=(const mpz_t v);
@@ -308,51 +284,27 @@ public:
mpreal& operator>>=(const long int u);
mpreal& operator>>=(const int u);
- // Boolean Operators
- friend bool operator > (const mpreal& a, const mpreal& b);
- friend bool operator >= (const mpreal& a, const mpreal& b);
- friend bool operator < (const mpreal& a, const mpreal& b);
- friend bool operator <= (const mpreal& a, const mpreal& b);
- friend bool operator == (const mpreal& a, const mpreal& b);
- friend bool operator != (const mpreal& a, const mpreal& b);
-
- // Optimized specializations for boolean operators
- friend bool operator == (const mpreal& a, const unsigned long int b);
- friend bool operator == (const mpreal& a, const unsigned int b);
- friend bool operator == (const mpreal& a, const long int b);
- friend bool operator == (const mpreal& a, const int b);
- friend bool operator == (const mpreal& a, const long double b);
- friend bool operator == (const mpreal& a, const double b);
-
// Type Conversion operators
- bool toBool (mp_rnd_t mode = GMP_RNDZ) const;
- long toLong (mp_rnd_t mode = GMP_RNDZ) const;
- unsigned long toULong (mp_rnd_t mode = GMP_RNDZ) const;
- float toFloat (mp_rnd_t mode = GMP_RNDN) const;
- double toDouble (mp_rnd_t mode = GMP_RNDN) const;
- long double toLDouble (mp_rnd_t mode = GMP_RNDN) const;
+ bool toBool ( ) const;
+ long toLong (mp_rnd_t mode = GMP_RNDZ) const;
+ unsigned long toULong (mp_rnd_t mode = GMP_RNDZ) const;
+ long long toLLong (mp_rnd_t mode = GMP_RNDZ) const;
+ unsigned long long toULLong (mp_rnd_t mode = GMP_RNDZ) const;
+ float toFloat (mp_rnd_t mode = GMP_RNDN) const;
+ double toDouble (mp_rnd_t mode = GMP_RNDN) const;
+ long double toLDouble (mp_rnd_t mode = GMP_RNDN) const;
#if defined (MPREAL_HAVE_EXPLICIT_CONVERTERS)
- explicit operator bool () const { return toBool(); }
- explicit operator int () const { return toLong(); }
- explicit operator long () const { return toLong(); }
- explicit operator long long () const { return toLong(); }
- explicit operator unsigned () const { return toULong(); }
- explicit operator unsigned long () const { return toULong(); }
- explicit operator unsigned long long () const { return toULong(); }
- explicit operator float () const { return toFloat(); }
- explicit operator double () const { return toDouble(); }
- explicit operator long double () const { return toLDouble(); }
-#endif
-
-#if defined (MPREAL_HAVE_INT64_SUPPORT)
- int64_t toInt64 (mp_rnd_t mode = GMP_RNDZ) const;
- uint64_t toUInt64 (mp_rnd_t mode = GMP_RNDZ) const;
-
- #if defined (MPREAL_HAVE_EXPLICIT_CONVERTERS)
- explicit operator int64_t () const { return toInt64(); }
- explicit operator uint64_t () const { return toUInt64(); }
- #endif
+ explicit operator bool () const { return toBool(); }
+ explicit operator int () const { return int(toLong()); }
+ explicit operator long () const { return toLong(); }
+ explicit operator long long () const { return toLLong(); }
+ explicit operator unsigned () const { return unsigned(toULong()); }
+ explicit operator unsigned long () const { return toULong(); }
+ explicit operator unsigned long long () const { return toULLong(); }
+ explicit operator float () const { return toFloat(); }
+ explicit operator double () const { return toDouble(); }
+ explicit operator long double () const { return toLDouble(); }
#endif
// Get raw pointers so that mpreal can be directly used in raw mpfr_* functions
@@ -391,11 +343,12 @@ public:
friend inline const mpreal div_2ui(const mpreal& v, unsigned long int k, mp_rnd_t rnd_mode);
friend inline const mpreal div_2si(const mpreal& v, long int k, mp_rnd_t rnd_mode);
friend int cmpabs(const mpreal& a,const mpreal& b);
-
+
friend const mpreal log (const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal log2 (const mpreal& v, mp_rnd_t rnd_mode);
+ friend const mpreal logb (const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal log10(const mpreal& v, mp_rnd_t rnd_mode);
- friend const mpreal exp (const mpreal& v, mp_rnd_t rnd_mode);
+ friend const mpreal exp (const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal exp2 (const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal exp10(const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal log1p(const mpreal& v, mp_rnd_t rnd_mode);
@@ -436,21 +389,22 @@ public:
friend const mpreal eint (const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal gamma (const mpreal& v, mp_rnd_t rnd_mode);
+ friend const mpreal tgamma (const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal lngamma (const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal lgamma (const mpreal& v, int *signp, mp_rnd_t rnd_mode);
friend const mpreal zeta (const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal erf (const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal erfc (const mpreal& v, mp_rnd_t rnd_mode);
- friend const mpreal besselj0 (const mpreal& v, mp_rnd_t rnd_mode);
- friend const mpreal besselj1 (const mpreal& v, mp_rnd_t rnd_mode);
+ friend const mpreal besselj0 (const mpreal& v, mp_rnd_t rnd_mode);
+ friend const mpreal besselj1 (const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal besseljn (long n, const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal bessely0 (const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal bessely1 (const mpreal& v, mp_rnd_t rnd_mode);
- friend const mpreal besselyn (long n, const mpreal& v, mp_rnd_t rnd_mode);
+ friend const mpreal besselyn (long n, const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal fma (const mpreal& v1, const mpreal& v2, const mpreal& v3, mp_rnd_t rnd_mode);
friend const mpreal fms (const mpreal& v1, const mpreal& v2, const mpreal& v3, mp_rnd_t rnd_mode);
friend const mpreal agm (const mpreal& v1, const mpreal& v2, mp_rnd_t rnd_mode);
- friend const mpreal sum (const mpreal tab[], unsigned long int n, mp_rnd_t rnd_mode);
+ friend const mpreal sum (const mpreal tab[], const unsigned long int n, int& status, mp_rnd_t rnd_mode);
friend int sgn(const mpreal& v); // returns -1 or +1
// MPFR 2.4.0 Specifics
@@ -465,28 +419,26 @@ public:
friend const mpreal mod (const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode); // Modulus after division
#endif
-// MPFR 3.0.0 Specifics
#if (MPFR_VERSION >= MPFR_VERSION_NUM(3,0,0))
friend const mpreal digamma (const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal ai (const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal urandom (gmp_randstate_t& state, mp_rnd_t rnd_mode); // use gmp_randinit_default() to init state, gmp_randclear() to clear
+#endif
+
+#if (MPFR_VERSION >= MPFR_VERSION_NUM(3,1,0))
friend const mpreal grandom (gmp_randstate_t& state, mp_rnd_t rnd_mode); // use gmp_randinit_default() to init state, gmp_randclear() to clear
friend const mpreal grandom (unsigned int seed);
#endif
-
+
// Uniformly distributed random number generation in [0,1] using
// Mersenne-Twister algorithm by default.
// Use parameter to setup seed, e.g.: random((unsigned)time(NULL))
// Check urandom() for more precise control.
friend const mpreal random(unsigned int seed);
- // Exponent and mantissa manipulation
- friend const mpreal frexp(const mpreal& v, mp_exp_t* exp);
- friend const mpreal ldexp(const mpreal& v, mp_exp_t exp);
-
// Splits mpreal value into fractional and integer parts.
// Returns fractional part and stores integer part in n.
- friend const mpreal modf(const mpreal& v, mpreal& n);
+ friend const mpreal modf(const mpreal& v, mpreal& n);
// Constants
// don't forget to call mpfr_free_cache() for every thread where you are using const-functions
@@ -515,14 +467,14 @@ public:
friend const mpreal frac (const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal remainder ( const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode);
friend const mpreal remquo (long* q, const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode);
-
+
// Miscellaneous Functions
friend const mpreal nexttoward (const mpreal& x, const mpreal& y);
friend const mpreal nextabove (const mpreal& x);
friend const mpreal nextbelow (const mpreal& x);
// use gmp_randinit_default() to init state, gmp_randclear() to clear
- friend const mpreal urandomb (gmp_randstate_t& state);
+ friend const mpreal urandomb (gmp_randstate_t& state);
// MPFR < 2.4.2 Specifics
#if (MPFR_VERSION <= MPFR_VERSION_NUM(2,4,2))
@@ -530,9 +482,9 @@ public:
#endif
// Instance Checkers
- friend bool isnan (const mpreal& v);
- friend bool isinf (const mpreal& v);
- friend bool isfinite (const mpreal& v);
+ friend bool (isnan) (const mpreal& v);
+ friend bool (isinf) (const mpreal& v);
+ friend bool (isfinite) (const mpreal& v);
friend bool isnum (const mpreal& v);
friend bool iszero (const mpreal& v);
@@ -549,9 +501,9 @@ public:
// Aliases for get_prec(), set_prec() - needed for compatibility with std::complex<mpreal> interface
inline mpreal& setPrecision(int Precision, mp_rnd_t RoundingMode = get_default_rnd());
inline int getPrecision() const;
-
+
// Set mpreal to +/- inf, NaN, +/-0
- mpreal& setInf (int Sign = +1);
+ mpreal& setInf (int Sign = +1);
mpreal& setNan ();
mpreal& setZero (int Sign = +1);
mpreal& setSign (int Sign, mp_rnd_t RoundingMode = get_default_rnd());
@@ -560,7 +512,7 @@ public:
mp_exp_t get_exp();
int set_exp(mp_exp_t e);
int check_range (int t, mp_rnd_t rnd_mode = get_default_rnd());
- int subnormalize (int t,mp_rnd_t rnd_mode = get_default_rnd());
+ int subnormalize (int t, mp_rnd_t rnd_mode = get_default_rnd());
// Inexact conversion from float
inline bool fits_in_bits(double x, int n);
@@ -580,7 +532,7 @@ public:
// Efficient swapping of two mpreal values - needed for std algorithms
friend void swap(mpreal& x, mpreal& y);
-
+
friend const mpreal fmax(const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode);
friend const mpreal fmin(const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode);
@@ -590,7 +542,7 @@ private:
//
// mpfr::mpreal=<DebugView> ; Show value only
// mpfr::mpreal=<DebugView>, <mp[0]._mpfr_prec,u>bits ; Show value & precision
- //
+ //
// at the beginning of
// [Visual Studio Installation Folder]\Common7\Packages\Debugger\autoexp.dat
MPREAL_MSVC_DEBUGVIEW_DATA
@@ -609,15 +561,15 @@ public:
//////////////////////////////////////////////////////////////////////////
// Constructors & converters
// Default constructor: creates mp number and initializes it to 0.
-inline mpreal::mpreal()
-{
- mpfr_init2 (mpfr_ptr(), mpreal::get_default_prec());
- mpfr_set_ui(mpfr_ptr(), 0, mpreal::get_default_rnd());
+inline mpreal::mpreal()
+{
+ mpfr_init2(mpfr_ptr(), mpreal::get_default_prec());
+ mpfr_set_zero_fast(mpfr_ptr());
MPREAL_MSVC_DEBUGVIEW_CODE;
}
-inline mpreal::mpreal(const mpreal& u)
+inline mpreal::mpreal(const mpreal& u)
{
mpfr_init2(mpfr_ptr(),mpfr_get_prec(u.mpfr_srcptr()));
mpfr_set (mpfr_ptr(),u.mpfr_srcptr(),mpreal::get_default_rnd());
@@ -628,7 +580,7 @@ inline mpreal::mpreal(const mpreal& u)
#ifdef MPREAL_HAVE_MOVE_SUPPORT
inline mpreal::mpreal(mpreal&& other)
{
- mpfr_set_uninitialized(mpfr_ptr()); // make sure "other" holds no pinter to actual data
+ mpfr_set_uninitialized(mpfr_ptr()); // make sure "other" holds no pointer to actual data
mpfr_swap(mpfr_ptr(), other.mpfr_ptr());
MPREAL_MSVC_DEBUGVIEW_CODE;
@@ -700,67 +652,65 @@ inline mpreal::mpreal(const double u, mp_prec_t prec, mp_rnd_t mode)
}
inline mpreal::mpreal(const long double u, mp_prec_t prec, mp_rnd_t mode)
-{
+{
mpfr_init2 (mpfr_ptr(), prec);
mpfr_set_ld(mpfr_ptr(), u, mode);
MPREAL_MSVC_DEBUGVIEW_CODE;
}
-inline mpreal::mpreal(const unsigned long int u, mp_prec_t prec, mp_rnd_t mode)
-{
+inline mpreal::mpreal(const unsigned long long int u, mp_prec_t prec, mp_rnd_t mode)
+{
mpfr_init2 (mpfr_ptr(), prec);
- mpfr_set_ui(mpfr_ptr(), u, mode);
+ mpfr_set_uj(mpfr_ptr(), u, mode);
MPREAL_MSVC_DEBUGVIEW_CODE;
}
-inline mpreal::mpreal(const unsigned int u, mp_prec_t prec, mp_rnd_t mode)
-{
+inline mpreal::mpreal(const long long int u, mp_prec_t prec, mp_rnd_t mode)
+{
mpfr_init2 (mpfr_ptr(), prec);
- mpfr_set_ui(mpfr_ptr(), u, mode);
+ mpfr_set_sj(mpfr_ptr(), u, mode);
MPREAL_MSVC_DEBUGVIEW_CODE;
}
-inline mpreal::mpreal(const long int u, mp_prec_t prec, mp_rnd_t mode)
-{
+inline mpreal::mpreal(const unsigned long int u, mp_prec_t prec, mp_rnd_t mode)
+{
mpfr_init2 (mpfr_ptr(), prec);
- mpfr_set_si(mpfr_ptr(), u, mode);
+ mpfr_set_ui(mpfr_ptr(), u, mode);
MPREAL_MSVC_DEBUGVIEW_CODE;
}
-inline mpreal::mpreal(const int u, mp_prec_t prec, mp_rnd_t mode)
-{
+inline mpreal::mpreal(const unsigned int u, mp_prec_t prec, mp_rnd_t mode)
+{
mpfr_init2 (mpfr_ptr(), prec);
- mpfr_set_si(mpfr_ptr(), u, mode);
+ mpfr_set_ui(mpfr_ptr(), u, mode);
MPREAL_MSVC_DEBUGVIEW_CODE;
}
-#if defined (MPREAL_HAVE_INT64_SUPPORT)
-inline mpreal::mpreal(const uint64_t u, mp_prec_t prec, mp_rnd_t mode)
+inline mpreal::mpreal(const long int u, mp_prec_t prec, mp_rnd_t mode)
{
mpfr_init2 (mpfr_ptr(), prec);
- mpfr_set_uj(mpfr_ptr(), u, mode);
+ mpfr_set_si(mpfr_ptr(), u, mode);
MPREAL_MSVC_DEBUGVIEW_CODE;
}
-inline mpreal::mpreal(const int64_t u, mp_prec_t prec, mp_rnd_t mode)
+inline mpreal::mpreal(const int u, mp_prec_t prec, mp_rnd_t mode)
{
mpfr_init2 (mpfr_ptr(), prec);
- mpfr_set_sj(mpfr_ptr(), u, mode);
+ mpfr_set_si(mpfr_ptr(), u, mode);
MPREAL_MSVC_DEBUGVIEW_CODE;
}
-#endif
inline mpreal::mpreal(const char* s, mp_prec_t prec, int base, mp_rnd_t mode)
{
mpfr_init2 (mpfr_ptr(), prec);
- mpfr_set_str(mpfr_ptr(), s, base, mode);
+ mpfr_set_str(mpfr_ptr(), s, base, mode);
MPREAL_MSVC_DEBUGVIEW_CODE;
}
@@ -768,7 +718,7 @@ inline mpreal::mpreal(const char* s, mp_prec_t prec, int base, mp_rnd_t mode)
inline mpreal::mpreal(const std::string& s, mp_prec_t prec, int base, mp_rnd_t mode)
{
mpfr_init2 (mpfr_ptr(), prec);
- mpfr_set_str(mpfr_ptr(), s.c_str(), base, mode);
+ mpfr_set_str(mpfr_ptr(), s.c_str(), base, mode);
MPREAL_MSVC_DEBUGVIEW_CODE;
}
@@ -776,15 +726,15 @@ inline mpreal::mpreal(const std::string& s, mp_prec_t prec, int base, mp_rnd_t m
inline void mpreal::clear(::mpfr_ptr x)
{
#ifdef MPREAL_HAVE_MOVE_SUPPORT
- if(mpfr_is_initialized(x))
+ if(mpfr_is_initialized(x))
#endif
mpfr_clear(x);
}
-inline mpreal::~mpreal()
-{
+inline mpreal::~mpreal()
+{
clear(mpfr_ptr());
-}
+}
// internal namespace needed for template magic
namespace internal{
@@ -792,58 +742,55 @@ namespace internal{
// Use SFINAE to restrict arithmetic operations instantiation only for numeric types
// This is needed for smooth integration with libraries based on expression templates, like Eigen.
// TODO: Do the same for boolean operators.
- template <typename ArgumentType> struct result_type {};
-
- template <> struct result_type<mpreal> {typedef mpreal type;};
- template <> struct result_type<mpz_t> {typedef mpreal type;};
- template <> struct result_type<mpq_t> {typedef mpreal type;};
- template <> struct result_type<long double> {typedef mpreal type;};
- template <> struct result_type<double> {typedef mpreal type;};
- template <> struct result_type<unsigned long int> {typedef mpreal type;};
- template <> struct result_type<unsigned int> {typedef mpreal type;};
- template <> struct result_type<long int> {typedef mpreal type;};
- template <> struct result_type<int> {typedef mpreal type;};
-
-#if defined (MPREAL_HAVE_INT64_SUPPORT)
- template <> struct result_type<int64_t > {typedef mpreal type;};
- template <> struct result_type<uint64_t > {typedef mpreal type;};
-#endif
+ template <typename ArgumentType> struct result_type {};
+
+ template <> struct result_type<mpreal> {typedef mpreal type;};
+ template <> struct result_type<mpz_t> {typedef mpreal type;};
+ template <> struct result_type<mpq_t> {typedef mpreal type;};
+ template <> struct result_type<long double> {typedef mpreal type;};
+ template <> struct result_type<double> {typedef mpreal type;};
+ template <> struct result_type<unsigned long int> {typedef mpreal type;};
+ template <> struct result_type<unsigned int> {typedef mpreal type;};
+ template <> struct result_type<long int> {typedef mpreal type;};
+ template <> struct result_type<int> {typedef mpreal type;};
+ template <> struct result_type<long long> {typedef mpreal type;};
+ template <> struct result_type<unsigned long long> {typedef mpreal type;};
}
// + Addition
-template <typename Rhs>
-inline const typename internal::result_type<Rhs>::type
+template <typename Rhs>
+inline const typename internal::result_type<Rhs>::type
operator+(const mpreal& lhs, const Rhs& rhs){ return mpreal(lhs) += rhs; }
-template <typename Lhs>
-inline const typename internal::result_type<Lhs>::type
- operator+(const Lhs& lhs, const mpreal& rhs){ return mpreal(rhs) += lhs; }
+template <typename Lhs>
+inline const typename internal::result_type<Lhs>::type
+ operator+(const Lhs& lhs, const mpreal& rhs){ return mpreal(rhs) += lhs; }
// - Subtraction
-template <typename Rhs>
-inline const typename internal::result_type<Rhs>::type
+template <typename Rhs>
+inline const typename internal::result_type<Rhs>::type
operator-(const mpreal& lhs, const Rhs& rhs){ return mpreal(lhs) -= rhs; }
-template <typename Lhs>
-inline const typename internal::result_type<Lhs>::type
+template <typename Lhs>
+inline const typename internal::result_type<Lhs>::type
operator-(const Lhs& lhs, const mpreal& rhs){ return mpreal(lhs) -= rhs; }
// * Multiplication
-template <typename Rhs>
-inline const typename internal::result_type<Rhs>::type
+template <typename Rhs>
+inline const typename internal::result_type<Rhs>::type
operator*(const mpreal& lhs, const Rhs& rhs){ return mpreal(lhs) *= rhs; }
-template <typename Lhs>
-inline const typename internal::result_type<Lhs>::type
- operator*(const Lhs& lhs, const mpreal& rhs){ return mpreal(rhs) *= lhs; }
+template <typename Lhs>
+inline const typename internal::result_type<Lhs>::type
+ operator*(const Lhs& lhs, const mpreal& rhs){ return mpreal(rhs) *= lhs; }
// / Division
-template <typename Rhs>
-inline const typename internal::result_type<Rhs>::type
+template <typename Rhs>
+inline const typename internal::result_type<Rhs>::type
operator/(const mpreal& lhs, const Rhs& rhs){ return mpreal(lhs) /= rhs; }
-template <typename Lhs>
-inline const typename internal::result_type<Lhs>::type
+template <typename Lhs>
+inline const typename internal::result_type<Lhs>::type
operator/(const Lhs& lhs, const mpreal& rhs){ return mpreal(lhs) /= rhs; }
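These operator templates are gated by internal::result_type<T>: only types with a specialization (now including long long and unsigned long long) participate in overload resolution, and anything else is removed by SFINAE rather than producing a hard error. A small sketch, assuming mpreal.h is on the include path:

    #include "mpreal.h"   // assumed on the include path

    void arithmetic_demo()
    {
        mpfr::mpreal a = 2;
        mpfr::mpreal b = a + 3LL;   // long long now has a result_type specialization
        mpfr::mpreal c = 5u + a;    // unsigned int likewise
        // std::string s; a + s;    // no result_type<std::string>: dropped from the overload set
        (void)b; (void)c;
    }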
//////////////////////////////////////////////////////////////////////////
@@ -893,17 +840,17 @@ const mpreal pow(const long int a, const double b, mp_rnd_t rnd_mode = mpreal::g
const mpreal pow(const int a, const unsigned long int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
const mpreal pow(const int a, const unsigned int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
const mpreal pow(const int a, const long int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
-const mpreal pow(const int a, const int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
+const mpreal pow(const int a, const int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
const mpreal pow(const int a, const long double b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
-const mpreal pow(const int a, const double b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
+const mpreal pow(const int a, const double b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
-const mpreal pow(const long double a, const long double b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
+const mpreal pow(const long double a, const long double b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
const mpreal pow(const long double a, const unsigned long int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
const mpreal pow(const long double a, const unsigned int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
const mpreal pow(const long double a, const long int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
const mpreal pow(const long double a, const int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
-const mpreal pow(const double a, const double b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
+const mpreal pow(const double a, const double b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
const mpreal pow(const double a, const unsigned long int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
const mpreal pow(const double a, const unsigned int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
const mpreal pow(const double a, const long int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
@@ -920,9 +867,9 @@ inline const mpreal div_2si(const mpreal& v, long int k, mp_rnd_t rnd_mode = mpr
inline mpreal machine_epsilon(mp_prec_t prec = mpreal::get_default_prec());
// Returns smallest eps such that x + eps != x (relative machine epsilon)
-inline mpreal machine_epsilon(const mpreal& x);
+inline mpreal machine_epsilon(const mpreal& x);
-// Gives max & min values for the required precision,
+// Gives max & min values for the required precision,
// minval is 'safe' meaning 1 / minval does not overflow
// maxval is 'safe' meaning 1 / maxval does not underflow
inline mpreal minval(mp_prec_t prec = mpreal::get_default_prec());
@@ -935,13 +882,13 @@ inline bool isEqualFuzzy(const mpreal& a, const mpreal& b, const mpreal& eps);
inline bool isEqualFuzzy(const mpreal& a, const mpreal& b);
// 'Bitwise' equality check
-// maxUlps - a and b can be apart by maxUlps binary numbers.
+// maxUlps - a and b can differ by at most maxUlps representable ('binary') numbers.
inline bool isEqualUlps(const mpreal& a, const mpreal& b, int maxUlps);
//////////////////////////////////////////////////////////////////////////
-// Convert precision in 'bits' to decimal digits and vice versa.
-// bits = ceil(digits*log[2](10))
-// digits = floor(bits*log[10](2))
+// Convert precision in 'bits' to decimal digits and vice versa.
+// bits = ceil(digits*log[2](10))
+// digits = floor(bits*log[10](2))
inline mp_prec_t digits2bits(int d);
inline int bits2digits(mp_prec_t b);
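A quick worked check of the two conversion formulas above, using values for IEEE double precision (sketch assumes mpreal.h is included):

    #include "mpreal.h"   // assumed on the include path

    void precision_demo()
    {
        // digits2bits(16) = ceil(16 * log2(10)) = ceil(53.15..) = 54 bits
        // bits2digits(53) = floor(53 * log10(2)) = floor(15.95..) = 15 digits
        mp_prec_t p = mpfr::digits2bits(16);   // 54
        int       d = mpfr::bits2digits(53);   // 15
        (void)p; (void)d;
    }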
@@ -979,7 +926,7 @@ inline mpreal& mpreal::operator=(const mpreal& v)
inline mpreal& mpreal::operator=(const mpf_t v)
{
mpfr_set_f(mpfr_ptr(), v, mpreal::get_default_rnd());
-
+
MPREAL_MSVC_DEBUGVIEW_CODE;
return *this;
}
@@ -987,7 +934,7 @@ inline mpreal& mpreal::operator=(const mpf_t v)
inline mpreal& mpreal::operator=(const mpz_t v)
{
mpfr_set_z(mpfr_ptr(), v, mpreal::get_default_rnd());
-
+
MPREAL_MSVC_DEBUGVIEW_CODE;
return *this;
}
@@ -1000,16 +947,16 @@ inline mpreal& mpreal::operator=(const mpq_t v)
return *this;
}
-inline mpreal& mpreal::operator=(const long double v)
-{
+inline mpreal& mpreal::operator=(const long double v)
+{
mpfr_set_ld(mpfr_ptr(), v, mpreal::get_default_rnd());
MPREAL_MSVC_DEBUGVIEW_CODE;
return *this;
}
-inline mpreal& mpreal::operator=(const double v)
-{
+inline mpreal& mpreal::operator=(const double v)
+{
#if (MPREAL_DOUBLE_BITS_OVERFLOW > -1)
if(fits_in_bits(v, MPREAL_DOUBLE_BITS_OVERFLOW))
{
@@ -1024,33 +971,49 @@ inline mpreal& mpreal::operator=(const double v)
return *this;
}
-inline mpreal& mpreal::operator=(const unsigned long int v)
-{
- mpfr_set_ui(mpfr_ptr(), v, mpreal::get_default_rnd());
+inline mpreal& mpreal::operator=(const unsigned long int v)
+{
+ mpfr_set_ui(mpfr_ptr(), v, mpreal::get_default_rnd());
+
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+ return *this;
+}
+
+inline mpreal& mpreal::operator=(const unsigned int v)
+{
+ mpfr_set_ui(mpfr_ptr(), v, mpreal::get_default_rnd());
+
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+ return *this;
+}
+
+inline mpreal& mpreal::operator=(const unsigned long long int v)
+{
+ mpfr_set_uj(mpfr_ptr(), v, mpreal::get_default_rnd());
MPREAL_MSVC_DEBUGVIEW_CODE;
return *this;
}
-inline mpreal& mpreal::operator=(const unsigned int v)
-{
- mpfr_set_ui(mpfr_ptr(), v, mpreal::get_default_rnd());
+inline mpreal& mpreal::operator=(const long long int v)
+{
+ mpfr_set_sj(mpfr_ptr(), v, mpreal::get_default_rnd());
MPREAL_MSVC_DEBUGVIEW_CODE;
return *this;
}
-inline mpreal& mpreal::operator=(const long int v)
-{
- mpfr_set_si(mpfr_ptr(), v, mpreal::get_default_rnd());
+inline mpreal& mpreal::operator=(const long int v)
+{
+ mpfr_set_si(mpfr_ptr(), v, mpreal::get_default_rnd());
MPREAL_MSVC_DEBUGVIEW_CODE;
return *this;
}
inline mpreal& mpreal::operator=(const int v)
-{
- mpfr_set_si(mpfr_ptr(), v, mpreal::get_default_rnd());
+{
+ mpfr_set_si(mpfr_ptr(), v, mpreal::get_default_rnd());
MPREAL_MSVC_DEBUGVIEW_CODE;
return *this;
@@ -1071,7 +1034,7 @@ inline mpreal& mpreal::operator=(const char* s)
if(0 == mpfr_set_str(t, s, 10, mpreal::get_default_rnd()))
{
- mpfr_set(mpfr_ptr(), t, mpreal::get_default_rnd());
+ mpfr_set(mpfr_ptr(), t, mpreal::get_default_rnd());
MPREAL_MSVC_DEBUGVIEW_CODE;
}
@@ -1094,7 +1057,7 @@ inline mpreal& mpreal::operator=(const std::string& s)
if(0 == mpfr_set_str(t, s.c_str(), 10, mpreal::get_default_rnd()))
{
- mpfr_set(mpfr_ptr(), t, mpreal::get_default_rnd());
+ mpfr_set(mpfr_ptr(), t, mpreal::get_default_rnd());
MPREAL_MSVC_DEBUGVIEW_CODE;
}
@@ -1102,6 +1065,11 @@ inline mpreal& mpreal::operator=(const std::string& s)
return *this;
}
+template <typename real_t>
+inline mpreal& mpreal::operator= (const std::complex<real_t>& z)
+{
+ return *this = z.real();
+}
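The new std::complex assignment keeps only the real part, which is what real-coefficient code paths expect. A tiny sketch of the resulting behaviour:

    #include <complex>
    #include "mpreal.h"   // assumed on the include path

    void complex_assign_demo()
    {
        std::complex<double> z(2.0, 5.0);
        mpfr::mpreal r;
        r = z;    // r == 2.0: only z.real() is kept, the imaginary part is dropped
    }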
//////////////////////////////////////////////////////////////////////////
// + Addition
@@ -1135,9 +1103,9 @@ inline mpreal& mpreal::operator+=(const mpq_t u)
inline mpreal& mpreal::operator+= (const long double u)
{
- *this += mpreal(u);
+ *this += mpreal(u);
MPREAL_MSVC_DEBUGVIEW_CODE;
- return *this;
+ return *this;
}
inline mpreal& mpreal::operator+= (const double u)
@@ -1180,16 +1148,14 @@ inline mpreal& mpreal::operator+=(const int u)
return *this;
}
-#if defined (MPREAL_HAVE_INT64_SUPPORT)
-inline mpreal& mpreal::operator+=(const int64_t u){ *this += mpreal(u); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; }
-inline mpreal& mpreal::operator+=(const uint64_t u){ *this += mpreal(u); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; }
-inline mpreal& mpreal::operator-=(const int64_t u){ *this -= mpreal(u); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; }
-inline mpreal& mpreal::operator-=(const uint64_t u){ *this -= mpreal(u); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; }
-inline mpreal& mpreal::operator*=(const int64_t u){ *this *= mpreal(u); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; }
-inline mpreal& mpreal::operator*=(const uint64_t u){ *this *= mpreal(u); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; }
-inline mpreal& mpreal::operator/=(const int64_t u){ *this /= mpreal(u); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; }
-inline mpreal& mpreal::operator/=(const uint64_t u){ *this /= mpreal(u); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; }
-#endif
+inline mpreal& mpreal::operator+=(const long long int u) { *this += mpreal(u); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; }
+inline mpreal& mpreal::operator+=(const unsigned long long int u){ *this += mpreal(u); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; }
+inline mpreal& mpreal::operator-=(const long long int u) { *this -= mpreal(u); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; }
+inline mpreal& mpreal::operator-=(const unsigned long long int u){ *this -= mpreal(u); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; }
+inline mpreal& mpreal::operator*=(const long long int u) { *this *= mpreal(u); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; }
+inline mpreal& mpreal::operator*=(const unsigned long long int u){ *this *= mpreal(u); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; }
+inline mpreal& mpreal::operator/=(const long long int u) { *this /= mpreal(u); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; }
+inline mpreal& mpreal::operator/=(const unsigned long long int u){ *this /= mpreal(u); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; }
inline const mpreal mpreal::operator+()const { return mpreal(*this); }
@@ -1200,7 +1166,7 @@ inline const mpreal operator+(const mpreal& a, const mpreal& b)
return c;
}
-inline mpreal& mpreal::operator++()
+inline mpreal& mpreal::operator++()
{
return *this += 1;
}
@@ -1212,7 +1178,7 @@ inline const mpreal mpreal::operator++ (int)
return x;
}
-inline mpreal& mpreal::operator--()
+inline mpreal& mpreal::operator--()
{
return *this -= 1;
}
@@ -1249,9 +1215,9 @@ inline mpreal& mpreal::operator-=(const mpq_t v)
inline mpreal& mpreal::operator-=(const long double v)
{
- *this -= mpreal(v);
+ *this -= mpreal(v);
MPREAL_MSVC_DEBUGVIEW_CODE;
- return *this;
+ return *this;
}
inline mpreal& mpreal::operator-=(const double v)
@@ -1259,7 +1225,7 @@ inline mpreal& mpreal::operator-=(const double v)
#if (MPFR_VERSION >= MPFR_VERSION_NUM(2,4,0))
mpfr_sub_d(mpfr_ptr(),mpfr_srcptr(),v,mpreal::get_default_rnd());
#else
- *this -= mpreal(v);
+ *this -= mpreal(v);
#endif
MPREAL_MSVC_DEBUGVIEW_CODE;
@@ -1374,9 +1340,9 @@ inline mpreal& mpreal::operator*=(const mpq_t v)
inline mpreal& mpreal::operator*=(const long double v)
{
- *this *= mpreal(v);
+ *this *= mpreal(v);
MPREAL_MSVC_DEBUGVIEW_CODE;
- return *this;
+ return *this;
}
inline mpreal& mpreal::operator*=(const double v)
@@ -1384,7 +1350,7 @@ inline mpreal& mpreal::operator*=(const double v)
#if (MPFR_VERSION >= MPFR_VERSION_NUM(2,4,0))
mpfr_mul_d(mpfr_ptr(),mpfr_srcptr(),v,mpreal::get_default_rnd());
#else
- *this *= mpreal(v);
+ *this *= mpreal(v);
#endif
MPREAL_MSVC_DEBUGVIEW_CODE;
return *this;
@@ -1452,7 +1418,7 @@ inline mpreal& mpreal::operator/=(const long double v)
{
*this /= mpreal(v);
MPREAL_MSVC_DEBUGVIEW_CODE;
- return *this;
+ return *this;
}
inline mpreal& mpreal::operator/=(const double v)
@@ -1460,7 +1426,7 @@ inline mpreal& mpreal::operator/=(const double v)
#if (MPFR_VERSION >= MPFR_VERSION_NUM(2,4,0))
mpfr_div_d(mpfr_ptr(),mpfr_srcptr(),v,mpreal::get_default_rnd());
#else
- *this /= mpreal(v);
+ *this /= mpreal(v);
#endif
MPREAL_MSVC_DEBUGVIEW_CODE;
return *this;
@@ -1671,45 +1637,86 @@ inline const mpreal div_2si(const mpreal& v, long int k, mp_rnd_t rnd_mode)
}
//////////////////////////////////////////////////////////////////////////
-//Boolean operators
-inline bool operator > (const mpreal& a, const mpreal& b){ return (mpfr_greater_p (a.mpfr_srcptr(),b.mpfr_srcptr()) !=0 ); }
-inline bool operator >= (const mpreal& a, const mpreal& b){ return (mpfr_greaterequal_p (a.mpfr_srcptr(),b.mpfr_srcptr()) !=0 ); }
-inline bool operator < (const mpreal& a, const mpreal& b){ return (mpfr_less_p (a.mpfr_srcptr(),b.mpfr_srcptr()) !=0 ); }
-inline bool operator <= (const mpreal& a, const mpreal& b){ return (mpfr_lessequal_p (a.mpfr_srcptr(),b.mpfr_srcptr()) !=0 ); }
-inline bool operator == (const mpreal& a, const mpreal& b){ return (mpfr_equal_p (a.mpfr_srcptr(),b.mpfr_srcptr()) !=0 ); }
-inline bool operator != (const mpreal& a, const mpreal& b){ return (mpfr_lessgreater_p (a.mpfr_srcptr(),b.mpfr_srcptr()) !=0 ); }
-
-inline bool operator == (const mpreal& a, const unsigned long int b ){ return (mpfr_cmp_ui(a.mpfr_srcptr(),b) == 0 ); }
-inline bool operator == (const mpreal& a, const unsigned int b ){ return (mpfr_cmp_ui(a.mpfr_srcptr(),b) == 0 ); }
-inline bool operator == (const mpreal& a, const long int b ){ return (mpfr_cmp_si(a.mpfr_srcptr(),b) == 0 ); }
-inline bool operator == (const mpreal& a, const int b ){ return (mpfr_cmp_si(a.mpfr_srcptr(),b) == 0 ); }
-inline bool operator == (const mpreal& a, const long double b ){ return (mpfr_cmp_ld(a.mpfr_srcptr(),b) == 0 ); }
-inline bool operator == (const mpreal& a, const double b ){ return (mpfr_cmp_d (a.mpfr_srcptr(),b) == 0 ); }
-
-
-inline bool isnan (const mpreal& op){ return (mpfr_nan_p (op.mpfr_srcptr()) != 0 ); }
-inline bool isinf (const mpreal& op){ return (mpfr_inf_p (op.mpfr_srcptr()) != 0 ); }
-inline bool isfinite (const mpreal& op){ return (mpfr_number_p (op.mpfr_srcptr()) != 0 ); }
+//Relational operators
+
+// WARNING:
+//
+// Please note that the following checks for double-NaN are guaranteed to work only in IEEE math mode:
+//
+// isnan(b) = (b != b)
+// isnan(b) = !(b == b) (this is the form used in the code below)
+//
+// Be cautious if you use compiler options which break strict IEEE compliance (e.g. -ffast-math in GCC).
+// Use std::isnan instead (C++11).
+
+inline bool operator > (const mpreal& a, const mpreal& b ){ return (mpfr_greater_p(a.mpfr_srcptr(),b.mpfr_srcptr()) != 0 ); }
+inline bool operator > (const mpreal& a, const unsigned long int b ){ return !isnan EIGEN_NOT_A_MACRO (a) && (mpfr_cmp_ui(a.mpfr_srcptr(),b) > 0 ); }
+inline bool operator > (const mpreal& a, const unsigned int b ){ return !isnan EIGEN_NOT_A_MACRO (a) && (mpfr_cmp_ui(a.mpfr_srcptr(),b) > 0 ); }
+inline bool operator > (const mpreal& a, const long int b ){ return !isnan EIGEN_NOT_A_MACRO (a) && (mpfr_cmp_si(a.mpfr_srcptr(),b) > 0 ); }
+inline bool operator > (const mpreal& a, const int b ){ return !isnan EIGEN_NOT_A_MACRO (a) && (mpfr_cmp_si(a.mpfr_srcptr(),b) > 0 ); }
+inline bool operator > (const mpreal& a, const long double b ){ return !isnan EIGEN_NOT_A_MACRO (a) && (b == b) && (mpfr_cmp_ld(a.mpfr_srcptr(),b) > 0 ); }
+inline bool operator > (const mpreal& a, const double b ){ return !isnan EIGEN_NOT_A_MACRO (a) && (b == b) && (mpfr_cmp_d (a.mpfr_srcptr(),b) > 0 ); }
+
+inline bool operator >= (const mpreal& a, const mpreal& b ){ return (mpfr_greaterequal_p(a.mpfr_srcptr(),b.mpfr_srcptr()) != 0 ); }
+inline bool operator >= (const mpreal& a, const unsigned long int b ){ return !isnan EIGEN_NOT_A_MACRO (a) && (mpfr_cmp_ui(a.mpfr_srcptr(),b) >= 0 ); }
+// inline bool operator >= (const mpreal& a, const unsigned int b ){ return !isnan EIGEN_NOT_A_MACRO (a) && (mpfr_cmp_ui(a.mpfr_srcptr(),b) >= 0 ); }
+inline bool operator >= (const mpreal& a, const long int b ){ return !isnan EIGEN_NOT_A_MACRO (a) && (mpfr_cmp_si(a.mpfr_srcptr(),b) >= 0 ); }
+inline bool operator >= (const mpreal& a, const int b ){ return !isnan EIGEN_NOT_A_MACRO (a) && (mpfr_cmp_si(a.mpfr_srcptr(),b) >= 0 ); }
+inline bool operator >= (const mpreal& a, const long double b ){ return !isnan EIGEN_NOT_A_MACRO (a) && (b == b) && (mpfr_cmp_ld(a.mpfr_srcptr(),b) >= 0 ); }
+inline bool operator >= (const mpreal& a, const double b ){ return !isnan EIGEN_NOT_A_MACRO (a) && (b == b) && (mpfr_cmp_d (a.mpfr_srcptr(),b) >= 0 ); }
+
+inline bool operator < (const mpreal& a, const mpreal& b ){ return (mpfr_less_p(a.mpfr_srcptr(),b.mpfr_srcptr()) != 0 ); }
+inline bool operator < (const mpreal& a, const unsigned long int b ){ return !isnan EIGEN_NOT_A_MACRO (a) && (mpfr_cmp_ui(a.mpfr_srcptr(),b) < 0 ); }
+inline bool operator < (const mpreal& a, const unsigned int b ){ return !isnan EIGEN_NOT_A_MACRO (a) && (mpfr_cmp_ui(a.mpfr_srcptr(),b) < 0 ); }
+inline bool operator < (const mpreal& a, const long int b ){ return !isnan EIGEN_NOT_A_MACRO (a) && (mpfr_cmp_si(a.mpfr_srcptr(),b) < 0 ); }
+inline bool operator < (const mpreal& a, const int b ){ return !isnan EIGEN_NOT_A_MACRO (a) && (mpfr_cmp_si(a.mpfr_srcptr(),b) < 0 ); }
+inline bool operator < (const mpreal& a, const long double b ){ return !isnan EIGEN_NOT_A_MACRO (a) && (b == b) && (mpfr_cmp_ld(a.mpfr_srcptr(),b) < 0 ); }
+inline bool operator < (const mpreal& a, const double b ){ return !isnan EIGEN_NOT_A_MACRO (a) && (b == b) && (mpfr_cmp_d (a.mpfr_srcptr(),b) < 0 ); }
+
+inline bool operator <= (const mpreal& a, const mpreal& b ){ return (mpfr_lessequal_p(a.mpfr_srcptr(),b.mpfr_srcptr()) != 0 ); }
+inline bool operator <= (const mpreal& a, const unsigned long int b ){ return !isnan EIGEN_NOT_A_MACRO (a) && (mpfr_cmp_ui(a.mpfr_srcptr(),b) <= 0 ); }
+inline bool operator <= (const mpreal& a, const unsigned int b ){ return !isnan EIGEN_NOT_A_MACRO (a) && (mpfr_cmp_ui(a.mpfr_srcptr(),b) <= 0 ); }
+inline bool operator <= (const mpreal& a, const long int b ){ return !isnan EIGEN_NOT_A_MACRO (a) && (mpfr_cmp_si(a.mpfr_srcptr(),b) <= 0 ); }
+inline bool operator <= (const mpreal& a, const int b ){ return !isnan EIGEN_NOT_A_MACRO (a) && (mpfr_cmp_si(a.mpfr_srcptr(),b) <= 0 ); }
+inline bool operator <= (const mpreal& a, const long double b ){ return !isnan EIGEN_NOT_A_MACRO (a) && (b == b) && (mpfr_cmp_ld(a.mpfr_srcptr(),b) <= 0 ); }
+inline bool operator <= (const mpreal& a, const double b ){ return !isnan EIGEN_NOT_A_MACRO (a) && (b == b) && (mpfr_cmp_d (a.mpfr_srcptr(),b) <= 0 ); }
+
+inline bool operator == (const mpreal& a, const mpreal& b ){ return (mpfr_equal_p(a.mpfr_srcptr(),b.mpfr_srcptr()) != 0 ); }
+inline bool operator == (const mpreal& a, const unsigned long int b ){ return !isnan EIGEN_NOT_A_MACRO (a) && (mpfr_cmp_ui(a.mpfr_srcptr(),b) == 0 ); }
+inline bool operator == (const mpreal& a, const unsigned int b ){ return !isnan EIGEN_NOT_A_MACRO (a) && (mpfr_cmp_ui(a.mpfr_srcptr(),b) == 0 ); }
+inline bool operator == (const mpreal& a, const long int b ){ return !isnan EIGEN_NOT_A_MACRO (a) && (mpfr_cmp_si(a.mpfr_srcptr(),b) == 0 ); }
+inline bool operator == (const mpreal& a, const int b ){ return !isnan EIGEN_NOT_A_MACRO (a) && (mpfr_cmp_si(a.mpfr_srcptr(),b) == 0 ); }
+inline bool operator == (const mpreal& a, const long double b ){ return !isnan EIGEN_NOT_A_MACRO (a) && (b == b) && (mpfr_cmp_ld(a.mpfr_srcptr(),b) == 0 ); }
+inline bool operator == (const mpreal& a, const double b ){ return !isnan EIGEN_NOT_A_MACRO (a) && (b == b) && (mpfr_cmp_d (a.mpfr_srcptr(),b) == 0 ); }
+
+inline bool operator != (const mpreal& a, const mpreal& b ){ return !(a == b); }
+inline bool operator != (const mpreal& a, const unsigned long int b ){ return !(a == b); }
+inline bool operator != (const mpreal& a, const unsigned int b ){ return !(a == b); }
+inline bool operator != (const mpreal& a, const long int b ){ return !(a == b); }
+inline bool operator != (const mpreal& a, const int b ){ return !(a == b); }
+inline bool operator != (const mpreal& a, const long double b ){ return !(a == b); }
+inline bool operator != (const mpreal& a, const double b ){ return !(a == b); }
+
+inline bool (isnan) (const mpreal& op){ return (mpfr_nan_p (op.mpfr_srcptr()) != 0 ); }
+inline bool (isinf) (const mpreal& op){ return (mpfr_inf_p (op.mpfr_srcptr()) != 0 ); }
+inline bool (isfinite) (const mpreal& op){ return (mpfr_number_p (op.mpfr_srcptr()) != 0 ); }
inline bool iszero (const mpreal& op){ return (mpfr_zero_p (op.mpfr_srcptr()) != 0 ); }
inline bool isint (const mpreal& op){ return (mpfr_integer_p(op.mpfr_srcptr()) != 0 ); }
#if (MPFR_VERSION >= MPFR_VERSION_NUM(3,0,0))
inline bool isregular(const mpreal& op){ return (mpfr_regular_p(op.mpfr_srcptr()));}
-#endif
+#endif
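The new mixed-type relational operators guard every mpfr_cmp_* call with NaN checks because MPFR's raw comparison results are meaningless for NaN operands: any ordered comparison involving a NaN must yield false, and != is simply the negation of ==. A sketch of the intended semantics:

    #include <limits>
    #include "mpreal.h"   // assumed on the include path

    void nan_demo()
    {
        mpfr::mpreal a = 1.0;
        double nan = std::numeric_limits<double>::quiet_NaN();
        bool r1 = (a >  nan);   // false: the (b == b) guard fails for NaN
        bool r2 = (a == nan);   // false
        bool r3 = (a != nan);   // true, defined as !(a == b)
        (void)r1; (void)r2; (void)r3;
    }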
//////////////////////////////////////////////////////////////////////////
// Type Converters
-inline bool mpreal::toBool (mp_rnd_t /*mode*/) const { return mpfr_zero_p (mpfr_srcptr()) == 0; }
-inline long mpreal::toLong (mp_rnd_t mode) const { return mpfr_get_si (mpfr_srcptr(), mode); }
-inline unsigned long mpreal::toULong (mp_rnd_t mode) const { return mpfr_get_ui (mpfr_srcptr(), mode); }
-inline float mpreal::toFloat (mp_rnd_t mode) const { return mpfr_get_flt(mpfr_srcptr(), mode); }
-inline double mpreal::toDouble (mp_rnd_t mode) const { return mpfr_get_d (mpfr_srcptr(), mode); }
-inline long double mpreal::toLDouble(mp_rnd_t mode) const { return mpfr_get_ld (mpfr_srcptr(), mode); }
-
-#if defined (MPREAL_HAVE_INT64_SUPPORT)
-inline int64_t mpreal::toInt64 (mp_rnd_t mode) const{ return mpfr_get_sj(mpfr_srcptr(), mode); }
-inline uint64_t mpreal::toUInt64(mp_rnd_t mode) const{ return mpfr_get_uj(mpfr_srcptr(), mode); }
-#endif
+inline bool mpreal::toBool ( ) const { return mpfr_zero_p (mpfr_srcptr()) == 0; }
+inline long mpreal::toLong (mp_rnd_t mode) const { return mpfr_get_si (mpfr_srcptr(), mode); }
+inline unsigned long mpreal::toULong (mp_rnd_t mode) const { return mpfr_get_ui (mpfr_srcptr(), mode); }
+inline float mpreal::toFloat (mp_rnd_t mode) const { return mpfr_get_flt(mpfr_srcptr(), mode); }
+inline double mpreal::toDouble (mp_rnd_t mode) const { return mpfr_get_d (mpfr_srcptr(), mode); }
+inline long double mpreal::toLDouble(mp_rnd_t mode) const { return mpfr_get_ld (mpfr_srcptr(), mode); }
+inline long long mpreal::toLLong (mp_rnd_t mode) const { return mpfr_get_sj (mpfr_srcptr(), mode); }
+inline unsigned long long mpreal::toULLong (mp_rnd_t mode) const { return mpfr_get_uj (mpfr_srcptr(), mode); }
inline ::mpfr_ptr mpreal::mpfr_ptr() { return mp; }
inline ::mpfr_srcptr mpreal::mpfr_ptr() const { return mp; }
@@ -1755,21 +1762,21 @@ inline std::string mpreal::toString(int n, int b, mp_rnd_t mode) const
std::ostringstream format;
- int digits = (n >= 0) ? n : bits2digits(mpfr_get_prec(mpfr_srcptr()));
-
+ int digits = (n >= 0) ? n : 1 + bits2digits(mpfr_get_prec(mpfr_srcptr()));
+
format << "%." << digits << "RNg";
return toString(format.str());
#else
- char *s, *ns = NULL;
+ char *s, *ns = NULL;
size_t slen, nslen;
mp_exp_t exp;
std::string out;
if(mpfr_inf_p(mp))
- {
+ {
if(mpfr_sgn(mp)>0) return "+Inf";
else return "-Inf";
}
@@ -1784,7 +1791,7 @@ inline std::string mpreal::toString(int n, int b, mp_rnd_t mode) const
{
slen = strlen(s);
nslen = strlen(ns);
- if(nslen<=slen)
+ if(nslen<=slen)
{
mpfr_free_str(s);
s = ns;
@@ -1801,7 +1808,7 @@ inline std::string mpreal::toString(int n, int b, mp_rnd_t mode) const
{
// Remove zeros starting from right end
char* ptr = s+slen-1;
- while (*ptr=='0' && ptr>s+exp) ptr--;
+ while (*ptr=='0' && ptr>s+exp) ptr--;
if(ptr==s+exp) out = std::string(s,exp+1);
else out = std::string(s,exp+1)+'.'+std::string(s+exp+1,ptr-(s+exp+1)+1);
@@ -1812,7 +1819,7 @@ inline std::string mpreal::toString(int n, int b, mp_rnd_t mode) const
{
// Remove zeros starting from right end
char* ptr = s+slen-1;
- while (*ptr=='0' && ptr>s+exp-1) ptr--;
+ while (*ptr=='0' && ptr>s+exp-1) ptr--;
if(ptr==s+exp-1) out = std::string(s,exp);
else out = std::string(s,exp)+'.'+std::string(s+exp,ptr-(s+exp)+1);
@@ -1825,7 +1832,7 @@ inline std::string mpreal::toString(int n, int b, mp_rnd_t mode) const
{
// Remove zeros starting from right end
char* ptr = s+slen-1;
- while (*ptr=='0' && ptr>s+1) ptr--;
+ while (*ptr=='0' && ptr>s+1) ptr--;
if(ptr==s+1) out = std::string(s,2);
else out = std::string(s,2)+'.'+std::string(s+2,ptr-(s+2)+1);
@@ -1836,7 +1843,7 @@ inline std::string mpreal::toString(int n, int b, mp_rnd_t mode) const
{
// Remove zeros starting from right end
char* ptr = s+slen-1;
- while (*ptr=='0' && ptr>s) ptr--;
+ while (*ptr=='0' && ptr>s) ptr--;
if(ptr==s) out = std::string(s,1);
else out = std::string(s,1)+'.'+std::string(s+1,ptr-(s+1)+1);
@@ -1863,7 +1870,7 @@ inline std::string mpreal::toString(int n, int b, mp_rnd_t mode) const
//////////////////////////////////////////////////////////////////////////
// I/O
-inline std::ostream& mpreal::output(std::ostream& os) const
+inline std::ostream& mpreal::output(std::ostream& os) const
{
std::ostringstream format;
const std::ios::fmtflags flags = os.flags();
@@ -1926,8 +1933,7 @@ inline int bits2digits(mp_prec_t b)
// Set/Get number properties
inline int sgn(const mpreal& op)
{
- int r = mpfr_signbit(op.mpfr_srcptr());
- return (r > 0? -1 : 1);
+ return mpfr_sgn(op.mpfr_srcptr());
}
inline mpreal& mpreal::setSign(int sign, mp_rnd_t RoundingMode)
@@ -1949,29 +1955,28 @@ inline mpreal& mpreal::setPrecision(int Precision, mp_rnd_t RoundingMode)
return *this;
}
-inline mpreal& mpreal::setInf(int sign)
-{
+inline mpreal& mpreal::setInf(int sign)
+{
mpfr_set_inf(mpfr_ptr(), sign);
MPREAL_MSVC_DEBUGVIEW_CODE;
return *this;
-}
+}
-inline mpreal& mpreal::setNan()
+inline mpreal& mpreal::setNan()
{
mpfr_set_nan(mpfr_ptr());
MPREAL_MSVC_DEBUGVIEW_CODE;
return *this;
}
-inline mpreal& mpreal::setZero(int sign)
+inline mpreal& mpreal::setZero(int sign)
{
-
#if (MPFR_VERSION >= MPFR_VERSION_NUM(3,0,0))
mpfr_set_zero(mpfr_ptr(), sign);
#else
mpfr_set_si(mpfr_ptr(), 0, (mpfr_get_default_rounding_mode)());
setSign(sign);
-#endif
+#endif
MPREAL_MSVC_DEBUGVIEW_CODE;
return *this;
@@ -2000,23 +2005,32 @@ inline int mpreal::set_exp (mp_exp_t e)
return x;
}
-inline const mpreal frexp(const mpreal& v, mp_exp_t* exp)
+inline const mpreal frexp(const mpreal& x, mp_exp_t* exp, mp_rnd_t mode = mpreal::get_default_rnd())
{
- mpreal x(v);
- *exp = x.get_exp();
- x.set_exp(0);
- return x;
+ mpreal y(x);
+#if (MPFR_VERSION >= MPFR_VERSION_NUM(3,1,0))
+ mpfr_frexp(exp,y.mpfr_ptr(),x.mpfr_srcptr(),mode);
+#else
+ *exp = mpfr_get_exp(y.mpfr_srcptr());
+ mpfr_set_exp(y.mpfr_ptr(),0);
+#endif
+ return y;
}
inline const mpreal ldexp(const mpreal& v, mp_exp_t exp)
{
mpreal x(v);
- // rounding is not important since we just increasing the exponent
- mpfr_mul_2si(x.mpfr_ptr(), x.mpfr_srcptr(), exp, mpreal::get_default_rnd());
+ // rounding is not important since we are just increasing the exponent (= exact operation)
+ mpfr_mul_2si(x.mpfr_ptr(), x.mpfr_srcptr(), exp, mpreal::get_default_rnd());
return x;
}
+inline const mpreal scalbn(const mpreal& v, mp_exp_t exp)
+{
+ return ldexp(v, exp);
+}
+
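frexp now takes a rounding mode and defers to mpfr_frexp on MPFR >= 3.1; together with ldexp and the new scalbn alias it round-trips x = m * 2^e with the mantissa in [0.5, 1). A sketch:

    #include "mpreal.h"   // assumed on the include path

    void exponent_demo()
    {
        mp_exp_t e;
        mpfr::mpreal x = 24.0;
        mpfr::mpreal m = mpfr::frexp(x, &e);   // m = 0.75, e = 5 (24 = 0.75 * 2^5)
        mpfr::mpreal y = mpfr::ldexp(m, e);    // y == x again
        mpfr::mpreal z = mpfr::scalbn(m, e);   // scalbn is an alias for ldexp
        (void)y; (void)z;
    }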
inline mpreal machine_epsilon(mp_prec_t prec)
{
/* the smallest eps such that 1 + eps != 1 */
@@ -2024,7 +2038,7 @@ inline mpreal machine_epsilon(mp_prec_t prec)
}
inline mpreal machine_epsilon(const mpreal& x)
-{
+{
/* the smallest eps such that x + eps != x */
if( x < 0)
{
@@ -2045,7 +2059,7 @@ inline mpreal minval(mp_prec_t prec)
inline mpreal maxval(mp_prec_t prec)
{
/* max = (1 - eps) * 2^emax, eps is machine epsilon */
- return (mpreal(1, prec) - machine_epsilon(prec)) << mpreal::get_emax();
+ return (mpreal(1, prec) - machine_epsilon(prec)) << mpreal::get_emax();
}
inline bool isEqualUlps(const mpreal& a, const mpreal& b, int maxUlps)
@@ -2063,12 +2077,26 @@ inline bool isEqualFuzzy(const mpreal& a, const mpreal& b)
return isEqualFuzzy(a, b, machine_epsilon((max)(1, (min)(abs(a), abs(b)))));
}
+//////////////////////////////////////////////////////////////////////////
+// C++11 sign functions.
+inline mpreal copysign(const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
+{
+ mpreal rop(0, mpfr_get_prec(x.mpfr_ptr()));
+ mpfr_setsign(rop.mpfr_ptr(), x.mpfr_srcptr(), mpfr_signbit(y.mpfr_srcptr()), rnd_mode);
+ return rop;
+}
+
+inline bool signbit(const mpreal& x)
+{
+ return mpfr_signbit(x.mpfr_srcptr());
+}
+
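copysign and signbit are added for C++11 <cmath> compatibility: copysign(x, y) returns x's magnitude carrying y's sign bit, and signbit reports the raw sign bit (so it also distinguishes negative zero and negatively signed NaN). A sketch:

    #include "mpreal.h"   // assumed on the include path

    void sign_demo()
    {
        mpfr::mpreal x =  3.0;
        mpfr::mpreal y = -0.5;
        mpfr::mpreal z = mpfr::copysign(x, y);   // -3.0: |x| carrying y's sign bit
        bool neg = mpfr::signbit(z);             // true
        (void)neg;
    }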
inline const mpreal modf(const mpreal& v, mpreal& n)
{
mpreal f(v);
// rounding is not important since we are using the same number
- mpfr_frac (f.mpfr_ptr(),f.mpfr_srcptr(),mpreal::get_default_rnd());
+ mpfr_frac (f.mpfr_ptr(),f.mpfr_srcptr(),mpreal::get_default_rnd());
mpfr_trunc(n.mpfr_ptr(),v.mpfr_srcptr());
return f;
}
@@ -2131,7 +2159,7 @@ inline mp_exp_t mpreal::get_emax_max (void)
#define MPREAL_UNARY_MATH_FUNCTION_BODY(f) \
mpreal y(0, mpfr_get_prec(x.mpfr_srcptr())); \
mpfr_##f(y.mpfr_ptr(), x.mpfr_srcptr(), r); \
- return y;
+ return y;
inline const mpreal sqr (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd())
{ MPREAL_UNARY_MATH_FUNCTION_BODY(sqr ); }
@@ -2154,7 +2182,7 @@ inline const mpreal sqrt(const unsigned int v, mp_rnd_t rnd_mode)
inline const mpreal sqrt(const long int v, mp_rnd_t rnd_mode)
{
if (v>=0) return sqrt(static_cast<unsigned long int>(v),rnd_mode);
- else return mpreal().setNan(); // NaN
+ else return mpreal().setNan(); // NaN
}
inline const mpreal sqrt(const int v, mp_rnd_t rnd_mode)
@@ -2165,9 +2193,9 @@ inline const mpreal sqrt(const int v, mp_rnd_t rnd_mode)
inline const mpreal root(const mpreal& x, unsigned long int k, mp_rnd_t r = mpreal::get_default_rnd())
{
- mpreal y(0, mpfr_get_prec(x.mpfr_srcptr()));
- mpfr_root(y.mpfr_ptr(), x.mpfr_srcptr(), k, r);
- return y;
+ mpreal y(0, mpfr_get_prec(x.mpfr_srcptr()));
+ mpfr_root(y.mpfr_ptr(), x.mpfr_srcptr(), k, r);
+ return y;
}
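
Illustrative use of root, the k-th root computed through mpfr_root:

    mpfr::mpreal r = mpfr::root(mpfr::mpreal(27), 3); // r == 3 (cube root)
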
inline const mpreal dim(const mpreal& a, const mpreal& b, mp_rnd_t r = mpreal::get_default_rnd())
@@ -2209,6 +2237,8 @@ inline const mpreal acos (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd
inline const mpreal asin (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(asin ); }
inline const mpreal atan (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(atan ); }
+inline const mpreal logb (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { return log2 (abs(x),r); }
+
inline const mpreal acot (const mpreal& v, mp_rnd_t r = mpreal::get_default_rnd()) { return atan (1/v, r); }
inline const mpreal asec (const mpreal& v, mp_rnd_t r = mpreal::get_default_rnd()) { return acos (1/v, r); }
inline const mpreal acsc (const mpreal& v, mp_rnd_t r = mpreal::get_default_rnd()) { return asin (1/v, r); }
@@ -2230,6 +2260,7 @@ inline const mpreal log1p (const mpreal& x, mp_rnd_t r = mpreal::get_default_r
inline const mpreal expm1 (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(expm1 ); }
inline const mpreal eint (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(eint ); }
inline const mpreal gamma (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(gamma ); }
+inline const mpreal tgamma (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(gamma ); }
inline const mpreal lngamma (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(lngamma); }
inline const mpreal zeta (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(zeta ); }
inline const mpreal erf (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(erf ); }
@@ -2254,7 +2285,7 @@ inline const mpreal hypot (const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode =
}
inline const mpreal remainder (const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
-{
+{
mpreal a(0,(std::max)(y.getPrecision(), x.getPrecision()));
mpfr_remainder(a.mpfr_ptr(), x.mpfr_srcptr(), y.mpfr_srcptr(), rnd_mode);
return a;
@@ -2307,9 +2338,9 @@ inline const mpreal fma (const mpreal& v1, const mpreal& v2, const mpreal& v3, m
mpreal a;
mp_prec_t p1, p2, p3;
- p1 = v1.get_prec();
- p2 = v2.get_prec();
- p3 = v3.get_prec();
+ p1 = v1.get_prec();
+ p2 = v2.get_prec();
+ p3 = v3.get_prec();
a.set_prec(p3>p2?(p3>p1?p3:p1):(p2>p1?p2:p1));
@@ -2322,9 +2353,9 @@ inline const mpreal fms (const mpreal& v1, const mpreal& v2, const mpreal& v3, m
mpreal a;
mp_prec_t p1, p2, p3;
- p1 = v1.get_prec();
- p2 = v2.get_prec();
- p3 = v3.get_prec();
+ p1 = v1.get_prec();
+ p2 = v2.get_prec();
+ p3 = v3.get_prec();
a.set_prec(p3>p2?(p3>p1?p3:p1):(p2>p1?p2:p1));
@@ -2337,8 +2368,8 @@ inline const mpreal agm (const mpreal& v1, const mpreal& v2, mp_rnd_t rnd_mode =
mpreal a;
mp_prec_t p1, p2;
- p1 = v1.get_prec();
- p2 = v2.get_prec();
+ p1 = v1.get_prec();
+ p2 = v2.get_prec();
a.set_prec(p1>p2?p1:p2);
@@ -2347,16 +2378,17 @@ inline const mpreal agm (const mpreal& v1, const mpreal& v2, mp_rnd_t rnd_mode =
return a;
}
-inline const mpreal sum (const mpreal tab[], unsigned long int n, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
+inline const mpreal sum (const mpreal tab[], const unsigned long int n, int& status, mp_rnd_t mode = mpreal::get_default_rnd())
{
+ mpfr_srcptr *p = new mpfr_srcptr[n];
+
+ for (unsigned long int i = 0; i < n; i++)
+ p[i] = tab[i].mpfr_srcptr();
+
mpreal x;
- mpfr_ptr* t;
- unsigned long int i;
+ status = mpfr_sum(x.mpfr_ptr(), (mpfr_ptr*)p, n, mode);
- t = new mpfr_ptr[n];
- for (i=0;i<n;i++) t[i] = (mpfr_ptr)tab[i].mp;
- mpfr_sum(x.mp,t,n,rnd_mode);
- delete[] t;
+ delete [] p;
return x;
}
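
The reworked sum now surfaces mpfr_sum's ternary return value through the new status out-parameter (0 means the rounded result is exact). A minimal usage sketch, not part of the patch:

    using mpfr::mpreal;
    mpreal tab[3] = { 1, 2, 3 };
    int status = 0;
    mpreal total = mpfr::sum(tab, 3, status); // total == 6, status == 0 (exact)
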
@@ -2369,9 +2401,9 @@ inline int sinh_cosh(mpreal& s, mpreal& c, const mpreal& v, mp_rnd_t rnd_mode =
return mpfr_sinh_cosh(s.mp,c.mp,v.mp,rnd_mode);
}
-inline const mpreal li2 (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd())
-{
- MPREAL_UNARY_MATH_FUNCTION_BODY(li2);
+inline const mpreal li2 (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd())
+{
+ MPREAL_UNARY_MATH_FUNCTION_BODY(li2);
}
inline const mpreal rem (const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
@@ -2383,23 +2415,23 @@ inline const mpreal rem (const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode = m
inline const mpreal mod (const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
{
(void)rnd_mode;
-
- /*
+
+ /*
m = mod(x,y) if y != 0, returns x - n*y where n = floor(x/y)
The following are true by convention:
- mod(x,0) is x
- mod(x,x) is 0
- - mod(x,y) for x != y and y != 0 has the same sign as y.
-
+ - mod(x,y) for x != y and y != 0 has the same sign as y.
+
*/
if(iszero(y)) return x;
if(x == y) return 0;
mpreal m = x - floor(x / y) * y;
-
+
    m.setSign(sgn(y)); // make sure the result has the same sign as y
return m;
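
A sketch exercising the conventions documented in the comment above (illustrative values):

    using mpfr::mpreal;
    mpfr::mod(mpreal(5), mpreal(0));  //  5 : mod(x,0) is x
    mpfr::mod(mpreal(5), mpreal(5));  //  0 : mod(x,x) is 0
    mpfr::mod(mpreal(5), mpreal(-3)); // -1 : nonzero result carries y's sign
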
@@ -2410,8 +2442,8 @@ inline const mpreal fmod (const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode =
mpreal a;
mp_prec_t yp, xp;
- yp = y.get_prec();
- xp = x.get_prec();
+ yp = y.get_prec();
+ xp = x.get_prec();
a.set_prec(yp>xp?yp:xp);
@@ -2553,33 +2585,24 @@ inline const mpreal nextbelow (const mpreal& x)
inline const mpreal urandomb (gmp_randstate_t& state)
{
mpreal x;
- mpfr_urandomb(x.mp,state);
+ mpfr_urandomb(x.mpfr_ptr(),state);
return x;
}
-#if (MPFR_VERSION >= MPFR_VERSION_NUM(3,1,0))
-// use gmp_randinit_default() to init state, gmp_randclear() to clear
+#if (MPFR_VERSION >= MPFR_VERSION_NUM(3,0,0))
inline const mpreal urandom (gmp_randstate_t& state, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
{
mpreal x;
- mpfr_urandom(x.mp,state,rnd_mode);
- return x;
-}
-
-inline const mpreal grandom (gmp_randstate_t& state, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
-{
- mpreal x;
- mpfr_grandom(x.mp, NULL, state, rnd_mode);
+ mpfr_urandom(x.mpfr_ptr(), state, rnd_mode);
return x;
}
-
-#endif
+#endif
#if (MPFR_VERSION <= MPFR_VERSION_NUM(2,4,2))
inline const mpreal random2 (mp_size_t size, mp_exp_t exp)
{
mpreal x;
- mpfr_random2(x.mp,size,exp);
+ mpfr_random2(x.mpfr_ptr(),size,exp);
return x;
}
#endif
@@ -2590,16 +2613,15 @@ inline const mpreal random2 (mp_size_t size, mp_exp_t exp)
// seed != 0
inline const mpreal random(unsigned int seed = 0)
{
-
#if (MPFR_VERSION >= MPFR_VERSION_NUM(3,0,0))
static gmp_randstate_t state;
- static bool isFirstTime = true;
+ static bool initialize = true;
- if(isFirstTime)
+ if(initialize)
{
gmp_randinit_default(state);
gmp_randseed_ui(state,0);
- isFirstTime = false;
+ initialize = false;
}
if(seed != 0) gmp_randseed_ui(state,seed);
@@ -2612,17 +2634,25 @@ inline const mpreal random(unsigned int seed = 0)
}
-#if (MPFR_VERSION >= MPFR_VERSION_NUM(3,0,0))
+#if (MPFR_VERSION >= MPFR_VERSION_NUM(3,1,0))
+
+inline const mpreal grandom (gmp_randstate_t& state, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
+{
+ mpreal x;
+ mpfr_grandom(x.mpfr_ptr(), NULL, state, rnd_mode);
+ return x;
+}
+
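
Both generators take an explicit GMP random state; a minimal sketch (use gmp_randinit_default to create the state and gmp_randclear to dispose of it):

    gmp_randstate_t state;
    gmp_randinit_default(state);
    gmp_randseed_ui(state, 42);
    mpfr::mpreal u = mpfr::urandom(state); // uniform in [0, 1)
    mpfr::mpreal g = mpfr::grandom(state); // standard normal, MPFR >= 3.1 only
    gmp_randclear(state);
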
inline const mpreal grandom(unsigned int seed = 0)
{
static gmp_randstate_t state;
- static bool isFirstTime = true;
+ static bool initialize = true;
- if(isFirstTime)
+ if(initialize)
{
gmp_randinit_default(state);
gmp_randseed_ui(state,0);
- isFirstTime = false;
+ initialize = false;
}
if(seed != 0) gmp_randseed_ui(state,seed);
@@ -2634,17 +2664,17 @@ inline const mpreal grandom(unsigned int seed = 0)
//////////////////////////////////////////////////////////////////////////
// Set/Get global properties
inline void mpreal::set_default_prec(mp_prec_t prec)
-{
- mpfr_set_default_prec(prec);
+{
+ mpfr_set_default_prec(prec);
}
inline void mpreal::set_default_rnd(mp_rnd_t rnd_mode)
-{
- mpfr_set_default_rounding_mode(rnd_mode);
+{
+ mpfr_set_default_rounding_mode(rnd_mode);
}
inline bool mpreal::fits_in_bits(double x, int n)
-{
+{
int i;
double t;
return IsInf(x) || (std::modf ( std::ldexp ( std::frexp ( x, &i ), n ), &t ) == 0.0);
@@ -2894,7 +2924,7 @@ inline const mpreal pow(const int a, const double b, mp_rnd_t rnd_mode)
else return pow(mpreal(a),mpreal(b),rnd_mode); //mpfr_pow
}
-// pow long double
+// pow long double
inline const mpreal pow(const long double a, const long double b, mp_rnd_t rnd_mode)
{
return pow(mpreal(a),mpreal(b),rnd_mode);
@@ -2953,9 +2983,9 @@ namespace std
{
// we are allowed to extend namespace std with specializations only
template <>
- inline void swap(mpfr::mpreal& x, mpfr::mpreal& y)
- {
- return mpfr::swap(x, y);
+ inline void swap(mpfr::mpreal& x, mpfr::mpreal& y)
+ {
+ return mpfr::swap(x, y);
}
template<>
@@ -2966,7 +2996,7 @@ namespace std
static const bool is_signed = true;
static const bool is_integer = false;
static const bool is_exact = false;
- static const int radix = 2;
+ static const int radix = 2;
static const bool has_infinity = true;
static const bool has_quiet_NaN = true;
@@ -2986,7 +3016,7 @@ namespace std
// Returns smallest eps such that 1 + eps != 1 (classic machine epsilon)
inline static mpfr::mpreal epsilon(mp_prec_t precision = mpfr::mpreal::get_default_prec()) { return mpfr::machine_epsilon(precision); }
-
+
// Returns smallest eps such that x + eps != x (relative machine epsilon)
inline static mpfr::mpreal epsilon(const mpfr::mpreal& x) { return mpfr::machine_epsilon(x); }
@@ -2994,8 +3024,8 @@ namespace std
{
mp_rnd_t r = mpfr::mpreal::get_default_rnd();
- if(r == GMP_RNDN) return mpfr::mpreal(0.5, precision);
- else return mpfr::mpreal(1.0, precision);
+ if(r == GMP_RNDN) return mpfr::mpreal(0.5, precision);
+ else return mpfr::mpreal(1.0, precision);
}
inline static const mpfr::mpreal infinity() { return mpfr::const_infinity(); }
@@ -3006,17 +3036,17 @@ namespace std
// Please note, exponent range is not fixed in MPFR
static const int min_exponent = MPFR_EMIN_DEFAULT;
static const int max_exponent = MPFR_EMAX_DEFAULT;
- MPREAL_PERMISSIVE_EXPR static const int min_exponent10 = (int) (MPFR_EMIN_DEFAULT * 0.3010299956639811);
- MPREAL_PERMISSIVE_EXPR static const int max_exponent10 = (int) (MPFR_EMAX_DEFAULT * 0.3010299956639811);
+ MPREAL_PERMISSIVE_EXPR static const int min_exponent10 = (int) (MPFR_EMIN_DEFAULT * 0.3010299956639811);
+ MPREAL_PERMISSIVE_EXPR static const int max_exponent10 = (int) (MPFR_EMAX_DEFAULT * 0.3010299956639811);
#ifdef MPREAL_HAVE_DYNAMIC_STD_NUMERIC_LIMITS
// The following members should be constant according to the standard, but they can vary in MPFR
- // So we define them as functions here.
+ // So we define them as functions here.
//
// This is the preferable way to specialize std::numeric_limits<mpfr::mpreal>.
- // But it is incompatible with standard std::numeric_limits and might not work with other libraries, e.g. boost.
- // See below for compatible implementation.
+ // But it is incompatible with standard std::numeric_limits and might not work with other libraries, e.g. boost.
+ // See below for a compatible implementation.
inline static float_round_style round_style()
{
mp_rnd_t r = mpfr::mpreal::get_default_rnd();
@@ -3024,9 +3054,9 @@ namespace std
switch (r)
{
case GMP_RNDN: return round_to_nearest;
- case GMP_RNDZ: return round_toward_zero;
- case GMP_RNDU: return round_toward_infinity;
- case GMP_RNDD: return round_toward_neg_infinity;
+ case GMP_RNDZ: return round_toward_zero;
+ case GMP_RNDU: return round_toward_infinity;
+ case GMP_RNDD: return round_toward_neg_infinity;
default: return round_indeterminate;
}
}
@@ -3053,13 +3083,13 @@ namespace std
// If possible, please use functions digits() and round_style() defined above.
//
// These (default) values are preserved for compatibility with existing libraries, e.g. boost.
- // Change them accordingly to your application.
+ // Change them according to your application.
//
// For example, if you use 256 bits of precision uniformly in your program, then:
// digits = 256
- // digits10 = 77
+ // digits10 = 77
// max_digits10 = 78
- //
+ //
// Approximate formula for decimal digits is: digits10 = floor(log10(2) * digits). See bits2digits() for more details.
static const std::float_round_style round_style = round_to_nearest;
diff --git a/unsupported/test/mpreal_support.cpp b/unsupported/test/mpreal_support.cpp
index bc00382be..685e7ea45 100644
--- a/unsupported/test/mpreal_support.cpp
+++ b/unsupported/test/mpreal_support.cpp
@@ -12,11 +12,13 @@ void test_mpreal_support()
// set precision to 256 bits (double has only 53 bits)
mpreal::set_default_prec(256);
typedef Matrix<mpreal,Eigen::Dynamic,Eigen::Dynamic> MatrixXmp;
+ typedef Matrix<std::complex<mpreal>,Eigen::Dynamic,Eigen::Dynamic> MatrixXcmp;
std::cerr << "epsilon = " << NumTraits<mpreal>::epsilon() << "\n";
std::cerr << "dummy_precision = " << NumTraits<mpreal>::dummy_precision() << "\n";
std::cerr << "highest = " << NumTraits<mpreal>::highest() << "\n";
std::cerr << "lowest = " << NumTraits<mpreal>::lowest() << "\n";
+ std::cerr << "digits10 = " << NumTraits<mpreal>::digits10() << "\n";
for(int i = 0; i < g_repeat; i++) {
int s = Eigen::internal::random<int>(1,100);
@@ -24,6 +26,10 @@ void test_mpreal_support()
MatrixXmp B = MatrixXmp::Random(s,s);
MatrixXmp S = A.adjoint() * A;
MatrixXmp X;
+ MatrixXcmp Ac = MatrixXcmp::Random(s,s);
+ MatrixXcmp Bc = MatrixXcmp::Random(s,s);
+ MatrixXcmp Sc = Ac.adjoint() * Ac;
+ MatrixXcmp Xc;
// Basic stuff
VERIFY_IS_APPROX(A.real(), A);
@@ -32,12 +38,14 @@ void test_mpreal_support()
VERIFY_IS_APPROX(A.array().abs2().sqrt(), A.array().abs());
VERIFY_IS_APPROX(A.array().sin(), sin(A.array()));
VERIFY_IS_APPROX(A.array().cos(), cos(A.array()));
-
// Cholesky
X = S.selfadjointView<Lower>().llt().solve(B);
VERIFY_IS_APPROX((S.selfadjointView<Lower>()*X).eval(),B);
+ Xc = Sc.selfadjointView<Lower>().llt().solve(Bc);
+ VERIFY_IS_APPROX((Sc.selfadjointView<Lower>()*Xc).eval(),Bc);
+
// partial LU
X = A.lu().solve(B);
VERIFY_IS_APPROX((A*X).eval(),B);
diff --git a/unsupported/test/polynomialsolver.cpp b/unsupported/test/polynomialsolver.cpp
index de79f1538..0c87478dd 100644
--- a/unsupported/test/polynomialsolver.cpp
+++ b/unsupported/test/polynomialsolver.cpp
@@ -38,6 +38,9 @@ bool aux_evalSolver( const POLYNOMIAL& pols, SOLVER& psolve )
const Index deg = pols.size()-1;
+ // Test template constructor from coefficient vector
+ SOLVER solve_constr (pols);
+
psolve.compute( pols );
const RootsType& roots( psolve.roots() );
EvalRootsType evr( deg );
@@ -104,6 +107,7 @@ void evalSolverSugarFunction( const POLYNOMIAL& pols, const ROOTS& roots, const
// 1) the roots found are correct
// 2) the roots have distinct moduli
+ typedef typename POLYNOMIAL::Scalar Scalar;
typedef typename REAL_ROOTS::Scalar Real;
//Test realRoots
@@ -118,7 +122,7 @@ void evalSolverSugarFunction( const POLYNOMIAL& pols, const ROOTS& roots, const
bool found = false;
for( size_t j=0; j<calc_realRoots.size()&& !found; ++j )
{
- if( internal::isApprox( calc_realRoots[i], real_roots[j] ), psPrec ){
+ if( internal::isApprox( calc_realRoots[i], real_roots[j], psPrec ) ){
found = true; }
}
VERIFY( found );
@@ -209,5 +213,6 @@ void test_polynomialsolver()
CALL_SUBTEST_10((polynomialsolver<double,Dynamic>(
internal::random<int>(9,13)
)) );
+ CALL_SUBTEST_11((polynomialsolver<float,Dynamic>(1)) );
}
}
diff --git a/unsupported/test/sparse_extra.cpp b/unsupported/test/sparse_extra.cpp
index 1ee791b0f..a010ceb93 100644
--- a/unsupported/test/sparse_extra.cpp
+++ b/unsupported/test/sparse_extra.cpp
@@ -49,7 +49,6 @@ bool test_random_setter(DynamicSparseMatrix<T>& sm, const DenseType& ref, const
template<typename SparseMatrixType> void sparse_extra(const SparseMatrixType& ref)
{
- typedef typename SparseMatrixType::Index Index;
const Index rows = ref.rows();
const Index cols = ref.cols();
typedef typename SparseMatrixType::Scalar Scalar;
diff --git a/unsupported/test/special_functions.cpp b/unsupported/test/special_functions.cpp
new file mode 100644
index 000000000..057fb3e92
--- /dev/null
+++ b/unsupported/test/special_functions.cpp
@@ -0,0 +1,345 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2016 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+#include "../Eigen/SpecialFunctions"
+
+template<typename X, typename Y>
+void verify_component_wise(const X& x, const Y& y)
+{
+ for(Index i=0; i<x.size(); ++i)
+ {
+ if((numext::isfinite)(y(i)))
+ VERIFY_IS_APPROX( x(i), y(i) );
+ else if((numext::isnan)(y(i)))
+ VERIFY((numext::isnan)(x(i)));
+ else
+ VERIFY_IS_EQUAL( x(i), y(i) );
+ }
+}
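
The isnan branch above is needed because NaN compares unequal to everything, itself included, so VERIFY_IS_APPROX can never succeed on NaN operands; a minimal self-contained illustration:

    #include <cassert>
    #include <limits>

    int main()
    {
        double n = std::numeric_limits<double>::quiet_NaN();
        assert(n != n); // IEEE 754: NaN is never equal to itself, so
                        // equality-based checks must special-case it
    }
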
+
+template<typename ArrayType> void array_special_functions()
+{
+ using std::abs;
+ using std::sqrt;
+ typedef typename ArrayType::Scalar Scalar;
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+
+ Scalar plusinf = std::numeric_limits<Scalar>::infinity();
+ Scalar nan = std::numeric_limits<Scalar>::quiet_NaN();
+
+ Index rows = internal::random<Index>(1,30);
+ Index cols = 1;
+
+ // API
+ {
+ ArrayType m1 = ArrayType::Random(rows,cols);
+#if EIGEN_HAS_C99_MATH
+ VERIFY_IS_APPROX(m1.lgamma(), lgamma(m1));
+ VERIFY_IS_APPROX(m1.digamma(), digamma(m1));
+ VERIFY_IS_APPROX(m1.erf(), erf(m1));
+ VERIFY_IS_APPROX(m1.erfc(), erfc(m1));
+#endif // EIGEN_HAS_C99_MATH
+ }
+
+
+#if EIGEN_HAS_C99_MATH
+ // check special functions (comparing against numpy/scipy reference values)
+ if (!NumTraits<Scalar>::IsComplex)
+ {
+
+ {
+ ArrayType m1 = ArrayType::Random(rows,cols);
+ ArrayType m2 = ArrayType::Random(rows,cols);
+
+ // Test various properties of igamma & igammac. These are normalized
+ // gamma integrals where
+ // igammac(a, x) = Gamma(a, x) / Gamma(a)
+ // igamma(a, x) = gamma(a, x) / Gamma(a)
+ // where Gamma and gamma are considered the standard unnormalized
+ // upper and lower incomplete gamma functions, respectively.
+ ArrayType a = m1.abs() + 2;
+ ArrayType x = m2.abs() + 2;
+ ArrayType zero = ArrayType::Zero(rows, cols);
+ ArrayType one = ArrayType::Constant(rows, cols, Scalar(1.0));
+ ArrayType a_m1 = a - one;
+ ArrayType Gamma_a_x = Eigen::igammac(a, x) * a.lgamma().exp();
+ ArrayType Gamma_a_m1_x = Eigen::igammac(a_m1, x) * a_m1.lgamma().exp();
+ ArrayType gamma_a_x = Eigen::igamma(a, x) * a.lgamma().exp();
+ ArrayType gamma_a_m1_x = Eigen::igamma(a_m1, x) * a_m1.lgamma().exp();
+
+ // Gamma(a, 0) == Gamma(a)
+ VERIFY_IS_APPROX(Eigen::igammac(a, zero), one);
+
+ // Gamma(a, x) + gamma(a, x) == Gamma(a)
+ VERIFY_IS_APPROX(Gamma_a_x + gamma_a_x, a.lgamma().exp());
+
+ // Gamma(a, x) == (a - 1) * Gamma(a-1, x) + x^(a-1) * exp(-x)
+ VERIFY_IS_APPROX(Gamma_a_x, (a - 1) * Gamma_a_m1_x + x.pow(a-1) * (-x).exp());
+
+ // gamma(a, x) == (a - 1) * gamma(a-1, x) - x^(a-1) * exp(-x)
+ VERIFY_IS_APPROX(gamma_a_x, (a - 1) * gamma_a_m1_x - x.pow(a-1) * (-x).exp());
+ }
+
+ {
+ // Check exact values of igamma and igammac against a third-party calculation.
+ Scalar a_s[] = {Scalar(0), Scalar(1), Scalar(1.5), Scalar(4), Scalar(0.0001), Scalar(1000.5)};
+ Scalar x_s[] = {Scalar(0), Scalar(1), Scalar(1.5), Scalar(4), Scalar(0.0001), Scalar(1000.5)};
+
+ // location i*6+j corresponds to a_s[i], x_s[j].
+ Scalar igamma_s[][6] = {{0.0, nan, nan, nan, nan, nan},
+ {0.0, 0.6321205588285578, 0.7768698398515702,
+ 0.9816843611112658, 9.999500016666262e-05, 1.0},
+ {0.0, 0.4275932955291202, 0.608374823728911,
+ 0.9539882943107686, 7.522076445089201e-07, 1.0},
+ {0.0, 0.01898815687615381, 0.06564245437845008,
+ 0.5665298796332909, 4.166333347221828e-18, 1.0},
+ {0.0, 0.9999780593618628, 0.9999899967080838,
+ 0.9999996219837988, 0.9991370418689945, 1.0},
+ {0.0, 0.0, 0.0, 0.0, 0.0, 0.5042041932513908}};
+ Scalar igammac_s[][6] = {{nan, nan, nan, nan, nan, nan},
+ {1.0, 0.36787944117144233, 0.22313016014842982,
+ 0.018315638888734182, 0.9999000049998333, 0.0},
+ {1.0, 0.5724067044708798, 0.3916251762710878,
+ 0.04601170568923136, 0.9999992477923555, 0.0},
+ {1.0, 0.9810118431238462, 0.9343575456215499,
+ 0.4334701203667089, 1.0, 0.0},
+ {1.0, 2.1940638138146658e-05, 1.0003291916285e-05,
+ 3.7801620118431334e-07, 0.0008629581310054535,
+ 0.0},
+ {1.0, 1.0, 1.0, 1.0, 1.0, 0.49579580674813944}};
+ for (int i = 0; i < 6; ++i) {
+ for (int j = 0; j < 6; ++j) {
+ if ((std::isnan)(igamma_s[i][j])) {
+ VERIFY((std::isnan)(numext::igamma(a_s[i], x_s[j])));
+ } else {
+ VERIFY_IS_APPROX(numext::igamma(a_s[i], x_s[j]), igamma_s[i][j]);
+ }
+
+ if ((std::isnan)(igammac_s[i][j])) {
+ VERIFY((std::isnan)(numext::igammac(a_s[i], x_s[j])));
+ } else {
+ VERIFY_IS_APPROX(numext::igammac(a_s[i], x_s[j]), igammac_s[i][j]);
+ }
+ }
+ }
+ }
+ }
+#endif // EIGEN_HAS_C99_MATH
+
+ // Check the zeta function against scipy.special.zeta
+ {
+ ArrayType x(7), q(7), res(7), ref(7);
+ x << 1.5, 4, 10.5, 10000.5, 3, 1, 0.9;
+ q << 2, 1.5, 3, 1.0001, -2.5, 1.2345, 1.2345;
+ ref << 1.61237534869, 0.234848505667, 1.03086757337e-5, 0.367879440865, 0.054102025820864097, plusinf, nan;
+ CALL_SUBTEST( verify_component_wise(ref, ref); );
+ CALL_SUBTEST( res = x.zeta(q); verify_component_wise(res, ref); );
+ CALL_SUBTEST( res = zeta(x,q); verify_component_wise(res, ref); );
+ }
+
+ // digamma
+ {
+ ArrayType x(7), res(7), ref(7);
+ x << 1, 1.5, 4, -10.5, 10000.5, 0, -1;
+ ref << -0.5772156649015329, 0.03648997397857645, 1.2561176684318, 2.398239129535781, 9.210340372392849, plusinf, plusinf;
+ CALL_SUBTEST( verify_component_wise(ref, ref); );
+
+ CALL_SUBTEST( res = x.digamma(); verify_component_wise(res, ref); );
+ CALL_SUBTEST( res = digamma(x); verify_component_wise(res, ref); );
+ }
+
+
+#if EIGEN_HAS_C99_MATH
+ {
+ ArrayType n(11), x(11), res(11), ref(11);
+ n << 1, 1, 1, 1.5, 17, 31, 28, 8, 42, 147, 170;
+ x << 2, 3, 25.5, 1.5, 4.7, 11.8, 17.7, 30.2, 15.8, 54.1, 64;
+ ref << 0.644934066848, 0.394934066848, 0.0399946696496, nan, 293.334565435, 0.445487887616, -2.47810300902e-07, -8.29668781082e-09, -0.434562276666, 0.567742190178, -0.0108615497927;
+ CALL_SUBTEST( verify_component_wise(ref, ref); );
+
+ if(sizeof(RealScalar)>=8) { // double
+ // Reason for commented line: http://eigen.tuxfamily.org/bz/show_bug.cgi?id=1232
+ // CALL_SUBTEST( res = x.polygamma(n); verify_component_wise(res, ref); );
+ CALL_SUBTEST( res = polygamma(n,x); verify_component_wise(res, ref); );
+ }
+ else {
+ // CALL_SUBTEST( res = x.polygamma(n); verify_component_wise(res.head(8), ref.head(8)); );
+ CALL_SUBTEST( res = polygamma(n,x); verify_component_wise(res.head(8), ref.head(8)); );
+ }
+ }
+#endif
+
+#if EIGEN_HAS_C99_MATH
+ {
+ // Inputs and ground truth generated with scipy via:
+ // a = np.logspace(-3, 3, 5) - 1e-3
+ // b = np.logspace(-3, 3, 5) - 1e-3
+ // x = np.linspace(-0.1, 1.1, 5)
+ // (full_a, full_b, full_x) = np.vectorize(lambda a, b, x: (a, b, x))(*np.ix_(a, b, x))
+ // full_a = full_a.flatten().tolist() # same for full_b, full_x
+ // v = scipy.special.betainc(full_a, full_b, full_x).flatten().tolist()
+ //
+ // Note in Eigen, we call betainc with arguments in the order (x, a, b).
+ ArrayType a(125);
+ ArrayType b(125);
+ ArrayType x(125);
+ ArrayType v(125);
+ ArrayType res(125);
+
+ a << 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.03062277660168379, 0.03062277660168379, 0.03062277660168379,
+ 0.03062277660168379, 0.03062277660168379, 0.03062277660168379,
+ 0.03062277660168379, 0.03062277660168379, 0.03062277660168379,
+ 0.03062277660168379, 0.03062277660168379, 0.03062277660168379,
+ 0.03062277660168379, 0.03062277660168379, 0.03062277660168379,
+ 0.03062277660168379, 0.03062277660168379, 0.03062277660168379,
+ 0.03062277660168379, 0.03062277660168379, 0.03062277660168379,
+ 0.03062277660168379, 0.03062277660168379, 0.03062277660168379,
+ 0.03062277660168379, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999,
+ 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999,
+ 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999,
+ 31.62177660168379, 31.62177660168379, 31.62177660168379,
+ 31.62177660168379, 31.62177660168379, 31.62177660168379,
+ 31.62177660168379, 31.62177660168379, 31.62177660168379,
+ 31.62177660168379, 31.62177660168379, 31.62177660168379,
+ 31.62177660168379, 31.62177660168379, 31.62177660168379,
+ 31.62177660168379, 31.62177660168379, 31.62177660168379,
+ 31.62177660168379, 31.62177660168379, 31.62177660168379,
+ 31.62177660168379, 31.62177660168379, 31.62177660168379,
+ 31.62177660168379, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999,
+ 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999,
+ 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999,
+ 999.999, 999.999, 999.999;
+
+ b << 0.0, 0.0, 0.0, 0.0, 0.0, 0.03062277660168379, 0.03062277660168379,
+ 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.999,
+ 0.999, 0.999, 0.999, 0.999, 31.62177660168379, 31.62177660168379,
+ 31.62177660168379, 31.62177660168379, 31.62177660168379, 999.999,
+ 999.999, 999.999, 999.999, 999.999, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.03062277660168379, 0.03062277660168379, 0.03062277660168379,
+ 0.03062277660168379, 0.03062277660168379, 0.999, 0.999, 0.999, 0.999,
+ 0.999, 31.62177660168379, 31.62177660168379, 31.62177660168379,
+ 31.62177660168379, 31.62177660168379, 999.999, 999.999, 999.999,
+ 999.999, 999.999, 0.0, 0.0, 0.0, 0.0, 0.0, 0.03062277660168379,
+ 0.03062277660168379, 0.03062277660168379, 0.03062277660168379,
+ 0.03062277660168379, 0.999, 0.999, 0.999, 0.999, 0.999,
+ 31.62177660168379, 31.62177660168379, 31.62177660168379,
+ 31.62177660168379, 31.62177660168379, 999.999, 999.999, 999.999,
+ 999.999, 999.999, 0.0, 0.0, 0.0, 0.0, 0.0, 0.03062277660168379,
+ 0.03062277660168379, 0.03062277660168379, 0.03062277660168379,
+ 0.03062277660168379, 0.999, 0.999, 0.999, 0.999, 0.999,
+ 31.62177660168379, 31.62177660168379, 31.62177660168379,
+ 31.62177660168379, 31.62177660168379, 999.999, 999.999, 999.999,
+ 999.999, 999.999, 0.0, 0.0, 0.0, 0.0, 0.0, 0.03062277660168379,
+ 0.03062277660168379, 0.03062277660168379, 0.03062277660168379,
+ 0.03062277660168379, 0.999, 0.999, 0.999, 0.999, 0.999,
+ 31.62177660168379, 31.62177660168379, 31.62177660168379,
+ 31.62177660168379, 31.62177660168379, 999.999, 999.999, 999.999,
+ 999.999, 999.999;
+
+ x << -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5,
+ 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2,
+ 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1,
+ 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1,
+ -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8,
+ 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5,
+ 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2,
+ 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1,
+ 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5,
+ 0.8, 1.1;
+
+ v << nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan,
+ nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan,
+ nan, nan, nan, 0.47972119876364683, 0.5, 0.5202788012363533, nan, nan,
+ 0.9518683957740043, 0.9789663010413743, 0.9931729188073435, nan, nan,
+ 0.999995949033062, 0.9999999999993698, 0.9999999999999999, nan, nan,
+ 0.9999999999999999, 0.9999999999999999, 0.9999999999999999, nan, nan,
+ nan, nan, nan, nan, nan, 0.006827081192655869, 0.0210336989586256,
+ 0.04813160422599567, nan, nan, 0.20014344256217678, 0.5000000000000001,
+ 0.7998565574378232, nan, nan, 0.9991401428435834, 0.999999999698403,
+ 0.9999999999999999, nan, nan, 0.9999999999999999, 0.9999999999999999,
+ 0.9999999999999999, nan, nan, nan, nan, nan, nan, nan,
+ 1.0646600232370887e-25, 6.301722877826246e-13, 4.050966937974938e-06,
+ nan, nan, 7.864342668429763e-23, 3.015969667594166e-10,
+ 0.0008598571564165444, nan, nan, 6.031987710123844e-08,
+ 0.5000000000000007, 0.9999999396801229, nan, nan, 0.9999999999999999,
+ 0.9999999999999999, 0.9999999999999999, nan, nan, nan, nan, nan, nan,
+ nan, 0.0, 7.029920380986636e-306, 2.2450728208591345e-101, nan, nan,
+ 0.0, 9.275871147869727e-302, 1.2232913026152827e-97, nan, nan, 0.0,
+ 3.0891393081932924e-252, 2.9303043666183996e-60, nan, nan,
+ 2.248913486879199e-196, 0.5000000000004947, 0.9999999999999999, nan;
+
+ CALL_SUBTEST(res = betainc(a, b, x);
+ verify_component_wise(res, v););
+ }
+
+ // Test various properties of betainc
+ {
+ ArrayType m1 = ArrayType::Random(32);
+ ArrayType m2 = ArrayType::Random(32);
+ ArrayType m3 = ArrayType::Random(32);
+ ArrayType one = ArrayType::Constant(32, Scalar(1.0));
+ const Scalar eps = std::numeric_limits<Scalar>::epsilon();
+ ArrayType a = (m1 * 4.0).exp();
+ ArrayType b = (m2 * 4.0).exp();
+ ArrayType x = m3.abs();
+
+ // betainc(a, 1, x) == x**a
+ CALL_SUBTEST(
+ ArrayType test = betainc(a, one, x);
+ ArrayType expected = x.pow(a);
+ verify_component_wise(test, expected););
+
+ // betainc(1, b, x) == 1 - (1 - x)**b
+ CALL_SUBTEST(
+ ArrayType test = betainc(one, b, x);
+ ArrayType expected = one - (one - x).pow(b);
+ verify_component_wise(test, expected););
+
+ // betainc(a, b, x) == 1 - betainc(b, a, 1-x)
+ CALL_SUBTEST(
+ ArrayType test = betainc(a, b, x) + betainc(b, a, one - x);
+ ArrayType expected = one;
+ verify_component_wise(test, expected););
+
+ // betainc(a+1, b, x) = betainc(a, b, x) - x**a * (1 - x)**b / (a * beta(a, b))
+ CALL_SUBTEST(
+ ArrayType num = x.pow(a) * (one - x).pow(b);
+ ArrayType denom = a * (a.lgamma() + b.lgamma() - (a + b).lgamma()).exp();
+ // Add eps to rhs and lhs so that the component-wise test doesn't produce
+ // NaNs when both outputs are zero.
+ ArrayType expected = betainc(a, b, x) - num / denom + eps;
+ ArrayType test = betainc(a + one, b, x) + eps;
+ if (sizeof(Scalar) >= 8) { // double
+ verify_component_wise(test, expected);
+ } else {
+ // Reason for limited test: http://eigen.tuxfamily.org/bz/show_bug.cgi?id=1232
+ verify_component_wise(test.head(8), expected.head(8));
+ });
+
+ // betainc(a, b+1, x) = betainc(a, b, x) + x**a * (1 - x)**b / (b * beta(a, b))
+ CALL_SUBTEST(
+ // Add eps to rhs and lhs so that the component-wise test doesn't produce
+ // NaNs when both outputs are zero.
+ ArrayType num = x.pow(a) * (one - x).pow(b);
+ ArrayType denom = b * (a.lgamma() + b.lgamma() - (a + b).lgamma()).exp();
+ ArrayType expected = betainc(a, b, x) + num / denom + eps;
+ ArrayType test = betainc(a, b + one, x) + eps;
+ verify_component_wise(test, expected););
+ }
+#endif
+}
+
+void test_special_functions()
+{
+ CALL_SUBTEST_1(array_special_functions<ArrayXf>());
+ CALL_SUBTEST_2(array_special_functions<ArrayXd>());
+}
diff --git a/unsupported/test/splines.cpp b/unsupported/test/splines.cpp
index a7eb3e0c4..3be020434 100644
--- a/unsupported/test/splines.cpp
+++ b/unsupported/test/splines.cpp
@@ -13,23 +13,23 @@
namespace Eigen {
-// lets do some explicit instantiations and thus
-// force the compilation of all spline functions...
-template class Spline<double, 2, Dynamic>;
-template class Spline<double, 3, Dynamic>;
+ // let's do some explicit instantiations and thus
+ // force the compilation of all spline functions...
+ template class Spline<double, 2, Dynamic>;
+ template class Spline<double, 3, Dynamic>;
-template class Spline<double, 2, 2>;
-template class Spline<double, 2, 3>;
-template class Spline<double, 2, 4>;
-template class Spline<double, 2, 5>;
+ template class Spline<double, 2, 2>;
+ template class Spline<double, 2, 3>;
+ template class Spline<double, 2, 4>;
+ template class Spline<double, 2, 5>;
-template class Spline<float, 2, Dynamic>;
-template class Spline<float, 3, Dynamic>;
+ template class Spline<float, 2, Dynamic>;
+ template class Spline<float, 3, Dynamic>;
-template class Spline<float, 3, 2>;
-template class Spline<float, 3, 3>;
-template class Spline<float, 3, 4>;
-template class Spline<float, 3, 5>;
+ template class Spline<float, 3, 2>;
+ template class Spline<float, 3, 3>;
+ template class Spline<float, 3, 4>;
+ template class Spline<float, 3, 5>;
}
@@ -234,11 +234,48 @@ void check_global_interpolation2d()
}
}
+void check_global_interpolation_with_derivatives2d()
+{
+ typedef Spline2d::PointType PointType;
+ typedef Spline2d::KnotVectorType KnotVectorType;
+
+ const Eigen::DenseIndex numPoints = 100;
+ const unsigned int dimension = 2;
+ const unsigned int degree = 3;
+
+ ArrayXXd points = ArrayXXd::Random(dimension, numPoints);
+
+ KnotVectorType knots;
+ Eigen::ChordLengths(points, knots);
+
+ ArrayXXd derivatives = ArrayXXd::Random(dimension, numPoints);
+ VectorXd derivativeIndices(numPoints);
+
+ for (Eigen::DenseIndex i = 0; i < numPoints; ++i)
+ derivativeIndices(i) = static_cast<double>(i);
+
+ const Spline2d spline = SplineFitting<Spline2d>::InterpolateWithDerivatives(
+ points, derivatives, derivativeIndices, degree);
+
+ for (Eigen::DenseIndex i = 0; i < points.cols(); ++i)
+ {
+ PointType point = spline(knots(i));
+ PointType referencePoint = points.col(i);
+ VERIFY_IS_APPROX(point, referencePoint);
+ PointType derivative = spline.derivatives(knots(i), 1).col(1);
+ PointType referenceDerivative = derivatives.col(i);
+ VERIFY_IS_APPROX(derivative, referenceDerivative);
+ }
+}
void test_splines()
{
- CALL_SUBTEST( eval_spline3d() );
- CALL_SUBTEST( eval_spline3d_onbrks() );
- CALL_SUBTEST( eval_closed_spline2d() );
- CALL_SUBTEST( check_global_interpolation2d() );
+ for (int i = 0; i < g_repeat; ++i)
+ {
+ CALL_SUBTEST( eval_spline3d() );
+ CALL_SUBTEST( eval_spline3d_onbrks() );
+ CALL_SUBTEST( eval_closed_spline2d() );
+ CALL_SUBTEST( check_global_interpolation2d() );
+ CALL_SUBTEST( check_global_interpolation_with_derivatives2d() );
+ }
}
diff --git a/unsupported/test/svd_common.h b/unsupported/test/svd_common.h
deleted file mode 100644
index b40c23a2b..000000000
--- a/unsupported/test/svd_common.h
+++ /dev/null
@@ -1,261 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
-// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
-//
-// Copyright (C) 2013 Gauthier Brun <brun.gauthier@gmail.com>
-// Copyright (C) 2013 Nicolas Carre <nicolas.carre@ensimag.fr>
-// Copyright (C) 2013 Jean Ceccato <jean.ceccato@ensimag.fr>
-// Copyright (C) 2013 Pierre Zoppitelli <pierre.zoppitelli@ensimag.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-// discard stack allocation as that too bypasses malloc
-#define EIGEN_STACK_ALLOCATION_LIMIT 0
-#define EIGEN_RUNTIME_NO_MALLOC
-
-#include "main.h"
-#include <unsupported/Eigen/SVD>
-#include <Eigen/LU>
-
-
-// check if "svd" is the good image of "m"
-template<typename MatrixType, typename SVD>
-void svd_check_full(const MatrixType& m, const SVD& svd)
-{
- typedef typename MatrixType::Index Index;
- Index rows = m.rows();
- Index cols = m.cols();
- enum {
- RowsAtCompileTime = MatrixType::RowsAtCompileTime,
- ColsAtCompileTime = MatrixType::ColsAtCompileTime
- };
-
- typedef typename MatrixType::Scalar Scalar;
- typedef Matrix<Scalar, RowsAtCompileTime, RowsAtCompileTime> MatrixUType;
- typedef Matrix<Scalar, ColsAtCompileTime, ColsAtCompileTime> MatrixVType;
-
-
- MatrixType sigma = MatrixType::Zero(rows, cols);
- sigma.diagonal() = svd.singularValues().template cast<Scalar>();
- MatrixUType u = svd.matrixU();
- MatrixVType v = svd.matrixV();
- VERIFY_IS_APPROX(m, u * sigma * v.adjoint());
- VERIFY_IS_UNITARY(u);
- VERIFY_IS_UNITARY(v);
-} // end svd_check_full
-
-
-
-// Compare to a reference value
-template<typename MatrixType, typename SVD>
-void svd_compare_to_full(const MatrixType& m,
- unsigned int computationOptions,
- const SVD& referenceSvd)
-{
- typedef typename MatrixType::Index Index;
- Index rows = m.rows();
- Index cols = m.cols();
- Index diagSize = (std::min)(rows, cols);
-
- SVD svd(m, computationOptions);
-
- VERIFY_IS_APPROX(svd.singularValues(), referenceSvd.singularValues());
- if(computationOptions & ComputeFullU)
- VERIFY_IS_APPROX(svd.matrixU(), referenceSvd.matrixU());
- if(computationOptions & ComputeThinU)
- VERIFY_IS_APPROX(svd.matrixU(), referenceSvd.matrixU().leftCols(diagSize));
- if(computationOptions & ComputeFullV)
- VERIFY_IS_APPROX(svd.matrixV(), referenceSvd.matrixV());
- if(computationOptions & ComputeThinV)
- VERIFY_IS_APPROX(svd.matrixV(), referenceSvd.matrixV().leftCols(diagSize));
-} // end svd_compare_to_full
-
-
-
-template<typename MatrixType, typename SVD>
-void svd_solve(const MatrixType& m, unsigned int computationOptions)
-{
- typedef typename MatrixType::Scalar Scalar;
- typedef typename MatrixType::Index Index;
- Index rows = m.rows();
- Index cols = m.cols();
-
- enum {
- RowsAtCompileTime = MatrixType::RowsAtCompileTime,
- ColsAtCompileTime = MatrixType::ColsAtCompileTime
- };
-
- typedef Matrix<Scalar, RowsAtCompileTime, Dynamic> RhsType;
- typedef Matrix<Scalar, ColsAtCompileTime, Dynamic> SolutionType;
-
- RhsType rhs = RhsType::Random(rows, internal::random<Index>(1, cols));
- SVD svd(m, computationOptions);
- SolutionType x = svd.solve(rhs);
- // evaluate normal equation which works also for least-squares solutions
- VERIFY_IS_APPROX(m.adjoint()*m*x,m.adjoint()*rhs);
-} // end svd_solve
-
-
-// test computations options
-// 2 functions because Jacobisvd can return before the second function
-template<typename MatrixType, typename SVD>
-void svd_test_computation_options_1(const MatrixType& m, const SVD& fullSvd)
-{
- svd_check_full< MatrixType, SVD >(m, fullSvd);
- svd_solve< MatrixType, SVD >(m, ComputeFullU | ComputeFullV);
-}
-
-
-template<typename MatrixType, typename SVD>
-void svd_test_computation_options_2(const MatrixType& m, const SVD& fullSvd)
-{
- svd_compare_to_full< MatrixType, SVD >(m, ComputeFullU, fullSvd);
- svd_compare_to_full< MatrixType, SVD >(m, ComputeFullV, fullSvd);
- svd_compare_to_full< MatrixType, SVD >(m, 0, fullSvd);
-
- if (MatrixType::ColsAtCompileTime == Dynamic) {
- // thin U/V are only available with dynamic number of columns
-
- svd_compare_to_full< MatrixType, SVD >(m, ComputeFullU|ComputeThinV, fullSvd);
- svd_compare_to_full< MatrixType, SVD >(m, ComputeThinV, fullSvd);
- svd_compare_to_full< MatrixType, SVD >(m, ComputeThinU|ComputeFullV, fullSvd);
- svd_compare_to_full< MatrixType, SVD >(m, ComputeThinU , fullSvd);
- svd_compare_to_full< MatrixType, SVD >(m, ComputeThinU|ComputeThinV, fullSvd);
- svd_solve<MatrixType, SVD>(m, ComputeFullU | ComputeThinV);
- svd_solve<MatrixType, SVD>(m, ComputeThinU | ComputeFullV);
- svd_solve<MatrixType, SVD>(m, ComputeThinU | ComputeThinV);
-
- typedef typename MatrixType::Index Index;
- Index diagSize = (std::min)(m.rows(), m.cols());
- SVD svd(m, ComputeThinU | ComputeThinV);
- VERIFY_IS_APPROX(m, svd.matrixU().leftCols(diagSize) * svd.singularValues().asDiagonal() * svd.matrixV().leftCols(diagSize).adjoint());
- }
-}
-
-template<typename MatrixType, typename SVD>
-void svd_verify_assert(const MatrixType& m)
-{
- typedef typename MatrixType::Scalar Scalar;
- typedef typename MatrixType::Index Index;
- Index rows = m.rows();
- Index cols = m.cols();
-
- enum {
- RowsAtCompileTime = MatrixType::RowsAtCompileTime,
- ColsAtCompileTime = MatrixType::ColsAtCompileTime
- };
-
- typedef Matrix<Scalar, RowsAtCompileTime, 1> RhsType;
- RhsType rhs(rows);
- SVD svd;
- VERIFY_RAISES_ASSERT(svd.matrixU())
- VERIFY_RAISES_ASSERT(svd.singularValues())
- VERIFY_RAISES_ASSERT(svd.matrixV())
- VERIFY_RAISES_ASSERT(svd.solve(rhs))
- MatrixType a = MatrixType::Zero(rows, cols);
- a.setZero();
- svd.compute(a, 0);
- VERIFY_RAISES_ASSERT(svd.matrixU())
- VERIFY_RAISES_ASSERT(svd.matrixV())
- svd.singularValues();
- VERIFY_RAISES_ASSERT(svd.solve(rhs))
-
- if (ColsAtCompileTime == Dynamic)
- {
- svd.compute(a, ComputeThinU);
- svd.matrixU();
- VERIFY_RAISES_ASSERT(svd.matrixV())
- VERIFY_RAISES_ASSERT(svd.solve(rhs))
- svd.compute(a, ComputeThinV);
- svd.matrixV();
- VERIFY_RAISES_ASSERT(svd.matrixU())
- VERIFY_RAISES_ASSERT(svd.solve(rhs))
- }
- else
- {
- VERIFY_RAISES_ASSERT(svd.compute(a, ComputeThinU))
- VERIFY_RAISES_ASSERT(svd.compute(a, ComputeThinV))
- }
-}
-
-// work around stupid msvc error when constructing at compile time an expression that involves
-// a division by zero, even if the numeric type has floating point
-template<typename Scalar>
-EIGEN_DONT_INLINE Scalar zero() { return Scalar(0); }
-
-// workaround aggressive optimization in ICC
-template<typename T> EIGEN_DONT_INLINE T sub(T a, T b) { return a - b; }
-
-
-template<typename MatrixType, typename SVD>
-void svd_inf_nan()
-{
- // all this function does is verify we don't iterate infinitely on nan/inf values
-
- SVD svd;
- typedef typename MatrixType::Scalar Scalar;
- Scalar some_inf = Scalar(1) / zero<Scalar>();
- VERIFY(sub(some_inf, some_inf) != sub(some_inf, some_inf));
- svd.compute(MatrixType::Constant(10,10,some_inf), ComputeFullU | ComputeFullV);
-
- Scalar some_nan = zero<Scalar> () / zero<Scalar> ();
- VERIFY(some_nan != some_nan);
- svd.compute(MatrixType::Constant(10,10,some_nan), ComputeFullU | ComputeFullV);
-
- MatrixType m = MatrixType::Zero(10,10);
- m(internal::random<int>(0,9), internal::random<int>(0,9)) = some_inf;
- svd.compute(m, ComputeFullU | ComputeFullV);
-
- m = MatrixType::Zero(10,10);
- m(internal::random<int>(0,9), internal::random<int>(0,9)) = some_nan;
- svd.compute(m, ComputeFullU | ComputeFullV);
-}
-
-
-template<typename SVD>
-void svd_preallocate()
-{
- Vector3f v(3.f, 2.f, 1.f);
- MatrixXf m = v.asDiagonal();
-
- internal::set_is_malloc_allowed(false);
- VERIFY_RAISES_ASSERT(VectorXf v(10);)
- SVD svd;
- internal::set_is_malloc_allowed(true);
- svd.compute(m);
- VERIFY_IS_APPROX(svd.singularValues(), v);
-
- SVD svd2(3,3);
- internal::set_is_malloc_allowed(false);
- svd2.compute(m);
- internal::set_is_malloc_allowed(true);
- VERIFY_IS_APPROX(svd2.singularValues(), v);
- VERIFY_RAISES_ASSERT(svd2.matrixU());
- VERIFY_RAISES_ASSERT(svd2.matrixV());
- svd2.compute(m, ComputeFullU | ComputeFullV);
- VERIFY_IS_APPROX(svd2.matrixU(), Matrix3f::Identity());
- VERIFY_IS_APPROX(svd2.matrixV(), Matrix3f::Identity());
- internal::set_is_malloc_allowed(false);
- svd2.compute(m);
- internal::set_is_malloc_allowed(true);
-
- SVD svd3(3,3,ComputeFullU|ComputeFullV);
- internal::set_is_malloc_allowed(false);
- svd2.compute(m);
- internal::set_is_malloc_allowed(true);
- VERIFY_IS_APPROX(svd2.singularValues(), v);
- VERIFY_IS_APPROX(svd2.matrixU(), Matrix3f::Identity());
- VERIFY_IS_APPROX(svd2.matrixV(), Matrix3f::Identity());
- internal::set_is_malloc_allowed(false);
- svd2.compute(m, ComputeFullU|ComputeFullV);
- internal::set_is_malloc_allowed(true);
-}
-
-
-
-
-