-rw-r--r--.hgignore2
-rw-r--r--CMakeLists.txt55
-rw-r--r--CTestConfig.cmake8
-rw-r--r--CTestCustom.cmake.in1
-rw-r--r--Eigen/Cholesky5
-rw-r--r--Eigen/Core289
-rw-r--r--Eigen/Eigenvalues8
-rw-r--r--Eigen/Geometry4
-rw-r--r--Eigen/KLUSupport41
-rw-r--r--Eigen/LU4
-rw-r--r--Eigen/PaStiXSupport1
-rw-r--r--[-rwxr-xr-x]Eigen/PardisoSupport0
-rw-r--r--Eigen/QR8
-rw-r--r--Eigen/QtAlignedMalloc2
-rw-r--r--Eigen/SVD4
-rw-r--r--Eigen/SparseLU4
-rw-r--r--Eigen/SparseQR1
-rw-r--r--Eigen/src/Cholesky/LDLT.h14
-rw-r--r--Eigen/src/Cholesky/LLT.h35
-rw-r--r--Eigen/src/CholmodSupport/CholmodSupport.h86
-rw-r--r--Eigen/src/Core/ArithmeticSequence.h143
-rw-r--r--Eigen/src/Core/Array.h8
-rw-r--r--Eigen/src/Core/ArrayBase.h8
-rw-r--r--Eigen/src/Core/ArrayWrapper.h8
-rw-r--r--Eigen/src/Core/Assign.h2
-rw-r--r--Eigen/src/Core/AssignEvaluator.h10
-rwxr-xr-xEigen/src/Core/Assign_MKL.h6
-rw-r--r--Eigen/src/Core/BooleanRedux.h6
-rw-r--r--Eigen/src/Core/CommaInitializer.h4
-rw-r--r--Eigen/src/Core/CoreEvaluators.h51
-rw-r--r--Eigen/src/Core/CwiseBinaryOp.h5
-rw-r--r--Eigen/src/Core/CwiseNullaryOp.h120
-rw-r--r--Eigen/src/Core/DenseBase.h15
-rw-r--r--Eigen/src/Core/DenseStorage.h60
-rw-r--r--Eigen/src/Core/Diagonal.h15
-rw-r--r--Eigen/src/Core/DiagonalMatrix.h4
-rw-r--r--Eigen/src/Core/DiagonalProduct.h2
-rw-r--r--Eigen/src/Core/Dot.h23
-rw-r--r--Eigen/src/Core/EigenBase.h4
-rw-r--r--Eigen/src/Core/Fuzzy.h6
-rw-r--r--Eigen/src/Core/GeneralProduct.h38
-rw-r--r--Eigen/src/Core/GenericPacketMath.h20
-rw-r--r--Eigen/src/Core/GlobalFunctions.h60
-rw-r--r--Eigen/src/Core/Map.h15
-rw-r--r--Eigen/src/Core/MapBase.h6
-rw-r--r--Eigen/src/Core/MathFunctions.h470
-rw-r--r--Eigen/src/Core/MathFunctionsImpl.h24
-rw-r--r--Eigen/src/Core/MatrixBase.h27
-rw-r--r--Eigen/src/Core/NestByValue.h10
-rw-r--r--Eigen/src/Core/NoAlias.h7
-rw-r--r--Eigen/src/Core/NumTraits.h42
-rw-r--r--Eigen/src/Core/PermutationMatrix.h6
-rw-r--r--Eigen/src/Core/PlainObjectBase.h35
-rw-r--r--Eigen/src/Core/Product.h12
-rw-r--r--Eigen/src/Core/ProductEvaluators.h118
-rw-r--r--Eigen/src/Core/Random.h2
-rw-r--r--Eigen/src/Core/Redux.h290
-rw-r--r--Eigen/src/Core/Ref.h2
-rw-r--r--Eigen/src/Core/Replicate.h4
-rw-r--r--Eigen/src/Core/ReturnByValue.h2
-rw-r--r--Eigen/src/Core/Reverse.h6
-rw-r--r--Eigen/src/Core/SelfAdjointView.h10
-rw-r--r--Eigen/src/Core/SelfCwiseBinaryOp.h12
-rw-r--r--Eigen/src/Core/Solve.h6
-rw-r--r--Eigen/src/Core/SolveTriangular.h2
-rw-r--r--Eigen/src/Core/SolverBase.h3
-rw-r--r--Eigen/src/Core/StableNorm.h116
-rw-r--r--Eigen/src/Core/Transpose.h11
-rw-r--r--Eigen/src/Core/Transpositions.h4
-rw-r--r--Eigen/src/Core/TriangularMatrix.h21
-rw-r--r--Eigen/src/Core/VectorBlock.h2
-rw-r--r--Eigen/src/Core/VectorwiseOp.h4
-rw-r--r--Eigen/src/Core/arch/AVX/Complex.h36
-rw-r--r--Eigen/src/Core/arch/AVX/PacketMath.h13
-rw-r--r--Eigen/src/Core/arch/AVX512/MathFunctions.h76
-rw-r--r--Eigen/src/Core/arch/AVX512/PacketMath.h80
-rw-r--r--Eigen/src/Core/arch/AltiVec/Complex.h35
-rwxr-xr-xEigen/src/Core/arch/AltiVec/PacketMath.h52
-rw-r--r--Eigen/src/Core/arch/CUDA/Complex.h6
-rw-r--r--Eigen/src/Core/arch/Default/ConjHelper.h29
-rw-r--r--Eigen/src/Core/arch/GPU/Half.h (renamed from Eigen/src/Core/arch/CUDA/Half.h)297
-rw-r--r--Eigen/src/Core/arch/GPU/MathFunctions.h (renamed from Eigen/src/Core/arch/CUDA/MathFunctions.h)8
-rw-r--r--Eigen/src/Core/arch/GPU/PacketMath.h (renamed from Eigen/src/Core/arch/CUDA/PacketMath.h)30
-rw-r--r--Eigen/src/Core/arch/GPU/PacketMathHalf.h (renamed from Eigen/src/Core/arch/CUDA/PacketMathHalf.h)453
-rw-r--r--Eigen/src/Core/arch/GPU/TypeCasting.h (renamed from Eigen/src/Core/arch/CUDA/TypeCasting.h)18
-rw-r--r--Eigen/src/Core/arch/HIP/hcc/math_constants.h23
-rw-r--r--Eigen/src/Core/arch/MSA/Complex.h759
-rw-r--r--Eigen/src/Core/arch/MSA/MathFunctions.h387
-rw-r--r--Eigen/src/Core/arch/MSA/PacketMath.h1317
-rw-r--r--Eigen/src/Core/arch/NEON/Complex.h12
-rw-r--r--Eigen/src/Core/arch/NEON/MathFunctions.h92
-rw-r--r--Eigen/src/Core/arch/NEON/PacketMath.h48
-rw-r--r--Eigen/src/Core/arch/NEON/TypeCasting.h48
-rw-r--r--Eigen/src/Core/arch/SSE/Complex.h40
-rw-r--r--Eigen/src/Core/arch/SSE/MathFunctions.h2
-rwxr-xr-xEigen/src/Core/arch/SSE/PacketMath.h24
-rw-r--r--Eigen/src/Core/arch/SSE/TypeCasting.h28
-rw-r--r--Eigen/src/Core/arch/SYCL/InteropHeaders.h104
-rw-r--r--Eigen/src/Core/arch/SYCL/MathFunctions.h221
-rw-r--r--Eigen/src/Core/arch/SYCL/PacketMath.h458
-rw-r--r--Eigen/src/Core/arch/SYCL/TypeCasting.h89
-rw-r--r--Eigen/src/Core/arch/ZVector/Complex.h407
-rw-r--r--Eigen/src/Core/arch/ZVector/MathFunctions.h105
-rwxr-xr-xEigen/src/Core/arch/ZVector/PacketMath.h840
-rw-r--r--Eigen/src/Core/functors/AssignmentFunctors.h2
-rw-r--r--Eigen/src/Core/functors/BinaryFunctors.h29
-rw-r--r--Eigen/src/Core/functors/NullaryFunctors.h11
-rw-r--r--Eigen/src/Core/functors/StlFunctors.h4
-rw-r--r--Eigen/src/Core/functors/UnaryFunctors.h32
-rw-r--r--Eigen/src/Core/products/GeneralBlockPanelKernel.h126
-rw-r--r--Eigen/src/Core/products/GeneralMatrixMatrix.h24
-rw-r--r--Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h25
-rw-r--r--Eigen/src/Core/products/GeneralMatrixMatrixTriangular_BLAS.h12
-rw-r--r--Eigen/src/Core/products/GeneralMatrixMatrix_BLAS.h19
-rw-r--r--Eigen/src/Core/products/GeneralMatrixVector.h10
-rw-r--r--Eigen/src/Core/products/GeneralMatrixVector_BLAS.h19
-rw-r--r--Eigen/src/Core/products/Parallelizer.h22
-rw-r--r--Eigen/src/Core/products/SelfadjointMatrixMatrix.h6
-rw-r--r--Eigen/src/Core/products/SelfadjointMatrixMatrix_BLAS.h48
-rw-r--r--Eigen/src/Core/products/SelfadjointMatrixVector.h14
-rw-r--r--Eigen/src/Core/products/SelfadjointMatrixVector_BLAS.h9
-rw-r--r--Eigen/src/Core/products/SelfadjointProduct.h2
-rw-r--r--Eigen/src/Core/products/SelfadjointRank2Update.h5
-rw-r--r--Eigen/src/Core/products/TriangularMatrixMatrix.h41
-rw-r--r--Eigen/src/Core/products/TriangularMatrixMatrix_BLAS.h39
-rw-r--r--Eigen/src/Core/products/TriangularMatrixVector.h22
-rw-r--r--Eigen/src/Core/products/TriangularMatrixVector_BLAS.h46
-rw-r--r--Eigen/src/Core/products/TriangularSolverMatrix.h4
-rw-r--r--Eigen/src/Core/products/TriangularSolverMatrix_BLAS.h40
-rw-r--r--Eigen/src/Core/products/TriangularSolverVector.h23
-rwxr-xr-xEigen/src/Core/util/BlasUtil.h53
-rw-r--r--Eigen/src/Core/util/ConfigureVectorization.h436
-rw-r--r--Eigen/src/Core/util/Constants.h3
-rwxr-xr-xEigen/src/Core/util/DisableStupidWarnings.h22
-rw-r--r--Eigen/src/Core/util/IndexedViewHelper.h49
-rw-r--r--Eigen/src/Core/util/IntegralConstant.h4
-rwxr-xr-xEigen/src/Core/util/MKL_support.h19
-rw-r--r--Eigen/src/Core/util/Macros.h537
-rw-r--r--Eigen/src/Core/util/Memory.h160
-rwxr-xr-xEigen/src/Core/util/Meta.h135
-rw-r--r--Eigen/src/Core/util/ReenableStupidWarnings.h8
-rw-r--r--Eigen/src/Core/util/StaticAssert.h120
-rw-r--r--Eigen/src/Core/util/SymbolicIndex.h23
-rw-r--r--Eigen/src/Core/util/XprHelper.h43
-rw-r--r--Eigen/src/Eigenvalues/ComplexEigenSolver.h2
-rw-r--r--Eigen/src/Eigenvalues/ComplexSchur.h2
-rw-r--r--Eigen/src/Eigenvalues/EigenSolver.h2
-rw-r--r--Eigen/src/Eigenvalues/GeneralizedEigenSolver.h5
-rw-r--r--Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h2
-rw-r--r--Eigen/src/Eigenvalues/HessenbergDecomposition.h2
-rw-r--r--Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h6
-rw-r--r--Eigen/src/Eigenvalues/RealQZ.h2
-rw-r--r--Eigen/src/Eigenvalues/RealSchur.h19
-rw-r--r--Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h26
-rw-r--r--Eigen/src/Eigenvalues/SelfAdjointEigenSolver_LAPACKE.h23
-rw-r--r--Eigen/src/Eigenvalues/Tridiagonalization.h9
-rw-r--r--Eigen/src/Geometry/AngleAxis.h2
-rw-r--r--Eigen/src/Geometry/Hyperplane.h2
-rw-r--r--Eigen/src/Geometry/Quaternion.h62
-rwxr-xr-xEigen/src/Geometry/Scaling.h26
-rw-r--r--Eigen/src/Geometry/Transform.h2
-rw-r--r--Eigen/src/Geometry/Translation.h12
-rw-r--r--Eigen/src/Geometry/arch/Geometry_SSE.h72
-rw-r--r--Eigen/src/Householder/BlockHouseholder.h11
-rw-r--r--Eigen/src/Householder/Householder.h12
-rw-r--r--Eigen/src/Householder/HouseholderSequence.h129
-rw-r--r--Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h27
-rw-r--r--Eigen/src/IterativeLinearSolvers/ConjugateGradient.h5
-rw-r--r--Eigen/src/IterativeLinearSolvers/IncompleteLUT.h4
-rw-r--r--Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h2
-rw-r--r--Eigen/src/IterativeLinearSolvers/SolveWithGuess.h2
-rw-r--r--Eigen/src/Jacobi/Jacobi.h278
-rw-r--r--Eigen/src/KLUSupport/KLUSupport.h358
-rw-r--r--Eigen/src/LU/Determinant.h15
-rw-r--r--Eigen/src/LU/FullPivLU.h8
-rw-r--r--Eigen/src/LU/InverseImpl.h4
-rw-r--r--Eigen/src/LU/PartialPivLU.h4
-rw-r--r--Eigen/src/OrderingMethods/Eigen_Colamd.h8
-rw-r--r--Eigen/src/OrderingMethods/Ordering.h2
-rw-r--r--Eigen/src/PaStiXSupport/PaStiXSupport.h10
-rw-r--r--Eigen/src/PardisoSupport/PardisoSupport.h2
-rw-r--r--Eigen/src/QR/ColPivHouseholderQR.h21
-rw-r--r--Eigen/src/QR/CompleteOrthogonalDecomposition.h11
-rw-r--r--Eigen/src/QR/FullPivHouseholderQR.h9
-rw-r--r--Eigen/src/QR/HouseholderQR.h17
-rw-r--r--Eigen/src/SPQRSupport/SuiteSparseQRSupport.h2
-rw-r--r--Eigen/src/SVD/BDCSVD.h265
-rw-r--r--Eigen/src/SVD/JacobiSVD.h2
-rw-r--r--Eigen/src/SVD/JacobiSVD_LAPACKE.h5
-rw-r--r--Eigen/src/SVD/SVDBase.h1
-rw-r--r--Eigen/src/SVD/UpperBidiagonalization.h10
-rw-r--r--Eigen/src/SparseCholesky/SimplicialCholesky.h2
-rw-r--r--Eigen/src/SparseCholesky/SimplicialCholesky_impl.h8
-rw-r--r--Eigen/src/SparseCore/AmbiVector.h2
-rw-r--r--Eigen/src/SparseCore/ConservativeSparseSparseProduct.h67
-rw-r--r--Eigen/src/SparseCore/SparseAssign.h4
-rw-r--r--Eigen/src/SparseCore/SparseBlock.h40
-rw-r--r--Eigen/src/SparseCore/SparseDenseProduct.h38
-rw-r--r--Eigen/src/SparseCore/SparseMatrix.h10
-rw-r--r--Eigen/src/SparseCore/SparseMatrixBase.h17
-rw-r--r--Eigen/src/SparseCore/SparseProduct.h2
-rw-r--r--Eigen/src/SparseCore/SparseSelfAdjointView.h11
-rw-r--r--Eigen/src/SparseCore/SparseSparseProductWithPruning.h22
-rw-r--r--Eigen/src/SparseCore/SparseVector.h2
-rw-r--r--Eigen/src/SparseLU/SparseLU.h16
-rw-r--r--Eigen/src/SparseLU/SparseLU_Memory.h2
-rw-r--r--Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h4
-rw-r--r--Eigen/src/SparseLU/SparseLU_column_dfs.h4
-rw-r--r--Eigen/src/SparseLU/SparseLU_gemm_kernel.h2
-rw-r--r--Eigen/src/SparseLU/SparseLU_panel_bmod.h2
-rw-r--r--Eigen/src/SparseQR/SparseQR.h29
-rw-r--r--Eigen/src/StlSupport/StdDeque.h4
-rw-r--r--Eigen/src/StlSupport/StdList.h4
-rw-r--r--Eigen/src/StlSupport/StdVector.h4
-rw-r--r--Eigen/src/SuperLUSupport/SuperLUSupport.h6
-rw-r--r--Eigen/src/UmfPackSupport/UmfPackSupport.h208
-rwxr-xr-xEigen/src/misc/lapacke.h9
-rw-r--r--Eigen/src/plugins/ArrayCwiseUnaryOps.h10
-rw-r--r--Eigen/src/plugins/BlockMethods.h47
-rw-r--r--Eigen/src/plugins/IndexedViewMethods.h25
-rw-r--r--Eigen/src/plugins/ReshapedMethods.h4
-rw-r--r--README.md4
-rw-r--r--bench/analyze-blocking-sizes.cpp2
-rw-r--r--bench/bench_gemm.cpp2
-rw-r--r--bench/btl/README2
-rw-r--r--bench/btl/generic_bench/bench.hh2
-rw-r--r--bench/btl/generic_bench/utils/size_log.hh2
-rw-r--r--bench/btl/generic_bench/utils/xy_file.hh2
-rw-r--r--bench/btl/libs/ublas/ublas_interface.hh2
-rw-r--r--bench/eig33.cpp2
-rw-r--r--bench/spbench/CMakeLists.txt29
-rw-r--r--bench/spbench/spbenchsolver.cpp2
-rw-r--r--bench/tensors/README10
-rw-r--r--bench/tensors/tensor_benchmarks.h111
-rw-r--r--bench/tensors/tensor_benchmarks_sycl.cc73
-rw-r--r--bench/tensors/tensor_benchmarks_sycl_include_headers.cc2
-rw-r--r--blas/common.h8
-rw-r--r--blas/f2c/ctbmv.c2
-rw-r--r--blas/f2c/dtbmv.c2
-rw-r--r--blas/f2c/stbmv.c2
-rw-r--r--blas/f2c/ztbmv.c2
-rw-r--r--blas/level1_impl.h2
-rw-r--r--blas/testing/cblat1.f2
-rw-r--r--blas/testing/dblat1.f2
-rw-r--r--blas/testing/sblat1.f2
-rw-r--r--blas/testing/zblat1.f2
-rw-r--r--cmake/Eigen3Config.cmake.in4
-rw-r--r--cmake/EigenConfigureTesting.cmake12
-rw-r--r--cmake/EigenTesting.cmake117
-rw-r--r--cmake/FindBLAS.cmake1499
-rw-r--r--cmake/FindBLASEXT.cmake380
-rw-r--r--cmake/FindComputeCpp.cmake53
-rw-r--r--cmake/FindEigen3.cmake12
-rw-r--r--cmake/FindHWLOC.cmake331
-rw-r--r--cmake/FindKLU.cmake48
-rw-r--r--cmake/FindMetis.cmake297
-rw-r--r--cmake/FindPTSCOTCH.cmake423
-rw-r--r--cmake/FindPastix.cmake713
-rw-r--r--cmake/FindScotch.cmake379
-rw-r--r--cmake/FindTriSYCL.cmake152
-rw-r--r--cmake/language_support.cmake2
-rw-r--r--debug/msvc/eigen_autoexp_part.dat2
-rw-r--r--doc/AsciiQuickReference.txt2
-rw-r--r--doc/Doxyfile.in2
-rw-r--r--doc/FunctionsTakingEigenTypes.dox6
-rw-r--r--doc/LeastSquares.dox2
-rw-r--r--doc/Pitfalls.dox6
-rw-r--r--doc/PreprocessorDirectives.dox10
-rw-r--r--doc/QuickReference.dox12
-rw-r--r--doc/QuickStartGuide.dox2
-rw-r--r--doc/SparseLinearSystems.dox3
-rw-r--r--doc/SparseQuickReference.dox2
-rw-r--r--doc/TemplateKeyword.dox2
-rw-r--r--doc/TopicLazyEvaluation.dox2
-rw-r--r--doc/TopicLinearAlgebraDecompositions.dox16
-rw-r--r--doc/TopicMultithreading.dox2
-rw-r--r--doc/TutorialGeometry.dox2
-rw-r--r--doc/TutorialLinearAlgebra.dox28
-rw-r--r--doc/TutorialMapClass.dox4
-rw-r--r--doc/TutorialSparse.dox4
-rw-r--r--doc/UnalignedArrayAssert.dox4
-rw-r--r--doc/UsingIntelMKL.dox6
-rw-r--r--doc/UsingNVCC.dox12
-rw-r--r--doc/eigen_navtree_hacks.js4
-rw-r--r--doc/eigendoxy.css7
-rw-r--r--doc/eigendoxy_footer.html.in4
-rw-r--r--doc/eigendoxy_header.html.in18
-rw-r--r--doc/examples/Cwise_lgamma.cpp2
-rw-r--r--doc/examples/TutorialLinAlgSVDSolve.cpp2
-rw-r--r--doc/examples/Tutorial_simple_example_dynamic_size.cpp2
-rw-r--r--doc/examples/matrixfree_cg.cpp1
-rw-r--r--doc/examples/nullary_indexing.cpp8
-rw-r--r--doc/snippets/DirectionWise_hnormalized.cpp3
-rw-r--r--doc/snippets/MatrixBase_cwiseEqual.cpp2
-rw-r--r--doc/snippets/MatrixBase_cwiseNotEqual.cpp2
-rw-r--r--doc/snippets/MatrixBase_reshaped_all.cpp1
-rw-r--r--doc/snippets/Matrix_Map_stride.cpp7
-rw-r--r--doc/snippets/VectorwiseOp_homogeneous.cpp3
-rw-r--r--doc/special_examples/Tutorial_sparse_example.cpp8
-rw-r--r--lapack/CMakeLists.txt6
-rwxr-xr-xscripts/buildtests.in2
-rw-r--r--scripts/eigen_gen_split_test_help.cmake11
-rwxr-xr-xscripts/eigen_monitor_perf.sh6
-rw-r--r--test/AnnoyingScalar.h154
-rw-r--r--test/CMakeLists.txt118
-rw-r--r--test/adjoint.cpp59
-rw-r--r--test/array_cwise.cpp (renamed from test/array.cpp)13
-rw-r--r--test/array_for_matrix.cpp30
-rw-r--r--test/array_of_string.cpp2
-rw-r--r--test/array_replicate.cpp3
-rw-r--r--test/array_reverse.cpp19
-rw-r--r--test/bandmatrix.cpp2
-rw-r--r--test/basicstuff.cpp17
-rw-r--r--test/bdcsvd.cpp5
-rw-r--r--test/bicgstab.cpp2
-rw-r--r--test/block.cpp25
-rw-r--r--test/boostmultiprec.cpp10
-rw-r--r--test/cholesky.cpp22
-rw-r--r--test/cholmod_support.cpp2
-rw-r--r--test/commainitializer.cpp2
-rw-r--r--test/conjugate_gradient.cpp2
-rw-r--r--test/conservative_resize.cpp34
-rw-r--r--test/constructor.cpp2
-rw-r--r--test/corners.cpp3
-rw-r--r--test/ctorleak.cpp2
-rw-r--r--test/cuda_common.h101
-rw-r--r--test/denseLM.cpp2
-rw-r--r--test/dense_storage.cpp2
-rw-r--r--test/determinant.cpp3
-rw-r--r--test/diagonal.cpp12
-rw-r--r--test/diagonalmatrices.cpp41
-rw-r--r--test/dontalign.cpp3
-rw-r--r--test/dynalloc.cpp8
-rw-r--r--test/eigen2support.cpp3
-rw-r--r--test/eigensolver_complex.cpp5
-rw-r--r--test/eigensolver_generalized_real.cpp10
-rw-r--r--test/eigensolver_generic.cpp57
-rw-r--r--test/eigensolver_selfadjoint.cpp3
-rw-r--r--test/evaluators.cpp2
-rw-r--r--test/exceptions.cpp90
-rw-r--r--test/fastmath.cpp2
-rw-r--r--test/first_aligned.cpp2
-rw-r--r--test/geo_alignedbox.cpp4
-rw-r--r--test/geo_eulerangles.cpp2
-rw-r--r--test/geo_homogeneous.cpp2
-rw-r--r--test/geo_hyperplane.cpp3
-rw-r--r--test/geo_orthomethods.cpp2
-rw-r--r--test/geo_parametrizedline.cpp3
-rw-r--r--test/geo_quaternion.cpp50
-rwxr-xr-xtest/geo_transformations.cpp2
-rw-r--r--test/gpu_basic.cu (renamed from test/cuda_basic.cu)89
-rw-r--r--test/gpu_common.h157
-rw-r--r--test/half_float.cpp96
-rw-r--r--test/hessenberg.cpp2
-rw-r--r--test/householder.cpp32
-rw-r--r--test/incomplete_cholesky.cpp20
-rw-r--r--test/indexed_view.cpp63
-rw-r--r--test/inplace_decomposition.cpp2
-rw-r--r--test/integer_types.cpp24
-rw-r--r--test/inverse.cpp82
-rw-r--r--test/is_same_dense.cpp10
-rw-r--r--test/jacobi.cpp3
-rw-r--r--test/jacobisvd.cpp3
-rw-r--r--test/klu_support.cpp32
-rw-r--r--test/linearstructure.cpp32
-rw-r--r--test/lscg.cpp10
-rw-r--r--test/lu.cpp4
-rw-r--r--test/main.h174
-rw-r--r--test/mapped_matrix.cpp12
-rw-r--r--test/mapstaticmethods.cpp8
-rw-r--r--test/mapstride.cpp61
-rw-r--r--test/meta.cpp63
-rw-r--r--test/metis_support.cpp2
-rw-r--r--test/miscmatrices.cpp3
-rw-r--r--test/mixingtypes.cpp60
-rw-r--r--test/nesting_ops.cpp2
-rw-r--r--test/nomalloc.cpp5
-rw-r--r--test/nullary.cpp94
-rw-r--r--test/num_dimensions.cpp90
-rw-r--r--test/numext.cpp53
-rw-r--r--test/packetmath.cpp127
-rw-r--r--test/pardiso_support.cpp2
-rw-r--r--test/pastix_support.cpp2
-rw-r--r--test/permutationmatrices.cpp15
-rw-r--r--test/prec_inverse_4x4.cpp2
-rw-r--r--test/product.h13
-rw-r--r--test/product_extra.cpp3
-rw-r--r--test/product_large.cpp32
-rw-r--r--test/product_mmtr.cpp13
-rw-r--r--test/product_notemporary.cpp14
-rw-r--r--test/product_selfadjoint.cpp3
-rw-r--r--test/product_small.cpp60
-rw-r--r--test/product_symm.cpp3
-rw-r--r--test/product_syrk.cpp3
-rw-r--r--test/product_trmm.cpp30
-rw-r--r--test/product_trmv.cpp3
-rw-r--r--test/product_trsolve.cpp2
-rw-r--r--test/qr.cpp6
-rw-r--r--test/qr_colpivoting.cpp6
-rw-r--r--test/qr_fullpivoting.cpp18
-rw-r--r--test/qtvector.cpp4
-rw-r--r--test/rand.cpp2
-rw-r--r--test/real_qz.cpp3
-rw-r--r--test/redux.cpp6
-rw-r--r--test/ref.cpp18
-rw-r--r--test/reshape.cpp1
-rw-r--r--test/resize.cpp2
-rw-r--r--test/rvalue_types.cpp2
-rw-r--r--test/schur_complex.cpp2
-rw-r--r--test/schur_real.cpp4
-rw-r--r--test/selfadjoint.cpp7
-rw-r--r--test/simplicial_cholesky.cpp6
-rw-r--r--test/sizeof.cpp4
-rw-r--r--test/sizeoverflow.cpp2
-rw-r--r--test/smallvectors.cpp2
-rw-r--r--test/sparseLM.cpp2
-rw-r--r--test/sparse_basic.cpp33
-rw-r--r--test/sparse_block.cpp11
-rw-r--r--test/sparse_permutations.cpp2
-rw-r--r--test/sparse_product.cpp96
-rw-r--r--test/sparse_ref.cpp2
-rw-r--r--test/sparse_solver.h2
-rw-r--r--test/sparse_solvers.cpp2
-rw-r--r--test/sparse_vector.cpp2
-rw-r--r--test/sparselu.cpp2
-rw-r--r--test/sparseqr.cpp24
-rw-r--r--test/special_numbers.cpp2
-rw-r--r--test/split_test_helper.h5994
-rw-r--r--test/spqr_support.cpp2
-rw-r--r--test/stable_norm.cpp51
-rw-r--r--test/stddeque.cpp4
-rw-r--r--test/stddeque_overload.cpp6
-rw-r--r--test/stdlist.cpp4
-rw-r--r--test/stdlist_overload.cpp6
-rw-r--r--test/stdvector.cpp6
-rw-r--r--test/stdvector_overload.cpp6
-rw-r--r--test/superlu_support.cpp2
-rw-r--r--test/svd_common.h5
-rw-r--r--test/svd_fill.h1
-rw-r--r--test/swap.cpp6
-rw-r--r--test/symbolic_index.cpp36
-rw-r--r--test/triangular.cpp9
-rw-r--r--test/umeyama.cpp2
-rw-r--r--test/umfpack_support.cpp14
-rw-r--r--test/unalignedassert.cpp2
-rw-r--r--test/unalignedcount.cpp2
-rw-r--r--test/upperbidiagonalization.cpp6
-rw-r--r--test/vectorization_logic.cpp16
-rw-r--r--test/vectorwiseop.cpp4
-rw-r--r--test/visitor.cpp4
-rw-r--r--test/zerosized.cpp2
-rw-r--r--unsupported/Eigen/AdolcForward2
-rw-r--r--unsupported/Eigen/AlignedVector36
-rw-r--r--unsupported/Eigen/ArpackSupport10
-rw-r--r--unsupported/Eigen/AutoDiff6
-rw-r--r--unsupported/Eigen/BVH6
-rw-r--r--unsupported/Eigen/CXX11/Tensor32
-rw-r--r--unsupported/Eigen/CXX11/TensorSymmetry6
-rw-r--r--unsupported/Eigen/CXX11/ThreadPool30
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/README.md91
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/Tensor.h31
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorArgMax.h34
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorArgMaxSycl.h148
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorAssign.h51
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorBase.h98
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h1077
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorBroadcasting.h551
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorChipping.h163
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorConcatenation.h12
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h232
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorContractionBlocking.h24
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorContractionCuda.h1393
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorContractionGpu.h1412
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorContractionMapper.h75
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorContractionSycl.h50
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h501
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorConversion.h7
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorConvolution.h156
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorConvolutionSycl.h51
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorCostModel.h8
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorCustomOp.h54
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorDeviceCuda.h337
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorDeviceDefault.h29
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorDeviceGpu.h366
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h445
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorDeviceThreadPool.h105
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorDimensions.h139
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorEvalTo.h5
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorEvaluator.h191
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h376
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorExpr.h18
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorFFT.h44
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorFixedSize.h5
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorForcedEval.h23
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h44
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorFunctors.h132
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorGenerator.h23
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorGpuHipCudaDefines.h88
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorGpuHipCudaUndefines.h40
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorImagePatch.h295
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorIndexList.h189
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorInflation.h11
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorIntDiv.h16
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorLayoutSwap.h7
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorMacros.h2
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorMap.h2
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorMeta.h4
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorMorphing.h373
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorPadding.h7
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorPatch.h19
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorRandom.h72
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h598
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorReductionCuda.h750
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorReductionGpu.h825
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorReductionSycl.h24
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorRef.h8
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorReverse.h11
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorScan.h13
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorShuffling.h260
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorStorage.h8
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorStriding.h11
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorSycl.h30
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorSyclConvertToDeviceExpression.h66
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorSyclExprConstructor.h175
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractAccessor.h85
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractFunctors.h198
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorSyclFunctors.h45
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorSyclLeafCount.h75
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorSyclPlaceHolderExpr.h96
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorSyclRun.h3
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorSyclTuple.h4
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorTrace.h290
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorTraits.h8
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorUInt128.h1
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorVolumePatch.h76
-rw-r--r--unsupported/Eigen/CXX11/src/TensorSymmetry/util/TemplateGroupTheory.h2
-rw-r--r--unsupported/Eigen/CXX11/src/ThreadPool/Barrier.h64
-rw-r--r--unsupported/Eigen/CXX11/src/ThreadPool/EventCount.h13
-rw-r--r--unsupported/Eigen/CXX11/src/ThreadPool/NonBlockingThreadPool.h351
-rw-r--r--unsupported/Eigen/CXX11/src/ThreadPool/RunQueue.h10
-rw-r--r--unsupported/Eigen/CXX11/src/ThreadPool/SimpleThreadPool.h162
-rw-r--r--unsupported/Eigen/CXX11/src/ThreadPool/ThreadLocal.h45
-rw-r--r--unsupported/Eigen/CXX11/src/ThreadPool/ThreadPoolInterface.h9
-rw-r--r--unsupported/Eigen/CXX11/src/util/CXX11Meta.h74
-rw-r--r--unsupported/Eigen/CXX11/src/util/EmulateArray.h22
-rw-r--r--unsupported/Eigen/CXX11/src/util/EmulateCXX11Meta.h2
-rw-r--r--unsupported/Eigen/CXX11/src/util/MaxSizeVector.h51
-rw-r--r--unsupported/Eigen/EulerAngles8
-rw-r--r--unsupported/Eigen/FFT12
-rw-r--r--unsupported/Eigen/IterativeSolvers10
-rw-r--r--unsupported/Eigen/LevenbergMarquardt16
-rw-r--r--unsupported/Eigen/MPRealSupport6
-rw-r--r--unsupported/Eigen/MatrixFunctions10
-rw-r--r--unsupported/Eigen/MoreVectorization2
-rw-r--r--unsupported/Eigen/NonLinearOptimization44
-rw-r--r--unsupported/Eigen/NumericalDiff2
-rw-r--r--unsupported/Eigen/OpenGLSupport6
-rw-r--r--unsupported/Eigen/Polynomials8
-rw-r--r--unsupported/Eigen/Skyline6
-rw-r--r--unsupported/Eigen/SpecialFunctions8
-rw-r--r--unsupported/Eigen/Splines4
-rwxr-xr-xunsupported/Eigen/src/AutoDiff/AutoDiffScalar.h19
-rw-r--r--unsupported/Eigen/src/BVH/KdBVH.h3
-rw-r--r--unsupported/Eigen/src/Eigenvalues/ArpackSelfAdjointEigenSolver.h4
-rw-r--r--unsupported/Eigen/src/EulerAngles/EulerAngles.h2
-rw-r--r--unsupported/Eigen/src/EulerAngles/EulerSystem.h7
-rw-r--r--unsupported/Eigen/src/FFT/ei_kissfft_impl.h4
-rw-r--r--unsupported/Eigen/src/IterativeSolvers/ConstrainedConjGrad.h4
-rw-r--r--unsupported/Eigen/src/IterativeSolvers/DGMRES.h69
-rw-r--r--unsupported/Eigen/src/IterativeSolvers/MINRES.h15
-rw-r--r--unsupported/Eigen/src/IterativeSolvers/Scaling.h6
-rw-r--r--unsupported/Eigen/src/LevenbergMarquardt/LMqrsolv.h2
-rw-r--r--unsupported/Eigen/src/LevenbergMarquardt/LevenbergMarquardt.h6
-rw-r--r--unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h51
-rw-r--r--unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h36
-rw-r--r--unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h6
-rw-r--r--unsupported/Eigen/src/MatrixFunctions/MatrixPower.h7
-rw-r--r--unsupported/Eigen/src/MatrixFunctions/MatrixSquareRoot.h16
-rw-r--r--unsupported/Eigen/src/NonLinearOptimization/qrsolv.h2
-rw-r--r--unsupported/Eigen/src/NonLinearOptimization/r1updt.h2
-rw-r--r--unsupported/Eigen/src/Polynomials/Companion.h10
-rw-r--r--unsupported/Eigen/src/Skyline/SkylineInplaceLU.h2
-rw-r--r--unsupported/Eigen/src/Skyline/SkylineMatrix.h16
-rw-r--r--unsupported/Eigen/src/SparseExtra/DynamicSparseMatrix.h8
-rw-r--r--unsupported/Eigen/src/SparseExtra/MarketIO.h3
-rw-r--r--unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsArrayAPI.h98
-rw-r--r--unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsFunctors.h132
-rw-r--r--unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsHalf.h16
-rw-r--r--unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsImpl.h1013
-rw-r--r--unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsPacketMath.h27
-rw-r--r--unsupported/Eigen/src/SpecialFunctions/arch/GPU/GpuSpecialFunctions.h (renamed from unsupported/Eigen/src/SpecialFunctions/arch/CUDA/CudaSpecialFunctions.h)69
-rw-r--r--unsupported/Eigen/src/Splines/Spline.h7
-rw-r--r--unsupported/Eigen/src/Splines/SplineFitting.h6
-rw-r--r--unsupported/Eigen/src/Splines/SplineFwd.h2
-rw-r--r--unsupported/README.txt2
-rw-r--r--unsupported/bench/bench_svd.cpp2
-rw-r--r--unsupported/doc/examples/FFT.cpp6
-rw-r--r--unsupported/test/BVH.cpp2
-rw-r--r--unsupported/test/CMakeLists.txt166
-rw-r--r--unsupported/test/EulerAngles.cpp14
-rw-r--r--unsupported/test/FFTW.cpp2
-rw-r--r--unsupported/test/NonLinearOptimization.cpp4
-rw-r--r--unsupported/test/NumericalDiff.cpp4
-rw-r--r--unsupported/test/alignedvector3.cpp2
-rw-r--r--unsupported/test/autodiff.cpp26
-rw-r--r--unsupported/test/autodiff_scalar.cpp20
-rw-r--r--unsupported/test/cxx11_eventcount.cpp2
-rw-r--r--unsupported/test/cxx11_maxsizevector.cpp77
-rw-r--r--unsupported/test/cxx11_meta.cpp2
-rw-r--r--unsupported/test/cxx11_non_blocking_thread_pool.cpp66
-rw-r--r--unsupported/test/cxx11_runqueue.cpp2
-rw-r--r--unsupported/test/cxx11_tensor_argmax.cpp2
-rw-r--r--unsupported/test/cxx11_tensor_argmax_gpu.cu (renamed from unsupported/test/cxx11_tensor_argmax_cuda.cu)93
-rw-r--r--unsupported/test/cxx11_tensor_argmax_sycl.cpp245
-rw-r--r--unsupported/test/cxx11_tensor_assign.cpp2
-rw-r--r--unsupported/test/cxx11_tensor_block_access.cpp1110
-rw-r--r--unsupported/test/cxx11_tensor_broadcast_sycl.cpp4
-rw-r--r--unsupported/test/cxx11_tensor_broadcasting.cpp123
-rw-r--r--unsupported/test/cxx11_tensor_builtins_sycl.cpp4
-rw-r--r--unsupported/test/cxx11_tensor_cast_float16_gpu.cu (renamed from unsupported/test/cxx11_tensor_cast_float16_cuda.cu)13
-rw-r--r--unsupported/test/cxx11_tensor_casts.cpp2
-rw-r--r--unsupported/test/cxx11_tensor_chipping.cpp2
-rw-r--r--unsupported/test/cxx11_tensor_chipping_sycl.cpp4
-rw-r--r--unsupported/test/cxx11_tensor_comparisons.cpp2
-rw-r--r--unsupported/test/cxx11_tensor_complex_cwise_ops_gpu.cu (renamed from unsupported/test/cxx11_tensor_complex_cwise_ops_cuda.cu)19
-rw-r--r--unsupported/test/cxx11_tensor_complex_gpu.cu (renamed from unsupported/test/cxx11_tensor_complex_cuda.cu)49
-rw-r--r--unsupported/test/cxx11_tensor_concatenation.cpp10
-rw-r--r--unsupported/test/cxx11_tensor_concatenation_sycl.cpp4
-rw-r--r--unsupported/test/cxx11_tensor_const.cpp2
-rw-r--r--unsupported/test/cxx11_tensor_contract_gpu.cu (renamed from unsupported/test/cxx11_tensor_contract_cuda.cu)96
-rw-r--r--unsupported/test/cxx11_tensor_contract_sycl.cpp4
-rw-r--r--unsupported/test/cxx11_tensor_contraction.cpp53
-rw-r--r--unsupported/test/cxx11_tensor_convolution.cpp5
-rw-r--r--unsupported/test/cxx11_tensor_convolution_sycl.cpp4
-rw-r--r--unsupported/test/cxx11_tensor_custom_index.cpp2
-rw-r--r--unsupported/test/cxx11_tensor_custom_op.cpp2
-rw-r--r--unsupported/test/cxx11_tensor_custom_op_sycl.cpp165
-rw-r--r--unsupported/test/cxx11_tensor_device.cu66
-rw-r--r--unsupported/test/cxx11_tensor_device_sycl.cpp4
-rw-r--r--unsupported/test/cxx11_tensor_dimension.cpp21
-rw-r--r--unsupported/test/cxx11_tensor_empty.cpp2
-rw-r--r--unsupported/test/cxx11_tensor_executor.cpp535
-rw-r--r--unsupported/test/cxx11_tensor_expr.cpp23
-rw-r--r--unsupported/test/cxx11_tensor_fft.cpp33
-rw-r--r--unsupported/test/cxx11_tensor_fixed_size.cpp2
-rw-r--r--unsupported/test/cxx11_tensor_forced_eval.cpp2
-rw-r--r--unsupported/test/cxx11_tensor_forced_eval_sycl.cpp6
-rw-r--r--unsupported/test/cxx11_tensor_generator.cpp2
-rw-r--r--unsupported/test/cxx11_tensor_generator_sycl.cpp147
-rw-r--r--unsupported/test/cxx11_tensor_gpu.cu (renamed from unsupported/test/cxx11_tensor_cuda.cu)884
-rw-r--r--unsupported/test/cxx11_tensor_ifft.cpp2
-rw-r--r--unsupported/test/cxx11_tensor_image_patch.cpp54
-rw-r--r--unsupported/test/cxx11_tensor_image_patch_sycl.cpp1092
-rw-r--r--unsupported/test/cxx11_tensor_index_list.cpp35
-rw-r--r--unsupported/test/cxx11_tensor_inflation.cpp2
-rw-r--r--unsupported/test/cxx11_tensor_inflation_sycl.cpp136
-rw-r--r--unsupported/test/cxx11_tensor_intdiv.cpp2
-rw-r--r--unsupported/test/cxx11_tensor_io.cpp2
-rw-r--r--unsupported/test/cxx11_tensor_layout_swap.cpp2
-rw-r--r--unsupported/test/cxx11_tensor_layout_swap_sycl.cpp126
-rw-r--r--unsupported/test/cxx11_tensor_lvalue.cpp2
-rw-r--r--unsupported/test/cxx11_tensor_map.cpp2
-rw-r--r--unsupported/test/cxx11_tensor_math.cpp2
-rw-r--r--unsupported/test/cxx11_tensor_mixed_indices.cpp2
-rw-r--r--unsupported/test/cxx11_tensor_morphing.cpp25
-rw-r--r--unsupported/test/cxx11_tensor_morphing_sycl.cpp4
-rw-r--r--unsupported/test/cxx11_tensor_move.cpp81
-rw-r--r--unsupported/test/cxx11_tensor_notification.cpp2
-rw-r--r--unsupported/test/cxx11_tensor_of_complex.cpp2
-rw-r--r--unsupported/test/cxx11_tensor_of_const_values.cpp2
-rw-r--r--unsupported/test/cxx11_tensor_of_float16_gpu.cu (renamed from unsupported/test/cxx11_tensor_of_float16_cuda.cu)86
-rw-r--r--unsupported/test/cxx11_tensor_of_strings.cpp2
-rw-r--r--unsupported/test/cxx11_tensor_padding.cpp2
-rw-r--r--unsupported/test/cxx11_tensor_padding_sycl.cpp4
-rw-r--r--unsupported/test/cxx11_tensor_patch.cpp2
-rw-r--r--unsupported/test/cxx11_tensor_patch_sycl.cpp249
-rw-r--r--unsupported/test/cxx11_tensor_random.cpp2
-rw-r--r--unsupported/test/cxx11_tensor_random_gpu.cu (renamed from unsupported/test/cxx11_tensor_random_cuda.cu)34
-rw-r--r--unsupported/test/cxx11_tensor_reduction.cpp71
-rw-r--r--unsupported/test/cxx11_tensor_reduction_gpu.cu (renamed from unsupported/test/cxx11_tensor_reduction_cuda.cu)13
-rw-r--r--unsupported/test/cxx11_tensor_reduction_sycl.cpp4
-rw-r--r--unsupported/test/cxx11_tensor_ref.cpp2
-rw-r--r--unsupported/test/cxx11_tensor_reverse.cpp2
-rw-r--r--unsupported/test/cxx11_tensor_reverse_sycl.cpp4
-rw-r--r--unsupported/test/cxx11_tensor_roundings.cpp2
-rw-r--r--unsupported/test/cxx11_tensor_scan.cpp2
-rw-r--r--unsupported/test/cxx11_tensor_scan_gpu.cu (renamed from unsupported/test/cxx11_tensor_scan_cuda.cu)29
-rw-r--r--unsupported/test/cxx11_tensor_shuffling.cpp12
-rw-r--r--unsupported/test/cxx11_tensor_shuffling_sycl.cpp4
-rw-r--r--unsupported/test/cxx11_tensor_simple.cpp2
-rw-r--r--unsupported/test/cxx11_tensor_striding.cpp2
-rw-r--r--unsupported/test/cxx11_tensor_striding_sycl.cpp4
-rw-r--r--unsupported/test/cxx11_tensor_sugar.cpp2
-rw-r--r--unsupported/test/cxx11_tensor_sycl.cpp4
-rw-r--r--unsupported/test/cxx11_tensor_symmetry.cpp2
-rw-r--r--unsupported/test/cxx11_tensor_thread_pool.cpp103
-rw-r--r--unsupported/test/cxx11_tensor_trace.cpp171
-rw-r--r--unsupported/test/cxx11_tensor_uint128.cpp2
-rw-r--r--unsupported/test/cxx11_tensor_volume_patch.cpp2
-rw-r--r--unsupported/test/cxx11_tensor_volume_patch_sycl.cpp222
-rw-r--r--unsupported/test/dgmres.cpp4
-rw-r--r--unsupported/test/forward_adolc.cpp6
-rw-r--r--unsupported/test/gmres.cpp2
-rw-r--r--unsupported/test/kronecker_product.cpp28
-rw-r--r--unsupported/test/levenberg_marquardt.cpp2
-rw-r--r--unsupported/test/matrix_exponential.cpp2
-rw-r--r--unsupported/test/matrix_function.cpp14
-rw-r--r--unsupported/test/matrix_power.cpp2
-rw-r--r--unsupported/test/matrix_square_root.cpp2
-rw-r--r--unsupported/test/minres.cpp2
-rw-r--r--unsupported/test/mpreal_support.cpp2
-rw-r--r--unsupported/test/openglsupport.cpp6
-rw-r--r--unsupported/test/polynomialsolver.cpp3
-rw-r--r--unsupported/test/polynomialutils.cpp2
-rw-r--r--unsupported/test/sparse_extra.cpp6
-rw-r--r--unsupported/test/special_functions.cpp138
-rw-r--r--unsupported/test/splines.cpp2
717 files changed, 39430 insertions, 10456 deletions
diff --git a/.hgignore b/.hgignore
index dcd9f4431..ebbf746bf 100644
--- a/.hgignore
+++ b/.hgignore
@@ -13,7 +13,7 @@ core
core.*
*.bak
*~
-build*
+*build*
*.moc.*
*.moc
ui_*
diff --git a/CMakeLists.txt b/CMakeLists.txt
index fe4227cbb..6d74709a3 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -8,6 +8,7 @@ if(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_BINARY_DIR})
message(FATAL_ERROR "In-source builds not allowed. Please make a new directory (called a build directory) and run CMake from there. You may need to remove CMakeCache.txt. ")
endif()
+
# Alias Eigen_*_DIR to Eigen3_*_DIR:
set(Eigen_SOURCE_DIR ${Eigen3_SOURCE_DIR})
@@ -41,10 +42,13 @@ string(REGEX MATCH "define[ \t]+EIGEN_MINOR_VERSION[ \t]+([0-9]+)" _eigen_minor_
set(EIGEN_MINOR_VERSION "${CMAKE_MATCH_1}")
set(EIGEN_VERSION_NUMBER ${EIGEN_WORLD_VERSION}.${EIGEN_MAJOR_VERSION}.${EIGEN_MINOR_VERSION})
-# if the mercurial program is absent, this will leave the EIGEN_HG_CHANGESET string empty,
-# but won't stop CMake.
-execute_process(COMMAND hg tip -R ${CMAKE_SOURCE_DIR} OUTPUT_VARIABLE EIGEN_HGTIP_OUTPUT)
-execute_process(COMMAND hg branch -R ${CMAKE_SOURCE_DIR} OUTPUT_VARIABLE EIGEN_BRANCH_OUTPUT)
+# if we are not in a mercurial clone
+if(IS_DIRECTORY ${CMAKE_SOURCE_DIR}/.hg)
+ # if the mercurial program is absent, this will leave the EIGEN_HG_CHANGESET string empty,
+ # but won't stop CMake.
+ execute_process(COMMAND hg tip -R ${CMAKE_SOURCE_DIR} OUTPUT_VARIABLE EIGEN_HGTIP_OUTPUT)
+ execute_process(COMMAND hg branch -R ${CMAKE_SOURCE_DIR} OUTPUT_VARIABLE EIGEN_BRANCH_OUTPUT)
+endif()
# if this is the default (aka development) branch, extract the mercurial changeset number from the hg tip output...
if(EIGEN_BRANCH_OUTPUT MATCHES "default")
@@ -104,7 +108,7 @@ if(NOT WIN32 OR NOT CMAKE_HOST_SYSTEM_NAME MATCHES Windows)
option(EIGEN_BUILD_PKGCONFIG "Build pkg-config .pc file for Eigen" ON)
endif()
-set(CMAKE_INCLUDE_CURRENT_DIR ON)
+set(CMAKE_INCLUDE_CURRENT_DIR OFF)
option(EIGEN_SPLIT_LARGE_TESTS "Split large tests into smaller executables" ON)
@@ -153,11 +157,7 @@ if(NOT MSVC)
ei_add_cxx_compiler_flag("-Wdouble-promotion")
# ei_add_cxx_compiler_flag("-Wconversion")
- # -Wshadow is insanely too strict with gcc, hopefully it will become usable with gcc 6
- # if(NOT CMAKE_COMPILER_IS_GNUCXX OR (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER "5.0.0"))
- if(NOT CMAKE_COMPILER_IS_GNUCXX)
- ei_add_cxx_compiler_flag("-Wshadow")
- endif()
+ ei_add_cxx_compiler_flag("-Wshadow")
ei_add_cxx_compiler_flag("-Wno-psabi")
ei_add_cxx_compiler_flag("-Wno-variadic-macros")
@@ -232,7 +232,10 @@ if(NOT MSVC)
option(EIGEN_TEST_AVX512 "Enable/Disable AVX512 in tests/examples" OFF)
if(EIGEN_TEST_AVX512)
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mavx512f -fabi-version=6 -DEIGEN_ENABLE_AVX512")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mavx512f -DEIGEN_ENABLE_AVX512")
+ if (NOT "${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fabi-version=6")
+ endif()
message(STATUS "Enabling AVX512 in tests/examples")
endif()
@@ -254,6 +257,12 @@ if(NOT MSVC)
message(STATUS "Enabling VSX in tests/examples")
endif()
+ option(EIGEN_TEST_MSA "Enable/Disable MSA in tests/examples" OFF)
+ if(EIGEN_TEST_MSA)
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mmsa")
+ message(STATUS "Enabling MSA in tests/examples")
+ endif()
+
option(EIGEN_TEST_NEON "Enable/Disable Neon in tests/examples" OFF)
if(EIGEN_TEST_NEON)
if(EIGEN_TEST_FMA)
@@ -271,12 +280,18 @@ if(NOT MSVC)
message(STATUS "Enabling NEON in tests/examples")
endif()
- option(EIGEN_TEST_ZVECTOR "Enable/Disable S390X(zEC13) ZVECTOR in tests/examples" OFF)
- if(EIGEN_TEST_ZVECTOR)
+ option(EIGEN_TEST_Z13 "Enable/Disable S390X(zEC13) ZVECTOR in tests/examples" OFF)
+ if(EIGEN_TEST_Z13)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=z13 -mzvector")
message(STATUS "Enabling S390X(zEC13) ZVECTOR in tests/examples")
endif()
+ option(EIGEN_TEST_Z14 "Enable/Disable S390X(zEC14) ZVECTOR in tests/examples" OFF)
+ if(EIGEN_TEST_Z14)
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=z14 -mzvector")
+ message(STATUS "Enabling S390X(zEC14) ZVECTOR in tests/examples")
+ endif()
+
check_cxx_compiler_flag("-fopenmp" COMPILER_SUPPORT_OPENMP)
if(COMPILER_SUPPORT_OPENMP)
option(EIGEN_TEST_OPENMP "Enable/Disable OpenMP in tests/examples" OFF)
@@ -363,7 +378,7 @@ option(EIGEN_TEST_CXX11 "Enable testing with C++11 and C++11 features (e.g. Tens
set(EIGEN_CUDA_COMPUTE_ARCH 30 CACHE STRING "The CUDA compute architecture level to target when compiling CUDA code")
-include_directories(${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR})
+include_directories(${CMAKE_CURRENT_SOURCE_DIR})
# Backward compatibility support for EIGEN_INCLUDE_INSTALL_DIR
if(EIGEN_INCLUDE_INSTALL_DIR)
@@ -437,10 +452,17 @@ endif()
# add SYCL
option(EIGEN_TEST_SYCL "Add Sycl support." OFF)
+option(EIGEN_SYCL_TRISYCL "Use the triSYCL Sycl implementation (ComputeCPP by default)." OFF)
if(EIGEN_TEST_SYCL)
set (CMAKE_MODULE_PATH "${CMAKE_ROOT}/Modules" "cmake/Modules/" "${CMAKE_MODULE_PATH}")
- include(FindComputeCpp)
-endif()
+ if(EIGEN_SYCL_TRISYCL)
+ message(STATUS "Using triSYCL")
+ include(FindTriSYCL)
+ else(EIGEN_SYCL_TRISYCL)
+ message(STATUS "Using ComputeCPP SYCL")
+ include(FindComputeCpp)
+ endif(EIGEN_SYCL_TRISYCL)
+endif(EIGEN_TEST_SYCL)
add_subdirectory(unsupported)
@@ -516,6 +538,7 @@ if (NOT CMAKE_VERSION VERSION_LESS 3.0)
# Imported target support
add_library (eigen INTERFACE)
+ add_library (Eigen3::Eigen ALIAS eigen)
target_compile_definitions (eigen INTERFACE ${EIGEN_DEFINITIONS})
target_include_directories (eigen INTERFACE
diff --git a/CTestConfig.cmake b/CTestConfig.cmake
index 4c0027824..8b4cd798e 100644
--- a/CTestConfig.cmake
+++ b/CTestConfig.cmake
@@ -11,7 +11,7 @@ set(CTEST_DROP_METHOD "http")
set(CTEST_DROP_SITE "manao.inria.fr")
set(CTEST_DROP_LOCATION "/CDash/submit.php?project=Eigen")
set(CTEST_DROP_SITE_CDASH TRUE)
-set(CTEST_PROJECT_SUBPROJECTS
-Official
-Unsupported
-)
+#set(CTEST_PROJECT_SUBPROJECTS
+#Official
+#Unsupported
+#)
diff --git a/CTestCustom.cmake.in b/CTestCustom.cmake.in
index 9fed9d327..89e487f05 100644
--- a/CTestCustom.cmake.in
+++ b/CTestCustom.cmake.in
@@ -1,3 +1,4 @@
set(CTEST_CUSTOM_MAXIMUM_NUMBER_OF_WARNINGS "2000")
set(CTEST_CUSTOM_MAXIMUM_NUMBER_OF_ERRORS "2000")
+list(APPEND CTEST_CUSTOM_ERROR_EXCEPTION @EIGEN_CTEST_ERROR_EXCEPTION@)
diff --git a/Eigen/Cholesky b/Eigen/Cholesky
index 369d1f5ec..1332b540d 100644
--- a/Eigen/Cholesky
+++ b/Eigen/Cholesky
@@ -9,6 +9,7 @@
#define EIGEN_CHOLESKY_MODULE_H
#include "Core"
+#include "Jacobi"
#include "src/Core/util/DisableStupidWarnings.h"
@@ -31,7 +32,11 @@
#include "src/Cholesky/LLT.h"
#include "src/Cholesky/LDLT.h"
#ifdef EIGEN_USE_LAPACKE
+#ifdef EIGEN_USE_MKL
+#include "mkl_lapacke.h"
+#else
#include "src/misc/lapacke.h"
+#endif
#include "src/Cholesky/LLT_LAPACKE.h"
#endif
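
The hunk above (and the matching ones in the LU, QR, SVD, and Eigenvalues modules below) makes EIGEN_USE_LAPACKE pull in MKL's own mkl_lapacke.h when EIGEN_USE_MKL is also defined, instead of Eigen's bundled src/misc/lapacke.h. A minimal sketch of how a user program opts into that path, assuming an MKL installation is available at compile and link time and that EIGEN_USE_MKL_ALL remains the convenience switch that enables the MKL/LAPACKE code paths:

    // Hedged sketch, not part of the patch: route Eigen's dense Cholesky
    // through LAPACKE/MKL. EIGEN_USE_MKL_ALL is assumed to enable
    // EIGEN_USE_LAPACKE (and, via MKL_support.h, EIGEN_USE_MKL), so the
    // #ifdef EIGEN_USE_MKL branch above selects mkl_lapacke.h.
    #define EIGEN_USE_MKL_ALL
    #include <Eigen/Dense>
    #include <iostream>

    int main() {
      const int n = 100;
      Eigen::MatrixXd A = Eigen::MatrixXd::Random(n, n);
      // Symmetric positive definite by construction.
      Eigen::MatrixXd spd = A * A.transpose() + n * Eigen::MatrixXd::Identity(n, n);
      Eigen::LLT<Eigen::MatrixXd> llt(spd);   // may dispatch to the LAPACKE backend
      Eigen::VectorXd b = Eigen::VectorXd::Random(n);
      Eigen::VectorXd x = llt.solve(b);
      std::cout << "residual: " << (spd * x - b).norm() << "\n";
      return 0;
    }
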
diff --git a/Eigen/Core b/Eigen/Core
index 884546a2b..7347a2480 100644
--- a/Eigen/Core
+++ b/Eigen/Core
@@ -14,61 +14,26 @@
// first thing Eigen does: stop the compiler from committing suicide
#include "src/Core/util/DisableStupidWarnings.h"
-// Handle NVCC/CUDA/SYCL
-#if defined(__CUDACC__) || defined(__SYCL_DEVICE_ONLY__)
- // Do not try asserts on CUDA and SYCL!
- #ifndef EIGEN_NO_DEBUG
- #define EIGEN_NO_DEBUG
- #endif
-
- #ifdef EIGEN_INTERNAL_DEBUGGING
- #undef EIGEN_INTERNAL_DEBUGGING
- #endif
-
- #ifdef EIGEN_EXCEPTIONS
- #undef EIGEN_EXCEPTIONS
- #endif
-
- // All functions callable from CUDA code must be qualified with __device__
- #ifdef __CUDACC__
- // Do not try to vectorize on CUDA and SYCL!
- #ifndef EIGEN_DONT_VECTORIZE
- #define EIGEN_DONT_VECTORIZE
- #endif
-
- #define EIGEN_DEVICE_FUNC __host__ __device__
- // We need math_functions.hpp to ensure that that EIGEN_USING_STD_MATH macro
- // works properly on the device side
- #include <math_functions.hpp>
- #else
- #define EIGEN_DEVICE_FUNC
- #endif
-#else
- #define EIGEN_DEVICE_FUNC
-#endif
+// then include this file where all our macros are defined. It's really important to do it first because
+// it's where we do all the compiler/OS/arch detections and define most defaults.
+#include "src/Core/util/Macros.h"
-// When compiling CUDA device code with NVCC, pull in math functions from the
-// global namespace. In host mode, and when device doee with clang, use the
-// std versions.
-#if defined(__CUDA_ARCH__) && defined(__NVCC__)
- #define EIGEN_USING_STD_MATH(FUNC) using ::FUNC;
-#else
- #define EIGEN_USING_STD_MATH(FUNC) using std::FUNC;
-#endif
+// This detects SSE/AVX/NEON/etc. and configures alignment settings
+#include "src/Core/util/ConfigureVectorization.h"
-#if (defined(_CPPUNWIND) || defined(__EXCEPTIONS)) && !defined(__CUDA_ARCH__) && !defined(EIGEN_EXCEPTIONS) && !defined(EIGEN_USE_SYCL)
- #define EIGEN_EXCEPTIONS
+// We need cuda_runtime.h/hip_runtime.h to ensure that
+// the EIGEN_USING_STD_MATH macro works properly on the device side
+#if defined(EIGEN_CUDACC)
+ #include <cuda_runtime.h>
+#elif defined(EIGEN_HIPCC)
+ #include <hip/hip_runtime.h>
#endif
+
#ifdef EIGEN_EXCEPTIONS
#include <new>
#endif
-// then include this file where all our macros are defined. It's really important to do it first because
-// it's where we do all the alignment settings (platform detection and honoring the user's will if he
-// defined e.g. EIGEN_DONT_ALIGN) so it needs to be done before we do anything with vectorization.
-#include "src/Core/util/Macros.h"
-
// Disable the ipa-cp-clone optimization flag with MinGW 6.x or newer (enabled by default with -O3)
// See http://eigen.tuxfamily.org/bz/show_bug.cgi?id=556 for details.
#if EIGEN_COMP_MINGW && EIGEN_GNUC_AT_LEAST(4,6)
@@ -81,169 +46,9 @@
// and inclusion of their respective header files
#include "src/Core/util/MKL_support.h"
-// if alignment is disabled, then disable vectorization. Note: EIGEN_MAX_ALIGN_BYTES is the proper check, it takes into
-// account both the user's will (EIGEN_MAX_ALIGN_BYTES,EIGEN_DONT_ALIGN) and our own platform checks
-#if EIGEN_MAX_ALIGN_BYTES==0
- #ifndef EIGEN_DONT_VECTORIZE
- #define EIGEN_DONT_VECTORIZE
- #endif
-#endif
-
-#if EIGEN_COMP_MSVC
- #include <malloc.h> // for _aligned_malloc -- need it regardless of whether vectorization is enabled
- #if (EIGEN_COMP_MSVC >= 1500) // 2008 or later
- // Remember that usage of defined() in a #define is undefined by the standard.
- // a user reported that in 64-bit mode, MSVC doesn't care to define _M_IX86_FP.
- #if (defined(_M_IX86_FP) && (_M_IX86_FP >= 2)) || EIGEN_ARCH_x86_64
- #define EIGEN_SSE2_ON_MSVC_2008_OR_LATER
- #endif
- #endif
-#else
- // Remember that usage of defined() in a #define is undefined by the standard
- #if (defined __SSE2__) && ( (!EIGEN_COMP_GNUC) || EIGEN_COMP_ICC || EIGEN_GNUC_AT_LEAST(4,2) )
- #define EIGEN_SSE2_ON_NON_MSVC_BUT_NOT_OLD_GCC
- #endif
-#endif
-
-#ifndef EIGEN_DONT_VECTORIZE
-
- #if defined (EIGEN_SSE2_ON_NON_MSVC_BUT_NOT_OLD_GCC) || defined(EIGEN_SSE2_ON_MSVC_2008_OR_LATER)
-
- // Defines symbols for compile-time detection of which instructions are
- // used.
- // EIGEN_VECTORIZE_YY is defined if and only if the instruction set YY is used
- #define EIGEN_VECTORIZE
- #define EIGEN_VECTORIZE_SSE
- #define EIGEN_VECTORIZE_SSE2
-
- // Detect sse3/ssse3/sse4:
- // gcc and icc defines __SSE3__, ...
- // there is no way to know about this on msvc. You can define EIGEN_VECTORIZE_SSE* if you
- // want to force the use of those instructions with msvc.
- #ifdef __SSE3__
- #define EIGEN_VECTORIZE_SSE3
- #endif
- #ifdef __SSSE3__
- #define EIGEN_VECTORIZE_SSSE3
- #endif
- #ifdef __SSE4_1__
- #define EIGEN_VECTORIZE_SSE4_1
- #endif
- #ifdef __SSE4_2__
- #define EIGEN_VECTORIZE_SSE4_2
- #endif
- #ifdef __AVX__
- #define EIGEN_VECTORIZE_AVX
- #define EIGEN_VECTORIZE_SSE3
- #define EIGEN_VECTORIZE_SSSE3
- #define EIGEN_VECTORIZE_SSE4_1
- #define EIGEN_VECTORIZE_SSE4_2
- #endif
- #ifdef __AVX2__
- #define EIGEN_VECTORIZE_AVX2
- #define EIGEN_VECTORIZE_AVX
- #define EIGEN_VECTORIZE_SSE3
- #define EIGEN_VECTORIZE_SSSE3
- #define EIGEN_VECTORIZE_SSE4_1
- #define EIGEN_VECTORIZE_SSE4_2
- #endif
- #ifdef __FMA__
- #define EIGEN_VECTORIZE_FMA
- #endif
- #if defined(__AVX512F__)
- #define EIGEN_VECTORIZE_AVX512
- #define EIGEN_VECTORIZE_AVX2
- #define EIGEN_VECTORIZE_AVX
- #define EIGEN_VECTORIZE_FMA
- #define EIGEN_VECTORIZE_SSE3
- #define EIGEN_VECTORIZE_SSSE3
- #define EIGEN_VECTORIZE_SSE4_1
- #define EIGEN_VECTORIZE_SSE4_2
- #ifdef __AVX512DQ__
- #define EIGEN_VECTORIZE_AVX512DQ
- #endif
- #endif
-
- // include files
-
- // This extern "C" works around a MINGW-w64 compilation issue
- // https://sourceforge.net/tracker/index.php?func=detail&aid=3018394&group_id=202880&atid=983354
- // In essence, intrin.h is included by windows.h and also declares intrinsics (just as emmintrin.h etc. below do).
- // However, intrin.h uses an extern "C" declaration, and g++ thus complains of duplicate declarations
- // with conflicting linkage. The linkage for intrinsics doesn't matter, but at that stage the compiler doesn't know;
- // so, to avoid compile errors when windows.h is included after Eigen/Core, ensure intrinsics are extern "C" here too.
- // notice that since these are C headers, the extern "C" is theoretically needed anyways.
- extern "C" {
- // In theory we should only include immintrin.h and not the other *mmintrin.h header files directly.
- // Doing so triggers some issues with ICC. However old gcc versions seems to not have this file, thus:
- #if EIGEN_COMP_ICC >= 1110
- #include <immintrin.h>
- #else
- #include <mmintrin.h>
- #include <emmintrin.h>
- #include <xmmintrin.h>
- #ifdef EIGEN_VECTORIZE_SSE3
- #include <pmmintrin.h>
- #endif
- #ifdef EIGEN_VECTORIZE_SSSE3
- #include <tmmintrin.h>
- #endif
- #ifdef EIGEN_VECTORIZE_SSE4_1
- #include <smmintrin.h>
- #endif
- #ifdef EIGEN_VECTORIZE_SSE4_2
- #include <nmmintrin.h>
- #endif
- #if defined(EIGEN_VECTORIZE_AVX) || defined(EIGEN_VECTORIZE_AVX512)
- #include <immintrin.h>
- #endif
- #endif
- } // end extern "C"
- #elif defined __VSX__
- #define EIGEN_VECTORIZE
- #define EIGEN_VECTORIZE_VSX
- #include <altivec.h>
- // We need to #undef all these ugly tokens defined in <altivec.h>
- // => use __vector instead of vector
- #undef bool
- #undef vector
- #undef pixel
- #elif defined __ALTIVEC__
- #define EIGEN_VECTORIZE
- #define EIGEN_VECTORIZE_ALTIVEC
- #include <altivec.h>
- // We need to #undef all these ugly tokens defined in <altivec.h>
- // => use __vector instead of vector
- #undef bool
- #undef vector
- #undef pixel
- #elif (defined __ARM_NEON) || (defined __ARM_NEON__)
- #define EIGEN_VECTORIZE
- #define EIGEN_VECTORIZE_NEON
- #include <arm_neon.h>
- #elif (defined __s390x__ && defined __VEC__)
- #define EIGEN_VECTORIZE
- #define EIGEN_VECTORIZE_ZVECTOR
- #include <vecintrin.h>
- #endif
-#endif
-
-#if defined(__F16C__) && !defined(EIGEN_COMP_CLANG)
- // We can use the optimized fp16 to float and float to fp16 conversion routines
- #define EIGEN_HAS_FP16_C
-#endif
-
-#if defined __CUDACC__
- #define EIGEN_VECTORIZE_CUDA
- #include <vector_types.h>
- #if defined __CUDACC_VER__ && __CUDACC_VER__ >= 70500
- #define EIGEN_HAS_CUDA_FP16
- #endif
-#endif
-#if defined EIGEN_HAS_CUDA_FP16
- #include <host_defines.h>
- #include <cuda_fp16.h>
+#if defined(EIGEN_HAS_CUDA_FP16) || defined(EIGEN_HAS_HIP_FP16)
+ #define EIGEN_HAS_GPU_FP16
#endif
#if (defined _OPENMP) && (!defined EIGEN_DONT_PARALLELIZE)
@@ -275,6 +80,10 @@
// for min/max:
#include <algorithm>
+#if EIGEN_HAS_CXX11
+#include <array>
+#endif
+
// for std::is_nothrow_move_assignable
#ifdef EIGEN_INCLUDE_TYPE_TRAITS
#include <type_traits>
@@ -299,38 +108,6 @@
#include <SYCL/sycl.hpp>
#endif
-/** \brief Namespace containing all symbols from the %Eigen library. */
-namespace Eigen {
-
-inline static const char *SimdInstructionSetsInUse(void) {
-#if defined(EIGEN_VECTORIZE_AVX512)
- return "AVX512, FMA, AVX2, AVX, SSE, SSE2, SSE3, SSSE3, SSE4.1, SSE4.2";
-#elif defined(EIGEN_VECTORIZE_AVX)
- return "AVX SSE, SSE2, SSE3, SSSE3, SSE4.1, SSE4.2";
-#elif defined(EIGEN_VECTORIZE_SSE4_2)
- return "SSE, SSE2, SSE3, SSSE3, SSE4.1, SSE4.2";
-#elif defined(EIGEN_VECTORIZE_SSE4_1)
- return "SSE, SSE2, SSE3, SSSE3, SSE4.1";
-#elif defined(EIGEN_VECTORIZE_SSSE3)
- return "SSE, SSE2, SSE3, SSSE3";
-#elif defined(EIGEN_VECTORIZE_SSE3)
- return "SSE, SSE2, SSE3";
-#elif defined(EIGEN_VECTORIZE_SSE2)
- return "SSE, SSE2";
-#elif defined(EIGEN_VECTORIZE_ALTIVEC)
- return "AltiVec";
-#elif defined(EIGEN_VECTORIZE_VSX)
- return "VSX";
-#elif defined(EIGEN_VECTORIZE_NEON)
- return "ARM NEON";
-#elif defined(EIGEN_VECTORIZE_ZVECTOR)
- return "S390X ZVECTOR";
-#else
- return "None";
-#endif
-}
-
-} // end namespace Eigen
#if defined EIGEN2_SUPPORT_STAGE40_FULL_EIGEN3_STRICTNESS || defined EIGEN2_SUPPORT_STAGE30_FULL_EIGEN3_API || defined EIGEN2_SUPPORT_STAGE20_RESOLVE_API_CONFLICTS || defined EIGEN2_SUPPORT_STAGE10_FULL_EIGEN2_API || defined EIGEN2_SUPPORT
// This will generate an error message:
@@ -339,7 +116,7 @@ inline static const char *SimdInstructionSetsInUse(void) {
namespace Eigen {
-// we use size_t frequently and we'll never remember to prepend it with std:: everytime just to
+// we use size_t frequently and we'll never remember to prepend it with std:: every time just to
// ensure QNX/QCC support
using std::size_t;
// gcc 4.6.0 wants std:: for ptrdiff_t
@@ -366,11 +143,11 @@ using std::ptrdiff_t;
#include "src/Core/util/IntegralConstant.h"
#include "src/Core/util/SymbolicIndex.h"
-
#include "src/Core/NumTraits.h"
#include "src/Core/MathFunctions.h"
#include "src/Core/GenericPacketMath.h"
#include "src/Core/MathFunctionsImpl.h"
+#include "src/Core/arch/Default/ConjHelper.h"
#if defined EIGEN_VECTORIZE_AVX512
#include "src/Core/arch/SSE/PacketMath.h"
@@ -388,6 +165,7 @@ using std::ptrdiff_t;
#include "src/Core/arch/AVX/MathFunctions.h"
#include "src/Core/arch/AVX/Complex.h"
#include "src/Core/arch/AVX/TypeCasting.h"
+ #include "src/Core/arch/SSE/TypeCasting.h"
#elif defined EIGEN_VECTORIZE_SSE
#include "src/Core/arch/SSE/PacketMath.h"
#include "src/Core/arch/SSE/MathFunctions.h"
@@ -401,22 +179,33 @@ using std::ptrdiff_t;
#include "src/Core/arch/NEON/PacketMath.h"
#include "src/Core/arch/NEON/MathFunctions.h"
#include "src/Core/arch/NEON/Complex.h"
+ #include "src/Core/arch/NEON/TypeCasting.h"
#elif defined EIGEN_VECTORIZE_ZVECTOR
#include "src/Core/arch/ZVector/PacketMath.h"
#include "src/Core/arch/ZVector/MathFunctions.h"
#include "src/Core/arch/ZVector/Complex.h"
+#elif defined EIGEN_VECTORIZE_MSA
+ #include "src/Core/arch/MSA/PacketMath.h"
+ #include "src/Core/arch/MSA/MathFunctions.h"
+ #include "src/Core/arch/MSA/Complex.h"
#endif
// Half float support
-#include "src/Core/arch/CUDA/Half.h"
-#include "src/Core/arch/CUDA/PacketMathHalf.h"
-#include "src/Core/arch/CUDA/TypeCasting.h"
+#include "src/Core/arch/GPU/Half.h"
+#include "src/Core/arch/GPU/PacketMathHalf.h"
+#include "src/Core/arch/GPU/TypeCasting.h"
-#if defined EIGEN_VECTORIZE_CUDA
- #include "src/Core/arch/CUDA/PacketMath.h"
- #include "src/Core/arch/CUDA/MathFunctions.h"
+#if defined EIGEN_VECTORIZE_GPU
+ #include "src/Core/arch/GPU/PacketMath.h"
+ #include "src/Core/arch/GPU/MathFunctions.h"
#endif
+#if defined EIGEN_VECTORIZE_SYCL
+ #include "src/Core/arch/SYCL/InteropHeaders.h"
+ #include "src/Core/arch/SYCL/PacketMath.h"
+ #include "src/Core/arch/SYCL/MathFunctions.h"
+ #include "src/Core/arch/SYCL/TypeCasting.h"
+#endif
#include "src/Core/arch/Default/Settings.h"
#include "src/Core/functors/TernaryFunctors.h"
@@ -428,7 +217,9 @@ using std::ptrdiff_t;
// Specialized functors to enable the processing of complex numbers
// on CUDA devices
+#ifdef EIGEN_CUDACC
#include "src/Core/arch/CUDA/Complex.h"
+#endif
#include "src/Core/util/IndexedViewHelper.h"
#include "src/Core/util/ReshapedHelper.h"
diff --git a/Eigen/Eigenvalues b/Eigen/Eigenvalues
index 009e529e1..7d6ac787b 100644
--- a/Eigen/Eigenvalues
+++ b/Eigen/Eigenvalues
@@ -10,14 +10,14 @@
#include "Core"
-#include "src/Core/util/DisableStupidWarnings.h"
-
#include "Cholesky"
#include "Jacobi"
#include "Householder"
#include "LU"
#include "Geometry"
+#include "src/Core/util/DisableStupidWarnings.h"
+
/** \defgroup Eigenvalues_Module Eigenvalues module
*
*
@@ -45,7 +45,11 @@
#include "src/Eigenvalues/GeneralizedEigenSolver.h"
#include "src/Eigenvalues/MatrixBaseEigenvalues.h"
#ifdef EIGEN_USE_LAPACKE
+#ifdef EIGEN_USE_MKL
+#include "mkl_lapacke.h"
+#else
#include "src/misc/lapacke.h"
+#endif
#include "src/Eigenvalues/RealSchur_LAPACKE.h"
#include "src/Eigenvalues/ComplexSchur_LAPACKE.h"
#include "src/Eigenvalues/SelfAdjointEigenSolver_LAPACKE.h"
diff --git a/Eigen/Geometry b/Eigen/Geometry
index 131a4edfc..04aa316cb 100644
--- a/Eigen/Geometry
+++ b/Eigen/Geometry
@@ -10,12 +10,12 @@
#include "Core"
-#include "src/Core/util/DisableStupidWarnings.h"
-
#include "SVD"
#include "LU"
#include <limits>
+#include "src/Core/util/DisableStupidWarnings.h"
+
/** \defgroup Geometry_Module Geometry module
*
* This module provides support for:
diff --git a/Eigen/KLUSupport b/Eigen/KLUSupport
new file mode 100644
index 000000000..b23d90535
--- /dev/null
+++ b/Eigen/KLUSupport
@@ -0,0 +1,41 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_KLUSUPPORT_MODULE_H
+#define EIGEN_KLUSUPPORT_MODULE_H
+
+#include <Eigen/SparseCore>
+
+#include <Eigen/src/Core/util/DisableStupidWarnings.h>
+
+extern "C" {
+#include <btf.h>
+#include <klu.h>
+ }
+
+/** \ingroup Support_modules
+ * \defgroup KLUSupport_Module KLUSupport module
+ *
+ * This module provides an interface to the KLU library which is part of the <a href="http://www.suitesparse.com">suitesparse</a> package.
+ * It provides the following factorization class:
+ * - class KLU: a sparse LU factorization, well-suited for circuit simulation.
+ *
+ * \code
+ * #include <Eigen/KLUSupport>
+ * \endcode
+ *
+ * In order to use this module, the klu and btf headers must be accessible from the include paths, and your binary must be linked to the klu library and its dependencies.
+ * The dependencies depend on how klu has been compiled.
+ * For a cmake based project, you can use our FindKLU.cmake module to help you in this task.
+ *
+ */
+
+#include "src/KLUSupport/KLUSupport.h"
+
+#include <Eigen/src/Core/util/ReenableStupidWarnings.h>
+
+#endif // EIGEN_KLUSUPPORT_MODULE_H
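A minimal usage sketch for the new module, assuming the klu and btf headers are available and the binary is linked against klu and its dependencies as stated above (the solver class is the KLU factorization listed in the module description):

    #include <Eigen/Sparse>
    #include <Eigen/KLUSupport>

    // Solve A*x = b with the KLU sparse LU factorization.
    Eigen::VectorXd solveWithKLU(const Eigen::SparseMatrix<double> &A,
                                 const Eigen::VectorXd &b)
    {
      Eigen::KLU<Eigen::SparseMatrix<double> > solver;
      solver.compute(A);                  // analyzePattern() + factorize()
      if (solver.info() != Eigen::Success)
        return Eigen::VectorXd();         // factorization failed
      return solver.solve(b);
    }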
diff --git a/Eigen/LU b/Eigen/LU
index 6f6c55629..6418a86e1 100644
--- a/Eigen/LU
+++ b/Eigen/LU
@@ -28,7 +28,11 @@
#include "src/LU/FullPivLU.h"
#include "src/LU/PartialPivLU.h"
#ifdef EIGEN_USE_LAPACKE
+#ifdef EIGEN_USE_MKL
+#include "mkl_lapacke.h"
+#else
#include "src/misc/lapacke.h"
+#endif
#include "src/LU/PartialPivLU_LAPACKE.h"
#endif
#include "src/LU/Determinant.h"
diff --git a/Eigen/PaStiXSupport b/Eigen/PaStiXSupport
index de3a63b4d..234619acc 100644
--- a/Eigen/PaStiXSupport
+++ b/Eigen/PaStiXSupport
@@ -36,6 +36,7 @@ extern "C" {
* \endcode
*
* In order to use this module, the PaSTiX headers must be accessible from the include paths, and your binary must be linked to the PaSTiX library and its dependencies.
+ * This wrapper requires PaStiX version 5.x compiled without MPI support.
* The dependencies depend on how PaSTiX has been compiled.
* For a cmake based project, you can use our FindPaSTiX.cmake module to help you in this task.
*
diff --git a/Eigen/PardisoSupport b/Eigen/PardisoSupport
index 340edf51f..340edf51f 100755..100644
--- a/Eigen/PardisoSupport
+++ b/Eigen/PardisoSupport
diff --git a/Eigen/QR b/Eigen/QR
index 80838e3bd..1be1863a1 100644
--- a/Eigen/QR
+++ b/Eigen/QR
@@ -10,12 +10,12 @@
#include "Core"
-#include "src/Core/util/DisableStupidWarnings.h"
-
#include "Cholesky"
#include "Jacobi"
#include "Householder"
+#include "src/Core/util/DisableStupidWarnings.h"
+
/** \defgroup QR_Module QR module
*
*
@@ -36,7 +36,11 @@
#include "src/QR/ColPivHouseholderQR.h"
#include "src/QR/CompleteOrthogonalDecomposition.h"
#ifdef EIGEN_USE_LAPACKE
+#ifdef EIGEN_USE_MKL
+#include "mkl_lapacke.h"
+#else
#include "src/misc/lapacke.h"
+#endif
#include "src/QR/HouseholderQR_LAPACKE.h"
#include "src/QR/ColPivHouseholderQR_LAPACKE.h"
#endif
diff --git a/Eigen/QtAlignedMalloc b/Eigen/QtAlignedMalloc
index c6571f129..4f07df02a 100644
--- a/Eigen/QtAlignedMalloc
+++ b/Eigen/QtAlignedMalloc
@@ -27,7 +27,7 @@ void qFree(void *ptr)
void *qRealloc(void *ptr, std::size_t size)
{
void* newPtr = Eigen::internal::aligned_malloc(size);
- memcpy(newPtr, ptr, size);
+ std::memcpy(newPtr, ptr, size);
Eigen::internal::aligned_free(ptr);
return newPtr;
}
diff --git a/Eigen/SVD b/Eigen/SVD
index 86143c23d..5d0e75f7f 100644
--- a/Eigen/SVD
+++ b/Eigen/SVD
@@ -37,7 +37,11 @@
#include "src/SVD/JacobiSVD.h"
#include "src/SVD/BDCSVD.h"
#if defined(EIGEN_USE_LAPACKE) && !defined(EIGEN_USE_LAPACKE_STRICT)
+#ifdef EIGEN_USE_MKL
+#include "mkl_lapacke.h"
+#else
#include "src/misc/lapacke.h"
+#endif
#include "src/SVD/JacobiSVD_LAPACKE.h"
#endif
diff --git a/Eigen/SparseLU b/Eigen/SparseLU
index 38b38b531..37c4a5c5a 100644
--- a/Eigen/SparseLU
+++ b/Eigen/SparseLU
@@ -23,6 +23,8 @@
// Ordering interface
#include "OrderingMethods"
+#include "src/Core/util/DisableStupidWarnings.h"
+
#include "src/SparseLU/SparseLU_gemm_kernel.h"
#include "src/SparseLU/SparseLU_Structs.h"
@@ -43,4 +45,6 @@
#include "src/SparseLU/SparseLU_Utils.h"
#include "src/SparseLU/SparseLU.h"
+#include "src/Core/util/ReenableStupidWarnings.h"
+
#endif // EIGEN_SPARSELU_MODULE_H
diff --git a/Eigen/SparseQR b/Eigen/SparseQR
index a6f3b7f7d..f5fc5fa7f 100644
--- a/Eigen/SparseQR
+++ b/Eigen/SparseQR
@@ -28,7 +28,6 @@
*
*/
-#include "OrderingMethods"
#include "src/SparseCore/SparseColEtree.h"
#include "src/SparseQR/SparseQR.h"
diff --git a/Eigen/src/Cholesky/LDLT.h b/Eigen/src/Cholesky/LDLT.h
index fcee7b2e3..2dfeac333 100644
--- a/Eigen/src/Cholesky/LDLT.h
+++ b/Eigen/src/Cholesky/LDLT.h
@@ -247,8 +247,8 @@ template<typename _MatrixType, int _UpLo> class LDLT
/** \brief Reports whether previous computation was successful.
*
- * \returns \c Success if computation was succesful,
- * \c NumericalIssue if the matrix.appears to be negative.
+ * \returns \c Success if computation was successful,
+ * \c NumericalIssue if the factorization failed because of a zero pivot.
*/
ComputationInfo info() const
{
@@ -258,7 +258,6 @@ template<typename _MatrixType, int _UpLo> class LDLT
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename RhsType, typename DstType>
- EIGEN_DEVICE_FUNC
void _solve_impl(const RhsType &rhs, DstType &dst) const;
#endif
@@ -376,6 +375,8 @@ template<> struct ldlt_inplace<Lower>
if((rs>0) && pivot_is_valid)
A21 /= realAkk;
+ else if(rs>0)
+ ret = ret && (A21.array()==Scalar(0)).all();
if(found_zero_pivot && pivot_is_valid) ret = false; // factorization failed
else if(!pivot_is_valid) found_zero_pivot = true;
@@ -568,13 +569,14 @@ void LDLT<_MatrixType,_UpLo>::_solve_impl(const RhsType &rhs, DstType &dst) cons
// more precisely, use pseudo-inverse of D (see bug 241)
using std::abs;
const typename Diagonal<const MatrixType>::RealReturnType vecD(vectorD());
- // In some previous versions, tolerance was set to the max of 1/highest and the maximal diagonal entry * epsilon
- // as motivated by LAPACK's xGELSS:
+ // In some previous versions, tolerance was set to the max of 1/highest (or rather numeric_limits::min())
+ // and the maximal diagonal entry * epsilon as motivated by LAPACK's xGELSS:
// RealScalar tolerance = numext::maxi(vecD.array().abs().maxCoeff() * NumTraits<RealScalar>::epsilon(),RealScalar(1) / NumTraits<RealScalar>::highest());
// However, LDLT is not rank revealing, and so adjusting the tolerance wrt to the highest
// diagonal element is not well justified and leads to numerical issues in some cases.
// Moreover, Lapack's xSYTRS routines use 0 for the tolerance.
- RealScalar tolerance = RealScalar(1) / NumTraits<RealScalar>::highest();
+ // Using numeric_limits::min() gives us more robustness to denormals.
+ RealScalar tolerance = (std::numeric_limits<RealScalar>::min)();
for (Index i = 0; i < vecD.size(); ++i)
{
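Switching the tolerance to numeric_limits::min() only changes which pivots are treated as zero when the diagonal factor D is pseudo-inverted during the solve. A self-contained sketch of that step, with the diagonal of D and the partially solved right-hand side passed in explicitly (names are illustrative, not the member code of LDLT):

    #include <cmath>
    #include <limits>
    #include <Eigen/Dense>

    // Apply the pseudo-inverse of the diagonal factor D: rows whose pivot is not
    // larger than the denormal-robust tolerance are zeroed instead of divided.
    void applyDiagonalPseudoInverse(const Eigen::VectorXd &vecD, Eigen::MatrixXd &x)
    {
      const double tolerance = (std::numeric_limits<double>::min)();
      for (Eigen::Index i = 0; i < vecD.size(); ++i)
      {
        if (std::abs(vecD(i)) > tolerance)
          x.row(i) /= vecD(i);
        else
          x.row(i).setZero();   // drop directions associated with a zero pivot
      }
    }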
diff --git a/Eigen/src/Cholesky/LLT.h b/Eigen/src/Cholesky/LLT.h
index 87ca8d423..868766365 100644
--- a/Eigen/src/Cholesky/LLT.h
+++ b/Eigen/src/Cholesky/LLT.h
@@ -24,7 +24,7 @@ template<typename MatrixType, int UpLo> struct LLT_Traits;
*
* \tparam _MatrixType the type of the matrix of which we are computing the LL^T Cholesky decomposition
 * \tparam _UpLo the triangular part that will be used for the decomposition: Lower (default) or Upper.
- * The other triangular part won't be read.
+ * The other triangular part won't be read.
*
* This class performs a LL^T Cholesky decomposition of a symmetric, positive definite
* matrix A such that A = LL^* = U^*U, where L is lower triangular.
@@ -41,14 +41,18 @@ template<typename MatrixType, int UpLo> struct LLT_Traits;
* Example: \include LLT_example.cpp
* Output: \verbinclude LLT_example.out
*
+ * \b Performance: for best performance, it is recommended to use a column-major storage format
+ * with the Lower triangular part (the default), or, equivalently, a row-major storage format
+ * with the Upper triangular part. Otherwise, you might get a 20% slowdown for the full factorization
+ * step, and rank-updates can be up to 3 times slower.
+ *
* This class supports the \link InplaceDecomposition inplace decomposition \endlink mechanism.
*
+ * Note that during the decomposition, only the lower (or upper, as defined by _UpLo) triangular part of A is considered.
+ * Therefore, the opposite strict triangular part does not have to store correct values.
+ *
* \sa MatrixBase::llt(), SelfAdjointView::llt(), class LDLT
*/
- /* HEY THIS DOX IS DISABLED BECAUSE THERE's A BUG EITHER HERE OR IN LDLT ABOUT THAT (OR BOTH)
- * Note that during the decomposition, only the upper triangular part of A is considered. Therefore,
- * the strict lower part does not have to store correct values.
- */
template<typename _MatrixType, int _UpLo> class LLT
{
public:
@@ -96,7 +100,7 @@ template<typename _MatrixType, int _UpLo> class LLT
compute(matrix.derived());
}
- /** \brief Constructs a LDLT factorization from a given matrix
+ /** \brief Constructs a LLT factorization from a given matrix
*
* This overloaded constructor is provided for \link InplaceDecomposition inplace decomposition \endlink when
* \c MatrixType is a Eigen::Ref.
@@ -146,7 +150,7 @@ template<typename _MatrixType, int _UpLo> class LLT
}
template<typename Derived>
- void solveInPlace(MatrixBase<Derived> &bAndX) const;
+ void solveInPlace(const MatrixBase<Derived> &bAndX) const;
template<typename InputType>
LLT& compute(const EigenBase<InputType>& matrix);
@@ -176,8 +180,8 @@ template<typename _MatrixType, int _UpLo> class LLT
/** \brief Reports whether previous computation was successful.
*
- * \returns \c Success if computation was succesful,
- * \c NumericalIssue if the matrix.appears to be negative.
+ * \returns \c Success if computation was successful,
+ * \c NumericalIssue if the matrix appears not to be positive definite.
*/
ComputationInfo info() const
{
@@ -196,11 +200,10 @@ template<typename _MatrixType, int _UpLo> class LLT
inline Index cols() const { return m_matrix.cols(); }
template<typename VectorType>
- LLT rankUpdate(const VectorType& vec, const RealScalar& sigma = 1);
+ LLT & rankUpdate(const VectorType& vec, const RealScalar& sigma = 1);
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename RhsType, typename DstType>
- EIGEN_DEVICE_FUNC
void _solve_impl(const RhsType &rhs, DstType &dst) const;
#endif
@@ -425,7 +428,8 @@ LLT<MatrixType,_UpLo>& LLT<MatrixType,_UpLo>::compute(const EigenBase<InputType>
eigen_assert(a.rows()==a.cols());
const Index size = a.rows();
m_matrix.resize(size, size);
- m_matrix = a.derived();
+ if (!internal::is_same_dense(m_matrix, a.derived()))
+ m_matrix = a.derived();
// Compute matrix L1 norm = max abs column sum.
m_l1_norm = RealScalar(0);
@@ -454,7 +458,7 @@ LLT<MatrixType,_UpLo>& LLT<MatrixType,_UpLo>::compute(const EigenBase<InputType>
*/
template<typename _MatrixType, int _UpLo>
template<typename VectorType>
-LLT<_MatrixType,_UpLo> LLT<_MatrixType,_UpLo>::rankUpdate(const VectorType& v, const RealScalar& sigma)
+LLT<_MatrixType,_UpLo> & LLT<_MatrixType,_UpLo>::rankUpdate(const VectorType& v, const RealScalar& sigma)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(VectorType);
eigen_assert(v.size()==m_matrix.cols());
@@ -485,11 +489,14 @@ void LLT<_MatrixType,_UpLo>::_solve_impl(const RhsType &rhs, DstType &dst) const
*
* This version avoids a copy when the right hand side matrix b is not needed anymore.
*
+ * \warning The parameter is only marked 'const' to make the C++ compiler accept a temporary expression here.
+ * This function will const_cast it, so constness isn't honored here.
+ *
* \sa LLT::solve(), MatrixBase::llt()
*/
template<typename MatrixType, int _UpLo>
template<typename Derived>
-void LLT<MatrixType,_UpLo>::solveInPlace(MatrixBase<Derived> &bAndX) const
+void LLT<MatrixType,_UpLo>::solveInPlace(const MatrixBase<Derived> &bAndX) const
{
eigen_assert(m_isInitialized && "LLT is not initialized.");
eigen_assert(m_matrix.rows()==bAndX.rows());
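A usage sketch for the updated in-place solve; as the warning above explains, the const reference only exists so that temporaries are accepted, and the right-hand side is overwritten with the solution:

    #include <Eigen/Cholesky>
    #include <Eigen/Dense>

    // Factor a symmetric positive definite matrix once, then solve A*x = b
    // in place: on success, b holds x and no extra right-hand-side copy is made.
    void solveSpdInPlace(const Eigen::MatrixXd &A, Eigen::MatrixXd &b)
    {
      Eigen::LLT<Eigen::MatrixXd> llt(A);   // reads the lower triangular part by default
      if (llt.info() == Eigen::Success)
        llt.solveInPlace(b);
    }

Since rankUpdate() now returns a reference to the decomposition, successive updates can also be chained, e.g. llt.rankUpdate(v1).rankUpdate(v2).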
diff --git a/Eigen/src/CholmodSupport/CholmodSupport.h b/Eigen/src/CholmodSupport/CholmodSupport.h
index 61faf43ba..adaf52858 100644
--- a/Eigen/src/CholmodSupport/CholmodSupport.h
+++ b/Eigen/src/CholmodSupport/CholmodSupport.h
@@ -10,7 +10,7 @@
#ifndef EIGEN_CHOLMODSUPPORT_H
#define EIGEN_CHOLMODSUPPORT_H
-namespace Eigen {
+namespace Eigen {
namespace internal {
@@ -79,12 +79,12 @@ cholmod_sparse viewAsCholmod(Ref<SparseMatrix<_Scalar,_Options,_StorageIndex> >
res.dtype = 0;
res.stype = -1;
-
+
if (internal::is_same<_StorageIndex,int>::value)
{
res.itype = CHOLMOD_INT;
}
- else if (internal::is_same<_StorageIndex,long>::value)
+ else if (internal::is_same<_StorageIndex,SuiteSparse_long>::value)
{
res.itype = CHOLMOD_LONG;
}
@@ -95,9 +95,9 @@ cholmod_sparse viewAsCholmod(Ref<SparseMatrix<_Scalar,_Options,_StorageIndex> >
// setup res.xtype
internal::cholmod_configure_matrix<_Scalar>::run(res);
-
+
res.stype = 0;
-
+
return res;
}
@@ -121,7 +121,7 @@ template<typename _Scalar, int _Options, typename _Index, unsigned int UpLo>
cholmod_sparse viewAsCholmod(const SparseSelfAdjointView<const SparseMatrix<_Scalar,_Options,_Index>, UpLo>& mat)
{
cholmod_sparse res = viewAsCholmod(Ref<SparseMatrix<_Scalar,_Options,_Index> >(mat.matrix().const_cast_derived()));
-
+
if(UpLo==Upper) res.stype = 1;
if(UpLo==Lower) res.stype = -1;
// swap stype for rowmajor matrices (only works for real matrices)
@@ -167,12 +167,12 @@ namespace internal {
// template specializations for int and long that call the correct cholmod method
#define EIGEN_CHOLMOD_SPECIALIZE0(ret, name) \
- template<typename _StorageIndex> ret cm_ ## name (cholmod_common &Common) { return cholmod_ ## name (&Common); } \
- template<> ret cm_ ## name<long> (cholmod_common &Common) { return cholmod_l_ ## name (&Common); }
+ template<typename _StorageIndex> inline ret cm_ ## name (cholmod_common &Common) { return cholmod_ ## name (&Common); } \
+ template<> inline ret cm_ ## name<SuiteSparse_long> (cholmod_common &Common) { return cholmod_l_ ## name (&Common); }
#define EIGEN_CHOLMOD_SPECIALIZE1(ret, name, t1, a1) \
- template<typename _StorageIndex> ret cm_ ## name (t1& a1, cholmod_common &Common) { return cholmod_ ## name (&a1, &Common); } \
- template<> ret cm_ ## name<long> (t1& a1, cholmod_common &Common) { return cholmod_l_ ## name (&a1, &Common); }
+ template<typename _StorageIndex> inline ret cm_ ## name (t1& a1, cholmod_common &Common) { return cholmod_ ## name (&a1, &Common); } \
+ template<> inline ret cm_ ## name<SuiteSparse_long> (t1& a1, cholmod_common &Common) { return cholmod_l_ ## name (&a1, &Common); }
EIGEN_CHOLMOD_SPECIALIZE0(int, start)
EIGEN_CHOLMOD_SPECIALIZE0(int, finish)
@@ -183,16 +183,16 @@ EIGEN_CHOLMOD_SPECIALIZE1(int, free_sparse, cholmod_sparse*, A)
EIGEN_CHOLMOD_SPECIALIZE1(cholmod_factor*, analyze, cholmod_sparse, A)
-template<typename _StorageIndex> cholmod_dense* cm_solve (int sys, cholmod_factor& L, cholmod_dense& B, cholmod_common &Common) { return cholmod_solve (sys, &L, &B, &Common); }
-template<> cholmod_dense* cm_solve<long> (int sys, cholmod_factor& L, cholmod_dense& B, cholmod_common &Common) { return cholmod_l_solve (sys, &L, &B, &Common); }
+template<typename _StorageIndex> inline cholmod_dense* cm_solve (int sys, cholmod_factor& L, cholmod_dense& B, cholmod_common &Common) { return cholmod_solve (sys, &L, &B, &Common); }
+template<> inline cholmod_dense* cm_solve<SuiteSparse_long> (int sys, cholmod_factor& L, cholmod_dense& B, cholmod_common &Common) { return cholmod_l_solve (sys, &L, &B, &Common); }
-template<typename _StorageIndex> cholmod_sparse* cm_spsolve (int sys, cholmod_factor& L, cholmod_sparse& B, cholmod_common &Common) { return cholmod_spsolve (sys, &L, &B, &Common); }
-template<> cholmod_sparse* cm_spsolve<long> (int sys, cholmod_factor& L, cholmod_sparse& B, cholmod_common &Common) { return cholmod_l_spsolve (sys, &L, &B, &Common); }
+template<typename _StorageIndex> inline cholmod_sparse* cm_spsolve (int sys, cholmod_factor& L, cholmod_sparse& B, cholmod_common &Common) { return cholmod_spsolve (sys, &L, &B, &Common); }
+template<> inline cholmod_sparse* cm_spsolve<SuiteSparse_long> (int sys, cholmod_factor& L, cholmod_sparse& B, cholmod_common &Common) { return cholmod_l_spsolve (sys, &L, &B, &Common); }
template<typename _StorageIndex>
-int cm_factorize_p (cholmod_sparse* A, double beta[2], _StorageIndex* fset, std::size_t fsize, cholmod_factor* L, cholmod_common &Common) { return cholmod_factorize_p (A, beta, fset, fsize, L, &Common); }
+inline int cm_factorize_p (cholmod_sparse* A, double beta[2], _StorageIndex* fset, std::size_t fsize, cholmod_factor* L, cholmod_common &Common) { return cholmod_factorize_p (A, beta, fset, fsize, L, &Common); }
template<>
-int cm_factorize_p<long> (cholmod_sparse* A, double beta[2], long* fset, std::size_t fsize, cholmod_factor* L, cholmod_common &Common) { return cholmod_l_factorize_p (A, beta, fset, fsize, L, &Common); }
+inline int cm_factorize_p<SuiteSparse_long> (cholmod_sparse* A, double beta[2], SuiteSparse_long* fset, std::size_t fsize, cholmod_factor* L, cholmod_common &Common) { return cholmod_l_factorize_p (A, beta, fset, fsize, L, &Common); }
#undef EIGEN_CHOLMOD_SPECIALIZE0
#undef EIGEN_CHOLMOD_SPECIALIZE1
@@ -254,10 +254,10 @@ class CholmodBase : public SparseSolverBase<Derived>
internal::cm_free_factor<StorageIndex>(m_cholmodFactor, m_cholmod);
internal::cm_finish<StorageIndex>(m_cholmod);
}
-
+
inline StorageIndex cols() const { return internal::convert_index<StorageIndex, Index>(m_cholmodFactor->n); }
inline StorageIndex rows() const { return internal::convert_index<StorageIndex, Index>(m_cholmodFactor->n); }
-
+
/** \brief Reports whether previous computation was successful.
*
* \returns \c Success if computation was successful,
@@ -276,11 +276,11 @@ class CholmodBase : public SparseSolverBase<Derived>
factorize(matrix);
return derived();
}
-
+
/** Performs a symbolic decomposition on the sparsity pattern of \a matrix.
*
* This function is particularly useful when solving for several problems having the same structure.
- *
+ *
* \sa factorize()
*/
void analyzePattern(const MatrixType& matrix)
@@ -292,13 +292,13 @@ class CholmodBase : public SparseSolverBase<Derived>
}
cholmod_sparse A = viewAsCholmod(matrix.template selfadjointView<UpLo>());
m_cholmodFactor = internal::cm_analyze<StorageIndex>(A, m_cholmod);
-
+
this->m_isInitialized = true;
this->m_info = Success;
m_analysisIsOk = true;
m_factorizationIsOk = false;
}
-
+
/** Performs a numeric decomposition of \a matrix
*
* The given matrix must have the same sparsity pattern as the matrix on which the symbolic decomposition has been performed.
@@ -315,11 +315,11 @@ class CholmodBase : public SparseSolverBase<Derived>
this->m_info = (m_cholmodFactor->minor == m_cholmodFactor->n ? Success : NumericalIssue);
m_factorizationIsOk = true;
}
-
+
/** Returns a reference to the Cholmod's configuration structure to get a full control over the performed operations.
* See the Cholmod user guide for details. */
cholmod_common& cholmod() { return m_cholmod; }
-
+
#ifndef EIGEN_PARSED_BY_DOXYGEN
/** \internal */
template<typename Rhs,typename Dest>
@@ -329,7 +329,7 @@ class CholmodBase : public SparseSolverBase<Derived>
const Index size = m_cholmodFactor->n;
EIGEN_UNUSED_VARIABLE(size);
eigen_assert(size==b.rows());
-
+
// Cholmod needs column-major storage without inner-stride, which corresponds to the default behavior of Ref.
Ref<const Matrix<typename Rhs::Scalar,Dynamic,Dynamic,ColMajor> > b_ref(b.derived());
@@ -345,7 +345,7 @@ class CholmodBase : public SparseSolverBase<Derived>
dest = Matrix<Scalar,Dest::RowsAtCompileTime,Dest::ColsAtCompileTime>::Map(reinterpret_cast<Scalar*>(x_cd->x),b.rows(),b.cols());
internal::cm_free_dense<StorageIndex>(x_cd, m_cholmod);
}
-
+
/** \internal */
template<typename RhsDerived, typename DestDerived>
void _solve_impl(const SparseMatrixBase<RhsDerived> &b, SparseMatrixBase<DestDerived> &dest) const
@@ -370,8 +370,8 @@ class CholmodBase : public SparseSolverBase<Derived>
internal::cm_free_sparse<StorageIndex>(x_cs, m_cholmod);
}
#endif // EIGEN_PARSED_BY_DOXYGEN
-
-
+
+
/** Sets the shift parameter that will be used to adjust the diagonal coefficients during the numerical factorization.
*
* During the numerical factorization, an offset term is added to the diagonal coefficients:\n
@@ -386,7 +386,7 @@ class CholmodBase : public SparseSolverBase<Derived>
m_shiftOffset[0] = double(offset);
return derived();
}
-
+
/** \returns the determinant of the underlying matrix from the current factorization */
Scalar determinant() const
{
@@ -441,7 +441,7 @@ class CholmodBase : public SparseSolverBase<Derived>
template<typename Stream>
void dumpMemory(Stream& /*s*/)
{}
-
+
protected:
mutable cholmod_common m_cholmod;
cholmod_factor* m_cholmodFactor;
@@ -478,11 +478,11 @@ class CholmodSimplicialLLT : public CholmodBase<_MatrixType, _UpLo, CholmodSimpl
{
typedef CholmodBase<_MatrixType, _UpLo, CholmodSimplicialLLT> Base;
using Base::m_cholmod;
-
+
public:
-
+
typedef _MatrixType MatrixType;
-
+
CholmodSimplicialLLT() : Base() { init(); }
CholmodSimplicialLLT(const MatrixType& matrix) : Base()
@@ -529,11 +529,11 @@ class CholmodSimplicialLDLT : public CholmodBase<_MatrixType, _UpLo, CholmodSimp
{
typedef CholmodBase<_MatrixType, _UpLo, CholmodSimplicialLDLT> Base;
using Base::m_cholmod;
-
+
public:
-
+
typedef _MatrixType MatrixType;
-
+
CholmodSimplicialLDLT() : Base() { init(); }
CholmodSimplicialLDLT(const MatrixType& matrix) : Base()
@@ -578,11 +578,11 @@ class CholmodSupernodalLLT : public CholmodBase<_MatrixType, _UpLo, CholmodSuper
{
typedef CholmodBase<_MatrixType, _UpLo, CholmodSupernodalLLT> Base;
using Base::m_cholmod;
-
+
public:
-
+
typedef _MatrixType MatrixType;
-
+
CholmodSupernodalLLT() : Base() { init(); }
CholmodSupernodalLLT(const MatrixType& matrix) : Base()
@@ -629,11 +629,11 @@ class CholmodDecomposition : public CholmodBase<_MatrixType, _UpLo, CholmodDecom
{
typedef CholmodBase<_MatrixType, _UpLo, CholmodDecomposition> Base;
using Base::m_cholmod;
-
+
public:
-
+
typedef _MatrixType MatrixType;
-
+
CholmodDecomposition() : Base() { init(); }
CholmodDecomposition(const MatrixType& matrix) : Base()
@@ -643,7 +643,7 @@ class CholmodDecomposition : public CholmodBase<_MatrixType, _UpLo, CholmodDecom
}
~CholmodDecomposition() {}
-
+
void setMode(CholmodMode mode)
{
switch(mode)
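Replacing long by SuiteSparse_long keeps these specializations in sync with the index type CHOLMOD itself uses for its *_l_* routines, which is not plain long on every platform. A usage sketch of the wrappers, assuming CHOLMOD is installed and linked:

    #include <Eigen/Sparse>
    #include <Eigen/CholmodSupport>

    // Solve a symmetric positive definite sparse system with CHOLMOD's
    // supernodal Cholesky; only the lower triangular part of A is referenced.
    Eigen::VectorXd solveWithCholmod(const Eigen::SparseMatrix<double> &A,
                                     const Eigen::VectorXd &b)
    {
      Eigen::CholmodSupernodalLLT<Eigen::SparseMatrix<double>, Eigen::Lower> solver;
      solver.compute(A);
      if (solver.info() != Eigen::Success)
        return Eigen::VectorXd();
      return solver.solve(b);
    }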
diff --git a/Eigen/src/Core/ArithmeticSequence.h b/Eigen/src/Core/ArithmeticSequence.h
index ada1571f1..db6da0001 100644
--- a/Eigen/src/Core/ArithmeticSequence.h
+++ b/Eigen/src/Core/ArithmeticSequence.h
@@ -29,17 +29,17 @@ template<int N> struct aseq_negate<FixedInt<N> > {
template<> struct aseq_negate<FixedInt<DynamicIndex> > {};
template<typename FirstType,typename SizeType,typename IncrType,
- bool FirstIsSymbolic=Symbolic::is_symbolic<FirstType>::value,
- bool SizeIsSymbolic =Symbolic::is_symbolic<SizeType>::value>
+ bool FirstIsSymbolic=symbolic::is_symbolic<FirstType>::value,
+ bool SizeIsSymbolic =symbolic::is_symbolic<SizeType>::value>
struct aseq_reverse_first_type {
typedef Index type;
};
template<typename FirstType,typename SizeType,typename IncrType>
struct aseq_reverse_first_type<FirstType,SizeType,IncrType,true,true> {
- typedef Symbolic::AddExpr<FirstType,
- Symbolic::ProductExpr<Symbolic::AddExpr<SizeType,Symbolic::ValueExpr<FixedInt<-1> > >,
- Symbolic::ValueExpr<IncrType> >
+ typedef symbolic::AddExpr<FirstType,
+ symbolic::ProductExpr<symbolic::AddExpr<SizeType,symbolic::ValueExpr<FixedInt<-1> > >,
+ symbolic::ValueExpr<IncrType> >
> type;
};
@@ -56,14 +56,14 @@ struct aseq_reverse_first_type_aux<SizeType,IncrType,typename internal::enable_i
template<typename FirstType,typename SizeType,typename IncrType>
struct aseq_reverse_first_type<FirstType,SizeType,IncrType,true,false> {
typedef typename aseq_reverse_first_type_aux<SizeType,IncrType>::type Aux;
- typedef Symbolic::AddExpr<FirstType,Symbolic::ValueExpr<Aux> > type;
+ typedef symbolic::AddExpr<FirstType,symbolic::ValueExpr<Aux> > type;
};
template<typename FirstType,typename SizeType,typename IncrType>
struct aseq_reverse_first_type<FirstType,SizeType,IncrType,false,true> {
- typedef Symbolic::AddExpr<Symbolic::ProductExpr<Symbolic::AddExpr<SizeType,Symbolic::ValueExpr<FixedInt<-1> > >,
- Symbolic::ValueExpr<IncrType> >,
- Symbolic::ValueExpr<> > type;
+ typedef symbolic::AddExpr<symbolic::ProductExpr<symbolic::AddExpr<SizeType,symbolic::ValueExpr<FixedInt<-1> > >,
+ symbolic::ValueExpr<IncrType> >,
+ symbolic::ValueExpr<> > type;
};
#endif
@@ -225,10 +225,11 @@ auto seq(FirstType f, LastType l, IncrType incr)
-typename internal::cleanup_index_type<FirstType>::type(f)+CleanedIncrType(incr)) / CleanedIncrType(incr),
CleanedIncrType(incr));
}
-#else
+
+#else // EIGEN_HAS_CXX11
template<typename FirstType,typename LastType>
-typename internal::enable_if<!(Symbolic::is_symbolic<FirstType>::value || Symbolic::is_symbolic<LastType>::value),
+typename internal::enable_if<!(symbolic::is_symbolic<FirstType>::value || symbolic::is_symbolic<LastType>::value),
ArithmeticSequence<typename internal::cleanup_index_type<FirstType>::type,Index> >::type
seq(FirstType f, LastType l)
{
@@ -237,35 +238,35 @@ seq(FirstType f, LastType l)
}
template<typename FirstTypeDerived,typename LastType>
-typename internal::enable_if<!Symbolic::is_symbolic<LastType>::value,
- ArithmeticSequence<FirstTypeDerived, Symbolic::AddExpr<Symbolic::AddExpr<Symbolic::NegateExpr<FirstTypeDerived>,Symbolic::ValueExpr<> >,
- Symbolic::ValueExpr<internal::FixedInt<1> > > > >::type
-seq(const Symbolic::BaseExpr<FirstTypeDerived> &f, LastType l)
+typename internal::enable_if<!symbolic::is_symbolic<LastType>::value,
+ ArithmeticSequence<FirstTypeDerived, symbolic::AddExpr<symbolic::AddExpr<symbolic::NegateExpr<FirstTypeDerived>,symbolic::ValueExpr<> >,
+ symbolic::ValueExpr<internal::FixedInt<1> > > > >::type
+seq(const symbolic::BaseExpr<FirstTypeDerived> &f, LastType l)
{
return seqN(f.derived(),(typename internal::cleanup_index_type<LastType>::type(l)-f.derived()+fix<1>()));
}
template<typename FirstType,typename LastTypeDerived>
-typename internal::enable_if<!Symbolic::is_symbolic<FirstType>::value,
+typename internal::enable_if<!symbolic::is_symbolic<FirstType>::value,
ArithmeticSequence<typename internal::cleanup_index_type<FirstType>::type,
- Symbolic::AddExpr<Symbolic::AddExpr<LastTypeDerived,Symbolic::ValueExpr<> >,
- Symbolic::ValueExpr<internal::FixedInt<1> > > > >::type
-seq(FirstType f, const Symbolic::BaseExpr<LastTypeDerived> &l)
+ symbolic::AddExpr<symbolic::AddExpr<LastTypeDerived,symbolic::ValueExpr<> >,
+ symbolic::ValueExpr<internal::FixedInt<1> > > > >::type
+seq(FirstType f, const symbolic::BaseExpr<LastTypeDerived> &l)
{
return seqN(typename internal::cleanup_index_type<FirstType>::type(f),(l.derived()-typename internal::cleanup_index_type<FirstType>::type(f)+fix<1>()));
}
template<typename FirstTypeDerived,typename LastTypeDerived>
ArithmeticSequence<FirstTypeDerived,
- Symbolic::AddExpr<Symbolic::AddExpr<LastTypeDerived,Symbolic::NegateExpr<FirstTypeDerived> >,Symbolic::ValueExpr<internal::FixedInt<1> > > >
-seq(const Symbolic::BaseExpr<FirstTypeDerived> &f, const Symbolic::BaseExpr<LastTypeDerived> &l)
+ symbolic::AddExpr<symbolic::AddExpr<LastTypeDerived,symbolic::NegateExpr<FirstTypeDerived> >,symbolic::ValueExpr<internal::FixedInt<1> > > >
+seq(const symbolic::BaseExpr<FirstTypeDerived> &f, const symbolic::BaseExpr<LastTypeDerived> &l)
{
return seqN(f.derived(),(l.derived()-f.derived()+fix<1>()));
}
template<typename FirstType,typename LastType, typename IncrType>
-typename internal::enable_if<!(Symbolic::is_symbolic<FirstType>::value || Symbolic::is_symbolic<LastType>::value),
+typename internal::enable_if<!(symbolic::is_symbolic<FirstType>::value || symbolic::is_symbolic<LastType>::value),
ArithmeticSequence<typename internal::cleanup_index_type<FirstType>::type,Index,typename internal::cleanup_seq_incr<IncrType>::type> >::type
seq(FirstType f, LastType l, IncrType incr)
{
@@ -275,27 +276,27 @@ seq(FirstType f, LastType l, IncrType incr)
}
template<typename FirstTypeDerived,typename LastType, typename IncrType>
-typename internal::enable_if<!Symbolic::is_symbolic<LastType>::value,
+typename internal::enable_if<!symbolic::is_symbolic<LastType>::value,
ArithmeticSequence<FirstTypeDerived,
- Symbolic::QuotientExpr<Symbolic::AddExpr<Symbolic::AddExpr<Symbolic::NegateExpr<FirstTypeDerived>,
- Symbolic::ValueExpr<> >,
- Symbolic::ValueExpr<typename internal::cleanup_seq_incr<IncrType>::type> >,
- Symbolic::ValueExpr<typename internal::cleanup_seq_incr<IncrType>::type> >,
+ symbolic::QuotientExpr<symbolic::AddExpr<symbolic::AddExpr<symbolic::NegateExpr<FirstTypeDerived>,
+ symbolic::ValueExpr<> >,
+ symbolic::ValueExpr<typename internal::cleanup_seq_incr<IncrType>::type> >,
+ symbolic::ValueExpr<typename internal::cleanup_seq_incr<IncrType>::type> >,
typename internal::cleanup_seq_incr<IncrType>::type> >::type
-seq(const Symbolic::BaseExpr<FirstTypeDerived> &f, LastType l, IncrType incr)
+seq(const symbolic::BaseExpr<FirstTypeDerived> &f, LastType l, IncrType incr)
{
typedef typename internal::cleanup_seq_incr<IncrType>::type CleanedIncrType;
return seqN(f.derived(),(typename internal::cleanup_index_type<LastType>::type(l)-f.derived()+CleanedIncrType(incr))/CleanedIncrType(incr), incr);
}
template<typename FirstType,typename LastTypeDerived, typename IncrType>
-typename internal::enable_if<!Symbolic::is_symbolic<FirstType>::value,
+typename internal::enable_if<!symbolic::is_symbolic<FirstType>::value,
ArithmeticSequence<typename internal::cleanup_index_type<FirstType>::type,
- Symbolic::QuotientExpr<Symbolic::AddExpr<Symbolic::AddExpr<LastTypeDerived,Symbolic::ValueExpr<> >,
- Symbolic::ValueExpr<typename internal::cleanup_seq_incr<IncrType>::type> >,
- Symbolic::ValueExpr<typename internal::cleanup_seq_incr<IncrType>::type> >,
+ symbolic::QuotientExpr<symbolic::AddExpr<symbolic::AddExpr<LastTypeDerived,symbolic::ValueExpr<> >,
+ symbolic::ValueExpr<typename internal::cleanup_seq_incr<IncrType>::type> >,
+ symbolic::ValueExpr<typename internal::cleanup_seq_incr<IncrType>::type> >,
typename internal::cleanup_seq_incr<IncrType>::type> >::type
-seq(FirstType f, const Symbolic::BaseExpr<LastTypeDerived> &l, IncrType incr)
+seq(FirstType f, const symbolic::BaseExpr<LastTypeDerived> &l, IncrType incr)
{
typedef typename internal::cleanup_seq_incr<IncrType>::type CleanedIncrType;
return seqN(typename internal::cleanup_index_type<FirstType>::type(f),
@@ -304,26 +305,55 @@ seq(FirstType f, const Symbolic::BaseExpr<LastTypeDerived> &l, IncrType incr)
template<typename FirstTypeDerived,typename LastTypeDerived, typename IncrType>
ArithmeticSequence<FirstTypeDerived,
- Symbolic::QuotientExpr<Symbolic::AddExpr<Symbolic::AddExpr<LastTypeDerived,
- Symbolic::NegateExpr<FirstTypeDerived> >,
- Symbolic::ValueExpr<typename internal::cleanup_seq_incr<IncrType>::type> >,
- Symbolic::ValueExpr<typename internal::cleanup_seq_incr<IncrType>::type> >,
+ symbolic::QuotientExpr<symbolic::AddExpr<symbolic::AddExpr<LastTypeDerived,
+ symbolic::NegateExpr<FirstTypeDerived> >,
+ symbolic::ValueExpr<typename internal::cleanup_seq_incr<IncrType>::type> >,
+ symbolic::ValueExpr<typename internal::cleanup_seq_incr<IncrType>::type> >,
typename internal::cleanup_seq_incr<IncrType>::type>
-seq(const Symbolic::BaseExpr<FirstTypeDerived> &f, const Symbolic::BaseExpr<LastTypeDerived> &l, IncrType incr)
+seq(const symbolic::BaseExpr<FirstTypeDerived> &f, const symbolic::BaseExpr<LastTypeDerived> &l, IncrType incr)
{
typedef typename internal::cleanup_seq_incr<IncrType>::type CleanedIncrType;
return seqN(f.derived(),(l.derived()-f.derived()+CleanedIncrType(incr))/CleanedIncrType(incr), incr);
}
-#endif
+#endif // EIGEN_HAS_CXX11
#endif // EIGEN_PARSED_BY_DOXYGEN
+
+#if EIGEN_HAS_CXX11
+/** \cpp11
+ * \returns a symbolic ArithmeticSequence representing the last \a size elements with increment \a incr.
+ *
+ * It is a shortcut for: \code seqN(last-(size-fix<1>)*incr, size, incr) \endcode
+ *
+ * \sa lastN(SizeType), seqN(FirstType,SizeType), seq(FirstType,LastType,IncrType) */
+template<typename SizeType,typename IncrType>
+auto lastN(SizeType size, IncrType incr)
+-> decltype(seqN(Eigen::last-(size-fix<1>())*incr, size, incr))
+{
+ return seqN(Eigen::last-(size-fix<1>())*incr, size, incr);
+}
+
+/** \cpp11
+ * \returns a symbolic ArithmeticSequence representing the last \a size elements with a unit increment.
+ *
+ * It is a shortcut for: \code seq(last+fix<1>-size, last) \endcode
+ *
+ * \sa lastN(SizeType,IncrType), seqN(FirstType,SizeType), seq(FirstType,LastType) */
+template<typename SizeType>
+auto lastN(SizeType size)
+-> decltype(seqN(Eigen::last+fix<1>()-size, size))
+{
+ return seqN(Eigen::last+fix<1>()-size, size);
+}
+#endif
+
namespace internal {
// Convert a symbolic span into a usable one (i.e., remove last/end "keywords")
template<typename T>
struct make_size_type {
- typedef typename internal::conditional<Symbolic::is_symbolic<T>::value, Index, T>::type type;
+ typedef typename internal::conditional<symbolic::is_symbolic<T>::value, Index, T>::type type;
};
template<typename FirstType,typename SizeType,typename IncrType,int XprSize>
@@ -345,6 +375,39 @@ struct get_compile_time_incr<ArithmeticSequence<FirstType,SizeType,IncrType> > {
} // end namespace internal
+/** \namespace Eigen::indexing
+ * \ingroup Core_Module
+ *
+ * The sole purpose of this namespace is to be able to import all functions
+ * and symbols that are expected to be used within operator() for indexing
+ * and slicing. If you already imported the whole Eigen namespace:
+ * \code using namespace Eigen; \endcode
+ * then you are already all set. Otherwise, if you don't want to or cannot import
+ * the whole Eigen namespace, the following line:
+ * \code using namespace Eigen::indexing; \endcode
+ * is equivalent to:
+ * \code
+ using Eigen::all;
+ using Eigen::seq;
+ using Eigen::seqN;
+ using Eigen::lastN; // c++11 only
+ using Eigen::last;
+ using Eigen::lastp1;
+ using Eigen::fix;
+ \endcode
+ */
+namespace indexing {
+ using Eigen::all;
+ using Eigen::seq;
+ using Eigen::seqN;
+ #if EIGEN_HAS_CXX11
+ using Eigen::lastN;
+ #endif
+ using Eigen::last;
+ using Eigen::lastp1;
+ using Eigen::fix;
+}
+
} // end namespace Eigen
#endif // EIGEN_ARITHMETIC_SEQUENCE_H
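A short sketch of the new lastN() helpers and of the Eigen::indexing namespace documented above (C++11, operating on a plain vector):

    #include <Eigen/Dense>

    double slicingDemo()
    {
      using namespace Eigen::indexing;   // last, lastN, seq, seqN, fix, ...

      Eigen::VectorXd v = Eigen::VectorXd::LinSpaced(10, 0.0, 9.0);

      Eigen::VectorXd tail3      = v(lastN(3));         // last 3 coefficients: 7, 8, 9
      Eigen::VectorXd everyOther = v(lastN(5, 2));      // 5 coefficients ending at the last one, step 2
      Eigen::VectorXd inner      = v(seq(2, last - 1)); // coefficients 2 .. 8

      return tail3.sum() + everyOther.sum() + inner.sum();
    }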
diff --git a/Eigen/src/Core/Array.h b/Eigen/src/Core/Array.h
index 0d34269fd..e10020d4f 100644
--- a/Eigen/src/Core/Array.h
+++ b/Eigen/src/Core/Array.h
@@ -231,10 +231,16 @@ class Array
: Base(other)
{ }
+ private:
+ struct PrivateType {};
+ public:
+
/** \sa MatrixBase::operator=(const EigenBase<OtherDerived>&) */
template<typename OtherDerived>
EIGEN_DEVICE_FUNC
- EIGEN_STRONG_INLINE Array(const EigenBase<OtherDerived> &other)
+ EIGEN_STRONG_INLINE Array(const EigenBase<OtherDerived> &other,
+ typename internal::enable_if<internal::is_convertible<typename OtherDerived::Scalar,Scalar>::value,
+ PrivateType>::type = PrivateType())
: Base(other.derived())
{ }
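The extra defaulted argument above is the enable_if-on-a-private-tag idiom: the constructor silently drops out of overload resolution when the source scalar type is not convertible, without changing the public signature. A generic sketch of the pattern (names are illustrative, not Eigen's):

    #include <type_traits>

    class Wrapper
    {
      struct PrivateType {};   // not nameable by callers, so the extra argument
                               // can never be supplied explicitly
    public:
      // Participates in overload resolution only when From converts to double.
      template <typename From>
      explicit Wrapper(const From &value,
                       typename std::enable_if<std::is_convertible<From, double>::value,
                                               PrivateType>::type = PrivateType())
        : m_value(static_cast<double>(value)) {}

    private:
      double m_value;
    };

    // Wrapper w1(3);            // OK: int converts to double
    // Wrapper w2("not a num");  // rejected at compile time: no matching constructor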
diff --git a/Eigen/src/Core/ArrayBase.h b/Eigen/src/Core/ArrayBase.h
index af5fb2566..9da960f08 100644
--- a/Eigen/src/Core/ArrayBase.h
+++ b/Eigen/src/Core/ArrayBase.h
@@ -175,7 +175,7 @@ template<typename Derived> class ArrayBase
*/
template<typename Derived>
template<typename OtherDerived>
-EIGEN_STRONG_INLINE Derived &
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived &
ArrayBase<Derived>::operator-=(const ArrayBase<OtherDerived> &other)
{
call_assignment(derived(), other.derived(), internal::sub_assign_op<Scalar,typename OtherDerived::Scalar>());
@@ -188,7 +188,7 @@ ArrayBase<Derived>::operator-=(const ArrayBase<OtherDerived> &other)
*/
template<typename Derived>
template<typename OtherDerived>
-EIGEN_STRONG_INLINE Derived &
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived &
ArrayBase<Derived>::operator+=(const ArrayBase<OtherDerived>& other)
{
call_assignment(derived(), other.derived(), internal::add_assign_op<Scalar,typename OtherDerived::Scalar>());
@@ -201,7 +201,7 @@ ArrayBase<Derived>::operator+=(const ArrayBase<OtherDerived>& other)
*/
template<typename Derived>
template<typename OtherDerived>
-EIGEN_STRONG_INLINE Derived &
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived &
ArrayBase<Derived>::operator*=(const ArrayBase<OtherDerived>& other)
{
call_assignment(derived(), other.derived(), internal::mul_assign_op<Scalar,typename OtherDerived::Scalar>());
@@ -214,7 +214,7 @@ ArrayBase<Derived>::operator*=(const ArrayBase<OtherDerived>& other)
*/
template<typename Derived>
template<typename OtherDerived>
-EIGEN_STRONG_INLINE Derived &
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived &
ArrayBase<Derived>::operator/=(const ArrayBase<OtherDerived>& other)
{
call_assignment(derived(), other.derived(), internal::div_assign_op<Scalar,typename OtherDerived::Scalar>());
diff --git a/Eigen/src/Core/ArrayWrapper.h b/Eigen/src/Core/ArrayWrapper.h
index a04521a16..757b31825 100644
--- a/Eigen/src/Core/ArrayWrapper.h
+++ b/Eigen/src/Core/ArrayWrapper.h
@@ -32,7 +32,8 @@ struct traits<ArrayWrapper<ExpressionType> >
// Let's remove NestByRefBit
enum {
Flags0 = traits<typename remove_all<typename ExpressionType::Nested>::type >::Flags,
- Flags = Flags0 & ~NestByRefBit
+ LvalueBitFlag = is_lvalue<ExpressionType>::value ? LvalueBit : 0,
+ Flags = (Flags0 & ~(NestByRefBit | LvalueBit)) | LvalueBitFlag
};
};
}
@@ -89,8 +90,8 @@ class ArrayWrapper : public ArrayBase<ArrayWrapper<ExpressionType> >
EIGEN_DEVICE_FUNC
inline void evalTo(Dest& dst) const { dst = m_expression; }
- const typename internal::remove_all<NestedExpressionType>::type&
EIGEN_DEVICE_FUNC
+ const typename internal::remove_all<NestedExpressionType>::type&
nestedExpression() const
{
return m_expression;
@@ -129,7 +130,8 @@ struct traits<MatrixWrapper<ExpressionType> >
// Let's remove NestByRefBit
enum {
Flags0 = traits<typename remove_all<typename ExpressionType::Nested>::type >::Flags,
- Flags = Flags0 & ~NestByRefBit
+ LvalueBitFlag = is_lvalue<ExpressionType>::value ? LvalueBit : 0,
+ Flags = (Flags0 & ~(NestByRefBit | LvalueBit)) | LvalueBitFlag
};
};
}
diff --git a/Eigen/src/Core/Assign.h b/Eigen/src/Core/Assign.h
index 53806ba33..655412efd 100644
--- a/Eigen/src/Core/Assign.h
+++ b/Eigen/src/Core/Assign.h
@@ -16,7 +16,7 @@ namespace Eigen {
template<typename Derived>
template<typename OtherDerived>
-EIGEN_STRONG_INLINE Derived& DenseBase<Derived>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>
::lazyAssign(const DenseBase<OtherDerived>& other)
{
enum{
diff --git a/Eigen/src/Core/AssignEvaluator.h b/Eigen/src/Core/AssignEvaluator.h
index b0ec7b7ca..362d905d2 100644
--- a/Eigen/src/Core/AssignEvaluator.h
+++ b/Eigen/src/Core/AssignEvaluator.h
@@ -39,7 +39,7 @@ public:
enum {
DstAlignment = DstEvaluator::Alignment,
SrcAlignment = SrcEvaluator::Alignment,
- DstHasDirectAccess = DstFlags & DirectAccessBit,
+ DstHasDirectAccess = (DstFlags & DirectAccessBit) == DirectAccessBit,
JointAlignment = EIGEN_PLAIN_ENUM_MIN(DstAlignment,SrcAlignment)
};
@@ -83,7 +83,7 @@ private:
&& int(OuterStride)!=Dynamic && int(OuterStride)%int(InnerPacketSize)==0
&& (EIGEN_UNALIGNED_VECTORIZE || int(JointAlignment)>=int(InnerRequiredAlignment)),
MayLinearize = bool(StorageOrdersAgree) && (int(DstFlags) & int(SrcFlags) & LinearAccessBit),
- MayLinearVectorize = bool(MightVectorize) && MayLinearize && DstHasDirectAccess
+ MayLinearVectorize = bool(MightVectorize) && bool(MayLinearize) && bool(DstHasDirectAccess)
&& (EIGEN_UNALIGNED_VECTORIZE || (int(DstAlignment)>=int(LinearRequiredAlignment)) || MaxSizeAtCompileTime == Dynamic),
/* If the destination isn't aligned, we have to do runtime checks and we don't unroll,
so it's only good for large enough sizes. */
@@ -97,7 +97,7 @@ private:
public:
enum {
- Traversal = int(MayLinearVectorize) && (LinearPacketSize>InnerPacketSize) ? int(LinearVectorizedTraversal)
+ Traversal = (int(MayLinearVectorize) && (LinearPacketSize>InnerPacketSize)) ? int(LinearVectorizedTraversal)
: int(MayInnerVectorize) ? int(InnerVectorizedTraversal)
: int(MayLinearVectorize) ? int(LinearVectorizedTraversal)
: int(MaySliceVectorize) ? int(SliceVectorizedTraversal)
@@ -756,7 +756,7 @@ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void call_dense_assignment_loop(DstXprType
// AssignmentKind must define a Kind typedef.
template<typename DstShape, typename SrcShape> struct AssignmentKind;
-// Assignement kind defined in this file:
+// Assignment kind defined in this file:
struct Dense2Dense {};
struct EigenBase2EigenBase {};
@@ -899,7 +899,7 @@ struct Assignment<DstXprType, SrcXprType, Functor, EigenBase2EigenBase, Weak>
src.evalTo(dst);
}
- // NOTE The following two functions are templated to avoid their instanciation if not needed
+ // NOTE The following two functions are templated to avoid their instantiation if not needed
// This is needed because some expressions supports evalTo only and/or have 'void' as scalar type.
template<typename SrcScalarType>
EIGEN_DEVICE_FUNC
diff --git a/Eigen/src/Core/Assign_MKL.h b/Eigen/src/Core/Assign_MKL.h
index 6c2ab9264..6866095bf 100755
--- a/Eigen/src/Core/Assign_MKL.h
+++ b/Eigen/src/Core/Assign_MKL.h
@@ -84,7 +84,8 @@ class vml_assign_traits
struct Assignment<DstXprType, CwiseUnaryOp<scalar_##EIGENOP##_op<EIGENTYPE>, SrcXprNested>, assign_op<EIGENTYPE,EIGENTYPE>, \
Dense2Dense, typename enable_if<vml_assign_traits<DstXprType,SrcXprNested>::EnableVml>::type> { \
typedef CwiseUnaryOp<scalar_##EIGENOP##_op<EIGENTYPE>, SrcXprNested> SrcXprType; \
- static void run(DstXprType &dst, const SrcXprType &src, const assign_op<EIGENTYPE,EIGENTYPE> &/*func*/) { \
+ static void run(DstXprType &dst, const SrcXprType &src, const assign_op<EIGENTYPE,EIGENTYPE> &func) { \
+ resize_if_allowed(dst, src, func); \
eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols()); \
if(vml_assign_traits<DstXprType,SrcXprNested>::Traversal==LinearTraversal) { \
VMLOP(dst.size(), (const VMLTYPE*)src.nestedExpression().data(), \
@@ -144,7 +145,8 @@ EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL(ceil, Ceil, _)
Dense2Dense, typename enable_if<vml_assign_traits<DstXprType,SrcXprNested>::EnableVml>::type> { \
typedef CwiseBinaryOp<scalar_##EIGENOP##_op<EIGENTYPE,EIGENTYPE>, SrcXprNested, \
const CwiseNullaryOp<internal::scalar_constant_op<EIGENTYPE>,Plain> > SrcXprType; \
- static void run(DstXprType &dst, const SrcXprType &src, const assign_op<EIGENTYPE,EIGENTYPE> &/*func*/) { \
+ static void run(DstXprType &dst, const SrcXprType &src, const assign_op<EIGENTYPE,EIGENTYPE> &func) { \
+ resize_if_allowed(dst, src, func); \
eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols()); \
VMLTYPE exponent = reinterpret_cast<const VMLTYPE&>(src.rhs().functor().m_other); \
if(vml_assign_traits<DstXprType,SrcXprNested>::Traversal==LinearTraversal) \
diff --git a/Eigen/src/Core/BooleanRedux.h b/Eigen/src/Core/BooleanRedux.h
index ed607d5d8..ccf519067 100644
--- a/Eigen/src/Core/BooleanRedux.h
+++ b/Eigen/src/Core/BooleanRedux.h
@@ -76,7 +76,7 @@ struct any_unroller<Derived, Dynamic, Rows>
* \sa any(), Cwise::operator<()
*/
template<typename Derived>
-inline bool DenseBase<Derived>::all() const
+EIGEN_DEVICE_FUNC inline bool DenseBase<Derived>::all() const
{
typedef internal::evaluator<Derived> Evaluator;
enum {
@@ -100,7 +100,7 @@ inline bool DenseBase<Derived>::all() const
* \sa all()
*/
template<typename Derived>
-inline bool DenseBase<Derived>::any() const
+EIGEN_DEVICE_FUNC inline bool DenseBase<Derived>::any() const
{
typedef internal::evaluator<Derived> Evaluator;
enum {
@@ -124,7 +124,7 @@ inline bool DenseBase<Derived>::any() const
* \sa all(), any()
*/
template<typename Derived>
-inline Eigen::Index DenseBase<Derived>::count() const
+EIGEN_DEVICE_FUNC inline Eigen::Index DenseBase<Derived>::count() const
{
return derived().template cast<bool>().template cast<Index>().sum();
}
diff --git a/Eigen/src/Core/CommaInitializer.h b/Eigen/src/Core/CommaInitializer.h
index d218e9814..35fdbb819 100644
--- a/Eigen/src/Core/CommaInitializer.h
+++ b/Eigen/src/Core/CommaInitializer.h
@@ -141,7 +141,7 @@ struct CommaInitializer
* \sa CommaInitializer::finished(), class CommaInitializer
*/
template<typename Derived>
-inline CommaInitializer<Derived> DenseBase<Derived>::operator<< (const Scalar& s)
+EIGEN_DEVICE_FUNC inline CommaInitializer<Derived> DenseBase<Derived>::operator<< (const Scalar& s)
{
return CommaInitializer<Derived>(*static_cast<Derived*>(this), s);
}
@@ -149,7 +149,7 @@ inline CommaInitializer<Derived> DenseBase<Derived>::operator<< (const Scalar& s
/** \sa operator<<(const Scalar&) */
template<typename Derived>
template<typename OtherDerived>
-inline CommaInitializer<Derived>
+EIGEN_DEVICE_FUNC inline CommaInitializer<Derived>
DenseBase<Derived>::operator<<(const DenseBase<OtherDerived>& other)
{
return CommaInitializer<Derived>(*static_cast<Derived *>(this), other);
diff --git a/Eigen/src/Core/CoreEvaluators.h b/Eigen/src/Core/CoreEvaluators.h
index 412f5a661..264446f65 100644
--- a/Eigen/src/Core/CoreEvaluators.h
+++ b/Eigen/src/Core/CoreEvaluators.h
@@ -134,19 +134,21 @@ private:
// this helper permits to completely eliminate m_outerStride if it is known at compiletime.
template<typename Scalar,int OuterStride> class plainobjectbase_evaluator_data {
public:
- plainobjectbase_evaluator_data(const Scalar* ptr, Index outerStride) : data(ptr)
+ EIGEN_DEVICE_FUNC plainobjectbase_evaluator_data(const Scalar* ptr, Index outerStride) : data(ptr)
{
- EIGEN_ONLY_USED_FOR_DEBUG(outerStride);
+#ifndef EIGEN_INTERNAL_DEBUGGING
+ EIGEN_UNUSED_VARIABLE(outerStride);
+#endif
eigen_internal_assert(outerStride==OuterStride);
}
- Index outerStride() const { return OuterStride; }
+ EIGEN_DEVICE_FUNC Index outerStride() const { return OuterStride; }
const Scalar *data;
};
template<typename Scalar> class plainobjectbase_evaluator_data<Scalar,Dynamic> {
public:
- plainobjectbase_evaluator_data(const Scalar* ptr, Index outerStride) : data(ptr), m_outerStride(outerStride) {}
- Index outerStride() const { return m_outerStride; }
+ EIGEN_DEVICE_FUNC plainobjectbase_evaluator_data(const Scalar* ptr, Index outerStride) : data(ptr), m_outerStride(outerStride) {}
+ EIGEN_DEVICE_FUNC Index outerStride() const { return m_outerStride; }
const Scalar *data;
protected:
Index m_outerStride;
@@ -1034,7 +1036,7 @@ struct evaluator<Block<ArgType, BlockRows, BlockCols, InnerPanel> >
OuterStrideAtCompileTime = HasSameStorageOrderAsArgType
? int(outer_stride_at_compile_time<ArgType>::ret)
: int(inner_stride_at_compile_time<ArgType>::ret),
- MaskPacketAccessBit = (InnerStrideAtCompileTime == 1) ? PacketAccessBit : 0,
+ MaskPacketAccessBit = (InnerStrideAtCompileTime == 1 || HasSameStorageOrderAsArgType) ? PacketAccessBit : 0,
FlagsLinearAccessBit = (RowsAtCompileTime == 1 || ColsAtCompileTime == 1 || (InnerPanel && (evaluator<ArgType>::Flags&LinearAccessBit))) ? LinearAccessBit : 0,
FlagsRowMajorBit = XprType::Flags&RowMajorBit,
@@ -1044,7 +1046,9 @@ struct evaluator<Block<ArgType, BlockRows, BlockCols, InnerPanel> >
Flags = Flags0 | FlagsLinearAccessBit | FlagsRowMajorBit,
PacketAlignment = unpacket_traits<PacketScalar>::alignment,
- Alignment0 = (InnerPanel && (OuterStrideAtCompileTime!=Dynamic) && (((OuterStrideAtCompileTime * int(sizeof(Scalar))) % int(PacketAlignment)) == 0)) ? int(PacketAlignment) : 0,
+ Alignment0 = (InnerPanel && (OuterStrideAtCompileTime!=Dynamic)
+ && (OuterStrideAtCompileTime!=0)
+ && (((OuterStrideAtCompileTime * int(sizeof(Scalar))) % int(PacketAlignment)) == 0)) ? int(PacketAlignment) : 0,
Alignment = EIGEN_PLAIN_ENUM_MIN(evaluator<ArgType>::Alignment, Alignment0)
};
typedef block_evaluator<ArgType, BlockRows, BlockCols, InnerPanel> block_evaluator_type;
@@ -1075,14 +1079,16 @@ struct unary_evaluator<Block<ArgType, BlockRows, BlockCols, InnerPanel>, IndexBa
EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& block)
: m_argImpl(block.nestedExpression()),
m_startRow(block.startRow()),
- m_startCol(block.startCol())
+ m_startCol(block.startCol()),
+ m_linear_offset(ForwardLinearAccess?(ArgType::IsRowMajor ? block.startRow()*block.nestedExpression().cols() + block.startCol() : block.startCol()*block.nestedExpression().rows() + block.startRow()):0)
{ }
typedef typename XprType::Scalar Scalar;
typedef typename XprType::CoeffReturnType CoeffReturnType;
enum {
- RowsAtCompileTime = XprType::RowsAtCompileTime
+ RowsAtCompileTime = XprType::RowsAtCompileTime,
+ ForwardLinearAccess = (InnerPanel || int(XprType::IsRowMajor)==int(ArgType::IsRowMajor)) && bool(evaluator<ArgType>::Flags&LinearAccessBit)
};
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
@@ -1094,7 +1100,10 @@ struct unary_evaluator<Block<ArgType, BlockRows, BlockCols, InnerPanel>, IndexBa
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
CoeffReturnType coeff(Index index) const
{
- return coeff(RowsAtCompileTime == 1 ? 0 : index, RowsAtCompileTime == 1 ? index : 0);
+ if (ForwardLinearAccess)
+ return m_argImpl.coeff(m_linear_offset.value() + index);
+ else
+ return coeff(RowsAtCompileTime == 1 ? 0 : index, RowsAtCompileTime == 1 ? index : 0);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
@@ -1106,7 +1115,10 @@ struct unary_evaluator<Block<ArgType, BlockRows, BlockCols, InnerPanel>, IndexBa
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
Scalar& coeffRef(Index index)
{
- return coeffRef(RowsAtCompileTime == 1 ? 0 : index, RowsAtCompileTime == 1 ? index : 0);
+ if (ForwardLinearAccess)
+ return m_argImpl.coeffRef(m_linear_offset.value() + index);
+ else
+ return coeffRef(RowsAtCompileTime == 1 ? 0 : index, RowsAtCompileTime == 1 ? index : 0);
}
template<int LoadMode, typename PacketType>
@@ -1120,8 +1132,11 @@ struct unary_evaluator<Block<ArgType, BlockRows, BlockCols, InnerPanel>, IndexBa
EIGEN_STRONG_INLINE
PacketType packet(Index index) const
{
- return packet<LoadMode,PacketType>(RowsAtCompileTime == 1 ? 0 : index,
- RowsAtCompileTime == 1 ? index : 0);
+ if (ForwardLinearAccess)
+ return m_argImpl.template packet<LoadMode,PacketType>(m_linear_offset.value() + index);
+ else
+ return packet<LoadMode,PacketType>(RowsAtCompileTime == 1 ? 0 : index,
+ RowsAtCompileTime == 1 ? index : 0);
}
template<int StoreMode, typename PacketType>
@@ -1135,15 +1150,19 @@ struct unary_evaluator<Block<ArgType, BlockRows, BlockCols, InnerPanel>, IndexBa
EIGEN_STRONG_INLINE
void writePacket(Index index, const PacketType& x)
{
- return writePacket<StoreMode,PacketType>(RowsAtCompileTime == 1 ? 0 : index,
- RowsAtCompileTime == 1 ? index : 0,
- x);
+ if (ForwardLinearAccess)
+ return m_argImpl.template writePacket<StoreMode,PacketType>(m_linear_offset.value() + index, x);
+ else
+ return writePacket<StoreMode,PacketType>(RowsAtCompileTime == 1 ? 0 : index,
+ RowsAtCompileTime == 1 ? index : 0,
+ x);
}
protected:
evaluator<ArgType> m_argImpl;
const variable_if_dynamic<Index, (ArgType::RowsAtCompileTime == 1 && BlockRows==1) ? 0 : Dynamic> m_startRow;
const variable_if_dynamic<Index, (ArgType::ColsAtCompileTime == 1 && BlockCols==1) ? 0 : Dynamic> m_startCol;
+ const variable_if_dynamic<Index, ForwardLinearAccess ? Dynamic : 0> m_linear_offset;
};
// TODO: This evaluator does not actually use the child evaluator;
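For linear indexing, the block evaluator above now pre-computes the offset of the block's top-left corner inside the nested expression and, when ForwardLinearAccess holds (matching storage orders or an inner panel, plus linear access on the nested evaluator), forwards coeff/packet accesses with that offset instead of splitting the index into a row and a column. A small sketch of the offset formula used in the m_linear_offset initializer:

    #include <Eigen/Dense>

    // Linear position of element (startRow, startCol) inside the nested expression.
    inline Eigen::Index blockLinearOffset(Eigen::Index nestedRows, Eigen::Index nestedCols,
                                          Eigen::Index startRow, Eigen::Index startCol,
                                          bool nestedIsRowMajor)
    {
      return nestedIsRowMajor ? startRow * nestedCols + startCol
                              : startCol * nestedRows + startRow;
    }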
diff --git a/Eigen/src/Core/CwiseBinaryOp.h b/Eigen/src/Core/CwiseBinaryOp.h
index a36765e39..bf2632d9e 100644
--- a/Eigen/src/Core/CwiseBinaryOp.h
+++ b/Eigen/src/Core/CwiseBinaryOp.h
@@ -158,7 +158,7 @@ public:
*/
template<typename Derived>
template<typename OtherDerived>
-EIGEN_STRONG_INLINE Derived &
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived &
MatrixBase<Derived>::operator-=(const MatrixBase<OtherDerived> &other)
{
call_assignment(derived(), other.derived(), internal::sub_assign_op<Scalar,typename OtherDerived::Scalar>());
@@ -171,7 +171,7 @@ MatrixBase<Derived>::operator-=(const MatrixBase<OtherDerived> &other)
*/
template<typename Derived>
template<typename OtherDerived>
-EIGEN_STRONG_INLINE Derived &
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived &
MatrixBase<Derived>::operator+=(const MatrixBase<OtherDerived>& other)
{
call_assignment(derived(), other.derived(), internal::add_assign_op<Scalar,typename OtherDerived::Scalar>());
@@ -181,4 +181,3 @@ MatrixBase<Derived>::operator+=(const MatrixBase<OtherDerived>& other)
} // end namespace Eigen
#endif // EIGEN_CWISE_BINARY_OP_H
-
diff --git a/Eigen/src/Core/CwiseNullaryOp.h b/Eigen/src/Core/CwiseNullaryOp.h
index dd498f758..b1923da0f 100644
--- a/Eigen/src/Core/CwiseNullaryOp.h
+++ b/Eigen/src/Core/CwiseNullaryOp.h
@@ -105,7 +105,7 @@ class CwiseNullaryOp : public internal::dense_xpr_base< CwiseNullaryOp<NullaryOp
*/
template<typename Derived>
template<typename CustomNullaryOp>
-EIGEN_STRONG_INLINE const CwiseNullaryOp<CustomNullaryOp, typename DenseBase<Derived>::PlainObject>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const CwiseNullaryOp<CustomNullaryOp, typename DenseBase<Derived>::PlainObject>
DenseBase<Derived>::NullaryExpr(Index rows, Index cols, const CustomNullaryOp& func)
{
return CwiseNullaryOp<CustomNullaryOp, PlainObject>(rows, cols, func);
@@ -131,7 +131,7 @@ DenseBase<Derived>::NullaryExpr(Index rows, Index cols, const CustomNullaryOp& f
*/
template<typename Derived>
template<typename CustomNullaryOp>
-EIGEN_STRONG_INLINE const CwiseNullaryOp<CustomNullaryOp, typename DenseBase<Derived>::PlainObject>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const CwiseNullaryOp<CustomNullaryOp, typename DenseBase<Derived>::PlainObject>
DenseBase<Derived>::NullaryExpr(Index size, const CustomNullaryOp& func)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
@@ -150,7 +150,7 @@ DenseBase<Derived>::NullaryExpr(Index size, const CustomNullaryOp& func)
*/
template<typename Derived>
template<typename CustomNullaryOp>
-EIGEN_STRONG_INLINE const CwiseNullaryOp<CustomNullaryOp, typename DenseBase<Derived>::PlainObject>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const CwiseNullaryOp<CustomNullaryOp, typename DenseBase<Derived>::PlainObject>
DenseBase<Derived>::NullaryExpr(const CustomNullaryOp& func)
{
return CwiseNullaryOp<CustomNullaryOp, PlainObject>(RowsAtCompileTime, ColsAtCompileTime, func);
@@ -170,7 +170,7 @@ DenseBase<Derived>::NullaryExpr(const CustomNullaryOp& func)
* \sa class CwiseNullaryOp
*/
template<typename Derived>
-EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
DenseBase<Derived>::Constant(Index rows, Index cols, const Scalar& value)
{
return DenseBase<Derived>::NullaryExpr(rows, cols, internal::scalar_constant_op<Scalar>(value));
@@ -192,7 +192,7 @@ DenseBase<Derived>::Constant(Index rows, Index cols, const Scalar& value)
* \sa class CwiseNullaryOp
*/
template<typename Derived>
-EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
DenseBase<Derived>::Constant(Index size, const Scalar& value)
{
return DenseBase<Derived>::NullaryExpr(size, internal::scalar_constant_op<Scalar>(value));
@@ -208,7 +208,7 @@ DenseBase<Derived>::Constant(Index size, const Scalar& value)
* \sa class CwiseNullaryOp
*/
template<typename Derived>
-EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
DenseBase<Derived>::Constant(const Scalar& value)
{
EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived)
@@ -220,7 +220,7 @@ DenseBase<Derived>::Constant(const Scalar& value)
* \sa LinSpaced(Index,Scalar,Scalar), setLinSpaced(Index,const Scalar&,const Scalar&)
*/
template<typename Derived>
-EIGEN_STRONG_INLINE const typename DenseBase<Derived>::RandomAccessLinSpacedReturnType
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::RandomAccessLinSpacedReturnType
DenseBase<Derived>::LinSpaced(Sequential_t, Index size, const Scalar& low, const Scalar& high)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
@@ -232,7 +232,7 @@ DenseBase<Derived>::LinSpaced(Sequential_t, Index size, const Scalar& low, const
* \sa LinSpaced(Scalar,Scalar)
*/
template<typename Derived>
-EIGEN_STRONG_INLINE const typename DenseBase<Derived>::RandomAccessLinSpacedReturnType
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::RandomAccessLinSpacedReturnType
DenseBase<Derived>::LinSpaced(Sequential_t, const Scalar& low, const Scalar& high)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
@@ -264,7 +264,7 @@ DenseBase<Derived>::LinSpaced(Sequential_t, const Scalar& low, const Scalar& hig
* \sa setLinSpaced(Index,const Scalar&,const Scalar&), CwiseNullaryOp
*/
template<typename Derived>
-EIGEN_STRONG_INLINE const typename DenseBase<Derived>::RandomAccessLinSpacedReturnType
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::RandomAccessLinSpacedReturnType
DenseBase<Derived>::LinSpaced(Index size, const Scalar& low, const Scalar& high)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
@@ -276,7 +276,7 @@ DenseBase<Derived>::LinSpaced(Index size, const Scalar& low, const Scalar& high)
* Special version for fixed size types which does not require the size parameter.
*/
template<typename Derived>
-EIGEN_STRONG_INLINE const typename DenseBase<Derived>::RandomAccessLinSpacedReturnType
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::RandomAccessLinSpacedReturnType
DenseBase<Derived>::LinSpaced(const Scalar& low, const Scalar& high)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
@@ -286,7 +286,7 @@ DenseBase<Derived>::LinSpaced(const Scalar& low, const Scalar& high)
/** \returns true if all coefficients in this matrix are approximately equal to \a val, to within precision \a prec */
template<typename Derived>
-bool DenseBase<Derived>::isApproxToConstant
+EIGEN_DEVICE_FUNC bool DenseBase<Derived>::isApproxToConstant
(const Scalar& val, const RealScalar& prec) const
{
typename internal::nested_eval<Derived,1>::type self(derived());
@@ -301,7 +301,7 @@ bool DenseBase<Derived>::isApproxToConstant
*
* \returns true if all coefficients in this matrix are approximately equal to \a value, to within precision \a prec */
template<typename Derived>
-bool DenseBase<Derived>::isConstant
+EIGEN_DEVICE_FUNC bool DenseBase<Derived>::isConstant
(const Scalar& val, const RealScalar& prec) const
{
return isApproxToConstant(val, prec);
@@ -312,7 +312,7 @@ bool DenseBase<Derived>::isConstant
* \sa setConstant(), Constant(), class CwiseNullaryOp
*/
template<typename Derived>
-EIGEN_STRONG_INLINE void DenseBase<Derived>::fill(const Scalar& val)
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void DenseBase<Derived>::fill(const Scalar& val)
{
setConstant(val);
}
@@ -322,7 +322,7 @@ EIGEN_STRONG_INLINE void DenseBase<Derived>::fill(const Scalar& val)
* \sa fill(), setConstant(Index,const Scalar&), setConstant(Index,Index,const Scalar&), setZero(), setOnes(), Constant(), class CwiseNullaryOp, setZero(), setOnes()
*/
template<typename Derived>
-EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setConstant(const Scalar& val)
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setConstant(const Scalar& val)
{
return derived() = Constant(rows(), cols(), val);
}
@@ -337,7 +337,7 @@ EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setConstant(const Scalar& val)
* \sa MatrixBase::setConstant(const Scalar&), setConstant(Index,Index,const Scalar&), class CwiseNullaryOp, MatrixBase::Constant(const Scalar&)
*/
template<typename Derived>
-EIGEN_STRONG_INLINE Derived&
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived&
PlainObjectBase<Derived>::setConstant(Index size, const Scalar& val)
{
resize(size);
@@ -356,7 +356,7 @@ PlainObjectBase<Derived>::setConstant(Index size, const Scalar& val)
* \sa MatrixBase::setConstant(const Scalar&), setConstant(Index,const Scalar&), class CwiseNullaryOp, MatrixBase::Constant(const Scalar&)
*/
template<typename Derived>
-EIGEN_STRONG_INLINE Derived&
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived&
PlainObjectBase<Derived>::setConstant(Index rows, Index cols, const Scalar& val)
{
resize(rows, cols);
@@ -380,7 +380,7 @@ PlainObjectBase<Derived>::setConstant(Index rows, Index cols, const Scalar& val)
* \sa LinSpaced(Index,const Scalar&,const Scalar&), CwiseNullaryOp
*/
template<typename Derived>
-EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setLinSpaced(Index newSize, const Scalar& low, const Scalar& high)
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setLinSpaced(Index newSize, const Scalar& low, const Scalar& high)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return derived() = Derived::NullaryExpr(newSize, internal::linspaced_op<Scalar,PacketScalar>(low,high,newSize));
@@ -400,7 +400,7 @@ EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setLinSpaced(Index newSize, con
* \sa LinSpaced(Index,const Scalar&,const Scalar&), setLinSpaced(Index, const Scalar&, const Scalar&), CwiseNullaryOp
*/
template<typename Derived>
-EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setLinSpaced(const Scalar& low, const Scalar& high)
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setLinSpaced(const Scalar& low, const Scalar& high)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return setLinSpaced(size(), low, high);
@@ -423,7 +423,7 @@ EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setLinSpaced(const Scalar& low,
* \sa Zero(), Zero(Index)
*/
template<typename Derived>
-EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
DenseBase<Derived>::Zero(Index rows, Index cols)
{
return Constant(rows, cols, Scalar(0));
@@ -446,7 +446,7 @@ DenseBase<Derived>::Zero(Index rows, Index cols)
* \sa Zero(), Zero(Index,Index)
*/
template<typename Derived>
-EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
DenseBase<Derived>::Zero(Index size)
{
return Constant(size, Scalar(0));
@@ -463,7 +463,7 @@ DenseBase<Derived>::Zero(Index size)
* \sa Zero(Index), Zero(Index,Index)
*/
template<typename Derived>
-EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
DenseBase<Derived>::Zero()
{
return Constant(Scalar(0));
@@ -478,7 +478,7 @@ DenseBase<Derived>::Zero()
* \sa class CwiseNullaryOp, Zero()
*/
template<typename Derived>
-bool DenseBase<Derived>::isZero(const RealScalar& prec) const
+EIGEN_DEVICE_FUNC bool DenseBase<Derived>::isZero(const RealScalar& prec) const
{
typename internal::nested_eval<Derived,1>::type self(derived());
for(Index j = 0; j < cols(); ++j)
@@ -496,7 +496,7 @@ bool DenseBase<Derived>::isZero(const RealScalar& prec) const
* \sa class CwiseNullaryOp, Zero()
*/
template<typename Derived>
-EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setZero()
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setZero()
{
return setConstant(Scalar(0));
}
@@ -511,7 +511,7 @@ EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setZero()
* \sa DenseBase::setZero(), setZero(Index,Index), class CwiseNullaryOp, DenseBase::Zero()
*/
template<typename Derived>
-EIGEN_STRONG_INLINE Derived&
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived&
PlainObjectBase<Derived>::setZero(Index newSize)
{
resize(newSize);
@@ -529,7 +529,7 @@ PlainObjectBase<Derived>::setZero(Index newSize)
* \sa DenseBase::setZero(), setZero(Index), class CwiseNullaryOp, DenseBase::Zero()
*/
template<typename Derived>
-EIGEN_STRONG_INLINE Derived&
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived&
PlainObjectBase<Derived>::setZero(Index rows, Index cols)
{
resize(rows, cols);
@@ -553,7 +553,7 @@ PlainObjectBase<Derived>::setZero(Index rows, Index cols)
* \sa Ones(), Ones(Index), isOnes(), class Ones
*/
template<typename Derived>
-EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
DenseBase<Derived>::Ones(Index rows, Index cols)
{
return Constant(rows, cols, Scalar(1));
@@ -576,7 +576,7 @@ DenseBase<Derived>::Ones(Index rows, Index cols)
* \sa Ones(), Ones(Index,Index), isOnes(), class Ones
*/
template<typename Derived>
-EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
DenseBase<Derived>::Ones(Index newSize)
{
return Constant(newSize, Scalar(1));
@@ -593,7 +593,7 @@ DenseBase<Derived>::Ones(Index newSize)
* \sa Ones(Index), Ones(Index,Index), isOnes(), class Ones
*/
template<typename Derived>
-EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
DenseBase<Derived>::Ones()
{
return Constant(Scalar(1));
@@ -608,7 +608,7 @@ DenseBase<Derived>::Ones()
* \sa class CwiseNullaryOp, Ones()
*/
template<typename Derived>
-bool DenseBase<Derived>::isOnes
+EIGEN_DEVICE_FUNC bool DenseBase<Derived>::isOnes
(const RealScalar& prec) const
{
return isApproxToConstant(Scalar(1), prec);
@@ -622,7 +622,7 @@ bool DenseBase<Derived>::isOnes
* \sa class CwiseNullaryOp, Ones()
*/
template<typename Derived>
-EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setOnes()
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setOnes()
{
return setConstant(Scalar(1));
}
@@ -637,7 +637,7 @@ EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setOnes()
* \sa MatrixBase::setOnes(), setOnes(Index,Index), class CwiseNullaryOp, MatrixBase::Ones()
*/
template<typename Derived>
-EIGEN_STRONG_INLINE Derived&
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived&
PlainObjectBase<Derived>::setOnes(Index newSize)
{
resize(newSize);
@@ -655,7 +655,7 @@ PlainObjectBase<Derived>::setOnes(Index newSize)
* \sa MatrixBase::setOnes(), setOnes(Index), class CwiseNullaryOp, MatrixBase::Ones()
*/
template<typename Derived>
-EIGEN_STRONG_INLINE Derived&
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived&
PlainObjectBase<Derived>::setOnes(Index rows, Index cols)
{
resize(rows, cols);
@@ -679,7 +679,7 @@ PlainObjectBase<Derived>::setOnes(Index rows, Index cols)
* \sa Identity(), setIdentity(), isIdentity()
*/
template<typename Derived>
-EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::IdentityReturnType
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::IdentityReturnType
MatrixBase<Derived>::Identity(Index rows, Index cols)
{
return DenseBase<Derived>::NullaryExpr(rows, cols, internal::scalar_identity_op<Scalar>());
@@ -696,7 +696,7 @@ MatrixBase<Derived>::Identity(Index rows, Index cols)
* \sa Identity(Index,Index), setIdentity(), isIdentity()
*/
template<typename Derived>
-EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::IdentityReturnType
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::IdentityReturnType
MatrixBase<Derived>::Identity()
{
EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived)
@@ -771,7 +771,7 @@ struct setIdentity_impl<Derived, true>
* \sa class CwiseNullaryOp, Identity(), Identity(Index,Index), isIdentity()
*/
template<typename Derived>
-EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::setIdentity()
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::setIdentity()
{
return internal::setIdentity_impl<Derived>::run(derived());
}
@@ -787,7 +787,7 @@ EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::setIdentity()
* \sa MatrixBase::setIdentity(), class CwiseNullaryOp, MatrixBase::Identity()
*/
template<typename Derived>
-EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::setIdentity(Index rows, Index cols)
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::setIdentity(Index rows, Index cols)
{
derived().resize(rows, cols);
return setIdentity();
@@ -800,7 +800,7 @@ EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::setIdentity(Index rows, Index
* \sa MatrixBase::Unit(Index), MatrixBase::UnitX(), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW()
*/
template<typename Derived>
-EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::Unit(Index newSize, Index i)
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::Unit(Index newSize, Index i)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return BasisReturnType(SquareMatrixType::Identity(newSize,newSize), i);
@@ -815,7 +815,7 @@ EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBa
* \sa MatrixBase::Unit(Index,Index), MatrixBase::UnitX(), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW()
*/
template<typename Derived>
-EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::Unit(Index i)
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::Unit(Index i)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return BasisReturnType(SquareMatrixType::Identity(),i);
@@ -828,7 +828,7 @@ EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBa
* \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW()
*/
template<typename Derived>
-EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::UnitX()
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::UnitX()
{ return Derived::Unit(0); }
/** \returns an expression of the Y axis unit vector (0,1{,0}^*)
@@ -838,7 +838,7 @@ EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBa
* \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW()
*/
template<typename Derived>
-EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::UnitY()
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::UnitY()
{ return Derived::Unit(1); }
/** \returns an expression of the Z axis unit vector (0,0,1{,0}^*)
@@ -848,7 +848,7 @@ EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBa
* \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW()
*/
template<typename Derived>
-EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::UnitZ()
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::UnitZ()
{ return Derived::Unit(2); }
/** \returns an expression of the W axis unit vector (0,0,0,1)
@@ -858,9 +858,45 @@ EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBa
* \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW()
*/
template<typename Derived>
-EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::UnitW()
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::UnitW()
{ return Derived::Unit(3); }
+/** \brief Set the coefficients of \c *this to the i-th unit (basis) vector
+ *
+ * \param i index of the unique coefficient to be set to 1
+ *
+ * \only_for_vectors
+ *
+ * \sa MatrixBase::setIdentity(), class CwiseNullaryOp, MatrixBase::Unit(Index,Index)
+ */
+template<typename Derived>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::setUnit(Index i)
+{
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);
+ eigen_assert(i<size());
+ derived().setZero();
+ derived().coeffRef(i) = Scalar(1);
+ return derived();
+}
+
+/** \brief Resizes to the given \a newSize, and writes the i-th unit (basis) vector into *this.
+ *
+ * \param newSize the new size of the vector
+ * \param i index of the unique coefficient to be set to 1
+ *
+ * \only_for_vectors
+ *
+ * \sa MatrixBase::setIdentity(), class CwiseNullaryOp, MatrixBase::Unit(Index,Index)
+ */
+template<typename Derived>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::setUnit(Index newSize, Index i)
+{
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);
+ eigen_assert(i<newSize);
+ derived().resize(newSize);
+ return setUnit(i);
+}
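
As an illustrative usage sketch for the two setUnit() overloads added above (not part of the patch; vector type and sizes are arbitrary):

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::VectorXd v(3);
      v.setUnit(1);      // v is now (0, 1, 0)
      v.setUnit(5, 4);   // resizes v to 5 and sets it to (0, 0, 0, 0, 1)
      std::cout << v.transpose() << std::endl;
      return 0;
    }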
+
} // end namespace Eigen
#endif // EIGEN_CWISE_NULLARY_OP_H
diff --git a/Eigen/src/Core/DenseBase.h b/Eigen/src/Core/DenseBase.h
index cd84b07a5..0c0ea95f4 100644
--- a/Eigen/src/Core/DenseBase.h
+++ b/Eigen/src/Core/DenseBase.h
@@ -157,6 +157,11 @@ template<typename Derived> class DenseBase
* we are dealing with a column-vector (if there is only one column) or with
* a row-vector (if there is only one row). */
+ NumDimensions = int(MaxSizeAtCompileTime) == 1 ? 0 : bool(IsVectorAtCompileTime) ? 1 : 2,
+ /**< This value is equal to Tensor::NumDimensions, i.e. 0 for scalars, 1 for vectors,
+ * and 2 for matrices.
+ */
+
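
A compile-time sketch of the values the new NumDimensions member takes (illustrative types; assumes a C++11 compiler for static_assert):

    #include <Eigen/Dense>

    // 1x1 fixed-size objects count as scalars, compile-time vectors as rank 1,
    // everything else as rank 2, matching Tensor::NumDimensions.
    static_assert(Eigen::Matrix<double, 1, 1>::NumDimensions == 0, "scalar");
    static_assert(Eigen::Vector3f::NumDimensions == 1, "vector");
    static_assert(Eigen::MatrixXd::NumDimensions == 2, "matrix");

    int main() { return 0; }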
Flags = internal::traits<Derived>::Flags,
/**< This stores expression \ref flags flags which may or may not be inherited by new expressions
* constructed from this one. See the \ref flags "list of flags".
@@ -296,7 +301,7 @@ template<typename Derived> class DenseBase
EIGEN_DEVICE_FUNC
Derived& operator=(const ReturnByValue<OtherDerived>& func);
- /** \ínternal
+ /** \internal
* Copies \a other into *this without evaluating other. \returns a reference to *this.
* \deprecated */
template<typename OtherDerived>
@@ -395,7 +400,7 @@ template<typename Derived> class DenseBase
* Notice that in the case of a plain matrix or vector (not an expression) this function just returns
* a const reference, in order to avoid a useless copy.
*
- * \warning Be carefull with eval() and the auto C++ keyword, as detailed in this \link TopicPitfalls_auto_keyword page \endlink.
+ * \warning Be careful with eval() and the auto C++ keyword, as detailed in this \link TopicPitfalls_auto_keyword page \endlink.
*/
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE EvalReturnType eval() const
@@ -484,9 +489,9 @@ template<typename Derived> class DenseBase
return derived().coeff(0,0);
}
- bool all() const;
- bool any() const;
- Index count() const;
+ EIGEN_DEVICE_FUNC bool all() const;
+ EIGEN_DEVICE_FUNC bool any() const;
+ EIGEN_DEVICE_FUNC Index count() const;
typedef VectorwiseOp<Derived, Horizontal> RowwiseReturnType;
typedef const VectorwiseOp<const Derived, Horizontal> ConstRowwiseReturnType;
diff --git a/Eigen/src/Core/DenseStorage.h b/Eigen/src/Core/DenseStorage.h
index 7958feeb9..3c02a1025 100644
--- a/Eigen/src/Core/DenseStorage.h
+++ b/Eigen/src/Core/DenseStorage.h
@@ -61,7 +61,7 @@ struct plain_array
#if defined(EIGEN_DISABLE_UNALIGNED_ARRAY_ASSERT)
#define EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(sizemask)
#elif EIGEN_GNUC_AT_LEAST(4,7)
- // GCC 4.7 is too aggressive in its optimizations and remove the alignement test based on the fact the array is declared to be aligned.
+  // GCC 4.7 is too aggressive in its optimizations and removes the alignment test based on the fact that the array is declared to be aligned.
// See this bug report: http://gcc.gnu.org/bugzilla/show_bug.cgi?id=53900
// Hiding the origin of the array pointer behind a function argument seems to do the trick even if the function is inlined:
template<typename PtrType>
@@ -207,7 +207,9 @@ template<typename T, int Size, int _Rows, int _Cols, int _Options> class DenseSt
EIGEN_UNUSED_VARIABLE(rows);
EIGEN_UNUSED_VARIABLE(cols);
}
- EIGEN_DEVICE_FUNC void swap(DenseStorage& other) { std::swap(m_data,other.m_data); }
+ EIGEN_DEVICE_FUNC void swap(DenseStorage& other) {
+ numext::swap(m_data, other.m_data);
+ }
EIGEN_DEVICE_FUNC static Index rows(void) {return _Rows;}
EIGEN_DEVICE_FUNC static Index cols(void) {return _Cols;}
EIGEN_DEVICE_FUNC void conservativeResize(Index,Index,Index) {}
@@ -267,7 +269,11 @@ template<typename T, int Size, int _Options> class DenseStorage<T, Size, Dynamic
}
EIGEN_DEVICE_FUNC DenseStorage(Index, Index rows, Index cols) : m_rows(rows), m_cols(cols) {}
EIGEN_DEVICE_FUNC void swap(DenseStorage& other)
- { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); std::swap(m_cols,other.m_cols); }
+ {
+ numext::swap(m_data,other.m_data);
+ numext::swap(m_rows,other.m_rows);
+ numext::swap(m_cols,other.m_cols);
+ }
EIGEN_DEVICE_FUNC Index rows() const {return m_rows;}
EIGEN_DEVICE_FUNC Index cols() const {return m_cols;}
EIGEN_DEVICE_FUNC void conservativeResize(Index, Index rows, Index cols) { m_rows = rows; m_cols = cols; }
@@ -296,7 +302,11 @@ template<typename T, int Size, int _Cols, int _Options> class DenseStorage<T, Si
return *this;
}
EIGEN_DEVICE_FUNC DenseStorage(Index, Index rows, Index) : m_rows(rows) {}
- EIGEN_DEVICE_FUNC void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); }
+ EIGEN_DEVICE_FUNC void swap(DenseStorage& other)
+ {
+ numext::swap(m_data,other.m_data);
+ numext::swap(m_rows,other.m_rows);
+ }
EIGEN_DEVICE_FUNC Index rows(void) const {return m_rows;}
EIGEN_DEVICE_FUNC Index cols(void) const {return _Cols;}
EIGEN_DEVICE_FUNC void conservativeResize(Index, Index rows, Index) { m_rows = rows; }
@@ -325,11 +335,14 @@ template<typename T, int Size, int _Rows, int _Options> class DenseStorage<T, Si
return *this;
}
EIGEN_DEVICE_FUNC DenseStorage(Index, Index, Index cols) : m_cols(cols) {}
- EIGEN_DEVICE_FUNC void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); }
+ EIGEN_DEVICE_FUNC void swap(DenseStorage& other) {
+ numext::swap(m_data,other.m_data);
+ numext::swap(m_cols,other.m_cols);
+ }
EIGEN_DEVICE_FUNC Index rows(void) const {return _Rows;}
EIGEN_DEVICE_FUNC Index cols(void) const {return m_cols;}
- void conservativeResize(Index, Index, Index cols) { m_cols = cols; }
- void resize(Index, Index, Index cols) { m_cols = cols; }
+ EIGEN_DEVICE_FUNC void conservativeResize(Index, Index, Index cols) { m_cols = cols; }
+ EIGEN_DEVICE_FUNC void resize(Index, Index, Index cols) { m_cols = cols; }
EIGEN_DEVICE_FUNC const T *data() const { return m_data.array; }
EIGEN_DEVICE_FUNC T *data() { return m_data.array; }
};
@@ -381,16 +394,19 @@ template<typename T, int _Options> class DenseStorage<T, Dynamic, Dynamic, Dynam
EIGEN_DEVICE_FUNC
DenseStorage& operator=(DenseStorage&& other) EIGEN_NOEXCEPT
{
- using std::swap;
- swap(m_data, other.m_data);
- swap(m_rows, other.m_rows);
- swap(m_cols, other.m_cols);
+ numext::swap(m_data, other.m_data);
+ numext::swap(m_rows, other.m_rows);
+ numext::swap(m_cols, other.m_cols);
return *this;
}
#endif
EIGEN_DEVICE_FUNC ~DenseStorage() { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, m_rows*m_cols); }
EIGEN_DEVICE_FUNC void swap(DenseStorage& other)
- { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); std::swap(m_cols,other.m_cols); }
+ {
+ numext::swap(m_data,other.m_data);
+ numext::swap(m_rows,other.m_rows);
+ numext::swap(m_cols,other.m_cols);
+ }
EIGEN_DEVICE_FUNC Index rows(void) const {return m_rows;}
EIGEN_DEVICE_FUNC Index cols(void) const {return m_cols;}
void conservativeResize(Index size, Index rows, Index cols)
@@ -459,14 +475,16 @@ template<typename T, int _Rows, int _Options> class DenseStorage<T, Dynamic, _Ro
EIGEN_DEVICE_FUNC
DenseStorage& operator=(DenseStorage&& other) EIGEN_NOEXCEPT
{
- using std::swap;
- swap(m_data, other.m_data);
- swap(m_cols, other.m_cols);
+ numext::swap(m_data, other.m_data);
+ numext::swap(m_cols, other.m_cols);
return *this;
}
#endif
EIGEN_DEVICE_FUNC ~DenseStorage() { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, _Rows*m_cols); }
- EIGEN_DEVICE_FUNC void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); }
+ EIGEN_DEVICE_FUNC void swap(DenseStorage& other) {
+ numext::swap(m_data,other.m_data);
+ numext::swap(m_cols,other.m_cols);
+ }
EIGEN_DEVICE_FUNC static Index rows(void) {return _Rows;}
EIGEN_DEVICE_FUNC Index cols(void) const {return m_cols;}
EIGEN_DEVICE_FUNC void conservativeResize(Index size, Index, Index cols)
@@ -533,14 +551,16 @@ template<typename T, int _Cols, int _Options> class DenseStorage<T, Dynamic, Dyn
EIGEN_DEVICE_FUNC
DenseStorage& operator=(DenseStorage&& other) EIGEN_NOEXCEPT
{
- using std::swap;
- swap(m_data, other.m_data);
- swap(m_rows, other.m_rows);
+ numext::swap(m_data, other.m_data);
+ numext::swap(m_rows, other.m_rows);
return *this;
}
#endif
EIGEN_DEVICE_FUNC ~DenseStorage() { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, _Cols*m_rows); }
- EIGEN_DEVICE_FUNC void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); }
+ EIGEN_DEVICE_FUNC void swap(DenseStorage& other) {
+ numext::swap(m_data,other.m_data);
+ numext::swap(m_rows,other.m_rows);
+ }
EIGEN_DEVICE_FUNC Index rows(void) const {return m_rows;}
EIGEN_DEVICE_FUNC static Index cols(void) {return _Cols;}
void conservativeResize(Index size, Index rows, Index)
diff --git a/Eigen/src/Core/Diagonal.h b/Eigen/src/Core/Diagonal.h
index 49e711257..563135fb2 100644
--- a/Eigen/src/Core/Diagonal.h
+++ b/Eigen/src/Core/Diagonal.h
@@ -70,7 +70,10 @@ template<typename MatrixType, int _DiagIndex> class Diagonal
EIGEN_DENSE_PUBLIC_INTERFACE(Diagonal)
EIGEN_DEVICE_FUNC
- explicit inline Diagonal(MatrixType& matrix, Index a_index = DiagIndex) : m_matrix(matrix), m_index(a_index) {}
+ explicit inline Diagonal(MatrixType& matrix, Index a_index = DiagIndex) : m_matrix(matrix), m_index(a_index)
+ {
+ eigen_assert( a_index <= m_matrix.cols() && -a_index <= m_matrix.rows() );
+ }
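
An illustrative sketch of what the new assertion guards against (not part of the patch):

    #include <Eigen/Dense>

    int main()
    {
      Eigen::MatrixXd m = Eigen::MatrixXd::Zero(3, 4);
      m.diagonal(2).setOnes();   // valid: 2 <= cols() and -2 <= rows()
      // m.diagonal(5);          // out of range: would now trigger the eigen_assert in debug builds
      return 0;
    }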
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Diagonal)
@@ -184,7 +187,7 @@ template<typename MatrixType, int _DiagIndex> class Diagonal
*
* \sa class Diagonal */
template<typename Derived>
-inline typename MatrixBase<Derived>::DiagonalReturnType
+EIGEN_DEVICE_FUNC inline typename MatrixBase<Derived>::DiagonalReturnType
MatrixBase<Derived>::diagonal()
{
return DiagonalReturnType(derived());
@@ -192,7 +195,7 @@ MatrixBase<Derived>::diagonal()
/** This is the const version of diagonal(). */
template<typename Derived>
-inline typename MatrixBase<Derived>::ConstDiagonalReturnType
+EIGEN_DEVICE_FUNC inline typename MatrixBase<Derived>::ConstDiagonalReturnType
MatrixBase<Derived>::diagonal() const
{
return ConstDiagonalReturnType(derived());
@@ -210,7 +213,7 @@ MatrixBase<Derived>::diagonal() const
*
* \sa MatrixBase::diagonal(), class Diagonal */
template<typename Derived>
-inline typename MatrixBase<Derived>::DiagonalDynamicIndexReturnType
+EIGEN_DEVICE_FUNC inline typename MatrixBase<Derived>::DiagonalDynamicIndexReturnType
MatrixBase<Derived>::diagonal(Index index)
{
return DiagonalDynamicIndexReturnType(derived(), index);
@@ -218,7 +221,7 @@ MatrixBase<Derived>::diagonal(Index index)
/** This is the const version of diagonal(Index). */
template<typename Derived>
-inline typename MatrixBase<Derived>::ConstDiagonalDynamicIndexReturnType
+EIGEN_DEVICE_FUNC inline typename MatrixBase<Derived>::ConstDiagonalDynamicIndexReturnType
MatrixBase<Derived>::diagonal(Index index) const
{
return ConstDiagonalDynamicIndexReturnType(derived(), index);
@@ -237,6 +240,7 @@ MatrixBase<Derived>::diagonal(Index index) const
* \sa MatrixBase::diagonal(), class Diagonal */
template<typename Derived>
template<int Index_>
+EIGEN_DEVICE_FUNC
inline typename MatrixBase<Derived>::template DiagonalIndexReturnType<Index_>::Type
MatrixBase<Derived>::diagonal()
{
@@ -246,6 +250,7 @@ MatrixBase<Derived>::diagonal()
/** This is the const version of diagonal<int>(). */
template<typename Derived>
template<int Index_>
+EIGEN_DEVICE_FUNC
inline typename MatrixBase<Derived>::template ConstDiagonalIndexReturnType<Index_>::Type
MatrixBase<Derived>::diagonal() const
{
diff --git a/Eigen/src/Core/DiagonalMatrix.h b/Eigen/src/Core/DiagonalMatrix.h
index ecfdce8ef..4e8297ee6 100644
--- a/Eigen/src/Core/DiagonalMatrix.h
+++ b/Eigen/src/Core/DiagonalMatrix.h
@@ -44,7 +44,7 @@ class DiagonalBase : public EigenBase<Derived>
EIGEN_DEVICE_FUNC
DenseMatrixType toDenseMatrix() const { return derived(); }
-
+
EIGEN_DEVICE_FUNC
inline const DiagonalVectorType& diagonal() const { return derived().diagonal(); }
EIGEN_DEVICE_FUNC
@@ -273,7 +273,7 @@ class DiagonalWrapper
* \sa class DiagonalWrapper, class DiagonalMatrix, diagonal(), isDiagonal()
**/
template<typename Derived>
-inline const DiagonalWrapper<const Derived>
+EIGEN_DEVICE_FUNC inline const DiagonalWrapper<const Derived>
MatrixBase<Derived>::asDiagonal() const
{
return DiagonalWrapper<const Derived>(derived());
diff --git a/Eigen/src/Core/DiagonalProduct.h b/Eigen/src/Core/DiagonalProduct.h
index d372b938f..7911d1cd1 100644
--- a/Eigen/src/Core/DiagonalProduct.h
+++ b/Eigen/src/Core/DiagonalProduct.h
@@ -17,7 +17,7 @@ namespace Eigen {
*/
template<typename Derived>
template<typename DiagonalDerived>
-inline const Product<Derived, DiagonalDerived, LazyProduct>
+EIGEN_DEVICE_FUNC inline const Product<Derived, DiagonalDerived, LazyProduct>
MatrixBase<Derived>::operator*(const DiagonalBase<DiagonalDerived> &a_diagonal) const
{
return Product<Derived, DiagonalDerived, LazyProduct>(derived(),a_diagonal.derived());
diff --git a/Eigen/src/Core/Dot.h b/Eigen/src/Core/Dot.h
index 06ef18b8b..11da432b2 100644
--- a/Eigen/src/Core/Dot.h
+++ b/Eigen/src/Core/Dot.h
@@ -31,7 +31,8 @@ struct dot_nocheck
typedef scalar_conj_product_op<typename traits<T>::Scalar,typename traits<U>::Scalar> conj_prod;
typedef typename conj_prod::result_type ResScalar;
EIGEN_DEVICE_FUNC
- static inline ResScalar run(const MatrixBase<T>& a, const MatrixBase<U>& b)
+ EIGEN_STRONG_INLINE
+ static ResScalar run(const MatrixBase<T>& a, const MatrixBase<U>& b)
{
return a.template binaryExpr<conj_prod>(b).sum();
}
@@ -43,7 +44,8 @@ struct dot_nocheck<T, U, true>
typedef scalar_conj_product_op<typename traits<T>::Scalar,typename traits<U>::Scalar> conj_prod;
typedef typename conj_prod::result_type ResScalar;
EIGEN_DEVICE_FUNC
- static inline ResScalar run(const MatrixBase<T>& a, const MatrixBase<U>& b)
+ EIGEN_STRONG_INLINE
+ static ResScalar run(const MatrixBase<T>& a, const MatrixBase<U>& b)
{
return a.transpose().template binaryExpr<conj_prod>(b).sum();
}
@@ -65,6 +67,7 @@ struct dot_nocheck<T, U, true>
template<typename Derived>
template<typename OtherDerived>
EIGEN_DEVICE_FUNC
+EIGEN_STRONG_INLINE
typename ScalarBinaryOpTraits<typename internal::traits<Derived>::Scalar,typename internal::traits<OtherDerived>::Scalar>::ReturnType
MatrixBase<Derived>::dot(const MatrixBase<OtherDerived>& other) const
{
@@ -90,7 +93,7 @@ MatrixBase<Derived>::dot(const MatrixBase<OtherDerived>& other) const
* \sa dot(), norm(), lpNorm()
*/
template<typename Derived>
-EIGEN_STRONG_INLINE typename NumTraits<typename internal::traits<Derived>::Scalar>::Real MatrixBase<Derived>::squaredNorm() const
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename NumTraits<typename internal::traits<Derived>::Scalar>::Real MatrixBase<Derived>::squaredNorm() const
{
return numext::real((*this).cwiseAbs2().sum());
}
@@ -102,7 +105,7 @@ EIGEN_STRONG_INLINE typename NumTraits<typename internal::traits<Derived>::Scala
* \sa lpNorm(), dot(), squaredNorm()
*/
template<typename Derived>
-inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real MatrixBase<Derived>::norm() const
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename NumTraits<typename internal::traits<Derived>::Scalar>::Real MatrixBase<Derived>::norm() const
{
return numext::sqrt(squaredNorm());
}
@@ -117,7 +120,7 @@ inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real Matr
* \sa norm(), normalize()
*/
template<typename Derived>
-inline const typename MatrixBase<Derived>::PlainObject
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::PlainObject
MatrixBase<Derived>::normalized() const
{
typedef typename internal::nested_eval<Derived,2>::type _Nested;
@@ -139,7 +142,7 @@ MatrixBase<Derived>::normalized() const
* \sa norm(), normalized()
*/
template<typename Derived>
-inline void MatrixBase<Derived>::normalize()
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void MatrixBase<Derived>::normalize()
{
RealScalar z = squaredNorm();
// NOTE: after extensive benchmarking, this conditional does not impact performance, at least on recent x86 CPU
@@ -160,7 +163,7 @@ inline void MatrixBase<Derived>::normalize()
* \sa stableNorm(), stableNormalize(), normalized()
*/
template<typename Derived>
-inline const typename MatrixBase<Derived>::PlainObject
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::PlainObject
MatrixBase<Derived>::stableNormalized() const
{
typedef typename internal::nested_eval<Derived,3>::type _Nested;
@@ -185,7 +188,7 @@ MatrixBase<Derived>::stableNormalized() const
* \sa stableNorm(), stableNormalized(), normalize()
*/
template<typename Derived>
-inline void MatrixBase<Derived>::stableNormalize()
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void MatrixBase<Derived>::stableNormalize()
{
RealScalar w = cwiseAbs().maxCoeff();
RealScalar z = (derived()/w).squaredNorm();
@@ -257,9 +260,9 @@ struct lpNorm_selector<Derived, Infinity>
template<typename Derived>
template<int p>
#ifndef EIGEN_PARSED_BY_DOXYGEN
-inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
+EIGEN_DEVICE_FUNC inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
#else
-MatrixBase<Derived>::RealScalar
+EIGEN_DEVICE_FUNC MatrixBase<Derived>::RealScalar
#endif
MatrixBase<Derived>::lpNorm() const
{
diff --git a/Eigen/src/Core/EigenBase.h b/Eigen/src/Core/EigenBase.h
index f76995af9..b195506a9 100644
--- a/Eigen/src/Core/EigenBase.h
+++ b/Eigen/src/Core/EigenBase.h
@@ -14,6 +14,7 @@
namespace Eigen {
/** \class EigenBase
+ * \ingroup Core_Module
*
* Common base class for all classes T such that MatrixBase has an operator=(T) and a constructor MatrixBase(T).
*
@@ -128,6 +129,7 @@ template<typename Derived> struct EigenBase
*/
template<typename Derived>
template<typename OtherDerived>
+EIGEN_DEVICE_FUNC
Derived& DenseBase<Derived>::operator=(const EigenBase<OtherDerived> &other)
{
call_assignment(derived(), other.derived());
@@ -136,6 +138,7 @@ Derived& DenseBase<Derived>::operator=(const EigenBase<OtherDerived> &other)
template<typename Derived>
template<typename OtherDerived>
+EIGEN_DEVICE_FUNC
Derived& DenseBase<Derived>::operator+=(const EigenBase<OtherDerived> &other)
{
call_assignment(derived(), other.derived(), internal::add_assign_op<Scalar,typename OtherDerived::Scalar>());
@@ -144,6 +147,7 @@ Derived& DenseBase<Derived>::operator+=(const EigenBase<OtherDerived> &other)
template<typename Derived>
template<typename OtherDerived>
+EIGEN_DEVICE_FUNC
Derived& DenseBase<Derived>::operator-=(const EigenBase<OtherDerived> &other)
{
call_assignment(derived(), other.derived(), internal::sub_assign_op<Scalar,typename OtherDerived::Scalar>());
diff --git a/Eigen/src/Core/Fuzzy.h b/Eigen/src/Core/Fuzzy.h
index 3e403a09d..43aa49b2b 100644
--- a/Eigen/src/Core/Fuzzy.h
+++ b/Eigen/src/Core/Fuzzy.h
@@ -100,7 +100,7 @@ struct isMuchSmallerThan_scalar_selector<Derived, true>
*/
template<typename Derived>
template<typename OtherDerived>
-bool DenseBase<Derived>::isApprox(
+EIGEN_DEVICE_FUNC bool DenseBase<Derived>::isApprox(
const DenseBase<OtherDerived>& other,
const RealScalar& prec
) const
@@ -122,7 +122,7 @@ bool DenseBase<Derived>::isApprox(
* \sa isApprox(), isMuchSmallerThan(const DenseBase<OtherDerived>&, RealScalar) const
*/
template<typename Derived>
-bool DenseBase<Derived>::isMuchSmallerThan(
+EIGEN_DEVICE_FUNC bool DenseBase<Derived>::isMuchSmallerThan(
const typename NumTraits<Scalar>::Real& other,
const RealScalar& prec
) const
@@ -142,7 +142,7 @@ bool DenseBase<Derived>::isMuchSmallerThan(
*/
template<typename Derived>
template<typename OtherDerived>
-bool DenseBase<Derived>::isMuchSmallerThan(
+EIGEN_DEVICE_FUNC bool DenseBase<Derived>::isMuchSmallerThan(
const DenseBase<OtherDerived>& other,
const RealScalar& prec
) const
diff --git a/Eigen/src/Core/GeneralProduct.h b/Eigen/src/Core/GeneralProduct.h
index 0f16cd8e3..43f3b84c8 100644
--- a/Eigen/src/Core/GeneralProduct.h
+++ b/Eigen/src/Core/GeneralProduct.h
@@ -18,18 +18,33 @@ enum {
Small = 3
};
+// Define the threshold value for falling back from the generic matrix-matrix product
+// implementation (heavy) to the lightweight coeff-based product one.
+// See generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct>
+// in products/GeneralMatrixMatrix.h for more details.
+// TODO This threshold should also be used in the compile-time selector below.
+#ifndef EIGEN_GEMM_TO_COEFFBASED_THRESHOLD
+// This default value has been obtained on a Haswell architecture.
+#define EIGEN_GEMM_TO_COEFFBASED_THRESHOLD 20
+#endif
+
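
A sketch of how client code could tune this new threshold before including Eigen (the value 32 is arbitrary and only for illustration; 20 remains the default introduced here):

    // Raise the cutoff below which small products may take the lightweight
    // coefficient-based path instead of the full GEMM kernel (illustrative override).
    #define EIGEN_GEMM_TO_COEFFBASED_THRESHOLD 32
    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::MatrixXf a = Eigen::MatrixXf::Random(16, 16);
      Eigen::MatrixXf b = Eigen::MatrixXf::Random(16, 16);
      Eigen::MatrixXf c = a * b;   // 16 < 32, so the coeff-based path may be used
      std::cout << c(0, 0) << std::endl;
      return 0;
    }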
namespace internal {
template<int Rows, int Cols, int Depth> struct product_type_selector;
template<int Size, int MaxSize> struct product_size_category
{
- enum { is_large = MaxSize == Dynamic ||
- Size >= EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD ||
- (Size==Dynamic && MaxSize>=EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD),
- value = is_large ? Large
- : Size == 1 ? 1
- : Small
+ enum {
+ #ifndef EIGEN_GPU_COMPILE_PHASE
+ is_large = MaxSize == Dynamic ||
+ Size >= EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD ||
+ (Size==Dynamic && MaxSize>=EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD),
+ #else
+ is_large = 0,
+ #endif
+ value = is_large ? Large
+ : Size == 1 ? 1
+ : Small
};
};
@@ -148,13 +163,13 @@ template<typename Scalar,int Size,int MaxSize,bool Cond> struct gemv_static_vect
template<typename Scalar,int Size,int MaxSize>
struct gemv_static_vector_if<Scalar,Size,MaxSize,false>
{
- EIGEN_STRONG_INLINE Scalar* data() { eigen_internal_assert(false && "should never be called"); return 0; }
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Scalar* data() { eigen_internal_assert(false && "should never be called"); return 0; }
};
template<typename Scalar,int Size>
struct gemv_static_vector_if<Scalar,Size,Dynamic,true>
{
- EIGEN_STRONG_INLINE Scalar* data() { return 0; }
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Scalar* data() { return 0; }
};
template<typename Scalar,int Size,int MaxSize>
@@ -379,10 +394,9 @@ template<> struct gemv_dense_selector<OnTheRight,RowMajor,false>
*
* \sa lazyProduct(), operator*=(const MatrixBase&), Cwise::operator*()
*/
-#ifndef __CUDACC__
-
template<typename Derived>
template<typename OtherDerived>
+EIGEN_DEVICE_FUNC
inline const Product<Derived, OtherDerived>
MatrixBase<Derived>::operator*(const MatrixBase<OtherDerived> &other) const
{
@@ -412,8 +426,6 @@ MatrixBase<Derived>::operator*(const MatrixBase<OtherDerived> &other) const
return Product<Derived, OtherDerived>(derived(), other.derived());
}
-#endif // __CUDACC__
-
/** \returns an expression of the matrix product of \c *this and \a other without implicit evaluation.
*
* The returned product will behave like any other expressions: the coefficients of the product will be
@@ -428,7 +440,7 @@ MatrixBase<Derived>::operator*(const MatrixBase<OtherDerived> &other) const
template<typename Derived>
template<typename OtherDerived>
const Product<Derived,OtherDerived,LazyProduct>
-MatrixBase<Derived>::lazyProduct(const MatrixBase<OtherDerived> &other) const
+EIGEN_DEVICE_FUNC MatrixBase<Derived>::lazyProduct(const MatrixBase<OtherDerived> &other) const
{
enum {
ProductIsValid = Derived::ColsAtCompileTime==Dynamic
diff --git a/Eigen/src/Core/GenericPacketMath.h b/Eigen/src/Core/GenericPacketMath.h
index ac5552d3e..b67c41d8a 100644
--- a/Eigen/src/Core/GenericPacketMath.h
+++ b/Eigen/src/Core/GenericPacketMath.h
@@ -82,7 +82,11 @@ struct default_packet_traits
HasPolygamma = 0,
HasErf = 0,
HasErfc = 0,
+ HasI0e = 0,
+ HasI1e = 0,
HasIGamma = 0,
+ HasIGammaDerA = 0,
+ HasGammaSampleDerAlpha = 0,
HasIGammac = 0,
HasBetaInc = 0,
@@ -231,7 +235,7 @@ pload1(const typename unpacket_traits<Packet>::type *a) { return pset1<Packet>(
* duplicated to form: {from[0],from[0],from[1],from[1],from[2],from[2],from[3],from[3]}
* Currently, this function is only used for scalar * complex products.
*/
-template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
+template<typename Packet> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet
ploaddup(const typename unpacket_traits<Packet>::type* from) { return *from; }
/** \internal \returns a packet with elements of \a *from quadrupled.
@@ -279,7 +283,7 @@ inline void pbroadcast2(const typename unpacket_traits<Packet>::type *a,
}
/** \internal \brief Returns a packet with coefficients (a,a+1,...,a+packet_size-1). */
-template<typename Packet> inline Packet
+template<typename Packet> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet
plset(const typename unpacket_traits<Packet>::type& a) { return a; }
/** \internal copy the packet \a from to \a *to, \a to must be 16 bytes aligned */
@@ -299,7 +303,9 @@ template<typename Scalar, typename Packet> EIGEN_DEVICE_FUNC inline void pstoreu
/** \internal tries to do cache prefetching of \a addr */
template<typename Scalar> EIGEN_DEVICE_FUNC inline void prefetch(const Scalar* addr)
{
-#ifdef __CUDA_ARCH__
+#if defined(EIGEN_HIP_DEVICE_COMPILE)
+ // do nothing
+#elif defined(EIGEN_CUDA_ARCH)
#if defined(__LP64__)
// 64-bit pointer operand constraint for inlined asm
asm(" prefetch.L1 [ %1 ];" : "=l"(addr) : "l"(addr));
@@ -324,13 +330,13 @@ preduxp(const Packet* vecs) { return vecs[0]; }
template<typename Packet> EIGEN_DEVICE_FUNC inline typename unpacket_traits<Packet>::type predux(const Packet& a)
{ return a; }
-/** \internal \returns the sum of the elements of \a a by block of 4 elements.
+/** \internal \returns the sum of the elements of the upper and lower halves of \a a if the size of \a a is larger than 4.
* For a packet {a0, a1, a2, a3, a4, a5, a6, a7}, it returns a half packet {a0+a4, a1+a5, a2+a6, a3+a7}
* For packet-size smaller or equal to 4, this boils down to a noop.
*/
template<typename Packet> EIGEN_DEVICE_FUNC inline
typename conditional<(unpacket_traits<Packet>::size%8)==0,typename unpacket_traits<Packet>::half,Packet>::type
-predux_downto4(const Packet& a)
+predux_half_dowto4(const Packet& a)
{ return a; }
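
For intuition, a plain scalar re-statement of the renamed reduction (the generic overload above is a no-op; SIMD specializations perform the pairwise sum described in the comment; the array types here are illustrative):

    #include <array>
    #include <iostream>

    // {a0,...,a7} -> {a0+a4, a1+a5, a2+a6, a3+a7}, i.e. the sum of the two halves.
    std::array<float, 4> predux_half_sketch(const std::array<float, 8>& a)
    {
      std::array<float, 4> r = { a[0] + a[4], a[1] + a[5], a[2] + a[6], a[3] + a[7] };
      return r;
    }

    int main()
    {
      std::array<float, 8> a = { 1, 2, 3, 4, 5, 6, 7, 8 };
      std::array<float, 4> r = predux_half_sketch(a);   // 6, 8, 10, 12
      std::cout << r[0] << ' ' << r[3] << std::endl;
      return 0;
    }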
/** \internal \returns the product of the elements of \a a*/
@@ -487,7 +493,7 @@ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void pstoret(Scalar* to, const Packet& fro
* by the current computation.
*/
template<typename Packet, int LoadMode>
-inline Packet ploadt_ro(const typename unpacket_traits<Packet>::type* from)
+EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet ploadt_ro(const typename unpacket_traits<Packet>::type* from)
{
return ploadt<Packet, LoadMode>(from);
}
@@ -526,7 +532,7 @@ inline void palign(PacketType& first, const PacketType& second)
***************************************************************************/
// Eigen+CUDA does not support complexes.
-#ifndef __CUDACC__
+#if !defined(EIGEN_GPUCC)
template<> inline std::complex<float> pmul(const std::complex<float>& a, const std::complex<float>& b)
{ return std::complex<float>(real(a)*real(b) - imag(a)*imag(b), imag(a)*real(b) + real(a)*imag(b)); }
diff --git a/Eigen/src/Core/GlobalFunctions.h b/Eigen/src/Core/GlobalFunctions.h
index 12828a7c3..563df6e84 100644
--- a/Eigen/src/Core/GlobalFunctions.h
+++ b/Eigen/src/Core/GlobalFunctions.h
@@ -66,6 +66,7 @@ namespace Eigen
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(sinh,scalar_sinh_op,hyperbolic sine,\sa ArrayBase::sinh)
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(cosh,scalar_cosh_op,hyperbolic cosine,\sa ArrayBase::cosh)
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(tanh,scalar_tanh_op,hyperbolic tangent,\sa ArrayBase::tanh)
+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(logistic,scalar_logistic_op,logistic function,\sa ArrayBase::logistic)
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(lgamma,scalar_lgamma_op,natural logarithm of the gamma function,\sa ArrayBase::lgamma)
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(digamma,scalar_digamma_op,derivative of lgamma,\sa ArrayBase::digamma)
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(erf,scalar_erf_op,error function,\sa ArrayBase::erf)
@@ -89,7 +90,7 @@ namespace Eigen
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(isinf,scalar_isinf_op,infinite value test,\sa Eigen::isnan DOXCOMMA Eigen::isfinite DOXCOMMA ArrayBase::isinf)
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(isfinite,scalar_isfinite_op,finite value test,\sa Eigen::isinf DOXCOMMA Eigen::isnan DOXCOMMA ArrayBase::isfinite)
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(sign,scalar_sign_op,sign (or 0),\sa ArrayBase::sign)
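
A usage sketch for the logistic entry added to this list above (illustrative values; the member form is the ArrayBase::logistic referenced by the \sa tag):

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::ArrayXf x = Eigen::ArrayXf::LinSpaced(5, -2.f, 2.f);
      Eigen::ArrayXf y = Eigen::logistic(x);   // coefficient-wise 1 / (1 + exp(-x))
      std::cout << y.transpose() << std::endl;
      return 0;
    }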
-
+
/** \returns an expression of the coefficient-wise power of \a x to the given constant \a exponent.
*
* \tparam ScalarExponent is the scalar type of \a exponent. It must be compatible with the scalar type of the given expression (\c Derived::Scalar).
@@ -103,17 +104,18 @@ namespace Eigen
inline const CwiseBinaryOp<internal::scalar_pow_op<Derived::Scalar,ScalarExponent>,Derived,Constant<ScalarExponent> >
pow(const Eigen::ArrayBase<Derived>& x, const ScalarExponent& exponent);
#else
- template<typename Derived,typename ScalarExponent>
- inline typename internal::enable_if< !(internal::is_same<typename Derived::Scalar,ScalarExponent>::value) && EIGEN_SCALAR_BINARY_SUPPORTED(pow,typename Derived::Scalar,ScalarExponent),
- const EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(Derived,ScalarExponent,pow) >::type
- pow(const Eigen::ArrayBase<Derived>& x, const ScalarExponent& exponent) {
- return x.derived().pow(exponent);
- }
-
- template<typename Derived>
- inline const EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(Derived,typename Derived::Scalar,pow)
- pow(const Eigen::ArrayBase<Derived>& x, const typename Derived::Scalar& exponent) {
- return x.derived().pow(exponent);
+ template <typename Derived,typename ScalarExponent>
+ EIGEN_DEVICE_FUNC inline
+ EIGEN_MSVC10_WORKAROUND_BINARYOP_RETURN_TYPE(
+ const EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(Derived,typename internal::promote_scalar_arg<typename Derived::Scalar
+ EIGEN_COMMA ScalarExponent EIGEN_COMMA
+ EIGEN_SCALAR_BINARY_SUPPORTED(pow,typename Derived::Scalar,ScalarExponent)>::type,pow))
+ pow(const Eigen::ArrayBase<Derived>& x, const ScalarExponent& exponent)
+ {
+ typedef typename internal::promote_scalar_arg<typename Derived::Scalar,ScalarExponent,
+ EIGEN_SCALAR_BINARY_SUPPORTED(pow,typename Derived::Scalar,ScalarExponent)>::type PromotedExponent;
+ return EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(Derived,PromotedExponent,pow)(x.derived(),
+ typename internal::plain_constant_type<Derived,PromotedExponent>::type(x.derived().rows(), x.derived().cols(), internal::scalar_constant_op<PromotedExponent>(exponent)));
}
#endif
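
A sketch of the mixed-scalar-type calls this rewrite is meant to accept, with the exponent promoted to the array's scalar type (values and types illustrative):

    #include <Eigen/Dense>

    int main()
    {
      Eigen::ArrayXf a = Eigen::ArrayXf::LinSpaced(4, 1.f, 4.f);
      Eigen::ArrayXf b = Eigen::pow(a, 2);     // int exponent, promoted to float
      Eigen::ArrayXf c = Eigen::pow(a, 0.5);   // double exponent, promoted to float
      return b.size() == c.size() ? 0 : 1;
    }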
@@ -123,21 +125,21 @@ namespace Eigen
*
* Example: \include Cwise_array_power_array.cpp
* Output: \verbinclude Cwise_array_power_array.out
- *
+ *
* \sa ArrayBase::pow()
*
* \relates ArrayBase
*/
template<typename Derived,typename ExponentDerived>
inline const Eigen::CwiseBinaryOp<Eigen::internal::scalar_pow_op<typename Derived::Scalar, typename ExponentDerived::Scalar>, const Derived, const ExponentDerived>
- pow(const Eigen::ArrayBase<Derived>& x, const Eigen::ArrayBase<ExponentDerived>& exponents)
+ pow(const Eigen::ArrayBase<Derived>& x, const Eigen::ArrayBase<ExponentDerived>& exponents)
{
return Eigen::CwiseBinaryOp<Eigen::internal::scalar_pow_op<typename Derived::Scalar, typename ExponentDerived::Scalar>, const Derived, const ExponentDerived>(
x.derived(),
exponents.derived()
);
}
-
+
/** \returns an expression of the coefficient-wise power of the scalar \a x to the given array of \a exponents.
*
* This function computes the coefficient-wise power between a scalar and an array of exponents.
@@ -146,7 +148,7 @@ namespace Eigen
*
* Example: \include Cwise_scalar_power_array.cpp
* Output: \verbinclude Cwise_scalar_power_array.out
- *
+ *
* \sa ArrayBase::pow()
*
* \relates ArrayBase
@@ -156,21 +158,17 @@ namespace Eigen
inline const CwiseBinaryOp<internal::scalar_pow_op<Scalar,Derived::Scalar>,Constant<Scalar>,Derived>
pow(const Scalar& x,const Eigen::ArrayBase<Derived>& x);
#else
- template<typename Scalar, typename Derived>
- inline typename internal::enable_if< !(internal::is_same<typename Derived::Scalar,Scalar>::value) && EIGEN_SCALAR_BINARY_SUPPORTED(pow,Scalar,typename Derived::Scalar),
- const EIGEN_SCALAR_BINARYOP_EXPR_RETURN_TYPE(Scalar,Derived,pow) >::type
- pow(const Scalar& x, const Eigen::ArrayBase<Derived>& exponents)
- {
- return EIGEN_SCALAR_BINARYOP_EXPR_RETURN_TYPE(Scalar,Derived,pow)(
- typename internal::plain_constant_type<Derived,Scalar>::type(exponents.rows(), exponents.cols(), x), exponents.derived() );
- }
-
- template<typename Derived>
- inline const EIGEN_SCALAR_BINARYOP_EXPR_RETURN_TYPE(typename Derived::Scalar,Derived,pow)
- pow(const typename Derived::Scalar& x, const Eigen::ArrayBase<Derived>& exponents)
- {
- return EIGEN_SCALAR_BINARYOP_EXPR_RETURN_TYPE(typename Derived::Scalar,Derived,pow)(
- typename internal::plain_constant_type<Derived,typename Derived::Scalar>::type(exponents.rows(), exponents.cols(), x), exponents.derived() );
+ template <typename Scalar, typename Derived>
+ EIGEN_DEVICE_FUNC inline
+ EIGEN_MSVC10_WORKAROUND_BINARYOP_RETURN_TYPE(
+ const EIGEN_SCALAR_BINARYOP_EXPR_RETURN_TYPE(typename internal::promote_scalar_arg<typename Derived::Scalar
+ EIGEN_COMMA Scalar EIGEN_COMMA
+ EIGEN_SCALAR_BINARY_SUPPORTED(pow,Scalar,typename Derived::Scalar)>::type,Derived,pow))
+ pow(const Scalar& x, const Eigen::ArrayBase<Derived>& exponents) {
+ typedef typename internal::promote_scalar_arg<typename Derived::Scalar,Scalar,
+ EIGEN_SCALAR_BINARY_SUPPORTED(pow,Scalar,typename Derived::Scalar)>::type PromotedScalar;
+ return EIGEN_SCALAR_BINARYOP_EXPR_RETURN_TYPE(PromotedScalar,Derived,pow)(
+ typename internal::plain_constant_type<Derived,PromotedScalar>::type(exponents.derived().rows(), exponents.derived().cols(), internal::scalar_constant_op<PromotedScalar>(x)), exponents.derived());
}
#endif
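
And the mirrored scalar-base form, with the same promotion applied to the base (illustrative sketch):

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::ArrayXd e = Eigen::ArrayXd::LinSpaced(4, 0.0, 3.0);
      Eigen::ArrayXd p = Eigen::pow(2, e);   // int base promoted to double: 1 2 4 8
      std::cout << p.transpose() << std::endl;
      return 0;
    }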
diff --git a/Eigen/src/Core/Map.h b/Eigen/src/Core/Map.h
index 06d196702..c437f1a92 100644
--- a/Eigen/src/Core/Map.h
+++ b/Eigen/src/Core/Map.h
@@ -20,11 +20,17 @@ struct traits<Map<PlainObjectType, MapOptions, StrideType> >
{
typedef traits<PlainObjectType> TraitsBase;
enum {
+ PlainObjectTypeInnerSize = ((traits<PlainObjectType>::Flags&RowMajorBit)==RowMajorBit)
+ ? PlainObjectType::ColsAtCompileTime
+ : PlainObjectType::RowsAtCompileTime,
+
InnerStrideAtCompileTime = StrideType::InnerStrideAtCompileTime == 0
? int(PlainObjectType::InnerStrideAtCompileTime)
: int(StrideType::InnerStrideAtCompileTime),
OuterStrideAtCompileTime = StrideType::OuterStrideAtCompileTime == 0
- ? int(PlainObjectType::OuterStrideAtCompileTime)
+ ? (InnerStrideAtCompileTime==Dynamic || PlainObjectTypeInnerSize==Dynamic
+ ? Dynamic
+ : int(InnerStrideAtCompileTime) * int(PlainObjectTypeInnerSize))
: int(StrideType::OuterStrideAtCompileTime),
Alignment = int(MapOptions)&int(AlignedMask),
Flags0 = TraitsBase::Flags & (~NestByRefBit),
@@ -108,9 +114,10 @@ template<typename PlainObjectType, int MapOptions, typename StrideType> class Ma
inline Index outerStride() const
{
return StrideType::OuterStrideAtCompileTime != 0 ? m_stride.outer()
- : IsVectorAtCompileTime ? this->size()
- : int(Flags)&RowMajorBit ? this->cols()
- : this->rows();
+ : internal::traits<Map>::OuterStrideAtCompileTime != Dynamic ? Index(internal::traits<Map>::OuterStrideAtCompileTime)
+ : IsVectorAtCompileTime ? (this->size() * innerStride())
+ : int(Flags)&RowMajorBit ? (this->cols() * innerStride())
+ : (this->rows() * innerStride());
}
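
A sketch of the stride arithmetic encoded above: with a compile-time inner stride, the default outer stride of a Map over a fixed-size plain object is now inner stride times inner size, rather than the plain object's own outer stride (buffer contents illustrative):

    #include <Eigen/Dense>

    int main()
    {
      float data[18];
      for (int i = 0; i < 18; ++i) data[i] = float(i);
      // Column-major 3x3 view of every other float: outer stride = 2 * 3 = 6.
      Eigen::Map<Eigen::Matrix3f, 0, Eigen::InnerStride<2> > m(data);
      return m.outerStride() == 6 ? 0 : 1;
    }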
/** Constructor in the fixed-size case.
diff --git a/Eigen/src/Core/MapBase.h b/Eigen/src/Core/MapBase.h
index 020f939ad..668922ffc 100644
--- a/Eigen/src/Core/MapBase.h
+++ b/Eigen/src/Core/MapBase.h
@@ -43,6 +43,7 @@ template<typename Derived> class MapBase<Derived, ReadOnlyAccessors>
enum {
RowsAtCompileTime = internal::traits<Derived>::RowsAtCompileTime,
ColsAtCompileTime = internal::traits<Derived>::ColsAtCompileTime,
+ InnerStrideAtCompileTime = internal::traits<Derived>::InnerStrideAtCompileTime,
SizeAtCompileTime = Base::SizeAtCompileTime
};
@@ -187,8 +188,11 @@ template<typename Derived> class MapBase<Derived, ReadOnlyAccessors>
void checkSanity(typename internal::enable_if<(internal::traits<T>::Alignment>0),void*>::type = 0) const
{
#if EIGEN_MAX_ALIGN_BYTES>0
+ // innerStride() is not set yet when this function is called, so we optimistically assume the lowest plausible value:
+ const Index minInnerStride = InnerStrideAtCompileTime == Dynamic ? 1 : Index(InnerStrideAtCompileTime);
+ EIGEN_ONLY_USED_FOR_DEBUG(minInnerStride);
eigen_assert(( ((internal::UIntPtr(m_data) % internal::traits<Derived>::Alignment) == 0)
- || (cols() * rows() * innerStride() * sizeof(Scalar)) < internal::traits<Derived>::Alignment ) && "data is not aligned");
+ || (cols() * rows() * minInnerStride * sizeof(Scalar)) < internal::traits<Derived>::Alignment ) && "data is not aligned");
#endif
}
diff --git a/Eigen/src/Core/MathFunctions.h b/Eigen/src/Core/MathFunctions.h
index 7a6b999af..72116e144 100644
--- a/Eigen/src/Core/MathFunctions.h
+++ b/Eigen/src/Core/MathFunctions.h
@@ -96,7 +96,7 @@ struct real_default_impl<Scalar,true>
template<typename Scalar> struct real_impl : real_default_impl<Scalar> {};
-#ifdef __CUDA_ARCH__
+#if defined(EIGEN_GPU_COMPILE_PHASE)
template<typename T>
struct real_impl<std::complex<T> >
{
@@ -144,7 +144,7 @@ struct imag_default_impl<Scalar,true>
template<typename Scalar> struct imag_impl : imag_default_impl<Scalar> {};
-#ifdef __CUDA_ARCH__
+#if defined(EIGEN_GPU_COMPILE_PHASE)
template<typename T>
struct imag_impl<std::complex<T> >
{
@@ -238,7 +238,7 @@ struct imag_ref_retval
****************************************************************************/
template<typename Scalar, bool IsComplex = NumTraits<Scalar>::IsComplex>
-struct conj_impl
+struct conj_default_impl
{
EIGEN_DEVICE_FUNC
static inline Scalar run(const Scalar& x)
@@ -248,7 +248,7 @@ struct conj_impl
};
template<typename Scalar>
-struct conj_impl<Scalar,true>
+struct conj_default_impl<Scalar,true>
{
EIGEN_DEVICE_FUNC
static inline Scalar run(const Scalar& x)
@@ -258,6 +258,20 @@ struct conj_impl<Scalar,true>
}
};
+template<typename Scalar> struct conj_impl : conj_default_impl<Scalar> {};
+
+#if defined(EIGEN_GPU_COMPILE_PHASE)
+template<typename T>
+struct conj_impl<std::complex<T> >
+{
+ EIGEN_DEVICE_FUNC
+ static inline std::complex<T> run(const std::complex<T>& x)
+ {
+ return std::complex<T>(x.real(), -x.imag());
+ }
+};
+#endif
+
template<typename Scalar>
struct conj_retval
{
@@ -347,31 +361,7 @@ struct norm1_retval
* Implementation of hypot *
****************************************************************************/
-template<typename Scalar>
-struct hypot_impl
-{
- typedef typename NumTraits<Scalar>::Real RealScalar;
- static inline RealScalar run(const Scalar& x, const Scalar& y)
- {
- EIGEN_USING_STD_MATH(abs);
- EIGEN_USING_STD_MATH(sqrt);
- RealScalar _x = abs(x);
- RealScalar _y = abs(y);
- Scalar p, qp;
- if(_x>_y)
- {
- p = _x;
- qp = _y / p;
- }
- else
- {
- p = _y;
- qp = _x / p;
- }
- if(p==RealScalar(0)) return RealScalar(0);
- return p * sqrt(RealScalar(1) + qp*qp);
- }
-};
+template<typename Scalar> struct hypot_impl;
template<typename Scalar>
struct hypot_retval
@@ -445,7 +435,12 @@ struct round_retval
struct arg_impl {
static inline Scalar run(const Scalar& x)
{
+ #if defined(EIGEN_HIP_DEVICE_COMPILE)
+ // HIP does not seem to have a native device side implementation for the math routine "arg"
+ using std::arg;
+ #else
EIGEN_USING_STD_MATH(arg);
+ #endif
return arg(x);
}
};
@@ -497,11 +492,11 @@ namespace std_fallback {
EIGEN_USING_STD_MATH(exp);
Scalar u = exp(x);
- if (u == Scalar(1)) {
+ if (numext::equal_strict(u, Scalar(1))) {
return x;
}
Scalar um1 = u - RealScalar(1);
- if (um1 == Scalar(-1)) {
+ if (numext::equal_strict(um1, Scalar(-1))) {
return RealScalar(-1);
}
@@ -512,7 +507,7 @@ namespace std_fallback {
template<typename Scalar>
struct expm1_impl {
- static inline Scalar run(const Scalar& x)
+ EIGEN_DEVICE_FUNC static inline Scalar run(const Scalar& x)
{
EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar)
#if EIGEN_HAS_CXX11_MATH
@@ -543,13 +538,13 @@ namespace std_fallback {
typedef typename NumTraits<Scalar>::Real RealScalar;
EIGEN_USING_STD_MATH(log);
Scalar x1p = RealScalar(1) + x;
- return ( x1p == Scalar(1) ) ? x : x * ( log(x1p) / (x1p - RealScalar(1)) );
+ return numext::equal_strict(x1p, Scalar(1)) ? x : x * ( log(x1p) / (x1p - RealScalar(1)) );
}
}
template<typename Scalar>
struct log1p_impl {
- static inline Scalar run(const Scalar& x)
+ EIGEN_DEVICE_FUNC static inline Scalar run(const Scalar& x)
{
EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar)
#if EIGEN_HAS_CXX11_MATH
@@ -689,20 +684,27 @@ struct random_default_impl<Scalar, false, true>
{
static inline Scalar run(const Scalar& x, const Scalar& y)
{
- typedef typename conditional<NumTraits<Scalar>::IsSigned,std::ptrdiff_t,std::size_t>::type ScalarX;
- if(y<x)
+ if (y <= x)
return x;
- // the following difference might overflow on a 32 bits system,
- // but since y>=x the result converted to an unsigned long is still correct.
- std::size_t range = ScalarX(y)-ScalarX(x);
- std::size_t offset = 0;
- // rejection sampling
- std::size_t divisor = 1;
- std::size_t multiplier = 1;
- if(range<RAND_MAX) divisor = (std::size_t(RAND_MAX)+1)/(range+1);
- else multiplier = 1 + range/(std::size_t(RAND_MAX)+1);
+ // ScalarU is the unsigned counterpart of Scalar, possibly Scalar itself.
+ typedef typename make_unsigned<Scalar>::type ScalarU;
+ // ScalarX is the widest of ScalarU and unsigned int.
+    // We'll deal only with ScalarX and unsigned int below, thus avoiding signed
+    // types and arithmetic, and hence signed overflows (which are undefined behavior).
+ typedef typename conditional<(ScalarU(-1) > unsigned(-1)), ScalarU, unsigned>::type ScalarX;
+ // The following difference doesn't overflow, provided our integer types are two's
+ // complement and have the same number of padding bits in signed and unsigned variants.
+ // This is the case in most modern implementations of C++.
+ ScalarX range = ScalarX(y) - ScalarX(x);
+ ScalarX offset = 0;
+ ScalarX divisor = 1;
+ ScalarX multiplier = 1;
+ const unsigned rand_max = RAND_MAX;
+ if (range <= rand_max) divisor = (rand_max + 1) / (range + 1);
+ else multiplier = 1 + range / (rand_max + 1);
+ // Rejection sampling.
do {
- offset = (std::size_t(std::rand()) * multiplier) / divisor;
+ offset = (unsigned(std::rand()) * multiplier) / divisor;
} while (offset > range);
return Scalar(ScalarX(x) + offset);
}
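
A plain-C++ restatement of the unsigned rejection-sampling scheme above, specialized to int for readability (illustrative, not part of the patch):

    #include <cstdlib>

    int random_in_range_sketch(int x, int y)
    {
      if (y <= x) return x;
      unsigned range = unsigned(y) - unsigned(x);          // no signed overflow
      unsigned divisor = 1, multiplier = 1;
      const unsigned rand_max = RAND_MAX;
      if (range <= rand_max) divisor = (rand_max + 1) / (range + 1);
      else                   multiplier = 1 + range / (rand_max + 1);
      unsigned offset;
      do {
        offset = (unsigned(std::rand()) * multiplier) / divisor;
      } while (offset > range);                            // rejection sampling
      return int(unsigned(x) + offset);
    }

    int main()
    {
      return random_in_range_sketch(-5, 5) >= -5 ? 0 : 1;
    }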
@@ -749,7 +751,7 @@ inline EIGEN_MATHFUNC_RETVAL(random, Scalar) random()
return EIGEN_MATHFUNC_IMPL(random, Scalar)::run();
}
-// Implementatin of is* functions
+// Implementation of is* functions
// std::is* do not work with fast-math and gcc, std::is* are available on MSVC 2013 and newer, as well as in clang.
#if (EIGEN_HAS_CXX11_MATH && !(EIGEN_COMP_GNUC_STRICT && __FINITE_MATH_ONLY__)) || (EIGEN_COMP_MSVC>=1800) || (EIGEN_COMP_CLANG)
@@ -778,7 +780,7 @@ EIGEN_DEVICE_FUNC
typename internal::enable_if<(!internal::is_integral<T>::value)&&(!NumTraits<T>::IsComplex),bool>::type
isfinite_impl(const T& x)
{
- #ifdef __CUDA_ARCH__
+ #if defined(EIGEN_GPU_COMPILE_PHASE)
return (::isfinite)(x);
#elif EIGEN_USE_STD_FPCLASSIFY
using std::isfinite;
@@ -793,7 +795,7 @@ EIGEN_DEVICE_FUNC
typename internal::enable_if<(!internal::is_integral<T>::value)&&(!NumTraits<T>::IsComplex),bool>::type
isinf_impl(const T& x)
{
- #ifdef __CUDA_ARCH__
+ #if defined(EIGEN_GPU_COMPILE_PHASE)
return (::isinf)(x);
#elif EIGEN_USE_STD_FPCLASSIFY
using std::isinf;
@@ -808,7 +810,7 @@ EIGEN_DEVICE_FUNC
typename internal::enable_if<(!internal::is_integral<T>::value)&&(!NumTraits<T>::IsComplex),bool>::type
isnan_impl(const T& x)
{
- #ifdef __CUDA_ARCH__
+ #if defined(EIGEN_GPU_COMPILE_PHASE)
return (::isnan)(x);
#elif EIGEN_USE_STD_FPCLASSIFY
using std::isnan;
@@ -874,7 +876,7 @@ template<typename T> T generic_fast_tanh_float(const T& a_x);
namespace numext {
-#if !defined(__CUDA_ARCH__) && !defined(__SYCL_DEVICE_ONLY__)
+#if (!defined(EIGEN_GPUCC) || defined(EIGEN_CONSTEXPR_ARE_DEVICE_FUNC))
template<typename T>
EIGEN_DEVICE_FUNC
EIGEN_ALWAYS_INLINE T mini(const T& x, const T& y)
@@ -890,111 +892,137 @@ EIGEN_ALWAYS_INLINE T maxi(const T& x, const T& y)
EIGEN_USING_STD_MATH(max);
return max EIGEN_NOT_A_MACRO (x,y);
}
-
-
-#elif defined(__SYCL_DEVICE_ONLY__)
+#else
template<typename T>
+EIGEN_DEVICE_FUNC
EIGEN_ALWAYS_INLINE T mini(const T& x, const T& y)
{
-
return y < x ? y : x;
}
-
-template<typename T>
-EIGEN_ALWAYS_INLINE T maxi(const T& x, const T& y)
-{
-
- return x < y ? y : x;
-}
-
-EIGEN_ALWAYS_INLINE int mini(const int& x, const int& y)
-{
- return cl::sycl::min(x,y);
-}
-
-EIGEN_ALWAYS_INLINE int maxi(const int& x, const int& y)
-{
- return cl::sycl::max(x,y);
-}
-
-EIGEN_ALWAYS_INLINE unsigned int mini(const unsigned int& x, const unsigned int& y)
-{
- return cl::sycl::min(x,y);
-}
-
-EIGEN_ALWAYS_INLINE unsigned int maxi(const unsigned int& x, const unsigned int& y)
-{
- return cl::sycl::max(x,y);
-}
-
-EIGEN_ALWAYS_INLINE long mini(const long & x, const long & y)
-{
- return cl::sycl::min(x,y);
-}
-
-EIGEN_ALWAYS_INLINE long maxi(const long & x, const long & y)
-{
- return cl::sycl::max(x,y);
-}
-
-EIGEN_ALWAYS_INLINE unsigned long mini(const unsigned long& x, const unsigned long& y)
-{
- return cl::sycl::min(x,y);
-}
-
-EIGEN_ALWAYS_INLINE unsigned long maxi(const unsigned long& x, const unsigned long& y)
-{
- return cl::sycl::max(x,y);
-}
-
-
+template<>
+EIGEN_DEVICE_FUNC
EIGEN_ALWAYS_INLINE float mini(const float& x, const float& y)
{
- return cl::sycl::fmin(x,y);
-}
-
-EIGEN_ALWAYS_INLINE float maxi(const float& x, const float& y)
-{
- return cl::sycl::fmax(x,y);
+ return fminf(x, y);
}
-
+template<>
+EIGEN_DEVICE_FUNC
EIGEN_ALWAYS_INLINE double mini(const double& x, const double& y)
{
- return cl::sycl::fmin(x,y);
+ return fmin(x, y);
}
-
-EIGEN_ALWAYS_INLINE double maxi(const double& x, const double& y)
+template<>
+EIGEN_DEVICE_FUNC
+EIGEN_ALWAYS_INLINE long double mini(const long double& x, const long double& y)
{
- return cl::sycl::fmax(x,y);
+#if defined(EIGEN_HIPCC)
+ // no "fminl" on HIP yet
+ return (x < y) ? x : y;
+#else
+ return fminl(x, y);
+#endif
}
-#else
template<typename T>
EIGEN_DEVICE_FUNC
-EIGEN_ALWAYS_INLINE T mini(const T& x, const T& y)
+EIGEN_ALWAYS_INLINE T maxi(const T& x, const T& y)
{
- return y < x ? y : x;
+ return x < y ? y : x;
}
template<>
EIGEN_DEVICE_FUNC
-EIGEN_ALWAYS_INLINE float mini(const float& x, const float& y)
+EIGEN_ALWAYS_INLINE float maxi(const float& x, const float& y)
{
- return fminf(x, y);
+ return fmaxf(x, y);
}
-template<typename T>
+template<>
EIGEN_DEVICE_FUNC
-EIGEN_ALWAYS_INLINE T maxi(const T& x, const T& y)
+EIGEN_ALWAYS_INLINE double maxi(const double& x, const double& y)
{
- return x < y ? y : x;
+ return fmax(x, y);
}
template<>
EIGEN_DEVICE_FUNC
-EIGEN_ALWAYS_INLINE float maxi(const float& x, const float& y)
+EIGEN_ALWAYS_INLINE long double maxi(const long double& x, const long double& y)
{
- return fmaxf(x, y);
+#if defined(EIGEN_HIPCC)
+ // no "fmaxl" on HIP yet
+ return (x > y) ? x : y;
+#else
+ return fmaxl(x, y);
+#endif
}
#endif
+#if defined(__SYCL_DEVICE_ONLY__)
+
+
+#define SYCL_SPECIALIZE_SIGNED_INTEGER_TYPES_BINARY(NAME, FUNC) \
+ SYCL_SPECIALIZE_BINARY_FUNC(NAME, FUNC, cl::sycl::cl_char) \
+ SYCL_SPECIALIZE_BINARY_FUNC(NAME, FUNC, cl::sycl::cl_short) \
+ SYCL_SPECIALIZE_BINARY_FUNC(NAME, FUNC, cl::sycl::cl_int) \
+ SYCL_SPECIALIZE_BINARY_FUNC(NAME, FUNC, cl::sycl::cl_long)
+#define SYCL_SPECIALIZE_SIGNED_INTEGER_TYPES_UNARY(NAME, FUNC) \
+ SYCL_SPECIALIZE_UNARY_FUNC(NAME, FUNC, cl::sycl::cl_char) \
+ SYCL_SPECIALIZE_UNARY_FUNC(NAME, FUNC, cl::sycl::cl_short) \
+ SYCL_SPECIALIZE_UNARY_FUNC(NAME, FUNC, cl::sycl::cl_int) \
+ SYCL_SPECIALIZE_UNARY_FUNC(NAME, FUNC, cl::sycl::cl_long)
+#define SYCL_SPECIALIZE_UNSIGNED_INTEGER_TYPES_BINARY(NAME, FUNC) \
+ SYCL_SPECIALIZE_BINARY_FUNC(NAME, FUNC, cl::sycl::cl_uchar) \
+ SYCL_SPECIALIZE_BINARY_FUNC(NAME, FUNC, cl::sycl::cl_ushort) \
+ SYCL_SPECIALIZE_BINARY_FUNC(NAME, FUNC, cl::sycl::cl_uint) \
+ SYCL_SPECIALIZE_BINARY_FUNC(NAME, FUNC, cl::sycl::cl_ulong)
+#define SYCL_SPECIALIZE_UNSIGNED_INTEGER_TYPES_UNARY(NAME, FUNC) \
+ SYCL_SPECIALIZE_UNARY_FUNC(NAME, FUNC, cl::sycl::cl_uchar) \
+ SYCL_SPECIALIZE_UNARY_FUNC(NAME, FUNC, cl::sycl::cl_ushort) \
+ SYCL_SPECIALIZE_UNARY_FUNC(NAME, FUNC, cl::sycl::cl_uint) \
+ SYCL_SPECIALIZE_UNARY_FUNC(NAME, FUNC, cl::sycl::cl_ulong)
+#define SYCL_SPECIALIZE_INTEGER_TYPES_BINARY(NAME, FUNC) \
+ SYCL_SPECIALIZE_SIGNED_INTEGER_TYPES_BINARY(NAME, FUNC) \
+ SYCL_SPECIALIZE_UNSIGNED_INTEGER_TYPES_BINARY(NAME, FUNC)
+#define SYCL_SPECIALIZE_INTEGER_TYPES_UNARY(NAME, FUNC) \
+ SYCL_SPECIALIZE_SIGNED_INTEGER_TYPES_UNARY(NAME, FUNC) \
+ SYCL_SPECIALIZE_UNSIGNED_INTEGER_TYPES_UNARY(NAME, FUNC)
+#define SYCL_SPECIALIZE_FLOATING_TYPES_BINARY(NAME, FUNC) \
+ SYCL_SPECIALIZE_BINARY_FUNC(NAME, FUNC, cl::sycl::cl_float) \
+  SYCL_SPECIALIZE_BINARY_FUNC(NAME, FUNC, cl::sycl::cl_double)
+#define SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(NAME, FUNC) \
+ SYCL_SPECIALIZE_UNARY_FUNC(NAME, FUNC, cl::sycl::cl_float) \
+  SYCL_SPECIALIZE_UNARY_FUNC(NAME, FUNC, cl::sycl::cl_double)
+#define SYCL_SPECIALIZE_FLOATING_TYPES_UNARY_FUNC_RET_TYPE(NAME, FUNC, RET_TYPE) \
+ SYCL_SPECIALIZE_GEN_UNARY_FUNC(NAME, FUNC, RET_TYPE, cl::sycl::cl_float) \
+ SYCL_SPECIALIZE_GEN_UNARY_FUNC(NAME, FUNC, RET_TYPE, cl::sycl::cl_double)
+
+#define SYCL_SPECIALIZE_GEN_UNARY_FUNC(NAME, FUNC, RET_TYPE, ARG_TYPE) \
+template<> \
+ EIGEN_DEVICE_FUNC \
+ EIGEN_ALWAYS_INLINE RET_TYPE NAME(const ARG_TYPE& x) { \
+ return cl::sycl::FUNC(x); \
+ }
+
+#define SYCL_SPECIALIZE_UNARY_FUNC(NAME, FUNC, TYPE) \
+ SYCL_SPECIALIZE_GEN_UNARY_FUNC(NAME, FUNC, TYPE, TYPE)
+
+#define SYCL_SPECIALIZE_GEN1_BINARY_FUNC(NAME, FUNC, RET_TYPE, ARG_TYPE1, ARG_TYPE2) \
+ template<> \
+ EIGEN_DEVICE_FUNC \
+ EIGEN_ALWAYS_INLINE RET_TYPE NAME(const ARG_TYPE1& x, const ARG_TYPE2& y) { \
+ return cl::sycl::FUNC(x, y); \
+ }
+
+#define SYCL_SPECIALIZE_GEN2_BINARY_FUNC(NAME, FUNC, RET_TYPE, ARG_TYPE) \
+ SYCL_SPECIALIZE_GEN1_BINARY_FUNC(NAME, FUNC, RET_TYPE, ARG_TYPE, ARG_TYPE)
+
+#define SYCL_SPECIALIZE_BINARY_FUNC(NAME, FUNC, TYPE) \
+ SYCL_SPECIALIZE_GEN2_BINARY_FUNC(NAME, FUNC, TYPE, TYPE)
+
+SYCL_SPECIALIZE_INTEGER_TYPES_BINARY(mini, min)
+SYCL_SPECIALIZE_FLOATING_TYPES_BINARY(mini, fmin)
+SYCL_SPECIALIZE_INTEGER_TYPES_BINARY(maxi, max)
+SYCL_SPECIALIZE_FLOATING_TYPES_BINARY(maxi, fmax)
+
+#endif // defined(__SYCL_DEVICE_ONLY__)
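Each SYCL_SPECIALIZE_*_FUNC invocation above stamps out a full specialization of the corresponding numext function that simply forwards to the cl::sycl built-in. For example, SYCL_SPECIALIZE_BINARY_FUNC(mini, fmin, cl::sycl::cl_float) expands, roughly, to:

    template<>
    EIGEN_DEVICE_FUNC
    EIGEN_ALWAYS_INLINE cl::sycl::cl_float mini(const cl::sycl::cl_float& x,
                                                const cl::sycl::cl_float& y) {
      return cl::sycl::fmin(x, y);
    }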
+
template<typename Scalar>
EIGEN_DEVICE_FUNC
@@ -1059,6 +1087,9 @@ inline EIGEN_MATHFUNC_RETVAL(abs2, Scalar) abs2(const Scalar& x)
return EIGEN_MATHFUNC_IMPL(abs2, Scalar)::run(x);
}
+EIGEN_DEVICE_FUNC
+inline bool abs2(bool x) { return x; }
+
template<typename Scalar>
EIGEN_DEVICE_FUNC
inline EIGEN_MATHFUNC_RETVAL(norm1, Scalar) norm1(const Scalar& x)
@@ -1073,6 +1104,10 @@ inline EIGEN_MATHFUNC_RETVAL(hypot, Scalar) hypot(const Scalar& x, const Scalar&
return EIGEN_MATHFUNC_IMPL(hypot, Scalar)::run(x, y);
}
+#if defined(__SYCL_DEVICE_ONLY__)
+ SYCL_SPECIALIZE_FLOATING_TYPES_BINARY(hypot, hypot)
+#endif // defined(__SYCL_DEVICE_ONLY__)
+
template<typename Scalar>
EIGEN_DEVICE_FUNC
inline EIGEN_MATHFUNC_RETVAL(log1p, Scalar) log1p(const Scalar& x)
@@ -1081,11 +1116,10 @@ inline EIGEN_MATHFUNC_RETVAL(log1p, Scalar) log1p(const Scalar& x)
}
#if defined(__SYCL_DEVICE_ONLY__)
-EIGEN_ALWAYS_INLINE float log1p(float x) { return cl::sycl::log1p(x); }
-EIGEN_ALWAYS_INLINE double log1p(double x) { return cl::sycl::log1p(x); }
-#endif // defined(__SYCL_DEVICE_ONLY__)
+SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(log1p, log1p)
+#endif // defined(__SYCL_DEVICE_ONLY__)
-#ifdef __CUDACC__
+#if defined(EIGEN_GPUCC)
template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
float log1p(const float &x) { return ::log1pf(x); }
@@ -1101,8 +1135,7 @@ inline typename internal::pow_impl<ScalarX,ScalarY>::result_type pow(const Scala
}
#if defined(__SYCL_DEVICE_ONLY__)
-EIGEN_ALWAYS_INLINE float pow(float x, float y) { return cl::sycl::pow(x, y); }
-EIGEN_ALWAYS_INLINE double pow(double x, double y) { return cl::sycl::pow(x, y); }
+SYCL_SPECIALIZE_FLOATING_TYPES_BINARY(pow, pow)
#endif // defined(__SYCL_DEVICE_ONLY__)
template<typename T> EIGEN_DEVICE_FUNC bool (isnan) (const T &x) { return internal::isnan_impl(x); }
@@ -1110,12 +1143,9 @@ template<typename T> EIGEN_DEVICE_FUNC bool (isinf) (const T &x) { return inte
template<typename T> EIGEN_DEVICE_FUNC bool (isfinite)(const T &x) { return internal::isfinite_impl(x); }
#if defined(__SYCL_DEVICE_ONLY__)
-EIGEN_ALWAYS_INLINE float isnan(float x) { return cl::sycl::isnan(x); }
-EIGEN_ALWAYS_INLINE double isnan(double x) { return cl::sycl::isnan(x); }
-EIGEN_ALWAYS_INLINE float isinf(float x) { return cl::sycl::isinf(x); }
-EIGEN_ALWAYS_INLINE double isinf(double x) { return cl::sycl::isinf(x); }
-EIGEN_ALWAYS_INLINE float isfinite(float x) { return cl::sycl::isfinite(x); }
-EIGEN_ALWAYS_INLINE double isfinite(double x) { return cl::sycl::isfinite(x); }
+SYCL_SPECIALIZE_FLOATING_TYPES_UNARY_FUNC_RET_TYPE(isnan, isnan, bool)
+SYCL_SPECIALIZE_FLOATING_TYPES_UNARY_FUNC_RET_TYPE(isinf, isinf, bool)
+SYCL_SPECIALIZE_FLOATING_TYPES_UNARY_FUNC_RET_TYPE(isfinite, isfinite, bool)
#endif // defined(__SYCL_DEVICE_ONLY__)
template<typename Scalar>
@@ -1126,8 +1156,7 @@ inline EIGEN_MATHFUNC_RETVAL(round, Scalar) round(const Scalar& x)
}
#if defined(__SYCL_DEVICE_ONLY__)
-EIGEN_ALWAYS_INLINE float round(float x) { return cl::sycl::round(x); }
-EIGEN_ALWAYS_INLINE double round(double x) { return cl::sycl::round(x); }
+SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(round, round)
#endif // defined(__SYCL_DEVICE_ONLY__)
template<typename T>
@@ -1139,11 +1168,10 @@ T (floor)(const T& x)
}
#if defined(__SYCL_DEVICE_ONLY__)
-EIGEN_ALWAYS_INLINE float floor(float x) { return cl::sycl::floor(x); }
-EIGEN_ALWAYS_INLINE double floor(double x) { return cl::sycl::floor(x); }
+SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(floor, floor)
#endif // defined(__SYCL_DEVICE_ONLY__)
-#ifdef __CUDACC__
+#if defined(EIGEN_GPUCC)
template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
float floor(const float &x) { return ::floorf(x); }
@@ -1160,11 +1188,10 @@ T (ceil)(const T& x)
}
#if defined(__SYCL_DEVICE_ONLY__)
-EIGEN_ALWAYS_INLINE float ceil(float x) { return cl::sycl::ceil(x); }
-EIGEN_ALWAYS_INLINE double ceil(double x) { return cl::sycl::ceil(x); }
+SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(ceil, ceil)
#endif // defined(__SYCL_DEVICE_ONLY__)
-#ifdef __CUDACC__
+#if defined(EIGEN_GPUCC)
template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
float ceil(const float &x) { return ::ceilf(x); }
@@ -1205,8 +1232,7 @@ T sqrt(const T &x)
}
#if defined(__SYCL_DEVICE_ONLY__)
-EIGEN_ALWAYS_INLINE float sqrt(float x) { return cl::sycl::sqrt(x); }
-EIGEN_ALWAYS_INLINE double sqrt(double x) { return cl::sycl::sqrt(x); }
+SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(sqrt, sqrt)
#endif // defined(__SYCL_DEVICE_ONLY__)
template<typename T>
@@ -1217,12 +1243,11 @@ T log(const T &x) {
}
#if defined(__SYCL_DEVICE_ONLY__)
-EIGEN_ALWAYS_INLINE float log(float x) { return cl::sycl::log(x); }
-EIGEN_ALWAYS_INLINE double log(double x) { return cl::sycl::log(x); }
+SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(log, log)
#endif // defined(__SYCL_DEVICE_ONLY__)
-#ifdef __CUDACC__
+#if defined(EIGEN_GPUCC)
template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
float log(const float &x) { return ::logf(x); }
@@ -1232,17 +1257,25 @@ double log(const double &x) { return ::log(x); }
template<typename T>
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
-typename NumTraits<T>::Real abs(const T &x) {
+typename internal::enable_if<NumTraits<T>::IsSigned || NumTraits<T>::IsComplex,typename NumTraits<T>::Real>::type
+abs(const T &x) {
EIGEN_USING_STD_MATH(abs);
return abs(x);
}
+template<typename T>
+EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
+typename internal::enable_if<!(NumTraits<T>::IsSigned || NumTraits<T>::IsComplex),typename NumTraits<T>::Real>::type
+abs(const T &x) {
+ return x;
+}
+
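The enable_if pair above sends signed and complex scalars through std::abs while unsigned scalars are returned as-is, which keeps numext::abs well defined for types such as unsigned int where calling std::abs would be ambiguous or force a promotion. A reduced model of the same dispatch using std::enable_if (illustrative, not the Eigen implementation):

    #include <cmath>
    #include <type_traits>

    // Signed types go through std::abs; unsigned types are already
    // their own absolute value.
    template<typename T>
    typename std::enable_if<std::is_signed<T>::value, T>::type
    my_abs(const T& x) { return T(std::abs(x)); }

    template<typename T>
    typename std::enable_if<std::is_unsigned<T>::value, T>::type
    my_abs(const T& x) { return x; }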
#if defined(__SYCL_DEVICE_ONLY__)
-EIGEN_ALWAYS_INLINE float abs(float x) { return cl::sycl::fabs(x); }
-EIGEN_ALWAYS_INLINE double abs(double x) { return cl::sycl::fabs(x); }
+SYCL_SPECIALIZE_INTEGER_TYPES_UNARY(abs, abs)
+SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(abs, fabs)
#endif // defined(__SYCL_DEVICE_ONLY__)
-#ifdef __CUDACC__
+#if defined(EIGEN_GPUCC)
template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
float abs(const float &x) { return ::fabsf(x); }
@@ -1268,16 +1301,31 @@ T exp(const T &x) {
}
#if defined(__SYCL_DEVICE_ONLY__)
-EIGEN_ALWAYS_INLINE float exp(float x) { return cl::sycl::exp(x); }
-EIGEN_ALWAYS_INLINE double exp(double x) { return cl::sycl::exp(x); }
+SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(exp, exp)
#endif // defined(__SYCL_DEVICE_ONLY__)
-#ifdef __CUDACC__
+#if defined(EIGEN_GPUCC)
template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
float exp(const float &x) { return ::expf(x); }
template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
double exp(const double &x) { return ::exp(x); }
+
+template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
+std::complex<float> exp(const std::complex<float>& x) {
+ float com = ::expf(x.real());
+ float res_real = com * ::cosf(x.imag());
+ float res_imag = com * ::sinf(x.imag());
+ return std::complex<float>(res_real, res_imag);
+}
+
+template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
+std::complex<double> exp(const std::complex<double>& x) {
+ double com = ::exp(x.real());
+ double res_real = com * ::cos(x.imag());
+ double res_imag = com * ::sin(x.imag());
+ return std::complex<double>(res_real, res_imag);
+}
#endif
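Since ::exp has no std::complex overload in device code, the two GPU specializations above expand exp(a+ib) through the polar identity e^a * (cos b + i sin b). A quick host-side check of that identity (illustrative only):

    #include <cassert>
    #include <cmath>
    #include <complex>

    int main() {
      std::complex<float> z(0.5f, 1.25f);
      float com = std::exp(z.real());
      std::complex<float> manual(com * std::cos(z.imag()),
                                 com * std::sin(z.imag()));
      assert(std::abs(std::exp(z) - manual) < 1e-5f);
      return 0;
    }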
template<typename Scalar>
@@ -1288,11 +1336,10 @@ inline EIGEN_MATHFUNC_RETVAL(expm1, Scalar) expm1(const Scalar& x)
}
#if defined(__SYCL_DEVICE_ONLY__)
-EIGEN_ALWAYS_INLINE float expm1(float x) { return cl::sycl::expm1(x); }
-EIGEN_ALWAYS_INLINE double expm1(double x) { return cl::sycl::expm1(x); }
+SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(expm1, expm1)
#endif // defined(__SYCL_DEVICE_ONLY__)
-#ifdef __CUDACC__
+#if defined(EIGEN_GPUCC)
template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
float expm1(const float &x) { return ::expm1f(x); }
@@ -1308,11 +1355,10 @@ T cos(const T &x) {
}
#if defined(__SYCL_DEVICE_ONLY__)
-EIGEN_ALWAYS_INLINE float cos(float x) { return cl::sycl::cos(x); }
-EIGEN_ALWAYS_INLINE double cos(double x) { return cl::sycl::cos(x); }
+SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(cos, cos)
#endif // defined(__SYCL_DEVICE_ONLY__)
-#ifdef __CUDACC__
+#if defined(EIGEN_GPUCC)
template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
float cos(const float &x) { return ::cosf(x); }
@@ -1328,11 +1374,10 @@ T sin(const T &x) {
}
#if defined(__SYCL_DEVICE_ONLY__)
-EIGEN_ALWAYS_INLINE float sin(float x) { return cl::sycl::sin(x); }
-EIGEN_ALWAYS_INLINE double sin(double x) { return cl::sycl::sin(x); }
+SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(sin, sin)
#endif // defined(__SYCL_DEVICE_ONLY__)
-#ifdef __CUDACC__
+#if defined(EIGEN_GPUCC)
template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
float sin(const float &x) { return ::sinf(x); }
@@ -1348,11 +1393,10 @@ T tan(const T &x) {
}
#if defined(__SYCL_DEVICE_ONLY__)
-EIGEN_ALWAYS_INLINE float tan(float x) { return cl::sycl::tan(x); }
-EIGEN_ALWAYS_INLINE double tan(double x) { return cl::sycl::tan(x); }
+SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(tan, tan)
#endif // defined(__SYCL_DEVICE_ONLY__)
-#ifdef __CUDACC__
+#if defined(EIGEN_GPUCC)
template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
float tan(const float &x) { return ::tanf(x); }
@@ -1367,12 +1411,21 @@ T acos(const T &x) {
return acos(x);
}
+#if EIGEN_HAS_CXX11_MATH
+template<typename T>
+EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
+T acosh(const T &x) {
+ EIGEN_USING_STD_MATH(acosh);
+ return acosh(x);
+}
+#endif
+
#if defined(__SYCL_DEVICE_ONLY__)
-EIGEN_ALWAYS_INLINE float acos(float x) { return cl::sycl::acos(x); }
-EIGEN_ALWAYS_INLINE double acos(double x) { return cl::sycl::acos(x); }
+SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(acos, acos)
+SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(acosh, acosh)
#endif // defined(__SYCL_DEVICE_ONLY__)
-#ifdef __CUDACC__
+#if defined(EIGEN_GPUCC)
template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
float acos(const float &x) { return ::acosf(x); }
@@ -1387,12 +1440,21 @@ T asin(const T &x) {
return asin(x);
}
+#if EIGEN_HAS_CXX11_MATH
+template<typename T>
+EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
+T asinh(const T &x) {
+ EIGEN_USING_STD_MATH(asinh);
+ return asinh(x);
+}
+#endif
+
#if defined(__SYCL_DEVICE_ONLY__)
-EIGEN_ALWAYS_INLINE float asin(float x) { return cl::sycl::asin(x); }
-EIGEN_ALWAYS_INLINE double asin(double x) { return cl::sycl::asin(x); }
+SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(asin, asin)
+SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(asinh, asinh)
#endif // defined(__SYCL_DEVICE_ONLY__)
-#ifdef __CUDACC__
+#if defined(EIGEN_GPUCC)
template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
float asin(const float &x) { return ::asinf(x); }
@@ -1407,12 +1469,21 @@ T atan(const T &x) {
return atan(x);
}
+#if EIGEN_HAS_CXX11_MATH
+template<typename T>
+EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
+T atanh(const T &x) {
+ EIGEN_USING_STD_MATH(atanh);
+ return atanh(x);
+}
+#endif
+
#if defined(__SYCL_DEVICE_ONLY__)
-EIGEN_ALWAYS_INLINE float atan(float x) { return cl::sycl::atan(x); }
-EIGEN_ALWAYS_INLINE double atan(double x) { return cl::sycl::atan(x); }
+SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(atan, atan)
+SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(atanh, atanh)
#endif // defined(__SYCL_DEVICE_ONLY__)
-#ifdef __CUDACC__
+#if defined(EIGEN_GPUCC)
template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
float atan(const float &x) { return ::atanf(x); }
@@ -1429,11 +1500,10 @@ T cosh(const T &x) {
}
#if defined(__SYCL_DEVICE_ONLY__)
-EIGEN_ALWAYS_INLINE float cosh(float x) { return cl::sycl::cosh(x); }
-EIGEN_ALWAYS_INLINE double cosh(double x) { return cl::sycl::cosh(x); }
+SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(cosh, cosh)
#endif // defined(__SYCL_DEVICE_ONLY__)
-#ifdef __CUDACC__
+#if defined(EIGEN_GPUCC)
template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
float cosh(const float &x) { return ::coshf(x); }
@@ -1449,11 +1519,10 @@ T sinh(const T &x) {
}
#if defined(__SYCL_DEVICE_ONLY__)
-EIGEN_ALWAYS_INLINE float sinh(float x) { return cl::sycl::sinh(x); }
-EIGEN_ALWAYS_INLINE double sinh(double x) { return cl::sycl::sinh(x); }
+SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(sinh, sinh)
#endif // defined(__SYCL_DEVICE_ONLY__)
-#ifdef __CUDACC__
+#if defined(EIGEN_GPUCC)
template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
float sinh(const float &x) { return ::sinhf(x); }
@@ -1468,15 +1537,16 @@ T tanh(const T &x) {
return tanh(x);
}
-#if defined(__SYCL_DEVICE_ONLY__)
-EIGEN_ALWAYS_INLINE float tanh(float x) { return cl::sycl::tanh(x); }
-EIGEN_ALWAYS_INLINE double tanh(double x) { return cl::sycl::tanh(x); }
-#elif (!defined(__CUDACC__)) && EIGEN_FAST_MATH
+#if (!defined(EIGEN_GPUCC)) && EIGEN_FAST_MATH && (!defined(__SYCL_DEVICE_ONLY__))
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
float tanh(float x) { return internal::generic_fast_tanh_float(x); }
#endif
-#ifdef __CUDACC__
+#if defined(__SYCL_DEVICE_ONLY__)
+SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(tanh, tanh)
+#endif // defined(__SYCL_DEVICE_ONLY__)
+
+#if defined(EIGEN_GPUCC)
template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
float tanh(const float &x) { return ::tanhf(x); }
@@ -1492,11 +1562,10 @@ T fmod(const T& a, const T& b) {
}
#if defined(__SYCL_DEVICE_ONLY__)
-EIGEN_ALWAYS_INLINE float fmod(float x, float y) { return cl::sycl::fmod(x, y); }
-EIGEN_ALWAYS_INLINE double fmod(double x, double y) { return cl::sycl::fmod(x, y); }
+SYCL_SPECIALIZE_FLOATING_TYPES_BINARY(fmod, fmod)
#endif // defined(__SYCL_DEVICE_ONLY__)
-#ifdef __CUDACC__
+#if defined(EIGEN_GPUCC)
template <>
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
float fmod(const float& a, const float& b) {
@@ -1510,6 +1579,23 @@ double fmod(const double& a, const double& b) {
}
#endif
+#if defined(__SYCL_DEVICE_ONLY__)
+#undef SYCL_SPECIALIZE_SIGNED_INTEGER_TYPES_BINARY
+#undef SYCL_SPECIALIZE_SIGNED_INTEGER_TYPES_UNARY
+#undef SYCL_SPECIALIZE_UNSIGNED_INTEGER_TYPES_BINARY
+#undef SYCL_SPECIALIZE_UNSIGNED_INTEGER_TYPES_UNARY
+#undef SYCL_SPECIALIZE_INTEGER_TYPES_BINARY
+#undef SYCL_SPECIALIZE_INTEGER_TYPES_UNARY
+#undef SYCL_SPECIALIZE_FLOATING_TYPES_BINARY
+#undef SYCL_SPECIALIZE_FLOATING_TYPES_UNARY
+#undef SYCL_SPECIALIZE_FLOATING_TYPES_UNARY_FUNC_RET_TYPE
+#undef SYCL_SPECIALIZE_GEN_UNARY_FUNC
+#undef SYCL_SPECIALIZE_UNARY_FUNC
+#undef SYCL_SPECIALIZE_GEN1_BINARY_FUNC
+#undef SYCL_SPECIALIZE_GEN2_BINARY_FUNC
+#undef SYCL_SPECIALIZE_BINARY_FUNC
+#endif // defined(__SYCL_DEVICE_ONLY__)
+
} // end namespace numext
namespace internal {
diff --git a/Eigen/src/Core/MathFunctionsImpl.h b/Eigen/src/Core/MathFunctionsImpl.h
index ae1386b4c..a23e93ccb 100644
--- a/Eigen/src/Core/MathFunctionsImpl.h
+++ b/Eigen/src/Core/MathFunctionsImpl.h
@@ -66,6 +66,30 @@ T generic_fast_tanh_float(const T& a_x)
return pdiv(p, q);
}
+template<typename RealScalar>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+RealScalar positive_real_hypot(const RealScalar& x, const RealScalar& y)
+{
+ EIGEN_USING_STD_MATH(sqrt);
+ RealScalar p, qp;
+ p = numext::maxi(x,y);
+ if(p==RealScalar(0)) return RealScalar(0);
+ qp = numext::mini(y,x) / p;
+ return p * sqrt(RealScalar(1) + qp*qp);
+}
+
+template<typename Scalar>
+struct hypot_impl
+{
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+ static EIGEN_DEVICE_FUNC
+ inline RealScalar run(const Scalar& x, const Scalar& y)
+ {
+ EIGEN_USING_STD_MATH(abs);
+ return positive_real_hypot<RealScalar>(abs(x), abs(y));
+ }
+};
+
} // end namespace internal
} // end namespace Eigen
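positive_real_hypot implements the usual overflow-safe formulation sqrt(x^2 + y^2) = p * sqrt(1 + (q/p)^2) with p = max(|x|,|y|) and q = min(|x|,|y|): since q/p <= 1, the squared term cannot overflow the way a naive x*x + y*y can. The same idea written against the standard library (illustration only):

    #include <algorithm>
    #include <cmath>

    double safe_hypot(double x, double y) {
      x = std::fabs(x); y = std::fabs(y);
      double p = std::max(x, y);
      if (p == 0.0) return 0.0;
      double qp = std::min(x, y) / p;       // qp <= 1, so qp*qp is safe
      return p * std::sqrt(1.0 + qp * qp);  // naive x*x + y*y could overflow
    }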
diff --git a/Eigen/src/Core/MatrixBase.h b/Eigen/src/Core/MatrixBase.h
index 675c94e12..6046c8bae 100644
--- a/Eigen/src/Core/MatrixBase.h
+++ b/Eigen/src/Core/MatrixBase.h
@@ -160,20 +160,11 @@ template<typename Derived> class MatrixBase
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
Derived& operator-=(const MatrixBase<OtherDerived>& other);
-#ifdef __CUDACC__
template<typename OtherDerived>
EIGEN_DEVICE_FUNC
- const Product<Derived,OtherDerived,LazyProduct>
- operator*(const MatrixBase<OtherDerived> &other) const
- { return this->lazyProduct(other); }
-#else
-
- template<typename OtherDerived>
const Product<Derived,OtherDerived>
operator*(const MatrixBase<OtherDerived> &other) const;
-#endif
-
template<typename OtherDerived>
EIGEN_DEVICE_FUNC
const Product<Derived,OtherDerived,LazyProduct>
@@ -277,6 +268,8 @@ template<typename Derived> class MatrixBase
Derived& setIdentity();
EIGEN_DEVICE_FUNC
Derived& setIdentity(Index rows, Index cols);
+ EIGEN_DEVICE_FUNC Derived& setUnit(Index i);
+ EIGEN_DEVICE_FUNC Derived& setUnit(Index newSize, Index i);
bool isIdentity(const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
bool isDiagonal(const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
@@ -294,7 +287,7 @@ template<typename Derived> class MatrixBase
* fuzzy comparison such as isApprox()
* \sa isApprox(), operator!= */
template<typename OtherDerived>
- inline bool operator==(const MatrixBase<OtherDerived>& other) const
+ EIGEN_DEVICE_FUNC inline bool operator==(const MatrixBase<OtherDerived>& other) const
{ return cwiseEqual(other).all(); }
/** \returns true if at least one pair of coefficients of \c *this and \a other are not exactly equal to each other.
@@ -302,10 +295,10 @@ template<typename Derived> class MatrixBase
* fuzzy comparison such as isApprox()
* \sa isApprox(), operator== */
template<typename OtherDerived>
- inline bool operator!=(const MatrixBase<OtherDerived>& other) const
+ EIGEN_DEVICE_FUNC inline bool operator!=(const MatrixBase<OtherDerived>& other) const
{ return cwiseNotEqual(other).any(); }
- NoAlias<Derived,Eigen::MatrixBase > noalias();
+ NoAlias<Derived,Eigen::MatrixBase > EIGEN_DEVICE_FUNC noalias();
// TODO forceAlignedAccess is temporarily disabled
// Need to find a nicer workaround.
@@ -335,6 +328,7 @@ template<typename Derived> class MatrixBase
inline const PartialPivLU<PlainObject> lu() const;
+ EIGEN_DEVICE_FUNC
inline const Inverse<Derived> inverse() const;
template<typename ResultType>
@@ -344,12 +338,15 @@ template<typename Derived> class MatrixBase
bool& invertible,
const RealScalar& absDeterminantThreshold = NumTraits<Scalar>::dummy_precision()
) const;
+
template<typename ResultType>
inline void computeInverseWithCheck(
ResultType& inverse,
bool& invertible,
const RealScalar& absDeterminantThreshold = NumTraits<Scalar>::dummy_precision()
) const;
+
+ EIGEN_DEVICE_FUNC
Scalar determinant() const;
/////////// Cholesky module ///////////
@@ -421,15 +418,19 @@ template<typename Derived> class MatrixBase
////////// Householder module ///////////
+ EIGEN_DEVICE_FUNC
void makeHouseholderInPlace(Scalar& tau, RealScalar& beta);
template<typename EssentialPart>
+ EIGEN_DEVICE_FUNC
void makeHouseholder(EssentialPart& essential,
Scalar& tau, RealScalar& beta) const;
template<typename EssentialPart>
+ EIGEN_DEVICE_FUNC
void applyHouseholderOnTheLeft(const EssentialPart& essential,
const Scalar& tau,
Scalar* workspace);
template<typename EssentialPart>
+ EIGEN_DEVICE_FUNC
void applyHouseholderOnTheRight(const EssentialPart& essential,
const Scalar& tau,
Scalar* workspace);
@@ -437,8 +438,10 @@ template<typename Derived> class MatrixBase
///////// Jacobi module /////////
template<typename OtherScalar>
+ EIGEN_DEVICE_FUNC
void applyOnTheLeft(Index p, Index q, const JacobiRotation<OtherScalar>& j);
template<typename OtherScalar>
+ EIGEN_DEVICE_FUNC
void applyOnTheRight(Index p, Index q, const JacobiRotation<OtherScalar>& j);
///////// SparseCore module /////////
diff --git a/Eigen/src/Core/NestByValue.h b/Eigen/src/Core/NestByValue.h
index 13adf070e..01cf192e9 100644
--- a/Eigen/src/Core/NestByValue.h
+++ b/Eigen/src/Core/NestByValue.h
@@ -67,25 +67,25 @@ template<typename ExpressionType> class NestByValue
}
template<int LoadMode>
- inline const PacketScalar packet(Index row, Index col) const
+ EIGEN_DEVICE_FUNC inline const PacketScalar packet(Index row, Index col) const
{
return m_expression.template packet<LoadMode>(row, col);
}
template<int LoadMode>
- inline void writePacket(Index row, Index col, const PacketScalar& x)
+ EIGEN_DEVICE_FUNC inline void writePacket(Index row, Index col, const PacketScalar& x)
{
m_expression.const_cast_derived().template writePacket<LoadMode>(row, col, x);
}
template<int LoadMode>
- inline const PacketScalar packet(Index index) const
+ EIGEN_DEVICE_FUNC inline const PacketScalar packet(Index index) const
{
return m_expression.template packet<LoadMode>(index);
}
template<int LoadMode>
- inline void writePacket(Index index, const PacketScalar& x)
+ EIGEN_DEVICE_FUNC inline void writePacket(Index index, const PacketScalar& x)
{
m_expression.const_cast_derived().template writePacket<LoadMode>(index, x);
}
@@ -99,7 +99,7 @@ template<typename ExpressionType> class NestByValue
/** \returns an expression of the temporary version of *this.
*/
template<typename Derived>
-inline const NestByValue<Derived>
+EIGEN_DEVICE_FUNC inline const NestByValue<Derived>
DenseBase<Derived>::nestByValue() const
{
return NestByValue<Derived>(derived());
diff --git a/Eigen/src/Core/NoAlias.h b/Eigen/src/Core/NoAlias.h
index 33908010b..570283d90 100644
--- a/Eigen/src/Core/NoAlias.h
+++ b/Eigen/src/Core/NoAlias.h
@@ -33,6 +33,7 @@ class NoAlias
public:
typedef typename ExpressionType::Scalar Scalar;
+ EIGEN_DEVICE_FUNC
explicit NoAlias(ExpressionType& expression) : m_expression(expression) {}
template<typename OtherDerived>
@@ -74,10 +75,10 @@ class NoAlias
*
* More precisely, noalias() allows to bypass the EvalBeforeAssignBit flag.
* Currently, even though several expressions may alias, only product
- * expressions have this flag. Therefore, noalias() is only usefull when
+ * expressions have this flag. Therefore, noalias() is only useful when
* the source expression contains a matrix product.
*
- * Here are some examples where noalias is usefull:
+ * Here are some examples where noalias is useful:
* \code
* D.noalias() = A * B;
* D.noalias() += A.transpose() * B;
@@ -98,7 +99,7 @@ class NoAlias
* \sa class NoAlias
*/
template<typename Derived>
-NoAlias<Derived,MatrixBase> MatrixBase<Derived>::noalias()
+NoAlias<Derived,MatrixBase> EIGEN_DEVICE_FUNC MatrixBase<Derived>::noalias()
{
return NoAlias<Derived, Eigen::MatrixBase >(derived());
}
diff --git a/Eigen/src/Core/NumTraits.h b/Eigen/src/Core/NumTraits.h
index aebc0c259..b053cff07 100644
--- a/Eigen/src/Core/NumTraits.h
+++ b/Eigen/src/Core/NumTraits.h
@@ -21,12 +21,14 @@ template< typename T,
bool is_integer = NumTraits<T>::IsInteger>
struct default_digits10_impl
{
+ EIGEN_DEVICE_FUNC
static int run() { return std::numeric_limits<T>::digits10; }
};
template<typename T>
struct default_digits10_impl<T,false,false> // Floating point
{
+ EIGEN_DEVICE_FUNC
static int run() {
using std::log10;
using std::ceil;
@@ -38,6 +40,38 @@ struct default_digits10_impl<T,false,false> // Floating point
template<typename T>
struct default_digits10_impl<T,false,true> // Integer
{
+ EIGEN_DEVICE_FUNC
+ static int run() { return 0; }
+};
+
+
+// default implementation of digits(), based on numeric_limits if specialized,
+// 0 for integer types, and ceil(-log2(epsilon())) otherwise.
+template< typename T,
+ bool use_numeric_limits = std::numeric_limits<T>::is_specialized,
+ bool is_integer = NumTraits<T>::IsInteger>
+struct default_digits_impl
+{
+ EIGEN_DEVICE_FUNC
+ static int run() { return std::numeric_limits<T>::digits; }
+};
+
+template<typename T>
+struct default_digits_impl<T,false,false> // Floating point
+{
+ EIGEN_DEVICE_FUNC
+ static int run() {
+ using std::log;
+ using std::ceil;
+ typedef typename NumTraits<T>::Real Real;
+ return int(ceil(-log(NumTraits<Real>::epsilon())/log(static_cast<Real>(2))));
+ }
+};
+
+template<typename T>
+struct default_digits_impl<T,false,true> // Integer
+{
+ EIGEN_DEVICE_FUNC
static int run() { return 0; }
};
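The floating-point fallback above derives a binary digit count from the machine epsilon as ceil(-log(eps)/log(2)); for eps = 2^-23 the quotient is mathematically 23, one less than the IEEE single-precision significand width, since numeric_limits<T>::digits also counts the implicit leading bit. A stand-alone version of the same computation (illustrative only):

    #include <cmath>

    // Fallback used above: estimate a binary digit count from epsilon.
    // E.g. eps = 2^-23 (IEEE single precision) gives 23, mathematically.
    int digits_from_epsilon(double eps) {
      return int(std::ceil(-std::log(eps) / std::log(2.0)));
    }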
@@ -119,6 +153,12 @@ template<typename T> struct GenericNumTraits
}
EIGEN_DEVICE_FUNC
+ static inline int digits()
+ {
+ return internal::default_digits_impl<T>::run();
+ }
+
+ EIGEN_DEVICE_FUNC
static inline Real dummy_precision()
{
// make sure to override this for floating-point types
@@ -215,6 +255,8 @@ struct NumTraits<Array<Scalar, Rows, Cols, Options, MaxRows, MaxCols> >
static inline RealScalar epsilon() { return NumTraits<RealScalar>::epsilon(); }
EIGEN_DEVICE_FUNC
static inline RealScalar dummy_precision() { return NumTraits<RealScalar>::dummy_precision(); }
+
+ static inline int digits10() { return NumTraits<Scalar>::digits10(); }
};
template<> struct NumTraits<std::string>
diff --git a/Eigen/src/Core/PermutationMatrix.h b/Eigen/src/Core/PermutationMatrix.h
index b1fb455b9..acd085301 100644
--- a/Eigen/src/Core/PermutationMatrix.h
+++ b/Eigen/src/Core/PermutationMatrix.h
@@ -99,13 +99,13 @@ class PermutationBase : public EigenBase<Derived>
#endif
/** \returns the number of rows */
- inline Index rows() const { return Index(indices().size()); }
+ inline EIGEN_DEVICE_FUNC Index rows() const { return Index(indices().size()); }
/** \returns the number of columns */
- inline Index cols() const { return Index(indices().size()); }
+ inline EIGEN_DEVICE_FUNC Index cols() const { return Index(indices().size()); }
/** \returns the size of a side of the respective square matrix, i.e., the number of indices */
- inline Index size() const { return Index(indices().size()); }
+ inline EIGEN_DEVICE_FUNC Index size() const { return Index(indices().size()); }
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename DenseDerived>
diff --git a/Eigen/src/Core/PlainObjectBase.h b/Eigen/src/Core/PlainObjectBase.h
index 77f4f6066..da329fd4f 100644
--- a/Eigen/src/Core/PlainObjectBase.h
+++ b/Eigen/src/Core/PlainObjectBase.h
@@ -577,6 +577,10 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
* while the AlignedMap() functions return aligned Map objects and thus should be called only with 16-byte-aligned
* \a data pointers.
*
+ * Here is an example using strides:
+ * \include Matrix_Map_stride.cpp
+ * Output: \verbinclude Matrix_Map_stride.out
+ *
* \see class Map
*/
//@{
@@ -776,7 +780,7 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
resize(size);
}
- // We have a 1x1 matrix/array => the argument is interpreted as the value of the unique coefficient (case where scalar type can be implicitely converted)
+ // We have a 1x1 matrix/array => the argument is interpreted as the value of the unique coefficient (case where scalar type can be implicitly converted)
template<typename T>
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE void _init1(const Scalar& val0, typename internal::enable_if<Base::SizeAtCompileTime==1 && internal::is_convertible<T, Scalar>::value,T>::type* = 0)
@@ -917,13 +921,19 @@ namespace internal {
template <typename Derived, typename OtherDerived, bool IsVector>
struct conservative_resize_like_impl
{
+ #if EIGEN_HAS_TYPE_TRAITS
+ static const bool IsRelocatable = std::is_trivially_copyable<typename Derived::Scalar>::value;
+ #else
+ static const bool IsRelocatable = !NumTraits<typename Derived::Scalar>::RequireInitialization;
+ #endif
static void run(DenseBase<Derived>& _this, Index rows, Index cols)
{
if (_this.rows() == rows && _this.cols() == cols) return;
EIGEN_STATIC_ASSERT_DYNAMIC_SIZE(Derived)
- if ( ( Derived::IsRowMajor && _this.cols() == cols) || // row-major and we change only the number of rows
- (!Derived::IsRowMajor && _this.rows() == rows) ) // column-major and we change only the number of columns
+ if ( IsRelocatable
+ && (( Derived::IsRowMajor && _this.cols() == cols) || // row-major and we change only the number of rows
+ (!Derived::IsRowMajor && _this.rows() == rows) )) // column-major and we change only the number of columns
{
internal::check_rows_cols_for_overflow<Derived::MaxSizeAtCompileTime>::run(rows, cols);
_this.derived().m_storage.conservativeResize(rows*cols,rows,cols);
@@ -951,8 +961,9 @@ struct conservative_resize_like_impl
EIGEN_STATIC_ASSERT_DYNAMIC_SIZE(Derived)
EIGEN_STATIC_ASSERT_DYNAMIC_SIZE(OtherDerived)
- if ( ( Derived::IsRowMajor && _this.cols() == other.cols()) || // row-major and we change only the number of rows
- (!Derived::IsRowMajor && _this.rows() == other.rows()) ) // column-major and we change only the number of columns
+ if ( IsRelocatable &&
+ (( Derived::IsRowMajor && _this.cols() == other.cols()) || // row-major and we change only the number of rows
+ (!Derived::IsRowMajor && _this.rows() == other.rows()) )) // column-major and we change only the number of columns
{
const Index new_rows = other.rows() - _this.rows();
const Index new_cols = other.cols() - _this.cols();
@@ -980,13 +991,18 @@ template <typename Derived, typename OtherDerived>
struct conservative_resize_like_impl<Derived,OtherDerived,true>
: conservative_resize_like_impl<Derived,OtherDerived,false>
{
- using conservative_resize_like_impl<Derived,OtherDerived,false>::run;
+ typedef conservative_resize_like_impl<Derived,OtherDerived,false> Base;
+ using Base::run;
+ using Base::IsRelocatable;
static void run(DenseBase<Derived>& _this, Index size)
{
const Index new_rows = Derived::RowsAtCompileTime==1 ? 1 : size;
const Index new_cols = Derived::RowsAtCompileTime==1 ? size : 1;
- _this.derived().m_storage.conservativeResize(size,new_rows,new_cols);
+ if(IsRelocatable)
+ _this.derived().m_storage.conservativeResize(size,new_rows,new_cols);
+ else
+ Base::run(_this.derived(), new_rows, new_cols);
}
static void run(DenseBase<Derived>& _this, const DenseBase<OtherDerived>& other)
@@ -997,7 +1013,10 @@ struct conservative_resize_like_impl<Derived,OtherDerived,true>
const Index new_rows = Derived::RowsAtCompileTime==1 ? 1 : other.rows();
const Index new_cols = Derived::RowsAtCompileTime==1 ? other.cols() : 1;
- _this.derived().m_storage.conservativeResize(other.size(),new_rows,new_cols);
+ if(IsRelocatable)
+ _this.derived().m_storage.conservativeResize(other.size(),new_rows,new_cols);
+ else
+ Base::run(_this.derived(), new_rows, new_cols);
if (num_new_elements > 0)
_this.tail(num_new_elements) = other.tail(num_new_elements);
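The IsRelocatable flag introduced above gates the in-place conservativeResize path: only scalars whose objects can be moved as raw bytes (trivially copyable, or, on older compilers, not requiring initialization) may be grown or shrunk directly by the storage; anything else falls back to evaluating into a temporary. A loose stand-alone analogy of that trait-based gate (not how DenseStorage itself works):

    #include <cstring>
    #include <type_traits>
    #include <vector>

    // Grow a buffer, using a raw byte copy when the element type is
    // trivially copyable and element-wise copies otherwise.
    template<typename T>
    void grow(std::vector<T>& buf, std::size_t new_size) {
      std::vector<T> tmp(new_size);
      const std::size_t n = buf.size() < new_size ? buf.size() : new_size;
      if (std::is_trivially_copyable<T>::value)
        std::memcpy(tmp.data(), buf.data(), n * sizeof(T));  // raw relocation
      else
        for (std::size_t i = 0; i < n; ++i) tmp[i] = buf[i]; // per-element copy
      buf.swap(tmp);
    }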
diff --git a/Eigen/src/Core/Product.h b/Eigen/src/Core/Product.h
index ae0c94b38..70790dbd4 100644
--- a/Eigen/src/Core/Product.h
+++ b/Eigen/src/Core/Product.h
@@ -97,8 +97,8 @@ class Product : public ProductImpl<_Lhs,_Rhs,Option,
&& "if you wanted a coeff-wise or a dot product use the respective explicit functions");
}
- EIGEN_DEVICE_FUNC inline Index rows() const { return m_lhs.rows(); }
- EIGEN_DEVICE_FUNC inline Index cols() const { return m_rhs.cols(); }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rows() const { return m_lhs.rows(); }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index cols() const { return m_rhs.cols(); }
EIGEN_DEVICE_FUNC const LhsNestedCleaned& lhs() const { return m_lhs; }
EIGEN_DEVICE_FUNC const RhsNestedCleaned& rhs() const { return m_rhs; }
@@ -116,7 +116,7 @@ class dense_product_base
: public internal::dense_xpr_base<Product<Lhs,Rhs,Option> >::type
{};
-/** Convertion to scalar for inner-products */
+/** Conversion to scalar for inner-products */
template<typename Lhs, typename Rhs, int Option>
class dense_product_base<Lhs, Rhs, Option, InnerProduct>
: public internal::dense_xpr_base<Product<Lhs,Rhs,Option> >::type
@@ -127,7 +127,7 @@ public:
using Base::derived;
typedef typename Base::Scalar Scalar;
- operator const Scalar() const
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE operator const Scalar() const
{
return internal::evaluator<ProductXpr>(derived()).coeff(0,0);
}
@@ -162,7 +162,7 @@ class ProductImpl<Lhs,Rhs,Option,Dense>
public:
- EIGEN_DEVICE_FUNC Scalar coeff(Index row, Index col) const
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar coeff(Index row, Index col) const
{
EIGEN_STATIC_ASSERT(EnableCoeff, THIS_METHOD_IS_ONLY_FOR_INNER_OR_LAZY_PRODUCTS);
eigen_assert( (Option==LazyProduct) || (this->rows() == 1 && this->cols() == 1) );
@@ -170,7 +170,7 @@ class ProductImpl<Lhs,Rhs,Option,Dense>
return internal::evaluator<Derived>(derived()).coeff(row,col);
}
- EIGEN_DEVICE_FUNC Scalar coeff(Index i) const
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar coeff(Index i) const
{
EIGEN_STATIC_ASSERT(EnableCoeff, THIS_METHOD_IS_ONLY_FOR_INNER_OR_LAZY_PRODUCTS);
eigen_assert( (Option==LazyProduct) || (this->rows() == 1 && this->cols() == 1) );
diff --git a/Eigen/src/Core/ProductEvaluators.h b/Eigen/src/Core/ProductEvaluators.h
index 583b7f59e..2787987e7 100644
--- a/Eigen/src/Core/ProductEvaluators.h
+++ b/Eigen/src/Core/ProductEvaluators.h
@@ -20,7 +20,7 @@ namespace internal {
/** \internal
* Evaluator of a product expression.
* Since products require special treatments to handle all possible cases,
- * we simply deffer the evaluation logic to a product_evaluator class
+ * we simply defer the evaluation logic to a product_evaluator class
* which offers more partial specialization possibilities.
*
* \sa class product_evaluator
@@ -32,7 +32,7 @@ struct evaluator<Product<Lhs, Rhs, Options> >
typedef Product<Lhs, Rhs, Options> XprType;
typedef product_evaluator<XprType> Base;
- EIGEN_DEVICE_FUNC explicit evaluator(const XprType& xpr) : Base(xpr) {}
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit evaluator(const XprType& xpr) : Base(xpr) {}
};
// Catch "scalar * ( A * B )" and transform it to "(A*scalar) * B"
@@ -55,7 +55,7 @@ struct evaluator<CwiseBinaryOp<internal::scalar_product_op<Scalar1,Scalar2>,
const Product<Lhs, Rhs, DefaultProduct> > XprType;
typedef evaluator<Product<EIGEN_SCALAR_BINARYOP_EXPR_RETURN_TYPE(Scalar1,Lhs,product), Rhs, DefaultProduct> > Base;
- EIGEN_DEVICE_FUNC explicit evaluator(const XprType& xpr)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit evaluator(const XprType& xpr)
: Base(xpr.lhs().functor().m_other * xpr.rhs().lhs() * xpr.rhs().rhs())
{}
};
@@ -68,7 +68,7 @@ struct evaluator<Diagonal<const Product<Lhs, Rhs, DefaultProduct>, DiagIndex> >
typedef Diagonal<const Product<Lhs, Rhs, DefaultProduct>, DiagIndex> XprType;
typedef evaluator<Diagonal<const Product<Lhs, Rhs, LazyProduct>, DiagIndex> > Base;
- EIGEN_DEVICE_FUNC explicit evaluator(const XprType& xpr)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit evaluator(const XprType& xpr)
: Base(Diagonal<const Product<Lhs, Rhs, LazyProduct>, DiagIndex>(
Product<Lhs, Rhs, LazyProduct>(xpr.nestedExpression().lhs(), xpr.nestedExpression().rhs()),
xpr.index() ))
@@ -128,7 +128,7 @@ protected:
PlainObject m_result;
};
-// The following three shortcuts are enabled only if the scalar types match excatly.
+// The following three shortcuts are enabled only if the scalar types match exactly.
// TODO: we could enable them for different scalar types when the product is not vectorized.
// Dense = Product
@@ -137,7 +137,7 @@ struct Assignment<DstXprType, Product<Lhs,Rhs,Options>, internal::assign_op<Scal
typename enable_if<(Options==DefaultProduct || Options==AliasFreeProduct)>::type>
{
typedef Product<Lhs,Rhs,Options> SrcXprType;
- static EIGEN_STRONG_INLINE
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &)
{
Index dstRows = src.rows();
@@ -155,7 +155,7 @@ struct Assignment<DstXprType, Product<Lhs,Rhs,Options>, internal::add_assign_op<
typename enable_if<(Options==DefaultProduct || Options==AliasFreeProduct)>::type>
{
typedef Product<Lhs,Rhs,Options> SrcXprType;
- static EIGEN_STRONG_INLINE
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
void run(DstXprType &dst, const SrcXprType &src, const internal::add_assign_op<Scalar,Scalar> &)
{
eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols());
@@ -170,7 +170,7 @@ struct Assignment<DstXprType, Product<Lhs,Rhs,Options>, internal::sub_assign_op<
typename enable_if<(Options==DefaultProduct || Options==AliasFreeProduct)>::type>
{
typedef Product<Lhs,Rhs,Options> SrcXprType;
- static EIGEN_STRONG_INLINE
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
void run(DstXprType &dst, const SrcXprType &src, const internal::sub_assign_op<Scalar,Scalar> &)
{
eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols());
@@ -190,7 +190,7 @@ struct Assignment<DstXprType, CwiseBinaryOp<internal::scalar_product_op<ScalarBi
typedef CwiseBinaryOp<internal::scalar_product_op<ScalarBis,Scalar>,
const CwiseNullaryOp<internal::scalar_constant_op<ScalarBis>,Plain>,
const Product<Lhs,Rhs,DefaultProduct> > SrcXprType;
- static EIGEN_STRONG_INLINE
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
void run(DstXprType &dst, const SrcXprType &src, const AssignFunc& func)
{
call_assignment_no_alias(dst, (src.lhs().functor().m_other * src.rhs().lhs())*src.rhs().rhs(), func);
@@ -207,11 +207,17 @@ struct evaluator_assume_aliasing<CwiseBinaryOp<internal::scalar_sum_op<typename
static const bool value = true;
};
+template<typename OtherXpr, typename Lhs, typename Rhs>
+struct evaluator_assume_aliasing<CwiseBinaryOp<internal::scalar_difference_op<typename OtherXpr::Scalar,typename Product<Lhs,Rhs,DefaultProduct>::Scalar>, const OtherXpr,
+ const Product<Lhs,Rhs,DefaultProduct> >, DenseShape > {
+ static const bool value = true;
+};
+
template<typename DstXprType, typename OtherXpr, typename ProductType, typename Func1, typename Func2>
struct assignment_from_xpr_op_product
{
template<typename SrcXprType, typename InitialFunc>
- static EIGEN_STRONG_INLINE
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
void run(DstXprType &dst, const SrcXprType &src, const InitialFunc& /*func*/)
{
call_assignment_no_alias(dst, src.lhs(), Func1());
@@ -240,19 +246,19 @@ template<typename Lhs, typename Rhs>
struct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,InnerProduct>
{
template<typename Dst>
- static inline void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
{
dst.coeffRef(0,0) = (lhs.transpose().cwiseProduct(rhs)).sum();
}
template<typename Dst>
- static inline void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
{
dst.coeffRef(0,0) += (lhs.transpose().cwiseProduct(rhs)).sum();
}
template<typename Dst>
- static void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
{ dst.coeffRef(0,0) -= (lhs.transpose().cwiseProduct(rhs)).sum(); }
};
@@ -263,10 +269,10 @@ struct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,InnerProduct>
// Column major result
template<typename Dst, typename Lhs, typename Rhs, typename Func>
-void outer_product_selector_run(Dst& dst, const Lhs &lhs, const Rhs &rhs, const Func& func, const false_type&)
+void EIGEN_DEVICE_FUNC outer_product_selector_run(Dst& dst, const Lhs &lhs, const Rhs &rhs, const Func& func, const false_type&)
{
evaluator<Rhs> rhsEval(rhs);
- typename nested_eval<Lhs,Rhs::SizeAtCompileTime>::type actual_lhs(lhs);
+ ei_declare_local_nested_eval(Lhs,lhs,Rhs::SizeAtCompileTime,actual_lhs);
// FIXME if cols is large enough, then it might be useful to make sure that lhs is sequentially stored
// FIXME not very good if rhs is real and lhs complex while alpha is real too
const Index cols = dst.cols();
@@ -276,10 +282,10 @@ void outer_product_selector_run(Dst& dst, const Lhs &lhs, const Rhs &rhs, const
// Row major result
template<typename Dst, typename Lhs, typename Rhs, typename Func>
-void outer_product_selector_run(Dst& dst, const Lhs &lhs, const Rhs &rhs, const Func& func, const true_type&)
+void EIGEN_DEVICE_FUNC outer_product_selector_run(Dst& dst, const Lhs &lhs, const Rhs &rhs, const Func& func, const true_type&)
{
evaluator<Lhs> lhsEval(lhs);
- typename nested_eval<Rhs,Lhs::SizeAtCompileTime>::type actual_rhs(rhs);
+ ei_declare_local_nested_eval(Rhs,rhs,Lhs::SizeAtCompileTime,actual_rhs);
// FIXME if rows is large enough, then it might be useful to make sure that rhs is sequentially stored
// FIXME not very good if lhs is real and rhs complex while alpha is real too
const Index rows = dst.rows();
@@ -294,37 +300,37 @@ struct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,OuterProduct>
typedef typename Product<Lhs,Rhs>::Scalar Scalar;
// TODO it would be nice to be able to exploit our *_assign_op functors for that purpose
- struct set { template<typename Dst, typename Src> void operator()(const Dst& dst, const Src& src) const { dst.const_cast_derived() = src; } };
- struct add { template<typename Dst, typename Src> void operator()(const Dst& dst, const Src& src) const { dst.const_cast_derived() += src; } };
- struct sub { template<typename Dst, typename Src> void operator()(const Dst& dst, const Src& src) const { dst.const_cast_derived() -= src; } };
+ struct set { template<typename Dst, typename Src> EIGEN_DEVICE_FUNC void operator()(const Dst& dst, const Src& src) const { dst.const_cast_derived() = src; } };
+ struct add { template<typename Dst, typename Src> EIGEN_DEVICE_FUNC void operator()(const Dst& dst, const Src& src) const { dst.const_cast_derived() += src; } };
+ struct sub { template<typename Dst, typename Src> EIGEN_DEVICE_FUNC void operator()(const Dst& dst, const Src& src) const { dst.const_cast_derived() -= src; } };
struct adds {
Scalar m_scale;
explicit adds(const Scalar& s) : m_scale(s) {}
- template<typename Dst, typename Src> void operator()(const Dst& dst, const Src& src) const {
+ template<typename Dst, typename Src> void EIGEN_DEVICE_FUNC operator()(const Dst& dst, const Src& src) const {
dst.const_cast_derived() += m_scale * src;
}
};
template<typename Dst>
- static inline void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
{
internal::outer_product_selector_run(dst, lhs, rhs, set(), is_row_major<Dst>());
}
template<typename Dst>
- static inline void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
{
internal::outer_product_selector_run(dst, lhs, rhs, add(), is_row_major<Dst>());
}
template<typename Dst>
- static inline void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
{
internal::outer_product_selector_run(dst, lhs, rhs, sub(), is_row_major<Dst>());
}
template<typename Dst>
- static inline void scaleAndAddTo(Dst& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha)
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void scaleAndAddTo(Dst& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha)
{
internal::outer_product_selector_run(dst, lhs, rhs, adds(alpha), is_row_major<Dst>());
}
@@ -339,19 +345,19 @@ struct generic_product_impl_base
typedef typename Product<Lhs,Rhs>::Scalar Scalar;
template<typename Dst>
- static EIGEN_STRONG_INLINE void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
{ dst.setZero(); scaleAndAddTo(dst, lhs, rhs, Scalar(1)); }
template<typename Dst>
- static EIGEN_STRONG_INLINE void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
{ scaleAndAddTo(dst,lhs, rhs, Scalar(1)); }
template<typename Dst>
- static EIGEN_STRONG_INLINE void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
{ scaleAndAddTo(dst, lhs, rhs, Scalar(-1)); }
template<typename Dst>
- static EIGEN_STRONG_INLINE void scaleAndAddTo(Dst& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha)
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void scaleAndAddTo(Dst& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha)
{ Derived::scaleAndAddTo(dst,lhs,rhs,alpha); }
};
@@ -367,7 +373,7 @@ struct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemvProduct>
typedef typename internal::remove_all<typename internal::conditional<int(Side)==OnTheRight,LhsNested,RhsNested>::type>::type MatrixType;
template<typename Dest>
- static EIGEN_STRONG_INLINE void scaleAndAddTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha)
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void scaleAndAddTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha)
{
LhsNested actual_lhs(lhs);
RhsNested actual_rhs(rhs);
@@ -384,26 +390,52 @@ struct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,CoeffBasedProductMode>
typedef typename Product<Lhs,Rhs>::Scalar Scalar;
template<typename Dst>
- static EIGEN_STRONG_INLINE void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
{
// Same as: dst.noalias() = lhs.lazyProduct(rhs);
// but easier on the compiler side
call_assignment_no_alias(dst, lhs.lazyProduct(rhs), internal::assign_op<typename Dst::Scalar,Scalar>());
}
-
+
template<typename Dst>
- static EIGEN_STRONG_INLINE void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
{
// dst.noalias() += lhs.lazyProduct(rhs);
call_assignment_no_alias(dst, lhs.lazyProduct(rhs), internal::add_assign_op<typename Dst::Scalar,Scalar>());
}
template<typename Dst>
- static EIGEN_STRONG_INLINE void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
{
// dst.noalias() -= lhs.lazyProduct(rhs);
call_assignment_no_alias(dst, lhs.lazyProduct(rhs), internal::sub_assign_op<typename Dst::Scalar,Scalar>());
}
+
+ // Catch "dst {,+,-}= (s*A)*B" and evaluate it lazily by moving out the scalar factor:
+ // dst {,+,-}= s * (A.lazyProduct(B))
+  // This is a huge benefit for heap-allocated matrix types as it saves one costly allocation.
+  // For them, this strategy is also faster than simply bypassing the heap allocation through
+  // stack allocation.
+  // For fixed-size matrices, this is less obvious: it is sometimes x2 faster but sometimes x3 slower,
+  // and the behavior also depends a lot on the compiler... so let's be conservative and enable this for dynamic sizes only,
+  // that is, when coming from generic_product_impl<...,GemmProduct> in file GeneralMatrixMatrix.h.
+ template<typename Dst, typename Scalar1, typename Scalar2, typename Plain1, typename Xpr2, typename Func>
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ void eval_dynamic(Dst& dst, const CwiseBinaryOp<internal::scalar_product_op<Scalar1,Scalar2>,
+ const CwiseNullaryOp<internal::scalar_constant_op<Scalar1>, Plain1>, Xpr2>& lhs, const Rhs& rhs, const Func &func)
+ {
+ call_assignment_no_alias(dst, lhs.lhs().functor().m_other * lhs.rhs().lazyProduct(rhs), func);
+ }
+
+  // Here, we always have LhsT==Lhs, but we need to make it a template type to make the above
+ // overload more specialized.
+ template<typename Dst, typename LhsT, typename Func>
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ void eval_dynamic(Dst& dst, const LhsT& lhs, const Rhs& rhs, const Func &func)
+ {
+ call_assignment_no_alias(dst, lhs.lazyProduct(rhs), func);
+ }
+
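In user terms, the first eval_dynamic overload is written to catch assignments whose left factor already carries a scalar weight, e.g. (d, A, B being ordinary dynamic-size matrices; illustrative usage only):

    #include <Eigen/Dense>

    void example(Eigen::MatrixXd& d,
                 const Eigen::MatrixXd& A, const Eigen::MatrixXd& B) {
      d.noalias() = 3.0 * A * B;  // parsed as (3.0*A)*B: the scalar factor can be
                                  // hoisted out and applied to A.lazyProduct(B)
      d.noalias() = A * B;        // plain product: handled by the generic overload
    }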
// template<typename Dst>
// static inline void scaleAndAddTo(Dst& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha)
@@ -735,7 +767,8 @@ struct generic_product_impl<Lhs,Rhs,SelfAdjointShape,DenseShape,ProductTag>
typedef typename Product<Lhs,Rhs>::Scalar Scalar;
template<typename Dest>
- static void scaleAndAddTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha)
+ static EIGEN_DEVICE_FUNC
+ void scaleAndAddTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha)
{
selfadjoint_product_impl<typename Lhs::MatrixType,Lhs::Mode,false,Rhs,0,Rhs::IsVectorAtCompileTime>::run(dst, lhs.nestedExpression(), rhs, alpha);
}
@@ -779,7 +812,11 @@ public:
_Vectorizable = bool(int(MatrixFlags)&PacketAccessBit) && _SameTypes && (_ScalarAccessOnDiag || (bool(int(DiagFlags)&PacketAccessBit))),
_LinearAccessMask = (MatrixType::RowsAtCompileTime==1 || MatrixType::ColsAtCompileTime==1) ? LinearAccessBit : 0,
Flags = ((HereditaryBits|_LinearAccessMask) & (unsigned int)(MatrixFlags)) | (_Vectorizable ? PacketAccessBit : 0),
- Alignment = evaluator<MatrixType>::Alignment
+ Alignment = evaluator<MatrixType>::Alignment,
+
+ AsScalarProduct = (DiagonalType::SizeAtCompileTime==1)
+ || (DiagonalType::SizeAtCompileTime==Dynamic && MatrixType::RowsAtCompileTime==1 && ProductOrder==OnTheLeft)
+ || (DiagonalType::SizeAtCompileTime==Dynamic && MatrixType::ColsAtCompileTime==1 && ProductOrder==OnTheRight)
};
diagonal_product_evaluator_base(const MatrixType &mat, const DiagonalType &diag)
@@ -791,7 +828,10 @@ public:
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar coeff(Index idx) const
{
- return m_diagImpl.coeff(idx) * m_matImpl.coeff(idx);
+ if(AsScalarProduct)
+ return m_diagImpl.coeff(0) * m_matImpl.coeff(idx);
+ else
+ return m_diagImpl.coeff(idx) * m_matImpl.coeff(idx);
}
protected:
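AsScalarProduct marks the degenerate case where the "diagonal" factor holds a single coefficient, either a fixed size of 1 or a dynamic-size diagonal applied to a single row or column; coeff(idx) then rescales every entry by that one value instead of indexing the diagonal with idx. A reduced runtime model of the same branch (illustrative only):

    // When the diagonal stores a single value, the product is just a
    // scalar rescaling of the matrix coefficients.
    double diag_times_mat_coeff(const double* diag, int diag_size,
                                const double* mat, int idx) {
      return (diag_size == 1 ? diag[0] : diag[idx]) * mat[idx];
    }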
@@ -845,7 +885,7 @@ struct product_evaluator<Product<Lhs, Rhs, ProductKind>, ProductTag, DiagonalSha
return m_diagImpl.coeff(row) * m_matImpl.coeff(row, col);
}
-#ifndef __CUDACC__
+#ifndef EIGEN_GPUCC
template<int LoadMode,typename PacketType>
EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const
{
@@ -889,7 +929,7 @@ struct product_evaluator<Product<Lhs, Rhs, ProductKind>, ProductTag, DenseShape,
return m_matImpl.coeff(row, col) * m_diagImpl.coeff(col);
}
-#ifndef __CUDACC__
+#ifndef EIGEN_GPUCC
template<int LoadMode,typename PacketType>
EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const
{
diff --git a/Eigen/src/Core/Random.h b/Eigen/src/Core/Random.h
index 6faf789c7..486e9ed52 100644
--- a/Eigen/src/Core/Random.h
+++ b/Eigen/src/Core/Random.h
@@ -128,7 +128,7 @@ DenseBase<Derived>::Random()
* \sa class CwiseNullaryOp, setRandom(Index), setRandom(Index,Index)
*/
template<typename Derived>
-inline Derived& DenseBase<Derived>::setRandom()
+EIGEN_DEVICE_FUNC inline Derived& DenseBase<Derived>::setRandom()
{
return *this = Random(rows(), cols());
}
diff --git a/Eigen/src/Core/Redux.h b/Eigen/src/Core/Redux.h
index b6e8f8887..e449ef3ac 100644
--- a/Eigen/src/Core/Redux.h
+++ b/Eigen/src/Core/Redux.h
@@ -23,22 +23,22 @@ namespace internal {
* Part 1 : the logic deciding a strategy for vectorization and unrolling
***************************************************************************/
-template<typename Func, typename Derived>
+template<typename Func, typename Evaluator>
struct redux_traits
{
public:
- typedef typename find_best_packet<typename Derived::Scalar,Derived::SizeAtCompileTime>::type PacketType;
+ typedef typename find_best_packet<typename Evaluator::Scalar,Evaluator::SizeAtCompileTime>::type PacketType;
enum {
PacketSize = unpacket_traits<PacketType>::size,
- InnerMaxSize = int(Derived::IsRowMajor)
- ? Derived::MaxColsAtCompileTime
- : Derived::MaxRowsAtCompileTime
+ InnerMaxSize = int(Evaluator::IsRowMajor)
+ ? Evaluator::MaxColsAtCompileTime
+ : Evaluator::MaxRowsAtCompileTime
};
enum {
- MightVectorize = (int(Derived::Flags)&ActualPacketAccessBit)
+ MightVectorize = (int(Evaluator::Flags)&ActualPacketAccessBit)
&& (functor_traits<Func>::PacketAccess),
- MayLinearVectorize = bool(MightVectorize) && (int(Derived::Flags)&LinearAccessBit),
+ MayLinearVectorize = bool(MightVectorize) && (int(Evaluator::Flags)&LinearAccessBit),
MaySliceVectorize = bool(MightVectorize) && int(InnerMaxSize)>=3*PacketSize
};
@@ -51,8 +51,8 @@ public:
public:
enum {
- Cost = Derived::SizeAtCompileTime == Dynamic ? HugeCost
- : Derived::SizeAtCompileTime * Derived::CoeffReadCost + (Derived::SizeAtCompileTime-1) * functor_traits<Func>::Cost,
+ Cost = Evaluator::SizeAtCompileTime == Dynamic ? HugeCost
+ : Evaluator::SizeAtCompileTime * Evaluator::CoeffReadCost + (Evaluator::SizeAtCompileTime-1) * functor_traits<Func>::Cost,
UnrollingLimit = EIGEN_UNROLLING_LIMIT * (int(Traversal) == int(DefaultTraversal) ? 1 : int(PacketSize))
};
@@ -64,9 +64,9 @@ public:
#ifdef EIGEN_DEBUG_ASSIGN
static void debug()
{
- std::cerr << "Xpr: " << typeid(typename Derived::XprType).name() << std::endl;
+ std::cerr << "Xpr: " << typeid(typename Evaluator::XprType).name() << std::endl;
std::cerr.setf(std::ios::hex, std::ios::basefield);
- EIGEN_DEBUG_VAR(Derived::Flags)
+ EIGEN_DEBUG_VAR(Evaluator::Flags)
std::cerr.unsetf(std::ios::hex);
EIGEN_DEBUG_VAR(InnerMaxSize)
EIGEN_DEBUG_VAR(PacketSize)
@@ -87,88 +87,88 @@ public:
/*** no vectorization ***/
-template<typename Func, typename Derived, int Start, int Length>
+template<typename Func, typename Evaluator, int Start, int Length>
struct redux_novec_unroller
{
enum {
HalfLength = Length/2
};
- typedef typename Derived::Scalar Scalar;
+ typedef typename Evaluator::Scalar Scalar;
EIGEN_DEVICE_FUNC
- static EIGEN_STRONG_INLINE Scalar run(const Derived &mat, const Func& func)
+ static EIGEN_STRONG_INLINE Scalar run(const Evaluator &eval, const Func& func)
{
- return func(redux_novec_unroller<Func, Derived, Start, HalfLength>::run(mat,func),
- redux_novec_unroller<Func, Derived, Start+HalfLength, Length-HalfLength>::run(mat,func));
+ return func(redux_novec_unroller<Func, Evaluator, Start, HalfLength>::run(eval,func),
+ redux_novec_unroller<Func, Evaluator, Start+HalfLength, Length-HalfLength>::run(eval,func));
}
};
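
redux_novec_unroller evaluates the whole reduction at compile time by splitting the coefficient range in half recursively until single coefficients remain. The same pattern, stripped down to a plain array (a sketch, not Eigen code):

#include <cstddef>
#include <iostream>

// Compile-time divide-and-conquer reduction in the spirit of redux_novec_unroller:
// split [Start, Start+Length) in half until single elements remain, then combine.
template<std::size_t Start, std::size_t Length>
struct unrolled_reduce {
  template<typename T, typename Func>
  static T run(const T* data, Func f) {
    constexpr std::size_t Half = Length / 2;
    return f(unrolled_reduce<Start, Half>::run(data, f),
             unrolled_reduce<Start + Half, Length - Half>::run(data, f));
  }
};

template<std::size_t Start>
struct unrolled_reduce<Start, 1> {
  template<typename T, typename Func>
  static T run(const T* data, Func) { return data[Start]; }
};

int main() {
  double v[4] = {1, 2, 3, 4};
  double s = unrolled_reduce<0, 4>::run(v, [](double a, double b) { return a + b; });
  std::cout << s << "\n";  // 10
}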
-template<typename Func, typename Derived, int Start>
-struct redux_novec_unroller<Func, Derived, Start, 1>
+template<typename Func, typename Evaluator, int Start>
+struct redux_novec_unroller<Func, Evaluator, Start, 1>
{
enum {
- outer = Start / Derived::InnerSizeAtCompileTime,
- inner = Start % Derived::InnerSizeAtCompileTime
+ outer = Start / Evaluator::InnerSizeAtCompileTime,
+ inner = Start % Evaluator::InnerSizeAtCompileTime
};
- typedef typename Derived::Scalar Scalar;
+ typedef typename Evaluator::Scalar Scalar;
EIGEN_DEVICE_FUNC
- static EIGEN_STRONG_INLINE Scalar run(const Derived &mat, const Func&)
+ static EIGEN_STRONG_INLINE Scalar run(const Evaluator &eval, const Func&)
{
- return mat.coeffByOuterInner(outer, inner);
+ return eval.coeffByOuterInner(outer, inner);
}
};
// This is actually dead code and will never be called. It is required
// to prevent false warnings regarding failed inlining though
// for 0 length run() will never be called at all.
-template<typename Func, typename Derived, int Start>
-struct redux_novec_unroller<Func, Derived, Start, 0>
+template<typename Func, typename Evaluator, int Start>
+struct redux_novec_unroller<Func, Evaluator, Start, 0>
{
- typedef typename Derived::Scalar Scalar;
+ typedef typename Evaluator::Scalar Scalar;
EIGEN_DEVICE_FUNC
- static EIGEN_STRONG_INLINE Scalar run(const Derived&, const Func&) { return Scalar(); }
+ static EIGEN_STRONG_INLINE Scalar run(const Evaluator&, const Func&) { return Scalar(); }
};
/*** vectorization ***/
-template<typename Func, typename Derived, int Start, int Length>
+template<typename Func, typename Evaluator, int Start, int Length>
struct redux_vec_unroller
{
enum {
- PacketSize = redux_traits<Func, Derived>::PacketSize,
+ PacketSize = redux_traits<Func, Evaluator>::PacketSize,
HalfLength = Length/2
};
- typedef typename Derived::Scalar Scalar;
- typedef typename redux_traits<Func, Derived>::PacketType PacketScalar;
+ typedef typename Evaluator::Scalar Scalar;
+ typedef typename redux_traits<Func, Evaluator>::PacketType PacketScalar;
- static EIGEN_STRONG_INLINE PacketScalar run(const Derived &mat, const Func& func)
+ static EIGEN_STRONG_INLINE PacketScalar run(const Evaluator &eval, const Func& func)
{
return func.packetOp(
- redux_vec_unroller<Func, Derived, Start, HalfLength>::run(mat,func),
- redux_vec_unroller<Func, Derived, Start+HalfLength, Length-HalfLength>::run(mat,func) );
+ redux_vec_unroller<Func, Evaluator, Start, HalfLength>::run(eval,func),
+ redux_vec_unroller<Func, Evaluator, Start+HalfLength, Length-HalfLength>::run(eval,func) );
}
};
-template<typename Func, typename Derived, int Start>
-struct redux_vec_unroller<Func, Derived, Start, 1>
+template<typename Func, typename Evaluator, int Start>
+struct redux_vec_unroller<Func, Evaluator, Start, 1>
{
enum {
- index = Start * redux_traits<Func, Derived>::PacketSize,
- outer = index / int(Derived::InnerSizeAtCompileTime),
- inner = index % int(Derived::InnerSizeAtCompileTime),
- alignment = Derived::Alignment
+ index = Start * redux_traits<Func, Evaluator>::PacketSize,
+ outer = index / int(Evaluator::InnerSizeAtCompileTime),
+ inner = index % int(Evaluator::InnerSizeAtCompileTime),
+ alignment = Evaluator::Alignment
};
- typedef typename Derived::Scalar Scalar;
- typedef typename redux_traits<Func, Derived>::PacketType PacketScalar;
+ typedef typename Evaluator::Scalar Scalar;
+ typedef typename redux_traits<Func, Evaluator>::PacketType PacketScalar;
- static EIGEN_STRONG_INLINE PacketScalar run(const Derived &mat, const Func&)
+ static EIGEN_STRONG_INLINE PacketScalar run(const Evaluator &eval, const Func&)
{
- return mat.template packetByOuterInner<alignment,PacketScalar>(outer, inner);
+ return eval.template packetByOuterInner<alignment,PacketScalar>(outer, inner);
}
};
@@ -176,53 +176,65 @@ struct redux_vec_unroller<Func, Derived, Start, 1>
* Part 3 : implementation of all cases
***************************************************************************/
-template<typename Func, typename Derived,
- int Traversal = redux_traits<Func, Derived>::Traversal,
- int Unrolling = redux_traits<Func, Derived>::Unrolling
+template<typename Func, typename Evaluator,
+ int Traversal = redux_traits<Func, Evaluator>::Traversal,
+ int Unrolling = redux_traits<Func, Evaluator>::Unrolling
>
struct redux_impl;
-template<typename Func, typename Derived>
-struct redux_impl<Func, Derived, DefaultTraversal, NoUnrolling>
+template<typename Func, typename Evaluator>
+struct redux_impl<Func, Evaluator, DefaultTraversal, NoUnrolling>
{
- typedef typename Derived::Scalar Scalar;
- EIGEN_DEVICE_FUNC
- static EIGEN_STRONG_INLINE Scalar run(const Derived &mat, const Func& func)
+ typedef typename Evaluator::Scalar Scalar;
+
+ template<typename XprType>
+ EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE
+ Scalar run(const Evaluator &eval, const Func& func, const XprType& xpr)
{
- eigen_assert(mat.rows()>0 && mat.cols()>0 && "you are using an empty matrix");
+ eigen_assert(xpr.rows()>0 && xpr.cols()>0 && "you are using an empty matrix");
Scalar res;
- res = mat.coeffByOuterInner(0, 0);
- for(Index i = 1; i < mat.innerSize(); ++i)
- res = func(res, mat.coeffByOuterInner(0, i));
- for(Index i = 1; i < mat.outerSize(); ++i)
- for(Index j = 0; j < mat.innerSize(); ++j)
- res = func(res, mat.coeffByOuterInner(i, j));
+ res = eval.coeffByOuterInner(0, 0);
+ for(Index i = 1; i < xpr.innerSize(); ++i)
+ res = func(res, eval.coeffByOuterInner(0, i));
+ for(Index i = 1; i < xpr.outerSize(); ++i)
+ for(Index j = 0; j < xpr.innerSize(); ++j)
+ res = func(res, eval.coeffByOuterInner(i, j));
return res;
}
};
-template<typename Func, typename Derived>
-struct redux_impl<Func,Derived, DefaultTraversal, CompleteUnrolling>
- : public redux_novec_unroller<Func,Derived, 0, Derived::SizeAtCompileTime>
-{};
+template<typename Func, typename Evaluator>
+struct redux_impl<Func,Evaluator, DefaultTraversal, CompleteUnrolling>
+ : redux_novec_unroller<Func,Evaluator, 0, Evaluator::SizeAtCompileTime>
+{
+ typedef redux_novec_unroller<Func,Evaluator, 0, Evaluator::SizeAtCompileTime> Base;
+ typedef typename Evaluator::Scalar Scalar;
+ template<typename XprType>
+ EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE
+ Scalar run(const Evaluator &eval, const Func& func, const XprType& /*xpr*/)
+ {
+ return Base::run(eval,func);
+ }
+};
-template<typename Func, typename Derived>
-struct redux_impl<Func, Derived, LinearVectorizedTraversal, NoUnrolling>
+template<typename Func, typename Evaluator>
+struct redux_impl<Func, Evaluator, LinearVectorizedTraversal, NoUnrolling>
{
- typedef typename Derived::Scalar Scalar;
- typedef typename redux_traits<Func, Derived>::PacketType PacketScalar;
+ typedef typename Evaluator::Scalar Scalar;
+ typedef typename redux_traits<Func, Evaluator>::PacketType PacketScalar;
- static Scalar run(const Derived &mat, const Func& func)
+ template<typename XprType>
+ static Scalar run(const Evaluator &eval, const Func& func, const XprType& xpr)
{
- const Index size = mat.size();
+ const Index size = xpr.size();
- const Index packetSize = redux_traits<Func, Derived>::PacketSize;
+ const Index packetSize = redux_traits<Func, Evaluator>::PacketSize;
const int packetAlignment = unpacket_traits<PacketScalar>::alignment;
enum {
- alignment0 = (bool(Derived::Flags & DirectAccessBit) && bool(packet_traits<Scalar>::AlignedOnScalar)) ? int(packetAlignment) : int(Unaligned),
- alignment = EIGEN_PLAIN_ENUM_MAX(alignment0, Derived::Alignment)
+ alignment0 = (bool(Evaluator::Flags & DirectAccessBit) && bool(packet_traits<Scalar>::AlignedOnScalar)) ? int(packetAlignment) : int(Unaligned),
+ alignment = EIGEN_PLAIN_ENUM_MAX(alignment0, Evaluator::Alignment)
};
- const Index alignedStart = internal::first_default_aligned(mat.nestedExpression());
+ const Index alignedStart = internal::first_default_aligned(xpr);
const Index alignedSize2 = ((size-alignedStart)/(2*packetSize))*(2*packetSize);
const Index alignedSize = ((size-alignedStart)/(packetSize))*(packetSize);
const Index alignedEnd2 = alignedStart + alignedSize2;
@@ -230,34 +242,34 @@ struct redux_impl<Func, Derived, LinearVectorizedTraversal, NoUnrolling>
Scalar res;
if(alignedSize)
{
- PacketScalar packet_res0 = mat.template packet<alignment,PacketScalar>(alignedStart);
+ PacketScalar packet_res0 = eval.template packet<alignment,PacketScalar>(alignedStart);
if(alignedSize>packetSize) // we have at least two packets to partly unroll the loop
{
- PacketScalar packet_res1 = mat.template packet<alignment,PacketScalar>(alignedStart+packetSize);
+ PacketScalar packet_res1 = eval.template packet<alignment,PacketScalar>(alignedStart+packetSize);
for(Index index = alignedStart + 2*packetSize; index < alignedEnd2; index += 2*packetSize)
{
- packet_res0 = func.packetOp(packet_res0, mat.template packet<alignment,PacketScalar>(index));
- packet_res1 = func.packetOp(packet_res1, mat.template packet<alignment,PacketScalar>(index+packetSize));
+ packet_res0 = func.packetOp(packet_res0, eval.template packet<alignment,PacketScalar>(index));
+ packet_res1 = func.packetOp(packet_res1, eval.template packet<alignment,PacketScalar>(index+packetSize));
}
packet_res0 = func.packetOp(packet_res0,packet_res1);
if(alignedEnd>alignedEnd2)
- packet_res0 = func.packetOp(packet_res0, mat.template packet<alignment,PacketScalar>(alignedEnd2));
+ packet_res0 = func.packetOp(packet_res0, eval.template packet<alignment,PacketScalar>(alignedEnd2));
}
res = func.predux(packet_res0);
for(Index index = 0; index < alignedStart; ++index)
- res = func(res,mat.coeff(index));
+ res = func(res,eval.coeff(index));
for(Index index = alignedEnd; index < size; ++index)
- res = func(res,mat.coeff(index));
+ res = func(res,eval.coeff(index));
}
else // too small to vectorize anything.
// since this is dynamic-size hence inefficient anyway for such small sizes, don't try to optimize.
{
- res = mat.coeff(0);
+ res = eval.coeff(0);
for(Index index = 1; index < size; ++index)
- res = func(res,mat.coeff(index));
+ res = func(res,eval.coeff(index));
}
return res;
@@ -265,130 +277,106 @@ struct redux_impl<Func, Derived, LinearVectorizedTraversal, NoUnrolling>
};
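
The linearly vectorized path above splits the work into a scalar prologue up to the first aligned coefficient, a main loop combining two packets per iteration, an optional leftover packet, and a scalar epilogue. A standalone illustration of that index bookkeeping, with made-up numbers (size 23, packet size 4, aligned start 3):

#include <iostream>

int main() {
  const long size = 23, packetSize = 4, alignedStart = 3;
  const long alignedSize2 = ((size - alignedStart) / (2 * packetSize)) * (2 * packetSize); // 16
  const long alignedSize  = ((size - alignedStart) / packetSize) * packetSize;             // 20
  const long alignedEnd2  = alignedStart + alignedSize2;                                   // 19
  const long alignedEnd   = alignedStart + alignedSize;                                    // 23
  std::cout << "scalar prologue: [0," << alignedStart << ")\n"             // coeffs 0..2
            << "2-packet loop:   [" << alignedStart << "," << alignedEnd2 << ")\n"
            << "leftover packet: " << (alignedEnd > alignedEnd2 ? "yes" : "no") << "\n"
            << "scalar epilogue: [" << alignedEnd << "," << size << ")\n"; // empty here
}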
// NOTE: for SliceVectorizedTraversal we simply bypass unrolling
-template<typename Func, typename Derived, int Unrolling>
-struct redux_impl<Func, Derived, SliceVectorizedTraversal, Unrolling>
+template<typename Func, typename Evaluator, int Unrolling>
+struct redux_impl<Func, Evaluator, SliceVectorizedTraversal, Unrolling>
{
- typedef typename Derived::Scalar Scalar;
- typedef typename redux_traits<Func, Derived>::PacketType PacketType;
+ typedef typename Evaluator::Scalar Scalar;
+ typedef typename redux_traits<Func, Evaluator>::PacketType PacketType;
- EIGEN_DEVICE_FUNC static Scalar run(const Derived &mat, const Func& func)
+ template<typename XprType>
+ EIGEN_DEVICE_FUNC static Scalar run(const Evaluator &eval, const Func& func, const XprType& xpr)
{
- eigen_assert(mat.rows()>0 && mat.cols()>0 && "you are using an empty matrix");
- const Index innerSize = mat.innerSize();
- const Index outerSize = mat.outerSize();
+ eigen_assert(xpr.rows()>0 && xpr.cols()>0 && "you are using an empty matrix");
+ const Index innerSize = xpr.innerSize();
+ const Index outerSize = xpr.outerSize();
enum {
- packetSize = redux_traits<Func, Derived>::PacketSize
+ packetSize = redux_traits<Func, Evaluator>::PacketSize
};
const Index packetedInnerSize = ((innerSize)/packetSize)*packetSize;
Scalar res;
if(packetedInnerSize)
{
- PacketType packet_res = mat.template packet<Unaligned,PacketType>(0,0);
+ PacketType packet_res = eval.template packet<Unaligned,PacketType>(0,0);
for(Index j=0; j<outerSize; ++j)
for(Index i=(j==0?packetSize:0); i<packetedInnerSize; i+=Index(packetSize))
- packet_res = func.packetOp(packet_res, mat.template packetByOuterInner<Unaligned,PacketType>(j,i));
+ packet_res = func.packetOp(packet_res, eval.template packetByOuterInner<Unaligned,PacketType>(j,i));
res = func.predux(packet_res);
for(Index j=0; j<outerSize; ++j)
for(Index i=packetedInnerSize; i<innerSize; ++i)
- res = func(res, mat.coeffByOuterInner(j,i));
+ res = func(res, eval.coeffByOuterInner(j,i));
}
else // too small to vectorize anything.
// since this is dynamic-size hence inefficient anyway for such small sizes, don't try to optimize.
{
- res = redux_impl<Func, Derived, DefaultTraversal, NoUnrolling>::run(mat, func);
+ res = redux_impl<Func, Evaluator, DefaultTraversal, NoUnrolling>::run(eval, func, xpr);
}
return res;
}
};
-template<typename Func, typename Derived>
-struct redux_impl<Func, Derived, LinearVectorizedTraversal, CompleteUnrolling>
+template<typename Func, typename Evaluator>
+struct redux_impl<Func, Evaluator, LinearVectorizedTraversal, CompleteUnrolling>
{
- typedef typename Derived::Scalar Scalar;
+ typedef typename Evaluator::Scalar Scalar;
- typedef typename redux_traits<Func, Derived>::PacketType PacketScalar;
+ typedef typename redux_traits<Func, Evaluator>::PacketType PacketScalar;
enum {
- PacketSize = redux_traits<Func, Derived>::PacketSize,
- Size = Derived::SizeAtCompileTime,
+ PacketSize = redux_traits<Func, Evaluator>::PacketSize,
+ Size = Evaluator::SizeAtCompileTime,
VectorizedSize = (Size / PacketSize) * PacketSize
};
- EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Derived &mat, const Func& func)
+
+ template<typename XprType>
+ EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE
+ Scalar run(const Evaluator &eval, const Func& func, const XprType &xpr)
{
- eigen_assert(mat.rows()>0 && mat.cols()>0 && "you are using an empty matrix");
+ EIGEN_ONLY_USED_FOR_DEBUG(xpr)
+ eigen_assert(xpr.rows()>0 && xpr.cols()>0 && "you are using an empty matrix");
if (VectorizedSize > 0) {
- Scalar res = func.predux(redux_vec_unroller<Func, Derived, 0, Size / PacketSize>::run(mat,func));
+ Scalar res = func.predux(redux_vec_unroller<Func, Evaluator, 0, Size / PacketSize>::run(eval,func));
if (VectorizedSize != Size)
- res = func(res,redux_novec_unroller<Func, Derived, VectorizedSize, Size-VectorizedSize>::run(mat,func));
+ res = func(res,redux_novec_unroller<Func, Evaluator, VectorizedSize, Size-VectorizedSize>::run(eval,func));
return res;
}
else {
- return redux_novec_unroller<Func, Derived, 0, Size>::run(mat,func);
+ return redux_novec_unroller<Func, Evaluator, 0, Size>::run(eval,func);
}
}
};
// evaluator adaptor
template<typename _XprType>
-class redux_evaluator
+class redux_evaluator : public internal::evaluator<_XprType>
{
+ typedef internal::evaluator<_XprType> Base;
public:
typedef _XprType XprType;
- EIGEN_DEVICE_FUNC explicit redux_evaluator(const XprType &xpr) : m_evaluator(xpr), m_xpr(xpr) {}
+ EIGEN_DEVICE_FUNC explicit redux_evaluator(const XprType &xpr) : Base(xpr) {}
typedef typename XprType::Scalar Scalar;
typedef typename XprType::CoeffReturnType CoeffReturnType;
typedef typename XprType::PacketScalar PacketScalar;
- typedef typename XprType::PacketReturnType PacketReturnType;
enum {
MaxRowsAtCompileTime = XprType::MaxRowsAtCompileTime,
MaxColsAtCompileTime = XprType::MaxColsAtCompileTime,
// TODO we should not remove DirectAccessBit and rather find an elegant way to query the alignment offset at runtime from the evaluator
- Flags = evaluator<XprType>::Flags & ~DirectAccessBit,
+ Flags = Base::Flags & ~DirectAccessBit,
IsRowMajor = XprType::IsRowMajor,
SizeAtCompileTime = XprType::SizeAtCompileTime,
- InnerSizeAtCompileTime = XprType::InnerSizeAtCompileTime,
- CoeffReadCost = evaluator<XprType>::CoeffReadCost,
- Alignment = evaluator<XprType>::Alignment
+ InnerSizeAtCompileTime = XprType::InnerSizeAtCompileTime
};
- EIGEN_DEVICE_FUNC Index rows() const { return m_xpr.rows(); }
- EIGEN_DEVICE_FUNC Index cols() const { return m_xpr.cols(); }
- EIGEN_DEVICE_FUNC Index size() const { return m_xpr.size(); }
- EIGEN_DEVICE_FUNC Index innerSize() const { return m_xpr.innerSize(); }
- EIGEN_DEVICE_FUNC Index outerSize() const { return m_xpr.outerSize(); }
-
- EIGEN_DEVICE_FUNC
- CoeffReturnType coeff(Index row, Index col) const
- { return m_evaluator.coeff(row, col); }
-
- EIGEN_DEVICE_FUNC
- CoeffReturnType coeff(Index index) const
- { return m_evaluator.coeff(index); }
-
- template<int LoadMode, typename PacketType>
- PacketType packet(Index row, Index col) const
- { return m_evaluator.template packet<LoadMode,PacketType>(row, col); }
-
- template<int LoadMode, typename PacketType>
- PacketType packet(Index index) const
- { return m_evaluator.template packet<LoadMode,PacketType>(index); }
-
EIGEN_DEVICE_FUNC
CoeffReturnType coeffByOuterInner(Index outer, Index inner) const
- { return m_evaluator.coeff(IsRowMajor ? outer : inner, IsRowMajor ? inner : outer); }
+ { return Base::coeff(IsRowMajor ? outer : inner, IsRowMajor ? inner : outer); }
template<int LoadMode, typename PacketType>
PacketType packetByOuterInner(Index outer, Index inner) const
- { return m_evaluator.template packet<LoadMode,PacketType>(IsRowMajor ? outer : inner, IsRowMajor ? inner : outer); }
-
- const XprType & nestedExpression() const { return m_xpr; }
+ { return Base::template packet<LoadMode,PacketType>(IsRowMajor ? outer : inner, IsRowMajor ? inner : outer); }
-protected:
- internal::evaluator<XprType> m_evaluator;
- const XprType &m_xpr;
};
} // end namespace internal
@@ -407,7 +395,7 @@ protected:
*/
template<typename Derived>
template<typename Func>
-typename internal::traits<Derived>::Scalar
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar
DenseBase<Derived>::redux(const Func& func) const
{
eigen_assert(this->rows()>0 && this->cols()>0 && "you are using an empty matrix");
@@ -415,14 +403,16 @@ DenseBase<Derived>::redux(const Func& func) const
typedef typename internal::redux_evaluator<Derived> ThisEvaluator;
ThisEvaluator thisEval(derived());
- return internal::redux_impl<Func, ThisEvaluator>::run(thisEval, func);
+ // The initial expression is passed to the reducer as an additional argument instead of
+ // passing it as a member of redux_evaluator, to help keep the evaluator lightweight
+ // (it no longer needs to store a reference to the original expression).
+ return internal::redux_impl<Func, ThisEvaluator>::run(thisEval, func, derived());
}
/** \returns the minimum of all coefficients of \c *this.
* \warning the result is undefined if \c *this contains NaN.
*/
template<typename Derived>
-EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar
DenseBase<Derived>::minCoeff() const
{
return derived().redux(Eigen::internal::scalar_min_op<Scalar,Scalar>());
@@ -432,7 +422,7 @@ DenseBase<Derived>::minCoeff() const
* \warning the result is undefined if \c *this contains NaN.
*/
template<typename Derived>
-EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar
DenseBase<Derived>::maxCoeff() const
{
return derived().redux(Eigen::internal::scalar_max_op<Scalar,Scalar>());
@@ -445,7 +435,7 @@ DenseBase<Derived>::maxCoeff() const
* \sa trace(), prod(), mean()
*/
template<typename Derived>
-EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar
DenseBase<Derived>::sum() const
{
if(SizeAtCompileTime==0 || (SizeAtCompileTime==Dynamic && size()==0))
@@ -458,7 +448,7 @@ DenseBase<Derived>::sum() const
* \sa trace(), prod(), sum()
*/
template<typename Derived>
-EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar
DenseBase<Derived>::mean() const
{
#ifdef __INTEL_COMPILER
@@ -479,7 +469,7 @@ DenseBase<Derived>::mean() const
* \sa sum(), mean(), trace()
*/
template<typename Derived>
-EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar
DenseBase<Derived>::prod() const
{
if(SizeAtCompileTime==0 || (SizeAtCompileTime==Dynamic && size()==0))
@@ -494,7 +484,7 @@ DenseBase<Derived>::prod() const
* \sa diagonal(), sum()
*/
template<typename Derived>
-EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar
MatrixBase<Derived>::trace() const
{
return derived().diagonal().sum();
diff --git a/Eigen/src/Core/Ref.h b/Eigen/src/Core/Ref.h
index abb1e5121..ac9502bc4 100644
--- a/Eigen/src/Core/Ref.h
+++ b/Eigen/src/Core/Ref.h
@@ -95,6 +95,8 @@ protected:
template<typename Expression>
EIGEN_DEVICE_FUNC void construct(Expression& expr)
{
+ EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(PlainObjectType,Expression);
+
if(PlainObjectType::RowsAtCompileTime==1)
{
eigen_assert(expr.rows()==1 || expr.cols()==1);
diff --git a/Eigen/src/Core/Replicate.h b/Eigen/src/Core/Replicate.h
index 9960ef884..0b2d6d743 100644
--- a/Eigen/src/Core/Replicate.h
+++ b/Eigen/src/Core/Replicate.h
@@ -115,7 +115,7 @@ template<typename MatrixType,int RowFactor,int ColFactor> class Replicate
*/
template<typename Derived>
template<int RowFactor, int ColFactor>
-const Replicate<Derived,RowFactor,ColFactor>
+EIGEN_DEVICE_FUNC const Replicate<Derived,RowFactor,ColFactor>
DenseBase<Derived>::replicate() const
{
return Replicate<Derived,RowFactor,ColFactor>(derived());
@@ -130,7 +130,7 @@ DenseBase<Derived>::replicate() const
* \sa VectorwiseOp::replicate(), DenseBase::replicate(), class Replicate
*/
template<typename ExpressionType, int Direction>
-const typename VectorwiseOp<ExpressionType,Direction>::ReplicateReturnType
+EIGEN_DEVICE_FUNC const typename VectorwiseOp<ExpressionType,Direction>::ReplicateReturnType
VectorwiseOp<ExpressionType,Direction>::replicate(Index factor) const
{
return typename VectorwiseOp<ExpressionType,Direction>::ReplicateReturnType
diff --git a/Eigen/src/Core/ReturnByValue.h b/Eigen/src/Core/ReturnByValue.h
index c44b7673b..11dc86d07 100644
--- a/Eigen/src/Core/ReturnByValue.h
+++ b/Eigen/src/Core/ReturnByValue.h
@@ -79,7 +79,7 @@ template<typename Derived> class ReturnByValue
template<typename Derived>
template<typename OtherDerived>
-Derived& DenseBase<Derived>::operator=(const ReturnByValue<OtherDerived>& other)
+EIGEN_DEVICE_FUNC Derived& DenseBase<Derived>::operator=(const ReturnByValue<OtherDerived>& other)
{
other.evalTo(derived());
return derived();
diff --git a/Eigen/src/Core/Reverse.h b/Eigen/src/Core/Reverse.h
index 0640cda2a..8b6b3ab03 100644
--- a/Eigen/src/Core/Reverse.h
+++ b/Eigen/src/Core/Reverse.h
@@ -114,7 +114,7 @@ template<typename MatrixType, int Direction> class Reverse
*
*/
template<typename Derived>
-inline typename DenseBase<Derived>::ReverseReturnType
+EIGEN_DEVICE_FUNC inline typename DenseBase<Derived>::ReverseReturnType
DenseBase<Derived>::reverse()
{
return ReverseReturnType(derived());
@@ -136,7 +136,7 @@ DenseBase<Derived>::reverse()
*
* \sa VectorwiseOp::reverseInPlace(), reverse() */
template<typename Derived>
-inline void DenseBase<Derived>::reverseInPlace()
+EIGEN_DEVICE_FUNC inline void DenseBase<Derived>::reverseInPlace()
{
if(cols()>rows())
{
@@ -201,7 +201,7 @@ struct vectorwise_reverse_inplace_impl<Horizontal>
*
* \sa DenseBase::reverseInPlace(), reverse() */
template<typename ExpressionType, int Direction>
-void VectorwiseOp<ExpressionType,Direction>::reverseInPlace()
+EIGEN_DEVICE_FUNC void VectorwiseOp<ExpressionType,Direction>::reverseInPlace()
{
internal::vectorwise_reverse_inplace_impl<Direction>::run(_expression().const_cast_derived());
}
diff --git a/Eigen/src/Core/SelfAdjointView.h b/Eigen/src/Core/SelfAdjointView.h
index 504c98f0e..2cf3fa1ef 100644
--- a/Eigen/src/Core/SelfAdjointView.h
+++ b/Eigen/src/Core/SelfAdjointView.h
@@ -71,7 +71,9 @@ template<typename _MatrixType, unsigned int UpLo> class SelfAdjointView
EIGEN_DEVICE_FUNC
explicit inline SelfAdjointView(MatrixType& matrix) : m_matrix(matrix)
- {}
+ {
+ EIGEN_STATIC_ASSERT(UpLo==Lower || UpLo==Upper,SELFADJOINTVIEW_ACCEPTS_UPPER_AND_LOWER_MODE_ONLY);
+ }
EIGEN_DEVICE_FUNC
inline Index rows() const { return m_matrix.rows(); }
@@ -189,7 +191,7 @@ template<typename _MatrixType, unsigned int UpLo> class SelfAdjointView
TriangularView<typename MatrixType::AdjointReturnType,TriMode> >::type(tmp2);
}
- typedef SelfAdjointView<const MatrixConjugateReturnType,Mode> ConjugateReturnType;
+ typedef SelfAdjointView<const MatrixConjugateReturnType,UpLo> ConjugateReturnType;
/** \sa MatrixBase::conjugate() const */
EIGEN_DEVICE_FUNC
inline const ConjugateReturnType conjugate() const
@@ -322,7 +324,7 @@ public:
/** This is the const version of MatrixBase::selfadjointView() */
template<typename Derived>
template<unsigned int UpLo>
-typename MatrixBase<Derived>::template ConstSelfAdjointViewReturnType<UpLo>::Type
+EIGEN_DEVICE_FUNC typename MatrixBase<Derived>::template ConstSelfAdjointViewReturnType<UpLo>::Type
MatrixBase<Derived>::selfadjointView() const
{
return typename ConstSelfAdjointViewReturnType<UpLo>::Type(derived());
@@ -339,7 +341,7 @@ MatrixBase<Derived>::selfadjointView() const
*/
template<typename Derived>
template<unsigned int UpLo>
-typename MatrixBase<Derived>::template SelfAdjointViewReturnType<UpLo>::Type
+EIGEN_DEVICE_FUNC typename MatrixBase<Derived>::template SelfAdjointViewReturnType<UpLo>::Type
MatrixBase<Derived>::selfadjointView()
{
return typename SelfAdjointViewReturnType<UpLo>::Type(derived());
diff --git a/Eigen/src/Core/SelfCwiseBinaryOp.h b/Eigen/src/Core/SelfCwiseBinaryOp.h
index 719ed72a5..7c89c2e23 100644
--- a/Eigen/src/Core/SelfCwiseBinaryOp.h
+++ b/Eigen/src/Core/SelfCwiseBinaryOp.h
@@ -15,33 +15,29 @@ namespace Eigen {
// TODO generalize the scalar type of 'other'
template<typename Derived>
-EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::operator*=(const Scalar& other)
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::operator*=(const Scalar& other)
{
- typedef typename Derived::PlainObject PlainObject;
internal::call_assignment(this->derived(), PlainObject::Constant(rows(),cols(),other), internal::mul_assign_op<Scalar,Scalar>());
return derived();
}
template<typename Derived>
-EIGEN_STRONG_INLINE Derived& ArrayBase<Derived>::operator+=(const Scalar& other)
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& ArrayBase<Derived>::operator+=(const Scalar& other)
{
- typedef typename Derived::PlainObject PlainObject;
internal::call_assignment(this->derived(), PlainObject::Constant(rows(),cols(),other), internal::add_assign_op<Scalar,Scalar>());
return derived();
}
template<typename Derived>
-EIGEN_STRONG_INLINE Derived& ArrayBase<Derived>::operator-=(const Scalar& other)
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& ArrayBase<Derived>::operator-=(const Scalar& other)
{
- typedef typename Derived::PlainObject PlainObject;
internal::call_assignment(this->derived(), PlainObject::Constant(rows(),cols(),other), internal::sub_assign_op<Scalar,Scalar>());
return derived();
}
template<typename Derived>
-EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::operator/=(const Scalar& other)
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::operator/=(const Scalar& other)
{
- typedef typename Derived::PlainObject PlainObject;
internal::call_assignment(this->derived(), PlainObject::Constant(rows(),cols(),other), internal::div_assign_op<Scalar,Scalar>());
return derived();
}
diff --git a/Eigen/src/Core/Solve.h b/Eigen/src/Core/Solve.h
index 960a58597..2bf940a26 100644
--- a/Eigen/src/Core/Solve.h
+++ b/Eigen/src/Core/Solve.h
@@ -34,12 +34,12 @@ template<typename Decomposition, typename RhsType,typename StorageKind> struct s
template<typename Decomposition, typename RhsType>
struct solve_traits<Decomposition,RhsType,Dense>
{
- typedef Matrix<typename RhsType::Scalar,
+ typedef typename make_proper_matrix_type<typename RhsType::Scalar,
Decomposition::ColsAtCompileTime,
RhsType::ColsAtCompileTime,
RhsType::PlainObject::Options,
Decomposition::MaxColsAtCompileTime,
- RhsType::MaxColsAtCompileTime> PlainObject;
+ RhsType::MaxColsAtCompileTime>::type PlainObject;
};
template<typename Decomposition, typename RhsType>
@@ -181,7 +181,7 @@ struct Assignment<DstXprType, Solve<CwiseUnaryOp<internal::scalar_conjugate_op<t
}
};
-} // end namepsace internal
+} // end namespace internal
} // end namespace Eigen
diff --git a/Eigen/src/Core/SolveTriangular.h b/Eigen/src/Core/SolveTriangular.h
index 049890b25..a0011d4f9 100644
--- a/Eigen/src/Core/SolveTriangular.h
+++ b/Eigen/src/Core/SolveTriangular.h
@@ -164,7 +164,7 @@ struct triangular_solver_selector<Lhs,Rhs,OnTheRight,Mode,CompleteUnrolling,1> {
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename MatrixType, unsigned int Mode>
template<int Side, typename OtherDerived>
-void TriangularViewImpl<MatrixType,Mode,Dense>::solveInPlace(const MatrixBase<OtherDerived>& _other) const
+EIGEN_DEVICE_FUNC void TriangularViewImpl<MatrixType,Mode,Dense>::solveInPlace(const MatrixBase<OtherDerived>& _other) const
{
OtherDerived& other = _other.const_cast_derived();
eigen_assert( derived().cols() == derived().rows() && ((Side==OnTheLeft && derived().cols() == other.rows()) || (Side==OnTheRight && derived().cols() == other.cols())) );
diff --git a/Eigen/src/Core/SolverBase.h b/Eigen/src/Core/SolverBase.h
index 8a4adc229..702a5485c 100644
--- a/Eigen/src/Core/SolverBase.h
+++ b/Eigen/src/Core/SolverBase.h
@@ -56,7 +56,8 @@ class SolverBase : public EigenBase<Derived>
MaxSizeAtCompileTime = (internal::size_at_compile_time<internal::traits<Derived>::MaxRowsAtCompileTime,
internal::traits<Derived>::MaxColsAtCompileTime>::ret),
IsVectorAtCompileTime = internal::traits<Derived>::MaxRowsAtCompileTime == 1
- || internal::traits<Derived>::MaxColsAtCompileTime == 1
+ || internal::traits<Derived>::MaxColsAtCompileTime == 1,
+ NumDimensions = int(MaxSizeAtCompileTime) == 1 ? 0 : bool(IsVectorAtCompileTime) ? 1 : 2
};
/** Default constructor */
diff --git a/Eigen/src/Core/StableNorm.h b/Eigen/src/Core/StableNorm.h
index d2fe1e199..77ea3c261 100644
--- a/Eigen/src/Core/StableNorm.h
+++ b/Eigen/src/Core/StableNorm.h
@@ -50,6 +50,71 @@ inline void stable_norm_kernel(const ExpressionType& bl, Scalar& ssq, Scalar& sc
ssq += (bl*invScale).squaredNorm();
}
+template<typename VectorType, typename RealScalar>
+void stable_norm_impl_inner_step(const VectorType &vec, RealScalar& ssq, RealScalar& scale, RealScalar& invScale)
+{
+ typedef typename VectorType::Scalar Scalar;
+ const Index blockSize = 4096;
+
+ typedef typename internal::nested_eval<VectorType,2>::type VectorTypeCopy;
+ typedef typename internal::remove_all<VectorTypeCopy>::type VectorTypeCopyClean;
+ const VectorTypeCopy copy(vec);
+
+ enum {
+ CanAlign = ( (int(VectorTypeCopyClean::Flags)&DirectAccessBit)
+ || (int(internal::evaluator<VectorTypeCopyClean>::Alignment)>0) // FIXME Alignment)>0 might not be enough
+ ) && (blockSize*sizeof(Scalar)*2<EIGEN_STACK_ALLOCATION_LIMIT)
+ && (EIGEN_MAX_STATIC_ALIGN_BYTES>0) // if we cannot allocate on the stack, then let's not bother about this optimization
+ };
+ typedef typename internal::conditional<CanAlign, Ref<const Matrix<Scalar,Dynamic,1,0,blockSize,1>, internal::evaluator<VectorTypeCopyClean>::Alignment>,
+ typename VectorTypeCopyClean::ConstSegmentReturnType>::type SegmentWrapper;
+ Index n = vec.size();
+
+ Index bi = internal::first_default_aligned(copy);
+ if (bi>0)
+ internal::stable_norm_kernel(copy.head(bi), ssq, scale, invScale);
+ for (; bi<n; bi+=blockSize)
+ internal::stable_norm_kernel(SegmentWrapper(copy.segment(bi,numext::mini(blockSize, n - bi))), ssq, scale, invScale);
+}
+
+template<typename VectorType>
+typename VectorType::RealScalar
+stable_norm_impl(const VectorType &vec, typename enable_if<VectorType::IsVectorAtCompileTime>::type* = 0 )
+{
+ using std::sqrt;
+ using std::abs;
+
+ Index n = vec.size();
+
+ if(n==1)
+ return abs(vec.coeff(0));
+
+ typedef typename VectorType::RealScalar RealScalar;
+ RealScalar scale(0);
+ RealScalar invScale(1);
+ RealScalar ssq(0); // sum of squares
+
+ stable_norm_impl_inner_step(vec, ssq, scale, invScale);
+
+ return scale * sqrt(ssq);
+}
+
+template<typename MatrixType>
+typename MatrixType::RealScalar
+stable_norm_impl(const MatrixType &mat, typename enable_if<!MatrixType::IsVectorAtCompileTime>::type* = 0 )
+{
+ using std::sqrt;
+
+ typedef typename MatrixType::RealScalar RealScalar;
+ RealScalar scale(0);
+ RealScalar invScale(1);
+ RealScalar ssq(0); // sum of squares
+
+ for(Index j=0; j<mat.outerSize(); ++j)
+ stable_norm_impl_inner_step(mat.innerVector(j), ssq, scale, invScale);
+ return scale * sqrt(ssq);
+}
+
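
stable_norm_impl accumulates a rescaled sum of squares block by block so that very large entries do not overflow and very small ones do not underflow, then returns scale * sqrt(ssq). A scalar sketch of the same rescaling idea in the classic LAPACK dnrm2 style (not the actual blockwise Eigen kernel):

#include <cmath>
#include <iostream>

// Rescaled sum of squares: keep ssq relative to the largest magnitude seen so far.
double stable_norm_sketch(const double* x, int n) {
  double scale = 0.0, ssq = 1.0;
  for (int i = 0; i < n; ++i) {
    double ax = std::abs(x[i]);
    if (ax == 0.0) continue;
    if (ax > scale) {                               // new largest entry: rescale the accumulator
      ssq = 1.0 + ssq * (scale / ax) * (scale / ax);
      scale = ax;
    } else {
      ssq += (ax / scale) * (ax / scale);
    }
  }
  return scale * std::sqrt(ssq);
}

int main() {
  double v[3] = {3e200, 4e200, 0.0};               // a naive sum of squares would overflow
  std::cout << stable_norm_sketch(v, 3) << "\n";   // prints 5e+200
}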
template<typename Derived>
inline typename NumTraits<typename traits<Derived>::Scalar>::Real
blueNorm_impl(const EigenBase<Derived>& _vec)
@@ -74,7 +139,7 @@ blueNorm_impl(const EigenBase<Derived>& _vec)
// are used. For any specific computer, each of the assignment
// statements can be replaced
ibeta = std::numeric_limits<RealScalar>::radix; // base for floating-point numbers
- it = std::numeric_limits<RealScalar>::digits; // number of base-beta digits in mantissa
+ it = NumTraits<RealScalar>::digits(); // number of base-beta digits in mantissa
iemin = std::numeric_limits<RealScalar>::min_exponent; // minimum exponent
iemax = std::numeric_limits<RealScalar>::max_exponent; // maximum exponent
rbig = (std::numeric_limits<RealScalar>::max)(); // largest floating-point number
@@ -98,12 +163,16 @@ blueNorm_impl(const EigenBase<Derived>& _vec)
RealScalar asml = RealScalar(0);
RealScalar amed = RealScalar(0);
RealScalar abig = RealScalar(0);
- for(typename Derived::InnerIterator it(vec, 0); it; ++it)
+
+ for(Index j=0; j<vec.outerSize(); ++j)
{
- RealScalar ax = abs(it.value());
- if(ax > ab2) abig += numext::abs2(ax*s2m);
- else if(ax < b1) asml += numext::abs2(ax*s1m);
- else amed += numext::abs2(ax);
+ for(typename Derived::InnerIterator it(vec, j); it; ++it)
+ {
+ RealScalar ax = abs(it.value());
+ if(ax > ab2) abig += numext::abs2(ax*s2m);
+ else if(ax < b1) asml += numext::abs2(ax*s1m);
+ else amed += numext::abs2(ax);
+ }
}
if(amed!=amed)
return amed; // we got a NaN
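
With the loop over outerSize(), this accumulation now visits every inner vector of the expression instead of only the first one, so the robust norms behave consistently for genuine matrices as well as for vectors. A small check using plain Eigen API:

#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::MatrixXd m(2, 2);
  m << 3, 0,
       0, 4;
  // All three robust norms should agree with the Frobenius norm, here 5.
  std::cout << m.blueNorm()   << "\n"
            << m.stableNorm() << "\n"
            << m.hypotNorm()  << "\n";
}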
@@ -156,35 +225,7 @@ template<typename Derived>
inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
MatrixBase<Derived>::stableNorm() const
{
- using std::sqrt;
- using std::abs;
- const Index blockSize = 4096;
- RealScalar scale(0);
- RealScalar invScale(1);
- RealScalar ssq(0); // sum of square
-
- typedef typename internal::nested_eval<Derived,2>::type DerivedCopy;
- typedef typename internal::remove_all<DerivedCopy>::type DerivedCopyClean;
- DerivedCopy copy(derived());
-
- enum {
- CanAlign = ( (int(DerivedCopyClean::Flags)&DirectAccessBit)
- || (int(internal::evaluator<DerivedCopyClean>::Alignment)>0) // FIXME Alignment)>0 might not be enough
- ) && (blockSize*sizeof(Scalar)*2<EIGEN_STACK_ALLOCATION_LIMIT) // ifwe cannot allocate on the stack, then let's not bother about this optimization
- };
- typedef typename internal::conditional<CanAlign, Ref<const Matrix<Scalar,Dynamic,1,0,blockSize,1>, internal::evaluator<DerivedCopyClean>::Alignment>,
- typename DerivedCopyClean::ConstSegmentReturnType>::type SegmentWrapper;
- Index n = size();
-
- if(n==1)
- return abs(this->coeff(0));
-
- Index bi = internal::first_default_aligned(copy);
- if (bi>0)
- internal::stable_norm_kernel(copy.head(bi), ssq, scale, invScale);
- for (; bi<n; bi+=blockSize)
- internal::stable_norm_kernel(SegmentWrapper(copy.segment(bi,numext::mini(blockSize, n - bi))), ssq, scale, invScale);
- return scale * sqrt(ssq);
+ return internal::stable_norm_impl(derived());
}
/** \returns the \em l2 norm of \c *this using the Blue's algorithm.
@@ -212,7 +253,10 @@ template<typename Derived>
inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
MatrixBase<Derived>::hypotNorm() const
{
- return this->cwiseAbs().redux(internal::scalar_hypot_op<RealScalar>());
+ if(size()==1)
+ return numext::abs(coeff(0,0));
+ else
+ return this->cwiseAbs().redux(internal::scalar_hypot_op<RealScalar>());
}
} // end namespace Eigen
diff --git a/Eigen/src/Core/Transpose.h b/Eigen/src/Core/Transpose.h
index 79b767bcc..d7c204579 100644
--- a/Eigen/src/Core/Transpose.h
+++ b/Eigen/src/Core/Transpose.h
@@ -79,6 +79,7 @@ template<typename MatrixType> class Transpose
nestedExpression() { return m_matrix; }
/** \internal */
+ EIGEN_DEVICE_FUNC
void resize(Index nrows, Index ncols) {
m_matrix.resize(ncols,nrows);
}
@@ -168,7 +169,7 @@ template<typename MatrixType> class TransposeImpl<MatrixType,Dense>
*
* \sa transposeInPlace(), adjoint() */
template<typename Derived>
-inline Transpose<Derived>
+EIGEN_DEVICE_FUNC inline Transpose<Derived>
DenseBase<Derived>::transpose()
{
return TransposeReturnType(derived());
@@ -180,7 +181,7 @@ DenseBase<Derived>::transpose()
*
* \sa transposeInPlace(), adjoint() */
template<typename Derived>
-inline typename DenseBase<Derived>::ConstTransposeReturnType
+EIGEN_DEVICE_FUNC inline typename DenseBase<Derived>::ConstTransposeReturnType
DenseBase<Derived>::transpose() const
{
return ConstTransposeReturnType(derived());
@@ -206,7 +207,7 @@ DenseBase<Derived>::transpose() const
*
* \sa adjointInPlace(), transpose(), conjugate(), class Transpose, class internal::scalar_conjugate_op */
template<typename Derived>
-inline const typename MatrixBase<Derived>::AdjointReturnType
+EIGEN_DEVICE_FUNC inline const typename MatrixBase<Derived>::AdjointReturnType
MatrixBase<Derived>::adjoint() const
{
return AdjointReturnType(this->transpose());
@@ -281,7 +282,7 @@ struct inplace_transpose_selector<MatrixType,false,MatchPacketSize> { // non squ
*
* \sa transpose(), adjoint(), adjointInPlace() */
template<typename Derived>
-inline void DenseBase<Derived>::transposeInPlace()
+EIGEN_DEVICE_FUNC inline void DenseBase<Derived>::transposeInPlace()
{
eigen_assert((rows() == cols() || (RowsAtCompileTime == Dynamic && ColsAtCompileTime == Dynamic))
&& "transposeInPlace() called on a non-square non-resizable matrix");
@@ -312,7 +313,7 @@ inline void DenseBase<Derived>::transposeInPlace()
*
* \sa transpose(), adjoint(), transposeInPlace() */
template<typename Derived>
-inline void MatrixBase<Derived>::adjointInPlace()
+EIGEN_DEVICE_FUNC inline void MatrixBase<Derived>::adjointInPlace()
{
derived() = adjoint().eval();
}
diff --git a/Eigen/src/Core/Transpositions.h b/Eigen/src/Core/Transpositions.h
index 19c17bb4a..81a4a5855 100644
--- a/Eigen/src/Core/Transpositions.h
+++ b/Eigen/src/Core/Transpositions.h
@@ -84,7 +84,7 @@ class TranspositionsBase
}
// FIXME: do we want such methods ?
- // might be usefull when the target matrix expression is complex, e.g.:
+ // might be useful when the target matrix expression is complex, e.g.:
// object.matrix().block(..,..,..,..) = trans * object.matrix().block(..,..,..,..);
/*
template<typename MatrixType>
@@ -384,7 +384,7 @@ class Transpose<TranspositionsBase<TranspositionsDerived> >
const Product<OtherDerived, Transpose, AliasFreeProduct>
operator*(const MatrixBase<OtherDerived>& matrix, const Transpose& trt)
{
- return Product<OtherDerived, Transpose, AliasFreeProduct>(matrix.derived(), trt.derived());
+ return Product<OtherDerived, Transpose, AliasFreeProduct>(matrix.derived(), trt);
}
/** \returns the \a matrix with the inverse transpositions applied to the rows.
diff --git a/Eigen/src/Core/TriangularMatrix.h b/Eigen/src/Core/TriangularMatrix.h
index 667ef09dc..521de6160 100644
--- a/Eigen/src/Core/TriangularMatrix.h
+++ b/Eigen/src/Core/TriangularMatrix.h
@@ -65,6 +65,7 @@ template<typename Derived> class TriangularBase : public EigenBase<Derived>
inline Index innerStride() const { return derived().innerStride(); }
// dummy resize function
+ EIGEN_DEVICE_FUNC
void resize(Index rows, Index cols)
{
EIGEN_UNUSED_VARIABLE(rows);
@@ -470,7 +471,7 @@ template<typename _MatrixType, unsigned int _Mode> class TriangularViewImpl<_Mat
* \a Side==OnTheLeft (the default), or the right-inverse-multiply \a other * inverse(\c *this) if
* \a Side==OnTheRight.
*
- * Note that the template parameter \c Side can be ommitted, in which case \c Side==OnTheLeft
+ * Note that the template parameter \c Side can be omitted, in which case \c Side==OnTheLeft
*
* The matrix \c *this must be triangular and invertible (i.e., all the coefficients of the
* diagonal must be non zero). It works as a forward (resp. backward) substitution if \c *this
@@ -488,7 +489,6 @@ template<typename _MatrixType, unsigned int _Mode> class TriangularViewImpl<_Mat
* \sa TriangularView::solveInPlace()
*/
template<int Side, typename Other>
- EIGEN_DEVICE_FUNC
inline const internal::triangular_solve_retval<Side,TriangularViewType, Other>
solve(const MatrixBase<Other>& other) const;
@@ -497,7 +497,7 @@ template<typename _MatrixType, unsigned int _Mode> class TriangularViewImpl<_Mat
* \warning The parameter is only marked 'const' to make the C++ compiler accept a temporary expression here.
* This function will const_cast it, so constness isn't honored here.
*
- * Note that the template parameter \c Side can be ommitted, in which case \c Side==OnTheLeft
+ * Note that the template parameter \c Side can be omitted, in which case \c Side==OnTheLeft
*
* See TriangularView:solve() for the details.
*/
@@ -554,7 +554,7 @@ template<typename _MatrixType, unsigned int _Mode> class TriangularViewImpl<_Mat
// FIXME should we keep that possibility
template<typename MatrixType, unsigned int Mode>
template<typename OtherDerived>
-inline TriangularView<MatrixType, Mode>&
+EIGEN_DEVICE_FUNC inline TriangularView<MatrixType, Mode>&
TriangularViewImpl<MatrixType, Mode, Dense>::operator=(const MatrixBase<OtherDerived>& other)
{
internal::call_assignment_no_alias(derived(), other.derived(), internal::assign_op<Scalar,typename OtherDerived::Scalar>());
@@ -564,7 +564,7 @@ TriangularViewImpl<MatrixType, Mode, Dense>::operator=(const MatrixBase<OtherDer
// FIXME should we keep that possibility
template<typename MatrixType, unsigned int Mode>
template<typename OtherDerived>
-void TriangularViewImpl<MatrixType, Mode, Dense>::lazyAssign(const MatrixBase<OtherDerived>& other)
+EIGEN_DEVICE_FUNC void TriangularViewImpl<MatrixType, Mode, Dense>::lazyAssign(const MatrixBase<OtherDerived>& other)
{
internal::call_assignment_no_alias(derived(), other.template triangularView<Mode>());
}
@@ -573,7 +573,7 @@ void TriangularViewImpl<MatrixType, Mode, Dense>::lazyAssign(const MatrixBase<Ot
template<typename MatrixType, unsigned int Mode>
template<typename OtherDerived>
-inline TriangularView<MatrixType, Mode>&
+EIGEN_DEVICE_FUNC inline TriangularView<MatrixType, Mode>&
TriangularViewImpl<MatrixType, Mode, Dense>::operator=(const TriangularBase<OtherDerived>& other)
{
eigen_assert(Mode == int(OtherDerived::Mode));
@@ -583,7 +583,7 @@ TriangularViewImpl<MatrixType, Mode, Dense>::operator=(const TriangularBase<Othe
template<typename MatrixType, unsigned int Mode>
template<typename OtherDerived>
-void TriangularViewImpl<MatrixType, Mode, Dense>::lazyAssign(const TriangularBase<OtherDerived>& other)
+EIGEN_DEVICE_FUNC void TriangularViewImpl<MatrixType, Mode, Dense>::lazyAssign(const TriangularBase<OtherDerived>& other)
{
eigen_assert(Mode == int(OtherDerived::Mode));
internal::call_assignment_no_alias(derived(), other.derived());
@@ -598,7 +598,7 @@ void TriangularViewImpl<MatrixType, Mode, Dense>::lazyAssign(const TriangularBas
* If the matrix is triangular, the opposite part is set to zero. */
template<typename Derived>
template<typename DenseDerived>
-void TriangularBase<Derived>::evalTo(MatrixBase<DenseDerived> &other) const
+EIGEN_DEVICE_FUNC void TriangularBase<Derived>::evalTo(MatrixBase<DenseDerived> &other) const
{
evalToLazy(other.derived());
}
@@ -624,6 +624,7 @@ void TriangularBase<Derived>::evalTo(MatrixBase<DenseDerived> &other) const
*/
template<typename Derived>
template<unsigned int Mode>
+EIGEN_DEVICE_FUNC
typename MatrixBase<Derived>::template TriangularViewReturnType<Mode>::Type
MatrixBase<Derived>::triangularView()
{
@@ -633,6 +634,7 @@ MatrixBase<Derived>::triangularView()
/** This is the const version of MatrixBase::triangularView() */
template<typename Derived>
template<unsigned int Mode>
+EIGEN_DEVICE_FUNC
typename MatrixBase<Derived>::template ConstTriangularViewReturnType<Mode>::Type
MatrixBase<Derived>::triangularView() const
{
@@ -715,6 +717,7 @@ struct unary_evaluator<TriangularView<MatrixType,Mode>, IndexBased>
{
typedef TriangularView<MatrixType,Mode> XprType;
typedef evaluator<typename internal::remove_all<MatrixType>::type> Base;
+ EIGEN_DEVICE_FUNC
unary_evaluator(const XprType &xpr) : Base(xpr.nestedExpression()) {}
};
@@ -930,7 +933,7 @@ struct triangular_assignment_loop<Kernel, Mode, Dynamic, SetOpposite>
* If the matrix is triangular, the opposite part is set to zero. */
template<typename Derived>
template<typename DenseDerived>
-void TriangularBase<Derived>::evalToLazy(MatrixBase<DenseDerived> &other) const
+EIGEN_DEVICE_FUNC void TriangularBase<Derived>::evalToLazy(MatrixBase<DenseDerived> &other) const
{
other.derived().resize(this->rows(), this->cols());
internal::call_triangular_assignment_loop<Derived::Mode,(Derived::Mode&SelfAdjoint)==0 /* SetOpposite */>(other.derived(), derived().nestedExpression());
diff --git a/Eigen/src/Core/VectorBlock.h b/Eigen/src/Core/VectorBlock.h
index d72fbf7e9..0ede5d58e 100644
--- a/Eigen/src/Core/VectorBlock.h
+++ b/Eigen/src/Core/VectorBlock.h
@@ -35,7 +35,7 @@ struct traits<VectorBlock<VectorType, Size> >
* It is the return type of DenseBase::segment(Index,Index) and DenseBase::segment<int>(Index) and
* most of the time this is the only way it is used.
*
- * However, if you want to directly maniputate sub-vector expressions,
+ * However, if you want to directly manipulate sub-vector expressions,
* for instance if you want to write a function returning such an expression, you
* will need to use this class.
*
diff --git a/Eigen/src/Core/VectorwiseOp.h b/Eigen/src/Core/VectorwiseOp.h
index 4fe267e9f..893bc796f 100644
--- a/Eigen/src/Core/VectorwiseOp.h
+++ b/Eigen/src/Core/VectorwiseOp.h
@@ -670,7 +670,7 @@ template<typename ExpressionType, int Direction> class VectorwiseOp
* \sa rowwise(), class VectorwiseOp, \ref TutorialReductionsVisitorsBroadcasting
*/
template<typename Derived>
-inline typename DenseBase<Derived>::ColwiseReturnType
+EIGEN_DEVICE_FUNC inline typename DenseBase<Derived>::ColwiseReturnType
DenseBase<Derived>::colwise()
{
return ColwiseReturnType(derived());
@@ -684,7 +684,7 @@ DenseBase<Derived>::colwise()
* \sa colwise(), class VectorwiseOp, \ref TutorialReductionsVisitorsBroadcasting
*/
template<typename Derived>
-inline typename DenseBase<Derived>::RowwiseReturnType
+EIGEN_DEVICE_FUNC inline typename DenseBase<Derived>::RowwiseReturnType
DenseBase<Derived>::rowwise()
{
return RowwiseReturnType(derived());
diff --git a/Eigen/src/Core/arch/AVX/Complex.h b/Eigen/src/Core/arch/AVX/Complex.h
index 99439c8aa..7fa61969d 100644
--- a/Eigen/src/Core/arch/AVX/Complex.h
+++ b/Eigen/src/Core/arch/AVX/Complex.h
@@ -204,23 +204,7 @@ template<> struct conj_helper<Packet4cf, Packet4cf, true,true>
}
};
-template<> struct conj_helper<Packet8f, Packet4cf, false,false>
-{
- EIGEN_STRONG_INLINE Packet4cf pmadd(const Packet8f& x, const Packet4cf& y, const Packet4cf& c) const
- { return padd(c, pmul(x,y)); }
-
- EIGEN_STRONG_INLINE Packet4cf pmul(const Packet8f& x, const Packet4cf& y) const
- { return Packet4cf(Eigen::internal::pmul(x, y.v)); }
-};
-
-template<> struct conj_helper<Packet4cf, Packet8f, false,false>
-{
- EIGEN_STRONG_INLINE Packet4cf pmadd(const Packet4cf& x, const Packet8f& y, const Packet4cf& c) const
- { return padd(c, pmul(x,y)); }
-
- EIGEN_STRONG_INLINE Packet4cf pmul(const Packet4cf& x, const Packet8f& y) const
- { return Packet4cf(Eigen::internal::pmul(x.v, y)); }
-};
+EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet4cf,Packet8f)
template<> EIGEN_STRONG_INLINE Packet4cf pdiv<Packet4cf>(const Packet4cf& a, const Packet4cf& b)
{
@@ -400,23 +384,7 @@ template<> struct conj_helper<Packet2cd, Packet2cd, true,true>
}
};
-template<> struct conj_helper<Packet4d, Packet2cd, false,false>
-{
- EIGEN_STRONG_INLINE Packet2cd pmadd(const Packet4d& x, const Packet2cd& y, const Packet2cd& c) const
- { return padd(c, pmul(x,y)); }
-
- EIGEN_STRONG_INLINE Packet2cd pmul(const Packet4d& x, const Packet2cd& y) const
- { return Packet2cd(Eigen::internal::pmul(x, y.v)); }
-};
-
-template<> struct conj_helper<Packet2cd, Packet4d, false,false>
-{
- EIGEN_STRONG_INLINE Packet2cd pmadd(const Packet2cd& x, const Packet4d& y, const Packet2cd& c) const
- { return padd(c, pmul(x,y)); }
-
- EIGEN_STRONG_INLINE Packet2cd pmul(const Packet2cd& x, const Packet4d& y) const
- { return Packet2cd(Eigen::internal::pmul(x.v, y)); }
-};
+EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet2cd,Packet4d)
template<> EIGEN_STRONG_INLINE Packet2cd pdiv<Packet2cd>(const Packet2cd& a, const Packet2cd& b)
{
diff --git a/Eigen/src/Core/arch/AVX/PacketMath.h b/Eigen/src/Core/arch/AVX/PacketMath.h
index 636230944..774e64981 100644
--- a/Eigen/src/Core/arch/AVX/PacketMath.h
+++ b/Eigen/src/Core/arch/AVX/PacketMath.h
@@ -318,9 +318,9 @@ template<> EIGEN_STRONG_INLINE void pstore1<Packet8i>(int* to, const int& a)
}
#ifndef EIGEN_VECTORIZE_AVX512
-template<> EIGEN_STRONG_INLINE void prefetch<float>(const float* addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
-template<> EIGEN_STRONG_INLINE void prefetch<double>(const double* addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
-template<> EIGEN_STRONG_INLINE void prefetch<int>(const int* addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
+template<> EIGEN_STRONG_INLINE void prefetch<float>(const float* addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
+template<> EIGEN_STRONG_INLINE void prefetch<double>(const double* addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
+template<> EIGEN_STRONG_INLINE void prefetch<int>(const int* addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
#endif
template<> EIGEN_STRONG_INLINE float pfirst<Packet8f>(const Packet8f& a) {
@@ -343,9 +343,12 @@ template<> EIGEN_STRONG_INLINE Packet4d preverse(const Packet4d& a)
{
__m256d tmp = _mm256_shuffle_pd(a,a,5);
return _mm256_permute2f128_pd(tmp, tmp, 1);
-
+ #if 0
+ // This version is unlikely to be faster as _mm256_shuffle_pd and _mm256_permute_pd
+ // exhibit the same latency/throughput, but it is here for future reference/benchmarking...
__m256d swap_halves = _mm256_permute2f128_pd(a,a,1);
return _mm256_permute_pd(swap_halves,5);
+ #endif
}
// pabs should be ok
@@ -412,7 +415,7 @@ template<> EIGEN_STRONG_INLINE double predux<Packet4d>(const Packet4d& a)
return predux(Packet2d(_mm_add_pd(_mm256_castpd256_pd128(a),_mm256_extractf128_pd(a,1))));
}
-template<> EIGEN_STRONG_INLINE Packet4f predux_downto4<Packet8f>(const Packet8f& a)
+template<> EIGEN_STRONG_INLINE Packet4f predux_half_dowto4<Packet8f>(const Packet8f& a)
{
return _mm_add_ps(_mm256_castps256_ps128(a),_mm256_extractf128_ps(a,1));
}
diff --git a/Eigen/src/Core/arch/AVX512/MathFunctions.h b/Eigen/src/Core/arch/AVX512/MathFunctions.h
index 399be0ee4..ba1246722 100644
--- a/Eigen/src/Core/arch/AVX512/MathFunctions.h
+++ b/Eigen/src/Core/arch/AVX512/MathFunctions.h
@@ -88,9 +88,9 @@ plog<Packet16f>(const Packet16f& _x) {
// x = x + x - 1.0;
// } else { x = x - 1.0; }
__mmask16 mask = _mm512_cmp_ps_mask(x, p16f_cephes_SQRTHF, _CMP_LT_OQ);
- Packet16f tmp = _mm512_mask_blend_ps(mask, x, _mm512_setzero_ps());
+ Packet16f tmp = _mm512_mask_blend_ps(mask, _mm512_setzero_ps(), x);
x = psub(x, p16f_1);
- e = psub(e, _mm512_mask_blend_ps(mask, p16f_1, _mm512_setzero_ps()));
+ e = psub(e, _mm512_mask_blend_ps(mask, _mm512_setzero_ps(), p16f_1));
x = padd(x, tmp);
Packet16f x2 = pmul(x, x);
@@ -119,8 +119,9 @@ plog<Packet16f>(const Packet16f& _x) {
x = padd(x, y2);
// Filter out invalid inputs, i.e. negative arg will be NAN, 0 will be -INF.
- return _mm512_mask_blend_ps(iszero_mask, p16f_minus_inf,
- _mm512_mask_blend_ps(invalid_mask, p16f_nan, x));
+ return _mm512_mask_blend_ps(iszero_mask,
+ _mm512_mask_blend_ps(invalid_mask, x, p16f_nan),
+ p16f_minus_inf);
}
#endif
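
The operand swaps in these hunks follow from the semantics of _mm512_mask_blend_ps/pd: lane i of the result is taken from the second vector operand where the mask bit is set, and from the first one otherwise, so the previous argument order picked the wrong sides. A scalar model of that blend, for illustration only:

#include <array>
#include <cstddef>
#include <iostream>

// Models dst[i] = mask[i] ? b[i] : a[i], the behaviour of _mm512_mask_blend_ps.
template<std::size_t N>
std::array<float, N> mask_blend(unsigned mask, const std::array<float, N>& a,
                                const std::array<float, N>& b) {
  std::array<float, N> r{};
  for (std::size_t i = 0; i < N; ++i)
    r[i] = ((mask >> i) & 1u) ? b[i] : a[i];
  return r;
}

int main() {
  std::array<float, 4> a{0, 0, 0, 0}, b{1, 2, 3, 4};
  auto r = mask_blend(0b0101u, a, b);
  std::cout << r[0] << " " << r[1] << " " << r[2] << " " << r[3] << "\n";  // 1 0 3 0
}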
@@ -257,50 +258,39 @@ pexp<Packet8d>(const Packet8d& _x) {
template <>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet16f
psqrt<Packet16f>(const Packet16f& _x) {
- _EIGEN_DECLARE_CONST_Packet16f(one_point_five, 1.5f);
- _EIGEN_DECLARE_CONST_Packet16f(minus_half, -0.5f);
- _EIGEN_DECLARE_CONST_Packet16f_FROM_INT(flt_min, 0x00800000);
-
- Packet16f neg_half = pmul(_x, p16f_minus_half);
+ Packet16f neg_half = pmul(_x, pset1<Packet16f>(-.5f));
+ __mmask16 denormal_mask = _mm512_kand(
+ _mm512_cmp_ps_mask(_x, pset1<Packet16f>((std::numeric_limits<float>::min)()),
+ _CMP_LT_OQ),
+ _mm512_cmp_ps_mask(_x, _mm512_setzero_ps(), _CMP_GE_OQ));
- // select only the inverse sqrt of positive normal inputs (denormals are
- // flushed to zero and cause infs as well).
- __mmask16 non_zero_mask = _mm512_cmp_ps_mask(_x, p16f_flt_min, _CMP_GE_OQ);
- Packet16f x = _mm512_mask_blend_ps(non_zero_mask, _mm512_rsqrt14_ps(_x),
- _mm512_setzero_ps());
+ Packet16f x = _mm512_rsqrt14_ps(_x);
// Do a single step of Newton's iteration.
- x = pmul(x, pmadd(neg_half, pmul(x, x), p16f_one_point_five));
+ x = pmul(x, pmadd(neg_half, pmul(x, x), pset1<Packet16f>(1.5f)));
- // Multiply the original _x by it's reciprocal square root to extract the
- // square root.
- return pmul(_x, x);
+ // Flush results for denormals to zero.
+ return _mm512_mask_blend_ps(denormal_mask, pmul(_x,x), _mm512_setzero_ps());
}
template <>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8d
psqrt<Packet8d>(const Packet8d& _x) {
- _EIGEN_DECLARE_CONST_Packet8d(one_point_five, 1.5);
- _EIGEN_DECLARE_CONST_Packet8d(minus_half, -0.5);
- _EIGEN_DECLARE_CONST_Packet8d_FROM_INT64(dbl_min, 0x0010000000000000LL);
-
- Packet8d neg_half = pmul(_x, p8d_minus_half);
+ Packet8d neg_half = pmul(_x, pset1<Packet8d>(-.5f));
+ __mmask16 denormal_mask = _mm512_kand(
+ _mm512_cmp_pd_mask(_x, pset1<Packet8d>((std::numeric_limits<double>::min)()),
+ _CMP_LT_OQ),
+ _mm512_cmp_pd_mask(_x, _mm512_setzero_pd(), _CMP_GE_OQ));
- // select only the inverse sqrt of positive normal inputs (denormals are
- // flushed to zero and cause infs as well).
- __mmask8 non_zero_mask = _mm512_cmp_pd_mask(_x, p8d_dbl_min, _CMP_GE_OQ);
- Packet8d x = _mm512_mask_blend_pd(non_zero_mask, _mm512_rsqrt14_pd(_x),
- _mm512_setzero_pd());
+ Packet8d x = _mm512_rsqrt14_pd(_x);
- // Do a first step of Newton's iteration.
- x = pmul(x, pmadd(neg_half, pmul(x, x), p8d_one_point_five));
+ // Do a single step of Newton's iteration.
+ x = pmul(x, pmadd(neg_half, pmul(x, x), pset1<Packet8d>(1.5f)));
// Do a second step of Newton's iteration.
- x = pmul(x, pmadd(neg_half, pmul(x, x), p8d_one_point_five));
+ x = pmul(x, pmadd(neg_half, pmul(x, x), pset1<Packet8d>(1.5f)));
- // Multiply the original _x by it's reciprocal square root to extract the
- // square root.
- return pmul(_x, x);
+ return _mm512_mask_blend_pd(denormal_mask, pmul(_x,x), _mm512_setzero_pd());
}
#else
template <>
@@ -333,20 +323,18 @@ prsqrt<Packet16f>(const Packet16f& _x) {
// select only the inverse sqrt of positive normal inputs (denormals are
// flushed to zero and cause infs as well).
__mmask16 le_zero_mask = _mm512_cmp_ps_mask(_x, p16f_flt_min, _CMP_LT_OQ);
- Packet16f x = _mm512_mask_blend_ps(le_zero_mask, _mm512_setzero_ps(),
- _mm512_rsqrt14_ps(_x));
+ Packet16f x = _mm512_mask_blend_ps(le_zero_mask, _mm512_rsqrt14_ps(_x), _mm512_setzero_ps());
// Fill in NaNs and Infs for the negative/zero entries.
__mmask16 neg_mask = _mm512_cmp_ps_mask(_x, _mm512_setzero_ps(), _CMP_LT_OQ);
Packet16f infs_and_nans = _mm512_mask_blend_ps(
- neg_mask, p16f_nan,
- _mm512_mask_blend_ps(le_zero_mask, p16f_inf, _mm512_setzero_ps()));
+ neg_mask, _mm512_mask_blend_ps(le_zero_mask, _mm512_setzero_ps(), p16f_inf), p16f_nan);
// Do a single step of Newton's iteration.
x = pmul(x, pmadd(neg_half, pmul(x, x), p16f_one_point_five));
// Insert NaNs and Infs in all the right places.
- return _mm512_mask_blend_ps(le_zero_mask, infs_and_nans, x);
+ return _mm512_mask_blend_ps(le_zero_mask, x, infs_and_nans);
}
template <>
@@ -363,14 +351,12 @@ prsqrt<Packet8d>(const Packet8d& _x) {
// select only the inverse sqrt of positive normal inputs (denormals are
// flushed to zero and cause infs as well).
__mmask8 le_zero_mask = _mm512_cmp_pd_mask(_x, p8d_dbl_min, _CMP_LT_OQ);
- Packet8d x = _mm512_mask_blend_pd(le_zero_mask, _mm512_setzero_pd(),
- _mm512_rsqrt14_pd(_x));
+ Packet8d x = _mm512_mask_blend_pd(le_zero_mask, _mm512_rsqrt14_pd(_x), _mm512_setzero_pd());
// Fill in NaNs and Infs for the negative/zero entries.
__mmask8 neg_mask = _mm512_cmp_pd_mask(_x, _mm512_setzero_pd(), _CMP_LT_OQ);
Packet8d infs_and_nans = _mm512_mask_blend_pd(
- neg_mask, p8d_nan,
- _mm512_mask_blend_pd(le_zero_mask, p8d_inf, _mm512_setzero_pd()));
+ neg_mask, _mm512_mask_blend_pd(le_zero_mask, _mm512_setzero_pd(), p8d_inf), p8d_nan);
// Do a first step of Newton's iteration.
x = pmul(x, pmadd(neg_half, pmul(x, x), p8d_one_point_five));
@@ -379,9 +365,9 @@ prsqrt<Packet8d>(const Packet8d& _x) {
x = pmul(x, pmadd(neg_half, pmul(x, x), p8d_one_point_five));
// Insert NaNs and Infs in all the right places.
- return _mm512_mask_blend_pd(le_zero_mask, infs_and_nans, x);
+ return _mm512_mask_blend_pd(le_zero_mask, x, infs_and_nans);
}
-#else
+#elif defined(EIGEN_VECTORIZE_AVX512ER)
template <>
EIGEN_STRONG_INLINE Packet16f prsqrt<Packet16f>(const Packet16f& x) {
return _mm512_rsqrt28_ps(x);
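
The rewritten psqrt builds sqrt(x) as x * rsqrt(x): _mm512_rsqrt14_ps/_pd provides a roughly 14-bit estimate, one Newton step (two for the double path) refines it, and the final blend flushes non-negative inputs below the smallest normal to zero. A scalar sketch of the refinement, where estimate_rsqrt() is a stand-in for the hardware estimate and is an assumption for illustration:

    #include <cmath>

    // Crude stand-in for _mm512_rsqrt14_*: a deliberately perturbed estimate.
    static double estimate_rsqrt(double x) { return 1.001 / std::sqrt(x); }

    double sqrt_via_newton(double x, int steps) {
      double y = estimate_rsqrt(x);
      for (int i = 0; i < steps; ++i)
        y = y * (1.5 - 0.5 * x * y * y);  // Newton step on f(y) = 1/y^2 - x
      return x * y;                       // sqrt(x) = x * rsqrt(x)
    }
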
diff --git a/Eigen/src/Core/arch/AVX512/PacketMath.h b/Eigen/src/Core/arch/AVX512/PacketMath.h
index 12b897572..9fbb256a1 100644
--- a/Eigen/src/Core/arch/AVX512/PacketMath.h
+++ b/Eigen/src/Core/arch/AVX512/PacketMath.h
@@ -54,6 +54,7 @@ template<> struct packet_traits<float> : default_packet_traits
AlignedOnScalar = 1,
size = 16,
HasHalfPacket = 1,
+ HasBlend = 0,
#if EIGEN_GNUC_AT_LEAST(5, 3)
#ifdef EIGEN_VECTORIZE_AVX512DQ
HasLog = 1,
@@ -470,6 +471,8 @@ EIGEN_STRONG_INLINE Packet16f ploaddup<Packet16f>(const float* from) {
__m512 pairs = _mm512_permute_ps(even_elements, _MM_SHUFFLE(2, 2, 0, 0));
return pairs;
}
+
+#ifdef EIGEN_VECTORIZE_AVX512DQ
// Loads 4 doubles from memory and returns the packet {a0, a0, a1, a1, a2, a2,
// a3, a3}
template <>
@@ -481,6 +484,17 @@ EIGEN_STRONG_INLINE Packet8d ploaddup<Packet8d>(const double* from) {
x = _mm512_insertf64x2(x, _mm_loaddup_pd(&from[3]), 3);
return x;
}
+#else
+template <>
+EIGEN_STRONG_INLINE Packet8d ploaddup<Packet8d>(const double* from) {
+ __m512d x = _mm512_setzero_pd();
+ x = _mm512_mask_broadcastsd_pd(x, 0x3<<0, _mm_load_sd(from+0));
+ x = _mm512_mask_broadcastsd_pd(x, 0x3<<2, _mm_load_sd(from+1));
+ x = _mm512_mask_broadcastsd_pd(x, 0x3<<4, _mm_load_sd(from+2));
+ x = _mm512_mask_broadcastsd_pd(x, 0x3<<6, _mm_load_sd(from+3));
+ return x;
+}
+#endif
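
Without AVX512DQ the patch assembles ploaddup<Packet8d> from masked broadcasts: mask 0x3 << 2*i selects lanes 2*i and 2*i+1 for the i-th source double. A scalar sketch of the resulting packet, array-based for illustration only:

    // out = {a0, a0, a1, a1, a2, a2, a3, a3}
    void ploaddup_sketch(const double from[4], double out[8]) {
      for (int i = 0; i < 4; ++i)
        out[2 * i] = out[2 * i + 1] = from[i];
    }
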
// Loads 4 floats from memory and returns the packet
// {a0, a0, a0, a0, a1, a1, a1, a1, a2, a2, a2, a2, a3, a3, a3, a3}
@@ -537,7 +551,7 @@ EIGEN_STRONG_INLINE void pstoreu<int>(int* to, const Packet16i& from) {
template <>
EIGEN_DEVICE_FUNC inline Packet16f pgather<float, Packet16f>(const float* from,
Index stride) {
- Packet16i stride_vector = _mm512_set1_epi32(stride);
+ Packet16i stride_vector = _mm512_set1_epi32(convert_index<int>(stride));
Packet16i stride_multiplier =
_mm512_set_epi32(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
Packet16i indices = _mm512_mullo_epi32(stride_vector, stride_multiplier);
@@ -547,7 +561,7 @@ EIGEN_DEVICE_FUNC inline Packet16f pgather<float, Packet16f>(const float* from,
template <>
EIGEN_DEVICE_FUNC inline Packet8d pgather<double, Packet8d>(const double* from,
Index stride) {
- Packet8i stride_vector = _mm256_set1_epi32(stride);
+ Packet8i stride_vector = _mm256_set1_epi32(convert_index<int>(stride));
Packet8i stride_multiplier = _mm256_set_epi32(7, 6, 5, 4, 3, 2, 1, 0);
Packet8i indices = _mm256_mullo_epi32(stride_vector, stride_multiplier);
@@ -558,7 +572,7 @@ template <>
EIGEN_DEVICE_FUNC inline void pscatter<float, Packet16f>(float* to,
const Packet16f& from,
Index stride) {
- Packet16i stride_vector = _mm512_set1_epi32(stride);
+ Packet16i stride_vector = _mm512_set1_epi32(convert_index<int>(stride));
Packet16i stride_multiplier =
_mm512_set_epi32(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
Packet16i indices = _mm512_mullo_epi32(stride_vector, stride_multiplier);
@@ -568,7 +582,7 @@ template <>
EIGEN_DEVICE_FUNC inline void pscatter<double, Packet8d>(double* to,
const Packet8d& from,
Index stride) {
- Packet8i stride_vector = _mm256_set1_epi32(stride);
+ Packet8i stride_vector = _mm256_set1_epi32(convert_index<int>(stride));
Packet8i stride_multiplier = _mm256_set_epi32(7, 6, 5, 4, 3, 2, 1, 0);
Packet8i indices = _mm256_mullo_epi32(stride_vector, stride_multiplier);
_mm512_i32scatter_pd(to, indices, from, 8);
@@ -590,9 +604,9 @@ EIGEN_STRONG_INLINE void pstore1<Packet16i>(int* to, const int& a) {
pstore(to, pa);
}
-template<> EIGEN_STRONG_INLINE void prefetch<float>(const float* addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
-template<> EIGEN_STRONG_INLINE void prefetch<double>(const double* addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
-template<> EIGEN_STRONG_INLINE void prefetch<int>(const int* addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
+template<> EIGEN_STRONG_INLINE void prefetch<float>(const float* addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
+template<> EIGEN_STRONG_INLINE void prefetch<double>(const double* addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
+template<> EIGEN_STRONG_INLINE void prefetch<int>(const int* addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
template <>
EIGEN_STRONG_INLINE float pfirst<Packet16f>(const Packet16f& a) {
@@ -620,13 +634,13 @@ template<> EIGEN_STRONG_INLINE Packet8d preverse(const Packet8d& a)
template<> EIGEN_STRONG_INLINE Packet16f pabs(const Packet16f& a)
{
// _mm512_abs_ps intrinsic not found, so hack around it
- return (__m512)_mm512_and_si512((__m512i)a, _mm512_set1_epi32(0x7fffffff));
+ return _mm512_castsi512_ps(_mm512_and_si512(_mm512_castps_si512(a), _mm512_set1_epi32(0x7fffffff)));
}
template <>
EIGEN_STRONG_INLINE Packet8d pabs(const Packet8d& a) {
// _mm512_abs_pd intrinsic not found, so hack around it
- return (__m512d)_mm512_and_si512((__m512i)a,
- _mm512_set1_epi64(0x7fffffffffffffff));
+ return _mm512_castsi512_pd(_mm512_and_si512(_mm512_castpd_si512(a),
+ _mm512_set1_epi64(0x7fffffffffffffff)));
}
#ifdef EIGEN_VECTORIZE_AVX512DQ
@@ -646,8 +660,7 @@ EIGEN_STRONG_INLINE Packet8d pabs(const Packet8d& a) {
#ifdef EIGEN_VECTORIZE_AVX512DQ
#define EIGEN_INSERT_8f_INTO_16f(OUTPUT, INPUTA, INPUTB) \
- OUTPUT = _mm512_insertf32x8(OUTPUT, INPUTA, 0); \
- OUTPUT = _mm512_insertf32x8(OUTPUT, INPUTB, 1);
+ OUTPUT = _mm512_insertf32x8(_mm512_castps256_ps512(INPUTA), INPUTB, 1);
#else
#define EIGEN_INSERT_8f_INTO_16f(OUTPUT, INPUTA, INPUTB) \
OUTPUT = _mm512_insertf32x4(OUTPUT, _mm256_extractf128_ps(INPUTA, 0), 0); \
@@ -841,7 +854,7 @@ template<> EIGEN_STRONG_INLINE Packet8d preduxp<Packet8d>(const Packet8d* vecs)
final_1 = _mm256_add_pd(final_1, _mm256_blend_pd(tmp0, tmp1, 0xC));
- __m512d final_output = _mm512_insertf64x4(final_output, final_0, 0);
+ __m512d final_output = _mm512_castpd256_pd512(final_0);
return _mm512_insertf64x4(final_output, final_1, 1);
}
@@ -874,7 +887,7 @@ EIGEN_STRONG_INLINE double predux<Packet8d>(const Packet8d& a) {
}
template <>
-EIGEN_STRONG_INLINE Packet8f predux_downto4<Packet16f>(const Packet16f& a) {
+EIGEN_STRONG_INLINE Packet8f predux_half_dowto4<Packet16f>(const Packet16f& a) {
#ifdef EIGEN_VECTORIZE_AVX512DQ
__m256 lane0 = _mm512_extractf32x8_ps(a, 0);
__m256 lane1 = _mm512_extractf32x8_ps(a, 1);
@@ -890,7 +903,7 @@ EIGEN_STRONG_INLINE Packet8f predux_downto4<Packet16f>(const Packet16f& a) {
#endif
}
template <>
-EIGEN_STRONG_INLINE Packet4d predux_downto4<Packet8d>(const Packet8d& a) {
+EIGEN_STRONG_INLINE Packet4d predux_half_dowto4<Packet8d>(const Packet8d& a) {
__m256d lane0 = _mm512_extractf64x4_pd(a, 0);
__m256d lane1 = _mm512_extractf64x4_pd(a, 1);
__m256d res = _mm256_add_pd(lane0, lane1);
@@ -1272,11 +1285,38 @@ EIGEN_STRONG_INLINE Packet16f pblend(const Selector<16>& /*ifPacket*/,
return Packet16f();
}
template <>
-EIGEN_STRONG_INLINE Packet8d pblend(const Selector<8>& /*ifPacket*/,
- const Packet8d& /*thenPacket*/,
- const Packet8d& /*elsePacket*/) {
- assert(false && "To be implemented");
- return Packet8d();
+EIGEN_STRONG_INLINE Packet8d pblend(const Selector<8>& ifPacket,
+ const Packet8d& thenPacket,
+ const Packet8d& elsePacket) {
+ __mmask8 m = (ifPacket.select[0] )
+ | (ifPacket.select[1]<<1)
+ | (ifPacket.select[2]<<2)
+ | (ifPacket.select[3]<<3)
+ | (ifPacket.select[4]<<4)
+ | (ifPacket.select[5]<<5)
+ | (ifPacket.select[6]<<6)
+ | (ifPacket.select[7]<<7);
+ return _mm512_mask_blend_pd(m, elsePacket, thenPacket);
+}
+
+template<> EIGEN_STRONG_INLINE Packet16f pinsertfirst(const Packet16f& a, float b)
+{
+ return _mm512_mask_broadcastss_ps(a, (1), _mm_load_ss(&b));
+}
+
+template<> EIGEN_STRONG_INLINE Packet8d pinsertfirst(const Packet8d& a, double b)
+{
+ return _mm512_mask_broadcastsd_pd(a, (1), _mm_load_sd(&b));
+}
+
+template<> EIGEN_STRONG_INLINE Packet16f pinsertlast(const Packet16f& a, float b)
+{
+ return _mm512_mask_broadcastss_ps(a, (1<<15), _mm_load_ss(&b));
+}
+
+template<> EIGEN_STRONG_INLINE Packet8d pinsertlast(const Packet8d& a, double b)
+{
+ return _mm512_mask_broadcastsd_pd(a, (1<<7), _mm_load_sd(&b));
}
} // end namespace internal
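
The new pblend<Packet8d> folds the eight Selector entries into an __mmask8 and lets _mm512_mask_blend_pd pick thenPacket where a bit is set. The unrolled ORs above are equivalent to this small loop (select[] assumed to hold 0/1 values as in Eigen's Selector<N>; illustration only):

    #include <cstdint>

    std::uint8_t selector_to_mask8(const bool select[8]) {
      std::uint8_t m = 0;
      for (int i = 0; i < 8; ++i)
        if (select[i]) m |= static_cast<std::uint8_t>(1u << i);  // bit i <- select[i]
      return m;  // passed as _mm512_mask_blend_pd(m, elsePacket, thenPacket)
    }
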
diff --git a/Eigen/src/Core/arch/AltiVec/Complex.h b/Eigen/src/Core/arch/AltiVec/Complex.h
index 67db2f8ee..3e665730c 100644
--- a/Eigen/src/Core/arch/AltiVec/Complex.h
+++ b/Eigen/src/Core/arch/AltiVec/Complex.h
@@ -224,23 +224,7 @@ template<> struct conj_helper<Packet2cf, Packet2cf, true,true>
}
};
-template<> struct conj_helper<Packet4f, Packet2cf, false,false>
-{
- EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet4f& x, const Packet2cf& y, const Packet2cf& c) const
- { return padd(c, pmul(x,y)); }
-
- EIGEN_STRONG_INLINE Packet2cf pmul(const Packet4f& x, const Packet2cf& y) const
- { return Packet2cf(internal::pmul<Packet4f>(x, y.v)); }
-};
-
-template<> struct conj_helper<Packet2cf, Packet4f, false,false>
-{
- EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet4f& y, const Packet2cf& c) const
- { return padd(c, pmul(x,y)); }
-
- EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& x, const Packet4f& y) const
- { return Packet2cf(internal::pmul<Packet4f>(x.v, y)); }
-};
+EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet2cf,Packet4f)
template<> EIGEN_STRONG_INLINE Packet2cf pdiv<Packet2cf>(const Packet2cf& a, const Packet2cf& b)
{
@@ -416,23 +400,8 @@ template<> struct conj_helper<Packet1cd, Packet1cd, true,true>
return pconj(internal::pmul(a, b));
}
};
-template<> struct conj_helper<Packet2d, Packet1cd, false,false>
-{
- EIGEN_STRONG_INLINE Packet1cd pmadd(const Packet2d& x, const Packet1cd& y, const Packet1cd& c) const
- { return padd(c, pmul(x,y)); }
-
- EIGEN_STRONG_INLINE Packet1cd pmul(const Packet2d& x, const Packet1cd& y) const
- { return Packet1cd(internal::pmul<Packet2d>(x, y.v)); }
-};
-template<> struct conj_helper<Packet1cd, Packet2d, false,false>
-{
- EIGEN_STRONG_INLINE Packet1cd pmadd(const Packet1cd& x, const Packet2d& y, const Packet1cd& c) const
- { return padd(c, pmul(x,y)); }
-
- EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& x, const Packet2d& y) const
- { return Packet1cd(internal::pmul<Packet2d>(x.v, y)); }
-};
+EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet1cd,Packet2d)
template<> EIGEN_STRONG_INLINE Packet1cd pdiv<Packet1cd>(const Packet1cd& a, const Packet1cd& b)
{
diff --git a/Eigen/src/Core/arch/AltiVec/PacketMath.h b/Eigen/src/Core/arch/AltiVec/PacketMath.h
index b3f1ea199..7f4e90f75 100755
--- a/Eigen/src/Core/arch/AltiVec/PacketMath.h
+++ b/Eigen/src/Core/arch/AltiVec/PacketMath.h
@@ -103,7 +103,7 @@ static Packet16uc p16uc_PSET32_WODD = vec_sld((Packet16uc) vec_splat((Packet4u
static Packet16uc p16uc_PSET32_WEVEN = vec_sld(p16uc_DUPLICATE32_HI, (Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 3), 8);//{ 4,5,6,7, 4,5,6,7, 12,13,14,15, 12,13,14,15 };
static Packet16uc p16uc_HALF64_0_16 = vec_sld((Packet16uc)p4i_ZERO, vec_splat((Packet16uc) vec_abs(p4i_MINUS16), 3), 8); //{ 0,0,0,0, 0,0,0,0, 16,16,16,16, 16,16,16,16};
#else
-static Packet16uc p16uc_FORWARD = p16uc_REVERSE32;
+static Packet16uc p16uc_FORWARD = p16uc_REVERSE32;
static Packet16uc p16uc_REVERSE64 = { 8,9,10,11, 12,13,14,15, 0,1,2,3, 4,5,6,7 };
static Packet16uc p16uc_PSET32_WODD = vec_sld((Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 1), (Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 3), 8);//{ 0,1,2,3, 0,1,2,3, 8,9,10,11, 8,9,10,11 };
static Packet16uc p16uc_PSET32_WEVEN = vec_sld((Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 0), (Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 2), 8);//{ 4,5,6,7, 4,5,6,7, 12,13,14,15, 12,13,14,15 };
@@ -388,10 +388,30 @@ template<> EIGEN_STRONG_INLINE Packet4i pdiv<Packet4i>(const Packet4i& /*a*/, co
template<> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c) { return vec_madd(a,b,c); }
template<> EIGEN_STRONG_INLINE Packet4i pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c) { return a*b + c; }
-template<> EIGEN_STRONG_INLINE Packet4f pmin<Packet4f>(const Packet4f& a, const Packet4f& b) { return vec_min(a, b); }
+template<> EIGEN_STRONG_INLINE Packet4f pmin<Packet4f>(const Packet4f& a, const Packet4f& b)
+{
+ #ifdef __VSX__
+ // NOTE: about 10% slower than vec_min, but consistent with std::min and SSE regarding NaN
+ Packet4f ret;
+ __asm__ ("xvcmpgesp %x0,%x1,%x2\n\txxsel %x0,%x1,%x2,%x0" : "=&wa" (ret) : "wa" (a), "wa" (b));
+ return ret;
+ #else
+ return vec_min(a, b);
+ #endif
+}
template<> EIGEN_STRONG_INLINE Packet4i pmin<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_min(a, b); }
-template<> EIGEN_STRONG_INLINE Packet4f pmax<Packet4f>(const Packet4f& a, const Packet4f& b) { return vec_max(a, b); }
+template<> EIGEN_STRONG_INLINE Packet4f pmax<Packet4f>(const Packet4f& a, const Packet4f& b)
+{
+ #ifdef __VSX__
+ // NOTE: about 10% slower than vec_max, but consistent with std::max and SSE regarding NaN
+ Packet4f ret;
+ __asm__ ("xvcmpgtsp %x0,%x2,%x1\n\txxsel %x0,%x1,%x2,%x0" : "=&wa" (ret) : "wa" (a), "wa" (b));
+ return ret;
+ #else
+ return vec_max(a, b);
+ #endif
+}
template<> EIGEN_STRONG_INLINE Packet4i pmax<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_max(a, b); }
template<> EIGEN_STRONG_INLINE Packet4f pand<Packet4f>(const Packet4f& a, const Packet4f& b) { return vec_and(a, b); }
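
The inline-asm pmin/pmax above trade roughly 10% throughput for NaN handling that matches std::min/std::max (and the SSE path) rather than vec_min/vec_max. For reference, std::min(a, b) returns (b < a) ? b : a, so a NaN in the second argument is ignored while a NaN in the first propagates; a small host-side check of that convention:

    #include <algorithm>
    #include <cassert>
    #include <cmath>
    #include <limits>

    void nan_min_demo() {
      const float qnan = std::numeric_limits<float>::quiet_NaN();
      assert(std::min(1.0f, qnan) == 1.0f);      // comparison is false, first argument wins
      assert(std::isnan(std::min(qnan, 1.0f)));  // NaN in the first argument propagates
    }
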
@@ -434,7 +454,7 @@ template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int* from)
return (Packet4i) vec_perm(MSQ, LSQ, mask); // align the data
}
#else
-// We also need ot redefine little endian loading of Packet4i/Packet4f using VSX
+// We also need to redefine little endian loading of Packet4i/Packet4f using VSX
template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int* from)
{
EIGEN_DEBUG_UNALIGNED_LOAD
@@ -500,7 +520,7 @@ template<> EIGEN_STRONG_INLINE void pstoreu<int>(int* to, const Packet4i& f
vec_st( MSQ, 0, (unsigned char *)to ); // Store the MSQ part
}
#else
-// We also need ot redefine little endian loading of Packet4i/Packet4f using VSX
+// We also need to redefine little endian loading of Packet4i/Packet4f using VSX
template<> EIGEN_STRONG_INLINE void pstoreu<int>(int* to, const Packet4i& from)
{
EIGEN_DEBUG_ALIGNED_STORE
@@ -764,7 +784,7 @@ typedef __vector __bool long Packet2bl;
static Packet2l p2l_ONE = { 1, 1 };
static Packet2l p2l_ZERO = reinterpret_cast<Packet2l>(p4i_ZERO);
-static Packet2d p2d_ONE = { 1.0, 1.0 };
+static Packet2d p2d_ONE = { 1.0, 1.0 };
static Packet2d p2d_ZERO = reinterpret_cast<Packet2d>(p4f_ZERO);
static Packet2d p2d_MZERO = { -0.0, -0.0 };
@@ -910,9 +930,21 @@ template<> EIGEN_STRONG_INLINE Packet2d pdiv<Packet2d>(const Packet2d& a, const
// for some weird reasons, it has to be overloaded for packets of integers
template<> EIGEN_STRONG_INLINE Packet2d pmadd(const Packet2d& a, const Packet2d& b, const Packet2d& c) { return vec_madd(a, b, c); }
-template<> EIGEN_STRONG_INLINE Packet2d pmin<Packet2d>(const Packet2d& a, const Packet2d& b) { return vec_min(a, b); }
+template<> EIGEN_STRONG_INLINE Packet2d pmin<Packet2d>(const Packet2d& a, const Packet2d& b)
+{
+ // NOTE: about 10% slower than vec_min, but consistent with std::min and SSE regarding NaN
+ Packet2d ret;
+ __asm__ ("xvcmpgedp %x0,%x1,%x2\n\txxsel %x0,%x1,%x2,%x0" : "=&wa" (ret) : "wa" (a), "wa" (b));
+ return ret;
+}
-template<> EIGEN_STRONG_INLINE Packet2d pmax<Packet2d>(const Packet2d& a, const Packet2d& b) { return vec_max(a, b); }
+template<> EIGEN_STRONG_INLINE Packet2d pmax<Packet2d>(const Packet2d& a, const Packet2d& b)
+{
+ // NOTE: about 10% slower than vec_max, but consistent with std::max and SSE regarding NaN
+ Packet2d ret;
+ __asm__ ("xvcmpgtdp %x0,%x2,%x1\n\txxsel %x0,%x1,%x2,%x0" : "=&wa" (ret) : "wa" (a), "wa" (b));
+ return ret;
+}
template<> EIGEN_STRONG_INLINE Packet2d pand<Packet2d>(const Packet2d& a, const Packet2d& b) { return vec_and(a, b); }
@@ -969,7 +1001,7 @@ template<> EIGEN_STRONG_INLINE Packet2d preduxp<Packet2d>(const Packet2d* vecs)
Packet2d v[2], sum;
v[0] = vecs[0] + reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4f>(vecs[0]), reinterpret_cast<Packet4f>(vecs[0]), 8));
v[1] = vecs[1] + reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4f>(vecs[1]), reinterpret_cast<Packet4f>(vecs[1]), 8));
-
+
#ifdef _BIG_ENDIAN
sum = reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4f>(v[0]), reinterpret_cast<Packet4f>(v[1]), 8));
#else
@@ -1022,7 +1054,7 @@ ptranspose(PacketBlock<Packet2d,2>& kernel) {
template<> EIGEN_STRONG_INLINE Packet2d pblend(const Selector<2>& ifPacket, const Packet2d& thenPacket, const Packet2d& elsePacket) {
Packet2l select = { ifPacket.select[0], ifPacket.select[1] };
- Packet2bl mask = vec_cmpeq(reinterpret_cast<Packet2d>(select), reinterpret_cast<Packet2d>(p2l_ONE));
+ Packet2bl mask = reinterpret_cast<Packet2bl>( vec_cmpeq(reinterpret_cast<Packet2d>(select), reinterpret_cast<Packet2d>(p2l_ONE)) );
return vec_sel(elsePacket, thenPacket, mask);
}
#endif // __VSX__
diff --git a/Eigen/src/Core/arch/CUDA/Complex.h b/Eigen/src/Core/arch/CUDA/Complex.h
index 9c2536509..57d1201f4 100644
--- a/Eigen/src/Core/arch/CUDA/Complex.h
+++ b/Eigen/src/Core/arch/CUDA/Complex.h
@@ -16,7 +16,7 @@ namespace Eigen {
namespace internal {
-#if defined(__CUDACC__) && defined(EIGEN_USE_GPU)
+#if defined(EIGEN_CUDACC) && defined(EIGEN_USE_GPU)
// Many std::complex methods such as operator+, operator-, operator* and
// operator/ are not constexpr. Due to this, clang does not treat them as device
@@ -55,7 +55,7 @@ template<typename T> struct scalar_difference_op<std::complex<T>, std::complex<T
// Product
template<typename T> struct scalar_product_op<const std::complex<T>, const std::complex<T> > : binary_op_base<const std::complex<T>, const std::complex<T> > {
enum {
- Vectorizable = packet_traits<std::complex<T>>::HasMul
+ Vectorizable = packet_traits<std::complex<T> >::HasMul
};
typedef typename std::complex<T> result_type;
@@ -76,7 +76,7 @@ template<typename T> struct scalar_product_op<std::complex<T>, std::complex<T> >
// Quotient
template<typename T> struct scalar_quotient_op<const std::complex<T>, const std::complex<T> > : binary_op_base<const std::complex<T>, const std::complex<T> > {
enum {
- Vectorizable = packet_traits<std::complex<T>>::HasDiv
+ Vectorizable = packet_traits<std::complex<T> >::HasDiv
};
typedef typename std::complex<T> result_type;
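
Besides switching to EIGEN_CUDACC, the hunks above put a space between the closing angle brackets so the header still parses as C++03, where consecutive '>' tokens in a nested template-id are lexed as operator>>. A minimal illustration:

    #include <complex>
    #include <vector>

    std::vector<std::complex<float> > v03;   // accepted by C++03 and later
    // std::vector<std::complex<float>> v11; // only valid from C++11 onward
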
diff --git a/Eigen/src/Core/arch/Default/ConjHelper.h b/Eigen/src/Core/arch/Default/ConjHelper.h
new file mode 100644
index 000000000..4cfe34e05
--- /dev/null
+++ b/Eigen/src/Core/arch/Default/ConjHelper.h
@@ -0,0 +1,29 @@
+
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2017 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_ARCH_CONJ_HELPER_H
+#define EIGEN_ARCH_CONJ_HELPER_H
+
+#define EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(PACKET_CPLX, PACKET_REAL) \
+ template<> struct conj_helper<PACKET_REAL, PACKET_CPLX, false,false> { \
+ EIGEN_STRONG_INLINE PACKET_CPLX pmadd(const PACKET_REAL& x, const PACKET_CPLX& y, const PACKET_CPLX& c) const \
+ { return padd(c, pmul(x,y)); } \
+ EIGEN_STRONG_INLINE PACKET_CPLX pmul(const PACKET_REAL& x, const PACKET_CPLX& y) const \
+ { return PACKET_CPLX(Eigen::internal::pmul<PACKET_REAL>(x, y.v)); } \
+ }; \
+ \
+ template<> struct conj_helper<PACKET_CPLX, PACKET_REAL, false,false> { \
+ EIGEN_STRONG_INLINE PACKET_CPLX pmadd(const PACKET_CPLX& x, const PACKET_REAL& y, const PACKET_CPLX& c) const \
+ { return padd(c, pmul(x,y)); } \
+ EIGEN_STRONG_INLINE PACKET_CPLX pmul(const PACKET_CPLX& x, const PACKET_REAL& y) const \
+ { return PACKET_CPLX(Eigen::internal::pmul<PACKET_REAL>(x.v, y)); } \
+ };
+
+#endif // EIGEN_ARCH_CONJ_HELPER_H
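
EIGEN_MAKE_CONJ_HELPER_CPLX_REAL stamps out the two mixed real/complex conj_helper specializations that backends such as AltiVec/Complex.h previously spelled out by hand (see the hunks above). Both specializations reduce to a plain multiply-accumulate with no conjugation; a scalar analogue, for illustration only:

    #include <complex>

    // What pmadd(x, y, c) evaluates lane-wise for a real packet x and a
    // complex packet y (and symmetrically for the swapped operand order).
    std::complex<float> pmadd_real_cplx(float x, const std::complex<float>& y,
                                        const std::complex<float>& c) {
      return c + x * y;
    }
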
diff --git a/Eigen/src/Core/arch/CUDA/Half.h b/Eigen/src/Core/arch/GPU/Half.h
index db9878796..65b38bbfb 100644
--- a/Eigen/src/Core/arch/CUDA/Half.h
+++ b/Eigen/src/Core/arch/GPU/Half.h
@@ -13,7 +13,7 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted.
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
@@ -26,15 +26,15 @@
// Standard 16-bit float type, mostly useful for GPUs. Defines a new
-// type Eigen::half (inheriting from CUDA's __half struct) with
+// type Eigen::half (inheriting either from CUDA's or HIP's __half struct) with
// operator overloads such that it behaves basically as an arithmetic
// type. It will be quite slow on CPUs (so it is recommended to stay
// in fp32 for CPUs, except for simple parameter conversions, I/O
// to disk and the likes), but fast on GPUs.
-#ifndef EIGEN_HALF_CUDA_H
-#define EIGEN_HALF_CUDA_H
+#ifndef EIGEN_HALF_GPU_H
+#define EIGEN_HALF_GPU_H
#if __cplusplus > 199711L
#define EIGEN_EXPLICIT_CAST(tgt_type) explicit operator tgt_type()
@@ -49,39 +49,107 @@ struct half;
namespace half_impl {
-#if !defined(EIGEN_HAS_CUDA_FP16)
-
-// Make our own __half definition that is similar to CUDA's.
-struct __half {
- EIGEN_DEVICE_FUNC __half() : x(0) {}
- explicit EIGEN_DEVICE_FUNC __half(unsigned short raw) : x(raw) {}
+#if !defined(EIGEN_HAS_GPU_FP16)
+// Make our own __half_raw definition that is similar to CUDA's.
+struct __half_raw {
+ EIGEN_DEVICE_FUNC __half_raw() : x(0) {}
+ explicit EIGEN_DEVICE_FUNC __half_raw(unsigned short raw) : x(raw) {}
unsigned short x;
};
+#elif defined(EIGEN_HAS_HIP_FP16)
+ #if defined(EIGEN_HAS_OLD_HIP_FP16)
+// Make a __half_raw definition that is
+// ++ compatible with that of Eigen and
+// ++ add an implicit conversion to the native __half of the old HIP implementation.
+//
+// Keeping ".x" as "unsigned short" keeps the interface the same between the Eigen and HIP implementation.
+//
+// In the old HIP implementation,
+// ++ __half is a typedef of __fp16
+// ++ the "__h*" routines take "__half" arguments
+// so we need to implicitly convert "__half_raw" to "__half" to avoid having to explicitly make
+// that conversion in each call to a "__h*" routine... that is why we have the "operator __half" conversion below.
+struct __half_raw {
+ EIGEN_DEVICE_FUNC __half_raw() : x(0) {}
+ explicit EIGEN_DEVICE_FUNC __half_raw(unsigned short raw) : x(raw) {}
+ union {
+ unsigned short x;
+ __half data;
+ };
+ operator __half(void) const { return data; }
+};
+ #endif
+#elif defined(EIGEN_HAS_CUDA_FP16)
+ #if defined(EIGEN_CUDACC_VER) && EIGEN_CUDACC_VER < 90000
+// In CUDA < 9.0, __half is the equivalent of CUDA 9's __half_raw
+ typedef __half __half_raw;
+ #endif // defined(EIGEN_HAS_CUDA_FP16)
+
+#elif defined(EIGEN_USE_SYCL) && defined(__SYCL_DEVICE_ONLY__)
+typedef cl::sycl::half __half_raw;
#endif
-EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC __half raw_uint16_to_half(unsigned short x);
-EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC __half float_to_half_rtne(float ff);
-EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC float half_to_float(__half h);
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC __half_raw raw_uint16_to_half(unsigned short x);
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC __half_raw float_to_half_rtne(float ff);
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC float half_to_float(__half_raw h);
-struct half_base : public __half {
+struct half_base : public __half_raw {
EIGEN_DEVICE_FUNC half_base() {}
- EIGEN_DEVICE_FUNC half_base(const half_base& h) : __half(h) {}
- EIGEN_DEVICE_FUNC half_base(const __half& h) : __half(h) {}
+ EIGEN_DEVICE_FUNC half_base(const half_base& h) : __half_raw(h) {}
+ EIGEN_DEVICE_FUNC half_base(const __half_raw& h) : __half_raw(h) {}
+
+#if defined(EIGEN_HAS_GPU_FP16)
+ #if defined(EIGEN_HAS_HIP_FP16)
+ #if defined(EIGEN_HAS_OLD_HIP_FP16)
+ EIGEN_DEVICE_FUNC half_base(const __half& h) : __half_raw(__half_as_ushort(h)) {}
+ #else
+ EIGEN_DEVICE_FUNC half_base(const __half& h) { x = __half_as_ushort(h); }
+ #endif
+ #elif defined(EIGEN_HAS_CUDA_FP16)
+ #if (defined(EIGEN_CUDACC_VER) && EIGEN_CUDACC_VER >= 90000)
+ EIGEN_DEVICE_FUNC half_base(const __half& h) : __half_raw(*(__half_raw*)&h) {}
+ #endif
+ #endif
+#endif
};
} // namespace half_impl
// Class definition.
struct half : public half_impl::half_base {
- #if !defined(EIGEN_HAS_CUDA_FP16)
- typedef half_impl::__half __half;
- #endif
+
+ // Writing this out as separate #if-else blocks to make the code easier to follow
+ // The same applies to most #if-else blocks in this file
+#if !defined(EIGEN_HAS_GPU_FP16)
+ typedef half_impl::__half_raw __half_raw;
+#elif defined(EIGEN_HAS_HIP_FP16)
+ #if defined(EIGEN_HAS_OLD_HIP_FP16)
+ typedef half_impl::__half_raw __half_raw;
+ #endif
+#elif defined(EIGEN_HAS_CUDA_FP16)
+ // Note that EIGEN_CUDACC_VER is set to 0 even when compiling with HIP, so (EIGEN_CUDACC_VER < 90000) is true even for HIP!
+ // So keeping this within #if defined(EIGEN_HAS_CUDA_FP16) is needed
+ #if defined(EIGEN_CUDACC_VER) && EIGEN_CUDACC_VER < 90000
+ typedef half_impl::__half_raw __half_raw;
+ #endif
+#endif
EIGEN_DEVICE_FUNC half() {}
- EIGEN_DEVICE_FUNC half(const __half& h) : half_impl::half_base(h) {}
+ EIGEN_DEVICE_FUNC half(const __half_raw& h) : half_impl::half_base(h) {}
EIGEN_DEVICE_FUNC half(const half& h) : half_impl::half_base(h) {}
+
+#if defined(EIGEN_HAS_GPU_FP16)
+ #if defined(EIGEN_HAS_HIP_FP16)
+ EIGEN_DEVICE_FUNC half(const __half& h) : half_impl::half_base(h) {}
+ #elif defined(EIGEN_HAS_CUDA_FP16)
+ #if defined(EIGEN_CUDACC_VER) && EIGEN_CUDACC_VER >= 90000
+ EIGEN_DEVICE_FUNC half(const __half& h) : half_impl::half_base(h) {}
+ #endif
+ #endif
+#endif
+
explicit EIGEN_DEVICE_FUNC half(bool b)
: half_impl::half_base(half_impl::raw_uint16_to_half(b ? 0x3c00 : 0)) {}
@@ -136,72 +204,136 @@ struct half : public half_impl::half_base {
x = other.x;
return *this;
}
+
};
+} // end namespace Eigen
+
+namespace std {
+template<>
+struct numeric_limits<Eigen::half> {
+ static const bool is_specialized = true;
+ static const bool is_signed = true;
+ static const bool is_integer = false;
+ static const bool is_exact = false;
+ static const bool has_infinity = true;
+ static const bool has_quiet_NaN = true;
+ static const bool has_signaling_NaN = true;
+ static const float_denorm_style has_denorm = denorm_present;
+ static const bool has_denorm_loss = false;
+ static const std::float_round_style round_style = std::round_to_nearest;
+ static const bool is_iec559 = false;
+ static const bool is_bounded = false;
+ static const bool is_modulo = false;
+ static const int digits = 11;
+ static const int digits10 = 3; // according to http://half.sourceforge.net/structstd_1_1numeric__limits_3_01half__float_1_1half_01_4.html
+ static const int max_digits10 = 5; // according to http://half.sourceforge.net/structstd_1_1numeric__limits_3_01half__float_1_1half_01_4.html
+ static const int radix = 2;
+ static const int min_exponent = -13;
+ static const int min_exponent10 = -4;
+ static const int max_exponent = 16;
+ static const int max_exponent10 = 4;
+ static const bool traps = true;
+ static const bool tinyness_before = false;
+
+ static Eigen::half (min)() { return Eigen::half_impl::raw_uint16_to_half(0x400); }
+ static Eigen::half lowest() { return Eigen::half_impl::raw_uint16_to_half(0xfbff); }
+ static Eigen::half (max)() { return Eigen::half_impl::raw_uint16_to_half(0x7bff); }
+ static Eigen::half epsilon() { return Eigen::half_impl::raw_uint16_to_half(0x0800); }
+ static Eigen::half round_error() { return Eigen::half(0.5); }
+ static Eigen::half infinity() { return Eigen::half_impl::raw_uint16_to_half(0x7c00); }
+ static Eigen::half quiet_NaN() { return Eigen::half_impl::raw_uint16_to_half(0x7e00); }
+ static Eigen::half signaling_NaN() { return Eigen::half_impl::raw_uint16_to_half(0x7e00); }
+ static Eigen::half denorm_min() { return Eigen::half_impl::raw_uint16_to_half(0x1); }
+};
+
+// If std::numeric_limits<T> is specialized, should also specialize
+// std::numeric_limits<const T>, std::numeric_limits<volatile T>, and
+// std::numeric_limits<const volatile T>
+// https://stackoverflow.com/a/16519653/
+template<>
+struct numeric_limits<const Eigen::half> : numeric_limits<Eigen::half> {};
+template<>
+struct numeric_limits<volatile Eigen::half> : numeric_limits<Eigen::half> {};
+template<>
+struct numeric_limits<const volatile Eigen::half> : numeric_limits<Eigen::half> {};
+} // end namespace std
+
+namespace Eigen {
+
namespace half_impl {
-#if defined(EIGEN_HAS_CUDA_FP16) && defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530
+#if (defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 530) || \
+ (defined(EIGEN_HAS_HIP_FP16) && defined(HIP_DEVICE_COMPILE))
// Intrinsics for native fp16 support. Note that on current hardware,
// these are no faster than fp32 arithmetic (you need to use the half2
// versions to get the ALU speed increased), but you do save the
// conversion steps back and forth.
-__device__ half operator + (const half& a, const half& b) {
+EIGEN_STRONG_INLINE __device__ half operator + (const half& a, const half& b) {
+#if defined(EIGEN_CUDACC_VER) && EIGEN_CUDACC_VER >= 90000
+ return __hadd(::__half(a), ::__half(b));
+#else
return __hadd(a, b);
+#endif
}
-__device__ half operator * (const half& a, const half& b) {
+EIGEN_STRONG_INLINE __device__ half operator * (const half& a, const half& b) {
return __hmul(a, b);
}
-__device__ half operator - (const half& a, const half& b) {
+EIGEN_STRONG_INLINE __device__ half operator - (const half& a, const half& b) {
return __hsub(a, b);
}
-__device__ half operator / (const half& a, const half& b) {
+EIGEN_STRONG_INLINE __device__ half operator / (const half& a, const half& b) {
+#if defined(EIGEN_CUDACC_VER) && EIGEN_CUDACC_VER >= 90000
+ return __hdiv(a, b);
+#else
float num = __half2float(a);
float denom = __half2float(b);
return __float2half(num / denom);
+#endif
}
-__device__ half operator - (const half& a) {
+EIGEN_STRONG_INLINE __device__ half operator - (const half& a) {
return __hneg(a);
}
-__device__ half& operator += (half& a, const half& b) {
+EIGEN_STRONG_INLINE __device__ half& operator += (half& a, const half& b) {
a = a + b;
return a;
}
-__device__ half& operator *= (half& a, const half& b) {
+EIGEN_STRONG_INLINE __device__ half& operator *= (half& a, const half& b) {
a = a * b;
return a;
}
-__device__ half& operator -= (half& a, const half& b) {
+EIGEN_STRONG_INLINE __device__ half& operator -= (half& a, const half& b) {
a = a - b;
return a;
}
-__device__ half& operator /= (half& a, const half& b) {
+EIGEN_STRONG_INLINE __device__ half& operator /= (half& a, const half& b) {
a = a / b;
return a;
}
-__device__ bool operator == (const half& a, const half& b) {
+EIGEN_STRONG_INLINE __device__ bool operator == (const half& a, const half& b) {
return __heq(a, b);
}
-__device__ bool operator != (const half& a, const half& b) {
+EIGEN_STRONG_INLINE __device__ bool operator != (const half& a, const half& b) {
return __hne(a, b);
}
-__device__ bool operator < (const half& a, const half& b) {
+EIGEN_STRONG_INLINE __device__ bool operator < (const half& a, const half& b) {
return __hlt(a, b);
}
-__device__ bool operator <= (const half& a, const half& b) {
+EIGEN_STRONG_INLINE __device__ bool operator <= (const half& a, const half& b) {
return __hle(a, b);
}
-__device__ bool operator > (const half& a, const half& b) {
+EIGEN_STRONG_INLINE __device__ bool operator > (const half& a, const half& b) {
return __hgt(a, b);
}
-__device__ bool operator >= (const half& a, const half& b) {
+EIGEN_STRONG_INLINE __device__ bool operator >= (const half& a, const half& b) {
return __hge(a, b);
}
#else // Emulate support for half floats
-// Definitions for CPUs and older CUDA, mostly working through conversion
+// Definitions for CPUs and older HIP+CUDA, mostly working through conversion
// to/from fp32.
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator + (const half& a, const half& b) {
@@ -238,10 +370,10 @@ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half& operator /= (half& a, const half& b)
return a;
}
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator == (const half& a, const half& b) {
- return float(a) == float(b);
+ return numext::equal_strict(float(a),float(b));
}
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator != (const half& a, const half& b) {
- return float(a) != float(b);
+ return numext::not_equal_strict(float(a), float(b));
}
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator < (const half& a, const half& b) {
return float(a) < float(b);
@@ -269,34 +401,36 @@ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator / (const half& a, Index b) {
// these in hardware. If we need more performance on older/other CPUs, they are
// also possible to vectorize directly.
-EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC __half raw_uint16_to_half(unsigned short x) {
- __half h;
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC __half_raw raw_uint16_to_half(unsigned short x) {
+ __half_raw h;
h.x = x;
return h;
}
-union FP32 {
+union float32_bits {
unsigned int u;
float f;
};
-EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC __half float_to_half_rtne(float ff) {
-#if defined(EIGEN_HAS_CUDA_FP16) && defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 300
- return __float2half(ff);
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC __half_raw float_to_half_rtne(float ff) {
+#if (defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 300) || \
+ (defined(EIGEN_HAS_HIP_FP16) && defined(EIGEN_HIP_DEVICE_COMPILE))
+ __half tmp_ff = __float2half(ff);
+ return *(__half_raw*)&tmp_ff;
#elif defined(EIGEN_HAS_FP16_C)
- __half h;
+ __half_raw h;
h.x = _cvtss_sh(ff, 0);
return h;
#else
- FP32 f; f.f = ff;
+ float32_bits f; f.f = ff;
- const FP32 f32infty = { 255 << 23 };
- const FP32 f16max = { (127 + 16) << 23 };
- const FP32 denorm_magic = { ((127 - 15) + (23 - 10) + 1) << 23 };
+ const float32_bits f32infty = { 255 << 23 };
+ const float32_bits f16max = { (127 + 16) << 23 };
+ const float32_bits denorm_magic = { ((127 - 15) + (23 - 10) + 1) << 23 };
unsigned int sign_mask = 0x80000000u;
- __half o;
+ __half_raw o;
o.x = static_cast<unsigned short>(0x0u);
unsigned int sign = f.u & sign_mask;
@@ -335,17 +469,18 @@ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC __half float_to_half_rtne(float ff) {
#endif
}
-EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC float half_to_float(__half h) {
-#if defined(EIGEN_HAS_CUDA_FP16) && defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 300
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC float half_to_float(__half_raw h) {
+#if (defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 300) || \
+ (defined(EIGEN_HAS_HIP_FP16) && defined(EIGEN_HIP_DEVICE_COMPILE))
return __half2float(h);
#elif defined(EIGEN_HAS_FP16_C)
return _cvtsh_ss(h.x);
#else
- const FP32 magic = { 113 << 23 };
+ const float32_bits magic = { 113 << 23 };
const unsigned int shifted_exp = 0x7c00 << 13; // exponent mask after shift
- FP32 o;
+ float32_bits o;
o.u = (h.x & 0x7fff) << 13; // exponent/mantissa bits
unsigned int exp = shifted_exp & o.u; // just the exponent
@@ -370,7 +505,8 @@ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool (isinf)(const half& a) {
return (a.x & 0x7fff) == 0x7c00;
}
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool (isnan)(const half& a) {
-#if defined(EIGEN_HAS_CUDA_FP16) && defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530
+#if (defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 530) || \
+ (defined(EIGEN_HAS_HIP_FP16) && defined(EIGEN_HIP_DEVICE_COMPILE))
return __hisnan(a);
#else
return (a.x & 0x7fff) > 0x7c00;
@@ -386,7 +522,8 @@ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half abs(const half& a) {
return result;
}
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half exp(const half& a) {
-#if defined __CUDACC_VER__ && __CUDACC_VER__ >= 80000 && defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 530
+#if (EIGEN_CUDACC_VER >= 80000 && defined EIGEN_CUDA_ARCH && EIGEN_CUDA_ARCH >= 530) || \
+ defined(EIGEN_HIP_DEVICE_COMPILE)
return half(hexp(a));
#else
return half(::expf(float(a)));
@@ -396,7 +533,8 @@ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half expm1(const half& a) {
return half(numext::expm1(float(a)));
}
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half log(const half& a) {
-#if defined(EIGEN_HAS_CUDA_FP16) && defined __CUDACC_VER__ && __CUDACC_VER__ >= 80000 && defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530
+#if (defined(EIGEN_HAS_CUDA_FP16) && EIGEN_CUDACC_VER >= 80000 && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 530) || \
+ (defined(EIGEN_HAS_HIP_FP16) && defined(EIGEN_HIP_DEVICE_COMPILE))
return half(::hlog(a));
#else
return half(::logf(float(a)));
@@ -409,7 +547,8 @@ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half log10(const half& a) {
return half(::log10f(float(a)));
}
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half sqrt(const half& a) {
-#if defined __CUDACC_VER__ && __CUDACC_VER__ >= 80000 && defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 530
+#if (EIGEN_CUDACC_VER >= 80000 && defined EIGEN_CUDA_ARCH && EIGEN_CUDA_ARCH >= 530) || \
+ defined(EIGEN_HIP_DEVICE_COMPILE)
return half(hsqrt(a));
#else
return half(::sqrtf(float(a)));
@@ -431,14 +570,16 @@ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half tanh(const half& a) {
return half(::tanhf(float(a)));
}
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half floor(const half& a) {
-#if defined __CUDACC_VER__ && __CUDACC_VER__ >= 80000 && defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 300
+#if (EIGEN_CUDACC_VER >= 80000 && defined EIGEN_CUDA_ARCH && EIGEN_CUDA_ARCH >= 300) || \
+ defined(EIGEN_HIP_DEVICE_COMPILE)
return half(hfloor(a));
#else
return half(::floorf(float(a)));
#endif
}
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half ceil(const half& a) {
-#if defined __CUDACC_VER__ && __CUDACC_VER__ >= 80000 && defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 300
+#if (EIGEN_CUDACC_VER >= 80000 && defined EIGEN_CUDA_ARCH && EIGEN_CUDA_ARCH >= 300) || \
+ defined(EIGEN_HIP_DEVICE_COMPILE)
return half(hceil(a));
#else
return half(::ceilf(float(a)));
@@ -446,7 +587,8 @@ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half ceil(const half& a) {
}
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half (min)(const half& a, const half& b) {
-#if defined(EIGEN_HAS_CUDA_FP16) && defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530
+#if (defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 530) || \
+ (defined(EIGEN_HAS_HIP_FP16) && defined(EIGEN_HIP_DEVICE_COMPILE))
return __hlt(b, a) ? b : a;
#else
const float f1 = static_cast<float>(a);
@@ -455,7 +597,8 @@ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half (min)(const half& a, const half& b) {
#endif
}
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half (max)(const half& a, const half& b) {
-#if defined(EIGEN_HAS_CUDA_FP16) && defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530
+#if (defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 530) || \
+ (defined(EIGEN_HAS_HIP_FP16) && defined(EIGEN_HIP_DEVICE_COMPILE))
return __hlt(a, b) ? b : a;
#else
const float f1 = static_cast<float>(a);
@@ -496,6 +639,13 @@ template<> struct is_arithmetic<half> { enum { value = true }; };
template<> struct NumTraits<Eigen::half>
: GenericNumTraits<Eigen::half>
{
+ enum {
+ IsSigned = true,
+ IsInteger = false,
+ IsComplex = false,
+ RequireInitialization = false
+ };
+
EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Eigen::half epsilon() {
return half_impl::raw_uint16_to_half(0x0800);
}
@@ -526,7 +676,8 @@ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half exph(const Eigen::half& a) {
return Eigen::half(::expf(float(a)));
}
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half logh(const Eigen::half& a) {
-#if defined __CUDACC_VER__ && __CUDACC_VER__ >= 80000 && defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530
+#if (EIGEN_CUDACC_VER >= 80000 && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 530) || \
+ defined(EIGEN_HIP_DEVICE_COMPILE)
return Eigen::half(::hlog(a));
#else
return Eigen::half(::logf(float(a)));
@@ -560,14 +711,22 @@ struct hash<Eigen::half> {
// Add the missing shfl_xor intrinsic
-#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 300
+#if (defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 300) || \
+ defined(EIGEN_HIP_DEVICE_COMPILE)
+
__device__ EIGEN_STRONG_INLINE Eigen::half __shfl_xor(Eigen::half var, int laneMask, int width=warpSize) {
+ #if (EIGEN_CUDACC_VER < 90000) || \
+ defined(EIGEN_HAS_HIP_FP16)
return static_cast<Eigen::half>(__shfl_xor(static_cast<float>(var), laneMask, width));
+ #else
+ return static_cast<Eigen::half>(__shfl_xor_sync(0xFFFFFFFF, static_cast<float>(var), laneMask, width));
+ #endif
}
#endif
-// ldg() has an overload for __half, but we also need one for Eigen::half.
-#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 350
+// ldg() has an overload for __half_raw, but we also need one for Eigen::half.
+#if (defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 350) || \
+ defined(EIGEN_HIP_DEVICE_COMPILE)
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half __ldg(const Eigen::half* ptr) {
return Eigen::half_impl::raw_uint16_to_half(
__ldg(reinterpret_cast<const unsigned short*>(ptr)));
@@ -575,7 +734,7 @@ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half __ldg(const Eigen::half* ptr)
#endif
-#if defined(__CUDA_ARCH__)
+#if defined(EIGEN_GPU_COMPILE_PHASE)
namespace Eigen {
namespace numext {
@@ -601,4 +760,4 @@ bool (isfinite)(const Eigen::half& h) {
} // namespace numext
#endif
-#endif // EIGEN_HALF_CUDA_H
+#endif // EIGEN_HALF_GPU_H
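
Beyond the CUDA-to-GPU renames and the HIP code paths, this file now specializes std::numeric_limits for Eigen::half. A host-side usage sketch, assuming an Eigen checkout with this patch applied is on the include path:

    #include <Eigen/Core>
    #include <iostream>
    #include <limits>

    int main() {
      Eigen::half h(1.5f);  // float -> half via float_to_half_rtne
      std::cout << float(h) << "\n";                                           // 1.5
      std::cout << float(std::numeric_limits<Eigen::half>::epsilon()) << "\n";
      std::cout << float(std::numeric_limits<Eigen::half>::lowest()) << "\n";  // -65504
    }
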
diff --git a/Eigen/src/Core/arch/CUDA/MathFunctions.h b/Eigen/src/Core/arch/GPU/MathFunctions.h
index 987a5291c..d2b3a2568 100644
--- a/Eigen/src/Core/arch/CUDA/MathFunctions.h
+++ b/Eigen/src/Core/arch/GPU/MathFunctions.h
@@ -7,8 +7,8 @@
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-#ifndef EIGEN_MATH_FUNCTIONS_CUDA_H
-#define EIGEN_MATH_FUNCTIONS_CUDA_H
+#ifndef EIGEN_MATH_FUNCTIONS_GPU_H
+#define EIGEN_MATH_FUNCTIONS_GPU_H
namespace Eigen {
@@ -17,7 +17,7 @@ namespace internal {
// Make sure this is only available when targeting a GPU: we don't want to
// introduce conflicts between these packet_traits definitions and the ones
// we'll use on the host side (SSE, AVX, ...)
-#if defined(__CUDACC__) && defined(EIGEN_USE_GPU)
+#if defined(EIGEN_GPUCC) && defined(EIGEN_USE_GPU)
template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
float4 plog<float4>(const float4& a)
{
@@ -100,4 +100,4 @@ double2 prsqrt<double2>(const double2& a)
} // end namespace Eigen
-#endif // EIGEN_MATH_FUNCTIONS_CUDA_H
+#endif // EIGEN_MATH_FUNCTIONS_GPU_H
diff --git a/Eigen/src/Core/arch/CUDA/PacketMath.h b/Eigen/src/Core/arch/GPU/PacketMath.h
index ad66399e0..ddf37b9c1 100644
--- a/Eigen/src/Core/arch/CUDA/PacketMath.h
+++ b/Eigen/src/Core/arch/GPU/PacketMath.h
@@ -7,8 +7,8 @@
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-#ifndef EIGEN_PACKET_MATH_CUDA_H
-#define EIGEN_PACKET_MATH_CUDA_H
+#ifndef EIGEN_PACKET_MATH_GPU_H
+#define EIGEN_PACKET_MATH_GPU_H
namespace Eigen {
@@ -17,7 +17,7 @@ namespace internal {
// Make sure this is only available when targeting a GPU: we don't want to
// introduce conflicts between these packet_traits definitions and the ones
// we'll use on the host side (SSE, AVX, ...)
-#if defined(__CUDACC__) && defined(EIGEN_USE_GPU)
+#if defined(EIGEN_GPUCC) && defined(EIGEN_USE_GPU)
template<> struct is_arithmetic<float4> { enum { value = true }; };
template<> struct is_arithmetic<double2> { enum { value = true }; };
@@ -44,7 +44,11 @@ template<> struct packet_traits<float> : default_packet_traits
HasPolygamma = 1,
HasErf = 1,
HasErfc = 1,
+ HasI0e = 1,
+ HasI1e = 1,
HasIGamma = 1,
+ HasIGammaDerA = 1,
+ HasGammaSampleDerAlpha = 1,
HasIGammac = 1,
HasBetaInc = 1,
@@ -73,7 +77,11 @@ template<> struct packet_traits<double> : default_packet_traits
HasPolygamma = 1,
HasErf = 1,
HasErfc = 1,
+ HasI0e = 1,
+ HasI1e = 1,
HasIGamma = 1,
+ HasIGammaDerA = 1,
+ HasGammaSampleDerAlpha = 1,
HasIGammac = 1,
HasBetaInc = 1,
@@ -167,10 +175,10 @@ template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 ploadu<double2>(const d
return make_double2(from[0], from[1]);
}
-template<> EIGEN_STRONG_INLINE float4 ploaddup<float4>(const float* from) {
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 ploaddup<float4>(const float* from) {
return make_float4(from[0], from[0], from[1], from[1]);
}
-template<> EIGEN_STRONG_INLINE double2 ploaddup<double2>(const double* from) {
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 ploaddup<double2>(const double* from) {
return make_double2(from[0], from[0]);
}
@@ -196,7 +204,7 @@ template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pstoreu<double>(double* to
template<>
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float4 ploadt_ro<float4, Aligned>(const float* from) {
-#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 350
+#if defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 350
return __ldg((const float4*)from);
#else
return make_float4(from[0], from[1], from[2], from[3]);
@@ -204,7 +212,7 @@ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float4 ploadt_ro<float4, Aligned>(const fl
}
template<>
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double2 ploadt_ro<double2, Aligned>(const double* from) {
-#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 350
+#if defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 350
return __ldg((const double2*)from);
#else
return make_double2(from[0], from[1]);
@@ -213,7 +221,7 @@ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double2 ploadt_ro<double2, Aligned>(const
template<>
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float4 ploadt_ro<float4, Unaligned>(const float* from) {
-#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 350
+#if defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 350
return make_float4(__ldg(from+0), __ldg(from+1), __ldg(from+2), __ldg(from+3));
#else
return make_float4(from[0], from[1], from[2], from[3]);
@@ -221,7 +229,7 @@ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float4 ploadt_ro<float4, Unaligned>(const
}
template<>
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double2 ploadt_ro<double2, Unaligned>(const double* from) {
-#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 350
+#if defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 350
return make_double2(__ldg(from+0), __ldg(from+1));
#else
return make_double2(from[0], from[1]);
@@ -291,7 +299,7 @@ template<> EIGEN_DEVICE_FUNC inline double2 pabs<double2>(const double2& a) {
EIGEN_DEVICE_FUNC inline void
ptranspose(PacketBlock<float4,4>& kernel) {
- double tmp = kernel.packet[0].y;
+ float tmp = kernel.packet[0].y;
kernel.packet[0].y = kernel.packet[1].x;
kernel.packet[1].x = tmp;
@@ -330,4 +338,4 @@ ptranspose(PacketBlock<double2,2>& kernel) {
} // end namespace Eigen
-#endif // EIGEN_PACKET_MATH_CUDA_H
+#endif // EIGEN_PACKET_MATH_GPU_H
diff --git a/Eigen/src/Core/arch/CUDA/PacketMathHalf.h b/Eigen/src/Core/arch/GPU/PacketMathHalf.h
index b9a125b42..8787adcde 100644
--- a/Eigen/src/Core/arch/CUDA/PacketMathHalf.h
+++ b/Eigen/src/Core/arch/GPU/PacketMathHalf.h
@@ -7,15 +7,16 @@
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-#ifndef EIGEN_PACKET_MATH_HALF_CUDA_H
-#define EIGEN_PACKET_MATH_HALF_CUDA_H
+#ifndef EIGEN_PACKET_MATH_HALF_GPU_H
+#define EIGEN_PACKET_MATH_HALF_GPU_H
namespace Eigen {
namespace internal {
// Most of the following operations require arch >= 3.0
-#if defined(EIGEN_HAS_CUDA_FP16) && defined(__CUDACC__) && defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 300
+#if (defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDACC) && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 300) || \
+ (defined(EIGEN_HAS_HIP_FP16) && defined(EIGEN_HIPCC) && defined(EIGEN_HIP_DEVICE_COMPILE))
template<> struct is_arithmetic<half2> { enum { value = true }; };
@@ -42,70 +43,108 @@ template<> struct packet_traits<Eigen::half> : default_packet_traits
template<> struct unpacket_traits<half2> { typedef Eigen::half type; enum {size=2, alignment=Aligned16}; typedef half2 half; };
-template<> __device__ EIGEN_STRONG_INLINE half2 pset1<half2>(const Eigen::half& from) {
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pset1<half2>(const Eigen::half& from) {
+
+#if defined(EIGEN_HIP_DEVICE_COMPILE)
+
+#if defined(EIGEN_HAS_OLD_HIP_FP16)
+ return half2half2(from);
+#else
+ return __half2half2(from);
+#endif
+
+#else // EIGEN_CUDA_ARCH
return __half2half2(from);
+#endif
}
-template<> __device__ EIGEN_STRONG_INLINE half2 pload<half2>(const Eigen::half* from) {
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pload<half2>(const Eigen::half* from) {
return *reinterpret_cast<const half2*>(from);
}
-template<> __device__ EIGEN_STRONG_INLINE half2 ploadu<half2>(const Eigen::half* from) {
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 ploadu<half2>(const Eigen::half* from) {
return __halves2half2(from[0], from[1]);
}
-template<> EIGEN_STRONG_INLINE half2 ploaddup<half2>(const Eigen::half* from) {
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 ploaddup<half2>(const Eigen::half* from) {
return __halves2half2(from[0], from[0]);
}
-template<> __device__ EIGEN_STRONG_INLINE void pstore<Eigen::half>(Eigen::half* to, const half2& from) {
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pstore<Eigen::half>(Eigen::half* to, const half2& from) {
*reinterpret_cast<half2*>(to) = from;
}
-template<> __device__ EIGEN_STRONG_INLINE void pstoreu<Eigen::half>(Eigen::half* to, const half2& from) {
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pstoreu<Eigen::half>(Eigen::half* to, const half2& from) {
to[0] = __low2half(from);
to[1] = __high2half(from);
}
template<>
- __device__ EIGEN_ALWAYS_INLINE half2 ploadt_ro<half2, Aligned>(const Eigen::half* from) {
-#if __CUDA_ARCH__ >= 350
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE half2 ploadt_ro<half2, Aligned>(const Eigen::half* from) {
+
+#if defined(EIGEN_HIP_DEVICE_COMPILE)
+
+#if defined(EIGEN_HAS_OLD_HIP_FP16)
+ return __halves2half2((*(from+0)), (*(from+1)));
+#else
+ return __ldg((const half2*)from);
+#endif
+
+#else // EIGEN_CUDA_ARCH
+
+#if EIGEN_CUDA_ARCH >= 350
return __ldg((const half2*)from);
#else
return __halves2half2(*(from+0), *(from+1));
#endif
+
+#endif
}
template<>
-__device__ EIGEN_ALWAYS_INLINE half2 ploadt_ro<half2, Unaligned>(const Eigen::half* from) {
-#if __CUDA_ARCH__ >= 350
+EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE half2 ploadt_ro<half2, Unaligned>(const Eigen::half* from) {
+
+#if defined(EIGEN_HIP_DEVICE_COMPILE)
+
+#if defined(EIGEN_HAS_OLD_HIP_FP16)
+ return __halves2half2((*(from+0)), (*(from+1)));
+#else
+ return __halves2half2(__ldg(from+0), __ldg(from+1));
+#endif
+
+#else // EIGEN_CUDA_ARCH
+
+#if EIGEN_CUDA_ARCH >= 350
return __halves2half2(__ldg(from+0), __ldg(from+1));
#else
return __halves2half2(*(from+0), *(from+1));
#endif
+
+#endif
}
-template<> __device__ EIGEN_STRONG_INLINE half2 pgather<Eigen::half, half2>(const Eigen::half* from, Index stride) {
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pgather<Eigen::half, half2>(const Eigen::half* from, Index stride) {
return __halves2half2(from[0*stride], from[1*stride]);
}
-template<> __device__ EIGEN_STRONG_INLINE void pscatter<Eigen::half, half2>(Eigen::half* to, const half2& from, Index stride) {
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pscatter<Eigen::half, half2>(Eigen::half* to, const half2& from, Index stride) {
to[stride*0] = __low2half(from);
to[stride*1] = __high2half(from);
}
-template<> __device__ EIGEN_STRONG_INLINE Eigen::half pfirst<half2>(const half2& a) {
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Eigen::half pfirst<half2>(const half2& a) {
return __low2half(a);
}
-template<> __device__ EIGEN_STRONG_INLINE half2 pabs<half2>(const half2& a) {
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pabs<half2>(const half2& a) {
half2 result;
- result.x = a.x & 0x7FFF7FFF;
+ unsigned temp = *(reinterpret_cast<const unsigned*>(&(a)));
+ *(reinterpret_cast<unsigned*>(&(result))) = temp & 0x7FFF7FFF;
return result;
}
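
Two mechanical changes run through this file: the bare __device__ qualifiers become EIGEN_DEVICE_FUNC, and each primitive gains an explicit EIGEN_HIP_DEVICE_COMPILE branch next to the CUDA-arch one. pabs<half2> additionally stops writing to half2::x directly (no longer a plain 32-bit integer field on newer toolkits) and clears both fp16 sign bits through a 32-bit reinterpretation instead. Per lane, that is just:

    #include <cstdint>

    // Clear bit 15 (the sign bit) of a raw fp16 encoding; the packet version
    // masks the two lanes at once with 0x7FFF7FFF.
    std::uint16_t fp16_abs_bits(std::uint16_t h) {
      return static_cast<std::uint16_t>(h & 0x7FFFu);
    }
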
-__device__ EIGEN_STRONG_INLINE void
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void
ptranspose(PacketBlock<half2,2>& kernel) {
__half a1 = __low2half(kernel.packet[0]);
__half a2 = __high2half(kernel.packet[0]);
@@ -115,17 +154,31 @@ ptranspose(PacketBlock<half2,2>& kernel) {
kernel.packet[1] = __halves2half2(a2, b2);
}
-template<> __device__ EIGEN_STRONG_INLINE half2 plset<half2>(const Eigen::half& a) {
-#if __CUDA_ARCH__ >= 530
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 plset<half2>(const Eigen::half& a) {
+#if defined(EIGEN_HIP_DEVICE_COMPILE)
+
+ return __halves2half2(a, __hadd(a, __float2half(1.0f)));
+
+#else // EIGEN_CUDA_ARCH
+
+#if EIGEN_CUDA_ARCH >= 530
return __halves2half2(a, __hadd(a, __float2half(1.0f)));
#else
float f = __half2float(a) + 1.0f;
return __halves2half2(a, __float2half(f));
#endif
+
+#endif
}
-template<> __device__ EIGEN_STRONG_INLINE half2 padd<half2>(const half2& a, const half2& b) {
-#if __CUDA_ARCH__ >= 530
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 padd<half2>(const half2& a, const half2& b) {
+#if defined(EIGEN_HIP_DEVICE_COMPILE)
+
+ return __hadd2(a, b);
+
+#else // EIGEN_CUDA_ARCH
+
+#if EIGEN_CUDA_ARCH >= 530
return __hadd2(a, b);
#else
float a1 = __low2float(a);
@@ -136,10 +189,18 @@ template<> __device__ EIGEN_STRONG_INLINE half2 padd<half2>(const half2& a, cons
float r2 = a2 + b2;
return __floats2half2_rn(r1, r2);
#endif
+
+#endif
}
-template<> __device__ EIGEN_STRONG_INLINE half2 psub<half2>(const half2& a, const half2& b) {
-#if __CUDA_ARCH__ >= 530
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 psub<half2>(const half2& a, const half2& b) {
+#if defined(EIGEN_HIP_DEVICE_COMPILE)
+
+ return __hsub2(a, b);
+
+#else // EIGEN_CUDA_ARCH
+
+#if EIGEN_CUDA_ARCH >= 530
return __hsub2(a, b);
#else
float a1 = __low2float(a);
@@ -150,22 +211,38 @@ template<> __device__ EIGEN_STRONG_INLINE half2 psub<half2>(const half2& a, cons
float r2 = a2 - b2;
return __floats2half2_rn(r1, r2);
#endif
+
+#endif
}
-template<> __device__ EIGEN_STRONG_INLINE half2 pnegate(const half2& a) {
-#if __CUDA_ARCH__ >= 530
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pnegate(const half2& a) {
+#if defined(EIGEN_HIP_DEVICE_COMPILE)
+
+ return __hneg2(a);
+
+#else // EIGEN_CUDA_ARCH
+
+#if EIGEN_CUDA_ARCH >= 530
return __hneg2(a);
#else
float a1 = __low2float(a);
float a2 = __high2float(a);
return __floats2half2_rn(-a1, -a2);
#endif
+
+#endif
}
-template<> __device__ EIGEN_STRONG_INLINE half2 pconj(const half2& a) { return a; }
-template<> __device__ EIGEN_STRONG_INLINE half2 pmul<half2>(const half2& a, const half2& b) {
-#if __CUDA_ARCH__ >= 530
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pconj(const half2& a) { return a; }
+
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pmul<half2>(const half2& a, const half2& b) {
+#if defined(EIGEN_HIP_DEVICE_COMPILE)
+
+  return __hmul2(a, b);
+
+#else // EIGEN_CUDA_ARCH
+
+#if EIGEN_CUDA_ARCH >= 530
return __hmul2(a, b);
#else
float a1 = __low2float(a);
@@ -176,10 +253,18 @@ template<> __device__ EIGEN_STRONG_INLINE half2 pmul<half2>(const half2& a, cons
float r2 = a2 * b2;
return __floats2half2_rn(r1, r2);
#endif
+
+#endif
}
-template<> __device__ EIGEN_STRONG_INLINE half2 pmadd<half2>(const half2& a, const half2& b, const half2& c) {
-#if __CUDA_ARCH__ >= 530
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pmadd<half2>(const half2& a, const half2& b, const half2& c) {
+#if defined(EIGEN_HIP_DEVICE_COMPILE)
+
+ return __hfma2(a, b, c);
+
+#else // EIGEN_CUDA_ARCH
+
+#if EIGEN_CUDA_ARCH >= 530
return __hfma2(a, b, c);
#else
float a1 = __low2float(a);
@@ -192,9 +277,21 @@ template<> __device__ EIGEN_STRONG_INLINE half2 pmadd<half2>(const half2& a, con
float r2 = a2 * b2 + c2;
return __floats2half2_rn(r1, r2);
#endif
+
+#endif
}
-template<> __device__ EIGEN_STRONG_INLINE half2 pdiv<half2>(const half2& a, const half2& b) {
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pdiv<half2>(const half2& a, const half2& b) {
+#if defined(EIGEN_HIP_DEVICE_COMPILE)
+
+#if defined(EIGEN_HAS_OLD_HIP_FP16)
+ return h2div(a, b);
+#else
+ return __h2div(a, b);
+#endif
+
+#else // EIGEN_CUDA_ARCH
+
float a1 = __low2float(a);
float a2 = __high2float(a);
float b1 = __low2float(b);
@@ -202,9 +299,11 @@ template<> __device__ EIGEN_STRONG_INLINE half2 pdiv<half2>(const half2& a, cons
float r1 = a1 / b1;
float r2 = a2 / b2;
return __floats2half2_rn(r1, r2);
+
+#endif
}
-template<> __device__ EIGEN_STRONG_INLINE half2 pmin<half2>(const half2& a, const half2& b) {
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pmin<half2>(const half2& a, const half2& b) {
float a1 = __low2float(a);
float a2 = __high2float(a);
float b1 = __low2float(b);
@@ -214,7 +313,7 @@ template<> __device__ EIGEN_STRONG_INLINE half2 pmin<half2>(const half2& a, cons
return __halves2half2(r1, r2);
}
-template<> __device__ EIGEN_STRONG_INLINE half2 pmax<half2>(const half2& a, const half2& b) {
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pmax<half2>(const half2& a, const half2& b) {
float a1 = __low2float(a);
float a2 = __high2float(a);
float b1 = __low2float(b);
@@ -224,18 +323,34 @@ template<> __device__ EIGEN_STRONG_INLINE half2 pmax<half2>(const half2& a, cons
return __halves2half2(r1, r2);
}
-template<> __device__ EIGEN_STRONG_INLINE Eigen::half predux<half2>(const half2& a) {
-#if __CUDA_ARCH__ >= 530
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Eigen::half predux<half2>(const half2& a) {
+#if defined(EIGEN_HIP_DEVICE_COMPILE)
+
+ return __hadd(__low2half(a), __high2half(a));
+
+#else // EIGEN_CUDA_ARCH
+
+#if EIGEN_CUDA_ARCH >= 530
return __hadd(__low2half(a), __high2half(a));
#else
float a1 = __low2float(a);
float a2 = __high2float(a);
- return Eigen::half(half_impl::raw_uint16_to_half(__float2half_rn(a1 + a2)));
+ return Eigen::half(__float2half(a1 + a2));
+#endif
+
#endif
}
-template<> __device__ EIGEN_STRONG_INLINE Eigen::half predux_max<half2>(const half2& a) {
-#if __CUDA_ARCH__ >= 530
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Eigen::half predux_max<half2>(const half2& a) {
+#if defined(EIGEN_HIP_DEVICE_COMPILE)
+
+ __half first = __low2half(a);
+ __half second = __high2half(a);
+ return __hgt(first, second) ? first : second;
+
+#else // EIGEN_CUDA_ARCH
+
+#if EIGEN_CUDA_ARCH >= 530
__half first = __low2half(a);
__half second = __high2half(a);
return __hgt(first, second) ? first : second;
@@ -244,10 +359,20 @@ template<> __device__ EIGEN_STRONG_INLINE Eigen::half predux_max<half2>(const ha
float a2 = __high2float(a);
return a1 > a2 ? __low2half(a) : __high2half(a);
#endif
+
+#endif
}
-template<> __device__ EIGEN_STRONG_INLINE Eigen::half predux_min<half2>(const half2& a) {
-#if __CUDA_ARCH__ >= 530
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Eigen::half predux_min<half2>(const half2& a) {
+#if defined(EIGEN_HIP_DEVICE_COMPILE)
+
+ __half first = __low2half(a);
+ __half second = __high2half(a);
+ return __hlt(first, second) ? first : second;
+
+#else // EIGEN_CUDA_ARCH
+
+#if EIGEN_CUDA_ARCH >= 530
__half first = __low2half(a);
__half second = __high2half(a);
return __hlt(first, second) ? first : second;
@@ -256,19 +381,29 @@ template<> __device__ EIGEN_STRONG_INLINE Eigen::half predux_min<half2>(const ha
float a2 = __high2float(a);
return a1 < a2 ? __low2half(a) : __high2half(a);
#endif
+
+#endif
}
-template<> __device__ EIGEN_STRONG_INLINE Eigen::half predux_mul<half2>(const half2& a) {
-#if __CUDA_ARCH__ >= 530
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Eigen::half predux_mul<half2>(const half2& a) {
+#if defined(EIGEN_HIP_DEVICE_COMPILE)
+
+ return __hmul(__low2half(a), __high2half(a));
+
+#else // EIGEN_CUDA_ARCH
+
+#if EIGEN_CUDA_ARCH >= 530
return __hmul(__low2half(a), __high2half(a));
#else
float a1 = __low2float(a);
float a2 = __high2float(a);
- return Eigen::half(half_impl::raw_uint16_to_half(__float2half_rn(a1 * a2)));
+ return Eigen::half(__float2half(a1 * a2));
+#endif
+
#endif
}
-template<> __device__ EIGEN_STRONG_INLINE half2 plog1p<half2>(const half2& a) {
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 plog1p<half2>(const half2& a) {
float a1 = __low2float(a);
float a2 = __high2float(a);
float r1 = log1pf(a1);
@@ -276,7 +411,7 @@ template<> __device__ EIGEN_STRONG_INLINE half2 plog1p<half2>(const half2& a) {
return __floats2half2_rn(r1, r2);
}
-template<> __device__ EIGEN_STRONG_INLINE half2 pexpm1<half2>(const half2& a) {
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pexpm1<half2>(const half2& a) {
float a1 = __low2float(a);
float a2 = __high2float(a);
float r1 = expm1f(a1);
@@ -284,31 +419,32 @@ template<> __device__ EIGEN_STRONG_INLINE half2 pexpm1<half2>(const half2& a) {
return __floats2half2_rn(r1, r2);
}
-#if defined __CUDACC_VER__ && __CUDACC_VER__ >= 80000 && defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 530
+#if (EIGEN_CUDACC_VER >= 80000 && defined EIGEN_CUDA_ARCH && EIGEN_CUDA_ARCH >= 530) || \
+ defined(EIGEN_HIP_DEVICE_COMPILE)
-template<> __device__ EIGEN_STRONG_INLINE
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
half2 plog<half2>(const half2& a) {
return h2log(a);
}
-template<> __device__ EIGEN_STRONG_INLINE
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
half2 pexp<half2>(const half2& a) {
return h2exp(a);
}
-template<> __device__ EIGEN_STRONG_INLINE
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
half2 psqrt<half2>(const half2& a) {
return h2sqrt(a);
}
-template<> __device__ EIGEN_STRONG_INLINE
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
half2 prsqrt<half2>(const half2& a) {
return h2rsqrt(a);
}
#else
-template<> __device__ EIGEN_STRONG_INLINE half2 plog<half2>(const half2& a) {
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 plog<half2>(const half2& a) {
float a1 = __low2float(a);
float a2 = __high2float(a);
float r1 = logf(a1);
@@ -316,7 +452,7 @@ template<> __device__ EIGEN_STRONG_INLINE half2 plog<half2>(const half2& a) {
return __floats2half2_rn(r1, r2);
}
-template<> __device__ EIGEN_STRONG_INLINE half2 pexp<half2>(const half2& a) {
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pexp<half2>(const half2& a) {
float a1 = __low2float(a);
float a2 = __high2float(a);
float r1 = expf(a1);
@@ -324,7 +460,7 @@ template<> __device__ EIGEN_STRONG_INLINE half2 pexp<half2>(const half2& a) {
return __floats2half2_rn(r1, r2);
}
-template<> __device__ EIGEN_STRONG_INLINE half2 psqrt<half2>(const half2& a) {
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 psqrt<half2>(const half2& a) {
float a1 = __low2float(a);
float a2 = __high2float(a);
float r1 = sqrtf(a1);
@@ -332,7 +468,7 @@ template<> __device__ EIGEN_STRONG_INLINE half2 psqrt<half2>(const half2& a) {
return __floats2half2_rn(r1, r2);
}
-template<> __device__ EIGEN_STRONG_INLINE half2 prsqrt<half2>(const half2& a) {
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 prsqrt<half2>(const half2& a) {
float a1 = __low2float(a);
float a2 = __high2float(a);
float r1 = rsqrtf(a1);
@@ -361,10 +497,10 @@ struct packet_traits<half> : default_packet_traits {
AlignedOnScalar = 1,
size = 16,
HasHalfPacket = 0,
- HasAdd = 0,
- HasSub = 0,
- HasMul = 0,
- HasNegate = 0,
+ HasAdd = 1,
+ HasSub = 1,
+ HasMul = 1,
+ HasNegate = 1,
HasAbs = 0,
HasAbs2 = 0,
HasMin = 0,
@@ -406,11 +542,30 @@ template<> EIGEN_STRONG_INLINE Packet16h ploadu<Packet16h>(const Eigen::half* fr
}
template<> EIGEN_STRONG_INLINE void pstore<half>(Eigen::half* to, const Packet16h& from) {
- _mm256_store_si256((__m256i*)to, from.x);
+ // (void*) -> workaround clang warning:
+ // cast from 'Eigen::half *' to '__m256i *' increases required alignment from 2 to 32
+ _mm256_store_si256((__m256i*)(void*)to, from.x);
}
template<> EIGEN_STRONG_INLINE void pstoreu<half>(Eigen::half* to, const Packet16h& from) {
- _mm256_storeu_si256((__m256i*)to, from.x);
+ // (void*) -> workaround clang warning:
+ // cast from 'Eigen::half *' to '__m256i *' increases required alignment from 2 to 32
+ _mm256_storeu_si256((__m256i*)(void*)to, from.x);
+}
+
+template<> EIGEN_STRONG_INLINE Packet16h
+ploaddup<Packet16h>(const Eigen::half* from) {
+ Packet16h result;
+ unsigned short a = from[0].x;
+ unsigned short b = from[1].x;
+ unsigned short c = from[2].x;
+ unsigned short d = from[3].x;
+ unsigned short e = from[4].x;
+ unsigned short f = from[5].x;
+ unsigned short g = from[6].x;
+ unsigned short h = from[7].x;
+ result.x = _mm256_set_epi16(h, h, g, g, f, f, e, e, d, d, c, c, b, b, a, a);
+ return result;
}
template<> EIGEN_STRONG_INLINE Packet16h
@@ -485,6 +640,13 @@ EIGEN_STRONG_INLINE Packet16h float2half(const Packet16f& a) {
#endif
}
+template<> EIGEN_STRONG_INLINE Packet16h pnegate(const Packet16h& a) {
+ // FIXME we could do that with bit manipulation
+ Packet16f af = half2float(a);
+ Packet16f rf = pnegate(af);
+ return float2half(rf);
+}
+
template<> EIGEN_STRONG_INLINE Packet16h padd<Packet16h>(const Packet16h& a, const Packet16h& b) {
Packet16f af = half2float(a);
Packet16f bf = half2float(b);
@@ -492,6 +654,13 @@ template<> EIGEN_STRONG_INLINE Packet16h padd<Packet16h>(const Packet16h& a, con
return float2half(rf);
}
+template<> EIGEN_STRONG_INLINE Packet16h psub<Packet16h>(const Packet16h& a, const Packet16h& b) {
+ Packet16f af = half2float(a);
+ Packet16f bf = half2float(b);
+ Packet16f rf = psub(af, bf);
+ return float2half(rf);
+}
+
template<> EIGEN_STRONG_INLINE Packet16h pmul<Packet16h>(const Packet16h& a, const Packet16h& b) {
Packet16f af = half2float(a);
Packet16f bf = half2float(b);
@@ -504,6 +673,57 @@ template<> EIGEN_STRONG_INLINE half predux<Packet16h>(const Packet16h& from) {
return half(predux(from_float));
}
+template<> EIGEN_STRONG_INLINE half predux_mul<Packet16h>(const Packet16h& from) {
+ Packet16f from_float = half2float(from);
+ return half(predux_mul(from_float));
+}
+
+template<> EIGEN_STRONG_INLINE Packet16h preduxp<Packet16h>(const Packet16h* p) {
+ Packet16f pf[16];
+ pf[0] = half2float(p[0]);
+ pf[1] = half2float(p[1]);
+ pf[2] = half2float(p[2]);
+ pf[3] = half2float(p[3]);
+ pf[4] = half2float(p[4]);
+ pf[5] = half2float(p[5]);
+ pf[6] = half2float(p[6]);
+ pf[7] = half2float(p[7]);
+ pf[8] = half2float(p[8]);
+ pf[9] = half2float(p[9]);
+ pf[10] = half2float(p[10]);
+ pf[11] = half2float(p[11]);
+ pf[12] = half2float(p[12]);
+ pf[13] = half2float(p[13]);
+ pf[14] = half2float(p[14]);
+ pf[15] = half2float(p[15]);
+ Packet16f reduced = preduxp<Packet16f>(pf);
+ return float2half(reduced);
+}
+
+template<> EIGEN_STRONG_INLINE Packet16h preverse(const Packet16h& a)
+{
+ __m128i m = _mm_setr_epi8(14,15,12,13,10,11,8,9,6,7,4,5,2,3,0,1);
+ Packet16h res;
+ res.x = _mm256_insertf128_si256(
+ _mm256_castsi128_si256(_mm_shuffle_epi8(_mm256_extractf128_si256(a.x,1),m)),
+ _mm_shuffle_epi8(_mm256_extractf128_si256(a.x,0),m), 1);
+ return res;
+}
+
+template<> EIGEN_STRONG_INLINE Packet16h pinsertfirst(const Packet16h& a, Eigen::half b)
+{
+ Packet16h res;
+ res.x = _mm256_insert_epi16(a.x,b.x,0);
+ return res;
+}
+
+template<> EIGEN_STRONG_INLINE Packet16h pinsertlast(const Packet16h& a, Eigen::half b)
+{
+ Packet16h res;
+ res.x = _mm256_insert_epi16(a.x,b.x,15);
+ return res;
+}
+
template<> EIGEN_STRONG_INLINE Packet16h pgather<Eigen::half, Packet16h>(const Eigen::half* from, Index stride)
{
Packet16h result;
@@ -611,20 +831,20 @@ ptranspose(PacketBlock<Packet16h,16>& kernel) {
// NOTE: no unpacklo/hi instr in this case, so using permute instr.
__m256i a_p_0 = _mm256_permute2x128_si256(abcdefgh_01, ijklmnop_01, 0x20);
- __m256i a_p_1 = _mm256_permute2x128_si256(abcdefgh_01, ijklmnop_01, 0x31);
- __m256i a_p_2 = _mm256_permute2x128_si256(abcdefgh_23, ijklmnop_23, 0x20);
- __m256i a_p_3 = _mm256_permute2x128_si256(abcdefgh_23, ijklmnop_23, 0x31);
- __m256i a_p_4 = _mm256_permute2x128_si256(abcdefgh_45, ijklmnop_45, 0x20);
- __m256i a_p_5 = _mm256_permute2x128_si256(abcdefgh_45, ijklmnop_45, 0x31);
- __m256i a_p_6 = _mm256_permute2x128_si256(abcdefgh_67, ijklmnop_67, 0x20);
- __m256i a_p_7 = _mm256_permute2x128_si256(abcdefgh_67, ijklmnop_67, 0x31);
- __m256i a_p_8 = _mm256_permute2x128_si256(abcdefgh_89, ijklmnop_89, 0x20);
- __m256i a_p_9 = _mm256_permute2x128_si256(abcdefgh_89, ijklmnop_89, 0x31);
- __m256i a_p_a = _mm256_permute2x128_si256(abcdefgh_ab, ijklmnop_ab, 0x20);
- __m256i a_p_b = _mm256_permute2x128_si256(abcdefgh_ab, ijklmnop_ab, 0x31);
- __m256i a_p_c = _mm256_permute2x128_si256(abcdefgh_cd, ijklmnop_cd, 0x20);
- __m256i a_p_d = _mm256_permute2x128_si256(abcdefgh_cd, ijklmnop_cd, 0x31);
- __m256i a_p_e = _mm256_permute2x128_si256(abcdefgh_ef, ijklmnop_ef, 0x20);
+ __m256i a_p_1 = _mm256_permute2x128_si256(abcdefgh_23, ijklmnop_23, 0x20);
+ __m256i a_p_2 = _mm256_permute2x128_si256(abcdefgh_45, ijklmnop_45, 0x20);
+ __m256i a_p_3 = _mm256_permute2x128_si256(abcdefgh_67, ijklmnop_67, 0x20);
+ __m256i a_p_4 = _mm256_permute2x128_si256(abcdefgh_89, ijklmnop_89, 0x20);
+ __m256i a_p_5 = _mm256_permute2x128_si256(abcdefgh_ab, ijklmnop_ab, 0x20);
+ __m256i a_p_6 = _mm256_permute2x128_si256(abcdefgh_cd, ijklmnop_cd, 0x20);
+ __m256i a_p_7 = _mm256_permute2x128_si256(abcdefgh_ef, ijklmnop_ef, 0x20);
+ __m256i a_p_8 = _mm256_permute2x128_si256(abcdefgh_01, ijklmnop_01, 0x31);
+ __m256i a_p_9 = _mm256_permute2x128_si256(abcdefgh_23, ijklmnop_23, 0x31);
+ __m256i a_p_a = _mm256_permute2x128_si256(abcdefgh_45, ijklmnop_45, 0x31);
+ __m256i a_p_b = _mm256_permute2x128_si256(abcdefgh_67, ijklmnop_67, 0x31);
+ __m256i a_p_c = _mm256_permute2x128_si256(abcdefgh_89, ijklmnop_89, 0x31);
+ __m256i a_p_d = _mm256_permute2x128_si256(abcdefgh_ab, ijklmnop_ab, 0x31);
+ __m256i a_p_e = _mm256_permute2x128_si256(abcdefgh_cd, ijklmnop_cd, 0x31);
__m256i a_p_f = _mm256_permute2x128_si256(abcdefgh_ef, ijklmnop_ef, 0x31);
kernel.packet[0].x = a_p_0;
@@ -729,10 +949,10 @@ struct packet_traits<Eigen::half> : default_packet_traits {
AlignedOnScalar = 1,
size = 8,
HasHalfPacket = 0,
- HasAdd = 0,
- HasSub = 0,
- HasMul = 0,
- HasNegate = 0,
+ HasAdd = 1,
+ HasSub = 1,
+ HasMul = 1,
+ HasNegate = 1,
HasAbs = 0,
HasAbs2 = 0,
HasMin = 0,
@@ -782,6 +1002,17 @@ template<> EIGEN_STRONG_INLINE void pstoreu<Eigen::half>(Eigen::half* to, const
}
template<> EIGEN_STRONG_INLINE Packet8h
+ploaddup<Packet8h>(const Eigen::half* from) {
+ Packet8h result;
+ unsigned short a = from[0].x;
+ unsigned short b = from[1].x;
+ unsigned short c = from[2].x;
+ unsigned short d = from[3].x;
+ result.x = _mm_set_epi16(d, d, c, c, b, b, a, a);
+ return result;
+}
+
+template<> EIGEN_STRONG_INLINE Packet8h
ploadquad<Packet8h>(const Eigen::half* from) {
Packet8h result;
unsigned short a = from[0].x;
@@ -834,6 +1065,13 @@ EIGEN_STRONG_INLINE Packet8h float2half(const Packet8f& a) {
template<> EIGEN_STRONG_INLINE Packet8h pconj(const Packet8h& a) { return a; }
+template<> EIGEN_STRONG_INLINE Packet8h pnegate(const Packet8h& a) {
+ // FIXME we could do that with bit manipulation
+ Packet8f af = half2float(a);
+ Packet8f rf = pnegate(af);
+ return float2half(rf);
+}
+
template<> EIGEN_STRONG_INLINE Packet8h padd<Packet8h>(const Packet8h& a, const Packet8h& b) {
Packet8f af = half2float(a);
Packet8f bf = half2float(b);
@@ -841,6 +1079,13 @@ template<> EIGEN_STRONG_INLINE Packet8h padd<Packet8h>(const Packet8h& a, const
return float2half(rf);
}
+template<> EIGEN_STRONG_INLINE Packet8h psub<Packet8h>(const Packet8h& a, const Packet8h& b) {
+ Packet8f af = half2float(a);
+ Packet8f bf = half2float(b);
+ Packet8f rf = psub(af, bf);
+ return float2half(rf);
+}
+
template<> EIGEN_STRONG_INLINE Packet8h pmul<Packet8h>(const Packet8h& a, const Packet8h& b) {
Packet8f af = half2float(a);
Packet8f bf = half2float(b);
@@ -893,6 +1138,52 @@ template<> EIGEN_STRONG_INLINE Eigen::half predux_mul<Packet8h>(const Packet8h&
return Eigen::half(reduced);
}
+template<> EIGEN_STRONG_INLINE Packet8h preduxp<Packet8h>(const Packet8h* p) {
+ Packet8f pf[8];
+ pf[0] = half2float(p[0]);
+ pf[1] = half2float(p[1]);
+ pf[2] = half2float(p[2]);
+ pf[3] = half2float(p[3]);
+ pf[4] = half2float(p[4]);
+ pf[5] = half2float(p[5]);
+ pf[6] = half2float(p[6]);
+ pf[7] = half2float(p[7]);
+ Packet8f reduced = preduxp<Packet8f>(pf);
+ return float2half(reduced);
+}
+
+template<> EIGEN_STRONG_INLINE Packet8h preverse(const Packet8h& a)
+{
+ __m128i m = _mm_setr_epi8(14,15,12,13,10,11,8,9,6,7,4,5,2,3,0,1);
+ Packet8h res;
+ res.x = _mm_shuffle_epi8(a.x,m);
+ return res;
+}
+
+template<> EIGEN_STRONG_INLINE Packet8h pinsertfirst(const Packet8h& a, Eigen::half b)
+{
+ Packet8h res;
+ res.x = _mm_insert_epi16(a.x,int(b.x),0);
+ return res;
+}
+
+template<> EIGEN_STRONG_INLINE Packet8h pinsertlast(const Packet8h& a, Eigen::half b)
+{
+ Packet8h res;
+ res.x = _mm_insert_epi16(a.x,int(b.x),7);
+ return res;
+}
+
+template<int Offset>
+struct palign_impl<Offset,Packet8h>
+{
+ static EIGEN_STRONG_INLINE void run(Packet8h& first, const Packet8h& second)
+ {
+ if (Offset!=0)
+ first.x = _mm_alignr_epi8(second.x,first.x, Offset*2);
+ }
+};
+
EIGEN_STRONG_INLINE void
ptranspose(PacketBlock<Packet8h,8>& kernel) {
__m128i a = kernel.packet[0].x;
@@ -1129,4 +1420,4 @@ ptranspose(PacketBlock<Packet4h,4>& kernel) {
}
}
-#endif // EIGEN_PACKET_MATH_HALF_CUDA_H
+#endif // EIGEN_PACKET_MATH_HALF_GPU_H
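The change above applies one dispatch pattern to every half2 operation: use the native fp16 intrinsic when compiling for a HIP device, or for a CUDA device of compute capability 5.3 or higher, and otherwise widen both halves to float, operate, and narrow back. A minimal standalone sketch of that pattern, assuming nvcc or hipcc with <cuda_fp16.h> / <hip/hip_fp16.h> available (my_padd_half2 is an illustrative name, not an Eigen symbol; the #elif form is a condensed equivalent of the nested guards in the patch):

#include <cuda_fp16.h>   // or <hip/hip_fp16.h> when building with hipcc

// Condensed, illustrative equivalent of the guards used throughout the file.
// my_padd_half2 is not an Eigen function; the EIGEN_* macros are the ones the
// patch switches to (EIGEN_CUDA_ARCH replaces __CUDA_ARCH__).
__device__ inline half2 my_padd_half2(const half2& a, const half2& b) {
#if defined(EIGEN_HIP_DEVICE_COMPILE)
  return __hadd2(a, b);                                // HIP device: native fp16 add
#elif defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 530
  return __hadd2(a, b);                                // CUDA sm_53+: native fp16 add
#else
  float a1 = __low2float(a), a2 = __high2float(a);     // widen both lanes to float
  float b1 = __low2float(b), b2 = __high2float(b);
  return __floats2half2_rn(a1 + b1, a2 + b2);          // add in float, narrow back
#endif
}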
diff --git a/Eigen/src/Core/arch/CUDA/TypeCasting.h b/Eigen/src/Core/arch/GPU/TypeCasting.h
index aa5fbce8e..57a55d08b 100644
--- a/Eigen/src/Core/arch/CUDA/TypeCasting.h
+++ b/Eigen/src/Core/arch/GPU/TypeCasting.h
@@ -7,8 +7,8 @@
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-#ifndef EIGEN_TYPE_CASTING_CUDA_H
-#define EIGEN_TYPE_CASTING_CUDA_H
+#ifndef EIGEN_TYPE_CASTING_GPU_H
+#define EIGEN_TYPE_CASTING_GPU_H
namespace Eigen {
@@ -19,7 +19,8 @@ struct scalar_cast_op<float, Eigen::half> {
EIGEN_EMPTY_STRUCT_CTOR(scalar_cast_op)
typedef Eigen::half result_type;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Eigen::half operator() (const float& a) const {
- #if defined(EIGEN_HAS_CUDA_FP16) && defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 300
+ #if (defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 300) || \
+ (defined(EIGEN_HAS_HIP_FP16) && defined(EIGEN_HIP_DEVICE_COMPILE))
return __float2half(a);
#else
return Eigen::half(a);
@@ -37,7 +38,8 @@ struct scalar_cast_op<int, Eigen::half> {
EIGEN_EMPTY_STRUCT_CTOR(scalar_cast_op)
typedef Eigen::half result_type;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Eigen::half operator() (const int& a) const {
- #if defined(EIGEN_HAS_CUDA_FP16) && defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 300
+ #if (defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 300) || \
+ (defined(EIGEN_HAS_HIP_FP16) && defined(EIGEN_HIP_DEVICE_COMPILE))
return __float2half(static_cast<float>(a));
#else
return Eigen::half(static_cast<float>(a));
@@ -55,7 +57,8 @@ struct scalar_cast_op<Eigen::half, float> {
EIGEN_EMPTY_STRUCT_CTOR(scalar_cast_op)
typedef float result_type;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float operator() (const Eigen::half& a) const {
- #if defined(EIGEN_HAS_CUDA_FP16) && defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 300
+ #if (defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 300) || \
+ (defined(EIGEN_HAS_HIP_FP16) && defined(EIGEN_HIP_DEVICE_COMPILE))
return __half2float(a);
#else
return static_cast<float>(a);
@@ -69,7 +72,8 @@ struct functor_traits<scalar_cast_op<Eigen::half, float> >
-#if defined(EIGEN_HAS_CUDA_FP16) && defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 300
+#if (defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 300) || \
+ (defined(EIGEN_HAS_HIP_FP16) && defined(EIGEN_HIP_DEVICE_COMPILE))
template <>
struct type_casting_traits<Eigen::half, float> {
@@ -209,4 +213,4 @@ template<> EIGEN_STRONG_INLINE Packet4h pcast<Packet4f, Packet4h>(const Packet4f
} // end namespace Eigen
-#endif // EIGEN_TYPE_CASTING_CUDA_H
+#endif // EIGEN_TYPE_CASTING_GPU_H
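These scalar_cast_op specializations are what back .cast<>() between Eigen::half and float; on the host the guard falls through to the plain C++ conversions, so the behavior can be exercised without any GPU toolchain. A small host-only usage sketch (illustration, not part of the patch):

#include <Eigen/Core>
#include <iostream>

int main() {
  Eigen::Matrix<float, 2, 2> mf;
  mf << 1.5f, -2.25f,
        0.0f,  3.0f;                                   // all exactly representable in fp16
  // float -> half goes through scalar_cast_op<float, Eigen::half> (host branch of the guard)
  Eigen::Matrix<Eigen::half, 2, 2> mh = mf.cast<Eigen::half>();
  // half -> float goes through scalar_cast_op<Eigen::half, float>
  Eigen::Matrix<float, 2, 2> back = mh.cast<float>();
  std::cout << back << std::endl;                      // prints the original values
  return 0;
}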
diff --git a/Eigen/src/Core/arch/HIP/hcc/math_constants.h b/Eigen/src/Core/arch/HIP/hcc/math_constants.h
new file mode 100644
index 000000000..25375a0a4
--- /dev/null
+++ b/Eigen/src/Core/arch/HIP/hcc/math_constants.h
@@ -0,0 +1,23 @@
+/*
+ * math_constants.h -
+ * HIP equivalent of the CUDA header of the same name
+ */
+
+#ifndef __MATH_CONSTANTS_H__
+#define __MATH_CONSTANTS_H__
+
+/* single precision constants */
+
+#define HIPRT_INF_F __int_as_float(0x7f800000)
+#define HIPRT_NAN_F __int_as_float(0x7fffffff)
+#define HIPRT_MIN_DENORM_F __int_as_float(0x00000001)
+#define HIPRT_MAX_NORMAL_F __int_as_float(0x7f7fffff)
+#define HIPRT_NEG_ZERO_F __int_as_float(0x80000000)
+#define HIPRT_ZERO_F 0.0f
+#define HIPRT_ONE_F 1.0f
+
+/* double precision constants */
+#define HIPRT_INF __hiloint2double(0x7ff00000, 0x00000000)
+#define HIPRT_NAN __hiloint2double(0xfff80000, 0x00000000)
+
+#endif
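__int_as_float and __hiloint2double are device intrinsics, but the bit patterns themselves are plain IEEE-754, so the single-precision constants can be checked on the host. A small verification sketch (bits_to_float is an illustrative stand-in for __int_as_float, not part of the header):

#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>
#include <limits>

// Host-side stand-in for __int_as_float: reinterpret a 32-bit pattern as binary32.
static float bits_to_float(std::uint32_t bits) {
  float f;
  std::memcpy(&f, &bits, sizeof(f));
  return f;
}

int main() {
  assert(std::isinf(bits_to_float(0x7f800000u)));                                  // HIPRT_INF_F
  assert(std::isnan(bits_to_float(0x7fffffffu)));                                  // HIPRT_NAN_F
  assert(bits_to_float(0x00000001u) == std::numeric_limits<float>::denorm_min());  // HIPRT_MIN_DENORM_F
  assert(bits_to_float(0x7f7fffffu) == std::numeric_limits<float>::max());         // HIPRT_MAX_NORMAL_F
  assert(bits_to_float(0x80000000u) == 0.0f
         && std::signbit(bits_to_float(0x80000000u)));                             // HIPRT_NEG_ZERO_F
  return 0;
}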
diff --git a/Eigen/src/Core/arch/MSA/Complex.h b/Eigen/src/Core/arch/MSA/Complex.h
new file mode 100644
index 000000000..9a45cf51e
--- /dev/null
+++ b/Eigen/src/Core/arch/MSA/Complex.h
@@ -0,0 +1,759 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2018 Wave Computing, Inc.
+// Written by:
+// Chris Larsen
+// Alexey Frunze (afrunze@wavecomp.com)
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_COMPLEX_MSA_H
+#define EIGEN_COMPLEX_MSA_H
+
+#include <iostream>
+
+namespace Eigen {
+
+namespace internal {
+
+//---------- float ----------
+struct Packet2cf {
+ EIGEN_STRONG_INLINE Packet2cf() {
+ }
+ EIGEN_STRONG_INLINE explicit Packet2cf(const std::complex<float>& a,
+ const std::complex<float>& b) {
+ Packet4f t = { std::real(a), std::imag(a), std::real(b), std::imag(b) };
+ v = t;
+ }
+ EIGEN_STRONG_INLINE explicit Packet2cf(const Packet4f& a) : v(a) {
+ }
+ EIGEN_STRONG_INLINE Packet2cf(const Packet2cf& a) : v(a.v) {
+ }
+ EIGEN_STRONG_INLINE Packet2cf& operator=(const Packet2cf& b) {
+ v = b.v;
+ return *this;
+ }
+ EIGEN_STRONG_INLINE Packet2cf conjugate(void) const {
+ return Packet2cf((Packet4f)__builtin_msa_bnegi_d((v2u64)v, 63));
+ }
+ EIGEN_STRONG_INLINE Packet2cf& operator*=(const Packet2cf& b) {
+ Packet4f v1, v2;
+
+ // Get the real values of a | a1_re | a1_re | a2_re | a2_re |
+ v1 = (Packet4f)__builtin_msa_ilvev_w((v4i32)v, (v4i32)v);
+ // Get the imag values of a | a1_im | a1_im | a2_im | a2_im |
+ v2 = (Packet4f)__builtin_msa_ilvod_w((v4i32)v, (v4i32)v);
+ // Multiply the real a with b
+ v1 = pmul(v1, b.v);
+ // Multiply the imag a with b
+ v2 = pmul(v2, b.v);
+ // Conjugate v2
+ v2 = Packet2cf(v2).conjugate().v;
+ // Swap real/imag elements in v2.
+ v2 = (Packet4f)__builtin_msa_shf_w((v4i32)v2, EIGEN_MSA_SHF_I8(1, 0, 3, 2));
+ // Add and return the result
+ v = padd(v1, v2);
+ return *this;
+ }
+ EIGEN_STRONG_INLINE Packet2cf operator*(const Packet2cf& b) const {
+ return Packet2cf(*this) *= b;
+ }
+ EIGEN_STRONG_INLINE Packet2cf& operator+=(const Packet2cf& b) {
+ v = padd(v, b.v);
+ return *this;
+ }
+ EIGEN_STRONG_INLINE Packet2cf operator+(const Packet2cf& b) const {
+ return Packet2cf(*this) += b;
+ }
+ EIGEN_STRONG_INLINE Packet2cf& operator-=(const Packet2cf& b) {
+ v = psub(v, b.v);
+ return *this;
+ }
+ EIGEN_STRONG_INLINE Packet2cf operator-(const Packet2cf& b) const {
+ return Packet2cf(*this) -= b;
+ }
+ EIGEN_STRONG_INLINE Packet2cf& operator/=(const Packet2cf& b) {
+ *this *= b.conjugate();
+ Packet4f s = pmul<Packet4f>(b.v, b.v);
+ s = padd(s, (Packet4f)__builtin_msa_shf_w((v4i32)s, EIGEN_MSA_SHF_I8(1, 0, 3, 2)));
+ v = pdiv(v, s);
+ return *this;
+ }
+ EIGEN_STRONG_INLINE Packet2cf operator/(const Packet2cf& b) const {
+ return Packet2cf(*this) /= b;
+ }
+ EIGEN_STRONG_INLINE Packet2cf operator-(void) const {
+ return Packet2cf(pnegate(v));
+ }
+
+ Packet4f v;
+};
+
+inline std::ostream& operator<<(std::ostream& os, const Packet2cf& value) {
+ os << "[ (" << value.v[0] << ", " << value.v[1]
+ << "i),"
+ " ("
+ << value.v[2] << ", " << value.v[3] << "i) ]";
+ return os;
+}
+
+template <>
+struct packet_traits<std::complex<float> > : default_packet_traits {
+ typedef Packet2cf type;
+ typedef Packet2cf half;
+ enum {
+ Vectorizable = 1,
+ AlignedOnScalar = 1,
+ size = 2,
+ HasHalfPacket = 0,
+
+ HasAdd = 1,
+ HasSub = 1,
+ HasMul = 1,
+ HasDiv = 1,
+ HasNegate = 1,
+ HasAbs = 0,
+ HasAbs2 = 0,
+ HasMin = 0,
+ HasMax = 0,
+ HasSetLinear = 0,
+ HasBlend = 1
+ };
+};
+
+template <>
+struct unpacket_traits<Packet2cf> {
+ typedef std::complex<float> type;
+ enum { size = 2, alignment = Aligned16 };
+ typedef Packet2cf half;
+};
+
+template <>
+EIGEN_STRONG_INLINE Packet2cf pset1<Packet2cf>(const std::complex<float>& from) {
+ EIGEN_MSA_DEBUG;
+
+ float f0 = from.real(), f1 = from.imag();
+ Packet4f v0 = { f0, f0, f0, f0 };
+ Packet4f v1 = { f1, f1, f1, f1 };
+ return Packet2cf((Packet4f)__builtin_msa_ilvr_w((Packet4i)v1, (Packet4i)v0));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2cf padd<Packet2cf>(const Packet2cf& a, const Packet2cf& b) {
+ EIGEN_MSA_DEBUG;
+
+ return a + b;
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2cf psub<Packet2cf>(const Packet2cf& a, const Packet2cf& b) {
+ EIGEN_MSA_DEBUG;
+
+ return a - b;
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2cf pnegate(const Packet2cf& a) {
+ EIGEN_MSA_DEBUG;
+
+ return -a;
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2cf pconj(const Packet2cf& a) {
+ EIGEN_MSA_DEBUG;
+
+ return a.conjugate();
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2cf pmul<Packet2cf>(const Packet2cf& a, const Packet2cf& b) {
+ EIGEN_MSA_DEBUG;
+
+ return a * b;
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2cf pand<Packet2cf>(const Packet2cf& a, const Packet2cf& b) {
+ EIGEN_MSA_DEBUG;
+
+ return Packet2cf(pand(a.v, b.v));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2cf por<Packet2cf>(const Packet2cf& a, const Packet2cf& b) {
+ EIGEN_MSA_DEBUG;
+
+ return Packet2cf(por(a.v, b.v));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2cf pxor<Packet2cf>(const Packet2cf& a, const Packet2cf& b) {
+ EIGEN_MSA_DEBUG;
+
+ return Packet2cf(pxor(a.v, b.v));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2cf pandnot<Packet2cf>(const Packet2cf& a, const Packet2cf& b) {
+ EIGEN_MSA_DEBUG;
+
+ return Packet2cf(pandnot(a.v, b.v));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2cf pload<Packet2cf>(const std::complex<float>* from) {
+ EIGEN_MSA_DEBUG;
+
+ EIGEN_DEBUG_ALIGNED_LOAD return Packet2cf(pload<Packet4f>((const float*)from));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2cf ploadu<Packet2cf>(const std::complex<float>* from) {
+ EIGEN_MSA_DEBUG;
+
+ EIGEN_DEBUG_UNALIGNED_LOAD return Packet2cf(ploadu<Packet4f>((const float*)from));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2cf ploaddup<Packet2cf>(const std::complex<float>* from) {
+ EIGEN_MSA_DEBUG;
+
+ return pset1<Packet2cf>(*from);
+}
+
+template <>
+EIGEN_STRONG_INLINE void pstore<std::complex<float> >(std::complex<float>* to,
+ const Packet2cf& from) {
+ EIGEN_MSA_DEBUG;
+
+ EIGEN_DEBUG_ALIGNED_STORE pstore<float>((float*)to, from.v);
+}
+
+template <>
+EIGEN_STRONG_INLINE void pstoreu<std::complex<float> >(std::complex<float>* to,
+ const Packet2cf& from) {
+ EIGEN_MSA_DEBUG;
+
+ EIGEN_DEBUG_UNALIGNED_STORE pstoreu<float>((float*)to, from.v);
+}
+
+template <>
+EIGEN_DEVICE_FUNC inline Packet2cf pgather<std::complex<float>, Packet2cf>(
+ const std::complex<float>* from, Index stride) {
+ EIGEN_MSA_DEBUG;
+
+ return Packet2cf(from[0 * stride], from[1 * stride]);
+}
+
+template <>
+EIGEN_DEVICE_FUNC inline void pscatter<std::complex<float>, Packet2cf>(std::complex<float>* to,
+ const Packet2cf& from,
+ Index stride) {
+ EIGEN_MSA_DEBUG;
+
+ *to = std::complex<float>(from.v[0], from.v[1]);
+ to += stride;
+ *to = std::complex<float>(from.v[2], from.v[3]);
+}
+
+template <>
+EIGEN_STRONG_INLINE void prefetch<std::complex<float> >(const std::complex<float>* addr) {
+ EIGEN_MSA_DEBUG;
+
+ prefetch(reinterpret_cast<const float*>(addr));
+}
+
+template <>
+EIGEN_STRONG_INLINE std::complex<float> pfirst<Packet2cf>(const Packet2cf& a) {
+ EIGEN_MSA_DEBUG;
+
+ return std::complex<float>(a.v[0], a.v[1]);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2cf preverse(const Packet2cf& a) {
+ EIGEN_MSA_DEBUG;
+
+ return Packet2cf((Packet4f)__builtin_msa_shf_w((v4i32)a.v, EIGEN_MSA_SHF_I8(2, 3, 0, 1)));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2cf pcplxflip<Packet2cf>(const Packet2cf& a) {
+ EIGEN_MSA_DEBUG;
+
+ return Packet2cf((Packet4f)__builtin_msa_shf_w((v4i32)a.v, EIGEN_MSA_SHF_I8(1, 0, 3, 2)));
+}
+
+template <>
+EIGEN_STRONG_INLINE std::complex<float> predux<Packet2cf>(const Packet2cf& a) {
+ EIGEN_MSA_DEBUG;
+
+ Packet4f value = (Packet4f)preverse((Packet2d)a.v);
+ value += a.v;
+ return std::complex<float>(value[0], value[1]);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2cf preduxp<Packet2cf>(const Packet2cf* vecs) {
+ EIGEN_MSA_DEBUG;
+
+ Packet4f sum1, sum2, sum;
+
+ // Add the first two 64-bit float32x2_t of vecs[0]
+ sum1 = (Packet4f)__builtin_msa_ilvr_d((v2i64)vecs[1].v, (v2i64)vecs[0].v);
+ sum2 = (Packet4f)__builtin_msa_ilvl_d((v2i64)vecs[1].v, (v2i64)vecs[0].v);
+ sum = padd(sum1, sum2);
+
+ return Packet2cf(sum);
+}
+
+template <>
+EIGEN_STRONG_INLINE std::complex<float> predux_mul<Packet2cf>(const Packet2cf& a) {
+ EIGEN_MSA_DEBUG;
+
+ return std::complex<float>((a.v[0] * a.v[2]) - (a.v[1] * a.v[3]),
+ (a.v[0] * a.v[3]) + (a.v[1] * a.v[2]));
+}
+
+template <int Offset>
+struct palign_impl<Offset, Packet2cf> {
+ EIGEN_STRONG_INLINE static void run(Packet2cf& first, const Packet2cf& second) {
+ if (Offset == 1) {
+ first.v = (Packet4f)__builtin_msa_sldi_b((v16i8)second.v, (v16i8)first.v, Offset * 8);
+ }
+ }
+};
+
+template <>
+struct conj_helper<Packet2cf, Packet2cf, false, true> {
+ EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y,
+ const Packet2cf& c) const {
+ return padd(pmul(x, y), c);
+ }
+
+ EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) const {
+ return internal::pmul(a, pconj(b));
+ }
+};
+
+template <>
+struct conj_helper<Packet2cf, Packet2cf, true, false> {
+ EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y,
+ const Packet2cf& c) const {
+ return padd(pmul(x, y), c);
+ }
+
+ EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) const {
+ return internal::pmul(pconj(a), b);
+ }
+};
+
+template <>
+struct conj_helper<Packet2cf, Packet2cf, true, true> {
+ EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y,
+ const Packet2cf& c) const {
+ return padd(pmul(x, y), c);
+ }
+
+ EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) const {
+ return pconj(internal::pmul(a, b));
+ }
+};
+
+EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet2cf, Packet4f)
+
+template <>
+EIGEN_STRONG_INLINE Packet2cf pdiv<Packet2cf>(const Packet2cf& a, const Packet2cf& b) {
+ EIGEN_MSA_DEBUG;
+
+ return a / b;
+}
+
+inline std::ostream& operator<<(std::ostream& os, const PacketBlock<Packet2cf, 2>& value) {
+ os << "[ " << value.packet[0] << ", " << std::endl << " " << value.packet[1] << " ]";
+ return os;
+}
+
+EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet2cf, 2>& kernel) {
+ EIGEN_MSA_DEBUG;
+
+ Packet4f tmp =
+ (Packet4f)__builtin_msa_ilvl_d((v2i64)kernel.packet[1].v, (v2i64)kernel.packet[0].v);
+ kernel.packet[0].v =
+ (Packet4f)__builtin_msa_ilvr_d((v2i64)kernel.packet[1].v, (v2i64)kernel.packet[0].v);
+ kernel.packet[1].v = tmp;
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2cf pblend(const Selector<2>& ifPacket, const Packet2cf& thenPacket,
+ const Packet2cf& elsePacket) {
+ return (Packet2cf)(Packet4f)pblend<Packet2d>(ifPacket, (Packet2d)thenPacket.v,
+ (Packet2d)elsePacket.v);
+}
+
+//---------- double ----------
+
+struct Packet1cd {
+ EIGEN_STRONG_INLINE Packet1cd() {
+ }
+ EIGEN_STRONG_INLINE explicit Packet1cd(const std::complex<double>& a) {
+ v[0] = std::real(a);
+ v[1] = std::imag(a);
+ }
+ EIGEN_STRONG_INLINE explicit Packet1cd(const Packet2d& a) : v(a) {
+ }
+ EIGEN_STRONG_INLINE Packet1cd(const Packet1cd& a) : v(a.v) {
+ }
+ EIGEN_STRONG_INLINE Packet1cd& operator=(const Packet1cd& b) {
+ v = b.v;
+ return *this;
+ }
+ EIGEN_STRONG_INLINE Packet1cd conjugate(void) const {
+ static const v2u64 p2ul_CONJ_XOR = { 0x0, 0x8000000000000000 };
+ return (Packet1cd)pxor(v, (Packet2d)p2ul_CONJ_XOR);
+ }
+ EIGEN_STRONG_INLINE Packet1cd& operator*=(const Packet1cd& b) {
+ Packet2d v1, v2;
+
+ // Get the real values of a | a1_re | a1_re
+ v1 = (Packet2d)__builtin_msa_ilvev_d((v2i64)v, (v2i64)v);
+ // Get the imag values of a | a1_im | a1_im
+ v2 = (Packet2d)__builtin_msa_ilvod_d((v2i64)v, (v2i64)v);
+ // Multiply the real a with b
+ v1 = pmul(v1, b.v);
+ // Multiply the imag a with b
+ v2 = pmul(v2, b.v);
+ // Conjugate v2
+ v2 = Packet1cd(v2).conjugate().v;
+ // Swap real/imag elements in v2.
+ v2 = (Packet2d)__builtin_msa_shf_w((v4i32)v2, EIGEN_MSA_SHF_I8(2, 3, 0, 1));
+ // Add and return the result
+ v = padd(v1, v2);
+ return *this;
+ }
+ EIGEN_STRONG_INLINE Packet1cd operator*(const Packet1cd& b) const {
+ return Packet1cd(*this) *= b;
+ }
+ EIGEN_STRONG_INLINE Packet1cd& operator+=(const Packet1cd& b) {
+ v = padd(v, b.v);
+ return *this;
+ }
+ EIGEN_STRONG_INLINE Packet1cd operator+(const Packet1cd& b) const {
+ return Packet1cd(*this) += b;
+ }
+ EIGEN_STRONG_INLINE Packet1cd& operator-=(const Packet1cd& b) {
+ v = psub(v, b.v);
+ return *this;
+ }
+ EIGEN_STRONG_INLINE Packet1cd operator-(const Packet1cd& b) const {
+ return Packet1cd(*this) -= b;
+ }
+ EIGEN_STRONG_INLINE Packet1cd& operator/=(const Packet1cd& b) {
+ *this *= b.conjugate();
+ Packet2d s = pmul<Packet2d>(b.v, b.v);
+ s = padd(s, preverse<Packet2d>(s));
+ v = pdiv(v, s);
+ return *this;
+ }
+ EIGEN_STRONG_INLINE Packet1cd operator/(const Packet1cd& b) const {
+ return Packet1cd(*this) /= b;
+ }
+ EIGEN_STRONG_INLINE Packet1cd operator-(void) const {
+ return Packet1cd(pnegate(v));
+ }
+
+ Packet2d v;
+};
+
+inline std::ostream& operator<<(std::ostream& os, const Packet1cd& value) {
+ os << "[ (" << value.v[0] << ", " << value.v[1] << "i) ]";
+ return os;
+}
+
+template <>
+struct packet_traits<std::complex<double> > : default_packet_traits {
+ typedef Packet1cd type;
+ typedef Packet1cd half;
+ enum {
+ Vectorizable = 1,
+ AlignedOnScalar = 0,
+ size = 1,
+ HasHalfPacket = 0,
+
+ HasAdd = 1,
+ HasSub = 1,
+ HasMul = 1,
+ HasDiv = 1,
+ HasNegate = 1,
+ HasAbs = 0,
+ HasAbs2 = 0,
+ HasMin = 0,
+ HasMax = 0,
+ HasSetLinear = 0
+ };
+};
+
+template <>
+struct unpacket_traits<Packet1cd> {
+ typedef std::complex<double> type;
+ enum { size = 1, alignment = Aligned16 };
+ typedef Packet1cd half;
+};
+
+template <>
+EIGEN_STRONG_INLINE Packet1cd pload<Packet1cd>(const std::complex<double>* from) {
+ EIGEN_MSA_DEBUG;
+
+ EIGEN_DEBUG_ALIGNED_LOAD return Packet1cd(pload<Packet2d>((const double*)from));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1cd ploadu<Packet1cd>(const std::complex<double>* from) {
+ EIGEN_MSA_DEBUG;
+
+ EIGEN_DEBUG_UNALIGNED_LOAD return Packet1cd(ploadu<Packet2d>((const double*)from));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1cd pset1<Packet1cd>(const std::complex<double>& from) {
+ EIGEN_MSA_DEBUG;
+
+ return Packet1cd(from);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1cd padd<Packet1cd>(const Packet1cd& a, const Packet1cd& b) {
+ EIGEN_MSA_DEBUG;
+
+ return a + b;
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1cd psub<Packet1cd>(const Packet1cd& a, const Packet1cd& b) {
+ EIGEN_MSA_DEBUG;
+
+ return a - b;
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1cd pnegate(const Packet1cd& a) {
+ EIGEN_MSA_DEBUG;
+
+ return -a;
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1cd pconj(const Packet1cd& a) {
+ EIGEN_MSA_DEBUG;
+
+ return a.conjugate();
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1cd pmul<Packet1cd>(const Packet1cd& a, const Packet1cd& b) {
+ EIGEN_MSA_DEBUG;
+
+ return a * b;
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1cd pand<Packet1cd>(const Packet1cd& a, const Packet1cd& b) {
+ EIGEN_MSA_DEBUG;
+
+ return Packet1cd(pand(a.v, b.v));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1cd por<Packet1cd>(const Packet1cd& a, const Packet1cd& b) {
+ EIGEN_MSA_DEBUG;
+
+ return Packet1cd(por(a.v, b.v));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1cd pxor<Packet1cd>(const Packet1cd& a, const Packet1cd& b) {
+ EIGEN_MSA_DEBUG;
+
+ return Packet1cd(pxor(a.v, b.v));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1cd pandnot<Packet1cd>(const Packet1cd& a, const Packet1cd& b) {
+ EIGEN_MSA_DEBUG;
+
+ return Packet1cd(pandnot(a.v, b.v));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1cd ploaddup<Packet1cd>(const std::complex<double>* from) {
+ EIGEN_MSA_DEBUG;
+
+ return pset1<Packet1cd>(*from);
+}
+
+template <>
+EIGEN_STRONG_INLINE void pstore<std::complex<double> >(std::complex<double>* to,
+ const Packet1cd& from) {
+ EIGEN_MSA_DEBUG;
+
+ EIGEN_DEBUG_ALIGNED_STORE pstore<double>((double*)to, from.v);
+}
+
+template <>
+EIGEN_STRONG_INLINE void pstoreu<std::complex<double> >(std::complex<double>* to,
+ const Packet1cd& from) {
+ EIGEN_MSA_DEBUG;
+
+ EIGEN_DEBUG_UNALIGNED_STORE pstoreu<double>((double*)to, from.v);
+}
+
+template <>
+EIGEN_STRONG_INLINE void prefetch<std::complex<double> >(const std::complex<double>* addr) {
+ EIGEN_MSA_DEBUG;
+
+ prefetch(reinterpret_cast<const double*>(addr));
+}
+
+template <>
+EIGEN_DEVICE_FUNC inline Packet1cd pgather<std::complex<double>, Packet1cd>(
+ const std::complex<double>* from, Index stride __attribute__((unused))) {
+ EIGEN_MSA_DEBUG;
+
+ Packet1cd res;
+ res.v[0] = std::real(from[0]);
+ res.v[1] = std::imag(from[0]);
+ return res;
+}
+
+template <>
+EIGEN_DEVICE_FUNC inline void pscatter<std::complex<double>, Packet1cd>(std::complex<double>* to,
+ const Packet1cd& from,
+ Index stride
+ __attribute__((unused))) {
+ EIGEN_MSA_DEBUG;
+
+ pstore(to, from);
+}
+
+template <>
+EIGEN_STRONG_INLINE std::complex<double> pfirst<Packet1cd>(const Packet1cd& a) {
+ EIGEN_MSA_DEBUG;
+
+ return std::complex<double>(a.v[0], a.v[1]);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1cd preverse(const Packet1cd& a) {
+ EIGEN_MSA_DEBUG;
+
+ return a;
+}
+
+template <>
+EIGEN_STRONG_INLINE std::complex<double> predux<Packet1cd>(const Packet1cd& a) {
+ EIGEN_MSA_DEBUG;
+
+ return pfirst(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1cd preduxp<Packet1cd>(const Packet1cd* vecs) {
+ EIGEN_MSA_DEBUG;
+
+ return vecs[0];
+}
+
+template <>
+EIGEN_STRONG_INLINE std::complex<double> predux_mul<Packet1cd>(const Packet1cd& a) {
+ EIGEN_MSA_DEBUG;
+
+ return pfirst(a);
+}
+
+template <int Offset>
+struct palign_impl<Offset, Packet1cd> {
+ static EIGEN_STRONG_INLINE void run(Packet1cd& /*first*/, const Packet1cd& /*second*/) {
+ // FIXME is it sure we never have to align a Packet1cd?
+ // Even though a std::complex<double> has 16 bytes, it is not necessarily aligned on a 16 bytes
+ // boundary...
+ }
+};
+
+template <>
+struct conj_helper<Packet1cd, Packet1cd, false, true> {
+ EIGEN_STRONG_INLINE Packet1cd pmadd(const Packet1cd& x, const Packet1cd& y,
+ const Packet1cd& c) const {
+ return padd(pmul(x, y), c);
+ }
+
+ EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& a, const Packet1cd& b) const {
+ return internal::pmul(a, pconj(b));
+ }
+};
+
+template <>
+struct conj_helper<Packet1cd, Packet1cd, true, false> {
+ EIGEN_STRONG_INLINE Packet1cd pmadd(const Packet1cd& x, const Packet1cd& y,
+ const Packet1cd& c) const {
+ return padd(pmul(x, y), c);
+ }
+
+ EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& a, const Packet1cd& b) const {
+ return internal::pmul(pconj(a), b);
+ }
+};
+
+template <>
+struct conj_helper<Packet1cd, Packet1cd, true, true> {
+ EIGEN_STRONG_INLINE Packet1cd pmadd(const Packet1cd& x, const Packet1cd& y,
+ const Packet1cd& c) const {
+ return padd(pmul(x, y), c);
+ }
+
+ EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& a, const Packet1cd& b) const {
+ return pconj(internal::pmul(a, b));
+ }
+};
+
+EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet1cd, Packet2d)
+
+template <>
+EIGEN_STRONG_INLINE Packet1cd pdiv<Packet1cd>(const Packet1cd& a, const Packet1cd& b) {
+ EIGEN_MSA_DEBUG;
+
+ return a / b;
+}
+
+EIGEN_STRONG_INLINE Packet1cd pcplxflip /*<Packet1cd>*/ (const Packet1cd& x) {
+ EIGEN_MSA_DEBUG;
+
+ return Packet1cd(preverse(Packet2d(x.v)));
+}
+
+inline std::ostream& operator<<(std::ostream& os, const PacketBlock<Packet1cd, 2>& value) {
+ os << "[ " << value.packet[0] << ", " << std::endl << " " << value.packet[1] << " ]";
+ return os;
+}
+
+EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet1cd, 2>& kernel) {
+ EIGEN_MSA_DEBUG;
+
+ Packet2d v1, v2;
+
+ v1 = (Packet2d)__builtin_msa_ilvev_d((v2i64)kernel.packet[0].v, (v2i64)kernel.packet[1].v);
+ // Get the imag values of a
+ v2 = (Packet2d)__builtin_msa_ilvod_d((v2i64)kernel.packet[0].v, (v2i64)kernel.packet[1].v);
+
+ kernel.packet[0].v = v1;
+ kernel.packet[1].v = v2;
+}
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_COMPLEX_MSA_H
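The operator*= kernel above implements the textbook complex product (ar + i*ai)(br + i*bi) = (ar*br - ai*bi) + i(ar*bi + ai*br): v1 holds a's real part duplicated across each lane, v2 its imaginary part; both are multiplied by b, then conjugating v2 and swapping its real/imag slots lines the four products up so a single add finishes the job. A scalar walk-through of one lane (illustration only, not Eigen code):

#include <cassert>
#include <complex>

// Scalar mirror of the MSA complex multiply, one complex lane at a time.
static std::complex<float> mul_like_packet2cf(std::complex<float> a, std::complex<float> b) {
  // v1 = | a_re | a_re | multiplied by b = | a_re*b_re | a_re*b_im |
  float v1_re = a.real() * b.real();
  float v1_im = a.real() * b.imag();
  // v2 = | a_im | a_im | multiplied by b = | a_im*b_re | a_im*b_im |
  float v2_re = a.imag() * b.real();
  float v2_im = a.imag() * b.imag();
  // Conjugate v2 (negate its imaginary slot), then swap its real/imag slots.
  float swapped_re = -v2_im;   // -(a_im*b_im)
  float swapped_im =  v2_re;   //   a_im*b_re
  // Add: (a_re*b_re - a_im*b_im, a_re*b_im + a_im*b_re)
  return std::complex<float>(v1_re + swapped_re, v1_im + swapped_im);
}

int main() {
  std::complex<float> a(1.f, 2.f), b(3.f, -4.f);
  assert(mul_like_packet2cf(a, b) == a * b);   // (11, 2) either way
  return 0;
}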
diff --git a/Eigen/src/Core/arch/MSA/MathFunctions.h b/Eigen/src/Core/arch/MSA/MathFunctions.h
new file mode 100644
index 000000000..98e23e36f
--- /dev/null
+++ b/Eigen/src/Core/arch/MSA/MathFunctions.h
@@ -0,0 +1,387 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2007 Julien Pommier
+// Copyright (C) 2014 Pedro Gonnet (pedro.gonnet@gmail.com)
+// Copyright (C) 2016 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Copyright (C) 2018 Wave Computing, Inc.
+// Written by:
+// Chris Larsen
+// Alexey Frunze (afrunze@wavecomp.com)
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+/* The sin, cos, exp, and log functions of this file come from
+ * Julien Pommier's sse math library: http://gruntthepeon.free.fr/ssemath/
+ */
+
+/* The tanh function of this file is an adaptation of
+ * template<typename T> T generic_fast_tanh_float(const T&)
+ * from MathFunctionsImpl.h.
+ */
+
+#ifndef EIGEN_MATH_FUNCTIONS_MSA_H
+#define EIGEN_MATH_FUNCTIONS_MSA_H
+
+namespace Eigen {
+
+namespace internal {
+
+template <>
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f
+plog<Packet4f>(const Packet4f& _x) {
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_SQRTHF, 0.707106781186547524f);
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p0, 7.0376836292e-2f);
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p1, -1.1514610310e-1f);
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p2, 1.1676998740e-1f);
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p3, -1.2420140846e-1f);
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p4, +1.4249322787e-1f);
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p5, -1.6668057665e-1f);
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p6, +2.0000714765e-1f);
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p7, -2.4999993993e-1f);
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p8, +3.3333331174e-1f);
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_q1, -2.12194440e-4f);
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_q2, 0.693359375f);
+ static _EIGEN_DECLARE_CONST_Packet4f(half, 0.5f);
+ static _EIGEN_DECLARE_CONST_Packet4f(1, 1.0f);
+
+ // Convert negative argument into NAN (quiet negative, to be specific).
+ Packet4f zero = (Packet4f)__builtin_msa_ldi_w(0);
+ Packet4i neg_mask = __builtin_msa_fclt_w(_x, zero);
+ Packet4i zero_mask = __builtin_msa_fceq_w(_x, zero);
+ Packet4f non_neg_x_or_nan = padd(_x, (Packet4f)neg_mask); // Add 0.0 or NAN.
+ Packet4f x = non_neg_x_or_nan;
+
+ // Extract exponent from x = mantissa * 2**exponent, where 1.0 <= mantissa < 2.0.
+ // N.B. the exponent is one less of what frexpf() would return.
+ Packet4i e_int = __builtin_msa_ftint_s_w(__builtin_msa_flog2_w(x));
+ // Multiply x by 2**(-exponent-1) to get 0.5 <= x < 1.0 as from frexpf().
+ x = __builtin_msa_fexp2_w(x, (Packet4i)__builtin_msa_nori_b((v16u8)e_int, 0));
+
+ /*
+ if (x < SQRTHF) {
+ x = x + x - 1.0;
+ } else {
+ e += 1;
+ x = x - 1.0;
+ }
+ */
+ Packet4f xx = padd(x, x);
+ Packet4i ge_mask = __builtin_msa_fcle_w(p4f_cephes_SQRTHF, x);
+ e_int = psub(e_int, ge_mask);
+ x = (Packet4f)__builtin_msa_bsel_v((v16u8)ge_mask, (v16u8)xx, (v16u8)x);
+ x = psub(x, p4f_1);
+ Packet4f e = __builtin_msa_ffint_s_w(e_int);
+
+ Packet4f x2 = pmul(x, x);
+ Packet4f x3 = pmul(x2, x);
+
+ Packet4f y, y1, y2;
+ y = pmadd(p4f_cephes_log_p0, x, p4f_cephes_log_p1);
+ y1 = pmadd(p4f_cephes_log_p3, x, p4f_cephes_log_p4);
+ y2 = pmadd(p4f_cephes_log_p6, x, p4f_cephes_log_p7);
+ y = pmadd(y, x, p4f_cephes_log_p2);
+ y1 = pmadd(y1, x, p4f_cephes_log_p5);
+ y2 = pmadd(y2, x, p4f_cephes_log_p8);
+ y = pmadd(y, x3, y1);
+ y = pmadd(y, x3, y2);
+ y = pmul(y, x3);
+
+ y = pmadd(e, p4f_cephes_log_q1, y);
+ x = __builtin_msa_fmsub_w(x, x2, p4f_half);
+ x = padd(x, y);
+ x = pmadd(e, p4f_cephes_log_q2, x);
+
+ // x is now the logarithm result candidate. We still need to handle the
+ // extreme arguments of zero and positive infinity, though.
+ // N.B. if the argument is +INFINITY, x is NAN because the polynomial terms
+ // contain infinities of both signs (see the coefficients and code above).
+ // INFINITY - INFINITY is NAN.
+
+ // If the argument is +INFINITY, make it the new result candidate.
+ // To achieve that we choose the smaller of the result candidate and the
+ // argument.
+ // This is correct for all finite pairs of values (the logarithm is smaller
+ // than the argument).
+ // This is also correct in the special case when the argument is +INFINITY
+ // and the result candidate is NAN. This is because the fmin.df instruction
+ // prefers non-NANs to NANs.
+ x = __builtin_msa_fmin_w(x, non_neg_x_or_nan);
+
+ // If the argument is zero (including -0.0), the result becomes -INFINITY.
+ Packet4i neg_infs = __builtin_msa_slli_w(zero_mask, 23);
+ x = (Packet4f)__builtin_msa_bsel_v((v16u8)zero_mask, (v16u8)x, (v16u8)neg_infs);
+
+ return x;
+}
+
+template <>
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f
+pexp<Packet4f>(const Packet4f& _x) {
+ // Limiting single-precision pexp's argument to [-128, +128] lets pexp
+ // reach 0 and INFINITY naturally.
+ static _EIGEN_DECLARE_CONST_Packet4f(exp_lo, -128.0f);
+ static _EIGEN_DECLARE_CONST_Packet4f(exp_hi, +128.0f);
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_LOG2EF, 1.44269504088896341f);
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_C1, 0.693359375f);
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_C2, -2.12194440e-4f);
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p0, 1.9875691500e-4f);
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p1, 1.3981999507e-3f);
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p2, 8.3334519073e-3f);
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p3, 4.1665795894e-2f);
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p4, 1.6666665459e-1f);
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p5, 5.0000001201e-1f);
+ static _EIGEN_DECLARE_CONST_Packet4f(half, 0.5f);
+ static _EIGEN_DECLARE_CONST_Packet4f(1, 1.0f);
+
+ Packet4f x = _x;
+
+ // Clamp x.
+ x = (Packet4f)__builtin_msa_bsel_v((v16u8)__builtin_msa_fclt_w(x, p4f_exp_lo), (v16u8)x,
+ (v16u8)p4f_exp_lo);
+ x = (Packet4f)__builtin_msa_bsel_v((v16u8)__builtin_msa_fclt_w(p4f_exp_hi, x), (v16u8)x,
+ (v16u8)p4f_exp_hi);
+
+ // Round to nearest integer by adding 0.5 (with x's sign) and truncating.
+ Packet4f x2_add = (Packet4f)__builtin_msa_binsli_w((v4u32)p4f_half, (v4u32)x, 0);
+ Packet4f x2 = pmadd(x, p4f_cephes_LOG2EF, x2_add);
+ Packet4i x2_int = __builtin_msa_ftrunc_s_w(x2);
+ Packet4f x2_int_f = __builtin_msa_ffint_s_w(x2_int);
+
+ x = __builtin_msa_fmsub_w(x, x2_int_f, p4f_cephes_exp_C1);
+ x = __builtin_msa_fmsub_w(x, x2_int_f, p4f_cephes_exp_C2);
+
+ Packet4f z = pmul(x, x);
+
+ Packet4f y = p4f_cephes_exp_p0;
+ y = pmadd(y, x, p4f_cephes_exp_p1);
+ y = pmadd(y, x, p4f_cephes_exp_p2);
+ y = pmadd(y, x, p4f_cephes_exp_p3);
+ y = pmadd(y, x, p4f_cephes_exp_p4);
+ y = pmadd(y, x, p4f_cephes_exp_p5);
+ y = pmadd(y, z, x);
+ y = padd(y, p4f_1);
+
+ // y *= 2**exponent.
+ y = __builtin_msa_fexp2_w(y, x2_int);
+
+ return y;
+}
+
+template <>
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f
+ptanh<Packet4f>(const Packet4f& _x) {
+ static _EIGEN_DECLARE_CONST_Packet4f(tanh_tiny, 1e-4f);
+ static _EIGEN_DECLARE_CONST_Packet4f(tanh_hi, 9.0f);
+ // The monomial coefficients of the numerator polynomial (odd).
+ static _EIGEN_DECLARE_CONST_Packet4f(alpha_1, 4.89352455891786e-3f);
+ static _EIGEN_DECLARE_CONST_Packet4f(alpha_3, 6.37261928875436e-4f);
+ static _EIGEN_DECLARE_CONST_Packet4f(alpha_5, 1.48572235717979e-5f);
+ static _EIGEN_DECLARE_CONST_Packet4f(alpha_7, 5.12229709037114e-8f);
+ static _EIGEN_DECLARE_CONST_Packet4f(alpha_9, -8.60467152213735e-11f);
+ static _EIGEN_DECLARE_CONST_Packet4f(alpha_11, 2.00018790482477e-13f);
+ static _EIGEN_DECLARE_CONST_Packet4f(alpha_13, -2.76076847742355e-16f);
+ // The monomial coefficients of the denominator polynomial (even).
+ static _EIGEN_DECLARE_CONST_Packet4f(beta_0, 4.89352518554385e-3f);
+ static _EIGEN_DECLARE_CONST_Packet4f(beta_2, 2.26843463243900e-3f);
+ static _EIGEN_DECLARE_CONST_Packet4f(beta_4, 1.18534705686654e-4f);
+ static _EIGEN_DECLARE_CONST_Packet4f(beta_6, 1.19825839466702e-6f);
+
+ Packet4f x = pabs(_x);
+ Packet4i tiny_mask = __builtin_msa_fclt_w(x, p4f_tanh_tiny);
+
+ // Clamp the inputs to the range [-9, 9] since anything outside
+ // this range is -/+1.0f in single-precision.
+ x = (Packet4f)__builtin_msa_bsel_v((v16u8)__builtin_msa_fclt_w(p4f_tanh_hi, x), (v16u8)x,
+ (v16u8)p4f_tanh_hi);
+
+ // Since the polynomials are odd/even, we need x**2.
+ Packet4f x2 = pmul(x, x);
+
+ // Evaluate the numerator polynomial p.
+ Packet4f p = pmadd(x2, p4f_alpha_13, p4f_alpha_11);
+ p = pmadd(x2, p, p4f_alpha_9);
+ p = pmadd(x2, p, p4f_alpha_7);
+ p = pmadd(x2, p, p4f_alpha_5);
+ p = pmadd(x2, p, p4f_alpha_3);
+ p = pmadd(x2, p, p4f_alpha_1);
+ p = pmul(x, p);
+
+ // Evaluate the denominator polynomial q.
+ Packet4f q = pmadd(x2, p4f_beta_6, p4f_beta_4);
+ q = pmadd(x2, q, p4f_beta_2);
+ q = pmadd(x2, q, p4f_beta_0);
+
+ // Divide the numerator by the denominator.
+ p = pdiv(p, q);
+
+ // Reinstate the sign.
+ p = (Packet4f)__builtin_msa_binsli_w((v4u32)p, (v4u32)_x, 0);
+
+ // When the argument is very small in magnitude it's more accurate to just return it.
+ p = (Packet4f)__builtin_msa_bsel_v((v16u8)tiny_mask, (v16u8)p, (v16u8)_x);
+
+ return p;
+}
+
+template <bool sine>
+Packet4f psincos_inner_msa_float(const Packet4f& _x) {
+ static _EIGEN_DECLARE_CONST_Packet4f(sincos_max_arg, 13176795.0f); // Approx. (2**24) / (4/Pi).
+ static _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP1, -0.78515625f);
+ static _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP2, -2.4187564849853515625e-4f);
+ static _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP3, -3.77489497744594108e-8f);
+ static _EIGEN_DECLARE_CONST_Packet4f(sincof_p0, -1.9515295891e-4f);
+ static _EIGEN_DECLARE_CONST_Packet4f(sincof_p1, 8.3321608736e-3f);
+ static _EIGEN_DECLARE_CONST_Packet4f(sincof_p2, -1.6666654611e-1f);
+ static _EIGEN_DECLARE_CONST_Packet4f(coscof_p0, 2.443315711809948e-5f);
+ static _EIGEN_DECLARE_CONST_Packet4f(coscof_p1, -1.388731625493765e-3f);
+ static _EIGEN_DECLARE_CONST_Packet4f(coscof_p2, 4.166664568298827e-2f);
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_FOPI, 1.27323954473516f); // 4/Pi.
+ static _EIGEN_DECLARE_CONST_Packet4f(half, 0.5f);
+ static _EIGEN_DECLARE_CONST_Packet4f(1, 1.0f);
+
+ Packet4f x = pabs(_x);
+
+ // Translate infinite arguments into NANs.
+ Packet4f zero_or_nan_if_inf = psub(_x, _x);
+ x = padd(x, zero_or_nan_if_inf);
+ // Prevent sin/cos from generating values larger than 1.0 in magnitude
+ // for very large arguments by setting x to 0.0.
+ Packet4i small_or_nan_mask = __builtin_msa_fcult_w(x, p4f_sincos_max_arg);
+ x = pand(x, (Packet4f)small_or_nan_mask);
+
+ // Scale x by 4/Pi to find x's octant.
+ Packet4f y = pmul(x, p4f_cephes_FOPI);
+ // Get the octant. We'll reduce x by this number of octants or by one more than it.
+ Packet4i y_int = __builtin_msa_ftrunc_s_w(y);
+ // x's from even-numbered octants will translate to octant 0: [0, +Pi/4].
+ // x's from odd-numbered octants will translate to octant -1: [-Pi/4, 0].
+ // Adjustment for odd-numbered octants: octant = (octant + 1) & (~1).
+ Packet4i y_int1 = __builtin_msa_addvi_w(y_int, 1);
+ Packet4i y_int2 = (Packet4i)__builtin_msa_bclri_w((Packet4ui)y_int1, 0);
+ y = __builtin_msa_ffint_s_w(y_int2);
+
+ // Compute the sign to apply to the polynomial.
+ Packet4i sign_mask = sine ? pxor(__builtin_msa_slli_w(y_int1, 29), (Packet4i)_x)
+ : __builtin_msa_slli_w(__builtin_msa_addvi_w(y_int, 3), 29);
+
+ // Get the polynomial selection mask.
+ // We'll calculate both (sin and cos) polynomials and then select from the two.
+ Packet4i poly_mask = __builtin_msa_ceqi_w(__builtin_msa_slli_w(y_int2, 30), 0);
+
+ // Reduce x by y octants to get: -Pi/4 <= x <= +Pi/4.
+ // The magic pass: "Extended precision modular arithmetic"
+ // x = ((x - y * DP1) - y * DP2) - y * DP3
+ Packet4f tmp1 = pmul(y, p4f_minus_cephes_DP1);
+ Packet4f tmp2 = pmul(y, p4f_minus_cephes_DP2);
+ Packet4f tmp3 = pmul(y, p4f_minus_cephes_DP3);
+ x = padd(x, tmp1);
+ x = padd(x, tmp2);
+ x = padd(x, tmp3);
+
+ // Evaluate the cos(x) polynomial.
+ y = p4f_coscof_p0;
+ Packet4f z = pmul(x, x);
+ y = pmadd(y, z, p4f_coscof_p1);
+ y = pmadd(y, z, p4f_coscof_p2);
+ y = pmul(y, z);
+ y = pmul(y, z);
+ y = __builtin_msa_fmsub_w(y, z, p4f_half);
+ y = padd(y, p4f_1);
+
+ // Evaluate the sin(x) polynomial.
+ Packet4f y2 = p4f_sincof_p0;
+ y2 = pmadd(y2, z, p4f_sincof_p1);
+ y2 = pmadd(y2, z, p4f_sincof_p2);
+ y2 = pmul(y2, z);
+ y2 = pmadd(y2, x, x);
+
+ // Select the correct result from the two polynomials.
+ y = sine ? (Packet4f)__builtin_msa_bsel_v((v16u8)poly_mask, (v16u8)y, (v16u8)y2)
+ : (Packet4f)__builtin_msa_bsel_v((v16u8)poly_mask, (v16u8)y2, (v16u8)y);
+
+ // Update the sign.
+ sign_mask = pxor(sign_mask, (Packet4i)y);
+ y = (Packet4f)__builtin_msa_binsli_w((v4u32)y, (v4u32)sign_mask, 0);
+ return y;
+}
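+// For reference, a scalar sketch of the reduction performed above
+// (illustrative only):
+//   int octant = (int)(fabsf(x) * 1.27323954473516f);  // x * 4/Pi
+//   octant = (octant + 1) & ~1;                        // round up to even
+//   float y = (float)octant;
+//   // Cody & Waite style reduction into [-Pi/4, +Pi/4]:
+//   float r = ((fabsf(x) - y * 0.78515625f)
+//                        - y * 2.4187564849853515625e-4f)
+//                        - y * 3.77489497744594108e-8f;
+// Bit 1 of the adjusted octant then selects between the sin and cos
+// polynomials, and the final sign is derived from the octant (and, for sine,
+// from the sign of the original argument).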
+
+template <>
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f
+psin<Packet4f>(const Packet4f& x) {
+ return psincos_inner_msa_float</* sine */ true>(x);
+}
+
+template <>
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f
+pcos<Packet4f>(const Packet4f& x) {
+ return psincos_inner_msa_float</* sine */ false>(x);
+}
+
+template <>
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet2d
+pexp<Packet2d>(const Packet2d& _x) {
+ // Limiting double-precision pexp's argument to [-1024, +1024] lets pexp
+ // reach 0 and INFINITY naturally.
+ static _EIGEN_DECLARE_CONST_Packet2d(exp_lo, -1024.0);
+ static _EIGEN_DECLARE_CONST_Packet2d(exp_hi, +1024.0);
+ static _EIGEN_DECLARE_CONST_Packet2d(cephes_LOG2EF, 1.4426950408889634073599);
+ static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_C1, 0.693145751953125);
+ static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_C2, 1.42860682030941723212e-6);
+ static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p0, 1.26177193074810590878e-4);
+ static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p1, 3.02994407707441961300e-2);
+ static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p2, 9.99999999999999999910e-1);
+ static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q0, 3.00198505138664455042e-6);
+ static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q1, 2.52448340349684104192e-3);
+ static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q2, 2.27265548208155028766e-1);
+ static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q3, 2.00000000000000000009e0);
+ static _EIGEN_DECLARE_CONST_Packet2d(half, 0.5);
+ static _EIGEN_DECLARE_CONST_Packet2d(1, 1.0);
+ static _EIGEN_DECLARE_CONST_Packet2d(2, 2.0);
+
+ Packet2d x = _x;
+
+ // Clamp x.
+ x = (Packet2d)__builtin_msa_bsel_v((v16u8)__builtin_msa_fclt_d(x, p2d_exp_lo), (v16u8)x,
+ (v16u8)p2d_exp_lo);
+ x = (Packet2d)__builtin_msa_bsel_v((v16u8)__builtin_msa_fclt_d(p2d_exp_hi, x), (v16u8)x,
+ (v16u8)p2d_exp_hi);
+
+ // Round to nearest integer by adding 0.5 (with x's sign) and truncating.
+ Packet2d x2_add = (Packet2d)__builtin_msa_binsli_d((v2u64)p2d_half, (v2u64)x, 0);
+ Packet2d x2 = pmadd(x, p2d_cephes_LOG2EF, x2_add);
+ Packet2l x2_long = __builtin_msa_ftrunc_s_d(x2);
+ Packet2d x2_long_d = __builtin_msa_ffint_s_d(x2_long);
+
+ x = __builtin_msa_fmsub_d(x, x2_long_d, p2d_cephes_exp_C1);
+ x = __builtin_msa_fmsub_d(x, x2_long_d, p2d_cephes_exp_C2);
+
+ x2 = pmul(x, x);
+
+ Packet2d px = p2d_cephes_exp_p0;
+ px = pmadd(px, x2, p2d_cephes_exp_p1);
+ px = pmadd(px, x2, p2d_cephes_exp_p2);
+ px = pmul(px, x);
+
+ Packet2d qx = p2d_cephes_exp_q0;
+ qx = pmadd(qx, x2, p2d_cephes_exp_q1);
+ qx = pmadd(qx, x2, p2d_cephes_exp_q2);
+ qx = pmadd(qx, x2, p2d_cephes_exp_q3);
+
+ x = pdiv(px, psub(qx, px));
+ x = pmadd(p2d_2, x, p2d_1);
+
+ // x *= 2**exponent.
+ x = __builtin_msa_fexp2_d(x, x2_long);
+
+ return x;
+}
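+// For reference, a scalar outline of the algorithm above (illustrative only):
+//   n = trunc(x * log2(e) + copysign(0.5, x));   // round to nearest integer
+//   r = (x - n * C1) - n * C2;                   // C1 + C2 == ln(2), split
+//                                                // for extra precision
+//   e^r ~= 1 + 2 * P(r) / (Q(r) - P(r));         // P odd, Q even in r
+//   result = e^r * 2^n;                          // done by fexp2.d above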
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_MATH_FUNCTIONS_MSA_H
diff --git a/Eigen/src/Core/arch/MSA/PacketMath.h b/Eigen/src/Core/arch/MSA/PacketMath.h
new file mode 100644
index 000000000..094c874ee
--- /dev/null
+++ b/Eigen/src/Core/arch/MSA/PacketMath.h
@@ -0,0 +1,1317 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2018 Wave Computing, Inc.
+// Written by:
+// Chris Larsen
+// Alexey Frunze (afrunze@wavecomp.com)
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_PACKET_MATH_MSA_H
+#define EIGEN_PACKET_MATH_MSA_H
+
+#include <iostream>
+#include <string>
+
+namespace Eigen {
+
+namespace internal {
+
+#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
+#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8
+#endif
+
+#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
+#define EIGEN_HAS_SINGLE_INSTRUCTION_MADD
+#endif
+
+#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_CJMADD
+#define EIGEN_HAS_SINGLE_INSTRUCTION_CJMADD
+#endif
+
+#ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS
+#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 32
+#endif
+
+#if 0
+#define EIGEN_MSA_DEBUG \
+ static bool firstTime = true; \
+ do { \
+ if (firstTime) { \
+ std::cout << __FILE__ << ':' << __LINE__ << ':' << __FUNCTION__ << std::endl; \
+ firstTime = false; \
+ } \
+ } while (0)
+#else
+#define EIGEN_MSA_DEBUG
+#endif
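+// Changing the "#if 0" above to "#if 1" makes every function that invokes
+// EIGEN_MSA_DEBUG print its file:line:function location once, on first use.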
+
+#define EIGEN_MSA_SHF_I8(a, b, c, d) (((d) << 6) | ((c) << 4) | ((b) << 2) | (a))
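+// EIGEN_MSA_SHF_I8(a, b, c, d) packs four 2-bit lane indices into the 8-bit
+// immediate used by shf.w: destination lane 0 takes source lane a, lane 1
+// takes lane b, and so on. For example EIGEN_MSA_SHF_I8(2, 3, 0, 1) == 0x4E
+// swaps the two 64-bit halves of a vector, while EIGEN_MSA_SHF_I8(3, 2, 1, 0)
+// == 0x1B reverses all four 32-bit lanes.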
+
+typedef v4f32 Packet4f;
+typedef v4i32 Packet4i;
+typedef v4u32 Packet4ui;
+
+#define _EIGEN_DECLARE_CONST_Packet4f(NAME, X) const Packet4f p4f_##NAME = { X, X, X, X }
+#define _EIGEN_DECLARE_CONST_Packet4i(NAME, X) const Packet4i p4i_##NAME = { X, X, X, X }
+#define _EIGEN_DECLARE_CONST_Packet4ui(NAME, X) const Packet4ui p4ui_##NAME = { X, X, X, X }
+
+inline std::ostream& operator<<(std::ostream& os, const Packet4f& value) {
+ os << "[ " << value[0] << ", " << value[1] << ", " << value[2] << ", " << value[3] << " ]";
+ return os;
+}
+
+inline std::ostream& operator<<(std::ostream& os, const Packet4i& value) {
+ os << "[ " << value[0] << ", " << value[1] << ", " << value[2] << ", " << value[3] << " ]";
+ return os;
+}
+
+inline std::ostream& operator<<(std::ostream& os, const Packet4ui& value) {
+ os << "[ " << value[0] << ", " << value[1] << ", " << value[2] << ", " << value[3] << " ]";
+ return os;
+}
+
+template <>
+struct packet_traits<float> : default_packet_traits {
+ typedef Packet4f type;
+ typedef Packet4f half; // Packet2f intrinsics not implemented yet
+ enum {
+ Vectorizable = 1,
+ AlignedOnScalar = 1,
+ size = 4,
+ HasHalfPacket = 0, // Packet2f intrinsics not implemented yet
+ // FIXME check the Has*
+ HasDiv = 1,
+ HasSin = EIGEN_FAST_MATH,
+ HasCos = EIGEN_FAST_MATH,
+ HasTanh = EIGEN_FAST_MATH,
+ HasLog = 1,
+ HasExp = 1,
+ HasSqrt = 1,
+ HasRsqrt = 1,
+ HasRound = 1,
+ HasFloor = 1,
+ HasCeil = 1,
+ HasBlend = 1
+ };
+};
+
+template <>
+struct packet_traits<int32_t> : default_packet_traits {
+ typedef Packet4i type;
+ typedef Packet4i half; // Packet2i intrinsics not implemented yet
+ enum {
+ Vectorizable = 1,
+ AlignedOnScalar = 1,
+ size = 4,
+ HasHalfPacket = 0, // Packet2i intrinsics not implemented yet
+ // FIXME check the Has*
+ HasDiv = 1,
+ HasBlend = 1
+ };
+};
+
+template <>
+struct unpacket_traits<Packet4f> {
+ typedef float type;
+ enum { size = 4, alignment = Aligned16 };
+ typedef Packet4f half;
+};
+
+template <>
+struct unpacket_traits<Packet4i> {
+ typedef int32_t type;
+ enum { size = 4, alignment = Aligned16 };
+ typedef Packet4i half;
+};
+
+template <>
+EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float& from) {
+ EIGEN_MSA_DEBUG;
+
+ Packet4f v = { from, from, from, from };
+ return v;
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int32_t& from) {
+ EIGEN_MSA_DEBUG;
+
+ return __builtin_msa_fill_w(from);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4f pload1<Packet4f>(const float* from) {
+ EIGEN_MSA_DEBUG;
+
+ float f = *from;
+ Packet4f v = { f, f, f, f };
+ return v;
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4i pload1<Packet4i>(const int32_t* from) {
+ EIGEN_MSA_DEBUG;
+
+ return __builtin_msa_fill_w(*from);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4f padd<Packet4f>(const Packet4f& a, const Packet4f& b) {
+ EIGEN_MSA_DEBUG;
+
+ return __builtin_msa_fadd_w(a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4i padd<Packet4i>(const Packet4i& a, const Packet4i& b) {
+ EIGEN_MSA_DEBUG;
+
+ return __builtin_msa_addv_w(a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4f plset<Packet4f>(const float& a) {
+ EIGEN_MSA_DEBUG;
+
+ static const Packet4f countdown = { 0.0f, 1.0f, 2.0f, 3.0f };
+ return padd(pset1<Packet4f>(a), countdown);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4i plset<Packet4i>(const int32_t& a) {
+ EIGEN_MSA_DEBUG;
+
+ static const Packet4i countdown = { 0, 1, 2, 3 };
+ return padd(pset1<Packet4i>(a), countdown);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4f psub<Packet4f>(const Packet4f& a, const Packet4f& b) {
+ EIGEN_MSA_DEBUG;
+
+ return __builtin_msa_fsub_w(a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4i psub<Packet4i>(const Packet4i& a, const Packet4i& b) {
+ EIGEN_MSA_DEBUG;
+
+ return __builtin_msa_subv_w(a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4f pnegate(const Packet4f& a) {
+ EIGEN_MSA_DEBUG;
+
+ return (Packet4f)__builtin_msa_bnegi_w((v4u32)a, 31);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4i pnegate(const Packet4i& a) {
+ EIGEN_MSA_DEBUG;
+
+ return __builtin_msa_addvi_w((v4i32)__builtin_msa_nori_b((v16u8)a, 0), 1);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4f pconj(const Packet4f& a) {
+ EIGEN_MSA_DEBUG;
+
+ return a;
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4i pconj(const Packet4i& a) {
+ EIGEN_MSA_DEBUG;
+
+ return a;
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4f pmul<Packet4f>(const Packet4f& a, const Packet4f& b) {
+ EIGEN_MSA_DEBUG;
+
+ return __builtin_msa_fmul_w(a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4i pmul<Packet4i>(const Packet4i& a, const Packet4i& b) {
+ EIGEN_MSA_DEBUG;
+
+ return __builtin_msa_mulv_w(a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4f pdiv<Packet4f>(const Packet4f& a, const Packet4f& b) {
+ EIGEN_MSA_DEBUG;
+
+ return __builtin_msa_fdiv_w(a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4i pdiv<Packet4i>(const Packet4i& a, const Packet4i& b) {
+ EIGEN_MSA_DEBUG;
+
+ return __builtin_msa_div_s_w(a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c) {
+ EIGEN_MSA_DEBUG;
+
+ return __builtin_msa_fmadd_w(c, a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4i pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c) {
+ EIGEN_MSA_DEBUG;
+
+  // Use an "asm" construct to work around a GNU C bug in __builtin_msa_maddv_w.
+ Packet4i value = c;
+ __asm__("maddv.w %w[value], %w[a], %w[b]\n"
+ // Outputs
+ : [value] "+f"(value)
+ // Inputs
+ : [a] "f"(a), [b] "f"(b));
+ return value;
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4f pand<Packet4f>(const Packet4f& a, const Packet4f& b) {
+ EIGEN_MSA_DEBUG;
+
+ return (Packet4f)__builtin_msa_and_v((v16u8)a, (v16u8)b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4i pand<Packet4i>(const Packet4i& a, const Packet4i& b) {
+ EIGEN_MSA_DEBUG;
+
+ return (Packet4i)__builtin_msa_and_v((v16u8)a, (v16u8)b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4f por<Packet4f>(const Packet4f& a, const Packet4f& b) {
+ EIGEN_MSA_DEBUG;
+
+ return (Packet4f)__builtin_msa_or_v((v16u8)a, (v16u8)b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4i por<Packet4i>(const Packet4i& a, const Packet4i& b) {
+ EIGEN_MSA_DEBUG;
+
+ return (Packet4i)__builtin_msa_or_v((v16u8)a, (v16u8)b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4f pxor<Packet4f>(const Packet4f& a, const Packet4f& b) {
+ EIGEN_MSA_DEBUG;
+
+ return (Packet4f)__builtin_msa_xor_v((v16u8)a, (v16u8)b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4i pxor<Packet4i>(const Packet4i& a, const Packet4i& b) {
+ EIGEN_MSA_DEBUG;
+
+ return (Packet4i)__builtin_msa_xor_v((v16u8)a, (v16u8)b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4f pandnot<Packet4f>(const Packet4f& a, const Packet4f& b) {
+ EIGEN_MSA_DEBUG;
+
+ return pand(a, (Packet4f)__builtin_msa_xori_b((v16u8)b, 255));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4i pandnot<Packet4i>(const Packet4i& a, const Packet4i& b) {
+ EIGEN_MSA_DEBUG;
+
+ return pand(a, (Packet4i)__builtin_msa_xori_b((v16u8)b, 255));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4f pmin<Packet4f>(const Packet4f& a, const Packet4f& b) {
+ EIGEN_MSA_DEBUG;
+
+#if EIGEN_FAST_MATH
+ // This prefers numbers to NaNs.
+ return __builtin_msa_fmin_w(a, b);
+#else
+ // This prefers NaNs to numbers.
+ Packet4i aNaN = __builtin_msa_fcun_w(a, a);
+ Packet4i aMinOrNaN = por(__builtin_msa_fclt_w(a, b), aNaN);
+ return (Packet4f)__builtin_msa_bsel_v((v16u8)aMinOrNaN, (v16u8)b, (v16u8)a);
+#endif
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4i pmin<Packet4i>(const Packet4i& a, const Packet4i& b) {
+ EIGEN_MSA_DEBUG;
+
+ return __builtin_msa_min_s_w(a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4f pmax<Packet4f>(const Packet4f& a, const Packet4f& b) {
+ EIGEN_MSA_DEBUG;
+
+#if EIGEN_FAST_MATH
+ // This prefers numbers to NaNs.
+ return __builtin_msa_fmax_w(a, b);
+#else
+ // This prefers NaNs to numbers.
+ Packet4i aNaN = __builtin_msa_fcun_w(a, a);
+ Packet4i aMaxOrNaN = por(__builtin_msa_fclt_w(b, a), aNaN);
+ return (Packet4f)__builtin_msa_bsel_v((v16u8)aMaxOrNaN, (v16u8)b, (v16u8)a);
+#endif
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4i pmax<Packet4i>(const Packet4i& a, const Packet4i& b) {
+ EIGEN_MSA_DEBUG;
+
+ return __builtin_msa_max_s_w(a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4f pload<Packet4f>(const float* from) {
+ EIGEN_MSA_DEBUG;
+
+ EIGEN_DEBUG_ALIGNED_LOAD return (Packet4f)__builtin_msa_ld_w(const_cast<float*>(from), 0);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4i pload<Packet4i>(const int32_t* from) {
+ EIGEN_MSA_DEBUG;
+
+ EIGEN_DEBUG_ALIGNED_LOAD return __builtin_msa_ld_w(const_cast<int32_t*>(from), 0);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float* from) {
+ EIGEN_MSA_DEBUG;
+
+ EIGEN_DEBUG_UNALIGNED_LOAD return (Packet4f)__builtin_msa_ld_w(const_cast<float*>(from), 0);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int32_t* from) {
+ EIGEN_MSA_DEBUG;
+
+ EIGEN_DEBUG_UNALIGNED_LOAD return (Packet4i)__builtin_msa_ld_w(const_cast<int32_t*>(from), 0);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4f ploaddup<Packet4f>(const float* from) {
+ EIGEN_MSA_DEBUG;
+
+ float f0 = from[0], f1 = from[1];
+ Packet4f v0 = { f0, f0, f0, f0 };
+ Packet4f v1 = { f1, f1, f1, f1 };
+ return (Packet4f)__builtin_msa_ilvr_d((v2i64)v1, (v2i64)v0);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4i ploaddup<Packet4i>(const int32_t* from) {
+ EIGEN_MSA_DEBUG;
+
+ int32_t i0 = from[0], i1 = from[1];
+ Packet4i v0 = { i0, i0, i0, i0 };
+ Packet4i v1 = { i1, i1, i1, i1 };
+ return (Packet4i)__builtin_msa_ilvr_d((v2i64)v1, (v2i64)v0);
+}
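+// ploaddup loads two scalars and duplicates each of them: for from = { a, b }
+// the result is { a, a, b, b }. ilvr_d interleaves the low 64-bit halves of
+// the two broadcast vectors, which produces exactly that layout.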
+
+template <>
+EIGEN_STRONG_INLINE void pstore<float>(float* to, const Packet4f& from) {
+ EIGEN_MSA_DEBUG;
+
+ EIGEN_DEBUG_ALIGNED_STORE __builtin_msa_st_w((Packet4i)from, to, 0);
+}
+
+template <>
+EIGEN_STRONG_INLINE void pstore<int32_t>(int32_t* to, const Packet4i& from) {
+ EIGEN_MSA_DEBUG;
+
+ EIGEN_DEBUG_ALIGNED_STORE __builtin_msa_st_w(from, to, 0);
+}
+
+template <>
+EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const Packet4f& from) {
+ EIGEN_MSA_DEBUG;
+
+ EIGEN_DEBUG_UNALIGNED_STORE __builtin_msa_st_w((Packet4i)from, to, 0);
+}
+
+template <>
+EIGEN_STRONG_INLINE void pstoreu<int32_t>(int32_t* to, const Packet4i& from) {
+ EIGEN_MSA_DEBUG;
+
+ EIGEN_DEBUG_UNALIGNED_STORE __builtin_msa_st_w(from, to, 0);
+}
+
+template <>
+EIGEN_DEVICE_FUNC inline Packet4f pgather<float, Packet4f>(const float* from, Index stride) {
+ EIGEN_MSA_DEBUG;
+
+ float f = *from;
+ Packet4f v = { f, f, f, f };
+ v[1] = from[stride];
+ v[2] = from[2 * stride];
+ v[3] = from[3 * stride];
+ return v;
+}
+
+template <>
+EIGEN_DEVICE_FUNC inline Packet4i pgather<int32_t, Packet4i>(const int32_t* from, Index stride) {
+ EIGEN_MSA_DEBUG;
+
+ int32_t i = *from;
+ Packet4i v = { i, i, i, i };
+ v[1] = from[stride];
+ v[2] = from[2 * stride];
+ v[3] = from[3 * stride];
+ return v;
+}
+
+template <>
+EIGEN_DEVICE_FUNC inline void pscatter<float, Packet4f>(float* to, const Packet4f& from,
+ Index stride) {
+ EIGEN_MSA_DEBUG;
+
+ *to = from[0];
+ to += stride;
+ *to = from[1];
+ to += stride;
+ *to = from[2];
+ to += stride;
+ *to = from[3];
+}
+
+template <>
+EIGEN_DEVICE_FUNC inline void pscatter<int32_t, Packet4i>(int32_t* to, const Packet4i& from,
+ Index stride) {
+ EIGEN_MSA_DEBUG;
+
+ *to = from[0];
+ to += stride;
+ *to = from[1];
+ to += stride;
+ *to = from[2];
+ to += stride;
+ *to = from[3];
+}
+
+template <>
+EIGEN_STRONG_INLINE void prefetch<float>(const float* addr) {
+ EIGEN_MSA_DEBUG;
+
+ __builtin_prefetch(addr);
+}
+
+template <>
+EIGEN_STRONG_INLINE void prefetch<int32_t>(const int32_t* addr) {
+ EIGEN_MSA_DEBUG;
+
+ __builtin_prefetch(addr);
+}
+
+template <>
+EIGEN_STRONG_INLINE float pfirst<Packet4f>(const Packet4f& a) {
+ EIGEN_MSA_DEBUG;
+
+ return a[0];
+}
+
+template <>
+EIGEN_STRONG_INLINE int32_t pfirst<Packet4i>(const Packet4i& a) {
+ EIGEN_MSA_DEBUG;
+
+ return a[0];
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4f preverse(const Packet4f& a) {
+ EIGEN_MSA_DEBUG;
+
+ return (Packet4f)__builtin_msa_shf_w((v4i32)a, EIGEN_MSA_SHF_I8(3, 2, 1, 0));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4i preverse(const Packet4i& a) {
+ EIGEN_MSA_DEBUG;
+
+ return __builtin_msa_shf_w(a, EIGEN_MSA_SHF_I8(3, 2, 1, 0));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4f pabs(const Packet4f& a) {
+ EIGEN_MSA_DEBUG;
+
+ return (Packet4f)__builtin_msa_bclri_w((v4u32)a, 31);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4i pabs(const Packet4i& a) {
+ EIGEN_MSA_DEBUG;
+
+ Packet4i zero = __builtin_msa_ldi_w(0);
+ return __builtin_msa_add_a_w(zero, a);
+}
+
+template <>
+EIGEN_STRONG_INLINE float predux<Packet4f>(const Packet4f& a) {
+ EIGEN_MSA_DEBUG;
+
+ Packet4f s = padd(a, (Packet4f)__builtin_msa_shf_w((v4i32)a, EIGEN_MSA_SHF_I8(2, 3, 0, 1)));
+ s = padd(s, (Packet4f)__builtin_msa_shf_w((v4i32)s, EIGEN_MSA_SHF_I8(1, 0, 3, 2)));
+ return s[0];
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4f preduxp<Packet4f>(const Packet4f* vecs) {
+ EIGEN_MSA_DEBUG;
+
+ v4i32 tmp1, tmp2, tmp3, tmp4;
+ Packet4f sum;
+
+ tmp1 = __builtin_msa_ilvr_w((v4i32)vecs[1], (v4i32)vecs[0]);
+ tmp2 = __builtin_msa_ilvr_w((v4i32)vecs[3], (v4i32)vecs[2]);
+ tmp3 = __builtin_msa_ilvl_w((v4i32)vecs[1], (v4i32)vecs[0]);
+ tmp4 = __builtin_msa_ilvl_w((v4i32)vecs[3], (v4i32)vecs[2]);
+
+ sum = (Packet4f)__builtin_msa_ilvr_d((v2i64)tmp2, (v2i64)tmp1);
+ sum = padd(sum, (Packet4f)__builtin_msa_ilvod_d((v2i64)tmp2, (v2i64)tmp1));
+ sum = padd(sum, (Packet4f)__builtin_msa_ilvr_d((v2i64)tmp4, (v2i64)tmp3));
+ sum = padd(sum, (Packet4f)__builtin_msa_ilvod_d((v2i64)tmp4, (v2i64)tmp3));
+
+ return sum;
+}
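+// preduxp reduces four packets at once: the ilvr/ilvl/ilvod sequence above is
+// effectively a 4x4 transpose, so adding the four transposed rows yields
+// { sum(vecs[0]), sum(vecs[1]), sum(vecs[2]), sum(vecs[3]) } with full-width
+// adds instead of four separate horizontal reductions.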
+
+template <>
+EIGEN_STRONG_INLINE Packet4i preduxp<Packet4i>(const Packet4i* vecs) {
+ EIGEN_MSA_DEBUG;
+
+ v4i32 tmp1, tmp2, tmp3, tmp4;
+ Packet4i sum;
+
+ tmp1 = __builtin_msa_ilvr_w((v4i32)vecs[1], (v4i32)vecs[0]);
+ tmp2 = __builtin_msa_ilvr_w((v4i32)vecs[3], (v4i32)vecs[2]);
+ tmp3 = __builtin_msa_ilvl_w((v4i32)vecs[1], (v4i32)vecs[0]);
+ tmp4 = __builtin_msa_ilvl_w((v4i32)vecs[3], (v4i32)vecs[2]);
+
+ sum = (Packet4i)__builtin_msa_ilvr_d((v2i64)tmp2, (v2i64)tmp1);
+ sum = padd(sum, (Packet4i)__builtin_msa_ilvod_d((v2i64)tmp2, (v2i64)tmp1));
+ sum = padd(sum, (Packet4i)__builtin_msa_ilvr_d((v2i64)tmp4, (v2i64)tmp3));
+ sum = padd(sum, (Packet4i)__builtin_msa_ilvod_d((v2i64)tmp4, (v2i64)tmp3));
+
+ return sum;
+}
+
+template <>
+EIGEN_STRONG_INLINE int32_t predux<Packet4i>(const Packet4i& a) {
+ EIGEN_MSA_DEBUG;
+
+ Packet4i s = padd(a, __builtin_msa_shf_w(a, EIGEN_MSA_SHF_I8(2, 3, 0, 1)));
+ s = padd(s, __builtin_msa_shf_w(s, EIGEN_MSA_SHF_I8(1, 0, 3, 2)));
+ return s[0];
+}
+
+// Other reduction functions:
+// mul
+template <>
+EIGEN_STRONG_INLINE float predux_mul<Packet4f>(const Packet4f& a) {
+ EIGEN_MSA_DEBUG;
+
+ Packet4f p = pmul(a, (Packet4f)__builtin_msa_shf_w((v4i32)a, EIGEN_MSA_SHF_I8(2, 3, 0, 1)));
+ p = pmul(p, (Packet4f)__builtin_msa_shf_w((v4i32)p, EIGEN_MSA_SHF_I8(1, 0, 3, 2)));
+ return p[0];
+}
+
+template <>
+EIGEN_STRONG_INLINE int32_t predux_mul<Packet4i>(const Packet4i& a) {
+ EIGEN_MSA_DEBUG;
+
+ Packet4i p = pmul(a, __builtin_msa_shf_w(a, EIGEN_MSA_SHF_I8(2, 3, 0, 1)));
+ p = pmul(p, __builtin_msa_shf_w(p, EIGEN_MSA_SHF_I8(1, 0, 3, 2)));
+ return p[0];
+}
+
+// min
+template <>
+EIGEN_STRONG_INLINE float predux_min<Packet4f>(const Packet4f& a) {
+ EIGEN_MSA_DEBUG;
+
+ // Swap 64-bit halves of a.
+ Packet4f swapped = (Packet4f)__builtin_msa_shf_w((Packet4i)a, EIGEN_MSA_SHF_I8(2, 3, 0, 1));
+#if !EIGEN_FAST_MATH
+ // Detect presence of NaNs from pairs a[0]-a[2] and a[1]-a[3] as two 32-bit
+ // masks of all zeroes/ones in low 64 bits.
+ v16u8 unord = (v16u8)__builtin_msa_fcun_w(a, swapped);
+ // Combine the two masks into one: 64 ones if no NaNs, otherwise 64 zeroes.
+ unord = (v16u8)__builtin_msa_ceqi_d((v2i64)unord, 0);
+#endif
+ // Continue with min computation.
+ Packet4f v = __builtin_msa_fmin_w(a, swapped);
+ v = __builtin_msa_fmin_w(
+ v, (Packet4f)__builtin_msa_shf_w((Packet4i)v, EIGEN_MSA_SHF_I8(1, 0, 3, 2)));
+#if !EIGEN_FAST_MATH
+  // Based on the mask, select between v and 4 qNaNs.
+ v16u8 qnans = (v16u8)__builtin_msa_fill_w(0x7FC00000);
+ v = (Packet4f)__builtin_msa_bsel_v(unord, qnans, (v16u8)v);
+#endif
+ return v[0];
+}
+
+template <>
+EIGEN_STRONG_INLINE int32_t predux_min<Packet4i>(const Packet4i& a) {
+ EIGEN_MSA_DEBUG;
+
+ Packet4i m = pmin(a, __builtin_msa_shf_w(a, EIGEN_MSA_SHF_I8(2, 3, 0, 1)));
+ m = pmin(m, __builtin_msa_shf_w(m, EIGEN_MSA_SHF_I8(1, 0, 3, 2)));
+ return m[0];
+}
+
+// max
+template <>
+EIGEN_STRONG_INLINE float predux_max<Packet4f>(const Packet4f& a) {
+ EIGEN_MSA_DEBUG;
+
+ // Swap 64-bit halves of a.
+ Packet4f swapped = (Packet4f)__builtin_msa_shf_w((Packet4i)a, EIGEN_MSA_SHF_I8(2, 3, 0, 1));
+#if !EIGEN_FAST_MATH
+ // Detect presence of NaNs from pairs a[0]-a[2] and a[1]-a[3] as two 32-bit
+ // masks of all zeroes/ones in low 64 bits.
+ v16u8 unord = (v16u8)__builtin_msa_fcun_w(a, swapped);
+ // Combine the two masks into one: 64 ones if no NaNs, otherwise 64 zeroes.
+ unord = (v16u8)__builtin_msa_ceqi_d((v2i64)unord, 0);
+#endif
+ // Continue with max computation.
+ Packet4f v = __builtin_msa_fmax_w(a, swapped);
+ v = __builtin_msa_fmax_w(
+ v, (Packet4f)__builtin_msa_shf_w((Packet4i)v, EIGEN_MSA_SHF_I8(1, 0, 3, 2)));
+#if !EIGEN_FAST_MATH
+  // Based on the mask, select between v and 4 qNaNs.
+ v16u8 qnans = (v16u8)__builtin_msa_fill_w(0x7FC00000);
+ v = (Packet4f)__builtin_msa_bsel_v(unord, qnans, (v16u8)v);
+#endif
+ return v[0];
+}
+
+template <>
+EIGEN_STRONG_INLINE int32_t predux_max<Packet4i>(const Packet4i& a) {
+ EIGEN_MSA_DEBUG;
+
+ Packet4i m = pmax(a, __builtin_msa_shf_w(a, EIGEN_MSA_SHF_I8(2, 3, 0, 1)));
+ m = pmax(m, __builtin_msa_shf_w(m, EIGEN_MSA_SHF_I8(1, 0, 3, 2)));
+ return m[0];
+}
+
+#define PALIGN_MSA(Offset, Type, Command) \
+ template <> \
+ struct palign_impl<Offset, Type> { \
+ EIGEN_STRONG_INLINE static void run(Type& first, const Type& second) { \
+ if (Offset != 0) first = (Type)(Command((v16i8)second, (v16i8)first, Offset * 4)); \
+ } \
+ };
+
+PALIGN_MSA(0, Packet4f, __builtin_msa_sldi_b)
+PALIGN_MSA(1, Packet4f, __builtin_msa_sldi_b)
+PALIGN_MSA(2, Packet4f, __builtin_msa_sldi_b)
+PALIGN_MSA(3, Packet4f, __builtin_msa_sldi_b)
+PALIGN_MSA(0, Packet4i, __builtin_msa_sldi_b)
+PALIGN_MSA(1, Packet4i, __builtin_msa_sldi_b)
+PALIGN_MSA(2, Packet4i, __builtin_msa_sldi_b)
+PALIGN_MSA(3, Packet4i, __builtin_msa_sldi_b)
+
+#undef PALIGN_MSA
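+// palign_impl<Offset> aligns two consecutive packets: the result keeps the
+// last (4 - Offset) elements of `first` followed by the first Offset elements
+// of `second`. For example, with Offset == 1, first = { a0, a1, a2, a3 } and
+// second = { b0, b1, b2, b3 } produce { a1, a2, a3, b0 }. sldi.b shifts by
+// bytes, hence the Offset * 4 byte count here (and Offset * 8 for the
+// double-precision variant further below).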
+
+inline std::ostream& operator<<(std::ostream& os, const PacketBlock<Packet4f, 4>& value) {
+ os << "[ " << value.packet[0] << "," << std::endl
+ << " " << value.packet[1] << "," << std::endl
+ << " " << value.packet[2] << "," << std::endl
+ << " " << value.packet[3] << " ]";
+ return os;
+}
+
+EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet4f, 4>& kernel) {
+ EIGEN_MSA_DEBUG;
+
+ v4i32 tmp1, tmp2, tmp3, tmp4;
+
+ tmp1 = __builtin_msa_ilvr_w((v4i32)kernel.packet[1], (v4i32)kernel.packet[0]);
+ tmp2 = __builtin_msa_ilvr_w((v4i32)kernel.packet[3], (v4i32)kernel.packet[2]);
+ tmp3 = __builtin_msa_ilvl_w((v4i32)kernel.packet[1], (v4i32)kernel.packet[0]);
+ tmp4 = __builtin_msa_ilvl_w((v4i32)kernel.packet[3], (v4i32)kernel.packet[2]);
+
+ kernel.packet[0] = (Packet4f)__builtin_msa_ilvr_d((v2i64)tmp2, (v2i64)tmp1);
+ kernel.packet[1] = (Packet4f)__builtin_msa_ilvod_d((v2i64)tmp2, (v2i64)tmp1);
+ kernel.packet[2] = (Packet4f)__builtin_msa_ilvr_d((v2i64)tmp4, (v2i64)tmp3);
+ kernel.packet[3] = (Packet4f)__builtin_msa_ilvod_d((v2i64)tmp4, (v2i64)tmp3);
+}
+
+inline std::ostream& operator<<(std::ostream& os, const PacketBlock<Packet4i, 4>& value) {
+ os << "[ " << value.packet[0] << "," << std::endl
+ << " " << value.packet[1] << "," << std::endl
+ << " " << value.packet[2] << "," << std::endl
+ << " " << value.packet[3] << " ]";
+ return os;
+}
+
+EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet4i, 4>& kernel) {
+ EIGEN_MSA_DEBUG;
+
+ v4i32 tmp1, tmp2, tmp3, tmp4;
+
+ tmp1 = __builtin_msa_ilvr_w(kernel.packet[1], kernel.packet[0]);
+ tmp2 = __builtin_msa_ilvr_w(kernel.packet[3], kernel.packet[2]);
+ tmp3 = __builtin_msa_ilvl_w(kernel.packet[1], kernel.packet[0]);
+ tmp4 = __builtin_msa_ilvl_w(kernel.packet[3], kernel.packet[2]);
+
+ kernel.packet[0] = (Packet4i)__builtin_msa_ilvr_d((v2i64)tmp2, (v2i64)tmp1);
+ kernel.packet[1] = (Packet4i)__builtin_msa_ilvod_d((v2i64)tmp2, (v2i64)tmp1);
+ kernel.packet[2] = (Packet4i)__builtin_msa_ilvr_d((v2i64)tmp4, (v2i64)tmp3);
+ kernel.packet[3] = (Packet4i)__builtin_msa_ilvod_d((v2i64)tmp4, (v2i64)tmp3);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4f psqrt(const Packet4f& a) {
+ EIGEN_MSA_DEBUG;
+
+ return __builtin_msa_fsqrt_w(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4f prsqrt(const Packet4f& a) {
+ EIGEN_MSA_DEBUG;
+
+#if EIGEN_FAST_MATH
+ return __builtin_msa_frsqrt_w(a);
+#else
+ Packet4f ones = __builtin_msa_ffint_s_w(__builtin_msa_ldi_w(1));
+ return pdiv(ones, psqrt(a));
+#endif
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4f pfloor<Packet4f>(const Packet4f& a) {
+ Packet4f v = a;
+ int32_t old_mode, new_mode;
+ asm volatile(
+ "cfcmsa %[old_mode], $1\n"
+ "ori %[new_mode], %[old_mode], 3\n" // 3 = round towards -INFINITY.
+ "ctcmsa $1, %[new_mode]\n"
+ "frint.w %w[v], %w[v]\n"
+ "ctcmsa $1, %[old_mode]\n"
+ : // outputs
+ [old_mode] "=r"(old_mode), [new_mode] "=r"(new_mode),
+ [v] "+f"(v)
+ : // inputs
+ : // clobbers
+ );
+ return v;
+}
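+// MSA control register 1 ($1 in cfcmsa/ctcmsa) is MSACSR; its two lowest bits
+// hold the rounding mode: 0 = nearest (ties to even), 1 = toward zero,
+// 2 = toward +infinity, 3 = toward -infinity. "ori ..., 3" optionally followed
+// by an "xori" therefore reaches any mode from any starting mode, and the
+// saved mode is restored after the frint.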
+
+template <>
+EIGEN_STRONG_INLINE Packet4f pceil<Packet4f>(const Packet4f& a) {
+ Packet4f v = a;
+ int32_t old_mode, new_mode;
+ asm volatile(
+ "cfcmsa %[old_mode], $1\n"
+ "ori %[new_mode], %[old_mode], 3\n"
+ "xori %[new_mode], %[new_mode], 1\n" // 2 = round towards +INFINITY.
+ "ctcmsa $1, %[new_mode]\n"
+ "frint.w %w[v], %w[v]\n"
+ "ctcmsa $1, %[old_mode]\n"
+ : // outputs
+ [old_mode] "=r"(old_mode), [new_mode] "=r"(new_mode),
+ [v] "+f"(v)
+ : // inputs
+ : // clobbers
+ );
+ return v;
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4f pround<Packet4f>(const Packet4f& a) {
+ Packet4f v = a;
+ int32_t old_mode, new_mode;
+ asm volatile(
+ "cfcmsa %[old_mode], $1\n"
+ "ori %[new_mode], %[old_mode], 3\n"
+ "xori %[new_mode], %[new_mode], 3\n" // 0 = round to nearest, ties to even.
+ "ctcmsa $1, %[new_mode]\n"
+ "frint.w %w[v], %w[v]\n"
+ "ctcmsa $1, %[old_mode]\n"
+ : // outputs
+ [old_mode] "=r"(old_mode), [new_mode] "=r"(new_mode),
+ [v] "+f"(v)
+ : // inputs
+ : // clobbers
+ );
+ return v;
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4f pblend(const Selector<4>& ifPacket, const Packet4f& thenPacket,
+ const Packet4f& elsePacket) {
+ Packet4ui select = { ifPacket.select[0], ifPacket.select[1], ifPacket.select[2],
+ ifPacket.select[3] };
+ Packet4i mask = __builtin_msa_ceqi_w((Packet4i)select, 0);
+ return (Packet4f)__builtin_msa_bsel_v((v16u8)mask, (v16u8)thenPacket, (v16u8)elsePacket);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4i pblend(const Selector<4>& ifPacket, const Packet4i& thenPacket,
+ const Packet4i& elsePacket) {
+ Packet4ui select = { ifPacket.select[0], ifPacket.select[1], ifPacket.select[2],
+ ifPacket.select[3] };
+ Packet4i mask = __builtin_msa_ceqi_w((Packet4i)select, 0);
+ return (Packet4i)__builtin_msa_bsel_v((v16u8)mask, (v16u8)thenPacket, (v16u8)elsePacket);
+}
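+// In both pblend overloads, ceqi_w turns the zero selector lanes into an
+// all-ones mask, and bsel_v then takes those lanes from elsePacket and the
+// remaining lanes from thenPacket, i.e. select[i] != 0 picks thenPacket[i].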
+
+//---------- double ----------
+
+typedef v2f64 Packet2d;
+typedef v2i64 Packet2l;
+typedef v2u64 Packet2ul;
+
+#define _EIGEN_DECLARE_CONST_Packet2d(NAME, X) const Packet2d p2d_##NAME = { X, X }
+#define _EIGEN_DECLARE_CONST_Packet2l(NAME, X) const Packet2l p2l_##NAME = { X, X }
+#define _EIGEN_DECLARE_CONST_Packet2ul(NAME, X) const Packet2ul p2ul_##NAME = { X, X }
+
+inline std::ostream& operator<<(std::ostream& os, const Packet2d& value) {
+ os << "[ " << value[0] << ", " << value[1] << " ]";
+ return os;
+}
+
+inline std::ostream& operator<<(std::ostream& os, const Packet2l& value) {
+ os << "[ " << value[0] << ", " << value[1] << " ]";
+ return os;
+}
+
+inline std::ostream& operator<<(std::ostream& os, const Packet2ul& value) {
+ os << "[ " << value[0] << ", " << value[1] << " ]";
+ return os;
+}
+
+template <>
+struct packet_traits<double> : default_packet_traits {
+ typedef Packet2d type;
+ typedef Packet2d half;
+ enum {
+ Vectorizable = 1,
+ AlignedOnScalar = 1,
+ size = 2,
+ HasHalfPacket = 0,
+ // FIXME check the Has*
+ HasDiv = 1,
+ HasExp = 1,
+ HasSqrt = 1,
+ HasRsqrt = 1,
+ HasRound = 1,
+ HasFloor = 1,
+ HasCeil = 1,
+ HasBlend = 1
+ };
+};
+
+template <>
+struct unpacket_traits<Packet2d> {
+ typedef double type;
+ enum { size = 2, alignment = Aligned16 };
+ typedef Packet2d half;
+};
+
+template <>
+EIGEN_STRONG_INLINE Packet2d pset1<Packet2d>(const double& from) {
+ EIGEN_MSA_DEBUG;
+
+ Packet2d value = { from, from };
+ return value;
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2d padd<Packet2d>(const Packet2d& a, const Packet2d& b) {
+ EIGEN_MSA_DEBUG;
+
+ return __builtin_msa_fadd_d(a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2d plset<Packet2d>(const double& a) {
+ EIGEN_MSA_DEBUG;
+
+ static const Packet2d countdown = { 0.0, 1.0 };
+ return padd(pset1<Packet2d>(a), countdown);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2d psub<Packet2d>(const Packet2d& a, const Packet2d& b) {
+ EIGEN_MSA_DEBUG;
+
+ return __builtin_msa_fsub_d(a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2d pnegate(const Packet2d& a) {
+ EIGEN_MSA_DEBUG;
+
+ return (Packet2d)__builtin_msa_bnegi_d((v2u64)a, 63);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2d pconj(const Packet2d& a) {
+ EIGEN_MSA_DEBUG;
+
+ return a;
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2d pmul<Packet2d>(const Packet2d& a, const Packet2d& b) {
+ EIGEN_MSA_DEBUG;
+
+ return __builtin_msa_fmul_d(a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2d pdiv<Packet2d>(const Packet2d& a, const Packet2d& b) {
+ EIGEN_MSA_DEBUG;
+
+ return __builtin_msa_fdiv_d(a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2d pmadd(const Packet2d& a, const Packet2d& b, const Packet2d& c) {
+ EIGEN_MSA_DEBUG;
+
+ return __builtin_msa_fmadd_d(c, a, b);
+}
+
+// Logical operations are not supported on floating-point types, so we have to
+// reinterpret the packets as integer vectors and use the MSA bitwise intrinsics.
+template <>
+EIGEN_STRONG_INLINE Packet2d pand<Packet2d>(const Packet2d& a, const Packet2d& b) {
+ EIGEN_MSA_DEBUG;
+
+ return (Packet2d)__builtin_msa_and_v((v16u8)a, (v16u8)b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2d por<Packet2d>(const Packet2d& a, const Packet2d& b) {
+ EIGEN_MSA_DEBUG;
+
+ return (Packet2d)__builtin_msa_or_v((v16u8)a, (v16u8)b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2d pxor<Packet2d>(const Packet2d& a, const Packet2d& b) {
+ EIGEN_MSA_DEBUG;
+
+ return (Packet2d)__builtin_msa_xor_v((v16u8)a, (v16u8)b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2d pandnot<Packet2d>(const Packet2d& a, const Packet2d& b) {
+ EIGEN_MSA_DEBUG;
+
+ return pand(a, (Packet2d)__builtin_msa_xori_b((v16u8)b, 255));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2d pload<Packet2d>(const double* from) {
+ EIGEN_MSA_DEBUG;
+
+  EIGEN_DEBUG_ALIGNED_LOAD return (Packet2d)__builtin_msa_ld_d(const_cast<double*>(from), 0);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2d pmin<Packet2d>(const Packet2d& a, const Packet2d& b) {
+ EIGEN_MSA_DEBUG;
+
+#if EIGEN_FAST_MATH
+ // This prefers numbers to NaNs.
+ return __builtin_msa_fmin_d(a, b);
+#else
+ // This prefers NaNs to numbers.
+ v2i64 aNaN = __builtin_msa_fcun_d(a, a);
+ v2i64 aMinOrNaN = por(__builtin_msa_fclt_d(a, b), aNaN);
+ return (Packet2d)__builtin_msa_bsel_v((v16u8)aMinOrNaN, (v16u8)b, (v16u8)a);
+#endif
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2d pmax<Packet2d>(const Packet2d& a, const Packet2d& b) {
+ EIGEN_MSA_DEBUG;
+
+#if EIGEN_FAST_MATH
+ // This prefers numbers to NaNs.
+ return __builtin_msa_fmax_d(a, b);
+#else
+ // This prefers NaNs to numbers.
+ v2i64 aNaN = __builtin_msa_fcun_d(a, a);
+ v2i64 aMaxOrNaN = por(__builtin_msa_fclt_d(b, a), aNaN);
+ return (Packet2d)__builtin_msa_bsel_v((v16u8)aMaxOrNaN, (v16u8)b, (v16u8)a);
+#endif
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2d ploadu<Packet2d>(const double* from) {
+ EIGEN_MSA_DEBUG;
+
+ EIGEN_DEBUG_UNALIGNED_LOAD return (Packet2d)__builtin_msa_ld_d(const_cast<double*>(from), 0);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2d ploaddup<Packet2d>(const double* from) {
+ EIGEN_MSA_DEBUG;
+
+ Packet2d value = { *from, *from };
+ return value;
+}
+
+template <>
+EIGEN_STRONG_INLINE void pstore<double>(double* to, const Packet2d& from) {
+ EIGEN_MSA_DEBUG;
+
+ EIGEN_DEBUG_ALIGNED_STORE __builtin_msa_st_d((v2i64)from, to, 0);
+}
+
+template <>
+EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const Packet2d& from) {
+ EIGEN_MSA_DEBUG;
+
+ EIGEN_DEBUG_UNALIGNED_STORE __builtin_msa_st_d((v2i64)from, to, 0);
+}
+
+template <>
+EIGEN_DEVICE_FUNC inline Packet2d pgather<double, Packet2d>(const double* from, Index stride) {
+ EIGEN_MSA_DEBUG;
+
+ Packet2d value;
+ value[0] = *from;
+ from += stride;
+ value[1] = *from;
+ return value;
+}
+
+template <>
+EIGEN_DEVICE_FUNC inline void pscatter<double, Packet2d>(double* to, const Packet2d& from,
+ Index stride) {
+ EIGEN_MSA_DEBUG;
+
+ *to = from[0];
+ to += stride;
+ *to = from[1];
+}
+
+template <>
+EIGEN_STRONG_INLINE void prefetch<double>(const double* addr) {
+ EIGEN_MSA_DEBUG;
+
+ __builtin_prefetch(addr);
+}
+
+template <>
+EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) {
+ EIGEN_MSA_DEBUG;
+
+ return a[0];
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2d preverse(const Packet2d& a) {
+ EIGEN_MSA_DEBUG;
+
+ return (Packet2d)__builtin_msa_shf_w((v4i32)a, EIGEN_MSA_SHF_I8(2, 3, 0, 1));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2d pabs(const Packet2d& a) {
+ EIGEN_MSA_DEBUG;
+
+ return (Packet2d)__builtin_msa_bclri_d((v2u64)a, 63);
+}
+
+template <>
+EIGEN_STRONG_INLINE double predux<Packet2d>(const Packet2d& a) {
+ EIGEN_MSA_DEBUG;
+
+ Packet2d s = padd(a, preverse(a));
+ return s[0];
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2d preduxp<Packet2d>(const Packet2d* vecs) {
+ EIGEN_MSA_DEBUG;
+
+ Packet2d v0 = (Packet2d)__builtin_msa_ilvev_d((v2i64)vecs[1], (v2i64)vecs[0]);
+ Packet2d v1 = (Packet2d)__builtin_msa_ilvod_d((v2i64)vecs[1], (v2i64)vecs[0]);
+
+ return padd(v0, v1);
+}
+
+// Other reduction functions:
+// mul
+template <>
+EIGEN_STRONG_INLINE double predux_mul<Packet2d>(const Packet2d& a) {
+ EIGEN_MSA_DEBUG;
+
+ Packet2d p = pmul(a, preverse(a));
+ return p[0];
+}
+
+// min
+template <>
+EIGEN_STRONG_INLINE double predux_min<Packet2d>(const Packet2d& a) {
+ EIGEN_MSA_DEBUG;
+
+#if EIGEN_FAST_MATH
+ Packet2d swapped = (Packet2d)__builtin_msa_shf_w((Packet4i)a, EIGEN_MSA_SHF_I8(2, 3, 0, 1));
+ Packet2d v = __builtin_msa_fmin_d(a, swapped);
+ return v[0];
+#else
+ double a0 = a[0], a1 = a[1];
+ return ((numext::isnan)(a0) || a0 < a1) ? a0 : a1;
+#endif
+}
+
+// max
+template <>
+EIGEN_STRONG_INLINE double predux_max<Packet2d>(const Packet2d& a) {
+ EIGEN_MSA_DEBUG;
+
+#if EIGEN_FAST_MATH
+ Packet2d swapped = (Packet2d)__builtin_msa_shf_w((Packet4i)a, EIGEN_MSA_SHF_I8(2, 3, 0, 1));
+ Packet2d v = __builtin_msa_fmax_d(a, swapped);
+ return v[0];
+#else
+ double a0 = a[0], a1 = a[1];
+ return ((numext::isnan)(a0) || a0 > a1) ? a0 : a1;
+#endif
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2d psqrt(const Packet2d& a) {
+ EIGEN_MSA_DEBUG;
+
+ return __builtin_msa_fsqrt_d(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2d prsqrt(const Packet2d& a) {
+ EIGEN_MSA_DEBUG;
+
+#if EIGEN_FAST_MATH
+ return __builtin_msa_frsqrt_d(a);
+#else
+ Packet2d ones = __builtin_msa_ffint_s_d(__builtin_msa_ldi_d(1));
+ return pdiv(ones, psqrt(a));
+#endif
+}
+
+#define PALIGN_MSA(Offset, Type, Command) \
+ template <> \
+ struct palign_impl<Offset, Type> { \
+ EIGEN_STRONG_INLINE static void run(Type& first, const Type& second) { \
+ if (Offset != 0) first = (Type)(Command((v16i8)second, (v16i8)first, Offset * 8)); \
+ } \
+ };
+
+PALIGN_MSA(0, Packet2d, __builtin_msa_sldi_b)
+PALIGN_MSA(1, Packet2d, __builtin_msa_sldi_b)
+
+#undef PALIGN_MSA
+
+inline std::ostream& operator<<(std::ostream& os, const PacketBlock<Packet2d, 2>& value) {
+ os << "[ " << value.packet[0] << "," << std::endl << " " << value.packet[1] << " ]";
+ return os;
+}
+
+EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet2d, 2>& kernel) {
+ EIGEN_MSA_DEBUG;
+
+ Packet2d trn1 = (Packet2d)__builtin_msa_ilvev_d((v2i64)kernel.packet[1], (v2i64)kernel.packet[0]);
+ Packet2d trn2 = (Packet2d)__builtin_msa_ilvod_d((v2i64)kernel.packet[1], (v2i64)kernel.packet[0]);
+ kernel.packet[0] = trn1;
+ kernel.packet[1] = trn2;
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2d pfloor<Packet2d>(const Packet2d& a) {
+ Packet2d v = a;
+ int32_t old_mode, new_mode;
+ asm volatile(
+ "cfcmsa %[old_mode], $1\n"
+ "ori %[new_mode], %[old_mode], 3\n" // 3 = round towards -INFINITY.
+ "ctcmsa $1, %[new_mode]\n"
+ "frint.d %w[v], %w[v]\n"
+ "ctcmsa $1, %[old_mode]\n"
+ : // outputs
+ [old_mode] "=r"(old_mode), [new_mode] "=r"(new_mode),
+ [v] "+f"(v)
+ : // inputs
+ : // clobbers
+ );
+ return v;
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2d pceil<Packet2d>(const Packet2d& a) {
+ Packet2d v = a;
+ int32_t old_mode, new_mode;
+ asm volatile(
+ "cfcmsa %[old_mode], $1\n"
+ "ori %[new_mode], %[old_mode], 3\n"
+ "xori %[new_mode], %[new_mode], 1\n" // 2 = round towards +INFINITY.
+ "ctcmsa $1, %[new_mode]\n"
+ "frint.d %w[v], %w[v]\n"
+ "ctcmsa $1, %[old_mode]\n"
+ : // outputs
+ [old_mode] "=r"(old_mode), [new_mode] "=r"(new_mode),
+ [v] "+f"(v)
+ : // inputs
+ : // clobbers
+ );
+ return v;
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2d pround<Packet2d>(const Packet2d& a) {
+ Packet2d v = a;
+ int32_t old_mode, new_mode;
+ asm volatile(
+ "cfcmsa %[old_mode], $1\n"
+ "ori %[new_mode], %[old_mode], 3\n"
+ "xori %[new_mode], %[new_mode], 3\n" // 0 = round to nearest, ties to even.
+ "ctcmsa $1, %[new_mode]\n"
+ "frint.d %w[v], %w[v]\n"
+ "ctcmsa $1, %[old_mode]\n"
+ : // outputs
+ [old_mode] "=r"(old_mode), [new_mode] "=r"(new_mode),
+ [v] "+f"(v)
+ : // inputs
+ : // clobbers
+ );
+ return v;
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2d pblend(const Selector<2>& ifPacket, const Packet2d& thenPacket,
+ const Packet2d& elsePacket) {
+ Packet2ul select = { ifPacket.select[0], ifPacket.select[1] };
+ Packet2l mask = __builtin_msa_ceqi_d((Packet2l)select, 0);
+ return (Packet2d)__builtin_msa_bsel_v((v16u8)mask, (v16u8)thenPacket, (v16u8)elsePacket);
+}
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_PACKET_MATH_MSA_H
diff --git a/Eigen/src/Core/arch/NEON/Complex.h b/Eigen/src/Core/arch/NEON/Complex.h
index 57e9b431f..306a309be 100644
--- a/Eigen/src/Core/arch/NEON/Complex.h
+++ b/Eigen/src/Core/arch/NEON/Complex.h
@@ -67,7 +67,7 @@ template<> struct unpacket_traits<Packet2cf> { typedef std::complex<float> type;
template<> EIGEN_STRONG_INLINE Packet2cf pset1<Packet2cf>(const std::complex<float>& from)
{
float32x2_t r64;
- r64 = vld1_f32((float *)&from);
+ r64 = vld1_f32((const float *)&from);
return Packet2cf(vcombine_f32(r64, r64));
}
@@ -142,7 +142,7 @@ template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<float>, Packet2cf
to[stride*1] = std::complex<float>(vgetq_lane_f32(from.v, 2), vgetq_lane_f32(from.v, 3));
}
-template<> EIGEN_STRONG_INLINE void prefetch<std::complex<float> >(const std::complex<float> * addr) { EIGEN_ARM_PREFETCH((float *)addr); }
+template<> EIGEN_STRONG_INLINE void prefetch<std::complex<float> >(const std::complex<float> * addr) { EIGEN_ARM_PREFETCH((const float *)addr); }
template<> EIGEN_STRONG_INLINE std::complex<float> pfirst<Packet2cf>(const Packet2cf& a)
{
@@ -265,6 +265,8 @@ template<> struct conj_helper<Packet2cf, Packet2cf, true,true>
}
};
+EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet2cf,Packet4f)
+
template<> EIGEN_STRONG_INLINE Packet2cf pdiv<Packet2cf>(const Packet2cf& a, const Packet2cf& b)
{
// TODO optimize it for NEON
@@ -275,7 +277,7 @@ template<> EIGEN_STRONG_INLINE Packet2cf pdiv<Packet2cf>(const Packet2cf& a, con
s = vmulq_f32(b.v, b.v);
rev_s = vrev64q_f32(s);
- return Packet2cf(pdiv(res.v, vaddq_f32(s,rev_s)));
+ return Packet2cf(pdiv<Packet4f>(res.v, vaddq_f32(s,rev_s)));
}
EIGEN_DEVICE_FUNC inline void
@@ -381,7 +383,7 @@ template<> EIGEN_STRONG_INLINE Packet1cd ploaddup<Packet1cd>(const std::complex<
template<> EIGEN_STRONG_INLINE void pstore <std::complex<double> >(std::complex<double> * to, const Packet1cd& from) { EIGEN_DEBUG_ALIGNED_STORE pstore((double*)to, from.v); }
template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<double> >(std::complex<double> * to, const Packet1cd& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu((double*)to, from.v); }
-template<> EIGEN_STRONG_INLINE void prefetch<std::complex<double> >(const std::complex<double> * addr) { EIGEN_ARM_PREFETCH((double *)addr); }
+template<> EIGEN_STRONG_INLINE void prefetch<std::complex<double> >(const std::complex<double> * addr) { EIGEN_ARM_PREFETCH((const double *)addr); }
template<> EIGEN_DEVICE_FUNC inline Packet1cd pgather<std::complex<double>, Packet1cd>(const std::complex<double>* from, Index stride)
{
@@ -456,6 +458,8 @@ template<> struct conj_helper<Packet1cd, Packet1cd, true,true>
}
};
+EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet1cd,Packet2d)
+
template<> EIGEN_STRONG_INLINE Packet1cd pdiv<Packet1cd>(const Packet1cd& a, const Packet1cd& b)
{
// TODO optimize it for NEON
diff --git a/Eigen/src/Core/arch/NEON/MathFunctions.h b/Eigen/src/Core/arch/NEON/MathFunctions.h
index 6bb05bb92..c48c61023 100644
--- a/Eigen/src/Core/arch/NEON/MathFunctions.h
+++ b/Eigen/src/Core/arch/NEON/MathFunctions.h
@@ -84,6 +84,98 @@ Packet4f pexp<Packet4f>(const Packet4f& _x)
return y;
}
+template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
+Packet4f plog<Packet4f>(const Packet4f& _x)
+{
+ Packet4f x = _x;
+ _EIGEN_DECLARE_CONST_Packet4f(1 , 1.0f);
+ _EIGEN_DECLARE_CONST_Packet4f(half, 0.5f);
+ _EIGEN_DECLARE_CONST_Packet4i(0x7f, 0x7f);
+
+ _EIGEN_DECLARE_CONST_Packet4i(inv_mant_mask, ~0x7f800000);
+
+  /* natural logarithm computed for 4 simultaneous floats;
+     returns NaN for x <= 0
+  */
+ _EIGEN_DECLARE_CONST_Packet4f(cephes_SQRTHF, 0.707106781186547524f);
+ _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p0, 7.0376836292E-2f);
+ _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p1, - 1.1514610310E-1f);
+ _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p2, 1.1676998740E-1f);
+ _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p3, - 1.2420140846E-1f);
+ _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p4, + 1.4249322787E-1f);
+ _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p5, - 1.6668057665E-1f);
+ _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p6, + 2.0000714765E-1f);
+ _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p7, - 2.4999993993E-1f);
+ _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p8, + 3.3333331174E-1f);
+ _EIGEN_DECLARE_CONST_Packet4f(cephes_log_q1, -2.12194440e-4f);
+ _EIGEN_DECLARE_CONST_Packet4f(cephes_log_q2, 0.693359375f);
+
+ x = vmaxq_f32(x, vdupq_n_f32(0)); /* force flush to zero on denormal values */
+ Packet4ui invalid_mask = vcleq_f32(x, vdupq_n_f32(0));
+
+ Packet4i ux = vreinterpretq_s32_f32(x);
+
+ Packet4i emm0 = vshrq_n_s32(ux, 23);
+
+ /* keep only the fractional part */
+ ux = vandq_s32(ux, p4i_inv_mant_mask);
+ ux = vorrq_s32(ux, vreinterpretq_s32_f32(p4f_half));
+ x = vreinterpretq_f32_s32(ux);
+
+ emm0 = vsubq_s32(emm0, p4i_0x7f);
+ Packet4f e = vcvtq_f32_s32(emm0);
+
+ e = vaddq_f32(e, p4f_1);
+
+ /* part2:
+ if( x < SQRTHF ) {
+ e -= 1;
+ x = x + x - 1.0;
+ } else { x = x - 1.0; }
+ */
+ Packet4ui mask = vcltq_f32(x, p4f_cephes_SQRTHF);
+ Packet4f tmp = vreinterpretq_f32_u32(vandq_u32(vreinterpretq_u32_f32(x), mask));
+ x = vsubq_f32(x, p4f_1);
+ e = vsubq_f32(e, vreinterpretq_f32_u32(vandq_u32(vreinterpretq_u32_f32(p4f_1), mask)));
+ x = vaddq_f32(x, tmp);
+
+ Packet4f z = vmulq_f32(x,x);
+
+ Packet4f y = p4f_cephes_log_p0;
+ y = vmulq_f32(y, x);
+ y = vaddq_f32(y, p4f_cephes_log_p1);
+ y = vmulq_f32(y, x);
+ y = vaddq_f32(y, p4f_cephes_log_p2);
+ y = vmulq_f32(y, x);
+ y = vaddq_f32(y, p4f_cephes_log_p3);
+ y = vmulq_f32(y, x);
+ y = vaddq_f32(y, p4f_cephes_log_p4);
+ y = vmulq_f32(y, x);
+ y = vaddq_f32(y, p4f_cephes_log_p5);
+ y = vmulq_f32(y, x);
+ y = vaddq_f32(y, p4f_cephes_log_p6);
+ y = vmulq_f32(y, x);
+ y = vaddq_f32(y, p4f_cephes_log_p7);
+ y = vmulq_f32(y, x);
+ y = vaddq_f32(y, p4f_cephes_log_p8);
+ y = vmulq_f32(y, x);
+
+ y = vmulq_f32(y, z);
+
+ tmp = vmulq_f32(e, p4f_cephes_log_q1);
+ y = vaddq_f32(y, tmp);
+
+ tmp = vmulq_f32(z, p4f_half);
+ y = vsubq_f32(y, tmp);
+
+ tmp = vmulq_f32(e, p4f_cephes_log_q2);
+ x = vaddq_f32(x, y);
+ x = vaddq_f32(x, tmp);
+ x = vreinterpretq_f32_u32(vorrq_u32(vreinterpretq_u32_f32(x), invalid_mask)); // negative arg will be NAN
+ return x;
+}
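+// For reference, a scalar outline of plog above (illustrative only):
+//   decompose x = m * 2^e with m in [0.5, 1);
+//   if (m < sqrt(0.5)) { e -= 1; m += m; }  // bring m into [sqrt(.5), sqrt(2))
+//   t = m - 1;  z = t * t;
+//   p = t * z * P(t);                       // P from cephes_log_p0..p8
+//   log(x) ~= t + p - 0.5 * z + e * ln(2),
+// where ln(2) is split into cephes_log_q2 + cephes_log_q1 (the small q1 part
+// is folded into the polynomial sum, the large q2 part is added last) to
+// reduce rounding error; non-positive inputs are mapped to NaN via
+// invalid_mask.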
+
} // end namespace internal
} // end namespace Eigen
diff --git a/Eigen/src/Core/arch/NEON/PacketMath.h b/Eigen/src/Core/arch/NEON/PacketMath.h
index 84a56bdcc..010739380 100644
--- a/Eigen/src/Core/arch/NEON/PacketMath.h
+++ b/Eigen/src/Core/arch/NEON/PacketMath.h
@@ -36,12 +36,43 @@ namespace internal {
#endif
#endif
+#if EIGEN_COMP_MSVC
+
+// In MSVC's arm_neon.h header file, all NEON vector types
+// are aliases to the same underlying type __n128.
+// We thus have to wrap them to make them different C++ types.
+// (See also bug 1428)
+
+template<typename T,int unique_id>
+struct eigen_packet_wrapper
+{
+ operator T&() { return m_val; }
+ operator const T&() const { return m_val; }
+ eigen_packet_wrapper() {}
+ eigen_packet_wrapper(const T &v) : m_val(v) {}
+ eigen_packet_wrapper& operator=(const T &v) {
+ m_val = v;
+ return *this;
+ }
+
+ T m_val;
+};
+typedef eigen_packet_wrapper<float32x2_t,0> Packet2f;
+typedef eigen_packet_wrapper<float32x4_t,1> Packet4f;
+typedef eigen_packet_wrapper<int32x4_t ,2> Packet4i;
+typedef eigen_packet_wrapper<int32x2_t ,3> Packet2i;
+typedef eigen_packet_wrapper<uint32x4_t ,4> Packet4ui;
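+// Without the distinct wrapper types, e.g. Packet4f and Packet4i would be the
+// same type (__n128) on MSVC, and the template specializations keyed on the
+// packet type — pset1<Packet4f> vs. pset1<Packet4i>, for instance — would
+// collide.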
+
+#else
+
typedef float32x2_t Packet2f;
typedef float32x4_t Packet4f;
typedef int32x4_t Packet4i;
typedef int32x2_t Packet2i;
typedef uint32x4_t Packet4ui;
+#endif // EIGEN_COMP_MSVC
+
#define _EIGEN_DECLARE_CONST_Packet4f(NAME,X) \
const Packet4f p4f_##NAME = pset1<Packet4f>(X)
@@ -51,14 +82,17 @@ typedef uint32x4_t Packet4ui;
#define _EIGEN_DECLARE_CONST_Packet4i(NAME,X) \
const Packet4i p4i_##NAME = pset1<Packet4i>(X)
-// arm64 does have the pld instruction. If available, let's trust the __builtin_prefetch built-in function
-// which available on LLVM and GCC (at least)
-#if EIGEN_HAS_BUILTIN(__builtin_prefetch) || EIGEN_COMP_GNUC
+#if EIGEN_ARCH_ARM64
+ // __builtin_prefetch tends to do nothing on ARM64 compilers because the
+ // prefetch instructions there are too detailed for __builtin_prefetch to map
+ // meaningfully to them.
+ #define EIGEN_ARM_PREFETCH(ADDR) __asm__ __volatile__("prfm pldl1keep, [%[addr]]\n" ::[addr] "r"(ADDR) : );
+#elif EIGEN_HAS_BUILTIN(__builtin_prefetch) || EIGEN_COMP_GNUC
#define EIGEN_ARM_PREFETCH(ADDR) __builtin_prefetch(ADDR);
#elif defined __pld
#define EIGEN_ARM_PREFETCH(ADDR) __pld(ADDR)
-#elif !EIGEN_ARCH_ARM64
- #define EIGEN_ARM_PREFETCH(ADDR) __asm__ __volatile__ ( " pld [%[addr]]\n" :: [addr] "r" (ADDR) : "cc" );
+#elif EIGEN_ARCH_ARM32
+ #define EIGEN_ARM_PREFETCH(ADDR) __asm__ __volatile__ ("pld [%[addr]]\n" :: [addr] "r" (ADDR) : );
#else
// by default no explicit prefetching
#define EIGEN_ARM_PREFETCH(ADDR)
@@ -78,7 +112,7 @@ template<> struct packet_traits<float> : default_packet_traits
// FIXME check the Has*
HasSin = 0,
HasCos = 0,
- HasLog = 0,
+ HasLog = 1,
HasExp = 1,
HasSqrt = 0
};
@@ -113,7 +147,7 @@ template<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int32_t& from)
template<> EIGEN_STRONG_INLINE Packet4f plset<Packet4f>(const float& a)
{
- const float32_t f[] = {0, 1, 2, 3};
+ const float f[] = {0, 1, 2, 3};
Packet4f countdown = vld1q_f32(f);
return vaddq_f32(pset1<Packet4f>(a), countdown);
}
diff --git a/Eigen/src/Core/arch/NEON/TypeCasting.h b/Eigen/src/Core/arch/NEON/TypeCasting.h
new file mode 100644
index 000000000..95d1fd0e4
--- /dev/null
+++ b/Eigen/src/Core/arch/NEON/TypeCasting.h
@@ -0,0 +1,48 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2018 Rasmus Munk Larsen <rmlarsen@google.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_TYPE_CASTING_NEON_H
+#define EIGEN_TYPE_CASTING_NEON_H
+
+namespace Eigen {
+
+namespace internal {
+
+template <>
+struct type_casting_traits<float, int> {
+ enum {
+ VectorizedCast = 1,
+ SrcCoeffRatio = 1,
+ TgtCoeffRatio = 1
+ };
+};
+
+template <>
+struct type_casting_traits<int, float> {
+ enum {
+ VectorizedCast = 1,
+ SrcCoeffRatio = 1,
+ TgtCoeffRatio = 1
+ };
+};
+
+
+template<> EIGEN_STRONG_INLINE Packet4i pcast<Packet4f, Packet4i>(const Packet4f& a) {
+ return vcvtq_s32_f32(a);
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f pcast<Packet4i, Packet4f>(const Packet4i& a) {
+ return vcvtq_f32_s32(a);
+}
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_TYPE_CASTING_NEON_H
diff --git a/Eigen/src/Core/arch/SSE/Complex.h b/Eigen/src/Core/arch/SSE/Complex.h
index 5607fe0ab..d075043ce 100644
--- a/Eigen/src/Core/arch/SSE/Complex.h
+++ b/Eigen/src/Core/arch/SSE/Complex.h
@@ -128,7 +128,7 @@ template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<float>, Packet2cf
_mm_cvtss_f32(_mm_shuffle_ps(from.v, from.v, 3)));
}
-template<> EIGEN_STRONG_INLINE void prefetch<std::complex<float> >(const std::complex<float> * addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
+template<> EIGEN_STRONG_INLINE void prefetch<std::complex<float> >(const std::complex<float> * addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
template<> EIGEN_STRONG_INLINE std::complex<float> pfirst<Packet2cf>(const Packet2cf& a)
{
@@ -229,23 +229,7 @@ template<> struct conj_helper<Packet2cf, Packet2cf, true,true>
}
};
-template<> struct conj_helper<Packet4f, Packet2cf, false,false>
-{
- EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet4f& x, const Packet2cf& y, const Packet2cf& c) const
- { return padd(c, pmul(x,y)); }
-
- EIGEN_STRONG_INLINE Packet2cf pmul(const Packet4f& x, const Packet2cf& y) const
- { return Packet2cf(Eigen::internal::pmul<Packet4f>(x, y.v)); }
-};
-
-template<> struct conj_helper<Packet2cf, Packet4f, false,false>
-{
- EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet4f& y, const Packet2cf& c) const
- { return padd(c, pmul(x,y)); }
-
- EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& x, const Packet4f& y) const
- { return Packet2cf(Eigen::internal::pmul<Packet4f>(x.v, y)); }
-};
+EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet2cf,Packet4f)
template<> EIGEN_STRONG_INLINE Packet2cf pdiv<Packet2cf>(const Packet2cf& a, const Packet2cf& b)
{
@@ -340,7 +324,7 @@ template<> EIGEN_STRONG_INLINE Packet1cd ploaddup<Packet1cd>(const std::complex<
template<> EIGEN_STRONG_INLINE void pstore <std::complex<double> >(std::complex<double> * to, const Packet1cd& from) { EIGEN_DEBUG_ALIGNED_STORE pstore((double*)to, Packet2d(from.v)); }
template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<double> >(std::complex<double> * to, const Packet1cd& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu((double*)to, Packet2d(from.v)); }
-template<> EIGEN_STRONG_INLINE void prefetch<std::complex<double> >(const std::complex<double> * addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
+template<> EIGEN_STRONG_INLINE void prefetch<std::complex<double> >(const std::complex<double> * addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
template<> EIGEN_STRONG_INLINE std::complex<double> pfirst<Packet1cd>(const Packet1cd& a)
{
@@ -430,23 +414,7 @@ template<> struct conj_helper<Packet1cd, Packet1cd, true,true>
}
};
-template<> struct conj_helper<Packet2d, Packet1cd, false,false>
-{
- EIGEN_STRONG_INLINE Packet1cd pmadd(const Packet2d& x, const Packet1cd& y, const Packet1cd& c) const
- { return padd(c, pmul(x,y)); }
-
- EIGEN_STRONG_INLINE Packet1cd pmul(const Packet2d& x, const Packet1cd& y) const
- { return Packet1cd(Eigen::internal::pmul<Packet2d>(x, y.v)); }
-};
-
-template<> struct conj_helper<Packet1cd, Packet2d, false,false>
-{
- EIGEN_STRONG_INLINE Packet1cd pmadd(const Packet1cd& x, const Packet2d& y, const Packet1cd& c) const
- { return padd(c, pmul(x,y)); }
-
- EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& x, const Packet2d& y) const
- { return Packet1cd(Eigen::internal::pmul<Packet2d>(x.v, y)); }
-};
+EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet1cd,Packet2d)
template<> EIGEN_STRONG_INLINE Packet1cd pdiv<Packet1cd>(const Packet1cd& a, const Packet1cd& b)
{
diff --git a/Eigen/src/Core/arch/SSE/MathFunctions.h b/Eigen/src/Core/arch/SSE/MathFunctions.h
index 7b5f948e1..4af2c6cae 100644
--- a/Eigen/src/Core/arch/SSE/MathFunctions.h
+++ b/Eigen/src/Core/arch/SSE/MathFunctions.h
@@ -242,7 +242,7 @@ Packet2d pexp<Packet2d>(const Packet2d& _x)
return pmax(pmul(x, Packet2d(_mm_castsi128_pd(emm0))), _x);
}
-/* evaluation of 4 sines at onces, using SSE2 intrinsics.
+/* evaluation of 4 sines at once, using SSE2 intrinsics.
The code is the exact rewriting of the cephes sinf function.
Precision is excellent as long as x < 8192 (I did not bother to
diff --git a/Eigen/src/Core/arch/SSE/PacketMath.h b/Eigen/src/Core/arch/SSE/PacketMath.h
index 03c8a2c13..2e815c0c5 100755
--- a/Eigen/src/Core/arch/SSE/PacketMath.h
+++ b/Eigen/src/Core/arch/SSE/PacketMath.h
@@ -461,10 +461,16 @@ template<> EIGEN_STRONG_INLINE void pstore1<Packet2d>(double* to, const double&
pstore(to, Packet2d(vec2d_swizzle1(pa,0,0)));
}
+#if EIGEN_COMP_PGI
+typedef const void * SsePrefetchPtrType;
+#else
+typedef const char * SsePrefetchPtrType;
+#endif
+
#ifndef EIGEN_VECTORIZE_AVX
-template<> EIGEN_STRONG_INLINE void prefetch<float>(const float* addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
-template<> EIGEN_STRONG_INLINE void prefetch<double>(const double* addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
-template<> EIGEN_STRONG_INLINE void prefetch<int>(const int* addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
+template<> EIGEN_STRONG_INLINE void prefetch<float>(const float* addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
+template<> EIGEN_STRONG_INLINE void prefetch<double>(const double* addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
+template<> EIGEN_STRONG_INLINE void prefetch<int>(const int* addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
#endif
#if EIGEN_COMP_MSVC_STRICT && EIGEN_OS_WIN64
@@ -657,7 +663,7 @@ template<> EIGEN_STRONG_INLINE int predux_mul<Packet4i>(const Packet4i& a)
// TODO try to call _mm_mul_epu32 directly
EIGEN_ALIGN16 int aux[4];
pstore(aux, a);
- return (aux[0] * aux[1]) * (aux[2] * aux[3]);;
+ return (aux[0] * aux[1]) * (aux[2] * aux[3]);
}
// min
@@ -928,4 +934,14 @@ template<> EIGEN_STRONG_INLINE double pmadd(const double& a, const double& b, co
} // end namespace Eigen
+#if EIGEN_COMP_PGI
+// PGI++ does not define the following intrinsics in C++ mode.
+static inline __m128 _mm_castpd_ps (__m128d x) { return reinterpret_cast<__m128&>(x); }
+static inline __m128i _mm_castpd_si128(__m128d x) { return reinterpret_cast<__m128i&>(x); }
+static inline __m128d _mm_castps_pd (__m128 x) { return reinterpret_cast<__m128d&>(x); }
+static inline __m128i _mm_castps_si128(__m128 x) { return reinterpret_cast<__m128i&>(x); }
+static inline __m128 _mm_castsi128_ps(__m128i x) { return reinterpret_cast<__m128&>(x); }
+static inline __m128d _mm_castsi128_pd(__m128i x) { return reinterpret_cast<__m128d&>(x); }
+#endif
+
#endif // EIGEN_PACKET_MATH_SSE_H
diff --git a/Eigen/src/Core/arch/SSE/TypeCasting.h b/Eigen/src/Core/arch/SSE/TypeCasting.h
index c84893230..c6ca8c716 100644
--- a/Eigen/src/Core/arch/SSE/TypeCasting.h
+++ b/Eigen/src/Core/arch/SSE/TypeCasting.h
@@ -14,6 +14,7 @@ namespace Eigen {
namespace internal {
+#ifndef EIGEN_VECTORIZE_AVX
template <>
struct type_casting_traits<float, int> {
enum {
@@ -23,11 +24,6 @@ struct type_casting_traits<float, int> {
};
};
-template<> EIGEN_STRONG_INLINE Packet4i pcast<Packet4f, Packet4i>(const Packet4f& a) {
- return _mm_cvttps_epi32(a);
-}
-
-
template <>
struct type_casting_traits<int, float> {
enum {
@@ -37,11 +33,6 @@ struct type_casting_traits<int, float> {
};
};
-template<> EIGEN_STRONG_INLINE Packet4f pcast<Packet4i, Packet4f>(const Packet4i& a) {
- return _mm_cvtepi32_ps(a);
-}
-
-
template <>
struct type_casting_traits<double, float> {
enum {
@@ -51,10 +42,6 @@ struct type_casting_traits<double, float> {
};
};
-template<> EIGEN_STRONG_INLINE Packet4f pcast<Packet2d, Packet4f>(const Packet2d& a, const Packet2d& b) {
- return _mm_shuffle_ps(_mm_cvtpd_ps(a), _mm_cvtpd_ps(b), (1 << 2) | (1 << 6));
-}
-
template <>
struct type_casting_traits<float, double> {
enum {
@@ -63,6 +50,19 @@ struct type_casting_traits<float, double> {
TgtCoeffRatio = 2
};
};
+#endif
+
+template<> EIGEN_STRONG_INLINE Packet4i pcast<Packet4f, Packet4i>(const Packet4f& a) {
+ return _mm_cvttps_epi32(a);
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f pcast<Packet4i, Packet4f>(const Packet4i& a) {
+ return _mm_cvtepi32_ps(a);
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f pcast<Packet2d, Packet4f>(const Packet2d& a, const Packet2d& b) {
+ return _mm_shuffle_ps(_mm_cvtpd_ps(a), _mm_cvtpd_ps(b), (1 << 2) | (1 << 6));
+}
template<> EIGEN_STRONG_INLINE Packet2d pcast<Packet4f, Packet2d>(const Packet4f& a) {
// Simply discard the second half of the input
diff --git a/Eigen/src/Core/arch/SYCL/InteropHeaders.h b/Eigen/src/Core/arch/SYCL/InteropHeaders.h
new file mode 100644
index 000000000..c1da40d14
--- /dev/null
+++ b/Eigen/src/Core/arch/SYCL/InteropHeaders.h
@@ -0,0 +1,104 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Mehdi Goli Codeplay Software Ltd.
+// Ralph Potter Codeplay Software Ltd.
+// Luke Iwanski Codeplay Software Ltd.
+// Contact: <eigen@codeplay.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+/*****************************************************************
+ * InteropHeaders.h
+ *
+ * \brief:
+ * InteropHeaders: maps SYCL vector types (cl::sycl::cl_float4, cl::sycl::cl_double2) onto Eigen's packet traits.
+ *
+*****************************************************************/
+
+#ifndef EIGEN_INTEROP_HEADERS_SYCL_H
+#define EIGEN_INTEROP_HEADERS_SYCL_H
+#if defined EIGEN_USE_SYCL
+namespace Eigen {
+
+namespace internal {
+#define SYCL_PACKET_TRAITS(packet_type, val, unpacket_type, lengths)\
+ template<> struct packet_traits<unpacket_type> : default_packet_traits\
+ {\
+ typedef packet_type type;\
+ typedef packet_type half;\
+ enum {\
+ Vectorizable = 1,\
+ AlignedOnScalar = 1,\
+ size=lengths,\
+ HasHalfPacket = 0,\
+ HasDiv = 1,\
+ HasLog = 1,\
+ HasExp = 1,\
+ HasSqrt = 1,\
+ HasRsqrt = 1,\
+ HasSin = 1,\
+ HasCos = 1,\
+ HasTan = 1,\
+ HasASin = 1,\
+ HasACos = 1,\
+ HasATan = 1,\
+ HasSinh = 1,\
+ HasCosh = 1,\
+ HasTanh = 1,\
+ HasLGamma = 0,\
+ HasDiGamma = 0,\
+ HasZeta = 0,\
+ HasPolygamma = 0,\
+ HasErf = 0,\
+ HasErfc = 0,\
+ HasIGamma = 0,\
+ HasIGammac = 0,\
+ HasBetaInc = 0,\
+ HasBlend = val,\
+ HasMax=1,\
+ HasMin=1,\
+ HasMul=1,\
+ HasAdd=1,\
+ HasFloor=1,\
+ HasRound=1,\
+ HasLog1p=1,\
+ HasExpm1=1,\
+ HasCeil=1,\
+ };\
+ };
+
+SYCL_PACKET_TRAITS(cl::sycl::cl_float4, 1, float, 4)
+SYCL_PACKET_TRAITS(cl::sycl::cl_float4, 1, const float, 4)
+SYCL_PACKET_TRAITS(cl::sycl::cl_double2, 0, double, 2)
+SYCL_PACKET_TRAITS(cl::sycl::cl_double2, 0, const double, 2)
+#undef SYCL_PACKET_TRAITS
+
+
+// Make sure this is only available when targeting a GPU: we don't want to
+// introduce conflicts between these packet_traits definitions and the ones
+// we'll use on the host side (SSE, AVX, ...)
+#define SYCL_ARITHMETIC(packet_type) template<> struct is_arithmetic<packet_type> { enum { value = true }; };
+SYCL_ARITHMETIC(cl::sycl::cl_float4)
+SYCL_ARITHMETIC(cl::sycl::cl_double2)
+#undef SYCL_ARITHMETIC
+
+#define SYCL_UNPACKET_TRAITS(packet_type, unpacket_type, lengths)\
+template<> struct unpacket_traits<packet_type> {\
+ typedef unpacket_type type;\
+ enum {size=lengths, alignment=Aligned16};\
+ typedef packet_type half;\
+};
+SYCL_UNPACKET_TRAITS(cl::sycl::cl_float4, float, 4)
+SYCL_UNPACKET_TRAITS(cl::sycl::cl_double2, double, 2)
+
+#undef SYCL_UNPACKET_TRAITS
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_USE_SYCL
+#endif // EIGEN_INTEROP_HEADERS_SYCL_H
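As a minimal sketch of how the traits added above surface through Eigen's internal API (assuming a build with EIGEN_USE_SYCL defined; sycl_packet_size is a hypothetical helper and not part of the patch):

#include <Eigen/Core>

// Reports the SIMD width Eigen selects for Scalar on a SYCL device: with the
// packet_traits specializations above this is 4 for float (cl::sycl::cl_float4)
// and 2 for double (cl::sycl::cl_double2).
template <typename Scalar>
constexpr int sycl_packet_size() {
  return Eigen::internal::packet_traits<Scalar>::size;
}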
diff --git a/Eigen/src/Core/arch/SYCL/MathFunctions.h b/Eigen/src/Core/arch/SYCL/MathFunctions.h
new file mode 100644
index 000000000..422839c6c
--- /dev/null
+++ b/Eigen/src/Core/arch/SYCL/MathFunctions.h
@@ -0,0 +1,221 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Mehdi Goli Codeplay Software Ltd.
+// Ralph Potter Codeplay Software Ltd.
+// Luke Iwanski Codeplay Software Ltd.
+// Contact: <eigen@codeplay.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+/*****************************************************************
+ * MathFunctions.h
+ *
+ * \brief:
+ * MathFunctions: packet-level math functions (log, exp, sqrt, trig, ...) implemented with cl::sycl built-ins.
+ *
+*****************************************************************/
+
+#ifndef EIGEN_MATH_FUNCTIONS_SYCL_H
+#define EIGEN_MATH_FUNCTIONS_SYCL_H
+
+namespace Eigen {
+
+namespace internal {
+
+// Make sure this is only available when targeting a GPU: we don't want to
+// introduce conflicts between these packet_traits definitions and the ones
+// we'll use on the host side (SSE, AVX, ...)
+//#if defined(__SYCL_DEVICE_ONLY__) && defined(EIGEN_USE_SYCL)
+#define SYCL_PLOG(packet_type) \
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
+packet_type plog<packet_type>(const packet_type& a) { return cl::sycl::log(a); }
+
+SYCL_PLOG(cl::sycl::cl_float4)
+SYCL_PLOG(cl::sycl::cl_double2)
+#undef SYCL_PLOG
+
+#define SYCL_PLOG1P(packet_type) \
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
+packet_type plog1p<packet_type>(const packet_type& a) { return cl::sycl::log1p(a); }
+
+SYCL_PLOG1P(cl::sycl::cl_float4)
+SYCL_PLOG1P(cl::sycl::cl_double2)
+#undef SYCL_PLOG1P
+
+#define SYCL_PLOG10(packet_type) \
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
+packet_type plog10<packet_type>(const packet_type& a) { return cl::sycl::log10(a); }
+
+SYCL_PLOG10(cl::sycl::cl_float4)
+SYCL_PLOG10(cl::sycl::cl_double2)
+#undef SYCL_PLOG10
+
+#define SYCL_PEXP(packet_type) \
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
+packet_type pexp<packet_type>(const packet_type& a) { return cl::sycl::exp(a); }
+
+SYCL_PEXP(cl::sycl::cl_float4)
+SYCL_PEXP(cl::sycl::cl_double2)
+#undef SYCL_PEXP
+
+#define SYCL_PEXPM1(packet_type) \
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
+packet_type pexpm1<packet_type>(const packet_type& a) { return cl::sycl::expm1(a); }
+
+SYCL_PEXPM1(cl::sycl::cl_float4)
+SYCL_PEXPM1(cl::sycl::cl_double2)
+#undef SYCL_PEXPM1
+
+#define SYCL_PSQRT(packet_type) \
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
+packet_type psqrt<packet_type>(const packet_type& a) { return cl::sycl::sqrt(a); }
+
+SYCL_PSQRT(cl::sycl::cl_float4)
+SYCL_PSQRT(cl::sycl::cl_double2)
+#undef SYCL_PSQRT
+
+
+#define SYCL_PRSQRT(packet_type) \
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
+packet_type prsqrt<packet_type>(const packet_type& a) { return cl::sycl::rsqrt(a); }
+
+SYCL_PRSQRT(cl::sycl::cl_float4)
+SYCL_PRSQRT(cl::sycl::cl_double2)
+#undef SYCL_PRSQRT
+
+
+/** \internal \returns the sine of \a a (coeff-wise) */
+#define SYCL_PSIN(packet_type) \
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
+packet_type psin<packet_type>(const packet_type& a) { return cl::sycl::sin(a); }
+
+SYCL_PSIN(cl::sycl::cl_float4)
+SYCL_PSIN(cl::sycl::cl_double2)
+#undef SYCL_PSIN
+
+
+/** \internal \returns the cosine of \a a (coeff-wise) */
+#define SYCL_PCOS(packet_type) \
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
+packet_type pcos<packet_type>(const packet_type& a) { return cl::sycl::cos(a); }
+
+SYCL_PCOS(cl::sycl::cl_float4)
+SYCL_PCOS(cl::sycl::cl_double2)
+#undef SYCL_PCOS
+
+/** \internal \returns the tangent of \a a (coeff-wise) */
+#define SYCL_PTAN(packet_type) \
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
+packet_type ptan<packet_type>(const packet_type& a) { return cl::sycl::tan(a); }
+
+SYCL_PTAN(cl::sycl::cl_float4)
+SYCL_PTAN(cl::sycl::cl_double2)
+#undef SYCL_PTAN
+
+/** \internal \returns the arc sine of \a a (coeff-wise) */
+#define SYCL_PASIN(packet_type) \
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
+packet_type pasin<packet_type>(const packet_type& a) { return cl::sycl::asin(a); }
+
+SYCL_PASIN(cl::sycl::cl_float4)
+SYCL_PASIN(cl::sycl::cl_double2)
+#undef SYCL_PASIN
+
+
+/** \internal \returns the arc cosine of \a a (coeff-wise) */
+#define SYCL_PACOS(packet_type) \
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
+packet_type pacos<packet_type>(const packet_type& a) { return cl::sycl::acos(a); }
+
+SYCL_PACOS(cl::sycl::cl_float4)
+SYCL_PACOS(cl::sycl::cl_double2)
+#undef SYCL_PACOS
+
+/** \internal \returns the arc tangent of \a a (coeff-wise) */
+#define SYCL_PATAN(packet_type) \
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
+packet_type patan<packet_type>(const packet_type& a) { return cl::sycl::atan(a); }
+
+SYCL_PATAN(cl::sycl::cl_float4)
+SYCL_PATAN(cl::sycl::cl_double2)
+#undef SYCL_PATAN
+
+/** \internal \returns the hyperbolic sine of \a a (coeff-wise) */
+#define SYCL_PSINH(packet_type) \
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
+packet_type psinh<packet_type>(const packet_type& a) { return cl::sycl::sinh(a); }
+
+SYCL_PSINH(cl::sycl::cl_float4)
+SYCL_PSINH(cl::sycl::cl_double2)
+#undef SYCL_PSINH
+
+/** \internal \returns the hyperbolic cosine of \a a (coeff-wise) */
+#define SYCL_PCOSH(packet_type) \
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
+packet_type pcosh<packet_type>(const packet_type& a) { return cl::sycl::cosh(a); }
+
+SYCL_PCOSH(cl::sycl::cl_float4)
+SYCL_PCOSH(cl::sycl::cl_double2)
+#undef SYCL_PCOSH
+
+/** \internal \returns the hyperbolic tangent of \a a (coeff-wise) */
+#define SYCL_PTANH(packet_type) \
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
+packet_type ptanh<packet_type>(const packet_type& a) { return cl::sycl::tanh(a); }
+
+SYCL_PTANH(cl::sycl::cl_float4)
+SYCL_PTANH(cl::sycl::cl_double2)
+#undef SYCL_PTANH
+
+#define SYCL_PCEIL(packet_type) \
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
+packet_type pceil<packet_type>(const packet_type& a) { return cl::sycl::ceil(a); }
+
+SYCL_PCEIL(cl::sycl::cl_float4)
+SYCL_PCEIL(cl::sycl::cl_double2)
+#undef SYCL_PCEIL
+
+
+#define SYCL_PROUND(packet_type) \
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
+packet_type pround<packet_type>(const packet_type& a) { return cl::sycl::round(a); }
+
+SYCL_PROUND(cl::sycl::cl_float4)
+SYCL_PROUND(cl::sycl::cl_double2)
+#undef SYCL_PROUND
+
+#define SYCL_FLOOR(packet_type) \
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
+packet_type pfloor<packet_type>(const packet_type& a) { return cl::sycl::floor(a); }
+
+SYCL_FLOOR(cl::sycl::cl_float4)
+SYCL_FLOOR(cl::sycl::cl_double2)
+#undef SYCL_FLOOR
+
+
+#define SYCL_PMIN(packet_type, expr) \
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
+packet_type pmin<packet_type>(const packet_type& a, const packet_type& b) { return expr; }
+
+SYCL_PMIN(cl::sycl::cl_float4, cl::sycl::fmin(a, b))
+SYCL_PMIN(cl::sycl::cl_double2, cl::sycl::fmin(a, b))
+#undef SYCL_PMIN
+
+#define SYCL_PMAX(packet_type, expr) \
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
+packet_type pmax<packet_type>(const packet_type& a, const packet_type& b) { return expr; }
+
+SYCL_PMAX(cl::sycl::cl_float4, cl::sycl::fmax(a, b))
+SYCL_PMAX(cl::sycl::cl_double2, cl::sycl::fmax(a, b))
+#undef SYCL_PMAX
+
+//#endif
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_MATH_FUNCTIONS_SYCL_H
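For reference, a sketch of what one of the macros above expands to (this mirrors SYCL_PLOG(cl::sycl::cl_float4); it is an illustration rather than additional code in the patch):

// Specializes Eigen's packet-level plog for the SYCL float4 packet by forwarding
// to the element-wise SYCL builtin.
template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
cl::sycl::cl_float4 plog<cl::sycl::cl_float4>(const cl::sycl::cl_float4& a) {
  return cl::sycl::log(a);
}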
diff --git a/Eigen/src/Core/arch/SYCL/PacketMath.h b/Eigen/src/Core/arch/SYCL/PacketMath.h
new file mode 100644
index 000000000..820a83311
--- /dev/null
+++ b/Eigen/src/Core/arch/SYCL/PacketMath.h
@@ -0,0 +1,458 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Mehdi Goli Codeplay Software Ltd.
+// Ralph Potter Codeplay Software Ltd.
+// Luke Iwanski Codeplay Software Ltd.
+// Contact: <eigen@codeplay.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+/*****************************************************************
+ * PacketMath.h
+ *
+ * \brief:
+ * PacketMath: packet load/store, gather/scatter, and arithmetic primitives for SYCL vector types.
+ *
+*****************************************************************/
+
+#ifndef EIGEN_PACKET_MATH_SYCL_H
+#define EIGEN_PACKET_MATH_SYCL_H
+#include <type_traits>
+#if defined EIGEN_USE_SYCL
+namespace Eigen {
+
+namespace internal {
+
+#define SYCL_PLOADT_RO(address_space_target)\
+template<typename packet_type, int Alignment>\
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE packet_type\
+ ploadt_ro(typename cl::sycl::multi_ptr<const typename unpacket_traits<packet_type>::type,\
+ cl::sycl::access::address_space::address_space_target>::pointer_t from) {\
+ typedef typename unpacket_traits<packet_type>::type scalar;\
+ typedef cl::sycl::multi_ptr<scalar, cl::sycl::access::address_space::address_space_target> multi_ptr;\
+ auto res=packet_type(static_cast<typename unpacket_traits<packet_type>::type>(0));\
+ res.load(0, multi_ptr(const_cast<typename multi_ptr::pointer_t>(from)));\
+ return res;\
+}
+
+SYCL_PLOADT_RO(global_space)
+SYCL_PLOADT_RO(local_space)
+
+#undef SYCL_PLOADT_RO
+
+
+#define SYCL_PLOAD(address_space_target, Alignment, AlignedType)\
+template<typename packet_type> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE packet_type\
+ pload##AlignedType(typename cl::sycl::multi_ptr<const typename unpacket_traits<packet_type>::type,\
+ cl::sycl::access::address_space::address_space_target>::pointer_t from) {\
+ return ploadt_ro<packet_type, Alignment>(from);\
+ }
+
+// global space
+SYCL_PLOAD(global_space, Unaligned, u)
+SYCL_PLOAD(global_space, Aligned, )
+
+// local space
+SYCL_PLOAD(local_space, Unaligned, u)
+SYCL_PLOAD(local_space, Aligned, )
+
+// private space
+//SYCL_PLOAD(private_space, Unaligned, u)
+//SYCL_PLOAD(private_space, Aligned, )
+
+#undef SYCL_PLOAD
+
+
+/** \internal \returns a packet version of \a *from.
+ * The pointer \a from must be aligned on a \a Alignment bytes boundary. */
+#define SYCL_PLOADT(address_space_target)\
+template<typename packet_type, int Alignment>\
+EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE packet_type ploadt(\
+ typename cl::sycl::multi_ptr<const typename unpacket_traits<packet_type>::type,\
+ cl::sycl::access::address_space::address_space_target>::pointer_t from)\
+{\
+ if(Alignment >= unpacket_traits<packet_type>::alignment)\
+ return pload<packet_type>(from);\
+ else\
+ return ploadu<packet_type>(from);\
+}
+
+// global space
+SYCL_PLOADT(global_space)
+// local space
+SYCL_PLOADT(local_space)
+
+//private_space
+// There is no need to specialise it for private space as it can use the GenericPacketMath version
+
+#define SYCL_PLOADT_RO_SPECIAL(packet_type, Alignment)\
+ template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE packet_type\
+ ploadt_ro<packet_type, Alignment>(const typename unpacket_traits<packet_type>::type * from) { \
+ typedef typename unpacket_traits<packet_type>::type scalar;\
+ auto res=packet_type(static_cast<scalar>(0));\
+ res. template load<cl::sycl::access::address_space::private_space>(0, const_cast<scalar*>(from));\
+ return res;\
+ }
+
+SYCL_PLOADT_RO_SPECIAL(cl::sycl::cl_float4, Aligned)
+SYCL_PLOADT_RO_SPECIAL(cl::sycl::cl_double2, Aligned)
+SYCL_PLOADT_RO_SPECIAL(cl::sycl::cl_float4, Unaligned)
+SYCL_PLOADT_RO_SPECIAL(cl::sycl::cl_double2, Unaligned)
+
+
+#define SYCL_PLOAD_SPECIAL(packet_type, alignment_type)\
+template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE packet_type\
+ pload##alignment_type(const typename unpacket_traits<packet_type>::type * from) { \
+ typedef typename unpacket_traits<packet_type>::type scalar;\
+ auto res=packet_type(static_cast<scalar>(0));\
+ res. template load<cl::sycl::access::address_space::private_space>(0, const_cast<scalar*>(from));\
+ return res;\
+ }
+SYCL_PLOAD_SPECIAL(cl::sycl::cl_float4,)
+SYCL_PLOAD_SPECIAL(cl::sycl::cl_double2,)
+SYCL_PLOAD_SPECIAL(cl::sycl::cl_float4, u)
+SYCL_PLOAD_SPECIAL(cl::sycl::cl_double2, u)
+
+#undef SYCL_PLOAD_SPECIAL
+
+#define SYCL_PSTORE(scalar, packet_type, address_space_target, alignment)\
+template<>\
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void pstore##alignment( \
+ typename cl::sycl::multi_ptr<scalar, cl::sycl::access::address_space::address_space_target>::pointer_t to, \
+ const packet_type& from) {\
+ typedef cl::sycl::multi_ptr<scalar, cl::sycl::access::address_space::address_space_target> multi_ptr;\
+ from.store(0, multi_ptr(to));\
+}
+
+// global space
+SYCL_PSTORE(float, cl::sycl::cl_float4, global_space, )
+SYCL_PSTORE(float, cl::sycl::cl_float4, global_space, u)
+SYCL_PSTORE(double, cl::sycl::cl_double2, global_space, )
+SYCL_PSTORE(double, cl::sycl::cl_double2, global_space, u)
+
+SYCL_PSTORE(float, cl::sycl::cl_float4, local_space, )
+SYCL_PSTORE(float, cl::sycl::cl_float4, local_space, u)
+SYCL_PSTORE(double, cl::sycl::cl_double2, local_space, )
+SYCL_PSTORE(double, cl::sycl::cl_double2, local_space, u)
+
+SYCL_PSTORE(float, cl::sycl::cl_float4, private_space, )
+SYCL_PSTORE(float, cl::sycl::cl_float4, private_space, u)
+SYCL_PSTORE(double, cl::sycl::cl_double2, private_space, )
+SYCL_PSTORE(double, cl::sycl::cl_double2, private_space, u)
+
+
+#define SYCL_PSTORE_T(scalar, packet_type, Alignment)\
+template<>\
+EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void pstoret<scalar, packet_type, Alignment>(\
+ scalar* to,\
+ const packet_type& from) {\
+ if(Alignment)\
+ pstore(to, from);\
+ else\
+ pstoreu(to,from);\
+}
+
+
+SYCL_PSTORE_T(float, cl::sycl::cl_float4, Aligned)
+
+SYCL_PSTORE_T(float, cl::sycl::cl_float4, Unaligned)
+
+SYCL_PSTORE_T(double, cl::sycl::cl_double2, Aligned)
+
+SYCL_PSTORE_T(double, cl::sycl::cl_double2, Unaligned)
+
+
+#undef SYCL_PSTORE_T
+
+#define SYCL_PSET1(packet_type)\
+template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE packet_type pset1<packet_type>(\
+ const typename unpacket_traits<packet_type>::type& from) {\
+ return packet_type(from);\
+}
+
+// global space
+SYCL_PSET1(cl::sycl::cl_float4)
+SYCL_PSET1(cl::sycl::cl_double2)
+
+#undef SYCL_PSET1
+
+
+template <typename packet_type> struct get_base_packet {
+template <typename sycl_multi_pointer>
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type get_ploaddup(sycl_multi_pointer ) {}
+
+ template <typename sycl_multi_pointer>
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type get_pgather(sycl_multi_pointer , Index ) {}
+};
+
+template <> struct get_base_packet <cl::sycl::cl_float4> {
+ template <typename sycl_multi_pointer>
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE cl::sycl::cl_float4 get_ploaddup(sycl_multi_pointer from) {
+ return cl::sycl::cl_float4(from[0], from[0], from[1], from[1]);
+ }
+ template <typename sycl_multi_pointer>
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE cl::sycl::cl_float4 get_pgather(sycl_multi_pointer from, Index stride) {
+ return cl::sycl::cl_float4(from[0*stride], from[1*stride], from[2*stride], from[3*stride]);
+ }
+
+ template <typename sycl_multi_pointer>
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void set_pscatter(sycl_multi_pointer to , const cl::sycl::cl_float4& from, Index stride) {
+ auto tmp = stride;
+ to[0] = from.x();
+ to[tmp] = from.y();
+ to[tmp += stride] = from.z();
+ to[tmp += stride] = from.w();
+ }
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE cl::sycl::cl_float4 set_plset(const float& a) {
+ return cl::sycl::cl_float4(static_cast<float>(a), static_cast<float>(a+1), static_cast<float>(a+2), static_cast<float>(a+3));
+ }
+};
+
+template <> struct get_base_packet <cl::sycl::cl_double2> {
+ template <typename sycl_multi_pointer>
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE cl::sycl::cl_double2 get_ploaddup(const sycl_multi_pointer from) {
+ return cl::sycl::cl_double2(from[0], from[0]);
+ }
+
+ template <typename sycl_multi_pointer, typename Index>
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE cl::sycl::cl_double2 get_pgather(const sycl_multi_pointer from, Index stride) {
+ return cl::sycl::cl_double2(from[0*stride], from[1*stride]);
+ }
+
+ template <typename sycl_multi_pointer>
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void set_pscatter(sycl_multi_pointer to , const cl::sycl::cl_double2& from, Index stride) {
+ to[0] = from.x();
+ to[stride] = from.y();
+ }
+
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE cl::sycl::cl_double2 set_plset(const double& a) {
+ return cl::sycl::cl_double2(static_cast<double>(a), static_cast<double>(a + 1));
+ }
+};
+
+#define SYCL_PLOAD_DUP(address_space_target)\
+template<typename packet_type> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type \
+ploaddup(typename cl::sycl::multi_ptr<const typename unpacket_traits<packet_type>::type,\
+ cl::sycl::access::address_space::address_space_target>::pointer_t from)\
+{\
+ return get_base_packet<packet_type>::get_ploaddup(from); \
+}
+
+// global space
+SYCL_PLOAD_DUP(global_space)
+// local_space
+SYCL_PLOAD_DUP(local_space)
+// private_space
+//SYCL_PLOAD_DUP(private_space)
+#undef SYCL_PLOAD_DUP
+
+#define SYCL_PLOAD_DUP_SPECILIZE(packet_type)\
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type \
+ploaddup<packet_type>(const typename unpacket_traits<packet_type>::type * from)\
+{ \
+ return get_base_packet<packet_type>::get_ploaddup(from); \
+}
+
+SYCL_PLOAD_DUP_SPECILIZE(cl::sycl::cl_float4)
+SYCL_PLOAD_DUP_SPECILIZE(cl::sycl::cl_double2)
+
+#undef SYCL_PLOAD_DUP_SPECILIZE
+
+#define SYCL_PLSET(packet_type)\
+template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE packet_type plset<packet_type>(const typename unpacket_traits<packet_type>::type& a) {\
+ return get_base_packet<packet_type>::set_plset(a);\
+}
+
+SYCL_PLSET(cl::sycl::cl_float4)
+SYCL_PLSET(cl::sycl::cl_double2)
+
+#undef SYCL_PLSET
+
+
+#define SYCL_PGATHER(address_space_target)\
+template<typename Scalar, typename packet_type> EIGEN_DEVICE_FUNC inline packet_type pgather(\
+ typename cl::sycl::multi_ptr<const typename unpacket_traits<packet_type>::type,\
+ cl::sycl::access::address_space::address_space_target>::pointer_t from, Index stride) {\
+ return get_base_packet<packet_type>::get_pgather(from, stride); \
+}
+
+// global space
+SYCL_PGATHER(global_space)
+// local space
+SYCL_PGATHER(local_space)
+// private space
+//SYCL_PGATHER(private_space)
+
+#undef SYCL_PGATHER
+
+
+#define SYCL_PGATHER_SPECILIZE(scalar, packet_type)\
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type \
+pgather<scalar, packet_type>(const typename unpacket_traits<packet_type>::type * from, Index stride)\
+{ \
+ return get_base_packet<packet_type>::get_pgather(from, stride); \
+}
+
+SYCL_PGATHER_SPECILIZE(float, cl::sycl::cl_float4)
+SYCL_PGATHER_SPECILIZE(double, cl::sycl::cl_double2)
+
+#undef SYCL_PGATHER_SPECILIZE
+
+#define SYCL_PSCATTER(address_space_target)\
+template<typename Scalar, typename packet_type> EIGEN_DEVICE_FUNC inline void pscatter(\
+ typename cl::sycl::multi_ptr<typename unpacket_traits<packet_type>::type,\
+ cl::sycl::access::address_space::address_space_target>::pointer_t to,\
+ const packet_type& from, Index stride) {\
+ get_base_packet<packet_type>::set_pscatter(to, from, stride);\
+}
+
+// global space
+SYCL_PSCATTER(global_space)
+// local space
+SYCL_PSCATTER(local_space)
+// private space
+//SYCL_PSCATTER(private_space)
+
+#undef SYCL_PSCATTER
+
+
+
+#define SYCL_PSCATTER_SPECILIZE(scalar, packet_type)\
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void \
+pscatter<scalar, packet_type>(typename unpacket_traits<packet_type>::type * to, const packet_type& from, Index stride)\
+{ \
+ get_base_packet<packet_type>::set_pscatter(to, from, stride);\
+}
+
+SYCL_PSCATTER_SPECILIZE(float, cl::sycl::cl_float4)
+SYCL_PSCATTER_SPECILIZE(double, cl::sycl::cl_double2)
+
+#undef SYCL_PSCATTER_SPECILIZE
+
+
+#define SYCL_PMAD(packet_type)\
+template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE packet_type pmadd( const packet_type& a,\
+ const packet_type& b, const packet_type& c){\
+ return cl::sycl::mad(a,b,c);\
+}
+
+SYCL_PMAD(cl::sycl::cl_float4)
+SYCL_PMAD(cl::sycl::cl_double2)
+#undef SYCL_PMAD
+
+
+
+template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float pfirst<cl::sycl::cl_float4>(const cl::sycl::cl_float4& a) {
+ return a.x();
+}
+template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double pfirst<cl::sycl::cl_double2>(const cl::sycl::cl_double2& a) {
+ return a.x();
+}
+
+template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float predux<cl::sycl::cl_float4>(const cl::sycl::cl_float4& a) {
+ return a.x() + a.y() + a.z() + a.w();
+}
+
+template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double predux<cl::sycl::cl_double2>(const cl::sycl::cl_double2& a) {
+ return a.x() + a.y();
+}
+
+template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float predux_max<cl::sycl::cl_float4>(const cl::sycl::cl_float4& a) {
+ return cl::sycl::fmax(cl::sycl::fmax(a.x(), a.y()), cl::sycl::fmax(a.z(), a.w()));
+}
+template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double predux_max<cl::sycl::cl_double2>(const cl::sycl::cl_double2& a) {
+ return cl::sycl::fmax(a.x(), a.y());
+}
+
+template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float predux_min<cl::sycl::cl_float4>(const cl::sycl::cl_float4& a) {
+ return cl::sycl::fmin(cl::sycl::fmin(a.x(), a.y()), cl::sycl::fmin(a.z(), a.w()));
+}
+template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double predux_min<cl::sycl::cl_double2>(const cl::sycl::cl_double2& a) {
+ return cl::sycl::fmin(a.x(), a.y());
+}
+
+template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float predux_mul<cl::sycl::cl_float4>(const cl::sycl::cl_float4& a) {
+ return a.x() * a.y() * a.z() * a.w();
+}
+template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double predux_mul<cl::sycl::cl_double2>(const cl::sycl::cl_double2& a) {
+ return a.x() * a.y();
+}
+
+template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE cl::sycl::cl_float4 pabs<cl::sycl::cl_float4>(const cl::sycl::cl_float4& a) {
+ return cl::sycl::cl_float4(cl::sycl::fabs(a.x()), cl::sycl::fabs(a.y()), cl::sycl::fabs(a.z()), cl::sycl::fabs(a.w()));
+}
+template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE cl::sycl::cl_double2 pabs<cl::sycl::cl_double2>(const cl::sycl::cl_double2& a) {
+ return cl::sycl::cl_double2(cl::sycl::fabs(a.x()), cl::sycl::fabs(a.y()));
+}
+
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void
+ptranspose(PacketBlock<cl::sycl::cl_float4,4>& kernel) {
+ float tmp = kernel.packet[0].y();
+ kernel.packet[0].y() = kernel.packet[1].x();
+ kernel.packet[1].x() = tmp;
+// std::swap(kernel.packet[0].y(), kernel.packet[1].x());
+
+ tmp = kernel.packet[0].z();
+ kernel.packet[0].z() = kernel.packet[2].x();
+ kernel.packet[2].x() = tmp;
+ //std::swap(kernel.packet[0].z(), kernel.packet[2].x());
+
+ tmp = kernel.packet[0].w();
+ kernel.packet[0].w() = kernel.packet[3].x();
+ kernel.packet[3].x() = tmp;
+
+ //std::swap(kernel.packet[0].w(), kernel.packet[3].x());
+
+ tmp = kernel.packet[1].z();
+ kernel.packet[1].z() = kernel.packet[2].y();
+ kernel.packet[2].y() = tmp;
+// std::swap(kernel.packet[1].z(), kernel.packet[2].y());
+
+ tmp = kernel.packet[1].w();
+ kernel.packet[1].w() = kernel.packet[3].y();
+ kernel.packet[3].y() = tmp;
+// std::swap(kernel.packet[1].w(), kernel.packet[3].y());
+
+ tmp = kernel.packet[2].w();
+ kernel.packet[2].w() = kernel.packet[3].z();
+ kernel.packet[3].z() = tmp;
+// std::swap(kernel.packet[2].w(), kernel.packet[3].z());
+
+}
+
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void
+ptranspose(PacketBlock<cl::sycl::cl_double2,2>& kernel) {
+ double tmp = kernel.packet[0].y();
+ kernel.packet[0].y() = kernel.packet[1].x();
+ kernel.packet[1].x() = tmp;
+//std::swap(kernel.packet[0].y(), kernel.packet[1].x());
+}
+
+
+template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE cl::sycl::cl_float4
+pblend(const Selector<unpacket_traits<cl::sycl::cl_float4>::size>& ifPacket,
+ const cl::sycl::cl_float4& thenPacket, const cl::sycl::cl_float4& elsePacket) {
+ cl::sycl::cl_int4 condition(ifPacket.select[0] ? 0 : -1,
+ ifPacket.select[1] ? 0 : -1,
+ ifPacket.select[2] ? 0 : -1,
+ ifPacket.select[3] ? 0 : -1);
+ return cl::sycl::select(thenPacket, elsePacket, condition);
+}
+
+template<> inline cl::sycl::cl_double2
+pblend(const Selector<unpacket_traits<cl::sycl::cl_double2>::size>& ifPacket,
+ const cl::sycl::cl_double2& thenPacket, const cl::sycl::cl_double2& elsePacket) {
+ cl::sycl::cl_long2 condition(ifPacket.select[0] ? 0 : -1,
+ ifPacket.select[1] ? 0 : -1);
+ return cl::sycl::select(thenPacket, elsePacket, condition);
+}
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_USE_SYCL
+#endif // EIGEN_PACKET_MATH_SYCL_H
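The manual element swaps in ptranspose(PacketBlock<cl::sycl::cl_float4,4>&) above amount to an in-place 4x4 transpose; a scalar sketch of the same operation (illustration only; transpose4x4 is a hypothetical helper, not patch code):

#include <utility>

// Swaps each element above the diagonal with its mirror below it, the same
// pairs the packet code exchanges through the tmp variable.
void transpose4x4(float m[4][4]) {
  for (int r = 0; r < 4; ++r)
    for (int c = r + 1; c < 4; ++c)
      std::swap(m[r][c], m[c][r]);
}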
diff --git a/Eigen/src/Core/arch/SYCL/TypeCasting.h b/Eigen/src/Core/arch/SYCL/TypeCasting.h
new file mode 100644
index 000000000..dedd5c84a
--- /dev/null
+++ b/Eigen/src/Core/arch/SYCL/TypeCasting.h
@@ -0,0 +1,89 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Mehdi Goli Codeplay Software Ltd.
+// Ralph Potter Codeplay Software Ltd.
+// Luke Iwanski Codeplay Software Ltd.
+// Contact: <eigen@codeplay.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+/*****************************************************************
+ * TypeCasting.h
+ *
+ * \brief:
+ * TypeCasting: vectorized casts between SYCL packet types (float <-> int, double <-> float).
+ *
+*****************************************************************/
+
+#ifndef EIGEN_TYPE_CASTING_SYCL_H
+#define EIGEN_TYPE_CASTING_SYCL_H
+
+namespace Eigen {
+
+namespace internal {
+#ifdef __SYCL_DEVICE_ONLY__
+template <>
+struct type_casting_traits<float, int> {
+ enum {
+ VectorizedCast = 1,
+ SrcCoeffRatio = 1,
+ TgtCoeffRatio = 1
+ };
+};
+
+template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE cl::sycl::cl_int4 pcast<cl::sycl::cl_float4, cl::sycl::cl_int4>(const cl::sycl::cl_float4& a) {
+ return a. template convert<cl::sycl::cl_int, cl::sycl::rounding_mode::automatic>();
+}
+
+
+template <>
+struct type_casting_traits<int, float> {
+ enum {
+ VectorizedCast = 1,
+ SrcCoeffRatio = 1,
+ TgtCoeffRatio = 1
+ };
+};
+
+template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE cl::sycl::cl_float4 pcast<cl::sycl::cl_int4, cl::sycl::cl_float4>(const cl::sycl::cl_int4& a) {
+ return a. template convert<cl::sycl::cl_float, cl::sycl::rounding_mode::automatic>();
+}
+
+template <>
+struct type_casting_traits<double, float> {
+ enum {
+ VectorizedCast = 1,
+ SrcCoeffRatio = 2,
+ TgtCoeffRatio = 1
+ };
+};
+
+template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE cl::sycl::cl_float4 pcast<cl::sycl::cl_double2, cl::sycl::cl_float4>(const cl::sycl::cl_double2& a, const cl::sycl::cl_double2& b) {
+ auto a1=a. template convert<cl::sycl::cl_float, cl::sycl::rounding_mode::automatic>();
+ auto b1=b. template convert<cl::sycl::cl_float, cl::sycl::rounding_mode::automatic>();
+ return cl::sycl::float4(a1.x(), a1.y(), b1.x(), b1.y());
+}
+
+template <>
+struct type_casting_traits<float, double> {
+ enum {
+ VectorizedCast = 1,
+ SrcCoeffRatio = 1,
+ TgtCoeffRatio = 2
+ };
+};
+
+template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE cl::sycl::cl_double2 pcast<cl::sycl::cl_float4, cl::sycl::cl_double2>(const cl::sycl::cl_float4& a) {
+ // Simply discard the second half of the input
+ return cl::sycl::cl_double2(a.x(), a.y());
+}
+
+#endif
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_TYPE_CASTING_SYCL_H
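The SrcCoeffRatio/TgtCoeffRatio values above state how many packets each cast consumes and produces; for the double-to-float case two cl_double2 inputs fill one cl_float4, while float-to-double keeps only the first two lanes. An illustrative restatement (not patch code):

// pcast<cl::sycl::cl_double2, cl::sycl::cl_float4>(cl_double2{d0, d1}, cl_double2{d2, d3})
//   == cl_float4{(float)d0, (float)d1, (float)d2, (float)d3}
// pcast<cl::sycl::cl_float4, cl::sycl::cl_double2>(cl_float4{f0, f1, f2, f3})
//   == cl_double2{(double)f0, (double)f1}   // second half discarded, as noted above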
diff --git a/Eigen/src/Core/arch/ZVector/Complex.h b/Eigen/src/Core/arch/ZVector/Complex.h
index d39d2d105..95aba428f 100644
--- a/Eigen/src/Core/arch/ZVector/Complex.h
+++ b/Eigen/src/Core/arch/ZVector/Complex.h
@@ -15,6 +15,10 @@ namespace Eigen {
namespace internal {
+#if !defined(__ARCH__) || (defined(__ARCH__) && __ARCH__ >= 12)
+static Packet4ui p4ui_CONJ_XOR = { 0x00000000, 0x80000000, 0x00000000, 0x80000000 }; //vec_mergeh((Packet4ui)p4i_ZERO, (Packet4ui)p4f_MZERO);
+#endif
+
static Packet2ul p2ul_CONJ_XOR1 = (Packet2ul) vec_sld((Packet4ui) p2d_ZERO_, (Packet4ui) p2l_ZERO, 8);//{ 0x8000000000000000, 0x0000000000000000 };
static Packet2ul p2ul_CONJ_XOR2 = (Packet2ul) vec_sld((Packet4ui) p2l_ZERO, (Packet4ui) p2d_ZERO_, 8);//{ 0x8000000000000000, 0x0000000000000000 };
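The p4ui_CONJ_XOR constant added above flips only the sign bits of the imaginary lanes, which is exactly what complex conjugation needs; a scalar model of the trick (a sketch assuming IEEE-754 binary32 floats; conj_imag is a hypothetical helper, not patch code):

#include <cstring>

// Toggling bit 31 of a float negates it, so XORing {0, 0x80000000, 0, 0x80000000}
// into {re0, im0, re1, im1} maps each a + b*i to its conjugate a - b*i.
float conj_imag(float im) {
  unsigned int bits;
  std::memcpy(&bits, &im, sizeof bits);
  bits ^= 0x80000000u;  // flip the sign bit
  std::memcpy(&im, &bits, sizeof im);
  return im;
}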
@@ -29,10 +33,14 @@ struct Packet2cf
{
EIGEN_STRONG_INLINE Packet2cf() {}
EIGEN_STRONG_INLINE explicit Packet2cf(const Packet4f& a) : v(a) {}
+#if !defined(__ARCH__) || (defined(__ARCH__) && __ARCH__ < 12)
union {
Packet4f v;
Packet1cd cd[2];
};
+#else
+ Packet4f v;
+#endif
};
template<> struct packet_traits<std::complex<float> > : default_packet_traits
@@ -89,63 +97,27 @@ template<> struct unpacket_traits<Packet1cd> { typedef std::complex<double> type
/* Forward declaration */
EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet2cf,2>& kernel);
-template<> EIGEN_STRONG_INLINE Packet2cf pload <Packet2cf>(const std::complex<float>* from) { EIGEN_DEBUG_ALIGNED_LOAD return Packet2cf(pload<Packet4f>((const float*)from)); }
+/* complex<double> first */
template<> EIGEN_STRONG_INLINE Packet1cd pload <Packet1cd>(const std::complex<double>* from) { EIGEN_DEBUG_ALIGNED_LOAD return Packet1cd(pload<Packet2d>((const double*)from)); }
-template<> EIGEN_STRONG_INLINE Packet2cf ploadu<Packet2cf>(const std::complex<float>* from) { EIGEN_DEBUG_UNALIGNED_LOAD return Packet2cf(ploadu<Packet4f>((const float*)from)); }
template<> EIGEN_STRONG_INLINE Packet1cd ploadu<Packet1cd>(const std::complex<double>* from) { EIGEN_DEBUG_UNALIGNED_LOAD return Packet1cd(ploadu<Packet2d>((const double*)from)); }
-template<> EIGEN_STRONG_INLINE void pstore <std::complex<float> >(std::complex<float> * to, const Packet2cf& from) { EIGEN_DEBUG_ALIGNED_STORE pstore((float*)to, from.v); }
template<> EIGEN_STRONG_INLINE void pstore <std::complex<double> >(std::complex<double> * to, const Packet1cd& from) { EIGEN_DEBUG_ALIGNED_STORE pstore((double*)to, from.v); }
-template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<float> >(std::complex<float> * to, const Packet2cf& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu((float*)to, from.v); }
template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<double> >(std::complex<double> * to, const Packet1cd& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu((double*)to, from.v); }
template<> EIGEN_STRONG_INLINE Packet1cd pset1<Packet1cd>(const std::complex<double>& from)
{ /* here we really have to use unaligned loads :( */ return ploadu<Packet1cd>(&from); }
-template<> EIGEN_STRONG_INLINE Packet2cf pset1<Packet2cf>(const std::complex<float>& from)
-{
- Packet2cf res;
- res.cd[0] = Packet1cd(vec_ld2f((const float *)&from));
- res.cd[1] = res.cd[0];
- return res;
-}
-template<> EIGEN_DEVICE_FUNC inline Packet2cf pgather<std::complex<float>, Packet2cf>(const std::complex<float>* from, Index stride)
-{
- std::complex<float> EIGEN_ALIGN16 af[2];
- af[0] = from[0*stride];
- af[1] = from[1*stride];
- return pload<Packet2cf>(af);
-}
template<> EIGEN_DEVICE_FUNC inline Packet1cd pgather<std::complex<double>, Packet1cd>(const std::complex<double>* from, Index stride EIGEN_UNUSED)
{
return pload<Packet1cd>(from);
}
-template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<float>, Packet2cf>(std::complex<float>* to, const Packet2cf& from, Index stride)
-{
- std::complex<float> EIGEN_ALIGN16 af[2];
- pstore<std::complex<float> >((std::complex<float> *) af, from);
- to[0*stride] = af[0];
- to[1*stride] = af[1];
-}
template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<double>, Packet1cd>(std::complex<double>* to, const Packet1cd& from, Index stride EIGEN_UNUSED)
{
pstore<std::complex<double> >(to, from);
}
-
-template<> EIGEN_STRONG_INLINE Packet2cf padd<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(padd<Packet4f>(a.v, b.v)); }
template<> EIGEN_STRONG_INLINE Packet1cd padd<Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(a.v + b.v); }
-template<> EIGEN_STRONG_INLINE Packet2cf psub<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(psub<Packet4f>(a.v, b.v)); }
template<> EIGEN_STRONG_INLINE Packet1cd psub<Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(a.v - b.v); }
template<> EIGEN_STRONG_INLINE Packet1cd pnegate(const Packet1cd& a) { return Packet1cd(pnegate(Packet2d(a.v))); }
-template<> EIGEN_STRONG_INLINE Packet2cf pnegate(const Packet2cf& a) { return Packet2cf(pnegate(Packet4f(a.v))); }
template<> EIGEN_STRONG_INLINE Packet1cd pconj(const Packet1cd& a) { return Packet1cd((Packet2d)vec_xor((Packet2d)a.v, (Packet2d)p2ul_CONJ_XOR2)); }
-template<> EIGEN_STRONG_INLINE Packet2cf pconj(const Packet2cf& a)
-{
- Packet2cf res;
- res.v.v4f[0] = pconj(Packet1cd(reinterpret_cast<Packet2d>(a.v.v4f[0]))).v;
- res.v.v4f[1] = pconj(Packet1cd(reinterpret_cast<Packet2d>(a.v.v4f[1]))).v;
- return res;
-}
-
template<> EIGEN_STRONG_INLINE Packet1cd pmul<Packet1cd>(const Packet1cd& a, const Packet1cd& b)
{
Packet2d a_re, a_im, v1, v2;
@@ -163,45 +135,182 @@ template<> EIGEN_STRONG_INLINE Packet1cd pmul<Packet1cd>(const Packet1cd& a, con
return Packet1cd(v1 + v2);
}
-template<> EIGEN_STRONG_INLINE Packet2cf pmul<Packet2cf>(const Packet2cf& a, const Packet2cf& b)
+template<> EIGEN_STRONG_INLINE Packet1cd pand <Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(vec_and(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet1cd por <Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(vec_or(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet1cd pxor <Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(vec_xor(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet1cd pandnot <Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(vec_and(a.v, vec_nor(b.v,b.v))); }
+template<> EIGEN_STRONG_INLINE Packet1cd ploaddup<Packet1cd>(const std::complex<double>* from) { return pset1<Packet1cd>(*from); }
+
+template<> EIGEN_STRONG_INLINE void prefetch<std::complex<double> >(const std::complex<double> * addr) { EIGEN_ZVECTOR_PREFETCH(addr); }
+
+template<> EIGEN_STRONG_INLINE std::complex<double> pfirst<Packet1cd>(const Packet1cd& a)
+{
+ std::complex<double> EIGEN_ALIGN16 res;
+ pstore<std::complex<double> >(&res, a);
+
+ return res;
+}
+
+template<> EIGEN_STRONG_INLINE Packet1cd preverse(const Packet1cd& a) { return a; }
+template<> EIGEN_STRONG_INLINE std::complex<double> predux<Packet1cd>(const Packet1cd& a)
+{
+ return pfirst(a);
+}
+template<> EIGEN_STRONG_INLINE Packet1cd preduxp<Packet1cd>(const Packet1cd* vecs)
+{
+ return vecs[0];
+}
+template<> EIGEN_STRONG_INLINE std::complex<double> predux_mul<Packet1cd>(const Packet1cd& a)
+{
+ return pfirst(a);
+}
+template<int Offset>
+struct palign_impl<Offset,Packet1cd>
+{
+ static EIGEN_STRONG_INLINE void run(Packet1cd& /*first*/, const Packet1cd& /*second*/)
+ {
+    // FIXME: is it certain that we never have to align a Packet1cd?
+    // Even though a std::complex<double> has 16 bytes, it is not necessarily aligned on a 16-byte boundary...
+ }
+};
+
+template<> struct conj_helper<Packet1cd, Packet1cd, false,true>
+{
+ EIGEN_STRONG_INLINE Packet1cd pmadd(const Packet1cd& x, const Packet1cd& y, const Packet1cd& c) const
+ { return padd(pmul(x,y),c); }
+
+ EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& a, const Packet1cd& b) const
+ {
+ return internal::pmul(a, pconj(b));
+ }
+};
+
+template<> struct conj_helper<Packet1cd, Packet1cd, true,false>
+{
+ EIGEN_STRONG_INLINE Packet1cd pmadd(const Packet1cd& x, const Packet1cd& y, const Packet1cd& c) const
+ { return padd(pmul(x,y),c); }
+
+ EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& a, const Packet1cd& b) const
+ {
+ return internal::pmul(pconj(a), b);
+ }
+};
+
+template<> struct conj_helper<Packet1cd, Packet1cd, true,true>
+{
+ EIGEN_STRONG_INLINE Packet1cd pmadd(const Packet1cd& x, const Packet1cd& y, const Packet1cd& c) const
+ { return padd(pmul(x,y),c); }
+
+ EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& a, const Packet1cd& b) const
+ {
+ return pconj(internal::pmul(a, b));
+ }
+};
+
+EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet1cd,Packet2d)
+
+template<> EIGEN_STRONG_INLINE Packet1cd pdiv<Packet1cd>(const Packet1cd& a, const Packet1cd& b)
+{
+ // TODO optimize it for AltiVec
+ Packet1cd res = conj_helper<Packet1cd,Packet1cd,false,true>().pmul(a,b);
+ Packet2d s = vec_madd(b.v, b.v, p2d_ZERO_);
+ return Packet1cd(pdiv(res.v, s + vec_perm(s, s, p16uc_REVERSE64)));
+}
+
+EIGEN_STRONG_INLINE Packet1cd pcplxflip/*<Packet1cd>*/(const Packet1cd& x)
+{
+ return Packet1cd(preverse(Packet2d(x.v)));
+}
+
+EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet1cd,2>& kernel)
+{
+ Packet2d tmp = vec_perm(kernel.packet[0].v, kernel.packet[1].v, p16uc_TRANSPOSE64_HI);
+ kernel.packet[1].v = vec_perm(kernel.packet[0].v, kernel.packet[1].v, p16uc_TRANSPOSE64_LO);
+ kernel.packet[0].v = tmp;
+}
+
+/* complex<float> follows */
+template<> EIGEN_STRONG_INLINE Packet2cf pload <Packet2cf>(const std::complex<float>* from) { EIGEN_DEBUG_ALIGNED_LOAD return Packet2cf(pload<Packet4f>((const float*)from)); }
+template<> EIGEN_STRONG_INLINE Packet2cf ploadu<Packet2cf>(const std::complex<float>* from) { EIGEN_DEBUG_UNALIGNED_LOAD return Packet2cf(ploadu<Packet4f>((const float*)from)); }
+template<> EIGEN_STRONG_INLINE void pstore <std::complex<float> >(std::complex<float> * to, const Packet2cf& from) { EIGEN_DEBUG_ALIGNED_STORE pstore((float*)to, from.v); }
+template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<float> >(std::complex<float> * to, const Packet2cf& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu((float*)to, from.v); }
+
+template<> EIGEN_STRONG_INLINE std::complex<float> pfirst<Packet2cf>(const Packet2cf& a)
+{
+ std::complex<float> EIGEN_ALIGN16 res[2];
+ pstore<std::complex<float> >(res, a);
+
+ return res[0];
+}
+
+
+#if !defined(__ARCH__) || (defined(__ARCH__) && __ARCH__ < 12)
+template<> EIGEN_STRONG_INLINE Packet2cf pset1<Packet2cf>(const std::complex<float>& from)
{
Packet2cf res;
- res.v.v4f[0] = pmul(Packet1cd(reinterpret_cast<Packet2d>(a.v.v4f[0])), Packet1cd(reinterpret_cast<Packet2d>(b.v.v4f[0]))).v;
- res.v.v4f[1] = pmul(Packet1cd(reinterpret_cast<Packet2d>(a.v.v4f[1])), Packet1cd(reinterpret_cast<Packet2d>(b.v.v4f[1]))).v;
+ res.cd[0] = Packet1cd(vec_ld2f((const float *)&from));
+ res.cd[1] = res.cd[0];
return res;
}
+#else
+template<> EIGEN_STRONG_INLINE Packet2cf pset1<Packet2cf>(const std::complex<float>& from)
+{
+ Packet2cf res;
+ if((std::ptrdiff_t(&from) % 16) == 0)
+ res.v = pload<Packet4f>((const float *)&from);
+ else
+ res.v = ploadu<Packet4f>((const float *)&from);
+ res.v = vec_perm(res.v, res.v, p16uc_PSET64_HI);
+ return res;
+}
+#endif
+
+template<> EIGEN_DEVICE_FUNC inline Packet2cf pgather<std::complex<float>, Packet2cf>(const std::complex<float>* from, Index stride)
+{
+ std::complex<float> EIGEN_ALIGN16 af[2];
+ af[0] = from[0*stride];
+ af[1] = from[1*stride];
+ return pload<Packet2cf>(af);
+}
+template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<float>, Packet2cf>(std::complex<float>* to, const Packet2cf& from, Index stride)
+{
+ std::complex<float> EIGEN_ALIGN16 af[2];
+ pstore<std::complex<float> >((std::complex<float> *) af, from);
+ to[0*stride] = af[0];
+ to[1*stride] = af[1];
+}
+
+template<> EIGEN_STRONG_INLINE Packet2cf padd<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(padd<Packet4f>(a.v, b.v)); }
+template<> EIGEN_STRONG_INLINE Packet2cf psub<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(psub<Packet4f>(a.v, b.v)); }
+template<> EIGEN_STRONG_INLINE Packet2cf pnegate(const Packet2cf& a) { return Packet2cf(pnegate(Packet4f(a.v))); }
-template<> EIGEN_STRONG_INLINE Packet1cd pand <Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(vec_and(a.v,b.v)); }
template<> EIGEN_STRONG_INLINE Packet2cf pand <Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(pand<Packet4f>(a.v,b.v)); }
-template<> EIGEN_STRONG_INLINE Packet1cd por <Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(vec_or(a.v,b.v)); }
template<> EIGEN_STRONG_INLINE Packet2cf por <Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(por<Packet4f>(a.v,b.v)); }
-template<> EIGEN_STRONG_INLINE Packet1cd pxor <Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(vec_xor(a.v,b.v)); }
template<> EIGEN_STRONG_INLINE Packet2cf pxor <Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(pxor<Packet4f>(a.v,b.v)); }
-template<> EIGEN_STRONG_INLINE Packet1cd pandnot<Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(vec_and(a.v, vec_nor(b.v,b.v))); }
template<> EIGEN_STRONG_INLINE Packet2cf pandnot<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(pandnot<Packet4f>(a.v,b.v)); }
-template<> EIGEN_STRONG_INLINE Packet1cd ploaddup<Packet1cd>(const std::complex<double>* from) { return pset1<Packet1cd>(*from); }
template<> EIGEN_STRONG_INLINE Packet2cf ploaddup<Packet2cf>(const std::complex<float>* from) { return pset1<Packet2cf>(*from); }
template<> EIGEN_STRONG_INLINE void prefetch<std::complex<float> >(const std::complex<float> * addr) { EIGEN_ZVECTOR_PREFETCH(addr); }
-template<> EIGEN_STRONG_INLINE void prefetch<std::complex<double> >(const std::complex<double> * addr) { EIGEN_ZVECTOR_PREFETCH(addr); }
-template<> EIGEN_STRONG_INLINE std::complex<double> pfirst<Packet1cd>(const Packet1cd& a)
-{
- std::complex<double> EIGEN_ALIGN16 res;
- pstore<std::complex<double> >(&res, a);
+#if !defined(__ARCH__) || (defined(__ARCH__) && __ARCH__ < 12)
+template<> EIGEN_STRONG_INLINE Packet2cf pconj(const Packet2cf& a)
+{
+ Packet2cf res;
+ res.v.v4f[0] = pconj(Packet1cd(reinterpret_cast<Packet2d>(a.v.v4f[0]))).v;
+ res.v.v4f[1] = pconj(Packet1cd(reinterpret_cast<Packet2d>(a.v.v4f[1]))).v;
return res;
}
-template<> EIGEN_STRONG_INLINE std::complex<float> pfirst<Packet2cf>(const Packet2cf& a)
-{
- std::complex<float> EIGEN_ALIGN16 res[2];
- pstore<std::complex<float> >(res, a);
- return res[0];
+template<> EIGEN_STRONG_INLINE Packet2cf pmul<Packet2cf>(const Packet2cf& a, const Packet2cf& b)
+{
+ Packet2cf res;
+ res.v.v4f[0] = pmul(Packet1cd(reinterpret_cast<Packet2d>(a.v.v4f[0])), Packet1cd(reinterpret_cast<Packet2d>(b.v.v4f[0]))).v;
+ res.v.v4f[1] = pmul(Packet1cd(reinterpret_cast<Packet2d>(a.v.v4f[1])), Packet1cd(reinterpret_cast<Packet2d>(b.v.v4f[1]))).v;
+ return res;
}
-template<> EIGEN_STRONG_INLINE Packet1cd preverse(const Packet1cd& a) { return a; }
template<> EIGEN_STRONG_INLINE Packet2cf preverse(const Packet2cf& a)
{
Packet2cf res;
@@ -210,10 +319,6 @@ template<> EIGEN_STRONG_INLINE Packet2cf preverse(const Packet2cf& a)
return res;
}
-template<> EIGEN_STRONG_INLINE std::complex<double> predux<Packet1cd>(const Packet1cd& a)
-{
- return pfirst(a);
-}
template<> EIGEN_STRONG_INLINE std::complex<float> predux<Packet2cf>(const Packet2cf& a)
{
std::complex<float> res;
@@ -222,10 +327,6 @@ template<> EIGEN_STRONG_INLINE std::complex<float> predux<Packet2cf>(const Packe
return res;
}
-template<> EIGEN_STRONG_INLINE Packet1cd preduxp<Packet1cd>(const Packet1cd* vecs)
-{
- return vecs[0];
-}
template<> EIGEN_STRONG_INLINE Packet2cf preduxp<Packet2cf>(const Packet2cf* vecs)
{
PacketBlock<Packet2cf,2> transpose;
@@ -236,10 +337,6 @@ template<> EIGEN_STRONG_INLINE Packet2cf preduxp<Packet2cf>(const Packet2cf* vec
return padd<Packet2cf>(transpose.packet[0], transpose.packet[1]);
}
-template<> EIGEN_STRONG_INLINE std::complex<double> predux_mul<Packet1cd>(const Packet1cd& a)
-{
- return pfirst(a);
-}
template<> EIGEN_STRONG_INLINE std::complex<float> predux_mul<Packet2cf>(const Packet2cf& a)
{
std::complex<float> res;
@@ -249,16 +346,6 @@ template<> EIGEN_STRONG_INLINE std::complex<float> predux_mul<Packet2cf>(const P
}
template<int Offset>
-struct palign_impl<Offset,Packet1cd>
-{
- static EIGEN_STRONG_INLINE void run(Packet1cd& /*first*/, const Packet1cd& /*second*/)
- {
- // FIXME is it sure we never have to align a Packet1cd?
- // Even though a std::complex<double> has 16 bytes, it is not necessarily aligned on a 16 bytes boundary...
- }
-};
-
-template<int Offset>
struct palign_impl<Offset,Packet2cf>
{
static EIGEN_STRONG_INLINE void run(Packet2cf& first, const Packet2cf& second)
@@ -270,39 +357,143 @@ struct palign_impl<Offset,Packet2cf>
}
};
-template<> struct conj_helper<Packet1cd, Packet1cd, false,true>
+template<> struct conj_helper<Packet2cf, Packet2cf, false,true>
{
- EIGEN_STRONG_INLINE Packet1cd pmadd(const Packet1cd& x, const Packet1cd& y, const Packet1cd& c) const
+ EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y, const Packet2cf& c) const
{ return padd(pmul(x,y),c); }
- EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& a, const Packet1cd& b) const
+ EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) const
{
return internal::pmul(a, pconj(b));
}
};
-template<> struct conj_helper<Packet1cd, Packet1cd, true,false>
+template<> struct conj_helper<Packet2cf, Packet2cf, true,false>
{
- EIGEN_STRONG_INLINE Packet1cd pmadd(const Packet1cd& x, const Packet1cd& y, const Packet1cd& c) const
+ EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y, const Packet2cf& c) const
{ return padd(pmul(x,y),c); }
- EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& a, const Packet1cd& b) const
+ EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) const
{
return internal::pmul(pconj(a), b);
}
};
-template<> struct conj_helper<Packet1cd, Packet1cd, true,true>
+template<> struct conj_helper<Packet2cf, Packet2cf, true,true>
{
- EIGEN_STRONG_INLINE Packet1cd pmadd(const Packet1cd& x, const Packet1cd& y, const Packet1cd& c) const
+ EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y, const Packet2cf& c) const
{ return padd(pmul(x,y),c); }
- EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& a, const Packet1cd& b) const
+ EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) const
{
return pconj(internal::pmul(a, b));
}
};
+EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet2cf,Packet4f)
+
+template<> EIGEN_STRONG_INLINE Packet2cf pdiv<Packet2cf>(const Packet2cf& a, const Packet2cf& b)
+{
+ // TODO optimize it for AltiVec
+ Packet2cf res;
+ res.cd[0] = pdiv<Packet1cd>(a.cd[0], b.cd[0]);
+ res.cd[1] = pdiv<Packet1cd>(a.cd[1], b.cd[1]);
+ return res;
+}
+
+EIGEN_STRONG_INLINE Packet2cf pcplxflip/*<Packet2cf>*/(const Packet2cf& x)
+{
+ Packet2cf res;
+ res.cd[0] = pcplxflip(x.cd[0]);
+ res.cd[1] = pcplxflip(x.cd[1]);
+ return res;
+}
+
+EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet2cf,2>& kernel)
+{
+ Packet1cd tmp = kernel.packet[0].cd[1];
+ kernel.packet[0].cd[1] = kernel.packet[1].cd[0];
+ kernel.packet[1].cd[0] = tmp;
+}
+
+template<> EIGEN_STRONG_INLINE Packet2cf pblend(const Selector<2>& ifPacket, const Packet2cf& thenPacket, const Packet2cf& elsePacket) {
+ Packet2cf result;
+ const Selector<4> ifPacket4 = { ifPacket.select[0], ifPacket.select[0], ifPacket.select[1], ifPacket.select[1] };
+ result.v = pblend<Packet4f>(ifPacket4, thenPacket.v, elsePacket.v);
+ return result;
+}
+#else
+template<> EIGEN_STRONG_INLINE Packet2cf pconj(const Packet2cf& a) { return Packet2cf(pxor<Packet4f>(a.v, reinterpret_cast<Packet4f>(p4ui_CONJ_XOR))); }
+template<> EIGEN_STRONG_INLINE Packet2cf pmul<Packet2cf>(const Packet2cf& a, const Packet2cf& b)
+{
+ Packet4f a_re, a_im, prod, prod_im;
+
+ // Permute and multiply the real parts of a and b
+ a_re = vec_perm(a.v, a.v, p16uc_PSET32_WODD);
+
+ // Get the imaginary parts of a
+ a_im = vec_perm(a.v, a.v, p16uc_PSET32_WEVEN);
+
+ // multiply a_im * b and get the conjugate result
+ prod_im = a_im * b.v;
+ prod_im = pxor<Packet4f>(prod_im, reinterpret_cast<Packet4f>(p4ui_CONJ_XOR));
+ // permute back to a proper order
+ prod_im = vec_perm(prod_im, prod_im, p16uc_COMPLEX32_REV);
+
+ // multiply a_re * b, add prod_im
+ prod = pmadd<Packet4f>(a_re, b.v, prod_im);
+
+ return Packet2cf(prod);
+}
+
+template<> EIGEN_STRONG_INLINE Packet2cf preverse(const Packet2cf& a)
+{
+ Packet4f rev_a;
+ rev_a = vec_perm(a.v, a.v, p16uc_COMPLEX32_REV2);
+ return Packet2cf(rev_a);
+}
+
+template<> EIGEN_STRONG_INLINE std::complex<float> predux<Packet2cf>(const Packet2cf& a)
+{
+ Packet4f b;
+ b = vec_sld(a.v, a.v, 8);
+ b = padd<Packet4f>(a.v, b);
+ return pfirst<Packet2cf>(Packet2cf(b));
+}
+
+template<> EIGEN_STRONG_INLINE Packet2cf preduxp<Packet2cf>(const Packet2cf* vecs)
+{
+ Packet4f b1, b2;
+ b1 = vec_sld(vecs[0].v, vecs[1].v, 8);
+ b2 = vec_sld(vecs[1].v, vecs[0].v, 8);
+ b2 = vec_sld(b2, b2, 8);
+ b2 = padd<Packet4f>(b1, b2);
+
+ return Packet2cf(b2);
+}
+
+template<> EIGEN_STRONG_INLINE std::complex<float> predux_mul<Packet2cf>(const Packet2cf& a)
+{
+ Packet4f b;
+ Packet2cf prod;
+ b = vec_sld(a.v, a.v, 8);
+ prod = pmul<Packet2cf>(a, Packet2cf(b));
+
+ return pfirst<Packet2cf>(prod);
+}
+
+template<int Offset>
+struct palign_impl<Offset,Packet2cf>
+{
+ static EIGEN_STRONG_INLINE void run(Packet2cf& first, const Packet2cf& second)
+ {
+ if (Offset==1)
+ {
+ first.v = vec_sld(first.v, second.v, 8);
+ }
+ }
+};
+
template<> struct conj_helper<Packet2cf, Packet2cf, false,true>
{
EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y, const Packet2cf& c) const
@@ -336,56 +527,34 @@ template<> struct conj_helper<Packet2cf, Packet2cf, true,true>
}
};
-template<> EIGEN_STRONG_INLINE Packet1cd pdiv<Packet1cd>(const Packet1cd& a, const Packet1cd& b)
-{
- // TODO optimize it for AltiVec
- Packet1cd res = conj_helper<Packet1cd,Packet1cd,false,true>().pmul(a,b);
- Packet2d s = vec_madd(b.v, b.v, p2d_ZERO_);
- return Packet1cd(pdiv(res.v, s + vec_perm(s, s, p16uc_REVERSE64)));
-}
+EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet2cf,Packet4f)
template<> EIGEN_STRONG_INLINE Packet2cf pdiv<Packet2cf>(const Packet2cf& a, const Packet2cf& b)
{
// TODO optimize it for AltiVec
- Packet2cf res;
- res.cd[0] = pdiv<Packet1cd>(a.cd[0], b.cd[0]);
- res.cd[1] = pdiv<Packet1cd>(a.cd[1], b.cd[1]);
- return res;
+ Packet2cf res = conj_helper<Packet2cf,Packet2cf,false,true>().pmul(a, b);
+ Packet4f s = pmul<Packet4f>(b.v, b.v);
+ return Packet2cf(pdiv(res.v, padd<Packet4f>(s, vec_perm(s, s, p16uc_COMPLEX32_REV))));
}
-EIGEN_STRONG_INLINE Packet1cd pcplxflip/*<Packet1cd>*/(const Packet1cd& x)
+template<> EIGEN_STRONG_INLINE Packet2cf pcplxflip<Packet2cf>(const Packet2cf& x)
{
- return Packet1cd(preverse(Packet2d(x.v)));
+ return Packet2cf(vec_perm(x.v, x.v, p16uc_COMPLEX32_REV));
}
-EIGEN_STRONG_INLINE Packet2cf pcplxflip/*<Packet2cf>*/(const Packet2cf& x)
-{
- Packet2cf res;
- res.cd[0] = pcplxflip(x.cd[0]);
- res.cd[1] = pcplxflip(x.cd[1]);
- return res;
-}
-
-EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet1cd,2>& kernel)
+EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet2cf,2>& kernel)
{
- Packet2d tmp = vec_perm(kernel.packet[0].v, kernel.packet[1].v, p16uc_TRANSPOSE64_HI);
+ Packet4f tmp = vec_perm(kernel.packet[0].v, kernel.packet[1].v, p16uc_TRANSPOSE64_HI);
kernel.packet[1].v = vec_perm(kernel.packet[0].v, kernel.packet[1].v, p16uc_TRANSPOSE64_LO);
kernel.packet[0].v = tmp;
}
-EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet2cf,2>& kernel)
-{
- Packet1cd tmp = kernel.packet[0].cd[1];
- kernel.packet[0].cd[1] = kernel.packet[1].cd[0];
- kernel.packet[1].cd[0] = tmp;
-}
-
template<> EIGEN_STRONG_INLINE Packet2cf pblend(const Selector<2>& ifPacket, const Packet2cf& thenPacket, const Packet2cf& elsePacket) {
Packet2cf result;
- const Selector<4> ifPacket4 = { ifPacket.select[0], ifPacket.select[0], ifPacket.select[1], ifPacket.select[1] };
- result.v = pblend<Packet4f>(ifPacket4, thenPacket.v, elsePacket.v);
+ result.v = reinterpret_cast<Packet4f>(pblend<Packet2d>(ifPacket, reinterpret_cast<Packet2d>(thenPacket.v), reinterpret_cast<Packet2d>(elsePacket.v)));
return result;
}
+#endif
} // end namespace internal
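The Packet2cf pmul and pdiv specializations above follow the standard complex identities: multiply componentwise via (ar + ai*i)(br + bi*i) = (ar*br - ai*bi) + (ar*bi + ai*br)*i, and divide by multiplying with the conjugate and scaling by the squared modulus. A scalar model for reference (a sketch, not patch code; cdiv_model is a hypothetical helper):

#include <complex>

// a / b computed as a * conj(b) / |b|^2, mirroring the conj_helper<...,false,true>
// pmul step followed by the division by s + perm(s) in pdiv above.
std::complex<float> cdiv_model(std::complex<float> a, std::complex<float> b) {
  std::complex<float> num = a * std::conj(b);
  float norm2 = b.real() * b.real() + b.imag() * b.imag();
  return std::complex<float>(num.real() / norm2, num.imag() / norm2);
}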
diff --git a/Eigen/src/Core/arch/ZVector/MathFunctions.h b/Eigen/src/Core/arch/ZVector/MathFunctions.h
index 5c7aa7256..ff33a975f 100644
--- a/Eigen/src/Core/arch/ZVector/MathFunctions.h
+++ b/Eigen/src/Core/arch/ZVector/MathFunctions.h
@@ -20,6 +20,50 @@ namespace Eigen {
namespace internal {
+#if !defined(__ARCH__) || (defined(__ARCH__) && __ARCH__ >= 12)
+static _EIGEN_DECLARE_CONST_Packet4f(1 , 1.0f);
+static _EIGEN_DECLARE_CONST_Packet4f(half, 0.5f);
+static _EIGEN_DECLARE_CONST_Packet4i(0x7f, 0x7f);
+static _EIGEN_DECLARE_CONST_Packet4i(23, 23);
+
+static _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(inv_mant_mask, ~0x7f800000);
+
+/* the smallest non-denormalized float number */
+static _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(min_norm_pos, 0x00800000);
+static _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(minus_inf, 0xff800000); // -1.f/0.f
+static _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(minus_nan, 0xffffffff);
+
+/* natural logarithm computed for 4 simultaneous floats;
+   returns NaN for x <= 0
+*/
+static _EIGEN_DECLARE_CONST_Packet4f(cephes_SQRTHF, 0.707106781186547524f);
+static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p0, 7.0376836292E-2f);
+static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p1, - 1.1514610310E-1f);
+static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p2, 1.1676998740E-1f);
+static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p3, - 1.2420140846E-1f);
+static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p4, + 1.4249322787E-1f);
+static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p5, - 1.6668057665E-1f);
+static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p6, + 2.0000714765E-1f);
+static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p7, - 2.4999993993E-1f);
+static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p8, + 3.3333331174E-1f);
+static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_q1, -2.12194440e-4f);
+static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_q2, 0.693359375f);
+
+static _EIGEN_DECLARE_CONST_Packet4f(exp_hi, 88.3762626647950f);
+static _EIGEN_DECLARE_CONST_Packet4f(exp_lo, -88.3762626647949f);
+
+static _EIGEN_DECLARE_CONST_Packet4f(cephes_LOG2EF, 1.44269504088896341f);
+static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_C1, 0.693359375f);
+static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_C2, -2.12194440e-4f);
+
+static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p0, 1.9875691500E-4f);
+static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p1, 1.3981999507E-3f);
+static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p2, 8.3334519073E-3f);
+static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p3, 4.1665795894E-2f);
+static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p4, 1.6666665459E-1f);
+static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p5, 5.0000001201E-1f);
+#endif
+
static _EIGEN_DECLARE_CONST_Packet2d(1 , 1.0);
static _EIGEN_DECLARE_CONST_Packet2d(2 , 2.0);
static _EIGEN_DECLARE_CONST_Packet2d(half, 0.5);
@@ -93,40 +137,91 @@ Packet2d pexp<Packet2d>(const Packet2d& _x)
}
template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
-Packet4f pexp<Packet4f>(const Packet4f& x)
+Packet4f pexp<Packet4f>(const Packet4f& _x)
{
+#if !defined(__ARCH__) || (defined(__ARCH__) && __ARCH__ >= 12)
+/*
+ Packet4f x = _x;
+
+ Packet4f tmp, fx;
+ Packet4i emm0;
+
+ // clamp x
+ x = pmax(pmin(x, p4f_exp_hi), p4f_exp_lo);
+
+ // express exp(x) as exp(g + n*log(2))
+ fx = pmadd(x, p4f_cephes_LOG2EF, p4f_half);
+
+ fx = pfloor(fx);
+
+ tmp = pmul(fx, p4f_cephes_exp_C1);
+ Packet4f z = pmul(fx, p4f_cephes_exp_C2);
+ x = psub(x, tmp);
+ x = psub(x, z);
+
+ z = pmul(x,x);
+
+ Packet4f y = p4f_cephes_exp_p0;
+ y = pmadd(y, x, p4f_cephes_exp_p1);
+ y = pmadd(y, x, p4f_cephes_exp_p2);
+ y = pmadd(y, x, p4f_cephes_exp_p3);
+ y = pmadd(y, x, p4f_cephes_exp_p4);
+ y = pmadd(y, x, p4f_cephes_exp_p5);
+ y = pmadd(y, z, x);
+ y = padd(y, p4f_1);
+
+ // build 2^n
+ emm0 = vec_cts(fx, 0);
+ emm0 = emm0 + p4i_0x7f;
+ emm0 = emm0 << reinterpret_cast<Packet4i>(p4i_23);
+
+ // AltiVec's max & min operators silently drop NaNs. Check for NaNs in the
+ // inputs and return them unmodified.
+ Packet4ui isnumber_mask = reinterpret_cast<Packet4ui>(vec_cmpeq(_x, _x));
+ return vec_sel(_x, pmax(pmul(y, reinterpret_cast<Packet4f>(emm0)), _x),
+ isnumber_mask);*/
+ return _x;
+#else
Packet4f res;
- res.v4f[0] = pexp<Packet2d>(x.v4f[0]);
- res.v4f[1] = pexp<Packet2d>(x.v4f[1]);
+ res.v4f[0] = pexp<Packet2d>(_x.v4f[0]);
+ res.v4f[1] = pexp<Packet2d>(_x.v4f[1]);
return res;
+#endif
}
template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
Packet2d psqrt<Packet2d>(const Packet2d& x)
{
- return __builtin_s390_vfsqdb(x);
+ return vec_sqrt(x);
}
template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
Packet4f psqrt<Packet4f>(const Packet4f& x)
{
Packet4f res;
+#if !defined(__ARCH__) || (defined(__ARCH__) && __ARCH__ >= 12)
+ res = vec_sqrt(x);
+#else
res.v4f[0] = psqrt<Packet2d>(x.v4f[0]);
res.v4f[1] = psqrt<Packet2d>(x.v4f[1]);
+#endif
return res;
}
template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
Packet2d prsqrt<Packet2d>(const Packet2d& x) {
- // Unfortunately we can't use the much faster mm_rqsrt_pd since it only provides an approximation.
return pset1<Packet2d>(1.0) / psqrt<Packet2d>(x);
}
template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
Packet4f prsqrt<Packet4f>(const Packet4f& x) {
Packet4f res;
+#if !defined(__ARCH__) || (defined(__ARCH__) && __ARCH__ >= 12)
+ res = pset1<Packet4f>(1.0) / psqrt<Packet4f>(x);
+#else
res.v4f[0] = prsqrt<Packet2d>(x.v4f[0]);
res.v4f[1] = prsqrt<Packet2d>(x.v4f[1]);
+#endif
return res;
}
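The block of cephes_* constants added above, and the commented-out z14 pexp body, follow the standard Cephes expf scheme: split x into n*ln(2) + g, approximate exp(g) with a degree-5 polynomial, then scale by 2^n. A scalar sketch of that scheme using the same constants (illustrative only; the committed z14 path currently just returns _x and advertises HasExp = 0):

    #include <algorithm>
    #include <cmath>

    // Scalar sketch of the Cephes-style expf range reduction mirrored by the packet code above.
    float cephes_expf_sketch(float x)
    {
      const float LOG2EF = 1.44269504088896341f;               // 1/ln(2)
      const float C1 = 0.693359375f, C2 = -2.12194440e-4f;     // ln(2) split into two parts

      x = std::min(std::max(x, -88.3762626647949f), 88.3762626647950f);  // clamp to exp_lo/exp_hi

      float n = std::floor(x * LOG2EF + 0.5f);                 // exp(x) = 2^n * exp(g)
      float g = x - n * C1 - n * C2;                           // |g| <= ln(2)/2
      float z = g * g;

      float y = 1.9875691500E-4f;                              // cephes_exp_p0
      y = y * g + 1.3981999507E-3f;                            // p1
      y = y * g + 8.3334519073E-3f;                            // p2
      y = y * g + 4.1665795894E-2f;                            // p3
      y = y * g + 1.6666665459E-1f;                            // p4
      y = y * g + 5.0000001201E-1f;                            // p5
      y = y * z + g + 1.0f;                                    // exp(g) ~ 1 + g + g^2 * P(g)

      return std::ldexp(y, static_cast<int>(n));               // multiply by 2^n
    }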
diff --git a/Eigen/src/Core/arch/ZVector/PacketMath.h b/Eigen/src/Core/arch/ZVector/PacketMath.h
index 57b01fc63..0b37f4992 100755
--- a/Eigen/src/Core/arch/ZVector/PacketMath.h
+++ b/Eigen/src/Core/arch/ZVector/PacketMath.h
@@ -17,7 +17,7 @@ namespace Eigen {
namespace internal {
#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
-#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 4
+#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 16
#endif
#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
@@ -29,7 +29,7 @@ namespace internal {
#endif
#ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS
-#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 16
+#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 32
#endif
typedef __vector int Packet4i;
@@ -41,9 +41,14 @@ typedef __vector double Packet2d;
typedef __vector unsigned long long Packet2ul;
typedef __vector long long Packet2l;
+// Z14 has builtin support for float vectors
+#if !defined(__ARCH__) || (defined(__ARCH__) && __ARCH__ >= 12)
+typedef __vector float Packet4f;
+#else
typedef struct {
Packet2d v4f[2];
} Packet4f;
+#endif
typedef union {
int32_t i[4];
@@ -51,11 +56,15 @@ typedef union {
int64_t l[2];
uint64_t ul[2];
double d[2];
+ float f[4];
Packet4i v4i;
Packet4ui v4ui;
Packet2l v2l;
Packet2ul v2ul;
Packet2d v2d;
+#if !defined(__ARCH__) || (defined(__ARCH__) && __ARCH__ >= 12)
+ Packet4f v4f;
+#endif
} Packet;
// We don't want to write the same code all the time, but we need to reuse the constants
@@ -80,7 +89,7 @@ typedef union {
Packet2l p2l_##NAME = pset1<Packet2l>(X)
// These constants are endian-agnostic
-//static _EIGEN_DECLARE_CONST_FAST_Packet4i(ZERO, 0); //{ 0, 0, 0, 0,}
+static _EIGEN_DECLARE_CONST_FAST_Packet4i(ZERO, 0); //{ 0, 0, 0, 0,}
static _EIGEN_DECLARE_CONST_FAST_Packet4i(ONE, 1); //{ 1, 1, 1, 1}
static _EIGEN_DECLARE_CONST_FAST_Packet2d(ZERO, 0);
@@ -90,6 +99,21 @@ static _EIGEN_DECLARE_CONST_FAST_Packet2l(ONE, 1);
static Packet2d p2d_ONE = { 1.0, 1.0 };
static Packet2d p2d_ZERO_ = { -0.0, -0.0 };
+#if !defined(__ARCH__) || (defined(__ARCH__) && __ARCH__ >= 12)
+#define _EIGEN_DECLARE_CONST_FAST_Packet4f(NAME,X) \
+ Packet4f p4f_##NAME = reinterpret_cast<Packet4f>(vec_splat_s32(X))
+
+#define _EIGEN_DECLARE_CONST_Packet4f(NAME,X) \
+ Packet4f p4f_##NAME = pset1<Packet4f>(X)
+
+#define _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(NAME,X) \
+ const Packet4f p4f_##NAME = reinterpret_cast<Packet4f>(pset1<Packet4i>(X))
+
+static _EIGEN_DECLARE_CONST_FAST_Packet4f(ZERO, 0); //{ 0.0, 0.0, 0.0, 0.0}
+static _EIGEN_DECLARE_CONST_FAST_Packet4i(MINUS1,-1); //{ -1, -1, -1, -1}
+static Packet4f p4f_MZERO = { 0x80000000, 0x80000000, 0x80000000, 0x80000000};
+#endif
+
static Packet4i p4i_COUNTDOWN = { 0, 1, 2, 3 };
static Packet4f p4f_COUNTDOWN = { 0.0, 1.0, 2.0, 3.0 };
static Packet2d p2d_COUNTDOWN = reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet16uc>(p2d_ZERO), reinterpret_cast<Packet16uc>(p2d_ONE), 8));
@@ -120,9 +144,9 @@ static Packet16uc p16uc_TRANSPOSE64_LO = vec_add(p16uc_PSET64_LO, p16uc_HALF64_0
static Packet16uc p16uc_TRANSPOSE64_HI = { 0,1,2,3, 4,5,6,7, 16,17,18,19, 20,21,22,23};
static Packet16uc p16uc_TRANSPOSE64_LO = { 8,9,10,11, 12,13,14,15, 24,25,26,27, 28,29,30,31};
-//static Packet16uc p16uc_COMPLEX32_REV = vec_sld(p16uc_REVERSE32, p16uc_REVERSE32, 8); //{ 4,5,6,7, 0,1,2,3, 12,13,14,15, 8,9,10,11 };
+static Packet16uc p16uc_COMPLEX32_REV = vec_sld(p16uc_REVERSE32, p16uc_REVERSE32, 8); //{ 4,5,6,7, 0,1,2,3, 12,13,14,15, 8,9,10,11 };
-//static Packet16uc p16uc_COMPLEX32_REV2 = vec_sld(p16uc_FORWARD, p16uc_FORWARD, 8); //{ 8,9,10,11, 12,13,14,15, 0,1,2,3, 4,5,6,7 };
+static Packet16uc p16uc_COMPLEX32_REV2 = vec_sld(p16uc_FORWARD, p16uc_FORWARD, 8); //{ 8,9,10,11, 12,13,14,15, 0,1,2,3, 4,5,6,7 };
#if EIGEN_HAS_BUILTIN(__builtin_prefetch) || EIGEN_COMP_GNUC
@@ -169,7 +193,11 @@ template<> struct packet_traits<float> : default_packet_traits
HasSin = 0,
HasCos = 0,
HasLog = 0,
+#if !defined(__ARCH__) || (defined(__ARCH__) && __ARCH__ >= 12)
+ HasExp = 0,
+#else
HasExp = 1,
+#endif
HasSqrt = 1,
HasRsqrt = 1,
HasRound = 1,
@@ -258,31 +286,16 @@ inline std::ostream & operator <<(std::ostream & s, const Packet2d & v)
return s;
}
-/* Helper function to simulate a vec_splat_packet4f
- */
-template<int element> EIGEN_STRONG_INLINE Packet4f vec_splat_packet4f(const Packet4f& from)
+#if !defined(__ARCH__) || (defined(__ARCH__) && __ARCH__ >= 12)
+inline std::ostream & operator <<(std::ostream & s, const Packet4f & v)
{
- Packet4f splat;
- switch (element) {
- case 0:
- splat.v4f[0] = vec_splat(from.v4f[0], 0);
- splat.v4f[1] = splat.v4f[0];
- break;
- case 1:
- splat.v4f[0] = vec_splat(from.v4f[0], 1);
- splat.v4f[1] = splat.v4f[0];
- break;
- case 2:
- splat.v4f[0] = vec_splat(from.v4f[1], 0);
- splat.v4f[1] = splat.v4f[0];
- break;
- case 3:
- splat.v4f[0] = vec_splat(from.v4f[1], 1);
- splat.v4f[1] = splat.v4f[0];
- break;
- }
- return splat;
+ Packet vt;
+ vt.v4f = v;
+ s << vt.f[0] << ", " << vt.f[1] << ", " << vt.f[2] << ", " << vt.f[3];
+ return s;
}
+#endif
+
template<int Offset>
struct palign_impl<Offset,Packet4i>
@@ -300,31 +313,6 @@ struct palign_impl<Offset,Packet4i>
}
};
-/* This is a tricky one, we have to translate float alignment to vector elements of sizeof double
- */
-template<int Offset>
-struct palign_impl<Offset,Packet4f>
-{
- static EIGEN_STRONG_INLINE void run(Packet4f& first, const Packet4f& second)
- {
- switch (Offset % 4) {
- case 1:
- first.v4f[0] = vec_sld(first.v4f[0], first.v4f[1], 8);
- first.v4f[1] = vec_sld(first.v4f[1], second.v4f[0], 8);
- break;
- case 2:
- first.v4f[0] = first.v4f[1];
- first.v4f[1] = second.v4f[0];
- break;
- case 3:
- first.v4f[0] = vec_sld(first.v4f[1], second.v4f[0], 8);
- first.v4f[1] = vec_sld(second.v4f[0], second.v4f[1], 8);
- break;
- }
- }
-};
-
-
template<int Offset>
struct palign_impl<Offset,Packet2d>
{
@@ -344,16 +332,6 @@ template<> EIGEN_STRONG_INLINE Packet4i pload<Packet4i>(const int* from)
return vfrom->v4i;
}
-template<> EIGEN_STRONG_INLINE Packet4f pload<Packet4f>(const float* from)
-{
- // FIXME: No intrinsic yet
- EIGEN_DEBUG_ALIGNED_LOAD
- Packet4f vfrom;
- vfrom.v4f[0] = vec_ld2f(&from[0]);
- vfrom.v4f[1] = vec_ld2f(&from[2]);
- return vfrom;
-}
-
template<> EIGEN_STRONG_INLINE Packet2d pload<Packet2d>(const double* from)
{
// FIXME: No intrinsic yet
@@ -372,15 +350,6 @@ template<> EIGEN_STRONG_INLINE void pstore<int>(int* to, const Packet4i& f
vto->v4i = from;
}
-template<> EIGEN_STRONG_INLINE void pstore<float>(float* to, const Packet4f& from)
-{
- // FIXME: No intrinsic yet
- EIGEN_DEBUG_ALIGNED_STORE
- vec_st2f(from.v4f[0], &to[0]);
- vec_st2f(from.v4f[1], &to[2]);
-}
-
-
template<> EIGEN_STRONG_INLINE void pstore<double>(double* to, const Packet2d& from)
{
// FIXME: No intrinsic yet
@@ -397,13 +366,6 @@ template<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int& from)
template<> EIGEN_STRONG_INLINE Packet2d pset1<Packet2d>(const double& from) {
return vec_splats(from);
}
-template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float& from)
-{
- Packet4f to;
- to.v4f[0] = pset1<Packet2d>(static_cast<const double&>(from));
- to.v4f[1] = to.v4f[0];
- return to;
-}
template<> EIGEN_STRONG_INLINE void
pbroadcast4<Packet4i>(const int *a,
@@ -417,17 +379,6 @@ pbroadcast4<Packet4i>(const int *a,
}
template<> EIGEN_STRONG_INLINE void
-pbroadcast4<Packet4f>(const float *a,
- Packet4f& a0, Packet4f& a1, Packet4f& a2, Packet4f& a3)
-{
- a3 = pload<Packet4f>(a);
- a0 = vec_splat_packet4f<0>(a3);
- a1 = vec_splat_packet4f<1>(a3);
- a2 = vec_splat_packet4f<2>(a3);
- a3 = vec_splat_packet4f<3>(a3);
-}
-
-template<> EIGEN_STRONG_INLINE void
pbroadcast4<Packet2d>(const double *a,
Packet2d& a0, Packet2d& a1, Packet2d& a2, Packet2d& a3)
{
@@ -449,16 +400,6 @@ template<> EIGEN_DEVICE_FUNC inline Packet4i pgather<int, Packet4i>(const int* f
return pload<Packet4i>(ai);
}
-template<> EIGEN_DEVICE_FUNC inline Packet4f pgather<float, Packet4f>(const float* from, Index stride)
-{
- float EIGEN_ALIGN16 ai[4];
- ai[0] = from[0*stride];
- ai[1] = from[1*stride];
- ai[2] = from[2*stride];
- ai[3] = from[3*stride];
- return pload<Packet4f>(ai);
-}
-
template<> EIGEN_DEVICE_FUNC inline Packet2d pgather<double, Packet2d>(const double* from, Index stride)
{
double EIGEN_ALIGN16 af[2];
@@ -477,16 +418,6 @@ template<> EIGEN_DEVICE_FUNC inline void pscatter<int, Packet4i>(int* to, const
to[3*stride] = ai[3];
}
-template<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet4f>(float* to, const Packet4f& from, Index stride)
-{
- float EIGEN_ALIGN16 ai[4];
- pstore<float>((float *)ai, from);
- to[0*stride] = ai[0];
- to[1*stride] = ai[1];
- to[2*stride] = ai[2];
- to[3*stride] = ai[3];
-}
-
template<> EIGEN_DEVICE_FUNC inline void pscatter<double, Packet2d>(double* to, const Packet2d& from, Index stride)
{
double EIGEN_ALIGN16 af[2];
@@ -496,160 +427,52 @@ template<> EIGEN_DEVICE_FUNC inline void pscatter<double, Packet2d>(double* to,
}
template<> EIGEN_STRONG_INLINE Packet4i padd<Packet4i>(const Packet4i& a, const Packet4i& b) { return (a + b); }
-template<> EIGEN_STRONG_INLINE Packet4f padd<Packet4f>(const Packet4f& a, const Packet4f& b)
-{
- Packet4f c;
- c.v4f[0] = a.v4f[0] + b.v4f[0];
- c.v4f[1] = a.v4f[1] + b.v4f[1];
- return c;
-}
template<> EIGEN_STRONG_INLINE Packet2d padd<Packet2d>(const Packet2d& a, const Packet2d& b) { return (a + b); }
template<> EIGEN_STRONG_INLINE Packet4i psub<Packet4i>(const Packet4i& a, const Packet4i& b) { return (a - b); }
-template<> EIGEN_STRONG_INLINE Packet4f psub<Packet4f>(const Packet4f& a, const Packet4f& b)
-{
- Packet4f c;
- c.v4f[0] = a.v4f[0] - b.v4f[0];
- c.v4f[1] = a.v4f[1] - b.v4f[1];
- return c;
-}
template<> EIGEN_STRONG_INLINE Packet2d psub<Packet2d>(const Packet2d& a, const Packet2d& b) { return (a - b); }
template<> EIGEN_STRONG_INLINE Packet4i pmul<Packet4i>(const Packet4i& a, const Packet4i& b) { return (a * b); }
-template<> EIGEN_STRONG_INLINE Packet4f pmul<Packet4f>(const Packet4f& a, const Packet4f& b)
-{
- Packet4f c;
- c.v4f[0] = a.v4f[0] * b.v4f[0];
- c.v4f[1] = a.v4f[1] * b.v4f[1];
- return c;
-}
template<> EIGEN_STRONG_INLINE Packet2d pmul<Packet2d>(const Packet2d& a, const Packet2d& b) { return (a * b); }
template<> EIGEN_STRONG_INLINE Packet4i pdiv<Packet4i>(const Packet4i& a, const Packet4i& b) { return (a / b); }
-template<> EIGEN_STRONG_INLINE Packet4f pdiv<Packet4f>(const Packet4f& a, const Packet4f& b)
-{
- Packet4f c;
- c.v4f[0] = a.v4f[0] / b.v4f[0];
- c.v4f[1] = a.v4f[1] / b.v4f[1];
- return c;
-}
template<> EIGEN_STRONG_INLINE Packet2d pdiv<Packet2d>(const Packet2d& a, const Packet2d& b) { return (a / b); }
template<> EIGEN_STRONG_INLINE Packet4i pnegate(const Packet4i& a) { return (-a); }
-template<> EIGEN_STRONG_INLINE Packet4f pnegate(const Packet4f& a)
-{
- Packet4f c;
- c.v4f[0] = -a.v4f[0];
- c.v4f[1] = -a.v4f[1];
- return c;
-}
template<> EIGEN_STRONG_INLINE Packet2d pnegate(const Packet2d& a) { return (-a); }
template<> EIGEN_STRONG_INLINE Packet4i pconj(const Packet4i& a) { return a; }
-template<> EIGEN_STRONG_INLINE Packet4f pconj(const Packet4f& a) { return a; }
template<> EIGEN_STRONG_INLINE Packet2d pconj(const Packet2d& a) { return a; }
template<> EIGEN_STRONG_INLINE Packet4i pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c) { return padd<Packet4i>(pmul<Packet4i>(a, b), c); }
-template<> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c)
-{
- Packet4f res;
- res.v4f[0] = vec_madd(a.v4f[0], b.v4f[0], c.v4f[0]);
- res.v4f[1] = vec_madd(a.v4f[1], b.v4f[1], c.v4f[1]);
- return res;
-}
template<> EIGEN_STRONG_INLINE Packet2d pmadd(const Packet2d& a, const Packet2d& b, const Packet2d& c) { return vec_madd(a, b, c); }
template<> EIGEN_STRONG_INLINE Packet4i plset<Packet4i>(const int& a) { return padd<Packet4i>(pset1<Packet4i>(a), p4i_COUNTDOWN); }
-template<> EIGEN_STRONG_INLINE Packet4f plset<Packet4f>(const float& a) { return padd<Packet4f>(pset1<Packet4f>(a), p4f_COUNTDOWN); }
template<> EIGEN_STRONG_INLINE Packet2d plset<Packet2d>(const double& a) { return padd<Packet2d>(pset1<Packet2d>(a), p2d_COUNTDOWN); }
template<> EIGEN_STRONG_INLINE Packet4i pmin<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_min(a, b); }
template<> EIGEN_STRONG_INLINE Packet2d pmin<Packet2d>(const Packet2d& a, const Packet2d& b) { return vec_min(a, b); }
-template<> EIGEN_STRONG_INLINE Packet4f pmin<Packet4f>(const Packet4f& a, const Packet4f& b)
-{
- Packet4f res;
- res.v4f[0] = pmin(a.v4f[0], b.v4f[0]);
- res.v4f[1] = pmin(a.v4f[1], b.v4f[1]);
- return res;
-}
template<> EIGEN_STRONG_INLINE Packet4i pmax<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_max(a, b); }
template<> EIGEN_STRONG_INLINE Packet2d pmax<Packet2d>(const Packet2d& a, const Packet2d& b) { return vec_max(a, b); }
-template<> EIGEN_STRONG_INLINE Packet4f pmax<Packet4f>(const Packet4f& a, const Packet4f& b)
-{
- Packet4f res;
- res.v4f[0] = pmax(a.v4f[0], b.v4f[0]);
- res.v4f[1] = pmax(a.v4f[1], b.v4f[1]);
- return res;
-}
template<> EIGEN_STRONG_INLINE Packet4i pand<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_and(a, b); }
template<> EIGEN_STRONG_INLINE Packet2d pand<Packet2d>(const Packet2d& a, const Packet2d& b) { return vec_and(a, b); }
-template<> EIGEN_STRONG_INLINE Packet4f pand<Packet4f>(const Packet4f& a, const Packet4f& b)
-{
- Packet4f res;
- res.v4f[0] = pand(a.v4f[0], b.v4f[0]);
- res.v4f[1] = pand(a.v4f[1], b.v4f[1]);
- return res;
-}
template<> EIGEN_STRONG_INLINE Packet4i por<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_or(a, b); }
template<> EIGEN_STRONG_INLINE Packet2d por<Packet2d>(const Packet2d& a, const Packet2d& b) { return vec_or(a, b); }
-template<> EIGEN_STRONG_INLINE Packet4f por<Packet4f>(const Packet4f& a, const Packet4f& b)
-{
- Packet4f res;
- res.v4f[0] = pand(a.v4f[0], b.v4f[0]);
- res.v4f[1] = pand(a.v4f[1], b.v4f[1]);
- return res;
-}
template<> EIGEN_STRONG_INLINE Packet4i pxor<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_xor(a, b); }
template<> EIGEN_STRONG_INLINE Packet2d pxor<Packet2d>(const Packet2d& a, const Packet2d& b) { return vec_xor(a, b); }
-template<> EIGEN_STRONG_INLINE Packet4f pxor<Packet4f>(const Packet4f& a, const Packet4f& b)
-{
- Packet4f res;
- res.v4f[0] = pand(a.v4f[0], b.v4f[0]);
- res.v4f[1] = pand(a.v4f[1], b.v4f[1]);
- return res;
-}
template<> EIGEN_STRONG_INLINE Packet4i pandnot<Packet4i>(const Packet4i& a, const Packet4i& b) { return pand<Packet4i>(a, vec_nor(b, b)); }
template<> EIGEN_STRONG_INLINE Packet2d pandnot<Packet2d>(const Packet2d& a, const Packet2d& b) { return vec_and(a, vec_nor(b, b)); }
-template<> EIGEN_STRONG_INLINE Packet4f pandnot<Packet4f>(const Packet4f& a, const Packet4f& b)
-{
- Packet4f res;
- res.v4f[0] = pandnot(a.v4f[0], b.v4f[0]);
- res.v4f[1] = pandnot(a.v4f[1], b.v4f[1]);
- return res;
-}
-template<> EIGEN_STRONG_INLINE Packet4f pround<Packet4f>(const Packet4f& a)
-{
- Packet4f res;
- res.v4f[0] = vec_round(a.v4f[0]);
- res.v4f[1] = vec_round(a.v4f[1]);
- return res;
-}
template<> EIGEN_STRONG_INLINE Packet2d pround<Packet2d>(const Packet2d& a) { return vec_round(a); }
-template<> EIGEN_STRONG_INLINE Packet4f pceil<Packet4f>(const Packet4f& a)
-{
- Packet4f res;
- res.v4f[0] = vec_ceil(a.v4f[0]);
- res.v4f[1] = vec_ceil(a.v4f[1]);
- return res;
-}
template<> EIGEN_STRONG_INLINE Packet2d pceil<Packet2d>(const Packet2d& a) { return vec_ceil(a); }
-template<> EIGEN_STRONG_INLINE Packet4f pfloor<Packet4f>(const Packet4f& a)
-{
- Packet4f res;
- res.v4f[0] = vec_floor(a.v4f[0]);
- res.v4f[1] = vec_floor(a.v4f[1]);
- return res;
-}
template<> EIGEN_STRONG_INLINE Packet2d pfloor<Packet2d>(const Packet2d& a) { return vec_floor(a); }
template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int* from) { return pload<Packet4i>(from); }
-template<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float* from) { return pload<Packet4f>(from); }
template<> EIGEN_STRONG_INLINE Packet2d ploadu<Packet2d>(const double* from) { return pload<Packet2d>(from); }
@@ -659,14 +482,6 @@ template<> EIGEN_STRONG_INLINE Packet4i ploaddup<Packet4i>(const int* from)
return vec_perm(p, p, p16uc_DUPLICATE32_HI);
}
-template<> EIGEN_STRONG_INLINE Packet4f ploaddup<Packet4f>(const float* from)
-{
- Packet4f p = pload<Packet4f>(from);
- p.v4f[1] = vec_splat(p.v4f[0], 1);
- p.v4f[0] = vec_splat(p.v4f[0], 0);
- return p;
-}
-
template<> EIGEN_STRONG_INLINE Packet2d ploaddup<Packet2d>(const double* from)
{
Packet2d p = pload<Packet2d>(from);
@@ -674,15 +489,12 @@ template<> EIGEN_STRONG_INLINE Packet2d ploaddup<Packet2d>(const double* from)
}
template<> EIGEN_STRONG_INLINE void pstoreu<int>(int* to, const Packet4i& from) { pstore<int>(to, from); }
-template<> EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const Packet4f& from) { pstore<float>(to, from); }
template<> EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const Packet2d& from) { pstore<double>(to, from); }
template<> EIGEN_STRONG_INLINE void prefetch<int>(const int* addr) { EIGEN_ZVECTOR_PREFETCH(addr); }
-template<> EIGEN_STRONG_INLINE void prefetch<float>(const float* addr) { EIGEN_ZVECTOR_PREFETCH(addr); }
template<> EIGEN_STRONG_INLINE void prefetch<double>(const double* addr) { EIGEN_ZVECTOR_PREFETCH(addr); }
template<> EIGEN_STRONG_INLINE int pfirst<Packet4i>(const Packet4i& a) { int EIGEN_ALIGN16 x[4]; pstore(x, a); return x[0]; }
-template<> EIGEN_STRONG_INLINE float pfirst<Packet4f>(const Packet4f& a) { float EIGEN_ALIGN16 x[2]; vec_st2f(a.v4f[0], &x[0]); return x[0]; }
template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { double EIGEN_ALIGN16 x[2]; pstore(x, a); return x[0]; }
template<> EIGEN_STRONG_INLINE Packet4i preverse(const Packet4i& a)
@@ -695,23 +507,8 @@ template<> EIGEN_STRONG_INLINE Packet2d preverse(const Packet2d& a)
return reinterpret_cast<Packet2d>(vec_perm(reinterpret_cast<Packet16uc>(a), reinterpret_cast<Packet16uc>(a), p16uc_REVERSE64));
}
-template<> EIGEN_STRONG_INLINE Packet4f preverse(const Packet4f& a)
-{
- Packet4f rev;
- rev.v4f[0] = preverse<Packet2d>(a.v4f[1]);
- rev.v4f[1] = preverse<Packet2d>(a.v4f[0]);
- return rev;
-}
-
template<> EIGEN_STRONG_INLINE Packet4i pabs<Packet4i>(const Packet4i& a) { return vec_abs(a); }
template<> EIGEN_STRONG_INLINE Packet2d pabs<Packet2d>(const Packet2d& a) { return vec_abs(a); }
-template<> EIGEN_STRONG_INLINE Packet4f pabs<Packet4f>(const Packet4f& a)
-{
- Packet4f res;
- res.v4f[0] = pabs(a.v4f[0]);
- res.v4f[1] = pabs(a.v4f[1]);
- return res;
-}
template<> EIGEN_STRONG_INLINE int predux<Packet4i>(const Packet4i& a)
{
@@ -730,13 +527,6 @@ template<> EIGEN_STRONG_INLINE double predux<Packet2d>(const Packet2d& a)
sum = padd<Packet2d>(a, b);
return pfirst(sum);
}
-template<> EIGEN_STRONG_INLINE float predux<Packet4f>(const Packet4f& a)
-{
- Packet2d sum;
- sum = padd<Packet2d>(a.v4f[0], a.v4f[1]);
- double first = predux<Packet2d>(sum);
- return static_cast<float>(first);
-}
template<> EIGEN_STRONG_INLINE Packet4i preduxp<Packet4i>(const Packet4i* vecs)
{
@@ -777,21 +567,6 @@ template<> EIGEN_STRONG_INLINE Packet2d preduxp<Packet2d>(const Packet2d* vecs)
return sum;
}
-template<> EIGEN_STRONG_INLINE Packet4f preduxp<Packet4f>(const Packet4f* vecs)
-{
- PacketBlock<Packet4f,4> transpose;
- transpose.packet[0] = vecs[0];
- transpose.packet[1] = vecs[1];
- transpose.packet[2] = vecs[2];
- transpose.packet[3] = vecs[3];
- ptranspose(transpose);
-
- Packet4f sum = padd(transpose.packet[0], transpose.packet[1]);
- sum = padd(sum, transpose.packet[2]);
- sum = padd(sum, transpose.packet[3]);
- return sum;
-}
-
// Other reduction functions:
// mul
template<> EIGEN_STRONG_INLINE int predux_mul<Packet4i>(const Packet4i& a)
@@ -806,12 +581,6 @@ template<> EIGEN_STRONG_INLINE double predux_mul<Packet2d>(const Packet2d& a)
return pfirst(pmul(a, reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4i>(a), reinterpret_cast<Packet4i>(a), 8))));
}
-template<> EIGEN_STRONG_INLINE float predux_mul<Packet4f>(const Packet4f& a)
-{
- // Return predux_mul<Packet2d> of the subvectors product
- return static_cast<float>(pfirst(predux_mul(pmul(a.v4f[0], a.v4f[1]))));
-}
-
// min
template<> EIGEN_STRONG_INLINE int predux_min<Packet4i>(const Packet4i& a)
{
@@ -826,14 +595,6 @@ template<> EIGEN_STRONG_INLINE double predux_min<Packet2d>(const Packet2d& a)
return pfirst(pmin<Packet2d>(a, reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4i>(a), reinterpret_cast<Packet4i>(a), 8))));
}
-template<> EIGEN_STRONG_INLINE float predux_min<Packet4f>(const Packet4f& a)
-{
- Packet2d b, res;
- b = pmin<Packet2d>(a.v4f[0], a.v4f[1]);
- res = pmin<Packet2d>(b, reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4i>(b), reinterpret_cast<Packet4i>(b), 8)));
- return static_cast<float>(pfirst(res));
-}
-
// max
template<> EIGEN_STRONG_INLINE int predux_max<Packet4i>(const Packet4i& a)
{
@@ -849,14 +610,6 @@ template<> EIGEN_STRONG_INLINE double predux_max<Packet2d>(const Packet2d& a)
return pfirst(pmax<Packet2d>(a, reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4i>(a), reinterpret_cast<Packet4i>(a), 8))));
}
-template<> EIGEN_STRONG_INLINE float predux_max<Packet4f>(const Packet4f& a)
-{
- Packet2d b, res;
- b = pmax<Packet2d>(a.v4f[0], a.v4f[1]);
- res = pmax<Packet2d>(b, reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4i>(b), reinterpret_cast<Packet4i>(b), 8)));
- return static_cast<float>(pfirst(res));
-}
-
EIGEN_DEVICE_FUNC inline void
ptranspose(PacketBlock<Packet4i,4>& kernel) {
Packet4i t0 = vec_mergeh(kernel.packet[0], kernel.packet[2]);
@@ -877,6 +630,321 @@ ptranspose(PacketBlock<Packet2d,2>& kernel) {
kernel.packet[1] = t1;
}
+template<> EIGEN_STRONG_INLINE Packet4i pblend(const Selector<4>& ifPacket, const Packet4i& thenPacket, const Packet4i& elsePacket) {
+ Packet4ui select = { ifPacket.select[0], ifPacket.select[1], ifPacket.select[2], ifPacket.select[3] };
+ Packet4ui mask = vec_cmpeq(select, reinterpret_cast<Packet4ui>(p4i_ONE));
+ return vec_sel(elsePacket, thenPacket, mask);
+}
+
+
+template<> EIGEN_STRONG_INLINE Packet2d pblend(const Selector<2>& ifPacket, const Packet2d& thenPacket, const Packet2d& elsePacket) {
+ Packet2ul select = { ifPacket.select[0], ifPacket.select[1] };
+ Packet2ul mask = vec_cmpeq(select, reinterpret_cast<Packet2ul>(p2l_ONE));
+ return vec_sel(elsePacket, thenPacket, mask);
+}
+
+/* z13 has no vector float support, so we emulate it with pairs of doubles;
+   z14 has proper vector float support.
+*/
+#if !defined(__ARCH__) || (defined(__ARCH__) && __ARCH__ < 12)
+/* Helper function to simulate a vec_splat_packet4f
+ */
+template<int element> EIGEN_STRONG_INLINE Packet4f vec_splat_packet4f(const Packet4f& from)
+{
+ Packet4f splat;
+ switch (element) {
+ case 0:
+ splat.v4f[0] = vec_splat(from.v4f[0], 0);
+ splat.v4f[1] = splat.v4f[0];
+ break;
+ case 1:
+ splat.v4f[0] = vec_splat(from.v4f[0], 1);
+ splat.v4f[1] = splat.v4f[0];
+ break;
+ case 2:
+ splat.v4f[0] = vec_splat(from.v4f[1], 0);
+ splat.v4f[1] = splat.v4f[0];
+ break;
+ case 3:
+ splat.v4f[0] = vec_splat(from.v4f[1], 1);
+ splat.v4f[1] = splat.v4f[0];
+ break;
+ }
+ return splat;
+}
+
+/* This is a tricky one: we have to translate float alignment to vector elements of sizeof(double).
+ */
+template<int Offset>
+struct palign_impl<Offset,Packet4f>
+{
+ static EIGEN_STRONG_INLINE void run(Packet4f& first, const Packet4f& second)
+ {
+ switch (Offset % 4) {
+ case 1:
+ first.v4f[0] = vec_sld(first.v4f[0], first.v4f[1], 8);
+ first.v4f[1] = vec_sld(first.v4f[1], second.v4f[0], 8);
+ break;
+ case 2:
+ first.v4f[0] = first.v4f[1];
+ first.v4f[1] = second.v4f[0];
+ break;
+ case 3:
+ first.v4f[0] = vec_sld(first.v4f[1], second.v4f[0], 8);
+ first.v4f[1] = vec_sld(second.v4f[0], second.v4f[1], 8);
+ break;
+ }
+ }
+};
+
+template<> EIGEN_STRONG_INLINE Packet4f pload<Packet4f>(const float* from)
+{
+ // FIXME: No intrinsic yet
+ EIGEN_DEBUG_ALIGNED_LOAD
+ Packet4f vfrom;
+ vfrom.v4f[0] = vec_ld2f(&from[0]);
+ vfrom.v4f[1] = vec_ld2f(&from[2]);
+ return vfrom;
+}
+
+template<> EIGEN_STRONG_INLINE void pstore<float>(float* to, const Packet4f& from)
+{
+ // FIXME: No intrinsic yet
+ EIGEN_DEBUG_ALIGNED_STORE
+ vec_st2f(from.v4f[0], &to[0]);
+ vec_st2f(from.v4f[1], &to[2]);
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float& from)
+{
+ Packet4f to;
+ to.v4f[0] = pset1<Packet2d>(static_cast<const double&>(from));
+ to.v4f[1] = to.v4f[0];
+ return to;
+}
+
+template<> EIGEN_STRONG_INLINE void
+pbroadcast4<Packet4f>(const float *a,
+ Packet4f& a0, Packet4f& a1, Packet4f& a2, Packet4f& a3)
+{
+ a3 = pload<Packet4f>(a);
+ a0 = vec_splat_packet4f<0>(a3);
+ a1 = vec_splat_packet4f<1>(a3);
+ a2 = vec_splat_packet4f<2>(a3);
+ a3 = vec_splat_packet4f<3>(a3);
+}
+
+template<> EIGEN_DEVICE_FUNC inline Packet4f pgather<float, Packet4f>(const float* from, Index stride)
+{
+ float EIGEN_ALIGN16 ai[4];
+ ai[0] = from[0*stride];
+ ai[1] = from[1*stride];
+ ai[2] = from[2*stride];
+ ai[3] = from[3*stride];
+ return pload<Packet4f>(ai);
+}
+
+template<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet4f>(float* to, const Packet4f& from, Index stride)
+{
+ float EIGEN_ALIGN16 ai[4];
+ pstore<float>((float *)ai, from);
+ to[0*stride] = ai[0];
+ to[1*stride] = ai[1];
+ to[2*stride] = ai[2];
+ to[3*stride] = ai[3];
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f padd<Packet4f>(const Packet4f& a, const Packet4f& b)
+{
+ Packet4f c;
+ c.v4f[0] = a.v4f[0] + b.v4f[0];
+ c.v4f[1] = a.v4f[1] + b.v4f[1];
+ return c;
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f psub<Packet4f>(const Packet4f& a, const Packet4f& b)
+{
+ Packet4f c;
+ c.v4f[0] = a.v4f[0] - b.v4f[0];
+ c.v4f[1] = a.v4f[1] - b.v4f[1];
+ return c;
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f pmul<Packet4f>(const Packet4f& a, const Packet4f& b)
+{
+ Packet4f c;
+ c.v4f[0] = a.v4f[0] * b.v4f[0];
+ c.v4f[1] = a.v4f[1] * b.v4f[1];
+ return c;
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f pdiv<Packet4f>(const Packet4f& a, const Packet4f& b)
+{
+ Packet4f c;
+ c.v4f[0] = a.v4f[0] / b.v4f[0];
+ c.v4f[1] = a.v4f[1] / b.v4f[1];
+ return c;
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f pnegate(const Packet4f& a)
+{
+ Packet4f c;
+ c.v4f[0] = -a.v4f[0];
+ c.v4f[1] = -a.v4f[1];
+ return c;
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c)
+{
+ Packet4f res;
+ res.v4f[0] = vec_madd(a.v4f[0], b.v4f[0], c.v4f[0]);
+ res.v4f[1] = vec_madd(a.v4f[1], b.v4f[1], c.v4f[1]);
+ return res;
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f pmin<Packet4f>(const Packet4f& a, const Packet4f& b)
+{
+ Packet4f res;
+ res.v4f[0] = pmin(a.v4f[0], b.v4f[0]);
+ res.v4f[1] = pmin(a.v4f[1], b.v4f[1]);
+ return res;
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f pmax<Packet4f>(const Packet4f& a, const Packet4f& b)
+{
+ Packet4f res;
+ res.v4f[0] = pmax(a.v4f[0], b.v4f[0]);
+ res.v4f[1] = pmax(a.v4f[1], b.v4f[1]);
+ return res;
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f pand<Packet4f>(const Packet4f& a, const Packet4f& b)
+{
+ Packet4f res;
+ res.v4f[0] = pand(a.v4f[0], b.v4f[0]);
+ res.v4f[1] = pand(a.v4f[1], b.v4f[1]);
+ return res;
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f por<Packet4f>(const Packet4f& a, const Packet4f& b)
+{
+ Packet4f res;
+ res.v4f[0] = por(a.v4f[0], b.v4f[0]);
+ res.v4f[1] = por(a.v4f[1], b.v4f[1]);
+ return res;
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f pxor<Packet4f>(const Packet4f& a, const Packet4f& b)
+{
+ Packet4f res;
+ res.v4f[0] = pxor(a.v4f[0], b.v4f[0]);
+ res.v4f[1] = pxor(a.v4f[1], b.v4f[1]);
+ return res;
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f pandnot<Packet4f>(const Packet4f& a, const Packet4f& b)
+{
+ Packet4f res;
+ res.v4f[0] = pandnot(a.v4f[0], b.v4f[0]);
+ res.v4f[1] = pandnot(a.v4f[1], b.v4f[1]);
+ return res;
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f pround<Packet4f>(const Packet4f& a)
+{
+ Packet4f res;
+ res.v4f[0] = vec_round(a.v4f[0]);
+ res.v4f[1] = vec_round(a.v4f[1]);
+ return res;
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f pceil<Packet4f>(const Packet4f& a)
+{
+ Packet4f res;
+ res.v4f[0] = vec_ceil(a.v4f[0]);
+ res.v4f[1] = vec_ceil(a.v4f[1]);
+ return res;
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f pfloor<Packet4f>(const Packet4f& a)
+{
+ Packet4f res;
+ res.v4f[0] = vec_floor(a.v4f[0]);
+ res.v4f[1] = vec_floor(a.v4f[1]);
+ return res;
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f ploaddup<Packet4f>(const float* from)
+{
+ Packet4f p = pload<Packet4f>(from);
+ p.v4f[1] = vec_splat(p.v4f[0], 1);
+ p.v4f[0] = vec_splat(p.v4f[0], 0);
+ return p;
+}
+
+template<> EIGEN_STRONG_INLINE float pfirst<Packet4f>(const Packet4f& a) { float EIGEN_ALIGN16 x[2]; vec_st2f(a.v4f[0], &x[0]); return x[0]; }
+
+template<> EIGEN_STRONG_INLINE Packet4f preverse(const Packet4f& a)
+{
+ Packet4f rev;
+ rev.v4f[0] = preverse<Packet2d>(a.v4f[1]);
+ rev.v4f[1] = preverse<Packet2d>(a.v4f[0]);
+ return rev;
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f pabs<Packet4f>(const Packet4f& a)
+{
+ Packet4f res;
+ res.v4f[0] = pabs(a.v4f[0]);
+ res.v4f[1] = pabs(a.v4f[1]);
+ return res;
+}
+
+template<> EIGEN_STRONG_INLINE float predux<Packet4f>(const Packet4f& a)
+{
+ Packet2d sum;
+ sum = padd<Packet2d>(a.v4f[0], a.v4f[1]);
+ double first = predux<Packet2d>(sum);
+ return static_cast<float>(first);
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f preduxp<Packet4f>(const Packet4f* vecs)
+{
+ PacketBlock<Packet4f,4> transpose;
+ transpose.packet[0] = vecs[0];
+ transpose.packet[1] = vecs[1];
+ transpose.packet[2] = vecs[2];
+ transpose.packet[3] = vecs[3];
+ ptranspose(transpose);
+
+ Packet4f sum = padd(transpose.packet[0], transpose.packet[1]);
+ sum = padd(sum, transpose.packet[2]);
+ sum = padd(sum, transpose.packet[3]);
+ return sum;
+}
+
+template<> EIGEN_STRONG_INLINE float predux_mul<Packet4f>(const Packet4f& a)
+{
+ // Return predux_mul<Packet2d> of the subvectors product
+ return static_cast<float>(pfirst(predux_mul(pmul(a.v4f[0], a.v4f[1]))));
+}
+
+template<> EIGEN_STRONG_INLINE float predux_min<Packet4f>(const Packet4f& a)
+{
+ Packet2d b, res;
+ b = pmin<Packet2d>(a.v4f[0], a.v4f[1]);
+ res = pmin<Packet2d>(b, reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4i>(b), reinterpret_cast<Packet4i>(b), 8)));
+ return static_cast<float>(pfirst(res));
+}
+
+template<> EIGEN_STRONG_INLINE float predux_max<Packet4f>(const Packet4f& a)
+{
+ Packet2d b, res;
+ b = pmax<Packet2d>(a.v4f[0], a.v4f[1]);
+ res = pmax<Packet2d>(b, reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4i>(b), reinterpret_cast<Packet4i>(b), 8)));
+ return static_cast<float>(pfirst(res));
+}
+
/* Split the Packet4f PacketBlock into 4 Packet2d PacketBlocks and transpose each one
*/
EIGEN_DEVICE_FUNC inline void
@@ -915,12 +983,6 @@ ptranspose(PacketBlock<Packet4f,4>& kernel) {
kernel.packet[3].v4f[1] = t3.packet[1];
}
-template<> EIGEN_STRONG_INLINE Packet4i pblend(const Selector<4>& ifPacket, const Packet4i& thenPacket, const Packet4i& elsePacket) {
- Packet4ui select = { ifPacket.select[0], ifPacket.select[1], ifPacket.select[2], ifPacket.select[3] };
- Packet4ui mask = vec_cmpeq(select, reinterpret_cast<Packet4ui>(p4i_ONE));
- return vec_sel(elsePacket, thenPacket, mask);
-}
-
template<> EIGEN_STRONG_INLINE Packet4f pblend(const Selector<4>& ifPacket, const Packet4f& thenPacket, const Packet4f& elsePacket) {
Packet2ul select_hi = { ifPacket.select[0], ifPacket.select[1] };
Packet2ul select_lo = { ifPacket.select[2], ifPacket.select[3] };
@@ -931,13 +993,197 @@ template<> EIGEN_STRONG_INLINE Packet4f pblend(const Selector<4>& ifPacket, cons
result.v4f[1] = vec_sel(elsePacket.v4f[1], thenPacket.v4f[1], mask_lo);
return result;
}
+#else
+template<int Offset>
+struct palign_impl<Offset,Packet4f>
+{
+ static EIGEN_STRONG_INLINE void run(Packet4f& first, const Packet4f& second)
+ {
+ switch (Offset % 4) {
+ case 1:
+ first = vec_sld(first, second, 4); break;
+ case 2:
+ first = vec_sld(first, second, 8); break;
+ case 3:
+ first = vec_sld(first, second, 12); break;
+ }
+ }
+};
+
+template<> EIGEN_STRONG_INLINE Packet4f pload<Packet4f>(const float* from)
+{
+ // FIXME: No intrinsic yet
+ EIGEN_DEBUG_ALIGNED_LOAD
+ Packet *vfrom;
+ vfrom = (Packet *) from;
+ return vfrom->v4f;
+}
-template<> EIGEN_STRONG_INLINE Packet2d pblend(const Selector<2>& ifPacket, const Packet2d& thenPacket, const Packet2d& elsePacket) {
- Packet2ul select = { ifPacket.select[0], ifPacket.select[1] };
- Packet2ul mask = vec_cmpeq(select, reinterpret_cast<Packet2ul>(p2l_ONE));
+template<> EIGEN_STRONG_INLINE void pstore<float>(float* to, const Packet4f& from)
+{
+ // FIXME: No intrinsic yet
+ EIGEN_DEBUG_ALIGNED_STORE
+ Packet *vto;
+ vto = (Packet *) to;
+ vto->v4f = from;
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float& from)
+{
+ return vec_splats(from);
+}
+
+template<> EIGEN_STRONG_INLINE void
+pbroadcast4<Packet4f>(const float *a,
+ Packet4f& a0, Packet4f& a1, Packet4f& a2, Packet4f& a3)
+{
+ a3 = pload<Packet4f>(a);
+ a0 = vec_splat(a3, 0);
+ a1 = vec_splat(a3, 1);
+ a2 = vec_splat(a3, 2);
+ a3 = vec_splat(a3, 3);
+}
+
+template<> EIGEN_DEVICE_FUNC inline Packet4f pgather<float, Packet4f>(const float* from, Index stride)
+{
+ float EIGEN_ALIGN16 af[4];
+ af[0] = from[0*stride];
+ af[1] = from[1*stride];
+ af[2] = from[2*stride];
+ af[3] = from[3*stride];
+ return pload<Packet4f>(af);
+}
+
+template<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet4f>(float* to, const Packet4f& from, Index stride)
+{
+ float EIGEN_ALIGN16 af[4];
+ pstore<float>((float*)af, from);
+ to[0*stride] = af[0];
+ to[1*stride] = af[1];
+ to[2*stride] = af[2];
+ to[3*stride] = af[3];
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f padd<Packet4f>(const Packet4f& a, const Packet4f& b) { return (a + b); }
+template<> EIGEN_STRONG_INLINE Packet4f psub<Packet4f>(const Packet4f& a, const Packet4f& b) { return (a - b); }
+template<> EIGEN_STRONG_INLINE Packet4f pmul<Packet4f>(const Packet4f& a, const Packet4f& b) { return (a * b); }
+template<> EIGEN_STRONG_INLINE Packet4f pdiv<Packet4f>(const Packet4f& a, const Packet4f& b) { return (a / b); }
+template<> EIGEN_STRONG_INLINE Packet4f pnegate<Packet4f>(const Packet4f& a) { return (-a); }
+template<> EIGEN_STRONG_INLINE Packet4f pconj<Packet4f> (const Packet4f& a) { return a; }
+template<> EIGEN_STRONG_INLINE Packet4f pmadd<Packet4f> (const Packet4f& a, const Packet4f& b, const Packet4f& c) { return vec_madd(a, b, c); }
+template<> EIGEN_STRONG_INLINE Packet4f pmin<Packet4f> (const Packet4f& a, const Packet4f& b) { return vec_min(a, b); }
+template<> EIGEN_STRONG_INLINE Packet4f pmax<Packet4f> (const Packet4f& a, const Packet4f& b) { return vec_max(a, b); }
+template<> EIGEN_STRONG_INLINE Packet4f pand<Packet4f> (const Packet4f& a, const Packet4f& b) { return vec_and(a, b); }
+template<> EIGEN_STRONG_INLINE Packet4f por<Packet4f> (const Packet4f& a, const Packet4f& b) { return vec_or(a, b); }
+template<> EIGEN_STRONG_INLINE Packet4f pxor<Packet4f> (const Packet4f& a, const Packet4f& b) { return vec_xor(a, b); }
+template<> EIGEN_STRONG_INLINE Packet4f pandnot<Packet4f>(const Packet4f& a, const Packet4f& b) { return vec_and(a, vec_nor(b, b)); }
+template<> EIGEN_STRONG_INLINE Packet4f pround<Packet4f> (const Packet4f& a) { return vec_round(a); }
+template<> EIGEN_STRONG_INLINE Packet4f pceil<Packet4f> (const Packet4f& a) { return vec_ceil(a); }
+template<> EIGEN_STRONG_INLINE Packet4f pfloor<Packet4f> (const Packet4f& a) { return vec_floor(a); }
+template<> EIGEN_STRONG_INLINE Packet4f pabs<Packet4f> (const Packet4f& a) { return vec_abs(a); }
+template<> EIGEN_STRONG_INLINE float pfirst<Packet4f>(const Packet4f& a) { float EIGEN_ALIGN16 x[4]; pstore(x, a); return x[0]; }
+
+template<> EIGEN_STRONG_INLINE Packet4f ploaddup<Packet4f>(const float* from)
+{
+ Packet4f p = pload<Packet4f>(from);
+ return vec_perm(p, p, p16uc_DUPLICATE32_HI);
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f preverse(const Packet4f& a)
+{
+ return reinterpret_cast<Packet4f>(vec_perm(reinterpret_cast<Packet16uc>(a), reinterpret_cast<Packet16uc>(a), p16uc_REVERSE32));
+}
+
+template<> EIGEN_STRONG_INLINE float predux<Packet4f>(const Packet4f& a)
+{
+ Packet4f b, sum;
+ b = vec_sld(a, a, 8);
+ sum = padd<Packet4f>(a, b);
+ b = vec_sld(sum, sum, 4);
+ sum = padd<Packet4f>(sum, b);
+ return pfirst(sum);
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f preduxp<Packet4f>(const Packet4f* vecs)
+{
+ Packet4f v[4], sum[4];
+
+ // It's easier and faster to transpose first and then add as columns
+ // Check: http://www.freevec.org/function/matrix_4x4_transpose_floats for explanation
+ // Do the transpose, first set of moves
+ v[0] = vec_mergeh(vecs[0], vecs[2]);
+ v[1] = vec_mergel(vecs[0], vecs[2]);
+ v[2] = vec_mergeh(vecs[1], vecs[3]);
+ v[3] = vec_mergel(vecs[1], vecs[3]);
+ // Get the resulting vectors
+ sum[0] = vec_mergeh(v[0], v[2]);
+ sum[1] = vec_mergel(v[0], v[2]);
+ sum[2] = vec_mergeh(v[1], v[3]);
+ sum[3] = vec_mergel(v[1], v[3]);
+
+ // Now do the summation:
+ // Lines 0+1
+ sum[0] = padd<Packet4f>(sum[0], sum[1]);
+ // Lines 2+3
+ sum[1] = padd<Packet4f>(sum[2], sum[3]);
+ // Add the results
+ sum[0] = padd<Packet4f>(sum[0], sum[1]);
+
+ return sum[0];
+}
+
+// Other reduction functions:
+// mul
+template<> EIGEN_STRONG_INLINE float predux_mul<Packet4f>(const Packet4f& a)
+{
+ Packet4f prod;
+ prod = pmul(a, vec_sld(a, a, 8));
+ return pfirst(pmul(prod, vec_sld(prod, prod, 4)));
+}
+
+// min
+template<> EIGEN_STRONG_INLINE float predux_min<Packet4f>(const Packet4f& a)
+{
+ Packet4f b, res;
+ b = pmin<Packet4f>(a, vec_sld(a, a, 8));
+ res = pmin<Packet4f>(b, vec_sld(b, b, 4));
+ return pfirst(res);
+}
+
+// max
+template<> EIGEN_STRONG_INLINE float predux_max<Packet4f>(const Packet4f& a)
+{
+ Packet4f b, res;
+ b = pmax<Packet4f>(a, vec_sld(a, a, 8));
+ res = pmax<Packet4f>(b, vec_sld(b, b, 4));
+ return pfirst(res);
+}
+
+EIGEN_DEVICE_FUNC inline void
+ptranspose(PacketBlock<Packet4f,4>& kernel) {
+ Packet4f t0 = vec_mergeh(kernel.packet[0], kernel.packet[2]);
+ Packet4f t1 = vec_mergel(kernel.packet[0], kernel.packet[2]);
+ Packet4f t2 = vec_mergeh(kernel.packet[1], kernel.packet[3]);
+ Packet4f t3 = vec_mergel(kernel.packet[1], kernel.packet[3]);
+ kernel.packet[0] = vec_mergeh(t0, t2);
+ kernel.packet[1] = vec_mergel(t0, t2);
+ kernel.packet[2] = vec_mergeh(t1, t3);
+ kernel.packet[3] = vec_mergel(t1, t3);
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f pblend(const Selector<4>& ifPacket, const Packet4f& thenPacket, const Packet4f& elsePacket) {
+ Packet4ui select = { ifPacket.select[0], ifPacket.select[1], ifPacket.select[2], ifPacket.select[3] };
+ Packet4ui mask = vec_cmpeq(select, reinterpret_cast<Packet4ui>(p4i_ONE));
return vec_sel(elsePacket, thenPacket, mask);
}
+#endif
+
+template<> EIGEN_STRONG_INLINE void prefetch<float>(const float* addr) { EIGEN_ZVECTOR_PREFETCH(addr); }
+template<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f> (const float* from) { return pload<Packet4f>(from); }
+template<> EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const Packet4f& from) { pstore<float>(to, from); }
+template<> EIGEN_STRONG_INLINE Packet4f plset<Packet4f> (const float& a) { return padd<Packet4f>(pset1<Packet4f>(a), p4f_COUNTDOWN); }
+
} // end namespace internal
} // end namespace Eigen
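All of the float changes in this file hinge on one representation switch: on z13 (__ARCH__ < 12) Packet4f stays a struct of two Packet2d halves carrying the four floats widened to double, while on z14 it becomes a native __vector float. A minimal standalone sketch of what the emulated path does for an element-wise op (mock type only, not the Eigen one):

    // Mock of the z13-emulated Packet4f: four floats carried as two double pairs.
    struct EmulatedPacket4f { double v4f[2][2]; };

    EmulatedPacket4f padd_emulated(const EmulatedPacket4f& a, const EmulatedPacket4f& b)
    {
      EmulatedPacket4f c;
      for (int h = 0; h < 2; ++h)        // one Packet2d addition per half, as in the #if branch above
        for (int i = 0; i < 2; ++i)
          c.v4f[h][i] = a.v4f[h][i] + b.v4f[h][i];
      return c;
    }
    // On z14 the same operation is a single vector add, which is why the #else branch
    // collapses padd/psub/pmul/pdiv and friends to one-liners.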
diff --git a/Eigen/src/Core/functors/AssignmentFunctors.h b/Eigen/src/Core/functors/AssignmentFunctors.h
index 4153b877c..9765cc763 100644
--- a/Eigen/src/Core/functors/AssignmentFunctors.h
+++ b/Eigen/src/Core/functors/AssignmentFunctors.h
@@ -144,7 +144,7 @@ template<typename Scalar> struct swap_assign_op {
EIGEN_EMPTY_STRUCT_CTOR(swap_assign_op)
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignCoeff(Scalar& a, const Scalar& b) const
{
-#ifdef __CUDACC__
+#ifdef EIGEN_GPUCC
// FIXME is there some kind of cuda::swap?
Scalar t=b; const_cast<Scalar&>(b)=a; a=t;
#else
diff --git a/Eigen/src/Core/functors/BinaryFunctors.h b/Eigen/src/Core/functors/BinaryFunctors.h
index 96747bac7..401d597d8 100644
--- a/Eigen/src/Core/functors/BinaryFunctors.h
+++ b/Eigen/src/Core/functors/BinaryFunctors.h
@@ -255,7 +255,7 @@ struct scalar_cmp_op<LhsScalar,RhsScalar, cmp_NEQ> : binary_op_base<LhsScalar,Rh
/** \internal
- * \brief Template functor to compute the hypot of two scalars
+ * \brief Template functor to compute the hypot of two \b positive \b and \b real scalars
*
* \sa MatrixBase::stableNorm(), class Redux
*/
@@ -263,22 +263,15 @@ template<typename Scalar>
struct scalar_hypot_op<Scalar,Scalar> : binary_op_base<Scalar,Scalar>
{
EIGEN_EMPTY_STRUCT_CTOR(scalar_hypot_op)
-// typedef typename NumTraits<Scalar>::Real result_type;
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& _x, const Scalar& _y) const
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (const Scalar &x, const Scalar &y) const
{
- EIGEN_USING_STD_MATH(sqrt)
- Scalar p, qp;
- if(_x>_y)
- {
- p = _x;
- qp = _y / p;
- }
- else
- {
- p = _y;
- qp = _x / p;
- }
- return p * sqrt(Scalar(1) + qp*qp);
+ // This functor is used only by hypotNorm, for which it is faster to first apply abs
+ // to all coefficients prior to the reduction through hypot.
+ // This way the functor does not need to call abs on entries that are already positive and
+ // real, and complexes are handled seamlessly as well. Otherwise we would have to handle
+ // both reals and complexes through the same functor...
+ return internal::positive_real_hypot(x,y);
}
};
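The removed operator() above is the classic overflow/underflow-avoiding formulation hypot(x, y) = p * sqrt(1 + (q/p)^2) with p = max, q = min; the new body delegates to internal::positive_real_hypot after hypotNorm has already taken absolute values. A scalar sketch of that scaling trick (a sketch only; not necessarily the exact body of positive_real_hypot):

    #include <algorithm>
    #include <cmath>

    // Overflow/underflow-avoiding hypot for non-negative inputs, as in the removed code above.
    float positive_hypot_sketch(float x, float y)
    {
      float p = std::max(x, y);
      if (p == 0.f) return 0.f;           // guard the 0/0 case
      float q = std::min(x, y) / p;       // q <= 1, so q*q cannot overflow
      return p * std::sqrt(1.f + q * q);
    }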
template<typename Scalar>
@@ -443,7 +436,7 @@ template<typename BinaryOp> struct bind1st_op : BinaryOp {
typedef typename BinaryOp::second_argument_type second_argument_type;
typedef typename BinaryOp::result_type result_type;
- bind1st_op(const first_argument_type &val) : m_value(val) {}
+ EIGEN_DEVICE_FUNC explicit bind1st_op(const first_argument_type &val) : m_value(val) {}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type operator() (const second_argument_type& b) const { return BinaryOp::operator()(m_value,b); }
@@ -462,7 +455,7 @@ template<typename BinaryOp> struct bind2nd_op : BinaryOp {
typedef typename BinaryOp::second_argument_type second_argument_type;
typedef typename BinaryOp::result_type result_type;
- bind2nd_op(const second_argument_type &val) : m_value(val) {}
+ EIGEN_DEVICE_FUNC explicit bind2nd_op(const second_argument_type &val) : m_value(val) {}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type operator() (const first_argument_type& a) const { return BinaryOp::operator()(a,m_value); }
diff --git a/Eigen/src/Core/functors/NullaryFunctors.h b/Eigen/src/Core/functors/NullaryFunctors.h
index 6a30466fb..b03be0269 100644
--- a/Eigen/src/Core/functors/NullaryFunctors.h
+++ b/Eigen/src/Core/functors/NullaryFunctors.h
@@ -44,16 +44,16 @@ struct linspaced_op_impl<Scalar,Packet,/*IsInteger*/false>
{
linspaced_op_impl(const Scalar& low, const Scalar& high, Index num_steps) :
m_low(low), m_high(high), m_size1(num_steps==1 ? 1 : num_steps-1), m_step(num_steps==1 ? Scalar() : (high-low)/Scalar(num_steps-1)),
- m_interPacket(plset<Packet>(0)),
m_flip(numext::abs(high)<numext::abs(low))
{}
template<typename IndexType>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (IndexType i) const {
+ typedef typename NumTraits<Scalar>::Real RealScalar;
if(m_flip)
- return (i==0)? m_low : (m_high - (m_size1-i)*m_step);
+ return (i==0)? m_low : (m_high - RealScalar(m_size1-i)*m_step);
else
- return (i==m_size1)? m_high : (m_low + i*m_step);
+ return (i==m_size1)? m_high : (m_low + RealScalar(i)*m_step);
}
template<typename IndexType>
@@ -63,7 +63,7 @@ struct linspaced_op_impl<Scalar,Packet,/*IsInteger*/false>
// [low, ..., low] + ( [step, ..., step] * ( [i, ..., i] + [0, ..., size] ) )
if(m_flip)
{
- Packet pi = padd(pset1<Packet>(Scalar(i-m_size1)),m_interPacket);
+ Packet pi = plset<Packet>(Scalar(i-m_size1));
Packet res = padd(pset1<Packet>(m_high), pmul(pset1<Packet>(m_step), pi));
if(i==0)
res = pinsertfirst(res, m_low);
@@ -71,7 +71,7 @@ struct linspaced_op_impl<Scalar,Packet,/*IsInteger*/false>
}
else
{
- Packet pi = padd(pset1<Packet>(Scalar(i)),m_interPacket);
+ Packet pi = plset<Packet>(Scalar(i));
Packet res = padd(pset1<Packet>(m_low), pmul(pset1<Packet>(m_step), pi));
if(i==m_size1-unpacket_traits<Packet>::size+1)
res = pinsertlast(res, m_high);
@@ -83,7 +83,6 @@ struct linspaced_op_impl<Scalar,Packet,/*IsInteger*/false>
const Scalar m_high;
const Index m_size1;
const Scalar m_step;
- const Packet m_interPacket;
const bool m_flip;
};
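Both packetOp branches above now build the index packet with plset, which for an N-wide packet yields { a, a+1, ..., a+N-1 }; since padd(pset1<Packet>(Scalar(i)), plset<Packet>(0)) is exactly plset<Packet>(Scalar(i)), the cached m_interPacket member can be dropped. A scalar model of the non-flipped vector path for a 4-wide packet (illustrative only):

    // Scalar model of the vectorized branch: res = pset1(m_low) + pset1(m_step) * plset(i).
    void linspaced_packet_sketch(float low, float step, long i, float out[4])
    {
      for (int k = 0; k < 4; ++k)
        out[k] = low + step * float(i + k);   // plset<Packet4f>(float(i)) == { i, i+1, i+2, i+3 }
    }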
diff --git a/Eigen/src/Core/functors/StlFunctors.h b/Eigen/src/Core/functors/StlFunctors.h
index 6df3fa501..9c1d75850 100644
--- a/Eigen/src/Core/functors/StlFunctors.h
+++ b/Eigen/src/Core/functors/StlFunctors.h
@@ -83,13 +83,17 @@ struct functor_traits<std::binder1st<T> >
{ enum { Cost = functor_traits<T>::Cost, PacketAccess = false }; };
#endif
+#if (__cplusplus < 201703L) && (EIGEN_COMP_MSVC < 1910)
+// std::unary_negate is deprecated since c++17 and will be removed in c++20
template<typename T>
struct functor_traits<std::unary_negate<T> >
{ enum { Cost = 1 + functor_traits<T>::Cost, PacketAccess = false }; };
+// std::binary_negate is deprecated since c++17 and will be removed in c++20
template<typename T>
struct functor_traits<std::binary_negate<T> >
{ enum { Cost = 1 + functor_traits<T>::Cost, PacketAccess = false }; };
+#endif
#ifdef EIGEN_STDEXT_SUPPORT
diff --git a/Eigen/src/Core/functors/UnaryFunctors.h b/Eigen/src/Core/functors/UnaryFunctors.h
index bfc046556..c1cc2ab3b 100644
--- a/Eigen/src/Core/functors/UnaryFunctors.h
+++ b/Eigen/src/Core/functors/UnaryFunctors.h
@@ -701,7 +701,7 @@ template<typename Scalar> struct scalar_isnan_op {
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE result_type operator() (const Scalar& a) const {
#if defined(__SYCL_DEVICE_ONLY__)
return numext::isnan(a);
-#else
+#else
return (numext::isnan)(a);
#endif
}
@@ -815,7 +815,7 @@ struct scalar_sign_op<Scalar,true> {
template<typename Scalar>
struct functor_traits<scalar_sign_op<Scalar> >
{ enum {
- Cost =
+ Cost =
NumTraits<Scalar>::IsComplex
? ( 8*NumTraits<Scalar>::MulCost ) // roughly
: ( 3*NumTraits<Scalar>::AddCost),
@@ -823,6 +823,34 @@ struct functor_traits<scalar_sign_op<Scalar> >
};
};
+/** \internal
+ * \brief Template functor to compute the logistic function of a scalar
+ * \sa class CwiseUnaryOp, ArrayBase::logistic()
+ */
+template <typename T>
+struct scalar_logistic_op {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_logistic_op)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T operator()(const T& x) const {
+ const T one = T(1);
+ return one / (one + numext::exp(-x));
+ }
+
+ template <typename Packet> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Packet packetOp(const Packet& x) const {
+ const Packet one = pset1<Packet>(T(1));
+ return pdiv(one, padd(one, pexp(pnegate(x))));
+ }
+};
+template <typename T>
+struct functor_traits<scalar_logistic_op<T> > {
+ enum {
+ Cost = NumTraits<T>::AddCost * 2 + NumTraits<T>::MulCost * 6,
+ PacketAccess = packet_traits<T>::HasAdd && packet_traits<T>::HasDiv &&
+ packet_traits<T>::HasNegate && packet_traits<T>::HasExp
+ };
+};
+
+
} // end namespace internal
} // end namespace Eigen
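The new scalar_logistic_op computes the standard logistic (sigmoid) function; its packetOp member is the same formula spelled with pdiv/padd/pexp/pnegate. A scalar sketch and a hypothetical way to apply it through unaryExpr (the \sa tag suggests an ArrayBase::logistic() wrapper is wired up elsewhere in this change):

    #include <cmath>

    // Scalar equivalent of scalar_logistic_op<T>::operator() above.
    float logistic_sketch(float x)
    {
      return 1.0f / (1.0f + std::exp(-x));
    }

    // Hypothetical usage through a cwise expression (illustrative, not part of the patch):
    //   Eigen::ArrayXf y = x.unaryExpr(Eigen::internal::scalar_logistic_op<float>());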
diff --git a/Eigen/src/Core/products/GeneralBlockPanelKernel.h b/Eigen/src/Core/products/GeneralBlockPanelKernel.h
index 45230bce5..b012691c1 100644
--- a/Eigen/src/Core/products/GeneralBlockPanelKernel.h
+++ b/Eigen/src/Core/products/GeneralBlockPanelKernel.h
@@ -390,6 +390,7 @@ public:
typedef typename conditional<Vectorizable,_LhsPacket,LhsScalar>::type LhsPacket;
typedef typename conditional<Vectorizable,_RhsPacket,RhsScalar>::type RhsPacket;
typedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;
+ typedef LhsPacket LhsPacket4Packing;
typedef ResPacket AccPacket;
@@ -496,6 +497,7 @@ public:
typedef typename conditional<Vectorizable,_LhsPacket,LhsScalar>::type LhsPacket;
typedef typename conditional<Vectorizable,_RhsPacket,RhsScalar>::type RhsPacket;
typedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;
+ typedef LhsPacket LhsPacket4Packing;
typedef ResPacket AccPacket;
@@ -580,7 +582,7 @@ DoublePacket<Packet> padd(const DoublePacket<Packet> &a, const DoublePacket<Pack
}
template<typename Packet>
-const DoublePacket<Packet>& predux_downto4(const DoublePacket<Packet> &a)
+const DoublePacket<Packet>& predux_half_dowto4(const DoublePacket<Packet> &a)
{
return a;
}
@@ -626,6 +628,7 @@ public:
typedef typename packet_traits<Scalar>::type ScalarPacket;
typedef DoublePacket<RealPacket> DoublePacketType;
+ typedef typename conditional<Vectorizable,ScalarPacket,Scalar>::type LhsPacket4Packing;
typedef typename conditional<Vectorizable,RealPacket, Scalar>::type LhsPacket;
typedef typename conditional<Vectorizable,DoublePacketType,Scalar>::type RhsPacket;
typedef typename conditional<Vectorizable,ScalarPacket,Scalar>::type ResPacket;
@@ -777,6 +780,7 @@ public:
typedef typename conditional<Vectorizable,_LhsPacket,LhsScalar>::type LhsPacket;
typedef typename conditional<Vectorizable,_RhsPacket,RhsScalar>::type RhsPacket;
typedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;
+ typedef LhsPacket LhsPacket4Packing;
typedef ResPacket AccPacket;
@@ -972,7 +976,7 @@ void gebp_kernel<LhsScalar,RhsScalar,Index,DataMapper,mr,nr,ConjugateLhs,Conjuga
EIGEN_ASM_COMMENT("begin step of gebp micro kernel 3pX4"); \
EIGEN_ASM_COMMENT("Note: these asm comments work around bug 935!"); \
internal::prefetch(blA+(3*K+16)*LhsProgress); \
- if (EIGEN_ARCH_ARM) { internal::prefetch(blB+(4*K+16)*RhsProgress); } /* Bug 953 */ \
+ if (EIGEN_ARCH_ARM || EIGEN_ARCH_MIPS) { internal::prefetch(blB+(4*K+16)*RhsProgress); } /* Bug 953 */ \
traits.loadLhs(&blA[(0+3*K)*LhsProgress], A0); \
traits.loadLhs(&blA[(1+3*K)*LhsProgress], A1); \
traits.loadLhs(&blA[(2+3*K)*LhsProgress], A2); \
@@ -1025,9 +1029,9 @@ void gebp_kernel<LhsScalar,RhsScalar,Index,DataMapper,mr,nr,ConjugateLhs,Conjuga
ResPacket R0, R1, R2;
ResPacket alphav = pset1<ResPacket>(alpha);
- R0 = r0.loadPacket(0 * Traits::ResPacketSize);
- R1 = r0.loadPacket(1 * Traits::ResPacketSize);
- R2 = r0.loadPacket(2 * Traits::ResPacketSize);
+ R0 = r0.template loadPacket<ResPacket>(0 * Traits::ResPacketSize);
+ R1 = r0.template loadPacket<ResPacket>(1 * Traits::ResPacketSize);
+ R2 = r0.template loadPacket<ResPacket>(2 * Traits::ResPacketSize);
traits.acc(C0, alphav, R0);
traits.acc(C4, alphav, R1);
traits.acc(C8, alphav, R2);
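The repeated edits from r0.loadPacket(...) to r0.template loadPacket<ResPacket>(...) in these hunks are all the same C++ rule: loadPacket is now a member template taking the packet type, and because the mapper r0 has a type that depends on the kernel's template parameters, the call needs the template disambiguator. A minimal standalone illustration of the rule (hypothetical Mapper type, not Eigen's DataMapper):

    // Minimal illustration of the 'template' disambiguator required above.
    struct Mapper {
      template <typename Packet> Packet loadPacket(int i) const { return Packet(i); }
    };

    template <typename ResPacket, typename MapperT>
    ResPacket load_first(const MapperT& r0) {
      // r0 has a dependent type, so 'template' must precede loadPacket<ResPacket>:
      return r0.template loadPacket<ResPacket>(0);
    }

    // e.g. load_first<int>(Mapper{}) == 0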
@@ -1035,9 +1039,9 @@ void gebp_kernel<LhsScalar,RhsScalar,Index,DataMapper,mr,nr,ConjugateLhs,Conjuga
r0.storePacket(1 * Traits::ResPacketSize, R1);
r0.storePacket(2 * Traits::ResPacketSize, R2);
- R0 = r1.loadPacket(0 * Traits::ResPacketSize);
- R1 = r1.loadPacket(1 * Traits::ResPacketSize);
- R2 = r1.loadPacket(2 * Traits::ResPacketSize);
+ R0 = r1.template loadPacket<ResPacket>(0 * Traits::ResPacketSize);
+ R1 = r1.template loadPacket<ResPacket>(1 * Traits::ResPacketSize);
+ R2 = r1.template loadPacket<ResPacket>(2 * Traits::ResPacketSize);
traits.acc(C1, alphav, R0);
traits.acc(C5, alphav, R1);
traits.acc(C9, alphav, R2);
@@ -1045,9 +1049,9 @@ void gebp_kernel<LhsScalar,RhsScalar,Index,DataMapper,mr,nr,ConjugateLhs,Conjuga
r1.storePacket(1 * Traits::ResPacketSize, R1);
r1.storePacket(2 * Traits::ResPacketSize, R2);
- R0 = r2.loadPacket(0 * Traits::ResPacketSize);
- R1 = r2.loadPacket(1 * Traits::ResPacketSize);
- R2 = r2.loadPacket(2 * Traits::ResPacketSize);
+ R0 = r2.template loadPacket<ResPacket>(0 * Traits::ResPacketSize);
+ R1 = r2.template loadPacket<ResPacket>(1 * Traits::ResPacketSize);
+ R2 = r2.template loadPacket<ResPacket>(2 * Traits::ResPacketSize);
traits.acc(C2, alphav, R0);
traits.acc(C6, alphav, R1);
traits.acc(C10, alphav, R2);
@@ -1055,9 +1059,9 @@ void gebp_kernel<LhsScalar,RhsScalar,Index,DataMapper,mr,nr,ConjugateLhs,Conjuga
r2.storePacket(1 * Traits::ResPacketSize, R1);
r2.storePacket(2 * Traits::ResPacketSize, R2);
- R0 = r3.loadPacket(0 * Traits::ResPacketSize);
- R1 = r3.loadPacket(1 * Traits::ResPacketSize);
- R2 = r3.loadPacket(2 * Traits::ResPacketSize);
+ R0 = r3.template loadPacket<ResPacket>(0 * Traits::ResPacketSize);
+ R1 = r3.template loadPacket<ResPacket>(1 * Traits::ResPacketSize);
+ R2 = r3.template loadPacket<ResPacket>(2 * Traits::ResPacketSize);
traits.acc(C3, alphav, R0);
traits.acc(C7, alphav, R1);
traits.acc(C11, alphav, R2);
@@ -1134,9 +1138,9 @@ void gebp_kernel<LhsScalar,RhsScalar,Index,DataMapper,mr,nr,ConjugateLhs,Conjuga
ResPacket R0, R1, R2;
ResPacket alphav = pset1<ResPacket>(alpha);
- R0 = r0.loadPacket(0 * Traits::ResPacketSize);
- R1 = r0.loadPacket(1 * Traits::ResPacketSize);
- R2 = r0.loadPacket(2 * Traits::ResPacketSize);
+ R0 = r0.template loadPacket<ResPacket>(0 * Traits::ResPacketSize);
+ R1 = r0.template loadPacket<ResPacket>(1 * Traits::ResPacketSize);
+ R2 = r0.template loadPacket<ResPacket>(2 * Traits::ResPacketSize);
traits.acc(C0, alphav, R0);
traits.acc(C4, alphav, R1);
traits.acc(C8, alphav, R2);
@@ -1244,10 +1248,10 @@ void gebp_kernel<LhsScalar,RhsScalar,Index,DataMapper,mr,nr,ConjugateLhs,Conjuga
ResPacket R0, R1, R2, R3;
ResPacket alphav = pset1<ResPacket>(alpha);
- R0 = r0.loadPacket(0 * Traits::ResPacketSize);
- R1 = r0.loadPacket(1 * Traits::ResPacketSize);
- R2 = r1.loadPacket(0 * Traits::ResPacketSize);
- R3 = r1.loadPacket(1 * Traits::ResPacketSize);
+ R0 = r0.template loadPacket<ResPacket>(0 * Traits::ResPacketSize);
+ R1 = r0.template loadPacket<ResPacket>(1 * Traits::ResPacketSize);
+ R2 = r1.template loadPacket<ResPacket>(0 * Traits::ResPacketSize);
+ R3 = r1.template loadPacket<ResPacket>(1 * Traits::ResPacketSize);
traits.acc(C0, alphav, R0);
traits.acc(C4, alphav, R1);
traits.acc(C1, alphav, R2);
@@ -1257,10 +1261,10 @@ void gebp_kernel<LhsScalar,RhsScalar,Index,DataMapper,mr,nr,ConjugateLhs,Conjuga
r1.storePacket(0 * Traits::ResPacketSize, R2);
r1.storePacket(1 * Traits::ResPacketSize, R3);
- R0 = r2.loadPacket(0 * Traits::ResPacketSize);
- R1 = r2.loadPacket(1 * Traits::ResPacketSize);
- R2 = r3.loadPacket(0 * Traits::ResPacketSize);
- R3 = r3.loadPacket(1 * Traits::ResPacketSize);
+ R0 = r2.template loadPacket<ResPacket>(0 * Traits::ResPacketSize);
+ R1 = r2.template loadPacket<ResPacket>(1 * Traits::ResPacketSize);
+ R2 = r3.template loadPacket<ResPacket>(0 * Traits::ResPacketSize);
+ R3 = r3.template loadPacket<ResPacket>(1 * Traits::ResPacketSize);
traits.acc(C2, alphav, R0);
traits.acc(C6, alphav, R1);
traits.acc(C3, alphav, R2);
@@ -1337,8 +1341,8 @@ void gebp_kernel<LhsScalar,RhsScalar,Index,DataMapper,mr,nr,ConjugateLhs,Conjuga
ResPacket R0, R1;
ResPacket alphav = pset1<ResPacket>(alpha);
- R0 = r0.loadPacket(0 * Traits::ResPacketSize);
- R1 = r0.loadPacket(1 * Traits::ResPacketSize);
+ R0 = r0.template loadPacket<ResPacket>(0 * Traits::ResPacketSize);
+ R1 = r0.template loadPacket<ResPacket>(1 * Traits::ResPacketSize);
traits.acc(C0, alphav, R0);
traits.acc(C4, alphav, R1);
r0.storePacket(0 * Traits::ResPacketSize, R0);
@@ -1431,15 +1435,15 @@ void gebp_kernel<LhsScalar,RhsScalar,Index,DataMapper,mr,nr,ConjugateLhs,Conjuga
ResPacket R0, R1;
ResPacket alphav = pset1<ResPacket>(alpha);
- R0 = r0.loadPacket(0 * Traits::ResPacketSize);
- R1 = r1.loadPacket(0 * Traits::ResPacketSize);
+ R0 = r0.template loadPacket<ResPacket>(0 * Traits::ResPacketSize);
+ R1 = r1.template loadPacket<ResPacket>(0 * Traits::ResPacketSize);
traits.acc(C0, alphav, R0);
traits.acc(C1, alphav, R1);
r0.storePacket(0 * Traits::ResPacketSize, R0);
r1.storePacket(0 * Traits::ResPacketSize, R1);
- R0 = r2.loadPacket(0 * Traits::ResPacketSize);
- R1 = r3.loadPacket(0 * Traits::ResPacketSize);
+ R0 = r2.template loadPacket<ResPacket>(0 * Traits::ResPacketSize);
+ R1 = r3.template loadPacket<ResPacket>(0 * Traits::ResPacketSize);
traits.acc(C2, alphav, R0);
traits.acc(C3, alphav, R1);
r2.storePacket(0 * Traits::ResPacketSize, R0);
@@ -1504,7 +1508,7 @@ void gebp_kernel<LhsScalar,RhsScalar,Index,DataMapper,mr,nr,ConjugateLhs,Conjuga
#undef EIGEN_GEBGP_ONESTEP
ResPacket R0;
ResPacket alphav = pset1<ResPacket>(alpha);
- R0 = r0.loadPacket(0 * Traits::ResPacketSize);
+ R0 = r0.template loadPacket<ResPacket>(0 * Traits::ResPacketSize);
traits.acc(C0, alphav, R0);
r0.storePacket(0 * Traits::ResPacketSize, R0);
}
@@ -1523,13 +1527,13 @@ void gebp_kernel<LhsScalar,RhsScalar,Index,DataMapper,mr,nr,ConjugateLhs,Conjuga
prefetch(&blA[0]);
const RhsScalar* blB = &blockB[j2*strideB+offsetB*nr];
- // The following piece of code wont work for 512 bit registers
+ // The following piece of code won't work for 512 bit registers
// Moreover, if LhsProgress==8 it assumes that there is a half packet of the same size
// as nr (which is currently 4) for the return type.
- typedef typename unpacket_traits<SResPacket>::half SResPacketHalf;
+ const int SResPacketHalfSize = unpacket_traits<typename unpacket_traits<SResPacket>::half>::size;
if ((SwappedTraits::LhsProgress % 4) == 0 &&
(SwappedTraits::LhsProgress <= 8) &&
- (SwappedTraits::LhsProgress!=8 || unpacket_traits<SResPacketHalf>::size==nr))
+ (SwappedTraits::LhsProgress!=8 || SResPacketHalfSize==nr))
{
SAccPacket C0, C1, C2, C3;
straits.initAcc(C0);
@@ -1596,13 +1600,13 @@ void gebp_kernel<LhsScalar,RhsScalar,Index,DataMapper,mr,nr,ConjugateLhs,Conjuga
SRhsPacketHalf b0;
straits.loadLhsUnaligned(blB, a0);
straits.loadRhs(blA, b0);
- SAccPacketHalf c0 = predux_downto4(C0);
+ SAccPacketHalf c0 = predux_half_dowto4(C0);
straits.madd(a0,b0,c0,b0);
straits.acc(c0, alphav, R);
}
else
{
- straits.acc(predux_downto4(C0), alphav, R);
+ straits.acc(predux_half_dowto4(C0), alphav, R);
}
res.scatterPacket(i, j2, R);
}
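
A note on the renamed reduction used just above: predux_half_dowto4 (formerly predux_downto4) folds an accumulator packet into one of half its width before the final store. Below is a minimal scalar model of the assumed add-the-two-halves semantics, for illustration only; it is not Eigen's SIMD implementation.

    #include <array>
    #include <cstddef>
    #include <iostream>

    // Scalar stand-in for predux_half_dowto4: fold an N-lane accumulator into
    // N/2 lanes by adding the upper half onto the lower half (assumed semantics).
    template <typename T, std::size_t N>
    std::array<T, N / 2> predux_half_model(const std::array<T, N>& a) {
      std::array<T, N / 2> r{};
      for (std::size_t i = 0; i < N / 2; ++i) r[i] = a[i] + a[i + N / 2];
      return r;
    }

    int main() {
      std::array<float, 8> acc{1, 2, 3, 4, 5, 6, 7, 8};
      for (float v : predux_half_model(acc)) std::cout << v << ' ';  // 6 8 10 12
      std::cout << '\n';
    }
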
@@ -1685,19 +1689,18 @@ void gebp_kernel<LhsScalar,RhsScalar,Index,DataMapper,mr,nr,ConjugateLhs,Conjuga
//
// 32 33 34 35 ...
// 36 37 38 39 ...
-template<typename Scalar, typename Index, typename DataMapper, int Pack1, int Pack2, bool Conjugate, bool PanelMode>
-struct gemm_pack_lhs<Scalar, Index, DataMapper, Pack1, Pack2, ColMajor, Conjugate, PanelMode>
+template<typename Scalar, typename Index, typename DataMapper, int Pack1, int Pack2, typename Packet, bool Conjugate, bool PanelMode>
+struct gemm_pack_lhs<Scalar, Index, DataMapper, Pack1, Pack2, Packet, ColMajor, Conjugate, PanelMode>
{
typedef typename DataMapper::LinearMapper LinearMapper;
EIGEN_DONT_INLINE void operator()(Scalar* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride=0, Index offset=0);
};
-template<typename Scalar, typename Index, typename DataMapper, int Pack1, int Pack2, bool Conjugate, bool PanelMode>
-EIGEN_DONT_INLINE void gemm_pack_lhs<Scalar, Index, DataMapper, Pack1, Pack2, ColMajor, Conjugate, PanelMode>
+template<typename Scalar, typename Index, typename DataMapper, int Pack1, int Pack2, typename Packet, bool Conjugate, bool PanelMode>
+EIGEN_DONT_INLINE void gemm_pack_lhs<Scalar, Index, DataMapper, Pack1, Pack2, Packet, ColMajor, Conjugate, PanelMode>
::operator()(Scalar* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride, Index offset)
{
- typedef typename packet_traits<Scalar>::type Packet;
- enum { PacketSize = packet_traits<Scalar>::size };
+ enum { PacketSize = unpacket_traits<Packet>::size };
EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK LHS");
EIGEN_UNUSED_VARIABLE(stride);
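
The recurring signature change in this file: gemm_pack_lhs gains a Packet template parameter, fed with Traits::LhsPacket4Packing at the call sites further below, so the width used to pack the LHS is no longer hard-wired to packet_traits<Scalar>::size and may differ from the kernel's compute packet. A toy sketch of the idea with hypothetical packet types (not Eigen's code):

    #include <cstddef>

    // Pretend SIMD packet types, for illustration only.
    struct Packet8f { float v[8]; };
    struct Packet4f { float v[4]; };

    template <typename Packet> struct unpacket_traits;
    template <> struct unpacket_traits<Packet8f> { enum { size = 8 }; };
    template <> struct unpacket_traits<Packet4f> { enum { size = 4 }; };

    // before: enum { PacketSize = packet_traits<Scalar>::size };  // tied to Scalar only
    // after : the caller chooses the packet, e.g. Traits::LhsPacket4Packing.
    template <typename Scalar, typename Packet>
    constexpr std::size_t pack_width() { return unpacket_traits<Packet>::size; }

    static_assert(pack_width<float, Packet8f>() == 8, "wide packing for the float path");
    static_assert(pack_width<float, Packet4f>() == 4, "narrower packing if the traits ask for it");
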
@@ -1725,9 +1728,9 @@ EIGEN_DONT_INLINE void gemm_pack_lhs<Scalar, Index, DataMapper, Pack1, Pack2, Co
for(Index k=0; k<depth; k++)
{
Packet A, B, C;
- A = lhs.loadPacket(i+0*PacketSize, k);
- B = lhs.loadPacket(i+1*PacketSize, k);
- C = lhs.loadPacket(i+2*PacketSize, k);
+ A = lhs.template loadPacket<Packet>(i+0*PacketSize, k);
+ B = lhs.template loadPacket<Packet>(i+1*PacketSize, k);
+ C = lhs.template loadPacket<Packet>(i+2*PacketSize, k);
pstore(blockA+count, cj.pconj(A)); count+=PacketSize;
pstore(blockA+count, cj.pconj(B)); count+=PacketSize;
pstore(blockA+count, cj.pconj(C)); count+=PacketSize;
@@ -1745,8 +1748,8 @@ EIGEN_DONT_INLINE void gemm_pack_lhs<Scalar, Index, DataMapper, Pack1, Pack2, Co
for(Index k=0; k<depth; k++)
{
Packet A, B;
- A = lhs.loadPacket(i+0*PacketSize, k);
- B = lhs.loadPacket(i+1*PacketSize, k);
+ A = lhs.template loadPacket<Packet>(i+0*PacketSize, k);
+ B = lhs.template loadPacket<Packet>(i+1*PacketSize, k);
pstore(blockA+count, cj.pconj(A)); count+=PacketSize;
pstore(blockA+count, cj.pconj(B)); count+=PacketSize;
}
@@ -1763,7 +1766,7 @@ EIGEN_DONT_INLINE void gemm_pack_lhs<Scalar, Index, DataMapper, Pack1, Pack2, Co
for(Index k=0; k<depth; k++)
{
Packet A;
- A = lhs.loadPacket(i+0*PacketSize, k);
+ A = lhs.template loadPacket<Packet>(i+0*PacketSize, k);
pstore(blockA+count, cj.pconj(A));
count+=PacketSize;
}
@@ -1793,19 +1796,18 @@ EIGEN_DONT_INLINE void gemm_pack_lhs<Scalar, Index, DataMapper, Pack1, Pack2, Co
}
}
-template<typename Scalar, typename Index, typename DataMapper, int Pack1, int Pack2, bool Conjugate, bool PanelMode>
-struct gemm_pack_lhs<Scalar, Index, DataMapper, Pack1, Pack2, RowMajor, Conjugate, PanelMode>
+template<typename Scalar, typename Index, typename DataMapper, int Pack1, int Pack2, typename Packet, bool Conjugate, bool PanelMode>
+struct gemm_pack_lhs<Scalar, Index, DataMapper, Pack1, Pack2, Packet, RowMajor, Conjugate, PanelMode>
{
typedef typename DataMapper::LinearMapper LinearMapper;
EIGEN_DONT_INLINE void operator()(Scalar* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride=0, Index offset=0);
};
-template<typename Scalar, typename Index, typename DataMapper, int Pack1, int Pack2, bool Conjugate, bool PanelMode>
-EIGEN_DONT_INLINE void gemm_pack_lhs<Scalar, Index, DataMapper, Pack1, Pack2, RowMajor, Conjugate, PanelMode>
+template<typename Scalar, typename Index, typename DataMapper, int Pack1, int Pack2, typename Packet, bool Conjugate, bool PanelMode>
+EIGEN_DONT_INLINE void gemm_pack_lhs<Scalar, Index, DataMapper, Pack1, Pack2, Packet, RowMajor, Conjugate, PanelMode>
::operator()(Scalar* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride, Index offset)
{
- typedef typename packet_traits<Scalar>::type Packet;
- enum { PacketSize = packet_traits<Scalar>::size };
+ enum { PacketSize = unpacket_traits<Packet>::size };
EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK LHS");
EIGEN_UNUSED_VARIABLE(stride);
@@ -1837,7 +1839,7 @@ EIGEN_DONT_INLINE void gemm_pack_lhs<Scalar, Index, DataMapper, Pack1, Pack2, Ro
for (Index m = 0; m < pack; m += PacketSize)
{
PacketBlock<Packet> kernel;
- for (int p = 0; p < PacketSize; ++p) kernel.packet[p] = lhs.loadPacket(i+p+m, k);
+ for (int p = 0; p < PacketSize; ++p) kernel.packet[p] = lhs.template loadPacket<Packet>(i+p+m, k);
ptranspose(kernel);
for (int p = 0; p < PacketSize; ++p) pstore(blockA+count+m+(pack)*p, cj.pconj(kernel.packet[p]));
}
@@ -1924,7 +1926,7 @@ EIGEN_DONT_INLINE void gemm_pack_rhs<Scalar, Index, DataMapper, nr, ColMajor, Co
// const Scalar* b6 = &rhs[(j2+6)*rhsStride];
// const Scalar* b7 = &rhs[(j2+7)*rhsStride];
// Index k=0;
-// if(PacketSize==8) // TODO enbale vectorized transposition for PacketSize==4
+// if(PacketSize==8) // TODO enable vectorized transposition for PacketSize==4
// {
// for(; k<peeled_k; k+=PacketSize) {
// PacketBlock<Packet> kernel;
@@ -1971,10 +1973,10 @@ EIGEN_DONT_INLINE void gemm_pack_rhs<Scalar, Index, DataMapper, nr, ColMajor, Co
{
for(; k<peeled_k; k+=PacketSize) {
PacketBlock<Packet,(PacketSize%4)==0?4:PacketSize> kernel;
- kernel.packet[0] = dm0.loadPacket(k);
- kernel.packet[1%PacketSize] = dm1.loadPacket(k);
- kernel.packet[2%PacketSize] = dm2.loadPacket(k);
- kernel.packet[3%PacketSize] = dm3.loadPacket(k);
+ kernel.packet[0 ] = dm0.template loadPacket<Packet>(k);
+ kernel.packet[1%PacketSize] = dm1.template loadPacket<Packet>(k);
+ kernel.packet[2%PacketSize] = dm2.template loadPacket<Packet>(k);
+ kernel.packet[3%PacketSize] = dm3.template loadPacket<Packet>(k);
ptranspose(kernel);
pstoreu(blockB+count+0*PacketSize, cj.pconj(kernel.packet[0]));
pstoreu(blockB+count+1*PacketSize, cj.pconj(kernel.packet[1%PacketSize]));
@@ -2075,7 +2077,7 @@ EIGEN_DONT_INLINE void gemm_pack_rhs<Scalar, Index, DataMapper, nr, RowMajor, Co
for(Index k=0; k<depth; k++)
{
if (PacketSize==4) {
- Packet A = rhs.loadPacket(k, j2);
+ Packet A = rhs.template loadPacket<Packet>(k, j2);
pstoreu(blockB+count, cj.pconj(A));
count += PacketSize;
} else {
diff --git a/Eigen/src/Core/products/GeneralMatrixMatrix.h b/Eigen/src/Core/products/GeneralMatrixMatrix.h
index 6440e1d09..f49abcad5 100644
--- a/Eigen/src/Core/products/GeneralMatrixMatrix.h
+++ b/Eigen/src/Core/products/GeneralMatrixMatrix.h
@@ -75,7 +75,7 @@ static void run(Index rows, Index cols, Index depth,
Index mc = (std::min)(rows,blocking.mc()); // cache block size along the M direction
Index nc = (std::min)(cols,blocking.nc()); // cache block size along the N direction
- gemm_pack_lhs<LhsScalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
+ gemm_pack_lhs<LhsScalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, typename Traits::LhsPacket4Packing, LhsStorageOrder> pack_lhs;
gemm_pack_rhs<RhsScalar, Index, RhsMapper, Traits::nr, RhsStorageOrder> pack_rhs;
gebp_kernel<LhsScalar, RhsScalar, Index, ResMapper, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp;
@@ -108,7 +108,7 @@ static void run(Index rows, Index cols, Index depth,
// i.e., we test that info[tid].users equals 0.
// Then, we set info[tid].users to the number of threads to mark that all other threads are going to use it.
while(info[tid].users!=0) {}
- info[tid].users += threads;
+ info[tid].users = threads;
pack_lhs(blockA+info[tid].lhs_start*actual_kc, lhs.getSubMapper(info[tid].lhs_start,k), actual_kc, info[tid].lhs_length);
@@ -146,7 +146,9 @@ static void run(Index rows, Index cols, Index depth,
// Release all the sub blocks A'_i of A' for the current thread,
// i.e., we simply decrement the number of users by 1
for(Index i=0; i<threads; ++i)
+#if !EIGEN_HAS_CXX11_ATOMIC
#pragma omp atomic
+#endif
info[i].users -= 1;
}
}
@@ -427,8 +429,14 @@ struct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct>
template<typename Dst>
static void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
{
- if((rhs.rows()+dst.rows()+dst.cols())<20 && rhs.rows()>0)
- lazyproduct::evalTo(dst, lhs, rhs);
+ // See http://eigen.tuxfamily.org/bz/show_bug.cgi?id=404 for a discussion and helper program
+ // to determine the following heuristic.
+ // EIGEN_GEMM_TO_COEFFBASED_THRESHOLD is typically defined to 20 in GeneralProduct.h,
+ // unless it has been specialized by the user or for a given architecture.
+ // Note that the condition rhs.rows()>0 was required because lazy product is (was?) not happy with empty inputs.
+ // I'm not sure it is still required.
+ if((rhs.rows()+dst.rows()+dst.cols())<EIGEN_GEMM_TO_COEFFBASED_THRESHOLD && rhs.rows()>0)
+ lazyproduct::eval_dynamic(dst, lhs, rhs, internal::assign_op<typename Dst::Scalar,Scalar>());
else
{
dst.setZero();
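
A compact model of the heuristic described in the comment above, with kThreshold standing in for EIGEN_GEMM_TO_COEFFBASED_THRESHOLD (documented above as typically 20); the two outcomes are placeholders for lazyproduct::eval_dynamic and the blocked GEBP path:

    #include <iostream>

    constexpr long kThreshold = 20;  // stand-in for EIGEN_GEMM_TO_COEFFBASED_THRESHOLD

    // True when the product is small enough to skip packing/blocking entirely.
    bool useCoeffBasedProduct(long dstRows, long dstCols, long rhsRows) {
      return (rhsRows + dstRows + dstCols) < kThreshold && rhsRows > 0;
    }

    int main() {
      std::cout << useCoeffBasedProduct(4, 4, 4)        // 1: tiny product, evaluate lazily
                << useCoeffBasedProduct(512, 512, 512)  // 0: large product, blocked GEMM
                << '\n';
    }
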
@@ -439,8 +447,8 @@ struct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct>
template<typename Dst>
static void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
{
- if((rhs.rows()+dst.rows()+dst.cols())<20 && rhs.rows()>0)
- lazyproduct::addTo(dst, lhs, rhs);
+ if((rhs.rows()+dst.rows()+dst.cols())<EIGEN_GEMM_TO_COEFFBASED_THRESHOLD && rhs.rows()>0)
+ lazyproduct::eval_dynamic(dst, lhs, rhs, internal::add_assign_op<typename Dst::Scalar,Scalar>());
else
scaleAndAddTo(dst,lhs, rhs, Scalar(1));
}
@@ -448,8 +456,8 @@ struct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct>
template<typename Dst>
static void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
{
- if((rhs.rows()+dst.rows()+dst.cols())<20 && rhs.rows()>0)
- lazyproduct::subTo(dst, lhs, rhs);
+ if((rhs.rows()+dst.rows()+dst.cols())<EIGEN_GEMM_TO_COEFFBASED_THRESHOLD && rhs.rows()>0)
+ lazyproduct::eval_dynamic(dst, lhs, rhs, internal::sub_assign_op<typename Dst::Scalar,Scalar>());
else
scaleAndAddTo(dst, lhs, rhs, Scalar(-1));
}
diff --git a/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h b/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h
index 7122efa60..ec2825bf0 100644
--- a/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h
+++ b/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h
@@ -84,7 +84,7 @@ struct general_matrix_matrix_triangular_product<Index,LhsScalar,LhsStorageOrder,
ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, blocking.blockA());
ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, blocking.blockB());
- gemm_pack_lhs<LhsScalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
+ gemm_pack_lhs<LhsScalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, typename Traits::LhsPacket4Packing, LhsStorageOrder> pack_lhs;
gemm_pack_rhs<RhsScalar, Index, RhsMapper, Traits::nr, RhsStorageOrder> pack_rhs;
gebp_kernel<LhsScalar, RhsScalar, Index, ResMapper, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp;
tribb_kernel<LhsScalar, RhsScalar, Index, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs, UpLo> sybb;
@@ -110,7 +110,6 @@ struct general_matrix_matrix_triangular_product<Index,LhsScalar,LhsStorageOrder,
gebp(res.getSubMapper(i2, 0), blockA, blockB, actual_mc, actual_kc,
(std::min)(size,i2), alpha, -1, -1, 0, 0);
-
sybb(_res+resStride*i2 + i2, resStride, blockA, blockB + actual_kc*i2, actual_mc, actual_kc, alpha);
if (UpLo==Upper)
@@ -160,7 +159,7 @@ struct tribb_kernel
if(UpLo==Upper)
gebp_kernel(res.getSubMapper(0, j), blockA, actual_b, j, depth, actualBlockSize, alpha,
-1, -1, 0, 0);
-
+
// selfadjoint micro block
{
Index i = j;
@@ -168,6 +167,7 @@ struct tribb_kernel
// 1 - apply the kernel on the temporary buffer
gebp_kernel(ResMapper(buffer.data(), BlockSize), blockA+depth*i, actual_b, actualBlockSize, depth, actualBlockSize, alpha,
-1, -1, 0, 0);
+
// 2 - triangular accumulation
for(Index j1=0; j1<actualBlockSize; ++j1)
{
@@ -269,10 +269,13 @@ struct general_product_to_triangular_selector<MatrixType,ProductType,UpLo,false>
enum {
IsRowMajor = (internal::traits<MatrixType>::Flags&RowMajorBit) ? 1 : 0,
LhsIsRowMajor = _ActualLhs::Flags&RowMajorBit ? 1 : 0,
- RhsIsRowMajor = _ActualRhs::Flags&RowMajorBit ? 1 : 0
+ RhsIsRowMajor = _ActualRhs::Flags&RowMajorBit ? 1 : 0,
+ SkipDiag = (UpLo&(UnitDiag|ZeroDiag))!=0
};
Index size = mat.cols();
+ if(SkipDiag)
+ size--;
Index depth = actualLhs.cols();
typedef internal::gemm_blocking_space<IsRowMajor ? RowMajor : ColMajor,typename Lhs::Scalar,typename Rhs::Scalar,
@@ -283,21 +286,23 @@ struct general_product_to_triangular_selector<MatrixType,ProductType,UpLo,false>
internal::general_matrix_matrix_triangular_product<Index,
typename Lhs::Scalar, LhsIsRowMajor ? RowMajor : ColMajor, LhsBlasTraits::NeedToConjugate,
typename Rhs::Scalar, RhsIsRowMajor ? RowMajor : ColMajor, RhsBlasTraits::NeedToConjugate,
- IsRowMajor ? RowMajor : ColMajor, UpLo>
+ IsRowMajor ? RowMajor : ColMajor, UpLo&(Lower|Upper)>
::run(size, depth,
- &actualLhs.coeffRef(0,0), actualLhs.outerStride(), &actualRhs.coeffRef(0,0), actualRhs.outerStride(),
- mat.data(), mat.outerStride(), actualAlpha, blocking);
+ &actualLhs.coeffRef(SkipDiag&&(UpLo&Lower)==Lower ? 1 : 0,0), actualLhs.outerStride(),
+ &actualRhs.coeffRef(0,SkipDiag&&(UpLo&Upper)==Upper ? 1 : 0), actualRhs.outerStride(),
+ mat.data() + (SkipDiag ? (bool(IsRowMajor) != ((UpLo&Lower)==Lower) ? 1 : mat.outerStride() ) : 0), mat.outerStride(), actualAlpha, blocking);
}
};
template<typename MatrixType, unsigned int UpLo>
template<typename ProductType>
-TriangularView<MatrixType,UpLo>& TriangularViewImpl<MatrixType,UpLo,Dense>::_assignProduct(const ProductType& prod, const Scalar& alpha, bool beta)
+EIGEN_DEVICE_FUNC TriangularView<MatrixType,UpLo>& TriangularViewImpl<MatrixType,UpLo,Dense>::_assignProduct(const ProductType& prod, const Scalar& alpha, bool beta)
{
+ EIGEN_STATIC_ASSERT((UpLo&UnitDiag)==0, WRITING_TO_TRIANGULAR_PART_WITH_UNIT_DIAGONAL_IS_NOT_SUPPORTED);
eigen_assert(derived().nestedExpression().rows() == prod.rows() && derived().cols() == prod.cols());
-
+
general_product_to_triangular_selector<MatrixType, ProductType, UpLo, internal::traits<ProductType>::InnerSize==1>::run(derived().nestedExpression().const_cast_derived(), prod, alpha, beta);
-
+
return derived();
}
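
The SkipDiag arithmetic above in compact form: when the destination's UpLo carries UnitDiag or ZeroDiag, only the strictly triangular part is updated, so the size shrinks by one and the first written entry is (1,0) for a Lower target and (0,1) for an Upper target. A small, hypothetical helper mirroring the offset expression (illustration only, not Eigen's code):

    #include <cstddef>
    #include <iostream>

    // Offset of the first strictly-triangular entry from mat.data(),
    // for a destination with the given storage order and outer stride.
    std::ptrdiff_t strictStartOffset(bool isRowMajor, bool isLower, std::ptrdiff_t outerStride) {
      return (isRowMajor != isLower) ? 1 : outerStride;
    }

    int main() {
      const std::ptrdiff_t os = 8;  // outer stride of a hypothetical 8x8 matrix
      std::cout << strictStartOffset(false, true,  os) << ' '   // col-major Lower: entry (1,0) -> +1
                << strictStartOffset(false, false, os) << ' '   // col-major Upper: entry (0,1) -> +8
                << strictStartOffset(true,  true,  os) << ' '   // row-major Lower: entry (1,0) -> +8
                << strictStartOffset(true,  false, os) << '\n'; // row-major Upper: entry (0,1) -> +1
    }
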
diff --git a/Eigen/src/Core/products/GeneralMatrixMatrixTriangular_BLAS.h b/Eigen/src/Core/products/GeneralMatrixMatrixTriangular_BLAS.h
index 5b7c15cca..49565c070 100644
--- a/Eigen/src/Core/products/GeneralMatrixMatrixTriangular_BLAS.h
+++ b/Eigen/src/Core/products/GeneralMatrixMatrixTriangular_BLAS.h
@@ -37,7 +37,7 @@ namespace Eigen {
namespace internal {
-template <typename Index, typename Scalar, int AStorageOrder, bool ConjugateA, int ResStorageOrder, int UpLo>
+template <typename Index, typename Scalar, int AStorageOrder, bool ConjugateA, int ResStorageOrder, int UpLo>
struct general_matrix_matrix_rankupdate :
general_matrix_matrix_triangular_product<
Index,Scalar,AStorageOrder,ConjugateA,Scalar,AStorageOrder,ConjugateA,ResStorageOrder,UpLo,BuiltIn> {};
@@ -52,7 +52,7 @@ struct general_matrix_matrix_triangular_product<Index,Scalar,LhsStorageOrder,Con
static EIGEN_STRONG_INLINE void run(Index size, Index depth,const Scalar* lhs, Index lhsStride, \
const Scalar* rhs, Index rhsStride, Scalar* res, Index resStride, Scalar alpha, level3_blocking<Scalar, Scalar>& blocking) \
{ \
- if (lhs==rhs) { \
+ if ( lhs==rhs && ((UpLo&(Lower|Upper))==UpLo) ) { \
general_matrix_matrix_rankupdate<Index,Scalar,LhsStorageOrder,ConjugateLhs,ColMajor,UpLo> \
::run(size,depth,lhs,lhsStride,rhs,rhsStride,res,resStride,alpha,blocking); \
} else { \
@@ -88,7 +88,7 @@ struct general_matrix_matrix_rankupdate<Index,EIGTYPE,AStorageOrder,ConjugateA,C
BlasIndex lda=convert_index<BlasIndex>(lhsStride), ldc=convert_index<BlasIndex>(resStride), n=convert_index<BlasIndex>(size), k=convert_index<BlasIndex>(depth); \
char uplo=((IsLower) ? 'L' : 'U'), trans=((AStorageOrder==RowMajor) ? 'T':'N'); \
EIGTYPE beta(1); \
- BLASFUNC(&uplo, &trans, &n, &k, &numext::real_ref(alpha), lhs, &lda, &numext::real_ref(beta), res, &ldc); \
+ BLASFUNC(&uplo, &trans, &n, &k, (const BLASTYPE*)&numext::real_ref(alpha), lhs, &lda, (const BLASTYPE*)&numext::real_ref(beta), res, &ldc); \
} \
};
@@ -125,9 +125,13 @@ struct general_matrix_matrix_rankupdate<Index,EIGTYPE,AStorageOrder,ConjugateA,C
} \
};
-
+#ifdef EIGEN_USE_MKL
+EIGEN_BLAS_RANKUPDATE_R(double, double, dsyrk)
+EIGEN_BLAS_RANKUPDATE_R(float, float, ssyrk)
+#else
EIGEN_BLAS_RANKUPDATE_R(double, double, dsyrk_)
EIGEN_BLAS_RANKUPDATE_R(float, float, ssyrk_)
+#endif
// TODO handle complex cases
// EIGEN_BLAS_RANKUPDATE_C(dcomplex, double, double, zherk_)
diff --git a/Eigen/src/Core/products/GeneralMatrixMatrix_BLAS.h b/Eigen/src/Core/products/GeneralMatrixMatrix_BLAS.h
index 7a3bdbf20..b0f6b0d5b 100644
--- a/Eigen/src/Core/products/GeneralMatrixMatrix_BLAS.h
+++ b/Eigen/src/Core/products/GeneralMatrixMatrix_BLAS.h
@@ -46,7 +46,7 @@ namespace internal {
// gemm specialization
-#define GEMM_SPECIALIZATION(EIGTYPE, EIGPREFIX, BLASTYPE, BLASPREFIX) \
+#define GEMM_SPECIALIZATION(EIGTYPE, EIGPREFIX, BLASTYPE, BLASFUNC) \
template< \
typename Index, \
int LhsStorageOrder, bool ConjugateLhs, \
@@ -100,13 +100,20 @@ static void run(Index rows, Index cols, Index depth, \
ldb = convert_index<BlasIndex>(b_tmp.outerStride()); \
} else b = _rhs; \
\
- BLASPREFIX##gemm_(&transa, &transb, &m, &n, &k, &numext::real_ref(alpha), (const BLASTYPE*)a, &lda, (const BLASTYPE*)b, &ldb, &numext::real_ref(beta), (BLASTYPE*)res, &ldc); \
+ BLASFUNC(&transa, &transb, &m, &n, &k, (const BLASTYPE*)&numext::real_ref(alpha), (const BLASTYPE*)a, &lda, (const BLASTYPE*)b, &ldb, (const BLASTYPE*)&numext::real_ref(beta), (BLASTYPE*)res, &ldc); \
}};
-GEMM_SPECIALIZATION(double, d, double, d)
-GEMM_SPECIALIZATION(float, f, float, s)
-GEMM_SPECIALIZATION(dcomplex, cd, double, z)
-GEMM_SPECIALIZATION(scomplex, cf, float, c)
+#ifdef EIGEN_USE_MKL
+GEMM_SPECIALIZATION(double, d, double, dgemm)
+GEMM_SPECIALIZATION(float, f, float, sgemm)
+GEMM_SPECIALIZATION(dcomplex, cd, MKL_Complex16, zgemm)
+GEMM_SPECIALIZATION(scomplex, cf, MKL_Complex8, cgemm)
+#else
+GEMM_SPECIALIZATION(double, d, double, dgemm_)
+GEMM_SPECIALIZATION(float, f, float, sgemm_)
+GEMM_SPECIALIZATION(dcomplex, cd, double, zgemm_)
+GEMM_SPECIALIZATION(scomplex, cf, float, cgemm_)
+#endif
} // end namespace internal
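
The switch to MKL_Complex16/MKL_Complex8 plus the added (const BLASTYPE*) casts rely on std::complex<T> sharing its layout with two consecutive T values, which is also what MKL's complex structs assume. A hedged, self-contained check of that assumption; MKL_Complex16 is modeled here by a hypothetical struct, not MKL's own header:

    #include <complex>
    #include <iostream>

    // Stand-in for MKL_Complex16, which MKL defines as a struct of two doubles.
    struct MklComplex16Model { double real, imag; };

    int main() {
      std::complex<double> alpha(2.0, -1.0);
      // C++11 guarantees std::complex<double> is layout-compatible with double[2],
      // which is the layout the casts in the macros above depend on in practice.
      const MklComplex16Model* p = reinterpret_cast<const MklComplex16Model*>(&alpha);
      std::cout << p->real << ' ' << p->imag << '\n';  // prints: 2 -1
    }
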
diff --git a/Eigen/src/Core/products/GeneralMatrixVector.h b/Eigen/src/Core/products/GeneralMatrixVector.h
index 41d8242e1..767feb99d 100644
--- a/Eigen/src/Core/products/GeneralMatrixVector.h
+++ b/Eigen/src/Core/products/GeneralMatrixVector.h
@@ -48,7 +48,7 @@ typedef typename conditional<Vectorizable,_LhsPacket,LhsScalar>::type LhsPacket;
typedef typename conditional<Vectorizable,_RhsPacket,RhsScalar>::type RhsPacket;
typedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;
-EIGEN_DONT_INLINE static void run(
+EIGEN_DEVICE_FUNC EIGEN_DONT_INLINE static void run(
Index rows, Index cols,
const LhsMapper& lhs,
const RhsMapper& rhs,
@@ -57,7 +57,7 @@ EIGEN_DONT_INLINE static void run(
};
template<typename Index, typename LhsScalar, typename LhsMapper, bool ConjugateLhs, typename RhsScalar, typename RhsMapper, bool ConjugateRhs, int Version>
-EIGEN_DONT_INLINE void general_matrix_vector_product<Index,LhsScalar,LhsMapper,ColMajor,ConjugateLhs,RhsScalar,RhsMapper,ConjugateRhs,Version>::run(
+EIGEN_DEVICE_FUNC EIGEN_DONT_INLINE void general_matrix_vector_product<Index,LhsScalar,LhsMapper,ColMajor,ConjugateLhs,RhsScalar,RhsMapper,ConjugateRhs,Version>::run(
Index rows, Index cols,
const LhsMapper& alhs,
const RhsMapper& rhs,
@@ -201,7 +201,7 @@ EIGEN_DONT_INLINE void general_matrix_vector_product<Index,LhsScalar,LhsMapper,C
}
/* Optimized row-major matrix * vector product:
- * This algorithm processes 4 rows at onces that allows to both reduce
+ * This algorithm processes 4 rows at once, which allows us both to reduce
* the number of load/stores of the result by a factor 4 and to reduce
* the instruction dependency. Moreover, we know that all bands have the
* same alignment pattern.
@@ -231,7 +231,7 @@ typedef typename conditional<Vectorizable,_LhsPacket,LhsScalar>::type LhsPacket;
typedef typename conditional<Vectorizable,_RhsPacket,RhsScalar>::type RhsPacket;
typedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;
-EIGEN_DONT_INLINE static void run(
+EIGEN_DEVICE_FUNC EIGEN_DONT_INLINE static void run(
Index rows, Index cols,
const LhsMapper& lhs,
const RhsMapper& rhs,
@@ -240,7 +240,7 @@ EIGEN_DONT_INLINE static void run(
};
template<typename Index, typename LhsScalar, typename LhsMapper, bool ConjugateLhs, typename RhsScalar, typename RhsMapper, bool ConjugateRhs, int Version>
-EIGEN_DONT_INLINE void general_matrix_vector_product<Index,LhsScalar,LhsMapper,RowMajor,ConjugateLhs,RhsScalar,RhsMapper,ConjugateRhs,Version>::run(
+EIGEN_DEVICE_FUNC EIGEN_DONT_INLINE void general_matrix_vector_product<Index,LhsScalar,LhsMapper,RowMajor,ConjugateLhs,RhsScalar,RhsMapper,ConjugateRhs,Version>::run(
Index rows, Index cols,
const LhsMapper& alhs,
const RhsMapper& rhs,
diff --git a/Eigen/src/Core/products/GeneralMatrixVector_BLAS.h b/Eigen/src/Core/products/GeneralMatrixVector_BLAS.h
index e3a5d5892..6e36c2b3c 100644
--- a/Eigen/src/Core/products/GeneralMatrixVector_BLAS.h
+++ b/Eigen/src/Core/products/GeneralMatrixVector_BLAS.h
@@ -85,7 +85,7 @@ EIGEN_BLAS_GEMV_SPECIALIZE(float)
EIGEN_BLAS_GEMV_SPECIALIZE(dcomplex)
EIGEN_BLAS_GEMV_SPECIALIZE(scomplex)
-#define EIGEN_BLAS_GEMV_SPECIALIZATION(EIGTYPE,BLASTYPE,BLASPREFIX) \
+#define EIGEN_BLAS_GEMV_SPECIALIZATION(EIGTYPE,BLASTYPE,BLASFUNC) \
template<typename Index, int LhsStorageOrder, bool ConjugateLhs, bool ConjugateRhs> \
struct general_matrix_vector_product_gemv<Index,EIGTYPE,LhsStorageOrder,ConjugateLhs,EIGTYPE,ConjugateRhs> \
{ \
@@ -113,14 +113,21 @@ static void run( \
x_ptr=x_tmp.data(); \
incx=1; \
} else x_ptr=rhs; \
- BLASPREFIX##gemv_(&trans, &m, &n, &numext::real_ref(alpha), (const BLASTYPE*)lhs, &lda, (const BLASTYPE*)x_ptr, &incx, &numext::real_ref(beta), (BLASTYPE*)res, &incy); \
+ BLASFUNC(&trans, &m, &n, (const BLASTYPE*)&numext::real_ref(alpha), (const BLASTYPE*)lhs, &lda, (const BLASTYPE*)x_ptr, &incx, (const BLASTYPE*)&numext::real_ref(beta), (BLASTYPE*)res, &incy); \
}\
};
-EIGEN_BLAS_GEMV_SPECIALIZATION(double, double, d)
-EIGEN_BLAS_GEMV_SPECIALIZATION(float, float, s)
-EIGEN_BLAS_GEMV_SPECIALIZATION(dcomplex, double, z)
-EIGEN_BLAS_GEMV_SPECIALIZATION(scomplex, float, c)
+#ifdef EIGEN_USE_MKL
+EIGEN_BLAS_GEMV_SPECIALIZATION(double, double, dgemv)
+EIGEN_BLAS_GEMV_SPECIALIZATION(float, float, sgemv)
+EIGEN_BLAS_GEMV_SPECIALIZATION(dcomplex, MKL_Complex16, zgemv)
+EIGEN_BLAS_GEMV_SPECIALIZATION(scomplex, MKL_Complex8 , cgemv)
+#else
+EIGEN_BLAS_GEMV_SPECIALIZATION(double, double, dgemv_)
+EIGEN_BLAS_GEMV_SPECIALIZATION(float, float, sgemv_)
+EIGEN_BLAS_GEMV_SPECIALIZATION(dcomplex, double, zgemv_)
+EIGEN_BLAS_GEMV_SPECIALIZATION(scomplex, float, cgemv_)
+#endif
} // end namespace internal
diff --git a/Eigen/src/Core/products/Parallelizer.h b/Eigen/src/Core/products/Parallelizer.h
index c2f084c82..92e9b0d9f 100644
--- a/Eigen/src/Core/products/Parallelizer.h
+++ b/Eigen/src/Core/products/Parallelizer.h
@@ -10,6 +10,10 @@
#ifndef EIGEN_PARALLELIZER_H
#define EIGEN_PARALLELIZER_H
+#if EIGEN_HAS_CXX11_ATOMIC
+#include <atomic>
+#endif
+
namespace Eigen {
namespace internal {
@@ -75,8 +79,17 @@ template<typename Index> struct GemmParallelInfo
{
GemmParallelInfo() : sync(-1), users(0), lhs_start(0), lhs_length(0) {}
+ // volatile is not enough on all architectures (see bug 1572)
+ // to guarantee that when thread A says to thread B that it is
+ // done with packing a block, then all writes have really been
+ // carried out... the C++11 memory model and atomics guarantee this.
+#if EIGEN_HAS_CXX11_ATOMIC
+ std::atomic<Index> sync;
+ std::atomic<int> users;
+#else
Index volatile sync;
int volatile users;
+#endif
Index lhs_start;
Index lhs_length;
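
A minimal sketch of the producer/consumer pattern the comment above describes, assuming release/acquire ordering on the users counter; the names are illustrative and this is not the parallelizer's actual code:

    #include <atomic>
    #include <cassert>
    #include <thread>
    #include <vector>

    int main() {
      std::vector<int> packedBlock(1024, 0);
      std::atomic<int> users{0};

      std::thread packer([&] {
        for (int& x : packedBlock) x = 42;           // "pack the block"
        users.store(4, std::memory_order_release);   // publish it for 4 consumers
      });

      std::thread consumer([&] {
        while (users.load(std::memory_order_acquire) == 0) {}  // spin until published
        assert(packedBlock[512] == 42);              // visible thanks to acquire/release
        users.fetch_sub(1, std::memory_order_acq_rel);
      });

      packer.join();
      consumer.join();
      return 0;
    }
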
@@ -87,11 +100,14 @@ void parallelize_gemm(const Functor& func, Index rows, Index cols, Index depth,
{
// TODO when EIGEN_USE_BLAS is defined,
// we should still enable OMP for other scalar types
-#if !(defined (EIGEN_HAS_OPENMP)) || defined (EIGEN_USE_BLAS)
+ // Without C++11, we have to disable GEMM's parallelization on
+ // non-x86 architectures because volatile alone is not enough for our purpose there.
+ // See bug 1572.
+#if (! defined(EIGEN_HAS_OPENMP)) || defined(EIGEN_USE_BLAS) || ((!EIGEN_HAS_CXX11_ATOMIC) && !(EIGEN_ARCH_i386_OR_x86_64))
// FIXME the transpose variable is only needed to properly split
// the matrix product when multithreading is enabled. This is a temporary
// fix to support row-major destination matrices. This whole
- // parallelizer mechanism has to be redisigned anyway.
+ // parallelizer mechanism has to be redesigned anyway.
EIGEN_UNUSED_VARIABLE(depth);
EIGEN_UNUSED_VARIABLE(transpose);
func(0,rows, 0,cols);
@@ -117,7 +133,7 @@ void parallelize_gemm(const Functor& func, Index rows, Index cols, Index depth,
// compute the number of threads we are going to use
Index threads = std::min<Index>(nbThreads(), pb_max_threads);
- // if multi-threading is explicitely disabled, not useful, or if we already are in a parallel session,
+ // if multi-threading is explicitly disabled, not useful, or if we already are in a parallel session,
// then abort multi-threading
// FIXME omp_get_num_threads()>1 only works for openmp, what if the user does not use openmp?
if((!Condition) || (threads==1) || (omp_get_num_threads()>1))
diff --git a/Eigen/src/Core/products/SelfadjointMatrixMatrix.h b/Eigen/src/Core/products/SelfadjointMatrixMatrix.h
index da6f82abc..c84c71609 100644
--- a/Eigen/src/Core/products/SelfadjointMatrixMatrix.h
+++ b/Eigen/src/Core/products/SelfadjointMatrixMatrix.h
@@ -352,7 +352,7 @@ EIGEN_DONT_INLINE void product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,t
gebp_kernel<Scalar, Scalar, Index, ResMapper, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp_kernel;
symm_pack_lhs<Scalar, Index, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
gemm_pack_rhs<Scalar, Index, RhsMapper, Traits::nr,RhsStorageOrder> pack_rhs;
- gemm_pack_lhs<Scalar, Index, LhsTransposeMapper, Traits::mr, Traits::LhsProgress, LhsStorageOrder==RowMajor?ColMajor:RowMajor, true> pack_lhs_transposed;
+ gemm_pack_lhs<Scalar, Index, LhsTransposeMapper, Traits::mr, Traits::LhsProgress, typename Traits::LhsPacket4Packing, LhsStorageOrder==RowMajor?ColMajor:RowMajor, true> pack_lhs_transposed;
for(Index k2=0; k2<size; k2+=kc)
{
@@ -387,7 +387,7 @@ EIGEN_DONT_INLINE void product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,t
for(Index i2=k2+kc; i2<size; i2+=mc)
{
const Index actual_mc = (std::min)(i2+mc,size)-i2;
- gemm_pack_lhs<Scalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, LhsStorageOrder,false>()
+ gemm_pack_lhs<Scalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, typename Traits::LhsPacket4Packing, LhsStorageOrder,false>()
(blockA, lhs.getSubMapper(i2, k2), actual_kc, actual_mc);
gebp_kernel(res.getSubMapper(i2, 0), blockA, blockB, actual_mc, actual_kc, cols, alpha);
@@ -437,7 +437,7 @@ EIGEN_DONT_INLINE void product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,f
ei_declare_aligned_stack_constructed_variable(Scalar, blockB, sizeB, blocking.blockB());
gebp_kernel<Scalar, Scalar, Index, ResMapper, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp_kernel;
- gemm_pack_lhs<Scalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
+ gemm_pack_lhs<Scalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, typename Traits::LhsPacket4Packing, LhsStorageOrder> pack_lhs;
symm_pack_rhs<Scalar, Index, Traits::nr,RhsStorageOrder> pack_rhs;
for(Index k2=0; k2<size; k2+=kc)
diff --git a/Eigen/src/Core/products/SelfadjointMatrixMatrix_BLAS.h b/Eigen/src/Core/products/SelfadjointMatrixMatrix_BLAS.h
index a45238d69..9a5318507 100644
--- a/Eigen/src/Core/products/SelfadjointMatrixMatrix_BLAS.h
+++ b/Eigen/src/Core/products/SelfadjointMatrixMatrix_BLAS.h
@@ -40,7 +40,7 @@ namespace internal {
/* Optimized selfadjoint matrix * matrix (?SYMM/?HEMM) product */
-#define EIGEN_BLAS_SYMM_L(EIGTYPE, BLASTYPE, EIGPREFIX, BLASPREFIX) \
+#define EIGEN_BLAS_SYMM_L(EIGTYPE, BLASTYPE, EIGPREFIX, BLASFUNC) \
template <typename Index, \
int LhsStorageOrder, bool ConjugateLhs, \
int RhsStorageOrder, bool ConjugateRhs> \
@@ -81,13 +81,13 @@ struct product_selfadjoint_matrix<EIGTYPE,Index,LhsStorageOrder,true,ConjugateLh
ldb = convert_index<BlasIndex>(b_tmp.outerStride()); \
} else b = _rhs; \
\
- BLASPREFIX##symm_(&side, &uplo, &m, &n, &numext::real_ref(alpha), (const BLASTYPE*)a, &lda, (const BLASTYPE*)b, &ldb, &numext::real_ref(beta), (BLASTYPE*)res, &ldc); \
+ BLASFUNC(&side, &uplo, &m, &n, (const BLASTYPE*)&numext::real_ref(alpha), (const BLASTYPE*)a, &lda, (const BLASTYPE*)b, &ldb, (const BLASTYPE*)&numext::real_ref(beta), (BLASTYPE*)res, &ldc); \
\
} \
};
-#define EIGEN_BLAS_HEMM_L(EIGTYPE, BLASTYPE, EIGPREFIX, BLASPREFIX) \
+#define EIGEN_BLAS_HEMM_L(EIGTYPE, BLASTYPE, EIGPREFIX, BLASFUNC) \
template <typename Index, \
int LhsStorageOrder, bool ConjugateLhs, \
int RhsStorageOrder, bool ConjugateRhs> \
@@ -144,20 +144,26 @@ struct product_selfadjoint_matrix<EIGTYPE,Index,LhsStorageOrder,true,ConjugateLh
ldb = convert_index<BlasIndex>(b_tmp.outerStride()); \
} \
\
- BLASPREFIX##hemm_(&side, &uplo, &m, &n, &numext::real_ref(alpha), (const BLASTYPE*)a, &lda, (const BLASTYPE*)b, &ldb, &numext::real_ref(beta), (BLASTYPE*)res, &ldc); \
+ BLASFUNC(&side, &uplo, &m, &n, (const BLASTYPE*)&numext::real_ref(alpha), (const BLASTYPE*)a, &lda, (const BLASTYPE*)b, &ldb, (const BLASTYPE*)&numext::real_ref(beta), (BLASTYPE*)res, &ldc); \
\
} \
};
-EIGEN_BLAS_SYMM_L(double, double, d, d)
-EIGEN_BLAS_SYMM_L(float, float, f, s)
-EIGEN_BLAS_HEMM_L(dcomplex, double, cd, z)
-EIGEN_BLAS_HEMM_L(scomplex, float, cf, c)
-
+#ifdef EIGEN_USE_MKL
+EIGEN_BLAS_SYMM_L(double, double, d, dsymm)
+EIGEN_BLAS_SYMM_L(float, float, f, ssymm)
+EIGEN_BLAS_HEMM_L(dcomplex, MKL_Complex16, cd, zhemm)
+EIGEN_BLAS_HEMM_L(scomplex, MKL_Complex8, cf, chemm)
+#else
+EIGEN_BLAS_SYMM_L(double, double, d, dsymm_)
+EIGEN_BLAS_SYMM_L(float, float, f, ssymm_)
+EIGEN_BLAS_HEMM_L(dcomplex, double, cd, zhemm_)
+EIGEN_BLAS_HEMM_L(scomplex, float, cf, chemm_)
+#endif
/* Optimized matrix * selfadjoint matrix (?SYMM/?HEMM) product */
-#define EIGEN_BLAS_SYMM_R(EIGTYPE, BLASTYPE, EIGPREFIX, BLASPREFIX) \
+#define EIGEN_BLAS_SYMM_R(EIGTYPE, BLASTYPE, EIGPREFIX, BLASFUNC) \
template <typename Index, \
int LhsStorageOrder, bool ConjugateLhs, \
int RhsStorageOrder, bool ConjugateRhs> \
@@ -197,13 +203,13 @@ struct product_selfadjoint_matrix<EIGTYPE,Index,LhsStorageOrder,false,ConjugateL
ldb = convert_index<BlasIndex>(b_tmp.outerStride()); \
} else b = _lhs; \
\
- BLASPREFIX##symm_(&side, &uplo, &m, &n, &numext::real_ref(alpha), (const BLASTYPE*)a, &lda, (const BLASTYPE*)b, &ldb, &numext::real_ref(beta), (BLASTYPE*)res, &ldc); \
+ BLASFUNC(&side, &uplo, &m, &n, (const BLASTYPE*)&numext::real_ref(alpha), (const BLASTYPE*)a, &lda, (const BLASTYPE*)b, &ldb, (const BLASTYPE*)&numext::real_ref(beta), (BLASTYPE*)res, &ldc); \
\
} \
};
-#define EIGEN_BLAS_HEMM_R(EIGTYPE, BLASTYPE, EIGPREFIX, BLASPREFIX) \
+#define EIGEN_BLAS_HEMM_R(EIGTYPE, BLASTYPE, EIGPREFIX, BLASFUNC) \
template <typename Index, \
int LhsStorageOrder, bool ConjugateLhs, \
int RhsStorageOrder, bool ConjugateRhs> \
@@ -259,15 +265,21 @@ struct product_selfadjoint_matrix<EIGTYPE,Index,LhsStorageOrder,false,ConjugateL
ldb = convert_index<BlasIndex>(b_tmp.outerStride()); \
} \
\
- BLASPREFIX##hemm_(&side, &uplo, &m, &n, &numext::real_ref(alpha), (const BLASTYPE*)a, &lda, (const BLASTYPE*)b, &ldb, &numext::real_ref(beta), (BLASTYPE*)res, &ldc); \
+ BLASFUNC(&side, &uplo, &m, &n, (const BLASTYPE*)&numext::real_ref(alpha), (const BLASTYPE*)a, &lda, (const BLASTYPE*)b, &ldb, (const BLASTYPE*)&numext::real_ref(beta), (BLASTYPE*)res, &ldc); \
} \
};
-EIGEN_BLAS_SYMM_R(double, double, d, d)
-EIGEN_BLAS_SYMM_R(float, float, f, s)
-EIGEN_BLAS_HEMM_R(dcomplex, double, cd, z)
-EIGEN_BLAS_HEMM_R(scomplex, float, cf, c)
-
+#ifdef EIGEN_USE_MKL
+EIGEN_BLAS_SYMM_R(double, double, d, dsymm)
+EIGEN_BLAS_SYMM_R(float, float, f, ssymm)
+EIGEN_BLAS_HEMM_R(dcomplex, MKL_Complex16, cd, zhemm)
+EIGEN_BLAS_HEMM_R(scomplex, MKL_Complex8, cf, chemm)
+#else
+EIGEN_BLAS_SYMM_R(double, double, d, dsymm_)
+EIGEN_BLAS_SYMM_R(float, float, f, ssymm_)
+EIGEN_BLAS_HEMM_R(dcomplex, double, cd, zhemm_)
+EIGEN_BLAS_HEMM_R(scomplex, float, cf, chemm_)
+#endif
} // end namespace internal
} // end namespace Eigen
diff --git a/Eigen/src/Core/products/SelfadjointMatrixVector.h b/Eigen/src/Core/products/SelfadjointMatrixVector.h
index 3fd180e6c..d38fd72b2 100644
--- a/Eigen/src/Core/products/SelfadjointMatrixVector.h
+++ b/Eigen/src/Core/products/SelfadjointMatrixVector.h
@@ -15,7 +15,7 @@ namespace Eigen {
namespace internal {
/* Optimized selfadjoint matrix * vector product:
- * This algorithm processes 2 columns at onces that allows to both reduce
+ * This algorithm processes 2 columns at once, which allows us both to reduce
* the number of load/stores of the result by a factor 2 and to reduce
* the instruction dependency.
*/
@@ -27,7 +27,8 @@ template<typename Scalar, typename Index, int StorageOrder, int UpLo, bool Conju
struct selfadjoint_matrix_vector_product
{
-static EIGEN_DONT_INLINE void run(
+static EIGEN_DONT_INLINE EIGEN_DEVICE_FUNC
+void run(
Index size,
const Scalar* lhs, Index lhsStride,
const Scalar* rhs,
@@ -36,7 +37,8 @@ static EIGEN_DONT_INLINE void run(
};
template<typename Scalar, typename Index, int StorageOrder, int UpLo, bool ConjugateLhs, bool ConjugateRhs, int Version>
-EIGEN_DONT_INLINE void selfadjoint_matrix_vector_product<Scalar,Index,StorageOrder,UpLo,ConjugateLhs,ConjugateRhs,Version>::run(
+EIGEN_DONT_INLINE EIGEN_DEVICE_FUNC
+void selfadjoint_matrix_vector_product<Scalar,Index,StorageOrder,UpLo,ConjugateLhs,ConjugateRhs,Version>::run(
Index size,
const Scalar* lhs, Index lhsStride,
const Scalar* rhs,
@@ -62,8 +64,7 @@ EIGEN_DONT_INLINE void selfadjoint_matrix_vector_product<Scalar,Index,StorageOrd
Scalar cjAlpha = ConjugateRhs ? numext::conj(alpha) : alpha;
-
- Index bound = (std::max)(Index(0),size-8) & 0xfffffffe;
+ Index bound = numext::maxi(Index(0), size-8) & 0xfffffffe;
if (FirstTriangular)
bound = size - bound;
@@ -175,7 +176,8 @@ struct selfadjoint_product_impl<Lhs,LhsMode,false,Rhs,0,true>
enum { LhsUpLo = LhsMode&(Upper|Lower) };
template<typename Dest>
- static void run(Dest& dest, const Lhs &a_lhs, const Rhs &a_rhs, const Scalar& alpha)
+ static EIGEN_DEVICE_FUNC
+ void run(Dest& dest, const Lhs &a_lhs, const Rhs &a_rhs, const Scalar& alpha)
{
typedef typename Dest::Scalar ResScalar;
typedef typename Rhs::Scalar RhsScalar;
diff --git a/Eigen/src/Core/products/SelfadjointMatrixVector_BLAS.h b/Eigen/src/Core/products/SelfadjointMatrixVector_BLAS.h
index 38f23accf..1238345e3 100644
--- a/Eigen/src/Core/products/SelfadjointMatrixVector_BLAS.h
+++ b/Eigen/src/Core/products/SelfadjointMatrixVector_BLAS.h
@@ -95,14 +95,21 @@ const EIGTYPE* _rhs, EIGTYPE* res, EIGTYPE alpha) \
x_tmp=map_x.conjugate(); \
x_ptr=x_tmp.data(); \
} else x_ptr=_rhs; \
- BLASFUNC(&uplo, &n, &numext::real_ref(alpha), (const BLASTYPE*)lhs, &lda, (const BLASTYPE*)x_ptr, &incx, &numext::real_ref(beta), (BLASTYPE*)res, &incy); \
+ BLASFUNC(&uplo, &n, (const BLASTYPE*)&numext::real_ref(alpha), (const BLASTYPE*)lhs, &lda, (const BLASTYPE*)x_ptr, &incx, (const BLASTYPE*)&numext::real_ref(beta), (BLASTYPE*)res, &incy); \
}\
};
+#ifdef EIGEN_USE_MKL
+EIGEN_BLAS_SYMV_SPECIALIZATION(double, double, dsymv)
+EIGEN_BLAS_SYMV_SPECIALIZATION(float, float, ssymv)
+EIGEN_BLAS_SYMV_SPECIALIZATION(dcomplex, MKL_Complex16, zhemv)
+EIGEN_BLAS_SYMV_SPECIALIZATION(scomplex, MKL_Complex8, chemv)
+#else
EIGEN_BLAS_SYMV_SPECIALIZATION(double, double, dsymv_)
EIGEN_BLAS_SYMV_SPECIALIZATION(float, float, ssymv_)
EIGEN_BLAS_SYMV_SPECIALIZATION(dcomplex, double, zhemv_)
EIGEN_BLAS_SYMV_SPECIALIZATION(scomplex, float, chemv_)
+#endif
} // end namespace internal
diff --git a/Eigen/src/Core/products/SelfadjointProduct.h b/Eigen/src/Core/products/SelfadjointProduct.h
index f038d686f..39c5b59ff 100644
--- a/Eigen/src/Core/products/SelfadjointProduct.h
+++ b/Eigen/src/Core/products/SelfadjointProduct.h
@@ -120,7 +120,7 @@ struct selfadjoint_product_selector<MatrixType,OtherType,UpLo,false>
template<typename MatrixType, unsigned int UpLo>
template<typename DerivedU>
-SelfAdjointView<MatrixType,UpLo>& SelfAdjointView<MatrixType,UpLo>
+EIGEN_DEVICE_FUNC SelfAdjointView<MatrixType,UpLo>& SelfAdjointView<MatrixType,UpLo>
::rankUpdate(const MatrixBase<DerivedU>& u, const Scalar& alpha)
{
selfadjoint_product_selector<MatrixType,DerivedU,UpLo>::run(_expression().const_cast_derived(), u.derived(), alpha);
diff --git a/Eigen/src/Core/products/SelfadjointRank2Update.h b/Eigen/src/Core/products/SelfadjointRank2Update.h
index 2ae364111..09209f733 100644
--- a/Eigen/src/Core/products/SelfadjointRank2Update.h
+++ b/Eigen/src/Core/products/SelfadjointRank2Update.h
@@ -24,7 +24,8 @@ struct selfadjoint_rank2_update_selector;
template<typename Scalar, typename Index, typename UType, typename VType>
struct selfadjoint_rank2_update_selector<Scalar,Index,UType,VType,Lower>
{
- static void run(Scalar* mat, Index stride, const UType& u, const VType& v, const Scalar& alpha)
+ static EIGEN_DEVICE_FUNC
+ void run(Scalar* mat, Index stride, const UType& u, const VType& v, const Scalar& alpha)
{
const Index size = u.size();
for (Index i=0; i<size; ++i)
@@ -57,7 +58,7 @@ template<bool Cond, typename T> struct conj_expr_if
template<typename MatrixType, unsigned int UpLo>
template<typename DerivedU, typename DerivedV>
-SelfAdjointView<MatrixType,UpLo>& SelfAdjointView<MatrixType,UpLo>
+EIGEN_DEVICE_FUNC SelfAdjointView<MatrixType,UpLo>& SelfAdjointView<MatrixType,UpLo>
::rankUpdate(const MatrixBase<DerivedU>& u, const MatrixBase<DerivedV>& v, const Scalar& alpha)
{
typedef internal::blas_traits<DerivedU> UBlasTraits;
diff --git a/Eigen/src/Core/products/TriangularMatrixMatrix.h b/Eigen/src/Core/products/TriangularMatrixMatrix.h
index 6ec5a8a0b..85fd744b9 100644
--- a/Eigen/src/Core/products/TriangularMatrixMatrix.h
+++ b/Eigen/src/Core/products/TriangularMatrixMatrix.h
@@ -137,7 +137,13 @@ EIGEN_DONT_INLINE void product_triangular_matrix_matrix<Scalar,Index,Mode,true,
ei_declare_aligned_stack_constructed_variable(Scalar, blockA, sizeA, blocking.blockA());
ei_declare_aligned_stack_constructed_variable(Scalar, blockB, sizeB, blocking.blockB());
- Matrix<Scalar,SmallPanelWidth,SmallPanelWidth,LhsStorageOrder> triangularBuffer((internal::constructor_without_unaligned_array_assert()));
+ // To work around an "error: member reference base type 'Matrix<...>
+ // (Eigen::internal::constructor_without_unaligned_array_assert (*)())' is
+ // not a structure or union" compilation error in nvcc (tested V8.0.61),
+ // create a dummy internal::constructor_without_unaligned_array_assert
+ // object to pass to the Matrix constructor.
+ internal::constructor_without_unaligned_array_assert a;
+ Matrix<Scalar,SmallPanelWidth,SmallPanelWidth,LhsStorageOrder> triangularBuffer(a);
triangularBuffer.setZero();
if((Mode&ZeroDiag)==ZeroDiag)
triangularBuffer.diagonal().setZero();
@@ -145,7 +151,7 @@ EIGEN_DONT_INLINE void product_triangular_matrix_matrix<Scalar,Index,Mode,true,
triangularBuffer.diagonal().setOnes();
gebp_kernel<Scalar, Scalar, Index, ResMapper, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp_kernel;
- gemm_pack_lhs<Scalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
+ gemm_pack_lhs<Scalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, typename Traits::LhsPacket4Packing, LhsStorageOrder> pack_lhs;
gemm_pack_rhs<Scalar, Index, RhsMapper, Traits::nr,RhsStorageOrder> pack_rhs;
for(Index k2=IsLower ? depth : 0;
@@ -216,7 +222,7 @@ EIGEN_DONT_INLINE void product_triangular_matrix_matrix<Scalar,Index,Mode,true,
for(Index i2=start; i2<end; i2+=mc)
{
const Index actual_mc = (std::min)(i2+mc,end)-i2;
- gemm_pack_lhs<Scalar, Index, LhsMapper, Traits::mr,Traits::LhsProgress, LhsStorageOrder,false>()
+ gemm_pack_lhs<Scalar, Index, LhsMapper, Traits::mr,Traits::LhsProgress, typename Traits::LhsPacket4Packing, LhsStorageOrder,false>()
(blockA, lhs.getSubMapper(i2, actual_k2), actual_kc, actual_mc);
gebp_kernel(res.getSubMapper(i2, 0), blockA, blockB, actual_mc,
@@ -284,7 +290,8 @@ EIGEN_DONT_INLINE void product_triangular_matrix_matrix<Scalar,Index,Mode,false,
ei_declare_aligned_stack_constructed_variable(Scalar, blockA, sizeA, blocking.blockA());
ei_declare_aligned_stack_constructed_variable(Scalar, blockB, sizeB, blocking.blockB());
- Matrix<Scalar,SmallPanelWidth,SmallPanelWidth,RhsStorageOrder> triangularBuffer((internal::constructor_without_unaligned_array_assert()));
+ internal::constructor_without_unaligned_array_assert a;
+ Matrix<Scalar,SmallPanelWidth,SmallPanelWidth,RhsStorageOrder> triangularBuffer(a);
triangularBuffer.setZero();
if((Mode&ZeroDiag)==ZeroDiag)
triangularBuffer.diagonal().setZero();
@@ -292,7 +299,7 @@ EIGEN_DONT_INLINE void product_triangular_matrix_matrix<Scalar,Index,Mode,false,
triangularBuffer.diagonal().setOnes();
gebp_kernel<Scalar, Scalar, Index, ResMapper, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp_kernel;
- gemm_pack_lhs<Scalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
+ gemm_pack_lhs<Scalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, typename Traits::LhsPacket4Packing, LhsStorageOrder> pack_lhs;
gemm_pack_rhs<Scalar, Index, RhsMapper, Traits::nr,RhsStorageOrder> pack_rhs;
gemm_pack_rhs<Scalar, Index, RhsMapper, Traits::nr,RhsStorageOrder,false,true> pack_rhs_panel;
@@ -393,7 +400,9 @@ struct triangular_product_impl<Mode,LhsIsTriangular,Lhs,false,Rhs,false>
{
template<typename Dest> static void run(Dest& dst, const Lhs &a_lhs, const Rhs &a_rhs, const typename Dest::Scalar& alpha)
{
- typedef typename Dest::Scalar Scalar;
+ typedef typename Lhs::Scalar LhsScalar;
+ typedef typename Rhs::Scalar RhsScalar;
+ typedef typename Dest::Scalar Scalar;
typedef internal::blas_traits<Lhs> LhsBlasTraits;
typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType;
@@ -405,8 +414,9 @@ struct triangular_product_impl<Mode,LhsIsTriangular,Lhs,false,Rhs,false>
typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(a_lhs);
typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(a_rhs);
- Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(a_lhs)
- * RhsBlasTraits::extractScalarFactor(a_rhs);
+ LhsScalar lhs_alpha = LhsBlasTraits::extractScalarFactor(a_lhs);
+ RhsScalar rhs_alpha = RhsBlasTraits::extractScalarFactor(a_rhs);
+ Scalar actualAlpha = alpha * lhs_alpha * rhs_alpha;
typedef internal::gemm_blocking_space<(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor,Scalar,Scalar,
Lhs::MaxRowsAtCompileTime, Rhs::MaxColsAtCompileTime, Lhs::MaxColsAtCompileTime,4> BlockingType;
@@ -431,6 +441,21 @@ struct triangular_product_impl<Mode,LhsIsTriangular,Lhs,false,Rhs,false>
&dst.coeffRef(0,0), dst.outerStride(), // result info
actualAlpha, blocking
);
+
+ // Apply correction if the diagonal is unit and a scalar factor was nested:
+ if ((Mode&UnitDiag)==UnitDiag)
+ {
+ if (LhsIsTriangular && lhs_alpha!=LhsScalar(1))
+ {
+ Index diagSize = (std::min)(lhs.rows(),lhs.cols());
+ dst.topRows(diagSize) -= ((lhs_alpha-LhsScalar(1))*a_rhs).topRows(diagSize);
+ }
+ else if ((!LhsIsTriangular) && rhs_alpha!=RhsScalar(1))
+ {
+ Index diagSize = (std::min)(rhs.rows(),rhs.cols());
+ dst.leftCols(diagSize) -= (rhs_alpha-RhsScalar(1))*a_lhs.leftCols(diagSize);
+ }
+ }
}
};
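
The algebra behind the correction block above, sketched here as a reconstruction (not a quote from Eigen's documentation) and with the caller's alpha taken as 1 for simplicity: if the user wrote (s*A) with a unit-diagonal triangular view times B, the blas traits hand the kernel A (diagonal forced to ones) together with the extracted scalar factor s, so

    \begin{aligned}
    \text{intended: } & \bigl(\operatorname{strict}(sA) + I\bigr)B = s\,\operatorname{strict}(A)\,B + B, \\
    \text{computed: } & s\bigl(\operatorname{strict}(A) + I\bigr)B = s\,\operatorname{strict}(A)\,B + sB, \\
    \text{excess: }   & (s-1)\,B \ \text{on the rows covered by the diagonal block.}
    \end{aligned}

That excess is exactly the term the dst.topRows(diagSize) subtraction removes; the rhs-side branch is the transposed analogue acting on the leading columns.
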
diff --git a/Eigen/src/Core/products/TriangularMatrixMatrix_BLAS.h b/Eigen/src/Core/products/TriangularMatrixMatrix_BLAS.h
index aecded6bb..a25197ab0 100644
--- a/Eigen/src/Core/products/TriangularMatrixMatrix_BLAS.h
+++ b/Eigen/src/Core/products/TriangularMatrixMatrix_BLAS.h
@@ -75,7 +75,7 @@ EIGEN_BLAS_TRMM_SPECIALIZE(scomplex, true)
EIGEN_BLAS_TRMM_SPECIALIZE(scomplex, false)
// implements col-major += alpha * op(triangular) * op(general)
-#define EIGEN_BLAS_TRMM_L(EIGTYPE, BLASTYPE, EIGPREFIX, BLASPREFIX) \
+#define EIGEN_BLAS_TRMM_L(EIGTYPE, BLASTYPE, EIGPREFIX, BLASFUNC) \
template <typename Index, int Mode, \
int LhsStorageOrder, bool ConjugateLhs, \
int RhsStorageOrder, bool ConjugateRhs> \
@@ -172,7 +172,7 @@ struct product_triangular_matrix_matrix_trmm<EIGTYPE,Index,Mode,true, \
} \
/*std::cout << "TRMM_L: A is square! Go to BLAS TRMM implementation! \n";*/ \
/* call ?trmm*/ \
- BLASPREFIX##trmm_(&side, &uplo, &transa, &diag, &m, &n, &numext::real_ref(alpha), (const BLASTYPE*)a, &lda, (BLASTYPE*)b, &ldb); \
+ BLASFUNC(&side, &uplo, &transa, &diag, &m, &n, (const BLASTYPE*)&numext::real_ref(alpha), (const BLASTYPE*)a, &lda, (BLASTYPE*)b, &ldb); \
\
/* Add op(a_triangular)*b into res*/ \
Map<MatrixX##EIGPREFIX, 0, OuterStride<> > res_tmp(res,rows,cols,OuterStride<>(resStride)); \
@@ -180,13 +180,20 @@ struct product_triangular_matrix_matrix_trmm<EIGTYPE,Index,Mode,true, \
} \
};
-EIGEN_BLAS_TRMM_L(double, double, d, d)
-EIGEN_BLAS_TRMM_L(dcomplex, double, cd, z)
-EIGEN_BLAS_TRMM_L(float, float, f, s)
-EIGEN_BLAS_TRMM_L(scomplex, float, cf, c)
+#ifdef EIGEN_USE_MKL
+EIGEN_BLAS_TRMM_L(double, double, d, dtrmm)
+EIGEN_BLAS_TRMM_L(dcomplex, MKL_Complex16, cd, ztrmm)
+EIGEN_BLAS_TRMM_L(float, float, f, strmm)
+EIGEN_BLAS_TRMM_L(scomplex, MKL_Complex8, cf, ctrmm)
+#else
+EIGEN_BLAS_TRMM_L(double, double, d, dtrmm_)
+EIGEN_BLAS_TRMM_L(dcomplex, double, cd, ztrmm_)
+EIGEN_BLAS_TRMM_L(float, float, f, strmm_)
+EIGEN_BLAS_TRMM_L(scomplex, float, cf, ctrmm_)
+#endif
// implements col-major += alpha * op(general) * op(triangular)
-#define EIGEN_BLAS_TRMM_R(EIGTYPE, BLASTYPE, EIGPREFIX, BLASPREFIX) \
+#define EIGEN_BLAS_TRMM_R(EIGTYPE, BLASTYPE, EIGPREFIX, BLASFUNC) \
template <typename Index, int Mode, \
int LhsStorageOrder, bool ConjugateLhs, \
int RhsStorageOrder, bool ConjugateRhs> \
@@ -282,7 +289,7 @@ struct product_triangular_matrix_matrix_trmm<EIGTYPE,Index,Mode,false, \
} \
/*std::cout << "TRMM_R: A is square! Go to BLAS TRMM implementation! \n";*/ \
/* call ?trmm*/ \
- BLASPREFIX##trmm_(&side, &uplo, &transa, &diag, &m, &n, &numext::real_ref(alpha), (const BLASTYPE*)a, &lda, (BLASTYPE*)b, &ldb); \
+ BLASFUNC(&side, &uplo, &transa, &diag, &m, &n, (const BLASTYPE*)&numext::real_ref(alpha), (const BLASTYPE*)a, &lda, (BLASTYPE*)b, &ldb); \
\
/* Add op(a_triangular)*b into res*/ \
Map<MatrixX##EIGPREFIX, 0, OuterStride<> > res_tmp(res,rows,cols,OuterStride<>(resStride)); \
@@ -290,11 +297,17 @@ struct product_triangular_matrix_matrix_trmm<EIGTYPE,Index,Mode,false, \
} \
};
-EIGEN_BLAS_TRMM_R(double, double, d, d)
-EIGEN_BLAS_TRMM_R(dcomplex, double, cd, z)
-EIGEN_BLAS_TRMM_R(float, float, f, s)
-EIGEN_BLAS_TRMM_R(scomplex, float, cf, c)
-
+#ifdef EIGEN_USE_MKL
+EIGEN_BLAS_TRMM_R(double, double, d, dtrmm)
+EIGEN_BLAS_TRMM_R(dcomplex, MKL_Complex16, cd, ztrmm)
+EIGEN_BLAS_TRMM_R(float, float, f, strmm)
+EIGEN_BLAS_TRMM_R(scomplex, MKL_Complex8, cf, ctrmm)
+#else
+EIGEN_BLAS_TRMM_R(double, double, d, dtrmm_)
+EIGEN_BLAS_TRMM_R(dcomplex, double, cd, ztrmm_)
+EIGEN_BLAS_TRMM_R(float, float, f, strmm_)
+EIGEN_BLAS_TRMM_R(scomplex, float, cf, ctrmm_)
+#endif
} // end namespace internal
} // end namespace Eigen
diff --git a/Eigen/src/Core/products/TriangularMatrixVector.h b/Eigen/src/Core/products/TriangularMatrixVector.h
index 4b292e74d..76bfa159c 100644
--- a/Eigen/src/Core/products/TriangularMatrixVector.h
+++ b/Eigen/src/Core/products/TriangularMatrixVector.h
@@ -221,8 +221,9 @@ template<int Mode> struct trmv_selector<Mode,ColMajor>
typename internal::add_const_on_value_type<ActualLhsType>::type actualLhs = LhsBlasTraits::extract(lhs);
typename internal::add_const_on_value_type<ActualRhsType>::type actualRhs = RhsBlasTraits::extract(rhs);
- ResScalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(lhs)
- * RhsBlasTraits::extractScalarFactor(rhs);
+ LhsScalar lhs_alpha = LhsBlasTraits::extractScalarFactor(lhs);
+ RhsScalar rhs_alpha = RhsBlasTraits::extractScalarFactor(rhs);
+ ResScalar actualAlpha = alpha * lhs_alpha * rhs_alpha;
enum {
// FIXME find a way to allow an inner stride on the result if packet_traits<Scalar>::size==1
@@ -274,6 +275,12 @@ template<int Mode> struct trmv_selector<Mode,ColMajor>
else
dest = MappedDest(actualDestPtr, dest.size());
}
+
+ if ( ((Mode&UnitDiag)==UnitDiag) && (lhs_alpha!=LhsScalar(1)) )
+ {
+ Index diagSize = (std::min)(lhs.rows(),lhs.cols());
+ dest.head(diagSize) -= (lhs_alpha-LhsScalar(1))*rhs.head(diagSize);
+ }
}
};
@@ -295,8 +302,9 @@ template<int Mode> struct trmv_selector<Mode,RowMajor>
typename add_const<ActualLhsType>::type actualLhs = LhsBlasTraits::extract(lhs);
typename add_const<ActualRhsType>::type actualRhs = RhsBlasTraits::extract(rhs);
- ResScalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(lhs)
- * RhsBlasTraits::extractScalarFactor(rhs);
+ LhsScalar lhs_alpha = LhsBlasTraits::extractScalarFactor(lhs);
+ RhsScalar rhs_alpha = RhsBlasTraits::extractScalarFactor(rhs);
+ ResScalar actualAlpha = alpha * lhs_alpha * rhs_alpha;
enum {
DirectlyUseRhs = ActualRhsTypeCleaned::InnerStrideAtCompileTime==1
@@ -326,6 +334,12 @@ template<int Mode> struct trmv_selector<Mode,RowMajor>
actualRhsPtr,1,
dest.data(),dest.innerStride(),
actualAlpha);
+
+ if ( ((Mode&UnitDiag)==UnitDiag) && (lhs_alpha!=LhsScalar(1)) )
+ {
+ Index diagSize = (std::min)(lhs.rows(),lhs.cols());
+ dest.head(diagSize) -= (lhs_alpha-LhsScalar(1))*rhs.head(diagSize);
+ }
}
};
diff --git a/Eigen/src/Core/products/TriangularMatrixVector_BLAS.h b/Eigen/src/Core/products/TriangularMatrixVector_BLAS.h
index 07bf26ce5..3d47a2b94 100644
--- a/Eigen/src/Core/products/TriangularMatrixVector_BLAS.h
+++ b/Eigen/src/Core/products/TriangularMatrixVector_BLAS.h
@@ -71,7 +71,7 @@ EIGEN_BLAS_TRMV_SPECIALIZE(dcomplex)
EIGEN_BLAS_TRMV_SPECIALIZE(scomplex)
// implements col-major: res += alpha * op(triangular) * vector
-#define EIGEN_BLAS_TRMV_CM(EIGTYPE, BLASTYPE, EIGPREFIX, BLASPREFIX) \
+#define EIGEN_BLAS_TRMV_CM(EIGTYPE, BLASTYPE, EIGPREFIX, BLASPREFIX, BLASPOSTFIX) \
template<typename Index, int Mode, bool ConjLhs, bool ConjRhs> \
struct triangular_matrix_vector_product_trmv<Index,Mode,EIGTYPE,ConjLhs,EIGTYPE,ConjRhs,ColMajor> { \
enum { \
@@ -121,10 +121,10 @@ struct triangular_matrix_vector_product_trmv<Index,Mode,EIGTYPE,ConjLhs,EIGTYPE,
diag = IsUnitDiag ? 'U' : 'N'; \
\
/* call ?TRMV*/ \
- BLASPREFIX##trmv_(&uplo, &trans, &diag, &n, (const BLASTYPE*)_lhs, &lda, (BLASTYPE*)x, &incx); \
+ BLASPREFIX##trmv##BLASPOSTFIX(&uplo, &trans, &diag, &n, (const BLASTYPE*)_lhs, &lda, (BLASTYPE*)x, &incx); \
\
/* Add op(a_tr)rhs into res*/ \
- BLASPREFIX##axpy_(&n, &numext::real_ref(alpha),(const BLASTYPE*)x, &incx, (BLASTYPE*)_res, &incy); \
+ BLASPREFIX##axpy##BLASPOSTFIX(&n, (const BLASTYPE*)&numext::real_ref(alpha),(const BLASTYPE*)x, &incx, (BLASTYPE*)_res, &incy); \
/* Non-square case - doesn't fit to BLAS ?TRMV. Fall to default triangular product*/ \
if (size<(std::max)(rows,cols)) { \
if (ConjRhs) x_tmp = rhs.conjugate(); else x_tmp = rhs; \
@@ -142,18 +142,25 @@ struct triangular_matrix_vector_product_trmv<Index,Mode,EIGTYPE,ConjLhs,EIGTYPE,
m = convert_index<BlasIndex>(size); \
n = convert_index<BlasIndex>(cols-size); \
} \
- BLASPREFIX##gemv_(&trans, &m, &n, &numext::real_ref(alpha), (const BLASTYPE*)a, &lda, (const BLASTYPE*)x, &incx, &numext::real_ref(beta), (BLASTYPE*)y, &incy); \
+ BLASPREFIX##gemv##BLASPOSTFIX(&trans, &m, &n, (const BLASTYPE*)&numext::real_ref(alpha), (const BLASTYPE*)a, &lda, (const BLASTYPE*)x, &incx, (const BLASTYPE*)&numext::real_ref(beta), (BLASTYPE*)y, &incy); \
} \
} \
};
-EIGEN_BLAS_TRMV_CM(double, double, d, d)
-EIGEN_BLAS_TRMV_CM(dcomplex, double, cd, z)
-EIGEN_BLAS_TRMV_CM(float, float, f, s)
-EIGEN_BLAS_TRMV_CM(scomplex, float, cf, c)
+#ifdef EIGEN_USE_MKL
+EIGEN_BLAS_TRMV_CM(double, double, d, d,)
+EIGEN_BLAS_TRMV_CM(dcomplex, MKL_Complex16, cd, z,)
+EIGEN_BLAS_TRMV_CM(float, float, f, s,)
+EIGEN_BLAS_TRMV_CM(scomplex, MKL_Complex8, cf, c,)
+#else
+EIGEN_BLAS_TRMV_CM(double, double, d, d, _)
+EIGEN_BLAS_TRMV_CM(dcomplex, double, cd, z, _)
+EIGEN_BLAS_TRMV_CM(float, float, f, s, _)
+EIGEN_BLAS_TRMV_CM(scomplex, float, cf, c, _)
+#endif
// implements row-major: res += alpha * op(triangular) * vector
-#define EIGEN_BLAS_TRMV_RM(EIGTYPE, BLASTYPE, EIGPREFIX, BLASPREFIX) \
+#define EIGEN_BLAS_TRMV_RM(EIGTYPE, BLASTYPE, EIGPREFIX, BLASPREFIX, BLASPOSTFIX) \
template<typename Index, int Mode, bool ConjLhs, bool ConjRhs> \
struct triangular_matrix_vector_product_trmv<Index,Mode,EIGTYPE,ConjLhs,EIGTYPE,ConjRhs,RowMajor> { \
enum { \
@@ -203,10 +210,10 @@ struct triangular_matrix_vector_product_trmv<Index,Mode,EIGTYPE,ConjLhs,EIGTYPE,
diag = IsUnitDiag ? 'U' : 'N'; \
\
/* call ?TRMV*/ \
- BLASPREFIX##trmv_(&uplo, &trans, &diag, &n, (const BLASTYPE*)_lhs, &lda, (BLASTYPE*)x, &incx); \
+ BLASPREFIX##trmv##BLASPOSTFIX(&uplo, &trans, &diag, &n, (const BLASTYPE*)_lhs, &lda, (BLASTYPE*)x, &incx); \
\
/* Add op(a_tr)rhs into res*/ \
- BLASPREFIX##axpy_(&n, &numext::real_ref(alpha),(const BLASTYPE*)x, &incx, (BLASTYPE*)_res, &incy); \
+ BLASPREFIX##axpy##BLASPOSTFIX(&n, (const BLASTYPE*)&numext::real_ref(alpha),(const BLASTYPE*)x, &incx, (BLASTYPE*)_res, &incy); \
/* Non-square case - doesn't fit to BLAS ?TRMV. Fall to default triangular product*/ \
if (size<(std::max)(rows,cols)) { \
if (ConjRhs) x_tmp = rhs.conjugate(); else x_tmp = rhs; \
@@ -224,15 +231,22 @@ struct triangular_matrix_vector_product_trmv<Index,Mode,EIGTYPE,ConjLhs,EIGTYPE,
m = convert_index<BlasIndex>(size); \
n = convert_index<BlasIndex>(cols-size); \
} \
- BLASPREFIX##gemv_(&trans, &n, &m, &numext::real_ref(alpha), (const BLASTYPE*)a, &lda, (const BLASTYPE*)x, &incx, &numext::real_ref(beta), (BLASTYPE*)y, &incy); \
+ BLASPREFIX##gemv##BLASPOSTFIX(&trans, &n, &m, (const BLASTYPE*)&numext::real_ref(alpha), (const BLASTYPE*)a, &lda, (const BLASTYPE*)x, &incx, (const BLASTYPE*)&numext::real_ref(beta), (BLASTYPE*)y, &incy); \
} \
} \
};
-EIGEN_BLAS_TRMV_RM(double, double, d, d)
-EIGEN_BLAS_TRMV_RM(dcomplex, double, cd, z)
-EIGEN_BLAS_TRMV_RM(float, float, f, s)
-EIGEN_BLAS_TRMV_RM(scomplex, float, cf, c)
+#ifdef EIGEN_USE_MKL
+EIGEN_BLAS_TRMV_RM(double, double, d, d,)
+EIGEN_BLAS_TRMV_RM(dcomplex, MKL_Complex16, cd, z,)
+EIGEN_BLAS_TRMV_RM(float, float, f, s,)
+EIGEN_BLAS_TRMV_RM(scomplex, MKL_Complex8, cf, c,)
+#else
+EIGEN_BLAS_TRMV_RM(double, double, d, d,_)
+EIGEN_BLAS_TRMV_RM(dcomplex, double, cd, z,_)
+EIGEN_BLAS_TRMV_RM(float, float, f, s,_)
+EIGEN_BLAS_TRMV_RM(scomplex, float, cf, c,_)
+#endif
} // end namespace internal
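
Why the extra BLASPOSTFIX argument: when EIGEN_USE_MKL is defined the wrappers call the C prototypes declared in mkl.h (dtrmv, daxpy, dgemv and friends, without a trailing underscore, and with MKL_Complex8/MKL_Complex16 for complex arguments), whereas the plain-BLAS build keeps the Fortran-style symbols with a trailing underscore from misc/blas.h. The same naming scheme is applied to the ?trsm wrappers further down. A tiny token-pasting sketch, using hypothetical stub functions that only mimic the two conventions:

#include <cstdio>

// Hypothetical stand-ins for the two naming conventions, not the real BLAS entry points.
extern "C" inline void dtrmv_() { std::puts("reference BLAS: dtrmv_"); }
inline void dtrmv() { std::puts("MKL-style C prototype: dtrmv"); }

#define EIGEN_LIKE_TRMV(BLASPREFIX, BLASPOSTFIX) BLASPREFIX##trmv##BLASPOSTFIX

int main() {
  EIGEN_LIKE_TRMV(d, )();   // expands to dtrmv()  -- the EIGEN_USE_MKL branch
  EIGEN_LIKE_TRMV(d, _)();  // expands to dtrmv_() -- the plain-BLAS branch
}
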
diff --git a/Eigen/src/Core/products/TriangularSolverMatrix.h b/Eigen/src/Core/products/TriangularSolverMatrix.h
index 223c38b86..8ff2e9d9d 100644
--- a/Eigen/src/Core/products/TriangularSolverMatrix.h
+++ b/Eigen/src/Core/products/TriangularSolverMatrix.h
@@ -76,7 +76,7 @@ EIGEN_DONT_INLINE void triangular_solve_matrix<Scalar,Index,OnTheLeft,Mode,Conju
conj_if<Conjugate> conj;
gebp_kernel<Scalar, Scalar, Index, OtherMapper, Traits::mr, Traits::nr, Conjugate, false> gebp_kernel;
- gemm_pack_lhs<Scalar, Index, TriMapper, Traits::mr, Traits::LhsProgress, TriStorageOrder> pack_lhs;
+ gemm_pack_lhs<Scalar, Index, TriMapper, Traits::mr, Traits::LhsProgress, typename Traits::LhsPacket4Packing, TriStorageOrder> pack_lhs;
gemm_pack_rhs<Scalar, Index, OtherMapper, Traits::nr, ColMajor, false, true> pack_rhs;
  // the goal here is to subdivide the Rhs panels such that we keep some cache
@@ -229,7 +229,7 @@ EIGEN_DONT_INLINE void triangular_solve_matrix<Scalar,Index,OnTheRight,Mode,Conj
gebp_kernel<Scalar, Scalar, Index, LhsMapper, Traits::mr, Traits::nr, false, Conjugate> gebp_kernel;
gemm_pack_rhs<Scalar, Index, RhsMapper, Traits::nr, RhsStorageOrder> pack_rhs;
gemm_pack_rhs<Scalar, Index, RhsMapper, Traits::nr, RhsStorageOrder,false,true> pack_rhs_panel;
- gemm_pack_lhs<Scalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, ColMajor, false, true> pack_lhs_panel;
+ gemm_pack_lhs<Scalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, typename Traits::LhsPacket4Packing, ColMajor, false, true> pack_lhs_panel;
for(Index k2=IsLower ? size : 0;
IsLower ? k2>0 : k2<size;
diff --git a/Eigen/src/Core/products/TriangularSolverMatrix_BLAS.h b/Eigen/src/Core/products/TriangularSolverMatrix_BLAS.h
index 88c0fb794..f0775116a 100644
--- a/Eigen/src/Core/products/TriangularSolverMatrix_BLAS.h
+++ b/Eigen/src/Core/products/TriangularSolverMatrix_BLAS.h
@@ -38,7 +38,7 @@ namespace Eigen {
namespace internal {
// implements LeftSide op(triangular)^-1 * general
-#define EIGEN_BLAS_TRSM_L(EIGTYPE, BLASTYPE, BLASPREFIX) \
+#define EIGEN_BLAS_TRSM_L(EIGTYPE, BLASTYPE, BLASFUNC) \
template <typename Index, int Mode, bool Conjugate, int TriStorageOrder> \
struct triangular_solve_matrix<EIGTYPE,Index,OnTheLeft,Mode,Conjugate,TriStorageOrder,ColMajor> \
{ \
@@ -80,18 +80,24 @@ struct triangular_solve_matrix<EIGTYPE,Index,OnTheLeft,Mode,Conjugate,TriStorage
} \
if (IsUnitDiag) diag='U'; \
/* call ?trsm*/ \
- BLASPREFIX##trsm_(&side, &uplo, &transa, &diag, &m, &n, &numext::real_ref(alpha), (const BLASTYPE*)a, &lda, (BLASTYPE*)_other, &ldb); \
+ BLASFUNC(&side, &uplo, &transa, &diag, &m, &n, (const BLASTYPE*)&numext::real_ref(alpha), (const BLASTYPE*)a, &lda, (BLASTYPE*)_other, &ldb); \
} \
};
-EIGEN_BLAS_TRSM_L(double, double, d)
-EIGEN_BLAS_TRSM_L(dcomplex, double, z)
-EIGEN_BLAS_TRSM_L(float, float, s)
-EIGEN_BLAS_TRSM_L(scomplex, float, c)
-
+#ifdef EIGEN_USE_MKL
+EIGEN_BLAS_TRSM_L(double, double, dtrsm)
+EIGEN_BLAS_TRSM_L(dcomplex, MKL_Complex16, ztrsm)
+EIGEN_BLAS_TRSM_L(float, float, strsm)
+EIGEN_BLAS_TRSM_L(scomplex, MKL_Complex8, ctrsm)
+#else
+EIGEN_BLAS_TRSM_L(double, double, dtrsm_)
+EIGEN_BLAS_TRSM_L(dcomplex, double, ztrsm_)
+EIGEN_BLAS_TRSM_L(float, float, strsm_)
+EIGEN_BLAS_TRSM_L(scomplex, float, ctrsm_)
+#endif
// implements RightSide general * op(triangular)^-1
-#define EIGEN_BLAS_TRSM_R(EIGTYPE, BLASTYPE, BLASPREFIX) \
+#define EIGEN_BLAS_TRSM_R(EIGTYPE, BLASTYPE, BLASFUNC) \
template <typename Index, int Mode, bool Conjugate, int TriStorageOrder> \
struct triangular_solve_matrix<EIGTYPE,Index,OnTheRight,Mode,Conjugate,TriStorageOrder,ColMajor> \
{ \
@@ -133,16 +139,22 @@ struct triangular_solve_matrix<EIGTYPE,Index,OnTheRight,Mode,Conjugate,TriStorag
} \
if (IsUnitDiag) diag='U'; \
/* call ?trsm*/ \
- BLASPREFIX##trsm_(&side, &uplo, &transa, &diag, &m, &n, &numext::real_ref(alpha), (const BLASTYPE*)a, &lda, (BLASTYPE*)_other, &ldb); \
+ BLASFUNC(&side, &uplo, &transa, &diag, &m, &n, (const BLASTYPE*)&numext::real_ref(alpha), (const BLASTYPE*)a, &lda, (BLASTYPE*)_other, &ldb); \
/*std::cout << "TRMS_L specialization!\n";*/ \
} \
};
-EIGEN_BLAS_TRSM_R(double, double, d)
-EIGEN_BLAS_TRSM_R(dcomplex, double, z)
-EIGEN_BLAS_TRSM_R(float, float, s)
-EIGEN_BLAS_TRSM_R(scomplex, float, c)
-
+#ifdef EIGEN_USE_MKL
+EIGEN_BLAS_TRSM_R(double, double, dtrsm)
+EIGEN_BLAS_TRSM_R(dcomplex, MKL_Complex16, ztrsm)
+EIGEN_BLAS_TRSM_R(float, float, strsm)
+EIGEN_BLAS_TRSM_R(scomplex, MKL_Complex8, ctrsm)
+#else
+EIGEN_BLAS_TRSM_R(double, double, dtrsm_)
+EIGEN_BLAS_TRSM_R(dcomplex, double, ztrsm_)
+EIGEN_BLAS_TRSM_R(float, float, strsm_)
+EIGEN_BLAS_TRSM_R(scomplex, float, ctrsm_)
+#endif
} // end namespace internal
diff --git a/Eigen/src/Core/products/TriangularSolverVector.h b/Eigen/src/Core/products/TriangularSolverVector.h
index b994759b2..647317016 100644
--- a/Eigen/src/Core/products/TriangularSolverVector.h
+++ b/Eigen/src/Core/products/TriangularSolverVector.h
@@ -58,7 +58,7 @@ struct triangular_solve_vector<LhsScalar, RhsScalar, Index, OnTheLeft, Mode, Con
{
// let's directly call the low level product function because:
// 1 - it is faster to compile
- // 2 - it is slighlty faster at runtime
+ // 2 - it is slightly faster at runtime
Index startRow = IsLower ? pi : pi-actualPanelWidth;
Index startCol = IsLower ? 0 : pi;
@@ -77,7 +77,7 @@ struct triangular_solve_vector<LhsScalar, RhsScalar, Index, OnTheLeft, Mode, Con
if (k>0)
rhs[i] -= (cjLhs.row(i).segment(s,k).transpose().cwiseProduct(Map<const Matrix<RhsScalar,Dynamic,1> >(rhs+s,k))).sum();
- if(!(Mode & UnitDiag))
+ if((!(Mode & UnitDiag)) && numext::not_equal_strict(rhs[i],RhsScalar(0)))
rhs[i] /= cjLhs(i,i);
}
}
@@ -114,20 +114,23 @@ struct triangular_solve_vector<LhsScalar, RhsScalar, Index, OnTheLeft, Mode, Con
for(Index k=0; k<actualPanelWidth; ++k)
{
Index i = IsLower ? pi+k : pi-k-1;
- if(!(Mode & UnitDiag))
- rhs[i] /= cjLhs.coeff(i,i);
-
- Index r = actualPanelWidth - k - 1; // remaining size
- Index s = IsLower ? i+1 : i-r;
- if (r>0)
- Map<Matrix<RhsScalar,Dynamic,1> >(rhs+s,r) -= rhs[i] * cjLhs.col(i).segment(s,r);
+ if(numext::not_equal_strict(rhs[i],RhsScalar(0)))
+ {
+ if(!(Mode & UnitDiag))
+ rhs[i] /= cjLhs.coeff(i,i);
+
+ Index r = actualPanelWidth - k - 1; // remaining size
+ Index s = IsLower ? i+1 : i-r;
+ if (r>0)
+ Map<Matrix<RhsScalar,Dynamic,1> >(rhs+s,r) -= rhs[i] * cjLhs.col(i).segment(s,r);
+ }
}
Index r = IsLower ? size - endBlock : startBlock; // remaining size
if (r > 0)
{
// let's directly call the low level product function because:
// 1 - it is faster to compile
- // 2 - it is slighlty faster at runtime
+ // 2 - it is slightly faster at runtime
general_matrix_vector_product<Index,LhsScalar,LhsMapper,ColMajor,Conjugate,RhsScalar,RhsMapper,false>::run(
r, actualPanelWidth,
LhsMapper(&lhs.coeffRef(endBlock,startBlock), lhsStride),
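
The guard added in both hunks skips the division and the trailing update whenever the current right-hand-side entry is exactly zero (numext::not_equal_strict performs a strict comparison). This saves work on right-hand sides with many structural zeros and leaves those zeros untouched. A scalar sketch of the guarded substitution, mirroring the column-major loop but written against dense public types only for illustration:

#include <Eigen/Dense>

// Solves L*x = rhs in place for a lower-triangular, non-unit-diagonal L (column-major sweep).
void solve_lower_inplace(const Eigen::MatrixXd& L, Eigen::VectorXd& rhs) {
  const Eigen::Index n = L.rows();
  for (Eigen::Index i = 0; i < n; ++i) {
    if (rhs[i] != 0.0) {                 // strict test, as with numext::not_equal_strict
      rhs[i] /= L(i, i);                 // division skipped entirely for exact zeros
      if (i + 1 < n)
        rhs.tail(n - i - 1) -= rhs[i] * L.col(i).tail(n - i - 1);
    }
  }
}

int main() { return 0; }
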
diff --git a/Eigen/src/Core/util/BlasUtil.h b/Eigen/src/Core/util/BlasUtil.h
index b1791fb3a..a32630ed7 100755
--- a/Eigen/src/Core/util/BlasUtil.h
+++ b/Eigen/src/Core/util/BlasUtil.h
@@ -24,7 +24,7 @@ struct gebp_kernel;
template<typename Scalar, typename Index, typename DataMapper, int nr, int StorageOrder, bool Conjugate = false, bool PanelMode=false>
struct gemm_pack_rhs;
-template<typename Scalar, typename Index, typename DataMapper, int Pack1, int Pack2, int StorageOrder, bool Conjugate = false, bool PanelMode = false>
+template<typename Scalar, typename Index, typename DataMapper, int Pack1, int Pack2, typename Packet, int StorageOrder, bool Conjugate = false, bool PanelMode = false>
struct gemm_pack_lhs;
template<
@@ -156,11 +156,9 @@ class BlasVectorMapper {
};
template<typename Scalar, typename Index, int AlignmentType>
-class BlasLinearMapper {
- public:
- typedef typename packet_traits<Scalar>::type Packet;
- typedef typename packet_traits<Scalar>::half HalfPacket;
-
+class BlasLinearMapper
+{
+public:
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE BlasLinearMapper(Scalar *data) : m_data(data) {}
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void prefetch(int i) const {
@@ -171,29 +169,25 @@ class BlasLinearMapper {
return m_data[i];
}
- EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet loadPacket(Index i) const {
- return ploadt<Packet, AlignmentType>(m_data + i);
+ template<typename PacketType>
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE PacketType loadPacket(Index i) const {
+ return ploadt<PacketType, AlignmentType>(m_data + i);
}
- EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE HalfPacket loadHalfPacket(Index i) const {
- return ploadt<HalfPacket, AlignmentType>(m_data + i);
+ template<typename PacketType>
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void storePacket(Index i, const PacketType &p) const {
+ pstoret<Scalar, PacketType, AlignmentType>(m_data + i, p);
}
- EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void storePacket(Index i, const Packet &p) const {
- pstoret<Scalar, Packet, AlignmentType>(m_data + i, p);
- }
-
- protected:
+protected:
Scalar *m_data;
};
// Lightweight helper class to access matrix coefficients.
template<typename Scalar, typename Index, int StorageOrder, int AlignmentType = Unaligned>
-class blas_data_mapper {
- public:
- typedef typename packet_traits<Scalar>::type Packet;
- typedef typename packet_traits<Scalar>::half HalfPacket;
-
+class blas_data_mapper
+{
+public:
typedef BlasLinearMapper<Scalar, Index, AlignmentType> LinearMapper;
typedef BlasVectorMapper<Scalar, Index> VectorMapper;
@@ -218,8 +212,9 @@ class blas_data_mapper {
return m_data[StorageOrder==RowMajor ? j + i*m_stride : i + j*m_stride];
}
- EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet loadPacket(Index i, Index j) const {
- return ploadt<Packet, AlignmentType>(&operator()(i, j));
+ template<typename PacketType>
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE PacketType loadPacket(Index i, Index j) const {
+ return ploadt<PacketType, AlignmentType>(&operator()(i, j));
}
template <typename PacketT, int AlignmentT>
@@ -227,10 +222,6 @@ class blas_data_mapper {
return ploadt<PacketT, AlignmentT>(&operator()(i, j));
}
- EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE HalfPacket loadHalfPacket(Index i, Index j) const {
- return ploadt<HalfPacket, AlignmentType>(&operator()(i, j));
- }
-
template<typename SubPacket>
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void scatterPacket(Index i, Index j, const SubPacket &p) const {
pscatter<Scalar, SubPacket>(&operator()(i, j), p, m_stride);
@@ -251,7 +242,7 @@ class blas_data_mapper {
return internal::first_default_aligned(m_data, size);
}
- protected:
+protected:
Scalar* EIGEN_RESTRICT m_data;
const Index m_stride;
};
@@ -289,8 +280,8 @@ template<typename XprType> struct blas_traits
ExtractType,
typename _ExtractType::PlainObject
>::type DirectLinearAccessType;
- static inline ExtractType extract(const XprType& x) { return x; }
- static inline const Scalar extractScalarFactor(const XprType&) { return Scalar(1); }
+ static inline EIGEN_DEVICE_FUNC ExtractType extract(const XprType& x) { return x; }
+ static inline EIGEN_DEVICE_FUNC const Scalar extractScalarFactor(const XprType&) { return Scalar(1); }
};
// pop conjugate
@@ -318,8 +309,8 @@ struct blas_traits<CwiseBinaryOp<scalar_product_op<Scalar>, const CwiseNullaryOp
typedef blas_traits<NestedXpr> Base;
typedef CwiseBinaryOp<scalar_product_op<Scalar>, const CwiseNullaryOp<scalar_constant_op<Scalar>,Plain>, NestedXpr> XprType;
typedef typename Base::ExtractType ExtractType;
- static inline ExtractType extract(const XprType& x) { return Base::extract(x.rhs()); }
- static inline Scalar extractScalarFactor(const XprType& x)
+ static inline EIGEN_DEVICE_FUNC ExtractType extract(const XprType& x) { return Base::extract(x.rhs()); }
+ static inline EIGEN_DEVICE_FUNC Scalar extractScalarFactor(const XprType& x)
{ return x.lhs().functor().m_other * Base::extractScalarFactor(x.rhs()); }
};
template<typename Scalar, typename NestedXpr, typename Plain>
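
The mapper changes above drop the class-level Packet/HalfPacket typedefs and let each call site choose the packet type instead, which is what the new LhsPacket4Packing template argument of gemm_pack_lhs relies on. A plain C++ sketch of that design change; the types below are simplified stand-ins, not Eigen's actual packets or mappers:

#include <cstddef>

struct Packet4f { float v[4]; };
struct Packet8f { float v[8]; };

struct LinearMapperSketch {
  explicit LinearMapperSketch(const float* data) : m_data(data) {}
  // The packet width is now a per-call template parameter rather than a fixed class typedef.
  template <typename PacketType>
  PacketType loadPacket(std::ptrdiff_t i) const {
    PacketType p;
    for (std::size_t k = 0; k < sizeof(p.v) / sizeof(float); ++k) p.v[k] = m_data[i + k];
    return p;
  }
  const float* m_data;
};

int main() {
  float buf[16] = {};
  LinearMapperSketch m(buf);
  Packet8f full = m.loadPacket<Packet8f>(0);  // previously the only width the mapper exposed
  Packet4f half = m.loadPacket<Packet4f>(8);  // previously required a separate loadHalfPacket()
  (void)full; (void)half;
}
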
diff --git a/Eigen/src/Core/util/ConfigureVectorization.h b/Eigen/src/Core/util/ConfigureVectorization.h
new file mode 100644
index 000000000..e75c7d89e
--- /dev/null
+++ b/Eigen/src/Core/util/ConfigureVectorization.h
@@ -0,0 +1,436 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2018 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CONFIGURE_VECTORIZATION_H
+#define EIGEN_CONFIGURE_VECTORIZATION_H
+
+// FIXME: not sure why this is needed, perhaps it is not needed anymore.
+#ifdef __NVCC__
+ #ifndef EIGEN_DONT_VECTORIZE
+ #define EIGEN_DONT_VECTORIZE
+ #endif
+#endif
+
+//------------------------------------------------------------------------------------------
+// Static and dynamic alignment control
+//
+// The main purpose of this section is to define EIGEN_MAX_ALIGN_BYTES and EIGEN_MAX_STATIC_ALIGN_BYTES
+// as the maximal boundary in bytes on which dynamically and statically allocated data may be aligned, respectively.
+// The values of EIGEN_MAX_ALIGN_BYTES and EIGEN_MAX_STATIC_ALIGN_BYTES can be specified by the user. If not,
+// a default value is automatically computed based on architecture, compiler, and OS.
+//
+// This section also defines macros EIGEN_ALIGN_TO_BOUNDARY(N) and the shortcuts EIGEN_ALIGN{8,16,32,_MAX}
+// to be used to declare statically aligned buffers.
+//------------------------------------------------------------------------------------------
+
+
+/* EIGEN_ALIGN_TO_BOUNDARY(n) forces data to be n-byte aligned. This is used to satisfy SIMD requirements.
+ * However, we do that EVEN if vectorization (EIGEN_VECTORIZE) is disabled,
+ * so that vectorization doesn't affect binary compatibility.
+ *
+ * If we made alignment depend on whether or not EIGEN_VECTORIZE is defined, it would be impossible to link
+ * vectorized and non-vectorized code.
+ */
+#if (defined EIGEN_CUDACC)
+ #define EIGEN_ALIGN_TO_BOUNDARY(n) __align__(n)
+ #define EIGEN_ALIGNOF(x) __alignof(x)
+#elif EIGEN_COMP_GNUC || EIGEN_COMP_PGI || EIGEN_COMP_IBM || EIGEN_COMP_ARM
+ #define EIGEN_ALIGN_TO_BOUNDARY(n) __attribute__((aligned(n)))
+ #define EIGEN_ALIGNOF(x) __alignof(x)
+#elif EIGEN_COMP_MSVC
+ #define EIGEN_ALIGN_TO_BOUNDARY(n) __declspec(align(n))
+ #define EIGEN_ALIGNOF(x) __alignof(x)
+#elif EIGEN_COMP_SUNCC
+ // FIXME not sure about this one:
+ #define EIGEN_ALIGN_TO_BOUNDARY(n) __attribute__((aligned(n)))
+ #define EIGEN_ALIGNOF(x) __alignof(x)
+#else
+ #error Please tell me what is the equivalent of __attribute__((aligned(n))) and __alignof(x) for your compiler
+#endif
+
+// If the user explicitly disables vectorization, then we also disable alignment
+#if defined(EIGEN_DONT_VECTORIZE)
+ #define EIGEN_IDEAL_MAX_ALIGN_BYTES 0
+#elif defined(__AVX512F__)
+ // 64 bytes static alignment is preferred only if really required
+ #define EIGEN_IDEAL_MAX_ALIGN_BYTES 64
+#elif defined(__AVX__)
+ // 32 bytes static alignment is preferred only if really required
+ #define EIGEN_IDEAL_MAX_ALIGN_BYTES 32
+#else
+ #define EIGEN_IDEAL_MAX_ALIGN_BYTES 16
+#endif
+
+
+// EIGEN_MIN_ALIGN_BYTES defines the minimal value for which the notion of explicit alignment makes sense
+#define EIGEN_MIN_ALIGN_BYTES 16
+
+// Defines the boundary (in bytes) on which the data needs to be aligned. Note
+// that unless EIGEN_ALIGN is defined and not equal to 0, the data may not be
+// aligned at all regardless of the value of this #define.
+
+#if (defined(EIGEN_DONT_ALIGN_STATICALLY) || defined(EIGEN_DONT_ALIGN)) && defined(EIGEN_MAX_STATIC_ALIGN_BYTES) && EIGEN_MAX_STATIC_ALIGN_BYTES>0
+#error EIGEN_MAX_STATIC_ALIGN_BYTES and EIGEN_DONT_ALIGN[_STATICALLY] are both defined with EIGEN_MAX_STATIC_ALIGN_BYTES!=0. Use EIGEN_MAX_STATIC_ALIGN_BYTES=0 as a synonym of EIGEN_DONT_ALIGN_STATICALLY.
+#endif
+
+// EIGEN_DONT_ALIGN_STATICALLY and EIGEN_DONT_ALIGN are deprecated
+// They imply EIGEN_MAX_STATIC_ALIGN_BYTES=0
+#if defined(EIGEN_DONT_ALIGN_STATICALLY) || defined(EIGEN_DONT_ALIGN)
+ #ifdef EIGEN_MAX_STATIC_ALIGN_BYTES
+ #undef EIGEN_MAX_STATIC_ALIGN_BYTES
+ #endif
+ #define EIGEN_MAX_STATIC_ALIGN_BYTES 0
+#endif
+
+#ifndef EIGEN_MAX_STATIC_ALIGN_BYTES
+
+ // Try to automatically guess what is the best default value for EIGEN_MAX_STATIC_ALIGN_BYTES
+
+ // 16 byte alignment is only useful for vectorization. Since it affects the ABI, we need to enable
+ // 16 byte alignment on all platforms where vectorization might be enabled. In theory we could always
+ // enable alignment, but it can be a cause of problems on some platforms, so we just disable it in
+ // certain common platform (compiler+architecture combinations) to avoid these problems.
+ // Only static alignment is really problematic (relies on nonstandard compiler extensions),
+ // try to keep heap alignment even when we have to disable static alignment.
+ #if EIGEN_COMP_GNUC && !(EIGEN_ARCH_i386_OR_x86_64 || EIGEN_ARCH_ARM_OR_ARM64 || EIGEN_ARCH_PPC || EIGEN_ARCH_IA64 || EIGEN_ARCH_MIPS)
+ #define EIGEN_GCC_AND_ARCH_DOESNT_WANT_STACK_ALIGNMENT 1
+ #elif EIGEN_ARCH_ARM_OR_ARM64 && EIGEN_COMP_GNUC_STRICT && EIGEN_GNUC_AT_MOST(4, 6)
+ // Old versions of GCC on ARM, at least 4.4, were once seen to have buggy static alignment support.
+ // Not sure which version fixed it, hopefully it doesn't affect 4.7, which is still somewhat in use.
+ // 4.8 and newer seem definitely unaffected.
+ #define EIGEN_GCC_AND_ARCH_DOESNT_WANT_STACK_ALIGNMENT 1
+ #else
+ #define EIGEN_GCC_AND_ARCH_DOESNT_WANT_STACK_ALIGNMENT 0
+ #endif
+
+ // static alignment is completely disabled with GCC 3, Sun Studio, and QCC/QNX
+ #if !EIGEN_GCC_AND_ARCH_DOESNT_WANT_STACK_ALIGNMENT \
+ && !EIGEN_GCC3_OR_OLDER \
+ && !EIGEN_COMP_SUNCC \
+ && !EIGEN_OS_QNX
+ #define EIGEN_ARCH_WANTS_STACK_ALIGNMENT 1
+ #else
+ #define EIGEN_ARCH_WANTS_STACK_ALIGNMENT 0
+ #endif
+
+ #if EIGEN_ARCH_WANTS_STACK_ALIGNMENT
+ #define EIGEN_MAX_STATIC_ALIGN_BYTES EIGEN_IDEAL_MAX_ALIGN_BYTES
+ #else
+ #define EIGEN_MAX_STATIC_ALIGN_BYTES 0
+ #endif
+
+#endif
+
+// If EIGEN_MAX_ALIGN_BYTES is defined, then it is considered as an upper bound for EIGEN_MAX_STATIC_ALIGN_BYTES
+#if defined(EIGEN_MAX_ALIGN_BYTES) && EIGEN_MAX_ALIGN_BYTES<EIGEN_MAX_STATIC_ALIGN_BYTES
+#undef EIGEN_MAX_STATIC_ALIGN_BYTES
+#define EIGEN_MAX_STATIC_ALIGN_BYTES EIGEN_MAX_ALIGN_BYTES
+#endif
+
+#if EIGEN_MAX_STATIC_ALIGN_BYTES==0 && !defined(EIGEN_DISABLE_UNALIGNED_ARRAY_ASSERT)
+ #define EIGEN_DISABLE_UNALIGNED_ARRAY_ASSERT
+#endif
+
+// At this stage, EIGEN_MAX_STATIC_ALIGN_BYTES>0 is the true test whether we want to align arrays on the stack or not.
+// It takes into account both the user choice to explicitly enable/disable alignment (by setting EIGEN_MAX_STATIC_ALIGN_BYTES)
+// and the architecture config (EIGEN_ARCH_WANTS_STACK_ALIGNMENT).
+// Henceforth, only EIGEN_MAX_STATIC_ALIGN_BYTES should be used.
+
+
+// Shortcuts to EIGEN_ALIGN_TO_BOUNDARY
+#define EIGEN_ALIGN8 EIGEN_ALIGN_TO_BOUNDARY(8)
+#define EIGEN_ALIGN16 EIGEN_ALIGN_TO_BOUNDARY(16)
+#define EIGEN_ALIGN32 EIGEN_ALIGN_TO_BOUNDARY(32)
+#define EIGEN_ALIGN64 EIGEN_ALIGN_TO_BOUNDARY(64)
+#if EIGEN_MAX_STATIC_ALIGN_BYTES>0
+#define EIGEN_ALIGN_MAX EIGEN_ALIGN_TO_BOUNDARY(EIGEN_MAX_STATIC_ALIGN_BYTES)
+#else
+#define EIGEN_ALIGN_MAX
+#endif
+
+
+// Dynamic alignment control
+
+#if defined(EIGEN_DONT_ALIGN) && defined(EIGEN_MAX_ALIGN_BYTES) && EIGEN_MAX_ALIGN_BYTES>0
+#error EIGEN_MAX_ALIGN_BYTES and EIGEN_DONT_ALIGN are both defined with EIGEN_MAX_ALIGN_BYTES!=0. Use EIGEN_MAX_ALIGN_BYTES=0 as a synonym of EIGEN_DONT_ALIGN.
+#endif
+
+#ifdef EIGEN_DONT_ALIGN
+ #ifdef EIGEN_MAX_ALIGN_BYTES
+ #undef EIGEN_MAX_ALIGN_BYTES
+ #endif
+ #define EIGEN_MAX_ALIGN_BYTES 0
+#elif !defined(EIGEN_MAX_ALIGN_BYTES)
+ #define EIGEN_MAX_ALIGN_BYTES EIGEN_IDEAL_MAX_ALIGN_BYTES
+#endif
+
+#if EIGEN_IDEAL_MAX_ALIGN_BYTES > EIGEN_MAX_ALIGN_BYTES
+#define EIGEN_DEFAULT_ALIGN_BYTES EIGEN_IDEAL_MAX_ALIGN_BYTES
+#else
+#define EIGEN_DEFAULT_ALIGN_BYTES EIGEN_MAX_ALIGN_BYTES
+#endif
+
+
+#ifndef EIGEN_UNALIGNED_VECTORIZE
+#define EIGEN_UNALIGNED_VECTORIZE 1
+#endif
+
+//----------------------------------------------------------------------
+
+
+
+// if alignment is disabled, then disable vectorization. Note: EIGEN_MAX_ALIGN_BYTES is the proper check, it takes into
+// account both the user's will (EIGEN_MAX_ALIGN_BYTES,EIGEN_DONT_ALIGN) and our own platform checks
+#if EIGEN_MAX_ALIGN_BYTES==0
+ #ifndef EIGEN_DONT_VECTORIZE
+ #define EIGEN_DONT_VECTORIZE
+ #endif
+#endif
+
+
+// The following (except #include <malloc.h> and _M_IX86_FP ??) can likely be
+// removed as gcc 4.1 and msvc 2008 are not supported anyways.
+#if EIGEN_COMP_MSVC
+ #include <malloc.h> // for _aligned_malloc -- need it regardless of whether vectorization is enabled
+ #if (EIGEN_COMP_MSVC >= 1500) // 2008 or later
+ // a user reported that in 64-bit mode, MSVC doesn't care to define _M_IX86_FP.
+ #if (defined(_M_IX86_FP) && (_M_IX86_FP >= 2)) || EIGEN_ARCH_x86_64
+ #define EIGEN_SSE2_ON_MSVC_2008_OR_LATER
+ #endif
+ #endif
+#else
+ #if (defined __SSE2__) && ( (!EIGEN_COMP_GNUC) || EIGEN_COMP_ICC || EIGEN_GNUC_AT_LEAST(4,2) )
+ #define EIGEN_SSE2_ON_NON_MSVC_BUT_NOT_OLD_GCC
+ #endif
+#endif
+
+
+#ifndef EIGEN_DONT_VECTORIZE
+
+ #if defined (EIGEN_SSE2_ON_NON_MSVC_BUT_NOT_OLD_GCC) || defined(EIGEN_SSE2_ON_MSVC_2008_OR_LATER)
+
+ // Defines symbols for compile-time detection of which instructions are
+ // used.
+ // EIGEN_VECTORIZE_YY is defined if and only if the instruction set YY is used
+ #define EIGEN_VECTORIZE
+ #define EIGEN_VECTORIZE_SSE
+ #define EIGEN_VECTORIZE_SSE2
+
+ // Detect sse3/ssse3/sse4:
+ // gcc and icc defines __SSE3__, ...
+ // there is no way to know about this on msvc. You can define EIGEN_VECTORIZE_SSE* if you
+ // want to force the use of those instructions with msvc.
+ #ifdef __SSE3__
+ #define EIGEN_VECTORIZE_SSE3
+ #endif
+ #ifdef __SSSE3__
+ #define EIGEN_VECTORIZE_SSSE3
+ #endif
+ #ifdef __SSE4_1__
+ #define EIGEN_VECTORIZE_SSE4_1
+ #endif
+ #ifdef __SSE4_2__
+ #define EIGEN_VECTORIZE_SSE4_2
+ #endif
+ #ifdef __AVX__
+ #define EIGEN_VECTORIZE_AVX
+ #define EIGEN_VECTORIZE_SSE3
+ #define EIGEN_VECTORIZE_SSSE3
+ #define EIGEN_VECTORIZE_SSE4_1
+ #define EIGEN_VECTORIZE_SSE4_2
+ #endif
+ #ifdef __AVX2__
+ #define EIGEN_VECTORIZE_AVX2
+ #define EIGEN_VECTORIZE_AVX
+ #define EIGEN_VECTORIZE_SSE3
+ #define EIGEN_VECTORIZE_SSSE3
+ #define EIGEN_VECTORIZE_SSE4_1
+ #define EIGEN_VECTORIZE_SSE4_2
+ #endif
+ #ifdef __FMA__
+ #define EIGEN_VECTORIZE_FMA
+ #endif
+ #if defined(__AVX512F__)
+ #define EIGEN_VECTORIZE_AVX512
+ #define EIGEN_VECTORIZE_AVX2
+ #define EIGEN_VECTORIZE_AVX
+ #define EIGEN_VECTORIZE_FMA
+ #define EIGEN_VECTORIZE_SSE3
+ #define EIGEN_VECTORIZE_SSSE3
+ #define EIGEN_VECTORIZE_SSE4_1
+ #define EIGEN_VECTORIZE_SSE4_2
+ #ifdef __AVX512DQ__
+ #define EIGEN_VECTORIZE_AVX512DQ
+ #endif
+ #ifdef __AVX512ER__
+ #define EIGEN_VECTORIZE_AVX512ER
+ #endif
+ #endif
+
+ // include files
+
+ // This extern "C" works around a MINGW-w64 compilation issue
+ // https://sourceforge.net/tracker/index.php?func=detail&aid=3018394&group_id=202880&atid=983354
+ // In essence, intrin.h is included by windows.h and also declares intrinsics (just as emmintrin.h etc. below do).
+ // However, intrin.h uses an extern "C" declaration, and g++ thus complains of duplicate declarations
+ // with conflicting linkage. The linkage for intrinsics doesn't matter, but at that stage the compiler doesn't know;
+ // so, to avoid compile errors when windows.h is included after Eigen/Core, ensure intrinsics are extern "C" here too.
+ // notice that since these are C headers, the extern "C" is theoretically needed anyways.
+ extern "C" {
+ // In theory we should only include immintrin.h and not the other *mmintrin.h header files directly.
+ // Doing so triggers some issues with ICC. However, old gcc versions seem not to have this file, thus:
+ #if EIGEN_COMP_ICC >= 1110
+ #include <immintrin.h>
+ #else
+ #include <mmintrin.h>
+ #include <emmintrin.h>
+ #include <xmmintrin.h>
+ #ifdef EIGEN_VECTORIZE_SSE3
+ #include <pmmintrin.h>
+ #endif
+ #ifdef EIGEN_VECTORIZE_SSSE3
+ #include <tmmintrin.h>
+ #endif
+ #ifdef EIGEN_VECTORIZE_SSE4_1
+ #include <smmintrin.h>
+ #endif
+ #ifdef EIGEN_VECTORIZE_SSE4_2
+ #include <nmmintrin.h>
+ #endif
+ #if defined(EIGEN_VECTORIZE_AVX) || defined(EIGEN_VECTORIZE_AVX512)
+ #include <immintrin.h>
+ #endif
+ #endif
+ } // end extern "C"
+
+ #elif defined __VSX__
+
+ #define EIGEN_VECTORIZE
+ #define EIGEN_VECTORIZE_VSX
+ #include <altivec.h>
+ // We need to #undef all these ugly tokens defined in <altivec.h>
+ // => use __vector instead of vector
+ #undef bool
+ #undef vector
+ #undef pixel
+
+ #elif defined __ALTIVEC__
+
+ #define EIGEN_VECTORIZE
+ #define EIGEN_VECTORIZE_ALTIVEC
+ #include <altivec.h>
+ // We need to #undef all these ugly tokens defined in <altivec.h>
+ // => use __vector instead of vector
+ #undef bool
+ #undef vector
+ #undef pixel
+
+ #elif (defined __ARM_NEON) || (defined __ARM_NEON__)
+
+ #define EIGEN_VECTORIZE
+ #define EIGEN_VECTORIZE_NEON
+ #include <arm_neon.h>
+
+ #elif (defined __s390x__ && defined __VEC__)
+
+ #define EIGEN_VECTORIZE
+ #define EIGEN_VECTORIZE_ZVECTOR
+ #include <vecintrin.h>
+
+ #elif defined __mips_msa
+
+ // Limit MSA optimizations to little-endian CPUs for now.
+ // TODO: Perhaps, eventually support MSA optimizations on big-endian CPUs?
+ #if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
+ #if defined(__LP64__)
+ #define EIGEN_MIPS_64
+ #else
+ #define EIGEN_MIPS_32
+ #endif
+ #define EIGEN_VECTORIZE
+ #define EIGEN_VECTORIZE_MSA
+ #include <msa.h>
+ #endif
+
+ #endif
+#endif
+
+#if defined(__F16C__) && !defined(EIGEN_COMP_CLANG)
+ // We can use the optimized fp16 to float and float to fp16 conversion routines
+ #define EIGEN_HAS_FP16_C
+#endif
+
+#if defined EIGEN_CUDACC
+ #define EIGEN_VECTORIZE_GPU
+ #include <vector_types.h>
+ #if EIGEN_CUDACC_VER >= 70500
+ #define EIGEN_HAS_CUDA_FP16
+ #endif
+#endif
+
+#if defined(EIGEN_HAS_CUDA_FP16)
+ #include <host_defines.h>
+ #include <cuda_fp16.h>
+#endif
+
+#if defined(EIGEN_HIP_DEVICE_COMPILE)
+
+ #define EIGEN_VECTORIZE_GPU
+ #include <hip/hip_vector_types.h>
+
+ #define EIGEN_HAS_HIP_FP16
+ #include <hip/hip_fp16.h>
+
+ #define HIP_PATCH_WITH_NEW_FP16 18215
+ #if (HIP_VERSION_PATCH < HIP_PATCH_WITH_NEW_FP16)
+ #define EIGEN_HAS_OLD_HIP_FP16
+ // Old HIP implementation does not have an explicit typedef for "half2"
+ typedef __half2 half2;
+ #endif
+
+#endif
+
+
+/** \brief Namespace containing all symbols from the %Eigen library. */
+namespace Eigen {
+
+inline static const char *SimdInstructionSetsInUse(void) {
+#if defined(EIGEN_VECTORIZE_AVX512)
+ return "AVX512, FMA, AVX2, AVX, SSE, SSE2, SSE3, SSSE3, SSE4.1, SSE4.2";
+#elif defined(EIGEN_VECTORIZE_AVX)
+ return "AVX SSE, SSE2, SSE3, SSSE3, SSE4.1, SSE4.2";
+#elif defined(EIGEN_VECTORIZE_SSE4_2)
+ return "SSE, SSE2, SSE3, SSSE3, SSE4.1, SSE4.2";
+#elif defined(EIGEN_VECTORIZE_SSE4_1)
+ return "SSE, SSE2, SSE3, SSSE3, SSE4.1";
+#elif defined(EIGEN_VECTORIZE_SSSE3)
+ return "SSE, SSE2, SSE3, SSSE3";
+#elif defined(EIGEN_VECTORIZE_SSE3)
+ return "SSE, SSE2, SSE3";
+#elif defined(EIGEN_VECTORIZE_SSE2)
+ return "SSE, SSE2";
+#elif defined(EIGEN_VECTORIZE_ALTIVEC)
+ return "AltiVec";
+#elif defined(EIGEN_VECTORIZE_VSX)
+ return "VSX";
+#elif defined(EIGEN_VECTORIZE_NEON)
+ return "ARM NEON";
+#elif defined(EIGEN_VECTORIZE_ZVECTOR)
+ return "S390X ZVECTOR";
+#elif defined(EIGEN_VECTORIZE_MSA)
+ return "MIPS MSA";
+#else
+ return "None";
+#endif
+}
+
+} // end namespace Eigen
+
+
+#endif // EIGEN_CONFIGURE_VECTORIZATION_H
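
The new ConfigureVectorization.h mostly regroups logic moved out of Macros.h (alignment control, SIMD detection, GPU includes). A small usage sketch of two user-facing pieces it defines, the EIGEN_ALIGN* attribute shortcuts and Eigen::SimdInstructionSetsInUse(); this program is illustrative only:

#include <Eigen/Core>
#include <cstdint>
#include <cstdio>

// Statically aligned buffer: EIGEN_ALIGN_MAX expands to the strongest alignment attribute the
// current configuration allows (or to nothing when static alignment is disabled).
EIGEN_ALIGN_MAX float buffer[8] = {0};

int main() {
  std::printf("SIMD in use: %s\n", Eigen::SimdInstructionSetsInUse());
  std::printf("buffer address modulo 16: %u\n",
              static_cast<unsigned>(reinterpret_cast<std::uintptr_t>(buffer) % 16u));
}
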
diff --git a/Eigen/src/Core/util/Constants.h b/Eigen/src/Core/util/Constants.h
index 612dbf5e8..a5f63a9b5 100644
--- a/Eigen/src/Core/util/Constants.h
+++ b/Eigen/src/Core/util/Constants.h
@@ -470,6 +470,7 @@ namespace Architecture
AltiVec = 0x2,
VSX = 0x3,
NEON = 0x4,
+ MSA = 0x5,
#if defined EIGEN_VECTORIZE_SSE
Target = SSE
#elif defined EIGEN_VECTORIZE_ALTIVEC
@@ -478,6 +479,8 @@ namespace Architecture
Target = VSX
#elif defined EIGEN_VECTORIZE_NEON
Target = NEON
+#elif defined EIGEN_VECTORIZE_MSA
+ Target = MSA
#else
Target = Generic
#endif
diff --git a/Eigen/src/Core/util/DisableStupidWarnings.h b/Eigen/src/Core/util/DisableStupidWarnings.h
index 4431f2fc4..6e93bbc0f 100755
--- a/Eigen/src/Core/util/DisableStupidWarnings.h
+++ b/Eigen/src/Core/util/DisableStupidWarnings.h
@@ -45,16 +45,25 @@
#pragma clang diagnostic ignored "-Wabsolute-value"
#endif
-#elif defined __GNUC__ && __GNUC__>=6
+#elif defined __GNUC__
#ifndef EIGEN_PERMANENTLY_DISABLE_STUPID_WARNINGS
#pragma GCC diagnostic push
#endif
- #pragma GCC diagnostic ignored "-Wignored-attributes"
+ // g++ warns about local variables shadowing member functions, which is too strict
+ #pragma GCC diagnostic ignored "-Wshadow"
+ #if __GNUC__ == 4 && __GNUC_MINOR__ < 8
+ // Until g++-4.7 there are warnings when comparing unsigned int vs 0, even in templated functions:
+ #pragma GCC diagnostic ignored "-Wtype-limits"
+ #endif
+ #if __GNUC__>=6
+ #pragma GCC diagnostic ignored "-Wignored-attributes"
+ #endif
#endif
#if defined __NVCC__
+ #pragma diag_suppress boolean_controlling_expr_is_constant
// Disable the "statement is unreachable" message
#pragma diag_suppress code_is_unreachable
// Disable the "dynamic initialization in unreachable code" message
@@ -72,6 +81,15 @@
#pragma diag_suppress 2671
#pragma diag_suppress 2735
#pragma diag_suppress 2737
+ #pragma diag_suppress 2739
#endif
+#else
+// warnings already disabled:
+# ifndef EIGEN_WARNINGS_DISABLED_2
+# define EIGEN_WARNINGS_DISABLED_2
+# elif defined(EIGEN_INTERNAL_DEBUGGING)
+# error "Do not include \"DisableStupidWarnings.h\" recursively more than twice!"
+# endif
+
#endif // not EIGEN_WARNINGS_DISABLED
diff --git a/Eigen/src/Core/util/IndexedViewHelper.h b/Eigen/src/Core/util/IndexedViewHelper.h
index ab01c857f..40e16fdb4 100644
--- a/Eigen/src/Core/util/IndexedViewHelper.h
+++ b/Eigen/src/Core/util/IndexedViewHelper.h
@@ -13,13 +13,6 @@
namespace Eigen {
-/** \namespace Eigen::placeholders
- * \ingroup Core_Module
- *
- * Namespace containing symbolic placeholder and identifiers
- */
-namespace placeholders {
-
namespace internal {
struct symbolic_last_tag {};
}
@@ -35,36 +28,35 @@ struct symbolic_last_tag {};
* A typical usage example would be:
* \code
* using namespace Eigen;
- * using Eigen::placeholders::last;
+ * using Eigen::last;
* VectorXd v(n);
* v(seq(2,last-2)).setOnes();
* \endcode
*
* \sa end
*/
-static const Symbolic::SymbolExpr<internal::symbolic_last_tag> last;
+static const symbolic::SymbolExpr<internal::symbolic_last_tag> last; // PLEASE use Eigen::last instead of Eigen::placeholders::last
-/** \var end
+/** \var lastp1
* \ingroup Core_Module
*
- * Can be used as a parameter to Eigen::seq and Eigen::seqN functions to symbolically reference the last+1 element/row/columns
- * of the underlying vector or matrix once passed to DenseBase::operator()(const RowIndices&, const ColIndices&).
+ * Can be used as a parameter to Eigen::seq and Eigen::seqN functions to symbolically
+ * reference the last+1 element/row/columns of the underlying vector or matrix once
+ * passed to DenseBase::operator()(const RowIndices&, const ColIndices&).
*
 * This symbolic placeholder supports standard arithmetic operations.
- * It is essentially an alias to last+1
+ * It is essentially an alias to last+fix<1>.
*
* \sa last
*/
#ifdef EIGEN_PARSED_BY_DOXYGEN
-static const auto end = last+1;
+static const auto lastp1 = last+fix<1>;
#else
// Using a FixedExpr<1> expression is important here to make sure the compiler
// can fully optimize the computation starting indices with zero overhead.
-static const Symbolic::AddExpr<Symbolic::SymbolExpr<internal::symbolic_last_tag>,Symbolic::ValueExpr<Eigen::internal::FixedInt<1> > > end(last+fix<1>());
+static const symbolic::AddExpr<symbolic::SymbolExpr<internal::symbolic_last_tag>,symbolic::ValueExpr<Eigen::internal::FixedInt<1> > > lastp1(last+fix<1>());
#endif
-} // end namespace placeholders
-
namespace internal {
// Replace symbolic last/end "keywords" by their true runtime value
@@ -74,9 +66,9 @@ template<int N>
FixedInt<N> eval_expr_given_size(FixedInt<N> x, Index /*size*/) { return x; }
template<typename Derived>
-Index eval_expr_given_size(const Symbolic::BaseExpr<Derived> &x, Index size)
+Index eval_expr_given_size(const symbolic::BaseExpr<Derived> &x, Index size)
{
- return x.derived().eval(placeholders::last=size-1);
+ return x.derived().eval(last=size-1);
}
// Extract increment/step at compile time
@@ -117,7 +109,7 @@ template<> struct get_compile_time_incr<SingleRange> {
enum { value = 1 }; // 1 or 0 ??
};
-// Turn a single index into something that looks like an array (i.e., that exposes a .size(), and operatro[](int) methods)
+// Turn a single index into something that looks like an array (i.e., that exposes a .size(), and operator[](int) methods)
template<typename T, int XprSize>
struct IndexedViewCompatibleType<T,XprSize,typename internal::enable_if<internal::is_integral<T>::value>::type> {
// Here we could simply use Array, but maybe it's less work for the compiler to use
@@ -127,13 +119,13 @@ struct IndexedViewCompatibleType<T,XprSize,typename internal::enable_if<internal
};
template<typename T, int XprSize>
-struct IndexedViewCompatibleType<T, XprSize, typename enable_if<Symbolic::is_symbolic<T>::value>::type> {
+struct IndexedViewCompatibleType<T, XprSize, typename enable_if<symbolic::is_symbolic<T>::value>::type> {
typedef SingleRange type;
};
template<typename T>
-typename enable_if<Symbolic::is_symbolic<T>::value,SingleRange>::type
+typename enable_if<symbolic::is_symbolic<T>::value,SingleRange>::type
makeIndexedViewCompatible(const T& id, Index size, SpecializedType) {
return eval_expr_given_size(id,size);
}
@@ -172,14 +164,21 @@ template<int Size> struct get_compile_time_incr<AllRange<Size> > {
} // end namespace internal
-namespace placeholders {
-
/** \var all
* \ingroup Core_Module
* Can be used as a parameter to DenseBase::operator()(const RowIndices&, const ColIndices&) to index all rows or columns
*/
-static const Eigen::internal::all_t all;
+static const Eigen::internal::all_t all; // PLEASE use Eigen::all instead of Eigen::placeholders::all
+
+
+namespace placeholders {
+ typedef symbolic::SymbolExpr<internal::symbolic_last_tag> last_t;
+ typedef symbolic::AddExpr<symbolic::SymbolExpr<internal::symbolic_last_tag>,symbolic::ValueExpr<Eigen::internal::FixedInt<1> > > end_t;
+ typedef Eigen::internal::all_t all_t;
+ EIGEN_DEPRECATED static const all_t all = Eigen::all; // PLEASE use Eigen::all instead of Eigen::placeholders::all
+ EIGEN_DEPRECATED static const last_t last = Eigen::last; // PLEASE use Eigen::last instead of Eigen::placeholders::last
+ EIGEN_DEPRECATED static const end_t end = Eigen::lastp1; // PLEASE use Eigen::lastp1 instead of Eigen::placeholders::end
}
} // end namespace Eigen
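
The net effect of this file's changes: last, lastp1 (formerly end) and all now live directly in namespace Eigen, with deprecated aliases kept under Eigen::placeholders for backward compatibility. A short usage sketch against the public indexed-view API, assuming these symbols are available as shown above:

#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::VectorXd v = Eigen::VectorXd::LinSpaced(10, 0.0, 9.0);
  v(Eigen::seq(2, Eigen::last - 2)).setZero();              // elements 2..7

  Eigen::MatrixXd A = Eigen::MatrixXd::Random(4, 4);
  std::cout << A(Eigen::all, Eigen::lastp1 - 2) << "\n";    // second-to-last column
}
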
diff --git a/Eigen/src/Core/util/IntegralConstant.h b/Eigen/src/Core/util/IntegralConstant.h
index c7d3b1c06..bf99cd8ab 100644
--- a/Eigen/src/Core/util/IntegralConstant.h
+++ b/Eigen/src/Core/util/IntegralConstant.h
@@ -151,9 +151,9 @@ struct get_fixed_value<variable_if_dynamic<T,N>,Default> {
static const int value = N;
};
-template<typename T> Index get_runtime_value(const T &x) { return x; }
+template<typename T> EIGEN_DEVICE_FUNC Index get_runtime_value(const T &x) { return x; }
#if !EIGEN_HAS_CXX14
-template<int N> Index get_runtime_value(FixedInt<N> (*)()) { return N; }
+template<int N> EIGEN_DEVICE_FUNC Index get_runtime_value(FixedInt<N> (*)()) { return N; }
#endif
// Cleanup integer/FixedInt/VariableAndFixedInt/etc types:
diff --git a/Eigen/src/Core/util/MKL_support.h b/Eigen/src/Core/util/MKL_support.h
index 26b59669e..17963fad4 100755
--- a/Eigen/src/Core/util/MKL_support.h
+++ b/Eigen/src/Core/util/MKL_support.h
@@ -49,12 +49,17 @@
#define EIGEN_USE_LAPACKE
#endif
-#if defined(EIGEN_USE_MKL_VML)
+#if defined(EIGEN_USE_MKL_VML) && !defined(EIGEN_USE_MKL)
#define EIGEN_USE_MKL
#endif
+
#if defined EIGEN_USE_MKL
-# include <mkl.h>
+# if (!defined MKL_DIRECT_CALL) && (!defined EIGEN_MKL_NO_DIRECT_CALL)
+# define MKL_DIRECT_CALL
+# define MKL_DIRECT_CALL_JUST_SET
+# endif
+# include <mkl.h>
/*Check IMKL version for compatibility: < 10.3 is not usable with Eigen*/
# ifndef INTEL_MKL_VERSION
# undef EIGEN_USE_MKL /* INTEL_MKL_VERSION is not even defined on older versions */
@@ -68,6 +73,9 @@
# undef EIGEN_USE_MKL_VML
# undef EIGEN_USE_LAPACKE_STRICT
# undef EIGEN_USE_LAPACKE
+# ifdef MKL_DIRECT_CALL_JUST_SET
+# undef MKL_DIRECT_CALL
+# endif
# endif
#endif
@@ -108,6 +116,10 @@
#endif
#endif
+#if defined(EIGEN_USE_BLAS) && !defined(EIGEN_USE_MKL)
+#include "../../misc/blas.h"
+#endif
+
namespace Eigen {
typedef std::complex<double> dcomplex;
@@ -121,8 +133,5 @@ typedef int BlasIndex;
} // end namespace Eigen
-#if defined(EIGEN_USE_BLAS)
-#include "../../misc/blas.h"
-#endif
#endif // EIGEN_MKL_SUPPORT_H
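
The reordering above makes Eigen define MKL_DIRECT_CALL by default when EIGEN_USE_MKL is active (and undo it again if the detected MKL is too old), and it now pulls in misc/blas.h only for non-MKL BLAS builds. A hedged opt-out sketch, assuming MKL headers and libraries are available to the build:

// Keep MKL enabled but prevent Eigen from defining MKL_DIRECT_CALL on its own.
#define EIGEN_USE_MKL_ALL        // assumption: MKL is installed and linked by the build system
#define EIGEN_MKL_NO_DIRECT_CALL // escape hatch introduced in the hunk above
#include <Eigen/Dense>

int main() {
  Eigen::MatrixXd a = Eigen::MatrixXd::Random(64, 64);
  Eigen::MatrixXd b = a * a;     // dispatched to the BLAS/MKL GEMM path
  return b.allFinite() ? 0 : 1;
}
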
diff --git a/Eigen/src/Core/util/Macros.h b/Eigen/src/Core/util/Macros.h
index 29c796647..3af6c4e37 100644
--- a/Eigen/src/Core/util/Macros.h
+++ b/Eigen/src/Core/util/Macros.h
@@ -11,6 +11,10 @@
#ifndef EIGEN_MACROS_H
#define EIGEN_MACROS_H
+//------------------------------------------------------------------------------------------
+// Eigen version and basic defaults
+//------------------------------------------------------------------------------------------
+
#define EIGEN_WORLD_VERSION 3
#define EIGEN_MAJOR_VERSION 3
#define EIGEN_MINOR_VERSION 90
@@ -19,7 +23,40 @@
(EIGEN_MAJOR_VERSION>y || (EIGEN_MAJOR_VERSION>=y && \
EIGEN_MINOR_VERSION>=z))))
+#ifdef EIGEN_DEFAULT_TO_ROW_MAJOR
+#define EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION Eigen::RowMajor
+#else
+#define EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION Eigen::ColMajor
+#endif
+
+#ifndef EIGEN_DEFAULT_DENSE_INDEX_TYPE
+#define EIGEN_DEFAULT_DENSE_INDEX_TYPE std::ptrdiff_t
+#endif
+
+// Upper bound on the C++ version to use.
+// Expected values are 03, 11, 14, 17, etc.
+// By default, let's use an arbitrarily large C++ version.
+#ifndef EIGEN_MAX_CPP_VER
+#define EIGEN_MAX_CPP_VER 99
+#endif
+
+/** Allows disabling some optimizations which might affect the accuracy of the result.
+ * Such optimizations are enabled by default; set EIGEN_FAST_MATH to 0 to disable them.
+ * They currently include:
+ * - single precision ArrayBase::sin() and ArrayBase::cos() for SSE and AVX vectorization.
+ */
+#ifndef EIGEN_FAST_MATH
+#define EIGEN_FAST_MATH 1
+#endif
+
+#ifndef EIGEN_STACK_ALLOCATION_LIMIT
+// 131072 == 128 KB
+#define EIGEN_STACK_ALLOCATION_LIMIT 131072
+#endif
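
These blocks only provide defaults, so each macro can be overridden by defining it before any Eigen header is included. A minimal sketch with example values (the particular numbers are illustrative, not recommendations):

#define EIGEN_MAX_CPP_VER 14               // pretend the compiler only offers C++14 features
#define EIGEN_FAST_MATH 0                  // trade the fast vectorized sin/cos for accuracy
#define EIGEN_STACK_ALLOCATION_LIMIT 16384 // lower the stack-allocation threshold to 16 KB
#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int // must be a signed integer type
#include <Eigen/Dense>

int main() {
  Eigen::MatrixXf m = Eigen::MatrixXf::Identity(3, 3);
  return m.diagonal().sum() == 3.0f ? 0 : 1;
}
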
+
+//------------------------------------------------------------------------------------------
// Compiler identification, EIGEN_COMP_*
+//------------------------------------------------------------------------------------------
/// \internal EIGEN_COMP_GNUC set to 1 for all compilers compatible with GCC
#ifdef __GNUC__
@@ -73,12 +110,17 @@
// For the record, here is a table summarizing the possible values for EIGEN_COMP_MSVC:
// name ver MSC_VER
-// 2008 9 1500
-// 2010 10 1600
-// 2012 11 1700
-// 2013 12 1800
-// 2015 14 1900
-// "15" 15 1900
+// 2008 9 1500
+// 2010 10 1600
+// 2012 11 1700
+// 2013 12 1800
+// 2015 14 1900
+// "15" 15 1900
+// 2017-14.1 15.0 1910
+// 2017-14.11 15.3 1911
+// 2017-14.12 15.5 1912
+// 2017-14.13 15.6 1913
+// 2017-14.14 15.7 1914
/// \internal EIGEN_COMP_MSVC_STRICT set to 1 if the compiler is really Microsoft Visual C++ and not ,e.g., ICC or clang-cl
#if EIGEN_COMP_MSVC && !(EIGEN_COMP_ICC || EIGEN_COMP_LLVM || EIGEN_COMP_CLANG)
@@ -142,7 +184,11 @@
#endif
+
+//------------------------------------------------------------------------------------------
// Architecture identification, EIGEN_ARCH_*
+//------------------------------------------------------------------------------------------
+
#if defined(__x86_64__) || defined(_M_X64) || defined(__amd64)
#define EIGEN_ARCH_x86_64 1
@@ -212,7 +258,9 @@
+//------------------------------------------------------------------------------------------
// Operating system identification, EIGEN_OS_*
+//------------------------------------------------------------------------------------------
/// \internal EIGEN_OS_UNIX set to 1 if the OS is a unix variant
#if defined(__unix__) || defined(__unix)
@@ -314,26 +362,114 @@
#endif
+//------------------------------------------------------------------------------------------
+// Detect GPU compilers and architectures
+//------------------------------------------------------------------------------------------
-#if EIGEN_GNUC_AT_MOST(4,3) && !EIGEN_COMP_CLANG
- // see bug 89
- #define EIGEN_SAFE_TO_USE_STANDARD_ASSERT_MACRO 0
-#else
- #define EIGEN_SAFE_TO_USE_STANDARD_ASSERT_MACRO 1
+// NVCC is not supported as the target platform for HIPCC
+// Note that this also makes EIGEN_CUDACC and EIGEN_HIPCC mutually exclusive
+#if defined(__NVCC__) && defined(__HIPCC__)
+ #error "NVCC as the target platform for HIPCC is currently not supported."
#endif
-// This macro can be used to prevent from macro expansion, e.g.:
-// std::max EIGEN_NOT_A_MACRO(a,b)
-#define EIGEN_NOT_A_MACRO
+#if defined(__CUDACC__) && !defined(EIGEN_NO_CUDA)
+ // Means the compiler is either nvcc or clang with CUDA enabled
+ #define EIGEN_CUDACC __CUDACC__
+#endif
-#ifdef EIGEN_DEFAULT_TO_ROW_MAJOR
-#define EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION Eigen::RowMajor
+#if defined(__CUDA_ARCH__) && !defined(EIGEN_NO_CUDA)
+ // Means we are generating code for the device
+ #define EIGEN_CUDA_ARCH __CUDA_ARCH__
+#endif
+
+// Starting with CUDA 9 the composite __CUDACC_VER__ is not available.
+#if defined(__CUDACC_VER_MAJOR__) && (__CUDACC_VER_MAJOR__ >= 9)
+ #define EIGEN_CUDACC_VER ((__CUDACC_VER_MAJOR__ * 10000) + (__CUDACC_VER_MINOR__ * 100))
+#elif defined(__CUDACC_VER__)
+ #define EIGEN_CUDACC_VER __CUDACC_VER__
#else
-#define EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION Eigen::ColMajor
+ #define EIGEN_CUDACC_VER 0
#endif
-#ifndef EIGEN_DEFAULT_DENSE_INDEX_TYPE
-#define EIGEN_DEFAULT_DENSE_INDEX_TYPE std::ptrdiff_t
+#if defined(__HIPCC__) && !defined(EIGEN_NO_HIP)
+ // Means the compiler is HIPCC (analogous to EIGEN_CUDACC, but for HIP)
+ #define EIGEN_HIPCC __HIPCC__
+
+ // We need to include hip_runtime.h here because it pulls in
+ // ++ hip_common.h which contains the define for __HIP_DEVICE_COMPILE__
+ // ++ host_defines.h which contains the defines for the __host__ and __device__ macros
+ #include <hip/hip_runtime.h>
+
+ #if defined(__HIP_DEVICE_COMPILE__)
+ // analogous to EIGEN_CUDA_ARCH, but for HIP
+ #define EIGEN_HIP_DEVICE_COMPILE __HIP_DEVICE_COMPILE__
+ #endif
+#endif
+
+// Unify CUDA/HIPCC
+
+#if defined(EIGEN_CUDACC) || defined(EIGEN_HIPCC)
+//
+// If either EIGEN_CUDACC or EIGEN_HIPCC is defined, then define EIGEN_GPUCC
+//
+#define EIGEN_GPUCC
+//
+// EIGEN_HIPCC implies the HIP compiler and is used to tweak Eigen code for use in HIP kernels
+// EIGEN_CUDACC implies the CUDA compiler and is used to tweak Eigen code for use in CUDA kernels
+//
+// In most cases the same tweaks are required to the Eigen code to enable in both the HIP and CUDA kernels.
+// For those cases, the corresponding code should be guarded with
+// #if defined(EIGEN_GPUCC)
+// instead of
+// #if defined(EIGEN_CUDACC) || defined(EIGEN_HIPCC)
+//
+// For cases where the tweak is specific to HIP, the code should be guarded with
+// #if defined(EIGEN_HIPCC)
+//
+// For cases where the tweak is specific to CUDA, the code should be guarded with
+// #if defined(EIGEN_CUDACC)
+//
+#endif
+
+#if defined(EIGEN_CUDA_ARCH) || defined(EIGEN_HIP_DEVICE_COMPILE)
+//
+// If either EIGEN_CUDA_ARCH or EIGEN_HIP_DEVICE_COMPILE is defined, then define EIGEN_GPU_COMPILE_PHASE
+//
+#define EIGEN_GPU_COMPILE_PHASE
+//
+// GPU compilers (HIPCC, NVCC) typically do two passes over the source code,
+// + one to compile the source for the "host" (ie CPU)
+// + another to compile the source for the "device" (ie. GPU)
+//
+// Code that needs to be enabled only during either the "host" or "device" compilation phase
+// needs to be guarded with a macro that indicates the current compilation phase
+//
+// EIGEN_HIP_DEVICE_COMPILE implies the device compilation phase in HIP
+// EIGEN_CUDA_ARCH implies the device compilation phase in CUDA
+//
+// In most cases, the "host" / "device" specific code is the same for both HIP and CUDA
+// For those cases, the code should be guarded with
+// #if defined(EIGEN_GPU_COMPILE_PHASE)
+// instead of
+// #if defined(EIGEN_CUDA_ARCH) || defined(EIGEN_HIP_DEVICE_COMPILE)
+//
+// For cases where the tweak is specific to HIP, the code should be guarded with
+// #if defined(EIGEN_HIP_DEVICE_COMPILE)
+//
+// For cases where the tweak is specific to CUDA, the code should be guarded with
+// #if defined(EIGEN_CUDA_ARCH)
+//
+#endif
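
A sketch of the guarding pattern described in the comments above; the helper function is hypothetical and assumes <Eigen/Core> has been included so that the new EIGEN_GPUCC / EIGEN_GPU_COMPILE_PHASE macros are defined:

#include <Eigen/Core>

inline const char* eigen_compile_phase() {   // hypothetical helper, not part of the patch
#if defined(EIGEN_GPU_COMPILE_PHASE)
  return "device pass (CUDA or HIP)";        // only seen while compiling for the GPU
#else
  return "host pass";                        // only seen while compiling for the CPU
#endif
}

#if defined(EIGEN_GPUCC)
  // Reached only when a GPU compiler (nvcc, clang-cuda or hipcc) drives the translation unit,
  // regardless of which pass is currently running.
#endif
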
+
+//------------------------------------------------------------------------------------------
+// Detect Compiler/Architecture/OS specific features
+//------------------------------------------------------------------------------------------
+
+#if EIGEN_GNUC_AT_MOST(4,3) && !EIGEN_COMP_CLANG
+ // see bug 89
+ #define EIGEN_SAFE_TO_USE_STANDARD_ASSERT_MACRO 0
+#else
+ #define EIGEN_SAFE_TO_USE_STANDARD_ASSERT_MACRO 1
#endif
// Cross compiler wrapper around LLVM's __has_builtin
@@ -357,13 +493,6 @@
#define EIGEN_HAS_STATIC_ARRAY_TEMPLATE 0
#endif
-// Upperbound on the C++ version to use.
-// Expected values are 03, 11, 14, 17, etc.
-// By default, let's use an arbitrarily large C++ version.
-#ifndef EIGEN_MAX_CPP_VER
-#define EIGEN_MAX_CPP_VER 99
-#endif
-
#if EIGEN_MAX_CPP_VER>=11 && (defined(__cplusplus) && (__cplusplus >= 201103L) || EIGEN_COMP_MSVC >= 1900)
#define EIGEN_HAS_CXX11 1
#else
@@ -389,6 +518,8 @@
#endif
// Does the compiler support C99?
+// Need to include <cmath> to make sure _GLIBCXX_USE_C99 gets defined
+#include <cmath>
#ifndef EIGEN_HAS_C99_MATH
#if EIGEN_MAX_CPP_VER>=11 && \
((defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901)) \
@@ -402,18 +533,35 @@
#endif
// Does the compiler support result_of?
+// It's likely that MSVC 2013 supports result_of but I could not find a good source for that,
+// so let's be conservative.
#ifndef EIGEN_HAS_STD_RESULT_OF
-#if EIGEN_MAX_CPP_VER>=11 && ((__has_feature(cxx_lambdas) || (defined(__cplusplus) && __cplusplus >= 201103L)))
+#if EIGEN_MAX_CPP_VER>=11 && \
+ (__has_feature(cxx_lambdas) || (defined(__cplusplus) && __cplusplus >= 201103L) || EIGEN_COMP_MSVC >= 1900)
#define EIGEN_HAS_STD_RESULT_OF 1
#else
#define EIGEN_HAS_STD_RESULT_OF 0
#endif
#endif
+// Does the compiler support type_traits?
+// - full support of type traits was added only to GCC 5.1.0.
+// - 20150626 corresponds to the last release of 4.x libstdc++
+#ifndef EIGEN_HAS_TYPE_TRAITS
+#if EIGEN_MAX_CPP_VER>=11 && (EIGEN_HAS_CXX11 || EIGEN_COMP_MSVC >= 1700) \
+ && ((!EIGEN_COMP_GNUC_STRICT) || EIGEN_GNUC_AT_LEAST(5, 1)) \
+ && ((!defined(__GLIBCXX__)) || __GLIBCXX__ > 20150626)
+#define EIGEN_HAS_TYPE_TRAITS 1
+#define EIGEN_INCLUDE_TYPE_TRAITS
+#else
+#define EIGEN_HAS_TYPE_TRAITS 0
+#endif
+#endif
+
// Does the compiler support variadic templates?
#ifndef EIGEN_HAS_VARIADIC_TEMPLATES
#if EIGEN_MAX_CPP_VER>=11 && (__cplusplus > 199711L || EIGEN_COMP_MSVC >= 1900) \
- && (!defined(__NVCC__) || !EIGEN_ARCH_ARM_OR_ARM64 || (defined __CUDACC_VER__ && __CUDACC_VER__ >= 80000) )
+ && (!defined(__NVCC__) || !EIGEN_ARCH_ARM_OR_ARM64 || (EIGEN_CUDACC_VER >= 80000) )
// ^^ Disable the use of variadic templates when compiling with versions of nvcc older than 8.0 on ARM devices:
// this prevents nvcc from crashing when compiling Eigen on Tegra X1
#define EIGEN_HAS_VARIADIC_TEMPLATES 1
@@ -426,23 +574,22 @@
// Does the compiler fully support const expressions? (as in c++14)
#ifndef EIGEN_HAS_CONSTEXPR
+ #if defined(EIGEN_CUDACC)
+ // Const expressions are supported provided that c++11 is enabled and we're using either clang or nvcc 7.5 or above
+ #if EIGEN_MAX_CPP_VER>=14 && (__cplusplus > 199711L && (EIGEN_COMP_CLANG || EIGEN_CUDACC_VER >= 70500))
+ #define EIGEN_HAS_CONSTEXPR 1
+ #endif
+ #elif EIGEN_MAX_CPP_VER>=14 && (__has_feature(cxx_relaxed_constexpr) || (defined(__cplusplus) && __cplusplus >= 201402L) || \
+ (EIGEN_GNUC_AT_LEAST(4,8) && (__cplusplus > 199711L)) || \
+ (EIGEN_COMP_CLANG >= 306 && (__cplusplus > 199711L)))
+ #define EIGEN_HAS_CONSTEXPR 1
+ #endif
-#if defined(__CUDACC__)
-// Const expressions are supported provided that c++11 is enabled and we're using either clang or nvcc 7.5 or above
-#if EIGEN_MAX_CPP_VER>=14 && (__cplusplus > 199711L && defined(__CUDACC_VER__) && (EIGEN_COMP_CLANG || __CUDACC_VER__ >= 70500))
- #define EIGEN_HAS_CONSTEXPR 1
-#endif
-#elif EIGEN_MAX_CPP_VER>=14 && (__has_feature(cxx_relaxed_constexpr) || (defined(__cplusplus) && __cplusplus >= 201402L) || \
- (EIGEN_GNUC_AT_LEAST(4,8) && (__cplusplus > 199711L)) || \
- (EIGEN_COMP_CLANG >= 306 && (__cplusplus > 199711L)))
-#define EIGEN_HAS_CONSTEXPR 1
-#endif
-
-#ifndef EIGEN_HAS_CONSTEXPR
-#define EIGEN_HAS_CONSTEXPR 0
-#endif
+ #ifndef EIGEN_HAS_CONSTEXPR
+ #define EIGEN_HAS_CONSTEXPR 0
+ #endif
-#endif
+#endif // EIGEN_HAS_CONSTEXPR
// Does the compiler support C++11 math?
// Let's be conservative and enable the default C++11 implementation only if we are sure it exists
@@ -480,15 +627,42 @@
#endif
#endif
-/** Allows to disable some optimizations which might affect the accuracy of the result.
- * Such optimization are enabled by default, and set EIGEN_FAST_MATH to 0 to disable them.
- * They currently include:
- * - single precision ArrayBase::sin() and ArrayBase::cos() for SSE and AVX vectorization.
- */
-#ifndef EIGEN_FAST_MATH
-#define EIGEN_FAST_MATH 1
+#ifndef EIGEN_HAS_CXX11_ATOMIC
+ #if EIGEN_MAX_CPP_VER>=11 && \
+ (__has_feature(cxx_atomic) \
+ || (__cplusplus > 201103L) \
+ || ((__cplusplus >= 201103L) && (EIGEN_COMP_MSVC==0 || EIGEN_COMP_MSVC >= 1700)))
+ #define EIGEN_HAS_CXX11_ATOMIC 1
+ #else
+ #define EIGEN_HAS_CXX11_ATOMIC 0
+ #endif
+#endif
+
+#if defined(EIGEN_CUDACC) && EIGEN_HAS_CONSTEXPR
+ // While available already with c++11, this is useful mostly starting with c++14 and relaxed constexpr rules
+ #if defined(__NVCC__)
+ // nvcc considers constexpr functions as __host__ __device__ with the option --expt-relaxed-constexpr
+ #ifdef __CUDACC_RELAXED_CONSTEXPR__
+ #define EIGEN_CONSTEXPR_ARE_DEVICE_FUNC
+ #endif
+ // See bug 1580: clang/CUDA fails to make the following calls
+ // to constexpr bool std::equal_to::operator() even when
+ // EIGEN_CONSTEXPR_ARE_DEVICE_FUNC is defined in c++14 only.
+ // #elif defined(__clang__) && defined(__CUDA__) && EIGEN_HAS_CONSTEXPR == 1
+ // // clang++ always considers constexpr functions as implicitly __host__ __device__
+ // #define EIGEN_CONSTEXPR_ARE_DEVICE_FUNC
+ #endif
#endif
+
+//------------------------------------------------------------------------------------------
+// Preprocessor programming helpers
+//------------------------------------------------------------------------------------------
+
+// This macro can be used to prevent from macro expansion, e.g.:
+// std::max EIGEN_NOT_A_MACRO(a,b)
+#define EIGEN_NOT_A_MACRO
+
#define EIGEN_DEBUG_VAR(x) std::cerr << #x << " = " << x << std::endl;
// concatenate two tokens
@@ -504,11 +678,13 @@
// EIGEN_STRONG_INLINE is a stronger version of the inline, using __forceinline on MSVC,
// but it still doesn't use GCC's always_inline. This is useful in (common) situations where MSVC needs forceinline
// but GCC is still doing fine with just inline.
+#ifndef EIGEN_STRONG_INLINE
#if EIGEN_COMP_MSVC || EIGEN_COMP_ICC
#define EIGEN_STRONG_INLINE __forceinline
#else
#define EIGEN_STRONG_INLINE inline
#endif
+#endif
// EIGEN_ALWAYS_INLINE is the strongest: it has the effect of making the function inline and adding every possible
// attribute to maximize inlining. This should only be used when really necessary: in particular,
@@ -538,12 +714,42 @@
#define EIGEN_PERMISSIVE_EXPR
#endif
+// GPU stuff
+
+// Disable some features when compiling with GPU compilers (NVCC/clang-cuda/SYCL/HIPCC)
+#if defined(EIGEN_CUDACC) || defined(__SYCL_DEVICE_ONLY__) || defined(EIGEN_HIPCC)
+ // Do not try asserts on device code
+ #ifndef EIGEN_NO_DEBUG
+ #define EIGEN_NO_DEBUG
+ #endif
+
+ #ifdef EIGEN_INTERNAL_DEBUGGING
+ #undef EIGEN_INTERNAL_DEBUGGING
+ #endif
+
+ #ifdef EIGEN_EXCEPTIONS
+ #undef EIGEN_EXCEPTIONS
+ #endif
+#endif
+
+// All functions callable from CUDA/HIP code must be qualified with __device__
+#ifdef EIGEN_GPUCC
+ #ifndef EIGEN_DONT_VECTORIZE
+ #define EIGEN_DONT_VECTORIZE
+ #endif
+
+ #define EIGEN_DEVICE_FUNC __host__ __device__
+#else
+ #define EIGEN_DEVICE_FUNC
+#endif
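
With EIGEN_DEVICE_FUNC defined this way, a function qualified with it compiles as ordinary host code and, under nvcc or hipcc, as __host__ __device__ code, while vectorization is switched off for those translation units. A usage sketch (hypothetical helper, assuming <Eigen/Core> is included so the macro is available):

#include <Eigen/Core>

// Callable from host code and, when built by nvcc/hipcc, from GPU kernels as well.
EIGEN_DEVICE_FUNC inline float lerp(float a, float b, float t) {
  return a + t * (b - a);
}

int main() {
  return lerp(0.f, 2.f, 0.5f) == 1.f ? 0 : 1;
}
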
+
+
// this macro allows to get rid of linking errors about multiply defined functions.
// - static is not very good because it prevents definitions from different object files to be merged.
// So static causes the resulting linked executable to be bloated with multiple copies of the same function.
// - inline is not perfect either as it unwantedly hints the compiler toward inlining the function.
-#define EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
-#define EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS inline
+#define EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_DEVICE_FUNC
+#define EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_DEVICE_FUNC inline
#ifdef NDEBUG
# ifndef EIGEN_NO_DEBUG
@@ -649,169 +855,6 @@ namespace Eigen {
# define EIGEN_CONST_CONDITIONAL(cond) cond
#endif
-//------------------------------------------------------------------------------------------
-// Static and dynamic alignment control
-//
-// The main purpose of this section is to define EIGEN_MAX_ALIGN_BYTES and EIGEN_MAX_STATIC_ALIGN_BYTES
-// as the maximal boundary in bytes on which dynamically and statically allocated data may be alignment respectively.
-// The values of EIGEN_MAX_ALIGN_BYTES and EIGEN_MAX_STATIC_ALIGN_BYTES can be specified by the user. If not,
-// a default value is automatically computed based on architecture, compiler, and OS.
-//
-// This section also defines macros EIGEN_ALIGN_TO_BOUNDARY(N) and the shortcuts EIGEN_ALIGN{8,16,32,_MAX}
-// to be used to declare statically aligned buffers.
-//------------------------------------------------------------------------------------------
-
-
-/* EIGEN_ALIGN_TO_BOUNDARY(n) forces data to be n-byte aligned. This is used to satisfy SIMD requirements.
- * However, we do that EVEN if vectorization (EIGEN_VECTORIZE) is disabled,
- * so that vectorization doesn't affect binary compatibility.
- *
- * If we made alignment depend on whether or not EIGEN_VECTORIZE is defined, it would be impossible to link
- * vectorized and non-vectorized code.
- */
-#if (defined __CUDACC__)
- #define EIGEN_ALIGN_TO_BOUNDARY(n) __align__(n)
-#elif EIGEN_COMP_GNUC || EIGEN_COMP_PGI || EIGEN_COMP_IBM || EIGEN_COMP_ARM
- #define EIGEN_ALIGN_TO_BOUNDARY(n) __attribute__((aligned(n)))
-#elif EIGEN_COMP_MSVC
- #define EIGEN_ALIGN_TO_BOUNDARY(n) __declspec(align(n))
-#elif EIGEN_COMP_SUNCC
- // FIXME not sure about this one:
- #define EIGEN_ALIGN_TO_BOUNDARY(n) __attribute__((aligned(n)))
-#else
- #error Please tell me what is the equivalent of __attribute__((aligned(n))) for your compiler
-#endif
-
-// If the user explicitly disable vectorization, then we also disable alignment
-#if defined(EIGEN_DONT_VECTORIZE)
- #define EIGEN_IDEAL_MAX_ALIGN_BYTES 0
-#elif defined(EIGEN_VECTORIZE_AVX512)
- // 64 bytes static alignmeent is preferred only if really required
- #define EIGEN_IDEAL_MAX_ALIGN_BYTES 64
-#elif defined(__AVX__)
- // 32 bytes static alignmeent is preferred only if really required
- #define EIGEN_IDEAL_MAX_ALIGN_BYTES 32
-#else
- #define EIGEN_IDEAL_MAX_ALIGN_BYTES 16
-#endif
-
-
-// EIGEN_MIN_ALIGN_BYTES defines the minimal value for which the notion of explicit alignment makes sense
-#define EIGEN_MIN_ALIGN_BYTES 16
-
-// Defined the boundary (in bytes) on which the data needs to be aligned. Note
-// that unless EIGEN_ALIGN is defined and not equal to 0, the data may not be
-// aligned at all regardless of the value of this #define.
-
-#if (defined(EIGEN_DONT_ALIGN_STATICALLY) || defined(EIGEN_DONT_ALIGN)) && defined(EIGEN_MAX_STATIC_ALIGN_BYTES) && EIGEN_MAX_STATIC_ALIGN_BYTES>0
-#error EIGEN_MAX_STATIC_ALIGN_BYTES and EIGEN_DONT_ALIGN[_STATICALLY] are both defined with EIGEN_MAX_STATIC_ALIGN_BYTES!=0. Use EIGEN_MAX_STATIC_ALIGN_BYTES=0 as a synonym of EIGEN_DONT_ALIGN_STATICALLY.
-#endif
-
-// EIGEN_DONT_ALIGN_STATICALLY and EIGEN_DONT_ALIGN are deprectated
-// They imply EIGEN_MAX_STATIC_ALIGN_BYTES=0
-#if defined(EIGEN_DONT_ALIGN_STATICALLY) || defined(EIGEN_DONT_ALIGN)
- #ifdef EIGEN_MAX_STATIC_ALIGN_BYTES
- #undef EIGEN_MAX_STATIC_ALIGN_BYTES
- #endif
- #define EIGEN_MAX_STATIC_ALIGN_BYTES 0
-#endif
-
-#ifndef EIGEN_MAX_STATIC_ALIGN_BYTES
-
- // Try to automatically guess what is the best default value for EIGEN_MAX_STATIC_ALIGN_BYTES
-
- // 16 byte alignment is only useful for vectorization. Since it affects the ABI, we need to enable
- // 16 byte alignment on all platforms where vectorization might be enabled. In theory we could always
- // enable alignment, but it can be a cause of problems on some platforms, so we just disable it in
- // certain common platform (compiler+architecture combinations) to avoid these problems.
- // Only static alignment is really problematic (relies on nonstandard compiler extensions),
- // try to keep heap alignment even when we have to disable static alignment.
- #if EIGEN_COMP_GNUC && !(EIGEN_ARCH_i386_OR_x86_64 || EIGEN_ARCH_ARM_OR_ARM64 || EIGEN_ARCH_PPC || EIGEN_ARCH_IA64)
- #define EIGEN_GCC_AND_ARCH_DOESNT_WANT_STACK_ALIGNMENT 1
- #elif EIGEN_ARCH_ARM_OR_ARM64 && EIGEN_COMP_GNUC_STRICT && EIGEN_GNUC_AT_MOST(4, 6)
- // Old versions of GCC on ARM, at least 4.4, were once seen to have buggy static alignment support.
- // Not sure which version fixed it, hopefully it doesn't affect 4.7, which is still somewhat in use.
- // 4.8 and newer seem definitely unaffected.
- #define EIGEN_GCC_AND_ARCH_DOESNT_WANT_STACK_ALIGNMENT 1
- #else
- #define EIGEN_GCC_AND_ARCH_DOESNT_WANT_STACK_ALIGNMENT 0
- #endif
-
- // static alignment is completely disabled with GCC 3, Sun Studio, and QCC/QNX
- #if !EIGEN_GCC_AND_ARCH_DOESNT_WANT_STACK_ALIGNMENT \
- && !EIGEN_GCC3_OR_OLDER \
- && !EIGEN_COMP_SUNCC \
- && !EIGEN_OS_QNX
- #define EIGEN_ARCH_WANTS_STACK_ALIGNMENT 1
- #else
- #define EIGEN_ARCH_WANTS_STACK_ALIGNMENT 0
- #endif
-
- #if EIGEN_ARCH_WANTS_STACK_ALIGNMENT
- #define EIGEN_MAX_STATIC_ALIGN_BYTES EIGEN_IDEAL_MAX_ALIGN_BYTES
- #else
- #define EIGEN_MAX_STATIC_ALIGN_BYTES 0
- #endif
-
-#endif
-
-// If EIGEN_MAX_ALIGN_BYTES is defined, then it is considered as an upper bound for EIGEN_MAX_ALIGN_BYTES
-#if defined(EIGEN_MAX_ALIGN_BYTES) && EIGEN_MAX_ALIGN_BYTES<EIGEN_MAX_STATIC_ALIGN_BYTES
-#undef EIGEN_MAX_STATIC_ALIGN_BYTES
-#define EIGEN_MAX_STATIC_ALIGN_BYTES EIGEN_MAX_ALIGN_BYTES
-#endif
-
-#if EIGEN_MAX_STATIC_ALIGN_BYTES==0 && !defined(EIGEN_DISABLE_UNALIGNED_ARRAY_ASSERT)
- #define EIGEN_DISABLE_UNALIGNED_ARRAY_ASSERT
-#endif
-
-// At this stage, EIGEN_MAX_STATIC_ALIGN_BYTES>0 is the true test whether we want to align arrays on the stack or not.
-// It takes into account both the user choice to explicitly enable/disable alignment (by settting EIGEN_MAX_STATIC_ALIGN_BYTES)
-// and the architecture config (EIGEN_ARCH_WANTS_STACK_ALIGNMENT).
-// Henceforth, only EIGEN_MAX_STATIC_ALIGN_BYTES should be used.
-
-
-// Shortcuts to EIGEN_ALIGN_TO_BOUNDARY
-#define EIGEN_ALIGN8 EIGEN_ALIGN_TO_BOUNDARY(8)
-#define EIGEN_ALIGN16 EIGEN_ALIGN_TO_BOUNDARY(16)
-#define EIGEN_ALIGN32 EIGEN_ALIGN_TO_BOUNDARY(32)
-#define EIGEN_ALIGN64 EIGEN_ALIGN_TO_BOUNDARY(64)
-#if EIGEN_MAX_STATIC_ALIGN_BYTES>0
-#define EIGEN_ALIGN_MAX EIGEN_ALIGN_TO_BOUNDARY(EIGEN_MAX_STATIC_ALIGN_BYTES)
-#else
-#define EIGEN_ALIGN_MAX
-#endif
-
-
-// Dynamic alignment control
-
-#if defined(EIGEN_DONT_ALIGN) && defined(EIGEN_MAX_ALIGN_BYTES) && EIGEN_MAX_ALIGN_BYTES>0
-#error EIGEN_MAX_ALIGN_BYTES and EIGEN_DONT_ALIGN are both defined with EIGEN_MAX_ALIGN_BYTES!=0. Use EIGEN_MAX_ALIGN_BYTES=0 as a synonym of EIGEN_DONT_ALIGN.
-#endif
-
-#ifdef EIGEN_DONT_ALIGN
- #ifdef EIGEN_MAX_ALIGN_BYTES
- #undef EIGEN_MAX_ALIGN_BYTES
- #endif
- #define EIGEN_MAX_ALIGN_BYTES 0
-#elif !defined(EIGEN_MAX_ALIGN_BYTES)
- #define EIGEN_MAX_ALIGN_BYTES EIGEN_IDEAL_MAX_ALIGN_BYTES
-#endif
-
-#if EIGEN_IDEAL_MAX_ALIGN_BYTES > EIGEN_MAX_ALIGN_BYTES
-#define EIGEN_DEFAULT_ALIGN_BYTES EIGEN_IDEAL_MAX_ALIGN_BYTES
-#else
-#define EIGEN_DEFAULT_ALIGN_BYTES EIGEN_MAX_ALIGN_BYTES
-#endif
-
-
-#ifndef EIGEN_UNALIGNED_VECTORIZE
-#define EIGEN_UNALIGNED_VECTORIZE 1
-#endif
-
-//----------------------------------------------------------------------
-
-
#ifdef EIGEN_DONT_USE_RESTRICT_KEYWORD
#define EIGEN_RESTRICT
#endif
@@ -819,10 +862,6 @@ namespace Eigen {
#define EIGEN_RESTRICT __restrict
#endif
-#ifndef EIGEN_STACK_ALLOCATION_LIMIT
-// 131072 == 128 KB
-#define EIGEN_STACK_ALLOCATION_LIMIT 131072
-#endif
#ifndef EIGEN_DEFAULT_IO_FORMAT
#ifdef EIGEN_MAKING_DOCS
@@ -837,7 +876,20 @@ namespace Eigen {
// just an empty macro !
#define EIGEN_EMPTY
-#if EIGEN_COMP_MSVC_STRICT && (EIGEN_COMP_MSVC < 1900 || defined(__CUDACC_VER__)) // for older MSVC versions, as well as 1900 && CUDA 8, using the base operator is sufficient (cf Bugs 1000, 1324)
+
+// When compiling CUDA/HIP device code with NVCC or HIPCC,
+// pull in math functions from the global namespace.
+// In host mode, and when device code is compiled with clang,
+// use the std versions.
+#if (defined(EIGEN_CUDA_ARCH) && defined(__NVCC__)) || defined(EIGEN_HIP_DEVICE_COMPILE)
+ #define EIGEN_USING_STD_MATH(FUNC) using ::FUNC;
+#else
+ #define EIGEN_USING_STD_MATH(FUNC) using std::FUNC;
+#endif
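A sketch of the intended call pattern inside a function template (the function itself is illustrative): the unqualified call then binds to ::sqrt in NVCC/HIPCC device code and to std::sqrt everywhere else.

template<typename Scalar>
EIGEN_DEVICE_FUNC Scalar naive_hypot(Scalar x, Scalar y) {
  EIGEN_USING_STD_MATH(sqrt);
  return sqrt(x * x + y * y);   // resolves to ::sqrt or std::sqrt as appropriate
}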
+
+
+#if EIGEN_COMP_MSVC_STRICT && (EIGEN_COMP_MSVC < 1900 || EIGEN_CUDACC_VER>0)
+ // for older MSVC versions, as well as 1900 && CUDA 8, using the base operator is sufficient (cf Bugs 1000, 1324)
#define EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived) \
using Base::operator =;
#elif EIGEN_COMP_CLANG // workaround clang bug (see http://forum.kde.org/viewtopic.php?f=74&t=102653)
@@ -955,7 +1007,7 @@ namespace Eigen {
const typename internal::plain_constant_type<EXPR,SCALAR>::type, const EXPR>
// Workaround for MSVC 2010 (see ML thread "patch with compile for for MSVC 2010")
-#if EIGEN_COMP_MSVC_STRICT<=1600
+#if EIGEN_COMP_MSVC_STRICT && (EIGEN_COMP_MSVC_STRICT<=1600)
#define EIGEN_MSVC10_WORKAROUND_BINARYOP_RETURN_TYPE(X) typename internal::enable_if<true,X>::type
#else
#define EIGEN_MSVC10_WORKAROUND_BINARYOP_RETURN_TYPE(X) X
@@ -984,15 +1036,23 @@ namespace Eigen {
EIGEN_MAKE_SCALAR_BINARY_OP_ONTHERIGHT(METHOD,OPNAME)
+#if (defined(_CPPUNWIND) || defined(__EXCEPTIONS)) && !defined(EIGEN_CUDA_ARCH) && !defined(EIGEN_EXCEPTIONS) && !defined(EIGEN_USE_SYCL) && !defined(EIGEN_HIP_DEVICE_COMPILE)
+ #define EIGEN_EXCEPTIONS
+#endif
+
+
#ifdef EIGEN_EXCEPTIONS
# define EIGEN_THROW_X(X) throw X
# define EIGEN_THROW throw
# define EIGEN_TRY try
# define EIGEN_CATCH(X) catch (X)
#else
-# ifdef __CUDA_ARCH__
+# if defined(EIGEN_CUDA_ARCH)
# define EIGEN_THROW_X(X) asm("trap;")
# define EIGEN_THROW asm("trap;")
+# elif defined(EIGEN_HIP_DEVICE_COMPILE)
+# define EIGEN_THROW_X(X) asm("s_trap 0")
+# define EIGEN_THROW asm("s_trap 0")
# else
# define EIGEN_THROW_X(X) std::abort()
# define EIGEN_THROW std::abort()
@@ -1012,7 +1072,38 @@ namespace Eigen {
# define EIGEN_NOEXCEPT
# define EIGEN_NOEXCEPT_IF(x)
# define EIGEN_NO_THROW throw()
-# define EIGEN_EXCEPTION_SPEC(X) throw(X)
+# if EIGEN_COMP_MSVC
+ // MSVC does not support exception specifications (warning C4290),
+ // and they are deprecated in c++11 anyway.
+# define EIGEN_EXCEPTION_SPEC(X) throw()
+# else
+# define EIGEN_EXCEPTION_SPEC(X) throw(X)
+# endif
+#endif
+
+#if EIGEN_HAS_VARIADIC_TEMPLATES
+// The all function is used to enable a variadic version of eigen_assert which can take a parameter pack as its input.
+namespace Eigen {
+namespace internal {
+
+inline bool all(){ return true; }
+
+template<typename T, typename ...Ts>
+bool all(T t, Ts ... ts){ return t && all(ts...); }
+
+}
+}
+#endif
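A small sketch of how the variadic helper composes with eigen_assert (the checked conditions and the function name are purely illustrative):

inline void check_sizes(Eigen::Index rows, Eigen::Index cols, Eigen::Index maxRows) {
  // all(...) is true only if every argument converts to true
  eigen_assert(Eigen::internal::all(rows >= 0, cols >= 0, rows <= maxRows));
}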
+
+// Wrapping #pragma unroll in a macro since it is required for SYCL
+#if defined(__SYCL_DEVICE_ONLY__)
+ #if defined(_MSC_VER)
+ #define EIGEN_UNROLL_LOOP __pragma(unroll)
+ #else
+ #define EIGEN_UNROLL_LOOP _Pragma("unroll")
+ #endif
+#else
+ #define EIGEN_UNROLL_LOOP
#endif
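A usage sketch (hypothetical helper): the macro is placed directly before the loop it should affect; it expands to an unroll pragma only for SYCL device compilation and to nothing otherwise.

inline float sum4(const float* v) {
  float acc = 0.f;
  EIGEN_UNROLL_LOOP
  for (int k = 0; k < 4; ++k)
    acc += v[k];
  return acc;
}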
#endif // EIGEN_MACROS_H
diff --git a/Eigen/src/Core/util/Memory.h b/Eigen/src/Core/util/Memory.h
index 7d9053496..9dd2e0252 100644
--- a/Eigen/src/Core/util/Memory.h
+++ b/Eigen/src/Core/util/Memory.h
@@ -70,7 +70,20 @@ inline void throw_std_bad_alloc()
throw std::bad_alloc();
#else
std::size_t huge = static_cast<std::size_t>(-1);
+ #if defined(EIGEN_HIPCC)
+ //
+ // calls to "::operator new" are to be treated as opaque function calls (i.e. no inlining),
+ // and as a consequence the code in the #else block triggers the hipcc warning :
+ // "no overloaded function has restriction specifiers that are compatible with the ambient context"
+ //
+ // "throw_std_bad_alloc" has the EIGEN_DEVICE_FUNC attribute, so it seems that hipcc expects
+ // the same on "operator new"
+ // Reverting code back to the old version in this #if block for the hipcc compiler
+ //
new int[huge];
+ #else
+ ::operator new(huge);
+ #endif
#endif
}
@@ -83,11 +96,12 @@ inline void throw_std_bad_alloc()
/** \internal Like malloc, but the returned pointer is guaranteed to be 16-byte aligned.
* Fast, but wastes 16 additional bytes of memory. Does not throw any exception.
*/
-inline void* handmade_aligned_malloc(std::size_t size)
+inline void* handmade_aligned_malloc(std::size_t size, std::size_t alignment = EIGEN_DEFAULT_ALIGN_BYTES)
{
- void *original = std::malloc(size+EIGEN_DEFAULT_ALIGN_BYTES);
+ eigen_assert(alignment >= sizeof(void*) && (alignment & -alignment) == alignment && "Alignment must be at least sizeof(void*) and a power of 2");
+ void *original = std::malloc(size+alignment);
if (original == 0) return 0;
- void *aligned = reinterpret_cast<void*>((reinterpret_cast<std::size_t>(original) & ~(std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1))) + EIGEN_DEFAULT_ALIGN_BYTES);
+ void *aligned = reinterpret_cast<void*>((reinterpret_cast<std::size_t>(original) & ~(std::size_t(alignment-1))) + alignment);
*(reinterpret_cast<void**>(aligned) - 1) = original;
return aligned;
}
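For context, a sketch mirroring the matching deallocation (Eigen's handmade_aligned_free follows this shape): the original malloc'ed pointer is stashed in the slot just before the aligned address, so freeing only needs to read it back.

inline void handmade_aligned_free_sketch(void* ptr) {
  // the word preceding the aligned address holds the pointer returned by std::malloc
  if (ptr) std::free(*(reinterpret_cast<void**>(ptr) - 1));
}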
@@ -156,9 +170,15 @@ EIGEN_DEVICE_FUNC inline void* aligned_malloc(std::size_t size)
void *result;
#if (EIGEN_DEFAULT_ALIGN_BYTES==0) || EIGEN_MALLOC_ALREADY_ALIGNED
+
+ #if defined(EIGEN_HIP_DEVICE_COMPILE)
+ result = ::malloc(size);
+ #else
result = std::malloc(size);
+ #endif
+
#if EIGEN_DEFAULT_ALIGN_BYTES==16
- eigen_assert((size<16 || (std::size_t(result)%16)==0) && "System's malloc returned an unaligned pointer. Compile with EIGEN_MALLOC_ALREADY_ALIGNED=0 to fallback to handmade alignd memory allocator.");
+ eigen_assert((size<16 || (std::size_t(result)%16)==0) && "System's malloc returned an unaligned pointer. Compile with EIGEN_MALLOC_ALREADY_ALIGNED=0 to fall back to the handmade aligned memory allocator.");
#endif
#else
result = handmade_aligned_malloc(size);
@@ -174,7 +194,13 @@ EIGEN_DEVICE_FUNC inline void* aligned_malloc(std::size_t size)
EIGEN_DEVICE_FUNC inline void aligned_free(void *ptr)
{
#if (EIGEN_DEFAULT_ALIGN_BYTES==0) || EIGEN_MALLOC_ALREADY_ALIGNED
+
+ #if defined(EIGEN_HIP_DEVICE_COMPILE)
+ ::free(ptr);
+ #else
std::free(ptr);
+ #endif
+
#else
handmade_aligned_free(ptr);
#endif
@@ -218,7 +244,12 @@ template<> EIGEN_DEVICE_FUNC inline void* conditional_aligned_malloc<false>(std:
{
check_that_malloc_is_allowed();
+ #if defined(EIGEN_HIP_DEVICE_COMPILE)
+ void *result = ::malloc(size);
+ #else
void *result = std::malloc(size);
+ #endif
+
if(!result && size)
throw_std_bad_alloc();
return result;
@@ -232,7 +263,11 @@ template<bool Align> EIGEN_DEVICE_FUNC inline void conditional_aligned_free(void
template<> EIGEN_DEVICE_FUNC inline void conditional_aligned_free<false>(void *ptr)
{
+ #if defined(EIGEN_HIP_DEVICE_COMPILE)
+ ::free(ptr);
+ #else
std::free(ptr);
+ #endif
}
template<bool Align> inline void* conditional_aligned_realloc(void* ptr, std::size_t new_size, std::size_t old_size)
@@ -493,7 +528,11 @@ template<typename T> struct smart_copy_helper<T,true> {
IntPtr size = IntPtr(end)-IntPtr(start);
if(size==0) return;
eigen_internal_assert(start!=0 && end!=0 && target!=0);
- memcpy(target, start, size);
+ #if defined(EIGEN_HIP_DEVICE_COMPILE)
+ ::memcpy(target, start, size);
+ #else
+ std::memcpy(target, start, size);
+ #endif
}
};
@@ -542,7 +581,7 @@ template<typename T> struct smart_memmove_helper<T,false> {
// you can overwrite Eigen's default behavior regarding alloca by defining EIGEN_ALLOCA
// to the appropriate stack allocation function
-#ifndef EIGEN_ALLOCA
+#if ! defined EIGEN_ALLOCA && ! defined EIGEN_GPU_COMPILE_PHASE
#if EIGEN_OS_LINUX || EIGEN_OS_MAC || (defined alloca)
#define EIGEN_ALLOCA alloca
#elif EIGEN_COMP_MSVC
@@ -550,6 +589,15 @@ template<typename T> struct smart_memmove_helper<T,false> {
#endif
#endif
+// With clang -Oz -mthumb, alloca changes the stack pointer in a way that is
+// not allowed in Thumb2. -DEIGEN_STACK_ALLOCATION_LIMIT=0 doesn't work because
+// the compiler still emits bad code because stack allocation checks use "<=".
+// TODO: Eliminate after https://bugs.llvm.org/show_bug.cgi?id=23772
+// is fixed.
+#if defined(__clang__) && defined(__thumb__)
+ #undef EIGEN_ALLOCA
+#endif
+
// This helper class construct the allocated memory, and takes care of destructing and freeing the handled data
// at destruction time. In practice this helper class is mainly useful to avoid memory leak in case of exceptions.
template<typename T> class aligned_stack_memory_handler : noncopyable
@@ -561,12 +609,14 @@ template<typename T> class aligned_stack_memory_handler : noncopyable
* In this case, the buffer elements will also be destructed when this handler will be destructed.
* Finally, if \a dealloc is true, then the pointer \a ptr is freed.
**/
+ EIGEN_DEVICE_FUNC
aligned_stack_memory_handler(T* ptr, std::size_t size, bool dealloc)
: m_ptr(ptr), m_size(size), m_deallocate(dealloc)
{
if(NumTraits<T>::RequireInitialization && m_ptr)
Eigen::internal::construct_elements_of_array(m_ptr, size);
}
+ EIGEN_DEVICE_FUNC
~aligned_stack_memory_handler()
{
if(NumTraits<T>::RequireInitialization && m_ptr)
@@ -580,6 +630,60 @@ template<typename T> class aligned_stack_memory_handler : noncopyable
bool m_deallocate;
};
+#ifdef EIGEN_ALLOCA
+
+template<typename Xpr, int NbEvaluations,
+ bool MapExternalBuffer = nested_eval<Xpr,NbEvaluations>::Evaluate && Xpr::MaxSizeAtCompileTime==Dynamic
+ >
+struct local_nested_eval_wrapper
+{
+ static const bool NeedExternalBuffer = false;
+ typedef typename Xpr::Scalar Scalar;
+ typedef typename nested_eval<Xpr,NbEvaluations>::type ObjectType;
+ ObjectType object;
+
+ EIGEN_DEVICE_FUNC
+ local_nested_eval_wrapper(const Xpr& xpr, Scalar* ptr) : object(xpr)
+ {
+ EIGEN_UNUSED_VARIABLE(ptr);
+ eigen_internal_assert(ptr==0);
+ }
+};
+
+template<typename Xpr, int NbEvaluations>
+struct local_nested_eval_wrapper<Xpr,NbEvaluations,true>
+{
+ static const bool NeedExternalBuffer = true;
+ typedef typename Xpr::Scalar Scalar;
+ typedef typename plain_object_eval<Xpr>::type PlainObject;
+ typedef Map<PlainObject,EIGEN_DEFAULT_ALIGN_BYTES> ObjectType;
+ ObjectType object;
+
+ EIGEN_DEVICE_FUNC
+ local_nested_eval_wrapper(const Xpr& xpr, Scalar* ptr)
+ : object(ptr==0 ? reinterpret_cast<Scalar*>(Eigen::internal::aligned_malloc(sizeof(Scalar)*xpr.size())) : ptr, xpr.rows(), xpr.cols()),
+ m_deallocate(ptr==0)
+ {
+ if(NumTraits<Scalar>::RequireInitialization && object.data())
+ Eigen::internal::construct_elements_of_array(object.data(), object.size());
+ object = xpr;
+ }
+
+ EIGEN_DEVICE_FUNC
+ ~local_nested_eval_wrapper()
+ {
+ if(NumTraits<Scalar>::RequireInitialization && object.data())
+ Eigen::internal::destruct_elements_of_array(object.data(), object.size());
+ if(m_deallocate)
+ Eigen::internal::aligned_free(object.data());
+ }
+
+private:
+ bool m_deallocate;
+};
+
+#endif // EIGEN_ALLOCA
+
template<typename T> class scoped_array : noncopyable
{
T* m_ptr;
@@ -607,9 +711,11 @@ template<typename T> void swap(scoped_array<T> &a,scoped_array<T> &b)
} // end namespace internal
/** \internal
- * Declares, allocates and construct an aligned buffer named NAME of SIZE elements of type TYPE on the stack
- * if SIZE is smaller than EIGEN_STACK_ALLOCATION_LIMIT, and if stack allocation is supported by the platform
- * (currently, this is Linux and Visual Studio only). Otherwise the memory is allocated on the heap.
+ *
+ * The macro ei_declare_aligned_stack_constructed_variable(TYPE,NAME,SIZE,BUFFER) declares, allocates,
+ * and constructs an aligned buffer named NAME of SIZE elements of type TYPE on the stack
+ * if the size in bytes is smaller than EIGEN_STACK_ALLOCATION_LIMIT, and if stack allocation is supported by the platform
+ * (currently, this is Linux, OSX and Visual Studio only). Otherwise the memory is allocated on the heap.
* The allocated buffer is automatically deleted when exiting the scope of this declaration.
* If BUFFER is non null, then the declared variable is simply an alias for BUFFER, and no allocation/deletion occurs.
* Here is an example:
@@ -620,6 +726,14 @@ template<typename T> void swap(scoped_array<T> &a,scoped_array<T> &b)
* }
* \endcode
* The underlying stack allocation function can controlled with the EIGEN_ALLOCA preprocessor token.
+ *
+ * The macro ei_declare_local_nested_eval(XPR_T,XPR,N,NAME) is analogous to
+ * \code
+ * typename internal::nested_eval<XPR_T,N>::type NAME(XPR);
+ * \endcode
+ * with the advantage of using aligned stack allocation even if the maximal size of XPR at compile time is unknown.
+ * This is accomplished through alloca if the latter is supported and if the required number of bytes
+ * is below EIGEN_STACK_ALLOCATION_LIMIT.
*/
#ifdef EIGEN_ALLOCA
@@ -639,6 +753,13 @@ template<typename T> void swap(scoped_array<T> &a,scoped_array<T> &b)
: Eigen::internal::aligned_malloc(sizeof(TYPE)*SIZE) ); \
Eigen::internal::aligned_stack_memory_handler<TYPE> EIGEN_CAT(NAME,_stack_memory_destructor)((BUFFER)==0 ? NAME : 0,SIZE,sizeof(TYPE)*SIZE>EIGEN_STACK_ALLOCATION_LIMIT)
+
+ #define ei_declare_local_nested_eval(XPR_T,XPR,N,NAME) \
+ Eigen::internal::local_nested_eval_wrapper<XPR_T,N> EIGEN_CAT(NAME,_wrapper)(XPR, reinterpret_cast<typename XPR_T::Scalar*>( \
+ ( (Eigen::internal::local_nested_eval_wrapper<XPR_T,N>::NeedExternalBuffer) && ((sizeof(typename XPR_T::Scalar)*XPR.size())<=EIGEN_STACK_ALLOCATION_LIMIT) ) \
+ ? EIGEN_ALIGNED_ALLOCA( sizeof(typename XPR_T::Scalar)*XPR.size() ) : 0 ) ) ; \
+ typename Eigen::internal::local_nested_eval_wrapper<XPR_T,N>::ObjectType NAME(EIGEN_CAT(NAME,_wrapper).object)
+
#else
#define ei_declare_aligned_stack_constructed_variable(TYPE,NAME,SIZE,BUFFER) \
@@ -646,6 +767,9 @@ template<typename T> void swap(scoped_array<T> &a,scoped_array<T> &b)
TYPE* NAME = (BUFFER)!=0 ? BUFFER : reinterpret_cast<TYPE*>(Eigen::internal::aligned_malloc(sizeof(TYPE)*SIZE)); \
Eigen::internal::aligned_stack_memory_handler<TYPE> EIGEN_CAT(NAME,_stack_memory_destructor)((BUFFER)==0 ? NAME : 0,SIZE,true)
+
+#define ei_declare_local_nested_eval(XPR_T,XPR,N,NAME) typename Eigen::internal::nested_eval<XPR_T,N>::type NAME(XPR)
+
#endif
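A hedged usage sketch of ei_declare_aligned_stack_constructed_variable (function and buffer names are illustrative), mirroring the example from the documentation block above:

void scaled_copy(const Eigen::VectorXf& x, Eigen::VectorXf& out) {
  const Eigen::Index n = x.size();
  // allocated on the stack via alloca when supported and small enough, on the heap otherwise
  ei_declare_aligned_stack_constructed_variable(float, buf, n, 0);
  for (Eigen::Index i = 0; i < n; ++i) buf[i] = 2.f * x[i];
  out = Eigen::Map<const Eigen::VectorXf>(buf, n);
}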
@@ -688,15 +812,27 @@ template<typename T> void swap(scoped_array<T> &a,scoped_array<T> &b)
#endif
#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(true)
-#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(Scalar,Size) \
- EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(bool(((Size)!=Eigen::Dynamic) && ((sizeof(Scalar)*(Size))%EIGEN_MAX_ALIGN_BYTES==0)))
+#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(Scalar,Size) \
+ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(bool( \
+ ((Size)!=Eigen::Dynamic) && \
+ (((EIGEN_MAX_ALIGN_BYTES>=16) && ((sizeof(Scalar)*(Size))%(EIGEN_MAX_ALIGN_BYTES )==0)) || \
+ ((EIGEN_MAX_ALIGN_BYTES>=32) && ((sizeof(Scalar)*(Size))%(EIGEN_MAX_ALIGN_BYTES/2)==0)) || \
+ ((EIGEN_MAX_ALIGN_BYTES>=64) && ((sizeof(Scalar)*(Size))%(EIGEN_MAX_ALIGN_BYTES/4)==0)) )))
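For reference, a sketch of how the conditional macro is typically applied (the struct is illustrative): a class holding a fixed-size, vectorizable member gets an aligned operator new only when the member's byte size matches one of the supported alignment boundaries.

struct Payload {
  Eigen::Vector4f v;   // fixed-size, vectorizable member (16 bytes)
  // aligned operator new/delete generated only when sizeof(float)*4 hits a supported boundary
  EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(float, 4)
};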
/****************************************************************************/
/** \class aligned_allocator
* \ingroup Core_Module
*
-* \brief STL compatible allocator to use with with 16 byte aligned types
+* \brief STL compatible allocator to use with types requiring a non-standard alignment.
+*
+* The memory is aligned as for dynamically aligned matrix/array types such as MatrixXd.
+* By default, it will thus provide at least 16-byte alignment, and more in the following cases:
+* - 32-byte alignment if AVX is enabled.
+* - 64-byte alignment if AVX512 is enabled.
+*
+* This can be controlled using the \c EIGEN_MAX_ALIGN_BYTES macro as documented
+* \link TopicPreprocessorDirectivesPerformance there \endlink.
*
* Example:
* \code
diff --git a/Eigen/src/Core/util/Meta.h b/Eigen/src/Core/util/Meta.h
index 90eda6e70..1e4f95581 100755
--- a/Eigen/src/Core/util/Meta.h
+++ b/Eigen/src/Core/util/Meta.h
@@ -11,9 +11,18 @@
#ifndef EIGEN_META_H
#define EIGEN_META_H
-#if defined(__CUDA_ARCH__)
-#include <cfloat>
-#include <math_constants.h>
+#if defined(EIGEN_GPU_COMPILE_PHASE)
+
+ #include <cfloat>
+
+ #if defined(EIGEN_CUDA_ARCH)
+ #include <math_constants.h>
+ #endif
+
+ #if defined(EIGEN_HIP_DEVICE_COMPILE)
+ #include "Eigen/src/Core/arch/HIP/hcc/math_constants.h"
+ #endif
+
#endif
#if EIGEN_COMP_ICC>=1600 && __cplusplus >= 201103L
@@ -98,6 +107,8 @@ template<> struct is_arithmetic<signed long> { enum { value = true }; };
template<> struct is_arithmetic<unsigned long> { enum { value = true }; };
#if EIGEN_HAS_CXX11
+template<> struct is_arithmetic<signed long long> { enum { value = true }; };
+template<> struct is_arithmetic<unsigned long long> { enum { value = true }; };
using std::is_integral;
#else
template<typename T> struct is_integral { enum { value = false }; };
@@ -111,8 +122,33 @@ template<> struct is_integral<signed int> { enum { value = true }; }
template<> struct is_integral<unsigned int> { enum { value = true }; };
template<> struct is_integral<signed long> { enum { value = true }; };
template<> struct is_integral<unsigned long> { enum { value = true }; };
+#if EIGEN_COMP_MSVC
+template<> struct is_integral<signed __int64> { enum { value = true }; };
+template<> struct is_integral<unsigned __int64> { enum { value = true }; };
+#endif
#endif
+#if EIGEN_HAS_CXX11
+using std::make_unsigned;
+#else
+// TODO: Possibly improve this implementation of make_unsigned.
+// It is currently used only by
+// template<typename Scalar> struct random_default_impl<Scalar, false, true>.
+template<typename> struct make_unsigned;
+template<> struct make_unsigned<char> { typedef unsigned char type; };
+template<> struct make_unsigned<signed char> { typedef unsigned char type; };
+template<> struct make_unsigned<unsigned char> { typedef unsigned char type; };
+template<> struct make_unsigned<signed short> { typedef unsigned short type; };
+template<> struct make_unsigned<unsigned short> { typedef unsigned short type; };
+template<> struct make_unsigned<signed int> { typedef unsigned int type; };
+template<> struct make_unsigned<unsigned int> { typedef unsigned int type; };
+template<> struct make_unsigned<signed long> { typedef unsigned long type; };
+template<> struct make_unsigned<unsigned long> { typedef unsigned long type; };
+#if EIGEN_COMP_MSVC
+template<> struct make_unsigned<signed __int64> { typedef unsigned __int64 type; };
+template<> struct make_unsigned<unsigned __int64> { typedef unsigned __int64 type; };
+#endif
+#endif
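A small sketch of the trait in use (the alias names are illustrative): in C++11 mode this is simply std::make_unsigned pulled into internal, and the pre-C++11 fallback above covers the same integer types.

typedef Eigen::internal::make_unsigned<long>::type ulong_t;          // unsigned long
typedef Eigen::internal::make_unsigned<signed char>::type uchar_t;   // unsigned char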
template <typename T> struct add_const { typedef const T type; };
template <typename T> struct add_const<T&> { typedef T& type; };
@@ -139,16 +175,19 @@ private:
struct yes {int a[1];};
struct no {int a[2];};
- static yes test(const To&, int);
+ template<typename T>
+ static yes test(T, int);
+
+ template<typename T>
static no test(any_conversion, ...);
public:
- static From ms_from;
+ static typename internal::remove_reference<From>::type* ms_from;
#ifdef __INTEL_COMPILER
#pragma warning push
#pragma warning ( disable : 2259 )
#endif
- enum { value = sizeof(test(ms_from, 0))==sizeof(yes) };
+ enum { value = sizeof(test<To>(*ms_from, 0))==sizeof(yes) };
#ifdef __INTEL_COMPILER
#pragma warning pop
#endif
@@ -157,8 +196,7 @@ public:
template<typename From, typename To>
struct is_convertible
{
- enum { value = is_convertible_impl<typename remove_all<From>::type,
- typename remove_all<To >::type>::value };
+ enum { value = is_convertible_impl<From,To>::value };
};
/** \internal Allows to enable/disable an overload
@@ -169,7 +207,7 @@ template<bool Condition, typename T=void> struct enable_if;
template<typename T> struct enable_if<true,T>
{ typedef T type; };
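A sketch of the SFINAE pattern this minimal enable_if supports (the function below is illustrative, not part of the patch): the overload only participates in overload resolution when the condition holds.

template<typename T>
typename Eigen::internal::enable_if<Eigen::internal::is_integral<T>::value, T>::type
twice(T x) { return x + x; }   // selected for integral T only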
-#if defined(__CUDA_ARCH__)
+#if defined(EIGEN_GPU_COMPILE_PHASE)
#if !defined(__FLT_EPSILON__)
#define __FLT_EPSILON__ FLT_EPSILON
#define __DBL_EPSILON__ DBL_EPSILON
@@ -191,13 +229,31 @@ template<> struct numeric_limits<float>
EIGEN_DEVICE_FUNC
static float epsilon() { return __FLT_EPSILON__; }
EIGEN_DEVICE_FUNC
- static float (max)() { return CUDART_MAX_NORMAL_F; }
+ static float (max)() {
+ #if defined(EIGEN_CUDA_ARCH)
+ return CUDART_MAX_NORMAL_F;
+ #else
+ return HIPRT_MAX_NORMAL_F;
+ #endif
+ }
EIGEN_DEVICE_FUNC
static float (min)() { return FLT_MIN; }
EIGEN_DEVICE_FUNC
- static float infinity() { return CUDART_INF_F; }
+ static float infinity() {
+ #if defined(EIGEN_CUDA_ARCH)
+ return CUDART_INF_F;
+ #else
+ return HIPRT_INF_F;
+ #endif
+ }
EIGEN_DEVICE_FUNC
- static float quiet_NaN() { return CUDART_NAN_F; }
+ static float quiet_NaN() {
+ #if defined(EIGEN_CUDA_ARCH)
+ return CUDART_NAN_F;
+ #else
+ return HIPRT_NAN_F;
+ #endif
+ }
};
template<> struct numeric_limits<double>
{
@@ -208,9 +264,21 @@ template<> struct numeric_limits<double>
EIGEN_DEVICE_FUNC
static double (min)() { return DBL_MIN; }
EIGEN_DEVICE_FUNC
- static double infinity() { return CUDART_INF; }
+ static double infinity() {
+ #if defined(EIGEN_CUDA_ARCH)
+ return CUDART_INF;
+ #else
+ return HIPRT_INF;
+ #endif
+ }
EIGEN_DEVICE_FUNC
- static double quiet_NaN() { return CUDART_NAN; }
+ static double quiet_NaN() {
+ #if defined(EIGEN_CUDA_ARCH)
+ return CUDART_NAN;
+ #else
+ return HIPRT_NAN;
+ #endif
+ }
};
template<> struct numeric_limits<int>
{
@@ -272,7 +340,7 @@ template<> struct numeric_limits<unsigned long long>
#endif
/** \internal
- * A base class do disable default copy ctor and copy assignement operator.
+ * A base class to disable the default copy ctor and copy assignment operator.
*/
class noncopyable
{
@@ -433,10 +501,10 @@ struct meta_no { char a[2]; };
template <typename T>
struct has_ReturnType
{
- template <typename C> static meta_yes testFunctor(typename C::ReturnType const *);
- template <typename C> static meta_no testFunctor(...);
+ template <typename C> static meta_yes testFunctor(C const *, typename C::ReturnType const * = 0);
+ template <typename C> static meta_no testFunctor(...);
- enum { value = sizeof(testFunctor<T>(0)) == sizeof(meta_yes) };
+ enum { value = sizeof(testFunctor<T>(static_cast<T*>(0))) == sizeof(meta_yes) };
};
template<typename T> const T* return_ptr();
@@ -522,14 +590,14 @@ template<typename T, typename U> struct scalar_product_traits
} // end namespace internal
namespace numext {
-
-#if defined(__CUDA_ARCH__)
+
+#if defined(EIGEN_GPU_COMPILE_PHASE)
template<typename T> EIGEN_DEVICE_FUNC void swap(T &a, T &b) { T tmp = b; b = a; a = tmp; }
#else
template<typename T> EIGEN_STRONG_INLINE void swap(T &a, T &b) { std::swap(a,b); }
#endif
-#if defined(__CUDA_ARCH__)
+#if defined(EIGEN_GPU_COMPILE_PHASE)
using internal::device::numeric_limits;
#else
using std::numeric_limits;
@@ -538,11 +606,36 @@ using std::numeric_limits;
// Integer division with rounding up.
// T is assumed to be an integer type with a>=0, and b>0
template<typename T>
+EIGEN_DEVICE_FUNC
T div_ceil(const T &a, const T &b)
{
return (a+b-1) / b;
}
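A quick worked example of the rounding behaviour (values chosen purely for illustration): div_ceil returns the number of size-b blocks needed to cover a elements.

inline void div_ceil_example() {
  int blocks10 = Eigen::numext::div_ceil(10, 4); // (10+4-1)/4 == 3
  int blocks8  = Eigen::numext::div_ceil(8, 4);  // (8+4-1)/4  == 2
  (void)blocks10; (void)blocks8;
}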
+// The aim of the following functions is to bypass -Wfloat-equal warnings
+// when we really want a strict equality comparison on floating-point values.
+template<typename X, typename Y> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC
+bool equal_strict(const X& x,const Y& y) { return x == y; }
+
+#if !defined(EIGEN_GPU_COMPILE_PHASE) || defined(EIGEN_CONSTEXPR_ARE_DEVICE_FUNC)
+template<> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC
+bool equal_strict(const float& x,const float& y) { return std::equal_to<float>()(x,y); }
+
+template<> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC
+bool equal_strict(const double& x,const double& y) { return std::equal_to<double>()(x,y); }
+#endif
+
+template<typename X, typename Y> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC
+bool not_equal_strict(const X& x,const Y& y) { return x != y; }
+
+#if !defined(EIGEN_GPU_COMPILE_PHASE) || defined(EIGEN_CONSTEXPR_ARE_DEVICE_FUNC)
+template<> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC
+bool not_equal_strict(const float& x,const float& y) { return std::not_equal_to<float>()(x,y); }
+
+template<> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC
+bool not_equal_strict(const double& x,const double& y) { return std::not_equal_to<double>()(x,y); }
+#endif
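A usage sketch (the helper is hypothetical): call the strict helpers instead of a raw == or != on floating-point values, so -Wfloat-equal stays quiet while the comparison remains exact.

inline bool converged(double current, double previous) {
  return Eigen::numext::equal_strict(current, previous); // exact comparison, no warning
}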
+
} // end namespace numext
} // end namespace Eigen
diff --git a/Eigen/src/Core/util/ReenableStupidWarnings.h b/Eigen/src/Core/util/ReenableStupidWarnings.h
index 86b60f52f..e23a128d1 100644
--- a/Eigen/src/Core/util/ReenableStupidWarnings.h
+++ b/Eigen/src/Core/util/ReenableStupidWarnings.h
@@ -1,4 +1,8 @@
-#ifdef EIGEN_WARNINGS_DISABLED
+#ifdef EIGEN_WARNINGS_DISABLED_2
+// "DisableStupidWarnings.h" was included twice recursively: Do not reenable warnings yet!
+# undef EIGEN_WARNINGS_DISABLED_2
+
+#elif defined(EIGEN_WARNINGS_DISABLED)
#undef EIGEN_WARNINGS_DISABLED
#ifndef EIGEN_PERMANENTLY_DISABLE_STUPID_WARNINGS
@@ -8,7 +12,7 @@
#pragma warning pop
#elif defined __clang__
#pragma clang diagnostic pop
- #elif defined __GNUC__ && __GNUC__>=6
+ #elif defined __GNUC__
#pragma GCC diagnostic pop
#endif
diff --git a/Eigen/src/Core/util/StaticAssert.h b/Eigen/src/Core/util/StaticAssert.h
index 983361a45..500e47792 100644
--- a/Eigen/src/Core/util/StaticAssert.h
+++ b/Eigen/src/Core/util/StaticAssert.h
@@ -24,6 +24,7 @@
*
*/
+#ifndef EIGEN_STATIC_ASSERT
#ifndef EIGEN_NO_STATIC_ASSERT
#if EIGEN_MAX_CPP_VER>=11 && (__has_feature(cxx_static_assert) || (defined(__cplusplus) && __cplusplus >= 201103L) || (EIGEN_COMP_MSVC >= 1600))
@@ -44,64 +45,65 @@
struct static_assertion<true>
{
enum {
- YOU_TRIED_CALLING_A_VECTOR_METHOD_ON_A_MATRIX,
- YOU_MIXED_VECTORS_OF_DIFFERENT_SIZES,
- YOU_MIXED_MATRICES_OF_DIFFERENT_SIZES,
- THIS_METHOD_IS_ONLY_FOR_VECTORS_OF_A_SPECIFIC_SIZE,
- THIS_METHOD_IS_ONLY_FOR_MATRICES_OF_A_SPECIFIC_SIZE,
- THIS_METHOD_IS_ONLY_FOR_OBJECTS_OF_A_SPECIFIC_SIZE,
- OUT_OF_RANGE_ACCESS,
- YOU_MADE_A_PROGRAMMING_MISTAKE,
- EIGEN_INTERNAL_ERROR_PLEASE_FILE_A_BUG_REPORT,
- EIGEN_INTERNAL_COMPILATION_ERROR_OR_YOU_MADE_A_PROGRAMMING_MISTAKE,
- YOU_CALLED_A_FIXED_SIZE_METHOD_ON_A_DYNAMIC_SIZE_MATRIX_OR_VECTOR,
- YOU_CALLED_A_DYNAMIC_SIZE_METHOD_ON_A_FIXED_SIZE_MATRIX_OR_VECTOR,
- UNALIGNED_LOAD_AND_STORE_OPERATIONS_UNIMPLEMENTED_ON_ALTIVEC,
- THIS_FUNCTION_IS_NOT_FOR_INTEGER_NUMERIC_TYPES,
- FLOATING_POINT_ARGUMENT_PASSED__INTEGER_WAS_EXPECTED,
- NUMERIC_TYPE_MUST_BE_REAL,
- COEFFICIENT_WRITE_ACCESS_TO_SELFADJOINT_NOT_SUPPORTED,
- WRITING_TO_TRIANGULAR_PART_WITH_UNIT_DIAGONAL_IS_NOT_SUPPORTED,
- THIS_METHOD_IS_ONLY_FOR_FIXED_SIZE,
- INVALID_MATRIX_PRODUCT,
- INVALID_VECTOR_VECTOR_PRODUCT__IF_YOU_WANTED_A_DOT_OR_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTIONS,
- INVALID_MATRIX_PRODUCT__IF_YOU_WANTED_A_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTION,
- YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY,
- THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES,
- THIS_METHOD_IS_ONLY_FOR_ROW_MAJOR_MATRICES,
- INVALID_MATRIX_TEMPLATE_PARAMETERS,
- INVALID_MATRIXBASE_TEMPLATE_PARAMETERS,
- BOTH_MATRICES_MUST_HAVE_THE_SAME_STORAGE_ORDER,
- THIS_METHOD_IS_ONLY_FOR_DIAGONAL_MATRIX,
- THE_MATRIX_OR_EXPRESSION_THAT_YOU_PASSED_DOES_NOT_HAVE_THE_EXPECTED_TYPE,
- THIS_METHOD_IS_ONLY_FOR_EXPRESSIONS_WITH_DIRECT_MEMORY_ACCESS_SUCH_AS_MAP_OR_PLAIN_MATRICES,
- YOU_ALREADY_SPECIFIED_THIS_STRIDE,
- INVALID_STORAGE_ORDER_FOR_THIS_VECTOR_EXPRESSION,
- THE_BRACKET_OPERATOR_IS_ONLY_FOR_VECTORS__USE_THE_PARENTHESIS_OPERATOR_INSTEAD,
- PACKET_ACCESS_REQUIRES_TO_HAVE_INNER_STRIDE_FIXED_TO_1,
- THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS,
- YOU_CANNOT_MIX_ARRAYS_AND_MATRICES,
- YOU_PERFORMED_AN_INVALID_TRANSFORMATION_CONVERSION,
- THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY,
- YOU_ARE_TRYING_TO_USE_AN_INDEX_BASED_ACCESSOR_ON_AN_EXPRESSION_THAT_DOES_NOT_SUPPORT_THAT,
- THIS_METHOD_IS_ONLY_FOR_1x1_EXPRESSIONS,
- THIS_METHOD_IS_ONLY_FOR_INNER_OR_LAZY_PRODUCTS,
- THIS_METHOD_IS_ONLY_FOR_EXPRESSIONS_OF_BOOL,
- THIS_METHOD_IS_ONLY_FOR_ARRAYS_NOT_MATRICES,
- YOU_PASSED_A_ROW_VECTOR_BUT_A_COLUMN_VECTOR_WAS_EXPECTED,
- YOU_PASSED_A_COLUMN_VECTOR_BUT_A_ROW_VECTOR_WAS_EXPECTED,
- THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE,
- THE_STORAGE_ORDER_OF_BOTH_SIDES_MUST_MATCH,
- OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG,
- IMPLICIT_CONVERSION_TO_SCALAR_IS_FOR_INNER_PRODUCT_ONLY,
- STORAGE_LAYOUT_DOES_NOT_MATCH,
- EIGEN_INTERNAL_ERROR_PLEASE_FILE_A_BUG_REPORT__INVALID_COST_VALUE,
- THIS_COEFFICIENT_ACCESSOR_TAKING_ONE_ACCESS_IS_ONLY_FOR_EXPRESSIONS_ALLOWING_LINEAR_ACCESS,
- MATRIX_FREE_CONJUGATE_GRADIENT_IS_COMPATIBLE_WITH_UPPER_UNION_LOWER_MODE_ONLY,
- THIS_TYPE_IS_NOT_SUPPORTED,
- STORAGE_KIND_MUST_MATCH,
- STORAGE_INDEX_MUST_MATCH,
- CHOLMOD_SUPPORTS_DOUBLE_PRECISION_ONLY
+ YOU_TRIED_CALLING_A_VECTOR_METHOD_ON_A_MATRIX=1,
+ YOU_MIXED_VECTORS_OF_DIFFERENT_SIZES=1,
+ YOU_MIXED_MATRICES_OF_DIFFERENT_SIZES=1,
+ THIS_METHOD_IS_ONLY_FOR_VECTORS_OF_A_SPECIFIC_SIZE=1,
+ THIS_METHOD_IS_ONLY_FOR_MATRICES_OF_A_SPECIFIC_SIZE=1,
+ THIS_METHOD_IS_ONLY_FOR_OBJECTS_OF_A_SPECIFIC_SIZE=1,
+ OUT_OF_RANGE_ACCESS=1,
+ YOU_MADE_A_PROGRAMMING_MISTAKE=1,
+ EIGEN_INTERNAL_ERROR_PLEASE_FILE_A_BUG_REPORT=1,
+ EIGEN_INTERNAL_COMPILATION_ERROR_OR_YOU_MADE_A_PROGRAMMING_MISTAKE=1,
+ YOU_CALLED_A_FIXED_SIZE_METHOD_ON_A_DYNAMIC_SIZE_MATRIX_OR_VECTOR=1,
+ YOU_CALLED_A_DYNAMIC_SIZE_METHOD_ON_A_FIXED_SIZE_MATRIX_OR_VECTOR=1,
+ UNALIGNED_LOAD_AND_STORE_OPERATIONS_UNIMPLEMENTED_ON_ALTIVEC=1,
+ THIS_FUNCTION_IS_NOT_FOR_INTEGER_NUMERIC_TYPES=1,
+ FLOATING_POINT_ARGUMENT_PASSED__INTEGER_WAS_EXPECTED=1,
+ NUMERIC_TYPE_MUST_BE_REAL=1,
+ COEFFICIENT_WRITE_ACCESS_TO_SELFADJOINT_NOT_SUPPORTED=1,
+ WRITING_TO_TRIANGULAR_PART_WITH_UNIT_DIAGONAL_IS_NOT_SUPPORTED=1,
+ THIS_METHOD_IS_ONLY_FOR_FIXED_SIZE=1,
+ INVALID_MATRIX_PRODUCT=1,
+ INVALID_VECTOR_VECTOR_PRODUCT__IF_YOU_WANTED_A_DOT_OR_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTIONS=1,
+ INVALID_MATRIX_PRODUCT__IF_YOU_WANTED_A_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTION=1,
+ YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY=1,
+ THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES=1,
+ THIS_METHOD_IS_ONLY_FOR_ROW_MAJOR_MATRICES=1,
+ INVALID_MATRIX_TEMPLATE_PARAMETERS=1,
+ INVALID_MATRIXBASE_TEMPLATE_PARAMETERS=1,
+ BOTH_MATRICES_MUST_HAVE_THE_SAME_STORAGE_ORDER=1,
+ THIS_METHOD_IS_ONLY_FOR_DIAGONAL_MATRIX=1,
+ THE_MATRIX_OR_EXPRESSION_THAT_YOU_PASSED_DOES_NOT_HAVE_THE_EXPECTED_TYPE=1,
+ THIS_METHOD_IS_ONLY_FOR_EXPRESSIONS_WITH_DIRECT_MEMORY_ACCESS_SUCH_AS_MAP_OR_PLAIN_MATRICES=1,
+ YOU_ALREADY_SPECIFIED_THIS_STRIDE=1,
+ INVALID_STORAGE_ORDER_FOR_THIS_VECTOR_EXPRESSION=1,
+ THE_BRACKET_OPERATOR_IS_ONLY_FOR_VECTORS__USE_THE_PARENTHESIS_OPERATOR_INSTEAD=1,
+ PACKET_ACCESS_REQUIRES_TO_HAVE_INNER_STRIDE_FIXED_TO_1=1,
+ THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS=1,
+ YOU_CANNOT_MIX_ARRAYS_AND_MATRICES=1,
+ YOU_PERFORMED_AN_INVALID_TRANSFORMATION_CONVERSION=1,
+ THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY=1,
+ YOU_ARE_TRYING_TO_USE_AN_INDEX_BASED_ACCESSOR_ON_AN_EXPRESSION_THAT_DOES_NOT_SUPPORT_THAT=1,
+ THIS_METHOD_IS_ONLY_FOR_1x1_EXPRESSIONS=1,
+ THIS_METHOD_IS_ONLY_FOR_INNER_OR_LAZY_PRODUCTS=1,
+ THIS_METHOD_IS_ONLY_FOR_EXPRESSIONS_OF_BOOL=1,
+ THIS_METHOD_IS_ONLY_FOR_ARRAYS_NOT_MATRICES=1,
+ YOU_PASSED_A_ROW_VECTOR_BUT_A_COLUMN_VECTOR_WAS_EXPECTED=1,
+ YOU_PASSED_A_COLUMN_VECTOR_BUT_A_ROW_VECTOR_WAS_EXPECTED=1,
+ THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE=1,
+ THE_STORAGE_ORDER_OF_BOTH_SIDES_MUST_MATCH=1,
+ OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG=1,
+ IMPLICIT_CONVERSION_TO_SCALAR_IS_FOR_INNER_PRODUCT_ONLY=1,
+ STORAGE_LAYOUT_DOES_NOT_MATCH=1,
+ EIGEN_INTERNAL_ERROR_PLEASE_FILE_A_BUG_REPORT__INVALID_COST_VALUE=1,
+ THIS_COEFFICIENT_ACCESSOR_TAKING_ONE_ACCESS_IS_ONLY_FOR_EXPRESSIONS_ALLOWING_LINEAR_ACCESS=1,
+ MATRIX_FREE_CONJUGATE_GRADIENT_IS_COMPATIBLE_WITH_UPPER_UNION_LOWER_MODE_ONLY=1,
+ THIS_TYPE_IS_NOT_SUPPORTED=1,
+ STORAGE_KIND_MUST_MATCH=1,
+ STORAGE_INDEX_MUST_MATCH=1,
+ CHOLMOD_SUPPORTS_DOUBLE_PRECISION_ONLY=1,
+ SELFADJOINTVIEW_ACCEPTS_UPPER_AND_LOWER_MODE_ONLY=1
};
};
@@ -131,7 +133,7 @@
#define EIGEN_STATIC_ASSERT(CONDITION,MSG) eigen_assert((CONDITION) && #MSG);
#endif // EIGEN_NO_STATIC_ASSERT
-
+#endif // EIGEN_STATIC_ASSERT
// static assertion failing if the type \a TYPE is not a vector type
#define EIGEN_STATIC_ASSERT_VECTOR_ONLY(TYPE) \
diff --git a/Eigen/src/Core/util/SymbolicIndex.h b/Eigen/src/Core/util/SymbolicIndex.h
index bb6349eb9..17cf46f05 100644
--- a/Eigen/src/Core/util/SymbolicIndex.h
+++ b/Eigen/src/Core/util/SymbolicIndex.h
@@ -12,7 +12,7 @@
namespace Eigen {
-/** \namespace Eigen::Symbolic
+/** \namespace Eigen::symbolic
* \ingroup Core_Module
*
* This namespace defines a set of classes and functions to build and evaluate symbolic expressions of scalar type Index.
@@ -20,9 +20,9 @@ namespace Eigen {
*
* \code
* // First step, defines symbols:
- * struct x_tag {}; static const Symbolic::SymbolExpr<x_tag> x;
- * struct y_tag {}; static const Symbolic::SymbolExpr<y_tag> y;
- * struct z_tag {}; static const Symbolic::SymbolExpr<z_tag> z;
+ * struct x_tag {}; static const symbolic::SymbolExpr<x_tag> x;
+ * struct y_tag {}; static const symbolic::SymbolExpr<y_tag> y;
+ * struct z_tag {}; static const symbolic::SymbolExpr<z_tag> z;
*
* // Defines an expression:
* auto expr = (x+3)/y+z;
@@ -35,10 +35,10 @@ namespace Eigen {
* std::cout << expr98.eval(x=6) << "\n";
* \endcode
*
- * It is currently only used internally to define and minipulate the placeholders::last and placeholders::end symbols in Eigen::seq and Eigen::seqN.
+ * It is currently only used internally to define and manipulate the Eigen::last and Eigen::lastp1 symbols in Eigen::seq and Eigen::seqN.
*
*/
-namespace Symbolic {
+namespace symbolic {
template<typename Tag> class Symbol;
template<typename Arg0> class NegateExpr;
@@ -187,17 +187,10 @@ public:
template<typename T>
struct is_symbolic {
- // BaseExpr has no conversion ctor, so we only have to check whether T can be staticaly cast to its base class BaseExpr<T>.
+ // BaseExpr has no conversion ctor, so we only have to check whether T can be statically cast to its base class BaseExpr<T>.
enum { value = internal::is_convertible<T,BaseExpr<T> >::value };
};
-// Specialization for functions, because is_convertible fails in this case.
-// Useful in c++98/11 mode when testing is_symbolic<decltype(fix<N>)>
-template<typename T>
-struct is_symbolic<T (*)()> {
- enum { value = false };
-};
-
/** Represents the actual value of a symbol identified by its tag
*
* It is the return type of SymbolValue::operator=, and most of the time this is only way it is used.
@@ -293,7 +286,7 @@ protected:
Arg1 m_arg1;
};
-} // end namespace Symbolic
+} // end namespace symbolic
} // end namespace Eigen
diff --git a/Eigen/src/Core/util/XprHelper.h b/Eigen/src/Core/util/XprHelper.h
index 4b337f29f..836ff4711 100644
--- a/Eigen/src/Core/util/XprHelper.h
+++ b/Eigen/src/Core/util/XprHelper.h
@@ -34,6 +34,26 @@ inline IndexDest convert_index(const IndexSrc& idx) {
return IndexDest(idx);
}
+// true if T can be considered as an integral index (i.e., an integral type or enum)
+template<typename T> struct is_valid_index_type
+{
+ enum { value =
+#if EIGEN_HAS_TYPE_TRAITS
+ internal::is_integral<T>::value || std::is_enum<T>::value
+#elif EIGEN_COMP_MSVC
+ internal::is_integral<T>::value || __is_enum(T)
+#else
+ // without C++11, we use is_convertible to Index instead of is_integral in order to treat enums as Index.
+ internal::is_convertible<T,Index>::value
+#endif
+ };
+};
+
+// true if at least one of the two types is not a valid index type
+template<typename RowIndices, typename ColIndices>
+struct valid_indexed_view_overload {
+ enum { value = !(internal::is_valid_index_type<RowIndices>::value && internal::is_valid_index_type<ColIndices>::value) };
+};
// promote_scalar_arg is a helper used in operations between an expression and a scalar, like:
// expression * scalar
@@ -385,7 +405,7 @@ template<typename T> struct plain_matrix_type_row_major
typedef Matrix<typename traits<T>::Scalar,
Rows,
Cols,
- (MaxCols==1&&MaxRows!=1) ? RowMajor : ColMajor,
+ (MaxCols==1&&MaxRows!=1) ? ColMajor : RowMajor,
MaxRows,
MaxCols
> type;
@@ -440,7 +460,7 @@ template<typename T, int n, typename PlainObject = typename plain_object_eval<T>
{
enum {
ScalarReadCost = NumTraits<typename traits<T>::Scalar>::ReadCost,
- CoeffReadCost = evaluator<T>::CoeffReadCost, // NOTE What if an evaluator evaluate itself into a tempory?
+ CoeffReadCost = evaluator<T>::CoeffReadCost, // NOTE What if an evaluator evaluates itself into a temporary?
// Then CoeffReadCost will be small (e.g., 1) but we still have to evaluate, especially if n>1.
// This situation is already taken care of by the EvalBeforeNestingBit flag, which is turned ON
// for all evaluators creating a temporary. This flag is then propagated by the parent evaluators.
@@ -656,17 +676,32 @@ template<typename T> struct is_diagonal<DiagonalWrapper<T> >
template<typename T, int S> struct is_diagonal<DiagonalMatrix<T,S> >
{ enum { ret = true }; };
+
+template<typename T> struct is_identity
+{ enum { value = false }; };
+
+template<typename T> struct is_identity<CwiseNullaryOp<internal::scalar_identity_op<typename T::Scalar>, T> >
+{ enum { value = true }; };
+
+
template<typename S1, typename S2> struct glue_shapes;
template<> struct glue_shapes<DenseShape,TriangularShape> { typedef TriangularShape type; };
template<typename T1, typename T2>
-bool is_same_dense(const T1 &mat1, const T2 &mat2, typename enable_if<has_direct_access<T1>::ret&&has_direct_access<T2>::ret, T1>::type * = 0)
+struct possibly_same_dense {
+ enum { value = has_direct_access<T1>::ret && has_direct_access<T2>::ret && is_same<typename T1::Scalar,typename T2::Scalar>::value };
+};
+
+template<typename T1, typename T2>
+EIGEN_DEVICE_FUNC
+bool is_same_dense(const T1 &mat1, const T2 &mat2, typename enable_if<possibly_same_dense<T1,T2>::value>::type * = 0)
{
return (mat1.data()==mat2.data()) && (mat1.innerStride()==mat2.innerStride()) && (mat1.outerStride()==mat2.outerStride());
}
template<typename T1, typename T2>
-bool is_same_dense(const T1 &, const T2 &, typename enable_if<!(has_direct_access<T1>::ret&&has_direct_access<T2>::ret), T1>::type * = 0)
+EIGEN_DEVICE_FUNC
+bool is_same_dense(const T1 &, const T2 &, typename enable_if<!possibly_same_dense<T1,T2>::value>::type * = 0)
{
return false;
}
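A sketch of what the reworked helper reports (the matrices are illustrative): aliasing is detected from equal data pointers and strides, and incompatible operand types now simply return false instead of failing to compile.

inline void is_same_dense_example() {
  Eigen::MatrixXd A(4,4), B(4,4);
  bool aliased  = Eigen::internal::is_same_dense(A, A); // true: same data pointer and strides
  bool distinct = Eigen::internal::is_same_dense(A, B); // false: different data pointers
  (void)aliased; (void)distinct;
}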
diff --git a/Eigen/src/Eigenvalues/ComplexEigenSolver.h b/Eigen/src/Eigenvalues/ComplexEigenSolver.h
index dc5fae06a..081e918f1 100644
--- a/Eigen/src/Eigenvalues/ComplexEigenSolver.h
+++ b/Eigen/src/Eigenvalues/ComplexEigenSolver.h
@@ -214,7 +214,7 @@ template<typename _MatrixType> class ComplexEigenSolver
/** \brief Reports whether previous computation was successful.
*
- * \returns \c Success if computation was succesful, \c NoConvergence otherwise.
+ * \returns \c Success if computation was successful, \c NoConvergence otherwise.
*/
ComputationInfo info() const
{
diff --git a/Eigen/src/Eigenvalues/ComplexSchur.h b/Eigen/src/Eigenvalues/ComplexSchur.h
index 7f38919f7..b8b3490c6 100644
--- a/Eigen/src/Eigenvalues/ComplexSchur.h
+++ b/Eigen/src/Eigenvalues/ComplexSchur.h
@@ -212,7 +212,7 @@ template<typename _MatrixType> class ComplexSchur
/** \brief Reports whether previous computation was successful.
*
- * \returns \c Success if computation was succesful, \c NoConvergence otherwise.
+ * \returns \c Success if computation was successful, \c NoConvergence otherwise.
*/
ComputationInfo info() const
{
diff --git a/Eigen/src/Eigenvalues/EigenSolver.h b/Eigen/src/Eigenvalues/EigenSolver.h
index f205b185d..997bebe7b 100644
--- a/Eigen/src/Eigenvalues/EigenSolver.h
+++ b/Eigen/src/Eigenvalues/EigenSolver.h
@@ -277,7 +277,7 @@ template<typename _MatrixType> class EigenSolver
template<typename InputType>
EigenSolver& compute(const EigenBase<InputType>& matrix, bool computeEigenvectors = true);
- /** \returns NumericalIssue if the input contains INF or NaN values or overflow occured. Returns Success otherwise. */
+ /** \returns NumericalIssue if the input contains INF or NaN values or overflow occurred. Returns Success otherwise. */
ComputationInfo info() const
{
eigen_assert(m_isInitialized && "EigenSolver is not initialized.");
diff --git a/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h b/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h
index 36a91dffc..87d789b3f 100644
--- a/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h
+++ b/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h
@@ -311,7 +311,6 @@ GeneralizedEigenSolver<MatrixType>::compute(const MatrixType& A, const MatrixTyp
// Aliases:
Map<VectorType> v(reinterpret_cast<Scalar*>(m_tmp.data()), size);
ComplexVectorType &cv = m_tmp;
- const MatrixType &mZ = m_realQZ.matrixZ();
const MatrixType &mS = m_realQZ.matrixS();
const MatrixType &mT = m_realQZ.matrixT();
@@ -351,7 +350,7 @@ GeneralizedEigenSolver<MatrixType>::compute(const MatrixType& A, const MatrixTyp
}
}
}
- m_eivec.col(i).real().noalias() = mZ.transpose() * v;
+ m_eivec.col(i).real().noalias() = m_realQZ.matrixZ().transpose() * v;
m_eivec.col(i).real().normalize();
m_eivec.col(i).imag().setConstant(0);
}
@@ -400,7 +399,7 @@ GeneralizedEigenSolver<MatrixType>::compute(const MatrixType& A, const MatrixTyp
/ (alpha*mT.coeffRef(j,j) - static_cast<Scalar>(beta*mS.coeffRef(j,j)));
}
}
- m_eivec.col(i+1).noalias() = (mZ.transpose() * cv);
+ m_eivec.col(i+1).noalias() = (m_realQZ.matrixZ().transpose() * cv);
m_eivec.col(i+1).normalize();
m_eivec.col(i) = m_eivec.col(i+1).conjugate();
}
diff --git a/Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h b/Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h
index 5f6bb8289..d0f9091be 100644
--- a/Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h
+++ b/Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h
@@ -121,7 +121,7 @@ class GeneralizedSelfAdjointEigenSolver : public SelfAdjointEigenSolver<_MatrixT
*
* \returns Reference to \c *this
*
- * Accoring to \p options, this function computes eigenvalues and (if requested)
+ * According to \p options, this function computes eigenvalues and (if requested)
* the eigenvectors of one of the following three generalized eigenproblems:
* - \c Ax_lBx: \f$ Ax = \lambda B x \f$
* - \c ABx_lx: \f$ ABx = \lambda x \f$
diff --git a/Eigen/src/Eigenvalues/HessenbergDecomposition.h b/Eigen/src/Eigenvalues/HessenbergDecomposition.h
index f647f69b0..d947dac4e 100644
--- a/Eigen/src/Eigenvalues/HessenbergDecomposition.h
+++ b/Eigen/src/Eigenvalues/HessenbergDecomposition.h
@@ -315,7 +315,7 @@ void HessenbergDecomposition<MatrixType>::_compute(MatrixType& matA, CoeffVector
// A = A H'
matA.rightCols(remainingSize)
- .applyHouseholderOnTheRight(matA.col(i).tail(remainingSize-1).conjugate(), numext::conj(h), &temp.coeffRef(0));
+ .applyHouseholderOnTheRight(matA.col(i).tail(remainingSize-1), numext::conj(h), &temp.coeffRef(0));
}
}
diff --git a/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h b/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h
index 4fec8af0a..66e5a3dbb 100644
--- a/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h
+++ b/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h
@@ -66,7 +66,6 @@ template<typename Derived>
inline typename MatrixBase<Derived>::EigenvaluesReturnType
MatrixBase<Derived>::eigenvalues() const
{
- typedef typename internal::traits<Derived>::Scalar Scalar;
return internal::eigenvalues_selector<Derived, NumTraits<Scalar>::IsComplex>::run(derived());
}
@@ -85,10 +84,9 @@ MatrixBase<Derived>::eigenvalues() const
* \sa SelfAdjointEigenSolver::eigenvalues(), MatrixBase::eigenvalues()
*/
template<typename MatrixType, unsigned int UpLo>
-inline typename SelfAdjointView<MatrixType, UpLo>::EigenvaluesReturnType
+EIGEN_DEVICE_FUNC inline typename SelfAdjointView<MatrixType, UpLo>::EigenvaluesReturnType
SelfAdjointView<MatrixType, UpLo>::eigenvalues() const
{
- typedef typename SelfAdjointView<MatrixType, UpLo>::PlainObject PlainObject;
PlainObject thisAsMatrix(*this);
return SelfAdjointEigenSolver<PlainObject>(thisAsMatrix, false).eigenvalues();
}
@@ -149,7 +147,7 @@ MatrixBase<Derived>::operatorNorm() const
* \sa eigenvalues(), MatrixBase::operatorNorm()
*/
template<typename MatrixType, unsigned int UpLo>
-inline typename SelfAdjointView<MatrixType, UpLo>::RealScalar
+EIGEN_DEVICE_FUNC inline typename SelfAdjointView<MatrixType, UpLo>::RealScalar
SelfAdjointView<MatrixType, UpLo>::operatorNorm() const
{
return eigenvalues().cwiseAbs().maxCoeff();
diff --git a/Eigen/src/Eigenvalues/RealQZ.h b/Eigen/src/Eigenvalues/RealQZ.h
index b3a910dd9..e2b37f40e 100644
--- a/Eigen/src/Eigenvalues/RealQZ.h
+++ b/Eigen/src/Eigenvalues/RealQZ.h
@@ -161,7 +161,7 @@ namespace Eigen {
/** \brief Reports whether previous computation was successful.
*
- * \returns \c Success if computation was succesful, \c NoConvergence otherwise.
+ * \returns \c Success if computation was successful, \c NoConvergence otherwise.
*/
ComputationInfo info() const
{
diff --git a/Eigen/src/Eigenvalues/RealSchur.h b/Eigen/src/Eigenvalues/RealSchur.h
index f5c86041d..aca8a8279 100644
--- a/Eigen/src/Eigenvalues/RealSchur.h
+++ b/Eigen/src/Eigenvalues/RealSchur.h
@@ -190,7 +190,7 @@ template<typename _MatrixType> class RealSchur
RealSchur& computeFromHessenberg(const HessMatrixType& matrixH, const OrthMatrixType& matrixQ, bool computeU);
/** \brief Reports whether previous computation was successful.
*
- * \returns \c Success if computation was succesful, \c NoConvergence otherwise.
+ * \returns \c Success if computation was successful, \c NoConvergence otherwise.
*/
ComputationInfo info() const
{
@@ -270,8 +270,13 @@ RealSchur<MatrixType>& RealSchur<MatrixType>::compute(const EigenBase<InputType>
// Step 1. Reduce to Hessenberg form
m_hess.compute(matrix.derived()/scale);
- // Step 2. Reduce to real Schur form
- computeFromHessenberg(m_hess.matrixH(), m_hess.matrixQ(), computeU);
+ // Step 2. Reduce to real Schur form
+ // Note: we copy m_hess.matrixQ() into m_matU here and not in computeFromHessenberg
+ // to be able to pass our working-space buffer for the Householder to Dense evaluation.
+ m_workspaceVector.resize(matrix.cols());
+ if(computeU)
+ m_hess.matrixQ().evalTo(m_matU, m_workspaceVector);
+ computeFromHessenberg(m_hess.matrixH(), m_matU, computeU);
m_matT *= scale;
@@ -284,13 +289,13 @@ RealSchur<MatrixType>& RealSchur<MatrixType>::computeFromHessenberg(const HessMa
using std::abs;
m_matT = matrixH;
- if(computeU)
+ m_workspaceVector.resize(m_matT.cols());
+ if(computeU && !internal::is_same_dense(m_matU,matrixQ))
m_matU = matrixQ;
Index maxIters = m_maxIters;
if (maxIters == -1)
maxIters = m_maxIterationsPerRow * matrixH.rows();
- m_workspaceVector.resize(m_matT.cols());
Scalar* workspace = &m_workspaceVector.coeffRef(0);
// The matrix m_matT is divided in three parts.
@@ -303,7 +308,7 @@ RealSchur<MatrixType>& RealSchur<MatrixType>::computeFromHessenberg(const HessMa
Scalar exshift(0); // sum of exceptional shifts
Scalar norm = computeNormOfT();
- if(norm!=0)
+ if(norm!=Scalar(0))
{
while (iu >= 0)
{
@@ -327,7 +332,7 @@ RealSchur<MatrixType>& RealSchur<MatrixType>::computeFromHessenberg(const HessMa
else // No convergence yet
{
// The firstHouseholderVector vector has to be initialized to something to get rid of a silly GCC warning (-O1 -Wall -DNDEBUG )
- Vector3s firstHouseholderVector(0,0,0), shiftInfo;
+ Vector3s firstHouseholderVector = Vector3s::Zero(), shiftInfo;
computeShift(iu, iter, exshift, shiftInfo);
iter = iter + 1;
totalIter = totalIter + 1;
diff --git a/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h b/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h
index 9ddd553f2..f95606206 100644
--- a/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h
+++ b/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h
@@ -20,7 +20,9 @@ class GeneralizedSelfAdjointEigenSolver;
namespace internal {
template<typename SolverType,int Size,bool IsComplex> struct direct_selfadjoint_eigenvalues;
+
template<typename MatrixType, typename DiagType, typename SubDiagType>
+EIGEN_DEVICE_FUNC
ComputationInfo computeFromTridiagonal_impl(DiagType& diag, SubDiagType& subdiag, const Index maxIterations, bool computeEigenvectors, MatrixType& eivec);
}
@@ -119,6 +121,7 @@ template<typename _MatrixType> class SelfAdjointEigenSolver
: m_eivec(),
m_eivalues(),
m_subdiag(),
+ m_info(InvalidInput),
m_isInitialized(false)
{ }
@@ -337,7 +340,7 @@ template<typename _MatrixType> class SelfAdjointEigenSolver
/** \brief Reports whether previous computation was successful.
*
- * \returns \c Success if computation was succesful, \c NoConvergence otherwise.
+ * \returns \c Success if computation was successful, \c NoConvergence otherwise.
*/
EIGEN_DEVICE_FUNC
ComputationInfo info() const
@@ -354,7 +357,8 @@ template<typename _MatrixType> class SelfAdjointEigenSolver
static const int m_maxIterations = 30;
protected:
- static void check_template_parameters()
+ static EIGEN_DEVICE_FUNC
+ void check_template_parameters()
{
EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
}
@@ -403,7 +407,7 @@ SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>
const InputType &matrix(a_matrix.derived());
- using std::abs;
+ EIGEN_USING_STD_MATH(abs);
eigen_assert(matrix.cols() == matrix.rows());
eigen_assert((options&~(EigVecMask|GenEigMask))==0
&& (options&EigVecMask)!=EigVecMask
@@ -479,9 +483,10 @@ namespace internal {
* \returns \c Success or \c NoConvergence
*/
template<typename MatrixType, typename DiagType, typename SubDiagType>
+EIGEN_DEVICE_FUNC
ComputationInfo computeFromTridiagonal_impl(DiagType& diag, SubDiagType& subdiag, const Index maxIterations, bool computeEigenvectors, MatrixType& eivec)
{
- using std::abs;
+ EIGEN_USING_STD_MATH(abs);
ComputationInfo info;
typedef typename MatrixType::Scalar Scalar;
@@ -535,7 +540,7 @@ ComputationInfo computeFromTridiagonal_impl(DiagType& diag, SubDiagType& subdiag
diag.segment(i,n-i).minCoeff(&k);
if (k > 0)
{
- std::swap(diag[i], diag[k+i]);
+ numext::swap(diag[i], diag[k+i]);
if(computeEigenvectors)
eivec.col(i).swap(eivec.col(k+i));
}
@@ -605,7 +610,8 @@ template<typename SolverType> struct direct_selfadjoint_eigenvalues<SolverType,3
EIGEN_DEVICE_FUNC
static inline bool extract_kernel(MatrixType& mat, Ref<VectorType> res, Ref<VectorType> representative)
{
- using std::abs;
+ EIGEN_USING_STD_MATH(abs);
+ EIGEN_USING_STD_MATH(sqrt);
Index i0;
// Find non-zero column i0 (by construction, there must exist a non zero coefficient on the diagonal):
mat.diagonal().cwiseAbs().maxCoeff(&i0);
@@ -616,8 +622,8 @@ template<typename SolverType> struct direct_selfadjoint_eigenvalues<SolverType,3
VectorType c0, c1;
n0 = (c0 = representative.cross(mat.col((i0+1)%3))).squaredNorm();
n1 = (c1 = representative.cross(mat.col((i0+2)%3))).squaredNorm();
- if(n0>n1) res = c0/std::sqrt(n0);
- else res = c1/std::sqrt(n1);
+ if(n0>n1) res = c0/sqrt(n0);
+ else res = c1/sqrt(n1);
return true;
}
@@ -719,7 +725,7 @@ struct direct_selfadjoint_eigenvalues<SolverType,2,false>
EIGEN_DEVICE_FUNC
static inline void computeRoots(const MatrixType& m, VectorType& roots)
{
- using std::sqrt;
+ EIGEN_USING_STD_MATH(sqrt);
const Scalar t0 = Scalar(0.5) * sqrt( numext::abs2(m(0,0)-m(1,1)) + Scalar(4)*numext::abs2(m(1,0)));
const Scalar t1 = Scalar(0.5) * (m(0,0) + m(1,1));
roots(0) = t1 - t0;
@@ -807,7 +813,7 @@ template<int StorageOrder,typename RealScalar, typename Scalar, typename Index>
EIGEN_DEVICE_FUNC
static void tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, Index start, Index end, Scalar* matrixQ, Index n)
{
- using std::abs;
+ EIGEN_USING_STD_MATH(abs);
RealScalar td = (diag[end-1] - diag[end])*RealScalar(0.5);
RealScalar e = subdiag[end-1];
// Note that thanks to scaling, e^2 or td^2 cannot overflow, however they can still
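
A minimal sketch of the closed-form path whose helpers gain EIGEN_DEVICE_FUNC above, shown here as plain host code; computeDirect() is the variant typically used from device code:

    #include <Eigen/Eigenvalues>

    int main()
    {
      Eigen::Matrix3f M;
      M << 2, 1, 0,
           1, 2, 1,
           0, 1, 2;
      Eigen::SelfAdjointEigenSolver<Eigen::Matrix3f> es;
      es.computeDirect(M);                              // closed-form 3x3 path
      if (es.info() == Eigen::Success)
      {
        Eigen::Vector3f lambda = es.eigenvalues();      // sorted in increasing order
        Eigen::Matrix3f V      = es.eigenvectors();
        (void)lambda; (void)V;
      }
      return 0;
    }
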
diff --git a/Eigen/src/Eigenvalues/SelfAdjointEigenSolver_LAPACKE.h b/Eigen/src/Eigenvalues/SelfAdjointEigenSolver_LAPACKE.h
index 3891cf883..b0c947dc0 100644
--- a/Eigen/src/Eigenvalues/SelfAdjointEigenSolver_LAPACKE.h
+++ b/Eigen/src/Eigenvalues/SelfAdjointEigenSolver_LAPACKE.h
@@ -37,7 +37,7 @@ namespace Eigen {
/** \internal Specialization for the data types supported by LAPACKe */
-#define EIGEN_LAPACKE_EIG_SELFADJ(EIGTYPE, LAPACKE_TYPE, LAPACKE_RTYPE, LAPACKE_NAME, EIGCOLROW, LAPACKE_COLROW ) \
+#define EIGEN_LAPACKE_EIG_SELFADJ_2(EIGTYPE, LAPACKE_TYPE, LAPACKE_RTYPE, LAPACKE_NAME, EIGCOLROW ) \
template<> template<typename InputType> inline \
SelfAdjointEigenSolver<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> >& \
SelfAdjointEigenSolver<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> >::compute(const EigenBase<InputType>& matrix, int options) \
@@ -47,7 +47,7 @@ SelfAdjointEigenSolver<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> >::compute(c
&& (options&EigVecMask)!=EigVecMask \
&& "invalid option parameter"); \
bool computeEigenvectors = (options&ComputeEigenvectors)==ComputeEigenvectors; \
- lapack_int n = internal::convert_index<lapack_int>(matrix.cols()), lda, matrix_order, info; \
+ lapack_int n = internal::convert_index<lapack_int>(matrix.cols()), lda, info; \
m_eivalues.resize(n,1); \
m_subdiag.resize(n-1); \
m_eivec = matrix; \
@@ -63,27 +63,24 @@ SelfAdjointEigenSolver<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> >::compute(c
} \
\
lda = internal::convert_index<lapack_int>(m_eivec.outerStride()); \
- matrix_order=LAPACKE_COLROW; \
char jobz, uplo='L'/*, range='A'*/; \
jobz = computeEigenvectors ? 'V' : 'N'; \
\
- info = LAPACKE_##LAPACKE_NAME( matrix_order, jobz, uplo, n, (LAPACKE_TYPE*)m_eivec.data(), lda, (LAPACKE_RTYPE*)m_eivalues.data() ); \
+ info = LAPACKE_##LAPACKE_NAME( LAPACK_COL_MAJOR, jobz, uplo, n, (LAPACKE_TYPE*)m_eivec.data(), lda, (LAPACKE_RTYPE*)m_eivalues.data() ); \
m_info = (info==0) ? Success : NoConvergence; \
m_isInitialized = true; \
m_eigenvectorsOk = computeEigenvectors; \
return *this; \
}
+#define EIGEN_LAPACKE_EIG_SELFADJ(EIGTYPE, LAPACKE_TYPE, LAPACKE_RTYPE, LAPACKE_NAME ) \
+ EIGEN_LAPACKE_EIG_SELFADJ_2(EIGTYPE, LAPACKE_TYPE, LAPACKE_RTYPE, LAPACKE_NAME, ColMajor ) \
+ EIGEN_LAPACKE_EIG_SELFADJ_2(EIGTYPE, LAPACKE_TYPE, LAPACKE_RTYPE, LAPACKE_NAME, RowMajor )
-EIGEN_LAPACKE_EIG_SELFADJ(double, double, double, dsyev, ColMajor, LAPACK_COL_MAJOR)
-EIGEN_LAPACKE_EIG_SELFADJ(float, float, float, ssyev, ColMajor, LAPACK_COL_MAJOR)
-EIGEN_LAPACKE_EIG_SELFADJ(dcomplex, lapack_complex_double, double, zheev, ColMajor, LAPACK_COL_MAJOR)
-EIGEN_LAPACKE_EIG_SELFADJ(scomplex, lapack_complex_float, float, cheev, ColMajor, LAPACK_COL_MAJOR)
-
-EIGEN_LAPACKE_EIG_SELFADJ(double, double, double, dsyev, RowMajor, LAPACK_ROW_MAJOR)
-EIGEN_LAPACKE_EIG_SELFADJ(float, float, float, ssyev, RowMajor, LAPACK_ROW_MAJOR)
-EIGEN_LAPACKE_EIG_SELFADJ(dcomplex, lapack_complex_double, double, zheev, RowMajor, LAPACK_ROW_MAJOR)
-EIGEN_LAPACKE_EIG_SELFADJ(scomplex, lapack_complex_float, float, cheev, RowMajor, LAPACK_ROW_MAJOR)
+EIGEN_LAPACKE_EIG_SELFADJ(double, double, double, dsyev)
+EIGEN_LAPACKE_EIG_SELFADJ(float, float, float, ssyev)
+EIGEN_LAPACKE_EIG_SELFADJ(dcomplex, lapack_complex_double, double, zheev)
+EIGEN_LAPACKE_EIG_SELFADJ(scomplex, lapack_complex_float, float, cheev)
} // end namespace Eigen
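
Caller-side usage is unchanged by the macro consolidation; an illustrative sketch, assuming the translation unit is built with EIGEN_USE_LAPACKE and linked against a LAPACKE implementation so these specializations are selected:

    #include <Eigen/Dense>

    int main()
    {
      typedef Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor> RowMajorMatrixXd;
      RowMajorMatrixXd A = RowMajorMatrixXd::Random(100, 100);
      RowMajorMatrixXd S = 0.5 * (A + A.transpose());          // symmetrize
      Eigen::SelfAdjointEigenSolver<RowMajorMatrixXd> es(S);   // dispatches to ?syev/?heev
      bool ok = (es.info() == Eigen::Success);
      (void)ok;
      return 0;
    }
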
diff --git a/Eigen/src/Eigenvalues/Tridiagonalization.h b/Eigen/src/Eigenvalues/Tridiagonalization.h
index 1d102c17b..c5c1acf46 100644
--- a/Eigen/src/Eigenvalues/Tridiagonalization.h
+++ b/Eigen/src/Eigenvalues/Tridiagonalization.h
@@ -25,6 +25,7 @@ struct traits<TridiagonalizationMatrixTReturnType<MatrixType> >
};
template<typename MatrixType, typename CoeffVectorType>
+EIGEN_DEVICE_FUNC
void tridiagonalization_inplace(MatrixType& matA, CoeffVectorType& hCoeffs);
}
@@ -344,6 +345,7 @@ namespace internal {
* \sa Tridiagonalization::packedMatrix()
*/
template<typename MatrixType, typename CoeffVectorType>
+EIGEN_DEVICE_FUNC
void tridiagonalization_inplace(MatrixType& matA, CoeffVectorType& hCoeffs)
{
using numext::conj;
@@ -424,6 +426,7 @@ struct tridiagonalization_inplace_selector;
* \sa class Tridiagonalization
*/
template<typename MatrixType, typename DiagonalType, typename SubDiagonalType>
+EIGEN_DEVICE_FUNC
void tridiagonalization_inplace(MatrixType& mat, DiagonalType& diag, SubDiagonalType& subdiag, bool extractQ)
{
eigen_assert(mat.cols()==mat.rows() && diag.size()==mat.rows() && subdiag.size()==mat.rows()-1);
@@ -439,7 +442,8 @@ struct tridiagonalization_inplace_selector
typedef typename Tridiagonalization<MatrixType>::CoeffVectorType CoeffVectorType;
typedef typename Tridiagonalization<MatrixType>::HouseholderSequenceType HouseholderSequenceType;
template<typename DiagonalType, typename SubDiagonalType>
- static void run(MatrixType& mat, DiagonalType& diag, SubDiagonalType& subdiag, bool extractQ)
+ static EIGEN_DEVICE_FUNC
+ void run(MatrixType& mat, DiagonalType& diag, SubDiagonalType& subdiag, bool extractQ)
{
CoeffVectorType hCoeffs(mat.cols()-1);
tridiagonalization_inplace(mat,hCoeffs);
@@ -508,7 +512,8 @@ struct tridiagonalization_inplace_selector<MatrixType,1,IsComplex>
typedef typename MatrixType::Scalar Scalar;
template<typename DiagonalType, typename SubDiagonalType>
- static void run(MatrixType& mat, DiagonalType& diag, SubDiagonalType&, bool extractQ)
+ static EIGEN_DEVICE_FUNC
+ void run(MatrixType& mat, DiagonalType& diag, SubDiagonalType&, bool extractQ)
{
diag(0,0) = numext::real(mat(0,0));
if(extractQ)
diff --git a/Eigen/src/Geometry/AngleAxis.h b/Eigen/src/Geometry/AngleAxis.h
index 0af3c1b08..83ee1be46 100644
--- a/Eigen/src/Geometry/AngleAxis.h
+++ b/Eigen/src/Geometry/AngleAxis.h
@@ -178,7 +178,7 @@ EIGEN_DEVICE_FUNC AngleAxis<Scalar>& AngleAxis<Scalar>::operator=(const Quaterni
if (n != Scalar(0))
{
m_angle = Scalar(2)*atan2(n, abs(q.w()));
- if(q.w() < 0)
+ if(q.w() < Scalar(0))
n = -n;
m_axis = q.vec() / n;
}
diff --git a/Eigen/src/Geometry/Hyperplane.h b/Eigen/src/Geometry/Hyperplane.h
index 05929b299..cebe03557 100644
--- a/Eigen/src/Geometry/Hyperplane.h
+++ b/Eigen/src/Geometry/Hyperplane.h
@@ -119,7 +119,7 @@ public:
* If the dimension of the ambient space is greater than 2, then there isn't uniqueness,
* so an arbitrary choice is made.
*/
- // FIXME to be consitent with the rest this could be implemented as a static Through function ??
+ // FIXME to be consistent with the rest this could be implemented as a static Through function ??
EIGEN_DEVICE_FUNC explicit Hyperplane(const ParametrizedLine<Scalar, AmbientDimAtCompileTime>& parametrized)
{
normal() = parametrized.direction().unitOrthogonal();
diff --git a/Eigen/src/Geometry/Quaternion.h b/Eigen/src/Geometry/Quaternion.h
index f6ef1bcf6..faea62f17 100644
--- a/Eigen/src/Geometry/Quaternion.h
+++ b/Eigen/src/Geometry/Quaternion.h
@@ -43,6 +43,11 @@ class QuaternionBase : public RotationBase<Derived, 3>
typedef typename internal::traits<Derived>::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
typedef typename internal::traits<Derived>::Coefficients Coefficients;
+ typedef typename Coefficients::CoeffReturnType CoeffReturnType;
+ typedef typename internal::conditional<bool(internal::traits<Derived>::Flags&LvalueBit),
+ Scalar&, CoeffReturnType>::type NonConstCoeffReturnType;
+
+
enum {
Flags = Eigen::internal::traits<Derived>::Flags
};
@@ -58,22 +63,22 @@ class QuaternionBase : public RotationBase<Derived, 3>
/** \returns the \c x coefficient */
- EIGEN_DEVICE_FUNC inline Scalar x() const { return this->derived().coeffs().coeff(0); }
+ EIGEN_DEVICE_FUNC inline CoeffReturnType x() const { return this->derived().coeffs().coeff(0); }
/** \returns the \c y coefficient */
- EIGEN_DEVICE_FUNC inline Scalar y() const { return this->derived().coeffs().coeff(1); }
+ EIGEN_DEVICE_FUNC inline CoeffReturnType y() const { return this->derived().coeffs().coeff(1); }
/** \returns the \c z coefficient */
- EIGEN_DEVICE_FUNC inline Scalar z() const { return this->derived().coeffs().coeff(2); }
+ EIGEN_DEVICE_FUNC inline CoeffReturnType z() const { return this->derived().coeffs().coeff(2); }
/** \returns the \c w coefficient */
- EIGEN_DEVICE_FUNC inline Scalar w() const { return this->derived().coeffs().coeff(3); }
+ EIGEN_DEVICE_FUNC inline CoeffReturnType w() const { return this->derived().coeffs().coeff(3); }
- /** \returns a reference to the \c x coefficient */
- EIGEN_DEVICE_FUNC inline Scalar& x() { return this->derived().coeffs().coeffRef(0); }
- /** \returns a reference to the \c y coefficient */
- EIGEN_DEVICE_FUNC inline Scalar& y() { return this->derived().coeffs().coeffRef(1); }
- /** \returns a reference to the \c z coefficient */
- EIGEN_DEVICE_FUNC inline Scalar& z() { return this->derived().coeffs().coeffRef(2); }
- /** \returns a reference to the \c w coefficient */
- EIGEN_DEVICE_FUNC inline Scalar& w() { return this->derived().coeffs().coeffRef(3); }
+ /** \returns a reference to the \c x coefficient (if Derived is a non-const lvalue) */
+ EIGEN_DEVICE_FUNC inline NonConstCoeffReturnType x() { return this->derived().coeffs().x(); }
+ /** \returns a reference to the \c y coefficient (if Derived is a non-const lvalue) */
+ EIGEN_DEVICE_FUNC inline NonConstCoeffReturnType y() { return this->derived().coeffs().y(); }
+ /** \returns a reference to the \c z coefficient (if Derived is a non-const lvalue) */
+ EIGEN_DEVICE_FUNC inline NonConstCoeffReturnType z() { return this->derived().coeffs().z(); }
+ /** \returns a reference to the \c w coefficient (if Derived is a non-const lvalue) */
+ EIGEN_DEVICE_FUNC inline NonConstCoeffReturnType w() { return this->derived().coeffs().w(); }
/** \returns a read-only vector expression of the imaginary part (x,y,z) */
EIGEN_DEVICE_FUNC inline const VectorBlock<const Coefficients,3> vec() const { return coeffs().template head<3>(); }
@@ -271,6 +276,27 @@ public:
EIGEN_DEVICE_FUNC explicit inline Quaternion(const Quaternion<OtherScalar, OtherOptions>& other)
{ m_coeffs = other.coeffs().template cast<Scalar>(); }
+#if EIGEN_HAS_RVALUE_REFERENCES
+ // We define a copy constructor, which means we don't get an implicit move constructor or assignment operator.
+ /** Default move constructor */
+ EIGEN_DEVICE_FUNC inline Quaternion(Quaternion&& other) EIGEN_NOEXCEPT_IF(std::is_nothrow_move_constructible<Scalar>::value)
+ : m_coeffs(std::move(other.coeffs()))
+ {}
+
+ /** Default move assignment operator */
+ EIGEN_DEVICE_FUNC Quaternion& operator=(Quaternion&& other) EIGEN_NOEXCEPT_IF(std::is_nothrow_move_assignable<Scalar>::value)
+ {
+ m_coeffs = std::move(other.coeffs());
+ return *this;
+ }
+
+ // And now because we declared a constructor, we don't get an implicit copy constructor. Say we want one.
+ /** Default copy constructor */
+ EIGEN_DEVICE_FUNC Quaternion(const Quaternion& other)
+ : m_coeffs(other.coeffs())
+ {}
+#endif
+
EIGEN_DEVICE_FUNC static Quaternion UnitRandom();
template<typename Derived1, typename Derived2>
@@ -423,7 +449,7 @@ typedef Map<Quaternion<double>, Aligned> QuaternionMapAlignedd;
// Generic Quaternion * Quaternion product
// This product can be specialized for a given architecture via the Arch template argument.
namespace internal {
-template<int Arch, class Derived1, class Derived2, typename Scalar, int _Options> struct quat_product
+template<int Arch, class Derived1, class Derived2, typename Scalar> struct quat_product
{
EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Quaternion<Scalar> run(const QuaternionBase<Derived1>& a, const QuaternionBase<Derived2>& b){
return Quaternion<Scalar>
@@ -446,8 +472,7 @@ QuaternionBase<Derived>::operator* (const QuaternionBase<OtherDerived>& other) c
EIGEN_STATIC_ASSERT((internal::is_same<typename Derived::Scalar, typename OtherDerived::Scalar>::value),
YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
return internal::quat_product<Architecture::Target, Derived, OtherDerived,
- typename internal::traits<Derived>::Scalar,
- EIGEN_PLAIN_ENUM_MIN(internal::traits<Derived>::Alignment, internal::traits<OtherDerived>::Alignment)>::run(*this, other);
+ typename internal::traits<Derived>::Scalar>::run(*this, other);
}
/** \sa operator*(Quaternion) */
@@ -624,7 +649,7 @@ EIGEN_DEVICE_FUNC Quaternion<Scalar,Options> Quaternion<Scalar,Options>::UnitRan
const Scalar u1 = internal::random<Scalar>(0, 1),
u2 = internal::random<Scalar>(0, 2*EIGEN_PI),
u3 = internal::random<Scalar>(0, 2*EIGEN_PI);
- const Scalar a = sqrt(1 - u1),
+ const Scalar a = sqrt(Scalar(1) - u1),
b = sqrt(u1);
return Quaternion (a * sin(u2), a * cos(u2), b * sin(u3), b * cos(u3));
}
@@ -672,7 +697,7 @@ EIGEN_DEVICE_FUNC inline Quaternion<typename internal::traits<Derived>::Scalar>
// Generic conjugate of a Quaternion
namespace internal {
-template<int Arch, class Derived, typename Scalar, int _Options> struct quat_conj
+template<int Arch, class Derived, typename Scalar> struct quat_conj
{
EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Quaternion<Scalar> run(const QuaternionBase<Derived>& q){
return Quaternion<Scalar>(q.w(),-q.x(),-q.y(),-q.z());
@@ -691,8 +716,7 @@ EIGEN_DEVICE_FUNC inline Quaternion<typename internal::traits<Derived>::Scalar>
QuaternionBase<Derived>::conjugate() const
{
return internal::quat_conj<Architecture::Target, Derived,
- typename internal::traits<Derived>::Scalar,
- internal::traits<Derived>::Alignment>::run(*this);
+ typename internal::traits<Derived>::Scalar>::run(*this);
}
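
A short sketch of the user-visible effect of the coefficient-accessor and move changes above (the Map-based read-only access is an assumption about typical usage, not code from this patch):

    #include <Eigen/Geometry>
    #include <utility>

    int main()
    {
      Eigen::Quaternionf q(1.f, 0.f, 0.f, 0.f);      // (w, x, y, z) constructor order
      q.x() = 0.5f;                                  // writable lvalue: returns Scalar&

      const float buf[4] = {0.f, 0.f, 0.f, 1.f};     // storage order is (x, y, z, w)
      Eigen::Map<const Eigen::Quaternionf> qm(buf);
      float w = qm.w();                              // read-only map: returned by value

      Eigen::Quaternionf q2(std::move(q));           // explicit move constructor
      (void)w; (void)q2;
      return 0;
    }
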
diff --git a/Eigen/src/Geometry/Scaling.h b/Eigen/src/Geometry/Scaling.h
index f58ca03d9..df650fda6 100755
--- a/Eigen/src/Geometry/Scaling.h
+++ b/Eigen/src/Geometry/Scaling.h
@@ -29,6 +29,22 @@ namespace Eigen {
*
* \sa Scaling(), class DiagonalMatrix, MatrixBase::asDiagonal(), class Translation, class Transform
*/
+
+namespace internal
+{
+ // This helper helps nvcc+MSVC to properly parse this file.
+ // See bug 1412.
+ template <typename Scalar, int Dim, int Mode>
+ struct uniformscaling_times_affine_returntype
+ {
+ enum
+ {
+ NewMode = int(Mode) == int(Isometry) ? Affine : Mode
+ };
+ typedef Transform <Scalar, Dim, NewMode> type;
+ };
+}
+
template<typename _Scalar>
class UniformScaling
{
@@ -60,9 +76,11 @@ public:
/** Concatenates a uniform scaling and an affine transformation */
template<int Dim, int Mode, int Options>
- inline Transform<Scalar,Dim,(int(Mode)==int(Isometry)?Affine:Mode)> operator* (const Transform<Scalar,Dim, Mode, Options>& t) const
+ inline typename
+ internal::uniformscaling_times_affine_returntype<Scalar,Dim,Mode>::type
+ operator* (const Transform<Scalar, Dim, Mode, Options>& t) const
{
- Transform<Scalar,Dim,(int(Mode)==int(Isometry)?Affine:Mode)> res = t;
+ typename internal::uniformscaling_times_affine_returntype<Scalar,Dim,Mode>::type res = t;
res.prescale(factor());
return res;
}
@@ -70,7 +88,7 @@ public:
/** Concatenates a uniform scaling and a linear transformation matrix */
// TODO returns an expression
template<typename Derived>
- inline typename internal::plain_matrix_type<Derived>::type operator* (const MatrixBase<Derived>& other) const
+ inline typename Eigen::internal::plain_matrix_type<Derived>::type operator* (const MatrixBase<Derived>& other) const
{ return other * m_factor; }
template<typename Derived,int Dim>
@@ -110,7 +128,7 @@ public:
/** Concatenates a linear transformation matrix and a uniform scaling
* \relates UniformScaling
*/
-// NOTE this operator is defiend in MatrixBase and not as a friend function
+// NOTE this operator is defined in MatrixBase and not as a friend function
// of UniformScaling to fix an internal crash of Intel's ICC
template<typename Derived,typename Scalar>
EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(Derived,Scalar,product)
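
A minimal sketch of the return-type helper in action; scaling an Isometry still yields an Affine transform, exactly as the inline conditional did before:

    #include <Eigen/Geometry>

    int main()
    {
      Eigen::Transform<double, 3, Eigen::Isometry> T =
          Eigen::Transform<double, 3, Eigen::Isometry>::Identity();
      T.translate(Eigen::Vector3d(1.0, 2.0, 3.0));

      // Isometry * uniform scaling is no longer an isometry, so the result is Affine.
      Eigen::Transform<double, 3, Eigen::Affine> A = Eigen::Scaling(2.0) * T;
      (void)A;
      return 0;
    }
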
diff --git a/Eigen/src/Geometry/Transform.h b/Eigen/src/Geometry/Transform.h
index 2d36dfadf..75991aaed 100644
--- a/Eigen/src/Geometry/Transform.h
+++ b/Eigen/src/Geometry/Transform.h
@@ -115,7 +115,7 @@ template<int Mode> struct transform_make_affine;
* \end{array} \right) \f$
*
* Note that for a projective transformation the last row can be anything,
- * and then the interpretation of different parts might be sightly different.
+ * and then the interpretation of different parts might be slightly different.
*
* However, unlike a plain matrix, the Transform class provides many features
* simplifying both its assembly and usage. In particular, it can be composed
diff --git a/Eigen/src/Geometry/Translation.h b/Eigen/src/Geometry/Translation.h
index 51d9a82eb..23b19f74f 100644
--- a/Eigen/src/Geometry/Translation.h
+++ b/Eigen/src/Geometry/Translation.h
@@ -70,18 +70,18 @@ public:
/** Constructs and initialize the translation transformation from a vector of translation coefficients */
EIGEN_DEVICE_FUNC explicit inline Translation(const VectorType& vector) : m_coeffs(vector) {}
- /** \brief Retruns the x-translation by value. **/
+ /** \brief Returns the x-translation by value. **/
EIGEN_DEVICE_FUNC inline Scalar x() const { return m_coeffs.x(); }
- /** \brief Retruns the y-translation by value. **/
+ /** \brief Returns the y-translation by value. **/
EIGEN_DEVICE_FUNC inline Scalar y() const { return m_coeffs.y(); }
- /** \brief Retruns the z-translation by value. **/
+ /** \brief Returns the z-translation by value. **/
EIGEN_DEVICE_FUNC inline Scalar z() const { return m_coeffs.z(); }
- /** \brief Retruns the x-translation as a reference. **/
+ /** \brief Returns the x-translation as a reference. **/
EIGEN_DEVICE_FUNC inline Scalar& x() { return m_coeffs.x(); }
- /** \brief Retruns the y-translation as a reference. **/
+ /** \brief Returns the y-translation as a reference. **/
EIGEN_DEVICE_FUNC inline Scalar& y() { return m_coeffs.y(); }
- /** \brief Retruns the z-translation as a reference. **/
+ /** \brief Returns the z-translation as a reference. **/
EIGEN_DEVICE_FUNC inline Scalar& z() { return m_coeffs.z(); }
EIGEN_DEVICE_FUNC const VectorType& vector() const { return m_coeffs; }
diff --git a/Eigen/src/Geometry/arch/Geometry_SSE.h b/Eigen/src/Geometry/arch/Geometry_SSE.h
index 1a86ff837..d4346aa1c 100644
--- a/Eigen/src/Geometry/arch/Geometry_SSE.h
+++ b/Eigen/src/Geometry/arch/Geometry_SSE.h
@@ -16,34 +16,43 @@ namespace Eigen {
namespace internal {
template<class Derived, class OtherDerived>
-struct quat_product<Architecture::SSE, Derived, OtherDerived, float, Aligned16>
+struct quat_product<Architecture::SSE, Derived, OtherDerived, float>
{
+ enum {
+ AAlignment = traits<Derived>::Alignment,
+ BAlignment = traits<OtherDerived>::Alignment,
+ ResAlignment = traits<Quaternion<float> >::Alignment
+ };
static inline Quaternion<float> run(const QuaternionBase<Derived>& _a, const QuaternionBase<OtherDerived>& _b)
{
Quaternion<float> res;
- const __m128 mask = _mm_setr_ps(0.f,0.f,0.f,-0.f);
- __m128 a = _a.coeffs().template packet<Aligned16>(0);
- __m128 b = _b.coeffs().template packet<Aligned16>(0);
- __m128 s1 = _mm_mul_ps(vec4f_swizzle1(a,1,2,0,2),vec4f_swizzle1(b,2,0,1,2));
- __m128 s2 = _mm_mul_ps(vec4f_swizzle1(a,3,3,3,1),vec4f_swizzle1(b,0,1,2,1));
- pstore(&res.x(),
- _mm_add_ps(_mm_sub_ps(_mm_mul_ps(a,vec4f_swizzle1(b,3,3,3,3)),
- _mm_mul_ps(vec4f_swizzle1(a,2,0,1,0),
+ const Packet4f mask = _mm_setr_ps(0.f,0.f,0.f,-0.f);
+ Packet4f a = _a.coeffs().template packet<AAlignment>(0);
+ Packet4f b = _b.coeffs().template packet<BAlignment>(0);
+ Packet4f s1 = pmul(vec4f_swizzle1(a,1,2,0,2),vec4f_swizzle1(b,2,0,1,2));
+ Packet4f s2 = pmul(vec4f_swizzle1(a,3,3,3,1),vec4f_swizzle1(b,0,1,2,1));
+ pstoret<float,Packet4f,ResAlignment>(
+ &res.x(),
+ padd(psub(pmul(a,vec4f_swizzle1(b,3,3,3,3)),
+ pmul(vec4f_swizzle1(a,2,0,1,0),
vec4f_swizzle1(b,1,2,0,0))),
- _mm_xor_ps(mask,_mm_add_ps(s1,s2))));
+ pxor(mask,padd(s1,s2))));
return res;
}
};
-template<class Derived, int Alignment>
-struct quat_conj<Architecture::SSE, Derived, float, Alignment>
+template<class Derived>
+struct quat_conj<Architecture::SSE, Derived, float>
{
+ enum {
+ ResAlignment = traits<Quaternion<float> >::Alignment
+ };
static inline Quaternion<float> run(const QuaternionBase<Derived>& q)
{
Quaternion<float> res;
const __m128 mask = _mm_setr_ps(-0.f,-0.f,-0.f,0.f);
- pstore(&res.x(), _mm_xor_ps(mask, q.coeffs().template packet<Alignment>(0)));
+ pstoret<float,Packet4f,ResAlignment>(&res.x(), _mm_xor_ps(mask, q.coeffs().template packet<traits<Derived>::Alignment>(0)));
return res;
}
};
@@ -52,6 +61,9 @@ struct quat_conj<Architecture::SSE, Derived, float, Alignment>
template<typename VectorLhs,typename VectorRhs>
struct cross3_impl<Architecture::SSE,VectorLhs,VectorRhs,float,true>
{
+ enum {
+ ResAlignment = traits<typename plain_matrix_type<VectorLhs>::type>::Alignment
+ };
static inline typename plain_matrix_type<VectorLhs>::type
run(const VectorLhs& lhs, const VectorRhs& rhs)
{
@@ -60,7 +72,7 @@ struct cross3_impl<Architecture::SSE,VectorLhs,VectorRhs,float,true>
__m128 mul1=_mm_mul_ps(vec4f_swizzle1(a,1,2,0,3),vec4f_swizzle1(b,2,0,1,3));
__m128 mul2=_mm_mul_ps(vec4f_swizzle1(a,2,0,1,3),vec4f_swizzle1(b,1,2,0,3));
typename plain_matrix_type<VectorLhs>::type res;
- pstore(&res.x(),_mm_sub_ps(mul1,mul2));
+ pstoret<float,Packet4f,ResAlignment>(&res.x(),_mm_sub_ps(mul1,mul2));
return res;
}
};
@@ -68,9 +80,14 @@ struct cross3_impl<Architecture::SSE,VectorLhs,VectorRhs,float,true>
-template<class Derived, class OtherDerived, int Alignment>
-struct quat_product<Architecture::SSE, Derived, OtherDerived, double, Alignment>
+template<class Derived, class OtherDerived>
+struct quat_product<Architecture::SSE, Derived, OtherDerived, double>
{
+ enum {
+ BAlignment = traits<OtherDerived>::Alignment,
+ ResAlignment = traits<Quaternion<double> >::Alignment
+ };
+
static inline Quaternion<double> run(const QuaternionBase<Derived>& _a, const QuaternionBase<OtherDerived>& _b)
{
const Packet2d mask = _mm_castsi128_pd(_mm_set_epi32(0x0,0x0,0x80000000,0x0));
@@ -78,8 +95,8 @@ struct quat_product<Architecture::SSE, Derived, OtherDerived, double, Alignment>
Quaternion<double> res;
const double* a = _a.coeffs().data();
- Packet2d b_xy = _b.coeffs().template packet<Alignment>(0);
- Packet2d b_zw = _b.coeffs().template packet<Alignment>(2);
+ Packet2d b_xy = _b.coeffs().template packet<BAlignment>(0);
+ Packet2d b_zw = _b.coeffs().template packet<BAlignment>(2);
Packet2d a_xx = pset1<Packet2d>(a[0]);
Packet2d a_yy = pset1<Packet2d>(a[1]);
Packet2d a_zz = pset1<Packet2d>(a[2]);
@@ -97,9 +114,9 @@ struct quat_product<Architecture::SSE, Derived, OtherDerived, double, Alignment>
t2 = psub(pmul(a_zz, b_xy), pmul(a_xx, b_zw));
#ifdef EIGEN_VECTORIZE_SSE3
EIGEN_UNUSED_VARIABLE(mask)
- pstore(&res.x(), _mm_addsub_pd(t1, preverse(t2)));
+ pstoret<double,Packet2d,ResAlignment>(&res.x(), _mm_addsub_pd(t1, preverse(t2)));
#else
- pstore(&res.x(), padd(t1, pxor(mask,preverse(t2))));
+ pstoret<double,Packet2d,ResAlignment>(&res.x(), padd(t1, pxor(mask,preverse(t2))));
#endif
/*
@@ -111,25 +128,28 @@ struct quat_product<Architecture::SSE, Derived, OtherDerived, double, Alignment>
t2 = padd(pmul(a_zz, b_zw), pmul(a_xx, b_xy));
#ifdef EIGEN_VECTORIZE_SSE3
EIGEN_UNUSED_VARIABLE(mask)
- pstore(&res.z(), preverse(_mm_addsub_pd(preverse(t1), t2)));
+ pstoret<double,Packet2d,ResAlignment>(&res.z(), preverse(_mm_addsub_pd(preverse(t1), t2)));
#else
- pstore(&res.z(), psub(t1, pxor(mask,preverse(t2))));
+ pstoret<double,Packet2d,ResAlignment>(&res.z(), psub(t1, pxor(mask,preverse(t2))));
#endif
return res;
}
};
-template<class Derived, int Alignment>
-struct quat_conj<Architecture::SSE, Derived, double, Alignment>
+template<class Derived>
+struct quat_conj<Architecture::SSE, Derived, double>
{
+ enum {
+ ResAlignment = traits<Quaternion<double> >::Alignment
+ };
static inline Quaternion<double> run(const QuaternionBase<Derived>& q)
{
Quaternion<double> res;
const __m128d mask0 = _mm_setr_pd(-0.,-0.);
const __m128d mask2 = _mm_setr_pd(-0.,0.);
- pstore(&res.x(), _mm_xor_pd(mask0, q.coeffs().template packet<Alignment>(0)));
- pstore(&res.z(), _mm_xor_pd(mask2, q.coeffs().template packet<Alignment>(2)));
+ pstoret<double,Packet2d,ResAlignment>(&res.x(), _mm_xor_pd(mask0, q.coeffs().template packet<traits<Derived>::Alignment>(0)));
+ pstoret<double,Packet2d,ResAlignment>(&res.z(), _mm_xor_pd(mask2, q.coeffs().template packet<traits<Derived>::Alignment>(2)));
return res;
}
};
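
With per-operand alignment the SSE specializations can mix aligned and unaligned quaternions; a hedged sketch using a deliberately misaligned Map (illustrative usage, not code from this patch):

    #include <Eigen/Geometry>

    int main()
    {
      float raw[5] = {0.f, 0.f, 0.f, 0.f, 1.f};
      Eigen::Map<Eigen::Quaternionf> qm(raw + 1);     // unaligned map onto (x, y, z, w)
      Eigen::Quaternionf q = Eigen::Quaternionf::UnitRandom();

      Eigen::Quaternionf r = q * qm;                  // product with mixed alignment
      Eigen::Quaternionf c = qm.conjugate();
      (void)r; (void)c;
      return 0;
    }
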
diff --git a/Eigen/src/Householder/BlockHouseholder.h b/Eigen/src/Householder/BlockHouseholder.h
index 01a7ed188..39ce1c2a0 100644
--- a/Eigen/src/Householder/BlockHouseholder.h
+++ b/Eigen/src/Householder/BlockHouseholder.h
@@ -63,8 +63,15 @@ void make_block_householder_triangular_factor(TriangularFactorType& triFactor, c
triFactor.row(i).tail(rt).noalias() = -hCoeffs(i) * vectors.col(i).tail(rs).adjoint()
* vectors.bottomRightCorner(rs, rt).template triangularView<UnitLower>();
- // FIXME add .noalias() once the triangular product can work inplace
- triFactor.row(i).tail(rt) = triFactor.row(i).tail(rt) * triFactor.bottomRightCorner(rt,rt).template triangularView<Upper>();
+ // FIXME use the following line with .noalias() once the triangular product can work inplace
+ // triFactor.row(i).tail(rt) = triFactor.row(i).tail(rt) * triFactor.bottomRightCorner(rt,rt).template triangularView<Upper>();
+ for(Index j=nbVecs-1; j>i; --j)
+ {
+ typename TriangularFactorType::Scalar z = triFactor(i,j);
+ triFactor(i,j) = z * triFactor(j,j);
+ if(nbVecs-j-1>0)
+ triFactor.row(i).tail(nbVecs-j-1) += z * triFactor.row(j).tail(nbVecs-j-1);
+ }
}
triFactor(i,i) = hCoeffs(i);
diff --git a/Eigen/src/Householder/Householder.h b/Eigen/src/Householder/Householder.h
index 80de2c305..5bc037f00 100644
--- a/Eigen/src/Householder/Householder.h
+++ b/Eigen/src/Householder/Householder.h
@@ -39,6 +39,7 @@ template<int n> struct decrement_size
* MatrixBase::applyHouseholderOnTheRight()
*/
template<typename Derived>
+EIGEN_DEVICE_FUNC
void MatrixBase<Derived>::makeHouseholderInPlace(Scalar& tau, RealScalar& beta)
{
VectorBlock<Derived, internal::decrement_size<Base::SizeAtCompileTime>::ret> essentialPart(derived(), 1, size()-1);
@@ -62,6 +63,7 @@ void MatrixBase<Derived>::makeHouseholderInPlace(Scalar& tau, RealScalar& beta)
*/
template<typename Derived>
template<typename EssentialPart>
+EIGEN_DEVICE_FUNC
void MatrixBase<Derived>::makeHouseholder(
EssentialPart& essential,
Scalar& tau,
@@ -103,13 +105,14 @@ void MatrixBase<Derived>::makeHouseholder(
* \param essential the essential part of the vector \c v
* \param tau the scaling factor of the Householder transformation
* \param workspace a pointer to working space with at least
- * this->cols() * essential.size() entries
+ * this->cols() entries
*
* \sa MatrixBase::makeHouseholder(), MatrixBase::makeHouseholderInPlace(),
* MatrixBase::applyHouseholderOnTheRight()
*/
template<typename Derived>
template<typename EssentialPart>
+EIGEN_DEVICE_FUNC
void MatrixBase<Derived>::applyHouseholderOnTheLeft(
const EssentialPart& essential,
const Scalar& tau,
@@ -140,13 +143,14 @@ void MatrixBase<Derived>::applyHouseholderOnTheLeft(
* \param essential the essential part of the vector \c v
* \param tau the scaling factor of the Householder transformation
* \param workspace a pointer to working space with at least
- * this->cols() * essential.size() entries
+ * this->rows() entries
*
* \sa MatrixBase::makeHouseholder(), MatrixBase::makeHouseholderInPlace(),
* MatrixBase::applyHouseholderOnTheLeft()
*/
template<typename Derived>
template<typename EssentialPart>
+EIGEN_DEVICE_FUNC
void MatrixBase<Derived>::applyHouseholderOnTheRight(
const EssentialPart& essential,
const Scalar& tau,
@@ -160,10 +164,10 @@ void MatrixBase<Derived>::applyHouseholderOnTheRight(
{
Map<typename internal::plain_col_type<PlainObject>::type> tmp(workspace,rows());
Block<Derived, Derived::RowsAtCompileTime, EssentialPart::SizeAtCompileTime> right(derived(), 0, 1, rows(), cols()-1);
- tmp.noalias() = right * essential.conjugate();
+ tmp.noalias() = right * essential;
tmp += this->col(0);
this->col(0) -= tau * tmp;
- right.noalias() -= tau * tmp * essential.transpose();
+ right.noalias() -= tau * tmp * essential.adjoint();
}
}
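
A minimal sketch of the corrected workspace contract: applyHouseholderOnTheLeft() needs this->cols() scratch entries and applyHouseholderOnTheRight() needs this->rows():

    #include <Eigen/Dense>

    int main()
    {
      Eigen::VectorXd v = Eigen::VectorXd::Random(5);
      Eigen::VectorXd essential;
      double tau, beta;
      v.makeHouseholder(essential, tau, beta);        // H*v = (beta, 0, ..., 0)^T

      Eigen::MatrixXd A = Eigen::MatrixXd::Random(5, 3);
      Eigen::VectorXd workspace(A.cols());            // cols() entries suffice on the left
      A.applyHouseholderOnTheLeft(essential, tau, workspace.data());
      return 0;
    }
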
diff --git a/Eigen/src/Householder/HouseholderSequence.h b/Eigen/src/Householder/HouseholderSequence.h
index 3ce0a693d..e62befcb6 100644
--- a/Eigen/src/Householder/HouseholderSequence.h
+++ b/Eigen/src/Householder/HouseholderSequence.h
@@ -87,7 +87,7 @@ struct hseq_side_dependent_impl
{
typedef Block<const VectorsType, Dynamic, 1> EssentialVectorType;
typedef HouseholderSequence<VectorsType, CoeffsType, OnTheLeft> HouseholderSequenceType;
- static inline const EssentialVectorType essentialVector(const HouseholderSequenceType& h, Index k)
+ static EIGEN_DEVICE_FUNC inline const EssentialVectorType essentialVector(const HouseholderSequenceType& h, Index k)
{
Index start = k+1+h.m_shift;
return Block<const VectorsType,Dynamic,1>(h.m_vectors, start, k, h.rows()-start, 1);
@@ -140,6 +140,22 @@ template<typename VectorsType, typename CoeffsType, int Side> class HouseholderS
Side
> ConjugateReturnType;
+ typedef HouseholderSequence<
+ VectorsType,
+ typename internal::conditional<NumTraits<Scalar>::IsComplex,
+ typename internal::remove_all<typename CoeffsType::ConjugateReturnType>::type,
+ CoeffsType>::type,
+ Side
+ > AdjointReturnType;
+
+ typedef HouseholderSequence<
+ typename internal::conditional<NumTraits<Scalar>::IsComplex,
+ typename internal::remove_all<typename VectorsType::ConjugateReturnType>::type,
+ VectorsType>::type,
+ CoeffsType,
+ Side
+ > TransposeReturnType;
+
/** \brief Constructor.
* \param[in] v %Matrix containing the essential parts of the Householder vectors
* \param[in] h Vector containing the Householder coefficients
@@ -157,17 +173,19 @@ template<typename VectorsType, typename CoeffsType, int Side> class HouseholderS
*
* \sa setLength(), setShift()
*/
+ EIGEN_DEVICE_FUNC
HouseholderSequence(const VectorsType& v, const CoeffsType& h)
- : m_vectors(v), m_coeffs(h), m_trans(false), m_length(v.diagonalSize()),
+ : m_vectors(v), m_coeffs(h), m_reverse(false), m_length(v.diagonalSize()),
m_shift(0)
{
}
/** \brief Copy constructor. */
+ EIGEN_DEVICE_FUNC
HouseholderSequence(const HouseholderSequence& other)
: m_vectors(other.m_vectors),
m_coeffs(other.m_coeffs),
- m_trans(other.m_trans),
+ m_reverse(other.m_reverse),
m_length(other.m_length),
m_shift(other.m_shift)
{
@@ -177,12 +195,14 @@ template<typename VectorsType, typename CoeffsType, int Side> class HouseholderS
* \returns Number of rows
* \details This equals the dimension of the space that the transformation acts on.
*/
+ EIGEN_DEVICE_FUNC
Index rows() const { return Side==OnTheLeft ? m_vectors.rows() : m_vectors.cols(); }
/** \brief Number of columns of transformation viewed as a matrix.
* \returns Number of columns
* \details This equals the dimension of the space that the transformation acts on.
*/
+ EIGEN_DEVICE_FUNC
Index cols() const { return rows(); }
/** \brief Essential part of a Householder vector.
@@ -199,6 +219,7 @@ template<typename VectorsType, typename CoeffsType, int Side> class HouseholderS
*
* \sa setShift(), shift()
*/
+ EIGEN_DEVICE_FUNC
const EssentialVectorType essentialVector(Index k) const
{
eigen_assert(k >= 0 && k < m_length);
@@ -206,31 +227,39 @@ template<typename VectorsType, typename CoeffsType, int Side> class HouseholderS
}
/** \brief %Transpose of the Householder sequence. */
- HouseholderSequence transpose() const
+ TransposeReturnType transpose() const
{
- return HouseholderSequence(*this).setTrans(!m_trans);
+ return TransposeReturnType(m_vectors.conjugate(), m_coeffs)
+ .setReverseFlag(!m_reverse)
+ .setLength(m_length)
+ .setShift(m_shift);
}
/** \brief Complex conjugate of the Householder sequence. */
ConjugateReturnType conjugate() const
{
return ConjugateReturnType(m_vectors.conjugate(), m_coeffs.conjugate())
- .setTrans(m_trans)
+ .setReverseFlag(m_reverse)
.setLength(m_length)
.setShift(m_shift);
}
/** \brief Adjoint (conjugate transpose) of the Householder sequence. */
- ConjugateReturnType adjoint() const
+ AdjointReturnType adjoint() const
{
- return conjugate().setTrans(!m_trans);
+ return AdjointReturnType(m_vectors, m_coeffs.conjugate())
+ .setReverseFlag(!m_reverse)
+ .setLength(m_length)
+ .setShift(m_shift);
}
/** \brief Inverse of the Householder sequence (equals the adjoint). */
- ConjugateReturnType inverse() const { return adjoint(); }
+ AdjointReturnType inverse() const { return adjoint(); }
/** \internal */
- template<typename DestType> inline void evalTo(DestType& dst) const
+ template<typename DestType>
+ inline EIGEN_DEVICE_FUNC
+ void evalTo(DestType& dst) const
{
Matrix<Scalar, DestType::RowsAtCompileTime, 1,
AutoAlign|ColMajor, DestType::MaxRowsAtCompileTime, 1> workspace(rows());
@@ -239,6 +268,7 @@ template<typename VectorsType, typename CoeffsType, int Side> class HouseholderS
/** \internal */
template<typename Dest, typename Workspace>
+ EIGEN_DEVICE_FUNC
void evalTo(Dest& dst, Workspace& workspace) const
{
workspace.resize(rows());
@@ -251,7 +281,7 @@ template<typename VectorsType, typename CoeffsType, int Side> class HouseholderS
for(Index k = vecs-1; k >= 0; --k)
{
Index cornerSize = rows() - k - m_shift;
- if(m_trans)
+ if(m_reverse)
dst.bottomRightCorner(cornerSize, cornerSize)
.applyHouseholderOnTheRight(essentialVector(k), m_coeffs.coeff(k), workspace.data());
else
@@ -265,18 +295,26 @@ template<typename VectorsType, typename CoeffsType, int Side> class HouseholderS
for(Index k = 0; k<cols()-vecs ; ++k)
dst.col(k).tail(rows()-k-1).setZero();
}
+ else if(m_length>BlockSize)
+ {
+ dst.setIdentity(rows(), rows());
+ if(m_reverse)
+ applyThisOnTheLeft(dst,workspace,true);
+ else
+ applyThisOnTheLeft(dst,workspace,true);
+ }
else
{
dst.setIdentity(rows(), rows());
for(Index k = vecs-1; k >= 0; --k)
{
Index cornerSize = rows() - k - m_shift;
- if(m_trans)
+ if(m_reverse)
dst.bottomRightCorner(cornerSize, cornerSize)
- .applyHouseholderOnTheRight(essentialVector(k), m_coeffs.coeff(k), &workspace.coeffRef(0));
+ .applyHouseholderOnTheRight(essentialVector(k), m_coeffs.coeff(k), workspace.data());
else
dst.bottomRightCorner(cornerSize, cornerSize)
- .applyHouseholderOnTheLeft(essentialVector(k), m_coeffs.coeff(k), &workspace.coeffRef(0));
+ .applyHouseholderOnTheLeft(essentialVector(k), m_coeffs.coeff(k), workspace.data());
}
}
}
@@ -295,31 +333,34 @@ template<typename VectorsType, typename CoeffsType, int Side> class HouseholderS
workspace.resize(dst.rows());
for(Index k = 0; k < m_length; ++k)
{
- Index actual_k = m_trans ? m_length-k-1 : k;
+ Index actual_k = m_reverse ? m_length-k-1 : k;
dst.rightCols(rows()-m_shift-actual_k)
.applyHouseholderOnTheRight(essentialVector(actual_k), m_coeffs.coeff(actual_k), workspace.data());
}
}
/** \internal */
- template<typename Dest> inline void applyThisOnTheLeft(Dest& dst) const
+ template<typename Dest> inline void applyThisOnTheLeft(Dest& dst, bool inputIsIdentity = false) const
{
Matrix<Scalar,1,Dest::ColsAtCompileTime,RowMajor,1,Dest::MaxColsAtCompileTime> workspace;
- applyThisOnTheLeft(dst, workspace);
+ applyThisOnTheLeft(dst, workspace, inputIsIdentity);
}
/** \internal */
template<typename Dest, typename Workspace>
- inline void applyThisOnTheLeft(Dest& dst, Workspace& workspace) const
+ inline void applyThisOnTheLeft(Dest& dst, Workspace& workspace, bool inputIsIdentity = false) const
{
- const Index BlockSize = 48;
+ if(inputIsIdentity && m_reverse)
+ inputIsIdentity = false;
// if the entries are large enough, then apply the reflectors by block
if(m_length>=BlockSize && dst.cols()>1)
{
- for(Index i = 0; i < m_length; i+=BlockSize)
+ // Make sure we have at least 2 useful blocks, otherwise it is point-less:
+ Index blockSize = m_length<Index(2*BlockSize) ? (m_length+1)/2 : Index(BlockSize);
+ for(Index i = 0; i < m_length; i+=blockSize)
{
- Index end = m_trans ? (std::min)(m_length,i+BlockSize) : m_length-i;
- Index k = m_trans ? i : (std::max)(Index(0),end-BlockSize);
+ Index end = m_reverse ? (std::min)(m_length,i+blockSize) : m_length-i;
+ Index k = m_reverse ? i : (std::max)(Index(0),end-blockSize);
Index bs = end-k;
Index start = k + m_shift;
@@ -329,8 +370,15 @@ template<typename VectorsType, typename CoeffsType, int Side> class HouseholderS
Side==OnTheRight ? bs : m_vectors.rows()-start,
Side==OnTheRight ? m_vectors.cols()-start : bs);
typename internal::conditional<Side==OnTheRight, Transpose<SubVectorsType>, SubVectorsType&>::type sub_vecs(sub_vecs1);
- Block<Dest,Dynamic,Dynamic> sub_dst(dst,dst.rows()-rows()+m_shift+k,0, rows()-m_shift-k,dst.cols());
- apply_block_householder_on_the_left(sub_dst, sub_vecs, m_coeffs.segment(k, bs), !m_trans);
+
+ Index dstStart = dst.rows()-rows()+m_shift+k;
+ Index dstRows = rows()-m_shift-k;
+ Block<Dest,Dynamic,Dynamic> sub_dst(dst,
+ dstStart,
+ inputIsIdentity ? dstStart : 0,
+ dstRows,
+ inputIsIdentity ? dstRows : dst.cols());
+ apply_block_householder_on_the_left(sub_dst, sub_vecs, m_coeffs.segment(k, bs), !m_reverse);
}
}
else
@@ -338,8 +386,9 @@ template<typename VectorsType, typename CoeffsType, int Side> class HouseholderS
workspace.resize(dst.cols());
for(Index k = 0; k < m_length; ++k)
{
- Index actual_k = m_trans ? k : m_length-k-1;
- dst.bottomRows(rows()-m_shift-actual_k)
+ Index actual_k = m_reverse ? k : m_length-k-1;
+ Index dstStart = rows()-m_shift-actual_k;
+ dst.bottomRightCorner(dstStart, inputIsIdentity ? dstStart : dst.cols())
.applyHouseholderOnTheLeft(essentialVector(actual_k), m_coeffs.coeff(actual_k), workspace.data());
}
}
@@ -357,7 +406,7 @@ template<typename VectorsType, typename CoeffsType, int Side> class HouseholderS
{
typename internal::matrix_type_times_scalar_type<Scalar, OtherDerived>::Type
res(other.template cast<typename internal::matrix_type_times_scalar_type<Scalar,OtherDerived>::ResultScalar>());
- applyThisOnTheLeft(res);
+ applyThisOnTheLeft(res, internal::is_identity<OtherDerived>::value && res.rows()==res.cols());
return res;
}
@@ -372,6 +421,7 @@ template<typename VectorsType, typename CoeffsType, int Side> class HouseholderS
*
* \sa length()
*/
+ EIGEN_DEVICE_FUNC
HouseholderSequence& setLength(Index length)
{
m_length = length;
@@ -389,13 +439,17 @@ template<typename VectorsType, typename CoeffsType, int Side> class HouseholderS
*
* \sa shift()
*/
+ EIGEN_DEVICE_FUNC
HouseholderSequence& setShift(Index shift)
{
m_shift = shift;
return *this;
}
+ EIGEN_DEVICE_FUNC
Index length() const { return m_length; } /**< \brief Returns the length of the Householder sequence. */
+
+ EIGEN_DEVICE_FUNC
Index shift() const { return m_shift; } /**< \brief Returns the shift of the Householder sequence. */
/* Necessary for .adjoint() and .conjugate() */
@@ -403,27 +457,30 @@ template<typename VectorsType, typename CoeffsType, int Side> class HouseholderS
protected:
- /** \brief Sets the transpose flag.
- * \param [in] trans New value of the transpose flag.
+ /** \internal
+ * \brief Sets the reverse flag.
+ * \param [in] reverse New value of the reverse flag.
*
- * By default, the transpose flag is not set. If the transpose flag is set, then this object represents
- * \f$ H^T = H_{n-1}^T \ldots H_1^T H_0^T \f$ instead of \f$ H = H_0 H_1 \ldots H_{n-1} \f$.
+ * By default, the reverse flag is not set. If the reverse flag is set, then this object represents
+ * \f$ H^r = H_{n-1} \ldots H_1 H_0 \f$ instead of \f$ H = H_0 H_1 \ldots H_{n-1} \f$.
+ * \note For real valued HouseholderSequence this is equivalent to transposing \f$ H \f$.
*
- * \sa trans()
+ * \sa reverseFlag(), transpose(), adjoint()
*/
- HouseholderSequence& setTrans(bool trans)
+ HouseholderSequence& setReverseFlag(bool reverse)
{
- m_trans = trans;
+ m_reverse = reverse;
return *this;
}
- bool trans() const { return m_trans; } /**< \brief Returns the transpose flag. */
+ bool reverseFlag() const { return m_reverse; } /**< \internal \brief Returns the reverse flag. */
typename VectorsType::Nested m_vectors;
typename CoeffsType::Nested m_coeffs;
- bool m_trans;
+ bool m_reverse;
Index m_length;
Index m_shift;
+ enum { BlockSize = 48 };
};
/** \brief Computes the product of a matrix with a Householder sequence.
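
The sequence type above is what householderQ() returns; a hedged usage sketch showing that transpose() and adjoint() are now distinct expressions for complex scalars:

    #include <Eigen/Dense>

    int main()
    {
      Eigen::MatrixXcd A = Eigen::MatrixXcd::Random(6, 4);
      Eigen::HouseholderQR<Eigen::MatrixXcd> qr(A);

      Eigen::MatrixXcd Q = qr.householderQ();                    // dense evaluation via evalTo()
      Eigen::VectorXcd b = Eigen::VectorXcd::Random(6);
      Eigen::VectorXcd y1 = qr.householderQ().adjoint() * b;     // Q^H * b
      Eigen::VectorXcd y2 = qr.householderQ().transpose() * b;   // Q^T * b, no conjugation
      (void)Q; (void)y1; (void)y2;
      return 0;
    }
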
diff --git a/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h b/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h
index 358444aff..f66c846ef 100644
--- a/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h
+++ b/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h
@@ -152,13 +152,28 @@ class LeastSquareDiagonalPreconditioner : public DiagonalPreconditioner<_Scalar>
{
// Compute the inverse squared-norm of each column of mat
m_invdiag.resize(mat.cols());
- for(Index j=0; j<mat.outerSize(); ++j)
+ if(MatType::IsRowMajor)
{
- RealScalar sum = mat.innerVector(j).squaredNorm();
- if(sum>0)
- m_invdiag(j) = RealScalar(1)/sum;
- else
- m_invdiag(j) = RealScalar(1);
+ m_invdiag.setZero();
+ for(Index j=0; j<mat.outerSize(); ++j)
+ {
+ for(typename MatType::InnerIterator it(mat,j); it; ++it)
+ m_invdiag(it.index()) += numext::abs2(it.value());
+ }
+ for(Index j=0; j<mat.cols(); ++j)
+ if(numext::real(m_invdiag(j))>RealScalar(0))
+ m_invdiag(j) = RealScalar(1)/numext::real(m_invdiag(j));
+ }
+ else
+ {
+ for(Index j=0; j<mat.outerSize(); ++j)
+ {
+ RealScalar sum = mat.col(j).squaredNorm();
+ if(sum>RealScalar(0))
+ m_invdiag(j) = RealScalar(1)/sum;
+ else
+ m_invdiag(j) = RealScalar(1);
+ }
}
Base::m_isInitialized = true;
return *this;
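
LeastSquareDiagonalPreconditioner is the default preconditioner of LeastSquaresConjugateGradient; a minimal sketch with a row-major sparse matrix, which the new branch above now handles by accumulating squared column norms across rows:

    #include <Eigen/SparseCore>
    #include <Eigen/IterativeLinearSolvers>

    int main()
    {
      typedef Eigen::SparseMatrix<double, Eigen::RowMajor> SpMatRM;
      SpMatRM A(8, 5);
      A.insert(0, 0) = 1.0;  A.insert(1, 1) = 2.0;  A.insert(2, 2) = 3.0;
      A.insert(3, 3) = 4.0;  A.insert(4, 4) = 5.0;  A.insert(5, 0) = -1.0;
      A.makeCompressed();

      Eigen::VectorXd b = Eigen::VectorXd::Random(8);
      Eigen::LeastSquaresConjugateGradient<SpMatRM> lscg;
      lscg.compute(A);
      Eigen::VectorXd x = lscg.solve(b);              // least-squares solution of Ax ~ b
      (void)x;
      return 0;
    }
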
diff --git a/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h b/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h
index 395daa8e4..f7ce47134 100644
--- a/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h
+++ b/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h
@@ -50,7 +50,8 @@ void conjugate_gradient(const MatrixType& mat, const Rhs& rhs, Dest& x,
tol_error = 0;
return;
}
- RealScalar threshold = tol*tol*rhsNorm2;
+ const RealScalar considerAsZero = (std::numeric_limits<RealScalar>::min)();
+ RealScalar threshold = numext::maxi(tol*tol*rhsNorm2,considerAsZero);
RealScalar residualNorm2 = residual.squaredNorm();
if (residualNorm2 < threshold)
{
@@ -58,7 +59,7 @@ void conjugate_gradient(const MatrixType& mat, const Rhs& rhs, Dest& x,
tol_error = sqrt(residualNorm2 / rhsNorm2);
return;
}
-
+
VectorType p(n);
p = precond.solve(residual); // initial search direction
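
A small sketch of the case the clamped threshold is meant for, assuming default solver settings: when tol*tol*|b|^2 underflows to zero, the iteration now still terminates cleanly instead of chasing an unreachable zero threshold:

    #include <Eigen/SparseCore>
    #include <Eigen/IterativeLinearSolvers>

    int main()
    {
      typedef Eigen::SparseMatrix<double> SpMat;
      SpMat A(4, 4);
      for (int i = 0; i < 4; ++i) A.insert(i, i) = 2.0;  // trivially SPD
      A.makeCompressed();

      Eigen::VectorXd b = Eigen::VectorXd::Constant(4, 1e-160); // tiny right-hand side
      Eigen::ConjugateGradient<SpMat, Eigen::Lower | Eigen::Upper> cg;
      cg.compute(A);
      Eigen::VectorXd x = cg.solve(b);
      bool converged = (cg.info() == Eigen::Success);
      (void)x; (void)converged;
      return 0;
    }
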
diff --git a/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h b/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h
index 338e6f10a..43bd8e8f6 100644
--- a/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h
+++ b/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h
@@ -136,7 +136,7 @@ class IncompleteLUT : public SparseSolverBase<IncompleteLUT<_Scalar, _StorageInd
/** \brief Reports whether previous computation was successful.
*
- * \returns \c Success if computation was succesful,
+ * \returns \c Success if computation was successful,
* \c NumericalIssue if the matrix.appears to be negative.
*/
ComputationInfo info() const
@@ -230,7 +230,7 @@ void IncompleteLUT<Scalar,StorageIndex>::analyzePattern(const _MatrixType& amat)
SparseMatrix<Scalar,ColMajor, StorageIndex> mat1 = amat;
SparseMatrix<Scalar,ColMajor, StorageIndex> mat2 = amat.transpose();
// FIXME for a matrix with nearly symmetric pattern, mat2+mat1 is the appropriate choice.
- // on the other hand for a really non-symmetric pattern, mat2*mat1 should be prefered...
+ // on the other hand for a really non-symmetric pattern, mat2*mat1 should be preferred...
SparseMatrix<Scalar,ColMajor, StorageIndex> AtA = mat2 + mat1;
AMDOrdering<StorageIndex> ordering;
ordering(AtA,m_P);
diff --git a/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h b/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h
index 7c2326eb7..bfeee71cd 100644
--- a/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h
+++ b/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h
@@ -275,7 +275,7 @@ public:
const Preconditioner& preconditioner() const { return m_preconditioner; }
/** \returns the max number of iterations.
- * It is either the value setted by setMaxIterations or, by default,
+ * It is either the value set by setMaxIterations or, by default,
* twice the number of columns of the matrix.
*/
Index maxIterations() const
diff --git a/Eigen/src/IterativeLinearSolvers/SolveWithGuess.h b/Eigen/src/IterativeLinearSolvers/SolveWithGuess.h
index 0ace45177..79e1e4819 100644
--- a/Eigen/src/IterativeLinearSolvers/SolveWithGuess.h
+++ b/Eigen/src/IterativeLinearSolvers/SolveWithGuess.h
@@ -108,7 +108,7 @@ struct Assignment<DstXprType, SolveWithGuess<DecType,RhsType,GuessType>, interna
}
};
-} // end namepsace internal
+} // end namespace internal
} // end namespace Eigen
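
For context, solveWithGuess() is the entry point that builds the SolveWithGuess expression handled here; a minimal sketch:

    #include <Eigen/SparseCore>
    #include <Eigen/IterativeLinearSolvers>

    int main()
    {
      typedef Eigen::SparseMatrix<double> SpMat;
      SpMat A(3, 3);
      for (int i = 0; i < 3; ++i) A.insert(i, i) = 4.0;
      A.makeCompressed();

      Eigen::VectorXd b(3);
      b << 1, 2, 3;
      Eigen::VectorXd x0 = Eigen::VectorXd::Zero(3);    // initial guess

      Eigen::BiCGSTAB<SpMat> solver(A);
      Eigen::VectorXd x = solver.solveWithGuess(b, x0); // evaluated on assignment
      (void)x;
      return 0;
    }
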
diff --git a/Eigen/src/Jacobi/Jacobi.h b/Eigen/src/Jacobi/Jacobi.h
index d25af8e90..4ccd49a04 100644
--- a/Eigen/src/Jacobi/Jacobi.h
+++ b/Eigen/src/Jacobi/Jacobi.h
@@ -37,17 +37,20 @@ template<typename Scalar> class JacobiRotation
typedef typename NumTraits<Scalar>::Real RealScalar;
/** Default constructor without any initialization. */
+ EIGEN_DEVICE_FUNC
JacobiRotation() {}
/** Construct a planar rotation from a cosine-sine pair (\a c, \c s). */
+ EIGEN_DEVICE_FUNC
JacobiRotation(const Scalar& c, const Scalar& s) : m_c(c), m_s(s) {}
- Scalar& c() { return m_c; }
- Scalar c() const { return m_c; }
- Scalar& s() { return m_s; }
- Scalar s() const { return m_s; }
+ EIGEN_DEVICE_FUNC Scalar& c() { return m_c; }
+ EIGEN_DEVICE_FUNC Scalar c() const { return m_c; }
+ EIGEN_DEVICE_FUNC Scalar& s() { return m_s; }
+ EIGEN_DEVICE_FUNC Scalar s() const { return m_s; }
/** Concatenates two planar rotation */
+ EIGEN_DEVICE_FUNC
JacobiRotation operator*(const JacobiRotation& other)
{
using numext::conj;
@@ -56,20 +59,27 @@ template<typename Scalar> class JacobiRotation
}
/** Returns the transposed transformation */
+ EIGEN_DEVICE_FUNC
JacobiRotation transpose() const { using numext::conj; return JacobiRotation(m_c, -conj(m_s)); }
/** Returns the adjoint transformation */
+ EIGEN_DEVICE_FUNC
JacobiRotation adjoint() const { using numext::conj; return JacobiRotation(conj(m_c), -m_s); }
template<typename Derived>
+ EIGEN_DEVICE_FUNC
bool makeJacobi(const MatrixBase<Derived>&, Index p, Index q);
+ EIGEN_DEVICE_FUNC
bool makeJacobi(const RealScalar& x, const Scalar& y, const RealScalar& z);
- void makeGivens(const Scalar& p, const Scalar& q, Scalar* z=0);
+ EIGEN_DEVICE_FUNC
+ void makeGivens(const Scalar& p, const Scalar& q, Scalar* r=0);
protected:
- void makeGivens(const Scalar& p, const Scalar& q, Scalar* z, internal::true_type);
- void makeGivens(const Scalar& p, const Scalar& q, Scalar* z, internal::false_type);
+ EIGEN_DEVICE_FUNC
+ void makeGivens(const Scalar& p, const Scalar& q, Scalar* r, internal::true_type);
+ EIGEN_DEVICE_FUNC
+ void makeGivens(const Scalar& p, const Scalar& q, Scalar* r, internal::false_type);
Scalar m_c, m_s;
};
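
Usage mirrors the documented makeGivens example, with the output parameter now consistently named r; a minimal sketch:

    #include <Eigen/Dense>

    int main()
    {
      Eigen::Vector2f v(3.f, 4.f);
      Eigen::JacobiRotation<float> G;
      float r;
      G.makeGivens(v.x(), v.y(), &r);           // |r| == 5 here
      v.applyOnTheLeft(0, 1, G.adjoint());      // v becomes (r, 0)
      (void)r;
      return 0;
    }
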
@@ -80,11 +90,12 @@ template<typename Scalar> class JacobiRotation
* \sa MatrixBase::makeJacobi(const MatrixBase<Derived>&, Index, Index), MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight()
*/
template<typename Scalar>
+EIGEN_DEVICE_FUNC
bool JacobiRotation<Scalar>::makeJacobi(const RealScalar& x, const Scalar& y, const RealScalar& z)
{
using std::sqrt;
using std::abs;
- typedef typename NumTraits<Scalar>::Real RealScalar;
+
RealScalar deno = RealScalar(2)*abs(y);
if(deno < (std::numeric_limits<RealScalar>::min)())
{
@@ -124,6 +135,7 @@ bool JacobiRotation<Scalar>::makeJacobi(const RealScalar& x, const Scalar& y, co
*/
template<typename Scalar>
template<typename Derived>
+EIGEN_DEVICE_FUNC
inline bool JacobiRotation<Scalar>::makeJacobi(const MatrixBase<Derived>& m, Index p, Index q)
{
return makeJacobi(numext::real(m.coeff(p,p)), m.coeff(p,q), numext::real(m.coeff(q,q)));
@@ -133,7 +145,7 @@ inline bool JacobiRotation<Scalar>::makeJacobi(const MatrixBase<Derived>& m, Ind
* \f$ V = \left ( \begin{array}{c} p \\ q \end{array} \right )\f$ yields:
* \f$ G^* V = \left ( \begin{array}{c} r \\ 0 \end{array} \right )\f$.
*
- * The value of \a z is returned if \a z is not null (the default is null).
+ * The value of \a r is returned if \a r is not null (the default is null).
* Also note that G is built such that the cosine is always real.
*
* Example: \include Jacobi_makeGivens.cpp
@@ -146,14 +158,16 @@ inline bool JacobiRotation<Scalar>::makeJacobi(const MatrixBase<Derived>& m, Ind
* \sa MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight()
*/
template<typename Scalar>
-void JacobiRotation<Scalar>::makeGivens(const Scalar& p, const Scalar& q, Scalar* z)
+EIGEN_DEVICE_FUNC
+void JacobiRotation<Scalar>::makeGivens(const Scalar& p, const Scalar& q, Scalar* r)
{
- makeGivens(p, q, z, typename internal::conditional<NumTraits<Scalar>::IsComplex, internal::true_type, internal::false_type>::type());
+ makeGivens(p, q, r, typename internal::conditional<NumTraits<Scalar>::IsComplex, internal::true_type, internal::false_type>::type());
}
// specialization for complexes
template<typename Scalar>
+EIGEN_DEVICE_FUNC
void JacobiRotation<Scalar>::makeGivens(const Scalar& p, const Scalar& q, Scalar* r, internal::true_type)
{
using std::sqrt;
@@ -213,6 +227,7 @@ void JacobiRotation<Scalar>::makeGivens(const Scalar& p, const Scalar& q, Scalar
// specialization for reals
template<typename Scalar>
+EIGEN_DEVICE_FUNC
void JacobiRotation<Scalar>::makeGivens(const Scalar& p, const Scalar& q, Scalar* r, internal::false_type)
{
using std::sqrt;
@@ -264,6 +279,7 @@ namespace internal {
* \sa MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight()
*/
template<typename VectorX, typename VectorY, typename OtherScalar>
+EIGEN_DEVICE_FUNC
void apply_rotation_in_the_plane(DenseBase<VectorX>& xpr_x, DenseBase<VectorY>& xpr_y, const JacobiRotation<OtherScalar>& j);
}
@@ -275,6 +291,7 @@ void apply_rotation_in_the_plane(DenseBase<VectorX>& xpr_x, DenseBase<VectorY>&
*/
template<typename Derived>
template<typename OtherScalar>
+EIGEN_DEVICE_FUNC
inline void MatrixBase<Derived>::applyOnTheLeft(Index p, Index q, const JacobiRotation<OtherScalar>& j)
{
RowXpr x(this->row(p));
@@ -290,6 +307,7 @@ inline void MatrixBase<Derived>::applyOnTheLeft(Index p, Index q, const JacobiRo
*/
template<typename Derived>
template<typename OtherScalar>
+EIGEN_DEVICE_FUNC
inline void MatrixBase<Derived>::applyOnTheRight(Index p, Index q, const JacobiRotation<OtherScalar>& j)
{
ColXpr x(this->col(p));
@@ -298,132 +316,164 @@ inline void MatrixBase<Derived>::applyOnTheRight(Index p, Index q, const JacobiR
}
namespace internal {
-template<typename VectorX, typename VectorY, typename OtherScalar>
-void /*EIGEN_DONT_INLINE*/ apply_rotation_in_the_plane(DenseBase<VectorX>& xpr_x, DenseBase<VectorY>& xpr_y, const JacobiRotation<OtherScalar>& j)
-{
- typedef typename VectorX::Scalar Scalar;
- enum { PacketSize = packet_traits<Scalar>::size };
- typedef typename packet_traits<Scalar>::type Packet;
- eigen_assert(xpr_x.size() == xpr_y.size());
- Index size = xpr_x.size();
- Index incrx = xpr_x.derived().innerStride();
- Index incry = xpr_y.derived().innerStride();
-
- Scalar* EIGEN_RESTRICT x = &xpr_x.derived().coeffRef(0);
- Scalar* EIGEN_RESTRICT y = &xpr_y.derived().coeffRef(0);
-
- OtherScalar c = j.c();
- OtherScalar s = j.s();
- if (c==OtherScalar(1) && s==OtherScalar(0))
- return;
- /*** dynamic-size vectorized paths ***/
+template<typename Scalar, typename OtherScalar,
+ int SizeAtCompileTime, int MinAlignment, bool Vectorizable>
+struct apply_rotation_in_the_plane_selector
+{
+ static EIGEN_DEVICE_FUNC
+ inline void run(Scalar *x, Index incrx, Scalar *y, Index incry, Index size, OtherScalar c, OtherScalar s)
+ {
+ for(Index i=0; i<size; ++i)
+ {
+ Scalar xi = *x;
+ Scalar yi = *y;
+ *x = c * xi + numext::conj(s) * yi;
+ *y = -s * xi + numext::conj(c) * yi;
+ x += incrx;
+ y += incry;
+ }
+ }
+};
- if(VectorX::SizeAtCompileTime == Dynamic &&
- (VectorX::Flags & VectorY::Flags & PacketAccessBit) &&
- ((incrx==1 && incry==1) || PacketSize == 1))
+template<typename Scalar, typename OtherScalar,
+ int SizeAtCompileTime, int MinAlignment>
+struct apply_rotation_in_the_plane_selector<Scalar,OtherScalar,SizeAtCompileTime,MinAlignment,true /* vectorizable */>
+{
+ static inline void run(Scalar *x, Index incrx, Scalar *y, Index incry, Index size, OtherScalar c, OtherScalar s)
{
- // both vectors are sequentially stored in memory => vectorization
- enum { Peeling = 2 };
+ enum {
+ PacketSize = packet_traits<Scalar>::size,
+ OtherPacketSize = packet_traits<OtherScalar>::size
+ };
+ typedef typename packet_traits<Scalar>::type Packet;
+ typedef typename packet_traits<OtherScalar>::type OtherPacket;
+
+ /*** dynamic-size vectorized paths ***/
+ if(SizeAtCompileTime == Dynamic && ((incrx==1 && incry==1) || PacketSize == 1))
+ {
+ // both vectors are sequentially stored in memory => vectorization
+ enum { Peeling = 2 };
- Index alignedStart = internal::first_default_aligned(y, size);
- Index alignedEnd = alignedStart + ((size-alignedStart)/PacketSize)*PacketSize;
+ Index alignedStart = internal::first_default_aligned(y, size);
+ Index alignedEnd = alignedStart + ((size-alignedStart)/PacketSize)*PacketSize;
- const Packet pc = pset1<Packet>(c);
- const Packet ps = pset1<Packet>(s);
- conj_helper<Packet,Packet,NumTraits<Scalar>::IsComplex,false> pcj;
+ const OtherPacket pc = pset1<OtherPacket>(c);
+ const OtherPacket ps = pset1<OtherPacket>(s);
+ conj_helper<OtherPacket,Packet,NumTraits<OtherScalar>::IsComplex,false> pcj;
+ conj_helper<OtherPacket,Packet,false,false> pm;
- for(Index i=0; i<alignedStart; ++i)
- {
- Scalar xi = x[i];
- Scalar yi = y[i];
- x[i] = c * xi + numext::conj(s) * yi;
- y[i] = -s * xi + numext::conj(c) * yi;
- }
+ for(Index i=0; i<alignedStart; ++i)
+ {
+ Scalar xi = x[i];
+ Scalar yi = y[i];
+ x[i] = c * xi + numext::conj(s) * yi;
+ y[i] = -s * xi + numext::conj(c) * yi;
+ }
- Scalar* EIGEN_RESTRICT px = x + alignedStart;
- Scalar* EIGEN_RESTRICT py = y + alignedStart;
+ Scalar* EIGEN_RESTRICT px = x + alignedStart;
+ Scalar* EIGEN_RESTRICT py = y + alignedStart;
- if(internal::first_default_aligned(x, size)==alignedStart)
- {
- for(Index i=alignedStart; i<alignedEnd; i+=PacketSize)
+ if(internal::first_default_aligned(x, size)==alignedStart)
{
- Packet xi = pload<Packet>(px);
- Packet yi = pload<Packet>(py);
- pstore(px, padd(pmul(pc,xi),pcj.pmul(ps,yi)));
- pstore(py, psub(pcj.pmul(pc,yi),pmul(ps,xi)));
- px += PacketSize;
- py += PacketSize;
+ for(Index i=alignedStart; i<alignedEnd; i+=PacketSize)
+ {
+ Packet xi = pload<Packet>(px);
+ Packet yi = pload<Packet>(py);
+ pstore(px, padd(pm.pmul(pc,xi),pcj.pmul(ps,yi)));
+ pstore(py, psub(pcj.pmul(pc,yi),pm.pmul(ps,xi)));
+ px += PacketSize;
+ py += PacketSize;
+ }
}
- }
- else
- {
- Index peelingEnd = alignedStart + ((size-alignedStart)/(Peeling*PacketSize))*(Peeling*PacketSize);
- for(Index i=alignedStart; i<peelingEnd; i+=Peeling*PacketSize)
+ else
{
- Packet xi = ploadu<Packet>(px);
- Packet xi1 = ploadu<Packet>(px+PacketSize);
- Packet yi = pload <Packet>(py);
- Packet yi1 = pload <Packet>(py+PacketSize);
- pstoreu(px, padd(pmul(pc,xi),pcj.pmul(ps,yi)));
- pstoreu(px+PacketSize, padd(pmul(pc,xi1),pcj.pmul(ps,yi1)));
- pstore (py, psub(pcj.pmul(pc,yi),pmul(ps,xi)));
- pstore (py+PacketSize, psub(pcj.pmul(pc,yi1),pmul(ps,xi1)));
- px += Peeling*PacketSize;
- py += Peeling*PacketSize;
+ Index peelingEnd = alignedStart + ((size-alignedStart)/(Peeling*PacketSize))*(Peeling*PacketSize);
+ for(Index i=alignedStart; i<peelingEnd; i+=Peeling*PacketSize)
+ {
+ Packet xi = ploadu<Packet>(px);
+ Packet xi1 = ploadu<Packet>(px+PacketSize);
+ Packet yi = pload <Packet>(py);
+ Packet yi1 = pload <Packet>(py+PacketSize);
+ pstoreu(px, padd(pm.pmul(pc,xi),pcj.pmul(ps,yi)));
+ pstoreu(px+PacketSize, padd(pm.pmul(pc,xi1),pcj.pmul(ps,yi1)));
+ pstore (py, psub(pcj.pmul(pc,yi),pm.pmul(ps,xi)));
+ pstore (py+PacketSize, psub(pcj.pmul(pc,yi1),pm.pmul(ps,xi1)));
+ px += Peeling*PacketSize;
+ py += Peeling*PacketSize;
+ }
+ if(alignedEnd!=peelingEnd)
+ {
+ Packet xi = ploadu<Packet>(x+peelingEnd);
+ Packet yi = pload <Packet>(y+peelingEnd);
+ pstoreu(x+peelingEnd, padd(pm.pmul(pc,xi),pcj.pmul(ps,yi)));
+ pstore (y+peelingEnd, psub(pcj.pmul(pc,yi),pm.pmul(ps,xi)));
+ }
}
- if(alignedEnd!=peelingEnd)
+
+ for(Index i=alignedEnd; i<size; ++i)
{
- Packet xi = ploadu<Packet>(x+peelingEnd);
- Packet yi = pload <Packet>(y+peelingEnd);
- pstoreu(x+peelingEnd, padd(pmul(pc,xi),pcj.pmul(ps,yi)));
- pstore (y+peelingEnd, psub(pcj.pmul(pc,yi),pmul(ps,xi)));
+ Scalar xi = x[i];
+ Scalar yi = y[i];
+ x[i] = c * xi + numext::conj(s) * yi;
+ y[i] = -s * xi + numext::conj(c) * yi;
}
}
- for(Index i=alignedEnd; i<size; ++i)
+ /*** fixed-size vectorized path ***/
+ else if(SizeAtCompileTime != Dynamic && MinAlignment>0) // FIXME should be compared to the required alignment
{
- Scalar xi = x[i];
- Scalar yi = y[i];
- x[i] = c * xi + numext::conj(s) * yi;
- y[i] = -s * xi + numext::conj(c) * yi;
+ const OtherPacket pc = pset1<OtherPacket>(c);
+ const OtherPacket ps = pset1<OtherPacket>(s);
+ conj_helper<OtherPacket,Packet,NumTraits<OtherScalar>::IsComplex,false> pcj;
+ conj_helper<OtherPacket,Packet,false,false> pm;
+ Scalar* EIGEN_RESTRICT px = x;
+ Scalar* EIGEN_RESTRICT py = y;
+ for(Index i=0; i<size; i+=PacketSize)
+ {
+ Packet xi = pload<Packet>(px);
+ Packet yi = pload<Packet>(py);
+ pstore(px, padd(pm.pmul(pc,xi),pcj.pmul(ps,yi)));
+ pstore(py, psub(pcj.pmul(pc,yi),pm.pmul(ps,xi)));
+ px += PacketSize;
+ py += PacketSize;
+ }
}
- }
- /*** fixed-size vectorized path ***/
- else if(VectorX::SizeAtCompileTime != Dynamic &&
- (VectorX::Flags & VectorY::Flags & PacketAccessBit) &&
- (EIGEN_PLAIN_ENUM_MIN(evaluator<VectorX>::Alignment, evaluator<VectorY>::Alignment)>0)) // FIXME should be compared to the required alignment
- {
- const Packet pc = pset1<Packet>(c);
- const Packet ps = pset1<Packet>(s);
- conj_helper<Packet,Packet,NumTraits<Scalar>::IsComplex,false> pcj;
- Scalar* EIGEN_RESTRICT px = x;
- Scalar* EIGEN_RESTRICT py = y;
- for(Index i=0; i<size; i+=PacketSize)
+ /*** non-vectorized path ***/
+ else
{
- Packet xi = pload<Packet>(px);
- Packet yi = pload<Packet>(py);
- pstore(px, padd(pmul(pc,xi),pcj.pmul(ps,yi)));
- pstore(py, psub(pcj.pmul(pc,yi),pmul(ps,xi)));
- px += PacketSize;
- py += PacketSize;
+ apply_rotation_in_the_plane_selector<Scalar,OtherScalar,SizeAtCompileTime,MinAlignment,false>::run(x,incrx,y,incry,size,c,s);
}
}
+};
- /*** non-vectorized path ***/
- else
- {
- for(Index i=0; i<size; ++i)
- {
- Scalar xi = *x;
- Scalar yi = *y;
- *x = c * xi + numext::conj(s) * yi;
- *y = -s * xi + numext::conj(c) * yi;
- x += incrx;
- y += incry;
- }
- }
+template<typename VectorX, typename VectorY, typename OtherScalar>
+EIGEN_DEVICE_FUNC
+void /*EIGEN_DONT_INLINE*/ apply_rotation_in_the_plane(DenseBase<VectorX>& xpr_x, DenseBase<VectorY>& xpr_y, const JacobiRotation<OtherScalar>& j)
+{
+ typedef typename VectorX::Scalar Scalar;
+ const bool Vectorizable = (VectorX::Flags & VectorY::Flags & PacketAccessBit)
+ && (int(packet_traits<Scalar>::size) == int(packet_traits<OtherScalar>::size));
+
+ eigen_assert(xpr_x.size() == xpr_y.size());
+ Index size = xpr_x.size();
+ Index incrx = xpr_x.derived().innerStride();
+ Index incry = xpr_y.derived().innerStride();
+
+ Scalar* EIGEN_RESTRICT x = &xpr_x.derived().coeffRef(0);
+ Scalar* EIGEN_RESTRICT y = &xpr_y.derived().coeffRef(0);
+
+ OtherScalar c = j.c();
+ OtherScalar s = j.s();
+ if (c==OtherScalar(1) && s==OtherScalar(0))
+ return;
+
+ apply_rotation_in_the_plane_selector<
+ Scalar,OtherScalar,
+ VectorX::SizeAtCompileTime,
+ EIGEN_PLAIN_ENUM_MIN(evaluator<VectorX>::Alignment, evaluator<VectorY>::Alignment),
+ Vectorizable>::run(x,incrx,y,incry,size,c,s);
}
} // end namespace internal
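
For context, a minimal self-contained sketch (not part of the patch above) of how the refactored kernel is normally reached through Eigen's public JacobiRotation API; the 4x4 size and float scalar are arbitrary choices for illustration:

#include <Eigen/Dense>

int main()
{
  Eigen::MatrixXf A = Eigen::MatrixXf::Random(4, 4);

  // Build a Givens rotation G such that applying G^* to the pair
  // (A(0,0), A(1,0)) zeroes the second entry.
  Eigen::JacobiRotation<float> G;
  G.makeGivens(A(0,0), A(1,0));

  // Rotate rows 0 and 1 of A; this call ends up in
  // internal::apply_rotation_in_the_plane, now dispatched through the
  // apply_rotation_in_the_plane_selector introduced above.
  A.applyOnTheLeft(0, 1, G.adjoint());

  return 0;  // A(1,0) is now numerically zero
}
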
diff --git a/Eigen/src/KLUSupport/KLUSupport.h b/Eigen/src/KLUSupport/KLUSupport.h
new file mode 100644
index 000000000..d2633a935
--- /dev/null
+++ b/Eigen/src/KLUSupport/KLUSupport.h
@@ -0,0 +1,358 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2017 Kyle Macfarlan <kyle.macfarlan@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_KLUSUPPORT_H
+#define EIGEN_KLUSUPPORT_H
+
+namespace Eigen {
+
+/* TODO extract L, extract U, compute det, etc... */
+
+/** \ingroup KLUSupport_Module
+ * \brief A sparse LU factorization and solver based on KLU
+ *
+ * This class allows solving sparse linear problems of the form A.X = B via an LU factorization
+ * using the KLU library. The sparse matrix A must be square and of full rank.
+ * The vectors or matrices X and B can be either dense or sparse.
+ *
+ * \warning The input matrix A should be in a \b compressed and \b column-major form.
+ * Otherwise an expensive copy will be made. You can call the inexpensive makeCompressed() to get a compressed matrix.
+ * \tparam _MatrixType the type of the sparse matrix A; it must be a SparseMatrix<>
+ *
+ * \implsparsesolverconcept
+ *
+ * \sa \ref TutorialSparseSolverConcept, class UmfPackLU, class SparseLU
+ */
+
+
+inline int klu_solve(klu_symbolic *Symbolic, klu_numeric *Numeric, Index ldim, Index nrhs, double B [ ], klu_common *Common, double) {
+ return klu_solve(Symbolic, Numeric, internal::convert_index<int>(ldim), internal::convert_index<int>(nrhs), B, Common);
+}
+
+inline int klu_solve(klu_symbolic *Symbolic, klu_numeric *Numeric, Index ldim, Index nrhs, std::complex<double> B[], klu_common *Common, std::complex<double>) {
+ return klu_z_solve(Symbolic, Numeric, internal::convert_index<int>(ldim), internal::convert_index<int>(nrhs), &numext::real_ref(B[0]), Common);
+}
+
+inline int klu_tsolve(klu_symbolic *Symbolic, klu_numeric *Numeric, Index ldim, Index nrhs, double B[], klu_common *Common, double) {
+ return klu_tsolve(Symbolic, Numeric, internal::convert_index<int>(ldim), internal::convert_index<int>(nrhs), B, Common);
+}
+
+inline int klu_tsolve(klu_symbolic *Symbolic, klu_numeric *Numeric, Index ldim, Index nrhs, std::complex<double> B[], klu_common *Common, std::complex<double>) {
+ return klu_z_tsolve(Symbolic, Numeric, internal::convert_index<int>(ldim), internal::convert_index<int>(nrhs), &numext::real_ref(B[0]), 0, Common);
+}
+
+inline klu_numeric* klu_factor(int Ap [ ], int Ai [ ], double Ax [ ], klu_symbolic *Symbolic, klu_common *Common, double) {
+ return klu_factor(Ap, Ai, Ax, Symbolic, Common);
+}
+
+inline klu_numeric* klu_factor(int Ap[], int Ai[], std::complex<double> Ax[], klu_symbolic *Symbolic, klu_common *Common, std::complex<double>) {
+ return klu_z_factor(Ap, Ai, &numext::real_ref(Ax[0]), Symbolic, Common);
+}
+
+
+template<typename _MatrixType>
+class KLU : public SparseSolverBase<KLU<_MatrixType> >
+{
+ protected:
+ typedef SparseSolverBase<KLU<_MatrixType> > Base;
+ using Base::m_isInitialized;
+ public:
+ using Base::_solve_impl;
+ typedef _MatrixType MatrixType;
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::RealScalar RealScalar;
+ typedef typename MatrixType::StorageIndex StorageIndex;
+ typedef Matrix<Scalar,Dynamic,1> Vector;
+ typedef Matrix<int, 1, MatrixType::ColsAtCompileTime> IntRowVectorType;
+ typedef Matrix<int, MatrixType::RowsAtCompileTime, 1> IntColVectorType;
+ typedef SparseMatrix<Scalar> LUMatrixType;
+ typedef SparseMatrix<Scalar,ColMajor,int> KLUMatrixType;
+ typedef Ref<const KLUMatrixType, StandardCompressedFormat> KLUMatrixRef;
+ enum {
+ ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+ MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
+ };
+
+ public:
+
+ KLU()
+ : m_dummy(0,0), mp_matrix(m_dummy)
+ {
+ init();
+ }
+
+ template<typename InputMatrixType>
+ explicit KLU(const InputMatrixType& matrix)
+ : mp_matrix(matrix)
+ {
+ init();
+ compute(matrix);
+ }
+
+ ~KLU()
+ {
+ if(m_symbolic) klu_free_symbolic(&m_symbolic,&m_common);
+ if(m_numeric) klu_free_numeric(&m_numeric,&m_common);
+ }
+
+ inline Index rows() const { return mp_matrix.rows(); }
+ inline Index cols() const { return mp_matrix.cols(); }
+
+ /** \brief Reports whether previous computation was successful.
+ *
+ * \returns \c Success if computation was successful,
+ * \c NumericalIssue if the numerical factorization failed.
+ */
+ ComputationInfo info() const
+ {
+ eigen_assert(m_isInitialized && "Decomposition is not initialized.");
+ return m_info;
+ }
+#if 0 // not implemented yet
+ inline const LUMatrixType& matrixL() const
+ {
+ if (m_extractedDataAreDirty) extractData();
+ return m_l;
+ }
+
+ inline const LUMatrixType& matrixU() const
+ {
+ if (m_extractedDataAreDirty) extractData();
+ return m_u;
+ }
+
+ inline const IntColVectorType& permutationP() const
+ {
+ if (m_extractedDataAreDirty) extractData();
+ return m_p;
+ }
+
+ inline const IntRowVectorType& permutationQ() const
+ {
+ if (m_extractedDataAreDirty) extractData();
+ return m_q;
+ }
+#endif
+ /** Computes the sparse LU decomposition of \a matrix
+ * Note that the matrix should be column-major, and in compressed format for best performance.
+ * \sa SparseMatrix::makeCompressed().
+ */
+ template<typename InputMatrixType>
+ void compute(const InputMatrixType& matrix)
+ {
+ if(m_symbolic) klu_free_symbolic(&m_symbolic, &m_common);
+ if(m_numeric) klu_free_numeric(&m_numeric, &m_common);
+ grab(matrix.derived());
+ analyzePattern_impl();
+ factorize_impl();
+ }
+
+ /** Performs a symbolic decomposition on the sparsity pattern of \a matrix.
+ *
+ * This function is particularly useful when solving for several problems having the same structure.
+ *
+ * \sa factorize(), compute()
+ */
+ template<typename InputMatrixType>
+ void analyzePattern(const InputMatrixType& matrix)
+ {
+ if(m_symbolic) klu_free_symbolic(&m_symbolic, &m_common);
+ if(m_numeric) klu_free_numeric(&m_numeric, &m_common);
+
+ grab(matrix.derived());
+
+ analyzePattern_impl();
+ }
+
+
+ /** Provides access to the control settings structure used by KLU.
+ *
+ * See KLU documentation for details.
+ */
+ inline const klu_common& kluCommon() const
+ {
+ return m_common;
+ }
+
+ /** Provides access to the control settings structure used by KLU.
+ *
+ * This non-const variant allows the settings to be modified before calling compute() or factorize().
+ *
+ * See KLU documentation for details.
+ */
+ inline klu_common& kluCommon()
+ {
+ return m_common;
+ }
+
+ /** Performs a numeric decomposition of \a matrix
+ *
+ * The given matrix must have the same sparsity pattern as the matrix on which the pattern analysis has been performed.
+ *
+ * \sa analyzePattern(), compute()
+ */
+ template<typename InputMatrixType>
+ void factorize(const InputMatrixType& matrix)
+ {
+ eigen_assert(m_analysisIsOk && "KLU: you must first call analyzePattern()");
+ if(m_numeric)
+ klu_free_numeric(&m_numeric,&m_common);
+
+ grab(matrix.derived());
+
+ factorize_impl();
+ }
+
+ /** \internal */
+ template<typename BDerived,typename XDerived>
+ bool _solve_impl(const MatrixBase<BDerived> &b, MatrixBase<XDerived> &x) const;
+
+#if 0 // not implemented yet
+ Scalar determinant() const;
+
+ void extractData() const;
+#endif
+
+ protected:
+
+ void init()
+ {
+ m_info = InvalidInput;
+ m_isInitialized = false;
+ m_numeric = 0;
+ m_symbolic = 0;
+ m_extractedDataAreDirty = true;
+
+ klu_defaults(&m_common);
+ }
+
+ void analyzePattern_impl()
+ {
+ m_info = InvalidInput;
+ m_analysisIsOk = false;
+ m_factorizationIsOk = false;
+ m_symbolic = klu_analyze(internal::convert_index<int>(mp_matrix.rows()),
+ const_cast<StorageIndex*>(mp_matrix.outerIndexPtr()), const_cast<StorageIndex*>(mp_matrix.innerIndexPtr()),
+ &m_common);
+ if (m_symbolic) {
+ m_isInitialized = true;
+ m_info = Success;
+ m_analysisIsOk = true;
+ m_extractedDataAreDirty = true;
+ }
+ }
+
+ void factorize_impl()
+ {
+
+ m_numeric = klu_factor(const_cast<StorageIndex*>(mp_matrix.outerIndexPtr()), const_cast<StorageIndex*>(mp_matrix.innerIndexPtr()), const_cast<Scalar*>(mp_matrix.valuePtr()),
+ m_symbolic, &m_common, Scalar());
+
+
+ m_info = m_numeric ? Success : NumericalIssue;
+ m_factorizationIsOk = m_numeric ? 1 : 0;
+ m_extractedDataAreDirty = true;
+ }
+
+ template<typename MatrixDerived>
+ void grab(const EigenBase<MatrixDerived> &A)
+ {
+ mp_matrix.~KLUMatrixRef();
+ ::new (&mp_matrix) KLUMatrixRef(A.derived());
+ }
+
+ void grab(const KLUMatrixRef &A)
+ {
+ if(&(A.derived()) != &mp_matrix)
+ {
+ mp_matrix.~KLUMatrixRef();
+ ::new (&mp_matrix) KLUMatrixRef(A);
+ }
+ }
+
+ // cached data to reduce reallocation, etc.
+#if 0 // not implemented yet
+ mutable LUMatrixType m_l;
+ mutable LUMatrixType m_u;
+ mutable IntColVectorType m_p;
+ mutable IntRowVectorType m_q;
+#endif
+
+ KLUMatrixType m_dummy;
+ KLUMatrixRef mp_matrix;
+
+ klu_numeric* m_numeric;
+ klu_symbolic* m_symbolic;
+ klu_common m_common;
+ mutable ComputationInfo m_info;
+ int m_factorizationIsOk;
+ int m_analysisIsOk;
+ mutable bool m_extractedDataAreDirty;
+
+ private:
+ KLU(const KLU& ) { }
+};
+
+#if 0 // not implemented yet
+template<typename MatrixType>
+void KLU<MatrixType>::extractData() const
+{
+ if (m_extractedDataAreDirty)
+ {
+ eigen_assert(false && "KLU: extractData Not Yet Implemented");
+
+ // get size of the data
+ int lnz, unz, rows, cols, nz_udiag;
+ umfpack_get_lunz(&lnz, &unz, &rows, &cols, &nz_udiag, m_numeric, Scalar());
+
+ // allocate data
+ m_l.resize(rows,(std::min)(rows,cols));
+ m_l.resizeNonZeros(lnz);
+
+ m_u.resize((std::min)(rows,cols),cols);
+ m_u.resizeNonZeros(unz);
+
+ m_p.resize(rows);
+ m_q.resize(cols);
+
+ // extract
+ umfpack_get_numeric(m_l.outerIndexPtr(), m_l.innerIndexPtr(), m_l.valuePtr(),
+ m_u.outerIndexPtr(), m_u.innerIndexPtr(), m_u.valuePtr(),
+ m_p.data(), m_q.data(), 0, 0, 0, m_numeric);
+
+ m_extractedDataAreDirty = false;
+ }
+}
+
+template<typename MatrixType>
+typename KLU<MatrixType>::Scalar KLU<MatrixType>::determinant() const
+{
+ eigen_assert(false && "KLU: extractData Not Yet Implemented");
+ return Scalar();
+}
+#endif
+
+template<typename MatrixType>
+template<typename BDerived,typename XDerived>
+bool KLU<MatrixType>::_solve_impl(const MatrixBase<BDerived> &b, MatrixBase<XDerived> &x) const
+{
+ Index rhsCols = b.cols();
+ EIGEN_STATIC_ASSERT((XDerived::Flags&RowMajorBit)==0, THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
+ eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or analyzePattern()/factorize()");
+
+ x = b;
+ int info = klu_solve(m_symbolic, m_numeric, b.rows(), rhsCols, x.const_cast_derived().data(), const_cast<klu_common*>(&m_common), Scalar());
+
+ m_info = info!=0 ? Success : NumericalIssue;
+ return true;
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_KLUSUPPORT_H
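
A minimal usage sketch for the new KLU wrapper, assuming Eigen is configured with SuiteSparse/KLU available and the program is linked against the KLU library; the matrix values are arbitrary:

#include <Eigen/Sparse>
#include <Eigen/KLUSupport>

int main()
{
  typedef Eigen::SparseMatrix<double> SpMat;   // column-major by default

  SpMat A(3, 3);
  A.insert(0, 0) = 4.0;
  A.insert(1, 1) = 2.0;
  A.insert(2, 2) = 1.0;
  A.insert(0, 2) = 0.5;
  A.makeCompressed();                          // compressed storage, as the class doc requires

  Eigen::VectorXd b = Eigen::VectorXd::Ones(3);

  Eigen::KLU<SpMat> solver;
  solver.compute(A);                           // analyzePattern() followed by factorize()
  if (solver.info() != Eigen::Success)
    return 1;

  Eigen::VectorXd x = solver.solve(b);         // solve() is inherited from SparseSolverBase
  return solver.info() == Eigen::Success ? 0 : 1;
}
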
diff --git a/Eigen/src/LU/Determinant.h b/Eigen/src/LU/Determinant.h
index d6a3c1e5a..6af63a6e7 100644
--- a/Eigen/src/LU/Determinant.h
+++ b/Eigen/src/LU/Determinant.h
@@ -15,6 +15,7 @@ namespace Eigen {
namespace internal {
template<typename Derived>
+EIGEN_DEVICE_FUNC
inline const typename Derived::Scalar bruteforce_det3_helper
(const MatrixBase<Derived>& matrix, int a, int b, int c)
{
@@ -23,6 +24,7 @@ inline const typename Derived::Scalar bruteforce_det3_helper
}
template<typename Derived>
+EIGEN_DEVICE_FUNC
const typename Derived::Scalar bruteforce_det4_helper
(const MatrixBase<Derived>& matrix, int j, int k, int m, int n)
{
@@ -44,7 +46,8 @@ template<typename Derived,
template<typename Derived> struct determinant_impl<Derived, 1>
{
- static inline typename traits<Derived>::Scalar run(const Derived& m)
+ static inline EIGEN_DEVICE_FUNC
+ typename traits<Derived>::Scalar run(const Derived& m)
{
return m.coeff(0,0);
}
@@ -52,7 +55,8 @@ template<typename Derived> struct determinant_impl<Derived, 1>
template<typename Derived> struct determinant_impl<Derived, 2>
{
- static inline typename traits<Derived>::Scalar run(const Derived& m)
+ static inline EIGEN_DEVICE_FUNC
+ typename traits<Derived>::Scalar run(const Derived& m)
{
return m.coeff(0,0) * m.coeff(1,1) - m.coeff(1,0) * m.coeff(0,1);
}
@@ -60,7 +64,8 @@ template<typename Derived> struct determinant_impl<Derived, 2>
template<typename Derived> struct determinant_impl<Derived, 3>
{
- static inline typename traits<Derived>::Scalar run(const Derived& m)
+ static inline EIGEN_DEVICE_FUNC
+ typename traits<Derived>::Scalar run(const Derived& m)
{
return bruteforce_det3_helper(m,0,1,2)
- bruteforce_det3_helper(m,1,0,2)
@@ -70,7 +75,8 @@ template<typename Derived> struct determinant_impl<Derived, 3>
template<typename Derived> struct determinant_impl<Derived, 4>
{
- static typename traits<Derived>::Scalar run(const Derived& m)
+ static EIGEN_DEVICE_FUNC
+ typename traits<Derived>::Scalar run(const Derived& m)
{
// trick by Martin Costabel to compute 4x4 det with only 30 muls
return bruteforce_det4_helper(m,0,1,2,3)
@@ -89,6 +95,7 @@ template<typename Derived> struct determinant_impl<Derived, 4>
* \returns the determinant of this matrix
*/
template<typename Derived>
+EIGEN_DEVICE_FUNC
inline typename internal::traits<Derived>::Scalar MatrixBase<Derived>::determinant() const
{
eigen_assert(rows() == cols());
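
The EIGEN_DEVICE_FUNC annotations above make these helpers callable from device code; on the host the public entry point is unchanged. A small sketch with arbitrary values:

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::Matrix3d m;
  m << 1, 2, 3,
       4, 5, 6,
       7, 8, 10;

  // For fixed sizes up to 4x4, determinant() dispatches to the
  // determinant_impl specializations annotated above (3x3 uses
  // bruteforce_det3_helper); larger or dynamic-size matrices fall back
  // to an LU-based computation.
  std::cout << m.determinant() << std::endl;   // prints -3
  return 0;
}
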
diff --git a/Eigen/src/LU/FullPivLU.h b/Eigen/src/LU/FullPivLU.h
index 03b6af706..344ec8926 100644
--- a/Eigen/src/LU/FullPivLU.h
+++ b/Eigen/src/LU/FullPivLU.h
@@ -48,12 +48,12 @@ template<typename _MatrixType> struct traits<FullPivLU<_MatrixType> >
* The data of the LU decomposition can be directly accessed through the methods matrixLU(),
* permutationP(), permutationQ().
*
- * As an exemple, here is how the original matrix can be retrieved:
+ * As an example, here is how the original matrix can be retrieved:
* \include class_FullPivLU.cpp
* Output: \verbinclude class_FullPivLU.out
*
* This class supports the \link InplaceDecomposition inplace decomposition \endlink mechanism.
- *
+ *
* \sa MatrixBase::fullPivLu(), MatrixBase::determinant(), MatrixBase::inverse()
*/
template<typename _MatrixType> class FullPivLU
@@ -320,7 +320,7 @@ template<typename _MatrixType> class FullPivLU
return m_usePrescribedThreshold ? m_prescribedThreshold
// this formula comes from experimenting (see "LU precision tuning" thread on the list)
// and turns out to be identical to Higham's formula used already in LDLt.
- : NumTraits<Scalar>::epsilon() * m_lu.diagonalSize();
+ : NumTraits<Scalar>::epsilon() * RealScalar(m_lu.diagonalSize());
}
/** \returns the rank of the matrix of which *this is the LU decomposition.
@@ -411,11 +411,9 @@ template<typename _MatrixType> class FullPivLU
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename RhsType, typename DstType>
- EIGEN_DEVICE_FUNC
void _solve_impl(const RhsType &rhs, DstType &dst) const;
template<bool Conjugate, typename RhsType, typename DstType>
- EIGEN_DEVICE_FUNC
void _solve_impl_transposed(const RhsType &rhs, DstType &dst) const;
#endif
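
The threshold returned above feeds the rank computations; a short sketch of the user-facing side, using an arbitrary rank-deficient matrix:

#include <Eigen/Dense>

int main()
{
  Eigen::MatrixXd A(3, 3);
  A << 1, 2, 3,
       2, 4, 6,   // second row is twice the first, so rank(A) == 2
       1, 0, 1;

  Eigen::FullPivLU<Eigen::MatrixXd> lu(A);

  // threshold() defaults to epsilon()*diagonalSize(), the expression fixed
  // in the hunk above; a custom value can also be prescribed:
  lu.setThreshold(1e-12);

  return (lu.rank() == 2 && !lu.isInvertible()) ? 0 : 1;
}
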
diff --git a/Eigen/src/LU/InverseImpl.h b/Eigen/src/LU/InverseImpl.h
index 018f99b58..1bab00c01 100644
--- a/Eigen/src/LU/InverseImpl.h
+++ b/Eigen/src/LU/InverseImpl.h
@@ -290,6 +290,7 @@ template<typename DstXprType, typename XprType>
struct Assignment<DstXprType, Inverse<XprType>, internal::assign_op<typename DstXprType::Scalar,typename XprType::Scalar>, Dense2Dense>
{
typedef Inverse<XprType> SrcXprType;
+ EIGEN_DEVICE_FUNC
static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename XprType::Scalar> &)
{
Index dstRows = src.rows();
@@ -332,6 +333,7 @@ struct Assignment<DstXprType, Inverse<XprType>, internal::assign_op<typename Dst
* \sa computeInverseAndDetWithCheck()
*/
template<typename Derived>
+EIGEN_DEVICE_FUNC
inline const Inverse<Derived> MatrixBase<Derived>::inverse() const
{
EIGEN_STATIC_ASSERT(!NumTraits<Scalar>::IsInteger,THIS_FUNCTION_IS_NOT_FOR_INTEGER_NUMERIC_TYPES)
@@ -404,7 +406,7 @@ inline void MatrixBase<Derived>::computeInverseWithCheck(
const RealScalar& absDeterminantThreshold
) const
{
- RealScalar determinant;
+ Scalar determinant;
// i'd love to put some static assertions there, but SFINAE means that they have no effect...
eigen_assert(rows() == cols());
computeInverseAndDetWithCheck(inverse,determinant,invertible,absDeterminantThreshold);
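
The local variable type matters for complex matrices, where the determinant is a (complex) Scalar rather than a RealScalar; a sketch of the related public API, with arbitrary values:

#include <Eigen/Dense>
#include <complex>

int main()
{
  typedef std::complex<double> C;

  Eigen::Matrix2cd m;
  m << C(1, 1), C(0, 2),
       C(0, 0), C(3, -1);

  Eigen::Matrix2cd inv;
  C det;                 // Scalar, not RealScalar, for a complex matrix
  bool invertible;
  m.computeInverseAndDetWithCheck(inv, det, invertible);

  return invertible ? 0 : 1;   // here det == 4+2i, so the matrix is invertible
}
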
diff --git a/Eigen/src/LU/PartialPivLU.h b/Eigen/src/LU/PartialPivLU.h
index d43961887..bfcd2c95b 100644
--- a/Eigen/src/LU/PartialPivLU.h
+++ b/Eigen/src/LU/PartialPivLU.h
@@ -420,8 +420,8 @@ struct partial_lu_impl
* \returns The index of the first pivot which is exactly zero if any, or a negative number otherwise.
*
* \note This very low level interface using pointers, etc. is to:
- * 1 - reduce the number of instanciations to the strict minimum
- * 2 - avoid infinite recursion of the instanciations with Block<Block<Block<...> > >
+ * 1 - reduce the number of instantiations to the strict minimum
+ * 2 - avoid infinite recursion of the instantiations with Block<Block<Block<...> > >
*/
static Index blocked_lu(Index rows, Index cols, Scalar* lu_data, Index luStride, PivIndex* row_transpositions, PivIndex& nb_transpositions, Index maxBlockSize=256)
{
diff --git a/Eigen/src/OrderingMethods/Eigen_Colamd.h b/Eigen/src/OrderingMethods/Eigen_Colamd.h
index 933cd564b..67fcad3f7 100644
--- a/Eigen/src/OrderingMethods/Eigen_Colamd.h
+++ b/Eigen/src/OrderingMethods/Eigen_Colamd.h
@@ -1004,7 +1004,7 @@ static IndexType find_ordering /* return the number of garbage collections */
COLAMD_ASSERT (head [min_score] >= COLAMD_EMPTY) ;
/* get pivot column from head of minimum degree list */
- while (head [min_score] == COLAMD_EMPTY && min_score < n_col)
+ while (min_score < n_col && head [min_score] == COLAMD_EMPTY)
{
min_score++ ;
}
@@ -1493,7 +1493,7 @@ static inline void order_children
c = Col [c].shared1.parent ;
/* continue until we hit an ordered column. There are */
- /* guarranteed not to be anymore unordered columns */
+ /* guaranteed to be no more unordered columns */
/* above an ordered column */
} while (Col [c].shared2.order == COLAMD_EMPTY) ;
@@ -1638,7 +1638,7 @@ static void detect_super_cols
COLAMD_ASSERT (ROW_IS_ALIVE (*cp1)) ;
COLAMD_ASSERT (ROW_IS_ALIVE (*cp2)) ;
/* row indices will same order for both supercols, */
- /* no gather scatter nessasary */
+ /* no gather scatter necessary */
if (*cp1++ != *cp2++)
{
break ;
@@ -1688,7 +1688,7 @@ static void detect_super_cols
/*
Defragments and compacts columns and rows in the workspace A. Used when
- all avaliable memory has been used while performing row merging. Returns
+ all available memory has been used while performing row merging. Returns
the index of the first free position in A, after garbage collection. The
time taken by this routine is linear is the size of the array A, which is
itself linear in the number of nonzeros in the input matrix.
diff --git a/Eigen/src/OrderingMethods/Ordering.h b/Eigen/src/OrderingMethods/Ordering.h
index 7ea9b14d7..34dbef487 100644
--- a/Eigen/src/OrderingMethods/Ordering.h
+++ b/Eigen/src/OrderingMethods/Ordering.h
@@ -31,7 +31,7 @@ void ordering_helper_at_plus_a(const MatrixType& A, MatrixType& symmat)
for (int i = 0; i < C.rows(); i++)
{
for (typename MatrixType::InnerIterator it(C, i); it; ++it)
- it.valueRef() = 0.0;
+ it.valueRef() = typename MatrixType::Scalar(0);
}
symmat = C + A;
}
diff --git a/Eigen/src/PaStiXSupport/PaStiXSupport.h b/Eigen/src/PaStiXSupport/PaStiXSupport.h
index d2ebfd7bb..37426877a 100644
--- a/Eigen/src/PaStiXSupport/PaStiXSupport.h
+++ b/Eigen/src/PaStiXSupport/PaStiXSupport.h
@@ -64,28 +64,28 @@ namespace internal
typedef typename _MatrixType::StorageIndex StorageIndex;
};
- void eigen_pastix(pastix_data_t **pastix_data, int pastix_comm, int n, int *ptr, int *idx, float *vals, int *perm, int * invp, float *x, int nbrhs, int *iparm, double *dparm)
+ inline void eigen_pastix(pastix_data_t **pastix_data, int pastix_comm, int n, int *ptr, int *idx, float *vals, int *perm, int * invp, float *x, int nbrhs, int *iparm, double *dparm)
{
if (n == 0) { ptr = NULL; idx = NULL; vals = NULL; }
if (nbrhs == 0) {x = NULL; nbrhs=1;}
s_pastix(pastix_data, pastix_comm, n, ptr, idx, vals, perm, invp, x, nbrhs, iparm, dparm);
}
- void eigen_pastix(pastix_data_t **pastix_data, int pastix_comm, int n, int *ptr, int *idx, double *vals, int *perm, int * invp, double *x, int nbrhs, int *iparm, double *dparm)
+ inline void eigen_pastix(pastix_data_t **pastix_data, int pastix_comm, int n, int *ptr, int *idx, double *vals, int *perm, int * invp, double *x, int nbrhs, int *iparm, double *dparm)
{
if (n == 0) { ptr = NULL; idx = NULL; vals = NULL; }
if (nbrhs == 0) {x = NULL; nbrhs=1;}
d_pastix(pastix_data, pastix_comm, n, ptr, idx, vals, perm, invp, x, nbrhs, iparm, dparm);
}
- void eigen_pastix(pastix_data_t **pastix_data, int pastix_comm, int n, int *ptr, int *idx, std::complex<float> *vals, int *perm, int * invp, std::complex<float> *x, int nbrhs, int *iparm, double *dparm)
+ inline void eigen_pastix(pastix_data_t **pastix_data, int pastix_comm, int n, int *ptr, int *idx, std::complex<float> *vals, int *perm, int * invp, std::complex<float> *x, int nbrhs, int *iparm, double *dparm)
{
if (n == 0) { ptr = NULL; idx = NULL; vals = NULL; }
if (nbrhs == 0) {x = NULL; nbrhs=1;}
c_pastix(pastix_data, pastix_comm, n, ptr, idx, reinterpret_cast<PASTIX_COMPLEX*>(vals), perm, invp, reinterpret_cast<PASTIX_COMPLEX*>(x), nbrhs, iparm, dparm);
}
- void eigen_pastix(pastix_data_t **pastix_data, int pastix_comm, int n, int *ptr, int *idx, std::complex<double> *vals, int *perm, int * invp, std::complex<double> *x, int nbrhs, int *iparm, double *dparm)
+ inline void eigen_pastix(pastix_data_t **pastix_data, int pastix_comm, int n, int *ptr, int *idx, std::complex<double> *vals, int *perm, int * invp, std::complex<double> *x, int nbrhs, int *iparm, double *dparm)
{
if (n == 0) { ptr = NULL; idx = NULL; vals = NULL; }
if (nbrhs == 0) {x = NULL; nbrhs=1;}
@@ -203,7 +203,7 @@ class PastixBase : public SparseSolverBase<Derived>
/** \brief Reports whether previous computation was successful.
*
- * \returns \c Success if computation was succesful,
+ * \returns \c Success if computation was successful,
* \c NumericalIssue if the PaStiX reports a problem
* \c InvalidInput if the input matrix is invalid
*
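
Marking these header-defined overloads inline avoids multiple-definition link errors when the header is included from several translation units; a generic sketch of the issue, with hypothetical names:

// helpers.h -- hypothetical header included by more than one .cpp file
#ifndef HELPERS_H
#define HELPERS_H

// Without 'inline', every translation unit including this header emits an
// external definition of scale(), and linking them together fails with a
// duplicate-symbol error. 'inline' permits the repeated identical definitions.
inline double scale(double x) { return 2.0 * x; }

#endif // HELPERS_H
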
diff --git a/Eigen/src/PardisoSupport/PardisoSupport.h b/Eigen/src/PardisoSupport/PardisoSupport.h
index 091c3970e..fb2ba04b4 100644
--- a/Eigen/src/PardisoSupport/PardisoSupport.h
+++ b/Eigen/src/PardisoSupport/PardisoSupport.h
@@ -140,7 +140,7 @@ class PardisoImpl : public SparseSolverBase<Derived>
/** \brief Reports whether previous computation was successful.
*
- * \returns \c Success if computation was succesful,
+ * \returns \c Success if computation was successful,
* \c NumericalIssue if the matrix appears to be negative.
*/
ComputationInfo info() const
diff --git a/Eigen/src/QR/ColPivHouseholderQR.h b/Eigen/src/QR/ColPivHouseholderQR.h
index 0e47c8332..1faa3442e 100644
--- a/Eigen/src/QR/ColPivHouseholderQR.h
+++ b/Eigen/src/QR/ColPivHouseholderQR.h
@@ -402,7 +402,7 @@ template<typename _MatrixType> class ColPivHouseholderQR
*/
RealScalar maxPivot() const { return m_maxpivot; }
- /** \brief Reports whether the QR factorization was succesful.
+ /** \brief Reports whether the QR factorization was successful.
*
* \note This function always returns \c Success. It is provided for compatibility
* with other factorization routines.
@@ -416,7 +416,6 @@ template<typename _MatrixType> class ColPivHouseholderQR
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename RhsType, typename DstType>
- EIGEN_DEVICE_FUNC
void _solve_impl(const RhsType &rhs, DstType &dst) const;
#endif
@@ -506,8 +505,8 @@ void ColPivHouseholderQR<MatrixType>::computeInPlace()
m_colNormsUpdated.coeffRef(k) = m_colNormsDirect.coeffRef(k);
}
- RealScalar threshold_helper = numext::abs2<Scalar>(m_colNormsUpdated.maxCoeff() * NumTraits<Scalar>::epsilon()) / RealScalar(rows);
- RealScalar norm_downdate_threshold = numext::sqrt(NumTraits<Scalar>::epsilon());
+ RealScalar threshold_helper = numext::abs2<RealScalar>(m_colNormsUpdated.maxCoeff() * NumTraits<RealScalar>::epsilon()) / RealScalar(rows);
+ RealScalar norm_downdate_threshold = numext::sqrt(NumTraits<RealScalar>::epsilon());
m_nonzero_pivots = size; // the generic case is that in which all pivots are nonzero (invertible case)
m_maxpivot = RealScalar(0);
@@ -553,12 +552,12 @@ void ColPivHouseholderQR<MatrixType>::computeInPlace()
// http://www.netlib.org/lapack/lawnspdf/lawn176.pdf
// and used in LAPACK routines xGEQPF and xGEQP3.
// See lines 278-297 in http://www.netlib.org/lapack/explore-html/dc/df4/sgeqpf_8f_source.html
- if (m_colNormsUpdated.coeffRef(j) != 0) {
+ if (m_colNormsUpdated.coeffRef(j) != RealScalar(0)) {
RealScalar temp = abs(m_qr.coeffRef(k, j)) / m_colNormsUpdated.coeffRef(j);
temp = (RealScalar(1) + temp) * (RealScalar(1) - temp);
- temp = temp < 0 ? 0 : temp;
- RealScalar temp2 = temp * numext::abs2<Scalar>(m_colNormsUpdated.coeffRef(j) /
- m_colNormsDirect.coeffRef(j));
+ temp = temp < RealScalar(0) ? RealScalar(0) : temp;
+ RealScalar temp2 = temp * numext::abs2<RealScalar>(m_colNormsUpdated.coeffRef(j) /
+ m_colNormsDirect.coeffRef(j));
if (temp2 <= norm_downdate_threshold) {
// The updated norm has become too inaccurate so re-compute the column
// norm directly.
@@ -596,11 +595,7 @@ void ColPivHouseholderQR<_MatrixType>::_solve_impl(const RhsType &rhs, DstType &
typename RhsType::PlainObject c(rhs);
- // Note that the matrix Q = H_0^* H_1^*... so its inverse is Q^* = (H_0 H_1 ...)^T
- c.applyOnTheLeft(householderSequence(m_qr, m_hCoeffs)
- .setLength(nonzero_pivots)
- .transpose()
- );
+ c.applyOnTheLeft(householderQ().setLength(nonzero_pivots).adjoint() );
m_qr.topLeftCorner(nonzero_pivots, nonzero_pivots)
.template triangularView<Upper>()
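
The simplified _solve_impl above applies Q^* through householderQ(); nothing changes on the user's side. A least-squares sketch with arbitrary random data:

#include <Eigen/Dense>

int main()
{
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(6, 4);
  Eigen::VectorXd b = Eigen::VectorXd::Random(6);

  Eigen::ColPivHouseholderQR<Eigen::MatrixXd> qr(A);

  // For a full-column-rank tall A this returns the least-squares solution;
  // internally the right-hand side is multiplied by householderQ().adjoint(),
  // restricted to the nonzero pivots, as in the hunk above.
  Eigen::VectorXd x = qr.solve(b);

  return qr.info() == Eigen::Success ? 0 : 1;
}
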
diff --git a/Eigen/src/QR/CompleteOrthogonalDecomposition.h b/Eigen/src/QR/CompleteOrthogonalDecomposition.h
index 34c637b70..03017a375 100644
--- a/Eigen/src/QR/CompleteOrthogonalDecomposition.h
+++ b/Eigen/src/QR/CompleteOrthogonalDecomposition.h
@@ -353,7 +353,7 @@ class CompleteOrthogonalDecomposition {
inline RealScalar maxPivot() const { return m_cpqr.maxPivot(); }
/** \brief Reports whether the complete orthogonal decomposition was
- * succesful.
+ * successful.
*
* \note This function always returns \c Success. It is provided for
* compatibility
@@ -367,7 +367,7 @@ class CompleteOrthogonalDecomposition {
#ifndef EIGEN_PARSED_BY_DOXYGEN
template <typename RhsType, typename DstType>
- EIGEN_DEVICE_FUNC void _solve_impl(const RhsType& rhs, DstType& dst) const;
+ void _solve_impl(const RhsType& rhs, DstType& dst) const;
#endif
protected:
@@ -452,7 +452,7 @@ void CompleteOrthogonalDecomposition<MatrixType>::computeInPlace()
// Apply Z(k) to the first k rows of X_k
m_cpqr.m_qr.topRightCorner(k, cols - rank + 1)
.applyHouseholderOnTheRight(
- m_cpqr.m_qr.row(k).tail(cols - rank).transpose(), m_zCoeffs(k),
+ m_cpqr.m_qr.row(k).tail(cols - rank).adjoint(), m_zCoeffs(k),
&m_temp(0));
}
if (k != rank - 1) {
@@ -500,11 +500,8 @@ void CompleteOrthogonalDecomposition<_MatrixType>::_solve_impl(
}
// Compute c = Q^* * rhs
- // Note that the matrix Q = H_0^* H_1^*... so its inverse is
- // Q^* = (H_0 H_1 ...)^T
typename RhsType::PlainObject c(rhs);
- c.applyOnTheLeft(
- householderSequence(matrixQTZ(), hCoeffs()).setLength(rank).transpose());
+ c.applyOnTheLeft(matrixQ().setLength(rank).adjoint());
// Solve T z = c(1:rank, :)
dst.topRows(rank) = matrixT()
diff --git a/Eigen/src/QR/FullPivHouseholderQR.h b/Eigen/src/QR/FullPivHouseholderQR.h
index e489bddc2..c31e47cc4 100644
--- a/Eigen/src/QR/FullPivHouseholderQR.h
+++ b/Eigen/src/QR/FullPivHouseholderQR.h
@@ -392,22 +392,21 @@ template<typename _MatrixType> class FullPivHouseholderQR
* diagonal coefficient of U.
*/
RealScalar maxPivot() const { return m_maxpivot; }
-
+
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename RhsType, typename DstType>
- EIGEN_DEVICE_FUNC
void _solve_impl(const RhsType &rhs, DstType &dst) const;
#endif
protected:
-
+
static void check_template_parameters()
{
EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
}
-
+
void computeInPlace();
-
+
MatrixType m_qr;
HCoeffsType m_hCoeffs;
IntDiagSizeVectorType m_rows_transpositions;
diff --git a/Eigen/src/QR/HouseholderQR.h b/Eigen/src/QR/HouseholderQR.h
index 3513d995c..33cb9c8ff 100644
--- a/Eigen/src/QR/HouseholderQR.h
+++ b/Eigen/src/QR/HouseholderQR.h
@@ -204,28 +204,27 @@ template<typename _MatrixType> class HouseholderQR
inline Index rows() const { return m_qr.rows(); }
inline Index cols() const { return m_qr.cols(); }
-
+
/** \returns a const reference to the vector of Householder coefficients used to represent the factor \c Q.
*
* For advanced uses only.
*/
const HCoeffsType& hCoeffs() const { return m_hCoeffs; }
-
+
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename RhsType, typename DstType>
- EIGEN_DEVICE_FUNC
void _solve_impl(const RhsType &rhs, DstType &dst) const;
#endif
protected:
-
+
static void check_template_parameters()
{
EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
}
void computeInPlace();
-
+
MatrixType m_qr;
HCoeffsType m_hCoeffs;
RowVectorType m_temp;
@@ -292,7 +291,7 @@ template<typename MatrixQR, typename HCoeffs,
bool InnerStrideIsOne = (MatrixQR::InnerStrideAtCompileTime == 1 && HCoeffs::InnerStrideAtCompileTime == 1)>
struct householder_qr_inplace_blocked
{
- // This is specialized for MKL-supported Scalar types in HouseholderQR_MKL.h
+ // This is specialized for LAPACK-supported Scalar types in HouseholderQR_LAPACKE.h
static void run(MatrixQR& mat, HCoeffs& hCoeffs, Index maxBlockSize=32,
typename MatrixQR::Scalar* tempData = 0)
{
@@ -354,11 +353,7 @@ void HouseholderQR<_MatrixType>::_solve_impl(const RhsType &rhs, DstType &dst) c
typename RhsType::PlainObject c(rhs);
- // Note that the matrix Q = H_0^* H_1^*... so its inverse is Q^* = (H_0 H_1 ...)^T
- c.applyOnTheLeft(householderSequence(
- m_qr.leftCols(rank),
- m_hCoeffs.head(rank)).transpose()
- );
+ c.applyOnTheLeft(householderQ().setLength(rank).adjoint() );
m_qr.topLeftCorner(rank, rank)
.template triangularView<Upper>()
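
The same Q^* application is available through the public API; a sketch that mirrors what the new _solve_impl does, using an arbitrary complex-valued system:

#include <Eigen/Dense>

int main()
{
  Eigen::MatrixXcd A = Eigen::MatrixXcd::Random(5, 3);
  Eigen::VectorXcd b = Eigen::VectorXcd::Random(5);

  Eigen::HouseholderQR<Eigen::MatrixXcd> qr(A);

  // c = Q^* b, applied as a Householder sequence; the adjoint (not the
  // transpose) is what makes this correct for complex scalars.
  Eigen::VectorXcd c = b;
  c.applyOnTheLeft(qr.householderQ().adjoint());

  // Least-squares solution: back-substitute with the top 3x3 block of R.
  Eigen::VectorXcd x = qr.matrixQR().topLeftCorner(3, 3)
                         .triangularView<Eigen::Upper>()
                         .solve(c.head(3));

  return x.size() == 3 ? 0 : 1;
}
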
diff --git a/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h b/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h
index 953d57c9d..1a5c5254e 100644
--- a/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h
+++ b/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h
@@ -220,7 +220,7 @@ class SPQR : public SparseSolverBase<SPQR<_MatrixType> >
/** \brief Reports whether previous computation was successful.
*
- * \returns \c Success if computation was succesful,
+ * \returns \c Success if computation was successful,
* \c NumericalIssue if the sparse QR can not be computed
*/
ComputationInfo info() const
diff --git a/Eigen/src/SVD/BDCSVD.h b/Eigen/src/SVD/BDCSVD.h
index 25fca6f4d..4daa9dd21 100644
--- a/Eigen/src/SVD/BDCSVD.h
+++ b/Eigen/src/SVD/BDCSVD.h
@@ -11,7 +11,7 @@
// Copyright (C) 2013 Jean Ceccato <jean.ceccato@ensimag.fr>
// Copyright (C) 2013 Pierre Zoppitelli <pierre.zoppitelli@ensimag.fr>
// Copyright (C) 2013 Jitse Niesen <jitse@maths.leeds.ac.uk>
-// Copyright (C) 2014-2016 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2014-2017 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@@ -22,6 +22,11 @@
// #define EIGEN_BDCSVD_DEBUG_VERBOSE
// #define EIGEN_BDCSVD_SANITY_CHECKS
+#ifdef EIGEN_BDCSVD_SANITY_CHECKS
+#undef eigen_internal_assert
+#define eigen_internal_assert(X) assert(X);
+#endif
+
namespace Eigen {
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
@@ -57,7 +62,7 @@ struct traits<BDCSVD<_MatrixType> >
* recommended and can several order of magnitude faster.
*
* \warning this algorithm is unlikely to provide accurate result when compiled with unsafe math optimizations.
- * For instance, this concerns Intel's compiler (ICC), which perfroms such optimization by default unless
+ * For instance, this concerns Intel's compiler (ICC), which performs such optimization by default unless
* you compile with the \c -fp-model \c precise option. Likewise, the \c -ffast-math option of GCC or clang will
* significantly degrade the accuracy.
*
@@ -77,6 +82,7 @@ public:
typedef _MatrixType MatrixType;
typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
+ typedef typename NumTraits<RealScalar>::Literal Literal;
enum {
RowsAtCompileTime = MatrixType::RowsAtCompileTime,
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
@@ -211,7 +217,7 @@ public:
// Method to allocate and initialize matrix and attributes
template<typename MatrixType>
-void BDCSVD<MatrixType>::allocate(Index rows, Index cols, unsigned int computationOptions)
+void BDCSVD<MatrixType>::allocate(Eigen::Index rows, Eigen::Index cols, unsigned int computationOptions)
{
m_isTranspose = (cols > rows);
@@ -259,7 +265,7 @@ BDCSVD<MatrixType>& BDCSVD<MatrixType>::compute(const MatrixType& matrix, unsign
//**** step 0 - Copy the input matrix and apply scaling to reduce over/under-flows
RealScalar scale = matrix.cwiseAbs().maxCoeff();
- if(scale==RealScalar(0)) scale = RealScalar(1);
+ if(scale==Literal(0)) scale = Literal(1);
MatrixX copy;
if (m_isTranspose) copy = matrix.adjoint()/scale;
else copy = matrix/scale;
@@ -351,13 +357,13 @@ void BDCSVD<MatrixType>::structured_update(Block<MatrixXr,Dynamic,Dynamic> A, co
Index k1=0, k2=0;
for(Index j=0; j<n; ++j)
{
- if( (A.col(j).head(n1).array()!=0).any() )
+ if( (A.col(j).head(n1).array()!=Literal(0)).any() )
{
A1.col(k1) = A.col(j).head(n1);
B1.row(k1) = B.row(j);
++k1;
}
- if( (A.col(j).tail(n2).array()!=0).any() )
+ if( (A.col(j).tail(n2).array()!=Literal(0)).any() )
{
A2.col(k2) = A.col(j).tail(n2);
B2.row(k2) = B.row(j);
@@ -387,7 +393,7 @@ void BDCSVD<MatrixType>::structured_update(Block<MatrixXr,Dynamic,Dynamic> A, co
//@param shift : Each time one takes the left submatrix, one must add 1 to the shift. Why? Because! We actually want the last column of the U submatrix
// to become the first column (*coeff) and to shift all the other columns to the right. There are more details on the reference paper.
template<typename MatrixType>
-void BDCSVD<MatrixType>::divide (Index firstCol, Index lastCol, Index firstRowW, Index firstColW, Index shift)
+void BDCSVD<MatrixType>::divide (Eigen::Index firstCol, Eigen::Index lastCol, Eigen::Index firstRowW, Eigen::Index firstColW, Eigen::Index shift)
{
// requires rows = cols + 1;
using std::pow;
@@ -449,11 +455,11 @@ void BDCSVD<MatrixType>::divide (Index firstCol, Index lastCol, Index firstRowW,
l = m_naiveU.row(1).segment(firstCol, k);
f = m_naiveU.row(0).segment(firstCol + k + 1, n - k - 1);
}
- if (m_compV) m_naiveV(firstRowW+k, firstColW) = 1;
+ if (m_compV) m_naiveV(firstRowW+k, firstColW) = Literal(1);
if (r0<considerZero)
{
- c0 = 1;
- s0 = 0;
+ c0 = Literal(1);
+ s0 = Literal(0);
}
else
{
@@ -567,14 +573,14 @@ void BDCSVD<MatrixType>::divide (Index firstCol, Index lastCol, Index firstRowW,
// handling of round-off errors, be consistent in ordering
// For instance, to solve the secular equation using FMM, see http://www.stat.uchicago.edu/~lekheng/courses/302/classics/greengard-rokhlin.pdf
template <typename MatrixType>
-void BDCSVD<MatrixType>::computeSVDofM(Index firstCol, Index n, MatrixXr& U, VectorType& singVals, MatrixXr& V)
+void BDCSVD<MatrixType>::computeSVDofM(Eigen::Index firstCol, Eigen::Index n, MatrixXr& U, VectorType& singVals, MatrixXr& V)
{
const RealScalar considerZero = (std::numeric_limits<RealScalar>::min)();
using std::abs;
ArrayRef col0 = m_computed.col(firstCol).segment(firstCol, n);
m_workspace.head(n) = m_computed.block(firstCol, firstCol, n, n).diagonal();
ArrayRef diag = m_workspace.head(n);
- diag(0) = 0;
+ diag(0) = Literal(0);
// Allocate space for singular values and vectors
singVals.resize(n);
@@ -590,7 +596,7 @@ void BDCSVD<MatrixType>::computeSVDofM(Index firstCol, Index n, MatrixXr& U, Vec
// but others are interleaved and we must ignore them at this stage.
// To this end, let's compute a permutation skipping them:
Index actual_n = n;
- while(actual_n>1 && diag(actual_n-1)==0) --actual_n;
+ while(actual_n>1 && diag(actual_n-1)==Literal(0)) {--actual_n; eigen_internal_assert(col0(actual_n)==Literal(0)); }
Index m = 0; // size of the deflated problem
for(Index k=0;k<actual_n;++k)
if(abs(col0(k))>considerZero)
@@ -617,13 +623,11 @@ void BDCSVD<MatrixType>::computeSVDofM(Index firstCol, Index n, MatrixXr& U, Vec
std::cout << " shift: " << shifts.transpose() << "\n";
{
- Index actual_n = n;
- while(actual_n>1 && abs(col0(actual_n-1))<considerZero) --actual_n;
std::cout << "\n\n mus: " << mus.head(actual_n).transpose() << "\n\n";
std::cout << " check1 (expect0) : " << ((singVals.array()-(shifts+mus)) / singVals.array()).head(actual_n).transpose() << "\n\n";
+ assert((((singVals.array()-(shifts+mus)) / singVals.array()).head(actual_n) >= 0).all());
std::cout << " check2 (>0) : " << ((singVals.array()-diag) / singVals.array()).head(actual_n).transpose() << "\n\n";
- std::cout << " check3 (>0) : " << ((diag.segment(1,actual_n-1)-singVals.head(actual_n-1).array()) / singVals.head(actual_n-1).array()).transpose() << "\n\n\n";
- std::cout << " check4 (>0) : " << ((singVals.segment(1,actual_n-1)-singVals.head(actual_n-1))).transpose() << "\n\n\n";
+ assert((((singVals.array()-diag) / singVals.array()).head(actual_n) >= 0).all());
}
#endif
@@ -651,13 +655,13 @@ void BDCSVD<MatrixType>::computeSVDofM(Index firstCol, Index n, MatrixXr& U, Vec
#endif
#ifdef EIGEN_BDCSVD_SANITY_CHECKS
- assert(U.allFinite());
- assert(V.allFinite());
- assert((U.transpose() * U - MatrixXr(MatrixXr::Identity(U.cols(),U.cols()))).norm() < 1e-14 * n);
- assert((V.transpose() * V - MatrixXr(MatrixXr::Identity(V.cols(),V.cols()))).norm() < 1e-14 * n);
assert(m_naiveU.allFinite());
assert(m_naiveV.allFinite());
assert(m_computed.allFinite());
+ assert(U.allFinite());
+ assert(V.allFinite());
+// assert((U.transpose() * U - MatrixXr(MatrixXr::Identity(U.cols(),U.cols()))).norm() < 100*NumTraits<RealScalar>::epsilon() * n);
+// assert((V.transpose() * V - MatrixXr(MatrixXr::Identity(V.cols(),V.cols()))).norm() < 100*NumTraits<RealScalar>::epsilon() * n);
#endif
// Because of deflation, the singular values might not be completely sorted.
@@ -672,6 +676,15 @@ void BDCSVD<MatrixType>::computeSVDofM(Index firstCol, Index n, MatrixXr& U, Vec
if(m_compV) V.col(i).swap(V.col(i+1));
}
}
+
+#ifdef EIGEN_BDCSVD_SANITY_CHECKS
+ {
+ bool singular_values_sorted = (((singVals.segment(1,actual_n-1)-singVals.head(actual_n-1))).array() >= 0).all();
+ if(!singular_values_sorted)
+ std::cout << "Singular values are not sorted: " << singVals.segment(1,actual_n).transpose() << "\n";
+ assert(singular_values_sorted);
+ }
+#endif
// Reverse order so that singular values in increased order
// Because of deflation, the zeros singular-values are already at the end
@@ -691,11 +704,13 @@ template <typename MatrixType>
typename BDCSVD<MatrixType>::RealScalar BDCSVD<MatrixType>::secularEq(RealScalar mu, const ArrayRef& col0, const ArrayRef& diag, const IndicesRef &perm, const ArrayRef& diagShifted, RealScalar shift)
{
Index m = perm.size();
- RealScalar res = 1;
+ RealScalar res = Literal(1);
for(Index i=0; i<m; ++i)
{
Index j = perm(i);
- res += numext::abs2(col0(j)) / ((diagShifted(j) - mu) * (diag(j) + shift + mu));
+ // The following expression could be rewritten to involve only a single division,
+ // but this would make the expression more sensitive to overflow.
+ res += (col0(j) / (diagShifted(j) - mu)) * (col0(j) / (diag(j) + shift + mu));
}
return res;
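
For reference, in the notation of the surrounding code (z = col0, d = diag, \sigma = shift) the function evaluated here is the secular equation

  f(\sigma + \mu) = 1 + \sum_{j \in \mathrm{perm}} \frac{z_j^2}{(d_j - \sigma - \mu)\,(d_j + \sigma + \mu)},

and the patched line computes each term as (z_j / (d_j - \sigma - \mu)) \cdot (z_j / (d_j + \sigma + \mu)), so that each intermediate quotient stays moderate instead of first forming the product of the two denominators, which could overflow.
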
@@ -707,19 +722,22 @@ void BDCSVD<MatrixType>::computeSingVals(const ArrayRef& col0, const ArrayRef& d
{
using std::abs;
using std::swap;
+ using std::sqrt;
Index n = col0.size();
Index actual_n = n;
- while(actual_n>1 && col0(actual_n-1)==0) --actual_n;
+ // Note that here actual_n is computed based on col0(i)==0 instead of diag(i)==0 as above
+ // because 1) we have diag(i)==0 => col0(i)==0 and 2) if col0(i)==0, then diag(i) is already a singular value.
+ while(actual_n>1 && col0(actual_n-1)==Literal(0)) --actual_n;
for (Index k = 0; k < n; ++k)
{
- if (col0(k) == 0 || actual_n==1)
+ if (col0(k) == Literal(0) || actual_n==1)
{
// if col0(k) == 0, then entry is deflated, so singular value is on diagonal
// if actual_n==1, then the deflated problem is already diagonalized
singVals(k) = k==0 ? col0(0) : diag(k);
- mus(k) = 0;
+ mus(k) = Literal(0);
shifts(k) = k==0 ? col0(0) : diag(k);
continue;
}
@@ -731,31 +749,36 @@ void BDCSVD<MatrixType>::computeSingVals(const ArrayRef& col0, const ArrayRef& d
right = (diag(actual_n-1) + col0.matrix().norm());
else
{
- // Skip deflated singular values
+ // Skip deflated singular values,
+ // recall that at this stage we assume that z[j]!=0 and all entries for which z[j]==0 have been put aside.
+ // This should be equivalent to using perm[]
Index l = k+1;
- while(col0(l)==0) { ++l; eigen_internal_assert(l<actual_n); }
+ while(col0(l)==Literal(0)) { ++l; eigen_internal_assert(l<actual_n); }
right = diag(l);
}
// first decide whether it's closer to the left end or the right end
- RealScalar mid = left + (right-left) / 2;
- RealScalar fMid = secularEq(mid, col0, diag, perm, diag, 0);
+ RealScalar mid = left + (right-left) / Literal(2);
+ RealScalar fMid = secularEq(mid, col0, diag, perm, diag, Literal(0));
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
- std::cout << right-left << "\n";
- std::cout << "fMid = " << fMid << " " << secularEq(mid-left, col0, diag, perm, diag-left, left) << " " << secularEq(mid-right, col0, diag, perm, diag-right, right) << "\n";
- std::cout << " = " << secularEq(0.1*(left+right), col0, diag, perm, diag, 0)
- << " " << secularEq(0.2*(left+right), col0, diag, perm, diag, 0)
- << " " << secularEq(0.3*(left+right), col0, diag, perm, diag, 0)
- << " " << secularEq(0.4*(left+right), col0, diag, perm, diag, 0)
- << " " << secularEq(0.49*(left+right), col0, diag, perm, diag, 0)
- << " " << secularEq(0.5*(left+right), col0, diag, perm, diag, 0)
- << " " << secularEq(0.51*(left+right), col0, diag, perm, diag, 0)
- << " " << secularEq(0.6*(left+right), col0, diag, perm, diag, 0)
- << " " << secularEq(0.7*(left+right), col0, diag, perm, diag, 0)
- << " " << secularEq(0.8*(left+right), col0, diag, perm, diag, 0)
- << " " << secularEq(0.9*(left+right), col0, diag, perm, diag, 0) << "\n";
+ std::cout << "right-left = " << right-left << "\n";
+// std::cout << "fMid = " << fMid << " " << secularEq(mid-left, col0, diag, perm, ArrayXr(diag-left), left)
+// << " " << secularEq(mid-right, col0, diag, perm, ArrayXr(diag-right), right) << "\n";
+ std::cout << " = " << secularEq(left+RealScalar(0.000001)*(right-left), col0, diag, perm, diag, 0)
+ << " " << secularEq(left+RealScalar(0.1) *(right-left), col0, diag, perm, diag, 0)
+ << " " << secularEq(left+RealScalar(0.2) *(right-left), col0, diag, perm, diag, 0)
+ << " " << secularEq(left+RealScalar(0.3) *(right-left), col0, diag, perm, diag, 0)
+ << " " << secularEq(left+RealScalar(0.4) *(right-left), col0, diag, perm, diag, 0)
+ << " " << secularEq(left+RealScalar(0.49) *(right-left), col0, diag, perm, diag, 0)
+ << " " << secularEq(left+RealScalar(0.5) *(right-left), col0, diag, perm, diag, 0)
+ << " " << secularEq(left+RealScalar(0.51) *(right-left), col0, diag, perm, diag, 0)
+ << " " << secularEq(left+RealScalar(0.6) *(right-left), col0, diag, perm, diag, 0)
+ << " " << secularEq(left+RealScalar(0.7) *(right-left), col0, diag, perm, diag, 0)
+ << " " << secularEq(left+RealScalar(0.8) *(right-left), col0, diag, perm, diag, 0)
+ << " " << secularEq(left+RealScalar(0.9) *(right-left), col0, diag, perm, diag, 0)
+ << " " << secularEq(left+RealScalar(0.999999)*(right-left), col0, diag, perm, diag, 0) << "\n";
#endif
- RealScalar shift = (k == actual_n-1 || fMid > 0) ? left : right;
+ RealScalar shift = (k == actual_n-1 || fMid > Literal(0)) ? left : right;
// measure everything relative to shift
Map<ArrayXr> diagShifted(m_workspace.data()+4*n, n);
@@ -785,26 +808,29 @@ void BDCSVD<MatrixType>::computeSingVals(const ArrayRef& col0, const ArrayRef& d
// rational interpolation: fit a function of the form a / mu + b through the two previous
// iterates and use its zero to compute the next iterate
- bool useBisection = fPrev*fCur>0;
- while (fCur!=0 && abs(muCur - muPrev) > 8 * NumTraits<RealScalar>::epsilon() * numext::maxi<RealScalar>(abs(muCur), abs(muPrev)) && abs(fCur - fPrev)>NumTraits<RealScalar>::epsilon() && !useBisection)
+ bool useBisection = fPrev*fCur>Literal(0);
+ while (fCur!=Literal(0) && abs(muCur - muPrev) > Literal(8) * NumTraits<RealScalar>::epsilon() * numext::maxi<RealScalar>(abs(muCur), abs(muPrev)) && abs(fCur - fPrev)>NumTraits<RealScalar>::epsilon() && !useBisection)
{
++m_numIters;
// Find a and b such that the function f(mu) = a / mu + b matches the current and previous samples.
- RealScalar a = (fCur - fPrev) / (1/muCur - 1/muPrev);
+ RealScalar a = (fCur - fPrev) / (Literal(1)/muCur - Literal(1)/muPrev);
RealScalar b = fCur - a / muCur;
// And find mu such that f(mu)==0:
RealScalar muZero = -a/b;
RealScalar fZero = secularEq(muZero, col0, diag, perm, diagShifted, shift);
+
+#ifdef EIGEN_BDCSVD_SANITY_CHECKS
+ assert((std::isfinite)(fZero));
+#endif
muPrev = muCur;
fPrev = fCur;
muCur = muZero;
fCur = fZero;
-
- if (shift == left && (muCur < 0 || muCur > right - left)) useBisection = true;
- if (shift == right && (muCur < -(right - left) || muCur > 0)) useBisection = true;
+ if (shift == left && (muCur < Literal(0) || muCur > right - left)) useBisection = true;
+ if (shift == right && (muCur < -(right - left) || muCur > Literal(0))) useBisection = true;
if (abs(fCur)>abs(fPrev)) useBisection = true;
}
@@ -817,37 +843,59 @@ void BDCSVD<MatrixType>::computeSingVals(const ArrayRef& col0, const ArrayRef& d
RealScalar leftShifted, rightShifted;
if (shift == left)
{
- leftShifted = (std::numeric_limits<RealScalar>::min)();
+ // to avoid overflow, we must have mu > max(real_min, |z(k)|/sqrt(real_max)),
+ // the factor 2 is to be more conservative
+ leftShifted = numext::maxi<RealScalar>( (std::numeric_limits<RealScalar>::min)(), Literal(2) * abs(col0(k)) / sqrt((std::numeric_limits<RealScalar>::max)()) );
+
+ // check that we did it right:
+ eigen_internal_assert( (numext::isfinite)( (col0(k)/leftShifted)*(col0(k)/(diag(k)+shift+leftShifted)) ) );
// I don't understand why the case k==0 would be special there:
- // if (k == 0) rightShifted = right - left; else
- rightShifted = (k==actual_n-1) ? right : ((right - left) * RealScalar(0.6)); // theoretically we can take 0.5, but let's be safe
+ // if (k == 0) rightShifted = right - left; else
+ rightShifted = (k==actual_n-1) ? right : ((right - left) * RealScalar(0.51)); // theoretically we can take 0.5, but let's be safe
}
else
{
- leftShifted = -(right - left) * RealScalar(0.6);
- rightShifted = -(std::numeric_limits<RealScalar>::min)();
+ leftShifted = -(right - left) * RealScalar(0.51);
+ if(k+1<n)
+ rightShifted = -numext::maxi<RealScalar>( (std::numeric_limits<RealScalar>::min)(), abs(col0(k+1)) / sqrt((std::numeric_limits<RealScalar>::max)()) );
+ else
+ rightShifted = -(std::numeric_limits<RealScalar>::min)();
}
-
+
RealScalar fLeft = secularEq(leftShifted, col0, diag, perm, diagShifted, shift);
-#if defined EIGEN_INTERNAL_DEBUGGING || defined EIGEN_BDCSVD_DEBUG_VERBOSE
+#if defined EIGEN_INTERNAL_DEBUGGING || defined EIGEN_BDCSVD_SANITY_CHECKS
RealScalar fRight = secularEq(rightShifted, col0, diag, perm, diagShifted, shift);
#endif
+#ifdef EIGEN_BDCSVD_SANITY_CHECKS
+ if(!(std::isfinite)(fLeft))
+ std::cout << "f(" << leftShifted << ") =" << fLeft << " ; " << left << " " << shift << " " << right << "\n";
+ assert((std::isfinite)(fLeft));
+
+ if(!(std::isfinite)(fRight))
+ std::cout << "f(" << rightShifted << ") =" << fRight << " ; " << left << " " << shift << " " << right << "\n";
+// assert((std::isfinite)(fRight));
+#endif
+
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
if(!(fLeft * fRight<0))
{
- std::cout << "fLeft: " << leftShifted << " - " << diagShifted.head(10).transpose() << "\n ; " << bool(left==shift) << " " << (left-shift) << "\n";
- std::cout << k << " : " << fLeft << " * " << fRight << " == " << fLeft * fRight << " ; " << left << " - " << right << " -> " << leftShifted << " " << rightShifted << " shift=" << shift << "\n";
+ std::cout << "f(leftShifted) using leftShifted=" << leftShifted << " ; diagShifted(1:10):" << diagShifted.head(10).transpose() << "\n ; "
+ << "left==shift=" << bool(left==shift) << " ; left-shift = " << (left-shift) << "\n";
+ std::cout << "k=" << k << ", " << fLeft << " * " << fRight << " == " << fLeft * fRight << " ; "
+ << "[" << left << " .. " << right << "] -> [" << leftShifted << " " << rightShifted << "], shift=" << shift << " , f(right)=" << secularEq(0, col0, diag, perm, diagShifted, shift) << " == " << secularEq(right, col0, diag, perm, diag, 0) << "\n";
}
#endif
- eigen_internal_assert(fLeft * fRight < 0);
+ eigen_internal_assert(fLeft * fRight < Literal(0));
- while (rightShifted - leftShifted > 2 * NumTraits<RealScalar>::epsilon() * numext::maxi<RealScalar>(abs(leftShifted), abs(rightShifted)))
+ while (rightShifted - leftShifted > Literal(2) * NumTraits<RealScalar>::epsilon() * numext::maxi<RealScalar>(abs(leftShifted), abs(rightShifted)))
{
- RealScalar midShifted = (leftShifted + rightShifted) / 2;
+ RealScalar midShifted = (leftShifted + rightShifted) / Literal(2);
fMid = secularEq(midShifted, col0, diag, perm, diagShifted, shift);
- if (fLeft * fMid < 0)
+ eigen_internal_assert((numext::isfinite)(fMid));
+
+ if (fLeft * fMid < Literal(0))
{
rightShifted = midShifted;
}
@@ -858,13 +906,22 @@ void BDCSVD<MatrixType>::computeSingVals(const ArrayRef& col0, const ArrayRef& d
}
}
- muCur = (leftShifted + rightShifted) / 2;
+ muCur = (leftShifted + rightShifted) / Literal(2);
}
singVals[k] = shift + muCur;
shifts[k] = shift;
mus[k] = muCur;
+#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
+ if(k+1<n)
+ std::cout << "found " << singVals[k] << " == " << shift << " + " << muCur << " from " << diag(k) << " .. " << diag(k+1) << "\n";
+#endif
+#ifdef EIGEN_BDCSVD_SANITY_CHECKS
+ assert(k==0 || singVals[k]>=singVals[k-1]);
+ assert(singVals[k]>=diag(k));
+#endif
+
// perturb singular value slightly if it equals diagonal entry to avoid division by zero later
// (deflation is supposed to avoid this from happening)
// - this does no seem to be necessary anymore -
@@ -888,25 +945,53 @@ void BDCSVD<MatrixType>::perturbCol0
zhat.setZero();
return;
}
- Index last = perm(m-1);
+ Index lastIdx = perm(m-1);
// The offset permits to skip deflated entries while computing zhat
for (Index k = 0; k < n; ++k)
{
- if (col0(k) == 0) // deflated
- zhat(k) = 0;
+ if (col0(k) == Literal(0)) // deflated
+ zhat(k) = Literal(0);
else
{
// see equation (3.6)
RealScalar dk = diag(k);
- RealScalar prod = (singVals(last) + dk) * (mus(last) + (shifts(last) - dk));
+ RealScalar prod = (singVals(lastIdx) + dk) * (mus(lastIdx) + (shifts(lastIdx) - dk));
+#ifdef EIGEN_BDCSVD_SANITY_CHECKS
+ if(prod<0) {
+ std::cout << "k = " << k << " ; z(k)=" << col0(k) << ", diag(k)=" << dk << "\n";
+ std::cout << "prod = " << "(" << singVals(lastIdx) << " + " << dk << ") * (" << mus(lastIdx) << " + (" << shifts(lastIdx) << " - " << dk << "))" << "\n";
+ std::cout << " = " << singVals(lastIdx) + dk << " * " << mus(lastIdx) + (shifts(lastIdx) - dk) << "\n";
+ }
+ assert(prod>=0);
+#endif
for(Index l = 0; l<m; ++l)
{
Index i = perm(l);
if(i!=k)
{
+#ifdef EIGEN_BDCSVD_SANITY_CHECKS
+ if(i>=k && (l==0 || l-1>=m))
+ {
+ std::cout << "Error in perturbCol0\n";
+ std::cout << " " << k << "/" << n << " " << l << "/" << m << " " << i << "/" << n << " ; " << col0(k) << " " << diag(k) << " " << "\n";
+ std::cout << " " <<diag(i) << "\n";
+ Index j = (i<k /*|| l==0*/) ? i : perm(l-1);
+ std::cout << " " << "j=" << j << "\n";
+ }
+#endif
Index j = i<k ? i : perm(l-1);
+#ifdef EIGEN_BDCSVD_SANITY_CHECKS
+ if(!(dk!=Literal(0) || diag(i)!=Literal(0)))
+ {
+ std::cout << "k=" << k << ", i=" << i << ", l=" << l << ", perm.size()=" << perm.size() << "\n";
+ }
+ assert(dk!=Literal(0) || diag(i)!=Literal(0));
+#endif
prod *= ((singVals(j)+dk) / ((diag(i)+dk))) * ((mus(j)+(shifts(j)-dk)) / ((diag(i)-dk)));
+#ifdef EIGEN_BDCSVD_SANITY_CHECKS
+ assert(prod>=0);
+#endif
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
if(i!=k && std::abs(((singVals(j)+dk)*(mus(j)+(shifts(j)-dk)))/((diag(i)+dk)*(diag(i)-dk)) - 1) > 0.9 )
std::cout << " " << ((singVals(j)+dk)*(mus(j)+(shifts(j)-dk)))/((diag(i)+dk)*(diag(i)-dk)) << " == (" << (singVals(j)+dk) << " * " << (mus(j)+(shifts(j)-dk))
@@ -915,10 +1000,13 @@ void BDCSVD<MatrixType>::perturbCol0
}
}
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
- std::cout << "zhat(" << k << ") = sqrt( " << prod << ") ; " << (singVals(last) + dk) << " * " << mus(last) + shifts(last) << " - " << dk << "\n";
+ std::cout << "zhat(" << k << ") = sqrt( " << prod << ") ; " << (singVals(lastIdx) + dk) << " * " << mus(lastIdx) + shifts(lastIdx) << " - " << dk << "\n";
#endif
RealScalar tmp = sqrt(prod);
- zhat(k) = col0(k) > 0 ? tmp : -tmp;
+#ifdef EIGEN_BDCSVD_SANITY_CHECKS
+ assert((std::isfinite)(tmp));
+#endif
+ zhat(k) = col0(k) > Literal(0) ? tmp : -tmp;
}
}
}
@@ -934,7 +1022,7 @@ void BDCSVD<MatrixType>::computeSingVecs
for (Index k = 0; k < n; ++k)
{
- if (zhat(k) == 0)
+ if (zhat(k) == Literal(0))
{
U.col(k) = VectorType::Unit(n+1, k);
if (m_compV) V.col(k) = VectorType::Unit(n, k);
@@ -947,7 +1035,7 @@ void BDCSVD<MatrixType>::computeSingVecs
Index i = perm(l);
U(i,k) = zhat(i)/(((diag(i) - shifts(k)) - mus(k)) )/( (diag(i) + singVals[k]));
}
- U(n,k) = 0;
+ U(n,k) = Literal(0);
U.col(k).normalize();
if (m_compV)
@@ -958,7 +1046,7 @@ void BDCSVD<MatrixType>::computeSingVecs
Index i = perm(l);
V(i,k) = diag(i) * zhat(i) / (((diag(i) - shifts(k)) - mus(k)) )/( (diag(i) + singVals[k]));
}
- V(0,k) = -1;
+ V(0,k) = Literal(-1);
V.col(k).normalize();
}
}
@@ -971,7 +1059,7 @@ void BDCSVD<MatrixType>::computeSingVecs
// i >= 1, di almost null and zi non null.
// We use a rotation to zero out zi applied to the left of M
template <typename MatrixType>
-void BDCSVD<MatrixType>::deflation43(Index firstCol, Index shift, Index i, Index size)
+void BDCSVD<MatrixType>::deflation43(Eigen::Index firstCol, Eigen::Index shift, Eigen::Index i, Eigen::Index size)
{
using std::abs;
using std::sqrt;
@@ -979,15 +1067,15 @@ void BDCSVD<MatrixType>::deflation43(Index firstCol, Index shift, Index i, Index
Index start = firstCol + shift;
RealScalar c = m_computed(start, start);
RealScalar s = m_computed(start+i, start);
- RealScalar r = sqrt(numext::abs2(c) + numext::abs2(s));
- if (r == 0)
+ RealScalar r = numext::hypot(c,s);
+ if (r == Literal(0))
{
- m_computed(start+i, start+i) = 0;
+ m_computed(start+i, start+i) = Literal(0);
return;
}
m_computed(start,start) = r;
- m_computed(start+i, start) = 0;
- m_computed(start+i, start+i) = 0;
+ m_computed(start+i, start) = Literal(0);
+ m_computed(start+i, start+i) = Literal(0);
JacobiRotation<RealScalar> J(c/r,-s/r);
if (m_compU) m_naiveU.middleRows(firstCol, size+1).applyOnTheRight(firstCol, firstCol+i, J);
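deflation43 above zeroes m_computed(start+i, start) with a single Givens rotation built directly from the pair (c, s). A hedged illustration of the same primitive through the public JacobiRotation API (the vector (3, 4) is just an example input; this is not the BDCSVD code path itself):

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::Vector2d v(3.0, 4.0);
      Eigen::JacobiRotation<double> G;
      // makeGivens(p, q) builds G such that G^* * (p, q)^T = (r, 0)^T.
      G.makeGivens(v.x(), v.y());
      v.applyOnTheLeft(0, 1, G.adjoint());
      std::cout << v.transpose() << "\n";   // prints (±5, 0) up to rounding
      return 0;
    }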
@@ -1000,7 +1088,7 @@ void BDCSVD<MatrixType>::deflation43(Index firstCol, Index shift, Index i, Index
// We apply two rotations to have zj = 0;
// TODO deflation44 is still broken and not properly tested
template <typename MatrixType>
-void BDCSVD<MatrixType>::deflation44(Index firstColu , Index firstColm, Index firstRowW, Index firstColW, Index i, Index j, Index size)
+void BDCSVD<MatrixType>::deflation44(Eigen::Index firstColu , Eigen::Index firstColm, Eigen::Index firstRowW, Eigen::Index firstColW, Eigen::Index i, Eigen::Index j, Eigen::Index size)
{
using std::abs;
using std::sqrt;
@@ -1020,16 +1108,16 @@ void BDCSVD<MatrixType>::deflation44(Index firstColu , Index firstColm, Index fi
<< m_computed(firstColm + i+1, firstColm+i+1) << " "
<< m_computed(firstColm + i+2, firstColm+i+2) << "\n";
#endif
- if (r==0)
+ if (r==Literal(0))
{
m_computed(firstColm + i, firstColm + i) = m_computed(firstColm + j, firstColm + j);
return;
}
c/=r;
s/=r;
- m_computed(firstColm + i, firstColm) = r;
+ m_computed(firstColm + i, firstColm) = r;
m_computed(firstColm + j, firstColm + j) = m_computed(firstColm + i, firstColm + i);
- m_computed(firstColm + j, firstColm) = 0;
+ m_computed(firstColm + j, firstColm) = Literal(0);
JacobiRotation<RealScalar> J(c,-s);
if (m_compU) m_naiveU.middleRows(firstColu, size+1).applyOnTheRight(firstColu + i, firstColu + j, J);
@@ -1040,7 +1128,7 @@ void BDCSVD<MatrixType>::deflation44(Index firstColu , Index firstColm, Index fi
// acts on block from (firstCol+shift, firstCol+shift) to (lastCol+shift, lastCol+shift) [inclusive]
template <typename MatrixType>
-void BDCSVD<MatrixType>::deflation(Index firstCol, Index lastCol, Index k, Index firstRowW, Index firstColW, Index shift)
+void BDCSVD<MatrixType>::deflation(Eigen::Index firstCol, Eigen::Index lastCol, Eigen::Index k, Eigen::Index firstRowW, Eigen::Index firstColW, Eigen::Index shift)
{
using std::sqrt;
using std::abs;
@@ -1053,7 +1141,7 @@ void BDCSVD<MatrixType>::deflation(Index firstCol, Index lastCol, Index k, Index
const RealScalar considerZero = (std::numeric_limits<RealScalar>::min)();
RealScalar maxDiag = diag.tail((std::max)(Index(1),length-1)).cwiseAbs().maxCoeff();
RealScalar epsilon_strict = numext::maxi<RealScalar>(considerZero,NumTraits<RealScalar>::epsilon() * maxDiag);
- RealScalar epsilon_coarse = 8 * NumTraits<RealScalar>::epsilon() * numext::maxi<RealScalar>(col0.cwiseAbs().maxCoeff(), maxDiag);
+ RealScalar epsilon_coarse = Literal(8) * NumTraits<RealScalar>::epsilon() * numext::maxi<RealScalar>(col0.cwiseAbs().maxCoeff(), maxDiag);
#ifdef EIGEN_BDCSVD_SANITY_CHECKS
assert(m_naiveU.allFinite());
@@ -1081,7 +1169,7 @@ void BDCSVD<MatrixType>::deflation(Index firstCol, Index lastCol, Index k, Index
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
std::cout << "deflation 4.2, set z(" << i << ") to zero because " << abs(col0(i)) << " < " << epsilon_strict << " (diag(" << i << ")=" << diag(i) << ")\n";
#endif
- col0(i) = 0;
+ col0(i) = Literal(0);
}
//condition 4.3
@@ -1101,6 +1189,7 @@ void BDCSVD<MatrixType>::deflation(Index firstCol, Index lastCol, Index k, Index
#endif
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
std::cout << "to be sorted: " << diag.transpose() << "\n\n";
+ std::cout << " : " << col0.transpose() << "\n\n";
#endif
{
// Check for total deflation
@@ -1191,7 +1280,7 @@ void BDCSVD<MatrixType>::deflation(Index firstCol, Index lastCol, Index k, Index
if( (diag(i) - diag(i-1)) < NumTraits<RealScalar>::epsilon()*maxDiag )
{
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
- std::cout << "deflation 4.4 with i = " << i << " because " << (diag(i) - diag(i-1)) << " < " << NumTraits<RealScalar>::epsilon()*diag(i) << "\n";
+ std::cout << "deflation 4.4 with i = " << i << " because " << diag(i) << " - " << diag(i-1) << " == " << (diag(i) - diag(i-1)) << " < " << NumTraits<RealScalar>::epsilon()*/*diag(i)*/maxDiag << "\n";
#endif
eigen_internal_assert(abs(diag(i) - diag(i-1))<epsilon_coarse && " diagonal entries are not properly sorted");
deflation44(firstCol, firstCol + shift, firstRowW, firstColW, i-1, i, length);
@@ -1210,7 +1299,7 @@ void BDCSVD<MatrixType>::deflation(Index firstCol, Index lastCol, Index k, Index
#endif
}//end deflation
-#ifndef __CUDACC__
+#if !defined(EIGEN_GPUCC)
/** \svd_module
*
* \return the singular value decomposition of \c *this computed by Divide & Conquer algorithm
diff --git a/Eigen/src/SVD/JacobiSVD.h b/Eigen/src/SVD/JacobiSVD.h
index 43488b1e0..1c7c80376 100644
--- a/Eigen/src/SVD/JacobiSVD.h
+++ b/Eigen/src/SVD/JacobiSVD.h
@@ -610,7 +610,7 @@ template<typename _MatrixType, int QRPreconditioner> class JacobiSVD
};
template<typename MatrixType, int QRPreconditioner>
-void JacobiSVD<MatrixType, QRPreconditioner>::allocate(Index rows, Index cols, unsigned int computationOptions)
+void JacobiSVD<MatrixType, QRPreconditioner>::allocate(Eigen::Index rows, Eigen::Index cols, unsigned int computationOptions)
{
eigen_assert(rows >= 0 && cols >= 0);
diff --git a/Eigen/src/SVD/JacobiSVD_LAPACKE.h b/Eigen/src/SVD/JacobiSVD_LAPACKE.h
index 50272154f..ff0516f61 100644
--- a/Eigen/src/SVD/JacobiSVD_LAPACKE.h
+++ b/Eigen/src/SVD/JacobiSVD_LAPACKE.h
@@ -61,9 +61,10 @@ JacobiSVD<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic>, ColPiv
u = (LAPACKE_TYPE*)m_matrixU.data(); \
} else { ldu=1; u=&dummy; }\
MatrixType localV; \
- ldvt = (m_computeFullV) ? internal::convert_index<lapack_int>(m_cols) : (m_computeThinV) ? internal::convert_index<lapack_int>(m_diagSize) : 1; \
+ lapack_int vt_rows = (m_computeFullV) ? internal::convert_index<lapack_int>(m_cols) : (m_computeThinV) ? internal::convert_index<lapack_int>(m_diagSize) : 1; \
if (computeV()) { \
- localV.resize(ldvt, m_cols); \
+ localV.resize(vt_rows, m_cols); \
+ ldvt = internal::convert_index<lapack_int>(localV.outerStride()); \
vt = (LAPACKE_TYPE*)localV.data(); \
} else { ldvt=1; vt=&dummy; }\
Matrix<LAPACKE_RTYPE, Dynamic, Dynamic> superb; superb.resize(m_diagSize, 1); \
diff --git a/Eigen/src/SVD/SVDBase.h b/Eigen/src/SVD/SVDBase.h
index cc90a3b75..429414797 100644
--- a/Eigen/src/SVD/SVDBase.h
+++ b/Eigen/src/SVD/SVDBase.h
@@ -212,7 +212,6 @@ public:
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename RhsType, typename DstType>
- EIGEN_DEVICE_FUNC
void _solve_impl(const RhsType &rhs, DstType &dst) const;
#endif
diff --git a/Eigen/src/SVD/UpperBidiagonalization.h b/Eigen/src/SVD/UpperBidiagonalization.h
index 0b1460894..997defc47 100644
--- a/Eigen/src/SVD/UpperBidiagonalization.h
+++ b/Eigen/src/SVD/UpperBidiagonalization.h
@@ -127,7 +127,7 @@ void upperbidiagonalization_inplace_unblocked(MatrixType& mat,
.makeHouseholderInPlace(mat.coeffRef(k,k+1), upper_diagonal[k]);
// apply householder transform to remaining part of mat on the left
mat.bottomRightCorner(remainingRows-1, remainingCols)
- .applyHouseholderOnTheRight(mat.row(k).tail(remainingCols-1).transpose(), mat.coeff(k,k+1), tempData);
+ .applyHouseholderOnTheRight(mat.row(k).tail(remainingCols-1).adjoint(), mat.coeff(k,k+1), tempData);
}
}
@@ -159,6 +159,8 @@ void upperbidiagonalization_blocked_helper(MatrixType& A,
traits<MatrixType>::Flags & RowMajorBit> > Y)
{
typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::RealScalar RealScalar;
+ typedef typename NumTraits<RealScalar>::Literal Literal;
enum { StorageOrder = traits<MatrixType>::Flags & RowMajorBit };
typedef InnerStride<int(StorageOrder) == int(ColMajor) ? 1 : Dynamic> ColInnerStride;
typedef InnerStride<int(StorageOrder) == int(ColMajor) ? Dynamic : 1> RowInnerStride;
@@ -200,7 +202,7 @@ void upperbidiagonalization_blocked_helper(MatrixType& A,
{
SubColumnType y_k( Y.col(k).tail(remainingCols) );
- // let's use the begining of column k of Y as a temporary vector
+ // let's use the beginning of column k of Y as a temporary vector
SubColumnType tmp( Y.col(k).head(k) );
y_k.noalias() = A.block(k,k+1, remainingRows,remainingCols).adjoint() * v_k; // bottleneck
tmp.noalias() = V_k1.adjoint() * v_k;
@@ -229,7 +231,7 @@ void upperbidiagonalization_blocked_helper(MatrixType& A,
{
SubColumnType x_k ( X.col(k).tail(remainingRows-1) );
- // let's use the begining of column k of X as a temporary vectors
+ // let's use the beginning of column k of X as temporary vectors
// note that tmp0 and tmp1 overlap
SubColumnType tmp0 ( X.col(k).head(k) ),
tmp1 ( X.col(k).head(k+1) );
@@ -263,7 +265,7 @@ void upperbidiagonalization_blocked_helper(MatrixType& A,
SubMatType A10( A.block(bs,0, brows-bs,bs) );
SubMatType A01( A.block(0,bs, bs,bcols-bs) );
Scalar tmp = A01(bs-1,0);
- A01(bs-1,0) = 1;
+ A01(bs-1,0) = Literal(1);
A11.noalias() -= A10 * Y.topLeftCorner(bcols,bs).bottomRows(bcols-bs).adjoint();
A11.noalias() -= X.topLeftCorner(brows,bs).bottomRows(brows-bs) * A01;
A01(bs-1,0) = tmp;
diff --git a/Eigen/src/SparseCholesky/SimplicialCholesky.h b/Eigen/src/SparseCholesky/SimplicialCholesky.h
index 2907f6529..b9ca94bc3 100644
--- a/Eigen/src/SparseCholesky/SimplicialCholesky.h
+++ b/Eigen/src/SparseCholesky/SimplicialCholesky.h
@@ -101,7 +101,7 @@ class SimplicialCholeskyBase : public SparseSolverBase<Derived>
/** \brief Reports whether previous computation was successful.
*
- * \returns \c Success if computation was succesful,
+ * \returns \c Success if computation was successful,
* \c NumericalIssue if the matrix appears to be negative.
*/
ComputationInfo info() const
diff --git a/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h b/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h
index 31e06995b..0aa92f8bc 100644
--- a/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h
+++ b/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h
@@ -5,7 +5,7 @@
/*
-NOTE: thes functions vave been adapted from the LDL library:
+NOTE: these functions have been adapted from the LDL library:
LDL Copyright (c) 2005 by Timothy A. Davis. All Rights Reserved.
@@ -122,7 +122,7 @@ void SimplicialCholeskyBase<Derived>::factorize_preordered(const CholMatrixType&
for(StorageIndex k = 0; k < size; ++k)
{
// compute nonzero pattern of kth row of L, in topological order
- y[k] = 0.0; // Y(0:k) is now all zero
+ y[k] = Scalar(0); // Y(0:k) is now all zero
StorageIndex top = size; // stack for pattern is empty
tags[k] = k; // mark node k as visited
m_nonZerosPerCol[k] = 0; // count of nonzeros in column k of L
@@ -146,12 +146,12 @@ void SimplicialCholeskyBase<Derived>::factorize_preordered(const CholMatrixType&
/* compute numerical values kth row of L (a sparse triangular solve) */
RealScalar d = numext::real(y[k]) * m_shiftScale + m_shiftOffset; // get D(k,k), apply the shift function, and clear Y(k)
- y[k] = 0.0;
+ y[k] = Scalar(0);
for(; top < size; ++top)
{
Index i = pattern[top]; /* pattern[top:n-1] is pattern of L(:,k) */
Scalar yi = y[i]; /* get and clear Y(i) */
- y[i] = 0.0;
+ y[i] = Scalar(0);
/* the nonzero entry L(k,i) */
Scalar l_ki;
diff --git a/Eigen/src/SparseCore/AmbiVector.h b/Eigen/src/SparseCore/AmbiVector.h
index 8a5cc91f2..e0295f2af 100644
--- a/Eigen/src/SparseCore/AmbiVector.h
+++ b/Eigen/src/SparseCore/AmbiVector.h
@@ -94,7 +94,7 @@ class AmbiVector
Index allocSize = m_allocatedElements * sizeof(ListEl);
allocSize = (allocSize + sizeof(Scalar) - 1)/sizeof(Scalar);
Scalar* newBuffer = new Scalar[allocSize];
- memcpy(newBuffer, m_buffer, copyElements * sizeof(ListEl));
+ std::memcpy(newBuffer, m_buffer, copyElements * sizeof(ListEl));
delete[] m_buffer;
m_buffer = newBuffer;
}
diff --git a/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h b/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h
index 492eb0a29..9db119b67 100644
--- a/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h
+++ b/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h
@@ -17,7 +17,9 @@ namespace internal {
template<typename Lhs, typename Rhs, typename ResultType>
static void conservative_sparse_sparse_product_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res, bool sortedInsertion = false)
{
- typedef typename remove_all<Lhs>::type::Scalar Scalar;
+ typedef typename remove_all<Lhs>::type::Scalar LhsScalar;
+ typedef typename remove_all<Rhs>::type::Scalar RhsScalar;
+ typedef typename remove_all<ResultType>::type::Scalar ResScalar;
// make sure to call innerSize/outerSize since we fake the storage order.
Index rows = lhs.innerSize();
@@ -25,7 +27,7 @@ static void conservative_sparse_sparse_product_impl(const Lhs& lhs, const Rhs& r
eigen_assert(lhs.outerSize() == rhs.innerSize());
ei_declare_aligned_stack_constructed_variable(bool, mask, rows, 0);
- ei_declare_aligned_stack_constructed_variable(Scalar, values, rows, 0);
+ ei_declare_aligned_stack_constructed_variable(ResScalar, values, rows, 0);
ei_declare_aligned_stack_constructed_variable(Index, indices, rows, 0);
std::memset(mask,0,sizeof(bool)*rows);
@@ -51,12 +53,12 @@ static void conservative_sparse_sparse_product_impl(const Lhs& lhs, const Rhs& r
Index nnz = 0;
for (typename evaluator<Rhs>::InnerIterator rhsIt(rhsEval, j); rhsIt; ++rhsIt)
{
- Scalar y = rhsIt.value();
+ RhsScalar y = rhsIt.value();
Index k = rhsIt.index();
for (typename evaluator<Lhs>::InnerIterator lhsIt(lhsEval, k); lhsIt; ++lhsIt)
{
Index i = lhsIt.index();
- Scalar x = lhsIt.value();
+ LhsScalar x = lhsIt.value();
if(!mask[i])
{
mask[i] = true;
@@ -166,11 +168,12 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,C
{
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
- typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::StorageIndex> RowMajorMatrix;
- RowMajorMatrix rhsRow = rhs;
- RowMajorMatrix resRow(lhs.rows(), rhs.cols());
- internal::conservative_sparse_sparse_product_impl<RowMajorMatrix,Lhs,RowMajorMatrix>(rhsRow, lhs, resRow);
- res = resRow;
+ typedef SparseMatrix<typename Rhs::Scalar,RowMajor,typename ResultType::StorageIndex> RowMajorRhs;
+ typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::StorageIndex> RowMajorRes;
+ RowMajorRhs rhsRow = rhs;
+ RowMajorRes resRow(lhs.rows(), rhs.cols());
+ internal::conservative_sparse_sparse_product_impl<RowMajorRhs,Lhs,RowMajorRes>(rhsRow, lhs, resRow);
+ res = resRow;
}
};
@@ -179,10 +182,11 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,R
{
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
- typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::StorageIndex> RowMajorMatrix;
- RowMajorMatrix lhsRow = lhs;
- RowMajorMatrix resRow(lhs.rows(), rhs.cols());
- internal::conservative_sparse_sparse_product_impl<Rhs,RowMajorMatrix,RowMajorMatrix>(rhs, lhsRow, resRow);
+ typedef SparseMatrix<typename Lhs::Scalar,RowMajor,typename ResultType::StorageIndex> RowMajorLhs;
+ typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::StorageIndex> RowMajorRes;
+ RowMajorLhs lhsRow = lhs;
+ RowMajorRes resRow(lhs.rows(), rhs.cols());
+ internal::conservative_sparse_sparse_product_impl<Rhs,RowMajorLhs,RowMajorRes>(rhs, lhsRow, resRow);
res = resRow;
}
};
@@ -219,10 +223,11 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,C
{
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
- typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorMatrix;
- ColMajorMatrix lhsCol = lhs;
- ColMajorMatrix resCol(lhs.rows(), rhs.cols());
- internal::conservative_sparse_sparse_product_impl<ColMajorMatrix,Rhs,ColMajorMatrix>(lhsCol, rhs, resCol);
+ typedef SparseMatrix<typename Lhs::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorLhs;
+ typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorRes;
+ ColMajorLhs lhsCol = lhs;
+ ColMajorRes resCol(lhs.rows(), rhs.cols());
+ internal::conservative_sparse_sparse_product_impl<ColMajorLhs,Rhs,ColMajorRes>(lhsCol, rhs, resCol);
res = resCol;
}
};
@@ -232,10 +237,11 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,R
{
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
- typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorMatrix;
- ColMajorMatrix rhsCol = rhs;
- ColMajorMatrix resCol(lhs.rows(), rhs.cols());
- internal::conservative_sparse_sparse_product_impl<Lhs,ColMajorMatrix,ColMajorMatrix>(lhs, rhsCol, resCol);
+ typedef SparseMatrix<typename Rhs::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorRhs;
+ typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorRes;
+ ColMajorRhs rhsCol = rhs;
+ ColMajorRes resCol(lhs.rows(), rhs.cols());
+ internal::conservative_sparse_sparse_product_impl<Lhs,ColMajorRhs,ColMajorRes>(lhs, rhsCol, resCol);
res = resCol;
}
};
@@ -263,7 +269,8 @@ namespace internal {
template<typename Lhs, typename Rhs, typename ResultType>
static void sparse_sparse_to_dense_product_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
- typedef typename remove_all<Lhs>::type::Scalar Scalar;
+ typedef typename remove_all<Lhs>::type::Scalar LhsScalar;
+ typedef typename remove_all<Rhs>::type::Scalar RhsScalar;
Index cols = rhs.outerSize();
eigen_assert(lhs.outerSize() == rhs.innerSize());
@@ -274,12 +281,12 @@ static void sparse_sparse_to_dense_product_impl(const Lhs& lhs, const Rhs& rhs,
{
for (typename evaluator<Rhs>::InnerIterator rhsIt(rhsEval, j); rhsIt; ++rhsIt)
{
- Scalar y = rhsIt.value();
+ RhsScalar y = rhsIt.value();
Index k = rhsIt.index();
for (typename evaluator<Lhs>::InnerIterator lhsIt(lhsEval, k); lhsIt; ++lhsIt)
{
Index i = lhsIt.index();
- Scalar x = lhsIt.value();
+ LhsScalar x = lhsIt.value();
res.coeffRef(i,j) += x * y;
}
}
@@ -310,9 +317,9 @@ struct sparse_sparse_to_dense_product_selector<Lhs,Rhs,ResultType,RowMajor,ColMa
{
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
- typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorMatrix;
- ColMajorMatrix lhsCol(lhs);
- internal::sparse_sparse_to_dense_product_impl<ColMajorMatrix,Rhs,ResultType>(lhsCol, rhs, res);
+ typedef SparseMatrix<typename Lhs::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorLhs;
+ ColMajorLhs lhsCol(lhs);
+ internal::sparse_sparse_to_dense_product_impl<ColMajorLhs,Rhs,ResultType>(lhsCol, rhs, res);
}
};
@@ -321,9 +328,9 @@ struct sparse_sparse_to_dense_product_selector<Lhs,Rhs,ResultType,ColMajor,RowMa
{
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
- typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorMatrix;
- ColMajorMatrix rhsCol(rhs);
- internal::sparse_sparse_to_dense_product_impl<Lhs,ColMajorMatrix,ResultType>(lhs, rhsCol, res);
+ typedef SparseMatrix<typename Rhs::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorRhs;
+ ColMajorRhs rhsCol(rhs);
+ internal::sparse_sparse_to_dense_product_impl<Lhs,ColMajorRhs,ResultType>(lhs, rhsCol, res);
}
};
diff --git a/Eigen/src/SparseCore/SparseAssign.h b/Eigen/src/SparseCore/SparseAssign.h
index 18352a847..113463258 100644
--- a/Eigen/src/SparseCore/SparseAssign.h
+++ b/Eigen/src/SparseCore/SparseAssign.h
@@ -83,7 +83,7 @@ void assign_sparse_to_sparse(DstXprType &dst, const SrcXprType &src)
// eval without temporary
dst.resize(src.rows(), src.cols());
dst.setZero();
- dst.reserve((std::max)(src.rows(),src.cols())*2);
+ dst.reserve((std::min)(src.rows()*src.cols(), (std::max)(src.rows(),src.cols())*2));
for (Index j=0; j<outerEvaluationSize; ++j)
{
dst.startVec(j);
@@ -107,7 +107,7 @@ void assign_sparse_to_sparse(DstXprType &dst, const SrcXprType &src)
DstXprType temp(src.rows(), src.cols());
- temp.reserve((std::max)(src.rows(),src.cols())*2);
+ temp.reserve((std::min)(src.rows()*src.cols(), (std::max)(src.rows(),src.cols())*2));
for (Index j=0; j<outerEvaluationSize; ++j)
{
temp.startVec(j);
diff --git a/Eigen/src/SparseCore/SparseBlock.h b/Eigen/src/SparseCore/SparseBlock.h
index 511e92b2f..5ed7c437b 100644
--- a/Eigen/src/SparseCore/SparseBlock.h
+++ b/Eigen/src/SparseCore/SparseBlock.h
@@ -326,46 +326,6 @@ private:
//----------
-/** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
- * is col-major (resp. row-major).
- */
-template<typename Derived>
-typename SparseMatrixBase<Derived>::InnerVectorReturnType SparseMatrixBase<Derived>::innerVector(Index outer)
-{ return InnerVectorReturnType(derived(), outer); }
-
-/** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
- * is col-major (resp. row-major). Read-only.
- */
-template<typename Derived>
-const typename SparseMatrixBase<Derived>::ConstInnerVectorReturnType SparseMatrixBase<Derived>::innerVector(Index outer) const
-{ return ConstInnerVectorReturnType(derived(), outer); }
-
-/** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
- * is col-major (resp. row-major).
- */
-template<typename Derived>
-typename SparseMatrixBase<Derived>::InnerVectorsReturnType
-SparseMatrixBase<Derived>::innerVectors(Index outerStart, Index outerSize)
-{
- return Block<Derived,Dynamic,Dynamic,true>(derived(),
- IsRowMajor ? outerStart : 0, IsRowMajor ? 0 : outerStart,
- IsRowMajor ? outerSize : rows(), IsRowMajor ? cols() : outerSize);
-
-}
-
-/** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
- * is col-major (resp. row-major). Read-only.
- */
-template<typename Derived>
-const typename SparseMatrixBase<Derived>::ConstInnerVectorsReturnType
-SparseMatrixBase<Derived>::innerVectors(Index outerStart, Index outerSize) const
-{
- return Block<const Derived,Dynamic,Dynamic,true>(derived(),
- IsRowMajor ? outerStart : 0, IsRowMajor ? 0 : outerStart,
- IsRowMajor ? outerSize : rows(), IsRowMajor ? cols() : outerSize);
-
-}
-
/** Generic implementation of sparse Block expression.
* Read-only.
*/
diff --git a/Eigen/src/SparseCore/SparseDenseProduct.h b/Eigen/src/SparseCore/SparseDenseProduct.h
index 0547db596..f005a18a1 100644
--- a/Eigen/src/SparseCore/SparseDenseProduct.h
+++ b/Eigen/src/SparseCore/SparseDenseProduct.h
@@ -88,10 +88,11 @@ struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, A
typedef typename internal::remove_all<SparseLhsType>::type Lhs;
typedef typename internal::remove_all<DenseRhsType>::type Rhs;
typedef typename internal::remove_all<DenseResType>::type Res;
- typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;
+ typedef evaluator<Lhs> LhsEval;
+ typedef typename LhsEval::InnerIterator LhsInnerIterator;
static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha)
{
- evaluator<Lhs> lhsEval(lhs);
+ LhsEval lhsEval(lhs);
for(Index c=0; c<rhs.cols(); ++c)
{
for(Index j=0; j<lhs.outerSize(); ++j)
@@ -111,17 +112,38 @@ struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, t
typedef typename internal::remove_all<SparseLhsType>::type Lhs;
typedef typename internal::remove_all<DenseRhsType>::type Rhs;
typedef typename internal::remove_all<DenseResType>::type Res;
- typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;
+ typedef evaluator<Lhs> LhsEval;
+ typedef typename LhsEval::InnerIterator LhsInnerIterator;
static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
{
- evaluator<Lhs> lhsEval(lhs);
- for(Index j=0; j<lhs.outerSize(); ++j)
+ Index n = lhs.rows();
+ LhsEval lhsEval(lhs);
+
+#ifdef EIGEN_HAS_OPENMP
+ Eigen::initParallel();
+ Index threads = Eigen::nbThreads();
+ // This 20000 threshold has been found experimentally on 2D and 3D Poisson problems.
+ // It represents roughly the minimal amount of work needed for multithreading to pay off.
+ if(threads>1 && lhsEval.nonZerosEstimate()*rhs.cols() > 20000)
{
- typename Res::RowXpr res_j(res.row(j));
- for(LhsInnerIterator it(lhsEval,j); it ;++it)
- res_j += (alpha*it.value()) * rhs.row(it.index());
+ #pragma omp parallel for schedule(dynamic,(n+threads*4-1)/(threads*4)) num_threads(threads)
+ for(Index i=0; i<n; ++i)
+ processRow(lhsEval,rhs,res,alpha,i);
+ }
+ else
+#endif
+ {
+ for(Index i=0; i<n; ++i)
+ processRow(lhsEval, rhs, res, alpha, i);
}
}
+
+ static void processRow(const LhsEval& lhsEval, const DenseRhsType& rhs, Res& res, const typename Res::Scalar& alpha, Index i)
+ {
+ typename Res::RowXpr res_i(res.row(i));
+ for(LhsInnerIterator it(lhsEval,i); it ;++it)
+ res_i += (alpha*it.value()) * rhs.row(it.index());
+ }
};
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
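The row-major kernel above only parallelizes when OpenMP is enabled, Eigen is allowed more than one thread, and the estimated work (non-zeros times right-hand-side columns) exceeds the experimentally chosen 20000 threshold. A hedged usage sketch; the sizes and the diagonal pattern are arbitrary example values:

    #include <Eigen/Dense>
    #include <Eigen/Sparse>
    #include <vector>

    int main()
    {
      const int n = 1000;
      std::vector<Eigen::Triplet<double> > triplets;
      for (int i = 0; i < n; ++i)
        triplets.push_back(Eigen::Triplet<double>(i, i, 2.0));  // simple diagonal pattern

      // A row-major sparse lhs selects the kernel patched above.
      Eigen::SparseMatrix<double, Eigen::RowMajor> A(n, n);
      A.setFromTriplets(triplets.begin(), triplets.end());
      Eigen::MatrixXd B = Eigen::MatrixXd::Random(n, 32);

      Eigen::setNbThreads(4);      // cap the number of threads Eigen may use
      Eigen::MatrixXd C = A * B;   // multithreaded only if built with OpenMP and
                                   // nonZeros() * B.cols() is above the threshold
      return C.rows() == n ? 0 : 1;
    }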
diff --git a/Eigen/src/SparseCore/SparseMatrix.h b/Eigen/src/SparseCore/SparseMatrix.h
index 323c2323b..8bfa5f6b8 100644
--- a/Eigen/src/SparseCore/SparseMatrix.h
+++ b/Eigen/src/SparseCore/SparseMatrix.h
@@ -21,7 +21,7 @@ namespace Eigen {
* This class implements a more versatile variant of the common \em compressed row/column storage format.
* Each column's (resp. row's) non zeros are stored as a pair of value with associated row (resp. column) index.
* All the non zeros are stored in a single large buffer. Unlike the \em compressed format, there might be extra
- * space inbetween the nonzeros of two successive colmuns (resp. rows) such that insertion of new non-zero
+ * space in between the nonzeros of two successive columns (resp. rows) such that insertion of a new non-zero
* can be done with limited memory reallocation and copies.
*
* A call to the function makeCompressed() turns the matrix into the standard \em compressed format
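A hedged illustration of the two storage states described above (sizes, reservation hint, and values are arbitrary):

    #include <Eigen/Sparse>

    int main()
    {
      Eigen::SparseMatrix<double> m(100, 100);        // column-major by default
      m.reserve(Eigen::VectorXi::Constant(100, 4));   // room for ~4 non-zeros per column
      m.insert(3, 7)  = 1.5;                          // cheap while extra space is available
      m.insert(42, 7) = -2.0;
      const bool before = m.isCompressed();           // false: uncompressed mode
      m.makeCompressed();                             // squeeze out the gaps -> standard CCS
      const bool after  = m.isCompressed();           // true
      return (!before && after) ? 0 : 1;
    }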
@@ -503,7 +503,7 @@ class SparseMatrix
}
}
- /** Suppresses all nonzeros which are \b much \b smaller \b than \a reference under the tolerence \a epsilon */
+ /** Suppresses all nonzeros which are \b much \b smaller \b than \a reference under the tolerance \a epsilon */
void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
{
prune(default_prunning_func(reference,epsilon));
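For example (reference and tolerance values chosen arbitrarily), a sketch of how such entries get dropped:

    #include <Eigen/Sparse>

    int main()
    {
      Eigen::SparseMatrix<double> m(10, 10);
      m.insert(0, 0) = 1.0;
      m.insert(1, 1) = 1e-20;      // numerically negligible entry
      // Keep only entries that are not much smaller than the reference 1.0
      // under the tolerance 1e-12; the 1e-20 entry is removed.
      m.prune(1.0, 1e-12);
      return m.nonZeros() == 1 ? 0 : 1;
    }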
@@ -604,9 +604,9 @@ class SparseMatrix
m_outerIndex = newOuterIndex;
if (outerChange > 0)
{
- StorageIndex last = m_outerSize == 0 ? 0 : m_outerIndex[m_outerSize];
+ StorageIndex lastIdx = m_outerSize == 0 ? 0 : m_outerIndex[m_outerSize];
for(Index i=m_outerSize; i<m_outerSize+outerChange+1; i++)
- m_outerIndex[i] = last;
+ m_outerIndex[i] = lastIdx;
}
m_outerSize += outerChange;
}
@@ -986,7 +986,7 @@ void set_from_triplets(const InputIterator& begin, const InputIterator& end, Spa
*
* \warning The list of triplets is read multiple times (at least twice). Therefore, it is not recommended to define
* an abstract iterator over a complex data-structure that would be expensive to evaluate. The triplets should rather
- * be explicitely stored into a std::vector for instance.
+ * be explicitly stored into a std::vector for instance.
*/
template<typename Scalar, int _Options, typename _StorageIndex>
template<typename InputIterators>
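A usage sketch following that advice (the triplet values are placeholders):

    #include <Eigen/Sparse>
    #include <vector>

    int main()
    {
      typedef Eigen::Triplet<double> T;
      std::vector<T> triplets;                  // cheap to iterate over more than once
      triplets.push_back(T(0, 0, 3.0));
      triplets.push_back(T(1, 2, -1.0));
      triplets.push_back(T(1, 2,  2.0));        // duplicates are summed: (1,2) ends up as 1.0

      Eigen::SparseMatrix<double> A(4, 4);
      A.setFromTriplets(triplets.begin(), triplets.end());
      return A.coeff(1, 2) == 1.0 ? 0 : 1;
    }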
diff --git a/Eigen/src/SparseCore/SparseMatrixBase.h b/Eigen/src/SparseCore/SparseMatrixBase.h
index c6b548f11..229449f02 100644
--- a/Eigen/src/SparseCore/SparseMatrixBase.h
+++ b/Eigen/src/SparseCore/SparseMatrixBase.h
@@ -87,6 +87,11 @@ template<typename Derived> class SparseMatrixBase
* we are dealing with a column-vector (if there is only one column) or with
* a row-vector (if there is only one row). */
+ NumDimensions = int(MaxSizeAtCompileTime) == 1 ? 0 : bool(IsVectorAtCompileTime) ? 1 : 2,
+ /**< This value is equal to Tensor::NumDimensions, i.e. 0 for scalars, 1 for vectors,
+ * and 2 for matrices.
+ */
+
Flags = internal::traits<Derived>::Flags,
/**< This stores expression \ref flags flags which may or may not be inherited by new expressions
* constructed from this one. See the \ref flags "list of flags".
@@ -350,18 +355,6 @@ template<typename Derived> class SparseMatrixBase
const ConstTransposeReturnType transpose() const { return ConstTransposeReturnType(derived()); }
const AdjointReturnType adjoint() const { return AdjointReturnType(transpose()); }
- // inner-vector
- typedef Block<Derived,IsRowMajor?1:Dynamic,IsRowMajor?Dynamic:1,true> InnerVectorReturnType;
- typedef Block<const Derived,IsRowMajor?1:Dynamic,IsRowMajor?Dynamic:1,true> ConstInnerVectorReturnType;
- InnerVectorReturnType innerVector(Index outer);
- const ConstInnerVectorReturnType innerVector(Index outer) const;
-
- // set of inner-vectors
- typedef Block<Derived,Dynamic,Dynamic,true> InnerVectorsReturnType;
- typedef Block<const Derived,Dynamic,Dynamic,true> ConstInnerVectorsReturnType;
- InnerVectorsReturnType innerVectors(Index outerStart, Index outerSize);
- const ConstInnerVectorsReturnType innerVectors(Index outerStart, Index outerSize) const;
-
DenseMatrixType toDense() const
{
return DenseMatrixType(derived());
diff --git a/Eigen/src/SparseCore/SparseProduct.h b/Eigen/src/SparseCore/SparseProduct.h
index 4cbf68781..c495a7398 100644
--- a/Eigen/src/SparseCore/SparseProduct.h
+++ b/Eigen/src/SparseCore/SparseProduct.h
@@ -17,7 +17,7 @@ namespace Eigen {
* The automatic pruning of the small values can be achieved by calling the pruned() function
* in which case a totally different product algorithm is employed:
* \code
- * C = (A*B).pruned(); // supress numerical zeros (exact)
+ * C = (A*B).pruned(); // suppress numerical zeros (exact)
* C = (A*B).pruned(ref);
* C = (A*B).pruned(ref,epsilon);
* \endcode
diff --git a/Eigen/src/SparseCore/SparseSelfAdjointView.h b/Eigen/src/SparseCore/SparseSelfAdjointView.h
index 9e39be738..65611b3d4 100644
--- a/Eigen/src/SparseCore/SparseSelfAdjointView.h
+++ b/Eigen/src/SparseCore/SparseSelfAdjointView.h
@@ -47,6 +47,7 @@ template<typename MatrixType, unsigned int _Mode> class SparseSelfAdjointView
enum {
Mode = _Mode,
+ TransposeMode = ((Mode & Upper) ? Lower : 0) | ((Mode & Lower) ? Upper : 0),
RowsAtCompileTime = internal::traits<SparseSelfAdjointView>::RowsAtCompileTime,
ColsAtCompileTime = internal::traits<SparseSelfAdjointView>::ColsAtCompileTime
};
@@ -310,7 +311,7 @@ inline void sparse_selfadjoint_time_dense_product(const SparseLhsType& lhs, cons
while (i && i.index()<j) ++i;
if(i && i.index()==j)
{
- res(j,k) += alpha * i.value() * rhs(j,k);
+ res.coeffRef(j,k) += alpha * i.value() * rhs.coeff(j,k);
++i;
}
}
@@ -323,14 +324,14 @@ inline void sparse_selfadjoint_time_dense_product(const SparseLhsType& lhs, cons
{
LhsScalar lhs_ij = i.value();
if(!LhsIsRowMajor) lhs_ij = numext::conj(lhs_ij);
- res_j += lhs_ij * rhs(i.index(),k);
+ res_j += lhs_ij * rhs.coeff(i.index(),k);
res(i.index(),k) += numext::conj(lhs_ij) * rhs_j;
}
- res(j,k) += alpha * res_j;
+ res.coeffRef(j,k) += alpha * res_j;
// handle diagonal coeff
if (ProcessFirstHalf && i && (i.index()==j))
- res(j,k) += alpha * i.value() * rhs(j,k);
+ res.coeffRef(j,k) += alpha * i.value() * rhs.coeff(j,k);
}
}
}
@@ -368,7 +369,7 @@ struct generic_product_impl<Lhs, RhsView, DenseShape, SparseSelfAdjointShape, Pr
// transpose everything
Transpose<Dest> dstT(dst);
- internal::sparse_selfadjoint_time_dense_product<RhsView::Mode>(rhsNested.transpose(), lhsNested.transpose(), dstT, alpha);
+ internal::sparse_selfadjoint_time_dense_product<RhsView::TransposeMode>(rhsNested.transpose(), lhsNested.transpose(), dstT, alpha);
}
};
diff --git a/Eigen/src/SparseCore/SparseSparseProductWithPruning.h b/Eigen/src/SparseCore/SparseSparseProductWithPruning.h
index 21c419002..88820a48f 100644
--- a/Eigen/src/SparseCore/SparseSparseProductWithPruning.h
+++ b/Eigen/src/SparseCore/SparseSparseProductWithPruning.h
@@ -21,7 +21,8 @@ static void sparse_sparse_product_with_pruning_impl(const Lhs& lhs, const Rhs& r
{
// return sparse_sparse_product_with_pruning_impl2(lhs,rhs,res);
- typedef typename remove_all<Lhs>::type::Scalar Scalar;
+ typedef typename remove_all<Rhs>::type::Scalar RhsScalar;
+ typedef typename remove_all<ResultType>::type::Scalar ResScalar;
typedef typename remove_all<Lhs>::type::StorageIndex StorageIndex;
// make sure to call innerSize/outerSize since we fake the storage order.
@@ -31,7 +32,7 @@ static void sparse_sparse_product_with_pruning_impl(const Lhs& lhs, const Rhs& r
eigen_assert(lhs.outerSize() == rhs.innerSize());
// allocate a temporary buffer
- AmbiVector<Scalar,StorageIndex> tempVector(rows);
+ AmbiVector<ResScalar,StorageIndex> tempVector(rows);
// mimics a resizeByInnerOuter:
if(ResultType::IsRowMajor)
@@ -63,14 +64,14 @@ static void sparse_sparse_product_with_pruning_impl(const Lhs& lhs, const Rhs& r
{
// FIXME should be written like this: tmp += rhsIt.value() * lhs.col(rhsIt.index())
tempVector.restart();
- Scalar x = rhsIt.value();
+ RhsScalar x = rhsIt.value();
for (typename evaluator<Lhs>::InnerIterator lhsIt(lhsEval, rhsIt.index()); lhsIt; ++lhsIt)
{
tempVector.coeffRef(lhsIt.index()) += lhsIt.value() * x;
}
}
res.startVec(j);
- for (typename AmbiVector<Scalar,StorageIndex>::Iterator it(tempVector,tolerance); it; ++it)
+ for (typename AmbiVector<ResScalar,StorageIndex>::Iterator it(tempVector,tolerance); it; ++it)
res.insertBackByOuterInner(j,it.index()) = it.value();
}
res.finalize();
@@ -85,7 +86,6 @@ struct sparse_sparse_product_with_pruning_selector;
template<typename Lhs, typename Rhs, typename ResultType>
struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,ColMajor,ColMajor,ColMajor>
{
- typedef typename traits<typename remove_all<Lhs>::type>::Scalar Scalar;
typedef typename ResultType::RealScalar RealScalar;
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
@@ -129,8 +129,8 @@ struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,RowMajor,R
typedef typename ResultType::RealScalar RealScalar;
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
{
- typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename Lhs::StorageIndex> ColMajorMatrixLhs;
- typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename Lhs::StorageIndex> ColMajorMatrixRhs;
+ typedef SparseMatrix<typename Lhs::Scalar,ColMajor,typename Lhs::StorageIndex> ColMajorMatrixLhs;
+ typedef SparseMatrix<typename Rhs::Scalar,ColMajor,typename Lhs::StorageIndex> ColMajorMatrixRhs;
ColMajorMatrixLhs colLhs(lhs);
ColMajorMatrixRhs colRhs(rhs);
internal::sparse_sparse_product_with_pruning_impl<ColMajorMatrixLhs,ColMajorMatrixRhs,ResultType>(colLhs, colRhs, res, tolerance);
@@ -149,7 +149,7 @@ struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,ColMajor,R
typedef typename ResultType::RealScalar RealScalar;
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
{
- typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename Lhs::StorageIndex> RowMajorMatrixLhs;
+ typedef SparseMatrix<typename Lhs::Scalar,RowMajor,typename Lhs::StorageIndex> RowMajorMatrixLhs;
RowMajorMatrixLhs rowLhs(lhs);
sparse_sparse_product_with_pruning_selector<RowMajorMatrixLhs,Rhs,ResultType,RowMajor,RowMajor>(rowLhs,rhs,res,tolerance);
}
@@ -161,7 +161,7 @@ struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,RowMajor,C
typedef typename ResultType::RealScalar RealScalar;
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
{
- typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename Lhs::StorageIndex> RowMajorMatrixRhs;
+ typedef SparseMatrix<typename Rhs::Scalar,RowMajor,typename Lhs::StorageIndex> RowMajorMatrixRhs;
RowMajorMatrixRhs rowRhs(rhs);
sparse_sparse_product_with_pruning_selector<Lhs,RowMajorMatrixRhs,ResultType,RowMajor,RowMajor,RowMajor>(lhs,rowRhs,res,tolerance);
}
@@ -173,7 +173,7 @@ struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,ColMajor,R
typedef typename ResultType::RealScalar RealScalar;
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
{
- typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename Lhs::StorageIndex> ColMajorMatrixRhs;
+ typedef SparseMatrix<typename Rhs::Scalar,ColMajor,typename Lhs::StorageIndex> ColMajorMatrixRhs;
ColMajorMatrixRhs colRhs(rhs);
internal::sparse_sparse_product_with_pruning_impl<Lhs,ColMajorMatrixRhs,ResultType>(lhs, colRhs, res, tolerance);
}
@@ -185,7 +185,7 @@ struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,RowMajor,C
typedef typename ResultType::RealScalar RealScalar;
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
{
- typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename Lhs::StorageIndex> ColMajorMatrixLhs;
+ typedef SparseMatrix<typename Lhs::Scalar,ColMajor,typename Lhs::StorageIndex> ColMajorMatrixLhs;
ColMajorMatrixLhs colLhs(lhs);
internal::sparse_sparse_product_with_pruning_impl<ColMajorMatrixLhs,Rhs,ResultType>(colLhs, rhs, res, tolerance);
}
diff --git a/Eigen/src/SparseCore/SparseVector.h b/Eigen/src/SparseCore/SparseVector.h
index 19b0fbc9d..05779be68 100644
--- a/Eigen/src/SparseCore/SparseVector.h
+++ b/Eigen/src/SparseCore/SparseVector.h
@@ -281,7 +281,7 @@ class SparseVector
}
/** Swaps the values of \c *this and \a other.
- * Overloaded for performance: this version performs a \em shallow swap by swaping pointers and attributes only.
+ * Overloaded for performance: this version performs a \em shallow swap by swapping pointers and attributes only.
* \sa SparseMatrixBase::swap()
*/
inline void swap(SparseVector& other)
diff --git a/Eigen/src/SparseLU/SparseLU.h b/Eigen/src/SparseLU/SparseLU.h
index f883ab383..91b6369ac 100644
--- a/Eigen/src/SparseLU/SparseLU.h
+++ b/Eigen/src/SparseLU/SparseLU.h
@@ -193,7 +193,7 @@ class SparseLU : public SparseSolverBase<SparseLU<_MatrixType,_OrderingType> >,
/** \brief Reports whether previous computation was successful.
*
- * \returns \c Success if computation was succesful,
+ * \returns \c Success if computation was successful,
* \c NumericalIssue if the LU factorization reports a problem, zero diagonal for instance
* \c InvalidInput if the input matrix is invalid
*
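A hedged sketch of the pattern this documents (the 2x2 matrix and right-hand side are arbitrary; error handling is reduced to early returns):

    #include <Eigen/Dense>
    #include <Eigen/Sparse>

    int main()
    {
      Eigen::SparseMatrix<double> A(2, 2);
      A.insert(0, 0) = 4.0;
      A.insert(1, 1) = 2.0;
      A.makeCompressed();
      Eigen::VectorXd b(2);
      b << 1.0, 1.0;

      Eigen::SparseLU<Eigen::SparseMatrix<double> > lu;
      lu.compute(A);                        // analyzePattern() + factorize()
      if (lu.info() != Eigen::Success)
        return 1;                           // NumericalIssue or InvalidInput
      Eigen::VectorXd x = lu.solve(b);
      return lu.info() == Eigen::Success ? 0 : 1;
    }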
@@ -499,11 +499,8 @@ void SparseLU<MatrixType, OrderingType>::factorize(const MatrixType& matrix)
eigen_assert(m_analysisIsOk && "analyzePattern() should be called first");
eigen_assert((matrix.rows() == matrix.cols()) && "Only for squared matrices");
- typedef typename IndexVector::Scalar StorageIndex;
-
m_isInitialized = true;
-
// Apply the column permutation computed in analyzepattern()
// m_mat = matrix * m_perm_c.inverse();
m_mat = matrix;
@@ -706,8 +703,8 @@ struct SparseLUMatrixLReturnType : internal::no_assignment_operator
typedef typename MappedSupernodalType::Scalar Scalar;
explicit SparseLUMatrixLReturnType(const MappedSupernodalType& mapL) : m_mapL(mapL)
{ }
- Index rows() { return m_mapL.rows(); }
- Index cols() { return m_mapL.cols(); }
+ Index rows() const { return m_mapL.rows(); }
+ Index cols() const { return m_mapL.cols(); }
template<typename Dest>
void solveInPlace( MatrixBase<Dest> &X) const
{
@@ -723,8 +720,8 @@ struct SparseLUMatrixUReturnType : internal::no_assignment_operator
SparseLUMatrixUReturnType(const MatrixLType& mapL, const MatrixUType& mapU)
: m_mapL(mapL),m_mapU(mapU)
{ }
- Index rows() { return m_mapL.rows(); }
- Index cols() { return m_mapL.cols(); }
+ Index rows() const { return m_mapL.rows(); }
+ Index cols() const { return m_mapL.cols(); }
template<typename Dest> void solveInPlace(MatrixBase<Dest> &X) const
{
@@ -747,8 +744,9 @@ struct SparseLUMatrixUReturnType : internal::no_assignment_operator
}
else
{
+ // FIXME: the following lines should use Block expressions and not Map!
Map<const Matrix<Scalar,Dynamic,Dynamic, ColMajor>, 0, OuterStride<> > A( &(m_mapL.valuePtr()[luptr]), nsupc, nsupc, OuterStride<>(lda) );
- Map< Matrix<Scalar,Dynamic,Dest::ColsAtCompileTime, ColMajor>, 0, OuterStride<> > U (&(X(fsupc,0)), nsupc, nrhs, OuterStride<>(n) );
+ Map< Matrix<Scalar,Dynamic,Dest::ColsAtCompileTime, ColMajor>, 0, OuterStride<> > U (&(X.coeffRef(fsupc,0)), nsupc, nrhs, OuterStride<>(n) );
U = A.template triangularView<Upper>().solve(U);
}
diff --git a/Eigen/src/SparseLU/SparseLU_Memory.h b/Eigen/src/SparseLU/SparseLU_Memory.h
index 4dc42e87b..349bfd585 100644
--- a/Eigen/src/SparseLU/SparseLU_Memory.h
+++ b/Eigen/src/SparseLU/SparseLU_Memory.h
@@ -51,7 +51,7 @@ inline Index LUTempSpace(Index&m, Index& w)
/**
- * Expand the existing storage to accomodate more fill-ins
+ * Expand the existing storage to accommodate more fill-ins
* \param vec Valid pointer to the vector to allocate or expand
* \param[in,out] length At input, contain the current length of the vector that is to be increased. At output, length of the newly allocated vector
* \param[in] nbElts Current number of elements in the factors
diff --git a/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h b/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h
index 721e1883b..8583b1b69 100644
--- a/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h
+++ b/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h
@@ -75,12 +75,12 @@ class MappedSuperNodalMatrix
/**
* Number of rows
*/
- Index rows() { return m_row; }
+ Index rows() const { return m_row; }
/**
* Number of columns
*/
- Index cols() { return m_col; }
+ Index cols() const { return m_col; }
/**
* Return the array of nonzero values packed by column
diff --git a/Eigen/src/SparseLU/SparseLU_column_dfs.h b/Eigen/src/SparseLU/SparseLU_column_dfs.h
index c98b30e32..5a2c941b4 100644
--- a/Eigen/src/SparseLU/SparseLU_column_dfs.h
+++ b/Eigen/src/SparseLU/SparseLU_column_dfs.h
@@ -151,7 +151,7 @@ Index SparseLUImpl<Scalar,StorageIndex>::column_dfs(const Index m, const Index j
StorageIndex ito = glu.xlsub(fsupc+1);
glu.xlsub(jcolm1) = ito;
StorageIndex istop = ito + jptr - jm1ptr;
- xprune(jcolm1) = istop; // intialize xprune(jcol-1)
+ xprune(jcolm1) = istop; // initialize xprune(jcol-1)
glu.xlsub(jcol) = istop;
for (StorageIndex ifrom = jm1ptr; ifrom < nextl; ++ifrom, ++ito)
@@ -166,7 +166,7 @@ Index SparseLUImpl<Scalar,StorageIndex>::column_dfs(const Index m, const Index j
// Tidy up the pointers before exit
glu.xsup(nsuper+1) = jcolp1;
glu.supno(jcolp1) = nsuper;
- xprune(jcol) = StorageIndex(nextl); // Intialize upper bound for pruning
+ xprune(jcol) = StorageIndex(nextl); // Initialize upper bound for pruning
glu.xlsub(jcolp1) = StorageIndex(nextl);
return 0;
diff --git a/Eigen/src/SparseLU/SparseLU_gemm_kernel.h b/Eigen/src/SparseLU/SparseLU_gemm_kernel.h
index 95ba7413f..e37c2fe0d 100644
--- a/Eigen/src/SparseLU/SparseLU_gemm_kernel.h
+++ b/Eigen/src/SparseLU/SparseLU_gemm_kernel.h
@@ -215,7 +215,7 @@ void sparselu_gemm(Index m, Index n, Index d, const Scalar* A, Index lda, const
if(RK==4){ a3 = pload<Packet>(A3+i+(I+1)*PacketSize); }\
pstore(C0+i+(I)*PacketSize, c0);
- // agressive vectorization and peeling
+ // aggressive vectorization and peeling
for(Index i=0; i<actual_b_end1; i+=PacketSize*8)
{
EIGEN_ASM_COMMENT("SPARSELU_GEMML_KERNEL2");
diff --git a/Eigen/src/SparseLU/SparseLU_panel_bmod.h b/Eigen/src/SparseLU/SparseLU_panel_bmod.h
index 822cf32c3..f052001c8 100644
--- a/Eigen/src/SparseLU/SparseLU_panel_bmod.h
+++ b/Eigen/src/SparseLU/SparseLU_panel_bmod.h
@@ -38,7 +38,7 @@ namespace internal {
* \brief Performs numeric block updates (sup-panel) in topological order.
*
* Before entering this routine, the original nonzeros in the panel
- * were already copied i nto the spa[m,w]
+ * were already copied into the spa[m,w]
*
* \param m number of rows in the matrix
* \param w Panel size
diff --git a/Eigen/src/SparseQR/SparseQR.h b/Eigen/src/SparseQR/SparseQR.h
index 2d4498b03..1a28389e8 100644
--- a/Eigen/src/SparseQR/SparseQR.h
+++ b/Eigen/src/SparseQR/SparseQR.h
@@ -52,7 +52,7 @@ namespace internal {
* rank-revealing permutations. Use colsPermutation() to get it.
*
* Q is the orthogonal matrix represented as products of Householder reflectors.
- * Use matrixQ() to get an expression and matrixQ().transpose() to get the transpose.
+ * Use matrixQ() to get an expression and matrixQ().adjoint() to get the adjoint.
* You can then apply it to a vector.
*
* R is the sparse triangular or trapezoidal matrix. The latter occurs when A is rank-deficient.
@@ -65,6 +65,7 @@ namespace internal {
* \implsparsesolverconcept
*
* \warning The input sparse matrix A must be in compressed mode (see SparseMatrix::makeCompressed()).
+ * \warning For complex matrices, matrixQ().transpose() will actually return the adjoint matrix.
*
*/
template<typename _MatrixType, typename _OrderingType>
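A hedged sketch of the usage described above (the 3x2 matrix is a placeholder; for complex scalars the transpose()/adjoint() caveat from the warning applies):

    #include <Eigen/Dense>
    #include <Eigen/Sparse>

    int main()
    {
      Eigen::SparseMatrix<double> A(3, 2);
      A.insert(0, 0) = 1.0;  A.insert(1, 0) = 2.0;
      A.insert(1, 1) = 1.0;  A.insert(2, 1) = 3.0;
      A.makeCompressed();                             // SparseQR requires compressed input
      Eigen::VectorXd b(3);
      b << 1.0, 2.0, 3.0;

      Eigen::SparseQR<Eigen::SparseMatrix<double>, Eigen::COLAMDOrdering<int> > qr(A);
      if (qr.info() != Eigen::Success) return 1;

      Eigen::VectorXd x = qr.solve(b);                // least-squares solution
      Eigen::VectorXd y = qr.matrixQ().adjoint() * b; // apply Q^* without forming Q
      return (x.size() == 2 && y.size() == 3) ? 0 : 1;
    }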
@@ -196,9 +197,9 @@ class SparseQR : public SparseSolverBase<SparseQR<_MatrixType,_OrderingType> >
Index rank = this->rank();
- // Compute Q^T * b;
+ // Compute Q^* * b;
typename Dest::PlainObject y, b;
- y = this->matrixQ().transpose() * B;
+ y = this->matrixQ().adjoint() * B;
b = y;
// Solve with the triangular matrix R
@@ -604,7 +605,7 @@ struct SparseQR_QProduct : ReturnByValue<SparseQR_QProduct<SparseQRType, Derived
// Get the references
SparseQR_QProduct(const SparseQRType& qr, const Derived& other, bool transpose) :
m_qr(qr),m_other(other),m_transpose(transpose) {}
- inline Index rows() const { return m_transpose ? m_qr.rows() : m_qr.cols(); }
+ inline Index rows() const { return m_qr.matrixQ().rows(); }
inline Index cols() const { return m_other.cols(); }
// Assign to a vector
@@ -632,16 +633,20 @@ struct SparseQR_QProduct : ReturnByValue<SparseQR_QProduct<SparseQRType, Derived
}
else
{
- eigen_assert(m_qr.m_Q.rows() == m_other.rows() && "Non conforming object sizes");
+ eigen_assert(m_qr.matrixQ().cols() == m_other.rows() && "Non conforming object sizes");
+
+ res.conservativeResize(rows(), cols());
+
// Compute res = Q * other column by column
for(Index j = 0; j < res.cols(); j++)
{
- for (Index k = diagSize-1; k >=0; k--)
+ Index start_k = internal::is_identity<Derived>::value ? numext::mini(j,diagSize-1) : diagSize-1;
+ for (Index k = start_k; k >=0; k--)
{
Scalar tau = Scalar(0);
tau = m_qr.m_Q.col(k).dot(res.col(j));
if(tau==Scalar(0)) continue;
- tau = tau * m_qr.m_hcoeffs(k);
+ tau = tau * numext::conj(m_qr.m_hcoeffs(k));
res.col(j) -= tau * m_qr.m_Q.col(k);
}
}
@@ -650,7 +655,7 @@ struct SparseQR_QProduct : ReturnByValue<SparseQR_QProduct<SparseQRType, Derived
const SparseQRType& m_qr;
const Derived& m_other;
- bool m_transpose;
+ bool m_transpose; // TODO this actually means adjoint
};
template<typename SparseQRType>
@@ -668,13 +673,14 @@ struct SparseQRMatrixQReturnType : public EigenBase<SparseQRMatrixQReturnType<Sp
{
return SparseQR_QProduct<SparseQRType,Derived>(m_qr,other.derived(),false);
}
+ // To use for operations with the adjoint of Q
SparseQRMatrixQTransposeReturnType<SparseQRType> adjoint() const
{
return SparseQRMatrixQTransposeReturnType<SparseQRType>(m_qr);
}
inline Index rows() const { return m_qr.rows(); }
- inline Index cols() const { return (std::min)(m_qr.rows(),m_qr.cols()); }
- // To use for operations with the transpose of Q
+ inline Index cols() const { return m_qr.rows(); }
+ // To use for operations with the transpose of Q FIXME this is the same as adjoint at the moment
SparseQRMatrixQTransposeReturnType<SparseQRType> transpose() const
{
return SparseQRMatrixQTransposeReturnType<SparseQRType>(m_qr);
@@ -682,6 +688,7 @@ struct SparseQRMatrixQReturnType : public EigenBase<SparseQRMatrixQReturnType<Sp
const SparseQRType& m_qr;
};
+// TODO this actually represents the adjoint of Q
template<typename SparseQRType>
struct SparseQRMatrixQTransposeReturnType
{
@@ -712,7 +719,7 @@ struct Assignment<DstXprType, SparseQRMatrixQReturnType<SparseQRType>, internal:
typedef typename DstXprType::StorageIndex StorageIndex;
static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &/*func*/)
{
- typename DstXprType::PlainObject idMat(src.m_qr.rows(), src.m_qr.rows());
+ typename DstXprType::PlainObject idMat(src.rows(), src.cols());
idMat.setIdentity();
// Sort the sparse householder reflectors if needed
const_cast<SparseQRType *>(&src.m_qr)->_sort_matrix_Q();
diff --git a/Eigen/src/StlSupport/StdDeque.h b/Eigen/src/StlSupport/StdDeque.h
index cf1fedf92..045da7b4d 100644
--- a/Eigen/src/StlSupport/StdDeque.h
+++ b/Eigen/src/StlSupport/StdDeque.h
@@ -36,7 +36,7 @@ namespace std \
deque(InputIterator first, InputIterator last, const allocator_type& a = allocator_type()) : deque_base(first, last, a) {} \
deque(const deque& c) : deque_base(c) {} \
explicit deque(size_type num, const value_type& val = value_type()) : deque_base(num, val) {} \
- deque(iterator start, iterator end) : deque_base(start, end) {} \
+ deque(iterator start_, iterator end_) : deque_base(start_, end_) {} \
deque& operator=(const deque& x) { \
deque_base::operator=(x); \
return *this; \
@@ -62,7 +62,7 @@ namespace std {
: deque_base(first, last, a) {} \
deque(const deque& c) : deque_base(c) {} \
explicit deque(size_type num, const value_type& val = value_type()) : deque_base(num, val) {} \
- deque(iterator start, iterator end) : deque_base(start, end) {} \
+ deque(iterator start_, iterator end_) : deque_base(start_, end_) {} \
deque& operator=(const deque& x) { \
deque_base::operator=(x); \
return *this; \
diff --git a/Eigen/src/StlSupport/StdList.h b/Eigen/src/StlSupport/StdList.h
index e1eba4985..8ba3fada0 100644
--- a/Eigen/src/StlSupport/StdList.h
+++ b/Eigen/src/StlSupport/StdList.h
@@ -35,7 +35,7 @@ namespace std \
list(InputIterator first, InputIterator last, const allocator_type& a = allocator_type()) : list_base(first, last, a) {} \
list(const list& c) : list_base(c) {} \
explicit list(size_type num, const value_type& val = value_type()) : list_base(num, val) {} \
- list(iterator start, iterator end) : list_base(start, end) {} \
+ list(iterator start_, iterator end_) : list_base(start_, end_) {} \
list& operator=(const list& x) { \
list_base::operator=(x); \
return *this; \
@@ -62,7 +62,7 @@ namespace std
: list_base(first, last, a) {} \
list(const list& c) : list_base(c) {} \
explicit list(size_type num, const value_type& val = value_type()) : list_base(num, val) {} \
- list(iterator start, iterator end) : list_base(start, end) {} \
+ list(iterator start_, iterator end_) : list_base(start_, end_) {} \
list& operator=(const list& x) { \
list_base::operator=(x); \
return *this; \
diff --git a/Eigen/src/StlSupport/StdVector.h b/Eigen/src/StlSupport/StdVector.h
index ec22821d2..9fcf19bce 100644
--- a/Eigen/src/StlSupport/StdVector.h
+++ b/Eigen/src/StlSupport/StdVector.h
@@ -36,7 +36,7 @@ namespace std \
vector(InputIterator first, InputIterator last, const allocator_type& a = allocator_type()) : vector_base(first, last, a) {} \
vector(const vector& c) : vector_base(c) {} \
explicit vector(size_type num, const value_type& val = value_type()) : vector_base(num, val) {} \
- vector(iterator start, iterator end) : vector_base(start, end) {} \
+ vector(iterator start_, iterator end_) : vector_base(start_, end_) {} \
vector& operator=(const vector& x) { \
vector_base::operator=(x); \
return *this; \
@@ -62,7 +62,7 @@ namespace std {
: vector_base(first, last, a) {} \
vector(const vector& c) : vector_base(c) {} \
explicit vector(size_type num, const value_type& val = value_type()) : vector_base(num, val) {} \
- vector(iterator start, iterator end) : vector_base(start, end) {} \
+ vector(iterator start_, iterator end_) : vector_base(start_, end_) {} \
vector& operator=(const vector& x) { \
vector_base::operator=(x); \
return *this; \
diff --git a/Eigen/src/SuperLUSupport/SuperLUSupport.h b/Eigen/src/SuperLUSupport/SuperLUSupport.h
index 50a69f306..354e33de5 100644
--- a/Eigen/src/SuperLUSupport/SuperLUSupport.h
+++ b/Eigen/src/SuperLUSupport/SuperLUSupport.h
@@ -297,8 +297,8 @@ SluMatrix asSluMatrix(MatrixType& mat)
template<typename Scalar, int Flags, typename Index>
MappedSparseMatrix<Scalar,Flags,Index> map_superlu(SluMatrix& sluMat)
{
- eigen_assert((Flags&RowMajor)==RowMajor && sluMat.Stype == SLU_NR
- || (Flags&ColMajor)==ColMajor && sluMat.Stype == SLU_NC);
+ eigen_assert(((Flags&RowMajor)==RowMajor && sluMat.Stype == SLU_NR)
+ || ((Flags&ColMajor)==ColMajor && sluMat.Stype == SLU_NC));
Index outerSize = (Flags&RowMajor)==RowMajor ? sluMat.ncol : sluMat.nrow;
@@ -352,7 +352,7 @@ class SuperLUBase : public SparseSolverBase<Derived>
/** \brief Reports whether previous computation was successful.
*
- * \returns \c Success if computation was succesful,
+ * \returns \c Success if computation was successful,
* \c NumericalIssue if the matrix appears to be negative.
*/
ComputationInfo info() const
diff --git a/Eigen/src/UmfPackSupport/UmfPackSupport.h b/Eigen/src/UmfPackSupport/UmfPackSupport.h
index 9568cc1d5..e3a333f80 100644
--- a/Eigen/src/UmfPackSupport/UmfPackSupport.h
+++ b/Eigen/src/UmfPackSupport/UmfPackSupport.h
@@ -10,6 +10,16 @@
#ifndef EIGEN_UMFPACKSUPPORT_H
#define EIGEN_UMFPACKSUPPORT_H
+// For compatibility with very old versions of UMFPACK;
+// this may no longer be needed, but it is harmless.
+#ifndef SuiteSparse_long
+#ifdef UF_long
+#define SuiteSparse_long UF_long
+#else
+#error neither SuiteSparse_long nor UF_long are defined
+#endif
+#endif
+
namespace Eigen {
/* TODO extract L, extract U, compute det, etc... */
@@ -17,42 +27,85 @@ namespace Eigen {
// generic double/complex<double> wrapper functions:
-inline void umfpack_defaults(double control[UMFPACK_CONTROL], double)
+ // Defaults
+inline void umfpack_defaults(double control[UMFPACK_CONTROL], double, int)
{ umfpack_di_defaults(control); }
-inline void umfpack_defaults(double control[UMFPACK_CONTROL], std::complex<double>)
+inline void umfpack_defaults(double control[UMFPACK_CONTROL], std::complex<double>, int)
{ umfpack_zi_defaults(control); }
-inline void umfpack_report_info(double control[UMFPACK_CONTROL], double info[UMFPACK_INFO], double)
+inline void umfpack_defaults(double control[UMFPACK_CONTROL], double, SuiteSparse_long)
+{ umfpack_dl_defaults(control); }
+
+inline void umfpack_defaults(double control[UMFPACK_CONTROL], std::complex<double>, SuiteSparse_long)
+{ umfpack_zl_defaults(control); }
+
+// Report info
+inline void umfpack_report_info(double control[UMFPACK_CONTROL], double info[UMFPACK_INFO], double, int)
{ umfpack_di_report_info(control, info);}
-inline void umfpack_report_info(double control[UMFPACK_CONTROL], double info[UMFPACK_INFO], std::complex<double>)
+inline void umfpack_report_info(double control[UMFPACK_CONTROL], double info[UMFPACK_INFO], std::complex<double>, int)
{ umfpack_zi_report_info(control, info);}
-inline void umfpack_report_status(double control[UMFPACK_CONTROL], int status, double)
+inline void umfpack_report_info(double control[UMFPACK_CONTROL], double info[UMFPACK_INFO], double, SuiteSparse_long)
+{ umfpack_dl_report_info(control, info);}
+
+inline void umfpack_report_info(double control[UMFPACK_CONTROL], double info[UMFPACK_INFO], std::complex<double>, SuiteSparse_long)
+{ umfpack_zl_report_info(control, info);}
+
+// Report status
+inline void umfpack_report_status(double control[UMFPACK_CONTROL], int status, double, int)
{ umfpack_di_report_status(control, status);}
-inline void umfpack_report_status(double control[UMFPACK_CONTROL], int status, std::complex<double>)
+inline void umfpack_report_status(double control[UMFPACK_CONTROL], int status, std::complex<double>, int)
{ umfpack_zi_report_status(control, status);}
-inline void umfpack_report_control(double control[UMFPACK_CONTROL], double)
+inline void umfpack_report_status(double control[UMFPACK_CONTROL], int status, double, SuiteSparse_long)
+{ umfpack_dl_report_status(control, status);}
+
+inline void umfpack_report_status(double control[UMFPACK_CONTROL], int status, std::complex<double>, SuiteSparse_long)
+{ umfpack_zl_report_status(control, status);}
+
+// report control
+inline void umfpack_report_control(double control[UMFPACK_CONTROL], double, int)
{ umfpack_di_report_control(control);}
-inline void umfpack_report_control(double control[UMFPACK_CONTROL], std::complex<double>)
+inline void umfpack_report_control(double control[UMFPACK_CONTROL], std::complex<double>, int)
{ umfpack_zi_report_control(control);}
-inline void umfpack_free_numeric(void **Numeric, double)
+inline void umfpack_report_control(double control[UMFPACK_CONTROL], double, SuiteSparse_long)
+{ umfpack_dl_report_control(control);}
+
+inline void umfpack_report_control(double control[UMFPACK_CONTROL], std::complex<double>, SuiteSparse_long)
+{ umfpack_zl_report_control(control);}
+
+// Free numeric
+inline void umfpack_free_numeric(void **Numeric, double, int)
{ umfpack_di_free_numeric(Numeric); *Numeric = 0; }
-inline void umfpack_free_numeric(void **Numeric, std::complex<double>)
+inline void umfpack_free_numeric(void **Numeric, std::complex<double>, int)
{ umfpack_zi_free_numeric(Numeric); *Numeric = 0; }
-inline void umfpack_free_symbolic(void **Symbolic, double)
+inline void umfpack_free_numeric(void **Numeric, double, SuiteSparse_long)
+{ umfpack_dl_free_numeric(Numeric); *Numeric = 0; }
+
+inline void umfpack_free_numeric(void **Numeric, std::complex<double>, SuiteSparse_long)
+{ umfpack_zl_free_numeric(Numeric); *Numeric = 0; }
+
+// Free symbolic
+inline void umfpack_free_symbolic(void **Symbolic, double, int)
{ umfpack_di_free_symbolic(Symbolic); *Symbolic = 0; }
-inline void umfpack_free_symbolic(void **Symbolic, std::complex<double>)
+inline void umfpack_free_symbolic(void **Symbolic, std::complex<double>, int)
{ umfpack_zi_free_symbolic(Symbolic); *Symbolic = 0; }
+inline void umfpack_free_symbolic(void **Symbolic, double, SuiteSparse_long)
+{ umfpack_dl_free_symbolic(Symbolic); *Symbolic = 0; }
+
+inline void umfpack_free_symbolic(void **Symbolic, std::complex<double>, SuiteSparse_long)
+{ umfpack_zl_free_symbolic(Symbolic); *Symbolic = 0; }
+
+// Symbolic
inline int umfpack_symbolic(int n_row,int n_col,
const int Ap[], const int Ai[], const double Ax[], void **Symbolic,
const double Control [UMFPACK_CONTROL], double Info [UMFPACK_INFO])
@@ -66,7 +119,21 @@ inline int umfpack_symbolic(int n_row,int n_col,
{
return umfpack_zi_symbolic(n_row,n_col,Ap,Ai,&numext::real_ref(Ax[0]),0,Symbolic,Control,Info);
}
+inline SuiteSparse_long umfpack_symbolic( SuiteSparse_long n_row,SuiteSparse_long n_col,
+ const SuiteSparse_long Ap[], const SuiteSparse_long Ai[], const double Ax[], void **Symbolic,
+ const double Control [UMFPACK_CONTROL], double Info [UMFPACK_INFO])
+{
+ return umfpack_dl_symbolic(n_row,n_col,Ap,Ai,Ax,Symbolic,Control,Info);
+}
+inline SuiteSparse_long umfpack_symbolic( SuiteSparse_long n_row,SuiteSparse_long n_col,
+ const SuiteSparse_long Ap[], const SuiteSparse_long Ai[], const std::complex<double> Ax[], void **Symbolic,
+ const double Control [UMFPACK_CONTROL], double Info [UMFPACK_INFO])
+{
+ return umfpack_zl_symbolic(n_row,n_col,Ap,Ai,&numext::real_ref(Ax[0]),0,Symbolic,Control,Info);
+}
+
+// Numeric
inline int umfpack_numeric( const int Ap[], const int Ai[], const double Ax[],
void *Symbolic, void **Numeric,
const double Control[UMFPACK_CONTROL],double Info [UMFPACK_INFO])
@@ -80,7 +147,21 @@ inline int umfpack_numeric( const int Ap[], const int Ai[], const std::complex<d
{
return umfpack_zi_numeric(Ap,Ai,&numext::real_ref(Ax[0]),0,Symbolic,Numeric,Control,Info);
}
+inline SuiteSparse_long umfpack_numeric(const SuiteSparse_long Ap[], const SuiteSparse_long Ai[], const double Ax[],
+ void *Symbolic, void **Numeric,
+ const double Control[UMFPACK_CONTROL],double Info [UMFPACK_INFO])
+{
+ return umfpack_dl_numeric(Ap,Ai,Ax,Symbolic,Numeric,Control,Info);
+}
+inline SuiteSparse_long umfpack_numeric(const SuiteSparse_long Ap[], const SuiteSparse_long Ai[], const std::complex<double> Ax[],
+ void *Symbolic, void **Numeric,
+ const double Control[UMFPACK_CONTROL],double Info [UMFPACK_INFO])
+{
+ return umfpack_zl_numeric(Ap,Ai,&numext::real_ref(Ax[0]),0,Symbolic,Numeric,Control,Info);
+}
+
+// solve
inline int umfpack_solve( int sys, const int Ap[], const int Ai[], const double Ax[],
double X[], const double B[], void *Numeric,
const double Control[UMFPACK_CONTROL], double Info[UMFPACK_INFO])
@@ -95,6 +176,21 @@ inline int umfpack_solve( int sys, const int Ap[], const int Ai[], const std::co
return umfpack_zi_solve(sys,Ap,Ai,&numext::real_ref(Ax[0]),0,&numext::real_ref(X[0]),0,&numext::real_ref(B[0]),0,Numeric,Control,Info);
}
+inline SuiteSparse_long umfpack_solve(int sys, const SuiteSparse_long Ap[], const SuiteSparse_long Ai[], const double Ax[],
+ double X[], const double B[], void *Numeric,
+ const double Control[UMFPACK_CONTROL], double Info[UMFPACK_INFO])
+{
+ return umfpack_dl_solve(sys,Ap,Ai,Ax,X,B,Numeric,Control,Info);
+}
+
+inline SuiteSparse_long umfpack_solve(int sys, const SuiteSparse_long Ap[], const SuiteSparse_long Ai[], const std::complex<double> Ax[],
+ std::complex<double> X[], const std::complex<double> B[], void *Numeric,
+ const double Control[UMFPACK_CONTROL], double Info[UMFPACK_INFO])
+{
+ return umfpack_zl_solve(sys,Ap,Ai,&numext::real_ref(Ax[0]),0,&numext::real_ref(X[0]),0,&numext::real_ref(B[0]),0,Numeric,Control,Info);
+}
+
+// Get Lunz
inline int umfpack_get_lunz(int *lnz, int *unz, int *n_row, int *n_col, int *nz_udiag, void *Numeric, double)
{
return umfpack_di_get_lunz(lnz,unz,n_row,n_col,nz_udiag,Numeric);
@@ -105,6 +201,19 @@ inline int umfpack_get_lunz(int *lnz, int *unz, int *n_row, int *n_col, int *nz_
return umfpack_zi_get_lunz(lnz,unz,n_row,n_col,nz_udiag,Numeric);
}
+inline SuiteSparse_long umfpack_get_lunz( SuiteSparse_long *lnz, SuiteSparse_long *unz, SuiteSparse_long *n_row, SuiteSparse_long *n_col,
+ SuiteSparse_long *nz_udiag, void *Numeric, double)
+{
+ return umfpack_dl_get_lunz(lnz,unz,n_row,n_col,nz_udiag,Numeric);
+}
+
+inline SuiteSparse_long umfpack_get_lunz( SuiteSparse_long *lnz, SuiteSparse_long *unz, SuiteSparse_long *n_row, SuiteSparse_long *n_col,
+ SuiteSparse_long *nz_udiag, void *Numeric, std::complex<double>)
+{
+ return umfpack_zl_get_lunz(lnz,unz,n_row,n_col,nz_udiag,Numeric);
+}
+
+// Get Numeric
inline int umfpack_get_numeric(int Lp[], int Lj[], double Lx[], int Up[], int Ui[], double Ux[],
int P[], int Q[], double Dx[], int *do_recip, double Rs[], void *Numeric)
{
@@ -120,18 +229,45 @@ inline int umfpack_get_numeric(int Lp[], int Lj[], std::complex<double> Lx[], in
return umfpack_zi_get_numeric(Lp,Lj,Lx?&lx0_real:0,0,Up,Ui,Ux?&ux0_real:0,0,P,Q,
Dx?&dx0_real:0,0,do_recip,Rs,Numeric);
}
+inline SuiteSparse_long umfpack_get_numeric(SuiteSparse_long Lp[], SuiteSparse_long Lj[], double Lx[], SuiteSparse_long Up[], SuiteSparse_long Ui[], double Ux[],
+ SuiteSparse_long P[], SuiteSparse_long Q[], double Dx[], SuiteSparse_long *do_recip, double Rs[], void *Numeric)
+{
+ return umfpack_dl_get_numeric(Lp,Lj,Lx,Up,Ui,Ux,P,Q,Dx,do_recip,Rs,Numeric);
+}
-inline int umfpack_get_determinant(double *Mx, double *Ex, void *NumericHandle, double User_Info [UMFPACK_INFO])
+inline SuiteSparse_long umfpack_get_numeric(SuiteSparse_long Lp[], SuiteSparse_long Lj[], std::complex<double> Lx[], SuiteSparse_long Up[], SuiteSparse_long Ui[], std::complex<double> Ux[],
+ SuiteSparse_long P[], SuiteSparse_long Q[], std::complex<double> Dx[], SuiteSparse_long *do_recip, double Rs[], void *Numeric)
+{
+ double& lx0_real = numext::real_ref(Lx[0]);
+ double& ux0_real = numext::real_ref(Ux[0]);
+ double& dx0_real = numext::real_ref(Dx[0]);
+ return umfpack_zl_get_numeric(Lp,Lj,Lx?&lx0_real:0,0,Up,Ui,Ux?&ux0_real:0,0,P,Q,
+ Dx?&dx0_real:0,0,do_recip,Rs,Numeric);
+}
+
+// Get Determinant
+inline int umfpack_get_determinant(double *Mx, double *Ex, void *NumericHandle, double User_Info [UMFPACK_INFO], int)
{
return umfpack_di_get_determinant(Mx,Ex,NumericHandle,User_Info);
}
-inline int umfpack_get_determinant(std::complex<double> *Mx, double *Ex, void *NumericHandle, double User_Info [UMFPACK_INFO])
+inline int umfpack_get_determinant(std::complex<double> *Mx, double *Ex, void *NumericHandle, double User_Info [UMFPACK_INFO], int)
{
double& mx_real = numext::real_ref(*Mx);
return umfpack_zi_get_determinant(&mx_real,0,Ex,NumericHandle,User_Info);
}
+inline SuiteSparse_long umfpack_get_determinant(double *Mx, double *Ex, void *NumericHandle, double User_Info [UMFPACK_INFO], SuiteSparse_long)
+{
+ return umfpack_dl_get_determinant(Mx,Ex,NumericHandle,User_Info);
+}
+
+inline SuiteSparse_long umfpack_get_determinant(std::complex<double> *Mx, double *Ex, void *NumericHandle, double User_Info [UMFPACK_INFO], SuiteSparse_long)
+{
+ double& mx_real = numext::real_ref(*Mx);
+ return umfpack_zl_get_determinant(&mx_real,0,Ex,NumericHandle,User_Info);
+}
+
/** \ingroup UmfPackSupport_Module
* \brief A sparse LU factorization and solver based on UmfPack
@@ -164,7 +300,7 @@ class UmfPackLU : public SparseSolverBase<UmfPackLU<_MatrixType> >
typedef Matrix<int, 1, MatrixType::ColsAtCompileTime> IntRowVectorType;
typedef Matrix<int, MatrixType::RowsAtCompileTime, 1> IntColVectorType;
typedef SparseMatrix<Scalar> LUMatrixType;
- typedef SparseMatrix<Scalar,ColMajor,int> UmfpackMatrixType;
+ typedef SparseMatrix<Scalar,ColMajor,StorageIndex> UmfpackMatrixType;
typedef Ref<const UmfpackMatrixType, StandardCompressedFormat> UmfpackMatrixRef;
enum {
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
@@ -192,8 +328,8 @@ class UmfPackLU : public SparseSolverBase<UmfPackLU<_MatrixType> >
~UmfPackLU()
{
- if(m_symbolic) umfpack_free_symbolic(&m_symbolic,Scalar());
- if(m_numeric) umfpack_free_numeric(&m_numeric,Scalar());
+ if(m_symbolic) umfpack_free_symbolic(&m_symbolic,Scalar(), StorageIndex());
+ if(m_numeric) umfpack_free_numeric(&m_numeric,Scalar(), StorageIndex());
}
inline Index rows() const { return mp_matrix.rows(); }
@@ -201,7 +337,7 @@ class UmfPackLU : public SparseSolverBase<UmfPackLU<_MatrixType> >
/** \brief Reports whether previous computation was successful.
*
- * \returns \c Success if computation was succesful,
+ * \returns \c Success if computation was successful,
* \c NumericalIssue if the matrix.appears to be negative.
*/
ComputationInfo info() const
@@ -241,8 +377,8 @@ class UmfPackLU : public SparseSolverBase<UmfPackLU<_MatrixType> >
template<typename InputMatrixType>
void compute(const InputMatrixType& matrix)
{
- if(m_symbolic) umfpack_free_symbolic(&m_symbolic,Scalar());
- if(m_numeric) umfpack_free_numeric(&m_numeric,Scalar());
+ if(m_symbolic) umfpack_free_symbolic(&m_symbolic,Scalar(),StorageIndex());
+ if(m_numeric) umfpack_free_numeric(&m_numeric,Scalar(),StorageIndex());
grab(matrix.derived());
analyzePattern_impl();
factorize_impl();
@@ -257,8 +393,8 @@ class UmfPackLU : public SparseSolverBase<UmfPackLU<_MatrixType> >
template<typename InputMatrixType>
void analyzePattern(const InputMatrixType& matrix)
{
- if(m_symbolic) umfpack_free_symbolic(&m_symbolic,Scalar());
- if(m_numeric) umfpack_free_numeric(&m_numeric,Scalar());
+ if(m_symbolic) umfpack_free_symbolic(&m_symbolic,Scalar(),StorageIndex());
+ if(m_numeric) umfpack_free_numeric(&m_numeric,Scalar(),StorageIndex());
grab(matrix.derived());
@@ -309,7 +445,7 @@ class UmfPackLU : public SparseSolverBase<UmfPackLU<_MatrixType> >
{
eigen_assert(m_analysisIsOk && "UmfPackLU: you must first call analyzePattern()");
if(m_numeric)
- umfpack_free_numeric(&m_numeric,Scalar());
+ umfpack_free_numeric(&m_numeric,Scalar(),StorageIndex());
grab(matrix.derived());
@@ -322,7 +458,7 @@ class UmfPackLU : public SparseSolverBase<UmfPackLU<_MatrixType> >
*/
void printUmfpackControl()
{
- umfpack_report_control(m_control.data(), Scalar());
+ umfpack_report_control(m_control.data(), Scalar(),StorageIndex());
}
/** Prints statistics collected by UmfPack.
@@ -332,7 +468,7 @@ class UmfPackLU : public SparseSolverBase<UmfPackLU<_MatrixType> >
void printUmfpackInfo()
{
eigen_assert(m_analysisIsOk && "UmfPackLU: you must first call analyzePattern()");
- umfpack_report_info(m_control.data(), m_umfpackInfo.data(), Scalar());
+ umfpack_report_info(m_control.data(), m_umfpackInfo.data(), Scalar(),StorageIndex());
}
/** Prints the status of the previous factorization operation performed by UmfPack (symbolic or numerical factorization).
@@ -341,7 +477,7 @@ class UmfPackLU : public SparseSolverBase<UmfPackLU<_MatrixType> >
*/
void printUmfpackStatus() {
eigen_assert(m_analysisIsOk && "UmfPackLU: you must first call analyzePattern()");
- umfpack_report_status(m_control.data(), m_fact_errorCode, Scalar());
+ umfpack_report_status(m_control.data(), m_fact_errorCode, Scalar(),StorageIndex());
}
/** \internal */
@@ -362,13 +498,13 @@ class UmfPackLU : public SparseSolverBase<UmfPackLU<_MatrixType> >
m_symbolic = 0;
m_extractedDataAreDirty = true;
- umfpack_defaults(m_control.data(), Scalar());
+ umfpack_defaults(m_control.data(), Scalar(),StorageIndex());
}
void analyzePattern_impl()
{
- m_fact_errorCode = umfpack_symbolic(internal::convert_index<int>(mp_matrix.rows()),
- internal::convert_index<int>(mp_matrix.cols()),
+ m_fact_errorCode = umfpack_symbolic(internal::convert_index<StorageIndex>(mp_matrix.rows()),
+ internal::convert_index<StorageIndex>(mp_matrix.cols()),
mp_matrix.outerIndexPtr(), mp_matrix.innerIndexPtr(), mp_matrix.valuePtr(),
&m_symbolic, m_control.data(), m_umfpackInfo.data());
@@ -408,7 +544,7 @@ class UmfPackLU : public SparseSolverBase<UmfPackLU<_MatrixType> >
// cached data to reduce reallocation, etc.
mutable LUMatrixType m_l;
- int m_fact_errorCode;
+ StorageIndex m_fact_errorCode;
UmfpackControl m_control;
mutable UmfpackInfo m_umfpackInfo;
@@ -438,7 +574,7 @@ void UmfPackLU<MatrixType>::extractData() const
if (m_extractedDataAreDirty)
{
// get size of the data
- int lnz, unz, rows, cols, nz_udiag;
+ StorageIndex lnz, unz, rows, cols, nz_udiag;
umfpack_get_lunz(&lnz, &unz, &rows, &cols, &nz_udiag, m_numeric, Scalar());
// allocate data
@@ -464,7 +600,7 @@ template<typename MatrixType>
typename UmfPackLU<MatrixType>::Scalar UmfPackLU<MatrixType>::determinant() const
{
Scalar det;
- umfpack_get_determinant(&det, 0, m_numeric, 0);
+ umfpack_get_determinant(&det, 0, m_numeric, 0, StorageIndex());
return det;
}
@@ -477,7 +613,6 @@ bool UmfPackLU<MatrixType>::_solve_impl(const MatrixBase<BDerived> &b, MatrixBas
eigen_assert((XDerived::Flags&RowMajorBit)==0 && "UmfPackLU backend does not support non col-major result yet");
eigen_assert(b.derived().data() != x.derived().data() && " Umfpack does not support inplace solve");
- int errorCode;
Scalar* x_ptr = 0;
Matrix<Scalar,Dynamic,1> x_tmp;
if(x.innerStride()!=1)
@@ -489,9 +624,10 @@ bool UmfPackLU<MatrixType>::_solve_impl(const MatrixBase<BDerived> &b, MatrixBas
{
if(x.innerStride()==1)
x_ptr = &x.col(j).coeffRef(0);
- errorCode = umfpack_solve(UMFPACK_A,
- mp_matrix.outerIndexPtr(), mp_matrix.innerIndexPtr(), mp_matrix.valuePtr(),
- x_ptr, &b.const_cast_derived().col(j).coeffRef(0), m_numeric, m_control.data(), m_umfpackInfo.data());
+ StorageIndex errorCode = umfpack_solve(UMFPACK_A,
+ mp_matrix.outerIndexPtr(), mp_matrix.innerIndexPtr(), mp_matrix.valuePtr(),
+ x_ptr, &b.const_cast_derived().col(j).coeffRef(0),
+ m_numeric, m_control.data(), m_umfpackInfo.data());
if(x.innerStride()!=1)
x.col(j) = x_tmp;
if (errorCode!=0)
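
The UmfPackSupport changes above add SuiteSparse_long overloads (umfpack_dl_*/umfpack_zl_*) next to the existing int wrappers (umfpack_di_*/umfpack_zi_*) and key the dispatch on the matrix StorageIndex, so UmfPackLU can now be instantiated with 64-bit index sparse matrices. A minimal sketch, assuming UMFPACK and its headers are available:

    // Sketch only: LU factorization with SuiteSparse_long indices after this patch.
    #include <Eigen/Sparse>
    #include <Eigen/UmfPackSupport>

    int main()
    {
      typedef Eigen::SparseMatrix<double, Eigen::ColMajor, SuiteSparse_long> SpMat;
      SpMat A(3,3);
      A.insert(0,0) = 4.0; A.insert(1,1) = 3.0; A.insert(2,2) = 2.0;
      A.makeCompressed();

      Eigen::VectorXd b = Eigen::VectorXd::Ones(3);

      Eigen::UmfPackLU<SpMat> lu(A);   // StorageIndex == SuiteSparse_long selects the umfpack_*l_* wrappers
      Eigen::VectorXd x = lu.solve(b);
      return lu.info() == Eigen::Success ? 0 : 1;
    }
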
diff --git a/Eigen/src/misc/lapacke.h b/Eigen/src/misc/lapacke.h
index 8c7e79b03..3d8e24f5a 100755
--- a/Eigen/src/misc/lapacke.h
+++ b/Eigen/src/misc/lapacke.h
@@ -43,10 +43,6 @@
#include "lapacke_config.h"
#endif
-#ifdef __cplusplus
-extern "C" {
-#endif /* __cplusplus */
-
#include <stdlib.h>
#ifndef lapack_int
@@ -108,6 +104,11 @@ lapack_complex_double lapack_make_complex_double( double re, double im );
#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
#ifndef LAPACKE_malloc
#define LAPACKE_malloc( size ) malloc( size )
#endif
diff --git a/Eigen/src/plugins/ArrayCwiseUnaryOps.h b/Eigen/src/plugins/ArrayCwiseUnaryOps.h
index 43615bd56..e928db467 100644
--- a/Eigen/src/plugins/ArrayCwiseUnaryOps.h
+++ b/Eigen/src/plugins/ArrayCwiseUnaryOps.h
@@ -21,6 +21,7 @@ typedef CwiseUnaryOp<internal::scalar_acos_op<Scalar>, const Derived> AcosReturn
typedef CwiseUnaryOp<internal::scalar_asin_op<Scalar>, const Derived> AsinReturnType;
typedef CwiseUnaryOp<internal::scalar_atan_op<Scalar>, const Derived> AtanReturnType;
typedef CwiseUnaryOp<internal::scalar_tanh_op<Scalar>, const Derived> TanhReturnType;
+typedef CwiseUnaryOp<internal::scalar_logistic_op<Scalar>, const Derived> LogisticReturnType;
typedef CwiseUnaryOp<internal::scalar_sinh_op<Scalar>, const Derived> SinhReturnType;
typedef CwiseUnaryOp<internal::scalar_cosh_op<Scalar>, const Derived> CoshReturnType;
typedef CwiseUnaryOp<internal::scalar_square_op<Scalar>, const Derived> SquareReturnType;
@@ -335,6 +336,15 @@ cosh() const
return CoshReturnType(derived());
}
+/** \returns an expression of the coefficient-wise logistic of *this.
+ */
+EIGEN_DEVICE_FUNC
+inline const LogisticReturnType
+logistic() const
+{
+ return LogisticReturnType(derived());
+}
+
/** \returns an expression of the coefficient-wise inverse of *this.
*
* Example: \include Cwise_inverse.cpp
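
The hunk above exposes internal::scalar_logistic_op through a new Array API entry point. A minimal sketch of the added logistic() method, assuming it computes the standard logistic (sigmoid) function 1/(1+exp(-x)) coefficient-wise:

    // Sketch only: coefficient-wise logistic on an Eigen array.
    #include <Eigen/Core>
    #include <iostream>

    int main()
    {
      Eigen::ArrayXf a(3);
      a << -2.f, 0.f, 2.f;
      std::cout << a.logistic() << std::endl;  // approximately 0.119, 0.5, 0.881
      return 0;
    }
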
diff --git a/Eigen/src/plugins/BlockMethods.h b/Eigen/src/plugins/BlockMethods.h
index 5caf14469..67fdebc6f 100644
--- a/Eigen/src/plugins/BlockMethods.h
+++ b/Eigen/src/plugins/BlockMethods.h
@@ -40,6 +40,14 @@ typedef const VectorBlock<const Derived> ConstSegmentReturnType;
template<int Size> struct FixedSegmentReturnType { typedef VectorBlock<Derived, Size> Type; };
template<int Size> struct ConstFixedSegmentReturnType { typedef const VectorBlock<const Derived, Size> Type; };
+/// \internal inner-vector
+typedef Block<Derived,IsRowMajor?1:Dynamic,IsRowMajor?Dynamic:1,true> InnerVectorReturnType;
+typedef Block<const Derived,IsRowMajor?1:Dynamic,IsRowMajor?Dynamic:1,true> ConstInnerVectorReturnType;
+
+/// \internal set of inner-vectors
+typedef Block<Derived,Dynamic,Dynamic,true> InnerVectorsReturnType;
+typedef Block<const Derived,Dynamic,Dynamic,true> ConstInnerVectorsReturnType;
+
#endif // not EIGEN_PARSED_BY_DOXYGEN
/// \returns an expression of a block in \c *this with either dynamic or fixed sizes.
@@ -1036,7 +1044,7 @@ inline const typename ConstFixedBlockXpr<NRows,NCols>::Type block(Index startRow
/// \a NRows is \a Dynamic, and the same for the number of columns.
///
/// Example: \include MatrixBase_template_int_int_block_int_int_int_int.cpp
-/// Output: \verbinclude MatrixBase_template_int_int_block_int_int_int_int.cpp
+/// Output: \verbinclude MatrixBase_template_int_int_block_int_int_int_int.out
///
/// \note The usage of of this overload is discouraged from %Eigen 3.4, better used the generic
/// block(Index,Index,NRowsType,NColsType), here is the one-to-one complete equivalence:
@@ -1053,6 +1061,7 @@ EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL
/// \sa block(Index,Index,NRowsType,NColsType), class Block
///
template<int NRows, int NCols>
+EIGEN_DEVICE_FUNC
inline typename FixedBlockXpr<NRows,NCols>::Type block(Index startRow, Index startCol,
Index blockRows, Index blockCols)
{
@@ -1354,3 +1363,39 @@ inline typename ConstFixedSegmentReturnType<N>::Type tail(Index n = N) const
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return typename ConstFixedSegmentReturnType<N>::Type(derived(), size() - n);
}
+
+/// \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
+/// is col-major (resp. row-major).
+///
+InnerVectorReturnType innerVector(Index outer)
+{ return InnerVectorReturnType(derived(), outer); }
+
+/// \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
+/// is col-major (resp. row-major). Read-only.
+///
+const ConstInnerVectorReturnType innerVector(Index outer) const
+{ return ConstInnerVectorReturnType(derived(), outer); }
+
+/// \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
+/// is col-major (resp. row-major).
+///
+InnerVectorsReturnType
+innerVectors(Index outerStart, Index outerSize)
+{
+ return Block<Derived,Dynamic,Dynamic,true>(derived(),
+ IsRowMajor ? outerStart : 0, IsRowMajor ? 0 : outerStart,
+ IsRowMajor ? outerSize : rows(), IsRowMajor ? cols() : outerSize);
+
+}
+
+/// \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
+/// is col-major (resp. row-major). Read-only.
+///
+const ConstInnerVectorsReturnType
+innerVectors(Index outerStart, Index outerSize) const
+{
+ return Block<const Derived,Dynamic,Dynamic,true>(derived(),
+ IsRowMajor ? outerStart : 0, IsRowMajor ? 0 : outerStart,
+ IsRowMajor ? outerSize : rows(), IsRowMajor ? cols() : outerSize);
+
+}
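
The block-method additions above give dense expressions the innerVector()/innerVectors() accessors already known from the sparse API: the inner direction is a column for column-major expressions and a row for row-major ones. A minimal usage sketch:

    // Sketch only: inner-vector access on a (column-major) dense matrix.
    #include <Eigen/Core>
    #include <iostream>

    int main()
    {
      Eigen::MatrixXd M(3,4);   // column-major by default, so inner vectors are columns
      M.setRandom();

      std::cout << M.innerVector(1) << "\n\n";        // the second column
      std::cout << M.innerVectors(1, 2) << std::endl; // columns 1 and 2 as one block
      return 0;
    }
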
diff --git a/Eigen/src/plugins/IndexedViewMethods.h b/Eigen/src/plugins/IndexedViewMethods.h
index 22c1666c5..5bfb19ac6 100644
--- a/Eigen/src/plugins/IndexedViewMethods.h
+++ b/Eigen/src/plugins/IndexedViewMethods.h
@@ -53,13 +53,6 @@ ivcSize(const Indices& indices) const {
return internal::makeIndexedViewCompatible(indices, internal::variable_if_dynamic<Index,SizeAtCompileTime>(derived().size()),Specialized);
}
-template<typename RowIndices, typename ColIndices>
-struct valid_indexed_view_overload {
- // Here we use is_convertible to Index instead of is_integral in order to treat enums as Index.
- // In c++11 we could use is_integral<T> && is_enum<T> if is_convertible appears to be too permissive.
- enum { value = !(internal::is_convertible<RowIndices,Index>::value && internal::is_convertible<ColIndices,Index>::value) };
-};
-
public:
#endif
@@ -74,7 +67,7 @@ struct EIGEN_INDEXED_VIEW_METHOD_TYPE {
// This is the generic version
template<typename RowIndices, typename ColIndices>
-typename internal::enable_if<valid_indexed_view_overload<RowIndices,ColIndices>::value
+typename internal::enable_if<internal::valid_indexed_view_overload<RowIndices,ColIndices>::value
&& internal::traits<typename EIGEN_INDEXED_VIEW_METHOD_TYPE<RowIndices,ColIndices>::type>::ReturnAsIndexedView,
typename EIGEN_INDEXED_VIEW_METHOD_TYPE<RowIndices,ColIndices>::type >::type
operator()(const RowIndices& rowIndices, const ColIndices& colIndices) EIGEN_INDEXED_VIEW_METHOD_CONST
@@ -86,7 +79,7 @@ operator()(const RowIndices& rowIndices, const ColIndices& colIndices) EIGEN_IND
// The following overload returns a Block<> object
template<typename RowIndices, typename ColIndices>
-typename internal::enable_if<valid_indexed_view_overload<RowIndices,ColIndices>::value
+typename internal::enable_if<internal::valid_indexed_view_overload<RowIndices,ColIndices>::value
&& internal::traits<typename EIGEN_INDEXED_VIEW_METHOD_TYPE<RowIndices,ColIndices>::type>::ReturnAsBlock,
typename internal::traits<typename EIGEN_INDEXED_VIEW_METHOD_TYPE<RowIndices,ColIndices>::type>::BlockType>::type
operator()(const RowIndices& rowIndices, const ColIndices& colIndices) EIGEN_INDEXED_VIEW_METHOD_CONST
@@ -104,7 +97,7 @@ operator()(const RowIndices& rowIndices, const ColIndices& colIndices) EIGEN_IND
// The following overload returns a Scalar
template<typename RowIndices, typename ColIndices>
-typename internal::enable_if<valid_indexed_view_overload<RowIndices,ColIndices>::value
+typename internal::enable_if<internal::valid_indexed_view_overload<RowIndices,ColIndices>::value
&& internal::traits<typename EIGEN_INDEXED_VIEW_METHOD_TYPE<RowIndices,ColIndices>::type>::ReturnAsScalar,
CoeffReturnType >::type
operator()(const RowIndices& rowIndices, const ColIndices& colIndices) EIGEN_INDEXED_VIEW_METHOD_CONST
@@ -114,7 +107,7 @@ operator()(const RowIndices& rowIndices, const ColIndices& colIndices) EIGEN_IND
#if EIGEN_HAS_STATIC_ARRAY_TEMPLATE
-// The folowing three overloads are needed to handle raw Index[N] arrays.
+// The following three overloads are needed to handle raw Index[N] arrays.
template<typename RowIndicesT, std::size_t RowIndicesN, typename ColIndices>
IndexedView<EIGEN_INDEXED_VIEW_METHOD_CONST Derived,const RowIndicesT (&)[RowIndicesN],typename IvcColType<ColIndices>::type>
@@ -146,7 +139,7 @@ operator()(const RowIndicesT (&rowIndices)[RowIndicesN], const ColIndicesT (&col
template<typename Indices>
typename internal::enable_if<
- IsRowMajor && (!(internal::get_compile_time_incr<typename IvcType<Indices>::type>::value==1 || internal::is_integral<Indices>::value)),
+ IsRowMajor && (!(internal::get_compile_time_incr<typename IvcType<Indices>::type>::value==1 || internal::is_valid_index_type<Indices>::value)),
IndexedView<EIGEN_INDEXED_VIEW_METHOD_CONST Derived,IvcIndex,typename IvcType<Indices>::type> >::type
operator()(const Indices& indices) EIGEN_INDEXED_VIEW_METHOD_CONST
{
@@ -157,7 +150,7 @@ operator()(const Indices& indices) EIGEN_INDEXED_VIEW_METHOD_CONST
template<typename Indices>
typename internal::enable_if<
- (!IsRowMajor) && (!(internal::get_compile_time_incr<typename IvcType<Indices>::type>::value==1 || internal::is_integral<Indices>::value)),
+ (!IsRowMajor) && (!(internal::get_compile_time_incr<typename IvcType<Indices>::type>::value==1 || internal::is_valid_index_type<Indices>::value)),
IndexedView<EIGEN_INDEXED_VIEW_METHOD_CONST Derived,typename IvcType<Indices>::type,IvcIndex> >::type
operator()(const Indices& indices) EIGEN_INDEXED_VIEW_METHOD_CONST
{
@@ -168,7 +161,7 @@ operator()(const Indices& indices) EIGEN_INDEXED_VIEW_METHOD_CONST
template<typename Indices>
typename internal::enable_if<
- (internal::get_compile_time_incr<typename IvcType<Indices>::type>::value==1) && (!internal::is_integral<Indices>::value) && (!Symbolic::is_symbolic<Indices>::value),
+ (internal::get_compile_time_incr<typename IvcType<Indices>::type>::value==1) && (!internal::is_valid_index_type<Indices>::value) && (!symbolic::is_symbolic<Indices>::value),
VectorBlock<EIGEN_INDEXED_VIEW_METHOD_CONST Derived,internal::array_size<Indices>::value> >::type
operator()(const Indices& indices) EIGEN_INDEXED_VIEW_METHOD_CONST
{
@@ -179,7 +172,7 @@ operator()(const Indices& indices) EIGEN_INDEXED_VIEW_METHOD_CONST
}
template<typename IndexType>
-typename internal::enable_if<Symbolic::is_symbolic<IndexType>::value, CoeffReturnType >::type
+typename internal::enable_if<symbolic::is_symbolic<IndexType>::value, CoeffReturnType >::type
operator()(const IndexType& id) EIGEN_INDEXED_VIEW_METHOD_CONST
{
return Base::operator()(internal::eval_expr_given_size(id,size()));
@@ -250,6 +243,8 @@ operator()(const IndicesT (&indices)[IndicesN]) EIGEN_INDEXED_VIEW_METHOD_CONST
*
* For 1D vectors and arrays, you better use the operator()(const Indices&) overload, which behave the same way but taking a single parameter.
*
+ * See also this <a href="https://stackoverflow.com/questions/46110917/eigen-replicate-items-along-one-dimension-without-useless-allocations">question</a> and its answer for an example of how to duplicate coefficients.
+ *
* \sa operator()(const Indices&), class Block, class IndexedView, DenseBase::block(Index,Index,Index,Index)
*/
template<typename RowIndices, typename ColIndices>
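
The IndexedViewMethods changes above move valid_indexed_view_overload into namespace internal, switch the is_integral checks to internal::is_valid_index_type, and rename Symbolic:: to symbolic::. A minimal sketch of the operator() indexed-view API these overloads implement, assuming the Eigen 3.4-style seq/seqN/last symbols are available:

    // Sketch only: indexed views with ranges and a symbolic 'last' index.
    #include <Eigen/Core>
    #include <iostream>

    int main()
    {
      Eigen::MatrixXd A = Eigen::MatrixXd::Random(5,5);

      // Rows 1..3 and the last two columns; 'last' goes through the symbolic-index branch above.
      std::cout << A(Eigen::seq(1,3), Eigen::seq(Eigen::last-1, Eigen::last)) << "\n\n";

      // A single index container on a vector selects coefficients 0, 2, 4.
      Eigen::VectorXd v = Eigen::VectorXd::LinSpaced(6, 0, 5);
      std::cout << v(Eigen::seqN(0, 3, 2)) << std::endl;
      return 0;
    }
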
diff --git a/Eigen/src/plugins/ReshapedMethods.h b/Eigen/src/plugins/ReshapedMethods.h
index 2bb0b8623..9aeb7f3ee 100644
--- a/Eigen/src/plugins/ReshapedMethods.h
+++ b/Eigen/src/plugins/ReshapedMethods.h
@@ -25,7 +25,7 @@
/// AutoSize does preserve compile-time sizes when possible, i.e., when the sizes of the input are known at compile time \b and
/// that the other size is passed at compile-time using Eigen::fix<N> as above.
///
-/// \sa operator()(placeholders::all), class Reshaped, fix, fix<N>(int)
+/// \sa operator()(all), class Reshaped, fix, fix<N>(int)
///
template<int Order = ColMajor, typename NRowsType, typename NColsType>
EIGEN_DEVICE_FUNC
@@ -50,7 +50,7 @@ reshaped(NRowsType nRows, NColsType nCols) const;
/// \sa reshaped()
EIGEN_DEVICE_FUNC
inline Reshaped<Derived,SizeAtCompileTime,1>
-operator()(placeholders::all);
+operator()(all);
#else
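
The documentation tweaks above replace placeholders::all with plain all in the \sa links and the operator()(all) declaration. A minimal sketch of the reshaped(NRowsType,NColsType) API being documented, assuming the Eigen 3.4 development branch:

    // Sketch only: viewing the same coefficients under different shapes.
    #include <Eigen/Core>
    #include <iostream>

    int main()
    {
      Eigen::MatrixXd M(2,6);
      M << 1,2,3,4,5,6,
           7,8,9,10,11,12;

      std::cout << M.reshaped(3,4) << "\n\n";    // the 12 coefficients viewed as 3x4
      std::cout << M.reshaped(6,2) << std::endl; // and as 6x2; the default order is column-major
      return 0;
    }
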
diff --git a/README.md b/README.md
index 4654a81c3..99c9e2933 100644
--- a/README.md
+++ b/README.md
@@ -1,3 +1,7 @@
**Eigen is a C++ template library for linear algebra: matrices, vectors, numerical solvers, and related algorithms.**
For more information go to http://eigen.tuxfamily.org/.
+
+For ***pull request*** please only use the official repository at https://bitbucket.org/eigen/eigen.
+
+For ***bug reports*** and ***feature requests*** go to http://eigen.tuxfamily.org/bz.
diff --git a/bench/analyze-blocking-sizes.cpp b/bench/analyze-blocking-sizes.cpp
index d563a1d2d..6bc4aca3d 100644
--- a/bench/analyze-blocking-sizes.cpp
+++ b/bench/analyze-blocking-sizes.cpp
@@ -825,7 +825,7 @@ int main(int argc, char* argv[])
}
for (int i = 1; i < argc; i++) {
bool arg_handled = false;
- // Step 1. Try to match action invokation names.
+ // Step 1. Try to match action invocation names.
for (auto it = available_actions.begin(); it != available_actions.end(); ++it) {
if (!strcmp(argv[i], (*it)->invokation_name())) {
if (!action) {
diff --git a/bench/bench_gemm.cpp b/bench/bench_gemm.cpp
index 8528c5587..688d99c4a 100644
--- a/bench/bench_gemm.cpp
+++ b/bench/bench_gemm.cpp
@@ -129,7 +129,7 @@ void matlab_cplx_real(const M& ar, const M& ai, const M& b, M& cr, M& ci)
template<typename A, typename B, typename C>
EIGEN_DONT_INLINE void gemm(const A& a, const B& b, C& c)
{
- c.noalias() += a * b;
+ c.noalias() += a * b;
}
int main(int argc, char ** argv)
diff --git a/bench/btl/README b/bench/btl/README
index f3f5fb36f..ebed88960 100644
--- a/bench/btl/README
+++ b/bench/btl/README
@@ -36,7 +36,7 @@ For instance:
You can also select a given set of actions defining the environment variable BTL_CONFIG this way:
BTL_CONFIG="-a action1{:action2}*" ctest -V
-An exemple:
+An example:
BTL_CONFIG="-a axpy:vector_matrix:trisolve:ata" ctest -V -R eigen2
Finally, if bench results already exist (the bench*.dat files) then they merges by keeping the best for each matrix size. If you want to overwrite the previous ones you can simply add the "--overwrite" option:
diff --git a/bench/btl/generic_bench/bench.hh b/bench/btl/generic_bench/bench.hh
index 7b7b951b5..0732940d5 100644
--- a/bench/btl/generic_bench/bench.hh
+++ b/bench/btl/generic_bench/bench.hh
@@ -159,7 +159,7 @@ BTL_DONT_INLINE void bench( int size_min, int size_max, int nb_point ){
// bench<Mixed_Perf_Analyzer,Action>(size_min,size_max,nb_point);
- // Only for small problem size. Otherwize it will be too long
+ // Only for small problem size. Otherwise it will be too long
// bench<X86_Perf_Analyzer,Action>(size_min,size_max,nb_point);
// bench<STL_Perf_Analyzer,Action>(size_min,size_max,nb_point);
diff --git a/bench/btl/generic_bench/utils/size_log.hh b/bench/btl/generic_bench/utils/size_log.hh
index 13a3da7a8..68945e7cc 100644
--- a/bench/btl/generic_bench/utils/size_log.hh
+++ b/bench/btl/generic_bench/utils/size_log.hh
@@ -23,7 +23,7 @@
#include "math.h"
// The Vector class must satisfy the following part of STL vector concept :
// resize() method
-// [] operator for seting element
+// [] operator for setting element
// the vector element are int compatible.
template<class Vector>
void size_log(const int nb_point, const int size_min, const int size_max, Vector & X)
diff --git a/bench/btl/generic_bench/utils/xy_file.hh b/bench/btl/generic_bench/utils/xy_file.hh
index 4571bed8f..0492faf09 100644
--- a/bench/btl/generic_bench/utils/xy_file.hh
+++ b/bench/btl/generic_bench/utils/xy_file.hh
@@ -55,7 +55,7 @@ bool read_xy_file(const std::string & filename, std::vector<int> & tab_sizes,
// The Vector class must satisfy the following part of STL vector concept :
// resize() method
-// [] operator for seting element
+// [] operator for setting element
// the vector element must have the << operator define
using namespace std;
diff --git a/bench/btl/libs/ublas/ublas_interface.hh b/bench/btl/libs/ublas/ublas_interface.hh
index 95cad5195..f59b7cf2f 100644
--- a/bench/btl/libs/ublas/ublas_interface.hh
+++ b/bench/btl/libs/ublas/ublas_interface.hh
@@ -100,7 +100,7 @@ public :
Y+=coef*X;
}
- // alias free assignements
+ // alias free assignments
static inline void matrix_vector_product(gene_matrix & A, gene_vector & B, gene_vector & X, int N){
X.assign(prod(A,B));
diff --git a/bench/eig33.cpp b/bench/eig33.cpp
index 47947a9be..f003d8a53 100644
--- a/bench/eig33.cpp
+++ b/bench/eig33.cpp
@@ -101,7 +101,7 @@ void eigen33(const Matrix& mat, Matrix& evecs, Vector& evals)
computeRoots(scaledMat,evals);
// compute the eigen vectors
- // **here we assume 3 differents eigenvalues**
+ // **here we assume 3 different eigenvalues**
// "optimized version" which appears to be slower with gcc!
// Vector base;
diff --git a/bench/spbench/CMakeLists.txt b/bench/spbench/CMakeLists.txt
index 8d53f4ae2..029ba6d6b 100644
--- a/bench/spbench/CMakeLists.txt
+++ b/bench/spbench/CMakeLists.txt
@@ -38,25 +38,32 @@ if(SUPERLU_FOUND AND BLAS_FOUND)
endif()
-find_package(Pastix)
-find_package(Scotch)
-find_package(Metis)
-if(PASTIX_FOUND AND BLAS_FOUND)
+find_package(PASTIX QUIET COMPONENTS METIS SCOTCH)
+# check that the PASTIX found is a version without MPI
+find_path(PASTIX_pastix_nompi.h_INCLUDE_DIRS
+ NAMES pastix_nompi.h
+ HINTS ${PASTIX_INCLUDE_DIRS}
+)
+if (NOT PASTIX_pastix_nompi.h_INCLUDE_DIRS)
+ message(STATUS "A version of Pastix has been found but pastix_nompi.h does not exist in the include directory."
+ " Because Eigen tests require a version without MPI, we disable the Pastix backend.")
+endif()
+if(PASTIX_FOUND AND PASTIX_pastix_nompi.h_INCLUDE_DIRS AND BLAS_FOUND)
add_definitions("-DEIGEN_PASTIX_SUPPORT")
- include_directories(${PASTIX_INCLUDES})
+ include_directories(${PASTIX_INCLUDE_DIRS_DEP})
if(SCOTCH_FOUND)
- include_directories(${SCOTCH_INCLUDES})
+ include_directories(${SCOTCH_INCLUDE_DIRS})
set(PASTIX_LIBRARIES ${PASTIX_LIBRARIES} ${SCOTCH_LIBRARIES})
elseif(METIS_FOUND)
- include_directories(${METIS_INCLUDES})
+ include_directories(${METIS_INCLUDE_DIRS})
set(PASTIX_LIBRARIES ${PASTIX_LIBRARIES} ${METIS_LIBRARIES})
endif(SCOTCH_FOUND)
- set(SPARSE_LIBS ${SPARSE_LIBS} ${PASTIX_LIBRARIES} ${ORDERING_LIBRARIES} ${BLAS_LIBRARIES})
- set(PASTIX_ALL_LIBS ${PASTIX_LIBRARIES} ${BLAS_LIBRARIES})
-endif(PASTIX_FOUND AND BLAS_FOUND)
+ set(SPARSE_LIBS ${SPARSE_LIBS} ${PASTIX_LIBRARIES_DEP} ${ORDERING_LIBRARIES})
+ set(PASTIX_ALL_LIBS ${PASTIX_LIBRARIES_DEP})
+endif()
if(METIS_FOUND)
- include_directories(${METIS_INCLUDES})
+ include_directories(${METIS_INCLUDE_DIRS})
set (SPARSE_LIBS ${SPARSE_LIBS} ${METIS_LIBRARIES})
add_definitions("-DEIGEN_METIS_SUPPORT")
endif(METIS_FOUND)
diff --git a/bench/spbench/spbenchsolver.cpp b/bench/spbench/spbenchsolver.cpp
index 4acd0039c..2a7351124 100644
--- a/bench/spbench/spbenchsolver.cpp
+++ b/bench/spbench/spbenchsolver.cpp
@@ -54,7 +54,7 @@ int main(int argc, char ** args)
statbuf.close();
}
else
- std::cerr << "Unable to open the provided file for writting... \n";
+ std::cerr << "Unable to open the provided file for writing... \n";
}
// Get the maximum number of iterations and the tolerance
diff --git a/bench/tensors/README b/bench/tensors/README
index 3a5fdbe17..69342cc9c 100644
--- a/bench/tensors/README
+++ b/bench/tensors/README
@@ -14,8 +14,12 @@ nvcc tensor_benchmarks_fp16_gpu.cu benchmark_main.cc -I ../../ -std=c++11 -O2 -D
last but not least, we also provide a suite of benchmarks to measure the scalability of the contraction code on CPU. To compile these benchmarks, call
g++ contraction_benchmarks_cpu.cc benchmark_main.cc -I ../../ -std=c++11 -O3 -DNDEBUG -pthread -mavx -o benchmarks_cpu
-To compile the benchmark for SYCL, using ComputeCpp you currently need 2 passes (only for translation units containing device code):
+To compile and run the benchmark for SYCL, using ComputeCpp you currently need following passes (only for translation units containing device code):
1. The device compilation pass that generates the device code (SYCL kernels and referenced device functions) and glue code needed by the host compiler to reference the device code from host code.
-{ComputeCpp_ROOT}/bin/compute++ -I ../../ -I {ComputeCpp_ROOT}/include/ -std=c++11 -mllvm -inline-threshold=1000 -Wno-ignored-attributes -sycl -intelspirmetadata -emit-llvm -no-serial-memop -sycl-compress-name -DBUILD_PLATFORM_SPIR -DNDBUG -O3 -c tensor_benchmarks_sycl.cc
+{ComputeCpp_ROOT}/bin/compute++ -I ../../ -I {ComputeCpp_ROOT}/include/ -std=c++11 -mllvm -inline-threshold=1000 -Wno-ignored-attributes -sycl -intelspirmetadata -emit-llvm -no-serial-memop -sycl-compress-name -DBUILD_PLATFORM_SPIR -DNDBUG -O3 -c tensor_benchmarks_sycl.cc -DEIGEN_USE_SYCL=1
2. The host compilation pass that generates the final host binary.
-clang++-3.7 -include tensor_benchmarks_sycl.sycl benchmark_main.cc tensor_benchmarks_sycl.cc -pthread -I ../../ -I {ComputeCpp_ROOT}/include/ -L {ComputeCpp_ROOT}/lib/ -lComputeCpp -lOpenCL -D_GLIBCXX_USE_CXX11_ABI=0 -std=c++11 -o tensor_benchmark_sycl
+clang++ -O3 -c benchmark_main.cc -pthread -I ../../ -D_GLIBCXX_USE_CXX11_ABI=0 -DEIGEN_USE_SYCL=1 -std=c++11 -o benchmark_main.o
+clang++ -O3 tensor_benchmarks_sycl_include_headers.cc -pthread -I ../../ -I {ComputeCpp_ROOT}/include/ -L {ComputeCpp_ROOT}/lib/ -lComputeCpp -lOpenCL -D_GLIBCXX_USE_CXX11_ABI=0 -DEIGEN_USE_SYCL=1 -std=c++11 benchmark_main.o -o tensor_benchmark_sycl
+export LD_LIBRARY_PATH={ComputeCpp_ROOT}/lib
+3. Run the benchmark
+./tensor_benchmark_sycl
diff --git a/bench/tensors/tensor_benchmarks.h b/bench/tensors/tensor_benchmarks.h
index c2fb3dede..3a640ede4 100644
--- a/bench/tensors/tensor_benchmarks.h
+++ b/bench/tensors/tensor_benchmarks.h
@@ -35,6 +35,11 @@ template <typename Device, typename T> class BenchmarkSuite {
void memcpy(int num_iters) {
eigen_assert(m_ == k_ && k_ == n_);
+#ifdef EIGEN_USE_SYCL // warmup for sycl
+ for (int iter = 0; iter < 10; ++iter) {
+ device_.memcpy(c_, a_, m_ * m_ * sizeof(T));
+ }
+#endif
StartBenchmarkTiming();
for (int iter = 0; iter < num_iters; ++iter) {
device_.memcpy(c_, a_, m_ * m_ * sizeof(T));
@@ -55,7 +60,11 @@ template <typename Device, typename T> class BenchmarkSuite {
}
const TensorMap<Tensor<int, 2, 0, TensorIndex>, Eigen::Aligned> A((int*)a_, sizes);
TensorMap<Tensor<T, 2, 0, TensorIndex>, Eigen::Aligned> B(b_, sizes);
-
+#ifdef EIGEN_USE_SYCL // warmup for sycl
+ for (int iter = 0; iter < 10; ++iter) {
+ B.device(device_) = A.template cast<T>();
+ }
+#endif
StartBenchmarkTiming();
for (int iter = 0; iter < num_iters; ++iter) {
B.device(device_) = A.template cast<T>();
@@ -70,7 +79,6 @@ template <typename Device, typename T> class BenchmarkSuite {
sizes[0] = m_;
sizes[1] = m_;
TensorMap<Tensor<T, 2>, Eigen::Aligned> C(c_, sizes);
-
StartBenchmarkTiming();
for (int iter = 0; iter < num_iters; ++iter) {
C.device(device_) = C.random();
@@ -93,7 +101,18 @@ template <typename Device, typename T> class BenchmarkSuite {
const Eigen::DSizes<TensorIndex, 2> second_quadrant(0, m_/2);
const Eigen::DSizes<TensorIndex, 2> third_quadrant(m_/2, 0);
const Eigen::DSizes<TensorIndex, 2> fourth_quadrant(m_/2, m_/2);
-
+#ifdef EIGEN_USE_SYCL // warmup for sycl
+ for (int iter = 0; iter < 10; ++iter) {
+ C.slice(first_quadrant, quarter_sizes).device(device_) =
+ A.slice(first_quadrant, quarter_sizes);
+ C.slice(second_quadrant, quarter_sizes).device(device_) =
+ B.slice(second_quadrant, quarter_sizes);
+ C.slice(third_quadrant, quarter_sizes).device(device_) =
+ A.slice(third_quadrant, quarter_sizes);
+ C.slice(fourth_quadrant, quarter_sizes).device(device_) =
+ B.slice(fourth_quadrant, quarter_sizes);
+ }
+#endif
StartBenchmarkTiming();
for (int iter = 0; iter < num_iters; ++iter) {
C.slice(first_quadrant, quarter_sizes).device(device_) =
@@ -118,7 +137,11 @@ template <typename Device, typename T> class BenchmarkSuite {
Eigen::array<TensorIndex, 1> output_size;
output_size[0] = n_;
TensorMap<Tensor<T, 1, 0, TensorIndex>, Eigen::Aligned> C(c_, output_size);
-
+#ifdef EIGEN_USE_SYCL // warmup for sycl
+ for (int iter = 0; iter < 10; ++iter) {
+ C.device(device_) = B.chip(iter % k_, 0);
+ }
+#endif
StartBenchmarkTiming();
for (int iter = 0; iter < num_iters; ++iter) {
C.device(device_) = B.chip(iter % k_, 0);
@@ -135,7 +158,11 @@ template <typename Device, typename T> class BenchmarkSuite {
Eigen::array<TensorIndex, 1> output_size;
output_size[0] = n_;
TensorMap<Tensor<T, 1, 0, TensorIndex>, Eigen::Aligned> C(c_, output_size);
-
+#ifdef EIGEN_USE_SYCL // warmup for sycl
+ for (int iter = 0; iter < 10; ++iter) {
+ C.device(device_) = B.chip(iter % n_, 1);
+ }
+#endif
StartBenchmarkTiming();
for (int iter = 0; iter < num_iters; ++iter) {
C.device(device_) = B.chip(iter % n_, 1);
@@ -158,7 +185,11 @@ template <typename Device, typename T> class BenchmarkSuite {
Eigen::array<int, 2> shuffle;
shuffle[0] = 1;
shuffle[1] = 0;
-
+#ifdef EIGEN_USE_SYCL // warmup for sycl
+ for (int iter = 0; iter < 10; ++iter) {
+ B.device(device_) = A.shuffle(shuffle);
+ }
+#endif
StartBenchmarkTiming();
for (int iter = 0; iter < num_iters; ++iter) {
B.device(device_) = A.shuffle(shuffle);
@@ -186,7 +217,11 @@ template <typename Device, typename T> class BenchmarkSuite {
paddings[0] = Eigen::IndexPair<TensorIndex>(0, 0);
paddings[1] = Eigen::IndexPair<TensorIndex>(2, 1);
#endif
-
+#ifdef EIGEN_USE_SYCL // warmup for sycl
+ for (int iter = 0; iter < 10; ++iter) {
+ B.device(device_) = A.pad(paddings);
+ }
+#endif
StartBenchmarkTiming();
for (int iter = 0; iter < num_iters; ++iter) {
B.device(device_) = A.pad(paddings);
@@ -216,6 +251,11 @@ template <typename Device, typename T> class BenchmarkSuite {
Eigen::IndexList<Eigen::type2index<1>, Eigen::type2index<2> > strides;
#endif
+#ifdef EIGEN_USE_SYCL // warmup for sycl
+ for (int iter = 0; iter < 10; ++iter) {
+ B.device(device_) = A.stride(strides);
+ }
+#endif
StartBenchmarkTiming();
for (int iter = 0; iter < num_iters; ++iter) {
B.device(device_) = A.stride(strides);
@@ -245,6 +285,11 @@ template <typename Device, typename T> class BenchmarkSuite {
broadcast.set(1, n_);
#endif
+#ifdef EIGEN_USE_SYCL // warmup for sycl
+ for (int iter = 0; iter < 10; ++iter) {
+ C.device(device_) = A.broadcast(broadcast);
+ }
+#endif
StartBenchmarkTiming();
for (int iter = 0; iter < num_iters; ++iter) {
C.device(device_) = A.broadcast(broadcast);
@@ -261,7 +306,11 @@ template <typename Device, typename T> class BenchmarkSuite {
const TensorMap<Tensor<T, 2>, Eigen::Aligned> A(a_, sizes);
const TensorMap<Tensor<T, 2>, Eigen::Aligned> B(b_, sizes);
TensorMap<Tensor<T, 2>, Eigen::Aligned> C(c_, sizes);
-
+#ifdef EIGEN_USE_SYCL // warmup for sycl
+ for (int iter = 0; iter < 10; ++iter) {
+ C.device(device_) = A * A.constant(static_cast<T>(3.14)) + B * B.constant(static_cast<T>(2.7));
+ }
+#endif
StartBenchmarkTiming();
for (int iter = 0; iter < num_iters; ++iter) {
C.device(device_) = A * A.constant(static_cast<T>(3.14)) + B * B.constant(static_cast<T>(2.7));
@@ -280,6 +329,11 @@ template <typename Device, typename T> class BenchmarkSuite {
const TensorMap<Tensor<T, 2>, Eigen::Aligned> B(b_, sizes);
TensorMap<Tensor<T, 2>, Eigen::Aligned> C(c_, sizes);
+#ifdef EIGEN_USE_SYCL // warmup for sycl
+for (int iter = 0; iter < 10; ++iter) {
+ C.device(device_) = A.rsqrt() + B.sqrt() * B.square();
+}
+#endif
StartBenchmarkTiming();
for (int iter = 0; iter < num_iters; ++iter) {
C.device(device_) = A.rsqrt() + B.sqrt() * B.square();
@@ -297,7 +351,11 @@ template <typename Device, typename T> class BenchmarkSuite {
const TensorMap<Tensor<T, 2>, Eigen::Aligned> A(a_, sizes);
const TensorMap<Tensor<T, 2>, Eigen::Aligned> B(b_, sizes);
TensorMap<Tensor<T, 2>, Eigen::Aligned> C(c_, sizes);
-
+#ifdef EIGEN_USE_SYCL // warmup for sycl
+ for (int iter = 0; iter < 10; ++iter) {
+ C.device(device_) = A.exp() + B.log();
+ }
+#endif
StartBenchmarkTiming();
for (int iter = 0; iter < num_iters; ++iter) {
C.device(device_) = A.exp() + B.log();
@@ -325,7 +383,11 @@ template <typename Device, typename T> class BenchmarkSuite {
// optimize the code.
Eigen::IndexList<Eigen::type2index<0>> sum_along_dim;
#endif
-
+#ifdef EIGEN_USE_SYCL // warmup for sycl
+ for (int iter = 0; iter < 10; ++iter) {
+ C.device(device_) = B.sum(sum_along_dim);
+ }
+#endif
StartBenchmarkTiming();
for (int iter = 0; iter < num_iters; ++iter) {
C.device(device_) = B.sum(sum_along_dim);
@@ -355,7 +417,11 @@ template <typename Device, typename T> class BenchmarkSuite {
// optimize the code.
Eigen::IndexList<Eigen::type2index<1>> sum_along_dim;
#endif
-
+#ifdef EIGEN_USE_SYCL // warmup for sycl
+ for (int iter = 0; iter < 10; ++iter) {
+ C.device(device_) = B.sum(sum_along_dim);
+ }
+#endif
StartBenchmarkTiming();
for (int iter = 0; iter < num_iters; ++iter) {
C.device(device_) = B.sum(sum_along_dim);
@@ -375,7 +441,11 @@ template <typename Device, typename T> class BenchmarkSuite {
Eigen::array<TensorIndex, 0> output_size;
TensorMap<Tensor<T, 0, 0, TensorIndex>, Eigen::Aligned> C(
c_, output_size);
-
+#ifdef EIGEN_USE_SYCL // warmup for sycl
+ for (int iter = 0; iter < 10; ++iter) {
+ C.device(device_) = B.sum();
+ }
+#endif
StartBenchmarkTiming();
for (int iter = 0; iter < num_iters; ++iter) {
C.device(device_) = B.sum();
@@ -404,7 +474,11 @@ template <typename Device, typename T> class BenchmarkSuite {
typedef typename Tensor<T, 2>::DimensionPair DimPair;
Eigen::array<DimPair, 1> dims;
dims[0] = DimPair(1, 0);
-
+#ifdef EIGEN_USE_SYCL // warmup for sycl
+ for (int iter = 0; iter < 10; ++iter) {
+ C.device(device_) = A.contract(B, dims);
+ }
+#endif
StartBenchmarkTiming();
for (int iter = 0; iter < num_iters; ++iter) {
C.device(device_) = A.contract(B, dims);
@@ -430,7 +504,11 @@ template <typename Device, typename T> class BenchmarkSuite {
Eigen::array<TensorIndex, 2> dims;
dims[0] = 0;
dims[1] = 1;
-
+#ifdef EIGEN_USE_SYCL // warmup for sycl
+ for (int iter = 0; iter < 10; ++iter) {
+ C.device(device_) = A.convolve(B, dims);
+ }
+#endif
StartBenchmarkTiming();
for (int iter = 0; iter < num_iters; ++iter) {
C.device(device_) = A.convolve(B, dims);
@@ -461,6 +539,11 @@ template <typename Device, typename T> class BenchmarkSuite {
if (Eigen::internal::is_same<Device, Eigen::GpuDevice>::value) {
device_.synchronize();
}
+#elif defined(EIGEN_USE_SYCL)
+ if (Eigen::internal::is_same<Device, Eigen::SyclDevice>::value) {
+ device_.synchronize();
+ }
+
#endif
StopBenchmarkTiming();
SetBenchmarkFlopsProcessed(num_items);
diff --git a/bench/tensors/tensor_benchmarks_sycl.cc b/bench/tensors/tensor_benchmarks_sycl.cc
index 6df190869..cb6daac15 100644
--- a/bench/tensors/tensor_benchmarks_sycl.cc
+++ b/bench/tensors/tensor_benchmarks_sycl.cc
@@ -1,20 +1,73 @@
-#define EIGEN_USE_SYCL
+#ifdef EIGEN_USE_SYCL
#include <SYCL/sycl.hpp>
#include <iostream>
#include "tensor_benchmarks.h"
-#define BM_FuncGPU(FUNC) \
- static void BM_##FUNC(int iters, int N) { \
- StopBenchmarkTiming(); \
- cl::sycl::gpu_selector selector; \
- Eigen::QueueInterface queue(selector); \
- Eigen::SyclDevice device(&queue); \
- BenchmarkSuite<Eigen::SyclDevice, float> suite(device, N); \
- suite.FUNC(iters); \
- } \
+#define BM_FuncGPU(FUNC) \
+ static void BM_##FUNC(int iters, int N) { \
+ StopBenchmarkTiming(); \
+ cl::sycl::gpu_selector selector; \
+ Eigen::QueueInterface queue(selector); \
+ Eigen::SyclDevice device(&queue); \
+ BenchmarkSuite<Eigen::SyclDevice, float> suite(device, N); \
+ suite.FUNC(iters); \
+ } \
BENCHMARK_RANGE(BM_##FUNC, 10, 5000);
+BM_FuncGPU(memcpy);
+BM_FuncGPU(typeCasting);
+BM_FuncGPU(slicing);
+BM_FuncGPU(rowChip);
+BM_FuncGPU(colChip);
+BM_FuncGPU(shuffling);
+BM_FuncGPU(padding);
+BM_FuncGPU(striding);
BM_FuncGPU(broadcasting);
BM_FuncGPU(coeffWiseOp);
+BM_FuncGPU(algebraicFunc);
+BM_FuncGPU(transcendentalFunc);
+BM_FuncGPU(rowReduction);
+BM_FuncGPU(colReduction);
+BM_FuncGPU(fullReduction);
+
+
+// Contractions
+#define BM_FuncWithInputDimsGPU(FUNC, D1, D2, D3) \
+ static void BM_##FUNC##_##D1##x##D2##x##D3(int iters, int N) { \
+ StopBenchmarkTiming(); \
+ cl::sycl::gpu_selector selector; \
+ Eigen::QueueInterface queue(selector); \
+ Eigen::SyclDevice device(&queue); \
+ BenchmarkSuite<Eigen::SyclDevice, float> suite(device, D1, D2, D3); \
+ suite.FUNC(iters); \
+ } \
+ BENCHMARK_RANGE(BM_##FUNC##_##D1##x##D2##x##D3, 10, 5000);
+
+
+BM_FuncWithInputDimsGPU(contraction, N, N, N);
+BM_FuncWithInputDimsGPU(contraction, 64, N, N);
+BM_FuncWithInputDimsGPU(contraction, N, 64, N);
+BM_FuncWithInputDimsGPU(contraction, N, N, 64);
+
+
+// Convolutions
+#define BM_FuncWithKernelDimsGPU(FUNC, DIM1, DIM2) \
+ static void BM_##FUNC##_##DIM1##x##DIM2(int iters, int N) { \
+ StopBenchmarkTiming(); \
+ cl::sycl::gpu_selector selector; \
+ Eigen::QueueInterface queue(selector); \
+ Eigen::SyclDevice device(&queue); \
+ BenchmarkSuite<Eigen::SyclDevice, float> suite(device, N); \
+ suite.FUNC(iters, DIM1, DIM2); \
+ } \
+ BENCHMARK_RANGE(BM_##FUNC##_##DIM1##x##DIM2, 128, 5000);
+
+BM_FuncWithKernelDimsGPU(convolution, 7, 1);
+BM_FuncWithKernelDimsGPU(convolution, 1, 7);
+BM_FuncWithKernelDimsGPU(convolution, 7, 4);
+BM_FuncWithKernelDimsGPU(convolution, 4, 7);
+BM_FuncWithKernelDimsGPU(convolution, 7, 64);
+BM_FuncWithKernelDimsGPU(convolution, 64, 7);
+#endif
diff --git a/bench/tensors/tensor_benchmarks_sycl_include_headers.cc b/bench/tensors/tensor_benchmarks_sycl_include_headers.cc
new file mode 100644
index 000000000..bcc3c4c79
--- /dev/null
+++ b/bench/tensors/tensor_benchmarks_sycl_include_headers.cc
@@ -0,0 +1,2 @@
+#include "tensor_benchmarks_sycl.cc"
+#include "tensor_benchmarks_sycl.sycl"
diff --git a/blas/common.h b/blas/common.h
index 61d8344d9..960c09cc6 100644
--- a/blas/common.h
+++ b/blas/common.h
@@ -10,6 +10,14 @@
#ifndef EIGEN_BLAS_COMMON_H
#define EIGEN_BLAS_COMMON_H
+#ifdef __GNUC__
+# if __GNUC__<5
+// GCC < 5.0 does not like the global Scalar typedef
+// we just keep shadow-warnings disabled permanently
+# define EIGEN_PERMANENTLY_DISABLE_STUPID_WARNINGS
+# endif
+#endif
+
#include "../Eigen/Core"
#include "../Eigen/Jacobi"
diff --git a/blas/f2c/ctbmv.c b/blas/f2c/ctbmv.c
index 790fd581f..a6e0dae80 100644
--- a/blas/f2c/ctbmv.c
+++ b/blas/f2c/ctbmv.c
@@ -147,7 +147,7 @@
/* ( 1 + ( n - 1 )*abs( INCX ) ). */
/* Before entry, the incremented array X must contain the n */
/* element vector x. On exit, X is overwritten with the */
-/* tranformed vector x. */
+/* transformed vector x. */
/* INCX - INTEGER. */
/* On entry, INCX specifies the increment for the elements of */
diff --git a/blas/f2c/dtbmv.c b/blas/f2c/dtbmv.c
index fdf73ebb5..aa67d19da 100644
--- a/blas/f2c/dtbmv.c
+++ b/blas/f2c/dtbmv.c
@@ -143,7 +143,7 @@
/* ( 1 + ( n - 1 )*abs( INCX ) ). */
/* Before entry, the incremented array X must contain the n */
/* element vector x. On exit, X is overwritten with the */
-/* tranformed vector x. */
+/* transformed vector x. */
/* INCX - INTEGER. */
/* On entry, INCX specifies the increment for the elements of */
diff --git a/blas/f2c/stbmv.c b/blas/f2c/stbmv.c
index fcf9ce336..b5a68b545 100644
--- a/blas/f2c/stbmv.c
+++ b/blas/f2c/stbmv.c
@@ -143,7 +143,7 @@
/* ( 1 + ( n - 1 )*abs( INCX ) ). */
/* Before entry, the incremented array X must contain the n */
/* element vector x. On exit, X is overwritten with the */
-/* tranformed vector x. */
+/* transformed vector x. */
/* INCX - INTEGER. */
/* On entry, INCX specifies the increment for the elements of */
diff --git a/blas/f2c/ztbmv.c b/blas/f2c/ztbmv.c
index 4cdcd7f88..3bf0beb01 100644
--- a/blas/f2c/ztbmv.c
+++ b/blas/f2c/ztbmv.c
@@ -147,7 +147,7 @@
/* ( 1 + ( n - 1 )*abs( INCX ) ). */
/* Before entry, the incremented array X must contain the n */
/* element vector x. On exit, X is overwritten with the */
-/* tranformed vector x. */
+/* transformed vector x. */
/* INCX - INTEGER. */
/* On entry, INCX specifies the increment for the elements of */
diff --git a/blas/level1_impl.h b/blas/level1_impl.h
index f857bfa20..6e7f8c976 100644
--- a/blas/level1_impl.h
+++ b/blas/level1_impl.h
@@ -33,7 +33,7 @@ int EIGEN_BLAS_FUNC(copy)(int *n, RealScalar *px, int *incx, RealScalar *py, int
Scalar* x = reinterpret_cast<Scalar*>(px);
Scalar* y = reinterpret_cast<Scalar*>(py);
- // be carefull, *incx==0 is allowed !!
+ // be careful, *incx==0 is allowed !!
if(*incx==1 && *incy==1)
make_vector(y,*n) = make_vector(x,*n);
else
diff --git a/blas/testing/cblat1.f b/blas/testing/cblat1.f
index 8ca67fb19..73015f5a9 100644
--- a/blas/testing/cblat1.f
+++ b/blas/testing/cblat1.f
@@ -619,7 +619,7 @@
SUBROUTINE STEST1(SCOMP1,STRUE1,SSIZE,SFAC)
* ************************* STEST1 *****************************
*
-* THIS IS AN INTERFACE SUBROUTINE TO ACCOMODATE THE FORTRAN
+* THIS IS AN INTERFACE SUBROUTINE TO ACCOMMODATE THE FORTRAN
* REQUIREMENT THAT WHEN A DUMMY ARGUMENT IS AN ARRAY, THE
* ACTUAL ARGUMENT MUST ALSO BE AN ARRAY OR AN ARRAY ELEMENT.
*
diff --git a/blas/testing/dblat1.f b/blas/testing/dblat1.f
index 30691f9bf..03d9f1345 100644
--- a/blas/testing/dblat1.f
+++ b/blas/testing/dblat1.f
@@ -990,7 +990,7 @@
SUBROUTINE STEST1(SCOMP1,STRUE1,SSIZE,SFAC)
* ************************* STEST1 *****************************
*
-* THIS IS AN INTERFACE SUBROUTINE TO ACCOMODATE THE FORTRAN
+* THIS IS AN INTERFACE SUBROUTINE TO ACCOMMODATE THE FORTRAN
* REQUIREMENT THAT WHEN A DUMMY ARGUMENT IS AN ARRAY, THE
* ACTUAL ARGUMENT MUST ALSO BE AN ARRAY OR AN ARRAY ELEMENT.
*
diff --git a/blas/testing/sblat1.f b/blas/testing/sblat1.f
index 6657c2693..4d43d9b48 100644
--- a/blas/testing/sblat1.f
+++ b/blas/testing/sblat1.f
@@ -946,7 +946,7 @@
SUBROUTINE STEST1(SCOMP1,STRUE1,SSIZE,SFAC)
* ************************* STEST1 *****************************
*
-* THIS IS AN INTERFACE SUBROUTINE TO ACCOMODATE THE FORTRAN
+* THIS IS AN INTERFACE SUBROUTINE TO ACCOMMODATE THE FORTRAN
* REQUIREMENT THAT WHEN A DUMMY ARGUMENT IS AN ARRAY, THE
* ACTUAL ARGUMENT MUST ALSO BE AN ARRAY OR AN ARRAY ELEMENT.
*
diff --git a/blas/testing/zblat1.f b/blas/testing/zblat1.f
index d30112c63..c00b67dc8 100644
--- a/blas/testing/zblat1.f
+++ b/blas/testing/zblat1.f
@@ -619,7 +619,7 @@
SUBROUTINE STEST1(SCOMP1,STRUE1,SSIZE,SFAC)
* ************************* STEST1 *****************************
*
-* THIS IS AN INTERFACE SUBROUTINE TO ACCOMODATE THE FORTRAN
+* THIS IS AN INTERFACE SUBROUTINE TO ACCOMMODATE THE FORTRAN
* REQUIREMENT THAT WHEN A DUMMY ARGUMENT IS AN ARRAY, THE
* ACTUAL ARGUMENT MUST ALSO BE AN ARRAY OR AN ARRAY ELEMENT.
*
diff --git a/cmake/Eigen3Config.cmake.in b/cmake/Eigen3Config.cmake.in
index c5c546887..0a1ac61c9 100644
--- a/cmake/Eigen3Config.cmake.in
+++ b/cmake/Eigen3Config.cmake.in
@@ -3,7 +3,9 @@
@PACKAGE_INIT@
-include ("${CMAKE_CURRENT_LIST_DIR}/Eigen3Targets.cmake")
+if (NOT TARGET eigen)
+ include ("${CMAKE_CURRENT_LIST_DIR}/Eigen3Targets.cmake")
+endif ()
# Legacy variables, do *not* use. May be removed in the future.
diff --git a/cmake/EigenConfigureTesting.cmake b/cmake/EigenConfigureTesting.cmake
index afc24b5e9..ba88228a0 100644
--- a/cmake/EigenConfigureTesting.cmake
+++ b/cmake/EigenConfigureTesting.cmake
@@ -11,16 +11,18 @@ add_custom_target(buildtests)
add_custom_target(check COMMAND "ctest")
add_dependencies(check buildtests)
-# check whether /bin/bash exists
-find_file(EIGEN_BIN_BASH_EXISTS "/bin/bash" PATHS "/" NO_DEFAULT_PATH)
+# check whether /bin/bash exists (disabled as not used anymore)
+# find_file(EIGEN_BIN_BASH_EXISTS "/bin/bash" PATHS "/" NO_DEFAULT_PATH)
# This call activates testing and generates the DartConfiguration.tcl
include(CTest)
set(EIGEN_TEST_BUILD_FLAGS "" CACHE STRING "Options passed to the build command of unit tests")
+set(EIGEN_DASHBOARD_BUILD_TARGET "buildtests" CACHE STRING "Target to be built in dashboard mode, default is buildtests")
+set(EIGEN_CTEST_ERROR_EXCEPTION "" CACHE STRING "Regular expression for build error messages to be filtered out")
# Overwrite default DartConfiguration.tcl such that ctest can build our unit tests.
-# Recall that our unit tests are not in the "all" target, so we have to explicitely ask ctest to build our custom 'buildtests' target.
+# Recall that our unit tests are not in the "all" target, so we have to explicitly ask ctest to build our custom 'buildtests' target.
# At this stage, we can also add custom flags to the build tool through the user defined EIGEN_TEST_BUILD_FLAGS variable.
file(READ "${CMAKE_CURRENT_BINARY_DIR}/DartConfiguration.tcl" EIGEN_DART_CONFIG_FILE)
# try to grab the default flags
@@ -28,7 +30,7 @@ string(REGEX MATCH "MakeCommand:.*-- (.*)\nDefaultCTestConfigurationType" EIGEN_
if(NOT CMAKE_MATCH_1)
string(REGEX MATCH "MakeCommand:.*[^c]make (.*)\nDefaultCTestConfigurationType" EIGEN_DUMMY ${EIGEN_DART_CONFIG_FILE})
endif()
-string(REGEX REPLACE "MakeCommand:.*DefaultCTestConfigurationType" "MakeCommand: ${CMAKE_COMMAND} --build . --target buildtests --config \"\${CTEST_CONFIGURATION_TYPE}\" -- ${CMAKE_MATCH_1} ${EIGEN_TEST_BUILD_FLAGS}\nDefaultCTestConfigurationType"
+string(REGEX REPLACE "MakeCommand:.*DefaultCTestConfigurationType" "MakeCommand: ${CMAKE_COMMAND} --build . --target ${EIGEN_DASHBOARD_BUILD_TARGET} --config \"\${CTEST_CONFIGURATION_TYPE}\" -- ${CMAKE_MATCH_1} ${EIGEN_TEST_BUILD_FLAGS}\nDefaultCTestConfigurationType"
EIGEN_DART_CONFIG_FILE2 ${EIGEN_DART_CONFIG_FILE})
file(WRITE "${CMAKE_CURRENT_BINARY_DIR}/DartConfiguration.tcl" ${EIGEN_DART_CONFIG_FILE2})
@@ -39,7 +41,7 @@ ei_init_testing()
# configure Eigen related testing options
option(EIGEN_NO_ASSERTION_CHECKING "Disable checking of assertions using exceptions" OFF)
-option(EIGEN_DEBUG_ASSERTS "Enable advanced debuging of assertions" OFF)
+option(EIGEN_DEBUG_ASSERTS "Enable advanced debugging of assertions" OFF)
if(CMAKE_COMPILER_IS_GNUCXX)
option(EIGEN_COVERAGE_TESTING "Enable/disable gcov" OFF)
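The two cache variables added above let a dashboard run choose what gets built and which build-log messages should not count as errors, without editing DartConfiguration.tcl by hand. A hedged sketch of an initial-cache file (file name and values are hypothetical; pass it to CMake with -C):

    # dashboard-options.cmake (illustrative only)
    set(EIGEN_DASHBOARD_BUILD_TARGET "all" CACHE STRING "")              # build everything, not just 'buildtests'
    set(EIGEN_CTEST_ERROR_EXCEPTION "some-noisy-regex" CACHE STRING "")  # assumed to act as a build-error filter regex, per its description
    set(EIGEN_TEST_BUILD_FLAGS "-j8" CACHE STRING "")                    # extra options passed to the unit-test build command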
diff --git a/cmake/EigenTesting.cmake b/cmake/EigenTesting.cmake
index a92a2978b..35deed509 100644
--- a/cmake/EigenTesting.cmake
+++ b/cmake/EigenTesting.cmake
@@ -19,19 +19,28 @@ macro(ei_add_test_internal testname testname_with_suffix)
endif()
if(EIGEN_ADD_TEST_FILENAME_EXTENSION STREQUAL cu)
- if(EIGEN_TEST_CUDA_CLANG)
+ if(EIGEN_TEST_HIP)
+ hip_reset_flags()
+ hip_add_executable(${targetname} ${filename} HIPCC_OPTIONS "-DEIGEN_USE_HIP ${ARGV2}")
+ elseif(EIGEN_TEST_CUDA_CLANG)
set_source_files_properties(${filename} PROPERTIES LANGUAGE CXX)
- if(CUDA_64_BIT_DEVICE_CODE)
+
+ if(CUDA_64_BIT_DEVICE_CODE AND (EXISTS "${CUDA_TOOLKIT_ROOT_DIR}/lib64"))
link_directories("${CUDA_TOOLKIT_ROOT_DIR}/lib64")
else()
link_directories("${CUDA_TOOLKIT_ROOT_DIR}/lib")
endif()
+
if (${ARGC} GREATER 2)
add_executable(${targetname} ${filename})
else()
add_executable(${targetname} ${filename} OPTIONS ${ARGV2})
endif()
- target_link_libraries(${targetname} "cudart_static" "cuda" "dl" "rt" "pthread")
+ set(CUDA_CLANG_LINK_LIBRARIES "cudart_static" "cuda" "dl" "pthread")
+ if (CMAKE_SYSTEM_NAME STREQUAL "Linux")
+ set(CUDA_CLANG_LINK_LIBRARIES ${CUDA_CLANG_LINK_LIBRARIES} "rt")
+ endif()
+ target_link_libraries(${targetname} ${CUDA_CLANG_LINK_LIBRARIES})
else()
if (${ARGC} GREATER 2)
cuda_add_executable(${targetname} ${filename} OPTIONS ${ARGV2})
@@ -59,8 +68,6 @@ macro(ei_add_test_internal testname testname_with_suffix)
ei_add_target_property(${targetname} COMPILE_FLAGS "-DEIGEN_TEST_MAX_SIZE=${EIGEN_TEST_MAX_SIZE}")
- ei_add_target_property(${targetname} COMPILE_FLAGS "-DEIGEN_TEST_FUNC=${testname}")
-
if(MSVC)
ei_add_target_property(${targetname} COMPILE_FLAGS "/bigobj")
endif()
@@ -99,7 +106,7 @@ macro(ei_add_test_internal testname testname_with_suffix)
add_test(${testname_with_suffix} "${targetname}")
- # Specify target and test labels accoirding to EIGEN_CURRENT_SUBPROJECT
+ # Specify target and test labels according to EIGEN_CURRENT_SUBPROJECT
get_property(current_subproject GLOBAL PROPERTY EIGEN_CURRENT_SUBPROJECT)
if ((current_subproject) AND (NOT (current_subproject STREQUAL "")))
set_property(TARGET ${targetname} PROPERTY LABELS "Build${current_subproject}")
@@ -111,7 +118,6 @@ endmacro(ei_add_test_internal)
# SYCL
macro(ei_add_test_internal_sycl testname testname_with_suffix)
- include_directories( SYSTEM ${COMPUTECPP_PACKAGE_ROOT_DIR}/include)
set(targetname ${testname_with_suffix})
if(EIGEN_ADD_TEST_FILENAME_EXTENSION)
@@ -120,23 +126,31 @@ macro(ei_add_test_internal_sycl testname testname_with_suffix)
set(filename ${testname}.cpp)
endif()
- set( include_file ${CMAKE_CURRENT_BINARY_DIR}/inc_${filename})
- set( bc_file ${CMAKE_CURRENT_BINARY_DIR}/${filename})
- set( host_file ${CMAKE_CURRENT_SOURCE_DIR}/${filename})
+ set( include_file "${CMAKE_CURRENT_BINARY_DIR}/inc_${filename}")
+ set( bc_file "${CMAKE_CURRENT_BINARY_DIR}/${filename}.sycl")
+ set( host_file "${CMAKE_CURRENT_SOURCE_DIR}/${filename}")
- ADD_CUSTOM_COMMAND(
- OUTPUT ${include_file}
- COMMAND ${CMAKE_COMMAND} -E echo "\\#include \\\"${host_file}\\\"" > ${include_file}
- COMMAND ${CMAKE_COMMAND} -E echo "\\#include \\\"${bc_file}.sycl\\\"" >> ${include_file}
- DEPENDS ${filename} ${bc_file}.sycl
- COMMENT "Building ComputeCpp integration header file ${include_file}"
- )
- # Add a custom target for the generated integration header
- add_custom_target(${testname}_integration_header_sycl DEPENDS ${include_file})
+ if(NOT EIGEN_SYCL_TRISYCL)
+ include_directories( SYSTEM ${COMPUTECPP_PACKAGE_ROOT_DIR}/include)
- add_executable(${targetname} ${include_file})
- add_dependencies(${targetname} ${testname}_integration_header_sycl)
- add_sycl_to_target(${targetname} ${filename} ${CMAKE_CURRENT_BINARY_DIR})
+ ADD_CUSTOM_COMMAND(
+ OUTPUT ${include_file}
+ COMMAND ${CMAKE_COMMAND} -E echo "\\#include \\\"${host_file}\\\"" > ${include_file}
+ COMMAND ${CMAKE_COMMAND} -E echo "\\#include \\\"${bc_file}\\\"" >> ${include_file}
+ DEPENDS ${filename} ${bc_file}
+ COMMENT "Building ComputeCpp integration header file ${include_file}"
+ )
+
+ # Add a custom target for the generated integration header
+ add_custom_target("${testname}_integration_header_sycl" DEPENDS ${include_file})
+
+ add_executable(${targetname} ${include_file})
+ add_dependencies(${targetname} "${testname}_integration_header_sycl")
+ else()
+ add_executable(${targetname} ${host_file})
+ endif()
+
+ add_sycl_to_target(${targetname} ${CMAKE_CURRENT_BINARY_DIR} ${filename})
if (targetname MATCHES "^eigen2_")
add_dependencies(eigen2_buildtests ${targetname})
@@ -154,8 +168,6 @@ macro(ei_add_test_internal_sycl testname testname_with_suffix)
ei_add_target_property(${targetname} COMPILE_FLAGS "-DEIGEN_TEST_MAX_SIZE=${EIGEN_TEST_MAX_SIZE}")
- ei_add_target_property(${targetname} COMPILE_FLAGS "-DEIGEN_TEST_FUNC=${testname}")
-
if(MSVC AND NOT EIGEN_SPLIT_LARGE_TESTS)
ei_add_target_property(${targetname} COMPILE_FLAGS "/bigobj")
endif()
@@ -240,7 +252,7 @@ endmacro(ei_add_test_internal_sycl)
#
# If EIGEN_SPLIT_LARGE_TESTS is ON, the test is split into multiple executables
# test_<testname>_<N>
-# where N runs from 1 to the greatest occurence found in the source file. Each of these
+# where N runs from 1 to the greatest occurrence found in the source file. Each of these
# executables is built passing -DEIGEN_TEST_PART_N. This allows to split large tests
# into smaller executables.
#
@@ -260,26 +272,28 @@ macro(ei_add_test testname)
endif()
file(READ "${filename}" test_source)
- set(parts 0)
string(REGEX MATCHALL "CALL_SUBTEST_[0-9]+|EIGEN_TEST_PART_[0-9]+|EIGEN_SUFFIXES(;[0-9]+)+"
- occurences "${test_source}")
- string(REGEX REPLACE "CALL_SUBTEST_|EIGEN_TEST_PART_|EIGEN_SUFFIXES" "" suffixes "${occurences}")
+ occurrences "${test_source}")
+ string(REGEX REPLACE "CALL_SUBTEST_|EIGEN_TEST_PART_|EIGEN_SUFFIXES" "" suffixes "${occurrences}")
list(REMOVE_DUPLICATES suffixes)
- if(EIGEN_SPLIT_LARGE_TESTS AND suffixes)
+ set(explicit_suffixes "")
+ if( (NOT EIGEN_SPLIT_LARGE_TESTS) AND suffixes)
+ # Check whether we have EIGEN_TEST_PART_* statements, in which case we likely must enforce splitting.
+    # For instance, indexed_view activates a different C++ version for each part.
+ string(REGEX MATCHALL "EIGEN_TEST_PART_[0-9]+" occurrences "${test_source}")
+ string(REGEX REPLACE "EIGEN_TEST_PART_" "" explicit_suffixes "${occurrences}")
+ list(REMOVE_DUPLICATES explicit_suffixes)
+ endif()
+ if( (EIGEN_SPLIT_LARGE_TESTS AND suffixes) OR explicit_suffixes)
add_custom_target(${testname})
foreach(suffix ${suffixes})
ei_add_test_internal(${testname} ${testname}_${suffix}
"${ARGV1} -DEIGEN_TEST_PART_${suffix}=1" "${ARGV2}")
add_dependencies(${testname} ${testname}_${suffix})
endforeach(suffix)
- else(EIGEN_SPLIT_LARGE_TESTS AND suffixes)
- set(symbols_to_enable_all_parts "")
- foreach(suffix ${suffixes})
- set(symbols_to_enable_all_parts
- "${symbols_to_enable_all_parts} -DEIGEN_TEST_PART_${suffix}=1")
- endforeach(suffix)
- ei_add_test_internal(${testname} ${testname} "${ARGV1} ${symbols_to_enable_all_parts}" "${ARGV2}")
- endif(EIGEN_SPLIT_LARGE_TESTS AND suffixes)
+ else()
+ ei_add_test_internal(${testname} ${testname} "${ARGV1} -DEIGEN_TEST_PART_ALL=1" "${ARGV2}")
+ endif()
endmacro(ei_add_test)
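In short, ei_add_test now either fans a test out into one executable per part, or builds a single executable with all parts enabled via EIGEN_TEST_PART_ALL. An illustrative call (the test name and extra flag are hypothetical):

    # Suppose test/foo.cpp contains CALL_SUBTEST_1 .. CALL_SUBTEST_3.
    ei_add_test(foo "-O2")
    # With EIGEN_SPLIT_LARGE_TESTS=ON, or when the source references EIGEN_TEST_PART_<N>
    # explicitly, this creates targets foo_1, foo_2, foo_3, each compiled with
    # -DEIGEN_TEST_PART_<N>=1; otherwise a single 'foo' target is built with
    # -DEIGEN_TEST_PART_ALL=1.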
macro(ei_add_test_sycl testname)
@@ -296,8 +310,8 @@ macro(ei_add_test_sycl testname)
file(READ "${filename}" test_source)
set(parts 0)
string(REGEX MATCHALL "CALL_SUBTEST_[0-9]+|EIGEN_TEST_PART_[0-9]+|EIGEN_SUFFIXES(;[0-9]+)+"
- occurences "${test_source}")
- string(REGEX REPLACE "CALL_SUBTEST_|EIGEN_TEST_PART_|EIGEN_SUFFIXES" "" suffixes "${occurences}")
+ occurrences "${test_source}")
+ string(REGEX REPLACE "CALL_SUBTEST_|EIGEN_TEST_PART_|EIGEN_SUFFIXES" "" suffixes "${occurrences}")
list(REMOVE_DUPLICATES suffixes)
if(EIGEN_SPLIT_LARGE_TESTS AND suffixes)
add_custom_target(${testname})
@@ -442,6 +456,12 @@ macro(ei_testing_print_summary)
message(STATUS "VSX: Using architecture defaults")
endif()
+ if(EIGEN_TEST_MSA)
+ message(STATUS "MIPS MSA: ON")
+ else()
+ message(STATUS "MIPS MSA: Using architecture defaults")
+ endif()
+
if(EIGEN_TEST_NEON)
message(STATUS "ARM NEON: ON")
else()
@@ -467,7 +487,11 @@ macro(ei_testing_print_summary)
endif()
if(EIGEN_TEST_SYCL)
- message(STATUS "SYCL: ON")
+ if(EIGEN_SYCL_TRISYCL)
+ message(STATUS "SYCL: ON (using triSYCL)")
+ else()
+      message(STATUS "SYCL: ON (using ComputeCpp)")
+ endif()
else()
message(STATUS "SYCL: OFF")
endif()
@@ -480,6 +504,11 @@ macro(ei_testing_print_summary)
else()
message(STATUS "CUDA: OFF")
endif()
+ if(EIGEN_TEST_HIP)
+ message(STATUS "HIP: ON (using hipcc)")
+ else()
+ message(STATUS "HIP: OFF")
+ endif()
endif() # vectorization / alignment options
@@ -538,6 +567,8 @@ macro(ei_get_compilerver VAR)
else()
set(${VAR} "na")
endif()
+ elseif(${CMAKE_CXX_COMPILER_ID} MATCHES "PGI")
+ set(${VAR} "${CMAKE_CXX_COMPILER_ID}-${CMAKE_CXX_COMPILER_VERSION}")
else()
# on all other system we rely on ${CMAKE_CXX_COMPILER}
# supporting a "--version" or "/version" flag
@@ -634,6 +665,8 @@ macro(ei_get_cxxflags VAR)
set(${VAR} SSE3)
elseif(EIGEN_TEST_SSE2 OR IS_64BIT_ENV)
set(${VAR} SSE2)
+ elseif(EIGEN_TEST_MSA)
+ set(${VAR} MSA)
endif()
if(EIGEN_TEST_OPENMP)
@@ -666,6 +699,10 @@ macro(ei_set_build_string)
set(TMP_BUILD_STRING ${TMP_BUILD_STRING}-${LOCAL_COMPILER_FLAGS})
endif()
+ if(EIGEN_TEST_EXTERNAL_BLAS)
+ set(TMP_BUILD_STRING ${TMP_BUILD_STRING}-external_blas)
+ endif()
+
ei_is_64bit_env(IS_64BIT_ENV)
if(NOT IS_64BIT_ENV)
set(TMP_BUILD_STRING ${TMP_BUILD_STRING}-32bit)
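The summary and build-string changes above expose several new test-configuration switches. A hedged initial-cache sketch enabling them (values are illustrative; set only the ones your toolchain actually supports):

    set(EIGEN_TEST_HIP ON CACHE BOOL "")            # compile the .cu tests with hipcc
    set(EIGEN_TEST_MSA ON CACHE BOOL "")            # exercise the MIPS MSA paths
    set(EIGEN_TEST_SYCL ON CACHE BOOL "")
    set(EIGEN_SYCL_TRISYCL ON CACHE BOOL "")        # use triSYCL instead of ComputeCpp
    set(EIGEN_TEST_EXTERNAL_BLAS ON CACHE BOOL "")  # tagged in the dashboard build string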
diff --git a/cmake/FindBLAS.cmake b/cmake/FindBLAS.cmake
index 68c4e0724..e3395bc10 100644
--- a/cmake/FindBLAS.cmake
+++ b/cmake/FindBLAS.cmake
@@ -1,385 +1,1363 @@
-# Find BLAS library
+###
#
-# This module finds an installed library that implements the BLAS
+# @copyright (c) 2009-2014 The University of Tennessee and The University
+# of Tennessee Research Foundation.
+# All rights reserved.
+# @copyright (c) 2012-2016 Inria. All rights reserved.
+# @copyright (c) 2012-2014 Bordeaux INP, CNRS (LaBRI UMR 5800), Inria, Univ. Bordeaux. All rights reserved.
+#
+###
+#
+# - Find BLAS library
+# This module finds an installed fortran library that implements the BLAS
# linear-algebra interface (see http://www.netlib.org/blas/).
-# The list of libraries searched for is mainly taken
+# The list of libraries searched for is taken
# from the autoconf macro file, acx_blas.m4 (distributed at
# http://ac-archive.sourceforge.net/ac-archive/acx_blas.html).
#
# This module sets the following variables:
# BLAS_FOUND - set to true if a library implementing the BLAS interface
# is found
-# BLAS_INCLUDE_DIR - Directories containing the BLAS header files
-# BLAS_DEFINITIONS - Compilation options to use BLAS
-# BLAS_LINKER_FLAGS - Linker flags to use BLAS (excluding -l
+# BLAS_LINKER_FLAGS - uncached list of required linker flags (excluding -l
# and -L).
-# BLAS_LIBRARIES_DIR - Directories containing the BLAS libraries.
-# May be null if BLAS_LIBRARIES contains libraries name using full path.
-# BLAS_LIBRARIES - List of libraries to link against BLAS interface.
-# May be null if the compiler supports auto-link (e.g. VC++).
-# BLAS_USE_FILE - The name of the cmake module to include to compile
-# applications or libraries using BLAS.
+# BLAS_COMPILER_FLAGS - uncached list of required compiler flags (including -I for mkl headers).
+# BLAS_LIBRARIES - uncached list of libraries (using full path name) to
+# link against to use BLAS
+# BLAS95_LIBRARIES - uncached list of libraries (using full path name)
+# to link against to use BLAS95 interface
+# BLAS95_FOUND - set to true if a library implementing the BLAS f95 interface
+# is found
+# BLAS_VENDOR_FOUND stores the BLAS vendor found
+# BLA_STATIC if set, static linkage is preferred
+# BLA_VENDOR if set, check only the specified vendor; if not set, check
+# all the possibilities
+# BLA_F95 if set, try to find the Fortran 95 interfaces for BLAS/LAPACK
+# The user can give specific paths where to find the libraries by adding cmake
+# options at configure time (ex: cmake path/to/project -DBLAS_DIR=path/to/blas):
+# BLAS_DIR - Where to find the base directory of blas
+# BLAS_INCDIR - Where to find the header files
+# BLAS_LIBDIR - Where to find the library files
+# The module can also look for the following environment variables if paths
+# are not given as cmake variables: BLAS_DIR, BLAS_INCDIR, BLAS_LIBDIR
+# In the MKL case, if no paths are given as hints, we will try to use the MKLROOT
+# environment variable
+# BLAS_VERBOSE Print some additional information during BLAS libraries detection
+##########
+########## List of vendors (BLA_VENDOR) valid in this module
+## Open (for OpenBLAS), Eigen (for EigenBlas), Goto, ATLAS, PhiPACK,
+##  CXML, DXML, SunPerf, SCSL, SGIMATH, IBMESSL, IBMESSLMT
+## Intel10_32 (intel mkl v10 32 bit), Intel10_64lp (intel mkl v10 64 bit, lp thread model, lp64 model),
+## Intel10_64lp_seq (intel mkl v10 64 bit, sequential code, lp64 model),
+## Intel (older versions of mkl 32 and 64 bit),
+##  ACML, ACML_MP, ACML_GPU, Apple, NAS, Generic
+# C/CXX should be enabled to use Intel mkl
+###
+# We handle different modes to find the dependency
+#
+# - Detection if already installed on the system
+# - BLAS libraries can be detected from different ways
+# Here is the order of precedence:
+# 1) we look in cmake variable BLAS_LIBDIR or BLAS_DIR (we guess the libdirs) if defined
+# 2) we look in environment variable BLAS_LIBDIR or BLAS_DIR (we guess the libdirs) if defined
+# 3) we look in common environment variables depending on the system (INCLUDE, C_INCLUDE_PATH, CPATH - LIB, DYLD_LIBRARY_PATH, LD_LIBRARY_PATH)
+# 4) we look in common system paths depending on the system, see for example paths contained in the following cmake variables:
+# - CMAKE_PLATFORM_IMPLICIT_INCLUDE_DIRECTORIES, CMAKE_PLATFORM_IMPLICIT_LINK_DIRECTORIES
+# - CMAKE_C_IMPLICIT_INCLUDE_DIRECTORIES, CMAKE_C_IMPLICIT_LINK_DIRECTORIES
+#
+
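Taken together, the documentation above corresponds to roughly the following usage; a minimal sketch (project and source names are hypothetical, variables as documented above):

    cmake_minimum_required(VERSION 3.0)
    project(blas_consumer C)
    set(BLA_VENDOR "Open")            # optional: restrict the search to OpenBLAS
    find_package(BLAS REQUIRED)       # honours BLAS_DIR / BLAS_INCDIR / BLAS_LIBDIR hints
    add_executable(solver solver.c)
    target_link_libraries(solver ${BLAS_LIBRARIES} ${BLAS_LINKER_FLAGS})
    message(STATUS "BLAS vendor found: ${BLAS_VENDOR_FOUND}")

Configuring with, for example, -DBLAS_DIR=/opt/openblas or -DBLAS_VERBOSE=ON behaves as described above.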
+#=============================================================================
+# Copyright 2007-2009 Kitware, Inc.
#
-# This module was modified by CGAL team:
-# - find libraries for a C++ compiler, instead of Fortran
-# - added BLAS_INCLUDE_DIR, BLAS_DEFINITIONS and BLAS_LIBRARIES_DIR
-# - removed BLAS95_LIBRARIES
+# Distributed under the OSI-approved BSD License (the "License");
+# see accompanying file Copyright.txt for details.
+#
+# This software is distributed WITHOUT ANY WARRANTY; without even the
+# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+# See the License for more information.
+#=============================================================================
+# (To distribute this file outside of CMake, substitute the full
+# License text for the above reference.)
+
+## Some macros to print status when search for headers and libs
+# This macro informs why the _lib_to_find file has not been found
+macro(Print_Find_Library_Blas_Status _libname _lib_to_find)
+
+ # save _libname upper/lower case
+ string(TOUPPER ${_libname} LIBNAME)
+ string(TOLOWER ${_libname} libname)
+
+ # print status
+ #message(" ")
+ if(${LIBNAME}_LIBDIR)
+ message("${Yellow}${LIBNAME}_LIBDIR is defined but ${_lib_to_find}"
+ "has not been found in ${ARGN}${ColourReset}")
+ else()
+ if(${LIBNAME}_DIR)
+ message("${Yellow}${LIBNAME}_DIR is defined but ${_lib_to_find}"
+ "has not been found in ${ARGN}${ColourReset}")
+ else()
+ message("${Yellow}${_lib_to_find} not found."
+      "Neither ${LIBNAME}_DIR nor ${LIBNAME}_LIBDIR"
+      "is defined, so we look for ${_lib_to_find} in"
+ "system paths (Linux: LD_LIBRARY_PATH, Windows: LIB,"
+ "Mac: DYLD_LIBRARY_PATH,"
+ "CMAKE_PLATFORM_IMPLICIT_LINK_DIRECTORIES,"
+ "CMAKE_C_IMPLICIT_LINK_DIRECTORIES)${ColourReset}")
+ if(_lib_env)
+ message("${Yellow}${_lib_to_find} has not been found in"
+ "${_lib_env}${ColourReset}")
+ endif()
+ endif()
+ endif()
+  message("${BoldYellow}Please indicate where to find ${_lib_to_find}. You have four options:\n"
+ "- Option 1: Provide the Installation directory of BLAS library with cmake option: -D${LIBNAME}_DIR=your/path/to/${libname}/\n"
+ "- Option 2: Provide the directory where to find the library with cmake option: -D${LIBNAME}_LIBDIR=your/path/to/${libname}/lib/\n"
+ "- Option 3: Update your environment variable (Linux: LD_LIBRARY_PATH, Windows: LIB, Mac: DYLD_LIBRARY_PATH)\n"
+ "- Option 4: If your library provides a PkgConfig file, make sure pkg-config finds your library${ColourReset}")
+
+endmacro()
+
+# This macro informs why the _lib_to_find file has not been found
+macro(Print_Find_Library_Blas_CheckFunc_Status _name)
+
+ # save _libname upper/lower case
+ string(TOUPPER ${_name} FUNCNAME)
+ string(TOLOWER ${_name} funcname)
+
+ # print status
+ #message(" ")
+ message("${Red}Libs have been found but check of symbol ${_name} failed "
+ "with following libraries ${ARGN}${ColourReset}")
+ message("${BoldRed}Please open your error file CMakeFiles/CMakeError.log"
+ "to figure out why it fails${ColourReset}")
+ #message(" ")
+
+endmacro()
+
+if (NOT BLAS_FOUND)
+ set(BLAS_DIR "" CACHE PATH "Installation directory of BLAS library")
+ if (NOT BLAS_FIND_QUIETLY)
+ message(STATUS "A cache variable, namely BLAS_DIR, has been set to specify the install directory of BLAS")
+ endif()
+endif()
+
+option(BLAS_VERBOSE "Print some additional information during BLAS libraries detection" OFF)
+mark_as_advanced(BLAS_VERBOSE)
include(CheckFunctionExists)
+include(CheckFortranFunctionExists)
+set(_blas_ORIG_CMAKE_FIND_LIBRARY_SUFFIXES ${CMAKE_FIND_LIBRARY_SUFFIXES})
-# This macro checks for the existence of the combination of fortran libraries
-# given by _list. If the combination is found, this macro checks (using the
-# check_function_exists macro) whether can link against that library
-# combination using the name of a routine given by _name using the linker
-# flags given by _flags. If the combination of libraries is found and passes
-# the link test, LIBRARIES is set to the list of complete library paths that
-# have been found and DEFINITIONS to the required definitions.
-# Otherwise, LIBRARIES is set to FALSE.
-# N.B. _prefix is the prefix applied to the names of all cached variables that
-# are generated internally and marked advanced by this macro.
-macro(check_fortran_libraries DEFINITIONS LIBRARIES _prefix _name _flags _list _path)
- #message("DEBUG: check_fortran_libraries(${_list} in ${_path})")
-
- # Check for the existence of the libraries given by _list
- set(_libraries_found TRUE)
- set(_libraries_work FALSE)
- set(${DEFINITIONS} "")
- set(${LIBRARIES} "")
+# Check the language being used
+get_property( _LANGUAGES_ GLOBAL PROPERTY ENABLED_LANGUAGES )
+if( _LANGUAGES_ MATCHES Fortran AND CMAKE_Fortran_COMPILER)
+ set( _CHECK_FORTRAN TRUE )
+elseif( (_LANGUAGES_ MATCHES C) OR (_LANGUAGES_ MATCHES CXX) )
+ set( _CHECK_FORTRAN FALSE )
+else()
+ if(BLAS_FIND_REQUIRED)
+ message(FATAL_ERROR "FindBLAS requires Fortran, C, or C++ to be enabled.")
+ else()
+ message(STATUS "Looking for BLAS... - NOT found (Unsupported languages)")
+ return()
+ endif()
+endif()
+
+macro(Check_Fortran_Libraries LIBRARIES _prefix _name _flags _list _thread)
+ # This macro checks for the existence of the combination of fortran libraries
+ # given by _list. If the combination is found, this macro checks (using the
+  # Check_Fortran_Function_Exists macro) whether we can link against that library
+ # combination using the name of a routine given by _name using the linker
+ # flags given by _flags. If the combination of libraries is found and passes
+ # the link test, LIBRARIES is set to the list of complete library paths that
+ # have been found. Otherwise, LIBRARIES is set to FALSE.
+
+ # N.B. _prefix is the prefix applied to the names of all cached variables that
+ # are generated internally and marked advanced by this macro.
+
+ set(_libdir ${ARGN})
+
+ set(_libraries_work TRUE)
+ set(${LIBRARIES})
set(_combined_name)
+ set(ENV_MKLROOT "$ENV{MKLROOT}")
+ set(ENV_BLAS_DIR "$ENV{BLAS_DIR}")
+ set(ENV_BLAS_LIBDIR "$ENV{BLAS_LIBDIR}")
+ if (NOT _libdir)
+ if (BLAS_LIBDIR)
+ list(APPEND _libdir "${BLAS_LIBDIR}")
+ elseif (BLAS_DIR)
+ list(APPEND _libdir "${BLAS_DIR}")
+ list(APPEND _libdir "${BLAS_DIR}/lib")
+ if("${CMAKE_SIZEOF_VOID_P}" EQUAL "8")
+ list(APPEND _libdir "${BLAS_DIR}/lib64")
+ list(APPEND _libdir "${BLAS_DIR}/lib/intel64")
+ else()
+ list(APPEND _libdir "${BLAS_DIR}/lib32")
+ list(APPEND _libdir "${BLAS_DIR}/lib/ia32")
+ endif()
+ elseif(ENV_BLAS_LIBDIR)
+ list(APPEND _libdir "${ENV_BLAS_LIBDIR}")
+ elseif(ENV_BLAS_DIR)
+ list(APPEND _libdir "${ENV_BLAS_DIR}")
+ list(APPEND _libdir "${ENV_BLAS_DIR}/lib")
+ if("${CMAKE_SIZEOF_VOID_P}" EQUAL "8")
+ list(APPEND _libdir "${ENV_BLAS_DIR}/lib64")
+ list(APPEND _libdir "${ENV_BLAS_DIR}/lib/intel64")
+ else()
+ list(APPEND _libdir "${ENV_BLAS_DIR}/lib32")
+ list(APPEND _libdir "${ENV_BLAS_DIR}/lib/ia32")
+ endif()
+ else()
+ if (ENV_MKLROOT)
+ list(APPEND _libdir "${ENV_MKLROOT}/lib")
+ if("${CMAKE_SIZEOF_VOID_P}" EQUAL "8")
+ list(APPEND _libdir "${ENV_MKLROOT}/lib64")
+ list(APPEND _libdir "${ENV_MKLROOT}/lib/intel64")
+ else()
+ list(APPEND _libdir "${ENV_MKLROOT}/lib32")
+ list(APPEND _libdir "${ENV_MKLROOT}/lib/ia32")
+ endif()
+ endif()
+ if (WIN32)
+ string(REPLACE ":" ";" _libdir2 "$ENV{LIB}")
+ elseif (APPLE)
+ string(REPLACE ":" ";" _libdir2 "$ENV{DYLD_LIBRARY_PATH}")
+ else ()
+ string(REPLACE ":" ";" _libdir2 "$ENV{LD_LIBRARY_PATH}")
+ endif ()
+ list(APPEND _libdir "${_libdir2}")
+ list(APPEND _libdir "${CMAKE_PLATFORM_IMPLICIT_LINK_DIRECTORIES}")
+ list(APPEND _libdir "${CMAKE_C_IMPLICIT_LINK_DIRECTORIES}")
+ endif()
+ endif ()
+
+ if (BLAS_VERBOSE)
+ message("${Cyan}Try to find BLAS libraries: ${_list}")
+ endif ()
+
foreach(_library ${_list})
set(_combined_name ${_combined_name}_${_library})
- if(_libraries_found)
- # search first in ${_path}
- find_library(${_prefix}_${_library}_LIBRARY
- NAMES ${_library}
- PATHS ${_path} NO_DEFAULT_PATH
- )
- # if not found, search in environment variables and system
- if ( WIN32 )
- find_library(${_prefix}_${_library}_LIBRARY
- NAMES ${_library}
- PATHS ENV LIB
- )
- elseif ( APPLE )
- find_library(${_prefix}_${_library}_LIBRARY
- NAMES ${_library}
- PATHS /usr/local/lib /usr/lib /usr/local/lib64 /usr/lib64 ENV DYLD_LIBRARY_PATH
- )
+ if(_libraries_work)
+ if (BLA_STATIC)
+ if (WIN32)
+ set(CMAKE_FIND_LIBRARY_SUFFIXES .lib ${CMAKE_FIND_LIBRARY_SUFFIXES})
+ endif ()
+ if (APPLE)
+ set(CMAKE_FIND_LIBRARY_SUFFIXES .lib ${CMAKE_FIND_LIBRARY_SUFFIXES})
+ else ()
+ set(CMAKE_FIND_LIBRARY_SUFFIXES .a ${CMAKE_FIND_LIBRARY_SUFFIXES})
+ endif ()
else ()
- find_library(${_prefix}_${_library}_LIBRARY
- NAMES ${_library}
- PATHS /usr/local/lib /usr/lib /usr/local/lib64 /usr/lib64 ENV LD_LIBRARY_PATH
- )
- endif()
+ if (CMAKE_SYSTEM_NAME STREQUAL "Linux")
+ # for ubuntu's libblas3gf and liblapack3gf packages
+ set(CMAKE_FIND_LIBRARY_SUFFIXES ${CMAKE_FIND_LIBRARY_SUFFIXES} .so.3gf)
+ endif ()
+ endif ()
+ find_library(${_prefix}_${_library}_LIBRARY
+ NAMES ${_library}
+ HINTS ${_libdir}
+ NO_DEFAULT_PATH
+ )
mark_as_advanced(${_prefix}_${_library}_LIBRARY)
+ # Print status if not found
+ # -------------------------
+ if (NOT ${_prefix}_${_library}_LIBRARY AND NOT BLAS_FIND_QUIETLY AND BLAS_VERBOSE)
+ Print_Find_Library_Blas_Status(blas ${_library} ${_libdir})
+ endif ()
set(${LIBRARIES} ${${LIBRARIES}} ${${_prefix}_${_library}_LIBRARY})
- set(_libraries_found ${${_prefix}_${_library}_LIBRARY})
- endif(_libraries_found)
+ set(_libraries_work ${${_prefix}_${_library}_LIBRARY})
+ endif(_libraries_work)
endforeach(_library ${_list})
- if(_libraries_found)
- set(_libraries_found ${${LIBRARIES}})
- endif()
-
- # Test this combination of libraries with the Fortran/f2c interface.
- # We test the Fortran interface first as it is well standardized.
- if(_libraries_found AND NOT _libraries_work)
- set(${DEFINITIONS} "-D${_prefix}_USE_F2C")
- set(${LIBRARIES} ${_libraries_found})
- # Some C++ linkers require the f2c library to link with Fortran libraries.
- # I do not know which ones, thus I just add the f2c library if it is available.
- find_package( F2C QUIET )
- if ( F2C_FOUND )
- set(${DEFINITIONS} ${${DEFINITIONS}} ${F2C_DEFINITIONS})
- set(${LIBRARIES} ${${LIBRARIES}} ${F2C_LIBRARIES})
+
+ if(_libraries_work)
+ # Test this combination of libraries.
+ if (CMAKE_SYSTEM_NAME STREQUAL "Linux" AND BLA_STATIC)
+ list(INSERT ${LIBRARIES} 0 "-Wl,--start-group")
+ list(APPEND ${LIBRARIES} "-Wl,--end-group")
+ endif()
+ set(CMAKE_REQUIRED_LIBRARIES "${_flags};${${LIBRARIES}};${_thread}")
+ set(CMAKE_REQUIRED_FLAGS "${BLAS_COMPILER_FLAGS}")
+ if (BLAS_VERBOSE)
+ message("${Cyan}BLAS libs found for BLA_VENDOR ${BLA_VENDOR}."
+ "Try to compile symbol ${_name} with following libraries:"
+ "${CMAKE_REQUIRED_LIBRARIES}")
+ endif ()
+ if(NOT BLAS_FOUND)
+ unset(${_prefix}${_combined_name}_WORKS CACHE)
+ endif()
+ if (_CHECK_FORTRAN)
+ if (CMAKE_Fortran_COMPILER_ID STREQUAL "GNU")
+ string(REPLACE "mkl_intel_lp64" "mkl_gf_lp64" CMAKE_REQUIRED_LIBRARIES "${CMAKE_REQUIRED_LIBRARIES}")
+ string(REPLACE "mkl_intel_ilp64" "mkl_gf_ilp64" CMAKE_REQUIRED_LIBRARIES "${CMAKE_REQUIRED_LIBRARIES}")
+ endif()
+ check_fortran_function_exists("${_name}" ${_prefix}${_combined_name}_WORKS)
+ else()
+ check_function_exists("${_name}_" ${_prefix}${_combined_name}_WORKS)
endif()
- set(CMAKE_REQUIRED_DEFINITIONS ${${DEFINITIONS}})
- set(CMAKE_REQUIRED_LIBRARIES ${_flags} ${${LIBRARIES}})
- #message("DEBUG: CMAKE_REQUIRED_DEFINITIONS = ${CMAKE_REQUIRED_DEFINITIONS}")
- #message("DEBUG: CMAKE_REQUIRED_LIBRARIES = ${CMAKE_REQUIRED_LIBRARIES}")
- # Check if function exists with f2c calling convention (ie a trailing underscore)
- check_function_exists(${_name}_ ${_prefix}_${_name}_${_combined_name}_f2c_WORKS)
- set(CMAKE_REQUIRED_DEFINITIONS} "")
- set(CMAKE_REQUIRED_LIBRARIES "")
- mark_as_advanced(${_prefix}_${_name}_${_combined_name}_f2c_WORKS)
- set(_libraries_work ${${_prefix}_${_name}_${_combined_name}_f2c_WORKS})
- endif(_libraries_found AND NOT _libraries_work)
-
- # If not found, test this combination of libraries with a C interface.
- # A few implementations (ie ACML) provide a C interface. Unfortunately, there is no standard.
- if(_libraries_found AND NOT _libraries_work)
- set(${DEFINITIONS} "")
- set(${LIBRARIES} ${_libraries_found})
- set(CMAKE_REQUIRED_DEFINITIONS "")
- set(CMAKE_REQUIRED_LIBRARIES ${_flags} ${${LIBRARIES}})
- #message("DEBUG: CMAKE_REQUIRED_LIBRARIES = ${CMAKE_REQUIRED_LIBRARIES}")
- check_function_exists(${_name} ${_prefix}_${_name}${_combined_name}_WORKS)
- set(CMAKE_REQUIRED_LIBRARIES "")
- mark_as_advanced(${_prefix}_${_name}${_combined_name}_WORKS)
- set(_libraries_work ${${_prefix}_${_name}${_combined_name}_WORKS})
- endif(_libraries_found AND NOT _libraries_work)
-
- # on failure
- if(NOT _libraries_work)
- set(${DEFINITIONS} "")
- set(${LIBRARIES} FALSE)
- endif()
- #message("DEBUG: ${DEFINITIONS} = ${${DEFINITIONS}}")
- #message("DEBUG: ${LIBRARIES} = ${${LIBRARIES}}")
-endmacro(check_fortran_libraries)
+ mark_as_advanced(${_prefix}${_combined_name}_WORKS)
+ set(_libraries_work ${${_prefix}${_combined_name}_WORKS})
+ # Print status if not found
+ # -------------------------
+ if (NOT _libraries_work AND NOT BLAS_FIND_QUIETLY AND BLAS_VERBOSE)
+ Print_Find_Library_Blas_CheckFunc_Status(${_name} ${CMAKE_REQUIRED_LIBRARIES})
+ endif ()
+ set(CMAKE_REQUIRED_LIBRARIES)
+ endif()
+ if(_libraries_work)
+ set(${LIBRARIES} ${${LIBRARIES}} ${_thread})
+ else(_libraries_work)
+ set(${LIBRARIES} FALSE)
+ endif(_libraries_work)
-#
-# main
-#
+endmacro(Check_Fortran_Libraries)
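Every vendor block below goes through this macro. An illustrative call, mirroring the OpenBLAS case further down: look for sgemm in libopenblas, with no extra linker flags and no extra thread libraries.

    check_fortran_libraries(
      BLAS_LIBRARIES   # output: list of full library paths on success, FALSE otherwise
      BLAS             # prefix for the cached per-library variables
      sgemm            # routine whose presence validates the combination
      ""               # _flags: extra linker flags
      "openblas"       # _list: semicolon-separated library names to look for
      ""               # _thread: libraries appended for threading support
      )                # any trailing arguments (ARGN) are used as HINTS directories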
-# Is it already configured?
-if (BLAS_LIBRARIES_DIR OR BLAS_LIBRARIES)
- set(BLAS_FOUND TRUE)
+set(BLAS_LINKER_FLAGS)
+set(BLAS_LIBRARIES)
+set(BLAS95_LIBRARIES)
+if ($ENV{BLA_VENDOR} MATCHES ".+")
+ set(BLA_VENDOR $ENV{BLA_VENDOR})
+else ()
+ if(NOT BLA_VENDOR)
+ set(BLA_VENDOR "All")
+ endif()
+endif ()
-else()
+#BLAS in intel mkl 10 library? (em64t 64bit)
+if (BLA_VENDOR MATCHES "Intel*" OR BLA_VENDOR STREQUAL "All")
+
+ if(NOT BLAS_LIBRARIES OR BLA_VENDOR MATCHES "Intel*")
+ # Looking for include
+ # -------------------
+
+ # Add system include paths to search include
+ # ------------------------------------------
+ unset(_inc_env)
+ set(ENV_MKLROOT "$ENV{MKLROOT}")
+ set(ENV_BLAS_DIR "$ENV{BLAS_DIR}")
+ set(ENV_BLAS_INCDIR "$ENV{BLAS_INCDIR}")
+ if(ENV_BLAS_INCDIR)
+ list(APPEND _inc_env "${ENV_BLAS_INCDIR}")
+ elseif(ENV_BLAS_DIR)
+ list(APPEND _inc_env "${ENV_BLAS_DIR}")
+ list(APPEND _inc_env "${ENV_BLAS_DIR}/include")
+ else()
+ if (ENV_MKLROOT)
+ list(APPEND _inc_env "${ENV_MKLROOT}/include")
+ endif()
+ # system variables
+ if(WIN32)
+ string(REPLACE ":" ";" _path_env "$ENV{INCLUDE}")
+ list(APPEND _inc_env "${_path_env}")
+ else()
+ string(REPLACE ":" ";" _path_env "$ENV{INCLUDE}")
+ list(APPEND _inc_env "${_path_env}")
+ string(REPLACE ":" ";" _path_env "$ENV{C_INCLUDE_PATH}")
+ list(APPEND _inc_env "${_path_env}")
+ string(REPLACE ":" ";" _path_env "$ENV{CPATH}")
+ list(APPEND _inc_env "${_path_env}")
+ string(REPLACE ":" ";" _path_env "$ENV{INCLUDE_PATH}")
+ list(APPEND _inc_env "${_path_env}")
+ endif()
+ endif()
+ list(APPEND _inc_env "${CMAKE_PLATFORM_IMPLICIT_INCLUDE_DIRECTORIES}")
+ list(APPEND _inc_env "${CMAKE_C_IMPLICIT_INCLUDE_DIRECTORIES}")
+ list(REMOVE_DUPLICATES _inc_env)
- # reset variables
- set( BLAS_INCLUDE_DIR "" )
- set( BLAS_DEFINITIONS "" )
- set( BLAS_LINKER_FLAGS "" )
- set( BLAS_LIBRARIES "" )
- set( BLAS_LIBRARIES_DIR "" )
+ # set paths where to look for
+ set(PATH_TO_LOOK_FOR "${_inc_env}")
- #
- # If Unix, search for BLAS function in possible libraries
- #
+    # Try to find the mkl header in the given paths
+ # -------------------------------------------------
+ # call cmake macro to find the header path
+ if(BLAS_INCDIR)
+ set(BLAS_mkl.h_DIRS "BLAS_mkl.h_DIRS-NOTFOUND")
+ find_path(BLAS_mkl.h_DIRS
+ NAMES mkl.h
+ HINTS ${BLAS_INCDIR})
+ else()
+ if(BLAS_DIR)
+ set(BLAS_mkl.h_DIRS "BLAS_mkl.h_DIRS-NOTFOUND")
+ find_path(BLAS_mkl.h_DIRS
+ NAMES mkl.h
+ HINTS ${BLAS_DIR}
+ PATH_SUFFIXES "include")
+ else()
+ set(BLAS_mkl.h_DIRS "BLAS_mkl.h_DIRS-NOTFOUND")
+ find_path(BLAS_mkl.h_DIRS
+ NAMES mkl.h
+ HINTS ${PATH_TO_LOOK_FOR})
+ endif()
+ endif()
+ mark_as_advanced(BLAS_mkl.h_DIRS)
- # BLAS in ATLAS library? (http://math-atlas.sourceforge.net/)
- if(NOT BLAS_LIBRARIES)
- check_fortran_libraries(
- BLAS_DEFINITIONS
+ # If found, add path to cmake variable
+ # ------------------------------------
+ if (BLAS_mkl.h_DIRS)
+ set(BLAS_INCLUDE_DIRS "${BLAS_mkl.h_DIRS}")
+ else ()
+ set(BLAS_INCLUDE_DIRS "BLAS_INCLUDE_DIRS-NOTFOUND")
+ if(NOT BLAS_FIND_QUIETLY)
+ message(STATUS "Looking for BLAS -- mkl.h not found")
+ endif()
+ endif()
+
+ if (WIN32)
+ string(REPLACE ":" ";" _libdir "$ENV{LIB}")
+ elseif (APPLE)
+ string(REPLACE ":" ";" _libdir "$ENV{DYLD_LIBRARY_PATH}")
+ else ()
+ string(REPLACE ":" ";" _libdir "$ENV{LD_LIBRARY_PATH}")
+ endif ()
+ list(APPEND _libdir "${CMAKE_PLATFORM_IMPLICIT_LINK_DIRECTORIES}")
+ list(APPEND _libdir "${CMAKE_C_IMPLICIT_LINK_DIRECTORIES}")
+ # libiomp5
+ # --------
+ set(OMP_iomp5_LIBRARY "OMP_iomp5_LIBRARY-NOTFOUND")
+ find_library(OMP_iomp5_LIBRARY
+ NAMES iomp5
+ HINTS ${_libdir}
+ )
+ mark_as_advanced(OMP_iomp5_LIBRARY)
+ set(OMP_LIB "")
+ # libgomp
+ # -------
+ set(OMP_gomp_LIBRARY "OMP_gomp_LIBRARY-NOTFOUND")
+ find_library(OMP_gomp_LIBRARY
+ NAMES gomp
+ HINTS ${_libdir}
+ )
+ mark_as_advanced(OMP_gomp_LIBRARY)
+  # choose one or the other depending on the compiler
+ if (CMAKE_C_COMPILER_ID STREQUAL "GNU")
+ if (OMP_gomp_LIBRARY)
+ set(OMP_LIB "${OMP_gomp_LIBRARY}")
+ endif()
+ else(CMAKE_C_COMPILER_ID STREQUAL "Intel")
+ if (OMP_iomp5_LIBRARY)
+ set(OMP_LIB "${OMP_iomp5_LIBRARY}")
+ endif()
+ endif()
+
+ if (UNIX AND NOT WIN32)
+ # m
+ find_library(M_LIBRARY
+ NAMES m
+ HINTS ${_libdir})
+ mark_as_advanced(M_LIBRARY)
+ if(M_LIBRARY)
+ set(LM "-lm")
+ else()
+ set(LM "")
+ endif()
+ # Fortran
+ set(LGFORTRAN "")
+ if (CMAKE_C_COMPILER_ID MATCHES "GNU")
+ find_library(
+ FORTRAN_gfortran_LIBRARY
+ NAMES gfortran
+ HINTS ${_libdir}
+ )
+ mark_as_advanced(FORTRAN_gfortran_LIBRARY)
+ if (FORTRAN_gfortran_LIBRARY)
+ set(LGFORTRAN "${FORTRAN_gfortran_LIBRARY}")
+ endif()
+ elseif (CMAKE_C_COMPILER_ID MATCHES "Intel")
+ find_library(
+ FORTRAN_ifcore_LIBRARY
+ NAMES ifcore
+ HINTS ${_libdir}
+ )
+ mark_as_advanced(FORTRAN_ifcore_LIBRARY)
+ if (FORTRAN_ifcore_LIBRARY)
+        set(LGFORTRAN "${FORTRAN_ifcore_LIBRARY}")
+ endif()
+ endif()
+ set(BLAS_COMPILER_FLAGS "")
+ if (NOT BLA_VENDOR STREQUAL "Intel10_64lp_seq")
+ if (CMAKE_C_COMPILER_ID STREQUAL "Intel")
+ list(APPEND BLAS_COMPILER_FLAGS "-openmp")
+ endif()
+ if (CMAKE_C_COMPILER_ID STREQUAL "GNU")
+ list(APPEND BLAS_COMPILER_FLAGS "-fopenmp")
+ endif()
+ endif()
+ if (CMAKE_C_COMPILER_ID STREQUAL "GNU")
+ if (BLA_VENDOR STREQUAL "Intel10_32")
+ list(APPEND BLAS_COMPILER_FLAGS "-m32")
+ else()
+ list(APPEND BLAS_COMPILER_FLAGS "-m64")
+ endif()
+ if (NOT BLA_VENDOR STREQUAL "Intel10_64lp_seq")
+ list(APPEND OMP_LIB "-ldl")
+ endif()
+ if (ENV_MKLROOT)
+ list(APPEND BLAS_COMPILER_FLAGS "-I${ENV_MKLROOT}/include")
+ endif()
+ endif()
+
+ set(additional_flags "")
+ if (CMAKE_C_COMPILER_ID STREQUAL "GNU" AND CMAKE_SYSTEM_NAME STREQUAL "Linux")
+ set(additional_flags "-Wl,--no-as-needed")
+ endif()
+ endif ()
+
+ if (_LANGUAGES_ MATCHES C OR _LANGUAGES_ MATCHES CXX)
+ if(BLAS_FIND_QUIETLY OR NOT BLAS_FIND_REQUIRED)
+ find_package(Threads)
+ else()
+ find_package(Threads REQUIRED)
+ endif()
+
+ set(BLAS_SEARCH_LIBS "")
+
+ if(BLA_F95)
+
+ set(BLAS_mkl_SEARCH_SYMBOL SGEMM)
+ set(_LIBRARIES BLAS95_LIBRARIES)
+ if (WIN32)
+ if (BLA_STATIC)
+ set(BLAS_mkl_DLL_SUFFIX "")
+ else()
+ set(BLAS_mkl_DLL_SUFFIX "_dll")
+ endif()
+
+ # Find the main file (32-bit or 64-bit)
+ set(BLAS_SEARCH_LIBS_WIN_MAIN "")
+ if (BLA_VENDOR STREQUAL "Intel10_32" OR BLA_VENDOR STREQUAL "All")
+ list(APPEND BLAS_SEARCH_LIBS_WIN_MAIN
+ "mkl_blas95${BLAS_mkl_DLL_SUFFIX} mkl_intel_c${BLAS_mkl_DLL_SUFFIX}")
+ endif()
+ if (BLA_VENDOR STREQUAL "Intel10_64lp*" OR BLA_VENDOR STREQUAL "All")
+ list(APPEND BLAS_SEARCH_LIBS_WIN_MAIN
+ "mkl_blas95_lp64${BLAS_mkl_DLL_SUFFIX} mkl_intel_lp64${BLAS_mkl_DLL_SUFFIX}")
+ endif ()
+
+ # Add threading/sequential libs
+ set(BLAS_SEARCH_LIBS_WIN_THREAD "")
+ if (BLA_VENDOR STREQUAL "*_seq" OR BLA_VENDOR STREQUAL "All")
+ list(APPEND BLAS_SEARCH_LIBS_WIN_THREAD
+ "mkl_sequential${BLAS_mkl_DLL_SUFFIX}")
+ endif()
+ if (NOT BLA_VENDOR STREQUAL "*_seq" OR BLA_VENDOR STREQUAL "All")
+ # old version
+ list(APPEND BLAS_SEARCH_LIBS_WIN_THREAD
+ "libguide40 mkl_intel_thread${BLAS_mkl_DLL_SUFFIX}")
+ # mkl >= 10.3
+ list(APPEND BLAS_SEARCH_LIBS_WIN_THREAD
+ "libiomp5md mkl_intel_thread${BLAS_mkl_DLL_SUFFIX}")
+ endif()
+
+ # Cartesian product of the above
+ foreach (MAIN ${BLAS_SEARCH_LIBS_WIN_MAIN})
+ foreach (THREAD ${BLAS_SEARCH_LIBS_WIN_THREAD})
+ list(APPEND BLAS_SEARCH_LIBS
+ "${MAIN} ${THREAD} mkl_core${BLAS_mkl_DLL_SUFFIX}")
+ endforeach()
+ endforeach()
+ else (WIN32)
+ if (BLA_VENDOR STREQUAL "Intel10_32" OR BLA_VENDOR STREQUAL "All")
+ list(APPEND BLAS_SEARCH_LIBS
+ "mkl_blas95 mkl_intel mkl_intel_thread mkl_core guide")
+ endif ()
+ if (BLA_VENDOR STREQUAL "Intel10_64lp" OR BLA_VENDOR STREQUAL "All")
+ # old version
+ list(APPEND BLAS_SEARCH_LIBS
+ "mkl_blas95 mkl_intel_lp64 mkl_intel_thread mkl_core guide")
+ # mkl >= 10.3
+ if (CMAKE_C_COMPILER_ID STREQUAL "Intel")
+ list(APPEND BLAS_SEARCH_LIBS
+ "mkl_blas95_lp64 mkl_intel_lp64 mkl_intel_thread mkl_core")
+ endif()
+ if (CMAKE_C_COMPILER_ID STREQUAL "GNU")
+ list(APPEND BLAS_SEARCH_LIBS
+ "mkl_blas95_lp64 mkl_intel_lp64 mkl_gnu_thread mkl_core")
+ endif()
+ endif ()
+ if (BLA_VENDOR STREQUAL "Intel10_64lp_seq" OR BLA_VENDOR STREQUAL "All")
+ list(APPEND BLAS_SEARCH_LIBS
+ "mkl_intel_lp64 mkl_sequential mkl_core")
+ if (BLA_VENDOR STREQUAL "Intel10_64lp_seq")
+ set(OMP_LIB "")
+ endif()
+ endif ()
+ endif (WIN32)
+
+ else (BLA_F95)
+
+ set(BLAS_mkl_SEARCH_SYMBOL sgemm)
+ set(_LIBRARIES BLAS_LIBRARIES)
+ if (WIN32)
+ if (BLA_STATIC)
+ set(BLAS_mkl_DLL_SUFFIX "")
+ else()
+ set(BLAS_mkl_DLL_SUFFIX "_dll")
+ endif()
+
+ # Find the main file (32-bit or 64-bit)
+ set(BLAS_SEARCH_LIBS_WIN_MAIN "")
+ if (BLA_VENDOR STREQUAL "Intel10_32" OR BLA_VENDOR STREQUAL "All")
+ list(APPEND BLAS_SEARCH_LIBS_WIN_MAIN
+ "mkl_intel_c${BLAS_mkl_DLL_SUFFIX}")
+ endif()
+ if (BLA_VENDOR STREQUAL "Intel10_64lp*" OR BLA_VENDOR STREQUAL "All")
+ list(APPEND BLAS_SEARCH_LIBS_WIN_MAIN
+ "mkl_intel_lp64${BLAS_mkl_DLL_SUFFIX}")
+ endif ()
+
+ # Add threading/sequential libs
+ set(BLAS_SEARCH_LIBS_WIN_THREAD "")
+ if (NOT BLA_VENDOR STREQUAL "*_seq" OR BLA_VENDOR STREQUAL "All")
+ # old version
+ list(APPEND BLAS_SEARCH_LIBS_WIN_THREAD
+ "libguide40 mkl_intel_thread${BLAS_mkl_DLL_SUFFIX}")
+ # mkl >= 10.3
+ list(APPEND BLAS_SEARCH_LIBS_WIN_THREAD
+ "libiomp5md mkl_intel_thread${BLAS_mkl_DLL_SUFFIX}")
+ endif()
+ if (BLA_VENDOR STREQUAL "*_seq" OR BLA_VENDOR STREQUAL "All")
+ list(APPEND BLAS_SEARCH_LIBS_WIN_THREAD
+ "mkl_sequential${BLAS_mkl_DLL_SUFFIX}")
+ endif()
+
+ # Cartesian product of the above
+ foreach (MAIN ${BLAS_SEARCH_LIBS_WIN_MAIN})
+ foreach (THREAD ${BLAS_SEARCH_LIBS_WIN_THREAD})
+ list(APPEND BLAS_SEARCH_LIBS
+ "${MAIN} ${THREAD} mkl_core${BLAS_mkl_DLL_SUFFIX}")
+ endforeach()
+ endforeach()
+ else (WIN32)
+ if (BLA_VENDOR STREQUAL "Intel10_32" OR BLA_VENDOR STREQUAL "All")
+ list(APPEND BLAS_SEARCH_LIBS
+ "mkl_intel mkl_intel_thread mkl_core guide")
+ endif ()
+ if (BLA_VENDOR STREQUAL "Intel10_64lp" OR BLA_VENDOR STREQUAL "All")
+ # old version
+ list(APPEND BLAS_SEARCH_LIBS
+ "mkl_intel_lp64 mkl_intel_thread mkl_core guide")
+ # mkl >= 10.3
+ if (CMAKE_C_COMPILER_ID STREQUAL "Intel")
+ list(APPEND BLAS_SEARCH_LIBS
+ "mkl_intel_lp64 mkl_intel_thread mkl_core")
+ endif()
+ if (CMAKE_C_COMPILER_ID STREQUAL "GNU")
+ list(APPEND BLAS_SEARCH_LIBS
+ "mkl_intel_lp64 mkl_gnu_thread mkl_core")
+ endif()
+ endif ()
+ if (BLA_VENDOR STREQUAL "Intel10_64lp_seq" OR BLA_VENDOR STREQUAL "All")
+ list(APPEND BLAS_SEARCH_LIBS
+ "mkl_intel_lp64 mkl_sequential mkl_core")
+ if (BLA_VENDOR STREQUAL "Intel10_64lp_seq")
+ set(OMP_LIB "")
+ endif()
+ endif ()
+      # older versions of intel mkl libs
+ if (BLA_VENDOR STREQUAL "Intel" OR BLA_VENDOR STREQUAL "All")
+ list(APPEND BLAS_SEARCH_LIBS
+ "mkl")
+ list(APPEND BLAS_SEARCH_LIBS
+ "mkl_ia32")
+ list(APPEND BLAS_SEARCH_LIBS
+ "mkl_em64t")
+ endif ()
+ endif (WIN32)
+
+ endif (BLA_F95)
+
+ foreach (IT ${BLAS_SEARCH_LIBS})
+ string(REPLACE " " ";" SEARCH_LIBS ${IT})
+ if (${_LIBRARIES})
+ else ()
+ check_fortran_libraries(
+ ${_LIBRARIES}
+ BLAS
+ ${BLAS_mkl_SEARCH_SYMBOL}
+ "${additional_flags}"
+ "${SEARCH_LIBS}"
+ "${OMP_LIB};${CMAKE_THREAD_LIBS_INIT};${LM}"
+ )
+ if(_LIBRARIES)
+ set(BLAS_LINKER_FLAGS "${additional_flags}")
+ endif()
+ endif()
+ endforeach ()
+ if(NOT BLAS_FIND_QUIETLY)
+ if(${_LIBRARIES})
+ message(STATUS "Looking for MKL BLAS: found")
+ else()
+ message(STATUS "Looking for MKL BLAS: not found")
+ endif()
+ endif()
+ if (${_LIBRARIES} AND NOT BLAS_VENDOR_FOUND)
+ set (BLAS_VENDOR_FOUND "Intel MKL")
+ endif()
+ endif (_LANGUAGES_ MATCHES C OR _LANGUAGES_ MATCHES CXX)
+ endif(NOT BLAS_LIBRARIES OR BLA_VENDOR MATCHES "Intel*")
+endif (BLA_VENDOR MATCHES "Intel*" OR BLA_VENDOR STREQUAL "All")
+
+
+if (BLA_VENDOR STREQUAL "Goto" OR BLA_VENDOR STREQUAL "All")
+
+ if(NOT BLAS_LIBRARIES)
+ # gotoblas (http://www.tacc.utexas.edu/tacc-projects/gotoblas2)
+ check_fortran_libraries(
BLAS_LIBRARIES
BLAS
sgemm
""
- "cblas;f77blas;atlas"
- "${CGAL_TAUCS_LIBRARIES_DIR} ENV BLAS_LIB_DIR"
+ "goto2"
+ ""
)
+ if(NOT BLAS_FIND_QUIETLY)
+ if(BLAS_LIBRARIES)
+ message(STATUS "Looking for Goto BLAS: found")
+ else()
+ message(STATUS "Looking for Goto BLAS: not found")
+ endif()
endif()
+ endif()
+ if (BLAS_LIBRARIES AND NOT BLAS_VENDOR_FOUND)
+ set (BLAS_VENDOR_FOUND "Goto")
+ endif()
- # BLAS in PhiPACK libraries? (requires generic BLAS lib, too)
- if(NOT BLAS_LIBRARIES)
- check_fortran_libraries(
- BLAS_DEFINITIONS
+endif (BLA_VENDOR STREQUAL "Goto" OR BLA_VENDOR STREQUAL "All")
+
+
+# OpenBlas
+if (BLA_VENDOR STREQUAL "Open" OR BLA_VENDOR STREQUAL "All")
+
+ if(NOT BLAS_LIBRARIES)
+ # openblas (http://www.openblas.net/)
+ check_fortran_libraries(
BLAS_LIBRARIES
BLAS
sgemm
""
- "sgemm;dgemm;blas"
- "${CGAL_TAUCS_LIBRARIES_DIR} ENV BLAS_LIB_DIR"
+ "openblas"
+ ""
)
+ if(NOT BLAS_FIND_QUIETLY)
+ if(BLAS_LIBRARIES)
+ message(STATUS "Looking for Open BLAS: found")
+ else()
+ message(STATUS "Looking for Open BLAS: not found")
+ endif()
endif()
+ endif()
+ if (BLAS_LIBRARIES AND NOT BLAS_VENDOR_FOUND)
+ set (BLAS_VENDOR_FOUND "Openblas")
+ endif()
- # BLAS in Alpha CXML library?
- if(NOT BLAS_LIBRARIES)
- check_fortran_libraries(
- BLAS_DEFINITIONS
+endif (BLA_VENDOR STREQUAL "Open" OR BLA_VENDOR STREQUAL "All")
+
+
+# EigenBlas
+if (BLA_VENDOR STREQUAL "Eigen" OR BLA_VENDOR STREQUAL "All")
+
+ if(NOT BLAS_LIBRARIES)
+ # eigenblas (http://eigen.tuxfamily.org/index.php?title=Main_Page)
+ check_fortran_libraries(
BLAS_LIBRARIES
BLAS
sgemm
""
- "cxml"
- "${CGAL_TAUCS_LIBRARIES_DIR} ENV BLAS_LIB_DIR"
+ "eigen_blas"
+ ""
)
+ if(NOT BLAS_FIND_QUIETLY)
+ if(BLAS_LIBRARIES AND NOT BLAS_VENDOR_FOUND)
+ message(STATUS "Looking for Eigen BLAS: found")
+ else()
+ message(STATUS "Looking for Eigen BLAS: not found")
+ endif()
endif()
+ endif()
- # BLAS in Alpha DXML library? (now called CXML, see above)
- if(NOT BLAS_LIBRARIES)
- check_fortran_libraries(
- BLAS_DEFINITIONS
+ if(NOT BLAS_LIBRARIES)
+ # eigenblas (http://eigen.tuxfamily.org/index.php?title=Main_Page)
+ check_fortran_libraries(
BLAS_LIBRARIES
BLAS
sgemm
""
- "dxml"
- "${CGAL_TAUCS_LIBRARIES_DIR} ENV BLAS_LIB_DIR"
+ "eigen_blas_static"
+ ""
)
+ if(NOT BLAS_FIND_QUIETLY)
+ if(BLAS_LIBRARIES)
+ message(STATUS "Looking for Eigen BLAS: found")
+ else()
+ message(STATUS "Looking for Eigen BLAS: not found")
+ endif()
endif()
+ endif()
+ if (BLAS_LIBRARIES AND NOT BLAS_VENDOR_FOUND)
+ set (BLAS_VENDOR_FOUND "Eigen")
+ endif()
- # BLAS in Sun Performance library?
- if(NOT BLAS_LIBRARIES)
- check_fortran_libraries(
- BLAS_DEFINITIONS
+endif (BLA_VENDOR STREQUAL "Eigen" OR BLA_VENDOR STREQUAL "All")
+
+
+if (BLA_VENDOR STREQUAL "ATLAS" OR BLA_VENDOR STREQUAL "All")
+
+ if(NOT BLAS_LIBRARIES)
+ # BLAS in ATLAS library? (http://math-atlas.sourceforge.net/)
+ check_fortran_libraries(
BLAS_LIBRARIES
BLAS
- sgemm
- "-xlic_lib=sunperf"
- "sunperf;sunmath"
- "${CGAL_TAUCS_LIBRARIES_DIR} ENV BLAS_LIB_DIR"
+ dgemm
+ ""
+ "f77blas;atlas"
+ ""
)
+ if(NOT BLAS_FIND_QUIETLY)
if(BLAS_LIBRARIES)
- # Extra linker flag
- set(BLAS_LINKER_FLAGS "-xlic_lib=sunperf")
+ message(STATUS "Looking for Atlas BLAS: found")
+ else()
+ message(STATUS "Looking for Atlas BLAS: not found")
endif()
endif()
+ endif()
- # BLAS in SCSL library? (SGI/Cray Scientific Library)
- if(NOT BLAS_LIBRARIES)
- check_fortran_libraries(
- BLAS_DEFINITIONS
+ if (BLAS_LIBRARIES AND NOT BLAS_VENDOR_FOUND)
+ set (BLAS_VENDOR_FOUND "Atlas")
+ endif()
+
+endif (BLA_VENDOR STREQUAL "ATLAS" OR BLA_VENDOR STREQUAL "All")
+
+
+# BLAS in PhiPACK libraries? (requires generic BLAS lib, too)
+if (BLA_VENDOR STREQUAL "PhiPACK" OR BLA_VENDOR STREQUAL "All")
+
+ if(NOT BLAS_LIBRARIES)
+ check_fortran_libraries(
BLAS_LIBRARIES
BLAS
sgemm
""
- "scsl"
- "${CGAL_TAUCS_LIBRARIES_DIR} ENV BLAS_LIB_DIR"
+ "sgemm;dgemm;blas"
+ ""
)
+ if(NOT BLAS_FIND_QUIETLY)
+ if(BLAS_LIBRARIES)
+ message(STATUS "Looking for PhiPACK BLAS: found")
+ else()
+ message(STATUS "Looking for PhiPACK BLAS: not found")
+ endif()
endif()
+ endif()
- # BLAS in SGIMATH library?
- if(NOT BLAS_LIBRARIES)
- check_fortran_libraries(
- BLAS_DEFINITIONS
+ if (BLAS_LIBRARIES AND NOT BLAS_VENDOR_FOUND)
+ set (BLAS_VENDOR_FOUND "PhiPACK")
+ endif()
+
+endif (BLA_VENDOR STREQUAL "PhiPACK" OR BLA_VENDOR STREQUAL "All")
+
+
+# BLAS in Alpha CXML library?
+if (BLA_VENDOR STREQUAL "CXML" OR BLA_VENDOR STREQUAL "All")
+
+ if(NOT BLAS_LIBRARIES)
+ check_fortran_libraries(
BLAS_LIBRARIES
BLAS
sgemm
""
- "complib.sgimath"
- "${CGAL_TAUCS_LIBRARIES_DIR} ENV BLAS_LIB_DIR"
+ "cxml"
+ ""
)
+ if(NOT BLAS_FIND_QUIETLY)
+ if(BLAS_LIBRARIES)
+ message(STATUS "Looking for CXML BLAS: found")
+ else()
+ message(STATUS "Looking for CXML BLAS: not found")
+ endif()
endif()
+ endif()
- # BLAS in IBM ESSL library? (requires generic BLAS lib, too)
- if(NOT BLAS_LIBRARIES)
- check_fortran_libraries(
- BLAS_DEFINITIONS
+ if (BLAS_LIBRARIES AND NOT BLAS_VENDOR_FOUND)
+ set (BLAS_VENDOR_FOUND "CXML")
+ endif()
+
+endif (BLA_VENDOR STREQUAL "CXML" OR BLA_VENDOR STREQUAL "All")
+
+
+# BLAS in Alpha DXML library? (now called CXML, see above)
+if (BLA_VENDOR STREQUAL "DXML" OR BLA_VENDOR STREQUAL "All")
+
+ if(NOT BLAS_LIBRARIES)
+ check_fortran_libraries(
BLAS_LIBRARIES
BLAS
sgemm
""
- "essl;blas"
- "${CGAL_TAUCS_LIBRARIES_DIR} ENV BLAS_LIB_DIR"
+ "dxml"
+ ""
)
+ if(NOT BLAS_FIND_QUIETLY)
+ if(BLAS_LIBRARIES)
+ message(STATUS "Looking for DXML BLAS: found")
+ else()
+ message(STATUS "Looking for DXML BLAS: not found")
+ endif()
endif()
+ endif()
- #BLAS in intel mkl 10 library? (em64t 64bit)
- if(NOT BLAS_LIBRARIES)
- check_fortran_libraries(
- BLAS_DEFINITIONS
+ if (BLAS_LIBRARIES AND NOT BLAS_VENDOR_FOUND)
+ set (BLAS_VENDOR_FOUND "DXML")
+ endif()
+
+endif (BLA_VENDOR STREQUAL "DXML" OR BLA_VENDOR STREQUAL "All")
+
+
+# BLAS in Sun Performance library?
+if (BLA_VENDOR STREQUAL "SunPerf" OR BLA_VENDOR STREQUAL "All")
+
+ if(NOT BLAS_LIBRARIES)
+ check_fortran_libraries(
BLAS_LIBRARIES
BLAS
sgemm
+ "-xlic_lib=sunperf"
+ "sunperf;sunmath"
""
- "mkl_intel_lp64;mkl_intel_thread;mkl_core;guide;pthread"
- "${CGAL_TAUCS_LIBRARIES_DIR} ENV BLAS_LIB_DIR"
)
+ if(BLAS_LIBRARIES)
+ set(BLAS_LINKER_FLAGS "-xlic_lib=sunperf")
endif()
+ if(NOT BLAS_FIND_QUIETLY)
+ if(BLAS_LIBRARIES)
+ message(STATUS "Looking for SunPerf BLAS: found")
+ else()
+ message(STATUS "Looking for SunPerf BLAS: not found")
+ endif()
+ endif()
+ endif()
- ### windows version of intel mkl 10?
- if(NOT BLAS_LIBRARIES)
- check_fortran_libraries(
- BLAS_DEFINITIONS
+ if (BLAS_LIBRARIES AND NOT BLAS_VENDOR_FOUND)
+ set (BLAS_VENDOR_FOUND "SunPerf")
+ endif()
+
+endif ()
+
+
+# BLAS in SCSL library? (SGI/Cray Scientific Library)
+if (BLA_VENDOR STREQUAL "SCSL" OR BLA_VENDOR STREQUAL "All")
+
+ if(NOT BLAS_LIBRARIES)
+ check_fortran_libraries(
BLAS_LIBRARIES
BLAS
- SGEMM
+ sgemm
+ ""
+ "scsl"
""
- "mkl_c_dll;mkl_intel_thread_dll;mkl_core_dll;libguide40"
- "${CGAL_TAUCS_LIBRARIES_DIR} ENV BLAS_LIB_DIR"
)
+ if(NOT BLAS_FIND_QUIETLY)
+ if(BLAS_LIBRARIES)
+ message(STATUS "Looking for SCSL BLAS: found")
+ else()
+ message(STATUS "Looking for SCSL BLAS: not found")
+ endif()
endif()
+ endif()
- #older versions of intel mkl libs
+ if (BLAS_LIBRARIES AND NOT BLAS_VENDOR_FOUND)
+    set (BLAS_VENDOR_FOUND "SCSL")
+ endif()
- # BLAS in intel mkl library? (shared)
- if(NOT BLAS_LIBRARIES)
- check_fortran_libraries(
- BLAS_DEFINITIONS
+endif ()
+
+
+# BLAS in SGIMATH library?
+if (BLA_VENDOR STREQUAL "SGIMATH" OR BLA_VENDOR STREQUAL "All")
+
+ if(NOT BLAS_LIBRARIES)
+ check_fortran_libraries(
BLAS_LIBRARIES
BLAS
sgemm
""
- "mkl;guide;pthread"
- "${CGAL_TAUCS_LIBRARIES_DIR} ENV BLAS_LIB_DIR"
+ "complib.sgimath"
+ ""
)
+ if(NOT BLAS_FIND_QUIETLY)
+ if(BLAS_LIBRARIES)
+ message(STATUS "Looking for SGIMATH BLAS: found")
+ else()
+ message(STATUS "Looking for SGIMATH BLAS: not found")
+ endif()
endif()
+ endif()
- #BLAS in intel mkl library? (static, 32bit)
- if(NOT BLAS_LIBRARIES)
- check_fortran_libraries(
- BLAS_DEFINITIONS
+ if (BLAS_LIBRARIES AND NOT BLAS_VENDOR_FOUND)
+ set (BLAS_VENDOR_FOUND "SGIMATH")
+ endif()
+
+endif ()
+
+
+# BLAS in IBM ESSL library (requires generic BLAS lib, too)
+if (BLA_VENDOR STREQUAL "IBMESSL" OR BLA_VENDOR STREQUAL "All")
+
+ if(NOT BLAS_LIBRARIES)
+ check_fortran_libraries(
BLAS_LIBRARIES
BLAS
sgemm
""
- "mkl_ia32;guide;pthread"
- "${CGAL_TAUCS_LIBRARIES_DIR} ENV BLAS_LIB_DIR"
+ "essl;xlfmath;xlf90_r;blas"
+ ""
)
+ if(NOT BLAS_FIND_QUIETLY)
+ if(BLAS_LIBRARIES)
+ message(STATUS "Looking for IBM ESSL BLAS: found")
+ else()
+ message(STATUS "Looking for IBM ESSL BLAS: not found")
+ endif()
endif()
+ endif()
- #BLAS in intel mkl library? (static, em64t 64bit)
- if(NOT BLAS_LIBRARIES)
- check_fortran_libraries(
- BLAS_DEFINITIONS
+ if (BLAS_LIBRARIES AND NOT BLAS_VENDOR_FOUND)
+ set (BLAS_VENDOR_FOUND "IBM ESSL")
+ endif()
+
+endif ()
+
+# BLAS in IBM ESSL_MT library (requires generic BLAS lib, too)
+if (BLA_VENDOR STREQUAL "IBMESSLMT" OR BLA_VENDOR STREQUAL "All")
+
+ if(NOT BLAS_LIBRARIES)
+ check_fortran_libraries(
BLAS_LIBRARIES
BLAS
sgemm
""
- "mkl_em64t;guide;pthread"
- "${CGAL_TAUCS_LIBRARIES_DIR} ENV BLAS_LIB_DIR"
+ "esslsmp;xlsmp;xlfmath;xlf90_r;blas"
+ ""
)
+ if(NOT BLAS_FIND_QUIETLY)
+ if(BLAS_LIBRARIES)
+ message(STATUS "Looking for IBM ESSL MT BLAS: found")
+ else()
+ message(STATUS "Looking for IBM ESSL MT BLAS: not found")
+ endif()
endif()
+ endif()
- #BLAS in acml library?
- if(NOT BLAS_LIBRARIES)
- check_fortran_libraries(
- BLAS_DEFINITIONS
+ if (BLAS_LIBRARIES AND NOT BLAS_VENDOR_FOUND)
+ set (BLAS_VENDOR_FOUND "IBM ESSL MT")
+ endif()
+
+endif ()
+
+
+#BLAS in acml library?
+if (BLA_VENDOR MATCHES "ACML.*" OR BLA_VENDOR STREQUAL "All")
+
+ if( ((BLA_VENDOR STREQUAL "ACML") AND (NOT BLAS_ACML_LIB_DIRS)) OR
+ ((BLA_VENDOR STREQUAL "ACML_MP") AND (NOT BLAS_ACML_MP_LIB_DIRS)) OR
+ ((BLA_VENDOR STREQUAL "ACML_GPU") AND (NOT BLAS_ACML_GPU_LIB_DIRS)))
+
+ # try to find acml in "standard" paths
+ if( WIN32 )
+ file( GLOB _ACML_ROOT "C:/AMD/acml*/ACML-EULA.txt" )
+ else()
+ file( GLOB _ACML_ROOT "/opt/acml*/ACML-EULA.txt" )
+ endif()
+ if( WIN32 )
+ file( GLOB _ACML_GPU_ROOT "C:/AMD/acml*/GPGPUexamples" )
+ else()
+ file( GLOB _ACML_GPU_ROOT "/opt/acml*/GPGPUexamples" )
+ endif()
+ list(GET _ACML_ROOT 0 _ACML_ROOT)
+ list(GET _ACML_GPU_ROOT 0 _ACML_GPU_ROOT)
+
+ if( _ACML_ROOT )
+
+ get_filename_component( _ACML_ROOT ${_ACML_ROOT} PATH )
+ if( SIZEOF_INTEGER EQUAL 8 )
+ set( _ACML_PATH_SUFFIX "_int64" )
+ else()
+ set( _ACML_PATH_SUFFIX "" )
+ endif()
+ if( CMAKE_Fortran_COMPILER_ID STREQUAL "Intel" )
+ set( _ACML_COMPILER32 "ifort32" )
+ set( _ACML_COMPILER64 "ifort64" )
+ elseif( CMAKE_Fortran_COMPILER_ID STREQUAL "SunPro" )
+ set( _ACML_COMPILER32 "sun32" )
+ set( _ACML_COMPILER64 "sun64" )
+ elseif( CMAKE_Fortran_COMPILER_ID STREQUAL "PGI" )
+ set( _ACML_COMPILER32 "pgi32" )
+ if( WIN32 )
+ set( _ACML_COMPILER64 "win64" )
+ else()
+ set( _ACML_COMPILER64 "pgi64" )
+ endif()
+ elseif( CMAKE_Fortran_COMPILER_ID STREQUAL "Open64" )
+      # 32 bit builds are not supported on Open64, but for code simplicity
+      # we'll just use the same directory twice
+ set( _ACML_COMPILER32 "open64_64" )
+ set( _ACML_COMPILER64 "open64_64" )
+ elseif( CMAKE_Fortran_COMPILER_ID STREQUAL "NAG" )
+ set( _ACML_COMPILER32 "nag32" )
+ set( _ACML_COMPILER64 "nag64" )
+ else()
+ set( _ACML_COMPILER32 "gfortran32" )
+ set( _ACML_COMPILER64 "gfortran64" )
+ endif()
+
+ if( BLA_VENDOR STREQUAL "ACML_MP" )
+ set(_ACML_MP_LIB_DIRS
+ "${_ACML_ROOT}/${_ACML_COMPILER32}_mp${_ACML_PATH_SUFFIX}/lib"
+ "${_ACML_ROOT}/${_ACML_COMPILER64}_mp${_ACML_PATH_SUFFIX}/lib" )
+ else()
+ set(_ACML_LIB_DIRS
+ "${_ACML_ROOT}/${_ACML_COMPILER32}${_ACML_PATH_SUFFIX}/lib"
+ "${_ACML_ROOT}/${_ACML_COMPILER64}${_ACML_PATH_SUFFIX}/lib" )
+ endif()
+
+ endif(_ACML_ROOT)
+
+ elseif(BLAS_${BLA_VENDOR}_LIB_DIRS)
+
+ set(_${BLA_VENDOR}_LIB_DIRS ${BLAS_${BLA_VENDOR}_LIB_DIRS})
+
+ endif()
+
+ if( BLA_VENDOR STREQUAL "ACML_MP" )
+ foreach( BLAS_ACML_MP_LIB_DIRS ${_ACML_MP_LIB_DIRS})
+ check_fortran_libraries (
+ BLAS_LIBRARIES
+ BLAS
+ sgemm
+ "" "acml_mp;acml_mv" "" ${BLAS_ACML_MP_LIB_DIRS}
+ )
+ if( BLAS_LIBRARIES )
+ break()
+ endif()
+ endforeach()
+ elseif( BLA_VENDOR STREQUAL "ACML_GPU" )
+ foreach( BLAS_ACML_GPU_LIB_DIRS ${_ACML_GPU_LIB_DIRS})
+ check_fortran_libraries (
+ BLAS_LIBRARIES
+ BLAS
+ sgemm
+ "" "acml;acml_mv;CALBLAS" "" ${BLAS_ACML_GPU_LIB_DIRS}
+ )
+ if( BLAS_LIBRARIES )
+ break()
+ endif()
+ endforeach()
+ else()
+ foreach( BLAS_ACML_LIB_DIRS ${_ACML_LIB_DIRS} )
+ check_fortran_libraries (
+ BLAS_LIBRARIES
+ BLAS
+ sgemm
+ "" "acml;acml_mv" "" ${BLAS_ACML_LIB_DIRS}
+ )
+ if( BLAS_LIBRARIES )
+ break()
+ endif()
+ endforeach()
+ endif()
+
+ # Either acml or acml_mp should be in LD_LIBRARY_PATH but not both
+ if(NOT BLAS_LIBRARIES)
+ check_fortran_libraries(
BLAS_LIBRARIES
BLAS
sgemm
""
- "acml"
- "${CGAL_TAUCS_LIBRARIES_DIR} ENV BLAS_LIB_DIR"
+ "acml;acml_mv"
+ ""
)
+ if(NOT BLAS_FIND_QUIETLY)
+ if(BLAS_LIBRARIES)
+ message(STATUS "Looking for ACML BLAS: found")
+ else()
+ message(STATUS "Looking for ACML BLAS: not found")
+ endif()
endif()
+ endif()
- # Apple BLAS library?
- if(NOT BLAS_LIBRARIES)
- check_fortran_libraries(
- BLAS_DEFINITIONS
+ if(NOT BLAS_LIBRARIES)
+ check_fortran_libraries(
BLAS_LIBRARIES
BLAS
sgemm
""
- "Accelerate"
- "${CGAL_TAUCS_LIBRARIES_DIR} ENV BLAS_LIB_DIR"
+ "acml_mp;acml_mv"
+ ""
)
+ if(NOT BLAS_FIND_QUIETLY)
+ if(BLAS_LIBRARIES)
+ message(STATUS "Looking for ACML BLAS: found")
+ else()
+ message(STATUS "Looking for ACML BLAS: not found")
+ endif()
endif()
+ endif()
- if ( NOT BLAS_LIBRARIES )
- check_fortran_libraries(
- BLAS_DEFINITIONS
+ if(NOT BLAS_LIBRARIES)
+ check_fortran_libraries(
BLAS_LIBRARIES
BLAS
sgemm
""
- "vecLib"
- "${CGAL_TAUCS_LIBRARIES_DIR} ENV BLAS_LIB_DIR"
+ "acml;acml_mv;CALBLAS"
+ ""
)
- endif ( NOT BLAS_LIBRARIES )
+ if(NOT BLAS_FIND_QUIETLY)
+ if(BLAS_LIBRARIES)
+ message(STATUS "Looking for ACML BLAS: found")
+ else()
+ message(STATUS "Looking for ACML BLAS: not found")
+ endif()
+ endif()
+ endif()
- # Generic BLAS library?
- # This configuration *must* be the last try as this library is notably slow.
- if ( NOT BLAS_LIBRARIES )
- check_fortran_libraries(
- BLAS_DEFINITIONS
+ if (BLAS_LIBRARIES AND NOT BLAS_VENDOR_FOUND)
+ set (BLAS_VENDOR_FOUND "ACML")
+ endif()
+
+endif (BLA_VENDOR MATCHES "ACML.*" OR BLA_VENDOR STREQUAL "All") # ACML
+
+
+# Apple BLAS library?
+if (BLA_VENDOR STREQUAL "Apple" OR BLA_VENDOR STREQUAL "All")
+
+ if(NOT BLAS_LIBRARIES)
+ check_fortran_libraries(
BLAS_LIBRARIES
BLAS
- sgemm
+ dgemm
+ ""
+ "Accelerate"
""
- "blas"
- "${CGAL_TAUCS_LIBRARIES_DIR} ENV BLAS_LIB_DIR"
)
+ if(NOT BLAS_FIND_QUIETLY)
+ if(BLAS_LIBRARIES)
+ message(STATUS "Looking for Apple BLAS: found")
+ else()
+ message(STATUS "Looking for Apple BLAS: not found")
+ endif()
endif()
+ endif()
+
+ if (BLAS_LIBRARIES AND NOT BLAS_VENDOR_FOUND)
+ set (BLAS_VENDOR_FOUND "Apple Accelerate")
+ endif()
+
+endif (BLA_VENDOR STREQUAL "Apple" OR BLA_VENDOR STREQUAL "All")
- if(BLAS_LIBRARIES_DIR OR BLAS_LIBRARIES)
+
+if (BLA_VENDOR STREQUAL "NAS" OR BLA_VENDOR STREQUAL "All")
+
+ if ( NOT BLAS_LIBRARIES )
+ check_fortran_libraries(
+ BLAS_LIBRARIES
+ BLAS
+ dgemm
+ ""
+ "vecLib"
+ ""
+ )
+ if(NOT BLAS_FIND_QUIETLY)
+ if(BLAS_LIBRARIES)
+ message(STATUS "Looking for NAS BLAS: found")
+ else()
+ message(STATUS "Looking for NAS BLAS: not found")
+ endif()
+ endif()
+ endif ()
+
+ if (BLAS_LIBRARIES AND NOT BLAS_VENDOR_FOUND)
+ set (BLAS_VENDOR_FOUND "NAS")
+ endif()
+
+endif (BLA_VENDOR STREQUAL "NAS" OR BLA_VENDOR STREQUAL "All")
+
+
+# Generic BLAS library?
+if (BLA_VENDOR STREQUAL "Generic" OR BLA_VENDOR STREQUAL "All")
+
+ set(BLAS_SEARCH_LIBS "blas;blas_LINUX;blas_MAC;blas_WINDOWS;refblas")
+ foreach (SEARCH_LIB ${BLAS_SEARCH_LIBS})
+ if (BLAS_LIBRARIES)
+ else ()
+ check_fortran_libraries(
+ BLAS_LIBRARIES
+ BLAS
+ sgemm
+ ""
+ "${SEARCH_LIB}"
+ "${LGFORTRAN}"
+ )
+ if(NOT BLAS_FIND_QUIETLY)
+ if(BLAS_LIBRARIES)
+ message(STATUS "Looking for Generic BLAS: found")
+ else()
+ message(STATUS "Looking for Generic BLAS: not found")
+ endif()
+ endif()
+ endif()
+ endforeach ()
+
+ if (BLAS_LIBRARIES AND NOT BLAS_VENDOR_FOUND)
+ set (BLAS_VENDOR_FOUND "Netlib or other Generic libblas")
+ endif()
+
+endif (BLA_VENDOR STREQUAL "Generic" OR BLA_VENDOR STREQUAL "All")
+
+
+if(BLA_F95)
+
+ if(BLAS95_LIBRARIES)
+ set(BLAS95_FOUND TRUE)
+ else()
+ set(BLAS95_FOUND FALSE)
+ endif()
+
+ if(NOT BLAS_FIND_QUIETLY)
+ if(BLAS95_FOUND)
+ message(STATUS "A library with BLAS95 API found.")
+ message(STATUS "BLAS_LIBRARIES ${BLAS_LIBRARIES}")
+ else(BLAS95_FOUND)
+ message(WARNING "BLA_VENDOR has been set to ${BLA_VENDOR} but blas 95 libraries could not be found or check of symbols failed."
+ "\nPlease indicate where to find blas libraries. You have three options:\n"
+ "- Option 1: Provide the installation directory of BLAS library with cmake option: -DBLAS_DIR=your/path/to/blas\n"
+ "- Option 2: Provide the directory where to find BLAS libraries with cmake option: -DBLAS_LIBDIR=your/path/to/blas/libs\n"
+ "- Option 3: Update your environment variable (Linux: LD_LIBRARY_PATH, Windows: LIB, Mac: DYLD_LIBRARY_PATH)\n"
+ "\nTo follow libraries detection more precisely you can activate a verbose mode with -DBLAS_VERBOSE=ON at cmake configure."
+ "\nYou could also specify a BLAS vendor to look for by setting -DBLA_VENDOR=blas_vendor_name."
+ "\nList of possible BLAS vendor: Goto, ATLAS PhiPACK, CXML, DXML, SunPerf, SCSL, SGIMATH, IBMESSL, Intel10_32 (intel mkl v10 32 bit),"
+ "Intel10_64lp (intel mkl v10 64 bit, lp thread model, lp64 model), Intel10_64lp_seq (intel mkl v10 64 bit, sequential code, lp64 model),"
+ "Intel( older versions of mkl 32 and 64 bit), ACML, ACML_MP, ACML_GPU, Apple, NAS, Generic")
+ if(BLAS_FIND_REQUIRED)
+ message(FATAL_ERROR
+ "A required library with BLAS95 API not found. Please specify library location.")
+ else()
+ message(STATUS
+ "A library with BLAS95 API not found. Please specify library location.")
+ endif()
+ endif(BLAS95_FOUND)
+ endif(NOT BLAS_FIND_QUIETLY)
+
+ set(BLAS_FOUND TRUE)
+ set(BLAS_LIBRARIES "${BLAS95_LIBRARIES}")
+
+else(BLA_F95)
+
+ if(BLAS_LIBRARIES)
set(BLAS_FOUND TRUE)
else()
set(BLAS_FOUND FALSE)
@@ -388,32 +1366,41 @@ else()
if(NOT BLAS_FIND_QUIETLY)
if(BLAS_FOUND)
message(STATUS "A library with BLAS API found.")
+ message(STATUS "BLAS_LIBRARIES ${BLAS_LIBRARIES}")
else(BLAS_FOUND)
+ message(WARNING "BLA_VENDOR has been set to ${BLA_VENDOR} but blas libraries could not be found or check of symbols failed."
+ "\nPlease indicate where to find blas libraries. You have three options:\n"
+ "- Option 1: Provide the installation directory of BLAS library with cmake option: -DBLAS_DIR=your/path/to/blas\n"
+ "- Option 2: Provide the directory where to find BLAS libraries with cmake option: -DBLAS_LIBDIR=your/path/to/blas/libs\n"
+ "- Option 3: Update your environment variable (Linux: LD_LIBRARY_PATH, Windows: LIB, Mac: DYLD_LIBRARY_PATH)\n"
+ "\nTo follow libraries detection more precisely you can activate a verbose mode with -DBLAS_VERBOSE=ON at cmake configure."
+ "\nYou could also specify a BLAS vendor to look for by setting -DBLA_VENDOR=blas_vendor_name."
+ "\nList of possible BLAS vendor: Goto, ATLAS PhiPACK, CXML, DXML, SunPerf, SCSL, SGIMATH, IBMESSL, Intel10_32 (intel mkl v10 32 bit),"
+ "Intel10_64lp (intel mkl v10 64 bit, lp thread model, lp64 model), Intel10_64lp_seq (intel mkl v10 64 bit, sequential code, lp64 model),"
+ "Intel( older versions of mkl 32 and 64 bit), ACML, ACML_MP, ACML_GPU, Apple, NAS, Generic")
if(BLAS_FIND_REQUIRED)
- message(FATAL_ERROR "A required library with BLAS API not found. Please specify library location.")
+ message(FATAL_ERROR
+ "A required library with BLAS API not found. Please specify library location.")
else()
- message(STATUS "A library with BLAS API not found. Please specify library location.")
+ message(STATUS
+ "A library with BLAS API not found. Please specify library location.")
endif()
endif(BLAS_FOUND)
endif(NOT BLAS_FIND_QUIETLY)
- # Add variables to cache
- set( BLAS_INCLUDE_DIR "${BLAS_INCLUDE_DIR}"
- CACHE PATH "Directories containing the BLAS header files" FORCE )
- set( BLAS_DEFINITIONS "${BLAS_DEFINITIONS}"
- CACHE STRING "Compilation options to use BLAS" FORCE )
- set( BLAS_LINKER_FLAGS "${BLAS_LINKER_FLAGS}"
- CACHE STRING "Linker flags to use BLAS" FORCE )
- set( BLAS_LIBRARIES "${BLAS_LIBRARIES}"
- CACHE FILEPATH "BLAS libraries name" FORCE )
- set( BLAS_LIBRARIES_DIR "${BLAS_LIBRARIES_DIR}"
- CACHE PATH "Directories containing the BLAS libraries" FORCE )
-
- #message("DEBUG: BLAS_INCLUDE_DIR = ${BLAS_INCLUDE_DIR}")
- #message("DEBUG: BLAS_DEFINITIONS = ${BLAS_DEFINITIONS}")
- #message("DEBUG: BLAS_LINKER_FLAGS = ${BLAS_LINKER_FLAGS}")
- #message("DEBUG: BLAS_LIBRARIES = ${BLAS_LIBRARIES}")
- #message("DEBUG: BLAS_LIBRARIES_DIR = ${BLAS_LIBRARIES_DIR}")
- #message("DEBUG: BLAS_FOUND = ${BLAS_FOUND}")
-
-endif(BLAS_LIBRARIES_DIR OR BLAS_LIBRARIES)
+endif(BLA_F95)
+
+set(CMAKE_FIND_LIBRARY_SUFFIXES ${_blas_ORIG_CMAKE_FIND_LIBRARY_SUFFIXES})
+
+if (BLAS_FOUND)
+ list(GET BLAS_LIBRARIES 0 first_lib)
+ get_filename_component(first_lib_path "${first_lib}" PATH)
+ if (${first_lib_path} MATCHES "(/lib(32|64)?$)|(/lib/intel64$|/lib/ia32$)")
+ string(REGEX REPLACE "(/lib(32|64)?$)|(/lib/intel64$|/lib/ia32$)" "" not_cached_dir "${first_lib_path}")
+ set(BLAS_DIR_FOUND "${not_cached_dir}" CACHE PATH "Installation directory of BLAS library" FORCE)
+ else()
+ set(BLAS_DIR_FOUND "${first_lib_path}" CACHE PATH "Installation directory of BLAS library" FORCE)
+ endif()
+endif()
+mark_as_advanced(BLAS_DIR)
+mark_as_advanced(BLAS_DIR_FOUND)
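Taken together, the rewritten vendor sections above are driven by BLA_VENDOR and leave their result in BLAS_FOUND, BLAS_LIBRARIES, BLAS_VENDOR_FOUND and BLAS_DIR_FOUND. A minimal consumer sketch, assuming cmake/ is on CMAKE_MODULE_PATH; the project and target names are invented for illustration:

  # Hypothetical consumer CMakeLists.txt exercising this FindBLAS module
  cmake_minimum_required(VERSION 3.0)
  project(blas_consumer C)
  list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake")

  # Optionally pin one vendor, e.g. ACML_MP, Apple or Generic; "All" tries them in order.
  set(BLA_VENDOR "All" CACHE STRING "BLAS vendor to search for")

  find_package(BLAS REQUIRED)
  message(STATUS "BLAS vendor found: ${BLAS_VENDOR_FOUND}")

  add_executable(blas_consumer main.c)
  target_link_libraries(blas_consumer ${BLAS_LIBRARIES})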
diff --git a/cmake/FindBLASEXT.cmake b/cmake/FindBLASEXT.cmake
new file mode 100644
index 000000000..0fe7fb849
--- /dev/null
+++ b/cmake/FindBLASEXT.cmake
@@ -0,0 +1,380 @@
+###
+#
+# @copyright (c) 2009-2014 The University of Tennessee and The University
+# of Tennessee Research Foundation.
+# All rights reserved.
+# @copyright (c) 2012-2016 Inria. All rights reserved.
+# @copyright (c) 2012-2014 Bordeaux INP, CNRS (LaBRI UMR 5800), Inria, Univ. Bordeaux. All rights reserved.
+#
+###
+#
+# - Find BLAS EXTENDED for MORSE projects: find include dirs and libraries
+#
+# This module finds BLAS libraries by calling the official FindBLAS module
+# and builds separate library lists depending on whether the user wishes to link
+# with a sequential or a multithreaded BLAS (BLAS_SEQ_LIBRARIES and BLAS_PAR_LIBRARIES).
+# BLAS is detected with a FindBLAS call; then, if the BLAS vendor is Intel10_64lp, ACML
+# or IBMESSLMT, the module attempts to find the corresponding multithreaded libraries.
+#
+# The following variables have been added to manage links with sequential or multithreaded
+# versions:
+# BLAS_INCLUDE_DIRS - BLAS include directories
+# BLAS_LIBRARY_DIRS - Link directories for BLAS libraries
+# BLAS_SEQ_LIBRARIES - BLAS component libraries to be linked (sequential)
+# BLAS_PAR_LIBRARIES - BLAS component libraries to be linked (multithreaded)
+
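A minimal sketch of how a caller might consume these lists, assuming cmake/ is on CMAKE_MODULE_PATH; the target name and the preference for the parallel list are illustrative assumptions:

  find_package(BLASEXT REQUIRED)
  # Prefer the multithreaded list when one was detected, else fall back to the sequential one.
  if(BLAS_PAR_LIBRARIES)
    set(project_blas_libs ${BLAS_PAR_LIBRARIES})
  else()
    set(project_blas_libs ${BLAS_SEQ_LIBRARIES})
  endif()
  include_directories(${BLAS_INCLUDE_DIRS})
  link_directories(${BLAS_LIBRARY_DIRS})
  add_executable(blasext_demo main.c)       # hypothetical target
  target_link_libraries(blasext_demo ${project_blas_libs})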
+#=============================================================================
+# Copyright 2012-2013 Inria
+# Copyright 2012-2013 Emmanuel Agullo
+# Copyright 2012-2013 Mathieu Faverge
+# Copyright 2012 Cedric Castagnede
+# Copyright 2013-2016 Florent Pruvost
+#
+# Distributed under the OSI-approved BSD License (the "License");
+# see accompanying file MORSE-Copyright.txt for details.
+#
+# This software is distributed WITHOUT ANY WARRANTY; without even the
+# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+# See the License for more information.
+#=============================================================================
+# (To distribute this file outside of Morse, substitute the full
+# License text for the above reference.)
+
+# macro to factorize this call
+macro(find_package_blas)
+ if(BLASEXT_FIND_REQUIRED)
+ if(BLASEXT_FIND_QUIETLY)
+ find_package(BLAS REQUIRED QUIET)
+ else()
+ find_package(BLAS REQUIRED)
+ endif()
+ else()
+ if(BLASEXT_FIND_QUIETLY)
+ find_package(BLAS QUIET)
+ else()
+ find_package(BLAS)
+ endif()
+ endif()
+endmacro()
+
+# add a cache variable to let the user specify the BLAS vendor
+set(BLA_VENDOR "" CACHE STRING "list of possible BLAS vendor:
+ Open, Eigen, Goto, ATLAS PhiPACK, CXML, DXML, SunPerf, SCSL, SGIMATH, IBMESSL, IBMESSLMT,
+ Intel10_32 (intel mkl v10 32 bit),
+ Intel10_64lp (intel mkl v10 64 bit, lp thread model, lp64 model),
+ Intel10_64lp_seq (intel mkl v10 64 bit, sequential code, lp64 model),
+ Intel( older versions of mkl 32 and 64 bit),
+ ACML, ACML_MP, ACML_GPU, Apple, NAS, Generic")
+
+if(NOT BLASEXT_FIND_QUIETLY)
+ message(STATUS "In FindBLASEXT")
+ message(STATUS "If you want to force the use of one specific library, "
+ "\n please specify the BLAS vendor by setting -DBLA_VENDOR=blas_vendor_name"
+ "\n at cmake configure.")
+ message(STATUS "List of possible BLAS vendor: Goto, ATLAS PhiPACK, CXML, "
+ "\n DXML, SunPerf, SCSL, SGIMATH, IBMESSL, IBMESSLMT, Intel10_32 (intel mkl v10 32 bit),"
+ "\n Intel10_64lp (intel mkl v10 64 bit, lp thread model, lp64 model),"
+ "\n Intel10_64lp_seq (intel mkl v10 64 bit, sequential code, lp64 model),"
+ "\n Intel( older versions of mkl 32 and 64 bit),"
+ "\n ACML, ACML_MP, ACML_GPU, Apple, NAS, Generic")
+endif()
+
+if (NOT BLAS_FOUND)
+ # First try to detect two cases:
+ # 1: only SEQ libs are handled
+ # 2: both SEQ and PAR libs are handled
+ find_package_blas()
+endif ()
+
+# detect the cases where SEQ and PAR libs are handled
+if(BLA_VENDOR STREQUAL "All" AND
+ (BLAS_mkl_core_LIBRARY OR BLAS_mkl_core_dll_LIBRARY)
+ )
+ set(BLA_VENDOR "Intel")
+ if(BLAS_mkl_intel_LIBRARY)
+ set(BLA_VENDOR "Intel10_32")
+ endif()
+ if(BLAS_mkl_intel_lp64_LIBRARY)
+ set(BLA_VENDOR "Intel10_64lp")
+ endif()
+ if(NOT BLASEXT_FIND_QUIETLY)
+ message(STATUS "A BLAS library has been found (${BLAS_LIBRARIES}) but we"
+ "\n have also potentially detected some multithreaded BLAS libraries from the MKL."
+ "\n We try to find both libraries lists (Sequential/Multithreaded).")
+ endif()
+ set(BLAS_FOUND "")
+elseif(BLA_VENDOR STREQUAL "All" AND BLAS_acml_LIBRARY)
+ set(BLA_VENDOR "ACML")
+ if(NOT BLASEXT_FIND_QUIETLY)
+ message(STATUS "A BLAS library has been found (${BLAS_LIBRARIES}) but we"
+ "\n have also potentially detected some multithreaded BLAS libraries from the ACML."
+ "\n We try to find both libraries lists (Sequential/Multithreaded).")
+ endif()
+ set(BLAS_FOUND "")
+elseif(BLA_VENDOR STREQUAL "All" AND BLAS_essl_LIBRARY)
+ set(BLA_VENDOR "IBMESSL")
+ if(NOT BLASEXT_FIND_QUIETLY)
+ message(STATUS "A BLAS library has been found (${BLAS_LIBRARIES}) but we"
+ "\n have also potentially detected some multithreaded BLAS libraries from the ESSL."
+ "\n We try to find both libraries lists (Sequential/Multithreaded).")
+ endif()
+ set(BLAS_FOUND "")
+endif()
+
+# Intel case
+if(BLA_VENDOR MATCHES "Intel*")
+
+ ###
+ # look for include path if the BLAS vendor is Intel
+ ###
+
+ # gather system include paths
+ unset(_inc_env)
+ if(WIN32)
+ string(REPLACE ":" ";" _inc_env "$ENV{INCLUDE}")
+ else()
+ string(REPLACE ":" ";" _path_env "$ENV{INCLUDE}")
+ list(APPEND _inc_env "${_path_env}")
+ string(REPLACE ":" ";" _path_env "$ENV{C_INCLUDE_PATH}")
+ list(APPEND _inc_env "${_path_env}")
+ string(REPLACE ":" ";" _path_env "$ENV{CPATH}")
+ list(APPEND _inc_env "${_path_env}")
+ string(REPLACE ":" ";" _path_env "$ENV{INCLUDE_PATH}")
+ list(APPEND _inc_env "${_path_env}")
+ endif()
+ list(APPEND _inc_env "${CMAKE_PLATFORM_IMPLICIT_INCLUDE_DIRECTORIES}")
+ list(APPEND _inc_env "${CMAKE_C_IMPLICIT_INCLUDE_DIRECTORIES}")
+ set(ENV_MKLROOT "$ENV{MKLROOT}")
+ if (ENV_MKLROOT)
+ list(APPEND _inc_env "${ENV_MKLROOT}/include")
+ endif()
+ list(REMOVE_DUPLICATES _inc_env)
+
+ # find mkl.h inside known include paths
+ set(BLAS_mkl.h_INCLUDE_DIRS "BLAS_mkl.h_INCLUDE_DIRS-NOTFOUND")
+ if(BLAS_INCDIR)
+ set(BLAS_mkl.h_INCLUDE_DIRS "BLAS_mkl.h_INCLUDE_DIRS-NOTFOUND")
+ find_path(BLAS_mkl.h_INCLUDE_DIRS
+ NAMES mkl.h
+ HINTS ${BLAS_INCDIR})
+ else()
+ if(BLAS_DIR)
+ set(BLAS_mkl.h_INCLUDE_DIRS "BLAS_mkl.h_INCLUDE_DIRS-NOTFOUND")
+ find_path(BLAS_mkl.h_INCLUDE_DIRS
+ NAMES mkl.h
+ HINTS ${BLAS_DIR}
+ PATH_SUFFIXES include)
+ else()
+ set(BLAS_mkl.h_INCLUDE_DIRS "BLAS_mkl.h_INCLUDE_DIRS-NOTFOUND")
+ find_path(BLAS_mkl.h_INCLUDE_DIRS
+ NAMES mkl.h
+ HINTS ${_inc_env})
+ endif()
+ endif()
+ mark_as_advanced(BLAS_mkl.h_INCLUDE_DIRS)
+ ## Print status if not found
+ ## -------------------------
+ #if (NOT BLAS_mkl.h_INCLUDE_DIRS AND MORSE_VERBOSE)
+ # Print_Find_Header_Status(blas mkl.h)
+ #endif ()
+ set(BLAS_INCLUDE_DIRS "")
+ if(BLAS_mkl.h_INCLUDE_DIRS)
+ list(APPEND BLAS_INCLUDE_DIRS "${BLAS_mkl.h_INCLUDE_DIRS}" )
+ endif()
+
+ ###
+ # look for libs
+ ###
+ # if Intel 10 64 bit -> look for sequential and multithreaded versions
+ if(BLA_VENDOR MATCHES "Intel10_64lp*")
+
+ ## look for the sequential version
+ set(BLA_VENDOR "Intel10_64lp_seq")
+ if(NOT BLASEXT_FIND_QUIETLY)
+ message(STATUS "Look for the sequential version Intel10_64lp_seq")
+ endif()
+ find_package_blas()
+ if(BLAS_FOUND)
+ set(BLAS_SEQ_LIBRARIES "${BLAS_LIBRARIES}")
+ else()
+ set(BLAS_SEQ_LIBRARIES "${BLAS_SEQ_LIBRARIES-NOTFOUND}")
+ endif()
+
+ ## look for the multithreaded version
+ set(BLA_VENDOR "Intel10_64lp")
+ if(NOT BLASEXT_FIND_QUIETLY)
+ message(STATUS "Look for the multithreaded version Intel10_64lp")
+ endif()
+ find_package_blas()
+ if(BLAS_FOUND)
+ set(BLAS_PAR_LIBRARIES "${BLAS_LIBRARIES}")
+ else()
+ set(BLAS_PAR_LIBRARIES "${BLAS_PAR_LIBRARIES-NOTFOUND}")
+ endif()
+
+ else()
+
+ if(BLAS_FOUND)
+ set(BLAS_SEQ_LIBRARIES "${BLAS_LIBRARIES}")
+ else()
+ set(BLAS_SEQ_LIBRARIES "${BLAS_SEQ_LIBRARIES-NOTFOUND}")
+ endif()
+
+ endif()
+
+ # ACML case
+elseif(BLA_VENDOR MATCHES "ACML*")
+
+ ## look for the sequential version
+ set(BLA_VENDOR "ACML")
+ find_package_blas()
+ if(BLAS_FOUND)
+ set(BLAS_SEQ_LIBRARIES "${BLAS_LIBRARIES}")
+ else()
+ set(BLAS_SEQ_LIBRARIES "${BLAS_SEQ_LIBRARIES-NOTFOUND}")
+ endif()
+
+ ## look for the multithreaded version
+ set(BLA_VENDOR "ACML_MP")
+ find_package_blas()
+ if(BLAS_FOUND)
+ set(BLAS_PAR_LIBRARIES "${BLAS_LIBRARIES}")
+ else()
+ set(BLAS_PAR_LIBRARIES "${BLAS_PAR_LIBRARIES-NOTFOUND}")
+ endif()
+
+ # IBMESSL case
+elseif(BLA_VENDOR MATCHES "IBMESSL*")
+
+ ## look for the sequential version
+ set(BLA_VENDOR "IBMESSL")
+ find_package_blas()
+ if(BLAS_FOUND)
+ set(BLAS_SEQ_LIBRARIES "${BLAS_LIBRARIES}")
+ else()
+ set(BLAS_SEQ_LIBRARIES "${BLAS_SEQ_LIBRARIES-NOTFOUND}")
+ endif()
+
+ ## look for the multithreaded version
+ set(BLA_VENDOR "IBMESSLMT")
+ find_package_blas()
+ if(BLAS_FOUND)
+ set(BLAS_PAR_LIBRARIES "${BLAS_LIBRARIES}")
+ else()
+ set(BLAS_PAR_LIBRARIES "${BLAS_PAR_LIBRARIES-NOTFOUND}")
+ endif()
+
+else()
+
+ if(BLAS_FOUND)
+ # define the SEQ libs as the BLAS_LIBRARIES
+ set(BLAS_SEQ_LIBRARIES "${BLAS_LIBRARIES}")
+ else()
+ set(BLAS_SEQ_LIBRARIES "${BLAS_SEQ_LIBRARIES-NOTFOUND}")
+ endif()
+ set(BLAS_PAR_LIBRARIES "${BLAS_PAR_LIBRARIES-NOTFOUND}")
+
+endif()
+
+
+if(BLAS_SEQ_LIBRARIES)
+ set(BLAS_LIBRARIES "${BLAS_SEQ_LIBRARIES}")
+endif()
+
+# extract the library paths
+# remark: find_package(BLAS) does not provide them
+set(BLAS_LIBRARY_DIRS "")
+string(REPLACE " " ";" BLAS_LIBRARIES "${BLAS_LIBRARIES}")
+foreach(blas_lib ${BLAS_LIBRARIES})
+ if (EXISTS "${blas_lib}")
+ get_filename_component(a_blas_lib_dir "${blas_lib}" PATH)
+ list(APPEND BLAS_LIBRARY_DIRS "${a_blas_lib_dir}" )
+ else()
+ string(REPLACE "-L" "" blas_lib "${blas_lib}")
+ if (EXISTS "${blas_lib}")
+ list(APPEND BLAS_LIBRARY_DIRS "${blas_lib}" )
+ else()
+ get_filename_component(a_blas_lib_dir "${blas_lib}" PATH)
+ if (EXISTS "${a_blas_lib_dir}")
+ list(APPEND BLAS_LIBRARY_DIRS "${a_blas_lib_dir}" )
+ endif()
+ endif()
+ endif()
+endforeach()
+if (BLAS_LIBRARY_DIRS)
+ list(REMOVE_DUPLICATES BLAS_LIBRARY_DIRS)
+endif ()
+
+# check that BLAS has been found
+# ---------------------------------
+include(FindPackageHandleStandardArgs)
+if(BLA_VENDOR MATCHES "Intel*")
+ if(BLA_VENDOR MATCHES "Intel10_64lp*")
+ if(NOT BLASEXT_FIND_QUIETLY)
+ message(STATUS "BLAS found is Intel MKL:"
+ "\n we manage two lists of libs, one sequential and one parallel if found"
+ "\n (see BLAS_SEQ_LIBRARIES and BLAS_PAR_LIBRARIES)")
+ message(STATUS "BLAS sequential libraries stored in BLAS_SEQ_LIBRARIES")
+ endif()
+ find_package_handle_standard_args(BLAS DEFAULT_MSG
+ BLAS_SEQ_LIBRARIES
+ BLAS_LIBRARY_DIRS
+ BLAS_INCLUDE_DIRS)
+ if(BLAS_PAR_LIBRARIES)
+ if(NOT BLASEXT_FIND_QUIETLY)
+ message(STATUS "BLAS parallel libraries stored in BLAS_PAR_LIBRARIES")
+ endif()
+ find_package_handle_standard_args(BLAS DEFAULT_MSG
+ BLAS_PAR_LIBRARIES)
+ endif()
+ else()
+ if(NOT BLASEXT_FIND_QUIETLY)
+ message(STATUS "BLAS sequential libraries stored in BLAS_SEQ_LIBRARIES")
+ endif()
+ find_package_handle_standard_args(BLAS DEFAULT_MSG
+ BLAS_SEQ_LIBRARIES
+ BLAS_LIBRARY_DIRS
+ BLAS_INCLUDE_DIRS)
+ endif()
+elseif(BLA_VENDOR MATCHES "ACML*")
+ if(NOT BLASEXT_FIND_QUIETLY)
+ message(STATUS "BLAS found is ACML:"
+ "\n we manage two lists of libs, one sequential and one parallel if found"
+ "\n (see BLAS_SEQ_LIBRARIES and BLAS_PAR_LIBRARIES)")
+ message(STATUS "BLAS sequential libraries stored in BLAS_SEQ_LIBRARIES")
+ endif()
+ find_package_handle_standard_args(BLAS DEFAULT_MSG
+ BLAS_SEQ_LIBRARIES
+ BLAS_LIBRARY_DIRS)
+ if(BLAS_PAR_LIBRARIES)
+ if(NOT BLASEXT_FIND_QUIETLY)
+ message(STATUS "BLAS parallel libraries stored in BLAS_PAR_LIBRARIES")
+ endif()
+ find_package_handle_standard_args(BLAS DEFAULT_MSG
+ BLAS_PAR_LIBRARIES)
+ endif()
+elseif(BLA_VENDOR MATCHES "IBMESSL*")
+ if(NOT BLASEXT_FIND_QUIETLY)
+ message(STATUS "BLAS found is ESSL:"
+ "\n we manage two lists of libs, one sequential and one parallel if found"
+ "\n (see BLAS_SEQ_LIBRARIES and BLAS_PAR_LIBRARIES)")
+ message(STATUS "BLAS sequential libraries stored in BLAS_SEQ_LIBRARIES")
+ endif()
+ find_package_handle_standard_args(BLAS DEFAULT_MSG
+ BLAS_SEQ_LIBRARIES
+ BLAS_LIBRARY_DIRS)
+ if(BLAS_PAR_LIBRARIES)
+ if(NOT BLASEXT_FIND_QUIETLY)
+ message(STATUS "BLAS parallel libraries stored in BLAS_PAR_LIBRARIES")
+ endif()
+ find_package_handle_standard_args(BLAS DEFAULT_MSG
+ BLAS_PAR_LIBRARIES)
+ endif()
+else()
+ if(NOT BLASEXT_FIND_QUIETLY)
+ message(STATUS "BLAS sequential libraries stored in BLAS_SEQ_LIBRARIES")
+ endif()
+ find_package_handle_standard_args(BLAS DEFAULT_MSG
+ BLAS_SEQ_LIBRARIES
+ BLAS_LIBRARY_DIRS)
+endif()
diff --git a/cmake/FindComputeCpp.cmake b/cmake/FindComputeCpp.cmake
index 27e5c9b1f..29f2a5007 100644
--- a/cmake/FindComputeCpp.cmake
+++ b/cmake/FindComputeCpp.cmake
@@ -38,11 +38,6 @@ if(CMAKE_COMPILER_IS_GNUCXX)
if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.8)
message(FATAL_ERROR
"host compiler - Not found! (gcc version must be at least 4.8)")
- # Require the GCC dual ABI to be disabled for 5.1 or higher
- elseif (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 5.1)
- set(COMPUTECPP_DISABLE_GCC_DUAL_ABI "True")
- message(STATUS
- "host compiler - gcc ${CMAKE_CXX_COMPILER_VERSION} (note pre 5.1 gcc ABI enabled)")
else()
message(STATUS "host compiler - gcc ${CMAKE_CXX_COMPILER_VERSION}")
endif()
@@ -64,6 +59,12 @@ option(COMPUTECPP_64_BIT_CODE "Compile device code in 64 bit mode"
${COMPUTECPP_64_BIT_DEFAULT})
mark_as_advanced(COMPUTECPP_64_BIT_CODE)
+option(COMPUTECPP_DISABLE_GCC_DUAL_ABI "Compile with pre-5.1 ABI" OFF)
+mark_as_advanced(COMPUTECPP_DISABLE_GCC_DUAL_ABI)
+
+set(COMPUTECPP_USER_FLAGS "" CACHE STRING "User flags for compute++")
+mark_as_advanced(COMPUTECPP_USER_FLAGS)
+
# Find OpenCL package
find_package(OpenCL REQUIRED)
@@ -74,7 +75,6 @@ if(NOT COMPUTECPP_PACKAGE_ROOT_DIR)
else()
message(STATUS "ComputeCpp package - Found")
endif()
-option(COMPUTECPP_PACKAGE_ROOT_DIR "Path to the ComputeCpp Package")
# Obtain the path to compute++
find_program(COMPUTECPP_DEVICE_COMPILER compute++ PATHS
@@ -138,8 +138,6 @@ else()
message(STATUS "compute++ flags - ${COMPUTECPP_DEVICE_COMPILER_FLAGS}")
endif()
-set(COMPUTECPP_DEVICE_COMPILER_FLAGS ${COMPUTECPP_DEVICE_COMPILER_FLAGS} -sycl-compress-name -Wall -no-serial-memop -DEIGEN_NO_ASSERTION_CHECKING=1)
-
# Check if the platform is supported
execute_process(COMMAND ${COMPUTECPP_INFO_TOOL} "--dump-is-supported"
OUTPUT_VARIABLE COMPUTECPP_PLATFORM_IS_SUPPORTED
@@ -155,6 +153,13 @@ else()
endif()
endif()
+set(COMPUTECPP_USER_FLAGS
+ -sycl-compress-name
+ -Wall
+ -no-serial-memop
+ -DEIGEN_NO_ASSERTION_CHECKING=1
+ )
+
####################
# __build_sycl
####################
@@ -165,8 +170,11 @@ endif()
# targetName : Name of the target.
# sourceFile : Source file to be compiled.
# binaryDir : Intermediate directory to output the integration header.
+# fileCounter : Counter included in the name of the custom target. Different counter
+# values prevent duplicate custom-target names when source files with the same name,
+# but located in different directories, are used for the same target.
#
-function(__build_spir targetName sourceFile binaryDir)
+function(__build_spir targetName sourceFile binaryDir fileCounter)
# Retrieve source file name.
get_filename_component(sourceFileName ${sourceFile} NAME)
@@ -175,12 +183,16 @@ function(__build_spir targetName sourceFile binaryDir)
set(outputSyclFile ${binaryDir}/${sourceFileName}.sycl)
# Add any user-defined include to the device compiler
+ set(device_compiler_includes "")
get_property(includeDirectories DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} PROPERTY
INCLUDE_DIRECTORIES)
- set(device_compiler_includes "")
foreach(directory ${includeDirectories})
set(device_compiler_includes "-I${directory}" ${device_compiler_includes})
endforeach()
+ get_target_property(targetIncludeDirectories ${targetName} INCLUDE_DIRECTORIES)
+ foreach(directory ${targetIncludeDirectories})
+ set(device_compiler_includes "-I${directory}" ${device_compiler_includes})
+ endforeach()
if (CMAKE_INCLUDE_PATH)
foreach(directory ${CMAKE_INCLUDE_PATH})
set(device_compiler_includes "-I${directory}"
@@ -188,6 +200,9 @@ function(__build_spir targetName sourceFile binaryDir)
endforeach()
endif()
+ set(COMPUTECPP_DEVICE_COMPILER_FLAGS
+ ${COMPUTECPP_DEVICE_COMPILER_FLAGS}
+ ${COMPUTECPP_USER_FLAGS})
# Convert argument list format
separate_arguments(COMPUTECPP_DEVICE_COMPILER_FLAGS)
@@ -201,9 +216,10 @@ function(__build_spir targetName sourceFile binaryDir)
${device_compiler_includes}
-o ${outputSyclFile}
-c ${CMAKE_CURRENT_SOURCE_DIR}/${sourceFile}
- DEPENDS ${sourceFile}
+ DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/${sourceFile}
+ IMPLICIT_DEPENDS CXX "${CMAKE_CURRENT_SOURCE_DIR}/${sourceFile}"
WORKING_DIRECTORY ${binaryDir}
- COMMENT "Building ComputeCpp integration header file ${outputSyclFile}")
+ COMMENT "Building ComputeCpp integration header file ${outputSyclFile}")
# Add a custom target for the generated integration header
add_custom_target(${targetName}_integration_header DEPENDS ${outputSyclFile})
@@ -227,16 +243,21 @@ endfunction()
#######################
#
# Adds a SYCL compilation custom command associated with an existing
-# target and sets a dependancy on that new command.
+# target and sets a dependency on that new command.
#
# targetName : Name of the target to add a SYCL to.
-# sourceFile : Source file to be compiled for SYCL.
# binaryDir : Intermediate directory to output the integration header.
+# sourceFiles : Source files to be compiled for SYCL.
#
-function(add_sycl_to_target targetName sourceFile binaryDir)
+function(add_sycl_to_target targetName binaryDir sourceFiles)
+ set(sourceFiles ${sourceFiles} ${ARGN})
+ set(fileCounter 0)
# Add custom target to run compute++ and generate the integration header
- __build_spir(${targetName} ${sourceFile} ${binaryDir})
+ foreach(sourceFile ${sourceFiles})
+ __build_spir(${targetName} ${sourceFile} ${binaryDir} ${fileCounter})
+ MATH(EXPR fileCounter "${fileCounter} + 1")
+ endforeach()
# Link with the ComputeCpp runtime library
target_link_libraries(${targetName} PUBLIC ${COMPUTECPP_RUNTIME_LIBRARY}
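The reworked add_sycl_to_target above now takes the binary directory as its second argument and accepts any number of SYCL source files, using a per-file counter to keep the generated custom-target names unique (per the comment in the patch). A call sketch, with target and file names invented for illustration:

  add_executable(sycl_demo src/main.cpp src/kernels.cpp)
  # New argument order: target, binary dir, then the SYCL source files.
  add_sycl_to_target(sycl_demo ${CMAKE_CURRENT_BINARY_DIR}
                     src/main.cpp
                     src/kernels.cpp)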
diff --git a/cmake/FindEigen3.cmake b/cmake/FindEigen3.cmake
index 9e9697860..4d9bc1a52 100644
--- a/cmake/FindEigen3.cmake
+++ b/cmake/FindEigen3.cmake
@@ -10,8 +10,12 @@
# EIGEN3_INCLUDE_DIR - the eigen include directory
# EIGEN3_VERSION - eigen version
#
+# and the following imported target:
+#
+# Eigen3::Eigen - The header-only Eigen library
+#
# This module reads hints about search locations from
-# the following enviroment variables:
+# the following environment variables:
#
# EIGEN3_ROOT
# EIGEN3_ROOT_DIR
@@ -64,6 +68,7 @@ if (EIGEN3_INCLUDE_DIR)
# in cache already
_eigen3_check_version()
set(EIGEN3_FOUND ${EIGEN3_VERSION_OK})
+ set(Eigen3_FOUND ${EIGEN3_VERSION_OK})
else (EIGEN3_INCLUDE_DIR)
@@ -95,3 +100,8 @@ else (EIGEN3_INCLUDE_DIR)
endif(EIGEN3_INCLUDE_DIR)
+if(EIGEN3_FOUND AND NOT TARGET Eigen3::Eigen)
+ add_library(Eigen3::Eigen INTERFACE IMPORTED)
+ set_target_properties(Eigen3::Eigen PROPERTIES
+ INTERFACE_INCLUDE_DIRECTORIES "${EIGEN3_INCLUDE_DIR}")
+endif()
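With the imported target added above, consumers no longer need to handle EIGEN3_INCLUDE_DIR themselves; a minimal sketch with an invented target name:

  find_package(Eigen3 REQUIRED)
  add_executable(eigen_demo main.cpp)
  target_link_libraries(eigen_demo Eigen3::Eigen)   # propagates the include directory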
diff --git a/cmake/FindHWLOC.cmake b/cmake/FindHWLOC.cmake
new file mode 100644
index 000000000..a831b5c72
--- /dev/null
+++ b/cmake/FindHWLOC.cmake
@@ -0,0 +1,331 @@
+###
+#
+# @copyright (c) 2009-2014 The University of Tennessee and The University
+# of Tennessee Research Foundation.
+# All rights reserved.
+# @copyright (c) 2012-2014 Inria. All rights reserved.
+# @copyright (c) 2012-2014 Bordeaux INP, CNRS (LaBRI UMR 5800), Inria, Univ. Bordeaux. All rights reserved.
+#
+###
+#
+# - Find HWLOC include dirs and libraries
+# Use this module by invoking find_package with the form:
+# find_package(HWLOC
+# [REQUIRED]) # Fail with error if hwloc is not found
+#
+# This module finds headers and hwloc library.
+# Results are reported in variables:
+# HWLOC_FOUND - True if headers and requested libraries were found
+# HWLOC_INCLUDE_DIRS - hwloc include directories
+# HWLOC_LIBRARY_DIRS - Link directories for hwloc libraries
+# HWLOC_LIBRARIES - hwloc component libraries to be linked
+#
+# The user can give specific paths where to find the libraries by adding cmake
+# options at configure time (ex: cmake path/to/project -DHWLOC_DIR=path/to/hwloc):
+# HWLOC_DIR - Where to find the base directory of hwloc
+# HWLOC_INCDIR - Where to find the header files
+# HWLOC_LIBDIR - Where to find the library files
+# The module can also look for the following environment variables if paths
+# are not given as cmake variables: HWLOC_DIR, HWLOC_INCDIR, HWLOC_LIBDIR
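A consumer sketch for the variables documented above, assuming cmake/ is on CMAKE_MODULE_PATH; the target and source names are placeholders:

  # cmake path/to/project -DHWLOC_DIR=/opt/hwloc   (optional location hint, see above)
  find_package(HWLOC REQUIRED)
  include_directories(${HWLOC_INCLUDE_DIRS})
  link_directories(${HWLOC_LIBRARY_DIRS})
  add_executable(topology_demo topology_demo.c)
  target_link_libraries(topology_demo ${HWLOC_LIBRARIES})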
+
+#=============================================================================
+# Copyright 2012-2013 Inria
+# Copyright 2012-2013 Emmanuel Agullo
+# Copyright 2012-2013 Mathieu Faverge
+# Copyright 2012 Cedric Castagnede
+# Copyright 2013 Florent Pruvost
+#
+# Distributed under the OSI-approved BSD License (the "License");
+# see accompanying file MORSE-Copyright.txt for details.
+#
+# This software is distributed WITHOUT ANY WARRANTY; without even the
+# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+# See the License for more information.
+#=============================================================================
+# (To distribute this file outside of Morse, substitute the full
+# License text for the above reference.)
+
+include(CheckStructHasMember)
+include(CheckCSourceCompiles)
+
+if (NOT HWLOC_FOUND)
+ set(HWLOC_DIR "" CACHE PATH "Installation directory of HWLOC library")
+ if (NOT HWLOC_FIND_QUIETLY)
+ message(STATUS "A cache variable, namely HWLOC_DIR, has been set to specify the install directory of HWLOC")
+ endif()
+endif()
+
+set(ENV_HWLOC_DIR "$ENV{HWLOC_DIR}")
+set(ENV_HWLOC_INCDIR "$ENV{HWLOC_INCDIR}")
+set(ENV_HWLOC_LIBDIR "$ENV{HWLOC_LIBDIR}")
+set(HWLOC_GIVEN_BY_USER "FALSE")
+if ( HWLOC_DIR OR ( HWLOC_INCDIR AND HWLOC_LIBDIR) OR ENV_HWLOC_DIR OR (ENV_HWLOC_INCDIR AND ENV_HWLOC_LIBDIR) )
+ set(HWLOC_GIVEN_BY_USER "TRUE")
+endif()
+
+# Optionally use pkg-config to detect include/library dirs (if pkg-config is available)
+# -------------------------------------------------------------------------------------
+include(FindPkgConfig)
+find_package(PkgConfig QUIET)
+if( PKG_CONFIG_EXECUTABLE AND NOT HWLOC_GIVEN_BY_USER )
+
+ pkg_search_module(HWLOC hwloc)
+ if (NOT HWLOC_FIND_QUIETLY)
+ if (HWLOC_FOUND AND HWLOC_LIBRARIES)
+ message(STATUS "Looking for HWLOC - found using PkgConfig")
+ #if(NOT HWLOC_INCLUDE_DIRS)
+ # message("${Magenta}HWLOC_INCLUDE_DIRS is empty using PkgConfig."
+ # "Perhaps the path to hwloc headers is already present in your"
+ # "C(PLUS)_INCLUDE_PATH environment variable.${ColourReset}")
+ #endif()
+ else()
+ message(STATUS "${Magenta}Looking for HWLOC - not found using PkgConfig."
+ "\n Perhaps you should add the directory containing hwloc.pc to"
+ "\n the PKG_CONFIG_PATH environment variable.${ColourReset}")
+ endif()
+ endif()
+
+endif( PKG_CONFIG_EXECUTABLE AND NOT HWLOC_GIVEN_BY_USER )
+
+if( (NOT PKG_CONFIG_EXECUTABLE) OR (PKG_CONFIG_EXECUTABLE AND NOT HWLOC_FOUND) OR (HWLOC_GIVEN_BY_USER) )
+
+ if (NOT HWLOC_FIND_QUIETLY)
+ message(STATUS "Looking for HWLOC - PkgConfig not used")
+ endif()
+
+ # Looking for include
+ # -------------------
+
+ # Add system include paths to search include
+ # ------------------------------------------
+ unset(_inc_env)
+ if(ENV_HWLOC_INCDIR)
+ list(APPEND _inc_env "${ENV_HWLOC_INCDIR}")
+ elseif(ENV_HWLOC_DIR)
+ list(APPEND _inc_env "${ENV_HWLOC_DIR}")
+ list(APPEND _inc_env "${ENV_HWLOC_DIR}/include")
+ list(APPEND _inc_env "${ENV_HWLOC_DIR}/include/hwloc")
+ else()
+ if(WIN32)
+ string(REPLACE ":" ";" _inc_env "$ENV{INCLUDE}")
+ else()
+ string(REPLACE ":" ";" _path_env "$ENV{INCLUDE}")
+ list(APPEND _inc_env "${_path_env}")
+ string(REPLACE ":" ";" _path_env "$ENV{C_INCLUDE_PATH}")
+ list(APPEND _inc_env "${_path_env}")
+ string(REPLACE ":" ";" _path_env "$ENV{CPATH}")
+ list(APPEND _inc_env "${_path_env}")
+ string(REPLACE ":" ";" _path_env "$ENV{INCLUDE_PATH}")
+ list(APPEND _inc_env "${_path_env}")
+ endif()
+ endif()
+ list(APPEND _inc_env "${CMAKE_PLATFORM_IMPLICIT_INCLUDE_DIRECTORIES}")
+ list(APPEND _inc_env "${CMAKE_C_IMPLICIT_INCLUDE_DIRECTORIES}")
+ list(REMOVE_DUPLICATES _inc_env)
+
+ # set paths where to look for
+ set(PATH_TO_LOOK_FOR "${_inc_env}")
+
+ # Try to find the hwloc header in the given paths
+ # -------------------------------------------------
+ # call cmake macro to find the header path
+ if(HWLOC_INCDIR)
+ set(HWLOC_hwloc.h_DIRS "HWLOC_hwloc.h_DIRS-NOTFOUND")
+ find_path(HWLOC_hwloc.h_DIRS
+ NAMES hwloc.h
+ HINTS ${HWLOC_INCDIR})
+ else()
+ if(HWLOC_DIR)
+ set(HWLOC_hwloc.h_DIRS "HWLOC_hwloc.h_DIRS-NOTFOUND")
+ find_path(HWLOC_hwloc.h_DIRS
+ NAMES hwloc.h
+ HINTS ${HWLOC_DIR}
+ PATH_SUFFIXES "include" "include/hwloc")
+ else()
+ set(HWLOC_hwloc.h_DIRS "HWLOC_hwloc.h_DIRS-NOTFOUND")
+ find_path(HWLOC_hwloc.h_DIRS
+ NAMES hwloc.h
+ HINTS ${PATH_TO_LOOK_FOR}
+ PATH_SUFFIXES "hwloc")
+ endif()
+ endif()
+ mark_as_advanced(HWLOC_hwloc.h_DIRS)
+
+ # Add path to cmake variable
+ # ------------------------------------
+ if (HWLOC_hwloc.h_DIRS)
+ set(HWLOC_INCLUDE_DIRS "${HWLOC_hwloc.h_DIRS}")
+ else ()
+ set(HWLOC_INCLUDE_DIRS "HWLOC_INCLUDE_DIRS-NOTFOUND")
+ if(NOT HWLOC_FIND_QUIETLY)
+ message(STATUS "Looking for hwloc -- hwloc.h not found")
+ endif()
+ endif ()
+
+ if (HWLOC_INCLUDE_DIRS)
+ list(REMOVE_DUPLICATES HWLOC_INCLUDE_DIRS)
+ endif ()
+
+
+ # Looking for lib
+ # ---------------
+
+ # Add system library paths to search lib
+ # --------------------------------------
+ unset(_lib_env)
+ if(ENV_HWLOC_LIBDIR)
+ list(APPEND _lib_env "${ENV_HWLOC_LIBDIR}")
+ elseif(ENV_HWLOC_DIR)
+ list(APPEND _lib_env "${ENV_HWLOC_DIR}")
+ list(APPEND _lib_env "${ENV_HWLOC_DIR}/lib")
+ else()
+ if(WIN32)
+ string(REPLACE ":" ";" _lib_env "$ENV{LIB}")
+ else()
+ if(APPLE)
+ string(REPLACE ":" ";" _lib_env "$ENV{DYLD_LIBRARY_PATH}")
+ else()
+ string(REPLACE ":" ";" _lib_env "$ENV{LD_LIBRARY_PATH}")
+ endif()
+ list(APPEND _lib_env "${CMAKE_PLATFORM_IMPLICIT_LINK_DIRECTORIES}")
+ list(APPEND _lib_env "${CMAKE_C_IMPLICIT_LINK_DIRECTORIES}")
+ endif()
+ endif()
+ list(REMOVE_DUPLICATES _lib_env)
+
+ # set paths where to look for
+ set(PATH_TO_LOOK_FOR "${_lib_env}")
+
+ # Try to find the hwloc lib in the given paths
+ # ----------------------------------------------
+
+ # call cmake macro to find the lib path
+ if(HWLOC_LIBDIR)
+ set(HWLOC_hwloc_LIBRARY "HWLOC_hwloc_LIBRARY-NOTFOUND")
+ find_library(HWLOC_hwloc_LIBRARY
+ NAMES hwloc
+ HINTS ${HWLOC_LIBDIR})
+ else()
+ if(HWLOC_DIR)
+ set(HWLOC_hwloc_LIBRARY "HWLOC_hwloc_LIBRARY-NOTFOUND")
+ find_library(HWLOC_hwloc_LIBRARY
+ NAMES hwloc
+ HINTS ${HWLOC_DIR}
+ PATH_SUFFIXES lib lib32 lib64)
+ else()
+ set(HWLOC_hwloc_LIBRARY "HWLOC_hwloc_LIBRARY-NOTFOUND")
+ find_library(HWLOC_hwloc_LIBRARY
+ NAMES hwloc
+ HINTS ${PATH_TO_LOOK_FOR})
+ endif()
+ endif()
+ mark_as_advanced(HWLOC_hwloc_LIBRARY)
+
+ # If found, add path to cmake variable
+ # ------------------------------------
+ if (HWLOC_hwloc_LIBRARY)
+ get_filename_component(hwloc_lib_path ${HWLOC_hwloc_LIBRARY} PATH)
+ # set cmake variables (respects naming convention)
+ set(HWLOC_LIBRARIES "${HWLOC_hwloc_LIBRARY}")
+ set(HWLOC_LIBRARY_DIRS "${hwloc_lib_path}")
+ else ()
+ set(HWLOC_LIBRARIES "HWLOC_LIBRARIES-NOTFOUND")
+ set(HWLOC_LIBRARY_DIRS "HWLOC_LIBRARY_DIRS-NOTFOUND")
+ if(NOT HWLOC_FIND_QUIETLY)
+ message(STATUS "Looking for hwloc -- lib hwloc not found")
+ endif()
+ endif ()
+
+ if (HWLOC_LIBRARY_DIRS)
+ list(REMOVE_DUPLICATES HWLOC_LIBRARY_DIRS)
+ endif ()
+
+ # check a function to validate the find
+ if(HWLOC_LIBRARIES)
+
+ set(REQUIRED_INCDIRS)
+ set(REQUIRED_LIBDIRS)
+ set(REQUIRED_LIBS)
+
+ # HWLOC
+ if (HWLOC_INCLUDE_DIRS)
+ set(REQUIRED_INCDIRS "${HWLOC_INCLUDE_DIRS}")
+ endif()
+ if (HWLOC_LIBRARY_DIRS)
+ set(REQUIRED_LIBDIRS "${HWLOC_LIBRARY_DIRS}")
+ endif()
+ set(REQUIRED_LIBS "${HWLOC_LIBRARIES}")
+
+ # set required libraries for link
+ set(CMAKE_REQUIRED_INCLUDES "${REQUIRED_INCDIRS}")
+ set(CMAKE_REQUIRED_LIBRARIES)
+ foreach(lib_dir ${REQUIRED_LIBDIRS})
+ list(APPEND CMAKE_REQUIRED_LIBRARIES "-L${lib_dir}")
+ endforeach()
+ list(APPEND CMAKE_REQUIRED_LIBRARIES "${REQUIRED_LIBS}")
+ string(REGEX REPLACE "^ -" "-" CMAKE_REQUIRED_LIBRARIES "${CMAKE_REQUIRED_LIBRARIES}")
+
+ # test link
+ unset(HWLOC_WORKS CACHE)
+ include(CheckFunctionExists)
+ check_function_exists(hwloc_topology_init HWLOC_WORKS)
+ mark_as_advanced(HWLOC_WORKS)
+
+ if(NOT HWLOC_WORKS)
+ if(NOT HWLOC_FIND_QUIETLY)
+ message(STATUS "Looking for hwloc : test of hwloc_topology_init with hwloc library fails")
+ message(STATUS "CMAKE_REQUIRED_LIBRARIES: ${CMAKE_REQUIRED_LIBRARIES}")
+ message(STATUS "CMAKE_REQUIRED_INCLUDES: ${CMAKE_REQUIRED_INCLUDES}")
+ message(STATUS "Check in CMakeFiles/CMakeError.log to figure out why it fails")
+ endif()
+ endif()
+ set(CMAKE_REQUIRED_INCLUDES)
+ set(CMAKE_REQUIRED_FLAGS)
+ set(CMAKE_REQUIRED_LIBRARIES)
+ endif(HWLOC_LIBRARIES)
+
+endif( (NOT PKG_CONFIG_EXECUTABLE) OR (PKG_CONFIG_EXECUTABLE AND NOT HWLOC_FOUND) OR (HWLOC_GIVEN_BY_USER) )
+
+if (HWLOC_LIBRARIES)
+ if (HWLOC_LIBRARY_DIRS)
+ list(GET HWLOC_LIBRARY_DIRS 0 first_lib_path)
+ else()
+ list(GET HWLOC_LIBRARIES 0 first_lib)
+ get_filename_component(first_lib_path "${first_lib}" PATH)
+ endif()
+ if (${first_lib_path} MATCHES "/lib(32|64)?$")
+ string(REGEX REPLACE "/lib(32|64)?$" "" not_cached_dir "${first_lib_path}")
+ set(HWLOC_DIR_FOUND "${not_cached_dir}" CACHE PATH "Installation directory of HWLOC library" FORCE)
+ else()
+ set(HWLOC_DIR_FOUND "${first_lib_path}" CACHE PATH "Installation directory of HWLOC library" FORCE)
+ endif()
+endif()
+mark_as_advanced(HWLOC_DIR)
+mark_as_advanced(HWLOC_DIR_FOUND)
+
+# check that HWLOC has been found
+# -------------------------------
+include(FindPackageHandleStandardArgs)
+if (PKG_CONFIG_EXECUTABLE AND HWLOC_FOUND)
+ find_package_handle_standard_args(HWLOC DEFAULT_MSG
+ HWLOC_LIBRARIES)
+else()
+ find_package_handle_standard_args(HWLOC DEFAULT_MSG
+ HWLOC_LIBRARIES
+ HWLOC_WORKS)
+endif()
+
+if (HWLOC_FOUND)
+ set(HWLOC_SAVE_CMAKE_REQUIRED_INCLUDES ${CMAKE_REQUIRED_INCLUDES})
+ list(APPEND CMAKE_REQUIRED_INCLUDES ${HWLOC_INCLUDE_DIRS})
+
+ # test headers to guess the version
+ check_struct_has_member( "struct hwloc_obj" parent hwloc.h HAVE_HWLOC_PARENT_MEMBER )
+ check_struct_has_member( "struct hwloc_cache_attr_s" size hwloc.h HAVE_HWLOC_CACHE_ATTR )
+ check_c_source_compiles( "#include <hwloc.h>
+ int main(void) { hwloc_obj_t o; o->type = HWLOC_OBJ_PU; return 0;}" HAVE_HWLOC_OBJ_PU)
+ include(CheckLibraryExists)
+ check_library_exists(${HWLOC_LIBRARIES} hwloc_bitmap_free "" HAVE_HWLOC_BITMAP)
+
+ set(CMAKE_REQUIRED_INCLUDES ${HWLOC_SAVE_CMAKE_REQUIRED_INCLUDES})
+endif()
diff --git a/cmake/FindKLU.cmake b/cmake/FindKLU.cmake
new file mode 100644
index 000000000..4a8f8e0b0
--- /dev/null
+++ b/cmake/FindKLU.cmake
@@ -0,0 +1,48 @@
+# KLU lib usually requires linking to a blas library.
+# It is up to the user of this module to find a BLAS and link to it.
+
+if (KLU_INCLUDES AND KLU_LIBRARIES)
+ set(KLU_FIND_QUIETLY TRUE)
+endif (KLU_INCLUDES AND KLU_LIBRARIES)
+
+find_path(KLU_INCLUDES
+ NAMES
+ klu.h
+ PATHS
+ $ENV{KLUDIR}
+ ${INCLUDE_INSTALL_DIR}
+ PATH_SUFFIXES
+ suitesparse
+ ufsparse
+)
+
+find_library(KLU_LIBRARIES klu PATHS $ENV{KLUDIR} ${LIB_INSTALL_DIR})
+
+if(KLU_LIBRARIES)
+
+ if(NOT KLU_LIBDIR)
+ get_filename_component(KLU_LIBDIR ${KLU_LIBRARIES} PATH)
+ endif(NOT KLU_LIBDIR)
+
+ find_library(COLAMD_LIBRARY colamd PATHS ${KLU_LIBDIR} $ENV{KLUDIR} ${LIB_INSTALL_DIR})
+ if(COLAMD_LIBRARY)
+ set(KLU_LIBRARIES ${KLU_LIBRARIES} ${COLAMD_LIBRARY})
+ endif ()
+
+ find_library(AMD_LIBRARY amd PATHS ${KLU_LIBDIR} $ENV{KLUDIR} ${LIB_INSTALL_DIR})
+ if(AMD_LIBRARY)
+ set(KLU_LIBRARIES ${KLU_LIBRARIES} ${AMD_LIBRARY})
+ endif ()
+
+ find_library(BTF_LIBRARY btf PATHS $ENV{KLU_LIBDIR} $ENV{KLUDIR} ${LIB_INSTALL_DIR})
+ if(BTF_LIBRARY)
+ set(KLU_LIBRARIES ${KLU_LIBRARIES} ${BTF_LIBRARY})
+ endif()
+
+endif(KLU_LIBRARIES)
+
+include(FindPackageHandleStandardArgs)
+find_package_handle_standard_args(KLU DEFAULT_MSG
+ KLU_INCLUDES KLU_LIBRARIES)
+
+mark_as_advanced(KLU_INCLUDES KLU_LIBRARIES AMD_LIBRARY COLAMD_LIBRARY BTF_LIBRARY)
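As the header of this module notes, finding and linking a BLAS is left to the caller; a combined sketch with invented target and source names:

  find_package(KLU REQUIRED)
  find_package(BLAS)                      # per the note above, KLU usually needs a BLAS
  include_directories(${KLU_INCLUDES})
  add_executable(klu_demo klu_demo.c)
  target_link_libraries(klu_demo ${KLU_LIBRARIES} ${BLAS_LIBRARIES})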
diff --git a/cmake/FindMetis.cmake b/cmake/FindMetis.cmake
index 6a0ce790c..da2f1f1d7 100644
--- a/cmake/FindMetis.cmake
+++ b/cmake/FindMetis.cmake
@@ -1,59 +1,264 @@
-# Pastix requires METIS or METIS (partitioning and reordering tools)
-
-if (METIS_INCLUDES AND METIS_LIBRARIES)
- set(METIS_FIND_QUIETLY TRUE)
-endif (METIS_INCLUDES AND METIS_LIBRARIES)
-
-find_path(METIS_INCLUDES
- NAMES
- metis.h
- PATHS
- $ENV{METISDIR}
- ${INCLUDE_INSTALL_DIR}
- PATH_SUFFIXES
- .
- metis
- include
-)
-
-macro(_metis_check_version)
- file(READ "${METIS_INCLUDES}/metis.h" _metis_version_header)
-
- string(REGEX MATCH "define[ \t]+METIS_VER_MAJOR[ \t]+([0-9]+)" _metis_major_version_match "${_metis_version_header}")
- set(METIS_MAJOR_VERSION "${CMAKE_MATCH_1}")
- string(REGEX MATCH "define[ \t]+METIS_VER_MINOR[ \t]+([0-9]+)" _metis_minor_version_match "${_metis_version_header}")
- set(METIS_MINOR_VERSION "${CMAKE_MATCH_1}")
- string(REGEX MATCH "define[ \t]+METIS_VER_SUBMINOR[ \t]+([0-9]+)" _metis_subminor_version_match "${_metis_version_header}")
- set(METIS_SUBMINOR_VERSION "${CMAKE_MATCH_1}")
- if(NOT METIS_MAJOR_VERSION)
- message(STATUS "Could not determine Metis version. Assuming version 4.0.0")
- set(METIS_VERSION 4.0.0)
+###
+#
+# @copyright (c) 2009-2014 The University of Tennessee and The University
+# of Tennessee Research Foundation.
+# All rights reserved.
+# @copyright (c) 2012-2014 Inria. All rights reserved.
+# @copyright (c) 2012-2014 Bordeaux INP, CNRS (LaBRI UMR 5800), Inria, Univ. Bordeaux. All rights reserved.
+#
+###
+#
+# - Find METIS include dirs and libraries
+# Use this module by invoking find_package with the form:
+# find_package(METIS
+# [REQUIRED] # Fail with error if metis is not found
+# )
+#
+# This module finds headers and metis library.
+# Results are reported in variables:
+# METIS_FOUND - True if headers and requested libraries were found
+# METIS_INCLUDE_DIRS - metis include directories
+# METIS_LIBRARY_DIRS - Link directories for metis libraries
+# METIS_LIBRARIES - metis component libraries to be linked
+#
+# The user can give specific paths where to find the libraries by adding cmake
+# options at configure time (ex: cmake path/to/project -DMETIS_DIR=path/to/metis):
+# METIS_DIR - Where to find the base directory of metis
+# METIS_INCDIR - Where to find the header files
+# METIS_LIBDIR - Where to find the library files
+# The module can also look for the following environment variables if paths
+# are not given as cmake variables: METIS_DIR, METIS_INCDIR, METIS_LIBDIR
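A usage sketch for the variables this rewritten module exports (METIS_INCLUDE_DIRS, METIS_LIBRARY_DIRS and METIS_LIBRARIES, rather than the old METIS_INCLUDES); the target and source names are illustrative assumptions:

  find_package(Metis REQUIRED)
  include_directories(${METIS_INCLUDE_DIRS})
  link_directories(${METIS_LIBRARY_DIRS})
  add_executable(metis_demo metis_demo.c)
  target_link_libraries(metis_demo ${METIS_LIBRARIES})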
+
+#=============================================================================
+# Copyright 2012-2013 Inria
+# Copyright 2012-2013 Emmanuel Agullo
+# Copyright 2012-2013 Mathieu Faverge
+# Copyright 2012 Cedric Castagnede
+# Copyright 2013 Florent Pruvost
+#
+# Distributed under the OSI-approved BSD License (the "License");
+# see accompanying file MORSE-Copyright.txt for details.
+#
+# This software is distributed WITHOUT ANY WARRANTY; without even the
+# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+# See the License for more information.
+#=============================================================================
+# (To distribute this file outside of Morse, substitute the full
+# License text for the above reference.)
+
+if (NOT METIS_FOUND)
+ set(METIS_DIR "" CACHE PATH "Installation directory of METIS library")
+ if (NOT METIS_FIND_QUIETLY)
+ message(STATUS "A cache variable, namely METIS_DIR, has been set to specify the install directory of METIS")
+ endif()
+endif()
+
+# Looking for include
+# -------------------
+
+# Add system include paths to search include
+# ------------------------------------------
+unset(_inc_env)
+set(ENV_METIS_DIR "$ENV{METIS_DIR}")
+set(ENV_METIS_INCDIR "$ENV{METIS_INCDIR}")
+if(ENV_METIS_INCDIR)
+ list(APPEND _inc_env "${ENV_METIS_INCDIR}")
+elseif(ENV_METIS_DIR)
+ list(APPEND _inc_env "${ENV_METIS_DIR}")
+ list(APPEND _inc_env "${ENV_METIS_DIR}/include")
+ list(APPEND _inc_env "${ENV_METIS_DIR}/include/metis")
+else()
+ if(WIN32)
+ string(REPLACE ":" ";" _inc_env "$ENV{INCLUDE}")
else()
- set(METIS_VERSION ${METIS_MAJOR_VERSION}.${METIS_MINOR_VERSION}.${METIS_SUBMINOR_VERSION})
+ string(REPLACE ":" ";" _path_env "$ENV{INCLUDE}")
+ list(APPEND _inc_env "${_path_env}")
+ string(REPLACE ":" ";" _path_env "$ENV{C_INCLUDE_PATH}")
+ list(APPEND _inc_env "${_path_env}")
+ string(REPLACE ":" ";" _path_env "$ENV{CPATH}")
+ list(APPEND _inc_env "${_path_env}")
+ string(REPLACE ":" ";" _path_env "$ENV{INCLUDE_PATH}")
+ list(APPEND _inc_env "${_path_env}")
endif()
- if(${METIS_VERSION} VERSION_LESS ${Metis_FIND_VERSION})
- set(METIS_VERSION_OK FALSE)
+endif()
+list(APPEND _inc_env "${CMAKE_PLATFORM_IMPLICIT_INCLUDE_DIRECTORIES}")
+list(APPEND _inc_env "${CMAKE_C_IMPLICIT_INCLUDE_DIRECTORIES}")
+list(REMOVE_DUPLICATES _inc_env)
+
+
+# Try to find the metis header in the given paths
+# -------------------------------------------------
+# call cmake macro to find the header path
+if(METIS_INCDIR)
+ set(METIS_metis.h_DIRS "METIS_metis.h_DIRS-NOTFOUND")
+ find_path(METIS_metis.h_DIRS
+ NAMES metis.h
+ HINTS ${METIS_INCDIR})
+else()
+ if(METIS_DIR)
+ set(METIS_metis.h_DIRS "METIS_metis.h_DIRS-NOTFOUND")
+ find_path(METIS_metis.h_DIRS
+ NAMES metis.h
+ HINTS ${METIS_DIR}
+ PATH_SUFFIXES "include" "include/metis")
else()
- set(METIS_VERSION_OK TRUE)
+ set(METIS_metis.h_DIRS "METIS_metis.h_DIRS-NOTFOUND")
+ find_path(METIS_metis.h_DIRS
+ NAMES metis.h
+ HINTS ${_inc_env})
endif()
+endif()
+mark_as_advanced(METIS_metis.h_DIRS)
- if(NOT METIS_VERSION_OK)
- message(STATUS "Metis version ${METIS_VERSION} found in ${METIS_INCLUDES}, "
- "but at least version ${Metis_FIND_VERSION} is required")
- endif(NOT METIS_VERSION_OK)
-endmacro(_metis_check_version)
- if(METIS_INCLUDES AND Metis_FIND_VERSION)
- _metis_check_version()
+# If found, add path to cmake variable
+# ------------------------------------
+if (METIS_metis.h_DIRS)
+ set(METIS_INCLUDE_DIRS "${METIS_metis.h_DIRS}")
+else ()
+ set(METIS_INCLUDE_DIRS "METIS_INCLUDE_DIRS-NOTFOUND")
+ if(NOT METIS_FIND_QUIETLY)
+ message(STATUS "Looking for metis -- metis.h not found")
+ endif()
+endif()
+
+
+# Looking for lib
+# ---------------
+
+# Add system library paths to search lib
+# --------------------------------------
+unset(_lib_env)
+set(ENV_METIS_LIBDIR "$ENV{METIS_LIBDIR}")
+if(ENV_METIS_LIBDIR)
+ list(APPEND _lib_env "${ENV_METIS_LIBDIR}")
+elseif(ENV_METIS_DIR)
+ list(APPEND _lib_env "${ENV_METIS_DIR}")
+ list(APPEND _lib_env "${ENV_METIS_DIR}/lib")
+else()
+ if(WIN32)
+ string(REPLACE ":" ";" _lib_env "$ENV{LIB}")
else()
- set(METIS_VERSION_OK TRUE)
+ if(APPLE)
+ string(REPLACE ":" ";" _lib_env "$ENV{DYLD_LIBRARY_PATH}")
+ else()
+ string(REPLACE ":" ";" _lib_env "$ENV{LD_LIBRARY_PATH}")
+ endif()
+ list(APPEND _lib_env "${CMAKE_PLATFORM_IMPLICIT_LINK_DIRECTORIES}")
+ list(APPEND _lib_env "${CMAKE_C_IMPLICIT_LINK_DIRECTORIES}")
endif()
+endif()
+list(REMOVE_DUPLICATES _lib_env)
+
+# Try to find the metis lib in the given paths
+# ----------------------------------------------
+# call cmake macro to find the lib path
+if(METIS_LIBDIR)
+ set(METIS_metis_LIBRARY "METIS_metis_LIBRARY-NOTFOUND")
+ find_library(METIS_metis_LIBRARY
+ NAMES metis
+ HINTS ${METIS_LIBDIR})
+else()
+ if(METIS_DIR)
+ set(METIS_metis_LIBRARY "METIS_metis_LIBRARY-NOTFOUND")
+ find_library(METIS_metis_LIBRARY
+ NAMES metis
+ HINTS ${METIS_DIR}
+ PATH_SUFFIXES lib lib32 lib64)
+ else()
+ set(METIS_metis_LIBRARY "METIS_metis_LIBRARY-NOTFOUND")
+ find_library(METIS_metis_LIBRARY
+ NAMES metis
+ HINTS ${_lib_env})
+ endif()
+endif()
+mark_as_advanced(METIS_metis_LIBRARY)
-find_library(METIS_LIBRARIES metis PATHS $ENV{METISDIR} ${LIB_INSTALL_DIR} PATH_SUFFIXES lib)
+# If found, add path to cmake variable
+# ------------------------------------
+if (METIS_metis_LIBRARY)
+ get_filename_component(metis_lib_path "${METIS_metis_LIBRARY}" PATH)
+ # set cmake variables
+ set(METIS_LIBRARIES "${METIS_metis_LIBRARY}")
+ set(METIS_LIBRARY_DIRS "${metis_lib_path}")
+else ()
+ set(METIS_LIBRARIES "METIS_LIBRARIES-NOTFOUND")
+ set(METIS_LIBRARY_DIRS "METIS_LIBRARY_DIRS-NOTFOUND")
+ if(NOT METIS_FIND_QUIETLY)
+ message(STATUS "Looking for metis -- lib metis not found")
+ endif()
+endif ()
+# check a function to validate the find
+if(METIS_LIBRARIES)
+
+ set(REQUIRED_INCDIRS)
+ set(REQUIRED_LIBDIRS)
+ set(REQUIRED_LIBS)
+
+ # METIS
+ if (METIS_INCLUDE_DIRS)
+ set(REQUIRED_INCDIRS "${METIS_INCLUDE_DIRS}")
+ endif()
+ if (METIS_LIBRARY_DIRS)
+ set(REQUIRED_LIBDIRS "${METIS_LIBRARY_DIRS}")
+ endif()
+ set(REQUIRED_LIBS "${METIS_LIBRARIES}")
+ # m
+ find_library(M_LIBRARY NAMES m)
+ mark_as_advanced(M_LIBRARY)
+ if(M_LIBRARY)
+ list(APPEND REQUIRED_LIBS "-lm")
+ endif()
+
+ # set required libraries for link
+ set(CMAKE_REQUIRED_INCLUDES "${REQUIRED_INCDIRS}")
+ set(CMAKE_REQUIRED_LIBRARIES)
+ foreach(lib_dir ${REQUIRED_LIBDIRS})
+ list(APPEND CMAKE_REQUIRED_LIBRARIES "-L${lib_dir}")
+ endforeach()
+ list(APPEND CMAKE_REQUIRED_LIBRARIES "${REQUIRED_LIBS}")
+ string(REGEX REPLACE "^ -" "-" CMAKE_REQUIRED_LIBRARIES "${CMAKE_REQUIRED_LIBRARIES}")
+
+ # test link
+ unset(METIS_WORKS CACHE)
+ include(CheckFunctionExists)
+ check_function_exists(METIS_NodeND METIS_WORKS)
+ mark_as_advanced(METIS_WORKS)
+
+ if(NOT METIS_WORKS)
+ if(NOT METIS_FIND_QUIETLY)
+ message(STATUS "Looking for METIS : test of METIS_NodeND with METIS library fails")
+ message(STATUS "CMAKE_REQUIRED_LIBRARIES: ${CMAKE_REQUIRED_LIBRARIES}")
+ message(STATUS "CMAKE_REQUIRED_INCLUDES: ${CMAKE_REQUIRED_INCLUDES}")
+ message(STATUS "Check in CMakeFiles/CMakeError.log to figure out why it fails")
+ endif()
+ endif()
+ set(CMAKE_REQUIRED_INCLUDES)
+ set(CMAKE_REQUIRED_FLAGS)
+ set(CMAKE_REQUIRED_LIBRARIES)
+endif(METIS_LIBRARIES)
+
+if (METIS_LIBRARIES)
+ list(GET METIS_LIBRARIES 0 first_lib)
+ get_filename_component(first_lib_path "${first_lib}" PATH)
+ if (${first_lib_path} MATCHES "/lib(32|64)?$")
+ string(REGEX REPLACE "/lib(32|64)?$" "" not_cached_dir "${first_lib_path}")
+ set(METIS_DIR_FOUND "${not_cached_dir}" CACHE PATH "Installation directory of METIS library" FORCE)
+ else()
+ set(METIS_DIR_FOUND "${first_lib_path}" CACHE PATH "Installation directory of METIS library" FORCE)
+ endif()
+endif()
+mark_as_advanced(METIS_DIR)
+mark_as_advanced(METIS_DIR_FOUND)
+
+# check that METIS has been found
+# ---------------------------------
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(METIS DEFAULT_MSG
- METIS_INCLUDES METIS_LIBRARIES METIS_VERSION_OK)
-
-mark_as_advanced(METIS_INCLUDES METIS_LIBRARIES)
+ METIS_LIBRARIES
+ METIS_WORKS)
+#
+# TODO: Add possibility to check for specific functions in the library
+#
diff --git a/cmake/FindPTSCOTCH.cmake b/cmake/FindPTSCOTCH.cmake
new file mode 100644
index 000000000..1396d0582
--- /dev/null
+++ b/cmake/FindPTSCOTCH.cmake
@@ -0,0 +1,423 @@
+###
+#
+# @copyright (c) 2009-2014 The University of Tennessee and The University
+# of Tennessee Research Foundation.
+# All rights reserved.
+# @copyright (c) 2012-2016 Inria. All rights reserved.
+# @copyright (c) 2012-2014 Bordeaux INP, CNRS (LaBRI UMR 5800), Inria, Univ. Bordeaux. All rights reserved.
+#
+###
+#
+# - Find PTSCOTCH include dirs and libraries
+# Use this module by invoking find_package with the form:
+# find_package(PTSCOTCH
+# [REQUIRED] # Fail with error if ptscotch is not found
+# [COMPONENTS <comp1> <comp2> ...] # dependencies
+# )
+#
+# PTSCOTCH depends on the following libraries:
+# - Threads
+# - MPI
+#
+# COMPONENTS can be some of the following:
+# - ESMUMPS: to activate detection of PT-Scotch with the esmumps interface
+#
+# This module finds headers and ptscotch library.
+# Results are reported in variables:
+# PTSCOTCH_FOUND - True if headers and requested libraries were found
+# PTSCOTCH_LINKER_FLAGS - list of required linker flags (excluding -l and -L)
+# PTSCOTCH_INCLUDE_DIRS - ptscotch include directories
+# PTSCOTCH_LIBRARY_DIRS - Link directories for ptscotch libraries
+# PTSCOTCH_LIBRARIES - ptscotch component libraries to be linked
+# PTSCOTCH_INCLUDE_DIRS_DEP - ptscotch + dependencies include directories
+# PTSCOTCH_LIBRARY_DIRS_DEP - ptscotch + dependencies link directories
+# PTSCOTCH_LIBRARIES_DEP - ptscotch libraries + dependencies
+# PTSCOTCH_INTSIZE - Number of octets occupied by a SCOTCH_Num
+#
+# The user can give specific paths where to find the libraries by adding cmake
+# options at configure time (ex: cmake path/to/project -DPTSCOTCH_DIR=path/to/ptscotch):
+# PTSCOTCH_DIR - Where to find the base directory of ptscotch
+# PTSCOTCH_INCDIR - Where to find the header files
+# PTSCOTCH_LIBDIR - Where to find the library files
+# The module can also look for the following environment variables if paths
+# are not given as cmake variables: PTSCOTCH_DIR, PTSCOTCH_INCDIR, PTSCOTCH_LIBDIR
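A consumer sketch for the dependency-aware variables documented above; the _DEP lists already carry the MPI and Threads pieces, the ESMUMPS component is only needed for the esmumps interface, and the target and source names are invented:

  find_package(PTSCOTCH REQUIRED)                 # add COMPONENTS ESMUMPS if needed
  include_directories(${PTSCOTCH_INCLUDE_DIRS_DEP})
  link_directories(${PTSCOTCH_LIBRARY_DIRS_DEP})
  add_executable(ptscotch_demo ptscotch_demo.c)
  target_link_libraries(ptscotch_demo ${PTSCOTCH_LIBRARIES_DEP})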
+
+#=============================================================================
+# Copyright 2012-2013 Inria
+# Copyright 2012-2013 Emmanuel Agullo
+# Copyright 2012-2013 Mathieu Faverge
+# Copyright 2012 Cedric Castagnede
+# Copyright 2013-2016 Florent Pruvost
+#
+# Distributed under the OSI-approved BSD License (the "License");
+# see accompanying file MORSE-Copyright.txt for details.
+#
+# This software is distributed WITHOUT ANY WARRANTY; without even the
+# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+# See the License for more information.
+#=============================================================================
+# (To distribute this file outside of Morse, substitute the full
+# License text for the above reference.)
+
+if (NOT PTSCOTCH_FOUND)
+ set(PTSCOTCH_DIR "" CACHE PATH "Installation directory of PTSCOTCH library")
+ if (NOT PTSCOTCH_FIND_QUIETLY)
+ message(STATUS "A cache variable, namely PTSCOTCH_DIR, has been set to specify the install directory of PTSCOTCH")
+ endif()
+endif()
+
+# Set the version to find
+set(PTSCOTCH_LOOK_FOR_ESMUMPS OFF)
+
+if( PTSCOTCH_FIND_COMPONENTS )
+ foreach( component ${PTSCOTCH_FIND_COMPONENTS} )
+ if (${component} STREQUAL "ESMUMPS")
+ # means we look for esmumps library
+ set(PTSCOTCH_LOOK_FOR_ESMUMPS ON)
+ endif()
+ endforeach()
+endif()
+
+# PTSCOTCH depends on Threads, try to find it
+if (NOT THREADS_FOUND)
+ if (PTSCOTCH_FIND_REQUIRED)
+ find_package(Threads REQUIRED)
+ else()
+ find_package(Threads)
+ endif()
+endif()
+
+# PTSCOTCH depends on MPI, try to find it
+if (NOT MPI_FOUND)
+ if (PTSCOTCH_FIND_REQUIRED)
+ find_package(MPI REQUIRED)
+ else()
+ find_package(MPI)
+ endif()
+endif()
+
+# Looking for include
+# -------------------
+
+# Add system include paths to search include
+# ------------------------------------------
+unset(_inc_env)
+set(ENV_PTSCOTCH_DIR "$ENV{PTSCOTCH_DIR}")
+set(ENV_PTSCOTCH_INCDIR "$ENV{PTSCOTCH_INCDIR}")
+if(ENV_PTSCOTCH_INCDIR)
+ list(APPEND _inc_env "${ENV_PTSCOTCH_INCDIR}")
+elseif(ENV_PTSCOTCH_DIR)
+ list(APPEND _inc_env "${ENV_PTSCOTCH_DIR}")
+ list(APPEND _inc_env "${ENV_PTSCOTCH_DIR}/include")
+ list(APPEND _inc_env "${ENV_PTSCOTCH_DIR}/include/ptscotch")
+else()
+ if(WIN32)
+ string(REPLACE ":" ";" _inc_env "$ENV{INCLUDE}")
+ else()
+ string(REPLACE ":" ";" _path_env "$ENV{INCLUDE}")
+ list(APPEND _inc_env "${_path_env}")
+ string(REPLACE ":" ";" _path_env "$ENV{C_INCLUDE_PATH}")
+ list(APPEND _inc_env "${_path_env}")
+ string(REPLACE ":" ";" _path_env "$ENV{CPATH}")
+ list(APPEND _inc_env "${_path_env}")
+ string(REPLACE ":" ";" _path_env "$ENV{INCLUDE_PATH}")
+ list(APPEND _inc_env "${_path_env}")
+ endif()
+endif()
+list(APPEND _inc_env "${CMAKE_PLATFORM_IMPLICIT_INCLUDE_DIRECTORIES}")
+list(APPEND _inc_env "${CMAKE_C_IMPLICIT_INCLUDE_DIRECTORIES}")
+list(REMOVE_DUPLICATES _inc_env)
+
+
+# Try to find the ptscotch header in the given paths
+# -------------------------------------------------
+
+set(PTSCOTCH_hdrs_to_find "ptscotch.h;scotch.h")
+
+# call cmake macro to find the header path
+if(PTSCOTCH_INCDIR)
+ foreach(ptscotch_hdr ${PTSCOTCH_hdrs_to_find})
+ set(PTSCOTCH_${ptscotch_hdr}_DIRS "PTSCOTCH_${ptscotch_hdr}_DIRS-NOTFOUND")
+ find_path(PTSCOTCH_${ptscotch_hdr}_DIRS
+ NAMES ${ptscotch_hdr}
+ HINTS ${PTSCOTCH_INCDIR})
+ mark_as_advanced(PTSCOTCH_${ptscotch_hdr}_DIRS)
+ endforeach()
+else()
+ if(PTSCOTCH_DIR)
+ foreach(ptscotch_hdr ${PTSCOTCH_hdrs_to_find})
+ set(PTSCOTCH_${ptscotch_hdr}_DIRS "PTSCOTCH_${ptscotch_hdr}_DIRS-NOTFOUND")
+ find_path(PTSCOTCH_${ptscotch_hdr}_DIRS
+ NAMES ${ptscotch_hdr}
+ HINTS ${PTSCOTCH_DIR}
+ PATH_SUFFIXES "include" "include/scotch")
+ mark_as_advanced(PTSCOTCH_${ptscotch_hdr}_DIRS)
+ endforeach()
+ else()
+ foreach(ptscotch_hdr ${PTSCOTCH_hdrs_to_find})
+ set(PTSCOTCH_${ptscotch_hdr}_DIRS "PTSCOTCH_${ptscotch_hdr}_DIRS-NOTFOUND")
+ find_path(PTSCOTCH_${ptscotch_hdr}_DIRS
+ NAMES ${ptscotch_hdr}
+ HINTS ${_inc_env}
+ PATH_SUFFIXES "scotch")
+ mark_as_advanced(PTSCOTCH_${ptscotch_hdr}_DIRS)
+ endforeach()
+ endif()
+endif()
+
+# If found, add path to cmake variable
+# ------------------------------------
+foreach(ptscotch_hdr ${PTSCOTCH_hdrs_to_find})
+ if (PTSCOTCH_${ptscotch_hdr}_DIRS)
+ list(APPEND PTSCOTCH_INCLUDE_DIRS "${PTSCOTCH_${ptscotch_hdr}_DIRS}")
+ else ()
+ set(PTSCOTCH_INCLUDE_DIRS "PTSCOTCH_INCLUDE_DIRS-NOTFOUND")
+ if (NOT PTSCOTCH_FIND_QUIETLY)
+ message(STATUS "Looking for ptscotch -- ${ptscotch_hdr} not found")
+ endif()
+ endif()
+endforeach()
+list(REMOVE_DUPLICATES PTSCOTCH_INCLUDE_DIRS)
+
+# Looking for lib
+# ---------------
+
+# Add system library paths to search lib
+# --------------------------------------
+unset(_lib_env)
+set(ENV_PTSCOTCH_LIBDIR "$ENV{PTSCOTCH_LIBDIR}")
+if(ENV_PTSCOTCH_LIBDIR)
+ list(APPEND _lib_env "${ENV_PTSCOTCH_LIBDIR}")
+elseif(ENV_PTSCOTCH_DIR)
+ list(APPEND _lib_env "${ENV_PTSCOTCH_DIR}")
+ list(APPEND _lib_env "${ENV_PTSCOTCH_DIR}/lib")
+else()
+ if(WIN32)
+ string(REPLACE ":" ";" _lib_env "$ENV{LIB}")
+ else()
+ if(APPLE)
+ string(REPLACE ":" ";" _lib_env "$ENV{DYLD_LIBRARY_PATH}")
+ else()
+ string(REPLACE ":" ";" _lib_env "$ENV{LD_LIBRARY_PATH}")
+ endif()
+ list(APPEND _lib_env "${CMAKE_PLATFORM_IMPLICIT_LINK_DIRECTORIES}")
+ list(APPEND _lib_env "${CMAKE_C_IMPLICIT_LINK_DIRECTORIES}")
+ endif()
+endif()
+list(REMOVE_DUPLICATES _lib_env)
+
+# Try to find the ptscotch lib in the given paths
+# ----------------------------------------------
+
+set(PTSCOTCH_libs_to_find "ptscotch;ptscotcherr")
+if (PTSCOTCH_LOOK_FOR_ESMUMPS)
+ list(INSERT PTSCOTCH_libs_to_find 0 "ptesmumps")
+ list(APPEND PTSCOTCH_libs_to_find "esmumps" )
+endif()
+list(APPEND PTSCOTCH_libs_to_find "scotch;scotcherr")
+
+# call cmake macro to find the lib path
+if(PTSCOTCH_LIBDIR)
+ foreach(ptscotch_lib ${PTSCOTCH_libs_to_find})
+ set(PTSCOTCH_${ptscotch_lib}_LIBRARY "PTSCOTCH_${ptscotch_lib}_LIBRARY-NOTFOUND")
+ find_library(PTSCOTCH_${ptscotch_lib}_LIBRARY
+ NAMES ${ptscotch_lib}
+ HINTS ${PTSCOTCH_LIBDIR})
+ endforeach()
+else()
+ if(PTSCOTCH_DIR)
+ foreach(ptscotch_lib ${PTSCOTCH_libs_to_find})
+ set(PTSCOTCH_${ptscotch_lib}_LIBRARY "PTSCOTCH_${ptscotch_lib}_LIBRARY-NOTFOUND")
+ find_library(PTSCOTCH_${ptscotch_lib}_LIBRARY
+ NAMES ${ptscotch_lib}
+ HINTS ${PTSCOTCH_DIR}
+ PATH_SUFFIXES lib lib32 lib64)
+ endforeach()
+ else()
+ foreach(ptscotch_lib ${PTSCOTCH_libs_to_find})
+ set(PTSCOTCH_${ptscotch_lib}_LIBRARY "PTSCOTCH_${ptscotch_lib}_LIBRARY-NOTFOUND")
+ find_library(PTSCOTCH_${ptscotch_lib}_LIBRARY
+ NAMES ${ptscotch_lib}
+ HINTS ${_lib_env})
+ endforeach()
+ endif()
+endif()
+
+set(PTSCOTCH_LIBRARIES "")
+set(PTSCOTCH_LIBRARY_DIRS "")
+# If found, add path to cmake variable
+# ------------------------------------
+foreach(ptscotch_lib ${PTSCOTCH_libs_to_find})
+
+ if (PTSCOTCH_${ptscotch_lib}_LIBRARY)
+ get_filename_component(${ptscotch_lib}_lib_path "${PTSCOTCH_${ptscotch_lib}_LIBRARY}" PATH)
+ # set cmake variables
+ list(APPEND PTSCOTCH_LIBRARIES "${PTSCOTCH_${ptscotch_lib}_LIBRARY}")
+ list(APPEND PTSCOTCH_LIBRARY_DIRS "${${ptscotch_lib}_lib_path}")
+ else ()
+ list(APPEND PTSCOTCH_LIBRARIES "${PTSCOTCH_${ptscotch_lib}_LIBRARY}")
+ if (NOT PTSCOTCH_FIND_QUIETLY)
+ message(STATUS "Looking for ptscotch -- lib ${ptscotch_lib} not found")
+ endif()
+ endif ()
+
+ mark_as_advanced(PTSCOTCH_${ptscotch_lib}_LIBRARY)
+
+endforeach()
+list(REMOVE_DUPLICATES PTSCOTCH_LIBRARY_DIRS)
+
+# check a function to validate the find
+if(PTSCOTCH_LIBRARIES)
+
+ set(REQUIRED_LDFLAGS)
+ set(REQUIRED_INCDIRS)
+ set(REQUIRED_LIBDIRS)
+ set(REQUIRED_LIBS)
+
+ # PTSCOTCH
+ if (PTSCOTCH_INCLUDE_DIRS)
+ set(REQUIRED_INCDIRS "${PTSCOTCH_INCLUDE_DIRS}")
+ endif()
+ if (PTSCOTCH_LIBRARY_DIRS)
+ set(REQUIRED_LIBDIRS "${PTSCOTCH_LIBRARY_DIRS}")
+ endif()
+ set(REQUIRED_LIBS "${PTSCOTCH_LIBRARIES}")
+ # MPI
+ if (MPI_FOUND)
+ if (MPI_C_INCLUDE_PATH)
+ list(APPEND CMAKE_REQUIRED_INCLUDES "${MPI_C_INCLUDE_PATH}")
+ endif()
+ if (MPI_C_LINK_FLAGS)
+ if (${MPI_C_LINK_FLAGS} MATCHES " -")
+ string(REGEX REPLACE " -" "-" MPI_C_LINK_FLAGS ${MPI_C_LINK_FLAGS})
+ endif()
+ list(APPEND REQUIRED_LDFLAGS "${MPI_C_LINK_FLAGS}")
+ endif()
+ list(APPEND REQUIRED_LIBS "${MPI_C_LIBRARIES}")
+ endif()
+ # THREADS
+ if(CMAKE_THREAD_LIBS_INIT)
+ list(APPEND REQUIRED_LIBS "${CMAKE_THREAD_LIBS_INIT}")
+ endif()
+ set(Z_LIBRARY "Z_LIBRARY-NOTFOUND")
+ find_library(Z_LIBRARY NAMES z)
+ mark_as_advanced(Z_LIBRARY)
+ if(Z_LIBRARY)
+ list(APPEND REQUIRED_LIBS "-lz")
+ endif()
+ set(M_LIBRARY "M_LIBRARY-NOTFOUND")
+ find_library(M_LIBRARY NAMES m)
+ mark_as_advanced(M_LIBRARY)
+ if(M_LIBRARY)
+ list(APPEND REQUIRED_LIBS "-lm")
+ endif()
+ set(RT_LIBRARY "RT_LIBRARY-NOTFOUND")
+ find_library(RT_LIBRARY NAMES rt)
+ mark_as_advanced(RT_LIBRARY)
+ if(RT_LIBRARY)
+ list(APPEND REQUIRED_LIBS "-lrt")
+ endif()
+
+ # set required libraries for link
+ set(CMAKE_REQUIRED_INCLUDES "${REQUIRED_INCDIRS}")
+ set(CMAKE_REQUIRED_LIBRARIES)
+ list(APPEND CMAKE_REQUIRED_LIBRARIES "${REQUIRED_LDFLAGS}")
+ foreach(lib_dir ${REQUIRED_LIBDIRS})
+ list(APPEND CMAKE_REQUIRED_LIBRARIES "-L${lib_dir}")
+ endforeach()
+ list(APPEND CMAKE_REQUIRED_LIBRARIES "${REQUIRED_LIBS}")
+ list(APPEND CMAKE_REQUIRED_FLAGS "${REQUIRED_FLAGS}")
+ string(REGEX REPLACE "^ -" "-" CMAKE_REQUIRED_LIBRARIES "${CMAKE_REQUIRED_LIBRARIES}")
+
+ # test link
+ unset(PTSCOTCH_WORKS CACHE)
+ include(CheckFunctionExists)
+ check_function_exists(SCOTCH_dgraphInit PTSCOTCH_WORKS)
+ mark_as_advanced(PTSCOTCH_WORKS)
+
+ if(PTSCOTCH_WORKS)
+ # save link with dependencies
+ set(PTSCOTCH_LIBRARIES_DEP "${REQUIRED_LIBS}")
+ set(PTSCOTCH_LIBRARY_DIRS_DEP "${REQUIRED_LIBDIRS}")
+ set(PTSCOTCH_INCLUDE_DIRS_DEP "${REQUIRED_INCDIRS}")
+ set(PTSCOTCH_LINKER_FLAGS "${REQUIRED_LDFLAGS}")
+ list(REMOVE_DUPLICATES PTSCOTCH_LIBRARY_DIRS_DEP)
+ list(REMOVE_DUPLICATES PTSCOTCH_INCLUDE_DIRS_DEP)
+ list(REMOVE_DUPLICATES PTSCOTCH_LINKER_FLAGS)
+ else()
+ if(NOT PTSCOTCH_FIND_QUIETLY)
+ message(STATUS "Looking for PTSCOTCH : test of SCOTCH_dgraphInit with PTSCOTCH library fails")
+ message(STATUS "CMAKE_REQUIRED_LIBRARIES: ${CMAKE_REQUIRED_LIBRARIES}")
+ message(STATUS "CMAKE_REQUIRED_INCLUDES: ${CMAKE_REQUIRED_INCLUDES}")
+ message(STATUS "Check in CMakeFiles/CMakeError.log to figure out why it fails")
+ endif()
+ endif()
+ set(CMAKE_REQUIRED_INCLUDES)
+ set(CMAKE_REQUIRED_FLAGS)
+ set(CMAKE_REQUIRED_LIBRARIES)
+endif(PTSCOTCH_LIBRARIES)
+
+if (PTSCOTCH_LIBRARIES)
+ list(GET PTSCOTCH_LIBRARIES 0 first_lib)
+ get_filename_component(first_lib_path "${first_lib}" PATH)
+ if (${first_lib_path} MATCHES "/lib(32|64)?$")
+ string(REGEX REPLACE "/lib(32|64)?$" "" not_cached_dir "${first_lib_path}")
+ set(PTSCOTCH_DIR_FOUND "${not_cached_dir}" CACHE PATH "Installation directory of PTSCOTCH library" FORCE)
+ else()
+ set(PTSCOTCH_DIR_FOUND "${first_lib_path}" CACHE PATH "Installation directory of PTSCOTCH library" FORCE)
+ endif()
+endif()
+mark_as_advanced(PTSCOTCH_DIR)
+mark_as_advanced(PTSCOTCH_DIR_FOUND)
+
+# Check the size of SCOTCH_Num
+# ---------------------------------
+set(CMAKE_REQUIRED_INCLUDES ${PTSCOTCH_INCLUDE_DIRS})
+
+include(CheckCSourceRuns)
+#stdio.h and stdint.h should be included by scotch.h directly
+set(PTSCOTCH_C_TEST_SCOTCH_Num_4 "
+#include <stdio.h>
+#include <stdint.h>
+#include <ptscotch.h>
+int main(int argc, char **argv) {
+ if (sizeof(SCOTCH_Num) == 4)
+ return 0;
+ else
+ return 1;
+}
+")
+
+set(PTSCOTCH_C_TEST_SCOTCH_Num_8 "
+#include <stdio.h>
+#include <stdint.h>
+#include <ptscotch.h>
+int main(int argc, char **argv) {
+ if (sizeof(SCOTCH_Num) == 8)
+ return 0;
+ else
+ return 1;
+}
+")
+check_c_source_runs("${PTSCOTCH_C_TEST_SCOTCH_Num_4}" PTSCOTCH_Num_4)
+if(NOT PTSCOTCH_Num_4)
+ check_c_source_runs("${PTSCOTCH_C_TEST_SCOTCH_Num_8}" PTSCOTCH_Num_8)
+ if(NOT PTSCOTCH_Num_8)
+ set(PTSCOTCH_INTSIZE -1)
+ else()
+ set(PTSCOTCH_INTSIZE 8)
+ endif()
+else()
+ set(PTSCOTCH_INTSIZE 4)
+endif()
+set(CMAKE_REQUIRED_INCLUDES "")
+
+# check that PTSCOTCH has been found
+# ---------------------------------
+include(FindPackageHandleStandardArgs)
+find_package_handle_standard_args(PTSCOTCH DEFAULT_MSG
+ PTSCOTCH_LIBRARIES
+ PTSCOTCH_WORKS)
+#
+# TODO: Add possibility to check for specific functions in the library
+#
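+# Example (sketch): a consuming CMakeLists.txt would typically use the
+# variables reported by this module along the following lines; the target
+# name "mysolver" is purely illustrative.
+#   find_package(PTSCOTCH COMPONENTS ESMUMPS)
+#   if (PTSCOTCH_FOUND)
+#     target_include_directories(mysolver PRIVATE ${PTSCOTCH_INCLUDE_DIRS_DEP})
+#     target_link_libraries(mysolver PRIVATE ${PTSCOTCH_LIBRARIES_DEP})
+#   endif()
+#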
diff --git a/cmake/FindPastix.cmake b/cmake/FindPastix.cmake
index e2e6c810d..470477fdc 100644
--- a/cmake/FindPastix.cmake
+++ b/cmake/FindPastix.cmake
@@ -1,25 +1,704 @@
-# Pastix lib requires linking to a blas library.
-# It is up to the user of this module to find a BLAS and link to it.
-# Pastix requires SCOTCH or METIS (partitioning and reordering tools) as well
+###
+#
+# @copyright (c) 2009-2014 The University of Tennessee and The University
+# of Tennessee Research Foundation.
+# All rights reserved.
+# @copyright (c) 2012-2014 Inria. All rights reserved.
+# @copyright (c) 2012-2014 Bordeaux INP, CNRS (LaBRI UMR 5800), Inria, Univ. Bordeaux. All rights reserved.
+#
+###
+#
+# - Find PASTIX include dirs and libraries
+# Use this module by invoking find_package with the form:
+# find_package(PASTIX
+# [REQUIRED] # Fail with error if pastix is not found
+# [COMPONENTS <comp1> <comp2> ...] # dependencies
+# )
+#
+# PASTIX depends on the following libraries:
+# - Threads, m, rt
+# - MPI
+# - HWLOC
+# - BLAS
+#
+# COMPONENTS are optional libraries PASTIX could be linked with,
+# Use it to drive detection of a specific compilation chain
+# COMPONENTS can be some of the following:
+# - MPI: to activate detection of the parallel MPI version (default)
+# it looks for Threads, HWLOC, BLAS, MPI and ScaLAPACK libraries
+# - SEQ: to activate detection of the sequential version (exclude MPI version)
+# - STARPU: to activate detection of StarPU version
+# it looks for MPI version of StarPU (default behaviour)
+# if SEQ and STARPU are given, it looks for a StarPU without MPI
+# - STARPU_CUDA: to activate detection of StarPU with CUDA
+# - STARPU_FXT: to activate detection of StarPU with FxT
+# - SCOTCH: to activate detection of PASTIX linked with SCOTCH
+#   - PTSCOTCH: to activate detection of PASTIX linked with PTSCOTCH
+#   - METIS: to activate detection of PASTIX linked with METIS
+#
+# This module finds headers and pastix library.
+# Results are reported in variables:
+# PASTIX_FOUND - True if headers and requested libraries were found
+# PASTIX_LINKER_FLAGS - list of required linker flags (excluding -l and -L)
+# PASTIX_INCLUDE_DIRS - pastix include directories
+# PASTIX_LIBRARY_DIRS - Link directories for pastix libraries
+# PASTIX_LIBRARIES - pastix libraries
+# PASTIX_INCLUDE_DIRS_DEP - pastix + dependencies include directories
+# PASTIX_LIBRARY_DIRS_DEP - pastix + dependencies link directories
+# PASTIX_LIBRARIES_DEP - pastix libraries + dependencies
+#
+# The user can give specific paths where to find the libraries by adding CMake
+# options at configure time (ex: cmake path/to/project -DPASTIX_DIR=path/to/pastix):
+# PASTIX_DIR - Where to find the base directory of pastix
+# PASTIX_INCDIR - Where to find the header files
+# PASTIX_LIBDIR - Where to find the library files
+# The module can also look for the following environment variables if paths
+# are not given as cmake variable: PASTIX_DIR, PASTIX_INCDIR, PASTIX_LIBDIR
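+#
+# Example (sketch): a project would typically consume this module as follows;
+# the target name "mysolver" and the chosen COMPONENTS are illustrative only.
+#   find_package(PASTIX COMPONENTS MPI SCOTCH)
+#   if (PASTIX_FOUND)
+#     target_include_directories(mysolver PRIVATE ${PASTIX_INCLUDE_DIRS_DEP})
+#     target_link_libraries(mysolver PRIVATE ${PASTIX_LIBRARIES_DEP})
+#   endif()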
-if (PASTIX_INCLUDES AND PASTIX_LIBRARIES)
- set(PASTIX_FIND_QUIETLY TRUE)
-endif (PASTIX_INCLUDES AND PASTIX_LIBRARIES)
+#=============================================================================
+# Copyright 2012-2013 Inria
+# Copyright 2012-2013 Emmanuel Agullo
+# Copyright 2012-2013 Mathieu Faverge
+# Copyright 2012 Cedric Castagnede
+# Copyright 2013 Florent Pruvost
+#
+# Distributed under the OSI-approved BSD License (the "License");
+# see accompanying file MORSE-Copyright.txt for details.
+#
+# This software is distributed WITHOUT ANY WARRANTY; without even the
+# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+# See the License for more information.
+#=============================================================================
+# (To distribute this file outside of Morse, substitute the full
+# License text for the above reference.)
-find_path(PASTIX_INCLUDES
- NAMES
- pastix_nompi.h
- PATHS
- $ENV{PASTIXDIR}
- ${INCLUDE_INSTALL_DIR}
-)
-find_library(PASTIX_LIBRARIES pastix PATHS $ENV{PASTIXDIR} ${LIB_INSTALL_DIR})
+if (NOT PASTIX_FOUND)
+ set(PASTIX_DIR "" CACHE PATH "Installation directory of PASTIX library")
+ if (NOT PASTIX_FIND_QUIETLY)
+ message(STATUS "A cache variable, namely PASTIX_DIR, has been set to specify the install directory of PASTIX")
+ endif()
+endif()
+# Set the version to find
+set(PASTIX_LOOK_FOR_MPI ON)
+set(PASTIX_LOOK_FOR_SEQ OFF)
+set(PASTIX_LOOK_FOR_STARPU OFF)
+set(PASTIX_LOOK_FOR_STARPU_CUDA OFF)
+set(PASTIX_LOOK_FOR_STARPU_FXT OFF)
+set(PASTIX_LOOK_FOR_SCOTCH ON)
+set(PASTIX_LOOK_FOR_PTSCOTCH OFF)
+set(PASTIX_LOOK_FOR_METIS OFF)
+if( PASTIX_FIND_COMPONENTS )
+ foreach( component ${PASTIX_FIND_COMPONENTS} )
+ if (${component} STREQUAL "SEQ")
+ # means we look for the sequential version of PaStiX (without MPI)
+ set(PASTIX_LOOK_FOR_SEQ ON)
+ set(PASTIX_LOOK_FOR_MPI OFF)
+ endif()
+ if (${component} STREQUAL "MPI")
+ # means we look for the MPI version of PaStiX (default)
+ set(PASTIX_LOOK_FOR_SEQ OFF)
+ set(PASTIX_LOOK_FOR_MPI ON)
+ endif()
+ if (${component} STREQUAL "STARPU")
+ # means we look for PaStiX with StarPU
+ set(PASTIX_LOOK_FOR_STARPU ON)
+ endif()
+ if (${component} STREQUAL "STARPU_CUDA")
+ # means we look for PaStiX with StarPU + CUDA
+ set(PASTIX_LOOK_FOR_STARPU ON)
+ set(PASTIX_LOOK_FOR_STARPU_CUDA ON)
+ endif()
+ if (${component} STREQUAL "STARPU_FXT")
+ # means we look for PaStiX with StarPU + FxT
+ set(PASTIX_LOOK_FOR_STARPU_FXT ON)
+ endif()
+ if (${component} STREQUAL "SCOTCH")
+ set(PASTIX_LOOK_FOR_SCOTCH ON)
+ endif()
+    if (${component} STREQUAL "PTSCOTCH")
+ set(PASTIX_LOOK_FOR_PTSCOTCH ON)
+ endif()
+ if (${component} STREQUAL "METIS")
+ set(PASTIX_LOOK_FOR_METIS ON)
+ endif()
+ endforeach()
+endif()
+# Dependencies detection
+# ----------------------
+
+
+# Required dependencies
+# ---------------------
+
+if (NOT PASTIX_FIND_QUIETLY)
+ message(STATUS "Looking for PASTIX - Try to detect pthread")
+endif()
+if (PASTIX_FIND_REQUIRED)
+ find_package(Threads REQUIRED QUIET)
+else()
+ find_package(Threads QUIET)
+endif()
+set(PASTIX_EXTRA_LIBRARIES "")
+if( THREADS_FOUND )
+ list(APPEND PASTIX_EXTRA_LIBRARIES ${CMAKE_THREAD_LIBS_INIT})
+endif ()
+
+# Add math library to the list of extra libraries;
+# it normally exists on all common systems provided with a C compiler
+if (NOT PASTIX_FIND_QUIETLY)
+ message(STATUS "Looking for PASTIX - Try to detect libm")
+endif()
+set(PASTIX_M_LIBRARIES "")
+if(UNIX OR WIN32)
+ find_library(
+ PASTIX_M_m_LIBRARY
+ NAMES m
+ )
+ mark_as_advanced(PASTIX_M_m_LIBRARY)
+ if (PASTIX_M_m_LIBRARY)
+ list(APPEND PASTIX_M_LIBRARIES "${PASTIX_M_m_LIBRARY}")
+ list(APPEND PASTIX_EXTRA_LIBRARIES "${PASTIX_M_m_LIBRARY}")
+ else()
+ if (PASTIX_FIND_REQUIRED)
+      message(FATAL_ERROR "Could NOT find libm on your system."
+      "Are you sure you have a C compiler installed?")
+ endif()
+ endif()
+endif()
+
+# Try to find librt (libposix4 - POSIX.1b Realtime Extensions library)
+# on Unix systems, except Apple ones, where it does not exist
+if (NOT PASTIX_FIND_QUIETLY)
+ message(STATUS "Looking for PASTIX - Try to detect librt")
+endif()
+set(PASTIX_RT_LIBRARIES "")
+if(UNIX AND NOT APPLE)
+ find_library(
+ PASTIX_RT_rt_LIBRARY
+ NAMES rt
+ )
+ mark_as_advanced(PASTIX_RT_rt_LIBRARY)
+ if (PASTIX_RT_rt_LIBRARY)
+ list(APPEND PASTIX_RT_LIBRARIES "${PASTIX_RT_rt_LIBRARY}")
+ list(APPEND PASTIX_EXTRA_LIBRARIES "${PASTIX_RT_rt_LIBRARY}")
+ else()
+ if (PASTIX_FIND_REQUIRED)
+ message(FATAL_ERROR "Could NOT find librt on your system")
+ endif()
+ endif()
+endif()
+
+# PASTIX depends on HWLOC
+#------------------------
+if (NOT PASTIX_FIND_QUIETLY)
+ message(STATUS "Looking for PASTIX - Try to detect HWLOC")
+endif()
+if (PASTIX_FIND_REQUIRED)
+ find_package(HWLOC REQUIRED QUIET)
+else()
+ find_package(HWLOC QUIET)
+endif()
+
+# PASTIX depends on BLAS
+#-----------------------
+if (NOT PASTIX_FIND_QUIETLY)
+ message(STATUS "Looking for PASTIX - Try to detect BLAS")
+endif()
+if (PASTIX_FIND_REQUIRED)
+ find_package(BLASEXT REQUIRED QUIET)
+else()
+ find_package(BLASEXT QUIET)
+endif()
+
+# Optional dependencies
+# ---------------------
+
+# PASTIX may depend on MPI
+#-------------------------
+if (NOT MPI_FOUND AND PASTIX_LOOK_FOR_MPI)
+ if (NOT PASTIX_FIND_QUIETLY)
+ message(STATUS "Looking for PASTIX - Try to detect MPI")
+ endif()
+  # allows using an external MPI build by setting the compilers with
+  # -DMPI_C_COMPILER=path/to/mpicc -DMPI_Fortran_COMPILER=path/to/mpif90
+  # at CMake configure time
+ if(NOT MPI_C_COMPILER)
+ set(MPI_C_COMPILER mpicc)
+ endif()
+ if (PASTIX_FIND_REQUIRED AND PASTIX_FIND_REQUIRED_MPI)
+ find_package(MPI REQUIRED QUIET)
+ else()
+ find_package(MPI QUIET)
+ endif()
+ if (MPI_FOUND)
+ mark_as_advanced(MPI_LIBRARY)
+ mark_as_advanced(MPI_EXTRA_LIBRARY)
+ endif()
+endif (NOT MPI_FOUND AND PASTIX_LOOK_FOR_MPI)
+
+# PASTIX may depend on STARPU
+#----------------------------
+if( NOT STARPU_FOUND AND PASTIX_LOOK_FOR_STARPU)
+
+ if (NOT PASTIX_FIND_QUIETLY)
+ message(STATUS "Looking for PASTIX - Try to detect StarPU")
+ endif()
+
+ set(PASTIX_STARPU_VERSION "1.1" CACHE STRING "oldest STARPU version desired")
+
+ # create list of components in order to make a single call to find_package(starpu...)
+ # we explicitly need a StarPU version built with hwloc
+ set(STARPU_COMPONENT_LIST "HWLOC")
+
+ # StarPU may depend on MPI
+  # allows using an external MPI build by setting the compilers with
+  # -DMPI_C_COMPILER=path/to/mpicc -DMPI_Fortran_COMPILER=path/to/mpif90
+  # at CMake configure time
+ if (PASTIX_LOOK_FOR_MPI)
+ if(NOT MPI_C_COMPILER)
+ set(MPI_C_COMPILER mpicc)
+ endif()
+ list(APPEND STARPU_COMPONENT_LIST "MPI")
+ endif()
+ if (PASTIX_LOOK_FOR_STARPU_CUDA)
+ list(APPEND STARPU_COMPONENT_LIST "CUDA")
+ endif()
+ if (PASTIX_LOOK_FOR_STARPU_FXT)
+ list(APPEND STARPU_COMPONENT_LIST "FXT")
+ endif()
+ # set the list of optional dependencies we may discover
+ if (PASTIX_FIND_REQUIRED AND PASTIX_FIND_REQUIRED_STARPU)
+ find_package(STARPU ${PASTIX_STARPU_VERSION} REQUIRED
+ COMPONENTS ${STARPU_COMPONENT_LIST})
+ else()
+ find_package(STARPU ${PASTIX_STARPU_VERSION}
+ COMPONENTS ${STARPU_COMPONENT_LIST})
+ endif()
+
+endif( NOT STARPU_FOUND AND PASTIX_LOOK_FOR_STARPU)
+
+# PASTIX may depend on SCOTCH
+#-----------------------------
+if (NOT SCOTCH_FOUND AND PASTIX_LOOK_FOR_SCOTCH)
+ if (NOT PASTIX_FIND_QUIETLY)
+ message(STATUS "Looking for PASTIX - Try to detect SCOTCH")
+ endif()
+ if (PASTIX_FIND_REQUIRED AND PASTIX_FIND_REQUIRED_SCOTCH)
+ find_package(SCOTCH REQUIRED QUIET)
+ else()
+ find_package(SCOTCH QUIET)
+ endif()
+endif()
+
+# PASTIX may depend on PTSCOTCH
+#-------------------------------
+if (NOT PTSCOTCH_FOUND AND PASTIX_LOOK_FOR_PTSCOTCH)
+ if (NOT PASTIX_FIND_QUIETLY)
+ message(STATUS "Looking for PASTIX - Try to detect PTSCOTCH")
+ endif()
+ if (PASTIX_FIND_REQUIRED AND PASTIX_FIND_REQUIRED_PTSCOTCH)
+ find_package(PTSCOTCH REQUIRED QUIET)
+ else()
+ find_package(PTSCOTCH QUIET)
+ endif()
+endif()
+
+# PASTIX may depend on METIS
+#----------------------------
+if (NOT METIS_FOUND AND PASTIX_LOOK_FOR_METIS)
+ if (NOT PASTIX_FIND_QUIETLY)
+ message(STATUS "Looking for PASTIX - Try to detect METIS")
+ endif()
+ if (PASTIX_FIND_REQUIRED AND PASTIX_FIND_REQUIRED_METIS)
+ find_package(METIS REQUIRED QUIET)
+ else()
+ find_package(METIS QUIET)
+ endif()
+endif()
+
+# Error if pastix required and no partitioning lib found
+if (PASTIX_FIND_REQUIRED AND NOT SCOTCH_FOUND AND NOT PTSCOTCH_FOUND AND NOT METIS_FOUND)
+ message(FATAL_ERROR "Could NOT find any partitioning library on your system"
+ " (install scotch, ptscotch or metis)")
+endif()
+
+
+# Looking for PaStiX
+# ------------------
+
+# Looking for include
+# -------------------
+
+# Add system include paths to search include
+# ------------------------------------------
+unset(_inc_env)
+set(ENV_PASTIX_DIR "$ENV{PASTIX_DIR}")
+set(ENV_PASTIX_INCDIR "$ENV{PASTIX_INCDIR}")
+if(ENV_PASTIX_INCDIR)
+ list(APPEND _inc_env "${ENV_PASTIX_INCDIR}")
+elseif(ENV_PASTIX_DIR)
+ list(APPEND _inc_env "${ENV_PASTIX_DIR}")
+ list(APPEND _inc_env "${ENV_PASTIX_DIR}/include")
+ list(APPEND _inc_env "${ENV_PASTIX_DIR}/include/pastix")
+else()
+ if(WIN32)
+ string(REPLACE ":" ";" _inc_env "$ENV{INCLUDE}")
+ else()
+ string(REPLACE ":" ";" _path_env "$ENV{INCLUDE}")
+ list(APPEND _inc_env "${_path_env}")
+ string(REPLACE ":" ";" _path_env "$ENV{C_INCLUDE_PATH}")
+ list(APPEND _inc_env "${_path_env}")
+ string(REPLACE ":" ";" _path_env "$ENV{CPATH}")
+ list(APPEND _inc_env "${_path_env}")
+ string(REPLACE ":" ";" _path_env "$ENV{INCLUDE_PATH}")
+ list(APPEND _inc_env "${_path_env}")
+ endif()
+endif()
+list(APPEND _inc_env "${CMAKE_PLATFORM_IMPLICIT_INCLUDE_DIRECTORIES}")
+list(APPEND _inc_env "${CMAKE_C_IMPLICIT_INCLUDE_DIRECTORIES}")
+list(REMOVE_DUPLICATES _inc_env)
+
+
+# Try to find the pastix header in the given paths
+# ---------------------------------------------------
+# call cmake macro to find the header path
+if(PASTIX_INCDIR)
+ set(PASTIX_pastix.h_DIRS "PASTIX_pastix.h_DIRS-NOTFOUND")
+ find_path(PASTIX_pastix.h_DIRS
+ NAMES pastix.h
+ HINTS ${PASTIX_INCDIR})
+else()
+ if(PASTIX_DIR)
+ set(PASTIX_pastix.h_DIRS "PASTIX_pastix.h_DIRS-NOTFOUND")
+ find_path(PASTIX_pastix.h_DIRS
+ NAMES pastix.h
+ HINTS ${PASTIX_DIR}
+ PATH_SUFFIXES "include" "include/pastix")
+ else()
+ set(PASTIX_pastix.h_DIRS "PASTIX_pastix.h_DIRS-NOTFOUND")
+ find_path(PASTIX_pastix.h_DIRS
+ NAMES pastix.h
+ HINTS ${_inc_env}
+ PATH_SUFFIXES "pastix")
+ endif()
+endif()
+mark_as_advanced(PASTIX_pastix.h_DIRS)
+
+# If found, add path to cmake variable
+# ------------------------------------
+if (PASTIX_pastix.h_DIRS)
+ set(PASTIX_INCLUDE_DIRS "${PASTIX_pastix.h_DIRS}")
+else ()
+ set(PASTIX_INCLUDE_DIRS "PASTIX_INCLUDE_DIRS-NOTFOUND")
+ if(NOT PASTIX_FIND_QUIETLY)
+ message(STATUS "Looking for pastix -- pastix.h not found")
+ endif()
+endif()
+
+
+# Looking for lib
+# ---------------
+
+# Add system library paths to search lib
+# --------------------------------------
+unset(_lib_env)
+set(ENV_PASTIX_LIBDIR "$ENV{PASTIX_LIBDIR}")
+if(ENV_PASTIX_LIBDIR)
+ list(APPEND _lib_env "${ENV_PASTIX_LIBDIR}")
+elseif(ENV_PASTIX_DIR)
+ list(APPEND _lib_env "${ENV_PASTIX_DIR}")
+ list(APPEND _lib_env "${ENV_PASTIX_DIR}/lib")
+else()
+ if(WIN32)
+ string(REPLACE ":" ";" _lib_env "$ENV{LIB}")
+ else()
+ if(APPLE)
+ string(REPLACE ":" ";" _lib_env "$ENV{DYLD_LIBRARY_PATH}")
+ else()
+ string(REPLACE ":" ";" _lib_env "$ENV{LD_LIBRARY_PATH}")
+ endif()
+ list(APPEND _lib_env "${CMAKE_PLATFORM_IMPLICIT_LINK_DIRECTORIES}")
+ list(APPEND _lib_env "${CMAKE_C_IMPLICIT_LINK_DIRECTORIES}")
+ endif()
+endif()
+list(REMOVE_DUPLICATES _lib_env)
+
+# Try to find the pastix lib in the given paths
+# ------------------------------------------------
+
+# create list of libs to find
+set(PASTIX_libs_to_find "pastix_murge;pastix")
+
+# call cmake macro to find the lib path
+if(PASTIX_LIBDIR)
+ foreach(pastix_lib ${PASTIX_libs_to_find})
+ set(PASTIX_${pastix_lib}_LIBRARY "PASTIX_${pastix_lib}_LIBRARY-NOTFOUND")
+ find_library(PASTIX_${pastix_lib}_LIBRARY
+ NAMES ${pastix_lib}
+ HINTS ${PASTIX_LIBDIR})
+ endforeach()
+else()
+ if(PASTIX_DIR)
+ foreach(pastix_lib ${PASTIX_libs_to_find})
+ set(PASTIX_${pastix_lib}_LIBRARY "PASTIX_${pastix_lib}_LIBRARY-NOTFOUND")
+ find_library(PASTIX_${pastix_lib}_LIBRARY
+ NAMES ${pastix_lib}
+ HINTS ${PASTIX_DIR}
+ PATH_SUFFIXES lib lib32 lib64)
+ endforeach()
+ else()
+ foreach(pastix_lib ${PASTIX_libs_to_find})
+ set(PASTIX_${pastix_lib}_LIBRARY "PASTIX_${pastix_lib}_LIBRARY-NOTFOUND")
+ find_library(PASTIX_${pastix_lib}_LIBRARY
+ NAMES ${pastix_lib}
+ HINTS ${_lib_env})
+ endforeach()
+ endif()
+endif()
+
+# If found, add path to cmake variable
+# ------------------------------------
+foreach(pastix_lib ${PASTIX_libs_to_find})
+
+ get_filename_component(${pastix_lib}_lib_path ${PASTIX_${pastix_lib}_LIBRARY} PATH)
+ # set cmake variables (respects naming convention)
+ if (PASTIX_LIBRARIES)
+ list(APPEND PASTIX_LIBRARIES "${PASTIX_${pastix_lib}_LIBRARY}")
+ else()
+ set(PASTIX_LIBRARIES "${PASTIX_${pastix_lib}_LIBRARY}")
+ endif()
+ if (PASTIX_LIBRARY_DIRS)
+ list(APPEND PASTIX_LIBRARY_DIRS "${${pastix_lib}_lib_path}")
+ else()
+ set(PASTIX_LIBRARY_DIRS "${${pastix_lib}_lib_path}")
+ endif()
+ mark_as_advanced(PASTIX_${pastix_lib}_LIBRARY)
+
+endforeach(pastix_lib ${PASTIX_libs_to_find})
+
+# check a function to validate the find
+if(PASTIX_LIBRARIES)
+
+ set(REQUIRED_LDFLAGS)
+ set(REQUIRED_INCDIRS)
+ set(REQUIRED_LIBDIRS)
+ set(REQUIRED_LIBS)
+
+ # PASTIX
+ if (PASTIX_INCLUDE_DIRS)
+ set(REQUIRED_INCDIRS "${PASTIX_INCLUDE_DIRS}")
+ endif()
+ foreach(libdir ${PASTIX_LIBRARY_DIRS})
+ if (libdir)
+ list(APPEND REQUIRED_LIBDIRS "${libdir}")
+ endif()
+ endforeach()
+ set(REQUIRED_LIBS "${PASTIX_LIBRARIES}")
+ # STARPU
+ if (PASTIX_LOOK_FOR_STARPU AND STARPU_FOUND)
+ if (STARPU_INCLUDE_DIRS_DEP)
+ list(APPEND REQUIRED_INCDIRS "${STARPU_INCLUDE_DIRS_DEP}")
+ elseif (STARPU_INCLUDE_DIRS)
+ list(APPEND REQUIRED_INCDIRS "${STARPU_INCLUDE_DIRS}")
+ endif()
+ if(STARPU_LIBRARY_DIRS_DEP)
+ list(APPEND REQUIRED_LIBDIRS "${STARPU_LIBRARY_DIRS_DEP}")
+ elseif(STARPU_LIBRARY_DIRS)
+ list(APPEND REQUIRED_LIBDIRS "${STARPU_LIBRARY_DIRS}")
+ endif()
+ if (STARPU_LIBRARIES_DEP)
+ list(APPEND REQUIRED_LIBS "${STARPU_LIBRARIES_DEP}")
+ elseif (STARPU_LIBRARIES)
+ foreach(lib ${STARPU_LIBRARIES})
+ if (EXISTS ${lib} OR ${lib} MATCHES "^-")
+ list(APPEND REQUIRED_LIBS "${lib}")
+ else()
+ list(APPEND REQUIRED_LIBS "-l${lib}")
+ endif()
+ endforeach()
+ endif()
+ endif()
+ # CUDA
+ if (PASTIX_LOOK_FOR_STARPU_CUDA AND CUDA_FOUND)
+ if (CUDA_INCLUDE_DIRS)
+ list(APPEND REQUIRED_INCDIRS "${CUDA_INCLUDE_DIRS}")
+ endif()
+ foreach(libdir ${CUDA_LIBRARY_DIRS})
+ if (libdir)
+ list(APPEND REQUIRED_LIBDIRS "${libdir}")
+ endif()
+ endforeach()
+ list(APPEND REQUIRED_LIBS "${CUDA_CUBLAS_LIBRARIES};${CUDA_LIBRARIES}")
+ endif()
+ # MPI
+ if (PASTIX_LOOK_FOR_MPI AND MPI_FOUND)
+ if (MPI_C_INCLUDE_PATH)
+ list(APPEND REQUIRED_INCDIRS "${MPI_C_INCLUDE_PATH}")
+ endif()
+ if (MPI_C_LINK_FLAGS)
+ if (${MPI_C_LINK_FLAGS} MATCHES " -")
+ string(REGEX REPLACE " -" "-" MPI_C_LINK_FLAGS ${MPI_C_LINK_FLAGS})
+ endif()
+ list(APPEND REQUIRED_LDFLAGS "${MPI_C_LINK_FLAGS}")
+ endif()
+ list(APPEND REQUIRED_LIBS "${MPI_C_LIBRARIES}")
+ endif()
+ # HWLOC
+ if (HWLOC_FOUND)
+ if (HWLOC_INCLUDE_DIRS)
+ list(APPEND REQUIRED_INCDIRS "${HWLOC_INCLUDE_DIRS}")
+ endif()
+ foreach(libdir ${HWLOC_LIBRARY_DIRS})
+ if (libdir)
+ list(APPEND REQUIRED_LIBDIRS "${libdir}")
+ endif()
+ endforeach()
+ foreach(lib ${HWLOC_LIBRARIES})
+ if (EXISTS ${lib} OR ${lib} MATCHES "^-")
+ list(APPEND REQUIRED_LIBS "${lib}")
+ else()
+ list(APPEND REQUIRED_LIBS "-l${lib}")
+ endif()
+ endforeach()
+ endif()
+ # BLAS
+ if (BLAS_FOUND)
+ if (BLAS_INCLUDE_DIRS)
+ list(APPEND REQUIRED_INCDIRS "${BLAS_INCLUDE_DIRS}")
+ endif()
+ foreach(libdir ${BLAS_LIBRARY_DIRS})
+ if (libdir)
+ list(APPEND REQUIRED_LIBDIRS "${libdir}")
+ endif()
+ endforeach()
+ list(APPEND REQUIRED_LIBS "${BLAS_LIBRARIES}")
+ if (BLAS_LINKER_FLAGS)
+ list(APPEND REQUIRED_LDFLAGS "${BLAS_LINKER_FLAGS}")
+ endif()
+ endif()
+ # SCOTCH
+ if (PASTIX_LOOK_FOR_SCOTCH AND SCOTCH_FOUND)
+ if (SCOTCH_INCLUDE_DIRS)
+ list(APPEND REQUIRED_INCDIRS "${SCOTCH_INCLUDE_DIRS}")
+ endif()
+ foreach(libdir ${SCOTCH_LIBRARY_DIRS})
+ if (libdir)
+ list(APPEND REQUIRED_LIBDIRS "${libdir}")
+ endif()
+ endforeach()
+ list(APPEND REQUIRED_LIBS "${SCOTCH_LIBRARIES}")
+ endif()
+ # PTSCOTCH
+ if (PASTIX_LOOK_FOR_PTSCOTCH AND PTSCOTCH_FOUND)
+ if (PTSCOTCH_INCLUDE_DIRS)
+ list(APPEND REQUIRED_INCDIRS "${PTSCOTCH_INCLUDE_DIRS}")
+ endif()
+ foreach(libdir ${PTSCOTCH_LIBRARY_DIRS})
+ if (libdir)
+ list(APPEND REQUIRED_LIBDIRS "${libdir}")
+ endif()
+ endforeach()
+ list(APPEND REQUIRED_LIBS "${PTSCOTCH_LIBRARIES}")
+ endif()
+ # METIS
+ if (PASTIX_LOOK_FOR_METIS AND METIS_FOUND)
+ if (METIS_INCLUDE_DIRS)
+ list(APPEND REQUIRED_INCDIRS "${METIS_INCLUDE_DIRS}")
+ endif()
+ foreach(libdir ${METIS_LIBRARY_DIRS})
+ if (libdir)
+ list(APPEND REQUIRED_LIBDIRS "${libdir}")
+ endif()
+ endforeach()
+ list(APPEND REQUIRED_LIBS "${METIS_LIBRARIES}")
+ endif()
+ # Fortran
+ if (CMAKE_C_COMPILER_ID MATCHES "GNU")
+ find_library(
+ FORTRAN_gfortran_LIBRARY
+ NAMES gfortran
+ HINTS ${_lib_env}
+ )
+ mark_as_advanced(FORTRAN_gfortran_LIBRARY)
+ if (FORTRAN_gfortran_LIBRARY)
+ list(APPEND REQUIRED_LIBS "${FORTRAN_gfortran_LIBRARY}")
+ endif()
+ elseif (CMAKE_C_COMPILER_ID MATCHES "Intel")
+ find_library(
+ FORTRAN_ifcore_LIBRARY
+ NAMES ifcore
+ HINTS ${_lib_env}
+ )
+ mark_as_advanced(FORTRAN_ifcore_LIBRARY)
+ if (FORTRAN_ifcore_LIBRARY)
+ list(APPEND REQUIRED_LIBS "${FORTRAN_ifcore_LIBRARY}")
+ endif()
+ endif()
+  # EXTRA LIBS such as pthread, m, rt
+ list(APPEND REQUIRED_LIBS ${PASTIX_EXTRA_LIBRARIES})
+
+ # set required libraries for link
+ set(CMAKE_REQUIRED_INCLUDES "${REQUIRED_INCDIRS}")
+ set(CMAKE_REQUIRED_LIBRARIES)
+ list(APPEND CMAKE_REQUIRED_LIBRARIES "${REQUIRED_LDFLAGS}")
+ foreach(lib_dir ${REQUIRED_LIBDIRS})
+ list(APPEND CMAKE_REQUIRED_LIBRARIES "-L${lib_dir}")
+ endforeach()
+ list(APPEND CMAKE_REQUIRED_LIBRARIES "${REQUIRED_LIBS}")
+ list(APPEND CMAKE_REQUIRED_FLAGS "${REQUIRED_FLAGS}")
+ string(REGEX REPLACE "^ -" "-" CMAKE_REQUIRED_LIBRARIES "${CMAKE_REQUIRED_LIBRARIES}")
+
+ # test link
+ unset(PASTIX_WORKS CACHE)
+ include(CheckFunctionExists)
+ check_function_exists(pastix PASTIX_WORKS)
+ mark_as_advanced(PASTIX_WORKS)
+
+ if(PASTIX_WORKS)
+ # save link with dependencies
+ set(PASTIX_LIBRARIES_DEP "${REQUIRED_LIBS}")
+ set(PASTIX_LIBRARY_DIRS_DEP "${REQUIRED_LIBDIRS}")
+ set(PASTIX_INCLUDE_DIRS_DEP "${REQUIRED_INCDIRS}")
+ set(PASTIX_LINKER_FLAGS "${REQUIRED_LDFLAGS}")
+ list(REMOVE_DUPLICATES PASTIX_LIBRARY_DIRS_DEP)
+ list(REMOVE_DUPLICATES PASTIX_INCLUDE_DIRS_DEP)
+ list(REMOVE_DUPLICATES PASTIX_LINKER_FLAGS)
+ else()
+ if(NOT PASTIX_FIND_QUIETLY)
+ message(STATUS "Looking for PASTIX : test of pastix() fails")
+ message(STATUS "CMAKE_REQUIRED_LIBRARIES: ${CMAKE_REQUIRED_LIBRARIES}")
+ message(STATUS "CMAKE_REQUIRED_INCLUDES: ${CMAKE_REQUIRED_INCLUDES}")
+ message(STATUS "Check in CMakeFiles/CMakeError.log to figure out why it fails")
+ message(STATUS "Maybe PASTIX is linked with specific libraries. "
+ "Have you tried with COMPONENTS (MPI/SEQ, STARPU, STARPU_CUDA, SCOTCH, PTSCOTCH, METIS)? "
+ "See the explanation in FindPASTIX.cmake.")
+ endif()
+ endif()
+ set(CMAKE_REQUIRED_INCLUDES)
+ set(CMAKE_REQUIRED_FLAGS)
+ set(CMAKE_REQUIRED_LIBRARIES)
+endif(PASTIX_LIBRARIES)
+
+if (PASTIX_LIBRARIES)
+ list(GET PASTIX_LIBRARIES 0 first_lib)
+ get_filename_component(first_lib_path "${first_lib}" PATH)
+ if (${first_lib_path} MATCHES "/lib(32|64)?$")
+ string(REGEX REPLACE "/lib(32|64)?$" "" not_cached_dir "${first_lib_path}")
+ set(PASTIX_DIR_FOUND "${not_cached_dir}" CACHE PATH "Installation directory of PASTIX library" FORCE)
+ else()
+ set(PASTIX_DIR_FOUND "${first_lib_path}" CACHE PATH "Installation directory of PASTIX library" FORCE)
+ endif()
+endif()
+mark_as_advanced(PASTIX_DIR)
+mark_as_advanced(PASTIX_DIR_FOUND)
+
+# check that PASTIX has been found
+# ---------------------------------
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(PASTIX DEFAULT_MSG
- PASTIX_INCLUDES PASTIX_LIBRARIES)
-
-mark_as_advanced(PASTIX_INCLUDES PASTIX_LIBRARIES)
+ PASTIX_LIBRARIES
+ PASTIX_WORKS)
diff --git a/cmake/FindScotch.cmake b/cmake/FindScotch.cmake
index 530340b16..89d295ac2 100644
--- a/cmake/FindScotch.cmake
+++ b/cmake/FindScotch.cmake
@@ -1,24 +1,369 @@
-# Pastix requires SCOTCH or METIS (partitioning and reordering tools)
+###
+#
+# @copyright (c) 2009-2014 The University of Tennessee and The University
+# of Tennessee Research Foundation.
+# All rights reserved.
+# @copyright (c) 2012-2014 Inria. All rights reserved.
+# @copyright (c) 2012-2014 Bordeaux INP, CNRS (LaBRI UMR 5800), Inria, Univ. Bordeaux. All rights reserved.
+#
+###
+#
+# - Find SCOTCH include dirs and libraries
+# Use this module by invoking find_package with the form:
+# find_package(SCOTCH
+# [REQUIRED] # Fail with error if scotch is not found
+# [COMPONENTS <comp1> <comp2> ...] # dependencies
+# )
+#
+# COMPONENTS can be some of the following:
+# - ESMUMPS: to activate detection of Scotch with the esmumps interface
+#
+# This module finds headers and scotch library.
+# Results are reported in variables:
+# SCOTCH_FOUND - True if headers and requested libraries were found
+# SCOTCH_INCLUDE_DIRS - scotch include directories
+# SCOTCH_LIBRARY_DIRS - Link directories for scotch libraries
+# SCOTCH_LIBRARIES - scotch component libraries to be linked
+# SCOTCH_INTSIZE - Number of octets occupied by a SCOTCH_Num
+#
+# The user can give specific paths where to find the libraries by adding CMake
+# options at configure time (ex: cmake path/to/project -DSCOTCH_DIR=path/to/scotch):
+# SCOTCH_DIR - Where to find the base directory of scotch
+# SCOTCH_INCDIR - Where to find the header files
+# SCOTCH_LIBDIR - Where to find the library files
+# The module can also look for the following environment variables if paths
+# are not given as cmake variable: SCOTCH_DIR, SCOTCH_INCDIR, SCOTCH_LIBDIR
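+#
+# Example (sketch): typical consumption from a project's CMakeLists.txt;
+# the target name "mytool" is illustrative only.
+#   find_package(SCOTCH COMPONENTS ESMUMPS)
+#   if (SCOTCH_FOUND)
+#     target_include_directories(mytool PRIVATE ${SCOTCH_INCLUDE_DIRS})
+#     target_link_libraries(mytool PRIVATE ${SCOTCH_LIBRARIES})
+#   endif()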
-if (SCOTCH_INCLUDES AND SCOTCH_LIBRARIES)
- set(SCOTCH_FIND_QUIETLY TRUE)
-endif (SCOTCH_INCLUDES AND SCOTCH_LIBRARIES)
+#=============================================================================
+# Copyright 2012-2013 Inria
+# Copyright 2012-2013 Emmanuel Agullo
+# Copyright 2012-2013 Mathieu Faverge
+# Copyright 2012 Cedric Castagnede
+# Copyright 2013 Florent Pruvost
+#
+# Distributed under the OSI-approved BSD License (the "License");
+# see accompanying file MORSE-Copyright.txt for details.
+#
+# This software is distributed WITHOUT ANY WARRANTY; without even the
+# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+# See the License for more information.
+#=============================================================================
+# (To distribute this file outside of Morse, substitute the full
+# License text for the above reference.)
-find_path(SCOTCH_INCLUDES
- NAMES
- scotch.h
- PATHS
- $ENV{SCOTCHDIR}
- ${INCLUDE_INSTALL_DIR}
- PATH_SUFFIXES
- scotch
-)
+if (NOT SCOTCH_FOUND)
+ set(SCOTCH_DIR "" CACHE PATH "Installation directory of SCOTCH library")
+ if (NOT SCOTCH_FIND_QUIETLY)
+ message(STATUS "A cache variable, namely SCOTCH_DIR, has been set to specify the install directory of SCOTCH")
+ endif()
+endif()
+# Set the version to find
+set(SCOTCH_LOOK_FOR_ESMUMPS OFF)
-find_library(SCOTCH_LIBRARIES scotch PATHS $ENV{SCOTCHDIR} ${LIB_INSTALL_DIR})
+if( SCOTCH_FIND_COMPONENTS )
+ foreach( component ${SCOTCH_FIND_COMPONENTS} )
+ if (${component} STREQUAL "ESMUMPS")
+ # means we look for esmumps library
+ set(SCOTCH_LOOK_FOR_ESMUMPS ON)
+ endif()
+ endforeach()
+endif()
+# SCOTCH may depend on Threads, try to find it
+if (NOT THREADS_FOUND)
+ if (SCOTCH_FIND_REQUIRED)
+ find_package(Threads REQUIRED)
+ else()
+ find_package(Threads)
+ endif()
+endif()
+
+# Looking for include
+# -------------------
+
+# Add system include paths to search include
+# ------------------------------------------
+unset(_inc_env)
+set(ENV_SCOTCH_DIR "$ENV{SCOTCH_DIR}")
+set(ENV_SCOTCH_INCDIR "$ENV{SCOTCH_INCDIR}")
+if(ENV_SCOTCH_INCDIR)
+ list(APPEND _inc_env "${ENV_SCOTCH_INCDIR}")
+elseif(ENV_SCOTCH_DIR)
+ list(APPEND _inc_env "${ENV_SCOTCH_DIR}")
+ list(APPEND _inc_env "${ENV_SCOTCH_DIR}/include")
+ list(APPEND _inc_env "${ENV_SCOTCH_DIR}/include/scotch")
+else()
+ if(WIN32)
+ string(REPLACE ":" ";" _inc_env "$ENV{INCLUDE}")
+ else()
+ string(REPLACE ":" ";" _path_env "$ENV{INCLUDE}")
+ list(APPEND _inc_env "${_path_env}")
+ string(REPLACE ":" ";" _path_env "$ENV{C_INCLUDE_PATH}")
+ list(APPEND _inc_env "${_path_env}")
+ string(REPLACE ":" ";" _path_env "$ENV{CPATH}")
+ list(APPEND _inc_env "${_path_env}")
+ string(REPLACE ":" ";" _path_env "$ENV{INCLUDE_PATH}")
+ list(APPEND _inc_env "${_path_env}")
+ endif()
+endif()
+list(APPEND _inc_env "${CMAKE_PLATFORM_IMPLICIT_INCLUDE_DIRECTORIES}")
+list(APPEND _inc_env "${CMAKE_C_IMPLICIT_INCLUDE_DIRECTORIES}")
+list(REMOVE_DUPLICATES _inc_env)
+
+
+# Try to find the scotch header in the given paths
+# -------------------------------------------------
+# call cmake macro to find the header path
+if(SCOTCH_INCDIR)
+ set(SCOTCH_scotch.h_DIRS "SCOTCH_scotch.h_DIRS-NOTFOUND")
+ find_path(SCOTCH_scotch.h_DIRS
+ NAMES scotch.h
+ HINTS ${SCOTCH_INCDIR})
+else()
+ if(SCOTCH_DIR)
+ set(SCOTCH_scotch.h_DIRS "SCOTCH_scotch.h_DIRS-NOTFOUND")
+ find_path(SCOTCH_scotch.h_DIRS
+ NAMES scotch.h
+ HINTS ${SCOTCH_DIR}
+ PATH_SUFFIXES "include" "include/scotch")
+ else()
+ set(SCOTCH_scotch.h_DIRS "SCOTCH_scotch.h_DIRS-NOTFOUND")
+ find_path(SCOTCH_scotch.h_DIRS
+ NAMES scotch.h
+ HINTS ${_inc_env}
+ PATH_SUFFIXES "scotch")
+ endif()
+endif()
+mark_as_advanced(SCOTCH_scotch.h_DIRS)
+
+# If found, add path to cmake variable
+# ------------------------------------
+if (SCOTCH_scotch.h_DIRS)
+ set(SCOTCH_INCLUDE_DIRS "${SCOTCH_scotch.h_DIRS}")
+else ()
+ set(SCOTCH_INCLUDE_DIRS "SCOTCH_INCLUDE_DIRS-NOTFOUND")
+ if (NOT SCOTCH_FIND_QUIETLY)
+ message(STATUS "Looking for scotch -- scotch.h not found")
+ endif()
+endif()
+list(REMOVE_DUPLICATES SCOTCH_INCLUDE_DIRS)
+
+# Looking for lib
+# ---------------
+
+# Add system library paths to search lib
+# --------------------------------------
+unset(_lib_env)
+set(ENV_SCOTCH_LIBDIR "$ENV{SCOTCH_LIBDIR}")
+if(ENV_SCOTCH_LIBDIR)
+ list(APPEND _lib_env "${ENV_SCOTCH_LIBDIR}")
+elseif(ENV_SCOTCH_DIR)
+ list(APPEND _lib_env "${ENV_SCOTCH_DIR}")
+ list(APPEND _lib_env "${ENV_SCOTCH_DIR}/lib")
+else()
+ if(WIN32)
+ string(REPLACE ":" ";" _lib_env "$ENV{LIB}")
+ else()
+ if(APPLE)
+ string(REPLACE ":" ";" _lib_env "$ENV{DYLD_LIBRARY_PATH}")
+ else()
+ string(REPLACE ":" ";" _lib_env "$ENV{LD_LIBRARY_PATH}")
+ endif()
+ list(APPEND _lib_env "${CMAKE_PLATFORM_IMPLICIT_LINK_DIRECTORIES}")
+ list(APPEND _lib_env "${CMAKE_C_IMPLICIT_LINK_DIRECTORIES}")
+ endif()
+endif()
+list(REMOVE_DUPLICATES _lib_env)
+
+# Try to find the scotch lib in the given paths
+# ----------------------------------------------
+
+set(SCOTCH_libs_to_find "scotch;scotcherrexit")
+if (SCOTCH_LOOK_FOR_ESMUMPS)
+ list(INSERT SCOTCH_libs_to_find 0 "esmumps")
+endif()
+
+# call cmake macro to find the lib path
+if(SCOTCH_LIBDIR)
+ foreach(scotch_lib ${SCOTCH_libs_to_find})
+ set(SCOTCH_${scotch_lib}_LIBRARY "SCOTCH_${scotch_lib}_LIBRARY-NOTFOUND")
+ find_library(SCOTCH_${scotch_lib}_LIBRARY
+ NAMES ${scotch_lib}
+ HINTS ${SCOTCH_LIBDIR})
+ endforeach()
+else()
+ if(SCOTCH_DIR)
+ foreach(scotch_lib ${SCOTCH_libs_to_find})
+ set(SCOTCH_${scotch_lib}_LIBRARY "SCOTCH_${scotch_lib}_LIBRARY-NOTFOUND")
+ find_library(SCOTCH_${scotch_lib}_LIBRARY
+ NAMES ${scotch_lib}
+ HINTS ${SCOTCH_DIR}
+ PATH_SUFFIXES lib lib32 lib64)
+ endforeach()
+ else()
+ foreach(scotch_lib ${SCOTCH_libs_to_find})
+ set(SCOTCH_${scotch_lib}_LIBRARY "SCOTCH_${scotch_lib}_LIBRARY-NOTFOUND")
+ find_library(SCOTCH_${scotch_lib}_LIBRARY
+ NAMES ${scotch_lib}
+ HINTS ${_lib_env})
+ endforeach()
+ endif()
+endif()
+
+set(SCOTCH_LIBRARIES "")
+set(SCOTCH_LIBRARY_DIRS "")
+# If found, add path to cmake variable
+# ------------------------------------
+foreach(scotch_lib ${SCOTCH_libs_to_find})
+
+ if (SCOTCH_${scotch_lib}_LIBRARY)
+ get_filename_component(${scotch_lib}_lib_path "${SCOTCH_${scotch_lib}_LIBRARY}" PATH)
+ # set cmake variables
+ list(APPEND SCOTCH_LIBRARIES "${SCOTCH_${scotch_lib}_LIBRARY}")
+ list(APPEND SCOTCH_LIBRARY_DIRS "${${scotch_lib}_lib_path}")
+ else ()
+ list(APPEND SCOTCH_LIBRARIES "${SCOTCH_${scotch_lib}_LIBRARY}")
+ if (NOT SCOTCH_FIND_QUIETLY)
+ message(STATUS "Looking for scotch -- lib ${scotch_lib} not found")
+ endif()
+ endif ()
+
+ mark_as_advanced(SCOTCH_${scotch_lib}_LIBRARY)
+
+endforeach()
+list(REMOVE_DUPLICATES SCOTCH_LIBRARY_DIRS)
+
+# check a function to validate the find
+if(SCOTCH_LIBRARIES)
+
+ set(REQUIRED_INCDIRS)
+ set(REQUIRED_LIBDIRS)
+ set(REQUIRED_LIBS)
+
+ # SCOTCH
+ if (SCOTCH_INCLUDE_DIRS)
+ set(REQUIRED_INCDIRS "${SCOTCH_INCLUDE_DIRS}")
+ endif()
+ if (SCOTCH_LIBRARY_DIRS)
+ set(REQUIRED_LIBDIRS "${SCOTCH_LIBRARY_DIRS}")
+ endif()
+ set(REQUIRED_LIBS "${SCOTCH_LIBRARIES}")
+ # THREADS
+ if(CMAKE_THREAD_LIBS_INIT)
+ list(APPEND REQUIRED_LIBS "${CMAKE_THREAD_LIBS_INIT}")
+ endif()
+ set(Z_LIBRARY "Z_LIBRARY-NOTFOUND")
+ find_library(Z_LIBRARY NAMES z)
+ mark_as_advanced(Z_LIBRARY)
+ if(Z_LIBRARY)
+ list(APPEND REQUIRED_LIBS "-lz")
+ endif()
+ set(M_LIBRARY "M_LIBRARY-NOTFOUND")
+ find_library(M_LIBRARY NAMES m)
+ mark_as_advanced(M_LIBRARY)
+ if(M_LIBRARY)
+ list(APPEND REQUIRED_LIBS "-lm")
+ endif()
+ set(RT_LIBRARY "RT_LIBRARY-NOTFOUND")
+ find_library(RT_LIBRARY NAMES rt)
+ mark_as_advanced(RT_LIBRARY)
+ if(RT_LIBRARY)
+ list(APPEND REQUIRED_LIBS "-lrt")
+ endif()
+
+ # set required libraries for link
+ set(CMAKE_REQUIRED_INCLUDES "${REQUIRED_INCDIRS}")
+ set(CMAKE_REQUIRED_LIBRARIES)
+ foreach(lib_dir ${REQUIRED_LIBDIRS})
+ list(APPEND CMAKE_REQUIRED_LIBRARIES "-L${lib_dir}")
+ endforeach()
+ list(APPEND CMAKE_REQUIRED_LIBRARIES "${REQUIRED_LIBS}")
+ string(REGEX REPLACE "^ -" "-" CMAKE_REQUIRED_LIBRARIES "${CMAKE_REQUIRED_LIBRARIES}")
+
+ # test link
+ unset(SCOTCH_WORKS CACHE)
+ include(CheckFunctionExists)
+ check_function_exists(SCOTCH_graphInit SCOTCH_WORKS)
+ mark_as_advanced(SCOTCH_WORKS)
+
+ if(SCOTCH_WORKS)
+ # save link with dependencies
+ set(SCOTCH_LIBRARIES "${REQUIRED_LIBS}")
+ else()
+ if(NOT SCOTCH_FIND_QUIETLY)
+ message(STATUS "Looking for SCOTCH : test of SCOTCH_graphInit with SCOTCH library fails")
+ message(STATUS "CMAKE_REQUIRED_LIBRARIES: ${CMAKE_REQUIRED_LIBRARIES}")
+ message(STATUS "CMAKE_REQUIRED_INCLUDES: ${CMAKE_REQUIRED_INCLUDES}")
+ message(STATUS "Check in CMakeFiles/CMakeError.log to figure out why it fails")
+ endif()
+ endif()
+ set(CMAKE_REQUIRED_INCLUDES)
+ set(CMAKE_REQUIRED_FLAGS)
+ set(CMAKE_REQUIRED_LIBRARIES)
+endif(SCOTCH_LIBRARIES)
+
+if (SCOTCH_LIBRARIES)
+ list(GET SCOTCH_LIBRARIES 0 first_lib)
+ get_filename_component(first_lib_path "${first_lib}" PATH)
+ if (${first_lib_path} MATCHES "/lib(32|64)?$")
+ string(REGEX REPLACE "/lib(32|64)?$" "" not_cached_dir "${first_lib_path}")
+ set(SCOTCH_DIR_FOUND "${not_cached_dir}" CACHE PATH "Installation directory of SCOTCH library" FORCE)
+ else()
+ set(SCOTCH_DIR_FOUND "${first_lib_path}" CACHE PATH "Installation directory of SCOTCH library" FORCE)
+ endif()
+endif()
+mark_as_advanced(SCOTCH_DIR)
+mark_as_advanced(SCOTCH_DIR_FOUND)
+
+# Check the size of SCOTCH_Num
+# ---------------------------------
+set(CMAKE_REQUIRED_INCLUDES ${SCOTCH_INCLUDE_DIRS})
+
+include(CheckCSourceRuns)
+#stdio.h and stdint.h should be included by scotch.h directly
+set(SCOTCH_C_TEST_SCOTCH_Num_4 "
+#include <stdio.h>
+#include <stdint.h>
+#include <scotch.h>
+int main(int argc, char **argv) {
+ if (sizeof(SCOTCH_Num) == 4)
+ return 0;
+ else
+ return 1;
+}
+")
+
+set(SCOTCH_C_TEST_SCOTCH_Num_8 "
+#include <stdio.h>
+#include <stdint.h>
+#include <scotch.h>
+int main(int argc, char **argv) {
+ if (sizeof(SCOTCH_Num) == 8)
+ return 0;
+ else
+ return 1;
+}
+")
+check_c_source_runs("${SCOTCH_C_TEST_SCOTCH_Num_4}" SCOTCH_Num_4)
+if(NOT SCOTCH_Num_4)
+ check_c_source_runs("${SCOTCH_C_TEST_SCOTCH_Num_8}" SCOTCH_Num_8)
+ if(NOT SCOTCH_Num_8)
+ set(SCOTCH_INTSIZE -1)
+ else()
+ set(SCOTCH_INTSIZE 8)
+ endif()
+else()
+ set(SCOTCH_INTSIZE 4)
+endif()
+set(CMAKE_REQUIRED_INCLUDES "")
+
+# check that SCOTCH has been found
+# ---------------------------------
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(SCOTCH DEFAULT_MSG
- SCOTCH_INCLUDES SCOTCH_LIBRARIES)
-
-mark_as_advanced(SCOTCH_INCLUDES SCOTCH_LIBRARIES)
+ SCOTCH_LIBRARIES
+ SCOTCH_WORKS)
+#
+# TODO: Add possibility to check for specific functions in the library
+#
diff --git a/cmake/FindTriSYCL.cmake b/cmake/FindTriSYCL.cmake
new file mode 100644
index 000000000..cb2154192
--- /dev/null
+++ b/cmake/FindTriSYCL.cmake
@@ -0,0 +1,152 @@
+#.rst:
+# FindTriSYCL
+#---------------
+#
+# TODO : insert Copyright and licence
+
+#########################
+# FindTriSYCL.cmake
+#########################
+#
+# Tools for finding and building with TriSYCL.
+#
+# User must define TRISYCL_INCLUDE_DIR pointing to the triSYCL
+# include directory.
+#
+# Latest version of this file can be found at:
+# https://github.com/triSYCL/triSYCL
+
+# Require CMake version 3.5 or higher
+cmake_minimum_required (VERSION 3.5)
+
+# Check that a supported host compiler can be found
+if(CMAKE_COMPILER_IS_GNUCXX)
+ # Require at least gcc 5.4
+ if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 5.4)
+ message(FATAL_ERROR
+ "host compiler - Not found! (gcc version must be at least 5.4)")
+ else()
+ message(STATUS "host compiler - gcc ${CMAKE_CXX_COMPILER_VERSION}")
+ endif()
+elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang")
+ # Require at least clang 3.9
+ if (${CMAKE_CXX_COMPILER_VERSION} VERSION_LESS 3.9)
+ message(FATAL_ERROR
+ "host compiler - Not found! (clang version must be at least 3.9)")
+ else()
+ message(STATUS "host compiler - clang ${CMAKE_CXX_COMPILER_VERSION}")
+ endif()
+else()
+ message(WARNING
+ "host compiler - Not found! (triSYCL supports GCC and Clang)")
+endif()
+
+#triSYCL options
+option(TRISYCL_OPENMP "triSYCL multi-threading with OpenMP" ON)
+option(TRISYCL_OPENCL "triSYCL OpenCL interoperability mode" OFF)
+option(TRISYCL_NO_ASYNC "triSYCL use synchronous kernel execution" OFF)
+option(TRISYCL_DEBUG "triSYCL use debug mode" OFF)
+option(TRISYCL_DEBUG_STRUCTORS "triSYCL trace of object lifetimes" OFF)
+option(TRISYCL_TRACE_KERNEL "triSYCL trace of kernel execution" OFF)
+
+mark_as_advanced(TRISYCL_OPENMP)
+mark_as_advanced(TRISYCL_OPENCL)
+mark_as_advanced(TRISYCL_NO_ASYNC)
+mark_as_advanced(TRISYCL_DEBUG)
+mark_as_advanced(TRISYCL_DEBUG_STRUCTORS)
+mark_as_advanced(TRISYCL_TRACE_KERNEL)
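+
+# Example (illustrative): the options above are meant to be toggled at
+# configure time, e.g.
+#   cmake path/to/project -DTRISYCL_INCLUDE_DIR=path/to/triSYCL/include \
+#         -DTRISYCL_OPENCL=ON -DTRISYCL_DEBUG=ON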
+
+#triSYCL definitions
+set(CL_SYCL_LANGUAGE_VERSION 220 CACHE VERSION
+  "Host language version to be used by triSYCL (default is: 220)")
+set(TRISYCL_CL_LANGUAGE_VERSION 220 CACHE VERSION
+  "Device language version to be used by triSYCL (default is: 220)")
+#set(TRISYCL_COMPILE_OPTIONS "-std=c++1z -Wall -Wextra")
+set(CMAKE_CXX_STANDARD 14)
+set(CMAKE_CXX_STANDARD_REQUIRED ON)
+
+
+# Find OpenCL package
+if(TRISYCL_OPENCL)
+ find_package(OpenCL REQUIRED)
+ if(UNIX)
+ set(BOOST_COMPUTE_INCPATH /usr/include/compute CACHE PATH
+ "Path to Boost.Compute headers (default is: /usr/include/compute)")
+ endif(UNIX)
+endif()
+
+# Find OpenMP package
+if(TRISYCL_OPENMP)
+ find_package(OpenMP REQUIRED)
+endif()
+
+# Find Boost
+find_package(Boost 1.58 REQUIRED COMPONENTS chrono log)
+
+# If debug or trace we need boost log
+if(TRISYCL_DEBUG OR TRISYCL_DEBUG_STRUCTORS OR TRISYCL_TRACE_KERNEL)
+ set(LOG_NEEDED ON)
+else()
+ set(LOG_NEEDED OFF)
+endif()
+
+find_package(Threads REQUIRED)
+
+# Find triSYCL directory
+if(NOT TRISYCL_INCLUDE_DIR)
+  message(FATAL_ERROR
+    "triSYCL include directory - Not found! (please set TRISYCL_INCLUDE_DIR)")
+else()
+ message(STATUS "triSYCL include directory - Found ${TRISYCL_INCLUDE_DIR}")
+endif()
+
+#######################
+# add_sycl_to_target
+#######################
+#
+# Sets the proper flags and includes for the target compilation.
+#
+# targetName : Name of the target to add SYCL to.
+# sourceFile : Source file to be compiled for SYCL.
+# binaryDir : Intermediate directory to output the integration header.
+#
+function(add_sycl_to_target targetName sourceFile binaryDir)
+
+ # Add include directories to the "#include <>" paths
+ target_include_directories (${targetName} PUBLIC
+ ${TRISYCL_INCLUDE_DIR}
+ ${Boost_INCLUDE_DIRS}
+ $<$<BOOL:${TRISYCL_OPENCL}>:${OpenCL_INCLUDE_DIRS}>
+ $<$<BOOL:${TRISYCL_OPENCL}>:${BOOST_COMPUTE_INCPATH}>)
+
+
+ # Link dependencies
+ target_link_libraries(${targetName} PUBLIC
+ $<$<BOOL:${TRISYCL_OPENCL}>:${OpenCL_LIBRARIES}>
+ Threads::Threads
+ $<$<BOOL:${LOG_NEEDED}>:Boost::log>
+ Boost::chrono)
+
+
+ # Compile definitions
+ target_compile_definitions(${targetName} PUBLIC
+ $<$<BOOL:${TRISYCL_NO_ASYNC}>:TRISYCL_NO_ASYNC>
+ $<$<BOOL:${TRISYCL_OPENCL}>:TRISYCL_OPENCL>
+ $<$<BOOL:${TRISYCL_DEBUG}>:TRISYCL_DEBUG>
+ $<$<BOOL:${TRISYCL_DEBUG_STRUCTORS}>:TRISYCL_DEBUG_STRUCTORS>
+ $<$<BOOL:${TRISYCL_TRACE_KERNEL}>:TRISYCL_TRACE_KERNEL>
+ $<$<BOOL:${LOG_NEEDED}>:BOOST_LOG_DYN_LINK>)
+
+ # C++ and OpenMP requirements
+ target_compile_options(${targetName} PUBLIC
+ ${TRISYCL_COMPILE_OPTIONS}
+ $<$<BOOL:${TRISYCL_OPENMP}>:${OpenMP_CXX_FLAGS}>)
+
+ if(${TRISYCL_OPENMP} AND (NOT WIN32))
+ # Does not support generator expressions
+ set_target_properties(${targetName}
+ PROPERTIES
+ LINK_FLAGS ${OpenMP_CXX_FLAGS})
+ endif(${TRISYCL_OPENMP} AND (NOT WIN32))
+
+endfunction(add_sycl_to_target)
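+
+# Example usage (sketch; the target and file names are illustrative only):
+#   add_executable(my_sycl_app my_sycl_app.cpp)
+#   add_sycl_to_target(my_sycl_app my_sycl_app.cpp ${CMAKE_CURRENT_BINARY_DIR})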
diff --git a/cmake/language_support.cmake b/cmake/language_support.cmake
index 2f14f30b8..ddba50945 100644
--- a/cmake/language_support.cmake
+++ b/cmake/language_support.cmake
@@ -26,7 +26,7 @@ function(workaround_9220 language language_works)
cmake_minimum_required(VERSION 2.8.0)
set (CMAKE_Fortran_FLAGS \"${CMAKE_Fortran_FLAGS}\")
set (CMAKE_EXE_LINKER_FLAGS \"${CMAKE_EXE_LINKER_FLAGS}\")
- enable_language(${language} OPTIONAL)
+ enable_language(${language})
")
file(REMOVE_RECURSE ${CMAKE_BINARY_DIR}/language_tests/${language})
file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/language_tests/${language})
diff --git a/debug/msvc/eigen_autoexp_part.dat b/debug/msvc/eigen_autoexp_part.dat
index 07aa43739..35ef5807c 100644
--- a/debug/msvc/eigen_autoexp_part.dat
+++ b/debug/msvc/eigen_autoexp_part.dat
@@ -14,7 +14,7 @@
; * - Eigen::Matrix<*,-1,+,*,*,*>
; * - Eigen::Matrix<*,+,+,*,*,*>
; *
-; * Matrices are displayed properly independantly of the memory
+; * Matrices are displayed properly independently of the memory
; * alignment (RowMajor vs. ColMajor).
; *
; * This file is distributed WITHOUT ANY WARRANTY. Please ensure
diff --git a/doc/AsciiQuickReference.txt b/doc/AsciiQuickReference.txt
index 8409f8850..0ca54cef3 100644
--- a/doc/AsciiQuickReference.txt
+++ b/doc/AsciiQuickReference.txt
@@ -140,7 +140,7 @@ R.array().abs() // abs(P)
R.cwiseAbs2() // abs(P.^2)
R.array().abs2() // abs(P.^2)
(R.array() < s).select(P,Q ); // (R < s ? P : Q)
-R = (Q.array()==0).select(P,A) // R(Q==0) = P(Q==0)
+R = (Q.array()==0).select(P,R) // R(Q==0) = P(Q==0)
R = P.unaryExpr(ptr_fun(func)) // R = arrayfun(func, P) // with: scalar func(const scalar &x);
diff --git a/doc/Doxyfile.in b/doc/Doxyfile.in
index 2109978fe..49b9fba39 100644
--- a/doc/Doxyfile.in
+++ b/doc/Doxyfile.in
@@ -1764,7 +1764,7 @@ UML_LOOK = YES
# the class node. If there are many fields or methods and many nodes the
# graph may become too big to be useful. The UML_LIMIT_NUM_FIELDS
# threshold limits the number of items for each type to make the size more
-# managable. Set this to 0 for no limit. Note that the threshold may be
+# manageable. Set this to 0 for no limit. Note that the threshold may be
# exceeded by 50% before the limit is enforced.
UML_LIMIT_NUM_FIELDS = 10
diff --git a/doc/FunctionsTakingEigenTypes.dox b/doc/FunctionsTakingEigenTypes.dox
index 152dda47d..6b4e49214 100644
--- a/doc/FunctionsTakingEigenTypes.dox
+++ b/doc/FunctionsTakingEigenTypes.dox
@@ -79,7 +79,7 @@ These examples are just intended to give the reader a first impression of how fu
\section TopicUsingRefClass How to write generic, but non-templated function?
-In all the previous examples, the functions had to be template functions. This approach allows to write very generic code, but it is often desirable to write non templated function and still keep some level of genericity to avoid stupid copies of the arguments. The typical example is to write functions accepting both a MatrixXf or a block of a MatrixXf. This exactly the purpose of the Ref class. Here is a simple example:
+In all the previous examples, the functions had to be template functions. This approach allows writing very generic code, but it is often desirable to write non-templated functions and still keep some level of genericity to avoid stupid copies of the arguments. The typical example is to write functions accepting both a MatrixXf and a block of a MatrixXf. This is exactly the purpose of the Ref class. Here is a simple example:
<table class="example">
<tr><th>Example:</th><th>Output:</th></tr>
@@ -133,7 +133,7 @@ In this special case, the example is fine and will be working because both param
\section TopicPlainFunctionsFailing In which cases do functions taking a plain Matrix or Array argument fail?
-Here, we consider a slightly modified version of the function given above. This time, we do not want to return the result but pass an additional non-const paramter which allows us to store the result. A first naive implementation might look as follows.
+Here, we consider a slightly modified version of the function given above. This time, we do not want to return the result but pass an additional non-const parameter which allows us to store the result. A first naive implementation might look as follows.
\code
// Note: This code is flawed!
void cov(const MatrixXf& x, const MatrixXf& y, MatrixXf& C)
@@ -176,7 +176,7 @@ The implementation above does now not only work with temporary expressions but i
\section TopicResizingInGenericImplementations How to resize matrices in generic implementations?
-One might think we are done now, right? This is not completely true because in order for our covariance function to be generically applicable, we want the follwing code to work
+One might think we are done now, right? This is not completely true because in order for our covariance function to be generically applicable, we want the following code to work
\code
MatrixXf x = MatrixXf::Random(100,3);
MatrixXf y = MatrixXf::Random(100,3);
diff --git a/doc/LeastSquares.dox b/doc/LeastSquares.dox
index e2191a22f..24dfe4b4f 100644
--- a/doc/LeastSquares.dox
+++ b/doc/LeastSquares.dox
@@ -16,7 +16,7 @@ equations is the fastest but least accurate, and the QR decomposition is in betw
\section LeastSquaresSVD Using the SVD decomposition
-The \link JacobiSVD::solve() solve() \endlink method in the JacobiSVD class can be directly used to
+The \link BDCSVD::solve() solve() \endlink method in the BDCSVD class can be directly used to
solve linear squares systems. It is not enough to compute only the singular values (the default for
this class); you also need the singular vectors but the thin SVD decomposition suffices for
computing least squares solutions:
diff --git a/doc/Pitfalls.dox b/doc/Pitfalls.dox
index cf42effef..3f395053d 100644
--- a/doc/Pitfalls.dox
+++ b/doc/Pitfalls.dox
@@ -2,10 +2,16 @@ namespace Eigen {
/** \page TopicPitfalls Common pitfalls
+
\section TopicPitfalls_template_keyword Compilation error with template methods
See this \link TopicTemplateKeyword page \endlink.
+\section TopicPitfalls_aliasing Aliasing
+
+Don't miss this \link TopicAliasing page \endlink on aliasing,
+especially if you got wrong results in statements where the destination appears on the right hand side of the expression.
+
\section TopicPitfalls_auto_keyword C++11 and the auto keyword
In short: do not use the auto keywords with Eigen's expressions, unless you are 100% sure about what you are doing. In particular, do not use the auto keyword as a replacement for a Matrix<> type. Here is an example:
diff --git a/doc/PreprocessorDirectives.dox b/doc/PreprocessorDirectives.dox
index f01b39aec..d801798c9 100644
--- a/doc/PreprocessorDirectives.dox
+++ b/doc/PreprocessorDirectives.dox
@@ -51,7 +51,7 @@ are doing.
\section TopicPreprocessorDirectivesCppVersion C++ standard features
-By default, %Eigen strive to automatically detect and enable langage features at compile-time based on
+By default, %Eigen strives to automatically detect and enable language features at compile-time based on
the information provided by the compiler.
- \b EIGEN_MAX_CPP_VER - disables usage of C++ features requiring a version greater than EIGEN_MAX_CPP_VER.
@@ -66,7 +66,7 @@ functions by defining EIGEN_HAS_C99_MATH=1.
Automatic detection disabled if EIGEN_MAX_CPP_VER<11.
- \b EIGEN_HAS_CXX11_MATH - controls the implementation of some functions such as round, logp1, isinf, isnan, etc.
Automatic detection disabled if EIGEN_MAX_CPP_VER<11.
- - \b EIGEN_HAS_RVALUE_REFERENCES - defines whetehr rvalue references are supported
+ - \b EIGEN_HAS_RVALUE_REFERENCES - defines whether rvalue references are supported
Automatic detection disabled if EIGEN_MAX_CPP_VER<11.
- \b EIGEN_HAS_STD_RESULT_OF - defines whether std::result_of is supported
Automatic detection disabled if EIGEN_MAX_CPP_VER<11.
@@ -120,6 +120,12 @@ run time. However, these assertions do cost time and can thus be turned off.
- \b \c EIGEN_STACK_ALLOCATION_LIMIT - defines the maximum bytes for a buffer to be allocated on the stack. For internal
temporary buffers, dynamic memory allocation is employed as a fall back. For fixed-size matrices or arrays, exceeding
this threshold raises a compile time assertion. Use 0 to set no limit. Default is 128 KB.
+ - \b \c EIGEN_NO_CUDA - disables CUDA support when defined. Might be useful in .cu files for which Eigen is used on the host only,
+ and never called from device code.
+ - \b \c EIGEN_STRONG_INLINE - This macro is used to qualify critical functions and methods that we expect the compiler to inline.
+   By default it is defined to \c __forceinline for MSVC and ICC, and to \c inline for other compilers. A typical usage is to
+   define it to \c inline for MSVC users wanting faster compilation times, at the risk of performance degradation in some rare
+   cases for which the MSVC inliner fails to do a good job.
- \c EIGEN_DONT_ALIGN - Deprecated, it is a synonym for \c EIGEN_MAX_ALIGN_BYTES=0. It disables alignment completely. %Eigen will not try to align its objects and does not expect that any objects passed to it are aligned. This will turn off vectorization if \b EIGEN_UNALIGNED_VECTORIZE=1. Not defined by default.
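A minimal sketch of how such macros are meant to be defined, before the first Eigen header is included (the chosen values are illustrative):
\code
// Must come before any Eigen include.
#define EIGEN_STRONG_INLINE inline   // trade some run-time performance for faster MSVC builds
#define EIGEN_NO_CUDA                // this .cu file uses Eigen on the host only
#include <Eigen/Dense>
\endcode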
diff --git a/doc/QuickReference.dox b/doc/QuickReference.dox
index 44f5410db..18c90a2a9 100644
--- a/doc/QuickReference.dox
+++ b/doc/QuickReference.dox
@@ -68,7 +68,7 @@ Array<float,4,1> <=> Array4f
Conversion between the matrix and array worlds:
\code
-Array44f a1, a1;
+Array44f a1, a2;
Matrix4f m1, m2;
m1 = a1 * a2; // coeffwise product, implicit conversion from array to matrix.
a1 = m1 * m2; // matrix product, implicit conversion from matrix to array.
@@ -261,6 +261,8 @@ x.setIdentity();
Vector3f::UnitX() // 1 0 0
Vector3f::UnitY() // 0 1 0
Vector3f::UnitZ() // 0 0 1
+Vector4f::Unit(i)
+x.setUnit(i);
\endcode
</td>
<td>
@@ -278,6 +280,7 @@ N/A
VectorXf::Unit(size,i)
+x.setUnit(size,i);
VectorXf::Unit(4,1) == Vector4f(0,1,0,0)
== Vector4f::UnitY()
\endcode
@@ -285,7 +288,12 @@ VectorXf::Unit(4,1) == Vector4f(0,1,0,0)
</tr>
</table>
-
+Note that it is allowed to call any of the \c set* functions on a dynamic-sized vector or matrix without passing new sizes.
+For instance:
+\code
+MatrixXi M(3,3);
+M.setIdentity();
+\endcode
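Similarly, a short sketch of the unit-vector setters listed above (the index values are illustrative):
\code
VectorXf v(5);
v.setUnit(2);      // keeps the current size: v = (0, 0, 1, 0, 0)
v.setUnit(8, 3);   // resizes to 8 and sets the 4th coefficient to 1
\endcode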
\subsection QuickRef_Map Mapping external arrays
diff --git a/doc/QuickStartGuide.dox b/doc/QuickStartGuide.dox
index ea32c3b3d..23bb2981b 100644
--- a/doc/QuickStartGuide.dox
+++ b/doc/QuickStartGuide.dox
@@ -68,7 +68,7 @@ The output is as follows:
The second example starts by declaring a 3-by-3 matrix \c m which is initialized using the \link DenseBase::Random(Index,Index) Random() \endlink method with random values between -1 and 1. The next line applies a linear mapping such that the values are between 10 and 110. The function call \link DenseBase::Constant(Index,Index,const Scalar&) MatrixXd::Constant\endlink(3,3,1.2) returns a 3-by-3 matrix expression having all coefficients equal to 1.2. The rest is standard arithmetics.
-The next line of the \c main function introduces a new type: \c VectorXd. This represents a (column) vector of arbitrary size. Here, the vector \c v is created to contain \c 3 coefficients which are left unitialized. The one but last line uses the so-called comma-initializer, explained in \ref TutorialAdvancedInitialization, to set all coefficients of the vector \c v to be as follows:
+The next line of the \c main function introduces a new type: \c VectorXd. This represents a (column) vector of arbitrary size. Here, the vector \c v is created to contain \c 3 coefficients which are left uninitialized. The last but one line uses the so-called comma-initializer, explained in \ref TutorialAdvancedInitialization, to set all coefficients of the vector \c v to be as follows:
\f[
v =
diff --git a/doc/SparseLinearSystems.dox b/doc/SparseLinearSystems.dox
index fc33b93e7..38754e4af 100644
--- a/doc/SparseLinearSystems.dox
+++ b/doc/SparseLinearSystems.dox
@@ -70,6 +70,9 @@ They are summarized in the following tables:
<tr><td>UmfPackLU</td><td>\link UmfPackSupport_Module UmfPackSupport \endlink</td><td>Direct LU factorization</td><td>Square</td><td>Fill-in reducing, Leverage fast dense algebra</td>
<td>Requires the <a href="http://www.suitesparse.com">SuiteSparse</a> package, \b GPL </td>
<td></td></tr>
+<tr><td>KLU</td><td>\link KLUSupport_Module KLUSupport \endlink</td><td>Direct LU factorization</td><td>Square</td><td>Fill-in reducing, suited for circuit simulation</td>
+ <td>Requires the <a href="http://www.suitesparse.com">SuiteSparse</a> package, \b GPL </td>
+ <td></td></tr>
<tr><td>SuperLU</td><td>\link SuperLUSupport_Module SuperLUSupport \endlink</td><td>Direct LU factorization</td><td>Square</td><td>Fill-in reducing, Leverage fast dense algebra</td>
<td>Requires the <a href="http://crd-legacy.lbl.gov/~xiaoye/SuperLU/">SuperLU</a> library, (BSD-like)</td>
<td></td></tr>
diff --git a/doc/SparseQuickReference.dox b/doc/SparseQuickReference.dox
index a25622e80..81a73eec2 100644
--- a/doc/SparseQuickReference.dox
+++ b/doc/SparseQuickReference.dox
@@ -80,7 +80,7 @@ sm1.setZero();
\section SparseBasicInfos Matrix properties
-Beyond the basic functions rows() and cols(), there are some useful functions that are available to easily get some informations from the matrix.
+Beyond the basic functions rows() and cols(), there are some useful functions available to easily get some information from the matrix.
<table class="manual">
<tr>
<td> \code
diff --git a/doc/TemplateKeyword.dox b/doc/TemplateKeyword.dox
index b84cfdae9..fbf2c7081 100644
--- a/doc/TemplateKeyword.dox
+++ b/doc/TemplateKeyword.dox
@@ -76,7 +76,7 @@ point where the template is defined, without knowing the actual value of the tem
and \c Derived2 in the example). That means that the compiler cannot know that <tt>dst.triangularView</tt> is
a member template and that the following &lt; symbol is part of the delimiter for the template
parameter. Another possibility would be that <tt>dst.triangularView</tt> is a member variable with the &lt;
-symbol refering to the <tt>operator&lt;()</tt> function. In fact, the compiler should choose the second
+symbol referring to the <tt>operator&lt;()</tt> function. In fact, the compiler should choose the second
possibility, according to the standard. If <tt>dst.triangularView</tt> is a member template (as in our case),
the programmer should specify this explicitly with the \c template keyword and write <tt>dst.template
triangularView</tt>.
diff --git a/doc/TopicLazyEvaluation.dox b/doc/TopicLazyEvaluation.dox
index 101ef8c72..b7820e3e6 100644
--- a/doc/TopicLazyEvaluation.dox
+++ b/doc/TopicLazyEvaluation.dox
@@ -58,7 +58,7 @@ the product <tt>matrix3 * matrix4</tt> gets evaluated immediately into a tempora
\code matrix1 = matrix2 * (matrix3 + matrix4); \endcode
-Here, provided the matrices have at least 2 rows and 2 columns, each coefficienct of the expression <tt>matrix3 + matrix4</tt> is going to be used several times in the matrix product. Instead of computing the sum everytime, it is much better to compute it once and store it in a temporary variable. Eigen understands this and evaluates <tt>matrix3 + matrix4</tt> into a temporary variable before evaluating the product.
+Here, provided the matrices have at least 2 rows and 2 columns, each coefficient of the expression <tt>matrix3 + matrix4</tt> is going to be used several times in the matrix product. Instead of computing the sum every time, it is much better to compute it once and store it in a temporary variable. Eigen understands this and evaluates <tt>matrix3 + matrix4</tt> into a temporary variable before evaluating the product.
*/
diff --git a/doc/TopicLinearAlgebraDecompositions.dox b/doc/TopicLinearAlgebraDecompositions.dox
index 491470627..0965da872 100644
--- a/doc/TopicLinearAlgebraDecompositions.dox
+++ b/doc/TopicLinearAlgebraDecompositions.dox
@@ -4,7 +4,7 @@ namespace Eigen {
This page presents a catalogue of the dense matrix decompositions offered by Eigen.
For an introduction on linear solvers and decompositions, check this \link TutorialLinearAlgebra page \endlink.
-To get an overview of the true relative speed of the different decomposition, check this \link DenseDecompositionBenchmark benchmark \endlink.
+To get an overview of the true relative speed of the different decompositions, check this \link DenseDecompositionBenchmark benchmark \endlink.
\section TopicLinAlgBigTable Catalogue of decompositions offered by Eigen
@@ -114,6 +114,18 @@ To get an overview of the true relative speed of the different decomposition, ch
<tr><th class="inter" colspan="9">\n Singular values and eigenvalues decompositions</th></tr>
<tr>
+ <td>BDCSVD (divide \& conquer)</td>
+ <td>-</td>
+ <td>One of the fastest SVD algorithms</td>
+ <td>Excellent</td>
+ <td>Yes</td>
+ <td>Singular values/vectors, least squares</td>
+ <td>Yes (and does least squares)</td>
+ <td>Excellent</td>
+ <td>Blocked bidiagonalization</td>
+ </tr>
+
+ <tr>
<td>JacobiSVD (two-sided)</td>
<td>-</td>
<td>Slow (but fast for small matrices)</td>
@@ -248,7 +260,7 @@ To get an overview of the true relative speed of the different decomposition, ch
<dt><b>Blocking</b></dt>
<dd>Means the algorithm can work per block, whence guaranteeing a good scaling of the performance for large matrices.</dd>
<dt><b>Implicit Multi Threading (MT)</b></dt>
- <dd>Means the algorithm can take advantage of multicore processors via OpenMP. "Implicit" means the algortihm itself is not parallelized, but that it relies on parallelized matrix-matrix product rountines.</dd>
+  <dd>Means the algorithm can take advantage of multicore processors via OpenMP. "Implicit" means the algorithm itself is not parallelized, but that it relies on parallelized matrix-matrix product routines.</dd>
<dt><b>Explicit Multi Threading (MT)</b></dt>
<dd>Means the algorithm is explicitly parallelized to take advantage of multicore processors via OpenMP.</dd>
<dt><b>Meta-unroller</b></dt>
diff --git a/doc/TopicMultithreading.dox b/doc/TopicMultithreading.dox
index 47c9b261f..bc394f484 100644
--- a/doc/TopicMultithreading.dox
+++ b/doc/TopicMultithreading.dox
@@ -47,7 +47,7 @@ int main(int argc, char** argv)
\warning note that all functions generating random matrices are \b not re-entrant nor thread-safe. Those include DenseBase::Random(), and DenseBase::setRandom() despite a call to Eigen::initParallel(). This is because these functions are based on std::rand which is not re-entrant. For thread-safe random generator, we recommend the use of boost::random or c++11 random feature.
-In the case your application is parallelized with OpenMP, you might want to disable Eigen's own parallization as detailed in the previous section.
+In the case your application is parallelized with OpenMP, you might want to disable Eigen's own parallelization as detailed in the previous section.
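A minimal sketch of such a setup (the call order is what matters; the thread count is illustrative):
\code
#include <Eigen/Dense>

int main()
{
  Eigen::initParallel();    // required if the application spawns its own threads
  Eigen::setNbThreads(1);   // disable Eigen's own OpenMP parallelization
  // ... application-managed parallel work using Eigen goes here ...
  return 0;
}
\endcode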
*/
diff --git a/doc/TutorialGeometry.dox b/doc/TutorialGeometry.dox
index 2e1420f98..86e9f1e72 100644
--- a/doc/TutorialGeometry.dox
+++ b/doc/TutorialGeometry.dox
@@ -111,7 +111,7 @@ rot3 = rot1.slerp(alpha,rot2);\endcode</td></tr>
<a href="#" class="top">top</a>\section TutorialGeoTransform Affine transformations
-Generic affine transformations are represented by the Transform class which internaly
+Generic affine transformations are represented by the Transform class which internally
is a (Dim+1)^2 matrix. In Eigen we have chosen to not distinghish between points and
vectors such that all points are actually represented by displacement vectors from the
origin ( \f$ \mathbf{p} \equiv \mathbf{p}-0 \f$ ). With that in mind, real points and
diff --git a/doc/TutorialLinearAlgebra.dox b/doc/TutorialLinearAlgebra.dox
index cb92ceeae..a72724143 100644
--- a/doc/TutorialLinearAlgebra.dox
+++ b/doc/TutorialLinearAlgebra.dox
@@ -73,7 +73,7 @@ depending on your matrix and the trade-off you want to make:
<td>ColPivHouseholderQR</td>
<td>colPivHouseholderQr()</td>
<td>None</td>
- <td>++</td>
+ <td>+</td>
<td>-</td>
<td>+++</td>
</tr>
@@ -86,6 +86,14 @@ depending on your matrix and the trade-off you want to make:
<td>+++</td>
</tr>
<tr class="alt">
+ <td>CompleteOrthogonalDecomposition</td>
+ <td>completeOrthogonalDecomposition()</td>
+ <td>None</td>
+ <td>+</td>
+ <td>-</td>
+ <td>+++</td>
+ </tr>
+ <tr class="alt">
<td>LLT</td>
<td>llt()</td>
<td>Positive definite</td>
@@ -102,14 +110,23 @@ depending on your matrix and the trade-off you want to make:
<td>++</td>
</tr>
<tr class="alt">
+ <td>BDCSVD</td>
+ <td>bdcSvd()</td>
+ <td>None</td>
+ <td>-</td>
+ <td>-</td>
+ <td>+++</td>
+ </tr>
+ <tr class="alt">
<td>JacobiSVD</td>
<td>jacobiSvd()</td>
<td>None</td>
- <td>- -</td>
+ <td>-</td>
<td>- - -</td>
<td>+++</td>
</tr>
</table>
+To get an overview of the true relative speed of the different decompositions, check this \link DenseDecompositionBenchmark benchmark \endlink.
All of these decompositions offer a solve() method that works as in the above example.
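For instance, the two entries added to the table are used in exactly the same way (matrix and vector names are illustrative):
\code
MatrixXf A = MatrixXf::Random(5, 3);
VectorXf b = VectorXf::Random(5);
VectorXf x1 = A.completeOrthogonalDecomposition().solve(b);
VectorXf x2 = A.bdcSvd(ComputeThinU | ComputeThinV).solve(b);
\endcode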
@@ -183,8 +200,11 @@ Here is an example:
\section TutorialLinAlgLeastsquares Least squares solving
-The most accurate method to do least squares solving is with a SVD decomposition. Eigen provides one
-as the JacobiSVD class, and its solve() is doing least-squares solving.
+The most accurate method to do least squares solving is with an SVD decomposition.
+Eigen provides two implementations.
+The recommended one is the BDCSVD class, which scales well for large problems
+and automatically falls back to the JacobiSVD class for smaller problems.
+For both classes, their solve() method does least-squares solving.
Here is an example:
<table class="example">
diff --git a/doc/TutorialMapClass.dox b/doc/TutorialMapClass.dox
index f8fb0fd2f..caa2539d8 100644
--- a/doc/TutorialMapClass.dox
+++ b/doc/TutorialMapClass.dox
@@ -29,9 +29,9 @@ Map<const Vector4i> mi(pi);
\endcode
where \c pi is an \c int \c *. In this case the size does not have to be passed to the constructor, because it is already specified by the Matrix/Array type.
-Note that Map does not have a default constructor; you \em must pass a pointer to intialize the object. However, you can work around this requirement (see \ref TutorialMapPlacementNew).
+Note that Map does not have a default constructor; you \em must pass a pointer to initialize the object. However, you can work around this requirement (see \ref TutorialMapPlacementNew).
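As a sketch of that workaround (data and sizes are illustrative):
\code
int data[4]  = {1, 2, 3, 4};
int other[4] = {5, 6, 7, 8};
Map<Vector4i> m(data);            // a Map must be given a pointer at construction
new (&m) Map<Vector4i>(other);    // placement new re-seats the existing Map onto another buffer
\endcode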
-Map is flexible enough to accomodate a variety of different data representations. There are two other (optional) template parameters:
+Map is flexible enough to accommodate a variety of different data representations. There are two other (optional) template parameters:
\code
Map<typename MatrixType,
int MapOptions,
diff --git a/doc/TutorialSparse.dox b/doc/TutorialSparse.dox
index 352907408..350ea1139 100644
--- a/doc/TutorialSparse.dox
+++ b/doc/TutorialSparse.dox
@@ -57,7 +57,7 @@ The \c "_" indicates available free space to quickly insert new elements.
Assuming no reallocation is needed, the insertion of a random element is therefore in O(nnz_j) where nnz_j is the number of nonzeros of the respective inner vector.
On the other hand, inserting elements with increasing inner indices in a given inner vector is much more efficient since this only requires to increase the respective \c InnerNNZs entry that is a O(1) operation.
-The case where no empty space is available is a special case, and is refered as the \em compressed mode.
+The case where no empty space is available is a special case, and is referred to as the \em compressed mode.
It corresponds to the widely used Compressed Column (or Row) Storage schemes (CCS or CRS).
Any SparseMatrix can be turned to this form by calling the SparseMatrix::makeCompressed() function.
In this case, one can remark that the \c InnerNNZs array is redundant with \c OuterStarts because we the equality: \c InnerNNZs[j] = \c OuterStarts[j+1]-\c OuterStarts[j].
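A short sketch of switching to the compressed mode (sizes and values are illustrative):
\code
SparseMatrix<double> mat(1000, 1000);
mat.insert(3, 7) = 1.5;       // may leave free space in the inner vectors
mat.makeCompressed();         // squeeze it out: standard CCS/CRS layout
bool c = mat.isCompressed();  // now true
\endcode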
@@ -212,7 +212,7 @@ See the SparseMatrix::setFromTriplets() function and class Triplet for more deta
In some cases, however, slightly higher performance, and lower memory consumption can be reached by directly inserting the non-zeros into the destination matrix.
-A typical scenario of this approach is illustrated bellow:
+A typical scenario of this approach is illustrated below:
\code
1: SparseMatrix<double> mat(rows,cols); // default is column major
2: mat.reserve(VectorXi::Constant(cols,6));
diff --git a/doc/UnalignedArrayAssert.dox b/doc/UnalignedArrayAssert.dox
index 0f7022973..8676faa1b 100644
--- a/doc/UnalignedArrayAssert.dox
+++ b/doc/UnalignedArrayAssert.dox
@@ -117,8 +117,8 @@ It doesn't disable 16-byte alignment, because that would mean that vectorized an
\section checkmycode How can I check my code is safe regarding alignment issues?
-Unfortunately, there is no possibility in C++ to detect any of the aformentioned shortcoming at compile time (though static analysers are becoming more and more powerful and could detect some of them).
-Even at runtime, all we can do is to catch invalid unaligned allocation and trigger the explicit assertion mentioned at the begining of this page.
+Unfortunately, there is no way in C++ to detect any of the aforementioned shortcomings at compile time (though static analysers are becoming more and more powerful and could detect some of them).
+Even at runtime, all we can do is catch invalid unaligned allocations and trigger the explicit assertion mentioned at the beginning of this page.
Therefore, if your program runs fine on a given system with some given compilation flags, then this does not guarantee that your code is safe. For instance, on most 64 bits systems buffer are aligned on 16 bytes boundary and so, if you do not enable AVX instruction set, then your code will run fine. On the other hand, the same code may assert if moving to a more exotic platform, or enabling AVX instructions that required 32 bytes alignment by default.
The situation is not hopeless though. Assuming your code is well covered by unit test, then you can check its alignment safety by linking it to a custom malloc library returning 8 bytes aligned buffers only. This way all alignment shortcomings should pop-up. To this end, you must also compile your program with \link TopicPreprocessorDirectivesPerformance EIGEN_MALLOC_ALREADY_ALIGNED=0 \endlink.
diff --git a/doc/UsingIntelMKL.dox b/doc/UsingIntelMKL.dox
index a1a3a18f2..fc35c3cf0 100644
--- a/doc/UsingIntelMKL.dox
+++ b/doc/UsingIntelMKL.dox
@@ -63,6 +63,12 @@ In addition you can choose which parts will be substituted by defining one or mu
<tr><td>\c EIGEN_USE_MKL_ALL </td><td>Defines \c EIGEN_USE_BLAS, \c EIGEN_USE_LAPACKE, and \c EIGEN_USE_MKL_VML </td></tr>
</table>
+The \c EIGEN_USE_BLAS and \c EIGEN_USE_LAPACKE* macros can be combined with \c EIGEN_USE_MKL to explicitly tell Eigen that the underlying BLAS/Lapack implementation is Intel MKL.
+The main effect is to enable the MKL direct call feature (\c MKL_DIRECT_CALL).
+This may help to increase the performance of some MKL BLAS (?GEMM, ?GEMV, ?TRSM, ?AXPY and ?DOT) and LAPACK (LU, Cholesky and QR) routines for very small matrices.
+MKL direct call can be disabled by defining \c EIGEN_MKL_NO_DIRECT_CALL.
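A minimal sketch of how these macros can be combined, before any Eigen header is included (linking against MKL is assumed):
\code
#define EIGEN_USE_BLAS
#define EIGEN_USE_LAPACKE
#define EIGEN_USE_MKL                // tells Eigen the backend is MKL and enables MKL_DIRECT_CALL
// #define EIGEN_MKL_NO_DIRECT_CALL  // uncomment to opt out of direct calls
#include <Eigen/Dense>
\endcode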
+
+
Note that the BLAS and LAPACKE backends can be enabled for any F77 compatible BLAS and LAPACK libraries. See this \link TopicUsingBlasLapack page \endlink for the details.
Finally, the PARDISO sparse solver shipped with Intel MKL can be used through the \ref PardisoLU, \ref PardisoLLT and \ref PardisoLDLT classes of the \ref PardisoSupport_Module.
diff --git a/doc/UsingNVCC.dox b/doc/UsingNVCC.dox
index f8e755b79..36beb2ddd 100644
--- a/doc/UsingNVCC.dox
+++ b/doc/UsingNVCC.dox
@@ -3,18 +3,16 @@ namespace Eigen {
/** \page TopicCUDA Using Eigen in CUDA kernels
-\b Disclaimer: this page is about an \b experimental feature in %Eigen.
-
-Staring from CUDA 5.0, the CUDA compiler, \c nvcc, is able to properly parse %Eigen's code (almost).
-A few adaptations of the %Eigen's code already allows to use some parts of %Eigen in your own CUDA kernels.
-To this end you need the devel branch of %Eigen, CUDA 5.0 or greater with GCC.
+Starting from CUDA 5.5 and Eigen 3.3, it is possible to use Eigen's fixed-size matrices, vectors, and arrays within CUDA kernels. This is especially useful when working on numerous but small problems. By default, when Eigen's headers are included within a .cu file compiled by nvcc, most of Eigen's functions and methods are prefixed by the \c __device__ \c __host__ keywords, making them callable from both host and device code.
+This support can be disabled by defining \c EIGEN_NO_CUDA before including any Eigen header.
+This might be useful to disable some warnings when a .cu file makes use of Eigen on the host side only.
+However, in both cases, the host's SIMD vectorization has to be disabled in .cu files.
+It is thus \b strongly \b recommended to properly move all costly host computation from your .cu files to regular .cpp files.
Known issues:
- \c nvcc with MS Visual Studio does not work (patch welcome)
- - \c nvcc with \c clang does not work (patch welcome)
-
- \c nvcc 5.5 with gcc-4.7 (or greater) has issues with the standard \c \<limits\> header file. To workaround this, you can add the following before including any other files:
\code
// workaround issue between gcc >= 4.7 and cuda 5.5
diff --git a/doc/eigen_navtree_hacks.js b/doc/eigen_navtree_hacks.js
index bd7e02b38..39c59f73c 100644
--- a/doc/eigen_navtree_hacks.js
+++ b/doc/eigen_navtree_hacks.js
@@ -65,6 +65,10 @@ function getNode(o, po)
function resizeHeight()
{
var toc = $("#nav-toc");
+ var header = $("#header");
+ var content = $("#doc-content");
+ var navtree = $("#nav-path");
+ var sidenav = $("#side-nav");
var tocHeight = toc.height(); // <- we added this line
var headerHeight = header.height();
var footerHeight = footer.height();
diff --git a/doc/eigendoxy.css b/doc/eigendoxy.css
index 6ce2b839b..427b128ba 100644
--- a/doc/eigendoxy.css
+++ b/doc/eigendoxy.css
@@ -93,7 +93,7 @@ table th.inter {
border-color: #cccccc;
}
-/** class for exemple / output tables **/
+/** class for example / output tables **/
table.example {
}
@@ -219,3 +219,8 @@ h3.version {
td.width20em p.endtd {
width: 20em;
}
+
+/* needed for huge screens */
+.ui-resizable-e {
+ background-repeat: repeat-y;
+} \ No newline at end of file
diff --git a/doc/eigendoxy_footer.html.in b/doc/eigendoxy_footer.html.in
index 878244a19..9ac0596cb 100644
--- a/doc/eigendoxy_footer.html.in
+++ b/doc/eigendoxy_footer.html.in
@@ -5,14 +5,14 @@
$navpath
<li class="footer">$generatedby
<a href="http://www.doxygen.org/index.html">
- <img class="footer" src="$relpath$doxygen.png" alt="doxygen"/></a> $doxygenversion </li>
+ <img class="footer" src="$relpath^doxygen.png" alt="doxygen"/></a> $doxygenversion </li>
</ul>
</div>
<!--END GENERATE_TREEVIEW-->
<!--BEGIN !GENERATE_TREEVIEW-->
<hr class="footer"/><address class="footer"><small>
$generatedby &#160;<a href="http://www.doxygen.org/index.html">
-<img class="footer" src="$relpath$doxygen.png" alt="doxygen"/>
+<img class="footer" src="$relpath^doxygen.png" alt="doxygen"/>
</a> $doxygenversion
</small></address>
<!--END !GENERATE_TREEVIEW-->
diff --git a/doc/eigendoxy_header.html.in b/doc/eigendoxy_header.html.in
index 0f3859f40..bb149f8f0 100644
--- a/doc/eigendoxy_header.html.in
+++ b/doc/eigendoxy_header.html.in
@@ -4,25 +4,23 @@
<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
<meta http-equiv="X-UA-Compatible" content="IE=9"/>
<meta name="generator" content="Doxygen $doxygenversion"/>
+<meta name="viewport" content="width=device-width, initial-scale=1"/>
<!--BEGIN PROJECT_NAME--><title>$projectname: $title</title><!--END PROJECT_NAME-->
<!--BEGIN !PROJECT_NAME--><title>$title</title><!--END !PROJECT_NAME-->
-<link href="$relpath$tabs.css" rel="stylesheet" type="text/css"/>
-<script type="text/javascript" src="$relpath$jquery.js"></script>
-<script type="text/javascript" src="$relpath$dynsections.js"></script>
+<link href="$relpath^tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="$relpath^jquery.js"></script>
+<script type="text/javascript" src="$relpath^dynsections.js"></script>
$treeview
$search
$mathjax
-<link href="$relpath$$stylesheet" rel="stylesheet" type="text/css" />
+<link href="$relpath^$stylesheet" rel="stylesheet" type="text/css" />
<link href="$relpath$eigendoxy.css" rel="stylesheet" type="text/css">
<!-- $extrastylesheet -->
<script type="text/javascript" src="$relpath$eigen_navtree_hacks.js"></script>
-<!-- <script type="text/javascript"> -->
-<!-- </script> -->
</head>
<body>
<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
-<!-- <a name="top"></a> -->
<!--BEGIN TITLEAREA-->
<div id="titlearea">
@@ -30,10 +28,10 @@ $mathjax
<tbody>
<tr style="height: 56px;">
<!--BEGIN PROJECT_LOGO-->
- <td id="projectlogo"><img alt="Logo" src="$relpath$$projectlogo"/></td>
+ <td id="projectlogo"><img alt="Logo" src="$relpath^$projectlogo"/></td>
<!--END PROJECT_LOGO-->
<!--BEGIN PROJECT_NAME-->
- <td style="padding-left: 0.5em;">
+ <td id="projectalign" style="padding-left: 0.5em;">
<div id="projectname"><a href="http://eigen.tuxfamily.org">$projectname</a>
<!--BEGIN PROJECT_NUMBER-->&#160;<span id="projectnumber">$projectnumber</span><!--END PROJECT_NUMBER-->
</div>
@@ -42,7 +40,7 @@ $mathjax
<!--END PROJECT_NAME-->
<!--BEGIN !PROJECT_NAME-->
<!--BEGIN PROJECT_BRIEF-->
- <td style="padding-left: 0.5em;">
+ <td id="projectalign" style="padding-left: 0.5em;">
<div id="projectbrief">$projectbrief</div>
</td>
<!--END PROJECT_BRIEF-->
diff --git a/doc/examples/Cwise_lgamma.cpp b/doc/examples/Cwise_lgamma.cpp
index f1c4f503e..6bfaccbce 100644
--- a/doc/examples/Cwise_lgamma.cpp
+++ b/doc/examples/Cwise_lgamma.cpp
@@ -6,4 +6,4 @@ int main()
{
Array4d v(0.5,10,0,-1);
std::cout << v.lgamma() << std::endl;
-} \ No newline at end of file
+}
diff --git a/doc/examples/TutorialLinAlgSVDSolve.cpp b/doc/examples/TutorialLinAlgSVDSolve.cpp
index 9fbc031de..f109f04e5 100644
--- a/doc/examples/TutorialLinAlgSVDSolve.cpp
+++ b/doc/examples/TutorialLinAlgSVDSolve.cpp
@@ -11,5 +11,5 @@ int main()
VectorXf b = VectorXf::Random(3);
cout << "Here is the right hand side b:\n" << b << endl;
cout << "The least-squares solution is:\n"
- << A.jacobiSvd(ComputeThinU | ComputeThinV).solve(b) << endl;
+ << A.bdcSvd(ComputeThinU | ComputeThinV).solve(b) << endl;
}
diff --git a/doc/examples/Tutorial_simple_example_dynamic_size.cpp b/doc/examples/Tutorial_simple_example_dynamic_size.cpp
index 0f0280e0e..defcb1ee4 100644
--- a/doc/examples/Tutorial_simple_example_dynamic_size.cpp
+++ b/doc/examples/Tutorial_simple_example_dynamic_size.cpp
@@ -10,7 +10,7 @@ int main()
MatrixXi m(size,size+1); // a (size)x(size+1)-matrix of int's
for (int j=0; j<m.cols(); ++j) // loop over columns
for (int i=0; i<m.rows(); ++i) // loop over rows
- m(i,j) = i+j*m.rows(); // to access matrix coefficients,
+ m(i,j) = i+j*size; // to access matrix coefficients,
// use operator()(int,int)
std::cout << m << "\n\n";
}
diff --git a/doc/examples/matrixfree_cg.cpp b/doc/examples/matrixfree_cg.cpp
index 6a205aea3..74699381c 100644
--- a/doc/examples/matrixfree_cg.cpp
+++ b/doc/examples/matrixfree_cg.cpp
@@ -67,6 +67,7 @@ namespace internal {
// This method should implement "dst += alpha * lhs * rhs" inplace,
// however, for iterative solvers, alpha is always equal to 1, so let's not bother about it.
assert(alpha==Scalar(1) && "scaling is not implemented");
+ EIGEN_ONLY_USED_FOR_DEBUG(alpha);
// Here we could simply call dst.noalias() += lhs.my_matrix() * rhs,
// but let's do something fancier (and less efficient):
diff --git a/doc/examples/nullary_indexing.cpp b/doc/examples/nullary_indexing.cpp
index e27c3585a..ca1745628 100644
--- a/doc/examples/nullary_indexing.cpp
+++ b/doc/examples/nullary_indexing.cpp
@@ -30,7 +30,7 @@ public:
// [function]
template <class ArgType, class RowIndexType, class ColIndexType>
CwiseNullaryOp<indexing_functor<ArgType,RowIndexType,ColIndexType>, typename indexing_functor<ArgType,RowIndexType,ColIndexType>::MatrixType>
-indexing(const Eigen::MatrixBase<ArgType>& arg, const RowIndexType& row_indices, const ColIndexType& col_indices)
+mat_indexing(const Eigen::MatrixBase<ArgType>& arg, const RowIndexType& row_indices, const ColIndexType& col_indices)
{
typedef indexing_functor<ArgType,RowIndexType,ColIndexType> Func;
typedef typename Func::MatrixType MatrixType;
@@ -45,7 +45,7 @@ int main()
Eigen::MatrixXi A = Eigen::MatrixXi::Random(4,4);
Array3i ri(1,2,1);
ArrayXi ci(6); ci << 3,2,1,0,0,2;
- Eigen::MatrixXi B = indexing(A, ri, ci);
+ Eigen::MatrixXi B = mat_indexing(A, ri, ci);
std::cout << "A =" << std::endl;
std::cout << A << std::endl << std::endl;
std::cout << "A([" << ri.transpose() << "], [" << ci.transpose() << "]) =" << std::endl;
@@ -53,11 +53,11 @@ int main()
std::cout << "[main1]\n";
std::cout << "[main2]\n";
- B = indexing(A, ri+1, ci);
+ B = mat_indexing(A, ri+1, ci);
std::cout << "A(ri+1,ci) =" << std::endl;
std::cout << B << std::endl << std::endl;
#if __cplusplus >= 201103L
- B = indexing(A, ArrayXi::LinSpaced(13,0,12).unaryExpr([](int x){return x%4;}), ArrayXi::LinSpaced(4,0,3));
+ B = mat_indexing(A, ArrayXi::LinSpaced(13,0,12).unaryExpr([](int x){return x%4;}), ArrayXi::LinSpaced(4,0,3));
std::cout << "A(ArrayXi::LinSpaced(13,0,12).unaryExpr([](int x){return x%4;}), ArrayXi::LinSpaced(4,0,3)) =" << std::endl;
std::cout << B << std::endl << std::endl;
#endif
diff --git a/doc/snippets/DirectionWise_hnormalized.cpp b/doc/snippets/DirectionWise_hnormalized.cpp
index 3410790a8..2451f6e7b 100644
--- a/doc/snippets/DirectionWise_hnormalized.cpp
+++ b/doc/snippets/DirectionWise_hnormalized.cpp
@@ -1,7 +1,6 @@
-typedef Matrix<double,4,Dynamic> Matrix4Xd;
Matrix4Xd M = Matrix4Xd::Random(4,5);
Projective3d P(Matrix4d::Random());
cout << "The matrix M is:" << endl << M << endl << endl;
cout << "M.colwise().hnormalized():" << endl << M.colwise().hnormalized() << endl << endl;
cout << "P*M:" << endl << P*M << endl << endl;
-cout << "(P*M).colwise().hnormalized():" << endl << (P*M).colwise().hnormalized() << endl << endl; \ No newline at end of file
+cout << "(P*M).colwise().hnormalized():" << endl << (P*M).colwise().hnormalized() << endl << endl;
diff --git a/doc/snippets/MatrixBase_cwiseEqual.cpp b/doc/snippets/MatrixBase_cwiseEqual.cpp
index eb3656f4c..469af642c 100644
--- a/doc/snippets/MatrixBase_cwiseEqual.cpp
+++ b/doc/snippets/MatrixBase_cwiseEqual.cpp
@@ -3,5 +3,5 @@ m << 1, 0,
1, 1;
cout << "Comparing m with identity matrix:" << endl;
cout << m.cwiseEqual(MatrixXi::Identity(2,2)) << endl;
-int count = m.cwiseEqual(MatrixXi::Identity(2,2)).count();
+Index count = m.cwiseEqual(MatrixXi::Identity(2,2)).count();
cout << "Number of coefficients that are equal: " << count << endl;
diff --git a/doc/snippets/MatrixBase_cwiseNotEqual.cpp b/doc/snippets/MatrixBase_cwiseNotEqual.cpp
index 6a2e4fb6c..7f0a105d6 100644
--- a/doc/snippets/MatrixBase_cwiseNotEqual.cpp
+++ b/doc/snippets/MatrixBase_cwiseNotEqual.cpp
@@ -3,5 +3,5 @@ m << 1, 0,
1, 1;
cout << "Comparing m with identity matrix:" << endl;
cout << m.cwiseNotEqual(MatrixXi::Identity(2,2)) << endl;
-int count = m.cwiseNotEqual(MatrixXi::Identity(2,2)).count();
+Index count = m.cwiseNotEqual(MatrixXi::Identity(2,2)).count();
cout << "Number of coefficients that are not equal: " << count << endl;
diff --git a/doc/snippets/MatrixBase_reshaped_all.cpp b/doc/snippets/MatrixBase_reshaped_all.cpp
index 501f6276f..a4841834f 100644
--- a/doc/snippets/MatrixBase_reshaped_all.cpp
+++ b/doc/snippets/MatrixBase_reshaped_all.cpp
@@ -1,4 +1,3 @@
-using Eigen::placeholders::all;
Matrix4i m = Matrix4i::Random();
cout << "Here is the matrix m:" << endl << m << endl;
cout << "Here is m(all).transpose():" << endl << m(all).transpose() << endl;
diff --git a/doc/snippets/Matrix_Map_stride.cpp b/doc/snippets/Matrix_Map_stride.cpp
new file mode 100644
index 000000000..ae42a127a
--- /dev/null
+++ b/doc/snippets/Matrix_Map_stride.cpp
@@ -0,0 +1,7 @@
+Matrix4i A;
+A << 1, 2, 3, 4,
+ 5, 6, 7, 8,
+ 9, 10, 11, 12,
+ 13, 14, 15, 16;
+
+std::cout << Matrix2i::Map(&A(1,1),Stride<8,2>()) << std::endl;
diff --git a/doc/snippets/VectorwiseOp_homogeneous.cpp b/doc/snippets/VectorwiseOp_homogeneous.cpp
index aba4fed0e..67cf5737d 100644
--- a/doc/snippets/VectorwiseOp_homogeneous.cpp
+++ b/doc/snippets/VectorwiseOp_homogeneous.cpp
@@ -1,7 +1,6 @@
-typedef Matrix<double,3,Dynamic> Matrix3Xd;
Matrix3Xd M = Matrix3Xd::Random(3,5);
Projective3d P(Matrix4d::Random());
cout << "The matrix M is:" << endl << M << endl << endl;
cout << "M.colwise().homogeneous():" << endl << M.colwise().homogeneous() << endl << endl;
cout << "P * M.colwise().homogeneous():" << endl << P * M.colwise().homogeneous() << endl << endl;
-cout << "P * M.colwise().homogeneous().hnormalized(): " << endl << (P * M.colwise().homogeneous()).colwise().hnormalized() << endl << endl; \ No newline at end of file
+cout << "P * M.colwise().homogeneous().hnormalized(): " << endl << (P * M.colwise().homogeneous()).colwise().hnormalized() << endl << endl;
diff --git a/doc/special_examples/Tutorial_sparse_example.cpp b/doc/special_examples/Tutorial_sparse_example.cpp
index 830e196ea..8850db052 100644
--- a/doc/special_examples/Tutorial_sparse_example.cpp
+++ b/doc/special_examples/Tutorial_sparse_example.cpp
@@ -1,5 +1,6 @@
#include <Eigen/Sparse>
#include <vector>
+#include <iostream>
typedef Eigen::SparseMatrix<double> SpMat; // declares a column-major sparse matrix type of double
typedef Eigen::Triplet<double> T;
@@ -9,10 +10,13 @@ void saveAsBitmap(const Eigen::VectorXd& x, int n, const char* filename);
int main(int argc, char** argv)
{
- assert(argc==2);
+ if(argc!=2) {
+ std::cerr << "Error: expected one and only one argument.\n";
+ return -1;
+ }
int n = 300; // size of the image
- int m = n*n; // number of unknows (=number of pixels)
+ int m = n*n; // number of unknowns (=number of pixels)
// Assembly:
std::vector<T> coefficients; // list of non-zeros coefficients
diff --git a/lapack/CMakeLists.txt b/lapack/CMakeLists.txt
index 9883d4c72..522ba8a2b 100644
--- a/lapack/CMakeLists.txt
+++ b/lapack/CMakeLists.txt
@@ -35,7 +35,7 @@ set(EigenLapack_SRCS ${EigenLapack_SRCS}
second_NONE.f dsecnd_NONE.f
)
-option(EIGEN_ENABLE_LAPACK_TESTS OFF "Enbale the Lapack unit tests")
+option(EIGEN_ENABLE_LAPACK_TESTS OFF "Enable the Lapack unit tests")
if(EIGEN_ENABLE_LAPACK_TESTS)
@@ -49,7 +49,7 @@ if(EIGEN_ENABLE_LAPACK_TESTS)
INACTIVITY_TIMEOUT 15
TIMEOUT 240
STATUS download_status
- EXPECTED_MD5 5758ce55afcf79da98de8b9de1615ad5
+ EXPECTED_MD5 ab5742640617e3221a873aba44bbdc93
SHOW_PROGRESS)
message(STATUS ${download_status})
@@ -59,7 +59,7 @@ if(EIGEN_ENABLE_LAPACK_TESTS)
message(STATUS "Setup lapack reference and lapack unit tests")
execute_process(COMMAND tar xzf "lapack_addons_3.4.1.tgz" WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR})
else()
- message(STATUS "Download of lapack_addons_3.4.1.tgz failed, LAPACK unit tests wont be enabled")
+ message(STATUS "Download of lapack_addons_3.4.1.tgz failed, LAPACK unit tests won't be enabled")
set(EIGEN_ENABLE_LAPACK_TESTS false)
endif()
diff --git a/scripts/buildtests.in b/scripts/buildtests.in
index 526d5b74b..ab9c18fb1 100755
--- a/scripts/buildtests.in
+++ b/scripts/buildtests.in
@@ -10,7 +10,7 @@ then
fi
TESTSLIST="@EIGEN_TESTS_LIST@"
-targets_to_make=`echo "$TESTSLIST" | egrep "$1" | xargs echo`
+targets_to_make=$(echo "$TESTSLIST" | grep -E "$1" | xargs echo)
if [ -n "${EIGEN_MAKE_ARGS:+x}" ]
then
diff --git a/scripts/eigen_gen_split_test_help.cmake b/scripts/eigen_gen_split_test_help.cmake
new file mode 100644
index 000000000..e43f5aabe
--- /dev/null
+++ b/scripts/eigen_gen_split_test_help.cmake
@@ -0,0 +1,11 @@
+#!cmake -P
+file(WRITE split_test_helper.h "")
+foreach(i RANGE 1 999)
+ file(APPEND split_test_helper.h
+ "#if defined(EIGEN_TEST_PART_${i}) || defined(EIGEN_TEST_PART_ALL)\n"
+ "#define CALL_SUBTEST_${i}(FUNC) CALL_SUBTEST(FUNC)\n"
+ "#else\n"
+ "#define CALL_SUBTEST_${i}(FUNC)\n"
+ "#endif\n\n"
+ )
+endforeach() \ No newline at end of file
diff --git a/scripts/eigen_monitor_perf.sh b/scripts/eigen_monitor_perf.sh
index 39f8e7ecd..8f3425daf 100755
--- a/scripts/eigen_monitor_perf.sh
+++ b/scripts/eigen_monitor_perf.sh
@@ -12,9 +12,9 @@ export CXX_FLAGS="-mfma -w"
####
BENCH_PATH=$EIGEN_SOURCE_PATH/bench/perf_monitoring/$PREFIX
-PREVPATH=`pwd`
-cd $EIGEN_SOURCE_PATH/bench/perf_monitoring && ./runall.sh "Haswell 2.6GHz, FMA, Apple's clang" $*
-cd $PREVPATH
+PREVPATH=$(pwd)
+cd $EIGEN_SOURCE_PATH/bench/perf_monitoring && ./runall.sh "Haswell 2.6GHz, FMA, Apple's clang" "$@"
+cd $PREVPATH || exit 1
ALLFILES="$BENCH_PATH/*.png $BENCH_PATH/*.html $BENCH_PATH/index.html $BENCH_PATH/s1.js $BENCH_PATH/s2.js"
diff --git a/test/AnnoyingScalar.h b/test/AnnoyingScalar.h
new file mode 100644
index 000000000..2b6544a6a
--- /dev/null
+++ b/test/AnnoyingScalar.h
@@ -0,0 +1,154 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2011-2018 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_TEST_ANNOYING_SCALAR_H
+#define EIGEN_TEST_ANNOYING_SCALAR_H
+
+#include <ostream>
+
+#ifndef EIGEN_TEST_ANNOYING_SCALAR_DONT_THROW
+struct my_exception
+{
+ my_exception() {}
+ ~my_exception() {}
+};
+#endif
+
+// An AnnoyingScalar is a pseudo scalar type that:
+// - can randomly through an exception in operator +
+// - randomly allocate on the heap or initialize a reference to itself making it non trivially copyable, nor movable, nor relocatable.
+
+class AnnoyingScalar
+{
+ public:
+ AnnoyingScalar() { init(); *v = 0; }
+ AnnoyingScalar(long double _v) { init(); *v = _v; }
+ AnnoyingScalar(double _v) { init(); *v = _v; }
+ AnnoyingScalar(float _v) { init(); *v = _v; }
+ AnnoyingScalar(int _v) { init(); *v = _v; }
+ AnnoyingScalar(long _v) { init(); *v = _v; }
+ #if EIGEN_HAS_CXX11
+ AnnoyingScalar(long long _v) { init(); *v = _v; }
+ #endif
+ AnnoyingScalar(const AnnoyingScalar& other) { init(); *v = *(other.v); }
+ ~AnnoyingScalar() {
+ if(v!=&data)
+ delete v;
+ instances--;
+ }
+
+ void init() {
+ if(internal::random<bool>())
+ v = new float;
+ else
+ v = &data;
+ instances++;
+ }
+
+ AnnoyingScalar operator+(const AnnoyingScalar& other) const
+ {
+ #ifndef EIGEN_TEST_ANNOYING_SCALAR_DONT_THROW
+ countdown--;
+ if(countdown<=0 && !dont_throw)
+ throw my_exception();
+ #endif
+ return AnnoyingScalar(*v+*other.v);
+ }
+
+ AnnoyingScalar operator-() const
+ { return AnnoyingScalar(-*v); }
+
+ AnnoyingScalar operator-(const AnnoyingScalar& other) const
+ { return AnnoyingScalar(*v-*other.v); }
+
+ AnnoyingScalar operator*(const AnnoyingScalar& other) const
+ { return AnnoyingScalar((*v)*(*other.v)); }
+
+ AnnoyingScalar operator/(const AnnoyingScalar& other) const
+ { return AnnoyingScalar((*v)/(*other.v)); }
+
+ AnnoyingScalar& operator+=(const AnnoyingScalar& other) { *v += *other.v; return *this; }
+ AnnoyingScalar& operator-=(const AnnoyingScalar& other) { *v -= *other.v; return *this; }
+ AnnoyingScalar& operator*=(const AnnoyingScalar& other) { *v *= *other.v; return *this; }
+ AnnoyingScalar& operator/=(const AnnoyingScalar& other) { *v /= *other.v; return *this; }
+ AnnoyingScalar& operator= (const AnnoyingScalar& other) { *v = *other.v; return *this; }
+
+ bool operator==(const AnnoyingScalar& other) const { return *v == *other.v; }
+ bool operator!=(const AnnoyingScalar& other) const { return *v != *other.v; }
+ bool operator<=(const AnnoyingScalar& other) const { return *v <= *other.v; }
+ bool operator< (const AnnoyingScalar& other) const { return *v < *other.v; }
+ bool operator>=(const AnnoyingScalar& other) const { return *v >= *other.v; }
+ bool operator> (const AnnoyingScalar& other) const { return *v > *other.v; }
+
+ float* v;
+ float data;
+ static int instances;
+#ifndef EIGEN_TEST_ANNOYING_SCALAR_DONT_THROW
+ static int countdown;
+ static bool dont_throw;
+#endif
+};
+
+AnnoyingScalar real(const AnnoyingScalar &x) { return x; }
+AnnoyingScalar imag(const AnnoyingScalar & ) { return 0; }
+AnnoyingScalar conj(const AnnoyingScalar &x) { return x; }
+AnnoyingScalar sqrt(const AnnoyingScalar &x) { return std::sqrt(*x.v); }
+AnnoyingScalar abs (const AnnoyingScalar &x) { return std::abs(*x.v); }
+AnnoyingScalar cos (const AnnoyingScalar &x) { return std::cos(*x.v); }
+AnnoyingScalar sin (const AnnoyingScalar &x) { return std::sin(*x.v); }
+AnnoyingScalar acos(const AnnoyingScalar &x) { return std::acos(*x.v); }
+AnnoyingScalar atan2(const AnnoyingScalar &y,const AnnoyingScalar &x) { return std::atan2(*y.v,*x.v); }
+
+std::ostream& operator<<(std::ostream& stream,const AnnoyingScalar& x) {
+ stream << (*(x.v));
+ return stream;
+}
+
+int AnnoyingScalar::instances = 0;
+
+#ifndef EIGEN_TEST_ANNOYING_SCALAR_DONT_THROW
+int AnnoyingScalar::countdown = 0;
+bool AnnoyingScalar::dont_throw = false;
+#endif
+
+namespace Eigen {
+template<>
+struct NumTraits<AnnoyingScalar> : NumTraits<float>
+{
+ enum {
+ RequireInitialization = true
+ };
+ typedef AnnoyingScalar Real;
+ typedef AnnoyingScalar Nested;
+ typedef AnnoyingScalar Literal;
+ typedef AnnoyingScalar NonInteger;
+};
+
+template<> inline AnnoyingScalar test_precision<AnnoyingScalar>() { return test_precision<float>(); }
+
+namespace internal {
+ template<> double cast(const AnnoyingScalar& x) { return double(*x.v); }
+ template<> float cast(const AnnoyingScalar& x) { return *x.v; }
+}
+
+}
+
+AnnoyingScalar get_test_precision(const AnnoyingScalar&)
+{ return Eigen::test_precision<AnnoyingScalar>(); }
+
+AnnoyingScalar test_relative_error(const AnnoyingScalar &a, const AnnoyingScalar &b)
+{ return test_relative_error(*a.v, *b.v); }
+
+inline bool test_isApprox(const AnnoyingScalar &a, const AnnoyingScalar &b)
+{ return internal::isApprox(*a.v, *b.v, test_precision<float>()); }
+
+inline bool test_isMuchSmallerThan(const AnnoyingScalar &a, const AnnoyingScalar &b)
+{ return test_isMuchSmallerThan(*a.v, *b.v); }
+
+#endif // EIGEN_TEST_ANNOYING_SCALAR_H
diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt
index ed5aed1c8..45e7abbd1 100644
--- a/test/CMakeLists.txt
+++ b/test/CMakeLists.txt
@@ -1,16 +1,7 @@
-# generate split test header file only if it does not yet exist
-# in order to prevent a rebuild everytime cmake is configured
-if(NOT EXISTS ${CMAKE_CURRENT_BINARY_DIR}/split_test_helper.h)
- file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/split_test_helper.h "")
- foreach(i RANGE 1 999)
- file(APPEND ${CMAKE_CURRENT_BINARY_DIR}/split_test_helper.h
- "#ifdef EIGEN_TEST_PART_${i}\n"
- "#define CALL_SUBTEST_${i}(FUNC) CALL_SUBTEST(FUNC)\n"
- "#else\n"
- "#define CALL_SUBTEST_${i}(FUNC)\n"
- "#endif\n\n"
- )
- endforeach()
+# The file split_test_helper.h was generated at first run,
+# it is now included in test/
+if(EXISTS ${CMAKE_CURRENT_BINARY_DIR}/split_test_helper.h)
+ file(REMOVE ${CMAKE_CURRENT_BINARY_DIR}/split_test_helper.h)
endif()
# check if we have a Fortran compiler
@@ -27,9 +18,18 @@ endif()
if(NOT EIGEN_Fortran_COMPILER_WORKS)
# search for a default Lapack library to complete Eigen's one
- find_package(LAPACK)
+ find_package(LAPACK QUIET)
endif()
+# TODO do the same for EXTERNAL_LAPACK
+option(EIGEN_TEST_EXTERNAL_BLAS "Use external BLAS library for testsuite" OFF)
+if(EIGEN_TEST_EXTERNAL_BLAS)
+ find_package(BLAS REQUIRED)
+ message(STATUS "BLAS_COMPILER_FLAGS: ${BLAS_COMPILER_FLAGS}")
+ add_definitions("-DEIGEN_USE_BLAS") # is adding ${BLAS_COMPILER_FLAGS} necessary?
+ list(APPEND EXTERNAL_LIBS "${BLAS_LIBRARIES}")
+endif(EIGEN_TEST_EXTERNAL_BLAS)
+
# configure blas/lapack (use Eigen's ones)
set(EIGEN_BLAS_LIBRARIES eigen_blas)
set(EIGEN_LAPACK_LIBRARIES eigen_lapack)
@@ -68,6 +68,17 @@ else()
ei_add_property(EIGEN_MISSING_BACKENDS "UmfPack, ")
endif()
+find_package(KLU)
+if(KLU_FOUND)
+ add_definitions("-DEIGEN_KLU_SUPPORT")
+ include_directories(${KLU_INCLUDES})
+ set(SPARSE_LIBS ${SPARSE_LIBS} ${KLU_LIBRARIES} ${EIGEN_BLAS_LIBRARIES})
+ set(KLU_ALL_LIBS ${KLU_LIBRARIES} ${EIGEN_BLAS_LIBRARIES})
+ ei_add_property(EIGEN_TESTED_BACKENDS "KLU, ")
+else()
+ ei_add_property(EIGEN_MISSING_BACKENDS "KLU, ")
+endif()
+
find_package(SuperLU 4.0)
if(SUPERLU_FOUND)
add_definitions("-DEIGEN_SUPERLU_SUPPORT")
@@ -80,23 +91,30 @@ else()
endif()
-find_package(Pastix)
-find_package(Scotch)
-find_package(Metis 5.0 REQUIRED)
-if(PASTIX_FOUND)
+find_package(PASTIX QUIET COMPONENTS METIS SEQ)
+# check that the PASTIX found is a version without MPI
+find_path(PASTIX_pastix_nompi.h_INCLUDE_DIRS
+ NAMES pastix_nompi.h
+ HINTS ${PASTIX_INCLUDE_DIRS}
+)
+if (NOT PASTIX_pastix_nompi.h_INCLUDE_DIRS)
+ message(STATUS "A version of Pastix has been found but pastix_nompi.h does not exist in the include directory."
+ " Because Eigen tests require a version without MPI, we disable the Pastix backend.")
+endif()
+if(PASTIX_FOUND AND PASTIX_pastix_nompi.h_INCLUDE_DIRS)
add_definitions("-DEIGEN_PASTIX_SUPPORT")
- include_directories(${PASTIX_INCLUDES})
+ include_directories(${PASTIX_INCLUDE_DIRS_DEP})
if(SCOTCH_FOUND)
- include_directories(${SCOTCH_INCLUDES})
+ include_directories(${SCOTCH_INCLUDE_DIRS})
set(PASTIX_LIBRARIES ${PASTIX_LIBRARIES} ${SCOTCH_LIBRARIES})
elseif(METIS_FOUND)
- include_directories(${METIS_INCLUDES})
+ include_directories(${METIS_INCLUDE_DIRS})
set(PASTIX_LIBRARIES ${PASTIX_LIBRARIES} ${METIS_LIBRARIES})
else(SCOTCH_FOUND)
ei_add_property(EIGEN_MISSING_BACKENDS "PaStiX, ")
endif(SCOTCH_FOUND)
- set(SPARSE_LIBS ${SPARSE_LIBS} ${PASTIX_LIBRARIES} ${ORDERING_LIBRARIES} ${EIGEN_BLAS_LIBRARIES})
- set(PASTIX_ALL_LIBS ${PASTIX_LIBRARIES} ${EIGEN_BLAS_LIBRARIES})
+ set(SPARSE_LIBS ${SPARSE_LIBS} ${PASTIX_LIBRARIES_DEP} ${ORDERING_LIBRARIES})
+ set(PASTIX_ALL_LIBS ${PASTIX_LIBRARIES_DEP})
ei_add_property(EIGEN_TESTED_BACKENDS "PaStiX, ")
else()
ei_add_property(EIGEN_MISSING_BACKENDS "PaStiX, ")
@@ -104,7 +122,7 @@ endif()
if(METIS_FOUND)
add_definitions("-DEIGEN_METIS_SUPPORT")
- include_directories(${METIS_INCLUDES})
+ include_directories(${METIS_INCLUDE_DIRS})
ei_add_property(EIGEN_TESTED_BACKENDS "METIS, ")
else()
ei_add_property(EIGEN_MISSING_BACKENDS "METIS, ")
@@ -141,6 +159,7 @@ add_custom_target(BuildOfficial)
ei_add_test(rand)
ei_add_test(meta)
+ei_add_test(numext)
ei_add_test(sizeof)
ei_add_test(dynalloc)
ei_add_test(nomalloc)
@@ -180,7 +199,7 @@ ei_add_test(smallvectors)
ei_add_test(mapped_matrix)
ei_add_test(mapstride)
ei_add_test(mapstaticmethods)
-ei_add_test(array)
+ei_add_test(array_cwise)
ei_add_test(array_for_matrix)
ei_add_test(array_replicate)
ei_add_test(array_reverse)
@@ -265,6 +284,7 @@ ei_add_test(mpl2only)
ei_add_test(inplace_decomposition)
ei_add_test(half_float)
ei_add_test(array_of_string)
+ei_add_test(num_dimensions)
add_executable(bug1213 bug1213.cpp bug1213_main.cpp)
@@ -290,6 +310,10 @@ if(UMFPACK_FOUND)
ei_add_test(umfpack_support "" "${UMFPACK_ALL_LIBS}")
endif()
+if(KLU_FOUND OR SuiteSparse_FOUND)
+ ei_add_test(klu_support "" "${KLU_ALL_LIBS}")
+endif()
+
if(SUPERLU_FOUND)
ei_add_test(superlu_support "" "${SUPERLU_ALL_LIBS}")
endif()
@@ -364,10 +388,9 @@ if(CUDA_FOUND)
if(EIGEN_TEST_CUDA_CLANG)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 --cuda-gpu-arch=sm_30")
endif()
- cuda_include_directories(${CMAKE_CURRENT_BINARY_DIR})
set(EIGEN_ADD_TEST_FILENAME_EXTENSION "cu")
- ei_add_test(cuda_basic)
+ ei_add_test(gpu_basic)
unset(EIGEN_ADD_TEST_FILENAME_EXTENSION)
@@ -376,6 +399,47 @@ endif(CUDA_FOUND)
endif(EIGEN_TEST_CUDA)
+# HIP unit tests
+option(EIGEN_TEST_HIP "Add HIP support." OFF)
+if (EIGEN_TEST_HIP)
+
+ set(HIP_PATH "/opt/rocm/hip" CACHE STRING "Path to the HIP installation.")
+
+ if (EXISTS ${HIP_PATH})
+
+ list(APPEND CMAKE_MODULE_PATH ${HIP_PATH}/cmake)
+
+ find_package(HIP REQUIRED)
+ if (HIP_FOUND)
+
+ execute_process(COMMAND ${HIP_PATH}/bin/hipconfig --platform OUTPUT_VARIABLE HIP_PLATFORM)
+
+ if (${HIP_PLATFORM} STREQUAL "hcc")
+
+ include_directories(${HIP_PATH}/include)
+
+ set(EIGEN_ADD_TEST_FILENAME_EXTENSION "cu")
+ ei_add_test(gpu_basic)
+ unset(EIGEN_ADD_TEST_FILENAME_EXTENSION)
+
+ elseif (${HIP_PLATFORM} STREQUAL "nvcc")
+ message(FATAL_ERROR "HIP_PLATFORM = nvcc is not supported within Eigen")
+ else ()
+ message(FATAL_ERROR "Unknown HIP_PLATFORM = ${HIP_PLATFORM}")
+ endif()
+
+ endif(HIP_FOUND)
+
+ else ()
+
+ message(FATAL_ERROR "EIGEN_TEST_HIP is ON, but the specified HIP_PATH (${HIP_PATH}) does not exist")
+
+ endif()
+
+endif(EIGEN_TEST_HIP)
+
+
+
file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/failtests)
add_test(NAME failtests WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/failtests COMMAND ${CMAKE_COMMAND} ${Eigen_SOURCE_DIR} -G "${CMAKE_GENERATOR}" -DEIGEN_FAILTEST=ON)
diff --git a/test/adjoint.cpp b/test/adjoint.cpp
index bdea51c10..4e1e4b5e8 100644
--- a/test/adjoint.cpp
+++ b/test/adjoint.cpp
@@ -70,7 +70,6 @@ template<typename MatrixType> void adjoint(const MatrixType& m)
Transpose.h Conjugate.h Dot.h
*/
using std::abs;
- typedef typename MatrixType::Index Index;
typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> VectorType;
@@ -146,7 +145,35 @@ template<typename MatrixType> void adjoint(const MatrixType& m)
VERIFY_IS_APPROX(rv1.template cast<Scalar>().dot(v1), rv1.dot(v1));
}
-void test_adjoint()
+template<int>
+void adjoint_extra()
+{
+ MatrixXcf a(10,10), b(10,10);
+ VERIFY_RAISES_ASSERT(a = a.transpose());
+ VERIFY_RAISES_ASSERT(a = a.transpose() + b);
+ VERIFY_RAISES_ASSERT(a = b + a.transpose());
+ VERIFY_RAISES_ASSERT(a = a.conjugate().transpose());
+ VERIFY_RAISES_ASSERT(a = a.adjoint());
+ VERIFY_RAISES_ASSERT(a = a.adjoint() + b);
+ VERIFY_RAISES_ASSERT(a = b + a.adjoint());
+
+ // no assertion should be triggered for these cases:
+ a.transpose() = a.transpose();
+ a.transpose() += a.transpose();
+ a.transpose() += a.transpose() + b;
+ a.transpose() = a.adjoint();
+ a.transpose() += a.adjoint();
+ a.transpose() += a.adjoint() + b;
+
+ // regression tests for check_for_aliasing
+ MatrixXd c(10,10);
+ c = 1.0 * MatrixXd::Ones(10,10) + c;
+ c = MatrixXd::Ones(10,10) * 1.0 + c;
+ c = c + MatrixXd::Ones(10,10) .cwiseProduct( MatrixXd::Zero(10,10) );
+ c = MatrixXd::Ones(10,10) * MatrixXd::Zero(10,10);
+}
+
+EIGEN_DECLARE_TEST(adjoint)
{
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1( adjoint(Matrix<float, 1, 1>()) );
@@ -169,32 +196,6 @@ void test_adjoint()
// test a large static matrix only once
CALL_SUBTEST_7( adjoint(Matrix<float, 100, 100>()) );
-#ifdef EIGEN_TEST_PART_13
- {
- MatrixXcf a(10,10), b(10,10);
- VERIFY_RAISES_ASSERT(a = a.transpose());
- VERIFY_RAISES_ASSERT(a = a.transpose() + b);
- VERIFY_RAISES_ASSERT(a = b + a.transpose());
- VERIFY_RAISES_ASSERT(a = a.conjugate().transpose());
- VERIFY_RAISES_ASSERT(a = a.adjoint());
- VERIFY_RAISES_ASSERT(a = a.adjoint() + b);
- VERIFY_RAISES_ASSERT(a = b + a.adjoint());
-
- // no assertion should be triggered for these cases:
- a.transpose() = a.transpose();
- a.transpose() += a.transpose();
- a.transpose() += a.transpose() + b;
- a.transpose() = a.adjoint();
- a.transpose() += a.adjoint();
- a.transpose() += a.adjoint() + b;
-
- // regression tests for check_for_aliasing
- MatrixXd c(10,10);
- c = 1.0 * MatrixXd::Ones(10,10) + c;
- c = MatrixXd::Ones(10,10) * 1.0 + c;
- c = c + MatrixXd::Ones(10,10) .cwiseProduct( MatrixXd::Zero(10,10) );
- c = MatrixXd::Ones(10,10) * MatrixXd::Zero(10,10);
- }
-#endif
+ CALL_SUBTEST_13( adjoint_extra<0>() );
}
diff --git a/test/array.cpp b/test/array_cwise.cpp
index f7f3ba780..84e46665b 100644
--- a/test/array.cpp
+++ b/test/array_cwise.cpp
@@ -11,7 +11,6 @@
template<typename ArrayType> void array(const ArrayType& m)
{
- typedef typename ArrayType::Index Index;
typedef typename ArrayType::Scalar Scalar;
typedef typename ArrayType::RealScalar RealScalar;
typedef Array<Scalar, ArrayType::RowsAtCompileTime, 1> ColVectorType;
@@ -130,7 +129,6 @@ template<typename ArrayType> void array(const ArrayType& m)
template<typename ArrayType> void comparisons(const ArrayType& m)
{
using std::abs;
- typedef typename ArrayType::Index Index;
typedef typename ArrayType::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
@@ -197,7 +195,7 @@ template<typename ArrayType> void comparisons(const ArrayType& m)
RealScalar a = m1.abs().mean();
VERIFY( (m1<-a || m1>a).count() == (m1.abs()>a).count());
- typedef Array<typename ArrayType::Index, Dynamic, 1> ArrayOfIndices;
+ typedef Array<Index, Dynamic, 1> ArrayOfIndices;
// TODO allows colwise/rowwise for array
VERIFY_IS_APPROX(((m1.abs()+1)>RealScalar(0.1)).colwise().count(), ArrayOfIndices::Constant(cols,rows).transpose());
@@ -208,7 +206,6 @@ template<typename ArrayType> void array_real(const ArrayType& m)
{
using std::abs;
using std::sqrt;
- typedef typename ArrayType::Index Index;
typedef typename ArrayType::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
@@ -234,6 +231,7 @@ template<typename ArrayType> void array_real(const ArrayType& m)
VERIFY_IS_APPROX(m1.sinh(), sinh(m1));
VERIFY_IS_APPROX(m1.cosh(), cosh(m1));
VERIFY_IS_APPROX(m1.tanh(), tanh(m1));
+ VERIFY_IS_APPROX(m1.logistic(), logistic(m1));
VERIFY_IS_APPROX(m1.arg(), arg(m1));
VERIFY_IS_APPROX(m1.round(), round(m1));
@@ -269,6 +267,7 @@ template<typename ArrayType> void array_real(const ArrayType& m)
VERIFY_IS_APPROX(sinh(m1), 0.5*(exp(m1)-exp(-m1)));
VERIFY_IS_APPROX(cosh(m1), 0.5*(exp(m1)+exp(-m1)));
VERIFY_IS_APPROX(tanh(m1), (0.5*(exp(m1)-exp(-m1)))/(0.5*(exp(m1)+exp(-m1))));
+ VERIFY_IS_APPROX(logistic(m1), (1.0/(1.0+exp(-m1))));
VERIFY_IS_APPROX(arg(m1), ((m1<0).template cast<Scalar>())*std::acos(-1.0));
VERIFY((round(m1) <= ceil(m1) && round(m1) >= floor(m1)).all());
VERIFY((Eigen::isnan)((m1*0.0)/0.0).all());
@@ -322,7 +321,6 @@ template<typename ArrayType> void array_real(const ArrayType& m)
template<typename ArrayType> void array_complex(const ArrayType& m)
{
- typedef typename ArrayType::Index Index;
typedef typename ArrayType::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
@@ -349,6 +347,7 @@ template<typename ArrayType> void array_complex(const ArrayType& m)
VERIFY_IS_APPROX(m1.sinh(), sinh(m1));
VERIFY_IS_APPROX(m1.cosh(), cosh(m1));
VERIFY_IS_APPROX(m1.tanh(), tanh(m1));
+ VERIFY_IS_APPROX(m1.logistic(), logistic(m1));
VERIFY_IS_APPROX(m1.arg(), arg(m1));
VERIFY((m1.isNaN() == (Eigen::isnan)(m1)).all());
VERIFY((m1.isInf() == (Eigen::isinf)(m1)).all());
@@ -372,6 +371,7 @@ template<typename ArrayType> void array_complex(const ArrayType& m)
VERIFY_IS_APPROX(sinh(m1), 0.5*(exp(m1)-exp(-m1)));
VERIFY_IS_APPROX(cosh(m1), 0.5*(exp(m1)+exp(-m1)));
VERIFY_IS_APPROX(tanh(m1), (0.5*(exp(m1)-exp(-m1)))/(0.5*(exp(m1)+exp(-m1))));
+ VERIFY_IS_APPROX(logistic(m1), (1.0/(1.0 + exp(-m1))));
for (Index i = 0; i < m.rows(); ++i)
for (Index j = 0; j < m.cols(); ++j)
@@ -427,7 +427,6 @@ template<typename ArrayType> void array_complex(const ArrayType& m)
template<typename ArrayType> void min_max(const ArrayType& m)
{
- typedef typename ArrayType::Index Index;
typedef typename ArrayType::Scalar Scalar;
Index rows = m.rows();
@@ -454,7 +453,7 @@ template<typename ArrayType> void min_max(const ArrayType& m)
}
-void test_array()
+EIGEN_DECLARE_TEST(array_cwise)
{
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1( array(Array<float, 1, 1>()) );
diff --git a/test/array_for_matrix.cpp b/test/array_for_matrix.cpp
index c1501947b..6b03abb10 100644
--- a/test/array_for_matrix.cpp
+++ b/test/array_for_matrix.cpp
@@ -11,7 +11,6 @@
template<typename MatrixType> void array_for_matrix(const MatrixType& m)
{
- typedef typename MatrixType::Index Index;
typedef typename MatrixType::Scalar Scalar;
typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> ColVectorType;
typedef Matrix<Scalar, 1, MatrixType::ColsAtCompileTime> RowVectorType;
@@ -83,7 +82,6 @@ template<typename MatrixType> void array_for_matrix(const MatrixType& m)
template<typename MatrixType> void comparisons(const MatrixType& m)
{
using std::abs;
- typedef typename MatrixType::Index Index;
typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
@@ -140,7 +138,7 @@ template<typename MatrixType> void comparisons(const MatrixType& m)
RealScalar a = m1.cwiseAbs().mean();
VERIFY( ((m1.array()<-a).matrix() || (m1.array()>a).matrix()).count() == (m1.cwiseAbs().array()>a).count());
- typedef Matrix<typename MatrixType::Index, Dynamic, 1> VectorOfIndices;
+ typedef Matrix<Index, Dynamic, 1> VectorOfIndices;
// TODO allows colwise/rowwise for array
VERIFY_IS_APPROX(((m1.array().abs()+1)>RealScalar(0.1)).matrix().colwise().count(), VectorOfIndices::Constant(cols,rows).transpose());
@@ -172,7 +170,6 @@ template<typename VectorType> void lpNorm(const VectorType& v)
template<typename MatrixType> void cwise_min_max(const MatrixType& m)
{
- typedef typename MatrixType::Index Index;
typedef typename MatrixType::Scalar Scalar;
Index rows = m.rows();
@@ -211,7 +208,6 @@ template<typename MatrixType> void cwise_min_max(const MatrixType& m)
template<typename MatrixTraits> void resize(const MatrixTraits& t)
{
- typedef typename MatrixTraits::Index Index;
typedef typename MatrixTraits::Scalar Scalar;
typedef Matrix<Scalar,Dynamic,Dynamic> MatrixType;
typedef Array<Scalar,Dynamic,Dynamic> Array2DType;
@@ -235,13 +231,32 @@ template<typename MatrixTraits> void resize(const MatrixTraits& t)
VERIFY(a1.size()==cols);
}
+template<int>
void regression_bug_654()
{
ArrayXf a = RowVectorXf(3);
VectorXf v = Array<float,1,Dynamic>(3);
}
-void test_array_for_matrix()
+// Check propagation of LvalueBit through Array/Matrix-Wrapper
+template<int>
+void regression_bug_1410()
+{
+ const Matrix4i M;
+ const Array4i A;
+ ArrayWrapper<const Matrix4i> MA = M.array();
+ MA.row(0);
+ MatrixWrapper<const Array4i> AM = A.matrix();
+ AM.row(0);
+
+ VERIFY((internal::traits<ArrayWrapper<const Matrix4i> >::Flags&LvalueBit)==0);
+ VERIFY((internal::traits<MatrixWrapper<const Array4i> >::Flags&LvalueBit)==0);
+
+ VERIFY((internal::traits<ArrayWrapper<Matrix4i> >::Flags&LvalueBit)==LvalueBit);
+ VERIFY((internal::traits<MatrixWrapper<Array4i> >::Flags&LvalueBit)==LvalueBit);
+}
+
+EIGEN_DECLARE_TEST(array_for_matrix)
{
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1( array_for_matrix(Matrix<float, 1, 1>()) );
@@ -280,5 +295,6 @@ void test_array_for_matrix()
CALL_SUBTEST_5( resize(MatrixXf(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
CALL_SUBTEST_6( resize(MatrixXi(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
}
- CALL_SUBTEST_6( regression_bug_654() );
+ CALL_SUBTEST_6( regression_bug_654<0>() );
+ CALL_SUBTEST_6( regression_bug_1410<0>() );
}
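As a standalone compile-time sketch of the LvalueBit propagation the new regression test above checks (a hypothetical illustration against the patched headers, not part of the patch):

#include <Eigen/Core>

int main()
{
  using namespace Eigen;
  // Wrapping a const Matrix/Array must drop LvalueBit, i.e. writability...
  static_assert((internal::traits<ArrayWrapper<const Matrix4i> >::Flags & LvalueBit) == 0,
                "const ArrayWrapper must not be writable");
  static_assert((internal::traits<MatrixWrapper<const Array4i> >::Flags & LvalueBit) == 0,
                "const MatrixWrapper must not be writable");
  // ...while non-const wrappers keep it.
  static_assert((internal::traits<ArrayWrapper<Matrix4i> >::Flags & LvalueBit) == LvalueBit,
                "non-const ArrayWrapper must stay writable");
  return 0;
}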
diff --git a/test/array_of_string.cpp b/test/array_of_string.cpp
index e23b7c59e..23e51529b 100644
--- a/test/array_of_string.cpp
+++ b/test/array_of_string.cpp
@@ -9,7 +9,7 @@
#include "main.h"
-void test_array_of_string()
+EIGEN_DECLARE_TEST(array_of_string)
{
typedef Array<std::string,1,Dynamic> ArrayXs;
ArrayXs a1(3), a2(3), a3(3), a3ref(3);
diff --git a/test/array_replicate.cpp b/test/array_replicate.cpp
index 779c8fc2f..057c3c77b 100644
--- a/test/array_replicate.cpp
+++ b/test/array_replicate.cpp
@@ -14,7 +14,6 @@ template<typename MatrixType> void replicate(const MatrixType& m)
/* this test covers the following files:
Replicate.cpp
*/
- typedef typename MatrixType::Index Index;
typedef typename MatrixType::Scalar Scalar;
typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> VectorType;
typedef Matrix<Scalar, Dynamic, Dynamic> MatrixX;
@@ -69,7 +68,7 @@ template<typename MatrixType> void replicate(const MatrixType& m)
VERIFY_IS_APPROX(vx1, v1.colwise().replicate(f2));
}
-void test_array_replicate()
+EIGEN_DECLARE_TEST(array_replicate)
{
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1( replicate(Matrix<float, 1, 1>()) );
diff --git a/test/array_reverse.cpp b/test/array_reverse.cpp
index c9d9f90c3..e23159def 100644
--- a/test/array_reverse.cpp
+++ b/test/array_reverse.cpp
@@ -15,7 +15,6 @@ using namespace std;
template<typename MatrixType> void reverse(const MatrixType& m)
{
- typedef typename MatrixType::Index Index;
typedef typename MatrixType::Scalar Scalar;
typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> VectorType;
@@ -124,7 +123,16 @@ template<typename MatrixType> void reverse(const MatrixType& m)
VERIFY_IS_APPROX(x, m1(r, cols - 1 - c));
}
-void test_array_reverse()
+template<int>
+void array_reverse_extra()
+{
+ Vector4f x; x << 1, 2, 3, 4;
+ Vector4f y; y << 4, 3, 2, 1;
+ VERIFY(x.reverse()[1] == 3);
+ VERIFY(x.reverse() == y);
+}
+
+EIGEN_DECLARE_TEST(array_reverse)
{
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1( reverse(Matrix<float, 1, 1>()) );
@@ -137,10 +145,5 @@ void test_array_reverse()
CALL_SUBTEST_8( reverse(Matrix<float, 100, 100>()) );
CALL_SUBTEST_9( reverse(Matrix<float,Dynamic,Dynamic,RowMajor>(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
}
-#ifdef EIGEN_TEST_PART_3
- Vector4f x; x << 1, 2, 3, 4;
- Vector4f y; y << 4, 3, 2, 1;
- VERIFY(x.reverse()[1] == 3);
- VERIFY(x.reverse() == y);
-#endif
+ CALL_SUBTEST_3( array_reverse_extra<0>() );
}
diff --git a/test/bandmatrix.cpp b/test/bandmatrix.cpp
index f8c38f7c3..66a1b0db4 100644
--- a/test/bandmatrix.cpp
+++ b/test/bandmatrix.cpp
@@ -59,7 +59,7 @@ template<typename MatrixType> void bandmatrix(const MatrixType& _m)
using Eigen::internal::BandMatrix;
-void test_bandmatrix()
+EIGEN_DECLARE_TEST(bandmatrix)
{
for(int i = 0; i < 10*g_repeat ; i++) {
Index rows = internal::random<Index>(1,10);
diff --git a/test/basicstuff.cpp b/test/basicstuff.cpp
index c346ce6cb..85af603d8 100644
--- a/test/basicstuff.cpp
+++ b/test/basicstuff.cpp
@@ -13,7 +13,6 @@
template<typename MatrixType> void basicStuff(const MatrixType& m)
{
- typedef typename MatrixType::Index Index;
typedef typename MatrixType::Scalar Scalar;
typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> VectorType;
typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, MatrixType::RowsAtCompileTime> SquareMatrixType;
@@ -124,22 +123,22 @@ template<typename MatrixType> void basicStuff(const MatrixType& m)
// check automatic transposition
sm2.setZero();
- for(typename MatrixType::Index i=0;i<rows;++i)
+ for(Index i=0;i<rows;++i)
sm2.col(i) = sm1.row(i);
VERIFY_IS_APPROX(sm2,sm1.transpose());
sm2.setZero();
- for(typename MatrixType::Index i=0;i<rows;++i)
+ for(Index i=0;i<rows;++i)
sm2.col(i).noalias() = sm1.row(i);
VERIFY_IS_APPROX(sm2,sm1.transpose());
sm2.setZero();
- for(typename MatrixType::Index i=0;i<rows;++i)
+ for(Index i=0;i<rows;++i)
sm2.col(i).noalias() += sm1.row(i);
VERIFY_IS_APPROX(sm2,sm1.transpose());
sm2.setZero();
- for(typename MatrixType::Index i=0;i<rows;++i)
+ for(Index i=0;i<rows;++i)
sm2.col(i).noalias() -= sm1.row(i);
VERIFY_IS_APPROX(sm2,-sm1.transpose());
@@ -160,7 +159,6 @@ template<typename MatrixType> void basicStuff(const MatrixType& m)
template<typename MatrixType> void basicStuffComplex(const MatrixType& m)
{
- typedef typename MatrixType::Index Index;
typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
typedef Matrix<RealScalar, MatrixType::RowsAtCompileTime, MatrixType::ColsAtCompileTime> RealMatrixType;
@@ -196,7 +194,7 @@ template<typename MatrixType> void basicStuffComplex(const MatrixType& m)
VERIFY(!static_cast<const MatrixType&>(cm).imag().isZero());
}
-#ifdef EIGEN_TEST_PART_2
+template<int>
void casting()
{
Matrix4f m = Matrix4f::Random(), m2;
@@ -205,7 +203,6 @@ void casting()
m2 = m.cast<float>(); // check the specialization when NewType == Type
VERIFY(m.isApprox(m2));
}
-#endif
template <typename Scalar>
void fixedSizeMatrixConstruction()
@@ -270,7 +267,7 @@ void fixedSizeMatrixConstruction()
}
}
-void test_basicstuff()
+EIGEN_DECLARE_TEST(basicstuff)
{
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1( basicStuff(Matrix<float, 1, 1>()) );
@@ -292,5 +289,5 @@ void test_basicstuff()
CALL_SUBTEST_1(fixedSizeMatrixConstruction<long int>());
CALL_SUBTEST_1(fixedSizeMatrixConstruction<std::ptrdiff_t>());
- CALL_SUBTEST_2(casting());
+ CALL_SUBTEST_2(casting<0>());
}
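A minimal standalone sketch of what the relocated casting<0>() subtest exercises (only Eigen/Core assumed; not part of the patch):

#include <Eigen/Core>
#include <iostream>

int main()
{
  Eigen::Matrix4f m = Eigen::Matrix4f::Random();
  Eigen::Matrix4f m2 = m.cast<double>().cast<float>();  // round-trip through double
  Eigen::Matrix4f m3 = m.cast<float>();                  // NewType == Type specialization
  std::cout << std::boolalpha << m.isApprox(m2) << " " << m.isApprox(m3) << std::endl;
  return 0;
}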
diff --git a/test/bdcsvd.cpp b/test/bdcsvd.cpp
index f9f687aac..3065ff015 100644
--- a/test/bdcsvd.cpp
+++ b/test/bdcsvd.cpp
@@ -62,7 +62,7 @@ void compare_bdc_jacobi(const MatrixType& a = MatrixType(), unsigned int computa
if(computationOptions & ComputeThinV) VERIFY_IS_APPROX(bdc_svd.matrixV(), jacobi_svd.matrixV());
}
-void test_bdcsvd()
+EIGEN_DECLARE_TEST(bdcsvd)
{
CALL_SUBTEST_3(( svd_verify_assert<BDCSVD<Matrix3f> >(Matrix3f()) ));
CALL_SUBTEST_4(( svd_verify_assert<BDCSVD<Matrix4d> >(Matrix4d()) ));
@@ -104,7 +104,8 @@ void test_bdcsvd()
CALL_SUBTEST_7( BDCSVD<MatrixXf>(10,10) );
// Check that preallocation avoids subsequent mallocs
- CALL_SUBTEST_9( svd_preallocate<void>() );
+ // Disabled because not supported by BDCSVD
+ // CALL_SUBTEST_9( svd_preallocate<void>() );
CALL_SUBTEST_2( svd_underoverflow<void>() );
}
diff --git a/test/bicgstab.cpp b/test/bicgstab.cpp
index 4cc0dd31c..89d6a45ef 100644
--- a/test/bicgstab.cpp
+++ b/test/bicgstab.cpp
@@ -26,7 +26,7 @@ template<typename T, typename I> void test_bicgstab_T()
//CALL_SUBTEST( check_sparse_square_solving(bicgstab_colmajor_ssor) );
}
-void test_bicgstab()
+EIGEN_DECLARE_TEST(bicgstab)
{
CALL_SUBTEST_1((test_bicgstab_T<double,int>()) );
CALL_SUBTEST_2((test_bicgstab_T<std::complex<double>, int>()));
diff --git a/test/block.cpp b/test/block.cpp
index d61059874..ca13539a9 100644
--- a/test/block.cpp
+++ b/test/block.cpp
@@ -39,12 +39,11 @@ is_same_block(const T1& a, const T2& b)
template<typename MatrixType> void block(const MatrixType& m)
{
- typedef typename MatrixType::Index Index;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> VectorType;
typedef Matrix<Scalar, 1, MatrixType::ColsAtCompileTime> RowVectorType;
- typedef Matrix<Scalar, Dynamic, Dynamic> DynamicMatrixType;
+ typedef Matrix<Scalar, Dynamic, Dynamic, MatrixType::IsRowMajor?RowMajor:ColMajor> DynamicMatrixType;
typedef Matrix<Scalar, Dynamic, 1> DynamicVectorType;
Index rows = m.rows();
@@ -142,7 +141,7 @@ template<typename MatrixType> void block(const MatrixType& m)
VERIFY(numext::real(ones.col(c1).dot(ones.col(c2))) == RealScalar(rows));
VERIFY(numext::real(ones.row(r1).dot(ones.row(r2))) == RealScalar(cols));
- // chekc that linear acccessors works on blocks
+ // check that linear accessors work on blocks
m1 = m1_copy;
if((MatrixType::Flags&RowMajorBit)==0)
VERIFY_IS_EQUAL(m1.leftCols(c1).coeff(r1+c1*rows), m1(r1,c1));
@@ -162,9 +161,25 @@ template<typename MatrixType> void block(const MatrixType& m)
// expressions without direct access
VERIFY_IS_APPROX( ((m1+m2).block(r1,c1,rows-r1,cols-c1).block(r2-r1,c2-c1,rows-r2,cols-c2)) , ((m1+m2).block(r2,c2,rows-r2,cols-c2)) );
VERIFY_IS_APPROX( ((m1+m2).block(r1,c1,r2-r1+1,c2-c1+1).row(0)) , ((m1+m2).row(r1).segment(c1,c2-c1+1)) );
+ VERIFY_IS_APPROX( ((m1+m2).block(r1,c1,r2-r1+1,c2-c1+1).row(0)) , ((m1+m2).eval().row(r1).segment(c1,c2-c1+1)) );
VERIFY_IS_APPROX( ((m1+m2).block(r1,c1,r2-r1+1,c2-c1+1).col(0)) , ((m1+m2).col(c1).segment(r1,r2-r1+1)) );
VERIFY_IS_APPROX( ((m1+m2).block(r1,c1,r2-r1+1,c2-c1+1).transpose().col(0)) , ((m1+m2).row(r1).segment(c1,c2-c1+1)).transpose() );
VERIFY_IS_APPROX( ((m1+m2).transpose().block(c1,r1,c2-c1+1,r2-r1+1).col(0)) , ((m1+m2).row(r1).segment(c1,c2-c1+1)).transpose() );
+ VERIFY_IS_APPROX( ((m1+m2).template block<Dynamic,1>(r1,c1,r2-r1+1,1)) , ((m1+m2).eval().col(c1).eval().segment(r1,r2-r1+1)) );
+ VERIFY_IS_APPROX( ((m1+m2).template block<1,Dynamic>(r1,c1,1,c2-c1+1)) , ((m1+m2).eval().row(r1).eval().segment(c1,c2-c1+1)) );
+ VERIFY_IS_APPROX( ((m1+m2).transpose().template block<1,Dynamic>(c1,r1,1,r2-r1+1)) , ((m1+m2).eval().col(c1).eval().segment(r1,r2-r1+1)).transpose() );
+ VERIFY_IS_APPROX( (m1+m2).row(r1).eval(), (m1+m2).eval().row(r1) );
+ VERIFY_IS_APPROX( (m1+m2).adjoint().col(r1).eval(), (m1+m2).adjoint().eval().col(r1) );
+ VERIFY_IS_APPROX( (m1+m2).adjoint().row(c1).eval(), (m1+m2).adjoint().eval().row(c1) );
+ VERIFY_IS_APPROX( (m1*1).row(r1).segment(c1,c2-c1+1).eval(), m1.row(r1).eval().segment(c1,c2-c1+1).eval() );
+ VERIFY_IS_APPROX( m1.col(c1).reverse().segment(r1,r2-r1+1).eval(),m1.col(c1).reverse().eval().segment(r1,r2-r1+1).eval() );
+
+ VERIFY_IS_APPROX( (m1*1).topRows(r1), m1.topRows(r1) );
+ VERIFY_IS_APPROX( (m1*1).leftCols(c1), m1.leftCols(c1) );
+ VERIFY_IS_APPROX( (m1*1).transpose().topRows(c1), m1.transpose().topRows(c1) );
+ VERIFY_IS_APPROX( (m1*1).transpose().leftCols(r1), m1.transpose().leftCols(r1) );
+ VERIFY_IS_APPROX( (m1*1).transpose().middleRows(c1,c2-c1+1), m1.transpose().middleRows(c1,c2-c1+1) );
+ VERIFY_IS_APPROX( (m1*1).transpose().middleCols(r1,r2-r1+1), m1.transpose().middleCols(r1,r2-r1+1) );
// evaluation into plain matrices from expressions with direct access (stress MapBase)
DynamicMatrixType dm;
@@ -211,7 +226,6 @@ template<typename MatrixType> void block(const MatrixType& m)
template<typename MatrixType>
void compare_using_data_and_stride(const MatrixType& m)
{
- typedef typename MatrixType::Index Index;
Index rows = m.rows();
Index cols = m.cols();
Index size = m.size();
@@ -245,7 +259,6 @@ void compare_using_data_and_stride(const MatrixType& m)
template<typename MatrixType>
void data_and_stride(const MatrixType& m)
{
- typedef typename MatrixType::Index Index;
Index rows = m.rows();
Index cols = m.cols();
@@ -263,7 +276,7 @@ void data_and_stride(const MatrixType& m)
compare_using_data_and_stride(m1.col(c1).transpose());
}
-void test_block()
+EIGEN_DECLARE_TEST(block)
{
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1( block(Matrix<float, 1, 1>()) );
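The new block checks above stress blocks of expressions without direct storage; a hypothetical standalone sketch of the property being verified (not part of the patch):

#include <Eigen/Core>
#include <iostream>

int main()
{
  Eigen::MatrixXd m1 = Eigen::MatrixXd::Random(6, 7);
  Eigen::MatrixXd m2 = Eigen::MatrixXd::Random(6, 7);

  // (m1+m2) has no direct access; row()/segment() on it go through the block evaluator
  // and must match the same extraction performed on the evaluated result.
  Eigen::RowVectorXd via_expr = (m1 + m2).row(2).segment(1, 4);
  Eigen::RowVectorXd via_eval = (m1 + m2).eval().row(2).segment(1, 4);

  std::cout << "difference: " << (via_expr - via_eval).norm() << std::endl;
  return 0;
}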
diff --git a/test/boostmultiprec.cpp b/test/boostmultiprec.cpp
index e06e9bdaf..579a6fd25 100644
--- a/test/boostmultiprec.cpp
+++ b/test/boostmultiprec.cpp
@@ -55,6 +55,10 @@
#include "bdcsvd.cpp"
#endif
+#ifdef EIGEN_TEST_PART_11
+#include "simplicial_cholesky.cpp"
+#endif
+
#include <Eigen/Dense>
#undef min
@@ -141,7 +145,7 @@ namespace Eigen {
}
-void test_boostmultiprec()
+EIGEN_DECLARE_TEST(boostmultiprec)
{
typedef Matrix<Real,Dynamic,Dynamic> Mat;
typedef Matrix<std::complex<Real>,Dynamic,Dynamic> MatC;
@@ -152,7 +156,7 @@ void test_boostmultiprec()
std::cout << "NumTraits<Real>::highest() = " << NumTraits<Real>::highest() << std::endl;
std::cout << "NumTraits<Real>::digits10() = " << NumTraits<Real>::digits10() << std::endl;
- // chekc stream output
+ // check stream output
{
Mat A(10,10);
A.setRandom();
@@ -197,5 +201,7 @@ void test_boostmultiprec()
CALL_SUBTEST_9(( jacobisvd(Mat(internal::random<int>(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE), internal::random<int>(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/2))) ));
CALL_SUBTEST_10(( bdcsvd(Mat(internal::random<int>(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE), internal::random<int>(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/2))) ));
+
+ CALL_SUBTEST_11(( test_simplicial_cholesky_T<Real,int>() ));
}
diff --git a/test/cholesky.cpp b/test/cholesky.cpp
index 8ad5ac639..b871351e0 100644
--- a/test/cholesky.cpp
+++ b/test/cholesky.cpp
@@ -57,7 +57,6 @@ template<typename MatrixType,template <typename,int> class CholType> void test_c
template<typename MatrixType> void cholesky(const MatrixType& m)
{
- typedef typename MatrixType::Index Index;
/* this test covers the following files:
LLT.h LDLT.h
*/
@@ -289,8 +288,6 @@ template<typename MatrixType> void cholesky_cplx(const MatrixType& m)
// test mixing real/scalar types
- typedef typename MatrixType::Index Index;
-
Index rows = m.rows();
Index cols = m.cols();
@@ -373,6 +370,7 @@ template<typename MatrixType> void cholesky_definiteness(const MatrixType& m)
VERIFY(ldlt.info()==Success);
VERIFY(!ldlt.isNegative());
VERIFY(!ldlt.isPositive());
+ VERIFY_IS_APPROX(mat,ldlt.reconstructedMatrix());
}
{
mat << 1, 2, 2, 1;
@@ -380,6 +378,7 @@ template<typename MatrixType> void cholesky_definiteness(const MatrixType& m)
VERIFY(ldlt.info()==Success);
VERIFY(!ldlt.isNegative());
VERIFY(!ldlt.isPositive());
+ VERIFY_IS_APPROX(mat,ldlt.reconstructedMatrix());
}
{
mat << 0, 0, 0, 0;
@@ -387,6 +386,7 @@ template<typename MatrixType> void cholesky_definiteness(const MatrixType& m)
VERIFY(ldlt.info()==Success);
VERIFY(ldlt.isNegative());
VERIFY(ldlt.isPositive());
+ VERIFY_IS_APPROX(mat,ldlt.reconstructedMatrix());
}
{
mat << 0, 0, 0, 1;
@@ -394,6 +394,7 @@ template<typename MatrixType> void cholesky_definiteness(const MatrixType& m)
VERIFY(ldlt.info()==Success);
VERIFY(!ldlt.isNegative());
VERIFY(ldlt.isPositive());
+ VERIFY_IS_APPROX(mat,ldlt.reconstructedMatrix());
}
{
mat << -1, 0, 0, 0;
@@ -401,6 +402,7 @@ template<typename MatrixType> void cholesky_definiteness(const MatrixType& m)
VERIFY(ldlt.info()==Success);
VERIFY(ldlt.isNegative());
VERIFY(!ldlt.isPositive());
+ VERIFY_IS_APPROX(mat,ldlt.reconstructedMatrix());
}
}
@@ -452,6 +454,18 @@ void cholesky_faillure_cases()
VERIFY(ldlt.info()==NumericalIssue);
VERIFY_IS_NOT_APPROX(mat,ldlt.reconstructedMatrix());
}
+
+ // bug 1479
+ {
+ mat.resize(4,4);
+ mat << 1, 2, 0, 1,
+ 2, 4, 0, 2,
+ 0, 0, 0, 1,
+ 1, 2, 1, 1;
+ ldlt.compute(mat);
+ VERIFY(ldlt.info()==NumericalIssue);
+ VERIFY_IS_NOT_APPROX(mat,ldlt.reconstructedMatrix());
+ }
}
template<typename MatrixType> void cholesky_verify_assert()
@@ -474,7 +488,7 @@ template<typename MatrixType> void cholesky_verify_assert()
VERIFY_RAISES_ASSERT(ldlt.solveInPlace(&tmp))
}
-void test_cholesky()
+EIGEN_DECLARE_TEST(cholesky)
{
int s = 0;
for(int i = 0; i < g_repeat; i++) {
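The bug-1479 case added above feeds LDLT a rank-deficient matrix that cannot be factored reliably; a standalone sketch of the expected behaviour (hypothetical, not part of the patch):

#include <Eigen/Cholesky>
#include <iostream>

int main()
{
  Eigen::MatrixXd mat(4,4);
  mat << 1, 2, 0, 1,
         2, 4, 0, 2,
         0, 0, 0, 1,
         1, 2, 1, 1;
  Eigen::LDLT<Eigen::MatrixXd> ldlt(mat);
  // The factorization must report a numerical issue instead of silently
  // producing factors that do not reconstruct 'mat'.
  std::cout << (ldlt.info() == Eigen::NumericalIssue ? "NumericalIssue" : "Success") << std::endl;
  std::cout << "reconstruction error: "
            << (ldlt.reconstructedMatrix() - mat).norm() << std::endl;
  return 0;
}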
diff --git a/test/cholmod_support.cpp b/test/cholmod_support.cpp
index 931207334..89b9cf41e 100644
--- a/test/cholmod_support.cpp
+++ b/test/cholmod_support.cpp
@@ -55,7 +55,7 @@ template<typename T, int flags, typename IdxType> void test_cholmod_T()
test_cholmod_ST<SparseMatrix<T, flags, IdxType> >();
}
-void test_cholmod_support()
+EIGEN_DECLARE_TEST(cholmod_support)
{
CALL_SUBTEST_11( (test_cholmod_T<double , ColMajor, int >()) );
CALL_SUBTEST_12( (test_cholmod_T<double , ColMajor, long>()) );
diff --git a/test/commainitializer.cpp b/test/commainitializer.cpp
index 9844adbd2..3cb94da62 100644
--- a/test/commainitializer.cpp
+++ b/test/commainitializer.cpp
@@ -65,7 +65,7 @@ struct test_block_recursion<-1>
static void run() { }
};
-void test_commainitializer()
+EIGEN_DECLARE_TEST(commainitializer)
{
Matrix3d m3;
Matrix4d m4;
diff --git a/test/conjugate_gradient.cpp b/test/conjugate_gradient.cpp
index 9622fd86d..47a4ca707 100644
--- a/test/conjugate_gradient.cpp
+++ b/test/conjugate_gradient.cpp
@@ -26,7 +26,7 @@ template<typename T, typename I> void test_conjugate_gradient_T()
CALL_SUBTEST( check_sparse_spd_solving(cg_colmajor_upper_I) );
}
-void test_conjugate_gradient()
+EIGEN_DECLARE_TEST(conjugate_gradient)
{
CALL_SUBTEST_1(( test_conjugate_gradient_T<double,int>() ));
CALL_SUBTEST_2(( test_conjugate_gradient_T<std::complex<double>, int>() ));
diff --git a/test/conservative_resize.cpp b/test/conservative_resize.cpp
index 498421b4c..5dc500068 100644
--- a/test/conservative_resize.cpp
+++ b/test/conservative_resize.cpp
@@ -10,6 +10,7 @@
#include "main.h"
#include <Eigen/Core>
+#include "AnnoyingScalar.h"
using namespace Eigen;
@@ -17,7 +18,6 @@ template <typename Scalar, int Storage>
void run_matrix_tests()
{
typedef Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic, Storage> MatrixType;
- typedef typename MatrixType::Index Index;
MatrixType m, n;
@@ -110,7 +110,31 @@ void run_vector_tests()
}
}
-void test_conservative_resize()
+// Basic memory leak check with a non-copyable scalar type
+template<int> void noncopyable()
+{
+ typedef Eigen::Matrix<AnnoyingScalar,Dynamic,1> VectorType;
+ typedef Eigen::Matrix<AnnoyingScalar,Dynamic,Dynamic> MatrixType;
+
+ {
+ AnnoyingScalar::dont_throw = true;
+ int n = 50;
+ VectorType v0(n), v1(n);
+ MatrixType m0(n,n), m1(n,n), m2(n,n);
+ v0.setOnes(); v1.setOnes();
+ m0.setOnes(); m1.setOnes(); m2.setOnes();
+ VERIFY(m0==m1);
+ m0.conservativeResize(2*n,2*n);
+ VERIFY(m0.topLeftCorner(n,n) == m1);
+
+ VERIFY(v0.head(n) == v1);
+ v0.conservativeResize(2*n);
+ VERIFY(v0.head(n) == v1);
+ }
+ VERIFY(AnnoyingScalar::instances==0 && "global memory leak detected in noncopyable");
+}
+
+EIGEN_DECLARE_TEST(conservative_resize)
{
for(int i=0; i<g_repeat; ++i)
{
@@ -123,12 +147,16 @@ void test_conservative_resize()
CALL_SUBTEST_4((run_matrix_tests<std::complex<float>, Eigen::RowMajor>()));
CALL_SUBTEST_4((run_matrix_tests<std::complex<float>, Eigen::ColMajor>()));
CALL_SUBTEST_5((run_matrix_tests<std::complex<double>, Eigen::RowMajor>()));
- CALL_SUBTEST_6((run_matrix_tests<std::complex<double>, Eigen::ColMajor>()));
+ CALL_SUBTEST_5((run_matrix_tests<std::complex<double>, Eigen::ColMajor>()));
CALL_SUBTEST_1((run_vector_tests<int>()));
CALL_SUBTEST_2((run_vector_tests<float>()));
CALL_SUBTEST_3((run_vector_tests<double>()));
CALL_SUBTEST_4((run_vector_tests<std::complex<float> >()));
CALL_SUBTEST_5((run_vector_tests<std::complex<double> >()));
+
+ AnnoyingScalar::dont_throw = true;
+ CALL_SUBTEST_6(( run_vector_tests<AnnoyingScalar>() ));
+ CALL_SUBTEST_6(( noncopyable<0>() ));
}
}
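The new noncopyable<0>() subtest above relies on conservativeResize() preserving existing coefficients while reallocating; a plain-scalar sketch of that guarantee (hypothetical, not part of the patch):

#include <Eigen/Core>
#include <iostream>

int main()
{
  const int n = 4;
  Eigen::MatrixXd m = Eigen::MatrixXd::Ones(n, n);
  Eigen::MatrixXd copy = m;

  m.conservativeResize(2*n, 2*n);   // grow in place; new coefficients stay uninitialized
  std::cout << std::boolalpha
            << (m.topLeftCorner(n, n) == copy) << std::endl;   // original block preserved

  Eigen::VectorXd v = Eigen::VectorXd::Ones(n), vcopy = v;
  v.conservativeResize(2*n);
  std::cout << (v.head(n) == vcopy) << std::endl;
  return 0;
}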
diff --git a/test/constructor.cpp b/test/constructor.cpp
index eec9e2192..1dd3bc3c0 100644
--- a/test/constructor.cpp
+++ b/test/constructor.cpp
@@ -37,7 +37,7 @@ template<typename MatrixType> void ctor_init1(const MatrixType& m)
}
-void test_constructor()
+EIGEN_DECLARE_TEST(constructor)
{
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1( ctor_init1(Matrix<float, 1, 1>()) );
diff --git a/test/corners.cpp b/test/corners.cpp
index 3c64c32a1..73342a8dd 100644
--- a/test/corners.cpp
+++ b/test/corners.cpp
@@ -15,7 +15,6 @@
template<typename MatrixType> void corners(const MatrixType& m)
{
- typedef typename MatrixType::Index Index;
Index rows = m.rows();
Index cols = m.cols();
@@ -102,7 +101,7 @@ template<typename MatrixType, int CRows, int CCols, int SRows, int SCols> void c
VERIFY_IS_EQUAL((const_matrix.template rightCols<c>()), (const_matrix.template block<rows,c>(0,cols-c)));
}
-void test_corners()
+EIGEN_DECLARE_TEST(corners)
{
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1( corners(Matrix<float, 1, 1>()) );
diff --git a/test/ctorleak.cpp b/test/ctorleak.cpp
index c158f5e4e..7202e90dd 100644
--- a/test/ctorleak.cpp
+++ b/test/ctorleak.cpp
@@ -33,7 +33,7 @@ Index Foo::object_limit = 0;
#undef EIGEN_TEST_MAX_SIZE
#define EIGEN_TEST_MAX_SIZE 3
-void test_ctorleak()
+EIGEN_DECLARE_TEST(ctorleak)
{
typedef Matrix<Foo, Dynamic, Dynamic> MatrixX;
typedef Matrix<Foo, Dynamic, 1> VectorX;
diff --git a/test/cuda_common.h b/test/cuda_common.h
deleted file mode 100644
index 9737693ac..000000000
--- a/test/cuda_common.h
+++ /dev/null
@@ -1,101 +0,0 @@
-
-#ifndef EIGEN_TEST_CUDA_COMMON_H
-#define EIGEN_TEST_CUDA_COMMON_H
-
-#include <cuda.h>
-#include <cuda_runtime.h>
-#include <cuda_runtime_api.h>
-#include <iostream>
-
-#ifndef __CUDACC__
-dim3 threadIdx, blockDim, blockIdx;
-#endif
-
-template<typename Kernel, typename Input, typename Output>
-void run_on_cpu(const Kernel& ker, int n, const Input& in, Output& out)
-{
- for(int i=0; i<n; i++)
- ker(i, in.data(), out.data());
-}
-
-
-template<typename Kernel, typename Input, typename Output>
-__global__
-void run_on_cuda_meta_kernel(const Kernel ker, int n, const Input* in, Output* out)
-{
- int i = threadIdx.x + blockIdx.x*blockDim.x;
- if(i<n) {
- ker(i, in, out);
- }
-}
-
-
-template<typename Kernel, typename Input, typename Output>
-void run_on_cuda(const Kernel& ker, int n, const Input& in, Output& out)
-{
- typename Input::Scalar* d_in;
- typename Output::Scalar* d_out;
- std::ptrdiff_t in_bytes = in.size() * sizeof(typename Input::Scalar);
- std::ptrdiff_t out_bytes = out.size() * sizeof(typename Output::Scalar);
-
- cudaMalloc((void**)(&d_in), in_bytes);
- cudaMalloc((void**)(&d_out), out_bytes);
-
- cudaMemcpy(d_in, in.data(), in_bytes, cudaMemcpyHostToDevice);
- cudaMemcpy(d_out, out.data(), out_bytes, cudaMemcpyHostToDevice);
-
- // Simple and non-optimal 1D mapping assuming n is not too large
- // That's only for unit testing!
- dim3 Blocks(128);
- dim3 Grids( (n+int(Blocks.x)-1)/int(Blocks.x) );
-
- cudaThreadSynchronize();
- run_on_cuda_meta_kernel<<<Grids,Blocks>>>(ker, n, d_in, d_out);
- cudaThreadSynchronize();
-
- // check inputs have not been modified
- cudaMemcpy(const_cast<typename Input::Scalar*>(in.data()), d_in, in_bytes, cudaMemcpyDeviceToHost);
- cudaMemcpy(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost);
-
- cudaFree(d_in);
- cudaFree(d_out);
-}
-
-
-template<typename Kernel, typename Input, typename Output>
-void run_and_compare_to_cuda(const Kernel& ker, int n, const Input& in, Output& out)
-{
- Input in_ref, in_cuda;
- Output out_ref, out_cuda;
- #ifndef __CUDA_ARCH__
- in_ref = in_cuda = in;
- out_ref = out_cuda = out;
- #endif
- run_on_cpu (ker, n, in_ref, out_ref);
- run_on_cuda(ker, n, in_cuda, out_cuda);
- #ifndef __CUDA_ARCH__
- VERIFY_IS_APPROX(in_ref, in_cuda);
- VERIFY_IS_APPROX(out_ref, out_cuda);
- #endif
-}
-
-
-void ei_test_init_cuda()
-{
- int device = 0;
- cudaDeviceProp deviceProp;
- cudaGetDeviceProperties(&deviceProp, device);
- std::cout << "CUDA device info:\n";
- std::cout << " name: " << deviceProp.name << "\n";
- std::cout << " capability: " << deviceProp.major << "." << deviceProp.minor << "\n";
- std::cout << " multiProcessorCount: " << deviceProp.multiProcessorCount << "\n";
- std::cout << " maxThreadsPerMultiProcessor: " << deviceProp.maxThreadsPerMultiProcessor << "\n";
- std::cout << " warpSize: " << deviceProp.warpSize << "\n";
- std::cout << " regsPerBlock: " << deviceProp.regsPerBlock << "\n";
- std::cout << " concurrentKernels: " << deviceProp.concurrentKernels << "\n";
- std::cout << " clockRate: " << deviceProp.clockRate << "\n";
- std::cout << " canMapHostMemory: " << deviceProp.canMapHostMemory << "\n";
- std::cout << " computeMode: " << deviceProp.computeMode << "\n";
-}
-
-#endif // EIGEN_TEST_CUDA_COMMON_H
diff --git a/test/denseLM.cpp b/test/denseLM.cpp
index 0aa736ea3..afb8004b1 100644
--- a/test/denseLM.cpp
+++ b/test/denseLM.cpp
@@ -182,7 +182,7 @@ void test_denseLM_T()
}
-void test_denseLM()
+EIGEN_DECLARE_TEST(denseLM)
{
CALL_SUBTEST_2(test_denseLM_T<double>());
diff --git a/test/dense_storage.cpp b/test/dense_storage.cpp
index e63712b1a..1150ec52b 100644
--- a/test/dense_storage.cpp
+++ b/test/dense_storage.cpp
@@ -52,7 +52,7 @@ void dense_storage_assignment()
VERIFY_IS_EQUAL(raw_reference[i], raw_copied_reference[i]);
}
-void test_dense_storage()
+EIGEN_DECLARE_TEST(dense_storage)
{
dense_storage_copy<int,Dynamic,Dynamic>();
dense_storage_copy<int,Dynamic,3>();
diff --git a/test/determinant.cpp b/test/determinant.cpp
index 758f3afbb..7dd33c373 100644
--- a/test/determinant.cpp
+++ b/test/determinant.cpp
@@ -16,7 +16,6 @@ template<typename MatrixType> void determinant(const MatrixType& m)
/* this test covers the following files:
Determinant.h
*/
- typedef typename MatrixType::Index Index;
Index size = m.rows();
MatrixType m1(size, size), m2(size, size);
@@ -51,7 +50,7 @@ template<typename MatrixType> void determinant(const MatrixType& m)
VERIFY_IS_APPROX(m2.block(0,0,0,0).determinant(), Scalar(1));
}
-void test_determinant()
+EIGEN_DECLARE_TEST(determinant)
{
for(int i = 0; i < g_repeat; i++) {
int s = 0;
diff --git a/test/diagonal.cpp b/test/diagonal.cpp
index c1546e97d..4e8c4b3c9 100644
--- a/test/diagonal.cpp
+++ b/test/diagonal.cpp
@@ -11,7 +11,6 @@
template<typename MatrixType> void diagonal(const MatrixType& m)
{
- typedef typename MatrixType::Index Index;
typedef typename MatrixType::Scalar Scalar;
Index rows = m.rows();
@@ -66,6 +65,9 @@ template<typename MatrixType> void diagonal(const MatrixType& m)
m2.diagonal(N2).coeffRef(0) = Scalar(2)*s1;
VERIFY_IS_APPROX(m2.diagonal(N2).coeff(0), Scalar(2)*s1);
}
+
+ VERIFY( m1.diagonal( cols).size()==0 );
+ VERIFY( m1.diagonal(-rows).size()==0 );
}
template<typename MatrixType> void diagonal_assert(const MatrixType& m) {
@@ -81,9 +83,12 @@ template<typename MatrixType> void diagonal_assert(const MatrixType& m) {
VERIFY_RAISES_ASSERT( m1.array() *= m1.diagonal().array() );
VERIFY_RAISES_ASSERT( m1.array() /= m1.diagonal().array() );
}
+
+ VERIFY_RAISES_ASSERT( m1.diagonal(cols+1) );
+ VERIFY_RAISES_ASSERT( m1.diagonal(-(rows+1)) );
}
-void test_diagonal()
+EIGEN_DECLARE_TEST(diagonal)
{
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1( diagonal(Matrix<float, 1, 1>()) );
@@ -95,7 +100,6 @@ void test_diagonal()
CALL_SUBTEST_2( diagonal(MatrixXcd(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
CALL_SUBTEST_1( diagonal(MatrixXf(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
CALL_SUBTEST_1( diagonal(Matrix<float,Dynamic,4>(3, 4)) );
+ CALL_SUBTEST_1( diagonal_assert(MatrixXf(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
}
-
- CALL_SUBTEST_1( diagonal_assert(MatrixXf(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
}
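The added diagonal checks rely on the relaxed index bounds this patch tests: the outermost offsets return an empty diagonal, and only indices beyond them assert. A hypothetical sketch against the patched headers (not part of the patch):

#include <Eigen/Core>
#include <iostream>

int main()
{
  Eigen::MatrixXf m = Eigen::MatrixXf::Random(4, 5);
  // Offsets equal to +cols or -rows are allowed and yield a size-0 diagonal.
  std::cout << "diagonal(cols)  size: " << m.diagonal(m.cols()).size()  << std::endl;
  std::cout << "diagonal(-rows) size: " << m.diagonal(-m.rows()).size() << std::endl;
  // m.diagonal(m.cols()+1) would trigger an assertion in debug builds.
  return 0;
}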
diff --git a/test/diagonalmatrices.cpp b/test/diagonalmatrices.cpp
index cd6dc8cf0..ba58ca8d1 100644
--- a/test/diagonalmatrices.cpp
+++ b/test/diagonalmatrices.cpp
@@ -11,7 +11,6 @@
using namespace std;
template<typename MatrixType> void diagonalmatrices(const MatrixType& m)
{
- typedef typename MatrixType::Index Index;
typedef typename MatrixType::Scalar Scalar;
enum { Rows = MatrixType::RowsAtCompileTime, Cols = MatrixType::ColsAtCompileTime };
typedef Matrix<Scalar, Rows, 1> VectorType;
@@ -30,6 +29,7 @@ template<typename MatrixType> void diagonalmatrices(const MatrixType& m)
v2 = VectorType::Random(rows);
RowVectorType rv1 = RowVectorType::Random(cols),
rv2 = RowVectorType::Random(cols);
+
LeftDiagonalMatrix ldm1(v1), ldm2(v2);
RightDiagonalMatrix rdm1(rv1), rdm2(rv2);
@@ -99,6 +99,38 @@ template<typename MatrixType> void diagonalmatrices(const MatrixType& m)
VERIFY_IS_APPROX( (sq_m1 += (s1*v1).asDiagonal()), sq_m2 += (s1*v1).asDiagonal().toDenseMatrix() );
VERIFY_IS_APPROX( (sq_m1 -= (s1*v1).asDiagonal()), sq_m2 -= (s1*v1).asDiagonal().toDenseMatrix() );
VERIFY_IS_APPROX( (sq_m1 = (s1*v1).asDiagonal()), (s1*v1).asDiagonal().toDenseMatrix() );
+
+ sq_m1.setRandom();
+ sq_m2 = v1.asDiagonal();
+ sq_m2 = sq_m1 * sq_m2;
+ VERIFY_IS_APPROX( (sq_m1*v1.asDiagonal()).col(i), sq_m2.col(i) );
+ VERIFY_IS_APPROX( (sq_m1*v1.asDiagonal()).row(i), sq_m2.row(i) );
+}
+
+template<typename MatrixType> void as_scalar_product(const MatrixType& m)
+{
+ typedef typename MatrixType::Scalar Scalar;
+ typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> VectorType;
+ typedef Matrix<Scalar, Dynamic, Dynamic> DynMatrixType;
+ typedef Matrix<Scalar, Dynamic, 1> DynVectorType;
+ typedef Matrix<Scalar, 1, Dynamic> DynRowVectorType;
+
+ Index rows = m.rows();
+ Index depth = internal::random<Index>(1,EIGEN_TEST_MAX_SIZE);
+
+ VectorType v1 = VectorType::Random(rows);
+ DynVectorType dv1 = DynVectorType::Random(depth);
+ DynRowVectorType drv1 = DynRowVectorType::Random(depth);
+ DynMatrixType dm1 = dv1;
+ DynMatrixType drm1 = drv1;
+
+ Scalar s = v1(0);
+
+ VERIFY_IS_APPROX( v1.asDiagonal() * drv1, s*drv1 );
+ VERIFY_IS_APPROX( dv1 * v1.asDiagonal(), dv1*s );
+
+ VERIFY_IS_APPROX( v1.asDiagonal() * drm1, s*drm1 );
+ VERIFY_IS_APPROX( dm1 * v1.asDiagonal(), dm1*s );
}
template<int>
@@ -112,18 +144,23 @@ void bug987()
VERIFY_IS_APPROX(( res1 = points.topLeftCorner<2,2>()*diag.asDiagonal()) , res2 = tmp2*diag.asDiagonal() );
}
-void test_diagonalmatrices()
+EIGEN_DECLARE_TEST(diagonalmatrices)
{
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1( diagonalmatrices(Matrix<float, 1, 1>()) );
+ CALL_SUBTEST_1( as_scalar_product(Matrix<float, 1, 1>()) );
+
CALL_SUBTEST_2( diagonalmatrices(Matrix3f()) );
CALL_SUBTEST_3( diagonalmatrices(Matrix<double,3,3,RowMajor>()) );
CALL_SUBTEST_4( diagonalmatrices(Matrix4d()) );
CALL_SUBTEST_5( diagonalmatrices(Matrix<float,4,4,RowMajor>()) );
CALL_SUBTEST_6( diagonalmatrices(MatrixXcf(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
+ CALL_SUBTEST_6( as_scalar_product(MatrixXcf(1,1)) );
CALL_SUBTEST_7( diagonalmatrices(MatrixXi(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
CALL_SUBTEST_8( diagonalmatrices(Matrix<double,Dynamic,Dynamic,RowMajor>(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
CALL_SUBTEST_9( diagonalmatrices(MatrixXf(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
+ CALL_SUBTEST_9( diagonalmatrices(MatrixXf(1,1)) );
+ CALL_SUBTEST_9( as_scalar_product(MatrixXf(1,1)) );
}
CALL_SUBTEST_10( bug987<0>() );
}
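The new as_scalar_product() subtest checks that a 1x1 diagonal behaves exactly like a scalar factor; a standalone sketch of that property (hypothetical, not part of the patch):

#include <Eigen/Core>
#include <iostream>

int main()
{
  Eigen::Matrix<double,1,1> v;
  v << 3.0;                                        // 1x1 vector used as a diagonal
  Eigen::RowVectorXd r = Eigen::RowVectorXd::Random(5);

  Eigen::RowVectorXd lhs = v.asDiagonal() * r;     // diagonal-product code path
  Eigen::RowVectorXd ref = v(0) * r;               // plain scalar product

  std::cout << "max difference: " << (lhs - ref).cwiseAbs().maxCoeff() << std::endl;
  return 0;
}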
diff --git a/test/dontalign.cpp b/test/dontalign.cpp
index 4643cfed6..2e4102b86 100644
--- a/test/dontalign.cpp
+++ b/test/dontalign.cpp
@@ -19,7 +19,6 @@
template<typename MatrixType>
void dontalign(const MatrixType& m)
{
- typedef typename MatrixType::Index Index;
typedef typename MatrixType::Scalar Scalar;
typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> VectorType;
typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, MatrixType::RowsAtCompileTime> SquareMatrixType;
@@ -45,7 +44,7 @@ void dontalign(const MatrixType& m)
internal::aligned_delete(array, rows);
}
-void test_dontalign()
+EIGEN_DECLARE_TEST(dontalign)
{
#if defined EIGEN_TEST_PART_1 || defined EIGEN_TEST_PART_5
dontalign(Matrix3d());
diff --git a/test/dynalloc.cpp b/test/dynalloc.cpp
index f1cc70bee..1c74866ba 100644
--- a/test/dynalloc.cpp
+++ b/test/dynalloc.cpp
@@ -15,6 +15,7 @@
#define ALIGNMENT 1
#endif
+typedef Matrix<float,16,1> Vector16f;
typedef Matrix<float,8,1> Vector8f;
void check_handmade_aligned_malloc()
@@ -70,7 +71,7 @@ struct MyStruct
{
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
char dummychar;
- Vector8f avec;
+ Vector16f avec;
};
class MyClassA
@@ -78,7 +79,7 @@ class MyClassA
public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
char dummychar;
- Vector8f avec;
+ Vector16f avec;
};
template<typename T> void check_dynaligned()
@@ -119,7 +120,7 @@ template<typename T> void check_custom_new_delete()
#endif
}
-void test_dynalloc()
+EIGEN_DECLARE_TEST(dynalloc)
{
// low level dynamic memory allocation
CALL_SUBTEST(check_handmade_aligned_malloc());
@@ -145,6 +146,7 @@ void test_dynalloc()
CALL_SUBTEST(check_dynaligned<Vector4d>() );
CALL_SUBTEST(check_dynaligned<Vector4i>() );
CALL_SUBTEST(check_dynaligned<Vector8f>() );
+ CALL_SUBTEST(check_dynaligned<Vector16f>() );
}
{
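The dynalloc change switches the test members to a Matrix<float,16,1> so that, with AVX512, dynamically allocated holders need the 64-byte aligned operator new. A sketch with a hypothetical Holder class (not part of the patch):

#include <Eigen/Core>
#include <cstddef>
#include <cstdint>
#include <iostream>

struct Holder
{
  EIGEN_MAKE_ALIGNED_OPERATOR_NEW
  char dummychar;                   // deliberately breaks natural padding
  Eigen::Matrix<float,16,1> avec;   // 64 bytes; alignment depends on the enabled SIMD level
};

int main()
{
  const std::size_t required = alignof(Eigen::Matrix<float,16,1>);
  Holder* h = new Holder;           // goes through Eigen's aligned operator new
  std::cout << "required alignment: " << required << std::endl;
  std::cout << "member aligned:     " << std::boolalpha
            << (reinterpret_cast<std::uintptr_t>(&h->avec) % required == 0) << std::endl;
  delete h;
  return 0;
}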
diff --git a/test/eigen2support.cpp b/test/eigen2support.cpp
index ad1d98091..49d7328e9 100644
--- a/test/eigen2support.cpp
+++ b/test/eigen2support.cpp
@@ -13,7 +13,6 @@
template<typename MatrixType> void eigen2support(const MatrixType& m)
{
- typedef typename MatrixType::Index Index;
typedef typename MatrixType::Scalar Scalar;
Index rows = m.rows();
@@ -53,7 +52,7 @@ template<typename MatrixType> void eigen2support(const MatrixType& m)
m1.minor(0,0);
}
-void test_eigen2support()
+EIGEN_DECLARE_TEST(eigen2support)
{
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1( eigen2support(Matrix<double,1,1>()) );
diff --git a/test/eigensolver_complex.cpp b/test/eigensolver_complex.cpp
index 293b1b265..c5373f420 100644
--- a/test/eigensolver_complex.cpp
+++ b/test/eigensolver_complex.cpp
@@ -47,7 +47,7 @@ template<typename MatrixType> bool find_pivot(typename MatrixType::Scalar tol, M
return false;
}
-/* Check that two column vectors are approximately equal upto permutations.
+/* Check that two column vectors are approximately equal up to permutations.
* Initially, this method checked that the k-th power sums are equal for all k = 1, ..., vec1.rows(),
* however this strategy is numerically inaccurate because of numerical cancellation issues.
*/
@@ -71,7 +71,6 @@ void verify_is_approx_upto_permutation(const VectorType& vec1, const VectorType&
template<typename MatrixType> void eigensolver(const MatrixType& m)
{
- typedef typename MatrixType::Index Index;
/* this test covers the following files:
ComplexEigenSolver.h, and indirectly ComplexSchur.h
*/
@@ -153,7 +152,7 @@ template<typename MatrixType> void eigensolver_verify_assert(const MatrixType& m
VERIFY_RAISES_ASSERT(eig.eigenvectors());
}
-void test_eigensolver_complex()
+EIGEN_DECLARE_TEST(eigensolver_complex)
{
int s = 0;
for(int i = 0; i < g_repeat; i++) {
diff --git a/test/eigensolver_generalized_real.cpp b/test/eigensolver_generalized_real.cpp
index 9c0838ba4..95ed431db 100644
--- a/test/eigensolver_generalized_real.cpp
+++ b/test/eigensolver_generalized_real.cpp
@@ -15,7 +15,6 @@
template<typename MatrixType> void generalized_eigensolver_real(const MatrixType& m)
{
- typedef typename MatrixType::Index Index;
/* this test covers the following files:
GeneralizedEigenSolver.h
*/
@@ -77,9 +76,16 @@ template<typename MatrixType> void generalized_eigensolver_real(const MatrixType
GeneralizedEigenSolver<MatrixType> eig2(a.adjoint() * a,b.adjoint() * b);
eig2.compute(a.adjoint() * a,b.adjoint() * b);
}
+
+ // check without eigenvectors
+ {
+ GeneralizedEigenSolver<MatrixType> eig1(spdA, spdB, true);
+ GeneralizedEigenSolver<MatrixType> eig2(spdA, spdB, false);
+ VERIFY_IS_APPROX(eig1.eigenvalues(), eig2.eigenvalues());
+ }
}
-void test_eigensolver_generalized_real()
+EIGEN_DECLARE_TEST(eigensolver_generalized_real)
{
for(int i = 0; i < g_repeat; i++) {
int s = 0;
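The added "check without eigenvectors" block verifies that requesting only eigenvalues gives the same spectrum; a standalone sketch (random SPD-like inputs chosen here purely for illustration, not part of the patch):

#include <Eigen/Eigenvalues>
#include <iostream>

int main()
{
  const int n = 5;
  Eigen::MatrixXd a = Eigen::MatrixXd::Random(n, n);
  Eigen::MatrixXd b = Eigen::MatrixXd::Random(n, n);
  Eigen::MatrixXd spdA = a.adjoint() * a + Eigen::MatrixXd::Identity(n, n);
  Eigen::MatrixXd spdB = b.adjoint() * b + Eigen::MatrixXd::Identity(n, n);

  Eigen::GeneralizedEigenSolver<Eigen::MatrixXd> with_vec(spdA, spdB, true);   // eigenvectors computed
  Eigen::GeneralizedEigenSolver<Eigen::MatrixXd> without (spdA, spdB, false);  // eigenvalues only

  std::cout << "eigenvalue difference: "
            << (with_vec.eigenvalues() - without.eigenvalues()).norm() << std::endl;
  return 0;
}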
diff --git a/test/eigensolver_generic.cpp b/test/eigensolver_generic.cpp
index d0e644d4b..e0e435151 100644
--- a/test/eigensolver_generic.cpp
+++ b/test/eigensolver_generic.cpp
@@ -14,7 +14,6 @@
template<typename MatrixType> void eigensolver(const MatrixType& m)
{
- typedef typename MatrixType::Index Index;
/* this test covers the following files:
EigenSolver.h
*/
@@ -101,7 +100,35 @@ template<typename MatrixType> void eigensolver_verify_assert(const MatrixType& m
VERIFY_RAISES_ASSERT(eig.pseudoEigenvectors());
}
-void test_eigensolver_generic()
+template<int>
+void eigensolver_generic_extra()
+{
+ {
+ // regression test for bug 793
+ MatrixXd a(3,3);
+ a << 0, 0, 1,
+ 1, 1, 1,
+ 1, 1e+200, 1;
+ Eigen::EigenSolver<MatrixXd> eig(a);
+ double scale = 1e-200; // scale to avoid overflow during the comparisons
+ VERIFY_IS_APPROX(a * eig.pseudoEigenvectors()*scale, eig.pseudoEigenvectors() * eig.pseudoEigenvalueMatrix()*scale);
+ VERIFY_IS_APPROX(a * eig.eigenvectors()*scale, eig.eigenvectors() * eig.eigenvalues().asDiagonal()*scale);
+ }
+ {
+ // check a case where all eigenvalues are null.
+ MatrixXd a(2,2);
+ a << 1, 1,
+ -1, -1;
+ Eigen::EigenSolver<MatrixXd> eig(a);
+ VERIFY_IS_APPROX(eig.pseudoEigenvectors().squaredNorm(), 2.);
+ VERIFY_IS_APPROX((a * eig.pseudoEigenvectors()).norm()+1., 1.);
+ VERIFY_IS_APPROX((eig.pseudoEigenvectors() * eig.pseudoEigenvalueMatrix()).norm()+1., 1.);
+ VERIFY_IS_APPROX((a * eig.eigenvectors()).norm()+1., 1.);
+ VERIFY_IS_APPROX((eig.eigenvectors() * eig.eigenvalues().asDiagonal()).norm()+1., 1.);
+ }
+}
+
+EIGEN_DECLARE_TEST(eigensolver_generic)
{
int s = 0;
for(int i = 0; i < g_repeat; i++) {
@@ -136,31 +163,7 @@ void test_eigensolver_generic()
}
);
-#ifdef EIGEN_TEST_PART_2
- {
- // regression test for bug 793
- MatrixXd a(3,3);
- a << 0, 0, 1,
- 1, 1, 1,
- 1, 1e+200, 1;
- Eigen::EigenSolver<MatrixXd> eig(a);
- double scale = 1e-200; // scale to avoid overflow during the comparisons
- VERIFY_IS_APPROX(a * eig.pseudoEigenvectors()*scale, eig.pseudoEigenvectors() * eig.pseudoEigenvalueMatrix()*scale);
- VERIFY_IS_APPROX(a * eig.eigenvectors()*scale, eig.eigenvectors() * eig.eigenvalues().asDiagonal()*scale);
- }
- {
- // check a case where all eigenvalues are null.
- MatrixXd a(2,2);
- a << 1, 1,
- -1, -1;
- Eigen::EigenSolver<MatrixXd> eig(a);
- VERIFY_IS_APPROX(eig.pseudoEigenvectors().squaredNorm(), 2.);
- VERIFY_IS_APPROX((a * eig.pseudoEigenvectors()).norm()+1., 1.);
- VERIFY_IS_APPROX((eig.pseudoEigenvectors() * eig.pseudoEigenvalueMatrix()).norm()+1., 1.);
- VERIFY_IS_APPROX((a * eig.eigenvectors()).norm()+1., 1.);
- VERIFY_IS_APPROX((eig.eigenvectors() * eig.eigenvalues().asDiagonal()).norm()+1., 1.);
- }
-#endif
+ CALL_SUBTEST_2( eigensolver_generic_extra<0>() );
TEST_SET_BUT_UNUSED_VARIABLE(s)
}
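The bug-793 regression moved above into eigensolver_generic_extra() checks A*V = V*D on a matrix with a 1e+200 entry, scaling both sides to keep the comparison finite; a standalone sketch of that identity (hypothetical, not part of the patch):

#include <Eigen/Eigenvalues>
#include <iostream>

int main()
{
  Eigen::MatrixXd a(3,3);
  a << 0, 0,      1,
       1, 1,      1,
       1, 1e+200, 1;
  Eigen::EigenSolver<Eigen::MatrixXd> eig(a);

  const double scale = 1e-200;  // avoid overflow when forming norms of the products
  Eigen::MatrixXcd lhs = a * eig.eigenvectors() * scale;
  Eigen::MatrixXcd rhs = eig.eigenvectors() * eig.eigenvalues().asDiagonal() * scale;

  std::cout << "relative residual: " << (lhs - rhs).norm() / rhs.norm() << std::endl;
  return 0;
}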
diff --git a/test/eigensolver_selfadjoint.cpp b/test/eigensolver_selfadjoint.cpp
index 39ad4130e..65b80c3fb 100644
--- a/test/eigensolver_selfadjoint.cpp
+++ b/test/eigensolver_selfadjoint.cpp
@@ -68,7 +68,6 @@ template<typename MatrixType> void selfadjointeigensolver_essential_check(const
template<typename MatrixType> void selfadjointeigensolver(const MatrixType& m)
{
- typedef typename MatrixType::Index Index;
/* this test covers the following files:
EigenSolver.h, SelfAdjointEigenSolver.h (and indirectly: Tridiagonalization.h)
*/
@@ -231,7 +230,7 @@ void bug_1204()
SelfAdjointEigenSolver<Eigen::SparseMatrix<double> > eig(A);
}
-void test_eigensolver_selfadjoint()
+EIGEN_DECLARE_TEST(eigensolver_selfadjoint)
{
int s = 0;
for(int i = 0; i < g_repeat; i++) {
diff --git a/test/evaluators.cpp b/test/evaluators.cpp
index aed5a05a7..f4fdaf053 100644
--- a/test/evaluators.cpp
+++ b/test/evaluators.cpp
@@ -101,7 +101,7 @@ using namespace std;
#define VERIFY_IS_APPROX_EVALUATOR(DEST,EXPR) VERIFY_IS_APPROX(copy_using_evaluator(DEST,(EXPR)), (EXPR).eval());
#define VERIFY_IS_APPROX_EVALUATOR2(DEST,EXPR,REF) VERIFY_IS_APPROX(copy_using_evaluator(DEST,(EXPR)), (REF).eval());
-void test_evaluators()
+EIGEN_DECLARE_TEST(evaluators)
{
// Testing Matrix evaluator and Transpose
Vector2d v = Vector2d::Random();
diff --git a/test/exceptions.cpp b/test/exceptions.cpp
index b83fb82ba..3d93060ab 100644
--- a/test/exceptions.cpp
+++ b/test/exceptions.cpp
@@ -8,93 +8,34 @@
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-// Various sanity tests with exceptions:
+// Various sanity tests with exceptions and a non-trivially-copyable scalar type.
// - no memory leak when a custom scalar type throws an exception
// - todo: complete the list of tests!
#define EIGEN_STACK_ALLOCATION_LIMIT 100000000
#include "main.h"
-
-struct my_exception
-{
- my_exception() {}
- ~my_exception() {}
-};
-
-class ScalarWithExceptions
-{
- public:
- ScalarWithExceptions() { init(); }
- ScalarWithExceptions(const float& _v) { init(); *v = _v; }
- ScalarWithExceptions(const ScalarWithExceptions& other) { init(); *v = *(other.v); }
- ~ScalarWithExceptions() {
- delete v;
- instances--;
- }
-
- void init() {
- v = new float;
- instances++;
- }
-
- ScalarWithExceptions operator+(const ScalarWithExceptions& other) const
- {
- countdown--;
- if(countdown<=0)
- throw my_exception();
- return ScalarWithExceptions(*v+*other.v);
- }
-
- ScalarWithExceptions operator-(const ScalarWithExceptions& other) const
- { return ScalarWithExceptions(*v-*other.v); }
-
- ScalarWithExceptions operator*(const ScalarWithExceptions& other) const
- { return ScalarWithExceptions((*v)*(*other.v)); }
-
- ScalarWithExceptions& operator+=(const ScalarWithExceptions& other)
- { *v+=*other.v; return *this; }
- ScalarWithExceptions& operator-=(const ScalarWithExceptions& other)
- { *v-=*other.v; return *this; }
- ScalarWithExceptions& operator=(const ScalarWithExceptions& other)
- { *v = *(other.v); return *this; }
-
- bool operator==(const ScalarWithExceptions& other) const
- { return *v==*other.v; }
- bool operator!=(const ScalarWithExceptions& other) const
- { return *v!=*other.v; }
-
- float* v;
- static int instances;
- static int countdown;
-};
-
-ScalarWithExceptions real(const ScalarWithExceptions &x) { return x; }
-ScalarWithExceptions imag(const ScalarWithExceptions & ) { return 0; }
-ScalarWithExceptions conj(const ScalarWithExceptions &x) { return x; }
-
-int ScalarWithExceptions::instances = 0;
-int ScalarWithExceptions::countdown = 0;
-
+#include "AnnoyingScalar.h"
#define CHECK_MEMLEAK(OP) { \
- ScalarWithExceptions::countdown = 100; \
- int before = ScalarWithExceptions::instances; \
- bool exception_thrown = false; \
- try { OP; } \
+ AnnoyingScalar::countdown = 100; \
+ int before = AnnoyingScalar::instances; \
+ bool exception_thrown = false; \
+ try { OP; } \
catch (my_exception) { \
exception_thrown = true; \
- VERIFY(ScalarWithExceptions::instances==before && "memory leak detected in " && EIGEN_MAKESTRING(OP)); \
+ VERIFY(AnnoyingScalar::instances==before && "memory leak detected in " && EIGEN_MAKESTRING(OP)); \
} \
- VERIFY(exception_thrown && " no exception thrown in " && EIGEN_MAKESTRING(OP)); \
+ VERIFY( (AnnoyingScalar::dont_throw) || (exception_thrown && " no exception thrown in " && EIGEN_MAKESTRING(OP)) ); \
}
-void memoryleak()
+EIGEN_DECLARE_TEST(exceptions)
{
- typedef Eigen::Matrix<ScalarWithExceptions,Dynamic,1> VectorType;
- typedef Eigen::Matrix<ScalarWithExceptions,Dynamic,Dynamic> MatrixType;
+ typedef Eigen::Matrix<AnnoyingScalar,Dynamic,1> VectorType;
+ typedef Eigen::Matrix<AnnoyingScalar,Dynamic,Dynamic> MatrixType;
{
+ AnnoyingScalar::dont_throw = false;
int n = 50;
VectorType v0(n), v1(n);
MatrixType m0(n,n), m1(n,n), m2(n,n);
@@ -104,10 +45,5 @@ void memoryleak()
CHECK_MEMLEAK(m2 = m0 * m1 * m2);
CHECK_MEMLEAK((v0+v1).dot(v0+v1));
}
- VERIFY(ScalarWithExceptions::instances==0 && "global memory leak detected in " && EIGEN_MAKESTRING(OP)); \
-}
-
-void test_exceptions()
-{
- CALL_SUBTEST( memoryleak() );
+ VERIFY(AnnoyingScalar::instances==0 && "global memory leak detected in " && EIGEN_MAKESTRING(OP));
}
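The CHECK_MEMLEAK macro above counts live scalar instances across an expression that throws mid-evaluation; a minimal self-contained sketch of that pattern with a hypothetical CountingScalar (the real test uses the AnnoyingScalar helper, not shown here):

#include <iostream>
#include <stdexcept>

struct CountingScalar
{
  static int instances;
  static int countdown;            // throw once this many additions have happened
  double v;
  CountingScalar(double x = 0) : v(x) { ++instances; }
  CountingScalar(const CountingScalar& o) : v(o.v) { ++instances; }
  ~CountingScalar() { --instances; }
  CountingScalar operator+(const CountingScalar& o) const
  {
    if(--countdown <= 0) throw std::runtime_error("boom");
    return CountingScalar(v + o.v);
  }
};
int CountingScalar::instances = 0;
int CountingScalar::countdown = 0;

int main()
{
  const int before = CountingScalar::instances;
  CountingScalar::countdown = 2;               // force a throw on the second '+'
  try {
    CountingScalar a(1), b(2), c(3);
    CountingScalar d = a + b + c;              // throws before 'd' is constructed
    (void)d;
  } catch(const std::runtime_error&) {
    // stack unwinding must have destroyed every temporary
  }
  std::cout << "leaked instances: " << (CountingScalar::instances - before) << std::endl;
  return 0;
}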
diff --git a/test/fastmath.cpp b/test/fastmath.cpp
index cc5db0746..c30f0a846 100644
--- a/test/fastmath.cpp
+++ b/test/fastmath.cpp
@@ -88,7 +88,7 @@ void check_inf_nan(bool dryrun) {
}
}
-void test_fastmath() {
+EIGEN_DECLARE_TEST(fastmath) {
std::cout << "*** float *** \n\n"; check_inf_nan<float>(true);
std::cout << "*** double ***\n\n"; check_inf_nan<double>(true);
std::cout << "*** long double *** \n\n"; check_inf_nan<long double>(true);
diff --git a/test/first_aligned.cpp b/test/first_aligned.cpp
index ae2d4bc42..ed9945077 100644
--- a/test/first_aligned.cpp
+++ b/test/first_aligned.cpp
@@ -26,7 +26,7 @@ void test_none_aligned_helper(Scalar *array, int size)
struct some_non_vectorizable_type { float x; };
-void test_first_aligned()
+EIGEN_DECLARE_TEST(first_aligned)
{
EIGEN_ALIGN16 float array_float[100];
test_first_aligned_helper(array_float, 50);
diff --git a/test/geo_alignedbox.cpp b/test/geo_alignedbox.cpp
index 223ff5eea..c6c051ce4 100644
--- a/test/geo_alignedbox.cpp
+++ b/test/geo_alignedbox.cpp
@@ -33,7 +33,6 @@ template<typename BoxType> void alignedbox(const BoxType& _box)
/* this test covers the following files:
AlignedBox.h
*/
- typedef typename BoxType::Index Index;
typedef typename BoxType::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
typedef Matrix<Scalar, BoxType::AmbientDimAtCompileTime, 1> VectorType;
@@ -95,7 +94,6 @@ template<typename BoxType>
void alignedboxCastTests(const BoxType& _box)
{
// casting
- typedef typename BoxType::Index Index;
typedef typename BoxType::Scalar Scalar;
typedef Matrix<Scalar, BoxType::AmbientDimAtCompileTime, 1> VectorType;
@@ -171,7 +169,7 @@ void specificTest2()
}
-void test_geo_alignedbox()
+EIGEN_DECLARE_TEST(geo_alignedbox)
{
for(int i = 0; i < g_repeat; i++)
{
diff --git a/test/geo_eulerangles.cpp b/test/geo_eulerangles.cpp
index 932ebe773..693c627a9 100644
--- a/test/geo_eulerangles.cpp
+++ b/test/geo_eulerangles.cpp
@@ -103,7 +103,7 @@ template<typename Scalar> void eulerangles()
check_all_var(ea);
}
-void test_geo_eulerangles()
+EIGEN_DECLARE_TEST(geo_eulerangles)
{
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1( eulerangles<float>() );
diff --git a/test/geo_homogeneous.cpp b/test/geo_homogeneous.cpp
index 2187c7bf9..9aebe6226 100644
--- a/test/geo_homogeneous.cpp
+++ b/test/geo_homogeneous.cpp
@@ -115,7 +115,7 @@ template<typename Scalar,int Size> void homogeneous(void)
VERIFY_IS_APPROX( (t2.template triangularView<Lower>() * v0.homogeneous()).eval(), (t2.template triangularView<Lower>()*hv0) );
}
-void test_geo_homogeneous()
+EIGEN_DECLARE_TEST(geo_homogeneous)
{
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1(( homogeneous<float,1>() ));
diff --git a/test/geo_hyperplane.cpp b/test/geo_hyperplane.cpp
index 27892850d..a26709301 100644
--- a/test/geo_hyperplane.cpp
+++ b/test/geo_hyperplane.cpp
@@ -19,7 +19,6 @@ template<typename HyperplaneType> void hyperplane(const HyperplaneType& _plane)
Hyperplane.h
*/
using std::abs;
- typedef typename HyperplaneType::Index Index;
const Index dim = _plane.dim();
enum { Options = HyperplaneType::Options };
typedef typename HyperplaneType::Scalar Scalar;
@@ -181,7 +180,7 @@ template<typename Scalar> void hyperplane_alignment()
}
-void test_geo_hyperplane()
+EIGEN_DECLARE_TEST(geo_hyperplane)
{
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1( hyperplane(Hyperplane<float,2>()) );
diff --git a/test/geo_orthomethods.cpp b/test/geo_orthomethods.cpp
index e178df257..b7b660740 100644
--- a/test/geo_orthomethods.cpp
+++ b/test/geo_orthomethods.cpp
@@ -115,7 +115,7 @@ template<typename Scalar, int Size> void orthomethods(int size=Size)
VERIFY_IS_APPROX(mcrossN3.row(i), matN3.row(i).cross(vec3));
}
-void test_geo_orthomethods()
+EIGEN_DECLARE_TEST(geo_orthomethods)
{
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1( orthomethods_3<float>() );
diff --git a/test/geo_parametrizedline.cpp b/test/geo_parametrizedline.cpp
index 29c1b105c..7135c8fa5 100644
--- a/test/geo_parametrizedline.cpp
+++ b/test/geo_parametrizedline.cpp
@@ -19,7 +19,6 @@ template<typename LineType> void parametrizedline(const LineType& _line)
ParametrizedLine.h
*/
using std::abs;
- typedef typename LineType::Index Index;
const Index dim = _line.dim();
typedef typename LineType::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
@@ -118,7 +117,7 @@ template<typename Scalar> void parametrizedline_alignment()
#endif
}
-void test_geo_parametrizedline()
+EIGEN_DECLARE_TEST(geo_parametrizedline)
{
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1( parametrizedline(ParametrizedLine<float,2>()) );
diff --git a/test/geo_quaternion.cpp b/test/geo_quaternion.cpp
index 96889e722..27219db10 100644
--- a/test/geo_quaternion.cpp
+++ b/test/geo_quaternion.cpp
@@ -12,6 +12,7 @@
#include <Eigen/Geometry>
#include <Eigen/LU>
#include <Eigen/SVD>
+#include "AnnoyingScalar.h"
template<typename T> T bounded_acos(T v)
{
@@ -85,7 +86,7 @@ template<typename Scalar, int Options> void quaternion(void)
if (refangle>Scalar(EIGEN_PI))
refangle = Scalar(2)*Scalar(EIGEN_PI) - refangle;
- if((q1.coeffs()-q2.coeffs()).norm() > 10*largeEps)
+ if((q1.coeffs()-q2.coeffs()).norm() > Scalar(10)*largeEps)
{
VERIFY_IS_MUCH_SMALLER_THAN(abs(q1.angularDistance(q2) - refangle), Scalar(1));
}
@@ -113,7 +114,7 @@ template<typename Scalar, int Options> void quaternion(void)
// Do not execute the test if the rotation angle is almost zero, or
// the rotation axis and v1 are almost parallel.
- if (abs(aa.angle()) > 5*test_precision<Scalar>()
+ if (abs(aa.angle()) > Scalar(5)*test_precision<Scalar>()
&& (aa.axis() - v1.normalized()).norm() < Scalar(1.99)
&& (aa.axis() + v1.normalized()).norm() < Scalar(1.99))
{
@@ -231,6 +232,19 @@ template<typename Scalar> void mapQuaternion(void){
VERIFY_IS_APPROX(mq3*mq2, q3*q2);
VERIFY_IS_APPROX(mcq1*mq2, q1*q2);
VERIFY_IS_APPROX(mcq3*mq2, q3*q2);
+
+ // Bug 1461, compilation issue with Map<const Quat>::w(), and other reference/constness checks:
+ VERIFY_IS_APPROX(mcq3.coeffs().x() + mcq3.coeffs().y() + mcq3.coeffs().z() + mcq3.coeffs().w(), mcq3.coeffs().sum());
+ VERIFY_IS_APPROX(mcq3.x() + mcq3.y() + mcq3.z() + mcq3.w(), mcq3.coeffs().sum());
+ mq3.w() = 1;
+ const Quaternionx& cq3(q3);
+ VERIFY( &cq3.x() == &q3.x() );
+ const MQuaternionUA& cmq3(mq3);
+ VERIFY( &cmq3.x() == &mq3.x() );
+ // FIXME the following should be ok. The problem is that currently the LValueBit flag
+ // is used to determine whether we can return a coeff by reference or not, which is not enough for Map<const ...>.
+ //const MCQuaternionUA& cmcq3(mcq3);
+ //VERIFY( &cmcq3.x() == &mcq3.x() );
}
template<typename Scalar> void quaternionAlignment(void){
@@ -272,18 +286,38 @@ template<typename PlainObjectType> void check_const_correctness(const PlainObjec
VERIFY( !(Map<ConstPlainObjectType, Aligned>::Flags & LvalueBit) );
}
-void test_geo_quaternion()
+#if EIGEN_HAS_RVALUE_REFERENCES
+
+// Regression for bug 1573
+struct MovableClass {
+ // The following line is a workaround for gcc 4.7 and 4.8 (see bug 1573 comments).
+ static_assert(std::is_nothrow_move_constructible<Quaternionf>::value,"");
+ MovableClass() = default;
+ MovableClass(const MovableClass&) = default;
+ MovableClass(MovableClass&&) noexcept = default;
+ MovableClass& operator=(const MovableClass&) = default;
+ MovableClass& operator=(MovableClass&&) = default;
+ Quaternionf m_quat;
+};
+
+#endif
+
+EIGEN_DECLARE_TEST(geo_quaternion)
{
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1(( quaternion<float,AutoAlign>() ));
CALL_SUBTEST_1( check_const_correctness(Quaternionf()) );
+ CALL_SUBTEST_1(( quaternion<float,DontAlign>() ));
+ CALL_SUBTEST_1(( quaternionAlignment<float>() ));
+ CALL_SUBTEST_1( mapQuaternion<float>() );
+
CALL_SUBTEST_2(( quaternion<double,AutoAlign>() ));
CALL_SUBTEST_2( check_const_correctness(Quaterniond()) );
- CALL_SUBTEST_3(( quaternion<float,DontAlign>() ));
- CALL_SUBTEST_4(( quaternion<double,DontAlign>() ));
- CALL_SUBTEST_5(( quaternionAlignment<float>() ));
- CALL_SUBTEST_6(( quaternionAlignment<double>() ));
- CALL_SUBTEST_1( mapQuaternion<float>() );
+ CALL_SUBTEST_2(( quaternion<double,DontAlign>() ));
+ CALL_SUBTEST_2(( quaternionAlignment<double>() ));
CALL_SUBTEST_2( mapQuaternion<double>() );
+
+ AnnoyingScalar::dont_throw = true;
+ CALL_SUBTEST_3(( quaternion<AnnoyingScalar,AutoAlign>() ));
}
}
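The MovableClass regression above hinges on Quaternionf being nothrow move constructible so that holder classes get defaulted noexcept moves; a standalone sketch with a hypothetical Pose holder (not part of the patch):

#include <Eigen/Geometry>
#include <type_traits>
#include <iostream>

static_assert(std::is_nothrow_move_constructible<Eigen::Quaternionf>::value,
              "Quaternionf must be nothrow move constructible (bug 1573)");

struct Pose
{
  Pose() = default;
  Pose(Pose&&) noexcept = default;     // relies on the member being noexcept-movable
  Pose& operator=(Pose&&) = default;
  Eigen::Quaternionf orientation;
};

int main()
{
  std::cout << std::boolalpha
            << std::is_nothrow_move_constructible<Pose>::value << std::endl;
  return 0;
}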
diff --git a/test/geo_transformations.cpp b/test/geo_transformations.cpp
index 278e527c2..bf920696b 100755
--- a/test/geo_transformations.cpp
+++ b/test/geo_transformations.cpp
@@ -612,7 +612,7 @@ template<typename Scalar, int Dim, int Options> void transform_products()
VERIFY_IS_APPROX((ac*p).matrix(), a_m*p_m);
}
-void test_geo_transformations()
+EIGEN_DECLARE_TEST(geo_transformations)
{
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1(( transformations<double,Affine,AutoAlign>() ));
diff --git a/test/cuda_basic.cu b/test/gpu_basic.cu
index cb2e4167a..e8069f185 100644
--- a/test/cuda_basic.cu
+++ b/test/gpu_basic.cu
@@ -15,16 +15,10 @@
#define EIGEN_TEST_NO_LONGDOUBLE
#define EIGEN_TEST_NO_COMPLEX
-#define EIGEN_TEST_FUNC cuda_basic
#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int
-#include <math_constants.h>
-#include <cuda.h>
-#if defined __CUDACC_VER__ && __CUDACC_VER__ >= 70500
-#include <cuda_fp16.h>
-#endif
#include "main.h"
-#include "cuda_common.h"
+#include "gpu_common.h"
// Check that dense modules can be properly parsed by nvcc
#include <Eigen/Dense>
@@ -124,6 +118,22 @@ struct diagonal {
};
template<typename T>
+struct eigenvalues_direct {
+ EIGEN_DEVICE_FUNC
+ void operator()(int i, const typename T::Scalar* in, typename T::Scalar* out) const
+ {
+ using namespace Eigen;
+ typedef Matrix<typename T::Scalar, T::RowsAtCompileTime, 1> Vec;
+ T M(in+i);
+ Map<Vec> res(out+i*Vec::MaxSizeAtCompileTime);
+ T A = M*M.adjoint();
+ SelfAdjointEigenSolver<T> eig;
+ eig.computeDirect(A);
+ res = eig.eigenvalues();
+ }
+};
+
+template<typename T>
struct eigenvalues {
EIGEN_DEVICE_FUNC
void operator()(int i, const typename T::Scalar* in, typename T::Scalar* out) const
@@ -134,40 +144,71 @@ struct eigenvalues {
Map<Vec> res(out+i*Vec::MaxSizeAtCompileTime);
T A = M*M.adjoint();
SelfAdjointEigenSolver<T> eig;
- eig.computeDirect(M);
+ eig.compute(A);
res = eig.eigenvalues();
}
};
-void test_cuda_basic()
+template<typename T>
+struct matrix_inverse {
+ EIGEN_DEVICE_FUNC
+ void operator()(int i, const typename T::Scalar* in, typename T::Scalar* out) const
+ {
+ using namespace Eigen;
+ T M(in+i);
+ Map<T> res(out+i*T::MaxSizeAtCompileTime);
+ res = M.inverse();
+ }
+};
+
+EIGEN_DECLARE_TEST(gpu_basic)
{
- ei_test_init_cuda();
+ ei_test_init_gpu();
int nthreads = 100;
Eigen::VectorXf in, out;
- #ifndef __CUDA_ARCH__
+ #if !defined(__CUDA_ARCH__) && !defined(__HIP_DEVICE_COMPILE__)
int data_size = nthreads * 512;
in.setRandom(data_size);
out.setRandom(data_size);
#endif
- CALL_SUBTEST( run_and_compare_to_cuda(coeff_wise<Vector3f>(), nthreads, in, out) );
- CALL_SUBTEST( run_and_compare_to_cuda(coeff_wise<Array44f>(), nthreads, in, out) );
-
- CALL_SUBTEST( run_and_compare_to_cuda(replicate<Array4f>(), nthreads, in, out) );
- CALL_SUBTEST( run_and_compare_to_cuda(replicate<Array33f>(), nthreads, in, out) );
+ CALL_SUBTEST( run_and_compare_to_gpu(coeff_wise<Vector3f>(), nthreads, in, out) );
+ CALL_SUBTEST( run_and_compare_to_gpu(coeff_wise<Array44f>(), nthreads, in, out) );
+
+#if !defined(EIGEN_USE_HIP)
+ // FIXME
+ // These subtests result in a compile failure on the HIP platform
+ //
+ // eigen-upstream/Eigen/src/Core/Replicate.h:61:65: error:
+ // base class 'internal::dense_xpr_base<Replicate<Array<float, 4, 1, 0, 4, 1>, -1, -1> >::type'
+ // (aka 'ArrayBase<Eigen::Replicate<Eigen::Array<float, 4, 1, 0, 4, 1>, -1, -1> >') has protected default constructor
+ CALL_SUBTEST( run_and_compare_to_gpu(replicate<Array4f>(), nthreads, in, out) );
+ CALL_SUBTEST( run_and_compare_to_gpu(replicate<Array33f>(), nthreads, in, out) );
+#endif
- CALL_SUBTEST( run_and_compare_to_cuda(redux<Array4f>(), nthreads, in, out) );
- CALL_SUBTEST( run_and_compare_to_cuda(redux<Matrix3f>(), nthreads, in, out) );
+ CALL_SUBTEST( run_and_compare_to_gpu(redux<Array4f>(), nthreads, in, out) );
+ CALL_SUBTEST( run_and_compare_to_gpu(redux<Matrix3f>(), nthreads, in, out) );
- CALL_SUBTEST( run_and_compare_to_cuda(prod_test<Matrix3f,Matrix3f>(), nthreads, in, out) );
- CALL_SUBTEST( run_and_compare_to_cuda(prod_test<Matrix4f,Vector4f>(), nthreads, in, out) );
+ CALL_SUBTEST( run_and_compare_to_gpu(prod_test<Matrix3f,Matrix3f>(), nthreads, in, out) );
+ CALL_SUBTEST( run_and_compare_to_gpu(prod_test<Matrix4f,Vector4f>(), nthreads, in, out) );
- CALL_SUBTEST( run_and_compare_to_cuda(diagonal<Matrix3f,Vector3f>(), nthreads, in, out) );
- CALL_SUBTEST( run_and_compare_to_cuda(diagonal<Matrix4f,Vector4f>(), nthreads, in, out) );
+ CALL_SUBTEST( run_and_compare_to_gpu(diagonal<Matrix3f,Vector3f>(), nthreads, in, out) );
+ CALL_SUBTEST( run_and_compare_to_gpu(diagonal<Matrix4f,Vector4f>(), nthreads, in, out) );
+
+ CALL_SUBTEST( run_and_compare_to_gpu(matrix_inverse<Matrix2f>(), nthreads, in, out) );
+ CALL_SUBTEST( run_and_compare_to_gpu(matrix_inverse<Matrix3f>(), nthreads, in, out) );
+ CALL_SUBTEST( run_and_compare_to_gpu(matrix_inverse<Matrix4f>(), nthreads, in, out) );
- CALL_SUBTEST( run_and_compare_to_cuda(eigenvalues<Matrix3f>(), nthreads, in, out) );
- CALL_SUBTEST( run_and_compare_to_cuda(eigenvalues<Matrix2f>(), nthreads, in, out) );
+ CALL_SUBTEST( run_and_compare_to_gpu(eigenvalues_direct<Matrix3f>(), nthreads, in, out) );
+ CALL_SUBTEST( run_and_compare_to_gpu(eigenvalues_direct<Matrix2f>(), nthreads, in, out) );
+#if defined(__NVCC__)
+ // FIXME
+ // These subtests compile only with nvcc and fail with HIPCC and clang-cuda
+ CALL_SUBTEST( run_and_compare_to_gpu(eigenvalues<Matrix4f>(), nthreads, in, out) );
+ typedef Matrix<float,6,6> Matrix6f;
+ CALL_SUBTEST( run_and_compare_to_gpu(eigenvalues<Matrix6f>(), nthreads, in, out) );
+#endif
}
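
For orientation, every device-side functor in this test follows the same per-thread convention: construct a fixed-size matrix from in+i, compute, and write the result into out at a per-thread offset. A minimal sketch of that shape (the functor name trace_test and the single-scalar output slot are illustrative only, not part of this patch):

template<typename T>
struct trace_test {
  EIGEN_DEVICE_FUNC
  void operator()(int i, const typename T::Scalar* in, typename T::Scalar* out) const
  {
    T M(in+i);           // load a fixed-size matrix starting at in+i
    out[i] = M.trace();  // write one scalar result per thread index
  }
};
// and it would be exercised like the subtests above:
//   CALL_SUBTEST( run_and_compare_to_gpu(trace_test<Matrix3f>(), nthreads, in, out) );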
diff --git a/test/gpu_common.h b/test/gpu_common.h
new file mode 100644
index 000000000..79d4ea694
--- /dev/null
+++ b/test/gpu_common.h
@@ -0,0 +1,157 @@
+
+#ifndef EIGEN_TEST_GPU_COMMON_H
+#define EIGEN_TEST_GPU_COMMON_H
+
+#ifdef EIGEN_USE_HIP
+ #include <hip/hip_runtime.h>
+ #include <hip/hip_runtime_api.h>
+#else
+ #include <cuda.h>
+ #include <cuda_runtime.h>
+ #include <cuda_runtime_api.h>
+#endif
+
+#include <iostream>
+
+#define EIGEN_USE_GPU
+#include <unsupported/Eigen/CXX11/src/Tensor/TensorGpuHipCudaDefines.h>
+
+#if !defined(__CUDACC__) && !defined(__HIPCC__)
+dim3 threadIdx, blockDim, blockIdx;
+#endif
+
+template<typename Kernel, typename Input, typename Output>
+void run_on_cpu(const Kernel& ker, int n, const Input& in, Output& out)
+{
+ for(int i=0; i<n; i++)
+ ker(i, in.data(), out.data());
+}
+
+
+template<typename Kernel, typename Input, typename Output>
+__global__
+void run_on_gpu_meta_kernel(const Kernel ker, int n, const Input* in, Output* out)
+{
+ int i = threadIdx.x + blockIdx.x*blockDim.x;
+ if(i<n) {
+ ker(i, in, out);
+ }
+}
+
+
+template<typename Kernel, typename Input, typename Output>
+void run_on_gpu(const Kernel& ker, int n, const Input& in, Output& out)
+{
+ typename Input::Scalar* d_in;
+ typename Output::Scalar* d_out;
+ std::ptrdiff_t in_bytes = in.size() * sizeof(typename Input::Scalar);
+ std::ptrdiff_t out_bytes = out.size() * sizeof(typename Output::Scalar);
+
+ gpuMalloc((void**)(&d_in), in_bytes);
+ gpuMalloc((void**)(&d_out), out_bytes);
+
+ gpuMemcpy(d_in, in.data(), in_bytes, gpuMemcpyHostToDevice);
+ gpuMemcpy(d_out, out.data(), out_bytes, gpuMemcpyHostToDevice);
+
+ // Simple and non-optimal 1D mapping assuming n is not too large
+ // That's only for unit testing!
+ dim3 Blocks(128);
+ dim3 Grids( (n+int(Blocks.x)-1)/int(Blocks.x) );
+
+ gpuDeviceSynchronize();
+
+#ifdef EIGEN_USE_HIP
+ hipLaunchKernelGGL(HIP_KERNEL_NAME(run_on_gpu_meta_kernel<Kernel,
+ typename std::decay<decltype(*d_in)>::type,
+ typename std::decay<decltype(*d_out)>::type>),
+ dim3(Grids), dim3(Blocks), 0, 0, ker, n, d_in, d_out);
+#else
+ run_on_gpu_meta_kernel<<<Grids,Blocks>>>(ker, n, d_in, d_out);
+#endif
+
+ gpuDeviceSynchronize();
+
+ // check inputs have not been modified
+ gpuMemcpy(const_cast<typename Input::Scalar*>(in.data()), d_in, in_bytes, gpuMemcpyDeviceToHost);
+ gpuMemcpy(out.data(), d_out, out_bytes, gpuMemcpyDeviceToHost);
+
+ gpuFree(d_in);
+ gpuFree(d_out);
+}
+
+
+template<typename Kernel, typename Input, typename Output>
+void run_and_compare_to_gpu(const Kernel& ker, int n, const Input& in, Output& out)
+{
+ Input in_ref, in_gpu;
+ Output out_ref, out_gpu;
+ #if !defined(__CUDA_ARCH__) && !defined(__HIP_DEVICE_COMPILE__)
+ in_ref = in_gpu = in;
+ out_ref = out_gpu = out;
+ #else
+ EIGEN_UNUSED_VARIABLE(in);
+ EIGEN_UNUSED_VARIABLE(out);
+ #endif
+ run_on_cpu (ker, n, in_ref, out_ref);
+ run_on_gpu(ker, n, in_gpu, out_gpu);
+ #if !defined(__CUDA_ARCH__) && !defined(__HIP_DEVICE_COMPILE__)
+ VERIFY_IS_APPROX(in_ref, in_gpu);
+ VERIFY_IS_APPROX(out_ref, out_gpu);
+ #endif
+}
+
+struct compile_time_device_info {
+ EIGEN_DEVICE_FUNC
+ void operator()(int /*i*/, const int* /*in*/, int* info) const
+ {
+ #if defined(__CUDA_ARCH__)
+ info[0] = int(__CUDA_ARCH__ +0);
+ #endif
+ #if defined(EIGEN_HIP_DEVICE_COMPILE)
+ info[1] = int(EIGEN_HIP_DEVICE_COMPILE +0);
+ #endif
+ }
+};
+
+void ei_test_init_gpu()
+{
+ int device = 0;
+ gpuDeviceProp_t deviceProp;
+ gpuGetDeviceProperties(&deviceProp, device);
+
+ ArrayXi dummy(1), info(10);
+ info = -1;
+ run_on_gpu(compile_time_device_info(),10,dummy,info);
+
+
+ std::cout << "GPU compile-time info:\n";
+
+ #ifdef EIGEN_CUDACC
+ std::cout << " EIGEN_CUDACC: " << int(EIGEN_CUDACC) << "\n";
+ #endif
+
+ #ifdef EIGEN_CUDACC_VER
+ std::cout << " EIGEN_CUDACC_VER: " << int(EIGEN_CUDACC_VER) << "\n";
+ #endif
+
+ #ifdef EIGEN_HIPCC
+ std::cout << " EIGEN_HIPCC: " << int(EIGEN_HIPCC) << "\n";
+ #endif
+
+ std::cout << " EIGEN_CUDA_ARCH: " << info[0] << "\n";
+ std::cout << " EIGEN_HIP_DEVICE_COMPILE: " << info[1] << "\n";
+
+ std::cout << "GPU device info:\n";
+ std::cout << " name: " << deviceProp.name << "\n";
+ std::cout << " capability: " << deviceProp.major << "." << deviceProp.minor << "\n";
+ std::cout << " multiProcessorCount: " << deviceProp.multiProcessorCount << "\n";
+ std::cout << " maxThreadsPerMultiProcessor: " << deviceProp.maxThreadsPerMultiProcessor << "\n";
+ std::cout << " warpSize: " << deviceProp.warpSize << "\n";
+ std::cout << " regsPerBlock: " << deviceProp.regsPerBlock << "\n";
+ std::cout << " concurrentKernels: " << deviceProp.concurrentKernels << "\n";
+ std::cout << " clockRate: " << deviceProp.clockRate << "\n";
+ std::cout << " canMapHostMemory: " << deviceProp.canMapHostMemory << "\n";
+ std::cout << " computeMode: " << deviceProp.computeMode << "\n";
+}
+
+#endif // EIGEN_TEST_GPU_COMMON_H
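
A minimal sketch of how these helpers are driven from a host-side test body, reusing one of the functors defined in gpu_basic.cu above (the buffer size follows the same nthreads*512 convention as that test):

int nthreads = 100;
Eigen::VectorXf in  = Eigen::VectorXf::Random(nthreads * 512);
Eigen::VectorXf out = Eigen::VectorXf::Random(nthreads * 512);
// run the functor once per index on the CPU and on the GPU, then compare
// both the outputs and the (supposedly untouched) inputs
run_and_compare_to_gpu(coeff_wise<Eigen::Vector3f>(), nthreads, in, out);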
diff --git a/test/half_float.cpp b/test/half_float.cpp
index 6f3196852..2a7f9b497 100644
--- a/test/half_float.cpp
+++ b/test/half_float.cpp
@@ -9,7 +9,7 @@
#include "main.h"
-#include <Eigen/src/Core/arch/CUDA/Half.h>
+#include <Eigen/src/Core/arch/GPU/Half.h>
// Make sure it's possible to forward declare Eigen::half
namespace Eigen {
@@ -20,7 +20,7 @@ using Eigen::half;
void test_conversion()
{
- using Eigen::half_impl::__half;
+ using Eigen::half_impl::__half_raw;
// Conversion from float.
VERIFY_IS_EQUAL(half(1.0f).x, 0x3c00);
@@ -37,9 +37,9 @@ void test_conversion()
VERIFY_IS_EQUAL(half(1.19209e-07f).x, 0x0002);
// Verify round-to-nearest-even behavior.
- float val1 = float(half(__half(0x3c00)));
- float val2 = float(half(__half(0x3c01)));
- float val3 = float(half(__half(0x3c02)));
+ float val1 = float(half(__half_raw(0x3c00)));
+ float val2 = float(half(__half_raw(0x3c01)));
+ float val3 = float(half(__half_raw(0x3c02)));
VERIFY_IS_EQUAL(half(0.5f * (val1 + val2)).x, 0x3c00);
VERIFY_IS_EQUAL(half(0.5f * (val2 + val3)).x, 0x3c02);
@@ -55,21 +55,21 @@ void test_conversion()
VERIFY_IS_EQUAL(half(true).x, 0x3c00);
// Conversion to float.
- VERIFY_IS_EQUAL(float(half(__half(0x0000))), 0.0f);
- VERIFY_IS_EQUAL(float(half(__half(0x3c00))), 1.0f);
+ VERIFY_IS_EQUAL(float(half(__half_raw(0x0000))), 0.0f);
+ VERIFY_IS_EQUAL(float(half(__half_raw(0x3c00))), 1.0f);
// Denormals.
- VERIFY_IS_APPROX(float(half(__half(0x8001))), -5.96046e-08f);
- VERIFY_IS_APPROX(float(half(__half(0x0001))), 5.96046e-08f);
- VERIFY_IS_APPROX(float(half(__half(0x0002))), 1.19209e-07f);
+ VERIFY_IS_APPROX(float(half(__half_raw(0x8001))), -5.96046e-08f);
+ VERIFY_IS_APPROX(float(half(__half_raw(0x0001))), 5.96046e-08f);
+ VERIFY_IS_APPROX(float(half(__half_raw(0x0002))), 1.19209e-07f);
// NaNs and infinities.
VERIFY(!(numext::isinf)(float(half(65504.0f)))); // Largest finite number.
VERIFY(!(numext::isnan)(float(half(0.0f))));
- VERIFY((numext::isinf)(float(half(__half(0xfc00)))));
- VERIFY((numext::isnan)(float(half(__half(0xfc01)))));
- VERIFY((numext::isinf)(float(half(__half(0x7c00)))));
- VERIFY((numext::isnan)(float(half(__half(0x7c01)))));
+ VERIFY((numext::isinf)(float(half(__half_raw(0xfc00)))));
+ VERIFY((numext::isnan)(float(half(__half_raw(0xfc01)))));
+ VERIFY((numext::isinf)(float(half(__half_raw(0x7c00)))));
+ VERIFY((numext::isnan)(float(half(__half_raw(0x7c01)))));
#if !EIGEN_COMP_MSVC
// Visual Studio errors out on divisions by 0
@@ -79,12 +79,12 @@ void test_conversion()
#endif
// Exactly same checks as above, just directly on the half representation.
- VERIFY(!(numext::isinf)(half(__half(0x7bff))));
- VERIFY(!(numext::isnan)(half(__half(0x0000))));
- VERIFY((numext::isinf)(half(__half(0xfc00))));
- VERIFY((numext::isnan)(half(__half(0xfc01))));
- VERIFY((numext::isinf)(half(__half(0x7c00))));
- VERIFY((numext::isnan)(half(__half(0x7c01))));
+ VERIFY(!(numext::isinf)(half(__half_raw(0x7bff))));
+ VERIFY(!(numext::isnan)(half(__half_raw(0x0000))));
+ VERIFY((numext::isinf)(half(__half_raw(0xfc00))));
+ VERIFY((numext::isnan)(half(__half_raw(0xfc01))));
+ VERIFY((numext::isinf)(half(__half_raw(0x7c00))));
+ VERIFY((numext::isnan)(half(__half_raw(0x7c01))));
#if !EIGEN_COMP_MSVC
// Visual Studio errors out on divisions by 0
@@ -96,12 +96,24 @@ void test_conversion()
void test_numtraits()
{
- std::cout << "epsilon = " << NumTraits<half>::epsilon() << std::endl;
- std::cout << "highest = " << NumTraits<half>::highest() << std::endl;
- std::cout << "lowest = " << NumTraits<half>::lowest() << std::endl;
- std::cout << "inifinty = " << NumTraits<half>::infinity() << std::endl;
- std::cout << "nan = " << NumTraits<half>::quiet_NaN() << std::endl;
-
+ std::cout << "epsilon = " << NumTraits<half>::epsilon() << " (0x" << std::hex << NumTraits<half>::epsilon().x << ")" << std::endl;
+ std::cout << "highest = " << NumTraits<half>::highest() << " (0x" << std::hex << NumTraits<half>::highest().x << ")" << std::endl;
+ std::cout << "lowest = " << NumTraits<half>::lowest() << " (0x" << std::hex << NumTraits<half>::lowest().x << ")" << std::endl;
+ std::cout << "min = " << (std::numeric_limits<half>::min)() << " (0x" << std::hex << half((std::numeric_limits<half>::min)()).x << ")" << std::endl;
+ std::cout << "denorm min = " << (std::numeric_limits<half>::denorm_min)() << " (0x" << std::hex << half((std::numeric_limits<half>::denorm_min)()).x << ")" << std::endl;
+ std::cout << "infinity = " << NumTraits<half>::infinity() << " (0x" << std::hex << NumTraits<half>::infinity().x << ")" << std::endl;
+ std::cout << "quiet nan = " << NumTraits<half>::quiet_NaN() << " (0x" << std::hex << NumTraits<half>::quiet_NaN().x << ")" << std::endl;
+ std::cout << "signaling nan = " << std::numeric_limits<half>::signaling_NaN() << " (0x" << std::hex << std::numeric_limits<half>::signaling_NaN().x << ")" << std::endl;
+
+ VERIFY(NumTraits<half>::IsSigned);
+
+ VERIFY_IS_EQUAL( std::numeric_limits<half>::infinity().x, half(std::numeric_limits<float>::infinity()).x );
+ VERIFY_IS_EQUAL( std::numeric_limits<half>::quiet_NaN().x, half(std::numeric_limits<float>::quiet_NaN()).x );
+ VERIFY_IS_EQUAL( std::numeric_limits<half>::signaling_NaN().x, half(std::numeric_limits<float>::signaling_NaN()).x );
+ VERIFY( (std::numeric_limits<half>::min)() > half(0.f) );
+ VERIFY( (std::numeric_limits<half>::denorm_min)() > half(0.f) );
+ VERIFY( (std::numeric_limits<half>::min)()/half(2) > half(0.f) );
+ VERIFY_IS_EQUAL( (std::numeric_limits<half>::denorm_min)()/half(2), half(0.f) );
}
void test_arithmetic()
@@ -245,13 +257,31 @@ void test_array()
ss << a1;
}
-void test_half_float()
+void test_product()
+{
+ typedef Matrix<half,Dynamic,Dynamic> MatrixXh;
+ Index rows = internal::random<Index>(1,EIGEN_TEST_MAX_SIZE);
+ Index cols = internal::random<Index>(1,EIGEN_TEST_MAX_SIZE);
+ Index depth = internal::random<Index>(1,EIGEN_TEST_MAX_SIZE);
+ MatrixXh Ah = MatrixXh::Random(rows,depth);
+ MatrixXh Bh = MatrixXh::Random(depth,cols);
+ MatrixXh Ch = MatrixXh::Random(rows,cols);
+ MatrixXf Af = Ah.cast<float>();
+ MatrixXf Bf = Bh.cast<float>();
+ MatrixXf Cf = Ch.cast<float>();
+ VERIFY_IS_APPROX(Ch.noalias()+=Ah*Bh, (Cf.noalias()+=Af*Bf).cast<half>());
+}
+
+EIGEN_DECLARE_TEST(half_float)
{
- CALL_SUBTEST(test_conversion());
CALL_SUBTEST(test_numtraits());
- CALL_SUBTEST(test_arithmetic());
- CALL_SUBTEST(test_comparison());
- CALL_SUBTEST(test_basic_functions());
- CALL_SUBTEST(test_trigonometric_functions());
- CALL_SUBTEST(test_array());
+ for(int i = 0; i < g_repeat; i++) {
+ CALL_SUBTEST(test_conversion());
+ CALL_SUBTEST(test_arithmetic());
+ CALL_SUBTEST(test_comparison());
+ CALL_SUBTEST(test_basic_functions());
+ CALL_SUBTEST(test_trigonometric_functions());
+ CALL_SUBTEST(test_array());
+ CALL_SUBTEST(test_product());
+ }
}
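
As a compressed illustration of what the renamed __half_raw type is used for in test_conversion() above (0x3c00 is the half bit pattern of 1.0, as the test itself checks):

using Eigen::half;
using Eigen::half_impl::__half_raw;
half one = half(__half_raw(0x3c00));  // build a half directly from its raw 16-bit pattern
float f  = float(one);                // widening to float is exact, so f == 1.0f
VERIFY_IS_EQUAL(half(f).x, 0x3c00);   // converting back recovers the same bits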
diff --git a/test/hessenberg.cpp b/test/hessenberg.cpp
index 96bc19e2e..0e1b0098d 100644
--- a/test/hessenberg.cpp
+++ b/test/hessenberg.cpp
@@ -49,7 +49,7 @@ template<typename Scalar,int Size> void hessenberg(int size = Size)
// TODO: Add tests for packedMatrix() and householderCoefficients()
}
-void test_hessenberg()
+EIGEN_DECLARE_TEST(hessenberg)
{
CALL_SUBTEST_1(( hessenberg<std::complex<double>,1>() ));
CALL_SUBTEST_2(( hessenberg<std::complex<double>,2>() ));
diff --git a/test/householder.cpp b/test/householder.cpp
index c5f6b5e4f..cad8138a2 100644
--- a/test/householder.cpp
+++ b/test/householder.cpp
@@ -12,7 +12,6 @@
template<typename MatrixType> void householder(const MatrixType& m)
{
- typedef typename MatrixType::Index Index;
static bool even = true;
even = !even;
/* this test covers the following files:
@@ -49,6 +48,17 @@ template<typename MatrixType> void householder(const MatrixType& m)
v1.applyHouseholderOnTheLeft(essential,beta,tmp);
VERIFY_IS_APPROX(v1.norm(), v2.norm());
+ // reconstruct householder matrix:
+ SquareMatrixType id, H1, H2;
+ id.setIdentity(rows, rows);
+ H1 = H2 = id;
+ VectorType vv(rows);
+ vv << Scalar(1), essential;
+ H1.applyHouseholderOnTheLeft(essential, beta, tmp);
+ H2.applyHouseholderOnTheRight(essential, beta, tmp);
+ VERIFY_IS_APPROX(H1, H2);
+ VERIFY_IS_APPROX(H1, id - beta * vv*vv.adjoint());
+
MatrixType m1(rows, cols),
m2(rows, cols);
@@ -69,7 +79,7 @@ template<typename MatrixType> void householder(const MatrixType& m)
m3.rowwise() = v1.transpose();
m4 = m3;
m3.row(0).makeHouseholder(essential, beta, alpha);
- m3.applyHouseholderOnTheRight(essential,beta,tmp);
+ m3.applyHouseholderOnTheRight(essential.conjugate(),beta,tmp);
VERIFY_IS_APPROX(m3.norm(), m4.norm());
if(rows>=2) VERIFY_IS_MUCH_SMALLER_THAN(m3.block(0,1,rows,rows-1).norm(), m3.norm());
VERIFY_IS_MUCH_SMALLER_THAN(numext::imag(m3(0,0)), numext::real(m3(0,0)));
@@ -104,14 +114,14 @@ template<typename MatrixType> void householder(const MatrixType& m)
VERIFY_IS_APPROX(hseq_mat.adjoint(), hseq_mat_adj);
VERIFY_IS_APPROX(hseq_mat.conjugate(), hseq_mat_conj);
VERIFY_IS_APPROX(hseq_mat.transpose(), hseq_mat_trans);
- VERIFY_IS_APPROX(hseq_mat * m6, hseq_mat * m6);
- VERIFY_IS_APPROX(hseq_mat.adjoint() * m6, hseq_mat_adj * m6);
- VERIFY_IS_APPROX(hseq_mat.conjugate() * m6, hseq_mat_conj * m6);
- VERIFY_IS_APPROX(hseq_mat.transpose() * m6, hseq_mat_trans * m6);
- VERIFY_IS_APPROX(m6 * hseq_mat, m6 * hseq_mat);
- VERIFY_IS_APPROX(m6 * hseq_mat.adjoint(), m6 * hseq_mat_adj);
- VERIFY_IS_APPROX(m6 * hseq_mat.conjugate(), m6 * hseq_mat_conj);
- VERIFY_IS_APPROX(m6 * hseq_mat.transpose(), m6 * hseq_mat_trans);
+ VERIFY_IS_APPROX(hseq * m6, hseq_mat * m6);
+ VERIFY_IS_APPROX(hseq.adjoint() * m6, hseq_mat_adj * m6);
+ VERIFY_IS_APPROX(hseq.conjugate() * m6, hseq_mat_conj * m6);
+ VERIFY_IS_APPROX(hseq.transpose() * m6, hseq_mat_trans * m6);
+ VERIFY_IS_APPROX(m6 * hseq, m6 * hseq_mat);
+ VERIFY_IS_APPROX(m6 * hseq.adjoint(), m6 * hseq_mat_adj);
+ VERIFY_IS_APPROX(m6 * hseq.conjugate(), m6 * hseq_mat_conj);
+ VERIFY_IS_APPROX(m6 * hseq.transpose(), m6 * hseq_mat_trans);
// test householder sequence on the right with a shift
@@ -123,7 +133,7 @@ template<typename MatrixType> void householder(const MatrixType& m)
VERIFY_IS_APPROX(m3 * m5, m1); // test evaluating rhseq to a dense matrix, then applying
}
-void test_householder()
+EIGEN_DECLARE_TEST(householder)
{
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1( householder(Matrix<double,2,2>()) );
diff --git a/test/incomplete_cholesky.cpp b/test/incomplete_cholesky.cpp
index 59ffe9259..68fe7d507 100644
--- a/test/incomplete_cholesky.cpp
+++ b/test/incomplete_cholesky.cpp
@@ -29,14 +29,10 @@ template<typename T, typename I> void test_incomplete_cholesky_T()
CALL_SUBTEST( check_sparse_spd_solving(cg_illt_uplo_amd) );
}
-void test_incomplete_cholesky()
+template<int>
+void bug1150()
{
- CALL_SUBTEST_1(( test_incomplete_cholesky_T<double,int>() ));
- CALL_SUBTEST_2(( test_incomplete_cholesky_T<std::complex<double>, int>() ));
- CALL_SUBTEST_3(( test_incomplete_cholesky_T<double,long int>() ));
-
-#ifdef EIGEN_TEST_PART_1
- // regression for bug 1150
+ // regression for bug 1150
for(int N = 1; N<20; ++N)
{
Eigen::MatrixXd b( N, N );
@@ -61,5 +57,13 @@ void test_incomplete_cholesky()
VERIFY(solver.preconditioner().info() == Eigen::Success);
VERIFY(solver.info() == Eigen::Success);
}
-#endif
+}
+
+EIGEN_DECLARE_TEST(incomplete_cholesky)
+{
+ CALL_SUBTEST_1(( test_incomplete_cholesky_T<double,int>() ));
+ CALL_SUBTEST_2(( test_incomplete_cholesky_T<std::complex<double>, int>() ));
+ CALL_SUBTEST_3(( test_incomplete_cholesky_T<double,long int>() ));
+
+ CALL_SUBTEST_1(( bug1150<0>() ));
}
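
A pattern worth noting, since it recurs later in this patch (bug1453, integer_types_extra, linearstructure_overflow): code that used to sit inside an #ifdef EIGEN_TEST_PART_N block is moved into a template<int> function and invoked through CALL_SUBTEST_N; since CALL_SUBTEST_N expands to nothing in the other test parts, the template is presumably never instantiated, and hence never compiled, there. A generic sketch with hypothetical names:

template<int>
void regression_check()          // hypothetical name
{
  // body formerly guarded by #ifdef EIGEN_TEST_PART_1
}

EIGEN_DECLARE_TEST(some_test)    // hypothetical test name
{
  CALL_SUBTEST_1(( regression_check<0>() ));
}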
diff --git a/test/indexed_view.cpp b/test/indexed_view.cpp
index 7245cf378..9a284cf0a 100644
--- a/test/indexed_view.cpp
+++ b/test/indexed_view.cpp
@@ -77,12 +77,11 @@ is_same_seq_type(const T1& a, const T2& b)
#define VERIFY_EQ_INT(A,B) VERIFY_IS_APPROX(int(A),int(B))
+// C++03 does not allow local or unnamed enums as index
+enum DummyEnum { XX=0, YY=1 };
+
void check_indexed_view()
{
- using Eigen::placeholders::all;
- using Eigen::placeholders::last;
- using Eigen::placeholders::end;
-
Index n = 10;
ArrayXd a = ArrayXd::LinSpaced(n,0,n-1);
@@ -140,7 +139,7 @@ void check_indexed_view()
"500 501 502 503 504 505 506 507 508 509")
);
- // takes the row numer 3, and repeat it 5 times
+ // take row number 3, and repeat it 5 times
VERIFY( MATCH( A(seqN(3,5,0), all),
"300 301 302 303 304 305 306 307 308 309\n"
"300 301 302 303 304 305 306 307 308 309\n"
@@ -236,7 +235,7 @@ void check_indexed_view()
VERIFY_IS_APPROX( A(seq(n-1,2,-2), seqN(n-1-6,4)), A(seq(last,2,-2), seqN(last-6,4)) );
VERIFY_IS_APPROX( A(seq(n-1-6,n-1-2), seqN(n-1-6,4)), A(seq(last-6,last-2), seqN(6+last-6-6,4)) );
VERIFY_IS_APPROX( A(seq((n-1)/2,(n)/2+3), seqN(2,4)), A(seq(last/2,(last+1)/2+3), seqN(last+2-last,4)) );
- VERIFY_IS_APPROX( A(seq(n-2,2,-2), seqN(n-8,4)), A(seq(end-2,2,-2), seqN(end-8,4)) );
+ VERIFY_IS_APPROX( A(seq(n-2,2,-2), seqN(n-8,4)), A(seq(lastp1-2,2,-2), seqN(lastp1-8,4)) );
// Check all combinations of seq:
VERIFY_IS_APPROX( A(seq(1,n-1-2,2), seq(1,n-1-2,2)), A(seq(1,last-2,2), seq(1,last-2,fix<2>)) );
@@ -246,7 +245,7 @@ void check_indexed_view()
VERIFY_IS_APPROX( A(seq(n-1-5,n-1-2), seq(n-1-5,n-1-2)), A(seq(last-5,last-2), seq(last-5,last-2)) );
VERIFY_IS_APPROX( A.col(A.cols()-1), A(all,last) );
- VERIFY_IS_APPROX( A(A.rows()-2, A.cols()/2), A(last-1, end/2) );
+ VERIFY_IS_APPROX( A(A.rows()-2, A.cols()/2), A(last-1, lastp1/2) );
VERIFY_IS_APPROX( a(a.size()-2), a(last-1) );
VERIFY_IS_APPROX( a(a.size()/2), a((last+1)/2) );
@@ -269,7 +268,7 @@ void check_indexed_view()
VERIFY( is_same_eq(a.head(4), a(seq(0,3))) );
VERIFY( is_same_eq(a.tail(4), a(seqN(last-3,4))) );
- VERIFY( is_same_eq(a.tail(4), a(seq(end-4,last))) );
+ VERIFY( is_same_eq(a.tail(4), a(seq(lastp1-4,last))) );
VERIFY( is_same_eq(a.segment<4>(3), a(seqN(3,fix<4>))) );
}
@@ -293,6 +292,14 @@ void check_indexed_view()
}
#if EIGEN_HAS_CXX11
+ // check lastN
+ VERIFY_IS_APPROX( a(lastN(3)), a.tail(3) );
+ VERIFY( MATCH( a(lastN(3)), "7\n8\n9" ) );
+ VERIFY_IS_APPROX( a(lastN(fix<3>())), a.tail<3>() );
+ VERIFY( MATCH( a(lastN(3,2)), "5\n7\n9" ) );
+ VERIFY( MATCH( a(lastN(3,fix<2>())), "5\n7\n9" ) );
+ VERIFY( a(lastN(fix<3>())).SizeAtCompileTime == 3 );
+
VERIFY( (A(all, std::array<int,4>{{1,3,2,4}})).ColsAtCompileTime == 4);
VERIFY_IS_APPROX( (A(std::array<int,3>{{1,3,5}}, std::array<int,4>{{9,6,3,0}})), A(seqN(1,3,2), seqN(9,4,-3)) );
@@ -366,13 +373,51 @@ void check_indexed_view()
VERIFY( is_same_eq( cA.middleRows<3>(1), cA.middleRows(1,fix<3>)) );
}
+ // Check compilation of enums as index type:
+ a(XX) = 1;
+ A(XX,YY) = 1;
+ // Anonymous enums only work with C++11
+#if EIGEN_HAS_CXX11
+ enum { X=0, Y=1 };
+ a(X) = 1;
+ A(X,Y) = 1;
+ A(XX,Y) = 1;
+ A(X,YY) = 1;
+#endif
+
+ // Check compilation of varying integer types as index types:
+ Index i = n/2;
+ short i_short(i);
+ std::size_t i_sizet(i);
+ VERIFY_IS_EQUAL( a(i), a.coeff(i_short) );
+ VERIFY_IS_EQUAL( a(i), a.coeff(i_sizet) );
+
+ VERIFY_IS_EQUAL( A(i,i), A.coeff(i_short, i_short) );
+ VERIFY_IS_EQUAL( A(i,i), A.coeff(i_short, i) );
+ VERIFY_IS_EQUAL( A(i,i), A.coeff(i, i_short) );
+ VERIFY_IS_EQUAL( A(i,i), A.coeff(i, i_sizet) );
+ VERIFY_IS_EQUAL( A(i,i), A.coeff(i_sizet, i) );
+ VERIFY_IS_EQUAL( A(i,i), A.coeff(i_sizet, i_short) );
+ VERIFY_IS_EQUAL( A(i,i), A.coeff(5, i_sizet) );
+
}
-void test_indexed_view()
+EIGEN_DECLARE_TEST(indexed_view)
{
// for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1( check_indexed_view() );
CALL_SUBTEST_2( check_indexed_view() );
CALL_SUBTEST_3( check_indexed_view() );
// }
+
+ // static checks of some internals:
+ STATIC_CHECK(( internal::is_valid_index_type<int>::value ));
+ STATIC_CHECK(( internal::is_valid_index_type<unsigned int>::value ));
+ STATIC_CHECK(( internal::is_valid_index_type<short>::value ));
+ STATIC_CHECK(( internal::is_valid_index_type<std::ptrdiff_t>::value ));
+ STATIC_CHECK(( internal::is_valid_index_type<std::size_t>::value ));
+ STATIC_CHECK(( !internal::valid_indexed_view_overload<int,int>::value ));
+ STATIC_CHECK(( !internal::valid_indexed_view_overload<int,std::ptrdiff_t>::value ));
+ STATIC_CHECK(( !internal::valid_indexed_view_overload<std::ptrdiff_t,int>::value ));
+ STATIC_CHECK(( !internal::valid_indexed_view_overload<std::size_t,int>::value ));
}
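
For readers tracking the placeholder renaming above (end becomes lastp1, i.e. last plus one) and the new lastN checks (exercised only under EIGEN_HAS_CXX11), a small sketch of the intended equivalences, assuming the usual using-declarations for Eigen's indexing symbols that the test relies on:

ArrayXd a = ArrayXd::LinSpaced(10, 0, 9);
// lastp1 evaluates to a.size(), so lastp1-4 starts the trailing 4-element block
VERIFY_IS_APPROX( a(seq(lastp1-4, last)), a.tail(4) );
// lastN(n) selects the n trailing coefficients; lastN(n,stride) adds a stride
VERIFY_IS_APPROX( a(lastN(3)),   a.tail(3) );        // 7 8 9
VERIFY_IS_APPROX( a(lastN(3,2)), a(seq(5, 9, 2)) );  // 5 7 9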
diff --git a/test/inplace_decomposition.cpp b/test/inplace_decomposition.cpp
index 92d0d91b6..e3aa9957d 100644
--- a/test/inplace_decomposition.cpp
+++ b/test/inplace_decomposition.cpp
@@ -79,7 +79,7 @@ template<typename DecType,typename MatrixType> void inplace(bool square = false,
}
-void test_inplace_decomposition()
+EIGEN_DECLARE_TEST(inplace_decomposition)
{
EIGEN_UNUSED typedef Matrix<double,4,3> Matrix43d;
for(int i = 0; i < g_repeat; i++) {
diff --git a/test/integer_types.cpp b/test/integer_types.cpp
index a21f73a81..3f9030d77 100644
--- a/test/integer_types.cpp
+++ b/test/integer_types.cpp
@@ -18,7 +18,6 @@
template<typename MatrixType> void signed_integer_type_tests(const MatrixType& m)
{
- typedef typename MatrixType::Index Index;
typedef typename MatrixType::Scalar Scalar;
enum { is_signed = (Scalar(-1) > Scalar(0)) ? 0 : 1 };
@@ -49,7 +48,6 @@ template<typename MatrixType> void signed_integer_type_tests(const MatrixType& m
template<typename MatrixType> void integer_type_tests(const MatrixType& m)
{
- typedef typename MatrixType::Index Index;
typedef typename MatrixType::Scalar Scalar;
VERIFY(NumTraits<Scalar>::IsInteger);
@@ -133,7 +131,18 @@ template<typename MatrixType> void integer_type_tests(const MatrixType& m)
VERIFY_IS_APPROX((m1 * m2.transpose()) * m1, m1 * (m2.transpose() * m1));
}
-void test_integer_types()
+template<int>
+void integer_types_extra()
+{
+ VERIFY_IS_EQUAL(int(internal::scalar_div_cost<int>::value), 8);
+ VERIFY_IS_EQUAL(int(internal::scalar_div_cost<unsigned int>::value), 8);
+ if(sizeof(long)>sizeof(int)) {
+ VERIFY(int(internal::scalar_div_cost<long>::value) > int(internal::scalar_div_cost<int>::value));
+ VERIFY(int(internal::scalar_div_cost<unsigned long>::value) > int(internal::scalar_div_cost<int>::value));
+ }
+}
+
+EIGEN_DECLARE_TEST(integer_types)
{
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1( integer_type_tests(Matrix<unsigned int, 1, 1>()) );
@@ -158,12 +167,5 @@ void test_integer_types()
CALL_SUBTEST_8( integer_type_tests(Matrix<unsigned long long, Dynamic, 5>(1, 5)) );
}
-#ifdef EIGEN_TEST_PART_9
- VERIFY_IS_EQUAL(internal::scalar_div_cost<int>::value, 8);
- VERIFY_IS_EQUAL(internal::scalar_div_cost<unsigned int>::value, 8);
- if(sizeof(long)>sizeof(int)) {
- VERIFY(internal::scalar_div_cost<long>::value > internal::scalar_div_cost<int>::value);
- VERIFY(internal::scalar_div_cost<unsigned long>::value > internal::scalar_div_cost<int>::value);
- }
-#endif
+ CALL_SUBTEST_9( integer_types_extra<0>() );
}
diff --git a/test/inverse.cpp b/test/inverse.cpp
index 5c6777a18..8754cb7e5 100644
--- a/test/inverse.cpp
+++ b/test/inverse.cpp
@@ -11,43 +11,26 @@
#include "main.h"
#include <Eigen/LU>
-template<typename MatrixType> void inverse(const MatrixType& m)
+template<typename MatrixType>
+void inverse_for_fixed_size(const MatrixType&, typename internal::enable_if<MatrixType::SizeAtCompileTime==Dynamic>::type* = 0)
{
- using std::abs;
- typedef typename MatrixType::Index Index;
- /* this test covers the following files:
- Inverse.h
- */
- Index rows = m.rows();
- Index cols = m.cols();
-
- typedef typename MatrixType::Scalar Scalar;
-
- MatrixType m1(rows, cols),
- m2(rows, cols),
- identity = MatrixType::Identity(rows, rows);
- createRandomPIMatrixOfRank(rows,rows,rows,m1);
- m2 = m1.inverse();
- VERIFY_IS_APPROX(m1, m2.inverse() );
-
- VERIFY_IS_APPROX((Scalar(2)*m2).inverse(), m2.inverse()*Scalar(0.5));
-
- VERIFY_IS_APPROX(identity, m1.inverse() * m1 );
- VERIFY_IS_APPROX(identity, m1 * m1.inverse() );
+}
- VERIFY_IS_APPROX(m1, m1.inverse().inverse() );
+template<typename MatrixType>
+void inverse_for_fixed_size(const MatrixType& m1, typename internal::enable_if<MatrixType::SizeAtCompileTime!=Dynamic>::type* = 0)
+{
+ using std::abs;
- // since for the general case we implement separately row-major and col-major, test that
- VERIFY_IS_APPROX(MatrixType(m1.transpose().inverse()), MatrixType(m1.inverse().transpose()));
+ MatrixType m2, identity = MatrixType::Identity();
-#if !defined(EIGEN_TEST_PART_5) && !defined(EIGEN_TEST_PART_6)
+ typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
typedef Matrix<Scalar, MatrixType::ColsAtCompileTime, 1> VectorType;
//computeInverseAndDetWithCheck tests
//First: an invertible matrix
bool invertible;
- RealScalar det;
+ Scalar det;
m2.setZero();
m1.computeInverseAndDetWithCheck(m2, det, invertible);
@@ -61,23 +44,52 @@ template<typename MatrixType> void inverse(const MatrixType& m)
VERIFY_IS_APPROX(identity, m1*m2);
//Second: a rank one matrix (not invertible, except for 1x1 matrices)
- VectorType v3 = VectorType::Random(rows);
- MatrixType m3 = v3*v3.transpose(), m4(rows,cols);
+ VectorType v3 = VectorType::Random();
+ MatrixType m3 = v3*v3.transpose(), m4;
m3.computeInverseAndDetWithCheck(m4, det, invertible);
- VERIFY( rows==1 ? invertible : !invertible );
+ VERIFY( m1.rows()==1 ? invertible : !invertible );
VERIFY_IS_MUCH_SMALLER_THAN(abs(det-m3.determinant()), RealScalar(1));
m3.computeInverseWithCheck(m4, invertible);
- VERIFY( rows==1 ? invertible : !invertible );
+ VERIFY( m1.rows()==1 ? invertible : !invertible );
// check with submatrices
{
Matrix<Scalar, MatrixType::RowsAtCompileTime+1, MatrixType::RowsAtCompileTime+1, MatrixType::Options> m5;
m5.setRandom();
- m5.topLeftCorner(rows,rows) = m1;
+ m5.topLeftCorner(m1.rows(),m1.rows()) = m1;
m2 = m5.template topLeftCorner<MatrixType::RowsAtCompileTime,MatrixType::ColsAtCompileTime>().inverse();
VERIFY_IS_APPROX( (m5.template topLeftCorner<MatrixType::RowsAtCompileTime,MatrixType::ColsAtCompileTime>()), m2.inverse() );
}
-#endif
+}
+
+template<typename MatrixType> void inverse(const MatrixType& m)
+{
+ /* this test covers the following files:
+ Inverse.h
+ */
+ Index rows = m.rows();
+ Index cols = m.cols();
+
+ typedef typename MatrixType::Scalar Scalar;
+
+ MatrixType m1(rows, cols),
+ m2(rows, cols),
+ identity = MatrixType::Identity(rows, rows);
+ createRandomPIMatrixOfRank(rows,rows,rows,m1);
+ m2 = m1.inverse();
+ VERIFY_IS_APPROX(m1, m2.inverse() );
+
+ VERIFY_IS_APPROX((Scalar(2)*m2).inverse(), m2.inverse()*Scalar(0.5));
+
+ VERIFY_IS_APPROX(identity, m1.inverse() * m1 );
+ VERIFY_IS_APPROX(identity, m1 * m1.inverse() );
+
+ VERIFY_IS_APPROX(m1, m1.inverse().inverse() );
+
+ // since for the general case we implement separately row-major and col-major, test that
+ VERIFY_IS_APPROX(MatrixType(m1.transpose().inverse()), MatrixType(m1.inverse().transpose()));
+
+ inverse_for_fixed_size(m1);
// check in-place inversion
if(MatrixType::RowsAtCompileTime>=2 && MatrixType::RowsAtCompileTime<=4)
@@ -93,7 +105,7 @@ template<typename MatrixType> void inverse(const MatrixType& m)
}
}
-void test_inverse()
+EIGEN_DECLARE_TEST(inverse)
{
int s = 0;
for(int i = 0; i < g_repeat; i++) {
@@ -113,5 +125,7 @@ void test_inverse()
CALL_SUBTEST_7( inverse(Matrix4d()) );
CALL_SUBTEST_7( inverse(Matrix<double,4,4,DontAlign>()) );
+
+ CALL_SUBTEST_8( inverse(Matrix4cd()) );
}
}
diff --git a/test/is_same_dense.cpp b/test/is_same_dense.cpp
index 2c7838ce9..23dd806eb 100644
--- a/test/is_same_dense.cpp
+++ b/test/is_same_dense.cpp
@@ -11,12 +11,16 @@
using internal::is_same_dense;
-void test_is_same_dense()
+EIGEN_DECLARE_TEST(is_same_dense)
{
typedef Matrix<double,Dynamic,Dynamic,ColMajor> ColMatrixXd;
+ typedef Matrix<std::complex<double>,Dynamic,Dynamic,ColMajor> ColMatrixXcd;
ColMatrixXd m1(10,10);
+ ColMatrixXcd m2(10,10);
Ref<ColMatrixXd> ref_m1(m1);
+ Ref<ColMatrixXd,0, Stride<Dynamic,Dynamic> > ref_m2_real(m2.real());
Ref<const ColMatrixXd> const_ref_m1(m1);
+
VERIFY(is_same_dense(m1,m1));
VERIFY(is_same_dense(m1,ref_m1));
VERIFY(is_same_dense(const_ref_m1,m1));
@@ -30,4 +34,8 @@ void test_is_same_dense()
Ref<const ColMatrixXd> const_ref_m1_col(m1.col(1));
VERIFY(is_same_dense(m1.col(1),const_ref_m1_col));
+
+
+ VERIFY(!is_same_dense(m1, ref_m2_real));
+ VERIFY(!is_same_dense(m2, ref_m2_real));
}
diff --git a/test/jacobi.cpp b/test/jacobi.cpp
index 7ccd4124b..5604797f5 100644
--- a/test/jacobi.cpp
+++ b/test/jacobi.cpp
@@ -14,7 +14,6 @@
template<typename MatrixType, typename JacobiScalar>
void jacobi(const MatrixType& m = MatrixType())
{
- typedef typename MatrixType::Index Index;
Index rows = m.rows();
Index cols = m.cols();
@@ -58,7 +57,7 @@ void jacobi(const MatrixType& m = MatrixType())
}
}
-void test_jacobi()
+EIGEN_DECLARE_TEST(jacobi)
{
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1(( jacobi<Matrix3f, float>() ));
diff --git a/test/jacobisvd.cpp b/test/jacobisvd.cpp
index 7f5f71562..f9a59e0e7 100644
--- a/test/jacobisvd.cpp
+++ b/test/jacobisvd.cpp
@@ -36,7 +36,6 @@ void jacobisvd(const MatrixType& a = MatrixType(), bool pickrandom = true)
template<typename MatrixType> void jacobisvd_verify_assert(const MatrixType& m)
{
svd_verify_assert<JacobiSVD<MatrixType> >(m);
- typedef typename MatrixType::Index Index;
Index rows = m.rows();
Index cols = m.cols();
@@ -70,7 +69,7 @@ void jacobisvd_method()
VERIFY_IS_APPROX(m.jacobiSvd(ComputeFullU|ComputeFullV).solve(m), m);
}
-void test_jacobisvd()
+EIGEN_DECLARE_TEST(jacobisvd)
{
CALL_SUBTEST_3(( jacobisvd_verify_assert(Matrix3f()) ));
CALL_SUBTEST_4(( jacobisvd_verify_assert(Matrix4d()) ));
diff --git a/test/klu_support.cpp b/test/klu_support.cpp
new file mode 100644
index 000000000..f806ad50e
--- /dev/null
+++ b/test/klu_support.cpp
@@ -0,0 +1,32 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2011 Gael Guennebaud <g.gael@free.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#define EIGEN_NO_DEBUG_SMALL_PRODUCT_BLOCKS
+#include "sparse_solver.h"
+
+#include <Eigen/KLUSupport>
+
+template<typename T> void test_klu_support_T()
+{
+ KLU<SparseMatrix<T, ColMajor> > klu_colmajor;
+ KLU<SparseMatrix<T, RowMajor> > klu_rowmajor;
+
+ check_sparse_square_solving(klu_colmajor);
+ check_sparse_square_solving(klu_rowmajor);
+
+ //check_sparse_square_determinant(klu_colmajor);
+ //check_sparse_square_determinant(klu_rowmajor);
+}
+
+EIGEN_DECLARE_TEST(klu_support)
+{
+ CALL_SUBTEST_1(test_klu_support_T<double>());
+ CALL_SUBTEST_2(test_klu_support_T<std::complex<double> >());
+}
+
diff --git a/test/linearstructure.cpp b/test/linearstructure.cpp
index 17474af10..46ee5162b 100644
--- a/test/linearstructure.cpp
+++ b/test/linearstructure.cpp
@@ -19,7 +19,6 @@ template<typename MatrixType> void linearStructure(const MatrixType& m)
/* this test covers the following files:
CwiseUnaryOp.h, CwiseBinaryOp.h, SelfCwiseBinaryOp.h
*/
- typedef typename MatrixType::Index Index;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
@@ -111,7 +110,20 @@ template<typename MatrixType> void real_complex(DenseIndex rows = MatrixType::Ro
VERIFY(g_called && "matrix<complex> - real not properly optimized");
}
-void test_linearstructure()
+template<int>
+void linearstructure_overflow()
+{
+ // make sure that /=scalar and /scalar do not overflow
+ // rationale: 1.0/4.94e-320 overflows, but m/4.94e-320 should not
+ Matrix4d m2, m3;
+ m3 = m2 = Matrix4d::Random()*1e-20;
+ m2 = m2 / 4.9e-320;
+ VERIFY_IS_APPROX(m2.cwiseQuotient(m2), Matrix4d::Ones());
+ m3 /= 4.9e-320;
+ VERIFY_IS_APPROX(m3.cwiseQuotient(m3), Matrix4d::Ones());
+}
+
+EIGEN_DECLARE_TEST(linearstructure)
{
g_called = true;
VERIFY(g_called); // avoid `unneeded-internal-declaration` warning.
@@ -131,19 +143,5 @@ void test_linearstructure()
CALL_SUBTEST_11( real_complex<MatrixXcf>(10,10) );
CALL_SUBTEST_11( real_complex<ArrayXXcf>(10,10) );
}
-
-#ifdef EIGEN_TEST_PART_4
- {
- // make sure that /=scalar and /scalar do not overflow
- // rational: 1.0/4.94e-320 overflow, but m/4.94e-320 should not
- Matrix4d m2, m3;
- m3 = m2 = Matrix4d::Random()*1e-20;
- m2 = m2 / 4.9e-320;
- VERIFY_IS_APPROX(m2.cwiseQuotient(m2), Matrix4d::Ones());
- m3 /= 4.9e-320;
- VERIFY_IS_APPROX(m3.cwiseQuotient(m3), Matrix4d::Ones());
-
-
- }
-#endif
+ CALL_SUBTEST_4( linearstructure_overflow<0>() );
}
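
To spell out the rationale behind linearstructure_overflow(): 4.9e-320 is a double denormal whose reciprocal (about 2e319) exceeds the largest finite double, so an implementation that rewrites m/s as m*(1.0/s) would turn every coefficient into infinity, while dividing coefficient-wise stays finite. A scalar sketch with the same orders of magnitude as the test:

double s = 4.9e-320;                // denormal divisor used in the test
double x = 1e-20;                   // magnitude of the matrix coefficients
double direct    = x / s;           // about 2.0e299, representable
double rewritten = x * (1.0 / s);   // 1.0/s overflows to +inf, so this is +inf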
diff --git a/test/lscg.cpp b/test/lscg.cpp
index daa62a954..feb2347a8 100644
--- a/test/lscg.cpp
+++ b/test/lscg.cpp
@@ -14,15 +14,23 @@ template<typename T> void test_lscg_T()
{
LeastSquaresConjugateGradient<SparseMatrix<T> > lscg_colmajor_diag;
LeastSquaresConjugateGradient<SparseMatrix<T>, IdentityPreconditioner> lscg_colmajor_I;
+ LeastSquaresConjugateGradient<SparseMatrix<T,RowMajor> > lscg_rowmajor_diag;
+ LeastSquaresConjugateGradient<SparseMatrix<T,RowMajor>, IdentityPreconditioner> lscg_rowmajor_I;
CALL_SUBTEST( check_sparse_square_solving(lscg_colmajor_diag) );
CALL_SUBTEST( check_sparse_square_solving(lscg_colmajor_I) );
CALL_SUBTEST( check_sparse_leastsquare_solving(lscg_colmajor_diag) );
CALL_SUBTEST( check_sparse_leastsquare_solving(lscg_colmajor_I) );
+
+ CALL_SUBTEST( check_sparse_square_solving(lscg_rowmajor_diag) );
+ CALL_SUBTEST( check_sparse_square_solving(lscg_rowmajor_I) );
+
+ CALL_SUBTEST( check_sparse_leastsquare_solving(lscg_rowmajor_diag) );
+ CALL_SUBTEST( check_sparse_leastsquare_solving(lscg_rowmajor_I) );
}
-void test_lscg()
+EIGEN_DECLARE_TEST(lscg)
{
CALL_SUBTEST_1(test_lscg_T<double>());
CALL_SUBTEST_2(test_lscg_T<std::complex<double> >());
diff --git a/test/lu.cpp b/test/lu.cpp
index 9787f4d86..144496e91 100644
--- a/test/lu.cpp
+++ b/test/lu.cpp
@@ -18,7 +18,6 @@ typename MatrixType::RealScalar matrix_l1_norm(const MatrixType& m) {
template<typename MatrixType> void lu_non_invertible()
{
- typedef typename MatrixType::Index Index;
typedef typename MatrixType::RealScalar RealScalar;
/* this test covers the following files:
LU.h
@@ -181,7 +180,6 @@ template<typename MatrixType> void lu_partial_piv()
/* this test covers the following files:
PartialPivLU.h
*/
- typedef typename MatrixType::Index Index;
typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
Index size = internal::random<Index>(1,4);
@@ -244,7 +242,7 @@ template<typename MatrixType> void lu_verify_assert()
VERIFY_RAISES_ASSERT(plu.inverse())
}
-void test_lu()
+EIGEN_DECLARE_TEST(lu)
{
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1( lu_non_invertible<Matrix3f>() );
diff --git a/test/main.h b/test/main.h
index 25d2dcf43..36784b1f4 100644
--- a/test/main.h
+++ b/test/main.h
@@ -50,15 +50,44 @@
#endif
#endif
+// Same for cuda_fp16.h
+#if defined(__CUDACC_VER_MAJOR__) && (__CUDACC_VER_MAJOR__ >= 9)
+#define EIGEN_TEST_CUDACC_VER ((__CUDACC_VER_MAJOR__ * 10000) + (__CUDACC_VER_MINOR__ * 100))
+#elif defined(__CUDACC_VER__)
+#define EIGEN_TEST_CUDACC_VER __CUDACC_VER__
+#else
+#define EIGEN_TEST_CUDACC_VER 0
+#endif
+
+#if EIGEN_TEST_CUDACC_VER >= 70500
+#include <cuda_fp16.h>
+#endif
+
// To test that all calls from Eigen code to std::min() and std::max() are
// protected by parenthesis against macro expansion, the min()/max() macros
// are defined here and any not-parenthesized min/max call will cause a
// compiler error.
-#define min(A,B) please_protect_your_min_with_parentheses
-#define max(A,B) please_protect_your_max_with_parentheses
-#define isnan(X) please_protect_your_isnan_with_parentheses
-#define isinf(X) please_protect_your_isinf_with_parentheses
-#define isfinite(X) please_protect_your_isfinite_with_parentheses
+#if !defined(__HIPCC__)
+ //
+ // HIP header files include the following files
+ // <thread>
+ // <regex>
+ // <unordered_map>
+ // which seem to contain not-parenthesized calls to "max"/"min", triggering the check below and causing compilation to fail.
+ //
+ // Including those header files before the following macro definitions for "min"/"max" only partially resolves the issue,
+ // because other HIP header files also define "isnan" / "isinf" / "isfinite" functions, which are needed in other
+ // headers.
+ //
+ // So instead we simply disable this check for HIP.
+ //
+ #define min(A,B) please_protect_your_min_with_parentheses
+ #define max(A,B) please_protect_your_max_with_parentheses
+ #define isnan(X) please_protect_your_isnan_with_parentheses
+ #define isinf(X) please_protect_your_isinf_with_parentheses
+ #define isfinite(X) please_protect_your_isfinite_with_parentheses
+#endif
+
#ifdef M_PI
#undef M_PI
#endif
@@ -93,13 +122,12 @@ inline void on_temporary_creation(long int size) {
#define VERIFY_EVALUATION_COUNT(XPR,N) {\
nb_temporaries = 0; \
XPR; \
- if(nb_temporaries!=N) { std::cerr << "nb_temporaries == " << nb_temporaries << "\n"; }\
- VERIFY( (#XPR) && nb_temporaries==N ); \
+ if(nb_temporaries!=(N)) { std::cerr << "nb_temporaries == " << nb_temporaries << "\n"; }\
+ VERIFY( (#XPR) && nb_temporaries==(N) ); \
}
-
+
#endif
-// the following file is automatically generated by cmake
#include "split_test_helper.h"
#ifdef NDEBUG
@@ -116,10 +144,6 @@ inline void on_temporary_creation(long int size) {
#define EIGEN_MAKING_DOCS
#endif
-#ifndef EIGEN_TEST_FUNC
-#error EIGEN_TEST_FUNC must be defined
-#endif
-
#define DEFAULT_REPEAT 10
namespace Eigen
@@ -128,20 +152,48 @@ namespace Eigen
// level == 0 <=> abort if test fail
// level >= 1 <=> warning message to std::cerr if test fail
static int g_test_level = 0;
- static int g_repeat;
- static unsigned int g_seed;
- static bool g_has_set_repeat, g_has_set_seed;
+ static int g_repeat = 1;
+ static unsigned int g_seed = 0;
+ static bool g_has_set_repeat = false, g_has_set_seed = false;
+
+ class EigenTest
+ {
+ public:
+ EigenTest() : m_func(0) {}
+ EigenTest(const char* a_name, void (*func)(void))
+ : m_name(a_name), m_func(func)
+ {
+ ms_registered_tests.push_back(this);
+ }
+ const std::string& name() const { return m_name; }
+ void operator()() const { m_func(); }
+
+ static const std::vector<EigenTest*>& all() { return ms_registered_tests; }
+ protected:
+ std::string m_name;
+ void (*m_func)(void);
+ static std::vector<EigenTest*> ms_registered_tests;
+ };
+
+ std::vector<EigenTest*> EigenTest::ms_registered_tests;
+
+ // Declare and register a test, e.g.:
+ // EIGEN_DECLARE_TEST(mytest) { ... }
+ // will create a function:
+ // void test_mytest() { ... }
+ // that will be automatically called.
+ #define EIGEN_DECLARE_TEST(X) \
+ void EIGEN_CAT(test_,X) (); \
+ static EigenTest EIGEN_CAT(test_handler_,X) (EIGEN_MAKESTRING(X), & EIGEN_CAT(test_,X)); \
+ void EIGEN_CAT(test_,X) ()
}
#define TRACK std::cerr << __FILE__ << " " << __LINE__ << std::endl
// #define TRACK while()
-#define EI_PP_MAKE_STRING2(S) #S
-#define EI_PP_MAKE_STRING(S) EI_PP_MAKE_STRING2(S)
-
#define EIGEN_DEFAULT_IO_FORMAT IOFormat(4, 0, " ", "\n", "", "", "", "")
-#if (defined(_CPPUNWIND) || defined(__EXCEPTIONS)) && !defined(__CUDA_ARCH__)
+#if (defined(_CPPUNWIND) || defined(__EXCEPTIONS)) && !defined(__CUDA_ARCH__) && !defined(__HIP_DEVICE_COMPILE__) && !defined(__SYCL_DEVICE_ONLY__)
#define EIGEN_EXCEPTIONS
#endif
@@ -162,9 +214,15 @@ namespace Eigen
eigen_assert_exception(void) {}
~eigen_assert_exception() { Eigen::no_more_assert = false; }
};
+
+ struct eigen_static_assert_exception
+ {
+ eigen_static_assert_exception(void) {}
+ ~eigen_static_assert_exception() { Eigen::no_more_assert = false; }
+ };
}
// If EIGEN_DEBUG_ASSERTS is defined and if no assertion is triggered while
- // one should have been, then the list of excecuted assertions is printed out.
+ // one should have been, then the list of executed assertions is printed out.
//
// EIGEN_DEBUG_ASSERTS is not enabled by default as it
// significantly increases the compilation time
@@ -190,7 +248,7 @@ namespace Eigen
} \
else if (Eigen::internal::push_assert) \
{ \
- eigen_assert_list.push_back(std::string(EI_PP_MAKE_STRING(__FILE__) " (" EI_PP_MAKE_STRING(__LINE__) ") : " #a) ); \
+ eigen_assert_list.push_back(std::string(EIGEN_MAKESTRING(__FILE__) " (" EIGEN_MAKESTRING(__LINE__) ") : " #a) ); \
}
#ifdef EIGEN_EXCEPTIONS
@@ -214,7 +272,7 @@ namespace Eigen
}
#endif //EIGEN_EXCEPTIONS
- #elif !defined(__CUDACC__) // EIGEN_DEBUG_ASSERTS
+ #elif !defined(__CUDACC__) && !defined(__HIPCC__) && !defined(__SYCL_DEVICE_ONLY__) // EIGEN_DEBUG_ASSERTS
// see bug 89. The copy_bool here is working around a bug in gcc <= 4.3
#define eigen_assert(a) \
if( (!Eigen::internal::copy_bool(a)) && (!no_more_assert) )\
@@ -225,6 +283,7 @@ namespace Eigen
else \
EIGEN_THROW_X(Eigen::eigen_assert_exception()); \
}
+
#ifdef EIGEN_EXCEPTIONS
#define VERIFY_RAISES_ASSERT(a) { \
Eigen::no_more_assert = false; \
@@ -236,25 +295,51 @@ namespace Eigen
catch (Eigen::eigen_assert_exception&) { VERIFY(true); } \
Eigen::report_on_cerr_on_assert_failure = true; \
}
- #endif //EIGEN_EXCEPTIONS
+ #endif // EIGEN_EXCEPTIONS
#endif // EIGEN_DEBUG_ASSERTS
+ #if defined(TEST_CHECK_STATIC_ASSERTIONS) && defined(EIGEN_EXCEPTIONS)
+ #define EIGEN_STATIC_ASSERT(a,MSG) \
+ if( (!Eigen::internal::copy_bool(a)) && (!no_more_assert) )\
+ { \
+ Eigen::no_more_assert = true; \
+ if(report_on_cerr_on_assert_failure) \
+ eigen_plain_assert((a) && #MSG); \
+ else \
+ EIGEN_THROW_X(Eigen::eigen_static_assert_exception()); \
+ }
+ #define VERIFY_RAISES_STATIC_ASSERT(a) { \
+ Eigen::no_more_assert = false; \
+ Eigen::report_on_cerr_on_assert_failure = false; \
+ try { \
+ a; \
+ VERIFY(Eigen::should_raise_an_assert && # a); \
+ } \
+ catch (Eigen::eigen_static_assert_exception&) { VERIFY(true); } \
+ Eigen::report_on_cerr_on_assert_failure = true; \
+ }
+ #endif // TEST_CHECK_STATIC_ASSERTIONS
+
#ifndef VERIFY_RAISES_ASSERT
#define VERIFY_RAISES_ASSERT(a) \
std::cout << "Can't VERIFY_RAISES_ASSERT( " #a " ) with exceptions disabled\n";
#endif
-
- #if !defined(__CUDACC__)
+#ifndef VERIFY_RAISES_STATIC_ASSERT
+ #define VERIFY_RAISES_STATIC_ASSERT(a) \
+ std::cout << "Can't VERIFY_RAISES_STATIC_ASSERT( " #a " ) with exceptions disabled\n";
+#endif
+
+ #if !defined(__CUDACC__) && !defined(__HIPCC__) && !defined(__SYCL_DEVICE_ONLY__)
#define EIGEN_USE_CUSTOM_ASSERT
#endif
#else // EIGEN_NO_ASSERTION_CHECKING
#define VERIFY_RAISES_ASSERT(a) {}
+ #define VERIFY_RAISES_STATIC_ASSERT(a) {}
#endif // EIGEN_NO_ASSERTION_CHECKING
-
#define EIGEN_INTERNAL_DEBUGGING
#include <Eigen/QR> // required for createRandomPIMatrixOfRank
@@ -276,10 +361,10 @@ inline void verify_impl(bool condition, const char *testname, const char *file,
}
}
-#define VERIFY(a) ::verify_impl(a, g_test_stack.back().c_str(), __FILE__, __LINE__, EI_PP_MAKE_STRING(a))
+#define VERIFY(a) ::verify_impl(a, g_test_stack.back().c_str(), __FILE__, __LINE__, EIGEN_MAKESTRING(a))
-#define VERIFY_GE(a, b) ::verify_impl(a >= b, g_test_stack.back().c_str(), __FILE__, __LINE__, EI_PP_MAKE_STRING(a >= b))
-#define VERIFY_LE(a, b) ::verify_impl(a <= b, g_test_stack.back().c_str(), __FILE__, __LINE__, EI_PP_MAKE_STRING(a <= b))
+#define VERIFY_GE(a, b) ::verify_impl(a >= b, g_test_stack.back().c_str(), __FILE__, __LINE__, EIGEN_MAKESTRING(a >= b))
+#define VERIFY_LE(a, b) ::verify_impl(a <= b, g_test_stack.back().c_str(), __FILE__, __LINE__, EIGEN_MAKESTRING(a <= b))
#define VERIFY_IS_EQUAL(a, b) VERIFY(test_is_equal(a, b, true))
@@ -293,8 +378,10 @@ inline void verify_impl(bool condition, const char *testname, const char *file,
#define VERIFY_IS_UNITARY(a) VERIFY(test_isUnitary(a))
+#define STATIC_CHECK(COND) EIGEN_STATIC_ASSERT( (COND) , EIGEN_INTERNAL_ERROR_PLEASE_FILE_A_BUG_REPORT )
+
#define CALL_SUBTEST(FUNC) do { \
- g_test_stack.push_back(EI_PP_MAKE_STRING(FUNC)); \
+ g_test_stack.push_back(EIGEN_MAKESTRING(FUNC)); \
FUNC; \
g_test_stack.pop_back(); \
} while (0)
@@ -310,6 +397,17 @@ template<> inline float test_precision<std::complex<float> >() { return test_pre
template<> inline double test_precision<std::complex<double> >() { return test_precision<double>(); }
template<> inline long double test_precision<std::complex<long double> >() { return test_precision<long double>(); }
+inline bool test_isApprox(const short& a, const short& b)
+{ return internal::isApprox(a, b, test_precision<short>()); }
+inline bool test_isApprox(const unsigned short& a, const unsigned short& b)
+{ return internal::isApprox(a, b, test_precision<unsigned short>()); }
+inline bool test_isApprox(const unsigned int& a, const unsigned int& b)
+{ return internal::isApprox(a, b, test_precision<unsigned int>()); }
+inline bool test_isApprox(const long& a, const long& b)
+{ return internal::isApprox(a, b, test_precision<long>()); }
+inline bool test_isApprox(const unsigned long& a, const unsigned long& b)
+{ return internal::isApprox(a, b, test_precision<unsigned long>()); }
+
inline bool test_isApprox(const int& a, const int& b)
{ return internal::isApprox(a, b, test_precision<int>()); }
inline bool test_isMuchSmallerThan(const int& a, const int& b)
@@ -634,9 +732,6 @@ template<> std::string type_name<std::complex<double> >() { return "comple
template<> std::string type_name<std::complex<long double> >() { return "complex<long double>"; }
template<> std::string type_name<std::complex<int> >() { return "complex<int>"; }
-// forward declaration of the main test function
-void EIGEN_CAT(test_,EIGEN_TEST_FUNC)();
-
using namespace Eigen;
inline void set_repeat_from_string(const char *str)
@@ -723,9 +818,16 @@ int main(int argc, char *argv[])
srand(g_seed);
std::cout << "Repeating each test " << g_repeat << " times" << std::endl;
- Eigen::g_test_stack.push_back(std::string(EI_PP_MAKE_STRING(EIGEN_TEST_FUNC)));
+ VERIFY(EigenTest::all().size()>0);
+
+ for(std::size_t i=0; i<EigenTest::all().size(); ++i)
+ {
+ const EigenTest& current_test = *EigenTest::all()[i];
+ Eigen::g_test_stack.push_back(current_test.name());
+ current_test();
+ Eigen::g_test_stack.pop_back();
+ }
- EIGEN_CAT(test_,EIGEN_TEST_FUNC)();
return 0;
}
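
Pulling the new pieces of main.h together: EIGEN_DECLARE_TEST(foo) forward-declares test_foo(), defines a static EigenTest object whose constructor registers itself in EigenTest::all(), and then opens the definition of test_foo(); main() simply walks that registry. Roughly, with EIGEN_CAT and EIGEN_MAKESTRING already expanded:

void test_foo();                                      // forward declaration
static EigenTest test_handler_foo("foo", &test_foo);  // registers itself in EigenTest::all()
void test_foo()                                       // user-provided body follows
{ /* ... */ }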
diff --git a/test/mapped_matrix.cpp b/test/mapped_matrix.cpp
index 6a84c5897..0ea136ae6 100644
--- a/test/mapped_matrix.cpp
+++ b/test/mapped_matrix.cpp
@@ -17,7 +17,6 @@
template<typename VectorType> void map_class_vector(const VectorType& m)
{
- typedef typename VectorType::Index Index;
typedef typename VectorType::Scalar Scalar;
Index size = m.size();
@@ -51,7 +50,6 @@ template<typename VectorType> void map_class_vector(const VectorType& m)
template<typename MatrixType> void map_class_matrix(const MatrixType& m)
{
- typedef typename MatrixType::Index Index;
typedef typename MatrixType::Scalar Scalar;
Index rows = m.rows(), cols = m.cols(), size = rows*cols;
@@ -64,8 +62,9 @@ template<typename MatrixType> void map_class_matrix(const MatrixType& m)
for(int i = 0; i < size; i++) array2[i] = Scalar(1);
// array3unaligned -> unaligned pointer to heap
Scalar* array3 = new Scalar[size+1];
- for(int i = 0; i < size+1; i++) array3[i] = Scalar(1);
- Scalar* array3unaligned = internal::UIntPtr(array3)%EIGEN_MAX_ALIGN_BYTES == 0 ? array3+1 : array3;
+ Index sizep1 = size + 1; // <- without this temporary, MSVC 2013 generates bad code
+ for(Index i = 0; i < sizep1; i++) array3[i] = Scalar(1);
+ Scalar* array3unaligned = (internal::UIntPtr(array3)%EIGEN_MAX_ALIGN_BYTES) == 0 ? array3+1 : array3;
Scalar array4[256];
if(size<=256)
for(int i = 0; i < size; i++) array4[i] = Scalar(1);
@@ -121,7 +120,6 @@ template<typename MatrixType> void map_class_matrix(const MatrixType& m)
template<typename VectorType> void map_static_methods(const VectorType& m)
{
- typedef typename VectorType::Index Index;
typedef typename VectorType::Scalar Scalar;
Index size = m.size();
@@ -163,7 +161,6 @@ template<typename Scalar>
void map_not_aligned_on_scalar()
{
typedef Matrix<Scalar,Dynamic,Dynamic> MatrixType;
- typedef typename MatrixType::Index Index;
Index size = 11;
Scalar* array1 = internal::aligned_new<Scalar>((size+1)*(size+1)+1);
Scalar* array2 = reinterpret_cast<Scalar*>(sizeof(Scalar)/2+std::size_t(array1));
@@ -181,7 +178,7 @@ void map_not_aligned_on_scalar()
internal::aligned_delete(array1, (size+1)*(size+1)+1);
}
-void test_mapped_matrix()
+EIGEN_DECLARE_TEST(mapped_matrix)
{
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1( map_class_vector(Matrix<float, 1, 1>()) );
@@ -205,7 +202,6 @@ void test_mapped_matrix()
CALL_SUBTEST_8( map_static_methods(RowVector3d()) );
CALL_SUBTEST_9( map_static_methods(VectorXcd(8)) );
CALL_SUBTEST_10( map_static_methods(VectorXf(12)) );
-
CALL_SUBTEST_11( map_not_aligned_on_scalar<double>() );
}
}
diff --git a/test/mapstaticmethods.cpp b/test/mapstaticmethods.cpp
index 06272d106..d0128ba94 100644
--- a/test/mapstaticmethods.cpp
+++ b/test/mapstaticmethods.cpp
@@ -9,8 +9,12 @@
#include "main.h"
+// GCC<=4.8 has spurious shadow warnings, because `ptr` re-appears inside template instantiations
+// workaround: put these in an anonymous namespace
+namespace {
float *ptr;
const float *const_ptr;
+}
template<typename PlainObjectType,
bool IsDynamicSize = PlainObjectType::SizeAtCompileTime == Dynamic,
@@ -69,7 +73,6 @@ struct mapstaticmethods_impl<PlainObjectType, true, false>
{
static void run(const PlainObjectType& m)
{
- typedef typename PlainObjectType::Index Index;
Index rows = m.rows(), cols = m.cols();
int i = internal::random<int>(2,5), j = internal::random<int>(2,5);
@@ -116,7 +119,6 @@ struct mapstaticmethods_impl<PlainObjectType, true, true>
{
static void run(const PlainObjectType& v)
{
- typedef typename PlainObjectType::Index Index;
Index size = v.size();
int i = internal::random<int>(2,5);
@@ -145,7 +147,7 @@ void mapstaticmethods(const PlainObjectType& m)
VERIFY(true); // just to avoid 'unused function' warning
}
-void test_mapstaticmethods()
+EIGEN_DECLARE_TEST(mapstaticmethods)
{
ptr = internal::aligned_new<float>(1000);
for(int i = 0; i < 1000; i++) ptr[i] = float(i);
diff --git a/test/mapstride.cpp b/test/mapstride.cpp
index 4858f8fea..09196600b 100644
--- a/test/mapstride.cpp
+++ b/test/mapstride.cpp
@@ -11,7 +11,6 @@
template<int Alignment,typename VectorType> void map_class_vector(const VectorType& m)
{
- typedef typename VectorType::Index Index;
typedef typename VectorType::Scalar Scalar;
Index size = m.size();
@@ -50,7 +49,6 @@ template<int Alignment,typename VectorType> void map_class_vector(const VectorTy
template<int Alignment,typename MatrixType> void map_class_matrix(const MatrixType& _m)
{
- typedef typename MatrixType::Index Index;
typedef typename MatrixType::Scalar Scalar;
Index rows = _m.rows(), cols = _m.cols();
@@ -58,7 +56,7 @@ template<int Alignment,typename MatrixType> void map_class_matrix(const MatrixTy
MatrixType m = MatrixType::Random(rows,cols);
Scalar s1 = internal::random<Scalar>();
- Index arraysize = 2*(rows+4)*(cols+4);
+ Index arraysize = 4*(rows+4)*(cols+4);
Scalar* a_array1 = internal::aligned_new<Scalar>(arraysize+1);
Scalar* array1 = a_array1;
@@ -143,10 +141,63 @@ template<int Alignment,typename MatrixType> void map_class_matrix(const MatrixTy
VERIFY_IS_APPROX(map,s1*m);
}
+ // test inner stride and no outer stride
+ for(int k=0; k<2; ++k)
+ {
+ if(k==1 && (m.innerSize()*2)*m.outerSize() > maxsize2)
+ break;
+ Scalar* array = (k==0 ? array1 : array2);
+
+ Map<MatrixType, Alignment, InnerStride<Dynamic> > map(array, rows, cols, InnerStride<Dynamic>(2));
+ map = m;
+ VERIFY(map.outerStride() == map.innerSize()*2);
+ for(int i = 0; i < m.outerSize(); ++i)
+ for(int j = 0; j < m.innerSize(); ++j)
+ {
+ VERIFY(array[map.innerSize()*i*2+j*2] == m.coeffByOuterInner(i,j));
+ VERIFY(map.coeffByOuterInner(i,j) == m.coeffByOuterInner(i,j));
+ }
+ VERIFY_IS_APPROX(s1*map,s1*m);
+ map *= s1;
+ VERIFY_IS_APPROX(map,s1*m);
+ }
+
internal::aligned_delete(a_array1, arraysize+1);
}
-void test_mapstride()
+// Additional tests for inner-stride but no outer-stride
+template<int>
+void bug1453()
+{
+ const int data[] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31};
+ typedef Matrix<int,Dynamic,Dynamic,RowMajor> RowMatrixXi;
+ typedef Matrix<int,2,3,ColMajor> ColMatrix23i;
+ typedef Matrix<int,3,2,ColMajor> ColMatrix32i;
+ typedef Matrix<int,2,3,RowMajor> RowMatrix23i;
+ typedef Matrix<int,3,2,RowMajor> RowMatrix32i;
+
+ VERIFY_IS_APPROX(MatrixXi::Map(data, 2, 3, InnerStride<2>()), MatrixXi::Map(data, 2, 3, Stride<4,2>()));
+ VERIFY_IS_APPROX(MatrixXi::Map(data, 2, 3, InnerStride<>(2)), MatrixXi::Map(data, 2, 3, Stride<4,2>()));
+ VERIFY_IS_APPROX(MatrixXi::Map(data, 3, 2, InnerStride<2>()), MatrixXi::Map(data, 3, 2, Stride<6,2>()));
+ VERIFY_IS_APPROX(MatrixXi::Map(data, 3, 2, InnerStride<>(2)), MatrixXi::Map(data, 3, 2, Stride<6,2>()));
+
+ VERIFY_IS_APPROX(RowMatrixXi::Map(data, 2, 3, InnerStride<2>()), RowMatrixXi::Map(data, 2, 3, Stride<6,2>()));
+ VERIFY_IS_APPROX(RowMatrixXi::Map(data, 2, 3, InnerStride<>(2)), RowMatrixXi::Map(data, 2, 3, Stride<6,2>()));
+ VERIFY_IS_APPROX(RowMatrixXi::Map(data, 3, 2, InnerStride<2>()), RowMatrixXi::Map(data, 3, 2, Stride<4,2>()));
+ VERIFY_IS_APPROX(RowMatrixXi::Map(data, 3, 2, InnerStride<>(2)), RowMatrixXi::Map(data, 3, 2, Stride<4,2>()));
+
+ VERIFY_IS_APPROX(ColMatrix23i::Map(data, InnerStride<2>()), MatrixXi::Map(data, 2, 3, Stride<4,2>()));
+ VERIFY_IS_APPROX(ColMatrix23i::Map(data, InnerStride<>(2)), MatrixXi::Map(data, 2, 3, Stride<4,2>()));
+ VERIFY_IS_APPROX(ColMatrix32i::Map(data, InnerStride<2>()), MatrixXi::Map(data, 3, 2, Stride<6,2>()));
+ VERIFY_IS_APPROX(ColMatrix32i::Map(data, InnerStride<>(2)), MatrixXi::Map(data, 3, 2, Stride<6,2>()));
+
+ VERIFY_IS_APPROX(RowMatrix23i::Map(data, InnerStride<2>()), RowMatrixXi::Map(data, 2, 3, Stride<6,2>()));
+ VERIFY_IS_APPROX(RowMatrix23i::Map(data, InnerStride<>(2)), RowMatrixXi::Map(data, 2, 3, Stride<6,2>()));
+ VERIFY_IS_APPROX(RowMatrix32i::Map(data, InnerStride<2>()), RowMatrixXi::Map(data, 3, 2, Stride<4,2>()));
+ VERIFY_IS_APPROX(RowMatrix32i::Map(data, InnerStride<>(2)), RowMatrixXi::Map(data, 3, 2, Stride<4,2>()));
+}
+
+EIGEN_DECLARE_TEST(mapstride)
{
for(int i = 0; i < g_repeat; i++) {
int maxn = 30;
@@ -175,6 +226,8 @@ void test_mapstride()
CALL_SUBTEST_5( map_class_matrix<Unaligned>(MatrixXi(internal::random<int>(1,maxn),internal::random<int>(1,maxn))) );
CALL_SUBTEST_6( map_class_matrix<Aligned>(MatrixXcd(internal::random<int>(1,maxn),internal::random<int>(1,maxn))) );
CALL_SUBTEST_6( map_class_matrix<Unaligned>(MatrixXcd(internal::random<int>(1,maxn),internal::random<int>(1,maxn))) );
+
+ CALL_SUBTEST_5( bug1453<0>() );
TEST_SET_BUT_UNUSED_VARIABLE(maxn);
}
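
The bug1453() checks encode the rule that a Map with only an inner stride gets an implied outer stride of innerSize()*innerStride (asserted earlier via map.outerStride() == map.innerSize()*2). A compressed sketch for the 2x3 column-major case, reusing the first twelve entries of the test data:

const int data[] = {0,1,2,3,4,5,6,7,8,9,10,11};
// with inner stride 2 and no outer stride, the outer stride defaults to innerSize()*2 == 4,
// so both maps view the same coefficients: columns {0,2}, {4,6}, {8,10}
Map<const MatrixXi, 0, InnerStride<2> > a(data, 2, 3, InnerStride<2>());
Map<const MatrixXi, 0, Stride<4,2> >    b(data, 2, 3, Stride<4,2>());
VERIFY_IS_APPROX(a, b);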
diff --git a/test/meta.cpp b/test/meta.cpp
index b8dea68e8..ea9607fe7 100644
--- a/test/meta.cpp
+++ b/test/meta.cpp
@@ -15,7 +15,19 @@ bool check_is_convertible(const From&, const To&)
return internal::is_convertible<From,To>::value;
}
-void test_meta()
+struct FooReturnType {
+ typedef int ReturnType;
+};
+
+struct MyInterface {
+ virtual void func() = 0;
+ virtual ~MyInterface() {}
+};
+struct MyImpl : public MyInterface {
+ void func() {}
+};
+
+EIGEN_DECLARE_TEST(meta)
{
VERIFY((internal::conditional<(3<4),internal::true_type, internal::false_type>::type::value));
VERIFY(( internal::is_same<float,float>::value));
@@ -58,14 +70,27 @@ void test_meta()
VERIFY(( internal::is_same<const float,internal::remove_pointer<const float*>::type >::value));
VERIFY(( internal::is_same<float,internal::remove_pointer<float* const >::type >::value));
- VERIFY(( internal::is_convertible<float,double>::value ));
- VERIFY(( internal::is_convertible<int,double>::value ));
- VERIFY(( internal::is_convertible<double,int>::value ));
- VERIFY((!internal::is_convertible<std::complex<double>,double>::value ));
- VERIFY(( internal::is_convertible<Array33f,Matrix3f>::value ));
-// VERIFY((!internal::is_convertible<Matrix3f,Matrix3d>::value )); //does not work because the conversion is prevented by a static assertion
- VERIFY((!internal::is_convertible<Array33f,int>::value ));
- VERIFY((!internal::is_convertible<MatrixXf,float>::value ));
+
+ // is_convertible
+ STATIC_CHECK(( internal::is_convertible<float,double>::value ));
+ STATIC_CHECK(( internal::is_convertible<int,double>::value ));
+ STATIC_CHECK(( internal::is_convertible<int, short>::value ));
+ STATIC_CHECK(( internal::is_convertible<short, int>::value ));
+ STATIC_CHECK(( internal::is_convertible<double,int>::value ));
+ STATIC_CHECK(( internal::is_convertible<double,std::complex<double> >::value ));
+ STATIC_CHECK((!internal::is_convertible<std::complex<double>,double>::value ));
+ STATIC_CHECK(( internal::is_convertible<Array33f,Matrix3f>::value ));
+ STATIC_CHECK(( internal::is_convertible<Matrix3f&,Matrix3f>::value ));
+ STATIC_CHECK(( internal::is_convertible<Matrix3f&,Matrix3f&>::value ));
+ STATIC_CHECK(( internal::is_convertible<Matrix3f&,const Matrix3f&>::value ));
+ STATIC_CHECK(( internal::is_convertible<const Matrix3f&,Matrix3f>::value ));
+ STATIC_CHECK(( internal::is_convertible<const Matrix3f&,const Matrix3f&>::value ));
+ STATIC_CHECK((!internal::is_convertible<const Matrix3f&,Matrix3f&>::value ));
+ STATIC_CHECK((!internal::is_convertible<const Matrix3f,Matrix3f&>::value ));
+ STATIC_CHECK(( internal::is_convertible<Matrix3f,Matrix3f&>::value )); // std::is_convertible returns false here though Matrix3f from; Matrix3f& to = from; is valid.
+ //STATIC_CHECK((!internal::is_convertible<Matrix3f,Matrix3d>::value )); //does not work because the conversion is prevented by a static assertion
+ STATIC_CHECK((!internal::is_convertible<Array33f,int>::value ));
+ STATIC_CHECK((!internal::is_convertible<MatrixXf,float>::value ));
{
float f;
MatrixXf A, B;
@@ -75,6 +100,26 @@ void test_meta()
VERIFY((!check_is_convertible(A*B, f) ));
VERIFY(( check_is_convertible(A*B, A) ));
}
+
+ STATIC_CHECK(( !internal::is_convertible<MyInterface, MyImpl>::value ));
+ #if (!EIGEN_COMP_GNUC_STRICT) || (EIGEN_GNUC_AT_LEAST(4,8))
+ // GCC prior to 4.8 fails to compile this test:
+ // error: cannot allocate an object of abstract type 'MyInterface'
+ // In other words, it does not obey SFINAE.
+ // Nevertheless, we don't really care about supporting abstract types as scalar types!
+ STATIC_CHECK(( !internal::is_convertible<MyImpl, MyInterface>::value ));
+ #endif
+ STATIC_CHECK(( internal::is_convertible<MyImpl, const MyInterface&>::value ));
+ {
+ int i;
+ VERIFY(( check_is_convertible(fix<3>(), i) ));
+ VERIFY((!check_is_convertible(i, fix<DynamicIndex>()) ));
+ }
+
+ VERIFY(( internal::has_ReturnType<FooReturnType>::value ));
+ VERIFY(( internal::has_ReturnType<ScalarBinaryOpTraits<int,int> >::value ));
+ VERIFY(( !internal::has_ReturnType<MatrixXf>::value ));
+ VERIFY(( !internal::has_ReturnType<int>::value ));
VERIFY(internal::meta_sqrt<1>::ret == 1);
#define VERIFY_META_SQRT(X) VERIFY(internal::meta_sqrt<X>::ret == int(std::sqrt(double(X))))
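
As a side note to the is_convertible hunk above, here is a small standalone comparison (not part of the patch) against the standard trait; the two mostly agree, the known divergence being the T -> T& case called out in the comment. Plain C++11, no Eigen needed.

#include <type_traits>
#include <iostream>

struct Interface { virtual void f() = 0; virtual ~Interface() {} };
struct Impl : Interface { void f() {} };

int main()
{
  std::cout << std::boolalpha;
  std::cout << std::is_convertible<float, double>::value << "\n";          // true
  std::cout << std::is_convertible<const int&, int&>::value << "\n";       // false
  std::cout << std::is_convertible<Impl, const Interface&>::value << "\n"; // true
  // The divergent case: std::is_convertible<T, T&> is false for an rvalue
  // source, while Eigen's internal::is_convertible accepts it.
  std::cout << std::is_convertible<int, int&>::value << "\n";              // false
  return 0;
}
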
diff --git a/test/metis_support.cpp b/test/metis_support.cpp
index d87c56a13..b490dacde 100644
--- a/test/metis_support.cpp
+++ b/test/metis_support.cpp
@@ -19,7 +19,7 @@ template<typename T> void test_metis_T()
check_sparse_square_solving(sparselu_metis);
}
-void test_metis_support()
+EIGEN_DECLARE_TEST(metis_support)
{
CALL_SUBTEST_1(test_metis_T<double>());
}
diff --git a/test/miscmatrices.cpp b/test/miscmatrices.cpp
index ef20dc749..e71712f33 100644
--- a/test/miscmatrices.cpp
+++ b/test/miscmatrices.cpp
@@ -14,7 +14,6 @@ template<typename MatrixType> void miscMatrices(const MatrixType& m)
/* this test covers the following files:
DiagonalMatrix.h Ones.h
*/
- typedef typename MatrixType::Index Index;
typedef typename MatrixType::Scalar Scalar;
typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> VectorType;
Index rows = m.rows();
@@ -35,7 +34,7 @@ template<typename MatrixType> void miscMatrices(const MatrixType& m)
VERIFY_IS_APPROX(square, MatrixType::Identity(rows, rows));
}
-void test_miscmatrices()
+EIGEN_DECLARE_TEST(miscmatrices)
{
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1( miscMatrices(Matrix<float, 1, 1>()) );
diff --git a/test/mixingtypes.cpp b/test/mixingtypes.cpp
index b796082cd..aad63ec2b 100644
--- a/test/mixingtypes.cpp
+++ b/test/mixingtypes.cpp
@@ -8,13 +8,27 @@
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-// work around "uninitialized" warnings and give that option some testing
-#define EIGEN_INITIALIZE_MATRICES_BY_ZERO
+#if defined(EIGEN_TEST_PART_7)
#ifndef EIGEN_NO_STATIC_ASSERT
#define EIGEN_NO_STATIC_ASSERT // turn static asserts into runtime asserts in order to check them
#endif
+// ignore the double-promotion diagnostic for clang and gcc, since this part checks the static assertions (as runtime asserts) anyway:
+// TODO do the same for MSVC?
+#if defined(__clang__)
+# if (__clang_major__ * 100 + __clang_minor__) >= 308
+# pragma clang diagnostic ignored "-Wdouble-promotion"
+# endif
+#elif defined(__GNUC__)
+ // TODO is there a minimal GCC version for this? At least g++-4.7 seems to be fine with this.
+# pragma GCC diagnostic ignored "-Wdouble-promotion"
+#endif
+
+#endif
+
+
+
#if defined(EIGEN_TEST_PART_1) || defined(EIGEN_TEST_PART_2) || defined(EIGEN_TEST_PART_3)
#ifndef EIGEN_DONT_VECTORIZE
@@ -35,6 +49,28 @@ using namespace std;
VERIFY_IS_APPROX(XPR,REF); \
VERIFY( g_called && #XPR" not properly optimized");
+template<int SizeAtCompileType>
+void raise_assertion(Index size = SizeAtCompileType)
+{
+ // VERIFY_RAISES_ASSERT(mf+md); // does not even compile
+ Matrix<float, SizeAtCompileType, 1> vf; vf.setRandom(size);
+ Matrix<double, SizeAtCompileType, 1> vd; vd.setRandom(size);
+ VERIFY_RAISES_ASSERT(vf=vd);
+ VERIFY_RAISES_ASSERT(vf+=vd);
+ VERIFY_RAISES_ASSERT(vf-=vd);
+ VERIFY_RAISES_ASSERT(vd=vf);
+ VERIFY_RAISES_ASSERT(vd+=vf);
+ VERIFY_RAISES_ASSERT(vd-=vf);
+
+ // vd.asDiagonal() * mf; // does not even compile
+ // vcd.asDiagonal() * mf; // does not even compile
+
+#if 0 // we get other compilation errors here than just static asserts
+ VERIFY_RAISES_ASSERT(vd.dot(vf));
+#endif
+}
+
+
template<int SizeAtCompileType> void mixingtypes(int size = SizeAtCompileType)
{
typedef std::complex<float> CF;
@@ -73,13 +109,6 @@ template<int SizeAtCompileType> void mixingtypes(int size = SizeAtCompileType)
while(std::abs(scf)<epsf) scf = internal::random<CF>();
while(std::abs(scd)<epsd) scd = internal::random<CD>();
-// VERIFY_RAISES_ASSERT(mf+md); // does not even compile
-
-#ifdef EIGEN_DONT_VECTORIZE
- VERIFY_RAISES_ASSERT(vf=vd);
- VERIFY_RAISES_ASSERT(vf+=vd);
-#endif
-
// check scalar products
VERIFY_MIX_SCALAR(vcf * sf , vcf * complex<float>(sf));
VERIFY_MIX_SCALAR(sd * vcd , complex<double>(sd) * vcd);
@@ -119,9 +148,6 @@ template<int SizeAtCompileType> void mixingtypes(int size = SizeAtCompileType)
// check dot product
vf.dot(vf);
-#if 0 // we get other compilation errors here than just static asserts
- VERIFY_RAISES_ASSERT(vd.dot(vf));
-#endif
VERIFY_IS_APPROX(vcf.dot(vf), vcf.dot(vf.template cast<complex<float> >()));
// check diagonal product
@@ -130,9 +156,6 @@ template<int SizeAtCompileType> void mixingtypes(int size = SizeAtCompileType)
VERIFY_IS_APPROX(mcf * vf.asDiagonal(), mcf * vf.template cast<complex<float> >().asDiagonal());
VERIFY_IS_APPROX(md * vcd.asDiagonal(), md.template cast<complex<double> >() * vcd.asDiagonal());
-// vd.asDiagonal() * mf; // does not even compile
-// vcd.asDiagonal() * mf; // does not even compile
-
// check inner product
VERIFY_IS_APPROX((vf.transpose() * vcf).value(), (vf.template cast<complex<float> >().transpose() * vcf).value());
@@ -286,7 +309,7 @@ template<int SizeAtCompileType> void mixingtypes(int size = SizeAtCompileType)
VERIFY_IS_APPROX( rcd.noalias() -= mcd + md*md, - ((md*md).eval().template cast<CD>()) );
}
-void test_mixingtypes()
+EIGEN_DECLARE_TEST(mixingtypes)
{
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1(mixingtypes<3>());
@@ -296,5 +319,10 @@ void test_mixingtypes()
CALL_SUBTEST_4(mixingtypes<3>());
CALL_SUBTEST_5(mixingtypes<4>());
CALL_SUBTEST_6(mixingtypes<Dynamic>(internal::random<int>(1,EIGEN_TEST_MAX_SIZE)));
+ CALL_SUBTEST_7(raise_assertion<Dynamic>(internal::random<int>(1,EIGEN_TEST_MAX_SIZE)));
}
+ CALL_SUBTEST_7(raise_assertion<0>());
+ CALL_SUBTEST_7(raise_assertion<3>());
+ CALL_SUBTEST_7(raise_assertion<4>());
+ CALL_SUBTEST_7(raise_assertion<Dynamic>(0));
}
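
The raise_assertion<> helper added above relies on EIGEN_NO_STATIC_ASSERT to turn the usual compile-time rejection into a runtime assert. Outside the test harness the rule looks like this minimal sketch (not part of the patch): mixing scalar types requires an explicit cast.

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::Vector3d vd = Eigen::Vector3d::Random();
  Eigen::Vector3f vf;

  // vf = vd;              // rejected by a static assertion (mixed scalar types)
  vf = vd.cast<float>();   // the supported, explicit conversion

  std::cout << vf.transpose() << "\n";
  return 0;
}
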
diff --git a/test/nesting_ops.cpp b/test/nesting_ops.cpp
index a419b0e44..4b5fc21f2 100644
--- a/test/nesting_ops.cpp
+++ b/test/nesting_ops.cpp
@@ -91,7 +91,7 @@ template <typename MatrixType> void run_nesting_ops_2(const MatrixType& _m)
}
-void test_nesting_ops()
+EIGEN_DECLARE_TEST(nesting_ops)
{
CALL_SUBTEST_1(run_nesting_ops_1(MatrixXf::Random(25,25)));
CALL_SUBTEST_2(run_nesting_ops_1(MatrixXcd::Random(25,25)));
diff --git a/test/nomalloc.cpp b/test/nomalloc.cpp
index 50756c2fb..cb4c073e9 100644
--- a/test/nomalloc.cpp
+++ b/test/nomalloc.cpp
@@ -24,7 +24,6 @@ template<typename MatrixType> void nomalloc(const MatrixType& m)
{
  /* this test checks that no dynamic memory allocations are issued with fixed-size matrices
*/
- typedef typename MatrixType::Index Index;
typedef typename MatrixType::Scalar Scalar;
Index rows = m.rows();
@@ -173,7 +172,7 @@ template<typename MatrixType> void test_reference(const MatrixType& m) {
typedef typename MatrixType::Scalar Scalar;
enum { Flag = MatrixType::IsRowMajor ? Eigen::RowMajor : Eigen::ColMajor};
enum { TransposeFlag = !MatrixType::IsRowMajor ? Eigen::RowMajor : Eigen::ColMajor};
- typename MatrixType::Index rows = m.rows(), cols=m.cols();
+ Index rows = m.rows(), cols=m.cols();
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic, Flag > MatrixX;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic, TransposeFlag> MatrixXT;
// Dynamic reference:
@@ -203,7 +202,7 @@ template<typename MatrixType> void test_reference(const MatrixType& m) {
}
-void test_nomalloc()
+EIGEN_DECLARE_TEST(nomalloc)
{
// create some dynamic objects
Eigen::MatrixXd M1 = MatrixXd::Random(3,3);
diff --git a/test/nullary.cpp b/test/nullary.cpp
index acd55506e..12b9e122f 100644
--- a/test/nullary.cpp
+++ b/test/nullary.cpp
@@ -191,6 +191,24 @@ void testVectorType(const VectorType& base)
}
}
}
+
+ // test setUnit()
+ if(m.size()>0)
+ {
+ for(Index k=0; k<10; ++k)
+ {
+ Index i = internal::random<Index>(0,m.size()-1);
+ m.setUnit(i);
+ VERIFY_IS_APPROX( m, VectorType::Unit(m.size(), i) );
+ }
+ if(VectorType::SizeAtCompileTime==Dynamic)
+ {
+ Index i = internal::random<Index>(0,2*m.size()-1);
+ m.setUnit(2*m.size(),i);
+ VERIFY_IS_APPROX( m, VectorType::Unit(m.size(),i) );
+ }
+ }
+
}
template<typename MatrixType>
@@ -221,45 +239,28 @@ void testMatrixType(const MatrixType& m)
VERIFY_IS_APPROX( A(i,j), s1 );
}
-void test_nullary()
+template<int>
+void bug79()
{
- CALL_SUBTEST_1( testMatrixType(Matrix2d()) );
- CALL_SUBTEST_2( testMatrixType(MatrixXcf(internal::random<int>(1,300),internal::random<int>(1,300))) );
- CALL_SUBTEST_3( testMatrixType(MatrixXf(internal::random<int>(1,300),internal::random<int>(1,300))) );
-
- for(int i = 0; i < g_repeat*10; i++) {
- CALL_SUBTEST_4( testVectorType(VectorXd(internal::random<int>(1,30000))) );
- CALL_SUBTEST_5( testVectorType(Vector4d()) ); // regression test for bug 232
- CALL_SUBTEST_6( testVectorType(Vector3d()) );
- CALL_SUBTEST_7( testVectorType(VectorXf(internal::random<int>(1,30000))) );
- CALL_SUBTEST_8( testVectorType(Vector3f()) );
- CALL_SUBTEST_8( testVectorType(Vector4f()) );
- CALL_SUBTEST_8( testVectorType(Matrix<float,8,1>()) );
- CALL_SUBTEST_8( testVectorType(Matrix<float,1,1>()) );
-
- CALL_SUBTEST_9( testVectorType(VectorXi(internal::random<int>(1,10))) );
- CALL_SUBTEST_9( testVectorType(VectorXi(internal::random<int>(9,300))) );
- CALL_SUBTEST_9( testVectorType(Matrix<int,1,1>()) );
- }
-
-#ifdef EIGEN_TEST_PART_6
// Assignment of a RowVectorXd to a MatrixXd (regression test for bug #79).
VERIFY( (MatrixXd(RowVectorXd::LinSpaced(3, 0, 1)) - RowVector3d(0, 0.5, 1)).norm() < std::numeric_limits<double>::epsilon() );
-#endif
+}
-#ifdef EIGEN_TEST_PART_9
+template<int>
+void nullary_overflow()
+{
// Check possible overflow issue
- {
- int n = 60000;
- ArrayXi a1(n), a2(n);
- a1.setLinSpaced(n, 0, n-1);
- for(int i=0; i<n; ++i)
- a2(i) = i;
- VERIFY_IS_APPROX(a1,a2);
- }
-#endif
+ int n = 60000;
+ ArrayXi a1(n), a2(n);
+ a1.setLinSpaced(n, 0, n-1);
+ for(int i=0; i<n; ++i)
+ a2(i) = i;
+ VERIFY_IS_APPROX(a1,a2);
+}
-#ifdef EIGEN_TEST_PART_10
+template<int>
+void nullary_internal_logic()
+{
// check some internal logic
VERIFY(( internal::has_nullary_operator<internal::scalar_constant_op<double> >::value ));
VERIFY(( !internal::has_unary_operator<internal::scalar_constant_op<double> >::value ));
@@ -300,5 +301,30 @@ void test_nullary()
VERIFY(( !internal::has_binary_operator<internal::linspaced_op<int,int> >::value ));
VERIFY(( internal::functor_has_linear_access<internal::linspaced_op<int,int> >::ret ));
}
-#endif
+}
+
+EIGEN_DECLARE_TEST(nullary)
+{
+ CALL_SUBTEST_1( testMatrixType(Matrix2d()) );
+ CALL_SUBTEST_2( testMatrixType(MatrixXcf(internal::random<int>(1,300),internal::random<int>(1,300))) );
+ CALL_SUBTEST_3( testMatrixType(MatrixXf(internal::random<int>(1,300),internal::random<int>(1,300))) );
+
+ for(int i = 0; i < g_repeat*10; i++) {
+ CALL_SUBTEST_4( testVectorType(VectorXd(internal::random<int>(1,30000))) );
+ CALL_SUBTEST_5( testVectorType(Vector4d()) ); // regression test for bug 232
+ CALL_SUBTEST_6( testVectorType(Vector3d()) );
+ CALL_SUBTEST_7( testVectorType(VectorXf(internal::random<int>(1,30000))) );
+ CALL_SUBTEST_8( testVectorType(Vector3f()) );
+ CALL_SUBTEST_8( testVectorType(Vector4f()) );
+ CALL_SUBTEST_8( testVectorType(Matrix<float,8,1>()) );
+ CALL_SUBTEST_8( testVectorType(Matrix<float,1,1>()) );
+
+ CALL_SUBTEST_9( testVectorType(VectorXi(internal::random<int>(1,10))) );
+ CALL_SUBTEST_9( testVectorType(VectorXi(internal::random<int>(9,300))) );
+ CALL_SUBTEST_9( testVectorType(Matrix<int,1,1>()) );
+ }
+
+ CALL_SUBTEST_6( bug79<0>() );
+ CALL_SUBTEST_9( nullary_overflow<0>() );
+ CALL_SUBTEST_10( nullary_internal_logic<0>() );
}
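
A quick standalone sketch (not part of the patch) of the setUnit() API that the new block in testVectorType() exercises; both overloads are assumed to be the ones introduced alongside this test (Eigen 3.4-era API).

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::VectorXd v(5);
  v.setUnit(2);             // keeps the size, v becomes (0 0 1 0 0)
  std::cout << v.transpose() << "\n";

  v.setUnit(8, 3);          // resizes to 8, then sets the canonical basis vector e_3
  std::cout << v.transpose() << "\n";

  // Equivalent expression-based form:
  std::cout << Eigen::VectorXd::Unit(8, 3).transpose() << "\n";
  return 0;
}
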
diff --git a/test/num_dimensions.cpp b/test/num_dimensions.cpp
new file mode 100644
index 000000000..7ad7ef697
--- /dev/null
+++ b/test/num_dimensions.cpp
@@ -0,0 +1,90 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2018 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+#include <Eigen/SparseCore>
+
+template<int ExpectedDim,typename Xpr>
+void check_dim(const Xpr& ) {
+ STATIC_CHECK( Xpr::NumDimensions == ExpectedDim );
+}
+
+#if EIGEN_HAS_CXX11
+template<template <typename,int,int> class Object>
+void map_num_dimensions()
+{
+ typedef Object<double, 1, 1> ArrayScalarType;
+ typedef Object<double, 2, 1> ArrayVectorType;
+ typedef Object<double, 1, 2> TransposeArrayVectorType;
+ typedef Object<double, 2, 2> ArrayType;
+ typedef Object<double, Eigen::Dynamic, 1> DynamicArrayVectorType;
+ typedef Object<double, 1, Eigen::Dynamic> DynamicTransposeArrayVectorType;
+ typedef Object<double, Eigen::Dynamic, Eigen::Dynamic> DynamicArrayType;
+
+ STATIC_CHECK(ArrayScalarType::NumDimensions == 0);
+ STATIC_CHECK(ArrayVectorType::NumDimensions == 1);
+ STATIC_CHECK(TransposeArrayVectorType::NumDimensions == 1);
+ STATIC_CHECK(ArrayType::NumDimensions == 2);
+ STATIC_CHECK(DynamicArrayVectorType::NumDimensions == 1);
+ STATIC_CHECK(DynamicTransposeArrayVectorType::NumDimensions == 1);
+ STATIC_CHECK(DynamicArrayType::NumDimensions == 2);
+
+ typedef Eigen::Map<ArrayScalarType> ArrayScalarMap;
+ typedef Eigen::Map<ArrayVectorType> ArrayVectorMap;
+ typedef Eigen::Map<TransposeArrayVectorType> TransposeArrayVectorMap;
+ typedef Eigen::Map<ArrayType> ArrayMap;
+ typedef Eigen::Map<DynamicArrayVectorType> DynamicArrayVectorMap;
+ typedef Eigen::Map<DynamicTransposeArrayVectorType> DynamicTransposeArrayVectorMap;
+ typedef Eigen::Map<DynamicArrayType> DynamicArrayMap;
+
+ STATIC_CHECK(ArrayScalarMap::NumDimensions == 0);
+ STATIC_CHECK(ArrayVectorMap::NumDimensions == 1);
+ STATIC_CHECK(TransposeArrayVectorMap::NumDimensions == 1);
+ STATIC_CHECK(ArrayMap::NumDimensions == 2);
+ STATIC_CHECK(DynamicArrayVectorMap::NumDimensions == 1);
+ STATIC_CHECK(DynamicTransposeArrayVectorMap::NumDimensions == 1);
+ STATIC_CHECK(DynamicArrayMap::NumDimensions == 2);
+}
+
+template<typename Scalar, int Rows, int Cols>
+using TArray = Array<Scalar,Rows,Cols>;
+
+template<typename Scalar, int Rows, int Cols>
+using TMatrix = Matrix<Scalar,Rows,Cols>;
+
+#endif
+
+EIGEN_DECLARE_TEST(num_dimensions)
+{
+ int n = 10;
+ ArrayXXd A(n,n);
+ CALL_SUBTEST( check_dim<2>(A) );
+ CALL_SUBTEST( check_dim<2>(A.block(1,1,2,2)) );
+ CALL_SUBTEST( check_dim<1>(A.col(1)) );
+ CALL_SUBTEST( check_dim<1>(A.row(1)) );
+
+ MatrixXd M(n,n);
+ CALL_SUBTEST( check_dim<0>(M.row(1)*M.col(1)) );
+
+ SparseMatrix<double> S(n,n);
+ CALL_SUBTEST( check_dim<2>(S) );
+ CALL_SUBTEST( check_dim<2>(S.block(1,1,2,2)) );
+ CALL_SUBTEST( check_dim<1>(S.col(1)) );
+ CALL_SUBTEST( check_dim<1>(S.row(1)) );
+
+ SparseVector<double> s(n);
+ CALL_SUBTEST( check_dim<1>(s) );
+ CALL_SUBTEST( check_dim<1>(s.head(2)) );
+
+
+ #if EIGEN_HAS_CXX11
+ CALL_SUBTEST( map_num_dimensions<TArray>() );
+ CALL_SUBTEST( map_num_dimensions<TMatrix>() );
+ #endif
+}
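
For orientation, the trait checked by this new test can be queried directly; a minimal sketch (not part of the patch), assuming the same Eigen revision as the patch since NumDimensions coverage is what is being added here:

#include <Eigen/Dense>
#include <iostream>

int main()
{
  std::cout << int(Eigen::Matrix<double,1,1>::NumDimensions) << "\n"; // 0 (scalar-like)
  std::cout << int(Eigen::Vector3d::NumDimensions)           << "\n"; // 1
  std::cout << int(Eigen::RowVector3d::NumDimensions)        << "\n"; // 1
  std::cout << int(Eigen::MatrixXd::NumDimensions)           << "\n"; // 2
  return 0;
}
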
diff --git a/test/numext.cpp b/test/numext.cpp
new file mode 100644
index 000000000..6307f5979
--- /dev/null
+++ b/test/numext.cpp
@@ -0,0 +1,53 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2017 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+
+template<typename T>
+void check_abs() {
+ typedef typename NumTraits<T>::Real Real;
+
+ if(NumTraits<T>::IsSigned)
+ VERIFY_IS_EQUAL(numext::abs(-T(1)), T(1));
+ VERIFY_IS_EQUAL(numext::abs(T(0)), T(0));
+ VERIFY_IS_EQUAL(numext::abs(T(1)), T(1));
+
+ for(int k=0; k<g_repeat*100; ++k)
+ {
+ T x = internal::random<T>();
+ if(!internal::is_same<T,bool>::value)
+ x = x/Real(2);
+ if(NumTraits<T>::IsSigned)
+ {
+ VERIFY_IS_EQUAL(numext::abs(x), numext::abs(-x));
+ VERIFY( numext::abs(-x) >= Real(0));
+ }
+ VERIFY( numext::abs(x) >= Real(0));
+ VERIFY_IS_APPROX( numext::abs2(x), numext::abs2(numext::abs(x)) );
+ }
+}
+
+EIGEN_DECLARE_TEST(numext) {
+ CALL_SUBTEST( check_abs<bool>() );
+ CALL_SUBTEST( check_abs<signed char>() );
+ CALL_SUBTEST( check_abs<unsigned char>() );
+ CALL_SUBTEST( check_abs<short>() );
+ CALL_SUBTEST( check_abs<unsigned short>() );
+ CALL_SUBTEST( check_abs<int>() );
+ CALL_SUBTEST( check_abs<unsigned int>() );
+ CALL_SUBTEST( check_abs<long>() );
+ CALL_SUBTEST( check_abs<unsigned long>() );
+ CALL_SUBTEST( check_abs<half>() );
+ CALL_SUBTEST( check_abs<float>() );
+ CALL_SUBTEST( check_abs<double>() );
+ CALL_SUBTEST( check_abs<long double>() );
+
+ CALL_SUBTEST( check_abs<std::complex<float> >() );
+ CALL_SUBTEST( check_abs<std::complex<double> >() );
+}
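
The helpers covered by this new test are ordinary free functions; a short sketch (not part of the patch), assuming the same Eigen revision, since numext::abs over all scalar types is exactly what the test introduces coverage for:

#include <Eigen/Dense>
#include <complex>
#include <iostream>

int main()
{
  std::cout << Eigen::numext::abs(-3.5) << "\n";                            // 3.5
  std::cout << Eigen::numext::abs2(std::complex<double>(3.0, 4.0)) << "\n"; // 25, i.e. |z|^2
  return 0;
}
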
diff --git a/test/packetmath.cpp b/test/packetmath.cpp
index 08b360340..2b0dda573 100644
--- a/test/packetmath.cpp
+++ b/test/packetmath.cpp
@@ -28,7 +28,7 @@ template<typename T> T negate(const T& x) { return -x; }
}
}
-// NOTE: we disbale inlining for this function to workaround a GCC issue when using -O3 and the i387 FPU.
+// NOTE: we disable inlining for this function to workaround a GCC issue when using -O3 and the i387 FPU.
template<typename Scalar> EIGEN_DONT_INLINE
bool isApproxAbs(const Scalar& a, const Scalar& b, const typename NumTraits<Scalar>::Real& refvalue)
{
@@ -123,7 +123,7 @@ template<typename Scalar> void packetmath()
EIGEN_ALIGN_MAX Scalar data2[size];
EIGEN_ALIGN_MAX Packet packets[PacketSize*2];
EIGEN_ALIGN_MAX Scalar ref[size];
- RealScalar refvalue = 0;
+ RealScalar refvalue = RealScalar(0);
for (int i=0; i<size; ++i)
{
data1[i] = internal::random<Scalar>()/RealScalar(PacketSize);
@@ -148,37 +148,42 @@ template<typename Scalar> void packetmath()
for (int offset=0; offset<PacketSize; ++offset)
{
+ #define MIN(A,B) (A<B?A:B)
packets[0] = internal::pload<Packet>(data1);
packets[1] = internal::pload<Packet>(data1+PacketSize);
if (offset==0) internal::palign<0>(packets[0], packets[1]);
- else if (offset==1) internal::palign<1>(packets[0], packets[1]);
- else if (offset==2) internal::palign<2>(packets[0], packets[1]);
- else if (offset==3) internal::palign<3>(packets[0], packets[1]);
- else if (offset==4) internal::palign<4>(packets[0], packets[1]);
- else if (offset==5) internal::palign<5>(packets[0], packets[1]);
- else if (offset==6) internal::palign<6>(packets[0], packets[1]);
- else if (offset==7) internal::palign<7>(packets[0], packets[1]);
- else if (offset==8) internal::palign<8>(packets[0], packets[1]);
- else if (offset==9) internal::palign<9>(packets[0], packets[1]);
- else if (offset==10) internal::palign<10>(packets[0], packets[1]);
- else if (offset==11) internal::palign<11>(packets[0], packets[1]);
- else if (offset==12) internal::palign<12>(packets[0], packets[1]);
- else if (offset==13) internal::palign<13>(packets[0], packets[1]);
- else if (offset==14) internal::palign<14>(packets[0], packets[1]);
- else if (offset==15) internal::palign<15>(packets[0], packets[1]);
+ else if (offset==1) internal::palign<MIN(1,PacketSize-1)>(packets[0], packets[1]);
+ else if (offset==2) internal::palign<MIN(2,PacketSize-1)>(packets[0], packets[1]);
+ else if (offset==3) internal::palign<MIN(3,PacketSize-1)>(packets[0], packets[1]);
+ else if (offset==4) internal::palign<MIN(4,PacketSize-1)>(packets[0], packets[1]);
+ else if (offset==5) internal::palign<MIN(5,PacketSize-1)>(packets[0], packets[1]);
+ else if (offset==6) internal::palign<MIN(6,PacketSize-1)>(packets[0], packets[1]);
+ else if (offset==7) internal::palign<MIN(7,PacketSize-1)>(packets[0], packets[1]);
+ else if (offset==8) internal::palign<MIN(8,PacketSize-1)>(packets[0], packets[1]);
+ else if (offset==9) internal::palign<MIN(9,PacketSize-1)>(packets[0], packets[1]);
+ else if (offset==10) internal::palign<MIN(10,PacketSize-1)>(packets[0], packets[1]);
+ else if (offset==11) internal::palign<MIN(11,PacketSize-1)>(packets[0], packets[1]);
+ else if (offset==12) internal::palign<MIN(12,PacketSize-1)>(packets[0], packets[1]);
+ else if (offset==13) internal::palign<MIN(13,PacketSize-1)>(packets[0], packets[1]);
+ else if (offset==14) internal::palign<MIN(14,PacketSize-1)>(packets[0], packets[1]);
+ else if (offset==15) internal::palign<MIN(15,PacketSize-1)>(packets[0], packets[1]);
internal::pstore(data2, packets[0]);
for (int i=0; i<PacketSize; ++i)
ref[i] = data1[i+offset];
+ // palign is not used anymore, so let's just put a warning if it fails
+ ++g_test_level;
VERIFY(areApprox(ref, data2, PacketSize) && "internal::palign");
+ --g_test_level;
}
VERIFY((!PacketTraits::Vectorizable) || PacketTraits::HasAdd);
VERIFY((!PacketTraits::Vectorizable) || PacketTraits::HasSub);
VERIFY((!PacketTraits::Vectorizable) || PacketTraits::HasMul);
VERIFY((!PacketTraits::Vectorizable) || PacketTraits::HasNegate);
- VERIFY((internal::is_same<Scalar,int>::value) || (!PacketTraits::Vectorizable) || PacketTraits::HasDiv);
+ // Disabled as it is not clear why it would be mandatory to support division.
+ //VERIFY((internal::is_same<Scalar,int>::value) || (!PacketTraits::Vectorizable) || PacketTraits::HasDiv);
CHECK_CWISE2_IF(PacketTraits::HasAdd, REF_ADD, internal::padd);
CHECK_CWISE2_IF(PacketTraits::HasSub, REF_SUB, internal::psub);
@@ -242,28 +247,30 @@ template<typename Scalar> void packetmath()
}
}
- ref[0] = 0;
+ ref[0] = Scalar(0);
for (int i=0; i<PacketSize; ++i)
ref[0] += data1[i];
VERIFY(isApproxAbs(ref[0], internal::predux(internal::pload<Packet>(data1)), refvalue) && "internal::predux");
+ if(PacketSize==8 && internal::unpacket_traits<typename internal::unpacket_traits<Packet>::half>::size ==4) // so far, predux_half_dowto4 is only required in such a case
{
- for (int i=0; i<4; ++i)
- ref[i] = 0;
+ int HalfPacketSize = PacketSize>4 ? PacketSize/2 : PacketSize;
+ for (int i=0; i<HalfPacketSize; ++i)
+ ref[i] = Scalar(0);
for (int i=0; i<PacketSize; ++i)
- ref[i%4] += data1[i];
- internal::pstore(data2, internal::predux_downto4(internal::pload<Packet>(data1)));
- VERIFY(areApprox(ref, data2, PacketSize>4?PacketSize/2:PacketSize) && "internal::predux_downto4");
+ ref[i%HalfPacketSize] += data1[i];
+ internal::pstore(data2, internal::predux_half_dowto4(internal::pload<Packet>(data1)));
+ VERIFY(areApprox(ref, data2, HalfPacketSize) && "internal::predux_half_dowto4");
}
- ref[0] = 1;
+ ref[0] = Scalar(1);
for (int i=0; i<PacketSize; ++i)
ref[0] *= data1[i];
VERIFY(internal::isApprox(ref[0], internal::predux_mul(internal::pload<Packet>(data1))) && "internal::predux_mul");
for (int j=0; j<PacketSize; ++j)
{
- ref[j] = 0;
+ ref[j] = Scalar(0);
for (int i=0; i<PacketSize; ++i)
ref[j] += data1[i+j*PacketSize];
packets[j] = internal::pload<Packet>(data1+j*PacketSize);
@@ -436,6 +443,7 @@ template<typename Scalar> void packetmath_real()
if(internal::random<float>(0,1)<0.1f)
data1[internal::random<int>(0, PacketSize)] = 0;
CHECK_CWISE1_IF(PacketTraits::HasSqrt, std::sqrt, internal::psqrt);
+ CHECK_CWISE1_IF(PacketTraits::HasSqrt, Scalar(1)/std::sqrt, internal::prsqrt);
CHECK_CWISE1_IF(PacketTraits::HasLog, std::log, internal::plog);
#if EIGEN_HAS_C99_MATH && (__cplusplus > 199711L)
CHECK_CWISE1_IF(PacketTraits::HasExpm1, std::expm1, internal::pexpm1);
@@ -449,35 +457,41 @@ template<typename Scalar> void packetmath_real()
{
data1[0] = std::numeric_limits<Scalar>::quiet_NaN();
data1[1] = std::numeric_limits<Scalar>::epsilon();
- packet_helper<PacketTraits::HasLog,Packet> h;
- h.store(data2, internal::plog(h.load(data1)));
- VERIFY((numext::isnan)(data2[0]));
- VERIFY_IS_EQUAL(std::log(std::numeric_limits<Scalar>::epsilon()), data2[1]);
-
- data1[0] = -std::numeric_limits<Scalar>::epsilon();
- data1[1] = 0;
- h.store(data2, internal::plog(h.load(data1)));
- VERIFY((numext::isnan)(data2[0]));
- VERIFY_IS_EQUAL(std::log(Scalar(0)), data2[1]);
-
- data1[0] = (std::numeric_limits<Scalar>::min)();
- data1[1] = -(std::numeric_limits<Scalar>::min)();
- h.store(data2, internal::plog(h.load(data1)));
- VERIFY_IS_EQUAL(std::log((std::numeric_limits<Scalar>::min)()), data2[0]);
- VERIFY((numext::isnan)(data2[1]));
-
- data1[0] = std::numeric_limits<Scalar>::denorm_min();
- data1[1] = -std::numeric_limits<Scalar>::denorm_min();
- h.store(data2, internal::plog(h.load(data1)));
- // VERIFY_IS_EQUAL(std::log(std::numeric_limits<Scalar>::denorm_min()), data2[0]);
- VERIFY((numext::isnan)(data2[1]));
-
- data1[0] = Scalar(-1.0f);
- h.store(data2, internal::plog(h.load(data1)));
- VERIFY((numext::isnan)(data2[0]));
- h.store(data2, internal::psqrt(h.load(data1)));
- VERIFY((numext::isnan)(data2[0]));
- VERIFY((numext::isnan)(data2[1]));
+ {
+ packet_helper<PacketTraits::HasLog,Packet> h;
+ h.store(data2, internal::plog(h.load(data1)));
+ VERIFY((numext::isnan)(data2[0]));
+ VERIFY_IS_EQUAL(std::log(std::numeric_limits<Scalar>::epsilon()), data2[1]);
+
+ data1[0] = -std::numeric_limits<Scalar>::epsilon();
+ data1[1] = 0;
+ h.store(data2, internal::plog(h.load(data1)));
+ VERIFY((numext::isnan)(data2[0]));
+ VERIFY_IS_EQUAL(std::log(Scalar(0)), data2[1]);
+
+ data1[0] = (std::numeric_limits<Scalar>::min)();
+ data1[1] = -(std::numeric_limits<Scalar>::min)();
+ h.store(data2, internal::plog(h.load(data1)));
+ VERIFY_IS_EQUAL(std::log((std::numeric_limits<Scalar>::min)()), data2[0]);
+ VERIFY((numext::isnan)(data2[1]));
+
+ data1[0] = std::numeric_limits<Scalar>::denorm_min();
+ data1[1] = -std::numeric_limits<Scalar>::denorm_min();
+ h.store(data2, internal::plog(h.load(data1)));
+ // VERIFY_IS_EQUAL(std::log(std::numeric_limits<Scalar>::denorm_min()), data2[0]);
+ VERIFY((numext::isnan)(data2[1]));
+
+ data1[0] = Scalar(-1.0f);
+ h.store(data2, internal::plog(h.load(data1)));
+ VERIFY((numext::isnan)(data2[0]));
+ }
+ {
+ packet_helper<PacketTraits::HasSqrt,Packet> h;
+ data1[0] = Scalar(-1.0f);
+ h.store(data2, internal::psqrt(h.load(data1)));
+ VERIFY((numext::isnan)(data2[0]));
+ VERIFY((numext::isnan)(data2[1]));
+ }
}
}
@@ -614,7 +628,7 @@ template<typename Scalar> void packetmath_scatter_gather()
}
}
-void test_packetmath()
+EIGEN_DECLARE_TEST(packetmath)
{
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1( packetmath<float>() );
@@ -622,6 +636,7 @@ void test_packetmath()
CALL_SUBTEST_3( packetmath<int>() );
CALL_SUBTEST_4( packetmath<std::complex<float> >() );
CALL_SUBTEST_5( packetmath<std::complex<double> >() );
+ CALL_SUBTEST_6( packetmath<half>() );
CALL_SUBTEST_1( packetmath_notcomplex<float>() );
CALL_SUBTEST_2( packetmath_notcomplex<double>() );
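
The packetmath changes above manipulate Eigen's low-level packet primitives directly. These live in the internal (unsupported) namespace; the sketch below (not part of the patch) only illustrates the calls used by the test and assumes a build where they are available:

#include <Eigen/Dense>
#include <iostream>

int main()
{
  using namespace Eigen;
  typedef internal::packet_traits<float>::type Packet;   // SIMD packet, or plain float in scalar builds
  const int N = internal::unpacket_traits<Packet>::size;

  EIGEN_ALIGN_MAX float a[16] = {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16};
  EIGEN_ALIGN_MAX float b[16] = {0};

  Packet p = internal::pload<Packet>(a);   // aligned load
  p = internal::padd(p, p);                // element-wise add
  internal::pstore(b, p);                  // aligned store

  for(int i = 0; i < N; ++i) std::cout << b[i] << ' ';
  std::cout << "\nsum of first packet: " << internal::predux(internal::pload<Packet>(a)) << "\n";
  return 0;
}
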
diff --git a/test/pardiso_support.cpp b/test/pardiso_support.cpp
index 67efad6d8..9c16ded5b 100644
--- a/test/pardiso_support.cpp
+++ b/test/pardiso_support.cpp
@@ -20,7 +20,7 @@ template<typename T> void test_pardiso_T()
check_sparse_square_solving(pardiso_lu);
}
-void test_pardiso_support()
+EIGEN_DECLARE_TEST(pardiso_support)
{
CALL_SUBTEST_1(test_pardiso_T<float>());
CALL_SUBTEST_2(test_pardiso_T<double>());
diff --git a/test/pastix_support.cpp b/test/pastix_support.cpp
index b62f85739..9b64417c1 100644
--- a/test/pastix_support.cpp
+++ b/test/pastix_support.cpp
@@ -45,7 +45,7 @@ template<typename T> void test_pastix_T_LU()
check_sparse_square_solving(pastix_lu);
}
-void test_pastix_support()
+EIGEN_DECLARE_TEST(pastix_support)
{
CALL_SUBTEST_1(test_pastix_T<float>());
CALL_SUBTEST_2(test_pastix_T<double>());
diff --git a/test/permutationmatrices.cpp b/test/permutationmatrices.cpp
index db1266579..71f09be0f 100644
--- a/test/permutationmatrices.cpp
+++ b/test/permutationmatrices.cpp
@@ -14,14 +14,15 @@
using namespace std;
template<typename MatrixType> void permutationmatrices(const MatrixType& m)
{
- typedef typename MatrixType::Index Index;
typedef typename MatrixType::Scalar Scalar;
enum { Rows = MatrixType::RowsAtCompileTime, Cols = MatrixType::ColsAtCompileTime,
Options = MatrixType::Options };
typedef PermutationMatrix<Rows> LeftPermutationType;
+ typedef Transpositions<Rows> LeftTranspositionsType;
typedef Matrix<int, Rows, 1> LeftPermutationVectorType;
typedef Map<LeftPermutationType> MapLeftPerm;
typedef PermutationMatrix<Cols> RightPermutationType;
+ typedef Transpositions<Cols> RightTranspositionsType;
typedef Matrix<int, Cols, 1> RightPermutationVectorType;
typedef Map<RightPermutationType> MapRightPerm;
@@ -35,6 +36,8 @@ template<typename MatrixType> void permutationmatrices(const MatrixType& m)
RightPermutationVectorType rv;
randomPermutationVector(rv, cols);
RightPermutationType rp(rv);
+ LeftTranspositionsType lt(lv);
+ RightTranspositionsType rt(rv);
MatrixType m_permuted = MatrixType::Random(rows,cols);
VERIFY_EVALUATION_COUNT(m_permuted = lp * m_original * rp, 1); // 1 temp for sub expression "lp * m_original"
@@ -115,6 +118,14 @@ template<typename MatrixType> void permutationmatrices(const MatrixType& m)
Matrix<Scalar, Cols, Cols> B = rp.transpose();
VERIFY_IS_APPROX(A, B.transpose());
}
+
+ m_permuted = m_original;
+ lp = lt;
+ rp = rt;
+ VERIFY_EVALUATION_COUNT(m_permuted = lt * m_permuted * rt, 1);
+ VERIFY_IS_APPROX(m_permuted, lp*m_original*rp.transpose());
+
+ VERIFY_IS_APPROX(lt.inverse()*m_permuted*rt.inverse(), m_original);
}
template<typename T>
@@ -141,7 +152,7 @@ void bug890()
VERIFY_IS_APPROX(v1, (P.inverse() * rhs).eval());
}
-void test_permutationmatrices()
+EIGEN_DECLARE_TEST(permutationmatrices)
{
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1( permutationmatrices(Matrix<float, 1, 1>()) );
diff --git a/test/prec_inverse_4x4.cpp b/test/prec_inverse_4x4.cpp
index eb6ad18c9..072466467 100644
--- a/test/prec_inverse_4x4.cpp
+++ b/test/prec_inverse_4x4.cpp
@@ -68,7 +68,7 @@ template<typename MatrixType> void inverse_general_4x4(int repeat)
}
}
-void test_prec_inverse_4x4()
+EIGEN_DECLARE_TEST(prec_inverse_4x4)
{
CALL_SUBTEST_1((inverse_permutation_4x4<Matrix4f>()));
CALL_SUBTEST_1(( inverse_general_4x4<Matrix4f>(200000 * g_repeat) ));
diff --git a/test/product.h b/test/product.h
index 3b6511270..d26e8063d 100644
--- a/test/product.h
+++ b/test/product.h
@@ -111,6 +111,17 @@ template<typename MatrixType> void product(const MatrixType& m)
vcres.noalias() -= m1.transpose() * v1;
VERIFY_IS_APPROX(vcres, vc2 - m1.transpose() * v1);
+ // test scaled products
+ res = square;
+ res.noalias() = s1 * m1 * m2.transpose();
+ VERIFY_IS_APPROX(res, ((s1*m1).eval() * m2.transpose()));
+ res = square;
+ res.noalias() += s1 * m1 * m2.transpose();
+ VERIFY_IS_APPROX(res, square + ((s1*m1).eval() * m2.transpose()));
+ res = square;
+ res.noalias() -= s1 * m1 * m2.transpose();
+ VERIFY_IS_APPROX(res, square - ((s1*m1).eval() * m2.transpose()));
+
// test d ?= a+b*c rules
res.noalias() = square + m1 * m2.transpose();
VERIFY_IS_APPROX(res, square + m1 * m2.transpose());
@@ -216,6 +227,8 @@ template<typename MatrixType> void product(const MatrixType& m)
// CwiseBinaryOp
VERIFY_IS_APPROX(x = y + A*x, A*z);
x = z;
+ VERIFY_IS_APPROX(x = y - A*x, A*(-z));
+ x = z;
// CwiseUnaryOp
VERIFY_IS_APPROX(x = Scalar(1.)*(A*x), A*z);
}
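
The new "scaled products" block checks that a scalar factor folds into the product kernel; outside the harness the pattern is simply the following (not part of the patch, any recent Eigen 3.x assumed):

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(4,3);
  Eigen::MatrixXd B = Eigen::MatrixXd::Random(4,3);
  Eigen::MatrixXd R(4,4);
  double s = 2.5;

  R.noalias()  = s * A * B.transpose();   // overwrite
  R.noalias() += s * A * B.transpose();   // accumulate
  R.noalias() -= s * A * B.transpose();   // subtract again

  std::cout << R.isApprox(s * A * B.transpose()) << "\n"; // prints 1
  return 0;
}
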
diff --git a/test/product_extra.cpp b/test/product_extra.cpp
index e2b855bff..bd31df84d 100644
--- a/test/product_extra.cpp
+++ b/test/product_extra.cpp
@@ -11,7 +11,6 @@
template<typename MatrixType> void product_extra(const MatrixType& m)
{
- typedef typename MatrixType::Index Index;
typedef typename MatrixType::Scalar Scalar;
typedef Matrix<Scalar, 1, Dynamic> RowVectorType;
typedef Matrix<Scalar, Dynamic, 1> ColVectorType;
@@ -353,7 +352,7 @@ void bug_1308()
VERIFY_IS_APPROX(r44.noalias() += Vector4d::Ones() * m44.col(0).transpose(), ones44);
}
-void test_product_extra()
+EIGEN_DECLARE_TEST(product_extra)
{
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1( product_extra(MatrixXf(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
diff --git a/test/product_large.cpp b/test/product_large.cpp
index 845cd40ca..ae14b714c 100644
--- a/test/product_large.cpp
+++ b/test/product_large.cpp
@@ -30,19 +30,9 @@ void test_aliasing()
x = z;
}
-void test_product_large()
+template<int>
+void product_large_regressions()
{
- for(int i = 0; i < g_repeat; i++) {
- CALL_SUBTEST_1( product(MatrixXf(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
- CALL_SUBTEST_2( product(MatrixXd(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
- CALL_SUBTEST_3( product(MatrixXi(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
- CALL_SUBTEST_4( product(MatrixXcf(internal::random<int>(1,EIGEN_TEST_MAX_SIZE/2), internal::random<int>(1,EIGEN_TEST_MAX_SIZE/2))) );
- CALL_SUBTEST_5( product(Matrix<float,Dynamic,Dynamic,RowMajor>(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
-
- CALL_SUBTEST_1( test_aliasing<float>() );
- }
-
-#if defined EIGEN_TEST_PART_6
{
// test a specific issue in DiagonalProduct
int N = 1000000;
@@ -95,7 +85,23 @@ void test_product_large()
* (((A*A)*(A*A))*((A*A)*(A*A))*((A*A)*(A*A))*((A*A)*(A*A))*((A*A)*(A*A)) * ((A*A)*(A*A))*((A*A)*(A*A))*((A*A)*(A*A))*((A*A)*(A*A))*((A*A)*(A*A)));
VERIFY_IS_APPROX(B,C);
}
-#endif
+}
+
+EIGEN_DECLARE_TEST(product_large)
+{
+ for(int i = 0; i < g_repeat; i++) {
+ CALL_SUBTEST_1( product(MatrixXf(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
+ CALL_SUBTEST_2( product(MatrixXd(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
+ CALL_SUBTEST_2( product(MatrixXd(internal::random<int>(1,10), internal::random<int>(1,10))) );
+
+ CALL_SUBTEST_3( product(MatrixXi(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
+ CALL_SUBTEST_4( product(MatrixXcf(internal::random<int>(1,EIGEN_TEST_MAX_SIZE/2), internal::random<int>(1,EIGEN_TEST_MAX_SIZE/2))) );
+ CALL_SUBTEST_5( product(Matrix<float,Dynamic,Dynamic,RowMajor>(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
+
+ CALL_SUBTEST_1( test_aliasing<float>() );
+ }
+
+ CALL_SUBTEST_6( product_large_regressions<0>() );
// Regression test for bug 714:
#if defined EIGEN_HAS_OPENMP
diff --git a/test/product_mmtr.cpp b/test/product_mmtr.cpp
index f6e4bb1ae..bb19e6e52 100644
--- a/test/product_mmtr.cpp
+++ b/test/product_mmtr.cpp
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2010-2017 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@@ -10,12 +10,19 @@
#include "main.h"
#define CHECK_MMTR(DEST, TRI, OP) { \
+ ref3 = DEST; \
ref2 = ref1 = DEST; \
DEST.template triangularView<TRI>() OP; \
ref1 OP; \
ref2.template triangularView<TRI>() \
= ref1.template triangularView<TRI>(); \
VERIFY_IS_APPROX(DEST,ref2); \
+ \
+ DEST = ref3; \
+ ref3 = ref2; \
+ ref3.diagonal() = DEST.diagonal(); \
+ DEST.template triangularView<TRI|ZeroDiag>() OP; \
+ VERIFY_IS_APPROX(DEST,ref3); \
}
template<typename Scalar> void mmtr(int size)
@@ -27,7 +34,7 @@ template<typename Scalar> void mmtr(int size)
MatrixColMaj matc = MatrixColMaj::Zero(size, size);
MatrixRowMaj matr = MatrixRowMaj::Zero(size, size);
- MatrixColMaj ref1(size, size), ref2(size, size);
+ MatrixColMaj ref1(size, size), ref2(size, size), ref3(size,size);
MatrixColMaj soc(size,othersize); soc.setRandom();
MatrixColMaj osc(othersize,size); osc.setRandom();
@@ -77,7 +84,7 @@ template<typename Scalar> void mmtr(int size)
VERIFY_IS_APPROX(matc, ref2);
}
-void test_product_mmtr()
+EIGEN_DECLARE_TEST(product_mmtr)
{
for(int i = 0; i < g_repeat ; i++)
{
diff --git a/test/product_notemporary.cpp b/test/product_notemporary.cpp
index 8bf71b4f2..dffb07608 100644
--- a/test/product_notemporary.cpp
+++ b/test/product_notemporary.cpp
@@ -15,7 +15,6 @@ template<typename MatrixType> void product_notemporary(const MatrixType& m)
{
/* This test checks the number of temporaries created
* during the evaluation of a complex expression */
- typedef typename MatrixType::Index Index;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef Matrix<Scalar, 1, Dynamic> RowVectorType;
@@ -51,6 +50,7 @@ template<typename MatrixType> void product_notemporary(const MatrixType& m)
VERIFY_EVALUATION_COUNT( m3.noalias() = s1 * (m1 * m2.transpose()), 0);
VERIFY_EVALUATION_COUNT( m3 = m3 + (m1 * m2.adjoint()), 1);
+ VERIFY_EVALUATION_COUNT( m3 = m3 - (m1 * m2.adjoint()), 1);
VERIFY_EVALUATION_COUNT( m3 = m3 + (m1 * m2.adjoint()).transpose(), 1);
VERIFY_EVALUATION_COUNT( m3.noalias() = m3 + m1 * m2.transpose(), 0);
@@ -127,11 +127,19 @@ template<typename MatrixType> void product_notemporary(const MatrixType& m)
VERIFY_EVALUATION_COUNT( cvres.noalias() = (rm3+rm3) * (m1*cv1), 1 );
// Check outer products
+ #ifdef EIGEN_ALLOCA
+ bool temp_via_alloca = m3.rows()*sizeof(Scalar) <= EIGEN_STACK_ALLOCATION_LIMIT;
+ #else
+ bool temp_via_alloca = false;
+ #endif
m3 = cv1 * rv1;
VERIFY_EVALUATION_COUNT( m3.noalias() = cv1 * rv1, 0 );
- VERIFY_EVALUATION_COUNT( m3.noalias() = (cv1+cv1) * (rv1+rv1), 1 );
+ VERIFY_EVALUATION_COUNT( m3.noalias() = (cv1+cv1) * (rv1+rv1), temp_via_alloca ? 0 : 1 );
VERIFY_EVALUATION_COUNT( m3.noalias() = (m1*cv1) * (rv1), 1 );
VERIFY_EVALUATION_COUNT( m3.noalias() += (m1*cv1) * (rv1), 1 );
+ rm3 = cv1 * rv1;
+ VERIFY_EVALUATION_COUNT( rm3.noalias() = cv1 * rv1, 0 );
+ VERIFY_EVALUATION_COUNT( rm3.noalias() = (cv1+cv1) * (rv1+rv1), temp_via_alloca ? 0 : 1 );
VERIFY_EVALUATION_COUNT( rm3.noalias() = (cv1) * (rv1 * m1), 1 );
VERIFY_EVALUATION_COUNT( rm3.noalias() -= (cv1) * (rv1 * m1), 1 );
VERIFY_EVALUATION_COUNT( rm3.noalias() = (m1*cv1) * (rv1 * m1), 2 );
@@ -142,7 +150,7 @@ template<typename MatrixType> void product_notemporary(const MatrixType& m)
VERIFY_EVALUATION_COUNT( rvres.noalias() = rv1 * (m1 * m2.adjoint()), 1 );
}
-void test_product_notemporary()
+EIGEN_DECLARE_TEST(product_notemporary)
{
int s;
for(int i = 0; i < g_repeat; i++) {
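
The temp_via_alloca logic above accounts for Eigen evaluating the left factor of a compound outer product into a temporary, possibly stack-based. The distinction it tests, sketched standalone (not part of the patch):

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::VectorXd    cv = Eigen::VectorXd::Random(5);
  Eigen::RowVectorXd rv = Eigen::RowVectorXd::Random(5);
  Eigen::MatrixXd    M(5,5);

  M.noalias() = cv * rv;                 // plain outer product: no temporary
  M.noalias() = (cv + cv) * (rv + rv);   // lhs evaluated first; small sizes may use alloca
                                         // (bounded by EIGEN_STACK_ALLOCATION_LIMIT)

  std::cout << M.isApprox(4.0 * cv * rv) << "\n"; // prints 1
  return 0;
}
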
diff --git a/test/product_selfadjoint.cpp b/test/product_selfadjoint.cpp
index 3d768aa7e..bdccd0491 100644
--- a/test/product_selfadjoint.cpp
+++ b/test/product_selfadjoint.cpp
@@ -11,7 +11,6 @@
template<typename MatrixType> void product_selfadjoint(const MatrixType& m)
{
- typedef typename MatrixType::Index Index;
typedef typename MatrixType::Scalar Scalar;
typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> VectorType;
typedef Matrix<Scalar, 1, MatrixType::RowsAtCompileTime> RowVectorType;
@@ -60,7 +59,7 @@ template<typename MatrixType> void product_selfadjoint(const MatrixType& m)
}
}
-void test_product_selfadjoint()
+EIGEN_DECLARE_TEST(product_selfadjoint)
{
int s = 0;
for(int i = 0; i < g_repeat ; i++) {
diff --git a/test/product_small.cpp b/test/product_small.cpp
index fdfdd9f6c..b8ce37d90 100644
--- a/test/product_small.cpp
+++ b/test/product_small.cpp
@@ -228,7 +228,37 @@ void bug_1311()
VERIFY_IS_APPROX(res, A*b);
}
-void test_product_small()
+template<int>
+void product_small_regressions()
+{
+ {
+ // test compilation of (outer_product) * vector
+ Vector3f v = Vector3f::Random();
+ VERIFY_IS_APPROX( (v * v.transpose()) * v, (v * v.transpose()).eval() * v);
+ }
+
+ {
+ // regression test for pull-request #93
+ Eigen::Matrix<double, 1, 1> A; A.setRandom();
+ Eigen::Matrix<double, 18, 1> B; B.setRandom();
+ Eigen::Matrix<double, 1, 18> C; C.setRandom();
+ VERIFY_IS_APPROX(B * A.inverse(), B * A.inverse()[0]);
+ VERIFY_IS_APPROX(A.inverse() * C, A.inverse()[0] * C);
+ }
+
+ {
+ Eigen::Matrix<double, 10, 10> A, B, C;
+ A.setRandom();
+ C = A;
+ for(int k=0; k<79; ++k)
+ C = C * A;
+ B.noalias() = (((A*A)*(A*A))*((A*A)*(A*A))*((A*A)*(A*A))*((A*A)*(A*A))*((A*A)*(A*A)) * ((A*A)*(A*A))*((A*A)*(A*A))*((A*A)*(A*A))*((A*A)*(A*A))*((A*A)*(A*A)))
+ * (((A*A)*(A*A))*((A*A)*(A*A))*((A*A)*(A*A))*((A*A)*(A*A))*((A*A)*(A*A)) * ((A*A)*(A*A))*((A*A)*(A*A))*((A*A)*(A*A))*((A*A)*(A*A))*((A*A)*(A*A)));
+ VERIFY_IS_APPROX(B,C);
+ }
+}
+
+EIGEN_DECLARE_TEST(product_small)
{
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1( product(Matrix<float, 3, 2>()) );
@@ -263,31 +293,5 @@ void test_product_small()
CALL_SUBTEST_6( bug_1311<5>() );
}
-#ifdef EIGEN_TEST_PART_6
- {
- // test compilation of (outer_product) * vector
- Vector3f v = Vector3f::Random();
- VERIFY_IS_APPROX( (v * v.transpose()) * v, (v * v.transpose()).eval() * v);
- }
-
- {
- // regression test for pull-request #93
- Eigen::Matrix<double, 1, 1> A; A.setRandom();
- Eigen::Matrix<double, 18, 1> B; B.setRandom();
- Eigen::Matrix<double, 1, 18> C; C.setRandom();
- VERIFY_IS_APPROX(B * A.inverse(), B * A.inverse()[0]);
- VERIFY_IS_APPROX(A.inverse() * C, A.inverse()[0] * C);
- }
-
- {
- Eigen::Matrix<double, 10, 10> A, B, C;
- A.setRandom();
- C = A;
- for(int k=0; k<79; ++k)
- C = C * A;
- B.noalias() = (((A*A)*(A*A))*((A*A)*(A*A))*((A*A)*(A*A))*((A*A)*(A*A))*((A*A)*(A*A)) * ((A*A)*(A*A))*((A*A)*(A*A))*((A*A)*(A*A))*((A*A)*(A*A))*((A*A)*(A*A)))
- * (((A*A)*(A*A))*((A*A)*(A*A))*((A*A)*(A*A))*((A*A)*(A*A))*((A*A)*(A*A)) * ((A*A)*(A*A))*((A*A)*(A*A))*((A*A)*(A*A))*((A*A)*(A*A))*((A*A)*(A*A)));
- VERIFY_IS_APPROX(B,C);
- }
-#endif
+ CALL_SUBTEST_6( product_small_regressions<0>() );
}
diff --git a/test/product_symm.cpp b/test/product_symm.cpp
index 8c44383f9..7d786467d 100644
--- a/test/product_symm.cpp
+++ b/test/product_symm.cpp
@@ -16,7 +16,6 @@ template<typename Scalar, int Size, int OtherSize> void symm(int size = Size, in
typedef Matrix<Scalar, OtherSize, Size> Rhs2;
enum { order = OtherSize==1 ? 0 : RowMajor };
typedef Matrix<Scalar, Size, OtherSize,order> Rhs3;
- typedef typename MatrixType::Index Index;
Index rows = size;
Index cols = size;
@@ -95,7 +94,7 @@ template<typename Scalar, int Size, int OtherSize> void symm(int size = Size, in
}
-void test_product_symm()
+EIGEN_DECLARE_TEST(product_symm)
{
for(int i = 0; i < g_repeat ; i++)
{
diff --git a/test/product_syrk.cpp b/test/product_syrk.cpp
index e10f0f2f2..23406fe4b 100644
--- a/test/product_syrk.cpp
+++ b/test/product_syrk.cpp
@@ -11,7 +11,6 @@
template<typename MatrixType> void syrk(const MatrixType& m)
{
- typedef typename MatrixType::Index Index;
typedef typename MatrixType::Scalar Scalar;
typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, MatrixType::ColsAtCompileTime, RowMajor> RMatrixType;
typedef Matrix<Scalar, MatrixType::ColsAtCompileTime, Dynamic> Rhs1;
@@ -118,7 +117,7 @@ template<typename MatrixType> void syrk(const MatrixType& m)
((s1 * m1.row(c).adjoint() * m1.row(c).adjoint().adjoint()).eval().template triangularView<Upper>().toDenseMatrix()));
}
-void test_product_syrk()
+EIGEN_DECLARE_TEST(product_syrk)
{
for(int i = 0; i < g_repeat ; i++)
{
diff --git a/test/product_trmm.cpp b/test/product_trmm.cpp
index 12e554410..c7594e512 100644
--- a/test/product_trmm.cpp
+++ b/test/product_trmm.cpp
@@ -29,7 +29,7 @@ void trmm(int rows=get_random_size<Scalar>(),
typedef Matrix<Scalar,Dynamic,OtherCols,OtherCols==1?ColMajor:ResOrder> ResXS;
typedef Matrix<Scalar,OtherCols,Dynamic,OtherCols==1?RowMajor:ResOrder> ResSX;
- TriMatrix mat(rows,cols), tri(rows,cols), triTr(cols,rows);
+ TriMatrix mat(rows,cols), tri(rows,cols), triTr(cols,rows), s1tri(rows,cols), s1triTr(cols,rows);
OnTheRight ge_right(cols,otherCols);
OnTheLeft ge_left(otherCols,rows);
@@ -42,6 +42,8 @@ void trmm(int rows=get_random_size<Scalar>(),
mat.setRandom();
tri = mat.template triangularView<Mode>();
triTr = mat.transpose().template triangularView<Mode>();
+ s1tri = (s1*mat).template triangularView<Mode>();
+ s1triTr = (s1*mat).transpose().template triangularView<Mode>();
ge_right.setRandom();
ge_left.setRandom();
@@ -51,19 +53,29 @@ void trmm(int rows=get_random_size<Scalar>(),
VERIFY_IS_APPROX( ge_xs.noalias() = mat.template triangularView<Mode>() * ge_right, tri * ge_right);
VERIFY_IS_APPROX( ge_sx.noalias() = ge_left * mat.template triangularView<Mode>(), ge_left * tri);
- VERIFY_IS_APPROX( ge_xs.noalias() = (s1*mat.adjoint()).template triangularView<Mode>() * (s2*ge_left.transpose()), s1*triTr.conjugate() * (s2*ge_left.transpose()));
- VERIFY_IS_APPROX( ge_sx.noalias() = ge_right.transpose() * mat.adjoint().template triangularView<Mode>(), ge_right.transpose() * triTr.conjugate());
+ if((Mode&UnitDiag)==0)
+ VERIFY_IS_APPROX( ge_xs.noalias() = (s1*mat.adjoint()).template triangularView<Mode>() * (s2*ge_left.transpose()), s1*triTr.conjugate() * (s2*ge_left.transpose()));
- VERIFY_IS_APPROX( ge_xs.noalias() = (s1*mat.adjoint()).template triangularView<Mode>() * (s2*ge_left.adjoint()), s1*triTr.conjugate() * (s2*ge_left.adjoint()));
- VERIFY_IS_APPROX( ge_sx.noalias() = ge_right.adjoint() * mat.adjoint().template triangularView<Mode>(), ge_right.adjoint() * triTr.conjugate());
+ VERIFY_IS_APPROX( ge_xs.noalias() = (s1*mat.transpose()).template triangularView<Mode>() * (s2*ge_left.transpose()), s1triTr * (s2*ge_left.transpose()));
+ VERIFY_IS_APPROX( ge_sx.noalias() = (s2*ge_left) * (s1*mat).template triangularView<Mode>(), (s2*ge_left)*s1tri);
+ VERIFY_IS_APPROX( ge_sx.noalias() = ge_right.transpose() * mat.adjoint().template triangularView<Mode>(), ge_right.transpose() * triTr.conjugate());
+ VERIFY_IS_APPROX( ge_sx.noalias() = ge_right.adjoint() * mat.adjoint().template triangularView<Mode>(), ge_right.adjoint() * triTr.conjugate());
+
+ ge_xs_save = ge_xs;
+ if((Mode&UnitDiag)==0)
+ VERIFY_IS_APPROX( (ge_xs_save + s1*triTr.conjugate() * (s2*ge_left.adjoint())).eval(), ge_xs.noalias() += (s1*mat.adjoint()).template triangularView<Mode>() * (s2*ge_left.adjoint()) );
ge_xs_save = ge_xs;
- VERIFY_IS_APPROX( (ge_xs_save + s1*triTr.conjugate() * (s2*ge_left.adjoint())).eval(), ge_xs.noalias() += (s1*mat.adjoint()).template triangularView<Mode>() * (s2*ge_left.adjoint()) );
+ VERIFY_IS_APPROX( (ge_xs_save + s1triTr * (s2*ge_left.adjoint())).eval(), ge_xs.noalias() += (s1*mat.transpose()).template triangularView<Mode>() * (s2*ge_left.adjoint()) );
ge_sx.setRandom();
ge_sx_save = ge_sx;
- VERIFY_IS_APPROX( ge_sx_save - (ge_right.adjoint() * (-s1 * triTr).conjugate()).eval(), ge_sx.noalias() -= (ge_right.adjoint() * (-s1 * mat).adjoint().template triangularView<Mode>()).eval());
+ if((Mode&UnitDiag)==0)
+ VERIFY_IS_APPROX( ge_sx_save - (ge_right.adjoint() * (-s1 * triTr).conjugate()).eval(), ge_sx.noalias() -= (ge_right.adjoint() * (-s1 * mat).adjoint().template triangularView<Mode>()).eval());
- VERIFY_IS_APPROX( ge_xs = (s1*mat).adjoint().template triangularView<Mode>() * ge_left.adjoint(), numext::conj(s1) * triTr.conjugate() * ge_left.adjoint());
+ if((Mode&UnitDiag)==0)
+ VERIFY_IS_APPROX( ge_xs = (s1*mat).adjoint().template triangularView<Mode>() * ge_left.adjoint(), numext::conj(s1) * triTr.conjugate() * ge_left.adjoint());
+ VERIFY_IS_APPROX( ge_xs = (s1*mat).transpose().template triangularView<Mode>() * ge_left.adjoint(), s1triTr * ge_left.adjoint());
+
// TODO check with sub-matrix expressions ?
}
@@ -103,7 +115,7 @@ void trmm(int rows=get_random_size<Scalar>(), int cols=get_random_size<Scalar>()
CALL_ALL_ORDERS(EIGEN_CAT(3,NB),SCALAR,StrictlyLower)
-void test_product_trmm()
+EIGEN_DECLARE_TEST(product_trmm)
{
for(int i = 0; i < g_repeat ; i++)
{
diff --git a/test/product_trmv.cpp b/test/product_trmv.cpp
index 57a202afc..5eb1b5ac0 100644
--- a/test/product_trmv.cpp
+++ b/test/product_trmv.cpp
@@ -11,7 +11,6 @@
template<typename MatrixType> void trmv(const MatrixType& m)
{
- typedef typename MatrixType::Index Index;
typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> VectorType;
@@ -71,7 +70,7 @@ template<typename MatrixType> void trmv(const MatrixType& m)
// TODO check with sub-matrices
}
-void test_product_trmv()
+EIGEN_DECLARE_TEST(product_trmv)
{
int s = 0;
for(int i = 0; i < g_repeat ; i++) {
diff --git a/test/product_trsolve.cpp b/test/product_trsolve.cpp
index 4b97fa9d6..0c22cccf6 100644
--- a/test/product_trsolve.cpp
+++ b/test/product_trsolve.cpp
@@ -73,7 +73,7 @@ template<typename Scalar,int Size, int Cols> void trsolve(int size=Size,int cols
VERIFY_TRSM(cmLhs.template triangularView<Lower>(), rmRhs.col(c));
}
-void test_product_trsolve()
+EIGEN_DECLARE_TEST(product_trsolve)
{
for(int i = 0; i < g_repeat ; i++)
{
diff --git a/test/qr.cpp b/test/qr.cpp
index dfcc1e8f9..4799aa9ef 100644
--- a/test/qr.cpp
+++ b/test/qr.cpp
@@ -12,8 +12,6 @@
template<typename MatrixType> void qr(const MatrixType& m)
{
- typedef typename MatrixType::Index Index;
-
Index rows = m.rows();
Index cols = m.cols();
@@ -85,7 +83,7 @@ template<typename MatrixType> void qr_invertible()
qr.compute(m1);
VERIFY_IS_APPROX(log(absdet), qr.logAbsDeterminant());
// This test is tricky if the determinant becomes too small.
- // Since we generate random numbers with magnitude rrange [0,1], the average determinant is 0.5^size
+ // Since we generate random numbers with magnitude range [0,1], the average determinant is 0.5^size
VERIFY_IS_MUCH_SMALLER_THAN( abs(absdet-qr.absDeterminant()), numext::maxi(RealScalar(pow(0.5,size)),numext::maxi<RealScalar>(abs(absdet),abs(qr.absDeterminant()))) );
}
@@ -102,7 +100,7 @@ template<typename MatrixType> void qr_verify_assert()
VERIFY_RAISES_ASSERT(qr.logAbsDeterminant())
}
-void test_qr()
+EIGEN_DECLARE_TEST(qr)
{
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1( qr(MatrixXf(internal::random<int>(1,EIGEN_TEST_MAX_SIZE),internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
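
Regarding the determinant comment fixed above, the two accessors it refers to behave as in this short sketch (not part of the patch): absDeterminant() multiplies the |R_ii| factors and can over- or underflow, while logAbsDeterminant() sums their logarithms.

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::MatrixXd M = Eigen::MatrixXd::Random(100,100).cwiseAbs(); // entries in [0,1]
  Eigen::HouseholderQR<Eigen::MatrixXd> qr(M);

  std::cout << "absDeterminant    : " << qr.absDeterminant()    << "\n";
  std::cout << "logAbsDeterminant : " << qr.logAbsDeterminant() << "\n";
  return 0;
}
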
diff --git a/test/qr_colpivoting.cpp b/test/qr_colpivoting.cpp
index 26ed27f5c..d224a9436 100644
--- a/test/qr_colpivoting.cpp
+++ b/test/qr_colpivoting.cpp
@@ -14,8 +14,6 @@
template <typename MatrixType>
void cod() {
- typedef typename MatrixType::Index Index;
-
Index rows = internal::random<Index>(2, EIGEN_TEST_MAX_SIZE);
Index cols = internal::random<Index>(2, EIGEN_TEST_MAX_SIZE);
Index cols2 = internal::random<Index>(2, EIGEN_TEST_MAX_SIZE);
@@ -94,7 +92,6 @@ void cod_fixedsize() {
template<typename MatrixType> void qr()
{
using std::sqrt;
- typedef typename MatrixType::Index Index;
Index rows = internal::random<Index>(2,EIGEN_TEST_MAX_SIZE), cols = internal::random<Index>(2,EIGEN_TEST_MAX_SIZE), cols2 = internal::random<Index>(2,EIGEN_TEST_MAX_SIZE);
Index rank = internal::random<Index>(1, (std::min)(rows, cols)-1);
@@ -211,7 +208,6 @@ template<typename MatrixType> void qr_kahan_matrix()
{
using std::sqrt;
using std::abs;
- typedef typename MatrixType::Index Index;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
@@ -300,7 +296,7 @@ template<typename MatrixType> void qr_verify_assert()
VERIFY_RAISES_ASSERT(qr.logAbsDeterminant())
}
-void test_qr_colpivoting()
+EIGEN_DECLARE_TEST(qr_colpivoting)
{
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1( qr<MatrixXf>() );
diff --git a/test/qr_fullpivoting.cpp b/test/qr_fullpivoting.cpp
index 70e89c198..150b4256c 100644
--- a/test/qr_fullpivoting.cpp
+++ b/test/qr_fullpivoting.cpp
@@ -13,13 +13,12 @@
template<typename MatrixType> void qr()
{
- typedef typename MatrixType::Index Index;
-
+ static const int Rows = MatrixType::RowsAtCompileTime, Cols = MatrixType::ColsAtCompileTime;
Index max_size = EIGEN_TEST_MAX_SIZE;
Index min_size = numext::maxi(1,EIGEN_TEST_MAX_SIZE/10);
- Index rows = internal::random<Index>(min_size,max_size),
- cols = internal::random<Index>(min_size,max_size),
- cols2 = internal::random<Index>(min_size,max_size),
+ Index rows = Rows == Dynamic ? internal::random<Index>(min_size,max_size) : Rows,
+ cols = Cols == Dynamic ? internal::random<Index>(min_size,max_size) : Cols,
+ cols2 = Cols == Dynamic ? internal::random<Index>(min_size,max_size) : Cols,
rank = internal::random<Index>(1, (std::min)(rows, cols)-1);
typedef typename MatrixType::Scalar Scalar;
@@ -126,11 +125,12 @@ template<typename MatrixType> void qr_verify_assert()
VERIFY_RAISES_ASSERT(qr.logAbsDeterminant())
}
-void test_qr_fullpivoting()
+EIGEN_DECLARE_TEST(qr_fullpivoting)
{
- for(int i = 0; i < 1; i++) {
- // FIXME : very weird bug here
-// CALL_SUBTEST(qr(Matrix2f()) );
+ for(int i = 0; i < 1; i++) {
+ CALL_SUBTEST_5( qr<Matrix3f>() );
+ CALL_SUBTEST_6( qr<Matrix3d>() );
+ CALL_SUBTEST_8( qr<Matrix2f>() );
CALL_SUBTEST_1( qr<MatrixXf>() );
CALL_SUBTEST_2( qr<MatrixXd>() );
CALL_SUBTEST_3( qr<MatrixXcd>() );
diff --git a/test/qtvector.cpp b/test/qtvector.cpp
index 2be885e48..4ec79b1e6 100644
--- a/test/qtvector.cpp
+++ b/test/qtvector.cpp
@@ -18,8 +18,6 @@
template<typename MatrixType>
void check_qtvector_matrix(const MatrixType& m)
{
- typedef typename MatrixType::Index Index;
-
Index rows = m.rows();
Index cols = m.cols();
MatrixType x = MatrixType::Random(rows,cols), y = MatrixType::Random(rows,cols);
@@ -127,7 +125,7 @@ void check_qtvector_quaternion(const QuaternionType&)
}
}
-void test_qtvector()
+EIGEN_DECLARE_TEST(qtvector)
{
// some non vectorizable fixed sizes
CALL_SUBTEST(check_qtvector_matrix(Vector2f()));
diff --git a/test/rand.cpp b/test/rand.cpp
index 51cf01773..1b5c058ab 100644
--- a/test/rand.cpp
+++ b/test/rand.cpp
@@ -54,7 +54,7 @@ template<typename Scalar> void check_histogram(Scalar x, Scalar y, int bins)
VERIFY( (((hist.cast<double>()/double(f))-1.0).abs()<0.02).all() );
}
-void test_rand()
+EIGEN_DECLARE_TEST(rand)
{
long long_ref = NumTraits<long>::highest()/10;
signed char char_offset = (std::min)(g_repeat,64);
diff --git a/test/real_qz.cpp b/test/real_qz.cpp
index 99ac31235..1cf7aba2d 100644
--- a/test/real_qz.cpp
+++ b/test/real_qz.cpp
@@ -18,7 +18,6 @@ template<typename MatrixType> void real_qz(const MatrixType& m)
RealQZ.h
*/
using std::abs;
- typedef typename MatrixType::Index Index;
typedef typename MatrixType::Scalar Scalar;
Index dim = m.cols();
@@ -76,7 +75,7 @@ template<typename MatrixType> void real_qz(const MatrixType& m)
VERIFY_IS_APPROX(qz.matrixZ()*qz.matrixZ().adjoint(), MatrixType::Identity(dim,dim));
}
-void test_real_qz()
+EIGEN_DECLARE_TEST(real_qz)
{
int s = 0;
for(int i = 0; i < g_repeat; i++) {
diff --git a/test/redux.cpp b/test/redux.cpp
index 989e1057b..9e3ed4546 100644
--- a/test/redux.cpp
+++ b/test/redux.cpp
@@ -9,12 +9,13 @@
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#define TEST_ENABLE_TEMPORARY_TRACKING
+#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8
+// ^^ see bug 1449
#include "main.h"
template<typename MatrixType> void matrixRedux(const MatrixType& m)
{
- typedef typename MatrixType::Index Index;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
@@ -79,7 +80,6 @@ template<typename MatrixType> void matrixRedux(const MatrixType& m)
template<typename VectorType> void vectorRedux(const VectorType& w)
{
using std::abs;
- typedef typename VectorType::Index Index;
typedef typename VectorType::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
Index size = w.size();
@@ -146,7 +146,7 @@ template<typename VectorType> void vectorRedux(const VectorType& w)
VERIFY_RAISES_ASSERT(v.head(0).maxCoeff());
}
-void test_redux()
+EIGEN_DECLARE_TEST(redux)
{
// the max size cannot be too large, otherwise reduxion operations obviously generate large errors.
int maxsize = (std::min)(100,EIGEN_TEST_MAX_SIZE);
diff --git a/test/ref.cpp b/test/ref.cpp
index 769db0414..a94ea9677 100644
--- a/test/ref.cpp
+++ b/test/ref.cpp
@@ -13,7 +13,7 @@
#endif
#define TEST_ENABLE_TEMPORARY_TRACKING
-
+#define TEST_CHECK_STATIC_ASSERTIONS
#include "main.h"
// test Ref.h
@@ -32,7 +32,6 @@
template<typename MatrixType> void ref_matrix(const MatrixType& m)
{
- typedef typename MatrixType::Index Index;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef Matrix<Scalar,Dynamic,Dynamic,MatrixType::Options> DynMatrixType;
@@ -80,7 +79,6 @@ template<typename MatrixType> void ref_matrix(const MatrixType& m)
template<typename VectorType> void ref_vector(const VectorType& m)
{
- typedef typename VectorType::Index Index;
typedef typename VectorType::Scalar Scalar;
typedef typename VectorType::RealScalar RealScalar;
typedef Matrix<Scalar,Dynamic,1,VectorType::Options> DynMatrixType;
@@ -255,7 +253,18 @@ void test_ref_overloads()
test_ref_ambiguous(A, B);
}
-void test_ref()
+void test_ref_fixed_size_assert()
+{
+ Vector4f v4;
+ VectorXf vx(10);
+ VERIFY_RAISES_STATIC_ASSERT( Ref<Vector3f> y = v4; (void)y; );
+ VERIFY_RAISES_STATIC_ASSERT( Ref<Vector3f> y = vx.head<4>(); (void)y; );
+ VERIFY_RAISES_STATIC_ASSERT( Ref<const Vector3f> y = v4; (void)y; );
+ VERIFY_RAISES_STATIC_ASSERT( Ref<const Vector3f> y = vx.head<4>(); (void)y; );
+ VERIFY_RAISES_STATIC_ASSERT( Ref<const Vector3f> y = 2*v4; (void)y; );
+}
+
+EIGEN_DECLARE_TEST(ref)
{
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1( ref_vector(Matrix<float, 1, 1>()) );
@@ -277,4 +286,5 @@ void test_ref()
}
CALL_SUBTEST_7( test_ref_overloads() );
+ CALL_SUBTEST_7( test_ref_fixed_size_assert() );
}
diff --git a/test/reshape.cpp b/test/reshape.cpp
index 77aec1f95..da5c01183 100644
--- a/test/reshape.cpp
+++ b/test/reshape.cpp
@@ -149,7 +149,6 @@ void reshape4x4(MatType m)
MatrixXi m28r2 = m.transpose().template reshaped<ColMajor>(8,2).transpose();
VERIFY_IS_EQUAL( m28r1, m28r2);
- using placeholders::all;
VERIFY(is_same_eq(m.reshaped(v16,fix<1>), m(all)));
VERIFY_IS_EQUAL(m.reshaped(16,1), m(all));
VERIFY_IS_EQUAL(m.reshaped(1,16), m(all).transpose());
diff --git a/test/resize.cpp b/test/resize.cpp
index 4adaafe56..646a75b8f 100644
--- a/test/resize.cpp
+++ b/test/resize.cpp
@@ -33,7 +33,7 @@ void resizeLikeTest12() { resizeLikeTest<1,2>(); }
void resizeLikeTest1020() { resizeLikeTest<10,20>(); }
void resizeLikeTest31() { resizeLikeTest<3,1>(); }
-void test_resize()
+EIGEN_DECLARE_TEST(resize)
{
CALL_SUBTEST(resizeLikeTest12() );
CALL_SUBTEST(resizeLikeTest1020() );
diff --git a/test/rvalue_types.cpp b/test/rvalue_types.cpp
index 8887f1b1b..5f52fb3bc 100644
--- a/test/rvalue_types.cpp
+++ b/test/rvalue_types.cpp
@@ -43,7 +43,7 @@ template <typename MatrixType>
void rvalue_copyassign(const MatrixType&) {}
#endif
-void test_rvalue_types()
+EIGEN_DECLARE_TEST(rvalue_types)
{
CALL_SUBTEST_1(rvalue_copyassign( MatrixXf::Random(50,50).eval() ));
CALL_SUBTEST_1(rvalue_copyassign( ArrayXXf::Random(50,50).eval() ));
diff --git a/test/schur_complex.cpp b/test/schur_complex.cpp
index deb78e44e..03e17e81d 100644
--- a/test/schur_complex.cpp
+++ b/test/schur_complex.cpp
@@ -79,7 +79,7 @@ template<typename MatrixType> void schur(int size = MatrixType::ColsAtCompileTim
}
}
-void test_schur_complex()
+EIGEN_DECLARE_TEST(schur_complex)
{
CALL_SUBTEST_1(( schur<Matrix4cd>() ));
CALL_SUBTEST_2(( schur<MatrixXcf>(internal::random<int>(1,EIGEN_TEST_MAX_SIZE/4)) ));
diff --git a/test/schur_real.cpp b/test/schur_real.cpp
index 4aede87df..945461027 100644
--- a/test/schur_real.cpp
+++ b/test/schur_real.cpp
@@ -13,8 +13,6 @@
template<typename MatrixType> void verifyIsQuasiTriangular(const MatrixType& T)
{
- typedef typename MatrixType::Index Index;
-
const Index size = T.cols();
typedef typename MatrixType::Scalar Scalar;
@@ -100,7 +98,7 @@ template<typename MatrixType> void schur(int size = MatrixType::ColsAtCompileTim
}
}
-void test_schur_real()
+EIGEN_DECLARE_TEST(schur_real)
{
CALL_SUBTEST_1(( schur<Matrix4f>() ));
CALL_SUBTEST_2(( schur<MatrixXd>(internal::random<int>(1,EIGEN_TEST_MAX_SIZE/4)) ));
diff --git a/test/selfadjoint.cpp b/test/selfadjoint.cpp
index 92401e506..9ca9cef9e 100644
--- a/test/selfadjoint.cpp
+++ b/test/selfadjoint.cpp
@@ -7,6 +7,7 @@
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+#define TEST_CHECK_STATIC_ASSERTIONS
#include "main.h"
// This file tests the basic selfadjointView API,
@@ -14,7 +15,6 @@
template<typename MatrixType> void selfadjoint(const MatrixType& m)
{
- typedef typename MatrixType::Index Index;
typedef typename MatrixType::Scalar Scalar;
Index rows = m.rows();
@@ -45,6 +45,9 @@ template<typename MatrixType> void selfadjoint(const MatrixType& m)
m4 = m2;
m4 -= m1.template selfadjointView<Lower>();
VERIFY_IS_APPROX(m4, m2-m3);
+
+ VERIFY_RAISES_STATIC_ASSERT(m2.template selfadjointView<StrictlyUpper>());
+ VERIFY_RAISES_STATIC_ASSERT(m2.template selfadjointView<UnitLower>());
}
void bug_159()
@@ -53,7 +56,7 @@ void bug_159()
EIGEN_UNUSED_VARIABLE(m)
}
-void test_selfadjoint()
+EIGEN_DECLARE_TEST(selfadjoint)
{
for(int i = 0; i < g_repeat ; i++)
{
diff --git a/test/simplicial_cholesky.cpp b/test/simplicial_cholesky.cpp
index 649c817b4..314b903e2 100644
--- a/test/simplicial_cholesky.cpp
+++ b/test/simplicial_cholesky.cpp
@@ -35,11 +35,11 @@ template<typename T, typename I> void test_simplicial_cholesky_T()
check_sparse_spd_determinant(ldlt_colmajor_lower_amd);
check_sparse_spd_determinant(ldlt_colmajor_upper_amd);
- check_sparse_spd_solving(ldlt_colmajor_lower_nat, 300, 1000);
- check_sparse_spd_solving(ldlt_colmajor_upper_nat, 300, 1000);
+ check_sparse_spd_solving(ldlt_colmajor_lower_nat, (std::min)(300,EIGEN_TEST_MAX_SIZE), 1000);
+ check_sparse_spd_solving(ldlt_colmajor_upper_nat, (std::min)(300,EIGEN_TEST_MAX_SIZE), 1000);
}
-void test_simplicial_cholesky()
+EIGEN_DECLARE_TEST(simplicial_cholesky)
{
CALL_SUBTEST_1(( test_simplicial_cholesky_T<double,int>() ));
CALL_SUBTEST_2(( test_simplicial_cholesky_T<std::complex<double>, int>() ));
diff --git a/test/sizeof.cpp b/test/sizeof.cpp
index 03ad20453..af34e97dd 100644
--- a/test/sizeof.cpp
+++ b/test/sizeof.cpp
@@ -15,10 +15,10 @@ template<typename MatrixType> void verifySizeOf(const MatrixType&)
if (MatrixType::RowsAtCompileTime!=Dynamic && MatrixType::ColsAtCompileTime!=Dynamic)
VERIFY_IS_EQUAL(std::ptrdiff_t(sizeof(MatrixType)),std::ptrdiff_t(sizeof(Scalar))*std::ptrdiff_t(MatrixType::SizeAtCompileTime));
else
- VERIFY_IS_EQUAL(sizeof(MatrixType),sizeof(Scalar*) + 2 * sizeof(typename MatrixType::Index));
+ VERIFY_IS_EQUAL(sizeof(MatrixType),sizeof(Scalar*) + 2 * sizeof(Index));
}
-void test_sizeof()
+EIGEN_DECLARE_TEST(sizeof)
{
CALL_SUBTEST(verifySizeOf(Matrix<float, 1, 1>()) );
CALL_SUBTEST(verifySizeOf(Array<float, 2, 1>()) );
diff --git a/test/sizeoverflow.cpp b/test/sizeoverflow.cpp
index 240d22294..421351233 100644
--- a/test/sizeoverflow.cpp
+++ b/test/sizeoverflow.cpp
@@ -34,7 +34,7 @@ void triggerVectorBadAlloc(Index size)
VERIFY_THROWS_BADALLOC( VectorType v; v.conservativeResize(size) );
}
-void test_sizeoverflow()
+EIGEN_DECLARE_TEST(sizeoverflow)
{
// there are 2 levels of overflow checking. first in PlainObjectBase.h we check for overflow in rows*cols computations.
// this is tested in tests of the form times_itself_gives_0 * times_itself_gives_0
diff --git a/test/smallvectors.cpp b/test/smallvectors.cpp
index 781511397..f9803acbb 100644
--- a/test/smallvectors.cpp
+++ b/test/smallvectors.cpp
@@ -57,7 +57,7 @@ template<typename Scalar> void smallVectors()
}
}
-void test_smallvectors()
+EIGEN_DECLARE_TEST(smallvectors)
{
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST(smallVectors<int>() );
diff --git a/test/sparseLM.cpp b/test/sparseLM.cpp
index 8e148f9bc..a48fcb685 100644
--- a/test/sparseLM.cpp
+++ b/test/sparseLM.cpp
@@ -168,7 +168,7 @@ void test_sparseLM_T()
return ;
}
-void test_sparseLM()
+EIGEN_DECLARE_TEST(sparseLM)
{
CALL_SUBTEST_1(test_sparseLM_T<double>());
diff --git a/test/sparse_basic.cpp b/test/sparse_basic.cpp
index 384985028..3691e8dad 100644
--- a/test/sparse_basic.cpp
+++ b/test/sparse_basic.cpp
@@ -228,8 +228,8 @@ template<typename SparseMatrixType> void sparse_basic(const SparseMatrixType& re
VERIFY_RAISES_ASSERT( m1 -= m1.innerVector(0) );
VERIFY_RAISES_ASSERT( refM1 -= m1.innerVector(0) );
VERIFY_RAISES_ASSERT( refM1 += m1.innerVector(0) );
- m1 = m4; refM1 = refM4;
}
+ m1 = m4; refM1 = refM4;
// test aliasing
VERIFY_IS_APPROX((m1 = -m1), (refM1 = -refM1));
@@ -640,8 +640,22 @@ void big_sparse_triplet(Index rows, Index cols, double density) {
VERIFY_IS_APPROX(sum, m.sum());
}
+template<int>
+void bug1105()
+{
+ // Regression test for bug 1105
+ int n = Eigen::internal::random<int>(200,600);
+ SparseMatrix<std::complex<double>,0, long> mat(n, n);
+ std::complex<double> val;
+
+ for(int i=0; i<n; ++i)
+ {
+ mat.coeffRef(i, i%(n/10)) = val;
+ VERIFY(mat.data().allocatedSize()<20*n);
+ }
+}
-void test_sparse_basic()
+EIGEN_DECLARE_TEST(sparse_basic)
{
for(int i = 0; i < g_repeat; i++) {
int r = Eigen::internal::random<int>(1,200), c = Eigen::internal::random<int>(1,200);
@@ -671,18 +685,5 @@ void test_sparse_basic()
CALL_SUBTEST_3((big_sparse_triplet<SparseMatrix<float, RowMajor, int> >(10000, 10000, 0.125)));
CALL_SUBTEST_4((big_sparse_triplet<SparseMatrix<double, ColMajor, long int> >(10000, 10000, 0.125)));
- // Regression test for bug 1105
-#ifdef EIGEN_TEST_PART_7
- {
- int n = Eigen::internal::random<int>(200,600);
- SparseMatrix<std::complex<double>,0, long> mat(n, n);
- std::complex<double> val;
-
- for(int i=0; i<n; ++i)
- {
- mat.coeffRef(i, i%(n/10)) = val;
- VERIFY(mat.data().allocatedSize()<20*n);
- }
- }
-#endif
+ CALL_SUBTEST_7( bug1105<0>() );
}
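
Note on the sparse_basic.cpp hunk above: the bug-1105 regression code is moved out of an #ifdef EIGEN_TEST_PART_7 block into a dummy function template, so its body is only instantiated (and therefore only compiled and run) when CALL_SUBTEST_7 expands to an actual call. A minimal sketch of that idiom, with illustrative names that are not part of the patch:

    // The body of a function template is compiled only if it is instantiated.
    template<int>
    void expensive_regression_check()
    {
      // ... heavy code that only test part 7 should pay for ...
    }

    EIGEN_DECLARE_TEST(example)
    {
      // With the split_test_helper.h macros added further below, CALL_SUBTEST_7
      // expands to a call only when EIGEN_TEST_PART_7 (or EIGEN_TEST_PART_ALL) is
      // defined; otherwise the statement disappears and the template is never
      // instantiated.
      CALL_SUBTEST_7( expensive_regression_check<0>() );
    }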
diff --git a/test/sparse_block.cpp b/test/sparse_block.cpp
index 2a0b3b617..f9668102c 100644
--- a/test/sparse_block.cpp
+++ b/test/sparse_block.cpp
@@ -8,6 +8,7 @@
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#include "sparse.h"
+#include "AnnoyingScalar.h"
template<typename T>
typename Eigen::internal::enable_if<(T::Flags&RowMajorBit)==RowMajorBit, typename T::RowXpr>::type
@@ -31,6 +32,7 @@ template<typename SparseMatrixType> void sparse_block(const SparseMatrixType& re
const Index outer = ref.outerSize();
typedef typename SparseMatrixType::Scalar Scalar;
+ typedef typename SparseMatrixType::RealScalar RealScalar;
typedef typename SparseMatrixType::StorageIndex StorageIndex;
double density = (std::max)(8./(rows*cols), 0.01);
@@ -164,14 +166,14 @@ template<typename SparseMatrixType> void sparse_block(const SparseMatrixType& re
{
VERIFY(j==numext::real(m3.innerVector(j).nonZeros()));
if(j>0)
- VERIFY(j==numext::real(m3.innerVector(j).lastCoeff()));
+ VERIFY(RealScalar(j)==numext::real(m3.innerVector(j).lastCoeff()));
}
m3.makeCompressed();
for(Index j=0; j<(std::min)(outer, inner); ++j)
{
VERIFY(j==numext::real(m3.innerVector(j).nonZeros()));
if(j>0)
- VERIFY(j==numext::real(m3.innerVector(j).lastCoeff()));
+ VERIFY(RealScalar(j)==numext::real(m3.innerVector(j).lastCoeff()));
}
VERIFY(m3.innerVector(j0).nonZeros() == m3.transpose().innerVector(j0).nonZeros());
@@ -288,7 +290,7 @@ template<typename SparseMatrixType> void sparse_block(const SparseMatrixType& re
}
}
-void test_sparse_block()
+EIGEN_DECLARE_TEST(sparse_block)
{
for(int i = 0; i < g_repeat; i++) {
int r = Eigen::internal::random<int>(1,200), c = Eigen::internal::random<int>(1,200);
@@ -313,5 +315,8 @@ void test_sparse_block()
CALL_SUBTEST_4(( sparse_block(SparseMatrix<double,ColMajor,short int>(short(r), short(c))) ));
CALL_SUBTEST_4(( sparse_block(SparseMatrix<double,RowMajor,short int>(short(r), short(c))) ));
+
+ AnnoyingScalar::dont_throw = true;
+ CALL_SUBTEST_5(( sparse_block(SparseMatrix<AnnoyingScalar>(r,c)) ));
}
}
diff --git a/test/sparse_permutations.cpp b/test/sparse_permutations.cpp
index b82cceff8..e93493c39 100644
--- a/test/sparse_permutations.cpp
+++ b/test/sparse_permutations.cpp
@@ -220,7 +220,7 @@ template<typename Scalar> void sparse_permutations_all(int size)
CALL_SUBTEST(( sparse_permutations<RowMajor>(SparseMatrix<Scalar, RowMajor>(size,size)) ));
}
-void test_sparse_permutations()
+EIGEN_DECLARE_TEST(sparse_permutations)
{
for(int i = 0; i < g_repeat; i++) {
int s = Eigen::internal::random<int>(1,50);
diff --git a/test/sparse_product.cpp b/test/sparse_product.cpp
index c1edd26e3..db1b0e833 100644
--- a/test/sparse_product.cpp
+++ b/test/sparse_product.cpp
@@ -7,6 +7,12 @@
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+#if defined(_MSC_VER) && (_MSC_VER==1800)
+// This unit test takes forever to compile in Release mode with MSVC 2013,
+// multiple hours. So let's switch off optimization for this one.
+#pragma optimize("",off)
+#endif
+
static long int nb_temporaries;
inline void on_temporary_creation() {
@@ -297,6 +303,10 @@ template<typename SparseMatrixType> void sparse_product()
VERIFY_IS_APPROX(x=mLo.template selfadjointView<Lower>()*b, refX=refS*b);
VERIFY_IS_APPROX(x=mS.template selfadjointView<Upper|Lower>()*b, refX=refS*b);
+ VERIFY_IS_APPROX(x=b * mUp.template selfadjointView<Upper>(), refX=b*refS);
+ VERIFY_IS_APPROX(x=b * mLo.template selfadjointView<Lower>(), refX=b*refS);
+ VERIFY_IS_APPROX(x=b * mS.template selfadjointView<Upper|Lower>(), refX=b*refS);
+
VERIFY_IS_APPROX(x.noalias()+=mUp.template selfadjointView<Upper>()*b, refX+=refS*b);
VERIFY_IS_APPROX(x.noalias()-=mLo.template selfadjointView<Lower>()*b, refX-=refS*b);
VERIFY_IS_APPROX(x.noalias()+=mS.template selfadjointView<Upper|Lower>()*b, refX+=refS*b);
@@ -367,7 +377,89 @@ void bug_942()
VERIFY_IS_APPROX( ( d.asDiagonal()*cmA ).eval().coeff(0,0), res );
}
-void test_sparse_product()
+template<typename Real>
+void test_mixing_types()
+{
+ typedef std::complex<Real> Cplx;
+ typedef SparseMatrix<Real> SpMatReal;
+ typedef SparseMatrix<Cplx> SpMatCplx;
+ typedef SparseMatrix<Cplx,RowMajor> SpRowMatCplx;
+ typedef Matrix<Real,Dynamic,Dynamic> DenseMatReal;
+ typedef Matrix<Cplx,Dynamic,Dynamic> DenseMatCplx;
+
+ Index n = internal::random<Index>(1,100);
+ double density = (std::max)(8./(n*n), 0.2);
+
+ SpMatReal sR1(n,n);
+ SpMatCplx sC1(n,n), sC2(n,n), sC3(n,n);
+ SpRowMatCplx sCR(n,n);
+ DenseMatReal dR1(n,n);
+ DenseMatCplx dC1(n,n), dC2(n,n), dC3(n,n);
+
+ initSparse<Real>(density, dR1, sR1);
+ initSparse<Cplx>(density, dC1, sC1);
+ initSparse<Cplx>(density, dC2, sC2);
+
+ VERIFY_IS_APPROX( sC2 = (sR1 * sC1), dC3 = dR1.template cast<Cplx>() * dC1 );
+ VERIFY_IS_APPROX( sC2 = (sC1 * sR1), dC3 = dC1 * dR1.template cast<Cplx>() );
+ VERIFY_IS_APPROX( sC2 = (sR1.transpose() * sC1), dC3 = dR1.template cast<Cplx>().transpose() * dC1 );
+ VERIFY_IS_APPROX( sC2 = (sC1.transpose() * sR1), dC3 = dC1.transpose() * dR1.template cast<Cplx>() );
+ VERIFY_IS_APPROX( sC2 = (sR1 * sC1.transpose()), dC3 = dR1.template cast<Cplx>() * dC1.transpose() );
+ VERIFY_IS_APPROX( sC2 = (sC1 * sR1.transpose()), dC3 = dC1 * dR1.template cast<Cplx>().transpose() );
+ VERIFY_IS_APPROX( sC2 = (sR1.transpose() * sC1.transpose()), dC3 = dR1.template cast<Cplx>().transpose() * dC1.transpose() );
+ VERIFY_IS_APPROX( sC2 = (sC1.transpose() * sR1.transpose()), dC3 = dC1.transpose() * dR1.template cast<Cplx>().transpose() );
+
+ VERIFY_IS_APPROX( sCR = (sR1 * sC1), dC3 = dR1.template cast<Cplx>() * dC1 );
+ VERIFY_IS_APPROX( sCR = (sC1 * sR1), dC3 = dC1 * dR1.template cast<Cplx>() );
+ VERIFY_IS_APPROX( sCR = (sR1.transpose() * sC1), dC3 = dR1.template cast<Cplx>().transpose() * dC1 );
+ VERIFY_IS_APPROX( sCR = (sC1.transpose() * sR1), dC3 = dC1.transpose() * dR1.template cast<Cplx>() );
+ VERIFY_IS_APPROX( sCR = (sR1 * sC1.transpose()), dC3 = dR1.template cast<Cplx>() * dC1.transpose() );
+ VERIFY_IS_APPROX( sCR = (sC1 * sR1.transpose()), dC3 = dC1 * dR1.template cast<Cplx>().transpose() );
+ VERIFY_IS_APPROX( sCR = (sR1.transpose() * sC1.transpose()), dC3 = dR1.template cast<Cplx>().transpose() * dC1.transpose() );
+ VERIFY_IS_APPROX( sCR = (sC1.transpose() * sR1.transpose()), dC3 = dC1.transpose() * dR1.template cast<Cplx>().transpose() );
+
+
+ VERIFY_IS_APPROX( sC2 = (sR1 * sC1).pruned(), dC3 = dR1.template cast<Cplx>() * dC1 );
+ VERIFY_IS_APPROX( sC2 = (sC1 * sR1).pruned(), dC3 = dC1 * dR1.template cast<Cplx>() );
+ VERIFY_IS_APPROX( sC2 = (sR1.transpose() * sC1).pruned(), dC3 = dR1.template cast<Cplx>().transpose() * dC1 );
+ VERIFY_IS_APPROX( sC2 = (sC1.transpose() * sR1).pruned(), dC3 = dC1.transpose() * dR1.template cast<Cplx>() );
+ VERIFY_IS_APPROX( sC2 = (sR1 * sC1.transpose()).pruned(), dC3 = dR1.template cast<Cplx>() * dC1.transpose() );
+ VERIFY_IS_APPROX( sC2 = (sC1 * sR1.transpose()).pruned(), dC3 = dC1 * dR1.template cast<Cplx>().transpose() );
+ VERIFY_IS_APPROX( sC2 = (sR1.transpose() * sC1.transpose()).pruned(), dC3 = dR1.template cast<Cplx>().transpose() * dC1.transpose() );
+ VERIFY_IS_APPROX( sC2 = (sC1.transpose() * sR1.transpose()).pruned(), dC3 = dC1.transpose() * dR1.template cast<Cplx>().transpose() );
+
+ VERIFY_IS_APPROX( sCR = (sR1 * sC1).pruned(), dC3 = dR1.template cast<Cplx>() * dC1 );
+ VERIFY_IS_APPROX( sCR = (sC1 * sR1).pruned(), dC3 = dC1 * dR1.template cast<Cplx>() );
+ VERIFY_IS_APPROX( sCR = (sR1.transpose() * sC1).pruned(), dC3 = dR1.template cast<Cplx>().transpose() * dC1 );
+ VERIFY_IS_APPROX( sCR = (sC1.transpose() * sR1).pruned(), dC3 = dC1.transpose() * dR1.template cast<Cplx>() );
+ VERIFY_IS_APPROX( sCR = (sR1 * sC1.transpose()).pruned(), dC3 = dR1.template cast<Cplx>() * dC1.transpose() );
+ VERIFY_IS_APPROX( sCR = (sC1 * sR1.transpose()).pruned(), dC3 = dC1 * dR1.template cast<Cplx>().transpose() );
+ VERIFY_IS_APPROX( sCR = (sR1.transpose() * sC1.transpose()).pruned(), dC3 = dR1.template cast<Cplx>().transpose() * dC1.transpose() );
+ VERIFY_IS_APPROX( sCR = (sC1.transpose() * sR1.transpose()).pruned(), dC3 = dC1.transpose() * dR1.template cast<Cplx>().transpose() );
+
+
+ VERIFY_IS_APPROX( dC2 = (sR1 * sC1), dC3 = dR1.template cast<Cplx>() * dC1 );
+ VERIFY_IS_APPROX( dC2 = (sC1 * sR1), dC3 = dC1 * dR1.template cast<Cplx>() );
+ VERIFY_IS_APPROX( dC2 = (sR1.transpose() * sC1), dC3 = dR1.template cast<Cplx>().transpose() * dC1 );
+ VERIFY_IS_APPROX( dC2 = (sC1.transpose() * sR1), dC3 = dC1.transpose() * dR1.template cast<Cplx>() );
+ VERIFY_IS_APPROX( dC2 = (sR1 * sC1.transpose()), dC3 = dR1.template cast<Cplx>() * dC1.transpose() );
+ VERIFY_IS_APPROX( dC2 = (sC1 * sR1.transpose()), dC3 = dC1 * dR1.template cast<Cplx>().transpose() );
+ VERIFY_IS_APPROX( dC2 = (sR1.transpose() * sC1.transpose()), dC3 = dR1.template cast<Cplx>().transpose() * dC1.transpose() );
+ VERIFY_IS_APPROX( dC2 = (sC1.transpose() * sR1.transpose()), dC3 = dC1.transpose() * dR1.template cast<Cplx>().transpose() );
+
+
+ VERIFY_IS_APPROX( dC2 = dR1 * sC1, dC3 = dR1.template cast<Cplx>() * sC1 );
+ VERIFY_IS_APPROX( dC2 = sR1 * dC1, dC3 = sR1.template cast<Cplx>() * dC1 );
+ VERIFY_IS_APPROX( dC2 = dC1 * sR1, dC3 = dC1 * sR1.template cast<Cplx>() );
+ VERIFY_IS_APPROX( dC2 = sC1 * dR1, dC3 = sC1 * dR1.template cast<Cplx>() );
+
+ VERIFY_IS_APPROX( dC2 = dR1.row(0) * sC1, dC3 = dR1.template cast<Cplx>().row(0) * sC1 );
+ VERIFY_IS_APPROX( dC2 = sR1 * dC1.col(0), dC3 = sR1.template cast<Cplx>() * dC1.col(0) );
+ VERIFY_IS_APPROX( dC2 = dC1.row(0) * sR1, dC3 = dC1.row(0) * sR1.template cast<Cplx>() );
+ VERIFY_IS_APPROX( dC2 = sC1 * dR1.col(0), dC3 = sC1 * dR1.template cast<Cplx>().col(0) );
+}
+
+EIGEN_DECLARE_TEST(sparse_product)
{
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1( (sparse_product<SparseMatrix<double,ColMajor> >()) );
@@ -377,5 +469,7 @@ void test_sparse_product()
CALL_SUBTEST_2( (sparse_product<SparseMatrix<std::complex<double>, RowMajor > >()) );
CALL_SUBTEST_3( (sparse_product<SparseMatrix<float,ColMajor,long int> >()) );
CALL_SUBTEST_4( (sparse_product_regression_test<SparseMatrix<double,RowMajor>, Matrix<double, Dynamic, Dynamic, RowMajor> >()) );
+
+ CALL_SUBTEST_5( (test_mixing_types<float>()) );
}
}
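
The test_mixing_types addition above checks that sparse products mixing real and complex operands agree with the corresponding dense products once the real factor is cast to complex. A small, self-contained sketch of the pattern being exercised, assuming a build of Eigen that includes this mixed-scalar sparse product support:

    #include <Eigen/Sparse>
    #include <Eigen/Dense>
    #include <complex>
    #include <iostream>
    using namespace Eigen;

    int main()
    {
      typedef std::complex<double> Cplx;
      SparseMatrix<double> sR(3,3);
      SparseMatrix<Cplx>   sC(3,3);
      sR.insert(0,0) = 1.0;       sR.insert(2,1) = 0.5;
      sC.insert(1,1) = Cplx(0,2); sC.insert(0,2) = Cplx(3,-1);

      // Mixed real * complex sparse product (what the new subtest 5 covers).
      SparseMatrix<Cplx> sProd = sR * sC;

      // Dense reference: cast the real factor to complex and multiply.
      MatrixXcd dRef = sR.toDense().cast<Cplx>() * sC.toDense();

      std::cout << (sProd.toDense() - dRef).norm() << "\n"; // expected to be ~0
      return 0;
    }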
diff --git a/test/sparse_ref.cpp b/test/sparse_ref.cpp
index 5e9607234..12b6f8a9d 100644
--- a/test/sparse_ref.cpp
+++ b/test/sparse_ref.cpp
@@ -126,7 +126,7 @@ void call_ref()
VERIFY_EVALUATION_COUNT( call_ref_5(A.row(2), A.row(2).transpose()), 1);
}
-void test_sparse_ref()
+EIGEN_DECLARE_TEST(sparse_ref)
{
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1( check_const_correctness(SparseMatrix<float>()) );
diff --git a/test/sparse_solver.h b/test/sparse_solver.h
index 5145bc3eb..19416ed5d 100644
--- a/test/sparse_solver.h
+++ b/test/sparse_solver.h
@@ -266,7 +266,7 @@ std::string solver_stats(const SparseSolverBase<Derived> &/*solver*/)
}
#endif
-template<typename Solver> void check_sparse_spd_solving(Solver& solver, int maxSize = 300, int maxRealWorldSize = 100000)
+template<typename Solver> void check_sparse_spd_solving(Solver& solver, int maxSize = (std::min)(300,EIGEN_TEST_MAX_SIZE), int maxRealWorldSize = 100000)
{
typedef typename Solver::MatrixType Mat;
typedef typename Mat::Scalar Scalar;
diff --git a/test/sparse_solvers.cpp b/test/sparse_solvers.cpp
index 3a8873d43..aaf3d39c9 100644
--- a/test/sparse_solvers.cpp
+++ b/test/sparse_solvers.cpp
@@ -101,7 +101,7 @@ template<typename Scalar> void sparse_solvers(int rows, int cols)
}
}
-void test_sparse_solvers()
+EIGEN_DECLARE_TEST(sparse_solvers)
{
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1(sparse_solvers<double>(8, 8) );
diff --git a/test/sparse_vector.cpp b/test/sparse_vector.cpp
index b3e1dda25..35129278b 100644
--- a/test/sparse_vector.cpp
+++ b/test/sparse_vector.cpp
@@ -145,7 +145,7 @@ template<typename Scalar,typename StorageIndex> void sparse_vector(int rows, int
}
-void test_sparse_vector()
+EIGEN_DECLARE_TEST(sparse_vector)
{
for(int i = 0; i < g_repeat; i++) {
int r = Eigen::internal::random<int>(1,500), c = Eigen::internal::random<int>(1,500);
diff --git a/test/sparselu.cpp b/test/sparselu.cpp
index bd000baf1..84cc6ebe5 100644
--- a/test/sparselu.cpp
+++ b/test/sparselu.cpp
@@ -36,7 +36,7 @@ template<typename T> void test_sparselu_T()
check_sparse_square_determinant(sparselu_amd);
}
-void test_sparselu()
+EIGEN_DECLARE_TEST(sparselu)
{
CALL_SUBTEST_1(test_sparselu_T<float>());
CALL_SUBTEST_2(test_sparselu_T<double>());
diff --git a/test/sparseqr.cpp b/test/sparseqr.cpp
index e8605fd21..3ffe62314 100644
--- a/test/sparseqr.cpp
+++ b/test/sparseqr.cpp
@@ -54,6 +54,28 @@ template<typename Scalar> void test_sparseqr_scalar()
b = dA * DenseVector::Random(A.cols());
solver.compute(A);
+
+ // Q should be MxM
+ VERIFY_IS_EQUAL(solver.matrixQ().rows(), A.rows());
+ VERIFY_IS_EQUAL(solver.matrixQ().cols(), A.rows());
+
+ // R should be MxN
+ VERIFY_IS_EQUAL(solver.matrixR().rows(), A.rows());
+ VERIFY_IS_EQUAL(solver.matrixR().cols(), A.cols());
+
+ // Q and R can be multiplied
+ DenseMat recoveredA = solver.matrixQ()
+ * DenseMat(solver.matrixR().template triangularView<Upper>())
+ * solver.colsPermutation().transpose();
+ VERIFY_IS_EQUAL(recoveredA.rows(), A.rows());
+ VERIFY_IS_EQUAL(recoveredA.cols(), A.cols());
+
+ // and in the full rank case the original matrix is recovered
+ if (solver.rank() == A.cols())
+ {
+ VERIFY_IS_APPROX(A, recoveredA);
+ }
+
if(internal::random<float>(0,1)>0.5f)
 solver.factorize(A); // this checks that calling analyzePattern is not needed if the pattern does not change.
if (solver.info() != Success)
@@ -95,7 +117,7 @@ template<typename Scalar> void test_sparseqr_scalar()
dQ = solver.matrixQ();
VERIFY_IS_APPROX(Q, dQ);
}
-void test_sparseqr()
+EIGEN_DECLARE_TEST(sparseqr)
{
for(int i=0; i<g_repeat; ++i)
{
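
For context on the new dimension and reconstruction checks in sparseqr.cpp: SparseQR factors an m-by-n matrix with a column permutation P, a square m-by-m Q and an m-by-n R, which is what the added VERIFY_IS_EQUAL calls assert. The full-rank recovery check corresponds to the following hedged restatement (not part of the patch):

    A\,P = Q\,R, \qquad Q \in \mathbb{R}^{m \times m}, \quad R \in \mathbb{R}^{m \times n},
    \qquad \operatorname{rank}(A) = n \;\Rightarrow\; A \approx Q\,R\,P^{\mathsf{T}}.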
diff --git a/test/special_numbers.cpp b/test/special_numbers.cpp
index 2f1b704be..1e1a63631 100644
--- a/test/special_numbers.cpp
+++ b/test/special_numbers.cpp
@@ -49,7 +49,7 @@ template<typename Scalar> void special_numbers()
VERIFY(!mboth.array().allFinite());
}
-void test_special_numbers()
+EIGEN_DECLARE_TEST(special_numbers)
{
for(int i = 0; i < 10*g_repeat; i++) {
CALL_SUBTEST_1( special_numbers<float>() );
diff --git a/test/split_test_helper.h b/test/split_test_helper.h
new file mode 100644
index 000000000..82e82aaef
--- /dev/null
+++ b/test/split_test_helper.h
@@ -0,0 +1,5994 @@
+#if defined(EIGEN_TEST_PART_1) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_1(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_1(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_2) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_2(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_2(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_3) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_3(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_3(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_4) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_4(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_4(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_5) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_5(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_5(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_6) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_6(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_6(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_7) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_7(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_7(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_8) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_8(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_8(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_9) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_9(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_9(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_10) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_10(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_10(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_11) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_11(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_11(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_12) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_12(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_12(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_13) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_13(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_13(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_14) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_14(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_14(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_15) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_15(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_15(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_16) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_16(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_16(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_17) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_17(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_17(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_18) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_18(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_18(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_19) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_19(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_19(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_20) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_20(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_20(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_21) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_21(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_21(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_22) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_22(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_22(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_23) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_23(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_23(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_24) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_24(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_24(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_25) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_25(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_25(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_26) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_26(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_26(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_27) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_27(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_27(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_28) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_28(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_28(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_29) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_29(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_29(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_30) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_30(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_30(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_31) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_31(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_31(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_32) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_32(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_32(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_33) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_33(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_33(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_34) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_34(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_34(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_35) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_35(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_35(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_36) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_36(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_36(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_37) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_37(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_37(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_38) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_38(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_38(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_39) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_39(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_39(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_40) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_40(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_40(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_41) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_41(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_41(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_42) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_42(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_42(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_43) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_43(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_43(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_44) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_44(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_44(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_45) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_45(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_45(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_46) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_46(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_46(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_47) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_47(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_47(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_48) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_48(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_48(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_49) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_49(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_49(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_50) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_50(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_50(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_51) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_51(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_51(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_52) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_52(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_52(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_53) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_53(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_53(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_54) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_54(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_54(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_55) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_55(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_55(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_56) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_56(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_56(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_57) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_57(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_57(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_58) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_58(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_58(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_59) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_59(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_59(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_60) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_60(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_60(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_61) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_61(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_61(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_62) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_62(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_62(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_63) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_63(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_63(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_64) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_64(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_64(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_65) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_65(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_65(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_66) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_66(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_66(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_67) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_67(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_67(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_68) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_68(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_68(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_69) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_69(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_69(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_70) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_70(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_70(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_71) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_71(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_71(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_72) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_72(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_72(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_73) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_73(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_73(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_74) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_74(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_74(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_75) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_75(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_75(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_76) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_76(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_76(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_77) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_77(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_77(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_78) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_78(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_78(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_79) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_79(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_79(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_80) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_80(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_80(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_81) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_81(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_81(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_82) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_82(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_82(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_83) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_83(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_83(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_84) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_84(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_84(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_85) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_85(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_85(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_86) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_86(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_86(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_87) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_87(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_87(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_88) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_88(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_88(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_89) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_89(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_89(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_90) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_90(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_90(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_91) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_91(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_91(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_92) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_92(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_92(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_93) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_93(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_93(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_94) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_94(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_94(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_95) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_95(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_95(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_96) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_96(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_96(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_97) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_97(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_97(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_98) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_98(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_98(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_99) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_99(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_99(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_100) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_100(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_100(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_101) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_101(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_101(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_102) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_102(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_102(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_103) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_103(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_103(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_104) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_104(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_104(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_105) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_105(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_105(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_106) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_106(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_106(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_107) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_107(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_107(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_108) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_108(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_108(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_109) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_109(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_109(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_110) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_110(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_110(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_111) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_111(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_111(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_112) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_112(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_112(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_113) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_113(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_113(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_114) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_114(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_114(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_115) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_115(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_115(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_116) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_116(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_116(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_117) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_117(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_117(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_118) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_118(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_118(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_119) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_119(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_119(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_120) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_120(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_120(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_121) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_121(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_121(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_122) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_122(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_122(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_123) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_123(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_123(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_124) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_124(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_124(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_125) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_125(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_125(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_126) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_126(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_126(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_127) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_127(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_127(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_128) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_128(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_128(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_129) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_129(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_129(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_130) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_130(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_130(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_131) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_131(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_131(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_132) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_132(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_132(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_133) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_133(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_133(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_134) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_134(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_134(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_135) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_135(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_135(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_136) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_136(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_136(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_137) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_137(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_137(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_138) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_138(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_138(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_139) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_139(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_139(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_140) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_140(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_140(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_141) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_141(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_141(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_142) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_142(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_142(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_143) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_143(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_143(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_144) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_144(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_144(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_145) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_145(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_145(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_146) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_146(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_146(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_147) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_147(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_147(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_148) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_148(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_148(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_149) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_149(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_149(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_150) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_150(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_150(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_151) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_151(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_151(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_152) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_152(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_152(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_153) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_153(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_153(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_154) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_154(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_154(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_155) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_155(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_155(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_156) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_156(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_156(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_157) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_157(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_157(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_158) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_158(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_158(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_159) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_159(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_159(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_160) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_160(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_160(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_161) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_161(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_161(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_162) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_162(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_162(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_163) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_163(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_163(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_164) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_164(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_164(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_165) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_165(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_165(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_166) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_166(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_166(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_167) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_167(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_167(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_168) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_168(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_168(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_169) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_169(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_169(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_170) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_170(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_170(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_171) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_171(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_171(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_172) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_172(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_172(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_173) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_173(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_173(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_174) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_174(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_174(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_175) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_175(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_175(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_176) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_176(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_176(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_177) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_177(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_177(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_178) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_178(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_178(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_179) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_179(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_179(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_180) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_180(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_180(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_181) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_181(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_181(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_182) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_182(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_182(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_183) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_183(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_183(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_184) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_184(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_184(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_185) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_185(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_185(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_186) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_186(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_186(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_187) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_187(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_187(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_188) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_188(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_188(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_189) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_189(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_189(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_190) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_190(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_190(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_191) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_191(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_191(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_192) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_192(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_192(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_193) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_193(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_193(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_194) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_194(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_194(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_195) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_195(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_195(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_196) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_196(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_196(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_197) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_197(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_197(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_198) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_198(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_198(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_199) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_199(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_199(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_200) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_200(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_200(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_201) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_201(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_201(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_202) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_202(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_202(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_203) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_203(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_203(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_204) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_204(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_204(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_205) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_205(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_205(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_206) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_206(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_206(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_207) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_207(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_207(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_208) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_208(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_208(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_209) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_209(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_209(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_210) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_210(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_210(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_211) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_211(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_211(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_212) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_212(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_212(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_213) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_213(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_213(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_214) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_214(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_214(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_215) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_215(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_215(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_216) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_216(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_216(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_217) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_217(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_217(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_218) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_218(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_218(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_219) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_219(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_219(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_220) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_220(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_220(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_221) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_221(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_221(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_222) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_222(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_222(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_223) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_223(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_223(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_224) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_224(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_224(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_225) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_225(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_225(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_226) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_226(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_226(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_227) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_227(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_227(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_228) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_228(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_228(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_229) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_229(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_229(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_230) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_230(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_230(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_231) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_231(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_231(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_232) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_232(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_232(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_233) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_233(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_233(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_234) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_234(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_234(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_235) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_235(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_235(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_236) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_236(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_236(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_237) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_237(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_237(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_238) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_238(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_238(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_239) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_239(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_239(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_240) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_240(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_240(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_241) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_241(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_241(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_242) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_242(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_242(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_243) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_243(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_243(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_244) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_244(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_244(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_245) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_245(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_245(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_246) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_246(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_246(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_247) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_247(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_247(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_248) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_248(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_248(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_249) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_249(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_249(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_250) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_250(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_250(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_251) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_251(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_251(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_252) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_252(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_252(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_253) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_253(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_253(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_254) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_254(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_254(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_255) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_255(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_255(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_256) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_256(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_256(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_257) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_257(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_257(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_258) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_258(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_258(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_259) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_259(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_259(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_260) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_260(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_260(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_261) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_261(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_261(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_262) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_262(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_262(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_263) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_263(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_263(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_264) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_264(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_264(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_265) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_265(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_265(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_266) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_266(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_266(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_267) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_267(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_267(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_268) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_268(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_268(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_269) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_269(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_269(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_270) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_270(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_270(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_271) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_271(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_271(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_272) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_272(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_272(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_273) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_273(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_273(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_274) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_274(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_274(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_275) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_275(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_275(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_276) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_276(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_276(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_277) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_277(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_277(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_278) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_278(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_278(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_279) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_279(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_279(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_280) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_280(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_280(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_281) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_281(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_281(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_282) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_282(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_282(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_283) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_283(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_283(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_284) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_284(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_284(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_285) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_285(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_285(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_286) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_286(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_286(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_287) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_287(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_287(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_288) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_288(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_288(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_289) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_289(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_289(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_290) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_290(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_290(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_291) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_291(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_291(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_292) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_292(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_292(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_293) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_293(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_293(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_294) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_294(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_294(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_295) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_295(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_295(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_296) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_296(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_296(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_297) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_297(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_297(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_298) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_298(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_298(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_299) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_299(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_299(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_300) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_300(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_300(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_301) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_301(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_301(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_302) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_302(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_302(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_303) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_303(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_303(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_304) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_304(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_304(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_305) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_305(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_305(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_306) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_306(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_306(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_307) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_307(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_307(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_308) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_308(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_308(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_309) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_309(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_309(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_310) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_310(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_310(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_311) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_311(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_311(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_312) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_312(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_312(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_313) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_313(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_313(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_314) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_314(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_314(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_315) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_315(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_315(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_316) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_316(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_316(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_317) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_317(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_317(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_318) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_318(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_318(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_319) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_319(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_319(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_320) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_320(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_320(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_321) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_321(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_321(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_322) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_322(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_322(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_323) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_323(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_323(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_324) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_324(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_324(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_325) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_325(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_325(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_326) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_326(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_326(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_327) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_327(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_327(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_328) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_328(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_328(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_329) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_329(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_329(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_330) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_330(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_330(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_331) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_331(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_331(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_332) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_332(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_332(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_333) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_333(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_333(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_334) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_334(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_334(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_335) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_335(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_335(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_336) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_336(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_336(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_337) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_337(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_337(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_338) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_338(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_338(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_339) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_339(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_339(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_340) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_340(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_340(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_341) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_341(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_341(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_342) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_342(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_342(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_343) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_343(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_343(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_344) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_344(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_344(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_345) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_345(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_345(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_346) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_346(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_346(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_347) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_347(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_347(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_348) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_348(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_348(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_349) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_349(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_349(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_350) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_350(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_350(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_351) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_351(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_351(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_352) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_352(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_352(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_353) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_353(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_353(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_354) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_354(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_354(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_355) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_355(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_355(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_356) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_356(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_356(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_357) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_357(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_357(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_358) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_358(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_358(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_359) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_359(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_359(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_360) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_360(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_360(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_361) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_361(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_361(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_362) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_362(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_362(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_363) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_363(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_363(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_364) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_364(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_364(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_365) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_365(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_365(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_366) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_366(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_366(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_367) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_367(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_367(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_368) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_368(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_368(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_369) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_369(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_369(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_370) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_370(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_370(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_371) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_371(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_371(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_372) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_372(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_372(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_373) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_373(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_373(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_374) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_374(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_374(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_375) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_375(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_375(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_376) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_376(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_376(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_377) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_377(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_377(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_378) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_378(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_378(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_379) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_379(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_379(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_380) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_380(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_380(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_381) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_381(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_381(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_382) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_382(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_382(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_383) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_383(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_383(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_384) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_384(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_384(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_385) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_385(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_385(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_386) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_386(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_386(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_387) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_387(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_387(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_388) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_388(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_388(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_389) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_389(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_389(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_390) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_390(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_390(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_391) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_391(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_391(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_392) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_392(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_392(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_393) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_393(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_393(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_394) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_394(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_394(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_395) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_395(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_395(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_396) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_396(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_396(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_397) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_397(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_397(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_398) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_398(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_398(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_399) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_399(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_399(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_400) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_400(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_400(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_401) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_401(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_401(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_402) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_402(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_402(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_403) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_403(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_403(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_404) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_404(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_404(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_405) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_405(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_405(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_406) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_406(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_406(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_407) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_407(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_407(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_408) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_408(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_408(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_409) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_409(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_409(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_410) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_410(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_410(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_411) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_411(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_411(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_412) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_412(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_412(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_413) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_413(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_413(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_414) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_414(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_414(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_415) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_415(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_415(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_416) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_416(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_416(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_417) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_417(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_417(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_418) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_418(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_418(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_419) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_419(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_419(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_420) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_420(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_420(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_421) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_421(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_421(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_422) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_422(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_422(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_423) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_423(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_423(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_424) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_424(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_424(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_425) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_425(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_425(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_426) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_426(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_426(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_427) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_427(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_427(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_428) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_428(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_428(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_429) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_429(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_429(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_430) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_430(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_430(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_431) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_431(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_431(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_432) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_432(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_432(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_433) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_433(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_433(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_434) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_434(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_434(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_435) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_435(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_435(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_436) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_436(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_436(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_437) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_437(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_437(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_438) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_438(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_438(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_439) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_439(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_439(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_440) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_440(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_440(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_441) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_441(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_441(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_442) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_442(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_442(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_443) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_443(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_443(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_444) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_444(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_444(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_445) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_445(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_445(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_446) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_446(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_446(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_447) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_447(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_447(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_448) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_448(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_448(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_449) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_449(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_449(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_450) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_450(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_450(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_451) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_451(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_451(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_452) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_452(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_452(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_453) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_453(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_453(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_454) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_454(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_454(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_455) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_455(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_455(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_456) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_456(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_456(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_457) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_457(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_457(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_458) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_458(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_458(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_459) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_459(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_459(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_460) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_460(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_460(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_461) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_461(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_461(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_462) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_462(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_462(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_463) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_463(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_463(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_464) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_464(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_464(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_465) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_465(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_465(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_466) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_466(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_466(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_467) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_467(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_467(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_468) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_468(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_468(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_469) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_469(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_469(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_470) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_470(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_470(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_471) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_471(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_471(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_472) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_472(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_472(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_473) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_473(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_473(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_474) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_474(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_474(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_475) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_475(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_475(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_476) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_476(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_476(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_477) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_477(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_477(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_478) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_478(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_478(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_479) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_479(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_479(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_480) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_480(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_480(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_481) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_481(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_481(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_482) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_482(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_482(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_483) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_483(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_483(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_484) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_484(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_484(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_485) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_485(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_485(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_486) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_486(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_486(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_487) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_487(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_487(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_488) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_488(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_488(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_489) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_489(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_489(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_490) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_490(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_490(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_491) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_491(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_491(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_492) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_492(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_492(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_493) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_493(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_493(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_494) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_494(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_494(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_495) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_495(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_495(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_496) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_496(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_496(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_497) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_497(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_497(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_498) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_498(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_498(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_499) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_499(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_499(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_500) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_500(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_500(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_501) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_501(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_501(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_502) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_502(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_502(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_503) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_503(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_503(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_504) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_504(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_504(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_505) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_505(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_505(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_506) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_506(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_506(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_507) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_507(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_507(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_508) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_508(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_508(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_509) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_509(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_509(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_510) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_510(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_510(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_511) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_511(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_511(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_512) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_512(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_512(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_513) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_513(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_513(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_514) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_514(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_514(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_515) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_515(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_515(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_516) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_516(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_516(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_517) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_517(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_517(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_518) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_518(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_518(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_519) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_519(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_519(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_520) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_520(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_520(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_521) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_521(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_521(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_522) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_522(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_522(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_523) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_523(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_523(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_524) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_524(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_524(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_525) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_525(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_525(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_526) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_526(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_526(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_527) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_527(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_527(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_528) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_528(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_528(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_529) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_529(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_529(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_530) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_530(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_530(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_531) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_531(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_531(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_532) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_532(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_532(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_533) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_533(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_533(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_534) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_534(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_534(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_535) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_535(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_535(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_536) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_536(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_536(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_537) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_537(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_537(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_538) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_538(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_538(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_539) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_539(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_539(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_540) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_540(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_540(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_541) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_541(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_541(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_542) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_542(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_542(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_543) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_543(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_543(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_544) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_544(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_544(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_545) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_545(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_545(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_546) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_546(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_546(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_547) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_547(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_547(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_548) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_548(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_548(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_549) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_549(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_549(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_550) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_550(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_550(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_551) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_551(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_551(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_552) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_552(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_552(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_553) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_553(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_553(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_554) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_554(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_554(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_555) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_555(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_555(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_556) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_556(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_556(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_557) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_557(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_557(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_558) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_558(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_558(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_559) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_559(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_559(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_560) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_560(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_560(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_561) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_561(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_561(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_562) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_562(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_562(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_563) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_563(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_563(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_564) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_564(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_564(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_565) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_565(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_565(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_566) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_566(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_566(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_567) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_567(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_567(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_568) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_568(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_568(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_569) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_569(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_569(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_570) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_570(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_570(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_571) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_571(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_571(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_572) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_572(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_572(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_573) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_573(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_573(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_574) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_574(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_574(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_575) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_575(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_575(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_576) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_576(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_576(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_577) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_577(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_577(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_578) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_578(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_578(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_579) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_579(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_579(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_580) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_580(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_580(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_581) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_581(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_581(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_582) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_582(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_582(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_583) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_583(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_583(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_584) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_584(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_584(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_585) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_585(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_585(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_586) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_586(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_586(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_587) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_587(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_587(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_588) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_588(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_588(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_589) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_589(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_589(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_590) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_590(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_590(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_591) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_591(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_591(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_592) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_592(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_592(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_593) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_593(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_593(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_594) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_594(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_594(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_595) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_595(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_595(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_596) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_596(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_596(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_597) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_597(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_597(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_598) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_598(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_598(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_599) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_599(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_599(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_600) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_600(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_600(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_601) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_601(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_601(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_602) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_602(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_602(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_603) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_603(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_603(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_604) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_604(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_604(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_605) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_605(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_605(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_606) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_606(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_606(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_607) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_607(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_607(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_608) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_608(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_608(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_609) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_609(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_609(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_610) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_610(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_610(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_611) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_611(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_611(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_612) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_612(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_612(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_613) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_613(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_613(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_614) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_614(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_614(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_615) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_615(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_615(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_616) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_616(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_616(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_617) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_617(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_617(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_618) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_618(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_618(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_619) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_619(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_619(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_620) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_620(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_620(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_621) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_621(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_621(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_622) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_622(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_622(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_623) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_623(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_623(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_624) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_624(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_624(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_625) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_625(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_625(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_626) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_626(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_626(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_627) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_627(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_627(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_628) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_628(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_628(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_629) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_629(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_629(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_630) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_630(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_630(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_631) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_631(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_631(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_632) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_632(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_632(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_633) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_633(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_633(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_634) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_634(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_634(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_635) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_635(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_635(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_636) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_636(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_636(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_637) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_637(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_637(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_638) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_638(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_638(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_639) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_639(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_639(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_640) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_640(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_640(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_641) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_641(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_641(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_642) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_642(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_642(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_643) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_643(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_643(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_644) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_644(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_644(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_645) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_645(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_645(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_646) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_646(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_646(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_647) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_647(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_647(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_648) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_648(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_648(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_649) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_649(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_649(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_650) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_650(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_650(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_651) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_651(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_651(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_652) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_652(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_652(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_653) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_653(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_653(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_654) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_654(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_654(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_655) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_655(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_655(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_656) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_656(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_656(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_657) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_657(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_657(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_658) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_658(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_658(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_659) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_659(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_659(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_660) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_660(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_660(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_661) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_661(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_661(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_662) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_662(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_662(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_663) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_663(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_663(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_664) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_664(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_664(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_665) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_665(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_665(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_666) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_666(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_666(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_667) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_667(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_667(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_668) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_668(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_668(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_669) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_669(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_669(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_670) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_670(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_670(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_671) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_671(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_671(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_672) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_672(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_672(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_673) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_673(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_673(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_674) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_674(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_674(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_675) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_675(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_675(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_676) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_676(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_676(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_677) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_677(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_677(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_678) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_678(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_678(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_679) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_679(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_679(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_680) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_680(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_680(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_681) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_681(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_681(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_682) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_682(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_682(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_683) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_683(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_683(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_684) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_684(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_684(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_685) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_685(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_685(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_686) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_686(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_686(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_687) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_687(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_687(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_688) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_688(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_688(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_689) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_689(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_689(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_690) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_690(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_690(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_691) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_691(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_691(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_692) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_692(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_692(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_693) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_693(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_693(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_694) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_694(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_694(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_695) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_695(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_695(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_696) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_696(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_696(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_697) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_697(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_697(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_698) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_698(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_698(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_699) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_699(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_699(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_700) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_700(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_700(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_701) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_701(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_701(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_702) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_702(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_702(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_703) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_703(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_703(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_704) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_704(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_704(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_705) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_705(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_705(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_706) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_706(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_706(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_707) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_707(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_707(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_708) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_708(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_708(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_709) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_709(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_709(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_710) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_710(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_710(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_711) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_711(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_711(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_712) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_712(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_712(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_713) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_713(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_713(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_714) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_714(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_714(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_715) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_715(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_715(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_716) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_716(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_716(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_717) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_717(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_717(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_718) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_718(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_718(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_719) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_719(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_719(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_720) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_720(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_720(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_721) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_721(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_721(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_722) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_722(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_722(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_723) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_723(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_723(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_724) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_724(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_724(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_725) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_725(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_725(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_726) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_726(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_726(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_727) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_727(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_727(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_728) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_728(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_728(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_729) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_729(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_729(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_730) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_730(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_730(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_731) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_731(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_731(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_732) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_732(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_732(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_733) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_733(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_733(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_734) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_734(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_734(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_735) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_735(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_735(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_736) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_736(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_736(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_737) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_737(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_737(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_738) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_738(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_738(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_739) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_739(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_739(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_740) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_740(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_740(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_741) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_741(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_741(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_742) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_742(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_742(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_743) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_743(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_743(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_744) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_744(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_744(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_745) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_745(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_745(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_746) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_746(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_746(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_747) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_747(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_747(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_748) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_748(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_748(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_749) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_749(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_749(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_750) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_750(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_750(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_751) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_751(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_751(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_752) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_752(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_752(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_753) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_753(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_753(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_754) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_754(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_754(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_755) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_755(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_755(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_756) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_756(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_756(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_757) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_757(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_757(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_758) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_758(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_758(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_759) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_759(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_759(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_760) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_760(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_760(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_761) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_761(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_761(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_762) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_762(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_762(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_763) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_763(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_763(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_764) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_764(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_764(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_765) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_765(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_765(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_766) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_766(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_766(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_767) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_767(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_767(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_768) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_768(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_768(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_769) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_769(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_769(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_770) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_770(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_770(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_771) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_771(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_771(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_772) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_772(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_772(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_773) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_773(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_773(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_774) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_774(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_774(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_775) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_775(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_775(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_776) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_776(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_776(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_777) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_777(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_777(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_778) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_778(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_778(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_779) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_779(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_779(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_780) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_780(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_780(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_781) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_781(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_781(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_782) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_782(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_782(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_783) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_783(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_783(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_784) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_784(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_784(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_785) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_785(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_785(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_786) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_786(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_786(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_787) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_787(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_787(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_788) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_788(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_788(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_789) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_789(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_789(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_790) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_790(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_790(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_791) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_791(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_791(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_792) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_792(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_792(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_793) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_793(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_793(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_794) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_794(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_794(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_795) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_795(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_795(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_796) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_796(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_796(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_797) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_797(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_797(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_798) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_798(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_798(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_799) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_799(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_799(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_800) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_800(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_800(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_801) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_801(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_801(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_802) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_802(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_802(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_803) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_803(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_803(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_804) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_804(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_804(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_805) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_805(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_805(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_806) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_806(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_806(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_807) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_807(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_807(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_808) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_808(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_808(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_809) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_809(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_809(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_810) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_810(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_810(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_811) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_811(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_811(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_812) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_812(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_812(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_813) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_813(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_813(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_814) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_814(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_814(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_815) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_815(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_815(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_816) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_816(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_816(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_817) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_817(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_817(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_818) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_818(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_818(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_819) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_819(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_819(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_820) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_820(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_820(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_821) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_821(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_821(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_822) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_822(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_822(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_823) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_823(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_823(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_824) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_824(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_824(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_825) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_825(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_825(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_826) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_826(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_826(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_827) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_827(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_827(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_828) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_828(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_828(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_829) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_829(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_829(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_830) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_830(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_830(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_831) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_831(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_831(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_832) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_832(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_832(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_833) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_833(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_833(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_834) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_834(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_834(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_835) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_835(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_835(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_836) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_836(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_836(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_837) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_837(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_837(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_838) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_838(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_838(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_839) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_839(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_839(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_840) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_840(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_840(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_841) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_841(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_841(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_842) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_842(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_842(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_843) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_843(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_843(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_844) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_844(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_844(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_845) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_845(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_845(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_846) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_846(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_846(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_847) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_847(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_847(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_848) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_848(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_848(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_849) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_849(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_849(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_850) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_850(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_850(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_851) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_851(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_851(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_852) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_852(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_852(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_853) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_853(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_853(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_854) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_854(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_854(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_855) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_855(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_855(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_856) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_856(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_856(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_857) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_857(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_857(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_858) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_858(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_858(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_859) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_859(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_859(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_860) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_860(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_860(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_861) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_861(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_861(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_862) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_862(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_862(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_863) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_863(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_863(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_864) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_864(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_864(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_865) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_865(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_865(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_866) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_866(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_866(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_867) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_867(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_867(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_868) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_868(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_868(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_869) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_869(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_869(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_870) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_870(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_870(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_871) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_871(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_871(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_872) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_872(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_872(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_873) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_873(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_873(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_874) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_874(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_874(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_875) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_875(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_875(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_876) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_876(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_876(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_877) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_877(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_877(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_878) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_878(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_878(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_879) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_879(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_879(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_880) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_880(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_880(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_881) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_881(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_881(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_882) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_882(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_882(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_883) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_883(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_883(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_884) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_884(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_884(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_885) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_885(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_885(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_886) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_886(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_886(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_887) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_887(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_887(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_888) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_888(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_888(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_889) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_889(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_889(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_890) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_890(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_890(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_891) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_891(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_891(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_892) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_892(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_892(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_893) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_893(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_893(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_894) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_894(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_894(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_895) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_895(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_895(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_896) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_896(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_896(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_897) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_897(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_897(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_898) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_898(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_898(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_899) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_899(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_899(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_900) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_900(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_900(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_901) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_901(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_901(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_902) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_902(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_902(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_903) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_903(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_903(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_904) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_904(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_904(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_905) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_905(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_905(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_906) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_906(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_906(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_907) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_907(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_907(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_908) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_908(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_908(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_909) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_909(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_909(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_910) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_910(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_910(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_911) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_911(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_911(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_912) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_912(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_912(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_913) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_913(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_913(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_914) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_914(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_914(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_915) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_915(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_915(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_916) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_916(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_916(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_917) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_917(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_917(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_918) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_918(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_918(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_919) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_919(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_919(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_920) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_920(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_920(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_921) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_921(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_921(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_922) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_922(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_922(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_923) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_923(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_923(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_924) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_924(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_924(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_925) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_925(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_925(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_926) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_926(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_926(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_927) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_927(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_927(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_928) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_928(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_928(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_929) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_929(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_929(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_930) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_930(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_930(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_931) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_931(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_931(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_932) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_932(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_932(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_933) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_933(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_933(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_934) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_934(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_934(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_935) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_935(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_935(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_936) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_936(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_936(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_937) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_937(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_937(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_938) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_938(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_938(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_939) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_939(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_939(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_940) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_940(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_940(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_941) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_941(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_941(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_942) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_942(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_942(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_943) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_943(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_943(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_944) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_944(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_944(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_945) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_945(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_945(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_946) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_946(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_946(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_947) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_947(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_947(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_948) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_948(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_948(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_949) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_949(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_949(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_950) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_950(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_950(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_951) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_951(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_951(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_952) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_952(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_952(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_953) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_953(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_953(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_954) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_954(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_954(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_955) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_955(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_955(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_956) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_956(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_956(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_957) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_957(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_957(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_958) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_958(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_958(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_959) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_959(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_959(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_960) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_960(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_960(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_961) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_961(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_961(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_962) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_962(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_962(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_963) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_963(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_963(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_964) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_964(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_964(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_965) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_965(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_965(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_966) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_966(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_966(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_967) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_967(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_967(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_968) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_968(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_968(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_969) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_969(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_969(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_970) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_970(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_970(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_971) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_971(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_971(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_972) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_972(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_972(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_973) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_973(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_973(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_974) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_974(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_974(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_975) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_975(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_975(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_976) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_976(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_976(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_977) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_977(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_977(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_978) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_978(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_978(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_979) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_979(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_979(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_980) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_980(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_980(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_981) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_981(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_981(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_982) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_982(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_982(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_983) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_983(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_983(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_984) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_984(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_984(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_985) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_985(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_985(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_986) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_986(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_986(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_987) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_987(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_987(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_988) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_988(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_988(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_989) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_989(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_989(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_990) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_990(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_990(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_991) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_991(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_991(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_992) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_992(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_992(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_993) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_993(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_993(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_994) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_994(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_994(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_995) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_995(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_995(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_996) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_996(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_996(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_997) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_997(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_997(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_998) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_998(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_998(FUNC)
+#endif
+
+#if defined(EIGEN_TEST_PART_999) || defined(EIGEN_TEST_PART_ALL)
+#define CALL_SUBTEST_999(FUNC) CALL_SUBTEST(FUNC)
+#else
+#define CALL_SUBTEST_999(FUNC)
+#endif
+
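The generated macros above extend Eigen's test-splitting scheme up to part 999: a test translation unit compiled with -DEIGEN_TEST_PART_N expands only CALL_SUBTEST_N, while -DEIGEN_TEST_PART_ALL enables every subtest. A minimal sketch of how a test file plugs into this (the test name and check are hypothetical, not part of this patch):

    // hypothetical_test.cpp -- illustration only
    #include "main.h"   // provides CALL_SUBTEST, VERIFY_IS_APPROX and the split helpers

    template<typename Scalar> void check_something()
    {
      VERIFY_IS_APPROX(Scalar(2) + Scalar(2), Scalar(4));
    }

    EIGEN_DECLARE_TEST(hypothetical_test)
    {
      CALL_SUBTEST_1( check_something<float>() );   // compiled in only with EIGEN_TEST_PART_1 (or _ALL)
      CALL_SUBTEST_2( check_something<double>() );  // compiled in only with EIGEN_TEST_PART_2 (or _ALL)
    }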
diff --git a/test/spqr_support.cpp b/test/spqr_support.cpp
index 81e63b6a5..79c2c12fc 100644
--- a/test/spqr_support.cpp
+++ b/test/spqr_support.cpp
@@ -57,7 +57,7 @@ template<typename Scalar> void test_spqr_scalar()
refX = dA.colPivHouseholderQr().solve(b);
VERIFY(x.isApprox(refX,test_precision<Scalar>()));
}
-void test_spqr_support()
+EIGEN_DECLARE_TEST(spqr_support)
{
CALL_SUBTEST_1(test_spqr_scalar<double>());
CALL_SUBTEST_2(test_spqr_scalar<std::complex<double> >());
diff --git a/test/stable_norm.cpp b/test/stable_norm.cpp
index c3eb5ff31..ee5f91674 100644
--- a/test/stable_norm.cpp
+++ b/test/stable_norm.cpp
@@ -21,7 +21,6 @@ template<typename MatrixType> void stable_norm(const MatrixType& m)
*/
using std::sqrt;
using std::abs;
- typedef typename MatrixType::Index Index;
typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
@@ -65,6 +64,8 @@ template<typename MatrixType> void stable_norm(const MatrixType& m)
factor = internal::random<Scalar>();
Scalar small = factor * ((std::numeric_limits<RealScalar>::min)() * RealScalar(1e4));
+ Scalar one(1);
+
MatrixType vzero = MatrixType::Zero(rows, cols),
vrand = MatrixType::Random(rows, cols),
vbig(rows, cols),
@@ -78,6 +79,14 @@ template<typename MatrixType> void stable_norm(const MatrixType& m)
VERIFY_IS_APPROX(vrand.blueNorm(), vrand.norm());
VERIFY_IS_APPROX(vrand.hypotNorm(), vrand.norm());
+ // test with expressions as input
+ VERIFY_IS_APPROX((one*vrand).stableNorm(), vrand.norm());
+ VERIFY_IS_APPROX((one*vrand).blueNorm(), vrand.norm());
+ VERIFY_IS_APPROX((one*vrand).hypotNorm(), vrand.norm());
+ VERIFY_IS_APPROX((one*vrand+one*vrand-one*vrand).stableNorm(), vrand.norm());
+ VERIFY_IS_APPROX((one*vrand+one*vrand-one*vrand).blueNorm(), vrand.norm());
+ VERIFY_IS_APPROX((one*vrand+one*vrand-one*vrand).hypotNorm(), vrand.norm());
+
RealScalar size = static_cast<RealScalar>(m.size());
// test numext::isfinite
@@ -180,13 +189,51 @@ template<typename MatrixType> void stable_norm(const MatrixType& m)
}
}
-void test_stable_norm()
+template<typename Scalar>
+void test_hypot()
+{
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+ Scalar factor = internal::random<Scalar>();
+ while(numext::abs2(factor)<RealScalar(1e-4))
+ factor = internal::random<Scalar>();
+ Scalar big = factor * ((std::numeric_limits<RealScalar>::max)() * RealScalar(1e-4));
+
+ factor = internal::random<Scalar>();
+ while(numext::abs2(factor)<RealScalar(1e-4))
+ factor = internal::random<Scalar>();
+ Scalar small = factor * ((std::numeric_limits<RealScalar>::min)() * RealScalar(1e4));
+
+ Scalar one (1),
+ zero (0),
+ sqrt2 (std::sqrt(2)),
+ nan (std::numeric_limits<RealScalar>::quiet_NaN());
+
+ Scalar a = internal::random<Scalar>(-1,1);
+ Scalar b = internal::random<Scalar>(-1,1);
+ VERIFY_IS_APPROX(numext::hypot(a,b),std::sqrt(numext::abs2(a)+numext::abs2(b)));
+ VERIFY_IS_EQUAL(numext::hypot(zero,zero), zero);
+ VERIFY_IS_APPROX(numext::hypot(one, one), sqrt2);
+ VERIFY_IS_APPROX(numext::hypot(big,big), sqrt2*numext::abs(big));
+ VERIFY_IS_APPROX(numext::hypot(small,small), sqrt2*numext::abs(small));
+ VERIFY_IS_APPROX(numext::hypot(small,big), numext::abs(big));
+ VERIFY((numext::isnan)(numext::hypot(nan,a)));
+ VERIFY((numext::isnan)(numext::hypot(a,nan)));
+}
+
+EIGEN_DECLARE_TEST(stable_norm)
{
for(int i = 0; i < g_repeat; i++) {
+ CALL_SUBTEST_3( test_hypot<double>() );
+ CALL_SUBTEST_4( test_hypot<float>() );
+ CALL_SUBTEST_5( test_hypot<std::complex<double> >() );
+ CALL_SUBTEST_6( test_hypot<std::complex<float> >() );
+
CALL_SUBTEST_1( stable_norm(Matrix<float, 1, 1>()) );
CALL_SUBTEST_2( stable_norm(Vector4d()) );
CALL_SUBTEST_3( stable_norm(VectorXd(internal::random<int>(10,2000))) );
+ CALL_SUBTEST_3( stable_norm(MatrixXd(internal::random<int>(10,200), internal::random<int>(10,200))) );
CALL_SUBTEST_4( stable_norm(VectorXf(internal::random<int>(10,2000))) );
CALL_SUBTEST_5( stable_norm(VectorXcd(internal::random<int>(10,2000))) );
+ CALL_SUBTEST_6( stable_norm(VectorXcf(internal::random<int>(10,2000))) );
}
}
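The new test_hypot above checks that numext::hypot keeps full accuracy when both arguments sit near the overflow or underflow threshold. A minimal standalone sketch of the usual scaling trick (a common implementation strategy, not necessarily the exact one Eigen uses):

    #include <algorithm>
    #include <cmath>
    #include <cstdio>
    #include <limits>

    // Naive sqrt(a*a + b*b) overflows for large inputs; dividing by the larger
    // magnitude keeps the intermediate result in range.
    double robust_hypot(double a, double b)  // illustrative helper, not an Eigen API
    {
      double p = std::max(std::abs(a), std::abs(b));
      double q = std::min(std::abs(a), std::abs(b));
      if (p == 0.0) return 0.0;
      double r = q / p;
      return p * std::sqrt(1.0 + r * r);
    }

    int main()
    {
      double big = 1e-4 * std::numeric_limits<double>::max();
      std::printf("naive:  %g\n", std::sqrt(big * big + big * big)); // overflows to inf
      std::printf("scaled: %g\n", robust_hypot(big, big));           // ~ sqrt(2)*big
      return 0;
    }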
diff --git a/test/stddeque.cpp b/test/stddeque.cpp
index bb4b476f3..0a6aa2572 100644
--- a/test/stddeque.cpp
+++ b/test/stddeque.cpp
@@ -15,8 +15,6 @@
template<typename MatrixType>
void check_stddeque_matrix(const MatrixType& m)
{
- typedef typename MatrixType::Index Index;
-
Index rows = m.rows();
Index cols = m.cols();
MatrixType x = MatrixType::Random(rows,cols), y = MatrixType::Random(rows,cols);
@@ -102,7 +100,7 @@ void check_stddeque_quaternion(const QuaternionType&)
VERIFY_IS_APPROX(v.back(), x);
}
-void test_stddeque()
+EIGEN_DECLARE_TEST(stddeque)
{
// some non vectorizable fixed sizes
CALL_SUBTEST_1(check_stddeque_matrix(Vector2f()));
diff --git a/test/stddeque_overload.cpp b/test/stddeque_overload.cpp
index 4da618bbf..eebe93d81 100644
--- a/test/stddeque_overload.cpp
+++ b/test/stddeque_overload.cpp
@@ -28,8 +28,8 @@ EIGEN_DEFINE_STL_DEQUE_SPECIALIZATION(Quaterniond)
template<typename MatrixType>
void check_stddeque_matrix(const MatrixType& m)
{
- typename MatrixType::Index rows = m.rows();
- typename MatrixType::Index cols = m.cols();
+ Index rows = m.rows();
+ Index cols = m.cols();
MatrixType x = MatrixType::Random(rows,cols), y = MatrixType::Random(rows,cols);
std::deque<MatrixType> v(10, MatrixType(rows,cols)), w(20, y);
v[5] = x;
@@ -128,7 +128,7 @@ void check_stddeque_quaternion(const QuaternionType&)
}
}
-void test_stddeque_overload()
+EIGEN_DECLARE_TEST(stddeque_overload)
{
// some non vectorizable fixed sizes
CALL_SUBTEST_1(check_stddeque_matrix(Vector2f()));
diff --git a/test/stdlist.cpp b/test/stdlist.cpp
index 17cce779a..c9e5b7286 100644
--- a/test/stdlist.cpp
+++ b/test/stdlist.cpp
@@ -15,8 +15,6 @@
template<typename MatrixType>
void check_stdlist_matrix(const MatrixType& m)
{
- typedef typename MatrixType::Index Index;
-
Index rows = m.rows();
Index cols = m.cols();
MatrixType x = MatrixType::Random(rows,cols), y = MatrixType::Random(rows,cols);
@@ -102,7 +100,7 @@ void check_stdlist_quaternion(const QuaternionType&)
VERIFY_IS_APPROX(v.back(), x);
}
-void test_stdlist()
+EIGEN_DECLARE_TEST(stdlist)
{
// some non vectorizable fixed sizes
CALL_SUBTEST_1(check_stdlist_matrix(Vector2f()));
diff --git a/test/stdlist_overload.cpp b/test/stdlist_overload.cpp
index bb910bd43..93b6fc9ed 100644
--- a/test/stdlist_overload.cpp
+++ b/test/stdlist_overload.cpp
@@ -44,8 +44,8 @@ void set(Container & c, Position position, const Value & value)
template<typename MatrixType>
void check_stdlist_matrix(const MatrixType& m)
{
- typename MatrixType::Index rows = m.rows();
- typename MatrixType::Index cols = m.cols();
+ Index rows = m.rows();
+ Index cols = m.cols();
MatrixType x = MatrixType::Random(rows,cols), y = MatrixType::Random(rows,cols);
std::list<MatrixType> v(10, MatrixType(rows,cols)), w(20, y);
typename std::list<MatrixType>::iterator itv = get(v, 5);
@@ -162,7 +162,7 @@ void check_stdlist_quaternion(const QuaternionType&)
}
}
-void test_stdlist_overload()
+EIGEN_DECLARE_TEST(stdlist_overload)
{
// some non vectorizable fixed sizes
CALL_SUBTEST_1(check_stdlist_matrix(Vector2f()));
diff --git a/test/stdvector.cpp b/test/stdvector.cpp
index 50cb3341d..e2b7bd061 100644
--- a/test/stdvector.cpp
+++ b/test/stdvector.cpp
@@ -14,8 +14,8 @@
template<typename MatrixType>
void check_stdvector_matrix(const MatrixType& m)
{
- typename MatrixType::Index rows = m.rows();
- typename MatrixType::Index cols = m.cols();
+ Index rows = m.rows();
+ Index cols = m.cols();
MatrixType x = MatrixType::Random(rows,cols), y = MatrixType::Random(rows,cols);
std::vector<MatrixType,Eigen::aligned_allocator<MatrixType> > v(10, MatrixType(rows,cols)), w(20, y);
v[5] = x;
@@ -117,7 +117,7 @@ void check_stdvector_quaternion(const QuaternionType&)
}
}
-void test_stdvector()
+EIGEN_DECLARE_TEST(stdvector)
{
// some non vectorizable fixed sizes
CALL_SUBTEST_1(check_stdvector_matrix(Vector2f()));
diff --git a/test/stdvector_overload.cpp b/test/stdvector_overload.cpp
index 959665954..5c042a64c 100644
--- a/test/stdvector_overload.cpp
+++ b/test/stdvector_overload.cpp
@@ -28,8 +28,8 @@ EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION(Quaterniond)
template<typename MatrixType>
void check_stdvector_matrix(const MatrixType& m)
{
- typename MatrixType::Index rows = m.rows();
- typename MatrixType::Index cols = m.cols();
+ Index rows = m.rows();
+ Index cols = m.cols();
MatrixType x = MatrixType::Random(rows,cols), y = MatrixType::Random(rows,cols);
std::vector<MatrixType> v(10, MatrixType(rows,cols)), w(20, y);
v[5] = x;
@@ -131,7 +131,7 @@ void check_stdvector_quaternion(const QuaternionType&)
}
}
-void test_stdvector_overload()
+EIGEN_DECLARE_TEST(stdvector_overload)
{
// some non vectorizable fixed sizes
CALL_SUBTEST_1(check_stdvector_matrix(Vector2f()));
diff --git a/test/superlu_support.cpp b/test/superlu_support.cpp
index 98a7bc5c8..55450c868 100644
--- a/test/superlu_support.cpp
+++ b/test/superlu_support.cpp
@@ -12,7 +12,7 @@
#include <Eigen/SuperLUSupport>
-void test_superlu_support()
+EIGEN_DECLARE_TEST(superlu_support)
{
SuperLU<SparseMatrix<double> > superlu_double_colmajor;
SuperLU<SparseMatrix<std::complex<double> > > superlu_cplxdouble_colmajor;
diff --git a/test/svd_common.h b/test/svd_common.h
index 605d5dfef..cba066593 100644
--- a/test/svd_common.h
+++ b/test/svd_common.h
@@ -23,7 +23,6 @@
template<typename SvdType, typename MatrixType>
void svd_check_full(const MatrixType& m, const SvdType& svd)
{
- typedef typename MatrixType::Index Index;
Index rows = m.rows();
Index cols = m.cols();
@@ -101,7 +100,6 @@ void svd_least_square(const MatrixType& m, unsigned int computationOptions)
{
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
- typedef typename MatrixType::Index Index;
Index rows = m.rows();
Index cols = m.cols();
@@ -168,7 +166,6 @@ template<typename MatrixType>
void svd_min_norm(const MatrixType& m, unsigned int computationOptions)
{
typedef typename MatrixType::Scalar Scalar;
- typedef typename MatrixType::Index Index;
Index cols = m.cols();
enum {
@@ -261,7 +258,6 @@ void svd_test_all_computation_options(const MatrixType& m, bool full_only)
CALL_SUBTEST(( svd_min_norm(m, ComputeThinU | ComputeThinV) ));
// test reconstruction
- typedef typename MatrixType::Index Index;
Index diagSize = (std::min)(m.rows(), m.cols());
SvdType svd(m, ComputeThinU | ComputeThinV);
VERIFY_IS_APPROX(m, svd.matrixU().leftCols(diagSize) * svd.singularValues().asDiagonal() * svd.matrixV().leftCols(diagSize).adjoint());
@@ -437,7 +433,6 @@ template<typename SvdType,typename MatrixType>
void svd_verify_assert(const MatrixType& m)
{
typedef typename MatrixType::Scalar Scalar;
- typedef typename MatrixType::Index Index;
Index rows = m.rows();
Index cols = m.cols();
diff --git a/test/svd_fill.h b/test/svd_fill.h
index 3877c0c7e..d68647e99 100644
--- a/test/svd_fill.h
+++ b/test/svd_fill.h
@@ -23,7 +23,6 @@ void svd_fill_random(MatrixType &m, int Option = 0)
using std::pow;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
- typedef typename MatrixType::Index Index;
Index diagSize = (std::min)(m.rows(), m.cols());
RealScalar s = std::numeric_limits<RealScalar>::max_exponent10/4;
s = internal::random<RealScalar>(1,s);
diff --git a/test/swap.cpp b/test/swap.cpp
index f76e3624d..5b259d3ec 100644
--- a/test/swap.cpp
+++ b/test/swap.cpp
@@ -28,8 +28,8 @@ template<typename MatrixType> void swap(const MatrixType& m)
typedef typename MatrixType::Scalar Scalar;
eigen_assert((!internal::is_same<MatrixType,OtherMatrixType>::value));
- typename MatrixType::Index rows = m.rows();
- typename MatrixType::Index cols = m.cols();
+ Index rows = m.rows();
+ Index cols = m.cols();
// construct 3 matrix guaranteed to be distinct
MatrixType m1 = MatrixType::Random(rows,cols);
@@ -83,7 +83,7 @@ template<typename MatrixType> void swap(const MatrixType& m)
}
}
-void test_swap()
+EIGEN_DECLARE_TEST(swap)
{
int s = internal::random<int>(1,EIGEN_TEST_MAX_SIZE);
CALL_SUBTEST_1( swap(Matrix3f()) ); // fixed size, no vectorization
diff --git a/test/symbolic_index.cpp b/test/symbolic_index.cpp
index 1db85144b..bccf1b884 100644
--- a/test/symbolic_index.cpp
+++ b/test/symbolic_index.cpp
@@ -54,17 +54,29 @@ is_same_type(const T1&, const T2&)
template<typename T1,typename T2>
bool is_same_symb(const T1& a, const T2& b, Index size)
{
- using Eigen::placeholders::last;
return a.eval(last=size-1) == b.eval(last=size-1);
}
+template<typename T>
+void check_is_symbolic(const T&) {
+ STATIC_CHECK(( symbolic::is_symbolic<T>::value ))
+}
+
+template<typename T>
+void check_isnot_symbolic(const T&) {
+ STATIC_CHECK(( !symbolic::is_symbolic<T>::value ))
+}
#define VERIFY_EQ_INT(A,B) VERIFY_IS_APPROX(int(A),int(B))
void check_symbolic_index()
{
- using Eigen::placeholders::last;
- using Eigen::placeholders::end;
+ check_is_symbolic(last);
+ check_is_symbolic(lastp1);
+ check_is_symbolic(last+1);
+ check_is_symbolic(last-lastp1);
+ check_is_symbolic(2*last-lastp1/2);
+ check_isnot_symbolic(fix<3>());
Index size=100;
@@ -77,27 +89,27 @@ void check_symbolic_index()
VERIFY( is_same_type( fix<9>()|fix<2>(), fix<9|2>() ) );
VERIFY( is_same_type( fix<9>()/2, int(9/2) ) );
- VERIFY( is_same_symb( end-1, last, size) );
- VERIFY( is_same_symb( end-fix<1>, last, size) );
+ VERIFY( is_same_symb( lastp1-1, last, size) );
+ VERIFY( is_same_symb( lastp1-fix<1>, last, size) );
VERIFY_IS_EQUAL( ( (last*5-2)/3 ).eval(last=size-1), ((size-1)*5-2)/3 );
VERIFY_IS_EQUAL( ( (last*fix<5>-fix<2>)/fix<3> ).eval(last=size-1), ((size-1)*5-2)/3 );
- VERIFY_IS_EQUAL( ( -last*end ).eval(last=size-1), -(size-1)*size );
- VERIFY_IS_EQUAL( ( end-3*last ).eval(last=size-1), size- 3*(size-1) );
- VERIFY_IS_EQUAL( ( (end-3*last)/end ).eval(last=size-1), (size- 3*(size-1))/size );
+ VERIFY_IS_EQUAL( ( -last*lastp1 ).eval(last=size-1), -(size-1)*size );
+ VERIFY_IS_EQUAL( ( lastp1-3*last ).eval(last=size-1), size- 3*(size-1) );
+ VERIFY_IS_EQUAL( ( (lastp1-3*last)/lastp1 ).eval(last=size-1), (size- 3*(size-1))/size );
#if EIGEN_HAS_CXX14
{
- struct x_tag {}; static const Symbolic::SymbolExpr<x_tag> x;
- struct y_tag {}; static const Symbolic::SymbolExpr<y_tag> y;
- struct z_tag {}; static const Symbolic::SymbolExpr<z_tag> z;
+ struct x_tag {}; static const symbolic::SymbolExpr<x_tag> x;
+ struct y_tag {}; static const symbolic::SymbolExpr<y_tag> y;
+ struct z_tag {}; static const symbolic::SymbolExpr<z_tag> z;
VERIFY_IS_APPROX( int(((x+3)/y+z).eval(x=6,y=3,z=-13)), (6+3)/3+(-13) );
}
#endif
}
-void test_symbolic_index()
+EIGEN_DECLARE_TEST(symbolic_index)
{
CALL_SUBTEST_1( check_symbolic_index() );
CALL_SUBTEST_2( check_symbolic_index() );
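The renaming above replaces the old `end` placeholder with `lastp1` (last plus one). A minimal sketch, assuming the Eigen 3.4-style symbolic indexing API, of how the two symbols are used on a dense vector:

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      using Eigen::last;    // resolves to v.size()-1 inside an indexing expression
      using Eigen::lastp1;  // resolves to v.size()
      Eigen::VectorXd v = Eigen::VectorXd::LinSpaced(10, 0.0, 9.0);

      std::cout << v(last) << "\n";                                  // 9
      std::cout << v(Eigen::seq(2, last - 1)).transpose() << "\n";   // 2 3 ... 8
      std::cout << v(Eigen::seq(0, lastp1 - 2)).size() << "\n";      // 9
      return 0;
    }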
diff --git a/test/triangular.cpp b/test/triangular.cpp
index b96856486..99ef1dcda 100644
--- a/test/triangular.cpp
+++ b/test/triangular.cpp
@@ -19,8 +19,8 @@ template<typename MatrixType> void triangular_square(const MatrixType& m)
RealScalar largerEps = 10*test_precision<RealScalar>();
- typename MatrixType::Index rows = m.rows();
- typename MatrixType::Index cols = m.cols();
+ Index rows = m.rows();
+ Index cols = m.cols();
MatrixType m1 = MatrixType::Random(rows, cols),
m2 = MatrixType::Random(rows, cols),
@@ -68,7 +68,7 @@ template<typename MatrixType> void triangular_square(const MatrixType& m)
while (numext::abs2(m1(i,i))<RealScalar(1e-1)) m1(i,i) = internal::random<Scalar>();
Transpose<MatrixType> trm4(m4);
- // test back and forward subsitution with a vector as the rhs
+ // test back and forward substitution with a vector as the rhs
m3 = m1.template triangularView<Upper>();
VERIFY(v2.isApprox(m3.adjoint() * (m1.adjoint().template triangularView<Lower>().solve(v2)), largerEps));
m3 = m1.template triangularView<Lower>();
@@ -134,7 +134,6 @@ template<typename MatrixType> void triangular_square(const MatrixType& m)
template<typename MatrixType> void triangular_rect(const MatrixType& m)
{
- typedef const typename MatrixType::Index Index;
typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
enum { Rows = MatrixType::RowsAtCompileTime, Cols = MatrixType::ColsAtCompileTime };
@@ -221,7 +220,7 @@ void bug_159()
EIGEN_UNUSED_VARIABLE(m)
}
-void test_triangular()
+EIGEN_DECLARE_TEST(triangular)
{
int maxsize = (std::min)(EIGEN_TEST_MAX_SIZE,20);
for(int i = 0; i < g_repeat ; i++)
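Besides fixing the "substitution" typo, the hunk above keeps exercising triangular solves. A minimal sketch of the pattern being tested, using the stable dense API:

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::Matrix3d m = Eigen::Matrix3d::Random();
      m.diagonal().array() += 4.0;            // keep the triangular factor well conditioned
      Eigen::Vector3d b = Eigen::Vector3d::Random();

      // Back substitution: solve U*x = b using only the upper triangle of m.
      Eigen::Vector3d x = m.triangularView<Eigen::Upper>().solve(b);
      std::cout << (m.triangularView<Eigen::Upper>() * x - b).norm() << "\n";  // ~0
      return 0;
    }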
diff --git a/test/umeyama.cpp b/test/umeyama.cpp
index 2e8092434..1590a0a81 100644
--- a/test/umeyama.cpp
+++ b/test/umeyama.cpp
@@ -155,7 +155,7 @@ void run_fixed_size_test(int num_elements)
VERIFY(error < Scalar(16)*std::numeric_limits<Scalar>::epsilon());
}
-void test_umeyama()
+EIGEN_DECLARE_TEST(umeyama)
{
for (int i=0; i<g_repeat; ++i)
{
diff --git a/test/umfpack_support.cpp b/test/umfpack_support.cpp
index 37ab11f0b..d8f2a6f80 100644
--- a/test/umfpack_support.cpp
+++ b/test/umfpack_support.cpp
@@ -12,10 +12,10 @@
#include <Eigen/UmfPackSupport>
-template<typename T> void test_umfpack_support_T()
+template<typename T1, typename T2> void test_umfpack_support_T()
{
- UmfPackLU<SparseMatrix<T, ColMajor> > umfpack_colmajor;
- UmfPackLU<SparseMatrix<T, RowMajor> > umfpack_rowmajor;
+ UmfPackLU<SparseMatrix<T1, ColMajor, T2> > umfpack_colmajor;
+ UmfPackLU<SparseMatrix<T1, RowMajor, T2> > umfpack_rowmajor;
check_sparse_square_solving(umfpack_colmajor);
check_sparse_square_solving(umfpack_rowmajor);
@@ -24,9 +24,11 @@ template<typename T> void test_umfpack_support_T()
check_sparse_square_determinant(umfpack_rowmajor);
}
-void test_umfpack_support()
+EIGEN_DECLARE_TEST(umfpack_support)
{
- CALL_SUBTEST_1(test_umfpack_support_T<double>());
- CALL_SUBTEST_2(test_umfpack_support_T<std::complex<double> >());
+ CALL_SUBTEST_1((test_umfpack_support_T<double, int>()));
+ CALL_SUBTEST_2((test_umfpack_support_T<std::complex<double>, int>()));
+ CALL_SUBTEST_3((test_umfpack_support_T<double, long >()));
+ CALL_SUBTEST_4((test_umfpack_support_T<std::complex<double>, long>()));
}
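The change above parameterizes the UMFPACK tests over the sparse matrix's storage index type, so both the int and long index variants get covered. A minimal sketch, assuming the UmfPackSupport module is available and the program links against UMFPACK:

    #include <Eigen/Sparse>
    #include <Eigen/UmfPackSupport>

    int solve_with_long_indices()  // illustrative helper, not part of the test suite
    {
      typedef Eigen::SparseMatrix<double, Eigen::ColMajor, long> SpMat;  // 64-bit storage indices
      SpMat A(3, 3);
      A.insert(0, 0) = 4.0; A.insert(1, 1) = 5.0; A.insert(2, 2) = 6.0;
      A.insert(0, 2) = 1.0;
      A.makeCompressed();

      Eigen::VectorXd b = Eigen::VectorXd::Ones(3);
      Eigen::UmfPackLU<SpMat> lu(A);
      if (lu.info() != Eigen::Success) return 1;
      Eigen::VectorXd x = lu.solve(b);
      return (A * x - b).norm() < 1e-12 ? 0 : 1;
    }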
diff --git a/test/unalignedassert.cpp b/test/unalignedassert.cpp
index 731a08977..120cc42bb 100644
--- a/test/unalignedassert.cpp
+++ b/test/unalignedassert.cpp
@@ -174,7 +174,7 @@ void unalignedassert()
#endif
}
-void test_unalignedassert()
+EIGEN_DECLARE_TEST(unalignedassert)
{
CALL_SUBTEST(unalignedassert());
}
diff --git a/test/unalignedcount.cpp b/test/unalignedcount.cpp
index d6ffeafdf..069fc1bb9 100644
--- a/test/unalignedcount.cpp
+++ b/test/unalignedcount.cpp
@@ -28,7 +28,7 @@ static int nb_storeu;
#include "main.h"
-void test_unalignedcount()
+EIGEN_DECLARE_TEST(unalignedcount)
{
#if defined(EIGEN_VECTORIZE_AVX)
VectorXf a(40), b(40);
diff --git a/test/upperbidiagonalization.cpp b/test/upperbidiagonalization.cpp
index 847b34b55..945c99959 100644
--- a/test/upperbidiagonalization.cpp
+++ b/test/upperbidiagonalization.cpp
@@ -12,8 +12,8 @@
template<typename MatrixType> void upperbidiag(const MatrixType& m)
{
- const typename MatrixType::Index rows = m.rows();
- const typename MatrixType::Index cols = m.cols();
+ const Index rows = m.rows();
+ const Index cols = m.cols();
typedef Matrix<typename MatrixType::RealScalar, MatrixType::RowsAtCompileTime, MatrixType::ColsAtCompileTime> RealMatrixType;
typedef Matrix<typename MatrixType::Scalar, MatrixType::ColsAtCompileTime, MatrixType::RowsAtCompileTime> TransposeMatrixType;
@@ -29,7 +29,7 @@ template<typename MatrixType> void upperbidiag(const MatrixType& m)
VERIFY_IS_APPROX(a.adjoint(),d);
}
-void test_upperbidiagonalization()
+EIGEN_DECLARE_TEST(upperbidiagonalization)
{
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1( upperbidiag(MatrixXf(3,3)) );
diff --git a/test/vectorization_logic.cpp b/test/vectorization_logic.cpp
index 83c1439ad..c15f75103 100644
--- a/test/vectorization_logic.cpp
+++ b/test/vectorization_logic.cpp
@@ -22,6 +22,14 @@
#include "main.h"
#include <typeinfo>
+// Disable "ignoring attributes on template argument"
+// for packet_traits<Packet*>
+// => The only workaround would be to wrap _m128 and the likes
+// within wrappers.
+#if EIGEN_GNUC_AT_LEAST(6,0)
+ #pragma GCC diagnostic ignored "-Wignored-attributes"
+#endif
+
using internal::demangle_flags;
using internal::demangle_traversal;
using internal::demangle_unrolling;
@@ -207,6 +215,12 @@ struct vectorization_logic
VERIFY(test_redux(Vector1(),
LinearVectorizedTraversal,CompleteUnrolling));
+ VERIFY(test_redux(Vector1().array()*Vector1().array(),
+ LinearVectorizedTraversal,CompleteUnrolling));
+
+ VERIFY(test_redux((Vector1().array()*Vector1().array()).col(0),
+ LinearVectorizedTraversal,CompleteUnrolling));
+
VERIFY(test_redux(Matrix<Scalar,PacketSize,3>(),
LinearVectorizedTraversal,CompleteUnrolling));
@@ -380,7 +394,7 @@ template<typename Scalar> struct vectorization_logic_half<Scalar,false>
static void run() {}
};
-void test_vectorization_logic()
+EIGEN_DECLARE_TEST(vectorization_logic)
{
#ifdef EIGEN_VECTORIZE
diff --git a/test/vectorwiseop.cpp b/test/vectorwiseop.cpp
index f3ab561ee..2d7ddbed1 100644
--- a/test/vectorwiseop.cpp
+++ b/test/vectorwiseop.cpp
@@ -15,7 +15,6 @@
template<typename ArrayType> void vectorwiseop_array(const ArrayType& m)
{
- typedef typename ArrayType::Index Index;
typedef typename ArrayType::Scalar Scalar;
typedef Array<Scalar, ArrayType::RowsAtCompileTime, 1> ColVectorType;
typedef Array<Scalar, 1, ArrayType::ColsAtCompileTime> RowVectorType;
@@ -129,7 +128,6 @@ template<typename ArrayType> void vectorwiseop_array(const ArrayType& m)
template<typename MatrixType> void vectorwiseop_matrix(const MatrixType& m)
{
- typedef typename MatrixType::Index Index;
typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> ColVectorType;
@@ -239,7 +237,7 @@ template<typename MatrixType> void vectorwiseop_matrix(const MatrixType& m)
VERIFY_EVALUATION_COUNT( m2 = (m1.rowwise() - m1.colwise().sum()/RealScalar(m1.rows())), (MatrixType::RowsAtCompileTime!=1 ? 1 : 0) );
}
-void test_vectorwiseop()
+EIGEN_DECLARE_TEST(vectorwiseop)
{
CALL_SUBTEST_1( vectorwiseop_array(Array22cd()) );
CALL_SUBTEST_2( vectorwiseop_array(Array<double, 3, 2>()) );
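The VERIFY_EVALUATION_COUNT check above counts the temporaries created when the column sums are broadcast across the rows. A minimal sketch of that broadcasting pattern:

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::MatrixXd m = Eigen::MatrixXd::Random(4, 3);

      // colwise().mean() is a row vector; rowwise() broadcasting subtracts it from every row.
      Eigen::RowVectorXd mean = m.colwise().mean();
      Eigen::MatrixXd centered = m.rowwise() - mean;

      std::cout << centered.colwise().sum() << "\n";  // ~ [0 0 0]
      return 0;
    }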
diff --git a/test/visitor.cpp b/test/visitor.cpp
index 844170ec6..de938fc95 100644
--- a/test/visitor.cpp
+++ b/test/visitor.cpp
@@ -12,7 +12,6 @@
template<typename MatrixType> void matrixVisitor(const MatrixType& p)
{
typedef typename MatrixType::Scalar Scalar;
- typedef typename MatrixType::Index Index;
Index rows = p.rows();
Index cols = p.cols();
@@ -65,7 +64,6 @@ template<typename MatrixType> void matrixVisitor(const MatrixType& p)
template<typename VectorType> void vectorVisitor(const VectorType& w)
{
typedef typename VectorType::Scalar Scalar;
- typedef typename VectorType::Index Index;
Index size = w.size();
@@ -115,7 +113,7 @@ template<typename VectorType> void vectorVisitor(const VectorType& w)
VERIFY(eigen_maxidx == (std::min)(idx0,idx2));
}
-void test_visitor()
+EIGEN_DECLARE_TEST(visitor)
{
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1( matrixVisitor(Matrix<float, 1, 1>()) );
diff --git a/test/zerosized.cpp b/test/zerosized.cpp
index 477ff0070..edd1f6925 100644
--- a/test/zerosized.cpp
+++ b/test/zerosized.cpp
@@ -81,7 +81,7 @@ template<typename VectorType> void zeroSizedVector()
}
}
-void test_zerosized()
+EIGEN_DECLARE_TEST(zerosized)
{
zeroSizedMatrix<Matrix2d>();
zeroSizedMatrix<Matrix3i>();
diff --git a/unsupported/Eigen/AdolcForward b/unsupported/Eigen/AdolcForward
index 15f5f0731..9b8d3cd1a 100644
--- a/unsupported/Eigen/AdolcForward
+++ b/unsupported/Eigen/AdolcForward
@@ -40,7 +40,7 @@
# undef realloc
#endif
-#include <Eigen/Core>
+#include "../../Eigen/Core"
namespace Eigen {
diff --git a/unsupported/Eigen/AlignedVector3 b/unsupported/Eigen/AlignedVector3
index 47a86d4c0..a67ee6e42 100644
--- a/unsupported/Eigen/AlignedVector3
+++ b/unsupported/Eigen/AlignedVector3
@@ -10,7 +10,9 @@
#ifndef EIGEN_ALIGNED_VECTOR3
#define EIGEN_ALIGNED_VECTOR3
-#include <Eigen/Geometry>
+#include "../../Eigen/Geometry"
+
+#include "../../Eigen/src/Core/util/DisableStupidWarnings.h"
namespace Eigen {
@@ -221,4 +223,6 @@ struct evaluator<AlignedVector3<Scalar> >
}
+#include "../../Eigen/src/Core/util/ReenableStupidWarnings.h"
+
#endif // EIGEN_ALIGNED_VECTOR3
diff --git a/unsupported/Eigen/ArpackSupport b/unsupported/Eigen/ArpackSupport
index 37a2799ef..28c95ffa2 100644
--- a/unsupported/Eigen/ArpackSupport
+++ b/unsupported/Eigen/ArpackSupport
@@ -9,9 +9,7 @@
#ifndef EIGEN_ARPACKSUPPORT_MODULE_H
#define EIGEN_ARPACKSUPPORT_MODULE_H
-#include <Eigen/Core>
-
-#include <Eigen/src/Core/util/DisableStupidWarnings.h>
+#include "../../Eigen/Core"
/** \defgroup ArpackSupport_Module Arpack support module
*
@@ -22,10 +20,12 @@
* \endcode
*/
-#include <Eigen/SparseCholesky>
+#include "../../Eigen/SparseCholesky"
+
+#include "../../Eigen/src/Core/util/DisableStupidWarnings.h"
#include "src/Eigenvalues/ArpackSelfAdjointEigenSolver.h"
-#include <Eigen/src/Core/util/ReenableStupidWarnings.h>
+#include "../../Eigen/src/Core/util/ReenableStupidWarnings.h"
#endif // EIGEN_ARPACKSUPPORT_MODULE_H
/* vim: set filetype=cpp et sw=2 ts=2 ai: */
diff --git a/unsupported/Eigen/AutoDiff b/unsupported/Eigen/AutoDiff
index abf5b7d67..7a4ff460c 100644
--- a/unsupported/Eigen/AutoDiff
+++ b/unsupported/Eigen/AutoDiff
@@ -28,11 +28,17 @@ namespace Eigen {
//@{
}
+#include "../../Eigen/src/Core/util/DisableStupidWarnings.h"
+
#include "src/AutoDiff/AutoDiffScalar.h"
// #include "src/AutoDiff/AutoDiffVector.h"
#include "src/AutoDiff/AutoDiffJacobian.h"
+#include "../../Eigen/src/Core/util/ReenableStupidWarnings.h"
+
+
+
namespace Eigen {
//@}
}
diff --git a/unsupported/Eigen/BVH b/unsupported/Eigen/BVH
index 0161a5402..666c9835f 100644
--- a/unsupported/Eigen/BVH
+++ b/unsupported/Eigen/BVH
@@ -10,9 +10,9 @@
#ifndef EIGEN_BVH_MODULE_H
#define EIGEN_BVH_MODULE_H
-#include <Eigen/Core>
-#include <Eigen/Geometry>
-#include <Eigen/StdVector>
+#include "../../Eigen/Core"
+#include "../../Eigen/Geometry"
+#include "../../Eigen/StdVector"
#include <algorithm>
#include <queue>
diff --git a/unsupported/Eigen/CXX11/Tensor b/unsupported/Eigen/CXX11/Tensor
index 39916092b..14a2d0f0d 100644
--- a/unsupported/Eigen/CXX11/Tensor
+++ b/unsupported/Eigen/CXX11/Tensor
@@ -19,16 +19,16 @@
#undef isnan
#undef isinf
#undef isfinite
-#include <SYCL/sycl.hpp>
+#include <CL/sycl.hpp>
#include <iostream>
#include <map>
#include <memory>
#include <utility>
#endif
-#include <Eigen/src/Core/util/DisableStupidWarnings.h>
-
#include "../SpecialFunctions"
+
+#include "../../../Eigen/src/Core/util/DisableStupidWarnings.h"
#include "src/util/CXX11Meta.h"
#include "src/util/MaxSizeVector.h"
@@ -80,12 +80,16 @@ typedef unsigned __int64 uint64_t;
#endif
#ifdef EIGEN_USE_GPU
-#include <iostream>
-#include <cuda_runtime.h>
-#if __cplusplus >= 201103L
-#include <atomic>
-#include <unistd.h>
-#endif
+ #include <iostream>
+ #if defined(EIGEN_USE_HIP)
+ #include <hip/hip_runtime.h>
+ #else
+ #include <cuda_runtime.h>
+ #endif
+ #if __cplusplus >= 201103L
+ #include <atomic>
+ #include <unistd.h>
+ #endif
#endif
#include "src/Tensor/TensorMacros.h"
@@ -95,7 +99,7 @@ typedef unsigned __int64 uint64_t;
#include "src/Tensor/TensorCostModel.h"
#include "src/Tensor/TensorDeviceDefault.h"
#include "src/Tensor/TensorDeviceThreadPool.h"
-#include "src/Tensor/TensorDeviceCuda.h"
+#include "src/Tensor/TensorDeviceGpu.h"
#include "src/Tensor/TensorDeviceSycl.h"
#include "src/Tensor/TensorIndexList.h"
#include "src/Tensor/TensorDimensionList.h"
@@ -108,18 +112,19 @@ typedef unsigned __int64 uint64_t;
#include "src/Tensor/TensorGlobalFunctions.h"
#include "src/Tensor/TensorBase.h"
+#include "src/Tensor/TensorBlock.h"
#include "src/Tensor/TensorEvaluator.h"
#include "src/Tensor/TensorExpr.h"
#include "src/Tensor/TensorReduction.h"
-#include "src/Tensor/TensorReductionCuda.h"
+#include "src/Tensor/TensorReductionGpu.h"
#include "src/Tensor/TensorArgMax.h"
#include "src/Tensor/TensorConcatenation.h"
#include "src/Tensor/TensorContractionMapper.h"
#include "src/Tensor/TensorContractionBlocking.h"
#include "src/Tensor/TensorContraction.h"
#include "src/Tensor/TensorContractionThreadPool.h"
-#include "src/Tensor/TensorContractionCuda.h"
+#include "src/Tensor/TensorContractionGpu.h"
#include "src/Tensor/TensorConversion.h"
#include "src/Tensor/TensorConvolution.h"
#include "src/Tensor/TensorFFT.h"
@@ -141,6 +146,7 @@ typedef unsigned __int64 uint64_t;
#include "src/Tensor/TensorGenerator.h"
#include "src/Tensor/TensorAssign.h"
#include "src/Tensor/TensorScan.h"
+#include "src/Tensor/TensorTrace.h"
#include "src/Tensor/TensorSycl.h"
#include "src/Tensor/TensorExecutor.h"
@@ -154,6 +160,6 @@ typedef unsigned __int64 uint64_t;
#include "src/Tensor/TensorIO.h"
-#include <Eigen/src/Core/util/ReenableStupidWarnings.h>
+#include "../../../Eigen/src/Core/util/ReenableStupidWarnings.h"
//#endif // EIGEN_CXX11_TENSOR_MODULE
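The GPU include block above now dispatches between the CUDA and HIP runtimes. A minimal sketch, assuming a HIP toolchain is installed, of the defines a translation unit would set before including the module:

    // Hypothetical HIP-side translation unit -- illustration only.
    #define EIGEN_USE_GPU   // enable the GPU device paths
    #define EIGEN_USE_HIP   // pick <hip/hip_runtime.h> instead of <cuda_runtime.h>
    #include <unsupported/Eigen/CXX11/Tensor>

    int main() { return 0; }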
diff --git a/unsupported/Eigen/CXX11/TensorSymmetry b/unsupported/Eigen/CXX11/TensorSymmetry
index fb1b0c0fb..b09c5e472 100644
--- a/unsupported/Eigen/CXX11/TensorSymmetry
+++ b/unsupported/Eigen/CXX11/TensorSymmetry
@@ -10,9 +10,9 @@
#ifndef EIGEN_CXX11_TENSORSYMMETRY_MODULE
#define EIGEN_CXX11_TENSORSYMMETRY_MODULE
-#include <unsupported/Eigen/CXX11/Tensor>
+#include "Tensor"
-#include <Eigen/src/Core/util/DisableStupidWarnings.h>
+#include "../../../Eigen/src/Core/util/DisableStupidWarnings.h"
#include "src/util/CXX11Meta.h"
@@ -33,7 +33,7 @@
#include "src/TensorSymmetry/StaticSymmetry.h"
#include "src/TensorSymmetry/DynamicSymmetry.h"
-#include <Eigen/src/Core/util/ReenableStupidWarnings.h>
+#include "../../../Eigen/src/Core/util/ReenableStupidWarnings.h"
#endif // EIGEN_CXX11_TENSORSYMMETRY_MODULE
diff --git a/unsupported/Eigen/CXX11/ThreadPool b/unsupported/Eigen/CXX11/ThreadPool
index c34614194..d046af9b2 100644
--- a/unsupported/Eigen/CXX11/ThreadPool
+++ b/unsupported/Eigen/CXX11/ThreadPool
@@ -12,7 +12,7 @@
#include "../../../Eigen/Core"
-#include <Eigen/src/Core/util/DisableStupidWarnings.h>
+#include "../../../Eigen/src/Core/util/DisableStupidWarnings.h"
/** \defgroup CXX11_ThreadPool_Module C++11 ThreadPool Module
*
@@ -44,35 +44,31 @@
#include <thread>
#include <functional>
#include <memory>
-
#include "src/util/CXX11Meta.h"
#include "src/util/MaxSizeVector.h"
#include "src/ThreadPool/ThreadLocal.h"
+#ifndef EIGEN_THREAD_LOCAL
+// There are non-parenthesized calls to "max" in the <unordered_map> header,
+// which trigger a check in test/main.h causing compilation to fail.
+// We work around the check here by removing the check for max in
+// the case where we have to emulate thread_local.
+#ifdef max
+#undef max
+#endif
+#include <unordered_map>
+#endif
#include "src/ThreadPool/ThreadYield.h"
#include "src/ThreadPool/ThreadCancel.h"
#include "src/ThreadPool/EventCount.h"
#include "src/ThreadPool/RunQueue.h"
#include "src/ThreadPool/ThreadPoolInterface.h"
#include "src/ThreadPool/ThreadEnvironment.h"
-#include "src/ThreadPool/SimpleThreadPool.h"
+#include "src/ThreadPool/Barrier.h"
#include "src/ThreadPool/NonBlockingThreadPool.h"
-
-// Use the more efficient NonBlockingThreadPool by default.
-namespace Eigen {
-#ifndef EIGEN_USE_SIMPLE_THREAD_POOL
-template <typename Env> using ThreadPoolTempl = NonBlockingThreadPoolTempl<Env>;
-typedef NonBlockingThreadPool ThreadPool;
-#else
-template <typename Env> using ThreadPoolTempl = SimpleThreadPoolTempl<Env>;
-typedef SimpleThreadPool ThreadPool;
#endif
-} // namespace Eigen
-#endif
-
-#include <Eigen/src/Core/util/ReenableStupidWarnings.h>
+#include "../../../Eigen/src/Core/util/ReenableStupidWarnings.h"
#endif // EIGEN_CXX11_THREADPOOL_MODULE
-
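With SimpleThreadPool gone, NonBlockingThreadPool (exposed as Eigen::ThreadPool) is the only pool implementation left. A minimal sketch, assuming the unsupported Tensor module, of evaluating a tensor expression on such a pool:

    #define EIGEN_USE_THREADS
    #include <unsupported/Eigen/CXX11/Tensor>

    int main()
    {
      Eigen::ThreadPool pool(4);                    // 4 worker threads
      Eigen::ThreadPoolDevice device(&pool, 4);

      Eigen::Tensor<float, 2> a(256, 256), b(256, 256), c(256, 256);
      a.setRandom();
      b.setRandom();

      // Evaluate the expression on the pool instead of the calling thread.
      c.device(device) = a + b * 0.5f;
      return 0;
    }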
diff --git a/unsupported/Eigen/CXX11/src/Tensor/README.md b/unsupported/Eigen/CXX11/src/Tensor/README.md
index fbb7f3bfc..d6bc6d46f 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/README.md
+++ b/unsupported/Eigen/CXX11/src/Tensor/README.md
@@ -75,16 +75,16 @@ large enough to hold all the data.
// Map a tensor of ints on top of stack-allocated storage.
int storage[128]; // 2 x 4 x 2 x 8 = 128
- TensorMap<int, 4> t_4d(storage, 2, 4, 2, 8);
+ TensorMap<Tensor<int, 4>> t_4d(storage, 2, 4, 2, 8);
// The same storage can be viewed as a different tensor.
// You can also pass the sizes as an array.
- TensorMap<int, 2> t_2d(storage, 16, 8);
+ TensorMap<Tensor<int, 2>> t_2d(storage, 16, 8);
// You can also map fixed-size tensors. Here we get a 1d view of
// the 2d fixed-size tensor.
- Tensor<float, Sizes<4, 5>> t_4x3;
- TensorMap<float, 1> t_12(t_4x3, 12);
+ TensorFixedSize<float, Sizes<4, 5>> t_4x3;
+ TensorMap<Tensor<float, 1>> t_12(t_4x3.data(), 12);
#### Class TensorRef
@@ -272,7 +272,7 @@ Operation to a TensorFixedSize instead of a Tensor, which is a bit more
efficient.
// We know that the result is a 4x4x2 tensor!
- TensorFixedSize<float, 4, 4, 2> result = t5;
+ TensorFixedSize<float, Sizes<4, 4, 2>> result = t5;
Similarly, assigning an expression to a TensorMap causes its evaluation. Like
tensors of type TensorFixedSize, TensorMaps cannot be resized so they have to
@@ -296,7 +296,7 @@ the expression in a temporary Tensor of the right size. The code above in
effect does:
// .eval() knows the size!
- TensorFixedSize<float, 4, 4, 2> tmp = t1 + t2;
+ TensorFixedSize<float, Sizes<4, 4, 2>> tmp = t1 + t2;
Tensor<float, 3> result = (tmp * 0.2f).exp();
Note that the return value of ```eval()``` is itself an Operation, so the
@@ -567,11 +567,11 @@ to the rank of the tensor. The content of the tensor is not initialized.
### TensorFixedSize
-Creates a tensor of the specified size. The number of arguments in the Size<>
+Creates a tensor of the specified size. The number of arguments in the Sizes<>
template parameter determines the rank of the tensor. The content of the tensor
is not initialized.
- Eigen::TensorFixedSize<float, Size<3, 4>> a;
+ Eigen::TensorFixedSize<float, Sizes<3, 4>> a;
cout << "Rank: " << a.rank() << endl;
=> Rank: 2
cout << "NumRows: " << a.dimension(0) << " NumCols: " << a.dimension(1) << endl;
@@ -581,14 +581,14 @@ is not initialized.
Creates a tensor mapping an existing array of data. The data must not be freed
until the TensorMap is discarded, and the size of the data must be large enough
-to accomodate of the coefficients of the tensor.
+to accommodate the coefficients of the tensor.
float data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
- Eigen::TensorMap<float, 2> a(data, 3, 4);
+ Eigen::TensorMap<Tensor<float, 2>> a(data, 3, 4);
cout << "NumRows: " << a.dimension(0) << " NumCols: " << a.dimension(1) << endl;
=> NumRows: 3 NumCols: 4
cout << "a(1, 2): " << a(1, 2) << endl;
- => a(1, 2): 9
+ => a(1, 2): 7
## Contents Initialization
@@ -1013,16 +1013,23 @@ multidimensional case.
Eigen::Tensor<int, 2> a(2, 3);
a.setValues({{1, 2, 3}, {6, 5, 4}});
Eigen::Tensor<int, 2> b(3, 2);
- a.setValues({{1, 2}, {4, 5}, {5, 6}});
+ b.setValues({{1, 2}, {4, 5}, {5, 6}});
// Compute the traditional matrix product
- array<IndexPair<int>, 1> product_dims = { IndexPair(1, 0) };
+ Eigen::array<Eigen::IndexPair<int>, 1> product_dims = { Eigen::IndexPair<int>(1, 0) };
Eigen::Tensor<int, 2> AB = a.contract(b, product_dims);
// Compute the product of the transpose of the matrices
- array<IndexPair<int>, 1> transpose_product_dims = { IndexPair(0, 1) };
+ Eigen::array<Eigen::IndexPair<int>, 1> transposed_product_dims = { Eigen::IndexPair<int>(0, 1) };
Eigen::Tensor<int, 2> AtBt = a.contract(b, transposed_product_dims);
+ // Contraction to scalar value using a double contraction.
+ // The first coordinates of both tensors are contracted, as well as both second coordinates, i.e., this computes the sum of the squares of the elements.
+ Eigen::array<Eigen::IndexPair<int>, 2> double_contraction_product_dims = { Eigen::IndexPair<int>(0, 0), Eigen::IndexPair<int>(1, 1) };
+ Eigen::Tensor<int, 0> AdoubleContractedA = a.contract(a, double_contraction_product_dims);
+
+ // Extracting the scalar value of the tensor contraction for further usage
+ int value = AdoubleContractedA(0);
## Reduction Operations
@@ -1168,6 +1175,58 @@ Reduce a tensor using a user-defined reduction operator. See ```SumReducer```
in TensorFunctors.h for information on how to implement a reduction operator.
+## Trace
+
+A *Trace* operation returns a tensor with fewer dimensions than the original
+tensor. Its elements are the sums of the elements of the original tensor along
+the main diagonal of a list of specified dimensions, the "trace dimensions".
+Similar to the ```Reduction Dimensions```, the trace dimensions are passed as an
+input parameter to the operation and are of type ```<TensorType>::Dimensions```,
+with the same requirements as for reductions. In addition, the trace dimensions
+must all have the same size.
+
+Example: Trace along 2 dimensions.
+
+ // Create a tensor of 3 dimensions
+ Eigen::Tensor<int, 3> a(2, 2, 3);
+ a.setValues({{{1, 2, 3}, {4, 5, 6}}, {{7, 8, 9}, {10, 11, 12}}});
+ // Specify the dimensions along which the trace will be computed.
+ // In this example, the trace can only be computed along the dimensions
+ // with indices 0 and 1
+ Eigen::array<int, 2> dims({0, 1});
+ // The output tensor contains all but the trace dimensions.
+ Tensor<int, 1> a_trace = a.trace(dims);
+ cout << "a_trace:" << endl;
+ cout << a_trace << endl;
+ =>
+ a_trace:
+ 11
+ 13
+ 15
+
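Each output coefficient is the sum over the diagonal of the two trace dimensions,
i.e. ```a(0, 0, k) + a(1, 1, k)``` for k = 0, 1, 2:

    11 = 1 + 10,   13 = 2 + 11,   15 = 3 + 12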
+
+### <Operation> trace(const Dimensions& new_dims)
+### <Operation> trace()
+
+As a special case, if no parameter is passed to the operation, trace is computed
+along *all* dimensions of the input tensor.
+
+Example: Trace along all dimensions.
+
+ // Create a tensor of 3 dimensions, with all dimensions having the same size.
+ Eigen::Tensor<int, 3> a(3, 3, 3);
+ a.setValues({{{1, 2, 3}, {4, 5, 6}, {7, 8, 9}},
+ {{10, 11, 12}, {13, 14, 15}, {16, 17, 18}},
+ {{19, 20, 21}, {22, 23, 24}, {25, 26, 27}}});
+ // Result is a zero dimension tensor
+ Tensor<int, 0> a_trace = a.trace();
+ cout << "a_trace:" << endl;
+ cout << a_trace << endl;
+ =>
+ a_trace:
+ 42
+
+
## Scan Operations
A *Scan* operation returns a tensor with the same dimensions as the original
@@ -1191,7 +1250,7 @@ dd a comment to this line
=>
a
1 2 3
- 6 5 4
+ 4 5 6
b
1 3 6
@@ -1314,7 +1373,7 @@ The previous example can be rewritten as follow:
Eigen::Tensor<float, 2, Eigen::ColMajor> a(2, 3);
a.setValues({{0.0f, 100.0f, 200.0f}, {300.0f, 400.0f, 500.0f}});
Eigen::array<Eigen::DenseIndex, 2> two_dim({2, 3});
- Eigen::Tensor<float, 1, Eigen::ColMajor> b;
+ Eigen::Tensor<float, 1, Eigen::ColMajor> b(6);
b.reshape(two_dim) = a;
cout << "b" << endl << b << endl;
=>
diff --git a/unsupported/Eigen/CXX11/src/Tensor/Tensor.h b/unsupported/Eigen/CXX11/src/Tensor/Tensor.h
index 1940a9692..95349d0ed 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/Tensor.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/Tensor.h
@@ -48,7 +48,7 @@ namespace Eigen {
*
* <dl>
* <dt><b>Relation to other parts of Eigen:</b></dt>
- * <dd>The midterm developement goal for this class is to have a similar hierarchy as Eigen uses for matrices, so that
+ * <dd>The midterm development goal for this class is to have a similar hierarchy as Eigen uses for matrices, so that
* taking blocks or using tensors in expressions is easily possible, including an interface with the vector/matrix code
* by providing .asMatrix() and .asVector() (or similar) methods for rank 2 and 1 tensors. However, currently, the %Tensor
* class does not provide any of these features and is only available as a stand-alone class that just allows for
@@ -112,7 +112,7 @@ class Tensor : public TensorBase<Tensor<Scalar_, NumIndices_, Options_, IndexTyp
#if EIGEN_HAS_VARIADIC_TEMPLATES
template<typename... IndexTypes>
- EIGEN_DEVICE_FUNC inline const Scalar& coeff(Index firstIndex, Index secondIndex, IndexTypes... otherIndices) const
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff(Index firstIndex, Index secondIndex, IndexTypes... otherIndices) const
{
// The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
@@ -398,6 +398,21 @@ class Tensor : public TensorBase<Tensor<Scalar_, NumIndices_, Options_, IndexTyp
internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
}
+ #if EIGEN_HAS_RVALUE_REFERENCES
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Tensor(Self&& other)
+ : Tensor()
+ {
+ m_storage.swap(other.m_storage);
+ }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Tensor& operator=(Self&& other)
+ {
+ m_storage.swap(other.m_storage);
+ return *this;
+ }
+ #endif
+
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Tensor& operator=(const Tensor& other)
{
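
The move constructor and move assignment added above swap the underlying storage
rather than copying it; a minimal usage sketch (the helper make_filled is
hypothetical):

    Eigen::Tensor<float, 2> make_filled() {
      Eigen::Tensor<float, 2> t(256, 256);
      t.setConstant(1.0f);
      return t;
    }

    Eigen::Tensor<float, 2> a = make_filled();  // move-constructed when rvalue references are available
    a = make_filled();                          // move-assigned: the storages are swapped, not copied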
@@ -462,6 +477,18 @@ class Tensor : public TensorBase<Tensor<Scalar_, NumIndices_, Options_, IndexTyp
// Nothing to do: rank 0 tensors have fixed size
}
+#ifdef EIGEN_HAS_INDEX_LIST
+ template <typename FirstType, typename... OtherTypes>
+ EIGEN_DEVICE_FUNC
+ void resize(const Eigen::IndexList<FirstType, OtherTypes...>& dimensions) {
+ array<Index, NumIndices> dims;
+ for (int i = 0; i < NumIndices; ++i) {
+ dims[i] = static_cast<Index>(dimensions[i]);
+ }
+ resize(dims);
+ }
+#endif
+
/** Custom Dimension */
#ifdef EIGEN_HAS_SFINAE
template<typename CustomDimension,
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorArgMax.h b/unsupported/Eigen/CXX11/src/Tensor/TensorArgMax.h
index d06f40cd8..ea3ab329d 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorArgMax.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorArgMax.h
@@ -87,6 +87,7 @@ struct TensorEvaluator<const TensorIndexTupleOp<ArgType>, Device>
IsAligned = /*TensorEvaluator<ArgType, Device>::IsAligned*/ false,
PacketAccess = /*TensorEvaluator<ArgType, Device>::PacketAccess*/ false,
BlockAccess = false,
+ PreferBlockAccess = false,
Layout = TensorEvaluator<ArgType, Device>::Layout,
CoordAccess = false, // to be implemented
RawAccess = false
@@ -119,6 +120,12 @@ struct TensorEvaluator<const TensorIndexTupleOp<ArgType>, Device>
EIGEN_DEVICE_FUNC Scalar* data() const { return NULL; }
+#ifdef EIGEN_USE_SYCL
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const TensorEvaluator<ArgType, Device>& impl() const {
+ return m_impl;
+ }
+#endif
+
protected:
TensorEvaluator<ArgType, Device> m_impl;
};
@@ -172,7 +179,7 @@ class TensorTupleReducerOp : public TensorBase<TensorTupleReducerOp<ReduceOp, Di
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorTupleReducerOp(const XprType& expr,
const ReduceOp& reduce_op,
- const int return_dim,
+ const Index return_dim,
const Dims& reduce_dims)
: m_xpr(expr), m_reduce_op(reduce_op), m_return_dim(return_dim), m_reduce_dims(reduce_dims) {}
@@ -187,12 +194,12 @@ class TensorTupleReducerOp : public TensorBase<TensorTupleReducerOp<ReduceOp, Di
const Dims& reduce_dims() const { return m_reduce_dims; }
EIGEN_DEVICE_FUNC
- int return_dim() const { return m_return_dim; }
+ Index return_dim() const { return m_return_dim; }
protected:
typename XprType::Nested m_xpr;
const ReduceOp m_reduce_op;
- const int m_return_dim;
+ const Index m_return_dim;
const Dims m_reduce_dims;
};
@@ -214,6 +221,7 @@ struct TensorEvaluator<const TensorTupleReducerOp<ReduceOp, Dims, ArgType>, Devi
IsAligned = /*TensorEvaluator<ArgType, Device>::IsAligned*/ false,
PacketAccess = /*TensorEvaluator<ArgType, Device>::PacketAccess*/ false,
BlockAccess = false,
+ PreferBlockAccess = false,
Layout = TensorEvaluator<const TensorReductionOp<ReduceOp, Dims, const TensorIndexTupleOp<ArgType> >, Device>::Layout,
CoordAccess = false, // to be implemented
RawAccess = false
@@ -222,7 +230,11 @@ struct TensorEvaluator<const TensorTupleReducerOp<ReduceOp, Dims, ArgType>, Devi
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
: m_orig_impl(op.expression(), device),
m_impl(op.expression().index_tuples().reduce(op.reduce_dims(), op.reduce_op()), device),
- m_return_dim(op.return_dim()) {
+ m_return_dim(op.return_dim())
+#ifdef EIGEN_USE_SYCL
+ ,m_device(device)
+#endif
+ {
gen_strides(m_orig_impl.dimensions(), m_strides);
if (Layout == static_cast<int>(ColMajor)) {
@@ -252,7 +264,16 @@ struct TensorEvaluator<const TensorTupleReducerOp<ReduceOp, Dims, ArgType>, Devi
return (m_return_dim < 0) ? v.first : (v.first % m_stride_mod) / m_stride_div;
}
+ #ifndef EIGEN_USE_SYCL
EIGEN_DEVICE_FUNC Scalar* data() const { return NULL; }
+ #else // following functions are required by sycl
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TupleType* data() const { return m_impl.data(); }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index return_dim() const {return m_return_dim;}
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const StrideDims& strides() const {return m_strides;}
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Index& stride_mod() const {return m_stride_mod;}
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Index& stride_div() const {return m_stride_div;}
+ const Device& device() const{return m_device;}
+ #endif
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
costPerCoeff(bool vectorized) const {
@@ -288,10 +309,13 @@ struct TensorEvaluator<const TensorTupleReducerOp<ReduceOp, Dims, ArgType>, Devi
protected:
TensorEvaluator<const TensorIndexTupleOp<ArgType>, Device> m_orig_impl;
TensorEvaluator<const TensorReductionOp<ReduceOp, Dims, const TensorIndexTupleOp<ArgType> >, Device> m_impl;
- const int m_return_dim;
+ const Index m_return_dim;
StrideDims m_strides;
Index m_stride_mod;
Index m_stride_div;
+#ifdef EIGEN_USE_SYCL
+ const Device& m_device;
+#endif
};
} // end namespace Eigen
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorArgMaxSycl.h b/unsupported/Eigen/CXX11/src/Tensor/TensorArgMaxSycl.h
new file mode 100644
index 000000000..5110e99ee
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorArgMaxSycl.h
@@ -0,0 +1,148 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Mehdi Goli Codeplay Software Ltd.
+// Ralph Potter Codeplay Software Ltd.
+// Luke Iwanski Codeplay Software Ltd.
+// Contact: <eigen@codeplay.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+/*****************************************************************
+ * TensorArgMaxSycl.h
+ * \brief:
+ * TensorArgMaxSycl
+ *
+*****************************************************************/
+
+#ifndef UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSOR_ARGMAX_SYCL_HPP
+#define UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSOR_ARGMAX_SYCL_HPP
+namespace Eigen {
+namespace internal {
+ template<typename Dims, typename XprType>
+ struct eval<TensorTupleReducerDeviceOp<Dims, XprType>, Eigen::Dense>
+ {
+ typedef const TensorTupleReducerDeviceOp<Dims, XprType>& type;
+ };
+
+ template<typename Dims, typename XprType>
+ struct nested<TensorTupleReducerDeviceOp<Dims, XprType>, 1,
+ typename eval<TensorTupleReducerDeviceOp<Dims, XprType> >::type>
+ {
+ typedef TensorTupleReducerDeviceOp<Dims, XprType> type;
+ };
+
+template<typename StrideDims, typename XprType>
+struct traits<TensorTupleReducerDeviceOp<StrideDims, XprType> > : public traits<XprType>
+{
+ typedef traits<XprType> XprTraits;
+ typedef typename XprTraits::StorageKind StorageKind;
+ typedef typename XprTraits::Index Index;
+ typedef Index Scalar;
+ typedef typename XprType::Nested Nested;
+ typedef typename remove_reference<Nested>::type _Nested;
+ static const int NumDimensions = XprTraits::NumDimensions;
+ static const int Layout = XprTraits::Layout;
+};
+
+
+}// end namespace internal
+template<typename StrideDims, typename XprType>
+class TensorTupleReducerDeviceOp : public TensorBase<TensorTupleReducerDeviceOp<StrideDims, XprType>, ReadOnlyAccessors>
+{
+ public:
+ typedef typename Eigen::internal::traits<TensorTupleReducerDeviceOp>::Scalar Scalar;
+ typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
+ typedef typename Eigen::internal::nested<TensorTupleReducerDeviceOp>::type Nested;
+ typedef typename Eigen::internal::traits<TensorTupleReducerDeviceOp>::StorageKind StorageKind;
+ typedef typename Eigen::internal::traits<TensorTupleReducerDeviceOp>::Index Index;
+ typedef typename XprType::CoeffReturnType TupleType;
+ typedef Index CoeffReturnType;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorTupleReducerDeviceOp(XprType expr,
+ const Index return_dim,
+ const StrideDims strides,
+ const Index stride_mod, const Index stride_div)
+ :m_xpr(expr), m_return_dim(return_dim), m_strides(strides), m_stride_mod(stride_mod), m_stride_div(stride_div) {}
+
+ EIGEN_DEVICE_FUNC
+ const typename internal::remove_all<typename XprType::Nested>::type&
+ expression() const { return m_xpr; }
+
+ EIGEN_DEVICE_FUNC
+ Index return_dim() const { return m_return_dim; }
+
+ EIGEN_DEVICE_FUNC
+ const StrideDims& strides() const { return m_strides; }
+
+ EIGEN_DEVICE_FUNC
+ const Index& stride_mod() const { return m_stride_mod; }
+
+ EIGEN_DEVICE_FUNC
+ const Index& stride_div() const { return m_stride_div; }
+
+ protected:
+ typename Eigen::internal::remove_all<typename
+ XprType::Nested
+ >::type m_xpr;
+ const Index m_return_dim;
+ const StrideDims m_strides;
+ const Index m_stride_mod;
+ const Index m_stride_div;
+};
+
+
+// Eval as rvalue
+template<typename StrideDims, typename ArgType>
+struct TensorEvaluator<const TensorTupleReducerDeviceOp<StrideDims, ArgType>, SyclKernelDevice>
+{
+ typedef TensorTupleReducerDeviceOp<StrideDims, ArgType> XprType;
+ typedef typename XprType::Index Index;
+ typedef typename XprType::Scalar Scalar;
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+ typedef typename XprType::TupleType TupleType;
+ typedef typename TensorEvaluator<ArgType, SyclKernelDevice>::Dimensions Dimensions;
+
+ enum {
+ IsAligned = false,
+ PacketAccess = false,
+ BlockAccess = false,
+ PreferBlockAccess = false,
+ Layout = TensorEvaluator<ArgType, SyclKernelDevice>::Layout,
+ CoordAccess = false,
+ RawAccess = false
+ };
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const SyclKernelDevice& device)
+ : m_impl(op.expression(), device), m_return_dim(op.return_dim()), m_strides(op.strides()), m_stride_mod(op.stride_mod()),
+ m_stride_div(op.stride_div()){}
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const {
+ return m_impl.dimensions();
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar*) {
+ m_impl.evalSubExprsIfNeeded(NULL);
+ return true;
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
+ m_impl.cleanup();
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const {
+ const TupleType v = m_impl.coeff(index);
+ return (m_return_dim < 0) ? v.first : (v.first % m_stride_mod) / m_stride_div;
+ }
+ typedef typename MakeGlobalPointer<typename TensorEvaluator<ArgType, SyclKernelDevice>::CoeffReturnType>::Type ptr_Dev_type;
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ptr_Dev_type data() const { return const_cast<ptr_Dev_type>(m_impl.data()); }
+
+protected:
+ TensorEvaluator<ArgType , SyclKernelDevice> m_impl;
+ const Index m_return_dim;
+ const StrideDims m_strides;
+ const Index m_stride_mod;
+ const Index m_stride_div;
+};
+} // end namespace Eigen
+#endif //UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSOR_ARGMAX_SYCL_HPP
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorAssign.h b/unsupported/Eigen/CXX11/src/Tensor/TensorAssign.h
index 166be200c..06bf422c5 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorAssign.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorAssign.h
@@ -34,6 +34,7 @@ struct traits<TensorAssignOp<LhsXprType, RhsXprType> >
typedef typename remove_reference<RhsNested>::type _RhsNested;
static const std::size_t NumDimensions = internal::traits<LhsXprType>::NumDimensions;
static const int Layout = internal::traits<LhsXprType>::Layout;
+ typedef typename traits<LhsXprType>::PointerType PointerType;
enum {
Flags = 0
@@ -67,6 +68,8 @@ class TensorAssignOp : public TensorBase<TensorAssignOp<LhsXprType, RhsXprType>
typedef typename Eigen::internal::traits<TensorAssignOp>::StorageKind StorageKind;
typedef typename Eigen::internal::traits<TensorAssignOp>::Index Index;
+ static const int NumDims = Eigen::internal::traits<TensorAssignOp>::NumDimensions;
+
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorAssignOp(LhsXprType& lhs, const RhsXprType& rhs)
: m_lhs_xpr(lhs), m_rhs_xpr(rhs) {}
@@ -94,20 +97,35 @@ struct TensorEvaluator<const TensorAssignOp<LeftArgType, RightArgType>, Device>
typedef typename XprType::CoeffReturnType CoeffReturnType;
typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
typedef typename TensorEvaluator<RightArgType, Device>::Dimensions Dimensions;
- static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
+
+ static const int PacketSize = PacketType<CoeffReturnType, Device>::size;
+ static const int NumDims = XprType::NumDims;
enum {
- IsAligned = TensorEvaluator<LeftArgType, Device>::IsAligned & TensorEvaluator<RightArgType, Device>::IsAligned,
- PacketAccess = TensorEvaluator<LeftArgType, Device>::PacketAccess & TensorEvaluator<RightArgType, Device>::PacketAccess,
- Layout = TensorEvaluator<LeftArgType, Device>::Layout,
- RawAccess = TensorEvaluator<LeftArgType, Device>::RawAccess
+ IsAligned = TensorEvaluator<LeftArgType, Device>::IsAligned &
+ TensorEvaluator<RightArgType, Device>::IsAligned,
+ PacketAccess = TensorEvaluator<LeftArgType, Device>::PacketAccess &
+ TensorEvaluator<RightArgType, Device>::PacketAccess,
+ BlockAccess = TensorEvaluator<LeftArgType, Device>::BlockAccess &
+ TensorEvaluator<RightArgType, Device>::BlockAccess,
+ PreferBlockAccess = TensorEvaluator<LeftArgType, Device>::PreferBlockAccess |
+ TensorEvaluator<RightArgType, Device>::PreferBlockAccess,
+ Layout = TensorEvaluator<LeftArgType, Device>::Layout,
+ RawAccess = TensorEvaluator<LeftArgType, Device>::RawAccess
};
+ typedef typename internal::TensorBlock<
+ typename internal::remove_const<Scalar>::type, Index, NumDims, Layout>
+ TensorBlock;
+
EIGEN_DEVICE_FUNC TensorEvaluator(const XprType& op, const Device& device) :
m_leftImpl(op.lhsExpression(), device),
m_rightImpl(op.rhsExpression(), device)
{
- EIGEN_STATIC_ASSERT((static_cast<int>(TensorEvaluator<LeftArgType, Device>::Layout) == static_cast<int>(TensorEvaluator<RightArgType, Device>::Layout)), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT(
+ (static_cast<int>(TensorEvaluator<LeftArgType, Device>::Layout) ==
+ static_cast<int>(TensorEvaluator<RightArgType, Device>::Layout)),
+ YOU_MADE_A_PROGRAMMING_MISTAKE);
}
EIGEN_DEVICE_FUNC const Dimensions& dimensions() const
@@ -163,12 +181,31 @@ struct TensorEvaluator<const TensorAssignOp<LeftArgType, RightArgType>, Device>
TensorOpCost(0, sizeof(CoeffReturnType), 0, vectorized, PacketSize);
}
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void getResourceRequirements(
+ std::vector<internal::TensorOpResourceRequirements>* resources) const {
+ m_leftImpl.getResourceRequirements(resources);
+ m_rightImpl.getResourceRequirements(resources);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalBlock(TensorBlock* block) {
+ if (TensorEvaluator<LeftArgType, Device>::RawAccess &&
+ m_leftImpl.data() != NULL) {
+ TensorBlock left_block(block->first_coeff_index(), block->block_sizes(),
+ block->tensor_strides(), block->tensor_strides(),
+ m_leftImpl.data() + block->first_coeff_index());
+ m_rightImpl.block(&left_block);
+ } else {
+ m_rightImpl.block(block);
+ m_leftImpl.writeBlock(*block);
+ }
+ }
+
/// required by sycl in order to extract the accessor
const TensorEvaluator<LeftArgType, Device>& left_impl() const { return m_leftImpl; }
/// required by sycl in order to extract the accessor
const TensorEvaluator<RightArgType, Device>& right_impl() const { return m_rightImpl; }
- EIGEN_DEVICE_FUNC CoeffReturnType* data() const { return m_leftImpl.data(); }
+ EIGEN_DEVICE_FUNC typename Eigen::internal::traits<XprType>::PointerType data() const { return m_leftImpl.data(); }
private:
TensorEvaluator<LeftArgType, Device> m_leftImpl;
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorBase.h b/unsupported/Eigen/CXX11/src/Tensor/TensorBase.h
index fbe340820..9b9d330c1 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorBase.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorBase.h
@@ -20,7 +20,7 @@ namespace Eigen {
* \brief The tensor base class.
*
* This class is the common parent of the Tensor and TensorMap class, thus
- * making it possible to use either class interchangably in expressions.
+ * making it possible to use either class interchangeably in expressions.
*/
template<typename Derived>
@@ -133,6 +133,18 @@ class TensorBase<Derived, ReadOnlyAccessors>
return unaryExpr(internal::scalar_digamma_op<Scalar>());
}
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_i0e_op<Scalar>, const Derived>
+ i0e() const {
+ return unaryExpr(internal::scalar_i0e_op<Scalar>());
+ }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_i1e_op<Scalar>, const Derived>
+ i1e() const {
+ return unaryExpr(internal::scalar_i1e_op<Scalar>());
+ }
+
// igamma(a = this, x = other)
template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorCwiseBinaryOp<internal::scalar_igamma_op<Scalar>, const Derived, const OtherDerived>
@@ -140,6 +152,20 @@ class TensorBase<Derived, ReadOnlyAccessors>
return binaryExpr(other.derived(), internal::scalar_igamma_op<Scalar>());
}
+ // igamma_der_a(a = this, x = other)
+ template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorCwiseBinaryOp<internal::scalar_igamma_der_a_op<Scalar>, const Derived, const OtherDerived>
+ igamma_der_a(const OtherDerived& other) const {
+ return binaryExpr(other.derived(), internal::scalar_igamma_der_a_op<Scalar>());
+ }
+
+ // gamma_sample_der_alpha(alpha = this, sample = other)
+ template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorCwiseBinaryOp<internal::scalar_gamma_sample_der_alpha_op<Scalar>, const Derived, const OtherDerived>
+ gamma_sample_der_alpha(const OtherDerived& other) const {
+ return binaryExpr(other.derived(), internal::scalar_gamma_sample_der_alpha_op<Scalar>());
+ }
+
// igammac(a = this, x = other)
template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorCwiseBinaryOp<internal::scalar_igammac_op<Scalar>, const Derived, const OtherDerived>
@@ -174,9 +200,9 @@ class TensorBase<Derived, ReadOnlyAccessors>
}
EIGEN_DEVICE_FUNC
- EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_sigmoid_op<Scalar>, const Derived>
+ EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_logistic_op<Scalar>, const Derived>
sigmoid() const {
- return unaryExpr(internal::scalar_sigmoid_op<Scalar>());
+ return unaryExpr(internal::scalar_logistic_op<Scalar>());
}
EIGEN_DEVICE_FUNC
@@ -210,6 +236,12 @@ class TensorBase<Derived, ReadOnlyAccessors>
}
EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_clamp_op<Scalar>, const Derived>
+ clip(Scalar min, Scalar max) const {
+ return unaryExpr(internal::scalar_clamp_op<Scalar>(min, max));
+ }
+
+ EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_conjugate_op<Scalar>, const Derived>
conjugate() const {
return unaryExpr(internal::scalar_conjugate_op<Scalar>());
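
The new clip(min, max) method above clamps every coefficient to the closed
interval [min, max]; a small usage sketch:

    Eigen::Tensor<float, 2> t(2, 2);
    t.setValues({{-1.0f, 0.25f}, {0.75f, 3.0f}});
    Eigen::Tensor<float, 2> clipped = t.clip(0.0f, 1.0f);  // {{0, 0.25}, {0.75, 1}}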
@@ -485,9 +517,15 @@ class TensorBase<Derived, ReadOnlyAccessors>
typedef Eigen::IndexPair<Index> DimensionPair;
template<typename OtherDerived, typename Dimensions> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
- const TensorContractionOp<const Dimensions, const Derived, const OtherDerived>
+ const TensorContractionOp<const Dimensions, const Derived, const OtherDerived, const NoOpOutputKernel>
contract(const OtherDerived& other, const Dimensions& dims) const {
- return TensorContractionOp<const Dimensions, const Derived, const OtherDerived>(derived(), other.derived(), dims);
+ return TensorContractionOp<const Dimensions, const Derived, const OtherDerived, const NoOpOutputKernel>(derived(), other.derived(), dims);
+ }
+
+ template<typename OtherDerived, typename Dimensions, typename OutputKernel> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorContractionOp<const Dimensions, const Derived, const OtherDerived, const OutputKernel>
+ contract(const OtherDerived& other, const Dimensions& dims, const OutputKernel& output_kernel) const {
+ return TensorContractionOp<const Dimensions, const Derived, const OtherDerived, const OutputKernel>(derived(), other.derived(), dims, output_kernel);
}
// Convolutions.
@@ -500,8 +538,8 @@ class TensorBase<Derived, ReadOnlyAccessors>
// Fourier transforms
template <int FFTDataType, int FFTDirection, typename FFT> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorFFTOp<const FFT, const Derived, FFTDataType, FFTDirection>
- fft(const FFT& fft) const {
- return TensorFFTOp<const FFT, const Derived, FFTDataType, FFTDirection>(derived(), fft);
+ fft(const FFT& dims) const {
+ return TensorFFTOp<const FFT, const Derived, FFTDataType, FFTDirection>(derived(), dims);
}
// Scan.
@@ -619,7 +657,7 @@ class TensorBase<Derived, ReadOnlyAccessors>
const array<Index, NumDimensions>, const Derived>
argmax() const {
array<Index, NumDimensions> in_dims;
- for (int d = 0; d < NumDimensions; ++d) in_dims[d] = d;
+ for (Index d = 0; d < NumDimensions; ++d) in_dims[d] = d;
return TensorTupleReducerOp<
internal::ArgMaxTupleReducer<Tuple<Index, CoeffReturnType> >,
const array<Index, NumDimensions>,
@@ -632,7 +670,7 @@ class TensorBase<Derived, ReadOnlyAccessors>
const array<Index, NumDimensions>, const Derived>
argmin() const {
array<Index, NumDimensions> in_dims;
- for (int d = 0; d < NumDimensions; ++d) in_dims[d] = d;
+ for (Index d = 0; d < NumDimensions; ++d) in_dims[d] = d;
return TensorTupleReducerOp<
internal::ArgMinTupleReducer<Tuple<Index, CoeffReturnType> >,
const array<Index, NumDimensions>,
@@ -643,7 +681,7 @@ class TensorBase<Derived, ReadOnlyAccessors>
const TensorTupleReducerOp<
internal::ArgMaxTupleReducer<Tuple<Index, CoeffReturnType> >,
const array<Index, 1>, const Derived>
- argmax(const int return_dim) const {
+ argmax(const Index return_dim) const {
array<Index, 1> in_dims;
in_dims[0] = return_dim;
return TensorTupleReducerOp<
@@ -656,7 +694,7 @@ class TensorBase<Derived, ReadOnlyAccessors>
const TensorTupleReducerOp<
internal::ArgMinTupleReducer<Tuple<Index, CoeffReturnType> >,
const array<Index, 1>, const Derived>
- argmin(const int return_dim) const {
+ argmin(const Index return_dim) const {
array<Index, 1> in_dims;
in_dims[0] = return_dim;
return TensorTupleReducerOp<
@@ -671,10 +709,22 @@ class TensorBase<Derived, ReadOnlyAccessors>
return TensorReductionOp<Reducer, const Dims, const Derived>(derived(), dims, reducer);
}
+ template <typename Dims> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorTraceOp<const Dims, const Derived>
+ trace(const Dims& dims) const {
+ return TensorTraceOp<const Dims, const Derived>(derived(), dims);
+ }
+
+ const TensorTraceOp<const DimensionList<Index, NumDimensions>, const Derived>
+ trace() const {
+ DimensionList<Index, NumDimensions> in_dims;
+ return TensorTraceOp<const DimensionList<Index, NumDimensions>, const Derived>(derived(), in_dims);
+ }
+
template <typename Broadcast> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorBroadcastingOp<const Broadcast, const Derived>
- broadcast(const Broadcast& broadcast) const {
- return TensorBroadcastingOp<const Broadcast, const Derived>(derived(), broadcast);
+ broadcast(const Broadcast& bcast) const {
+ return TensorBroadcastingOp<const Broadcast, const Derived>(derived(), bcast);
}
template <typename Axis, typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
@@ -782,8 +832,8 @@ class TensorBase<Derived, ReadOnlyAccessors>
}
template <typename Shuffle> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorShufflingOp<const Shuffle, const Derived>
- shuffle(const Shuffle& shuffle) const {
- return TensorShufflingOp<const Shuffle, const Derived>(derived(), shuffle);
+ shuffle(const Shuffle& shfl) const {
+ return TensorShufflingOp<const Shuffle, const Derived>(derived(), shfl);
}
template <typename Strides> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorStridingOp<const Strides, const Derived>
@@ -824,7 +874,8 @@ class TensorBase<Derived, ReadOnlyAccessors>
protected:
template <typename Scalar, int NumIndices, int Options, typename IndexType> friend class Tensor;
template <typename Scalar, typename Dimensions, int Option, typename IndexTypes> friend class TensorFixedSize;
- template <typename OtherDerived, int AccessLevel> friend class TensorBase;
+ // the Eigen:: prefix is required to work around a compilation issue with nvcc 9.0
+ template <typename OtherDerived, int AccessLevel> friend class Eigen::TensorBase;
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const Derived& derived() const { return *static_cast<const Derived*>(this); }
};
@@ -840,7 +891,8 @@ class TensorBase : public TensorBase<Derived, ReadOnlyAccessors> {
template <typename Scalar, int NumIndices, int Options, typename IndexType> friend class Tensor;
template <typename Scalar, typename Dimensions, int Option, typename IndexTypes> friend class TensorFixedSize;
- template <typename OtherDerived, int OtherAccessLevel> friend class TensorBase;
+ // the Eigen:: prefix is required to work around a compilation issue with nvcc 9.0
+ template <typename OtherDerived, int OtherAccessLevel> friend class Eigen::TensorBase;
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Derived& setZero() {
@@ -978,13 +1030,13 @@ class TensorBase : public TensorBase<Derived, ReadOnlyAccessors> {
template <typename Shuffle> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorShufflingOp<const Shuffle, const Derived>
- shuffle(const Shuffle& shuffle) const {
- return TensorShufflingOp<const Shuffle, const Derived>(derived(), shuffle);
+ shuffle(const Shuffle& shfl) const {
+ return TensorShufflingOp<const Shuffle, const Derived>(derived(), shfl);
}
template <typename Shuffle> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
TensorShufflingOp<const Shuffle, Derived>
- shuffle(const Shuffle& shuffle) {
- return TensorShufflingOp<const Shuffle, Derived>(derived(), shuffle);
+ shuffle(const Shuffle& shfl) {
+ return TensorShufflingOp<const Shuffle, Derived>(derived(), shfl);
}
template <typename Strides> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
@@ -1000,8 +1052,8 @@ class TensorBase : public TensorBase<Derived, ReadOnlyAccessors> {
// Select the device on which to evaluate the expression.
template <typename DeviceType>
- TensorDevice<Derived, DeviceType> device(const DeviceType& device) {
- return TensorDevice<Derived, DeviceType>(device, derived());
+ TensorDevice<Derived, DeviceType> device(const DeviceType& dev) {
+ return TensorDevice<Derived, DeviceType>(dev, derived());
}
protected:
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h b/unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h
new file mode 100644
index 000000000..558130300
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h
@@ -0,0 +1,1077 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2018 Andy Davis <andydavis@google.com>
+// Copyright (C) 2018 Eugene Zhulenev <ezhulenev@google.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_BLOCK_H
+#define EIGEN_CXX11_TENSOR_TENSOR_BLOCK_H
+
+namespace Eigen {
+namespace internal {
+
+namespace {
+
+// Helper template to choose between ColMajor and RowMajor values.
+template <int Layout>
+struct cond;
+
+template <>
+struct cond<ColMajor> {
+ template <typename T>
+ EIGEN_STRONG_INLINE const T& operator()(const T& col,
+ const T& /*row*/) const {
+ return col;
+ }
+};
+
+template <>
+struct cond<RowMajor> {
+ template <typename T>
+ EIGEN_STRONG_INLINE const T& operator()(const T& /*col*/,
+ const T& row) const {
+ return row;
+ }
+};
+
+} // namespace
+
+/**
+ * \class TensorBlockShapeType
+ * \ingroup CXX11_Tensor_Module
+ *
+ * \brief Tensor block shape type.
+ *
+ * Tensor block shape type defines the shape preference for the blocks
+ * extracted from the larger tensor.
+ *
+ * Example:
+ *
+ * We want to extract blocks of 100 elements from the large 100x100 tensor:
+ * - tensor: 100x100
+ * - target_block_size: 100
+ *
+ * TensorBlockShapeType:
+ * - kUniformAllDims: 100 blocks of size 10x10
+ * - kSkewedInnerDims: 100 blocks of size 100x1 (or 1x100 depending on a column
+ * or row major layout)
+ */
+enum TensorBlockShapeType {
+ kUniformAllDims,
+ kSkewedInnerDims
+};
+
+struct TensorOpResourceRequirements {
+ TensorBlockShapeType block_shape;
+ Index block_total_size;
+ // TODO(andydavis) Add 'target_num_threads' to support communication of
+ // thread-resource requirements. This will allow ops deep in the
+ // expression tree (like reductions) to communicate resources
+ // requirements based on local state (like the total number of reductions
+ // to be computed).
+ TensorOpResourceRequirements(TensorBlockShapeType shape,
+ const Index size)
+ : block_shape(shape), block_total_size(size) {}
+};
+
+// Tries to merge multiple resource requirements.
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void MergeResourceRequirements(
+ const std::vector<TensorOpResourceRequirements>& resources,
+ TensorBlockShapeType* block_shape, Index* block_total_size) {
+ if (resources.empty()) {
+ return;
+ }
+ // TODO(andydavis) Implement different policies (i.e. revert to a default
+ // policy if block shapes/sizes conflict).
+ *block_shape = resources[0].block_shape;
+ *block_total_size = resources[0].block_total_size;
+ for (std::vector<TensorOpResourceRequirements>::size_type i = 1; i < resources.size(); ++i) {
+ if (resources[i].block_shape == kSkewedInnerDims &&
+ *block_shape != kSkewedInnerDims) {
+ *block_shape = kSkewedInnerDims;
+ }
+ *block_total_size =
+ numext::maxi(*block_total_size, resources[i].block_total_size);
+ }
+}
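
Given the merging policy above (a skewed shape wins over a uniform one, and the
block sizes are combined with the maximum), a short usage sketch, outside the
patch:

    std::vector<Eigen::internal::TensorOpResourceRequirements> resources;
    resources.push_back(Eigen::internal::TensorOpResourceRequirements(
        Eigen::internal::kUniformAllDims, 1024));
    resources.push_back(Eigen::internal::TensorOpResourceRequirements(
        Eigen::internal::kSkewedInnerDims, 4096));

    Eigen::internal::TensorBlockShapeType shape;
    Eigen::Index total_size;
    Eigen::internal::MergeResourceRequirements(resources, &shape, &total_size);
    // shape == Eigen::internal::kSkewedInnerDims, total_size == 4096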
+
+/**
+ * \class TensorBlock
+ * \ingroup CXX11_Tensor_Module
+ *
+ * \brief Tensor block class.
+ *
+ * This class represents a tensor block specified by the index of the
+ * first block coefficient, and the size of the block in each dimension.
+ */
+template <typename Scalar, typename StorageIndex, int NumDims, int Layout>
+class TensorBlock {
+ public:
+ typedef DSizes<StorageIndex, NumDims> Dimensions;
+
+ TensorBlock(const StorageIndex first_coeff_index, const Dimensions& block_sizes,
+ const Dimensions& block_strides, const Dimensions& tensor_strides,
+ Scalar* data)
+ : m_first_coeff_index(first_coeff_index),
+ m_block_sizes(block_sizes),
+ m_block_strides(block_strides),
+ m_tensor_strides(tensor_strides),
+ m_data(data) {}
+
+ StorageIndex first_coeff_index() const { return m_first_coeff_index; }
+
+ const Dimensions& block_sizes() const { return m_block_sizes; }
+
+ const Dimensions& block_strides() const { return m_block_strides; }
+
+ const Dimensions& tensor_strides() const { return m_tensor_strides; }
+
+ Scalar* data() { return m_data; }
+
+ const Scalar* data() const { return m_data; }
+
+ private:
+ StorageIndex m_first_coeff_index;
+ Dimensions m_block_sizes;
+ Dimensions m_block_strides;
+ Dimensions m_tensor_strides;
+ Scalar* m_data; // Not owned.
+};
+
+template <typename Scalar, typename StorageIndex>
+struct TensorBlockCopyOp {
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Run(
+ const StorageIndex num_coeff_to_copy, const StorageIndex dst_index,
+ const StorageIndex dst_stride, Scalar* EIGEN_RESTRICT dst_data,
+ const StorageIndex src_index, const StorageIndex src_stride,
+ const Scalar* EIGEN_RESTRICT src_data) {
+ const Scalar* src_base = &src_data[src_index];
+ Scalar* dst_base = &dst_data[dst_index];
+
+ typedef const Array<Scalar, Dynamic, 1> Src;
+ typedef Array<Scalar, Dynamic, 1> Dst;
+
+ typedef Map<Src, 0, InnerStride<> > SrcMap;
+ typedef Map<Dst, 0, InnerStride<> > DstMap;
+
+ const SrcMap src(src_base, num_coeff_to_copy, InnerStride<>(src_stride));
+ DstMap dst(dst_base, num_coeff_to_copy, InnerStride<>(dst_stride));
+
+ dst = src;
+ }
+};
+
+/**
+ * \class TensorBlockIO
+ * \ingroup CXX11_Tensor_Module
+ *
+ * \brief Tensor block IO class.
+ *
+ * This class is responsible for copying data between a tensor and a tensor
+ * block.
+ */
+template <typename Scalar, typename StorageIndex, int NumDims, int Layout,
+ bool BlockRead>
+class TensorBlockIO {
+ public:
+ typedef TensorBlock<Scalar, StorageIndex, NumDims, Layout> Block;
+ typedef TensorBlockCopyOp<Scalar, StorageIndex> BlockCopyOp;
+
+ protected:
+ struct BlockIteratorState {
+ StorageIndex input_stride;
+ StorageIndex output_stride;
+ StorageIndex input_span;
+ StorageIndex output_span;
+ StorageIndex size;
+ StorageIndex count;
+ BlockIteratorState()
+ : input_stride(0),
+ output_stride(0),
+ input_span(0),
+ output_span(0),
+ size(0),
+ count(0) {}
+ };
+
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Copy(
+ const Block& block, StorageIndex first_coeff_index,
+ const array<StorageIndex, NumDims>& tensor_to_block_dim_map,
+ const array<StorageIndex, NumDims>& tensor_strides, const Scalar* src_data,
+ Scalar* dst_data) {
+ // Find the innermost tensor dimension whose size is not 1. This is the
+ // effective inner dim. If all dimensions are of size 1, then fall back to
+ // using the actual innermost dim to avoid out-of-bound access.
+ StorageIndex num_size_one_inner_dims = 0;
+ for (int i = 0; i < NumDims; ++i) {
+ const int dim = cond<Layout>()(i, NumDims - i - 1);
+ if (block.block_sizes()[tensor_to_block_dim_map[dim]] != 1) {
+ num_size_one_inner_dims = i;
+ break;
+ }
+ }
+ // Calculate strides and dimensions.
+ const StorageIndex tensor_stride1_dim = cond<Layout>()(
+ num_size_one_inner_dims, NumDims - num_size_one_inner_dims - 1);
+ const StorageIndex block_dim_for_tensor_stride1_dim =
+ NumDims == 0 ? 1 : tensor_to_block_dim_map[tensor_stride1_dim];
+ StorageIndex block_inner_dim_size =
+ NumDims == 0 ? 1
+ : block.block_sizes()[block_dim_for_tensor_stride1_dim];
+ for (Index i = num_size_one_inner_dims + 1; i < NumDims; ++i) {
+ const Index dim = cond<Layout>()(i, NumDims - i - 1);
+ const StorageIndex block_stride =
+ block.block_strides()[tensor_to_block_dim_map[dim]];
+ if (block_inner_dim_size == block_stride &&
+ block_stride == tensor_strides[dim]) {
+ block_inner_dim_size *=
+ block.block_sizes()[tensor_to_block_dim_map[dim]];
+ ++num_size_one_inner_dims;
+ } else {
+ break;
+ }
+ }
+
+ StorageIndex inputIndex;
+ StorageIndex outputIndex;
+ StorageIndex input_stride;
+ StorageIndex output_stride;
+
+ // Setup strides to read/write along the tensor's stride1 dimension.
+ if (BlockRead) {
+ inputIndex = first_coeff_index;
+ outputIndex = 0;
+ input_stride = NumDims == 0 ? 1 : tensor_strides[tensor_stride1_dim];
+ output_stride =
+ NumDims == 0
+ ? 1
+ : block.block_strides()[block_dim_for_tensor_stride1_dim];
+ } else {
+ inputIndex = 0;
+ outputIndex = first_coeff_index;
+ input_stride =
+ NumDims == 0
+ ? 1
+ : block.block_strides()[block_dim_for_tensor_stride1_dim];
+ output_stride = NumDims == 0 ? 1 : tensor_strides[tensor_stride1_dim];
+ }
+
+ const int at_least_1_dim = NumDims <= 1 ? 1 : NumDims - 1;
+ array<BlockIteratorState, at_least_1_dim> block_iter_state;
+
+ // Initialize block iterator state. Squeeze away any dimension of size 1.
+ Index num_squeezed_dims = 0;
+ for (Index i = num_size_one_inner_dims; i < NumDims - 1; ++i) {
+ const Index dim = cond<Layout>()(i + 1, NumDims - i - 2);
+ const StorageIndex size = block.block_sizes()[tensor_to_block_dim_map[dim]];
+ if (size == 1) {
+ continue;
+ }
+ block_iter_state[num_squeezed_dims].size = size;
+ if (BlockRead) {
+ block_iter_state[num_squeezed_dims].input_stride = tensor_strides[dim];
+ block_iter_state[num_squeezed_dims].output_stride =
+ block.block_strides()[tensor_to_block_dim_map[dim]];
+ } else {
+ block_iter_state[num_squeezed_dims].input_stride =
+ block.block_strides()[tensor_to_block_dim_map[dim]];
+ block_iter_state[num_squeezed_dims].output_stride = tensor_strides[dim];
+ }
+ block_iter_state[num_squeezed_dims].input_span =
+ block_iter_state[num_squeezed_dims].input_stride *
+ (block_iter_state[num_squeezed_dims].size - 1);
+ block_iter_state[num_squeezed_dims].output_span =
+ block_iter_state[num_squeezed_dims].output_stride *
+ (block_iter_state[num_squeezed_dims].size - 1);
+ ++num_squeezed_dims;
+ }
+
+ // Iterate copying data from src to dst.
+ const StorageIndex block_total_size =
+ NumDims == 0 ? 1 : block.block_sizes().TotalSize();
+ for (StorageIndex i = 0; i < block_total_size; i += block_inner_dim_size) {
+ BlockCopyOp::Run(block_inner_dim_size, outputIndex, output_stride,
+ dst_data, inputIndex, input_stride, src_data);
+ // Update index.
+ for (int j = 0; j < num_squeezed_dims; ++j) {
+ if (++block_iter_state[j].count < block_iter_state[j].size) {
+ inputIndex += block_iter_state[j].input_stride;
+ outputIndex += block_iter_state[j].output_stride;
+ break;
+ }
+ block_iter_state[j].count = 0;
+ inputIndex -= block_iter_state[j].input_span;
+ outputIndex -= block_iter_state[j].output_span;
+ }
+ }
+ }
+};
+
+/**
+ * \class TensorBlockReader
+ * \ingroup CXX11_Tensor_Module
+ *
+ * \brief Tensor block reader class.
+ *
+ * This class is responsible for reading a tensor block.
+ *
+ */
+template <typename Scalar, typename StorageIndex, int NumDims, int Layout>
+class TensorBlockReader : public TensorBlockIO<Scalar, StorageIndex, NumDims,
+ Layout, /*BlockRead=*/true> {
+ public:
+ typedef TensorBlock<Scalar, StorageIndex, NumDims, Layout> Block;
+ typedef TensorBlockIO<Scalar, StorageIndex, NumDims, Layout, /*BlockRead=*/true> Base;
+
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Run(
+ Block* block, const Scalar* src_data) {
+ array<StorageIndex, NumDims> tensor_to_block_dim_map;
+ for (int i = 0; i < NumDims; ++i) {
+ tensor_to_block_dim_map[i] = i;
+ }
+ Base::Copy(*block, block->first_coeff_index(), tensor_to_block_dim_map,
+ block->tensor_strides(), src_data, block->data());
+ }
+
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Run(
+ Block* block, StorageIndex first_coeff_index,
+ const array<StorageIndex, NumDims>& tensor_to_block_dim_map,
+ const array<StorageIndex, NumDims>& tensor_strides, const Scalar* src_data) {
+ Base::Copy(*block, first_coeff_index, tensor_to_block_dim_map,
+ tensor_strides, src_data, block->data());
+ }
+};
+
+/**
+ * \class TensorBlockWriter
+ * \ingroup CXX11_Tensor_Module
+ *
+ * \brief Tensor block writer class.
+ *
+ * This class is responsible for writing a tensor block.
+ *
+ */
+template <typename Scalar, typename StorageIndex, int NumDims, int Layout>
+class TensorBlockWriter : public TensorBlockIO<Scalar, StorageIndex, NumDims,
+ Layout, /*BlockRead=*/false> {
+ public:
+ typedef TensorBlock<Scalar, StorageIndex, NumDims, Layout> Block;
+ typedef TensorBlockIO<Scalar, StorageIndex, NumDims, Layout, /*BlockRead=*/false> Base;
+
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Run(
+ const Block& block, Scalar* dst_data) {
+ array<StorageIndex, NumDims> tensor_to_block_dim_map;
+ for (int i = 0; i < NumDims; ++i) {
+ tensor_to_block_dim_map[i] = i;
+ }
+ Base::Copy(block, block.first_coeff_index(), tensor_to_block_dim_map,
+ block.tensor_strides(), block.data(), dst_data);
+ }
+
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Run(
+ const Block& block, StorageIndex first_coeff_index,
+ const array<StorageIndex, NumDims>& tensor_to_block_dim_map,
+ const array<StorageIndex, NumDims>& tensor_strides, Scalar* dst_data) {
+ Base::Copy(block, first_coeff_index, tensor_to_block_dim_map,
+ tensor_strides, block.data(), dst_data);
+ }
+};
+
+/**
+ * \class TensorBlockCwiseUnaryOp
+ * \ingroup CXX11_Tensor_Module
+ *
+ * \brief Carries out a cwise unary op on a number of coefficients.
+ *
+ * This class reads strided input from the argument, and writes the
+ * result of the cwise unary op to the strided output array.
+ *
+ */
+struct TensorBlockCwiseUnaryOp {
+ template <typename StorageIndex, typename UnaryFunctor,
+ typename OutputScalar, typename InputScalar>
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Run(
+ const UnaryFunctor& functor, const StorageIndex num_coeff,
+ const StorageIndex output_index, const StorageIndex output_stride,
+ OutputScalar* output_data, const StorageIndex input_index,
+ const StorageIndex input_stride, const InputScalar* input_data) {
+ typedef const Eigen::Array<InputScalar, Dynamic, 1> Input;
+ typedef Eigen::Array<OutputScalar, Dynamic, 1> Output;
+
+ typedef Eigen::Map<Input, 0, InnerStride<> > InputMap;
+ typedef Eigen::Map<Output, 0, InnerStride<> > OutputMap;
+
+ const InputScalar* input_base = &input_data[input_index];
+ OutputScalar* output_base = &output_data[output_index];
+
+ const InputMap input(input_base, num_coeff, InnerStride<>(input_stride));
+ OutputMap output(output_base, num_coeff, InnerStride<>(output_stride));
+
+ output = Eigen::CwiseUnaryOp<UnaryFunctor, InputMap>(input, functor);
+ }
+};
+
+/**
+ * \class TensorBlockCwiseUnaryIO
+ * \ingroup CXX11_Tensor_Module
+ *
+ * \brief Tensor block IO class for carrying out cwise unary ops.
+ *
+ * This class carries out the unary op on given blocks.
+ */
+template <typename UnaryFunctor, typename StorageIndex, typename OutputScalar,
+ int NumDims, int Layout>
+struct TensorBlockCwiseUnaryIO {
+ typedef typename internal::TensorBlock<OutputScalar, StorageIndex, NumDims,
+ Layout>::Dimensions Dimensions;
+
+ struct BlockIteratorState {
+ StorageIndex output_stride, output_span;
+ StorageIndex input_stride, input_span;
+ StorageIndex size, count;
+ };
+
+ template <typename InputScalar>
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Run(
+ const UnaryFunctor& functor, const Dimensions& block_sizes,
+ const Dimensions& block_strides, OutputScalar* output_data,
+ const array<StorageIndex, NumDims>& input_strides,
+ const InputScalar* input_data) {
+ // Find the innermost dimension whose size is not 1. This is the effective
+ // inner dim. If all dimensions are of size 1, fall back to using the actual
+ // innermost dim to avoid out-of-bound access.
+ int num_size_one_inner_dims = 0;
+ for (int i = 0; i < NumDims; ++i) {
+ const int dim = cond<Layout>()(i, NumDims - i - 1);
+ if (block_sizes[dim] != 1) {
+ num_size_one_inner_dims = i;
+ break;
+ }
+ }
+ // Calculate strides and dimensions.
+ const int inner_dim =
+ NumDims == 0 ? 1
+ : cond<Layout>()(num_size_one_inner_dims,
+ NumDims - num_size_one_inner_dims - 1);
+ StorageIndex inner_dim_size = NumDims == 0 ? 1 : block_sizes[inner_dim];
+ for (int i = num_size_one_inner_dims + 1; i < NumDims; ++i) {
+ const int dim = cond<Layout>()(i, NumDims - i - 1);
+ // Merge multiple inner dims into one for larger inner dim size (i.e.
+ // fewer calls to TensorBlockCwiseUnaryOp::Run()).
+ if (inner_dim_size == block_strides[dim] &&
+ block_strides[dim] == input_strides[dim]) {
+ inner_dim_size *= block_sizes[dim];
+ ++num_size_one_inner_dims;
+ } else {
+ break;
+ }
+ }
+
+ StorageIndex output_index = 0, input_index = 0;
+
+ const StorageIndex output_stride =
+ NumDims == 0 ? 1 : block_strides[inner_dim];
+ const StorageIndex input_stride =
+ NumDims == 0 ? 1 : input_strides[inner_dim];
+
+ const int at_least_1_dim = NumDims <= 1 ? 1 : NumDims - 1;
+ array<BlockIteratorState, at_least_1_dim> block_iter_state;
+
+ // Initialize block iterator state. Squeeze away any dimension of size 1.
+ int num_squeezed_dims = 0;
+ for (int i = num_size_one_inner_dims; i < NumDims - 1; ++i) {
+ const int dim = cond<Layout>()(i + 1, NumDims - i - 2);
+ const StorageIndex size = block_sizes[dim];
+ if (size == 1) {
+ continue;
+ }
+ BlockIteratorState& state = block_iter_state[num_squeezed_dims];
+ state.output_stride = block_strides[dim];
+ state.input_stride = input_strides[dim];
+ state.size = size;
+ state.output_span = state.output_stride * (size - 1);
+ state.input_span = state.input_stride * (size - 1);
+ state.count = 0;
+ ++num_squeezed_dims;
+ }
+
+ // Compute cwise unary op.
+ const StorageIndex block_total_size =
+ NumDims == 0 ? 1 : block_sizes.TotalSize();
+ for (StorageIndex i = 0; i < block_total_size; i += inner_dim_size) {
+ TensorBlockCwiseUnaryOp::Run(functor, inner_dim_size, output_index,
+ output_stride, output_data, input_index,
+ input_stride, input_data);
+ // Update index.
+ for (int j = 0; j < num_squeezed_dims; ++j) {
+ BlockIteratorState& state = block_iter_state[j];
+ if (++state.count < state.size) {
+ output_index += state.output_stride;
+ input_index += state.input_stride;
+ break;
+ }
+ state.count = 0;
+ output_index -= state.output_span;
+ input_index -= state.input_span;
+ }
+ }
+ }
+};
+
+/**
+ * \class TensorBlockCwiseBinaryOp
+ * \ingroup CXX11_Tensor_Module
+ *
+ * \brief Carries out a cwise binary op on a number of coefficients.
+ *
+ * This class reads strided inputs from left and right operands, and writes the
+ * result of the cwise binary op to the strided output array.
+ *
+ */
+struct TensorBlockCwiseBinaryOp {
+ template <typename StorageIndex, typename BinaryFunctor, typename OutputScalar,
+ typename LeftScalar, typename RightScalar>
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Run(
+ const BinaryFunctor& functor, const StorageIndex num_coeff,
+ const StorageIndex output_index, const StorageIndex output_stride,
+ OutputScalar* output_data, const StorageIndex left_index,
+ const StorageIndex left_stride, const LeftScalar* left_data,
+ const StorageIndex right_index, const StorageIndex right_stride,
+ const RightScalar* right_data) {
+ typedef const Array<LeftScalar, Dynamic, 1> Lhs;
+ typedef const Array<RightScalar, Dynamic, 1> Rhs;
+ typedef Array<OutputScalar, Dynamic, 1> Out;
+
+ typedef Map<Lhs, 0, InnerStride<> > LhsMap;
+ typedef Map<Rhs, 0, InnerStride<> > RhsMap;
+ typedef Map<Out, 0, InnerStride<> > OutMap;
+
+ const LeftScalar* lhs_base = &left_data[left_index];
+ const RightScalar* rhs_base = &right_data[right_index];
+ OutputScalar* out_base = &output_data[output_index];
+
+ const LhsMap lhs(lhs_base, num_coeff, InnerStride<>(left_stride));
+ const RhsMap rhs(rhs_base, num_coeff, InnerStride<>(right_stride));
+ OutMap out(out_base, num_coeff, InnerStride<>(output_stride));
+
+ out = CwiseBinaryOp<BinaryFunctor, LhsMap, RhsMap>(lhs, rhs, functor);
+ }
+};
+
+/**
+ * \class TensorBlockCwiseBinaryIO
+ * \ingroup CXX11_Tensor_Module
+ *
+ * \brief Tensor block IO class for carrying out cwise binary ops.
+ *
+ * This class carries out the binary op on given blocks.
+ *
+ */
+template <typename BinaryFunctor, typename StorageIndex, typename OutputScalar,
+ int NumDims, int Layout>
+struct TensorBlockCwiseBinaryIO {
+ typedef typename TensorBlock<OutputScalar, StorageIndex, NumDims, Layout>::Dimensions Dimensions;
+
+ struct BlockIteratorState {
+ StorageIndex output_stride, output_span;
+ StorageIndex left_stride, left_span;
+ StorageIndex right_stride, right_span;
+ StorageIndex size, count;
+ };
+
+ template <typename LeftScalar, typename RightScalar>
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Run(
+ const BinaryFunctor& functor, const Dimensions& block_sizes,
+ const Dimensions& block_strides, OutputScalar* output_data,
+ const array<StorageIndex, NumDims>& left_strides,
+ const LeftScalar* left_data,
+ const array<StorageIndex, NumDims>& right_strides,
+ const RightScalar* right_data) {
+ // Find the innermost dimension whose size is not 1. This is the effective
+ // inner dim. If all dimensions are of size 1, fall back to using the actual
+ // innermost dim to avoid out-of-bound access.
+ int num_size_one_inner_dims = 0;
+ for (int i = 0; i < NumDims; ++i) {
+ const int dim = cond<Layout>()(i, NumDims - i - 1);
+ if (block_sizes[dim] != 1) {
+ num_size_one_inner_dims = i;
+ break;
+ }
+ }
+ // Calculate strides and dimensions.
+ const int inner_dim =
+ NumDims == 0 ? 1
+ : cond<Layout>()(num_size_one_inner_dims,
+ NumDims - num_size_one_inner_dims - 1);
+ StorageIndex inner_dim_size = NumDims == 0 ? 1 : block_sizes[inner_dim];
+ for (int i = num_size_one_inner_dims + 1; i < NumDims; ++i) {
+ const int dim = cond<Layout>()(i, NumDims - i - 1);
+ // Merge multiple inner dims into one for larger inner dim size (i.e.
+ // fewer calls to TensorBlockCwiseBinaryOp::Run()).
+ if (inner_dim_size == block_strides[dim] &&
+ block_strides[dim] == left_strides[dim] &&
+ block_strides[dim] == right_strides[dim]) {
+ inner_dim_size *= block_sizes[dim];
+ ++num_size_one_inner_dims;
+ } else {
+ break;
+ }
+ }
+
+ StorageIndex output_index = 0, left_index = 0, right_index = 0;
+ const StorageIndex output_stride =
+ NumDims == 0 ? 1 : block_strides[inner_dim];
+ const StorageIndex left_stride = NumDims == 0 ? 1 : left_strides[inner_dim];
+ const StorageIndex right_stride =
+ NumDims == 0 ? 1 : right_strides[inner_dim];
+
+ const int at_least_1_dim = NumDims <= 1 ? 1 : NumDims - 1;
+ array<BlockIteratorState, at_least_1_dim> block_iter_state;
+
+ // Initialize block iterator state. Squeeze away any dimension of size 1.
+ int num_squeezed_dims = 0;
+ for (int i = num_size_one_inner_dims; i < NumDims - 1; ++i) {
+ const int dim = cond<Layout>()(i + 1, NumDims - i - 2);
+ const StorageIndex size = block_sizes[dim];
+ if (size == 1) {
+ continue;
+ }
+ BlockIteratorState& state = block_iter_state[num_squeezed_dims];
+ state.output_stride = block_strides[dim];
+ state.left_stride = left_strides[dim];
+ state.right_stride = right_strides[dim];
+ state.size = size;
+ state.output_span = state.output_stride * (size - 1);
+ state.left_span = state.left_stride * (size - 1);
+ state.right_span = state.right_stride * (size - 1);
+ state.count = 0;
+ ++num_squeezed_dims;
+ }
+
+ // Compute cwise binary op.
+ const StorageIndex block_total_size =
+ NumDims == 0 ? 1 : block_sizes.TotalSize();
+ for (StorageIndex i = 0; i < block_total_size; i += inner_dim_size) {
+ TensorBlockCwiseBinaryOp::Run(functor, inner_dim_size, output_index,
+ output_stride, output_data, left_index,
+ left_stride, left_data, right_index,
+ right_stride, right_data);
+ // Update index.
+ for (int j = 0; j < num_squeezed_dims; ++j) {
+ BlockIteratorState& state = block_iter_state[j];
+ if (++state.count < state.size) {
+ output_index += state.output_stride;
+ left_index += state.left_stride;
+ right_index += state.right_stride;
+ break;
+ }
+ state.count = 0;
+ output_index -= state.output_span;
+ left_index -= state.left_span;
+ right_index -= state.right_span;
+ }
+ }
+ }
+};
+
+/**
+ * \class TensorBlockView
+ * \ingroup CXX11_Tensor_Module
+ *
+ * \brief Read-only view into a block of data.
+ *
+ * This class provides read-only access to a block of data in impl. It may need
+ * to allocate space for holding the intermediate result.
+ *
+ */
+template <class ArgType, class Device>
+struct TensorBlockView {
+ typedef TensorEvaluator<ArgType, Device> Impl;
+ typedef typename Impl::Index StorageIndex;
+ typedef typename remove_const<typename Impl::Scalar>::type Scalar;
+ static const int NumDims = array_size<typename Impl::Dimensions>::value;
+ typedef DSizes<StorageIndex, NumDims> Dimensions;
+
+ // Constructs a TensorBlockView for `impl`. `block` is only used for
+ // specifying the start offset, shape, and strides of the block.
+ template <typename OtherTensorBlock>
+ TensorBlockView(const Device& device,
+ const TensorEvaluator<ArgType, Device>& impl,
+ const OtherTensorBlock& block)
+ : m_device(device),
+ m_block_sizes(block.block_sizes()),
+ m_data(NULL),
+ m_allocated_data(NULL) {
+ if (Impl::RawAccess && impl.data() != NULL) {
+ m_data = impl.data() + block.first_coeff_index();
+ m_block_strides = block.tensor_strides();
+ } else {
+ // Actually make a copy.
+
+ // TODO(wuke): This sometimes puts a lot of pressure on the heap allocator.
+ // Consider allowing ops to request additional temporary block memory in
+ // TensorOpResourceRequirements.
+ m_allocated_data = static_cast<Scalar*>(
+ m_device.allocate(m_block_sizes.TotalSize() * sizeof(Scalar)));
+ m_data = m_allocated_data;
+ if (NumDims > 0) {
+ if (static_cast<int>(Impl::Layout) == static_cast<int>(ColMajor)) {
+ m_block_strides[0] = 1;
+ for (int i = 1; i < NumDims; ++i) {
+ m_block_strides[i] = m_block_strides[i - 1] * m_block_sizes[i - 1];
+ }
+ } else {
+ m_block_strides[NumDims - 1] = 1;
+ for (int i = NumDims - 2; i >= 0; --i) {
+ m_block_strides[i] = m_block_strides[i + 1] * m_block_sizes[i + 1];
+ }
+ }
+ }
+ TensorBlock<Scalar, StorageIndex, NumDims, Impl::Layout> input_block(
+ block.first_coeff_index(), m_block_sizes, m_block_strides,
+ block.tensor_strides(), m_allocated_data);
+ impl.block(&input_block);
+ }
+ }
+
+ ~TensorBlockView() {
+ if (m_allocated_data != NULL) {
+ m_device.deallocate(m_allocated_data);
+ }
+ }
+
+ const Dimensions& block_sizes() const { return m_block_sizes; }
+ const Dimensions& block_strides() const { return m_block_strides; }
+ const Scalar* data() const { return m_data; }
+
+ private:
+ const Device& m_device;
+ Dimensions m_block_sizes, m_block_strides;
+ const Scalar* m_data; // Not owned.
+ Scalar* m_allocated_data; // Owned.
+};
+
+/**
+ * \class TensorBlockMapper
+ * \ingroup CXX11_Tensor_Module
+ *
+ * \brief Tensor block mapper class.
+ *
+ * This class is responsible for iterating over the blocks of a tensor.
+ */
+template <typename Scalar, typename StorageIndex, int NumDims, int Layout>
+class TensorBlockMapper {
+ public:
+ typedef TensorBlock<Scalar, StorageIndex, NumDims, Layout> Block;
+ typedef DSizes<StorageIndex, NumDims> Dimensions;
+
+ TensorBlockMapper(const Dimensions& dims,
+ const TensorBlockShapeType block_shape,
+ Index min_target_size)
+ : m_dimensions(dims),
+ m_block_dim_sizes(BlockDimensions(dims, block_shape, internal::convert_index<StorageIndex>(min_target_size))) {
+ // Calculate block counts by dimension and total block count.
+ DSizes<StorageIndex, NumDims> block_count;
+ for (Index i = 0; i < block_count.rank(); ++i) {
+ block_count[i] = divup(m_dimensions[i], m_block_dim_sizes[i]);
+ }
+ m_total_block_count = array_prod(block_count);
+
+ // Calculate block strides (used for enumerating blocks).
+ if (NumDims > 0) {
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ m_block_strides[0] = 1;
+ m_tensor_strides[0] = 1;
+ for (int i = 1; i < NumDims; ++i) {
+ m_block_strides[i] = m_block_strides[i - 1] * block_count[i - 1];
+ m_tensor_strides[i] = m_tensor_strides[i - 1] * m_dimensions[i - 1];
+ }
+ } else {
+ m_block_strides[NumDims - 1] = 1;
+ m_tensor_strides[NumDims - 1] = 1;
+ for (int i = NumDims - 2; i >= 0; --i) {
+ m_block_strides[i] = m_block_strides[i + 1] * block_count[i + 1];
+ m_tensor_strides[i] = m_tensor_strides[i + 1] * m_dimensions[i + 1];
+ }
+ }
+ }
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Block
+ GetBlockForIndex(StorageIndex block_index, Scalar* data) const {
+ StorageIndex first_coeff_index = 0;
+ DSizes<StorageIndex, NumDims> coords;
+ DSizes<StorageIndex, NumDims> sizes;
+ DSizes<StorageIndex, NumDims> strides;
+ if (NumDims > 0) {
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ for (int i = NumDims - 1; i > 0; --i) {
+ const StorageIndex idx = block_index / m_block_strides[i];
+ coords[i] = idx * m_block_dim_sizes[i];
+ sizes[i] =
+ numext::mini((m_dimensions[i] - coords[i]), m_block_dim_sizes[i]);
+ block_index -= idx * m_block_strides[i];
+ first_coeff_index += coords[i] * m_tensor_strides[i];
+ }
+ coords[0] = block_index * m_block_dim_sizes[0];
+ sizes[0] =
+ numext::mini((m_dimensions[0] - coords[0]), m_block_dim_sizes[0]);
+ first_coeff_index += coords[0] * m_tensor_strides[0];
+
+ strides[0] = 1;
+ for (int i = 1; i < NumDims; ++i) {
+ strides[i] = strides[i - 1] * sizes[i - 1];
+ }
+ } else {
+ for (int i = 0; i < NumDims - 1; ++i) {
+ const StorageIndex idx = block_index / m_block_strides[i];
+ coords[i] = idx * m_block_dim_sizes[i];
+ sizes[i] =
+ numext::mini((m_dimensions[i] - coords[i]), m_block_dim_sizes[i]);
+ block_index -= idx * m_block_strides[i];
+ first_coeff_index += coords[i] * m_tensor_strides[i];
+ }
+ coords[NumDims - 1] = block_index * m_block_dim_sizes[NumDims - 1];
+ sizes[NumDims - 1] =
+ numext::mini((m_dimensions[NumDims - 1] - coords[NumDims - 1]),
+ m_block_dim_sizes[NumDims - 1]);
+ first_coeff_index +=
+ coords[NumDims - 1] * m_tensor_strides[NumDims - 1];
+
+ strides[NumDims - 1] = 1;
+ for (int i = NumDims - 2; i >= 0; --i) {
+ strides[i] = strides[i + 1] * sizes[i + 1];
+ }
+ }
+ }
+
+ return Block(first_coeff_index, sizes, strides, m_tensor_strides, data);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE StorageIndex total_block_count() const {
+ return m_total_block_count;
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE StorageIndex
+ block_dims_total_size() const {
+ return m_block_dim_sizes.TotalSize();
+ }
+
+ private:
+ static Dimensions BlockDimensions(const Dimensions& tensor_dims,
+ const TensorBlockShapeType block_shape,
+ StorageIndex min_target_size) {
+ min_target_size = numext::maxi<StorageIndex>(1, min_target_size);
+
+ // If the tensor fully fits into the target size, we'll treat it as a single block.
+ Dimensions block_dim_sizes = tensor_dims;
+
+ if (tensor_dims.TotalSize() == 0) {
+ // Corner case: one of the dimensions is zero. The logic below is too
+ // complex to handle this case in full generality, so just use unit block
+ // sizes. Note: we must not yield blocks with zero dimensions (a recipe for
+ // overflows/underflows, divisions by zero and NaNs later).
+ for (int i = 0; i < NumDims; ++i) {
+ block_dim_sizes[i] = 1;
+ }
+ } else if (block_dim_sizes.TotalSize() > min_target_size) {
+ if (block_shape == kUniformAllDims) {
+ // The tensor does not fit within the 'min_target_size' budget: calculate
+ // block dimension sizes based on a "square" per-dimension size target.
+ const StorageIndex dim_size_target = internal::convert_index<StorageIndex>(
+ std::pow(static_cast<float>(min_target_size),
+ 1.0f / static_cast<float>(block_dim_sizes.rank())));
+ for (Index i = 0; i < block_dim_sizes.rank(); ++i) {
+ // TODO(andydavis) Adjust the innermost 'block_dim_size' to make it
+ // a multiple of the packet size. Note that reducing
+ // 'block_dim_size' in this manner can increase the number of
+ // blocks, and so will amplify any per-block overhead.
+ block_dim_sizes[i] = numext::mini(dim_size_target, tensor_dims[i]);
+ }
+ // Add any unallocated coefficients to the inner dimension(s).
+ StorageIndex total_size = block_dim_sizes.TotalSize();
+ for (int i = 0; i < NumDims; ++i) {
+ const int dim = cond<Layout>()(i, NumDims - i - 1);
+ if (block_dim_sizes[dim] < tensor_dims[dim]) {
+ const StorageIndex total_size_other_dims =
+ total_size / block_dim_sizes[dim];
+ const StorageIndex alloc_avail =
+ divup<StorageIndex>(min_target_size, total_size_other_dims);
+ if (alloc_avail == block_dim_sizes[dim]) {
+ // Insufficient excess coefficients to allocate.
+ break;
+ }
+ block_dim_sizes[dim] = numext::mini(tensor_dims[dim], alloc_avail);
+ total_size = total_size_other_dims * block_dim_sizes[dim];
+ }
+ }
+ } else if (block_shape == kSkewedInnerDims) {
+ StorageIndex coeff_to_allocate = min_target_size;
+ for (int i = 0; i < NumDims; ++i) {
+ const int dim = cond<Layout>()(i, NumDims - i - 1);
+ block_dim_sizes[dim] =
+ numext::mini(coeff_to_allocate, tensor_dims[dim]);
+ coeff_to_allocate = divup(
+ coeff_to_allocate,
+ numext::maxi(static_cast<StorageIndex>(1), block_dim_sizes[dim]));
+ }
+ eigen_assert(coeff_to_allocate == 1);
+ } else {
+ eigen_assert(false); // someone added a new block shape type
+ }
+ }
+
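+ // Worked example (illustrative): for a 2-D ColMajor tensor of size 100x100
+ // and min_target_size == 256,
+ // - kUniformAllDims targets pow(256, 1/2) == 16 coefficients per
+ // dimension, i.e. 16x16 blocks of 256 coefficients each;
+ // - kSkewedInnerDims gives the whole budget to the inner dimension first
+ // (100), then divup(256, 100) == 3 to the next one, i.e. 100x3 blocks.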
+ eigen_assert(
+ block_dim_sizes.TotalSize() >=
+ numext::mini<Index>(min_target_size, tensor_dims.TotalSize()));
+
+ return block_dim_sizes;
+ }
+
+ Dimensions m_dimensions;
+ Dimensions m_block_dim_sizes;
+ Dimensions m_block_strides;
+ Dimensions m_tensor_strides;
+ StorageIndex m_total_block_count;
+};
+
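+// Illustrative usage sketch (not part of the patch): enumerating every block
+// of a tensor with a TensorBlockMapper. The names `dims`, `scratch` and
+// `evaluator` are assumptions made for this example.
+//
+//   TensorBlockMapper<float, Eigen::Index, 2, ColMajor> mapper(
+//       dims, kSkewedInnerDims, /*min_target_size=*/1024);
+//   for (Eigen::Index i = 0; i < mapper.total_block_count(); ++i) {
+//     // `scratch` must hold at least mapper.block_dims_total_size() floats.
+//     auto block = mapper.GetBlockForIndex(i, scratch);
+//     evaluator.block(&block);  // a block-capable evaluator fills `scratch`
+//   }
+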
+/**
+ * \class TensorSliceBlockMapper
+ * \ingroup CXX11_Tensor_Module
+ *
+ * \brief Tensor slice block mapper class.
+ *
+ * This class is responsible for iterating over the blocks of
+ * a slice of a tensor. Supports shuffling of the block strides
+ * for callers that want to reduce strides for dimensions to be
+ * processed together.
+ *
+ */
+template <typename Scalar, typename StorageIndex, int NumDims, int Layout>
+class TensorSliceBlockMapper {
+ public:
+ typedef TensorBlock<Scalar, StorageIndex, NumDims, Layout> Block;
+ typedef DSizes<StorageIndex, NumDims> Dimensions;
+
+ TensorSliceBlockMapper(const Dimensions& tensor_dims,
+ const Dimensions& tensor_slice_offsets,
+ const Dimensions& tensor_slice_extents,
+ const Dimensions& block_dim_sizes,
+ const Dimensions& block_stride_order)
+ : m_tensor_dimensions(tensor_dims),
+ m_tensor_slice_offsets(tensor_slice_offsets),
+ m_tensor_slice_extents(tensor_slice_extents),
+ m_block_dim_sizes(block_dim_sizes),
+ m_block_stride_order(block_stride_order),
+ m_total_block_count(1) {
+ // Calculate block counts by dimension and total block count.
+ DSizes<StorageIndex, NumDims> block_count;
+ for (Index i = 0; i < block_count.rank(); ++i) {
+ block_count[i] = divup(m_tensor_slice_extents[i], m_block_dim_sizes[i]);
+ }
+ m_total_block_count = array_prod(block_count);
+
+ // Calculate block strides (used for enumerating blocks).
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ m_block_strides[0] = 1;
+ m_tensor_strides[0] = 1;
+ for (int i = 1; i < NumDims; ++i) {
+ m_block_strides[i] = m_block_strides[i - 1] * block_count[i - 1];
+ m_tensor_strides[i] =
+ m_tensor_strides[i - 1] * m_tensor_dimensions[i - 1];
+ }
+ } else {
+ m_block_strides[NumDims - 1] = 1;
+ m_tensor_strides[NumDims - 1] = 1;
+ for (int i = NumDims - 2; i >= 0; --i) {
+ m_block_strides[i] = m_block_strides[i + 1] * block_count[i + 1];
+ m_tensor_strides[i] =
+ m_tensor_strides[i + 1] * m_tensor_dimensions[i + 1];
+ }
+ }
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Block
+ GetBlockForIndex(StorageIndex block_index, Scalar* data) const {
+ StorageIndex first_coeff_index = 0;
+ DSizes<StorageIndex, NumDims> coords;
+ DSizes<StorageIndex, NumDims> sizes;
+ DSizes<StorageIndex, NumDims> strides;
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ for (int i = NumDims - 1; i > 0; --i) {
+ const Index idx = block_index / m_block_strides[i];
+ coords[i] = m_tensor_slice_offsets[i] + idx * m_block_dim_sizes[i];
+ sizes[i] = numext::mini(
+ m_tensor_slice_offsets[i] + m_tensor_slice_extents[i] - coords[i],
+ m_block_dim_sizes[i]);
+ block_index -= idx * m_block_strides[i];
+ first_coeff_index += coords[i] * m_tensor_strides[i];
+ }
+ coords[0] =
+ m_tensor_slice_offsets[0] + block_index * m_block_dim_sizes[0];
+ sizes[0] = numext::mini(
+ m_tensor_slice_offsets[0] + m_tensor_slice_extents[0] - coords[0],
+ m_block_dim_sizes[0]);
+ first_coeff_index += coords[0] * m_tensor_strides[0];
+
+ StorageIndex prev_dim = m_block_stride_order[0];
+ strides[prev_dim] = 1;
+ for (int i = 1; i < NumDims; ++i) {
+ const StorageIndex curr_dim = m_block_stride_order[i];
+ strides[curr_dim] = strides[prev_dim] * sizes[prev_dim];
+ prev_dim = curr_dim;
+ }
+ } else {
+ for (int i = 0; i < NumDims - 1; ++i) {
+ const StorageIndex idx = block_index / m_block_strides[i];
+ coords[i] = m_tensor_slice_offsets[i] + idx * m_block_dim_sizes[i];
+ sizes[i] = numext::mini(
+ m_tensor_slice_offsets[i] + m_tensor_slice_extents[i] - coords[i],
+ m_block_dim_sizes[i]);
+ block_index -= idx * m_block_strides[i];
+ first_coeff_index += coords[i] * m_tensor_strides[i];
+ }
+ coords[NumDims - 1] = m_tensor_slice_offsets[NumDims - 1] +
+ block_index * m_block_dim_sizes[NumDims - 1];
+ sizes[NumDims - 1] = numext::mini(
+ m_tensor_slice_offsets[NumDims - 1] +
+ m_tensor_slice_extents[NumDims - 1] - coords[NumDims - 1],
+ m_block_dim_sizes[NumDims - 1]);
+ first_coeff_index += coords[NumDims - 1] * m_tensor_strides[NumDims - 1];
+
+ StorageIndex prev_dim = m_block_stride_order[NumDims - 1];
+ strides[prev_dim] = 1;
+ for (int i = NumDims - 2; i >= 0; --i) {
+ const StorageIndex curr_dim = m_block_stride_order[i];
+ strides[curr_dim] = strides[prev_dim] * sizes[prev_dim];
+ prev_dim = curr_dim;
+ }
+ }
+
+ return Block(first_coeff_index, sizes, strides, m_tensor_strides, data);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE StorageIndex total_block_count() const {
+ return m_total_block_count;
+ }
+
+ private:
+ Dimensions m_tensor_dimensions;
+ Dimensions m_tensor_slice_offsets;
+ Dimensions m_tensor_slice_extents;
+ Dimensions m_tensor_strides;
+ Dimensions m_block_dim_sizes;
+ Dimensions m_block_stride_order;
+ Dimensions m_block_strides;
+ StorageIndex m_total_block_count;
+};
+
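+// Illustrative usage sketch (not part of the patch): mapping blocks over a
+// slice of a tensor. `tensor_dims`, `offsets`, `extents`, `block_sizes`,
+// `scratch` and the natural stride order below are assumptions made for this
+// example.
+//
+//   DSizes<Eigen::Index, 2> stride_order(0, 1);  // keep the natural order
+//   TensorSliceBlockMapper<float, Eigen::Index, 2, ColMajor> mapper(
+//       tensor_dims, offsets, extents, block_sizes, stride_order);
+//   for (Eigen::Index i = 0; i < mapper.total_block_count(); ++i) {
+//     auto block = mapper.GetBlockForIndex(i, scratch);
+//     // block.first_coeff_index() indexes into the *full* tensor.
+//   }
+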
+} // namespace internal
+
+} // namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_BLOCK_H
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorBroadcasting.h b/unsupported/Eigen/CXX11/src/Tensor/TensorBroadcasting.h
index 23a74460e..c102a43fb 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorBroadcasting.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorBroadcasting.h
@@ -31,6 +31,7 @@ struct traits<TensorBroadcastingOp<Broadcast, XprType> > : public traits<XprType
typedef typename remove_reference<Nested>::type _Nested;
static const int NumDimensions = XprTraits::NumDimensions;
static const int Layout = XprTraits::Layout;
+ typedef typename XprTraits::PointerType PointerType;
};
template<typename Broadcast, typename XprType>
@@ -103,27 +104,52 @@ struct TensorEvaluator<const TensorBroadcastingOp<Broadcast, ArgType>, Device>
typedef typename TensorEvaluator<ArgType, Device>::Dimensions InputDimensions;
typedef typename XprType::CoeffReturnType CoeffReturnType;
typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
- static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
+ static const int PacketSize = PacketType<CoeffReturnType, Device>::size;
+ bool isCopy, nByOne, oneByN;
enum {
- IsAligned = true,
- PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
- Layout = TensorEvaluator<ArgType, Device>::Layout,
- RawAccess = false
+ IsAligned = true,
+ PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
+ BlockAccess = TensorEvaluator<ArgType, Device>::BlockAccess,
+ PreferBlockAccess = true,
+ Layout = TensorEvaluator<ArgType, Device>::Layout,
+ RawAccess = false
};
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
- : m_broadcast(op.broadcast()),m_impl(op.expression(), device)
+ typedef typename internal::remove_const<Scalar>::type ScalarNoConst;
+
+ // Block based access to the XprType (input) tensor.
+ typedef internal::TensorBlock<ScalarNoConst, Index, NumDims, Layout>
+ TensorBlock;
+ typedef internal::TensorBlockReader<ScalarNoConst, Index, NumDims, Layout>
+ TensorBlockReader;
+
+ // We do block-based broadcasting using a trick with 2x the tensor rank and
+ // 0 strides. See the block() method implementation for details.
+ typedef DSizes<Index, 2 * NumDims> BroadcastDimensions;
+ typedef internal::TensorBlock<ScalarNoConst, Index, 2 * NumDims, Layout>
+ BroadcastTensorBlock;
+ typedef internal::TensorBlockReader<ScalarNoConst, Index, 2 * NumDims, Layout>
+ BroadcastTensorBlockReader;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op,
+ const Device& device)
+ : isCopy(false), nByOne(false), oneByN(false),
+ m_device(device), m_broadcast(op.broadcast()), m_impl(op.expression(), device)
{
+
// The broadcasting op doesn't change the rank of the tensor. One can't broadcast a scalar
// and store the result in a scalar. Instead one should reshape the scalar into an N-D
// tensor with N >= 1 containing a single element first, and then broadcast.
EIGEN_STATIC_ASSERT((NumDims > 0), YOU_MADE_A_PROGRAMMING_MISTAKE);
const InputDimensions& input_dims = m_impl.dimensions();
- const Broadcast& broadcast = op.broadcast();
+ isCopy = true;
for (int i = 0; i < NumDims; ++i) {
eigen_assert(input_dims[i] > 0);
- m_dimensions[i] = input_dims[i] * broadcast[i];
+ m_dimensions[i] = input_dims[i] * m_broadcast[i];
+ if (m_broadcast[i] != 1) {
+ isCopy = false;
+ }
}
if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
@@ -141,6 +167,40 @@ struct TensorEvaluator<const TensorBroadcastingOp<Broadcast, ArgType>, Device>
m_outputStrides[i] = m_outputStrides[i+1] * m_dimensions[i+1];
}
}
+
+ if (input_dims[0] == 1) {
+ oneByN = true;
+ for (int i = 1; i < NumDims; ++i) {
+ if (m_broadcast[i] != 1) {
+ oneByN = false;
+ break;
+ }
+ }
+ } else if (input_dims[NumDims-1] == 1) {
+ nByOne = true;
+ for (int i = 0; i < NumDims-1; ++i) {
+ if (m_broadcast[i] != 1) {
+ nByOne = false;
+ break;
+ }
+ }
+ }
+
+ // Handle special formats like NCHW, where the input shape is '[1, N..., 1]'
+ // and the broadcast shape is '[N, 1..., N]'.
+ if (!oneByN && !nByOne) {
+ if (input_dims[0] == 1 && input_dims[NumDims-1] == 1 && NumDims > 2) {
+ nByOne = true;
+ oneByN = true;
+ for (int i = 1; i < NumDims-1; ++i) {
+ if (m_broadcast[i] != 1) {
+ nByOne = false;
+ oneByN = false;
+ break;
+ }
+ }
+ }
+ }
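+
+ // Examples of the special cases detected above (illustrative):
+ // - input dims [3, 4] with broadcast [1, 1] -> isCopy;
+ // - input dims [1, 4] with broadcast [5, 1] -> oneByN;
+ // - input dims [3, 1] with broadcast [1, 5] -> nByOne;
+ // - input dims [1, 4, 1] with broadcast [5, 1, 6] -> both oneByN and
+ // nByOne (the '[1, N..., 1]' format handled just above).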
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
@@ -161,15 +221,22 @@ struct TensorEvaluator<const TensorBroadcastingOp<Broadcast, ArgType>, Device>
}
if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
- return coeffColMajor(index);
+ if (isCopy) {
+ return m_impl.coeff(index);
+ } else {
+ return coeffColMajor(index);
+ }
} else {
- return coeffRowMajor(index);
+ if (isCopy) {
+ return m_impl.coeff(index);
+ } else {
+ return coeffRowMajor(index);
+ }
}
}
// TODO: attempt to speed this up. The integer divisions and modulo are slow
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeffColMajor(Index index) const
- {
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index indexColMajor(Index index) const {
Index inputIndex = 0;
for (int i = NumDims - 1; i > 0; --i) {
const Index idx = index / m_outputStrides[i];
@@ -195,11 +262,15 @@ struct TensorEvaluator<const TensorBroadcastingOp<Broadcast, ArgType>, Device>
inputIndex += (index % m_impl.dimensions()[0]);
}
}
- return m_impl.coeff(inputIndex);
+ return inputIndex;
}
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeffRowMajor(Index index) const
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeffColMajor(Index index) const
{
+ return m_impl.coeff(indexColMajor(index));
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index indexRowMajor(Index index) const {
Index inputIndex = 0;
for (int i = 0; i < NumDims - 1; ++i) {
const Index idx = index / m_outputStrides[i];
@@ -215,17 +286,22 @@ struct TensorEvaluator<const TensorBroadcastingOp<Broadcast, ArgType>, Device>
}
index -= idx * m_outputStrides[i];
}
- if (internal::index_statically_eq<Broadcast>(NumDims-1, 1)) {
- eigen_assert(index < m_impl.dimensions()[NumDims-1]);
+ if (internal::index_statically_eq<Broadcast>(NumDims - 1, 1)) {
+ eigen_assert(index < m_impl.dimensions()[NumDims - 1]);
inputIndex += index;
} else {
- if (internal::index_statically_eq<InputDimensions>(NumDims-1, 1)) {
- eigen_assert(index % m_impl.dimensions()[NumDims-1] == 0);
+ if (internal::index_statically_eq<InputDimensions>(NumDims - 1, 1)) {
+ eigen_assert(index % m_impl.dimensions()[NumDims - 1] == 0);
} else {
- inputIndex += (index % m_impl.dimensions()[NumDims-1]);
+ inputIndex += (index % m_impl.dimensions()[NumDims - 1]);
}
}
- return m_impl.coeff(inputIndex);
+ return inputIndex;
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeffRowMajor(Index index) const
+ {
+ return m_impl.coeff(indexRowMajor(index));
}
template<int LoadMode>
@@ -236,9 +312,145 @@ struct TensorEvaluator<const TensorBroadcastingOp<Broadcast, ArgType>, Device>
}
if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
- return packetColMajor<LoadMode>(index);
+ if (isCopy) {
+ #ifdef EIGEN_GPU_COMPILE_PHASE
+ // See PR 437: on NVIDIA P100 and K20m we observed a 3-4x speed-up by
+ // enforcing unaligned loads here. The reason is unclear though.
+ return m_impl.template packet<Unaligned>(index);
+ #else
+ return m_impl.template packet<LoadMode>(index);
+ #endif
+ } else if (oneByN && !nByOne) {
+ return packetNByOne<LoadMode>(index);
+ } else if (!oneByN && nByOne) {
+ return packetOneByN<LoadMode>(index);
+ } else if (oneByN && nByOne) {
+ return packetOneByNByOne<LoadMode>(index);
+ } else {
+ return packetColMajor<LoadMode>(index);
+ }
} else {
- return packetRowMajor<LoadMode>(index);
+ if (isCopy) {
+ #ifdef EIGEN_GPU_COMPILE_PHASE
+ // See above.
+ return m_impl.template packet<Unaligned>(index);
+ #else
+ return m_impl.template packet<LoadMode>(index);
+ #endif
+ } else if (oneByN && !nByOne) {
+ return packetOneByN<LoadMode>(index);
+ } else if (!oneByN && nByOne) {
+ return packetNByOne<LoadMode>(index);
+ } else if (oneByN && nByOne) {
+ return packetOneByNByOne<LoadMode>(index);
+ } else {
+ return packetRowMajor<LoadMode>(index);
+ }
+ }
+ }
+
+ template<int LoadMode>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packetOneByNByOne
+ (Index index) const
+ {
+ EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
+ eigen_assert(index+PacketSize-1 < dimensions().TotalSize());
+
+ EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize];
+ Index startDim, endDim;
+ Index inputIndex, outputOffset, batchedIndex;
+
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ startDim = NumDims - 1;
+ endDim = 1;
+ } else {
+ startDim = 0;
+ endDim = NumDims - 2;
+ }
+
+ batchedIndex = index % m_outputStrides[startDim];
+ inputIndex = batchedIndex / m_outputStrides[endDim];
+ outputOffset = batchedIndex % m_outputStrides[endDim];
+
+ if (outputOffset + PacketSize <= m_outputStrides[endDim]) {
+ values[0] = m_impl.coeff(inputIndex);
+ return internal::pload1<PacketReturnType>(values);
+ } else {
+ for (int i = 0, cur = 0; i < PacketSize; ++i, ++cur) {
+ if (outputOffset + cur < m_outputStrides[endDim]) {
+ values[i] = m_impl.coeff(inputIndex);
+ } else {
+ ++inputIndex;
+ inputIndex = (inputIndex == m_inputStrides[startDim] ? 0 : inputIndex);
+ values[i] = m_impl.coeff(inputIndex);
+ outputOffset = 0;
+ cur = 0;
+ }
+ }
+ return internal::pload<PacketReturnType>(values);
+ }
+ }
+
+ template<int LoadMode>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packetOneByN(Index index) const
+ {
+ EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
+ eigen_assert(index+PacketSize-1 < dimensions().TotalSize());
+
+ Index dim, inputIndex;
+
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ dim = NumDims - 1;
+ } else {
+ dim = 0;
+ }
+
+ inputIndex = index % m_inputStrides[dim];
+ if (inputIndex + PacketSize <= m_inputStrides[dim]) {
+ return m_impl.template packet<Unaligned>(inputIndex);
+ } else {
+ EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize];
+ for (int i = 0; i < PacketSize; ++i) {
+ if (inputIndex > m_inputStrides[dim]-1) {
+ inputIndex = 0;
+ }
+ values[i] = m_impl.coeff(inputIndex++);
+ }
+ return internal::pload<PacketReturnType>(values);
+ }
+ }
+
+ template<int LoadMode>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packetNByOne(Index index) const
+ {
+ EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
+ eigen_assert(index+PacketSize-1 < dimensions().TotalSize());
+
+ EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize];
+ Index dim, inputIndex, outputOffset;
+
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ dim = 1;
+ } else {
+ dim = NumDims - 2;
+ }
+
+ inputIndex = index / m_outputStrides[dim];
+ outputOffset = index % m_outputStrides[dim];
+ if (outputOffset + PacketSize <= m_outputStrides[dim]) {
+ values[0] = m_impl.coeff(inputIndex);
+ return internal::pload1<PacketReturnType>(values);
+ } else {
+ for (int i = 0, cur = 0; i < PacketSize; ++i, ++cur) {
+ if (outputOffset + cur < m_outputStrides[dim]) {
+ values[i] = m_impl.coeff(inputIndex);
+ } else {
+ values[i] = m_impl.coeff(++inputIndex);
+ outputOffset = 0;
+ cur = 0;
+ }
+ }
+ return internal::pload<PacketReturnType>(values);
}
}
@@ -289,7 +501,11 @@ struct TensorEvaluator<const TensorBroadcastingOp<Broadcast, ArgType>, Device>
EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize];
values[0] = m_impl.coeff(inputIndex);
for (int i = 1; i < PacketSize; ++i) {
- values[i] = coeffColMajor(originalIndex+i);
+ if (innermostLoc + i < m_impl.dimensions()[0]) {
+ values[i] = m_impl.coeff(inputIndex+i);
+ } else {
+ values[i] = coeffColMajor(originalIndex+i);
+ }
}
PacketReturnType rslt = internal::pload<PacketReturnType>(values);
return rslt;
@@ -341,7 +557,11 @@ struct TensorEvaluator<const TensorBroadcastingOp<Broadcast, ArgType>, Device>
EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize];
values[0] = m_impl.coeff(inputIndex);
for (int i = 1; i < PacketSize; ++i) {
- values[i] = coeffRowMajor(originalIndex+i);
+ if (innermostLoc + i < m_impl.dimensions()[NumDims-1]) {
+ values[i] = m_impl.coeff(inputIndex+i);
+ } else {
+ values[i] = coeffRowMajor(originalIndex+i);
+ }
}
PacketReturnType rslt = internal::pload<PacketReturnType>(values);
return rslt;
@@ -351,7 +571,7 @@ struct TensorEvaluator<const TensorBroadcastingOp<Broadcast, ArgType>, Device>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
costPerCoeff(bool vectorized) const {
double compute_cost = TensorOpCost::AddCost<Index>();
- if (NumDims > 0) {
+ if (!isCopy && NumDims > 0) {
for (int i = NumDims - 1; i > 0; --i) {
compute_cost += TensorOpCost::DivCost<Index>();
if (internal::index_statically_eq<Broadcast>(i, 1)) {
@@ -372,13 +592,290 @@ struct TensorEvaluator<const TensorBroadcastingOp<Broadcast, ArgType>, Device>
TensorOpCost(0, 0, compute_cost, vectorized, PacketSize);
}
- EIGEN_DEVICE_FUNC Scalar* data() const { return NULL; }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void getResourceRequirements(
+ std::vector<internal::TensorOpResourceRequirements>* resources) const {
+ // TODO(wuke): Targeting L1 size is 30% faster than targeting L{-1} on large
+ // tensors. But this might need further tuning.
+ Eigen::Index block_total_size_max = numext::maxi<Eigen::Index>(
+ 1, m_device.firstLevelCacheSize() / sizeof(Scalar));
+
+ resources->push_back(internal::TensorOpResourceRequirements(
+ internal::kSkewedInnerDims, block_total_size_max));
+
+ m_impl.getResourceRequirements(resources);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void block(
+ TensorBlock* output_block) const {
+ if (NumDims <= 0) {
+ output_block->data()[0] = m_impl.coeff(0);
+ return;
+ }
+
+ // Because we only support kSkewedInnerDims blocking, block size should be
+ // equal to m_dimensions for the inner dims, a size smaller than
+ // m_dimensions[i] for the first outer dim, and 1 for the other outer dims.
+ // This is guaranteed
+ // by MergeResourceRequirements() in TensorBlock.h.
+ const Dimensions& output_block_sizes = output_block->block_sizes();
+ const Dimensions& output_block_strides = output_block->block_strides();
+
+ // Find where outer dims start.
+ int outer_dim_start = 0;
+ Index outer_dim_size = 1, inner_dim_size = 1;
+ for (int i = 0; i < NumDims; ++i) {
+ const int dim = static_cast<int>(Layout) == static_cast<int>(ColMajor)
+ ? i
+ : NumDims - i - 1;
+ if (i > outer_dim_start) {
+ eigen_assert(output_block_sizes[dim] == 1);
+ } else if (output_block_sizes[dim] != m_dimensions[dim]) {
+ eigen_assert(output_block_sizes[dim] < m_dimensions[dim]);
+ outer_dim_size = output_block_sizes[dim];
+ } else {
+ inner_dim_size *= output_block_sizes[dim];
+ ++outer_dim_start;
+ }
+ }
+
+ if (inner_dim_size == 0 || outer_dim_size == 0) {
+ return;
+ }
+
+ const Dimensions& input_dims = Dimensions(m_impl.dimensions());
+
+ // Pre-fill input_block_sizes, broadcast_block_sizes,
+ // broadcast_block_strides, and broadcast_tensor_strides. Later on we will
+ // only modify the outer_dim_start-th dimension of these arrays.
+
+ // Calculate the input block size for looking into the input.
+ Dimensions input_block_sizes;
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ for (int i = 0; i < outer_dim_start; ++i) {
+ input_block_sizes[i] = input_dims[i];
+ }
+ for (int i = outer_dim_start; i < NumDims; ++i) {
+ input_block_sizes[i] = 1;
+ }
+ } else {
+ for (int i = 0; i < outer_dim_start; ++i) {
+ input_block_sizes[NumDims - i - 1] = input_dims[NumDims - i - 1];
+ }
+ for (int i = outer_dim_start; i < NumDims; ++i) {
+ input_block_sizes[NumDims - i - 1] = 1;
+ }
+ }
+
+ // Broadcast with the 0-stride trick: create one extra dim for each
+ // broadcast dimension and set its input stride to 0.
+ //
+ // When ColMajor:
+ // - broadcast_block_sizes is [d_0, b_0, d_1, b_1, ...].
+ //
+ // - broadcast_block_strides is [output_block_strides[0],
+ // output_block_strides[0] * d_0,
+ // output_block_strides[1],
+ // output_block_strides[1] * d_1,
+ // ...].
+ //
+ // - broadcast_tensor_strides is [m_inputStrides[0],
+ // 0,
+ // m_inputStrides[1],
+ // 0,
+ // ...] (matching the assignments below).
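+ //
+ // Worked example (illustrative): for a ColMajor input of size [2, 3]
+ // broadcast by [4, 1], so that m_inputStrides == [1, 2] and the full
+ // output is [8, 3] with strides [1, 8], a block covering the whole output
+ // would use
+ // broadcast_block_sizes = [2, 4, 3, 1],
+ // broadcast_block_strides = [1, 2, 8, 24],
+ // broadcast_tensor_strides = [1, 0, 2, 0],
+ // i.e. each 2-element input column is re-read 4 times through the zero
+ // stride before moving on to the next column.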
+ BroadcastDimensions broadcast_block_sizes, broadcast_block_strides,
+ broadcast_tensor_strides;
+
+ for (int i = 0; i < outer_dim_start; ++i) {
+ const int dim = static_cast<int>(Layout) == static_cast<int>(ColMajor)
+ ? i
+ : NumDims - i - 1;
+ const int copy_dim =
+ static_cast<int>(Layout) == static_cast<int>(ColMajor)
+ ? 2 * i
+ : 2 * NumDims - 2 * i - 1;
+ const int broadcast_dim =
+ static_cast<int>(Layout) == static_cast<int>(ColMajor) ? copy_dim + 1
+ : copy_dim - 1;
+ broadcast_block_sizes[copy_dim] = input_dims[dim];
+ broadcast_block_sizes[broadcast_dim] = m_broadcast[dim];
+ broadcast_block_strides[copy_dim] = output_block_strides[dim];
+ broadcast_block_strides[broadcast_dim] =
+ output_block_strides[dim] * input_dims[dim];
+ broadcast_tensor_strides[copy_dim] = m_inputStrides[dim];
+ broadcast_tensor_strides[broadcast_dim] = 0;
+ }
+ for (int i = 2 * outer_dim_start; i < 2 * NumDims; ++i) {
+ const int dim = static_cast<int>(Layout) == static_cast<int>(ColMajor)
+ ? i
+ : 2 * NumDims - i - 1;
+ broadcast_block_sizes[dim] = 1;
+ broadcast_block_strides[dim] = 0;
+ broadcast_tensor_strides[dim] = 0;
+ }
+
+ const int outer_dim = static_cast<int>(Layout) == static_cast<int>(ColMajor)
+ ? outer_dim_start
+ : NumDims - outer_dim_start - 1;
+
+ if (outer_dim_size == 1) {
+ // We just need one block read using the ready-set values above.
+ BroadcastBlock(input_block_sizes, broadcast_block_sizes,
+ broadcast_block_strides, broadcast_tensor_strides, 0,
+ output_block);
+ } else if (input_dims[outer_dim] == 1) {
+ // Broadcast outer_dim_start-th dimension (< NumDims) by outer_dim_size.
+ const int broadcast_outer_dim =
+ static_cast<int>(Layout) == static_cast<int>(ColMajor)
+ ? 2 * outer_dim_start + 1
+ : 2 * NumDims - 2 * outer_dim_start - 2;
+ broadcast_block_sizes[broadcast_outer_dim] = outer_dim_size;
+ broadcast_tensor_strides[broadcast_outer_dim] = 0;
+ broadcast_block_strides[broadcast_outer_dim] =
+ output_block_strides[outer_dim];
+ BroadcastBlock(input_block_sizes, broadcast_block_sizes,
+ broadcast_block_strides, broadcast_tensor_strides, 0,
+ output_block);
+ } else {
+ // The general case. Let's denote the output block as x[...,
+ // a:a+outer_dim_size, :, ..., :], where a:a+outer_dim_size is a slice on
+ // the outer_dim_start-th dimension (< NumDims). We need to split the
+ // a:a+outer_dim_size into possibly 3 sub-blocks:
+ //
+ // (1) a:b, where b is the smallest multiple of
+ // input_dims[outer_dim_start] in [a, a+outer_dim_size].
+ //
+ // (2) b:c, where c is the largest multiple of input_dims[outer_dim_start]
+ // in [a, a+outer_dim_size].
+ //
+ // (3) c:a+outer_dim_size .
+ //
+ // Or, when b and c do not exist, we just need to process the whole block
+ // together.
+
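+ // Numeric example (illustrative): with input_dims[outer_dim] == 4, a == 6
+ // and outer_dim_size == 11, the slice 6:17 is split into the head 6:8
+ // (partial copy of size 2), the middle 8:16 (two complete copies of the
+ // input, broadcast together), and the tail 16:17 (partial copy of size 1).
+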
+ // Find a.
+ const Index outer_dim_left_index =
+ output_block->first_coeff_index() / m_outputStrides[outer_dim];
+
+ // Find b and c.
+ const Index input_outer_dim_size = input_dims[outer_dim];
+
+ // The first multiple of input_outer_dim_size at or after a. This is b when
+ // it is <= outer_dim_left_index + outer_dim_size.
+ const Index first_multiple =
+ divup<Index>(outer_dim_left_index, input_outer_dim_size) *
+ input_outer_dim_size;
+
+ if (first_multiple <= outer_dim_left_index + outer_dim_size) {
+ // b exists, so does c. Find it.
+ const Index last_multiple = (outer_dim_left_index + outer_dim_size) /
+ input_outer_dim_size * input_outer_dim_size;
+ const int copy_outer_dim =
+ static_cast<int>(Layout) == static_cast<int>(ColMajor)
+ ? 2 * outer_dim_start
+ : 2 * NumDims - 2 * outer_dim_start - 1;
+ const int broadcast_outer_dim =
+ static_cast<int>(Layout) == static_cast<int>(ColMajor)
+ ? 2 * outer_dim_start + 1
+ : 2 * NumDims - 2 * outer_dim_start - 2;
+ if (first_multiple > outer_dim_left_index) {
+ const Index head_size = first_multiple - outer_dim_left_index;
+ input_block_sizes[outer_dim] = head_size;
+ broadcast_block_sizes[copy_outer_dim] = head_size;
+ broadcast_tensor_strides[copy_outer_dim] = m_inputStrides[outer_dim];
+ broadcast_block_strides[copy_outer_dim] =
+ output_block_strides[outer_dim];
+ broadcast_block_sizes[broadcast_outer_dim] = 1;
+ broadcast_tensor_strides[broadcast_outer_dim] = 0;
+ broadcast_block_strides[broadcast_outer_dim] =
+ output_block_strides[outer_dim] * input_dims[outer_dim];
+ BroadcastBlock(input_block_sizes, broadcast_block_sizes,
+ broadcast_block_strides, broadcast_tensor_strides, 0,
+ output_block);
+ }
+ if (first_multiple < last_multiple) {
+ input_block_sizes[outer_dim] = input_outer_dim_size;
+ broadcast_block_sizes[copy_outer_dim] = input_outer_dim_size;
+ broadcast_tensor_strides[copy_outer_dim] = m_inputStrides[outer_dim];
+ broadcast_block_strides[copy_outer_dim] =
+ output_block_strides[outer_dim];
+ broadcast_block_sizes[broadcast_outer_dim] =
+ (last_multiple - first_multiple) / input_outer_dim_size;
+ broadcast_tensor_strides[broadcast_outer_dim] = 0;
+ broadcast_block_strides[broadcast_outer_dim] =
+ output_block_strides[outer_dim] * input_dims[outer_dim];
+ const Index offset = (first_multiple - outer_dim_left_index) *
+ m_outputStrides[outer_dim];
+ BroadcastBlock(input_block_sizes, broadcast_block_sizes,
+ broadcast_block_strides, broadcast_tensor_strides,
+ offset, output_block);
+ }
+ if (last_multiple < outer_dim_left_index + outer_dim_size) {
+ const Index tail_size =
+ outer_dim_left_index + outer_dim_size - last_multiple;
+ input_block_sizes[outer_dim] = tail_size;
+ broadcast_block_sizes[copy_outer_dim] = tail_size;
+ broadcast_tensor_strides[copy_outer_dim] = m_inputStrides[outer_dim];
+ broadcast_block_strides[copy_outer_dim] =
+ output_block_strides[outer_dim];
+ broadcast_block_sizes[broadcast_outer_dim] = 1;
+ broadcast_tensor_strides[broadcast_outer_dim] = 0;
+ broadcast_block_strides[broadcast_outer_dim] =
+ output_block_strides[outer_dim] * input_dims[outer_dim];
+ const Index offset = (last_multiple - outer_dim_left_index) *
+ m_outputStrides[outer_dim];
+ BroadcastBlock(input_block_sizes, broadcast_block_sizes,
+ broadcast_block_strides, broadcast_tensor_strides,
+ offset, output_block);
+ }
+ } else {
+ // b and c do not exist.
+ const int copy_outer_dim =
+ static_cast<int>(Layout) == static_cast<int>(ColMajor)
+ ? 2 * outer_dim_start
+ : 2 * NumDims - 2 * outer_dim_start - 1;
+ input_block_sizes[outer_dim] = outer_dim_size;
+ broadcast_block_sizes[copy_outer_dim] = outer_dim_size;
+ broadcast_tensor_strides[copy_outer_dim] = m_inputStrides[outer_dim];
+ broadcast_block_strides[copy_outer_dim] =
+ output_block_strides[outer_dim];
+ BroadcastBlock(input_block_sizes, broadcast_block_sizes,
+ broadcast_block_strides, broadcast_tensor_strides, 0,
+ output_block);
+ }
+ }
+ }
+
+ EIGEN_DEVICE_FUNC typename Eigen::internal::traits<XprType>::PointerType data() const { return NULL; }
const TensorEvaluator<ArgType, Device>& impl() const { return m_impl; }
Broadcast functor() const { return m_broadcast; }
+ private:
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void BroadcastBlock(
+ const Dimensions& input_block_sizes,
+ const BroadcastDimensions& broadcast_block_sizes,
+ const BroadcastDimensions& broadcast_block_strides,
+ const BroadcastDimensions& broadcast_tensor_strides, Index offset,
+ TensorBlock* output_block) const {
+ TensorBlock input_view_block(
+ static_cast<int>(Layout) == static_cast<int>(ColMajor)
+ ? indexColMajor(output_block->first_coeff_index() + offset)
+ : indexRowMajor(output_block->first_coeff_index() + offset),
+ input_block_sizes, Dimensions(m_inputStrides),
+ Dimensions(m_inputStrides), NULL);
+
+ internal::TensorBlockView<ArgType, Device> input_block(m_device, m_impl,
+ input_view_block);
+ BroadcastTensorBlock broadcast_block(
+ 0, broadcast_block_sizes, broadcast_block_strides,
+ broadcast_tensor_strides, output_block->data() + offset);
+
+ BroadcastTensorBlockReader::Run(&broadcast_block, input_block.data());
+ }
+
protected:
+ const Device& m_device;
const Broadcast m_broadcast;
Dimensions m_dimensions;
array<Index, NumDims> m_outputStrides;
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorChipping.h b/unsupported/Eigen/CXX11/src/Tensor/TensorChipping.h
index f335edf7d..b47fa9e8e 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorChipping.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorChipping.h
@@ -32,6 +32,7 @@ struct traits<TensorChippingOp<DimId, XprType> > : public traits<XprType>
typedef typename remove_reference<Nested>::type _Nested;
static const int NumDimensions = XprTraits::NumDimensions - 1;
static const int Layout = XprTraits::Layout;
+ typedef typename XprTraits::PointerType PointerType;
};
template<DenseIndex DimId, typename XprType>
@@ -50,6 +51,7 @@ template <DenseIndex DimId>
struct DimensionId
{
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DimensionId(DenseIndex dim) {
+ EIGEN_UNUSED_VARIABLE(dim);
eigen_assert(dim == DimId);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DenseIndex actualDim() const {
@@ -136,19 +138,28 @@ struct TensorEvaluator<const TensorChippingOp<DimId, ArgType>, Device>
typedef typename XprType::Scalar Scalar;
typedef typename XprType::CoeffReturnType CoeffReturnType;
typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
- static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
+ static const int PacketSize = PacketType<CoeffReturnType, Device>::size;
enum {
// Alignment can't be guaranteed at compile time since it depends on the
// slice offsets.
- IsAligned = false,
- PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
- Layout = TensorEvaluator<ArgType, Device>::Layout,
- CoordAccess = false, // to be implemented
- RawAccess = false
+ IsAligned = false,
+ PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
+ BlockAccess = TensorEvaluator<ArgType, Device>::BlockAccess,
+ PreferBlockAccess = true,
+ Layout = TensorEvaluator<ArgType, Device>::Layout,
+ CoordAccess = false, // to be implemented
+ RawAccess = false
};
+ typedef typename internal::remove_const<Scalar>::type ScalarNoConst;
+
+ typedef internal::TensorBlock<ScalarNoConst, Index, NumInputDims, Layout>
+ InputTensorBlock;
+ typedef internal::TensorBlock<ScalarNoConst, Index, NumDims, Layout>
+ OutputTensorBlock;
+
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
: m_impl(op.expression(), device), m_dim(op.dim()), m_device(device), m_offset(op.offset())
{
@@ -181,6 +192,20 @@ struct TensorEvaluator<const TensorChippingOp<DimId, ArgType>, Device>
}
m_inputStride *= input_dims[m_dim.actualDim()];
m_inputOffset = m_stride * op.offset();
+
+ if (BlockAccess) {
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ m_inputStrides[0] = 1;
+ for (int i = 1; i < NumInputDims; ++i) {
+ m_inputStrides[i] = m_inputStrides[i - 1] * input_dims[i - 1];
+ }
+ } else {
+ m_inputStrides[NumInputDims - 1] = 1;
+ for (int i = NumInputDims - 2; i >= 0; --i) {
+ m_inputStrides[i] = m_inputStrides[i + 1] * input_dims[i + 1];
+ }
+ }
+ }
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
@@ -263,7 +288,62 @@ struct TensorEvaluator<const TensorChippingOp<DimId, ArgType>, Device>
TensorOpCost(0, 0, cost, vectorized, PacketSize);
}
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType* data() const {
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void getResourceRequirements(
+ std::vector<internal::TensorOpResourceRequirements>* resources) const {
+ Eigen::Index block_total_size_max = numext::maxi<Eigen::Index>(
+ 1, m_device.lastLevelCacheSize() / sizeof(Scalar));
+ resources->push_back(internal::TensorOpResourceRequirements(
+ internal::kSkewedInnerDims, block_total_size_max));
+ m_impl.getResourceRequirements(resources);
+ }
+
+ // TODO(andydavis) Reduce the overhead of this function (experiment with
+ // using a fixed block size).
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void block(
+ OutputTensorBlock* output_block) const {
+ // Calculate input block sizes.
+ const DSizes<Index, NumDims>& output_block_sizes =
+ output_block->block_sizes();
+ const DSizes<Index, NumDims>& output_block_strides =
+ output_block->block_strides();
+ const Index chip_dim = m_dim.actualDim();
+ DSizes<Index, NumInputDims> input_block_sizes;
+ DSizes<Index, NumInputDims> input_block_strides;
+ for (Index i = 0; i < NumInputDims; ++i) {
+ if (i < chip_dim) {
+ input_block_sizes[i] = output_block_sizes[i];
+ input_block_strides[i] = output_block_strides[i];
+ } else if (i > chip_dim) {
+ input_block_sizes[i] = output_block_sizes[i - 1];
+ input_block_strides[i] = output_block_strides[i - 1];
+ } else {
+ input_block_sizes[i] = 1;
+ }
+ }
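+ // E.g. (illustrative) chipping dimension 1 of a 3-D input: an output
+ // block of sizes [a, b] maps to an input block of sizes [a, 1, b].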
+ // Fix up input_block_stride for chip dimension.
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ if (chip_dim == 0) {
+ input_block_strides[chip_dim] = 1;
+ } else {
+ input_block_strides[chip_dim] =
+ input_block_strides[chip_dim - 1] * input_block_sizes[chip_dim - 1];
+ }
+ } else {
+ if (chip_dim == NumInputDims - 1) {
+ input_block_strides[chip_dim] = 1;
+ } else {
+ input_block_strides[chip_dim] =
+ input_block_strides[chip_dim + 1] * input_block_sizes[chip_dim + 1];
+ }
+ }
+ // Instantiate and read input block from input tensor.
+ InputTensorBlock input_block(srcCoeff(output_block->first_coeff_index()),
+ input_block_sizes, input_block_strides,
+ m_inputStrides, output_block->data());
+ m_impl.block(&input_block);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename Eigen::internal::traits<XprType>::PointerType data() const {
CoeffReturnType* result = const_cast<CoeffReturnType*>(m_impl.data());
if (((static_cast<int>(Layout) == static_cast<int>(ColMajor) && m_dim.actualDim() == NumDims) ||
(static_cast<int>(Layout) == static_cast<int>(RowMajor) && m_dim.actualDim() == 0)) &&
@@ -291,13 +371,14 @@ struct TensorEvaluator<const TensorChippingOp<DimId, ArgType>, Device>
{
Index inputIndex;
if ((static_cast<int>(Layout) == static_cast<int>(ColMajor) && m_dim.actualDim() == 0) ||
- (static_cast<int>(Layout) == static_cast<int>(RowMajor) && m_dim.actualDim() == NumInputDims-1)) {
+ (static_cast<int>(Layout) == static_cast<int>(RowMajor) && m_dim.actualDim() == NumInputDims - 1)) {
// m_stride is equal to 1, so let's avoid the integer division.
eigen_assert(m_stride == 1);
inputIndex = index * m_inputStride + m_inputOffset;
- } else if ((static_cast<int>(Layout) == static_cast<int>(ColMajor) && m_dim.actualDim() == NumInputDims-1) ||
- (static_cast<int>(Layout) == static_cast<int>(RowMajor) && m_dim.actualDim() == 0)) {
- // m_stride is aways greater than index, so let's avoid the integer division.
+ } else if ((static_cast<int>(Layout) == static_cast<int>(ColMajor) && m_dim.actualDim() == NumInputDims - 1) ||
+ (static_cast<int>(Layout) == static_cast<int>(RowMajor) && m_dim.actualDim() == 0)) {
+ // m_stride is always greater than index, so let's avoid the integer
+ // division.
eigen_assert(m_stride > index);
inputIndex = index + m_inputOffset;
} else {
@@ -313,6 +394,7 @@ struct TensorEvaluator<const TensorChippingOp<DimId, ArgType>, Device>
Index m_stride;
Index m_inputOffset;
Index m_inputStride;
+ DSizes<Index, NumInputDims> m_inputStrides;
TensorEvaluator<ArgType, Device> m_impl;
const internal::DimensionId<DimId> m_dim;
const Device& m_device;
@@ -336,14 +418,23 @@ struct TensorEvaluator<TensorChippingOp<DimId, ArgType>, Device>
typedef typename XprType::Scalar Scalar;
typedef typename XprType::CoeffReturnType CoeffReturnType;
typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
- static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
+ static const int PacketSize = PacketType<CoeffReturnType, Device>::size;
enum {
- IsAligned = false,
+ IsAligned = false,
PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
- RawAccess = false
+ BlockAccess = TensorEvaluator<ArgType, Device>::BlockAccess,
+ Layout = TensorEvaluator<ArgType, Device>::Layout,
+ RawAccess = false
};
+ typedef typename internal::remove_const<Scalar>::type ScalarNoConst;
+
+ typedef internal::TensorBlock<ScalarNoConst, Index, NumInputDims, Layout>
+ InputTensorBlock;
+ typedef internal::TensorBlock<ScalarNoConst, Index, NumDims, Layout>
+ OutputTensorBlock;
+
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
: Base(op, device)
{ }
@@ -391,6 +482,50 @@ struct TensorEvaluator<TensorChippingOp<DimId, ArgType>, Device>
}
}
}
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void writeBlock(
+ const OutputTensorBlock& output_block) {
+ // Calculate input block sizes.
+ const DSizes<Index, NumDims>& output_block_sizes =
+ output_block.block_sizes();
+ const DSizes<Index, NumDims>& output_block_strides =
+ output_block.block_strides();
+ const Index chip_dim = this->m_dim.actualDim();
+ DSizes<Index, NumInputDims> input_block_sizes;
+ DSizes<Index, NumInputDims> input_block_strides;
+ for (Index i = 0; i < NumInputDims; ++i) {
+ if (i < chip_dim) {
+ input_block_sizes[i] = output_block_sizes[i];
+ input_block_strides[i] = output_block_strides[i];
+ } else if (i > chip_dim) {
+ input_block_sizes[i] = output_block_sizes[i - 1];
+ input_block_strides[i] = output_block_strides[i - 1];
+ } else {
+ input_block_sizes[i] = 1;
+ }
+ }
+ // Fix up input_block_stride for chip dimension.
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ if (chip_dim == 0) {
+ input_block_strides[chip_dim] = 1;
+ } else {
+ input_block_strides[chip_dim] =
+ input_block_strides[chip_dim - 1] * input_block_sizes[chip_dim - 1];
+ }
+ } else {
+ if (chip_dim == NumInputDims - 1) {
+ input_block_strides[chip_dim] = 1;
+ } else {
+ input_block_strides[chip_dim] =
+ input_block_strides[chip_dim + 1] * input_block_sizes[chip_dim + 1];
+ }
+ }
+ // Write input block.
+ this->m_impl.writeBlock(InputTensorBlock(
+ this->srcCoeff(output_block.first_coeff_index()), input_block_sizes,
+ input_block_strides, this->m_inputStrides,
+ const_cast<ScalarNoConst*>(output_block.data())));
+ }
};
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorConcatenation.h b/unsupported/Eigen/CXX11/src/Tensor/TensorConcatenation.h
index 2c7ba961c..3863ee8c3 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorConcatenation.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorConcatenation.h
@@ -37,6 +37,8 @@ struct traits<TensorConcatenationOp<Axis, LhsXprType, RhsXprType> >
static const int NumDimensions = traits<LhsXprType>::NumDimensions;
static const int Layout = traits<LhsXprType>::Layout;
enum { Flags = 0 };
+ typedef typename conditional<Pointer_type_promotion<typename LhsXprType::Scalar, Scalar>::val,
+ typename traits<LhsXprType>::PointerType, typename traits<RhsXprType>::PointerType>::type PointerType;
};
template<typename Axis, typename LhsXprType, typename RhsXprType>
@@ -120,6 +122,8 @@ struct TensorEvaluator<const TensorConcatenationOp<Axis, LeftArgType, RightArgTy
enum {
IsAligned = false,
PacketAccess = TensorEvaluator<LeftArgType, Device>::PacketAccess & TensorEvaluator<RightArgType, Device>::PacketAccess,
+ BlockAccess = false,
+ PreferBlockAccess = false,
Layout = TensorEvaluator<LeftArgType, Device>::Layout,
RawAccess = false
};
@@ -248,7 +252,7 @@ struct TensorEvaluator<const TensorConcatenationOp<Axis, LeftArgType, RightArgTy
template<int LoadMode>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
{
- const int packetSize = internal::unpacket_traits<PacketReturnType>::size;
+ const int packetSize = PacketType<CoeffReturnType, Device>::size;
EIGEN_STATIC_ASSERT((packetSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
eigen_assert(index + packetSize - 1 < dimensions().TotalSize());
@@ -275,7 +279,7 @@ struct TensorEvaluator<const TensorConcatenationOp<Axis, LeftArgType, RightArgTy
TensorOpCost(0, 0, compute_cost);
}
- EIGEN_DEVICE_FUNC Scalar* data() const { return NULL; }
+ EIGEN_DEVICE_FUNC typename Eigen::internal::traits<XprType>::PointerType data() const { return NULL; }
/// required by sycl in order to extract the accessor
const TensorEvaluator<LeftArgType, Device>& left_impl() const { return m_leftImpl; }
/// required by sycl in order to extract the accessor
@@ -304,6 +308,8 @@ template<typename Axis, typename LeftArgType, typename RightArgType, typename De
enum {
IsAligned = false,
PacketAccess = TensorEvaluator<LeftArgType, Device>::PacketAccess & TensorEvaluator<RightArgType, Device>::PacketAccess,
+ BlockAccess = false,
+ PreferBlockAccess = false,
Layout = TensorEvaluator<LeftArgType, Device>::Layout,
RawAccess = false
};
@@ -350,7 +356,7 @@ template<typename Axis, typename LeftArgType, typename RightArgType, typename De
template <int StoreMode> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
void writePacket(Index index, const PacketReturnType& x)
{
- const int packetSize = internal::unpacket_traits<PacketReturnType>::size;
+ const int packetSize = PacketType<CoeffReturnType, Device>::size;
EIGEN_STATIC_ASSERT((packetSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
eigen_assert(index + packetSize - 1 < this->dimensions().TotalSize());
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h
index bf4a476d9..3b22e43e7 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h
@@ -85,8 +85,8 @@ template<typename LhsScalar, typename RhsScalar, typename Scalar>
#endif
-template<typename Dimensions, typename LhsXprType, typename RhsXprType>
-struct traits<TensorContractionOp<Dimensions, LhsXprType, RhsXprType> >
+template<typename Dimensions, typename LhsXprType, typename RhsXprType, typename OutputKernelType>
+struct traits<TensorContractionOp<Dimensions, LhsXprType, RhsXprType, OutputKernelType> >
{
// Type promotion to handle the case where the types of the lhs and the rhs are different.
typedef typename gebp_traits<typename remove_const<typename LhsXprType::Scalar>::type,
@@ -104,29 +104,32 @@ struct traits<TensorContractionOp<Dimensions, LhsXprType, RhsXprType> >
// From NumDims below.
static const int NumDimensions = traits<RhsXprType>::NumDimensions + traits<RhsXprType>::NumDimensions - 2 * array_size<Dimensions>::value;
static const int Layout = traits<LhsXprType>::Layout;
+ typedef typename conditional<Pointer_type_promotion<typename LhsXprType::Scalar, Scalar>::val,
+ typename traits<LhsXprType>::PointerType, typename traits<RhsXprType>::PointerType>::type PointerType;
enum {
Flags = 0
};
};
-template<typename Dimensions, typename LhsXprType, typename RhsXprType>
-struct eval<TensorContractionOp<Dimensions, LhsXprType, RhsXprType>, Eigen::Dense>
+template<typename Dimensions, typename LhsXprType, typename RhsXprType, typename OutputKernelType>
+struct eval<TensorContractionOp<Dimensions, LhsXprType, RhsXprType, OutputKernelType>, Eigen::Dense>
{
- typedef const TensorContractionOp<Dimensions, LhsXprType, RhsXprType>& type;
+ typedef const TensorContractionOp<Dimensions, LhsXprType, RhsXprType, OutputKernelType>& type;
};
-template<typename Dimensions, typename LhsXprType, typename RhsXprType>
-struct nested<TensorContractionOp<Dimensions, LhsXprType, RhsXprType>, 1, typename eval<TensorContractionOp<Dimensions, LhsXprType, RhsXprType> >::type>
+template<typename Dimensions, typename LhsXprType, typename RhsXprType, typename OutputKernelType>
+struct nested<TensorContractionOp<Dimensions, LhsXprType, RhsXprType, OutputKernelType>, 1, typename eval<TensorContractionOp<Dimensions, LhsXprType, RhsXprType, OutputKernelType> >::type>
{
- typedef TensorContractionOp<Dimensions, LhsXprType, RhsXprType> type;
+ typedef TensorContractionOp<Dimensions, LhsXprType, RhsXprType, OutputKernelType> type;
};
-template<typename Indices_, typename LeftArgType_, typename RightArgType_, typename Device_>
-struct traits<TensorEvaluator<const TensorContractionOp<Indices_, LeftArgType_, RightArgType_>, Device_> > {
+template<typename Indices_, typename LeftArgType_, typename RightArgType_, typename OutputKernelType_, typename Device_>
+struct traits<TensorEvaluator<const TensorContractionOp<Indices_, LeftArgType_, RightArgType_, OutputKernelType_>, Device_> > {
typedef Indices_ Indices;
typedef LeftArgType_ LeftArgType;
typedef RightArgType_ RightArgType;
+ typedef OutputKernelType_ OutputKernelType;
typedef Device_ Device;
// From NumDims below.
@@ -135,8 +138,46 @@ struct traits<TensorEvaluator<const TensorContractionOp<Indices_, LeftArgType_,
} // end namespace internal
-template<typename Indices, typename LhsXprType, typename RhsXprType>
-class TensorContractionOp : public TensorBase<TensorContractionOp<Indices, LhsXprType, RhsXprType>, ReadOnlyAccessors>
+// Tensor contraction params that make it possible to map 2-dimensional output
+// matrix coordinates back to the output tensor dimensions.
+struct TensorContractionParams {
+ // The TensorContraction evaluator assumes that both tensors are in ColMajor
+ // layout; if the tensors are in RowMajor layout, the evaluator swaps lhs
+ // with rhs.
+ bool swapped_arguments;
+};
+
+// An output kernel allows fusing operations into the tensor contraction.
+//
+// Examples:
+// 1. Elementwise Relu transformation following Conv2D.
+// 2. AddBias to the Conv2D output channels dimension.
+//
+// The NoOpOutputKernel implements an output kernel that does absolutely nothing.
+struct NoOpOutputKernel {
+ /**
+ * The tensor contraction evaluator calls this kernel after finishing each
+ * block of the output matrix. Output blocks belong to the 2-dimensional
+ * output tensor.
+ *
+ * TensorContractionParams contains contraction dimensions information
+ * required to map output 2-d space into the expected output tensor space
+ * (potentially higher dimensional).
+ *
+ * \param[in] output_mapper Access to output tensor memory
+ * \param[in] params Tensor contraction parameters
+ * \param[in] i Index of a first row available through output_mapper
+ * \param[in] j Index of a first column available through output_mapper
+ * \param[in] num_rows Number of available rows
+ * \param[in] num_cols Number of available columns
+ */
+ template <typename Index, typename Scalar>
+ EIGEN_ALWAYS_INLINE void operator()(
+ const internal::blas_data_mapper<Scalar, Index, ColMajor>& /*output_mapper*/,
+ const TensorContractionParams& /*params*/, Index /*i*/,
+ Index /*j*/, Index /*num_rows*/, Index /*num_cols*/) const {}
+};
+
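+// Illustrative sketch (not part of the patch): a user-defined output kernel
+// that applies an elementwise Relu to every finished block of the output.
+// The name `ReluOutputKernel` is an assumption made for this example; it only
+// relies on the operator() signature documented above.
+//
+//   struct ReluOutputKernel {
+//     template <typename Index, typename Scalar>
+//     EIGEN_ALWAYS_INLINE void operator()(
+//         const internal::blas_data_mapper<Scalar, Index, ColMajor>& output_mapper,
+//         const TensorContractionParams& /*params*/, Index /*i*/, Index /*j*/,
+//         Index num_rows, Index num_cols) const {
+//       for (Index c = 0; c < num_cols; ++c) {
+//         for (Index r = 0; r < num_rows; ++r) {
+//           Scalar& v = output_mapper(r, c);
+//           v = numext::maxi(v, Scalar(0));
+//         }
+//       }
+//     }
+//   };
+//
+//   // An instance is passed as the last constructor argument of
+//   // TensorContractionOp (declared below).
+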
+template<typename Indices, typename LhsXprType, typename RhsXprType, typename OutputKernelType = const NoOpOutputKernel>
+class TensorContractionOp : public TensorBase<TensorContractionOp<Indices, LhsXprType, RhsXprType, OutputKernelType>, ReadOnlyAccessors>
{
public:
typedef typename Eigen::internal::traits<TensorContractionOp>::Scalar Scalar;
@@ -147,8 +188,10 @@ class TensorContractionOp : public TensorBase<TensorContractionOp<Indices, LhsXp
typedef typename Eigen::internal::traits<TensorContractionOp>::Index Index;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorContractionOp(
- const LhsXprType& lhs, const RhsXprType& rhs, const Indices& dims)
- : m_lhs_xpr(lhs), m_rhs_xpr(rhs), m_indices(dims) {}
+ const LhsXprType& lhs, const RhsXprType& rhs, const Indices& dims,
+ const OutputKernelType& output_kernel = OutputKernelType())
+ : m_lhs_xpr(lhs), m_rhs_xpr(rhs), m_indices(dims),
+ m_output_kernel(output_kernel) {}
EIGEN_DEVICE_FUNC
const Indices& indices() const { return m_indices; }
@@ -162,10 +205,14 @@ class TensorContractionOp : public TensorBase<TensorContractionOp<Indices, LhsXp
const typename internal::remove_all<typename RhsXprType::Nested>::type&
rhsExpression() const { return m_rhs_xpr; }
+ EIGEN_DEVICE_FUNC
+ const OutputKernelType& outputKernel() const { return m_output_kernel; }
+
protected:
typename LhsXprType::Nested m_lhs_xpr;
typename RhsXprType::Nested m_rhs_xpr;
const Indices m_indices;
+ const OutputKernelType m_output_kernel;
};
@@ -175,9 +222,10 @@ struct TensorContractionEvaluatorBase
typedef typename internal::traits<Derived>::Indices Indices;
typedef typename internal::traits<Derived>::LeftArgType LeftArgType;
typedef typename internal::traits<Derived>::RightArgType RightArgType;
+ typedef typename internal::traits<Derived>::OutputKernelType OutputKernelType;
typedef typename internal::traits<Derived>::Device Device;
- typedef TensorContractionOp<Indices, LeftArgType, RightArgType> XprType;
+ typedef TensorContractionOp<Indices, LeftArgType, RightArgType, OutputKernelType> XprType;
typedef typename internal::remove_const<typename XprType::Scalar>::type Scalar;
typedef typename XprType::Index Index;
typedef typename XprType::CoeffReturnType CoeffReturnType;
@@ -185,7 +233,9 @@ struct TensorContractionEvaluatorBase
enum {
IsAligned = true,
- PacketAccess = (internal::unpacket_traits<PacketReturnType>::size > 1),
+ PacketAccess = (PacketType<CoeffReturnType, Device>::size > 1),
+ BlockAccess = false,
+ PreferBlockAccess = false,
Layout = TensorEvaluator<LeftArgType, Device>::Layout,
CoordAccess = false, // to be implemented
RawAccess = true
@@ -220,6 +270,7 @@ struct TensorContractionEvaluatorBase
m_rightImpl(choose(Cond<static_cast<int>(Layout) == static_cast<int>(ColMajor)>(),
op.rhsExpression(), op.lhsExpression()), device),
m_device(device),
+ m_output_kernel(op.outputKernel()),
m_result(NULL) {
EIGEN_STATIC_ASSERT((static_cast<int>(TensorEvaluator<LeftArgType, Device>::Layout) ==
static_cast<int>(TensorEvaluator<RightArgType, Device>::Layout)),
@@ -297,7 +348,7 @@ struct TensorContractionEvaluatorBase
// dimensions and right non-contracting dimensions.
m_lhs_inner_dim_contiguous = true;
int dim_idx = 0;
- unsigned int nocontract_idx = 0;
+ Index nocontract_idx = 0;
for (int i = 0; i < LDims; i++) {
// find if we are contracting on index i of left tensor
@@ -389,6 +440,12 @@ struct TensorContractionEvaluatorBase
numext::swap(m_dimensions[i], m_dimensions[j]);
}
}
+
+ // A set of parameters that allows the output kernel to map output tensor
+ // dimensions (i, j) back to the original tensor dimensions.
+ // TODO(ezhulenev): Add parameters required to infer output tensor index for
+ // more complex contractions than 2x2 on internal dimension.
+ m_tensor_contraction_params.swapped_arguments = static_cast<int>(Layout) == RowMajor;
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
@@ -406,47 +463,66 @@ struct TensorContractionEvaluatorBase
}
}
- EIGEN_DEVICE_FUNC void evalTo(Scalar* buffer) const {
- if (this->m_lhs_inner_dim_contiguous) {
- if (this->m_rhs_inner_dim_contiguous) {
- if (this->m_rhs_inner_dim_reordered) {
- static_cast<const Derived*>(this)->template evalProduct<true, true, true, Unaligned>(buffer);
- }
- else {
- static_cast<const Derived*>(this)->template evalProduct<true, true, false, Unaligned>(buffer);
- }
- }
- else {
- if (this->m_rhs_inner_dim_reordered) {
- static_cast<const Derived*>(this)->template evalProduct<true, false, true, Unaligned>(buffer);
- }
- else {
- static_cast<const Derived*>(this)->template evalProduct<true, false, false, Unaligned>(buffer);
- }
- }
+#define TENSOR_CONTRACTION_DISPATCH(METHOD, ALIGNMENT, ARGS) \
+ if (this->m_lhs_inner_dim_contiguous) { \
+ if (this->m_rhs_inner_dim_contiguous) { \
+ if (this->m_rhs_inner_dim_reordered) { \
+ METHOD<true, true, true, ALIGNMENT>ARGS; \
+ } \
+ else { \
+ METHOD<true, true, false, ALIGNMENT>ARGS; \
+ } \
+ } \
+ else { \
+ if (this->m_rhs_inner_dim_reordered) { \
+ METHOD<true, false, true, ALIGNMENT>ARGS; \
+ } \
+ else { \
+ METHOD<true, false, false, ALIGNMENT>ARGS; \
+ } \
+ } \
+ } \
+ else { \
+ if (this->m_rhs_inner_dim_contiguous) { \
+ if (this->m_rhs_inner_dim_reordered) { \
+ METHOD<false, true, true, ALIGNMENT>ARGS; \
+ } \
+ else { \
+ METHOD<false, true, false, ALIGNMENT>ARGS; \
+ } \
+ } \
+ else { \
+ if (this->m_rhs_inner_dim_reordered) { \
+ METHOD<false, false, true, ALIGNMENT>ARGS; \
+ } \
+ else { \
+ METHOD<false, false, false, ALIGNMENT>ARGS; \
+ } \
+ } \
}
- else {
- if (this->m_rhs_inner_dim_contiguous) {
- if (this->m_rhs_inner_dim_reordered) {
- static_cast<const Derived*>(this)->template evalProduct<false, true, true, Unaligned>(buffer);
- }
- else {
- static_cast<const Derived*>(this)->template evalProduct<false, true, false, Unaligned>(buffer);
- }
- }
- else {
- if (this->m_rhs_inner_dim_reordered) {
- static_cast<const Derived*>(this)->template evalProduct<false, false, true, Unaligned>(buffer);
- }
- else {
- static_cast<const Derived*>(this)->template evalProduct<false, false, false, Unaligned>(buffer);
- }
- }
+
+ EIGEN_DEVICE_FUNC void evalTo(Scalar* buffer) const {
+ static_cast<const Derived*>(this)->template evalProduct<Unaligned>(buffer);
+ }
+
+ template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous,
+ bool rhs_inner_dim_reordered, int Alignment>
+ void evalProductSequential(Scalar* buffer) const {
+ if (this->m_j_size == 1) {
+ this->template evalGemv<lhs_inner_dim_contiguous,
+ rhs_inner_dim_contiguous, rhs_inner_dim_reordered,
+ Alignment>(buffer);
+ } else {
+ this->template evalGemm<lhs_inner_dim_contiguous, rhs_inner_dim_contiguous,
+ rhs_inner_dim_reordered, Alignment>(buffer);
}
}
template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
- EIGEN_DEVICE_FUNC void evalGemv(Scalar* buffer) const {
+ #if !defined(EIGEN_HIPCC)
+ EIGEN_DEVICE_FUNC
+ #endif
+ void evalGemv(Scalar* buffer) const {
const Index rows = m_i_size;
const Index cols = m_k_size;
@@ -484,10 +560,18 @@ struct TensorContractionEvaluatorBase
internal::general_matrix_vector_product<Index,LhsScalar,LhsMapper,ColMajor,false,RhsScalar,RhsMapper,false>::run(
rows, cols, lhs, rhs,
buffer, resIncr, alpha);
+
+ typedef internal::blas_data_mapper<Scalar, Index, ColMajor> OutputMapper;
+ m_output_kernel(OutputMapper(buffer, rows), m_tensor_contraction_params,
+ static_cast<Index>(0), static_cast<Index>(0), rows,
+ static_cast<Index>(1));
}
template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
- EIGEN_DEVICE_FUNC void evalGemm(Scalar* buffer) const {
+ #if !defined(EIGEN_HIPCC)
+ EIGEN_DEVICE_FUNC
+ #endif
+ void evalGemm(Scalar* buffer) const {
#if defined(EIGEN_VECTORIZE_AVX) && defined(EIGEN_USE_LIBXSMM)
if (m_can_use_xsmm) {
evalGemmXSMM(buffer);
@@ -536,7 +620,7 @@ struct TensorContractionEvaluatorBase
typedef internal::blas_data_mapper<Scalar, Index, ColMajor> OutputMapper;
// Declare GEBP packing and kernel structs
- internal::gemm_pack_lhs<LhsScalar, Index, typename LhsMapper::SubMapper, mr, Traits::LhsProgress, ColMajor> pack_lhs;
+ internal::gemm_pack_lhs<LhsScalar, Index, typename LhsMapper::SubMapper, mr, Traits::LhsProgress, typename Traits::LhsPacket4Packing, ColMajor> pack_lhs;
internal::gemm_pack_rhs<RhsScalar, Index, typename RhsMapper::SubMapper, nr, ColMajor> pack_rhs;
internal::gebp_kernel<LhsScalar, RhsScalar, Index, OutputMapper, mr, nr, false, false> gebp;
@@ -551,7 +635,7 @@ struct TensorContractionEvaluatorBase
OutputMapper output(buffer, m);
// Sizes of the blocks to load in cache. See the Goto paper for details.
- internal::TensorContractionBlocking<LhsMapper, RhsMapper, Index, internal::ShardByCol> blocking(k, m, n, 1);
+ internal::TensorContractionBlocking<LhsScalar, RhsScalar, Index, internal::ShardByCol> blocking(k, m, n, 1);
const Index kc = blocking.kc();
const Index mc = numext::mini(m, blocking.mc());
const Index nc = numext::mini(n, blocking.nc());
@@ -577,7 +661,15 @@ struct TensorContractionEvaluatorBase
// call gebp (matrix kernel)
// The parameters here are copied from Eigen's GEMM implementation
- gebp(output.getSubMapper(i2, j2), blockA, blockB, actual_mc, actual_kc, actual_nc, Scalar(1), -1, -1, 0, 0);
+ const OutputMapper output_mapper = output.getSubMapper(i2, j2);
+ gebp(output_mapper, blockA, blockB, actual_mc, actual_kc, actual_nc,
+ Scalar(1), -1, -1, 0, 0);
+
+ // We are done with this [i2, j2] output block.
+ if (k2 + kc >= k) {
+ m_output_kernel(output_mapper, m_tensor_contraction_params, i2, j2,
+ actual_mc, actual_nc);
+ }
}
}
}
@@ -609,7 +701,7 @@ struct TensorContractionEvaluatorBase
return internal::ploadt<PacketReturnType, LoadMode>(m_result + index);
}
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar* data() const { return m_result; }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename Eigen::internal::traits<XprType>::PointerType data() const { return m_result; }
protected:
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void EnableXSMMIfPossible(const array<IndexPair<Index>, ContractDims>& eval_op_indices) {
@@ -840,23 +932,26 @@ protected:
Index m_j_size;
Index m_k_size;
+ TensorContractionParams m_tensor_contraction_params;
+
TensorEvaluator<EvalLeftArgType, Device> m_leftImpl;
TensorEvaluator<EvalRightArgType, Device> m_rightImpl;
const Device& m_device;
+ OutputKernelType m_output_kernel;
Scalar* m_result;
bool m_can_use_xsmm;
};
// evaluator for default device
-template<typename Indices, typename LeftArgType, typename RightArgType, typename Device>
-struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType>, Device> :
+template<typename Indices, typename LeftArgType, typename RightArgType, typename OutputKernelType, typename Device>
+struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType, OutputKernelType>, Device> :
public TensorContractionEvaluatorBase<
- TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType>, Device> > {
- typedef TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType>, Device> Self;
+ TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType, OutputKernelType>, Device> > {
+ typedef TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType, OutputKernelType>, Device> Self;
typedef TensorContractionEvaluatorBase<Self> Base;
- typedef TensorContractionOp<Indices, LeftArgType, RightArgType> XprType;
+ typedef TensorContractionOp<Indices, LeftArgType, RightArgType, OutputKernelType> XprType;
typedef typename internal::remove_const<typename XprType::Scalar>::type Scalar;
typedef typename XprType::Index Index;
typedef typename XprType::CoeffReturnType CoeffReturnType;
@@ -893,14 +988,9 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
EIGEN_DEVICE_FUNC TensorEvaluator(const XprType& op, const Device& device) :
Base(op, device) { }
- template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
- EIGEN_DEVICE_FUNC void evalProduct(Scalar* buffer) const {
- if (this->m_j_size == 1) {
- this->template evalGemv<lhs_inner_dim_contiguous, rhs_inner_dim_contiguous, rhs_inner_dim_reordered, Alignment>(buffer);
- return;
- }
-
- this->template evalGemm<lhs_inner_dim_contiguous, rhs_inner_dim_contiguous, rhs_inner_dim_reordered, Alignment>(buffer);
+ template <int Alignment>
+ void evalProduct(Scalar* buffer) const {
+ TENSOR_CONTRACTION_DISPATCH(this->template evalProductSequential, Alignment, (buffer));
}
};
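For readability, the TENSOR_CONTRACTION_DISPATCH macro defined above simply turns the three runtime flags (m_lhs_inner_dim_contiguous, m_rhs_inner_dim_contiguous, m_rhs_inner_dim_reordered) into one of eight template instantiations of the given method. For instance, in the branch where all three flags are true, the call in evalProduct above reduces to:

  // Conceptual expansion of
  //   TENSOR_CONTRACTION_DISPATCH(this->template evalProductSequential, Alignment, (buffer));
  // when m_lhs_inner_dim_contiguous, m_rhs_inner_dim_contiguous and
  // m_rhs_inner_dim_reordered are all true:
  this->template evalProductSequential<true, true, true, Alignment>(buffer);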
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionBlocking.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionBlocking.h
index d34f9caee..cf281192c 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionBlocking.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionBlocking.h
@@ -21,14 +21,28 @@ enum {
// Default Blocking Strategy
-template <typename LhsMapper, typename RhsMapper, typename Index, int ShardingType=ShardByCol>
+template <typename LhsScalar, typename RhsScalar, typename Index, int ShardingType=ShardByCol>
class TensorContractionBlocking {
public:
- typedef typename LhsMapper::Scalar LhsScalar;
- typedef typename RhsMapper::Scalar RhsScalar;
+ /*
+ Adding EIGEN_DEVICE_FUNC unconditionally to the 'TensorContractionBlocking' constructor in `TensorContractionBlocking.h`
+ requires adding EIGEN_DEVICE_FUNC to `computeProductBlockingSizes` in `GeneralBlockPanelKernel.h`,
+ which, in turn, requires adding EIGEN_DEVICE_FUNC to `evaluateProductBlockingSizesHeuristic` in `GeneralBlockPanelKernel.h`,
+ which, in turn, requires adding EIGEN_DEVICE_FUNC to `manage_caching_sizes` in `GeneralBlockPanelKernel.h`
+ (otherwise HIPCC will error out).
- EIGEN_DEVICE_FUNC TensorContractionBlocking(Index k, Index m, Index n, Index num_threads = 1) :
+ However, adding EIGEN_DEVICE_FUNC to `manage_caching_sizes` in `GeneralBlockPanelKernel.h`
+ makes NVCC error out with the following error:
+
+ ../Eigen/src/Core/products/GeneralBlockPanelKernel.h(57): error #2901:
+ dynamic initialization is not supported for function-scope static variables within a __device__/__global__ function
+ */
+
+ #if !defined(EIGEN_HIPCC)
+ EIGEN_DEVICE_FUNC
+ #endif
+ TensorContractionBlocking(Index k, Index m, Index n, Index num_threads = 1) :
kc_(k), mc_(m), nc_(n)
{
if (ShardingType == ShardByCol) {
@@ -75,7 +89,7 @@ class TensorXsmmContractionBlocking {
outer_n_ = outer_n_ != 0 ? outer_n_ : n;
}
#else
- // Defaults, possibly overriden per-platform.
+ // Defaults, possibly overridden per-platform.
copyA_ = true;
copyB_ = false;
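As a usage sketch of the retyped blocking class (mirroring the call site in evalGemm above, and assuming the usual unsupported Tensor headers are included; the scalar type, sizes, and single-threaded setting are placeholder values):

  // Pick cache-friendly panel sizes for a float contraction sharded by columns,
  // then clamp mc/nc to the problem size exactly as evalGemm does.
  const Eigen::Index k = 1024, m = 512, n = 256;  // placeholder contraction sizes
  Eigen::internal::TensorContractionBlocking<float, float, Eigen::Index,
                                             Eigen::internal::ShardByCol>
      blocking(k, m, n, /*num_threads=*/1);
  const Eigen::Index kc = blocking.kc();
  const Eigen::Index mc = Eigen::numext::mini(m, blocking.mc());
  const Eigen::Index nc = Eigen::numext::mini(n, blocking.nc());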
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionCuda.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionCuda.h
index d65dbb40f..3f315fedc 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionCuda.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionCuda.h
@@ -1,1391 +1,6 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2014-2015 Benoit Steiner <benoit.steiner.goog@gmail.com>
-// Copyright (C) 2015 Navdeep Jaitly <ndjaitly@google.com>
-// Copyright (C) 2014 Eric Martin <eric@ericmart.in>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-#ifndef EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_CUDA_H
-#define EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_CUDA_H
+#if defined(__clang__) || defined(__GNUC__)
+#warning "Deprecated header file, please either include the main Eigen/CXX11/Tensor header or the respective TensorContractionGpu.h file"
+#endif
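In practice, code that used to include this header directly would now pull in the GPU contraction kernels through the main Tensor header instead. A sketch of the assumed replacement, with the usual EIGEN_USE_GPU guard that enables these kernels:

  // Replacement for a direct include of TensorContractionCuda.h:
  #define EIGEN_USE_GPU
  #include <unsupported/Eigen/CXX11/Tensor>  // brings in TensorContractionGpu.h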
-#if defined(EIGEN_USE_GPU) && defined(__CUDACC__)
-
-namespace Eigen {
-
-template<typename Scalar, typename Index, typename LhsMapper,
- typename RhsMapper, typename OutputMapper, bool needs_edge_check>
-__device__ EIGEN_STRONG_INLINE void
-EigenContractionKernelInternal(const LhsMapper lhs, const RhsMapper rhs,
- const OutputMapper output, Scalar* lhs_shmem, Scalar* rhs_shmem,
- const Index m_size, const Index n_size, const Index k_size) {
-
- const Index m_block_idx = blockIdx.x;
- const Index n_block_idx = blockIdx.y;
-
- const Index base_m = 64 * m_block_idx;
- const Index base_n = 64 * n_block_idx;
-
- // declare and initialize 64 registers for output 8x8 block
-
- // prefetch registers
- Scalar lhs_pf0;
- Scalar lhs_pf1;
- Scalar lhs_pf2;
- Scalar lhs_pf3;
- Scalar lhs_pf4;
- Scalar lhs_pf5;
- Scalar lhs_pf6;
- Scalar lhs_pf7;
-
- Scalar rhs_pf0;
- Scalar rhs_pf1;
- Scalar rhs_pf2;
- Scalar rhs_pf3;
- Scalar rhs_pf4;
- Scalar rhs_pf5;
- Scalar rhs_pf6;
- Scalar rhs_pf7;
-
- // shared memory is formatted
- // (contract idx in block, nocontract idx in block, block idx)
- // where block idx is column major. This transposition limits the number of
- // bank conflicts when reading the LHS. The core idea is that since the contracting
- // index is shared by both sides, then the contracting index should be in threadIdx.x.
-
- // On the LHS, we pad each row inside of each block with an extra element. This makes
- // each block 8 rows of 9 elements, which is 72 elements. This gives no bank conflicts
- // on writes and very few 2-way conflicts on reads. There is an 8x8 grid of these blocks.
-
- // On the RHS we just add 8 padding elements to the end of each block. This gives no bank
- // conflicts on writes and also none on reads.
-
- // storage indices
- const Index lhs_store_idx_base = threadIdx.y * 72 + threadIdx.x * 9 + threadIdx.z;
- const Index rhs_store_idx_base = threadIdx.y * 72 + threadIdx.z * 8 + threadIdx.x;
-
- const Index lhs_store_idx_0 = lhs_store_idx_base + 576 * 0;
- const Index lhs_store_idx_1 = lhs_store_idx_base + 576 * 1;
- const Index lhs_store_idx_2 = lhs_store_idx_base + 576 * 2;
- const Index lhs_store_idx_3 = lhs_store_idx_base + 576 * 3;
- const Index lhs_store_idx_4 = lhs_store_idx_base + 576 * 4;
- const Index lhs_store_idx_5 = lhs_store_idx_base + 576 * 5;
- const Index lhs_store_idx_6 = lhs_store_idx_base + 576 * 6;
- const Index lhs_store_idx_7 = lhs_store_idx_base + 576 * 7;
-
- const Index rhs_store_idx_0 = rhs_store_idx_base + 576 * 0;
- const Index rhs_store_idx_1 = rhs_store_idx_base + 576 * 1;
- const Index rhs_store_idx_2 = rhs_store_idx_base + 576 * 2;
- const Index rhs_store_idx_3 = rhs_store_idx_base + 576 * 3;
- const Index rhs_store_idx_4 = rhs_store_idx_base + 576 * 4;
- const Index rhs_store_idx_5 = rhs_store_idx_base + 576 * 5;
- const Index rhs_store_idx_6 = rhs_store_idx_base + 576 * 6;
- const Index rhs_store_idx_7 = rhs_store_idx_base + 576 * 7;
-
- // in the loading code, the following variables are important:
- // threadIdx.x: the vertical position in an 8x8 block
- // threadIdx.y: the vertical index of the 8x8 block in the grid
- // threadIdx.z: the horizontal position in an 8x8 block
- // k: the horizontal index of the 8x8 block in the grid
- //
- // The k parameter is implicit (it was the loop counter for a loop that went
- // from 0 to <8, but now that loop is unrolled in the below code.
-
- const Index load_idx_vert = threadIdx.x + 8 * threadIdx.y;
- const Index lhs_vert = base_m + load_idx_vert;
-
-#define prefetchIntoRegisters(base_k) \
- { \
- lhs_pf0 = conv(0); \
- lhs_pf1 = conv(0); \
- lhs_pf2 = conv(0); \
- lhs_pf3 = conv(0); \
- lhs_pf4 = conv(0); \
- lhs_pf5 = conv(0); \
- lhs_pf6 = conv(0); \
- lhs_pf7 = conv(0); \
- \
- rhs_pf0 = conv(0); \
- rhs_pf1 = conv(0); \
- rhs_pf2 = conv(0); \
- rhs_pf3 = conv(0); \
- rhs_pf4 = conv(0); \
- rhs_pf5 = conv(0); \
- rhs_pf6 = conv(0); \
- rhs_pf7 = conv(0); \
- \
- if (!needs_edge_check || lhs_vert < m_size) { \
- const Index lhs_horiz_0 = base_k + threadIdx.z + 0 * 8; \
- const Index lhs_horiz_1 = base_k + threadIdx.z + 1 * 8; \
- const Index lhs_horiz_2 = base_k + threadIdx.z + 2 * 8; \
- const Index lhs_horiz_3 = base_k + threadIdx.z + 3 * 8; \
- const Index lhs_horiz_4 = base_k + threadIdx.z + 4 * 8; \
- const Index lhs_horiz_5 = base_k + threadIdx.z + 5 * 8; \
- const Index lhs_horiz_6 = base_k + threadIdx.z + 6 * 8; \
- const Index lhs_horiz_7 = base_k + threadIdx.z + 7 * 8; \
- \
- if (!needs_edge_check || lhs_horiz_7 < k_size) { \
- lhs_pf0 = lhs(lhs_vert, lhs_horiz_0); \
- lhs_pf1 = lhs(lhs_vert, lhs_horiz_1); \
- lhs_pf2 = lhs(lhs_vert, lhs_horiz_2); \
- lhs_pf3 = lhs(lhs_vert, lhs_horiz_3); \
- lhs_pf4 = lhs(lhs_vert, lhs_horiz_4); \
- lhs_pf5 = lhs(lhs_vert, lhs_horiz_5); \
- lhs_pf6 = lhs(lhs_vert, lhs_horiz_6); \
- lhs_pf7 = lhs(lhs_vert, lhs_horiz_7); \
- } else if (lhs_horiz_6 < k_size) { \
- lhs_pf0 = lhs(lhs_vert, lhs_horiz_0); \
- lhs_pf1 = lhs(lhs_vert, lhs_horiz_1); \
- lhs_pf2 = lhs(lhs_vert, lhs_horiz_2); \
- lhs_pf3 = lhs(lhs_vert, lhs_horiz_3); \
- lhs_pf4 = lhs(lhs_vert, lhs_horiz_4); \
- lhs_pf5 = lhs(lhs_vert, lhs_horiz_5); \
- lhs_pf6 = lhs(lhs_vert, lhs_horiz_6); \
- } else if (lhs_horiz_5 < k_size) { \
- lhs_pf0 = lhs(lhs_vert, lhs_horiz_0); \
- lhs_pf1 = lhs(lhs_vert, lhs_horiz_1); \
- lhs_pf2 = lhs(lhs_vert, lhs_horiz_2); \
- lhs_pf3 = lhs(lhs_vert, lhs_horiz_3); \
- lhs_pf4 = lhs(lhs_vert, lhs_horiz_4); \
- lhs_pf5 = lhs(lhs_vert, lhs_horiz_5); \
- } else if (lhs_horiz_4 < k_size) { \
- lhs_pf0 = lhs(lhs_vert, lhs_horiz_0); \
- lhs_pf1 = lhs(lhs_vert, lhs_horiz_1); \
- lhs_pf2 = lhs(lhs_vert, lhs_horiz_2); \
- lhs_pf3 = lhs(lhs_vert, lhs_horiz_3); \
- lhs_pf4 = lhs(lhs_vert, lhs_horiz_4); \
- } else if (lhs_horiz_3 < k_size) { \
- lhs_pf0 = lhs(lhs_vert, lhs_horiz_0); \
- lhs_pf1 = lhs(lhs_vert, lhs_horiz_1); \
- lhs_pf2 = lhs(lhs_vert, lhs_horiz_2); \
- lhs_pf3 = lhs(lhs_vert, lhs_horiz_3); \
- } else if (lhs_horiz_2 < k_size) { \
- lhs_pf0 = lhs(lhs_vert, lhs_horiz_0); \
- lhs_pf1 = lhs(lhs_vert, lhs_horiz_1); \
- lhs_pf2 = lhs(lhs_vert, lhs_horiz_2); \
- } else if (lhs_horiz_1 < k_size) { \
- lhs_pf0 = lhs(lhs_vert, lhs_horiz_0); \
- lhs_pf1 = lhs(lhs_vert, lhs_horiz_1); \
- } else if (lhs_horiz_0 < k_size) { \
- lhs_pf0 = lhs(lhs_vert, lhs_horiz_0); \
- } \
- } \
- \
- const Index rhs_vert = base_k + load_idx_vert; \
- if (!needs_edge_check || rhs_vert < k_size) { \
- const Index rhs_horiz_0 = base_n + threadIdx.z + 0 * 8; \
- const Index rhs_horiz_1 = base_n + threadIdx.z + 1 * 8; \
- const Index rhs_horiz_2 = base_n + threadIdx.z + 2 * 8; \
- const Index rhs_horiz_3 = base_n + threadIdx.z + 3 * 8; \
- const Index rhs_horiz_4 = base_n + threadIdx.z + 4 * 8; \
- const Index rhs_horiz_5 = base_n + threadIdx.z + 5 * 8; \
- const Index rhs_horiz_6 = base_n + threadIdx.z + 6 * 8; \
- const Index rhs_horiz_7 = base_n + threadIdx.z + 7 * 8; \
- \
- if (rhs_horiz_7 < n_size) { \
- rhs_pf0 = rhs(rhs_vert, rhs_horiz_0); \
- rhs_pf1 = rhs(rhs_vert, rhs_horiz_1); \
- rhs_pf2 = rhs(rhs_vert, rhs_horiz_2); \
- rhs_pf3 = rhs(rhs_vert, rhs_horiz_3); \
- rhs_pf4 = rhs(rhs_vert, rhs_horiz_4); \
- rhs_pf5 = rhs(rhs_vert, rhs_horiz_5); \
- rhs_pf6 = rhs(rhs_vert, rhs_horiz_6); \
- rhs_pf7 = rhs(rhs_vert, rhs_horiz_7); \
- } else if (rhs_horiz_6 < n_size) { \
- rhs_pf0 = rhs(rhs_vert, rhs_horiz_0); \
- rhs_pf1 = rhs(rhs_vert, rhs_horiz_1); \
- rhs_pf2 = rhs(rhs_vert, rhs_horiz_2); \
- rhs_pf3 = rhs(rhs_vert, rhs_horiz_3); \
- rhs_pf4 = rhs(rhs_vert, rhs_horiz_4); \
- rhs_pf5 = rhs(rhs_vert, rhs_horiz_5); \
- rhs_pf6 = rhs(rhs_vert, rhs_horiz_6); \
- } else if (rhs_horiz_5 < n_size) { \
- rhs_pf0 = rhs(rhs_vert, rhs_horiz_0); \
- rhs_pf1 = rhs(rhs_vert, rhs_horiz_1); \
- rhs_pf2 = rhs(rhs_vert, rhs_horiz_2); \
- rhs_pf3 = rhs(rhs_vert, rhs_horiz_3); \
- rhs_pf4 = rhs(rhs_vert, rhs_horiz_4); \
- rhs_pf5 = rhs(rhs_vert, rhs_horiz_5); \
- } else if (rhs_horiz_4 < n_size) { \
- rhs_pf0 = rhs(rhs_vert, rhs_horiz_0); \
- rhs_pf1 = rhs(rhs_vert, rhs_horiz_1); \
- rhs_pf2 = rhs(rhs_vert, rhs_horiz_2); \
- rhs_pf3 = rhs(rhs_vert, rhs_horiz_3); \
- rhs_pf4 = rhs(rhs_vert, rhs_horiz_4); \
- } else if (rhs_horiz_3 < n_size) { \
- rhs_pf0 = rhs(rhs_vert, rhs_horiz_0); \
- rhs_pf1 = rhs(rhs_vert, rhs_horiz_1); \
- rhs_pf2 = rhs(rhs_vert, rhs_horiz_2); \
- rhs_pf3 = rhs(rhs_vert, rhs_horiz_3); \
- } else if (rhs_horiz_2 < n_size) { \
- rhs_pf0 = rhs(rhs_vert, rhs_horiz_0); \
- rhs_pf1 = rhs(rhs_vert, rhs_horiz_1); \
- rhs_pf2 = rhs(rhs_vert, rhs_horiz_2); \
- } else if (rhs_horiz_1 < n_size) { \
- rhs_pf0 = rhs(rhs_vert, rhs_horiz_0); \
- rhs_pf1 = rhs(rhs_vert, rhs_horiz_1); \
- } else if (rhs_horiz_0 < n_size) { \
- rhs_pf0 = rhs(rhs_vert, rhs_horiz_0); \
- } \
- } \
- } \
-
-#define writeRegToShmem(_) \
- lhs_shmem[lhs_store_idx_0] = lhs_pf0; \
- rhs_shmem[rhs_store_idx_0] = rhs_pf0; \
- \
- lhs_shmem[lhs_store_idx_1] = lhs_pf1; \
- rhs_shmem[rhs_store_idx_1] = rhs_pf1; \
- \
- lhs_shmem[lhs_store_idx_2] = lhs_pf2; \
- rhs_shmem[rhs_store_idx_2] = rhs_pf2; \
- \
- lhs_shmem[lhs_store_idx_3] = lhs_pf3; \
- rhs_shmem[rhs_store_idx_3] = rhs_pf3; \
- \
- lhs_shmem[lhs_store_idx_4] = lhs_pf4; \
- rhs_shmem[rhs_store_idx_4] = rhs_pf4; \
- \
- lhs_shmem[lhs_store_idx_5] = lhs_pf5; \
- rhs_shmem[rhs_store_idx_5] = rhs_pf5; \
- \
- lhs_shmem[lhs_store_idx_6] = lhs_pf6; \
- rhs_shmem[rhs_store_idx_6] = rhs_pf6; \
- \
- lhs_shmem[lhs_store_idx_7] = lhs_pf7; \
- rhs_shmem[rhs_store_idx_7] = rhs_pf7; \
-
- // declare and initialize result array
-#define res(i, j) _res_##i##j
-#define initResultRow(i) \
- Scalar res(i, 0) = conv(0); \
- Scalar res(i, 1) = conv(0); \
- Scalar res(i, 2) = conv(0); \
- Scalar res(i, 3) = conv(0); \
- Scalar res(i, 4) = conv(0); \
- Scalar res(i, 5) = conv(0); \
- Scalar res(i, 6) = conv(0); \
- Scalar res(i, 7) = conv(0); \
-
- internal::scalar_cast_op<int, Scalar> conv;
- initResultRow(0);
- initResultRow(1);
- initResultRow(2);
- initResultRow(3);
- initResultRow(4);
- initResultRow(5);
- initResultRow(6);
- initResultRow(7);
-#undef initResultRow
-
- for (Index base_k = 0; base_k < k_size; base_k += 64) {
- // wait for previous iteration to finish with shmem. Despite common sense,
- // the code is a bit faster with this here then at bottom of loop
- __syncthreads();
-
- prefetchIntoRegisters(base_k);
- writeRegToShmem();
-
- #undef prefetchIntoRegisters
- #undef writeRegToShmem
-
- // wait for shared mem packing to be done before starting computation
- __syncthreads();
-
- // compute 8x8 matrix product by outer product. This involves packing one column
- // of LHS and one row of RHS into registers (takes 16 registers).
-
-#define lcol(i) _lcol##i
- Scalar lcol(0);
- Scalar lcol(1);
- Scalar lcol(2);
- Scalar lcol(3);
- Scalar lcol(4);
- Scalar lcol(5);
- Scalar lcol(6);
- Scalar lcol(7);
-
-#define rrow(j) _rrow##j
- Scalar rrow(0);
- Scalar rrow(1);
- Scalar rrow(2);
- Scalar rrow(3);
- Scalar rrow(4);
- Scalar rrow(5);
- Scalar rrow(6);
- Scalar rrow(7);
-
- // Now x corresponds to k, y to m, and z to n
- const Scalar* lhs_block = &lhs_shmem[threadIdx.x + 9 * threadIdx.y];
- const Scalar* rhs_block = &rhs_shmem[threadIdx.x + 8 * threadIdx.z];
-
-#define lhs_element(i, j) lhs_block[72 * ((i) + 8 * (j))]
-#define rhs_element(i, j) rhs_block[72 * ((i) + 8 * (j))]
-
-#define loadData(i, j) \
- lcol(0) = lhs_element(0, j); \
- rrow(0) = rhs_element(i, 0); \
- lcol(1) = lhs_element(1, j); \
- rrow(1) = rhs_element(i, 1); \
- lcol(2) = lhs_element(2, j); \
- rrow(2) = rhs_element(i, 2); \
- lcol(3) = lhs_element(3, j); \
- rrow(3) = rhs_element(i, 3); \
- lcol(4) = lhs_element(4, j); \
- rrow(4) = rhs_element(i, 4); \
- lcol(5) = lhs_element(5, j); \
- rrow(5) = rhs_element(i, 5); \
- lcol(6) = lhs_element(6, j); \
- rrow(6) = rhs_element(i, 6); \
- lcol(7) = lhs_element(7, j); \
- rrow(7) = rhs_element(i, 7); \
-
-#define computeCol(j) \
- res(0, j) += lcol(0) * rrow(j); \
- res(1, j) += lcol(1) * rrow(j); \
- res(2, j) += lcol(2) * rrow(j); \
- res(3, j) += lcol(3) * rrow(j); \
- res(4, j) += lcol(4) * rrow(j); \
- res(5, j) += lcol(5) * rrow(j); \
- res(6, j) += lcol(6) * rrow(j); \
- res(7, j) += lcol(7) * rrow(j); \
-
-#define computePass(i) \
- loadData(i, i); \
- \
- computeCol(0); \
- computeCol(1); \
- computeCol(2); \
- computeCol(3); \
- computeCol(4); \
- computeCol(5); \
- computeCol(6); \
- computeCol(7); \
-
- computePass(0);
- computePass(1);
- computePass(2);
- computePass(3);
- computePass(4);
- computePass(5);
- computePass(6);
- computePass(7);
-
-#undef lcol
-#undef rrow
-#undef lhs_element
-#undef rhs_element
-#undef loadData
-#undef computeCol
-#undef computePass
- } // end loop over k
-
- // we've now iterated over all of the large (ie width 64) k blocks and
- // accumulated results in registers. At this point thread (x, y, z) contains
- // the sum across all big k blocks of the product of little k block of index (x, y)
- // with block of index (y, z). To compute the final output, we need to reduce
- // the 8 threads over y by summation.
-#define shuffleInc(i, j, mask) res(i, j) += __shfl_xor(res(i, j), mask)
-
-#define reduceRow(i, mask) \
- shuffleInc(i, 0, mask); \
- shuffleInc(i, 1, mask); \
- shuffleInc(i, 2, mask); \
- shuffleInc(i, 3, mask); \
- shuffleInc(i, 4, mask); \
- shuffleInc(i, 5, mask); \
- shuffleInc(i, 6, mask); \
- shuffleInc(i, 7, mask); \
-
-#define reduceMatrix(mask) \
- reduceRow(0, mask); \
- reduceRow(1, mask); \
- reduceRow(2, mask); \
- reduceRow(3, mask); \
- reduceRow(4, mask); \
- reduceRow(5, mask); \
- reduceRow(6, mask); \
- reduceRow(7, mask); \
-
- // actually perform the reduction, now each thread of index (_, y, z)
- // contains the correct values in its registers that belong in the output
- // block
- reduceMatrix(1);
- reduceMatrix(2);
- reduceMatrix(4);
-
-#undef shuffleInc
-#undef reduceRow
-#undef reduceMatrix
-
- // now we need to copy the 64 values into main memory. We can't split work
- // among threads because all variables are in registers. There's 2 ways
- // to do this:
- // (1) have 1 thread do 64 writes from registers into global memory
- // (2) have 1 thread do 64 writes into shared memory, and then 8 threads
- // each do 8 writes into global memory. We can just overwrite the shared
- // memory from the problem we just solved.
- // (2) is slightly faster than (1) due to less branching and more ILP
-
- // TODO: won't yield much gain, but could just use currently unused shared mem
- // and then we won't have to sync
- // wait for shared mem to be out of use
- __syncthreads();
-
-#define writeResultShmem(i, j) \
- lhs_shmem[i + 8 * threadIdx.y + 64 * threadIdx.z + 512 * j] = res(i, j); \
-
-#define writeRow(i) \
- writeResultShmem(i, 0); \
- writeResultShmem(i, 1); \
- writeResultShmem(i, 2); \
- writeResultShmem(i, 3); \
- writeResultShmem(i, 4); \
- writeResultShmem(i, 5); \
- writeResultShmem(i, 6); \
- writeResultShmem(i, 7); \
-
- if (threadIdx.x == 0) {
- writeRow(0);
- writeRow(1);
- writeRow(2);
- writeRow(3);
- writeRow(4);
- writeRow(5);
- writeRow(6);
- writeRow(7);
- }
-#undef writeResultShmem
-#undef writeRow
-
- const int max_i_write = numext::mini((int)((m_size - base_m - threadIdx.y + 7) / 8), 8);
- const int max_j_write = numext::mini((int)((n_size - base_n - threadIdx.z + 7) / 8), 8);
-
- if (threadIdx.x < max_i_write) {
- if (max_j_write == 8) {
- // TODO: can i trade bank conflicts for coalesced writes?
- Scalar val0 = lhs_shmem[threadIdx.x + 8 * threadIdx.y + 64 * threadIdx.z + 512 * 0];
- Scalar val1 = lhs_shmem[threadIdx.x + 8 * threadIdx.y + 64 * threadIdx.z + 512 * 1];
- Scalar val2 = lhs_shmem[threadIdx.x + 8 * threadIdx.y + 64 * threadIdx.z + 512 * 2];
- Scalar val3 = lhs_shmem[threadIdx.x + 8 * threadIdx.y + 64 * threadIdx.z + 512 * 3];
- Scalar val4 = lhs_shmem[threadIdx.x + 8 * threadIdx.y + 64 * threadIdx.z + 512 * 4];
- Scalar val5 = lhs_shmem[threadIdx.x + 8 * threadIdx.y + 64 * threadIdx.z + 512 * 5];
- Scalar val6 = lhs_shmem[threadIdx.x + 8 * threadIdx.y + 64 * threadIdx.z + 512 * 6];
- Scalar val7 = lhs_shmem[threadIdx.x + 8 * threadIdx.y + 64 * threadIdx.z + 512 * 7];
-
- output(base_m + threadIdx.y + 8 * threadIdx.x, base_n + threadIdx.z + 8 * 0) = val0;
- output(base_m + threadIdx.y + 8 * threadIdx.x, base_n + threadIdx.z + 8 * 1) = val1;
- output(base_m + threadIdx.y + 8 * threadIdx.x, base_n + threadIdx.z + 8 * 2) = val2;
- output(base_m + threadIdx.y + 8 * threadIdx.x, base_n + threadIdx.z + 8 * 3) = val3;
- output(base_m + threadIdx.y + 8 * threadIdx.x, base_n + threadIdx.z + 8 * 4) = val4;
- output(base_m + threadIdx.y + 8 * threadIdx.x, base_n + threadIdx.z + 8 * 5) = val5;
- output(base_m + threadIdx.y + 8 * threadIdx.x, base_n + threadIdx.z + 8 * 6) = val6;
- output(base_m + threadIdx.y + 8 * threadIdx.x, base_n + threadIdx.z + 8 * 7) = val7;
- } else {
-#pragma unroll 7
- for (int j = 0; j < max_j_write; j++) {
- Scalar val = lhs_shmem[threadIdx.x + 8 * threadIdx.y + 64 * threadIdx.z + 512 * j];
- output(base_m + threadIdx.y + 8 * threadIdx.x, base_n + threadIdx.z + 8 * j) = val;
- }
- }
- }
-#undef res
-}
-
-
-template<typename Scalar, typename Index, typename LhsMapper,
- typename RhsMapper, typename OutputMapper>
-__global__ void
-__launch_bounds__(512)
-EigenContractionKernel(const LhsMapper lhs, const RhsMapper rhs,
- const OutputMapper output,
- const Index m_size, const Index n_size, const Index k_size) {
- __shared__ Scalar lhs_shmem[72 * 64];
- __shared__ Scalar rhs_shmem[72 * 64];
-
- const Index m_block_idx = blockIdx.x;
- const Index n_block_idx = blockIdx.y;
-
- const Index base_m = 64 * m_block_idx;
- const Index base_n = 64 * n_block_idx;
-
- if (base_m + 63 < m_size && base_n + 63 < n_size) {
- EigenContractionKernelInternal<Scalar, Index, LhsMapper, RhsMapper, OutputMapper, false>(lhs, rhs, output, lhs_shmem, rhs_shmem, m_size, n_size, k_size);
- } else {
- EigenContractionKernelInternal<Scalar, Index, LhsMapper, RhsMapper, OutputMapper, true>(lhs, rhs, output, lhs_shmem, rhs_shmem, m_size, n_size, k_size);
- }
-}
-
-
-template<typename Index, typename LhsMapper,
- typename RhsMapper, typename OutputMapper, bool CHECK_LHS_BOUNDARY,
- bool CHECK_RHS_BOUNDARY>
-__device__ EIGEN_STRONG_INLINE void
-EigenFloatContractionKernelInternal16x16(const LhsMapper lhs, const RhsMapper rhs,
- const OutputMapper output, float2 lhs_shmem2[][16],
- float2 rhs_shmem2[][8], const Index m_size,
- const Index n_size, const Index k_size,
- const Index base_m, const Index base_n) {
- typedef float Scalar;
-
- // prefetch registers
- float4 lhs_pf0, rhs_pf0;
-
- float4 results[4];
- for (int i=0; i < 4; i++) {
- results[i].x = results[i].y = results[i].z = results[i].w = 0;
- }
-
-
-#define prefetch_lhs(reg, row, col) \
- if (!CHECK_LHS_BOUNDARY) { \
- if (col < k_size) { \
- reg =lhs.loadPacket<Unaligned>(row, col); \
- } \
- } else { \
- if (col < k_size) { \
- if (row + 3 < m_size) { \
- reg =lhs.loadPacket<Unaligned>(row, col); \
- } else if (row + 2 < m_size) { \
- reg.x =lhs(row + 0, col); \
- reg.y =lhs(row + 1, col); \
- reg.z =lhs(row + 2, col); \
- } else if (row + 1 < m_size) { \
- reg.x =lhs(row + 0, col); \
- reg.y =lhs(row + 1, col); \
- } else if (row < m_size) { \
- reg.x =lhs(row + 0, col); \
- } \
- } \
- } \
-
-
- Index lhs_vert = base_m+threadIdx.x*4;
-
- for (Index k = 0; k < k_size; k += 16) {
- lhs_pf0 = internal::pset1<float4>(0);
- rhs_pf0 = internal::pset1<float4>(0);
-
- Index lhs_horiz = threadIdx.y+k;
- prefetch_lhs(lhs_pf0, lhs_vert, lhs_horiz)
-
- Index rhs_vert = k+(threadIdx.x%4)*4;
- Index rhs_horiz0 = (threadIdx.x>>2)+threadIdx.y*4+base_n;
-
- if (!CHECK_RHS_BOUNDARY) {
- if ((rhs_vert + 3) < k_size) {
- // just CHECK_RHS_BOUNDARY
- rhs_pf0 = rhs.loadPacket<Unaligned>(rhs_vert, rhs_horiz0);
- } else if (rhs_vert + 2 < k_size) {
- // just CHECK_RHS_BOUNDARY
- rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
- rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0);
- rhs_pf0.z = rhs(rhs_vert + 2, rhs_horiz0);
- } else if (rhs_vert + 1 < k_size) {
- rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
- rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0);
- } else if (rhs_vert < k_size) {
- rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
- }
- } else {
- if (rhs_horiz0 < n_size) {
- if ((rhs_vert + 3) < k_size) {
- rhs_pf0 = rhs.loadPacket<Unaligned>(rhs_vert, rhs_horiz0);
- } else if ((rhs_vert + 2) < k_size) {
- rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
- rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0);
- rhs_pf0.z = rhs(rhs_vert + 2, rhs_horiz0);
- } else if ((rhs_vert + 1) < k_size) {
- rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
- rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0);
- } else if (rhs_vert < k_size) {
- rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
- }
- }
- }
- float x1, x2 ;
- // the following can be a bitwise operation..... some day.
- if((threadIdx.x%8) < 4) {
- x1 = rhs_pf0.y;
- x2 = rhs_pf0.w;
- } else {
- x1 = rhs_pf0.x;
- x2 = rhs_pf0.z;
- }
- x1 = __shfl_xor(x1, 4);
- x2 = __shfl_xor(x2, 4);
- if((threadIdx.x%8) < 4) {
- rhs_pf0.y = x1;
- rhs_pf0.w = x2;
- } else {
- rhs_pf0.x = x1;
- rhs_pf0.z = x2;
- }
-
- // We have 64 features.
- // Row 0 -> times (0, 4, 8, 12, 1, 5, 9, 13) for features 0, 1.
- // Row 1 -> times (0, 4, 8, 12, 1, 5, 9, 13) for features 2, 3.
- // ...
- // Row 31 -> times (0, 4, 8, 12, 1, 5, 9, 13) for features 62, 63
- // Row 32 -> times (2, 6, 10, 14, 3, 7, 11, 15) for features 0, 1
- // ...
- rhs_shmem2[(threadIdx.x>>3)+ threadIdx.y*2][threadIdx.x%8] = make_float2(rhs_pf0.x, rhs_pf0.y);
- rhs_shmem2[(threadIdx.x>>3)+ threadIdx.y*2+32][threadIdx.x%8] = make_float2(rhs_pf0.z, rhs_pf0.w);
-
- // Row 0 (time 0) -> features (0, 1), (4, 5), .. (28, 29), (32, 33), .. (60, 61)
- // Row 1 (time 1) -> features (0, 1), (4, 5), .. (28, 29), (32, 33), .. (60, 61)
- // ...
- // Row 15 (time 15) -> features (0, 1), (4, 5), .. (28, 29), (32, 33), .. (60, 61)
- // Row 16 (time 0) -> features (2, 3), (6, 7), .. (30, 31), (34, 35), .. (62, 63)
- // ...
-
- lhs_shmem2[threadIdx.y][threadIdx.x] = make_float2(lhs_pf0.x, lhs_pf0.y);
- lhs_shmem2[threadIdx.y+16][threadIdx.x] = make_float2(lhs_pf0.z, lhs_pf0.w);
-
-
-#define add_vals(fl1, fl2, fr1, fr2)\
- results[0].x += fl1.x * fr1.x;\
- results[0].y += fl1.y * fr1.x;\
- results[0].z += fl2.x * fr1.x;\
- results[0].w += fl2.y * fr1.x;\
-\
- results[1].x += fl1.x * fr1.y;\
- results[1].y += fl1.y * fr1.y;\
- results[1].z += fl2.x * fr1.y;\
- results[1].w += fl2.y * fr1.y;\
-\
- results[2].x += fl1.x * fr2.x;\
- results[2].y += fl1.y * fr2.x;\
- results[2].z += fl2.x * fr2.x;\
- results[2].w += fl2.y * fr2.x;\
-\
- results[3].x += fl1.x * fr2.y;\
- results[3].y += fl1.y * fr2.y;\
- results[3].z += fl2.x * fr2.y;\
- results[3].w += fl2.y * fr2.y;\
-
- __syncthreads();
-
- // Do the multiplies.
- #pragma unroll
- for (int koff = 0; koff < 16; koff ++) {
- // 32 x threads.
- float2 fl1 = lhs_shmem2[koff][threadIdx.x];
- float2 fl2 = lhs_shmem2[koff + 16][threadIdx.x];
-
- int start_feature = threadIdx.y * 4;
- float2 fr1 = rhs_shmem2[(start_feature>>1) + 32*((koff%4)/2)][koff/4 + (koff%2)*4];
- float2 fr2 = rhs_shmem2[(start_feature>>1) + 1 + 32*((koff%4)/2)][koff/4 + (koff%2)*4];
-
- add_vals(fl1, fl2, fr1, fr2)
- }
- __syncthreads();
- }
-
-#undef prefetch_lhs
-#undef add_vals
-
- Index horiz_base = threadIdx.y*4+base_n;
- if (!CHECK_LHS_BOUNDARY && !CHECK_RHS_BOUNDARY) {
- for (int i = 0; i < 4; i++) {
- output(lhs_vert, horiz_base + i) = results[i].x;
- output(lhs_vert + 1, horiz_base + i) = results[i].y;
- output(lhs_vert + 2, horiz_base + i) = results[i].z;
- output(lhs_vert + 3, horiz_base + i) = results[i].w;
- }
- } else if (!CHECK_RHS_BOUNDARY) {
- // CHECK LHS
- if (lhs_vert + 3 < m_size) {
- for (int i = 0; i < 4; i++) {
- output(lhs_vert, horiz_base + i) = results[i].x;
- output(lhs_vert + 1, horiz_base + i) = results[i].y;
- output(lhs_vert + 2, horiz_base + i) = results[i].z;
- output(lhs_vert + 3, horiz_base + i) = results[i].w;
- }
- } else if (lhs_vert + 2 < m_size) {
- for (int i = 0; i < 4; i++) {
- output(lhs_vert, horiz_base + i) = results[i].x;
- output(lhs_vert + 1, horiz_base + i) = results[i].y;
- output(lhs_vert + 2, horiz_base + i) = results[i].z;
- }
- } else if (lhs_vert + 1 < m_size) {
- for (int i = 0; i < 4; i++) {
- output(lhs_vert, horiz_base + i) = results[i].x;
- output(lhs_vert + 1, horiz_base + i) = results[i].y;
- }
- } else if (lhs_vert < m_size) {
- for (int i = 0; i < 4; i++) {
- output(lhs_vert, horiz_base + i) = results[i].x;
- }
- }
- } else if (!CHECK_LHS_BOUNDARY) {
- // CHECK RHS
- /*
- int ncols_rem = fminf(n_size- horiz_base, 4);
- for (int i = 0; i < ncols_rem; i++) {
- output(lhs_vert, horiz_base + i) = results[i].x;
- output(lhs_vert + 1, horiz_base + i) = results[i].y;
- output(lhs_vert + 2, horiz_base + i) = results[i].z;
- output(lhs_vert + 3, horiz_base + i) = results[i].w;
- }*/
- for (int i = 0; i < 4; i++) {
- if (horiz_base+i < n_size) {
- output(lhs_vert, horiz_base + i) = results[i].x;
- output(lhs_vert + 1, horiz_base + i) = results[i].y;
- output(lhs_vert + 2, horiz_base + i) = results[i].z;
- output(lhs_vert + 3, horiz_base + i) = results[i].w;
- }
- }
- } else {
- // CHECK both boundaries.
- for (int i = 0; i < 4; i++) {
- if (horiz_base+i < n_size) {
- if (lhs_vert < m_size)
- output(lhs_vert, horiz_base + i) = results[i].x;
- if (lhs_vert + 1 < m_size)
- output(lhs_vert + 1, horiz_base + i) = results[i].y;
- if (lhs_vert + 2 < m_size)
- output(lhs_vert + 2, horiz_base + i) = results[i].z;
- if (lhs_vert + 3 < m_size)
- output(lhs_vert + 3, horiz_base + i) = results[i].w;
- }
- }
- }
-}
-
-
-template<typename Index, typename LhsMapper,
- typename RhsMapper, typename OutputMapper, bool CHECK_LHS_BOUNDARY,
- bool CHECK_RHS_BOUNDARY>
-__device__ EIGEN_STRONG_INLINE void
-EigenFloatContractionKernelInternal(const LhsMapper lhs, const RhsMapper rhs,
- const OutputMapper output, float2 lhs_shmem2[][32],
- float2 rhs_shmem2[][8], const Index m_size,
- const Index n_size, const Index k_size,
- const Index base_m, const Index base_n) {
- typedef float Scalar;
-
- // prefetch registers
- float4 lhs_pf0, lhs_pf1, lhs_pf2, lhs_pf3;
- float4 rhs_pf0, rhs_pf1;
-
- float4 results[8];
- for (int i=0; i < 8; i++) {
- results[i].x = results[i].y = results[i].z = results[i].w = 0;
- }
-
-
- Index lhs_vert = base_m+threadIdx.x*4+(threadIdx.y%4)*32;
- for (Index k = 0; k < k_size; k += 32) {
- lhs_pf0 = internal::pset1<float4>(0);
- lhs_pf1 = internal::pset1<float4>(0);
- lhs_pf2 = internal::pset1<float4>(0);
- lhs_pf3 = internal::pset1<float4>(0);
-
- rhs_pf0 = internal::pset1<float4>(0);
- rhs_pf1 = internal::pset1<float4>(0);
-
- if (!CHECK_LHS_BOUNDARY) {
- if ((threadIdx.y/4+k+24) < k_size) {
- lhs_pf0 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k));
- lhs_pf1 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+8));
- lhs_pf2 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+16));
- lhs_pf3 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+24));
- } else if ((threadIdx.y/4+k+16) < k_size) {
- lhs_pf0 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k));
- lhs_pf1 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+8));
- lhs_pf2 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+16));
- } else if ((threadIdx.y/4+k+8) < k_size) {
- lhs_pf0 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k));
- lhs_pf1 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+8));
- } else if ((threadIdx.y/4+k) < k_size) {
- lhs_pf0 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k));
- }
- } else {
- // just CHECK_LHS_BOUNDARY
- if (lhs_vert + 3 < m_size) {
- if ((threadIdx.y/4+k+24) < k_size) {
- lhs_pf0 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k));
- lhs_pf1 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+8));
- lhs_pf2 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+16));
- lhs_pf3 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+24));
- } else if ((threadIdx.y/4+k+16) < k_size) {
- lhs_pf0 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k));
- lhs_pf1 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+8));
- lhs_pf2 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+16));
- } else if ((threadIdx.y/4+k+8) < k_size) {
- lhs_pf0 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k));
- lhs_pf1 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+8));
- } else if ((threadIdx.y/4+k) < k_size) {
- lhs_pf0 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k));
- }
- } else if (lhs_vert + 2 < m_size) {
- if ((threadIdx.y/4+k+24) < k_size) {
- lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k));
- lhs_pf0.y =lhs(lhs_vert + 1, (threadIdx.y/4+k));
- lhs_pf0.z =lhs(lhs_vert + 2, (threadIdx.y/4+k));
- lhs_pf1.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+8));
- lhs_pf1.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+8));
- lhs_pf1.z =lhs(lhs_vert + 2, (threadIdx.y/4+k+8));
- lhs_pf2.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+16));
- lhs_pf2.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+16));
- lhs_pf2.z =lhs(lhs_vert + 2, (threadIdx.y/4+k+16));
- lhs_pf3.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+24));
- lhs_pf3.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+24));
- lhs_pf3.z =lhs(lhs_vert + 2, (threadIdx.y/4+k+24));
- } else if ((threadIdx.y/4+k+16) < k_size) {
- lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k));
- lhs_pf0.y =lhs(lhs_vert + 1, (threadIdx.y/4+k));
- lhs_pf0.z =lhs(lhs_vert + 2, (threadIdx.y/4+k));
- lhs_pf1.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+8));
- lhs_pf1.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+8));
- lhs_pf1.z =lhs(lhs_vert + 2, (threadIdx.y/4+k+8));
- lhs_pf2.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+16));
- lhs_pf2.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+16));
- lhs_pf2.z =lhs(lhs_vert + 2, (threadIdx.y/4+k+16));
- } else if ((threadIdx.y/4+k+8) < k_size) {
- lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k));
- lhs_pf0.y =lhs(lhs_vert + 1, (threadIdx.y/4+k));
- lhs_pf0.z =lhs(lhs_vert + 2, (threadIdx.y/4+k));
- lhs_pf1.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+8));
- lhs_pf1.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+8));
- lhs_pf1.z =lhs(lhs_vert + 2, (threadIdx.y/4+k+8));
- } else if ((threadIdx.y/4+k) < k_size) {
- lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k));
- lhs_pf0.y =lhs(lhs_vert + 1, (threadIdx.y/4+k));
- lhs_pf0.z =lhs(lhs_vert + 2, (threadIdx.y/4+k));
- }
- } else if (lhs_vert + 1 < m_size) {
- if ((threadIdx.y/4+k+24) < k_size) {
- lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k));
- lhs_pf0.y =lhs(lhs_vert + 1, (threadIdx.y/4+k));
- lhs_pf1.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+8));
- lhs_pf1.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+8));
- lhs_pf2.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+16));
- lhs_pf2.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+16));
- lhs_pf3.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+24));
- lhs_pf3.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+24));
- } else if ((threadIdx.y/4+k+16) < k_size) {
- lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k));
- lhs_pf0.y =lhs(lhs_vert + 1, (threadIdx.y/4+k));
- lhs_pf1.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+8));
- lhs_pf1.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+8));
- lhs_pf2.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+16));
- lhs_pf2.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+16));
- } else if ((threadIdx.y/4+k+8) < k_size) {
- lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k));
- lhs_pf0.y =lhs(lhs_vert + 1, (threadIdx.y/4+k));
- lhs_pf1.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+8));
- lhs_pf1.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+8));
- } else if ((threadIdx.y/4+k) < k_size) {
- lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k));
- lhs_pf0.y =lhs(lhs_vert + 1, (threadIdx.y/4+k));
- }
- } else if (lhs_vert < m_size) {
- if ((threadIdx.y/4+k+24) < k_size) {
- lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k));
- lhs_pf1.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+8));
- lhs_pf2.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+16));
- lhs_pf3.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+24));
- } else if ((threadIdx.y/4+k+16) < k_size) {
- lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k));
- lhs_pf1.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+8));
- lhs_pf2.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+16));
- } else if ((threadIdx.y/4+k+8) < k_size) {
- lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k));
- lhs_pf1.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+8));
- } else if ((threadIdx.y/4+k) < k_size) {
- lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k));
- }
- }
- }
- __syncthreads();
- Index rhs_vert = k+threadIdx.x*4;
- Index rhs_horiz0 = threadIdx.y*2+base_n;
- Index rhs_horiz1 = threadIdx.y*2+1+base_n;
- if (!CHECK_RHS_BOUNDARY) {
- if ((rhs_vert + 3) < k_size) {
- // just CHECK_RHS_BOUNDARY
- rhs_pf0 = rhs.loadPacket<Unaligned>(rhs_vert, rhs_horiz0);
- rhs_pf1 = rhs.loadPacket<Unaligned>(rhs_vert, rhs_horiz1);
- } else if (rhs_vert + 2 < k_size) {
- // just CHECK_RHS_BOUNDARY
- rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
- rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0);
- rhs_pf0.z = rhs(rhs_vert + 2, rhs_horiz0);
- rhs_pf1.x = rhs(rhs_vert, rhs_horiz1);
- rhs_pf1.y = rhs(rhs_vert + 1, rhs_horiz1);
- rhs_pf1.z = rhs(rhs_vert + 2, rhs_horiz1);
- } else if (rhs_vert + 1 < k_size) {
- rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
- rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0);
- rhs_pf1.x = rhs(rhs_vert, rhs_horiz1);
- rhs_pf1.y = rhs(rhs_vert + 1, rhs_horiz1);
- } else if (rhs_vert < k_size) {
- rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
- rhs_pf1.x = rhs(rhs_vert, rhs_horiz1);
- }
- } else {
- if (rhs_horiz1 < n_size) {
- if ((rhs_vert + 3) < k_size) {
- // just CHECK_RHS_BOUNDARY
- rhs_pf0 = rhs.loadPacket<Unaligned>(rhs_vert, rhs_horiz0);
- rhs_pf1 = rhs.loadPacket<Unaligned>(rhs_vert, rhs_horiz1);
- } else if (rhs_vert + 2 < k_size) {
- // just CHECK_RHS_BOUNDARY
- rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
- rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0);
- rhs_pf0.z = rhs(rhs_vert + 2, rhs_horiz0);
- rhs_pf1.x = rhs(rhs_vert, rhs_horiz1);
- rhs_pf1.y = rhs(rhs_vert + 1, rhs_horiz1);
- rhs_pf1.z = rhs(rhs_vert + 2, rhs_horiz1);
- } else if (k+threadIdx.x*4 + 1 < k_size) {
- rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
- rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0);
- rhs_pf1.x = rhs(rhs_vert, rhs_horiz1);
- rhs_pf1.y = rhs(rhs_vert + 1, rhs_horiz1);
- } else if (k+threadIdx.x*4 < k_size) {
- rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
- rhs_pf1.x = rhs(rhs_vert, rhs_horiz1);
- }
- } else if (rhs_horiz0 < n_size) {
- if ((rhs_vert + 3) < k_size) {
- // just CHECK_RHS_BOUNDARY
- rhs_pf0 = rhs.loadPacket<Unaligned>(rhs_vert, rhs_horiz0);
- } else if ((rhs_vert + 2) < k_size) {
- // just CHECK_RHS_BOUNDARY
- rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
- rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0);
- rhs_pf0.z = rhs(rhs_vert + 2, rhs_horiz0);
- } else if ((rhs_vert + 1) < k_size) {
- rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
- rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0);
- } else if (rhs_vert < k_size) {
- rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
- }
- }
- }
- __syncthreads();
- // Loaded. Do computation
- // Row 0 -> times (0, 4, 8, .. 28) for features 0, 1.
- // Row 1 -> times (0, 4, 8, .. 28) for features 2, 3.
- // ..
- // Row 31 -> times (0, 4, 8, .. 28) for features 62, 63
- rhs_shmem2[threadIdx.y][threadIdx.x] = make_float2(rhs_pf0.x, rhs_pf1.x);
- // Row 32 -> times (1, 5, 9, .. 29) for features 0, 1.
- // Row 33 -> times (1, 5, 9, .. 29) for features 2, 3.
- // ..
- rhs_shmem2[threadIdx.y+32][threadIdx.x] = make_float2(rhs_pf0.y, rhs_pf1.y);
- // Row 64 -> times (2, 6, 10, .. 30) for features 0, 1.
- // Row 65 -> times (2, 6, 10, .. 30) for features 2, 3.
- rhs_shmem2[threadIdx.y+64][threadIdx.x] = make_float2(rhs_pf0.z, rhs_pf1.z);
- // Row 96 -> times (3, 7, 11, .. 31) for features 0, 1.
- // Row 97 -> times (3, 7, 11, .. 31) for features 2, 3.
- rhs_shmem2[threadIdx.y+96][threadIdx.x] = make_float2(rhs_pf0.w, rhs_pf1.w);
-
- // LHS.
- // Row 0 (time 0) -> features (0, 1), (4, 5), .. (28, 29), (32, 33), .. (60, 61) .. (124, 125)
- // Row 1 (time 1) -> features (0, 1), (4, 5), .. (28, 29), (32, 33), .. (60, 61) .. (124, 125)
- // ...
- // Row 8 (time 0) -> features (2, 3), (6, 7), .. (30, 31), (34, 35), .. (62, 63) .. (126, 127)
- // Row 15 (time 7) -> features (2, 3), (6, 7), .. (30, 31), (34, 35), .. (62, 63) .. (126, 127)
-
-
-#define add_vals(a_feat1, a_feat2, f1, f2, f3, f4)\
- results[0].x += a_feat1.x * f1.x;\
- results[1].x += a_feat1.x * f1.y;\
- results[2].x += a_feat1.x * f2.x;\
- results[3].x += a_feat1.x * f2.y;\
- results[4].x += a_feat1.x * f3.x;\
- results[5].x += a_feat1.x * f3.y;\
- results[6].x += a_feat1.x * f4.x;\
- results[7].x += a_feat1.x * f4.y;\
-\
- results[0].y += a_feat1.y * f1.x;\
- results[1].y += a_feat1.y * f1.y;\
- results[2].y += a_feat1.y * f2.x;\
- results[3].y += a_feat1.y * f2.y;\
- results[4].y += a_feat1.y * f3.x;\
- results[5].y += a_feat1.y * f3.y;\
- results[6].y += a_feat1.y * f4.x;\
- results[7].y += a_feat1.y * f4.y;\
-\
- results[0].z += a_feat2.x * f1.x;\
- results[1].z += a_feat2.x * f1.y;\
- results[2].z += a_feat2.x * f2.x;\
- results[3].z += a_feat2.x * f2.y;\
- results[4].z += a_feat2.x * f3.x;\
- results[5].z += a_feat2.x * f3.y;\
- results[6].z += a_feat2.x * f4.x;\
- results[7].z += a_feat2.x * f4.y;\
-\
- results[0].w += a_feat2.y * f1.x;\
- results[1].w += a_feat2.y * f1.y;\
- results[2].w += a_feat2.y * f2.x;\
- results[3].w += a_feat2.y * f2.y;\
- results[4].w += a_feat2.y * f3.x;\
- results[5].w += a_feat2.y * f3.y;\
- results[6].w += a_feat2.y * f4.x;\
- results[7].w += a_feat2.y * f4.y;\
-
- lhs_shmem2[threadIdx.y/4][threadIdx.x+(threadIdx.y%4)*8] = make_float2(lhs_pf0.x, lhs_pf0.y);
- lhs_shmem2[threadIdx.y/4+8][threadIdx.x+(threadIdx.y%4)*8] = make_float2(lhs_pf1.x, lhs_pf1.y);
- lhs_shmem2[threadIdx.y/4+16][threadIdx.x+(threadIdx.y%4)*8] = make_float2(lhs_pf2.x, lhs_pf2.y);
- lhs_shmem2[threadIdx.y/4+24][threadIdx.x+(threadIdx.y%4)*8] = make_float2(lhs_pf3.x, lhs_pf3.y);
-
- lhs_shmem2[threadIdx.y/4 + 32][threadIdx.x+(threadIdx.y%4)*8] = make_float2(lhs_pf0.z, lhs_pf0.w);
- lhs_shmem2[threadIdx.y/4 + 40][threadIdx.x+(threadIdx.y%4)*8] = make_float2(lhs_pf1.z, lhs_pf1.w);
- lhs_shmem2[threadIdx.y/4 + 48][threadIdx.x+(threadIdx.y%4)*8] = make_float2(lhs_pf2.z, lhs_pf2.w);
- lhs_shmem2[threadIdx.y/4 + 56][threadIdx.x+(threadIdx.y%4)*8] = make_float2(lhs_pf3.z, lhs_pf3.w);
-
- __syncthreads();
-
- // Do the multiplies.
- #pragma unroll
- for (int koff = 0; koff < 32; koff ++) {
- float2 a3 = lhs_shmem2[koff][threadIdx.x + (threadIdx.y % 4) * 8];
- float2 a4 = lhs_shmem2[koff + 32][threadIdx.x + (threadIdx.y % 4) * 8];
-
- // first feature is at (threadIdx.y/4) * 8 last is at start + 8.
- int start_feature = (threadIdx.y / 4) * 8;
-
- float2 br1 = rhs_shmem2[start_feature/2 + (koff % 4) * 32][koff/4];
- float2 br2 = rhs_shmem2[start_feature/2 + 1 + (koff % 4) * 32][koff/4];
- float2 br3 = rhs_shmem2[start_feature/2 + 2 + (koff % 4) * 32][koff/4];
- float2 br4 = rhs_shmem2[start_feature/2 + 3 + (koff % 4) * 32][koff/4];
-
- add_vals(a3, a4, br1, br2, br3, br4)
- }
- __syncthreads();
- } // end loop over k
-
-
- __syncthreads();
- Index horiz_base = (threadIdx.y/4)*8+base_n;
- if (!CHECK_LHS_BOUNDARY && !CHECK_RHS_BOUNDARY) {
- for (int i = 0; i < 8; i++) {
- output(lhs_vert, horiz_base + i) = results[i].x;
- output(lhs_vert + 1, horiz_base + i) = results[i].y;
- output(lhs_vert + 2, horiz_base + i) = results[i].z;
- output(lhs_vert + 3, horiz_base + i) = results[i].w;
- }
- } else if (!CHECK_RHS_BOUNDARY) {
- if (lhs_vert + 3 < m_size) {
- for (int i = 0; i < 8; i++) {
- output(lhs_vert, horiz_base + i) = results[i].x;
- output(lhs_vert + 1, horiz_base + i) = results[i].y;
- output(lhs_vert + 2, horiz_base + i) = results[i].z;
- output(lhs_vert + 3, horiz_base + i) = results[i].w;
- }
- } else if (lhs_vert + 2 < m_size) {
- for (int i = 0; i < 8; i++) {
- output(lhs_vert, horiz_base + i) = results[i].x;
- output(lhs_vert + 1, horiz_base + i) = results[i].y;
- output(lhs_vert + 2, horiz_base + i) = results[i].z;
- }
- } else if (lhs_vert + 1 < m_size) {
- for (int i = 0; i < 8; i++) {
- output(lhs_vert, horiz_base + i) = results[i].x;
- output(lhs_vert + 1, horiz_base + i) = results[i].y;
- }
- } else if (lhs_vert < m_size) {
- for (int i = 0; i < 8; i++) {
- output(lhs_vert, horiz_base + i) = results[i].x;
- }
- }
- } else if (!CHECK_LHS_BOUNDARY) {
- // CHECK BOUNDARY_B
- for (int i = 0; i < 8; i++) {
- if (horiz_base + i < n_size) {
- output(lhs_vert, horiz_base + i) = results[i].x;
- output(lhs_vert + 1, horiz_base + i) = results[i].y;
- output(lhs_vert + 2, horiz_base + i) = results[i].z;
- output(lhs_vert + 3, horiz_base + i) = results[i].w;
- }
- }
- } else {
- // CHECK both boundaries.
- for (int i = 0; i < 8; i++) {
- if (horiz_base + i < n_size) {
- if (lhs_vert < m_size)
- output(lhs_vert, horiz_base + i) = results[i].x;
- if (lhs_vert + 1 < m_size)
- output(lhs_vert + 1, horiz_base + i) = results[i].y;
- if (lhs_vert + 2 < m_size)
- output(lhs_vert + 2, horiz_base + i) = results[i].z;
- if (lhs_vert + 3 < m_size)
- output(lhs_vert + 3, horiz_base + i) = results[i].w;
- }
- }
- }
-}
-
-
-template<typename Index, typename LhsMapper,
- typename RhsMapper, typename OutputMapper>
-__global__ void
-__launch_bounds__(256)
-EigenFloatContractionKernel(const LhsMapper lhs, const RhsMapper rhs,
- const OutputMapper output,
- const Index m_size, const Index n_size, const Index k_size) {
- __shared__ float2 lhs_shmem[64*32];
- __shared__ float2 rhs_shmem[128*8];
-
- typedef float2 LHS_MEM[64][32];
- typedef float2 RHS_MEM[128][8];
-
- typedef float2 LHS_MEM16x16[32][16];
- typedef float2 RHS_MEM16x16[64][8];
-
- const Index m_block_idx = blockIdx.x;
- const Index n_block_idx = blockIdx.y;
-
- const Index base_m = 128 * m_block_idx;
- const Index base_n = 64 * n_block_idx;
-
- bool check_rhs = (base_n + 63) >= n_size;
- bool check_lhs128 = (base_m + 127) >= m_size;
-
- if (!check_rhs) {
- if (!check_lhs128) {
- // >= 128 rows left
- EigenFloatContractionKernelInternal<Index, LhsMapper, RhsMapper, OutputMapper, false, false>(
- lhs, rhs, output, *((LHS_MEM *) lhs_shmem), *((RHS_MEM *) rhs_shmem), m_size, n_size, k_size, base_m, base_n);
- } else {
- EigenFloatContractionKernelInternal<Index, LhsMapper, RhsMapper, OutputMapper, true, false>(
- lhs, rhs, output, *((LHS_MEM *) lhs_shmem), *((RHS_MEM *) rhs_shmem), m_size, n_size, k_size, base_m, base_n);
- }
- } else {
- if (!check_lhs128) {
- // >= 128 rows left
- EigenFloatContractionKernelInternal<Index, LhsMapper, RhsMapper, OutputMapper, false, true>(
- lhs, rhs, output, *((LHS_MEM *) lhs_shmem), *((RHS_MEM *) rhs_shmem), m_size, n_size, k_size, base_m, base_n);
- } else {
- EigenFloatContractionKernelInternal<Index, LhsMapper, RhsMapper, OutputMapper, true, true>(
- lhs, rhs, output, *((LHS_MEM *) lhs_shmem), *((RHS_MEM *) rhs_shmem), m_size, n_size, k_size, base_m, base_n);
- }
- }
-}
-
-template<typename Index, typename LhsMapper,
- typename RhsMapper, typename OutputMapper>
-__global__ void
-__launch_bounds__(256)
-EigenFloatContractionKernel16x16(const LhsMapper lhs, const RhsMapper rhs,
- const OutputMapper output,
- const Index m_size, const Index n_size, const Index k_size) {
- __shared__ float2 lhs_shmem[32][16];
- __shared__ float2 rhs_shmem[64][8];
-
- const Index m_block_idx = blockIdx.x;
- const Index n_block_idx = blockIdx.y;
-
- const Index base_m = 64 * m_block_idx;
- const Index base_n = 64 * n_block_idx;
-
- if (base_m + 63 < m_size) {
- if (base_n + 63 < n_size) {
- EigenFloatContractionKernelInternal16x16<Index, LhsMapper, RhsMapper, OutputMapper, false, false>(lhs, rhs, output, lhs_shmem, rhs_shmem, m_size, n_size, k_size, base_m, base_n);
- } else {
- EigenFloatContractionKernelInternal16x16<Index, LhsMapper, RhsMapper, OutputMapper, false, true>(lhs, rhs, output, lhs_shmem, rhs_shmem, m_size, n_size, k_size, base_m, base_n);
- }
- } else {
- if (base_n + 63 < n_size) {
- EigenFloatContractionKernelInternal16x16<Index, LhsMapper, RhsMapper, OutputMapper, true, false>(lhs, rhs, output, lhs_shmem, rhs_shmem, m_size, n_size, k_size, base_m, base_n);
- } else {
- EigenFloatContractionKernelInternal16x16<Index, LhsMapper, RhsMapper, OutputMapper, true, true>(lhs, rhs, output, lhs_shmem, rhs_shmem, m_size, n_size, k_size, base_m, base_n);
- }
- }
-}
-
-
-template<typename Indices, typename LeftArgType, typename RightArgType>
-struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType>, GpuDevice> :
- public TensorContractionEvaluatorBase<TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType>, GpuDevice> > {
-
- typedef GpuDevice Device;
-
- typedef TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType>, Device> Self;
- typedef TensorContractionEvaluatorBase<Self> Base;
-
- typedef TensorContractionOp<Indices, LeftArgType, RightArgType> XprType;
- typedef typename internal::remove_const<typename XprType::Scalar>::type Scalar;
- typedef typename XprType::Index Index;
- typedef typename XprType::CoeffReturnType CoeffReturnType;
- typedef typename PacketType<CoeffReturnType, GpuDevice>::type PacketReturnType;
-
- enum {
- Layout = TensorEvaluator<LeftArgType, Device>::Layout,
- };
-
- // Most of the code is assuming that both input tensors are ColMajor. If the
- // inputs are RowMajor, we will "cheat" by swapping the LHS and RHS:
- // If we want to compute A * B = C, where A is LHS and B is RHS, the code
- // will pretend B is LHS and A is RHS.
- typedef typename internal::conditional<
- static_cast<int>(Layout) == static_cast<int>(ColMajor), LeftArgType, RightArgType>::type EvalLeftArgType;
- typedef typename internal::conditional<
- static_cast<int>(Layout) == static_cast<int>(ColMajor), RightArgType, LeftArgType>::type EvalRightArgType;
-
- static const int LDims =
- internal::array_size<typename TensorEvaluator<EvalLeftArgType, Device>::Dimensions>::value;
- static const int RDims =
- internal::array_size<typename TensorEvaluator<EvalRightArgType, Device>::Dimensions>::value;
- static const int ContractDims = internal::array_size<Indices>::value;
-
- typedef array<Index, LDims> left_dim_mapper_t;
- typedef array<Index, RDims> right_dim_mapper_t;
-
- typedef array<Index, ContractDims> contract_t;
- typedef array<Index, LDims - ContractDims> left_nocontract_t;
- typedef array<Index, RDims - ContractDims> right_nocontract_t;
-
- static const int NumDims = LDims + RDims - 2 * ContractDims;
-
- typedef DSizes<Index, NumDims> Dimensions;
-
- // typedefs needed in evalTo
- typedef typename internal::remove_const<typename EvalLeftArgType::Scalar>::type LhsScalar;
- typedef typename internal::remove_const<typename EvalRightArgType::Scalar>::type RhsScalar;
-
- typedef TensorEvaluator<EvalLeftArgType, Device> LeftEvaluator;
- typedef TensorEvaluator<EvalRightArgType, Device> RightEvaluator;
-
- typedef typename LeftEvaluator::Dimensions LeftDimensions;
- typedef typename RightEvaluator::Dimensions RightDimensions;
-
- EIGEN_DEVICE_FUNC TensorEvaluator(const XprType& op, const Device& device) :
- Base(op, device) {}
-
- // We need to redefine this method to make nvcc happy
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* data) {
- this->m_leftImpl.evalSubExprsIfNeeded(NULL);
- this->m_rightImpl.evalSubExprsIfNeeded(NULL);
- if (data) {
- evalTo(data);
- return false;
- } else {
- this->m_result = static_cast<Scalar *>(this->m_device.allocate(this->dimensions().TotalSize() * sizeof(Scalar)));
- evalTo(this->m_result);
- return true;
- }
- }
-
- void evalTo(Scalar* buffer) const {
- if (this->m_lhs_inner_dim_contiguous) {
- if (this->m_rhs_inner_dim_contiguous) {
- if (this->m_rhs_inner_dim_reordered) {
- evalTyped<true, true, true, Unaligned>(buffer);
- }
- else {
- evalTyped<true, true, false, Unaligned>(buffer);
- }
- }
- else {
- if (this->m_rhs_inner_dim_reordered) {
- evalTyped<true, false, true, Unaligned>(buffer);
- }
- else {
- evalTyped<true, false, false, Unaligned>(buffer);
- }
- }
- }
- else {
- if (this->m_rhs_inner_dim_contiguous) {
- if (this->m_rhs_inner_dim_reordered) {
- evalTyped<false, true, true, Unaligned>(buffer);
- }
- else {
- evalTyped<false, true, false, Unaligned>(buffer);
- }
- }
- else {
- if (this->m_rhs_inner_dim_reordered) {
- evalTyped<false, false, true, Unaligned>(buffer);
- }
- else {
- evalTyped<false, false, false, Unaligned>(buffer);
- }
- }
- }
- }
-
- template <typename LhsScalar, typename RhsScalar, typename Index, typename LhsMapper, typename RhsMapper, typename OutputMapper> struct LaunchKernels {
- static void Run(const LhsMapper& lhs, const RhsMapper& rhs, const OutputMapper& output, Index m, Index n, Index k, const GpuDevice& device) {
- const Index m_blocks = (m + 63) / 64;
- const Index n_blocks = (n + 63) / 64;
- const dim3 num_blocks(m_blocks, n_blocks, 1);
- const dim3 block_size(8, 8, 8);
- LAUNCH_CUDA_KERNEL((EigenContractionKernel<Scalar, Index, LhsMapper, RhsMapper, OutputMapper>), num_blocks, block_size, 0, device, lhs, rhs, output, m, n, k);
- }
- };
-
- template <typename Index, typename LhsMapper, typename RhsMapper, typename OutputMapper> struct LaunchKernels<float, float, Index, LhsMapper, RhsMapper, OutputMapper> {
- static void Run(const LhsMapper& lhs, const RhsMapper& rhs, const OutputMapper& output, Index m, Index n, Index k, const GpuDevice& device) {
- if (m < 768 || n < 768) {
- const Index m_blocks = (m + 63) / 64;
- const Index n_blocks = (n + 63) / 64;
- const dim3 num_blocks(m_blocks, n_blocks, 1);
- const dim3 block_size(16, 16, 1);
- LAUNCH_CUDA_KERNEL((EigenFloatContractionKernel16x16<Index, LhsMapper, RhsMapper, OutputMapper>), num_blocks, block_size, 0, device, lhs, rhs, output, m, n, k);
- } else {
- const Index m_blocks = (m + 127) / 128;
- const Index n_blocks = (n + 63) / 64;
- const dim3 num_blocks(m_blocks, n_blocks, 1);
- const dim3 block_size(8, 32, 1);
- LAUNCH_CUDA_KERNEL((EigenFloatContractionKernel<Index, LhsMapper, RhsMapper, OutputMapper>), num_blocks, block_size, 0, device, lhs, rhs, output, m, n, k);
- }
- }
- };
-
- template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
- void evalTyped(Scalar* buffer) const {
- // columns in left side, rows in right side
- const Index k = this->m_k_size;
- EIGEN_UNUSED_VARIABLE(k)
-
- // rows in left side
- const Index m = this->m_i_size;
-
- // columns in right side
- const Index n = this->m_j_size;
-
- // zero out the result buffer (which must be of size at least m * n * sizeof(Scalar)
- this->m_device.memset(buffer, 0, m * n * sizeof(Scalar));
-
- typedef internal::TensorContractionInputMapper<LhsScalar, Index, internal::Lhs,
- LeftEvaluator, left_nocontract_t,
- contract_t, 4,
- lhs_inner_dim_contiguous,
- false, Unaligned> LhsMapper;
-
- typedef internal::TensorContractionInputMapper<RhsScalar, Index, internal::Rhs,
- RightEvaluator, right_nocontract_t,
- contract_t, 4,
- rhs_inner_dim_contiguous,
- rhs_inner_dim_reordered, Unaligned> RhsMapper;
-
- typedef internal::blas_data_mapper<Scalar, Index, ColMajor> OutputMapper;
-
-
- // initialize data mappers
- LhsMapper lhs(this->m_leftImpl, this->m_left_nocontract_strides, this->m_i_strides,
- this->m_left_contracting_strides, this->m_k_strides);
-
- RhsMapper rhs(this->m_rightImpl, this->m_right_nocontract_strides, this->m_j_strides,
- this->m_right_contracting_strides, this->m_k_strides);
-
- OutputMapper output(buffer, m);
-
- setCudaSharedMemConfig(cudaSharedMemBankSizeEightByte);
- LaunchKernels<LhsScalar, RhsScalar, Index, LhsMapper, RhsMapper, OutputMapper>::Run(lhs, rhs, output, m, n, k, this->m_device);
- }
-};
-
-} // end namespace Eigen
-
-#endif // EIGEN_USE_GPU and __CUDACC__
-#endif // EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_CUDA_H
+#include "TensorContractionGpu.h"
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionGpu.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionGpu.h
new file mode 100644
index 000000000..056665749
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionGpu.h
@@ -0,0 +1,1412 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014-2015 Benoit Steiner <benoit.steiner.goog@gmail.com>
+// Copyright (C) 2015 Navdeep Jaitly <ndjaitly@google.com>
+// Copyright (C) 2014 Eric Martin <eric@ericmart.in>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_GPU_H
+#define EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_GPU_H
+
+#if defined(EIGEN_USE_GPU) && defined(EIGEN_GPUCC)
+
+namespace Eigen {
+
+template<typename Scalar, typename Index, typename LhsMapper,
+ typename RhsMapper, typename OutputMapper, bool needs_edge_check>
+__device__ EIGEN_STRONG_INLINE void
+EigenContractionKernelInternal(const LhsMapper lhs, const RhsMapper rhs,
+ const OutputMapper output, Scalar* lhs_shmem, Scalar* rhs_shmem,
+ const Index m_size, const Index n_size, const Index k_size) {
+
+ const Index m_block_idx = blockIdx.x;
+ const Index n_block_idx = blockIdx.y;
+
+ const Index base_m = 64 * m_block_idx;
+ const Index base_n = 64 * n_block_idx;
+
+ // declare and initialize 64 registers for output 8x8 block
+
+ // prefetch registers
+ Scalar lhs_pf0;
+ Scalar lhs_pf1;
+ Scalar lhs_pf2;
+ Scalar lhs_pf3;
+ Scalar lhs_pf4;
+ Scalar lhs_pf5;
+ Scalar lhs_pf6;
+ Scalar lhs_pf7;
+
+ Scalar rhs_pf0;
+ Scalar rhs_pf1;
+ Scalar rhs_pf2;
+ Scalar rhs_pf3;
+ Scalar rhs_pf4;
+ Scalar rhs_pf5;
+ Scalar rhs_pf6;
+ Scalar rhs_pf7;
+
+ // shared memory is formatted
+ // (contract idx in block, nocontract idx in block, block idx)
+ // where block idx is column major. This transposition limits the number of
+ // bank conflicts when reading the LHS. The core idea is that since the contracting
+ // index is shared by both sides, it should live in threadIdx.x.
+
+ // On the LHS, we pad each row inside of each block with an extra element. This makes
+ // each block 8 rows of 9 elements, which is 72 elements. This gives no bank conflicts
+ // on writes and very few 2-way conflicts on reads. There is an 8x8 grid of these blocks.
+
+ // On the RHS we just add 8 padding elements to the end of each block. This gives no bank
+ // conflicts on writes and also none on reads.
+
+ // storage indices
+ const Index lhs_store_idx_base = threadIdx.y * 72 + threadIdx.x * 9 + threadIdx.z;
+ const Index rhs_store_idx_base = threadIdx.y * 72 + threadIdx.z * 8 + threadIdx.x;
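+ // For example, the thread with (threadIdx.x, threadIdx.y, threadIdx.z) =
+ // (3, 2, 5) gets lhs_store_idx_base = 2*72 + 3*9 + 5 = 176 and
+ // rhs_store_idx_base = 2*72 + 5*8 + 3 = 187; each of its eight prefetched
+ // values then lands a further 576 (= 8 blocks * 72 elements) apart, as the
+ // offsets below show.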
+
+ const Index lhs_store_idx_0 = lhs_store_idx_base + 576 * 0;
+ const Index lhs_store_idx_1 = lhs_store_idx_base + 576 * 1;
+ const Index lhs_store_idx_2 = lhs_store_idx_base + 576 * 2;
+ const Index lhs_store_idx_3 = lhs_store_idx_base + 576 * 3;
+ const Index lhs_store_idx_4 = lhs_store_idx_base + 576 * 4;
+ const Index lhs_store_idx_5 = lhs_store_idx_base + 576 * 5;
+ const Index lhs_store_idx_6 = lhs_store_idx_base + 576 * 6;
+ const Index lhs_store_idx_7 = lhs_store_idx_base + 576 * 7;
+
+ const Index rhs_store_idx_0 = rhs_store_idx_base + 576 * 0;
+ const Index rhs_store_idx_1 = rhs_store_idx_base + 576 * 1;
+ const Index rhs_store_idx_2 = rhs_store_idx_base + 576 * 2;
+ const Index rhs_store_idx_3 = rhs_store_idx_base + 576 * 3;
+ const Index rhs_store_idx_4 = rhs_store_idx_base + 576 * 4;
+ const Index rhs_store_idx_5 = rhs_store_idx_base + 576 * 5;
+ const Index rhs_store_idx_6 = rhs_store_idx_base + 576 * 6;
+ const Index rhs_store_idx_7 = rhs_store_idx_base + 576 * 7;
+
+ // in the loading code, the following variables are important:
+ // threadIdx.x: the vertical position in an 8x8 block
+ // threadIdx.y: the vertical index of the 8x8 block in the grid
+ // threadIdx.z: the horizontal position in an 8x8 block
+ // k: the horizontal index of the 8x8 block in the grid
+ //
+ // The k parameter is implicit (it was the counter of a loop that ran from 0
+ // to 7, but that loop has been unrolled in the code below).
+
+ const Index load_idx_vert = threadIdx.x + 8 * threadIdx.y;
+ const Index lhs_vert = base_m + load_idx_vert;
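+ // load_idx_vert = threadIdx.x + 8 * threadIdx.y runs over 0..63 as (x, y)
+ // sweeps the 8x8 grid, so the 64 (x, y) pairs together cover one full
+ // 64-row column of the LHS tile starting at base_m.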
+
+#define prefetchIntoRegisters(base_k) \
+ { \
+ lhs_pf0 = conv(0); \
+ lhs_pf1 = conv(0); \
+ lhs_pf2 = conv(0); \
+ lhs_pf3 = conv(0); \
+ lhs_pf4 = conv(0); \
+ lhs_pf5 = conv(0); \
+ lhs_pf6 = conv(0); \
+ lhs_pf7 = conv(0); \
+ \
+ rhs_pf0 = conv(0); \
+ rhs_pf1 = conv(0); \
+ rhs_pf2 = conv(0); \
+ rhs_pf3 = conv(0); \
+ rhs_pf4 = conv(0); \
+ rhs_pf5 = conv(0); \
+ rhs_pf6 = conv(0); \
+ rhs_pf7 = conv(0); \
+ \
+ if (!needs_edge_check || lhs_vert < m_size) { \
+ const Index lhs_horiz_0 = base_k + threadIdx.z + 0 * 8; \
+ const Index lhs_horiz_1 = base_k + threadIdx.z + 1 * 8; \
+ const Index lhs_horiz_2 = base_k + threadIdx.z + 2 * 8; \
+ const Index lhs_horiz_3 = base_k + threadIdx.z + 3 * 8; \
+ const Index lhs_horiz_4 = base_k + threadIdx.z + 4 * 8; \
+ const Index lhs_horiz_5 = base_k + threadIdx.z + 5 * 8; \
+ const Index lhs_horiz_6 = base_k + threadIdx.z + 6 * 8; \
+ const Index lhs_horiz_7 = base_k + threadIdx.z + 7 * 8; \
+ \
+ if (!needs_edge_check || lhs_horiz_7 < k_size) { \
+ lhs_pf0 = lhs(lhs_vert, lhs_horiz_0); \
+ lhs_pf1 = lhs(lhs_vert, lhs_horiz_1); \
+ lhs_pf2 = lhs(lhs_vert, lhs_horiz_2); \
+ lhs_pf3 = lhs(lhs_vert, lhs_horiz_3); \
+ lhs_pf4 = lhs(lhs_vert, lhs_horiz_4); \
+ lhs_pf5 = lhs(lhs_vert, lhs_horiz_5); \
+ lhs_pf6 = lhs(lhs_vert, lhs_horiz_6); \
+ lhs_pf7 = lhs(lhs_vert, lhs_horiz_7); \
+ } else if (lhs_horiz_6 < k_size) { \
+ lhs_pf0 = lhs(lhs_vert, lhs_horiz_0); \
+ lhs_pf1 = lhs(lhs_vert, lhs_horiz_1); \
+ lhs_pf2 = lhs(lhs_vert, lhs_horiz_2); \
+ lhs_pf3 = lhs(lhs_vert, lhs_horiz_3); \
+ lhs_pf4 = lhs(lhs_vert, lhs_horiz_4); \
+ lhs_pf5 = lhs(lhs_vert, lhs_horiz_5); \
+ lhs_pf6 = lhs(lhs_vert, lhs_horiz_6); \
+ } else if (lhs_horiz_5 < k_size) { \
+ lhs_pf0 = lhs(lhs_vert, lhs_horiz_0); \
+ lhs_pf1 = lhs(lhs_vert, lhs_horiz_1); \
+ lhs_pf2 = lhs(lhs_vert, lhs_horiz_2); \
+ lhs_pf3 = lhs(lhs_vert, lhs_horiz_3); \
+ lhs_pf4 = lhs(lhs_vert, lhs_horiz_4); \
+ lhs_pf5 = lhs(lhs_vert, lhs_horiz_5); \
+ } else if (lhs_horiz_4 < k_size) { \
+ lhs_pf0 = lhs(lhs_vert, lhs_horiz_0); \
+ lhs_pf1 = lhs(lhs_vert, lhs_horiz_1); \
+ lhs_pf2 = lhs(lhs_vert, lhs_horiz_2); \
+ lhs_pf3 = lhs(lhs_vert, lhs_horiz_3); \
+ lhs_pf4 = lhs(lhs_vert, lhs_horiz_4); \
+ } else if (lhs_horiz_3 < k_size) { \
+ lhs_pf0 = lhs(lhs_vert, lhs_horiz_0); \
+ lhs_pf1 = lhs(lhs_vert, lhs_horiz_1); \
+ lhs_pf2 = lhs(lhs_vert, lhs_horiz_2); \
+ lhs_pf3 = lhs(lhs_vert, lhs_horiz_3); \
+ } else if (lhs_horiz_2 < k_size) { \
+ lhs_pf0 = lhs(lhs_vert, lhs_horiz_0); \
+ lhs_pf1 = lhs(lhs_vert, lhs_horiz_1); \
+ lhs_pf2 = lhs(lhs_vert, lhs_horiz_2); \
+ } else if (lhs_horiz_1 < k_size) { \
+ lhs_pf0 = lhs(lhs_vert, lhs_horiz_0); \
+ lhs_pf1 = lhs(lhs_vert, lhs_horiz_1); \
+ } else if (lhs_horiz_0 < k_size) { \
+ lhs_pf0 = lhs(lhs_vert, lhs_horiz_0); \
+ } \
+ } \
+ \
+ const Index rhs_vert = base_k + load_idx_vert; \
+ if (!needs_edge_check || rhs_vert < k_size) { \
+ const Index rhs_horiz_0 = base_n + threadIdx.z + 0 * 8; \
+ const Index rhs_horiz_1 = base_n + threadIdx.z + 1 * 8; \
+ const Index rhs_horiz_2 = base_n + threadIdx.z + 2 * 8; \
+ const Index rhs_horiz_3 = base_n + threadIdx.z + 3 * 8; \
+ const Index rhs_horiz_4 = base_n + threadIdx.z + 4 * 8; \
+ const Index rhs_horiz_5 = base_n + threadIdx.z + 5 * 8; \
+ const Index rhs_horiz_6 = base_n + threadIdx.z + 6 * 8; \
+ const Index rhs_horiz_7 = base_n + threadIdx.z + 7 * 8; \
+ \
+ if (rhs_horiz_7 < n_size) { \
+ rhs_pf0 = rhs(rhs_vert, rhs_horiz_0); \
+ rhs_pf1 = rhs(rhs_vert, rhs_horiz_1); \
+ rhs_pf2 = rhs(rhs_vert, rhs_horiz_2); \
+ rhs_pf3 = rhs(rhs_vert, rhs_horiz_3); \
+ rhs_pf4 = rhs(rhs_vert, rhs_horiz_4); \
+ rhs_pf5 = rhs(rhs_vert, rhs_horiz_5); \
+ rhs_pf6 = rhs(rhs_vert, rhs_horiz_6); \
+ rhs_pf7 = rhs(rhs_vert, rhs_horiz_7); \
+ } else if (rhs_horiz_6 < n_size) { \
+ rhs_pf0 = rhs(rhs_vert, rhs_horiz_0); \
+ rhs_pf1 = rhs(rhs_vert, rhs_horiz_1); \
+ rhs_pf2 = rhs(rhs_vert, rhs_horiz_2); \
+ rhs_pf3 = rhs(rhs_vert, rhs_horiz_3); \
+ rhs_pf4 = rhs(rhs_vert, rhs_horiz_4); \
+ rhs_pf5 = rhs(rhs_vert, rhs_horiz_5); \
+ rhs_pf6 = rhs(rhs_vert, rhs_horiz_6); \
+ } else if (rhs_horiz_5 < n_size) { \
+ rhs_pf0 = rhs(rhs_vert, rhs_horiz_0); \
+ rhs_pf1 = rhs(rhs_vert, rhs_horiz_1); \
+ rhs_pf2 = rhs(rhs_vert, rhs_horiz_2); \
+ rhs_pf3 = rhs(rhs_vert, rhs_horiz_3); \
+ rhs_pf4 = rhs(rhs_vert, rhs_horiz_4); \
+ rhs_pf5 = rhs(rhs_vert, rhs_horiz_5); \
+ } else if (rhs_horiz_4 < n_size) { \
+ rhs_pf0 = rhs(rhs_vert, rhs_horiz_0); \
+ rhs_pf1 = rhs(rhs_vert, rhs_horiz_1); \
+ rhs_pf2 = rhs(rhs_vert, rhs_horiz_2); \
+ rhs_pf3 = rhs(rhs_vert, rhs_horiz_3); \
+ rhs_pf4 = rhs(rhs_vert, rhs_horiz_4); \
+ } else if (rhs_horiz_3 < n_size) { \
+ rhs_pf0 = rhs(rhs_vert, rhs_horiz_0); \
+ rhs_pf1 = rhs(rhs_vert, rhs_horiz_1); \
+ rhs_pf2 = rhs(rhs_vert, rhs_horiz_2); \
+ rhs_pf3 = rhs(rhs_vert, rhs_horiz_3); \
+ } else if (rhs_horiz_2 < n_size) { \
+ rhs_pf0 = rhs(rhs_vert, rhs_horiz_0); \
+ rhs_pf1 = rhs(rhs_vert, rhs_horiz_1); \
+ rhs_pf2 = rhs(rhs_vert, rhs_horiz_2); \
+ } else if (rhs_horiz_1 < n_size) { \
+ rhs_pf0 = rhs(rhs_vert, rhs_horiz_0); \
+ rhs_pf1 = rhs(rhs_vert, rhs_horiz_1); \
+ } else if (rhs_horiz_0 < n_size) { \
+ rhs_pf0 = rhs(rhs_vert, rhs_horiz_0); \
+ } \
+ } \
+ } \
+
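+// As a concrete example of the edge handling in prefetchIntoRegisters above:
+// with k_size = 100, base_k = 64 and threadIdx.z = 3 the candidate columns are
+// 67, 75, 83, 91, 99, 107, ..., so only lhs_pf0..lhs_pf4 are loaded
+// (lhs_horiz_4 = 99 is the last one below k_size) and the remaining prefetch
+// registers keep the zero they were initialized with.
+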
+#define writeRegToShmem(_) \
+ lhs_shmem[lhs_store_idx_0] = lhs_pf0; \
+ rhs_shmem[rhs_store_idx_0] = rhs_pf0; \
+ \
+ lhs_shmem[lhs_store_idx_1] = lhs_pf1; \
+ rhs_shmem[rhs_store_idx_1] = rhs_pf1; \
+ \
+ lhs_shmem[lhs_store_idx_2] = lhs_pf2; \
+ rhs_shmem[rhs_store_idx_2] = rhs_pf2; \
+ \
+ lhs_shmem[lhs_store_idx_3] = lhs_pf3; \
+ rhs_shmem[rhs_store_idx_3] = rhs_pf3; \
+ \
+ lhs_shmem[lhs_store_idx_4] = lhs_pf4; \
+ rhs_shmem[rhs_store_idx_4] = rhs_pf4; \
+ \
+ lhs_shmem[lhs_store_idx_5] = lhs_pf5; \
+ rhs_shmem[rhs_store_idx_5] = rhs_pf5; \
+ \
+ lhs_shmem[lhs_store_idx_6] = lhs_pf6; \
+ rhs_shmem[rhs_store_idx_6] = rhs_pf6; \
+ \
+ lhs_shmem[lhs_store_idx_7] = lhs_pf7; \
+ rhs_shmem[rhs_store_idx_7] = rhs_pf7; \
+
+ // declare and initialize result array
+#define res(i, j) _res_##i##j
+#define initResultRow(i) \
+ Scalar res(i, 0) = conv(0); \
+ Scalar res(i, 1) = conv(0); \
+ Scalar res(i, 2) = conv(0); \
+ Scalar res(i, 3) = conv(0); \
+ Scalar res(i, 4) = conv(0); \
+ Scalar res(i, 5) = conv(0); \
+ Scalar res(i, 6) = conv(0); \
+ Scalar res(i, 7) = conv(0); \
+
+ internal::scalar_cast_op<int, Scalar> conv;
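+ // conv is internal::scalar_cast_op<int, Scalar>; each conv(0) in the
+ // initResultRow expansions is just Scalar(0) spelled in a way that also
+ // works for non-builtin scalar types (e.g. Eigen::half).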
+ initResultRow(0);
+ initResultRow(1);
+ initResultRow(2);
+ initResultRow(3);
+ initResultRow(4);
+ initResultRow(5);
+ initResultRow(6);
+ initResultRow(7);
+#undef initResultRow
+
+ for (Index base_k = 0; base_k < k_size; base_k += 64) {
+ // wait for the previous iteration to finish with shmem. Counterintuitively,
+ // the code is a bit faster with this here than at the bottom of the loop
+ __syncthreads();
+
+ prefetchIntoRegisters(base_k);
+ writeRegToShmem();
+
+ #undef prefetchIntoRegisters
+ #undef writeRegToShmem
+
+ // wait for shared mem packing to be done before starting computation
+ __syncthreads();
+
+ // compute 8x8 matrix product by outer product. This involves packing one column
+ // of LHS and one row of RHS into registers (takes 16 registers).
+
+#define lcol(i) _lcol##i
+ Scalar lcol(0);
+ Scalar lcol(1);
+ Scalar lcol(2);
+ Scalar lcol(3);
+ Scalar lcol(4);
+ Scalar lcol(5);
+ Scalar lcol(6);
+ Scalar lcol(7);
+
+#define rrow(j) _rrow##j
+ Scalar rrow(0);
+ Scalar rrow(1);
+ Scalar rrow(2);
+ Scalar rrow(3);
+ Scalar rrow(4);
+ Scalar rrow(5);
+ Scalar rrow(6);
+ Scalar rrow(7);
+
+ // Now x corresponds to k, y to m, and z to n
+ const Scalar* lhs_block = &lhs_shmem[threadIdx.x + 9 * threadIdx.y];
+ const Scalar* rhs_block = &rhs_shmem[threadIdx.x + 8 * threadIdx.z];
+
+#define lhs_element(i, j) lhs_block[72 * ((i) + 8 * (j))]
+#define rhs_element(i, j) rhs_block[72 * ((i) + 8 * (j))]
+
+#define loadData(i, j) \
+ lcol(0) = lhs_element(0, j); \
+ rrow(0) = rhs_element(i, 0); \
+ lcol(1) = lhs_element(1, j); \
+ rrow(1) = rhs_element(i, 1); \
+ lcol(2) = lhs_element(2, j); \
+ rrow(2) = rhs_element(i, 2); \
+ lcol(3) = lhs_element(3, j); \
+ rrow(3) = rhs_element(i, 3); \
+ lcol(4) = lhs_element(4, j); \
+ rrow(4) = rhs_element(i, 4); \
+ lcol(5) = lhs_element(5, j); \
+ rrow(5) = rhs_element(i, 5); \
+ lcol(6) = lhs_element(6, j); \
+ rrow(6) = rhs_element(i, 6); \
+ lcol(7) = lhs_element(7, j); \
+ rrow(7) = rhs_element(i, 7); \
+
+#define computeCol(j) \
+ res(0, j) += lcol(0) * rrow(j); \
+ res(1, j) += lcol(1) * rrow(j); \
+ res(2, j) += lcol(2) * rrow(j); \
+ res(3, j) += lcol(3) * rrow(j); \
+ res(4, j) += lcol(4) * rrow(j); \
+ res(5, j) += lcol(5) * rrow(j); \
+ res(6, j) += lcol(6) * rrow(j); \
+ res(7, j) += lcol(7) * rrow(j); \
+
+#define computePass(i) \
+ loadData(i, i); \
+ \
+ computeCol(0); \
+ computeCol(1); \
+ computeCol(2); \
+ computeCol(3); \
+ computeCol(4); \
+ computeCol(5); \
+ computeCol(6); \
+ computeCol(7); \
+
+ computePass(0);
+ computePass(1);
+ computePass(2);
+ computePass(3);
+ computePass(4);
+ computePass(5);
+ computePass(6);
+ computePass(7);
+
+#undef lcol
+#undef rrow
+#undef lhs_element
+#undef rhs_element
+#undef loadData
+#undef computeCol
+#undef computePass
+ } // end loop over k
+
+ // We have now iterated over all of the large (i.e. width-64) k blocks and
+ // accumulated partial results in registers. At this point each thread
+ // (x, y, z) holds the sum, across all big k blocks, of one partial product;
+ // to compute the final output we still need to sum these partials across the
+ // 8 threads that differ only in x.
+#if defined(EIGEN_HIPCC) || (defined(EIGEN_CUDACC_VER) && EIGEN_CUDACC_VER < 90000)
+#define shuffleInc(i, j, mask) res(i, j) += __shfl_xor(res(i, j), mask)
+#else
+#define shuffleInc(i, j, mask) res(i, j) += __shfl_xor_sync(0xFFFFFFFF, res(i, j), mask)
+#endif
+
+#define reduceRow(i, mask) \
+ shuffleInc(i, 0, mask); \
+ shuffleInc(i, 1, mask); \
+ shuffleInc(i, 2, mask); \
+ shuffleInc(i, 3, mask); \
+ shuffleInc(i, 4, mask); \
+ shuffleInc(i, 5, mask); \
+ shuffleInc(i, 6, mask); \
+ shuffleInc(i, 7, mask); \
+
+#define reduceMatrix(mask) \
+ reduceRow(0, mask); \
+ reduceRow(1, mask); \
+ reduceRow(2, mask); \
+ reduceRow(3, mask); \
+ reduceRow(4, mask); \
+ reduceRow(5, mask); \
+ reduceRow(6, mask); \
+ reduceRow(7, mask); \
+
+ // actually perform the reduction, now each thread of index (_, y, z)
+ // contains the correct values in its registers that belong in the output
+ // block
+ reduceMatrix(1);
+ reduceMatrix(2);
+ reduceMatrix(4);
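+ // The three calls above form a butterfly reduction over threadIdx.x: mask 1
+ // pairs each lane with the lane whose x differs in bit 0, mask 2 in bit 1 and
+ // mask 4 in bit 2, so after the three passes all 8 lanes sharing the same
+ // (y, z) hold the full sum. E.g. lane x = 5 first adds x = 4, then picks up
+ // x = 6..7 from x = 7, and finally x = 0..3 from x = 1.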
+
+#undef shuffleInc
+#undef reduceRow
+#undef reduceMatrix
+
+ // Now we need to copy the 64 values into main memory. We can't split the
+ // work among threads because all of the values live in registers. There are
+ // two ways to do this:
+ // (1) have one thread do 64 writes from registers into global memory, or
+ // (2) have one thread do 64 writes into shared memory, and then have 8
+ //     threads each do 8 writes into global memory. We can simply reuse the
+ //     shared memory from the problem we just solved.
+ // Option (2) is slightly faster than (1) thanks to less branching and more ILP.
+
+ // TODO: won't yield much gain, but could just use currently unused shared mem
+ // and then we won't have to sync
+ // wait for shared mem to be out of use
+ __syncthreads();
+
+#define writeResultShmem(i, j) \
+ lhs_shmem[i + 8 * threadIdx.y + 64 * threadIdx.z + 512 * j] = res(i, j); \
+
+#define writeRow(i) \
+ writeResultShmem(i, 0); \
+ writeResultShmem(i, 1); \
+ writeResultShmem(i, 2); \
+ writeResultShmem(i, 3); \
+ writeResultShmem(i, 4); \
+ writeResultShmem(i, 5); \
+ writeResultShmem(i, 6); \
+ writeResultShmem(i, 7); \
+
+ if (threadIdx.x == 0) {
+ writeRow(0);
+ writeRow(1);
+ writeRow(2);
+ writeRow(3);
+ writeRow(4);
+ writeRow(5);
+ writeRow(6);
+ writeRow(7);
+ }
+#undef writeResultShmem
+#undef writeRow
+
+ const int max_i_write = numext::mini((int)((m_size - base_m - threadIdx.y + 7) / 8), 8);
+ const int max_j_write = numext::mini((int)((n_size - base_n - threadIdx.z + 7) / 8), 8);
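+ // For example, with m_size = 100, base_m = 64 and threadIdx.y = 2 this gives
+ // max_i_write = min((100 - 64 - 2 + 7) / 8, 8) = 5, i.e. only the output
+ // rows 66, 74, 82, 90 and 98 written by threadIdx.x = 0..4 are still in range.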
+
+ if (threadIdx.x < max_i_write) {
+ if (max_j_write == 8) {
+ // TODO: can I trade bank conflicts for coalesced writes?
+ Scalar val0 = lhs_shmem[threadIdx.x + 8 * threadIdx.y + 64 * threadIdx.z + 512 * 0];
+ Scalar val1 = lhs_shmem[threadIdx.x + 8 * threadIdx.y + 64 * threadIdx.z + 512 * 1];
+ Scalar val2 = lhs_shmem[threadIdx.x + 8 * threadIdx.y + 64 * threadIdx.z + 512 * 2];
+ Scalar val3 = lhs_shmem[threadIdx.x + 8 * threadIdx.y + 64 * threadIdx.z + 512 * 3];
+ Scalar val4 = lhs_shmem[threadIdx.x + 8 * threadIdx.y + 64 * threadIdx.z + 512 * 4];
+ Scalar val5 = lhs_shmem[threadIdx.x + 8 * threadIdx.y + 64 * threadIdx.z + 512 * 5];
+ Scalar val6 = lhs_shmem[threadIdx.x + 8 * threadIdx.y + 64 * threadIdx.z + 512 * 6];
+ Scalar val7 = lhs_shmem[threadIdx.x + 8 * threadIdx.y + 64 * threadIdx.z + 512 * 7];
+
+ output(base_m + threadIdx.y + 8 * threadIdx.x, base_n + threadIdx.z + 8 * 0) = val0;
+ output(base_m + threadIdx.y + 8 * threadIdx.x, base_n + threadIdx.z + 8 * 1) = val1;
+ output(base_m + threadIdx.y + 8 * threadIdx.x, base_n + threadIdx.z + 8 * 2) = val2;
+ output(base_m + threadIdx.y + 8 * threadIdx.x, base_n + threadIdx.z + 8 * 3) = val3;
+ output(base_m + threadIdx.y + 8 * threadIdx.x, base_n + threadIdx.z + 8 * 4) = val4;
+ output(base_m + threadIdx.y + 8 * threadIdx.x, base_n + threadIdx.z + 8 * 5) = val5;
+ output(base_m + threadIdx.y + 8 * threadIdx.x, base_n + threadIdx.z + 8 * 6) = val6;
+ output(base_m + threadIdx.y + 8 * threadIdx.x, base_n + threadIdx.z + 8 * 7) = val7;
+ } else {
+#pragma unroll 7
+ for (int j = 0; j < max_j_write; j++) {
+ Scalar val = lhs_shmem[threadIdx.x + 8 * threadIdx.y + 64 * threadIdx.z + 512 * j];
+ output(base_m + threadIdx.y + 8 * threadIdx.x, base_n + threadIdx.z + 8 * j) = val;
+ }
+ }
+ }
+#undef res
+}
+
+
+template<typename Scalar, typename Index, typename LhsMapper,
+ typename RhsMapper, typename OutputMapper>
+__global__ void
+#if defined(EIGEN_HIPCC)
+__launch_bounds__(512, 1)
+#else
+__launch_bounds__(512)
+#endif
+EigenContractionKernel(const LhsMapper lhs, const RhsMapper rhs,
+ const OutputMapper output,
+ const Index m_size, const Index n_size, const Index k_size) {
+ __shared__ Scalar lhs_shmem[72 * 64];
+ __shared__ Scalar rhs_shmem[72 * 64];
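+ // Each 72 * 64 buffer is the 8x8 grid of padded 72-element blocks described
+ // in EigenContractionKernelInternal; for Scalar = float that amounts to
+ // 2 * 72 * 64 * 4 bytes = 36 KB of shared memory per 512-thread block.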
+
+ const Index m_block_idx = blockIdx.x;
+ const Index n_block_idx = blockIdx.y;
+
+ const Index base_m = 64 * m_block_idx;
+ const Index base_n = 64 * n_block_idx;
+
+ if (base_m + 63 < m_size && base_n + 63 < n_size) {
+ EigenContractionKernelInternal<Scalar, Index, LhsMapper, RhsMapper, OutputMapper, false>(lhs, rhs, output, lhs_shmem, rhs_shmem, m_size, n_size, k_size);
+ } else {
+ EigenContractionKernelInternal<Scalar, Index, LhsMapper, RhsMapper, OutputMapper, true>(lhs, rhs, output, lhs_shmem, rhs_shmem, m_size, n_size, k_size);
+ }
+}
+
+
+template<typename Index, typename LhsMapper,
+ typename RhsMapper, typename OutputMapper, bool CHECK_LHS_BOUNDARY,
+ bool CHECK_RHS_BOUNDARY>
+__device__ EIGEN_STRONG_INLINE void
+EigenFloatContractionKernelInternal16x16(const LhsMapper lhs, const RhsMapper rhs,
+ const OutputMapper output, float2 lhs_shmem2[][16],
+ float2 rhs_shmem2[][8], const Index m_size,
+ const Index n_size, const Index k_size,
+ const Index base_m, const Index base_n) {
+
+ // prefetch registers
+ float4 lhs_pf0, rhs_pf0;
+
+ float4 results[4];
+ for (int i=0; i < 4; i++) {
+ results[i].x = results[i].y = results[i].z = results[i].w = 0;
+ }
+
+#define prefetch_lhs(reg, row, col) \
+ if (!CHECK_LHS_BOUNDARY) { \
+ if (col < k_size) { \
+ reg =lhs.template loadPacket<float4,Unaligned>(row, col); \
+ } \
+ } else { \
+ if (col < k_size) { \
+ if (row + 3 < m_size) { \
+ reg =lhs.template loadPacket<float4,Unaligned>(row, col); \
+ } else if (row + 2 < m_size) { \
+ reg.x =lhs(row + 0, col); \
+ reg.y =lhs(row + 1, col); \
+ reg.z =lhs(row + 2, col); \
+ } else if (row + 1 < m_size) { \
+ reg.x =lhs(row + 0, col); \
+ reg.y =lhs(row + 1, col); \
+ } else if (row < m_size) { \
+ reg.x =lhs(row + 0, col); \
+ } \
+ } \
+ } \
+
+ Index lhs_vert = base_m+threadIdx.x*4;
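+ // With the (16, 16, 1) block used to launch this kernel, each x-thread owns
+ // four consecutive rows (lhs_vert .. lhs_vert + 3) loaded as a single float4,
+ // so the 16 x-threads together cover the 64-row tile that starts at base_m.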
+
+ for (Index k = 0; k < k_size; k += 16) {
+
+ lhs_pf0 = internal::pset1<float4>(0);
+ rhs_pf0 = internal::pset1<float4>(0);
+
+ Index lhs_horiz = threadIdx.y+k;
+ prefetch_lhs(lhs_pf0, lhs_vert, lhs_horiz)
+
+ Index rhs_vert = k+(threadIdx.x%4)*4;
+ Index rhs_horiz0 = (threadIdx.x>>2)+threadIdx.y*4+base_n;
+
+ if (!CHECK_RHS_BOUNDARY) {
+ if ((rhs_vert + 3) < k_size) {
+ // just CHECK_RHS_BOUNDARY
+ rhs_pf0 = rhs.template loadPacket<float4,Unaligned>(rhs_vert, rhs_horiz0);
+ } else if (rhs_vert + 2 < k_size) {
+ // just CHECK_RHS_BOUNDARY
+ rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
+ rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0);
+ rhs_pf0.z = rhs(rhs_vert + 2, rhs_horiz0);
+ } else if (rhs_vert + 1 < k_size) {
+ rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
+ rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0);
+ } else if (rhs_vert < k_size) {
+ rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
+ }
+ } else {
+ if (rhs_horiz0 < n_size) {
+ if ((rhs_vert + 3) < k_size) {
+ rhs_pf0 = rhs.template loadPacket<float4,Unaligned>(rhs_vert, rhs_horiz0);
+ } else if ((rhs_vert + 2) < k_size) {
+ rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
+ rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0);
+ rhs_pf0.z = rhs(rhs_vert + 2, rhs_horiz0);
+ } else if ((rhs_vert + 1) < k_size) {
+ rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
+ rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0);
+ } else if (rhs_vert < k_size) {
+ rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
+ }
+ }
+ }
+ float x1, x2;
+ // the following selection could be done with a bitwise operation some day
+ if((threadIdx.x%8) < 4) {
+ x1 = rhs_pf0.y;
+ x2 = rhs_pf0.w;
+ } else {
+ x1 = rhs_pf0.x;
+ x2 = rhs_pf0.z;
+ }
+ #if defined(EIGEN_HIPCC) || (defined(EIGEN_CUDACC_VER) && EIGEN_CUDACC_VER < 90000)
+ x1 = __shfl_xor(x1, 4);
+ x2 = __shfl_xor(x2, 4);
+ #else
+ x1 = __shfl_xor_sync(0xFFFFFFFF, x1, 4);
+ x2 = __shfl_xor_sync(0xFFFFFFFF, x2, 4);
+ #endif
+ if((threadIdx.x%8) < 4) {
+ rhs_pf0.y = x1;
+ rhs_pf0.w = x2;
+ } else {
+ rhs_pf0.x = x1;
+ rhs_pf0.z = x2;
+ }
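+ // The two shuffles exchange values between lanes whose threadIdx.x differs
+ // only in bit 2 (x and x^4): lanes with (x % 8) < 4 hand over their .y/.w
+ // components and receive the partner's .x/.z, so after the swap each lane
+ // holds exactly the component pairs that the rhs_shmem2 stores below expect.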
+
+ // We have 64 features.
+ // Row 0 -> times (0, 4, 8, 12, 1, 5, 9, 13) for features 0, 1.
+ // Row 1 -> times (0, 4, 8, 12, 1, 5, 9, 13) for features 2, 3.
+ // ...
+ // Row 31 -> times (0, 4, 8, 12, 1, 5, 9, 13) for features 62, 63
+ // Row 32 -> times (2, 6, 10, 14, 3, 7, 11, 15) for features 0, 1
+ // ...
+ rhs_shmem2[(threadIdx.x>>3)+ threadIdx.y*2][threadIdx.x%8] = make_float2(rhs_pf0.x, rhs_pf0.y);
+ rhs_shmem2[(threadIdx.x>>3)+ threadIdx.y*2+32][threadIdx.x%8] = make_float2(rhs_pf0.z, rhs_pf0.w);
+
+ // Row 0 (time 0) -> features (0, 1), (4, 5), .. (28, 29), (32, 33), .. (60, 61)
+ // Row 1 (time 1) -> features (0, 1), (4, 5), .. (28, 29), (32, 33), .. (60, 61)
+ // ...
+ // Row 15 (time 15) -> features (0, 1), (4, 5), .. (28, 29), (32, 33), .. (60, 61)
+ // Row 16 (time 0) -> features (2, 3), (6, 7), .. (30, 31), (34, 35), .. (62, 63)
+ // ...
+
+ lhs_shmem2[threadIdx.y][threadIdx.x] = make_float2(lhs_pf0.x, lhs_pf0.y);
+ lhs_shmem2[threadIdx.y+16][threadIdx.x] = make_float2(lhs_pf0.z, lhs_pf0.w);
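+ // e.g. the thread with (threadIdx.x, threadIdx.y) = (13, 3) writes its RHS
+ // pair to rhs_shmem2[(13 >> 3) + 3*2][13 % 8] = rhs_shmem2[7][5] (and to row
+ // 7 + 32 for the .z/.w half), and its LHS pairs to lhs_shmem2[3][13] and
+ // lhs_shmem2[19][13].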
+
+
+#define add_vals(fl1, fl2, fr1, fr2)\
+ results[0].x += fl1.x * fr1.x;\
+ results[0].y += fl1.y * fr1.x;\
+ results[0].z += fl2.x * fr1.x;\
+ results[0].w += fl2.y * fr1.x;\
+\
+ results[1].x += fl1.x * fr1.y;\
+ results[1].y += fl1.y * fr1.y;\
+ results[1].z += fl2.x * fr1.y;\
+ results[1].w += fl2.y * fr1.y;\
+\
+ results[2].x += fl1.x * fr2.x;\
+ results[2].y += fl1.y * fr2.x;\
+ results[2].z += fl2.x * fr2.x;\
+ results[2].w += fl2.y * fr2.x;\
+\
+ results[3].x += fl1.x * fr2.y;\
+ results[3].y += fl1.y * fr2.y;\
+ results[3].z += fl2.x * fr2.y;\
+ results[3].w += fl2.y * fr2.y;\
+
+ __syncthreads();
+
+ // Do the multiplies.
+ #pragma unroll
+ for (int koff = 0; koff < 16; koff ++) {
+ // 32 x threads.
+ float2 fl1 = lhs_shmem2[koff][threadIdx.x];
+ float2 fl2 = lhs_shmem2[koff + 16][threadIdx.x];
+
+ int start_feature = threadIdx.y * 4;
+ float2 fr1 = rhs_shmem2[(start_feature>>1) + 32*((koff%4)/2)][koff/4 + (koff%2)*4];
+ float2 fr2 = rhs_shmem2[(start_feature>>1) + 1 + 32*((koff%4)/2)][koff/4 + (koff%2)*4];
+
+ add_vals(fl1, fl2, fr1, fr2)
+ }
+ __syncthreads();
+ }
+
+#undef prefetch_lhs
+#undef add_vals
+
+ Index horiz_base = threadIdx.y*4+base_n;
+ if (!CHECK_LHS_BOUNDARY && !CHECK_RHS_BOUNDARY) {
+ for (int i = 0; i < 4; i++) {
+ output(lhs_vert, horiz_base + i) = results[i].x;
+ output(lhs_vert + 1, horiz_base + i) = results[i].y;
+ output(lhs_vert + 2, horiz_base + i) = results[i].z;
+ output(lhs_vert + 3, horiz_base + i) = results[i].w;
+ }
+ } else if (!CHECK_RHS_BOUNDARY) {
+ // CHECK LHS
+ if (lhs_vert + 3 < m_size) {
+ for (int i = 0; i < 4; i++) {
+ output(lhs_vert, horiz_base + i) = results[i].x;
+ output(lhs_vert + 1, horiz_base + i) = results[i].y;
+ output(lhs_vert + 2, horiz_base + i) = results[i].z;
+ output(lhs_vert + 3, horiz_base + i) = results[i].w;
+ }
+ } else if (lhs_vert + 2 < m_size) {
+ for (int i = 0; i < 4; i++) {
+ output(lhs_vert, horiz_base + i) = results[i].x;
+ output(lhs_vert + 1, horiz_base + i) = results[i].y;
+ output(lhs_vert + 2, horiz_base + i) = results[i].z;
+ }
+ } else if (lhs_vert + 1 < m_size) {
+ for (int i = 0; i < 4; i++) {
+ output(lhs_vert, horiz_base + i) = results[i].x;
+ output(lhs_vert + 1, horiz_base + i) = results[i].y;
+ }
+ } else if (lhs_vert < m_size) {
+ for (int i = 0; i < 4; i++) {
+ output(lhs_vert, horiz_base + i) = results[i].x;
+ }
+ }
+ } else if (!CHECK_LHS_BOUNDARY) {
+ // CHECK RHS
+ /*
+ int ncols_rem = fminf(n_size- horiz_base, 4);
+ for (int i = 0; i < ncols_rem; i++) {
+ output(lhs_vert, horiz_base + i) = results[i].x;
+ output(lhs_vert + 1, horiz_base + i) = results[i].y;
+ output(lhs_vert + 2, horiz_base + i) = results[i].z;
+ output(lhs_vert + 3, horiz_base + i) = results[i].w;
+ }*/
+ for (int i = 0; i < 4; i++) {
+ if (horiz_base+i < n_size) {
+ output(lhs_vert, horiz_base + i) = results[i].x;
+ output(lhs_vert + 1, horiz_base + i) = results[i].y;
+ output(lhs_vert + 2, horiz_base + i) = results[i].z;
+ output(lhs_vert + 3, horiz_base + i) = results[i].w;
+ }
+ }
+ } else {
+ // CHECK both boundaries.
+ for (int i = 0; i < 4; i++) {
+ if (horiz_base+i < n_size) {
+ if (lhs_vert < m_size)
+ output(lhs_vert, horiz_base + i) = results[i].x;
+ if (lhs_vert + 1 < m_size)
+ output(lhs_vert + 1, horiz_base + i) = results[i].y;
+ if (lhs_vert + 2 < m_size)
+ output(lhs_vert + 2, horiz_base + i) = results[i].z;
+ if (lhs_vert + 3 < m_size)
+ output(lhs_vert + 3, horiz_base + i) = results[i].w;
+ }
+ }
+ }
+}
+
+
+template<typename Index, typename LhsMapper,
+ typename RhsMapper, typename OutputMapper, bool CHECK_LHS_BOUNDARY,
+ bool CHECK_RHS_BOUNDARY>
+__device__ EIGEN_STRONG_INLINE void
+EigenFloatContractionKernelInternal(const LhsMapper lhs, const RhsMapper rhs,
+ const OutputMapper output, float2 lhs_shmem2[][32],
+ float2 rhs_shmem2[][8], const Index m_size,
+ const Index n_size, const Index k_size,
+ const Index base_m, const Index base_n) {
+
+ // prefetch registers
+ float4 lhs_pf0, lhs_pf1, lhs_pf2, lhs_pf3;
+ float4 rhs_pf0, rhs_pf1;
+
+ float4 results[8];
+ for (int i=0; i < 8; i++) {
+ results[i].x = results[i].y = results[i].z = results[i].w = 0;
+ }
+
+ Index lhs_vert = base_m+threadIdx.x*4+(threadIdx.y%4)*32;
+ for (Index k = 0; k < k_size; k += 32) {
+ lhs_pf0 = internal::pset1<float4>(0);
+ lhs_pf1 = internal::pset1<float4>(0);
+ lhs_pf2 = internal::pset1<float4>(0);
+ lhs_pf3 = internal::pset1<float4>(0);
+
+ rhs_pf0 = internal::pset1<float4>(0);
+ rhs_pf1 = internal::pset1<float4>(0);
+
+ if (!CHECK_LHS_BOUNDARY) {
+ if ((threadIdx.y/4+k+24) < k_size) {
+ lhs_pf0 =lhs.template loadPacket<float4,Unaligned>(lhs_vert, (threadIdx.y/4+k));
+ lhs_pf1 =lhs.template loadPacket<float4,Unaligned>(lhs_vert, (threadIdx.y/4+k+8));
+ lhs_pf2 =lhs.template loadPacket<float4,Unaligned>(lhs_vert, (threadIdx.y/4+k+16));
+ lhs_pf3 =lhs.template loadPacket<float4,Unaligned>(lhs_vert, (threadIdx.y/4+k+24));
+ } else if ((threadIdx.y/4+k+16) < k_size) {
+ lhs_pf0 =lhs.template loadPacket<float4,Unaligned>(lhs_vert, (threadIdx.y/4+k));
+ lhs_pf1 =lhs.template loadPacket<float4,Unaligned>(lhs_vert, (threadIdx.y/4+k+8));
+ lhs_pf2 =lhs.template loadPacket<float4,Unaligned>(lhs_vert, (threadIdx.y/4+k+16));
+ } else if ((threadIdx.y/4+k+8) < k_size) {
+ lhs_pf0 =lhs.template loadPacket<float4,Unaligned>(lhs_vert, (threadIdx.y/4+k));
+ lhs_pf1 =lhs.template loadPacket<float4,Unaligned>(lhs_vert, (threadIdx.y/4+k+8));
+ } else if ((threadIdx.y/4+k) < k_size) {
+ lhs_pf0 =lhs.template loadPacket<float4,Unaligned>(lhs_vert, (threadIdx.y/4+k));
+ }
+ } else {
+ // just CHECK_LHS_BOUNDARY
+ if (lhs_vert + 3 < m_size) {
+ if ((threadIdx.y/4+k+24) < k_size) {
+ lhs_pf0 =lhs.template loadPacket<float4,Unaligned>(lhs_vert, (threadIdx.y/4+k));
+ lhs_pf1 =lhs.template loadPacket<float4,Unaligned>(lhs_vert, (threadIdx.y/4+k+8));
+ lhs_pf2 =lhs.template loadPacket<float4,Unaligned>(lhs_vert, (threadIdx.y/4+k+16));
+ lhs_pf3 =lhs.template loadPacket<float4,Unaligned>(lhs_vert, (threadIdx.y/4+k+24));
+ } else if ((threadIdx.y/4+k+16) < k_size) {
+ lhs_pf0 =lhs.template loadPacket<float4,Unaligned>(lhs_vert, (threadIdx.y/4+k));
+ lhs_pf1 =lhs.template loadPacket<float4,Unaligned>(lhs_vert, (threadIdx.y/4+k+8));
+ lhs_pf2 =lhs.template loadPacket<float4,Unaligned>(lhs_vert, (threadIdx.y/4+k+16));
+ } else if ((threadIdx.y/4+k+8) < k_size) {
+ lhs_pf0 =lhs.template loadPacket<float4,Unaligned>(lhs_vert, (threadIdx.y/4+k));
+ lhs_pf1 =lhs.template loadPacket<float4,Unaligned>(lhs_vert, (threadIdx.y/4+k+8));
+ } else if ((threadIdx.y/4+k) < k_size) {
+ lhs_pf0 =lhs.template loadPacket<float4,Unaligned>(lhs_vert, (threadIdx.y/4+k));
+ }
+ } else if (lhs_vert + 2 < m_size) {
+ if ((threadIdx.y/4+k+24) < k_size) {
+ lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k));
+ lhs_pf0.y =lhs(lhs_vert + 1, (threadIdx.y/4+k));
+ lhs_pf0.z =lhs(lhs_vert + 2, (threadIdx.y/4+k));
+ lhs_pf1.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+8));
+ lhs_pf1.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+8));
+ lhs_pf1.z =lhs(lhs_vert + 2, (threadIdx.y/4+k+8));
+ lhs_pf2.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+16));
+ lhs_pf2.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+16));
+ lhs_pf2.z =lhs(lhs_vert + 2, (threadIdx.y/4+k+16));
+ lhs_pf3.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+24));
+ lhs_pf3.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+24));
+ lhs_pf3.z =lhs(lhs_vert + 2, (threadIdx.y/4+k+24));
+ } else if ((threadIdx.y/4+k+16) < k_size) {
+ lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k));
+ lhs_pf0.y =lhs(lhs_vert + 1, (threadIdx.y/4+k));
+ lhs_pf0.z =lhs(lhs_vert + 2, (threadIdx.y/4+k));
+ lhs_pf1.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+8));
+ lhs_pf1.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+8));
+ lhs_pf1.z =lhs(lhs_vert + 2, (threadIdx.y/4+k+8));
+ lhs_pf2.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+16));
+ lhs_pf2.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+16));
+ lhs_pf2.z =lhs(lhs_vert + 2, (threadIdx.y/4+k+16));
+ } else if ((threadIdx.y/4+k+8) < k_size) {
+ lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k));
+ lhs_pf0.y =lhs(lhs_vert + 1, (threadIdx.y/4+k));
+ lhs_pf0.z =lhs(lhs_vert + 2, (threadIdx.y/4+k));
+ lhs_pf1.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+8));
+ lhs_pf1.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+8));
+ lhs_pf1.z =lhs(lhs_vert + 2, (threadIdx.y/4+k+8));
+ } else if ((threadIdx.y/4+k) < k_size) {
+ lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k));
+ lhs_pf0.y =lhs(lhs_vert + 1, (threadIdx.y/4+k));
+ lhs_pf0.z =lhs(lhs_vert + 2, (threadIdx.y/4+k));
+ }
+ } else if (lhs_vert + 1 < m_size) {
+ if ((threadIdx.y/4+k+24) < k_size) {
+ lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k));
+ lhs_pf0.y =lhs(lhs_vert + 1, (threadIdx.y/4+k));
+ lhs_pf1.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+8));
+ lhs_pf1.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+8));
+ lhs_pf2.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+16));
+ lhs_pf2.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+16));
+ lhs_pf3.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+24));
+ lhs_pf3.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+24));
+ } else if ((threadIdx.y/4+k+16) < k_size) {
+ lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k));
+ lhs_pf0.y =lhs(lhs_vert + 1, (threadIdx.y/4+k));
+ lhs_pf1.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+8));
+ lhs_pf1.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+8));
+ lhs_pf2.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+16));
+ lhs_pf2.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+16));
+ } else if ((threadIdx.y/4+k+8) < k_size) {
+ lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k));
+ lhs_pf0.y =lhs(lhs_vert + 1, (threadIdx.y/4+k));
+ lhs_pf1.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+8));
+ lhs_pf1.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+8));
+ } else if ((threadIdx.y/4+k) < k_size) {
+ lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k));
+ lhs_pf0.y =lhs(lhs_vert + 1, (threadIdx.y/4+k));
+ }
+ } else if (lhs_vert < m_size) {
+ if ((threadIdx.y/4+k+24) < k_size) {
+ lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k));
+ lhs_pf1.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+8));
+ lhs_pf2.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+16));
+ lhs_pf3.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+24));
+ } else if ((threadIdx.y/4+k+16) < k_size) {
+ lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k));
+ lhs_pf1.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+8));
+ lhs_pf2.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+16));
+ } else if ((threadIdx.y/4+k+8) < k_size) {
+ lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k));
+ lhs_pf1.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+8));
+ } else if ((threadIdx.y/4+k) < k_size) {
+ lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k));
+ }
+ }
+ }
+ __syncthreads();
+ Index rhs_vert = k+threadIdx.x*4;
+ Index rhs_horiz0 = threadIdx.y*2+base_n;
+ Index rhs_horiz1 = threadIdx.y*2+1+base_n;
+ if (!CHECK_RHS_BOUNDARY) {
+ if ((rhs_vert + 3) < k_size) {
+ // just CHECK_RHS_BOUNDARY
+ rhs_pf0 = rhs.template loadPacket<float4,Unaligned>(rhs_vert, rhs_horiz0);
+ rhs_pf1 = rhs.template loadPacket<float4,Unaligned>(rhs_vert, rhs_horiz1);
+ } else if (rhs_vert + 2 < k_size) {
+ // just CHECK_RHS_BOUNDARY
+ rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
+ rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0);
+ rhs_pf0.z = rhs(rhs_vert + 2, rhs_horiz0);
+ rhs_pf1.x = rhs(rhs_vert, rhs_horiz1);
+ rhs_pf1.y = rhs(rhs_vert + 1, rhs_horiz1);
+ rhs_pf1.z = rhs(rhs_vert + 2, rhs_horiz1);
+ } else if (rhs_vert + 1 < k_size) {
+ rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
+ rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0);
+ rhs_pf1.x = rhs(rhs_vert, rhs_horiz1);
+ rhs_pf1.y = rhs(rhs_vert + 1, rhs_horiz1);
+ } else if (rhs_vert < k_size) {
+ rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
+ rhs_pf1.x = rhs(rhs_vert, rhs_horiz1);
+ }
+ } else {
+ if (rhs_horiz1 < n_size) {
+ if ((rhs_vert + 3) < k_size) {
+ // just CHECK_RHS_BOUNDARY
+ rhs_pf0 = rhs.template loadPacket<float4,Unaligned>(rhs_vert, rhs_horiz0);
+ rhs_pf1 = rhs.template loadPacket<float4,Unaligned>(rhs_vert, rhs_horiz1);
+ } else if (rhs_vert + 2 < k_size) {
+ // just CHECK_RHS_BOUNDARY
+ rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
+ rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0);
+ rhs_pf0.z = rhs(rhs_vert + 2, rhs_horiz0);
+ rhs_pf1.x = rhs(rhs_vert, rhs_horiz1);
+ rhs_pf1.y = rhs(rhs_vert + 1, rhs_horiz1);
+ rhs_pf1.z = rhs(rhs_vert + 2, rhs_horiz1);
+ } else if (k+threadIdx.x*4 + 1 < k_size) {
+ rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
+ rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0);
+ rhs_pf1.x = rhs(rhs_vert, rhs_horiz1);
+ rhs_pf1.y = rhs(rhs_vert + 1, rhs_horiz1);
+ } else if (k+threadIdx.x*4 < k_size) {
+ rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
+ rhs_pf1.x = rhs(rhs_vert, rhs_horiz1);
+ }
+ } else if (rhs_horiz0 < n_size) {
+ if ((rhs_vert + 3) < k_size) {
+ // just CHECK_RHS_BOUNDARY
+ rhs_pf0 = rhs.template loadPacket<float4,Unaligned>(rhs_vert, rhs_horiz0);
+ } else if ((rhs_vert + 2) < k_size) {
+ // just CHECK_RHS_BOUNDARY
+ rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
+ rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0);
+ rhs_pf0.z = rhs(rhs_vert + 2, rhs_horiz0);
+ } else if ((rhs_vert + 1) < k_size) {
+ rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
+ rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0);
+ } else if (rhs_vert < k_size) {
+ rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
+ }
+ }
+ }
+ __syncthreads();
+ // Loaded. Do computation
+ // Row 0 -> times (0, 4, 8, .. 28) for features 0, 1.
+ // Row 1 -> times (0, 4, 8, .. 28) for features 2, 3.
+ // ..
+ // Row 31 -> times (0, 4, 8, .. 28) for features 62, 63
+ rhs_shmem2[threadIdx.y][threadIdx.x] = make_float2(rhs_pf0.x, rhs_pf1.x);
+ // Row 32 -> times (1, 5, 9, .. 29) for features 0, 1.
+ // Row 33 -> times (1, 5, 9, .. 29) for features 2, 3.
+ // ..
+ rhs_shmem2[threadIdx.y+32][threadIdx.x] = make_float2(rhs_pf0.y, rhs_pf1.y);
+ // Row 64 -> times (2, 6, 10, .. 30) for features 0, 1.
+ // Row 65 -> times (2, 6, 10, .. 30) for features 2, 3.
+ rhs_shmem2[threadIdx.y+64][threadIdx.x] = make_float2(rhs_pf0.z, rhs_pf1.z);
+ // Row 96 -> times (3, 7, 11, .. 31) for features 0, 1.
+ // Row 97 -> times (3, 7, 11, .. 31) for features 2, 3.
+ rhs_shmem2[threadIdx.y+96][threadIdx.x] = make_float2(rhs_pf0.w, rhs_pf1.w);
+
+ // LHS.
+ // Row 0 (time 0) -> features (0, 1), (4, 5), .. (28, 29), (32, 33), .. (60, 61) .. (124, 125)
+ // Row 1 (time 1) -> features (0, 1), (4, 5), .. (28, 29), (32, 33), .. (60, 61) .. (124, 125)
+ // ...
+ // Row 8 (time 0) -> features (2, 3), (6, 7), .. (30, 31), (34, 35), .. (62, 63) .. (126, 127)
+ // Row 15 (time 7) -> features (2, 3), (6, 7), .. (30, 31), (34, 35), .. (62, 63) .. (126, 127)
+
+
+#define add_vals(a_feat1, a_feat2, f1, f2, f3, f4)\
+ results[0].x += a_feat1.x * f1.x;\
+ results[1].x += a_feat1.x * f1.y;\
+ results[2].x += a_feat1.x * f2.x;\
+ results[3].x += a_feat1.x * f2.y;\
+ results[4].x += a_feat1.x * f3.x;\
+ results[5].x += a_feat1.x * f3.y;\
+ results[6].x += a_feat1.x * f4.x;\
+ results[7].x += a_feat1.x * f4.y;\
+\
+ results[0].y += a_feat1.y * f1.x;\
+ results[1].y += a_feat1.y * f1.y;\
+ results[2].y += a_feat1.y * f2.x;\
+ results[3].y += a_feat1.y * f2.y;\
+ results[4].y += a_feat1.y * f3.x;\
+ results[5].y += a_feat1.y * f3.y;\
+ results[6].y += a_feat1.y * f4.x;\
+ results[7].y += a_feat1.y * f4.y;\
+\
+ results[0].z += a_feat2.x * f1.x;\
+ results[1].z += a_feat2.x * f1.y;\
+ results[2].z += a_feat2.x * f2.x;\
+ results[3].z += a_feat2.x * f2.y;\
+ results[4].z += a_feat2.x * f3.x;\
+ results[5].z += a_feat2.x * f3.y;\
+ results[6].z += a_feat2.x * f4.x;\
+ results[7].z += a_feat2.x * f4.y;\
+\
+ results[0].w += a_feat2.y * f1.x;\
+ results[1].w += a_feat2.y * f1.y;\
+ results[2].w += a_feat2.y * f2.x;\
+ results[3].w += a_feat2.y * f2.y;\
+ results[4].w += a_feat2.y * f3.x;\
+ results[5].w += a_feat2.y * f3.y;\
+ results[6].w += a_feat2.y * f4.x;\
+ results[7].w += a_feat2.y * f4.y;\
+
+ lhs_shmem2[threadIdx.y/4][threadIdx.x+(threadIdx.y%4)*8] = make_float2(lhs_pf0.x, lhs_pf0.y);
+ lhs_shmem2[threadIdx.y/4+8][threadIdx.x+(threadIdx.y%4)*8] = make_float2(lhs_pf1.x, lhs_pf1.y);
+ lhs_shmem2[threadIdx.y/4+16][threadIdx.x+(threadIdx.y%4)*8] = make_float2(lhs_pf2.x, lhs_pf2.y);
+ lhs_shmem2[threadIdx.y/4+24][threadIdx.x+(threadIdx.y%4)*8] = make_float2(lhs_pf3.x, lhs_pf3.y);
+
+ lhs_shmem2[threadIdx.y/4 + 32][threadIdx.x+(threadIdx.y%4)*8] = make_float2(lhs_pf0.z, lhs_pf0.w);
+ lhs_shmem2[threadIdx.y/4 + 40][threadIdx.x+(threadIdx.y%4)*8] = make_float2(lhs_pf1.z, lhs_pf1.w);
+ lhs_shmem2[threadIdx.y/4 + 48][threadIdx.x+(threadIdx.y%4)*8] = make_float2(lhs_pf2.z, lhs_pf2.w);
+ lhs_shmem2[threadIdx.y/4 + 56][threadIdx.x+(threadIdx.y%4)*8] = make_float2(lhs_pf3.z, lhs_pf3.w);
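+ // As launched below with a (8, 32, 1) block, each of the 256 threads has
+ // just written eight float2 values, filling the 64 x 32 LHS tile (two
+ // m-values per float2, i.e. the full 128-row strip starting at base_m).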
+
+ __syncthreads();
+
+ // Do the multiplies.
+ #pragma unroll
+ for (int koff = 0; koff < 32; koff ++) {
+ float2 a3 = lhs_shmem2[koff][threadIdx.x + (threadIdx.y % 4) * 8];
+ float2 a4 = lhs_shmem2[koff + 32][threadIdx.x + (threadIdx.y % 4) * 8];
+
+ // the first feature is at (threadIdx.y/4) * 8; the last is at start_feature + 7.
+ int start_feature = (threadIdx.y / 4) * 8;
+
+ float2 br1 = rhs_shmem2[start_feature/2 + (koff % 4) * 32][koff/4];
+ float2 br2 = rhs_shmem2[start_feature/2 + 1 + (koff % 4) * 32][koff/4];
+ float2 br3 = rhs_shmem2[start_feature/2 + 2 + (koff % 4) * 32][koff/4];
+ float2 br4 = rhs_shmem2[start_feature/2 + 3 + (koff % 4) * 32][koff/4];
+
+ add_vals(a3, a4, br1, br2, br3, br4)
+ }
+ __syncthreads();
+ } // end loop over k
+
+ __syncthreads();
+ Index horiz_base = (threadIdx.y/4)*8+base_n;
+ if (!CHECK_LHS_BOUNDARY && !CHECK_RHS_BOUNDARY) {
+ for (int i = 0; i < 8; i++) {
+ output(lhs_vert, horiz_base + i) = results[i].x;
+ output(lhs_vert + 1, horiz_base + i) = results[i].y;
+ output(lhs_vert + 2, horiz_base + i) = results[i].z;
+ output(lhs_vert + 3, horiz_base + i) = results[i].w;
+ }
+ } else if (!CHECK_RHS_BOUNDARY) {
+ if (lhs_vert + 3 < m_size) {
+ for (int i = 0; i < 8; i++) {
+ output(lhs_vert, horiz_base + i) = results[i].x;
+ output(lhs_vert + 1, horiz_base + i) = results[i].y;
+ output(lhs_vert + 2, horiz_base + i) = results[i].z;
+ output(lhs_vert + 3, horiz_base + i) = results[i].w;
+ }
+ } else if (lhs_vert + 2 < m_size) {
+ for (int i = 0; i < 8; i++) {
+ output(lhs_vert, horiz_base + i) = results[i].x;
+ output(lhs_vert + 1, horiz_base + i) = results[i].y;
+ output(lhs_vert + 2, horiz_base + i) = results[i].z;
+ }
+ } else if (lhs_vert + 1 < m_size) {
+ for (int i = 0; i < 8; i++) {
+ output(lhs_vert, horiz_base + i) = results[i].x;
+ output(lhs_vert + 1, horiz_base + i) = results[i].y;
+ }
+ } else if (lhs_vert < m_size) {
+ for (int i = 0; i < 8; i++) {
+ output(lhs_vert, horiz_base + i) = results[i].x;
+ }
+ }
+ } else if (!CHECK_LHS_BOUNDARY) {
+ // CHECK BOUNDARY_B
+ for (int i = 0; i < 8; i++) {
+ if (horiz_base + i < n_size) {
+ output(lhs_vert, horiz_base + i) = results[i].x;
+ output(lhs_vert + 1, horiz_base + i) = results[i].y;
+ output(lhs_vert + 2, horiz_base + i) = results[i].z;
+ output(lhs_vert + 3, horiz_base + i) = results[i].w;
+ }
+ }
+ } else {
+ // CHECK both boundaries.
+ for (int i = 0; i < 8; i++) {
+ if (horiz_base + i < n_size) {
+ if (lhs_vert < m_size)
+ output(lhs_vert, horiz_base + i) = results[i].x;
+ if (lhs_vert + 1 < m_size)
+ output(lhs_vert + 1, horiz_base + i) = results[i].y;
+ if (lhs_vert + 2 < m_size)
+ output(lhs_vert + 2, horiz_base + i) = results[i].z;
+ if (lhs_vert + 3 < m_size)
+ output(lhs_vert + 3, horiz_base + i) = results[i].w;
+ }
+ }
+ }
+}
+
+
+template<typename Index, typename LhsMapper,
+ typename RhsMapper, typename OutputMapper>
+__global__ void
+#if defined(EIGEN_HIPCC)
+__launch_bounds__(256, 1)
+#else
+__launch_bounds__(256)
+#endif
+EigenFloatContractionKernel(const LhsMapper lhs, const RhsMapper rhs,
+ const OutputMapper output,
+ const Index m_size, const Index n_size, const Index k_size) {
+ __shared__ float2 lhs_shmem[64*32];
+ __shared__ float2 rhs_shmem[128*8];
+
+ typedef float2 LHS_MEM[64][32];
+ typedef float2 RHS_MEM[128][8];
+
+ const Index m_block_idx = blockIdx.x;
+ const Index n_block_idx = blockIdx.y;
+
+ const Index base_m = 128 * m_block_idx;
+ const Index base_n = 64 * n_block_idx;
+
+ bool check_rhs = (base_n + 63) >= n_size;
+ bool check_lhs128 = (base_m + 127) >= m_size;
+
+ if (!check_rhs) {
+ if (!check_lhs128) {
+ // >= 128 rows left
+ EigenFloatContractionKernelInternal<Index, LhsMapper, RhsMapper, OutputMapper, false, false>(
+ lhs, rhs, output, *((LHS_MEM *) lhs_shmem), *((RHS_MEM *) rhs_shmem), m_size, n_size, k_size, base_m, base_n);
+ } else {
+ EigenFloatContractionKernelInternal<Index, LhsMapper, RhsMapper, OutputMapper, true, false>(
+ lhs, rhs, output, *((LHS_MEM *) lhs_shmem), *((RHS_MEM *) rhs_shmem), m_size, n_size, k_size, base_m, base_n);
+ }
+ } else {
+ if (!check_lhs128) {
+ // >= 128 rows left
+ EigenFloatContractionKernelInternal<Index, LhsMapper, RhsMapper, OutputMapper, false, true>(
+ lhs, rhs, output, *((LHS_MEM *) lhs_shmem), *((RHS_MEM *) rhs_shmem), m_size, n_size, k_size, base_m, base_n);
+ } else {
+ EigenFloatContractionKernelInternal<Index, LhsMapper, RhsMapper, OutputMapper, true, true>(
+ lhs, rhs, output, *((LHS_MEM *) lhs_shmem), *((RHS_MEM *) rhs_shmem), m_size, n_size, k_size, base_m, base_n);
+ }
+ }
+}
+
+template<typename Index, typename LhsMapper,
+ typename RhsMapper, typename OutputMapper>
+__global__ void
+#if defined(EIGEN_HIPCC)
+__launch_bounds__(256, 1)
+#else
+__launch_bounds__(256)
+#endif
+EigenFloatContractionKernel16x16(const LhsMapper lhs, const RhsMapper rhs,
+ const OutputMapper output,
+ const Index m_size, const Index n_size, const Index k_size) {
+ __shared__ float2 lhs_shmem[32][16];
+ __shared__ float2 rhs_shmem[64][8];
+
+ const Index m_block_idx = blockIdx.x;
+ const Index n_block_idx = blockIdx.y;
+
+ const Index base_m = 64 * m_block_idx;
+ const Index base_n = 64 * n_block_idx;
+
+ if (base_m + 63 < m_size) {
+ if (base_n + 63 < n_size) {
+ EigenFloatContractionKernelInternal16x16<Index, LhsMapper, RhsMapper, OutputMapper, false, false>(lhs, rhs, output, lhs_shmem, rhs_shmem, m_size, n_size, k_size, base_m, base_n);
+ } else {
+ EigenFloatContractionKernelInternal16x16<Index, LhsMapper, RhsMapper, OutputMapper, false, true>(lhs, rhs, output, lhs_shmem, rhs_shmem, m_size, n_size, k_size, base_m, base_n);
+ }
+ } else {
+ if (base_n + 63 < n_size) {
+ EigenFloatContractionKernelInternal16x16<Index, LhsMapper, RhsMapper, OutputMapper, true, false>(lhs, rhs, output, lhs_shmem, rhs_shmem, m_size, n_size, k_size, base_m, base_n);
+ } else {
+ EigenFloatContractionKernelInternal16x16<Index, LhsMapper, RhsMapper, OutputMapper, true, true>(lhs, rhs, output, lhs_shmem, rhs_shmem, m_size, n_size, k_size, base_m, base_n);
+ }
+ }
+}
+
+
+template<typename Indices, typename LeftArgType, typename RightArgType, typename OutputKernelType>
+struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType, OutputKernelType>, GpuDevice> :
+ public TensorContractionEvaluatorBase<TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType, OutputKernelType>, GpuDevice> > {
+
+ static_assert(std::is_same<OutputKernelType, const NoOpOutputKernel>::value,
+ "GPU tensor contraction does not support output kernels.");
+
+ typedef GpuDevice Device;
+
+ typedef TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType, OutputKernelType>, Device> Self;
+ typedef TensorContractionEvaluatorBase<Self> Base;
+
+ typedef TensorContractionOp<Indices, LeftArgType, RightArgType, OutputKernelType> XprType;
+ typedef typename internal::remove_const<typename XprType::Scalar>::type Scalar;
+ typedef typename XprType::Index Index;
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+ typedef typename PacketType<CoeffReturnType, GpuDevice>::type PacketReturnType;
+
+ enum {
+ Layout = TensorEvaluator<LeftArgType, Device>::Layout,
+ };
+
+ // Most of the code assumes that both input tensors are ColMajor. If the
+ // inputs are RowMajor, we will "cheat" by swapping the LHS and RHS:
+ // If we want to compute A * B = C, where A is LHS and B is RHS, the code
+ // will pretend B is LHS and A is RHS.
+ typedef typename internal::conditional<
+ static_cast<int>(Layout) == static_cast<int>(ColMajor), LeftArgType, RightArgType>::type EvalLeftArgType;
+ typedef typename internal::conditional<
+ static_cast<int>(Layout) == static_cast<int>(ColMajor), RightArgType, LeftArgType>::type EvalRightArgType;
+
+ static const int LDims =
+ internal::array_size<typename TensorEvaluator<EvalLeftArgType, Device>::Dimensions>::value;
+ static const int RDims =
+ internal::array_size<typename TensorEvaluator<EvalRightArgType, Device>::Dimensions>::value;
+ static const int ContractDims = internal::array_size<Indices>::value;
+
+ typedef array<Index, LDims> left_dim_mapper_t;
+ typedef array<Index, RDims> right_dim_mapper_t;
+
+ typedef array<Index, ContractDims> contract_t;
+ typedef array<Index, LDims - ContractDims> left_nocontract_t;
+ typedef array<Index, RDims - ContractDims> right_nocontract_t;
+
+ static const int NumDims = LDims + RDims - 2 * ContractDims;
+
+ typedef DSizes<Index, NumDims> Dimensions;
+
+ // typedefs needed in evalTo
+ typedef typename internal::remove_const<typename EvalLeftArgType::Scalar>::type LhsScalar;
+ typedef typename internal::remove_const<typename EvalRightArgType::Scalar>::type RhsScalar;
+
+ typedef TensorEvaluator<EvalLeftArgType, Device> LeftEvaluator;
+ typedef TensorEvaluator<EvalRightArgType, Device> RightEvaluator;
+
+ typedef typename LeftEvaluator::Dimensions LeftDimensions;
+ typedef typename RightEvaluator::Dimensions RightDimensions;
+
+ EIGEN_DEVICE_FUNC TensorEvaluator(const XprType& op, const Device& device) :
+ Base(op, device) {}
+
+ // We need to redefine this method to make nvcc happy
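+ // When the caller supplies a buffer we contract directly into it and return
+ // false (no further assignment is needed); otherwise we allocate m_result,
+ // contract into that, and return true.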
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* data) {
+ this->m_leftImpl.evalSubExprsIfNeeded(NULL);
+ this->m_rightImpl.evalSubExprsIfNeeded(NULL);
+ if (data) {
+ evalTo(data);
+ return false;
+ } else {
+ this->m_result = static_cast<Scalar *>(this->m_device.allocate(this->dimensions().TotalSize() * sizeof(Scalar)));
+ evalTo(this->m_result);
+ return true;
+ }
+ }
+
+ void evalTo(Scalar* buffer) const {
+ if (this->m_lhs_inner_dim_contiguous) {
+ if (this->m_rhs_inner_dim_contiguous) {
+ if (this->m_rhs_inner_dim_reordered) {
+ evalTyped<true, true, true, Unaligned>(buffer);
+ }
+ else {
+ evalTyped<true, true, false, Unaligned>(buffer);
+ }
+ }
+ else {
+ if (this->m_rhs_inner_dim_reordered) {
+ evalTyped<true, false, true, Unaligned>(buffer);
+ }
+ else {
+ evalTyped<true, false, false, Unaligned>(buffer);
+ }
+ }
+ }
+ else {
+ if (this->m_rhs_inner_dim_contiguous) {
+ if (this->m_rhs_inner_dim_reordered) {
+ evalTyped<false, true, true, Unaligned>(buffer);
+ }
+ else {
+ evalTyped<false, true, false, Unaligned>(buffer);
+ }
+ }
+ else {
+ if (this->m_rhs_inner_dim_reordered) {
+ evalTyped<false, false, true, Unaligned>(buffer);
+ }
+ else {
+ evalTyped<false, false, false, Unaligned>(buffer);
+ }
+ }
+ }
+ }
+
+ template <typename LhsScalar, typename RhsScalar, typename Index, typename LhsMapper, typename RhsMapper, typename OutputMapper> struct LaunchKernels {
+ static void Run(const LhsMapper& lhs, const RhsMapper& rhs, const OutputMapper& output, Index m, Index n, Index k, const GpuDevice& device) {
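+      // Each block of the generic kernel computes a 64x64 tile of the output,
+      // hence the ceil-division of m and n by 64.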
+ const Index m_blocks = (m + 63) / 64;
+ const Index n_blocks = (n + 63) / 64;
+ const dim3 num_blocks(m_blocks, n_blocks, 1);
+ const dim3 block_size(8, 8, 8);
+ LAUNCH_GPU_KERNEL((EigenContractionKernel<Scalar, Index, LhsMapper, RhsMapper, OutputMapper>), num_blocks, block_size, 0, device, lhs, rhs, output, m, n, k);
+ }
+ };
+
+ template <typename Index, typename LhsMapper, typename RhsMapper, typename OutputMapper> struct LaunchKernels<float, float, Index, LhsMapper, RhsMapper, OutputMapper> {
+ static void Run(const LhsMapper& lhs, const RhsMapper& rhs, const OutputMapper& output, Index m, Index n, Index k, const GpuDevice& device) {
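+      // Small problems use the 16x16-thread kernel (64x64 output tiles per
+      // block); larger ones use the float-specialised kernel, which covers
+      // 128x64 output tiles with 8x32 threads.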
+ if (m < 768 || n < 768) {
+ const Index m_blocks = (m + 63) / 64;
+ const Index n_blocks = (n + 63) / 64;
+ const dim3 num_blocks(m_blocks, n_blocks, 1);
+ const dim3 block_size(16, 16, 1);
+ LAUNCH_GPU_KERNEL((EigenFloatContractionKernel16x16<Index, LhsMapper, RhsMapper, OutputMapper>), num_blocks, block_size, 0, device, lhs, rhs, output, m, n, k);
+ } else {
+ const Index m_blocks = (m + 127) / 128;
+ const Index n_blocks = (n + 63) / 64;
+ const dim3 num_blocks(m_blocks, n_blocks, 1);
+ const dim3 block_size(8, 32, 1);
+ LAUNCH_GPU_KERNEL((EigenFloatContractionKernel<Index, LhsMapper, RhsMapper, OutputMapper>), num_blocks, block_size, 0, device, lhs, rhs, output, m, n, k);
+ }
+ }
+ };
+
+ template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
+ void evalTyped(Scalar* buffer) const {
+ // columns in left side, rows in right side
+ const Index k = this->m_k_size;
+ EIGEN_UNUSED_VARIABLE(k)
+
+ // rows in left side
+ const Index m = this->m_i_size;
+
+ // columns in right side
+ const Index n = this->m_j_size;
+
+ // zero out the result buffer (which must be of size at least m * n * sizeof(Scalar))
+ this->m_device.memset(buffer, 0, m * n * sizeof(Scalar));
+
+ typedef internal::TensorContractionInputMapper<LhsScalar, Index, internal::Lhs,
+ LeftEvaluator, left_nocontract_t,
+ contract_t, 4,
+ lhs_inner_dim_contiguous,
+ false, Unaligned> LhsMapper;
+
+ typedef internal::TensorContractionInputMapper<RhsScalar, Index, internal::Rhs,
+ RightEvaluator, right_nocontract_t,
+ contract_t, 4,
+ rhs_inner_dim_contiguous,
+ rhs_inner_dim_reordered, Unaligned> RhsMapper;
+
+ typedef internal::blas_data_mapper<Scalar, Index, ColMajor> OutputMapper;
+
+
+ // initialize data mappers
+ LhsMapper lhs(this->m_leftImpl, this->m_left_nocontract_strides, this->m_i_strides,
+ this->m_left_contracting_strides, this->m_k_strides);
+
+ RhsMapper rhs(this->m_rightImpl, this->m_right_nocontract_strides, this->m_j_strides,
+ this->m_right_contracting_strides, this->m_k_strides);
+
+ OutputMapper output(buffer, m);
+
+#if defined(EIGEN_USE_HIP)
+ setGpuSharedMemConfig(hipSharedMemBankSizeEightByte);
+#else
+ setGpuSharedMemConfig(cudaSharedMemBankSizeEightByte);
+#endif
+
+ LaunchKernels<LhsScalar, RhsScalar, Index, LhsMapper, RhsMapper, OutputMapper>::Run(lhs, rhs, output, m, n, k, this->m_device);
+ }
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_USE_GPU and EIGEN_GPUCC
+#endif // EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_GPU_H
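// Usage sketch for the evaluator above: evaluating a contraction on a
// GpuDevice. A minimal example, assuming the renamed Eigen::GpuStreamDevice /
// Eigen::GpuDevice helpers (CudaStreamDevice before the rename) and a GPU
// compiler (nvcc or hipcc); the buffers are left uninitialised here and would
// normally be filled via dev.memcpyHostToDevice().

#define EIGEN_USE_GPU
#include <unsupported/Eigen/CXX11/Tensor>

void contract_on_gpu() {
  Eigen::GpuStreamDevice stream;
  Eigen::GpuDevice dev(&stream);

  const int m = 256, k = 128, n = 64;
  float* d_lhs = static_cast<float*>(dev.allocate(m * k * sizeof(float)));
  float* d_rhs = static_cast<float*>(dev.allocate(k * n * sizeof(float)));
  float* d_out = static_cast<float*>(dev.allocate(m * n * sizeof(float)));

  Eigen::TensorMap<Eigen::Tensor<float, 2> > lhs(d_lhs, m, k);
  Eigen::TensorMap<Eigen::Tensor<float, 2> > rhs(d_rhs, k, n);
  Eigen::TensorMap<Eigen::Tensor<float, 2> > out(d_out, m, n);

  // Contract dimension 1 of lhs with dimension 0 of rhs: an ordinary
  // (m x k) * (k x n) matrix product, evaluated by the kernels above.
  Eigen::array<Eigen::IndexPair<int>, 1> dims = {{Eigen::IndexPair<int>(1, 0)}};
  out.device(dev) = lhs.contract(rhs, dims);
  dev.synchronize();

  dev.deallocate(d_lhs);
  dev.deallocate(d_rhs);
  dev.deallocate(d_out);
}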
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionMapper.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionMapper.h
index ab320a50d..dbb0f76bb 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionMapper.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionMapper.h
@@ -238,9 +238,6 @@ class BaseTensorContractionMapper : public SimpleTensorContractionMapper<Scalar,
const contract_t& k_strides) :
ParentMapper(tensor, nocontract_strides, ij_strides, contract_strides, k_strides) { }
- typedef typename Tensor::PacketReturnType Packet;
- typedef typename unpacket_traits<Packet>::half HalfPacket;
-
template <typename PacketT,int AlignmentType>
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE PacketT load(Index i, Index j) const {
@@ -284,27 +281,10 @@ class BaseTensorContractionMapper : public SimpleTensorContractionMapper<Scalar,
return pload<PacketT>(data);
}
- template <int AlignmentType>
- EIGEN_DEVICE_FUNC
- EIGEN_STRONG_INLINE Packet loadPacket(Index i, Index j) const {
- return this->load<Packet,AlignmentType>(i,j);
- }
-
- template <int AlignmentType>
+ template <typename PacketT,int AlignmentType>
EIGEN_DEVICE_FUNC
- EIGEN_STRONG_INLINE HalfPacket loadHalfPacket(Index i, Index j) const {
- // whole method makes column major assumption
-
- // don't need to add offsets for now (because operator handles that)
- const Index half_packet_size = unpacket_traits<HalfPacket>::size;
- if (half_packet_size == packet_size) {
- return loadPacket<AlignmentType>(i, j);
- }
- EIGEN_ALIGN_MAX Scalar data[half_packet_size];
- for (Index k = 0; k < half_packet_size; k++) {
- data[k] = operator()(i + k, j);
- }
- return pload<HalfPacket>(data);
+ EIGEN_STRONG_INLINE PacketT loadPacket(Index i, Index j) const {
+ return this->load<PacketT,AlignmentType>(i,j);
}
};
@@ -314,7 +294,8 @@ template<typename Scalar, typename Index, int side,
typename nocontract_t, typename contract_t,
bool inner_dim_contiguous,
bool inner_dim_reordered, int Alignment, template <class> class MakePointer_>
-class BaseTensorContractionMapper<Scalar, Index, side, Tensor, nocontract_t, contract_t, 1, inner_dim_contiguous, inner_dim_reordered, Alignment, MakePointer_> : public SimpleTensorContractionMapper<Scalar, Index, side, Tensor, nocontract_t, contract_t, 1, inner_dim_contiguous, Alignment, MakePointer_>
+class BaseTensorContractionMapper<Scalar, Index, side, Tensor, nocontract_t, contract_t, 1, inner_dim_contiguous, inner_dim_reordered, Alignment, MakePointer_>
+ : public SimpleTensorContractionMapper<Scalar, Index, side, Tensor, nocontract_t, contract_t, 1, inner_dim_contiguous, Alignment, MakePointer_>
{
public:
typedef SimpleTensorContractionMapper<Scalar, Index, side, Tensor, nocontract_t, contract_t, 1, inner_dim_contiguous, Alignment, MakePointer_> ParentMapper;
@@ -327,12 +308,11 @@ class BaseTensorContractionMapper<Scalar, Index, side, Tensor, nocontract_t, con
const contract_t& k_strides) :
ParentMapper(tensor, nocontract_strides, ij_strides, contract_strides, k_strides) { }
- typedef typename Tensor::PacketReturnType Packet;
- template <int> EIGEN_DEVICE_FUNC
- EIGEN_STRONG_INLINE Packet loadPacket(Index i, Index j) const {
+ template <typename PacketT,int> EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE PacketT loadPacket(Index i, Index j) const {
EIGEN_ALIGN_MAX Scalar data[1];
data[0] = this->m_tensor.coeff(this->computeIndex(i, j));
- return pload<typename Tensor::PacketReturnType>(data);
+ return pload<PacketT>(data);
}
template <typename PacketT,int> EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE PacketT load(Index i, Index j) const {
@@ -340,10 +320,6 @@ class BaseTensorContractionMapper<Scalar, Index, side, Tensor, nocontract_t, con
data[0] = this->m_tensor.coeff(this->computeIndex(i, j));
return pload<PacketT>(data);
}
- template <int> EIGEN_DEVICE_FUNC
- EIGEN_STRONG_INLINE Packet loadHalfPacket(Index i, Index j) const {
- return loadPacket(i, j);
- }
};
@@ -354,8 +330,6 @@ template<typename Scalar, typename Index, int side,
bool inner_dim_contiguous, bool inner_dim_reordered, int Alignment, template <class> class MakePointer_=MakePointer>
class TensorContractionSubMapper {
public:
- typedef typename Tensor::PacketReturnType Packet;
- typedef typename unpacket_traits<Packet>::half HalfPacket;
typedef BaseTensorContractionMapper<Scalar, Index, side, Tensor, nocontract_t, contract_t, packet_size, inner_dim_contiguous, inner_dim_reordered, Alignment, MakePointer_> ParentMapper;
typedef TensorContractionSubMapper<Scalar, Index, side, Tensor, nocontract_t, contract_t, packet_size, inner_dim_contiguous, inner_dim_reordered, Alignment, MakePointer_> Self;
@@ -390,17 +364,20 @@ class TensorContractionSubMapper {
return m_base_mapper(i + m_vert_offset, j + m_horiz_offset);
}
- EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet loadPacket(Index i) const {
+ template <typename PacketT>
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE PacketT loadPacket(Index i) const {
if (UseDirectOffsets) {
- return m_base_mapper.template loadPacket<Alignment>(i, 0);
+ return m_base_mapper.template loadPacket<PacketT,Alignment>(i, 0);
}
- return m_base_mapper.template loadPacket<Alignment>(i + m_vert_offset, m_horiz_offset);
+ return m_base_mapper.template loadPacket<PacketT,Alignment>(i + m_vert_offset, m_horiz_offset);
}
- EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet loadPacket(Index i, Index j) const {
+
+ template <typename PacketT>
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE PacketT loadPacket(Index i, Index j) const {
if (UseDirectOffsets) {
- return m_base_mapper.template loadPacket<Alignment>(i, j);
+ return m_base_mapper.template loadPacket<PacketT,Alignment>(i, j);
}
- return m_base_mapper.template loadPacket<Alignment>(i + m_vert_offset, j + m_horiz_offset);
+ return m_base_mapper.template loadPacket<PacketT,Alignment>(i + m_vert_offset, j + m_horiz_offset);
}
template <typename PacketT, int AlignmentType>
@@ -411,14 +388,8 @@ class TensorContractionSubMapper {
return m_base_mapper.template loadPacket<PacketT,AlignmentType>(i + m_vert_offset, j + m_horiz_offset);
}
- EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE HalfPacket loadHalfPacket(Index i) const {
- if (UseDirectOffsets) {
- return m_base_mapper.template loadHalfPacket<Alignment>(i, 0);
- }
- return m_base_mapper.template loadHalfPacket<Alignment>(i + m_vert_offset, m_horiz_offset);
- }
-
- EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void storePacket(Index i, const Packet& p) const {
+ template <typename PacketT>
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void storePacket(Index i, const PacketT& p) const {
if (UseDirectOffsets) {
m_base_mapper.storePacket(i, 0, p);
}
@@ -434,15 +405,15 @@ class TensorContractionSubMapper {
template <typename PacketT, int AlignmentType>
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE PacketT load(Index i) const {
- EIGEN_STATIC_ASSERT((internal::is_same<PacketT, Packet>::value), YOU_MADE_A_PROGRAMMING_MISTAKE);
const int ActualAlignment = (AlignmentType == Aligned) && (Alignment == Aligned) ? Aligned : Unaligned;
if (UseDirectOffsets) {
- return m_base_mapper.template loadPacket<ActualAlignment>(i, 0);
+ return m_base_mapper.template loadPacket<PacketT,ActualAlignment>(i, 0);
}
- return m_base_mapper.template loadPacket<ActualAlignment>(i + m_vert_offset, m_horiz_offset);
+ return m_base_mapper.template loadPacket<PacketT,ActualAlignment>(i + m_vert_offset, m_horiz_offset);
}
- template <typename Packet>
+ template <typename PacketT>
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool aligned(Index) const {
return false;
}
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionSycl.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionSycl.h
index e87de0c57..35f931c53 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionSycl.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionSycl.h
@@ -11,7 +11,7 @@
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
/*****************************************************************
- * TensorSyclConvertToDeviceExpression.h
+ * TensorContractionSycl.h
*
* \brief:
* TensorContractionsycl
@@ -23,15 +23,18 @@
namespace Eigen {
template <typename Index, typename LhsScalar, typename RhsScalar,bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered> struct LaunchSyclKernels;
-template<typename Indices, typename LeftArgType, typename RightArgType>
-struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType>, const Eigen::SyclDevice> :
- public TensorContractionEvaluatorBase<TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType>, const Eigen::SyclDevice> > {
+template<typename Indices, typename LeftArgType, typename RightArgType, typename OutputKernelType>
+struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType, OutputKernelType>, const Eigen::SyclDevice> :
+ public TensorContractionEvaluatorBase<TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType, OutputKernelType>, const Eigen::SyclDevice> > {
+
+ static_assert(std::is_same<OutputKernelType, const NoOpOutputKernel>::value,
+ "SYCL tensor contraction does not support output kernels.");
typedef const Eigen::SyclDevice Device;
- typedef TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType>, Device> Self;
+ typedef TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType, OutputKernelType>, Device> Self;
typedef TensorContractionEvaluatorBase<Self> Base;
- typedef TensorContractionOp<Indices, LeftArgType, RightArgType> XprType;
+ typedef TensorContractionOp<Indices, LeftArgType, RightArgType, OutputKernelType> XprType;
typedef typename internal::remove_const<typename XprType::Scalar>::type Scalar;
typedef typename XprType::Index Index;
typedef typename XprType::CoeffReturnType CoeffReturnType;
@@ -84,7 +87,7 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* data) {
this->m_leftImpl.evalSubExprsIfNeeded(NULL);
this->m_rightImpl.evalSubExprsIfNeeded(NULL);
- if (data) {
+ if (data) {
evalTo(data);
return false;
} else {
@@ -173,6 +176,7 @@ typename HostExpr::Index LocalThreadSizeM, typename HostExpr::Index LocalThreadS
LhsLocalAcc localLhs;
RhsLocalAcc localRhs;
OutAccessor out_res;
+ size_t out_offset;
Index roundUpK, M, N, K;
ContractT m_k_strides, m_left_contracting_strides, m_right_contracting_strides;
LeftNocontractT m_i_strides, m_left_nocontract_strides;
@@ -182,18 +186,19 @@ typename HostExpr::Index LocalThreadSizeM, typename HostExpr::Index LocalThreadS
Device dev;
- KernelConstructor(LHSFunctorExpr lhs_functors_, RHSFunctorExpr rhs_functors_, LhsLocalAcc localLhs_, RhsLocalAcc localRhs_, OutAccessor out_res_,
+ KernelConstructor(LHSFunctorExpr lhs_functors_, RHSFunctorExpr rhs_functors_, LhsLocalAcc localLhs_, RhsLocalAcc localRhs_, OutAccessor out_res_, size_t out_offset_,
Index roundUpK_, Index M_, Index N_, Index K_, ContractT m_k_strides_, ContractT m_left_contracting_strides_,
ContractT m_right_contracting_strides_, LeftNocontractT m_i_strides_, RightNocontractT m_j_strides_,
LeftNocontractT m_left_nocontract_strides_, RightNocontractT m_right_nocontract_strides_, LHSTupleType left_tuple_of_accessors_, RHSTupleType right_tuple_of_accessors_, Device dev_)
- :lhs_functors(lhs_functors_), rhs_functors(rhs_functors_), localLhs(localLhs_), localRhs(localRhs_), out_res(out_res_), roundUpK(roundUpK_), M(M_), N(N_), K(K_),
+ :lhs_functors(lhs_functors_), rhs_functors(rhs_functors_), localLhs(localLhs_), localRhs(localRhs_), out_res(out_res_),
+ out_offset(out_offset_), roundUpK(roundUpK_), M(M_), N(N_), K(K_),
m_k_strides(m_k_strides_), m_left_contracting_strides(m_left_contracting_strides_),
m_right_contracting_strides(m_right_contracting_strides_),
m_i_strides(m_i_strides_), m_left_nocontract_strides(m_left_nocontract_strides_),
m_j_strides(m_j_strides_), m_right_nocontract_strides(m_right_nocontract_strides_),
left_tuple_of_accessors(left_tuple_of_accessors_), right_tuple_of_accessors(right_tuple_of_accessors_), dev(dev_){}
- void operator()(cl::sycl::nd_item<1> itemID) {
+ void operator()(cl::sycl::nd_item<2> itemID) {
typedef typename Eigen::TensorSycl::internal::ConvertToDeviceExpression<HostExpr>::Type DevExpr;
typedef typename Eigen::TensorSycl::internal::ConvertToDeviceExpression<LHSHostExpr>::Type LHSDevExpr;
typedef typename Eigen::TensorSycl::internal::ConvertToDeviceExpression<RHSHostExpr>::Type RHSDevExpr;
@@ -230,13 +235,13 @@ typename HostExpr::Index LocalThreadSizeM, typename HostExpr::Index LocalThreadS
const Index nGroupId = itemID.get_group(1); // Work-group ID localCol
const Index linearLocalThreadId = nLocalThreadId*LocalThreadSizeM + mLocalThreadId; // linear local thread ID
// Allocate register space
- float privateLhs;
- float privateRhs[WorkLoadPerThreadN];
- float privateRes[WorkLoadPerThreadM][WorkLoadPerThreadN];
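+      // Use the expression's scalar types for the register tiles instead of
+      // hard-coded float, so the kernel is not limited to float contractions.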
+ LhsScalar privateLhs;
+ RhsScalar privateRhs[WorkLoadPerThreadN];
+ OutScalar privateRes[WorkLoadPerThreadM][WorkLoadPerThreadN];
// Initialise the privateRes accumulation registers
for (Index wLPTM=0; wLPTM<WorkLoadPerThreadM; wLPTM++) {
for (Index wLPTN=0; wLPTN<WorkLoadPerThreadN; wLPTN++) {
- privateRes[wLPTM][wLPTN] = 0.0f;
+ privateRes[wLPTM][wLPTN] = static_cast<OutScalar>(0);
}
}
@@ -316,7 +321,7 @@ typename HostExpr::Index LocalThreadSizeM, typename HostExpr::Index LocalThreadS
for (Index wLPTN=0; wLPTN<WorkLoadPerThreadN; wLPTN++) {
Index globalCol = nGroupId*TileSizeDimN + nLocalThreadId + wLPTN*LocalThreadSizeN;
if(globalCol<N)
- out_ptr[globalCol*M + globalRow] = privateRes[wLPTM][wLPTN];
+ out_ptr[globalCol*M + globalRow +ConvertToActualSyclOffset(OutScalar, out_offset)] = privateRes[wLPTM][wLPTN];
}
}
}
@@ -356,12 +361,12 @@ template< typename Self, typename OutScalar, typename ContractT, typename LeftNo
// extract lhs functor list
LHSFunctorExpr lhs_functors = Eigen::TensorSycl::internal::extractFunctors(self.left_impl());
// extract rhs functor list
- RHSFunctorExpr rhs_functors = Eigen::TensorSycl::internal::extractFunctors(self.left_impl());
+ RHSFunctorExpr rhs_functors = Eigen::TensorSycl::internal::extractFunctors(self.right_impl());
Index roundUpK = RoundUp(K, TileSizeDimK);
Index roundUpM = RoundUp(M, TileSizeDimM);
Index roundUpN = RoundUp(N, TileSizeDimN);
-
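+  // The output may live at a non-zero offset inside its SYCL buffer; the
+  // kernel adds this offset when writing through the accessor.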
+ ptrdiff_t out_offset = self.device().get_offset(buffer);
self.device().sycl_queue().submit([&](cl::sycl::handler &cgh) {
/// work-around for gcc bug
typedef decltype(Eigen::TensorSycl::internal::createTupleOfAccessors<OrigLHSExpr>(cgh, self.left_impl())) LHSTupleType;
@@ -379,18 +384,17 @@ template< typename Self, typename OutScalar, typename ContractT, typename LeftNo
typedef cl::sycl::accessor<RhsScalar, 1, cl::sycl::access::mode::read_write, cl::sycl::access::target::local> RhsLocalAcc;
RhsLocalAcc localRhs(cl::sycl::range<1>(2* TileSizeDimK * TileSizeDimN), cgh);
- typedef cl::sycl::accessor<uint8_t, 1, cl::sycl::access::mode::write, cl::sycl::access::target::global_buffer> OutAccessor;
+ typedef cl::sycl::accessor<uint8_t, 1, cl::sycl::access::mode::read_write, cl::sycl::access::target::global_buffer> OutAccessor;
//OutScalar memory
- OutAccessor out_res= self.device(). template get_sycl_accessor<cl::sycl::access::mode::write>(cgh, buffer);
-
+ OutAccessor out_res= self.device(). template get_sycl_accessor<cl::sycl::access::mode::read_write>(cgh, buffer);
// sycl parallel for
cgh.parallel_for(cl::sycl::nd_range<2>(cl::sycl::range<2>(roundUpM/WorkLoadPerThreadM, roundUpN/WorkLoadPerThreadN),
cl::sycl::range<2>(LocalThreadSizeM, LocalThreadSizeN)),
KernelConstructor<HostExpr, OutScalar, LhsScalar, RhsScalar, LHSFunctorExpr, RHSFunctorExpr, LhsLocalAcc, RhsLocalAcc, OutAccessor, Index, ContractT, LeftNocontractT,
RightNocontractT, lhs_inner_dim_contiguous, rhs_inner_dim_contiguous, rhs_inner_dim_reordered, TileSizeDimM, TileSizeDimN, TileSizeDimK,
- WorkLoadPerThreadM, WorkLoadPerThreadN, LocalThreadSizeM, LocalThreadSizeN, LoadPerThreadLhs, LoadPerThreadRhs, LHSTupleType, RHSTupleType, Eigen::DefaultDevice>(lhs_functors, rhs_functors,
- localLhs, localRhs, out_res, roundUpK, M, N, K, m_k_strides, m_left_contracting_strides, m_right_contracting_strides,m_i_strides, m_j_strides,
- m_left_nocontract_strides,m_right_nocontract_strides, left_tuple_of_accessors, right_tuple_of_accessors, Eigen::DefaultDevice()));
+ WorkLoadPerThreadM, WorkLoadPerThreadN, LocalThreadSizeM, LocalThreadSizeN, LoadPerThreadLhs, LoadPerThreadRhs, LHSTupleType, RHSTupleType, Eigen::SyclKernelDevice>(lhs_functors, rhs_functors,
+ localLhs, localRhs, out_res, out_offset, roundUpK, M, N, K, m_k_strides, m_left_contracting_strides, m_right_contracting_strides,m_i_strides, m_j_strides,
+ m_left_nocontract_strides,m_right_nocontract_strides, left_tuple_of_accessors, right_tuple_of_accessors, Eigen::SyclKernelDevice()));
});
self.device().asynchronousExec();
}
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
index d30cc96ab..0980854b4 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
@@ -15,57 +15,16 @@
namespace Eigen {
-#ifdef EIGEN_USE_SIMPLE_THREAD_POOL
-namespace internal {
-
-template<typename LhsScalar, typename LhsMapper, typename Index>
-struct packLhsArg {
- LhsScalar* blockA;
- const LhsMapper& lhs;
- const Index m_start;
- const Index k_start;
- const Index mc;
- const Index kc;
-};
-
-template<typename LhsScalar, typename RhsScalar, typename RhsMapper, typename OutputMapper, typename Index>
-struct packRhsAndKernelArg {
- const MaxSizeVector<LhsScalar*>* blockAs;
- RhsScalar* blockB;
- const RhsMapper& rhs;
- OutputMapper& output;
- const Index m;
- const Index k;
- const Index n;
- const Index mc;
- const Index kc;
- const Index nc;
- const Index num_threads;
- const Index num_blockAs;
- const Index max_m;
- const Index k_block_idx;
- const Index m_block_idx;
- const Index n_block_idx;
- const Index m_blocks;
- const Index n_blocks;
- MaxSizeVector<Notification*>* kernel_notifications;
- const MaxSizeVector<Notification*>* lhs_notifications;
- const bool need_to_pack;
-};
-
-} // end namespace internal
-#endif // EIGEN_USE_SIMPLE_THREAD_POOL
-
-template<typename Indices, typename LeftArgType, typename RightArgType>
-struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType>, ThreadPoolDevice> :
- public TensorContractionEvaluatorBase<TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType>, ThreadPoolDevice> > {
+template<typename Indices, typename LeftArgType, typename RightArgType, typename OutputKernelType>
+struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType, OutputKernelType>, ThreadPoolDevice> :
+ public TensorContractionEvaluatorBase<TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType, OutputKernelType>, ThreadPoolDevice> > {
typedef ThreadPoolDevice Device;
- typedef TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType>, Device> Self;
+ typedef TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType, OutputKernelType>, Device> Self;
typedef TensorContractionEvaluatorBase<Self> Base;
- typedef TensorContractionOp<Indices, LeftArgType, RightArgType> XprType;
+ typedef TensorContractionOp<Indices, LeftArgType, RightArgType, OutputKernelType> XprType;
typedef typename internal::remove_const<typename XprType::Scalar>::type Scalar;
typedef typename XprType::Index Index;
typedef typename XprType::CoeffReturnType CoeffReturnType;
@@ -112,9 +71,7 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
TensorEvaluator(const XprType& op, const Device& device) :
Base(op, device) {}
-#ifndef EIGEN_USE_SIMPLE_THREAD_POOL
- template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous,
- bool rhs_inner_dim_reordered, int Alignment>
+ template <int Alignment>
void evalProduct(Scalar* buffer) const {
const Index m = this->m_i_size;
const Index n = this->m_j_size;
@@ -138,39 +95,6 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
}
#endif
- typedef
- typename internal::remove_const<typename EvalLeftArgType::Scalar>::type
- LhsScalar;
- typedef
- typename internal::remove_const<typename EvalRightArgType::Scalar>::type
- RhsScalar;
- typedef typename internal::gebp_traits<LhsScalar, RhsScalar> Traits;
- typedef TensorEvaluator<EvalLeftArgType, Device> LeftEvaluator;
- typedef TensorEvaluator<EvalRightArgType, Device> RightEvaluator;
- typedef internal::TensorContractionInputMapper<
- LhsScalar, Index, internal::Lhs, LeftEvaluator, left_nocontract_t,
- contract_t, internal::packet_traits<LhsScalar>::size,
- lhs_inner_dim_contiguous, false, Unaligned>
- LhsMapper;
- typedef internal::TensorContractionInputMapper<
- RhsScalar, Index, internal::Rhs, RightEvaluator, right_nocontract_t,
- contract_t, internal::packet_traits<RhsScalar>::size,
- rhs_inner_dim_contiguous, rhs_inner_dim_reordered, Unaligned>
- RhsMapper;
- typedef internal::blas_data_mapper<Scalar, Index, ColMajor> OutputMapper;
- typedef internal::gemm_pack_lhs<LhsScalar, Index,
- typename LhsMapper::SubMapper, Traits::mr,
- Traits::LhsProgress, ColMajor>
- LhsPacker;
- typedef internal::gemm_pack_rhs<
- RhsScalar, Index, typename RhsMapper::SubMapper, Traits::nr, ColMajor>
- RhsPacker;
- typedef internal::gebp_kernel<LhsScalar, RhsScalar, Index, OutputMapper,
- Traits::mr, Traits::nr, false, false>
- GebpKernel;
-
-
-
// Compute a set of algorithm parameters:
// - kernel block sizes (bm, bn, bk)
// - task grain sizes (number of kernels executed per task: gm, gn)
@@ -200,14 +124,14 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
// Again, we don't know number of threads yet, so we use 2.
Index bm, bn, bk;
if (shard_by_col) {
- internal::TensorContractionBlocking<LhsMapper, RhsMapper, Index,
+ internal::TensorContractionBlocking<LhsScalar, RhsScalar, Index,
internal::ShardByCol>
blocking(k, m, n, 2);
bm = blocking.mc();
bn = blocking.nc();
bk = blocking.kc();
} else {
- internal::TensorContractionBlocking<LhsMapper, RhsMapper, Index,
+ internal::TensorContractionBlocking<LhsScalar, RhsScalar, Index,
internal::ShardByRow>
blocking(k, m, n, 2);
bm = blocking.mc();
@@ -229,29 +153,22 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
if (n == 1) num_threads = 1;
if (num_threads == 1) {
- // The single-threaded algorithm should be faster in this case.
- if (n == 1)
- this->template evalGemv<lhs_inner_dim_contiguous,
- rhs_inner_dim_contiguous,
- rhs_inner_dim_reordered, Alignment>(buffer);
- else
- this->template evalGemm<lhs_inner_dim_contiguous,
- rhs_inner_dim_contiguous,
- rhs_inner_dim_reordered, Alignment>(buffer);
+ TENSOR_CONTRACTION_DISPATCH(this->template evalProductSequential,
+ Unaligned, (buffer));
return;
}
// Now that we know number of threads, recalculate sharding and blocking.
shard_by_col = shardByCol(m, n, num_threads);
if (shard_by_col) {
- internal::TensorContractionBlocking<LhsMapper, RhsMapper, Index,
+ internal::TensorContractionBlocking<LhsScalar, RhsScalar, Index,
internal::ShardByCol>
blocking(k, m, n, num_threads);
bm = blocking.mc();
bn = blocking.nc();
bk = blocking.kc();
} else {
- internal::TensorContractionBlocking<LhsMapper, RhsMapper, Index,
+ internal::TensorContractionBlocking<LhsScalar, RhsScalar, Index,
internal::ShardByRow>
blocking(k, m, n, num_threads);
bm = blocking.mc();
@@ -299,36 +216,59 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
// more important in this case.
if ((shard_by_col ? nm : nn) == 1) parallel_pack = false;
- LhsMapper lhs(this->m_leftImpl, this->m_left_nocontract_strides,
- this->m_i_strides, this->m_left_contracting_strides,
- this->m_k_strides);
+ #define CONTEXT_ARGS \
+ (this, num_threads, buffer, m, n, k, bm, bn, bk, nm, nn, nk, gm, gn, nm0, \
+ nn0, shard_by_col, parallel_pack) \
+ .run()
- RhsMapper rhs(this->m_rightImpl, this->m_right_nocontract_strides,
- this->m_j_strides, this->m_right_contracting_strides,
- this->m_k_strides);
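+    // TENSOR_CONTRACTION_DISPATCH turns the runtime inner-dimension flags into
+    // the template arguments of Context and calls run() on that instantiation.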
+ TENSOR_CONTRACTION_DISPATCH(Context, Alignment, CONTEXT_ARGS);
+
+#undef CONTEXT_ARGS
- Context<LhsPacker, RhsPacker, GebpKernel, LhsMapper, RhsMapper,
- OutputMapper>(this->m_device, num_threads, lhs, rhs, buffer, m, n,
- k, bm, bn, bk, nm, nn, nk, gm, gn, nm0, nn0,
- shard_by_col, parallel_pack)
- .run();
}
// Context coordinates a single parallel gemm operation.
- template <typename LhsPacker, typename RhsPacker, typename GebpKernel,
- typename LhsMapper, typename RhsMapper, typename OutputMapper>
+ template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous,
+ bool rhs_inner_dim_reordered, int Alignment>
class Context {
public:
- Context(const Device& device, int num_threads, LhsMapper& lhs,
- RhsMapper& rhs, Scalar* buffer, Index tm, Index tn, Index tk, Index bm,
- Index bn, Index bk, Index nm, Index nn, Index nk, Index gm,
- Index gn, Index nm0, Index nn0, bool shard_by_col,
+ typedef internal::TensorContractionInputMapper<
+ LhsScalar, Index, internal::Lhs, LeftEvaluator, left_nocontract_t,
+ contract_t, internal::packet_traits<LhsScalar>::size,
+ lhs_inner_dim_contiguous, false, Unaligned>
+ LhsMapper;
+ typedef internal::TensorContractionInputMapper<
+ RhsScalar, Index, internal::Rhs, RightEvaluator, right_nocontract_t,
+ contract_t, internal::packet_traits<RhsScalar>::size,
+ rhs_inner_dim_contiguous, rhs_inner_dim_reordered, Unaligned>
+ RhsMapper;
+ typedef internal::gemm_pack_lhs<LhsScalar, Index,
+ typename LhsMapper::SubMapper, Traits::mr,
+ Traits::LhsProgress, typename Traits::LhsPacket4Packing, ColMajor>
+ LhsPacker;
+ typedef internal::gemm_pack_rhs<
+ RhsScalar, Index, typename RhsMapper::SubMapper, Traits::nr, ColMajor>
+ RhsPacker;
+ typedef internal::blas_data_mapper<Scalar, Index, ColMajor> OutputMapper;
+ typedef internal::gebp_kernel<LhsScalar, RhsScalar, Index, OutputMapper,
+ Traits::mr, Traits::nr, false, false>
+ GebpKernel;
+
+ Context(const Self* self, int num_threads, Scalar* buffer, Index tm, Index tn,
+ Index tk, Index bm, Index bn, Index bk, Index nm, Index nn, Index nk,
+ Index gm, Index gn, Index nm0, Index nn0, bool shard_by_col,
bool parallel_pack)
- : device_(device),
- lhs_(lhs),
- rhs_(rhs),
+ : device_(self->m_device),
+ lhs_(self->m_leftImpl, self->m_left_nocontract_strides,
+ self->m_i_strides, self->m_left_contracting_strides,
+ self->m_k_strides),
+ rhs_(self->m_rightImpl, self->m_right_nocontract_strides,
+ self->m_j_strides, self->m_right_contracting_strides,
+ self->m_k_strides),
buffer_(buffer),
output_(buffer, tm),
+ output_kernel_(self->m_output_kernel),
+ tensor_contraction_params_(self->m_tensor_contraction_params),
num_threads_(num_threads),
shard_by_col_(shard_by_col),
parallel_pack_(parallel_pack),
@@ -350,7 +290,7 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
// Normal number of notifications for k slice switch is
// nm_ + nn_ + nm_ * nn_. However, first P - 1 slices will receive only
// nm_ + nn_ notifications, because they will not receive notifications
- // from preceeding kernels.
+ // from preceding kernels.
state_switch_[x] =
x == 0
? 1
@@ -377,7 +317,7 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
divup<size_t>(bm_ * bk_ * sizeof(LhsScalar), align) * align;
size_t rhs_size =
divup<size_t>(bn_ * bk_ * sizeof(RhsScalar), align) * align;
- packed_mem_ = static_cast<char*>(internal::aligned_malloc(
+ packed_mem_ = static_cast<char*>(device_.allocate(
(nm0_ * lhs_size + nn0_ * rhs_size) * std::min<size_t>(nk_, P - 1)));
char* mem = static_cast<char*>(packed_mem_);
for (Index x = 0; x < numext::mini<Index>(nk_, P - 1); x++) {
@@ -399,7 +339,7 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
for (Index m = 0; m < nm_; m++) delete[] state_kernel_[x][m];
delete[] state_kernel_[x];
}
- internal::aligned_free(packed_mem_);
+ device_.deallocate(packed_mem_);
}
void run() {
@@ -416,10 +356,12 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
private:
Notification done_;
const Device& device_;
- LhsMapper& lhs_;
- RhsMapper& rhs_;
+ LhsMapper lhs_;
+ RhsMapper rhs_;
Scalar* const buffer_;
OutputMapper output_;
+ OutputKernelType output_kernel_;
+ TensorContractionParams tensor_contraction_params_;
const int num_threads_;
const bool shard_by_col_;
const bool parallel_pack_;
@@ -530,25 +472,38 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
void kernel(Index m, Index n, Index k) {
// Note: order of iteration matters here. Iteration over m is innermost
- // because we want to reuse the same packed rhs in consequetive tasks
+ // because we want to reuse the same packed rhs in consecutive tasks
// (rhs fits into L2$ while lhs only into L3$).
const Index nend = n * gn_ + gn(n);
const Index mend = m * gm_ + gm(m);
if (shard_by_col_) {
for (Index n1 = n * gn_; n1 < nend; n1++) {
- for (Index m1 = m * gm_; m1 < mend; m1++)
- GebpKernel()(output_.getSubMapper(m1 * bm_, n1 * bn_),
- packed_lhs_[k % (P - 1)][m1],
+ for (Index m1 = m * gm_; m1 < mend; m1++) {
+ const auto output_mapper = output_.getSubMapper(m1 * bm_, n1 * bn_);
+ GebpKernel()(output_mapper, packed_lhs_[k % (P - 1)][m1],
packed_rhs_[k % (P - 1)][n1], bm(m1), bk(k), bn(n1),
Scalar(1), -1, -1, 0, 0);
+
+ // We are done with the last task for the [m1, n1] block.
+ if (k + 1 == nk_) {
+ output_kernel_(output_mapper, tensor_contraction_params_,
+ m1 * bm_, n1 * bn_, bm(m1), bn(n1));
+ }
+ }
}
} else {
for (Index m1 = m * gm_; m1 < mend; m1++)
for (Index n1 = n * gn_; n1 < nend; n1++) {
- GebpKernel()(output_.getSubMapper(m1 * bm_, n1 * bn_),
- packed_lhs_[k % (P - 1)][m1],
+ const auto output_mapper = output_.getSubMapper(m1 * bm_, n1 * bn_);
+ GebpKernel()(output_mapper, packed_lhs_[k % (P - 1)][m1],
packed_rhs_[k % (P - 1)][n1], bm(m1), bk(k), bn(n1),
Scalar(1), -1, -1, 0, 0);
+
+ // We are done with the last task for the [m1, n1] block.
+ if (k + 1 == nk_) {
+ output_kernel_(output_mapper, tensor_contraction_params_,
+ m1 * bm_, n1 * bn_, bm(m1), bn(n1));
+ }
}
}
signal_kernel(m, n, k + 1, false);
@@ -623,11 +578,13 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
else
pack_lhs(start, k);
} else {
- Index mid = (start + end) / 2;
- device_.enqueueNoNotification(
- [=]() { enqueue_packing_helper(mid, end, k, rhs); });
- device_.enqueueNoNotification(
- [=]() { enqueue_packing_helper(start, mid, k, rhs); });
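+        // Hand the upper half of each range to the pool and keep narrowing the
+        // lower half; the final single block is packed by this thread instead
+        // of being re-enqueued.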
+ while (end - start > 1) {
+ Index mid = (start + end) / 2;
+ device_.enqueueNoNotification(
+ [=]() { enqueue_packing_helper(mid, end, k, rhs); });
+ end = mid;
+ }
+ enqueue_packing_helper(start, end, k, rhs);
}
}
@@ -746,284 +703,6 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
return 0;
}
-#else // EIGEN_USE_SIMPLE_THREAD_POOL
-
- template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
- void evalProduct(Scalar* buffer) const {
- if (this->m_j_size == 1) {
- this->template evalGemv<lhs_inner_dim_contiguous, rhs_inner_dim_contiguous, rhs_inner_dim_reordered, Alignment>(buffer);
- return;
- }
-
- evalGemm<lhs_inner_dim_contiguous, rhs_inner_dim_contiguous, rhs_inner_dim_reordered, Alignment>(buffer);
- }
-
- template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
- void evalGemm(Scalar* buffer) const {
- // columns in left side, rows in right side
- const Index k = this->m_k_size;
-
- // rows in left side
- const Index m = this->m_i_size;
-
- // columns in right side
- const Index n = this->m_j_size;
-
- // zero out the result buffer (which must be of size at least m * n * sizeof(Scalar)
- this->m_device.memset(buffer, 0, m * n * sizeof(Scalar));
-
-
- const int lhs_packet_size = internal::unpacket_traits<typename LeftEvaluator::PacketReturnType>::size;
- const int rhs_packet_size = internal::unpacket_traits<typename RightEvaluator::PacketReturnType>::size;
-
- typedef internal::TensorContractionInputMapper<LhsScalar, Index, internal::Lhs,
- LeftEvaluator, left_nocontract_t,
- contract_t, lhs_packet_size,
- lhs_inner_dim_contiguous,
- false, Unaligned> LhsMapper;
-
- typedef internal::TensorContractionInputMapper<RhsScalar, Index, internal::Rhs,
- RightEvaluator, right_nocontract_t,
- contract_t, rhs_packet_size,
- rhs_inner_dim_contiguous,
- rhs_inner_dim_reordered, Unaligned> RhsMapper;
-
- typedef internal::blas_data_mapper<Scalar, Index, ColMajor> OutputMapper;
-
- // TODO: packing could be faster sometimes if we supported row major tensor mappers
- typedef internal::gemm_pack_lhs<LhsScalar, Index, typename LhsMapper::SubMapper, Traits::mr,
- Traits::LhsProgress, ColMajor> LhsPacker;
- typedef internal::gemm_pack_rhs<RhsScalar, Index, typename RhsMapper::SubMapper, Traits::nr, ColMajor> RhsPacker;
-
- // TODO: replace false, false with conjugate values?
- typedef internal::gebp_kernel<LhsScalar, RhsScalar, Index, OutputMapper,
- Traits::mr, Traits::nr, false, false> GebpKernel;
-
- typedef internal::packLhsArg<LhsScalar, LhsMapper, Index> packLArg;
- typedef internal::packRhsAndKernelArg<LhsScalar, RhsScalar, RhsMapper, OutputMapper, Index> packRKArg;
-
- // initialize data mappers
- LhsMapper lhs(this->m_leftImpl, this->m_left_nocontract_strides, this->m_i_strides,
- this->m_left_contracting_strides, this->m_k_strides);
-
- RhsMapper rhs(this->m_rightImpl, this->m_right_nocontract_strides, this->m_j_strides,
- this->m_right_contracting_strides, this->m_k_strides);
-
- OutputMapper output(buffer, m);
-
- // compute block sizes (which depend on number of threads)
- const Index num_threads = this->m_device.numThreads();
- internal::TensorContractionBlocking<LhsMapper, RhsMapper, Index, internal::ShardByCol> blocking(k, m, n, num_threads);
- Index mc = blocking.mc();
- Index nc = blocking.nc();
- Index kc = blocking.kc();
- eigen_assert(mc <= m);
- eigen_assert(nc <= n);
- eigen_assert(kc <= k);
-
-#define CEIL_DIV(a, b) (((a) + (b) - 1) / (b))
- const Index k_blocks = CEIL_DIV(k, kc);
- const Index n_blocks = CEIL_DIV(n, nc);
- const Index m_blocks = CEIL_DIV(m, mc);
- const Index sizeA = mc * kc;
- const Index sizeB = kc * nc;
-
- /* cout << "m: " << m << " n: " << n << " k: " << k << endl;
- cout << "mc: " << mc << " nc: " << nc << " kc: " << kc << endl;
- cout << "m_blocks: " << m_blocks << " n_blocks: " << n_blocks << " k_blocks: " << k_blocks << endl;
- cout << "num threads: " << num_threads << endl;
- */
-
- // note: m_device.allocate should return 16 byte aligned pointers, but if blockA and blockB
- // aren't 16 byte aligned segfaults will happen due to SIMD instructions
- // note: You can get away with allocating just a single blockA and offsets and meet the
- // the alignment requirements with the assumption that
- // (Traits::mr * sizeof(ResScalar)) % 16 == 0
- const Index numBlockAs = numext::mini(num_threads, m_blocks);
- MaxSizeVector<LhsScalar *> blockAs(num_threads);
- for (int i = 0; i < num_threads; i++) {
- blockAs.push_back(static_cast<LhsScalar *>(this->m_device.allocate(sizeA * sizeof(LhsScalar))));
- }
-
- // To circumvent alignment issues, I'm just going to separately allocate the memory for each thread
- // TODO: is this too much memory to allocate? This simplifies coding a lot, but is wasteful.
- // Other options: (1) reuse memory when a thread finishes. con: tricky
- // (2) allocate block B memory in each thread. con: overhead
- MaxSizeVector<RhsScalar *> blockBs(n_blocks);
- for (int i = 0; i < n_blocks; i++) {
- blockBs.push_back(static_cast<RhsScalar *>(this->m_device.allocate(sizeB * sizeof(RhsScalar))));
- }
-
- // lhs_notifications starts with all null Notifications
- MaxSizeVector<Notification*> lhs_notifications(num_threads, nullptr);
-
- // this should really be numBlockAs * n_blocks;
- const Index num_kernel_notifications = num_threads * n_blocks;
- MaxSizeVector<Notification*> kernel_notifications(num_kernel_notifications,
- nullptr);
-
- for (Index k_block_idx = 0; k_block_idx < k_blocks; k_block_idx++) {
- const Index k_start = k_block_idx * kc;
- // make sure we don't overshoot right edge of left matrix
- const Index actual_kc = numext::mini(k_start + kc, k) - k_start;
-
- for (Index m_block_idx = 0; m_block_idx < m_blocks; m_block_idx += numBlockAs) {
- const Index num_blocks = numext::mini(m_blocks-m_block_idx, numBlockAs);
-
- for (Index mt_block_idx = m_block_idx; mt_block_idx < m_block_idx+num_blocks; mt_block_idx++) {
- const Index m_start = mt_block_idx * mc;
- const Index actual_mc = numext::mini(m_start + mc, m) - m_start;
- eigen_assert(actual_mc > 0);
-
- Index blockAId = (k_block_idx * m_blocks + mt_block_idx) % num_threads;
-
- for (int i = 0; i < n_blocks; ++i) {
- Index notification_id = (blockAId * n_blocks + i);
- // Wait for any current kernels using this slot to complete
- // before using it.
- if (kernel_notifications[notification_id]) {
- wait_until_ready(kernel_notifications[notification_id]);
- delete kernel_notifications[notification_id];
- }
- kernel_notifications[notification_id] = new Notification();
- }
- const packLArg arg = {
- blockAs[blockAId], // blockA
- lhs, // lhs
- m_start, // m
- k_start, // k
- actual_mc, // mc
- actual_kc, // kc
- };
-
- // Delete any existing notification since we may be
- // replacing it. The algorithm should ensure that there are
- // no existing waiters on this notification.
- delete lhs_notifications[blockAId];
- lhs_notifications[blockAId] =
- this->m_device.enqueue(&Self::packLhs<packLArg, LhsPacker>, arg);
- }
-
- // now start kernels.
- const Index m_base_start = m_block_idx * mc;
- const bool need_to_pack = m_block_idx == 0;
-
- for (Index n_block_idx = 0; n_block_idx < n_blocks; n_block_idx++) {
- const Index n_start = n_block_idx * nc;
- const Index actual_nc = numext::mini(n_start + nc, n) - n_start;
-
- // first make sure the previous kernels are all done before overwriting rhs. Also wait if
- // we're going to start new k. In both cases need_to_pack is true.
- if (need_to_pack) {
- for (Index i = num_blocks; i < num_threads; ++i) {
- Index blockAId = (k_block_idx * m_blocks + i + m_block_idx) % num_threads;
- Index future_id = (blockAId * n_blocks + n_block_idx);
- wait_until_ready(kernel_notifications[future_id]);
- }
- }
-
- packRKArg arg = {
- &blockAs, // blockA
- blockBs[n_block_idx], // blockB
- rhs, // rhs
- output, // output
- m_base_start, // m
- k_start, // k
- n_start, // n
- mc, // mc
- actual_kc, // kc
- actual_nc, // nc
- num_threads,
- numBlockAs,
- m,
- k_block_idx,
- m_block_idx,
- n_block_idx, // n_block_idx
- m_blocks, // m_blocks
- n_blocks, // n_blocks
- &kernel_notifications, // kernel notifications
- &lhs_notifications, // lhs notifications
- need_to_pack, // need_to_pack
- };
-
- // We asynchronously kick off this function, which ends up
- // notifying the appropriate kernel_notifications objects,
- // which this thread waits on before exiting.
- this->m_device.enqueueNoNotification(&Self::packRhsAndKernel<packRKArg, RhsPacker, GebpKernel>, arg);
- }
- }
- }
-
- // Make sure all the kernels are done.
- for (size_t i = 0; i < kernel_notifications.size(); ++i) {
- wait_until_ready(kernel_notifications[i]);
- delete kernel_notifications[i];
- }
-
- // No need to wait for lhs notifications since they should have
- // already been waited on. Just clean them up.
- for (size_t i = 0; i < lhs_notifications.size(); ++i) {
- delete lhs_notifications[i];
- }
-
- // deallocate all of the memory for both A and B's
- for (size_t i = 0; i < blockAs.size(); i++) {
- this->m_device.deallocate(blockAs[i]);
- }
- for (size_t i = 0; i < blockBs.size(); i++) {
- this->m_device.deallocate(blockBs[i]);
- }
-
-#undef CEIL_DIV
- }
-
- /*
- * Packs a LHS block of size (mt, kc) starting at lhs(m, k). Before packing
- * the LHS block, check that all of the kernels that worked on the same
- * mt_block_idx in the previous m_block are done.
- */
- template <typename packLArg, typename LhsPacker>
- static void packLhs(const packLArg arg) {
- // perform actual packing
- LhsPacker pack_lhs;
- pack_lhs(arg.blockA, arg.lhs.getSubMapper(arg.m_start, arg.k_start), arg.kc, arg.mc);
- }
-
- /*
- * Packs a RHS block of size (kc, nc) starting at (k, n) after checking that
- * all kernels in the previous block are done.
- * Then for each LHS future, we wait on the future and then call GEBP
- * on the area packed by the future (which starts at
- * blockA + future_idx * mt * kc) on the LHS and with the full packed
- * RHS block.
- * The output of this GEBP is written to output(m + i * mt, n).
- */
- template <typename packRKArg, typename RhsPacker, typename GebpKernel>
- static void packRhsAndKernel(packRKArg arg) {
- if (arg.need_to_pack) {
- RhsPacker pack_rhs;
- pack_rhs(arg.blockB, arg.rhs.getSubMapper(arg.k, arg.n), arg.kc, arg.nc);
- }
-
- GebpKernel gebp;
- for (Index mt_block_idx = 0; mt_block_idx < arg.num_blockAs; mt_block_idx++) {
- const Index m_base_start = arg.m + arg.mc*mt_block_idx;
- if (m_base_start < arg.max_m) {
- Index blockAId = (arg.k_block_idx * arg.m_blocks + mt_block_idx + arg.m_block_idx) % arg.num_threads;
- wait_until_ready((*arg.lhs_notifications)[blockAId]);
- const Index actual_mc = numext::mini(m_base_start + arg.mc, arg.max_m) - m_base_start;
- gebp(arg.output.getSubMapper(m_base_start, arg.n),
- (*arg.blockAs)[blockAId], arg.blockB,
- actual_mc, arg.kc, arg.nc, Scalar(1), -1, -1, 0, 0);
-
- // Notify that the kernel is done.
- const Index set_idx = blockAId * arg.n_blocks + arg.n_block_idx;
- (*arg.kernel_notifications)[set_idx]->Notify();
- }
- }
- }
-#endif // EIGEN_USE_SIMPLE_THREAD_POOL
-
TensorOpCost contractionCost(Index m, Index n, Index bm, Index bn, Index bk,
bool shard_by_col, bool prepacked) const {
const int packed_size = std::min<int>(PacketType<LhsScalar, Device>::size,
@@ -1065,6 +744,10 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
}
#if defined(EIGEN_VECTORIZE_AVX) && defined(EIGEN_USE_LIBXSMM)
+ // TODO(ezhulenev): Add support for output kernels and LIBXSMM.
+ static_assert(std::is_same<OutputKernelType, const NoOpOutputKernel>::value,
+ "XSMM does not support contraction output kernels.");
+
template<int Alignment>
class ContextXsmm {
public:
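// Usage sketch for the OutputKernelType plumbing above: an output kernel is a
// functor invoked once per finished output block on the thread-pool path
// (GpuDevice and SyclDevice static_assert that only NoOpOutputKernel is used).
// A minimal example, assuming the three-argument
// contract(other, dims, output_kernel) overload added alongside
// NoOpOutputKernel; ReluOutputKernel is a hypothetical name.

#define EIGEN_USE_THREADS
#include <unsupported/Eigen/CXX11/Tensor>
#include <algorithm>

// Clamps every element of a finished [num_rows x num_cols] output block to be
// non-negative; called right after the last GEBP task for that block.
struct ReluOutputKernel {
  template <typename OutputMapper, typename ContractionParams, typename Index>
  void operator()(const OutputMapper& output, const ContractionParams& /*params*/,
                  Index /*i*/, Index /*j*/, Index num_rows, Index num_cols) const {
    for (Index c = 0; c < num_cols; ++c)
      for (Index r = 0; r < num_rows; ++r)
        output(r, c) = std::max(output(r, c), 0.0f);
  }
};

void contract_with_output_kernel() {
  Eigen::ThreadPool pool(4);
  Eigen::ThreadPoolDevice dev(&pool, /*num_cores=*/4);

  Eigen::Tensor<float, 2> lhs(128, 64), rhs(64, 96), out(128, 96);
  lhs.setRandom();
  rhs.setRandom();

  // Contract dimension 1 of lhs with dimension 0 of rhs (a matrix product) and
  // run the ReLU kernel on each output block as it completes.
  Eigen::array<Eigen::IndexPair<int>, 1> dims = {{Eigen::IndexPair<int>(1, 0)}};
  out.device(dev) = lhs.contract(rhs, dims, ReluOutputKernel());
}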
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorConversion.h b/unsupported/Eigen/CXX11/src/Tensor/TensorConversion.h
index b29968b63..1f613d3c7 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorConversion.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorConversion.h
@@ -32,6 +32,7 @@ struct traits<TensorConversionOp<TargetType, XprType> >
static const int NumDimensions = traits<XprType>::NumDimensions;
static const int Layout = traits<XprType>::Layout;
enum { Flags = 0 };
+ typedef typename TypeConversion<Scalar, typename traits<XprType>::PointerType>::type PointerType;
};
template<typename TargetType, typename XprType>
@@ -189,11 +190,13 @@ struct TensorEvaluator<const TensorConversionOp<TargetType, ArgType>, Device>
typedef typename internal::remove_all<typename internal::traits<ArgType>::Scalar>::type SrcType;
typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
typedef typename PacketType<SrcType, Device>::type PacketSourceType;
- static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
+ static const int PacketSize = PacketType<CoeffReturnType, Device>::size;
enum {
IsAligned = false,
PacketAccess = true,
+ BlockAccess = false,
+ PreferBlockAccess = false,
Layout = TensorEvaluator<ArgType, Device>::Layout,
RawAccess = false
};
@@ -244,7 +247,7 @@ struct TensorEvaluator<const TensorConversionOp<TargetType, ArgType>, Device>
}
}
- EIGEN_DEVICE_FUNC Scalar* data() const { return NULL; }
+ EIGEN_DEVICE_FUNC typename Eigen::internal::traits<XprType>::PointerType data() const { return NULL; }
/// required by sycl in order to extract the sycl accessor
const TensorEvaluator<ArgType, Device>& impl() const { return m_impl; }
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorConvolution.h b/unsupported/Eigen/CXX11/src/Tensor/TensorConvolution.h
index 378f5cccb..2d0e6599f 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorConvolution.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorConvolution.h
@@ -54,8 +54,8 @@ class IndexMapper {
}
}
- array<Index, NumDims> cudaInputDimensions;
- array<Index, NumDims> cudaOutputDimensions;
+ array<Index, NumDims> gpuInputDimensions;
+ array<Index, NumDims> gpuOutputDimensions;
array<Index, NumDims> tmp = dimensions;
array<Index, NumDims> ordering;
const size_t offset = static_cast<int>(Layout) == static_cast<int>(ColMajor)
@@ -65,8 +65,8 @@ class IndexMapper {
const Index index = i + offset;
ordering[index] = indices[i];
tmp[indices[i]] = -1;
- cudaInputDimensions[index] = input_dims[indices[i]];
- cudaOutputDimensions[index] = dimensions[indices[i]];
+ gpuInputDimensions[index] = input_dims[indices[i]];
+ gpuOutputDimensions[index] = dimensions[indices[i]];
}
int written = static_cast<int>(Layout) == static_cast<int>(ColMajor)
@@ -75,8 +75,8 @@ class IndexMapper {
for (int i = 0; i < NumDims; ++i) {
if (tmp[i] >= 0) {
ordering[written] = i;
- cudaInputDimensions[written] = input_dims[i];
- cudaOutputDimensions[written] = dimensions[i];
+ gpuInputDimensions[written] = input_dims[i];
+ gpuOutputDimensions[written] = dimensions[i];
++written;
}
}
@@ -89,37 +89,37 @@ class IndexMapper {
if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
for (int i = 0; i < NumDims; ++i) {
if (i > NumKernelDims) {
- m_cudaInputStrides[i] =
- m_cudaInputStrides[i - 1] * cudaInputDimensions[i - 1];
- m_cudaOutputStrides[i] =
- m_cudaOutputStrides[i - 1] * cudaOutputDimensions[i - 1];
+ m_gpuInputStrides[i] =
+ m_gpuInputStrides[i - 1] * gpuInputDimensions[i - 1];
+ m_gpuOutputStrides[i] =
+ m_gpuOutputStrides[i - 1] * gpuOutputDimensions[i - 1];
} else {
- m_cudaInputStrides[i] = 1;
- m_cudaOutputStrides[i] = 1;
+ m_gpuInputStrides[i] = 1;
+ m_gpuOutputStrides[i] = 1;
}
}
} else {
for (int i = NumDims - 1; i >= 0; --i) {
if (static_cast<size_t>(i + 1) < offset) {
- m_cudaInputStrides[i] =
- m_cudaInputStrides[i + 1] * cudaInputDimensions[i + 1];
- m_cudaOutputStrides[i] =
- m_cudaOutputStrides[i + 1] * cudaOutputDimensions[i + 1];
+ m_gpuInputStrides[i] =
+ m_gpuInputStrides[i + 1] * gpuInputDimensions[i + 1];
+ m_gpuOutputStrides[i] =
+ m_gpuOutputStrides[i + 1] * gpuOutputDimensions[i + 1];
} else {
- m_cudaInputStrides[i] = 1;
- m_cudaOutputStrides[i] = 1;
+ m_gpuInputStrides[i] = 1;
+ m_gpuOutputStrides[i] = 1;
}
}
}
}
- EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Index mapCudaInputPlaneToTensorInputOffset(Index p) const {
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Index mapGpuInputPlaneToTensorInputOffset(Index p) const {
Index inputIndex = 0;
if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
for (int d = NumDims - 1; d > NumKernelDims; --d) {
- const Index idx = p / m_cudaInputStrides[d];
+ const Index idx = p / m_gpuInputStrides[d];
inputIndex += idx * m_inputStrides[d];
- p -= idx * m_cudaInputStrides[d];
+ p -= idx * m_gpuInputStrides[d];
}
inputIndex += p * m_inputStrides[NumKernelDims];
} else {
@@ -128,22 +128,22 @@ class IndexMapper {
limit = NumDims - NumKernelDims - 1;
}
for (int d = 0; d < limit; ++d) {
- const Index idx = p / m_cudaInputStrides[d];
+ const Index idx = p / m_gpuInputStrides[d];
inputIndex += idx * m_inputStrides[d];
- p -= idx * m_cudaInputStrides[d];
+ p -= idx * m_gpuInputStrides[d];
}
inputIndex += p * m_inputStrides[limit];
}
return inputIndex;
}
- EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Index mapCudaOutputPlaneToTensorOutputOffset(Index p) const {
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Index mapGpuOutputPlaneToTensorOutputOffset(Index p) const {
Index outputIndex = 0;
if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
for (int d = NumDims - 1; d > NumKernelDims; --d) {
- const Index idx = p / m_cudaOutputStrides[d];
+ const Index idx = p / m_gpuOutputStrides[d];
outputIndex += idx * m_outputStrides[d];
- p -= idx * m_cudaOutputStrides[d];
+ p -= idx * m_gpuOutputStrides[d];
}
outputIndex += p * m_outputStrides[NumKernelDims];
} else {
@@ -152,44 +152,44 @@ class IndexMapper {
limit = NumDims - NumKernelDims - 1;
}
for (int d = 0; d < limit; ++d) {
- const Index idx = p / m_cudaOutputStrides[d];
+ const Index idx = p / m_gpuOutputStrides[d];
outputIndex += idx * m_outputStrides[d];
- p -= idx * m_cudaOutputStrides[d];
+ p -= idx * m_gpuOutputStrides[d];
}
outputIndex += p * m_outputStrides[limit];
}
return outputIndex;
}
- EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Index mapCudaInputKernelToTensorInputOffset(Index i) const {
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Index mapGpuInputKernelToTensorInputOffset(Index i) const {
const size_t offset = static_cast<int>(Layout) == static_cast<int>(ColMajor)
? 0
: NumDims - NumKernelDims;
return i * m_inputStrides[offset];
}
- EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Index mapCudaOutputKernelToTensorOutputOffset(Index i) const {
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Index mapGpuOutputKernelToTensorOutputOffset(Index i) const {
const size_t offset = static_cast<int>(Layout) == static_cast<int>(ColMajor)
? 0
: NumDims - NumKernelDims;
return i * m_outputStrides[offset];
}
- EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Index mapCudaInputKernelToTensorInputOffset(Index i, Index j) const {
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Index mapGpuInputKernelToTensorInputOffset(Index i, Index j) const {
const size_t offset = static_cast<int>(Layout) == static_cast<int>(ColMajor)
? 0
: NumDims - NumKernelDims;
return i * m_inputStrides[offset] + j * m_inputStrides[offset + 1];
}
- EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Index mapCudaOutputKernelToTensorOutputOffset(Index i, Index j) const {
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Index mapGpuOutputKernelToTensorOutputOffset(Index i, Index j) const {
const size_t offset = static_cast<int>(Layout) == static_cast<int>(ColMajor)
? 0
: NumDims - NumKernelDims;
return i * m_outputStrides[offset] + j * m_outputStrides[offset + 1];
}
- EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Index mapCudaInputKernelToTensorInputOffset(Index i, Index j, Index k) const {
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Index mapGpuInputKernelToTensorInputOffset(Index i, Index j, Index k) const {
const size_t offset = static_cast<int>(Layout) == static_cast<int>(ColMajor)
? 0
: NumDims - NumKernelDims;
@@ -197,7 +197,7 @@ class IndexMapper {
k * m_inputStrides[offset + 2];
}
- EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Index mapCudaOutputKernelToTensorOutputOffset(Index i, Index j, Index k) const {
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Index mapGpuOutputKernelToTensorOutputOffset(Index i, Index j, Index k) const {
const size_t offset = static_cast<int>(Layout) == static_cast<int>(ColMajor)
? 0
: NumDims - NumKernelDims;
@@ -209,8 +209,8 @@ class IndexMapper {
static const int NumDims = internal::array_size<InputDims>::value;
array<Index, NumDims> m_inputStrides;
array<Index, NumDims> m_outputStrides;
- array<Index, NumDims> m_cudaInputStrides;
- array<Index, NumDims> m_cudaOutputStrides;
+ array<Index, NumDims> m_gpuInputStrides;
+ array<Index, NumDims> m_gpuOutputStrides;
};
@@ -231,6 +231,8 @@ struct traits<TensorConvolutionOp<Dimensions, InputXprType, KernelXprType> >
typedef typename remove_reference<RhsNested>::type _RhsNested;
static const int NumDimensions = traits<InputXprType>::NumDimensions;
static const int Layout = traits<InputXprType>::Layout;
+ typedef typename conditional<Pointer_type_promotion<typename InputXprType::Scalar, Scalar>::val,
+ typename traits<InputXprType>::PointerType, typename traits<KernelXprType>::PointerType>::type PointerType;
enum {
Flags = 0
@@ -300,11 +302,13 @@ struct TensorEvaluator<const TensorConvolutionOp<Indices, InputArgType, KernelAr
typedef typename XprType::Scalar Scalar;
typedef typename XprType::CoeffReturnType CoeffReturnType;
typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
- static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
+ static const int PacketSize = PacketType<CoeffReturnType, Device>::size;
enum {
IsAligned = TensorEvaluator<InputArgType, Device>::IsAligned & TensorEvaluator<KernelArgType, Device>::IsAligned,
PacketAccess = TensorEvaluator<InputArgType, Device>::PacketAccess & TensorEvaluator<KernelArgType, Device>::PacketAccess,
+ BlockAccess = false,
+ PreferBlockAccess = false,
Layout = TensorEvaluator<InputArgType, Device>::Layout,
CoordAccess = false, // to be implemented
RawAccess = false
@@ -465,7 +469,7 @@ struct TensorEvaluator<const TensorConvolutionOp<Indices, InputArgType, KernelAr
PacketSize));
}
- EIGEN_DEVICE_FUNC Scalar* data() const { return NULL; }
+ EIGEN_DEVICE_FUNC typename Eigen::internal::traits<XprType>::PointerType data() const { return NULL; }
private:
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index firstInput(Index index) const {
@@ -524,8 +528,8 @@ struct TensorEvaluator<const TensorConvolutionOp<Indices, InputArgType, KernelAr
Scalar* local = (Scalar*)m_device.allocate(kernel_sz);
typedef TensorEvalToOp<const KernelArgType> EvalTo;
EvalTo evalToTmp(local, m_kernelArg);
- const bool PacketAccess = internal::IsVectorizable<Device, KernelArgType>::value;
- internal::TensorExecutor<const EvalTo, Device, PacketAccess>::run(evalToTmp, m_device);
+ const bool Vectorize = internal::IsVectorizable<Device, KernelArgType>::value;
+ internal::TensorExecutor<const EvalTo, Device, Vectorize>::run(evalToTmp, m_device);
m_kernel = local;
m_local_kernel = true;
@@ -551,7 +555,7 @@ struct TensorEvaluator<const TensorConvolutionOp<Indices, InputArgType, KernelAr
// Use an optimized implementation of the evaluation code for GPUs whenever possible.
-#if defined(EIGEN_USE_GPU) && defined(__CUDACC__)
+#if defined(EIGEN_USE_GPU) && defined(EIGEN_GPUCC)
template <int StaticKernelSize>
struct GetKernelSize {
@@ -574,7 +578,11 @@ __global__ void EigenConvolutionKernel1D(
indexMapper,
const float* __restrict kernel, const int numPlanes, const int numX,
const int maxX, const int kernelSize, float* buffer) {
+#if defined(EIGEN_HIPCC)
+ HIP_DYNAMIC_SHARED(float, s)
+#else
extern __shared__ float s[];
+#endif
const int first_x = blockIdx.x * maxX;
const int last_x = (first_x + maxX < numX ? first_x + maxX : numX) - 1;
@@ -586,18 +594,18 @@ __global__ void EigenConvolutionKernel1D(
for (int p = first_plane + threadIdx.y; p < numPlanes; p += plane_stride) {
// Load inputs to shared memory
- const int plane_input_offset = indexMapper.mapCudaInputPlaneToTensorInputOffset(p);
+ const int plane_input_offset = indexMapper.mapGpuInputPlaneToTensorInputOffset(p);
const int plane_kernel_offset = threadIdx.y * num_x_input;
#pragma unroll
for (int i = threadIdx.x; i < num_x_input; i += blockDim.x) {
- const int tensor_index = plane_input_offset + indexMapper.mapCudaInputKernelToTensorInputOffset(i+first_x);
+ const int tensor_index = plane_input_offset + indexMapper.mapGpuInputKernelToTensorInputOffset(i+first_x);
s[i + plane_kernel_offset] = eval.coeff(tensor_index);
}
__syncthreads();
// Compute the convolution
- const int plane_output_offset = indexMapper.mapCudaOutputPlaneToTensorOutputOffset(p);
+ const int plane_output_offset = indexMapper.mapGpuOutputPlaneToTensorOutputOffset(p);
#pragma unroll
for (int i = threadIdx.x; i < num_x_output; i += blockDim.x) {
@@ -607,7 +615,7 @@ __global__ void EigenConvolutionKernel1D(
for (int k = 0; k < GetKernelSize<StaticKernelSize>()(kernelSize); ++k) {
result += s[k + kernel_offset] * kernel[k];
}
- const int tensor_index = plane_output_offset + indexMapper.mapCudaOutputKernelToTensorOutputOffset(i+first_x);
+ const int tensor_index = plane_output_offset + indexMapper.mapGpuOutputKernelToTensorOutputOffset(i+first_x);
buffer[tensor_index] = result;
}
__syncthreads();
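Both the HIP and CUDA branches above declare the same dynamically sized shared-memory array; its byte count is supplied at launch time (the shared_mem value threaded through the LAUNCH_GPU_KERNEL calls later in this file). A hedged, standalone CUDA-style illustration of that pattern, using a hypothetical kernel that is not part of this patch:

// Illustration only: the extern __shared__ array has no compile-time size; the
// third <<<...>>> launch argument (or the sharedmem parameter of the launch macro)
// provides the number of bytes per block.
__global__ void scale_kernel(const float* in, float* out, int n, float factor) {
  extern __shared__ float tile[];              // sized by the launch configuration
  const int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) {
    tile[threadIdx.x] = in[i];
    __syncthreads();
    out[i] = tile[threadIdx.x] * factor;
  }
}

// Usage sketch: reserve blockDim.x floats of dynamic shared memory per block.
// scale_kernel<<<num_blocks, threads_per_block, threads_per_block * sizeof(float), stream>>>(in, out, n, 2.0f);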
@@ -623,7 +631,11 @@ __global__ void EigenConvolutionKernel2D(
const float* __restrict kernel, const int numPlanes, const int numX,
const int maxX, const int numY, const int maxY, const int kernelSizeX,
const int kernelSizeY, float* buffer) {
+#if defined(EIGEN_HIPCC)
+ HIP_DYNAMIC_SHARED(float, s)
+#else
extern __shared__ float s[];
+#endif
const int first_x = blockIdx.x * maxX;
const int last_x = (first_x + maxX < numX ? first_x + maxX : numX) - 1;
@@ -640,7 +652,7 @@ __global__ void EigenConvolutionKernel2D(
for (int p = first_plane + threadIdx.z; p < numPlanes; p += plane_stride) {
- const int plane_input_offset = indexMapper.mapCudaInputPlaneToTensorInputOffset(p);
+ const int plane_input_offset = indexMapper.mapGpuInputPlaneToTensorInputOffset(p);
const int plane_kernel_offset = threadIdx.z * num_y_input;
// Load inputs to shared memory
@@ -649,7 +661,7 @@ __global__ void EigenConvolutionKernel2D(
const int input_offset = num_x_input * (j + plane_kernel_offset);
#pragma unroll
for (int i = threadIdx.x; i < num_x_input; i += blockDim.x) {
- const int tensor_index = plane_input_offset + indexMapper.mapCudaInputKernelToTensorInputOffset(i+first_x, j+first_y);
+ const int tensor_index = plane_input_offset + indexMapper.mapGpuInputKernelToTensorInputOffset(i+first_x, j+first_y);
s[i + input_offset] = eval.coeff(tensor_index);
}
}
@@ -657,7 +669,7 @@ __global__ void EigenConvolutionKernel2D(
__syncthreads();
// Convolution
- const int plane_output_offset = indexMapper.mapCudaOutputPlaneToTensorOutputOffset(p);
+ const int plane_output_offset = indexMapper.mapGpuOutputPlaneToTensorOutputOffset(p);
#pragma unroll
for (int j = threadIdx.y; j < num_y_output; j += blockDim.y) {
@@ -673,7 +685,7 @@ __global__ void EigenConvolutionKernel2D(
result += s[k + input_offset] * kernel[k + kernel_offset];
}
}
- const int tensor_index = plane_output_offset + indexMapper.mapCudaOutputKernelToTensorOutputOffset(i+first_x, j+first_y);
+ const int tensor_index = plane_output_offset + indexMapper.mapGpuOutputKernelToTensorOutputOffset(i+first_x, j+first_y);
buffer[tensor_index] = result;
}
}
@@ -691,7 +703,11 @@ __global__ void EigenConvolutionKernel3D(
const size_t maxX, const size_t numY, const size_t maxY, const size_t numZ,
const size_t maxZ, const size_t kernelSizeX, const size_t kernelSizeY,
const size_t kernelSizeZ, float* buffer) {
+#if defined(EIGEN_HIPCC)
+ HIP_DYNAMIC_SHARED(float, s)
+#else
extern __shared__ float s[];
+#endif
// Load inputs to shared memory
const int first_x = blockIdx.x * maxX;
@@ -708,13 +724,13 @@ __global__ void EigenConvolutionKernel3D(
for (int p = 0; p < numPlanes; ++p) {
- const int plane_input_offset = indexMapper.mapCudaInputPlaneToTensorInputOffset(p);
+ const int plane_input_offset = indexMapper.mapGpuInputPlaneToTensorInputOffset(p);
const int plane_kernel_offset = 0;
for (int k = threadIdx.z; k < num_z_input; k += blockDim.z) {
for (int j = threadIdx.y; j < num_y_input; j += blockDim.y) {
for (int i = threadIdx.x; i < num_x_input; i += blockDim.x) {
- const int tensor_index = plane_input_offset + indexMapper.mapCudaInputKernelToTensorInputOffset(i+first_x, j+first_y, k+first_z);
+ const int tensor_index = plane_input_offset + indexMapper.mapGpuInputKernelToTensorInputOffset(i+first_x, j+first_y, k+first_z);
s[i + num_x_input * (j + num_y_input * (k + plane_kernel_offset))] = eval.coeff(tensor_index);
}
}
@@ -726,7 +742,7 @@ __global__ void EigenConvolutionKernel3D(
const int num_z_output = last_z - first_z + 1;
const int num_y_output = last_y - first_y + 1;
const int num_x_output = last_x - first_x + 1;
- const int plane_output_offset = indexMapper.mapCudaOutputPlaneToTensorOutputOffset(p);
+ const int plane_output_offset = indexMapper.mapGpuOutputPlaneToTensorOutputOffset(p);
for (int k = threadIdx.z; k < num_z_output; k += blockDim.z) {
for (int j = threadIdx.y; j < num_y_output; j += blockDim.y) {
@@ -739,7 +755,7 @@ __global__ void EigenConvolutionKernel3D(
}
}
}
- const int tensor_index = plane_output_offset + indexMapper.mapCudaOutputKernelToTensorOutputOffset(i+first_x, j+first_y, k+first_z);
+ const int tensor_index = plane_output_offset + indexMapper.mapGpuOutputKernelToTensorOutputOffset(i+first_x, j+first_y, k+first_z);
buffer[tensor_index] = result;
}
}
@@ -764,13 +780,15 @@ struct TensorEvaluator<const TensorConvolutionOp<Indices, InputArgType, KernelAr
enum {
IsAligned = TensorEvaluator<InputArgType, GpuDevice>::IsAligned & TensorEvaluator<KernelArgType, GpuDevice>::IsAligned,
PacketAccess = false,
+ BlockAccess = false,
+ PreferBlockAccess = false,
Layout = TensorEvaluator<InputArgType, GpuDevice>::Layout,
CoordAccess = false, // to be implemented
RawAccess = false
};
EIGEN_DEVICE_FUNC TensorEvaluator(const XprType& op, const GpuDevice& device)
- : m_inputImpl(op.inputExpression(), device), m_kernelArg(op.kernelExpression()), m_kernelImpl(op.kernelExpression(), device), m_indices(op.indices()), m_buf(NULL), m_kernel(NULL), m_local_kernel(false), m_device(device)
+ : m_inputImpl(op.inputExpression(), device), m_kernelImpl(op.kernelExpression(), device), m_kernelArg(op.kernelExpression()), m_indices(op.indices()), m_buf(NULL), m_kernel(NULL), m_local_kernel(false), m_device(device)
{
EIGEN_STATIC_ASSERT((static_cast<int>(TensorEvaluator<InputArgType, GpuDevice>::Layout) == static_cast<int>(TensorEvaluator<KernelArgType, GpuDevice>::Layout)), YOU_MADE_A_PROGRAMMING_MISTAKE);
@@ -852,9 +870,9 @@ struct TensorEvaluator<const TensorConvolutionOp<Indices, InputArgType, KernelAr
typedef typename TensorEvaluator<InputArgType, GpuDevice>::Dimensions InputDims;
const int maxSharedMem = m_device.sharedMemPerBlock();
- const int maxThreadsPerBlock = m_device.maxCudaThreadsPerBlock();
- const int maxBlocksPerProcessor = m_device.maxCudaThreadsPerMultiProcessor() / maxThreadsPerBlock;
- const int numMultiProcessors = m_device.getNumCudaMultiProcessors();
+ const int maxThreadsPerBlock = m_device.maxGpuThreadsPerBlock();
+ const int maxBlocksPerProcessor = m_device.maxGpuThreadsPerMultiProcessor() / maxThreadsPerBlock;
+ const int numMultiProcessors = m_device.getNumGpuMultiProcessors();
const int warpSize = 32;
switch (NumKernelDims) {
@@ -889,7 +907,7 @@ struct TensorEvaluator<const TensorConvolutionOp<Indices, InputArgType, KernelAr
}
const int shared_mem = block_size.y * (maxX + kernel_size - 1) * sizeof(Scalar);
- assert(shared_mem <= maxSharedMem);
+ gpu_assert(shared_mem <= maxSharedMem);
const int num_x_blocks = ceil(numX, maxX);
const int blocksPerProcessor = numext::mini(maxBlocksPerProcessor, maxSharedMem / shared_mem);
@@ -906,15 +924,15 @@ struct TensorEvaluator<const TensorConvolutionOp<Indices, InputArgType, KernelAr
m_inputImpl.dimensions(), kernel_dims, indices);
switch(kernel_size) {
case 4: {
- LAUNCH_CUDA_KERNEL((EigenConvolutionKernel1D<TensorEvaluator<InputArgType, GpuDevice>, Index, InputDims, 4>), num_blocks, block_size, shared_mem, m_device, m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, 4, data);
+ LAUNCH_GPU_KERNEL((EigenConvolutionKernel1D<TensorEvaluator<InputArgType, GpuDevice>, Index, InputDims, 4>), num_blocks, block_size, shared_mem, m_device, m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, 4, data);
break;
}
case 7: {
- LAUNCH_CUDA_KERNEL((EigenConvolutionKernel1D<TensorEvaluator<InputArgType, GpuDevice>, Index, InputDims, 7>), num_blocks, block_size, shared_mem, m_device, m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, 7, data);
+ LAUNCH_GPU_KERNEL((EigenConvolutionKernel1D<TensorEvaluator<InputArgType, GpuDevice>, Index, InputDims, 7>), num_blocks, block_size, shared_mem, m_device, m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, 7, data);
break;
}
default: {
- LAUNCH_CUDA_KERNEL((EigenConvolutionKernel1D<TensorEvaluator<InputArgType, GpuDevice>, Index, InputDims, Dynamic>), num_blocks, block_size, shared_mem, m_device, m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, kernel_size, data);
+ LAUNCH_GPU_KERNEL((EigenConvolutionKernel1D<TensorEvaluator<InputArgType, GpuDevice>, Index, InputDims, Dynamic>), num_blocks, block_size, shared_mem, m_device, m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, kernel_size, data);
}
}
break;
@@ -946,7 +964,7 @@ struct TensorEvaluator<const TensorConvolutionOp<Indices, InputArgType, KernelAr
block_size.z = numext::mini<int>(1024/(block_size.x*block_size.y), maxP);
const int shared_mem = block_size.z * (maxX + kernel_size_x - 1) * (maxY + kernel_size_y - 1) * sizeof(Scalar);
- assert(shared_mem <= maxSharedMem);
+ gpu_assert(shared_mem <= maxSharedMem);
const int num_x_blocks = ceil(numX, maxX);
const int num_y_blocks = ceil(numY, maxY);
@@ -967,11 +985,11 @@ struct TensorEvaluator<const TensorConvolutionOp<Indices, InputArgType, KernelAr
case 4: {
switch (kernel_size_y) {
case 7: {
- LAUNCH_CUDA_KERNEL((EigenConvolutionKernel2D<TensorEvaluator<InputArgType, GpuDevice>, Index, InputDims, 4, 7>), num_blocks, block_size, shared_mem, m_device, m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, numY, maxY, 4, 7, data);
+ LAUNCH_GPU_KERNEL((EigenConvolutionKernel2D<TensorEvaluator<InputArgType, GpuDevice>, Index, InputDims, 4, 7>), num_blocks, block_size, shared_mem, m_device, m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, numY, maxY, 4, 7, data);
break;
}
default: {
- LAUNCH_CUDA_KERNEL((EigenConvolutionKernel2D<TensorEvaluator<InputArgType, GpuDevice>, Index, InputDims, 4, Dynamic>), num_blocks, block_size, shared_mem, m_device, m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, numY, maxY, 4, kernel_size_y, data);
+ LAUNCH_GPU_KERNEL((EigenConvolutionKernel2D<TensorEvaluator<InputArgType, GpuDevice>, Index, InputDims, 4, Dynamic>), num_blocks, block_size, shared_mem, m_device, m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, numY, maxY, 4, kernel_size_y, data);
break;
}
}
@@ -980,18 +998,18 @@ struct TensorEvaluator<const TensorConvolutionOp<Indices, InputArgType, KernelAr
case 7: {
switch (kernel_size_y) {
case 4: {
- LAUNCH_CUDA_KERNEL((EigenConvolutionKernel2D<TensorEvaluator<InputArgType, GpuDevice>, Index, InputDims, 7, 4>), num_blocks, block_size, shared_mem, m_device, m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, numY, maxY, 7, 4, data);
+ LAUNCH_GPU_KERNEL((EigenConvolutionKernel2D<TensorEvaluator<InputArgType, GpuDevice>, Index, InputDims, 7, 4>), num_blocks, block_size, shared_mem, m_device, m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, numY, maxY, 7, 4, data);
break;
}
default: {
- LAUNCH_CUDA_KERNEL((EigenConvolutionKernel2D<TensorEvaluator<InputArgType, GpuDevice>, Index, InputDims, 7, Dynamic>), num_blocks, block_size, shared_mem, m_device, m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, numY, maxY, 7, kernel_size_y, data);
+ LAUNCH_GPU_KERNEL((EigenConvolutionKernel2D<TensorEvaluator<InputArgType, GpuDevice>, Index, InputDims, 7, Dynamic>), num_blocks, block_size, shared_mem, m_device, m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, numY, maxY, 7, kernel_size_y, data);
break;
}
}
break;
}
default: {
- LAUNCH_CUDA_KERNEL((EigenConvolutionKernel2D<TensorEvaluator<InputArgType, GpuDevice>, Index, InputDims, Dynamic, Dynamic>), num_blocks, block_size, shared_mem, m_device, m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, numY, maxY, kernel_size_x, kernel_size_y, data);
+ LAUNCH_GPU_KERNEL((EigenConvolutionKernel2D<TensorEvaluator<InputArgType, GpuDevice>, Index, InputDims, Dynamic, Dynamic>), num_blocks, block_size, shared_mem, m_device, m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, numY, maxY, kernel_size_x, kernel_size_y, data);
break;
}
}
@@ -1026,7 +1044,7 @@ struct TensorEvaluator<const TensorConvolutionOp<Indices, InputArgType, KernelAr
dim3 num_blocks(ceil(numX, maxX), ceil(numY, maxY), ceil(numZ, maxZ));
const int shared_mem = (maxX + kernel_size_x - 1) * (maxY + kernel_size_y - 1) * (maxZ + kernel_size_z - 1) * sizeof(Scalar);
- assert(shared_mem <= maxSharedMem);
+ gpu_assert(shared_mem <= maxSharedMem);
//cout << "launching 3D kernel with block_size.x: " << block_size.x << " block_size.y: " << block_size.y << " block_size.z: " << block_size.z << " num_blocks.x: " << num_blocks.x << " num_blocks.y: " << num_blocks.y << " num_blocks.z: " << num_blocks.z << " shared_mem: " << shared_mem << " in stream " << m_device.stream() << endl;
const array<Index, 3> indices(m_indices[idxX], m_indices[idxY],
@@ -1037,7 +1055,7 @@ struct TensorEvaluator<const TensorConvolutionOp<Indices, InputArgType, KernelAr
internal::IndexMapper<Index, InputDims, 3, Layout> indexMapper(
m_inputImpl.dimensions(), kernel_dims, indices);
- LAUNCH_CUDA_KERNEL((EigenConvolutionKernel3D<TensorEvaluator<InputArgType, GpuDevice>, Index, InputDims>), num_blocks, block_size, shared_mem, m_device, m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, numY, maxY, numZ, maxZ, kernel_size_x, kernel_size_y, kernel_size_z, data);
+ LAUNCH_GPU_KERNEL((EigenConvolutionKernel3D<TensorEvaluator<InputArgType, GpuDevice>, Index, InputDims>), num_blocks, block_size, shared_mem, m_device, m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, numY, maxY, numZ, maxZ, kernel_size_x, kernel_size_y, kernel_size_z, data);
break;
}
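The LAUNCH_GPU_KERNEL invocations above replace LAUNCH_CUDA_KERNEL one for one. Modeled on the CUDA-only macro removed from TensorDeviceCuda.h later in this patch, a plausible (assumed) shape for the GPU-neutral version is:

// Assumption: patterned after the removed LAUNCH_CUDA_KERNEL below; the real
// definition lives in the gpu* macro headers and may differ in detail.
#define LAUNCH_GPU_KERNEL(kernel, gridsize, blocksize, sharedmem, device, ...)          \
  (kernel)<<<(gridsize), (blocksize), (sharedmem), (device).stream()>>>(__VA_ARGS__);   \
  gpu_assert(gpuGetLastError() == gpuSuccess);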
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorConvolutionSycl.h b/unsupported/Eigen/CXX11/src/Tensor/TensorConvolutionSycl.h
index 4247c1c4a..e79958fc9 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorConvolutionSycl.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorConvolutionSycl.h
@@ -32,19 +32,20 @@ internal::IndexMapper<Index, InputDims, 1, Eigen::internal::traits<HostExpr>::La
Kernel_accessor kernel_filter;
const size_t kernelSize, range_x, range_y;
Buffer_accessor buffer_acc;
+ptrdiff_t out_offset;
Local_accessor local_acc;
FunctorExpr functors;
TupleType tuple_of_accessors;
EigenConvolutionKernel1D(internal::IndexMapper<Index, InputDims, 1, Eigen::internal::traits<HostExpr>::Layout> indexMapper_,
Kernel_accessor kernel_filter_, const size_t kernelSize_, const size_t range_x_, const size_t range_y_,
- Buffer_accessor buffer_acc_, Local_accessor local_acc_, FunctorExpr functors_, TupleType tuple_of_accessors_)
+ Buffer_accessor buffer_acc_, ptrdiff_t out_offset_, Local_accessor local_acc_, FunctorExpr functors_, TupleType tuple_of_accessors_)
:indexMapper(indexMapper_), kernel_filter(kernel_filter_), kernelSize(kernelSize_), range_x(range_x_), range_y(range_y_),
- buffer_acc(buffer_acc_), local_acc(local_acc_), functors(functors_), tuple_of_accessors(tuple_of_accessors_) {}
+ buffer_acc(buffer_acc_), out_offset(out_offset_),local_acc(local_acc_), functors(functors_), tuple_of_accessors(tuple_of_accessors_) {}
void operator()(cl::sycl::nd_item<2> itemID) {
typedef typename TensorSycl::internal::ConvertToDeviceExpression<HostExpr>::Type DevExpr;
auto device_expr =TensorSycl::internal::createDeviceExpression<DevExpr, PlaceHolderExpr>(functors, tuple_of_accessors);
- auto device_evaluator = Eigen::TensorEvaluator<DevExpr, Eigen::DefaultDevice>(device_expr.expr, Eigen::DefaultDevice());
+ auto device_evaluator = Eigen::TensorEvaluator<DevExpr, Eigen::SyclKernelDevice>(device_expr.expr, Eigen::SyclKernelDevice());
auto buffer_ptr = ConvertToActualTypeSycl(CoeffReturnType, buffer_acc);
auto kernel_ptr = ConvertToActualTypeSycl(KernelType, kernel_filter);
@@ -75,7 +76,7 @@ EigenConvolutionKernel1D(internal::IndexMapper<Index, InputDims, 1, Eigen::inter
}
const size_t tensor_index = indexMapper.mapCudaOutputPlaneToTensorOutputOffset(itemID.get_global(1))
+indexMapper.mapCudaOutputKernelToTensorOutputOffset(itemID.get_local(0) + first_output_start);
- buffer_ptr[tensor_index] = result;
+ buffer_ptr[tensor_index+ConvertToActualSyclOffset(CoeffReturnType, out_offset)] = result;
}
}
};
@@ -89,19 +90,20 @@ internal::IndexMapper<Index, InputDims, 2, Eigen::internal::traits<HostExpr>::La
Kernel_accessor kernel_filter;
const size_t kernelSize_x, kernelSize_y, range_x, range_y , range_z;
Buffer_accessor buffer_acc;
+ptrdiff_t out_offset;
Local_accessor local_acc;
FunctorExpr functors;
TupleType tuple_of_accessors;
EigenConvolutionKernel2D(internal::IndexMapper<Index, InputDims, 2, Eigen::internal::traits<HostExpr>::Layout> indexMapper_,
Kernel_accessor kernel_filter_, const size_t kernelSize_x_, const size_t kernelSize_y_ ,const size_t range_x_, const size_t range_y_, const size_t range_z_,
- Buffer_accessor buffer_acc_, Local_accessor local_acc_, FunctorExpr functors_, TupleType tuple_of_accessors_)
+ Buffer_accessor buffer_acc_, ptrdiff_t out_offset_, Local_accessor local_acc_, FunctorExpr functors_, TupleType tuple_of_accessors_)
:indexMapper(indexMapper_), kernel_filter(kernel_filter_), kernelSize_x(kernelSize_x_), kernelSize_y(kernelSize_y_), range_x(range_x_), range_y(range_y_), range_z(range_z_),
- buffer_acc(buffer_acc_), local_acc(local_acc_), functors(functors_), tuple_of_accessors(tuple_of_accessors_) {}
+ buffer_acc(buffer_acc_), out_offset(out_offset_), local_acc(local_acc_), functors(functors_), tuple_of_accessors(tuple_of_accessors_) {}
void operator()(cl::sycl::nd_item<3> itemID) {
typedef typename TensorSycl::internal::ConvertToDeviceExpression<HostExpr>::Type DevExpr;
auto device_expr =TensorSycl::internal::createDeviceExpression<DevExpr, PlaceHolderExpr>(functors, tuple_of_accessors);
- auto device_evaluator = Eigen::TensorEvaluator<DevExpr, Eigen::DefaultDevice>(device_expr.expr, Eigen::DefaultDevice());
+ auto device_evaluator = Eigen::TensorEvaluator<DevExpr, Eigen::SyclKernelDevice>(device_expr.expr, Eigen::SyclKernelDevice());
auto buffer_ptr = ConvertToActualTypeSycl(CoeffReturnType, buffer_acc);
auto kernel_ptr = ConvertToActualTypeSycl(KernelType, kernel_filter);
@@ -141,7 +143,7 @@ EigenConvolutionKernel2D(internal::IndexMapper<Index, InputDims, 2, Eigen::inter
}
const size_t tensor_index = indexMapper.mapCudaOutputPlaneToTensorOutputOffset(itemID.get_global(2))
+indexMapper.mapCudaOutputKernelToTensorOutputOffset(itemID.get_local(0) + fitst_x_output_start, itemID.get_local(1) + fitst_y_output_start);
- buffer_ptr[tensor_index] = result;
+ buffer_ptr[tensor_index +ConvertToActualSyclOffset(CoeffReturnType, out_offset)] = result;
}
}
};
@@ -156,21 +158,22 @@ internal::IndexMapper<Index, InputDims, 3, Eigen::internal::traits<HostExpr>::La
Kernel_accessor kernel_filter;
const size_t kernelSize_x, kernelSize_y, kernelSize_z, range_x, range_y , range_z, numP;
Buffer_accessor buffer_acc;
+ptrdiff_t out_offset;
Local_accessor local_acc;
FunctorExpr functors;
TupleType tuple_of_accessors;
EigenConvolutionKernel3D(internal::IndexMapper<Index, InputDims, 3, Eigen::internal::traits<HostExpr>::Layout> indexMapper_,
Kernel_accessor kernel_filter_, const size_t kernelSize_x_, const size_t kernelSize_y_ , const size_t kernelSize_z_ ,
const size_t range_x_, const size_t range_y_, const size_t range_z_, const size_t numP_,
- Buffer_accessor buffer_acc_, Local_accessor local_acc_, FunctorExpr functors_, TupleType tuple_of_accessors_)
+ Buffer_accessor buffer_acc_, ptrdiff_t out_offset_, Local_accessor local_acc_, FunctorExpr functors_, TupleType tuple_of_accessors_)
:indexMapper(indexMapper_), kernel_filter(kernel_filter_), kernelSize_x(kernelSize_x_), kernelSize_y(kernelSize_y_),
kernelSize_z(kernelSize_z_), range_x(range_x_), range_y(range_y_), range_z(range_z_), numP(numP_),
- buffer_acc(buffer_acc_), local_acc(local_acc_), functors(functors_), tuple_of_accessors(tuple_of_accessors_) {}
+ buffer_acc(buffer_acc_), out_offset(out_offset_), local_acc(local_acc_), functors(functors_), tuple_of_accessors(tuple_of_accessors_) {}
void operator()(cl::sycl::nd_item<3> itemID) {
typedef typename TensorSycl::internal::ConvertToDeviceExpression<HostExpr>::Type DevExpr;
auto device_expr =TensorSycl::internal::createDeviceExpression<DevExpr, PlaceHolderExpr>(functors, tuple_of_accessors);
- auto device_evaluator = Eigen::TensorEvaluator<DevExpr, Eigen::DefaultDevice>(device_expr.expr, Eigen::DefaultDevice());
+ auto device_evaluator = Eigen::TensorEvaluator<DevExpr, Eigen::SyclKernelDevice>(device_expr.expr, Eigen::SyclKernelDevice());
auto buffer_ptr = ConvertToActualTypeSycl(CoeffReturnType, buffer_acc);
auto kernel_ptr = ConvertToActualTypeSycl(KernelType, kernel_filter);
@@ -215,7 +218,7 @@ EigenConvolutionKernel3D(internal::IndexMapper<Index, InputDims, 3, Eigen::inter
}
const size_t tensor_index = indexMapper.mapCudaOutputPlaneToTensorOutputOffset(p)
+indexMapper.mapCudaOutputKernelToTensorOutputOffset(itemID.get_local(0) + fitst_x_output_start, itemID.get_local(1) + fitst_y_output_start, itemID.get_local(2) + fitst_z_output_start );
- buffer_ptr[tensor_index] = result;
+ buffer_ptr[tensor_index+ConvertToActualSyclOffset(CoeffReturnType, out_offset)] = result;
}
itemID.barrier(cl::sycl::access::fence_space::local_space);
@@ -239,6 +242,8 @@ struct TensorEvaluator<const TensorConvolutionOp<Indices, InputArgType, KernelAr
enum {
IsAligned = TensorEvaluator<InputArgType, const Eigen::SyclDevice>::IsAligned & TensorEvaluator<KernelArgType, const Eigen::SyclDevice>::IsAligned,
PacketAccess = false,
+ BlockAccess = false,
+ PreferBlockAccess = false,
Layout = TensorEvaluator<InputArgType, const Eigen::SyclDevice>::Layout,
CoordAccess = false, // to be implemented
RawAccess = false
@@ -297,7 +302,7 @@ struct TensorEvaluator<const TensorConvolutionOp<Indices, InputArgType, KernelAr
/// used by sycl in order to build the sycl buffer
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Device& device() const{return m_device;}
/// used by sycl in order to build the sycl buffer
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType* data() const { return m_buf; }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename Eigen::internal::traits<XprType>::PointerType data() const { return m_buf; }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void preloadKernel() {
// Don't make a local copy of the kernel unless we have to (i.e. it's an
@@ -307,7 +312,7 @@ struct TensorEvaluator<const TensorConvolutionOp<Indices, InputArgType, KernelAr
m_kernel = in_place;
m_local_kernel = false;
} else {
- size_t kernel_sz = m_kernelImpl.dimensions().TotalSize() * sizeof(Scalar);
+ ptrdiff_t kernel_sz = m_kernelImpl.dimensions().TotalSize() * sizeof(Scalar);
Scalar* local = (Scalar*)m_device.allocate(kernel_sz);
typedef TensorEvalToOp<const KernelArgType> EvalTo;
EvalTo evalToTmp(local, m_kernelArg);
@@ -325,6 +330,7 @@ struct TensorEvaluator<const TensorConvolutionOp<Indices, InputArgType, KernelAr
typedef Eigen::TensorSycl::internal::FunctorExtractor<InputEvaluator> InputFunctorExpr;
// extract input functor list
InputFunctorExpr input_functors = Eigen::TensorSycl::internal::extractFunctors(m_inputImpl);
+ ptrdiff_t out_offset = m_device.get_offset(data);
m_device.sycl_queue().submit([&](cl::sycl::handler &cgh) {
@@ -335,8 +341,8 @@ struct TensorEvaluator<const TensorConvolutionOp<Indices, InputArgType, KernelAr
// create input tuple of accessors
InputTupleType tuple_of_accessors = Eigen::TensorSycl::internal::createTupleOfAccessors<InputEvaluator>(cgh, m_inputImpl);
- typedef cl::sycl::accessor<uint8_t, 1, cl::sycl::access::mode::discard_write, cl::sycl::access::target::global_buffer> OutputAccessorType;
- OutputAccessorType out_res= m_device. template get_sycl_accessor<cl::sycl::access::mode::discard_write>(cgh, data);
+ typedef cl::sycl::accessor<uint8_t, 1, cl::sycl::access::mode::write, cl::sycl::access::target::global_buffer> OutputAccessorType;
+ OutputAccessorType out_res= m_device. template get_sycl_accessor<cl::sycl::access::mode::write>(cgh, data);
typedef cl::sycl::accessor<uint8_t, 1, cl::sycl::access::mode::read, cl::sycl::access::target::global_buffer> KernelAccessorType;
KernelAccessorType kernel_acc= m_device. template get_sycl_accessor<cl::sycl::access::mode::read>(cgh, m_kernel);
@@ -348,7 +354,7 @@ struct TensorEvaluator<const TensorConvolutionOp<Indices, InputArgType, KernelAr
size_t range_x, GRange_x, tileSize_x, range_y, GRange_y, tileSize_y;
m_device.parallel_for_setup(numX, numP, tileSize_x,tileSize_y,range_x,range_y, GRange_x, GRange_y );
const size_t shared_mem =(tileSize_x +kernel_size -1)*(tileSize_y);
- assert(static_cast<unsigned long>(shared_mem) <= m_device.sharedMemPerBlock());
+ gpu_assert(static_cast<unsigned long>(shared_mem) <= m_device.sharedMemPerBlock());
auto global_range=cl::sycl::range<2>(GRange_x, GRange_y); // global range
auto local_range=cl::sycl::range<2>(tileSize_x, tileSize_y); // local range
InputLocalAcc local_acc(cl::sycl::range<1>(shared_mem), cgh);
@@ -358,7 +364,7 @@ struct TensorEvaluator<const TensorConvolutionOp<Indices, InputArgType, KernelAr
cgh.parallel_for(cl::sycl::nd_range<2>(global_range, local_range),
EigenConvolutionKernel1D<CoeffReturnType, Scalar, InputArgType, InputFunctorExpr, Index,
InputDims, KernelAccessorType, OutputAccessorType, InputLocalAcc, InputTupleType>(
- indexMapper,kernel_acc, kernel_size, numX, numP, out_res, local_acc, input_functors, tuple_of_accessors));
+ indexMapper,kernel_acc, kernel_size, numX, numP, out_res, out_offset, local_acc, input_functors, tuple_of_accessors));
break;
}
@@ -373,7 +379,7 @@ struct TensorEvaluator<const TensorConvolutionOp<Indices, InputArgType, KernelAr
size_t range_x, GRange_x, tileSize_x, range_y, GRange_y, tileSize_y, range_z, GRange_z, tileSize_z;
m_device.parallel_for_setup(numX, numY, numP, tileSize_x, tileSize_y, tileSize_z, range_x, range_y, range_z, GRange_x, GRange_y, GRange_z );
const size_t shared_mem =(tileSize_x +kernel_size_x -1)*(tileSize_y +kernel_size_y -1) * tileSize_z;
- assert(static_cast<unsigned long>(shared_mem) <= m_device.sharedMemPerBlock());
+ gpu_assert(static_cast<unsigned long>(shared_mem) <= m_device.sharedMemPerBlock());
auto global_range=cl::sycl::range<3>(GRange_x, GRange_y, GRange_z); // global range
auto local_range=cl::sycl::range<3>(tileSize_x, tileSize_y, tileSize_z); // local range
InputLocalAcc local_acc(cl::sycl::range<1>(shared_mem), cgh);
@@ -383,7 +389,7 @@ struct TensorEvaluator<const TensorConvolutionOp<Indices, InputArgType, KernelAr
cgh.parallel_for(cl::sycl::nd_range<3>(global_range, local_range),
EigenConvolutionKernel2D<CoeffReturnType, Scalar, InputArgType, InputFunctorExpr, Index,
InputDims, KernelAccessorType, OutputAccessorType, InputLocalAcc, InputTupleType>(
- indexMapper,kernel_acc, kernel_size_x, kernel_size_y, numX, numY, numP, out_res, local_acc, input_functors, tuple_of_accessors));
+ indexMapper,kernel_acc, kernel_size_x, kernel_size_y, numX, numY, numP, out_res, out_offset, local_acc, input_functors, tuple_of_accessors));
break;
}
@@ -404,7 +410,7 @@ struct TensorEvaluator<const TensorConvolutionOp<Indices, InputArgType, KernelAr
size_t range_x, GRange_x, tileSize_x, range_y, GRange_y, tileSize_y, range_z, GRange_z, tileSize_z;
m_device.parallel_for_setup(numX, numY, numZ, tileSize_x, tileSize_y, tileSize_z, range_x, range_y, range_z, GRange_x, GRange_y, GRange_z );
const size_t shared_mem =(tileSize_x +kernel_size_x -1)*(tileSize_y +kernel_size_y -1) * (tileSize_z +kernel_size_y -1);
- assert(static_cast<unsigned long>(shared_mem) <= m_device.sharedMemPerBlock());
+ gpu_assert(static_cast<unsigned long>(shared_mem) <= m_device.sharedMemPerBlock());
auto global_range=cl::sycl::range<3>(GRange_x, GRange_y, GRange_z); // global range
auto local_range=cl::sycl::range<3>(tileSize_x, tileSize_y, tileSize_z); // local range
InputLocalAcc local_acc(cl::sycl::range<1>(shared_mem), cgh);
@@ -412,7 +418,7 @@ struct TensorEvaluator<const TensorConvolutionOp<Indices, InputArgType, KernelAr
EigenConvolutionKernel3D<CoeffReturnType, Scalar, InputArgType, InputFunctorExpr, Index,
InputDims, KernelAccessorType, OutputAccessorType, InputLocalAcc, InputTupleType>(
indexMapper,kernel_acc, kernel_size_x, kernel_size_y, kernel_size_z, numX, numY,
- numZ, numP, out_res, local_acc, input_functors, tuple_of_accessors));
+ numZ, numP, out_res, out_offset, local_acc, input_functors, tuple_of_accessors));
break;
}
@@ -421,6 +427,7 @@ struct TensorEvaluator<const TensorConvolutionOp<Indices, InputArgType, KernelAr
}
}
});
+ m_device.asynchronousExec();
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorCostModel.h b/unsupported/Eigen/CXX11/src/Tensor/TensorCostModel.h
index 83c449cf1..bb63baee2 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorCostModel.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorCostModel.h
@@ -174,8 +174,10 @@ class TensorCostModel {
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int numThreads(
double output_size, const TensorOpCost& cost_per_coeff, int max_threads) {
double cost = totalCost(output_size, cost_per_coeff);
- int threads = (cost - kStartupCycles) / kPerThreadCycles + 0.9;
- return numext::mini(max_threads, numext::maxi(1, threads));
+ double threads = (cost - kStartupCycles) / kPerThreadCycles + 0.9;
+ // Make sure we don't invoke undefined behavior when we convert to an int.
+ threads = numext::mini<double>(threads, GenericNumTraits<int>::highest());
+ return numext::mini(max_threads, numext::maxi<int>(1, threads));
}
// taskSize assesses parallel task size.
@@ -193,7 +195,7 @@ class TensorCostModel {
// 11 is L2 cache latency on Haswell.
// We don't know whether data is in L1, L2 or L3. But we are most interested
// in single-threaded computational time around 100us-10ms (smaller time
- // is too small for parallelization, larger time is not intersting
+ // is too small for parallelization, larger time is not interesting
// either because we are probably using all available threads already).
// And for the target time range, L2 seems to be what matters. Data set
// fitting into L1 is too small to take noticeable time. Data set fitting
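The numThreads() change earlier in this file computes the thread estimate in double and clamps it to the largest int before converting; casting an out-of-range double to int is undefined behaviour. A minimal standalone sketch of the same guard, using standard-library helpers instead of Eigen's numext (names and constants here are illustrative):

#include <algorithm>
#include <climits>

// Sketch of the clamp performed in TensorCostModel::numThreads().
int estimateThreads(double cost, double startup_cycles, double per_thread_cycles,
                    int max_threads) {
  double threads = (cost - startup_cycles) / per_thread_cycles + 0.9;
  // Clamp before the double -> int conversion: converting a value outside
  // int's range would be undefined behaviour.
  threads = std::min(threads, static_cast<double>(INT_MAX));
  return std::min(max_threads, std::max(1, static_cast<int>(threads)));
}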
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorCustomOp.h b/unsupported/Eigen/CXX11/src/Tensor/TensorCustomOp.h
index e020d076f..6ee3827f3 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorCustomOp.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorCustomOp.h
@@ -30,6 +30,7 @@ struct traits<TensorCustomUnaryOp<CustomUnaryFunc, XprType> >
typedef typename remove_reference<Nested>::type _Nested;
static const int NumDimensions = traits<XprType>::NumDimensions;
static const int Layout = traits<XprType>::Layout;
+ typedef typename traits<XprType>::PointerType PointerType;
};
template<typename CustomUnaryFunc, typename XprType>
@@ -86,12 +87,14 @@ struct TensorEvaluator<const TensorCustomUnaryOp<CustomUnaryFunc, XprType>, Devi
typedef typename internal::remove_const<typename ArgType::Scalar>::type Scalar;
typedef typename internal::remove_const<typename XprType::CoeffReturnType>::type CoeffReturnType;
typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
- static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
+ static const int PacketSize = PacketType<CoeffReturnType, Device>::size;
+ typedef typename PointerType<CoeffReturnType, Device>::Type PointerT;
enum {
IsAligned = false,
- PacketAccess = (internal::packet_traits<Scalar>::size > 1),
+ PacketAccess = (PacketType<CoeffReturnType, Device>::size > 1),
BlockAccess = false,
+ PreferBlockAccess = false,
Layout = TensorEvaluator<XprType, Device>::Layout,
CoordAccess = false, // to be implemented
RawAccess = false
@@ -105,13 +108,13 @@ struct TensorEvaluator<const TensorCustomUnaryOp<CustomUnaryFunc, XprType>, Devi
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType* data) {
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(PointerT data) {
if (data) {
evalTo(data);
return false;
} else {
- m_result = static_cast<CoeffReturnType*>(
- m_device.allocate(dimensions().TotalSize() * sizeof(Scalar)));
+ m_result = static_cast<PointerT>(
+ m_device.allocate_temp(dimensions().TotalSize() * sizeof(Scalar)));
evalTo(m_result);
return true;
}
@@ -119,7 +122,7 @@ struct TensorEvaluator<const TensorCustomUnaryOp<CustomUnaryFunc, XprType>, Devi
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
if (m_result != NULL) {
- m_device.deallocate(m_result);
+ m_device.deallocate_temp(m_result);
m_result = NULL;
}
}
@@ -138,19 +141,22 @@ struct TensorEvaluator<const TensorCustomUnaryOp<CustomUnaryFunc, XprType>, Devi
return TensorOpCost(sizeof(CoeffReturnType), 0, 0, vectorized, PacketSize);
}
- EIGEN_DEVICE_FUNC CoeffReturnType* data() const { return m_result; }
+ EIGEN_DEVICE_FUNC PointerT data() const { return m_result; }
+
+#ifdef EIGEN_USE_SYCL
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Device& device() const { return m_device; }
+#endif
protected:
- EIGEN_DEVICE_FUNC void evalTo(Scalar* data) {
- TensorMap<Tensor<CoeffReturnType, NumDims, Layout, Index> > result(
- data, m_dimensions);
+ EIGEN_DEVICE_FUNC void evalTo(PointerT data) {
+ TensorMap<Tensor<CoeffReturnType, NumDims, Layout, Index> > result(data, m_dimensions);
m_op.func().eval(m_op.expression(), result, m_device);
}
Dimensions m_dimensions;
const ArgType m_op;
const Device& m_device;
- CoeffReturnType* m_result;
+ PointerT m_result;
};
@@ -180,6 +186,8 @@ struct traits<TensorCustomBinaryOp<CustomBinaryFunc, LhsXprType, RhsXprType> >
typedef typename remove_reference<RhsNested>::type _RhsNested;
static const int NumDimensions = traits<LhsXprType>::NumDimensions;
static const int Layout = traits<LhsXprType>::Layout;
+ typedef typename conditional<Pointer_type_promotion<typename LhsXprType::Scalar, Scalar>::val,
+ typename traits<LhsXprType>::PointerType, typename traits<RhsXprType>::PointerType>::type PointerType;
};
template<typename CustomBinaryFunc, typename LhsXprType, typename RhsXprType>
@@ -242,12 +250,14 @@ struct TensorEvaluator<const TensorCustomBinaryOp<CustomBinaryFunc, LhsXprType,
typedef typename XprType::Scalar Scalar;
typedef typename internal::remove_const<typename XprType::CoeffReturnType>::type CoeffReturnType;
typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
- static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
+ static const int PacketSize = PacketType<CoeffReturnType, Device>::size;
+ typedef typename PointerType<CoeffReturnType, Device>::Type PointerT;
enum {
IsAligned = false,
- PacketAccess = (internal::packet_traits<Scalar>::size > 1),
+ PacketAccess = (PacketType<CoeffReturnType, Device>::size > 1),
BlockAccess = false,
+ PreferBlockAccess = false,
Layout = TensorEvaluator<LhsXprType, Device>::Layout,
CoordAccess = false, // to be implemented
RawAccess = false
@@ -261,12 +271,12 @@ struct TensorEvaluator<const TensorCustomBinaryOp<CustomBinaryFunc, LhsXprType,
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType* data) {
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(PointerT data) {
if (data) {
evalTo(data);
return false;
} else {
- m_result = static_cast<Scalar *>(m_device.allocate(dimensions().TotalSize() * sizeof(Scalar)));
+ m_result = static_cast<PointerT>(m_device.allocate_temp(dimensions().TotalSize() * sizeof(CoeffReturnType)));
evalTo(m_result);
return true;
}
@@ -274,7 +284,7 @@ struct TensorEvaluator<const TensorCustomBinaryOp<CustomBinaryFunc, LhsXprType,
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
if (m_result != NULL) {
- m_device.deallocate(m_result);
+ m_device.deallocate_temp(m_result);
m_result = NULL;
}
}
@@ -293,18 +303,22 @@ struct TensorEvaluator<const TensorCustomBinaryOp<CustomBinaryFunc, LhsXprType,
return TensorOpCost(sizeof(CoeffReturnType), 0, 0, vectorized, PacketSize);
}
- EIGEN_DEVICE_FUNC CoeffReturnType* data() const { return m_result; }
+ EIGEN_DEVICE_FUNC PointerT data() const { return m_result; }
+
+#ifdef EIGEN_USE_SYCL
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Device& device() const { return m_device; }
+#endif
protected:
- EIGEN_DEVICE_FUNC void evalTo(Scalar* data) {
- TensorMap<Tensor<Scalar, NumDims, Layout> > result(data, m_dimensions);
+ EIGEN_DEVICE_FUNC void evalTo(PointerT data) {
+ TensorMap<Tensor<CoeffReturnType, NumDims, Layout> > result(data, m_dimensions);
m_op.func().eval(m_op.lhsExpression(), m_op.rhsExpression(), result, m_device);
}
Dimensions m_dimensions;
const XprType m_op;
const Device& m_device;
- CoeffReturnType* m_result;
+ PointerT m_result;
};
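The custom-op evaluators above follow the usual evaluator contract: evalSubExprsIfNeeded() writes into the caller-supplied buffer when one is given and returns false, otherwise it allocates a temporary with allocate_temp(), evaluates into it, and returns true so that cleanup() later releases it with deallocate_temp(). A schematic, non-Eigen sketch of that contract (Device and PointerT stand in for the template parameters used above):

#include <cstddef>

// Schematic only; any Device type with allocate_temp()/deallocate_temp() fits.
template <typename Device, typename PointerT>
bool evalSubExprsIfNeededSketch(Device& device, PointerT dest, std::size_t bytes,
                                PointerT& result, void (*evalTo)(PointerT)) {
  if (dest) {                 // caller supplied the destination: write straight into it
    evalTo(dest);
    return false;             // nothing for cleanup() to release
  }
  result = static_cast<PointerT>(device.allocate_temp(bytes));
  evalTo(result);             // cleanup() must later call device.deallocate_temp(result)
  return true;
}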
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceCuda.h b/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceCuda.h
index e6cee11ef..f77923933 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceCuda.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceCuda.h
@@ -1,337 +1,6 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-#if defined(EIGEN_USE_GPU) && !defined(EIGEN_CXX11_TENSOR_TENSOR_DEVICE_CUDA_H)
-#define EIGEN_CXX11_TENSOR_TENSOR_DEVICE_CUDA_H
-
-namespace Eigen {
-
-static const int kCudaScratchSize = 1024;
-
-// This defines an interface that GPUDevice can take to use
-// CUDA streams underneath.
-class StreamInterface {
- public:
- virtual ~StreamInterface() {}
-
- virtual const cudaStream_t& stream() const = 0;
- virtual const cudaDeviceProp& deviceProperties() const = 0;
-
- // Allocate memory on the actual device where the computation will run
- virtual void* allocate(size_t num_bytes) const = 0;
- virtual void deallocate(void* buffer) const = 0;
-
- // Return a scratchpad buffer of size 1k
- virtual void* scratchpad() const = 0;
-
- // Return a semaphore. The semaphore is initially initialized to 0, and
- // each kernel using it is responsible for resetting to 0 upon completion
- // to maintain the invariant that the semaphore is always equal to 0 upon
- // each kernel start.
- virtual unsigned int* semaphore() const = 0;
-};
-
-static cudaDeviceProp* m_deviceProperties;
-static bool m_devicePropInitialized = false;
-
-static void initializeDeviceProp() {
- if (!m_devicePropInitialized) {
- // Attempts to ensure proper behavior in the case of multiple threads
- // calling this function simultaneously. This would be trivial to
- // implement if we could use std::mutex, but unfortunately mutex don't
- // compile with nvcc, so we resort to atomics and thread fences instead.
- // Note that if the caller uses a compiler that doesn't support c++11 we
- // can't ensure that the initialization is thread safe.
-#if __cplusplus >= 201103L
- static std::atomic<bool> first(true);
- if (first.exchange(false)) {
-#else
- static bool first = true;
- if (first) {
- first = false;
-#endif
- // We're the first thread to reach this point.
- int num_devices;
- cudaError_t status = cudaGetDeviceCount(&num_devices);
- if (status != cudaSuccess) {
- std::cerr << "Failed to get the number of CUDA devices: "
- << cudaGetErrorString(status)
- << std::endl;
- assert(status == cudaSuccess);
- }
- m_deviceProperties = new cudaDeviceProp[num_devices];
- for (int i = 0; i < num_devices; ++i) {
- status = cudaGetDeviceProperties(&m_deviceProperties[i], i);
- if (status != cudaSuccess) {
- std::cerr << "Failed to initialize CUDA device #"
- << i
- << ": "
- << cudaGetErrorString(status)
- << std::endl;
- assert(status == cudaSuccess);
- }
- }
-
-#if __cplusplus >= 201103L
- std::atomic_thread_fence(std::memory_order_release);
-#endif
- m_devicePropInitialized = true;
- } else {
- // Wait for the other thread to inititialize the properties.
- while (!m_devicePropInitialized) {
-#if __cplusplus >= 201103L
- std::atomic_thread_fence(std::memory_order_acquire);
-#endif
- EIGEN_SLEEP(1000);
- }
- }
- }
-}
-
-static const cudaStream_t default_stream = cudaStreamDefault;
-
-class CudaStreamDevice : public StreamInterface {
- public:
- // Use the default stream on the current device
- CudaStreamDevice() : stream_(&default_stream), scratch_(NULL), semaphore_(NULL) {
- cudaGetDevice(&device_);
- initializeDeviceProp();
- }
- // Use the default stream on the specified device
- CudaStreamDevice(int device) : stream_(&default_stream), device_(device), scratch_(NULL), semaphore_(NULL) {
- initializeDeviceProp();
- }
- // Use the specified stream. Note that it's the
- // caller responsibility to ensure that the stream can run on
- // the specified device. If no device is specified the code
- // assumes that the stream is associated to the current gpu device.
- CudaStreamDevice(const cudaStream_t* stream, int device = -1)
- : stream_(stream), device_(device), scratch_(NULL), semaphore_(NULL) {
- if (device < 0) {
- cudaGetDevice(&device_);
- } else {
- int num_devices;
- cudaError_t err = cudaGetDeviceCount(&num_devices);
- EIGEN_UNUSED_VARIABLE(err)
- assert(err == cudaSuccess);
- assert(device < num_devices);
- device_ = device;
- }
- initializeDeviceProp();
- }
-
- virtual ~CudaStreamDevice() {
- if (scratch_) {
- deallocate(scratch_);
- }
- }
-
- const cudaStream_t& stream() const { return *stream_; }
- const cudaDeviceProp& deviceProperties() const {
- return m_deviceProperties[device_];
- }
- virtual void* allocate(size_t num_bytes) const {
- cudaError_t err = cudaSetDevice(device_);
- EIGEN_UNUSED_VARIABLE(err)
- assert(err == cudaSuccess);
- void* result;
- err = cudaMalloc(&result, num_bytes);
- assert(err == cudaSuccess);
- assert(result != NULL);
- return result;
- }
- virtual void deallocate(void* buffer) const {
- cudaError_t err = cudaSetDevice(device_);
- EIGEN_UNUSED_VARIABLE(err)
- assert(err == cudaSuccess);
- assert(buffer != NULL);
- err = cudaFree(buffer);
- assert(err == cudaSuccess);
- }
-
- virtual void* scratchpad() const {
- if (scratch_ == NULL) {
- scratch_ = allocate(kCudaScratchSize + sizeof(unsigned int));
- }
- return scratch_;
- }
-
- virtual unsigned int* semaphore() const {
- if (semaphore_ == NULL) {
- char* scratch = static_cast<char*>(scratchpad()) + kCudaScratchSize;
- semaphore_ = reinterpret_cast<unsigned int*>(scratch);
- cudaError_t err = cudaMemsetAsync(semaphore_, 0, sizeof(unsigned int), *stream_);
- EIGEN_UNUSED_VARIABLE(err)
- assert(err == cudaSuccess);
- }
- return semaphore_;
- }
-
- private:
- const cudaStream_t* stream_;
- int device_;
- mutable void* scratch_;
- mutable unsigned int* semaphore_;
-};
-
-struct GpuDevice {
- // The StreamInterface is not owned: the caller is
- // responsible for its initialization and eventual destruction.
- explicit GpuDevice(const StreamInterface* stream) : stream_(stream), max_blocks_(INT_MAX) {
- eigen_assert(stream);
- }
- explicit GpuDevice(const StreamInterface* stream, int num_blocks) : stream_(stream), max_blocks_(num_blocks) {
- eigen_assert(stream);
- }
- // TODO(bsteiner): This is an internal API, we should not expose it.
- EIGEN_STRONG_INLINE const cudaStream_t& stream() const {
- return stream_->stream();
- }
-
- EIGEN_STRONG_INLINE void* allocate(size_t num_bytes) const {
- return stream_->allocate(num_bytes);
- }
-
- EIGEN_STRONG_INLINE void deallocate(void* buffer) const {
- stream_->deallocate(buffer);
- }
-
- EIGEN_STRONG_INLINE void* scratchpad() const {
- return stream_->scratchpad();
- }
-
- EIGEN_STRONG_INLINE unsigned int* semaphore() const {
- return stream_->semaphore();
- }
-
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpy(void* dst, const void* src, size_t n) const {
-#ifndef __CUDA_ARCH__
- cudaError_t err = cudaMemcpyAsync(dst, src, n, cudaMemcpyDeviceToDevice,
- stream_->stream());
- EIGEN_UNUSED_VARIABLE(err)
- assert(err == cudaSuccess);
-#else
- eigen_assert(false && "The default device should be used instead to generate kernel code");
-#endif
- }
-
- EIGEN_STRONG_INLINE void memcpyHostToDevice(void* dst, const void* src, size_t n) const {
- cudaError_t err =
- cudaMemcpyAsync(dst, src, n, cudaMemcpyHostToDevice, stream_->stream());
- EIGEN_UNUSED_VARIABLE(err)
- assert(err == cudaSuccess);
- }
-
- EIGEN_STRONG_INLINE void memcpyDeviceToHost(void* dst, const void* src, size_t n) const {
- cudaError_t err =
- cudaMemcpyAsync(dst, src, n, cudaMemcpyDeviceToHost, stream_->stream());
- EIGEN_UNUSED_VARIABLE(err)
- assert(err == cudaSuccess);
- }
-
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memset(void* buffer, int c, size_t n) const {
-#ifndef __CUDA_ARCH__
- cudaError_t err = cudaMemsetAsync(buffer, c, n, stream_->stream());
- EIGEN_UNUSED_VARIABLE(err)
- assert(err == cudaSuccess);
-#else
- eigen_assert(false && "The default device should be used instead to generate kernel code");
-#endif
- }
-
- EIGEN_STRONG_INLINE size_t numThreads() const {
- // FIXME
- return 32;
- }
-
- EIGEN_STRONG_INLINE size_t firstLevelCacheSize() const {
- // FIXME
- return 48*1024;
- }
-
- EIGEN_STRONG_INLINE size_t lastLevelCacheSize() const {
- // We won't try to take advantage of the l2 cache for the time being, and
- // there is no l3 cache on cuda devices.
- return firstLevelCacheSize();
- }
-
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void synchronize() const {
-#if defined(__CUDACC__) && !defined(__CUDA_ARCH__)
- cudaError_t err = cudaStreamSynchronize(stream_->stream());
- if (err != cudaSuccess) {
- std::cerr << "Error detected in CUDA stream: "
- << cudaGetErrorString(err)
- << std::endl;
- assert(err == cudaSuccess);
- }
-#else
- assert(false && "The default device should be used instead to generate kernel code");
+#if defined(__clang__) || defined(__GNUC__)
+#warning "Deprecated header file, please either include the main Eigen/CXX11/Tensor header or the respective TensorDeviceGpu.h file"
#endif
- }
-
- EIGEN_STRONG_INLINE int getNumCudaMultiProcessors() const {
- return stream_->deviceProperties().multiProcessorCount;
- }
- EIGEN_STRONG_INLINE int maxCudaThreadsPerBlock() const {
- return stream_->deviceProperties().maxThreadsPerBlock;
- }
- EIGEN_STRONG_INLINE int maxCudaThreadsPerMultiProcessor() const {
- return stream_->deviceProperties().maxThreadsPerMultiProcessor;
- }
- EIGEN_STRONG_INLINE int sharedMemPerBlock() const {
- return stream_->deviceProperties().sharedMemPerBlock;
- }
- EIGEN_STRONG_INLINE int majorDeviceVersion() const {
- return stream_->deviceProperties().major;
- }
- EIGEN_STRONG_INLINE int minorDeviceVersion() const {
- return stream_->deviceProperties().minor;
- }
-
- EIGEN_STRONG_INLINE int maxBlocks() const {
- return max_blocks_;
- }
-
- // This function checks if the CUDA runtime recorded an error for the
- // underlying stream device.
- inline bool ok() const {
-#ifdef __CUDACC__
- cudaError_t error = cudaStreamQuery(stream_->stream());
- return (error == cudaSuccess) || (error == cudaErrorNotReady);
-#else
- return false;
-#endif
- }
-
- private:
- const StreamInterface* stream_;
- int max_blocks_;
-};
-
-#define LAUNCH_CUDA_KERNEL(kernel, gridsize, blocksize, sharedmem, device, ...) \
- (kernel) <<< (gridsize), (blocksize), (sharedmem), (device).stream() >>> (__VA_ARGS__); \
- assert(cudaGetLastError() == cudaSuccess);
-
-
-// FIXME: Should be device and kernel specific.
-#ifdef __CUDACC__
-static EIGEN_DEVICE_FUNC inline void setCudaSharedMemConfig(cudaSharedMemConfig config) {
-#ifndef __CUDA_ARCH__
- cudaError_t status = cudaDeviceSetSharedMemConfig(config);
- EIGEN_UNUSED_VARIABLE(status)
- assert(status == cudaSuccess);
-#else
- EIGEN_UNUSED_VARIABLE(config)
-#endif
-}
-#endif
-
-} // end namespace Eigen
-#endif // EIGEN_CXX11_TENSOR_TENSOR_DEVICE_CUDA_H
+#include "TensorDeviceGpu.h"
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceDefault.h b/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceDefault.h
index ccaaa6cb2..8cb95f731 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceDefault.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceDefault.h
@@ -21,6 +21,12 @@ struct DefaultDevice {
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void deallocate(void* buffer) const {
internal::aligned_free(buffer);
}
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void* allocate_temp(size_t num_bytes) const {
+ return allocate(num_bytes);
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void deallocate_temp(void* buffer) const {
+ deallocate(buffer);
+ }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpy(void* dst, const void* src, size_t n) const {
::memcpy(dst, src, n);
}
@@ -35,9 +41,12 @@ struct DefaultDevice {
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE size_t numThreads() const {
-#ifndef __CUDA_ARCH__
+#if !defined(EIGEN_GPU_COMPILE_PHASE)
// Running on the host CPU
return 1;
+#elif defined(EIGEN_HIP_DEVICE_COMPILE)
+ // Running on a HIP device
+ return 64;
#else
// Running on a CUDA device
return 32;
@@ -45,9 +54,12 @@ struct DefaultDevice {
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE size_t firstLevelCacheSize() const {
-#if !defined(__CUDA_ARCH__) && !defined(__SYCL_DEVICE_ONLY__)
+#if !defined(EIGEN_GPU_COMPILE_PHASE) && !defined(__SYCL_DEVICE_ONLY__)
// Running on the host CPU
return l1CacheSize();
+#elif defined(EIGEN_HIP_DEVICE_COMPILE)
+ // Running on a HIP device
+ return 48*1024; // FIXME : update this number for HIP
#else
// Running on a CUDA device, return the amount of shared memory available.
return 48*1024;
@@ -55,9 +67,12 @@ struct DefaultDevice {
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE size_t lastLevelCacheSize() const {
-#if !defined(__CUDA_ARCH__) && !defined(__SYCL_DEVICE_ONLY__)
+#if !defined(EIGEN_GPU_COMPILE_PHASE) && !defined(__SYCL_DEVICE_ONLY__)
// Running single threaded on the host CPU
return l3CacheSize();
+#elif defined(EIGEN_HIP_DEVICE_COMPILE)
+ // Running on a HIP device
+ return firstLevelCacheSize(); // FIXME : update this number for HIP
#else
// Running on a CUDA device
return firstLevelCacheSize();
@@ -65,13 +80,17 @@ struct DefaultDevice {
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int majorDeviceVersion() const {
-#ifndef __CUDA_ARCH__
+#if !defined(EIGEN_GPU_COMPILE_PHASE)
// Running single threaded on the host CPU
// Should return an enum that encodes the ISA supported by the CPU
return 1;
+#elif defined(EIGEN_HIP_DEVICE_COMPILE)
+ // Running on a HIP device
+ // return 1 as major for HIP
+ return 1;
#else
// Running on a CUDA device
- return __CUDA_ARCH__ / 100;
+ return EIGEN_CUDA_ARCH / 100;
#endif
}
};
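DefaultDevice now distinguishes compile phases with EIGEN_GPU_COMPILE_PHASE and EIGEN_HIP_DEVICE_COMPILE instead of raw __CUDA_ARCH__ checks. A small sketch of the same branching pattern, assuming those Eigen macros are available in the translation unit:

// Sketch reusing the macros exercised above (assumption: they are provided by the
// Eigen GPU headers during the corresponding device compilation pass).
inline int assumedParallelWidth() {
#if !defined(EIGEN_GPU_COMPILE_PHASE)
  return 1;    // host compilation pass
#elif defined(EIGEN_HIP_DEVICE_COMPILE)
  return 64;   // HIP device pass: wavefront width
#else
  return 32;   // CUDA device pass: warp width
#endif
}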
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceGpu.h b/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceGpu.h
new file mode 100644
index 000000000..b490433db
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceGpu.h
@@ -0,0 +1,366 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#if defined(EIGEN_USE_GPU) && !defined(EIGEN_CXX11_TENSOR_TENSOR_DEVICE_GPU_H)
+#define EIGEN_CXX11_TENSOR_TENSOR_DEVICE_GPU_H
+
+// This header file contains the definitions of the gpu* macros, which resolve to
+// their equivalent hip* or cuda* versions depending on the compiler in use.
+// A separate header (included at the end of this file) will undefine them all.
+#include "TensorGpuHipCudaDefines.h"
+
+namespace Eigen {
+
+static const int kGpuScratchSize = 1024;
+
+// This defines an interface that GPUDevice can take to use
+// HIP / CUDA streams underneath.
+class StreamInterface {
+ public:
+ virtual ~StreamInterface() {}
+
+ virtual const gpuStream_t& stream() const = 0;
+ virtual const gpuDeviceProp_t& deviceProperties() const = 0;
+
+ // Allocate memory on the actual device where the computation will run
+ virtual void* allocate(size_t num_bytes) const = 0;
+ virtual void deallocate(void* buffer) const = 0;
+
+ // Return a scratchpad buffer of size 1k
+ virtual void* scratchpad() const = 0;
+
+ // Return a semaphore. The semaphore is initially initialized to 0, and
+ // each kernel using it is responsible for resetting to 0 upon completion
+ // to maintain the invariant that the semaphore is always equal to 0 upon
+ // each kernel start.
+ virtual unsigned int* semaphore() const = 0;
+};
+
+static gpuDeviceProp_t* m_deviceProperties;
+static bool m_devicePropInitialized = false;
+
+static void initializeDeviceProp() {
+ if (!m_devicePropInitialized) {
+ // Attempts to ensure proper behavior in the case of multiple threads
+ // calling this function simultaneously. This would be trivial to
+ // implement if we could use std::mutex, but unfortunately mutexes don't
+ // compile with nvcc, so we resort to atomics and thread fences instead.
+ // Note that if the caller uses a compiler that doesn't support c++11 we
+ // can't ensure that the initialization is thread safe.
+#if __cplusplus >= 201103L
+ static std::atomic<bool> first(true);
+ if (first.exchange(false)) {
+#else
+ static bool first = true;
+ if (first) {
+ first = false;
+#endif
+ // We're the first thread to reach this point.
+ int num_devices;
+ gpuError_t status = gpuGetDeviceCount(&num_devices);
+ if (status != gpuSuccess) {
+ std::cerr << "Failed to get the number of GPU devices: "
+ << gpuGetErrorString(status)
+ << std::endl;
+ gpu_assert(status == gpuSuccess);
+ }
+ m_deviceProperties = new gpuDeviceProp_t[num_devices];
+ for (int i = 0; i < num_devices; ++i) {
+ status = gpuGetDeviceProperties(&m_deviceProperties[i], i);
+ if (status != gpuSuccess) {
+ std::cerr << "Failed to initialize GPU device #"
+ << i
+ << ": "
+ << gpuGetErrorString(status)
+ << std::endl;
+ gpu_assert(status == gpuSuccess);
+ }
+ }
+
+#if __cplusplus >= 201103L
+ std::atomic_thread_fence(std::memory_order_release);
+#endif
+ m_devicePropInitialized = true;
+ } else {
+ // Wait for the other thread to initialize the properties.
+ while (!m_devicePropInitialized) {
+#if __cplusplus >= 201103L
+ std::atomic_thread_fence(std::memory_order_acquire);
+#endif
+ EIGEN_SLEEP(1000);
+ }
+ }
+ }
+}
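// A minimal sketch of the same initialization guard expressed with std::call_once.
// This is not what the code above uses: as the comment notes, <mutex> does not
// reliably compile under nvcc, hence the atomic flag plus thread fences. The sketch
// assumes a host-only translation unit and reuses the gpu* wrappers defined above.
#include <mutex>
static std::once_flag gpu_prop_once;
static void initializeDevicePropOnce() {
  std::call_once(gpu_prop_once, [] {
    int num_devices = 0;
    gpuError_t status = gpuGetDeviceCount(&num_devices);
    gpu_assert(status == gpuSuccess);
    m_deviceProperties = new gpuDeviceProp_t[num_devices];
    for (int i = 0; i < num_devices; ++i) {
      status = gpuGetDeviceProperties(&m_deviceProperties[i], i);
      gpu_assert(status == gpuSuccess);
    }
    m_devicePropInitialized = true;
  });
}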
+
+static const gpuStream_t default_stream = gpuStreamDefault;
+
+class GpuStreamDevice : public StreamInterface {
+ public:
+ // Use the default stream on the current device
+ GpuStreamDevice() : stream_(&default_stream), scratch_(NULL), semaphore_(NULL) {
+ gpuGetDevice(&device_);
+ initializeDeviceProp();
+ }
+ // Use the default stream on the specified device
+ GpuStreamDevice(int device) : stream_(&default_stream), device_(device), scratch_(NULL), semaphore_(NULL) {
+ initializeDeviceProp();
+ }
+ // Use the specified stream. Note that it's the
+ // caller's responsibility to ensure that the stream can run on
+ // the specified device. If no device is specified, the code
+ // assumes that the stream is associated with the current GPU device.
+ GpuStreamDevice(const gpuStream_t* stream, int device = -1)
+ : stream_(stream), device_(device), scratch_(NULL), semaphore_(NULL) {
+ if (device < 0) {
+ gpuGetDevice(&device_);
+ } else {
+ int num_devices;
+ gpuError_t err = gpuGetDeviceCount(&num_devices);
+ EIGEN_UNUSED_VARIABLE(err)
+ gpu_assert(err == gpuSuccess);
+ gpu_assert(device < num_devices);
+ device_ = device;
+ }
+ initializeDeviceProp();
+ }
+
+ virtual ~GpuStreamDevice() {
+ if (scratch_) {
+ deallocate(scratch_);
+ }
+ }
+
+ const gpuStream_t& stream() const { return *stream_; }
+ const gpuDeviceProp_t& deviceProperties() const {
+ return m_deviceProperties[device_];
+ }
+ virtual void* allocate(size_t num_bytes) const {
+ gpuError_t err = gpuSetDevice(device_);
+ EIGEN_UNUSED_VARIABLE(err)
+ gpu_assert(err == gpuSuccess);
+ void* result;
+ err = gpuMalloc(&result, num_bytes);
+ gpu_assert(err == gpuSuccess);
+ gpu_assert(result != NULL);
+ return result;
+ }
+ virtual void deallocate(void* buffer) const {
+ gpuError_t err = gpuSetDevice(device_);
+ EIGEN_UNUSED_VARIABLE(err)
+ gpu_assert(err == gpuSuccess);
+ gpu_assert(buffer != NULL);
+ err = gpuFree(buffer);
+ gpu_assert(err == gpuSuccess);
+ }
+
+ virtual void* scratchpad() const {
+ if (scratch_ == NULL) {
+ scratch_ = allocate(kGpuScratchSize + sizeof(unsigned int));
+ }
+ return scratch_;
+ }
+
+ virtual unsigned int* semaphore() const {
+ if (semaphore_ == NULL) {
+ char* scratch = static_cast<char*>(scratchpad()) + kGpuScratchSize;
+ semaphore_ = reinterpret_cast<unsigned int*>(scratch);
+ gpuError_t err = gpuMemsetAsync(semaphore_, 0, sizeof(unsigned int), *stream_);
+ EIGEN_UNUSED_VARIABLE(err)
+ gpu_assert(err == gpuSuccess);
+ }
+ return semaphore_;
+ }
+
+ private:
+ const gpuStream_t* stream_;
+ int device_;
+ mutable void* scratch_;
+ mutable unsigned int* semaphore_;
+};
+
+struct GpuDevice {
+ // The StreamInterface is not owned: the caller is
+ // responsible for its initialization and eventual destruction.
+ explicit GpuDevice(const StreamInterface* stream) : stream_(stream), max_blocks_(INT_MAX) {
+ eigen_assert(stream);
+ }
+ explicit GpuDevice(const StreamInterface* stream, int num_blocks) : stream_(stream), max_blocks_(num_blocks) {
+ eigen_assert(stream);
+ }
+ // TODO(bsteiner): This is an internal API, we should not expose it.
+ EIGEN_STRONG_INLINE const gpuStream_t& stream() const {
+ return stream_->stream();
+ }
+
+ EIGEN_STRONG_INLINE void* allocate(size_t num_bytes) const {
+ return stream_->allocate(num_bytes);
+ }
+
+ EIGEN_STRONG_INLINE void deallocate(void* buffer) const {
+ stream_->deallocate(buffer);
+ }
+
+ EIGEN_STRONG_INLINE void* allocate_temp(size_t num_bytes) const {
+ return stream_->allocate(num_bytes);
+ }
+
+ EIGEN_STRONG_INLINE void deallocate_temp(void* buffer) const {
+ stream_->deallocate(buffer);
+ }
+
+
+ EIGEN_STRONG_INLINE void* scratchpad() const {
+ return stream_->scratchpad();
+ }
+
+ EIGEN_STRONG_INLINE unsigned int* semaphore() const {
+ return stream_->semaphore();
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpy(void* dst, const void* src, size_t n) const {
+#ifndef EIGEN_GPU_COMPILE_PHASE
+ gpuError_t err = gpuMemcpyAsync(dst, src, n, gpuMemcpyDeviceToDevice,
+ stream_->stream());
+ EIGEN_UNUSED_VARIABLE(err)
+ gpu_assert(err == gpuSuccess);
+#else
+ EIGEN_UNUSED_VARIABLE(dst);
+ EIGEN_UNUSED_VARIABLE(src);
+ EIGEN_UNUSED_VARIABLE(n);
+ eigen_assert(false && "The default device should be used instead to generate kernel code");
+#endif
+ }
+
+ EIGEN_STRONG_INLINE void memcpyHostToDevice(void* dst, const void* src, size_t n) const {
+ gpuError_t err =
+ gpuMemcpyAsync(dst, src, n, gpuMemcpyHostToDevice, stream_->stream());
+ EIGEN_UNUSED_VARIABLE(err)
+ gpu_assert(err == gpuSuccess);
+ }
+
+ EIGEN_STRONG_INLINE void memcpyDeviceToHost(void* dst, const void* src, size_t n) const {
+ gpuError_t err =
+ gpuMemcpyAsync(dst, src, n, gpuMemcpyDeviceToHost, stream_->stream());
+ EIGEN_UNUSED_VARIABLE(err)
+ gpu_assert(err == gpuSuccess);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memset(void* buffer, int c, size_t n) const {
+#ifndef EIGEN_GPU_COMPILE_PHASE
+ gpuError_t err = gpuMemsetAsync(buffer, c, n, stream_->stream());
+ EIGEN_UNUSED_VARIABLE(err)
+ gpu_assert(err == gpuSuccess);
+#else
+ eigen_assert(false && "The default device should be used instead to generate kernel code");
+#endif
+ }
+
+ EIGEN_STRONG_INLINE size_t numThreads() const {
+ // FIXME
+ return 32;
+ }
+
+ EIGEN_STRONG_INLINE size_t firstLevelCacheSize() const {
+ // FIXME
+ return 48*1024;
+ }
+
+ EIGEN_STRONG_INLINE size_t lastLevelCacheSize() const {
+ // We won't try to take advantage of the l2 cache for the time being, and
+ // there is no l3 cache on hip/cuda devices.
+ return firstLevelCacheSize();
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void synchronize() const {
+#if defined(EIGEN_GPUCC) && !defined(EIGEN_GPU_COMPILE_PHASE)
+ gpuError_t err = gpuStreamSynchronize(stream_->stream());
+ if (err != gpuSuccess) {
+ std::cerr << "Error detected in GPU stream: "
+ << gpuGetErrorString(err)
+ << std::endl;
+ gpu_assert(err == gpuSuccess);
+ }
+#else
+ gpu_assert(false && "The default device should be used instead to generate kernel code");
+#endif
+ }
+
+ EIGEN_STRONG_INLINE int getNumGpuMultiProcessors() const {
+ return stream_->deviceProperties().multiProcessorCount;
+ }
+ EIGEN_STRONG_INLINE int maxGpuThreadsPerBlock() const {
+ return stream_->deviceProperties().maxThreadsPerBlock;
+ }
+ EIGEN_STRONG_INLINE int maxGpuThreadsPerMultiProcessor() const {
+ return stream_->deviceProperties().maxThreadsPerMultiProcessor;
+ }
+ EIGEN_STRONG_INLINE int sharedMemPerBlock() const {
+ return stream_->deviceProperties().sharedMemPerBlock;
+ }
+ EIGEN_STRONG_INLINE int majorDeviceVersion() const {
+ return stream_->deviceProperties().major;
+ }
+ EIGEN_STRONG_INLINE int minorDeviceVersion() const {
+ return stream_->deviceProperties().minor;
+ }
+
+ EIGEN_STRONG_INLINE int maxBlocks() const {
+ return max_blocks_;
+ }
+
+ // This function checks if the GPU runtime recorded an error for the
+ // underlying stream device.
+ inline bool ok() const {
+#ifdef EIGEN_GPUCC
+ gpuError_t error = gpuStreamQuery(stream_->stream());
+ return (error == gpuSuccess) || (error == gpuErrorNotReady);
+#else
+ return false;
+#endif
+ }
+
+ private:
+ const StreamInterface* stream_;
+ int max_blocks_;
+};
+
+#if defined(EIGEN_HIPCC)
+
+#define LAUNCH_GPU_KERNEL(kernel, gridsize, blocksize, sharedmem, device, ...) \
+ hipLaunchKernelGGL(kernel, dim3(gridsize), dim3(blocksize), (sharedmem), (device).stream(), __VA_ARGS__); \
+ gpu_assert(hipGetLastError() == hipSuccess);
+
+#else
+
+#define LAUNCH_GPU_KERNEL(kernel, gridsize, blocksize, sharedmem, device, ...) \
+ (kernel) <<< (gridsize), (blocksize), (sharedmem), (device).stream() >>> (__VA_ARGS__); \
+ gpu_assert(cudaGetLastError() == cudaSuccess);
+
+#endif
+
+// FIXME: Should be device and kernel specific.
+#ifdef EIGEN_GPUCC
+static EIGEN_DEVICE_FUNC inline void setGpuSharedMemConfig(gpuSharedMemConfig config) {
+#ifndef EIGEN_GPU_COMPILE_PHASE
+ gpuError_t status = gpuDeviceSetSharedMemConfig(config);
+ EIGEN_UNUSED_VARIABLE(status)
+ gpu_assert(status == gpuSuccess);
+#else
+ EIGEN_UNUSED_VARIABLE(config)
+#endif
+}
+#endif
+
+} // end namespace Eigen
+
+// undefine all the gpu* macros we defined at the beginning of the file
+#include "TensorGpuHipCudaUndefines.h"
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_DEVICE_GPU_H
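For orientation, here is a minimal host-side sketch of how the pieces added above compose, assuming EIGEN_USE_GPU and an nvcc/hipcc build. scale_kernel is a hypothetical kernel (not part of this patch), and the CUDA launch syntax stands in for the LAUNCH_GPU_KERNEL helper defined near the end of the file:

#define EIGEN_USE_GPU
#include <unsupported/Eigen/CXX11/Tensor>

// Hypothetical kernel, not part of this patch.
template <typename T>
__global__ void scale_kernel(T* data, int n, T factor) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) data[i] *= factor;
}

void scale_on_gpu(float* host, int n) {
  Eigen::GpuStreamDevice stream;      // default stream on the current device
  Eigen::GpuDevice device(&stream);   // the device does not own the stream
  float* d = static_cast<float*>(device.allocate(n * sizeof(float)));
  device.memcpyHostToDevice(d, host, n * sizeof(float));
  scale_kernel<float><<<(n + 255) / 256, 256, 0, device.stream()>>>(d, n, 2.0f);
  device.memcpyDeviceToHost(host, d, n * sizeof(float));
  device.synchronize();               // all of the calls above are asynchronous
  device.deallocate(d);
}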
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h b/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h
index e209799bb..e7beb2c82 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h
@@ -14,10 +14,41 @@
#if defined(EIGEN_USE_SYCL) && !defined(EIGEN_CXX11_TENSOR_TENSOR_DEVICE_SYCL_H)
#define EIGEN_CXX11_TENSOR_TENSOR_DEVICE_SYCL_H
+template<size_t Align> struct CheckAlignStatically {
+ static const bool Val= (((Align&(Align-1))==0) && (Align >= sizeof(void *)));
+};
+template <bool IsAligned, size_t Align>
+struct Conditional_Allocate {
+
+ EIGEN_ALWAYS_INLINE static void* conditional_allocate(std::size_t elements) {
+ return aligned_alloc(Align, elements);
+ }
+};
+template <size_t Align>
+struct Conditional_Allocate<false, Align> {
+
+ EIGEN_ALWAYS_INLINE static void* conditional_allocate(std::size_t elements){
+ return malloc(elements);
+ }
+};
+template <typename Scalar, size_t Align = EIGEN_MAX_ALIGN_BYTES, class Allocator = std::allocator<Scalar>>
+struct SyclAllocator {
+ typedef Scalar value_type;
+ typedef typename std::allocator_traits<Allocator>::pointer pointer;
+ typedef typename std::allocator_traits<Allocator>::size_type size_type;
+
+ SyclAllocator( ){};
+ Scalar* allocate(std::size_t elements) {
+ return static_cast<Scalar*>(Conditional_Allocate<CheckAlignStatically<Align>::Val, Align>::conditional_allocate(elements));
+ }
+ void deallocate(Scalar * p, std::size_t size) { EIGEN_UNUSED_VARIABLE(size); free(p); }
+};
namespace Eigen {
- #define ConvertToActualTypeSycl(Scalar, buf_acc) reinterpret_cast<typename cl::sycl::global_ptr<Scalar>::pointer_t>((&(*buf_acc.get_pointer())))
+#define ConvertToActualTypeSycl(Scalar, buf_acc) static_cast<Scalar*>(static_cast<void*>(((buf_acc.get_pointer().get()))))
+#define ConvertToActualSyclOffset(Scalar, offset) offset/sizeof(Scalar)
+
template <typename Scalar, typename read_accessor, typename write_accessor> class MemCopyFunctor {
public:
@@ -40,47 +71,58 @@ namespace Eigen {
size_t m_offset;
};
+template<typename AccType>
struct memsetkernelFunctor{
- typedef cl::sycl::accessor<uint8_t, 1, cl::sycl::access::mode::discard_write, cl::sycl::access::target::global_buffer> AccType;
AccType m_acc;
+ const ptrdiff_t buff_offset;
const size_t m_rng, m_c;
- memsetkernelFunctor(AccType acc, const size_t rng, const size_t c):m_acc(acc), m_rng(rng), m_c(c){}
+ memsetkernelFunctor(AccType acc, const ptrdiff_t buff_offset_, const size_t rng, const size_t c):m_acc(acc), buff_offset(buff_offset_), m_rng(rng), m_c(c){}
void operator()(cl::sycl::nd_item<1> itemID) {
auto globalid=itemID.get_global_linear_id();
- if (globalid< m_rng) m_acc[globalid] = m_c;
+ if (globalid< m_rng) m_acc[globalid + buff_offset] = m_c;
}
};
+struct memsetCghFunctor{
+ cl::sycl::buffer<uint8_t, 1, SyclAllocator<uint8_t, EIGEN_MAX_ALIGN_BYTES> >& m_buf;
+ const ptrdiff_t& buff_offset;
+ const size_t& rng , GRange, tileSize;
+ const int &c;
+ memsetCghFunctor(cl::sycl::buffer<uint8_t, 1, SyclAllocator<uint8_t, EIGEN_MAX_ALIGN_BYTES> >& buff, const ptrdiff_t& buff_offset_, const size_t& rng_, const size_t& GRange_, const size_t& tileSize_, const int& c_)
+ :m_buf(buff), buff_offset(buff_offset_), rng(rng_), GRange(GRange_), tileSize(tileSize_), c(c_){}
+
+ void operator()(cl::sycl::handler &cgh) const {
+ auto buf_acc = m_buf.template get_access<cl::sycl::access::mode::write, cl::sycl::access::target::global_buffer>(cgh);
+ typedef decltype(buf_acc) AccType;
+ cgh.parallel_for(cl::sycl::nd_range<1>(cl::sycl::range<1>(GRange), cl::sycl::range<1>(tileSize)), memsetkernelFunctor<AccType>(buf_acc, buff_offset, rng, c));
+ }
+};
+
+// get_devices returns all the available OpenCL devices. Either use a device_selector or exclude devices that ComputeCpp does not support (AMD OpenCL for CPU and Intel GPU).
EIGEN_STRONG_INLINE auto get_sycl_supported_devices()->decltype(cl::sycl::device::get_devices()){
- auto devices = cl::sycl::device::get_devices();
- std::vector<cl::sycl::device>::iterator it =devices.begin();
- while(it!=devices.end()) {
- /// get_devices returns all the available opencl devices. Either use device_selector or exclude devices that computecpp does not support (AMD OpenCL for CPU )
- auto s= (*it).template get_info<cl::sycl::info::device::vendor>();
- std::transform(s.begin(), s.end(), s.begin(), ::tolower);
- if((*it).is_cpu() && s.find("amd")!=std::string::npos && s.find("apu") == std::string::npos){ // remove amd cpu as it is not supported by computecpp allow APUs
- it=devices.erase(it);
- }
- else{
- ++it;
+std::vector<cl::sycl::device> supported_devices;
+auto platform_list = cl::sycl::platform::get_platforms();
+for(const auto& platform : platform_list){
+ auto device_list = platform.get_devices();
+ auto platform_name =platform.template get_info<cl::sycl::info::platform::name>();
+ std::transform(platform_name.begin(), platform_name.end(), platform_name.begin(), ::tolower);
+ for(const auto& device : device_list){
+ auto vendor = device.template get_info<cl::sycl::info::device::vendor>();
+ std::transform(vendor.begin(), vendor.end(), vendor.begin(), ::tolower);
+ bool unsupported_condition = (device.is_cpu() && platform_name.find("amd")!=std::string::npos && vendor.find("apu") == std::string::npos) ||
+ (device.is_gpu() && platform_name.find("intel")!=std::string::npos);
+ if(!unsupported_condition){
+ std::cout << "Platform name "<< platform_name << std::endl;
+ supported_devices.push_back(device);
}
}
- return devices;
+}
+return supported_devices;
}
-struct QueueInterface {
- /// class members:
- bool exception_caught_ = false;
-
- mutable std::mutex mutex_;
-
- /// std::map is the container used to make sure that we create only one buffer
- /// per pointer. The lifespan of the buffer now depends on the lifespan of SyclDevice.
- /// If a non-read-only pointer is needed to be accessed on the host we should manually deallocate it.
- mutable std::map<const uint8_t *, cl::sycl::buffer<uint8_t, 1>> buffer_map;
- /// sycl queue
- mutable cl::sycl::queue m_queue;
+class QueueInterface {
+public:
/// creating device by using cl::sycl::selector or cl::sycl::device both are the same and can be captured through dev_Selector typename
/// SyclStreamDevice is not owned. it is the caller's responsibility to destroy it.
template<typename dev_Selector> explicit QueueInterface(const dev_Selector& s):
@@ -116,11 +158,11 @@ m_queue(cl::sycl::queue(s, [&](cl::sycl::exception_list l) {
/// use this pointer as a key in our buffer_map and we make sure that we dedicate only one buffer only for this pointer.
/// The device pointer would be deleted by calling deallocate function.
EIGEN_STRONG_INLINE void* allocate(size_t num_bytes) const {
- auto buf = cl::sycl::buffer<uint8_t,1>(cl::sycl::range<1>(num_bytes));
+ std::lock_guard<std::mutex> lock(mutex_);
+ auto buf = cl::sycl::buffer<uint8_t,1, SyclAllocator<uint8_t, EIGEN_MAX_ALIGN_BYTES> >(cl::sycl::range<1>(num_bytes));
auto ptr =buf.get_access<cl::sycl::access::mode::discard_write, cl::sycl::access::target::host_buffer>().get_pointer();
buf.set_final_data(nullptr);
- std::lock_guard<std::mutex> lock(mutex_);
- buffer_map.insert(std::pair<const uint8_t *, cl::sycl::buffer<uint8_t, 1>>(static_cast<const uint8_t*>(ptr),buf));
+ buffer_map.insert(std::pair<const uint8_t *, cl::sycl::buffer<uint8_t, 1, SyclAllocator<uint8_t, EIGEN_MAX_ALIGN_BYTES> > >(static_cast<const uint8_t*>(ptr),buf));
return static_cast<void*>(ptr);
}
@@ -138,62 +180,113 @@ m_queue(cl::sycl::queue(s, [&](cl::sycl::exception_list l) {
std::lock_guard<std::mutex> lock(mutex_);
buffer_map.clear();
}
-
- EIGEN_STRONG_INLINE std::map<const uint8_t *, cl::sycl::buffer<uint8_t,1>>::iterator find_buffer(const void* ptr) const {
- std::lock_guard<std::mutex> lock(mutex_);
- auto it1 = buffer_map.find(static_cast<const uint8_t*>(ptr));
- if (it1 != buffer_map.end()){
- return it1;
- }
- else{
- for(std::map<const uint8_t *, cl::sycl::buffer<uint8_t,1>>::iterator it=buffer_map.begin(); it!=buffer_map.end(); ++it){
- auto size = it->second.get_size();
- if((it->first < (static_cast<const uint8_t*>(ptr))) && ((static_cast<const uint8_t*>(ptr)) < (it->first + size)) ) return it;
- }
- }
- std::cerr << "No sycl buffer found. Make sure that you have allocated memory for your buffer by calling malloc-ed function."<< std::endl;
- abort();
+ /// The memcpyHostToDevice is used to copy data from a host pointer to a device-only pointer. Using the device
+ /// pointer as a key we find the corresponding sycl buffer, wrap the host source in a temporary buffer created
+ /// with map_allocator, and launch a kernel that copies from the host-mapped buffer into the device buffer.
+ /// The host data is transferred to the device the first time the copy kernel accesses the mapped buffer.
+ /// In this case we can separate the actual kernel execution from the data transfer, which is required for benchmarking.
+ /// Also, this is faster as it uses the map_allocator instead of a host-side memcpy.
+ template<typename Index> EIGEN_STRONG_INLINE void memcpyHostToDevice(Index *dst, const Index *src, size_t n) const {
+ auto it =find_buffer(dst);
+ auto offset =static_cast<const uint8_t*>(static_cast<const void*>(dst))- it->first;
+ offset/=sizeof(Index);
+ size_t rng, GRange, tileSize;
+ parallel_for_setup(n/sizeof(Index), tileSize, rng, GRange);
+ auto src_buf = cl::sycl::buffer<uint8_t, 1, cl::sycl::map_allocator<uint8_t> >(static_cast<uint8_t*>(static_cast<void*>(const_cast<Index*>(src))), cl::sycl::range<1>(n));
+ m_queue.submit([&](cl::sycl::handler &cgh) {
+ auto dst_acc= it->second.template get_access<cl::sycl::access::mode::write, cl::sycl::access::target::global_buffer>(cgh);
+ auto src_acc =src_buf.template get_access<cl::sycl::access::mode::read, cl::sycl::access::target::global_buffer>(cgh);
+ typedef decltype(src_acc) read_accessor;
+ typedef decltype(dst_acc) write_accessor;
+ cgh.parallel_for( cl::sycl::nd_range<1>(cl::sycl::range<1>(GRange), cl::sycl::range<1>(tileSize)), MemCopyFunctor<Index, read_accessor, write_accessor>(src_acc, dst_acc, rng, offset, 0));
+ });
+ synchronize();
}
-
- // This function checks if the runtime recorded an error for the
- // underlying stream device.
- EIGEN_STRONG_INLINE bool ok() const {
- if (!exception_caught_) {
- m_queue.wait_and_throw();
- }
- return !exception_caught_;
+ /// The memcpyDeviceToHost is used to copy data from the device to the host. Here, in order to avoid copying the data twice, we create a sycl
+ /// buffer with map_allocator for the destination pointer and take a discard_write accessor on it. The lifespan of this buffer is bound to the
+ /// lifespan of the memcpyDeviceToHost call. We launch a kernel that copies the data from the device-only source buffer to the destination
+ /// buffer with map_allocator on the GPU in parallel. At the end of the function call the destination buffer is destroyed and the data
+ /// becomes available through the dst pointer via the fast copy path (map_allocator). This way we make sure that the data is copied back
+ /// to the CPU only once per function call.
+ template<typename Index> EIGEN_STRONG_INLINE void memcpyDeviceToHost(void *dst, const Index *src, size_t n) const {
+ auto it =find_buffer(src);
+ auto offset =static_cast<const uint8_t*>(static_cast<const void*>(src))- it->first;
+ offset/=sizeof(Index);
+ size_t rng, GRange, tileSize;
+ parallel_for_setup(n/sizeof(Index), tileSize, rng, GRange);
+ auto dest_buf = cl::sycl::buffer<uint8_t, 1, cl::sycl::map_allocator<uint8_t> >(static_cast<uint8_t*>(dst), cl::sycl::range<1>(n));
+ m_queue.submit([&](cl::sycl::handler &cgh) {
+ auto src_acc= it->second.template get_access<cl::sycl::access::mode::read, cl::sycl::access::target::global_buffer>(cgh);
+ auto dst_acc =dest_buf.template get_access<cl::sycl::access::mode::discard_write, cl::sycl::access::target::global_buffer>(cgh);
+ typedef decltype(src_acc) read_accessor;
+ typedef decltype(dst_acc) write_accessor;
+ cgh.parallel_for( cl::sycl::nd_range<1>(cl::sycl::range<1>(GRange), cl::sycl::range<1>(tileSize)), MemCopyFunctor<Index, read_accessor, write_accessor>(src_acc, dst_acc, rng, 0, offset));
+ });
+ synchronize();
}
- // destructor
- ~QueueInterface() { buffer_map.clear(); }
-};
+ /// the memcpy function
+ template<typename Index> EIGEN_STRONG_INLINE void memcpy(void *dst, const Index *src, size_t n) const {
+ auto it1 = find_buffer(static_cast<const void*>(src));
+ auto it2 = find_buffer(dst);
+ auto offset= (static_cast<const uint8_t*>(static_cast<const void*>(src))) - it1->first;
+ auto i= (static_cast<const uint8_t*>(dst)) - it2->first;
+ offset/=sizeof(Index);
+ i/=sizeof(Index);
+ size_t rng, GRange, tileSize;
+ parallel_for_setup(n/sizeof(Index), tileSize, rng, GRange);
+ m_queue.submit([&](cl::sycl::handler &cgh) {
+ auto src_acc =it1->second.template get_access<cl::sycl::access::mode::read, cl::sycl::access::target::global_buffer>(cgh);
+ auto dst_acc =it2->second.template get_access<cl::sycl::access::mode::write, cl::sycl::access::target::global_buffer>(cgh);
+ typedef decltype(src_acc) read_accessor;
+ typedef decltype(dst_acc) write_accessor;
+ cgh.parallel_for(cl::sycl::nd_range<1>(cl::sycl::range<1>(GRange), cl::sycl::range<1>(tileSize)), MemCopyFunctor<Index, read_accessor, write_accessor>(src_acc, dst_acc, rng, i, offset));
+ });
+ synchronize();
+ }
-struct SyclDevice {
- // class member.
- QueueInterface* m_queue_stream;
- /// QueueInterface is not owned. it is the caller's responsibility to destroy it.
- explicit SyclDevice(QueueInterface* queue_stream) : m_queue_stream(queue_stream){}
+ EIGEN_STRONG_INLINE void memset(void *data, int c, size_t n) const {
+ size_t rng, GRange, tileSize;
+ parallel_for_setup(n, tileSize, rng, GRange);
+ auto it1 = find_buffer(static_cast<const void*>(data));
+ ptrdiff_t buff_offset= (static_cast<const uint8_t*>(data)) - it1->first;
+ m_queue.submit(memsetCghFunctor(it1->second, buff_offset, rng, GRange, tileSize, c ));
+ synchronize();
+ }
/// Creation of sycl accessor for a buffer. This function first tries to find
/// the buffer in the buffer_map. If found it gets the accessor from it, if not,
/// the function then adds an entry by creating a sycl buffer for that particular pointer.
template <cl::sycl::access::mode AcMd> EIGEN_STRONG_INLINE cl::sycl::accessor<uint8_t, 1, AcMd, cl::sycl::access::target::global_buffer>
get_sycl_accessor(cl::sycl::handler &cgh, const void* ptr) const {
- return (get_sycl_buffer(ptr).template get_access<AcMd, cl::sycl::access::target::global_buffer>(cgh));
+ return (find_buffer(ptr)->second.template get_access<AcMd, cl::sycl::access::target::global_buffer>(cgh));
}
/// Accessing the created sycl device buffer for the device pointer
- EIGEN_STRONG_INLINE cl::sycl::buffer<uint8_t, 1>& get_sycl_buffer(const void * ptr) const {
- return m_queue_stream->find_buffer(ptr)->second;
+ EIGEN_STRONG_INLINE cl::sycl::buffer<uint8_t, 1, SyclAllocator<uint8_t, EIGEN_MAX_ALIGN_BYTES> >& get_sycl_buffer(const void * ptr) const {
+ return find_buffer(ptr)->second;
+ }
+
+ EIGEN_STRONG_INLINE ptrdiff_t get_offset(const void *ptr) const {
+ return (static_cast<const uint8_t*>(ptr))-(find_buffer(ptr)->first);
+ }
+
+ EIGEN_STRONG_INLINE void synchronize() const {
+ m_queue.wait_and_throw(); //pass
+ }
+
+ EIGEN_STRONG_INLINE void asynchronousExec() const {
+ /// FIXME: currently there is a race condition regarding the async scheduler.
+ //sycl_queue().throw_asynchronous(); // FIXME: does not pass. Temporarily disabled.
+ m_queue.wait_and_throw(); //pass
}
- /// This is used to prepare the number of threads and also the number of threads per block for sycl kernels
template<typename Index>
EIGEN_STRONG_INLINE void parallel_for_setup(Index n, Index &tileSize, Index &rng, Index &GRange) const {
- tileSize =static_cast<Index>(sycl_queue().get_device(). template get_info<cl::sycl::info::device::max_work_group_size>());
- auto s= sycl_queue().get_device().template get_info<cl::sycl::info::device::vendor>();
+ tileSize =static_cast<Index>(m_queue.get_device(). template get_info<cl::sycl::info::device::max_work_group_size>());
+ auto s= m_queue.get_device().template get_info<cl::sycl::info::device::vendor>();
std::transform(s.begin(), s.end(), s.begin(), ::tolower);
- if(sycl_queue().get_device().is_cpu()){ // intel doesnot allow to use max workgroup size
+ if(m_queue.get_device().is_cpu()){ // Intel doesn't allow using the max workgroup size
tileSize=std::min(static_cast<Index>(256), static_cast<Index>(tileSize));
}
rng = n;
@@ -210,7 +303,7 @@ struct SyclDevice {
template<typename Index>
EIGEN_STRONG_INLINE void parallel_for_setup(Index dim0, Index dim1, Index &tileSize0, Index &tileSize1, Index &rng0, Index &rng1, Index &GRange0, Index &GRange1) const {
Index max_workgroup_Size = static_cast<Index>(maxSyclThreadsPerBlock());
- if(sycl_queue().get_device().is_cpu()){ // intel doesnot allow to use max workgroup size
+ if(m_queue.get_device().is_cpu()){ // Intel doesn't allow using the max workgroup size
max_workgroup_Size=std::min(static_cast<Index>(256), static_cast<Index>(max_workgroup_Size));
}
Index pow_of_2 = static_cast<Index>(std::log2(max_workgroup_Size));
@@ -234,13 +327,11 @@ struct SyclDevice {
}
}
-
-
/// This is used to prepare the number of threads and also the number of threads per block for sycl kernels
template<typename Index>
EIGEN_STRONG_INLINE void parallel_for_setup(Index dim0, Index dim1,Index dim2, Index &tileSize0, Index &tileSize1, Index &tileSize2, Index &rng0, Index &rng1, Index &rng2, Index &GRange0, Index &GRange1, Index &GRange2) const {
Index max_workgroup_Size = static_cast<Index>(maxSyclThreadsPerBlock());
- if(sycl_queue().get_device().is_cpu()){ // intel doesnot allow to use max workgroup size
+ if(m_queue.get_device().is_cpu()){ // Intel doesn't allow using the max workgroup size
max_workgroup_Size=std::min(static_cast<Index>(256), static_cast<Index>(max_workgroup_Size));
}
Index pow_of_2 = static_cast<Index>(std::log2(max_workgroup_Size));
@@ -273,6 +364,108 @@ struct SyclDevice {
if (xMode != 0) GRange0 += static_cast<Index>(tileSize0 - xMode);
}
}
+
+ EIGEN_STRONG_INLINE unsigned long getNumSyclMultiProcessors() const {
+ return m_queue.get_device(). template get_info<cl::sycl::info::device::max_compute_units>();
+ }
+
+ EIGEN_STRONG_INLINE unsigned long maxSyclThreadsPerBlock() const {
+ return m_queue.get_device(). template get_info<cl::sycl::info::device::max_work_group_size>();
+ }
+
+ /// Not needed for sycl; it should act the same as the CPU version
+ EIGEN_STRONG_INLINE int majorDeviceVersion() const { return 1; }
+
+ EIGEN_STRONG_INLINE unsigned long maxSyclThreadsPerMultiProcessor() const {
+ // OpenCL doesn't have such a concept
+ return 2;
+ }
+
+ EIGEN_STRONG_INLINE size_t sharedMemPerBlock() const {
+ return m_queue.get_device(). template get_info<cl::sycl::info::device::local_mem_size>();
+ }
+
+ EIGEN_STRONG_INLINE cl::sycl::queue& sycl_queue() const { return m_queue;}
+
+ // This function checks if the runtime recorded an error for the
+ // underlying stream device.
+ EIGEN_STRONG_INLINE bool ok() const {
+ if (!exception_caught_) {
+ m_queue.wait_and_throw();
+ }
+ return !exception_caught_;
+ }
+
+ // destructor
+ ~QueueInterface() { buffer_map.clear(); }
+
+private:
+ /// class members:
+ bool exception_caught_ = false;
+
+ mutable std::mutex mutex_;
+
+ /// std::map is the container used to make sure that we create only one buffer
+ /// per pointer. The lifespan of the buffer now depends on the lifespan of SyclDevice.
+ /// If a non-read-only pointer is needed to be accessed on the host we should manually deallocate it.
+ mutable std::map<const uint8_t *, cl::sycl::buffer<uint8_t, 1, SyclAllocator<uint8_t, EIGEN_MAX_ALIGN_BYTES> > > buffer_map;
+ /// sycl queue
+ mutable cl::sycl::queue m_queue;
+
+ EIGEN_STRONG_INLINE std::map<const uint8_t *, cl::sycl::buffer<uint8_t,1, SyclAllocator<uint8_t, EIGEN_MAX_ALIGN_BYTES> > >::iterator find_buffer(const void* ptr) const {
+ std::lock_guard<std::mutex> lock(mutex_);
+ auto it1 = buffer_map.find(static_cast<const uint8_t*>(ptr));
+ if (it1 != buffer_map.end()){
+ return it1;
+ }
+ else{
+ for(std::map<const uint8_t *, cl::sycl::buffer<uint8_t,1, SyclAllocator<uint8_t, EIGEN_MAX_ALIGN_BYTES> > >::iterator it=buffer_map.begin(); it!=buffer_map.end(); ++it){
+ auto size = it->second.get_size();
+ if((it->first < (static_cast<const uint8_t*>(ptr))) && ((static_cast<const uint8_t*>(ptr)) < (it->first + size)) ) return it;
+ }
+ }
+ std::cerr << "No sycl buffer found. Make sure that you have allocated memory for your buffer by calling the allocate function." << std::endl;
+ abort();
+ }
+};
+
+// Here is a sycl device struct which accepts the sycl queue interface
+// as an input.
+struct SyclDevice {
+ // class member.
+ QueueInterface* m_queue_stream;
+ /// QueueInterface is not owned. It is the caller's responsibility to destroy it.
+ explicit SyclDevice(QueueInterface* queue_stream) : m_queue_stream(queue_stream){}
+
+ // get sycl accessor
+ template <cl::sycl::access::mode AcMd> EIGEN_STRONG_INLINE cl::sycl::accessor<uint8_t, 1, AcMd, cl::sycl::access::target::global_buffer>
+ get_sycl_accessor(cl::sycl::handler &cgh, const void* ptr) const {
+ return m_queue_stream->template get_sycl_accessor<AcMd>(cgh, ptr);
+ }
+
+ /// Accessing the created sycl device buffer for the device pointer
+ EIGEN_STRONG_INLINE cl::sycl::buffer<uint8_t, 1, SyclAllocator<uint8_t, EIGEN_MAX_ALIGN_BYTES> >& get_sycl_buffer(const void * ptr) const {
+ return m_queue_stream->get_sycl_buffer(ptr);
+ }
+
+ /// This is used to prepare the number of threads and also the number of threads per block for sycl kernels
+ template<typename Index>
+ EIGEN_STRONG_INLINE void parallel_for_setup(Index n, Index &tileSize, Index &rng, Index &GRange) const {
+ m_queue_stream->parallel_for_setup(n, tileSize, rng, GRange);
+ }
+
+ /// This is used to prepare the number of threads and also the number of threads per block for sycl kernels
+ template<typename Index>
+ EIGEN_STRONG_INLINE void parallel_for_setup(Index dim0, Index dim1, Index &tileSize0, Index &tileSize1, Index &rng0, Index &rng1, Index &GRange0, Index &GRange1) const {
+ m_queue_stream->parallel_for_setup(dim0, dim1, tileSize0, tileSize1, rng0, rng1, GRange0, GRange1);
+ }
+
+ /// This is used to prepare the number of threads and also the number of threads per block for sycl kernels
+ template<typename Index>
+ EIGEN_STRONG_INLINE void parallel_for_setup(Index dim0, Index dim1,Index dim2, Index &tileSize0, Index &tileSize1, Index &tileSize2, Index &rng0, Index &rng1, Index &rng2, Index &GRange0, Index &GRange1, Index &GRange2) const {
+ m_queue_stream->parallel_for_setup(dim0, dim1, dim2, tileSize0, tileSize1, tileSize2, rng0, rng1, rng2, GRange0, GRange1, GRange2);
+
+ }
/// allocate device memory
EIGEN_STRONG_INLINE void *allocate(size_t num_bytes) const {
return m_queue_stream->allocate(num_bytes);
@@ -287,78 +480,27 @@ struct SyclDevice {
/// the memcpy function
template<typename Index> EIGEN_STRONG_INLINE void memcpy(void *dst, const Index *src, size_t n) const {
- auto it1 = m_queue_stream->find_buffer(static_cast<const void*>(src));
- auto it2 = m_queue_stream->find_buffer(dst);
- auto offset= (static_cast<const uint8_t*>(static_cast<const void*>(src))) - it1->first;
- auto i= (static_cast<const uint8_t*>(dst)) - it2->first;
- offset/=sizeof(Index);
- i/=sizeof(Index);
- size_t rng, GRange, tileSize;
- parallel_for_setup(n/sizeof(Index), tileSize, rng, GRange);
- sycl_queue().submit([&](cl::sycl::handler &cgh) {
- auto src_acc =it1->second.template get_access<cl::sycl::access::mode::read, cl::sycl::access::target::global_buffer>(cgh);
- auto dst_acc =it2->second.template get_access<cl::sycl::access::mode::write, cl::sycl::access::target::global_buffer>(cgh);
- typedef decltype(src_acc) read_accessor;
- typedef decltype(dst_acc) write_accessor;
- cgh.parallel_for(cl::sycl::nd_range<1>(cl::sycl::range<1>(GRange), cl::sycl::range<1>(tileSize)), MemCopyFunctor<Index, read_accessor, write_accessor>(src_acc, dst_acc, rng, i, offset));
- });
- synchronize();
+ m_queue_stream->memcpy(dst,src,n);
}
- /// The memcpyHostToDevice is used to copy the device only pointer to a host pointer. Using the device
- /// pointer created as a key we find the sycl buffer and get the host accessor with discard_write mode
- /// on it. Using a discard_write accessor guarantees that we do not bring back the current value of the
- /// buffer to host. Then we use the memcpy to copy the data to the host accessor. The first time that
- /// this buffer is accessed, the data will be copied to the device.
+ EIGEN_STRONG_INLINE ptrdiff_t get_offset(const void *ptr) const {
+ return m_queue_stream->get_offset(ptr);
+
+ }
+ /// memcpyHostToDevice: copy data from a host pointer to a device pointer.
template<typename Index> EIGEN_STRONG_INLINE void memcpyHostToDevice(Index *dst, const Index *src, size_t n) const {
- auto host_acc= get_sycl_buffer(dst). template get_access<cl::sycl::access::mode::discard_write, cl::sycl::access::target::host_buffer>();
- ::memcpy(host_acc.get_pointer(), src, n);
+ m_queue_stream->memcpyHostToDevice(dst,src,n);
}
- /// The memcpyDeviceToHost is used to copy the data from host to device. Here, in order to avoid double copying the data. We create a sycl
- /// buffer with map_allocator for the destination pointer with a discard_write accessor on it. The lifespan of the buffer is bound to the
- /// lifespan of the memcpyDeviceToHost function. We create a kernel to copy the data, from the device- only source buffer to the destination
- /// buffer with map_allocator on the gpu in parallel. At the end of the function call the destination buffer would be destroyed and the data
- /// would be available on the dst pointer using fast copy technique (map_allocator). In this case we can make sure that we copy the data back
- /// to the cpu only once per function call.
+ /// memcpyDeviceToHost: copy data from a device pointer to a host pointer.
template<typename Index> EIGEN_STRONG_INLINE void memcpyDeviceToHost(void *dst, const Index *src, size_t n) const {
- auto it = m_queue_stream->find_buffer(src);
- auto offset =static_cast<const uint8_t*>(static_cast<const void*>(src))- it->first;
- offset/=sizeof(Index);
- size_t rng, GRange, tileSize;
- parallel_for_setup(n/sizeof(Index), tileSize, rng, GRange);
- // Assuming that the dst is the start of the destination pointer
- auto dest_buf = cl::sycl::buffer<uint8_t, 1, cl::sycl::map_allocator<uint8_t> >(static_cast<uint8_t*>(dst), cl::sycl::range<1>(n));
- sycl_queue().submit([&](cl::sycl::handler &cgh) {
- auto src_acc= it->second.template get_access<cl::sycl::access::mode::read, cl::sycl::access::target::global_buffer>(cgh);
- auto dst_acc =dest_buf.template get_access<cl::sycl::access::mode::discard_write, cl::sycl::access::target::global_buffer>(cgh);
- typedef decltype(src_acc) read_accessor;
- typedef decltype(dst_acc) write_accessor;
- cgh.parallel_for( cl::sycl::nd_range<1>(cl::sycl::range<1>(GRange), cl::sycl::range<1>(tileSize)), MemCopyFunctor<Index, read_accessor, write_accessor>(src_acc, dst_acc, rng, 0, offset));
- });
- synchronize();
+ m_queue_stream->memcpyDeviceToHost(dst,src,n);
}
- /// returning the sycl queue
- EIGEN_STRONG_INLINE cl::sycl::queue& sycl_queue() const { return m_queue_stream->m_queue;}
/// Here is the implementation of memset function on sycl.
EIGEN_STRONG_INLINE void memset(void *data, int c, size_t n) const {
- size_t rng, GRange, tileSize;
- parallel_for_setup(n, tileSize, rng, GRange);
- sycl_queue().submit(memsetCghFunctor(get_sycl_buffer(static_cast<uint8_t*>(static_cast<void*>(data))),rng, GRange, tileSize, c ));
- synchronize();
+ m_queue_stream->memset(data,c,n);
}
-
- struct memsetCghFunctor{
- cl::sycl::buffer<uint8_t, 1>& m_buf;
- const size_t& rng , GRange, tileSize;
- const int &c;
- memsetCghFunctor(cl::sycl::buffer<uint8_t, 1>& buff, const size_t& rng_, const size_t& GRange_, const size_t& tileSize_, const int& c_)
- :m_buf(buff), rng(rng_), GRange(GRange_), tileSize(tileSize_), c(c_){}
-
- void operator()(cl::sycl::handler &cgh) const {
- auto buf_acc = m_buf.template get_access<cl::sycl::access::mode::discard_write, cl::sycl::access::target::global_buffer>(cgh);
- cgh.parallel_for(cl::sycl::nd_range<1>(cl::sycl::range<1>(GRange), cl::sycl::range<1>(tileSize)), memsetkernelFunctor(buf_acc, rng, c));
- }
- };
+ /// returning the sycl queue
+ EIGEN_STRONG_INLINE cl::sycl::queue& sycl_queue() const { return m_queue_stream->sycl_queue();}
EIGEN_STRONG_INLINE size_t firstLevelCacheSize() const {
// FIXME
@@ -367,39 +509,32 @@ struct SyclDevice {
EIGEN_STRONG_INLINE size_t lastLevelCacheSize() const {
// We won't try to take advantage of the l2 cache for the time being, and
- // there is no l3 cache on cuda devices.
+ // there is no l3 cache on sycl devices.
return firstLevelCacheSize();
}
EIGEN_STRONG_INLINE unsigned long getNumSyclMultiProcessors() const {
- return sycl_queue().get_device(). template get_info<cl::sycl::info::device::max_compute_units>();
- // return stream_->deviceProperties().multiProcessorCount;
+ return m_queue_stream->getNumSyclMultiProcessors();
}
EIGEN_STRONG_INLINE unsigned long maxSyclThreadsPerBlock() const {
- return sycl_queue().get_device(). template get_info<cl::sycl::info::device::max_work_group_size>();
-
- // return stream_->deviceProperties().maxThreadsPerBlock;
+ return m_queue_stream->maxSyclThreadsPerBlock();
}
EIGEN_STRONG_INLINE unsigned long maxSyclThreadsPerMultiProcessor() const {
- // OpenCL doesnot have such concept
- return 2;//sycl_queue().get_device(). template get_info<cl::sycl::info::device::max_work_group_size>();
+ // OpenCL doesn't have such concept
+ return m_queue_stream->maxSyclThreadsPerMultiProcessor();
// return stream_->deviceProperties().maxThreadsPerMultiProcessor;
}
EIGEN_STRONG_INLINE size_t sharedMemPerBlock() const {
- return sycl_queue().get_device(). template get_info<cl::sycl::info::device::local_mem_size>();
- // return stream_->deviceProperties().sharedMemPerBlock;
+ return m_queue_stream->sharedMemPerBlock();
}
/// No need for sycl it should act the same as CPU version
- EIGEN_STRONG_INLINE int majorDeviceVersion() const { return 1; }
+ EIGEN_STRONG_INLINE int majorDeviceVersion() const { return m_queue_stream->majorDeviceVersion(); }
EIGEN_STRONG_INLINE void synchronize() const {
- sycl_queue().wait_and_throw(); //pass
+ m_queue_stream->synchronize(); //pass
}
EIGEN_STRONG_INLINE void asynchronousExec() const {
- ///FIXEDME:: currently there is a race condition regarding the asynch scheduler.
- //sycl_queue().throw_asynchronous();// does not pass. Temporarily disabled
- sycl_queue().wait_and_throw(); //pass
-
+ m_queue_stream->asynchronousExec();
}
// This function checks if the runtime recorded an error for the
// underlying stream device.
@@ -407,8 +542,10 @@ struct SyclDevice {
return m_queue_stream->ok();
}
};
-
-
+// This is used as a distinguishable device inside the kernel, as the sycl device class is not standard-layout.
+// It is internal and must not be used by users. This dummy device allows us to specialise the tensor evaluator
+// inside the kernel, so we can have separate evaluations for host and device. This is required for the TensorArgMax operation.
+struct SyclKernelDevice:DefaultDevice{};
} // end namespace Eigen
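Similarly, a minimal sketch of driving the refactored QueueInterface through SyclDevice, assuming EIGEN_USE_SYCL and a ComputeCpp toolchain; the selector and sizes are arbitrary, and deallocate is assumed to forward to QueueInterface::deallocate as in the GPU device:

#define EIGEN_USE_SYCL
#include <unsupported/Eigen/CXX11/Tensor>

void roundtrip(float* host, size_t n) {
  cl::sycl::gpu_selector selector;        // any supported selector works
  Eigen::QueueInterface queue(selector);  // owns the sycl queue and the buffer map
  Eigen::SyclDevice device(&queue);       // SyclDevice does not own the QueueInterface
  float* d = static_cast<float*>(device.allocate(n * sizeof(float)));
  device.memcpyHostToDevice(d, host, n * sizeof(float));
  device.memset(d, 0, n * sizeof(float));
  device.memcpyDeviceToHost(host, d, n * sizeof(float));
  device.synchronize();
  device.deallocate(d);
}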
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceThreadPool.h b/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceThreadPool.h
index 16180ca69..6fc6688d3 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceThreadPool.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceThreadPool.h
@@ -12,56 +12,6 @@
namespace Eigen {
-// Barrier is an object that allows one or more threads to wait until
-// Notify has been called a specified number of times.
-class Barrier {
- public:
- Barrier(unsigned int count) : state_(count << 1), notified_(false) {
- eigen_assert(((count << 1) >> 1) == count);
- }
- ~Barrier() {
- eigen_assert((state_>>1) == 0);
- }
-
- void Notify() {
- unsigned int v = state_.fetch_sub(2, std::memory_order_acq_rel) - 2;
- if (v != 1) {
- eigen_assert(((v + 2) & ~1) != 0);
- return; // either count has not dropped to 0, or waiter is not waiting
- }
- std::unique_lock<std::mutex> l(mu_);
- eigen_assert(!notified_);
- notified_ = true;
- cv_.notify_all();
- }
-
- void Wait() {
- unsigned int v = state_.fetch_or(1, std::memory_order_acq_rel);
- if ((v >> 1) == 0) return;
- std::unique_lock<std::mutex> l(mu_);
- while (!notified_) {
- cv_.wait(l);
- }
- }
-
- private:
- std::mutex mu_;
- std::condition_variable cv_;
- std::atomic<unsigned int> state_; // low bit is waiter flag
- bool notified_;
-};
-
-
-// Notification is an object that allows a user to to wait for another
-// thread to signal a notification that an event has occurred.
-//
-// Multiple threads can wait on the same Notification object,
-// but only one caller must call Notify() on the object.
-struct Notification : Barrier {
- Notification() : Barrier(1) {};
-};
-
-
// Runs an arbitrary function and then calls Notify() on the passed in
// Notification.
template <typename Function, typename... Args> struct FunctionWrapperWithNotification
@@ -91,18 +41,39 @@ static EIGEN_STRONG_INLINE void wait_until_ready(SyncType* n) {
}
}
+// An abstract interface to a device specific memory allocator.
+class Allocator {
+ public:
+ virtual ~Allocator() {}
+ EIGEN_DEVICE_FUNC virtual void* allocate(size_t num_bytes) const = 0;
+ EIGEN_DEVICE_FUNC virtual void deallocate(void* buffer) const = 0;
+};
// Build a thread pool device on top the an existing pool of threads.
struct ThreadPoolDevice {
// The ownership of the thread pool remains with the caller.
- ThreadPoolDevice(ThreadPoolInterface* pool, int num_cores) : pool_(pool), num_threads_(num_cores) { }
+ ThreadPoolDevice(ThreadPoolInterface* pool, int num_cores, Allocator* allocator = NULL)
+ : pool_(pool), num_threads_(num_cores), allocator_(allocator) { }
EIGEN_STRONG_INLINE void* allocate(size_t num_bytes) const {
- return internal::aligned_malloc(num_bytes);
+ return allocator_ ? allocator_->allocate(num_bytes)
+ : internal::aligned_malloc(num_bytes);
}
EIGEN_STRONG_INLINE void deallocate(void* buffer) const {
- internal::aligned_free(buffer);
+ if (allocator_) {
+ allocator_->deallocate(buffer);
+ } else {
+ internal::aligned_free(buffer);
+ }
+ }
+
+ EIGEN_STRONG_INLINE void* allocate_temp(size_t num_bytes) const {
+ return allocate(num_bytes);
+ }
+
+ EIGEN_STRONG_INLINE void deallocate_temp(void* buffer) const {
+ deallocate(buffer);
}
EIGEN_STRONG_INLINE void memcpy(void* dst, const void* src, size_t n) const {
@@ -154,7 +125,11 @@ struct ThreadPoolDevice {
template <class Function, class... Args>
EIGEN_STRONG_INLINE void enqueueNoNotification(Function&& f, Args&&... args) const {
- pool_->Schedule(std::bind(f, args...));
+ if (sizeof...(args) > 0) {
+ pool_->Schedule(std::bind(f, args...));
+ } else {
+ pool_->Schedule(f);
+ }
}
// Returns a logical thread index between 0 and pool_->NumThreads() - 1 if
@@ -165,7 +140,7 @@ struct ThreadPoolDevice {
// parallelFor executes f with [0, n) arguments in parallel and waits for
// completion. F accepts a half-open interval [first, last).
- // Block size is choosen based on the iteration cost and resulting parallel
+ // Block size is chosen based on the iteration cost and resulting parallel
// efficiency. If block_align is not nullptr, it is called to round up the
// block size.
void parallelFor(Index n, const TensorOpCost& cost,
@@ -185,9 +160,11 @@ struct ThreadPoolDevice {
// of blocks to be evenly dividable across threads.
double block_size_f = 1.0 / CostModel::taskSize(1, cost);
- Index block_size = numext::mini(n, numext::maxi<Index>(1, block_size_f));
- const Index max_block_size =
- numext::mini(n, numext::maxi<Index>(1, 2 * block_size_f));
+ const Index max_oversharding_factor = 4;
+ Index block_size = numext::mini(
+ n, numext::maxi<Index>(divup<Index>(n, max_oversharding_factor * numThreads()),
+ block_size_f));
+ const Index max_block_size = numext::mini(n, 2 * block_size);
if (block_align) {
Index new_block_size = block_align(block_size);
eigen_assert(new_block_size >= block_size);
@@ -201,7 +178,8 @@ struct ThreadPoolDevice {
(divup<int>(block_count, numThreads()) * numThreads());
// Now try to increase block size up to max_block_size as long as it
// doesn't decrease parallel efficiency.
- for (Index prev_block_count = block_count; prev_block_count > 1;) {
+ for (Index prev_block_count = block_count;
+ max_efficiency < 1.0 && prev_block_count > 1;) {
// This is the next block size that divides size into a smaller number
// of blocks than the current block_size.
Index coarser_block_size = divup(n, prev_block_count - 1);
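To make the new lower bound concrete, a standalone sketch with made-up numbers (block_size_f stands in for the cost-model estimate 1.0 / CostModel::taskSize(1, cost)):

#include <algorithm>

// Mirrors the bound introduced above: divup(n, max_oversharding_factor * numThreads())
// keeps the number of blocks to at most ~4 per thread.
int main() {
  const long n = 1000, num_threads = 8, oversharding = 4;
  const long block_size_f = 10;  // made-up cost-model value
  const long lower_bound = (n + oversharding * num_threads - 1) / (oversharding * num_threads);  // divup(1000, 32) == 32
  const long block_size = std::min(n, std::max(lower_bound, block_size_f));  // 32
  const long max_block_size = std::min(n, 2 * block_size);                   // 64
  const long block_count = (n + block_size - 1) / block_size;                // 32 blocks, i.e. 4 per thread
  return (block_size == 32 && max_block_size == 64 && block_count == 32) ? 0 : 1;
}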
@@ -254,12 +232,19 @@ struct ThreadPoolDevice {
// Convenience wrapper for parallelFor that does not align blocks.
void parallelFor(Index n, const TensorOpCost& cost,
std::function<void(Index, Index)> f) const {
- parallelFor(n, cost, nullptr, std::move(f));
+ parallelFor(n, cost, NULL, std::move(f));
}
+ // Thread pool accessor.
+ ThreadPoolInterface* getPool() const { return pool_; }
+
+ // Allocator accessor.
+ Allocator* allocator() const { return allocator_; }
+
private:
ThreadPoolInterface* pool_;
int num_threads_;
+ Allocator* allocator_;
};
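A minimal sketch of plugging a custom allocator into the thread pool device added above. CountingAllocator is an illustrative name, Eigen::ThreadPool comes from the ThreadPool module, and the fallback mirrors the default behaviour of ThreadPoolDevice:

#define EIGEN_USE_THREADS
#include <unsupported/Eigen/CXX11/Tensor>
#include <unsupported/Eigen/CXX11/ThreadPool>
#include <atomic>

// Illustrative allocator that counts allocations before deferring to Eigen's
// aligned allocator.
class CountingAllocator : public Eigen::Allocator {
 public:
  void* allocate(size_t num_bytes) const override {
    ++count_;
    return Eigen::internal::aligned_malloc(num_bytes);
  }
  void deallocate(void* buffer) const override {
    Eigen::internal::aligned_free(buffer);
  }
  mutable std::atomic<int> count_{0};
};

int main() {
  Eigen::ThreadPool pool(4);
  CountingAllocator allocator;
  Eigen::ThreadPoolDevice device(&pool, 4, &allocator);  // allocator may also be left NULL
  void* p = device.allocate(1024);
  device.deallocate(p);
  return allocator.count_ == 1 ? 0 : 1;
}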
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorDimensions.h b/unsupported/Eigen/CXX11/src/Tensor/TensorDimensions.h
index 86405e69b..dbf5af094 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorDimensions.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorDimensions.h
@@ -32,16 +32,16 @@ namespace Eigen {
// Boilerplate code
namespace internal {
-template<std::size_t n, typename Dimension> struct dget {
+template<std::ptrdiff_t n, typename Dimension> struct dget {
static const std::ptrdiff_t value = get<n, Dimension>::value;
};
-template<typename Index, std::size_t NumIndices, std::size_t n, bool RowMajor>
+template<typename Index, std::ptrdiff_t NumIndices, std::ptrdiff_t n, bool RowMajor>
struct fixed_size_tensor_index_linearization_helper
{
template <typename Dimensions> EIGEN_DEVICE_FUNC
- static inline Index run(array<Index, NumIndices> const& indices,
+ static EIGEN_STRONG_INLINE Index run(array<Index, NumIndices> const& indices,
const Dimensions& dimensions)
{
return array_get<RowMajor ? n - 1 : (NumIndices - n)>(indices) +
@@ -50,21 +50,21 @@ struct fixed_size_tensor_index_linearization_helper
}
};
-template<typename Index, std::size_t NumIndices, bool RowMajor>
+template<typename Index, std::ptrdiff_t NumIndices, bool RowMajor>
struct fixed_size_tensor_index_linearization_helper<Index, NumIndices, 0, RowMajor>
{
template <typename Dimensions> EIGEN_DEVICE_FUNC
- static inline Index run(array<Index, NumIndices> const&, const Dimensions&)
+ static EIGEN_STRONG_INLINE Index run(array<Index, NumIndices> const&, const Dimensions&)
{
return 0;
}
};
-template<typename Index, std::size_t n>
+template<typename Index, std::ptrdiff_t n>
struct fixed_size_tensor_index_extraction_helper
{
template <typename Dimensions> EIGEN_DEVICE_FUNC
- static inline Index run(const Index index,
+ static EIGEN_STRONG_INLINE Index run(const Index index,
const Dimensions& dimensions)
{
const Index mult = (index == n-1) ? 1 : 0;
@@ -77,7 +77,7 @@ template<typename Index>
struct fixed_size_tensor_index_extraction_helper<Index, 0>
{
template <typename Dimensions> EIGEN_DEVICE_FUNC
- static inline Index run(const Index,
+ static EIGEN_STRONG_INLINE Index run(const Index,
const Dimensions&)
{
return 0;
@@ -94,7 +94,7 @@ struct Sizes {
typedef internal::numeric_list<std::ptrdiff_t, Indices...> Base;
const Base t = Base();
static const std::ptrdiff_t total_size = internal::arg_prod(Indices...);
- static const size_t count = Base::count;
+ static const ptrdiff_t count = Base::count;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::ptrdiff_t rank() const {
return Base::count;
@@ -121,16 +121,16 @@ struct Sizes {
return *this;
}
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::ptrdiff_t operator[] (const std::size_t index) const {
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::ptrdiff_t operator[] (const std::ptrdiff_t index) const {
return internal::fixed_size_tensor_index_extraction_helper<std::ptrdiff_t, Base::count>::run(index, t);
}
template <typename DenseIndex> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
- size_t IndexOfColMajor(const array<DenseIndex, Base::count>& indices) const {
+ ptrdiff_t IndexOfColMajor(const array<DenseIndex, Base::count>& indices) const {
return internal::fixed_size_tensor_index_linearization_helper<DenseIndex, Base::count, Base::count, false>::run(indices, t);
}
template <typename DenseIndex> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
- size_t IndexOfRowMajor(const array<DenseIndex, Base::count>& indices) const {
+ ptrdiff_t IndexOfRowMajor(const array<DenseIndex, Base::count>& indices) const {
return internal::fixed_size_tensor_index_linearization_helper<DenseIndex, Base::count, Base::count, true>::run(indices, t);
}
};
@@ -144,25 +144,25 @@ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::ptrdiff_t array_prod(const Sizes<Indi
#else
-template <std::size_t n>
+template <std::ptrdiff_t n>
struct non_zero_size {
- typedef internal::type2val<std::size_t, n> type;
+ typedef internal::type2val<std::ptrdiff_t, n> type;
};
template <>
struct non_zero_size<0> {
typedef internal::null_type type;
};
-template <std::size_t V1=0, std::size_t V2=0, std::size_t V3=0, std::size_t V4=0, std::size_t V5=0> struct Sizes {
+template <std::ptrdiff_t V1=0, std::ptrdiff_t V2=0, std::ptrdiff_t V3=0, std::ptrdiff_t V4=0, std::ptrdiff_t V5=0> struct Sizes {
typedef typename internal::make_type_list<typename non_zero_size<V1>::type, typename non_zero_size<V2>::type, typename non_zero_size<V3>::type, typename non_zero_size<V4>::type, typename non_zero_size<V5>::type >::type Base;
- static const size_t count = Base::count;
- static const std::size_t total_size = internal::arg_prod<Base>::value;
+ static const std::ptrdiff_t count = Base::count;
+ static const std::ptrdiff_t total_size = internal::arg_prod<Base>::value;
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE size_t rank() const {
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ptrdiff_t rank() const {
return count;
}
- static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE size_t TotalSize() {
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ptrdiff_t TotalSize() {
return internal::arg_prod<Base>::value;
}
@@ -178,7 +178,7 @@ template <std::size_t V1=0, std::size_t V2=0, std::size_t V3=0, std::size_t V4=0
#if EIGEN_HAS_VARIADIC_TEMPLATES
template <typename... DenseIndex> Sizes(DenseIndex... /*indices*/) { }
- explicit Sizes(std::initializer_list<std::size_t>) {
+ explicit Sizes(std::initializer_list<std::ptrdiff_t>) {
// todo: add assertion
}
#else
@@ -194,7 +194,7 @@ template <std::size_t V1=0, std::size_t V2=0, std::size_t V3=0, std::size_t V4=0
}
#endif
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DenseIndex operator[] (const int index) const {
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index operator[] (const Index index) const {
switch (index) {
case 0:
return internal::get<0, Base>::value;
@@ -208,23 +208,23 @@ template <std::size_t V1=0, std::size_t V2=0, std::size_t V3=0, std::size_t V4=0
return internal::get<4, Base>::value;
default:
eigen_assert(false && "index overflow");
- return static_cast<DenseIndex>(-1);
+ return static_cast<Index>(-1);
}
}
template <typename DenseIndex> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
- size_t IndexOfColMajor(const array<DenseIndex, Base::count>& indices) const {
+ ptrdiff_t IndexOfColMajor(const array<DenseIndex, Base::count>& indices) const {
return internal::fixed_size_tensor_index_linearization_helper<DenseIndex, Base::count, Base::count, false>::run(indices, *reinterpret_cast<const Base*>(this));
}
template <typename DenseIndex> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
- size_t IndexOfRowMajor(const array<DenseIndex, Base::count>& indices) const {
+ ptrdiff_t IndexOfRowMajor(const array<DenseIndex, Base::count>& indices) const {
return internal::fixed_size_tensor_index_linearization_helper<DenseIndex, Base::count, Base::count, true>::run(indices, *reinterpret_cast<const Base*>(this));
}
};
namespace internal {
-template <std::size_t V1, std::size_t V2, std::size_t V3, std::size_t V4, std::size_t V5>
-EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::size_t array_prod(const Sizes<V1, V2, V3, V4, V5>&) {
+template <std::ptrdiff_t V1, std::ptrdiff_t V2, std::ptrdiff_t V3, std::ptrdiff_t V4, std::ptrdiff_t V5>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::ptrdiff_t array_prod(const Sizes<V1, V2, V3, V4, V5>&) {
return Sizes<V1, V2, V3, V4, V5>::total_size;
}
}
@@ -233,7 +233,7 @@ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::size_t array_prod(const Sizes<V1, V2,
// Boilerplate
namespace internal {
-template<typename Index, std::size_t NumIndices, std::size_t n, bool RowMajor>
+template<typename Index, std::ptrdiff_t NumIndices, std::ptrdiff_t n, bool RowMajor>
struct tensor_index_linearization_helper
{
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
@@ -245,7 +245,7 @@ struct tensor_index_linearization_helper
}
};
-template<typename Index, std::size_t NumIndices, bool RowMajor>
+template<typename Index, std::ptrdiff_t NumIndices, bool RowMajor>
struct tensor_index_linearization_helper<Index, NumIndices, 0, RowMajor>
{
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
@@ -264,7 +264,7 @@ struct DSizes : array<DenseIndex, NumDims> {
typedef array<DenseIndex, NumDims> Base;
static const int count = NumDims;
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE size_t rank() const {
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rank() const {
return NumDims;
}
@@ -284,6 +284,57 @@ struct DSizes : array<DenseIndex, NumDims> {
(*this)[0] = i0;
}
+ EIGEN_DEVICE_FUNC DSizes(const DimensionList<DenseIndex, NumDims>& a) {
+ for (int i = 0 ; i < NumDims; ++i) {
+ (*this)[i] = a[i];
+ }
+ }
+
+ // Enable DSizes index type promotion only if we are promoting to the
+ // larger type, e.g. allow promoting dimensions of type int to long.
+ template<typename OtherIndex>
+ EIGEN_DEVICE_FUNC
+ explicit DSizes(const array<OtherIndex, NumDims>& other,
+ // Default template parameters require c++11.
+ typename internal::enable_if<
+ internal::is_same<
+ DenseIndex,
+ typename internal::promote_index_type<
+ DenseIndex,
+ OtherIndex
+ >::type
+ >::value, void*>::type = 0) {
+ for (int i = 0; i < NumDims; ++i) {
+ (*this)[i] = static_cast<DenseIndex>(other[i]);
+ }
+ }
+
+#ifdef EIGEN_HAS_INDEX_LIST
+ template <typename FirstType, typename... OtherTypes>
+ EIGEN_DEVICE_FUNC
+ explicit DSizes(const Eigen::IndexList<FirstType, OtherTypes...>& dimensions) {
+ for (int i = 0; i < dimensions.count; ++i) {
+ (*this)[i] = dimensions[i];
+ }
+ }
+#endif
+
+#ifndef EIGEN_EMULATE_CXX11_META_H
+ template <typename std::ptrdiff_t... Indices>
+ EIGEN_DEVICE_FUNC DSizes(const Sizes<Indices...>& a) {
+ for (int i = 0 ; i < NumDims; ++i) {
+ (*this)[i] = a[i];
+ }
+ }
+#else
+ template <std::ptrdiff_t V1, std::ptrdiff_t V2, std::ptrdiff_t V3, std::ptrdiff_t V4, std::ptrdiff_t V5>
+ EIGEN_DEVICE_FUNC DSizes(const Sizes<V1, V2, V3, V4, V5>& a) {
+ for (int i = 0 ; i < NumDims; ++i) {
+ (*this)[i] = a[i];
+ }
+ }
+#endif
+
#if EIGEN_HAS_VARIADIC_TEMPLATES
template<typename... IndexTypes> EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE explicit DSizes(DenseIndex firstDimension, DenseIndex secondDimension, IndexTypes... otherDimensions) : Base({{firstDimension, secondDimension, otherDimensions...}}) {
@@ -337,7 +388,7 @@ struct DSizes : array<DenseIndex, NumDims> {
// Boilerplate
namespace internal {
-template<typename Index, std::size_t NumIndices, std::size_t n, bool RowMajor>
+template<typename Index, std::ptrdiff_t NumIndices, std::ptrdiff_t n, bool RowMajor>
struct tensor_vsize_index_linearization_helper
{
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
@@ -349,7 +400,7 @@ struct tensor_vsize_index_linearization_helper
}
};
-template<typename Index, std::size_t NumIndices, bool RowMajor>
+template<typename Index, std::ptrdiff_t NumIndices, bool RowMajor>
struct tensor_vsize_index_linearization_helper<Index, NumIndices, 0, RowMajor>
{
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
@@ -364,10 +415,10 @@ struct tensor_vsize_index_linearization_helper<Index, NumIndices, 0, RowMajor>
namespace internal {
template <typename DenseIndex, int NumDims> struct array_size<const DSizes<DenseIndex, NumDims> > {
- static const size_t value = NumDims;
+ static const ptrdiff_t value = NumDims;
};
template <typename DenseIndex, int NumDims> struct array_size<DSizes<DenseIndex, NumDims> > {
- static const size_t value = NumDims;
+ static const ptrdiff_t value = NumDims;
};
#ifndef EIGEN_EMULATE_CXX11_META_H
template <typename std::ptrdiff_t... Indices> struct array_size<const Sizes<Indices...> > {
@@ -377,42 +428,42 @@ template <typename std::ptrdiff_t... Indices> struct array_size<Sizes<Indices...
static const std::ptrdiff_t value = Sizes<Indices...>::count;
};
template <std::ptrdiff_t n, typename std::ptrdiff_t... Indices> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::ptrdiff_t array_get(const Sizes<Indices...>&) {
- return get<n, internal::numeric_list<std::size_t, Indices...> >::value;
+ return get<n, internal::numeric_list<std::ptrdiff_t, Indices...> >::value;
}
template <std::ptrdiff_t n> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::ptrdiff_t array_get(const Sizes<>&) {
eigen_assert(false && "should never be called");
return -1;
}
#else
-template <std::size_t V1, std::size_t V2, std::size_t V3, std::size_t V4, std::size_t V5> struct array_size<const Sizes<V1,V2,V3,V4,V5> > {
- static const size_t value = Sizes<V1,V2,V3,V4,V5>::count;
+template <std::ptrdiff_t V1, std::ptrdiff_t V2, std::ptrdiff_t V3, std::ptrdiff_t V4, std::ptrdiff_t V5> struct array_size<const Sizes<V1,V2,V3,V4,V5> > {
+ static const ptrdiff_t value = Sizes<V1,V2,V3,V4,V5>::count;
};
-template <std::size_t V1, std::size_t V2, std::size_t V3, std::size_t V4, std::size_t V5> struct array_size<Sizes<V1,V2,V3,V4,V5> > {
- static const size_t value = Sizes<V1,V2,V3,V4,V5>::count;
+template <std::ptrdiff_t V1, std::ptrdiff_t V2, std::ptrdiff_t V3, std::ptrdiff_t V4, std::ptrdiff_t V5> struct array_size<Sizes<V1,V2,V3,V4,V5> > {
+ static const ptrdiff_t value = Sizes<V1,V2,V3,V4,V5>::count;
};
-template <std::size_t n, std::size_t V1, std::size_t V2, std::size_t V3, std::size_t V4, std::size_t V5> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::size_t array_get(const Sizes<V1,V2,V3,V4,V5>&) {
+template <std::ptrdiff_t n, std::ptrdiff_t V1, std::ptrdiff_t V2, std::ptrdiff_t V3, std::ptrdiff_t V4, std::ptrdiff_t V5> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::ptrdiff_t array_get(const Sizes<V1,V2,V3,V4,V5>&) {
return get<n, typename Sizes<V1,V2,V3,V4,V5>::Base>::value;
}
#endif
-template <typename Dims1, typename Dims2, size_t n, size_t m>
+template <typename Dims1, typename Dims2, ptrdiff_t n, ptrdiff_t m>
struct sizes_match_below_dim {
- static EIGEN_DEVICE_FUNC inline bool run(Dims1&, Dims2&) {
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool run(Dims1&, Dims2&) {
return false;
}
};
-template <typename Dims1, typename Dims2, size_t n>
+template <typename Dims1, typename Dims2, ptrdiff_t n>
struct sizes_match_below_dim<Dims1, Dims2, n, n> {
- static EIGEN_DEVICE_FUNC inline bool run(Dims1& dims1, Dims2& dims2) {
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool run(Dims1& dims1, Dims2& dims2) {
return (array_get<n-1>(dims1) == array_get<n-1>(dims2)) &
sizes_match_below_dim<Dims1, Dims2, n-1, n-1>::run(dims1, dims2);
}
};
template <typename Dims1, typename Dims2>
struct sizes_match_below_dim<Dims1, Dims2, 0, 0> {
- static EIGEN_DEVICE_FUNC inline bool run(Dims1&, Dims2&) {
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool run(Dims1&, Dims2&) {
return true;
}
};
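
The promotion-only DSizes constructor added in this hunk only admits conversions where DenseIndex is already the promoted (wider or equal) index type, so widening (int to long) compiles while narrowing does not. A minimal standalone sketch of that rule, using sizeof as a rough stand-in for internal::promote_index_type; the names below are illustrative, not Eigen's:

    #include <array>
    #include <cstddef>
    #include <type_traits>

    // A constructor template that only accepts arrays whose index type is no
    // wider than our own, mirroring the guarded
    // DSizes(const array<OtherIndex, NumDims>&) constructor above.
    template <typename Index, std::size_t N>
    struct SmallDims : std::array<Index, N> {
      template <typename OtherIndex,
                typename std::enable_if<sizeof(OtherIndex) <= sizeof(Index),
                                        int>::type = 0>
      explicit SmallDims(const std::array<OtherIndex, N>& other) {
        for (std::size_t i = 0; i < N; ++i)
          (*this)[i] = static_cast<Index>(other[i]);
      }
    };

    int main() {
      std::array<int, 3> dims = {{2, 3, 4}};
      SmallDims<long, 3> widened(dims);      // int -> long: allowed
      // SmallDims<short, 3> narrowed(dims); // int -> short: rejected at compile time
      return static_cast<int>(widened[0]) - 2;  // 0
    }
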
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorEvalTo.h b/unsupported/Eigen/CXX11/src/Tensor/TensorEvalTo.h
index 82dd1e640..554ee5f59 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorEvalTo.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorEvalTo.h
@@ -32,6 +32,7 @@ struct traits<TensorEvalToOp<XprType, MakePointer_> >
typedef typename remove_reference<Nested>::type _Nested;
static const int NumDimensions = XprTraits::NumDimensions;
static const int Layout = XprTraits::Layout;
+ typedef typename MakePointer_<Scalar>::Type PointerType;
enum {
Flags = 0
@@ -101,11 +102,13 @@ struct TensorEvaluator<const TensorEvalToOp<ArgType, MakePointer_>, Device>
typedef typename XprType::Index Index;
typedef typename internal::remove_const<typename XprType::CoeffReturnType>::type CoeffReturnType;
typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
- static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
+ static const int PacketSize = PacketType<CoeffReturnType, Device>::size;
enum {
IsAligned = TensorEvaluator<ArgType, Device>::IsAligned,
PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
+ BlockAccess = false,
+ PreferBlockAccess = false,
Layout = TensorEvaluator<ArgType, Device>::Layout,
CoordAccess = false, // to be implemented
RawAccess = true
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorEvaluator.h b/unsupported/Eigen/CXX11/src/Tensor/TensorEvaluator.h
index d6415817b..4ca6b3d8c 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorEvaluator.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorEvaluator.h
@@ -33,6 +33,7 @@ struct TensorEvaluator
typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
typedef typename Derived::Dimensions Dimensions;
typedef Derived XprType;
+ static const int PacketSize = PacketType<CoeffReturnType, Device>::size;
// NumDimensions is -1 for variable dim tensors
static const int NumCoords = internal::traits<Derived>::NumDimensions > 0 ?
@@ -40,12 +41,24 @@ struct TensorEvaluator
enum {
IsAligned = Derived::IsAligned,
- PacketAccess = (internal::unpacket_traits<PacketReturnType>::size > 1),
+ PacketAccess = (PacketType<CoeffReturnType, Device>::size > 1),
+ BlockAccess = internal::is_arithmetic<typename internal::remove_const<Scalar>::type>::value,
+ PreferBlockAccess = false,
Layout = Derived::Layout,
CoordAccess = NumCoords > 0,
RawAccess = true
};
+ typedef typename internal::TensorBlock<
+ typename internal::remove_const<Scalar>::type, Index, NumCoords, Layout>
+ TensorBlock;
+ typedef typename internal::TensorBlockReader<
+ typename internal::remove_const<Scalar>::type, Index, NumCoords, Layout>
+ TensorBlockReader;
+ typedef typename internal::TensorBlockWriter<
+ typename internal::remove_const<Scalar>::type, Index, NumCoords, Layout>
+ TensorBlockWriter;
+
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const Derived& m, const Device& device)
: m_data(const_cast<typename internal::traits<Derived>::template MakePointer<Scalar>::Type>(m.data())), m_dims(m.dimensions()), m_device(device), m_impl(m)
{ }
@@ -55,7 +68,7 @@ struct TensorEvaluator
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dims; }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType* dest) {
- if (dest) {
+ if (!NumTraits<typename internal::remove_const<Scalar>::type>::RequireInitialization && dest) {
m_device.memcpy((void*)dest, m_data, sizeof(Scalar) * m_dims.TotalSize());
return false;
}
@@ -110,7 +123,21 @@ struct TensorEvaluator
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
return TensorOpCost(sizeof(CoeffReturnType), 0, 0, vectorized,
- internal::unpacket_traits<PacketReturnType>::size);
+ PacketType<CoeffReturnType, Device>::size);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void getResourceRequirements(
+ std::vector<internal::TensorOpResourceRequirements>*) const {}
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void block(TensorBlock* block) const {
+ assert(m_data != NULL);
+ TensorBlockReader::Run(block, m_data);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void writeBlock(
+ const TensorBlock& block) {
+ assert(m_data != NULL);
+ TensorBlockWriter::Run(block, m_data);
}
EIGEN_DEVICE_FUNC typename internal::traits<Derived>::template MakePointer<Scalar>::Type data() const { return m_data; }
@@ -131,7 +158,7 @@ T loadConstant(const T* address) {
return *address;
}
// Use the texture cache on CUDA devices whenever possible
-#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 350
+#if defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 350
template <> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
float loadConstant(const float* address) {
return __ldg(address);
@@ -163,15 +190,25 @@ struct TensorEvaluator<const Derived, Device>
// NumDimensions is -1 for variable dim tensors
static const int NumCoords = internal::traits<Derived>::NumDimensions > 0 ?
internal::traits<Derived>::NumDimensions : 0;
+ static const int PacketSize = PacketType<CoeffReturnType, Device>::size;
enum {
IsAligned = Derived::IsAligned,
- PacketAccess = (internal::unpacket_traits<PacketReturnType>::size > 1),
+ PacketAccess = (PacketType<CoeffReturnType, Device>::size > 1),
+ BlockAccess = internal::is_arithmetic<typename internal::remove_const<Scalar>::type>::value,
+ PreferBlockAccess = false,
Layout = Derived::Layout,
CoordAccess = NumCoords > 0,
RawAccess = true
};
+ typedef typename internal::TensorBlock<
+ typename internal::remove_const<Scalar>::type, Index, NumCoords, Layout>
+ TensorBlock;
+ typedef typename internal::TensorBlockReader<
+ typename internal::remove_const<Scalar>::type, Index, NumCoords, Layout>
+ TensorBlockReader;
+
// Used for accessor extraction in SYCL Managed TensorMap:
const Derived& derived() const { return m_impl; }
@@ -193,7 +230,12 @@ struct TensorEvaluator<const Derived, Device>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const {
eigen_assert(m_data);
+#ifndef __SYCL_DEVICE_ONLY__
return loadConstant(m_data+index);
+#else
+ CoeffReturnType tmp = m_data[index];
+ return tmp;
+#endif
}
template<int LoadMode> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
@@ -211,7 +253,15 @@ struct TensorEvaluator<const Derived, Device>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
return TensorOpCost(sizeof(CoeffReturnType), 0, 0, vectorized,
- internal::unpacket_traits<PacketReturnType>::size);
+ PacketType<CoeffReturnType, Device>::size);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void getResourceRequirements(
+ std::vector<internal::TensorOpResourceRequirements>*) const {}
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void block(TensorBlock* block) const {
+ assert(m_data != NULL);
+ TensorBlockReader::Run(block, m_data);
}
EIGEN_DEVICE_FUNC typename internal::traits<Derived>::template MakePointer<const Scalar>::Type data() const { return m_data; }
@@ -239,6 +289,8 @@ struct TensorEvaluator<const TensorCwiseNullaryOp<NullaryOp, ArgType>, Device>
enum {
IsAligned = true,
PacketAccess = internal::functor_traits<NullaryOp>::PacketAccess,
+ BlockAccess = false,
+ PreferBlockAccess = false,
Layout = TensorEvaluator<ArgType, Device>::Layout,
CoordAccess = false, // to be implemented
RawAccess = false
@@ -253,7 +305,7 @@ struct TensorEvaluator<const TensorCwiseNullaryOp<NullaryOp, ArgType>, Device>
typedef typename XprType::Scalar Scalar;
typedef typename internal::traits<XprType>::Scalar CoeffReturnType;
typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
- static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
+ static const int PacketSize = PacketType<CoeffReturnType, Device>::size;
typedef typename TensorEvaluator<ArgType, Device>::Dimensions Dimensions;
EIGEN_DEVICE_FUNC const Dimensions& dimensions() const { return m_argImpl.dimensions(); }
@@ -275,10 +327,10 @@ struct TensorEvaluator<const TensorCwiseNullaryOp<NullaryOp, ArgType>, Device>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
costPerCoeff(bool vectorized) const {
return TensorOpCost(sizeof(CoeffReturnType), 0, 0, vectorized,
- internal::unpacket_traits<PacketReturnType>::size);
+ PacketType<CoeffReturnType, Device>::size);
}
- EIGEN_DEVICE_FUNC CoeffReturnType* data() const { return NULL; }
+ EIGEN_DEVICE_FUNC typename Eigen::internal::traits<XprType>::PointerType data() const { return NULL; }
/// required by sycl in order to extract the accessor
const TensorEvaluator<ArgType, Device>& impl() const { return m_argImpl; }
@@ -302,25 +354,34 @@ struct TensorEvaluator<const TensorCwiseUnaryOp<UnaryOp, ArgType>, Device>
typedef TensorCwiseUnaryOp<UnaryOp, ArgType> XprType;
enum {
- IsAligned = TensorEvaluator<ArgType, Device>::IsAligned,
- PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess & internal::functor_traits<UnaryOp>::PacketAccess,
- Layout = TensorEvaluator<ArgType, Device>::Layout,
- CoordAccess = false, // to be implemented
- RawAccess = false
+ IsAligned = TensorEvaluator<ArgType, Device>::IsAligned,
+ PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess &
+ internal::functor_traits<UnaryOp>::PacketAccess,
+ BlockAccess = TensorEvaluator<ArgType, Device>::BlockAccess,
+ PreferBlockAccess = TensorEvaluator<ArgType, Device>::PreferBlockAccess,
+ Layout = TensorEvaluator<ArgType, Device>::Layout,
+ CoordAccess = false, // to be implemented
+ RawAccess = false
};
EIGEN_DEVICE_FUNC TensorEvaluator(const XprType& op, const Device& device)
- : m_functor(op.functor()),
+ : m_device(device),
+ m_functor(op.functor()),
m_argImpl(op.nestedExpression(), device)
{ }
typedef typename XprType::Index Index;
typedef typename XprType::Scalar Scalar;
+ typedef typename internal::remove_const<Scalar>::type ScalarNoConst;
typedef typename internal::traits<XprType>::Scalar CoeffReturnType;
typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
- static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
+ static const int PacketSize = PacketType<CoeffReturnType, Device>::size;
typedef typename TensorEvaluator<ArgType, Device>::Dimensions Dimensions;
+ static const int NumDims = internal::array_size<Dimensions>::value;
+ typedef internal::TensorBlock<ScalarNoConst, Index, NumDims, Layout>
+ TensorBlock;
+
EIGEN_DEVICE_FUNC const Dimensions& dimensions() const { return m_argImpl.dimensions(); }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar*) {
@@ -348,7 +409,30 @@ struct TensorEvaluator<const TensorCwiseUnaryOp<UnaryOp, ArgType>, Device>
TensorOpCost(0, 0, functor_cost, vectorized, PacketSize);
}
- EIGEN_DEVICE_FUNC CoeffReturnType* data() const { return NULL; }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void getResourceRequirements(
+ std::vector<internal::TensorOpResourceRequirements>* resources) const {
+ m_argImpl.getResourceRequirements(resources);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void block(
+ TensorBlock* output_block) const {
+ if (NumDims <= 0) {
+ output_block->data()[0] = coeff(0);
+ return;
+ }
+ internal::TensorBlockView<ArgType, Device> arg_block(m_device, m_argImpl,
+ *output_block);
+ internal::TensorBlockCwiseUnaryIO<UnaryOp, Index, ScalarNoConst, NumDims,
+ Layout>::Run(m_functor,
+ output_block->block_sizes(),
+ output_block->block_strides(),
+ output_block->data(),
+ arg_block.block_strides(),
+ arg_block.data());
+ }
+
+ EIGEN_DEVICE_FUNC typename Eigen::internal::traits<XprType>::PointerType data() const { return NULL; }
/// required by sycl in order to extract the accessor
const TensorEvaluator<ArgType, Device> & impl() const { return m_argImpl; }
@@ -357,6 +441,7 @@ struct TensorEvaluator<const TensorCwiseUnaryOp<UnaryOp, ArgType>, Device>
private:
+ const Device& m_device;
const UnaryOp m_functor;
TensorEvaluator<ArgType, Device> m_argImpl;
};
@@ -370,16 +455,23 @@ struct TensorEvaluator<const TensorCwiseBinaryOp<BinaryOp, LeftArgType, RightArg
typedef TensorCwiseBinaryOp<BinaryOp, LeftArgType, RightArgType> XprType;
enum {
- IsAligned = TensorEvaluator<LeftArgType, Device>::IsAligned & TensorEvaluator<RightArgType, Device>::IsAligned,
- PacketAccess = TensorEvaluator<LeftArgType, Device>::PacketAccess & TensorEvaluator<RightArgType, Device>::PacketAccess &
- internal::functor_traits<BinaryOp>::PacketAccess,
- Layout = TensorEvaluator<LeftArgType, Device>::Layout,
- CoordAccess = false, // to be implemented
- RawAccess = false
+ IsAligned = TensorEvaluator<LeftArgType, Device>::IsAligned &
+ TensorEvaluator<RightArgType, Device>::IsAligned,
+ PacketAccess = TensorEvaluator<LeftArgType, Device>::PacketAccess &
+ TensorEvaluator<RightArgType, Device>::PacketAccess &
+ internal::functor_traits<BinaryOp>::PacketAccess,
+ BlockAccess = TensorEvaluator<LeftArgType, Device>::BlockAccess &
+ TensorEvaluator<RightArgType, Device>::BlockAccess,
+ PreferBlockAccess = TensorEvaluator<LeftArgType, Device>::PreferBlockAccess |
+ TensorEvaluator<RightArgType, Device>::PreferBlockAccess,
+ Layout = TensorEvaluator<LeftArgType, Device>::Layout,
+ CoordAccess = false, // to be implemented
+ RawAccess = false
};
EIGEN_DEVICE_FUNC TensorEvaluator(const XprType& op, const Device& device)
- : m_functor(op.functor()),
+ : m_device(device),
+ m_functor(op.functor()),
m_leftImpl(op.lhsExpression(), device),
m_rightImpl(op.rhsExpression(), device)
{
@@ -391,9 +483,17 @@ struct TensorEvaluator<const TensorCwiseBinaryOp<BinaryOp, LeftArgType, RightArg
typedef typename XprType::Scalar Scalar;
typedef typename internal::traits<XprType>::Scalar CoeffReturnType;
typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
- static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
+ static const int PacketSize = PacketType<CoeffReturnType, Device>::size;
typedef typename TensorEvaluator<LeftArgType, Device>::Dimensions Dimensions;
+ static const int NumDims = internal::array_size<
+ typename TensorEvaluator<LeftArgType, Device>::Dimensions>::value;
+
+ typedef internal::TensorBlock<
+ typename internal::remove_const<Scalar>::type, Index, NumDims,
+ TensorEvaluator<LeftArgType, Device>::Layout>
+ TensorBlock;
+
EIGEN_DEVICE_FUNC const Dimensions& dimensions() const
{
// TODO: use right impl instead if right impl dimensions are known at compile time.
@@ -428,7 +528,31 @@ struct TensorEvaluator<const TensorCwiseBinaryOp<BinaryOp, LeftArgType, RightArg
TensorOpCost(0, 0, functor_cost, vectorized, PacketSize);
}
- EIGEN_DEVICE_FUNC CoeffReturnType* data() const { return NULL; }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void getResourceRequirements(
+ std::vector<internal::TensorOpResourceRequirements>* resources) const {
+ m_leftImpl.getResourceRequirements(resources);
+ m_rightImpl.getResourceRequirements(resources);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void block(
+ TensorBlock* output_block) const {
+ if (NumDims <= 0) {
+ output_block->data()[0] = coeff(Index(0));
+ return;
+ }
+ internal::TensorBlockView<LeftArgType, Device> left_block(
+ m_device, m_leftImpl, *output_block);
+ internal::TensorBlockView<RightArgType, Device> right_block(
+ m_device, m_rightImpl, *output_block);
+ internal::TensorBlockCwiseBinaryIO<
+ BinaryOp, Index, typename internal::remove_const<Scalar>::type, NumDims,
+ Layout>::Run(m_functor, output_block->block_sizes(),
+ output_block->block_strides(), output_block->data(),
+ left_block.block_strides(), left_block.data(),
+ right_block.block_strides(), right_block.data());
+ }
+
+ EIGEN_DEVICE_FUNC typename Eigen::internal::traits<XprType>::PointerType data() const { return NULL; }
/// required by sycl in order to extract the accessor
const TensorEvaluator<LeftArgType, Device>& left_impl() const { return m_leftImpl; }
/// required by sycl in order to extract the accessor
@@ -437,6 +561,7 @@ struct TensorEvaluator<const TensorCwiseBinaryOp<BinaryOp, LeftArgType, RightArg
BinaryOp functor() const { return m_functor; }
private:
+ const Device& m_device;
const BinaryOp m_functor;
TensorEvaluator<LeftArgType, Device> m_leftImpl;
TensorEvaluator<RightArgType, Device> m_rightImpl;
@@ -453,6 +578,8 @@ struct TensorEvaluator<const TensorCwiseTernaryOp<TernaryOp, Arg1Type, Arg2Type,
IsAligned = TensorEvaluator<Arg1Type, Device>::IsAligned & TensorEvaluator<Arg2Type, Device>::IsAligned & TensorEvaluator<Arg3Type, Device>::IsAligned,
PacketAccess = TensorEvaluator<Arg1Type, Device>::PacketAccess & TensorEvaluator<Arg2Type, Device>::PacketAccess & TensorEvaluator<Arg3Type, Device>::PacketAccess &
internal::functor_traits<TernaryOp>::PacketAccess,
+ BlockAccess = false,
+ PreferBlockAccess = false,
Layout = TensorEvaluator<Arg1Type, Device>::Layout,
CoordAccess = false, // to be implemented
RawAccess = false
@@ -486,7 +613,7 @@ struct TensorEvaluator<const TensorCwiseTernaryOp<TernaryOp, Arg1Type, Arg2Type,
typedef typename XprType::Scalar Scalar;
typedef typename internal::traits<XprType>::Scalar CoeffReturnType;
typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
- static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
+ static const int PacketSize = PacketType<CoeffReturnType, Device>::size;
typedef typename TensorEvaluator<Arg1Type, Device>::Dimensions Dimensions;
EIGEN_DEVICE_FUNC const Dimensions& dimensions() const
@@ -528,7 +655,7 @@ struct TensorEvaluator<const TensorCwiseTernaryOp<TernaryOp, Arg1Type, Arg2Type,
TensorOpCost(0, 0, functor_cost, vectorized, PacketSize);
}
- EIGEN_DEVICE_FUNC CoeffReturnType* data() const { return NULL; }
+ EIGEN_DEVICE_FUNC typename Eigen::internal::traits<XprType>::PointerType data() const { return NULL; }
/// required by sycl in order to extract the accessor
const TensorEvaluator<Arg1Type, Device> & arg1Impl() const { return m_arg1Impl; }
@@ -556,7 +683,9 @@ struct TensorEvaluator<const TensorSelectOp<IfArgType, ThenArgType, ElseArgType>
enum {
IsAligned = TensorEvaluator<ThenArgType, Device>::IsAligned & TensorEvaluator<ElseArgType, Device>::IsAligned,
PacketAccess = TensorEvaluator<ThenArgType, Device>::PacketAccess & TensorEvaluator<ElseArgType, Device>::PacketAccess &
- internal::packet_traits<Scalar>::HasBlend,
+ PacketType<Scalar, Device>::HasBlend,
+ BlockAccess = false,
+ PreferBlockAccess = false,
Layout = TensorEvaluator<IfArgType, Device>::Layout,
CoordAccess = false, // to be implemented
RawAccess = false
@@ -576,7 +705,7 @@ struct TensorEvaluator<const TensorSelectOp<IfArgType, ThenArgType, ElseArgType>
typedef typename XprType::Index Index;
typedef typename internal::traits<XprType>::Scalar CoeffReturnType;
typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
- static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
+ static const int PacketSize = PacketType<CoeffReturnType, Device>::size;
typedef typename TensorEvaluator<IfArgType, Device>::Dimensions Dimensions;
EIGEN_DEVICE_FUNC const Dimensions& dimensions() const
@@ -620,7 +749,7 @@ struct TensorEvaluator<const TensorSelectOp<IfArgType, ThenArgType, ElseArgType>
.cwiseMax(m_elseImpl.costPerCoeff(vectorized));
}
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType* data() const { return NULL; }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename Eigen::internal::traits<XprType>::PointerType data() const { return NULL; }
/// required by sycl in order to extract the accessor
const TensorEvaluator<IfArgType, Device> & cond_impl() const { return m_condImpl; }
/// required by sycl in order to extract the accessor
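
The guard added to evalSubExprsIfNeeded in this file means the raw-memcpy shortcut into a caller-provided destination is only taken when the scalar type does not require initialization. A minimal standalone sketch of the same idea, with std::is_trivially_copyable standing in for Eigen's NumTraits<T>::RequireInitialization check (names below are illustrative, not Eigen's):

    #include <cstring>
    #include <iostream>
    #include <string>
    #include <type_traits>

    // Returns false when the data could be copied wholesale (nothing left for
    // the caller to do), true when the caller must evaluate element by element.
    template <typename Scalar>
    bool eval_into(Scalar* dest, const Scalar* src, std::size_t n) {
      if (std::is_trivially_copyable<Scalar>::value && dest) {
        std::memcpy(static_cast<void*>(dest), src, n * sizeof(Scalar));
        return false;
      }
      return true;
    }

    int main() {
      float fsrc[3] = {1.f, 2.f, 3.f}, fdst[3] = {0.f, 0.f, 0.f};
      std::cout << eval_into(fdst, fsrc, 3) << "\n";  // 0: bulk copy is safe

      std::string ssrc[2] = {"a", "b"}, sdst[2];
      std::cout << eval_into(sdst, ssrc, 2) << "\n";  // 1: bulk copy would be unsafe
      return 0;
    }
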
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h b/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h
index f01d77c0a..bfe1f97b8 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h
@@ -12,31 +12,40 @@
namespace Eigen {
-/** \class TensorExecutor
- * \ingroup CXX11_Tensor_Module
- *
- * \brief The tensor executor class.
- *
- * This class is responsible for launch the evaluation of the expression on
- * the specified computing device.
- */
+/**
+ * \class TensorExecutor
+ * \ingroup CXX11_Tensor_Module
+ *
+ * \brief The tensor executor class.
+ *
+ * This class is responsible for launching the evaluation of the expression on
+ * the specified computing device.
+ *
+ * @tparam Vectorizable can use packet math (SSE/AVX/etc... registers and
+ * instructions)
+ * @tparam Tileable can use block-based tensor evaluation
+ * (see TensorBlock.h)
+ */
namespace internal {
-// Default strategy: the expression is evaluated with a single cpu thread.
-template<typename Expression, typename Device, bool Vectorizable>
-class TensorExecutor
-{
+/**
+ * Default strategy: the expression is evaluated sequentially with a single cpu
+ * thread, without vectorization or block evaluation.
+ */
+template <typename Expression, typename Device, bool Vectorizable,
+ bool Tileable>
+class TensorExecutor {
public:
- typedef typename Expression::Index Index;
+ typedef typename Expression::Index StorageIndex;
+
EIGEN_DEVICE_FUNC
- static inline void run(const Expression& expr, const Device& device = Device())
- {
+ static EIGEN_STRONG_INLINE void run(const Expression& expr,
+ const Device& device = Device()) {
TensorEvaluator<Expression, Device> evaluator(expr, device);
const bool needs_assign = evaluator.evalSubExprsIfNeeded(NULL);
- if (needs_assign)
- {
- const Index size = array_prod(evaluator.dimensions());
- for (Index i = 0; i < size; ++i) {
+ if (needs_assign) {
+ const StorageIndex size = array_prod(evaluator.dimensions());
+ for (StorageIndex i = 0; i < size; ++i) {
evaluator.evalScalar(i);
}
}
@@ -44,35 +53,40 @@ class TensorExecutor
}
};
-
-template<typename Expression>
-class TensorExecutor<Expression, DefaultDevice, true>
-{
+/**
+ * Process all the data with a single cpu thread, using vectorized instructions.
+ */
+template <typename Expression>
+class TensorExecutor<Expression, DefaultDevice, /*Vectorizable*/ true,
+ /*Tileable*/ false> {
public:
- typedef typename Expression::Index Index;
+ typedef typename Expression::Index StorageIndex;
+
EIGEN_DEVICE_FUNC
- static inline void run(const Expression& expr, const DefaultDevice& device = DefaultDevice())
- {
+ static EIGEN_STRONG_INLINE void run(const Expression& expr,
+ const DefaultDevice& device = DefaultDevice()) {
TensorEvaluator<Expression, DefaultDevice> evaluator(expr, device);
const bool needs_assign = evaluator.evalSubExprsIfNeeded(NULL);
- if (needs_assign)
- {
- const Index size = array_prod(evaluator.dimensions());
- const int PacketSize = unpacket_traits<typename TensorEvaluator<Expression, DefaultDevice>::PacketReturnType>::size;
- // Give the compiler a strong hint to unroll the loop. But don't insist
- // on unrolling, because if the function is expensive the compiler should not
+ if (needs_assign) {
+ const StorageIndex size = array_prod(evaluator.dimensions());
+ const int PacketSize = unpacket_traits<typename TensorEvaluator<
+ Expression, DefaultDevice>::PacketReturnType>::size;
+
+ // Give the compiler a strong hint to unroll the loop. But don't insist
+ // on unrolling, because if the function is expensive the compiler should not
// unroll the loop at the expense of inlining.
- const Index UnrolledSize = (size / (4 * PacketSize)) * 4 * PacketSize;
- for (Index i = 0; i < UnrolledSize; i += 4*PacketSize) {
- for (Index j = 0; j < 4; j++) {
+ const StorageIndex UnrolledSize =
+ (size / (4 * PacketSize)) * 4 * PacketSize;
+ for (StorageIndex i = 0; i < UnrolledSize; i += 4 * PacketSize) {
+ for (StorageIndex j = 0; j < 4; j++) {
evaluator.evalPacket(i + j * PacketSize);
}
}
- const Index VectorizedSize = (size / PacketSize) * PacketSize;
- for (Index i = UnrolledSize; i < VectorizedSize; i += PacketSize) {
+ const StorageIndex VectorizedSize = (size / PacketSize) * PacketSize;
+ for (StorageIndex i = UnrolledSize; i < VectorizedSize; i += PacketSize) {
evaluator.evalPacket(i);
}
- for (Index i = VectorizedSize; i < size; ++i) {
+ for (StorageIndex i = VectorizedSize; i < size; ++i) {
evaluator.evalScalar(i);
}
}
@@ -80,41 +94,107 @@ class TensorExecutor<Expression, DefaultDevice, true>
}
};
+/**
+ * Process all the data with a single cpu thread, using blocks of data. By
+ * sizing a block to fit the L1 cache we get better cache performance.
+ */
+template <typename Expression, bool Vectorizable>
+class TensorExecutor<Expression, DefaultDevice, Vectorizable,
+ /*Tileable*/ true> {
+ public:
+ typedef typename traits<Expression>::Scalar Scalar;
+ typedef typename remove_const<Scalar>::type ScalarNoConst;
+
+ typedef TensorEvaluator<Expression, DefaultDevice> Evaluator;
+ typedef typename traits<Expression>::Index StorageIndex;
+
+ static const int NumDims = traits<Expression>::NumDimensions;
+
+ EIGEN_DEVICE_FUNC
+ static EIGEN_STRONG_INLINE void run(const Expression& expr,
+ const DefaultDevice& device = DefaultDevice()) {
+ typedef TensorBlock<ScalarNoConst, StorageIndex, NumDims, Evaluator::Layout> TensorBlock;
+ typedef TensorBlockMapper<ScalarNoConst, StorageIndex, NumDims, Evaluator::Layout> TensorBlockMapper;
+ typedef typename TensorBlock::Dimensions TensorBlockDimensions;
+ Evaluator evaluator(expr, device);
+ Index total_size = array_prod(evaluator.dimensions());
+ Index cache_size = device.firstLevelCacheSize() / sizeof(Scalar);
+
+ if (total_size < cache_size) {
+ // TODO(andydavis) Reduce block management overhead for small tensors.
+ // TODO(wuke) Do not do this when evaluating TensorBroadcastingOp.
+ internal::TensorExecutor<Expression, DefaultDevice, Vectorizable,
+ /*Tileable*/ false>::run(expr, device);
+ return;
+ }
-// Multicore strategy: the index space is partitioned and each partition is executed on a single core
+ const bool needs_assign = evaluator.evalSubExprsIfNeeded(NULL);
+ if (needs_assign) {
+ // Size tensor blocks to fit in cache (or requested target block size).
+ Index block_total_size = numext::mini(cache_size, total_size);
+ TensorBlockShapeType block_shape = kSkewedInnerDims;
+ // Query expression tree for desired block size/shape.
+ std::vector<TensorOpResourceRequirements> resources;
+ evaluator.getResourceRequirements(&resources);
+ MergeResourceRequirements(resources, &block_shape, &block_total_size);
+
+ TensorBlockMapper block_mapper(
+ TensorBlockDimensions(evaluator.dimensions()), block_shape,
+ block_total_size);
+ block_total_size = block_mapper.block_dims_total_size();
+
+ Scalar* data = static_cast<Scalar*>(
+ device.allocate(block_total_size * sizeof(Scalar)));
+
+ const StorageIndex total_block_count = block_mapper.total_block_count();
+ for (StorageIndex i = 0; i < total_block_count; ++i) {
+ TensorBlock block = block_mapper.GetBlockForIndex(i, data);
+ evaluator.evalBlock(&block);
+ }
+ device.deallocate(data);
+ }
+ evaluator.cleanup();
+ }
+};
+
+/**
+ * Multicore strategy: the index space is partitioned and each partition is
+ * executed on a single core.
+ */
#ifdef EIGEN_USE_THREADS
-template <typename Evaluator, typename Index, bool Vectorizable>
+template <typename Evaluator, typename StorageIndex, bool Vectorizable>
struct EvalRange {
- static void run(Evaluator* evaluator_in, const Index first, const Index last) {
+ static void run(Evaluator* evaluator_in, const StorageIndex first,
+ const StorageIndex last) {
Evaluator evaluator = *evaluator_in;
eigen_assert(last >= first);
- for (Index i = first; i < last; ++i) {
+ for (StorageIndex i = first; i < last; ++i) {
evaluator.evalScalar(i);
}
}
- static Index alignBlockSize(Index size) {
- return size;
- }
+ static StorageIndex alignBlockSize(StorageIndex size) { return size; }
};
-template <typename Evaluator, typename Index>
-struct EvalRange<Evaluator, Index, true> {
- static const int PacketSize = unpacket_traits<typename Evaluator::PacketReturnType>::size;
+template <typename Evaluator, typename StorageIndex>
+struct EvalRange<Evaluator, StorageIndex, /*Vectorizable*/ true> {
+ static const int PacketSize =
+ unpacket_traits<typename Evaluator::PacketReturnType>::size;
- static void run(Evaluator* evaluator_in, const Index first, const Index last) {
+ static void run(Evaluator* evaluator_in, const StorageIndex first,
+ const StorageIndex last) {
Evaluator evaluator = *evaluator_in;
eigen_assert(last >= first);
- Index i = first;
+ StorageIndex i = first;
if (last - first >= PacketSize) {
eigen_assert(first % PacketSize == 0);
- Index last_chunk_offset = last - 4 * PacketSize;
- // Give the compiler a strong hint to unroll the loop. But don't insist
- // on unrolling, because if the function is expensive the compiler should not
+ StorageIndex last_chunk_offset = last - 4 * PacketSize;
+ // Give the compiler a strong hint to unroll the loop. But don't insist
+ // on unrolling, because if the function is expensive the compiler should not
// unroll the loop at the expense of inlining.
- for (; i <= last_chunk_offset; i += 4*PacketSize) {
- for (Index j = 0; j < 4; j++) {
+ for (; i <= last_chunk_offset; i += 4 * PacketSize) {
+ for (StorageIndex j = 0; j < 4; j++) {
evaluator.evalPacket(i + j * PacketSize);
}
}
@@ -128,7 +208,7 @@ struct EvalRange<Evaluator, Index, true> {
}
}
- static Index alignBlockSize(Index size) {
+ static StorageIndex alignBlockSize(StorageIndex size) {
// Align block size to packet size and account for unrolling in run above.
if (size >= 16 * PacketSize) {
return (size + 4 * PacketSize - 1) & ~(4 * PacketSize - 1);
@@ -138,133 +218,179 @@ struct EvalRange<Evaluator, Index, true> {
}
};
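
The expression in alignBlockSize above, (size + 4*PacketSize - 1) & ~(4*PacketSize - 1), rounds size up to the next multiple of 4*PacketSize, which is valid because that multiple is a power of two. A tiny standalone check of the arithmetic (not Eigen code):

    #include <cassert>

    int main() {
      const int PacketSize = 8;          // e.g. floats in an AVX register
      const int chunk = 4 * PacketSize;  // 32, a power of two
      for (int size = 16 * PacketSize; size < 16 * PacketSize + 100; ++size) {
        const int rounded = (size + chunk - 1) & ~(chunk - 1);
        assert(rounded % chunk == 0);                       // aligned to the unrolled step
        assert(rounded >= size && rounded - size < chunk);  // smallest such value
      }
      return 0;
    }
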
-template <typename Expression, bool Vectorizable>
-class TensorExecutor<Expression, ThreadPoolDevice, Vectorizable> {
+template <typename Expression, bool Vectorizable, bool Tileable>
+class TensorExecutor<Expression, ThreadPoolDevice, Vectorizable, Tileable> {
public:
- typedef typename Expression::Index Index;
- static inline void run(const Expression& expr, const ThreadPoolDevice& device)
- {
+ typedef typename Expression::Index StorageIndex;
+
+ static EIGEN_STRONG_INLINE void run(const Expression& expr,
+ const ThreadPoolDevice& device) {
typedef TensorEvaluator<Expression, ThreadPoolDevice> Evaluator;
+ typedef EvalRange<Evaluator, StorageIndex, Vectorizable> EvalRange;
+
Evaluator evaluator(expr, device);
const bool needs_assign = evaluator.evalSubExprsIfNeeded(NULL);
- if (needs_assign)
- {
- const Index size = array_prod(evaluator.dimensions());
-#if !defined(EIGEN_USE_SIMPLE_THREAD_POOL)
+ if (needs_assign) {
+ const StorageIndex size = array_prod(evaluator.dimensions());
device.parallelFor(size, evaluator.costPerCoeff(Vectorizable),
- EvalRange<Evaluator, Index, Vectorizable>::alignBlockSize,
- [&evaluator](Index first, Index last) {
- EvalRange<Evaluator, Index, Vectorizable>::run(&evaluator, first, last);
+ EvalRange::alignBlockSize,
+ [&evaluator](StorageIndex first, StorageIndex last) {
+ EvalRange::run(&evaluator, first, last);
});
-#else
- size_t num_threads = device.numThreads();
- if (num_threads > 1) {
- num_threads = TensorCostModel<ThreadPoolDevice>::numThreads(
- size, evaluator.costPerCoeff(Vectorizable), num_threads);
- }
- if (num_threads == 1) {
- EvalRange<Evaluator, Index, Vectorizable>::run(&evaluator, 0, size);
- } else {
- const Index PacketSize = Vectorizable ? unpacket_traits<typename Evaluator::PacketReturnType>::size : 1;
- Index blocksz = std::ceil<Index>(static_cast<float>(size)/num_threads) + PacketSize - 1;
- const Index blocksize = numext::maxi<Index>(PacketSize, (blocksz - (blocksz % PacketSize)));
- const Index numblocks = size / blocksize;
-
- Barrier barrier(numblocks);
- for (int i = 0; i < numblocks; ++i) {
- device.enqueue_with_barrier(
- &barrier, &EvalRange<Evaluator, Index, Vectorizable>::run,
- &evaluator, i * blocksize, (i + 1) * blocksize);
- }
- if (numblocks * blocksize < size) {
- EvalRange<Evaluator, Index, Vectorizable>::run(
- &evaluator, numblocks * blocksize, size);
- }
- barrier.Wait();
- }
-#endif // defined(!EIGEN_USE_SIMPLE_THREAD_POOL)
}
evaluator.cleanup();
}
};
+
+template <typename Expression, bool Vectorizable>
+class TensorExecutor<Expression, ThreadPoolDevice, Vectorizable, /*Tileable*/ true> {
+ public:
+ typedef typename traits<Expression>::Scalar Scalar;
+ typedef typename remove_const<Scalar>::type ScalarNoConst;
+
+ typedef TensorEvaluator<Expression, ThreadPoolDevice> Evaluator;
+ typedef typename traits<Expression>::Index StorageIndex;
+
+ static const int NumDims = traits<Expression>::NumDimensions;
+
+ static EIGEN_STRONG_INLINE void run(const Expression& expr,
+ const ThreadPoolDevice& device) {
+ typedef TensorBlockMapper<ScalarNoConst, StorageIndex, NumDims, Evaluator::Layout> TensorBlockMapper;
+
+ Evaluator evaluator(expr, device);
+ Index total_size = array_prod(evaluator.dimensions());
+ Index cache_size = device.firstLevelCacheSize() / sizeof(Scalar);
+ if (total_size < cache_size) {
+ // TODO(andydavis) Reduce block management overhead for small tensors.
+ internal::TensorExecutor<Expression, ThreadPoolDevice, Vectorizable,
+ false>::run(expr, device);
+ evaluator.cleanup();
+ return;
+ }
+
+ const bool needs_assign = evaluator.evalSubExprsIfNeeded(NULL);
+ if (needs_assign) {
+ TensorBlockShapeType block_shape = kSkewedInnerDims;
+ Index block_total_size = 0;
+ // Query expression tree for desired block size/shape.
+ std::vector<internal::TensorOpResourceRequirements> resources;
+ evaluator.getResourceRequirements(&resources);
+ MergeResourceRequirements(resources, &block_shape, &block_total_size);
+ int num_threads = device.numThreads();
+
+ // Estimate minimum block size based on cost.
+ TensorOpCost cost = evaluator.costPerCoeff(Vectorizable);
+ double taskSize = TensorCostModel<ThreadPoolDevice>::taskSize(1, cost);
+ size_t block_size = static_cast<size_t>(1.0 / taskSize);
+ TensorBlockMapper block_mapper(
+ typename TensorBlockMapper::Dimensions(evaluator.dimensions()),
+ block_shape, block_size);
+ block_size = block_mapper.block_dims_total_size();
+ const size_t aligned_blocksize =
+ EIGEN_MAX_ALIGN_BYTES *
+ divup<size_t>(block_size * sizeof(Scalar), EIGEN_MAX_ALIGN_BYTES);
+ void* buf = device.allocate((num_threads + 1) * aligned_blocksize);
+ device.parallelFor(
+ block_mapper.total_block_count(), cost * block_size,
+ [=, &device, &evaluator, &block_mapper](StorageIndex first,
+ StorageIndex last) {
+ // currentThreadId() returns -1 if called from a thread not in the
+ // thread pool, such as the main thread dispatching Eigen
+ // expressions.
+ const int thread_idx = device.currentThreadId();
+ eigen_assert(thread_idx >= -1 && thread_idx < num_threads);
+ Scalar* thread_buf = reinterpret_cast<Scalar*>(
+ static_cast<char*>(buf) + aligned_blocksize * (thread_idx + 1));
+ for (StorageIndex i = first; i < last; ++i) {
+ auto block = block_mapper.GetBlockForIndex(i, thread_buf);
+ evaluator.evalBlock(&block);
+ }
+ });
+ device.deallocate(buf);
+ }
+ evaluator.cleanup();
+ }
+};
+
#endif // EIGEN_USE_THREADS
// GPU: the evaluation of the expression is offloaded to a GPU.
#if defined(EIGEN_USE_GPU)
-template <typename Expression, bool Vectorizable>
-class TensorExecutor<Expression, GpuDevice, Vectorizable> {
+template <typename Expression, bool Vectorizable, bool Tileable>
+class TensorExecutor<Expression, GpuDevice, Vectorizable, Tileable> {
public:
- typedef typename Expression::Index Index;
+ typedef typename Expression::Index StorageIndex;
static void run(const Expression& expr, const GpuDevice& device);
};
-#if defined(__CUDACC__)
-template <typename Evaluator, typename Index, bool Vectorizable>
+#if defined(EIGEN_GPUCC)
+template <typename Evaluator, typename StorageIndex, bool Vectorizable>
struct EigenMetaKernelEval {
static __device__ EIGEN_ALWAYS_INLINE
- void run(Evaluator& eval, Index first, Index last, Index step_size) {
- for (Index i = first; i < last; i += step_size) {
+ void run(Evaluator& eval, StorageIndex first, StorageIndex last, StorageIndex step_size) {
+ for (StorageIndex i = first; i < last; i += step_size) {
eval.evalScalar(i);
}
}
};
-template <typename Evaluator, typename Index>
-struct EigenMetaKernelEval<Evaluator, Index, true> {
+template <typename Evaluator, typename StorageIndex>
+struct EigenMetaKernelEval<Evaluator, StorageIndex, true> {
static __device__ EIGEN_ALWAYS_INLINE
- void run(Evaluator& eval, Index first, Index last, Index step_size) {
- const Index PacketSize = unpacket_traits<typename Evaluator::PacketReturnType>::size;
- const Index vectorized_size = (last / PacketSize) * PacketSize;
- const Index vectorized_step_size = step_size * PacketSize;
+ void run(Evaluator& eval, StorageIndex first, StorageIndex last, StorageIndex step_size) {
+ const StorageIndex PacketSize = unpacket_traits<typename Evaluator::PacketReturnType>::size;
+ const StorageIndex vectorized_size = (last / PacketSize) * PacketSize;
+ const StorageIndex vectorized_step_size = step_size * PacketSize;
// Use the vector path
- for (Index i = first * PacketSize; i < vectorized_size;
+ for (StorageIndex i = first * PacketSize; i < vectorized_size;
i += vectorized_step_size) {
eval.evalPacket(i);
}
- for (Index i = vectorized_size + first; i < last; i += step_size) {
+ for (StorageIndex i = vectorized_size + first; i < last; i += step_size) {
eval.evalScalar(i);
}
}
};
-template <typename Evaluator, typename Index>
+template <typename Evaluator, typename StorageIndex>
__global__ void
__launch_bounds__(1024)
-EigenMetaKernel(Evaluator eval, Index size) {
+EigenMetaKernel(Evaluator eval, StorageIndex size) {
- const Index first_index = blockIdx.x * blockDim.x + threadIdx.x;
- const Index step_size = blockDim.x * gridDim.x;
+ const StorageIndex first_index = blockIdx.x * blockDim.x + threadIdx.x;
+ const StorageIndex step_size = blockDim.x * gridDim.x;
const bool vectorizable = Evaluator::PacketAccess & Evaluator::IsAligned;
- EigenMetaKernelEval<Evaluator, Index, vectorizable>::run(eval, first_index, size, step_size);
+ EigenMetaKernelEval<Evaluator, StorageIndex, vectorizable>::run(eval, first_index, size, step_size);
}
/*static*/
-template <typename Expression, bool Vectorizable>
-inline void TensorExecutor<Expression, GpuDevice, Vectorizable>::run(
+template <typename Expression, bool Vectorizable, bool Tileable>
+EIGEN_STRONG_INLINE void TensorExecutor<Expression, GpuDevice, Vectorizable, Tileable>::run(
const Expression& expr, const GpuDevice& device) {
TensorEvaluator<Expression, GpuDevice> evaluator(expr, device);
const bool needs_assign = evaluator.evalSubExprsIfNeeded(NULL);
if (needs_assign) {
- const int block_size = device.maxCudaThreadsPerBlock();
- const int max_blocks = device.getNumCudaMultiProcessors() *
- device.maxCudaThreadsPerMultiProcessor() / block_size;
- const Index size = array_prod(evaluator.dimensions());
+
+ const int block_size = device.maxGpuThreadsPerBlock();
+ const int max_blocks = device.getNumGpuMultiProcessors() *
+ device.maxGpuThreadsPerMultiProcessor() / block_size;
+ const StorageIndex size = array_prod(evaluator.dimensions());
// Create at least one block to ensure we won't crash when tensorflow calls with tensors of size 0.
const int num_blocks = numext::maxi<int>(numext::mini<int>(max_blocks, divup<int>(size, block_size)), 1);
- LAUNCH_CUDA_KERNEL(
- (EigenMetaKernel<TensorEvaluator<Expression, GpuDevice>, Index>),
+ LAUNCH_GPU_KERNEL(
+ (EigenMetaKernel<TensorEvaluator<Expression, GpuDevice>, StorageIndex>),
num_blocks, block_size, 0, device, evaluator, size);
}
evaluator.cleanup();
}
-#endif // __CUDACC__
+#endif // EIGEN_GPUCC
#endif // EIGEN_USE_GPU
// SYCL Executor policy
@@ -273,7 +399,7 @@ inline void TensorExecutor<Expression, GpuDevice, Vectorizable>::run(
template <typename Expression, bool Vectorizable>
class TensorExecutor<Expression, SyclDevice, Vectorizable> {
public:
- static inline void run(const Expression &expr, const SyclDevice &device) {
+ static EIGEN_STRONG_INLINE void run(const Expression &expr, const SyclDevice &device) {
// call TensorSYCL module
TensorSycl::run(expr, device);
}
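
As I read this patch, an ordinary tensor assignment ends up in one of the TensorExecutor specializations above, with Vectorizable and Tileable defaulted from the expression's evaluator traits (see the IsVectorizable/IsTileable changes in TensorForwardDeclarations.h further down). A small user-level example that exercises the single-threaded path; only public API is used, and the routing through the executor is my reading of this file:

    #include <unsupported/Eigen/CXX11/Tensor>
    #include <iostream>

    int main() {
      Eigen::Tensor<float, 2> a(64, 64), b(64, 64), c(64, 64);
      a.setRandom();
      b.setRandom();

      // Evaluated on the DefaultDevice; whether the vectorized or the
      // block-based specialization runs depends on the evaluator traits.
      c = a * b + a.constant(1.0f);

      std::cout << "c(0,0) = " << c(0, 0) << "\n";
      return 0;
    }
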
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorExpr.h b/unsupported/Eigen/CXX11/src/Tensor/TensorExpr.h
index 85dfc7a69..4b6540c07 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorExpr.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorExpr.h
@@ -38,7 +38,7 @@ struct traits<TensorCwiseNullaryOp<NullaryOp, XprType> >
typedef typename remove_reference<XprTypeNested>::type _XprTypeNested;
static const int NumDimensions = XprTraits::NumDimensions;
static const int Layout = XprTraits::Layout;
-
+ typedef typename XprTraits::PointerType PointerType;
enum {
Flags = 0
};
@@ -89,6 +89,7 @@ struct traits<TensorCwiseUnaryOp<UnaryOp, XprType> >
typedef typename remove_reference<XprTypeNested>::type _XprTypeNested;
static const int NumDimensions = XprTraits::NumDimensions;
static const int Layout = XprTraits::Layout;
+ typedef typename TypeConversion<Scalar, typename XprTraits::PointerType>::type PointerType;
};
template<typename UnaryOp, typename XprType>
@@ -161,7 +162,11 @@ struct traits<TensorCwiseBinaryOp<BinaryOp, LhsXprType, RhsXprType> >
typedef typename remove_reference<RhsNested>::type _RhsNested;
static const int NumDimensions = XprTraits::NumDimensions;
static const int Layout = XprTraits::Layout;
-
+ typedef typename TypeConversion<Scalar,
+ typename conditional<Pointer_type_promotion<typename LhsXprType::Scalar, Scalar>::val,
+ typename traits<LhsXprType>::PointerType,
+ typename traits<RhsXprType>::PointerType>::type
+ >::type PointerType;
enum {
Flags = 0
};
@@ -238,7 +243,11 @@ struct traits<TensorCwiseTernaryOp<TernaryOp, Arg1XprType, Arg2XprType, Arg3XprT
typedef typename remove_reference<Arg3Nested>::type _Arg3Nested;
static const int NumDimensions = XprTraits::NumDimensions;
static const int Layout = XprTraits::Layout;
-
+ typedef typename TypeConversion<Scalar,
+ typename conditional<Pointer_type_promotion<typename Arg2XprType::Scalar, Scalar>::val,
+ typename traits<Arg2XprType>::PointerType,
+ typename traits<Arg3XprType>::PointerType>::type
+ >::type PointerType;
enum {
Flags = 0
};
@@ -314,6 +323,9 @@ struct traits<TensorSelectOp<IfXprType, ThenXprType, ElseXprType> >
typedef typename ElseXprType::Nested ElseNested;
static const int NumDimensions = XprTraits::NumDimensions;
static const int Layout = XprTraits::Layout;
+ typedef typename conditional<Pointer_type_promotion<typename ThenXprType::Scalar, Scalar>::val,
+ typename traits<ThenXprType>::PointerType,
+ typename traits<ElseXprType>::PointerType>::type PointerType;
};
template<typename IfXprType, typename ThenXprType, typename ElseXprType>
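
The PointerType selection added to the binary and ternary traits above picks the left operand's pointer type when its scalar already matches the result scalar, and otherwise falls back to the right operand's. A standalone sketch of that selection rule with standard type traits (the TypeConversion wrapper is omitted; names below are illustrative, not Eigen's):

    #include <type_traits>

    // Analogue of Pointer_type_promotion<A, B>: true only when the two
    // scalar types are identical.
    template <typename A, typename B>
    struct PromoteLeft : std::is_same<A, B> {};

    // Analogue of the conditional in traits<TensorCwiseBinaryOp<...> >.
    template <typename Scalar, typename LhsScalar, typename LhsPtr, typename RhsPtr>
    struct BinaryPointerType {
      typedef typename std::conditional<PromoteLeft<LhsScalar, Scalar>::value,
                                        LhsPtr, RhsPtr>::type type;
    };

    static_assert(std::is_same<BinaryPointerType<float, float, float*, double*>::type,
                               float*>::value,
                  "lhs scalar matches the result scalar: take the lhs pointer");
    static_assert(std::is_same<BinaryPointerType<double, float, float*, double*>::type,
                               double*>::value,
                  "otherwise fall back to the rhs pointer");

    int main() { return 0; }
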
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorFFT.h b/unsupported/Eigen/CXX11/src/Tensor/TensorFFT.h
index f060191ab..480cf1f39 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorFFT.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorFFT.h
@@ -71,6 +71,7 @@ struct traits<TensorFFTOp<FFT, XprType, FFTResultType, FFTDir> > : public traits
typedef typename remove_reference<Nested>::type _Nested;
static const int NumDimensions = XprTraits::NumDimensions;
static const int Layout = XprTraits::Layout;
+ typedef typename traits<XprType>::PointerType PointerType;
};
template <typename FFT, typename XprType, int FFTResultType, int FFTDirection>
@@ -135,6 +136,7 @@ struct TensorEvaluator<const TensorFFTOp<FFT, ArgType, FFTResultType, FFTDir>, D
IsAligned = false,
PacketAccess = true,
BlockAccess = false,
+ PreferBlockAccess = false,
Layout = TensorEvaluator<ArgType, Device>::Layout,
CoordAccess = false,
RawAccess = false
@@ -230,20 +232,32 @@ struct TensorEvaluator<const TensorFFTOp<FFT, ArgType, FFTResultType, FFTDir>, D
// t_n = exp(sqrt(-1) * pi * n^2 / line_len)
// for n = 0, 1,..., line_len-1.
// For n > 2 we use the recurrence t_n = t_{n-1}^2 / t_{n-2} * t_1^2
- pos_j_base_powered[0] = ComplexScalar(1, 0);
- if (line_len > 1) {
- const RealScalar pi_over_len(EIGEN_PI / line_len);
- const ComplexScalar pos_j_base = ComplexScalar(
- std::cos(pi_over_len), std::sin(pi_over_len));
- pos_j_base_powered[1] = pos_j_base;
- if (line_len > 2) {
- const ComplexScalar pos_j_base_sq = pos_j_base * pos_j_base;
- for (int j = 2; j < line_len + 1; ++j) {
- pos_j_base_powered[j] = pos_j_base_powered[j - 1] *
- pos_j_base_powered[j - 1] /
- pos_j_base_powered[j - 2] * pos_j_base_sq;
- }
- }
+
+ // The recurrence is correct in exact arithmetic, but causes
+ // numerical issues for large transforms, especially in
+ // single-precision floating point.
+ //
+ // pos_j_base_powered[0] = ComplexScalar(1, 0);
+ // if (line_len > 1) {
+ // const ComplexScalar pos_j_base = ComplexScalar(
+ // numext::cos(M_PI / line_len), numext::sin(M_PI / line_len));
+ // pos_j_base_powered[1] = pos_j_base;
+ // if (line_len > 2) {
+ // const ComplexScalar pos_j_base_sq = pos_j_base * pos_j_base;
+ // for (int i = 2; i < line_len + 1; ++i) {
+ // pos_j_base_powered[i] = pos_j_base_powered[i - 1] *
+ // pos_j_base_powered[i - 1] /
+ // pos_j_base_powered[i - 2] *
+ // pos_j_base_sq;
+ // }
+ // }
+ // }
+ // TODO(rmlarsen): Find a way to use Eigen's vectorized sin
+ // and cosine functions here.
+ for (int j = 0; j < line_len + 1; ++j) {
+ double arg = ((EIGEN_PI * j) * j) / line_len;
+ std::complex<double> tmp(numext::cos(arg), numext::sin(arg));
+ pos_j_base_powered[j] = static_cast<ComplexScalar>(tmp);
}
}
@@ -261,7 +275,7 @@ struct TensorEvaluator<const TensorFFTOp<FFT, ArgType, FFTResultType, FFTDir>, D
}
}
- // processs the line
+ // process the line
if (is_power_of_two) {
processDataLineCooleyTukey(line_buf, line_len, log_len);
}
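
The commented-out recurrence above, t_n = t_{n-1}^2 / t_{n-2} * t_1^2, is exact in real arithmetic but accumulates error in float, which is why the patch now evaluates t_n = exp(i*pi*n^2/line_len) directly. A standalone sketch (not Eigen code) that measures the drift for a moderately large line length:

    #include <algorithm>
    #include <cmath>
    #include <complex>
    #include <cstdio>

    int main() {
      const int N = 1 << 14;  // line length
      const double PI = 3.14159265358979323846;
      std::complex<float> prev2(1.f, 0.f);                            // t_0
      std::complex<float> prev1(std::cos(PI / N), std::sin(PI / N));  // t_1
      const std::complex<float> t1_sq = prev1 * prev1;
      float max_err = 0.f;
      for (int n = 2; n <= N; ++n) {
        const std::complex<float> rec = prev1 * prev1 / prev2 * t1_sq;  // recurrence
        const double arg = (PI * n) * n / N;                            // direct evaluation
        const std::complex<float> direct(static_cast<float>(std::cos(arg)),
                                         static_cast<float>(std::sin(arg)));
        max_err = std::max(max_err, std::abs(rec - direct));
        prev2 = prev1;
        prev1 = rec;
      }
      // The gap typically grows far beyond float epsilon as N increases.
      std::printf("max |recurrence - direct| = %g\n", max_err);
      return 0;
    }
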
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorFixedSize.h b/unsupported/Eigen/CXX11/src/Tensor/TensorFixedSize.h
index fcee5f60d..71ba56773 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorFixedSize.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorFixedSize.h
@@ -20,7 +20,7 @@ namespace Eigen {
* The fixed sized equivalent of
* Eigen::Tensor<float, 3> t(3, 5, 7);
* is
- * Eigen::TensorFixedSize<float, Size<3,5,7>> t;
+ * Eigen::TensorFixedSize<float, Sizes<3,5,7>> t;
*/
template<typename Scalar_, typename Dimensions_, int Options_, typename IndexType>
@@ -40,6 +40,9 @@ class TensorFixedSize : public TensorBase<TensorFixedSize<Scalar_, Dimensions_,
enum {
IsAligned = bool(EIGEN_MAX_ALIGN_BYTES>0),
+ PacketAccess = (internal::packet_traits<Scalar>::size > 1),
+ BlockAccess = false,
+ PreferBlockAccess = false,
Layout = Options_ & RowMajor ? RowMajor : ColMajor,
CoordAccess = true,
RawAccess = true
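
The corrected doc comment above already shows the TensorFixedSize declaration; for context, a short usage sketch with the public API as I understand it (all dimensions are compile-time constants):

    #include <unsupported/Eigen/CXX11/Tensor>
    #include <iostream>

    int main() {
      Eigen::TensorFixedSize<float, Eigen::Sizes<3, 5, 7> > t;
      t.setConstant(2.0f);

      Eigen::TensorFixedSize<float, Eigen::Sizes<3, 5, 7> > u;
      u = t * t;  // element-wise square, same fixed dimensions

      std::cout << u(1, 2, 3) << "\n";  // 4
      return 0;
    }
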
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorForcedEval.h b/unsupported/Eigen/CXX11/src/Tensor/TensorForcedEval.h
index abe85c860..edf9f85d8 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorForcedEval.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorForcedEval.h
@@ -38,6 +38,7 @@ struct traits<TensorForcedEvalOp<XprType> >
typedef typename remove_reference<Nested>::type _Nested;
static const int NumDimensions = XprTraits::NumDimensions;
static const int Layout = XprTraits::Layout;
+ typedef typename XprTraits::PointerType PointerType;
enum {
Flags = 0
@@ -92,11 +93,13 @@ struct TensorEvaluator<const TensorForcedEvalOp<ArgType>, Device>
typedef typename XprType::Index Index;
typedef typename XprType::CoeffReturnType CoeffReturnType;
typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
- static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
+ static const int PacketSize = PacketType<CoeffReturnType, Device>::size;
enum {
IsAligned = true,
- PacketAccess = (PacketSize > 1),
+ PacketAccess = (PacketType<CoeffReturnType, Device>::size > 1),
+ BlockAccess = false,
+ PreferBlockAccess = false,
Layout = TensorEvaluator<ArgType, Device>::Layout,
RawAccess = true
};
@@ -108,9 +111,12 @@ struct TensorEvaluator<const TensorForcedEvalOp<ArgType>, Device>
EIGEN_DEVICE_FUNC const Dimensions& dimensions() const { return m_impl.dimensions(); }
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType*) {
+ #if !defined(EIGEN_HIPCC)
+ EIGEN_DEVICE_FUNC
+ #endif
+ EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType*) {
const Index numValues = internal::array_prod(m_impl.dimensions());
- m_buffer = (CoeffReturnType*)m_device.allocate(numValues * sizeof(CoeffReturnType));
+ m_buffer = (CoeffReturnType*)m_device.allocate_temp(numValues * sizeof(CoeffReturnType));
// Should initialize the memory in case we're dealing with non POD types.
if (NumTraits<CoeffReturnType>::RequireInitialization) {
for (Index i = 0; i < numValues; ++i) {
@@ -119,12 +125,12 @@ struct TensorEvaluator<const TensorForcedEvalOp<ArgType>, Device>
}
typedef TensorEvalToOp< const typename internal::remove_const<ArgType>::type > EvalTo;
EvalTo evalToTmp(m_buffer, m_op);
- const bool PacketAccess = internal::IsVectorizable<Device, const ArgType>::value;
- internal::TensorExecutor<const EvalTo, typename internal::remove_const<Device>::type, PacketAccess>::run(evalToTmp, m_device);
+ const bool Vectorize = internal::IsVectorizable<Device, const ArgType>::value;
+ internal::TensorExecutor<const EvalTo, typename internal::remove_const<Device>::type, Vectorize>::run(evalToTmp, m_device);
return true;
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
- m_device.deallocate(m_buffer);
+ m_device.deallocate_temp(m_buffer);
m_buffer = NULL;
}
@@ -143,7 +149,8 @@ struct TensorEvaluator<const TensorForcedEvalOp<ArgType>, Device>
return TensorOpCost(sizeof(CoeffReturnType), 0, 0, vectorized, PacketSize);
}
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType* data() const { return m_buffer; }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ typename Eigen::internal::traits<XprType>::PointerType data() const { return m_buffer; }
/// required by sycl in order to extract the sycl accessor
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const TensorEvaluator<ArgType, Device>& impl() { return m_impl; }
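
As I read it, this evaluator backs .eval() on tensor expressions, and the patch routes its scratch buffer through allocate_temp/deallocate_temp. A user-level sketch of where it appears (public API only; the buffering behaviour described in the comment is my reading of this hunk):

    #include <unsupported/Eigen/CXX11/Tensor>
    #include <iostream>

    int main() {
      Eigen::Tensor<float, 2> a(128, 128), b(128, 128);
      a.setRandom();
      b.setRandom();

      // .eval() forces (a + b) to be materialized into a temporary buffer
      // before the surrounding product is evaluated.
      Eigen::Tensor<float, 2> c = ((a + b).eval() * a);

      std::cout << c(0, 0) << "\n";
      return 0;
    }
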
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h b/unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h
index 2e638992a..04a8b953d 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h
@@ -12,7 +12,7 @@
namespace Eigen {
-// MakePointer class is used as a container of the adress space of the pointer
+// MakePointer class is used as a container of the address space of the pointer
// on the host and on the device. From the host side it generates the T* pointer
// and when EIGEN_USE_SYCL is used it constructs a buffer with a map_allocator to
// T* m_data on the host. It is always called on the device.
@@ -21,7 +21,32 @@ namespace Eigen {
template<typename T> struct MakePointer {
typedef T* Type;
typedef T& RefType;
+ typedef T ScalarType;
};
+
+// The PointerType class is a container of the device-specific pointer
+// used for referring to a pointer in the TensorEvaluator class. While the TensorExpression
+// is a device-agnostic type and needs the MakePointer class for type conversion,
+// the TensorEvaluator class can be specialized for a device, hence it is possible
+// to construct different types of temporary storage memory in TensorEvaluator
+// for different devices by specializing the following PointerType class.
+template<typename T, typename Device> struct PointerType : MakePointer<T>{};
+
+namespace internal{
+template<typename A, typename B> struct Pointer_type_promotion {
+ static const bool val=false;
+};
+template<typename A> struct Pointer_type_promotion<A, A> {
+ static const bool val = true;
+};
+template<typename A, typename B> struct TypeConversion;
+#ifndef __SYCL_DEVICE_ONLY__
+template<typename A, typename B> struct TypeConversion{
+ typedef A* type;
+};
+#endif
+}
+
#if defined(EIGEN_USE_SYCL)
namespace TensorSycl {
namespace internal{
@@ -49,7 +74,7 @@ template<typename Op, typename Dims, typename XprType, template <class> class Ma
template<typename XprType> class TensorIndexTupleOp;
template<typename ReduceOp, typename Dims, typename XprType> class TensorTupleReducerOp;
template<typename Axis, typename LeftXprType, typename RightXprType> class TensorConcatenationOp;
-template<typename Dimensions, typename LeftXprType, typename RightXprType> class TensorContractionOp;
+template<typename Dimensions, typename LeftXprType, typename RightXprType, typename OutputKernelType> class TensorContractionOp;
template<typename TargetType, typename XprType> class TensorConversionOp;
template<typename Dimensions, typename InputXprType, typename KernelXprType> class TensorConvolutionOp;
template<typename FFT, typename XprType, int FFTDataType, int FFTDirection> class TensorFFTOp;
@@ -70,6 +95,7 @@ template<typename Strides, typename XprType> class TensorInflationOp;
template<typename Generator, typename XprType> class TensorGeneratorOp;
template<typename LeftXprType, typename RightXprType> class TensorAssignOp;
template<typename Op, typename XprType> class TensorScanOp;
+template<typename Dims, typename XprType> class TensorTraceOp;
template<typename CustomUnaryFunc, typename XprType> class TensorCustomUnaryOp;
template<typename CustomBinaryFunc, typename LhsXprType, typename RhsXprType> class TensorCustomBinaryOp;
@@ -80,6 +106,8 @@ template<typename XprType> class TensorForcedEvalOp;
template<typename ExpressionType, typename DeviceType> class TensorDevice;
template<typename Derived, typename Device> struct TensorEvaluator;
+struct NoOpOutputKernel;
+
struct DefaultDevice;
struct ThreadPoolDevice;
struct GpuDevice;
@@ -110,8 +138,18 @@ struct IsVectorizable<GpuDevice, Expression> {
TensorEvaluator<Expression, GpuDevice>::IsAligned;
};
+template <typename Device, typename Expression>
+struct IsTileable {
+ // Check that block evaluation is supported and that it is the preferred option (at
+ // least one sub-expression has much faster block evaluation, e.g.
+ // broadcasting).
+ static const bool value = TensorEvaluator<Expression, Device>::BlockAccess &&
+ TensorEvaluator<Expression, Device>::PreferBlockAccess;
+};
+
template <typename Expression, typename Device,
- bool Vectorizable = IsVectorizable<Device, Expression>::value>
+ bool Vectorizable = IsVectorizable<Device, Expression>::value,
+ bool Tileable = IsTileable<Device, Expression>::value>
class TensorExecutor;
} // end namespace internal
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorFunctors.h b/unsupported/Eigen/CXX11/src/Tensor/TensorFunctors.h
index 3b4f8eda1..51572f9e7 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorFunctors.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorFunctors.h
@@ -20,7 +20,7 @@ namespace internal {
template <typename Scalar>
struct scalar_mod_op {
EIGEN_DEVICE_FUNC scalar_mod_op(const Scalar& divisor) : m_divisor(divisor) {}
- EIGEN_DEVICE_FUNC inline Scalar operator() (const Scalar& a) const { return a % m_divisor; }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator() (const Scalar& a) const { return a % m_divisor; }
const Scalar m_divisor;
};
template <typename Scalar>
@@ -34,7 +34,7 @@ struct functor_traits<scalar_mod_op<Scalar> >
template <typename Scalar>
struct scalar_mod2_op {
EIGEN_EMPTY_STRUCT_CTOR(scalar_mod2_op)
- EIGEN_DEVICE_FUNC inline Scalar operator() (const Scalar& a, const Scalar& b) const { return a % b; }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator() (const Scalar& a, const Scalar& b) const { return a % b; }
};
template <typename Scalar>
struct functor_traits<scalar_mod2_op<Scalar> >
@@ -54,50 +54,19 @@ struct functor_traits<scalar_fmod_op<Scalar> > {
PacketAccess = false };
};
-
-/** \internal
- * \brief Template functor to compute the sigmoid of a scalar
- * \sa class CwiseUnaryOp, ArrayBase::sigmoid()
- */
-template <typename T>
-struct scalar_sigmoid_op {
- EIGEN_EMPTY_STRUCT_CTOR(scalar_sigmoid_op)
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T operator()(const T& x) const {
- const T one = T(1);
- return one / (one + numext::exp(-x));
- }
-
- template <typename Packet> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
- Packet packetOp(const Packet& x) const {
- const Packet one = pset1<Packet>(T(1));
- return pdiv(one, padd(one, pexp(pnegate(x))));
- }
-};
-
-template <typename T>
-struct functor_traits<scalar_sigmoid_op<T> > {
- enum {
- Cost = NumTraits<T>::AddCost * 2 + NumTraits<T>::MulCost * 6,
- PacketAccess = packet_traits<T>::HasAdd && packet_traits<T>::HasDiv &&
- packet_traits<T>::HasNegate && packet_traits<T>::HasExp
- };
-};
-
-
template<typename Reducer, typename Device>
struct reducer_traits {
enum {
Cost = 1,
- PacketAccess = false
+ PacketAccess = false,
+ IsStateful = false,
+ IsExactlyAssociative = true
};
};
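For orientation (editorial note, not part of the patch): the hunks below move the IsStateful and IsExactlyAssociative flags out of each reducer and into reducer_traits, so a user-supplied reducer would now declare them in a traits specialization roughly as in this sketch. AbsMaxReducer is a made-up name used purely for illustration.

// Illustrative sketch only; AbsMaxReducer is hypothetical.
template <typename T> struct AbsMaxReducer {
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const T t, T* accum) const {
    const T a = numext::abs(t);
    if (a > *accum) { *accum = a; }
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T initialize() const { return T(0); }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalize(const T accum) const { return accum; }
};

template <typename T, typename Device>
struct reducer_traits<AbsMaxReducer<T>, Device> {
  enum {
    Cost = 2 * NumTraits<T>::AddCost,
    PacketAccess = false,           // no packetOp provided in this sketch
    IsStateful = false,             // no per-instance accumulation state
    IsExactlyAssociative = true     // max is associative in exact arithmetic
  };
};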
// Standard reduction functors
template <typename T> struct SumReducer
{
- static const bool PacketAccess = packet_traits<T>::HasAdd;
- static const bool IsStateful = false;
-
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const T t, T* accum) const {
internal::scalar_sum_op<T> sum_op;
*accum = sum_op(*accum, t);
@@ -133,16 +102,14 @@ template <typename T, typename Device>
struct reducer_traits<SumReducer<T>, Device> {
enum {
Cost = NumTraits<T>::AddCost,
- PacketAccess = PacketType<T, Device>::HasAdd
+ PacketAccess = PacketType<T, Device>::HasAdd,
+ IsStateful = false,
+ IsExactlyAssociative = NumTraits<T>::IsInteger
};
};
-
template <typename T> struct MeanReducer
{
- static const bool PacketAccess = packet_traits<T>::HasAdd && !NumTraits<T>::IsInteger;
- static const bool IsStateful = true;
-
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
MeanReducer() : scalarCount_(0), packetCount_(0) { }
@@ -166,16 +133,20 @@ template <typename T> struct MeanReducer
return pset1<Packet>(initialize());
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalize(const T accum) const {
- return accum / scalarCount_;
+ internal::scalar_quotient_op<T> quotient_op;
+ return quotient_op(accum, T(scalarCount_));
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet finalizePacket(const Packet& vaccum) const {
- return pdiv(vaccum, pset1<Packet>(packetCount_));
+ return pdiv(vaccum, pset1<Packet>(T(packetCount_)));
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalizeBoth(const T saccum, const Packet& vaccum) const {
internal::scalar_sum_op<T> sum_op;
- return sum_op(saccum, predux(vaccum)) / (scalarCount_ + packetCount_ * unpacket_traits<Packet>::size);
+ internal::scalar_quotient_op<T> quotient_op;
+ return quotient_op(
+ sum_op(saccum, predux(vaccum)),
+ T(scalarCount_ + packetCount_ * unpacket_traits<Packet>::size));
}
protected:
@@ -187,7 +158,10 @@ template <typename T, typename Device>
struct reducer_traits<MeanReducer<T>, Device> {
enum {
Cost = NumTraits<T>::AddCost,
- PacketAccess = PacketType<T, Device>::HasAdd
+ PacketAccess = PacketType<T, Device>::HasAdd &&
+ PacketType<T, Device>::HasDiv && !NumTraits<T>::IsInteger,
+ IsStateful = true,
+ IsExactlyAssociative = NumTraits<T>::IsInteger
};
};
@@ -220,9 +194,6 @@ struct MinMaxBottomValue<T, false, false> {
template <typename T> struct MaxReducer
{
- static const bool PacketAccess = packet_traits<T>::HasMax;
- static const bool IsStateful = false;
-
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const T t, T* accum) const {
if (t > *accum) { *accum = t; }
}
@@ -254,16 +225,15 @@ template <typename T, typename Device>
struct reducer_traits<MaxReducer<T>, Device> {
enum {
Cost = NumTraits<T>::AddCost,
- PacketAccess = PacketType<T, Device>::HasMax
+ PacketAccess = PacketType<T, Device>::HasMax,
+ IsStateful = false,
+ IsExactlyAssociative = true
};
};
template <typename T> struct MinReducer
{
- static const bool PacketAccess = packet_traits<T>::HasMin;
- static const bool IsStateful = false;
-
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const T t, T* accum) const {
if (t < *accum) { *accum = t; }
}
@@ -295,16 +265,15 @@ template <typename T, typename Device>
struct reducer_traits<MinReducer<T>, Device> {
enum {
Cost = NumTraits<T>::AddCost,
- PacketAccess = PacketType<T, Device>::HasMin
+ PacketAccess = PacketType<T, Device>::HasMin,
+ IsStateful = false,
+ IsExactlyAssociative = true
};
};
template <typename T> struct ProdReducer
{
- static const bool PacketAccess = packet_traits<T>::HasMul;
- static const bool IsStateful = false;
-
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const T t, T* accum) const {
internal::scalar_product_op<T> prod_op;
(*accum) = prod_op(*accum, t);
@@ -340,16 +309,15 @@ template <typename T, typename Device>
struct reducer_traits<ProdReducer<T>, Device> {
enum {
Cost = NumTraits<T>::MulCost,
- PacketAccess = PacketType<T, Device>::HasMul
+ PacketAccess = PacketType<T, Device>::HasMul,
+ IsStateful = false,
+ IsExactlyAssociative = true
};
};
struct AndReducer
{
- static const bool PacketAccess = false;
- static const bool IsStateful = false;
-
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(bool t, bool* accum) const {
*accum = *accum && t;
}
@@ -365,15 +333,14 @@ template <typename Device>
struct reducer_traits<AndReducer, Device> {
enum {
Cost = 1,
- PacketAccess = false
+ PacketAccess = false,
+ IsStateful = false,
+ IsExactlyAssociative = true
};
};
struct OrReducer {
- static const bool PacketAccess = false;
- static const bool IsStateful = false;
-
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(bool t, bool* accum) const {
*accum = *accum || t;
}
@@ -389,7 +356,9 @@ template <typename Device>
struct reducer_traits<OrReducer, Device> {
enum {
Cost = 1,
- PacketAccess = false
+ PacketAccess = false,
+ IsStateful = false,
+ IsExactlyAssociative = true
};
};
@@ -397,9 +366,6 @@ struct reducer_traits<OrReducer, Device> {
// Argmin/Argmax reducers
template <typename T> struct ArgMaxTupleReducer
{
- static const bool PacketAccess = false;
- static const bool IsStateful = false;
-
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const T t, T* accum) const {
if (t.second > accum->second) { *accum = t; }
}
@@ -415,16 +381,15 @@ template <typename T, typename Device>
struct reducer_traits<ArgMaxTupleReducer<T>, Device> {
enum {
Cost = NumTraits<T>::AddCost,
- PacketAccess = false
+ PacketAccess = false,
+ IsStateful = false,
+ IsExactlyAssociative = true
};
};
template <typename T> struct ArgMinTupleReducer
{
- static const bool PacketAccess = false;
- static const bool IsStateful = false;
-
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const T& t, T* accum) const {
if (t.second < accum->second) { *accum = t; }
}
@@ -440,7 +405,9 @@ template <typename T, typename Device>
struct reducer_traits<ArgMinTupleReducer<T>, Device> {
enum {
Cost = NumTraits<T>::AddCost,
- PacketAccess = false
+ PacketAccess = false,
+ IsStateful = false,
+ IsExactlyAssociative = true
};
};
@@ -483,6 +450,25 @@ struct functor_traits<GaussianGenerator<T, Index, NumDims> > {
};
};
+template <typename Scalar>
+struct scalar_clamp_op {
+ EIGEN_DEVICE_FUNC inline scalar_clamp_op(const Scalar& _min, const Scalar& _max) : m_min(_min), m_max(_max) {}
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar
+ operator()(const Scalar& x) const {
+ return numext::mini(numext::maxi(x, m_min), m_max);
+ }
+ template <typename Packet>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet
+ packetOp(const Packet& x) const {
+ return internal::pmin(internal::pmax(x, pset1<Packet>(m_min)), pset1<Packet>(m_max));
+ }
+ const Scalar m_min;
+ const Scalar m_max;
+};
+template<typename Scalar>
+struct functor_traits<scalar_clamp_op<Scalar> >
+{ enum { Cost = 2 * NumTraits<Scalar>::AddCost, PacketAccess = (packet_traits<Scalar>::HasMin && packet_traits<Scalar>::HasMax)}; };
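A hedged usage sketch for the clamp functor added above (not part of the patch): applying it coefficient-wise via unaryExpr. The functor lives in Eigen::internal and is normally reached through higher-level tensor expressions; the standalone program below is illustration only and assumes the patched headers are on the include path.

#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>

int main() {
  Eigen::ArrayXf a(5);
  a << -2.f, -0.5f, 0.25f, 0.75f, 3.f;
  // Clamp every coefficient into [0, 1].
  Eigen::internal::scalar_clamp_op<float> clamp(0.f, 1.f);
  Eigen::ArrayXf b = a.unaryExpr(clamp);
  std::cout << b.transpose() << std::endl;  // prints: 0 0 0.25 0.75 1
  return 0;
}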
+
} // end namespace internal
} // end namespace Eigen
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorGenerator.h b/unsupported/Eigen/CXX11/src/Tensor/TensorGenerator.h
index eb1d4934e..95c9e6aee 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorGenerator.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorGenerator.h
@@ -31,6 +31,7 @@ struct traits<TensorGeneratorOp<Generator, XprType> > : public traits<XprType>
typedef typename remove_reference<Nested>::type _Nested;
static const int NumDimensions = XprTraits::NumDimensions;
static const int Layout = XprTraits::Layout;
+ typedef typename XprTraits::PointerType PointerType;
};
template<typename Generator, typename XprType>
@@ -89,8 +90,9 @@ struct TensorEvaluator<const TensorGeneratorOp<Generator, ArgType>, Device>
typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
enum {
IsAligned = false,
- PacketAccess = (internal::unpacket_traits<PacketReturnType>::size > 1),
+ PacketAccess = (PacketType<CoeffReturnType, Device>::size > 1),
BlockAccess = false,
+ PreferBlockAccess = false,
Layout = TensorEvaluator<ArgType, Device>::Layout,
CoordAccess = false, // to be implemented
RawAccess = false
@@ -98,9 +100,12 @@ struct TensorEvaluator<const TensorGeneratorOp<Generator, ArgType>, Device>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
: m_generator(op.generator())
+#ifdef EIGEN_USE_SYCL
+ , m_argImpl(op.expression(), device)
+#endif
{
- TensorEvaluator<ArgType, Device> impl(op.expression(), device);
- m_dimensions = impl.dimensions();
+ TensorEvaluator<ArgType, Device> argImpl(op.expression(), device);
+ m_dimensions = argImpl.dimensions();
if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
m_strides[0] = 1;
@@ -133,7 +138,7 @@ struct TensorEvaluator<const TensorGeneratorOp<Generator, ArgType>, Device>
template<int LoadMode>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
{
- const int packetSize = internal::unpacket_traits<PacketReturnType>::size;
+ const int packetSize = PacketType<CoeffReturnType, Device>::size;
EIGEN_STATIC_ASSERT((packetSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
eigen_assert(index+packetSize-1 < dimensions().TotalSize());
@@ -153,7 +158,12 @@ struct TensorEvaluator<const TensorGeneratorOp<Generator, ArgType>, Device>
TensorOpCost::MulCost<Scalar>());
}
- EIGEN_DEVICE_FUNC Scalar* data() const { return NULL; }
+ EIGEN_DEVICE_FUNC typename Eigen::internal::traits<XprType>::PointerType data() const { return NULL; }
+
+#ifdef EIGEN_USE_SYCL
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const TensorEvaluator<ArgType, Device>& impl() const { return m_argImpl; }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Generator& functor() const { return m_generator; }
+#endif
protected:
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
@@ -178,6 +188,9 @@ struct TensorEvaluator<const TensorGeneratorOp<Generator, ArgType>, Device>
Dimensions m_dimensions;
array<Index, NumDims> m_strides;
Generator m_generator;
+#ifdef EIGEN_USE_SYCL
+ TensorEvaluator<ArgType, Device> m_argImpl;
+#endif
};
} // end namespace Eigen
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorGpuHipCudaDefines.h b/unsupported/Eigen/CXX11/src/Tensor/TensorGpuHipCudaDefines.h
new file mode 100644
index 000000000..5438ebe71
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorGpuHipCudaDefines.h
@@ -0,0 +1,88 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+// Copyright (C) 2018 Deven Desai <deven.desai.amd@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#if defined(EIGEN_USE_GPU) && !defined(EIGEN_CXX11_TENSOR_GPU_HIP_CUDA_DEFINES_H)
+#define EIGEN_CXX11_TENSOR_GPU_HIP_CUDA_DEFINES_H
+
+// Note that we are using EIGEN_USE_HIP here instead of EIGEN_HIPCC... this is by design.
+// There is code in the TensorFlow codebase that will define EIGEN_USE_GPU but, for some
+// reason, gets sent to the gcc/host compiler instead of the gpu/nvcc/hipcc compiler.
+// When compiling such files, gcc will end up trying to pick up the CUDA headers by
+// default (see the code within "unsupported/Eigen/CXX11/Tensor" that is guarded by EIGEN_USE_GPU).
+// This will obviously not work when trying to compile TensorFlow on a system with no CUDA.
+// To work around this issue for HIP systems (and leave the default behaviour intact), the
+// HIP TensorFlow build defines EIGEN_USE_HIP when compiling all source files, and
+// "unsupported/Eigen/CXX11/Tensor" has been updated to use the HIP header when EIGEN_USE_HIP is
+// defined. In keeping with that requirement, the guard here needs to be EIGEN_USE_HIP as well.
+
+#if defined(EIGEN_USE_HIP)
+
+#define gpuStream_t hipStream_t
+#define gpuDeviceProp_t hipDeviceProp_t
+#define gpuError_t hipError_t
+#define gpuSuccess hipSuccess
+#define gpuErrorNotReady hipErrorNotReady
+#define gpuGetDeviceCount hipGetDeviceCount
+#define gpuGetErrorString hipGetErrorString
+#define gpuGetDeviceProperties hipGetDeviceProperties
+#define gpuStreamDefault hipStreamDefault
+#define gpuGetDevice hipGetDevice
+#define gpuSetDevice hipSetDevice
+#define gpuMalloc hipMalloc
+#define gpuFree hipFree
+#define gpuMemsetAsync hipMemsetAsync
+#define gpuMemcpyAsync hipMemcpyAsync
+#define gpuMemcpyDeviceToDevice hipMemcpyDeviceToDevice
+#define gpuMemcpyDeviceToHost hipMemcpyDeviceToHost
+#define gpuMemcpyHostToDevice hipMemcpyHostToDevice
+#define gpuStreamQuery hipStreamQuery
+#define gpuSharedMemConfig hipSharedMemConfig
+#define gpuDeviceSetSharedMemConfig hipDeviceSetSharedMemConfig
+#define gpuStreamSynchronize hipStreamSynchronize
+#define gpuDeviceSynchronize hipDeviceSynchronize
+#define gpuMemcpy hipMemcpy
+
+#else
+
+#define gpuStream_t cudaStream_t
+#define gpuDeviceProp_t cudaDeviceProp
+#define gpuError_t cudaError_t
+#define gpuSuccess cudaSuccess
+#define gpuErrorNotReady cudaErrorNotReady
+#define gpuGetDeviceCount cudaGetDeviceCount
+#define gpuGetErrorString cudaGetErrorString
+#define gpuGetDeviceProperties cudaGetDeviceProperties
+#define gpuStreamDefault cudaStreamDefault
+#define gpuGetDevice cudaGetDevice
+#define gpuSetDevice cudaSetDevice
+#define gpuMalloc cudaMalloc
+#define gpuFree cudaFree
+#define gpuMemsetAsync cudaMemsetAsync
+#define gpuMemcpyAsync cudaMemcpyAsync
+#define gpuMemcpyDeviceToDevice cudaMemcpyDeviceToDevice
+#define gpuMemcpyDeviceToHost cudaMemcpyDeviceToHost
+#define gpuMemcpyHostToDevice cudaMemcpyHostToDevice
+#define gpuStreamQuery cudaStreamQuery
+#define gpuSharedMemConfig cudaSharedMemConfig
+#define gpuDeviceSetSharedMemConfig cudaDeviceSetSharedMemConfig
+#define gpuStreamSynchronize cudaStreamSynchronize
+#define gpuDeviceSynchronize cudaDeviceSynchronize
+#define gpuMemcpy cudaMemcpy
+
+#endif
+
+#if defined(EIGEN_HIP_DEVICE_COMPILE) || (defined(EIGEN_CUDACC) && (EIGEN_CUDACC_VER==0))
+// clang-cuda and HIPCC do not support the use of assert on the GPU side.
+#define gpu_assert(COND)
+#else
+#define gpu_assert(COND) assert(COND)
+#endif
+
+#endif // EIGEN_CXX11_TENSOR_GPU_HIP_CUDA_DEFINES_H
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorGpuHipCudaUndefines.h b/unsupported/Eigen/CXX11/src/Tensor/TensorGpuHipCudaUndefines.h
new file mode 100644
index 000000000..db394bcbb
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorGpuHipCudaUndefines.h
@@ -0,0 +1,40 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+// Copyright (C) 2018 Deven Desai <deven.desai.amd@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#if defined(EIGEN_CXX11_TENSOR_GPU_HIP_CUDA_DEFINES_H)
+
+#undef gpuStream_t
+#undef gpuDeviceProp_t
+#undef gpuError_t
+#undef gpuSuccess
+#undef gpuErrorNotReady
+#undef gpuGetDeviceCount
+#undef gpuGetErrorString
+#undef gpuGetDeviceProperties
+#undef gpuStreamDefault
+#undef gpuGetDevice
+#undef gpuSetDevice
+#undef gpuMalloc
+#undef gpuFree
+#undef gpuMemsetAsync
+#undef gpuMemcpyAsync
+#undef gpuMemcpyDeviceToDevice
+#undef gpuMemcpyDeviceToHost
+#undef gpuMemcpyHostToDevice
+#undef gpuStreamQuery
+#undef gpuSharedMemConfig
+#undef gpuDeviceSetSharedMemConfig
+#undef gpuStreamSynchronize
+#undef gpuDeviceSynchronize
+#undef gpuMemcpy
+
+#undef EIGEN_CXX11_TENSOR_GPU_HIP_CUDA_DEFINES_H
+
+#endif // EIGEN_CXX11_TENSOR_GPU_HIP_CUDA_DEFINES_H
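A minimal sketch (not part of the patch) of how the gpu* aliases defined in these two headers are intended to be consumed inside Eigen's GPU code paths: the same source compiles against CUDA or HIP depending on whether EIGEN_USE_HIP is defined. The helper name is invented and error handling is elided.

#include <cassert>
#include "TensorGpuHipCudaDefines.h"    // after EIGEN_USE_GPU (and optionally EIGEN_USE_HIP) is set

// Copy n floats to freshly allocated device memory and wait for completion.
static void copy_to_device(const float* host, float** device, size_t n) {
  gpuMalloc(reinterpret_cast<void**>(device), n * sizeof(float));
  gpuMemcpy(*device, host, n * sizeof(float), gpuMemcpyHostToDevice);
  gpuDeviceSynchronize();
  gpu_assert(*device != NULL);
}

#include "TensorGpuHipCudaUndefines.h"  // restore the global macro namespace afterwards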
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorImagePatch.h b/unsupported/Eigen/CXX11/src/Tensor/TensorImagePatch.h
index 566856ed2..965bd8f1e 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorImagePatch.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorImagePatch.h
@@ -27,6 +27,7 @@ namespace Eigen {
* patch_cols, and 1 for all the additional dimensions.
*/
namespace internal {
+
template<DenseIndex Rows, DenseIndex Cols, typename XprType>
struct traits<TensorImagePatchOp<Rows, Cols, XprType> > : public traits<XprType>
{
@@ -38,6 +39,7 @@ struct traits<TensorImagePatchOp<Rows, Cols, XprType> > : public traits<XprType>
typedef typename remove_reference<Nested>::type _Nested;
static const int NumDimensions = XprTraits::NumDimensions + 1;
static const int Layout = XprTraits::Layout;
+ typedef typename XprTraits::PointerType PointerType;
};
template<DenseIndex Rows, DenseIndex Cols, typename XprType>
@@ -52,6 +54,66 @@ struct nested<TensorImagePatchOp<Rows, Cols, XprType>, 1, typename eval<TensorIm
typedef TensorImagePatchOp<Rows, Cols, XprType> type;
};
+template <typename Self, bool Vectorizable>
+struct ImagePatchCopyOp {
+ typedef typename Self::Index Index;
+ typedef typename Self::Scalar Scalar;
+ typedef typename Self::Impl Impl;
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Run(
+ const Self& self, const Index num_coeff_to_copy, const Index dst_index,
+ Scalar* dst_data, const Index src_index) {
+ const Impl& impl = self.impl();
+ for (Index i = 0; i < num_coeff_to_copy; ++i) {
+ dst_data[dst_index + i] = impl.coeff(src_index + i);
+ }
+ }
+};
+
+template <typename Self>
+struct ImagePatchCopyOp<Self, true> {
+ typedef typename Self::Index Index;
+ typedef typename Self::Scalar Scalar;
+ typedef typename Self::Impl Impl;
+ typedef typename packet_traits<Scalar>::type Packet;
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Run(
+ const Self& self, const Index num_coeff_to_copy, const Index dst_index,
+ Scalar* dst_data, const Index src_index) {
+ const Impl& impl = self.impl();
+ const Index packet_size = internal::unpacket_traits<Packet>::size;
+ const Index vectorized_size =
+ (num_coeff_to_copy / packet_size) * packet_size;
+ for (Index i = 0; i < vectorized_size; i += packet_size) {
+ Packet p = impl.template packet<Unaligned>(src_index + i);
+ internal::pstoret<Scalar, Packet, Unaligned>(dst_data + dst_index + i, p);
+ }
+ for (Index i = vectorized_size; i < num_coeff_to_copy; ++i) {
+ dst_data[dst_index + i] = impl.coeff(src_index + i);
+ }
+ }
+};
+
+template <typename Self>
+struct ImagePatchPaddingOp {
+ typedef typename Self::Index Index;
+ typedef typename Self::Scalar Scalar;
+ typedef typename packet_traits<Scalar>::type Packet;
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Run(
+ const Index num_coeff_to_pad, const Scalar padding_value,
+ const Index dst_index, Scalar* dst_data) {
+ const Index packet_size = internal::unpacket_traits<Packet>::size;
+ const Packet padded_packet = internal::pset1<Packet>(padding_value);
+ const Index vectorized_size =
+ (num_coeff_to_pad / packet_size) * packet_size;
+ for (Index i = 0; i < vectorized_size; i += packet_size) {
+ internal::pstoret<Scalar, Packet, Unaligned>(dst_data + dst_index + i,
+ padded_packet);
+ }
+ for (Index i = vectorized_size; i < num_coeff_to_pad; ++i) {
+ dst_data[dst_index + i] = padding_value;
+ }
+ }
+};
+
} // end namespace internal
template<DenseIndex Rows, DenseIndex Cols, typename XprType>
@@ -70,12 +132,12 @@ class TensorImagePatchOp : public TensorBase<TensorImagePatchOp<Rows, Cols, XprT
DenseIndex in_row_strides, DenseIndex in_col_strides,
DenseIndex row_inflate_strides, DenseIndex col_inflate_strides,
PaddingType padding_type, Scalar padding_value)
- : m_xpr(expr), m_patch_rows(patch_rows), m_patch_cols(patch_cols),
- m_row_strides(row_strides), m_col_strides(col_strides),
- m_in_row_strides(in_row_strides), m_in_col_strides(in_col_strides),
- m_row_inflate_strides(row_inflate_strides), m_col_inflate_strides(col_inflate_strides),
- m_padding_explicit(false), m_padding_top(0), m_padding_bottom(0), m_padding_left(0), m_padding_right(0),
- m_padding_type(padding_type), m_padding_value(padding_value) {}
+ : m_xpr(expr), m_patch_rows(patch_rows), m_patch_cols(patch_cols),
+ m_row_strides(row_strides), m_col_strides(col_strides),
+ m_in_row_strides(in_row_strides), m_in_col_strides(in_col_strides),
+ m_row_inflate_strides(row_inflate_strides), m_col_inflate_strides(col_inflate_strides),
+ m_padding_explicit(false), m_padding_top(0), m_padding_bottom(0), m_padding_left(0), m_padding_right(0),
+ m_padding_type(padding_type), m_padding_value(padding_value) {}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorImagePatchOp(const XprType& expr, DenseIndex patch_rows, DenseIndex patch_cols,
DenseIndex row_strides, DenseIndex col_strides,
@@ -84,13 +146,31 @@ class TensorImagePatchOp : public TensorBase<TensorImagePatchOp<Rows, Cols, XprT
DenseIndex padding_top, DenseIndex padding_bottom,
DenseIndex padding_left, DenseIndex padding_right,
Scalar padding_value)
- : m_xpr(expr), m_patch_rows(patch_rows), m_patch_cols(patch_cols),
- m_row_strides(row_strides), m_col_strides(col_strides),
- m_in_row_strides(in_row_strides), m_in_col_strides(in_col_strides),
- m_row_inflate_strides(row_inflate_strides), m_col_inflate_strides(col_inflate_strides),
- m_padding_explicit(true), m_padding_top(padding_top), m_padding_bottom(padding_bottom),
- m_padding_left(padding_left), m_padding_right(padding_right),
- m_padding_type(PADDING_VALID), m_padding_value(padding_value) {}
+ : m_xpr(expr), m_patch_rows(patch_rows), m_patch_cols(patch_cols),
+ m_row_strides(row_strides), m_col_strides(col_strides),
+ m_in_row_strides(in_row_strides), m_in_col_strides(in_col_strides),
+ m_row_inflate_strides(row_inflate_strides), m_col_inflate_strides(col_inflate_strides),
+ m_padding_explicit(true), m_padding_top(padding_top), m_padding_bottom(padding_bottom),
+ m_padding_left(padding_left), m_padding_right(padding_right),
+ m_padding_type(PADDING_VALID), m_padding_value(padding_value) {}
+
+#ifdef EIGEN_USE_SYCL // this is a workaround for SYCL, as Eigen cannot use the C++11 delegating constructor feature
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorImagePatchOp(const XprType& expr, DenseIndex patch_rows, DenseIndex patch_cols,
+ DenseIndex row_strides, DenseIndex col_strides,
+ DenseIndex in_row_strides, DenseIndex in_col_strides,
+ DenseIndex row_inflate_strides, DenseIndex col_inflate_strides,
+ bool padding_explicit, DenseIndex padding_top, DenseIndex padding_bottom,
+ DenseIndex padding_left, DenseIndex padding_right, PaddingType padding_type,
+ Scalar padding_value)
+ : m_xpr(expr), m_patch_rows(patch_rows), m_patch_cols(patch_cols),
+ m_row_strides(row_strides), m_col_strides(col_strides),
+ m_in_row_strides(in_row_strides), m_in_col_strides(in_col_strides),
+ m_row_inflate_strides(row_inflate_strides), m_col_inflate_strides(col_inflate_strides),
+ m_padding_explicit(padding_explicit), m_padding_top(padding_top), m_padding_bottom(padding_bottom),
+ m_padding_left(padding_left), m_padding_right(padding_right),
+ m_padding_type(padding_type), m_padding_value(padding_value) {}
+
+#endif
EIGEN_DEVICE_FUNC
DenseIndex patch_rows() const { return m_patch_rows; }
@@ -161,18 +241,30 @@ struct TensorEvaluator<const TensorImagePatchOp<Rows, Cols, ArgType>, Device>
typedef TensorEvaluator<ArgType, Device> Impl;
typedef typename XprType::CoeffReturnType CoeffReturnType;
typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
- static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
+ static const int PacketSize = PacketType<CoeffReturnType, Device>::size;
enum {
- IsAligned = false,
- PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
- Layout = TensorEvaluator<ArgType, Device>::Layout,
- CoordAccess = false,
- RawAccess = false
+ IsAligned = false,
+ PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
+ BlockAccess = true,
+ PreferBlockAccess = true,
+ Layout = TensorEvaluator<ArgType, Device>::Layout,
+ CoordAccess = false,
+ RawAccess = false
};
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
- : m_impl(op.expression(), device)
+ typedef internal::TensorBlock<Scalar, Index, NumDims, Layout>
+ OutputTensorBlock;
+
+#ifdef __SYCL_DEVICE_ONLY__
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator( const XprType op, const Device& device)
+ #else
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator( const XprType& op, const Device& device)
+ #endif
+ : m_device(device), m_impl(op.expression(), device)
+#ifdef EIGEN_USE_SYCL
+ , m_op(op)
+#endif
{
EIGEN_STATIC_ASSERT((NumDims >= 4), YOU_MADE_A_PROGRAMMING_MISTAKE);
@@ -238,9 +330,15 @@ struct TensorEvaluator<const TensorImagePatchOp<Rows, Cols, ArgType>, Device>
// Calculate the padding
m_rowPaddingTop = ((m_outputRows - 1) * m_row_strides + m_patch_rows_eff - m_input_rows_eff) / 2;
m_colPaddingLeft = ((m_outputCols - 1) * m_col_strides + m_patch_cols_eff - m_input_cols_eff) / 2;
+ // The padding size calculation for PADDING_SAME has been updated to
+ // be consistent with how TensorFlow extracts its paddings.
+ m_rowPaddingTop = numext::maxi<Index>(0, m_rowPaddingTop);
+ m_colPaddingLeft = numext::maxi<Index>(0, m_colPaddingLeft);
break;
default:
eigen_assert(false && "unexpected padding");
+      m_outputCols=0; // silence the uninitialised warning
+      m_outputRows=0; // silence the uninitialised warning
}
}
eigen_assert(m_outputRows > 0);
@@ -418,9 +516,14 @@ struct TensorEvaluator<const TensorImagePatchOp<Rows, Cols, ArgType>, Device>
return packetWithPossibleZero(index);
}
- EIGEN_DEVICE_FUNC Scalar* data() const { return NULL; }
+ EIGEN_DEVICE_FUNC typename Eigen::internal::traits<XprType>::PointerType data() const { return NULL; }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const TensorEvaluator<ArgType, Device>& impl() const { return m_impl; }
- const TensorEvaluator<ArgType, Device>& impl() const { return m_impl; }
+#ifdef EIGEN_USE_SYCL
+ // Required by SYCL in order to construct the expression tree on the device
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const XprType& xpr() const { return m_op; }
+#endif
Index rowPaddingTop() const { return m_rowPaddingTop; }
Index colPaddingLeft() const { return m_colPaddingLeft; }
@@ -445,6 +548,147 @@ struct TensorEvaluator<const TensorImagePatchOp<Rows, Cols, ArgType>, Device>
TensorOpCost(0, 0, compute_cost, vectorized, PacketSize);
}
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void getResourceRequirements(
+ std::vector<internal::TensorOpResourceRequirements>* resources) const {
+ Eigen::Index block_total_size_max = numext::maxi<Eigen::Index>(
+ 1, m_device.lastLevelCacheSize() / sizeof(Scalar));
+ resources->push_back(internal::TensorOpResourceRequirements(
+ internal::kSkewedInnerDims, block_total_size_max));
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void block(
+ OutputTensorBlock* output_block) const {
+ typedef internal::ImagePatchCopyOp<Self, PacketAccess> ImagePatchCopyOp;
+ typedef internal::ImagePatchPaddingOp<Self> ImagePatchPaddingOp;
+
+ // Calculate loop limits and various input/output dim sizes.
+ const DSizes<Index, NumDims>& block_sizes = output_block->block_sizes();
+ const bool col_major =
+ static_cast<int>(Layout) == static_cast<int>(ColMajor);
+ const Index depth_dim_size = block_sizes[col_major ? 0 : NumDims - 1];
+ const Index output_depth_dim_size =
+ m_dimensions[col_major ? 0 : NumDims - 1];
+ const Index row_dim_size = block_sizes[col_major ? 1 : NumDims - 2];
+ const Index output_row_dim_size = m_dimensions[col_major ? 1 : NumDims - 2];
+ const Index col_dim_size = block_sizes[col_major ? 2 : NumDims - 3];
+ const Index block_col_stride = row_dim_size * depth_dim_size;
+ const Index patch_index_dim_size = block_sizes[col_major ? 3 : NumDims - 4];
+ const Index outer_dim_size =
+ block_sizes.TotalSize() /
+ (depth_dim_size * row_dim_size * col_dim_size * patch_index_dim_size);
+
+ const Index patch_size = row_dim_size * col_dim_size * depth_dim_size;
+ const Index batch_size = patch_size * patch_index_dim_size;
+
+ Index output_index = output_block->first_coeff_index();
+
+ // Loop through outer dimensions.
+ for (Index outer_dim_index = 0; outer_dim_index < outer_dim_size;
+ ++outer_dim_index) {
+ const Index outer_output_base_index = outer_dim_index * batch_size;
+ // Find the offset of the element wrt the location of the first element.
+ const Index patchIndexStart = output_index / m_fastPatchStride;
+ const Index patchOffset =
+ (output_index - patchIndexStart * m_patchStride) / m_fastOutputDepth;
+ const Index colOffsetStart = patchOffset / m_fastColStride;
+ // Other ways to index this element.
+ const Index otherIndex =
+ (NumDims == 4) ? 0 : output_index / m_fastOtherStride;
+ const Index patch2DIndexStart =
+ (NumDims == 4)
+ ? 0
+ : (output_index - otherIndex * m_otherStride) / m_fastPatchStride;
+ // Calculate starting depth index.
+ const Index depth = output_index - (output_index / m_fastOutputDepth) *
+ output_depth_dim_size;
+ const Index patch_input_base_index =
+ depth + otherIndex * m_patchInputStride;
+
+ // Loop through patches.
+ for (Index patch_index_dim_index = 0;
+ patch_index_dim_index < patch_index_dim_size;
+ ++patch_index_dim_index) {
+ const Index patch_output_base_index =
+ outer_output_base_index + patch_index_dim_index * patch_size;
+ // Patch index corresponding to the passed in index.
+ const Index patchIndex = patchIndexStart + patch_index_dim_index;
+ const Index patch2DIndex =
+ (NumDims == 4) ? patchIndex
+ : patch2DIndexStart + patch_index_dim_index;
+ const Index colIndex = patch2DIndex / m_fastOutputRows;
+ const Index input_col_base = colIndex * m_col_strides;
+ const Index row_offset_base =
+ (patch2DIndex - colIndex * m_outputRows) * m_row_strides -
+ m_rowPaddingTop;
+
+ // Loop through columns.
+ for (Index col_dim_index = 0; col_dim_index < col_dim_size;
+ ++col_dim_index) {
+ const Index col_output_base_index =
+ patch_output_base_index + col_dim_index * block_col_stride;
+
+ // Calculate col index in the input original tensor.
+ Index colOffset = colOffsetStart + col_dim_index;
+ Index inputCol =
+ input_col_base + colOffset * m_in_col_strides - m_colPaddingLeft;
+ Index origInputCol =
+ (m_col_inflate_strides == 1)
+ ? inputCol
+ : ((inputCol >= 0) ? (inputCol / m_fastInflateColStride) : 0);
+
+ bool pad_column = false;
+ if (inputCol < 0 || inputCol >= m_input_cols_eff ||
+ ((m_col_inflate_strides != 1) &&
+ (inputCol != origInputCol * m_col_inflate_strides))) {
+ pad_column = true;
+ }
+
+ const Index col_input_base_index =
+ patch_input_base_index + origInputCol * m_colInputStride;
+ const Index input_row_base =
+ row_offset_base +
+ ((patchOffset + col_dim_index * output_row_dim_size) -
+ colOffset * m_colStride) *
+ m_in_row_strides;
+ // Loop through rows.
+ for (Index row_dim_index = 0; row_dim_index < row_dim_size;
+ ++row_dim_index) {
+ const Index output_base_index =
+ col_output_base_index + row_dim_index * depth_dim_size;
+ bool pad_row = false;
+ Index inputIndex;
+ if (!pad_column) {
+ Index inputRow =
+ input_row_base + row_dim_index * m_in_row_strides;
+ Index origInputRow =
+ (m_row_inflate_strides == 1)
+ ? inputRow
+ : ((inputRow >= 0) ? (inputRow / m_fastInflateRowStride)
+ : 0);
+ if (inputRow < 0 || inputRow >= m_input_rows_eff ||
+ ((m_row_inflate_strides != 1) &&
+ (inputRow != origInputRow * m_row_inflate_strides))) {
+ pad_row = true;
+ } else {
+ inputIndex =
+ col_input_base_index + origInputRow * m_rowInputStride;
+ }
+ }
+ // Copy (or pad) along depth dimension.
+ if (pad_column || pad_row) {
+ ImagePatchPaddingOp::Run(depth_dim_size, Scalar(m_paddingValue),
+ output_base_index, output_block->data());
+ } else {
+ ImagePatchCopyOp::Run(*this, depth_dim_size, output_base_index,
+ output_block->data(), inputIndex);
+ }
+ }
+ }
+ }
+ output_index += m_otherStride;
+ }
+ }
+
protected:
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packetWithPossibleZero(Index index) const
{
@@ -500,7 +744,12 @@ struct TensorEvaluator<const TensorImagePatchOp<Rows, Cols, ArgType>, Device>
Scalar m_paddingValue;
+ const Device& m_device;
TensorEvaluator<ArgType, Device> m_impl;
+ #ifdef EIGEN_USE_SYCL
+ // Required for SYCL in order to construct the expression tree on the device
+ XprType m_op;
+ #endif
};
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorIndexList.h b/unsupported/Eigen/CXX11/src/Tensor/TensorIndexList.h
index 3209fecd3..32caccf87 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorIndexList.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorIndexList.h
@@ -37,36 +37,36 @@ namespace Eigen {
* \sa Tensor
*/
-template <DenseIndex n>
+template <Index n>
struct type2index {
- static const DenseIndex value = n;
- EIGEN_DEVICE_FUNC constexpr operator DenseIndex() const { return n; }
- EIGEN_DEVICE_FUNC void set(DenseIndex val) {
+ static const Index value = n;
+ EIGEN_DEVICE_FUNC constexpr operator Index() const { return n; }
+ EIGEN_DEVICE_FUNC void set(Index val) {
eigen_assert(val == n);
}
};
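For orientation (editorial note, not part of the patch): the hunks in this file only swap DenseIndex for Index; the documented compile-time usage of type2index through IndexList is unchanged and, assuming a C++11 compiler with constexpr support, looks roughly like this:

#include <unsupported/Eigen/CXX11/Tensor>

void reduce_example(const Eigen::Tensor<float, 3>& t) {
  // Reduce over dimensions 0 and 2, both known at compile time.
  Eigen::IndexList<Eigen::type2index<0>, Eigen::type2index<2> > dims;
  Eigen::Tensor<float, 1> summed = t.sum(dims);
  (void)summed;  // suppress unused-variable warnings in this sketch
}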
// This can be used with IndexPairList to get compile-time constant pairs,
// such as IndexPairList<type2indexpair<1,2>, type2indexpair<3,4>>().
-template <DenseIndex f, DenseIndex s>
+template <Index f, Index s>
struct type2indexpair {
- static const DenseIndex first = f;
- static const DenseIndex second = s;
+ static const Index first = f;
+ static const Index second = s;
- constexpr EIGEN_DEVICE_FUNC operator IndexPair<DenseIndex>() const {
- return IndexPair<DenseIndex>(f, s);
+ constexpr EIGEN_DEVICE_FUNC operator IndexPair<Index>() const {
+ return IndexPair<Index>(f, s);
}
- EIGEN_DEVICE_FUNC void set(const IndexPair<DenseIndex>& val) {
+ EIGEN_DEVICE_FUNC void set(const IndexPair<Index>& val) {
eigen_assert(val.first == f);
eigen_assert(val.second == s);
}
};
-template<DenseIndex n> struct NumTraits<type2index<n> >
+template<Index n> struct NumTraits<type2index<n> >
{
- typedef DenseIndex Real;
+ typedef Index Real;
enum {
IsComplex = 0,
RequireInitialization = false,
@@ -75,28 +75,28 @@ template<DenseIndex n> struct NumTraits<type2index<n> >
MulCost = 1
};
- EIGEN_DEVICE_FUNC static inline Real epsilon() { return 0; }
- EIGEN_DEVICE_FUNC static inline Real dummy_precision() { return 0; }
- EIGEN_DEVICE_FUNC static inline Real highest() { return n; }
- EIGEN_DEVICE_FUNC static inline Real lowest() { return n; }
+ EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Real epsilon() { return 0; }
+ EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Real dummy_precision() { return 0; }
+ EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Real highest() { return n; }
+ EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Real lowest() { return n; }
};
namespace internal {
template <typename T>
-EIGEN_DEVICE_FUNC void update_value(T& val, DenseIndex new_val) {
- val = new_val;
+EIGEN_DEVICE_FUNC void update_value(T& val, Index new_val) {
+ val = internal::convert_index<T>(new_val);
}
-template <DenseIndex n>
-EIGEN_DEVICE_FUNC void update_value(type2index<n>& val, DenseIndex new_val) {
+template <Index n>
+EIGEN_DEVICE_FUNC void update_value(type2index<n>& val, Index new_val) {
val.set(new_val);
}
template <typename T>
-EIGEN_DEVICE_FUNC void update_value(T& val, IndexPair<DenseIndex> new_val) {
+EIGEN_DEVICE_FUNC void update_value(T& val, IndexPair<Index> new_val) {
val = new_val;
}
-template <DenseIndex f, DenseIndex s>
-EIGEN_DEVICE_FUNC void update_value(type2indexpair<f, s>& val, IndexPair<DenseIndex> new_val) {
+template <Index f, Index s>
+EIGEN_DEVICE_FUNC void update_value(type2indexpair<f, s>& val, IndexPair<Index> new_val) {
val.set(new_val);
}
@@ -106,36 +106,36 @@ struct is_compile_time_constant {
static constexpr bool value = false;
};
-template <DenseIndex idx>
+template <Index idx>
struct is_compile_time_constant<type2index<idx> > {
static constexpr bool value = true;
};
-template <DenseIndex idx>
+template <Index idx>
struct is_compile_time_constant<const type2index<idx> > {
static constexpr bool value = true;
};
-template <DenseIndex idx>
+template <Index idx>
struct is_compile_time_constant<type2index<idx>& > {
static constexpr bool value = true;
};
-template <DenseIndex idx>
+template <Index idx>
struct is_compile_time_constant<const type2index<idx>& > {
static constexpr bool value = true;
};
-template <DenseIndex f, DenseIndex s>
+template <Index f, Index s>
struct is_compile_time_constant<type2indexpair<f, s> > {
static constexpr bool value = true;
};
-template <DenseIndex f, DenseIndex s>
+template <Index f, Index s>
struct is_compile_time_constant<const type2indexpair<f, s> > {
static constexpr bool value = true;
};
-template <DenseIndex f, DenseIndex s>
+template <Index f, Index s>
struct is_compile_time_constant<type2indexpair<f, s>& > {
static constexpr bool value = true;
};
-template <DenseIndex f, DenseIndex s>
+template <Index f, Index s>
struct is_compile_time_constant<const type2indexpair<f, s>& > {
static constexpr bool value = true;
};
@@ -228,15 +228,15 @@ template <typename T, typename... O>
-template <DenseIndex Idx, typename ValueT>
+template <Index Idx, typename ValueT>
struct tuple_coeff {
template <typename... T>
- EIGEN_DEVICE_FUNC static constexpr ValueT get(const DenseIndex i, const IndexTuple<T...>& t) {
+ EIGEN_DEVICE_FUNC static constexpr ValueT get(const Index i, const IndexTuple<T...>& t) {
// return array_get<Idx>(t) * (i == Idx) + tuple_coeff<Idx-1>::get(i, t) * (i != Idx);
return (i == Idx ? array_get<Idx>(t) : tuple_coeff<Idx-1, ValueT>::get(i, t));
}
template <typename... T>
- EIGEN_DEVICE_FUNC static void set(const DenseIndex i, IndexTuple<T...>& t, const ValueT& value) {
+ EIGEN_DEVICE_FUNC static void set(const Index i, IndexTuple<T...>& t, const ValueT& value) {
if (i == Idx) {
update_value(array_get<Idx>(t), value);
} else {
@@ -245,7 +245,7 @@ struct tuple_coeff {
}
template <typename... T>
- EIGEN_DEVICE_FUNC static constexpr bool value_known_statically(const DenseIndex i, const IndexTuple<T...>& t) {
+ EIGEN_DEVICE_FUNC static constexpr bool value_known_statically(const Index i, const IndexTuple<T...>& t) {
return ((i == Idx) & is_compile_time_constant<typename IndexTupleExtractor<Idx, T...>::ValType>::value) ||
tuple_coeff<Idx-1, ValueT>::value_known_statically(i, t);
}
@@ -268,17 +268,17 @@ struct tuple_coeff {
template <typename ValueT>
struct tuple_coeff<0, ValueT> {
template <typename... T>
- EIGEN_DEVICE_FUNC static constexpr ValueT get(const DenseIndex /*i*/, const IndexTuple<T...>& t) {
+ EIGEN_DEVICE_FUNC static constexpr ValueT get(const Index /*i*/, const IndexTuple<T...>& t) {
// eigen_assert (i == 0); // gcc fails to compile assertions in constexpr
return array_get<0>(t)/* * (i == 0)*/;
}
template <typename... T>
- EIGEN_DEVICE_FUNC static void set(const DenseIndex i, IndexTuple<T...>& t, const ValueT value) {
+ EIGEN_DEVICE_FUNC static void set(const Index i, IndexTuple<T...>& t, const ValueT value) {
eigen_assert (i == 0);
update_value(array_get<0>(t), value);
}
template <typename... T>
- EIGEN_DEVICE_FUNC static constexpr bool value_known_statically(const DenseIndex i, const IndexTuple<T...>&) {
+ EIGEN_DEVICE_FUNC static constexpr bool value_known_statically(const Index i, const IndexTuple<T...>&) {
return is_compile_time_constant<typename IndexTupleExtractor<0, T...>::ValType>::value & (i == 0);
}
@@ -298,29 +298,29 @@ struct tuple_coeff<0, ValueT> {
template<typename FirstType, typename... OtherTypes>
struct IndexList : internal::IndexTuple<FirstType, OtherTypes...> {
- EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC constexpr DenseIndex operator[] (const DenseIndex i) const {
- return internal::tuple_coeff<internal::array_size<internal::IndexTuple<FirstType, OtherTypes...> >::value-1, DenseIndex>::get(i, *this);
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC constexpr Index operator[] (const Index i) const {
+ return internal::tuple_coeff<internal::array_size<internal::IndexTuple<FirstType, OtherTypes...> >::value-1, Index>::get(i, *this);
}
- EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC constexpr DenseIndex get(const DenseIndex i) const {
- return internal::tuple_coeff<internal::array_size<internal::IndexTuple<FirstType, OtherTypes...> >::value-1, DenseIndex>::get(i, *this);
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC constexpr Index get(const Index i) const {
+ return internal::tuple_coeff<internal::array_size<internal::IndexTuple<FirstType, OtherTypes...> >::value-1, Index>::get(i, *this);
}
- EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC void set(const DenseIndex i, const DenseIndex value) {
- return internal::tuple_coeff<internal::array_size<internal::IndexTuple<FirstType, OtherTypes...> >::value-1, DenseIndex>::set(i, *this, value);
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC void set(const Index i, const Index value) {
+ return internal::tuple_coeff<internal::array_size<internal::IndexTuple<FirstType, OtherTypes...> >::value-1, Index>::set(i, *this, value);
}
EIGEN_DEVICE_FUNC constexpr IndexList(const internal::IndexTuple<FirstType, OtherTypes...>& other) : internal::IndexTuple<FirstType, OtherTypes...>(other) { }
EIGEN_DEVICE_FUNC constexpr IndexList(FirstType& first, OtherTypes... other) : internal::IndexTuple<FirstType, OtherTypes...>(first, other...) { }
EIGEN_DEVICE_FUNC constexpr IndexList() : internal::IndexTuple<FirstType, OtherTypes...>() { }
- EIGEN_DEVICE_FUNC constexpr bool value_known_statically(const DenseIndex i) const {
- return internal::tuple_coeff<internal::array_size<internal::IndexTuple<FirstType, OtherTypes...> >::value-1, DenseIndex>::value_known_statically(i, *this);
+ EIGEN_DEVICE_FUNC constexpr bool value_known_statically(const Index i) const {
+ return internal::tuple_coeff<internal::array_size<internal::IndexTuple<FirstType, OtherTypes...> >::value-1, Index>::value_known_statically(i, *this);
}
EIGEN_DEVICE_FUNC constexpr bool all_values_known_statically() const {
- return internal::tuple_coeff<internal::array_size<internal::IndexTuple<FirstType, OtherTypes...> >::value-1, DenseIndex>::values_up_to_known_statically(*this);
+ return internal::tuple_coeff<internal::array_size<internal::IndexTuple<FirstType, OtherTypes...> >::value-1, Index>::values_up_to_known_statically(*this);
}
EIGEN_DEVICE_FUNC constexpr bool values_statically_known_to_increase() const {
- return internal::tuple_coeff<internal::array_size<internal::IndexTuple<FirstType, OtherTypes...> >::value-1, DenseIndex>::values_up_to_statically_known_to_increase(*this);
+ return internal::tuple_coeff<internal::array_size<internal::IndexTuple<FirstType, OtherTypes...> >::value-1, Index>::values_up_to_statically_known_to_increase(*this);
}
};
@@ -333,26 +333,27 @@ constexpr IndexList<FirstType, OtherTypes...> make_index_list(FirstType val1, Ot
template<typename FirstType, typename... OtherTypes>
struct IndexPairList : internal::IndexTuple<FirstType, OtherTypes...> {
- EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC constexpr IndexPair<DenseIndex> operator[] (const DenseIndex i) const {
- return internal::tuple_coeff<internal::array_size<internal::IndexTuple<FirstType, OtherTypes...> >::value-1, IndexPair<DenseIndex>>::get(i, *this);
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC constexpr IndexPair<Index> operator[] (const Index i) const {
+ return internal::tuple_coeff<internal::array_size<internal::IndexTuple<FirstType, OtherTypes...> >::value-1, IndexPair<Index>>::get(i, *this);
}
- EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC void set(const DenseIndex i, const IndexPair<DenseIndex> value) {
- return internal::tuple_coeff<internal::array_size<internal::IndexTuple<FirstType, OtherTypes...>>::value-1, IndexPair<DenseIndex> >::set(i, *this, value);
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC void set(const Index i, const IndexPair<Index> value) {
+ return internal::tuple_coeff<internal::array_size<internal::IndexTuple<FirstType, OtherTypes...>>::value-1, IndexPair<Index> >::set(i, *this, value);
}
EIGEN_DEVICE_FUNC constexpr IndexPairList(const internal::IndexTuple<FirstType, OtherTypes...>& other) : internal::IndexTuple<FirstType, OtherTypes...>(other) { }
EIGEN_DEVICE_FUNC constexpr IndexPairList() : internal::IndexTuple<FirstType, OtherTypes...>() { }
- EIGEN_DEVICE_FUNC constexpr bool value_known_statically(const DenseIndex i) const {
- return internal::tuple_coeff<internal::array_size<internal::IndexTuple<FirstType, OtherTypes...> >::value-1, DenseIndex>::value_known_statically(i, *this);
+ EIGEN_DEVICE_FUNC constexpr bool value_known_statically(const Index i) const {
+ return internal::tuple_coeff<internal::array_size<internal::IndexTuple<FirstType, OtherTypes...> >::value-1, Index>::value_known_statically(i, *this);
}
};
namespace internal {
-template<typename FirstType, typename... OtherTypes> size_t array_prod(const IndexList<FirstType, OtherTypes...>& sizes) {
- size_t result = 1;
- for (int i = 0; i < array_size<IndexList<FirstType, OtherTypes...> >::value; ++i) {
+template<typename FirstType, typename... OtherTypes>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index array_prod(const IndexList<FirstType, OtherTypes...>& sizes) {
+ Index result = 1;
+ for (size_t i = 0; i < array_size<IndexList<FirstType, OtherTypes...> >::value; ++i) {
result *= sizes[i];
}
return result;
@@ -372,30 +373,30 @@ template<typename FirstType, typename... OtherTypes> struct array_size<const Ind
static const size_t value = std::tuple_size<std::tuple<FirstType, OtherTypes...> >::value;
};
-template<DenseIndex N, typename FirstType, typename... OtherTypes> EIGEN_DEVICE_FUNC constexpr DenseIndex array_get(IndexList<FirstType, OtherTypes...>& a) {
+template<Index N, typename FirstType, typename... OtherTypes> EIGEN_DEVICE_FUNC constexpr Index array_get(IndexList<FirstType, OtherTypes...>& a) {
return IndexTupleExtractor<N, FirstType, OtherTypes...>::get_val(a);
}
-template<DenseIndex N, typename FirstType, typename... OtherTypes> EIGEN_DEVICE_FUNC constexpr DenseIndex array_get(const IndexList<FirstType, OtherTypes...>& a) {
+template<Index N, typename FirstType, typename... OtherTypes> EIGEN_DEVICE_FUNC constexpr Index array_get(const IndexList<FirstType, OtherTypes...>& a) {
return IndexTupleExtractor<N, FirstType, OtherTypes...>::get_val(a);
}
template <typename T>
struct index_known_statically_impl {
- EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex) {
+ EIGEN_DEVICE_FUNC static constexpr bool run(const Index) {
return false;
}
};
template <typename FirstType, typename... OtherTypes>
struct index_known_statically_impl<IndexList<FirstType, OtherTypes...> > {
- EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i) {
+ EIGEN_DEVICE_FUNC static constexpr bool run(const Index i) {
return IndexList<FirstType, OtherTypes...>().value_known_statically(i);
}
};
template <typename FirstType, typename... OtherTypes>
struct index_known_statically_impl<const IndexList<FirstType, OtherTypes...> > {
- EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i) {
+ EIGEN_DEVICE_FUNC static constexpr bool run(const Index i) {
return IndexList<FirstType, OtherTypes...>().value_known_statically(i);
}
};
@@ -447,14 +448,14 @@ template <typename FirstType, typename... OtherTypes>
template <typename Tx>
struct index_statically_eq_impl {
- EIGEN_DEVICE_FUNC static constexpr bool run(DenseIndex, DenseIndex) {
+ EIGEN_DEVICE_FUNC static constexpr bool run(Index, Index) {
return false;
}
};
template <typename FirstType, typename... OtherTypes>
struct index_statically_eq_impl<IndexList<FirstType, OtherTypes...> > {
- EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) {
+ EIGEN_DEVICE_FUNC static constexpr bool run(const Index i, const Index value) {
return IndexList<FirstType, OtherTypes...>().value_known_statically(i) &
(IndexList<FirstType, OtherTypes...>().get(i) == value);
}
@@ -462,7 +463,7 @@ struct index_statically_eq_impl<IndexList<FirstType, OtherTypes...> > {
template <typename FirstType, typename... OtherTypes>
struct index_statically_eq_impl<const IndexList<FirstType, OtherTypes...> > {
- EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) {
+ EIGEN_DEVICE_FUNC static constexpr bool run(const Index i, const Index value) {
return IndexList<FirstType, OtherTypes...>().value_known_statically(i) &
(IndexList<FirstType, OtherTypes...>().get(i) == value);
}
@@ -471,14 +472,14 @@ struct index_statically_eq_impl<const IndexList<FirstType, OtherTypes...> > {
template <typename T>
struct index_statically_ne_impl {
- EIGEN_DEVICE_FUNC static constexpr bool run(DenseIndex, DenseIndex) {
+ EIGEN_DEVICE_FUNC static constexpr bool run(Index, Index) {
return false;
}
};
template <typename FirstType, typename... OtherTypes>
struct index_statically_ne_impl<IndexList<FirstType, OtherTypes...> > {
- EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) {
+ EIGEN_DEVICE_FUNC static constexpr bool run(const Index i, const Index value) {
return IndexList<FirstType, OtherTypes...>().value_known_statically(i) &
(IndexList<FirstType, OtherTypes...>().get(i) != value);
}
@@ -486,7 +487,7 @@ struct index_statically_ne_impl<IndexList<FirstType, OtherTypes...> > {
template <typename FirstType, typename... OtherTypes>
struct index_statically_ne_impl<const IndexList<FirstType, OtherTypes...> > {
- EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) {
+ EIGEN_DEVICE_FUNC static constexpr bool run(const Index i, const Index value) {
return IndexList<FirstType, OtherTypes...>().value_known_statically(i) &
(IndexList<FirstType, OtherTypes...>().get(i) != value);
}
@@ -495,14 +496,14 @@ struct index_statically_ne_impl<const IndexList<FirstType, OtherTypes...> > {
template <typename T>
struct index_statically_gt_impl {
- EIGEN_DEVICE_FUNC static constexpr bool run(DenseIndex, DenseIndex) {
+ EIGEN_DEVICE_FUNC static constexpr bool run(Index, Index) {
return false;
}
};
template <typename FirstType, typename... OtherTypes>
struct index_statically_gt_impl<IndexList<FirstType, OtherTypes...> > {
- EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) {
+ EIGEN_DEVICE_FUNC static constexpr bool run(const Index i, const Index value) {
return IndexList<FirstType, OtherTypes...>().value_known_statically(i) &
(IndexList<FirstType, OtherTypes...>().get(i) > value);
}
@@ -510,7 +511,7 @@ struct index_statically_gt_impl<IndexList<FirstType, OtherTypes...> > {
template <typename FirstType, typename... OtherTypes>
struct index_statically_gt_impl<const IndexList<FirstType, OtherTypes...> > {
- EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) {
+ EIGEN_DEVICE_FUNC static constexpr bool run(const Index i, const Index value) {
return IndexList<FirstType, OtherTypes...>().value_known_statically(i) &
(IndexList<FirstType, OtherTypes...>().get(i) > value);
}
@@ -520,14 +521,14 @@ struct index_statically_gt_impl<const IndexList<FirstType, OtherTypes...> > {
template <typename T>
struct index_statically_lt_impl {
- EIGEN_DEVICE_FUNC static constexpr bool run(DenseIndex, DenseIndex) {
+ EIGEN_DEVICE_FUNC static constexpr bool run(Index, Index) {
return false;
}
};
template <typename FirstType, typename... OtherTypes>
struct index_statically_lt_impl<IndexList<FirstType, OtherTypes...> > {
- EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) {
+ EIGEN_DEVICE_FUNC static constexpr bool run(const Index i, const Index value) {
return IndexList<FirstType, OtherTypes...>().value_known_statically(i) &
(IndexList<FirstType, OtherTypes...>().get(i) < value);
}
@@ -535,7 +536,7 @@ struct index_statically_lt_impl<IndexList<FirstType, OtherTypes...> > {
template <typename FirstType, typename... OtherTypes>
struct index_statically_lt_impl<const IndexList<FirstType, OtherTypes...> > {
- EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) {
+ EIGEN_DEVICE_FUNC static constexpr bool run(const Index i, const Index value) {
return IndexList<FirstType, OtherTypes...>().value_known_statically(i) &
(IndexList<FirstType, OtherTypes...>().get(i) < value);
}
@@ -545,14 +546,14 @@ struct index_statically_lt_impl<const IndexList<FirstType, OtherTypes...> > {
template <typename Tx>
struct index_pair_first_statically_eq_impl {
- EIGEN_DEVICE_FUNC static constexpr bool run(DenseIndex, DenseIndex) {
+ EIGEN_DEVICE_FUNC static constexpr bool run(Index, Index) {
return false;
}
};
template <typename FirstType, typename... OtherTypes>
struct index_pair_first_statically_eq_impl<IndexPairList<FirstType, OtherTypes...> > {
- EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) {
+ EIGEN_DEVICE_FUNC static constexpr bool run(const Index i, const Index value) {
return IndexPairList<FirstType, OtherTypes...>().value_known_statically(i) &
(IndexPairList<FirstType, OtherTypes...>().operator[](i).first == value);
}
@@ -560,7 +561,7 @@ struct index_pair_first_statically_eq_impl<IndexPairList<FirstType, OtherTypes..
template <typename FirstType, typename... OtherTypes>
struct index_pair_first_statically_eq_impl<const IndexPairList<FirstType, OtherTypes...> > {
- EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) {
+ EIGEN_DEVICE_FUNC static constexpr bool run(const Index i, const Index value) {
return IndexPairList<FirstType, OtherTypes...>().value_known_statically(i) &
(IndexPairList<FirstType, OtherTypes...>().operator[](i).first == value);
}
@@ -570,14 +571,14 @@ struct index_pair_first_statically_eq_impl<const IndexPairList<FirstType, OtherT
template <typename Tx>
struct index_pair_second_statically_eq_impl {
- EIGEN_DEVICE_FUNC static constexpr bool run(DenseIndex, DenseIndex) {
+ EIGEN_DEVICE_FUNC static constexpr bool run(Index, Index) {
return false;
}
};
template <typename FirstType, typename... OtherTypes>
struct index_pair_second_statically_eq_impl<IndexPairList<FirstType, OtherTypes...> > {
- EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) {
+ EIGEN_DEVICE_FUNC static constexpr bool run(const Index i, const Index value) {
return IndexPairList<FirstType, OtherTypes...>().value_known_statically(i) &
(IndexPairList<FirstType, OtherTypes...>().operator[](i).second == value);
}
@@ -585,7 +586,7 @@ struct index_pair_second_statically_eq_impl<IndexPairList<FirstType, OtherTypes.
template <typename FirstType, typename... OtherTypes>
struct index_pair_second_statically_eq_impl<const IndexPairList<FirstType, OtherTypes...> > {
- EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) {
+ EIGEN_DEVICE_FUNC static constexpr bool run(const Index i, const Index value) {
return IndexPairList<FirstType, OtherTypes...>().value_known_statically(i) &
(IndexPairList<FirstType, OtherTypes...>().operator[](i).second == value);
}
@@ -602,7 +603,7 @@ namespace internal {
template <typename T>
struct index_known_statically_impl {
- static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(const DenseIndex) {
+ static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(const Index) {
return false;
}
};
@@ -623,42 +624,42 @@ struct indices_statically_known_to_increase_impl {
template <typename T>
struct index_statically_eq_impl {
- static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(DenseIndex, DenseIndex) {
+ static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(Index, Index) {
return false;
}
};
template <typename T>
struct index_statically_ne_impl {
- static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(DenseIndex, DenseIndex) {
+ static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(Index, Index) {
return false;
}
};
template <typename T>
struct index_statically_gt_impl {
- static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(DenseIndex, DenseIndex) {
+ static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(Index, Index) {
return false;
}
};
template <typename T>
struct index_statically_lt_impl {
- static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(DenseIndex, DenseIndex) {
+ static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(Index, Index) {
return false;
}
};
template <typename Tx>
struct index_pair_first_statically_eq_impl {
- static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(DenseIndex, DenseIndex) {
+ static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(Index, Index) {
return false;
}
};
template <typename Tx>
struct index_pair_second_statically_eq_impl {
- static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(DenseIndex, DenseIndex) {
+ static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(Index, Index) {
return false;
}
};
@@ -674,7 +675,7 @@ struct index_pair_second_statically_eq_impl {
namespace Eigen {
namespace internal {
template <typename T>
-static EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bool index_known_statically(DenseIndex i) {
+static EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bool index_known_statically(Index i) {
return index_known_statically_impl<T>::run(i);
}
@@ -689,32 +690,32 @@ static EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bool indices_statically_known_to_increa
}
template <typename T>
-static EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bool index_statically_eq(DenseIndex i, DenseIndex value) {
+static EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bool index_statically_eq(Index i, Index value) {
return index_statically_eq_impl<T>::run(i, value);
}
template <typename T>
-static EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bool index_statically_ne(DenseIndex i, DenseIndex value) {
+static EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bool index_statically_ne(Index i, Index value) {
return index_statically_ne_impl<T>::run(i, value);
}
template <typename T>
-static EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bool index_statically_gt(DenseIndex i, DenseIndex value) {
+static EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bool index_statically_gt(Index i, Index value) {
return index_statically_gt_impl<T>::run(i, value);
}
template <typename T>
-static EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bool index_statically_lt(DenseIndex i, DenseIndex value) {
+static EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bool index_statically_lt(Index i, Index value) {
return index_statically_lt_impl<T>::run(i, value);
}
template <typename T>
-static EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bool index_pair_first_statically_eq(DenseIndex i, DenseIndex value) {
+static EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bool index_pair_first_statically_eq(Index i, Index value) {
return index_pair_first_statically_eq_impl<T>::run(i, value);
}
template <typename T>
-static EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bool index_pair_second_statically_eq(DenseIndex i, DenseIndex value) {
+static EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bool index_pair_second_statically_eq(Index i, Index value) {
return index_pair_second_statically_eq_impl<T>::run(i, value);
}
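
For context, the helpers above let reduction and indexing code fold comparisons on compile-time dimension lists into constants. A minimal usage sketch, assuming the unsupported CXX11 Tensor module and a C++11 compiler (variable names are illustrative, not from the patch):

#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>

int main() {
  Eigen::Tensor<float, 3> t(4, 5, 6);
  t.setRandom();
  // Dimensions 0 and 2 are encoded in the type, so helpers such as
  // internal::index_statically_eq / index_statically_lt can resolve to
  // compile-time constants when the reduction code queries them.
  Eigen::IndexList<Eigen::type2index<0>, Eigen::type2index<2> > reduce_dims;
  Eigen::Tensor<float, 1> sums = t.sum(reduce_dims);
  std::cout << sums.dimension(0) << " partial sums\n";  // 5
  return 0;
}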
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorInflation.h b/unsupported/Eigen/CXX11/src/Tensor/TensorInflation.h
index f391fb9ee..e28565009 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorInflation.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorInflation.h
@@ -31,6 +31,7 @@ struct traits<TensorInflationOp<Strides, XprType> > : public traits<XprType>
typedef typename remove_reference<Nested>::type _Nested;
static const int NumDimensions = XprTraits::NumDimensions;
static const int Layout = XprTraits::Layout;
+ typedef typename XprTraits::PointerType PointerType;
};
template<typename Strides, typename XprType>
@@ -84,12 +85,13 @@ struct TensorEvaluator<const TensorInflationOp<Strides, ArgType>, Device>
typedef typename XprType::Scalar Scalar;
typedef typename XprType::CoeffReturnType CoeffReturnType;
typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
- static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
+ static const int PacketSize = PacketType<CoeffReturnType, Device>::size;
enum {
IsAligned = /*TensorEvaluator<ArgType, Device>::IsAligned*/ false,
PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
BlockAccess = false,
+ PreferBlockAccess = false,
Layout = TensorEvaluator<ArgType, Device>::Layout,
CoordAccess = false, // to be implemented
RawAccess = false
@@ -213,7 +215,12 @@ struct TensorEvaluator<const TensorInflationOp<Strides, ArgType>, Device>
compute_cost, vectorized, PacketSize);
}
- EIGEN_DEVICE_FUNC Scalar* data() const { return NULL; }
+ EIGEN_DEVICE_FUNC typename Eigen::internal::traits<XprType>::PointerType data() const { return NULL; }
+
+#ifdef EIGEN_USE_SYCL
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const TensorEvaluator<ArgType, Device>& impl() const { return m_impl; }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Strides& functor() const { return m_strides; }
+#endif
protected:
Dimensions m_dimensions;
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorIntDiv.h b/unsupported/Eigen/CXX11/src/Tensor/TensorIntDiv.h
index ef1c9c42c..b6d445c50 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorIntDiv.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorIntDiv.h
@@ -21,7 +21,7 @@ namespace Eigen {
* \brief Fast integer division by a constant.
*
* See the paper from Granlund and Montgomery for explanation.
- * (at http://dx.doi.org/10.1145/773473.178249)
+ * (at https://doi.org/10.1145/773473.178249)
*
* \sa Tensor
*/
@@ -35,7 +35,7 @@ namespace {
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
typename internal::enable_if<sizeof(T)==4,int>::type count_leading_zeros(const T val)
{
-#ifdef __CUDA_ARCH__
+#ifdef EIGEN_GPU_COMPILE_PHASE
return __clz(val);
#elif defined(__SYCL_DEVICE_ONLY__)
return cl::sycl::clz(val);
@@ -53,7 +53,7 @@ namespace {
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
typename internal::enable_if<sizeof(T)==8,int>::type count_leading_zeros(const T val)
{
-#ifdef __CUDA_ARCH__
+#ifdef EIGEN_GPU_COMPILE_PHASE
return __clzll(val);
#elif defined(__SYCL_DEVICE_ONLY__)
return cl::sycl::clz(val);
@@ -90,7 +90,7 @@ namespace {
template <typename T>
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE uint32_t muluh(const uint32_t a, const T b) {
-#if defined(__CUDA_ARCH__)
+#if defined(EIGEN_GPU_COMPILE_PHASE)
return __umulhi(a, b);
#elif defined(__SYCL_DEVICE_ONLY__)
return cl::sycl::mul_hi(a, static_cast<uint32_t>(b));
@@ -101,7 +101,7 @@ namespace {
template <typename T>
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE uint64_t muluh(const uint64_t a, const T b) {
-#if defined(__CUDA_ARCH__)
+#if defined(EIGEN_GPU_COMPILE_PHASE)
return __umul64hi(a, b);
#elif defined(__SYCL_DEVICE_ONLY__)
return cl::sycl::mul_hi(a, static_cast<uint64_t>(b));
@@ -124,7 +124,7 @@ namespace {
template <typename T>
struct DividerHelper<64, T> {
static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE uint64_t computeMultiplier(const int log_div, const T divider) {
-#if defined(__SIZEOF_INT128__) && !defined(__CUDA_ARCH__) && !defined(__SYCL_DEVICE_ONLY__)
+#if defined(__SIZEOF_INT128__) && !defined(EIGEN_GPU_COMPILE_PHASE) && !defined(__SYCL_DEVICE_ONLY__)
return static_cast<uint64_t>((static_cast<__uint128_t>(1) << (64+log_div)) / static_cast<__uint128_t>(divider) - (static_cast<__uint128_t>(1) << 64) + 1);
#else
const uint64_t shift = 1ULL << log_div;
@@ -167,7 +167,7 @@ struct TensorIntDivisor {
shift2 = log_div > 1 ? log_div-1 : 0;
}
- // Must have 0 <= numerator. On platforms that dont support the __uint128_t
+ // Must have 0 <= numerator. On platforms that don't support the __uint128_t
// type numerator should also be less than 2^32-1.
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T divide(const T numerator) const {
eigen_assert(static_cast<typename UnsignedTraits<T>::type>(numerator) < NumTraits<UnsignedType>::highest()/2);
@@ -203,7 +203,7 @@ class TensorIntDivisor<int32_t, true> {
}
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE int divide(const int32_t n) const {
-#ifdef __CUDA_ARCH__
+#ifdef EIGEN_GPU_COMPILE_PHASE
return (__umulhi(magic, n) >> shift);
#elif defined(__SYCL_DEVICE_ONLY__)
return (cl::sycl::mul_hi(static_cast<uint64_t>(magic), static_cast<uint64_t>(n)) >> shift);
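
For reference, a standalone sketch of the multiply-high scheme TensorIntDivisor implements (Granlund and Montgomery), written for uint32_t with a plain 64-bit multiply standing in for muluh()/__umulhi(). FastDiv32 is an illustrative name, and as in the code above the divisor must be non-zero and below 2^31:

#include <cassert>
#include <cstdint>
#include <cstdio>

struct FastDiv32 {
  uint32_t multiplier;
  int shift1, shift2;
  explicit FastDiv32(uint32_t divider) {
    assert(divider != 0 && divider < (1u << 31));
    int log_div = 0;                                  // ceil(log2(divider))
    while ((uint64_t(1) << log_div) < divider) ++log_div;
    multiplier = static_cast<uint32_t>(
        (uint64_t(1) << (32 + log_div)) / divider - (uint64_t(1) << 32) + 1);
    shift1 = log_div > 1 ? 1 : log_div;
    shift2 = log_div > 1 ? log_div - 1 : 0;
  }
  uint32_t divide(uint32_t n) const {
    // High 32 bits of the 64-bit product, then the two cheap shifts.
    const uint32_t t = static_cast<uint32_t>((uint64_t(multiplier) * n) >> 32);
    return (t + ((n - t) >> shift1)) >> shift2;
  }
};

int main() {
  const FastDiv32 div7(7);
  for (uint32_t n = 0; n < 100000; ++n) assert(div7.divide(n) == n / 7);
  std::printf("100000 quotients verified\n");
}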
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorLayoutSwap.h b/unsupported/Eigen/CXX11/src/Tensor/TensorLayoutSwap.h
index cd0109ef4..998757d14 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorLayoutSwap.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorLayoutSwap.h
@@ -46,6 +46,7 @@ struct traits<TensorLayoutSwapOp<XprType> > : public traits<XprType>
typedef typename remove_reference<Nested>::type _Nested;
static const int NumDimensions = traits<XprType>::NumDimensions;
static const int Layout = (traits<XprType>::Layout == ColMajor) ? RowMajor : ColMajor;
+ typedef typename XprTraits::PointerType PointerType;
};
template<typename XprType>
@@ -118,6 +119,8 @@ struct TensorEvaluator<const TensorLayoutSwapOp<ArgType>, Device>
enum {
IsAligned = TensorEvaluator<ArgType, Device>::IsAligned,
PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
+ BlockAccess = false,
+ PreferBlockAccess = false,
Layout = (static_cast<int>(TensorEvaluator<ArgType, Device>::Layout) == static_cast<int>(ColMajor)) ? RowMajor : ColMajor,
CoordAccess = false, // to be implemented
RawAccess = TensorEvaluator<ArgType, Device>::RawAccess
@@ -159,7 +162,7 @@ struct TensorEvaluator<const TensorLayoutSwapOp<ArgType>, Device>
return m_impl.costPerCoeff(vectorized);
}
- EIGEN_DEVICE_FUNC Scalar* data() const { return m_impl.data(); }
+ EIGEN_DEVICE_FUNC typename Eigen::internal::traits<XprType>::PointerType data() const { return m_impl.data(); }
const TensorEvaluator<ArgType, Device>& impl() const { return m_impl; }
@@ -180,6 +183,8 @@ template<typename ArgType, typename Device>
enum {
IsAligned = TensorEvaluator<ArgType, Device>::IsAligned,
PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
+ BlockAccess = false,
+ PreferBlockAccess = false,
Layout = (static_cast<int>(TensorEvaluator<ArgType, Device>::Layout) == static_cast<int>(ColMajor)) ? RowMajor : ColMajor,
CoordAccess = false // to be implemented
};
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorMacros.h b/unsupported/Eigen/CXX11/src/Tensor/TensorMacros.h
index f92e39d69..c6ca396a3 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorMacros.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorMacros.h
@@ -27,7 +27,7 @@
*/
// SFINAE requires variadic templates
-#ifndef __CUDACC__
+#if !defined(EIGEN_GPUCC)
#if EIGEN_HAS_VARIADIC_TEMPLATES
// SFINAE doesn't work for gcc <= 4.7
#ifdef EIGEN_COMP_GNUC
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorMap.h b/unsupported/Eigen/CXX11/src/Tensor/TensorMap.h
index a8e55757e..d1cc0593f 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorMap.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorMap.h
@@ -150,6 +150,7 @@ template<typename PlainObjectType, int Options_, template <class> class MakePoin
EIGEN_STRONG_INLINE const Scalar& operator()(Index firstIndex, Index secondIndex, IndexTypes... otherIndices) const
{
EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
+ eigen_assert(internal::all((Eigen::NumTraits<Index>::highest() >= otherIndices)...));
if (PlainObjectType::Options&RowMajor) {
const Index index = m_dimensions.IndexOfRowMajor(array<Index, NumIndices>{{firstIndex, secondIndex, otherIndices...}});
return m_data[index];
@@ -237,6 +238,7 @@ template<typename PlainObjectType, int Options_, template <class> class MakePoin
EIGEN_STRONG_INLINE Scalar& operator()(Index firstIndex, Index secondIndex, IndexTypes... otherIndices)
{
static_assert(sizeof...(otherIndices) + 2 == NumIndices || NumIndices == Dynamic, "Number of indices used to access a tensor coefficient must be equal to the rank of the tensor.");
+ eigen_assert(internal::all((Eigen::NumTraits<Index>::highest() >= otherIndices)...));
const std::size_t NumDims = sizeof...(otherIndices) + 2;
if (PlainObjectType::Options&RowMajor) {
const Index index = m_dimensions.IndexOfRowMajor(array<Index, NumDims>{{firstIndex, secondIndex, otherIndices...}});
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorMeta.h b/unsupported/Eigen/CXX11/src/Tensor/TensorMeta.h
index b5ef31d55..87be090f9 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorMeta.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorMeta.h
@@ -52,7 +52,7 @@ struct PacketType : internal::packet_traits<Scalar> {
};
// For CUDA packet types when using a GpuDevice
-#if defined(EIGEN_USE_GPU) && defined(__CUDACC__) && defined(EIGEN_HAS_CUDA_FP16)
+#if defined(EIGEN_USE_GPU) && defined(EIGEN_HAS_GPU_FP16)
template <>
struct PacketType<half, GpuDevice> {
typedef half2 type;
@@ -124,7 +124,9 @@ template <typename U, typename V> struct Tuple {
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
Tuple& operator= (const Tuple& rhs) {
+ #ifndef __SYCL_DEVICE_ONLY__
if (&rhs == this) return *this;
+ #endif
first = rhs.first;
second = rhs.second;
return *this;
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorMorphing.h b/unsupported/Eigen/CXX11/src/Tensor/TensorMorphing.h
index 6ddd2ca18..d19aba3b3 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorMorphing.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorMorphing.h
@@ -31,6 +31,7 @@ struct traits<TensorReshapingOp<NewDimensions, XprType> > : public traits<XprTyp
typedef typename remove_reference<Nested>::type _Nested;
static const int NumDimensions = array_size<NewDimensions>::value;
static const int Layout = XprTraits::Layout;
+ typedef typename XprTraits::PointerType PointerType;
};
template<typename NewDimensions, typename XprType>
@@ -101,26 +102,69 @@ struct TensorEvaluator<const TensorReshapingOp<NewDimensions, ArgType>, Device>
typedef TensorReshapingOp<NewDimensions, ArgType> XprType;
typedef NewDimensions Dimensions;
+ typedef typename XprType::Index Index;
+ typedef typename XprType::Scalar Scalar;
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+ typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
+
+ static const int NumOutputDims = internal::array_size<Dimensions>::value;
+ static const int NumInputDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value;
+
enum {
- IsAligned = TensorEvaluator<ArgType, Device>::IsAligned,
- PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
- Layout = TensorEvaluator<ArgType, Device>::Layout,
- CoordAccess = false, // to be implemented
- RawAccess = TensorEvaluator<ArgType, Device>::RawAccess
+ IsAligned = TensorEvaluator<ArgType, Device>::IsAligned,
+ PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
+ // TODO(andydavis, wuke) Enable BlockAccess for the general case when the
+ // performance issue with block-based reshape is resolved.
+ BlockAccess = TensorEvaluator<ArgType, Device>::BlockAccess &&
+ TensorEvaluator<ArgType, Device>::RawAccess &&
+ NumInputDims > 0 && NumOutputDims > 0,
+ PreferBlockAccess = true,
+ Layout = TensorEvaluator<ArgType, Device>::Layout,
+ CoordAccess = false, // to be implemented
+ RawAccess = TensorEvaluator<ArgType, Device>::RawAccess
};
+ typedef typename internal::remove_const<Scalar>::type ScalarNoConst;
+
+ typedef internal::TensorBlock<ScalarNoConst, Index, NumInputDims, Layout>
+ InputTensorBlock;
+ typedef internal::TensorBlock<ScalarNoConst, Index, NumOutputDims, Layout>
+ OutputTensorBlock;
+ typedef internal::TensorBlockReader<ScalarNoConst, Index, NumOutputDims,
+ Layout>
+ OutputTensorBlockReader;
+
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
: m_impl(op.expression(), device), m_dimensions(op.dimensions())
{
// The total size of the reshaped tensor must be equal to the total size
// of the input tensor.
eigen_assert(internal::array_prod(m_impl.dimensions()) == internal::array_prod(op.dimensions()));
- }
- typedef typename XprType::Index Index;
- typedef typename XprType::Scalar Scalar;
- typedef typename XprType::CoeffReturnType CoeffReturnType;
- typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
+ if (BlockAccess) {
+ const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims =
+ m_impl.dimensions();
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ m_outputStrides[0] = 1;
+ for (int i = 1; i < NumOutputDims; ++i) {
+ m_outputStrides[i] = m_outputStrides[i - 1] * m_dimensions[i - 1];
+ }
+ m_inputStrides[0] = 1;
+ for (int i = 1; i < NumInputDims; ++i) {
+ m_inputStrides[i] = m_inputStrides[i - 1] * input_dims[i - 1];
+ }
+ } else {
+ m_outputStrides[NumOutputDims - 1] = 1;
+ for (int i = NumOutputDims - 2; i >= 0; --i) {
+ m_outputStrides[i] = m_outputStrides[i + 1] * m_dimensions[i + 1];
+ }
+ m_inputStrides[NumInputDims - 1] = 1;
+ for (int i = NumInputDims - 2; i >= 0; --i) {
+ m_inputStrides[i] = m_inputStrides[i + 1] * input_dims[i + 1];
+ }
+ }
+ }
+ }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
@@ -146,13 +190,149 @@ struct TensorEvaluator<const TensorReshapingOp<NewDimensions, ArgType>, Device>
return m_impl.costPerCoeff(vectorized);
}
- EIGEN_DEVICE_FUNC Scalar* data() const { return const_cast<Scalar*>(m_impl.data()); }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void getResourceRequirements(
+ std::vector<internal::TensorOpResourceRequirements>* resources) const {
+ m_impl.getResourceRequirements(resources);
+ }
+
+ // TODO(andydavis) Reduce the overhead of this function.
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void block(
+ OutputTensorBlock* output_block) const {
+ if (m_impl.data() != NULL) {
+ OutputTensorBlockReader::Run(output_block, m_impl.data());
+ return;
+ }
+
+ // Calculate output block unit-stride inner dimension length.
+ const DSizes<Index, NumOutputDims>& output_block_sizes =
+ output_block->block_sizes();
+ Index output_inner_dim_size = 1;
+ Index output_outer_dim_start = NumOutputDims;
+ for (Index i = 0; i < NumOutputDims; ++i) {
+ const Index dim = static_cast<int>(Layout) == static_cast<int>(ColMajor)
+ ? i : NumOutputDims - i - 1;
+ output_inner_dim_size *= output_block_sizes[dim];
+ if (output_block_sizes[dim] < m_dimensions[dim]) {
+ output_outer_dim_start = i + 1;
+ break;
+ }
+ }
+
+ // Initialize output block iterator state.
+ struct BlockIteratorState {
+ Index stride;
+ Index span;
+ Index size;
+ Index count;
+ };
+ array<BlockIteratorState, NumOutputDims> block_iter_state;
+
+ for (Index i = 0; i < NumOutputDims; ++i) {
+ const Index dim = static_cast<int>(Layout) == static_cast<int>(ColMajor)
+ ? i : NumOutputDims - i - 1;
+ block_iter_state[i].size = output_block_sizes[dim];
+ block_iter_state[i].stride = m_outputStrides[dim];
+ block_iter_state[i].span =
+ block_iter_state[i].stride * (block_iter_state[i].size - 1);
+ block_iter_state[i].count = 0;
+ }
+
+ const Index output_outer_dim_size = output_block_sizes.TotalSize() /
+ output_inner_dim_size;
+ const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims =
+ m_impl.dimensions();
+
+ Index index = output_block->first_coeff_index();
+ for (Index outer_idx = 0; outer_idx < output_outer_dim_size; ++outer_idx) {
+ Index inner_idx = 0;
+ while (inner_idx < output_inner_dim_size) {
+ // Calculate input coords based on 'index'.
+ array<Index, NumInputDims> input_coords;
+ Index idx = index;
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ for (int i = NumInputDims - 1; i > 0; --i) {
+ input_coords[i] = idx / m_inputStrides[i];
+ idx -= input_coords[i] * m_inputStrides[i];
+ }
+ input_coords[0] = idx;
+ } else {
+ for (int i = 0; i < NumInputDims - 1; ++i) {
+ input_coords[i] = idx / m_inputStrides[i];
+ idx -= input_coords[i] * m_inputStrides[i];
+ }
+ input_coords[NumInputDims - 1] = idx;
+ }
+
+ // Calculate target input block shape, using at most
+ // 'output_inner_dim_size' coefficients along the input block's inner
+ // dimensions.
+ DSizes<Index, NumInputDims> input_block_sizes;
+ Index num_to_allocate = output_inner_dim_size - inner_idx;
+ for (Index i = 0; i < NumInputDims; ++i) {
+ const Index dim =
+ static_cast<int>(Layout) == static_cast<int>(ColMajor)
+ ? i : NumInputDims - i - 1;
+ input_block_sizes[dim] = numext::mini(
+ num_to_allocate, (static_cast<Index>(input_dims[dim]) -
+ input_coords[dim]));
+ if (input_coords[dim] == 0) {
+ num_to_allocate /= input_block_sizes[dim];
+ } else {
+ num_to_allocate = 1;
+ }
+ }
+
+ // Calculate input block strides.
+ DSizes<Index, NumInputDims> input_block_strides;
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ input_block_strides[0] = 1;
+ for (int i = 1; i < NumInputDims; ++i) {
+ input_block_strides[i] = input_block_strides[i - 1] *
+ input_block_sizes[i - 1];
+ }
+ } else {
+ input_block_strides[NumInputDims - 1] = 1;
+ for (int i = NumInputDims - 2; i >= 0; --i) {
+ input_block_strides[i] = input_block_strides[i + 1] *
+ input_block_sizes[i + 1];
+ }
+ }
+
+ // Instantiate and read input block from input tensor.
+ InputTensorBlock input_block(index, input_block_sizes,
+ input_block_strides, m_inputStrides,
+ output_block->data() + outer_idx *
+ output_inner_dim_size + inner_idx);
+
+ m_impl.block(&input_block);
+
+ const Index input_block_total_size = input_block_sizes.TotalSize();
+ index += input_block_total_size;
+ inner_idx += input_block_total_size;
+ }
+ eigen_assert(inner_idx == output_inner_dim_size);
+ index -= output_inner_dim_size;
+ // Update index.
+ for (Index i = output_outer_dim_start; i < NumOutputDims; ++i) {
+ if (++block_iter_state[i].count < block_iter_state[i].size) {
+ index += block_iter_state[i].stride;
+ break;
+ }
+ block_iter_state[i].count = 0;
+ index -= block_iter_state[i].span;
+ }
+ }
+ }
+
+ EIGEN_DEVICE_FUNC typename Eigen::internal::traits<XprType>::PointerType data() const { return const_cast<Scalar*>(m_impl.data()); }
EIGEN_DEVICE_FUNC const TensorEvaluator<ArgType, Device>& impl() const { return m_impl; }
protected:
TensorEvaluator<ArgType, Device> m_impl;
NewDimensions m_dimensions;
+ DSizes<Index, NumOutputDims> m_outputStrides;
+ DSizes<Index, NumInputDims> m_inputStrides;
};
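
The block() path above hinges on recovering per-dimension coordinates from a flat index via the cached strides. A small self-contained illustration of that arithmetic for a column-major 4x5x6 tensor (plain C++, no Eigen dependency):

#include <array>
#include <cstdio>

int main() {
  const std::array<int, 3> dims = {4, 5, 6};
  std::array<int, 3> strides;                 // column-major strides
  strides[0] = 1;
  for (int i = 1; i < 3; ++i) strides[i] = strides[i - 1] * dims[i - 1];

  int index = 57;                             // flat offset into the tensor
  std::array<int, 3> coords;
  for (int i = 2; i > 0; --i) {               // same loop shape as input_coords above
    coords[i] = index / strides[i];
    index -= coords[i] * strides[i];
  }
  coords[0] = index;
  std::printf("coords = (%d, %d, %d)\n", coords[0], coords[1], coords[2]);  // (1, 4, 2)
}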
@@ -169,6 +349,8 @@ template<typename NewDimensions, typename ArgType, typename Device>
enum {
IsAligned = TensorEvaluator<ArgType, Device>::IsAligned,
PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
+ BlockAccess = false,
+ PreferBlockAccess = false,
Layout = TensorEvaluator<ArgType, Device>::Layout,
CoordAccess = false, // to be implemented
RawAccess = TensorEvaluator<ArgType, Device>::RawAccess
@@ -214,6 +396,7 @@ struct traits<TensorSlicingOp<StartIndices, Sizes, XprType> > : public traits<Xp
typedef typename remove_reference<Nested>::type _Nested;
static const int NumDimensions = array_size<StartIndices>::value;
static const int Layout = XprTraits::Layout;
+ typedef typename XprTraits::PointerType PointerType;
};
template<typename StartIndices, typename Sizes, typename XprType>
@@ -318,23 +501,46 @@ struct TensorEvaluator<const TensorSlicingOp<StartIndices, Sizes, ArgType>, Devi
typedef TensorSlicingOp<StartIndices, Sizes, ArgType> XprType;
static const int NumDims = internal::array_size<Sizes>::value;
+ typedef typename XprType::Index Index;
+ typedef typename XprType::Scalar Scalar;
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+ typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
+ typedef Sizes Dimensions;
+
enum {
// Alignment can't be guaranteed at compile time since it depends on the
// slice offsets and sizes.
- IsAligned = /*TensorEvaluator<ArgType, Device>::IsAligned*/false,
- PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
- Layout = TensorEvaluator<ArgType, Device>::Layout,
- CoordAccess = false,
- RawAccess = false
+ IsAligned = false,
+ PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
+ BlockAccess = TensorEvaluator<ArgType, Device>::BlockAccess,
+ PreferBlockAccess = true,
+ Layout = TensorEvaluator<ArgType, Device>::Layout,
+ CoordAccess = false,
+ RawAccess = false
};
+ typedef typename internal::remove_const<Scalar>::type ScalarNoConst;
+
+ typedef internal::TensorBlock<ScalarNoConst, Index, NumDims, Layout> TensorBlock;
+ typedef typename TensorBlock::Dimensions TensorBlockDimensions;
+
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
: m_impl(op.expression(), device), m_device(device), m_dimensions(op.sizes()), m_offsets(op.startIndices())
{
- for (std::size_t i = 0; i < internal::array_size<Dimensions>::value; ++i) {
+ for (Index i = 0; i < internal::array_size<Dimensions>::value; ++i) {
eigen_assert(m_impl.dimensions()[i] >= op.sizes()[i] + op.startIndices()[i]);
}
+ m_is_identity = true;
+ for (int i = 0; i < internal::array_size<Dimensions>::value; ++i) {
+ eigen_assert(m_impl.dimensions()[i] >=
+ op.sizes()[i] + op.startIndices()[i]);
+ if (m_impl.dimensions()[i] != op.sizes()[i] ||
+ op.startIndices()[i] != 0) {
+ m_is_identity = false;
+ }
+ }
+
const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
const Sizes& output_dims = op.sizes();
if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
@@ -364,12 +570,6 @@ struct TensorEvaluator<const TensorSlicingOp<StartIndices, Sizes, ArgType>, Devi
}
}
- typedef typename XprType::Index Index;
- typedef typename XprType::Scalar Scalar;
- typedef typename XprType::CoeffReturnType CoeffReturnType;
- typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
- typedef Sizes Dimensions;
-
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
@@ -396,7 +596,7 @@ struct TensorEvaluator<const TensorSlicingOp<StartIndices, Sizes, ArgType>, Devi
const MemcpyTriggerForSlicing<Index, Device> trigger(m_device);
if (trigger(contiguous_values)) {
Scalar* src = (Scalar*)m_impl.data();
- for (int i = 0; i < internal::array_prod(dimensions()); i += contiguous_values) {
+ for (Index i = 0; i < internal::array_prod(dimensions()); i += contiguous_values) {
Index offset = srcCoeff(i);
m_device.memcpy((void*)(data+i), src+offset, contiguous_values * sizeof(Scalar));
}
@@ -412,16 +612,24 @@ struct TensorEvaluator<const TensorSlicingOp<StartIndices, Sizes, ArgType>, Devi
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
{
- return m_impl.coeff(srcCoeff(index));
+ if (m_is_identity) {
+ return m_impl.coeff(index);
+ } else {
+ return m_impl.coeff(srcCoeff(index));
+ }
}
template<int LoadMode>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
{
- const int packetSize = internal::unpacket_traits<PacketReturnType>::size;
+ const int packetSize = PacketType<CoeffReturnType, Device>::size;
EIGEN_STATIC_ASSERT((packetSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
eigen_assert(index+packetSize-1 < internal::array_prod(dimensions()));
+ if (m_is_identity) {
+ return m_impl.template packet<LoadMode>(index);
+ }
+
Index inputIndices[] = {0, 0};
Index indices[] = {index, index + packetSize - 1};
if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
@@ -464,12 +672,30 @@ struct TensorEvaluator<const TensorSlicingOp<StartIndices, Sizes, ArgType>, Devi
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
- return m_impl.costPerCoeff(vectorized) + TensorOpCost(0, 0, NumDims);
+ return m_impl.costPerCoeff(vectorized) + TensorOpCost(0, 0, m_is_identity ? 1 : NumDims);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void getResourceRequirements(
+ std::vector<internal::TensorOpResourceRequirements>* resources) const {
+ Eigen::Index block_total_size_max = numext::maxi<Eigen::Index>(
+ 1, m_device.lastLevelCacheSize() / sizeof(Scalar));
+ resources->push_back(internal::TensorOpResourceRequirements(
+ internal::kSkewedInnerDims, block_total_size_max));
+ m_impl.getResourceRequirements(resources);
}
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void block(
+ TensorBlock* output_block) const {
+ TensorBlock input_block(srcCoeff(output_block->first_coeff_index()),
+ output_block->block_sizes(),
+ output_block->block_strides(),
+ TensorBlockDimensions(m_inputStrides),
+ output_block->data());
+ m_impl.block(&input_block);
+ }
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar* data() const {
- Scalar* result = m_impl.data();
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename Eigen::internal::traits<XprType>::PointerType data() const {
+ Scalar* result = const_cast<Scalar*>(m_impl.data());
if (result) {
Index offset = 0;
if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
@@ -539,6 +765,7 @@ struct TensorEvaluator<const TensorSlicingOp<StartIndices, Sizes, ArgType>, Devi
TensorEvaluator<ArgType, Device> m_impl;
const Device& m_device;
Dimensions m_dimensions;
+ bool m_is_identity;
const StartIndices m_offsets;
};
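
A small usage sketch of the case the new m_is_identity flag targets: a slice with all-zero offsets and full extents, whose coeff()/packet() calls can now be forwarded straight to the underlying evaluator. It assumes the unsupported CXX11 Tensor module; the variable names are illustrative:

#include <unsupported/Eigen/CXX11/Tensor>

int main() {
  Eigen::Tensor<float, 2> t(8, 8);
  t.setRandom();
  Eigen::array<Eigen::Index, 2> offsets = {0, 0};
  Eigen::array<Eigen::Index, 2> extents = {t.dimension(0), t.dimension(1)};
  Eigen::Tensor<float, 2> copy = t.slice(offsets, extents);  // identity slice
  return copy.dimension(0) == 8 ? 0 : 1;
}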
@@ -552,33 +779,49 @@ struct TensorEvaluator<TensorSlicingOp<StartIndices, Sizes, ArgType>, Device>
typedef TensorSlicingOp<StartIndices, Sizes, ArgType> XprType;
static const int NumDims = internal::array_size<Sizes>::value;
+ typedef typename XprType::Index Index;
+ typedef typename XprType::Scalar Scalar;
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+ typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
+ typedef Sizes Dimensions;
+
enum {
- IsAligned = /*TensorEvaluator<ArgType, Device>::IsAligned*/false,
- PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
- Layout = TensorEvaluator<ArgType, Device>::Layout,
- CoordAccess = false,
- RawAccess = false
+ IsAligned = false,
+ PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
+ BlockAccess = TensorEvaluator<ArgType, Device>::BlockAccess,
+ PreferBlockAccess = true,
+ Layout = TensorEvaluator<ArgType, Device>::Layout,
+ CoordAccess = false,
+ RawAccess = (NumDims == 1) & TensorEvaluator<ArgType, Device>::RawAccess
};
+ typedef typename internal::remove_const<Scalar>::type ScalarNoConst;
+
+ typedef internal::TensorBlock<ScalarNoConst, Index, NumDims, Layout> TensorBlock;
+ typedef typename TensorBlock::Dimensions TensorBlockDimensions;
+
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
: Base(op, device)
{ }
- typedef typename XprType::Index Index;
- typedef typename XprType::Scalar Scalar;
- typedef typename XprType::CoeffReturnType CoeffReturnType;
- typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
- typedef Sizes Dimensions;
-
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType& coeffRef(Index index)
{
- return this->m_impl.coeffRef(this->srcCoeff(index));
+ if (this->m_is_identity) {
+ return this->m_impl.coeffRef(index);
+ } else {
+ return this->m_impl.coeffRef(this->srcCoeff(index));
+ }
}
template <int StoreMode> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
void writePacket(Index index, const PacketReturnType& x)
{
- const int packetSize = internal::unpacket_traits<PacketReturnType>::size;
+ if (this->m_is_identity) {
+ this->m_impl.template writePacket<StoreMode>(index, x);
+ return;
+ }
+
+ const int packetSize = PacketType<CoeffReturnType, Device>::size;
Index inputIndices[] = {0, 0};
Index indices[] = {index, index + packetSize - 1};
if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
@@ -617,9 +860,15 @@ struct TensorEvaluator<TensorSlicingOp<StartIndices, Sizes, ArgType>, Device>
}
}
}
-};
-
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void writeBlock(
+ const TensorBlock& block) {
+ this->m_impl.writeBlock(TensorBlock(
+ this->srcCoeff(block.first_coeff_index()), block.block_sizes(),
+ block.block_strides(), TensorBlockDimensions(this->m_inputStrides),
+ const_cast<ScalarNoConst*>(block.data())));
+ }
+};
namespace internal {
template<typename StartIndices, typename StopIndices, typename Strides, typename XprType>
@@ -633,6 +882,7 @@ struct traits<TensorStridingSlicingOp<StartIndices, StopIndices, Strides, XprTyp
typedef typename remove_reference<Nested>::type _Nested;
static const int NumDimensions = array_size<StartIndices>::value;
static const int Layout = XprTraits::Layout;
+ typedef typename XprTraits::PointerType PointerType;
};
template<typename StartIndices, typename StopIndices, typename Strides, typename XprType>
@@ -713,7 +963,6 @@ struct TensorEvaluator<const TensorStridingSlicingOp<StartIndices, StopIndices,
static const int NumDims = internal::array_size<Strides>::value;
typedef typename XprType::Index Index;
typedef typename XprType::Scalar Scalar;
- typedef typename internal::remove_const<Scalar>::type ScalarNonConst;
typedef typename XprType::CoeffReturnType CoeffReturnType;
typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
typedef Strides Dimensions;
@@ -724,6 +973,7 @@ struct TensorEvaluator<const TensorStridingSlicingOp<StartIndices, StopIndices,
IsAligned = false,
PacketAccess = false,
BlockAccess = false,
+ PreferBlockAccess = false,
Layout = TensorEvaluator<ArgType, Device>::Layout,
RawAccess = false
};
@@ -733,7 +983,13 @@ struct TensorEvaluator<const TensorStridingSlicingOp<StartIndices, StopIndices,
{
// Handle degenerate intervals by gracefully clamping and allowing m_dimensions to be zero
DSizes<Index,NumDims> startIndicesClamped, stopIndicesClamped;
- for (size_t i = 0; i < internal::array_size<Dimensions>::value; ++i) {
+ m_is_identity = true;
+ for (Index i = 0; i < internal::array_size<Dimensions>::value; ++i) {
+ if (m_strides[i] != 1 || op.startIndices()[i] != 0 ||
+ op.stopIndices()[i] != (m_impl.dimensions()[i] - 1)) {
+ m_is_identity = false;
+ }
+
eigen_assert(m_strides[i] != 0 && "0 stride is invalid");
if(m_strides[i]>0){
startIndicesClamped[i] = clamp(op.startIndices()[i], 0, m_impl.dimensions()[i]);
@@ -797,9 +1053,6 @@ struct TensorEvaluator<const TensorStridingSlicingOp<StartIndices, StopIndices,
m_fastOutputStrides[i] = internal::TensorIntDivisor<Index>(degenerate ? 1 : m_outputStrides[i]);
}
}
- m_block_total_size_max = numext::maxi(static_cast<std::size_t>(1),
- device.lastLevelCacheSize() /
- sizeof(Scalar));
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
@@ -816,14 +1069,18 @@ struct TensorEvaluator<const TensorStridingSlicingOp<StartIndices, StopIndices,
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
{
- return m_impl.coeff(srcCoeff(index));
+ if (m_is_identity) {
+ return m_impl.coeff(index);
+ } else {
+ return m_impl.coeff(srcCoeff(index));
+ }
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
- return m_impl.costPerCoeff(vectorized) + TensorOpCost(0, 0, NumDims);
+ return m_impl.costPerCoeff(vectorized) + TensorOpCost(0, 0, m_is_identity ? 1 : NumDims);
}
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar* data() const {
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename Eigen::internal::traits<XprType>::PointerType data() const {
return NULL;
}
@@ -856,7 +1113,7 @@ struct TensorEvaluator<const TensorStridingSlicingOp<StartIndices, StopIndices,
return inputIndex;
}
- static EIGEN_STRONG_INLINE Index clamp(Index value, Index min, Index max) {
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index clamp(Index value, Index min, Index max) {
#ifndef __SYCL_DEVICE_ONLY__
return numext::maxi(min, numext::mini(max,value));
#else
@@ -867,13 +1124,13 @@ struct TensorEvaluator<const TensorStridingSlicingOp<StartIndices, StopIndices,
array<Index, NumDims> m_outputStrides;
array<internal::TensorIntDivisor<Index>, NumDims> m_fastOutputStrides;
array<Index, NumDims> m_inputStrides;
+ bool m_is_identity;
TensorEvaluator<ArgType, Device> m_impl;
const Device& m_device;
DSizes<Index, NumDims> m_startIndices; // clamped startIndices
DSizes<Index, NumDims> m_dimensions;
DSizes<Index, NumDims> m_offsets; // offset in a flattened shape
const Strides m_strides;
- std::size_t m_block_total_size_max;
//use by sycl
const StartIndices m_exprStartIndices;
//use by sycl
@@ -893,6 +1150,7 @@ struct TensorEvaluator<TensorStridingSlicingOp<StartIndices, StopIndices, Stride
IsAligned = false,
PacketAccess = false,
BlockAccess = false,
+ PreferBlockAccess = false,
Layout = TensorEvaluator<ArgType, Device>::Layout,
CoordAccess = TensorEvaluator<ArgType, Device>::CoordAccess,
RawAccess = false
@@ -904,14 +1162,17 @@ struct TensorEvaluator<TensorStridingSlicingOp<StartIndices, StopIndices, Stride
typedef typename XprType::Index Index;
typedef typename XprType::Scalar Scalar;
- typedef typename internal::remove_const<Scalar>::type ScalarNonConst;
typedef typename XprType::CoeffReturnType CoeffReturnType;
typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
typedef Strides Dimensions;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType& coeffRef(Index index)
{
- return this->m_impl.coeffRef(this->srcCoeff(index));
+ if (this->m_is_identity) {
+ return this->m_impl.coeffRef(index);
+ } else {
+ return this->m_impl.coeffRef(this->srcCoeff(index));
+ }
}
};
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorPadding.h b/unsupported/Eigen/CXX11/src/Tensor/TensorPadding.h
index a8e255246..59c1704ed 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorPadding.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorPadding.h
@@ -31,6 +31,7 @@ struct traits<TensorPaddingOp<PaddingDimensions, XprType> > : public traits<XprT
typedef typename remove_reference<Nested>::type _Nested;
static const int NumDimensions = XprTraits::NumDimensions;
static const int Layout = XprTraits::Layout;
+ typedef typename XprTraits::PointerType PointerType;
};
template<typename PaddingDimensions, typename XprType>
@@ -90,11 +91,13 @@ struct TensorEvaluator<const TensorPaddingOp<PaddingDimensions, ArgType>, Device
typedef typename XprType::Scalar Scalar;
typedef typename XprType::CoeffReturnType CoeffReturnType;
typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
- static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
+ static const int PacketSize = PacketType<CoeffReturnType, Device>::size;
enum {
IsAligned = true,
PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
+ BlockAccess = false,
+ PreferBlockAccess = false,
Layout = TensorEvaluator<ArgType, Device>::Layout,
CoordAccess = true,
RawAccess = false
@@ -198,7 +201,7 @@ struct TensorEvaluator<const TensorPaddingOp<PaddingDimensions, ArgType>, Device
return cost;
}
- EIGEN_DEVICE_FUNC Scalar* data() const { return NULL; }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename Eigen::internal::traits<XprType>::PointerType data() const { return NULL; }

/// used by sycl
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const PaddingDimensions& padding() const { return m_padding; }
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorPatch.h b/unsupported/Eigen/CXX11/src/Tensor/TensorPatch.h
index 886a254f6..4292fe0c2 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorPatch.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorPatch.h
@@ -31,6 +31,7 @@ struct traits<TensorPatchOp<PatchDim, XprType> > : public traits<XprType>
typedef typename remove_reference<Nested>::type _Nested;
static const int NumDimensions = XprTraits::NumDimensions + 1;
static const int Layout = XprTraits::Layout;
+ typedef typename XprTraits::PointerType PointerType;
};
template<typename PatchDim, typename XprType>
@@ -87,12 +88,14 @@ struct TensorEvaluator<const TensorPatchOp<PatchDim, ArgType>, Device>
typedef typename XprType::Scalar Scalar;
typedef typename XprType::CoeffReturnType CoeffReturnType;
typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
- static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
+ static const int PacketSize = PacketType<CoeffReturnType, Device>::size;
enum {
IsAligned = false,
PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
+ BlockAccess = false,
+ PreferBlockAccess = false,
Layout = TensorEvaluator<ArgType, Device>::Layout,
CoordAccess = false,
RawAccess = false
@@ -100,6 +103,9 @@ struct TensorEvaluator<const TensorPatchOp<PatchDim, ArgType>, Device>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
: m_impl(op.expression(), device)
+#ifdef EIGEN_USE_SYCL
+ , m_patch_dims(op.patch_dims())
+#endif
{
Index num_patches = 1;
const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
@@ -253,7 +259,12 @@ struct TensorEvaluator<const TensorPatchOp<PatchDim, ArgType>, Device>
TensorOpCost(0, 0, compute_cost, vectorized, PacketSize);
}
- EIGEN_DEVICE_FUNC Scalar* data() const { return NULL; }
+ EIGEN_DEVICE_FUNC typename Eigen::internal::traits<XprType>::PointerType data() const { return NULL; }
+
+#ifdef EIGEN_USE_SYCL
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const TensorEvaluator<ArgType, Device>& impl() const { return m_impl; }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const PatchDim& functor() const { return m_patch_dims; }
+#endif
protected:
Dimensions m_dimensions;
@@ -262,6 +273,10 @@ struct TensorEvaluator<const TensorPatchOp<PatchDim, ArgType>, Device>
array<Index, NumDims-1> m_patchStrides;
TensorEvaluator<ArgType, Device> m_impl;
+
+#ifdef EIGEN_USE_SYCL
+ const PatchDim m_patch_dims;
+#endif
};
} // end namespace Eigen
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorRandom.h b/unsupported/Eigen/CXX11/src/Tensor/TensorRandom.h
index 1655a813e..787cbd031 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorRandom.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorRandom.h
@@ -16,10 +16,10 @@ namespace internal {
namespace {
EIGEN_DEVICE_FUNC uint64_t get_random_seed() {
-#ifdef __CUDA_ARCH__
+#if defined(EIGEN_GPU_COMPILE_PHASE)
// We don't support 3d kernels since we currently only use 1 and
// 2d kernels.
- assert(threadIdx.z == 0);
+ gpu_assert(threadIdx.z == 0);
return clock64() +
blockIdx.x * blockDim.x + threadIdx.x +
gridDim.x * blockDim.x * (blockIdx.y * blockDim.y + threadIdx.y);
@@ -55,11 +55,11 @@ EIGEN_DEVICE_FUNC uint64_t get_random_seed() {
#endif
}
-static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE unsigned PCG_XSH_RS_generator(uint64_t* state) {
+static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE unsigned PCG_XSH_RS_generator(uint64_t* state, uint64_t stream) {
// TODO: Unify with the implementation in the non-blocking thread pool.
uint64_t current = *state;
// Update the internal state
- *state = current * 6364136223846793005ULL + 0xda3e39cb94b95bdbULL;
+ *state = current * 6364136223846793005ULL + (stream << 1 | 1);
// Generate the random output (using the PCG-XSH-RS scheme)
return static_cast<unsigned>((current ^ (current >> 22)) >> (22 + (current >> 61)));
}
@@ -73,17 +73,17 @@ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE uint64_t PCG_XSH_RS_state(uint64_t
template <typename T> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
-T RandomToTypeUniform(uint64_t* state) {
- unsigned rnd = PCG_XSH_RS_generator(state);
+T RandomToTypeUniform(uint64_t* state, uint64_t stream) {
+ unsigned rnd = PCG_XSH_RS_generator(state, stream);
return static_cast<T>(rnd);
}
template <> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
-Eigen::half RandomToTypeUniform<Eigen::half>(uint64_t* state) {
+Eigen::half RandomToTypeUniform<Eigen::half>(uint64_t* state, uint64_t stream) {
Eigen::half result;
// Generate 10 random bits for the mantissa
- unsigned rnd = PCG_XSH_RS_generator(state);
+ unsigned rnd = PCG_XSH_RS_generator(state, stream);
result.x = static_cast<uint16_t>(rnd & 0x3ffu);
// Set the exponent
result.x |= (static_cast<uint16_t>(15) << 10);
@@ -93,14 +93,14 @@ Eigen::half RandomToTypeUniform<Eigen::half>(uint64_t* state) {
template <> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
-float RandomToTypeUniform<float>(uint64_t* state) {
+float RandomToTypeUniform<float>(uint64_t* state, uint64_t stream) {
typedef union {
uint32_t raw;
float fp;
} internal;
internal result;
// Generate 23 random bits for the mantissa
- const unsigned rnd = PCG_XSH_RS_generator(state);
+ const unsigned rnd = PCG_XSH_RS_generator(state, stream);
result.raw = rnd & 0x7fffffu;
// Set the exponent
result.raw |= (static_cast<uint32_t>(127) << 23);
@@ -109,7 +109,7 @@ float RandomToTypeUniform<float>(uint64_t* state) {
}
template <> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
-double RandomToTypeUniform<double>(uint64_t* state) {
+double RandomToTypeUniform<double>(uint64_t* state, uint64_t stream) {
typedef union {
uint64_t raw;
double dp;
@@ -118,9 +118,9 @@ double RandomToTypeUniform<double>(uint64_t* state) {
result.raw = 0;
// Generate 52 random bits for the mantissa
// First generate the upper 20 bits
- unsigned rnd1 = PCG_XSH_RS_generator(state) & 0xfffffu;
+ unsigned rnd1 = PCG_XSH_RS_generator(state, stream) & 0xfffffu;
// Then generate the lower 32 bits
- unsigned rnd2 = PCG_XSH_RS_generator(state);
+ unsigned rnd2 = PCG_XSH_RS_generator(state, stream);
result.raw = (static_cast<uint64_t>(rnd1) << 32) | rnd2;
// Set the exponent
result.raw |= (static_cast<uint64_t>(1023) << 52);
@@ -129,14 +129,14 @@ double RandomToTypeUniform<double>(uint64_t* state) {
}
template <> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
-std::complex<float> RandomToTypeUniform<std::complex<float> >(uint64_t* state) {
- return std::complex<float>(RandomToTypeUniform<float>(state),
- RandomToTypeUniform<float>(state));
+std::complex<float> RandomToTypeUniform<std::complex<float> >(uint64_t* state, uint64_t stream) {
+ return std::complex<float>(RandomToTypeUniform<float>(state, stream),
+ RandomToTypeUniform<float>(state, stream));
}
template <> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
-std::complex<double> RandomToTypeUniform<std::complex<double> >(uint64_t* state) {
- return std::complex<double>(RandomToTypeUniform<double>(state),
- RandomToTypeUniform<double>(state));
+std::complex<double> RandomToTypeUniform<std::complex<double> >(uint64_t* state, uint64_t stream) {
+ return std::complex<double>(RandomToTypeUniform<double>(state, stream),
+ RandomToTypeUniform<double>(state, stream));
}
template <typename T> class UniformRandomGenerator {
@@ -155,9 +155,7 @@ template <typename T> class UniformRandomGenerator {
template<typename Index> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
T operator()(Index i) const {
- uint64_t local_state = m_state + i;
- T result = RandomToTypeUniform<T>(&local_state);
- m_state = local_state;
+ T result = RandomToTypeUniform<T>(&m_state, i);
return result;
}
@@ -165,11 +163,9 @@ template <typename T> class UniformRandomGenerator {
Packet packetOp(Index i) const {
const int packetSize = internal::unpacket_traits<Packet>::size;
EIGEN_ALIGN_MAX T values[packetSize];
- uint64_t local_state = m_state + i;
for (int j = 0; j < packetSize; ++j) {
- values[j] = RandomToTypeUniform<T>(&local_state);
+ values[j] = RandomToTypeUniform<T>(&m_state, i);
}
- m_state = local_state;
return internal::pload<Packet>(values);
}
@@ -190,14 +186,14 @@ struct functor_traits<UniformRandomGenerator<Scalar> > {
template <typename T> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
-T RandomToTypeNormal(uint64_t* state) {
+T RandomToTypeNormal(uint64_t* state, uint64_t stream) {
// Use the ratio of uniforms method to generate numbers following a normal
// distribution. See for example Numerical Recipes chapter 7.3.9 for the
// details.
T u, v, q;
do {
- u = RandomToTypeUniform<T>(state);
- v = T(1.7156) * (RandomToTypeUniform<T>(state) - T(0.5));
+ u = RandomToTypeUniform<T>(state, stream);
+ v = T(1.7156) * (RandomToTypeUniform<T>(state, stream) - T(0.5));
const T x = u - T(0.449871);
const T y = numext::abs(v) + T(0.386595);
q = x*x + y * (T(0.196)*y - T(0.25472)*x);
@@ -208,14 +204,14 @@ T RandomToTypeNormal(uint64_t* state) {
}
template <> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
-std::complex<float> RandomToTypeNormal<std::complex<float> >(uint64_t* state) {
- return std::complex<float>(RandomToTypeNormal<float>(state),
- RandomToTypeNormal<float>(state));
+std::complex<float> RandomToTypeNormal<std::complex<float> >(uint64_t* state, uint64_t stream) {
+ return std::complex<float>(RandomToTypeNormal<float>(state, stream),
+ RandomToTypeNormal<float>(state, stream));
}
template <> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
-std::complex<double> RandomToTypeNormal<std::complex<double> >(uint64_t* state) {
- return std::complex<double>(RandomToTypeNormal<double>(state),
- RandomToTypeNormal<double>(state));
+std::complex<double> RandomToTypeNormal<std::complex<double> >(uint64_t* state, uint64_t stream) {
+ return std::complex<double>(RandomToTypeNormal<double>(state, stream),
+ RandomToTypeNormal<double>(state, stream));
}
@@ -234,9 +230,7 @@ template <typename T> class NormalRandomGenerator {
template<typename Index> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
T operator()(Index i) const {
- uint64_t local_state = m_state + i;
- T result = RandomToTypeNormal<T>(&local_state);
- m_state = local_state;
+ T result = RandomToTypeNormal<T>(&m_state, i);
return result;
}
@@ -244,11 +238,9 @@ template <typename T> class NormalRandomGenerator {
Packet packetOp(Index i) const {
const int packetSize = internal::unpacket_traits<Packet>::size;
EIGEN_ALIGN_MAX T values[packetSize];
- uint64_t local_state = m_state + i;
for (int j = 0; j < packetSize; ++j) {
- values[j] = RandomToTypeNormal<T>(&local_state);
+ values[j] = RandomToTypeNormal<T>(&m_state, i);
}
- m_state = local_state;
return internal::pload<Packet>(values);
}
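
For reference, a standalone sketch (plain C++, not Eigen code) of the generator step used above: the PCG-XSH-RS output function with the per-stream increment (stream << 1 | 1), which keeps the LCG increment odd as PCG requires, plus the bit trick that turns 23 random mantissa bits into a float in [0, 1):

#include <cstdint>
#include <cstdio>

static unsigned pcg_xsh_rs(uint64_t* state, uint64_t stream) {
  const uint64_t current = *state;
  *state = current * 6364136223846793005ULL + (stream << 1 | 1);  // per-stream odd increment
  return static_cast<unsigned>((current ^ (current >> 22)) >> (22 + (current >> 61)));
}

static float uniform_from_bits(uint64_t* state, uint64_t stream) {
  union { uint32_t raw; float fp; } u;                 // type punning mirrors the code above
  u.raw = (pcg_xsh_rs(state, stream) & 0x7fffffu)      // 23 mantissa bits
        | (127u << 23);                                // exponent for [1, 2)
  return u.fp - 1.0f;                                  // shift to [0, 1)
}

int main() {
  uint64_t state = 0x853c49e6748fea9bULL;
  for (int i = 0; i < 4; ++i)
    std::printf("%f\n", uniform_from_bits(&state, static_cast<uint64_t>(i)));
}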
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h b/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h
index c841786b8..949764f3a 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h
@@ -11,6 +11,17 @@
#ifndef EIGEN_CXX11_TENSOR_TENSOR_REDUCTION_H
#define EIGEN_CXX11_TENSOR_TENSOR_REDUCTION_H
+// clang is incompatible with the CUDA syntax wrt making a kernel a class friend,
+// so we'll use a macro to make clang happy.
+#ifndef KERNEL_FRIEND
+#if defined(__clang__) && (defined(__CUDA__) || defined(__HIP__))
+#define KERNEL_FRIEND friend __global__
+#else
+#define KERNEL_FRIEND friend
+#endif
+#endif
+
+
namespace Eigen {
@@ -33,6 +44,7 @@ namespace internal {
typedef typename XprType::Nested Nested;
static const int NumDimensions = XprTraits::NumDimensions - array_size<Dims>::value;
static const int Layout = XprTraits::Layout;
+ typedef typename XprTraits::PointerType PointerType;
template <class T> struct MakePointer {
// Intermediate typedef to work around MSVC issue.
@@ -153,7 +165,9 @@ struct GenericDimReducer<-1, Self, Op> {
}
};
-template <typename Self, typename Op, bool Vectorizable = (Self::InputPacketAccess & Op::PacketAccess)>
+template <typename Self, typename Op, bool Vectorizable = (Self::InputPacketAccess && Self::ReducerTraits::PacketAccess),
+ bool UseTreeReduction = (!Self::ReducerTraits::IsStateful &&
+ !Self::ReducerTraits::IsExactlyAssociative)>
struct InnerMostDimReducer {
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename Self::CoeffReturnType reduce(const Self& self, typename Self::Index firstIndex, typename Self::Index numValuesToReduce, Op& reducer) {
typename Self::CoeffReturnType accum = reducer.initialize();
@@ -165,23 +179,86 @@ struct InnerMostDimReducer {
};
template <typename Self, typename Op>
-struct InnerMostDimReducer<Self, Op, true> {
+struct InnerMostDimReducer<Self, Op, true, false> {
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename Self::CoeffReturnType reduce(const Self& self, typename Self::Index firstIndex, typename Self::Index numValuesToReduce, Op& reducer) {
- const int packetSize = internal::unpacket_traits<typename Self::PacketReturnType>::size;
+ const typename Self::Index packetSize = internal::unpacket_traits<typename Self::PacketReturnType>::size;
const typename Self::Index VectorizedSize = (numValuesToReduce / packetSize) * packetSize;
- typename Self::PacketReturnType p = reducer.template initializePacket<typename Self::PacketReturnType>();
+ typename Self::PacketReturnType paccum = reducer.template initializePacket<typename Self::PacketReturnType>();
for (typename Self::Index j = 0; j < VectorizedSize; j += packetSize) {
- reducer.reducePacket(self.m_impl.template packet<Unaligned>(firstIndex + j), &p);
+ reducer.reducePacket(self.m_impl.template packet<Unaligned>(firstIndex + j), &paccum);
}
typename Self::CoeffReturnType accum = reducer.initialize();
for (typename Self::Index j = VectorizedSize; j < numValuesToReduce; ++j) {
reducer.reduce(self.m_impl.coeff(firstIndex + j), &accum);
}
- return reducer.finalizeBoth(accum, p);
+ return reducer.finalizeBoth(accum, paccum);
}
};
-template <int DimIndex, typename Self, typename Op, bool vectorizable = (Self::InputPacketAccess & Op::PacketAccess)>
+static const int kLeafSize = 1024;
+
+template <typename Self, typename Op>
+struct InnerMostDimReducer<Self, Op, false, true> {
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename Self::CoeffReturnType
+ reduce(const Self& self, typename Self::Index firstIndex,
+ typename Self::Index numValuesToReduce, Op& reducer) {
+ typename Self::CoeffReturnType accum = reducer.initialize();
+ if (numValuesToReduce > kLeafSize) {
+ const typename Self::Index half = numValuesToReduce / 2;
+ reducer.reduce(reduce(self, firstIndex, half, reducer), &accum);
+ reducer.reduce(
+ reduce(self, firstIndex + half, numValuesToReduce - half, reducer),
+ &accum);
+ } else {
+ for (typename Self::Index j = 0; j < numValuesToReduce; ++j) {
+ reducer.reduce(self.m_impl.coeff(firstIndex + j), &accum);
+ }
+ }
+ return reducer.finalize(accum);
+ }
+};
+
+template <typename Self, typename Op>
+struct InnerMostDimReducer<Self, Op, true, true> {
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename Self::CoeffReturnType
+ reduce(const Self& self, typename Self::Index firstIndex,
+ typename Self::Index numValuesToReduce, Op& reducer) {
+ const typename Self::Index packetSize =
+ internal::unpacket_traits<typename Self::PacketReturnType>::size;
+ typename Self::CoeffReturnType accum = reducer.initialize();
+ if (numValuesToReduce > packetSize * kLeafSize) {
+ // Make sure the split point is aligned on a packet boundary.
+ const typename Self::Index split =
+ packetSize *
+ divup(firstIndex + divup(numValuesToReduce, typename Self::Index(2)),
+ packetSize);
+ const typename Self::Index num_left =
+ numext::mini(split - firstIndex, numValuesToReduce);
+ reducer.reduce(reduce(self, firstIndex, num_left, reducer), &accum);
+ if (num_left < numValuesToReduce) {
+ reducer.reduce(
+ reduce(self, split, numValuesToReduce - num_left, reducer), &accum);
+ }
+ return reducer.finalize(accum);
+ } else {
+ const typename Self::Index VectorizedSize =
+ (numValuesToReduce / packetSize) * packetSize;
+ typename Self::PacketReturnType paccum =
+ reducer.template initializePacket<typename Self::PacketReturnType>();
+ for (typename Self::Index j = 0; j < VectorizedSize; j += packetSize) {
+ reducer.reducePacket(
+ self.m_impl.template packet<Unaligned>(firstIndex + j), &paccum);
+ }
+ for (typename Self::Index j = VectorizedSize; j < numValuesToReduce;
+ ++j) {
+ reducer.reduce(self.m_impl.coeff(firstIndex + j), &accum);
+ }
+ return reducer.finalizeBoth(accum, paccum);
+ }
+ }
+};
+
+template <int DimIndex, typename Self, typename Op, bool vectorizable = (Self::InputPacketAccess && Self::ReducerTraits::PacketAccess)>
struct InnerMostDimPreserver {
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const Self&, typename Self::Index, Op&, typename Self::PacketReturnType*) {
eigen_assert(false && "should never be called");
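
The new non-exactly-associative path above reduces recursively, splitting the range in half down to kLeafSize coefficients rather than accumulating left to right; for floating point sums this pairwise scheme keeps rounding error growth roughly logarithmic in n instead of linear. A plain C++ sketch of the same shape (tree_sum is an illustrative name):

#include <cstdio>
#include <vector>

static float tree_sum(const float* data, int n, int leaf = 1024) {
  if (n > leaf) {
    const int half = n / 2;                    // split as in the reducer above
    return tree_sum(data, half, leaf) + tree_sum(data + half, n - half, leaf);
  }
  float acc = 0.f;
  for (int i = 0; i < n; ++i) acc += data[i];  // small ranges: plain loop
  return acc;
}

int main() {
  std::vector<float> v(1 << 24, 0.1f);
  float seq = 0.f;
  for (float x : v) seq += x;
  std::printf("sequential: %f  tree: %f  exact: %f\n", seq,
              tree_sum(v.data(), static_cast<int>(v.size())),
              0.1 * static_cast<double>(v.size()));
}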
@@ -216,7 +293,7 @@ struct InnerMostDimPreserver<-1, Self, Op, true> {
};
// Default full reducer
-template <typename Self, typename Op, typename Device, bool Vectorizable = (Self::InputPacketAccess & Op::PacketAccess)>
+template <typename Self, typename Op, typename Device, bool Vectorizable = (Self::InputPacketAccess && Self::ReducerTraits::PacketAccess)>
struct FullReducer {
static const bool HasOptimizedImplementation = false;
@@ -230,7 +307,7 @@ struct FullReducer {
#ifdef EIGEN_USE_THREADS
// Multithreaded full reducers
template <typename Self, typename Op,
- bool Vectorizable = (Self::InputPacketAccess & Op::PacketAccess)>
+ bool Vectorizable = (Self::InputPacketAccess && Self::ReducerTraits::PacketAccess)>
struct FullReducerShard {
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(const Self& self, typename Self::Index firstIndex,
typename Self::Index numValuesToReduce, Op& reducer,
@@ -243,8 +320,8 @@ struct FullReducerShard {
// Multithreaded full reducer
template <typename Self, typename Op, bool Vectorizable>
struct FullReducer<Self, Op, ThreadPoolDevice, Vectorizable> {
- static const bool HasOptimizedImplementation = !Op::IsStateful;
- static const int PacketSize =
+ static const bool HasOptimizedImplementation = !Self::ReducerTraits::IsStateful;
+ static const Index PacketSize =
unpacket_traits<typename Self::PacketReturnType>::size;
// launch one reducer per thread and accumulate the result.
@@ -322,12 +399,12 @@ struct OuterReducer {
};
-#if defined(EIGEN_USE_GPU) && defined(__CUDACC__)
+#if defined(EIGEN_USE_GPU) && (defined(EIGEN_GPUCC))
template <int B, int N, typename S, typename R, typename I>
__global__ void FullReductionKernel(R, const S, I, typename S::CoeffReturnType*, unsigned int*);
-#ifdef EIGEN_HAS_CUDA_FP16
+#if defined(EIGEN_HAS_GPU_FP16)
template <typename S, typename R, typename I>
__global__ void ReductionInitFullReduxKernelHalfFloat(R, const S, I, half2*);
template <int B, int N, typename S, typename R, typename I>
@@ -344,6 +421,70 @@ template <int NPT, typename S, typename R, typename I>
__global__ void OuterReductionKernel(R, const S, I, I, typename S::CoeffReturnType*);
#endif
+template <typename Self, typename Op,
+ bool Vectorizable =
+ (Self::InputPacketAccess & Self::ReducerTraits::PacketAccess)>
+class BlockReducer {
+ public:
+ typedef typename Self::Index Index;
+ typedef typename Self::Scalar Scalar;
+ typedef typename Self::CoeffReturnType CoeffReturnType;
+ typedef typename Self::PacketReturnType PacketReturnType;
+ explicit BlockReducer(const Op& reducer) : op_(reducer) {
+ accum_ = op_.initialize();
+ }
+ void Reduce(Index index, Index num_values_to_reduce, Scalar* data) {
+ for (Index i = 0; i < num_values_to_reduce; ++i) {
+ op_.reduce(data[index + i], &accum_);
+ }
+ }
+ CoeffReturnType Finalize() { return op_.finalize(accum_); }
+ PacketReturnType FinalizePacket() {
+ // TODO(andydavis) This function should not be called for Scalar
+ // reductions: clean this up or add an assert here.
+ return PacketReturnType();
+ }
+
+ private:
+ CoeffReturnType accum_;
+ Op op_;
+};
+
+template <typename Self, typename Op>
+class BlockReducer<Self, Op, true> {
+ public:
+ typedef typename Self::Index Index;
+ typedef typename Self::Scalar Scalar;
+ typedef typename Self::CoeffReturnType CoeffReturnType;
+ typedef typename Self::PacketReturnType PacketReturnType;
+ static const Index PacketSize =
+ internal::unpacket_traits<PacketReturnType>::size;
+
+ explicit BlockReducer(const Op& reducer) : op_(reducer) {
+ vaccum_ = op_.template initializePacket<PacketReturnType>();
+ accum_ = op_.initialize();
+ }
+ void Reduce(Index index, Index num_values_to_reduce, Scalar* data) {
+ const Index vectorized_size =
+ (num_values_to_reduce / PacketSize) * PacketSize;
+ for (Index i = 0; i < vectorized_size; i += PacketSize) {
+ op_.reducePacket(
+ internal::ploadt<PacketReturnType, Unaligned>(&data[index + i]),
+ &vaccum_);
+ }
+ for (Index i = vectorized_size; i < num_values_to_reduce; ++i) {
+ op_.reduce(data[index + i], &accum_);
+ }
+ }
+ CoeffReturnType Finalize() { return op_.finalizeBoth(accum_, vaccum_); }
+ PacketReturnType FinalizePacket() { return op_.finalizePacket(vaccum_); }
+
+ private:
+ PacketReturnType vaccum_;
+ CoeffReturnType accum_;
+ Op op_;
+};
+
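For reference, the vectorized BlockReducer above keeps two accumulators: a packet accumulator fed by full SIMD-width loads and a scalar accumulator for the remaining tail elements, merged at the end by finalizeBoth(). A rough sketch of that shape, with an ordinary sum in place of the reducer and a template parameter standing in for the packet size:

#include <cstddef>

// Packet-plus-tail accumulation: reduce full SIMD-width chunks into a "packet"
// accumulator (modelled here as a small fixed array) and the remainder into a
// scalar accumulator, then fold the lanes together. kPacket stands in for
// unpacket_traits<PacketReturnType>::size.
template <int kPacket>
float block_sum(const float* data, std::ptrdiff_t n) {
  float vacc[kPacket] = {0};  // packet accumulator
  float sacc = 0.0f;          // scalar accumulator for the tail
  const std::ptrdiff_t vectorized = (n / kPacket) * kPacket;
  for (std::ptrdiff_t i = 0; i < vectorized; i += kPacket)
    for (int k = 0; k < kPacket; ++k) vacc[k] += data[i + k];
  for (std::ptrdiff_t i = vectorized; i < n; ++i) sacc += data[i];
  // finalizeBoth: merge the packet lanes into the scalar result.
  for (int k = 0; k < kPacket; ++k) sacc += vacc[k];
  return sacc;
}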
} // end namespace internal
@@ -382,6 +523,7 @@ class TensorReductionOp : public TensorBase<TensorReductionOp<Op, Dims, XprType,
template<typename Op, typename Dims, typename ArgType, template <class> class MakePointer_, typename Device>
struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>, Device>
{
+ typedef internal::reducer_traits<Op, Device> ReducerTraits;
typedef TensorReductionOp<Op, Dims, ArgType, MakePointer_> XprType;
typedef typename XprType::Index Index;
typedef ArgType ChildType;
@@ -395,22 +537,34 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>,
static const bool InputPacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess;
typedef typename internal::remove_const<typename XprType::CoeffReturnType>::type CoeffReturnType;
typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
- static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
+ static const Index PacketSize = PacketType<CoeffReturnType, Device>::size;
enum {
IsAligned = false,
- PacketAccess = Self::InputPacketAccess && Op::PacketAccess,
+ PacketAccess = Self::InputPacketAccess && ReducerTraits::PacketAccess,
+ BlockAccess = false,
+ PreferBlockAccess = true,
Layout = TensorEvaluator<ArgType, Device>::Layout,
CoordAccess = false, // to be implemented
RawAccess = false
};
+ typedef typename internal::remove_const<Scalar>::type ScalarNoConst;
+
+ typedef internal::TensorBlock<ScalarNoConst, Index, NumOutputDims, Layout>
+ OutputTensorBlock;
+ typedef internal::TensorBlock<ScalarNoConst, Index, NumInputDims, Layout>
+ InputTensorBlock;
+
static const bool ReducingInnerMostDims = internal::are_inner_most_dims<Dims, NumInputDims, Layout>::value;
static const bool PreservingInnerMostDims = internal::preserve_inner_most_dims<Dims, NumInputDims, Layout>::value;
static const bool RunningFullReduction = (NumOutputDims==0);
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
- : m_impl(op.expression(), device), m_reducer(op.reducer()), m_result(NULL), m_device(device), m_xpr_dims(op.dims())
+ : m_impl(op.expression(), device), m_reducer(op.reducer()), m_result(NULL), m_device(device)
+#if defined(EIGEN_USE_SYCL)
+ , m_xpr_dims(op.dims())
+#endif
{
EIGEN_STATIC_ASSERT((NumInputDims >= NumReducedDims), YOU_MADE_A_PROGRAMMING_MISTAKE);
EIGEN_STATIC_ASSERT((!ReducingInnerMostDims | !PreservingInnerMostDims | (NumReducedDims == NumInputDims)),
@@ -435,11 +589,13 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>,
m_outputStrides[0] = 1;
for (int i = 1; i < NumOutputDims; ++i) {
m_outputStrides[i] = m_outputStrides[i - 1] * m_dimensions[i - 1];
+ m_fastOutputStrides[i] = internal::TensorIntDivisor<Index>(m_outputStrides[i]);
}
} else {
- m_outputStrides.back() = 1;
+ m_outputStrides[NumOutputDims - 1] = 1;
for (int i = NumOutputDims - 2; i >= 0; --i) {
m_outputStrides[i] = m_outputStrides[i + 1] * m_dimensions[i + 1];
+ m_fastOutputStrides[i] = internal::TensorIntDivisor<Index>(m_outputStrides[i]);
}
}
}
@@ -467,6 +623,7 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>,
++reduceIndex;
} else {
m_preservedStrides[outputIndex] = input_strides[i];
+ m_output_to_input_dim_map[outputIndex] = i;
++outputIndex;
}
}
@@ -476,11 +633,25 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>,
if (NumOutputDims == 0) {
m_preservedStrides[0] = internal::array_prod(input_dims);
}
+
+ m_numValuesToReduce =
+ NumOutputDims == 0
+ ? internal::array_prod(input_dims)
+ : (static_cast<int>(Layout) == static_cast<int>(ColMajor))
+ ? m_preservedStrides[0]
+ : m_preservedStrides[NumOutputDims - 1];
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
- EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool evalSubExprsIfNeeded(typename MakePointer_<CoeffReturnType>::Type data) {
+ EIGEN_STRONG_INLINE
+ #if !defined(EIGEN_HIPCC)
+ // Marking this as EIGEN_DEVICE_FUNC for HIPCC requires also doing the same for all the functions
+ // being called within here, which then leads to proliferation of EIGEN_DEVICE_FUNC markings, one
+ // of which will eventually result in an NVCC error
+ EIGEN_DEVICE_FUNC
+ #endif
+ bool evalSubExprsIfNeeded(typename MakePointer_<CoeffReturnType>::Type data) {
m_impl.evalSubExprsIfNeeded(NULL);
// Use the FullReducer if possible.
@@ -490,7 +661,7 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>,
!RunningOnGPU))) {
bool need_assign = false;
if (!data) {
- m_result = static_cast<CoeffReturnType*>(m_device.allocate(sizeof(CoeffReturnType)));
+ m_result = static_cast<CoeffReturnType*>(m_device.allocate_temp(sizeof(CoeffReturnType)));
data = m_result;
need_assign = true;
}
@@ -502,7 +673,7 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>,
const Index num_values_to_reduce = internal::array_prod(m_reducedDims);
const Index num_coeffs_to_preserve = internal::array_prod(m_dimensions);
if (!data) {
- data = static_cast<CoeffReturnType*>(m_device.allocate(sizeof(CoeffReturnType) * num_coeffs_to_preserve));
+ data = static_cast<CoeffReturnType*>(m_device.allocate_temp(sizeof(CoeffReturnType) * num_coeffs_to_preserve));
m_result = data;
}
Op reducer(m_reducer);
@@ -526,7 +697,7 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>,
const Index num_coeffs_to_preserve = internal::array_prod(m_dimensions);
if (!data) {
if (num_coeffs_to_preserve < 1024 && num_values_to_reduce > num_coeffs_to_preserve && num_values_to_reduce > 128) {
- data = static_cast<CoeffReturnType*>(m_device.allocate(sizeof(CoeffReturnType) * num_coeffs_to_preserve));
+ data = static_cast<CoeffReturnType*>(m_device.allocate_temp(sizeof(CoeffReturnType) * num_coeffs_to_preserve));
m_result = data;
}
else {
@@ -536,7 +707,7 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>,
Op reducer(m_reducer);
if (internal::InnerReducer<Self, Op, Device>::run(*this, reducer, m_device, data, num_values_to_reduce, num_coeffs_to_preserve)) {
if (m_result) {
- m_device.deallocate(m_result);
+ m_device.deallocate_temp(m_result);
m_result = NULL;
}
return true;
@@ -559,7 +730,7 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>,
const Index num_coeffs_to_preserve = internal::array_prod(m_dimensions);
if (!data) {
if (num_coeffs_to_preserve < 1024 && num_values_to_reduce > num_coeffs_to_preserve && num_values_to_reduce > 32) {
- data = static_cast<CoeffReturnType*>(m_device.allocate(sizeof(CoeffReturnType) * num_coeffs_to_preserve));
+ data = static_cast<CoeffReturnType*>(m_device.allocate_temp(sizeof(CoeffReturnType) * num_coeffs_to_preserve));
m_result = data;
}
else {
@@ -569,7 +740,7 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>,
Op reducer(m_reducer);
if (internal::OuterReducer<Self, Op, Device>::run(*this, reducer, m_device, data, num_values_to_reduce, num_coeffs_to_preserve)) {
if (m_result) {
- m_device.deallocate(m_result);
+ m_device.deallocate_temp(m_result);
m_result = NULL;
}
return true;
@@ -584,7 +755,7 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>,
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
m_impl.cleanup();
if (m_result) {
- m_device.deallocate(m_result);
+ m_device.deallocate_temp(m_result);
m_result = NULL;
}
}
@@ -663,33 +834,293 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>,
}
}
- EIGEN_DEVICE_FUNC typename MakePointer_<Scalar>::Type data() const { return m_result; }
- /// required by sycl in order to extract the accessor
- const TensorEvaluator<ArgType, Device>& impl() const { return m_impl; }
- /// added for sycl in order to construct the buffer from the sycl device
- const Device& device() const{return m_device;}
- /// added for sycl in order to re-construct the reduction eval on the device for the sub-kernel
- const Dims& xprDims() const {return m_xpr_dims;}
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void getResourceRequirements(
+ std::vector<internal::TensorOpResourceRequirements>* resources) const {
+ Eigen::Index block_total_size_max = numext::maxi<Eigen::Index>(
+ 1, m_device.lastLevelCacheSize() / sizeof(Scalar));
+ resources->push_back(internal::TensorOpResourceRequirements(
+ internal::kSkewedInnerDims, block_total_size_max));
+ m_impl.getResourceRequirements(resources);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_DONT_INLINE void block(
+ OutputTensorBlock* output_block) const {
+ // Special case full reductions to avoid input block copy below.
+ if (NumInputDims == NumReducedDims) {
+ eigen_assert(output_block->first_coeff_index() == 0);
+ eigen_assert(output_block->block_sizes().TotalSize() == 1);
+ Op reducer(m_reducer);
+ output_block->data()[0] = internal::InnerMostDimReducer<Self, Op>::reduce(
+ *this, 0, m_numValuesToReduce, reducer);
+ return;
+ }
+
+ // Calculate input tensor 'slice' required to reduce output block coeffs.
+ DSizes<Index, NumInputDims> input_slice_sizes(m_impl.dimensions());
+ for (int i = 0; i < NumOutputDims; ++i) {
+ // Clip preserved input dimensions by output block size.
+ input_slice_sizes[m_output_to_input_dim_map[i]] =
+ output_block->block_sizes()[i];
+ }
+
+ // Shard input tensor slice into blocks (because it could be large if we
+ // need to reduce along several dimensions to calculate required output
+ // coefficients).
+ const Index max_coeff_count =
+ numext::mini<Index>(((m_device.firstLevelCacheSize()) / sizeof(Scalar)),
+ input_slice_sizes.TotalSize());
+
+ // Calculate max output shard size needed to keep working set of reducers
+ // in L1, while leaving enough space for reducer overhead and 'PacketSize'
+ // reductions.
+ DSizes<Index, NumInputDims> target_input_block_sizes;
+ CalculateTargetInputBlockShape(max_coeff_count, input_slice_sizes,
+ &target_input_block_sizes);
+ // Calculate indices for first preserved dimension.
+ const Index first_preserved_dim_output_index =
+ static_cast<int>(Layout) == static_cast<int>(ColMajor)
+ ? 0
+ : NumOutputDims - 1;
+ const Index first_preserved_dim_input_index =
+ m_output_to_input_dim_map[first_preserved_dim_output_index];
+ const bool inner_most_dim_preserved =
+ PreservingInnerMostDims ||
+ (first_preserved_dim_input_index ==
+ (static_cast<int>(Layout) == static_cast<int>(ColMajor)
+ ? 0
+ : NumInputDims - 1));
+
+ // Calculate output block inner/outer dimension sizes.
+ const Index output_block_inner_dim_size =
+ output_block->block_sizes()[first_preserved_dim_output_index];
+ const Index output_block_outer_dim_size =
+ output_block->block_sizes().TotalSize() / output_block_inner_dim_size;
+ // Calculate shard size for first preserved dimension.
+ const Index output_shard_size =
+ target_input_block_sizes[first_preserved_dim_input_index];
+ const Index num_output_shards =
+ (output_block_inner_dim_size + output_shard_size - 1) /
+ output_shard_size;
+
+ // Initialize 'tensor_slice_offsets' from input coords of output index.
+ DSizes<Index, NumInputDims> tensor_slice_offsets;
+ GetInputCoordsForOutputIndex(output_block->first_coeff_index(),
+ &tensor_slice_offsets);
+
+ // Store tensor slice offset in first preserved dimension to be used
+ // to update tensor slice extents in loop below.
+ const Index first_preserved_dim_offset_start =
+ tensor_slice_offsets[first_preserved_dim_input_index];
+
+ array<BlockIteratorState, NumOutputDims> block_iter_state;
+
+ // Initialize state used to iterate through output coefficients
+ // and update 'tensor_slice_offsets' in outer preserved dims.
+ for (int i = 0; i < NumOutputDims - 1; ++i) {
+ const int dim = static_cast<int>(Layout) == static_cast<int>(ColMajor)
+ ? i + 1
+ : NumOutputDims - i - 2;
+ block_iter_state[i].input_dim = m_output_to_input_dim_map[dim];
+ block_iter_state[i].output_size = output_block->block_sizes()[dim];
+ block_iter_state[i].output_count = 0;
+ }
+
+ // Allocate input block memory.
+ ScalarNoConst* input_block_data = static_cast<ScalarNoConst*>(
+ m_device.allocate(max_coeff_count * sizeof(Scalar)));
+ // Allocate reducer memory.
+ const bool packet_reductions_enabled =
+ (Self::InputPacketAccess & Self::ReducerTraits::PacketAccess);
+ const Index num_reducers =
+ (inner_most_dim_preserved && packet_reductions_enabled)
+ ? (output_shard_size / PacketSize + output_shard_size % PacketSize +
+ PacketSize)
+ : output_shard_size;
+ typedef internal::BlockReducer<Self, Op> BlockReducer;
+ BlockReducer* reducers = static_cast<BlockReducer*>(
+ m_device.allocate(num_reducers * sizeof(BlockReducer)));
+
+ InputDimensions input_tensor_dims(m_impl.dimensions());
+ for (Index output_outer_index = 0;
+ output_outer_index < output_block_outer_dim_size;
+ ++output_outer_index) {
+ for (Index output_shard_index = 0; output_shard_index < num_output_shards;
+ ++output_shard_index) {
+ // Initialize 'tensor_slice_extents' for this output shard.
+ DSizes<Index, NumInputDims> tensor_slice_extents(input_slice_sizes);
+ for (int i = 0; i < NumInputDims; ++i) {
+ if (i == first_preserved_dim_input_index) {
+ // Clip first preserved dim size to output shard size.
+ tensor_slice_extents[i] = numext::mini(
+ output_shard_size,
+ input_slice_sizes[i] - (tensor_slice_offsets[i] -
+ first_preserved_dim_offset_start));
+
+ } else if (!m_reduced[i]) {
+ // Clip outer preserved dims to size 1, so that we reduce a
+ // contiguous set of output coefficients.
+ tensor_slice_extents[i] = 1;
+ }
+ }
+
+ // Initialize output coefficient reducers.
+ for (int i = 0; i < num_reducers; ++i) {
+ new (&reducers[i]) BlockReducer(m_reducer);
+ }
+
+ typedef internal::TensorSliceBlockMapper<ScalarNoConst, Index,
+ NumInputDims, Layout>
+ TensorSliceBlockMapper;
+
+ // TODO(andydavis) Consider removing 'input_block_stride_order' if we
+ // find that scattered reads are not worth supporting in
+ // TensorSliceBlockMapper.
+ TensorSliceBlockMapper block_mapper(
+ typename TensorSliceBlockMapper::Dimensions(input_tensor_dims),
+ tensor_slice_offsets, tensor_slice_extents,
+ target_input_block_sizes, DimensionList<Index, NumInputDims>());
+
+ const Index num_outputs_to_update =
+ tensor_slice_extents[first_preserved_dim_input_index];
+ const Index preserved_dim_vector_reducer_count =
+ (inner_most_dim_preserved && packet_reductions_enabled)
+ ? num_outputs_to_update / PacketSize
+ : 0;
+ const Index preserved_dim_vector_coeff_count =
+ inner_most_dim_preserved
+ ? preserved_dim_vector_reducer_count * PacketSize
+ : 0;
+ const Index preserved_dim_reducer_limit =
+ (inner_most_dim_preserved && packet_reductions_enabled)
+ ? (preserved_dim_vector_reducer_count +
+ num_outputs_to_update % PacketSize)
+ : num_outputs_to_update;
+
+ const Index total_block_count = block_mapper.total_block_count();
+ for (Index b = 0; b < total_block_count; ++b) {
+ InputTensorBlock input_block =
+ block_mapper.GetBlockForIndex(b, input_block_data);
+ // Read.
+ m_impl.block(&input_block);
+
+ Index num_values_to_reduce = 1;
+ for (Index i = 0; i < NumInputDims; ++i) {
+ if (m_reduced[i]) {
+ num_values_to_reduce *= input_block.block_sizes()[i];
+ }
+ }
+ // Reduce.
+ if (inner_most_dim_preserved) {
+ const Index input_outer_dim_size =
+ input_block.block_sizes().TotalSize() / num_outputs_to_update;
+ for (Index input_outer_dim_index = 0;
+ input_outer_dim_index < input_outer_dim_size;
+ ++input_outer_dim_index) {
+ const Index input_outer_dim_base =
+ input_outer_dim_index * num_outputs_to_update;
+ for (Index i = 0; i < preserved_dim_vector_reducer_count; ++i) {
+ reducers[i].Reduce(input_outer_dim_base + i * PacketSize,
+ PacketSize, input_block.data());
+ }
+ const Index scalar_reducer_base =
+ input_outer_dim_base + preserved_dim_vector_coeff_count;
+ for (Index i = preserved_dim_vector_reducer_count;
+ i < preserved_dim_reducer_limit; ++i) {
+ reducers[i].Reduce(scalar_reducer_base + i -
+ preserved_dim_vector_reducer_count,
+ 1, input_block.data());
+ }
+ }
+ } else {
+ for (Index i = 0; i < num_outputs_to_update; ++i) {
+ reducers[i].Reduce(i * num_values_to_reduce, num_values_to_reduce,
+ input_block.data());
+ }
+ }
+ }
+
+ // Finalize all reducers for this output shard.
+ const Index output_base_index =
+ output_outer_index * output_block_inner_dim_size +
+ output_shard_index * output_shard_size;
+ if (inner_most_dim_preserved) {
+ EIGEN_ALIGN_MAX
+ typename internal::remove_const<CoeffReturnType>::type
+ values[PacketSize];
+ for (Index i = 0; i < preserved_dim_vector_reducer_count; ++i) {
+ const Index reducer_base = output_base_index + i * PacketSize;
+ internal::pstore<CoeffReturnType, PacketReturnType>(
+ values, reducers[i].FinalizePacket());
+ for (Index j = 0; j < PacketSize; ++j) {
+ output_block->data()[reducer_base + j] = values[j];
+ }
+ }
+ const Index scalar_reducer_base =
+ output_base_index + preserved_dim_vector_coeff_count;
+
+ for (Index i = preserved_dim_vector_reducer_count;
+ i < preserved_dim_reducer_limit; ++i) {
+ output_block->data()[scalar_reducer_base + i -
+ preserved_dim_vector_reducer_count] =
+ reducers[i].Finalize();
+ }
+ } else {
+ for (int i = 0; i < num_outputs_to_update; ++i) {
+ output_block->data()[output_base_index + i] =
+ reducers[i].Finalize();
+ }
+ }
+
+ // Update 'tensor_slice_offsets' by num outputs for this output shard.
+ tensor_slice_offsets[first_preserved_dim_input_index] +=
+ num_outputs_to_update;
+ }
+ // Update slice offset for inner preserved dim.
+ tensor_slice_offsets[first_preserved_dim_input_index] -=
+ output_block_inner_dim_size;
+ // Update slice offsets for remaining output dims.
+ for (int i = 0; i < NumOutputDims - 1; ++i) {
+ BlockIteratorState& b = block_iter_state[i];
+ if (++b.output_count < b.output_size) {
+ ++tensor_slice_offsets[b.input_dim];
+ break;
+ }
+ b.output_count = 0;
+ tensor_slice_offsets[b.input_dim] -= b.output_size - 1;
+ }
+ }
+
+ // Free memory.
+ m_device.deallocate(input_block_data);
+ m_device.deallocate(reducers);
+ }
+
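The loop at the end of block() that updates tensor_slice_offsets for the outer preserved dimensions is an odometer: the first counter is bumped, and on overflow it is reset while the carry propagates to the next dimension (the real code also rewinds the corresponding slice offset as it carries). A simplified sketch of that pattern:

#include <array>
#include <cstddef>

// Odometer-style advance over a set of counters with per-dimension sizes.
// Returns false once every counter has wrapped, i.e. iteration is finished.
template <std::size_t Rank>
bool advance(std::array<std::ptrdiff_t, Rank>& counters,
             const std::array<std::ptrdiff_t, Rank>& sizes) {
  for (std::size_t i = 0; i < Rank; ++i) {
    if (++counters[i] < sizes[i]) return true;  // no carry needed
    counters[i] = 0;                            // overflow: reset and carry
  }
  return false;
}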
+ EIGEN_DEVICE_FUNC typename MakePointer_<CoeffReturnType>::Type data() const { return m_result; }
+#if defined(EIGEN_USE_SYCL)
+ const TensorEvaluator<ArgType, Device>& impl() const { return m_impl; }
+ const Device& device() const { return m_device; }
+ const Dims& xprDims() const { return m_xpr_dims; }
+#endif
private:
template <int, typename, typename> friend struct internal::GenericDimReducer;
- template <typename, typename, bool> friend struct internal::InnerMostDimReducer;
+ template <typename, typename, bool, bool> friend struct internal::InnerMostDimReducer;
template <int, typename, typename, bool> friend struct internal::InnerMostDimPreserver;
template <typename S, typename O, typename D, bool V> friend struct internal::FullReducer;
#ifdef EIGEN_USE_THREADS
template <typename S, typename O, bool V> friend struct internal::FullReducerShard;
#endif
-#if defined(EIGEN_USE_GPU) && defined(__CUDACC__)
- template <int B, int N, typename S, typename R, typename I> friend void internal::FullReductionKernel(R, const S, I, typename S::CoeffReturnType*, unsigned int*);
-#ifdef EIGEN_HAS_CUDA_FP16
- template <typename S, typename R, typename I> friend void internal::ReductionInitFullReduxKernelHalfFloat(R, const S, I, half2*);
- template <int B, int N, typename S, typename R, typename I> friend void internal::FullReductionKernelHalfFloat(R, const S, I, half*, half2*);
- template <int NPT, typename S, typename R, typename I> friend void internal::InnerReductionKernelHalfFloat(R, const S, I, I, half*);
+#if defined(EIGEN_USE_GPU) && (defined(EIGEN_GPUCC))
+ template <int B, int N, typename S, typename R, typename I> KERNEL_FRIEND void internal::FullReductionKernel(R, const S, I, typename S::CoeffReturnType*, unsigned int*);
+#if defined(EIGEN_HAS_GPU_FP16)
+ template <typename S, typename R, typename I> KERNEL_FRIEND void internal::ReductionInitFullReduxKernelHalfFloat(R, const S, I, half2*);
+ template <int B, int N, typename S, typename R, typename I> KERNEL_FRIEND void internal::FullReductionKernelHalfFloat(R, const S, I, half*, half2*);
+ template <int NPT, typename S, typename R, typename I> KERNEL_FRIEND void internal::InnerReductionKernelHalfFloat(R, const S, I, I, half*);
#endif
- template <int NPT, typename S, typename R, typename I> friend void internal::InnerReductionKernel(R, const S, I, I, typename S::CoeffReturnType*);
+ template <int NPT, typename S, typename R, typename I> KERNEL_FRIEND void internal::InnerReductionKernel(R, const S, I, I, typename S::CoeffReturnType*);
- template <int NPT, typename S, typename R, typename I> friend void internal::OuterReductionKernel(R, const S, I, I, typename S::CoeffReturnType*);
+ template <int NPT, typename S, typename R, typename I> KERNEL_FRIEND void internal::OuterReductionKernel(R, const S, I, I, typename S::CoeffReturnType*);
#endif
#if defined(EIGEN_USE_SYCL)
@@ -700,6 +1131,12 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>,
template <typename S, typename O, typename D> friend struct internal::InnerReducer;
+ struct BlockIteratorState {
+ Index input_dim;
+ Index output_size;
+ Index output_count;
+ };
+
// Returns the Index in the input tensor of the first value that needs to be
// used to compute the reduction at output index "index".
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index firstInput(Index index) const {
@@ -742,16 +1179,88 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>,
return startInput;
}
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void GetInputCoordsForOutputIndex(
+ Index index,
+ DSizes<Index, NumInputDims>* coords) const {
+ for (int i = 0; i < NumInputDims; ++i) {
+ (*coords)[i] = 0;
+ }
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ for (int i = NumOutputDims - 1; i > 0; --i) {
+ const Index idx = index / m_fastOutputStrides[i];
+ (*coords)[m_output_to_input_dim_map[i]] = idx;
+ index -= idx * m_outputStrides[i];
+ }
+ (*coords)[m_output_to_input_dim_map[0]] = index;
+ } else {
+ for (int i = 0; i < NumOutputDims - 1; ++i) {
+ const Index idx = index / m_fastOutputStrides[i];
+ (*coords)[m_output_to_input_dim_map[i]] = idx;
+ index -= idx * m_outputStrides[i];
+ }
+ (*coords)[m_output_to_input_dim_map[NumOutputDims-1]] = index;
+ }
+ }
+
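GetInputCoordsForOutputIndex peels one coordinate off the linear output index per dimension, dividing by the precomputed output strides from the outermost dimension inward and remapping each output dimension to its input dimension. A simplified sketch of the column-major de-linearization (the stride array is a hypothetical input; the real code also goes through TensorIntDivisor for fast integer division and through m_output_to_input_dim_map):

#include <array>
#include <cstddef>

// De-linearize a flat column-major index into per-dimension coordinates,
// given strides[i] = dims[0] * ... * dims[i-1] (strides[0] == 1).
template <std::size_t Rank>
std::array<std::ptrdiff_t, Rank> unravel_col_major(
    std::ptrdiff_t index, const std::array<std::ptrdiff_t, Rank>& strides) {
  std::array<std::ptrdiff_t, Rank> coords{};
  for (std::size_t i = Rank; i-- > 1;) {
    coords[i] = index / strides[i];
    index -= coords[i] * strides[i];
  }
  coords[0] = index;
  return coords;
}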
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void CalculateTargetInputBlockShape(
+ const Index max_coeff_count,
+ const DSizes<Index, NumInputDims>& input_slice_sizes,
+ DSizes<Index, NumInputDims>* target_input_block_sizes) const {
+ typedef internal::BlockReducer<Self, Op> BlockReducer;
+ // TODO(andydavis) Compute reducer overhead correctly for the case where
+ // we are preserving the inner most dimension, and a single reducer
+ // reduces a packet's worth of output coefficients.
+ const Index reducer_overhead = sizeof(BlockReducer) / sizeof(Scalar);
+
+ Index coeff_to_allocate = max_coeff_count;
+ bool first_preserved_dim_allocated = false;
+ bool first_reduced_dim_allocated = false;
+ for (int i = 0; i < NumInputDims; ++i) {
+ const int dim = static_cast<int>(Layout) == static_cast<int>(ColMajor)
+ ? i
+ : NumInputDims - i - 1;
+ (*target_input_block_sizes)[dim] = 1;
+ if (m_reduced[dim]) {
+ // TODO(andydavis) Consider allocating to multiple reduced dimensions.
+ // Watch out for cases where reduced dimensions are not contiguous,
+ // which induces scattered reads.
+ if (!first_reduced_dim_allocated) {
+ (*target_input_block_sizes)[dim] =
+ numext::mini(input_slice_sizes[dim], coeff_to_allocate);
+ coeff_to_allocate /= (*target_input_block_sizes)[dim];
+ first_reduced_dim_allocated = true;
+ }
+ } else if (!first_preserved_dim_allocated) {
+ // TODO(andydavis) Include output block size in this L1 working set
+ // calculation.
+ const Index alloc_size = numext::maxi(
+ static_cast<Index>(1), coeff_to_allocate / reducer_overhead);
+ (*target_input_block_sizes)[dim] =
+ numext::mini(input_slice_sizes[dim], alloc_size);
+ coeff_to_allocate = numext::maxi(
+ static_cast<Index>(1),
+ coeff_to_allocate /
+ ((*target_input_block_sizes)[dim] * reducer_overhead));
+ first_preserved_dim_allocated = true;
+ }
+ }
+ }
+
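CalculateTargetInputBlockShape is a greedy heuristic: it spends the coefficient budget on the first reduced dimension it meets, then sizes the first preserved dimension so that one BlockReducer per output coefficient still fits in the budget, and leaves every other dimension at 1. A simplified sketch of that heuristic over plain vectors (names and types here are illustrative, not Eigen's):

#include <algorithm>
#include <cstddef>
#include <vector>

std::vector<std::ptrdiff_t> target_block_shape(
    const std::vector<std::ptrdiff_t>& slice_sizes,
    const std::vector<bool>& reduced,
    std::ptrdiff_t max_coeffs, std::ptrdiff_t reducer_overhead) {
  std::vector<std::ptrdiff_t> shape(slice_sizes.size(), 1);
  std::ptrdiff_t budget = max_coeffs;
  bool got_reduced = false, got_preserved = false;
  for (std::size_t d = 0; d < slice_sizes.size(); ++d) {
    if (reduced[d] && !got_reduced) {
      // Give the whole remaining budget to the first reduced dimension.
      shape[d] = std::min(slice_sizes[d], budget);
      budget /= shape[d];
      got_reduced = true;
    } else if (!reduced[d] && !got_preserved) {
      // Size the first preserved dimension by the per-output reducer cost.
      const std::ptrdiff_t alloc =
          std::max<std::ptrdiff_t>(1, budget / reducer_overhead);
      shape[d] = std::min(slice_sizes[d], alloc);
      budget = std::max<std::ptrdiff_t>(1, budget / (shape[d] * reducer_overhead));
      got_preserved = true;
    }
  }
  return shape;
}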
// Bitmap indicating if an input dimension is reduced or not.
array<bool, NumInputDims> m_reduced;
// Dimensions of the output of the operation.
Dimensions m_dimensions;
// Precomputed strides for the output tensor.
array<Index, NumOutputDims> m_outputStrides;
+ array<internal::TensorIntDivisor<Index>, NumOutputDims> m_fastOutputStrides;
// Subset of strides of the input tensor for the non-reduced dimensions.
// Indexed by output dimensions.
static const int NumPreservedStrides = max_n_1<NumOutputDims>::size;
array<Index, NumPreservedStrides> m_preservedStrides;
+ // Map from output to input dimension index.
+ array<Index, NumOutputDims> m_output_to_input_dim_map;
+ // How many values go into each reduction.
+ Index m_numValuesToReduce;
// Subset of strides of the input tensor for the reduced dimensions.
// Indexed by reduced dimensions.
@@ -767,7 +1276,7 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>,
Op m_reducer;
// For full reductions
-#if defined(EIGEN_USE_GPU) && defined(__CUDACC__)
+#if defined(EIGEN_USE_GPU) && (defined(EIGEN_GPUCC))
static const bool RunningOnGPU = internal::is_same<Device, Eigen::GpuDevice>::value;
static const bool RunningOnSycl = false;
#elif defined(EIGEN_USE_SYCL)
@@ -780,7 +1289,10 @@ static const bool RunningOnGPU = false;
typename MakePointer_<CoeffReturnType>::Type m_result;
const Device& m_device;
- const Dims& m_xpr_dims;
+
+#if defined(EIGEN_USE_SYCL)
+ const Dims m_xpr_dims;
+#endif
};
} // end namespace Eigen
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorReductionCuda.h b/unsupported/Eigen/CXX11/src/Tensor/TensorReductionCuda.h
index 65638b6a8..68780cd3c 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorReductionCuda.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorReductionCuda.h
@@ -1,750 +1,6 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-#ifndef EIGEN_CXX11_TENSOR_TENSOR_REDUCTION_CUDA_H
-#define EIGEN_CXX11_TENSOR_TENSOR_REDUCTION_CUDA_H
-
-namespace Eigen {
-namespace internal {
-
-
-#if defined(EIGEN_USE_GPU) && defined(__CUDACC__)
-// Full reducers for GPU, don't vectorize for now
-
-// Reducer function that enables multiple cuda thread to safely accumulate at the same
-// output address. It basically reads the current value of the output variable, and
-// attempts to update it with the new value. If in the meantime another cuda thread
-// updated the content of the output address it will try again.
-template <typename T, typename R>
-__device__ EIGEN_ALWAYS_INLINE void atomicReduce(T* output, T accum, R& reducer) {
-#if __CUDA_ARCH__ >= 300
- if (sizeof(T) == 4)
- {
- unsigned int oldval = *reinterpret_cast<unsigned int*>(output);
- unsigned int newval = oldval;
- reducer.reduce(accum, reinterpret_cast<T*>(&newval));
- if (newval == oldval) {
- return;
- }
- unsigned int readback;
- while ((readback = atomicCAS((unsigned int*)output, oldval, newval)) != oldval) {
- oldval = readback;
- newval = oldval;
- reducer.reduce(accum, reinterpret_cast<T*>(&newval));
- if (newval == oldval) {
- return;
- }
- }
- }
- else if (sizeof(T) == 8) {
- unsigned long long oldval = *reinterpret_cast<unsigned long long*>(output);
- unsigned long long newval = oldval;
- reducer.reduce(accum, reinterpret_cast<T*>(&newval));
- if (newval == oldval) {
- return;
- }
- unsigned long long readback;
- while ((readback = atomicCAS((unsigned long long*)output, oldval, newval)) != oldval) {
- oldval = readback;
- newval = oldval;
- reducer.reduce(accum, reinterpret_cast<T*>(&newval));
- if (newval == oldval) {
- return;
- }
- }
- }
- else {
- assert(0 && "Wordsize not supported");
- }
-#else
- assert(0 && "Shouldn't be called on unsupported device");
-#endif
-}
-
-// We extend atomicExch to support extra data types
-template <typename Type>
-__device__ inline Type atomicExchCustom(Type* address, Type val) {
- return atomicExch(address, val);
-}
-
-template <>
-__device__ inline double atomicExchCustom(double* address, double val) {
- unsigned long long int* address_as_ull = reinterpret_cast<unsigned long long int*>(address);
- return __longlong_as_double(atomicExch(address_as_ull, __double_as_longlong(val)));
-}
-
-#ifdef EIGEN_HAS_CUDA_FP16
-template <template <typename T> class R>
-__device__ inline void atomicReduce(half2* output, half2 accum, R<half>& reducer) {
- unsigned int oldval = *reinterpret_cast<unsigned int*>(output);
- unsigned int newval = oldval;
- reducer.reducePacket(accum, reinterpret_cast<half2*>(&newval));
- if (newval == oldval) {
- return;
- }
- unsigned int readback;
- while ((readback = atomicCAS((unsigned int*)output, oldval, newval)) != oldval) {
- oldval = readback;
- newval = oldval;
- reducer.reducePacket(accum, reinterpret_cast<half2*>(&newval));
- if (newval == oldval) {
- return;
- }
- }
-}
+#if defined(__clang__) || defined(__GNUC__)
+#warning "Deprecated header file, please either include the main Eigen/CXX11/Tensor header or the respective TensorReductionGpu.h file"
#endif
-template <>
-__device__ inline void atomicReduce(float* output, float accum, SumReducer<float>&) {
-#if __CUDA_ARCH__ >= 300
- atomicAdd(output, accum);
-#else
- assert(0 && "Shouldn't be called on unsupported device");
-#endif
-}
-
-
-template <typename CoeffType, typename Index>
-__global__ void ReductionInitKernel(const CoeffType val, Index num_preserved_coeffs, CoeffType* output) {
- const Index thread_id = blockIdx.x * blockDim.x + threadIdx.x;
- const Index num_threads = blockDim.x * gridDim.x;
- for (Index i = thread_id; i < num_preserved_coeffs; i += num_threads) {
- output[i] = val;
- }
-}
-
-
-template <int BlockSize, int NumPerThread, typename Self,
- typename Reducer, typename Index>
-__global__ void FullReductionKernel(Reducer reducer, const Self input, Index num_coeffs,
- typename Self::CoeffReturnType* output, unsigned int* semaphore) {
-#if __CUDA_ARCH__ >= 300
- // Initialize the output value
- const Index first_index = blockIdx.x * BlockSize * NumPerThread + threadIdx.x;
- if (gridDim.x == 1) {
- if (first_index == 0) {
- *output = reducer.initialize();
- }
- }
- else {
- if (threadIdx.x == 0) {
- unsigned int block = atomicCAS(semaphore, 0u, 1u);
- if (block == 0) {
- // We're the first block to run, initialize the output value
- atomicExchCustom(output, reducer.initialize());
- __threadfence();
- atomicExch(semaphore, 2u);
- }
- else {
- // Wait for the first block to initialize the output value.
- // Use atomicCAS here to ensure that the reads aren't cached
- unsigned int val;
- do {
- val = atomicCAS(semaphore, 2u, 2u);
- }
- while (val < 2u);
- }
- }
- }
-
- __syncthreads();
-
- eigen_assert(gridDim.x == 1 || *semaphore >= 2u);
-
- typename Self::CoeffReturnType accum = reducer.initialize();
- Index max_iter = numext::mini<Index>(num_coeffs - first_index, NumPerThread*BlockSize);
- for (Index i = 0; i < max_iter; i+=BlockSize) {
- const Index index = first_index + i;
- eigen_assert(index < num_coeffs);
- typename Self::CoeffReturnType val = input.m_impl.coeff(index);
- reducer.reduce(val, &accum);
- }
-
-#pragma unroll
- for (int offset = warpSize/2; offset > 0; offset /= 2) {
- reducer.reduce(__shfl_down(accum, offset, warpSize), &accum);
- }
-
- if ((threadIdx.x & (warpSize - 1)) == 0) {
- atomicReduce(output, accum, reducer);
- }
-
- if (gridDim.x > 1 && threadIdx.x == 0) {
- // Let the last block reset the semaphore
- atomicInc(semaphore, gridDim.x + 1);
- }
-#else
- assert(0 && "Shouldn't be called on unsupported device");
-#endif
-}
-
-
-#ifdef EIGEN_HAS_CUDA_FP16
-template <typename Self,
- typename Reducer, typename Index>
-__global__ void ReductionInitFullReduxKernelHalfFloat(Reducer reducer, const Self input, Index num_coeffs, half2* scratch) {
- eigen_assert(blockDim.x == 1);
- eigen_assert(gridDim.x == 1);
- if (num_coeffs % 2 != 0) {
- half last = input.m_impl.coeff(num_coeffs-1);
- *scratch = __halves2half2(last, reducer.initialize());
- } else {
- *scratch = reducer.template initializePacket<half2>();
- }
-}
-
-template <typename Self,
- typename Reducer, typename Index>
-__global__ void ReductionInitKernelHalfFloat(Reducer reducer, const Self input, Index num_coeffs, half* output) {
- const Index thread_id = blockIdx.x * blockDim.x + threadIdx.x;
- const Index num_threads = blockDim.x * gridDim.x;
- const Index num_packets = num_coeffs / 2;
- for (Index i = thread_id; i < num_packets; i += num_threads) {
- ((half2*)output)[i] = reducer.template initializePacket<half2>();
- }
-
- if (thread_id == 0 && num_coeffs % 2 != 0) {
- output[num_coeffs-1] = reducer.initialize();
- }
-}
-
-template <int BlockSize, int NumPerThread, typename Self,
- typename Reducer, typename Index>
-__global__ void FullReductionKernelHalfFloat(Reducer reducer, const Self input, Index num_coeffs,
- half* output, half2* scratch) {
- eigen_assert(NumPerThread % 2 == 0);
-
- const Index first_index = blockIdx.x * BlockSize * NumPerThread + 2*threadIdx.x;
-
- // Initialize the output value if it wasn't initialized by the ReductionInitKernel
- if (gridDim.x == 1 && first_index == 0) {
- if (num_coeffs % 2 != 0) {
- half last = input.m_impl.coeff(num_coeffs-1);
- *scratch = __halves2half2(last, reducer.initialize());
- } else {
- *scratch = reducer.template initializePacket<half2>();
- }
- __syncthreads();
- }
-
- half2 accum = reducer.template initializePacket<half2>();
- const Index max_iter = numext::mini<Index>((num_coeffs - first_index) / 2, NumPerThread*BlockSize / 2);
- for (Index i = 0; i < max_iter; i += BlockSize) {
- const Index index = first_index + 2*i;
- eigen_assert(index + 1 < num_coeffs);
- half2 val = input.m_impl.template packet<Unaligned>(index);
- reducer.reducePacket(val, &accum);
- }
-
-#pragma unroll
- for (int offset = warpSize/2; offset > 0; offset /= 2) {
- reducer.reducePacket(__shfl_down(accum, offset, warpSize), &accum);
- }
-
- if ((threadIdx.x & (warpSize - 1)) == 0) {
- atomicReduce(scratch, accum, reducer);
- }
-
- __syncthreads();
-
- if (gridDim.x == 1 && first_index == 0) {
- half tmp = __low2half(*scratch);
- reducer.reduce(__high2half(*scratch), &tmp);
- *output = tmp;
- }
-}
-
-template <typename Op>
-__global__ void ReductionCleanupKernelHalfFloat(Op& reducer, half* output, half2* scratch) {
- eigen_assert(threadIdx.x == 1);
- half tmp = __low2half(*scratch);
- reducer.reduce(__high2half(*scratch), &tmp);
- *output = tmp;
-}
-
-#endif
-
-template <typename Self, typename Op, typename OutputType, bool PacketAccess, typename Enabled = void>
-struct FullReductionLauncher {
- static void run(const Self&, Op&, const GpuDevice&, OutputType*, typename Self::Index) {
- assert(false && "Should only be called on doubles, floats and half floats");
- }
-};
-
-// Specialization for float and double
-template <typename Self, typename Op, typename OutputType, bool PacketAccess>
-struct FullReductionLauncher<
- Self, Op, OutputType, PacketAccess,
- typename internal::enable_if<
- internal::is_same<float, OutputType>::value ||
- internal::is_same<double, OutputType>::value,
- void>::type> {
- static void run(const Self& self, Op& reducer, const GpuDevice& device, OutputType* output, typename Self::Index num_coeffs) {
- typedef typename Self::Index Index;
- typedef typename Self::CoeffReturnType Scalar;
- const int block_size = 256;
- const int num_per_thread = 128;
- const int num_blocks = divup<int>(num_coeffs, block_size * num_per_thread);
-
- unsigned int* semaphore = NULL;
- if (num_blocks > 1) {
- semaphore = device.semaphore();
- }
-
- LAUNCH_CUDA_KERNEL((FullReductionKernel<block_size, num_per_thread, Self, Op, Index>),
- num_blocks, block_size, 0, device, reducer, self, num_coeffs, output, semaphore);
- }
-};
-
-#ifdef EIGEN_HAS_CUDA_FP16
-template <typename Self, typename Op>
-struct FullReductionLauncher<Self, Op, Eigen::half, false> {
- static void run(const Self&, Op&, const GpuDevice&, half*, typename Self::Index) {
- assert(false && "Should not be called since there is no packet accessor");
- }
-};
-
-template <typename Self, typename Op>
-struct FullReductionLauncher<Self, Op, Eigen::half, true> {
- static void run(const Self& self, Op& reducer, const GpuDevice& device, half* output, typename Self::Index num_coeffs) {
- typedef typename Self::Index Index;
-
- const int block_size = 256;
- const int num_per_thread = 128;
- const int num_blocks = divup<int>(num_coeffs, block_size * num_per_thread);
- half2* scratch = static_cast<half2*>(device.scratchpad());
-
- if (num_blocks > 1) {
- // We initialize the output and the scrathpad outside the reduction kernel when we can't be sure that there
- // won't be a race conditions between multiple thread blocks.
- LAUNCH_CUDA_KERNEL((ReductionInitFullReduxKernelHalfFloat<Self, Op, Index>),
- 1, 1, 0, device, reducer, self, num_coeffs, scratch);
- }
-
- LAUNCH_CUDA_KERNEL((FullReductionKernelHalfFloat<block_size, num_per_thread, Self, Op, Index>),
- num_blocks, block_size, 0, device, reducer, self, num_coeffs, output, scratch);
-
- if (num_blocks > 1) {
- LAUNCH_CUDA_KERNEL((ReductionCleanupKernelHalfFloat<Op>),
- 1, 1, 0, device, reducer, output, scratch);
- }
- }
-};
-#endif
-
-
-template <typename Self, typename Op, bool Vectorizable>
-struct FullReducer<Self, Op, GpuDevice, Vectorizable> {
- // Unfortunately nvidia doesn't support well exotic types such as complex,
- // so reduce the scope of the optimized version of the code to the simple cases
- // of doubles, floats and half floats
-#ifdef EIGEN_HAS_CUDA_FP16
- static const bool HasOptimizedImplementation = !Op::IsStateful &&
- (internal::is_same<typename Self::CoeffReturnType, float>::value ||
- internal::is_same<typename Self::CoeffReturnType, double>::value ||
- (internal::is_same<typename Self::CoeffReturnType, Eigen::half>::value && reducer_traits<Op, GpuDevice>::PacketAccess));
-#else
- static const bool HasOptimizedImplementation = !Op::IsStateful &&
- (internal::is_same<typename Self::CoeffReturnType, float>::value ||
- internal::is_same<typename Self::CoeffReturnType, double>::value);
-#endif
-
- template <typename OutputType>
- static void run(const Self& self, Op& reducer, const GpuDevice& device, OutputType* output) {
- assert(HasOptimizedImplementation && "Should only be called on doubles, floats or half floats");
- const Index num_coeffs = array_prod(self.m_impl.dimensions());
- // Don't crash when we're called with an input tensor of size 0.
- if (num_coeffs == 0) {
- return;
- }
-
- FullReductionLauncher<Self, Op, OutputType, reducer_traits<Op, GpuDevice>::PacketAccess>::run(self, reducer, device, output, num_coeffs);
- }
-};
-
-
-template <int NumPerThread, typename Self,
- typename Reducer, typename Index>
-__global__ void InnerReductionKernel(Reducer reducer, const Self input, Index num_coeffs_to_reduce, Index num_preserved_coeffs,
- typename Self::CoeffReturnType* output) {
-#if __CUDA_ARCH__ >= 300
- typedef typename Self::CoeffReturnType Type;
- eigen_assert(blockDim.y == 1);
- eigen_assert(blockDim.z == 1);
- eigen_assert(gridDim.y == 1);
- eigen_assert(gridDim.z == 1);
-
- const int unroll_times = 16;
- eigen_assert(NumPerThread % unroll_times == 0);
-
- const Index input_col_blocks = divup<Index>(num_coeffs_to_reduce, blockDim.x * NumPerThread);
- const Index num_input_blocks = input_col_blocks * num_preserved_coeffs;
-
- const Index num_threads = blockDim.x * gridDim.x;
- const Index thread_id = blockIdx.x * blockDim.x + threadIdx.x;
-
- // Initialize the output values if they weren't initialized by the ReductionInitKernel
- if (gridDim.x == 1) {
- for (Index i = thread_id; i < num_preserved_coeffs; i += num_threads) {
- output[i] = reducer.initialize();
- }
- __syncthreads();
- }
-
- for (Index i = blockIdx.x; i < num_input_blocks; i += gridDim.x) {
- const Index row = i / input_col_blocks;
-
- if (row < num_preserved_coeffs) {
- const Index col_block = i % input_col_blocks;
- const Index col_begin = col_block * blockDim.x * NumPerThread + threadIdx.x;
-
- Type reduced_val = reducer.initialize();
-
- for (Index j = 0; j < NumPerThread; j += unroll_times) {
- const Index last_col = col_begin + blockDim.x * (j + unroll_times - 1);
- if (last_col >= num_coeffs_to_reduce) {
- for (Index col = col_begin + blockDim.x * j; col < num_coeffs_to_reduce; col += blockDim.x) {
- const Type val = input.m_impl.coeff(row * num_coeffs_to_reduce + col);
- reducer.reduce(val, &reduced_val);
- }
- break;
- } else {
- // Faster version of the loop with no branches after unrolling.
-#pragma unroll
- for (int k = 0; k < unroll_times; ++k) {
- const Index col = col_begin + blockDim.x * (j + k);
- reducer.reduce(input.m_impl.coeff(row * num_coeffs_to_reduce + col), &reduced_val);
- }
- }
- }
-
-#pragma unroll
- for (int offset = warpSize/2; offset > 0; offset /= 2) {
- reducer.reduce(__shfl_down(reduced_val, offset), &reduced_val);
- }
-
- if ((threadIdx.x & (warpSize - 1)) == 0) {
- atomicReduce(&(output[row]), reduced_val, reducer);
- }
- }
- }
-#else
- assert(0 && "Shouldn't be called on unsupported device");
-#endif
-}
-
-#ifdef EIGEN_HAS_CUDA_FP16
-
-template <int NumPerThread, typename Self,
- typename Reducer, typename Index>
-__global__ void InnerReductionKernelHalfFloat(Reducer reducer, const Self input, Index num_coeffs_to_reduce, Index num_preserved_coeffs,
- half* output) {
- eigen_assert(blockDim.y == 1);
- eigen_assert(blockDim.z == 1);
- eigen_assert(gridDim.y == 1);
- eigen_assert(gridDim.z == 1);
-
- const int unroll_times = 16;
- eigen_assert(NumPerThread % unroll_times == 0);
- eigen_assert(unroll_times % 2 == 0);
-
- const Index input_col_blocks = divup<Index>(num_coeffs_to_reduce, blockDim.x * NumPerThread * 2);
- const Index num_input_blocks = divup<Index>(input_col_blocks * num_preserved_coeffs, 2);
-
- const Index num_threads = blockDim.x * gridDim.x;
- const Index thread_id = blockIdx.x * blockDim.x + threadIdx.x;
-
- // Initialize the output values if they weren't initialized by the ReductionInitKernel
- if (gridDim.x == 1) {
- Index i = 2*thread_id;
- for (; i + 1 < num_preserved_coeffs; i += 2*num_threads) {
- half* loc = output + i;
- *((half2*)loc) = reducer.template initializePacket<half2>();
- }
- if (i < num_preserved_coeffs) {
- output[i] = reducer.initialize();
- }
- __syncthreads();
- }
-
- for (Index i = blockIdx.x; i < num_input_blocks; i += gridDim.x) {
- const Index row = 2 * (i / input_col_blocks);
-
- if (row + 1 < num_preserved_coeffs) {
- const Index col_block = i % input_col_blocks;
- const Index col_begin = 2 * (col_block * blockDim.x * NumPerThread + threadIdx.x);
-
- half2 reduced_val1 = reducer.template initializePacket<half2>();
- half2 reduced_val2 = reducer.template initializePacket<half2>();
-
- for (Index j = 0; j < NumPerThread; j += unroll_times) {
- const Index last_col = col_begin + blockDim.x * (j + unroll_times - 1) * 2;
- if (last_col >= num_coeffs_to_reduce) {
- Index col = col_begin + blockDim.x * j;
- for (; col + 1 < num_coeffs_to_reduce; col += blockDim.x) {
- const half2 val1 = input.m_impl.template packet<Unaligned>(row * num_coeffs_to_reduce + col);
- reducer.reducePacket(val1, &reduced_val1);
- const half2 val2 = input.m_impl.template packet<Unaligned>((row+1) * num_coeffs_to_reduce + col);
- reducer.reducePacket(val2, &reduced_val2);
- }
- if (col < num_coeffs_to_reduce) {
- // Peel;
- const half last1 = input.m_impl.coeff(row * num_coeffs_to_reduce + col);
- const half2 val1 = __halves2half2(last1, reducer.initialize());
- reducer.reducePacket(val1, &reduced_val1);
- const half last2 = input.m_impl.coeff((row+1) * num_coeffs_to_reduce + col);
- const half2 val2 = __halves2half2(last2, reducer.initialize());
- reducer.reducePacket(val2, &reduced_val2);
- }
- break;
- } else {
- // Faster version of the loop with no branches after unrolling.
-#pragma unroll
- for (int k = 0; k < unroll_times; ++k) {
- const Index col = col_begin + blockDim.x * (j + k) * 2;
- reducer.reducePacket(input.m_impl.template packet<Unaligned>(row * num_coeffs_to_reduce + col), &reduced_val1);
- reducer.reducePacket(input.m_impl.template packet<Unaligned>((row + 1)* num_coeffs_to_reduce + col), &reduced_val2);
- }
- }
- }
-
-#pragma unroll
- for (int offset = warpSize/2; offset > 0; offset /= 2) {
- reducer.reducePacket(__shfl_down(reduced_val1, offset, warpSize), &reduced_val1);
- reducer.reducePacket(__shfl_down(reduced_val2, offset, warpSize), &reduced_val2);
- }
-
- half val1 = __low2half(reduced_val1);
- reducer.reduce(__high2half(reduced_val1), &val1);
- half val2 = __low2half(reduced_val2);
- reducer.reduce(__high2half(reduced_val2), &val2);
- half2 val = __halves2half2(val1, val2);
-
- if ((threadIdx.x & (warpSize - 1)) == 0) {
- half* loc = output + row;
- atomicReduce((half2*)loc, val, reducer);
- }
- }
- }
-}
-
-#endif
-
-template <typename Self, typename Op, typename OutputType, bool PacketAccess, typename Enabled = void>
-struct InnerReductionLauncher {
- static EIGEN_DEVICE_FUNC bool run(const Self&, Op&, const GpuDevice&, OutputType*, typename Self::Index, typename Self::Index) {
- assert(false && "Should only be called to reduce doubles, floats and half floats on a gpu device");
- return true;
- }
-};
-
-// Specialization for float and double
-template <typename Self, typename Op, typename OutputType, bool PacketAccess>
-struct InnerReductionLauncher<
- Self, Op, OutputType, PacketAccess,
- typename internal::enable_if<
- internal::is_same<float, OutputType>::value ||
- internal::is_same<double, OutputType>::value,
- void>::type> {
- static bool run(const Self& self, Op& reducer, const GpuDevice& device, OutputType* output, typename Self::Index num_coeffs_to_reduce, typename Self::Index num_preserved_vals) {
- typedef typename Self::Index Index;
-
- const Index num_coeffs = num_coeffs_to_reduce * num_preserved_vals;
- const int block_size = 256;
- const int num_per_thread = 128;
- const int dyn_blocks = divup<int>(num_coeffs, block_size * num_per_thread);
- const int max_blocks = device.getNumCudaMultiProcessors() *
- device.maxCudaThreadsPerMultiProcessor() / block_size;
- const int num_blocks = numext::mini<int>(max_blocks, dyn_blocks);
-
- if (num_blocks > 1) {
- // We initialize the outputs outside the reduction kernel when we can't be sure that there
- // won't be a race conditions between multiple thread blocks.
- const int dyn_blocks = divup<int>(num_preserved_vals, 1024);
- const int max_blocks = device.getNumCudaMultiProcessors() *
- device.maxCudaThreadsPerMultiProcessor() / 1024;
- const int num_blocks = numext::mini<int>(max_blocks, dyn_blocks);
- LAUNCH_CUDA_KERNEL((ReductionInitKernel<OutputType, Index>),
- num_blocks, 1024, 0, device, reducer.initialize(),
- num_preserved_vals, output);
- }
-
- LAUNCH_CUDA_KERNEL((InnerReductionKernel<num_per_thread, Self, Op, Index>),
- num_blocks, block_size, 0, device, reducer, self, num_coeffs_to_reduce, num_preserved_vals, output);
-
- return false;
- }
-};
-
-#ifdef EIGEN_HAS_CUDA_FP16
-template <typename Self, typename Op>
-struct InnerReductionLauncher<Self, Op, Eigen::half, false> {
- static bool run(const Self&, Op&, const GpuDevice&, half*, typename Self::Index, typename Self::Index) {
- assert(false && "Should not be called since there is no packet accessor");
- return true;
- }
-};
-
-template <typename Self, typename Op>
-struct InnerReductionLauncher<Self, Op, Eigen::half, true> {
- static bool run(const Self& self, Op& reducer, const GpuDevice& device, half* output, typename Self::Index num_coeffs_to_reduce, typename Self::Index num_preserved_vals) {
- typedef typename Self::Index Index;
-
- if (num_preserved_vals % 2 != 0) {
- // Not supported yet, revert to the slower code path
- return true;
- }
-
- const Index num_coeffs = num_coeffs_to_reduce * num_preserved_vals;
- const int block_size = /*256*/128;
- const int num_per_thread = /*128*/64;
- const int dyn_blocks = divup<int>(num_coeffs, block_size * num_per_thread);
- const int max_blocks = device.getNumCudaMultiProcessors() *
- device.maxCudaThreadsPerMultiProcessor() / block_size;
- const int num_blocks = numext::mini<int>(max_blocks, dyn_blocks);
-
- if (num_blocks > 1) {
- // We initialize the outputs outside the reduction kernel when we can't be sure that there
- // won't be a race conditions between multiple thread blocks.
- const int dyn_blocks = divup<int>(num_preserved_vals, 1024);
- const int max_blocks = device.getNumCudaMultiProcessors() *
- device.maxCudaThreadsPerMultiProcessor() / 1024;
- const int num_blocks = numext::mini<int>(max_blocks, dyn_blocks);
- LAUNCH_CUDA_KERNEL((ReductionInitKernelHalfFloat<Self, Op, Index>),
- 1, 1, 0, device, reducer, self, num_preserved_vals, output);
- }
-
- LAUNCH_CUDA_KERNEL((InnerReductionKernelHalfFloat<num_per_thread, Self, Op, Index>),
- num_blocks, block_size, 0, device, reducer, self, num_coeffs_to_reduce, num_preserved_vals, output);
-
- return false;
- }
-};
-#endif
-
-
-template <typename Self, typename Op>
-struct InnerReducer<Self, Op, GpuDevice> {
- // Unfortunately nvidia doesn't support well exotic types such as complex,
- // so reduce the scope of the optimized version of the code to the simple case
- // of floats and half floats.
-#ifdef EIGEN_HAS_CUDA_FP16
- static const bool HasOptimizedImplementation = !Op::IsStateful &&
- (internal::is_same<typename Self::CoeffReturnType, float>::value ||
- internal::is_same<typename Self::CoeffReturnType, double>::value ||
- (internal::is_same<typename Self::CoeffReturnType, Eigen::half>::value && reducer_traits<Op, GpuDevice>::PacketAccess));
-#else
- static const bool HasOptimizedImplementation = !Op::IsStateful &&
- (internal::is_same<typename Self::CoeffReturnType, float>::value ||
- internal::is_same<typename Self::CoeffReturnType, double>::value);
-#endif
-
- template <typename OutputType>
- static bool run(const Self& self, Op& reducer, const GpuDevice& device, OutputType* output, typename Self::Index num_coeffs_to_reduce, typename Self::Index num_preserved_vals) {
- assert(HasOptimizedImplementation && "Should only be called on doubles, floats or half floats");
- const Index num_coeffs = array_prod(self.m_impl.dimensions());
- // Don't crash when we're called with an input tensor of size 0.
- if (num_coeffs == 0) {
- return true;
- }
- // It's faster to use the usual code.
- if (num_coeffs_to_reduce <= 128) {
- return true;
- }
-
- return InnerReductionLauncher<Self, Op, OutputType, reducer_traits<Op, GpuDevice>::PacketAccess>::run(self, reducer, device, output, num_coeffs_to_reduce, num_preserved_vals);
- }
-};
-
-template <int NumPerThread, typename Self,
- typename Reducer, typename Index>
-__global__ void OuterReductionKernel(Reducer reducer, const Self input, Index num_coeffs_to_reduce, Index num_preserved_coeffs,
- typename Self::CoeffReturnType* output) {
- const Index num_threads = blockDim.x * gridDim.x;
- const Index thread_id = blockIdx.x * blockDim.x + threadIdx.x;
- // Initialize the output values if they weren't initialized by the ReductionInitKernel
- if (gridDim.x == 1) {
- for (Index i = thread_id; i < num_preserved_coeffs; i += num_threads) {
- output[i] = reducer.initialize();
- }
- __syncthreads();
- }
-
- // Do the reduction.
- const Index max_iter = num_preserved_coeffs * divup<Index>(num_coeffs_to_reduce, NumPerThread);
- for (Index i = thread_id; i < max_iter; i += num_threads) {
- const Index input_col = i % num_preserved_coeffs;
- const Index input_row = (i / num_preserved_coeffs) * NumPerThread;
- typename Self::CoeffReturnType reduced_val = reducer.initialize();
- const Index max_row = numext::mini(input_row + NumPerThread, num_coeffs_to_reduce);
- for (Index j = input_row; j < max_row; j++) {
- typename Self::CoeffReturnType val = input.m_impl.coeff(j * num_preserved_coeffs + input_col);
- reducer.reduce(val, &reduced_val);
- }
- atomicReduce(&(output[input_col]), reduced_val, reducer);
- }
-}
-
-
-template <typename Self, typename Op>
-struct OuterReducer<Self, Op, GpuDevice> {
- // Unfortunately nvidia doesn't support well exotic types such as complex,
- // so reduce the scope of the optimized version of the code to the simple case
- // of floats.
- static const bool HasOptimizedImplementation = !Op::IsStateful &&
- (internal::is_same<typename Self::CoeffReturnType, float>::value ||
- internal::is_same<typename Self::CoeffReturnType, double>::value);
- template <typename Device, typename OutputType>
- static EIGEN_DEVICE_FUNC bool run(const Self&, Op&, const Device&, OutputType*, typename Self::Index, typename Self::Index) {
- assert(false && "Should only be called to reduce doubles or floats on a gpu device");
- return true;
- }
-
- static bool run(const Self& self, Op& reducer, const GpuDevice& device, float* output, typename Self::Index num_coeffs_to_reduce, typename Self::Index num_preserved_vals) {
- typedef typename Self::Index Index;
-
- // It's faster to use the usual code.
- if (num_coeffs_to_reduce <= 32) {
- return true;
- }
-
- const Index num_coeffs = num_coeffs_to_reduce * num_preserved_vals;
- const int block_size = 256;
- const int num_per_thread = 16;
- const int dyn_blocks = divup<int>(num_coeffs, block_size * num_per_thread);
- const int max_blocks = device.getNumCudaMultiProcessors() *
- device.maxCudaThreadsPerMultiProcessor() / block_size;
- const int num_blocks = numext::mini<int>(max_blocks, dyn_blocks);
-
- if (num_blocks > 1) {
- // We initialize the outputs in the reduction kernel itself when we don't have to worry
- // about race conditions between multiple thread blocks.
- const int dyn_blocks = divup<int>(num_preserved_vals, 1024);
- const int max_blocks = device.getNumCudaMultiProcessors() *
- device.maxCudaThreadsPerMultiProcessor() / 1024;
- const int num_blocks = numext::mini<int>(max_blocks, dyn_blocks);
- LAUNCH_CUDA_KERNEL((ReductionInitKernel<float, Index>),
- num_blocks, 1024, 0, device, reducer.initialize(),
- num_preserved_vals, output);
- }
-
- LAUNCH_CUDA_KERNEL((OuterReductionKernel<num_per_thread, Self, Op, Index>),
- num_blocks, block_size, 0, device, reducer, self, num_coeffs_to_reduce, num_preserved_vals, output);
-
- return false;
- }
-};
-
-#endif
-
-
-} // end namespace internal
-} // end namespace Eigen
-
-#endif // EIGEN_CXX11_TENSOR_TENSOR_REDUCTION_CUDA_H
+#include "TensorReductionGpu.h"
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorReductionGpu.h b/unsupported/Eigen/CXX11/src/Tensor/TensorReductionGpu.h
new file mode 100644
index 000000000..7504c1598
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorReductionGpu.h
@@ -0,0 +1,825 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_REDUCTION_GPU_H
+#define EIGEN_CXX11_TENSOR_TENSOR_REDUCTION_GPU_H
+
+namespace Eigen {
+namespace internal {
+
+
+#if defined(EIGEN_USE_GPU) && defined(EIGEN_GPUCC)
+// Full reducers for GPU, don't vectorize for now
+
+// Reducer function that enables multiple GPU threads to safely accumulate at the same
+// output address. It reads the current value of the output variable and attempts to
+// update it with the new value. If in the meantime another GPU thread has updated the
+// content of the output address, it tries again.
+template <typename T, typename R>
+__device__ EIGEN_ALWAYS_INLINE void atomicReduce(T* output, T accum, R& reducer) {
+#if (defined(EIGEN_HIP_DEVICE_COMPILE) && defined(__HIP_ARCH_HAS_WARP_SHUFFLE__)) || (EIGEN_CUDA_ARCH >= 300)
+ if (sizeof(T) == 4)
+ {
+ unsigned int oldval = *reinterpret_cast<unsigned int*>(output);
+ unsigned int newval = oldval;
+ reducer.reduce(accum, reinterpret_cast<T*>(&newval));
+ if (newval == oldval) {
+ return;
+ }
+ unsigned int readback;
+ while ((readback = atomicCAS((unsigned int*)output, oldval, newval)) != oldval) {
+ oldval = readback;
+ newval = oldval;
+ reducer.reduce(accum, reinterpret_cast<T*>(&newval));
+ if (newval == oldval) {
+ return;
+ }
+ }
+ }
+ else if (sizeof(T) == 8) {
+ unsigned long long oldval = *reinterpret_cast<unsigned long long*>(output);
+ unsigned long long newval = oldval;
+ reducer.reduce(accum, reinterpret_cast<T*>(&newval));
+ if (newval == oldval) {
+ return;
+ }
+ unsigned long long readback;
+ while ((readback = atomicCAS((unsigned long long*)output, oldval, newval)) != oldval) {
+ oldval = readback;
+ newval = oldval;
+ reducer.reduce(accum, reinterpret_cast<T*>(&newval));
+ if (newval == oldval) {
+ return;
+ }
+ }
+ }
+ else {
+ gpu_assert(0 && "Wordsize not supported");
+ }
+#else // EIGEN_CUDA_ARCH >= 300
+ gpu_assert(0 && "Shouldn't be called on unsupported device");
+#endif // EIGEN_CUDA_ARCH >= 300
+}
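+// Illustrative sketch (not compiled): how a warp leader would typically fold a
+// partial result into a shared output with the CAS-based atomicReduce above. The
+// kernel name and the MaxReducer choice are assumptions made only for this example;
+// Eigen's real call sites are the reduction kernels further down in this file.
+#if 0
+__global__ void AtomicReduceExample(float* out) {
+  MaxReducer<float> max_reducer;
+  // Each thread contributes a private partial value; the 4-byte branch of
+  // atomicReduce reinterprets the float as an unsigned int so that atomicCAS
+  // can implement the read-modify-write retry loop.
+  float partial = static_cast<float>(threadIdx.x);
+  atomicReduce(out, partial, max_reducer);
+}
+#endif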
+
+// We extend atomicExch to support extra data types
+template <typename Type>
+__device__ inline Type atomicExchCustom(Type* address, Type val) {
+ return atomicExch(address, val);
+}
+
+template <>
+__device__ inline double atomicExchCustom(double* address, double val) {
+ unsigned long long int* address_as_ull = reinterpret_cast<unsigned long long int*>(address);
+ return __longlong_as_double(atomicExch(address_as_ull, __double_as_longlong(val)));
+}
+
+#ifdef EIGEN_HAS_GPU_FP16
+template <template <typename T> class R>
+__device__ inline void atomicReduce(half2* output, half2 accum, R<half>& reducer) {
+ unsigned int oldval = *reinterpret_cast<unsigned int*>(output);
+ unsigned int newval = oldval;
+ reducer.reducePacket(accum, reinterpret_cast<half2*>(&newval));
+ if (newval == oldval) {
+ return;
+ }
+ unsigned int readback;
+ while ((readback = atomicCAS((unsigned int*)output, oldval, newval)) != oldval) {
+ oldval = readback;
+ newval = oldval;
+ reducer.reducePacket(accum, reinterpret_cast<half2*>(&newval));
+ if (newval == oldval) {
+ return;
+ }
+ }
+}
+#endif // EIGEN_HAS_GPU_FP16
+
+template <>
+__device__ inline void atomicReduce(float* output, float accum, SumReducer<float>&) {
+#if (defined(EIGEN_HIP_DEVICE_COMPILE) && defined(__HIP_ARCH_HAS_WARP_SHUFFLE__)) || (EIGEN_CUDA_ARCH >= 300)
+ atomicAdd(output, accum);
+#else // EIGEN_CUDA_ARCH >= 300
+ gpu_assert(0 && "Shouldn't be called on unsupported device");
+#endif // EIGEN_CUDA_ARCH >= 300
+}
+
+
+template <typename CoeffType, typename Index>
+__global__ void ReductionInitKernel(const CoeffType val, Index num_preserved_coeffs, CoeffType* output) {
+ const Index thread_id = blockIdx.x * blockDim.x + threadIdx.x;
+ const Index num_threads = blockDim.x * gridDim.x;
+ for (Index i = thread_id; i < num_preserved_coeffs; i += num_threads) {
+ output[i] = val;
+ }
+}
+
+
+template <int BlockSize, int NumPerThread, typename Self,
+ typename Reducer, typename Index>
+__global__ void FullReductionKernel(Reducer reducer, const Self input, Index num_coeffs,
+ typename Self::CoeffReturnType* output, unsigned int* semaphore) {
+#if (defined(EIGEN_HIP_DEVICE_COMPILE) && defined(__HIP_ARCH_HAS_WARP_SHUFFLE__)) || (EIGEN_CUDA_ARCH >= 300)
+ // Initialize the output value
+ const Index first_index = blockIdx.x * BlockSize * NumPerThread + threadIdx.x;
+ if (gridDim.x == 1) {
+ if (first_index == 0) {
+ *output = reducer.initialize();
+ }
+ }
+ else {
+ if (threadIdx.x == 0) {
+ unsigned int block = atomicCAS(semaphore, 0u, 1u);
+ if (block == 0) {
+ // We're the first block to run, initialize the output value
+ atomicExchCustom(output, reducer.initialize());
+ __threadfence();
+ atomicExch(semaphore, 2u);
+ }
+ else {
+ // Wait for the first block to initialize the output value.
+ // Use atomicCAS here to ensure that the reads aren't cached
+ unsigned int val;
+ do {
+ val = atomicCAS(semaphore, 2u, 2u);
+ }
+ while (val < 2u);
+ }
+ }
+ }
+
+ __syncthreads();
+
+ eigen_assert(gridDim.x == 1 || *semaphore >= 2u);
+
+ typename Self::CoeffReturnType accum = reducer.initialize();
+ Index max_iter = numext::mini<Index>(num_coeffs - first_index, NumPerThread*BlockSize);
+ for (Index i = 0; i < max_iter; i+=BlockSize) {
+ const Index index = first_index + i;
+ eigen_assert(index < num_coeffs);
+ typename Self::CoeffReturnType val = input.m_impl.coeff(index);
+ reducer.reduce(val, &accum);
+ }
+
+#pragma unroll
+ for (int offset = warpSize/2; offset > 0; offset /= 2) {
+ #if defined(EIGEN_HIPCC)
+    // use std::is_floating_point to decide how to cast accum for the shuffle.
+    // This is needed because when the coefficient type is double, hipcc will give a "call to __shfl_down is ambiguous" error
+    // and list the float and int versions of __shfl_down as the candidate functions.
+ if (std::is_floating_point<typename Self::CoeffReturnType>::value) {
+ reducer.reduce(__shfl_down(static_cast<float>(accum), offset, warpSize), &accum);
+ } else {
+ reducer.reduce(__shfl_down(static_cast<int>(accum), offset, warpSize), &accum);
+ }
+ #elif defined(EIGEN_CUDACC_VER) && EIGEN_CUDACC_VER < 90000
+ reducer.reduce(__shfl_down(accum, offset, warpSize), &accum);
+ #else
+ reducer.reduce(__shfl_down_sync(0xFFFFFFFF, accum, offset, warpSize), &accum);
+ #endif
+ }
+
+ if ((threadIdx.x & (warpSize - 1)) == 0) {
+ atomicReduce(output, accum, reducer);
+ }
+
+ if (gridDim.x > 1 && threadIdx.x == 0) {
+ // Let the last block reset the semaphore
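+    // atomicInc(semaphore, gridDim.x + 1) grows the counter from 2 up to
+    // gridDim.x + 1; the last block's increment then wraps it back to 0,
+    // leaving the semaphore ready for the next launch.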
+ atomicInc(semaphore, gridDim.x + 1);
+#if defined(EIGEN_HIPCC)
+ __threadfence_system();
+#endif
+ }
+#else // EIGEN_CUDA_ARCH >= 300
+ gpu_assert(0 && "Shouldn't be called on unsupported device");
+#endif // EIGEN_CUDA_ARCH >= 300
+}
+
+
+#ifdef EIGEN_HAS_GPU_FP16
+template <typename Self,
+ typename Reducer, typename Index>
+__global__ void ReductionInitFullReduxKernelHalfFloat(Reducer reducer, const Self input, Index num_coeffs, half2* scratch) {
+ eigen_assert(blockDim.x == 1);
+ eigen_assert(gridDim.x == 1);
+ if (num_coeffs % 2 != 0) {
+ half last = input.m_impl.coeff(num_coeffs-1);
+ *scratch = __halves2half2(last, reducer.initialize());
+ } else {
+ *scratch = reducer.template initializePacket<half2>();
+ }
+}
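+// Note: when num_coeffs is odd the trailing scalar cannot be covered by the half2
+// packet loop in FullReductionKernelHalfFloat below, so it is folded into the scratch
+// value here, paired with the reducer's initial (neutral) value.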
+
+template <typename Self,
+ typename Reducer, typename Index>
+__global__ void ReductionInitKernelHalfFloat(Reducer reducer, const Self input, Index num_coeffs, half* output) {
+ const Index thread_id = blockIdx.x * blockDim.x + threadIdx.x;
+ const Index num_threads = blockDim.x * gridDim.x;
+ const Index num_packets = num_coeffs / 2;
+ for (Index i = thread_id; i < num_packets; i += num_threads) {
+ ((half2*)output)[i] = reducer.template initializePacket<half2>();
+ }
+
+ if (thread_id == 0 && num_coeffs % 2 != 0) {
+ output[num_coeffs-1] = reducer.initialize();
+ }
+}
+
+template <int BlockSize, int NumPerThread, typename Self,
+ typename Reducer, typename Index>
+__global__ void FullReductionKernelHalfFloat(Reducer reducer, const Self input, Index num_coeffs,
+ half* output, half2* scratch) {
+ eigen_assert(NumPerThread % 2 == 0);
+
+ const Index first_index = blockIdx.x * BlockSize * NumPerThread + 2*threadIdx.x;
+
+ // Initialize the output value if it wasn't initialized by the ReductionInitKernel
+
+ if (gridDim.x == 1) {
+ if (first_index == 0) {
+ if (num_coeffs % 2 != 0) {
+ half last = input.m_impl.coeff(num_coeffs-1);
+ *scratch = __halves2half2(last, reducer.initialize());
+ } else {
+ *scratch = reducer.template initializePacket<half2>();
+ }
+ }
+ __syncthreads();
+ }
+
+ half2 accum = reducer.template initializePacket<half2>();
+ const Index max_iter = numext::mini<Index>((num_coeffs - first_index) / 2, NumPerThread*BlockSize / 2);
+ for (Index i = 0; i < max_iter; i += BlockSize) {
+ const Index index = first_index + 2*i;
+ eigen_assert(index + 1 < num_coeffs);
+ half2 val = input.m_impl.template packet<Unaligned>(index);
+ reducer.reducePacket(val, &accum);
+ }
+
+#pragma unroll
+ for (int offset = warpSize/2; offset > 0; offset /= 2) {
+ #if defined(EIGEN_HIPCC)
+ // FIXME : remove this workaround once we have native half/half2 support for __shfl_down
+ union { int i; half2 h; } wka_in, wka_out;
+ wka_in.h = accum;
+ wka_out.i = __shfl_down(wka_in.i, offset, warpSize);
+ reducer.reducePacket(wka_out.h, &accum);
+ #elif defined(EIGEN_CUDACC_VER) && EIGEN_CUDACC_VER < 90000
+ reducer.reducePacket(__shfl_down(accum, offset, warpSize), &accum);
+ #else
+ int temp = __shfl_down_sync(0xFFFFFFFF, *(int*)(&accum), (unsigned)offset, warpSize);
+ reducer.reducePacket(*(half2*)(&temp), &accum);
+ #endif
+ }
+
+ if ((threadIdx.x & (warpSize - 1)) == 0) {
+ atomicReduce(scratch, accum, reducer);
+ }
+
+ if (gridDim.x == 1) {
+ __syncthreads();
+ if (first_index == 0) {
+ half tmp = __low2half(*scratch);
+ reducer.reduce(__high2half(*scratch), &tmp);
+ *output = tmp;
+ }
+ }
+}
+
+template <typename Op>
+__global__ void ReductionCleanupKernelHalfFloat(Op& reducer, half* output, half2* scratch) {
+ eigen_assert(threadIdx.x == 1);
+ half tmp = __low2half(*scratch);
+ reducer.reduce(__high2half(*scratch), &tmp);
+ *output = tmp;
+}
+
+#endif // EIGEN_HAS_GPU_FP16
+
+template <typename Self, typename Op, typename OutputType, bool PacketAccess, typename Enabled = void>
+struct FullReductionLauncher {
+ static void run(const Self&, Op&, const GpuDevice&, OutputType*, typename Self::Index) {
+ gpu_assert(false && "Should only be called on doubles, floats and half floats");
+ }
+};
+
+// Specialization for float and double
+template <typename Self, typename Op, typename OutputType, bool PacketAccess>
+struct FullReductionLauncher<
+ Self, Op, OutputType, PacketAccess,
+ typename internal::enable_if<
+ internal::is_same<float, OutputType>::value ||
+ internal::is_same<double, OutputType>::value,
+ void>::type> {
+ static void run(const Self& self, Op& reducer, const GpuDevice& device, OutputType* output, typename Self::Index num_coeffs) {
+
+ typedef typename Self::Index Index;
+ const int block_size = 256;
+ const int num_per_thread = 128;
+ const int num_blocks = divup<int>(num_coeffs, block_size * num_per_thread);
+
+ unsigned int* semaphore = NULL;
+ if (num_blocks > 1) {
+ semaphore = device.semaphore();
+ }
+
+ LAUNCH_GPU_KERNEL((FullReductionKernel<block_size, num_per_thread, Self, Op, Index>),
+ num_blocks, block_size, 0, device, reducer, self, num_coeffs, output, semaphore);
+ }
+};
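+// Example launch shape for the launcher above (illustrative numbers): with
+// block_size = 256 and num_per_thread = 128 each block covers 256 * 128 = 32768
+// coefficients, so a 1048576-element full reduction launches divup(1048576, 32768)
+// = 32 blocks and therefore takes the semaphore-guarded initialization path.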
+
+#ifdef EIGEN_HAS_GPU_FP16
+template <typename Self, typename Op>
+struct FullReductionLauncher<Self, Op, Eigen::half, false> {
+ static void run(const Self&, Op&, const GpuDevice&, half*, typename Self::Index) {
+ gpu_assert(false && "Should not be called since there is no packet accessor");
+ }
+};
+
+template <typename Self, typename Op>
+struct FullReductionLauncher<Self, Op, Eigen::half, true> {
+ static void run(const Self& self, Op& reducer, const GpuDevice& device, half* output, typename Self::Index num_coeffs) {
+ typedef typename Self::Index Index;
+
+ const int block_size = 256;
+ const int num_per_thread = 128;
+ const int num_blocks = divup<int>(num_coeffs, block_size * num_per_thread);
+ half2* scratch = static_cast<half2*>(device.scratchpad());
+
+ if (num_blocks > 1) {
+      // We initialize the output and the scratchpad outside the reduction kernel when we can't be sure that there
+      // won't be race conditions between multiple thread blocks.
+ LAUNCH_GPU_KERNEL((ReductionInitFullReduxKernelHalfFloat<Self, Op, Index>),
+ 1, 1, 0, device, reducer, self, num_coeffs, scratch);
+ }
+
+ LAUNCH_GPU_KERNEL((FullReductionKernelHalfFloat<block_size, num_per_thread, Self, Op, Index>),
+ num_blocks, block_size, 0, device, reducer, self, num_coeffs, output, scratch);
+
+ if (num_blocks > 1) {
+ LAUNCH_GPU_KERNEL((ReductionCleanupKernelHalfFloat<Op>),
+ 1, 1, 0, device, reducer, output, scratch);
+ }
+ }
+};
+#endif // EIGEN_HAS_GPU_FP16
+
+
+template <typename Self, typename Op, bool Vectorizable>
+struct FullReducer<Self, Op, GpuDevice, Vectorizable> {
+  // Unfortunately NVIDIA GPUs don't handle exotic types such as complex well,
+  // so restrict the optimized version of the code to the simple cases
+  // of doubles, floats and half floats.
+#ifdef EIGEN_HAS_GPU_FP16
+ static const bool HasOptimizedImplementation = !Self::ReducerTraits::IsStateful &&
+ (internal::is_same<typename Self::CoeffReturnType, float>::value ||
+ internal::is_same<typename Self::CoeffReturnType, double>::value ||
+ (internal::is_same<typename Self::CoeffReturnType, Eigen::half>::value && reducer_traits<Op, GpuDevice>::PacketAccess));
+#else // EIGEN_HAS_GPU_FP16
+ static const bool HasOptimizedImplementation = !Self::ReducerTraits::IsStateful &&
+ (internal::is_same<typename Self::CoeffReturnType, float>::value ||
+ internal::is_same<typename Self::CoeffReturnType, double>::value);
+#endif // EIGEN_HAS_GPU_FP16
+
+ template <typename OutputType>
+ static void run(const Self& self, Op& reducer, const GpuDevice& device, OutputType* output) {
+ gpu_assert(HasOptimizedImplementation && "Should only be called on doubles, floats or half floats");
+ const Index num_coeffs = array_prod(self.m_impl.dimensions());
+ // Don't crash when we're called with an input tensor of size 0.
+ if (num_coeffs == 0) {
+ return;
+ }
+
+ FullReductionLauncher<Self, Op, OutputType, reducer_traits<Op, GpuDevice>::PacketAccess>::run(self, reducer, device, output, num_coeffs);
+ }
+};
+
+
+template <int NumPerThread, typename Self,
+ typename Reducer, typename Index>
+__global__ void InnerReductionKernel(Reducer reducer, const Self input, Index num_coeffs_to_reduce, Index num_preserved_coeffs,
+ typename Self::CoeffReturnType* output) {
+#if (defined(EIGEN_HIP_DEVICE_COMPILE) && defined(__HIP_ARCH_HAS_WARP_SHUFFLE__)) || (EIGEN_CUDA_ARCH >= 300)
+ typedef typename Self::CoeffReturnType Type;
+ eigen_assert(blockDim.y == 1);
+ eigen_assert(blockDim.z == 1);
+ eigen_assert(gridDim.y == 1);
+ eigen_assert(gridDim.z == 1);
+
+ const int unroll_times = 16;
+ eigen_assert(NumPerThread % unroll_times == 0);
+
+ const Index input_col_blocks = divup<Index>(num_coeffs_to_reduce, blockDim.x * NumPerThread);
+ const Index num_input_blocks = input_col_blocks * num_preserved_coeffs;
+
+ const Index num_threads = blockDim.x * gridDim.x;
+ const Index thread_id = blockIdx.x * blockDim.x + threadIdx.x;
+
+ // Initialize the output values if they weren't initialized by the ReductionInitKernel
+ if (gridDim.x == 1) {
+ for (Index i = thread_id; i < num_preserved_coeffs; i += num_threads) {
+ output[i] = reducer.initialize();
+ }
+ __syncthreads();
+ }
+
+ for (Index i = blockIdx.x; i < num_input_blocks; i += gridDim.x) {
+ const Index row = i / input_col_blocks;
+
+ if (row < num_preserved_coeffs) {
+ const Index col_block = i % input_col_blocks;
+ const Index col_begin = col_block * blockDim.x * NumPerThread + threadIdx.x;
+
+ Type reduced_val = reducer.initialize();
+
+ for (Index j = 0; j < NumPerThread; j += unroll_times) {
+ const Index last_col = col_begin + blockDim.x * (j + unroll_times - 1);
+ if (last_col >= num_coeffs_to_reduce) {
+ for (Index col = col_begin + blockDim.x * j; col < num_coeffs_to_reduce; col += blockDim.x) {
+ const Type val = input.m_impl.coeff(row * num_coeffs_to_reduce + col);
+ reducer.reduce(val, &reduced_val);
+ }
+ break;
+ } else {
+ // Faster version of the loop with no branches after unrolling.
+#pragma unroll
+ for (int k = 0; k < unroll_times; ++k) {
+ const Index col = col_begin + blockDim.x * (j + k);
+ reducer.reduce(input.m_impl.coeff(row * num_coeffs_to_reduce + col), &reduced_val);
+ }
+ }
+ }
+
+#pragma unroll
+ for (int offset = warpSize/2; offset > 0; offset /= 2) {
+ #if defined(EIGEN_HIPCC)
+ // use std::is_floating_point to determine the type of reduced_val
+      // This is needed because when Type == double, hipcc will give a "call to __shfl_down is ambiguous" error
+ // and list the float and int versions of __shfl_down as the candidate functions.
+ if (std::is_floating_point<Type>::value) {
+ reducer.reduce(__shfl_down(static_cast<float>(reduced_val), offset), &reduced_val);
+ } else {
+ reducer.reduce(__shfl_down(static_cast<int>(reduced_val), offset), &reduced_val);
+ }
+ #elif defined(EIGEN_CUDACC_VER) && EIGEN_CUDACC_VER < 90000
+ reducer.reduce(__shfl_down(reduced_val, offset), &reduced_val);
+ #else
+ reducer.reduce(__shfl_down_sync(0xFFFFFFFF, reduced_val, offset), &reduced_val);
+ #endif
+ }
+
+ if ((threadIdx.x & (warpSize - 1)) == 0) {
+ atomicReduce(&(output[row]), reduced_val, reducer);
+ }
+ }
+ }
+#else // EIGEN_CUDA_ARCH >= 300
+ gpu_assert(0 && "Shouldn't be called on unsupported device");
+#endif // EIGEN_CUDA_ARCH >= 300
+}
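+// Note on the kernel above: NumPerThread must be a multiple of unroll_times (16).
+// With the launcher's NumPerThread = 128 the j-loop runs 128 / 16 = 8 iterations,
+// each fully unrolled over 16 columns spaced blockDim.x apart; only the partially
+// filled tail strip falls back to the branchy per-column loop.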
+
+#ifdef EIGEN_HAS_GPU_FP16
+
+template <int NumPerThread, typename Self,
+ typename Reducer, typename Index>
+__global__ void InnerReductionKernelHalfFloat(Reducer reducer, const Self input, Index num_coeffs_to_reduce, Index num_preserved_coeffs,
+ half* output) {
+ eigen_assert(blockDim.y == 1);
+ eigen_assert(blockDim.z == 1);
+ eigen_assert(gridDim.y == 1);
+ eigen_assert(gridDim.z == 1);
+
+ const int unroll_times = 16;
+ eigen_assert(NumPerThread % unroll_times == 0);
+ eigen_assert(unroll_times % 2 == 0);
+
+ const Index input_col_blocks = divup<Index>(num_coeffs_to_reduce, blockDim.x * NumPerThread * 2);
+ const Index num_input_blocks = divup<Index>(input_col_blocks * num_preserved_coeffs, 2);
+
+ const Index num_threads = blockDim.x * gridDim.x;
+ const Index thread_id = blockIdx.x * blockDim.x + threadIdx.x;
+
+ // Initialize the output values if they weren't initialized by the ReductionInitKernel
+ if (gridDim.x == 1) {
+ Index i = 2*thread_id;
+ for (; i + 1 < num_preserved_coeffs; i += 2*num_threads) {
+ half* loc = output + i;
+ *((half2*)loc) = reducer.template initializePacket<half2>();
+ }
+ if (i < num_preserved_coeffs) {
+ output[i] = reducer.initialize();
+ }
+ __syncthreads();
+ }
+
+ for (Index i = blockIdx.x; i < num_input_blocks; i += gridDim.x) {
+ const Index row = 2 * (i / input_col_blocks);
+
+ if (row + 1 < num_preserved_coeffs) {
+ const Index col_block = i % input_col_blocks;
+ const Index col_begin = 2 * (col_block * blockDim.x * NumPerThread + threadIdx.x);
+
+ half2 reduced_val1 = reducer.template initializePacket<half2>();
+ half2 reduced_val2 = reducer.template initializePacket<half2>();
+
+ for (Index j = 0; j < NumPerThread; j += unroll_times) {
+ const Index last_col = col_begin + blockDim.x * (j + unroll_times - 1) * 2;
+ if (last_col >= num_coeffs_to_reduce) {
+ Index col = col_begin + blockDim.x * j;
+ for (; col + 1 < num_coeffs_to_reduce; col += blockDim.x) {
+ const half2 val1 = input.m_impl.template packet<Unaligned>(row * num_coeffs_to_reduce + col);
+ reducer.reducePacket(val1, &reduced_val1);
+ const half2 val2 = input.m_impl.template packet<Unaligned>((row+1) * num_coeffs_to_reduce + col);
+ reducer.reducePacket(val2, &reduced_val2);
+ }
+ if (col < num_coeffs_to_reduce) {
+ // Peel;
+ const half last1 = input.m_impl.coeff(row * num_coeffs_to_reduce + col);
+ const half2 val1 = __halves2half2(last1, reducer.initialize());
+ reducer.reducePacket(val1, &reduced_val1);
+ const half last2 = input.m_impl.coeff((row+1) * num_coeffs_to_reduce + col);
+ const half2 val2 = __halves2half2(last2, reducer.initialize());
+ reducer.reducePacket(val2, &reduced_val2);
+ }
+ break;
+ } else {
+ // Faster version of the loop with no branches after unrolling.
+#pragma unroll
+ for (int k = 0; k < unroll_times; ++k) {
+ const Index col = col_begin + blockDim.x * (j + k) * 2;
+ reducer.reducePacket(input.m_impl.template packet<Unaligned>(row * num_coeffs_to_reduce + col), &reduced_val1);
+ reducer.reducePacket(input.m_impl.template packet<Unaligned>((row + 1)* num_coeffs_to_reduce + col), &reduced_val2);
+ }
+ }
+ }
+
+#pragma unroll
+ for (int offset = warpSize/2; offset > 0; offset /= 2) {
+ #if defined(EIGEN_HIPCC)
+ // FIXME : remove this workaround once we have native half/half2 support for __shfl_down
+ union { int i; half2 h; } wka_in, wka_out;
+
+ wka_in.h = reduced_val1;
+ wka_out.i = __shfl_down(wka_in.i, offset, warpSize);
+ reducer.reducePacket(wka_out.h, &reduced_val1);
+
+ wka_in.h = reduced_val2;
+ wka_out.i = __shfl_down(wka_in.i, offset, warpSize);
+ reducer.reducePacket(wka_out.h, &reduced_val2);
+ #elif defined(EIGEN_CUDACC_VER) && EIGEN_CUDACC_VER < 90000
+ reducer.reducePacket(__shfl_down(reduced_val1, offset, warpSize), &reduced_val1);
+ reducer.reducePacket(__shfl_down(reduced_val2, offset, warpSize), &reduced_val2);
+ #else
+ int temp1 = __shfl_down_sync(0xFFFFFFFF, *(int*)(&reduced_val1), (unsigned)offset, warpSize);
+ int temp2 = __shfl_down_sync(0xFFFFFFFF, *(int*)(&reduced_val2), (unsigned)offset, warpSize);
+ reducer.reducePacket(*(half2*)(&temp1), &reduced_val1);
+ reducer.reducePacket(*(half2*)(&temp2), &reduced_val2);
+ #endif
+ }
+
+ half val1 = __low2half(reduced_val1);
+ reducer.reduce(__high2half(reduced_val1), &val1);
+ half val2 = __low2half(reduced_val2);
+ reducer.reduce(__high2half(reduced_val2), &val2);
+ half2 val = __halves2half2(val1, val2);
+
+ if ((threadIdx.x & (warpSize - 1)) == 0) {
+ half* loc = output + row;
+ atomicReduce((half2*)loc, val, reducer);
+ }
+ }
+ }
+}
+
+#endif // EIGEN_HAS_GPU_FP16
+
+template <typename Self, typename Op, typename OutputType, bool PacketAccess, typename Enabled = void>
+struct InnerReductionLauncher {
+ static EIGEN_DEVICE_FUNC bool run(const Self&, Op&, const GpuDevice&, OutputType*, typename Self::Index, typename Self::Index) {
+ gpu_assert(false && "Should only be called to reduce doubles, floats and half floats on a gpu device");
+ return true;
+ }
+};
+
+// Specialization for float and double
+template <typename Self, typename Op, typename OutputType, bool PacketAccess>
+struct InnerReductionLauncher<
+ Self, Op, OutputType, PacketAccess,
+ typename internal::enable_if<
+ internal::is_same<float, OutputType>::value ||
+ internal::is_same<double, OutputType>::value,
+ void>::type> {
+ static bool run(const Self& self, Op& reducer, const GpuDevice& device, OutputType* output, typename Self::Index num_coeffs_to_reduce, typename Self::Index num_preserved_vals) {
+ typedef typename Self::Index Index;
+
+ const Index num_coeffs = num_coeffs_to_reduce * num_preserved_vals;
+ const int block_size = 256;
+ const int num_per_thread = 128;
+ const int dyn_blocks = divup<int>(num_coeffs, block_size * num_per_thread);
+ const int max_blocks = device.getNumGpuMultiProcessors() *
+ device.maxGpuThreadsPerMultiProcessor() / block_size;
+ const int num_blocks = numext::mini<int>(max_blocks, dyn_blocks);
+
+ if (num_blocks > 1) {
+ // We initialize the outputs outside the reduction kernel when we can't be sure that there
+      // won't be race conditions between multiple thread blocks.
+ const int dyn_blocks = divup<int>(num_preserved_vals, 1024);
+ const int max_blocks = device.getNumGpuMultiProcessors() *
+ device.maxGpuThreadsPerMultiProcessor() / 1024;
+ const int num_blocks = numext::mini<int>(max_blocks, dyn_blocks);
+ LAUNCH_GPU_KERNEL((ReductionInitKernel<OutputType, Index>),
+ num_blocks, 1024, 0, device, reducer.initialize(),
+ num_preserved_vals, output);
+ }
+
+ LAUNCH_GPU_KERNEL((InnerReductionKernel<num_per_thread, Self, Op, Index>),
+ num_blocks, block_size, 0, device, reducer, self, num_coeffs_to_reduce, num_preserved_vals, output);
+
+ return false;
+ }
+};
+
+#ifdef EIGEN_HAS_GPU_FP16
+template <typename Self, typename Op>
+struct InnerReductionLauncher<Self, Op, Eigen::half, false> {
+ static bool run(const Self&, Op&, const GpuDevice&, half*, typename Self::Index, typename Self::Index) {
+ gpu_assert(false && "Should not be called since there is no packet accessor");
+ return true;
+ }
+};
+
+template <typename Self, typename Op>
+struct InnerReductionLauncher<Self, Op, Eigen::half, true> {
+ static bool run(const Self& self, Op& reducer, const GpuDevice& device, half* output, typename Self::Index num_coeffs_to_reduce, typename Self::Index num_preserved_vals) {
+ typedef typename Self::Index Index;
+
+ if (num_preserved_vals % 2 != 0) {
+ // Not supported yet, revert to the slower code path
+ return true;
+ }
+
+ const Index num_coeffs = num_coeffs_to_reduce * num_preserved_vals;
+ const int block_size = /*256*/128;
+ const int num_per_thread = /*128*/64;
+ const int dyn_blocks = divup<int>(num_coeffs, block_size * num_per_thread);
+ const int max_blocks = device.getNumGpuMultiProcessors() *
+ device.maxGpuThreadsPerMultiProcessor() / block_size;
+ const int num_blocks = numext::mini<int>(max_blocks, dyn_blocks);
+
+ if (num_blocks > 1) {
+ // We initialize the outputs outside the reduction kernel when we can't be sure that there
+      // won't be race conditions between multiple thread blocks.
+ const int dyn_blocks = divup<int>(num_preserved_vals, 1024);
+ const int max_blocks = device.getNumGpuMultiProcessors() *
+ device.maxGpuThreadsPerMultiProcessor() / 1024;
+ const int num_blocks = numext::mini<int>(max_blocks, dyn_blocks);
+ LAUNCH_GPU_KERNEL((ReductionInitKernelHalfFloat<Self, Op, Index>),
+ 1, 1, 0, device, reducer, self, num_preserved_vals, output);
+ }
+
+ LAUNCH_GPU_KERNEL((InnerReductionKernelHalfFloat<num_per_thread, Self, Op, Index>),
+ num_blocks, block_size, 0, device, reducer, self, num_coeffs_to_reduce, num_preserved_vals, output);
+
+ return false;
+ }
+};
+#endif // EIGEN_HAS_GPU_FP16
+
+
+template <typename Self, typename Op>
+struct InnerReducer<Self, Op, GpuDevice> {
+  // Unfortunately NVIDIA GPUs don't handle exotic types such as complex well,
+  // so restrict the optimized version of the code to the simple cases
+  // of floats, doubles and half floats.
+#ifdef EIGEN_HAS_GPU_FP16
+ static const bool HasOptimizedImplementation = !Self::ReducerTraits::IsStateful &&
+ (internal::is_same<typename Self::CoeffReturnType, float>::value ||
+ internal::is_same<typename Self::CoeffReturnType, double>::value ||
+ (internal::is_same<typename Self::CoeffReturnType, Eigen::half>::value && reducer_traits<Op, GpuDevice>::PacketAccess));
+#else // EIGEN_HAS_GPU_FP16
+ static const bool HasOptimizedImplementation = !Self::ReducerTraits::IsStateful &&
+ (internal::is_same<typename Self::CoeffReturnType, float>::value ||
+ internal::is_same<typename Self::CoeffReturnType, double>::value);
+#endif // EIGEN_HAS_GPU_FP16
+
+ template <typename OutputType>
+ static bool run(const Self& self, Op& reducer, const GpuDevice& device, OutputType* output, typename Self::Index num_coeffs_to_reduce, typename Self::Index num_preserved_vals) {
+ gpu_assert(HasOptimizedImplementation && "Should only be called on doubles, floats or half floats");
+ const Index num_coeffs = array_prod(self.m_impl.dimensions());
+ // Don't crash when we're called with an input tensor of size 0.
+ if (num_coeffs == 0) {
+ return true;
+ }
+ // It's faster to use the usual code.
+ if (num_coeffs_to_reduce <= 128) {
+ return true;
+ }
+
+ return InnerReductionLauncher<Self, Op, OutputType, reducer_traits<Op, GpuDevice>::PacketAccess>::run(self, reducer, device, output, num_coeffs_to_reduce, num_preserved_vals);
+ }
+};
+
+template <int NumPerThread, typename Self,
+ typename Reducer, typename Index>
+__global__ void OuterReductionKernel(Reducer reducer, const Self input, Index num_coeffs_to_reduce, Index num_preserved_coeffs,
+ typename Self::CoeffReturnType* output) {
+ const Index num_threads = blockDim.x * gridDim.x;
+ const Index thread_id = blockIdx.x * blockDim.x + threadIdx.x;
+ // Initialize the output values if they weren't initialized by the ReductionInitKernel
+ if (gridDim.x == 1) {
+ for (Index i = thread_id; i < num_preserved_coeffs; i += num_threads) {
+ output[i] = reducer.initialize();
+ }
+ __syncthreads();
+ }
+
+ // Do the reduction.
+ const Index max_iter = num_preserved_coeffs * divup<Index>(num_coeffs_to_reduce, NumPerThread);
+ for (Index i = thread_id; i < max_iter; i += num_threads) {
+ const Index input_col = i % num_preserved_coeffs;
+ const Index input_row = (i / num_preserved_coeffs) * NumPerThread;
+ typename Self::CoeffReturnType reduced_val = reducer.initialize();
+ const Index max_row = numext::mini(input_row + NumPerThread, num_coeffs_to_reduce);
+ for (Index j = input_row; j < max_row; j++) {
+ typename Self::CoeffReturnType val = input.m_impl.coeff(j * num_preserved_coeffs + input_col);
+ reducer.reduce(val, &reduced_val);
+ }
+ atomicReduce(&(output[input_col]), reduced_val, reducer);
+ }
+}
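+// Example of the work decomposition above (illustrative numbers): with
+// NumPerThread = 16 and num_coeffs_to_reduce = 100, each preserved output column is
+// covered by divup(100, 16) = 7 row strips; every strip is reduced privately and
+// then merged into output[input_col] with atomicReduce.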
+
+
+template <typename Self, typename Op>
+struct OuterReducer<Self, Op, GpuDevice> {
+  // Unfortunately NVIDIA GPUs don't handle exotic types such as complex well,
+  // so restrict the optimized version of the code to the simple cases
+  // of floats and doubles.
+ static const bool HasOptimizedImplementation = !Self::ReducerTraits::IsStateful &&
+ (internal::is_same<typename Self::CoeffReturnType, float>::value ||
+ internal::is_same<typename Self::CoeffReturnType, double>::value);
+ template <typename Device, typename OutputType>
+ static
+ #if !defined(EIGEN_HIPCC)
+  // FIXME : leaving this EIGEN_DEVICE_FUNC in results in the following runtime error
+ // (in the cxx11_tensor_reduction_gpu test)
+ //
+ // terminate called after throwing an instance of 'std::runtime_error'
+ // what(): No device code available for function: _ZN5Eigen8internal20OuterReductionKernelIL...
+ //
+  // don't know why this happens (or why it is a runtime error instead of a compile time error)
+ //
+ // this will be fixed by HIP PR#457
+ EIGEN_DEVICE_FUNC
+ #endif
+ bool run(const Self&, Op&, const Device&, OutputType*, typename Self::Index, typename Self::Index) {
+ gpu_assert(false && "Should only be called to reduce doubles or floats on a gpu device");
+ return true;
+ }
+
+ static bool run(const Self& self, Op& reducer, const GpuDevice& device, float* output, typename Self::Index num_coeffs_to_reduce, typename Self::Index num_preserved_vals) {
+ typedef typename Self::Index Index;
+
+ // It's faster to use the usual code.
+ if (num_coeffs_to_reduce <= 32) {
+ return true;
+ }
+
+ const Index num_coeffs = num_coeffs_to_reduce * num_preserved_vals;
+ const int block_size = 256;
+ const int num_per_thread = 16;
+ const int dyn_blocks = divup<int>(num_coeffs, block_size * num_per_thread);
+ const int max_blocks = device.getNumGpuMultiProcessors() *
+ device.maxGpuThreadsPerMultiProcessor() / block_size;
+ const int num_blocks = numext::mini<int>(max_blocks, dyn_blocks);
+
+ if (num_blocks > 1) {
+      // We initialize the outputs outside the reduction kernel when we can't be sure that there
+      // won't be race conditions between multiple thread blocks.
+ const int dyn_blocks = divup<int>(num_preserved_vals, 1024);
+ const int max_blocks = device.getNumGpuMultiProcessors() *
+ device.maxGpuThreadsPerMultiProcessor() / 1024;
+ const int num_blocks = numext::mini<int>(max_blocks, dyn_blocks);
+ LAUNCH_GPU_KERNEL((ReductionInitKernel<float, Index>),
+ num_blocks, 1024, 0, device, reducer.initialize(),
+ num_preserved_vals, output);
+ }
+
+ LAUNCH_GPU_KERNEL((OuterReductionKernel<num_per_thread, Self, Op, Index>),
+ num_blocks, block_size, 0, device, reducer, self, num_coeffs_to_reduce, num_preserved_vals, output);
+
+ return false;
+ }
+};
+
+#endif // defined(EIGEN_USE_GPU) && defined(EIGEN_GPUCC)
+
+
+} // end namespace internal
+} // end namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_REDUCTION_GPU_H
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorReductionSycl.h b/unsupported/Eigen/CXX11/src/Tensor/TensorReductionSycl.h
index c3ca129e2..a379f5a94 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorReductionSycl.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorReductionSycl.h
@@ -27,15 +27,15 @@ namespace internal {
template<typename OP, typename CoeffReturnType> struct syclGenericBufferReducer{
template<typename BufferTOut, typename BufferTIn>
-static void run(OP op, BufferTOut& bufOut, BufferTIn& bufI, const Eigen::SyclDevice& dev, size_t length, size_t local){
+static void run(OP op, BufferTOut& bufOut, ptrdiff_t out_offset, BufferTIn& bufI, const Eigen::SyclDevice& dev, size_t length, size_t local){
do {
- auto f = [length, local, op, &bufOut, &bufI](cl::sycl::handler& h) mutable {
+ auto f = [length, local, op, out_offset, &bufOut, &bufI](cl::sycl::handler& h) mutable {
cl::sycl::nd_range<1> r{cl::sycl::range<1>{std::max(length, local)},
cl::sycl::range<1>{std::min(length, local)}};
/* Two accessors are used: one to the buffer that is being reduced,
* and a second to local memory, used to store intermediate data. */
auto aI =bufI.template get_access<cl::sycl::access::mode::read_write>(h);
- auto aOut =bufOut.template get_access<cl::sycl::access::mode::discard_write>(h);
+ auto aOut =bufOut.template get_access<cl::sycl::access::mode::write>(h);
typedef decltype(aI) InputAccessor;
typedef decltype(aOut) OutputAccessor;
typedef cl::sycl::accessor<CoeffReturnType, 1, cl::sycl::access::mode::read_write,cl::sycl::access::target::local> LocalAccessor;
@@ -43,7 +43,7 @@ static void run(OP op, BufferTOut& bufOut, BufferTIn& bufI, const Eigen::SyclDev
/* The parallel_for invocation chosen is the variant with an nd_item
* parameter, since the code requires barriers for correctness. */
- h.parallel_for(r, TensorSycl::internal::GenericKernelReducer<CoeffReturnType, OP, OutputAccessor, InputAccessor, LocalAccessor>(op, aOut, aI, scratch, length, local));
+ h.parallel_for(r, TensorSycl::internal::GenericKernelReducer<CoeffReturnType, OP, OutputAccessor, InputAccessor, LocalAccessor>(op, aOut, out_offset, aI, scratch, length, local));
};
dev.sycl_queue().submit(f);
dev.asynchronousExec();
@@ -60,9 +60,9 @@ static void run(OP op, BufferTOut& bufOut, BufferTIn& bufI, const Eigen::SyclDev
template<typename CoeffReturnType> struct syclGenericBufferReducer<Eigen::internal::MeanReducer<CoeffReturnType>, CoeffReturnType>{
template<typename BufferTOut, typename BufferTIn>
-static void run(Eigen::internal::MeanReducer<CoeffReturnType>, BufferTOut& bufOut, BufferTIn& bufI, const Eigen::SyclDevice& dev, size_t length, size_t local){
+static void run(Eigen::internal::MeanReducer<CoeffReturnType>, BufferTOut& bufOut, ptrdiff_t out_offset, BufferTIn& bufI, const Eigen::SyclDevice& dev, size_t length, size_t local){
syclGenericBufferReducer<Eigen::internal::SumReducer<CoeffReturnType>, CoeffReturnType>::run(Eigen::internal::SumReducer<CoeffReturnType>(),
- bufOut, bufI, dev, length, local);
+ bufOut, out_offset, bufI, dev, length, local);
}
};
@@ -106,7 +106,7 @@ struct FullReducer<Self, Op, const Eigen::SyclDevice, Vectorizable> {
/// if the shared memory is less than the GRange, we set shared_mem size to the TotalSize and in this case one kernel would be created for recursion to reduce all to one.
if (GRange < outTileSize) outTileSize=GRange;
/// creating the shared memory for calculating reduction.
- /// This one is used to collect all the reduced value of shared memory as we dont have global barrier on GPU. Once it is saved we can
+  /// This one is used to collect all the reduced values from shared memory, as we don't have a global barrier on the GPU. Once it is saved we can
/// recursively apply reduction on it in order to reduce the whole.
auto temp_global_buffer =cl::sycl::buffer<CoeffReturnType, 1>(cl::sycl::range<1>(GRange));
typedef typename Eigen::internal::remove_all<decltype(self.xprDims())>::type Dims;
@@ -127,8 +127,9 @@ struct FullReducer<Self, Op, const Eigen::SyclDevice, Vectorizable> {
// getting final out buffer at the moment the created buffer is true because there is no need for assign
auto out_buffer =dev.get_sycl_buffer(output);
+ ptrdiff_t out_offset = dev.get_offset(output);
/// This is used to recursively reduce the tmp value to an element of 1;
- syclGenericBufferReducer<Op, CoeffReturnType>::run(reducer, out_buffer, temp_global_buffer,dev, GRange, outTileSize);
+  syclGenericBufferReducer<Op, CoeffReturnType>::run(reducer, out_buffer, out_offset, temp_global_buffer, dev, GRange, outTileSize);
}
};
@@ -149,7 +150,7 @@ struct InnerReducer<Self, Op, const Eigen::SyclDevice> {
// getting final out buffer at the moment the created buffer is true because there is no need for assign
/// creating the shared memory for calculating reduction.
- /// This one is used to collect all the reduced value of shared memory as we dont have global barrier on GPU. Once it is saved we can
+  /// This one is used to collect all the reduced values from shared memory, as we don't have a global barrier on the GPU. Once it is saved we can
/// recursively apply reduction on it in order to reduce the whole.
dev.parallel_for_setup(num_coeffs_to_preserve, tileSize, range, GRange);
dev.sycl_queue().submit([&](cl::sycl::handler &cgh) {
@@ -157,11 +158,12 @@ struct InnerReducer<Self, Op, const Eigen::SyclDevice> {
typedef decltype(TensorSycl::internal::createTupleOfAccessors(cgh, self.impl())) Tuple_of_Acc;
// create a tuple of accessors from Evaluator
Tuple_of_Acc tuple_of_accessors = TensorSycl::internal::createTupleOfAccessors(cgh, self.impl());
- auto output_accessor = dev.template get_sycl_accessor<cl::sycl::access::mode::discard_write>(cgh, output);
+ auto output_accessor = dev.template get_sycl_accessor<cl::sycl::access::mode::write>(cgh, output);
+ ptrdiff_t out_offset = dev.get_offset(output);
Index red_size = (num_values_to_reduce!=0)? num_values_to_reduce : static_cast<Index>(1);
cgh.parallel_for( cl::sycl::nd_range<1>(cl::sycl::range<1>(GRange), cl::sycl::range<1>(tileSize)),
TensorSycl::internal::ReductionFunctor<HostExpr, FunctorExpr, Tuple_of_Acc, Dims, Op, typename Self::Index>
- (output_accessor, functors, tuple_of_accessors, self.xprDims(), reducer, range, red_size));
+ (output_accessor, out_offset, functors, tuple_of_accessors, self.xprDims(), reducer, range, red_size));
});
dev.asynchronousExec();
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorRef.h b/unsupported/Eigen/CXX11/src/Tensor/TensorRef.h
index 99245f778..6e15e75f9 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorRef.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorRef.h
@@ -31,7 +31,7 @@ class TensorLazyBaseEvaluator {
int refCount() const { return m_refcount; }
private:
- // No copy, no assigment;
+ // No copy, no assignment;
TensorLazyBaseEvaluator(const TensorLazyBaseEvaluator& other);
TensorLazyBaseEvaluator& operator = (const TensorLazyBaseEvaluator& other);
@@ -136,6 +136,8 @@ template<typename PlainObjectType> class TensorRef : public TensorBase<TensorRef
enum {
IsAligned = false,
PacketAccess = false,
+ BlockAccess = false,
+ PreferBlockAccess = false,
Layout = PlainObjectType::Layout,
CoordAccess = false, // to be implemented
RawAccess = false
@@ -364,6 +366,8 @@ struct TensorEvaluator<const TensorRef<Derived>, Device>
enum {
IsAligned = false,
PacketAccess = false,
+ BlockAccess = false,
+ PreferBlockAccess = false,
Layout = TensorRef<Derived>::Layout,
CoordAccess = false, // to be implemented
RawAccess = false
@@ -411,6 +415,8 @@ struct TensorEvaluator<TensorRef<Derived>, Device> : public TensorEvaluator<cons
enum {
IsAligned = false,
PacketAccess = false,
+ BlockAccess = false,
+ PreferBlockAccess = false,
RawAccess = false
};
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorReverse.h b/unsupported/Eigen/CXX11/src/Tensor/TensorReverse.h
index e430b0826..b7fb969f3 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorReverse.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorReverse.h
@@ -31,6 +31,7 @@ struct traits<TensorReverseOp<ReverseDimensions,
typedef typename remove_reference<Nested>::type _Nested;
static const int NumDimensions = XprTraits::NumDimensions;
static const int Layout = XprTraits::Layout;
+ typedef typename XprTraits::PointerType PointerType;
};
template<typename ReverseDimensions, typename XprType>
@@ -107,11 +108,13 @@ struct TensorEvaluator<const TensorReverseOp<ReverseDimensions, ArgType>, Device
typedef typename XprType::Scalar Scalar;
typedef typename XprType::CoeffReturnType CoeffReturnType;
typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
- static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
+ static const int PacketSize = PacketType<CoeffReturnType, Device>::size;
enum {
IsAligned = false,
PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
+ BlockAccess = false,
+ PreferBlockAccess = false,
Layout = TensorEvaluator<ArgType, Device>::Layout,
CoordAccess = false, // to be implemented
RawAccess = false
@@ -222,7 +225,7 @@ struct TensorEvaluator<const TensorReverseOp<ReverseDimensions, ArgType>, Device
TensorOpCost(0, 0, compute_cost, false /* vectorized */, PacketSize);
}
- EIGEN_DEVICE_FUNC Scalar* data() const { return NULL; }
+ EIGEN_DEVICE_FUNC typename Eigen::internal::traits<XprType>::PointerType data() const { return NULL; }
/// required by sycl in order to extract the accessor
const TensorEvaluator<ArgType, Device> & impl() const { return m_impl; }
@@ -252,6 +255,8 @@ struct TensorEvaluator<TensorReverseOp<ReverseDimensions, ArgType>, Device>
enum {
IsAligned = false,
PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
+ BlockAccess = false,
+ PreferBlockAccess = false,
Layout = TensorEvaluator<ArgType, Device>::Layout,
CoordAccess = false, // to be implemented
RawAccess = false
@@ -263,7 +268,7 @@ struct TensorEvaluator<TensorReverseOp<ReverseDimensions, ArgType>, Device>
typedef typename XprType::Scalar Scalar;
typedef typename XprType::CoeffReturnType CoeffReturnType;
typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
- static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
+ static const int PacketSize = PacketType<CoeffReturnType, Device>::size;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const Dimensions& dimensions() const { return this->m_dimensions; }
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorScan.h b/unsupported/Eigen/CXX11/src/Tensor/TensorScan.h
index 8501466ce..641366d9d 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorScan.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorScan.h
@@ -24,6 +24,7 @@ struct traits<TensorScanOp<Op, XprType> >
typedef typename remove_reference<Nested>::type _Nested;
static const int NumDimensions = XprTraits::NumDimensions;
static const int Layout = XprTraits::Layout;
+ typedef typename XprTraits::PointerType PointerType;
};
template<typename Op, typename XprType>
@@ -94,8 +95,9 @@ struct TensorEvaluator<const TensorScanOp<Op, ArgType>, Device> {
enum {
IsAligned = false,
- PacketAccess = (internal::unpacket_traits<PacketReturnType>::size > 1),
+ PacketAccess = (PacketType<CoeffReturnType, Device>::size > 1),
BlockAccess = false,
+ PreferBlockAccess = false,
Layout = TensorEvaluator<ArgType, Device>::Layout,
CoordAccess = false,
RawAccess = true
@@ -175,7 +177,7 @@ struct TensorEvaluator<const TensorScanOp<Op, ArgType>, Device> {
return internal::ploadt<PacketReturnType, LoadMode>(m_output + index);
}
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType* data() const
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename Eigen::internal::traits<XprType>::PointerType data() const
{
return m_output;
}
@@ -241,7 +243,7 @@ struct ScanLauncher {
}
};
-#if defined(EIGEN_USE_GPU) && defined(__CUDACC__)
+#if defined(EIGEN_USE_GPU) && (defined(EIGEN_GPUCC))
// GPU implementation of scan
// TODO(ibab) This placeholder implementation performs multiple scans in
@@ -277,10 +279,11 @@ struct ScanLauncher<Self, Reducer, GpuDevice> {
Index total_size = internal::array_prod(self.dimensions());
Index num_blocks = (total_size / self.size() + 63) / 64;
Index block_size = 64;
- LAUNCH_CUDA_KERNEL((ScanKernel<Self, Reducer>), num_blocks, block_size, 0, self.device(), self, total_size, data);
+
+ LAUNCH_GPU_KERNEL((ScanKernel<Self, Reducer>), num_blocks, block_size, 0, self.device(), self, total_size, data);
}
};
-#endif // EIGEN_USE_GPU && __CUDACC__
+#endif // EIGEN_USE_GPU && (EIGEN_GPUCC)
} // end namespace Eigen
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorShuffling.h b/unsupported/Eigen/CXX11/src/Tensor/TensorShuffling.h
index edc9dd3f3..416948765 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorShuffling.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorShuffling.h
@@ -31,6 +31,7 @@ struct traits<TensorShufflingOp<Shuffle, XprType> > : public traits<XprType>
typedef typename remove_reference<Nested>::type _Nested;
static const int NumDimensions = XprTraits::NumDimensions;
static const int Layout = XprTraits::Layout;
+ typedef typename XprTraits::PointerType PointerType;
};
template<typename Shuffle, typename XprType>
@@ -60,8 +61,8 @@ class TensorShufflingOp : public TensorBase<TensorShufflingOp<Shuffle, XprType>
typedef typename Eigen::internal::traits<TensorShufflingOp>::StorageKind StorageKind;
typedef typename Eigen::internal::traits<TensorShufflingOp>::Index Index;
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorShufflingOp(const XprType& expr, const Shuffle& shuffle)
- : m_xpr(expr), m_shuffle(shuffle) {}
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorShufflingOp(const XprType& expr, const Shuffle& shfl)
+ : m_xpr(expr), m_shuffle(shfl) {}
EIGEN_DEVICE_FUNC
const Shuffle& shufflePermutation() const { return m_shuffle; }
@@ -99,6 +100,7 @@ class TensorShufflingOp : public TensorBase<TensorShufflingOp<Shuffle, XprType>
template<typename Shuffle, typename ArgType, typename Device>
struct TensorEvaluator<const TensorShufflingOp<Shuffle, ArgType>, Device>
{
+ typedef TensorEvaluator<const TensorShufflingOp<Shuffle, ArgType>, Device> Self;
typedef TensorShufflingOp<Shuffle, ArgType> XprType;
typedef typename XprType::Index Index;
static const int NumDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value;
@@ -106,45 +108,65 @@ struct TensorEvaluator<const TensorShufflingOp<Shuffle, ArgType>, Device>
typedef typename XprType::Scalar Scalar;
typedef typename XprType::CoeffReturnType CoeffReturnType;
typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
- static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
+ static const int PacketSize = PacketType<CoeffReturnType, Device>::size;
enum {
- IsAligned = false,
- PacketAccess = (internal::packet_traits<Scalar>::size > 1),
- Layout = TensorEvaluator<ArgType, Device>::Layout,
- CoordAccess = false, // to be implemented
- RawAccess = false
+ IsAligned = false,
+ PacketAccess = (PacketType<CoeffReturnType, Device>::size > 1),
+ BlockAccess = TensorEvaluator<ArgType, Device>::BlockAccess,
+ PreferBlockAccess = true,
+ Layout = TensorEvaluator<ArgType, Device>::Layout,
+ CoordAccess = false, // to be implemented
+ RawAccess = false
};
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
- : m_impl(op.expression(), device), m_shuffle(op.shufflePermutation())
+ typedef typename internal::remove_const<Scalar>::type ScalarNoConst;
+
+ typedef internal::TensorBlock<ScalarNoConst, Index, NumDims, Layout>
+ TensorBlock;
+ typedef internal::TensorBlockReader<ScalarNoConst, Index, NumDims, Layout>
+ TensorBlockReader;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op,
+ const Device& device)
+ : m_device(device),
+ m_impl(op.expression(), device),
+ m_shuffle(op.shufflePermutation())
{
const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
const Shuffle& shuffle = op.shufflePermutation();
+ m_is_identity = true;
for (int i = 0; i < NumDims; ++i) {
m_dimensions[i] = input_dims[shuffle[i]];
+ m_inverseShuffle[shuffle[i]] = i;
+ if (m_is_identity && shuffle[i] != i) {
+ m_is_identity = false;
+ }
}
- array<Index, NumDims> inputStrides;
-
if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
- inputStrides[0] = 1;
+ m_unshuffledInputStrides[0] = 1;
m_outputStrides[0] = 1;
+
for (int i = 1; i < NumDims; ++i) {
- inputStrides[i] = inputStrides[i - 1] * input_dims[i - 1];
+ m_unshuffledInputStrides[i] =
+ m_unshuffledInputStrides[i - 1] * input_dims[i - 1];
m_outputStrides[i] = m_outputStrides[i - 1] * m_dimensions[i - 1];
+ m_fastOutputStrides[i] = internal::TensorIntDivisor<Index>(m_outputStrides[i]);
}
} else {
- inputStrides[NumDims - 1] = 1;
+ m_unshuffledInputStrides[NumDims - 1] = 1;
m_outputStrides[NumDims - 1] = 1;
for (int i = NumDims - 2; i >= 0; --i) {
- inputStrides[i] = inputStrides[i + 1] * input_dims[i + 1];
+ m_unshuffledInputStrides[i] =
+ m_unshuffledInputStrides[i + 1] * input_dims[i + 1];
m_outputStrides[i] = m_outputStrides[i + 1] * m_dimensions[i + 1];
+ m_fastOutputStrides[i] = internal::TensorIntDivisor<Index>(m_outputStrides[i]);
}
}
for (int i = 0; i < NumDims; ++i) {
- m_inputStrides[i] = inputStrides[shuffle[i]];
+ m_inputStrides[i] = m_unshuffledInputStrides[shuffle[i]];
}
}
@@ -160,32 +182,155 @@ struct TensorEvaluator<const TensorShufflingOp<Shuffle, ArgType>, Device>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
{
- return m_impl.coeff(srcCoeff(index));
+ if (m_is_identity) {
+ return m_impl.coeff(index);
+ } else {
+ return m_impl.coeff(srcCoeff(index));
+ }
}
+ template <int LoadMode, typename Self, bool ImplPacketAccess>
+ struct PacketLoader {
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ static PacketReturnType Run(const Self& self, Index index) {
+ EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize];
+ for (int i = 0; i < PacketSize; ++i) {
+ values[i] = self.coeff(index + i);
+ }
+ PacketReturnType rslt = internal::pload<PacketReturnType>(values);
+ return rslt;
+ }
+ };
+
+ template<int LoadMode, typename Self>
+ struct PacketLoader<LoadMode, Self, true> {
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ static PacketReturnType Run(const Self& self, Index index) {
+ if (self.m_is_identity) {
+ return self.m_impl.template packet<LoadMode>(index);
+ } else {
+ EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize];
+ for (int i = 0; i < PacketSize; ++i) {
+ values[i] = self.coeff(index + i);
+ }
+ PacketReturnType rslt = internal::pload<PacketReturnType>(values);
+ return rslt;
+ }
+ }
+ };
+
template<int LoadMode>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
{
EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
- eigen_assert(index+PacketSize-1 < dimensions().TotalSize());
+ eigen_assert(index + PacketSize - 1 < dimensions().TotalSize());
+ return PacketLoader<LoadMode, Self, TensorEvaluator<ArgType, Device>::PacketAccess>::Run(*this, index);
+ }
- EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize];
- for (int i = 0; i < PacketSize; ++i) {
- values[i] = coeff(index+i);
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void getResourceRequirements(
+ std::vector<internal::TensorOpResourceRequirements>* resources) const {
+ Eigen::Index block_total_size_max = numext::maxi<Eigen::Index>(
+ 1, m_device.firstLevelCacheSize() / sizeof(Scalar));
+ resources->push_back(internal::TensorOpResourceRequirements(
+ internal::kUniformAllDims, block_total_size_max));
+ m_impl.getResourceRequirements(resources);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void block(
+ TensorBlock* output_block) const {
+ if (m_impl.data() != NULL) {
+ // Fast path: we have direct access to the data, so shuffle as we read.
+ TensorBlockReader::Run(output_block,
+ srcCoeff(output_block->first_coeff_index()),
+ m_inverseShuffle,
+ m_unshuffledInputStrides,
+ m_impl.data());
+ return;
+ }
+
+ // Slow path: read unshuffled block from the input and shuffle in-place.
+ // Initialize input block sizes using input-to-output shuffle map.
+ DSizes<Index, NumDims> input_block_sizes;
+ for (Index i = 0; i < NumDims; ++i) {
+ input_block_sizes[i] = output_block->block_sizes()[m_inverseShuffle[i]];
+ }
+
+ // Calculate input block strides.
+ DSizes<Index, NumDims> input_block_strides;
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ input_block_strides[0] = 1;
+ for (int i = 1; i < NumDims; ++i) {
+ input_block_strides[i] =
+ input_block_strides[i - 1] * input_block_sizes[i - 1];
+ }
+ } else {
+ input_block_strides[NumDims - 1] = 1;
+ for (int i = NumDims - 2; i >= 0; --i) {
+ input_block_strides[i] =
+ input_block_strides[i + 1] * input_block_sizes[i + 1];
+ }
+ }
+
+ // Read input block.
+ TensorBlock input_block(srcCoeff(output_block->first_coeff_index()),
+ input_block_sizes,
+ input_block_strides,
+ Dimensions(m_unshuffledInputStrides),
+ output_block->data());
+
+ m_impl.block(&input_block);
+
+    // Naive in-place shuffle: random I/O, but the block size is O(L1 cache size).
+ // TODO(andydavis) Improve the performance of this in-place shuffle.
+ const Index total_size = input_block_sizes.TotalSize();
+ std::vector<bool> bitmap(total_size, false);
+ ScalarNoConst* data = const_cast<ScalarNoConst*>(output_block->data());
+ const DSizes<Index, NumDims>& output_block_strides =
+ output_block->block_strides();
+ for (Index input_index = 0; input_index < total_size; ++input_index) {
+ if (bitmap[input_index]) {
+ // Coefficient at this index has already been shuffled.
+ continue;
+ }
+
+ Index output_index = GetBlockOutputIndex(input_index, input_block_strides,
+ output_block_strides);
+ if (output_index == input_index) {
+ // Coefficient already in place.
+ bitmap[output_index] = true;
+ continue;
+ }
+
+      // The following loop starts at 'input_index' and shuffles
+      // coefficients into their shuffled location at 'output_index'.
+      // It walks through the array shuffling coefficients by following
+      // the shuffle cycle starting and ending at 'input_index'.
+ ScalarNoConst evicted_value;
+ ScalarNoConst shuffled_value = data[input_index];
+ do {
+ evicted_value = data[output_index];
+ data[output_index] = shuffled_value;
+ shuffled_value = evicted_value;
+ bitmap[output_index] = true;
+ output_index = GetBlockOutputIndex(output_index, input_block_strides,
+ output_block_strides);
+ } while (output_index != input_index);
+
+ data[output_index] = shuffled_value;
+ bitmap[output_index] = true;
}
- PacketReturnType rslt = internal::pload<PacketReturnType>(values);
- return rslt;
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
- const double compute_cost = NumDims * (2 * TensorOpCost::AddCost<Index>() +
+ const double compute_cost = m_is_identity ? TensorOpCost::AddCost<Index>() :
+ NumDims * (2 * TensorOpCost::AddCost<Index>() +
2 * TensorOpCost::MulCost<Index>() +
TensorOpCost::DivCost<Index>());
return m_impl.costPerCoeff(vectorized) +
- TensorOpCost(0, 0, compute_cost, false /* vectorized */, PacketSize);
+ TensorOpCost(0, 0, compute_cost, m_is_identity /* vectorized */, PacketSize);
}
- EIGEN_DEVICE_FUNC Scalar* data() const { return NULL; }
+ EIGEN_DEVICE_FUNC typename Eigen::internal::traits<XprType>::PointerType data() const { return NULL; }
// required by sycl
EIGEN_STRONG_INLINE const Shuffle& shufflePermutation() const {return m_shuffle;}
@@ -193,27 +338,58 @@ struct TensorEvaluator<const TensorShufflingOp<Shuffle, ArgType>, Device>
EIGEN_STRONG_INLINE const TensorEvaluator<ArgType, Device>& impl() const {return m_impl;}
protected:
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index GetBlockOutputIndex(
+ Index input_index,
+ const DSizes<Index, NumDims>& input_block_strides,
+ const DSizes<Index, NumDims>& output_block_strides) const {
+ Index output_index = 0;
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ for (int i = NumDims - 1; i > 0; --i) {
+ const Index idx = input_index / input_block_strides[i];
+ output_index += idx * output_block_strides[m_inverseShuffle[i]];
+ input_index -= idx * input_block_strides[i];
+ }
+ return output_index + input_index *
+ output_block_strides[m_inverseShuffle[0]];
+ } else {
+ for (int i = 0; i < NumDims - 1; ++i) {
+ const Index idx = input_index / input_block_strides[i];
+ output_index += idx * output_block_strides[m_inverseShuffle[i]];
+ input_index -= idx * input_block_strides[i];
+ }
+ return output_index + input_index *
+ output_block_strides[m_inverseShuffle[NumDims - 1]];
+ }
+ }
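GetBlockOutputIndex is ordinary stride arithmetic: decompose the linear input index with the input block strides, then re-linearise each coordinate with the stride of the output dimension it maps to. A self-contained sketch with one worked value (hypothetical helper, column-major only, not Eigen's signature):

#include <array>
#include <cassert>
#include <cstddef>

// Decompose a column-major linear index with in_strides, then rebuild it with
// the stride of the output dimension each input dimension maps to (perm[i]).
template <std::size_t N>
std::size_t remap_index(std::size_t index,
                        const std::array<std::size_t, N>& in_strides,
                        const std::array<std::size_t, N>& out_strides,
                        const std::array<std::size_t, N>& perm) {
  std::size_t out = 0;
  for (std::size_t i = N - 1; i > 0; --i) {   // largest stride first (column-major)
    const std::size_t coord = index / in_strides[i];
    out += coord * out_strides[perm[i]];
    index -= coord * in_strides[i];
  }
  return out + index * out_strides[perm[0]];
}

int main() {
  // A 2x3 column-major block (strides {1,2}) with its two dimensions swapped
  // becomes a 3x2 block (strides {1,3}). Linear index 3 is coordinate (1,1),
  // which re-linearises to 1*1 + 1*3 = 4 in the shuffled block.
  assert((remap_index<2>(3, {1, 2}, {1, 3}, {1, 0}) == 4));
  return 0;
}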
+
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index srcCoeff(Index index) const {
Index inputIndex = 0;
if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
for (int i = NumDims - 1; i > 0; --i) {
- const Index idx = index / m_outputStrides[i];
+ const Index idx = index / m_fastOutputStrides[i];
inputIndex += idx * m_inputStrides[i];
index -= idx * m_outputStrides[i];
}
return inputIndex + index * m_inputStrides[0];
} else {
for (int i = 0; i < NumDims - 1; ++i) {
- const Index idx = index / m_outputStrides[i];
+ const Index idx = index / m_fastOutputStrides[i];
inputIndex += idx * m_inputStrides[i];
index -= idx * m_outputStrides[i];
}
return inputIndex + index * m_inputStrides[NumDims - 1];
}
}
+
Dimensions m_dimensions;
+ bool m_is_identity;
+ array<Index, NumDims> m_inverseShuffle;
array<Index, NumDims> m_outputStrides;
+ array<internal::TensorIntDivisor<Index>, NumDims> m_fastOutputStrides;
array<Index, NumDims> m_inputStrides;
+ array<Index, NumDims> m_unshuffledInputStrides;
+
+ const Device& m_device;
TensorEvaluator<ArgType, Device> m_impl;
/// required by sycl
Shuffle m_shuffle;
@@ -234,14 +410,24 @@ struct TensorEvaluator<TensorShufflingOp<Shuffle, ArgType>, Device>
typedef typename XprType::Scalar Scalar;
typedef typename XprType::CoeffReturnType CoeffReturnType;
typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
- static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
+ static const int PacketSize = PacketType<CoeffReturnType, Device>::size;
enum {
- IsAligned = false,
- PacketAccess = (internal::packet_traits<Scalar>::size > 1),
- RawAccess = false
+ IsAligned = false,
+ PacketAccess = (PacketType<CoeffReturnType, Device>::size > 1),
+ BlockAccess = TensorEvaluator<ArgType, Device>::BlockAccess,
+ PreferBlockAccess = true,
+ Layout = TensorEvaluator<ArgType, Device>::Layout,
+ RawAccess = false
};
+ typedef typename internal::remove_const<Scalar>::type ScalarNoConst;
+
+ typedef internal::TensorBlock<ScalarNoConst, Index, NumDims, Layout>
+ TensorBlock;
+ typedef internal::TensorBlockWriter<ScalarNoConst, Index, NumDims, Layout>
+ TensorBlockWriter;
+
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
: Base(op, device)
{ }
@@ -262,6 +448,14 @@ struct TensorEvaluator<TensorShufflingOp<Shuffle, ArgType>, Device>
this->coeffRef(index+i) = values[i];
}
}
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void writeBlock(
+ const TensorBlock& block) {
+ eigen_assert(this->m_impl.data() != NULL);
+ TensorBlockWriter::Run(block, this->srcCoeff(block.first_coeff_index()),
+ this->m_inverseShuffle,
+ this->m_unshuffledInputStrides, this->m_impl.data());
+ }
};
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorStorage.h b/unsupported/Eigen/CXX11/src/Tensor/TensorStorage.h
index 2854a4a17..e6a666f78 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorStorage.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorStorage.h
@@ -31,12 +31,12 @@ namespace Eigen {
*
* \sa Tensor
*/
-template<typename T, typename Dimensions, int Options_> class TensorStorage;
+template<typename T, typename Dimensions, int Options> class TensorStorage;
// Pure fixed-size storage
-template<typename T, int Options_, typename FixedDimensions>
-class TensorStorage<T, FixedDimensions, Options_>
+template<typename T, typename FixedDimensions, int Options_>
+class TensorStorage
{
private:
static const std::size_t Size = FixedDimensions::total_size;
@@ -66,7 +66,7 @@ class TensorStorage<T, FixedDimensions, Options_>
// pure dynamic
-template<typename T, int Options_, typename IndexType, int NumIndices_>
+template<typename T, typename IndexType, int NumIndices_, int Options_>
class TensorStorage<T, DSizes<IndexType, NumIndices_>, Options_>
{
public:
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorStriding.h b/unsupported/Eigen/CXX11/src/Tensor/TensorStriding.h
index 2237140e7..221dc96c9 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorStriding.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorStriding.h
@@ -31,6 +31,7 @@ struct traits<TensorStridingOp<Strides, XprType> > : public traits<XprType>
typedef typename remove_reference<Nested>::type _Nested;
static const int NumDimensions = XprTraits::NumDimensions;
static const int Layout = XprTraits::Layout;
+ typedef typename XprTraits::PointerType PointerType;
};
template<typename Strides, typename XprType>
@@ -106,11 +107,13 @@ struct TensorEvaluator<const TensorStridingOp<Strides, ArgType>, Device>
typedef typename XprType::Scalar Scalar;
typedef typename XprType::CoeffReturnType CoeffReturnType;
typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
- static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
+ static const int PacketSize = PacketType<CoeffReturnType, Device>::size;
enum {
IsAligned = /*TensorEvaluator<ArgType, Device>::IsAligned*/false,
PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
+ BlockAccess = false,
+ PreferBlockAccess = false,
Layout = TensorEvaluator<ArgType, Device>::Layout,
CoordAccess = false, // to be implemented
RawAccess = false
@@ -222,7 +225,7 @@ struct TensorEvaluator<const TensorStridingOp<Strides, ArgType>, Device>
TensorOpCost(0, 0, compute_cost, vectorized, PacketSize);
}
- EIGEN_DEVICE_FUNC Scalar* data() const { return NULL; }
+ EIGEN_DEVICE_FUNC typename Eigen::internal::traits<XprType>::PointerType data() const { return NULL; }
/// required by sycl in order to extract the accessor
const TensorEvaluator<ArgType, Device>& impl() const { return m_impl; }
@@ -272,6 +275,8 @@ struct TensorEvaluator<TensorStridingOp<Strides, ArgType>, Device>
enum {
IsAligned = /*TensorEvaluator<ArgType, Device>::IsAligned*/false,
PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
+ BlockAccess = false,
+ PreferBlockAccess = false,
Layout = TensorEvaluator<ArgType, Device>::Layout,
CoordAccess = false, // to be implemented
RawAccess = false
@@ -284,7 +289,7 @@ struct TensorEvaluator<TensorStridingOp<Strides, ArgType>, Device>
typedef typename XprType::Scalar Scalar;
typedef typename XprType::CoeffReturnType CoeffReturnType;
typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
- static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
+ static const int PacketSize = PacketType<CoeffReturnType, Device>::size;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index)
{
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorSycl.h b/unsupported/Eigen/CXX11/src/Tensor/TensorSycl.h
index 9d5a6d4c1..7b8bd2df7 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorSycl.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorSycl.h
@@ -32,12 +32,28 @@ struct MakeLocalPointer {
namespace Eigen {
-namespace TensorSycl {
+ template<typename StrideDims, typename XprType> class TensorTupleReducerDeviceOp;
+ template<typename StrideDims, typename ArgType> struct TensorEvaluator<const TensorTupleReducerDeviceOp<StrideDims, ArgType>, SyclKernelDevice>;
namespace internal {
- template<typename CoeffReturnType, typename OP, typename OutputAccessor, typename InputAccessor, typename LocalAccessor> struct GenericKernelReducer;
+#ifdef __SYCL_DEVICE_ONLY__
+template<typename A, typename B> struct TypeConversion {
+ template<typename T>
+ static typename MakeGlobalPointer<A>::Type get_address_space_pointer(typename MakeGlobalPointer<T>::Type p);
+ template<typename T>
+ static typename MakeLocalPointer<A>::Type get_address_space_pointer(typename MakeLocalPointer<T>::Type p);
+
+ template<typename T>
+ static A* get_address_space_pointer(T* p);
+ typedef decltype(get_address_space_pointer(B())) type;
+};
+#endif
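TypeConversion above computes its result type purely from overload resolution: the get_address_space_pointer overloads are only declared, and decltype on a call expression picks the winner. A minimal sketch of the same idiom outside SYCL (hypothetical names, no address-space wrappers involved):

#include <type_traits>

template <typename A, typename B>
struct PickPointer {
  // Declarations are enough: decltype never evaluates the call.
  static const A* select(const B*);
  static A*       select(B*);
  typedef decltype(select(static_cast<B*>(nullptr))) type;  // overload resolution yields A*
};

static_assert(std::is_same<PickPointer<float, int>::type, float*>::value,
              "a plain int* selects the non-const overload, so type is float*");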
+}
+namespace TensorSycl {
+namespace internal {
+ template<typename CoeffReturnType, typename OP, typename OutputAccessor, typename InputAccessor, typename LocalAccessor> struct GenericKernelReducer;
/// This struct is used for special expression nodes with no operations (for example assign and selectOP).
struct NoOP;
@@ -48,6 +64,13 @@ template<typename T> struct GetType<false, T>{
typedef T Type;
};
+template <bool Conds, size_t X , size_t Y > struct ValueCondition {
+ static constexpr size_t Res =X;
+};
+template<size_t X, size_t Y> struct ValueCondition<false, X, Y> {
+ static constexpr size_t Res =Y;
+};
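ValueCondition is a small compile-time select; the SYCL expression constructors below use it as ValueCondition<NumDimensions == 0, 1, NumDimensions>::Res to clamp a zero-dimensional reduction to a rank-1 tensor map. A quick sanity sketch (assuming the struct above is visible in the current namespace):

static_assert(ValueCondition<true,  1, 7>::Res == 1, "true case picks X");
static_assert(ValueCondition<false, 1, 7>::Res == 7, "false case picks Y");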
+
}
}
}
@@ -80,6 +103,9 @@ template<typename T> struct GetType<false, T>{
/// this is used for extracting tensor reduction
#include "TensorReductionSycl.h"
+// TensorArgMaxSycl.h
+#include "TensorArgMaxSycl.h"
+
/// this is used for extracting tensor convolution
#include "TensorConvolutionSycl.h"
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclConvertToDeviceExpression.h b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclConvertToDeviceExpression.h
index ee8f3c9c2..d6ac7b91f 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclConvertToDeviceExpression.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclConvertToDeviceExpression.h
@@ -91,27 +91,37 @@ ASSIGNCONVERT(, false)
#undef ASSIGNCONVERT
/// specialisation of the \ref ConvertToDeviceExpression struct when the node
-/// type is either TensorForcedEvalOp or TensorEvalToOp
+/// type is TensorEvalToOp
#define KERNELBROKERCONVERT(CVQual, Res, ExprNode)\
template <typename Expr>\
struct ConvertToDeviceExpression<CVQual ExprNode<Expr> > \
: DeviceConvertor<ExprNode, Res, Expr>{};
-/// specialisation of the \ref ConvertToDeviceExpression struct when the node type is TensorForcedEvalOp
-#define KERNELBROKERCONVERTFORCEDEVAL(CVQual)\
+
+KERNELBROKERCONVERT(const, true, TensorEvalToOp)
+KERNELBROKERCONVERT(, false, TensorEvalToOp)
+#undef KERNELBROKERCONVERT
+
+/// specialisation of the \ref ConvertToDeviceExpression struct when the node types are TensorForcedEvalOp, TensorLayoutSwapOp and TensorIndexTupleOp
+#define KERNELBROKERCONVERTFORCEDEVALLAYOUTSWAPINDEXTUPLEOP(CVQual, ExprNode)\
template <typename Expr>\
-struct ConvertToDeviceExpression<CVQual TensorForcedEvalOp<Expr> > {\
- typedef CVQual TensorForcedEvalOp< typename ConvertToDeviceExpression<Expr>::Type> Type;\
+struct ConvertToDeviceExpression<CVQual ExprNode<Expr> > {\
+ typedef CVQual ExprNode< typename ConvertToDeviceExpression<Expr>::Type> Type;\
};
-KERNELBROKERCONVERTFORCEDEVAL(const)
-KERNELBROKERCONVERTFORCEDEVAL()
-#undef KERNELBROKERCONVERTFORCEDEVAL
+// TensorForcedEvalOp
+KERNELBROKERCONVERTFORCEDEVALLAYOUTSWAPINDEXTUPLEOP(const,TensorForcedEvalOp)
+KERNELBROKERCONVERTFORCEDEVALLAYOUTSWAPINDEXTUPLEOP(,TensorForcedEvalOp)
-KERNELBROKERCONVERT(const, true, TensorEvalToOp)
-KERNELBROKERCONVERT(, false, TensorEvalToOp)
-#undef KERNELBROKERCONVERT
+// TensorLayoutSwapOp
+KERNELBROKERCONVERTFORCEDEVALLAYOUTSWAPINDEXTUPLEOP(const,TensorLayoutSwapOp)
+KERNELBROKERCONVERTFORCEDEVALLAYOUTSWAPINDEXTUPLEOP(,TensorLayoutSwapOp)
+
+//TensorIndexTupleOp
+KERNELBROKERCONVERTFORCEDEVALLAYOUTSWAPINDEXTUPLEOP(const,TensorIndexTupleOp)
+KERNELBROKERCONVERTFORCEDEVALLAYOUTSWAPINDEXTUPLEOP(,TensorIndexTupleOp)
+#undef KERNELBROKERCONVERTFORCEDEVALLAYOUTSWAPINDEXTUPLEOP
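For reference, a single expansion of the macro above, e.g. KERNELBROKERCONVERTFORCEDEVALLAYOUTSWAPINDEXTUPLEOP(const, TensorLayoutSwapOp), is just the one-child recursion (sketch of the preprocessed form):

template <typename Expr>
struct ConvertToDeviceExpression<const TensorLayoutSwapOp<Expr> > {
  typedef const TensorLayoutSwapOp<typename ConvertToDeviceExpression<Expr>::Type> Type;
};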
/// specialisation of the \ref ConvertToDeviceExpression struct when the node type is TensorReductionOp
#define KERNELBROKERCONVERTREDUCTION(CVQual)\
@@ -124,6 +134,18 @@ KERNELBROKERCONVERTREDUCTION(const)
KERNELBROKERCONVERTREDUCTION()
#undef KERNELBROKERCONVERTREDUCTION
+/// specialisation of the \ref ConvertToDeviceExpression struct when the node type is TensorTupleReducerOp
+#define KERNELBROKERCONVERTTUPLEREDUCTION(CVQual)\
+template <typename OP, typename Dim, typename subExpr>\
+struct ConvertToDeviceExpression<CVQual TensorTupleReducerOp<OP, Dim, subExpr> > {\
+ typedef CVQual TensorTupleReducerOp<OP, Dim, typename ConvertToDeviceExpression<subExpr>::Type> Type;\
+};
+
+KERNELBROKERCONVERTTUPLEREDUCTION(const)
+KERNELBROKERCONVERTTUPLEREDUCTION()
+#undef KERNELBROKERCONVERTTUPLEREDUCTION
+
+//TensorSlicingOp
#define KERNELBROKERCONVERTSLICEOP(CVQual)\
template<typename StartIndices, typename Sizes, typename XprType>\
struct ConvertToDeviceExpression<CVQual TensorSlicingOp <StartIndices, Sizes, XprType> >{\
@@ -134,7 +156,7 @@ KERNELBROKERCONVERTSLICEOP(const)
KERNELBROKERCONVERTSLICEOP()
#undef KERNELBROKERCONVERTSLICEOP
-
+//TensorStridingSlicingOp
#define KERNELBROKERCONVERTERSLICESTRIDEOP(CVQual)\
template<typename StartIndices, typename StopIndices, typename Strides, typename XprType>\
struct ConvertToDeviceExpression<CVQual TensorStridingSlicingOp<StartIndices, StopIndices, Strides, XprType> >{\
@@ -145,7 +167,6 @@ KERNELBROKERCONVERTERSLICESTRIDEOP(const)
KERNELBROKERCONVERTERSLICESTRIDEOP()
#undef KERNELBROKERCONVERTERSLICESTRIDEOP
-
/// specialisation of the \ref ConvertToDeviceExpression struct when the node type is TensorChippingOp
#define KERNELBROKERCONVERTCHIPPINGOP(CVQual)\
template <DenseIndex DimId, typename Expr>\
@@ -156,7 +177,26 @@ KERNELBROKERCONVERTCHIPPINGOP(const)
KERNELBROKERCONVERTCHIPPINGOP()
#undef KERNELBROKERCONVERTCHIPPINGOP
+/// specialisation of the \ref ConvertToDeviceExpression struct when the node type is TensorImagePatchOp
+#define KERNELBROKERCONVERTIMAGEPATCHOP(CVQual)\
+template<DenseIndex Rows, DenseIndex Cols, typename XprType>\
+struct ConvertToDeviceExpression<CVQual TensorImagePatchOp<Rows, Cols, XprType> >{\
+ typedef CVQual TensorImagePatchOp<Rows, Cols, typename ConvertToDeviceExpression<XprType>::Type> Type;\
+};
+KERNELBROKERCONVERTIMAGEPATCHOP(const)
+KERNELBROKERCONVERTIMAGEPATCHOP()
+#undef KERNELBROKERCONVERTIMAGEPATCHOP
+
+/// specialisation of the \ref ConvertToDeviceExpression struct when the node type is TensorVolumePatchOp
+#define KERNELBROKERCONVERTVOLUMEPATCHOP(CVQual)\
+template<DenseIndex Planes, DenseIndex Rows, DenseIndex Cols, typename XprType>\
+struct ConvertToDeviceExpression<CVQual TensorVolumePatchOp<Planes, Rows, Cols, XprType> >{\
+  typedef CVQual TensorVolumePatchOp<Planes, Rows, Cols, typename ConvertToDeviceExpression<XprType>::Type> Type;\
+};
+KERNELBROKERCONVERTVOLUMEPATCHOP(const)
+KERNELBROKERCONVERTVOLUMEPATCHOP()
+#undef KERNELBROKERCONVERTVOLUMEPATCHOP
} // namespace internal
} // namespace TensorSycl
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExprConstructor.h b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExprConstructor.h
index 3b83b1d2c..cbae4ea1d 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExprConstructor.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExprConstructor.h
@@ -65,7 +65,6 @@ CVQual PlaceHolder<CVQual TensorMap<T, Options_, MakePointer_>, N>, Params...>{\
: expr(Type(ConvertToActualTypeSycl(typename Type::Scalar, utility::tuple::get<N>(t)), fd.dimensions())){}\
};
-
TENSORMAP(const)
TENSORMAP()
#undef TENSORMAP
@@ -83,6 +82,7 @@ CVQual PlaceHolder<CVQual TensorMap<TensorFixedSize<Scalar_, Dimensions_, Option
ExprConstructor(FuncDetector &, const utility::tuple::Tuple<Params...> &t)\
: expr(DeviceFixedSizeTensor<Type,Dimensions_>::instantiate(utility::tuple::get<N>(t))){}\
};
+
TENSORMAPFIXEDSIZE(const)
TENSORMAPFIXEDSIZE()
#undef TENSORMAPFIXEDSIZE
@@ -189,9 +189,6 @@ struct ExprConstructor<CVQual TensorAssignOp<OrigLHSExpr, OrigRHSExpr>, CVQual
ASSIGN()
#undef ASSIGN
-
-
-
/// specialisation of the \ref ExprConstructor struct when the node type is
/// const TensorAssignOp
#define CONVERSIONEXPRCONST(CVQual)\
@@ -223,7 +220,7 @@ struct ExprConstructor<CVQual TensorEvalToOp<OrigExpr, MakeGlobalPointer>, CVQua
Type expr;\
template <typename FuncDetector>\
ExprConstructor(FuncDetector &funcD, const utility::tuple::Tuple<Params...> &t)\
- : nestedExpression(funcD.rhsExpr, t), buffer(t), expr(buffer.expr, nestedExpression.expr) {}\
+ : nestedExpression(funcD.xprExpr, t), buffer(t), expr(buffer.expr, nestedExpression.expr) {}\
};
EVALTO(const)
@@ -236,8 +233,12 @@ EVALTO()
template <typename OrigExpr, typename DevExpr, size_t N, typename... Params>\
struct ExprConstructor<CVQual TensorForcedEvalOp<OrigExpr>,\
CVQual PlaceHolder<CVQual TensorForcedEvalOp<DevExpr>, N>, Params...> {\
- typedef CVQual TensorMap<Tensor<typename TensorForcedEvalOp<DevExpr>::Scalar,\
- TensorForcedEvalOp<DevExpr>::NumDimensions, Eigen::internal::traits<TensorForcedEvalOp<DevExpr>>::Layout, typename TensorForcedEvalOp<DevExpr>::Index>, Eigen::internal::traits<TensorForcedEvalOp<DevExpr>>::Layout, MakeGlobalPointer> Type;\
+ typedef TensorForcedEvalOp<OrigExpr> XprType;\
+ typedef CVQual TensorMap<\
+ Tensor<typename XprType::Scalar,XprType::NumDimensions, Eigen::internal::traits<XprType>::Layout,typename XprType::Index>,\
+ Eigen::internal::traits<XprType>::Layout, \
+ MakeGlobalPointer\
+ > Type;\
Type expr;\
template <typename FuncDetector>\
ExprConstructor(FuncDetector &fd, const utility::tuple::Tuple<Params...> &t)\
@@ -248,19 +249,32 @@ FORCEDEVAL(const)
FORCEDEVAL()
#undef FORCEDEVAL
-template <bool Conds, size_t X , size_t Y > struct ValueCondition {
- static const size_t Res =X;
-};
-template<size_t X, size_t Y> struct ValueCondition<false, X , Y> {
- static const size_t Res =Y;
+#define TENSORCUSTOMUNARYOP(CVQual)\
+template <typename CustomUnaryFunc, typename OrigExpr, typename DevExpr, size_t N, typename... Params>\
+struct ExprConstructor<CVQual TensorCustomUnaryOp<CustomUnaryFunc, OrigExpr>,\
+CVQual PlaceHolder<CVQual TensorCustomUnaryOp<CustomUnaryFunc, DevExpr>, N>, Params...> {\
+ typedef TensorCustomUnaryOp<CustomUnaryFunc, OrigExpr> XprType;\
+ typedef CVQual TensorMap<\
+ Tensor<typename XprType::Scalar,XprType::NumDimensions, Eigen::internal::traits<XprType>::Layout,typename XprType::Index>,\
+ Eigen::internal::traits<XprType>::Layout, \
+ MakeGlobalPointer\
+ > Type;\
+ Type expr;\
+ template <typename FuncDetector>\
+ ExprConstructor(FuncDetector &fd, const utility::tuple::Tuple<Params...> &t)\
+ : expr(Type(ConvertToActualTypeSycl(typename Type::Scalar, utility::tuple::get<N>(t)), fd.dimensions())) {}\
};
+TENSORCUSTOMUNARYOP(const)
+TENSORCUSTOMUNARYOP()
+#undef TENSORCUSTOMUNARYOP
+
/// specialisation of the \ref ExprConstructor struct when the node type is TensorReductionOp
#define SYCLREDUCTIONEXPR(CVQual)\
template <typename OP, typename Dim, typename OrigExpr, typename DevExpr, size_t N, typename... Params>\
struct ExprConstructor<CVQual TensorReductionOp<OP, Dim, OrigExpr, MakeGlobalPointer>,\
CVQual PlaceHolder<CVQual TensorReductionOp<OP, Dim, DevExpr>, N>, Params...> {\
- static const size_t NumIndices= ValueCondition< TensorReductionOp<OP, Dim, DevExpr, MakeGlobalPointer>::NumDimensions==0, 1, TensorReductionOp<OP, Dim, DevExpr, MakeGlobalPointer>::NumDimensions >::Res;\
+ static const auto NumIndices= ValueCondition< TensorReductionOp<OP, Dim, DevExpr, MakeGlobalPointer>::NumDimensions==0, 1, TensorReductionOp<OP, Dim, DevExpr, MakeGlobalPointer>::NumDimensions >::Res;\
typedef CVQual TensorMap<Tensor<typename TensorReductionOp<OP, Dim, DevExpr, MakeGlobalPointer>::Scalar,\
NumIndices, Eigen::internal::traits<TensorReductionOp<OP, Dim, DevExpr, MakeGlobalPointer>>::Layout, typename TensorReductionOp<OP, Dim, DevExpr>::Index>, Eigen::internal::traits<TensorReductionOp<OP, Dim, DevExpr, MakeGlobalPointer>>::Layout, MakeGlobalPointer> Type;\
Type expr;\
@@ -273,32 +287,67 @@ SYCLREDUCTIONEXPR(const)
SYCLREDUCTIONEXPR()
#undef SYCLREDUCTIONEXPR
+/// specialisation of the \ref ExprConstructor struct when the node type is TensorTupleReducerOp
+/// use TensorReductionOp instead of TensorTupleReducerOp to build the tensor map, because the tensor map is the output of the underlying reduction.
+#define SYCLTUPLEREDUCTIONEXPR(CVQual)\
+template <typename OP, typename Dim, typename OrigExpr, typename DevExpr, size_t N, typename... Params>\
+struct ExprConstructor<CVQual TensorTupleReducerOp<OP, Dim, OrigExpr>,\
+CVQual PlaceHolder<CVQual TensorTupleReducerOp<OP, Dim, DevExpr>, N>, Params...> {\
+ static const auto NumRedDims= TensorReductionOp<OP, Dim, const TensorIndexTupleOp<OrigExpr> , MakeGlobalPointer>::NumDimensions;\
+ static const auto NumIndices= ValueCondition<NumRedDims==0, 1, NumRedDims>::Res;\
+static const int Layout =static_cast<int>(Eigen::internal::traits<TensorReductionOp<OP, Dim, const TensorIndexTupleOp<OrigExpr>, MakeGlobalPointer>>::Layout);\
+ typedef CVQual TensorMap<\
+ Tensor<typename TensorIndexTupleOp<OrigExpr>::CoeffReturnType,NumIndices, Layout, typename TensorTupleReducerOp<OP, Dim, OrigExpr>::Index>,\
+ Layout,\
+ MakeGlobalPointer\
+ > XprType;\
+ typedef typename TensorEvaluator<const TensorIndexTupleOp<OrigExpr> , SyclKernelDevice>::Dimensions InputDimensions;\
+ static const int NumDims = Eigen::internal::array_size<InputDimensions>::value;\
+ typedef array<Index, NumDims> StrideDims;\
+ typedef const TensorTupleReducerDeviceOp<StrideDims, XprType> Type;\
+ Type expr;\
+ template <typename FuncDetector>\
+ ExprConstructor(FuncDetector &fd, const utility::tuple::Tuple<Params...> &t)\
+ :expr(Type(XprType(ConvertToActualTypeSycl(typename XprType::CoeffReturnType, utility::tuple::get<N>(t)), fd.dimensions()),\
+ fd.return_dim(), fd.strides(), fd.stride_mod(), fd.stride_div())) {\
+ }\
+};
+
+SYCLTUPLEREDUCTIONEXPR(const)
+SYCLTUPLEREDUCTIONEXPR()
+#undef SYCLTUPLEREDUCTIONEXPR
/// specialisation of the \ref ExprConstructor struct when the node type is
-/// TensorContractionOp
-#define SYCLCONTRACTIONCONVOLUTION(CVQual, ExprNode)\
+/// TensorContractionOp, TensorConvolutionOp and TensorCustomBinaryOp
+#define SYCLCONTRACTCONVCUSBIOPS(CVQual, ExprNode)\
template <typename Indices, typename OrigLhsXprType, typename OrigRhsXprType, typename LhsXprType, typename RhsXprType, size_t N, typename... Params>\
struct ExprConstructor<CVQual ExprNode<Indices, OrigLhsXprType, OrigRhsXprType>,\
CVQual PlaceHolder<CVQual ExprNode<Indices, LhsXprType, RhsXprType>, N>, Params...> {\
- static const size_t NumIndices= Eigen::internal::traits<ExprNode<Indices, OrigLhsXprType, OrigRhsXprType> >::NumDimensions;\
- typedef CVQual TensorMap<Tensor<typename ExprNode<Indices, OrigLhsXprType, OrigRhsXprType>::Scalar,\
- NumIndices, Eigen::internal::traits<ExprNode<Indices, OrigRhsXprType, OrigRhsXprType> >::Layout,\
- typename ExprNode<Indices, OrigRhsXprType, OrigRhsXprType>::Index>,\
- Eigen::internal::traits<ExprNode<Indices, OrigRhsXprType, OrigRhsXprType>>::Layout, MakeGlobalPointer> Type;\
+ typedef ExprNode<Indices, OrigLhsXprType, OrigRhsXprType> XprTyp;\
+ static const auto NumIndices= Eigen::internal::traits<XprTyp>::NumDimensions;\
+ typedef CVQual TensorMap<\
+ Tensor<typename XprTyp::Scalar,NumIndices, Eigen::internal::traits<XprTyp>::Layout, typename XprTyp::Index>,\
+ Eigen::internal::traits<XprTyp>::Layout, \
+ MakeGlobalPointer\
+ > Type;\
Type expr;\
template <typename FuncDetector>\
ExprConstructor(FuncDetector &fd, const utility::tuple::Tuple<Params...> &t)\
:expr(Type(ConvertToActualTypeSycl(typename Type::Scalar, utility::tuple::get<N>(t)), fd.dimensions())) {}\
};
-SYCLCONTRACTIONCONVOLUTION(const, TensorContractionOp)
-SYCLCONTRACTIONCONVOLUTION(, TensorContractionOp)
-SYCLCONTRACTIONCONVOLUTION(const, TensorConvolutionOp)
-SYCLCONTRACTIONCONVOLUTION(, TensorConvolutionOp)
-#undef SYCLCONTRACTIONCONVOLUTION
-
-
-
+//TensorContractionOp
+SYCLCONTRACTCONVCUSBIOPS(const, TensorContractionOp)
+SYCLCONTRACTCONVCUSBIOPS(, TensorContractionOp)
+//TensorConvolutionOp
+SYCLCONTRACTCONVCUSBIOPS(const, TensorConvolutionOp)
+SYCLCONTRACTCONVCUSBIOPS(, TensorConvolutionOp)
+//TensorCustomBinaryOp
+SYCLCONTRACTCONVCUSBIOPS(const, TensorCustomBinaryOp)
+SYCLCONTRACTCONVCUSBIOPS(, TensorCustomBinaryOp)
+#undef SYCLCONTRACTCONVCUSBIOPS
+
+//TensorSlicingOp
#define SYCLSLICEOPEXPR(CVQual)\
template<typename StartIndices, typename Sizes, typename OrigXprType, typename XprType, typename... Params>\
struct ExprConstructor<CVQual TensorSlicingOp <StartIndices, Sizes, OrigXprType> , CVQual TensorSlicingOp<StartIndices, Sizes, XprType>, Params... >{\
@@ -315,7 +364,7 @@ SYCLSLICEOPEXPR(const)
SYCLSLICEOPEXPR()
#undef SYCLSLICEOPEXPR
-
+//TensorStridingSlicingOp
#define SYCLSLICESTRIDEOPEXPR(CVQual)\
template<typename StartIndices, typename StopIndices, typename Strides, typename OrigXprType, typename XprType, typename... Params>\
struct ExprConstructor<CVQual TensorStridingSlicingOp<StartIndices, StopIndices, Strides, OrigXprType>, CVQual TensorStridingSlicingOp<StartIndices, StopIndices, Strides, XprType>, Params... >{\
@@ -332,6 +381,7 @@ SYCLSLICESTRIDEOPEXPR(const)
SYCLSLICESTRIDEOPEXPR()
#undef SYCLSLICESTRIDEOPEXPR
+//TensorReshapingOp and TensorShufflingOp
#define SYCLRESHAPEANDSHUFFLEOPEXPRCONST(OPEXPR, CVQual)\
template<typename Param, typename OrigXprType, typename XprType, typename... Params>\
struct ExprConstructor<CVQual OPEXPR <Param, OrigXprType> , CVQual OPEXPR <Param, XprType>, Params... >{\
@@ -344,13 +394,15 @@ struct ExprConstructor<CVQual OPEXPR <Param, OrigXprType> , CVQual OPEXPR <Param
: xprExpr(funcD.xprExpr, t), expr(xprExpr.expr, funcD.param()) {}\
};
+// TensorReshapingOp
SYCLRESHAPEANDSHUFFLEOPEXPRCONST(TensorReshapingOp, const)
SYCLRESHAPEANDSHUFFLEOPEXPRCONST(TensorReshapingOp, )
-
+// TensorShufflingOp
SYCLRESHAPEANDSHUFFLEOPEXPRCONST(TensorShufflingOp, const)
SYCLRESHAPEANDSHUFFLEOPEXPRCONST(TensorShufflingOp, )
#undef SYCLRESHAPEANDSHUFFLEOPEXPRCONST
+//TensorPaddingOp
#define SYCLPADDINGOPEXPRCONST(OPEXPR, CVQual)\
template<typename Param, typename OrigXprType, typename XprType, typename... Params>\
struct ExprConstructor<CVQual OPEXPR <Param, OrigXprType> , CVQual OPEXPR <Param, XprType>, Params... >{\
@@ -363,11 +415,11 @@ struct ExprConstructor<CVQual OPEXPR <Param, OrigXprType> , CVQual OPEXPR <Param
: xprExpr(funcD.xprExpr, t), expr(xprExpr.expr, funcD.param() , funcD.scalar_param()) {}\
};
+//TensorPaddingOp
SYCLPADDINGOPEXPRCONST(TensorPaddingOp, const)
SYCLPADDINGOPEXPRCONST(TensorPaddingOp, )
#undef SYCLPADDINGOPEXPRCONST
-
// TensorChippingOp
#define SYCLTENSORCHIPPINGOPEXPR(CVQual)\
template<DenseIndex DimId, typename OrigXprType, typename XprType, typename... Params>\
@@ -385,6 +437,67 @@ SYCLTENSORCHIPPINGOPEXPR(const)
SYCLTENSORCHIPPINGOPEXPR()
#undef SYCLTENSORCHIPPINGOPEXPR
+// TensorImagePatchOp
+#define SYCLTENSORIMAGEPATCHOPEXPR(CVQual)\
+template<DenseIndex Rows, DenseIndex Cols, typename OrigXprType, typename XprType, typename... Params>\
+struct ExprConstructor<CVQual TensorImagePatchOp<Rows, Cols, OrigXprType>, CVQual TensorImagePatchOp<Rows, Cols, XprType>, Params... > {\
+ typedef ExprConstructor<OrigXprType, XprType, Params...> my_xpr_type;\
+ typedef CVQual TensorImagePatchOp<Rows, Cols, typename my_xpr_type::Type> Type;\
+ my_xpr_type xprExpr;\
+ Type expr;\
+ template <typename FuncDetector>\
+ ExprConstructor(FuncDetector &funcD, const utility::tuple::Tuple<Params...> &t)\
+ : xprExpr(funcD.xprExpr, t), expr(xprExpr.expr, funcD.m_patch_rows, funcD.m_patch_cols, funcD.m_row_strides, funcD.m_col_strides,\
+ funcD.m_in_row_strides, funcD.m_in_col_strides, funcD.m_row_inflate_strides, funcD.m_col_inflate_strides, funcD.m_padding_explicit, \
+ funcD.m_padding_top, funcD.m_padding_bottom, funcD.m_padding_left, funcD.m_padding_right, funcD.m_padding_type, funcD.m_padding_value){}\
+};
+
+SYCLTENSORIMAGEPATCHOPEXPR(const)
+SYCLTENSORIMAGEPATCHOPEXPR()
+#undef SYCLTENSORIMAGEPATCHOPEXPR
+
+// TensorVolumePatchOp
+#define SYCLTENSORVOLUMEPATCHOPEXPR(CVQual)\
+template<DenseIndex Planes, DenseIndex Rows, DenseIndex Cols, typename OrigXprType, typename XprType, typename... Params>\
+struct ExprConstructor<CVQual TensorVolumePatchOp<Planes, Rows, Cols, OrigXprType>, CVQual TensorVolumePatchOp<Planes, Rows, Cols, XprType>, Params... > {\
+ typedef ExprConstructor<OrigXprType, XprType, Params...> my_xpr_type;\
+ typedef CVQual TensorVolumePatchOp<Planes, Rows, Cols, typename my_xpr_type::Type> Type;\
+ my_xpr_type xprExpr;\
+ Type expr;\
+ template <typename FuncDetector>\
+ ExprConstructor(FuncDetector &funcD, const utility::tuple::Tuple<Params...> &t)\
+ : xprExpr(funcD.xprExpr, t), expr(xprExpr.expr, funcD.m_patch_planes, funcD.m_patch_rows, funcD.m_patch_cols, funcD.m_plane_strides, funcD.m_row_strides, funcD.m_col_strides,\
+ funcD.m_in_plane_strides, funcD.m_in_row_strides, funcD.m_in_col_strides,funcD.m_plane_inflate_strides, funcD.m_row_inflate_strides, funcD.m_col_inflate_strides, \
+ funcD.m_padding_explicit, funcD.m_padding_top_z, funcD.m_padding_bottom_z, funcD.m_padding_top, funcD.m_padding_bottom, funcD.m_padding_left, funcD.m_padding_right, \
+ funcD.m_padding_type, funcD.m_padding_value ){\
+ }\
+};
+
+SYCLTENSORVOLUMEPATCHOPEXPR(const)
+SYCLTENSORVOLUMEPATCHOPEXPR()
+#undef SYCLTENSORVOLUMEPATCHOPEXPR
+
+// TensorLayoutSwapOp and TensorIndexTupleOp
+#define SYCLTENSORLAYOUTSWAPINDEXTUPLEOPEXPR(CVQual, ExprNode)\
+template<typename OrigXprType, typename XprType, typename... Params>\
+struct ExprConstructor<CVQual ExprNode <OrigXprType> , CVQual ExprNode<XprType>, Params... >{\
+ typedef ExprConstructor<OrigXprType, XprType, Params...> my_xpr_type;\
+ typedef CVQual ExprNode<typename my_xpr_type::Type> Type;\
+ my_xpr_type xprExpr;\
+ Type expr;\
+ template <typename FuncDetector>\
+ ExprConstructor(FuncDetector &funcD, const utility::tuple::Tuple<Params...> &t)\
+ : xprExpr(funcD.xprExpr, t), expr(xprExpr.expr) {}\
+};
+
+//TensorLayoutSwapOp
+SYCLTENSORLAYOUTSWAPINDEXTUPLEOPEXPR(const, TensorLayoutSwapOp)
+SYCLTENSORLAYOUTSWAPINDEXTUPLEOPEXPR(, TensorLayoutSwapOp)
+//TensorIndexTupleOp
+SYCLTENSORLAYOUTSWAPINDEXTUPLEOPEXPR(const, TensorIndexTupleOp)
+SYCLTENSORLAYOUTSWAPINDEXTUPLEOPEXPR(, TensorIndexTupleOp)
+
+#undef SYCLTENSORLAYOUTSWAPINDEXTUPLEOPEXPR
/// template deduction for \ref ExprConstructor struct
template <typename OrigExpr, typename IndexExpr, typename FuncD, typename... Params>
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractAccessor.h b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractAccessor.h
index b512d43f6..fb95af59e 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractAccessor.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractAccessor.h
@@ -147,6 +147,30 @@ SYCLFORCEDEVALEXTACC(const)
SYCLFORCEDEVALEXTACC()
#undef SYCLFORCEDEVALEXTACC
+//TensorCustomUnaryOp
+#define SYCLCUSTOMUNARYOPEXTACC(CVQual)\
+template <typename CustomUnaryFunc, typename XprType, typename Dev >\
+struct ExtractAccessor<TensorEvaluator<CVQual TensorCustomUnaryOp<CustomUnaryFunc, XprType>, Dev> > {\
+ static inline auto getTuple(cl::sycl::handler& cgh, const TensorEvaluator<CVQual TensorCustomUnaryOp<CustomUnaryFunc, XprType>, Dev>& eval)\
+ RETURN_CPP11(AccessorConstructor::template getAccessor<cl::sycl::access::mode::read>(cgh, eval))\
+};
+
+
+SYCLCUSTOMUNARYOPEXTACC(const)
+SYCLCUSTOMUNARYOPEXTACC()
+#undef SYCLCUSTOMUNARYOPEXTACC
+
+//TensorCustomBinaryOp
+#define SYCLCUSTOMBINARYOPEXTACC(CVQual)\
+template <typename CustomBinaryFunc, typename LhsXprType, typename RhsXprType , typename Dev>\
+struct ExtractAccessor<TensorEvaluator<CVQual TensorCustomBinaryOp<CustomBinaryFunc, LhsXprType, RhsXprType>, Dev> > {\
+ static inline auto getTuple(cl::sycl::handler& cgh, const TensorEvaluator<CVQual TensorCustomBinaryOp<CustomBinaryFunc, LhsXprType, RhsXprType>, Dev>& eval)\
+ RETURN_CPP11(AccessorConstructor::template getAccessor<cl::sycl::access::mode::read>(cgh, eval))\
+};
+
+SYCLCUSTOMBINARYOPEXTACC(const)
+SYCLCUSTOMBINARYOPEXTACC()
+#undef SYCLCUSTOMBINARYOPEXTACC
/// specialisation of the \ref ExtractAccessor struct when the node type is TensorEvalToOp
#define SYCLEVALTOEXTACC(CVQual)\
@@ -161,15 +185,19 @@ SYCLEVALTOEXTACC()
#undef SYCLEVALTOEXTACC
/// specialisation of the \ref ExtractAccessor struct when the node type is TensorReductionOp
-#define SYCLREDUCTIONEXTACC(CVQual)\
+#define SYCLREDUCTIONEXTACC(CVQual, ExprNode)\
template <typename OP, typename Dim, typename Expr, typename Dev>\
-struct ExtractAccessor<TensorEvaluator<CVQual TensorReductionOp<OP, Dim, Expr>, Dev> > {\
- static inline auto getTuple(cl::sycl::handler& cgh, const TensorEvaluator<CVQual TensorReductionOp<OP, Dim, Expr>, Dev>& eval)\
+struct ExtractAccessor<TensorEvaluator<CVQual ExprNode<OP, Dim, Expr>, Dev> > {\
+ static inline auto getTuple(cl::sycl::handler& cgh, const TensorEvaluator<CVQual ExprNode<OP, Dim, Expr>, Dev>& eval)\
RETURN_CPP11(AccessorConstructor::template getAccessor<cl::sycl::access::mode::read>(cgh, eval))\
};
+// TensorReductionOp
+SYCLREDUCTIONEXTACC(const,TensorReductionOp)
+SYCLREDUCTIONEXTACC(,TensorReductionOp)
-SYCLREDUCTIONEXTACC(const)
-SYCLREDUCTIONEXTACC()
+// TensorTupleReducerOp
+SYCLREDUCTIONEXTACC(const,TensorTupleReducerOp)
+SYCLREDUCTIONEXTACC(,TensorTupleReducerOp)
#undef SYCLREDUCTIONEXTACC
/// specialisation of the \ref ExtractAccessor struct when the node type is TensorContractionOp and TensorConvolutionOp
@@ -179,14 +207,14 @@ template<typename Indices, typename LhsXprType, typename RhsXprType, typename De
static inline auto getTuple(cl::sycl::handler& cgh, const TensorEvaluator<CVQual ExprNode<Indices, LhsXprType, RhsXprType>, Dev>& eval)\
RETURN_CPP11(AccessorConstructor::template getAccessor<cl::sycl::access::mode::read>(cgh, eval))\
};
-
+//TensorContractionOp
SYCLCONTRACTIONCONVOLUTIONEXTACC(const,TensorContractionOp)
SYCLCONTRACTIONCONVOLUTIONEXTACC(,TensorContractionOp)
+//TensorConvolutionOp
SYCLCONTRACTIONCONVOLUTIONEXTACC(const,TensorConvolutionOp)
SYCLCONTRACTIONCONVOLUTIONEXTACC(,TensorConvolutionOp)
#undef SYCLCONTRACTIONCONVOLUTIONEXTACC
-
/// specialisation of the \ref ExtractAccessor struct when the node type is
/// const TensorSlicingOp.
#define SYCLSLICEOPEXTACC(CVQual)\
@@ -225,6 +253,49 @@ SYCLTENSORCHIPPINGOPEXTACC(const)
SYCLTENSORCHIPPINGOPEXTACC()
#undef SYCLTENSORCHIPPINGOPEXTACC
+/// specialisation of the \ref ExtractAccessor struct when the node type is
+/// TensorImagePatchOp.
+#define SYCLTENSORIMAGEPATCHOPEXTACC(CVQual)\
+template<DenseIndex Rows, DenseIndex Cols, typename XprType, typename Dev>\
+struct ExtractAccessor<TensorEvaluator<CVQual TensorImagePatchOp<Rows, Cols, XprType>, Dev> >{\
+ static inline auto getTuple(cl::sycl::handler& cgh, const TensorEvaluator<CVQual TensorImagePatchOp<Rows, Cols, XprType>, Dev>& eval)\
+ RETURN_CPP11(AccessorConstructor::getTuple(cgh, eval.impl()))\
+};
+
+SYCLTENSORIMAGEPATCHOPEXTACC(const)
+SYCLTENSORIMAGEPATCHOPEXTACC()
+#undef SYCLTENSORIMAGEPATCHOPEXTACC
+
+/// specialisation of the \ref ExtractAccessor struct when the node type is
+/// TensorVolumePatchOp.
+#define SYCLTENSORVOLUMEPATCHOPEXTACC(CVQual)\
+template<DenseIndex Planes, DenseIndex Rows, DenseIndex Cols, typename XprType, typename Dev>\
+struct ExtractAccessor<TensorEvaluator<CVQual TensorVolumePatchOp<Planes, Rows, Cols, XprType>, Dev> >{\
+ static inline auto getTuple(cl::sycl::handler& cgh, const TensorEvaluator<CVQual TensorVolumePatchOp<Planes, Rows, Cols, XprType>, Dev>& eval)\
+ RETURN_CPP11(AccessorConstructor::getTuple(cgh, eval.impl()))\
+};
+
+SYCLTENSORVOLUMEPATCHOPEXTACC(const)
+SYCLTENSORVOLUMEPATCHOPEXTACC()
+#undef SYCLTENSORVOLUMEPATCHOPEXTACC
+
+/// specialisation of the \ref ExtractAccessor struct when the node type is
+/// TensorLayoutSwapOp, TensorIndexTupleOp
+#define SYCLTENSORLAYOUTSWAPINDEXTUPLEOPEXTACC(CVQual, ExprNode)\
+template<typename XprType, typename Dev>\
+struct ExtractAccessor<TensorEvaluator<CVQual ExprNode<XprType>, Dev> >{\
+ static inline auto getTuple(cl::sycl::handler& cgh, const TensorEvaluator<CVQual ExprNode<XprType>, Dev>& eval)\
+ RETURN_CPP11(AccessorConstructor::getTuple(cgh, eval.impl()))\
+};
+
+// TensorLayoutSwapOp
+SYCLTENSORLAYOUTSWAPINDEXTUPLEOPEXTACC(const,TensorLayoutSwapOp)
+SYCLTENSORLAYOUTSWAPINDEXTUPLEOPEXTACC(,TensorLayoutSwapOp)
+//TensorIndexTupleOp
+SYCLTENSORLAYOUTSWAPINDEXTUPLEOPEXTACC(const,TensorIndexTupleOp)
+SYCLTENSORLAYOUTSWAPINDEXTUPLEOPEXTACC(,TensorIndexTupleOp)
+
+#undef SYCLTENSORLAYOUTSWAPINDEXTUPLEOPEXTACC
/// template deduction for \ref ExtractAccessor
template <typename Evaluator>
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractFunctors.h b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractFunctors.h
index ee020184b..a248e303b 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractFunctors.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractFunctors.h
@@ -33,15 +33,17 @@ namespace internal {
/// re-instantiate them on the device.
/// We have to pass instantiated functors to the device.
// This struct is used for leafNode (TensorMap) and nodes behaving like leafNode (TensorForcedEval).
-template <typename Evaluator> struct FunctorExtractor{
- typedef typename Evaluator::Dimensions Dimensions;
- const Dimensions m_dimensions;
- EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
- FunctorExtractor(const Evaluator& expr)
- : m_dimensions(expr.dimensions()) {}
+#define DEFALTACTION(Evaluator)\
+typedef typename Evaluator::Dimensions Dimensions;\
+const Dimensions m_dimensions;\
+EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }\
+FunctorExtractor(const Evaluator& expr): m_dimensions(expr.dimensions()) {}
+template <typename Evaluator> struct FunctorExtractor{
+ DEFALTACTION(Evaluator)
};
+
/// specialisation of the \ref FunctorExtractor struct when the node type does not require anything
///TensorConversionOp
#define SYCLEXTRFUNCCONVERSION(ExprNode, CVQual)\
@@ -113,6 +115,36 @@ SYCLEXTRFUNCTERNARY(const)
SYCLEXTRFUNCTERNARY()
#undef SYCLEXTRFUNCTERNARY
+
+
+// TensorCustomOp must be specialised, otherwise it would be captured by UnaryCategory even though its behaviour
+// differs from that of UnaryCategory and is instead similar to the general FunctorExtractor.
+/// specialisation of the \ref FunctorExtractor struct when the node type is TensorCustomUnaryOp
+#define SYCLEXTRFUNCCUSTOMUNARYOP(CVQual)\
+template <typename CustomUnaryFunc, typename ArgType, typename Dev >\
+struct FunctorExtractor<TensorEvaluator<CVQual TensorCustomUnaryOp<CustomUnaryFunc, ArgType>, Dev> > {\
+ typedef TensorEvaluator<CVQual TensorCustomUnaryOp<CustomUnaryFunc, ArgType>, Dev> Evaluator;\
+ DEFALTACTION(Evaluator)\
+};
+//TensorCustomUnaryOp
+SYCLEXTRFUNCCUSTOMUNARYOP(const)
+SYCLEXTRFUNCCUSTOMUNARYOP()
+#undef SYCLEXTRFUNCCUSTOMUNARYOP
+
+//TensorCustomBinaryOp
+#define SYCLEXTRFUNCCUSTOMBIBARYOP(CVQual)\
+template <typename CustomBinaryFunc, typename ArgType1, typename ArgType2, typename Dev >\
+struct FunctorExtractor<TensorEvaluator<CVQual TensorCustomBinaryOp<CustomBinaryFunc, ArgType1, ArgType2>, Dev> > {\
+ typedef TensorEvaluator<CVQual TensorCustomBinaryOp<CustomBinaryFunc, ArgType1, ArgType2>, Dev> Evaluator;\
+ DEFALTACTION(Evaluator)\
+};
+//TensorCustomBinaryOp
+SYCLEXTRFUNCCUSTOMBIBARYOP(const)
+SYCLEXTRFUNCCUSTOMBIBARYOP()
+#undef SYCLEXTRFUNCCUSTOMBIBARYOP
+
+
+
/// specialisation of the \ref FunctorExtractor struct when the node type is
/// TensorCwiseSelectOp. This is an specialisation without OP so it has to be separated.
#define SYCLEXTRFUNCSELECTOP(CVQual)\
@@ -143,19 +175,26 @@ SYCLEXTRFUNCASSIGNOP(const)
SYCLEXTRFUNCASSIGNOP()
#undef SYCLEXTRFUNCASSIGNOP
-/// specialisation of the \ref FunctorExtractor struct when the node type is
-/// TensorEvalToOp, This is an specialisation without OP so it has to be separated.
-#define SYCLEXTRFUNCEVALTOOP(CVQual)\
-template <typename RHSExpr, typename Dev>\
-struct FunctorExtractor<TensorEvaluator<CVQual TensorEvalToOp<RHSExpr>, Dev> > {\
- FunctorExtractor<TensorEvaluator<RHSExpr, Dev> > rhsExpr;\
- FunctorExtractor(const TensorEvaluator<CVQual TensorEvalToOp<RHSExpr>, Dev>& expr)\
- : rhsExpr(expr.impl()) {}\
+/// specialisation of the \ref FunctorExtractor struct when the node types are
+/// TensorEvalToOp, TensorLayoutSwapOp and TensorIndexTupleOp. This is a specialisation without OP so it has to be separated.
+#define SYCLEXTRFUNCEVALTOOPSWAPLAYOUTINDEXTUPLE(CVQual, ExprNode)\
+template <typename Expr, typename Dev>\
+struct FunctorExtractor<TensorEvaluator<CVQual ExprNode<Expr>, Dev> > {\
+ FunctorExtractor<TensorEvaluator<Expr, Dev> > xprExpr;\
+ FunctorExtractor(const TensorEvaluator<CVQual ExprNode<Expr>, Dev>& expr)\
+ : xprExpr(expr.impl()) {}\
};
-
-SYCLEXTRFUNCEVALTOOP(const)
-SYCLEXTRFUNCEVALTOOP()
-#undef SYCLEXTRFUNCEVALTOOP
+//TensorEvalToOp
+SYCLEXTRFUNCEVALTOOPSWAPLAYOUTINDEXTUPLE(const, TensorEvalToOp)
+SYCLEXTRFUNCEVALTOOPSWAPLAYOUTINDEXTUPLE(, TensorEvalToOp)
+// TensorLayoutSwapOp
+SYCLEXTRFUNCEVALTOOPSWAPLAYOUTINDEXTUPLE(const, TensorLayoutSwapOp)
+SYCLEXTRFUNCEVALTOOPSWAPLAYOUTINDEXTUPLE(, TensorLayoutSwapOp)
+// TensorIndexTupleOp
+SYCLEXTRFUNCEVALTOOPSWAPLAYOUTINDEXTUPLE(const, TensorIndexTupleOp)
+SYCLEXTRFUNCEVALTOOPSWAPLAYOUTINDEXTUPLE(, TensorIndexTupleOp)
+
+#undef SYCLEXTRFUNCEVALTOOPSWAPLAYOUTINDEXTUPLE
template<typename Dim, size_t NumOutputDim> struct DimConstr {
template<typename InDim>
@@ -166,10 +205,10 @@ template<typename Dim> struct DimConstr<Dim, 0> {
template<typename InDim>
static EIGEN_STRONG_INLINE Dim getDim(InDim dims ) {return Dim(static_cast<Dim>(dims.TotalSize()));}
};
-
+//TensorReductionOp
#define SYCLEXTRFUNCREDUCTIONOP(CVQual)\
template<typename Op, typename Dims, typename ArgType, template <class> class MakePointer_, typename Device>\
-struct FunctorExtractor<TensorEvaluator<CVQual TensorReductionOp<Op, Dims, ArgType, MakePointer_>, Device>>{\
+struct FunctorExtractor<TensorEvaluator<CVQual TensorReductionOp<Op, Dims, ArgType, MakePointer_>, Device> >{\
typedef TensorEvaluator<CVQual TensorReductionOp<Op, Dims, ArgType, MakePointer_>, Device> Evaluator;\
typedef typename Eigen::internal::conditional<Evaluator::NumOutputDims==0, DSizes<typename Evaluator::Index, 1>, typename Evaluator::Dimensions >::type Dimensions;\
const Dimensions m_dimensions;\
@@ -177,12 +216,39 @@ struct FunctorExtractor<TensorEvaluator<CVQual TensorReductionOp<Op, Dims, ArgTy
FunctorExtractor(const TensorEvaluator<CVQual TensorReductionOp<Op, Dims, ArgType, MakePointer_>, Device>& expr)\
: m_dimensions(DimConstr<Dimensions, Evaluator::NumOutputDims>::getDim(expr.dimensions())) {}\
};
-
-
SYCLEXTRFUNCREDUCTIONOP(const)
SYCLEXTRFUNCREDUCTIONOP()
#undef SYCLEXTRFUNCREDUCTIONOP
+//TensorTupleReducerOp
+#define SYCLEXTRFUNCTUPLEREDUCTIONOP(CVQual)\
+template<typename ReduceOp, typename Dims, typename ArgType, typename Device>\
+ struct FunctorExtractor<TensorEvaluator<CVQual TensorTupleReducerOp<ReduceOp, Dims, ArgType>, Device> >{\
+ typedef TensorEvaluator<CVQual TensorTupleReducerOp<ReduceOp, Dims, ArgType>, Device> Evaluator;\
+ static const int NumOutputDims= Eigen::internal::traits<TensorTupleReducerOp<ReduceOp, Dims, ArgType> >::NumDimensions;\
+ typedef typename Evaluator::StrideDims StrideDims;\
+ typedef typename Evaluator::Index Index;\
+ typedef typename Eigen::internal::conditional<NumOutputDims==0, DSizes<Index, 1>, typename Evaluator::Dimensions >::type Dimensions;\
+ const Dimensions m_dimensions;\
+ const Index m_return_dim;\
+ const StrideDims m_strides;\
+ const Index m_stride_mod;\
+ const Index m_stride_div;\
+ EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }\
+ EIGEN_STRONG_INLINE Index return_dim() const {return m_return_dim;}\
+ EIGEN_STRONG_INLINE const StrideDims strides() const {return m_strides;}\
+ EIGEN_STRONG_INLINE const Index stride_mod() const {return m_stride_mod;}\
+ EIGEN_STRONG_INLINE const Index stride_div() const {return m_stride_div;}\
+ FunctorExtractor(const TensorEvaluator<CVQual TensorTupleReducerOp<ReduceOp, Dims, ArgType>, Device>& expr)\
+ : m_dimensions(DimConstr<Dimensions, NumOutputDims>::getDim(expr.dimensions())), m_return_dim(expr.return_dim()),\
+ m_strides(expr.strides()), m_stride_mod(expr.stride_mod()), m_stride_div(expr.stride_div()){}\
+};
+
+SYCLEXTRFUNCTUPLEREDUCTIONOP(const)
+SYCLEXTRFUNCTUPLEREDUCTIONOP()
+#undef SYCLEXTRFUNCTUPLEREDUCTIONOP
+
+//TensorContractionOp and TensorConvolutionOp
#define SYCLEXTRFUNCCONTRACTCONVOLUTIONOP(CVQual, ExprNode)\
template<typename Indices, typename LhsXprType, typename RhsXprType, typename Device>\
struct FunctorExtractor<TensorEvaluator<CVQual ExprNode<Indices, LhsXprType, RhsXprType>, Device>>{\
@@ -194,9 +260,10 @@ struct FunctorExtractor<TensorEvaluator<CVQual ExprNode<Indices, LhsXprType, Rhs
: m_dimensions(expr.dimensions()) {}\
};
-
+//TensorContractionOp
SYCLEXTRFUNCCONTRACTCONVOLUTIONOP(const,TensorContractionOp)
SYCLEXTRFUNCCONTRACTCONVOLUTIONOP(,TensorContractionOp)
+//TensorConvolutionOp
SYCLEXTRFUNCCONTRACTCONVOLUTIONOP(const,TensorConvolutionOp)
SYCLEXTRFUNCCONTRACTCONVOLUTIONOP(,TensorConvolutionOp)
#undef SYCLEXTRFUNCCONTRACTCONVOLUTIONOP
@@ -219,6 +286,7 @@ SYCLEXTRFUNCTSLICEOP(const)
SYCLEXTRFUNCTSLICEOP()
#undef SYCLEXTRFUNCTSLICEOP
+//TensorStridingSlicingOp
#define SYCLEXTRFUNCTSLICESTRIDEOP(CVQual)\
template<typename StartIndices, typename StopIndices, typename Strides, typename XprType, typename Dev>\
struct FunctorExtractor<TensorEvaluator<CVQual TensorStridingSlicingOp<StartIndices, StopIndices, Strides, XprType>, Dev> >{\
@@ -237,7 +305,7 @@ SYCLEXTRFUNCTSLICESTRIDEOP(const)
SYCLEXTRFUNCTSLICESTRIDEOP()
#undef SYCLEXTRFUNCTSLICESTRIDEOP
-// Had to separate reshapeOP otherwise it will be mistaken by UnaryCategory
+// TensorReshapingOp and TensorShufflingOp have to be handled separately; otherwise they would be mistaken for UnaryCategory nodes.
#define SYCLRESHAPEANDSHUFFLEOPFUNCEXT(OPEXPR, FUNCCALL, CVQual)\
template<typename Param, typename XprType, typename Dev>\
struct FunctorExtractor<Eigen::TensorEvaluator<CVQual Eigen::OPEXPR<Param, XprType>, Dev> > {\
@@ -248,9 +316,11 @@ struct FunctorExtractor<Eigen::TensorEvaluator<CVQual Eigen::OPEXPR<Param, XprTy
: xprExpr(expr.impl()), m_param(expr.FUNCCALL) {}\
};
+//TensorReshapingOp
SYCLRESHAPEANDSHUFFLEOPFUNCEXT(TensorReshapingOp, dimensions(), const)
SYCLRESHAPEANDSHUFFLEOPFUNCEXT(TensorReshapingOp, dimensions(), )
+//TensorShufflingOp
SYCLRESHAPEANDSHUFFLEOPFUNCEXT(TensorShufflingOp, shufflePermutation(), const)
SYCLRESHAPEANDSHUFFLEOPFUNCEXT(TensorShufflingOp, shufflePermutation(), )
#undef SYCLRESHAPEANDSHUFFLEOPFUNCEXT
@@ -293,7 +363,7 @@ SYCLEXTRFUNCCONTRACTCONCAT(TensorConcatenationOp, axis(),)
//TensorChippingOp
#define SYCLEXTRFUNCCHIPPINGOP(CVQual)\
template<DenseIndex DimId, typename XprType, typename Device>\
-struct FunctorExtractor<TensorEvaluator<CVQual TensorChippingOp<DimId, XprType>, Device>>{\
+struct FunctorExtractor<TensorEvaluator<CVQual TensorChippingOp<DimId, XprType>, Device> >{\
FunctorExtractor<Eigen::TensorEvaluator<XprType, Device> > xprExpr;\
const DenseIndex m_dim;\
const DenseIndex m_offset;\
@@ -307,6 +377,84 @@ SYCLEXTRFUNCCHIPPINGOP(const)
SYCLEXTRFUNCCHIPPINGOP()
#undef SYCLEXTRFUNCCHIPPINGOP
+//TensorImagePatchOp
+#define SYCLEXTRFUNCIMAGEPATCHOP(CVQual)\
+template<DenseIndex Rows, DenseIndex Cols, typename XprType, typename Device>\
+struct FunctorExtractor<TensorEvaluator<CVQual TensorImagePatchOp<Rows, Cols, XprType>, Device> >{\
+typedef CVQual TensorImagePatchOp<Rows, Cols, XprType> Self;\
+FunctorExtractor<Eigen::TensorEvaluator<XprType, Device> > xprExpr;\
+const DenseIndex m_patch_rows;\
+const DenseIndex m_patch_cols;\
+const DenseIndex m_row_strides;\
+const DenseIndex m_col_strides;\
+const DenseIndex m_in_row_strides;\
+const DenseIndex m_in_col_strides;\
+const DenseIndex m_row_inflate_strides;\
+const DenseIndex m_col_inflate_strides;\
+const bool m_padding_explicit;\
+const DenseIndex m_padding_top;\
+const DenseIndex m_padding_bottom;\
+const DenseIndex m_padding_left;\
+const DenseIndex m_padding_right;\
+const PaddingType m_padding_type;\
+const typename Self::Scalar m_padding_value;\
+FunctorExtractor(const TensorEvaluator<Self, Device>& expr)\
+: xprExpr(expr.impl()), m_patch_rows(expr.xpr().patch_rows()), m_patch_cols(expr.xpr().patch_cols()),\
+ m_row_strides(expr.xpr().row_strides()), m_col_strides(expr.xpr().col_strides()),\
+ m_in_row_strides(expr.xpr().in_row_strides()), m_in_col_strides(expr.xpr().in_col_strides()),\
+ m_row_inflate_strides(expr.xpr().row_inflate_strides()), m_col_inflate_strides(expr.xpr().col_inflate_strides()),\
+ m_padding_explicit(expr.xpr().padding_explicit()),m_padding_top(expr.xpr().padding_top()),\
+ m_padding_bottom(expr.xpr().padding_bottom()), m_padding_left(expr.xpr().padding_left()),\
+ m_padding_right(expr.xpr().padding_right()), m_padding_type(expr.xpr().padding_type()),\
+ m_padding_value(expr.xpr().padding_value()){}\
+};
+
+SYCLEXTRFUNCIMAGEPATCHOP(const)
+SYCLEXTRFUNCIMAGEPATCHOP()
+#undef SYCLEXTRFUNCIMAGEPATCHOP
+
+/// TensorVolumePatchOp
+#define SYCLEXTRFUNCVOLUMEPATCHOP(CVQual)\
+template<DenseIndex Planes, DenseIndex Rows, DenseIndex Cols, typename XprType, typename Device>\
+struct FunctorExtractor<TensorEvaluator<CVQual TensorVolumePatchOp<Planes, Rows, Cols, XprType>, Device> >{\
+typedef CVQual TensorVolumePatchOp<Planes, Rows, Cols, XprType> Self;\
+FunctorExtractor<Eigen::TensorEvaluator<XprType, Device> > xprExpr;\
+const DenseIndex m_patch_planes;\
+const DenseIndex m_patch_rows;\
+const DenseIndex m_patch_cols;\
+const DenseIndex m_plane_strides;\
+const DenseIndex m_row_strides;\
+const DenseIndex m_col_strides;\
+const DenseIndex m_in_plane_strides;\
+const DenseIndex m_in_row_strides;\
+const DenseIndex m_in_col_strides;\
+const DenseIndex m_plane_inflate_strides;\
+const DenseIndex m_row_inflate_strides;\
+const DenseIndex m_col_inflate_strides;\
+const bool m_padding_explicit;\
+const DenseIndex m_padding_top_z;\
+const DenseIndex m_padding_bottom_z;\
+const DenseIndex m_padding_top;\
+const DenseIndex m_padding_bottom;\
+const DenseIndex m_padding_left;\
+const DenseIndex m_padding_right;\
+const PaddingType m_padding_type;\
+const typename Self::Scalar m_padding_value;\
+FunctorExtractor(const TensorEvaluator<Self, Device>& expr)\
+: xprExpr(expr.impl()), m_patch_planes(expr.xpr().patch_planes()), m_patch_rows(expr.xpr().patch_rows()), m_patch_cols(expr.xpr().patch_cols()),\
+ m_plane_strides(expr.xpr().plane_strides()), m_row_strides(expr.xpr().row_strides()), m_col_strides(expr.xpr().col_strides()),\
+ m_in_plane_strides(expr.xpr().in_plane_strides()), m_in_row_strides(expr.xpr().in_row_strides()), m_in_col_strides(expr.xpr().in_col_strides()),\
+ m_plane_inflate_strides(expr.xpr().plane_inflate_strides()),m_row_inflate_strides(expr.xpr().row_inflate_strides()),\
+ m_col_inflate_strides(expr.xpr().col_inflate_strides()), m_padding_explicit(expr.xpr().padding_explicit()),\
+ m_padding_top_z(expr.xpr().padding_top_z()), m_padding_bottom_z(expr.xpr().padding_bottom_z()), \
+ m_padding_top(expr.xpr().padding_top()), m_padding_bottom(expr.xpr().padding_bottom()), m_padding_left(expr.xpr().padding_left()),\
+ m_padding_right(expr.xpr().padding_right()), m_padding_type(expr.xpr().padding_type()),m_padding_value(expr.xpr().padding_value()){}\
+};
+SYCLEXTRFUNCVOLUMEPATCHOP(const)
+SYCLEXTRFUNCVOLUMEPATCHOP()
+#undef SYCLEXTRFUNCVOLUMEPATCHOP
+
+
/// template deduction function for FunctorExtractor
template <typename Evaluator>
auto inline extractFunctors(const Evaluator& evaluator)-> FunctorExtractor<Evaluator> {
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclFunctors.h b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclFunctors.h
index 2f7779036..a447c3f88 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclFunctors.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclFunctors.h
@@ -21,11 +21,12 @@ namespace internal {
template<typename CoeffReturnType, typename OP, typename OutputAccessor, typename InputAccessor, typename LocalAccessor> struct GenericKernelReducer{
OP op;
OutputAccessor aOut;
+ ptrdiff_t out_offset;
InputAccessor aI;
LocalAccessor scratch;
size_t length, local;
- GenericKernelReducer(OP op_, OutputAccessor aOut_, InputAccessor aI_, LocalAccessor scratch_, size_t length_, size_t local_)
- : op(op_), aOut(aOut_), aI(aI_), scratch(scratch_), length(length_), local(local_){}
+ GenericKernelReducer(OP op_, OutputAccessor aOut_, ptrdiff_t out_offset_, InputAccessor aI_, LocalAccessor scratch_, size_t length_, size_t local_)
+ : op(op_), aOut(aOut_), out_offset(out_offset_), aI(aI_), scratch(scratch_), length(length_), local(local_){}
void operator()(cl::sycl::nd_item<1> itemID) {
size_t globalid = itemID.get_global(0);
size_t localid = itemID.get_local(0);
@@ -59,7 +60,7 @@ namespace internal {
aI[itemID.get_group(0)] = scratch[localid];
if((length<=local) && globalid ==0){
auto aOutPtr = ConvertToActualTypeSycl(CoeffReturnType, aOut);
- aOutPtr[0]=scratch[0];
+ aOutPtr[0 + ConvertToActualSyclOffset(CoeffReturnType, out_offset)]=scratch[0];
}
}
}
@@ -71,21 +72,21 @@ namespace internal {
template < typename HostExpr, typename FunctorExpr, typename Tuple_of_Acc, typename Dims, typename Op, typename Index> class ReductionFunctor {
public:
typedef typename TensorSycl::internal::createPlaceHolderExpression<HostExpr>::Type PlaceHolderExpr;
- typedef cl::sycl::accessor<uint8_t, 1, cl::sycl::access::mode::discard_write, cl::sycl::access::target::global_buffer> write_accessor;
- ReductionFunctor(write_accessor output_accessor_, FunctorExpr functors_, Tuple_of_Acc tuple_of_accessors_,Dims dims_, Op functor_, Index range_, Index)
- :output_accessor(output_accessor_), functors(functors_), tuple_of_accessors(tuple_of_accessors_), dims(dims_), functor(functor_), range(range_) {}
+ typedef cl::sycl::accessor<uint8_t, 1, cl::sycl::access::mode::write, cl::sycl::access::target::global_buffer> write_accessor;
+ ReductionFunctor(write_accessor output_accessor_, ptrdiff_t out_offset_, FunctorExpr functors_, Tuple_of_Acc tuple_of_accessors_,Dims dims_, Op functor_, Index range_, Index)
+ :output_accessor(output_accessor_), out_offset(out_offset_), functors(functors_), tuple_of_accessors(tuple_of_accessors_), dims(dims_), functor(functor_), range(range_) {}
void operator()(cl::sycl::nd_item<1> itemID) {
typedef typename ConvertToDeviceExpression<const HostExpr>::Type DevExpr;
auto device_expr = createDeviceExpression<DevExpr, PlaceHolderExpr>(functors, tuple_of_accessors);
/// reduction cannot be captured automatically through our device conversion recursion. The reason is that reduction has two behaviour
- /// the first behaviour is when it is used as a root to lauch the sub-kernel. The second one is when it is treated as a leafnode to pass the
+ /// the first behaviour is when it is used as a root to launch the sub-kernel. The second one is when it is treated as a leafnode to pass the
/// calculated result to its parent kernel. While the latter is automatically detected through our device expression generator. The former is created here.
const auto device_self_expr= Eigen::TensorReductionOp<Op, Dims, decltype(device_expr.expr) ,MakeGlobalPointer>(device_expr.expr, dims, functor);
/// This is the evaluator for device_self_expr. This is exactly similar to the self which has been passed to run function. The difference is
/// the device_evaluator is detectable and recognisable on the device.
- typedef Eigen::TensorEvaluator<decltype(device_self_expr), Eigen::DefaultDevice> DeviceSelf;
- auto device_self_evaluator = Eigen::TensorEvaluator<decltype(device_self_expr), Eigen::DefaultDevice>(device_self_expr, Eigen::DefaultDevice());
+ typedef Eigen::TensorEvaluator<decltype(device_self_expr), Eigen::SyclKernelDevice> DeviceSelf;
+ auto device_self_evaluator = Eigen::TensorEvaluator<decltype(device_self_expr), Eigen::SyclKernelDevice>(device_self_expr, Eigen::SyclKernelDevice());
auto output_accessor_ptr =ConvertToActualTypeSycl(typename DeviceSelf::CoeffReturnType, output_accessor);
/// const cast added as a naive solution to solve the qualifier drop error
auto globalid=static_cast<Index>(itemID.get_global_linear_id());
@@ -93,11 +94,12 @@ template < typename HostExpr, typename FunctorExpr, typename Tuple_of_Acc, typen
typename DeviceSelf::CoeffReturnType accum = functor.initialize();
Eigen::internal::GenericDimReducer<DeviceSelf::NumReducedDims-1, DeviceSelf, Op>::reduce(device_self_evaluator, device_self_evaluator.firstInput(static_cast<typename DevExpr::Index>(globalid)),const_cast<Op&>(functor), &accum);
functor.finalize(accum);
- output_accessor_ptr[globalid]= accum;
+ output_accessor_ptr[globalid + ConvertToActualSyclOffset(typename DeviceSelf::CoeffReturnType, out_offset)]= accum;
}
}
private:
write_accessor output_accessor;
+ ptrdiff_t out_offset;
FunctorExpr functors;
Tuple_of_Acc tuple_of_accessors;
Dims dims;
@@ -109,23 +111,23 @@ template < typename HostExpr, typename FunctorExpr, typename Tuple_of_Acc, typen
class ReductionFunctor<HostExpr, FunctorExpr, Tuple_of_Acc, Dims, Eigen::internal::MeanReducer<typename HostExpr::CoeffReturnType>, Index> {
public:
typedef typename TensorSycl::internal::createPlaceHolderExpression<HostExpr>::Type PlaceHolderExpr;
- typedef cl::sycl::accessor<uint8_t, 1, cl::sycl::access::mode::discard_write, cl::sycl::access::target::global_buffer> write_accessor;
+ typedef cl::sycl::accessor<uint8_t, 1, cl::sycl::access::mode::write, cl::sycl::access::target::global_buffer> write_accessor;
typedef Eigen::internal::SumReducer<typename HostExpr::CoeffReturnType> Op;
- ReductionFunctor(write_accessor output_accessor_, FunctorExpr functors_, Tuple_of_Acc tuple_of_accessors_,Dims dims_,
+ ReductionFunctor(write_accessor output_accessor_, ptrdiff_t out_offset_, FunctorExpr functors_, Tuple_of_Acc tuple_of_accessors_,Dims dims_,
Eigen::internal::MeanReducer<typename HostExpr::CoeffReturnType>, Index range_, Index num_values_to_reduce_)
- :output_accessor(output_accessor_), functors(functors_), tuple_of_accessors(tuple_of_accessors_), dims(dims_), functor(Op()), range(range_), num_values_to_reduce(num_values_to_reduce_) {}
+ :output_accessor(output_accessor_), out_offset(out_offset_), functors(functors_), tuple_of_accessors(tuple_of_accessors_), dims(dims_), functor(Op()), range(range_), num_values_to_reduce(num_values_to_reduce_) {}
void operator()(cl::sycl::nd_item<1> itemID) {
typedef typename ConvertToDeviceExpression<const HostExpr>::Type DevExpr;
auto device_expr = createDeviceExpression<DevExpr, PlaceHolderExpr>(functors, tuple_of_accessors);
/// reduction cannot be captured automatically through our device conversion recursion. The reason is that reduction has two behaviour
- /// the first behaviour is when it is used as a root to lauch the sub-kernel. The second one is when it is treated as a leafnode to pass the
+ /// the first behaviour is when it is used as a root to launch the sub-kernel. The second one is when it is treated as a leafnode to pass the
/// calculated result to its parent kernel. While the latter is automatically detected through our device expression generator. The former is created here.
const auto device_self_expr= Eigen::TensorReductionOp<Op, Dims, decltype(device_expr.expr) ,MakeGlobalPointer>(device_expr.expr, dims, functor);
/// This is the evaluator for device_self_expr. This is exactly similar to the self which has been passed to run function. The difference is
/// the device_evaluator is detectable and recognisable on the device.
- typedef Eigen::TensorEvaluator<decltype(device_self_expr), Eigen::DefaultDevice> DeviceSelf;
- auto device_self_evaluator = Eigen::TensorEvaluator<decltype(device_self_expr), Eigen::DefaultDevice>(device_self_expr, Eigen::DefaultDevice());
+ typedef Eigen::TensorEvaluator<decltype(device_self_expr), Eigen::SyclKernelDevice> DeviceSelf;
+ auto device_self_evaluator = Eigen::TensorEvaluator<decltype(device_self_expr), Eigen::SyclKernelDevice>(device_self_expr, Eigen::SyclKernelDevice());
auto output_accessor_ptr =ConvertToActualTypeSycl(typename DeviceSelf::CoeffReturnType, output_accessor);
/// const cast added as a naive solution to solve the qualifier drop error
auto globalid=static_cast<Index>(itemID.get_global_linear_id());
@@ -133,11 +135,12 @@ class ReductionFunctor<HostExpr, FunctorExpr, Tuple_of_Acc, Dims, Eigen::interna
typename DeviceSelf::CoeffReturnType accum = functor.initialize();
Eigen::internal::GenericDimReducer<DeviceSelf::NumReducedDims-1, DeviceSelf, Op>::reduce(device_self_evaluator, device_self_evaluator.firstInput(static_cast<typename DevExpr::Index>(globalid)),const_cast<Op&>(functor), &accum);
functor.finalize(accum);
- output_accessor_ptr[globalid]= accum/num_values_to_reduce;
+ output_accessor_ptr[globalid+ ConvertToActualSyclOffset(typename DeviceSelf::CoeffReturnType, out_offset)]= accum/num_values_to_reduce;
}
}
private:
write_accessor output_accessor;
+ ptrdiff_t out_offset;
FunctorExpr functors;
Tuple_of_Acc tuple_of_accessors;
Dims dims;
@@ -165,12 +168,12 @@ public:
typedef typename TensorSycl::internal::ConvertToDeviceExpression<const HostExpr>::Type DevExpr;
auto device_expr = TensorSycl::internal::createDeviceExpression<DevExpr, PlaceHolderExpr>(functors, tuple_of_accessors);
/// reduction cannot be captured automatically through our device conversion recursion. The reason is that reduction has two behaviour
- /// the first behaviour is when it is used as a root to lauch the sub-kernel. The second one is when it is treated as a leafnode to pass the
+ /// the first behaviour is when it is used as a root to launch the sub-kernel. The second one is when it is treated as a leafnode to pass the
/// calculated result to its parent kernel. While the latter is automatically detected through our device expression generator. The former is created here.
const auto device_self_expr= Eigen::TensorReductionOp<Op, Dims, decltype(device_expr.expr) ,MakeGlobalPointer>(device_expr.expr, dims, op);
/// This is the evaluator for device_self_expr. This is exactly similar to the self which has been passed to run function. The difference is
/// the device_evaluator is detectable and recognisable on the device.
- auto device_self_evaluator = Eigen::TensorEvaluator<decltype(device_self_expr), Eigen::DefaultDevice>(device_self_expr, Eigen::DefaultDevice());
+ auto device_self_evaluator = Eigen::TensorEvaluator<decltype(device_self_expr), Eigen::SyclKernelDevice>(device_self_expr, Eigen::SyclKernelDevice());
/// const cast added as a naive solution to solve the qualifier drop error
auto globalid=itemID.get_global_linear_id();
@@ -212,12 +215,12 @@ public:
typedef typename TensorSycl::internal::ConvertToDeviceExpression<const HostExpr>::Type DevExpr;
auto device_expr = TensorSycl::internal::createDeviceExpression<DevExpr, PlaceHolderExpr>(functors, tuple_of_accessors);
/// reduction cannot be captured automatically through our device conversion recursion. The reason is that reduction has two behaviour
- /// the first behaviour is when it is used as a root to lauch the sub-kernel. The second one is when it is treated as a leafnode to pass the
+ /// the first behaviour is when it is used as a root to launch the sub-kernel. The second one is when it is treated as a leafnode to pass the
/// calculated result to its parent kernel. While the latter is automatically detected through our device expression generator. The former is created here.
const auto device_self_expr= Eigen::TensorReductionOp<Op, Dims, decltype(device_expr.expr) ,MakeGlobalPointer>(device_expr.expr, dims, op);
/// This is the evaluator for device_self_expr. This is exactly similar to the self which has been passed to run function. The difference is
/// the device_evaluator is detectable and recognisable on the device.
- auto device_self_evaluator = Eigen::TensorEvaluator<decltype(device_self_expr), Eigen::DefaultDevice>(device_self_expr, Eigen::DefaultDevice());
+ auto device_self_evaluator = Eigen::TensorEvaluator<decltype(device_self_expr), Eigen::SyclKernelDevice>(device_self_expr, Eigen::SyclKernelDevice());
/// const cast added as a naive solution to solve the qualifier drop error
auto globalid=itemID.get_global_linear_id();
auto scale = (rng*red_factor) + remaining;
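The reduction functors above now carry a ptrdiff_t out_offset and fold it into the index used when writing through the type-erased output accessor. The plain-pointer analogue below is only a sketch of why that conversion exists, assuming (as the ConvertToActualSyclOffset usage suggests) that out_offset is carried in bytes while the accessor is reinterpreted as CoeffReturnType; the function and parameter names are hypothetical, not Eigen API.

// --- illustrative sketch, not part of the patch ---
#include <cstddef>
#include <cstdint>

template <typename CoeffReturnType>
void write_reduced_value(std::uint8_t* raw_output,   // type-erased output buffer (like the uint8_t accessor)
                         std::ptrdiff_t out_offset,  // assumed byte offset stored by the functor
                         std::size_t global_id,
                         CoeffReturnType accum) {
  // Reinterpret the byte buffer as the actual result type...
  CoeffReturnType* typed_output = reinterpret_cast<CoeffReturnType*>(raw_output);
  // ...and rescale the byte offset to element units before indexing, mirroring
  // output_accessor_ptr[globalid + ConvertToActualSyclOffset(...)] in the kernels above.
  typed_output[global_id +
               out_offset / static_cast<std::ptrdiff_t>(sizeof(CoeffReturnType))] = accum;
}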
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclLeafCount.h b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclLeafCount.h
index a1c112f4d..234580c7c 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclLeafCount.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclLeafCount.h
@@ -93,26 +93,58 @@ SYCLFORCEDEVALLEAFCOUNT(const)
SYCLFORCEDEVALLEAFCOUNT()
#undef SYCLFORCEDEVALLEAFCOUNT
+#define SYCLCUSTOMUNARYOPLEAFCOUNT(CVQual)\
+template <typename CustomUnaryFunc, typename XprType>\
+struct LeafCount<CVQual TensorCustomUnaryOp<CustomUnaryFunc, XprType> > {\
+static const size_t Count =1;\
+};
+
+SYCLCUSTOMUNARYOPLEAFCOUNT(const)
+SYCLCUSTOMUNARYOPLEAFCOUNT()
+#undef SYCLCUSTOMUNARYOPLEAFCOUNT
+
+
+#define SYCLCUSTOMBINARYOPLEAFCOUNT(CVQual)\
+template <typename CustomBinaryFunc, typename LhsXprType, typename RhsXprType>\
+struct LeafCount<CVQual TensorCustomBinaryOp<CustomBinaryFunc, LhsXprType, RhsXprType> > {\
+static const size_t Count =1;\
+};
+SYCLCUSTOMBINARYOPLEAFCOUNT(const)
+SYCLCUSTOMBINARYOPLEAFCOUNT()
+#undef SYCLCUSTOMBINARYOPLEAFCOUNT
+
/// specialisation of the \ref LeafCount struct when the node type is TensorEvalToOp
-#define EVALTOLEAFCOUNT(CVQual)\
+#define EVALTOLAYOUTSWAPINDEXTUPLELEAFCOUNT(CVQual , ExprNode, Num)\
template <typename Expr>\
-struct LeafCount<CVQual TensorEvalToOp<Expr> > {\
- static const size_t Count = 1 + CategoryCount<Expr>::Count;\
+struct LeafCount<CVQual ExprNode<Expr> > {\
+ static const size_t Count = Num + CategoryCount<Expr>::Count;\
};
-EVALTOLEAFCOUNT(const)
-EVALTOLEAFCOUNT()
-#undef EVALTOLEAFCOUNT
+EVALTOLAYOUTSWAPINDEXTUPLELEAFCOUNT(const, TensorEvalToOp, 1)
+EVALTOLAYOUTSWAPINDEXTUPLELEAFCOUNT(, TensorEvalToOp, 1)
+EVALTOLAYOUTSWAPINDEXTUPLELEAFCOUNT(const, TensorLayoutSwapOp, 0)
+EVALTOLAYOUTSWAPINDEXTUPLELEAFCOUNT(, TensorLayoutSwapOp, 0)
+
+EVALTOLAYOUTSWAPINDEXTUPLELEAFCOUNT(const, TensorIndexTupleOp, 0)
+EVALTOLAYOUTSWAPINDEXTUPLELEAFCOUNT(, TensorIndexTupleOp, 0)
+
+#undef EVALTOLAYOUTSWAPINDEXTUPLELEAFCOUNT
/// specialisation of the \ref LeafCount struct when the node type is const TensorReductionOp
-#define REDUCTIONLEAFCOUNT(CVQual)\
+#define REDUCTIONLEAFCOUNT(CVQual, ExprNode)\
template <typename OP, typename Dim, typename Expr>\
-struct LeafCount<CVQual TensorReductionOp<OP, Dim, Expr> > {\
+struct LeafCount<CVQual ExprNode<OP, Dim, Expr> > {\
static const size_t Count =1;\
};
-REDUCTIONLEAFCOUNT(const)
-REDUCTIONLEAFCOUNT()
+// TensorReductionOp
+REDUCTIONLEAFCOUNT(const,TensorReductionOp)
+REDUCTIONLEAFCOUNT(,TensorReductionOp)
+
+// tensor Argmax -TensorTupleReducerOp
+REDUCTIONLEAFCOUNT(const, TensorTupleReducerOp)
+REDUCTIONLEAFCOUNT(, TensorTupleReducerOp)
+
#undef REDUCTIONLEAFCOUNT
/// specialisation of the \ref LeafCount struct when the node type is const TensorContractionOp
@@ -128,8 +160,6 @@ CONTRACTIONCONVOLUTIONLEAFCOUNT(const,TensorConvolutionOp)
CONTRACTIONCONVOLUTIONLEAFCOUNT(,TensorConvolutionOp)
#undef CONTRACTIONCONVOLUTIONLEAFCOUNT
-
-
/// specialisation of the \ref LeafCount struct when the node type is TensorSlicingOp
#define SLICEOPLEAFCOUNT(CVQual)\
template <typename StartIndices, typename Sizes, typename XprType>\
@@ -139,7 +169,6 @@ SLICEOPLEAFCOUNT(const)
SLICEOPLEAFCOUNT()
#undef SLICEOPLEAFCOUNT
-
/// specialisation of the \ref LeafCount struct when the node type is TensorChippingOp
#define CHIPPINGOPLEAFCOUNT(CVQual)\
template <DenseIndex DimId, typename XprType>\
@@ -149,7 +178,7 @@ CHIPPINGOPLEAFCOUNT(const)
CHIPPINGOPLEAFCOUNT()
#undef CHIPPINGOPLEAFCOUNT
-
+///TensorStridingSlicingOp
#define SLICESTRIDEOPLEAFCOUNT(CVQual)\
template<typename StartIndices, typename StopIndices, typename Strides, typename XprType>\
struct LeafCount<CVQual TensorStridingSlicingOp<StartIndices, StopIndices, Strides, XprType> >:CategoryCount<XprType>{};
@@ -158,6 +187,24 @@ SLICESTRIDEOPLEAFCOUNT(const)
SLICESTRIDEOPLEAFCOUNT()
#undef SLICESTRIDEOPLEAFCOUNT
+//TensorImagePatchOp
+#define TENSORIMAGEPATCHOPLEAFCOUNT(CVQual)\
+template<DenseIndex Rows, DenseIndex Cols, typename XprType>\
+struct LeafCount<CVQual TensorImagePatchOp<Rows, Cols, XprType> >:CategoryCount<XprType>{};
+
+
+TENSORIMAGEPATCHOPLEAFCOUNT(const)
+TENSORIMAGEPATCHOPLEAFCOUNT()
+#undef TENSORIMAGEPATCHOPLEAFCOUNT
+
+// TensorVolumePatchOp
+#define TENSORVOLUMEPATCHOPLEAFCOUNT(CVQual)\
+template<DenseIndex Planes, DenseIndex Rows, DenseIndex Cols, typename XprType>\
+struct LeafCount<CVQual TensorVolumePatchOp<Planes, Rows, Cols, XprType> >:CategoryCount<XprType>{};
+
+TENSORVOLUMEPATCHOPLEAFCOUNT(const)
+TENSORVOLUMEPATCHOPLEAFCOUNT()
+#undef TENSORVOLUMEPATCHOPLEAFCOUNT
} /// namespace TensorSycl
} /// namespace internal
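The LeafCount specialisations above decide how many terminal accessors an expression needs on the device: nodes that are evaluated separately (reductions, argmax, custom ops) count as a single leaf, while pure wrappers such as TensorLayoutSwapOp and TensorIndexTupleOp contribute nothing and forward their child's count. A toy re-implementation with hypothetical node types, shown only to make that counting rule concrete:

// --- illustrative sketch, not part of the patch ---
#include <cstddef>

template <typename LHS, typename RHS> struct ToyBinaryOp {};
template <typename Arg>               struct ToyReductionOp {};  // evaluated separately -> one leaf
template <typename Arg>               struct ToyLayoutSwapOp {}; // pure wrapper -> zero extra leaves
struct ToyTensor {};                                             // terminal node -> one leaf

template <typename Expr> struct ToyLeafCount;
template <> struct ToyLeafCount<ToyTensor> { static const std::size_t Count = 1; };
template <typename L, typename R>
struct ToyLeafCount<ToyBinaryOp<L, R> > {
  static const std::size_t Count = ToyLeafCount<L>::Count + ToyLeafCount<R>::Count;
};
template <typename Arg>
struct ToyLeafCount<ToyReductionOp<Arg> > { static const std::size_t Count = 1; };
template <typename Arg>
struct ToyLeafCount<ToyLayoutSwapOp<Arg> > : ToyLeafCount<Arg> {};

static_assert(ToyLeafCount<ToyBinaryOp<ToyTensor, ToyReductionOp<ToyTensor> > >::Count == 2,
              "one leaf for the tensor, one for the pre-evaluated reduction");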
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclPlaceHolderExpr.h b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclPlaceHolderExpr.h
index 74566dcee..9d5708fc5 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclPlaceHolderExpr.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclPlaceHolderExpr.h
@@ -143,17 +143,52 @@ FORCEDEVAL(const)
FORCEDEVAL()
#undef FORCEDEVAL
+
+/// specialisation of the \ref PlaceHolderExpression when the node is
+/// TensorCustomUnaryOp
+#define CUSTOMUNARYOPEVAL(CVQual)\
+template <typename CustomUnaryFunc, typename XprType, size_t N>\
+struct PlaceHolderExpression<CVQual TensorCustomUnaryOp<CustomUnaryFunc, XprType>, N> {\
+ typedef CVQual PlaceHolder<CVQual TensorCustomUnaryOp<CustomUnaryFunc, XprType>, N> Type;\
+};
+
+CUSTOMUNARYOPEVAL(const)
+CUSTOMUNARYOPEVAL()
+#undef CUSTOMUNARYOPEVAL
+
+
/// specialisation of the \ref PlaceHolderExpression when the node is
-/// TensorEvalToOp
-#define EVALTO(CVQual)\
+/// TensorCustomBinaryOp
+#define CUSTOMBINARYOPEVAL(CVQual)\
+template <typename CustomBinaryFunc, typename LhsXprType, typename RhsXprType, size_t N>\
+struct PlaceHolderExpression<CVQual TensorCustomBinaryOp<CustomBinaryFunc, LhsXprType, RhsXprType>, N> {\
+ typedef CVQual PlaceHolder<CVQual TensorCustomBinaryOp<CustomBinaryFunc, LhsXprType, RhsXprType>, N> Type;\
+};
+
+CUSTOMBINARYOPEVAL(const)
+CUSTOMBINARYOPEVAL()
+#undef CUSTOMBINARYOPEVAL
+
+
+/// specialisation of the \ref PlaceHolderExpression when the node is
+/// TensorEvalToOp, TensorLayoutSwapOp, and TensorIndexTupleOp
+#define EVALTOLAYOUTSWAPINDEXTUPLE(CVQual, ExprNode)\
template <typename Expr, size_t N>\
-struct PlaceHolderExpression<CVQual TensorEvalToOp<Expr>, N> {\
- typedef CVQual TensorEvalToOp<typename CalculateIndex <N, Expr>::ArgType> Type;\
+struct PlaceHolderExpression<CVQual ExprNode<Expr>, N> {\
+ typedef CVQual ExprNode<typename CalculateIndex <N, Expr>::ArgType> Type;\
};
-EVALTO(const)
-EVALTO()
-#undef EVALTO
+// TensorEvalToOp
+EVALTOLAYOUTSWAPINDEXTUPLE(const, TensorEvalToOp)
+EVALTOLAYOUTSWAPINDEXTUPLE(, TensorEvalToOp)
+//TensorLayoutSwapOp
+EVALTOLAYOUTSWAPINDEXTUPLE(const, TensorLayoutSwapOp)
+EVALTOLAYOUTSWAPINDEXTUPLE(, TensorLayoutSwapOp)
+//TensorIndexTupleOp
+EVALTOLAYOUTSWAPINDEXTUPLE(const, TensorIndexTupleOp)
+EVALTOLAYOUTSWAPINDEXTUPLE(, TensorIndexTupleOp)
+
+#undef EVALTOLAYOUTSWAPINDEXTUPLE
/// specialisation of the \ref PlaceHolderExpression when the node is
@@ -169,17 +204,24 @@ CHIPPINGOP()
#undef CHIPPINGOP
/// specialisation of the \ref PlaceHolderExpression when the node is
-/// TensorReductionOp
-#define SYCLREDUCTION(CVQual)\
+/// TensorReductionOp and TensorTupleReducerOp (Argmax)
+#define SYCLREDUCTION(CVQual, ExprNode)\
template <typename OP, typename Dims, typename Expr, size_t N>\
-struct PlaceHolderExpression<CVQual TensorReductionOp<OP, Dims, Expr>, N>{\
- typedef CVQual PlaceHolder<CVQual TensorReductionOp<OP, Dims,Expr>, N> Type;\
+struct PlaceHolderExpression<CVQual ExprNode<OP, Dims, Expr>, N>{\
+ typedef CVQual PlaceHolder<CVQual ExprNode<OP, Dims,Expr>, N> Type;\
};
-SYCLREDUCTION(const)
-SYCLREDUCTION()
+
+// tensor reduction
+SYCLREDUCTION(const, TensorReductionOp)
+SYCLREDUCTION(, TensorReductionOp)
+
+// tensor Argmax -TensorTupleReducerOp
+SYCLREDUCTION(const, TensorTupleReducerOp)
+SYCLREDUCTION(, TensorTupleReducerOp)
#undef SYCLREDUCTION
+
/// specialisation of the \ref PlaceHolderExpression when the node is
/// TensorContractionOp and TensorConvolutionOp
#define SYCLCONTRACTIONCONVOLUTIONPLH(CVQual, ExprNode)\
@@ -218,6 +260,34 @@ SYCLSLICESTRIDEOPPLH()
#undef SYCLSLICESTRIDEOPPLH
+
+/// specialisation of the \ref PlaceHolderExpression when the node is
+/// TensorImagePatchOp
+#define SYCLTENSORIMAGEPATCHOP(CVQual)\
+template<DenseIndex Rows, DenseIndex Cols, typename XprType, size_t N>\
+struct PlaceHolderExpression<CVQual TensorImagePatchOp<Rows, Cols, XprType>, N> {\
+ typedef CVQual TensorImagePatchOp<Rows, Cols, typename CalculateIndex <N, XprType>::ArgType> Type;\
+};
+
+SYCLTENSORIMAGEPATCHOP(const)
+SYCLTENSORIMAGEPATCHOP()
+#undef SYCLTENSORIMAGEPATCHOP
+
+
+
+/// specialisation of the \ref PlaceHolderExpression when the node is
+/// TensorVolumePatchOp
+#define SYCLTENSORVOLUMEPATCHOP(CVQual)\
+template<DenseIndex Planes, DenseIndex Rows, DenseIndex Cols, typename XprType, size_t N>\
+struct PlaceHolderExpression<CVQual TensorVolumePatchOp<Planes,Rows, Cols, XprType>, N> {\
+ typedef CVQual TensorVolumePatchOp<Planes,Rows, Cols, typename CalculateIndex <N, XprType>::ArgType> Type;\
+};
+
+SYCLTENSORVOLUMEPATCHOP(const)
+SYCLTENSORVOLUMEPATCHOP()
+#undef SYCLTENSORVOLUMEPATCHOP
+
+
/// template deduction for \ref PlaceHolderExpression struct
template <typename Expr>
struct createPlaceHolderExpression {
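The PlaceHolderExpression specialisations above rebuild the host expression with each leaf replaced by a numbered PlaceHolder so it can later be rebound to a device accessor. A hypothetical miniature of that scheme, shown only to illustrate the three cases visible in this file: terminals become numbered placeholders, separately-evaluated nodes (reductions, argmax, custom ops) become placeholders as a whole, and wrappers such as TensorLayoutSwapOp keep their type and rewrite only their child; the Toy* names are invented.

// --- illustrative sketch, not part of the patch ---
#include <cstddef>

template <typename Node, std::size_t N> struct ToyPlaceHolder {};
template <typename Op, typename Arg>    struct ToyReductionOp {};
template <typename Arg>                 struct ToyLayoutSwapOp {};
struct ToyTensor {};

template <typename Expr, std::size_t N> struct ToyPlaceHolderExpr;
// A terminal tensor becomes a numbered placeholder; N is its accessor slot.
template <std::size_t N>
struct ToyPlaceHolderExpr<ToyTensor, N> { typedef ToyPlaceHolder<ToyTensor, N> Type; };
// A reduction counts as one leaf, so the whole node becomes a placeholder
// (the SYCLREDUCTION case above).
template <typename Op, typename Arg, std::size_t N>
struct ToyPlaceHolderExpr<ToyReductionOp<Op, Arg>, N> {
  typedef ToyPlaceHolder<ToyReductionOp<Op, Arg>, N> Type;
};
// A wrapper keeps its own type and rewrites its child
// (the EVALTOLAYOUTSWAPINDEXTUPLE case; the real code splits N via CalculateIndex).
template <typename Arg, std::size_t N>
struct ToyPlaceHolderExpr<ToyLayoutSwapOp<Arg>, N> {
  typedef ToyLayoutSwapOp<typename ToyPlaceHolderExpr<Arg, N>::Type> Type;
};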
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclRun.h b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclRun.h
index cac785540..29c78184d 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclRun.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclRun.h
@@ -25,7 +25,6 @@
namespace Eigen {
namespace TensorSycl {
-
template<typename Expr, typename FunctorExpr, typename TupleType > struct ExecExprFunctorKernel{
typedef typename internal::createPlaceHolderExpression<Expr>::Type PlaceHolderExpr;
@@ -38,7 +37,7 @@ template<typename Expr, typename FunctorExpr, typename TupleType > struct ExecEx
void operator()(cl::sycl::nd_item<1> itemID) {
typedef typename internal::ConvertToDeviceExpression<Expr>::Type DevExpr;
auto device_expr =internal::createDeviceExpression<DevExpr, PlaceHolderExpr>(functors, tuple_of_accessors);
- auto device_evaluator = Eigen::TensorEvaluator<decltype(device_expr.expr), Eigen::DefaultDevice>(device_expr.expr, Eigen::DefaultDevice());
+ auto device_evaluator = Eigen::TensorEvaluator<decltype(device_expr.expr), Eigen::SyclKernelDevice>(device_expr.expr, Eigen::SyclKernelDevice());
typename DevExpr::Index gId = static_cast<typename DevExpr::Index>(itemID.get_global_linear_id());
if (gId < range)
device_evaluator.evalScalar(gId);
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclTuple.h b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclTuple.h
index 58ab0f0d5..9e6c3e4fa 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclTuple.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclTuple.h
@@ -143,7 +143,7 @@ struct IndexList {};
/// \brief Collects internal details for generating index ranges [MIN, MAX)
/// Declare primary template for index range builder
/// \tparam MIN is the starting index in the tuple
-/// \tparam N represents sizeof..(elemens)- sizeof...(Is)
+/// \tparam N represents sizeof..(elements)- sizeof...(Is)
/// \tparam Is... are the list of generated index so far
template <size_t MIN, size_t N, size_t... Is>
struct RangeBuilder;
@@ -161,7 +161,7 @@ struct RangeBuilder<MIN, MIN, Is...> {
/// in this case we are recursively subtracting N by one and adding one
/// index to Is... list until MIN==N
/// \tparam MIN is the starting index in the tuple
-/// \tparam N represents sizeof..(elemens)- sizeof...(Is)
+/// \tparam N represents sizeof..(elements)- sizeof...(Is)
/// \tparam Is... are the list of generated index so far
template <size_t MIN, size_t N, size_t... Is>
struct RangeBuilder : public RangeBuilder<MIN, N - 1, N - 1, Is...> {};
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorTrace.h b/unsupported/Eigen/CXX11/src/Tensor/TensorTrace.h
new file mode 100644
index 000000000..9fc54a4c0
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorTrace.h
@@ -0,0 +1,290 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2017 Gagan Goel <gagan.nith@gmail.com>
+// Copyright (C) 2017 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_TRACE_H
+#define EIGEN_CXX11_TENSOR_TENSOR_TRACE_H
+
+namespace Eigen {
+
+/** \class TensorTrace
+ * \ingroup CXX11_Tensor_Module
+ *
+ * \brief Tensor Trace class.
+ *
+ *
+ */
+
+namespace internal {
+template<typename Dims, typename XprType>
+struct traits<TensorTraceOp<Dims, XprType> > : public traits<XprType>
+{
+ typedef typename XprType::Scalar Scalar;
+ typedef traits<XprType> XprTraits;
+ typedef typename XprTraits::StorageKind StorageKind;
+ typedef typename XprTraits::Index Index;
+ typedef typename XprType::Nested Nested;
+ typedef typename remove_reference<Nested>::type _Nested;
+ static const int NumDimensions = XprTraits::NumDimensions - array_size<Dims>::value;
+ static const int Layout = XprTraits::Layout;
+};
+
+template<typename Dims, typename XprType>
+struct eval<TensorTraceOp<Dims, XprType>, Eigen::Dense>
+{
+ typedef const TensorTraceOp<Dims, XprType>& type;
+};
+
+template<typename Dims, typename XprType>
+struct nested<TensorTraceOp<Dims, XprType>, 1, typename eval<TensorTraceOp<Dims, XprType> >::type>
+{
+ typedef TensorTraceOp<Dims, XprType> type;
+};
+
+} // end namespace internal
+
+
+template<typename Dims, typename XprType>
+class TensorTraceOp : public TensorBase<TensorTraceOp<Dims, XprType> >
+{
+ public:
+ typedef typename Eigen::internal::traits<TensorTraceOp>::Scalar Scalar;
+ typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+ typedef typename Eigen::internal::nested<TensorTraceOp>::type Nested;
+ typedef typename Eigen::internal::traits<TensorTraceOp>::StorageKind StorageKind;
+ typedef typename Eigen::internal::traits<TensorTraceOp>::Index Index;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorTraceOp(const XprType& expr, const Dims& dims)
+ : m_xpr(expr), m_dims(dims) {
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const Dims& dims() const { return m_dims; }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const typename internal::remove_all<typename XprType::Nested>::type& expression() const { return m_xpr; }
+
+ protected:
+ typename XprType::Nested m_xpr;
+ const Dims m_dims;
+};
+
+
+// Eval as rvalue
+template<typename Dims, typename ArgType, typename Device>
+struct TensorEvaluator<const TensorTraceOp<Dims, ArgType>, Device>
+{
+ typedef TensorTraceOp<Dims, ArgType> XprType;
+ static const int NumInputDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value;
+ static const int NumReducedDims = internal::array_size<Dims>::value;
+ static const int NumOutputDims = NumInputDims - NumReducedDims;
+ typedef typename XprType::Index Index;
+ typedef DSizes<Index, NumOutputDims> Dimensions;
+ typedef typename XprType::Scalar Scalar;
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+ typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
+ static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
+
+ enum {
+ IsAligned = false,
+ PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
+ BlockAccess = false,
+ PreferBlockAccess = false,
+ Layout = TensorEvaluator<ArgType, Device>::Layout,
+ CoordAccess = false,
+ RawAccess = false
+ };
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
+ : m_impl(op.expression(), device), m_traceDim(1), m_device(device)
+ {
+
+ EIGEN_STATIC_ASSERT((NumOutputDims >= 0), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT((NumReducedDims >= 2) || ((NumReducedDims == 0) && (NumInputDims == 0)), YOU_MADE_A_PROGRAMMING_MISTAKE);
+
+ for (int i = 0; i < NumInputDims; ++i) {
+ m_reduced[i] = false;
+ }
+
+ const Dims& op_dims = op.dims();
+ for (int i = 0; i < NumReducedDims; ++i) {
+ eigen_assert(op_dims[i] >= 0);
+ eigen_assert(op_dims[i] < NumInputDims);
+ m_reduced[op_dims[i]] = true;
+ }
+
+ // All the dimensions should be distinct to compute the trace
+ int num_distinct_reduce_dims = 0;
+ for (int i = 0; i < NumInputDims; ++i) {
+ if (m_reduced[i]) {
+ ++num_distinct_reduce_dims;
+ }
+ }
+
+ eigen_assert(num_distinct_reduce_dims == NumReducedDims);
+
+ // Compute the dimensions of the result.
+ const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
+
+ int output_index = 0;
+ int reduced_index = 0;
+ for (int i = 0; i < NumInputDims; ++i) {
+ if (m_reduced[i]) {
+ m_reducedDims[reduced_index] = input_dims[i];
+ if (reduced_index > 0) {
+ // All the trace dimensions must have the same size
+ eigen_assert(m_reducedDims[0] == m_reducedDims[reduced_index]);
+ }
+ ++reduced_index;
+ }
+ else {
+ m_dimensions[output_index] = input_dims[i];
+ ++output_index;
+ }
+ }
+
+ if (NumReducedDims != 0) {
+ m_traceDim = m_reducedDims[0];
+ }
+
+ // Compute the output strides
+ if (NumOutputDims > 0) {
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ m_outputStrides[0] = 1;
+ for (int i = 1; i < NumOutputDims; ++i) {
+ m_outputStrides[i] = m_outputStrides[i - 1] * m_dimensions[i - 1];
+ }
+ }
+ else {
+ m_outputStrides.back() = 1;
+ for (int i = NumOutputDims - 2; i >= 0; --i) {
+ m_outputStrides[i] = m_outputStrides[i + 1] * m_dimensions[i + 1];
+ }
+ }
+ }
+
+ // Compute the input strides
+ if (NumInputDims > 0) {
+ array<Index, NumInputDims> input_strides;
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ input_strides[0] = 1;
+ for (int i = 1; i < NumInputDims; ++i) {
+ input_strides[i] = input_strides[i - 1] * input_dims[i - 1];
+ }
+ }
+ else {
+ input_strides.back() = 1;
+ for (int i = NumInputDims - 2; i >= 0; --i) {
+ input_strides[i] = input_strides[i + 1] * input_dims[i + 1];
+ }
+ }
+
+ output_index = 0;
+ reduced_index = 0;
+ for (int i = 0; i < NumInputDims; ++i) {
+ if(m_reduced[i]) {
+ m_reducedStrides[reduced_index] = input_strides[i];
+ ++reduced_index;
+ }
+ else {
+ m_preservedStrides[output_index] = input_strides[i];
+ ++output_index;
+ }
+ }
+ }
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const {
+ return m_dimensions;
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* /*data*/) {
+ m_impl.evalSubExprsIfNeeded(NULL);
+ return true;
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
+ m_impl.cleanup();
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
+ {
+ // Initialize the result
+ CoeffReturnType result = internal::cast<int, CoeffReturnType>(0);
+ Index index_stride = 0;
+ for (int i = 0; i < NumReducedDims; ++i) {
+ index_stride += m_reducedStrides[i];
+ }
+
+ // If trace is requested along all dimensions, starting index would be 0
+ Index cur_index = 0;
+ if (NumOutputDims != 0)
+ cur_index = firstInput(index);
+ for (Index i = 0; i < m_traceDim; ++i) {
+ result += m_impl.coeff(cur_index);
+ cur_index += index_stride;
+ }
+
+ return result;
+ }
+
+ template<int LoadMode>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const {
+
+ EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ eigen_assert(index + PacketSize - 1 < dimensions().TotalSize());
+
+ EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize];
+ for (int i = 0; i < PacketSize; ++i) {
+ values[i] = coeff(index + i);
+ }
+ PacketReturnType result = internal::ploadt<PacketReturnType, LoadMode>(values);
+ return result;
+ }
+
+ protected:
+ // Given the output index, finds the first index in the input tensor used to compute the trace
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index firstInput(Index index) const {
+ Index startInput = 0;
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+ for (int i = NumOutputDims - 1; i > 0; --i) {
+ const Index idx = index / m_outputStrides[i];
+ startInput += idx * m_preservedStrides[i];
+ index -= idx * m_outputStrides[i];
+ }
+ startInput += index * m_preservedStrides[0];
+ }
+ else {
+ for (int i = 0; i < NumOutputDims - 1; ++i) {
+ const Index idx = index / m_outputStrides[i];
+ startInput += idx * m_preservedStrides[i];
+ index -= idx * m_outputStrides[i];
+ }
+ startInput += index * m_preservedStrides[NumOutputDims - 1];
+ }
+ return startInput;
+ }
+
+ Dimensions m_dimensions;
+ TensorEvaluator<ArgType, Device> m_impl;
+ // Initialize the size of the trace dimension
+ Index m_traceDim;
+ const Device& m_device;
+ array<bool, NumInputDims> m_reduced;
+ array<Index, NumReducedDims> m_reducedDims;
+ array<Index, NumOutputDims> m_outputStrides;
+ array<Index, NumReducedDims> m_reducedStrides;
+ array<Index, NumOutputDims> m_preservedStrides;
+};
+
+
+} // End namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_TRACE_H
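A usage sketch for the new trace evaluator, assuming the TensorBase::trace(dims) entry point that accompanies this evaluator elsewhere in the changeset; only the shape bookkeeping (reduced dimensions of equal size are removed from the output) is taken from the file above.

// --- illustrative sketch, not part of the patch ---
#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>

int main() {
  // 2 x 4 x 4 tensor; tracing over the last two dimensions removes both
  // and leaves a rank-1 result of size 2.
  Eigen::Tensor<float, 3> t(2, 4, 4);
  t.setRandom();

  Eigen::array<ptrdiff_t, 2> trace_dims{{1, 2}};
  Eigen::Tensor<float, 1> tr = t.trace(trace_dims);  // assumed TensorBase entry point

  // Equivalent scalar loop for one slice, matching coeff()'s striding logic.
  float check = 0.f;
  for (int i = 0; i < 4; ++i) check += t(0, i, i);
  std::cout << tr(0) << " vs " << check << "\n";
  return 0;
}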
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorTraits.h b/unsupported/Eigen/CXX11/src/Tensor/TensorTraits.h
index a1e944e59..0a394c88d 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorTraits.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorTraits.h
@@ -59,8 +59,10 @@ struct traits<Tensor<Scalar_, NumIndices_, Options_, IndexType_> >
template <typename T> struct MakePointer {
typedef T* Type;
typedef T& RefType;
+ typedef T ScalarType;
};
+ typedef typename MakePointer<Scalar>::Type PointerType;
};
@@ -79,8 +81,10 @@ struct traits<TensorFixedSize<Scalar_, Dimensions, Options_, IndexType_> >
template <typename T> struct MakePointer {
typedef T* Type;
typedef T& RefType;
+ typedef T ScalarType;
};
+ typedef typename MakePointer<Scalar>::Type PointerType;
};
@@ -103,8 +107,11 @@ struct traits<TensorMap<PlainObjectType, Options_, MakePointer_> >
typedef MakePointer_<T> MakePointerT;
typedef typename MakePointerT::Type Type;
typedef typename MakePointerT::RefType RefType;
+ typedef typename MakePointerT::ScalarType ScalarType;
+
};
+ typedef typename MakePointer<Scalar>::Type PointerType;
};
template<typename PlainObjectType>
@@ -121,6 +128,7 @@ struct traits<TensorRef<PlainObjectType> >
Options = BaseTraits::Options,
Flags = BaseTraits::Flags
};
+ typedef typename BaseTraits::PointerType PointerType;
};
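The traits changes above add a ScalarType member to MakePointer and a top-level PointerType, so evaluators can report a backend-specific pointer type while the element type stays visible. A minimal standalone analogue of that indirection, with invented ToyTraits names, just to show the shape of the pattern:

// --- illustrative sketch, not part of the patch ---
#include <type_traits>

template <typename Scalar_>
struct ToyTraits {
  typedef Scalar_ Scalar;
  template <typename T> struct MakePointer {
    typedef T* Type;        // what data() and evaluators hand around
    typedef T& RefType;     // what coeffRef-style accessors return
    typedef T  ScalarType;  // the element type, still visible after wrapping
  };
  typedef typename MakePointer<Scalar>::Type PointerType;
};

static_assert(std::is_same<ToyTraits<float>::PointerType, float*>::value,
              "plain pointer by default; a backend can specialise MakePointer");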
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorUInt128.h b/unsupported/Eigen/CXX11/src/Tensor/TensorUInt128.h
index 3523e7c94..d23f2e4c8 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorUInt128.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorUInt128.h
@@ -23,6 +23,7 @@ struct static_val {
template <typename T>
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE static_val(const T& v) {
+ EIGEN_UNUSED_VARIABLE(v);
eigen_assert(v == n);
}
};
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorVolumePatch.h b/unsupported/Eigen/CXX11/src/Tensor/TensorVolumePatch.h
index 0ca2cac84..c1b7a58ca 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorVolumePatch.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorVolumePatch.h
@@ -22,6 +22,7 @@ namespace Eigen {
* dimensions.
*/
namespace internal {
+
template<DenseIndex Planes, DenseIndex Rows, DenseIndex Cols, typename XprType>
struct traits<TensorVolumePatchOp<Planes, Rows, Cols, XprType> > : public traits<XprType>
{
@@ -33,6 +34,8 @@ struct traits<TensorVolumePatchOp<Planes, Rows, Cols, XprType> > : public traits
typedef typename remove_reference<Nested>::type _Nested;
static const int NumDimensions = XprTraits::NumDimensions + 1;
static const int Layout = XprTraits::Layout;
+ typedef typename XprTraits::PointerType PointerType;
+
};
template<DenseIndex Planes, DenseIndex Rows, DenseIndex Cols, typename XprType>
@@ -65,12 +68,12 @@ class TensorVolumePatchOp : public TensorBase<TensorVolumePatchOp<Planes, Rows,
DenseIndex in_plane_strides, DenseIndex in_row_strides, DenseIndex in_col_strides,
DenseIndex plane_inflate_strides, DenseIndex row_inflate_strides, DenseIndex col_inflate_strides,
PaddingType padding_type, Scalar padding_value)
- : m_xpr(expr), m_patch_planes(patch_planes), m_patch_rows(patch_rows), m_patch_cols(patch_cols),
- m_plane_strides(plane_strides), m_row_strides(row_strides), m_col_strides(col_strides),
- m_in_plane_strides(in_plane_strides), m_in_row_strides(in_row_strides), m_in_col_strides(in_col_strides),
- m_plane_inflate_strides(plane_inflate_strides), m_row_inflate_strides(row_inflate_strides), m_col_inflate_strides(col_inflate_strides),
- m_padding_explicit(false), m_padding_top_z(0), m_padding_bottom_z(0), m_padding_top(0), m_padding_bottom(0), m_padding_left(0), m_padding_right(0),
- m_padding_type(padding_type), m_padding_value(padding_value) {}
+ : m_xpr(expr), m_patch_planes(patch_planes), m_patch_rows(patch_rows), m_patch_cols(patch_cols),
+ m_plane_strides(plane_strides), m_row_strides(row_strides), m_col_strides(col_strides),
+ m_in_plane_strides(in_plane_strides), m_in_row_strides(in_row_strides), m_in_col_strides(in_col_strides),
+ m_plane_inflate_strides(plane_inflate_strides), m_row_inflate_strides(row_inflate_strides), m_col_inflate_strides(col_inflate_strides),
+ m_padding_explicit(false), m_padding_top_z(0), m_padding_bottom_z(0), m_padding_top(0), m_padding_bottom(0), m_padding_left(0), m_padding_right(0),
+ m_padding_type(padding_type), m_padding_value(padding_value) {}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorVolumePatchOp(const XprType& expr, DenseIndex patch_planes, DenseIndex patch_rows, DenseIndex patch_cols,
DenseIndex plane_strides, DenseIndex row_strides, DenseIndex col_strides,
@@ -80,13 +83,31 @@ class TensorVolumePatchOp : public TensorBase<TensorVolumePatchOp<Planes, Rows,
DenseIndex padding_top, DenseIndex padding_bottom,
DenseIndex padding_left, DenseIndex padding_right,
Scalar padding_value)
- : m_xpr(expr), m_patch_planes(patch_planes), m_patch_rows(patch_rows), m_patch_cols(patch_cols),
- m_plane_strides(plane_strides), m_row_strides(row_strides), m_col_strides(col_strides),
- m_in_plane_strides(in_plane_strides), m_in_row_strides(in_row_strides), m_in_col_strides(in_col_strides),
- m_plane_inflate_strides(plane_inflate_strides), m_row_inflate_strides(row_inflate_strides), m_col_inflate_strides(col_inflate_strides),
- m_padding_explicit(true), m_padding_top_z(padding_top_z), m_padding_bottom_z(padding_bottom_z), m_padding_top(padding_top), m_padding_bottom(padding_bottom),
- m_padding_left(padding_left), m_padding_right(padding_right),
- m_padding_type(PADDING_VALID), m_padding_value(padding_value) {}
+ : m_xpr(expr), m_patch_planes(patch_planes), m_patch_rows(patch_rows), m_patch_cols(patch_cols),
+ m_plane_strides(plane_strides), m_row_strides(row_strides), m_col_strides(col_strides),
+ m_in_plane_strides(in_plane_strides), m_in_row_strides(in_row_strides), m_in_col_strides(in_col_strides),
+ m_plane_inflate_strides(plane_inflate_strides), m_row_inflate_strides(row_inflate_strides), m_col_inflate_strides(col_inflate_strides),
+ m_padding_explicit(true), m_padding_top_z(padding_top_z), m_padding_bottom_z(padding_bottom_z), m_padding_top(padding_top), m_padding_bottom(padding_bottom),
+ m_padding_left(padding_left), m_padding_right(padding_right),
+ m_padding_type(PADDING_VALID), m_padding_value(padding_value) {}
+
+#ifdef EIGEN_USE_SYCL // this is a workaround for SYCL, as Eigen cannot use the C++11 delegating constructor feature here
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorVolumePatchOp(const XprType& expr, DenseIndex patch_planes, DenseIndex patch_rows, DenseIndex patch_cols,
+ DenseIndex plane_strides, DenseIndex row_strides, DenseIndex col_strides,
+ DenseIndex in_plane_strides, DenseIndex in_row_strides, DenseIndex in_col_strides,
+ DenseIndex plane_inflate_strides, DenseIndex row_inflate_strides, DenseIndex col_inflate_strides,
+ bool padding_explicit, DenseIndex padding_top_z, DenseIndex padding_bottom_z,
+ DenseIndex padding_top, DenseIndex padding_bottom, DenseIndex padding_left,
+ DenseIndex padding_right, PaddingType padding_type, Scalar padding_value)
+ : m_xpr(expr), m_patch_planes(patch_planes), m_patch_rows(patch_rows), m_patch_cols(patch_cols),
+ m_plane_strides(plane_strides), m_row_strides(row_strides), m_col_strides(col_strides),
+ m_in_plane_strides(in_plane_strides), m_in_row_strides(in_row_strides), m_in_col_strides(in_col_strides),
+ m_plane_inflate_strides(plane_inflate_strides), m_row_inflate_strides(row_inflate_strides),
+ m_col_inflate_strides(col_inflate_strides), m_padding_explicit(padding_explicit), m_padding_top_z(padding_top_z),
+ m_padding_bottom_z(padding_bottom_z), m_padding_top(padding_top), m_padding_bottom(padding_bottom), m_padding_left(padding_left),
+ m_padding_right(padding_right), m_padding_type(padding_type), m_padding_value(padding_value) {}
+
+#endif
EIGEN_DEVICE_FUNC
DenseIndex patch_planes() const { return m_patch_planes; }
@@ -173,19 +194,27 @@ struct TensorEvaluator<const TensorVolumePatchOp<Planes, Rows, Cols, ArgType>, D
typedef typename internal::remove_const<typename XprType::Scalar>::type Scalar;
typedef typename XprType::CoeffReturnType CoeffReturnType;
typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
- static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
+ static const int PacketSize = PacketType<CoeffReturnType, Device>::size;
enum {
IsAligned = false,
PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
BlockAccess = false,
+ PreferBlockAccess = false,
Layout = TensorEvaluator<ArgType, Device>::Layout,
CoordAccess = false,
RawAccess = false
};
+#ifdef __SYCL_DEVICE_ONLY__
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator( const XprType op, const Device& device)
+#else
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator( const XprType& op, const Device& device)
+#endif
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
: m_impl(op.expression(), device)
+#ifdef EIGEN_USE_SYCL
+ , m_op(op)
+#endif
{
EIGEN_STATIC_ASSERT((NumDims >= 5), YOU_MADE_A_PROGRAMMING_MISTAKE);
@@ -322,6 +351,7 @@ struct TensorEvaluator<const TensorVolumePatchOp<Planes, Rows, Cols, ArgType>, D
// Fast representations of different variables.
m_fastOtherStride = internal::TensorIntDivisor<Index>(m_otherStride);
+
m_fastPatchStride = internal::TensorIntDivisor<Index>(m_patchStride);
m_fastColStride = internal::TensorIntDivisor<Index>(m_colStride);
m_fastRowStride = internal::TensorIntDivisor<Index>(m_rowStride);
@@ -338,7 +368,6 @@ struct TensorEvaluator<const TensorVolumePatchOp<Planes, Rows, Cols, ArgType>, D
m_fastOutputDepth = internal::TensorIntDivisor<Index>(m_dimensions[NumDims-1]);
}
}
-
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* /*data*/) {
@@ -502,10 +531,15 @@ struct TensorEvaluator<const TensorVolumePatchOp<Planes, Rows, Cols, ArgType>, D
return TensorOpCost(0, 0, compute_cost, vectorized, PacketSize);
}
- EIGEN_DEVICE_FUNC Scalar* data() const { return NULL; }
+ EIGEN_DEVICE_FUNC typename Eigen::internal::traits<XprType>::PointerType data() const { return NULL; }
const TensorEvaluator<ArgType, Device>& impl() const { return m_impl; }
+#ifdef EIGEN_USE_SYCL
+ // Required by SYCL in order to construct the expression on the device
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const XprType& xpr() const { return m_op; }
+#endif
+
Index planePaddingTop() const { return m_planePaddingTop; }
Index rowPaddingTop() const { return m_rowPaddingTop; }
Index colPaddingLeft() const { return m_colPaddingLeft; }
@@ -535,7 +569,7 @@ struct TensorEvaluator<const TensorVolumePatchOp<Planes, Rows, Cols, ArgType>, D
Dimensions m_dimensions;
- // Parameters passed to the costructor.
+ // Parameters passed to the constructor.
Index m_plane_strides;
Index m_row_strides;
Index m_col_strides;
@@ -600,6 +634,12 @@ struct TensorEvaluator<const TensorVolumePatchOp<Planes, Rows, Cols, ArgType>, D
Scalar m_paddingValue;
TensorEvaluator<ArgType, Device> m_impl;
+
+#ifdef EIGEN_USE_SYCL
+// Required by SYCL in order to construct the expression on the device
+ XprType m_op;
+#endif
+
};
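The SYCL-only constructor added above repeats the full member-initializer list because the device compiler could not use a C++11 delegating constructor at this point. A tiny, hypothetical illustration of the two forms, included only to show what the workaround trades away:

// --- illustrative sketch, not part of the patch ---
struct PatchParams {
  int rows, cols;
  bool explicit_padding;

  // Preferred C++11 form: delegate to the most general constructor.
  PatchParams(int r, int c) : PatchParams(r, c, /*explicit_padding=*/false) {}

  // Fallback when delegation is unavailable: spell out every member again,
  // which is what the EIGEN_USE_SYCL constructor above does.
  PatchParams(int r, int c, bool pad) : rows(r), cols(c), explicit_padding(pad) {}
};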
diff --git a/unsupported/Eigen/CXX11/src/TensorSymmetry/util/TemplateGroupTheory.h b/unsupported/Eigen/CXX11/src/TensorSymmetry/util/TemplateGroupTheory.h
index 0fe0b7c46..04d6d6b23 100644
--- a/unsupported/Eigen/CXX11/src/TensorSymmetry/util/TemplateGroupTheory.h
+++ b/unsupported/Eigen/CXX11/src/TensorSymmetry/util/TemplateGroupTheory.h
@@ -241,7 +241,7 @@ struct dimino_first_step_elements
* multiplying all elements in the given subgroup with the new
* coset representative. Note that the first element of the
* subgroup is always the identity element, so the first element of
- * ther result of this template is going to be the coset
+ * the result of this template is going to be the coset
* representative itself.
*
* Note that this template accepts an additional boolean parameter
diff --git a/unsupported/Eigen/CXX11/src/ThreadPool/Barrier.h b/unsupported/Eigen/CXX11/src/ThreadPool/Barrier.h
new file mode 100644
index 000000000..ef5e9ff18
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/ThreadPool/Barrier.h
@@ -0,0 +1,64 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2018 Rasmus Munk Larsen <rmlarsen@google.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// Barrier is an object that allows one or more threads to wait until
+// Notify has been called a specified number of times.
+
+#ifndef EIGEN_CXX11_THREADPOOL_BARRIER_H
+#define EIGEN_CXX11_THREADPOOL_BARRIER_H
+
+namespace Eigen {
+
+class Barrier {
+ public:
+ Barrier(unsigned int count) : state_(count << 1), notified_(false) {
+ eigen_assert(((count << 1) >> 1) == count);
+ }
+ ~Barrier() { eigen_plain_assert((state_ >> 1) == 0); }
+
+ void Notify() {
+ unsigned int v = state_.fetch_sub(2, std::memory_order_acq_rel) - 2;
+ if (v != 1) {
+ eigen_assert(((v + 2) & ~1) != 0);
+ return; // either count has not dropped to 0, or waiter is not waiting
+ }
+ std::unique_lock<std::mutex> l(mu_);
+ eigen_assert(!notified_);
+ notified_ = true;
+ cv_.notify_all();
+ }
+
+ void Wait() {
+ unsigned int v = state_.fetch_or(1, std::memory_order_acq_rel);
+ if ((v >> 1) == 0) return;
+ std::unique_lock<std::mutex> l(mu_);
+ while (!notified_) {
+ cv_.wait(l);
+ }
+ }
+
+ private:
+ std::mutex mu_;
+ std::condition_variable cv_;
+ std::atomic<unsigned int> state_; // low bit is waiter flag
+ bool notified_;
+};
+
+// Notification is an object that allows a user to wait for another
+// thread to signal a notification that an event has occurred.
+//
+// Multiple threads can wait on the same Notification object,
+// but only one caller must call Notify() on the object.
+struct Notification : Barrier {
+ Notification() : Barrier(1){};
+};
+
+} // namespace Eigen
+
+#endif // EIGEN_CXX11_THREADPOOL_BARRIER_H
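A usage sketch for the Barrier class introduced above: the main thread blocks until every worker has called Notify() exactly once, and Notification is just the count == 1 special case. The include line assumes the ThreadPool module header pulls in Barrier.h in this changeset.

// --- illustrative sketch, not part of the patch ---
#include <thread>
#include <vector>
#include <unsupported/Eigen/CXX11/ThreadPool>  // assumed to provide Eigen::Barrier here

void run_workers(int num_workers) {
  Eigen::Barrier all_done(static_cast<unsigned int>(num_workers));
  std::vector<std::thread> workers;
  for (int i = 0; i < num_workers; ++i) {
    workers.emplace_back([&all_done]() {
      // ... per-thread work ...
      all_done.Notify();  // each worker signals completion exactly once
    });
  }
  all_done.Wait();        // blocks until Notify() has run num_workers times
  for (std::thread& t : workers) t.join();
}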
diff --git a/unsupported/Eigen/CXX11/src/ThreadPool/EventCount.h b/unsupported/Eigen/CXX11/src/ThreadPool/EventCount.h
index 71d55552d..22c952ae1 100644
--- a/unsupported/Eigen/CXX11/src/ThreadPool/EventCount.h
+++ b/unsupported/Eigen/CXX11/src/ThreadPool/EventCount.h
@@ -33,10 +33,10 @@ namespace Eigen {
// ec.Notify(true);
//
// Notify is cheap if there are no waiting threads. Prewait/CommitWait are not
-// cheap, but they are executed only if the preceeding predicate check has
+// cheap, but they are executed only if the preceding predicate check has
// failed.
//
-// Algorihtm outline:
+// Algorithm outline:
// There are two main variables: predicate (managed by user) and state_.
// Operation closely resembles Dekker mutual algorithm:
// https://en.wikipedia.org/wiki/Dekker%27s_algorithm
@@ -58,7 +58,7 @@ class EventCount {
~EventCount() {
// Ensure there are no waiters.
- eigen_assert((state_.load() & (kStackMask | kWaiterMask)) == kStackMask);
+ eigen_plain_assert((state_.load() & (kStackMask | kWaiterMask)) == kStackMask);
}
// Prewait prepares for waiting.
@@ -79,7 +79,7 @@ class EventCount {
uint64_t state = state_.load(std::memory_order_seq_cst);
for (;;) {
if (int64_t((state & kEpochMask) - epoch) < 0) {
- // The preceeding waiter has not decided on its fate. Wait until it
+ // The preceding waiter has not decided on its fate. Wait until it
// calls either CancelWait or CommitWait, or is notified.
EIGEN_THREAD_YIELD();
state = state_.load(std::memory_order_seq_cst);
@@ -110,7 +110,7 @@ class EventCount {
uint64_t state = state_.load(std::memory_order_relaxed);
for (;;) {
if (int64_t((state & kEpochMask) - epoch) < 0) {
- // The preceeding waiter has not decided on its fate. Wait until it
+ // The preceding waiter has not decided on its fate. Wait until it
// calls either CancelWait or CommitWait, or is notified.
EIGEN_THREAD_YIELD();
state = state_.load(std::memory_order_relaxed);
@@ -169,7 +169,8 @@ class EventCount {
class Waiter {
friend class EventCount;
- // Align to 128 byte boundary to prevent false sharing with other Waiter objects in the same vector.
+ // Align to 128 byte boundary to prevent false sharing with other Waiter
+ // objects in the same vector.
EIGEN_ALIGN_TO_BOUNDARY(128) std::atomic<Waiter*> next;
std::mutex mu;
std::condition_variable cv;
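The EventCount comments above describe a "check the predicate, register as a waiter, re-check, then block" protocol that keeps Notify cheap when nobody is waiting. The sketch below is only a condition-variable analogue of that shape, not EventCount's lock-free API; the ToyQueue type and its members are invented.

// --- illustrative sketch, not part of the patch ---
#include <condition_variable>
#include <mutex>
#include <queue>

struct ToyQueue {
  std::mutex mu;
  std::condition_variable cv;
  std::queue<int> items;

  void push(int v) {
    { std::lock_guard<std::mutex> l(mu); items.push(v); }
    cv.notify_one();  // cheap if no thread is currently waiting
  }
  int pop_blocking() {
    std::unique_lock<std::mutex> l(mu);
    // Analogue of Prewait + re-check + CommitWait: only block after the
    // predicate ("queue non-empty") has been re-checked while registered.
    cv.wait(l, [this] { return !items.empty(); });
    int v = items.front(); items.pop();
    return v;
  }
};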
diff --git a/unsupported/Eigen/CXX11/src/ThreadPool/NonBlockingThreadPool.h b/unsupported/Eigen/CXX11/src/ThreadPool/NonBlockingThreadPool.h
index ed1a761b6..9bd6a175d 100644
--- a/unsupported/Eigen/CXX11/src/ThreadPool/NonBlockingThreadPool.h
+++ b/unsupported/Eigen/CXX11/src/ThreadPool/NonBlockingThreadPool.h
@@ -10,57 +10,60 @@
#ifndef EIGEN_CXX11_THREADPOOL_NONBLOCKING_THREAD_POOL_H
#define EIGEN_CXX11_THREADPOOL_NONBLOCKING_THREAD_POOL_H
-
namespace Eigen {
template <typename Environment>
-class NonBlockingThreadPoolTempl : public Eigen::ThreadPoolInterface {
+class ThreadPoolTempl : public Eigen::ThreadPoolInterface {
public:
typedef typename Environment::Task Task;
typedef RunQueue<Task, 1024> Queue;
- NonBlockingThreadPoolTempl(int num_threads, Environment env = Environment())
+ ThreadPoolTempl(int num_threads, Environment env = Environment())
+ : ThreadPoolTempl(num_threads, true, env) {}
+
+ ThreadPoolTempl(int num_threads, bool allow_spinning,
+ Environment env = Environment())
: env_(env),
- threads_(num_threads),
- queues_(num_threads),
- coprimes_(num_threads),
+ num_threads_(num_threads),
+ allow_spinning_(allow_spinning),
+ thread_data_(num_threads),
+ all_coprimes_(num_threads),
waiters_(num_threads),
blocked_(0),
spinning_(0),
done_(false),
cancelled_(false),
ec_(waiters_) {
- waiters_.resize(num_threads);
-
- // Calculate coprimes of num_threads.
- // Coprimes are used for a random walk over all threads in Steal
+ waiters_.resize(num_threads_);
+ // Calculate coprimes of all numbers [1, num_threads].
+ // Coprimes are used for random walks over all threads in Steal
// and NonEmptyQueueIndex. Iteration is based on the fact that if we take
- // a walk starting thread index t and calculate num_threads - 1 subsequent
+ // a random starting thread index t and calculate num_threads - 1 subsequent
// indices as (t + coprime) % num_threads, we will cover all threads without
// repetitions (effectively getting a pseudo-random permutation of thread
// indices).
- for (int i = 1; i <= num_threads; i++) {
- unsigned a = i;
- unsigned b = num_threads;
- // If GCD(a, b) == 1, then a and b are coprimes.
- while (b != 0) {
- unsigned tmp = a;
- a = b;
- b = tmp % b;
- }
- if (a == 1) {
- coprimes_.push_back(i);
- }
- }
- for (int i = 0; i < num_threads; i++) {
- queues_.push_back(new Queue());
+ eigen_assert(num_threads_ < kMaxThreads);
+ for (int i = 1; i <= num_threads_; ++i) {
+ all_coprimes_.emplace_back(i);
+ ComputeCoprimes(i, &all_coprimes_.back());
}
- for (int i = 0; i < num_threads; i++) {
- threads_.push_back(env_.CreateThread([this, i]() { WorkerLoop(i); }));
+#ifndef EIGEN_THREAD_LOCAL
+ init_barrier_.reset(new Barrier(num_threads_));
+#endif
+ thread_data_.resize(num_threads_);
+ for (int i = 0; i < num_threads_; i++) {
+ SetStealPartition(i, EncodePartition(0, num_threads_));
+ thread_data_[i].thread.reset(
+ env_.CreateThread([this, i]() { WorkerLoop(i); }));
}
+#ifndef EIGEN_THREAD_LOCAL
+ // Wait for workers to initialize per_thread_map_. Otherwise we might race
+ // with them in Schedule or CurrentThreadId.
+ init_barrier_->Wait();
+#endif
}
- ~NonBlockingThreadPoolTempl() {
+ ~ThreadPoolTempl() {
done_ = true;
// Now if all threads block without work, they will start exiting.
@@ -71,28 +74,51 @@ class NonBlockingThreadPoolTempl : public Eigen::ThreadPoolInterface {
} else {
// Since we were cancelled, there might be entries in the queues.
// Empty them to prevent their destructor from asserting.
- for (size_t i = 0; i < queues_.size(); i++) {
- queues_[i]->Flush();
+ for (size_t i = 0; i < thread_data_.size(); i++) {
+ thread_data_[i].queue.Flush();
}
}
+ // Join threads explicitly (by destroying) to avoid destruction order within
+ // this class.
+ for (size_t i = 0; i < thread_data_.size(); ++i)
+ thread_data_[i].thread.reset();
+ }
+
+ void SetStealPartitions(const std::vector<std::pair<unsigned, unsigned>>& partitions) {
+ eigen_assert(partitions.size() == static_cast<std::size_t>(num_threads_));
- // Join threads explicitly to avoid destruction order issues.
- for (size_t i = 0; i < threads_.size(); i++) delete threads_[i];
- for (size_t i = 0; i < threads_.size(); i++) delete queues_[i];
+ // Pass this information to each thread queue.
+ for (int i = 0; i < num_threads_; i++) {
+ const auto& pair = partitions[i];
+ unsigned start = pair.first, end = pair.second;
+ AssertBounds(start, end);
+ unsigned val = EncodePartition(start, end);
+ SetStealPartition(i, val);
+ }
}
void Schedule(std::function<void()> fn) {
+ ScheduleWithHint(std::move(fn), 0, num_threads_);
+ }
+
+ void ScheduleWithHint(std::function<void()> fn, int start,
+ int limit) override {
Task t = env_.CreateTask(std::move(fn));
PerThread* pt = GetPerThread();
if (pt->pool == this) {
// Worker thread of this pool, push onto the thread's queue.
- Queue* q = queues_[pt->thread_id];
- t = q->PushFront(std::move(t));
+ Queue& q = thread_data_[pt->thread_id].queue;
+ t = q.PushFront(std::move(t));
} else {
// A free-standing thread (or worker of another pool), push onto a random
// queue.
- Queue* q = queues_[Rand(&pt->rand) % queues_.size()];
- t = q->PushBack(std::move(t));
+ eigen_assert(start < limit);
+ eigen_assert(limit <= num_threads_);
+ int num_queues = limit - start;
+ int rnd = Rand(&pt->rand) % num_queues;
+ eigen_assert(start + rnd < limit);
+ Queue& q = thread_data_[start + rnd].queue;
+ t = q.PushBack(std::move(t));
}
// Note: below we touch this after making w available to worker threads.
// Strictly speaking, this can lead to a racy-use-after-free. Consider that
@@ -103,8 +129,7 @@ class NonBlockingThreadPoolTempl : public Eigen::ThreadPoolInterface {
// this is kept alive while any threads can potentially be in Schedule.
if (!t.f) {
ec_.Notify(false);
- }
- else {
+ } else {
env_.ExecuteTask(t); // Push failed, execute directly.
}
}
@@ -115,8 +140,8 @@ class NonBlockingThreadPoolTempl : public Eigen::ThreadPoolInterface {
// Let each thread know it's been cancelled.
#ifdef EIGEN_THREAD_ENV_SUPPORTS_CANCELLATION
- for (size_t i = 0; i < threads_.size(); i++) {
- threads_[i]->OnCancel();
+ for (size_t i = 0; i < thread_data_.size(); i++) {
+ thread_data_[i].thread->OnCancel();
}
#endif
@@ -124,13 +149,10 @@ class NonBlockingThreadPoolTempl : public Eigen::ThreadPoolInterface {
ec_.Notify(true);
}
- int NumThreads() const final {
- return static_cast<int>(threads_.size());
- }
+ int NumThreads() const final { return num_threads_; }
int CurrentThreadId() const final {
- const PerThread* pt =
- const_cast<NonBlockingThreadPoolTempl*>(this)->GetPerThread();
+ const PerThread* pt = const_cast<ThreadPoolTempl*>(this)->GetPerThread();
if (pt->pool == this) {
return pt->thread_id;
} else {
@@ -139,77 +161,184 @@ class NonBlockingThreadPoolTempl : public Eigen::ThreadPoolInterface {
}
private:
+ // Create a single atomic<int> that encodes start and limit information for
+ // each thread.
+ // We expect num_threads_ < 65536, so we can store them in a single
+ // std::atomic<unsigned>.
+ // Exposed publicly as static functions so that external callers can reuse
+ // this encode/decode logic for maintaining their own thread-safe copies of
+ // scheduling and steal domain(s).
+ static const int kMaxPartitionBits = 16;
+ static const int kMaxThreads = 1 << kMaxPartitionBits;
+
+ inline unsigned EncodePartition(unsigned start, unsigned limit) {
+ return (start << kMaxPartitionBits) | limit;
+ }
+
+ inline void DecodePartition(unsigned val, unsigned* start, unsigned* limit) {
+ *limit = val & (kMaxThreads - 1);
+ val >>= kMaxPartitionBits;
+ *start = val;
+ }
+
+ void AssertBounds(int start, int end) {
+ eigen_assert(start >= 0);
+ eigen_assert(start < end); // non-zero sized partition
+ eigen_assert(end <= num_threads_);
+ }
+
+ inline void SetStealPartition(size_t i, unsigned val) {
+ thread_data_[i].steal_partition.store(val, std::memory_order_relaxed);
+ }
+
+ inline unsigned GetStealPartition(int i) {
+ return thread_data_[i].steal_partition.load(std::memory_order_relaxed);
+ }
+
+ void ComputeCoprimes(int N, MaxSizeVector<unsigned>* coprimes) {
+ for (int i = 1; i <= N; i++) {
+ unsigned a = i;
+ unsigned b = N;
+ // If GCD(a, b) == 1, then a and b are coprimes.
+ while (b != 0) {
+ unsigned tmp = a;
+ a = b;
+ b = tmp % b;
+ }
+ if (a == 1) {
+ coprimes->push_back(i);
+ }
+ }
+ }
+
typedef typename Environment::EnvThread Thread;
struct PerThread {
- constexpr PerThread() : pool(NULL), rand(0), thread_id(-1) { }
- NonBlockingThreadPoolTempl* pool; // Parent pool, or null for normal threads.
- uint64_t rand; // Random generator state.
- int thread_id; // Worker thread index in pool.
+ constexpr PerThread() : pool(NULL), rand(0), thread_id(-1) {}
+ ThreadPoolTempl* pool; // Parent pool, or null for normal threads.
+ uint64_t rand; // Random generator state.
+ int thread_id; // Worker thread index in pool.
+#ifndef EIGEN_THREAD_LOCAL
+ // Prevent false sharing.
+ char pad_[128];
+#endif
+ };
+
+ struct ThreadData {
+ constexpr ThreadData() : thread(), steal_partition(0), queue() {}
+ std::unique_ptr<Thread> thread;
+ std::atomic<unsigned> steal_partition;
+ Queue queue;
};
Environment env_;
- MaxSizeVector<Thread*> threads_;
- MaxSizeVector<Queue*> queues_;
- MaxSizeVector<unsigned> coprimes_;
+ const int num_threads_;
+ const bool allow_spinning_;
+ MaxSizeVector<ThreadData> thread_data_;
+ MaxSizeVector<MaxSizeVector<unsigned>> all_coprimes_;
MaxSizeVector<EventCount::Waiter> waiters_;
std::atomic<unsigned> blocked_;
std::atomic<bool> spinning_;
std::atomic<bool> done_;
std::atomic<bool> cancelled_;
EventCount ec_;
+#ifndef EIGEN_THREAD_LOCAL
+ std::unique_ptr<Barrier> init_barrier_;
+ std::mutex per_thread_map_mutex_; // Protects per_thread_map_.
+ std::unordered_map<uint64_t, std::unique_ptr<PerThread>> per_thread_map_;
+#endif
// Main worker thread loop.
void WorkerLoop(int thread_id) {
+#ifndef EIGEN_THREAD_LOCAL
+ std::unique_ptr<PerThread> new_pt(new PerThread());
+ per_thread_map_mutex_.lock();
+ eigen_assert(per_thread_map_.emplace(GlobalThreadIdHash(), std::move(new_pt)).second);
+ per_thread_map_mutex_.unlock();
+ init_barrier_->Notify();
+ init_barrier_->Wait();
+#endif
PerThread* pt = GetPerThread();
pt->pool = this;
- pt->rand = std::hash<std::thread::id>()(std::this_thread::get_id());
+ pt->rand = GlobalThreadIdHash();
pt->thread_id = thread_id;
- Queue* q = queues_[thread_id];
+ Queue& q = thread_data_[thread_id].queue;
EventCount::Waiter* waiter = &waiters_[thread_id];
- while (!cancelled_) {
- Task t = q->PopFront();
- if (!t.f) {
- t = Steal();
+ // TODO(dvyukov,rmlarsen): The time spent in NonEmptyQueueIndex() is
+ // proportional to num_threads_ and we assume that new work is scheduled at
+ // a constant rate, so we set spin_count to 5000 / num_threads_. The
+ // constant was picked based on a fair dice roll, tune it.
+ const int spin_count =
+ allow_spinning_ && num_threads_ > 0 ? 5000 / num_threads_ : 0;
+ if (num_threads_ == 1) {
+ // For num_threads_ == 1 there is no point in going through the expensive
+ // steal loop. Moreover, since NonEmptyQueueIndex() calls PopBack() on the
+ // victim queues it might reverse the order in which ops are executed
+ // compared to the order in which they are scheduled, which tends to be
+ // counter-productive for the types of I/O workloads the single thread
+ // pools tend to be used for.
+ while (!cancelled_) {
+ Task t = q.PopFront();
+ for (int i = 0; i < spin_count && !t.f; i++) {
+ if (!cancelled_.load(std::memory_order_relaxed)) {
+ t = q.PopFront();
+ }
+ }
if (!t.f) {
- // Leave one thread spinning. This reduces latency.
- // TODO(dvyukov): 1000 iterations is based on fair dice roll, tune it.
- // Also, the time it takes to attempt to steal work 1000 times depends
- // on the size of the thread pool. However the speed at which the user
- // of the thread pool submit tasks is independent of the size of the
- // pool. Consider a time based limit instead.
- if (!spinning_ && !spinning_.exchange(true)) {
- for (int i = 0; i < 1000 && !t.f; i++) {
- if (!cancelled_.load(std::memory_order_relaxed)) {
- t = Steal();
- } else {
- return;
- }
- }
- spinning_ = false;
+ if (!WaitForWork(waiter, &t)) {
+ return;
}
+ }
+ if (t.f) {
+ env_.ExecuteTask(t);
+ }
+ }
+ } else {
+ while (!cancelled_) {
+ Task t = q.PopFront();
+ if (!t.f) {
+ t = LocalSteal();
if (!t.f) {
- if (!WaitForWork(waiter, &t)) {
- return;
+ t = GlobalSteal();
+ if (!t.f) {
+ // Leave one thread spinning. This reduces latency.
+ if (allow_spinning_ && !spinning_ && !spinning_.exchange(true)) {
+ for (int i = 0; i < spin_count && !t.f; i++) {
+ if (!cancelled_.load(std::memory_order_relaxed)) {
+ t = GlobalSteal();
+ } else {
+ return;
+ }
+ }
+ spinning_ = false;
+ }
+ if (!t.f) {
+ if (!WaitForWork(waiter, &t)) {
+ return;
+ }
+ }
}
}
}
- }
- if (t.f) {
- env_.ExecuteTask(t);
+ if (t.f) {
+ env_.ExecuteTask(t);
+ }
}
}
}
- // Steal tries to steal work from other worker threads in best-effort manner.
- Task Steal() {
+ // Steal tries to steal work from other worker threads in the range [start,
+ // limit) in a best-effort manner.
+ Task Steal(unsigned start, unsigned limit) {
PerThread* pt = GetPerThread();
- const size_t size = queues_.size();
+ const size_t size = limit - start;
unsigned r = Rand(&pt->rand);
- unsigned inc = coprimes_[r % coprimes_.size()];
unsigned victim = r % size;
+ unsigned inc = all_coprimes_[size - 1][r % all_coprimes_[size - 1].size()];
+
for (unsigned i = 0; i < size; i++) {
- Task t = queues_[victim]->PopBack();
+ eigen_assert(start + victim < limit);
+ Task t = thread_data_[start + victim].queue.PopBack();
if (t.f) {
return t;
}
@@ -221,6 +350,23 @@ class NonBlockingThreadPoolTempl : public Eigen::ThreadPoolInterface {
return Task();
}
+ // Steals work within threads belonging to the partition.
+ Task LocalSteal() {
+ PerThread* pt = GetPerThread();
+ unsigned partition = GetStealPartition(pt->thread_id);
+ unsigned start, limit;
+ DecodePartition(partition, &start, &limit);
+ AssertBounds(start, limit);
+
+ return Steal(start, limit);
+ }
+
+ // Steals work from any other thread in the pool.
+ Task GlobalSteal() {
+ return Steal(0, num_threads_);
+ }
+
+
// WaitForWork blocks until new work is available (returns true), or if it is
// time to exit (returns false). Can optionally return a task to execute in t
// (in such case t.f != nullptr on return).
@@ -236,7 +382,7 @@ class NonBlockingThreadPoolTempl : public Eigen::ThreadPoolInterface {
if (cancelled_) {
return false;
} else {
- *t = queues_[victim]->PopBack();
+ *t = thread_data_[victim].queue.PopBack();
return true;
}
}
@@ -244,7 +390,8 @@ class NonBlockingThreadPoolTempl : public Eigen::ThreadPoolInterface {
// If we are shutting down and all worker threads blocked without work,
// that means we are done.
blocked_++;
- if (done_ && blocked_ == threads_.size()) {
+ // TODO is blocked_ required to be unsigned?
+ if (done_ && blocked_ == static_cast<unsigned>(num_threads_)) {
ec_.CancelWait(waiter);
// Almost done, but need to re-check queues.
// Consider that all queues are empty and all worker threads are preempted
@@ -272,12 +419,15 @@ class NonBlockingThreadPoolTempl : public Eigen::ThreadPoolInterface {
int NonEmptyQueueIndex() {
PerThread* pt = GetPerThread();
- const size_t size = queues_.size();
+ // We intentionally let NonEmptyQueueIndex look for work in any queue in
+ // the pool, so threads don't block in WaitForWork() forever when all the
+ // other threads in their partition go to sleep. The worker loop itself
+ // still tries to steal from the local partition first (LocalSteal()).
+ const size_t size = thread_data_.size();
unsigned r = Rand(&pt->rand);
- unsigned inc = coprimes_[r % coprimes_.size()];
+ unsigned inc = all_coprimes_[size - 1][r % all_coprimes_[size - 1].size()];
unsigned victim = r % size;
for (unsigned i = 0; i < size; i++) {
- if (!queues_[victim]->Empty()) {
+ if (!thread_data_[victim].queue.Empty()) {
return victim;
}
victim += inc;
@@ -288,10 +438,24 @@ class NonBlockingThreadPoolTempl : public Eigen::ThreadPoolInterface {
return -1;
}
- static EIGEN_STRONG_INLINE PerThread* GetPerThread() {
+ static EIGEN_STRONG_INLINE uint64_t GlobalThreadIdHash() {
+ return std::hash<std::thread::id>()(std::this_thread::get_id());
+ }
+
+ EIGEN_STRONG_INLINE PerThread* GetPerThread() {
+#ifndef EIGEN_THREAD_LOCAL
+ static PerThread dummy;
+ auto it = per_thread_map_.find(GlobalThreadIdHash());
+ if (it == per_thread_map_.end()) {
+ return &dummy;
+ } else {
+ return it->second.get();
+ }
+#else
EIGEN_THREAD_LOCAL PerThread per_thread_;
PerThread* pt = &per_thread_;
return pt;
+#endif
}
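// [Editor's sketch, not part of the patch] The pattern behind the
// #ifndef EIGEN_THREAD_LOCAL branch above, in isolation: emulate thread-local
// state with a map keyed by a hash of std::this_thread::get_id(). Unlike the
// pool (which registers entries once at worker start-up and then reads without
// taking the lock), this sketch takes the mutex on every access for simplicity.
#include <cstdint>
#include <memory>
#include <mutex>
#include <thread>
#include <unordered_map>

struct State { int value = 0; };

class ThreadLocalEmulation {
 public:
  State* Get() {
    const uint64_t key = std::hash<std::thread::id>()(std::this_thread::get_id());
    std::lock_guard<std::mutex> lock(mutex_);
    std::unique_ptr<State>& slot = map_[key];
    if (!slot) slot.reset(new State());
    return slot.get();
  }
 private:
  std::mutex mutex_;
  std::unordered_map<uint64_t, std::unique_ptr<State>> map_;
};

int main() {
  ThreadLocalEmulation tls;
  tls.Get()->value = 1;                          // main thread's slot
  std::thread t([&] { tls.Get()->value = 2; });  // worker writes its own slot
  t.join();
  return tls.Get()->value == 1 ? 0 : 1;          // main's slot is unaffected
}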
static EIGEN_STRONG_INLINE unsigned Rand(uint64_t* state) {
@@ -299,11 +463,12 @@ class NonBlockingThreadPoolTempl : public Eigen::ThreadPoolInterface {
// Update the internal state
*state = current * 6364136223846793005ULL + 0xda3e39cb94b95bdbULL;
// Generate the random output (using the PCG-XSH-RS scheme)
- return static_cast<unsigned>((current ^ (current >> 22)) >> (22 + (current >> 61)));
+ return static_cast<unsigned>((current ^ (current >> 22)) >>
+ (22 + (current >> 61)));
}
};
-typedef NonBlockingThreadPoolTempl<StlThreadEnvironment> NonBlockingThreadPool;
+typedef ThreadPoolTempl<StlThreadEnvironment> ThreadPool;
} // namespace Eigen
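// [Editor's sketch, not part of the patch] The per-thread Rand() above is a
// 64-bit LCG advanced with the constants shown, with the output mixed via the
// PCG-XSH-RS scheme. A standalone copy, seeded arbitrarily, for illustration:
#include <cstdint>
#include <cstdio>

static inline unsigned Rand(uint64_t* state) {
  const uint64_t current = *state;
  // Update the internal state (same multiplier/increment as in the hunk above).
  *state = current * 6364136223846793005ULL + 0xda3e39cb94b95bdbULL;
  // Generate the random output (PCG-XSH-RS).
  return static_cast<unsigned>((current ^ (current >> 22)) >>
                               (22 + (current >> 61)));
}

int main() {
  uint64_t state = 0x0123456789abcdefULL;  // arbitrary seed; the pool seeds
                                           // each worker with GlobalThreadIdHash()
  for (int i = 0; i < 3; i++) std::printf("%u\n", Rand(&state));
}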
diff --git a/unsupported/Eigen/CXX11/src/ThreadPool/RunQueue.h b/unsupported/Eigen/CXX11/src/ThreadPool/RunQueue.h
index 49d0cdc36..05c739aa1 100644
--- a/unsupported/Eigen/CXX11/src/ThreadPool/RunQueue.h
+++ b/unsupported/Eigen/CXX11/src/ThreadPool/RunQueue.h
@@ -10,7 +10,6 @@
#ifndef EIGEN_CXX11_THREADPOOL_RUNQUEUE_H_
#define EIGEN_CXX11_THREADPOOL_RUNQUEUE_H_
-
namespace Eigen {
// RunQueue is a fixed-size, partially non-blocking deque of Work items.
@@ -47,7 +46,7 @@ class RunQueue {
array_[i].state.store(kEmpty, std::memory_order_relaxed);
}
- ~RunQueue() { eigen_assert(Size() == 0); }
+ ~RunQueue() { eigen_plain_assert(Size() == 0); }
// PushFront inserts w at the beginning of the queue.
// If queue is full returns w, otherwise returns default-constructed Work.
@@ -131,9 +130,8 @@ class RunQueue {
Elem* e = &array_[mid & kMask];
uint8_t s = e->state.load(std::memory_order_relaxed);
if (n == 0) {
- if (s != kReady ||
- !e->state.compare_exchange_strong(s, kBusy,
- std::memory_order_acquire))
+ if (s != kReady || !e->state.compare_exchange_strong(
+ s, kBusy, std::memory_order_acquire))
continue;
start = mid;
} else {
@@ -198,7 +196,7 @@ class RunQueue {
};
std::mutex mutex_;
// Low log(kSize) + 1 bits in front_ and back_ contain rolling index of
- // front/back, repsectively. The remaining bits contain modification counters
+ // front/back, respectively. The remaining bits contain modification counters
// that are incremented on Push operations. This allows us to (1) distinguish
// between empty and full conditions (if we would use log(kSize) bits for
// position, these conditions would be indistinguishable); (2) obtain
diff --git a/unsupported/Eigen/CXX11/src/ThreadPool/SimpleThreadPool.h b/unsupported/Eigen/CXX11/src/ThreadPool/SimpleThreadPool.h
deleted file mode 100644
index 335728665..000000000
--- a/unsupported/Eigen/CXX11/src/ThreadPool/SimpleThreadPool.h
+++ /dev/null
@@ -1,162 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_CXX11_THREADPOOL_SIMPLE_THREAD_POOL_H
-#define EIGEN_CXX11_THREADPOOL_SIMPLE_THREAD_POOL_H
-
-namespace Eigen {
-
-// The implementation of the ThreadPool type ensures that the Schedule method
-// runs the functions it is provided in FIFO order when the scheduling is done
-// by a single thread.
-// Environment provides a way to create threads and also allows to intercept
-// task submission and execution.
-template <typename Environment>
-class SimpleThreadPoolTempl : public ThreadPoolInterface {
- public:
- // Construct a pool that contains "num_threads" threads.
- explicit SimpleThreadPoolTempl(int num_threads, Environment env = Environment())
- : env_(env), threads_(num_threads), waiters_(num_threads) {
- for (int i = 0; i < num_threads; i++) {
- threads_.push_back(env.CreateThread([this, i]() { WorkerLoop(i); }));
- }
- }
-
- // Wait until all scheduled work has finished and then destroy the
- // set of threads.
- ~SimpleThreadPoolTempl() {
- {
- // Wait for all work to get done.
- std::unique_lock<std::mutex> l(mu_);
- while (!pending_.empty()) {
- empty_.wait(l);
- }
- exiting_ = true;
-
- // Wakeup all waiters.
- for (auto w : waiters_) {
- w->ready = true;
- w->task.f = nullptr;
- w->cv.notify_one();
- }
- }
-
- // Wait for threads to finish.
- for (auto t : threads_) {
- delete t;
- }
- }
-
- // Schedule fn() for execution in the pool of threads. The functions are
- // executed in the order in which they are scheduled.
- void Schedule(std::function<void()> fn) final {
- Task t = env_.CreateTask(std::move(fn));
- std::unique_lock<std::mutex> l(mu_);
- if (waiters_.empty()) {
- pending_.push_back(std::move(t));
- } else {
- Waiter* w = waiters_.back();
- waiters_.pop_back();
- w->ready = true;
- w->task = std::move(t);
- w->cv.notify_one();
- }
- }
-
- void Cancel() {
-#ifdef EIGEN_THREAD_ENV_SUPPORTS_CANCELLATION
- for (size_t i = 0; i < threads_.size(); i++) {
- threads_[i]->OnCancel();
- }
-#endif
- }
-
- int NumThreads() const final {
- return static_cast<int>(threads_.size());
- }
-
- int CurrentThreadId() const final {
- const PerThread* pt = this->GetPerThread();
- if (pt->pool == this) {
- return pt->thread_id;
- } else {
- return -1;
- }
- }
-
- protected:
- void WorkerLoop(int thread_id) {
- std::unique_lock<std::mutex> l(mu_);
- PerThread* pt = GetPerThread();
- pt->pool = this;
- pt->thread_id = thread_id;
- Waiter w;
- Task t;
- while (!exiting_) {
- if (pending_.empty()) {
- // Wait for work to be assigned to me
- w.ready = false;
- waiters_.push_back(&w);
- while (!w.ready) {
- w.cv.wait(l);
- }
- t = w.task;
- w.task.f = nullptr;
- } else {
- // Pick up pending work
- t = std::move(pending_.front());
- pending_.pop_front();
- if (pending_.empty()) {
- empty_.notify_all();
- }
- }
- if (t.f) {
- mu_.unlock();
- env_.ExecuteTask(t);
- t.f = nullptr;
- mu_.lock();
- }
- }
- }
-
- private:
- typedef typename Environment::Task Task;
- typedef typename Environment::EnvThread Thread;
-
- struct Waiter {
- std::condition_variable cv;
- Task task;
- bool ready;
- };
-
- struct PerThread {
- constexpr PerThread() : pool(NULL), thread_id(-1) { }
- SimpleThreadPoolTempl* pool; // Parent pool, or null for normal threads.
- int thread_id; // Worker thread index in pool.
- };
-
- Environment env_;
- std::mutex mu_;
- MaxSizeVector<Thread*> threads_; // All threads
- MaxSizeVector<Waiter*> waiters_; // Stack of waiting threads.
- std::deque<Task> pending_; // Queue of pending work
- std::condition_variable empty_; // Signaled on pending_.empty()
- bool exiting_ = false;
-
- PerThread* GetPerThread() const {
- EIGEN_THREAD_LOCAL PerThread per_thread;
- return &per_thread;
- }
-};
-
-typedef SimpleThreadPoolTempl<StlThreadEnvironment> SimpleThreadPool;
-
-} // namespace Eigen
-
-#endif // EIGEN_CXX11_THREADPOOL_SIMPLE_THREAD_POOL_H
diff --git a/unsupported/Eigen/CXX11/src/ThreadPool/ThreadLocal.h b/unsupported/Eigen/CXX11/src/ThreadPool/ThreadLocal.h
index cfa221732..7229839ac 100644
--- a/unsupported/Eigen/CXX11/src/ThreadPool/ThreadLocal.h
+++ b/unsupported/Eigen/CXX11/src/ThreadPool/ThreadLocal.h
@@ -10,13 +10,46 @@
#ifndef EIGEN_CXX11_THREADPOOL_THREAD_LOCAL_H
#define EIGEN_CXX11_THREADPOOL_THREAD_LOCAL_H
-// Try to come up with a portable implementation of thread local variables
-#if EIGEN_COMP_GNUC && EIGEN_GNUC_AT_MOST(4, 7)
-#define EIGEN_THREAD_LOCAL static __thread
-#elif EIGEN_COMP_CLANG
-#define EIGEN_THREAD_LOCAL static __thread
-#else
+#if EIGEN_MAX_CPP_VER >= 11 && \
+ ((EIGEN_COMP_GNUC && EIGEN_GNUC_AT_LEAST(4, 8)) || \
+ __has_feature(cxx_thread_local) || \
+ (EIGEN_COMP_MSVC >= 1900) )
#define EIGEN_THREAD_LOCAL static thread_local
#endif
+// Disable TLS for Apple and Android builds with older toolchains.
+#if defined(__APPLE__)
+// Included for TARGET_OS_IPHONE, __IPHONE_OS_VERSION_MIN_REQUIRED,
+// __IPHONE_8_0.
+#include <Availability.h>
+#include <TargetConditionals.h>
+#endif
+// Checks whether C++11's `thread_local` storage duration specifier is
+// supported.
+#if defined(__apple_build_version__) && \
+ ((__apple_build_version__ < 8000042) || \
+ (TARGET_OS_IPHONE && __IPHONE_OS_VERSION_MIN_REQUIRED < __IPHONE_9_0))
+// Notes: Xcode's clang did not support `thread_local` until version
+// 8, and even then not for all iOS < 9.0.
+#undef EIGEN_THREAD_LOCAL
+
+#elif defined(__ANDROID__) && EIGEN_COMP_CLANG
+// There are platforms for which TLS should not be used even though the compiler
+// makes it seem like it's supported (Android NDK < r12b for example).
+// This is primarily because of linker problems and toolchain misconfiguration:
+// TLS isn't supported until NDK r12b per
+// https://developer.android.com/ndk/downloads/revision_history.html
+// Since NDK r16, `__NDK_MAJOR__` and `__NDK_MINOR__` are defined in
+// <android/ndk-version.h>. For NDK < r16, users should define these macros,
+// e.g. `-D__NDK_MAJOR__=11 -D__NDK_MINOR__=0` for NDK r11.
+#if __has_include(<android/ndk-version.h>)
+#include <android/ndk-version.h>
+#endif // __has_include(<android/ndk-version.h>)
+#if defined(__ANDROID__) && defined(__clang__) && defined(__NDK_MAJOR__) && \
+ defined(__NDK_MINOR__) && \
+ ((__NDK_MAJOR__ < 12) || ((__NDK_MAJOR__ == 12) && (__NDK_MINOR__ < 1)))
+#undef EIGEN_THREAD_LOCAL
+#endif
+#endif // defined(__ANDROID__) && defined(__clang__)
+
#endif // EIGEN_CXX11_THREADPOOL_THREAD_LOCAL_H
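// [Editor's sketch, not part of the patch] What the macro buys when it is
// defined: a function-local variable with one instance per thread. Minimal
// standalone equivalent of "EIGEN_THREAD_LOCAL int slot = 0;" in plain C++11:
#include <thread>

static int* Slot() {
  static thread_local int slot = 0;   // what EIGEN_THREAD_LOCAL expands to
  return &slot;
}

int main() {
  *Slot() = 1;                         // main thread's slot
  std::thread t([] { *Slot() = 2; });  // worker writes its own slot
  t.join();
  return *Slot() == 1 ? 0 : 1;         // main's slot is untouched
}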
diff --git a/unsupported/Eigen/CXX11/src/ThreadPool/ThreadPoolInterface.h b/unsupported/Eigen/CXX11/src/ThreadPool/ThreadPoolInterface.h
index 84e1e6cc0..25030dc0b 100644
--- a/unsupported/Eigen/CXX11/src/ThreadPool/ThreadPoolInterface.h
+++ b/unsupported/Eigen/CXX11/src/ThreadPool/ThreadPoolInterface.h
@@ -19,6 +19,15 @@ class ThreadPoolInterface {
// Submits a closure to be run by a thread in the pool.
virtual void Schedule(std::function<void()> fn) = 0;
+ // Submits a closure to be run by threads in the range [start, end) in the
+ // pool.
+ virtual void ScheduleWithHint(std::function<void()> fn, int /*start*/,
+ int /*end*/) {
+ // Just defer to Schedule in case sub-classes aren't interested in
+ // overriding this functionality.
+ Schedule(fn);
+ }
+
// If implemented, stop processing the closures that have been enqueued.
// Currently running closures may still be processed.
// If not implemented, does nothing.
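// [Editor's sketch, not part of the patch] Possible caller-side usage of the
// new hint, assuming the CXX11 ThreadPool module header and the ThreadPool
// typedef introduced earlier in this patch. The worker range [4, 8) is purely
// illustrative; pools that do not override ScheduleWithHint() fall back to
// plain Schedule().
#include <unsupported/Eigen/CXX11/ThreadPool>

int main() {
  Eigen::ThreadPool pool(8);
  pool.ScheduleWithHint([] { /* work preferring workers 4..7 */ }, 4, 8);
  pool.Schedule([] { /* unconstrained work */ });
  // The destructor waits for the workers to shut down.
}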
diff --git a/unsupported/Eigen/CXX11/src/util/CXX11Meta.h b/unsupported/Eigen/CXX11/src/util/CXX11Meta.h
index 49d315a66..6c95d0a6c 100644
--- a/unsupported/Eigen/CXX11/src/util/CXX11Meta.h
+++ b/unsupported/Eigen/CXX11/src/util/CXX11Meta.h
@@ -104,9 +104,9 @@ template<> struct h_skip_helper_type<0>
template<int n>
struct h_skip {
template<typename T, T... ii>
- constexpr static inline typename h_skip_helper_numeric<T, n, ii...>::type helper(numeric_list<T, ii...>) { return typename h_skip_helper_numeric<T, n, ii...>::type(); }
+ constexpr static EIGEN_STRONG_INLINE typename h_skip_helper_numeric<T, n, ii...>::type helper(numeric_list<T, ii...>) { return typename h_skip_helper_numeric<T, n, ii...>::type(); }
template<typename... tt>
- constexpr static inline typename h_skip_helper_type<n, tt...>::type helper(type_list<tt...>) { return typename h_skip_helper_type<n, tt...>::type(); }
+ constexpr static EIGEN_STRONG_INLINE typename h_skip_helper_type<n, tt...>::type helper(type_list<tt...>) { return typename h_skip_helper_type<n, tt...>::type(); }
};
template<int n, typename a> struct skip { typedef decltype(h_skip<n>::helper(a())) type; };
@@ -268,7 +268,7 @@ template<
typename Reducer
> struct reduce<Reducer>
{
- constexpr static inline int run() { return Reducer::Identity; }
+ EIGEN_DEVICE_FUNC constexpr static EIGEN_STRONG_INLINE int run() { return Reducer::Identity; }
};
template<
@@ -276,7 +276,7 @@ template<
typename A
> struct reduce<Reducer, A>
{
- constexpr static inline A run(A a) { return a; }
+ EIGEN_DEVICE_FUNC constexpr static EIGEN_STRONG_INLINE A run(A a) { return a; }
};
template<
@@ -285,7 +285,7 @@ template<
typename... Ts
> struct reduce<Reducer, A, Ts...>
{
- constexpr static inline auto run(A a, Ts... ts) -> decltype(Reducer::run(a, reduce<Reducer, Ts...>::run(ts...))) {
+ EIGEN_DEVICE_FUNC constexpr static EIGEN_STRONG_INLINE auto run(A a, Ts... ts) -> decltype(Reducer::run(a, reduce<Reducer, Ts...>::run(ts...))) {
return Reducer::run(a, reduce<Reducer, Ts...>::run(ts...));
}
};
@@ -293,29 +293,29 @@ template<
/* generic binary operations */
struct sum_op {
- template<typename A, typename B> EIGEN_DEVICE_FUNC constexpr static inline auto run(A a, B b) -> decltype(a + b) { return a + b; }
+ template<typename A, typename B> EIGEN_DEVICE_FUNC constexpr static EIGEN_STRONG_INLINE auto run(A a, B b) -> decltype(a + b) { return a + b; }
static constexpr int Identity = 0;
};
struct product_op {
- template<typename A, typename B> EIGEN_DEVICE_FUNC constexpr static inline auto run(A a, B b) -> decltype(a * b) { return a * b; }
+ template<typename A, typename B> EIGEN_DEVICE_FUNC constexpr static EIGEN_STRONG_INLINE auto run(A a, B b) -> decltype(a * b) { return a * b; }
static constexpr int Identity = 1;
};
-struct logical_and_op { template<typename A, typename B> constexpr static inline auto run(A a, B b) -> decltype(a && b) { return a && b; } };
-struct logical_or_op { template<typename A, typename B> constexpr static inline auto run(A a, B b) -> decltype(a || b) { return a || b; } };
+struct logical_and_op { template<typename A, typename B> constexpr static EIGEN_STRONG_INLINE auto run(A a, B b) -> decltype(a && b) { return a && b; } };
+struct logical_or_op { template<typename A, typename B> constexpr static EIGEN_STRONG_INLINE auto run(A a, B b) -> decltype(a || b) { return a || b; } };
-struct equal_op { template<typename A, typename B> constexpr static inline auto run(A a, B b) -> decltype(a == b) { return a == b; } };
-struct not_equal_op { template<typename A, typename B> constexpr static inline auto run(A a, B b) -> decltype(a != b) { return a != b; } };
-struct lesser_op { template<typename A, typename B> constexpr static inline auto run(A a, B b) -> decltype(a < b) { return a < b; } };
-struct lesser_equal_op { template<typename A, typename B> constexpr static inline auto run(A a, B b) -> decltype(a <= b) { return a <= b; } };
-struct greater_op { template<typename A, typename B> constexpr static inline auto run(A a, B b) -> decltype(a > b) { return a > b; } };
-struct greater_equal_op { template<typename A, typename B> constexpr static inline auto run(A a, B b) -> decltype(a >= b) { return a >= b; } };
+struct equal_op { template<typename A, typename B> constexpr static EIGEN_STRONG_INLINE auto run(A a, B b) -> decltype(a == b) { return a == b; } };
+struct not_equal_op { template<typename A, typename B> constexpr static EIGEN_STRONG_INLINE auto run(A a, B b) -> decltype(a != b) { return a != b; } };
+struct lesser_op { template<typename A, typename B> constexpr static EIGEN_STRONG_INLINE auto run(A a, B b) -> decltype(a < b) { return a < b; } };
+struct lesser_equal_op { template<typename A, typename B> constexpr static EIGEN_STRONG_INLINE auto run(A a, B b) -> decltype(a <= b) { return a <= b; } };
+struct greater_op { template<typename A, typename B> constexpr static EIGEN_STRONG_INLINE auto run(A a, B b) -> decltype(a > b) { return a > b; } };
+struct greater_equal_op { template<typename A, typename B> constexpr static EIGEN_STRONG_INLINE auto run(A a, B b) -> decltype(a >= b) { return a >= b; } };
/* generic unary operations */
-struct not_op { template<typename A> constexpr static inline auto run(A a) -> decltype(!a) { return !a; } };
-struct negation_op { template<typename A> constexpr static inline auto run(A a) -> decltype(-a) { return -a; } };
-struct greater_equal_zero_op { template<typename A> constexpr static inline auto run(A a) -> decltype(a >= 0) { return a >= 0; } };
+struct not_op { template<typename A> constexpr static EIGEN_STRONG_INLINE auto run(A a) -> decltype(!a) { return !a; } };
+struct negation_op { template<typename A> constexpr static EIGEN_STRONG_INLINE auto run(A a) -> decltype(-a) { return -a; } };
+struct greater_equal_zero_op { template<typename A> constexpr static EIGEN_STRONG_INLINE auto run(A a) -> decltype(a >= 0) { return a >= 0; } };
/* reductions for lists */
@@ -324,13 +324,13 @@ struct greater_equal_zero_op { template<typename A> constexpr static inline auto
// together in front... (13.0 doesn't work with array_prod/array_reduce/... anyway, but 13.1
// does...
template<typename... Ts>
-constexpr inline decltype(reduce<product_op, Ts...>::run((*((Ts*)0))...)) arg_prod(Ts... ts)
+EIGEN_DEVICE_FUNC constexpr EIGEN_STRONG_INLINE decltype(reduce<product_op, Ts...>::run((*((Ts*)0))...)) arg_prod(Ts... ts)
{
return reduce<product_op, Ts...>::run(ts...);
}
template<typename... Ts>
-constexpr inline decltype(reduce<sum_op, Ts...>::run((*((Ts*)0))...)) arg_sum(Ts... ts)
+constexpr EIGEN_STRONG_INLINE decltype(reduce<sum_op, Ts...>::run((*((Ts*)0))...)) arg_sum(Ts... ts)
{
return reduce<sum_op, Ts...>::run(ts...);
}
@@ -338,13 +338,13 @@ constexpr inline decltype(reduce<sum_op, Ts...>::run((*((Ts*)0))...)) arg_sum(Ts
/* reverse arrays */
template<typename Array, int... n>
-constexpr inline Array h_array_reverse(Array arr, numeric_list<int, n...>)
+constexpr EIGEN_STRONG_INLINE Array h_array_reverse(Array arr, numeric_list<int, n...>)
{
return {{array_get<sizeof...(n) - n - 1>(arr)...}};
}
template<typename T, std::size_t N>
-constexpr inline array<T, N> array_reverse(array<T, N> arr)
+constexpr EIGEN_STRONG_INLINE array<T, N> array_reverse(array<T, N> arr)
{
return h_array_reverse(arr, typename gen_numeric_list<int, N>::type());
}
@@ -359,7 +359,7 @@ constexpr inline array<T, N> array_reverse(array<T, N> arr)
// an infinite loop)
template<typename Reducer, typename T, std::size_t N, std::size_t n = N - 1>
struct h_array_reduce {
- EIGEN_DEVICE_FUNC constexpr static inline auto run(array<T, N> arr, T identity) -> decltype(Reducer::run(h_array_reduce<Reducer, T, N, n - 1>::run(arr, identity), array_get<n>(arr)))
+ EIGEN_DEVICE_FUNC constexpr static EIGEN_STRONG_INLINE auto run(array<T, N> arr, T identity) -> decltype(Reducer::run(h_array_reduce<Reducer, T, N, n - 1>::run(arr, identity), array_get<n>(arr)))
{
return Reducer::run(h_array_reduce<Reducer, T, N, n - 1>::run(arr, identity), array_get<n>(arr));
}
@@ -368,7 +368,7 @@ struct h_array_reduce {
template<typename Reducer, typename T, std::size_t N>
struct h_array_reduce<Reducer, T, N, 0>
{
- EIGEN_DEVICE_FUNC constexpr static inline T run(const array<T, N>& arr, T)
+ EIGEN_DEVICE_FUNC constexpr static EIGEN_STRONG_INLINE T run(const array<T, N>& arr, T)
{
return array_get<0>(arr);
}
@@ -377,14 +377,14 @@ struct h_array_reduce<Reducer, T, N, 0>
template<typename Reducer, typename T>
struct h_array_reduce<Reducer, T, 0>
{
- EIGEN_DEVICE_FUNC constexpr static inline T run(const array<T, 0>&, T identity)
+ EIGEN_DEVICE_FUNC constexpr static EIGEN_STRONG_INLINE T run(const array<T, 0>&, T identity)
{
return identity;
}
};
template<typename Reducer, typename T, std::size_t N>
-EIGEN_DEVICE_FUNC constexpr inline auto array_reduce(const array<T, N>& arr, T identity) -> decltype(h_array_reduce<Reducer, T, N>::run(arr, identity))
+EIGEN_DEVICE_FUNC constexpr EIGEN_STRONG_INLINE auto array_reduce(const array<T, N>& arr, T identity) -> decltype(h_array_reduce<Reducer, T, N>::run(arr, identity))
{
return h_array_reduce<Reducer, T, N>::run(arr, identity);
}
@@ -392,13 +392,13 @@ EIGEN_DEVICE_FUNC constexpr inline auto array_reduce(const array<T, N>& arr, T i
/* standard array reductions */
template<typename T, std::size_t N>
-EIGEN_DEVICE_FUNC constexpr inline auto array_sum(const array<T, N>& arr) -> decltype(array_reduce<sum_op, T, N>(arr, static_cast<T>(0)))
+EIGEN_DEVICE_FUNC constexpr EIGEN_STRONG_INLINE auto array_sum(const array<T, N>& arr) -> decltype(array_reduce<sum_op, T, N>(arr, static_cast<T>(0)))
{
return array_reduce<sum_op, T, N>(arr, static_cast<T>(0));
}
template<typename T, std::size_t N>
-EIGEN_DEVICE_FUNC constexpr inline auto array_prod(const array<T, N>& arr) -> decltype(array_reduce<product_op, T, N>(arr, static_cast<T>(1)))
+EIGEN_DEVICE_FUNC constexpr EIGEN_STRONG_INLINE auto array_prod(const array<T, N>& arr) -> decltype(array_reduce<product_op, T, N>(arr, static_cast<T>(1)))
{
return array_reduce<product_op, T, N>(arr, static_cast<T>(1));
}
@@ -414,13 +414,13 @@ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE t array_prod(const std::vector<t>& a) {
/* zip an array */
template<typename Op, typename A, typename B, std::size_t N, int... n>
-constexpr inline array<decltype(Op::run(A(), B())),N> h_array_zip(array<A, N> a, array<B, N> b, numeric_list<int, n...>)
+constexpr EIGEN_STRONG_INLINE array<decltype(Op::run(A(), B())),N> h_array_zip(array<A, N> a, array<B, N> b, numeric_list<int, n...>)
{
return array<decltype(Op::run(A(), B())),N>{{ Op::run(array_get<n>(a), array_get<n>(b))... }};
}
template<typename Op, typename A, typename B, std::size_t N>
-constexpr inline array<decltype(Op::run(A(), B())),N> array_zip(array<A, N> a, array<B, N> b)
+constexpr EIGEN_STRONG_INLINE array<decltype(Op::run(A(), B())),N> array_zip(array<A, N> a, array<B, N> b)
{
return h_array_zip<Op>(a, b, typename gen_numeric_list<int, N>::type());
}
@@ -428,13 +428,13 @@ constexpr inline array<decltype(Op::run(A(), B())),N> array_zip(array<A, N> a, a
/* zip an array and reduce the result */
template<typename Reducer, typename Op, typename A, typename B, std::size_t N, int... n>
-constexpr inline auto h_array_zip_and_reduce(array<A, N> a, array<B, N> b, numeric_list<int, n...>) -> decltype(reduce<Reducer, typename id_numeric<int,n,decltype(Op::run(A(), B()))>::type...>::run(Op::run(array_get<n>(a), array_get<n>(b))...))
+constexpr EIGEN_STRONG_INLINE auto h_array_zip_and_reduce(array<A, N> a, array<B, N> b, numeric_list<int, n...>) -> decltype(reduce<Reducer, typename id_numeric<int,n,decltype(Op::run(A(), B()))>::type...>::run(Op::run(array_get<n>(a), array_get<n>(b))...))
{
return reduce<Reducer, typename id_numeric<int,n,decltype(Op::run(A(), B()))>::type...>::run(Op::run(array_get<n>(a), array_get<n>(b))...);
}
template<typename Reducer, typename Op, typename A, typename B, std::size_t N>
-constexpr inline auto array_zip_and_reduce(array<A, N> a, array<B, N> b) -> decltype(h_array_zip_and_reduce<Reducer, Op, A, B, N>(a, b, typename gen_numeric_list<int, N>::type()))
+constexpr EIGEN_STRONG_INLINE auto array_zip_and_reduce(array<A, N> a, array<B, N> b) -> decltype(h_array_zip_and_reduce<Reducer, Op, A, B, N>(a, b, typename gen_numeric_list<int, N>::type()))
{
return h_array_zip_and_reduce<Reducer, Op, A, B, N>(a, b, typename gen_numeric_list<int, N>::type());
}
@@ -442,13 +442,13 @@ constexpr inline auto array_zip_and_reduce(array<A, N> a, array<B, N> b) -> decl
/* apply stuff to an array */
template<typename Op, typename A, std::size_t N, int... n>
-constexpr inline array<decltype(Op::run(A())),N> h_array_apply(array<A, N> a, numeric_list<int, n...>)
+constexpr EIGEN_STRONG_INLINE array<decltype(Op::run(A())),N> h_array_apply(array<A, N> a, numeric_list<int, n...>)
{
return array<decltype(Op::run(A())),N>{{ Op::run(array_get<n>(a))... }};
}
template<typename Op, typename A, std::size_t N>
-constexpr inline array<decltype(Op::run(A())),N> array_apply(array<A, N> a)
+constexpr EIGEN_STRONG_INLINE array<decltype(Op::run(A())),N> array_apply(array<A, N> a)
{
return h_array_apply<Op>(a, typename gen_numeric_list<int, N>::type());
}
@@ -456,13 +456,13 @@ constexpr inline array<decltype(Op::run(A())),N> array_apply(array<A, N> a)
/* apply stuff to an array and reduce */
template<typename Reducer, typename Op, typename A, std::size_t N, int... n>
-constexpr inline auto h_array_apply_and_reduce(array<A, N> arr, numeric_list<int, n...>) -> decltype(reduce<Reducer, typename id_numeric<int,n,decltype(Op::run(A()))>::type...>::run(Op::run(array_get<n>(arr))...))
+constexpr EIGEN_STRONG_INLINE auto h_array_apply_and_reduce(array<A, N> arr, numeric_list<int, n...>) -> decltype(reduce<Reducer, typename id_numeric<int,n,decltype(Op::run(A()))>::type...>::run(Op::run(array_get<n>(arr))...))
{
return reduce<Reducer, typename id_numeric<int,n,decltype(Op::run(A()))>::type...>::run(Op::run(array_get<n>(arr))...);
}
template<typename Reducer, typename Op, typename A, std::size_t N>
-constexpr inline auto array_apply_and_reduce(array<A, N> a) -> decltype(h_array_apply_and_reduce<Reducer, Op, A, N>(a, typename gen_numeric_list<int, N>::type()))
+constexpr EIGEN_STRONG_INLINE auto array_apply_and_reduce(array<A, N> a) -> decltype(h_array_apply_and_reduce<Reducer, Op, A, N>(a, typename gen_numeric_list<int, N>::type()))
{
return h_array_apply_and_reduce<Reducer, Op, A, N>(a, typename gen_numeric_list<int, N>::type());
}
@@ -476,7 +476,7 @@ template<int n>
struct h_repeat
{
template<typename t, int... ii>
- constexpr static inline array<t, n> run(t v, numeric_list<int, ii...>)
+ constexpr static EIGEN_STRONG_INLINE array<t, n> run(t v, numeric_list<int, ii...>)
{
return {{ typename id_numeric<int, ii, t>::type(v)... }};
}
diff --git a/unsupported/Eigen/CXX11/src/util/EmulateArray.h b/unsupported/Eigen/CXX11/src/util/EmulateArray.h
index 03169d591..39c255791 100644
--- a/unsupported/Eigen/CXX11/src/util/EmulateArray.h
+++ b/unsupported/Eigen/CXX11/src/util/EmulateArray.h
@@ -15,15 +15,20 @@
// The array class is only available starting with cxx11. Emulate our own here
// if needed. Beware, msvc still doesn't advertise itself as a c++11 compiler!
// Moreover, CUDA doesn't support the STL containers, so we use our own instead.
-#if (__cplusplus <= 199711L && EIGEN_COMP_MSVC < 1900) || defined(__CUDACC__) || defined(EIGEN_AVOID_STL_ARRAY)
+#if (__cplusplus <= 199711L && EIGEN_COMP_MSVC < 1900) || defined(EIGEN_GPUCC) || defined(EIGEN_AVOID_STL_ARRAY)
namespace Eigen {
template <typename T, size_t n> class array {
public:
EIGEN_DEVICE_FUNC
- EIGEN_STRONG_INLINE T& operator[] (size_t index) { return values[index]; }
+ EIGEN_STRONG_INLINE T& operator[] (size_t index) { eigen_internal_assert(index < size()); return values[index]; }
EIGEN_DEVICE_FUNC
- EIGEN_STRONG_INLINE const T& operator[] (size_t index) const { return values[index]; }
+ EIGEN_STRONG_INLINE const T& operator[] (size_t index) const { eigen_internal_assert(index < size()); return values[index]; }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE T& at(size_t index) { eigen_assert(index < size()); return values[index]; }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const T& at(size_t index) const { eigen_assert(index < size()); return values[index]; }
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE T& front() { return values[0]; }
@@ -169,6 +174,7 @@ template <typename T> class array<T, 0> {
#if EIGEN_HAS_VARIADIC_TEMPLATES
EIGEN_DEVICE_FUNC array(std::initializer_list<T> l) : dummy() {
+ EIGEN_UNUSED_VARIABLE(l);
eigen_assert(l.size() == 0);
}
#endif
@@ -201,16 +207,16 @@ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const T& array_get(const array<T,N>& a) {
}
template<class T, std::size_t N> struct array_size<array<T,N> > {
- static const size_t value = N;
+ enum { value = N };
};
template<class T, std::size_t N> struct array_size<array<T,N>& > {
- static const size_t value = N;
+ enum { value = N };
};
template<class T, std::size_t N> struct array_size<const array<T,N> > {
- static const size_t value = N;
+ enum { value = N };
};
template<class T, std::size_t N> struct array_size<const array<T,N>& > {
- static const size_t value = N;
+ enum { value = N };
};
} // end namespace internal
@@ -218,7 +224,7 @@ template<class T, std::size_t N> struct array_size<const array<T,N>& > {
#else
-// The compiler supports c++11, and we're not targetting cuda: use std::array as Eigen::array
+// The compiler supports c++11, and we're not targeting cuda: use std::array as Eigen::array
#include <array>
namespace Eigen {
diff --git a/unsupported/Eigen/CXX11/src/util/EmulateCXX11Meta.h b/unsupported/Eigen/CXX11/src/util/EmulateCXX11Meta.h
index f3aa1b144..8a536faf6 100644
--- a/unsupported/Eigen/CXX11/src/util/EmulateCXX11Meta.h
+++ b/unsupported/Eigen/CXX11/src/util/EmulateCXX11Meta.h
@@ -188,7 +188,7 @@ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE t array_prod(const array<t, n>& a) {
}
template<typename t>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE t array_prod(const array<t, 0>& /*a*/) {
- return 0;
+ return 1;
}
template<typename t>
diff --git a/unsupported/Eigen/CXX11/src/util/MaxSizeVector.h b/unsupported/Eigen/CXX11/src/util/MaxSizeVector.h
index 4bc3dd1ba..277ab149a 100644
--- a/unsupported/Eigen/CXX11/src/util/MaxSizeVector.h
+++ b/unsupported/Eigen/CXX11/src/util/MaxSizeVector.h
@@ -29,13 +29,13 @@ namespace Eigen {
*/
template <typename T>
class MaxSizeVector {
+ static const size_t alignment = EIGEN_PLAIN_ENUM_MAX(EIGEN_ALIGNOF(T), sizeof(void*));
public:
// Construct a new MaxSizeVector, reserve n elements.
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
explicit MaxSizeVector(size_t n)
: reserve_(n), size_(0),
- data_(static_cast<T*>(internal::aligned_malloc(n * sizeof(T)))) {
- for (size_t i = 0; i < n; ++i) { new (&data_[i]) T; }
+ data_(static_cast<T*>(internal::handmade_aligned_malloc(n * sizeof(T), alignment))) {
}
// Construct a new MaxSizeVector, reserve and resize to n.
@@ -43,36 +43,56 @@ class MaxSizeVector {
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
MaxSizeVector(size_t n, const T& init)
: reserve_(n), size_(n),
- data_(static_cast<T*>(internal::aligned_malloc(n * sizeof(T)))) {
- for (size_t i = 0; i < n; ++i) { new (&data_[i]) T(init); }
+ data_(static_cast<T*>(internal::handmade_aligned_malloc(n * sizeof(T), alignment))) {
+ size_t i = 0;
+ EIGEN_TRY
+ {
+ for(; i < size_; ++i) { new (&data_[i]) T(init); }
+ }
+ EIGEN_CATCH(...)
+ {
+ // Construction failed, destruct in reverse order:
+ for(; (i+1) > 0; --i) { data_[i-1].~T(); }
+ internal::handmade_aligned_free(data_);
+ EIGEN_THROW;
+ }
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
~MaxSizeVector() {
- for (size_t i = 0; i < size_; ++i) {
- data_[i].~T();
+ for (size_t i = size_; i > 0; --i) {
+ data_[i-1].~T();
}
- internal::aligned_free(data_);
+ internal::handmade_aligned_free(data_);
}
void resize(size_t n) {
eigen_assert(n <= reserve_);
- for (size_t i = size_; i < n; ++i) {
- new (&data_[i]) T;
+ for (; size_ < n; ++size_) {
+ new (&data_[size_]) T;
}
- for (size_t i = n; i < size_; ++i) {
- data_[i].~T();
+ for (; size_ > n; --size_) {
+ data_[size_-1].~T();
}
- size_ = n;
+ eigen_assert(size_ == n);
}
// Append new elements (up to reserved size).
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
void push_back(const T& t) {
eigen_assert(size_ < reserve_);
- data_[size_++] = t;
+ new (&data_[size_++]) T(t);
}
+ // For C++03 compatibility this only takes one argument
+ template<class X>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ void emplace_back(const X& x) {
+ eigen_assert(size_ < reserve_);
+ new (&data_[size_++]) T(x);
+ }
+
+
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const T& operator[] (size_t i) const {
eigen_assert(i < size_);
@@ -99,11 +119,8 @@ class MaxSizeVector {
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
void pop_back() {
- // NOTE: This does not destroy the value at the end the way
- // std::vector's version of pop_back() does. That happens when
- // the Vector is destroyed.
eigen_assert(size_ > 0);
- size_--;
+ data_[--size_].~T();
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
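// [Editor's sketch, not part of the patch] The ownership change above in a
// nutshell: elements are now placement-new'd only when pushed and destroyed
// eagerly in pop_back()/resize(), instead of default-constructing the whole
// reserved area up front. Standalone analogue (plain malloc instead of the
// aligned allocator, no exception handling):
#include <cassert>
#include <cstddef>
#include <cstdlib>
#include <new>

template <typename T>
class TinyMaxSizeVector {
 public:
  explicit TinyMaxSizeVector(std::size_t n)
      : reserve_(n), size_(0), data_(static_cast<T*>(std::malloc(n * sizeof(T)))) {}
  ~TinyMaxSizeVector() {
    for (std::size_t i = size_; i > 0; --i) data_[i - 1].~T();  // reverse order
    std::free(data_);
  }
  void push_back(const T& t) { assert(size_ < reserve_); new (&data_[size_++]) T(t); }
  void pop_back() { assert(size_ > 0); data_[--size_].~T(); }
  std::size_t size() const { return size_; }
 private:
  std::size_t reserve_, size_;
  T* data_;
};

int main() {
  TinyMaxSizeVector<int> v(4);
  v.push_back(7);
  v.push_back(9);
  v.pop_back();              // destroys the 9 immediately, unlike the old code
  assert(v.size() == 1);
}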
diff --git a/unsupported/Eigen/EulerAngles b/unsupported/Eigen/EulerAngles
index 521fa3f76..f8f1c5d0b 100644
--- a/unsupported/Eigen/EulerAngles
+++ b/unsupported/Eigen/EulerAngles
@@ -11,10 +11,10 @@
#define EIGEN_EULERANGLES_MODULE_H
-#include "Eigen/Core"
-#include "Eigen/Geometry"
+#include "../../Eigen/Core"
+#include "../../Eigen/Geometry"
-#include "Eigen/src/Core/util/DisableStupidWarnings.h"
+#include "../../Eigen/src/Core/util/DisableStupidWarnings.h"
namespace Eigen {
@@ -38,6 +38,6 @@ namespace Eigen {
#include "src/EulerAngles/EulerSystem.h"
#include "src/EulerAngles/EulerAngles.h"
-#include "Eigen/src/Core/util/ReenableStupidWarnings.h"
+#include "../../Eigen/src/Core/util/ReenableStupidWarnings.h"
#endif // EIGEN_EULERANGLES_MODULE_H
diff --git a/unsupported/Eigen/FFT b/unsupported/Eigen/FFT
index 2c45b3999..d9ad21a5a 100644
--- a/unsupported/Eigen/FFT
+++ b/unsupported/Eigen/FFT
@@ -13,7 +13,7 @@
#include <complex>
#include <vector>
#include <map>
-#include <Eigen/Core>
+#include "../../Eigen/Core"
/**
@@ -68,6 +68,8 @@
*/
+#include "../../Eigen/src/Core/util/DisableStupidWarnings.h"
+
#ifdef EIGEN_FFTW_DEFAULT
// FFTW: faster, GPL -- incompatible with Eigen in LGPL form, bigger code size
# include <fftw3.h>
@@ -289,6 +291,7 @@ class FFT
void inv( MatrixBase<OutputDerived> & dst, const MatrixBase<ComplexDerived> & src, Index nfft=-1)
{
typedef typename ComplexDerived::Scalar src_type;
+ typedef typename ComplexDerived::RealScalar real_type;
typedef typename OutputDerived::Scalar dst_type;
const bool realfft= (NumTraits<dst_type>::IsComplex == 0);
EIGEN_STATIC_ASSERT_VECTOR_ONLY(OutputDerived)
@@ -329,9 +332,9 @@ class FFT
tmp.head(nhead) = src.head(nhead);
tmp.tail(ntail) = src.tail(ntail);
if (resize_input<0) { //shrinking -- create the Nyquist bin as the average of the two bins that fold into it
- tmp(nhead) = ( src(nfft/2) + src( src.size() - nfft/2 ) )*src_type(.5);
+ tmp(nhead) = ( src(nfft/2) + src( src.size() - nfft/2 ) )*real_type(.5);
}else{ // expanding -- split the old Nyquist bin into two halves
- tmp(nhead) = src(nhead) * src_type(.5);
+ tmp(nhead) = src(nhead) * real_type(.5);
tmp(tmp.size()-nhead) = tmp(nhead);
}
}
@@ -414,5 +417,8 @@ void fft_inv_proxy<T_SrcMat,T_FftIfc>::evalTo(T_DestMat& dst) const
}
}
+
+#include "../../Eigen/src/Core/util/ReenableStupidWarnings.h"
+
#endif
/* vim: set filetype=cpp et sw=2 ts=2 ai: */
diff --git a/unsupported/Eigen/IterativeSolvers b/unsupported/Eigen/IterativeSolvers
index 31e880bdc..0fa129a7b 100644
--- a/unsupported/Eigen/IterativeSolvers
+++ b/unsupported/Eigen/IterativeSolvers
@@ -10,7 +10,9 @@
#ifndef EIGEN_ITERATIVE_SOLVERS_MODULE_H
#define EIGEN_ITERATIVE_SOLVERS_MODULE_H
-#include <Eigen/Sparse>
+#include "../../Eigen/Sparse"
+#include "../../Eigen/Jacobi"
+#include "../../Eigen/Householder"
/**
* \defgroup IterativeSolvers_Module Iterative solvers module
@@ -24,19 +26,21 @@
*/
//@{
+#include "../../Eigen/src/Core/util/DisableStupidWarnings.h"
+
#ifndef EIGEN_MPL2_ONLY
#include "src/IterativeSolvers/IterationController.h"
#include "src/IterativeSolvers/ConstrainedConjGrad.h"
#endif
#include "src/IterativeSolvers/IncompleteLU.h"
-#include "../../Eigen/Jacobi"
-#include "../../Eigen/Householder"
#include "src/IterativeSolvers/GMRES.h"
#include "src/IterativeSolvers/DGMRES.h"
//#include "src/IterativeSolvers/SSORPreconditioner.h"
#include "src/IterativeSolvers/MINRES.h"
+#include "../../Eigen/src/Core/util/ReenableStupidWarnings.h"
+
//@}
#endif // EIGEN_ITERATIVE_SOLVERS_MODULE_H
diff --git a/unsupported/Eigen/LevenbergMarquardt b/unsupported/Eigen/LevenbergMarquardt
index 0fe2680ba..109050501 100644
--- a/unsupported/Eigen/LevenbergMarquardt
+++ b/unsupported/Eigen/LevenbergMarquardt
@@ -12,12 +12,12 @@
// #include <vector>
-#include <Eigen/Core>
-#include <Eigen/Jacobi>
-#include <Eigen/QR>
-#include <unsupported/Eigen/NumericalDiff>
+#include "../../Eigen/Core"
+#include "../../Eigen/Jacobi"
+#include "../../Eigen/QR"
+#include "NumericalDiff"
-#include <Eigen/SparseQR>
+#include "../../Eigen/SparseQR"
/**
* \defgroup LevenbergMarquardt_Module Levenberg-Marquardt module
@@ -29,7 +29,10 @@
*
*/
-#include "Eigen/SparseCore"
+#include "../../Eigen/SparseCore"
+
+#include "../../Eigen/src/Core/util/DisableStupidWarnings.h"
+
#ifndef EIGEN_PARSED_BY_DOXYGEN
#include "src/LevenbergMarquardt/LMqrsolv.h"
@@ -41,5 +44,6 @@
#include "src/LevenbergMarquardt/LevenbergMarquardt.h"
#include "src/LevenbergMarquardt/LMonestep.h"
+#include "../../Eigen/src/Core/util/ReenableStupidWarnings.h"
#endif // EIGEN_LEVENBERGMARQUARDT_MODULE
diff --git a/unsupported/Eigen/MPRealSupport b/unsupported/Eigen/MPRealSupport
index 7f0b70c63..c4ea4ec5f 100644
--- a/unsupported/Eigen/MPRealSupport
+++ b/unsupported/Eigen/MPRealSupport
@@ -12,7 +12,7 @@
#ifndef EIGEN_MPREALSUPPORT_MODULE_H
#define EIGEN_MPREALSUPPORT_MODULE_H
-#include <Eigen/Core>
+#include "../../Eigen/Core"
#include <mpreal.h>
namespace Eigen {
@@ -90,6 +90,9 @@ int main()
#ifdef MPREAL_HAVE_DYNAMIC_STD_NUMERIC_LIMITS
static inline int digits10 (long Precision = mpfr::mpreal::get_default_prec()) { return std::numeric_limits<Real>::digits10(Precision); }
static inline int digits10 (const Real& x) { return std::numeric_limits<Real>::digits10(x); }
+
+ static inline int digits () { return std::numeric_limits<Real>::digits(); }
+ static inline int digits (const Real& x) { return std::numeric_limits<Real>::digits(x); }
#endif
static inline Real dummy_precision()
@@ -159,6 +162,7 @@ int main()
typedef ResScalar LhsPacket;
typedef ResScalar RhsPacket;
typedef ResScalar ResPacket;
+ typedef LhsPacket LhsPacket4Packing;
};
diff --git a/unsupported/Eigen/MatrixFunctions b/unsupported/Eigen/MatrixFunctions
index 0320606c1..06acde68d 100644
--- a/unsupported/Eigen/MatrixFunctions
+++ b/unsupported/Eigen/MatrixFunctions
@@ -14,9 +14,9 @@
#include <cfloat>
#include <list>
-#include <Eigen/Core>
-#include <Eigen/LU>
-#include <Eigen/Eigenvalues>
+#include "../../Eigen/Core"
+#include "../../Eigen/LU"
+#include "../../Eigen/Eigenvalues"
/**
* \defgroup MatrixFunctions_Module Matrix functions module
@@ -53,12 +53,16 @@
*
*/
+#include "../../Eigen/src/Core/util/DisableStupidWarnings.h"
+
#include "src/MatrixFunctions/MatrixExponential.h"
#include "src/MatrixFunctions/MatrixFunction.h"
#include "src/MatrixFunctions/MatrixSquareRoot.h"
#include "src/MatrixFunctions/MatrixLogarithm.h"
#include "src/MatrixFunctions/MatrixPower.h"
+#include "../../Eigen/src/Core/util/ReenableStupidWarnings.h"
+
/**
\page matrixbaseextra_page
diff --git a/unsupported/Eigen/MoreVectorization b/unsupported/Eigen/MoreVectorization
index 470e72430..7662b4780 100644
--- a/unsupported/Eigen/MoreVectorization
+++ b/unsupported/Eigen/MoreVectorization
@@ -9,7 +9,7 @@
#ifndef EIGEN_MOREVECTORIZATION_MODULE_H
#define EIGEN_MOREVECTORIZATION_MODULE_H
-#include <Eigen/Core>
+#include "../../Eigen/Core"
namespace Eigen {
diff --git a/unsupported/Eigen/NonLinearOptimization b/unsupported/Eigen/NonLinearOptimization
index 600ab4c12..961f192b5 100644
--- a/unsupported/Eigen/NonLinearOptimization
+++ b/unsupported/Eigen/NonLinearOptimization
@@ -12,10 +12,10 @@
#include <vector>
-#include <Eigen/Core>
-#include <Eigen/Jacobi>
-#include <Eigen/QR>
-#include <unsupported/Eigen/NumericalDiff>
+#include "../../Eigen/Core"
+#include "../../Eigen/Jacobi"
+#include "../../Eigen/QR"
+#include "NumericalDiff"
/**
* \defgroup NonLinearOptimization_Module Non linear optimization module
@@ -30,12 +30,12 @@
* actually linear. But if this is so, you should probably better use other
* methods more fitted to this special case.
*
- * One algorithm allows to find an extremum of such a system (Levenberg
- * Marquardt algorithm) and the second one is used to find
+ * One algorithm allows finding a least-squares solution of such a system
+ * (Levenberg-Marquardt algorithm) and the second one is used to find
* a zero for the system (Powell hybrid "dogleg" method).
*
* This code is a port of minpack (http://en.wikipedia.org/wiki/MINPACK).
- * Minpack is a very famous, old, robust and well-reknown package, written in
+ * Minpack is a very famous, old, robust and well renowned package, written in
* fortran. Those implementations have been carefully tuned, tested, and used
* for several decades.
*
@@ -58,35 +58,41 @@
* There are two kinds of tests : those that come from examples bundled with cminpack.
* They guarantee we get the same results as the original algorithms (value for 'x',
* for the number of evaluations of the function, and for the number of evaluations
- * of the jacobian if ever).
+ * of the Jacobian if ever).
*
* Other tests were added by myself at the very beginning of the
- * process and check the results for levenberg-marquardt using the reference data
+ * process and check the results for Levenberg-Marquardt using the reference data
* on http://www.itl.nist.gov/div898/strd/nls/nls_main.shtml. Since then I've
- * carefully checked that the same results were obtained when modifiying the
+ * carefully checked that the same results were obtained when modifying the
* code. Please note that we do not always get the exact same decimals as they do,
* but this is ok : they use 128bits float, and we do the tests using the C type 'double',
* which is 64 bits on most platforms (x86 and amd64, at least).
- * I've performed those tests on several other implementations of levenberg-marquardt, and
+ * I've performed those tests on several other implementations of Levenberg-Marquardt, and
* (c)minpack performs VERY well compared to those, both in accuracy and speed.
*
* The documentation for running the tests is on the wiki
* http://eigen.tuxfamily.org/index.php?title=Tests
*
- * \section API API : overview of methods
+ * \section API API: overview of methods
*
- * Both algorithms can use either the jacobian (provided by the user) or compute
- * an approximation by themselves (actually using Eigen \ref NumericalDiff_Module).
- * The part of API referring to the latter use 'NumericalDiff' in the method names
- * (exemple: LevenbergMarquardt.minimizeNumericalDiff() )
+ * Both algorithms needs a functor computing the Jacobian. It can be computed by
+ * hand, using auto-differentiation (see \ref AutoDiff_Module), or using numerical
+ * differences (see \ref NumericalDiff_Module). For instance:
+ *\code
+ * MyFunc func;
+ * NumericalDiff<MyFunc> func_with_num_diff(func);
+ * LevenbergMarquardt<NumericalDiff<MyFunc> > lm(func_with_num_diff);
+ * \endcode
+ * For HybridNonLinearSolver, the method solveNumericalDiff() does the above wrapping for
+ * you.
*
* The methods LevenbergMarquardt.lmder1()/lmdif1()/lmstr1() and
* HybridNonLinearSolver.hybrj1()/hybrd1() are specific methods from the original
* minpack package that you probably should NOT use until you are porting a code that
- * was previously using minpack. They just define a 'simple' API with default values
+ * was previously using minpack. They just define a 'simple' API with default values
* for some parameters.
*
- * All algorithms are provided using Two APIs :
+ * All algorithms are provided using two APIs :
* - one where the user inits the algorithm, and uses '*OneStep()' as much as he wants :
* this way the caller has control over the steps
* - one where the user just calls a method (optimize() or solve()) which will
@@ -94,7 +100,7 @@
* convenience.
*
* As an example, the method LevenbergMarquardt::minimize() is
- * implemented as follow :
+ * implemented as follow:
* \code
* Status LevenbergMarquardt<FunctorType,Scalar>::minimize(FVectorType &x, const int mode)
* {
diff --git a/unsupported/Eigen/NumericalDiff b/unsupported/Eigen/NumericalDiff
index 433334ca8..0668f960f 100644
--- a/unsupported/Eigen/NumericalDiff
+++ b/unsupported/Eigen/NumericalDiff
@@ -10,7 +10,7 @@
#ifndef EIGEN_NUMERICALDIFF_MODULE
#define EIGEN_NUMERICALDIFF_MODULE
-#include <Eigen/Core>
+#include "../../Eigen/Core"
namespace Eigen {
diff --git a/unsupported/Eigen/OpenGLSupport b/unsupported/Eigen/OpenGLSupport
index 87f50947d..f8c213003 100644
--- a/unsupported/Eigen/OpenGLSupport
+++ b/unsupported/Eigen/OpenGLSupport
@@ -10,7 +10,7 @@
#ifndef EIGEN_OPENGL_MODULE
#define EIGEN_OPENGL_MODULE
-#include <Eigen/Geometry>
+#include "../../Eigen/Geometry"
#if defined(__APPLE_CC__)
#include <OpenGL/gl.h>
@@ -25,7 +25,7 @@ namespace Eigen {
*
* This module provides wrapper functions for a couple of OpenGL functions
* which simplify the way to pass Eigen's object to openGL.
- * Here is an exmaple:
+ * Here is an example:
*
* \code
* // You need to add path_to_eigen/unsupported to your include path.
@@ -184,7 +184,7 @@ inline void glRotate(const Rotation2D<float>& rot)
}
inline void glRotate(const Rotation2D<double>& rot)
{
- glRotated(rot.angle()*180.0/EIGEN_PI, 0.0, 0.0, 1.0);
+ glRotated(rot.angle()*180.0/double(EIGEN_PI), 0.0, 0.0, 1.0);
}
template<typename Derived> void glRotate(const RotationBase<Derived,3>& rot)
diff --git a/unsupported/Eigen/Polynomials b/unsupported/Eigen/Polynomials
index cece56337..146e5c404 100644
--- a/unsupported/Eigen/Polynomials
+++ b/unsupported/Eigen/Polynomials
@@ -9,11 +9,11 @@
#ifndef EIGEN_POLYNOMIALS_MODULE_H
#define EIGEN_POLYNOMIALS_MODULE_H
-#include <Eigen/Core>
+#include "../../Eigen/Core"
-#include <Eigen/src/Core/util/DisableStupidWarnings.h>
+#include "../../Eigen/Eigenvalues"
-#include <Eigen/Eigenvalues>
+#include "../../Eigen/src/Core/util/DisableStupidWarnings.h"
// Note that EIGEN_HIDE_HEAVY_CODE has to be defined per module
#if (defined EIGEN_EXTERN_INSTANTIATIONS) && (EIGEN_EXTERN_INSTANTIATIONS>=2)
@@ -132,7 +132,7 @@
Output: \verbinclude PolynomialSolver1.out
*/
-#include <Eigen/src/Core/util/ReenableStupidWarnings.h>
+#include "../../Eigen/src/Core/util/ReenableStupidWarnings.h"
#endif // EIGEN_POLYNOMIALS_MODULE_H
/* vim: set filetype=cpp et sw=2 ts=2 ai: */
diff --git a/unsupported/Eigen/Skyline b/unsupported/Eigen/Skyline
index 71a68cb42..ebdf143f7 100644
--- a/unsupported/Eigen/Skyline
+++ b/unsupported/Eigen/Skyline
@@ -10,9 +10,9 @@
#define EIGEN_SKYLINE_MODULE_H
-#include "Eigen/Core"
+#include "../../Eigen/Core"
-#include "Eigen/src/Core/util/DisableStupidWarnings.h"
+#include "../../Eigen/src/Core/util/DisableStupidWarnings.h"
#include <map>
#include <cstdlib>
@@ -34,6 +34,6 @@
#include "src/Skyline/SkylineInplaceLU.h"
#include "src/Skyline/SkylineProduct.h"
-#include "Eigen/src/Core/util/ReenableStupidWarnings.h"
+#include "../../Eigen/src/Core/util/ReenableStupidWarnings.h"
#endif // EIGEN_SKYLINE_MODULE_H
diff --git a/unsupported/Eigen/SpecialFunctions b/unsupported/Eigen/SpecialFunctions
index a2ad4925e..44fd99b43 100644
--- a/unsupported/Eigen/SpecialFunctions
+++ b/unsupported/Eigen/SpecialFunctions
@@ -29,11 +29,15 @@ namespace Eigen {
* - erfc
* - lgamma
* - igamma
+ * - igamma_der_a
+ * - gamma_sample_der_alpha
* - igammac
* - digamma
* - polygamma
* - zeta
* - betainc
+ * - i0e
+ * - i1e
*
* \code
* #include <unsupported/Eigen/SpecialFunctions>
@@ -49,8 +53,8 @@ namespace Eigen {
#include "src/SpecialFunctions/SpecialFunctionsFunctors.h"
#include "src/SpecialFunctions/SpecialFunctionsArrayAPI.h"
-#if defined EIGEN_VECTORIZE_CUDA
- #include "src/SpecialFunctions/arch/CUDA/CudaSpecialFunctions.h"
+#if defined EIGEN_VECTORIZE_GPU
+ #include "src/SpecialFunctions/arch/GPU/GpuSpecialFunctions.h"
#endif
namespace Eigen {
diff --git a/unsupported/Eigen/Splines b/unsupported/Eigen/Splines
index 322e6b9f5..2ca581364 100644
--- a/unsupported/Eigen/Splines
+++ b/unsupported/Eigen/Splines
@@ -24,8 +24,12 @@ namespace Eigen
*/
}
+#include "../../Eigen/src/Core/util/DisableStupidWarnings.h"
+
#include "src/Splines/SplineFwd.h"
#include "src/Splines/Spline.h"
#include "src/Splines/SplineFitting.h"
+#include "../../Eigen/src/Core/util/ReenableStupidWarnings.h"
+
#endif // EIGEN_SPLINES_MODULE_H
diff --git a/unsupported/Eigen/src/AutoDiff/AutoDiffScalar.h b/unsupported/Eigen/src/AutoDiff/AutoDiffScalar.h
index 50fedf6ac..2db914765 100755
--- a/unsupported/Eigen/src/AutoDiff/AutoDiffScalar.h
+++ b/unsupported/Eigen/src/AutoDiff/AutoDiffScalar.h
@@ -108,7 +108,9 @@ class AutoDiffScalar
template<typename OtherDerType>
AutoDiffScalar(const AutoDiffScalar<OtherDerType>& other
#ifndef EIGEN_PARSED_BY_DOXYGEN
- , typename internal::enable_if<internal::is_same<Scalar, typename internal::traits<typename internal::remove_all<OtherDerType>::type>::Scalar>::value,void*>::type = 0
+ , typename internal::enable_if<
+ internal::is_same<Scalar, typename internal::traits<typename internal::remove_all<OtherDerType>::type>::Scalar>::value
+ && internal::is_convertible<OtherDerType,DerType>::value , void*>::type = 0
#endif
)
: m_value(other.value()), m_derivatives(other.derivatives())
@@ -532,7 +534,8 @@ struct ScalarBinaryOpTraits<typename DerType::Scalar,AutoDiffScalar<DerType>, Bi
EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(typename Eigen::internal::remove_all<DerType>::type, typename Eigen::internal::traits<typename Eigen::internal::remove_all<DerType>::type>::Scalar, product) > \
FUNC(const Eigen::AutoDiffScalar<DerType>& x) { \
using namespace Eigen; \
- EIGEN_UNUSED typedef typename Eigen::internal::traits<typename Eigen::internal::remove_all<DerType>::type>::Scalar Scalar; \
+ typedef typename Eigen::internal::traits<typename Eigen::internal::remove_all<DerType>::type>::Scalar Scalar; \
+ EIGEN_UNUSED_VARIABLE(sizeof(Scalar)); \
CODE; \
}
@@ -681,4 +684,16 @@ template<typename DerType> struct NumTraits<AutoDiffScalar<DerType> >
}
+namespace std {
+
+template <typename T>
+class numeric_limits<Eigen::AutoDiffScalar<T> >
+ : public numeric_limits<typename T::Scalar> {};
+
+template <typename T>
+class numeric_limits<Eigen::AutoDiffScalar<T&> >
+ : public numeric_limits<typename T::Scalar> {};
+
+} // namespace std
+
#endif // EIGEN_AUTODIFF_SCALAR_H
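// [Editor's sketch, not part of the patch] What the std::numeric_limits
// specializations above enable: generic code that queries limits through the
// scalar type keeps working when that scalar is an AutoDiffScalar, forwarding
// to the limits of the nested Scalar (double here). Illustrative usage:
#include <limits>
#include <Eigen/Core>
#include <unsupported/Eigen/AutoDiff>

int main() {
  typedef Eigen::AutoDiffScalar<Eigen::VectorXd> AD;
  const bool forwarded =
      std::numeric_limits<AD>::epsilon() == std::numeric_limits<double>::epsilon();
  return forwarded ? 0 : 1;
}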
diff --git a/unsupported/Eigen/src/BVH/KdBVH.h b/unsupported/Eigen/src/BVH/KdBVH.h
index 1b8d75865..2d5b76ad0 100644
--- a/unsupported/Eigen/src/BVH/KdBVH.h
+++ b/unsupported/Eigen/src/BVH/KdBVH.h
@@ -35,6 +35,7 @@ struct get_boxes_helper {
{
outBoxes.insert(outBoxes.end(), boxBegin, boxEnd);
eigen_assert(outBoxes.size() == objects.size());
+ EIGEN_ONLY_USED_FOR_DEBUG(objects);
}
};
@@ -170,7 +171,7 @@ private:
typedef internal::vector_int_pair<Scalar, Dim> VIPair;
typedef std::vector<VIPair, aligned_allocator<VIPair> > VIPairList;
typedef Matrix<Scalar, Dim, 1> VectorType;
- struct VectorComparator //compares vectors, or, more specificall, VIPairs along a particular dimension
+ struct VectorComparator //compares vectors, or more specifically, VIPairs along a particular dimension
{
VectorComparator(int inDim) : dim(inDim) {}
inline bool operator()(const VIPair &v1, const VIPair &v2) const { return v1.first[dim] < v2.first[dim]; }
diff --git a/unsupported/Eigen/src/Eigenvalues/ArpackSelfAdjointEigenSolver.h b/unsupported/Eigen/src/Eigenvalues/ArpackSelfAdjointEigenSolver.h
index 866a8a460..3c6cfb1e3 100644
--- a/unsupported/Eigen/src/Eigenvalues/ArpackSelfAdjointEigenSolver.h
+++ b/unsupported/Eigen/src/Eigenvalues/ArpackSelfAdjointEigenSolver.h
@@ -25,7 +25,7 @@
#ifndef EIGEN_ARPACKGENERALIZEDSELFADJOINTEIGENSOLVER_H
#define EIGEN_ARPACKGENERALIZEDSELFADJOINTEIGENSOLVER_H
-#include <Eigen/Dense>
+#include "../../../../Eigen/Dense"
namespace Eigen {
@@ -300,7 +300,7 @@ public:
/** \brief Reports whether previous computation was successful.
*
- * \returns \c Success if computation was succesful, \c NoConvergence otherwise.
+ * \returns \c Success if computation was successful, \c NoConvergence otherwise.
*/
ComputationInfo info() const
{
diff --git a/unsupported/Eigen/src/EulerAngles/EulerAngles.h b/unsupported/Eigen/src/EulerAngles/EulerAngles.h
index a5d034d71..e43cdb7fb 100644
--- a/unsupported/Eigen/src/EulerAngles/EulerAngles.h
+++ b/unsupported/Eigen/src/EulerAngles/EulerAngles.h
@@ -341,7 +341,7 @@ EIGEN_EULER_ANGLES_TYPEDEFS(double, d)
// set from a vector of Euler angles
template<class System, class Other>
- struct eulerangles_assign_impl<System,Other,4,1>
+ struct eulerangles_assign_impl<System,Other,3,1>
{
typedef typename Other::Scalar Scalar;
static void run(EulerAngles<Scalar, System>& e, const Other& vec)
diff --git a/unsupported/Eigen/src/EulerAngles/EulerSystem.h b/unsupported/Eigen/src/EulerAngles/EulerSystem.h
index 28f52da61..88acabcf8 100644
--- a/unsupported/Eigen/src/EulerAngles/EulerSystem.h
+++ b/unsupported/Eigen/src/EulerAngles/EulerSystem.h
@@ -12,7 +12,7 @@
namespace Eigen
{
- // Forward declerations
+ // Forward declarations
template <typename _Scalar, class _System>
class EulerAngles;
@@ -173,15 +173,14 @@ namespace Eigen
EIGEN_EULER_ANGLES_CLASS_STATIC_ASSERT((unsigned)BetaAxisAbs != (unsigned)GammaAxisAbs,
BETA_AXIS_CANT_BE_EQUAL_TO_GAMMA_AXIS);
- enum
- {
+ static const int
// I, J, K are the pivot indexes permutation for the rotation matrix, that match this Euler system.
// They are used in this class converters.
// They are always different from each other, and their possible values are: 0, 1, or 2.
I = AlphaAxisAbs - 1,
J = (AlphaAxisAbs - 1 + 1 + IsOdd)%3,
K = (AlphaAxisAbs - 1 + 2 - IsOdd)%3
- };
+ ;
// TODO: Get @mat parameter in form that avoids double evaluation.
template <typename Derived>
diff --git a/unsupported/Eigen/src/FFT/ei_kissfft_impl.h b/unsupported/Eigen/src/FFT/ei_kissfft_impl.h
index be51b4e6f..079e88602 100644
--- a/unsupported/Eigen/src/FFT/ei_kissfft_impl.h
+++ b/unsupported/Eigen/src/FFT/ei_kissfft_impl.h
@@ -316,8 +316,8 @@ struct kissfft_impl
// use optimized mode for even real
fwd( dst, reinterpret_cast<const Complex*> (src), ncfft);
- Complex dc = dst[0].real() + dst[0].imag();
- Complex nyquist = dst[0].real() - dst[0].imag();
+ Complex dc(dst[0].real() + dst[0].imag());
+ Complex nyquist(dst[0].real() - dst[0].imag());
int k;
for ( k=1;k <= ncfft2 ; ++k ) {
Complex fpk = dst[k];
diff --git a/unsupported/Eigen/src/IterativeSolvers/ConstrainedConjGrad.h b/unsupported/Eigen/src/IterativeSolvers/ConstrainedConjGrad.h
index dc0093eb9..5f7cdf29a 100644
--- a/unsupported/Eigen/src/IterativeSolvers/ConstrainedConjGrad.h
+++ b/unsupported/Eigen/src/IterativeSolvers/ConstrainedConjGrad.h
@@ -31,7 +31,7 @@
#ifndef EIGEN_CONSTRAINEDCG_H
#define EIGEN_CONSTRAINEDCG_H
-#include <Eigen/Core>
+#include "../../../../Eigen/Core"
namespace Eigen {
@@ -99,7 +99,7 @@ void pseudo_inverse(const CMatrix &C, CINVMatrix &CINV)
/** \ingroup IterativeSolvers_Module
* Constrained conjugate gradient
*
- * Computes the minimum of \f$ 1/2((Ax).x) - bx \f$ under the contraint \f$ Cx \le f \f$
+ * Computes the minimum of \f$ 1/2((Ax).x) - bx \f$ under the constraint \f$ Cx \le f \f$
*/
template<typename TMatrix, typename CMatrix,
typename VectorX, typename VectorB, typename VectorF>
diff --git a/unsupported/Eigen/src/IterativeSolvers/DGMRES.h b/unsupported/Eigen/src/IterativeSolvers/DGMRES.h
index bae04fc30..2aea53dcb 100644
--- a/unsupported/Eigen/src/IterativeSolvers/DGMRES.h
+++ b/unsupported/Eigen/src/IterativeSolvers/DGMRES.h
@@ -10,7 +10,7 @@
#ifndef EIGEN_DGMRES_H
#define EIGEN_DGMRES_H
-#include <Eigen/Eigenvalues>
+#include "../../../../Eigen/Eigenvalues"
namespace Eigen {
@@ -39,7 +39,6 @@ template <typename VectorType, typename IndexType>
void sortWithPermutation (VectorType& vec, IndexType& perm, typename IndexType::Scalar& ncut)
{
eigen_assert(vec.size() == perm.size());
- typedef typename IndexType::Scalar Index;
bool flag;
for (Index k = 0; k < ncut; k++)
{
@@ -89,7 +88,7 @@ void sortWithPermutation (VectorType& vec, IndexType& perm, typename IndexType::
* [1] D. NUENTSA WAKAM and F. PACULL, Memory Efficient Hybrid
* Algebraic Solvers for Linear Systems Arising from Compressible
* Flows, Computers and Fluids, In Press,
- * http://dx.doi.org/10.1016/j.compfluid.2012.03.023
+ * https://doi.org/10.1016/j.compfluid.2012.03.023
* [2] K. Burrage and J. Erhel, On the performance of various
* adaptive preconditioned GMRES strategies, 5(1998), 101-121.
* [3] J. Erhel, K. Burrage and B. Pohl, Restarted GMRES
@@ -112,7 +111,6 @@ class DGMRES : public IterativeSolverBase<DGMRES<_MatrixType,_Preconditioner> >
using Base::_solve_impl;
typedef _MatrixType MatrixType;
typedef typename MatrixType::Scalar Scalar;
- typedef typename MatrixType::Index Index;
typedef typename MatrixType::StorageIndex StorageIndex;
typedef typename MatrixType::RealScalar RealScalar;
typedef _Preconditioner Preconditioner;
@@ -146,7 +144,7 @@ class DGMRES : public IterativeSolverBase<DGMRES<_MatrixType,_Preconditioner> >
void _solve_with_guess_impl(const Rhs& b, Dest& x) const
{
bool failed = false;
- for(int j=0; j<b.cols(); ++j)
+ for(Index j=0; j<b.cols(); ++j)
{
m_iterations = Base::maxIterations();
m_error = Base::m_tolerance;
@@ -170,17 +168,17 @@ class DGMRES : public IterativeSolverBase<DGMRES<_MatrixType,_Preconditioner> >
/**
* Get the restart value
*/
- int restart() { return m_restart; }
+ Index restart() { return m_restart; }
/**
* Set the restart value (default is 30)
*/
- void set_restart(const int restart) { m_restart=restart; }
+ void set_restart(const Index restart) { m_restart=restart; }
/**
* Set the number of eigenvalues to deflate at each restart
*/
- void setEigenv(const int neig)
+ void setEigenv(const Index neig)
{
m_neig = neig;
if (neig+1 > m_maxNeig) m_maxNeig = neig+1; // To allow for complex conjugates
@@ -189,12 +187,12 @@ class DGMRES : public IterativeSolverBase<DGMRES<_MatrixType,_Preconditioner> >
/**
* Get the size of the deflation subspace size
*/
- int deflSize() {return m_r; }
+ Index deflSize() {return m_r; }
/**
* Set the maximum size of the deflation subspace
*/
- void setMaxEigenv(const int maxNeig) { m_maxNeig = maxNeig; }
+ void setMaxEigenv(const Index maxNeig) { m_maxNeig = maxNeig; }
protected:
// DGMRES algorithm
@@ -202,27 +200,27 @@ class DGMRES : public IterativeSolverBase<DGMRES<_MatrixType,_Preconditioner> >
void dgmres(const MatrixType& mat,const Rhs& rhs, Dest& x, const Preconditioner& precond) const;
// Perform one cycle of GMRES
template<typename Dest>
- int dgmresCycle(const MatrixType& mat, const Preconditioner& precond, Dest& x, DenseVector& r0, RealScalar& beta, const RealScalar& normRhs, int& nbIts) const;
+ Index dgmresCycle(const MatrixType& mat, const Preconditioner& precond, Dest& x, DenseVector& r0, RealScalar& beta, const RealScalar& normRhs, Index& nbIts) const;
// Compute data to use for deflation
- int dgmresComputeDeflationData(const MatrixType& mat, const Preconditioner& precond, const Index& it, StorageIndex& neig) const;
+ Index dgmresComputeDeflationData(const MatrixType& mat, const Preconditioner& precond, const Index& it, StorageIndex& neig) const;
// Apply deflation to a vector
template<typename RhsType, typename DestType>
- int dgmresApplyDeflation(const RhsType& In, DestType& Out) const;
+ Index dgmresApplyDeflation(const RhsType& In, DestType& Out) const;
ComplexVector schurValues(const ComplexSchur<DenseMatrix>& schurofH) const;
ComplexVector schurValues(const RealSchur<DenseMatrix>& schurofH) const;
// Init data for deflation
void dgmresInitDeflation(Index& rows) const;
mutable DenseMatrix m_V; // Krylov basis vectors
mutable DenseMatrix m_H; // Hessenberg matrix
- mutable DenseMatrix m_Hes; // Initial hessenberg matrix wihout Givens rotations applied
+ mutable DenseMatrix m_Hes; // Initial hessenberg matrix without Givens rotations applied
mutable Index m_restart; // Maximum size of the Krylov subspace
mutable DenseMatrix m_U; // Vectors that form the basis of the invariant subspace
mutable DenseMatrix m_MU; // matrix operator applied to m_U (for next cycles)
mutable DenseMatrix m_T; /* T=U^T*M^{-1}*A*U */
mutable PartialPivLU<DenseMatrix> m_luT; // LU factorization of m_T
mutable StorageIndex m_neig; //Number of eigenvalues to extract at each restart
- mutable int m_r; // Current number of deflated eigenvalues, size of m_U
- mutable int m_maxNeig; // Maximum number of eigenvalues to deflate
+ mutable Index m_r; // Current number of deflated eigenvalues, size of m_U
+ mutable Index m_maxNeig; // Maximum number of eigenvalues to deflate
mutable RealScalar m_lambdaN; //Modulus of the largest eigenvalue of A
mutable bool m_isDeflAllocated;
mutable bool m_isDeflInitialized;
@@ -244,13 +242,13 @@ void DGMRES<_MatrixType, _Preconditioner>::dgmres(const MatrixType& mat,const Rh
const Preconditioner& precond) const
{
//Initialization
- int n = mat.rows();
+ Index n = mat.rows();
DenseVector r0(n);
- int nbIts = 0;
+ Index nbIts = 0;
m_H.resize(m_restart+1, m_restart);
m_Hes.resize(m_restart, m_restart);
m_V.resize(n,m_restart+1);
- //Initial residual vector and intial norm
+ //Initial residual vector and initial norm
x = precond.solve(x);
r0 = rhs - mat * x;
RealScalar beta = r0.norm();
@@ -284,7 +282,7 @@ void DGMRES<_MatrixType, _Preconditioner>::dgmres(const MatrixType& mat,const Rh
*/
template< typename _MatrixType, typename _Preconditioner>
template<typename Dest>
-int DGMRES<_MatrixType, _Preconditioner>::dgmresCycle(const MatrixType& mat, const Preconditioner& precond, Dest& x, DenseVector& r0, RealScalar& beta, const RealScalar& normRhs, int& nbIts) const
+Index DGMRES<_MatrixType, _Preconditioner>::dgmresCycle(const MatrixType& mat, const Preconditioner& precond, Dest& x, DenseVector& r0, RealScalar& beta, const RealScalar& normRhs, Index& nbIts) const
{
//Initialization
DenseVector g(m_restart+1); // Right hand side of the least square problem
@@ -293,8 +291,8 @@ int DGMRES<_MatrixType, _Preconditioner>::dgmresCycle(const MatrixType& mat, con
m_V.col(0) = r0/beta;
m_info = NoConvergence;
std::vector<JacobiRotation<Scalar> >gr(m_restart); // Givens rotations
- int it = 0; // Number of inner iterations
- int n = mat.rows();
+ Index it = 0; // Number of inner iterations
+ Index n = mat.rows();
DenseVector tv1(n), tv2(n); //Temporary vectors
while (m_info == NoConvergence && it < m_restart && nbIts < m_iterations)
{
@@ -312,7 +310,7 @@ int DGMRES<_MatrixType, _Preconditioner>::dgmresCycle(const MatrixType& mat, con
// Orthogonalize it with the previous basis in the basis using modified Gram-Schmidt
Scalar coef;
- for (int i = 0; i <= it; ++i)
+ for (Index i = 0; i <= it; ++i)
{
coef = tv1.dot(m_V.col(i));
tv1 = tv1 - coef * m_V.col(i);
@@ -328,7 +326,7 @@ int DGMRES<_MatrixType, _Preconditioner>::dgmresCycle(const MatrixType& mat, con
// FIXME Check for happy breakdown
// Update Hessenberg matrix with Givens rotations
- for (int i = 1; i <= it; ++i)
+ for (Index i = 1; i <= it; ++i)
{
m_H.col(it).applyOnTheLeft(i-1,i,gr[i-1].adjoint());
}
@@ -394,7 +392,6 @@ inline typename DGMRES<_MatrixType, _Preconditioner>::ComplexVector DGMRES<_Matr
template< typename _MatrixType, typename _Preconditioner>
inline typename DGMRES<_MatrixType, _Preconditioner>::ComplexVector DGMRES<_MatrixType, _Preconditioner>::schurValues(const RealSchur<DenseMatrix>& schurofH) const
{
- typedef typename MatrixType::Index Index;
const DenseMatrix& T = schurofH.matrixT();
Index it = T.rows();
ComplexVector eig(it);
@@ -418,7 +415,7 @@ inline typename DGMRES<_MatrixType, _Preconditioner>::ComplexVector DGMRES<_Matr
}
template< typename _MatrixType, typename _Preconditioner>
-int DGMRES<_MatrixType, _Preconditioner>::dgmresComputeDeflationData(const MatrixType& mat, const Preconditioner& precond, const Index& it, StorageIndex& neig) const
+Index DGMRES<_MatrixType, _Preconditioner>::dgmresComputeDeflationData(const MatrixType& mat, const Preconditioner& precond, const Index& it, StorageIndex& neig) const
{
// First, find the Schur form of the Hessenberg matrix H
typename internal::conditional<NumTraits<Scalar>::IsComplex, ComplexSchur<DenseMatrix>, RealSchur<DenseMatrix> >::type schurofH;
@@ -433,8 +430,8 @@ int DGMRES<_MatrixType, _Preconditioner>::dgmresComputeDeflationData(const Matri
// Reorder the absolute values of Schur values
DenseRealVector modulEig(it);
- for (int j=0; j<it; ++j) modulEig(j) = std::abs(eig(j));
- perm.setLinSpaced(it,0,it-1);
+ for (Index j=0; j<it; ++j) modulEig(j) = std::abs(eig(j));
+ perm.setLinSpaced(it,0,internal::convert_index<StorageIndex>(it-1));
internal::sortWithPermutation(modulEig, perm, neig);
if (!m_lambdaN)
@@ -442,7 +439,7 @@ int DGMRES<_MatrixType, _Preconditioner>::dgmresComputeDeflationData(const Matri
m_lambdaN = (std::max)(modulEig.maxCoeff(), m_lambdaN);
}
//Count the real number of extracted eigenvalues (with complex conjugates)
- int nbrEig = 0;
+ Index nbrEig = 0;
while (nbrEig < neig)
{
if(eig(perm(it-nbrEig-1)).imag() == RealScalar(0)) nbrEig++;
@@ -451,7 +448,7 @@ int DGMRES<_MatrixType, _Preconditioner>::dgmresComputeDeflationData(const Matri
// Extract the Schur vectors corresponding to the smallest Ritz values
DenseMatrix Sr(it, nbrEig);
Sr.setZero();
- for (int j = 0; j < nbrEig; j++)
+ for (Index j = 0; j < nbrEig; j++)
{
Sr.col(j) = schurofH.matrixU().col(perm(it-j-1));
}
@@ -462,8 +459,8 @@ int DGMRES<_MatrixType, _Preconditioner>::dgmresComputeDeflationData(const Matri
if (m_r)
{
// Orthogonalize X against m_U using modified Gram-Schmidt
- for (int j = 0; j < nbrEig; j++)
- for (int k =0; k < m_r; k++)
+ for (Index j = 0; j < nbrEig; j++)
+ for (Index k =0; k < m_r; k++)
X.col(j) = X.col(j) - (m_U.col(k).dot(X.col(j)))*m_U.col(k);
}
@@ -473,7 +470,7 @@ int DGMRES<_MatrixType, _Preconditioner>::dgmresComputeDeflationData(const Matri
dgmresInitDeflation(m);
DenseMatrix MX(m, nbrEig);
DenseVector tv1(m);
- for (int j = 0; j < nbrEig; j++)
+ for (Index j = 0; j < nbrEig; j++)
{
tv1 = mat * X.col(j);
MX.col(j) = precond.solve(tv1);
@@ -488,8 +485,8 @@ int DGMRES<_MatrixType, _Preconditioner>::dgmresComputeDeflationData(const Matri
}
// Save X into m_U and m_MX in m_MU
- for (int j = 0; j < nbrEig; j++) m_U.col(m_r+j) = X.col(j);
- for (int j = 0; j < nbrEig; j++) m_MU.col(m_r+j) = MX.col(j);
+ for (Index j = 0; j < nbrEig; j++) m_U.col(m_r+j) = X.col(j);
+ for (Index j = 0; j < nbrEig; j++) m_MU.col(m_r+j) = MX.col(j);
// Increase the size of the invariant subspace
m_r += nbrEig;
@@ -502,7 +499,7 @@ int DGMRES<_MatrixType, _Preconditioner>::dgmresComputeDeflationData(const Matri
}
template<typename _MatrixType, typename _Preconditioner>
template<typename RhsType, typename DestType>
-int DGMRES<_MatrixType, _Preconditioner>::dgmresApplyDeflation(const RhsType &x, DestType &y) const
+Index DGMRES<_MatrixType, _Preconditioner>::dgmresApplyDeflation(const RhsType &x, DestType &y) const
{
DenseVector x1 = m_U.leftCols(m_r).transpose() * x;
y = x + m_U.leftCols(m_r) * ( m_lambdaN * m_luT.solve(x1) - x1);
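[Editorial note] The bulk of the DGMRES changes replace int with Eigen::Index for loop counters, sizes and member fields. Eigen::Index is signed and defaults to std::ptrdiff_t, so counters derived from rows()/cols() cannot overflow on large problems the way a 32-bit int can. A brief sketch of the convention:

#include <iostream>
#include <Eigen/Dense>

int main() {
  Eigen::MatrixXd m = Eigen::MatrixXd::Zero(4, 4);
  for (Eigen::Index j = 0; j < m.cols(); ++j)   // cols() already returns an Index
    m(j, j) = static_cast<double>(j + 1);
  std::cout << m.diagonal().transpose() << "\n";
  return 0;
}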
diff --git a/unsupported/Eigen/src/IterativeSolvers/MINRES.h b/unsupported/Eigen/src/IterativeSolvers/MINRES.h
index 256990c1a..3a5c73eaf 100644
--- a/unsupported/Eigen/src/IterativeSolvers/MINRES.h
+++ b/unsupported/Eigen/src/IterativeSolvers/MINRES.h
@@ -3,6 +3,7 @@
//
// Copyright (C) 2012 Giacomo Po <gpo@ucla.edu>
// Copyright (C) 2011-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2018 David Hyde <dabh@stanford.edu>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@@ -64,8 +65,6 @@ namespace Eigen {
eigen_assert(beta_new2 >= 0.0 && "PRECONDITIONER IS NOT POSITIVE DEFINITE");
RealScalar beta_new(sqrt(beta_new2));
const RealScalar beta_one(beta_new);
- v_new /= beta_new;
- w_new /= beta_new;
// Initialize other variables
RealScalar c(1.0); // the cosine of the Givens rotation
RealScalar c_old(1.0);
@@ -83,18 +82,18 @@ namespace Eigen {
/* Note that there are 4 variants on the Lanczos algorithm. These are
* described in Paige, C. C. (1972). Computational variants of
* the Lanczos method for the eigenproblem. IMA Journal of Applied
- * Mathematics, 10(3), 373–381. The current implementation corresponds
+ * Mathematics, 10(3), 373-381. The current implementation corresponds
* to the case A(2,7) in the paper. It also corresponds to
- * algorithm 6.14 in Y. Saad, Iterative Methods for Sparse Linear
+ * algorithm 6.14 in Y. Saad, Iterative Methods for Sparse Linear
* Systems, 2003 p.173. For the preconditioned version see
* A. Greenbaum, Iterative Methods for Solving Linear Systems, SIAM (1987).
*/
const RealScalar beta(beta_new);
v_old = v; // update: at first time step, this makes v_old = 0 so value of beta doesn't matter
-// const VectorType v_old(v); // NOT SURE IF CREATING v_old EVERY ITERATION IS EFFICIENT
+ v_new /= beta_new; // overwrite v_new for next iteration
+ w_new /= beta_new; // overwrite w_new for next iteration
v = v_new; // update
w = w_new; // update
-// const VectorType w(w_new); // NOT SURE IF CREATING w EVERY ITERATION IS EFFICIENT
v_new.noalias() = mat*w - beta*v_old; // compute v_new
const RealScalar alpha = v_new.dot(w);
v_new -= alpha*v; // overwrite v_new
@@ -102,8 +101,6 @@ namespace Eigen {
beta_new2 = v_new.dot(w_new); // compute beta_new
eigen_assert(beta_new2 >= 0.0 && "PRECONDITIONER IS NOT POSITIVE DEFINITE");
beta_new = sqrt(beta_new2); // compute beta_new
- v_new /= beta_new; // overwrite v_new for next iteration
- w_new /= beta_new; // overwrite w_new for next iteration
// Givens rotation
const RealScalar r2 =s*alpha+c*c_old*beta; // s, s_old, c and c_old are still from previous iteration
@@ -117,7 +114,6 @@ namespace Eigen {
// Update solution
p_oold = p_old;
-// const VectorType p_oold(p_old); // NOT SURE IF CREATING p_oold EVERY ITERATION IS EFFICIENT
p_old = p;
p.noalias()=(w-r2*p_old-r3*p_oold) /r1; // IS NOALIAS REQUIRED?
x += beta_one*c*eta*p;
@@ -286,4 +282,3 @@ namespace Eigen {
} // end namespace Eigen
#endif // EIGEN_MINRES_H
-
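[Editorial note] The MINRES change moves the normalization of v_new and w_new from the end of one iteration (and the pre-loop setup) to the top of the next iteration, so the division by beta happens only when the vectors are actually reused. For orientation, here is a stand-alone sketch of the plain (unpreconditioned) Lanczos three-term recurrence that the preconditioned loop above builds on; the Eigen code corresponds to case A(2,7) in Paige (1972):

#include <iostream>
#include <Eigen/Dense>

int main() {
  const int n = 6, iters = 4;
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(n, n);
  A = (A + A.transpose()).eval();                 // Lanczos requires a symmetric matrix
  Eigen::VectorXd v = Eigen::VectorXd::Random(n).normalized();
  Eigen::VectorXd v_old = Eigen::VectorXd::Zero(n);
  double beta = 0.0;
  for (int j = 0; j < iters; ++j) {
    Eigen::VectorXd w = A * v - beta * v_old;     // expand the Krylov subspace
    const double alpha = v.dot(w);
    w -= alpha * v;                               // orthogonalize against the current vector
    const double beta_new = w.norm();
    v_old = v;
    v = w / beta_new;                             // normalize for the next iteration
    beta = beta_new;
    std::cout << "alpha = " << alpha << ", beta = " << beta << "\n";
  }
  return 0;
}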
diff --git a/unsupported/Eigen/src/IterativeSolvers/Scaling.h b/unsupported/Eigen/src/IterativeSolvers/Scaling.h
index d113e6e90..9b3eb53e0 100644
--- a/unsupported/Eigen/src/IterativeSolvers/Scaling.h
+++ b/unsupported/Eigen/src/IterativeSolvers/Scaling.h
@@ -104,12 +104,18 @@ class IterScaling
for (int i = 0; i < m; ++i)
{
Dr(i) = std::sqrt(Dr(i));
+ }
+ for (int i = 0; i < n; ++i)
+ {
Dc(i) = std::sqrt(Dc(i));
}
// Save the scaling factors
for (int i = 0; i < m; ++i)
{
m_left(i) /= Dr(i);
+ }
+ for (int i = 0; i < n; ++i)
+ {
m_right(i) /= Dc(i);
}
// Scale the rows and the columns of the matrix
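[Editorial note] The IterScaling fix splits the single loop over m into separate loops over the m row factors and the n column factors: with a rectangular matrix the old combined loop would index Dc and m_right past their size when m > n, or leave column entries unscaled when m < n. The shape of the fix, with hypothetical names:

#include <cmath>
#include <cstddef>
#include <vector>

// Row and column scale factors have different lengths for an m-by-n matrix,
// so they must be finalized in separate loops.
void finalize_scaling(std::vector<double>& Dr, std::vector<double>& Dc) {
  for (std::size_t i = 0; i < Dr.size(); ++i)   // m row factors
    Dr[i] = std::sqrt(Dr[i]);
  for (std::size_t j = 0; j < Dc.size(); ++j)   // n column factors
    Dc[j] = std::sqrt(Dc[j]);
}

int main() {
  std::vector<double> Dr(3, 4.0), Dc(5, 9.0);   // m = 3 rows, n = 5 columns
  finalize_scaling(Dr, Dc);
  return (Dr[0] == 2.0 && Dc[4] == 3.0) ? 0 : 1;
}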
diff --git a/unsupported/Eigen/src/LevenbergMarquardt/LMqrsolv.h b/unsupported/Eigen/src/LevenbergMarquardt/LMqrsolv.h
index ae9d793b1..123485817 100644
--- a/unsupported/Eigen/src/LevenbergMarquardt/LMqrsolv.h
+++ b/unsupported/Eigen/src/LevenbergMarquardt/LMqrsolv.h
@@ -73,7 +73,7 @@ void lmqrsolv(
qtbpj = -givens.s() * wa[k] + givens.c() * qtbpj;
wa[k] = temp;
- /* accumulate the tranformation in the row of s. */
+ /* accumulate the transformation in the row of s. */
for (i = k+1; i<n; ++i) {
temp = givens.c() * s(i,k) + givens.s() * sdiag[i];
sdiag[i] = -givens.s() * s(i,k) + givens.c() * sdiag[i];
diff --git a/unsupported/Eigen/src/LevenbergMarquardt/LevenbergMarquardt.h b/unsupported/Eigen/src/LevenbergMarquardt/LevenbergMarquardt.h
index 995427978..62561da1d 100644
--- a/unsupported/Eigen/src/LevenbergMarquardt/LevenbergMarquardt.h
+++ b/unsupported/Eigen/src/LevenbergMarquardt/LevenbergMarquardt.h
@@ -117,7 +117,7 @@ class LevenbergMarquardt : internal::no_assignment_operator
typedef typename JacobianType::RealScalar RealScalar;
typedef typename QRSolver::StorageIndex PermIndex;
typedef Matrix<Scalar,Dynamic,1> FVectorType;
- typedef PermutationMatrix<Dynamic,Dynamic> PermutationType;
+ typedef PermutationMatrix<Dynamic,Dynamic,int> PermutationType;
public:
LevenbergMarquardt(FunctorType& functor)
: m_functor(functor),m_nfev(0),m_njev(0),m_fnorm(0.0),m_gnorm(0),
@@ -233,9 +233,9 @@ class LevenbergMarquardt : internal::no_assignment_operator
/**
* \brief Reports whether the minimization was successful
- * \returns \c Success if the minimization was succesful,
+ * \returns \c Success if the minimization was successful,
* \c NumericalIssue if a numerical problem arises during the
- * minimization process, for exemple during the QR factorization
+ * minimization process, for example during the QR factorization
* \c NoConvergence if the minimization did not converge after
* the maximum number of function evaluation allowed
* \c InvalidInput if the input matrix is invalid
diff --git a/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h b/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h
index bb6d9e1fe..8aebcd67a 100644
--- a/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h
+++ b/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h
@@ -234,12 +234,13 @@ struct matrix_exp_computeUV<MatrixType, float>
template <typename MatrixType>
struct matrix_exp_computeUV<MatrixType, double>
{
+ typedef typename NumTraits<typename traits<MatrixType>::Scalar>::Real RealScalar;
template <typename ArgType>
static void run(const ArgType& arg, MatrixType& U, MatrixType& V, int& squarings)
{
using std::frexp;
using std::pow;
- const double l1norm = arg.cwiseAbs().colwise().sum().maxCoeff();
+ const RealScalar l1norm = arg.cwiseAbs().colwise().sum().maxCoeff();
squarings = 0;
if (l1norm < 1.495585217958292e-002) {
matrix_exp_pade3(arg, U, V);
@@ -250,10 +251,10 @@ struct matrix_exp_computeUV<MatrixType, double>
} else if (l1norm < 2.097847961257068e+000) {
matrix_exp_pade9(arg, U, V);
} else {
- const double maxnorm = 5.371920351148152;
+ const RealScalar maxnorm = 5.371920351148152;
frexp(l1norm / maxnorm, &squarings);
if (squarings < 0) squarings = 0;
- MatrixType A = arg.unaryExpr(MatrixExponentialScalingOp<double>(squarings));
+ MatrixType A = arg.unaryExpr(MatrixExponentialScalingOp<RealScalar>(squarings));
matrix_exp_pade13(A, U, V);
}
}
@@ -313,7 +314,7 @@ struct matrix_exp_computeUV<MatrixType, long double>
matrix_exp_pade17(A, U, V);
}
-#elif LDBL_MANT_DIG <= 112 // quadruple precison
+#elif LDBL_MANT_DIG <= 112 // quadruple precision
if (l1norm < 1.639394610288918690547467954466970e-005L) {
matrix_exp_pade3(arg, U, V);
@@ -326,6 +327,7 @@ struct matrix_exp_computeUV<MatrixType, long double>
} else if (l1norm < 1.125358383453143065081397882891878e+000L) {
matrix_exp_pade13(arg, U, V);
} else {
+ const long double maxnorm = 2.884233277829519311757165057717815L;
frexp(l1norm / maxnorm, &squarings);
if (squarings < 0) squarings = 0;
MatrixType A = arg.unaryExpr(MatrixExponentialScalingOp<long double>(squarings));
@@ -342,6 +344,27 @@ struct matrix_exp_computeUV<MatrixType, long double>
}
};
+template<typename T> struct is_exp_known_type : false_type {};
+template<> struct is_exp_known_type<float> : true_type {};
+template<> struct is_exp_known_type<double> : true_type {};
+#if LDBL_MANT_DIG <= 112
+template<> struct is_exp_known_type<long double> : true_type {};
+#endif
+
+template <typename ArgType, typename ResultType>
+void matrix_exp_compute(const ArgType& arg, ResultType &result, true_type) // natively supported scalar type
+{
+ typedef typename ArgType::PlainObject MatrixType;
+ MatrixType U, V;
+ int squarings;
+ matrix_exp_computeUV<MatrixType>::run(arg, U, V, squarings); // Pade approximant is (U+V) / (-U+V)
+ MatrixType numer = U + V;
+ MatrixType denom = -U + V;
+ result = denom.partialPivLu().solve(numer);
+ for (int i=0; i<squarings; i++)
+ result *= result; // undo scaling by repeated squaring
+}
+
/* Computes the matrix exponential
*
@@ -349,26 +372,13 @@ struct matrix_exp_computeUV<MatrixType, long double>
* \param result variable in which result will be stored
*/
template <typename ArgType, typename ResultType>
-void matrix_exp_compute(const ArgType& arg, ResultType &result)
+void matrix_exp_compute(const ArgType& arg, ResultType &result, false_type) // default
{
typedef typename ArgType::PlainObject MatrixType;
-#if LDBL_MANT_DIG > 112 // rarely happens
typedef typename traits<MatrixType>::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
typedef typename std::complex<RealScalar> ComplexScalar;
- if (sizeof(RealScalar) > 14) {
- result = arg.matrixFunction(internal::stem_function_exp<ComplexScalar>);
- return;
- }
-#endif
- MatrixType U, V;
- int squarings;
- matrix_exp_computeUV<MatrixType>::run(arg, U, V, squarings); // Pade approximant is (U+V) / (-U+V)
- MatrixType numer = U + V;
- MatrixType denom = -U + V;
- result = denom.partialPivLu().solve(numer);
- for (int i=0; i<squarings; i++)
- result *= result; // undo scaling by repeated squaring
+ result = arg.matrixFunction(internal::stem_function_exp<ComplexScalar>);
}
} // end namespace Eigen::internal
@@ -386,7 +396,6 @@ void matrix_exp_compute(const ArgType& arg, ResultType &result)
template<typename Derived> struct MatrixExponentialReturnValue
: public ReturnByValue<MatrixExponentialReturnValue<Derived> >
{
- typedef typename Derived::Index Index;
public:
/** \brief Constructor.
*
@@ -402,7 +411,7 @@ template<typename Derived> struct MatrixExponentialReturnValue
inline void evalTo(ResultType& result) const
{
const typename internal::nested_eval<Derived, 10>::type tmp(m_src);
- internal::matrix_exp_compute(tmp, result);
+ internal::matrix_exp_compute(tmp, result, internal::is_exp_known_type<typename Derived::Scalar>());
}
Index rows() const { return m_src.rows(); }
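[Editorial note] The refactoring above splits matrix_exp_compute into two overloads selected by an is_exp_known_type tag: scalar types with hard-coded Pade coefficients (float, double, and long double up to quadruple precision) take the Pade-plus-scaling-and-squaring path, while everything else falls back to the generic matrixFunction route. A self-contained analogue of the compile-time dispatch pattern, with illustrative names rather than Eigen's internals:

#include <iostream>
#include <type_traits>

template <typename T> struct has_fast_path : std::false_type {};
template <> struct has_fast_path<float>  : std::true_type {};
template <> struct has_fast_path<double> : std::true_type {};

template <typename T>
void compute(const T&, std::true_type)  { std::cout << "Pade approximant + squaring\n"; }

template <typename T>
void compute(const T&, std::false_type) { std::cout << "generic stem-function fallback\n"; }

template <typename T>
void compute(const T& x) { compute(x, has_fast_path<T>()); }  // tag dispatch, resolved at compile time

int main() {
  compute(1.0f);  // fast path
  compute(1.0);   // fast path
  compute(1.0L);  // fallback in this sketch
  return 0;
}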
diff --git a/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h b/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h
index db2449d02..133d78625 100644
--- a/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h
+++ b/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h
@@ -53,7 +53,7 @@ template <typename MatrixType>
typename NumTraits<typename MatrixType::Scalar>::Real matrix_function_compute_mu(const MatrixType& A)
{
typedef typename plain_col_type<MatrixType>::type VectorType;
- typename MatrixType::Index rows = A.rows();
+ Index rows = A.rows();
const MatrixType N = MatrixType::Identity(rows, rows) - A;
VectorType e = VectorType::Ones(rows);
N.template triangularView<Upper>().solveInPlace(e);
@@ -65,7 +65,6 @@ MatrixType MatrixFunctionAtomic<MatrixType>::compute(const MatrixType& A)
{
// TODO: Use that A is upper triangular
typedef typename NumTraits<Scalar>::Real RealScalar;
- typedef typename MatrixType::Index Index;
Index rows = A.rows();
Scalar avgEival = A.trace() / Scalar(RealScalar(rows));
MatrixType Ashifted = A - avgEival * MatrixType::Identity(rows, rows);
@@ -131,7 +130,6 @@ typename ListOfClusters::iterator matrix_function_find_cluster(Index key, ListOf
template <typename EivalsType, typename Cluster>
void matrix_function_partition_eigenvalues(const EivalsType& eivals, std::list<Cluster>& clusters)
{
- typedef typename EivalsType::Index Index;
typedef typename EivalsType::RealScalar RealScalar;
for (Index i=0; i<eivals.rows(); ++i) {
// Find cluster containing i-th ei'val, adding a new cluster if necessary
@@ -179,7 +177,7 @@ void matrix_function_compute_block_start(const VectorType& clusterSize, VectorTy
{
blockStart.resize(clusterSize.rows());
blockStart(0) = 0;
- for (typename VectorType::Index i = 1; i < clusterSize.rows(); i++) {
+ for (Index i = 1; i < clusterSize.rows(); i++) {
blockStart(i) = blockStart(i-1) + clusterSize(i-1);
}
}
@@ -188,7 +186,6 @@ void matrix_function_compute_block_start(const VectorType& clusterSize, VectorTy
template <typename EivalsType, typename ListOfClusters, typename VectorType>
void matrix_function_compute_map(const EivalsType& eivals, const ListOfClusters& clusters, VectorType& eivalToCluster)
{
- typedef typename EivalsType::Index Index;
eivalToCluster.resize(eivals.rows());
Index clusterIndex = 0;
for (typename ListOfClusters::const_iterator cluster = clusters.begin(); cluster != clusters.end(); ++cluster) {
@@ -205,7 +202,6 @@ void matrix_function_compute_map(const EivalsType& eivals, const ListOfClusters&
template <typename DynVectorType, typename VectorType>
void matrix_function_compute_permutation(const DynVectorType& blockStart, const DynVectorType& eivalToCluster, VectorType& permutation)
{
- typedef typename VectorType::Index Index;
DynVectorType indexNextEntry = blockStart;
permutation.resize(eivalToCluster.rows());
for (Index i = 0; i < eivalToCluster.rows(); i++) {
@@ -219,7 +215,6 @@ void matrix_function_compute_permutation(const DynVectorType& blockStart, const
template <typename VectorType, typename MatrixType>
void matrix_function_permute_schur(VectorType& permutation, MatrixType& U, MatrixType& T)
{
- typedef typename VectorType::Index Index;
for (Index i = 0; i < permutation.rows() - 1; i++) {
Index j;
for (j = i; j < permutation.rows(); j++) {
@@ -247,7 +242,7 @@ template <typename MatrixType, typename AtomicType, typename VectorType>
void matrix_function_compute_block_atomic(const MatrixType& T, AtomicType& atomic, const VectorType& blockStart, const VectorType& clusterSize, MatrixType& fT)
{
fT.setZero(T.rows(), T.cols());
- for (typename VectorType::Index i = 0; i < clusterSize.rows(); ++i) {
+ for (Index i = 0; i < clusterSize.rows(); ++i) {
fT.block(blockStart(i), blockStart(i), clusterSize(i), clusterSize(i))
= atomic.compute(T.block(blockStart(i), blockStart(i), clusterSize(i), clusterSize(i)));
}
@@ -285,7 +280,6 @@ MatrixType matrix_function_solve_triangular_sylvester(const MatrixType& A, const
eigen_assert(C.rows() == A.rows());
eigen_assert(C.cols() == B.rows());
- typedef typename MatrixType::Index Index;
typedef typename MatrixType::Scalar Scalar;
Index m = A.rows();
@@ -330,11 +324,8 @@ void matrix_function_compute_above_diagonal(const MatrixType& T, const VectorTyp
{
typedef internal::traits<MatrixType> Traits;
typedef typename MatrixType::Scalar Scalar;
- typedef typename MatrixType::Index Index;
- static const int RowsAtCompileTime = Traits::RowsAtCompileTime;
- static const int ColsAtCompileTime = Traits::ColsAtCompileTime;
static const int Options = MatrixType::Options;
- typedef Matrix<Scalar, Dynamic, Dynamic, Options, RowsAtCompileTime, ColsAtCompileTime> DynMatrixType;
+ typedef Matrix<Scalar, Dynamic, Dynamic, Options, Traits::RowsAtCompileTime, Traits::ColsAtCompileTime> DynMatrixType;
for (Index k = 1; k < clusterSize.rows(); k++) {
for (Index i = 0; i < clusterSize.rows() - k; i++) {
@@ -398,8 +389,8 @@ struct matrix_function_compute
template <typename MatrixType>
struct matrix_function_compute<MatrixType, 0>
{
- template <typename AtomicType, typename ResultType>
- static void run(const MatrixType& A, AtomicType& atomic, ResultType &result)
+ template <typename MatA, typename AtomicType, typename ResultType>
+ static void run(const MatA& A, AtomicType& atomic, ResultType &result)
{
typedef internal::traits<MatrixType> Traits;
typedef typename Traits::Scalar Scalar;
@@ -422,14 +413,14 @@ struct matrix_function_compute<MatrixType, 0>
template <typename MatrixType>
struct matrix_function_compute<MatrixType, 1>
{
- template <typename AtomicType, typename ResultType>
- static void run(const MatrixType& A, AtomicType& atomic, ResultType &result)
+ template <typename MatA, typename AtomicType, typename ResultType>
+ static void run(const MatA& A, AtomicType& atomic, ResultType &result)
{
typedef internal::traits<MatrixType> Traits;
- typedef typename MatrixType::Index Index;
// compute Schur decomposition of A
- const ComplexSchur<MatrixType> schurOfA(A);
+ const ComplexSchur<MatrixType> schurOfA(A);
+ eigen_assert(schurOfA.info()==Success);
MatrixType T = schurOfA.matrixT();
MatrixType U = schurOfA.matrixU();
@@ -481,7 +472,6 @@ template<typename Derived> class MatrixFunctionReturnValue
{
public:
typedef typename Derived::Scalar Scalar;
- typedef typename Derived::Index Index;
typedef typename internal::stem_function<Scalar>::type StemFunction;
protected:
@@ -506,15 +496,13 @@ template<typename Derived> class MatrixFunctionReturnValue
typedef typename internal::nested_eval<Derived, 10>::type NestedEvalType;
typedef typename internal::remove_all<NestedEvalType>::type NestedEvalTypeClean;
typedef internal::traits<NestedEvalTypeClean> Traits;
- static const int RowsAtCompileTime = Traits::RowsAtCompileTime;
- static const int ColsAtCompileTime = Traits::ColsAtCompileTime;
typedef std::complex<typename NumTraits<Scalar>::Real> ComplexScalar;
- typedef Matrix<ComplexScalar, Dynamic, Dynamic, 0, RowsAtCompileTime, ColsAtCompileTime> DynMatrixType;
+ typedef Matrix<ComplexScalar, Dynamic, Dynamic, 0, Traits::RowsAtCompileTime, Traits::ColsAtCompileTime> DynMatrixType;
typedef internal::MatrixFunctionAtomic<DynMatrixType> AtomicType;
AtomicType atomic(m_f);
- internal::matrix_function_compute<NestedEvalTypeClean>::run(m_A, atomic, result);
+ internal::matrix_function_compute<typename NestedEvalTypeClean::PlainObject>::run(m_A, atomic, result);
}
Index rows() const { return m_A.rows(); }
diff --git a/unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h b/unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h
index 1acfbed9e..a8d879a12 100644
--- a/unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h
+++ b/unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h
@@ -332,14 +332,12 @@ public:
typedef typename internal::nested_eval<Derived, 10>::type DerivedEvalType;
typedef typename internal::remove_all<DerivedEvalType>::type DerivedEvalTypeClean;
typedef internal::traits<DerivedEvalTypeClean> Traits;
- static const int RowsAtCompileTime = Traits::RowsAtCompileTime;
- static const int ColsAtCompileTime = Traits::ColsAtCompileTime;
typedef std::complex<typename NumTraits<Scalar>::Real> ComplexScalar;
- typedef Matrix<ComplexScalar, Dynamic, Dynamic, 0, RowsAtCompileTime, ColsAtCompileTime> DynMatrixType;
+ typedef Matrix<ComplexScalar, Dynamic, Dynamic, 0, Traits::RowsAtCompileTime, Traits::ColsAtCompileTime> DynMatrixType;
typedef internal::MatrixLogarithmAtomic<DynMatrixType> AtomicType;
AtomicType atomic;
- internal::matrix_function_compute<DerivedEvalTypeClean>::run(m_A, atomic, result);
+ internal::matrix_function_compute<typename DerivedEvalTypeClean::PlainObject>::run(m_A, atomic, result);
}
Index rows() const { return m_A.rows(); }
diff --git a/unsupported/Eigen/src/MatrixFunctions/MatrixPower.h b/unsupported/Eigen/src/MatrixFunctions/MatrixPower.h
index ebc433d89..1ceb5cf39 100644
--- a/unsupported/Eigen/src/MatrixFunctions/MatrixPower.h
+++ b/unsupported/Eigen/src/MatrixFunctions/MatrixPower.h
@@ -40,7 +40,6 @@ class MatrixPowerParenthesesReturnValue : public ReturnByValue< MatrixPowerParen
{
public:
typedef typename MatrixType::RealScalar RealScalar;
- typedef typename MatrixType::Index Index;
/**
* \brief Constructor.
@@ -81,7 +80,7 @@ class MatrixPowerParenthesesReturnValue : public ReturnByValue< MatrixPowerParen
*
* \note Currently this class is only used by MatrixPower. One may
* insist that this be nested into MatrixPower. This class is here to
- * faciliate future development of triangular matrix functions.
+ * facilitate future development of triangular matrix functions.
*/
template<typename MatrixType>
class MatrixPowerAtomic : internal::noncopyable
@@ -94,7 +93,6 @@ class MatrixPowerAtomic : internal::noncopyable
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef std::complex<RealScalar> ComplexScalar;
- typedef typename MatrixType::Index Index;
typedef Block<MatrixType,Dynamic,Dynamic> ResultType;
const MatrixType& m_A;
@@ -340,7 +338,6 @@ class MatrixPower : internal::noncopyable
private:
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
- typedef typename MatrixType::Index Index;
public:
/**
@@ -600,7 +597,6 @@ class MatrixPowerReturnValue : public ReturnByValue< MatrixPowerReturnValue<Deri
public:
typedef typename Derived::PlainObject PlainObject;
typedef typename Derived::RealScalar RealScalar;
- typedef typename Derived::Index Index;
/**
* \brief Constructor.
@@ -648,7 +644,6 @@ class MatrixComplexPowerReturnValue : public ReturnByValue< MatrixComplexPowerRe
public:
typedef typename Derived::PlainObject PlainObject;
typedef typename std::complex<typename Derived::RealScalar> ComplexScalar;
- typedef typename Derived::Index Index;
/**
* \brief Constructor.
diff --git a/unsupported/Eigen/src/MatrixFunctions/MatrixSquareRoot.h b/unsupported/Eigen/src/MatrixFunctions/MatrixSquareRoot.h
index afd88ec4d..34bf78913 100644
--- a/unsupported/Eigen/src/MatrixFunctions/MatrixSquareRoot.h
+++ b/unsupported/Eigen/src/MatrixFunctions/MatrixSquareRoot.h
@@ -17,7 +17,7 @@ namespace internal {
// pre: T.block(i,i,2,2) has complex conjugate eigenvalues
// post: sqrtT.block(i,i,2,2) is square root of T.block(i,i,2,2)
template <typename MatrixType, typename ResultType>
-void matrix_sqrt_quasi_triangular_2x2_diagonal_block(const MatrixType& T, typename MatrixType::Index i, ResultType& sqrtT)
+void matrix_sqrt_quasi_triangular_2x2_diagonal_block(const MatrixType& T, Index i, ResultType& sqrtT)
{
// TODO: This case (2-by-2 blocks with complex conjugate eigenvalues) is probably hidden somewhere
// in EigenSolver. If we expose it, we could call it directly from here.
@@ -32,7 +32,7 @@ void matrix_sqrt_quasi_triangular_2x2_diagonal_block(const MatrixType& T, typena
// all blocks of sqrtT to left of and below (i,j) are correct
// post: sqrtT(i,j) has the correct value
template <typename MatrixType, typename ResultType>
-void matrix_sqrt_quasi_triangular_1x1_off_diagonal_block(const MatrixType& T, typename MatrixType::Index i, typename MatrixType::Index j, ResultType& sqrtT)
+void matrix_sqrt_quasi_triangular_1x1_off_diagonal_block(const MatrixType& T, Index i, Index j, ResultType& sqrtT)
{
typedef typename traits<MatrixType>::Scalar Scalar;
Scalar tmp = (sqrtT.row(i).segment(i+1,j-i-1) * sqrtT.col(j).segment(i+1,j-i-1)).value();
@@ -41,7 +41,7 @@ void matrix_sqrt_quasi_triangular_1x1_off_diagonal_block(const MatrixType& T, ty
// similar to compute1x1offDiagonalBlock()
template <typename MatrixType, typename ResultType>
-void matrix_sqrt_quasi_triangular_1x2_off_diagonal_block(const MatrixType& T, typename MatrixType::Index i, typename MatrixType::Index j, ResultType& sqrtT)
+void matrix_sqrt_quasi_triangular_1x2_off_diagonal_block(const MatrixType& T, Index i, Index j, ResultType& sqrtT)
{
typedef typename traits<MatrixType>::Scalar Scalar;
Matrix<Scalar,1,2> rhs = T.template block<1,2>(i,j);
@@ -54,7 +54,7 @@ void matrix_sqrt_quasi_triangular_1x2_off_diagonal_block(const MatrixType& T, ty
// similar to compute1x1offDiagonalBlock()
template <typename MatrixType, typename ResultType>
-void matrix_sqrt_quasi_triangular_2x1_off_diagonal_block(const MatrixType& T, typename MatrixType::Index i, typename MatrixType::Index j, ResultType& sqrtT)
+void matrix_sqrt_quasi_triangular_2x1_off_diagonal_block(const MatrixType& T, Index i, Index j, ResultType& sqrtT)
{
typedef typename traits<MatrixType>::Scalar Scalar;
Matrix<Scalar,2,1> rhs = T.template block<2,1>(i,j);
@@ -101,7 +101,7 @@ void matrix_sqrt_quasi_triangular_solve_auxiliary_equation(MatrixType& X, const
// similar to compute1x1offDiagonalBlock()
template <typename MatrixType, typename ResultType>
-void matrix_sqrt_quasi_triangular_2x2_off_diagonal_block(const MatrixType& T, typename MatrixType::Index i, typename MatrixType::Index j, ResultType& sqrtT)
+void matrix_sqrt_quasi_triangular_2x2_off_diagonal_block(const MatrixType& T, Index i, Index j, ResultType& sqrtT)
{
typedef typename traits<MatrixType>::Scalar Scalar;
Matrix<Scalar,2,2> A = sqrtT.template block<2,2>(i,i);
@@ -120,7 +120,6 @@ template <typename MatrixType, typename ResultType>
void matrix_sqrt_quasi_triangular_diagonal(const MatrixType& T, ResultType& sqrtT)
{
using std::sqrt;
- typedef typename MatrixType::Index Index;
const Index size = T.rows();
for (Index i = 0; i < size; i++) {
if (i == size - 1 || T.coeff(i+1, i) == 0) {
@@ -139,7 +138,6 @@ void matrix_sqrt_quasi_triangular_diagonal(const MatrixType& T, ResultType& sqrt
template <typename MatrixType, typename ResultType>
void matrix_sqrt_quasi_triangular_off_diagonal(const MatrixType& T, ResultType& sqrtT)
{
- typedef typename MatrixType::Index Index;
const Index size = T.rows();
for (Index j = 1; j < size; j++) {
if (T.coeff(j, j-1) != 0) // if T(j-1:j, j-1:j) is a 2-by-2 block
@@ -206,8 +204,7 @@ template <typename MatrixType, typename ResultType>
void matrix_sqrt_triangular(const MatrixType &arg, ResultType &result)
{
using std::sqrt;
- typedef typename MatrixType::Index Index;
- typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::Scalar Scalar;
eigen_assert(arg.rows() == arg.cols());
@@ -318,7 +315,6 @@ template<typename Derived> class MatrixSquareRootReturnValue
: public ReturnByValue<MatrixSquareRootReturnValue<Derived> >
{
protected:
- typedef typename Derived::Index Index;
typedef typename internal::ref_selector<Derived>::type DerivedNested;
public:
diff --git a/unsupported/Eigen/src/NonLinearOptimization/qrsolv.h b/unsupported/Eigen/src/NonLinearOptimization/qrsolv.h
index feafd62a8..4f2f560b3 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/qrsolv.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/qrsolv.h
@@ -61,7 +61,7 @@ void qrsolv(
qtbpj = -givens.s() * wa[k] + givens.c() * qtbpj;
wa[k] = temp;
- /* accumulate the tranformation in the row of s. */
+ /* accumulate the transformation in the row of s. */
for (i = k+1; i<n; ++i) {
temp = givens.c() * s(i,k) + givens.s() * sdiag[i];
sdiag[i] = -givens.s() * s(i,k) + givens.c() * sdiag[i];
diff --git a/unsupported/Eigen/src/NonLinearOptimization/r1updt.h b/unsupported/Eigen/src/NonLinearOptimization/r1updt.h
index f28766061..09fc65255 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/r1updt.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/r1updt.h
@@ -22,7 +22,7 @@ void r1updt(
Scalar temp;
JacobiRotation<Scalar> givens;
- // r1updt had a broader usecase, but we dont use it here. And, more
+ // r1updt had a broader usecase, but we don't use it here. And, more
// importantly, we can not test it.
eigen_assert(m==n);
eigen_assert(u.size()==m);
diff --git a/unsupported/Eigen/src/Polynomials/Companion.h b/unsupported/Eigen/src/Polynomials/Companion.h
index e0af6ebe4..126be783b 100644
--- a/unsupported/Eigen/src/Polynomials/Companion.h
+++ b/unsupported/Eigen/src/Polynomials/Companion.h
@@ -89,13 +89,13 @@ class companion
{
const Index deg = m_monic.size();
const Index deg_1 = deg-1;
- DenseCompanionMatrixType companion(deg,deg);
- companion <<
+ DenseCompanionMatrixType companMat(deg,deg);
+ companMat <<
( LeftBlock(deg,deg_1)
<< LeftBlockFirstRow::Zero(1,deg_1),
BottomLeftBlock::Identity(deg-1,deg-1)*m_bl_diag.asDiagonal() ).finished()
, m_monic;
- return companion;
+ return companMat;
}
@@ -104,7 +104,7 @@ class companion
/** Helper function for the balancing algorithm.
* \returns true if the row and the column, having colNorm and rowNorm
* as norms, are balanced, false otherwise.
- * colB and rowB are repectively the multipliers for
+ * colB and rowB are respectively the multipliers for
* the column and the row in order to balance them.
* */
bool balanced( RealScalar colNorm, RealScalar rowNorm,
@@ -113,7 +113,7 @@ class companion
/** Helper function for the balancing algorithm.
* \returns true if the row and the column, having colNorm and rowNorm
* as norms, are balanced, false otherwise.
- * colB and rowB are repectively the multipliers for
+ * colB and rowB are respectively the multipliers for
* the column and the row in order to balance them.
* */
bool balancedR( RealScalar colNorm, RealScalar rowNorm,
diff --git a/unsupported/Eigen/src/Skyline/SkylineInplaceLU.h b/unsupported/Eigen/src/Skyline/SkylineInplaceLU.h
index a1f54ed35..bda057a85 100644
--- a/unsupported/Eigen/src/Skyline/SkylineInplaceLU.h
+++ b/unsupported/Eigen/src/Skyline/SkylineInplaceLU.h
@@ -41,7 +41,7 @@ public:
/** Sets the relative threshold value used to prune zero coefficients during the decomposition.
*
- * Setting a value greater than zero speeds up computation, and yields to an imcomplete
+ * Setting a value greater than zero speeds up computation, and yields to an incomplete
* factorization with fewer non zero coefficients. Such approximate factors are especially
* useful to initialize an iterative solver.
*
diff --git a/unsupported/Eigen/src/Skyline/SkylineMatrix.h b/unsupported/Eigen/src/Skyline/SkylineMatrix.h
index a2a8933ca..f77d79a04 100644
--- a/unsupported/Eigen/src/Skyline/SkylineMatrix.h
+++ b/unsupported/Eigen/src/Skyline/SkylineMatrix.h
@@ -206,26 +206,26 @@ public:
if (col > row) //upper matrix
{
const Index minOuterIndex = inner - m_data.upperProfile(inner);
- eigen_assert(outer >= minOuterIndex && "you try to acces a coeff that do not exist in the storage");
+ eigen_assert(outer >= minOuterIndex && "You tried to access a coeff that does not exist in the storage");
return this->m_data.upper(m_colStartIndex[inner] + outer - (inner - m_data.upperProfile(inner)));
}
if (col < row) //lower matrix
{
const Index minInnerIndex = outer - m_data.lowerProfile(outer);
- eigen_assert(inner >= minInnerIndex && "you try to acces a coeff that do not exist in the storage");
+ eigen_assert(inner >= minInnerIndex && "You tried to access a coeff that does not exist in the storage");
return this->m_data.lower(m_rowStartIndex[outer] + inner - (outer - m_data.lowerProfile(outer)));
}
} else {
if (outer > inner) //upper matrix
{
const Index maxOuterIndex = inner + m_data.upperProfile(inner);
- eigen_assert(outer <= maxOuterIndex && "you try to acces a coeff that do not exist in the storage");
+ eigen_assert(outer <= maxOuterIndex && "You tried to access a coeff that does not exist in the storage");
return this->m_data.upper(m_colStartIndex[inner] + (outer - inner));
}
if (outer < inner) //lower matrix
{
const Index maxInnerIndex = outer + m_data.lowerProfile(outer);
- eigen_assert(inner <= maxInnerIndex && "you try to acces a coeff that do not exist in the storage");
+ eigen_assert(inner <= maxInnerIndex && "You tried to access a coeff that does not exist in the storage");
return this->m_data.lower(m_rowStartIndex[outer] + (inner - outer));
}
}
@@ -300,11 +300,11 @@ public:
if (IsRowMajor) {
const Index minInnerIndex = outer - m_data.lowerProfile(outer);
- eigen_assert(inner >= minInnerIndex && "you try to acces a coeff that do not exist in the storage");
+ eigen_assert(inner >= minInnerIndex && "You tried to access a coeff that does not exist in the storage");
return this->m_data.lower(m_rowStartIndex[outer] + inner - (outer - m_data.lowerProfile(outer)));
} else {
const Index maxInnerIndex = outer + m_data.lowerProfile(outer);
- eigen_assert(inner <= maxInnerIndex && "you try to acces a coeff that do not exist in the storage");
+ eigen_assert(inner <= maxInnerIndex && "You tried to access a coeff that does not exist in the storage");
return this->m_data.lower(m_rowStartIndex[outer] + (inner - outer));
}
}
@@ -336,11 +336,11 @@ public:
if (IsRowMajor) {
const Index minOuterIndex = inner - m_data.upperProfile(inner);
- eigen_assert(outer >= minOuterIndex && "you try to acces a coeff that do not exist in the storage");
+ eigen_assert(outer >= minOuterIndex && "You tried to access a coeff that does not exist in the storage");
return this->m_data.upper(m_colStartIndex[inner] + outer - (inner - m_data.upperProfile(inner)));
} else {
const Index maxOuterIndex = inner + m_data.upperProfile(inner);
- eigen_assert(outer <= maxOuterIndex && "you try to acces a coeff that do not exist in the storage");
+ eigen_assert(outer <= maxOuterIndex && "You tried to access a coeff that does not exist in the storage");
return this->m_data.upper(m_colStartIndex[inner] + (outer - inner));
}
}
diff --git a/unsupported/Eigen/src/SparseExtra/DynamicSparseMatrix.h b/unsupported/Eigen/src/SparseExtra/DynamicSparseMatrix.h
index 037a13f86..3f1ff14ad 100644
--- a/unsupported/Eigen/src/SparseExtra/DynamicSparseMatrix.h
+++ b/unsupported/Eigen/src/SparseExtra/DynamicSparseMatrix.h
@@ -187,7 +187,7 @@ template<typename _Scalar, int _Options, typename _StorageIndex>
/** Does nothing: provided for compatibility with SparseMatrix */
inline void finalize() {}
- /** Suppress all nonzeros which are smaller than \a reference under the tolerence \a epsilon */
+ /** Suppress all nonzeros which are smaller than \a reference under the tolerance \a epsilon */
void prune(Scalar reference, RealScalar epsilon = NumTraits<RealScalar>::dummy_precision())
{
for (Index j=0; j<outerSize(); ++j)
@@ -224,21 +224,21 @@ template<typename _Scalar, int _Options, typename _StorageIndex>
}
}
- /** The class DynamicSparseMatrix is deprectaed */
+ /** The class DynamicSparseMatrix is deprecated */
EIGEN_DEPRECATED inline DynamicSparseMatrix()
: m_innerSize(0), m_data(0)
{
eigen_assert(innerSize()==0 && outerSize()==0);
}
- /** The class DynamicSparseMatrix is deprectaed */
+ /** The class DynamicSparseMatrix is deprecated */
EIGEN_DEPRECATED inline DynamicSparseMatrix(Index rows, Index cols)
: m_innerSize(0)
{
resize(rows, cols);
}
- /** The class DynamicSparseMatrix is deprectaed */
+ /** The class DynamicSparseMatrix is deprecated */
template<typename OtherDerived>
EIGEN_DEPRECATED explicit inline DynamicSparseMatrix(const SparseMatrixBase<OtherDerived>& other)
: m_innerSize(0)
diff --git a/unsupported/Eigen/src/SparseExtra/MarketIO.h b/unsupported/Eigen/src/SparseExtra/MarketIO.h
index fc70a24d8..1618b09a8 100644
--- a/unsupported/Eigen/src/SparseExtra/MarketIO.h
+++ b/unsupported/Eigen/src/SparseExtra/MarketIO.h
@@ -104,11 +104,12 @@ namespace internal
out << value.real << " " << value.imag()<< "\n";
}
-} // end namepsace internal
+} // end namespace internal
inline bool getMarketHeader(const std::string& filename, int& sym, bool& iscomplex, bool& isvector)
{
sym = 0;
+ iscomplex = false;
isvector = false;
std::ifstream in(filename.c_str(),std::ios::in);
if(!in)
diff --git a/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsArrayAPI.h b/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsArrayAPI.h
index ed415db99..ed6d83251 100644
--- a/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsArrayAPI.h
+++ b/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsArrayAPI.h
@@ -24,7 +24,7 @@ namespace Eigen {
* \sa Eigen::igammac(), Eigen::lgamma()
*/
template<typename Derived,typename ExponentDerived>
-inline const Eigen::CwiseBinaryOp<Eigen::internal::scalar_igamma_op<typename Derived::Scalar>, const Derived, const ExponentDerived>
+EIGEN_STRONG_INLINE const Eigen::CwiseBinaryOp<Eigen::internal::scalar_igamma_op<typename Derived::Scalar>, const Derived, const ExponentDerived>
igamma(const Eigen::ArrayBase<Derived>& a, const Eigen::ArrayBase<ExponentDerived>& x)
{
return Eigen::CwiseBinaryOp<Eigen::internal::scalar_igamma_op<typename Derived::Scalar>, const Derived, const ExponentDerived>(
@@ -33,6 +33,48 @@ igamma(const Eigen::ArrayBase<Derived>& a, const Eigen::ArrayBase<ExponentDerive
);
}
+/** \cpp11 \returns an expression of the coefficient-wise igamma_der_a(\a a, \a x) to the given arrays.
+ *
+ * This function computes the coefficient-wise derivative of the incomplete
+ * gamma function with respect to the parameter a.
+ *
+ * \note This function supports only float and double scalar types in c++11
+ * mode. To support other scalar types,
+ * or float/double in non c++11 mode, the user has to provide implementations
+ * of igamma_der_a(T,T) for any scalar
+ * type T to be supported.
+ *
+ * \sa Eigen::igamma(), Eigen::lgamma()
+ */
+template <typename Derived, typename ExponentDerived>
+EIGEN_STRONG_INLINE const Eigen::CwiseBinaryOp<Eigen::internal::scalar_igamma_der_a_op<typename Derived::Scalar>, const Derived, const ExponentDerived>
+igamma_der_a(const Eigen::ArrayBase<Derived>& a, const Eigen::ArrayBase<ExponentDerived>& x) {
+ return Eigen::CwiseBinaryOp<Eigen::internal::scalar_igamma_der_a_op<typename Derived::Scalar>, const Derived, const ExponentDerived>(
+ a.derived(),
+ x.derived());
+}
+
+/** \cpp11 \returns an expression of the coefficient-wise gamma_sample_der_alpha(\a alpha, \a sample) to the given arrays.
+ *
+ * This function computes the coefficient-wise derivative of the sample
+ * of a Gamma(alpha, 1) random variable with respect to the parameter alpha.
+ *
+ * \note This function supports only float and double scalar types in c++11
+ * mode. To support other scalar types,
+ * or float/double in non c++11 mode, the user has to provide implementations
+ * of gamma_sample_der_alpha(T,T) for any scalar
+ * type T to be supported.
+ *
+ * \sa Eigen::igamma(), Eigen::lgamma()
+ */
+template <typename AlphaDerived, typename SampleDerived>
+EIGEN_STRONG_INLINE const Eigen::CwiseBinaryOp<Eigen::internal::scalar_gamma_sample_der_alpha_op<typename AlphaDerived::Scalar>, const AlphaDerived, const SampleDerived>
+gamma_sample_der_alpha(const Eigen::ArrayBase<AlphaDerived>& alpha, const Eigen::ArrayBase<SampleDerived>& sample) {
+ return Eigen::CwiseBinaryOp<Eigen::internal::scalar_gamma_sample_der_alpha_op<typename AlphaDerived::Scalar>, const AlphaDerived, const SampleDerived>(
+ alpha.derived(),
+ sample.derived());
+}
+
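[Editorial note] A usage sketch for the two array functions added above, assuming a C++11 build with the module included via <unsupported/Eigen/SpecialFunctions>; both evaluate coefficient-wise on float or double arrays:

#include <iostream>
#include <Eigen/Core>
#include <unsupported/Eigen/SpecialFunctions>

int main() {
  Eigen::ArrayXd a(3), x(3);
  a << 0.5, 1.0, 2.0;
  x << 0.5, 1.5, 2.5;
  Eigen::ArrayXd da = Eigen::igamma_der_a(a, x);            // d/da igamma(a, x)
  Eigen::ArrayXd ds = Eigen::gamma_sample_der_alpha(a, x);  // d/dalpha of a Gamma(alpha, 1) sample
  std::cout << da.transpose() << "\n" << ds.transpose() << "\n";
  return 0;
}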
/** \cpp11 \returns an expression of the coefficient-wise igammac(\a a, \a x) to the given arrays.
*
* This function computes the coefficient-wise complementary incomplete gamma function.
@@ -44,7 +86,7 @@ igamma(const Eigen::ArrayBase<Derived>& a, const Eigen::ArrayBase<ExponentDerive
* \sa Eigen::igamma(), Eigen::lgamma()
*/
template<typename Derived,typename ExponentDerived>
-inline const Eigen::CwiseBinaryOp<Eigen::internal::scalar_igammac_op<typename Derived::Scalar>, const Derived, const ExponentDerived>
+EIGEN_STRONG_INLINE const Eigen::CwiseBinaryOp<Eigen::internal::scalar_igammac_op<typename Derived::Scalar>, const Derived, const ExponentDerived>
igammac(const Eigen::ArrayBase<Derived>& a, const Eigen::ArrayBase<ExponentDerived>& x)
{
return Eigen::CwiseBinaryOp<Eigen::internal::scalar_igammac_op<typename Derived::Scalar>, const Derived, const ExponentDerived>(
@@ -66,7 +108,7 @@ igammac(const Eigen::ArrayBase<Derived>& a, const Eigen::ArrayBase<ExponentDeriv
// * \warning Be careful with the order of the parameters: x.polygamma(n) is equivalent to polygamma(n,x)
// * \sa ArrayBase::polygamma()
template<typename DerivedN,typename DerivedX>
-inline const Eigen::CwiseBinaryOp<Eigen::internal::scalar_polygamma_op<typename DerivedX::Scalar>, const DerivedN, const DerivedX>
+EIGEN_STRONG_INLINE const Eigen::CwiseBinaryOp<Eigen::internal::scalar_polygamma_op<typename DerivedX::Scalar>, const DerivedN, const DerivedX>
polygamma(const Eigen::ArrayBase<DerivedN>& n, const Eigen::ArrayBase<DerivedX>& x)
{
return Eigen::CwiseBinaryOp<Eigen::internal::scalar_polygamma_op<typename DerivedX::Scalar>, const DerivedN, const DerivedX>(
@@ -86,7 +128,7 @@ polygamma(const Eigen::ArrayBase<DerivedN>& n, const Eigen::ArrayBase<DerivedX>&
* \sa Eigen::betainc(), Eigen::lgamma()
*/
template<typename ArgADerived, typename ArgBDerived, typename ArgXDerived>
-inline const Eigen::CwiseTernaryOp<Eigen::internal::scalar_betainc_op<typename ArgXDerived::Scalar>, const ArgADerived, const ArgBDerived, const ArgXDerived>
+EIGEN_STRONG_INLINE const Eigen::CwiseTernaryOp<Eigen::internal::scalar_betainc_op<typename ArgXDerived::Scalar>, const ArgADerived, const ArgBDerived, const ArgXDerived>
betainc(const Eigen::ArrayBase<ArgADerived>& a, const Eigen::ArrayBase<ArgBDerived>& b, const Eigen::ArrayBase<ArgXDerived>& x)
{
return Eigen::CwiseTernaryOp<Eigen::internal::scalar_betainc_op<typename ArgXDerived::Scalar>, const ArgADerived, const ArgBDerived, const ArgXDerived>(
@@ -110,7 +152,7 @@ betainc(const Eigen::ArrayBase<ArgADerived>& a, const Eigen::ArrayBase<ArgBDeriv
* \sa ArrayBase::zeta()
*/
template<typename DerivedX,typename DerivedQ>
-inline const Eigen::CwiseBinaryOp<Eigen::internal::scalar_zeta_op<typename DerivedX::Scalar>, const DerivedX, const DerivedQ>
+EIGEN_STRONG_INLINE const Eigen::CwiseBinaryOp<Eigen::internal::scalar_zeta_op<typename DerivedX::Scalar>, const DerivedX, const DerivedQ>
zeta(const Eigen::ArrayBase<DerivedX>& x, const Eigen::ArrayBase<DerivedQ>& q)
{
return Eigen::CwiseBinaryOp<Eigen::internal::scalar_zeta_op<typename DerivedX::Scalar>, const DerivedX, const DerivedQ>(
@@ -119,6 +161,52 @@ zeta(const Eigen::ArrayBase<DerivedX>& x, const Eigen::ArrayBase<DerivedQ>& q)
);
}
+/** \returns an expression of the coefficient-wise i0e(\a x) to the given
+ * arrays.
+ *
+ * It returns the exponentially scaled modified Bessel
+ * function of order zero.
+ *
+ * \param x is the argument
+ *
+ * \note This function supports only float and double scalar types. To support
+ * other scalar types, the user has to provide implementations of i0e(T) for
+ * any scalar type T to be supported.
+ *
+ * \sa ArrayBase::i0e()
+ */
+template <typename Derived>
+EIGEN_STRONG_INLINE const Eigen::CwiseUnaryOp<
+ Eigen::internal::scalar_i0e_op<typename Derived::Scalar>, const Derived>
+i0e(const Eigen::ArrayBase<Derived>& x) {
+ return Eigen::CwiseUnaryOp<
+ Eigen::internal::scalar_i0e_op<typename Derived::Scalar>,
+ const Derived>(x.derived());
+}
+
+/** \returns an expression of the coefficient-wise i1e(\a x) to the given
+ * arrays.
+ *
+ * It returns the exponentially scaled modified Bessel
+ * function of order one.
+ *
+ * \param x is the argument
+ *
+ * \note This function supports only float and double scalar types. To support
+ * other scalar types, the user has to provide implementations of i1e(T) for
+ * any scalar type T to be supported.
+ *
+ * \sa ArrayBase::i1e()
+ */
+template <typename Derived>
+EIGEN_STRONG_INLINE const Eigen::CwiseUnaryOp<
+ Eigen::internal::scalar_i1e_op<typename Derived::Scalar>, const Derived>
+i1e(const Eigen::ArrayBase<Derived>& x) {
+ return Eigen::CwiseUnaryOp<
+ Eigen::internal::scalar_i1e_op<typename Derived::Scalar>,
+ const Derived>(x.derived());
+}
+
} // end namespace Eigen
#endif // EIGEN_SPECIALFUNCTIONS_ARRAYAPI_H
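A minimal usage sketch for the array-level i0e()/i1e() added above (assumes this patch is applied and the unsupported SpecialFunctions module is available; the sample values are arbitrary):

#include <iostream>
#include <Eigen/Core>
#include <unsupported/Eigen/SpecialFunctions>

int main() {
  // Only float and double scalars are supported, per the notes above.
  Eigen::ArrayXd x = Eigen::ArrayXd::LinSpaced(4, 0.5, 8.0);
  Eigen::ArrayXd y0 = Eigen::i0e(x);  // exponentially scaled I0, coefficient-wise
  Eigen::ArrayXd y1 = Eigen::i1e(x);  // exponentially scaled I1, coefficient-wise
  std::cout << y0.transpose() << "\n" << y1.transpose() << "\n";
  return 0;
}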
diff --git a/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsFunctors.h b/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsFunctors.h
index d8f2363be..c6fac91bb 100644
--- a/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsFunctors.h
+++ b/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsFunctors.h
@@ -41,6 +41,60 @@ struct functor_traits<scalar_igamma_op<Scalar> > {
};
};
+/** \internal
+ * \brief Template functor to compute the derivative of the incomplete gamma
+ * function igamma_der_a(a, x)
+ *
+ * \sa class CwiseBinaryOp, Cwise::igamma_der_a
+ */
+template <typename Scalar>
+struct scalar_igamma_der_a_op {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_igamma_der_a_op)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator()(const Scalar& a, const Scalar& x) const {
+ using numext::igamma_der_a;
+ return igamma_der_a(a, x);
+ }
+ template <typename Packet>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& x) const {
+ return internal::pigamma_der_a(a, x);
+ }
+};
+template <typename Scalar>
+struct functor_traits<scalar_igamma_der_a_op<Scalar> > {
+ enum {
+ // 2x the cost of igamma
+ Cost = 40 * NumTraits<Scalar>::MulCost + 20 * NumTraits<Scalar>::AddCost,
+ PacketAccess = packet_traits<Scalar>::HasIGammaDerA
+ };
+};
+
+/** \internal
+ * \brief Template functor to compute the derivative of the sample
+ * of a Gamma(alpha, 1) random variable with respect to the parameter alpha
+ * gamma_sample_der_alpha(alpha, sample)
+ *
+ * \sa class CwiseBinaryOp, Cwise::gamma_sample_der_alpha
+ */
+template <typename Scalar>
+struct scalar_gamma_sample_der_alpha_op {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_gamma_sample_der_alpha_op)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator()(const Scalar& alpha, const Scalar& sample) const {
+ using numext::gamma_sample_der_alpha;
+ return gamma_sample_der_alpha(alpha, sample);
+ }
+ template <typename Packet>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& alpha, const Packet& sample) const {
+ return internal::pgamma_sample_der_alpha(alpha, sample);
+ }
+};
+template <typename Scalar>
+struct functor_traits<scalar_gamma_sample_der_alpha_op<Scalar> > {
+ enum {
+ // 2x the cost of igamma, minus the lgamma cost (the lgamma cancels out)
+ Cost = 30 * NumTraits<Scalar>::MulCost + 15 * NumTraits<Scalar>::AddCost,
+ PacketAccess = packet_traits<Scalar>::HasGammaSampleDerAlpha
+ };
+};
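The two functors above forward to numext::igamma_der_a() and numext::gamma_sample_der_alpha(), which this patch also adds in SpecialFunctionsImpl.h. A scalar-level sketch of what they compute (values are arbitrary; assumes the patch is applied):

#include <iostream>
#include <unsupported/Eigen/SpecialFunctions>

int main() {
  const double a = 2.0, x = 1.5;
  // Forward-mode derivative of the incomplete gamma function: d/da igamma(a, x).
  const double d_igamma_da = Eigen::numext::igamma_der_a(a, x);
  // Implicit reparameterization derivative of a Gamma(a, 1) sample w.r.t. a,
  // treating x as a sample drawn from Gamma(a, 1).
  const double d_sample_da = Eigen::numext::gamma_sample_der_alpha(a, x);
  std::cout << d_igamma_da << " " << d_sample_da << "\n";
  return 0;
}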
/** \internal
* \brief Template functor to compute the complementary incomplete gamma function igammac(a, x)
@@ -101,11 +155,11 @@ struct functor_traits<scalar_betainc_op<Scalar> > {
*/
template<typename Scalar> struct scalar_lgamma_op {
EIGEN_EMPTY_STRUCT_CTOR(scalar_lgamma_op)
- EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const {
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a) const {
using numext::lgamma; return lgamma(a);
}
typedef typename packet_traits<Scalar>::type Packet;
- EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::plgamma(a); }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet packetOp(const Packet& a) const { return internal::plgamma(a); }
};
template<typename Scalar>
struct functor_traits<scalar_lgamma_op<Scalar> >
@@ -123,11 +177,11 @@ struct functor_traits<scalar_lgamma_op<Scalar> >
*/
template<typename Scalar> struct scalar_digamma_op {
EIGEN_EMPTY_STRUCT_CTOR(scalar_digamma_op)
- EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const {
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a) const {
using numext::digamma; return digamma(a);
}
typedef typename packet_traits<Scalar>::type Packet;
- EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::pdigamma(a); }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet packetOp(const Packet& a) const { return internal::pdigamma(a); }
};
template<typename Scalar>
struct functor_traits<scalar_digamma_op<Scalar> >
@@ -145,11 +199,11 @@ struct functor_traits<scalar_digamma_op<Scalar> >
*/
template<typename Scalar> struct scalar_zeta_op {
EIGEN_EMPTY_STRUCT_CTOR(scalar_zeta_op)
- EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& x, const Scalar& q) const {
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& x, const Scalar& q) const {
using numext::zeta; return zeta(x, q);
}
typedef typename packet_traits<Scalar>::type Packet;
- EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& x, const Packet& q) const { return internal::pzeta(x, q); }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet packetOp(const Packet& x, const Packet& q) const { return internal::pzeta(x, q); }
};
template<typename Scalar>
struct functor_traits<scalar_zeta_op<Scalar> >
@@ -167,11 +221,11 @@ struct functor_traits<scalar_zeta_op<Scalar> >
*/
template<typename Scalar> struct scalar_polygamma_op {
EIGEN_EMPTY_STRUCT_CTOR(scalar_polygamma_op)
- EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& n, const Scalar& x) const {
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& n, const Scalar& x) const {
using numext::polygamma; return polygamma(n, x);
}
typedef typename packet_traits<Scalar>::type Packet;
- EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& n, const Packet& x) const { return internal::ppolygamma(n, x); }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet packetOp(const Packet& n, const Packet& x) const { return internal::ppolygamma(n, x); }
};
template<typename Scalar>
struct functor_traits<scalar_polygamma_op<Scalar> >
@@ -190,11 +244,11 @@ struct functor_traits<scalar_polygamma_op<Scalar> >
*/
template<typename Scalar> struct scalar_erf_op {
EIGEN_EMPTY_STRUCT_CTOR(scalar_erf_op)
- EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const {
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a) const {
using numext::erf; return erf(a);
}
typedef typename packet_traits<Scalar>::type Packet;
- EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::perf(a); }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet packetOp(const Packet& a) const { return internal::perf(a); }
};
template<typename Scalar>
struct functor_traits<scalar_erf_op<Scalar> >
@@ -213,11 +267,11 @@ struct functor_traits<scalar_erf_op<Scalar> >
*/
template<typename Scalar> struct scalar_erfc_op {
EIGEN_EMPTY_STRUCT_CTOR(scalar_erfc_op)
- EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const {
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a) const {
using numext::erfc; return erfc(a);
}
typedef typename packet_traits<Scalar>::type Packet;
- EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::perfc(a); }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet packetOp(const Packet& a) const { return internal::perfc(a); }
};
template<typename Scalar>
struct functor_traits<scalar_erfc_op<Scalar> >
@@ -229,6 +283,60 @@ struct functor_traits<scalar_erfc_op<Scalar> >
};
};
+/** \internal
+ * \brief Template functor to compute the exponentially scaled modified Bessel
+ * function of order zero
+ * \sa class CwiseUnaryOp, Cwise::i0e()
+ */
+template <typename Scalar>
+struct scalar_i0e_op {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_i0e_op)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator()(const Scalar& x) const {
+ using numext::i0e;
+ return i0e(x);
+ }
+ typedef typename packet_traits<Scalar>::type Packet;
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet packetOp(const Packet& x) const {
+ return internal::pi0e(x);
+ }
+};
+template <typename Scalar>
+struct functor_traits<scalar_i0e_op<Scalar> > {
+ enum {
+ // On average, a Chebyshev polynomial of order N=20 is computed.
+ // The cost is N multiplications and 2N additions.
+ Cost = 20 * NumTraits<Scalar>::MulCost + 40 * NumTraits<Scalar>::AddCost,
+ PacketAccess = packet_traits<Scalar>::HasI0e
+ };
+};
+
+/** \internal
+ * \brief Template functor to compute the exponentially scaled modified Bessel
+ * function of order one
+ * \sa class CwiseUnaryOp, Cwise::i1e()
+ */
+template <typename Scalar>
+struct scalar_i1e_op {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_i1e_op)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator()(const Scalar& x) const {
+ using numext::i1e;
+ return i1e(x);
+ }
+ typedef typename packet_traits<Scalar>::type Packet;
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet packetOp(const Packet& x) const {
+ return internal::pi1e(x);
+ }
+};
+template <typename Scalar>
+struct functor_traits<scalar_i1e_op<Scalar> > {
+ enum {
+ // On average, a Chebyshev polynomial of order N=20 is computed.
+ // The cost is N multiplications and 2N additions.
+ Cost = 20 * NumTraits<Scalar>::MulCost + 40 * NumTraits<Scalar>::AddCost,
+ PacketAccess = packet_traits<Scalar>::HasI1e
+ };
+};
+
} // end namespace internal
} // end namespace Eigen
diff --git a/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsHalf.h b/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsHalf.h
index 553bcda6a..fbdfd299e 100644
--- a/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsHalf.h
+++ b/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsHalf.h
@@ -33,12 +33,28 @@ template<> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half erfc(const Eigen::h
template<> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half igamma(const Eigen::half& a, const Eigen::half& x) {
return Eigen::half(Eigen::numext::igamma(static_cast<float>(a), static_cast<float>(x)));
}
+template <>
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half igamma_der_a(const Eigen::half& a, const Eigen::half& x) {
+ return Eigen::half(Eigen::numext::igamma_der_a(static_cast<float>(a), static_cast<float>(x)));
+}
+template <>
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half gamma_sample_der_alpha(const Eigen::half& alpha, const Eigen::half& sample) {
+ return Eigen::half(Eigen::numext::gamma_sample_der_alpha(static_cast<float>(alpha), static_cast<float>(sample)));
+}
template<> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half igammac(const Eigen::half& a, const Eigen::half& x) {
return Eigen::half(Eigen::numext::igammac(static_cast<float>(a), static_cast<float>(x)));
}
template<> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half betainc(const Eigen::half& a, const Eigen::half& b, const Eigen::half& x) {
return Eigen::half(Eigen::numext::betainc(static_cast<float>(a), static_cast<float>(b), static_cast<float>(x)));
}
+template <>
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half i0e(const Eigen::half& x) {
+ return Eigen::half(Eigen::numext::i0e(static_cast<float>(x)));
+}
+template <>
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half i1e(const Eigen::half& x) {
+ return Eigen::half(Eigen::numext::i1e(static_cast<float>(x)));
+}
#endif
} // end namespace numext
diff --git a/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsImpl.h b/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsImpl.h
index 369ad97b4..5784cbc86 100644
--- a/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsImpl.h
+++ b/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsImpl.h
@@ -95,6 +95,75 @@ struct polevl<Scalar, 0> {
}
};
+/* chbevl (modified for Eigen)
+ *
+ * Evaluate Chebyshev series
+ *
+ *
+ *
+ * SYNOPSIS:
+ *
+ * int N;
+ * Scalar x, y, coef[N], chebevl();
+ *
+ * y = chbevl( x, coef, N );
+ *
+ *
+ *
+ * DESCRIPTION:
+ *
+ * Evaluates the series
+ *
+ * N-1
+ * - '
+ * y = > coef[i] T (x/2)
+ * - i
+ * i=0
+ *
+ * of Chebyshev polynomials Ti at argument x/2.
+ *
+ * Coefficients are stored in reverse order, i.e. the zero
+ * order term is last in the array. Note N is the number of
+ * coefficients, not the order.
+ *
+ * If coefficients are for the interval a to b, x must
+ * have been transformed to x -> 2(2x - b - a)/(b-a) before
+ * entering the routine. This maps x from (a, b) to (-1, 1),
+ * over which the Chebyshev polynomials are defined.
+ *
+ * If the coefficients are for the inverted interval, in
+ * which (a, b) is mapped to (1/b, 1/a), the transformation
+ * required is x -> 2(2ab/x - b - a)/(b-a). If b is infinity,
+ * this becomes x -> 4a/x - 1.
+ *
+ *
+ *
+ * SPEED:
+ *
+ * Taking advantage of the recurrence properties of the
+ * Chebyshev polynomials, the routine requires one more
+ * addition per loop than evaluating a nested polynomial of
+ * the same degree.
+ *
+ */
+template <typename Scalar, int N>
+struct chebevl {
+ EIGEN_DEVICE_FUNC
+ static EIGEN_STRONG_INLINE Scalar run(Scalar x, const Scalar coef[]) {
+ Scalar b0 = coef[0];
+ Scalar b1 = 0;
+ Scalar b2;
+
+ for (int i = 1; i < N; i++) {
+ b2 = b1;
+ b1 = b0;
+ b0 = x * b1 - b2 + coef[i];
+ }
+
+ return Scalar(0.5) * (b0 - b2);
+ }
+};
+
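The loop in chebevl::run is the standard Clenshaw recurrence for a Chebyshev series evaluated at argument x/2, with coefficients stored highest-order first, as described in the comment above. A self-contained sketch of the same scheme (the coefficients below are purely illustrative, not from Cephes):

#include <cstdio>

// Clenshaw evaluation of sum_i coef[i] * T_i(x/2), coefficients in reverse order.
template <typename Scalar, int N>
Scalar clenshaw_cheb(Scalar x, const Scalar (&coef)[N]) {
  Scalar b0 = coef[0], b1 = Scalar(0), b2 = Scalar(0);
  for (int i = 1; i < N; i++) {
    b2 = b1;
    b1 = b0;
    b0 = x * b1 - b2 + coef[i];  // one extra addition per term vs. Horner, as noted above
  }
  return Scalar(0.5) * (b0 - b2);
}

int main() {
  const double coef[3] = {0.25, -0.5, 1.0};  // illustrative only, highest order first
  std::printf("%g\n", clenshaw_cheb(1.0, coef));
  return 0;
}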
} // end namespace cephes
/****************************************************************************
@@ -121,9 +190,11 @@ template <>
struct lgamma_impl<float> {
EIGEN_DEVICE_FUNC
static EIGEN_STRONG_INLINE float run(float x) {
-#if !defined(__CUDA_ARCH__) && (defined(_BSD_SOURCE) || defined(_SVID_SOURCE)) && !defined(__APPLE__)
+#if !defined(EIGEN_GPU_COMPILE_PHASE) && (defined(_BSD_SOURCE) || defined(_SVID_SOURCE)) && !defined(__APPLE__)
int dummy;
return ::lgammaf_r(x, &dummy);
+#elif defined(EIGEN_USE_SYCL) && defined(__SYCL_DEVICE_ONLY__)
+ return cl::sycl::lgamma(x);
#else
return ::lgammaf(x);
#endif
@@ -134,9 +205,11 @@ template <>
struct lgamma_impl<double> {
EIGEN_DEVICE_FUNC
static EIGEN_STRONG_INLINE double run(double x) {
-#if !defined(__CUDA_ARCH__) && (defined(_BSD_SOURCE) || defined(_SVID_SOURCE)) && !defined(__APPLE__)
+#if !defined(EIGEN_GPU_COMPILE_PHASE) && (defined(_BSD_SOURCE) || defined(_SVID_SOURCE)) && !defined(__APPLE__)
int dummy;
return ::lgamma_r(x, &dummy);
+#elif defined(EIGEN_USE_SYCL) && defined(__SYCL_DEVICE_ONLY__)
+ return cl::sycl::lgamma(x);
#else
return ::lgamma(x);
#endif
@@ -354,13 +427,25 @@ struct erf_retval {
template <>
struct erf_impl<float> {
EIGEN_DEVICE_FUNC
- static EIGEN_STRONG_INLINE float run(float x) { return ::erff(x); }
+ static EIGEN_STRONG_INLINE float run(float x) {
+#if defined(EIGEN_USE_SYCL) && defined(__SYCL_DEVICE_ONLY__)
+ return cl::sycl::erf(x);
+#else
+ return ::erff(x);
+#endif
+ }
};
template <>
struct erf_impl<double> {
EIGEN_DEVICE_FUNC
- static EIGEN_STRONG_INLINE double run(double x) { return ::erf(x); }
+ static EIGEN_STRONG_INLINE double run(double x) {
+#if defined(EIGEN_USE_SYCL) && defined(__SYCL_DEVICE_ONLY__)
+ return cl::sycl::erf(x);
+#else
+ return ::erf(x);
+#endif
+ }
};
#endif // EIGEN_HAS_C99_MATH
@@ -387,13 +472,25 @@ struct erfc_retval {
template <>
struct erfc_impl<float> {
EIGEN_DEVICE_FUNC
- static EIGEN_STRONG_INLINE float run(const float x) { return ::erfcf(x); }
+ static EIGEN_STRONG_INLINE float run(const float x) {
+#if defined(EIGEN_USE_SYCL) && defined(__SYCL_DEVICE_ONLY__)
+ return cl::sycl::erfc(x);
+#else
+ return ::erfcf(x);
+#endif
+ }
};
template <>
struct erfc_impl<double> {
EIGEN_DEVICE_FUNC
- static EIGEN_STRONG_INLINE double run(const double x) { return ::erfc(x); }
+ static EIGEN_STRONG_INLINE double run(const double x) {
+#if defined(EIGEN_USE_SYCL) && defined(__SYCL_DEVICE_ONLY__)
+ return cl::sycl::erfc(x);
+#else
+ return ::erfc(x);
+#endif
+ }
};
#endif // EIGEN_HAS_C99_MATH
@@ -452,6 +549,199 @@ struct cephes_helper<double> {
}
};
+enum IgammaComputationMode { VALUE, DERIVATIVE, SAMPLE_DERIVATIVE };
+
+template <typename Scalar, IgammaComputationMode mode>
+EIGEN_DEVICE_FUNC
+int igamma_num_iterations() {
+ /* Returns the maximum number of internal iterations for igamma computation.
+ */
+ if (mode == VALUE) {
+ return 2000;
+ }
+
+ if (internal::is_same<Scalar, float>::value) {
+ return 200;
+ } else if (internal::is_same<Scalar, double>::value) {
+ return 500;
+ } else {
+ return 2000;
+ }
+}
+
+template <typename Scalar, IgammaComputationMode mode>
+struct igammac_cf_impl {
+ /* Computes igamc(a, x) or derivative (depending on the mode)
+ * using the continued fraction expansion of the complementary
+ * incomplete Gamma function.
+ *
+ * Preconditions:
+ * a > 0
+ * x >= 1
+ * x >= a
+ */
+ EIGEN_DEVICE_FUNC
+ static Scalar run(Scalar a, Scalar x) {
+ const Scalar zero = 0;
+ const Scalar one = 1;
+ const Scalar two = 2;
+ const Scalar machep = cephes_helper<Scalar>::machep();
+ const Scalar big = cephes_helper<Scalar>::big();
+ const Scalar biginv = cephes_helper<Scalar>::biginv();
+
+ if ((numext::isinf)(x)) {
+ return zero;
+ }
+
+ // continued fraction
+ Scalar y = one - a;
+ Scalar z = x + y + one;
+ Scalar c = zero;
+ Scalar pkm2 = one;
+ Scalar qkm2 = x;
+ Scalar pkm1 = x + one;
+ Scalar qkm1 = z * x;
+ Scalar ans = pkm1 / qkm1;
+
+ Scalar dpkm2_da = zero;
+ Scalar dqkm2_da = zero;
+ Scalar dpkm1_da = zero;
+ Scalar dqkm1_da = -x;
+ Scalar dans_da = (dpkm1_da - ans * dqkm1_da) / qkm1;
+
+ for (int i = 0; i < igamma_num_iterations<Scalar, mode>(); i++) {
+ c += one;
+ y += one;
+ z += two;
+
+ Scalar yc = y * c;
+ Scalar pk = pkm1 * z - pkm2 * yc;
+ Scalar qk = qkm1 * z - qkm2 * yc;
+
+ Scalar dpk_da = dpkm1_da * z - pkm1 - dpkm2_da * yc + pkm2 * c;
+ Scalar dqk_da = dqkm1_da * z - qkm1 - dqkm2_da * yc + qkm2 * c;
+
+ if (qk != zero) {
+ Scalar ans_prev = ans;
+ ans = pk / qk;
+
+ Scalar dans_da_prev = dans_da;
+ dans_da = (dpk_da - ans * dqk_da) / qk;
+
+ if (mode == VALUE) {
+ if (numext::abs(ans_prev - ans) <= machep * numext::abs(ans)) {
+ break;
+ }
+ } else {
+ if (numext::abs(dans_da - dans_da_prev) <= machep) {
+ break;
+ }
+ }
+ }
+
+ pkm2 = pkm1;
+ pkm1 = pk;
+ qkm2 = qkm1;
+ qkm1 = qk;
+
+ dpkm2_da = dpkm1_da;
+ dpkm1_da = dpk_da;
+ dqkm2_da = dqkm1_da;
+ dqkm1_da = dqk_da;
+
+ if (numext::abs(pk) > big) {
+ pkm2 *= biginv;
+ pkm1 *= biginv;
+ qkm2 *= biginv;
+ qkm1 *= biginv;
+
+ dpkm2_da *= biginv;
+ dpkm1_da *= biginv;
+ dqkm2_da *= biginv;
+ dqkm1_da *= biginv;
+ }
+ }
+
+ /* Compute x**a * exp(-x) / gamma(a) */
+ Scalar logax = a * numext::log(x) - x - lgamma_impl<Scalar>::run(a);
+ Scalar dlogax_da = numext::log(x) - digamma_impl<Scalar>::run(a);
+ Scalar ax = numext::exp(logax);
+ Scalar dax_da = ax * dlogax_da;
+
+ switch (mode) {
+ case VALUE:
+ return ans * ax;
+ case DERIVATIVE:
+ return ans * dax_da + dans_da * ax;
+ case SAMPLE_DERIVATIVE:
+ default: // this is needed to suppress clang warning
+ return -(dans_da + ans * dlogax_da) * x;
+ }
+ }
+};
+
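For reference, igammac_cf_impl evaluates Q(a, x) = igammac(a, x) = Γ(a, x)/Γ(a) = 1 - P(a, x) through a continued-fraction expansion. One classical form of that fraction, restated here for readability (Abramowitz & Stegun 6.5.31; the code uses an algebraically equivalent Cephes arrangement):

\[
  \Gamma(a,x) \;=\; e^{-x} x^{a}\,
  \cfrac{1}{x + \cfrac{1-a}{1 + \cfrac{1}{x + \cfrac{2-a}{1 + \cfrac{2}{x + \dotsb}}}}},
  \qquad
  Q(a,x) \;=\; \frac{\Gamma(a,x)}{\Gamma(a)} .
\]

In DERIVATIVE and SAMPLE_DERIVATIVE modes the same recurrence is differentiated with respect to a (the dp*_da / dq*_da variables), which is the forward-mode differentiation described in the mode comment below.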
+template <typename Scalar, IgammaComputationMode mode>
+struct igamma_series_impl {
+ /* Computes igam(a, x) or its derivative (depending on the mode)
+ * using the series expansion of the incomplete Gamma function.
+ *
+ * Preconditions:
+ * x > 0
+ * a > 0
+ * !(x > 1 && x > a)
+ */
+ EIGEN_DEVICE_FUNC
+ static Scalar run(Scalar a, Scalar x) {
+ const Scalar zero = 0;
+ const Scalar one = 1;
+ const Scalar machep = cephes_helper<Scalar>::machep();
+
+ /* power series */
+ Scalar r = a;
+ Scalar c = one;
+ Scalar ans = one;
+
+ Scalar dc_da = zero;
+ Scalar dans_da = zero;
+
+ for (int i = 0; i < igamma_num_iterations<Scalar, mode>(); i++) {
+ r += one;
+ Scalar term = x / r;
+ Scalar dterm_da = -x / (r * r);
+ dc_da = term * dc_da + dterm_da * c;
+ dans_da += dc_da;
+ c *= term;
+ ans += c;
+
+ if (mode == VALUE) {
+ if (c <= machep * ans) {
+ break;
+ }
+ } else {
+ if (numext::abs(dc_da) <= machep * numext::abs(dans_da)) {
+ break;
+ }
+ }
+ }
+
+ /* Compute x**a * exp(-x) / gamma(a + 1) */
+ Scalar logax = a * numext::log(x) - x - lgamma_impl<Scalar>::run(a + one);
+ Scalar dlogax_da = numext::log(x) - digamma_impl<Scalar>::run(a + one);
+ Scalar ax = numext::exp(logax);
+ Scalar dax_da = ax * dlogax_da;
+
+ switch (mode) {
+ case VALUE:
+ return ans * ax;
+ case DERIVATIVE:
+ return ans * dax_da + dans_da * ax;
+ case SAMPLE_DERIVATIVE:
+ default: // this is needed to suppress clang warning
+ return -(dans_da + ans * dlogax_da) * x / a;
+ }
+ }
+};
+
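Written out (restated from the code above for readability), the series branch accumulates

\[
  P(a,x) \;=\; \frac{x^{a} e^{-x}}{\Gamma(a+1)}
  \left( 1 + \sum_{k=1}^{\infty} \prod_{i=1}^{k} \frac{x}{a+i} \right)
  \;=\; x^{a} e^{-x} \sum_{k=0}^{\infty} \frac{x^{k}}{\Gamma(a+k+1)} ,
\]

with c holding the current product term and ans the running sum; in the derivative modes the same sum is differentiated term by term with respect to a (dc_da, dans_da).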
#if !EIGEN_HAS_C99_MATH
template <typename Scalar>
@@ -466,8 +756,6 @@ struct igammac_impl {
#else
-template <typename Scalar> struct igamma_impl; // predeclare igamma_impl
-
template <typename Scalar>
struct igammac_impl {
EIGEN_DEVICE_FUNC
@@ -535,93 +823,15 @@ struct igammac_impl {
return nan;
}
- if ((x < one) || (x < a)) {
- /* The checks above ensure that we meet the preconditions for
- * igamma_impl::Impl(), so call it, rather than igamma_impl::Run().
- * Calling Run() would also work, but in that case the compiler may not be
- * able to prove that igammac_impl::Run and igamma_impl::Run are not
- * mutually recursive. This leads to worse code, particularly on
- * platforms like nvptx, where recursion is allowed only begrudgingly.
- */
- return (one - igamma_impl<Scalar>::Impl(a, x));
- }
-
- return Impl(a, x);
- }
-
- private:
- /* igamma_impl calls igammac_impl::Impl. */
- friend struct igamma_impl<Scalar>;
-
- /* Actually computes igamc(a, x).
- *
- * Preconditions:
- * a > 0
- * x >= 1
- * x >= a
- */
- EIGEN_DEVICE_FUNC static Scalar Impl(Scalar a, Scalar x) {
- const Scalar zero = 0;
- const Scalar one = 1;
- const Scalar two = 2;
- const Scalar machep = cephes_helper<Scalar>::machep();
- const Scalar maxlog = numext::log(NumTraits<Scalar>::highest());
- const Scalar big = cephes_helper<Scalar>::big();
- const Scalar biginv = cephes_helper<Scalar>::biginv();
- const Scalar inf = NumTraits<Scalar>::infinity();
-
- Scalar ans, ax, c, yc, r, t, y, z;
- Scalar pk, pkm1, pkm2, qk, qkm1, qkm2;
-
- if (x == inf) return zero; // std::isinf crashes on CUDA
-
- /* Compute x**a * exp(-x) / gamma(a) */
- ax = a * numext::log(x) - x - lgamma_impl<Scalar>::run(a);
- if (ax < -maxlog) { // underflow
- return zero;
+ if ((numext::isnan)(a) || (numext::isnan)(x)) { // propagate nans
+ return nan;
}
- ax = numext::exp(ax);
- // continued fraction
- y = one - a;
- z = x + y + one;
- c = zero;
- pkm2 = one;
- qkm2 = x;
- pkm1 = x + one;
- qkm1 = z * x;
- ans = pkm1 / qkm1;
-
- while (true) {
- c += one;
- y += one;
- z += two;
- yc = y * c;
- pk = pkm1 * z - pkm2 * yc;
- qk = qkm1 * z - qkm2 * yc;
- if (qk != zero) {
- r = pk / qk;
- t = numext::abs((ans - r) / r);
- ans = r;
- } else {
- t = one;
- }
- pkm2 = pkm1;
- pkm1 = pk;
- qkm2 = qkm1;
- qkm1 = qk;
- if (numext::abs(pk) > big) {
- pkm2 *= biginv;
- pkm1 *= biginv;
- qkm2 *= biginv;
- qkm1 *= biginv;
- }
- if (t <= machep) {
- break;
- }
+ if ((x < one) || (x < a)) {
+ return (one - igamma_series_impl<Scalar, VALUE>::run(a, x));
}
- return (ans * ax);
+ return igammac_cf_impl<Scalar, VALUE>::run(a, x);
}
};
@@ -631,15 +841,10 @@ struct igammac_impl {
* Implementation of igamma (incomplete gamma integral), based on Cephes but requires C++11/C99 *
************************************************************************************************/
-template <typename Scalar>
-struct igamma_retval {
- typedef Scalar type;
-};
-
#if !EIGEN_HAS_C99_MATH
-template <typename Scalar>
-struct igamma_impl {
+template <typename Scalar, IgammaComputationMode mode>
+struct igamma_generic_impl {
EIGEN_DEVICE_FUNC
static EIGEN_STRONG_INLINE Scalar run(Scalar a, Scalar x) {
EIGEN_STATIC_ASSERT((internal::is_same<Scalar, Scalar>::value == false),
@@ -650,69 +855,17 @@ struct igamma_impl {
#else
-template <typename Scalar>
-struct igamma_impl {
+template <typename Scalar, IgammaComputationMode mode>
+struct igamma_generic_impl {
EIGEN_DEVICE_FUNC
static Scalar run(Scalar a, Scalar x) {
- /* igam()
- * Incomplete gamma integral
- *
- *
- *
- * SYNOPSIS:
- *
- * double a, x, y, igam();
- *
- * y = igam( a, x );
- *
- * DESCRIPTION:
- *
- * The function is defined by
- *
- * x
- * -
- * 1 | | -t a-1
- * igam(a,x) = ----- | e t dt.
- * - | |
- * | (a) -
- * 0
- *
- *
- * In this implementation both arguments must be positive.
- * The integral is evaluated by either a power series or
- * continued fraction expansion, depending on the relative
- * values of a and x.
- *
- * ACCURACY (double):
- *
- * Relative error:
- * arithmetic domain # trials peak rms
- * IEEE 0,30 200000 3.6e-14 2.9e-15
- * IEEE 0,100 300000 9.9e-14 1.5e-14
- *
- *
- * ACCURACY (float):
- *
- * Relative error:
- * arithmetic domain # trials peak rms
- * IEEE 0,30 20000 7.8e-6 5.9e-7
- *
- */
- /*
- Cephes Math Library Release 2.2: June, 1992
- Copyright 1985, 1987, 1992 by Stephen L. Moshier
- Direct inquiries to 30 Frost Street, Cambridge, MA 02140
- */
-
-
- /* left tail of incomplete gamma function:
- *
- * inf. k
- * a -x - x
- * x e > ----------
- * - -
- * k=0 | (a+k+1)
+ /* Depending on the mode, returns
+ * - VALUE: incomplete Gamma function igamma(a, x)
+ * - DERIVATIVE: derivative of incomplete Gamma function d/da igamma(a, x)
+ * - SAMPLE_DERIVATIVE: implicit derivative of a Gamma random variable
+ * x ~ Gamma(x | a, 1), dx/da = -1 / Gamma(x | a, 1) * d igamma(a, x) / da
*
+ * Derivatives are implemented by forward-mode differentiation.
*/
const Scalar zero = 0;
const Scalar one = 1;
@@ -724,67 +877,167 @@ struct igamma_impl {
return nan;
}
+ if ((numext::isnan)(a) || (numext::isnan)(x)) { // propagate nans
+ return nan;
+ }
+
if ((x > one) && (x > a)) {
- /* The checks above ensure that we meet the preconditions for
- * igammac_impl::Impl(), so call it, rather than igammac_impl::Run().
- * Calling Run() would also work, but in that case the compiler may not be
- * able to prove that igammac_impl::Run and igamma_impl::Run are not
- * mutually recursive. This leads to worse code, particularly on
- * platforms like nvptx, where recursion is allowed only begrudgingly.
- */
- return (one - igammac_impl<Scalar>::Impl(a, x));
+ Scalar ret = igammac_cf_impl<Scalar, mode>::run(a, x);
+ if (mode == VALUE) {
+ return one - ret;
+ } else {
+ return -ret;
+ }
}
- return Impl(a, x);
+ return igamma_series_impl<Scalar, mode>::run(a, x);
}
+};
- private:
- /* igammac_impl calls igamma_impl::Impl. */
- friend struct igammac_impl<Scalar>;
+#endif // EIGEN_HAS_C99_MATH
+
+template <typename Scalar>
+struct igamma_retval {
+ typedef Scalar type;
+};
- /* Actually computes igam(a, x).
+template <typename Scalar>
+struct igamma_impl : igamma_generic_impl<Scalar, VALUE> {
+ /* igam()
+ * Incomplete gamma integral.
+ *
+ * The CDF of a Gamma(a, 1) random variable at the point x.
+ *
+ * Accuracy estimation. For each a in [10^-2, 10^-1...10^3] we sample
+ * 50 Gamma random variables x ~ Gamma(x | a, 1), a total of 300 points.
+ * The ground truth is computed by mpmath. Mean absolute error:
+ * float: 1.26713e-05
+ * double: 2.33606e-12
+ *
+ * Cephes documentation below.
+ *
+ * SYNOPSIS:
+ *
+ * double a, x, y, igam();
+ *
+ * y = igam( a, x );
+ *
+ * DESCRIPTION:
+ *
+ * The function is defined by
+ *
+ * x
+ * -
+ * 1 | | -t a-1
+ * igam(a,x) = ----- | e t dt.
+ * - | |
+ * | (a) -
+ * 0
+ *
+ *
+ * In this implementation both arguments must be positive.
+ * The integral is evaluated by either a power series or
+ * continued fraction expansion, depending on the relative
+ * values of a and x.
+ *
+ * ACCURACY (double):
+ *
+ * Relative error:
+ * arithmetic domain # trials peak rms
+ * IEEE 0,30 200000 3.6e-14 2.9e-15
+ * IEEE 0,100 300000 9.9e-14 1.5e-14
+ *
+ *
+ * ACCURACY (float):
+ *
+ * Relative error:
+ * arithmetic domain # trials peak rms
+ * IEEE 0,30 20000 7.8e-6 5.9e-7
*
- * Preconditions:
- * x > 0
- * a > 0
- * !(x > 1 && x > a)
*/
- EIGEN_DEVICE_FUNC static Scalar Impl(Scalar a, Scalar x) {
- const Scalar zero = 0;
- const Scalar one = 1;
- const Scalar machep = cephes_helper<Scalar>::machep();
- const Scalar maxlog = numext::log(NumTraits<Scalar>::highest());
+ /*
+ Cephes Math Library Release 2.2: June, 1992
+ Copyright 1985, 1987, 1992 by Stephen L. Moshier
+ Direct inquiries to 30 Frost Street, Cambridge, MA 02140
+ */
- Scalar ans, ax, c, r;
+ /* left tail of incomplete gamma function:
+ *
+ * inf. k
+ * a -x - x
+ * x e > ----------
+ * - -
+ * k=0 | (a+k+1)
+ *
+ */
+};
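In LaTeX, the integral in the Cephes description above reads

\[
  \mathrm{igam}(a,x) \;=\; P(a,x) \;=\; \frac{1}{\Gamma(a)} \int_{0}^{x} e^{-t}\, t^{a-1}\, dt ,
\]

which is exactly the Gamma(a, 1) CDF mentioned at the top of the comment; the trailing left-tail series is the expansion evaluated by igamma_series_impl above.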
- /* Compute x**a * exp(-x) / gamma(a) */
- ax = a * numext::log(x) - x - lgamma_impl<Scalar>::run(a);
- if (ax < -maxlog) {
- // underflow
- return zero;
- }
- ax = numext::exp(ax);
+template <typename Scalar>
+struct igamma_der_a_retval : igamma_retval<Scalar> {};
- /* power series */
- r = a;
- c = one;
- ans = one;
+template <typename Scalar>
+struct igamma_der_a_impl : igamma_generic_impl<Scalar, DERIVATIVE> {
+ /* Derivative of the incomplete Gamma function with respect to a.
+ *
+ * Computes d/da igamma(a, x) by forward differentiation of the igamma code.
+ *
+ * Accuracy estimation. For each a in [10^-2, 10^-1...10^3] we sample
+ * 50 Gamma random variables x ~ Gamma(x | a, 1), a total of 300 points.
+ * The ground truth is computed by mpmath. Mean absolute error:
+ * float: 6.17992e-07
+ * double: 4.60453e-12
+ *
+ * Reference:
+ * R. Moore. "Algorithm AS 187: Derivatives of the incomplete gamma
+ * integral". Journal of the Royal Statistical Society. 1982
+ */
+};
- while (true) {
- r += one;
- c *= x/r;
- ans += c;
- if (c/ans <= machep) {
- break;
- }
- }
+template <typename Scalar>
+struct gamma_sample_der_alpha_retval : igamma_retval<Scalar> {};
- return (ans * ax / a);
- }
+template <typename Scalar>
+struct gamma_sample_der_alpha_impl
+ : igamma_generic_impl<Scalar, SAMPLE_DERIVATIVE> {
+ /* Derivative of a Gamma random variable sample with respect to alpha.
+ *
+ * Consider a sample of a Gamma random variable with the concentration
+ * parameter alpha: sample ~ Gamma(alpha, 1). The reparameterization
+ * derivative that we want to compute is dsample / dalpha =
+ * d igammainv(alpha, u) / dalpha, where u = igamma(alpha, sample).
+ * However, this formula is numerically unstable and expensive, so instead
+ * we use implicit differentiation:
+ *
+ * igamma(alpha, sample) = u, where u ~ Uniform(0, 1).
+ * Apply d / dalpha to both sides:
+ * d igamma(alpha, sample) / dalpha
+ * + d igamma(alpha, sample) / dsample * dsample/dalpha = 0
+ * d igamma(alpha, sample) / dalpha
+ * + Gamma(sample | alpha, 1) dsample / dalpha = 0
+ * dsample/dalpha = - (d igamma(alpha, sample) / dalpha)
+ * / Gamma(sample | alpha, 1)
+ *
+ * Here Gamma(sample | alpha, 1) is the PDF of the Gamma distribution
+ * (note that the derivative of the CDF w.r.t. sample is the PDF).
+ * See the reference below for more details.
+ *
+ * The derivative of igamma(alpha, sample) is computed by forward
+ * differentiation of the igamma code. Division by the Gamma PDF is performed
+ * in the same code, increasing the accuracy and speed due to cancellation
+ * of some terms.
+ *
+ * Accuracy estimation. For each alpha in [10^-2, 10^-1...10^3] we sample
+ * 50 Gamma random variables sample ~ Gamma(sample | alpha, 1), a total of 300
+ * points. The ground truth is computed by mpmath. Mean absolute error:
+ * float: 2.1686e-06
+ * double: 1.4774e-12
+ *
+ * Reference:
+ * M. Figurnov, S. Mohamed, A. Mnih "Implicit Reparameterization Gradients".
+ * 2018
+ */
};
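The implicit-differentiation argument in the comment above, written compactly: with u = P(alpha, sample) held fixed (u ~ Uniform(0, 1)) and sample ~ Gamma(alpha, 1),

\[
  \frac{\partial P(\alpha, x)}{\partial \alpha}
  + \frac{\partial P(\alpha, x)}{\partial x}\,\frac{dx}{d\alpha} = 0
  \quad\Longrightarrow\quad
  \frac{dx}{d\alpha}
  \;=\; -\,\frac{\partial P(\alpha, x)/\partial \alpha}{p(x \mid \alpha, 1)} ,
\]

since the derivative of the CDF with respect to x is the Gamma(alpha, 1) density p(x | alpha, 1). SAMPLE_DERIVATIVE computes this ratio directly inside the igamma code, which is the cancellation of terms referred to above.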
-#endif // EIGEN_HAS_C99_MATH
-
/*****************************************************************************
* Implementation of Riemann zeta function of two arguments, based on Cephes *
*****************************************************************************/
@@ -1499,6 +1752,334 @@ struct betainc_impl<double> {
#endif // EIGEN_HAS_C99_MATH
+/****************************************************************************
+ * Implementation of Bessel function, based on Cephes *
+ ****************************************************************************/
+
+template <typename Scalar>
+struct i0e_retval {
+ typedef Scalar type;
+};
+
+template <typename Scalar>
+struct i0e_impl {
+ EIGEN_DEVICE_FUNC
+ static EIGEN_STRONG_INLINE Scalar run(const Scalar) {
+ EIGEN_STATIC_ASSERT((internal::is_same<Scalar, Scalar>::value == false),
+ THIS_TYPE_IS_NOT_SUPPORTED);
+ return Scalar(0);
+ }
+};
+
+template <>
+struct i0e_impl<float> {
+ EIGEN_DEVICE_FUNC
+ static EIGEN_STRONG_INLINE float run(float x) {
+ /* i0ef.c
+ *
+ * Modified Bessel function of order zero,
+ * exponentially scaled
+ *
+ *
+ *
+ * SYNOPSIS:
+ *
+ * float x, y, i0ef();
+ *
+ * y = i0ef( x );
+ *
+ *
+ *
+ * DESCRIPTION:
+ *
+ * Returns exponentially scaled modified Bessel function
+ * of order zero of the argument.
+ *
+ * The function is defined as i0e(x) = exp(-|x|) j0( ix ).
+ *
+ *
+ *
+ * ACCURACY:
+ *
+ * Relative error:
+ * arithmetic domain # trials peak rms
+ * IEEE 0,30 100000 3.7e-7 7.0e-8
+ * See i0f().
+ *
+ */
+ const float A[] = {-1.30002500998624804212E-8f, 6.04699502254191894932E-8f,
+ -2.67079385394061173391E-7f, 1.11738753912010371815E-6f,
+ -4.41673835845875056359E-6f, 1.64484480707288970893E-5f,
+ -5.75419501008210370398E-5f, 1.88502885095841655729E-4f,
+ -5.76375574538582365885E-4f, 1.63947561694133579842E-3f,
+ -4.32430999505057594430E-3f, 1.05464603945949983183E-2f,
+ -2.37374148058994688156E-2f, 4.93052842396707084878E-2f,
+ -9.49010970480476444210E-2f, 1.71620901522208775349E-1f,
+ -3.04682672343198398683E-1f, 6.76795274409476084995E-1f};
+
+ const float B[] = {3.39623202570838634515E-9f, 2.26666899049817806459E-8f,
+ 2.04891858946906374183E-7f, 2.89137052083475648297E-6f,
+ 6.88975834691682398426E-5f, 3.36911647825569408990E-3f,
+ 8.04490411014108831608E-1f};
+ if (x < 0.0f) {
+ x = -x;
+ }
+
+ if (x <= 8.0f) {
+ float y = 0.5f * x - 2.0f;
+ return cephes::chebevl<float, 18>::run(y, A);
+ }
+
+ return cephes::chebevl<float, 7>::run(32.0f / x - 2.0f, B) / numext::sqrt(x);
+ }
+};
+
+template <>
+struct i0e_impl<double> {
+ EIGEN_DEVICE_FUNC
+ static EIGEN_STRONG_INLINE double run(double x) {
+ /* i0e.c
+ *
+ * Modified Bessel function of order zero,
+ * exponentially scaled
+ *
+ *
+ *
+ * SYNOPSIS:
+ *
+ * double x, y, i0e();
+ *
+ * y = i0e( x );
+ *
+ *
+ *
+ * DESCRIPTION:
+ *
+ * Returns exponentially scaled modified Bessel function
+ * of order zero of the argument.
+ *
+ * The function is defined as i0e(x) = exp(-|x|) j0( ix ).
+ *
+ *
+ *
+ * ACCURACY:
+ *
+ * Relative error:
+ * arithmetic domain # trials peak rms
+ * IEEE 0,30 30000 5.4e-16 1.2e-16
+ * See i0().
+ *
+ */
+ const double A[] = {-4.41534164647933937950E-18, 3.33079451882223809783E-17,
+ -2.43127984654795469359E-16, 1.71539128555513303061E-15,
+ -1.16853328779934516808E-14, 7.67618549860493561688E-14,
+ -4.85644678311192946090E-13, 2.95505266312963983461E-12,
+ -1.72682629144155570723E-11, 9.67580903537323691224E-11,
+ -5.18979560163526290666E-10, 2.65982372468238665035E-9,
+ -1.30002500998624804212E-8, 6.04699502254191894932E-8,
+ -2.67079385394061173391E-7, 1.11738753912010371815E-6,
+ -4.41673835845875056359E-6, 1.64484480707288970893E-5,
+ -5.75419501008210370398E-5, 1.88502885095841655729E-4,
+ -5.76375574538582365885E-4, 1.63947561694133579842E-3,
+ -4.32430999505057594430E-3, 1.05464603945949983183E-2,
+ -2.37374148058994688156E-2, 4.93052842396707084878E-2,
+ -9.49010970480476444210E-2, 1.71620901522208775349E-1,
+ -3.04682672343198398683E-1, 6.76795274409476084995E-1};
+ const double B[] = {
+ -7.23318048787475395456E-18, -4.83050448594418207126E-18,
+ 4.46562142029675999901E-17, 3.46122286769746109310E-17,
+ -2.82762398051658348494E-16, -3.42548561967721913462E-16,
+ 1.77256013305652638360E-15, 3.81168066935262242075E-15,
+ -9.55484669882830764870E-15, -4.15056934728722208663E-14,
+ 1.54008621752140982691E-14, 3.85277838274214270114E-13,
+ 7.18012445138366623367E-13, -1.79417853150680611778E-12,
+ -1.32158118404477131188E-11, -3.14991652796324136454E-11,
+ 1.18891471078464383424E-11, 4.94060238822496958910E-10,
+ 3.39623202570838634515E-9, 2.26666899049817806459E-8,
+ 2.04891858946906374183E-7, 2.89137052083475648297E-6,
+ 6.88975834691682398426E-5, 3.36911647825569408990E-3,
+ 8.04490411014108831608E-1};
+
+ if (x < 0.0) {
+ x = -x;
+ }
+
+ if (x <= 8.0) {
+ double y = (x / 2.0) - 2.0;
+ return cephes::chebevl<double, 30>::run(y, A);
+ }
+
+ return cephes::chebevl<double, 25>::run(32.0 / x - 2.0, B) /
+ numext::sqrt(x);
+ }
+};
+
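For reference, the quantity computed by i0e_impl is

\[
  \mathrm{i0e}(x) \;=\; e^{-|x|}\, I_{0}(x),
  \qquad
  I_{0}(x) \;=\; \sum_{k=0}^{\infty} \frac{(x^{2}/4)^{k}}{(k!)^{2}} ,
\]

the exponentially scaled modified Bessel function of order zero (the Cephes comments write I_0(x) as J_0(ix)). Both specializations split the range at |x| = 8 and evaluate a Chebyshev expansion via cephes::chebevl, in the variable x/2 - 2 on [0, 8] and 32/x - 2 beyond, with an extra 1/sqrt(x) factor on the outer interval.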
+template <typename Scalar>
+struct i1e_retval {
+ typedef Scalar type;
+};
+
+template <typename Scalar>
+struct i1e_impl {
+ EIGEN_DEVICE_FUNC
+ static EIGEN_STRONG_INLINE Scalar run(const Scalar) {
+ EIGEN_STATIC_ASSERT((internal::is_same<Scalar, Scalar>::value == false),
+ THIS_TYPE_IS_NOT_SUPPORTED);
+ return Scalar(0);
+ }
+};
+
+template <>
+struct i1e_impl<float> {
+ EIGEN_DEVICE_FUNC
+ static EIGEN_STRONG_INLINE float run(float x) {
+ /* i1ef.c
+ *
+ * Modified Bessel function of order one,
+ * exponentially scaled
+ *
+ *
+ *
+ * SYNOPSIS:
+ *
+ * float x, y, i1ef();
+ *
+ * y = i1ef( x );
+ *
+ *
+ *
+ * DESCRIPTION:
+ *
+ * Returns exponentially scaled modified Bessel function
+ * of order one of the argument.
+ *
+ * The function is defined as i1(x) = -i exp(-|x|) j1( ix ).
+ *
+ *
+ *
+ * ACCURACY:
+ *
+ * Relative error:
+ * arithmetic domain # trials peak rms
+ * IEEE 0, 30 30000 1.5e-6 1.5e-7
+ * See i1().
+ *
+ */
+ const float A[] = {9.38153738649577178388E-9f, -4.44505912879632808065E-8f,
+ 2.00329475355213526229E-7f, -8.56872026469545474066E-7f,
+ 3.47025130813767847674E-6f, -1.32731636560394358279E-5f,
+ 4.78156510755005422638E-5f, -1.61760815825896745588E-4f,
+ 5.12285956168575772895E-4f, -1.51357245063125314899E-3f,
+ 4.15642294431288815669E-3f, -1.05640848946261981558E-2f,
+ 2.47264490306265168283E-2f, -5.29459812080949914269E-2f,
+ 1.02643658689847095384E-1f, -1.76416518357834055153E-1f,
+ 2.52587186443633654823E-1f};
+
+ const float B[] = {-3.83538038596423702205E-9f, -2.63146884688951950684E-8f,
+ -2.51223623787020892529E-7f, -3.88256480887769039346E-6f,
+ -1.10588938762623716291E-4f, -9.76109749136146840777E-3f,
+ 7.78576235018280120474E-1f};
+
+ float z = numext::abs(x);
+
+ if (z <= 8.0f) {
+ float y = 0.5f * z - 2.0f;
+ z = cephes::chebevl<float, 17>::run(y, A) * z;
+ } else {
+ z = cephes::chebevl<float, 7>::run(32.0f / z - 2.0f, B) / numext::sqrt(z);
+ }
+
+ if (x < 0.0f) {
+ z = -z;
+ }
+
+ return z;
+ }
+};
+
+template <>
+struct i1e_impl<double> {
+ EIGEN_DEVICE_FUNC
+ static EIGEN_STRONG_INLINE double run(double x) {
+ /* i1e.c
+ *
+ * Modified Bessel function of order one,
+ * exponentially scaled
+ *
+ *
+ *
+ * SYNOPSIS:
+ *
+ * double x, y, i1e();
+ *
+ * y = i1e( x );
+ *
+ *
+ *
+ * DESCRIPTION:
+ *
+ * Returns exponentially scaled modified Bessel function
+ * of order one of the argument.
+ *
+ * The function is defined as i1(x) = -i exp(-|x|) j1( ix ).
+ *
+ *
+ *
+ * ACCURACY:
+ *
+ * Relative error:
+ * arithmetic domain # trials peak rms
+ * IEEE 0, 30 30000 2.0e-15 2.0e-16
+ * See i1().
+ *
+ */
+ const double A[] = {2.77791411276104639959E-18, -2.11142121435816608115E-17,
+ 1.55363195773620046921E-16, -1.10559694773538630805E-15,
+ 7.60068429473540693410E-15, -5.04218550472791168711E-14,
+ 3.22379336594557470981E-13, -1.98397439776494371520E-12,
+ 1.17361862988909016308E-11, -6.66348972350202774223E-11,
+ 3.62559028155211703701E-10, -1.88724975172282928790E-9,
+ 9.38153738649577178388E-9, -4.44505912879632808065E-8,
+ 2.00329475355213526229E-7, -8.56872026469545474066E-7,
+ 3.47025130813767847674E-6, -1.32731636560394358279E-5,
+ 4.78156510755005422638E-5, -1.61760815825896745588E-4,
+ 5.12285956168575772895E-4, -1.51357245063125314899E-3,
+ 4.15642294431288815669E-3, -1.05640848946261981558E-2,
+ 2.47264490306265168283E-2, -5.29459812080949914269E-2,
+ 1.02643658689847095384E-1, -1.76416518357834055153E-1,
+ 2.52587186443633654823E-1};
+ const double B[] = {
+ 7.51729631084210481353E-18, 4.41434832307170791151E-18,
+ -4.65030536848935832153E-17, -3.20952592199342395980E-17,
+ 2.96262899764595013876E-16, 3.30820231092092828324E-16,
+ -1.88035477551078244854E-15, -3.81440307243700780478E-15,
+ 1.04202769841288027642E-14, 4.27244001671195135429E-14,
+ -2.10154184277266431302E-14, -4.08355111109219731823E-13,
+ -7.19855177624590851209E-13, 2.03562854414708950722E-12,
+ 1.41258074366137813316E-11, 3.25260358301548823856E-11,
+ -1.89749581235054123450E-11, -5.58974346219658380687E-10,
+ -3.83538038596423702205E-9, -2.63146884688951950684E-8,
+ -2.51223623787020892529E-7, -3.88256480887769039346E-6,
+ -1.10588938762623716291E-4, -9.76109749136146840777E-3,
+ 7.78576235018280120474E-1};
+
+ double z = numext::abs(x);
+
+ if (z <= 8.0) {
+ double y = (z / 2.0) - 2.0;
+ z = cephes::chebevl<double, 29>::run(y, A) * z;
+ } else {
+ z = cephes::chebevl<double, 25>::run(32.0 / z - 2.0, B) / numext::sqrt(z);
+ }
+
+ if (x < 0.0) {
+ z = -z;
+ }
+
+ return z;
+ }
+};
+
} // end namespace internal
namespace numext {
@@ -1546,6 +2127,18 @@ EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(igamma, Scalar)
}
template <typename Scalar>
+EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(igamma_der_a, Scalar)
+ igamma_der_a(const Scalar& a, const Scalar& x) {
+ return EIGEN_MATHFUNC_IMPL(igamma_der_a, Scalar)::run(a, x);
+}
+
+template <typename Scalar>
+EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(gamma_sample_der_alpha, Scalar)
+ gamma_sample_der_alpha(const Scalar& a, const Scalar& x) {
+ return EIGEN_MATHFUNC_IMPL(gamma_sample_der_alpha, Scalar)::run(a, x);
+}
+
+template <typename Scalar>
EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(igammac, Scalar)
igammac(const Scalar& a, const Scalar& x) {
return EIGEN_MATHFUNC_IMPL(igammac, Scalar)::run(a, x);
@@ -1557,6 +2150,18 @@ EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(betainc, Scalar)
return EIGEN_MATHFUNC_IMPL(betainc, Scalar)::run(a, b, x);
}
+template <typename Scalar>
+EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(i0e, Scalar)
+ i0e(const Scalar& x) {
+ return EIGEN_MATHFUNC_IMPL(i0e, Scalar)::run(x);
+}
+
+template <typename Scalar>
+EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(i1e, Scalar)
+ i1e(const Scalar& x) {
+ return EIGEN_MATHFUNC_IMPL(i1e, Scalar)::run(x);
+}
+
} // end namespace numext
diff --git a/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsPacketMath.h b/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsPacketMath.h
index 46d60d323..465f41d54 100644
--- a/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsPacketMath.h
+++ b/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsPacketMath.h
@@ -42,6 +42,21 @@ Packet perfc(const Packet& a) { using numext::erfc; return erfc(a); }
template<typename Packet> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
Packet pigamma(const Packet& a, const Packet& x) { using numext::igamma; return igamma(a, x); }
+/** \internal \returns the derivative of the incomplete gamma function
+ * igamma_der_a(\a a, \a x) */
+template <typename Packet>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet pigamma_der_a(const Packet& a, const Packet& x) {
+ using numext::igamma_der_a; return igamma_der_a(a, x);
+}
+
+/** \internal \returns the derivative of the sample
+ * of a Gamma(alpha, 1) random variable with respect to the parameter alpha
+ * gamma_sample_der_alpha(\a alpha, \a sample) */
+template <typename Packet>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet pgamma_sample_der_alpha(const Packet& alpha, const Packet& sample) {
+ using numext::gamma_sample_der_alpha; return gamma_sample_der_alpha(alpha, sample);
+}
+
/** \internal \returns the complementary incomplete gamma function igammac(\a a, \a x) */
template<typename Packet> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
Packet pigammac(const Packet& a, const Packet& x) { using numext::igammac; return igammac(a, x); }
@@ -50,6 +65,18 @@ Packet pigammac(const Packet& a, const Packet& x) { using numext::igammac; retur
template<typename Packet> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
Packet pbetainc(const Packet& a, const Packet& b,const Packet& x) { using numext::betainc; return betainc(a, b, x); }
+/** \internal \returns the exponentially scaled modified Bessel function of
+ * order zero i0e(\a x) (coeff-wise) */
+template <typename Packet>
+EIGEN_DEVICE_FUNC EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+Packet pi0e(const Packet& x) { using numext::i0e; return i0e(x); }
+
+/** \internal \returns the exponentially scaled modified Bessel function of
+ * order one i1e(\a x) (coeff-wise) */
+template <typename Packet>
+EIGEN_DEVICE_FUNC EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+Packet pi1e(const Packet& x) { using numext::i1e; return i1e(x); }
+
} // end namespace internal
} // end namespace Eigen
diff --git a/unsupported/Eigen/src/SpecialFunctions/arch/CUDA/CudaSpecialFunctions.h b/unsupported/Eigen/src/SpecialFunctions/arch/GPU/GpuSpecialFunctions.h
index ec4fa8448..40abcee3a 100644
--- a/unsupported/Eigen/src/SpecialFunctions/arch/CUDA/CudaSpecialFunctions.h
+++ b/unsupported/Eigen/src/SpecialFunctions/arch/GPU/GpuSpecialFunctions.h
@@ -7,8 +7,8 @@
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-#ifndef EIGEN_CUDA_SPECIALFUNCTIONS_H
-#define EIGEN_CUDA_SPECIALFUNCTIONS_H
+#ifndef EIGEN_GPU_SPECIALFUNCTIONS_H
+#define EIGEN_GPU_SPECIALFUNCTIONS_H
namespace Eigen {
@@ -17,7 +17,7 @@ namespace internal {
// Make sure this is only available when targeting a GPU: we don't want to
// introduce conflicts between these packet_traits definitions and the ones
// we'll use on the host side (SSE, AVX, ...)
-#if defined(__CUDACC__) && defined(EIGEN_USE_GPU)
+#if defined(EIGEN_GPUCC) && defined(EIGEN_USE_GPU)
template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
float4 plgamma<float4>(const float4& a)
@@ -120,6 +120,41 @@ double2 pigamma<double2>(const double2& a, const double2& x)
return make_double2(igamma(a.x, x.x), igamma(a.y, x.y));
}
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pigamma_der_a<float4>(
+ const float4& a, const float4& x) {
+ using numext::igamma_der_a;
+ return make_float4(igamma_der_a(a.x, x.x), igamma_der_a(a.y, x.y),
+ igamma_der_a(a.z, x.z), igamma_der_a(a.w, x.w));
+}
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2
+pigamma_der_a<double2>(const double2& a, const double2& x) {
+ using numext::igamma_der_a;
+ return make_double2(igamma_der_a(a.x, x.x), igamma_der_a(a.y, x.y));
+}
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pgamma_sample_der_alpha<float4>(
+ const float4& alpha, const float4& sample) {
+ using numext::gamma_sample_der_alpha;
+ return make_float4(
+ gamma_sample_der_alpha(alpha.x, sample.x),
+ gamma_sample_der_alpha(alpha.y, sample.y),
+ gamma_sample_der_alpha(alpha.z, sample.z),
+ gamma_sample_der_alpha(alpha.w, sample.w));
+}
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2
+pgamma_sample_der_alpha<double2>(const double2& alpha, const double2& sample) {
+ using numext::gamma_sample_der_alpha;
+ return make_double2(
+ gamma_sample_der_alpha(alpha.x, sample.x),
+ gamma_sample_der_alpha(alpha.y, sample.y));
+}
+
template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
float4 pigammac<float4>(const float4& a, const float4& x)
{
@@ -156,10 +191,36 @@ double2 pbetainc<double2>(const double2& a, const double2& b, const double2& x)
return make_double2(betainc(a.x, b.x, x.x), betainc(a.y, b.y, x.y));
}
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pi0e<float4>(const float4& x) {
+ using numext::i0e;
+ return make_float4(i0e(x.x), i0e(x.y), i0e(x.z), i0e(x.w));
+}
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2
+pi0e<double2>(const double2& x) {
+ using numext::i0e;
+ return make_double2(i0e(x.x), i0e(x.y));
+}
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pi1e<float4>(const float4& x) {
+ using numext::i1e;
+ return make_float4(i1e(x.x), i1e(x.y), i1e(x.z), i1e(x.w));
+}
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2
+pi1e<double2>(const double2& x) {
+ using numext::i1e;
+ return make_double2(i1e(x.x), i1e(x.y));
+}
+
#endif
} // end namespace internal
} // end namespace Eigen
-#endif // EIGEN_CUDA_SPECIALFUNCTIONS_H
+#endif // EIGEN_GPU_SPECIALFUNCTIONS_H
diff --git a/unsupported/Eigen/src/Splines/Spline.h b/unsupported/Eigen/src/Splines/Spline.h
index 627f6e482..c1cf5b7e4 100644
--- a/unsupported/Eigen/src/Splines/Spline.h
+++ b/unsupported/Eigen/src/Splines/Spline.h
@@ -249,15 +249,13 @@ namespace Eigen
DenseIndex degree,
const typename Spline<_Scalar, _Dim, _Degree>::KnotVectorType& knots)
{
- typedef typename Spline<_Scalar, _Dim, _Degree>::BasisVectorType BasisVectorType;
-
const DenseIndex p = degree;
const DenseIndex i = Spline::Span(u, degree, knots);
const KnotVectorType& U = knots;
BasisVectorType left(p+1); left(0) = Scalar(0);
- BasisVectorType right(p+1); right(0) = Scalar(0);
+ BasisVectorType right(p+1); right(0) = Scalar(0);
VectorBlock<BasisVectorType,Degree>(left,1,p) = u - VectorBlock<const KnotVectorType,Degree>(U,i+1-p,p).reverse();
VectorBlock<BasisVectorType,Degree>(right,1,p) = VectorBlock<const KnotVectorType,Degree>(U,i+1,p) - u;
@@ -380,9 +378,6 @@ namespace Eigen
typedef Spline<_Scalar, _Dim, _Degree> SplineType;
enum { Order = SplineTraits<SplineType>::OrderAtCompileTime };
- typedef typename SplineTraits<SplineType>::Scalar Scalar;
- typedef typename SplineTraits<SplineType>::BasisVectorType BasisVectorType;
-
const DenseIndex span = SplineType::Span(u, p, U);
const DenseIndex n = (std::min)(p, order);
diff --git a/unsupported/Eigen/src/Splines/SplineFitting.h b/unsupported/Eigen/src/Splines/SplineFitting.h
index c761a9b3d..850f78a5e 100644
--- a/unsupported/Eigen/src/Splines/SplineFitting.h
+++ b/unsupported/Eigen/src/Splines/SplineFitting.h
@@ -17,8 +17,8 @@
#include "SplineFwd.h"
-#include <Eigen/LU>
-#include <Eigen/QR>
+#include "../../../../Eigen/LU"
+#include "../../../../Eigen/QR"
namespace Eigen
{
@@ -181,7 +181,7 @@ namespace Eigen
* \ingroup Splines_Module
*
* \param[in] pts The data points to which a spline should be fit.
- * \param[out] chord_lengths The resulting chord lenggth vector.
+ * \param[out] chord_lengths The resulting chord length vector.
*
* \sa Les Piegl and Wayne Tiller, The NURBS book (2nd ed.), 1997, 9.2.1 Global Curve Interpolation to Point Data
**/
diff --git a/unsupported/Eigen/src/Splines/SplineFwd.h b/unsupported/Eigen/src/Splines/SplineFwd.h
index 0a95fbf3e..00d6b4921 100644
--- a/unsupported/Eigen/src/Splines/SplineFwd.h
+++ b/unsupported/Eigen/src/Splines/SplineFwd.h
@@ -10,7 +10,7 @@
#ifndef EIGEN_SPLINES_FWD_H
#define EIGEN_SPLINES_FWD_H
-#include <Eigen/Core>
+#include "../../../../Eigen/Core"
namespace Eigen
{
diff --git a/unsupported/README.txt b/unsupported/README.txt
index 83479ff0b..70793bf13 100644
--- a/unsupported/README.txt
+++ b/unsupported/README.txt
@@ -20,7 +20,7 @@ However, it:
- must rely on Eigen,
- must be highly related to math,
- should have some general purpose in the sense that it could
- potentially become an offical Eigen module (or be merged into another one).
+ potentially become an official Eigen module (or be merged into another one).
If in doubt, feel free to contact us. For instance, if your addon is too specific
but shows an interesting way of using Eigen, then it could be a nice demo.
diff --git a/unsupported/bench/bench_svd.cpp b/unsupported/bench/bench_svd.cpp
index 01d8231ae..e7028a2b9 100644
--- a/unsupported/bench/bench_svd.cpp
+++ b/unsupported/bench/bench_svd.cpp
@@ -70,7 +70,7 @@ void bench_svd(const MatrixType& a = MatrixType())
std::cout<< std::endl;
timerJacobi.reset();
timerBDC.reset();
- cout << " Computes rotaion matrix" <<endl;
+ cout << " Computes rotation matrix" <<endl;
for (int k=1; k<=NUMBER_SAMPLE; ++k)
{
timerBDC.start();
diff --git a/unsupported/doc/examples/FFT.cpp b/unsupported/doc/examples/FFT.cpp
index fcbf81276..85e8a0241 100644
--- a/unsupported/doc/examples/FFT.cpp
+++ b/unsupported/doc/examples/FFT.cpp
@@ -61,14 +61,14 @@ template <typename T>
void RandomFill(std::vector<T> & vec)
{
for (size_t k=0;k<vec.size();++k)
- vec[k] = T( rand() )/T(RAND_MAX) - .5;
+ vec[k] = T( rand() )/T(RAND_MAX) - T(.5);
}
template <typename T>
void RandomFill(std::vector<std::complex<T> > & vec)
{
for (size_t k=0;k<vec.size();++k)
- vec[k] = std::complex<T> ( T( rand() )/T(RAND_MAX) - .5, T( rand() )/T(RAND_MAX) - .5);
+ vec[k] = std::complex<T> ( T( rand() )/T(RAND_MAX) - T(.5), T( rand() )/T(RAND_MAX) - T(.5));
}
template <typename T_time,typename T_freq>
@@ -85,7 +85,7 @@ void fwd_inv(size_t nfft)
vector<T_time> timebuf2;
fft.inv(timebuf2,freqbuf);
- long double rmse = mag2(timebuf - timebuf2) / mag2(timebuf);
+ T_time rmse = mag2(timebuf - timebuf2) / mag2(timebuf);
cout << "roundtrip rmse: " << rmse << endl;
}
diff --git a/unsupported/test/BVH.cpp b/unsupported/test/BVH.cpp
index ff5b3299d..d8c39d556 100644
--- a/unsupported/test/BVH.cpp
+++ b/unsupported/test/BVH.cpp
@@ -192,7 +192,7 @@ struct TreeTest
};
-void test_BVH()
+EIGEN_DECLARE_TEST(BVH)
{
for(int i = 0; i < g_repeat; i++) {
#ifdef EIGEN_TEST_PART_1
diff --git a/unsupported/test/CMakeLists.txt b/unsupported/test/CMakeLists.txt
index 003c9de0b..1a4a77058 100644
--- a/unsupported/test/CMakeLists.txt
+++ b/unsupported/test/CMakeLists.txt
@@ -1,16 +1,7 @@
-# generate split test header file only if it does not yet exist
-# in order to prevent a rebuild everytime cmake is configured
-if(NOT EXISTS ${CMAKE_CURRENT_BINARY_DIR}/split_test_helper.h)
- file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/split_test_helper.h "")
- foreach(i RANGE 1 999)
- file(APPEND ${CMAKE_CURRENT_BINARY_DIR}/split_test_helper.h
- "#ifdef EIGEN_TEST_PART_${i}\n"
- "#define CALL_SUBTEST_${i}(FUNC) CALL_SUBTEST(FUNC)\n"
- "#else\n"
- "#define CALL_SUBTEST_${i}(FUNC)\n"
- "#endif\n\n"
- )
- endforeach()
+# The file split_test_helper.h used to be generated here on the first run;
+# it is now included in test/, so remove any stale generated copy.
+if(EXISTS ${CMAKE_CURRENT_BINARY_DIR}/split_test_helper.h)
+ file(REMOVE ${CMAKE_CURRENT_BINARY_DIR}/split_test_helper.h)
endif()
set_property(GLOBAL PROPERTY EIGEN_CURRENT_SUBPROJECT "Unsupported")
@@ -41,11 +32,16 @@ else(GOOGLEHASH_FOUND)
ei_add_property(EIGEN_MISSING_BACKENDS "GoogleHash, ")
endif(GOOGLEHASH_FOUND)
+
find_package(Adolc)
if(ADOLC_FOUND)
include_directories(${ADOLC_INCLUDES})
ei_add_property(EIGEN_TESTED_BACKENDS "Adolc, ")
- ei_add_test(forward_adolc "" ${ADOLC_LIBRARIES})
+ if(EIGEN_TEST_CXX11)
+ ei_add_test(forward_adolc "" ${ADOLC_LIBRARIES})
+ else()
+ message(STATUS "Adolc found, but tests require C++11 mode")
+ endif()
else(ADOLC_FOUND)
ei_add_property(EIGEN_MISSING_BACKENDS "Adolc, ")
endif(ADOLC_FOUND)
@@ -119,6 +115,7 @@ ei_add_test(polynomialsolver)
ei_add_test(polynomialutils)
ei_add_test(splines)
ei_add_test(gmres)
+ei_add_test(dgmres)
ei_add_test(minres)
ei_add_test(levenberg_marquardt)
ei_add_test(kronecker_product)
@@ -133,6 +130,7 @@ if (NOT CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
ei_add_test(cxx11_tensor_dimension)
ei_add_test(cxx11_tensor_map)
ei_add_test(cxx11_tensor_assign)
+ei_add_test(cxx11_tensor_block_access)
ei_add_test(cxx11_tensor_comparisons)
ei_add_test(cxx11_tensor_forced_eval)
ei_add_test(cxx11_tensor_math)
@@ -144,33 +142,45 @@ ei_add_test(cxx11_tensor_sugar)
ei_add_test(cxx11_tensor_roundings)
ei_add_test(cxx11_tensor_layout_swap)
ei_add_test(cxx11_tensor_io)
-if("${CMAKE_SIZEOF_VOID_P}" EQUAL "8")
- # This test requires __uint128_t which is only available on 64bit systems
- ei_add_test(cxx11_tensor_uint128)
-endif()
+ei_add_test(cxx11_maxsizevector)
endif()
if(EIGEN_TEST_CXX11)
if(EIGEN_TEST_SYCL)
- ei_add_test_sycl(cxx11_tensor_sycl "-std=c++11")
- ei_add_test_sycl(cxx11_tensor_forced_eval_sycl "-std=c++11")
- ei_add_test_sycl(cxx11_tensor_broadcast_sycl "-std=c++11")
- ei_add_test_sycl(cxx11_tensor_device_sycl "-std=c++11")
- ei_add_test_sycl(cxx11_tensor_reduction_sycl "-std=c++11")
- ei_add_test_sycl(cxx11_tensor_morphing_sycl "-std=c++11")
- ei_add_test_sycl(cxx11_tensor_shuffling_sycl "-std=c++11")
- ei_add_test_sycl(cxx11_tensor_padding_sycl "-std=c++11")
- ei_add_test_sycl(cxx11_tensor_builtins_sycl "-std=c++11")
- ei_add_test_sycl(cxx11_tensor_contract_sycl "-std=c++11")
- ei_add_test_sycl(cxx11_tensor_concatenation_sycl "-std=c++11")
- ei_add_test_sycl(cxx11_tensor_reverse_sycl "-std=c++11")
- ei_add_test_sycl(cxx11_tensor_convolution_sycl "-std=c++11")
- ei_add_test_sycl(cxx11_tensor_striding_sycl "-std=c++11")
- ei_add_test_sycl(cxx11_tensor_chipping_sycl "-std=c++11")
+ if(EIGEN_SYCL_TRISYCL)
+ set(CMAKE_CXX_STANDARD 14)
+ set(STD_CXX_FLAG "-std=c++1z")
+ else(EIGEN_SYCL_TRISYCL)
+    # It should be safe to always run these tests as there is some fallback code for
+    # older compilers that don't support C++11.
+ set(CMAKE_CXX_STANDARD 11)
+ set(STD_CXX_FLAG "-std=c++11")
+ endif(EIGEN_SYCL_TRISYCL)
+
+ ei_add_test_sycl(cxx11_tensor_sycl ${STD_CXX_FLAG})
+ ei_add_test_sycl(cxx11_tensor_forced_eval_sycl ${STD_CXX_FLAG})
+ ei_add_test_sycl(cxx11_tensor_broadcast_sycl ${STD_CXX_FLAG})
+ ei_add_test_sycl(cxx11_tensor_device_sycl ${STD_CXX_FLAG})
+ ei_add_test_sycl(cxx11_tensor_reduction_sycl ${STD_CXX_FLAG})
+ ei_add_test_sycl(cxx11_tensor_morphing_sycl ${STD_CXX_FLAG})
+ ei_add_test_sycl(cxx11_tensor_shuffling_sycl ${STD_CXX_FLAG})
+ ei_add_test_sycl(cxx11_tensor_padding_sycl ${STD_CXX_FLAG})
+ ei_add_test_sycl(cxx11_tensor_builtins_sycl ${STD_CXX_FLAG})
+ ei_add_test_sycl(cxx11_tensor_contract_sycl ${STD_CXX_FLAG})
+ ei_add_test_sycl(cxx11_tensor_concatenation_sycl ${STD_CXX_FLAG})
+ ei_add_test_sycl(cxx11_tensor_reverse_sycl ${STD_CXX_FLAG})
+ ei_add_test_sycl(cxx11_tensor_convolution_sycl ${STD_CXX_FLAG})
+ ei_add_test_sycl(cxx11_tensor_striding_sycl ${STD_CXX_FLAG})
+ ei_add_test_sycl(cxx11_tensor_chipping_sycl ${STD_CXX_FLAG})
+ ei_add_test_sycl(cxx11_tensor_layout_swap_sycl ${STD_CXX_FLAG})
+ ei_add_test_sycl(cxx11_tensor_inflation_sycl ${STD_CXX_FLAG})
+ ei_add_test_sycl(cxx11_tensor_generator_sycl ${STD_CXX_FLAG})
+ ei_add_test_sycl(cxx11_tensor_patch_sycl ${STD_CXX_FLAG})
+ ei_add_test_sycl(cxx11_tensor_image_patch_sycl ${STD_CXX_FLAG})
+ ei_add_test_sycl(cxx11_tensor_volume_patch_sycl ${STD_CXX_FLAG})
+ ei_add_test_sycl(cxx11_tensor_argmax_sycl ${STD_CXX_FLAG})
+ ei_add_test_sycl(cxx11_tensor_custom_op_sycl ${STD_CXX_FLAG})
endif(EIGEN_TEST_SYCL)
- # It should be safe to always run these tests as there is some fallback code for
- # older compiler that don't support cxx11.
- set(CMAKE_CXX_STANDARD 11)
ei_add_test(cxx11_eventcount "-pthread" "${CMAKE_THREAD_LIBS_INIT}")
ei_add_test(cxx11_runqueue "-pthread" "${CMAKE_THREAD_LIBS_INIT}")
@@ -204,6 +214,7 @@ if(EIGEN_TEST_CXX11)
ei_add_test(cxx11_tensor_striding)
ei_add_test(cxx11_tensor_notification "-pthread" "${CMAKE_THREAD_LIBS_INIT}")
ei_add_test(cxx11_tensor_thread_pool "-pthread" "${CMAKE_THREAD_LIBS_INIT}")
+ ei_add_test(cxx11_tensor_executor "-pthread" "${CMAKE_THREAD_LIBS_INIT}")
ei_add_test(cxx11_tensor_ref)
ei_add_test(cxx11_tensor_random)
ei_add_test(cxx11_tensor_generator)
@@ -212,6 +223,12 @@ if(EIGEN_TEST_CXX11)
ei_add_test(cxx11_tensor_fft)
ei_add_test(cxx11_tensor_ifft)
ei_add_test(cxx11_tensor_scan)
+ ei_add_test(cxx11_tensor_trace)
+ ei_add_test(cxx11_tensor_move)
+if("${CMAKE_SIZEOF_VOID_P}" EQUAL "8" AND NOT CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+ # This test requires __uint128_t which is only available on 64bit systems
+ ei_add_test(cxx11_tensor_uint128)
+endif()
endif()
@@ -253,26 +270,83 @@ if(CUDA_FOUND AND EIGEN_TEST_CUDA)
cuda_include_directories("${CMAKE_CURRENT_BINARY_DIR}" "${CUDA_TOOLKIT_ROOT_DIR}/include")
set(EIGEN_ADD_TEST_FILENAME_EXTENSION "cu")
- ei_add_test(cxx11_tensor_complex_cuda)
- ei_add_test(cxx11_tensor_complex_cwise_ops_cuda)
- ei_add_test(cxx11_tensor_reduction_cuda)
- ei_add_test(cxx11_tensor_argmax_cuda)
- ei_add_test(cxx11_tensor_cast_float16_cuda)
- ei_add_test(cxx11_tensor_scan_cuda)
+ ei_add_test(cxx11_tensor_complex_gpu)
+ ei_add_test(cxx11_tensor_complex_cwise_ops_gpu)
+ ei_add_test(cxx11_tensor_reduction_gpu)
+ ei_add_test(cxx11_tensor_argmax_gpu)
+ ei_add_test(cxx11_tensor_cast_float16_gpu)
+ ei_add_test(cxx11_tensor_scan_gpu)
# Contractions require arch 3.0 or higher
if (${EIGEN_CUDA_COMPUTE_ARCH} GREATER 29)
ei_add_test(cxx11_tensor_device)
- ei_add_test(cxx11_tensor_cuda)
- ei_add_test(cxx11_tensor_contract_cuda)
- ei_add_test(cxx11_tensor_of_float16_cuda)
+ ei_add_test(cxx11_tensor_gpu)
+ ei_add_test(cxx11_tensor_contract_gpu)
+ ei_add_test(cxx11_tensor_of_float16_gpu)
endif()
# The random number generation code requires arch 3.5 or greater.
if (${EIGEN_CUDA_COMPUTE_ARCH} GREATER 34)
- ei_add_test(cxx11_tensor_random_cuda)
+ ei_add_test(cxx11_tensor_random_gpu)
endif()
unset(EIGEN_ADD_TEST_FILENAME_EXTENSION)
endif()
+
+# Add HIP specific tests
+if (EIGEN_TEST_HIP)
+
+ set(HIP_PATH "/opt/rocm/hip" CACHE STRING "Path to the HIP installation.")
+
+ if (EXISTS ${HIP_PATH})
+
+ list(APPEND CMAKE_MODULE_PATH ${HIP_PATH}/cmake)
+
+ find_package(HIP REQUIRED)
+ if (HIP_FOUND)
+
+ execute_process(COMMAND ${HIP_PATH}/bin/hipconfig --platform OUTPUT_VARIABLE HIP_PLATFORM)
+
+ if (${HIP_PLATFORM} STREQUAL "hcc")
+
+ include_directories(${CMAKE_CURRENT_BINARY_DIR})
+ include_directories(${HIP_PATH}/include)
+
+ set(EIGEN_ADD_TEST_FILENAME_EXTENSION "cu")
+ #
+ # the complex datatype is not yet supported by HIP,
+ # so those tests are left out for now
+ #
+ # ei_add_test(cxx11_tensor_complex_gpu)
+ # ei_add_test(cxx11_tensor_complex_cwise_ops_gpu)
+ #
+ ei_add_test(cxx11_tensor_reduction_gpu)
+ ei_add_test(cxx11_tensor_argmax_gpu)
+ ei_add_test(cxx11_tensor_cast_float16_gpu)
+ ei_add_test(cxx11_tensor_scan_gpu)
+ ei_add_test(cxx11_tensor_device)
+
+ ei_add_test(cxx11_tensor_gpu)
+ ei_add_test(cxx11_tensor_contract_gpu)
+ ei_add_test(cxx11_tensor_of_float16_gpu)
+ ei_add_test(cxx11_tensor_random_gpu)
+
+ unset(EIGEN_ADD_TEST_FILENAME_EXTENSION)
+
+ elseif (${HIP_PLATFORM} STREQUAL "nvcc")
+ message(FATAL_ERROR "HIP_PLATFORM = nvcc is not supported within Eigen")
+ else ()
+ message(FATAL_ERROR "Unknown HIP_PLATFORM = ${HIP_PLATFORM}")
+ endif()
+
+ endif(HIP_FOUND)
+
+ else ()
+
+ message(FATAL_ERROR "EIGEN_TEST_HIP is ON, but the specified HIP_PATH (${HIP_PATH}) does not exist")
+
+ endif()
+
+endif(EIGEN_TEST_HIP)
+
diff --git a/unsupported/test/EulerAngles.cpp b/unsupported/test/EulerAngles.cpp
index 79ee72847..67533e364 100644
--- a/unsupported/test/EulerAngles.cpp
+++ b/unsupported/test/EulerAngles.cpp
@@ -197,6 +197,7 @@ template<typename Scalar> void check_singular_cases(const Scalar& singularBeta)
template<typename Scalar> void eulerangles_manual()
{
typedef Matrix<Scalar,3,1> Vector3;
+ typedef Matrix<Scalar,Dynamic,1> VectorX;
const Vector3 Zero = Vector3::Zero();
const Scalar PI = Scalar(EIGEN_PI);
@@ -213,13 +214,13 @@ template<typename Scalar> void eulerangles_manual()
check_singular_cases(-PI);
// non-singular cases
- VectorXd alpha = VectorXd::LinSpaced(Eigen::Sequential, 20, Scalar(-0.99) * PI, PI);
- VectorXd beta = VectorXd::LinSpaced(Eigen::Sequential, 20, Scalar(-0.49) * PI, Scalar(0.49) * PI);
- VectorXd gamma = VectorXd::LinSpaced(Eigen::Sequential, 20, Scalar(-0.99) * PI, PI);
+ VectorX alpha = VectorX::LinSpaced(Eigen::Sequential, 20, Scalar(-0.99) * PI, PI);
+ VectorX beta = VectorX::LinSpaced(Eigen::Sequential, 20, Scalar(-0.49) * PI, Scalar(0.49) * PI);
+ VectorX gamma = VectorX::LinSpaced(Eigen::Sequential, 20, Scalar(-0.99) * PI, PI);
for (int i = 0; i < alpha.size(); ++i) {
for (int j = 0; j < beta.size(); ++j) {
for (int k = 0; k < gamma.size(); ++k) {
- check_all_var(Vector3d(alpha(i), beta(j), gamma(k)));
+ check_all_var(Vector3(alpha(i), beta(j), gamma(k)));
}
}
}
@@ -272,12 +273,15 @@ template<typename Scalar> void eulerangles_rand()
check_all_var(ea);
}
-void test_EulerAngles()
+EIGEN_DECLARE_TEST(EulerAngles)
{
// Simple cast test
EulerAnglesXYZd onesEd(1, 1, 1);
EulerAnglesXYZf onesEf = onesEd.cast<float>();
VERIFY_IS_APPROX(onesEd, onesEf.cast<double>());
+
+ // Simple Construction from Vector3 test
+ VERIFY_IS_APPROX(onesEd, EulerAnglesXYZd(Vector3d::Ones()));
CALL_SUBTEST_1( eulerangles_manual<float>() );
CALL_SUBTEST_2( eulerangles_manual<double>() );
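
The VectorXd → VectorX change above keeps the sample grids in the scalar type being tested; previously the float instantiation silently built double-precision vectors. A minimal sketch of the distinction, assuming only Eigen/Core and using the patch's VectorX typedef instantiated with Scalar = float:

    #include <Eigen/Core>

    int main() {
      typedef Eigen::Matrix<float, Eigen::Dynamic, 1> VectorX;          // Scalar = float
      VectorX alpha = VectorX::LinSpaced(20, -3.0f, 3.0f);              // float-valued grid
      Eigen::Matrix<float, 3, 1> sample(alpha(0), alpha(1), alpha(2));  // float Vector3
      return sample.size() == 3 ? 0 : 1;
    }
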
diff --git a/unsupported/test/FFTW.cpp b/unsupported/test/FFTW.cpp
index 8b7528fb7..cfe559ebd 100644
--- a/unsupported/test/FFTW.cpp
+++ b/unsupported/test/FFTW.cpp
@@ -225,7 +225,7 @@ void test_return_by_value(int len)
VERIFY( (in1-in).norm() < test_precision<float>() );
}
-void test_FFTW()
+EIGEN_DECLARE_TEST(FFTW)
{
CALL_SUBTEST( test_return_by_value(32) );
//CALL_SUBTEST( ( test_complex2d<float,4,8> () ) ); CALL_SUBTEST( ( test_complex2d<double,4,8> () ) );
diff --git a/unsupported/test/NonLinearOptimization.cpp b/unsupported/test/NonLinearOptimization.cpp
index 1d682dd83..2770c3c49 100644
--- a/unsupported/test/NonLinearOptimization.cpp
+++ b/unsupported/test/NonLinearOptimization.cpp
@@ -565,7 +565,7 @@ void testLmdif1()
// do the computation
lmdif_functor functor;
- DenseIndex nfev;
+ DenseIndex nfev = -1; // initialize to avoid maybe-uninitialized warning
info = LevenbergMarquardt<lmdif_functor>::lmdif1(functor, x, &nfev);
// check return value
@@ -1818,7 +1818,7 @@ void testNistEckerle4(void)
VERIFY_IS_APPROX(x[2], 4.5154121844E+02);
}
-void test_NonLinearOptimization()
+EIGEN_DECLARE_TEST(NonLinearOptimization)
{
// Tests using the examples provided by (c)minpack
CALL_SUBTEST/*_1*/(testChkder());
diff --git a/unsupported/test/NumericalDiff.cpp b/unsupported/test/NumericalDiff.cpp
index 27d888056..6d836413b 100644
--- a/unsupported/test/NumericalDiff.cpp
+++ b/unsupported/test/NumericalDiff.cpp
@@ -24,7 +24,7 @@ struct Functor
int m_inputs, m_values;
Functor() : m_inputs(InputsAtCompileTime), m_values(ValuesAtCompileTime) {}
- Functor(int inputs, int values) : m_inputs(inputs), m_values(values) {}
+ Functor(int inputs_, int values_) : m_inputs(inputs_), m_values(values_) {}
int inputs() const { return m_inputs; }
int values() const { return m_values; }
@@ -107,7 +107,7 @@ void test_central()
VERIFY_IS_APPROX(jac, actual_jac);
}
-void test_NumericalDiff()
+EIGEN_DECLARE_TEST(NumericalDiff)
{
CALL_SUBTEST(test_forward());
CALL_SUBTEST(test_central());
diff --git a/unsupported/test/alignedvector3.cpp b/unsupported/test/alignedvector3.cpp
index 252cb1d3f..fcc89daab 100644
--- a/unsupported/test/alignedvector3.cpp
+++ b/unsupported/test/alignedvector3.cpp
@@ -76,7 +76,7 @@ void alignedvector3()
VERIFY(ss1.str()==ss2.str());
}
-void test_alignedvector3()
+EIGEN_DECLARE_TEST(alignedvector3)
{
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST( alignedvector3<float>() );
diff --git a/unsupported/test/autodiff.cpp b/unsupported/test/autodiff.cpp
index 85743137e..bafea6ae9 100644
--- a/unsupported/test/autodiff.cpp
+++ b/unsupported/test/autodiff.cpp
@@ -44,7 +44,7 @@ struct TestFunc1
int m_inputs, m_values;
TestFunc1() : m_inputs(InputsAtCompileTime), m_values(ValuesAtCompileTime) {}
- TestFunc1(int inputs, int values) : m_inputs(inputs), m_values(values) {}
+ TestFunc1(int inputs_, int values_) : m_inputs(inputs_), m_values(values_) {}
int inputs() const { return m_inputs; }
int values() const { return m_values; }
@@ -306,6 +306,8 @@ double bug_1222() {
return denom.value();
}
+#ifdef EIGEN_TEST_PART_5
+
double bug_1223() {
using std::min;
typedef Eigen::AutoDiffScalar<Eigen::Vector3d> AD;
@@ -326,8 +328,8 @@ double bug_1223() {
// regression test for some compilation issues with specializations of ScalarBinaryOpTraits
void bug_1260() {
- Matrix4d A;
- Vector4d v;
+ Matrix4d A = Matrix4d::Ones();
+ Vector4d v = Vector4d::Ones();
A*v;
}
@@ -336,7 +338,7 @@ double bug_1261() {
typedef AutoDiffScalar<Matrix2d> AD;
typedef Matrix<AD,2,1> VectorAD;
- VectorAD v;
+ VectorAD v(0.,0.);
const AD maxVal = v.maxCoeff();
const AD minVal = v.minCoeff();
return maxVal.value() + minVal.value();
@@ -344,13 +346,15 @@ double bug_1261() {
double bug_1264() {
typedef AutoDiffScalar<Vector2d> AD;
- const AD s;
- const Matrix<AD, 3, 1> v1;
+ const AD s = 0.;
+ const Matrix<AD, 3, 1> v1(0.,0.,0.);
const Matrix<AD, 3, 1> v2 = (s + 3.0) * v1;
return v2(0).value();
}
-void test_autodiff()
+#endif
+
+EIGEN_DECLARE_TEST(autodiff)
{
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1( test_autodiff_scalar<1>() );
@@ -359,9 +363,9 @@ void test_autodiff()
CALL_SUBTEST_4( test_autodiff_hessian<1>() );
}
- bug_1222();
- bug_1223();
- bug_1260();
- bug_1261();
+ CALL_SUBTEST_5( bug_1222() );
+ CALL_SUBTEST_5( bug_1223() );
+ CALL_SUBTEST_5( bug_1260() );
+ CALL_SUBTEST_5( bug_1261() );
}
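
The explicit initializations added above (Matrix4d::Ones(), VectorAD v(0.,0.), const AD s = 0.) exist because default construction leaves the values unset, which the regression functions then read. A minimal sketch of the now-initialized pattern from bug_1264, assuming only Eigen/Core and the unsupported AutoDiff module:

    #include <Eigen/Core>
    #include <unsupported/Eigen/AutoDiff>

    int main() {
      typedef Eigen::AutoDiffScalar<Eigen::Vector2d> AD;
      const AD s = 0.;                               // scalar conversion: value 0, derivatives zeroed
      const Eigen::Matrix<AD, 3, 1> v1(0., 0., 0.);  // every coefficient value-initialized
      const Eigen::Matrix<AD, 3, 1> v2 = (s + 3.0) * v1;
      return static_cast<int>(v2(0).value());        // 0
    }
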
diff --git a/unsupported/test/autodiff_scalar.cpp b/unsupported/test/autodiff_scalar.cpp
index 4df2f5c57..e81a7788b 100644
--- a/unsupported/test/autodiff_scalar.cpp
+++ b/unsupported/test/autodiff_scalar.cpp
@@ -72,12 +72,30 @@ template<typename Scalar> void check_hyperbolic_functions()
VERIFY_IS_APPROX(res3.derivatives().x(), Scalar(0.339540557256150));
}
-void test_autodiff_scalar()
+template <typename Scalar>
+void check_limits_specialization()
+{
+ typedef Eigen::Matrix<Scalar, 1, 1> Deriv;
+ typedef Eigen::AutoDiffScalar<Deriv> AD;
+
+ typedef std::numeric_limits<AD> A;
+ typedef std::numeric_limits<Scalar> B;
+
+ // workaround "unused typedef" warning:
+ VERIFY(!bool(internal::is_same<B, A>::value));
+
+#if EIGEN_HAS_CXX11
+ VERIFY(bool(std::is_base_of<B, A>::value));
+#endif
+}
+
+EIGEN_DECLARE_TEST(autodiff_scalar)
{
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1( check_atan2<float>() );
CALL_SUBTEST_2( check_atan2<double>() );
CALL_SUBTEST_3( check_hyperbolic_functions<float>() );
CALL_SUBTEST_4( check_hyperbolic_functions<double>() );
+ CALL_SUBTEST_5( check_limits_specialization<double>());
}
}
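
check_limits_specialization asserts that the std::numeric_limits specialization for AutoDiffScalar inherits from the limits of the underlying scalar. The same property expressed as a standalone compile-time check (C++11, same types as the test):

    #include <Eigen/Core>
    #include <unsupported/Eigen/AutoDiff>
    #include <limits>
    #include <type_traits>

    typedef Eigen::AutoDiffScalar<Eigen::Matrix<double, 1, 1> > AD;

    // Mirrors the is_base_of check performed by the new subtest.
    static_assert(std::is_base_of<std::numeric_limits<double>,
                                  std::numeric_limits<AD> >::value,
                  "numeric_limits<AD> should derive from numeric_limits<double>");

    int main() { return 0; }
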
diff --git a/unsupported/test/cxx11_eventcount.cpp b/unsupported/test/cxx11_eventcount.cpp
index 3b598bf42..2f1418684 100644
--- a/unsupported/test/cxx11_eventcount.cpp
+++ b/unsupported/test/cxx11_eventcount.cpp
@@ -135,7 +135,7 @@ static void test_stress_eventcount()
}
}
-void test_cxx11_eventcount()
+EIGEN_DECLARE_TEST(cxx11_eventcount)
{
CALL_SUBTEST(test_basic_eventcount());
CALL_SUBTEST(test_stress_eventcount());
diff --git a/unsupported/test/cxx11_maxsizevector.cpp b/unsupported/test/cxx11_maxsizevector.cpp
new file mode 100644
index 000000000..46b689a8e
--- /dev/null
+++ b/unsupported/test/cxx11_maxsizevector.cpp
@@ -0,0 +1,77 @@
+#include "main.h"
+
+#include <exception> // std::exception
+
+#include <unsupported/Eigen/CXX11/Tensor>
+
+struct Foo
+{
+ static Index object_count;
+ static Index object_limit;
+ EIGEN_ALIGN_TO_BOUNDARY(128) int dummy;
+
+ Foo(int x=0) : dummy(x)
+ {
+#ifdef EIGEN_EXCEPTIONS
+ // TODO: Is this the correct way to handle this?
+ if (Foo::object_count > Foo::object_limit) { std::cout << "\nThrow!\n"; throw Foo::Fail(); }
+#endif
+ std::cout << '+';
+ ++Foo::object_count;
+ eigen_assert((internal::UIntPtr(this) & (127)) == 0);
+ }
+ Foo(const Foo&)
+ {
+ std::cout << 'c';
+ ++Foo::object_count;
+ eigen_assert((internal::UIntPtr(this) & (127)) == 0);
+ }
+
+ ~Foo()
+ {
+ std::cout << '~';
+ --Foo::object_count;
+ }
+
+ class Fail : public std::exception {};
+};
+
+Index Foo::object_count = 0;
+Index Foo::object_limit = 0;
+
+
+
+EIGEN_DECLARE_TEST(cxx11_maxsizevector)
+{
+ typedef MaxSizeVector<Foo> VectorX;
+ Foo::object_count = 0;
+ for(int r = 0; r < g_repeat; r++) {
+ Index rows = internal::random<Index>(3,30);
+ Foo::object_limit = internal::random<Index>(0, rows - 2);
+ std::cout << "object_limit = " << Foo::object_limit << std::endl;
+ bool exception_raised = false;
+#ifdef EIGEN_EXCEPTIONS
+ try
+ {
+#endif
+ std::cout << "\nVectorX m(" << rows << ");\n";
+ VectorX vect(rows);
+ for(int i=0; i<rows; ++i)
+ vect.push_back(Foo());
+#ifdef EIGEN_EXCEPTIONS
+ VERIFY(false); // not reached if exceptions are enabled
+ }
+ catch (const Foo::Fail&) { exception_raised = true; }
+ VERIFY(exception_raised);
+#endif
+ VERIFY_IS_EQUAL(Index(0), Foo::object_count);
+
+ {
+ Foo::object_limit = rows+1;
+ VectorX vect2(rows, Foo());
+ VERIFY_IS_EQUAL(Foo::object_count, rows);
+ }
+ VERIFY_IS_EQUAL(Index(0), Foo::object_count);
+ std::cout << '\n';
+ }
+}
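
The test above exercises MaxSizeVector's exception safety: if a copy constructor throws during push_back, the elements constructed so far must still be destroyed, so Foo::object_count returns to zero. A minimal usage sketch of the container itself, assuming it is pulled in via the Tensor header exactly as in the test:

    #include <unsupported/Eigen/CXX11/Tensor>
    #include <iostream>

    int main() {
      Eigen::MaxSizeVector<int> v(4);                     // fixed capacity 4, size 0
      for (int i = 0; i < 4; ++i) v.push_back(i * i);
      std::cout << v.size() << " " << v[3] << std::endl;  // prints "4 9"
      return 0;
    }
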
diff --git a/unsupported/test/cxx11_meta.cpp b/unsupported/test/cxx11_meta.cpp
index 8911c59d8..510e11032 100644
--- a/unsupported/test/cxx11_meta.cpp
+++ b/unsupported/test/cxx11_meta.cpp
@@ -340,7 +340,7 @@ static void test_array_misc()
VERIFY_IS_EQUAL((instantiate_by_c_array<dummy_inst, int, 5>(data).c), 5);
}
-void test_cxx11_meta()
+EIGEN_DECLARE_TEST(cxx11_meta)
{
CALL_SUBTEST(test_gen_numeric_list());
CALL_SUBTEST(test_concat());
diff --git a/unsupported/test/cxx11_non_blocking_thread_pool.cpp b/unsupported/test/cxx11_non_blocking_thread_pool.cpp
index 2c5765ce4..90b330fdc 100644
--- a/unsupported/test/cxx11_non_blocking_thread_pool.cpp
+++ b/unsupported/test/cxx11_non_blocking_thread_pool.cpp
@@ -18,16 +18,16 @@ static void test_create_destroy_empty_pool()
// Just create and destroy the pool. This will wind up and tear down worker
// threads. Ensure there are no issues in that logic.
for (int i = 0; i < 16; ++i) {
- NonBlockingThreadPool tp(i);
+ ThreadPool tp(i);
}
}
-static void test_parallelism()
+static void test_parallelism(bool allow_spinning)
{
// Test we never-ever fail to match available tasks with idle threads.
const int kThreads = 16; // code below expects that this is a multiple of 4
- NonBlockingThreadPool tp(kThreads);
+ ThreadPool tp(kThreads, allow_spinning);
VERIFY_IS_EQUAL(tp.NumThreads(), kThreads);
VERIFY_IS_EQUAL(tp.CurrentThreadId(), -1);
for (int iter = 0; iter < 100; ++iter) {
@@ -104,7 +104,7 @@ static void test_parallelism()
static void test_cancel()
{
- NonBlockingThreadPool tp(2);
+ ThreadPool tp(2);
// Schedule a large number of closure that each sleeps for one second. This
// will keep the thread pool busy for much longer than the default test timeout.
@@ -116,9 +116,63 @@ static void test_cancel()
tp.Cancel();
}
-void test_cxx11_non_blocking_thread_pool()
+static void test_pool_partitions() {
+ const int kThreads = 2;
+ ThreadPool tp(kThreads);
+
+ // Assign each thread to its own partition, so that stealing work from other
+ // queues only occurs globally when a thread is idle.
+ std::vector<std::pair<unsigned, unsigned>> steal_partitions(kThreads);
+ for (int i = 0; i < kThreads; ++i) {
+ steal_partitions[i] = std::make_pair(i, i + 1);
+ }
+ tp.SetStealPartitions(steal_partitions);
+
+ std::atomic<int> running(0);
+ std::atomic<int> done(0);
+ std::atomic<int> phase(0);
+
+ // Schedule kThreads tasks and ensure that they all are running.
+ for (int i = 0; i < kThreads; ++i) {
+ tp.Schedule([&]() {
+ const int thread_id = tp.CurrentThreadId();
+ VERIFY_GE(thread_id, 0);
+ VERIFY_LE(thread_id, kThreads - 1);
+ ++running;
+ while (phase < 1) {
+ }
+ ++done;
+ });
+ }
+ while (running != kThreads) {
+ }
+ // Schedule each closure to only run on thread 'i' and verify that it does.
+ for (int i = 0; i < kThreads; ++i) {
+ tp.ScheduleWithHint(
+ [&, i]() {
+ ++running;
+ const int thread_id = tp.CurrentThreadId();
+ VERIFY_IS_EQUAL(thread_id, i);
+ while (phase < 2) {
+ }
+ ++done;
+ },
+ i, i + 1);
+ }
+ running = 0;
+ phase = 1;
+ while (running != kThreads) {
+ }
+ running = 0;
+ phase = 2;
+}
+
+
+EIGEN_DECLARE_TEST(cxx11_non_blocking_thread_pool)
{
CALL_SUBTEST(test_create_destroy_empty_pool());
- CALL_SUBTEST(test_parallelism());
+ CALL_SUBTEST(test_parallelism(true));
+ CALL_SUBTEST(test_parallelism(false));
CALL_SUBTEST(test_cancel());
+ CALL_SUBTEST(test_pool_partitions());
}
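
SetStealPartitions and ScheduleWithHint are the two entry points exercised by test_pool_partitions: each thread gets the half-open stealing range [i, i+1), i.e. only its own queue, and ScheduleWithHint(fn, i, i+1) pins a closure to thread i. A condensed sketch of the same pattern outside the test harness, assuming the CXX11 ThreadPool module:

    #define EIGEN_USE_THREADS
    #include <unsupported/Eigen/CXX11/ThreadPool>
    #include <utility>
    #include <vector>

    int main() {
      const int kThreads = 2;
      Eigen::ThreadPool tp(kThreads);

      // Restrict stealing: thread i may only take work from partition [i, i+1).
      std::vector<std::pair<unsigned, unsigned> > partitions(kThreads);
      for (int i = 0; i < kThreads; ++i)
        partitions[i] = std::make_pair(i, i + 1);
      tp.SetStealPartitions(partitions);

      // Pin one closure to each thread; the hint range selects the target queue.
      for (int i = 0; i < kThreads; ++i)
        tp.ScheduleWithHint([] { /* runs on the hinted thread */ }, i, i + 1);
      return 0;  // the pool's destructor joins its worker threads
    }
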
diff --git a/unsupported/test/cxx11_runqueue.cpp b/unsupported/test/cxx11_runqueue.cpp
index 91f690114..8fc5a3074 100644
--- a/unsupported/test/cxx11_runqueue.cpp
+++ b/unsupported/test/cxx11_runqueue.cpp
@@ -227,7 +227,7 @@ void test_stress_runqueue()
VERIFY(total.load() == 0);
}
-void test_cxx11_runqueue()
+EIGEN_DECLARE_TEST(cxx11_runqueue)
{
CALL_SUBTEST_1(test_basic_runqueue());
CALL_SUBTEST_2(test_empty_runqueue());
diff --git a/unsupported/test/cxx11_tensor_argmax.cpp b/unsupported/test/cxx11_tensor_argmax.cpp
index 037767270..4a0c8967b 100644
--- a/unsupported/test/cxx11_tensor_argmax.cpp
+++ b/unsupported/test/cxx11_tensor_argmax.cpp
@@ -273,7 +273,7 @@ static void test_argmin_dim()
}
}
-void test_cxx11_tensor_argmax()
+EIGEN_DECLARE_TEST(cxx11_tensor_argmax)
{
CALL_SUBTEST(test_simple_index_tuples<RowMajor>());
CALL_SUBTEST(test_simple_index_tuples<ColMajor>());
diff --git a/unsupported/test/cxx11_tensor_argmax_cuda.cu b/unsupported/test/cxx11_tensor_argmax_gpu.cu
index 653443dc5..79f4066e9 100644
--- a/unsupported/test/cxx11_tensor_argmax_cuda.cu
+++ b/unsupported/test/cxx11_tensor_argmax_gpu.cu
@@ -9,19 +9,18 @@
#define EIGEN_TEST_NO_LONGDOUBLE
-#define EIGEN_TEST_FUNC cxx11_tensor_cuda
+
#define EIGEN_USE_GPU
-#if defined __CUDACC_VER__ && __CUDACC_VER__ >= 70500
-#include <cuda_fp16.h>
-#endif
#include "main.h"
#include <unsupported/Eigen/CXX11/Tensor>
+#include <unsupported/Eigen/CXX11/src/Tensor/TensorGpuHipCudaDefines.h>
+
using Eigen::Tensor;
template <int Layout>
-void test_cuda_simple_argmax()
+void test_gpu_simple_argmax()
{
Tensor<double, 3, Layout> in(Eigen::array<DenseIndex, 3>(72,53,97));
Tensor<DenseIndex, 1, Layout> out_max(Eigen::array<DenseIndex, 1>(1));
@@ -37,13 +36,13 @@ void test_cuda_simple_argmax()
double* d_in;
DenseIndex* d_out_max;
DenseIndex* d_out_min;
- cudaMalloc((void**)(&d_in), in_bytes);
- cudaMalloc((void**)(&d_out_max), out_bytes);
- cudaMalloc((void**)(&d_out_min), out_bytes);
+ gpuMalloc((void**)(&d_in), in_bytes);
+ gpuMalloc((void**)(&d_out_max), out_bytes);
+ gpuMalloc((void**)(&d_out_min), out_bytes);
- cudaMemcpy(d_in, in.data(), in_bytes, cudaMemcpyHostToDevice);
+ gpuMemcpy(d_in, in.data(), in_bytes, gpuMemcpyHostToDevice);
- Eigen::CudaStreamDevice stream;
+ Eigen::GpuStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<double, 3, Layout>, Aligned > gpu_in(d_in, Eigen::array<DenseIndex, 3>(72,53,97));
@@ -53,20 +52,20 @@ void test_cuda_simple_argmax()
gpu_out_max.device(gpu_device) = gpu_in.argmax();
gpu_out_min.device(gpu_device) = gpu_in.argmin();
- assert(cudaMemcpyAsync(out_max.data(), d_out_max, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
- assert(cudaMemcpyAsync(out_min.data(), d_out_min, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
- assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
+ assert(gpuMemcpyAsync(out_max.data(), d_out_max, out_bytes, gpuMemcpyDeviceToHost, gpu_device.stream()) == gpuSuccess);
+ assert(gpuMemcpyAsync(out_min.data(), d_out_min, out_bytes, gpuMemcpyDeviceToHost, gpu_device.stream()) == gpuSuccess);
+ assert(gpuStreamSynchronize(gpu_device.stream()) == gpuSuccess);
VERIFY_IS_EQUAL(out_max(Eigen::array<DenseIndex, 1>(0)), 72*53*97 - 1);
VERIFY_IS_EQUAL(out_min(Eigen::array<DenseIndex, 1>(0)), 0);
- cudaFree(d_in);
- cudaFree(d_out_max);
- cudaFree(d_out_min);
+ gpuFree(d_in);
+ gpuFree(d_out_max);
+ gpuFree(d_out_min);
}
template <int DataLayout>
-void test_cuda_argmax_dim()
+void test_gpu_argmax_dim()
{
Tensor<float, 4, DataLayout> tensor(2,3,5,7);
std::vector<int> dims;
@@ -100,12 +99,12 @@ void test_cuda_argmax_dim()
float* d_in;
DenseIndex* d_out;
- cudaMalloc((void**)(&d_in), in_bytes);
- cudaMalloc((void**)(&d_out), out_bytes);
+ gpuMalloc((void**)(&d_in), in_bytes);
+ gpuMalloc((void**)(&d_out), out_bytes);
- cudaMemcpy(d_in, tensor.data(), in_bytes, cudaMemcpyHostToDevice);
+ gpuMemcpy(d_in, tensor.data(), in_bytes, gpuMemcpyHostToDevice);
- Eigen::CudaStreamDevice stream;
+ Eigen::GpuStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<float, 4, DataLayout>, Aligned > gpu_in(d_in, Eigen::array<DenseIndex, 4>(2, 3, 5, 7));
@@ -113,8 +112,8 @@ void test_cuda_argmax_dim()
gpu_out.device(gpu_device) = gpu_in.argmax(dim);
- assert(cudaMemcpyAsync(tensor_arg.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
- assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
+ assert(gpuMemcpyAsync(tensor_arg.data(), d_out, out_bytes, gpuMemcpyDeviceToHost, gpu_device.stream()) == gpuSuccess);
+ assert(gpuStreamSynchronize(gpu_device.stream()) == gpuSuccess);
VERIFY_IS_EQUAL(tensor_arg.size(),
size_t(2*3*5*7 / tensor.dimension(dim)));
@@ -137,25 +136,25 @@ void test_cuda_argmax_dim()
}
}
- cudaMemcpy(d_in, tensor.data(), in_bytes, cudaMemcpyHostToDevice);
+ gpuMemcpy(d_in, tensor.data(), in_bytes, gpuMemcpyHostToDevice);
gpu_out.device(gpu_device) = gpu_in.argmax(dim);
- assert(cudaMemcpyAsync(tensor_arg.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
- assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
+ assert(gpuMemcpyAsync(tensor_arg.data(), d_out, out_bytes, gpuMemcpyDeviceToHost, gpu_device.stream()) == gpuSuccess);
+ assert(gpuStreamSynchronize(gpu_device.stream()) == gpuSuccess);
for (DenseIndex n = 0; n < tensor_arg.size(); ++n) {
// Expect max to be in the last index of the reduced dimension
VERIFY_IS_EQUAL(tensor_arg.data()[n], tensor.dimension(dim) - 1);
}
- cudaFree(d_in);
- cudaFree(d_out);
+ gpuFree(d_in);
+ gpuFree(d_out);
}
}
template <int DataLayout>
-void test_cuda_argmin_dim()
+void test_gpu_argmin_dim()
{
Tensor<float, 4, DataLayout> tensor(2,3,5,7);
std::vector<int> dims;
@@ -189,12 +188,12 @@ void test_cuda_argmin_dim()
float* d_in;
DenseIndex* d_out;
- cudaMalloc((void**)(&d_in), in_bytes);
- cudaMalloc((void**)(&d_out), out_bytes);
+ gpuMalloc((void**)(&d_in), in_bytes);
+ gpuMalloc((void**)(&d_out), out_bytes);
- cudaMemcpy(d_in, tensor.data(), in_bytes, cudaMemcpyHostToDevice);
+ gpuMemcpy(d_in, tensor.data(), in_bytes, gpuMemcpyHostToDevice);
- Eigen::CudaStreamDevice stream;
+ Eigen::GpuStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<float, 4, DataLayout>, Aligned > gpu_in(d_in, Eigen::array<DenseIndex, 4>(2, 3, 5, 7));
@@ -202,8 +201,8 @@ void test_cuda_argmin_dim()
gpu_out.device(gpu_device) = gpu_in.argmin(dim);
- assert(cudaMemcpyAsync(tensor_arg.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
- assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
+ assert(gpuMemcpyAsync(tensor_arg.data(), d_out, out_bytes, gpuMemcpyDeviceToHost, gpu_device.stream()) == gpuSuccess);
+ assert(gpuStreamSynchronize(gpu_device.stream()) == gpuSuccess);
VERIFY_IS_EQUAL(tensor_arg.size(),
2*3*5*7 / tensor.dimension(dim));
@@ -226,29 +225,29 @@ void test_cuda_argmin_dim()
}
}
- cudaMemcpy(d_in, tensor.data(), in_bytes, cudaMemcpyHostToDevice);
+ gpuMemcpy(d_in, tensor.data(), in_bytes, gpuMemcpyHostToDevice);
gpu_out.device(gpu_device) = gpu_in.argmin(dim);
- assert(cudaMemcpyAsync(tensor_arg.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
- assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
+ assert(gpuMemcpyAsync(tensor_arg.data(), d_out, out_bytes, gpuMemcpyDeviceToHost, gpu_device.stream()) == gpuSuccess);
+ assert(gpuStreamSynchronize(gpu_device.stream()) == gpuSuccess);
for (DenseIndex n = 0; n < tensor_arg.size(); ++n) {
// Expect max to be in the last index of the reduced dimension
VERIFY_IS_EQUAL(tensor_arg.data()[n], tensor.dimension(dim) - 1);
}
- cudaFree(d_in);
- cudaFree(d_out);
+ gpuFree(d_in);
+ gpuFree(d_out);
}
}
-void test_cxx11_tensor_cuda()
+EIGEN_DECLARE_TEST(cxx11_tensor_argmax_gpu)
{
- CALL_SUBTEST_1(test_cuda_simple_argmax<RowMajor>());
- CALL_SUBTEST_1(test_cuda_simple_argmax<ColMajor>());
- CALL_SUBTEST_2(test_cuda_argmax_dim<RowMajor>());
- CALL_SUBTEST_2(test_cuda_argmax_dim<ColMajor>());
- CALL_SUBTEST_3(test_cuda_argmin_dim<RowMajor>());
- CALL_SUBTEST_3(test_cuda_argmin_dim<ColMajor>());
+ CALL_SUBTEST_1(test_gpu_simple_argmax<RowMajor>());
+ CALL_SUBTEST_1(test_gpu_simple_argmax<ColMajor>());
+ CALL_SUBTEST_2(test_gpu_argmax_dim<RowMajor>());
+ CALL_SUBTEST_2(test_gpu_argmax_dim<ColMajor>());
+ CALL_SUBTEST_3(test_gpu_argmin_dim<RowMajor>());
+ CALL_SUBTEST_3(test_gpu_argmin_dim<ColMajor>());
}
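
The cuda* → gpu* rename works because TensorGpuHipCudaDefines.h maps the gpu-prefixed names onto whichever backend the file is being compiled for. An illustrative, reduced sketch of that kind of mapping for the calls used in this test (not the verbatim header, which covers many more symbols; the backend switch is shown here as EIGEN_USE_HIP for illustration):

    #if defined(EIGEN_USE_HIP)
    #define gpuMalloc             hipMalloc
    #define gpuMemcpy             hipMemcpy
    #define gpuMemcpyAsync        hipMemcpyAsync
    #define gpuMemcpyHostToDevice hipMemcpyHostToDevice
    #define gpuMemcpyDeviceToHost hipMemcpyDeviceToHost
    #define gpuStreamSynchronize  hipStreamSynchronize
    #define gpuFree               hipFree
    #define gpuSuccess            hipSuccess
    #else  // CUDA
    #define gpuMalloc             cudaMalloc
    #define gpuMemcpy             cudaMemcpy
    #define gpuMemcpyAsync        cudaMemcpyAsync
    #define gpuMemcpyHostToDevice cudaMemcpyHostToDevice
    #define gpuMemcpyDeviceToHost cudaMemcpyDeviceToHost
    #define gpuStreamSynchronize  cudaStreamSynchronize
    #define gpuFree               cudaFree
    #define gpuSuccess            cudaSuccess
    #endif
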
diff --git a/unsupported/test/cxx11_tensor_argmax_sycl.cpp b/unsupported/test/cxx11_tensor_argmax_sycl.cpp
new file mode 100644
index 000000000..0bbb0f6dc
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_argmax_sycl.cpp
@@ -0,0 +1,245 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2016
+// Mehdi Goli Codeplay Software Ltd.
+// Ralph Potter Codeplay Software Ltd.
+// Luke Iwanski Codeplay Software Ltd.
+// Contact: <eigen@codeplay.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#define EIGEN_TEST_NO_LONGDOUBLE
+#define EIGEN_TEST_NO_COMPLEX
+
+#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int64_t
+#define EIGEN_USE_SYCL
+
+#include "main.h"
+#include <unsupported/Eigen/CXX11/Tensor>
+
+using Eigen::array;
+using Eigen::SyclDevice;
+using Eigen::Tensor;
+using Eigen::TensorMap;
+
+template <typename DataType, int Layout, typename DenseIndex>
+static void test_sycl_simple_argmax(const Eigen::SyclDevice &sycl_device){
+
+ Tensor<DataType, 3, Layout, DenseIndex> in(Eigen::array<DenseIndex, 3>{{2,2,2}});
+ Tensor<DenseIndex, 0, Layout, DenseIndex> out_max;
+ Tensor<DenseIndex, 0, Layout, DenseIndex> out_min;
+ in.setRandom();
+ in *= in.constant(100.0);
+ in(0, 0, 0) = -1000.0;
+ in(1, 1, 1) = 1000.0;
+
+ std::size_t in_bytes = in.size() * sizeof(DataType);
+ std::size_t out_bytes = out_max.size() * sizeof(DenseIndex);
+
+ DataType * d_in = static_cast<DataType*>(sycl_device.allocate(in_bytes));
+ DenseIndex* d_out_max = static_cast<DenseIndex*>(sycl_device.allocate(out_bytes));
+ DenseIndex* d_out_min = static_cast<DenseIndex*>(sycl_device.allocate(out_bytes));
+
+ Eigen::TensorMap<Eigen::Tensor<DataType, 3, Layout, DenseIndex> > gpu_in(d_in, Eigen::array<DenseIndex, 3>{{2,2,2}});
+ Eigen::TensorMap<Eigen::Tensor<DenseIndex, 0, Layout, DenseIndex> > gpu_out_max(d_out_max);
+ Eigen::TensorMap<Eigen::Tensor<DenseIndex, 0, Layout, DenseIndex> > gpu_out_min(d_out_min);
+ sycl_device.memcpyHostToDevice(d_in, in.data(),in_bytes);
+
+ gpu_out_max.device(sycl_device) = gpu_in.argmax();
+ gpu_out_min.device(sycl_device) = gpu_in.argmin();
+
+ sycl_device.memcpyDeviceToHost(out_max.data(), d_out_max, out_bytes);
+ sycl_device.memcpyDeviceToHost(out_min.data(), d_out_min, out_bytes);
+
+ VERIFY_IS_EQUAL(out_max(), 2*2*2 - 1);
+ VERIFY_IS_EQUAL(out_min(), 0);
+
+ sycl_device.deallocate(d_in);
+ sycl_device.deallocate(d_out_max);
+ sycl_device.deallocate(d_out_min);
+}
+
+
+template <typename DataType, int DataLayout, typename DenseIndex>
+static void test_sycl_argmax_dim(const Eigen::SyclDevice &sycl_device)
+{
+ DenseIndex sizeDim0=9;
+ DenseIndex sizeDim1=3;
+ DenseIndex sizeDim2=5;
+ DenseIndex sizeDim3=7;
+ Tensor<DataType, 4, DataLayout, DenseIndex> tensor(sizeDim0,sizeDim1,sizeDim2,sizeDim3);
+
+ std::vector<DenseIndex> dims;
+ dims.push_back(sizeDim0); dims.push_back(sizeDim1); dims.push_back(sizeDim2); dims.push_back(sizeDim3);
+ for (DenseIndex dim = 0; dim < 4; ++dim) {
+
+ array<DenseIndex, 3> out_shape;
+ for (DenseIndex d = 0; d < 3; ++d) out_shape[d] = (d < dim) ? dims[d] : dims[d+1];
+
+ Tensor<DenseIndex, 3, DataLayout, DenseIndex> tensor_arg(out_shape);
+
+ array<DenseIndex, 4> ix;
+ for (DenseIndex i = 0; i < sizeDim0; ++i) {
+ for (DenseIndex j = 0; j < sizeDim1; ++j) {
+ for (DenseIndex k = 0; k < sizeDim2; ++k) {
+ for (DenseIndex l = 0; l < sizeDim3; ++l) {
+ ix[0] = i; ix[1] = j; ix[2] = k; ix[3] = l;
+ // suppose dim == 1, then for all i, k, l, set tensor(i, 0, k, l) = 10.0
+ tensor(ix)=(ix[dim] != 0)?-1.0:10.0;
+ }
+ }
+ }
+ }
+
+ std::size_t in_bytes = tensor.size() * sizeof(DataType);
+ std::size_t out_bytes = tensor_arg.size() * sizeof(DenseIndex);
+
+
+ DataType * d_in = static_cast<DataType*>(sycl_device.allocate(in_bytes));
+ DenseIndex* d_out= static_cast<DenseIndex*>(sycl_device.allocate(out_bytes));
+
+ Eigen::TensorMap<Eigen::Tensor<DataType, 4, DataLayout, DenseIndex> > gpu_in(d_in, Eigen::array<DenseIndex, 4>{{sizeDim0,sizeDim1,sizeDim2,sizeDim3}});
+ Eigen::TensorMap<Eigen::Tensor<DenseIndex, 3, DataLayout, DenseIndex> > gpu_out(d_out, out_shape);
+
+ sycl_device.memcpyHostToDevice(d_in, tensor.data(),in_bytes);
+ gpu_out.device(sycl_device) = gpu_in.argmax(dim);
+ sycl_device.memcpyDeviceToHost(tensor_arg.data(), d_out, out_bytes);
+
+ VERIFY_IS_EQUAL(static_cast<size_t>(tensor_arg.size()),
+ size_t(sizeDim0*sizeDim1*sizeDim2*sizeDim3 / tensor.dimension(dim)));
+
+ for (DenseIndex n = 0; n < tensor_arg.size(); ++n) {
+ // Expect max to be in the first index of the reduced dimension
+ VERIFY_IS_EQUAL(tensor_arg.data()[n], 0);
+ }
+
+ sycl_device.synchronize();
+
+ for (DenseIndex i = 0; i < sizeDim0; ++i) {
+ for (DenseIndex j = 0; j < sizeDim1; ++j) {
+ for (DenseIndex k = 0; k < sizeDim2; ++k) {
+ for (DenseIndex l = 0; l < sizeDim3; ++l) {
+ ix[0] = i; ix[1] = j; ix[2] = k; ix[3] = l;
+ // suppose dim == 1, then for all i, k, l, set tensor(i, 2, k, l) = 20.0
+ tensor(ix)=(ix[dim] != tensor.dimension(dim) - 1)?-1.0:20.0;
+ }
+ }
+ }
+ }
+
+ sycl_device.memcpyHostToDevice(d_in, tensor.data(),in_bytes);
+ gpu_out.device(sycl_device) = gpu_in.argmax(dim);
+ sycl_device.memcpyDeviceToHost(tensor_arg.data(), d_out, out_bytes);
+
+ for (DenseIndex n = 0; n < tensor_arg.size(); ++n) {
+ // Expect max to be in the last index of the reduced dimension
+ VERIFY_IS_EQUAL(tensor_arg.data()[n], tensor.dimension(dim) - 1);
+ }
+ sycl_device.deallocate(d_in);
+ sycl_device.deallocate(d_out);
+ }
+}
+
+template <typename DataType, int DataLayout, typename DenseIndex>
+static void test_sycl_argmin_dim(const Eigen::SyclDevice &sycl_device)
+{
+ DenseIndex sizeDim0=9;
+ DenseIndex sizeDim1=3;
+ DenseIndex sizeDim2=5;
+ DenseIndex sizeDim3=7;
+ Tensor<DataType, 4, DataLayout, DenseIndex> tensor(sizeDim0,sizeDim1,sizeDim2,sizeDim3);
+
+ std::vector<DenseIndex> dims;
+ dims.push_back(sizeDim0); dims.push_back(sizeDim1); dims.push_back(sizeDim2); dims.push_back(sizeDim3);
+ for (DenseIndex dim = 0; dim < 4; ++dim) {
+
+ array<DenseIndex, 3> out_shape;
+ for (DenseIndex d = 0; d < 3; ++d) out_shape[d] = (d < dim) ? dims[d] : dims[d+1];
+
+ Tensor<DenseIndex, 3, DataLayout, DenseIndex> tensor_arg(out_shape);
+
+ array<DenseIndex, 4> ix;
+ for (DenseIndex i = 0; i < sizeDim0; ++i) {
+ for (DenseIndex j = 0; j < sizeDim1; ++j) {
+ for (DenseIndex k = 0; k < sizeDim2; ++k) {
+ for (DenseIndex l = 0; l < sizeDim3; ++l) {
+ ix[0] = i; ix[1] = j; ix[2] = k; ix[3] = l;
+ // suppose dim == 1, then for all i, k, l, set tensor(i, 0, k, l) = 10.0
+ tensor(ix)=(ix[dim] != 0)?1.0:-10.0;
+ }
+ }
+ }
+ }
+
+ std::size_t in_bytes = tensor.size() * sizeof(DataType);
+ std::size_t out_bytes = tensor_arg.size() * sizeof(DenseIndex);
+
+
+ DataType * d_in = static_cast<DataType*>(sycl_device.allocate(in_bytes));
+ DenseIndex* d_out= static_cast<DenseIndex*>(sycl_device.allocate(out_bytes));
+
+ Eigen::TensorMap<Eigen::Tensor<DataType, 4, DataLayout, DenseIndex> > gpu_in(d_in, Eigen::array<DenseIndex, 4>{{sizeDim0,sizeDim1,sizeDim2,sizeDim3}});
+ Eigen::TensorMap<Eigen::Tensor<DenseIndex, 3, DataLayout, DenseIndex> > gpu_out(d_out, out_shape);
+
+ sycl_device.memcpyHostToDevice(d_in, tensor.data(),in_bytes);
+ gpu_out.device(sycl_device) = gpu_in.argmin(dim);
+ sycl_device.memcpyDeviceToHost(tensor_arg.data(), d_out, out_bytes);
+
+ VERIFY_IS_EQUAL(static_cast<size_t>(tensor_arg.size()),
+ size_t(sizeDim0*sizeDim1*sizeDim2*sizeDim3 / tensor.dimension(dim)));
+
+ for (DenseIndex n = 0; n < tensor_arg.size(); ++n) {
+ // Expect min to be in the first index of the reduced dimension
+ VERIFY_IS_EQUAL(tensor_arg.data()[n], 0);
+ }
+
+ sycl_device.synchronize();
+
+ for (DenseIndex i = 0; i < sizeDim0; ++i) {
+ for (DenseIndex j = 0; j < sizeDim1; ++j) {
+ for (DenseIndex k = 0; k < sizeDim2; ++k) {
+ for (DenseIndex l = 0; l < sizeDim3; ++l) {
+ ix[0] = i; ix[1] = j; ix[2] = k; ix[3] = l;
+ // suppose dim == 1, then for all i, k, l, set tensor(i, 2, k, l) = 20.0
+ tensor(ix)=(ix[dim] != tensor.dimension(dim) - 1)?1.0:-20.0;
+ }
+ }
+ }
+ }
+
+ sycl_device.memcpyHostToDevice(d_in, tensor.data(),in_bytes);
+ gpu_out.device(sycl_device) = gpu_in.argmin(dim);
+ sycl_device.memcpyDeviceToHost(tensor_arg.data(), d_out, out_bytes);
+
+ for (DenseIndex n = 0; n < tensor_arg.size(); ++n) {
+ // Expect min to be in the last index of the reduced dimension
+ VERIFY_IS_EQUAL(tensor_arg.data()[n], tensor.dimension(dim) - 1);
+ }
+ sycl_device.deallocate(d_in);
+ sycl_device.deallocate(d_out);
+ }
+}
+
+
+
+
+template<typename DataType, typename Device_Selector> void sycl_argmax_test_per_device(const Device_Selector& d){
+ QueueInterface queueInterface(d);
+ auto sycl_device = Eigen::SyclDevice(&queueInterface);
+ test_sycl_simple_argmax<DataType, RowMajor, int64_t>(sycl_device);
+ test_sycl_simple_argmax<DataType, ColMajor, int64_t>(sycl_device);
+ test_sycl_argmax_dim<DataType, ColMajor, int64_t>(sycl_device);
+ test_sycl_argmax_dim<DataType, RowMajor, int64_t>(sycl_device);
+ test_sycl_argmin_dim<DataType, ColMajor, int64_t>(sycl_device);
+ test_sycl_argmin_dim<DataType, RowMajor, int64_t>(sycl_device);
+}
+
+EIGEN_DECLARE_TEST(cxx11_tensor_argmax_sycl) {
+ for (const auto& device :Eigen::get_sycl_supported_devices()) {
+ CALL_SUBTEST(sycl_argmax_test_per_device<double>(device));
+ }
+
+}
diff --git a/unsupported/test/cxx11_tensor_assign.cpp b/unsupported/test/cxx11_tensor_assign.cpp
index 8fe85d83c..ce9d24369 100644
--- a/unsupported/test/cxx11_tensor_assign.cpp
+++ b/unsupported/test/cxx11_tensor_assign.cpp
@@ -358,7 +358,7 @@ static void test_std_initializers_tensor() {
#endif // EIGEN_HAS_VARIADIC_TEMPLATES
}
-void test_cxx11_tensor_assign()
+EIGEN_DECLARE_TEST(cxx11_tensor_assign)
{
CALL_SUBTEST(test_1d());
CALL_SUBTEST(test_2d());
diff --git a/unsupported/test/cxx11_tensor_block_access.cpp b/unsupported/test/cxx11_tensor_block_access.cpp
new file mode 100644
index 000000000..ad12ae557
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_block_access.cpp
@@ -0,0 +1,1110 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2018 Andy Davis <andydavis@google.com>
+// Copyright (C) 2018 Eugene Zhulenev <ezhulenev@google.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+
+#include <algorithm>
+#include <set>
+
+#include <Eigen/CXX11/Tensor>
+
+using Eigen::Tensor;
+using Eigen::Index;
+using Eigen::RowMajor;
+using Eigen::ColMajor;
+
+
+template<typename T>
+static const T& choose(int layout, const T& col, const T& row) {
+ return layout == ColMajor ? col : row;
+}
+
+static internal::TensorBlockShapeType RandomShape() {
+ return internal::random<bool>()
+ ? internal::kUniformAllDims
+ : internal::kSkewedInnerDims;
+}
+
+template <int NumDims>
+static Index RandomTargetSize(const DSizes<Index, NumDims>& dims) {
+ return internal::random<Index>(1, dims.TotalSize());
+}
+
+template <int NumDims>
+static DSizes<Index, NumDims> RandomDims() {
+ array<Index, NumDims> dims;
+ for (int i = 0; i < NumDims; ++i) {
+ dims[i] = internal::random<int>(1, 20);
+ }
+ return DSizes<Index, NumDims>(dims);
+}
+
+/** Dummy data type to test TensorBlock copy ops. */
+struct Data {
+ Data() : value(0) {}
+ explicit Data(int v) : value(v) { }
+ int value;
+};
+
+bool operator==(const Data& lhs, const Data& rhs) {
+ return lhs.value == rhs.value;
+}
+
+std::ostream& operator<<(std::ostream& os, const Data& d) {
+ os << "Data: value=" << d.value;
+ return os;
+}
+
+template <typename T>
+static T* GenerateRandomData(const Index& size) {
+ T* data = new T[size];
+ for (int i = 0; i < size; ++i) {
+ data[i] = internal::random<T>();
+ }
+ return data;
+}
+
+template <>
+Data* GenerateRandomData(const Index& size) {
+ Data* data = new Data[size];
+ for (int i = 0; i < size; ++i) {
+ data[i] = Data(internal::random<int>(1, 100));
+ }
+ return data;
+}
+
+template <int NumDims>
+static void Debug(DSizes<Index, NumDims> dims) {
+ for (int i = 0; i < NumDims; ++i) {
+ std::cout << dims[i] << "; ";
+ }
+ std::cout << std::endl;
+}
+
+template <int Layout>
+static void test_block_mapper_sanity()
+{
+ typedef internal::TensorBlockMapper<int, Index, 2, Layout> TensorBlockMapper;
+
+ DSizes<Index, 2> tensor_dims(100, 100);
+
+ // Test uniform blocks.
+ TensorBlockMapper uniform_block_mapper(
+ tensor_dims, internal::kUniformAllDims, 100);
+
+ VERIFY_IS_EQUAL(uniform_block_mapper.total_block_count(), 100);
+ VERIFY_IS_EQUAL(uniform_block_mapper.block_dims_total_size(), 100);
+
+ // 10x10 blocks
+ typename TensorBlockMapper::Block uniform_b0 = uniform_block_mapper.GetBlockForIndex(0, NULL);
+ VERIFY_IS_EQUAL(uniform_b0.block_sizes().at(0), 10);
+ VERIFY_IS_EQUAL(uniform_b0.block_sizes().at(1), 10);
+ // Depending on the layout we stride by cols or rows.
+ VERIFY_IS_EQUAL(uniform_b0.block_strides().at(0), choose(Layout, 1, 10));
+ VERIFY_IS_EQUAL(uniform_b0.block_strides().at(1), choose(Layout, 10, 1));
+ // Tensor strides depend only on a layout and not on the block size.
+ VERIFY_IS_EQUAL(uniform_b0.tensor_strides().at(0), choose(Layout, 1, 100));
+ VERIFY_IS_EQUAL(uniform_b0.tensor_strides().at(1), choose(Layout, 100, 1));
+
+ // Test skewed to inner dims blocks.
+ TensorBlockMapper skewed_block_mapper(
+ tensor_dims, internal::kSkewedInnerDims, 100);
+
+ VERIFY_IS_EQUAL(skewed_block_mapper.total_block_count(), 100);
+ VERIFY_IS_EQUAL(skewed_block_mapper.block_dims_total_size(), 100);
+
+ // 1x100 (100x1) rows/cols depending on a tensor layout.
+ typename TensorBlockMapper::Block skewed_b0 = skewed_block_mapper.GetBlockForIndex(0, NULL);
+ VERIFY_IS_EQUAL(skewed_b0.block_sizes().at(0), choose(Layout, 100, 1));
+ VERIFY_IS_EQUAL(skewed_b0.block_sizes().at(1), choose(Layout, 1, 100));
+ // Depending on the layout we stride by cols or rows.
+ VERIFY_IS_EQUAL(skewed_b0.block_strides().at(0), choose(Layout, 1, 100));
+ VERIFY_IS_EQUAL(skewed_b0.block_strides().at(1), choose(Layout, 100, 1));
+ // Tensor strides depend only on a layout and not on the block size.
+ VERIFY_IS_EQUAL(skewed_b0.tensor_strides().at(0), choose(Layout, 1, 100));
+ VERIFY_IS_EQUAL(skewed_b0.tensor_strides().at(1), choose(Layout, 100, 1));
+}
+
+// Given a TensorBlock, "visit" every element accessible through it, and keep an
+// index of the visited elements in a set. Verify that every coeff is accessed only once.
+template <typename T, int Layout, int NumDims>
+static void UpdateCoeffSet(
+ const internal::TensorBlock<T, Index, NumDims, Layout>& block,
+ Index first_coeff_index, int dim_index, std::set<Index>* visited_coeffs) {
+ const DSizes<Index, NumDims>& block_sizes = block.block_sizes();
+ const DSizes<Index, NumDims>& tensor_strides = block.tensor_strides();
+
+ for (int i = 0; i < block_sizes[dim_index]; ++i) {
+ if (tensor_strides[dim_index] == 1) {
+ typedef std::pair<std::set<Index>::iterator, bool> ReturnType;
+ ReturnType inserted = visited_coeffs->insert(first_coeff_index + i);
+ VERIFY_IS_EQUAL(inserted.second, true);
+ } else {
+ int next_dim_index = dim_index + choose(Layout, -1, 1);
+ UpdateCoeffSet<T, Layout, NumDims>(block, first_coeff_index,
+ next_dim_index, visited_coeffs);
+ first_coeff_index += tensor_strides[dim_index];
+ }
+ }
+}
+
+template <typename T, int NumDims, int Layout>
+static void test_block_mapper_maps_every_element() {
+ typedef internal::TensorBlock<T, Index, NumDims, Layout> TensorBlock;
+ typedef internal::TensorBlockMapper<T, Index, NumDims, Layout> TensorBlockMapper;
+
+ DSizes<Index, NumDims> dims = RandomDims<NumDims>();
+
+ // Keep track of the element indices available via block access.
+ std::set<Index> coeff_set;
+
+ // Try different combinations of block types and sizes.
+ TensorBlockMapper block_mapper(dims, RandomShape(), RandomTargetSize(dims));
+
+ for (int i = 0; i < block_mapper.total_block_count(); ++i) {
+ TensorBlock block = block_mapper.GetBlockForIndex(i, NULL);
+ UpdateCoeffSet<T, Layout, NumDims>(block, block.first_coeff_index(),
+ choose(Layout, NumDims - 1, 0),
+ &coeff_set);
+ }
+
+ // Verify that every coefficient in the original Tensor is accessible through
+ // TensorBlock only once.
+ Index total_coeffs = dims.TotalSize();
+ VERIFY_IS_EQUAL(Index(coeff_set.size()), total_coeffs);
+ VERIFY_IS_EQUAL(*coeff_set.begin(), 0);
+ VERIFY_IS_EQUAL(*coeff_set.rbegin(), total_coeffs - 1);
+}
+
+template <typename T, int NumDims, int Layout>
+static void test_slice_block_mapper_maps_every_element() {
+ typedef internal::TensorBlock<T, Index, NumDims, Layout> TensorBlock;
+ typedef internal::TensorSliceBlockMapper<T, Index, NumDims, Layout> TensorSliceBlockMapper;
+
+ DSizes<Index, NumDims> tensor_dims = RandomDims<NumDims>();
+ DSizes<Index, NumDims> tensor_slice_offsets = RandomDims<NumDims>();
+ DSizes<Index, NumDims> tensor_slice_extents = RandomDims<NumDims>();
+
+ // Make sure that tensor offsets + extents do not overflow.
+ for (int i = 0; i < NumDims; ++i) {
+ tensor_slice_offsets[i] =
+ numext::mini(tensor_dims[i] - 1, tensor_slice_offsets[i]);
+ tensor_slice_extents[i] = numext::mini(
+ tensor_slice_extents[i], tensor_dims[i] - tensor_slice_offsets[i]);
+ }
+
+ // Keep track of the element indices available via block access.
+ std::set<Index> coeff_set;
+
+ int total_coeffs = static_cast<int>(tensor_slice_extents.TotalSize());
+
+ // Pick random dimension sizes for the tensor blocks.
+ DSizes<Index, NumDims> block_sizes;
+ for (int i = 0; i < NumDims; ++i) {
+ block_sizes[i] = internal::random<Index>(1, tensor_slice_extents[i]);
+ }
+
+ TensorSliceBlockMapper block_mapper(tensor_dims, tensor_slice_offsets,
+ tensor_slice_extents, block_sizes,
+ DimensionList<Index, NumDims>());
+
+ for (int i = 0; i < block_mapper.total_block_count(); ++i) {
+ TensorBlock block = block_mapper.GetBlockForIndex(i, NULL);
+ UpdateCoeffSet<T, Layout, NumDims>(block, block.first_coeff_index(),
+ choose(Layout, NumDims - 1, 0),
+ &coeff_set);
+ }
+
+ VERIFY_IS_EQUAL(Index(coeff_set.size()), total_coeffs);
+}
+
+template <typename T, int NumDims, int Layout>
+static void test_block_io_copy_data_from_source_to_target() {
+ typedef internal::TensorBlock<T, Index, NumDims, Layout> TensorBlock;
+ typedef internal::TensorBlockMapper<T, Index, NumDims, Layout>
+ TensorBlockMapper;
+
+ typedef internal::TensorBlockReader<T, Index, NumDims, Layout>
+ TensorBlockReader;
+ typedef internal::TensorBlockWriter<T, Index, NumDims, Layout>
+ TensorBlockWriter;
+
+ DSizes<Index, NumDims> input_tensor_dims = RandomDims<NumDims>();
+ const Index input_tensor_size = input_tensor_dims.TotalSize();
+
+ T* input_data = GenerateRandomData<T>(input_tensor_size);
+ T* output_data = new T[input_tensor_size];
+
+ TensorBlockMapper block_mapper(input_tensor_dims, RandomShape(),
+ RandomTargetSize(input_tensor_dims));
+ T* block_data = new T[block_mapper.block_dims_total_size()];
+
+ for (int i = 0; i < block_mapper.total_block_count(); ++i) {
+ TensorBlock block = block_mapper.GetBlockForIndex(i, block_data);
+ TensorBlockReader::Run(&block, input_data);
+ TensorBlockWriter::Run(block, output_data);
+ }
+
+ for (int i = 0; i < input_tensor_size; ++i) {
+ VERIFY_IS_EQUAL(input_data[i], output_data[i]);
+ }
+
+ delete[] input_data;
+ delete[] output_data;
+ delete[] block_data;
+}
+
+template <int Layout, int NumDims>
+static Index GetInputIndex(Index output_index,
+ const array<Index, NumDims>& output_to_input_dim_map,
+ const array<Index, NumDims>& input_strides,
+ const array<Index, NumDims>& output_strides) {
+ int input_index = 0;
+ if (Layout == ColMajor) {
+ for (int i = NumDims - 1; i > 0; --i) {
+ const Index idx = output_index / output_strides[i];
+ input_index += idx * input_strides[output_to_input_dim_map[i]];
+ output_index -= idx * output_strides[i];
+ }
+ return input_index +
+ output_index * input_strides[output_to_input_dim_map[0]];
+ } else {
+ for (int i = 0; i < NumDims - 1; ++i) {
+ const Index idx = output_index / output_strides[i];
+ input_index += idx * input_strides[output_to_input_dim_map[i]];
+ output_index -= idx * output_strides[i];
+ }
+ return input_index +
+ output_index * input_strides[output_to_input_dim_map[NumDims - 1]];
+ }
+}
+
+template <int Layout, int NumDims>
+static array<Index, NumDims> ComputeStrides(
+ const array<Index, NumDims>& sizes) {
+ array<Index, NumDims> strides;
+ if (Layout == ColMajor) {
+ strides[0] = 1;
+ for (int i = 1; i < NumDims; ++i) {
+ strides[i] = strides[i - 1] * sizes[i - 1];
+ }
+ } else {
+ strides[NumDims - 1] = 1;
+ for (int i = NumDims - 2; i >= 0; --i) {
+ strides[i] = strides[i + 1] * sizes[i + 1];
+ }
+ }
+ return strides;
+}
+
+template <typename T, int NumDims, int Layout>
+static void test_block_io_copy_using_reordered_dimensions() {
+ typedef internal::TensorBlock<T, Index, NumDims, Layout> TensorBlock;
+ typedef internal::TensorBlockMapper<T, Index, NumDims, Layout>
+ TensorBlockMapper;
+
+ typedef internal::TensorBlockReader<T, Index, NumDims, Layout>
+ TensorBlockReader;
+ typedef internal::TensorBlockWriter<T, Index, NumDims, Layout>
+ TensorBlockWriter;
+
+ DSizes<Index, NumDims> input_tensor_dims = RandomDims<NumDims>();
+ const Index input_tensor_size = input_tensor_dims.TotalSize();
+
+ // Create a random input tensor.
+ T* input_data = GenerateRandomData<T>(input_tensor_size);
+
+ // Create a random dimension re-ordering/shuffle.
+ std::vector<Index> shuffle;
+ for (int i = 0; i < NumDims; ++i) shuffle.push_back(i);
+ std::random_shuffle(shuffle.begin(), shuffle.end());
+
+ DSizes<Index, NumDims> output_tensor_dims;
+ array<Index, NumDims> input_to_output_dim_map;
+ array<Index, NumDims> output_to_input_dim_map;
+ for (Index i = 0; i < NumDims; ++i) {
+ output_tensor_dims[shuffle[i]] = input_tensor_dims[i];
+ input_to_output_dim_map[i] = shuffle[i];
+ output_to_input_dim_map[shuffle[i]] = i;
+ }
+
+ // Random block shape and size.
+ TensorBlockMapper block_mapper(output_tensor_dims, RandomShape(),
+ RandomTargetSize(input_tensor_dims));
+
+ T* block_data = new T[block_mapper.block_dims_total_size()];
+ T* output_data = new T[input_tensor_size];
+
+ array<Index, NumDims> input_tensor_strides =
+ ComputeStrides<Layout, NumDims>(input_tensor_dims);
+ array<Index, NumDims> output_tensor_strides =
+ ComputeStrides<Layout, NumDims>(output_tensor_dims);
+
+ for (Index i = 0; i < block_mapper.total_block_count(); ++i) {
+ TensorBlock block = block_mapper.GetBlockForIndex(i, block_data);
+ const Index first_coeff_index = GetInputIndex<Layout, NumDims>(
+ block.first_coeff_index(), output_to_input_dim_map,
+ input_tensor_strides, output_tensor_strides);
+ TensorBlockReader::Run(&block, first_coeff_index, input_to_output_dim_map,
+ input_tensor_strides, input_data);
+ TensorBlockWriter::Run(block, first_coeff_index, input_to_output_dim_map,
+ input_tensor_strides, output_data);
+ }
+
+ for (int i = 0; i < input_tensor_size; ++i) {
+ VERIFY_IS_EQUAL(input_data[i], output_data[i]);
+ }
+
+ delete[] input_data;
+ delete[] block_data;
+ delete[] output_data;
+}
+
+template<typename Scalar, typename StorageIndex, int Dim>
+class EqualityChecker
+{
+ const Scalar* input_data;
+ const DSizes<StorageIndex, Dim> &input_dims, &input_strides, &output_dims, &output_strides;
+ void check_recursive(const Scalar* input, const Scalar* output, int depth=0) const
+ {
+ if(depth==Dim)
+ {
+ VERIFY_IS_EQUAL(*input, *output);
+ return;
+ }
+
+ for(int i=0; i<output_dims[depth]; ++i)
+ {
+ check_recursive(input + i % input_dims[depth] * input_strides[depth], output + i*output_strides[depth], depth+1);
+ }
+ }
+public:
+ EqualityChecker(const Scalar* input_data_,
+ const DSizes<StorageIndex, Dim> &input_dims_, const DSizes<StorageIndex, Dim> &input_strides_,
+ const DSizes<StorageIndex, Dim> &output_dims_, const DSizes<StorageIndex, Dim> &output_strides_)
+ : input_data(input_data_)
+ , input_dims(input_dims_), input_strides(input_strides_)
+ , output_dims(output_dims_), output_strides(output_strides_)
+ {}
+
+ void operator()(const Scalar* output_data) const
+ {
+ check_recursive(input_data, output_data);
+ }
+};
+
+
+template <int Layout>
+static void test_block_io_zero_stride()
+{
+ typedef internal::TensorBlock<float, Index, 5, Layout> TensorBlock;
+ typedef internal::TensorBlockReader<float, Index, 5, Layout>
+ TensorBlockReader;
+ typedef internal::TensorBlockWriter<float, Index, 5, Layout>
+ TensorBlockWriter;
+
+ DSizes<Index, 5> rnd_dims = RandomDims<5>();
+
+ DSizes<Index, 5> input_tensor_dims = rnd_dims;
+ input_tensor_dims[0] = 1;
+ input_tensor_dims[2] = 1;
+ input_tensor_dims[4] = 1;
+ const Index input_tensor_size = input_tensor_dims.TotalSize();
+ float* input_data = GenerateRandomData<float>(input_tensor_size);
+
+ DSizes<Index, 5> output_tensor_dims = rnd_dims;
+
+ DSizes<Index, 5> input_tensor_strides(
+ ComputeStrides<Layout, 5>(input_tensor_dims));
+ DSizes<Index, 5> output_tensor_strides(
+ ComputeStrides<Layout, 5>(output_tensor_dims));
+
+ DSizes<Index, 5> input_tensor_strides_with_zeros(input_tensor_strides);
+ input_tensor_strides_with_zeros[0] = 0;
+ input_tensor_strides_with_zeros[2] = 0;
+ input_tensor_strides_with_zeros[4] = 0;
+
+ // Verify that data was correctly read/written from/into the block.
+ const EqualityChecker<float, Index, 5> verify_is_equal(input_data, input_tensor_dims, input_tensor_strides, output_tensor_dims, output_tensor_strides);
+
+ {
+ float* output_data = new float[output_tensor_dims.TotalSize()];
+ TensorBlock read_block(0, output_tensor_dims, output_tensor_strides,
+ input_tensor_strides_with_zeros, output_data);
+ TensorBlockReader::Run(&read_block, input_data);
+ verify_is_equal(output_data);
+ delete[] output_data;
+ }
+
+ {
+ float* output_data = new float[output_tensor_dims.TotalSize()];
+ TensorBlock write_block(0, output_tensor_dims,
+ input_tensor_strides_with_zeros,
+ output_tensor_strides, input_data);
+ TensorBlockWriter::Run(write_block, output_data);
+ verify_is_equal(output_data);
+ delete[] output_data;
+ }
+
+ delete[] input_data;
+}
+
+template <int Layout>
+static void test_block_io_squeeze_ones() {
+ typedef internal::TensorBlock<float, Index, 5, Layout> TensorBlock;
+ typedef internal::TensorBlockReader<float, Index, 5, Layout>
+ TensorBlockReader;
+ typedef internal::TensorBlockWriter<float, Index, 5, Layout>
+ TensorBlockWriter;
+
+ // Total size > 1.
+ {
+ DSizes<Index, 5> block_sizes(1, 2, 1, 2, 1);
+ const Index total_size = block_sizes.TotalSize();
+
+ // Create a random input tensor.
+ float* input_data = GenerateRandomData<float>(total_size);
+ DSizes<Index, 5> strides(ComputeStrides<Layout, 5>(block_sizes));
+
+ {
+ float* output_data = new float[block_sizes.TotalSize()];
+ TensorBlock read_block(0, block_sizes, strides, strides, output_data);
+ TensorBlockReader::Run(&read_block, input_data);
+ for (int i = 0; i < total_size; ++i) {
+ VERIFY_IS_EQUAL(output_data[i], input_data[i]);
+ }
+ delete[] output_data;
+ }
+
+ {
+ float* output_data = new float[block_sizes.TotalSize()];
+ TensorBlock write_block(0, block_sizes, strides, strides, input_data);
+ TensorBlockWriter::Run(write_block, output_data);
+ for (int i = 0; i < total_size; ++i) {
+ VERIFY_IS_EQUAL(output_data[i], input_data[i]);
+ }
+ delete[] output_data;
+ }
+ }
+
+ // Total size == 1.
+ {
+ DSizes<Index, 5> block_sizes(1, 1, 1, 1, 1);
+ const Index total_size = block_sizes.TotalSize();
+
+ // Create a random input tensor.
+ float* input_data = GenerateRandomData<float>(total_size);
+ DSizes<Index, 5> strides(ComputeStrides<Layout, 5>(block_sizes));
+
+ {
+ float* output_data = new float[block_sizes.TotalSize()];
+ TensorBlock read_block(0, block_sizes, strides, strides, output_data);
+ TensorBlockReader::Run(&read_block, input_data);
+ for (int i = 0; i < total_size; ++i) {
+ VERIFY_IS_EQUAL(output_data[i], input_data[i]);
+ }
+ delete[] output_data;
+ }
+
+ {
+ float* output_data = new float[block_sizes.TotalSize()];
+ TensorBlock write_block(0, block_sizes, strides, strides, input_data);
+ TensorBlockWriter::Run(write_block, output_data);
+ for (int i = 0; i < total_size; ++i) {
+ VERIFY_IS_EQUAL(output_data[i], input_data[i]);
+ }
+ delete[] output_data;
+ }
+ }
+}
+
+template <typename T, int NumDims, int Layout>
+static void test_block_cwise_unary_io_basic() {
+ typedef internal::scalar_square_op<T> UnaryFunctor;
+ typedef internal::TensorBlockCwiseUnaryIO<UnaryFunctor, Index, T, NumDims,
+ Layout>
+ TensorBlockCwiseUnaryIO;
+
+ DSizes<Index, NumDims> block_sizes = RandomDims<NumDims>();
+ DSizes<Index, NumDims> strides(ComputeStrides<Layout, NumDims>(block_sizes));
+
+ const Index total_size = block_sizes.TotalSize();
+
+ // Create a random input tensor.
+ T* input_data = GenerateRandomData<T>(total_size);
+
+ T* output_data = new T[total_size];
+ UnaryFunctor functor;
+ TensorBlockCwiseUnaryIO::Run(functor, block_sizes, strides, output_data,
+ strides, input_data);
+ for (int i = 0; i < total_size; ++i) {
+ VERIFY_IS_EQUAL(output_data[i], functor(input_data[i]));
+ }
+
+ delete[] input_data;
+ delete[] output_data;
+}
+
+template <int Layout>
+static void test_block_cwise_unary_io_squeeze_ones() {
+ typedef internal::scalar_square_op<float> UnaryFunctor;
+ typedef internal::TensorBlockCwiseUnaryIO<UnaryFunctor, Index, float, 5,
+ Layout>
+ TensorBlockCwiseUnaryIO;
+
+ DSizes<Index, 5> block_sizes(1, 2, 1, 3, 1);
+ DSizes<Index, 5> strides(ComputeStrides<Layout, 5>(block_sizes));
+
+ const Index total_size = block_sizes.TotalSize();
+
+ // Create a random input tensor.
+ float* input_data = GenerateRandomData<float>(total_size);
+
+ float* output_data = new float[total_size];
+ UnaryFunctor functor;
+ TensorBlockCwiseUnaryIO::Run(functor, block_sizes, strides, output_data,
+ strides, input_data);
+ for (int i = 0; i < total_size; ++i) {
+ VERIFY_IS_EQUAL(output_data[i], functor(input_data[i]));
+ }
+
+ delete[] input_data;
+ delete[] output_data;
+}
+
+template <int Layout>
+static void test_block_cwise_unary_io_zero_strides() {
+ typedef internal::scalar_square_op<float> UnaryFunctor;
+ typedef internal::TensorBlockCwiseUnaryIO<UnaryFunctor, Index, float, 5,
+ Layout>
+ TensorBlockCwiseUnaryIO;
+
+ DSizes<Index, 5> rnd_dims = RandomDims<5>();
+
+ DSizes<Index, 5> input_sizes = rnd_dims;
+ input_sizes[0] = 1;
+ input_sizes[2] = 1;
+ input_sizes[4] = 1;
+
+ DSizes<Index, 5> input_strides(ComputeStrides<Layout, 5>(input_sizes));
+ input_strides[0] = 0;
+ input_strides[2] = 0;
+ input_strides[4] = 0;
+
+ // Generate random data.
+ float* input_data = GenerateRandomData<float>(input_sizes.TotalSize());
+
+ DSizes<Index, 5> output_sizes = rnd_dims;
+ DSizes<Index, 5> output_strides(ComputeStrides<Layout, 5>(output_sizes));
+
+ const Index output_total_size = output_sizes.TotalSize();
+ float* output_data = new float[output_total_size];
+
+ UnaryFunctor functor;
+ TensorBlockCwiseUnaryIO::Run(functor, output_sizes, output_strides,
+ output_data, input_strides, input_data);
+ for (int i = 0; i < rnd_dims[0]; ++i) {
+ for (int j = 0; j < rnd_dims[1]; ++j) {
+ for (int k = 0; k < rnd_dims[2]; ++k) {
+ for (int l = 0; l < rnd_dims[3]; ++l) {
+ for (int m = 0; m < rnd_dims[4]; ++m) {
+ Index output_index = i * output_strides[0] + j * output_strides[1] +
+ k * output_strides[2] + l * output_strides[3] +
+ m * output_strides[4];
+ Index input_index = i * input_strides[0] + j * input_strides[1] +
+ k * input_strides[2] + l * input_strides[3] +
+ m * input_strides[4];
+ VERIFY_IS_EQUAL(output_data[output_index],
+ functor(input_data[input_index]));
+ }
+ }
+ }
+ }
+ }
+
+ delete[] input_data;
+ delete[] output_data;
+}
+
+template <typename T, int NumDims, int Layout>
+static void test_block_cwise_binary_io_basic() {
+ typedef internal::scalar_sum_op<T> BinaryFunctor;
+ typedef internal::TensorBlockCwiseBinaryIO<BinaryFunctor, Index, T, NumDims,
+ Layout>
+ TensorBlockCwiseBinaryIO;
+
+ DSizes<Index, NumDims> block_sizes = RandomDims<NumDims>();
+ DSizes<Index, NumDims> strides(ComputeStrides<Layout, NumDims>(block_sizes));
+
+ const Index total_size = block_sizes.TotalSize();
+
+ // Create random input tensors.
+ T* left_data = GenerateRandomData<T>(total_size);
+ T* right_data = GenerateRandomData<T>(total_size);
+
+ T* output_data = new T[total_size];
+ BinaryFunctor functor;
+ TensorBlockCwiseBinaryIO::Run(functor, block_sizes, strides, output_data,
+ strides, left_data, strides, right_data);
+ for (int i = 0; i < total_size; ++i) {
+ VERIFY_IS_EQUAL(output_data[i], functor(left_data[i], right_data[i]));
+ }
+
+ delete[] left_data;
+ delete[] right_data;
+ delete[] output_data;
+}
+
+template <int Layout>
+static void test_block_cwise_binary_io_squeeze_ones() {
+ typedef internal::scalar_sum_op<float> BinaryFunctor;
+ typedef internal::TensorBlockCwiseBinaryIO<BinaryFunctor, Index, float, 5,
+ Layout>
+ TensorBlockCwiseBinaryIO;
+
+ DSizes<Index, 5> block_sizes(1, 2, 1, 3, 1);
+ DSizes<Index, 5> strides(ComputeStrides<Layout, 5>(block_sizes));
+
+ const Index total_size = block_sizes.TotalSize();
+
+ // Create random input tensors.
+ float* left_data = GenerateRandomData<float>(total_size);
+ float* right_data = GenerateRandomData<float>(total_size);
+
+ float* output_data = new float[total_size];
+ BinaryFunctor functor;
+ TensorBlockCwiseBinaryIO::Run(functor, block_sizes, strides, output_data,
+ strides, left_data, strides, right_data);
+ for (int i = 0; i < total_size; ++i) {
+ VERIFY_IS_EQUAL(output_data[i], functor(left_data[i], right_data[i]));
+ }
+
+ delete[] left_data;
+ delete[] right_data;
+ delete[] output_data;
+}
+
+template <int Layout>
+static void test_block_cwise_binary_io_zero_strides() {
+ typedef internal::scalar_sum_op<float> BinaryFunctor;
+ typedef internal::TensorBlockCwiseBinaryIO<BinaryFunctor, Index, float, 5,
+ Layout>
+ TensorBlockCwiseBinaryIO;
+
+ DSizes<Index, 5> rnd_dims = RandomDims<5>();
+
+ DSizes<Index, 5> left_sizes = rnd_dims;
+ left_sizes[0] = 1;
+ left_sizes[2] = 1;
+ left_sizes[4] = 1;
+
+ DSizes<Index, 5> left_strides(ComputeStrides<Layout, 5>(left_sizes));
+ left_strides[0] = 0;
+ left_strides[2] = 0;
+ left_strides[4] = 0;
+
+ DSizes<Index, 5> right_sizes = rnd_dims;
+ right_sizes[1] = 1;
+ right_sizes[3] = 1;
+
+ DSizes<Index, 5> right_strides(ComputeStrides<Layout, 5>(right_sizes));
+ right_strides[1] = 0;
+ right_strides[3] = 0;
+
+ // Generate random data.
+ float* left_data = GenerateRandomData<float>(left_sizes.TotalSize());
+ float* right_data = GenerateRandomData<float>(right_sizes.TotalSize());
+
+ DSizes<Index, 5> output_sizes = rnd_dims;
+ DSizes<Index, 5> output_strides(ComputeStrides<Layout, 5>(output_sizes));
+
+ const Index output_total_size = output_sizes.TotalSize();
+ float* output_data = new float[output_total_size];
+
+ BinaryFunctor functor;
+ TensorBlockCwiseBinaryIO::Run(functor, output_sizes, output_strides,
+ output_data, left_strides, left_data,
+ right_strides, right_data);
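+ // Zero strides emulate broadcasting: dimensions 0, 2 and 4 of the left
+ // operand and dimensions 1 and 3 of the right operand always map to index
+ // zero, which the explicit flat-index computation below reproduces.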
+ for (int i = 0; i < rnd_dims[0]; ++i) {
+ for (int j = 0; j < rnd_dims[1]; ++j) {
+ for (int k = 0; k < rnd_dims[2]; ++k) {
+ for (int l = 0; l < rnd_dims[3]; ++l) {
+ for (int m = 0; m < rnd_dims[4]; ++m) {
+ Index output_index = i * output_strides[0] + j * output_strides[1] +
+ k * output_strides[2] + l * output_strides[3] +
+ m * output_strides[4];
+ Index left_index = i * left_strides[0] + j * left_strides[1] +
+ k * left_strides[2] + l * left_strides[3] +
+ m * left_strides[4];
+ Index right_index = i * right_strides[0] + j * right_strides[1] +
+ k * right_strides[2] + l * right_strides[3] +
+ m * right_strides[4];
+ VERIFY_IS_EQUAL(
+ output_data[output_index],
+ functor(left_data[left_index], right_data[right_index]));
+ }
+ }
+ }
+ }
+ }
+
+ delete[] left_data;
+ delete[] right_data;
+ delete[] output_data;
+}
+
+template <int Layout>
+static void test_uniform_block_shape()
+{
+ typedef internal::TensorBlock<int, Index, 5, Layout> TensorBlock;
+ typedef internal::TensorBlockMapper<int, Index, 5, Layout> TensorBlockMapper;
+
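+ // 'UniformAllDims' splits the coefficient budget evenly across all
+ // dimensions and only grows individual dimensions past the uniform size
+ // when there is budget left over, as the cases below check for both layouts.
+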
+ {
+ // Test shape 'UniformAllDims' with a uniform 'max_coeff_count'.
+ DSizes<Index, 5> dims(11, 5, 6, 17, 7);
+ const Index max_coeff_count = 5 * 5 * 5 * 5 * 5;
+ TensorBlockMapper block_mapper(dims, internal::kUniformAllDims,
+ max_coeff_count);
+ TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
+ for (int i = 0; i < 5; ++i) {
+ VERIFY_IS_EQUAL(5, block.block_sizes()[i]);
+ }
+ VERIFY(block.block_sizes().TotalSize() <= max_coeff_count);
+ }
+
+ // Test shape 'UniformAllDims' with a larger 'max_coeff_count' which spills
+ // partially into the first inner-most dimension.
+ if (Layout == ColMajor) {
+ DSizes<Index, 5> dims(11, 5, 6, 17, 7);
+ const Index max_coeff_count = 7 * 5 * 5 * 5 * 5;
+ TensorBlockMapper block_mapper(dims, internal::kUniformAllDims,
+ max_coeff_count);
+ TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
+ VERIFY_IS_EQUAL(7, block.block_sizes()[0]);
+ for (int i = 1; i < 5; ++i) {
+ VERIFY_IS_EQUAL(5, block.block_sizes()[i]);
+ }
+ VERIFY(block.block_sizes().TotalSize() <= max_coeff_count);
+ } else {
+ DSizes<Index, 5> dims(11, 5, 6, 17, 7);
+ const Index max_coeff_count = 5 * 5 * 5 * 5 * 6;
+ TensorBlockMapper block_mapper(dims, internal::kUniformAllDims,
+ max_coeff_count);
+ TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
+ VERIFY_IS_EQUAL(6, block.block_sizes()[4]);
+ for (int i = 3; i >= 0; --i) {
+ VERIFY_IS_EQUAL(5, block.block_sizes()[i]);
+ }
+ VERIFY(block.block_sizes().TotalSize() <= max_coeff_count);
+ }
+
+ // Test shape 'UniformAllDims' with a larger 'max_coeff_count' which spills
+ // fully into the first inner-most dimension.
+ if (Layout == ColMajor) {
+ DSizes<Index, 5> dims(11, 5, 6, 17, 7);
+ const Index max_coeff_count = 11 * 5 * 5 * 5 * 5;
+ TensorBlockMapper block_mapper(dims, internal::kUniformAllDims,
+ max_coeff_count);
+ TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
+ VERIFY_IS_EQUAL(11, block.block_sizes()[0]);
+ for (int i = 1; i < 5; ++i) {
+ VERIFY_IS_EQUAL(5, block.block_sizes()[i]);
+ }
+ VERIFY(block.block_sizes().TotalSize() <= max_coeff_count);
+ } else {
+ DSizes<Index, 5> dims(11, 5, 6, 17, 7);
+ const Index max_coeff_count = 5 * 5 * 5 * 5 * 7;
+ TensorBlockMapper block_mapper(dims, internal::kUniformAllDims,
+ max_coeff_count);
+ TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
+ VERIFY_IS_EQUAL(7, block.block_sizes()[4]);
+ for (int i = 3; i >= 0; --i) {
+ VERIFY_IS_EQUAL(5, block.block_sizes()[i]);
+ }
+ VERIFY(block.block_sizes().TotalSize() <= max_coeff_count);
+ }
+
+ // Test shape 'UniformAllDims' with a larger 'max_coeff_count' which spills
+ // fully into the first few inner-most dimensions.
+ if (Layout == ColMajor) {
+ DSizes<Index, 5> dims(7, 5, 6, 17, 7);
+ const Index max_coeff_count = 7 * 5 * 6 * 7 * 5;
+ TensorBlockMapper block_mapper(dims, internal::kUniformAllDims,
+ max_coeff_count);
+ TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
+ VERIFY_IS_EQUAL(7, block.block_sizes()[0]);
+ VERIFY_IS_EQUAL(5, block.block_sizes()[1]);
+ VERIFY_IS_EQUAL(6, block.block_sizes()[2]);
+ VERIFY_IS_EQUAL(7, block.block_sizes()[3]);
+ VERIFY_IS_EQUAL(5, block.block_sizes()[4]);
+ VERIFY(block.block_sizes().TotalSize() <= max_coeff_count);
+ } else {
+ DSizes<Index, 5> dims(7, 5, 6, 9, 7);
+ const Index max_coeff_count = 5 * 5 * 5 * 6 * 7;
+ TensorBlockMapper block_mapper(dims, internal::kUniformAllDims,
+ max_coeff_count);
+ TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
+ VERIFY_IS_EQUAL(7, block.block_sizes()[4]);
+ VERIFY_IS_EQUAL(6, block.block_sizes()[3]);
+ VERIFY_IS_EQUAL(5, block.block_sizes()[2]);
+ VERIFY_IS_EQUAL(5, block.block_sizes()[1]);
+ VERIFY_IS_EQUAL(5, block.block_sizes()[0]);
+ VERIFY(block.block_sizes().TotalSize() <= max_coeff_count);
+ }
+
+ // Test shape 'UniformAllDims' with full allocation to all dims.
+ if (Layout == ColMajor) {
+ DSizes<Index, 5> dims(7, 5, 6, 17, 7);
+ const Index max_coeff_count = 7 * 5 * 6 * 17 * 7;
+ TensorBlockMapper block_mapper(dims, internal::kUniformAllDims,
+ max_coeff_count);
+ TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
+ VERIFY_IS_EQUAL(7, block.block_sizes()[0]);
+ VERIFY_IS_EQUAL(5, block.block_sizes()[1]);
+ VERIFY_IS_EQUAL(6, block.block_sizes()[2]);
+ VERIFY_IS_EQUAL(17, block.block_sizes()[3]);
+ VERIFY_IS_EQUAL(7, block.block_sizes()[4]);
+ VERIFY(block.block_sizes().TotalSize() <= max_coeff_count);
+ } else {
+ DSizes<Index, 5> dims(7, 5, 6, 9, 7);
+ const Index max_coeff_count = 7 * 5 * 6 * 9 * 7;
+ TensorBlockMapper block_mapper(dims, internal::kUniformAllDims,
+ max_coeff_count);
+ TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
+ VERIFY_IS_EQUAL(7, block.block_sizes()[4]);
+ VERIFY_IS_EQUAL(9, block.block_sizes()[3]);
+ VERIFY_IS_EQUAL(6, block.block_sizes()[2]);
+ VERIFY_IS_EQUAL(5, block.block_sizes()[1]);
+ VERIFY_IS_EQUAL(7, block.block_sizes()[0]);
+ VERIFY(block.block_sizes().TotalSize() <= max_coeff_count);
+ }
+}
+
+template <int Layout>
+static void test_skewed_inner_dim_block_shape()
+{
+ typedef internal::TensorBlock<int, Index, 5, Layout> TensorBlock;
+ typedef internal::TensorBlockMapper<int, Index, 5, Layout> TensorBlockMapper;
+
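+ // 'SkewedInnerDims' gives the whole coefficient budget to the inner-most
+ // dimension first and only then grows the next dimensions one at a time,
+ // as the cases below verify for both layouts.
+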
+ // Test shape 'SkewedInnerDims' with partial allocation to inner-most dim.
+ if (Layout == ColMajor) {
+ DSizes<Index, 5> dims(11, 5, 6, 17, 7);
+ const Index max_coeff_count = 10 * 1 * 1 * 1 * 1;
+ TensorBlockMapper block_mapper(dims, internal::kSkewedInnerDims,
+ max_coeff_count);
+ TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
+ VERIFY_IS_EQUAL(10, block.block_sizes()[0]);
+ for (int i = 1; i < 5; ++i) {
+ VERIFY_IS_EQUAL(1, block.block_sizes()[i]);
+ }
+ VERIFY(block.block_sizes().TotalSize() <= max_coeff_count);
+ } else {
+ DSizes<Index, 5> dims(11, 5, 6, 17, 7);
+ const Index max_coeff_count = 1 * 1 * 1 * 1 * 6;
+ TensorBlockMapper block_mapper(dims, internal::kSkewedInnerDims,
+ max_coeff_count);
+ TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
+ VERIFY_IS_EQUAL(6, block.block_sizes()[4]);
+ for (int i = 3; i >= 0; --i) {
+ VERIFY_IS_EQUAL(1, block.block_sizes()[i]);
+ }
+ VERIFY(block.block_sizes().TotalSize() <= max_coeff_count);
+ }
+
+ // Test shape 'SkewedInnerDims' with full allocation to inner-most dim.
+ if (Layout == ColMajor) {
+ DSizes<Index, 5> dims(11, 5, 6, 17, 7);
+ const Index max_coeff_count = 11 * 1 * 1 * 1 * 1;
+ TensorBlockMapper block_mapper(dims, internal::kSkewedInnerDims,
+ max_coeff_count);
+ TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
+ VERIFY_IS_EQUAL(11, block.block_sizes()[0]);
+ for (int i = 1; i < 5; ++i) {
+ VERIFY_IS_EQUAL(1, block.block_sizes()[i]);
+ }
+ VERIFY(block.block_sizes().TotalSize() <= max_coeff_count);
+ } else {
+ DSizes<Index, 5> dims(11, 5, 6, 17, 7);
+ const Index max_coeff_count = 1 * 1 * 1 * 1 * 7;
+ TensorBlockMapper block_mapper(dims, internal::kSkewedInnerDims,
+ max_coeff_count);
+ TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
+ VERIFY_IS_EQUAL(7, block.block_sizes()[4]);
+ for (int i = 3; i >= 0; --i) {
+ VERIFY_IS_EQUAL(1, block.block_sizes()[i]);
+ }
+ VERIFY(block.block_sizes().TotalSize() <= max_coeff_count);
+ }
+
+ // Test shape 'SkewedInnerDims' with full allocation to inner-most dim,
+ // and partial allocation to second inner-dim.
+ if (Layout == ColMajor) {
+ DSizes<Index, 5> dims(11, 5, 6, 17, 7);
+ const Index max_coeff_count = 11 * 3 * 1 * 1 * 1;
+ TensorBlockMapper block_mapper(dims, internal::kSkewedInnerDims,
+ max_coeff_count);
+ TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
+ VERIFY_IS_EQUAL(11, block.block_sizes()[0]);
+ VERIFY_IS_EQUAL(3, block.block_sizes()[1]);
+ for (int i = 2; i < 5; ++i) {
+ VERIFY_IS_EQUAL(1, block.block_sizes()[i]);
+ }
+ VERIFY(block.block_sizes().TotalSize() <= max_coeff_count);
+ } else {
+ DSizes<Index, 5> dims(11, 5, 6, 17, 7);
+ const Index max_coeff_count = 1 * 1 * 1 * 15 * 7;
+ TensorBlockMapper block_mapper(dims, internal::kSkewedInnerDims,
+ max_coeff_count);
+ TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
+ VERIFY_IS_EQUAL(7, block.block_sizes()[4]);
+ VERIFY_IS_EQUAL(15, block.block_sizes()[3]);
+ for (int i = 2; i >= 0; --i) {
+ VERIFY_IS_EQUAL(1, block.block_sizes()[i]);
+ }
+ VERIFY(block.block_sizes().TotalSize() <= max_coeff_count);
+ }
+
+ // Test shape 'SkewedInnerDims' with full allocation to inner-most dim,
+ // and partial allocation to third inner-dim.
+ if (Layout == ColMajor) {
+ DSizes<Index, 5> dims(11, 5, 6, 17, 7);
+ const Index max_coeff_count = 11 * 5 * 5 * 1 * 1;
+ TensorBlockMapper block_mapper(dims, internal::kSkewedInnerDims,
+ max_coeff_count);
+ TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
+ VERIFY_IS_EQUAL(11, block.block_sizes()[0]);
+ VERIFY_IS_EQUAL(5, block.block_sizes()[1]);
+ VERIFY_IS_EQUAL(5, block.block_sizes()[2]);
+ for (int i = 3; i < 5; ++i) {
+ VERIFY_IS_EQUAL(1, block.block_sizes()[i]);
+ }
+ VERIFY(block.block_sizes().TotalSize() <= max_coeff_count);
+ } else {
+ DSizes<Index, 5> dims(11, 5, 6, 17, 7);
+ const Index max_coeff_count = 1 * 1 * 5 * 17 * 7;
+ TensorBlockMapper block_mapper(dims, internal::kSkewedInnerDims,
+ max_coeff_count);
+ TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
+ VERIFY_IS_EQUAL(7, block.block_sizes()[4]);
+ VERIFY_IS_EQUAL(17, block.block_sizes()[3]);
+ VERIFY_IS_EQUAL(5, block.block_sizes()[2]);
+ for (int i = 1; i >= 0; --i) {
+ VERIFY_IS_EQUAL(1, block.block_sizes()[i]);
+ }
+ VERIFY(block.block_sizes().TotalSize() <= max_coeff_count);
+ }
+
+ // Test shape 'SkewedInnerDims' with full allocation to all dims.
+ if (Layout == ColMajor) {
+ DSizes<Index, 5> dims(11, 5, 6, 17, 7);
+ const Index max_coeff_count = 11 * 5 * 6 * 17 * 7;
+ TensorBlockMapper block_mapper(dims, internal::kSkewedInnerDims,
+ max_coeff_count);
+ TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
+ VERIFY_IS_EQUAL(11, block.block_sizes()[0]);
+ VERIFY_IS_EQUAL(5, block.block_sizes()[1]);
+ VERIFY_IS_EQUAL(6, block.block_sizes()[2]);
+ VERIFY_IS_EQUAL(17, block.block_sizes()[3]);
+ VERIFY_IS_EQUAL(7, block.block_sizes()[4]);
+ VERIFY(block.block_sizes().TotalSize() <= max_coeff_count);
+ } else {
+ DSizes<Index, 5> dims(11, 5, 6, 17, 7);
+ const Index max_coeff_count = 11 * 5 * 6 * 17 * 7;
+ TensorBlockMapper block_mapper(dims, internal::kSkewedInnerDims,
+ max_coeff_count);
+ TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
+ VERIFY_IS_EQUAL(7, block.block_sizes()[4]);
+ VERIFY_IS_EQUAL(17, block.block_sizes()[3]);
+ VERIFY_IS_EQUAL(6, block.block_sizes()[2]);
+ VERIFY_IS_EQUAL(5, block.block_sizes()[1]);
+ VERIFY_IS_EQUAL(11, block.block_sizes()[0]);
+ VERIFY(block.block_sizes().TotalSize() <= max_coeff_count);
+ }
+}
+
+template <int Layout>
+static void test_empty_dims(const internal::TensorBlockShapeType block_shape)
+{
+ // Test blocking of tensors with zero dimensions:
+ // - we must not crash on asserts and divisions by zero
+ // - we must not return a block with zero dimensions
+ // (a recipe for overflows/underflows, divisions by zero and NaNs later)
+ // - total block count must be zero
+ {
+ typedef internal::TensorBlockMapper<int, Index, 1, Layout> TensorBlockMapper;
+ DSizes<Index, 1> dims(0);
+ for (int max_coeff_count = 0; max_coeff_count < 2; ++max_coeff_count) {
+ TensorBlockMapper block_mapper(dims, block_shape, max_coeff_count);
+ VERIFY_IS_EQUAL(block_mapper.total_block_count(), 0);
+ VERIFY(block_mapper.block_dims_total_size() >= 1);
+ }
+ }
+
+ {
+ typedef internal::TensorBlockMapper<int, Index, 2, Layout> TensorBlockMapper;
+ for (int dim1 = 0; dim1 < 3; ++dim1) {
+ for (int dim2 = 0; dim2 < 3; ++dim2) {
+ DSizes<Index, 2> dims(dim1, dim2);
+ for (int max_coeff_count = 0; max_coeff_count < 2; ++max_coeff_count) {
+ TensorBlockMapper block_mapper(dims, block_shape, max_coeff_count);
+ if (dim1 * dim2 == 0) {
+ VERIFY_IS_EQUAL(block_mapper.total_block_count(), 0);
+ }
+ VERIFY(block_mapper.block_dims_total_size() >= 1);
+ }
+ }
+ }
+ }
+}
+
+#define TEST_LAYOUTS(NAME) \
+ CALL_SUBTEST(NAME<ColMajor>()); \
+ CALL_SUBTEST(NAME<RowMajor>())
+
+#define TEST_LAYOUTS_AND_DIMS(TYPE, NAME) \
+ CALL_SUBTEST((NAME<TYPE, 1, ColMajor>())); \
+ CALL_SUBTEST((NAME<TYPE, 1, RowMajor>())); \
+ CALL_SUBTEST((NAME<TYPE, 2, ColMajor>())); \
+ CALL_SUBTEST((NAME<TYPE, 2, RowMajor>())); \
+ CALL_SUBTEST((NAME<TYPE, 3, ColMajor>())); \
+ CALL_SUBTEST((NAME<TYPE, 3, RowMajor>())); \
+ CALL_SUBTEST((NAME<TYPE, 4, ColMajor>())); \
+ CALL_SUBTEST((NAME<TYPE, 4, RowMajor>())); \
+ CALL_SUBTEST((NAME<TYPE, 5, ColMajor>())); \
+ CALL_SUBTEST((NAME<TYPE, 5, RowMajor>()))
+
+#define TEST_LAYOUTS_WITH_ARG(NAME, ARG) \
+ CALL_SUBTEST(NAME<ColMajor>(ARG)); \
+ CALL_SUBTEST(NAME<RowMajor>(ARG))
+
+EIGEN_DECLARE_TEST(cxx11_tensor_block_access) {
+ TEST_LAYOUTS(test_block_mapper_sanity);
+ TEST_LAYOUTS_AND_DIMS(float, test_block_mapper_maps_every_element);
+ TEST_LAYOUTS_AND_DIMS(float, test_slice_block_mapper_maps_every_element);
+ TEST_LAYOUTS_AND_DIMS(float, test_block_io_copy_data_from_source_to_target);
+ TEST_LAYOUTS_AND_DIMS(Data, test_block_io_copy_data_from_source_to_target);
+ TEST_LAYOUTS_AND_DIMS(float, test_block_io_copy_using_reordered_dimensions);
+ TEST_LAYOUTS_AND_DIMS(Data, test_block_io_copy_using_reordered_dimensions);
+ TEST_LAYOUTS(test_block_io_zero_stride);
+ TEST_LAYOUTS(test_block_io_squeeze_ones);
+ TEST_LAYOUTS_AND_DIMS(float, test_block_cwise_unary_io_basic);
+ TEST_LAYOUTS(test_block_cwise_unary_io_squeeze_ones);
+ TEST_LAYOUTS(test_block_cwise_unary_io_zero_strides);
+ TEST_LAYOUTS_AND_DIMS(float, test_block_cwise_binary_io_basic);
+ TEST_LAYOUTS(test_block_cwise_binary_io_squeeze_ones);
+ TEST_LAYOUTS(test_block_cwise_binary_io_zero_strides);
+ TEST_LAYOUTS(test_uniform_block_shape);
+ TEST_LAYOUTS(test_skewed_inner_dim_block_shape);
+ TEST_LAYOUTS_WITH_ARG(test_empty_dims, internal::kUniformAllDims);
+ TEST_LAYOUTS_WITH_ARG(test_empty_dims, internal::kSkewedInnerDims);
+}
+
+#undef TEST_LAYOUTS
+#undef TEST_LAYOUTS_WITH_ARG
diff --git a/unsupported/test/cxx11_tensor_broadcast_sycl.cpp b/unsupported/test/cxx11_tensor_broadcast_sycl.cpp
index 21fdfca22..20f84b8e0 100644
--- a/unsupported/test/cxx11_tensor_broadcast_sycl.cpp
+++ b/unsupported/test/cxx11_tensor_broadcast_sycl.cpp
@@ -13,7 +13,7 @@
#define EIGEN_TEST_NO_LONGDOUBLE
#define EIGEN_TEST_NO_COMPLEX
-#define EIGEN_TEST_FUNC cxx11_tensor_broadcast_sycl
+
#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int64_t
#define EIGEN_USE_SYCL
@@ -137,7 +137,7 @@ template<typename DataType> void sycl_broadcast_test_per_device(const cl::sycl::
test_broadcast_sycl_fixed<DataType, ColMajor, int64_t>(sycl_device);
}
-void test_cxx11_tensor_broadcast_sycl() {
+EIGEN_DECLARE_TEST(cxx11_tensor_broadcast_sycl) {
for (const auto& device :Eigen::get_sycl_supported_devices()) {
CALL_SUBTEST(sycl_broadcast_test_per_device<float>(device));
}
diff --git a/unsupported/test/cxx11_tensor_broadcasting.cpp b/unsupported/test/cxx11_tensor_broadcasting.cpp
index 5c0ea5889..7df5b53d6 100644
--- a/unsupported/test/cxx11_tensor_broadcasting.cpp
+++ b/unsupported/test/cxx11_tensor_broadcasting.cpp
@@ -115,7 +115,7 @@ static void test_static_broadcasting()
Tensor<float, 3, DataLayout> tensor(8,3,5);
tensor.setRandom();
-#if EIGEN_HAS_CONSTEXPR
+#if defined(EIGEN_HAS_INDEX_LIST)
Eigen::IndexList<Eigen::type2index<2>, Eigen::type2index<3>, Eigen::type2index<4>> broadcasts;
#else
Eigen::array<int, 3> broadcasts;
@@ -180,8 +180,119 @@ static void test_fixed_size_broadcasting()
#endif
}
+template <int DataLayout>
+static void test_simple_broadcasting_one_by_n()
+{
+ Tensor<float, 4, DataLayout> tensor(1,13,5,7);
+ tensor.setRandom();
+ array<ptrdiff_t, 4> broadcasts;
+ broadcasts[0] = 9;
+ broadcasts[1] = 1;
+ broadcasts[2] = 1;
+ broadcasts[3] = 1;
+ Tensor<float, 4, DataLayout> broadcast;
+ broadcast = tensor.broadcast(broadcasts);
+
+ VERIFY_IS_EQUAL(broadcast.dimension(0), 9);
+ VERIFY_IS_EQUAL(broadcast.dimension(1), 13);
+ VERIFY_IS_EQUAL(broadcast.dimension(2), 5);
+ VERIFY_IS_EQUAL(broadcast.dimension(3), 7);
+
+ for (int i = 0; i < 9; ++i) {
+ for (int j = 0; j < 13; ++j) {
+ for (int k = 0; k < 5; ++k) {
+ for (int l = 0; l < 7; ++l) {
+ VERIFY_IS_EQUAL(tensor(i%1,j%13,k%5,l%7), broadcast(i,j,k,l));
+ }
+ }
+ }
+ }
+}
+
+template <int DataLayout>
+static void test_simple_broadcasting_n_by_one()
+{
+ Tensor<float, 4, DataLayout> tensor(7,3,5,1);
+ tensor.setRandom();
+ array<ptrdiff_t, 4> broadcasts;
+ broadcasts[0] = 1;
+ broadcasts[1] = 1;
+ broadcasts[2] = 1;
+ broadcasts[3] = 19;
+ Tensor<float, 4, DataLayout> broadcast;
+ broadcast = tensor.broadcast(broadcasts);
+
+ VERIFY_IS_EQUAL(broadcast.dimension(0), 7);
+ VERIFY_IS_EQUAL(broadcast.dimension(1), 3);
+ VERIFY_IS_EQUAL(broadcast.dimension(2), 5);
+ VERIFY_IS_EQUAL(broadcast.dimension(3), 19);
+
+ for (int i = 0; i < 7; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 5; ++k) {
+ for (int l = 0; l < 19; ++l) {
+ VERIFY_IS_EQUAL(tensor(i%7,j%3,k%5,l%1), broadcast(i,j,k,l));
+ }
+ }
+ }
+ }
+}
+
+template <int DataLayout>
+static void test_simple_broadcasting_one_by_n_by_one_1d()
+{
+ Tensor<float, 3, DataLayout> tensor(1,7,1);
+ tensor.setRandom();
+ array<ptrdiff_t, 3> broadcasts;
+ broadcasts[0] = 5;
+ broadcasts[1] = 1;
+ broadcasts[2] = 13;
+ Tensor<float, 3, DataLayout> broadcasted;
+ broadcasted = tensor.broadcast(broadcasts);
+
+ VERIFY_IS_EQUAL(broadcasted.dimension(0), 5);
+ VERIFY_IS_EQUAL(broadcasted.dimension(1), 7);
+ VERIFY_IS_EQUAL(broadcasted.dimension(2), 13);
+
+ for (int i = 0; i < 5; ++i) {
+ for (int j = 0; j < 7; ++j) {
+ for (int k = 0; k < 13; ++k) {
+ VERIFY_IS_EQUAL(tensor(0,j%7,0), broadcasted(i,j,k));
+ }
+ }
+ }
+}
+
+template <int DataLayout>
+static void test_simple_broadcasting_one_by_n_by_one_2d()
+{
+ Tensor<float, 4, DataLayout> tensor(1,7,13,1);
+ tensor.setRandom();
+ array<ptrdiff_t, 4> broadcasts;
+ broadcasts[0] = 5;
+ broadcasts[1] = 1;
+ broadcasts[2] = 1;
+ broadcasts[3] = 19;
+ Tensor<float, 4, DataLayout> broadcast;
+ broadcast = tensor.broadcast(broadcasts);
+
+ VERIFY_IS_EQUAL(broadcast.dimension(0), 5);
+ VERIFY_IS_EQUAL(broadcast.dimension(1), 7);
+ VERIFY_IS_EQUAL(broadcast.dimension(2), 13);
+ VERIFY_IS_EQUAL(broadcast.dimension(3), 19);
+
+ for (int i = 0; i < 5; ++i) {
+ for (int j = 0; j < 7; ++j) {
+ for (int k = 0; k < 13; ++k) {
+ for (int l = 0; l < 19; ++l) {
+ VERIFY_IS_EQUAL(tensor(0,j%7,k%13,0), broadcast(i,j,k,l));
+ }
+ }
+ }
+ }
+}
-void test_cxx11_tensor_broadcasting()
+EIGEN_DECLARE_TEST(cxx11_tensor_broadcasting)
{
CALL_SUBTEST(test_simple_broadcasting<ColMajor>());
CALL_SUBTEST(test_simple_broadcasting<RowMajor>());
@@ -191,4 +302,12 @@ void test_cxx11_tensor_broadcasting()
CALL_SUBTEST(test_static_broadcasting<RowMajor>());
CALL_SUBTEST(test_fixed_size_broadcasting<ColMajor>());
CALL_SUBTEST(test_fixed_size_broadcasting<RowMajor>());
+ CALL_SUBTEST(test_simple_broadcasting_one_by_n<RowMajor>());
+ CALL_SUBTEST(test_simple_broadcasting_n_by_one<RowMajor>());
+ CALL_SUBTEST(test_simple_broadcasting_one_by_n<ColMajor>());
+ CALL_SUBTEST(test_simple_broadcasting_n_by_one<ColMajor>());
+ CALL_SUBTEST(test_simple_broadcasting_one_by_n_by_one_1d<ColMajor>());
+ CALL_SUBTEST(test_simple_broadcasting_one_by_n_by_one_2d<ColMajor>());
+ CALL_SUBTEST(test_simple_broadcasting_one_by_n_by_one_1d<RowMajor>());
+ CALL_SUBTEST(test_simple_broadcasting_one_by_n_by_one_2d<RowMajor>());
}
diff --git a/unsupported/test/cxx11_tensor_builtins_sycl.cpp b/unsupported/test/cxx11_tensor_builtins_sycl.cpp
index 400a31d09..db2975783 100644
--- a/unsupported/test/cxx11_tensor_builtins_sycl.cpp
+++ b/unsupported/test/cxx11_tensor_builtins_sycl.cpp
@@ -13,7 +13,7 @@
#define EIGEN_TEST_NO_LONGDOUBLE
#define EIGEN_TEST_NO_COMPLEX
-#define EIGEN_TEST_FUNC cxx11_tensor_builtins_sycl
+
#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int64_t
#define EIGEN_USE_SYCL
@@ -257,7 +257,7 @@ static void test_builtin_binary_sycl(const Eigen::SyclDevice &sycl_device) {
TEST_BINARY_BUILTINS_OPERATORS_THAT_TAKES_SCALAR(int, %, ColMajor)
}
-void test_cxx11_tensor_builtins_sycl() {
+EIGEN_DECLARE_TEST(cxx11_tensor_builtins_sycl) {
for (const auto& device :Eigen::get_sycl_supported_devices()) {
QueueInterface queueInterface(device);
Eigen::SyclDevice sycl_device(&queueInterface);
diff --git a/unsupported/test/cxx11_tensor_cast_float16_cuda.cu b/unsupported/test/cxx11_tensor_cast_float16_gpu.cu
index 88c233994..97923d15f 100644
--- a/unsupported/test/cxx11_tensor_cast_float16_cuda.cu
+++ b/unsupported/test/cxx11_tensor_cast_float16_gpu.cu
@@ -9,20 +9,17 @@
#define EIGEN_TEST_NO_LONGDOUBLE
#define EIGEN_TEST_NO_COMPLEX
-#define EIGEN_TEST_FUNC cxx11_tensor_cast_float16_cuda
+
#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int
#define EIGEN_USE_GPU
-#if defined __CUDACC_VER__ && __CUDACC_VER__ >= 70500
-#include <cuda_fp16.h>
-#endif
#include "main.h"
#include <unsupported/Eigen/CXX11/Tensor>
using Eigen::Tensor;
-void test_cuda_conversion() {
- Eigen::CudaStreamDevice stream;
+void test_gpu_conversion() {
+ Eigen::GpuStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
int num_elem = 101;
@@ -75,8 +72,8 @@ void test_fallback_conversion() {
}
-void test_cxx11_tensor_cast_float16_cuda()
+EIGEN_DECLARE_TEST(cxx11_tensor_cast_float16_gpu)
{
- CALL_SUBTEST(test_cuda_conversion());
+ CALL_SUBTEST(test_gpu_conversion());
CALL_SUBTEST(test_fallback_conversion());
}
diff --git a/unsupported/test/cxx11_tensor_casts.cpp b/unsupported/test/cxx11_tensor_casts.cpp
index 3c6d0d2ff..c4fe9a798 100644
--- a/unsupported/test/cxx11_tensor_casts.cpp
+++ b/unsupported/test/cxx11_tensor_casts.cpp
@@ -105,7 +105,7 @@ static void test_small_to_big_type_cast()
}
-void test_cxx11_tensor_casts()
+EIGEN_DECLARE_TEST(cxx11_tensor_casts)
{
CALL_SUBTEST(test_simple_cast());
CALL_SUBTEST(test_vectorized_cast());
diff --git a/unsupported/test/cxx11_tensor_chipping.cpp b/unsupported/test/cxx11_tensor_chipping.cpp
index 89cf5c7b7..922274462 100644
--- a/unsupported/test/cxx11_tensor_chipping.cpp
+++ b/unsupported/test/cxx11_tensor_chipping.cpp
@@ -410,7 +410,7 @@ static void test_chip_raw_data_row_major()
VERIFY_IS_EQUAL(chip4.data(), static_cast<float*>(0));
}
-void test_cxx11_tensor_chipping()
+EIGEN_DECLARE_TEST(cxx11_tensor_chipping)
{
CALL_SUBTEST(test_simple_chip<ColMajor>());
CALL_SUBTEST(test_simple_chip<RowMajor>());
diff --git a/unsupported/test/cxx11_tensor_chipping_sycl.cpp b/unsupported/test/cxx11_tensor_chipping_sycl.cpp
index 39e4f0a7f..a91efe00c 100644
--- a/unsupported/test/cxx11_tensor_chipping_sycl.cpp
+++ b/unsupported/test/cxx11_tensor_chipping_sycl.cpp
@@ -15,7 +15,7 @@
#define EIGEN_TEST_NO_LONGDOUBLE
#define EIGEN_TEST_NO_COMPLEX
-#define EIGEN_TEST_FUNC cxx11_tensor_chipping_sycl
+
#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int64_t
#define EIGEN_USE_SYCL
@@ -614,7 +614,7 @@ template<typename DataType, typename dev_Selector> void sycl_chipping_test_per_d
test_chip_as_lvalue_sycl<DataType, RowMajor, int64_t>(sycl_device);
test_chip_as_lvalue_sycl<DataType, ColMajor, int64_t>(sycl_device);
}
-void test_cxx11_tensor_chipping_sycl()
+EIGEN_DECLARE_TEST(cxx11_tensor_chipping_sycl)
{
for (const auto& device :Eigen::get_sycl_supported_devices()) {
CALL_SUBTEST(sycl_chipping_test_per_device<float>(device));
diff --git a/unsupported/test/cxx11_tensor_comparisons.cpp b/unsupported/test/cxx11_tensor_comparisons.cpp
index b1ff8aecb..1a18e07cc 100644
--- a/unsupported/test/cxx11_tensor_comparisons.cpp
+++ b/unsupported/test/cxx11_tensor_comparisons.cpp
@@ -77,7 +77,7 @@ static void test_equality()
}
-void test_cxx11_tensor_comparisons()
+EIGEN_DECLARE_TEST(cxx11_tensor_comparisons)
{
CALL_SUBTEST(test_orderings());
CALL_SUBTEST(test_equality());
diff --git a/unsupported/test/cxx11_tensor_complex_cwise_ops_cuda.cu b/unsupported/test/cxx11_tensor_complex_cwise_ops_gpu.cu
index 2baf5eaad..f2a2a6cfa 100644
--- a/unsupported/test/cxx11_tensor_complex_cwise_ops_cuda.cu
+++ b/unsupported/test/cxx11_tensor_complex_cwise_ops_gpu.cu
@@ -8,12 +8,9 @@
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#define EIGEN_TEST_NO_LONGDOUBLE
-#define EIGEN_TEST_FUNC cxx11_tensor_complex_cwise_ops
+
#define EIGEN_USE_GPU
-#if defined __CUDACC_VER__ && __CUDACC_VER__ >= 70500
-#include <cuda_fp16.h>
-#endif
#include "main.h"
#include <unsupported/Eigen/CXX11/Tensor>
@@ -31,7 +28,7 @@ void test_cuda_complex_cwise_ops() {
cudaMalloc((void**)(&d_in2), complex_bytes);
cudaMalloc((void**)(&d_out), complex_bytes);
- Eigen::CudaStreamDevice stream;
+ Eigen::GpuStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<std::complex<T>, 1, 0, int>, Eigen::Aligned> gpu_in1(
@@ -51,11 +48,13 @@ void test_cuda_complex_cwise_ops() {
Add = 0,
Sub,
Mul,
- Div
+ Div,
+ Neg,
+ NbOps
};
Tensor<std::complex<T>, 1, 0, int> actual(kNumItems);
- for (int op = Add; op <= Div; op++) {
+ for (int op = Add; op < NbOps; op++) {
std::complex<T> expected;
switch (static_cast<CwiseOp>(op)) {
case Add:
@@ -74,6 +73,10 @@ void test_cuda_complex_cwise_ops() {
gpu_out.device(gpu_device) = gpu_in1 / gpu_in2;
expected = a / b;
break;
+ case Neg:
+ gpu_out.device(gpu_device) = -gpu_in1;
+ expected = -a;
+ break;
}
assert(cudaMemcpyAsync(actual.data(), d_out, complex_bytes, cudaMemcpyDeviceToHost,
gpu_device.stream()) == cudaSuccess);
@@ -90,7 +93,7 @@ void test_cuda_complex_cwise_ops() {
}
-void test_cxx11_tensor_complex_cwise_ops()
+EIGEN_DECLARE_TEST(cxx11_tensor_complex_cwise_ops_gpu)
{
CALL_SUBTEST(test_cuda_complex_cwise_ops<float>());
CALL_SUBTEST(test_cuda_complex_cwise_ops<double>());
diff --git a/unsupported/test/cxx11_tensor_complex_cuda.cu b/unsupported/test/cxx11_tensor_complex_gpu.cu
index d4e111f5d..f8b8ae704 100644
--- a/unsupported/test/cxx11_tensor_complex_cuda.cu
+++ b/unsupported/test/cxx11_tensor_complex_gpu.cu
@@ -8,12 +8,9 @@
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#define EIGEN_TEST_NO_LONGDOUBLE
-#define EIGEN_TEST_FUNC cxx11_tensor_complex
+
#define EIGEN_USE_GPU
-#if defined __CUDACC_VER__ && __CUDACC_VER__ >= 70500
-#include <cuda_fp16.h>
-#endif
#include "main.h"
#include <unsupported/Eigen/CXX11/Tensor>
@@ -37,7 +34,7 @@ void test_cuda_nullary() {
cudaMemcpy(d_in1, in1.data(), complex_bytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_in2, in2.data(), complex_bytes, cudaMemcpyHostToDevice);
- Eigen::CudaStreamDevice stream;
+ Eigen::GpuStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<std::complex<float>, 1, 0, int>, Eigen::Aligned> gpu_in1(
@@ -73,7 +70,7 @@ void test_cuda_nullary() {
static void test_cuda_sum_reductions() {
- Eigen::CudaStreamDevice stream;
+ Eigen::GpuStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
const int num_rows = internal::random<int>(1024, 5*1024);
@@ -107,10 +104,45 @@ static void test_cuda_sum_reductions() {
gpu_device.deallocate(gpu_out_ptr);
}
+static void test_cuda_mean_reductions() {
+
+ Eigen::GpuStreamDevice stream;
+ Eigen::GpuDevice gpu_device(&stream);
+
+ const int num_rows = internal::random<int>(1024, 5*1024);
+ const int num_cols = internal::random<int>(1024, 5*1024);
+
+ Tensor<std::complex<float>, 2> in(num_rows, num_cols);
+ in.setRandom();
+
+ Tensor<std::complex<float>, 0> full_redux;
+ full_redux = in.mean();
+
+ std::size_t in_bytes = in.size() * sizeof(std::complex<float>);
+ std::size_t out_bytes = full_redux.size() * sizeof(std::complex<float>);
+ std::complex<float>* gpu_in_ptr = static_cast<std::complex<float>*>(gpu_device.allocate(in_bytes));
+ std::complex<float>* gpu_out_ptr = static_cast<std::complex<float>*>(gpu_device.allocate(out_bytes));
+ gpu_device.memcpyHostToDevice(gpu_in_ptr, in.data(), in_bytes);
+
+ TensorMap<Tensor<std::complex<float>, 2> > in_gpu(gpu_in_ptr, num_rows, num_cols);
+ TensorMap<Tensor<std::complex<float>, 0> > out_gpu(gpu_out_ptr);
+
+ out_gpu.device(gpu_device) = in_gpu.mean();
+
+ Tensor<std::complex<float>, 0> full_redux_gpu;
+ gpu_device.memcpyDeviceToHost(full_redux_gpu.data(), gpu_out_ptr, out_bytes);
+ gpu_device.synchronize();
+
+ // Check that the CPU and GPU reductions return the same result.
+ VERIFY_IS_APPROX(full_redux(), full_redux_gpu());
+
+ gpu_device.deallocate(gpu_in_ptr);
+ gpu_device.deallocate(gpu_out_ptr);
+}
static void test_cuda_product_reductions() {
- Eigen::CudaStreamDevice stream;
+ Eigen::GpuStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
const int num_rows = internal::random<int>(1024, 5*1024);
@@ -145,9 +177,10 @@ static void test_cuda_product_reductions() {
}
-void test_cxx11_tensor_complex()
+EIGEN_DECLARE_TEST(cxx11_tensor_complex_gpu)
{
CALL_SUBTEST(test_cuda_nullary());
CALL_SUBTEST(test_cuda_sum_reductions());
+ CALL_SUBTEST(test_cuda_mean_reductions());
CALL_SUBTEST(test_cuda_product_reductions());
}
diff --git a/unsupported/test/cxx11_tensor_concatenation.cpp b/unsupported/test/cxx11_tensor_concatenation.cpp
index 03ef12e63..e223d9ffd 100644
--- a/unsupported/test/cxx11_tensor_concatenation.cpp
+++ b/unsupported/test/cxx11_tensor_concatenation.cpp
@@ -50,7 +50,13 @@ static void test_static_dimension_failure()
.reshape(Tensor<int, 3>::Dimensions(2, 3, 1))
.concatenate(right, 0);
Tensor<int, 2, DataLayout> alternative = left
- .concatenate(right.reshape(Tensor<int, 2>::Dimensions{{{2, 3}}}), 0);
+ // The Clang compiler breaks on {{{}}} with an ambiguous error between the
+ // copy constructor and the variadic DSizes constructor added for
+ // #ifndef EIGEN_EMULATE_CXX11_META_H.
+ // Solution: either change the code to
+ // Tensor<int, 2>::Dimensions{{2, 3}}
+ // or to Tensor<int, 2>::Dimensions{Tensor<int, 2>::Dimensions{{2, 3}}}
+ .concatenate(right.reshape(Tensor<int, 2>::Dimensions(2, 3)), 0);
}
template<int DataLayout>
@@ -123,7 +129,7 @@ static void test_concatenation_as_lvalue()
}
-void test_cxx11_tensor_concatenation()
+EIGEN_DECLARE_TEST(cxx11_tensor_concatenation)
{
CALL_SUBTEST(test_dimension_failures<ColMajor>());
CALL_SUBTEST(test_dimension_failures<RowMajor>());
diff --git a/unsupported/test/cxx11_tensor_concatenation_sycl.cpp b/unsupported/test/cxx11_tensor_concatenation_sycl.cpp
index e3023a368..765991b35 100644
--- a/unsupported/test/cxx11_tensor_concatenation_sycl.cpp
+++ b/unsupported/test/cxx11_tensor_concatenation_sycl.cpp
@@ -13,7 +13,7 @@
#define EIGEN_TEST_NO_LONGDOUBLE
#define EIGEN_TEST_NO_COMPLEX
-#define EIGEN_TEST_FUNC cxx11_tensor_concatenation_sycl
+
#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int64_t
#define EIGEN_USE_SYCL
@@ -173,7 +173,7 @@ template <typename DataType, typename Dev_selector> void tensorConcat_perDevice(
test_simple_concatenation<DataType, ColMajor, int64_t>(sycl_device);
test_concatenation_as_lvalue<DataType, ColMajor, int64_t>(sycl_device);
}
-void test_cxx11_tensor_concatenation_sycl() {
+EIGEN_DECLARE_TEST(cxx11_tensor_concatenation_sycl) {
for (const auto& device :Eigen::get_sycl_supported_devices()) {
CALL_SUBTEST(tensorConcat_perDevice<float>(device));
}
diff --git a/unsupported/test/cxx11_tensor_const.cpp b/unsupported/test/cxx11_tensor_const.cpp
index ad9c9da39..9d806ee3c 100644
--- a/unsupported/test/cxx11_tensor_const.cpp
+++ b/unsupported/test/cxx11_tensor_const.cpp
@@ -55,7 +55,7 @@ static void test_assign_of_const_tensor()
}
-void test_cxx11_tensor_const()
+EIGEN_DECLARE_TEST(cxx11_tensor_const)
{
CALL_SUBTEST(test_simple_assign());
CALL_SUBTEST(test_assign_of_const_tensor());
diff --git a/unsupported/test/cxx11_tensor_contract_cuda.cu b/unsupported/test/cxx11_tensor_contract_gpu.cu
index dd68430ce..575bdc1f9 100644
--- a/unsupported/test/cxx11_tensor_contract_cuda.cu
+++ b/unsupported/test/cxx11_tensor_contract_gpu.cu
@@ -10,21 +10,20 @@
#define EIGEN_TEST_NO_LONGDOUBLE
#define EIGEN_TEST_NO_COMPLEX
-#define EIGEN_TEST_FUNC cxx11_tensor_cuda
+
#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int
#define EIGEN_USE_GPU
-#if defined __CUDACC_VER__ && __CUDACC_VER__ >= 70500
-#include <cuda_fp16.h>
-#endif
#include "main.h"
#include <unsupported/Eigen/CXX11/Tensor>
+#include <unsupported/Eigen/CXX11/src/Tensor/TensorGpuHipCudaDefines.h>
+
using Eigen::Tensor;
typedef Tensor<float, 1>::DimensionPair DimPair;
template<int DataLayout>
-void test_cuda_contraction(int m_size, int k_size, int n_size)
+void test_gpu_contraction(int m_size, int k_size, int n_size)
{
std::cout << "Testing for (" << m_size << "," << k_size << "," << n_size << ")" << std::endl;
// with these dimensions, the output has 300 * 140 elements, which is
@@ -47,14 +46,14 @@ void test_cuda_contraction(int m_size, int k_size, int n_size)
float* d_t_right;
float* d_t_result;
- cudaMalloc((void**)(&d_t_left), t_left_bytes);
- cudaMalloc((void**)(&d_t_right), t_right_bytes);
- cudaMalloc((void**)(&d_t_result), t_result_bytes);
+ gpuMalloc((void**)(&d_t_left), t_left_bytes);
+ gpuMalloc((void**)(&d_t_right), t_right_bytes);
+ gpuMalloc((void**)(&d_t_result), t_result_bytes);
- cudaMemcpy(d_t_left, t_left.data(), t_left_bytes, cudaMemcpyHostToDevice);
- cudaMemcpy(d_t_right, t_right.data(), t_right_bytes, cudaMemcpyHostToDevice);
+ gpuMemcpy(d_t_left, t_left.data(), t_left_bytes, gpuMemcpyHostToDevice);
+ gpuMemcpy(d_t_right, t_right.data(), t_right_bytes, gpuMemcpyHostToDevice);
- Eigen::CudaStreamDevice stream;
+ Eigen::GpuStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<float, 2, DataLayout> >
@@ -68,7 +67,7 @@ void test_cuda_contraction(int m_size, int k_size, int n_size)
gpu_t_result.device(gpu_device) = gpu_t_left.contract(gpu_t_right, dims);
t_result = t_left.contract(t_right, dims);
- cudaMemcpy(t_result_gpu.data(), d_t_result, t_result_bytes, cudaMemcpyDeviceToHost);
+ gpuMemcpy(t_result_gpu.data(), d_t_result, t_result_bytes, gpuMemcpyDeviceToHost);
for (DenseIndex i = 0; i < t_result.size(); i++) {
if (fabs(t_result(i) - t_result_gpu(i)) < 1e-4f) {
continue;
@@ -81,9 +80,9 @@ void test_cuda_contraction(int m_size, int k_size, int n_size)
assert(false);
}
- cudaFree((void*)d_t_left);
- cudaFree((void*)d_t_right);
- cudaFree((void*)d_t_result);
+ gpuFree((void*)d_t_left);
+ gpuFree((void*)d_t_right);
+ gpuFree((void*)d_t_result);
}
@@ -111,14 +110,14 @@ void test_scalar(int m_size, int k_size, int n_size)
float* d_t_right;
float* d_t_result;
- cudaMalloc((void**)(&d_t_left), t_left_bytes);
- cudaMalloc((void**)(&d_t_right), t_right_bytes);
- cudaMalloc((void**)(&d_t_result), t_result_bytes);
+ gpuMalloc((void**)(&d_t_left), t_left_bytes);
+ gpuMalloc((void**)(&d_t_right), t_right_bytes);
+ gpuMalloc((void**)(&d_t_result), t_result_bytes);
- cudaMemcpy(d_t_left, t_left.data(), t_left_bytes, cudaMemcpyHostToDevice);
- cudaMemcpy(d_t_right, t_right.data(), t_right_bytes, cudaMemcpyHostToDevice);
+ gpuMemcpy(d_t_left, t_left.data(), t_left_bytes, gpuMemcpyHostToDevice);
+ gpuMemcpy(d_t_right, t_right.data(), t_right_bytes, gpuMemcpyHostToDevice);
- Eigen::CudaStreamDevice stream;
+ Eigen::GpuStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<float, 2, DataLayout> >
@@ -131,7 +130,7 @@ void test_scalar(int m_size, int k_size, int n_size)
gpu_t_result.device(gpu_device) = gpu_t_left.contract(gpu_t_right, dims);
t_result = t_left.contract(t_right, dims);
- cudaMemcpy(t_result_gpu.data(), d_t_result, t_result_bytes, cudaMemcpyDeviceToHost);
+ gpuMemcpy(t_result_gpu.data(), d_t_result, t_result_bytes, gpuMemcpyDeviceToHost);
if (fabs(t_result() - t_result_gpu()) > 1e-4f &&
!Eigen::internal::isApprox(t_result(), t_result_gpu(), 1e-4f)) {
std::cout << "mismatch detected: " << t_result()
@@ -139,39 +138,39 @@ void test_scalar(int m_size, int k_size, int n_size)
assert(false);
}
- cudaFree((void*)d_t_left);
- cudaFree((void*)d_t_right);
- cudaFree((void*)d_t_result);
+ gpuFree((void*)d_t_left);
+ gpuFree((void*)d_t_right);
+ gpuFree((void*)d_t_result);
}
template<int DataLayout>
-void test_cuda_contraction_m() {
+void test_gpu_contraction_m() {
for (int k = 32; k < 256; k++) {
- test_cuda_contraction<ColMajor>(k, 128, 128);
- test_cuda_contraction<RowMajor>(k, 128, 128);
+ test_gpu_contraction<ColMajor>(k, 128, 128);
+ test_gpu_contraction<RowMajor>(k, 128, 128);
}
}
template<int DataLayout>
-void test_cuda_contraction_k() {
+void test_gpu_contraction_k() {
for (int k = 32; k < 256; k++) {
- test_cuda_contraction<ColMajor>(128, k, 128);
- test_cuda_contraction<RowMajor>(128, k, 128);
+ test_gpu_contraction<ColMajor>(128, k, 128);
+ test_gpu_contraction<RowMajor>(128, k, 128);
}
}
template<int DataLayout>
-void test_cuda_contraction_n() {
+void test_gpu_contraction_n() {
for (int k = 32; k < 256; k++) {
- test_cuda_contraction<ColMajor>(128, 128, k);
- test_cuda_contraction<RowMajor>(128, 128, k);
+ test_gpu_contraction<ColMajor>(128, 128, k);
+ test_gpu_contraction<RowMajor>(128, 128, k);
}
}
template<int DataLayout>
-void test_cuda_contraction_sizes() {
+void test_gpu_contraction_sizes() {
int m_sizes[] = { 31, 39, 63, 64, 65,
127, 129, 255, 257 , 511,
512, 513, 1023, 1024, 1025};
@@ -188,29 +187,32 @@ void test_cuda_contraction_sizes() {
for (int i = 0; i < 15; i++) {
for (int j = 0; j < 15; j++) {
for (int k = 0; k < 17; k++) {
- test_cuda_contraction<DataLayout>(m_sizes[i], n_sizes[j], k_sizes[k]);
+ test_gpu_contraction<DataLayout>(m_sizes[i], n_sizes[j], k_sizes[k]);
}
}
}
}
-void test_cxx11_tensor_cuda()
+EIGEN_DECLARE_TEST(cxx11_tensor_contract_gpu)
{
- CALL_SUBTEST_1(test_cuda_contraction<ColMajor>(128, 128, 128));
- CALL_SUBTEST_1(test_cuda_contraction<RowMajor>(128, 128, 128));
+ CALL_SUBTEST_1(test_gpu_contraction<ColMajor>(128, 128, 128));
+ CALL_SUBTEST_1(test_gpu_contraction<RowMajor>(128, 128, 128));
CALL_SUBTEST_1(test_scalar<ColMajor>(128, 128, 128));
CALL_SUBTEST_1(test_scalar<RowMajor>(128, 128, 128));
- CALL_SUBTEST_2(test_cuda_contraction_m<ColMajor>());
- CALL_SUBTEST_3(test_cuda_contraction_m<RowMajor>());
+ CALL_SUBTEST_2(test_gpu_contraction_m<ColMajor>());
+ CALL_SUBTEST_3(test_gpu_contraction_m<RowMajor>());
- CALL_SUBTEST_4(test_cuda_contraction_k<ColMajor>());
- CALL_SUBTEST_5(test_cuda_contraction_k<RowMajor>());
+ CALL_SUBTEST_4(test_gpu_contraction_k<ColMajor>());
+ CALL_SUBTEST_5(test_gpu_contraction_k<RowMajor>());
- CALL_SUBTEST_6(test_cuda_contraction_n<ColMajor>());
- CALL_SUBTEST_7(test_cuda_contraction_n<RowMajor>());
+ CALL_SUBTEST_6(test_gpu_contraction_n<ColMajor>());
+ CALL_SUBTEST_7(test_gpu_contraction_n<RowMajor>());
- CALL_SUBTEST_8(test_cuda_contraction_sizes<ColMajor>());
- CALL_SUBTEST_9(test_cuda_contraction_sizes<RowMajor>());
+#if !defined(EIGEN_USE_HIP)
+// disable these subtests for HIP
+ CALL_SUBTEST_8(test_gpu_contraction_sizes<ColMajor>());
+ CALL_SUBTEST_9(test_gpu_contraction_sizes<RowMajor>());
+#endif
}
diff --git a/unsupported/test/cxx11_tensor_contract_sycl.cpp b/unsupported/test/cxx11_tensor_contract_sycl.cpp
index 5bace66c5..c8e86e69f 100644
--- a/unsupported/test/cxx11_tensor_contract_sycl.cpp
+++ b/unsupported/test/cxx11_tensor_contract_sycl.cpp
@@ -13,7 +13,7 @@
#define EIGEN_TEST_NO_LONGDOUBLE
#define EIGEN_TEST_NO_COMPLEX
-#define EIGEN_TEST_FUNC cxx11_tensor_contract_sycl
+
#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int64_t
#define EIGEN_USE_SYCL
@@ -283,7 +283,7 @@ template <typename Dev_selector> void tensorContractionPerDevice(Dev_selector& s
}
-void test_cxx11_tensor_contract_sycl() {
+EIGEN_DECLARE_TEST(cxx11_tensor_contract_sycl) {
for (const auto& device :Eigen::get_sycl_supported_devices()) {
CALL_SUBTEST(tensorContractionPerDevice(device));
}
diff --git a/unsupported/test/cxx11_tensor_contraction.cpp b/unsupported/test/cxx11_tensor_contraction.cpp
index ace97057f..4e5922440 100644
--- a/unsupported/test/cxx11_tensor_contraction.cpp
+++ b/unsupported/test/cxx11_tensor_contraction.cpp
@@ -510,7 +510,56 @@ static void test_const_inputs()
VERIFY_IS_APPROX(mat3(1,1), mat1(1,0)*mat2(0,1) + mat1(1,1)*mat2(1,1) + mat1(1,2)*mat2(2,1));
}
-void test_cxx11_tensor_contraction()
+// Apply Sqrt to all output elements.
+struct SqrtOutputKernel {
+ template <typename Index, typename Scalar>
+ EIGEN_ALWAYS_INLINE void operator()(
+ const internal::blas_data_mapper<Scalar, Index, ColMajor>& output_mapper,
+ const TensorContractionParams&, Index, Index, Index num_rows,
+ Index num_cols) const {
+ for (int i = 0; i < num_rows; ++i) {
+ for (int j = 0; j < num_cols; ++j) {
+ output_mapper(i, j) = std::sqrt(output_mapper(i, j));
+ }
+ }
+ }
+};
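+
+// The output kernel above is invoked by the contraction evaluator once per
+// finished output block, with a blas_data_mapper giving read/write access to
+// a num_rows x num_cols panel of the result, so the element-wise sqrt is
+// fused into the contraction instead of needing a second pass. A hypothetical
+// kernel that scales the output (not used by this test) would have the same
+// shape:
+//
+//   struct ScaleOutputKernel {
+//     template <typename Index, typename Scalar>
+//     EIGEN_ALWAYS_INLINE void operator()(
+//         const internal::blas_data_mapper<Scalar, Index, ColMajor>& output_mapper,
+//         const TensorContractionParams&, Index, Index, Index num_rows,
+//         Index num_cols) const {
+//       for (Index i = 0; i < num_rows; ++i)
+//         for (Index j = 0; j < num_cols; ++j)
+//           output_mapper(i, j) *= Scalar(2);
+//     }
+//   };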
+
+template <int DataLayout>
+static void test_large_contraction_with_output_kernel() {
+ Tensor<float, 4, DataLayout> t_left(30, 50, 8, 31);
+ Tensor<float, 5, DataLayout> t_right(8, 31, 7, 20, 10);
+ Tensor<float, 5, DataLayout> t_result(30, 50, 7, 20, 10);
+
+ t_left.setRandom();
+ t_right.setRandom();
+ // Put trash in t_result to verify that the contraction clears the output memory.
+ t_result.setRandom();
+
+ // Add a little offset so that the results won't be close to zero.
+ t_left += t_left.constant(1.0f);
+ t_right += t_right.constant(1.0f);
+
+ typedef Map<Eigen::Matrix<float, Dynamic, Dynamic, DataLayout>> MapXf;
+ MapXf m_left(t_left.data(), 1500, 248);
+ MapXf m_right(t_right.data(), 248, 1400);
+ Eigen::Matrix<float, Dynamic, Dynamic, DataLayout> m_result(1500, 1400);
+
+ // this contraction should be equivalent to a single matrix multiplication
+ Eigen::array<DimPair, 2> dims({{DimPair(2, 0), DimPair(3, 1)}});
+
+ // compute results by separate methods
+ t_result = t_left.contract(t_right, dims, SqrtOutputKernel());
+
+ m_result = m_left * m_right;
+
+ for (std::ptrdiff_t i = 0; i < t_result.dimensions().TotalSize(); i++) {
+ VERIFY(&t_result.data()[i] != &m_result.data()[i]);
+ VERIFY_IS_APPROX(t_result.data()[i], std::sqrt(m_result.data()[i]));
+ }
+}
+
+EIGEN_DECLARE_TEST(cxx11_tensor_contraction)
{
CALL_SUBTEST(test_evals<ColMajor>());
CALL_SUBTEST(test_evals<RowMajor>());
@@ -542,4 +591,6 @@ void test_cxx11_tensor_contraction()
CALL_SUBTEST(test_tensor_product<RowMajor>());
CALL_SUBTEST(test_const_inputs<ColMajor>());
CALL_SUBTEST(test_const_inputs<RowMajor>());
+ CALL_SUBTEST(test_large_contraction_with_output_kernel<ColMajor>());
+ CALL_SUBTEST(test_large_contraction_with_output_kernel<RowMajor>());
}
diff --git a/unsupported/test/cxx11_tensor_convolution.cpp b/unsupported/test/cxx11_tensor_convolution.cpp
index e3d4675eb..c3688f678 100644
--- a/unsupported/test/cxx11_tensor_convolution.cpp
+++ b/unsupported/test/cxx11_tensor_convolution.cpp
@@ -25,7 +25,8 @@ static void test_evals()
Tensor<float, 2, DataLayout> result(2,3);
result.setZero();
- Eigen::array<Tensor<float, 2>::Index, 1> dims3{{0}};
+ Eigen::array<Tensor<float, 2>::Index, 1> dims3;
+ dims3[0] = 0;
typedef TensorEvaluator<decltype(input.convolve(kernel, dims3)), DefaultDevice> Evaluator;
Evaluator eval(input.convolve(kernel, dims3), DefaultDevice());
@@ -136,7 +137,7 @@ static void test_strides() {
input(12)*kernel(2)));
}
-void test_cxx11_tensor_convolution()
+EIGEN_DECLARE_TEST(cxx11_tensor_convolution)
{
CALL_SUBTEST(test_evals<ColMajor>());
CALL_SUBTEST(test_evals<RowMajor>());
diff --git a/unsupported/test/cxx11_tensor_convolution_sycl.cpp b/unsupported/test/cxx11_tensor_convolution_sycl.cpp
index a4226a63a..3954c8a28 100644
--- a/unsupported/test/cxx11_tensor_convolution_sycl.cpp
+++ b/unsupported/test/cxx11_tensor_convolution_sycl.cpp
@@ -13,7 +13,7 @@
#define EIGEN_TEST_NO_LONGDOUBLE
#define EIGEN_TEST_NO_COMPLEX
-#define EIGEN_TEST_FUNC cxx11_tensor_convolution_sycl
+
#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int64_t
#define EIGEN_USE_SYCL
@@ -462,7 +462,7 @@ template <typename Dev_selector> void tensorConvolutionPerDevice(Dev_selector& s
test_strides<float, RowMajor, int64_t>(sycl_device);
}
-void test_cxx11_tensor_convolution_sycl() {
+EIGEN_DECLARE_TEST(cxx11_tensor_convolution_sycl) {
for (const auto& device :Eigen::get_sycl_supported_devices()) {
CALL_SUBTEST(tensorConvolutionPerDevice(device));
}
diff --git a/unsupported/test/cxx11_tensor_custom_index.cpp b/unsupported/test/cxx11_tensor_custom_index.cpp
index 4528cc176..b5dbc97bd 100644
--- a/unsupported/test/cxx11_tensor_custom_index.cpp
+++ b/unsupported/test/cxx11_tensor_custom_index.cpp
@@ -88,7 +88,7 @@ static void test_sizes_as_index()
}
-void test_cxx11_tensor_custom_index() {
+EIGEN_DECLARE_TEST(cxx11_tensor_custom_index) {
test_map_as_index<ColMajor>();
test_map_as_index<RowMajor>();
test_matrix_as_index<ColMajor>();
diff --git a/unsupported/test/cxx11_tensor_custom_op.cpp b/unsupported/test/cxx11_tensor_custom_op.cpp
index 8baa477cc..875ea57d2 100644
--- a/unsupported/test/cxx11_tensor_custom_op.cpp
+++ b/unsupported/test/cxx11_tensor_custom_op.cpp
@@ -104,7 +104,7 @@ static void test_custom_binary_op()
}
-void test_cxx11_tensor_custom_op()
+EIGEN_DECLARE_TEST(cxx11_tensor_custom_op)
{
CALL_SUBTEST(test_custom_unary_op());
CALL_SUBTEST(test_custom_binary_op());
diff --git a/unsupported/test/cxx11_tensor_custom_op_sycl.cpp b/unsupported/test/cxx11_tensor_custom_op_sycl.cpp
new file mode 100644
index 000000000..cc3b02448
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_custom_op_sycl.cpp
@@ -0,0 +1,165 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2016
+// Mehdi Goli Codeplay Software Ltd.
+// Ralph Potter Codeplay Software Ltd.
+// Luke Iwanski Codeplay Software Ltd.
+// Contact: <eigen@codeplay.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#define EIGEN_TEST_NO_LONGDOUBLE
+#define EIGEN_TEST_NO_COMPLEX
+
+#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int64_t
+#define EIGEN_USE_SYCL
+
+#include "main.h"
+#include <unsupported/Eigen/CXX11/Tensor>
+
+using Eigen::Tensor;
+template<typename TensorType>
+struct InsertZeros {
+ DSizes<DenseIndex, 2> dimensions(const TensorType& input) const {
+ DSizes<DenseIndex, 2> result;
+ result[0] = input.dimension(0) * 2;
+ result[1] = input.dimension(1) * 2;
+ return result;
+ }
+
+ template <typename Output, typename Device>
+ void eval(const TensorType& input, Output& output, const Device& device) const
+ {
+ array<DenseIndex, 2> strides;
+ strides[0] = 2;
+ strides[1] = 2;
+ output.stride(strides).device(device) = input;
+
+ Eigen::DSizes<DenseIndex, 2> offsets(1,1);
+ Eigen::DSizes<DenseIndex, 2> extents(output.dimension(0)-1, output.dimension(1)-1);
+ output.slice(offsets, extents).stride(strides).device(device) = input.constant(0.0f);
+ }
+};
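+
+// A unary custom op only needs the two members above: dimensions() reports
+// the shape of the result and eval() writes it through the supplied device,
+// which is what allows customOp(InsertZeros<TensorType>()) below to run on
+// the SYCL device.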
+
+template<typename DataType, int DataLayout, typename IndexType>
+static void test_custom_unary_op_sycl(const Eigen::SyclDevice &sycl_device)
+{
+ IndexType sizeDim1 = 3;
+ IndexType sizeDim2 = 5;
+ Eigen::array<IndexType, 2> tensorRange = {{sizeDim1, sizeDim2}};
+ Eigen::array<IndexType, 2> tensorResultRange = {{6, 10}};
+
+ Eigen::Tensor<DataType, 2, DataLayout, IndexType> in1(tensorRange);
+ Eigen::Tensor<DataType, 2, DataLayout, IndexType> out(tensorResultRange);
+
+ DataType * gpu_in1_data = static_cast<DataType*>(sycl_device.allocate(in1.dimensions().TotalSize()*sizeof(DataType)));
+ DataType * gpu_out_data = static_cast<DataType*>(sycl_device.allocate(out.dimensions().TotalSize()*sizeof(DataType)));
+
+ typedef Eigen::TensorMap<Eigen::Tensor<DataType, 2, DataLayout, IndexType> > TensorType;
+ TensorType gpu_in1(gpu_in1_data, tensorRange);
+ TensorType gpu_out(gpu_out_data, tensorResultRange);
+
+ in1.setRandom();
+ sycl_device.memcpyHostToDevice(gpu_in1_data, in1.data(),(in1.dimensions().TotalSize())*sizeof(DataType));
+ gpu_out.device(sycl_device) = gpu_in1.customOp(InsertZeros<TensorType>());
+ sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.dimensions().TotalSize())*sizeof(DataType));
+
+ VERIFY_IS_EQUAL(out.dimension(0), 6);
+ VERIFY_IS_EQUAL(out.dimension(1), 10);
+
+ for (int i = 0; i < 6; i+=2) {
+ for (int j = 0; j < 10; j+=2) {
+ VERIFY_IS_EQUAL(out(i, j), in1(i/2, j/2));
+ }
+ }
+ for (int i = 1; i < 6; i+=2) {
+ for (int j = 1; j < 10; j+=2) {
+ VERIFY_IS_EQUAL(out(i, j), 0);
+ }
+ }
+}
+
+template<typename TensorType>
+struct BatchMatMul {
+ DSizes<DenseIndex, 3> dimensions(const TensorType& input1, const TensorType& input2) const {
+ DSizes<DenseIndex, 3> result;
+ result[0] = input1.dimension(0);
+ result[1] = input2.dimension(1);
+ result[2] = input2.dimension(2);
+ return result;
+ }
+
+ template <typename Output, typename Device>
+ void eval(const TensorType& input1, const TensorType& input2,
+ Output& output, const Device& device) const
+ {
+ typedef typename TensorType::DimensionPair DimPair;
+ array<DimPair, 1> dims;
+ dims[0] = DimPair(1, 0);
+ for (int64_t i = 0; i < output.dimension(2); ++i) {
+ output.template chip<2>(i).device(device) = input1.template chip<2>(i).contract(input2.template chip<2>(i), dims);
+ }
+ }
+};
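+
+// The binary form of customOp follows the same contract with two inputs:
+// dimensions(input1, input2) fixes the output shape and eval(input1, input2,
+// output, device) fills it, here by contracting matching chips along
+// dimension 2 as independent matrix products.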
+
+template<typename DataType, int DataLayout, typename IndexType>
+static void test_custom_binary_op_sycl(const Eigen::SyclDevice &sycl_device)
+{
+
+ Eigen::array<IndexType, 3> tensorRange1 = {{2, 3, 5}};
+ Eigen::array<IndexType, 3> tensorRange2 = {{3,7,5}};
+ Eigen::array<IndexType, 3> tensorResultRange = {{2, 7, 5}};
+
+ Eigen::Tensor<DataType, 3, DataLayout, IndexType> in1(tensorRange1);
+ Eigen::Tensor<DataType, 3, DataLayout, IndexType> in2(tensorRange2);
+ Eigen::Tensor<DataType, 3, DataLayout, IndexType> out(tensorResultRange);
+
+ DataType * gpu_in1_data = static_cast<DataType*>(sycl_device.allocate(in1.dimensions().TotalSize()*sizeof(DataType)));
+ DataType * gpu_in2_data = static_cast<DataType*>(sycl_device.allocate(in2.dimensions().TotalSize()*sizeof(DataType)));
+ DataType * gpu_out_data = static_cast<DataType*>(sycl_device.allocate(out.dimensions().TotalSize()*sizeof(DataType)));
+
+ typedef Eigen::TensorMap<Eigen::Tensor<DataType, 3, DataLayout, IndexType> > TensorType;
+ TensorType gpu_in1(gpu_in1_data, tensorRange1);
+ TensorType gpu_in2(gpu_in2_data, tensorRange2);
+ TensorType gpu_out(gpu_out_data, tensorResultRange);
+
+ in1.setRandom();
+ in2.setRandom();
+
+ sycl_device.memcpyHostToDevice(gpu_in1_data, in1.data(),(in1.dimensions().TotalSize())*sizeof(DataType));
+ sycl_device.memcpyHostToDevice(gpu_in2_data, in2.data(),(in2.dimensions().TotalSize())*sizeof(DataType));
+
+ gpu_out.device(sycl_device) = gpu_in1.customOp(gpu_in2, BatchMatMul<TensorType>());
+ sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.dimensions().TotalSize())*sizeof(DataType));
+
+ for (IndexType i = 0; i < 5; ++i) {
+ typedef typename Eigen::Tensor<DataType, 3, DataLayout, IndexType>::DimensionPair DimPair;
+ array<DimPair, 1> dims;
+ dims[0] = DimPair(1, 0);
+ Eigen::Tensor<DataType, 2, DataLayout, IndexType> reference = in1.template chip<2>(i).contract(in2.template chip<2>(i), dims);
+ TensorRef<Eigen::Tensor<DataType, 2, DataLayout, IndexType> > val = out.template chip<2>(i);
+ for (IndexType j = 0; j < 2; ++j) {
+ for (IndexType k = 0; k < 7; ++k) {
+ VERIFY_IS_APPROX(val(j, k), reference(j, k));
+ }
+ }
+ }
+}
+
+template <typename DataType, typename Dev_selector> void custom_op_perDevice(Dev_selector s){
+ QueueInterface queueInterface(s);
+ auto sycl_device = Eigen::SyclDevice(&queueInterface);
+ test_custom_unary_op_sycl<DataType, RowMajor, int64_t>(sycl_device);
+ test_custom_unary_op_sycl<DataType, ColMajor, int64_t>(sycl_device);
+ test_custom_binary_op_sycl<DataType, ColMajor, int64_t>(sycl_device);
+ test_custom_binary_op_sycl<DataType, RowMajor, int64_t>(sycl_device);
+
+}
+EIGEN_DECLARE_TEST(cxx11_tensor_custom_op_sycl) {
+ for (const auto& device :Eigen::get_sycl_supported_devices()) {
+ CALL_SUBTEST(custom_op_perDevice<float>(device));
+ }
+}
diff --git a/unsupported/test/cxx11_tensor_device.cu b/unsupported/test/cxx11_tensor_device.cu
index fde20ddf2..c9f78d2d3 100644
--- a/unsupported/test/cxx11_tensor_device.cu
+++ b/unsupported/test/cxx11_tensor_device.cu
@@ -9,16 +9,15 @@
#define EIGEN_TEST_NO_LONGDOUBLE
#define EIGEN_TEST_NO_COMPLEX
-#define EIGEN_TEST_FUNC cxx11_tensor_device
+
#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int
#define EIGEN_USE_GPU
-#if defined __CUDACC_VER__ && __CUDACC_VER__ >= 70500
-#include <cuda_fp16.h>
-#endif
#include "main.h"
#include <unsupported/Eigen/CXX11/Tensor>
+#include <unsupported/Eigen/CXX11/src/Tensor/TensorGpuHipCudaDefines.h>
+
using Eigen::Tensor;
using Eigen::RowMajor;
@@ -68,22 +67,22 @@ struct CPUContext {
// Context for evaluation on GPU
struct GPUContext {
GPUContext(const Eigen::TensorMap<Eigen::Tensor<float, 3> >& in1, Eigen::TensorMap<Eigen::Tensor<float, 3> >& in2, Eigen::TensorMap<Eigen::Tensor<float, 3> >& out) : in1_(in1), in2_(in2), out_(out), gpu_device_(&stream_) {
- assert(cudaMalloc((void**)(&kernel_1d_), 2*sizeof(float)) == cudaSuccess);
+ assert(gpuMalloc((void**)(&kernel_1d_), 2*sizeof(float)) == gpuSuccess);
float kernel_1d_val[] = {3.14f, 2.7f};
- assert(cudaMemcpy(kernel_1d_, kernel_1d_val, 2*sizeof(float), cudaMemcpyHostToDevice) == cudaSuccess);
+ assert(gpuMemcpy(kernel_1d_, kernel_1d_val, 2*sizeof(float), gpuMemcpyHostToDevice) == gpuSuccess);
- assert(cudaMalloc((void**)(&kernel_2d_), 4*sizeof(float)) == cudaSuccess);
+ assert(gpuMalloc((void**)(&kernel_2d_), 4*sizeof(float)) == gpuSuccess);
float kernel_2d_val[] = {3.14f, 2.7f, 0.2f, 7.0f};
- assert(cudaMemcpy(kernel_2d_, kernel_2d_val, 4*sizeof(float), cudaMemcpyHostToDevice) == cudaSuccess);
+ assert(gpuMemcpy(kernel_2d_, kernel_2d_val, 4*sizeof(float), gpuMemcpyHostToDevice) == gpuSuccess);
- assert(cudaMalloc((void**)(&kernel_3d_), 8*sizeof(float)) == cudaSuccess);
+ assert(gpuMalloc((void**)(&kernel_3d_), 8*sizeof(float)) == gpuSuccess);
float kernel_3d_val[] = {3.14f, -1.0f, 2.7f, -0.3f, 0.2f, -0.7f, 7.0f, -0.5f};
- assert(cudaMemcpy(kernel_3d_, kernel_3d_val, 8*sizeof(float), cudaMemcpyHostToDevice) == cudaSuccess);
+ assert(gpuMemcpy(kernel_3d_, kernel_3d_val, 8*sizeof(float), gpuMemcpyHostToDevice) == gpuSuccess);
}
~GPUContext() {
- assert(cudaFree(kernel_1d_) == cudaSuccess);
- assert(cudaFree(kernel_2d_) == cudaSuccess);
- assert(cudaFree(kernel_3d_) == cudaSuccess);
+ assert(gpuFree(kernel_1d_) == gpuSuccess);
+ assert(gpuFree(kernel_2d_) == gpuSuccess);
+ assert(gpuFree(kernel_3d_) == gpuSuccess);
}
const Eigen::GpuDevice& device() const { return gpu_device_; }
@@ -104,7 +103,7 @@ struct GPUContext {
float* kernel_2d_;
float* kernel_3d_;
- Eigen::CudaStreamDevice stream_;
+ Eigen::GpuStreamDevice stream_;
Eigen::GpuDevice gpu_device_;
};
@@ -283,12 +282,12 @@ void test_gpu() {
float* d_in1;
float* d_in2;
float* d_out;
- cudaMalloc((void**)(&d_in1), in1_bytes);
- cudaMalloc((void**)(&d_in2), in2_bytes);
- cudaMalloc((void**)(&d_out), out_bytes);
+ gpuMalloc((void**)(&d_in1), in1_bytes);
+ gpuMalloc((void**)(&d_in2), in2_bytes);
+ gpuMalloc((void**)(&d_out), out_bytes);
- cudaMemcpy(d_in1, in1.data(), in1_bytes, cudaMemcpyHostToDevice);
- cudaMemcpy(d_in2, in2.data(), in2_bytes, cudaMemcpyHostToDevice);
+ gpuMemcpy(d_in1, in1.data(), in1_bytes, gpuMemcpyHostToDevice);
+ gpuMemcpy(d_in2, in2.data(), in2_bytes, gpuMemcpyHostToDevice);
Eigen::TensorMap<Eigen::Tensor<float, 3> > gpu_in1(d_in1, 40,50,70);
Eigen::TensorMap<Eigen::Tensor<float, 3> > gpu_in2(d_in2, 40,50,70);
@@ -296,7 +295,7 @@ void test_gpu() {
GPUContext context(gpu_in1, gpu_in2, gpu_out);
test_contextual_eval(&context);
- assert(cudaMemcpy(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost) == cudaSuccess);
+ assert(gpuMemcpy(out.data(), d_out, out_bytes, gpuMemcpyDeviceToHost) == gpuSuccess);
for (int i = 0; i < 40; ++i) {
for (int j = 0; j < 50; ++j) {
for (int k = 0; k < 70; ++k) {
@@ -306,7 +305,7 @@ void test_gpu() {
}
test_forced_contextual_eval(&context);
- assert(cudaMemcpy(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost) == cudaSuccess);
+ assert(gpuMemcpy(out.data(), d_out, out_bytes, gpuMemcpyDeviceToHost) == gpuSuccess);
for (int i = 0; i < 40; ++i) {
for (int j = 0; j < 50; ++j) {
for (int k = 0; k < 70; ++k) {
@@ -316,7 +315,7 @@ void test_gpu() {
}
test_compound_assignment(&context);
- assert(cudaMemcpy(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost) == cudaSuccess);
+ assert(gpuMemcpy(out.data(), d_out, out_bytes, gpuMemcpyDeviceToHost) == gpuSuccess);
for (int i = 0; i < 40; ++i) {
for (int j = 0; j < 50; ++j) {
for (int k = 0; k < 70; ++k) {
@@ -326,7 +325,7 @@ void test_gpu() {
}
test_contraction(&context);
- assert(cudaMemcpy(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost) == cudaSuccess);
+ assert(gpuMemcpy(out.data(), d_out, out_bytes, gpuMemcpyDeviceToHost) == gpuSuccess);
for (int i = 0; i < 40; ++i) {
for (int j = 0; j < 40; ++j) {
const float result = out(i,j,0);
@@ -341,8 +340,8 @@ void test_gpu() {
}
test_1d_convolution(&context);
- assert(cudaMemcpyAsync(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, context.device().stream()) == cudaSuccess);
- assert(cudaStreamSynchronize(context.device().stream()) == cudaSuccess);
+ assert(gpuMemcpyAsync(out.data(), d_out, out_bytes, gpuMemcpyDeviceToHost, context.device().stream()) == gpuSuccess);
+ assert(gpuStreamSynchronize(context.device().stream()) == gpuSuccess);
for (int i = 0; i < 40; ++i) {
for (int j = 0; j < 49; ++j) {
for (int k = 0; k < 70; ++k) {
@@ -352,8 +351,8 @@ void test_gpu() {
}
test_2d_convolution(&context);
- assert(cudaMemcpyAsync(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, context.device().stream()) == cudaSuccess);
- assert(cudaStreamSynchronize(context.device().stream()) == cudaSuccess);
+ assert(gpuMemcpyAsync(out.data(), d_out, out_bytes, gpuMemcpyDeviceToHost, context.device().stream()) == gpuSuccess);
+ assert(gpuStreamSynchronize(context.device().stream()) == gpuSuccess);
for (int i = 0; i < 40; ++i) {
for (int j = 0; j < 49; ++j) {
for (int k = 0; k < 69; ++k) {
@@ -365,9 +364,13 @@ void test_gpu() {
}
}
+#if !defined(EIGEN_USE_HIP)
+// disable this test on the HIP platform
+// 3D tensor convolutions seem to hang on the HIP platform
+
test_3d_convolution(&context);
- assert(cudaMemcpyAsync(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, context.device().stream()) == cudaSuccess);
- assert(cudaStreamSynchronize(context.device().stream()) == cudaSuccess);
+ assert(gpuMemcpyAsync(out.data(), d_out, out_bytes, gpuMemcpyDeviceToHost, context.device().stream()) == gpuSuccess);
+ assert(gpuStreamSynchronize(context.device().stream()) == gpuSuccess);
for (int i = 0; i < 39; ++i) {
for (int j = 0; j < 49; ++j) {
for (int k = 0; k < 69; ++k) {
@@ -380,10 +383,13 @@ void test_gpu() {
}
}
}
+
+#endif
+
}
-void test_cxx11_tensor_device()
+EIGEN_DECLARE_TEST(cxx11_tensor_device)
{
CALL_SUBTEST_1(test_cpu());
CALL_SUBTEST_2(test_gpu());
diff --git a/unsupported/test/cxx11_tensor_device_sycl.cpp b/unsupported/test/cxx11_tensor_device_sycl.cpp
index 3ecc68df0..5095cb078 100644
--- a/unsupported/test/cxx11_tensor_device_sycl.cpp
+++ b/unsupported/test/cxx11_tensor_device_sycl.cpp
@@ -13,7 +13,7 @@
#define EIGEN_TEST_NO_LONGDOUBLE
#define EIGEN_TEST_NO_COMPLEX
-#define EIGEN_TEST_FUNC cxx11_tensor_device_sycl
+
#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int64_t
#define EIGEN_USE_SYCL
@@ -70,7 +70,7 @@ template<typename DataType> void sycl_device_test_per_device(const cl::sycl::dev
//test_device_exceptions<DataType, ColMajor>(sycl_device);
}
-void test_cxx11_tensor_device_sycl() {
+EIGEN_DECLARE_TEST(cxx11_tensor_device_sycl) {
for (const auto& device :Eigen::get_sycl_supported_devices()) {
CALL_SUBTEST(sycl_device_test_per_device<float>(device));
}
diff --git a/unsupported/test/cxx11_tensor_dimension.cpp b/unsupported/test/cxx11_tensor_dimension.cpp
index 16f168ed4..ee416e14a 100644
--- a/unsupported/test/cxx11_tensor_dimension.cpp
+++ b/unsupported/test/cxx11_tensor_dimension.cpp
@@ -60,10 +60,29 @@ static void test_rank_zero()
VERIFY_IS_EQUAL((int)dscalar.rank(), 0);
}
-void test_cxx11_tensor_dimension()
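+// DSizes with a wider index type should be constructible from a DSizes or array
+// with a narrower one, preserving every extent.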
+static void test_index_type_promotion() {
+ Eigen::DSizes<int, 3> src0(1, 2, 3);
+ Eigen::array<int, 3> src1;
+ src1[0] = 4;
+ src1[1] = 5;
+ src1[2] = 6;
+
+ Eigen::DSizes<long, 3> dst0(src0);
+ Eigen::DSizes<long, 3> dst1(src1);
+
+ VERIFY_IS_EQUAL(dst0[0], 1L);
+ VERIFY_IS_EQUAL(dst0[1], 2L);
+ VERIFY_IS_EQUAL(dst0[2], 3L);
+ VERIFY_IS_EQUAL(dst1[0], 4L);
+ VERIFY_IS_EQUAL(dst1[1], 5L);
+ VERIFY_IS_EQUAL(dst1[2], 6L);
+}
+
+EIGEN_DECLARE_TEST(cxx11_tensor_dimension)
{
CALL_SUBTEST(test_dynamic_size());
CALL_SUBTEST(test_fixed_size());
CALL_SUBTEST(test_match());
CALL_SUBTEST(test_rank_zero());
+ CALL_SUBTEST(test_index_type_promotion());
}
diff --git a/unsupported/test/cxx11_tensor_empty.cpp b/unsupported/test/cxx11_tensor_empty.cpp
index d7eea42d7..fd889c46c 100644
--- a/unsupported/test/cxx11_tensor_empty.cpp
+++ b/unsupported/test/cxx11_tensor_empty.cpp
@@ -33,7 +33,7 @@ static void test_empty_fixed_size_tensor()
}
-void test_cxx11_tensor_empty()
+EIGEN_DECLARE_TEST(cxx11_tensor_empty)
{
CALL_SUBTEST(test_empty_tensor());
CALL_SUBTEST(test_empty_fixed_size_tensor());
diff --git a/unsupported/test/cxx11_tensor_executor.cpp b/unsupported/test/cxx11_tensor_executor.cpp
new file mode 100644
index 000000000..aa789c2e4
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_executor.cpp
@@ -0,0 +1,535 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2018 Eugene Zhulenev <ezhulenev@google.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#define EIGEN_USE_THREADS
+
+#include "main.h"
+
+#include <Eigen/CXX11/Tensor>
+
+using Eigen::Tensor;
+using Eigen::RowMajor;
+using Eigen::ColMajor;
+
+// A set of tests to verify that different TensorExecutor strategies yield the
+// same results for all of the ops that support tiled evaluation.
+
+template <int NumDims>
+static array<Index, NumDims> RandomDims(int min_dim = 1, int max_dim = 20) {
+ array<Index, NumDims> dims;
+ for (int i = 0; i < NumDims; ++i) {
+ dims[i] = internal::random<int>(min_dim, max_dim);
+ }
+ return dims;
+}
+
+template <typename T, int NumDims, typename Device, bool Vectorizable,
+ bool Tileable, int Layout>
+static void test_execute_unary_expr(Device d)
+{
+ static constexpr int Options = 0 | Layout;
+
+ // Pick a large enough tensor size to bypass small tensor block evaluation
+ // optimization.
+ auto dims = RandomDims<NumDims>(50 / NumDims, 100 / NumDims);
+
+ Tensor<T, NumDims, Options, Index> src(dims);
+ Tensor<T, NumDims, Options, Index> dst(dims);
+
+ src.setRandom();
+ const auto expr = src.square();
+
+ using Assign = TensorAssignOp<decltype(dst), const decltype(expr)>;
+ using Executor =
+ internal::TensorExecutor<const Assign, Device, Vectorizable, Tileable>;
+
+ Executor::run(Assign(dst, expr), d);
+
+ for (Index i = 0; i < dst.dimensions().TotalSize(); ++i) {
+ T square = src.coeff(i) * src.coeff(i);
+ VERIFY_IS_EQUAL(square, dst.coeff(i));
+ }
+}
+
+template <typename T, int NumDims, typename Device, bool Vectorizable,
+ bool Tileable, int Layout>
+static void test_execute_binary_expr(Device d)
+{
+ static constexpr int Options = 0 | Layout;
+
+ // Pick a large enough tensor size to bypass small tensor block evaluation
+ // optimization.
+ auto dims = RandomDims<NumDims>(50 / NumDims, 100 / NumDims);
+
+ Tensor<T, NumDims, Options, Index> lhs(dims);
+ Tensor<T, NumDims, Options, Index> rhs(dims);
+ Tensor<T, NumDims, Options, Index> dst(dims);
+
+ lhs.setRandom();
+ rhs.setRandom();
+
+ const auto expr = lhs + rhs;
+
+ using Assign = TensorAssignOp<decltype(dst), const decltype(expr)>;
+ using Executor =
+ internal::TensorExecutor<const Assign, Device, Vectorizable, Tileable>;
+
+ Executor::run(Assign(dst, expr), d);
+
+ for (Index i = 0; i < dst.dimensions().TotalSize(); ++i) {
+ T sum = lhs.coeff(i) + rhs.coeff(i);
+ VERIFY_IS_EQUAL(sum, dst.coeff(i));
+ }
+}
+
+template <typename T, int NumDims, typename Device, bool Vectorizable,
+ bool Tileable, int Layout>
+static void test_execute_broadcasting(Device d)
+{
+ static constexpr int Options = 0 | Layout;
+
+ auto dims = RandomDims<NumDims>(1, 10);
+ Tensor<T, NumDims, Options, Index> src(dims);
+ src.setRandom();
+
+ const auto broadcasts = RandomDims<NumDims>(1, 7);
+ const auto expr = src.broadcast(broadcasts);
+
+ // We assume that broadcasting on a default device is tested and correct, so
+ // we can rely on it to verify correctness of tensor executor and tiling.
+ Tensor<T, NumDims, Options, Index> golden;
+ golden = expr;
+
+ // Now do the broadcasting using configured tensor executor.
+ Tensor<T, NumDims, Options, Index> dst(golden.dimensions());
+
+ using Assign = TensorAssignOp<decltype(dst), const decltype(expr)>;
+ using Executor =
+ internal::TensorExecutor<const Assign, Device, Vectorizable, Tileable>;
+
+ Executor::run(Assign(dst, expr), d);
+
+ for (Index i = 0; i < dst.dimensions().TotalSize(); ++i) {
+ VERIFY_IS_EQUAL(dst.coeff(i), golden.coeff(i));
+ }
+}
+
+template <typename T, int NumDims, typename Device, bool Vectorizable,
+ bool Tileable, int Layout>
+static void test_execute_chipping_rvalue(Device d)
+{
+ auto dims = RandomDims<NumDims>(1, 10);
+ Tensor<T, NumDims, Layout, Index> src(dims);
+ src.setRandom();
+
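+// For every dimension that exists, chip at a random offset, evaluate the expression
+// with the configured executor and compare against the default-device result.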
+#define TEST_CHIPPING(CHIP_DIM) \
+ if (NumDims > (CHIP_DIM)) { \
+ const auto offset = internal::random<Index>(0, dims[(CHIP_DIM)] - 1); \
+ const auto expr = src.template chip<(CHIP_DIM)>(offset); \
+ \
+ Tensor<T, NumDims - 1, Layout, Index> golden; \
+ golden = expr; \
+ \
+ Tensor<T, NumDims - 1, Layout, Index> dst(golden.dimensions()); \
+ \
+ using Assign = TensorAssignOp<decltype(dst), const decltype(expr)>; \
+ using Executor = internal::TensorExecutor<const Assign, Device, \
+ Vectorizable, Tileable>; \
+ \
+ Executor::run(Assign(dst, expr), d); \
+ \
+ for (Index i = 0; i < dst.dimensions().TotalSize(); ++i) { \
+ VERIFY_IS_EQUAL(dst.coeff(i), golden.coeff(i)); \
+ } \
+ }
+
+ TEST_CHIPPING(0)
+ TEST_CHIPPING(1)
+ TEST_CHIPPING(2)
+ TEST_CHIPPING(3)
+ TEST_CHIPPING(4)
+ TEST_CHIPPING(5)
+
+#undef TEST_CHIPPING
+}
+
+template <typename T, int NumDims, typename Device, bool Vectorizable,
+ bool Tileable, int Layout>
+static void test_execute_chipping_lvalue(Device d)
+{
+ auto dims = RandomDims<NumDims>(1, 10);
+
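+// For every dimension that exists, assign random data into a chipped view of the
+// destination through the configured executor and compare against a default-device
+// assignment into the same chip.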
+#define TEST_CHIPPING(CHIP_DIM) \
+ if (NumDims > (CHIP_DIM)) { \
+ /* Generate random data that we'll assign to the chipped tensor dim. */ \
+ array<Index, NumDims - 1> src_dims; \
+ for (int i = 0; i < NumDims - 1; ++i) { \
+ int dim = i < (CHIP_DIM) ? i : i + 1; \
+ src_dims[i] = dims[dim]; \
+ } \
+ \
+ Tensor<T, NumDims - 1, Layout, Index> src(src_dims); \
+ src.setRandom(); \
+ \
+ const auto offset = internal::random<Index>(0, dims[(CHIP_DIM)] - 1); \
+ \
+ /* Generate random data to fill the non-chipped dimensions. */ \
+ Tensor<T, NumDims, Layout, Index> random(dims); \
+ random.setRandom(); \
+ \
+ Tensor<T, NumDims, Layout, Index> golden(dims); \
+ golden = random; \
+ golden.template chip<(CHIP_DIM)>(offset) = src; \
+ \
+ Tensor<T, NumDims, Layout, Index> dst(dims); \
+ dst = random; \
+ auto expr = dst.template chip<(CHIP_DIM)>(offset); \
+ \
+ using Assign = TensorAssignOp<decltype(expr), const decltype(src)>; \
+ using Executor = internal::TensorExecutor<const Assign, Device, \
+ Vectorizable, Tileable>; \
+ \
+ Executor::run(Assign(expr, src), d); \
+ \
+ for (Index i = 0; i < dst.dimensions().TotalSize(); ++i) { \
+ VERIFY_IS_EQUAL(dst.coeff(i), golden.coeff(i)); \
+ } \
+ }
+
+ TEST_CHIPPING(0)
+ TEST_CHIPPING(1)
+ TEST_CHIPPING(2)
+ TEST_CHIPPING(3)
+ TEST_CHIPPING(4)
+ TEST_CHIPPING(5)
+
+#undef TEST_CHIPPING
+}
+
+template <typename T, int NumDims, typename Device, bool Vectorizable,
+ bool Tileable, int Layout>
+static void test_execute_shuffle_rvalue(Device d)
+{
+ static constexpr int Options = 0 | Layout;
+
+ auto dims = RandomDims<NumDims>(1, 10);
+ Tensor<T, NumDims, Options, Index> src(dims);
+ src.setRandom();
+
+ // Create a random dimension re-ordering/shuffle.
+ std::vector<Index> shuffle;
+ for (int i = 0; i < NumDims; ++i) shuffle.push_back(i);
+ std::shuffle(shuffle.begin(), shuffle.end(), std::mt19937());
+
+ const auto expr = src.shuffle(shuffle);
+
+ // We assume that shuffling on a default device is tested and correct, so
+ // we can rely on it to verify correctness of tensor executor and tiling.
+ Tensor<T, NumDims, Options, Index> golden;
+ golden = expr;
+
+ // Now do the shuffling using configured tensor executor.
+ Tensor<T, NumDims, Options, Index> dst(golden.dimensions());
+
+ using Assign = TensorAssignOp<decltype(dst), const decltype(expr)>;
+ using Executor =
+ internal::TensorExecutor<const Assign, Device, Vectorizable, Tileable>;
+
+ Executor::run(Assign(dst, expr), d);
+
+ for (Index i = 0; i < dst.dimensions().TotalSize(); ++i) {
+ VERIFY_IS_EQUAL(dst.coeff(i), golden.coeff(i));
+ }
+}
+
+template <typename T, int NumDims, typename Device, bool Vectorizable,
+ bool Tileable, int Layout>
+static void test_execute_shuffle_lvalue(Device d)
+{
+ static constexpr int Options = 0 | Layout;
+
+ auto dims = RandomDims<NumDims>(5, 10);
+ Tensor<T, NumDims, Options, Index> src(dims);
+ src.setRandom();
+
+ // Create a random dimension re-ordering/shuffle.
+ std::vector<Index> shuffle;
+ for (int i = 0; i < NumDims; ++i) shuffle.push_back(i);
+ std::shuffle(shuffle.begin(), shuffle.end(), std::mt19937());
+
+ array<Index, NumDims> shuffled_dims;
+ for (int i = 0; i < NumDims; ++i) shuffled_dims[shuffle[i]] = dims[i];
+
+ // We assume that shuffling on a default device is tested and correct, so
+ // we can rely on it to verify correctness of tensor executor and tiling.
+ Tensor<T, NumDims, Options, Index> golden(shuffled_dims);
+ golden.shuffle(shuffle) = src;
+
+ // Now do the shuffling using configured tensor executor.
+ Tensor<T, NumDims, Options, Index> dst(shuffled_dims);
+
+ auto expr = dst.shuffle(shuffle);
+
+ using Assign = TensorAssignOp<decltype(expr), const decltype(src)>;
+ using Executor =
+ internal::TensorExecutor<const Assign, Device, Vectorizable, Tileable>;
+
+ Executor::run(Assign(expr, src), d);
+
+ for (Index i = 0; i < dst.dimensions().TotalSize(); ++i) {
+ VERIFY_IS_EQUAL(dst.coeff(i), golden.coeff(i));
+ }
+}
+
+template <typename T, int NumDims, typename Device, bool Vectorizable,
+ bool Tileable, int Layout>
+static void test_execute_reduction(Device d)
+{
+ static_assert(NumDims >= 2, "NumDims must be greater than or equal to 2");
+
+ static constexpr int ReducedDims = NumDims - 2;
+ static constexpr int Options = 0 | Layout;
+
+ auto dims = RandomDims<NumDims>(5, 10);
+ Tensor<T, NumDims, Options, Index> src(dims);
+ src.setRandom();
+
+ // Pick two random and unique reduction dimensions.
+ int reduction0 = internal::random<int>(0, NumDims - 1);
+ int reduction1 = internal::random<int>(0, NumDims - 1);
+ while (reduction0 == reduction1) {
+ reduction1 = internal::random<int>(0, NumDims - 1);
+ }
+
+ DSizes<Index, 2> reduction_axis;
+ reduction_axis[0] = reduction0;
+ reduction_axis[1] = reduction1;
+
+ Tensor<T, ReducedDims, Options, Index> golden = src.sum(reduction_axis);
+
+ // Now do the reduction using configured tensor executor.
+ Tensor<T, ReducedDims, Options, Index> dst(golden.dimensions());
+
+ auto expr = src.sum(reduction_axis);
+
+ using Assign = TensorAssignOp<decltype(dst), const decltype(expr)>;
+ using Executor =
+ internal::TensorExecutor<const Assign, Device, Vectorizable, Tileable>;
+
+ Executor::run(Assign(dst, expr), d);
+
+ for (Index i = 0; i < dst.dimensions().TotalSize(); ++i) {
+ VERIFY_IS_EQUAL(dst.coeff(i), golden.coeff(i));
+ }
+}
+
+template <typename T, int NumDims, typename Device, bool Vectorizable,
+ bool Tileable, int Layout>
+static void test_execute_reshape(Device d)
+{
+ static_assert(NumDims >= 2, "NumDims must be greater than or equal to 2");
+
+ static constexpr int ReshapedDims = NumDims - 1;
+ static constexpr int Options = 0 | Layout;
+
+ auto dims = RandomDims<NumDims>(5, 10);
+ Tensor<T, NumDims, Options, Index> src(dims);
+ src.setRandom();
+
+ // Collapse the 0th and 1st dimensions into one and then shuffle.
+ std::vector<Index> shuffle;
+ for (int i = 0; i < ReshapedDims; ++i) shuffle.push_back(i);
+ std::shuffle(shuffle.begin(), shuffle.end(), std::mt19937());
+
+ DSizes<Index, ReshapedDims> reshaped_dims;
+ reshaped_dims[shuffle[0]] = dims[0] * dims[1];
+ for (int i = 1; i < ReshapedDims; ++i) reshaped_dims[shuffle[i]] = dims[i + 1];
+
+ Tensor<T, ReshapedDims, Options, Index> golden = src.reshape(reshaped_dims);
+
+ // Now reshape using configured tensor executor.
+ Tensor<T, ReshapedDims, Options, Index> dst(golden.dimensions());
+
+ auto expr = src.reshape(reshaped_dims);
+
+ using Assign = TensorAssignOp<decltype(dst), const decltype(expr)>;
+ using Executor =
+ internal::TensorExecutor<const Assign, Device, Vectorizable, Tileable>;
+
+ Executor::run(Assign(dst, expr), d);
+
+ for (Index i = 0; i < dst.dimensions().TotalSize(); ++i) {
+ VERIFY_IS_EQUAL(dst.coeff(i), golden.coeff(i));
+ }
+}
+
+template <typename T, int NumDims, typename Device, bool Vectorizable,
+ bool Tileable, int Layout>
+static void test_execute_slice_rvalue(Device d)
+{
+ static_assert(NumDims >= 2, "NumDims must be greater than or equal to 2");
+ static constexpr int Options = 0 | Layout;
+
+ auto dims = RandomDims<NumDims>(5, 10);
+ Tensor<T, NumDims, Options, Index> src(dims);
+ src.setRandom();
+
+ // Pick a random slice of src tensor.
+ auto slice_start = DSizes<Index, NumDims>(RandomDims<NumDims>());
+ auto slice_size = DSizes<Index, NumDims>(RandomDims<NumDims>());
+
+ // Make sure that slice start + size do not overflow tensor dims.
+ for (int i = 0; i < NumDims; ++i) {
+ slice_start[i] = numext::mini(dims[i] - 1, slice_start[i]);
+ slice_size[i] = numext::mini(slice_size[i], dims[i] - slice_start[i]);
+ }
+
+ Tensor<T, NumDims, Options, Index> golden =
+ src.slice(slice_start, slice_size);
+
+ // Now take the slice using the configured tensor executor.
+ Tensor<T, NumDims, Options, Index> dst(golden.dimensions());
+
+ auto expr = src.slice(slice_start, slice_size);
+
+ using Assign = TensorAssignOp<decltype(dst), const decltype(expr)>;
+ using Executor =
+ internal::TensorExecutor<const Assign, Device, Vectorizable, Tileable>;
+
+ Executor::run(Assign(dst, expr), d);
+
+ for (Index i = 0; i < dst.dimensions().TotalSize(); ++i) {
+ VERIFY_IS_EQUAL(dst.coeff(i), golden.coeff(i));
+ }
+}
+
+template <typename T, int NumDims, typename Device, bool Vectorizable,
+ bool Tileable, int Layout>
+static void test_execute_slice_lvalue(Device d)
+{
+ static_assert(NumDims >= 2, "NumDims must be greater than or equal to 2");
+ static constexpr int Options = 0 | Layout;
+
+ auto dims = RandomDims<NumDims>(5, 10);
+ Tensor<T, NumDims, Options, Index> src(dims);
+ src.setRandom();
+
+ // Pick a random slice of src tensor.
+ auto slice_start = DSizes<Index, NumDims>(RandomDims<NumDims>(1, 10));
+ auto slice_size = DSizes<Index, NumDims>(RandomDims<NumDims>(1, 10));
+
+ // Make sure that slice start + size do not overflow tensor dims.
+ for (int i = 0; i < NumDims; ++i) {
+ slice_start[i] = numext::mini(dims[i] - 1, slice_start[i]);
+ slice_size[i] = numext::mini(slice_size[i], dims[i] - slice_start[i]);
+ }
+
+ Tensor<T, NumDims, Options, Index> slice(slice_size);
+ slice.setRandom();
+
+ // Assign a slice using the default executor.
+ Tensor<T, NumDims, Options, Index> golden = src;
+ golden.slice(slice_start, slice_size) = slice;
+
+ // And using configured execution strategy.
+ Tensor<T, NumDims, Options, Index> dst = src;
+ auto expr = dst.slice(slice_start, slice_size);
+
+ using Assign = TensorAssignOp<decltype(expr), const decltype(slice)>;
+ using Executor =
+ internal::TensorExecutor<const Assign, Device, Vectorizable, Tileable>;
+
+ Executor::run(Assign(expr, slice), d);
+
+ for (Index i = 0; i < dst.dimensions().TotalSize(); ++i) {
+ VERIFY_IS_EQUAL(dst.coeff(i), golden.coeff(i));
+ }
+}
+
+#define CALL_SUBTEST_PART(PART) \
+ CALL_SUBTEST_##PART
+
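+// Instantiates NAME for every combination of device (default / thread pool),
+// vectorization, tiling and data layout.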
+#define CALL_SUBTEST_COMBINATIONS(PART, NAME, T, NUM_DIMS) \
+ CALL_SUBTEST_PART(PART)((NAME<T, NUM_DIMS, DefaultDevice, false, false, ColMajor>(default_device))); \
+ CALL_SUBTEST_PART(PART)((NAME<T, NUM_DIMS, DefaultDevice, false, true, ColMajor>(default_device))); \
+ CALL_SUBTEST_PART(PART)((NAME<T, NUM_DIMS, DefaultDevice, true, false, ColMajor>(default_device))); \
+ CALL_SUBTEST_PART(PART)((NAME<T, NUM_DIMS, DefaultDevice, true, true, ColMajor>(default_device))); \
+ CALL_SUBTEST_PART(PART)((NAME<T, NUM_DIMS, DefaultDevice, false, false, RowMajor>(default_device))); \
+ CALL_SUBTEST_PART(PART)((NAME<T, NUM_DIMS, DefaultDevice, false, true, RowMajor>(default_device))); \
+ CALL_SUBTEST_PART(PART)((NAME<T, NUM_DIMS, DefaultDevice, true, false, RowMajor>(default_device))); \
+ CALL_SUBTEST_PART(PART)((NAME<T, NUM_DIMS, DefaultDevice, true, true, RowMajor>(default_device))); \
+ CALL_SUBTEST_PART(PART)((NAME<T, NUM_DIMS, ThreadPoolDevice, false, false, ColMajor>(tp_device))); \
+ CALL_SUBTEST_PART(PART)((NAME<T, NUM_DIMS, ThreadPoolDevice, false, true, ColMajor>(tp_device))); \
+ CALL_SUBTEST_PART(PART)((NAME<T, NUM_DIMS, ThreadPoolDevice, true, false, ColMajor>(tp_device))); \
+ CALL_SUBTEST_PART(PART)((NAME<T, NUM_DIMS, ThreadPoolDevice, true, true, ColMajor>(tp_device))); \
+ CALL_SUBTEST_PART(PART)((NAME<T, NUM_DIMS, ThreadPoolDevice, false, false, RowMajor>(tp_device))); \
+ CALL_SUBTEST_PART(PART)((NAME<T, NUM_DIMS, ThreadPoolDevice, false, true, RowMajor>(tp_device))); \
+ CALL_SUBTEST_PART(PART)((NAME<T, NUM_DIMS, ThreadPoolDevice, true, false, RowMajor>(tp_device))); \
+ CALL_SUBTEST_PART(PART)((NAME<T, NUM_DIMS, ThreadPoolDevice, true, true, RowMajor>(tp_device)))
+
+EIGEN_DECLARE_TEST(cxx11_tensor_executor) {
+ Eigen::DefaultDevice default_device;
+
+ const auto num_threads = internal::random<int>(1, 24);
+ Eigen::ThreadPool tp(num_threads);
+ Eigen::ThreadPoolDevice tp_device(&tp, num_threads);
+
+ CALL_SUBTEST_COMBINATIONS(1, test_execute_unary_expr, float, 3);
+ CALL_SUBTEST_COMBINATIONS(1, test_execute_unary_expr, float, 4);
+ CALL_SUBTEST_COMBINATIONS(1, test_execute_unary_expr, float, 5);
+
+ CALL_SUBTEST_COMBINATIONS(2, test_execute_binary_expr, float, 3);
+ CALL_SUBTEST_COMBINATIONS(2, test_execute_binary_expr, float, 4);
+ CALL_SUBTEST_COMBINATIONS(2, test_execute_binary_expr, float, 5);
+
+ CALL_SUBTEST_COMBINATIONS(3, test_execute_broadcasting, float, 3);
+ CALL_SUBTEST_COMBINATIONS(3, test_execute_broadcasting, float, 4);
+ CALL_SUBTEST_COMBINATIONS(3, test_execute_broadcasting, float, 5);
+
+ CALL_SUBTEST_COMBINATIONS(4, test_execute_chipping_rvalue, float, 3);
+ CALL_SUBTEST_COMBINATIONS(4, test_execute_chipping_rvalue, float, 4);
+ CALL_SUBTEST_COMBINATIONS(4, test_execute_chipping_rvalue, float, 5);
+
+ CALL_SUBTEST_COMBINATIONS(5, test_execute_chipping_lvalue, float, 3);
+ CALL_SUBTEST_COMBINATIONS(5, test_execute_chipping_lvalue, float, 4);
+ CALL_SUBTEST_COMBINATIONS(5, test_execute_chipping_lvalue, float, 5);
+
+ CALL_SUBTEST_COMBINATIONS(6, test_execute_shuffle_rvalue, float, 3);
+ CALL_SUBTEST_COMBINATIONS(6, test_execute_shuffle_rvalue, float, 4);
+ CALL_SUBTEST_COMBINATIONS(6, test_execute_shuffle_rvalue, float, 5);
+
+ CALL_SUBTEST_COMBINATIONS(7, test_execute_shuffle_lvalue, float, 3);
+ CALL_SUBTEST_COMBINATIONS(7, test_execute_shuffle_lvalue, float, 4);
+ CALL_SUBTEST_COMBINATIONS(7, test_execute_shuffle_lvalue, float, 5);
+
+ CALL_SUBTEST_COMBINATIONS(8, test_execute_reduction, float, 2);
+ CALL_SUBTEST_COMBINATIONS(8, test_execute_reduction, float, 3);
+ CALL_SUBTEST_COMBINATIONS(8, test_execute_reduction, float, 4);
+ CALL_SUBTEST_COMBINATIONS(8, test_execute_reduction, float, 5);
+
+ CALL_SUBTEST_COMBINATIONS(9, test_execute_reshape, float, 2);
+ CALL_SUBTEST_COMBINATIONS(9, test_execute_reshape, float, 3);
+ CALL_SUBTEST_COMBINATIONS(9, test_execute_reshape, float, 4);
+ CALL_SUBTEST_COMBINATIONS(9, test_execute_reshape, float, 5);
+
+ CALL_SUBTEST_COMBINATIONS(10, test_execute_slice_rvalue, float, 2);
+ CALL_SUBTEST_COMBINATIONS(10, test_execute_slice_rvalue, float, 3);
+ CALL_SUBTEST_COMBINATIONS(10, test_execute_slice_rvalue, float, 4);
+ CALL_SUBTEST_COMBINATIONS(10, test_execute_slice_rvalue, float, 5);
+
+ CALL_SUBTEST_COMBINATIONS(11, test_execute_slice_lvalue, float, 2);
+ CALL_SUBTEST_COMBINATIONS(11, test_execute_slice_lvalue, float, 3);
+ CALL_SUBTEST_COMBINATIONS(11, test_execute_slice_lvalue, float, 4);
+ CALL_SUBTEST_COMBINATIONS(11, test_execute_slice_lvalue, float, 5);
+
+ // Force CMake to split this test.
+ // EIGEN_SUFFIXES;1;2;3;4;5;6;7;8;9;10;11
+}
+
+#undef CALL_SUBTEST_COMBINATIONS
diff --git a/unsupported/test/cxx11_tensor_expr.cpp b/unsupported/test/cxx11_tensor_expr.cpp
index 129b4e659..30924b6b6 100644
--- a/unsupported/test/cxx11_tensor_expr.cpp
+++ b/unsupported/test/cxx11_tensor_expr.cpp
@@ -340,13 +340,33 @@ void test_minmax_nan_propagation_templ() {
}
}
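+// clip(kMin, kMax) should clamp every coefficient into the closed range [kMin, kMax].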
+static void test_clip()
+{
+ Tensor<float, 1> vec(6);
+ vec(0) = 4.0;
+ vec(1) = 8.0;
+ vec(2) = 15.0;
+ vec(3) = 16.0;
+ vec(4) = 23.0;
+ vec(5) = 42.0;
+
+ float kMin = 20;
+ float kMax = 30;
+
+ Tensor<float, 1> vec_clipped(6);
+ vec_clipped = vec.clip(kMin, kMax);
+ for (int i = 0; i < 6; ++i) {
+ VERIFY_IS_EQUAL(vec_clipped(i), numext::mini(numext::maxi(vec(i), kMin), kMax));
+ }
+}
+
static void test_minmax_nan_propagation()
{
test_minmax_nan_propagation_templ<float>();
test_minmax_nan_propagation_templ<double>();
}
-void test_cxx11_tensor_expr()
+EIGEN_DECLARE_TEST(cxx11_tensor_expr)
{
CALL_SUBTEST(test_1d());
CALL_SUBTEST(test_2d());
@@ -356,5 +376,6 @@ void test_cxx11_tensor_expr()
CALL_SUBTEST(test_functors());
CALL_SUBTEST(test_type_casting());
CALL_SUBTEST(test_select());
+ CALL_SUBTEST(test_clip());
CALL_SUBTEST(test_minmax_nan_propagation());
}
diff --git a/unsupported/test/cxx11_tensor_fft.cpp b/unsupported/test/cxx11_tensor_fft.cpp
index 2f14ebc62..4e4c9c4ec 100644
--- a/unsupported/test/cxx11_tensor_fft.cpp
+++ b/unsupported/test/cxx11_tensor_fft.cpp
@@ -224,7 +224,36 @@ static void test_fft_real_input_energy() {
}
}
-void test_cxx11_tensor_fft() {
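+// Round trip: a forward FFT followed by an inverse FFT (keeping the real part)
+// should reproduce the input, including for lengths that are not a power of two.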
+template <typename RealScalar>
+static void test_fft_non_power_of_2_round_trip(int exponent) {
+ int n = (1 << exponent) + 1;
+
+ // The dimension type needs to be at least 8 bytes wide for the
+ // Tensor constructor to work. On Windows, long is only 4 bytes wide,
+ // so use std::int64_t here to force the use of an 8-byte index type.
+ Eigen::DSizes<std::int64_t, 1> dimensions;
+ dimensions[0] = n;
+ const DSizes<std::int64_t, 1> arr = dimensions;
+ Tensor<RealScalar, 1, ColMajor, std::int64_t> input;
+
+ input.resize(arr);
+ input.setRandom();
+
+ array<int, 1> fft;
+ fft[0] = 0;
+
+ Tensor<std::complex<RealScalar>, 1, ColMajor> forward =
+ input.template fft<BothParts, FFT_FORWARD>(fft);
+
+ Tensor<RealScalar, 1, ColMajor, std::int64_t> output =
+ forward.template fft<RealPart, FFT_REVERSE>(fft);
+
+ for (int i = 0; i < n; ++i) {
+ VERIFY_IS_APPROX(input[i], output[i]);
+ }
+}
+
+EIGEN_DECLARE_TEST(cxx11_tensor_fft) {
test_fft_complex_input_golden();
test_fft_real_input_golden();
@@ -270,4 +299,6 @@ void test_cxx11_tensor_fft() {
test_fft_real_input_energy<RowMajor, double, true, Eigen::BothParts, FFT_FORWARD, 4>();
test_fft_real_input_energy<RowMajor, float, false, Eigen::BothParts, FFT_FORWARD, 4>();
test_fft_real_input_energy<RowMajor, double, false, Eigen::BothParts, FFT_FORWARD, 4>();
+
+ test_fft_non_power_of_2_round_trip<float>(7);
}
diff --git a/unsupported/test/cxx11_tensor_fixed_size.cpp b/unsupported/test/cxx11_tensor_fixed_size.cpp
index e6274f8eb..456ce6bea 100644
--- a/unsupported/test/cxx11_tensor_fixed_size.cpp
+++ b/unsupported/test/cxx11_tensor_fixed_size.cpp
@@ -250,7 +250,7 @@ static void test_array()
}
}
-void test_cxx11_tensor_fixed_size()
+EIGEN_DECLARE_TEST(cxx11_tensor_fixed_size)
{
CALL_SUBTEST(test_0d());
CALL_SUBTEST(test_1d());
diff --git a/unsupported/test/cxx11_tensor_forced_eval.cpp b/unsupported/test/cxx11_tensor_forced_eval.cpp
index 45d7345e9..f76e2ea97 100644
--- a/unsupported/test/cxx11_tensor_forced_eval.cpp
+++ b/unsupported/test/cxx11_tensor_forced_eval.cpp
@@ -72,7 +72,7 @@ static void test_const()
}
-void test_cxx11_tensor_forced_eval()
+EIGEN_DECLARE_TEST(cxx11_tensor_forced_eval)
{
CALL_SUBTEST(test_simple());
CALL_SUBTEST(test_const());
diff --git a/unsupported/test/cxx11_tensor_forced_eval_sycl.cpp b/unsupported/test/cxx11_tensor_forced_eval_sycl.cpp
index aca036cde..74d38a644 100644
--- a/unsupported/test/cxx11_tensor_forced_eval_sycl.cpp
+++ b/unsupported/test/cxx11_tensor_forced_eval_sycl.cpp
@@ -13,7 +13,7 @@
#define EIGEN_TEST_NO_LONGDOUBLE
#define EIGEN_TEST_NO_COMPLEX
-#define EIGEN_TEST_FUNC cxx11_tensor_forced_eval_sycl
+
#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int64_t
#define EIGEN_USE_SYCL
@@ -44,7 +44,7 @@ void test_forced_eval_sycl(const Eigen::SyclDevice &sycl_device) {
Eigen::TensorMap<Eigen::Tensor<DataType, 3, DataLayout, IndexType>> gpu_in2(gpu_in2_data, tensorRange);
Eigen::TensorMap<Eigen::Tensor<DataType, 3, DataLayout, IndexType>> gpu_out(gpu_out_data, tensorRange);
sycl_device.memcpyHostToDevice(gpu_in1_data, in1.data(),(in1.dimensions().TotalSize())*sizeof(DataType));
- sycl_device.memcpyHostToDevice(gpu_in2_data, in2.data(),(in1.dimensions().TotalSize())*sizeof(DataType));
+ sycl_device.memcpyHostToDevice(gpu_in2_data, in2.data(),(in2.dimensions().TotalSize())*sizeof(DataType));
/// c=(a+b)*b
gpu_out.device(sycl_device) =(gpu_in1 + gpu_in2).eval() * gpu_in2;
sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.dimensions().TotalSize())*sizeof(DataType));
@@ -69,7 +69,7 @@ template <typename DataType, typename Dev_selector> void tensorForced_evalperDev
test_forced_eval_sycl<DataType, RowMajor, int64_t>(sycl_device);
test_forced_eval_sycl<DataType, ColMajor, int64_t>(sycl_device);
}
-void test_cxx11_tensor_forced_eval_sycl() {
+EIGEN_DECLARE_TEST(cxx11_tensor_forced_eval_sycl) {
for (const auto& device :Eigen::get_sycl_supported_devices()) {
CALL_SUBTEST(tensorForced_evalperDevice<float>(device));
}
diff --git a/unsupported/test/cxx11_tensor_generator.cpp b/unsupported/test/cxx11_tensor_generator.cpp
index dcb928714..ee5e29b77 100644
--- a/unsupported/test/cxx11_tensor_generator.cpp
+++ b/unsupported/test/cxx11_tensor_generator.cpp
@@ -80,7 +80,7 @@ static void test_gaussian()
}
-void test_cxx11_tensor_generator()
+EIGEN_DECLARE_TEST(cxx11_tensor_generator)
{
CALL_SUBTEST(test_1D<ColMajor>());
CALL_SUBTEST(test_1D<RowMajor>());
diff --git a/unsupported/test/cxx11_tensor_generator_sycl.cpp b/unsupported/test/cxx11_tensor_generator_sycl.cpp
new file mode 100644
index 000000000..fb6e3d9d0
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_generator_sycl.cpp
@@ -0,0 +1,147 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2016
+// Mehdi Goli Codeplay Software Ltd.
+// Ralph Potter Codeplay Software Ltd.
+// Luke Iwanski Codeplay Software Ltd.
+// Contact: <eigen@codeplay.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#define EIGEN_TEST_NO_LONGDOUBLE
+#define EIGEN_TEST_NO_COMPLEX
+
+#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int64_t
+#define EIGEN_USE_SYCL
+static const float error_threshold = 1e-8f;
+
+#include "main.h"
+#include <unsupported/Eigen/CXX11/Tensor>
+
+using Eigen::Tensor;
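+// Generator that returns its 1D coordinate, so result(i) == i after generation.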
+struct Generator1D {
+ Generator1D() { }
+
+ float operator()(const array<Eigen::DenseIndex, 1>& coordinates) const {
+ return coordinates[0];
+ }
+};
+
+template <typename DataType, int DataLayout, typename IndexType>
+static void test_1D_sycl(const Eigen::SyclDevice& sycl_device)
+{
+
+ IndexType sizeDim1 = 6;
+ array<IndexType, 1> tensorRange = {{sizeDim1}};
+ Tensor<DataType, 1, DataLayout,IndexType> vec(tensorRange);
+ Tensor<DataType, 1, DataLayout,IndexType> result(tensorRange);
+
+ const size_t tensorBuffSize =vec.size()*sizeof(DataType);
+ DataType* gpu_data_vec = static_cast<DataType*>(sycl_device.allocate(tensorBuffSize));
+ DataType* gpu_data_result = static_cast<DataType*>(sycl_device.allocate(tensorBuffSize));
+
+ TensorMap<Tensor<DataType, 1, DataLayout,IndexType>> gpu_vec(gpu_data_vec, tensorRange);
+ TensorMap<Tensor<DataType, 1, DataLayout,IndexType>> gpu_result(gpu_data_result, tensorRange);
+
+ sycl_device.memcpyHostToDevice(gpu_data_vec, vec.data(), tensorBuffSize);
+ gpu_result.device(sycl_device)=gpu_vec.generate(Generator1D());
+ sycl_device.memcpyDeviceToHost(result.data(), gpu_data_result, tensorBuffSize);
+
+ for (IndexType i = 0; i < 6; ++i) {
+ VERIFY_IS_EQUAL(result(i), i);
+ }
+}
+
+
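+// Generator that returns 3*i + 11*j for coordinate (i, j).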
+struct Generator2D {
+ Generator2D() { }
+
+ float operator()(const array<Eigen::DenseIndex, 2>& coordinates) const {
+ return 3 * coordinates[0] + 11 * coordinates[1];
+ }
+};
+
+template <typename DataType, int DataLayout, typename IndexType>
+static void test_2D_sycl(const Eigen::SyclDevice& sycl_device)
+{
+ IndexType sizeDim1 = 5;
+ IndexType sizeDim2 = 7;
+ array<IndexType, 2> tensorRange = {{sizeDim1, sizeDim2}};
+ Tensor<DataType, 2, DataLayout,IndexType> matrix(tensorRange);
+ Tensor<DataType, 2, DataLayout,IndexType> result(tensorRange);
+
+ const size_t tensorBuffSize =matrix.size()*sizeof(DataType);
+ DataType* gpu_data_matrix = static_cast<DataType*>(sycl_device.allocate(tensorBuffSize));
+ DataType* gpu_data_result = static_cast<DataType*>(sycl_device.allocate(tensorBuffSize));
+
+ TensorMap<Tensor<DataType, 2, DataLayout,IndexType>> gpu_matrix(gpu_data_matrix, tensorRange);
+ TensorMap<Tensor<DataType, 2, DataLayout,IndexType>> gpu_result(gpu_data_result, tensorRange);
+
+ sycl_device.memcpyHostToDevice(gpu_data_matrix, matrix.data(), tensorBuffSize);
+ gpu_result.device(sycl_device)=gpu_matrix.generate(Generator2D());
+ sycl_device.memcpyDeviceToHost(result.data(), gpu_data_result, tensorBuffSize);
+
+ for (IndexType i = 0; i < 5; ++i) {
+ for (IndexType j = 0; j < 7; ++j) {
+ VERIFY_IS_EQUAL(result(i, j), 3*i + 11*j);
+ }
+ }
+}
+
+template <typename DataType, int DataLayout, typename IndexType>
+static void test_gaussian_sycl(const Eigen::SyclDevice& sycl_device)
+{
+ IndexType rows = 32;
+ IndexType cols = 48;
+ array<DataType, 2> means;
+ means[0] = rows / 2.0f;
+ means[1] = cols / 2.0f;
+ array<DataType, 2> std_devs;
+ std_devs[0] = 3.14f;
+ std_devs[1] = 2.7f;
+ internal::GaussianGenerator<DataType, Eigen::DenseIndex, 2> gaussian_gen(means, std_devs);
+
+ array<IndexType, 2> tensorRange = {{rows, cols}};
+ Tensor<DataType, 2, DataLayout,IndexType> matrix(tensorRange);
+ Tensor<DataType, 2, DataLayout,IndexType> result(tensorRange);
+
+ const size_t tensorBuffSize =matrix.size()*sizeof(DataType);
+ DataType* gpu_data_matrix = static_cast<DataType*>(sycl_device.allocate(tensorBuffSize));
+ DataType* gpu_data_result = static_cast<DataType*>(sycl_device.allocate(tensorBuffSize));
+
+ TensorMap<Tensor<DataType, 2, DataLayout,IndexType>> gpu_matrix(gpu_data_matrix, tensorRange);
+ TensorMap<Tensor<DataType, 2, DataLayout,IndexType>> gpu_result(gpu_data_result, tensorRange);
+
+ sycl_device.memcpyHostToDevice(gpu_data_matrix, matrix.data(), tensorBuffSize);
+ gpu_result.device(sycl_device)=gpu_matrix.generate(gaussian_gen);
+ sycl_device.memcpyDeviceToHost(result.data(), gpu_data_result, tensorBuffSize);
+
+ for (IndexType i = 0; i < rows; ++i) {
+ for (IndexType j = 0; j < cols; ++j) {
+ DataType g_rows = powf(rows/2.0f - i, 2) / (3.14f * 3.14f) * 0.5f;
+ DataType g_cols = powf(cols/2.0f - j, 2) / (2.7f * 2.7f) * 0.5f;
+ DataType gaussian = expf(-g_rows - g_cols);
+ VERIFY(Eigen::internal::isApprox(result(i, j), gaussian, error_threshold));
+ }
+ }
+}
+
+template<typename DataType, typename dev_Selector> void sycl_generator_test_per_device(dev_Selector s){
+ QueueInterface queueInterface(s);
+ auto sycl_device = Eigen::SyclDevice(&queueInterface);
+ test_1D_sycl<DataType, RowMajor, int64_t>(sycl_device);
+ test_1D_sycl<DataType, ColMajor, int64_t>(sycl_device);
+ test_2D_sycl<DataType, RowMajor, int64_t>(sycl_device);
+ test_2D_sycl<DataType, ColMajor, int64_t>(sycl_device);
+ test_gaussian_sycl<DataType, RowMajor, int64_t>(sycl_device);
+ test_gaussian_sycl<DataType, ColMajor, int64_t>(sycl_device);
+}
+EIGEN_DECLARE_TEST(cxx11_tensor_generator_sycl)
+{
+ for (const auto& device :Eigen::get_sycl_supported_devices()) {
+ CALL_SUBTEST(sycl_generator_test_per_device<float>(device));
+ }
+}
diff --git a/unsupported/test/cxx11_tensor_cuda.cu b/unsupported/test/cxx11_tensor_gpu.cu
index 0ba9d52e9..14fc0bd04 100644
--- a/unsupported/test/cxx11_tensor_cuda.cu
+++ b/unsupported/test/cxx11_tensor_gpu.cu
@@ -9,18 +9,17 @@
#define EIGEN_TEST_NO_LONGDOUBLE
#define EIGEN_TEST_NO_COMPLEX
-#define EIGEN_TEST_FUNC cxx11_tensor_cuda
+
#define EIGEN_USE_GPU
-#if defined __CUDACC_VER__ && __CUDACC_VER__ >= 70500
-#include <cuda_fp16.h>
-#endif
#include "main.h"
#include <unsupported/Eigen/CXX11/Tensor>
+#include <unsupported/Eigen/CXX11/src/Tensor/TensorGpuHipCudaDefines.h>
+
using Eigen::Tensor;
-void test_cuda_nullary() {
+void test_gpu_nullary() {
Tensor<float, 1, 0, int> in1(2);
Tensor<float, 1, 0, int> in2(2);
in1.setRandom();
@@ -30,12 +29,12 @@ void test_cuda_nullary() {
float* d_in1;
float* d_in2;
- cudaMalloc((void**)(&d_in1), tensor_bytes);
- cudaMalloc((void**)(&d_in2), tensor_bytes);
- cudaMemcpy(d_in1, in1.data(), tensor_bytes, cudaMemcpyHostToDevice);
- cudaMemcpy(d_in2, in2.data(), tensor_bytes, cudaMemcpyHostToDevice);
+ gpuMalloc((void**)(&d_in1), tensor_bytes);
+ gpuMalloc((void**)(&d_in2), tensor_bytes);
+ gpuMemcpy(d_in1, in1.data(), tensor_bytes, gpuMemcpyHostToDevice);
+ gpuMemcpy(d_in2, in2.data(), tensor_bytes, gpuMemcpyHostToDevice);
- Eigen::CudaStreamDevice stream;
+ Eigen::GpuStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<float, 1, 0, int>, Eigen::Aligned> gpu_in1(
@@ -49,23 +48,23 @@ void test_cuda_nullary() {
Tensor<float, 1, 0, int> new1(2);
Tensor<float, 1, 0, int> new2(2);
- assert(cudaMemcpyAsync(new1.data(), d_in1, tensor_bytes, cudaMemcpyDeviceToHost,
- gpu_device.stream()) == cudaSuccess);
- assert(cudaMemcpyAsync(new2.data(), d_in2, tensor_bytes, cudaMemcpyDeviceToHost,
- gpu_device.stream()) == cudaSuccess);
+ assert(gpuMemcpyAsync(new1.data(), d_in1, tensor_bytes, gpuMemcpyDeviceToHost,
+ gpu_device.stream()) == gpuSuccess);
+ assert(gpuMemcpyAsync(new2.data(), d_in2, tensor_bytes, gpuMemcpyDeviceToHost,
+ gpu_device.stream()) == gpuSuccess);
- assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
+ assert(gpuStreamSynchronize(gpu_device.stream()) == gpuSuccess);
for (int i = 0; i < 2; ++i) {
VERIFY_IS_APPROX(new1(i), 3.14f);
VERIFY_IS_NOT_EQUAL(new2(i), in2(i));
}
- cudaFree(d_in1);
- cudaFree(d_in2);
+ gpuFree(d_in1);
+ gpuFree(d_in2);
}
-void test_cuda_elementwise_small() {
+void test_gpu_elementwise_small() {
Tensor<float, 1> in1(Eigen::array<Eigen::DenseIndex, 1>(2));
Tensor<float, 1> in2(Eigen::array<Eigen::DenseIndex, 1>(2));
Tensor<float, 1> out(Eigen::array<Eigen::DenseIndex, 1>(2));
@@ -79,14 +78,14 @@ void test_cuda_elementwise_small() {
float* d_in1;
float* d_in2;
float* d_out;
- cudaMalloc((void**)(&d_in1), in1_bytes);
- cudaMalloc((void**)(&d_in2), in2_bytes);
- cudaMalloc((void**)(&d_out), out_bytes);
+ gpuMalloc((void**)(&d_in1), in1_bytes);
+ gpuMalloc((void**)(&d_in2), in2_bytes);
+ gpuMalloc((void**)(&d_out), out_bytes);
- cudaMemcpy(d_in1, in1.data(), in1_bytes, cudaMemcpyHostToDevice);
- cudaMemcpy(d_in2, in2.data(), in2_bytes, cudaMemcpyHostToDevice);
+ gpuMemcpy(d_in1, in1.data(), in1_bytes, gpuMemcpyHostToDevice);
+ gpuMemcpy(d_in2, in2.data(), in2_bytes, gpuMemcpyHostToDevice);
- Eigen::CudaStreamDevice stream;
+ Eigen::GpuStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_in1(
@@ -98,9 +97,9 @@ void test_cuda_elementwise_small() {
gpu_out.device(gpu_device) = gpu_in1 + gpu_in2;
- assert(cudaMemcpyAsync(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost,
- gpu_device.stream()) == cudaSuccess);
- assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
+ assert(gpuMemcpyAsync(out.data(), d_out, out_bytes, gpuMemcpyDeviceToHost,
+ gpu_device.stream()) == gpuSuccess);
+ assert(gpuStreamSynchronize(gpu_device.stream()) == gpuSuccess);
for (int i = 0; i < 2; ++i) {
VERIFY_IS_APPROX(
@@ -108,12 +107,12 @@ void test_cuda_elementwise_small() {
in1(Eigen::array<Eigen::DenseIndex, 1>(i)) + in2(Eigen::array<Eigen::DenseIndex, 1>(i)));
}
- cudaFree(d_in1);
- cudaFree(d_in2);
- cudaFree(d_out);
+ gpuFree(d_in1);
+ gpuFree(d_in2);
+ gpuFree(d_out);
}
-void test_cuda_elementwise()
+void test_gpu_elementwise()
{
Tensor<float, 3> in1(Eigen::array<Eigen::DenseIndex, 3>(72,53,97));
Tensor<float, 3> in2(Eigen::array<Eigen::DenseIndex, 3>(72,53,97));
@@ -132,16 +131,16 @@ void test_cuda_elementwise()
float* d_in2;
float* d_in3;
float* d_out;
- cudaMalloc((void**)(&d_in1), in1_bytes);
- cudaMalloc((void**)(&d_in2), in2_bytes);
- cudaMalloc((void**)(&d_in3), in3_bytes);
- cudaMalloc((void**)(&d_out), out_bytes);
+ gpuMalloc((void**)(&d_in1), in1_bytes);
+ gpuMalloc((void**)(&d_in2), in2_bytes);
+ gpuMalloc((void**)(&d_in3), in3_bytes);
+ gpuMalloc((void**)(&d_out), out_bytes);
- cudaMemcpy(d_in1, in1.data(), in1_bytes, cudaMemcpyHostToDevice);
- cudaMemcpy(d_in2, in2.data(), in2_bytes, cudaMemcpyHostToDevice);
- cudaMemcpy(d_in3, in3.data(), in3_bytes, cudaMemcpyHostToDevice);
+ gpuMemcpy(d_in1, in1.data(), in1_bytes, gpuMemcpyHostToDevice);
+ gpuMemcpy(d_in2, in2.data(), in2_bytes, gpuMemcpyHostToDevice);
+ gpuMemcpy(d_in3, in3.data(), in3_bytes, gpuMemcpyHostToDevice);
- Eigen::CudaStreamDevice stream;
+ Eigen::GpuStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<float, 3> > gpu_in1(d_in1, Eigen::array<Eigen::DenseIndex, 3>(72,53,97));
@@ -151,8 +150,8 @@ void test_cuda_elementwise()
gpu_out.device(gpu_device) = gpu_in1 + gpu_in2 * gpu_in3;
- assert(cudaMemcpyAsync(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
- assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
+ assert(gpuMemcpyAsync(out.data(), d_out, out_bytes, gpuMemcpyDeviceToHost, gpu_device.stream()) == gpuSuccess);
+ assert(gpuStreamSynchronize(gpu_device.stream()) == gpuSuccess);
for (int i = 0; i < 72; ++i) {
for (int j = 0; j < 53; ++j) {
@@ -162,13 +161,13 @@ void test_cuda_elementwise()
}
}
- cudaFree(d_in1);
- cudaFree(d_in2);
- cudaFree(d_in3);
- cudaFree(d_out);
+ gpuFree(d_in1);
+ gpuFree(d_in2);
+ gpuFree(d_in3);
+ gpuFree(d_out);
}
-void test_cuda_props() {
+void test_gpu_props() {
Tensor<float, 1> in1(200);
Tensor<bool, 1> out(200);
in1.setRandom();
@@ -178,12 +177,12 @@ void test_cuda_props() {
float* d_in1;
bool* d_out;
- cudaMalloc((void**)(&d_in1), in1_bytes);
- cudaMalloc((void**)(&d_out), out_bytes);
+ gpuMalloc((void**)(&d_in1), in1_bytes);
+ gpuMalloc((void**)(&d_out), out_bytes);
- cudaMemcpy(d_in1, in1.data(), in1_bytes, cudaMemcpyHostToDevice);
+ gpuMemcpy(d_in1, in1.data(), in1_bytes, gpuMemcpyHostToDevice);
- Eigen::CudaStreamDevice stream;
+ Eigen::GpuStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_in1(
@@ -193,19 +192,19 @@ void test_cuda_props() {
gpu_out.device(gpu_device) = (gpu_in1.isnan)();
- assert(cudaMemcpyAsync(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost,
- gpu_device.stream()) == cudaSuccess);
- assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
+ assert(gpuMemcpyAsync(out.data(), d_out, out_bytes, gpuMemcpyDeviceToHost,
+ gpu_device.stream()) == gpuSuccess);
+ assert(gpuStreamSynchronize(gpu_device.stream()) == gpuSuccess);
for (int i = 0; i < 200; ++i) {
VERIFY_IS_EQUAL(out(i), (std::isnan)(in1(i)));
}
- cudaFree(d_in1);
- cudaFree(d_out);
+ gpuFree(d_in1);
+ gpuFree(d_out);
}
-void test_cuda_reduction()
+void test_gpu_reduction()
{
Tensor<float, 4> in1(72,53,97,113);
Tensor<float, 2> out(72,97);
@@ -216,12 +215,12 @@ void test_cuda_reduction()
float* d_in1;
float* d_out;
- cudaMalloc((void**)(&d_in1), in1_bytes);
- cudaMalloc((void**)(&d_out), out_bytes);
+ gpuMalloc((void**)(&d_in1), in1_bytes);
+ gpuMalloc((void**)(&d_out), out_bytes);
- cudaMemcpy(d_in1, in1.data(), in1_bytes, cudaMemcpyHostToDevice);
+ gpuMemcpy(d_in1, in1.data(), in1_bytes, gpuMemcpyHostToDevice);
- Eigen::CudaStreamDevice stream;
+ Eigen::GpuStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<float, 4> > gpu_in1(d_in1, 72,53,97,113);
@@ -233,8 +232,8 @@ void test_cuda_reduction()
gpu_out.device(gpu_device) = gpu_in1.maximum(reduction_axis);
- assert(cudaMemcpyAsync(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
- assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
+ assert(gpuMemcpyAsync(out.data(), d_out, out_bytes, gpuMemcpyDeviceToHost, gpu_device.stream()) == gpuSuccess);
+ assert(gpuStreamSynchronize(gpu_device.stream()) == gpuSuccess);
for (int i = 0; i < 72; ++i) {
for (int j = 0; j < 97; ++j) {
@@ -249,12 +248,12 @@ void test_cuda_reduction()
}
}
- cudaFree(d_in1);
- cudaFree(d_out);
+ gpuFree(d_in1);
+ gpuFree(d_out);
}
template<int DataLayout>
-void test_cuda_contraction()
+void test_gpu_contraction()
{
// with these dimensions, the output has 300 * 140 elements, which is
// more than 30 * 1024, which is the number of threads in blocks on
@@ -274,14 +273,14 @@ void test_cuda_contraction()
float* d_t_right;
float* d_t_result;
- cudaMalloc((void**)(&d_t_left), t_left_bytes);
- cudaMalloc((void**)(&d_t_right), t_right_bytes);
- cudaMalloc((void**)(&d_t_result), t_result_bytes);
+ gpuMalloc((void**)(&d_t_left), t_left_bytes);
+ gpuMalloc((void**)(&d_t_right), t_right_bytes);
+ gpuMalloc((void**)(&d_t_result), t_result_bytes);
- cudaMemcpy(d_t_left, t_left.data(), t_left_bytes, cudaMemcpyHostToDevice);
- cudaMemcpy(d_t_right, t_right.data(), t_right_bytes, cudaMemcpyHostToDevice);
+ gpuMemcpy(d_t_left, t_left.data(), t_left_bytes, gpuMemcpyHostToDevice);
+ gpuMemcpy(d_t_right, t_right.data(), t_right_bytes, gpuMemcpyHostToDevice);
- Eigen::CudaStreamDevice stream;
+ Eigen::GpuStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<float, 4, DataLayout> > gpu_t_left(d_t_left, 6, 50, 3, 31);
@@ -301,7 +300,7 @@ void test_cuda_contraction()
m_result = m_left * m_right;
gpu_t_result.device(gpu_device) = gpu_t_left.contract(gpu_t_right, dims);
- cudaMemcpy(t_result.data(), d_t_result, t_result_bytes, cudaMemcpyDeviceToHost);
+ gpuMemcpy(t_result.data(), d_t_result, t_result_bytes, gpuMemcpyDeviceToHost);
for (DenseIndex i = 0; i < t_result.size(); i++) {
if (fabs(t_result.data()[i] - m_result.data()[i]) >= 1e-4f) {
@@ -310,13 +309,13 @@ void test_cuda_contraction()
}
}
- cudaFree(d_t_left);
- cudaFree(d_t_right);
- cudaFree(d_t_result);
+ gpuFree(d_t_left);
+ gpuFree(d_t_right);
+ gpuFree(d_t_result);
}
template<int DataLayout>
-void test_cuda_convolution_1d()
+void test_gpu_convolution_1d()
{
Tensor<float, 4, DataLayout> input(74,37,11,137);
Tensor<float, 1, DataLayout> kernel(4);
@@ -331,14 +330,14 @@ void test_cuda_convolution_1d()
float* d_input;
float* d_kernel;
float* d_out;
- cudaMalloc((void**)(&d_input), input_bytes);
- cudaMalloc((void**)(&d_kernel), kernel_bytes);
- cudaMalloc((void**)(&d_out), out_bytes);
+ gpuMalloc((void**)(&d_input), input_bytes);
+ gpuMalloc((void**)(&d_kernel), kernel_bytes);
+ gpuMalloc((void**)(&d_out), out_bytes);
- cudaMemcpy(d_input, input.data(), input_bytes, cudaMemcpyHostToDevice);
- cudaMemcpy(d_kernel, kernel.data(), kernel_bytes, cudaMemcpyHostToDevice);
+ gpuMemcpy(d_input, input.data(), input_bytes, gpuMemcpyHostToDevice);
+ gpuMemcpy(d_kernel, kernel.data(), kernel_bytes, gpuMemcpyHostToDevice);
- Eigen::CudaStreamDevice stream;
+ Eigen::GpuStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<float, 4, DataLayout> > gpu_input(d_input, 74,37,11,137);
@@ -348,8 +347,8 @@ void test_cuda_convolution_1d()
Eigen::array<Eigen::DenseIndex, 1> dims(1);
gpu_out.device(gpu_device) = gpu_input.convolve(gpu_kernel, dims);
- assert(cudaMemcpyAsync(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
- assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
+ assert(gpuMemcpyAsync(out.data(), d_out, out_bytes, gpuMemcpyDeviceToHost, gpu_device.stream()) == gpuSuccess);
+ assert(gpuStreamSynchronize(gpu_device.stream()) == gpuSuccess);
for (int i = 0; i < 74; ++i) {
for (int j = 0; j < 34; ++j) {
@@ -364,12 +363,12 @@ void test_cuda_convolution_1d()
}
}
- cudaFree(d_input);
- cudaFree(d_kernel);
- cudaFree(d_out);
+ gpuFree(d_input);
+ gpuFree(d_kernel);
+ gpuFree(d_out);
}
-void test_cuda_convolution_inner_dim_col_major_1d()
+void test_gpu_convolution_inner_dim_col_major_1d()
{
Tensor<float, 4, ColMajor> input(74,9,11,7);
Tensor<float, 1, ColMajor> kernel(4);
@@ -384,14 +383,14 @@ void test_cuda_convolution_inner_dim_col_major_1d()
float* d_input;
float* d_kernel;
float* d_out;
- cudaMalloc((void**)(&d_input), input_bytes);
- cudaMalloc((void**)(&d_kernel), kernel_bytes);
- cudaMalloc((void**)(&d_out), out_bytes);
+ gpuMalloc((void**)(&d_input), input_bytes);
+ gpuMalloc((void**)(&d_kernel), kernel_bytes);
+ gpuMalloc((void**)(&d_out), out_bytes);
- cudaMemcpy(d_input, input.data(), input_bytes, cudaMemcpyHostToDevice);
- cudaMemcpy(d_kernel, kernel.data(), kernel_bytes, cudaMemcpyHostToDevice);
+ gpuMemcpy(d_input, input.data(), input_bytes, gpuMemcpyHostToDevice);
+ gpuMemcpy(d_kernel, kernel.data(), kernel_bytes, gpuMemcpyHostToDevice);
- Eigen::CudaStreamDevice stream;
+ Eigen::GpuStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<float, 4, ColMajor> > gpu_input(d_input,74,9,11,7);
@@ -401,8 +400,8 @@ void test_cuda_convolution_inner_dim_col_major_1d()
Eigen::array<Eigen::DenseIndex, 1> dims(0);
gpu_out.device(gpu_device) = gpu_input.convolve(gpu_kernel, dims);
- assert(cudaMemcpyAsync(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
- assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
+ assert(gpuMemcpyAsync(out.data(), d_out, out_bytes, gpuMemcpyDeviceToHost, gpu_device.stream()) == gpuSuccess);
+ assert(gpuStreamSynchronize(gpu_device.stream()) == gpuSuccess);
for (int i = 0; i < 71; ++i) {
for (int j = 0; j < 9; ++j) {
@@ -417,12 +416,12 @@ void test_cuda_convolution_inner_dim_col_major_1d()
}
}
- cudaFree(d_input);
- cudaFree(d_kernel);
- cudaFree(d_out);
+ gpuFree(d_input);
+ gpuFree(d_kernel);
+ gpuFree(d_out);
}
-void test_cuda_convolution_inner_dim_row_major_1d()
+void test_gpu_convolution_inner_dim_row_major_1d()
{
Tensor<float, 4, RowMajor> input(7,9,11,74);
Tensor<float, 1, RowMajor> kernel(4);
@@ -437,14 +436,14 @@ void test_cuda_convolution_inner_dim_row_major_1d()
float* d_input;
float* d_kernel;
float* d_out;
- cudaMalloc((void**)(&d_input), input_bytes);
- cudaMalloc((void**)(&d_kernel), kernel_bytes);
- cudaMalloc((void**)(&d_out), out_bytes);
+ gpuMalloc((void**)(&d_input), input_bytes);
+ gpuMalloc((void**)(&d_kernel), kernel_bytes);
+ gpuMalloc((void**)(&d_out), out_bytes);
- cudaMemcpy(d_input, input.data(), input_bytes, cudaMemcpyHostToDevice);
- cudaMemcpy(d_kernel, kernel.data(), kernel_bytes, cudaMemcpyHostToDevice);
+ gpuMemcpy(d_input, input.data(), input_bytes, gpuMemcpyHostToDevice);
+ gpuMemcpy(d_kernel, kernel.data(), kernel_bytes, gpuMemcpyHostToDevice);
- Eigen::CudaStreamDevice stream;
+ Eigen::GpuStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<float, 4, RowMajor> > gpu_input(d_input, 7,9,11,74);
@@ -454,8 +453,8 @@ void test_cuda_convolution_inner_dim_row_major_1d()
Eigen::array<Eigen::DenseIndex, 1> dims(3);
gpu_out.device(gpu_device) = gpu_input.convolve(gpu_kernel, dims);
- assert(cudaMemcpyAsync(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
- assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
+ assert(gpuMemcpyAsync(out.data(), d_out, out_bytes, gpuMemcpyDeviceToHost, gpu_device.stream()) == gpuSuccess);
+ assert(gpuStreamSynchronize(gpu_device.stream()) == gpuSuccess);
for (int i = 0; i < 7; ++i) {
for (int j = 0; j < 9; ++j) {
@@ -470,13 +469,13 @@ void test_cuda_convolution_inner_dim_row_major_1d()
}
}
- cudaFree(d_input);
- cudaFree(d_kernel);
- cudaFree(d_out);
+ gpuFree(d_input);
+ gpuFree(d_kernel);
+ gpuFree(d_out);
}
template<int DataLayout>
-void test_cuda_convolution_2d()
+void test_gpu_convolution_2d()
{
Tensor<float, 4, DataLayout> input(74,37,11,137);
Tensor<float, 2, DataLayout> kernel(3,4);
@@ -491,14 +490,14 @@ void test_cuda_convolution_2d()
float* d_input;
float* d_kernel;
float* d_out;
- cudaMalloc((void**)(&d_input), input_bytes);
- cudaMalloc((void**)(&d_kernel), kernel_bytes);
- cudaMalloc((void**)(&d_out), out_bytes);
+ gpuMalloc((void**)(&d_input), input_bytes);
+ gpuMalloc((void**)(&d_kernel), kernel_bytes);
+ gpuMalloc((void**)(&d_out), out_bytes);
- cudaMemcpy(d_input, input.data(), input_bytes, cudaMemcpyHostToDevice);
- cudaMemcpy(d_kernel, kernel.data(), kernel_bytes, cudaMemcpyHostToDevice);
+ gpuMemcpy(d_input, input.data(), input_bytes, gpuMemcpyHostToDevice);
+ gpuMemcpy(d_kernel, kernel.data(), kernel_bytes, gpuMemcpyHostToDevice);
- Eigen::CudaStreamDevice stream;
+ Eigen::GpuStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<float, 4, DataLayout> > gpu_input(d_input,74,37,11,137);
@@ -508,8 +507,8 @@ void test_cuda_convolution_2d()
Eigen::array<Eigen::DenseIndex, 2> dims(1,2);
gpu_out.device(gpu_device) = gpu_input.convolve(gpu_kernel, dims);
- assert(cudaMemcpyAsync(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
- assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
+ assert(gpuMemcpyAsync(out.data(), d_out, out_bytes, gpuMemcpyDeviceToHost, gpu_device.stream()) == gpuSuccess);
+ assert(gpuStreamSynchronize(gpu_device.stream()) == gpuSuccess);
for (int i = 0; i < 74; ++i) {
for (int j = 0; j < 35; ++j) {
@@ -534,13 +533,13 @@ void test_cuda_convolution_2d()
}
}
- cudaFree(d_input);
- cudaFree(d_kernel);
- cudaFree(d_out);
+ gpuFree(d_input);
+ gpuFree(d_kernel);
+ gpuFree(d_out);
}
template<int DataLayout>
-void test_cuda_convolution_3d()
+void test_gpu_convolution_3d()
{
Tensor<float, 5, DataLayout> input(Eigen::array<Eigen::DenseIndex, 5>(74,37,11,137,17));
Tensor<float, 3, DataLayout> kernel(3,4,2);
@@ -555,14 +554,14 @@ void test_cuda_convolution_3d()
float* d_input;
float* d_kernel;
float* d_out;
- cudaMalloc((void**)(&d_input), input_bytes);
- cudaMalloc((void**)(&d_kernel), kernel_bytes);
- cudaMalloc((void**)(&d_out), out_bytes);
+ gpuMalloc((void**)(&d_input), input_bytes);
+ gpuMalloc((void**)(&d_kernel), kernel_bytes);
+ gpuMalloc((void**)(&d_out), out_bytes);
- cudaMemcpy(d_input, input.data(), input_bytes, cudaMemcpyHostToDevice);
- cudaMemcpy(d_kernel, kernel.data(), kernel_bytes, cudaMemcpyHostToDevice);
+ gpuMemcpy(d_input, input.data(), input_bytes, gpuMemcpyHostToDevice);
+ gpuMemcpy(d_kernel, kernel.data(), kernel_bytes, gpuMemcpyHostToDevice);
- Eigen::CudaStreamDevice stream;
+ Eigen::GpuStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<float, 5, DataLayout> > gpu_input(d_input,74,37,11,137,17);
@@ -572,8 +571,8 @@ void test_cuda_convolution_3d()
Eigen::array<Eigen::DenseIndex, 3> dims(1,2,3);
gpu_out.device(gpu_device) = gpu_input.convolve(gpu_kernel, dims);
- assert(cudaMemcpyAsync(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
- assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
+ assert(gpuMemcpyAsync(out.data(), d_out, out_bytes, gpuMemcpyDeviceToHost, gpu_device.stream()) == gpuSuccess);
+ assert(gpuStreamSynchronize(gpu_device.stream()) == gpuSuccess);
for (int i = 0; i < 74; ++i) {
for (int j = 0; j < 35; ++j) {
@@ -612,14 +611,14 @@ void test_cuda_convolution_3d()
}
}
- cudaFree(d_input);
- cudaFree(d_kernel);
- cudaFree(d_out);
+ gpuFree(d_input);
+ gpuFree(d_kernel);
+ gpuFree(d_out);
}
template <typename Scalar>
-void test_cuda_lgamma(const Scalar stddev)
+void test_gpu_lgamma(const Scalar stddev)
{
Tensor<Scalar, 2> in(72,97);
in.setRandom();
@@ -631,12 +630,12 @@ void test_cuda_lgamma(const Scalar stddev)
Scalar* d_in;
Scalar* d_out;
- cudaMalloc((void**)(&d_in), bytes);
- cudaMalloc((void**)(&d_out), bytes);
+ gpuMalloc((void**)(&d_in), bytes);
+ gpuMalloc((void**)(&d_out), bytes);
- cudaMemcpy(d_in, in.data(), bytes, cudaMemcpyHostToDevice);
+ gpuMemcpy(d_in, in.data(), bytes, gpuMemcpyHostToDevice);
- Eigen::CudaStreamDevice stream;
+ Eigen::GpuStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<Scalar, 2> > gpu_in(d_in, 72, 97);
@@ -644,8 +643,8 @@ void test_cuda_lgamma(const Scalar stddev)
gpu_out.device(gpu_device) = gpu_in.lgamma();
- assert(cudaMemcpyAsync(out.data(), d_out, bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
- assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
+ assert(gpuMemcpyAsync(out.data(), d_out, bytes, gpuMemcpyDeviceToHost, gpu_device.stream()) == gpuSuccess);
+ assert(gpuStreamSynchronize(gpu_device.stream()) == gpuSuccess);
for (int i = 0; i < 72; ++i) {
for (int j = 0; j < 97; ++j) {
@@ -653,12 +652,12 @@ void test_cuda_lgamma(const Scalar stddev)
}
}
- cudaFree(d_in);
- cudaFree(d_out);
+ gpuFree(d_in);
+ gpuFree(d_out);
}
template <typename Scalar>
-void test_cuda_digamma()
+void test_gpu_digamma()
{
Tensor<Scalar, 1> in(7);
Tensor<Scalar, 1> out(7);
@@ -685,12 +684,12 @@ void test_cuda_digamma()
Scalar* d_in;
Scalar* d_out;
- cudaMalloc((void**)(&d_in), bytes);
- cudaMalloc((void**)(&d_out), bytes);
+ gpuMalloc((void**)(&d_in), bytes);
+ gpuMalloc((void**)(&d_out), bytes);
- cudaMemcpy(d_in, in.data(), bytes, cudaMemcpyHostToDevice);
+ gpuMemcpy(d_in, in.data(), bytes, gpuMemcpyHostToDevice);
- Eigen::CudaStreamDevice stream;
+ Eigen::GpuStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_in(d_in, 7);
@@ -698,8 +697,8 @@ void test_cuda_digamma()
gpu_out.device(gpu_device) = gpu_in.digamma();
- assert(cudaMemcpyAsync(out.data(), d_out, bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
- assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
+ assert(gpuMemcpyAsync(out.data(), d_out, bytes, gpuMemcpyDeviceToHost, gpu_device.stream()) == gpuSuccess);
+ assert(gpuStreamSynchronize(gpu_device.stream()) == gpuSuccess);
for (int i = 0; i < 5; ++i) {
VERIFY_IS_APPROX(out(i), expected_out(i));
@@ -708,12 +707,12 @@ void test_cuda_digamma()
VERIFY_IS_EQUAL(out(i), expected_out(i));
}
- cudaFree(d_in);
- cudaFree(d_out);
+ gpuFree(d_in);
+ gpuFree(d_out);
}
template <typename Scalar>
-void test_cuda_zeta()
+void test_gpu_zeta()
{
Tensor<Scalar, 1> in_x(6);
Tensor<Scalar, 1> in_q(6);
@@ -747,14 +746,14 @@ void test_cuda_zeta()
Scalar* d_in_x;
Scalar* d_in_q;
Scalar* d_out;
- cudaMalloc((void**)(&d_in_x), bytes);
- cudaMalloc((void**)(&d_in_q), bytes);
- cudaMalloc((void**)(&d_out), bytes);
+ gpuMalloc((void**)(&d_in_x), bytes);
+ gpuMalloc((void**)(&d_in_q), bytes);
+ gpuMalloc((void**)(&d_out), bytes);
- cudaMemcpy(d_in_x, in_x.data(), bytes, cudaMemcpyHostToDevice);
- cudaMemcpy(d_in_q, in_q.data(), bytes, cudaMemcpyHostToDevice);
+ gpuMemcpy(d_in_x, in_x.data(), bytes, gpuMemcpyHostToDevice);
+ gpuMemcpy(d_in_q, in_q.data(), bytes, gpuMemcpyHostToDevice);
- Eigen::CudaStreamDevice stream;
+ Eigen::GpuStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_in_x(d_in_x, 6);
@@ -763,8 +762,8 @@ void test_cuda_zeta()
gpu_out.device(gpu_device) = gpu_in_x.zeta(gpu_in_q);
- assert(cudaMemcpyAsync(out.data(), d_out, bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
- assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
+ assert(gpuMemcpyAsync(out.data(), d_out, bytes, gpuMemcpyDeviceToHost, gpu_device.stream()) == gpuSuccess);
+ assert(gpuStreamSynchronize(gpu_device.stream()) == gpuSuccess);
VERIFY_IS_EQUAL(out(0), expected_out(0));
VERIFY((std::isnan)(out(3)));
@@ -775,13 +774,13 @@ void test_cuda_zeta()
}
}
- cudaFree(d_in_x);
- cudaFree(d_in_q);
- cudaFree(d_out);
+ gpuFree(d_in_x);
+ gpuFree(d_in_q);
+ gpuFree(d_out);
}
template <typename Scalar>
-void test_cuda_polygamma()
+void test_gpu_polygamma()
{
Tensor<Scalar, 1> in_x(7);
Tensor<Scalar, 1> in_n(7);
@@ -818,14 +817,14 @@ void test_cuda_polygamma()
Scalar* d_in_x;
Scalar* d_in_n;
Scalar* d_out;
- cudaMalloc((void**)(&d_in_x), bytes);
- cudaMalloc((void**)(&d_in_n), bytes);
- cudaMalloc((void**)(&d_out), bytes);
+ gpuMalloc((void**)(&d_in_x), bytes);
+ gpuMalloc((void**)(&d_in_n), bytes);
+ gpuMalloc((void**)(&d_out), bytes);
- cudaMemcpy(d_in_x, in_x.data(), bytes, cudaMemcpyHostToDevice);
- cudaMemcpy(d_in_n, in_n.data(), bytes, cudaMemcpyHostToDevice);
+ gpuMemcpy(d_in_x, in_x.data(), bytes, gpuMemcpyHostToDevice);
+ gpuMemcpy(d_in_n, in_n.data(), bytes, gpuMemcpyHostToDevice);
- Eigen::CudaStreamDevice stream;
+ Eigen::GpuStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_in_x(d_in_x, 7);
@@ -834,20 +833,20 @@ void test_cuda_polygamma()
gpu_out.device(gpu_device) = gpu_in_n.polygamma(gpu_in_x);
- assert(cudaMemcpyAsync(out.data(), d_out, bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
- assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
+ assert(gpuMemcpyAsync(out.data(), d_out, bytes, gpuMemcpyDeviceToHost, gpu_device.stream()) == gpuSuccess);
+ assert(gpuStreamSynchronize(gpu_device.stream()) == gpuSuccess);
for (int i = 0; i < 7; ++i) {
VERIFY_IS_APPROX(out(i), expected_out(i));
}
- cudaFree(d_in_x);
- cudaFree(d_in_n);
- cudaFree(d_out);
+ gpuFree(d_in_x);
+ gpuFree(d_in_n);
+ gpuFree(d_out);
}
template <typename Scalar>
-void test_cuda_igamma()
+void test_gpu_igamma()
{
Tensor<Scalar, 2> a(6, 6);
Tensor<Scalar, 2> x(6, 6);
@@ -883,14 +882,14 @@ void test_cuda_igamma()
Scalar* d_a;
Scalar* d_x;
Scalar* d_out;
- assert(cudaMalloc((void**)(&d_a), bytes) == cudaSuccess);
- assert(cudaMalloc((void**)(&d_x), bytes) == cudaSuccess);
- assert(cudaMalloc((void**)(&d_out), bytes) == cudaSuccess);
+ assert(gpuMalloc((void**)(&d_a), bytes) == gpuSuccess);
+ assert(gpuMalloc((void**)(&d_x), bytes) == gpuSuccess);
+ assert(gpuMalloc((void**)(&d_out), bytes) == gpuSuccess);
- cudaMemcpy(d_a, a.data(), bytes, cudaMemcpyHostToDevice);
- cudaMemcpy(d_x, x.data(), bytes, cudaMemcpyHostToDevice);
+ gpuMemcpy(d_a, a.data(), bytes, gpuMemcpyHostToDevice);
+ gpuMemcpy(d_x, x.data(), bytes, gpuMemcpyHostToDevice);
- Eigen::CudaStreamDevice stream;
+ Eigen::GpuStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<Scalar, 2> > gpu_a(d_a, 6, 6);
@@ -899,8 +898,8 @@ void test_cuda_igamma()
gpu_out.device(gpu_device) = gpu_a.igamma(gpu_x);
- assert(cudaMemcpyAsync(out.data(), d_out, bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
- assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
+ assert(gpuMemcpyAsync(out.data(), d_out, bytes, gpuMemcpyDeviceToHost, gpu_device.stream()) == gpuSuccess);
+ assert(gpuStreamSynchronize(gpu_device.stream()) == gpuSuccess);
for (int i = 0; i < 6; ++i) {
for (int j = 0; j < 6; ++j) {
@@ -912,13 +911,13 @@ void test_cuda_igamma()
}
}
- cudaFree(d_a);
- cudaFree(d_x);
- cudaFree(d_out);
+ gpuFree(d_a);
+ gpuFree(d_x);
+ gpuFree(d_out);
}
template <typename Scalar>
-void test_cuda_igammac()
+void test_gpu_igammac()
{
Tensor<Scalar, 2> a(6, 6);
Tensor<Scalar, 2> x(6, 6);
@@ -953,14 +952,14 @@ void test_cuda_igammac()
Scalar* d_a;
Scalar* d_x;
Scalar* d_out;
- cudaMalloc((void**)(&d_a), bytes);
- cudaMalloc((void**)(&d_x), bytes);
- cudaMalloc((void**)(&d_out), bytes);
+ gpuMalloc((void**)(&d_a), bytes);
+ gpuMalloc((void**)(&d_x), bytes);
+ gpuMalloc((void**)(&d_out), bytes);
- cudaMemcpy(d_a, a.data(), bytes, cudaMemcpyHostToDevice);
- cudaMemcpy(d_x, x.data(), bytes, cudaMemcpyHostToDevice);
+ gpuMemcpy(d_a, a.data(), bytes, gpuMemcpyHostToDevice);
+ gpuMemcpy(d_x, x.data(), bytes, gpuMemcpyHostToDevice);
- Eigen::CudaStreamDevice stream;
+ Eigen::GpuStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<Scalar, 2> > gpu_a(d_a, 6, 6);
@@ -969,8 +968,8 @@ void test_cuda_igammac()
gpu_out.device(gpu_device) = gpu_a.igammac(gpu_x);
- assert(cudaMemcpyAsync(out.data(), d_out, bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
- assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
+ assert(gpuMemcpyAsync(out.data(), d_out, bytes, gpuMemcpyDeviceToHost, gpu_device.stream()) == gpuSuccess);
+ assert(gpuStreamSynchronize(gpu_device.stream()) == gpuSuccess);
for (int i = 0; i < 6; ++i) {
for (int j = 0; j < 6; ++j) {
@@ -982,13 +981,13 @@ void test_cuda_igammac()
}
}
- cudaFree(d_a);
- cudaFree(d_x);
- cudaFree(d_out);
+ gpuFree(d_a);
+ gpuFree(d_x);
+ gpuFree(d_out);
}
template <typename Scalar>
-void test_cuda_erf(const Scalar stddev)
+void test_gpu_erf(const Scalar stddev)
{
Tensor<Scalar, 2> in(72,97);
in.setRandom();
@@ -1000,12 +999,12 @@ void test_cuda_erf(const Scalar stddev)
Scalar* d_in;
Scalar* d_out;
- assert(cudaMalloc((void**)(&d_in), bytes) == cudaSuccess);
- assert(cudaMalloc((void**)(&d_out), bytes) == cudaSuccess);
+ assert(gpuMalloc((void**)(&d_in), bytes) == gpuSuccess);
+ assert(gpuMalloc((void**)(&d_out), bytes) == gpuSuccess);
- cudaMemcpy(d_in, in.data(), bytes, cudaMemcpyHostToDevice);
+ gpuMemcpy(d_in, in.data(), bytes, gpuMemcpyHostToDevice);
- Eigen::CudaStreamDevice stream;
+ Eigen::GpuStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<Scalar, 2> > gpu_in(d_in, 72, 97);
@@ -1013,8 +1012,8 @@ void test_cuda_erf(const Scalar stddev)
gpu_out.device(gpu_device) = gpu_in.erf();
- assert(cudaMemcpyAsync(out.data(), d_out, bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
- assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
+ assert(gpuMemcpyAsync(out.data(), d_out, bytes, gpuMemcpyDeviceToHost, gpu_device.stream()) == gpuSuccess);
+ assert(gpuStreamSynchronize(gpu_device.stream()) == gpuSuccess);
for (int i = 0; i < 72; ++i) {
for (int j = 0; j < 97; ++j) {
@@ -1022,12 +1021,12 @@ void test_cuda_erf(const Scalar stddev)
}
}
- cudaFree(d_in);
- cudaFree(d_out);
+ gpuFree(d_in);
+ gpuFree(d_out);
}
template <typename Scalar>
-void test_cuda_erfc(const Scalar stddev)
+void test_gpu_erfc(const Scalar stddev)
{
Tensor<Scalar, 2> in(72,97);
in.setRandom();
@@ -1039,12 +1038,12 @@ void test_cuda_erfc(const Scalar stddev)
Scalar* d_in;
Scalar* d_out;
- cudaMalloc((void**)(&d_in), bytes);
- cudaMalloc((void**)(&d_out), bytes);
+ gpuMalloc((void**)(&d_in), bytes);
+ gpuMalloc((void**)(&d_out), bytes);
- cudaMemcpy(d_in, in.data(), bytes, cudaMemcpyHostToDevice);
+ gpuMemcpy(d_in, in.data(), bytes, gpuMemcpyHostToDevice);
- Eigen::CudaStreamDevice stream;
+ Eigen::GpuStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<Scalar, 2> > gpu_in(d_in, 72, 97);
@@ -1052,8 +1051,8 @@ void test_cuda_erfc(const Scalar stddev)
gpu_out.device(gpu_device) = gpu_in.erfc();
- assert(cudaMemcpyAsync(out.data(), d_out, bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
- assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
+ assert(gpuMemcpyAsync(out.data(), d_out, bytes, gpuMemcpyDeviceToHost, gpu_device.stream()) == gpuSuccess);
+ assert(gpuStreamSynchronize(gpu_device.stream()) == gpuSuccess);
for (int i = 0; i < 72; ++i) {
for (int j = 0; j < 97; ++j) {
@@ -1061,12 +1060,12 @@ void test_cuda_erfc(const Scalar stddev)
}
}
- cudaFree(d_in);
- cudaFree(d_out);
+ gpuFree(d_in);
+ gpuFree(d_out);
}
template <typename Scalar>
-void test_cuda_betainc()
+void test_gpu_betainc()
{
Tensor<Scalar, 1> in_x(125);
Tensor<Scalar, 1> in_a(125);
@@ -1175,16 +1174,16 @@ void test_cuda_betainc()
Scalar* d_in_a;
Scalar* d_in_b;
Scalar* d_out;
- cudaMalloc((void**)(&d_in_x), bytes);
- cudaMalloc((void**)(&d_in_a), bytes);
- cudaMalloc((void**)(&d_in_b), bytes);
- cudaMalloc((void**)(&d_out), bytes);
+ gpuMalloc((void**)(&d_in_x), bytes);
+ gpuMalloc((void**)(&d_in_a), bytes);
+ gpuMalloc((void**)(&d_in_b), bytes);
+ gpuMalloc((void**)(&d_out), bytes);
- cudaMemcpy(d_in_x, in_x.data(), bytes, cudaMemcpyHostToDevice);
- cudaMemcpy(d_in_a, in_a.data(), bytes, cudaMemcpyHostToDevice);
- cudaMemcpy(d_in_b, in_b.data(), bytes, cudaMemcpyHostToDevice);
+ gpuMemcpy(d_in_x, in_x.data(), bytes, gpuMemcpyHostToDevice);
+ gpuMemcpy(d_in_a, in_a.data(), bytes, gpuMemcpyHostToDevice);
+ gpuMemcpy(d_in_b, in_b.data(), bytes, gpuMemcpyHostToDevice);
- Eigen::CudaStreamDevice stream;
+ Eigen::GpuStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_in_x(d_in_x, 125);
@@ -1194,8 +1193,8 @@ void test_cuda_betainc()
gpu_out.device(gpu_device) = betainc(gpu_in_a, gpu_in_b, gpu_in_x);
- assert(cudaMemcpyAsync(out.data(), d_out, bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
- assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
+ assert(gpuMemcpyAsync(out.data(), d_out, bytes, gpuMemcpyDeviceToHost, gpu_device.stream()) == gpuSuccess);
+ assert(gpuStreamSynchronize(gpu_device.stream()) == gpuSuccess);
for (int i = 1; i < 125; ++i) {
if ((std::isnan)(expected_out(i))) {
@@ -1205,83 +1204,370 @@ void test_cuda_betainc()
}
}
- cudaFree(d_in_x);
- cudaFree(d_in_a);
- cudaFree(d_in_b);
- cudaFree(d_out);
+ gpuFree(d_in_x);
+ gpuFree(d_in_a);
+ gpuFree(d_in_b);
+ gpuFree(d_out);
+}
+
+template <typename Scalar>
+void test_gpu_i0e()
+{
+ Tensor<Scalar, 1> in_x(21);
+ Tensor<Scalar, 1> out(21);
+ Tensor<Scalar, 1> expected_out(21);
+ out.setZero();
+
+ Array<Scalar, 1, Dynamic> in_x_array(21);
+ Array<Scalar, 1, Dynamic> expected_out_array(21);
+
+ in_x_array << -20.0, -18.0, -16.0, -14.0, -12.0, -10.0, -8.0, -6.0, -4.0,
+ -2.0, 0.0, 2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0, 18.0, 20.0;
+
+ expected_out_array << 0.0897803118848, 0.0947062952128, 0.100544127361,
+ 0.107615251671, 0.116426221213, 0.127833337163, 0.143431781857,
+ 0.16665743264, 0.207001921224, 0.308508322554, 1.0, 0.308508322554,
+ 0.207001921224, 0.16665743264, 0.143431781857, 0.127833337163,
+ 0.116426221213, 0.107615251671, 0.100544127361, 0.0947062952128,
+ 0.0897803118848;
+
+ for (int i = 0; i < 21; ++i) {
+ in_x(i) = in_x_array(i);
+ expected_out(i) = expected_out_array(i);
+ }
+
+ std::size_t bytes = in_x.size() * sizeof(Scalar);
+
+ Scalar* d_in;
+ Scalar* d_out;
+ gpuMalloc((void**)(&d_in), bytes);
+ gpuMalloc((void**)(&d_out), bytes);
+
+ gpuMemcpy(d_in, in_x.data(), bytes, gpuMemcpyHostToDevice);
+
+ Eigen::GpuStreamDevice stream;
+ Eigen::GpuDevice gpu_device(&stream);
+
+ Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_in(d_in, 21);
+ Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_out(d_out, 21);
+
+ gpu_out.device(gpu_device) = gpu_in.i0e();
+
+ assert(gpuMemcpyAsync(out.data(), d_out, bytes, gpuMemcpyDeviceToHost,
+ gpu_device.stream()) == gpuSuccess);
+ assert(gpuStreamSynchronize(gpu_device.stream()) == gpuSuccess);
+
+ for (int i = 0; i < 21; ++i) {
+ VERIFY_IS_APPROX(out(i), expected_out(i));
+ }
+
+ gpuFree(d_in);
+ gpuFree(d_out);
}
+template <typename Scalar>
+void test_gpu_i1e()
+{
+ Tensor<Scalar, 1> in_x(21);
+ Tensor<Scalar, 1> out(21);
+ Tensor<Scalar, 1> expected_out(21);
+ out.setZero();
+
+ Array<Scalar, 1, Dynamic> in_x_array(21);
+ Array<Scalar, 1, Dynamic> expected_out_array(21);
+
+ in_x_array << -20.0, -18.0, -16.0, -14.0, -12.0, -10.0, -8.0, -6.0, -4.0,
+ -2.0, 0.0, 2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0, 18.0, 20.0;
+
+ expected_out_array << -0.0875062221833, -0.092036796872, -0.0973496147565,
+ -0.103697667463, -0.11146429929, -0.121262681384, -0.134142493293,
+ -0.152051459309, -0.178750839502, -0.215269289249, 0.0, 0.215269289249,
+ 0.178750839502, 0.152051459309, 0.134142493293, 0.121262681384,
+ 0.11146429929, 0.103697667463, 0.0973496147565, 0.092036796872,
+ 0.0875062221833;
+
+ for (int i = 0; i < 21; ++i) {
+ in_x(i) = in_x_array(i);
+ expected_out(i) = expected_out_array(i);
+ }
+
+ std::size_t bytes = in_x.size() * sizeof(Scalar);
+
+ Scalar* d_in;
+ Scalar* d_out;
+ gpuMalloc((void**)(&d_in), bytes);
+ gpuMalloc((void**)(&d_out), bytes);
-void test_cxx11_tensor_cuda()
+ gpuMemcpy(d_in, in_x.data(), bytes, gpuMemcpyHostToDevice);
+
+ Eigen::GpuStreamDevice stream;
+ Eigen::GpuDevice gpu_device(&stream);
+
+ Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_in(d_in, 21);
+ Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_out(d_out, 21);
+
+ gpu_out.device(gpu_device) = gpu_in.i1e();
+
+ assert(gpuMemcpyAsync(out.data(), d_out, bytes, gpuMemcpyDeviceToHost,
+ gpu_device.stream()) == gpuSuccess);
+ assert(gpuStreamSynchronize(gpu_device.stream()) == gpuSuccess);
+
+ for (int i = 0; i < 21; ++i) {
+ VERIFY_IS_APPROX(out(i), expected_out(i));
+ }
+
+ gpuFree(d_in);
+ gpuFree(d_out);
+}
+
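For reference, the new i0e/i1e tests above exercise the exponentially scaled modified Bessel functions of the first kind. Assuming the usual definition (the one used by, e.g., SciPy's i0e/i1e):

    i0e(x) = exp(-|x|) * I0(x)
    i1e(x) = exp(-|x|) * I1(x)

which is consistent with the expected values hard-coded above: i0e is even in x with i0e(0) = 1, and i1e is odd in x with i1e(0) = 0.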
+template <typename Scalar>
+void test_gpu_igamma_der_a()
{
- CALL_SUBTEST_1(test_cuda_nullary());
- CALL_SUBTEST_1(test_cuda_elementwise_small());
- CALL_SUBTEST_1(test_cuda_elementwise());
- CALL_SUBTEST_1(test_cuda_props());
- CALL_SUBTEST_1(test_cuda_reduction());
- CALL_SUBTEST_2(test_cuda_contraction<ColMajor>());
- CALL_SUBTEST_2(test_cuda_contraction<RowMajor>());
- CALL_SUBTEST_3(test_cuda_convolution_1d<ColMajor>());
- CALL_SUBTEST_3(test_cuda_convolution_1d<RowMajor>());
- CALL_SUBTEST_3(test_cuda_convolution_inner_dim_col_major_1d());
- CALL_SUBTEST_3(test_cuda_convolution_inner_dim_row_major_1d());
- CALL_SUBTEST_3(test_cuda_convolution_2d<ColMajor>());
- CALL_SUBTEST_3(test_cuda_convolution_2d<RowMajor>());
- CALL_SUBTEST_3(test_cuda_convolution_3d<ColMajor>());
- CALL_SUBTEST_3(test_cuda_convolution_3d<RowMajor>());
+ Tensor<Scalar, 1> in_x(30);
+ Tensor<Scalar, 1> in_a(30);
+ Tensor<Scalar, 1> out(30);
+ Tensor<Scalar, 1> expected_out(30);
+ out.setZero();
+
+ Array<Scalar, 1, Dynamic> in_a_array(30);
+ Array<Scalar, 1, Dynamic> in_x_array(30);
+ Array<Scalar, 1, Dynamic> expected_out_array(30);
+
+ // See special_functions.cpp for the Python code that generates the test data.
+
+ in_a_array << 0.01, 0.01, 0.01, 0.01, 0.01, 0.1, 0.1, 0.1, 0.1, 0.1, 1.0, 1.0,
+ 1.0, 1.0, 1.0, 10.0, 10.0, 10.0, 10.0, 10.0, 100.0, 100.0, 100.0, 100.0,
+ 100.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0;
+
+ in_x_array << 1.25668890405e-26, 1.17549435082e-38, 1.20938905072e-05,
+ 1.17549435082e-38, 1.17549435082e-38, 5.66572070696e-16, 0.0132865061065,
+ 0.0200034203853, 6.29263709118e-17, 1.37160367764e-06, 0.333412038288,
+ 1.18135687766, 0.580629033777, 0.170631439426, 0.786686768458,
+ 7.63873279537, 13.1944344379, 11.896042354, 10.5830172417, 10.5020942233,
+ 92.8918587747, 95.003720371, 86.3715926467, 96.0330217672, 82.6389930677,
+ 968.702906754, 969.463546828, 1001.79726022, 955.047416547, 1044.27458568;
+
+ expected_out_array << -32.7256441441, -36.4394150514, -9.66467612263,
+ -36.4394150514, -36.4394150514, -1.0891900302, -2.66351229645,
+ -2.48666868596, -0.929700494428, -3.56327722764, -0.455320135314,
+ -0.391437214323, -0.491352055991, -0.350454834292, -0.471773162921,
+ -0.104084440522, -0.0723646747909, -0.0992828975532, -0.121638215446,
+ -0.122619605294, -0.0317670267286, -0.0359974812869, -0.0154359225363,
+ -0.0375775365921, -0.00794899153653, -0.00777303219211, -0.00796085782042,
+ -0.0125850719397, -0.00455500206958, -0.00476436993148;
+
+ for (int i = 0; i < 30; ++i) {
+ in_x(i) = in_x_array(i);
+ in_a(i) = in_a_array(i);
+ expected_out(i) = expected_out_array(i);
+ }
+
+ std::size_t bytes = in_x.size() * sizeof(Scalar);
+
+ Scalar* d_a;
+ Scalar* d_x;
+ Scalar* d_out;
+ gpuMalloc((void**)(&d_a), bytes);
+ gpuMalloc((void**)(&d_x), bytes);
+ gpuMalloc((void**)(&d_out), bytes);
+
+ gpuMemcpy(d_a, in_a.data(), bytes, gpuMemcpyHostToDevice);
+ gpuMemcpy(d_x, in_x.data(), bytes, gpuMemcpyHostToDevice);
+
+ Eigen::GpuStreamDevice stream;
+ Eigen::GpuDevice gpu_device(&stream);
+
+ Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_a(d_a, 30);
+ Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_x(d_x, 30);
+ Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_out(d_out, 30);
+
+ gpu_out.device(gpu_device) = gpu_a.igamma_der_a(gpu_x);
+
+ assert(gpuMemcpyAsync(out.data(), d_out, bytes, gpuMemcpyDeviceToHost,
+ gpu_device.stream()) == gpuSuccess);
+ assert(gpuStreamSynchronize(gpu_device.stream()) == gpuSuccess);
+
+ for (int i = 0; i < 30; ++i) {
+ VERIFY_IS_APPROX(out(i), expected_out(i));
+ }
+
+ gpuFree(d_a);
+ gpuFree(d_x);
+ gpuFree(d_out);
+}
+
+template <typename Scalar>
+void test_gpu_gamma_sample_der_alpha()
+{
+ Tensor<Scalar, 1> in_alpha(30);
+ Tensor<Scalar, 1> in_sample(30);
+ Tensor<Scalar, 1> out(30);
+ Tensor<Scalar, 1> expected_out(30);
+ out.setZero();
+
+ Array<Scalar, 1, Dynamic> in_alpha_array(30);
+ Array<Scalar, 1, Dynamic> in_sample_array(30);
+ Array<Scalar, 1, Dynamic> expected_out_array(30);
+
+ // See special_functions.cpp for the Python code that generates the test data.
+
+ in_alpha_array << 0.01, 0.01, 0.01, 0.01, 0.01, 0.1, 0.1, 0.1, 0.1, 0.1, 1.0,
+ 1.0, 1.0, 1.0, 1.0, 10.0, 10.0, 10.0, 10.0, 10.0, 100.0, 100.0, 100.0,
+ 100.0, 100.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0;
+
+ in_sample_array << 1.25668890405e-26, 1.17549435082e-38, 1.20938905072e-05,
+ 1.17549435082e-38, 1.17549435082e-38, 5.66572070696e-16, 0.0132865061065,
+ 0.0200034203853, 6.29263709118e-17, 1.37160367764e-06, 0.333412038288,
+ 1.18135687766, 0.580629033777, 0.170631439426, 0.786686768458,
+ 7.63873279537, 13.1944344379, 11.896042354, 10.5830172417, 10.5020942233,
+ 92.8918587747, 95.003720371, 86.3715926467, 96.0330217672, 82.6389930677,
+ 968.702906754, 969.463546828, 1001.79726022, 955.047416547, 1044.27458568;
+
+ expected_out_array << 7.42424742367e-23, 1.02004297287e-34, 0.0130155240738,
+ 1.02004297287e-34, 1.02004297287e-34, 1.96505168277e-13, 0.525575786243,
+ 0.713903991771, 2.32077561808e-14, 0.000179348049886, 0.635500453302,
+ 1.27561284917, 0.878125852156, 0.41565819538, 1.03606488534,
+ 0.885964824887, 1.16424049334, 1.10764479598, 1.04590810812,
+ 1.04193666963, 0.965193152414, 0.976217589464, 0.93008035061,
+ 0.98153216096, 0.909196397698, 0.98434963993, 0.984738050206,
+ 1.00106492525, 0.97734200649, 1.02198794179;
+
+ for (int i = 0; i < 30; ++i) {
+ in_alpha(i) = in_alpha_array(i);
+ in_sample(i) = in_sample_array(i);
+ expected_out(i) = expected_out_array(i);
+ }
+
+ std::size_t bytes = in_alpha.size() * sizeof(Scalar);
+
+ Scalar* d_alpha;
+ Scalar* d_sample;
+ Scalar* d_out;
+ gpuMalloc((void**)(&d_alpha), bytes);
+ gpuMalloc((void**)(&d_sample), bytes);
+ gpuMalloc((void**)(&d_out), bytes);
+
+ gpuMemcpy(d_alpha, in_alpha.data(), bytes, gpuMemcpyHostToDevice);
+ gpuMemcpy(d_sample, in_sample.data(), bytes, gpuMemcpyHostToDevice);
+
+ Eigen::GpuStreamDevice stream;
+ Eigen::GpuDevice gpu_device(&stream);
+
+ Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_alpha(d_alpha, 30);
+ Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_sample(d_sample, 30);
+ Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_out(d_out, 30);
+
+ gpu_out.device(gpu_device) = gpu_alpha.gamma_sample_der_alpha(gpu_sample);
+
+ assert(gpuMemcpyAsync(out.data(), d_out, bytes, gpuMemcpyDeviceToHost,
+ gpu_device.stream()) == gpuSuccess);
+ assert(gpuStreamSynchronize(gpu_device.stream()) == gpuSuccess);
+
+ for (int i = 0; i < 30; ++i) {
+ VERIFY_IS_APPROX(out(i), expected_out(i));
+ }
+
+ gpuFree(d_alpha);
+ gpuFree(d_sample);
+ gpuFree(d_out);
+}
+
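Likewise, the two tests directly above cover derivative-style special functions. A plausible reading, not spelled out in the diff itself: igamma_der_a(a, x) is the partial derivative with respect to a of the regularized lower incomplete gamma function P(a, x), and gamma_sample_der_alpha(alpha, sample) is the derivative of a Gamma(alpha, 1) sample with respect to alpha, obtained by implicit differentiation of P:

    d(sample)/d(alpha) = -(dP/dalpha) / (dP/dx), evaluated at x = sample

These interpretations are stated here only to make the hard-coded expected arrays easier to audit; the authoritative definitions are the special-function implementations the tests exercise.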
+EIGEN_DECLARE_TEST(cxx11_tensor_gpu)
+{
+ CALL_SUBTEST_1(test_gpu_nullary());
+ CALL_SUBTEST_1(test_gpu_elementwise_small());
+ CALL_SUBTEST_1(test_gpu_elementwise());
+ CALL_SUBTEST_1(test_gpu_props());
+ CALL_SUBTEST_1(test_gpu_reduction());
+ CALL_SUBTEST_2(test_gpu_contraction<ColMajor>());
+ CALL_SUBTEST_2(test_gpu_contraction<RowMajor>());
+ CALL_SUBTEST_3(test_gpu_convolution_1d<ColMajor>());
+ CALL_SUBTEST_3(test_gpu_convolution_1d<RowMajor>());
+ CALL_SUBTEST_3(test_gpu_convolution_inner_dim_col_major_1d());
+ CALL_SUBTEST_3(test_gpu_convolution_inner_dim_row_major_1d());
+ CALL_SUBTEST_3(test_gpu_convolution_2d<ColMajor>());
+ CALL_SUBTEST_3(test_gpu_convolution_2d<RowMajor>());
+#if !defined(EIGEN_USE_HIP)
+// disable these tests on HIP for now.
// they hang; need to investigate and fix.
+ CALL_SUBTEST_3(test_gpu_convolution_3d<ColMajor>());
+ CALL_SUBTEST_3(test_gpu_convolution_3d<RowMajor>());
+#endif
#if __cplusplus > 199711L
// std::erf, std::erfc, and so on were only added in C++11. We use them
// as a golden reference to validate the results produced by Eigen. Therefore
// we can only run these tests if we use a C++11 compiler.
- CALL_SUBTEST_4(test_cuda_lgamma<float>(1.0f));
- CALL_SUBTEST_4(test_cuda_lgamma<float>(100.0f));
- CALL_SUBTEST_4(test_cuda_lgamma<float>(0.01f));
- CALL_SUBTEST_4(test_cuda_lgamma<float>(0.001f));
-
- CALL_SUBTEST_4(test_cuda_lgamma<double>(1.0));
- CALL_SUBTEST_4(test_cuda_lgamma<double>(100.0));
- CALL_SUBTEST_4(test_cuda_lgamma<double>(0.01));
- CALL_SUBTEST_4(test_cuda_lgamma<double>(0.001));
-
- CALL_SUBTEST_4(test_cuda_erf<float>(1.0f));
- CALL_SUBTEST_4(test_cuda_erf<float>(100.0f));
- CALL_SUBTEST_4(test_cuda_erf<float>(0.01f));
- CALL_SUBTEST_4(test_cuda_erf<float>(0.001f));
-
- CALL_SUBTEST_4(test_cuda_erfc<float>(1.0f));
- // CALL_SUBTEST(test_cuda_erfc<float>(100.0f));
- CALL_SUBTEST_4(test_cuda_erfc<float>(5.0f)); // CUDA erfc lacks precision for large inputs
- CALL_SUBTEST_4(test_cuda_erfc<float>(0.01f));
- CALL_SUBTEST_4(test_cuda_erfc<float>(0.001f));
-
- CALL_SUBTEST_4(test_cuda_erf<double>(1.0));
- CALL_SUBTEST_4(test_cuda_erf<double>(100.0));
- CALL_SUBTEST_4(test_cuda_erf<double>(0.01));
- CALL_SUBTEST_4(test_cuda_erf<double>(0.001));
-
- CALL_SUBTEST_4(test_cuda_erfc<double>(1.0));
- // CALL_SUBTEST(test_cuda_erfc<double>(100.0));
- CALL_SUBTEST_4(test_cuda_erfc<double>(5.0)); // CUDA erfc lacks precision for large inputs
- CALL_SUBTEST_4(test_cuda_erfc<double>(0.01));
- CALL_SUBTEST_4(test_cuda_erfc<double>(0.001));
-
- CALL_SUBTEST_5(test_cuda_digamma<float>());
- CALL_SUBTEST_5(test_cuda_digamma<double>());
-
- CALL_SUBTEST_5(test_cuda_polygamma<float>());
- CALL_SUBTEST_5(test_cuda_polygamma<double>());
-
- CALL_SUBTEST_5(test_cuda_zeta<float>());
- CALL_SUBTEST_5(test_cuda_zeta<double>());
-
- CALL_SUBTEST_5(test_cuda_igamma<float>());
- CALL_SUBTEST_5(test_cuda_igammac<float>());
-
- CALL_SUBTEST_5(test_cuda_igamma<double>());
- CALL_SUBTEST_5(test_cuda_igammac<double>());
-
- CALL_SUBTEST_6(test_cuda_betainc<float>());
- CALL_SUBTEST_6(test_cuda_betainc<double>());
+ CALL_SUBTEST_4(test_gpu_lgamma<float>(1.0f));
+ CALL_SUBTEST_4(test_gpu_lgamma<float>(100.0f));
+ CALL_SUBTEST_4(test_gpu_lgamma<float>(0.01f));
+ CALL_SUBTEST_4(test_gpu_lgamma<float>(0.001f));
+
+ CALL_SUBTEST_4(test_gpu_lgamma<double>(1.0));
+ CALL_SUBTEST_4(test_gpu_lgamma<double>(100.0));
+ CALL_SUBTEST_4(test_gpu_lgamma<double>(0.01));
+ CALL_SUBTEST_4(test_gpu_lgamma<double>(0.001));
+
+ CALL_SUBTEST_4(test_gpu_erf<float>(1.0f));
+ CALL_SUBTEST_4(test_gpu_erf<float>(100.0f));
+ CALL_SUBTEST_4(test_gpu_erf<float>(0.01f));
+ CALL_SUBTEST_4(test_gpu_erf<float>(0.001f));
+
+ CALL_SUBTEST_4(test_gpu_erfc<float>(1.0f));
+ // CALL_SUBTEST(test_gpu_erfc<float>(100.0f));
+ CALL_SUBTEST_4(test_gpu_erfc<float>(5.0f)); // GPU erfc lacks precision for large inputs
+ CALL_SUBTEST_4(test_gpu_erfc<float>(0.01f));
+ CALL_SUBTEST_4(test_gpu_erfc<float>(0.001f));
+
+ CALL_SUBTEST_4(test_gpu_erf<double>(1.0));
+ CALL_SUBTEST_4(test_gpu_erf<double>(100.0));
+ CALL_SUBTEST_4(test_gpu_erf<double>(0.01));
+ CALL_SUBTEST_4(test_gpu_erf<double>(0.001));
+
+ CALL_SUBTEST_4(test_gpu_erfc<double>(1.0));
+ // CALL_SUBTEST(test_gpu_erfc<double>(100.0));
+ CALL_SUBTEST_4(test_gpu_erfc<double>(5.0)); // GPU erfc lacks precision for large inputs
+ CALL_SUBTEST_4(test_gpu_erfc<double>(0.01));
+ CALL_SUBTEST_4(test_gpu_erfc<double>(0.001));
+
+#if !defined(EIGEN_USE_HIP)
+// disable these tests on HIP for now.
+ CALL_SUBTEST_5(test_gpu_digamma<float>());
+ CALL_SUBTEST_5(test_gpu_digamma<double>());
+
+ CALL_SUBTEST_5(test_gpu_polygamma<float>());
+ CALL_SUBTEST_5(test_gpu_polygamma<double>());
+
+ CALL_SUBTEST_5(test_gpu_zeta<float>());
+ CALL_SUBTEST_5(test_gpu_zeta<double>());
+#endif
+
+ CALL_SUBTEST_5(test_gpu_igamma<float>());
+ CALL_SUBTEST_5(test_gpu_igammac<float>());
+
+ CALL_SUBTEST_5(test_gpu_igamma<double>());
+ CALL_SUBTEST_5(test_gpu_igammac<double>());
+
+#if !defined(EIGEN_USE_HIP)
+// disable these tests on HIP for now.
+ CALL_SUBTEST_6(test_gpu_betainc<float>());
+ CALL_SUBTEST_6(test_gpu_betainc<double>());
+
+ CALL_SUBTEST_6(test_gpu_i0e<float>());
+ CALL_SUBTEST_6(test_gpu_i0e<double>());
+
+ CALL_SUBTEST_6(test_gpu_i1e<float>());
+ CALL_SUBTEST_6(test_gpu_i1e<double>());
+
+ CALL_SUBTEST_6(test_gpu_igamma_der_a<float>());
+ CALL_SUBTEST_6(test_gpu_igamma_der_a<double>());
+
+ CALL_SUBTEST_6(test_gpu_gamma_sample_der_alpha<float>());
+ CALL_SUBTEST_6(test_gpu_gamma_sample_der_alpha<double>());
+#endif
+
#endif
}
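The hunks above are a mechanical rename from the cuda* runtime API (and Eigen::CudaStreamDevice) to gpu* wrappers (and Eigen::GpuStreamDevice), so the same test source can be built for either CUDA or HIP. Below is a minimal sketch of the kind of portability shim such a rename presupposes; the gpu* spellings are taken from the tests themselves, while the mapping Eigen actually ships lives in its GPU/HIP defines header and may differ in detail.

// Sketch only: map the gpu* names used in the tests onto the HIP or CUDA
// runtime, depending on how the translation unit is being compiled.
#if defined(EIGEN_USE_HIP)
  #include <hip/hip_runtime.h>
  #define gpuMalloc              hipMalloc
  #define gpuMemcpy              hipMemcpy
  #define gpuMemcpyAsync         hipMemcpyAsync
  #define gpuMemcpyHostToDevice  hipMemcpyHostToDevice
  #define gpuMemcpyDeviceToHost  hipMemcpyDeviceToHost
  #define gpuStreamSynchronize   hipStreamSynchronize
  #define gpuFree                hipFree
  #define gpuSuccess             hipSuccess
#else  // CUDA
  #include <cuda_runtime.h>
  #define gpuMalloc              cudaMalloc
  #define gpuMemcpy              cudaMemcpy
  #define gpuMemcpyAsync         cudaMemcpyAsync
  #define gpuMemcpyHostToDevice  cudaMemcpyHostToDevice
  #define gpuMemcpyDeviceToHost  cudaMemcpyDeviceToHost
  #define gpuStreamSynchronize   cudaStreamSynchronize
  #define gpuFree                cudaFree
  #define gpuSuccess             cudaSuccess
#endif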
diff --git a/unsupported/test/cxx11_tensor_ifft.cpp b/unsupported/test/cxx11_tensor_ifft.cpp
index 5fd88fa6c..c20edd9ac 100644
--- a/unsupported/test/cxx11_tensor_ifft.cpp
+++ b/unsupported/test/cxx11_tensor_ifft.cpp
@@ -131,7 +131,7 @@ static void test_sub_fft_ifft_invariant(int dim0, int dim1, int dim2, int dim3)
}
}
-void test_cxx11_tensor_ifft() {
+EIGEN_DECLARE_TEST(cxx11_tensor_ifft) {
CALL_SUBTEST(test_1D_fft_ifft_invariant<ColMajor>(4));
CALL_SUBTEST(test_1D_fft_ifft_invariant<ColMajor>(16));
CALL_SUBTEST(test_1D_fft_ifft_invariant<ColMajor>(32));
diff --git a/unsupported/test/cxx11_tensor_image_patch.cpp b/unsupported/test/cxx11_tensor_image_patch.cpp
index 475c59651..862f1f7f0 100644
--- a/unsupported/test/cxx11_tensor_image_patch.cpp
+++ b/unsupported/test/cxx11_tensor_image_patch.cpp
@@ -405,6 +405,57 @@ void test_patch_padding_same()
}
}
+// Verifies that SAME padding, when computed as negative values, will be clipped
+// to zero.
+void test_patch_padding_same_negative_padding_clip_to_zero() {
+ int input_depth = 1;
+ int input_rows = 15;
+ int input_cols = 1;
+ int input_batches = 1;
+ int ksize = 1; // Corresponds to the Rows and Cols for
+ // tensor.extract_image_patches<>.
+ int row_stride = 5;
+ int col_stride = 1;
+ // ColMajor
+ Tensor<float, 4> tensor(input_depth, input_rows, input_cols, input_batches);
+ // Initializes tensor with incrementing numbers.
+ for (int i = 0; i < tensor.size(); ++i) {
+ tensor.data()[i] = i + 1;
+ }
+ Tensor<float, 5> result = tensor.extract_image_patches(
+ ksize, ksize, row_stride, col_stride, 1, 1, PADDING_SAME);
+ // row padding will be computed as -2 originally and then be clipped to 0.
+ VERIFY_IS_EQUAL(result.coeff(0), 1.0f);
+ VERIFY_IS_EQUAL(result.coeff(1), 6.0f);
+ VERIFY_IS_EQUAL(result.coeff(2), 11.0f);
+
+ VERIFY_IS_EQUAL(result.dimension(0), input_depth); // depth
+ VERIFY_IS_EQUAL(result.dimension(1), ksize); // kernel rows
+ VERIFY_IS_EQUAL(result.dimension(2), ksize); // kernel cols
+ VERIFY_IS_EQUAL(result.dimension(3), 3); // number of patches
+ VERIFY_IS_EQUAL(result.dimension(4), input_batches); // number of batches
+
+ // RowMajor
+ Tensor<float, 4, RowMajor> tensor_row_major = tensor.swap_layout();
+ VERIFY_IS_EQUAL(tensor.dimension(0), tensor_row_major.dimension(3));
+ VERIFY_IS_EQUAL(tensor.dimension(1), tensor_row_major.dimension(2));
+ VERIFY_IS_EQUAL(tensor.dimension(2), tensor_row_major.dimension(1));
+ VERIFY_IS_EQUAL(tensor.dimension(3), tensor_row_major.dimension(0));
+
+ Tensor<float, 5, RowMajor> result_row_major =
+ tensor_row_major.extract_image_patches(ksize, ksize, row_stride,
+ col_stride, 1, 1, PADDING_SAME);
+ VERIFY_IS_EQUAL(result_row_major.coeff(0), 1.0f);
+ VERIFY_IS_EQUAL(result_row_major.coeff(1), 6.0f);
+ VERIFY_IS_EQUAL(result_row_major.coeff(2), 11.0f);
+
+ VERIFY_IS_EQUAL(result.dimension(0), result_row_major.dimension(4));
+ VERIFY_IS_EQUAL(result.dimension(1), result_row_major.dimension(3));
+ VERIFY_IS_EQUAL(result.dimension(2), result_row_major.dimension(2));
+ VERIFY_IS_EQUAL(result.dimension(3), result_row_major.dimension(1));
+ VERIFY_IS_EQUAL(result.dimension(4), result_row_major.dimension(0));
+}
+
void test_patch_no_extra_dim()
{
Tensor<float, 3> tensor(2,3,5);
@@ -746,7 +797,7 @@ void test_imagenet_patches()
}
}
-void test_cxx11_tensor_image_patch()
+EIGEN_DECLARE_TEST(cxx11_tensor_image_patch)
{
CALL_SUBTEST_1(test_simple_patch());
CALL_SUBTEST_2(test_patch_no_extra_dim());
@@ -754,4 +805,5 @@ void test_cxx11_tensor_image_patch()
CALL_SUBTEST_4(test_patch_padding_valid_same_value());
CALL_SUBTEST_5(test_patch_padding_same());
CALL_SUBTEST_6(test_imagenet_patches());
+ CALL_SUBTEST_7(test_patch_padding_same_negative_padding_clip_to_zero());
}
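The negative-padding test added in the hunk above is easier to audit with the arithmetic written out. Assuming the usual SAME-padding formula (output size = ceil(input / stride), total padding = (output - 1) * stride + ksize - input, split across the two sides):

    out_rows          = ceil(15 / 5)          = 3
    padding_rows      = (3 - 1) * 5 + 1 - 15  = -4
    per-side row pad  = -4 / 2                = -2  ->  clipped to 0

so the three extracted 1x1 patches start at rows 0, 5 and 10, i.e. the values 1, 6 and 11 checked by the VERIFY_IS_EQUAL calls.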
diff --git a/unsupported/test/cxx11_tensor_image_patch_sycl.cpp b/unsupported/test/cxx11_tensor_image_patch_sycl.cpp
new file mode 100644
index 000000000..c1828a0ec
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_image_patch_sycl.cpp
@@ -0,0 +1,1092 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2016
+// Mehdi Goli Codeplay Software Ltd.
+// Ralph Potter Codeplay Software Ltd.
+// Luke Iwanski Codeplay Software Ltd.
+// Contact: <eigen@codeplay.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#define EIGEN_TEST_NO_LONGDOUBLE
+#define EIGEN_TEST_NO_COMPLEX
+
+#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int64_t
+#define EIGEN_USE_SYCL
+
+#include "main.h"
+#include <unsupported/Eigen/CXX11/Tensor>
+
+using Eigen::Tensor;
+static const int DataLayout = ColMajor;
+
+template <typename DataType, typename IndexType>
+static void test_simple_image_patch_sycl(const Eigen::SyclDevice& sycl_device)
+{
+ IndexType sizeDim1 = 2;
+ IndexType sizeDim2 = 3;
+ IndexType sizeDim3 = 5;
+ IndexType sizeDim4 = 7;
+ array<IndexType, 4> tensorColMajorRange = {{sizeDim1, sizeDim2, sizeDim3, sizeDim4}};
+ array<IndexType, 4> tensorRowMajorRange = {{sizeDim4, sizeDim3, sizeDim2, sizeDim1}};
+ Tensor<DataType, 4, DataLayout,IndexType> tensor_col_major(tensorColMajorRange);
+ Tensor<DataType, 4, RowMajor,IndexType> tensor_row_major(tensorRowMajorRange);
+ tensor_col_major.setRandom();
+
+ DataType* gpu_data_col_major = static_cast<DataType*>(sycl_device.allocate(tensor_col_major.size()*sizeof(DataType)));
+ DataType* gpu_data_row_major = static_cast<DataType*>(sycl_device.allocate(tensor_row_major.size()*sizeof(DataType)));
+ TensorMap<Tensor<DataType, 4, ColMajor, IndexType>> gpu_col_major(gpu_data_col_major, tensorColMajorRange);
+ TensorMap<Tensor<DataType, 4, RowMajor, IndexType>> gpu_row_major(gpu_data_row_major, tensorRowMajorRange);
+
+ sycl_device.memcpyHostToDevice(gpu_data_col_major, tensor_col_major.data(),(tensor_col_major.size())*sizeof(DataType));
+ gpu_row_major.device(sycl_device)=gpu_col_major.swap_layout();
+ sycl_device.memcpyDeviceToHost(tensor_row_major.data(), gpu_data_row_major, (tensor_col_major.size())*sizeof(DataType));
+
+ VERIFY_IS_EQUAL(tensor_col_major.dimension(0), tensor_row_major.dimension(3));
+ VERIFY_IS_EQUAL(tensor_col_major.dimension(1), tensor_row_major.dimension(2));
+ VERIFY_IS_EQUAL(tensor_col_major.dimension(2), tensor_row_major.dimension(1));
+ VERIFY_IS_EQUAL(tensor_col_major.dimension(3), tensor_row_major.dimension(0));
+
+ // Single pixel patch: ColMajor
+ array<IndexType, 5> patchColMajorTensorRange={{sizeDim1, 1, 1, sizeDim2*sizeDim3, sizeDim4}};
+ Tensor<DataType, 5, DataLayout,IndexType> single_patch_col_major(patchColMajorTensorRange);
+ size_t patchTensorBuffSize =single_patch_col_major.size()*sizeof(DataType);
+ DataType* gpu_data_single_patch_col_major = static_cast<DataType*>(sycl_device.allocate(patchTensorBuffSize));
+ TensorMap<Tensor<DataType, 5, DataLayout,IndexType>> gpu_single_patch_col_major(gpu_data_single_patch_col_major, patchColMajorTensorRange);
+ gpu_single_patch_col_major.device(sycl_device)=gpu_col_major.extract_image_patches(1, 1);
+ sycl_device.memcpyDeviceToHost(single_patch_col_major.data(), gpu_data_single_patch_col_major, patchTensorBuffSize);
+
+ VERIFY_IS_EQUAL(single_patch_col_major.dimension(0), 2);
+ VERIFY_IS_EQUAL(single_patch_col_major.dimension(1), 1);
+ VERIFY_IS_EQUAL(single_patch_col_major.dimension(2), 1);
+ VERIFY_IS_EQUAL(single_patch_col_major.dimension(3), 3*5);
+ VERIFY_IS_EQUAL(single_patch_col_major.dimension(4), 7);
+
+ // Single pixel patch: RowMajor
+ array<IndexType, 5> patchRowMajorTensorRange={{sizeDim4, sizeDim2*sizeDim3, 1, 1, sizeDim1}};
+ Tensor<DataType, 5, RowMajor,IndexType> single_patch_row_major(patchRowMajorTensorRange);
+ patchTensorBuffSize =single_patch_row_major.size()*sizeof(DataType);
+ DataType* gpu_data_single_patch_row_major = static_cast<DataType*>(sycl_device.allocate(patchTensorBuffSize));
+ TensorMap<Tensor<DataType, 5, RowMajor,IndexType>> gpu_single_patch_row_major(gpu_data_single_patch_row_major, patchRowMajorTensorRange);
+ gpu_single_patch_row_major.device(sycl_device)=gpu_row_major.extract_image_patches(1, 1);
+ sycl_device.memcpyDeviceToHost(single_patch_row_major.data(), gpu_data_single_patch_row_major, patchTensorBuffSize);
+
+ VERIFY_IS_EQUAL(single_patch_row_major.dimension(0), 7);
+ VERIFY_IS_EQUAL(single_patch_row_major.dimension(1), 3*5);
+ VERIFY_IS_EQUAL(single_patch_row_major.dimension(2), 1);
+ VERIFY_IS_EQUAL(single_patch_row_major.dimension(3), 1);
+ VERIFY_IS_EQUAL(single_patch_row_major.dimension(4), 2);
+
+ for (IndexType i = 0; i < tensor_col_major.size(); ++i) {
+ // ColMajor
+ if (tensor_col_major.data()[i] != single_patch_col_major.data()[i]) {
+ std::cout << "Mismatch detected at index colmajor " << i << " : "
+ << tensor_col_major.data()[i] << " vs " << single_patch_col_major.data()[i]
+ << std::endl;
+ }
+ VERIFY_IS_EQUAL(single_patch_col_major.data()[i], tensor_col_major.data()[i]);
+ // RowMajor
+ if (tensor_row_major.data()[i] != single_patch_row_major.data()[i]) {
+ std::cout << "Mismatch detected at index row major" << i << " : "
+ << tensor_row_major.data()[i] << " vs "
+ << single_patch_row_major.data()[i] << std::endl;
+ }
+ VERIFY_IS_EQUAL(single_patch_row_major.data()[i],
+ tensor_row_major.data()[i]);
+ VERIFY_IS_EQUAL(tensor_col_major.data()[i], tensor_row_major.data()[i]);
+ VERIFY_IS_EQUAL(single_patch_col_major.data()[i],
+ single_patch_row_major.data()[i]);
+ }
+
+
+ // Entire image patch: ColMajor
+ patchColMajorTensorRange={{sizeDim1, sizeDim2, sizeDim3, sizeDim2*sizeDim3, sizeDim4}};
+ Tensor<DataType, 5, DataLayout,IndexType> entire_image_patch_col_major(patchColMajorTensorRange);
+ patchTensorBuffSize =entire_image_patch_col_major.size()*sizeof(DataType);
+ DataType* gpu_data_entire_image_patch_col_major = static_cast<DataType*>(sycl_device.allocate(patchTensorBuffSize));
+ TensorMap<Tensor<DataType, 5, DataLayout,IndexType>> gpu_entire_image_patch_col_major(gpu_data_entire_image_patch_col_major, patchColMajorTensorRange);
+ gpu_entire_image_patch_col_major.device(sycl_device)=gpu_col_major.extract_image_patches(3, 5);
+ sycl_device.memcpyDeviceToHost(entire_image_patch_col_major.data(), gpu_data_entire_image_patch_col_major, patchTensorBuffSize);
+
+ VERIFY_IS_EQUAL(entire_image_patch_col_major.dimension(0), 2);
+ VERIFY_IS_EQUAL(entire_image_patch_col_major.dimension(1), 3);
+ VERIFY_IS_EQUAL(entire_image_patch_col_major.dimension(2), 5);
+ VERIFY_IS_EQUAL(entire_image_patch_col_major.dimension(3), 3*5);
+ VERIFY_IS_EQUAL(entire_image_patch_col_major.dimension(4), 7);
+
+ // Entire image patch: RowMajor
+ patchRowMajorTensorRange={{sizeDim4, sizeDim2*sizeDim3, sizeDim3, sizeDim2, sizeDim1}};
+ Tensor<DataType, 5, RowMajor,IndexType> entire_image_patch_row_major(patchRowMajorTensorRange);
+ patchTensorBuffSize =entire_image_patch_row_major.size()*sizeof(DataType);
+ DataType* gpu_data_entire_image_patch_row_major = static_cast<DataType*>(sycl_device.allocate(patchTensorBuffSize));
+ TensorMap<Tensor<DataType, 5, RowMajor,IndexType>> gpu_entire_image_patch_row_major(gpu_data_entire_image_patch_row_major, patchRowMajorTensorRange);
+ gpu_entire_image_patch_row_major.device(sycl_device)=gpu_row_major.extract_image_patches(3, 5);
+ sycl_device.memcpyDeviceToHost(entire_image_patch_row_major.data(), gpu_data_entire_image_patch_row_major, patchTensorBuffSize);
+
+ VERIFY_IS_EQUAL(entire_image_patch_row_major.dimension(0), 7);
+ VERIFY_IS_EQUAL(entire_image_patch_row_major.dimension(1), 3*5);
+ VERIFY_IS_EQUAL(entire_image_patch_row_major.dimension(2), 5);
+ VERIFY_IS_EQUAL(entire_image_patch_row_major.dimension(3), 3);
+ VERIFY_IS_EQUAL(entire_image_patch_row_major.dimension(4), 2);
+
+ for (IndexType i = 0; i < 3; ++i) {
+ for (IndexType j = 0; j < 5; ++j) {
+ IndexType patchId = i+3*j;
+ for (IndexType r = 0; r < 3; ++r) {
+ for (IndexType c = 0; c < 5; ++c) {
+ for (IndexType d = 0; d < 2; ++d) {
+ for (IndexType b = 0; b < 7; ++b) {
+ DataType expected_col_major = 0.0f;
+ DataType expected_row_major = 0.0f;
+ if (r-1+i >= 0 && c-2+j >= 0 && r-1+i < 3 && c-2+j < 5) {
+ expected_col_major = tensor_col_major(d, r-1+i, c-2+j, b);
+ expected_row_major = tensor_row_major(b, c-2+j, r-1+i, d);
+ }
+ // ColMajor
+ if (entire_image_patch_col_major(d, r, c, patchId, b) != expected_col_major) {
+ std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl;
+ }
+ VERIFY_IS_EQUAL(entire_image_patch_col_major(d, r, c, patchId, b), expected_col_major);
+ // RowMajor
+ if (entire_image_patch_row_major(b, patchId, c, r, d) !=
+ expected_row_major) {
+ std::cout << "Mismatch detected at index i=" << i << " j=" << j
+ << " r=" << r << " c=" << c << " d=" << d << " b=" << b
+ << std::endl;
+ }
+ VERIFY_IS_EQUAL(entire_image_patch_row_major(b, patchId, c, r, d),
+ expected_row_major);
+ // Check that ColMajor and RowMajor agree.
+ VERIFY_IS_EQUAL(expected_col_major, expected_row_major);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // 2D patch: ColMajor
+ patchColMajorTensorRange={{sizeDim1, 2, 2, sizeDim2*sizeDim3, sizeDim4}};
+ Tensor<DataType, 5, DataLayout,IndexType> twod_patch_col_major(patchColMajorTensorRange);
+ patchTensorBuffSize =twod_patch_col_major.size()*sizeof(DataType);
+ DataType* gpu_data_twod_patch_col_major = static_cast<DataType*>(sycl_device.allocate(patchTensorBuffSize));
+ TensorMap<Tensor<DataType, 5, DataLayout,IndexType>> gpu_twod_patch_col_major(gpu_data_twod_patch_col_major, patchColMajorTensorRange);
+ gpu_twod_patch_col_major.device(sycl_device)=gpu_col_major.extract_image_patches(2, 2);
+ sycl_device.memcpyDeviceToHost(twod_patch_col_major.data(), gpu_data_twod_patch_col_major, patchTensorBuffSize);
+
+ VERIFY_IS_EQUAL(twod_patch_col_major.dimension(0), 2);
+ VERIFY_IS_EQUAL(twod_patch_col_major.dimension(1), 2);
+ VERIFY_IS_EQUAL(twod_patch_col_major.dimension(2), 2);
+ VERIFY_IS_EQUAL(twod_patch_col_major.dimension(3), 3*5);
+ VERIFY_IS_EQUAL(twod_patch_col_major.dimension(4), 7);
+
+ // 2D patch: RowMajor
+ patchRowMajorTensorRange={{sizeDim4, sizeDim2*sizeDim3, 2, 2, sizeDim1}};
+ Tensor<DataType, 5, RowMajor,IndexType> twod_patch_row_major(patchRowMajorTensorRange);
+ patchTensorBuffSize =twod_patch_row_major.size()*sizeof(DataType);
+ DataType* gpu_data_twod_patch_row_major = static_cast<DataType*>(sycl_device.allocate(patchTensorBuffSize));
+ TensorMap<Tensor<DataType, 5, RowMajor,IndexType>> gpu_twod_patch_row_major(gpu_data_twod_patch_row_major, patchRowMajorTensorRange);
+ gpu_twod_patch_row_major.device(sycl_device)=gpu_row_major.extract_image_patches(2, 2);
+ sycl_device.memcpyDeviceToHost(twod_patch_row_major.data(), gpu_data_twod_patch_row_major, patchTensorBuffSize);
+
+ VERIFY_IS_EQUAL(twod_patch_row_major.dimension(0), 7);
+ VERIFY_IS_EQUAL(twod_patch_row_major.dimension(1), 3*5);
+ VERIFY_IS_EQUAL(twod_patch_row_major.dimension(2), 2);
+ VERIFY_IS_EQUAL(twod_patch_row_major.dimension(3), 2);
+ VERIFY_IS_EQUAL(twod_patch_row_major.dimension(4), 2);
+
+
+ // Based on the calculation described in TensorTraits.h, padding happens to be 0.
+ IndexType row_padding = 0;
+ IndexType col_padding = 0;
+ IndexType stride = 1;
+
+ for (IndexType i = 0; i < 3; ++i) {
+ for (IndexType j = 0; j < 5; ++j) {
+ IndexType patchId = i+3*j;
+ for (IndexType r = 0; r < 2; ++r) {
+ for (IndexType c = 0; c < 2; ++c) {
+ for (IndexType d = 0; d < 2; ++d) {
+ for (IndexType b = 0; b < 7; ++b) {
+ DataType expected_col_major = 0.0f;
+ DataType expected_row_major = 0.0f;
+ IndexType row_offset = r*stride + i - row_padding;
+ IndexType col_offset = c*stride + j - col_padding;
+ // ColMajor
+ if (row_offset >= 0 && col_offset >= 0 && row_offset < tensor_col_major.dimension(1) && col_offset < tensor_col_major.dimension(2)) {
+ expected_col_major = tensor_col_major(d, row_offset, col_offset, b);
+ }
+ if (twod_patch_col_major(d, r, c, patchId, b) != expected_col_major) {
+ std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl;
+ }
+ VERIFY_IS_EQUAL(twod_patch_col_major(d, r, c, patchId, b), expected_col_major);
+
+ // RowMajor
+ if (row_offset >= 0 && col_offset >= 0 && row_offset < tensor_row_major.dimension(2) && col_offset < tensor_row_major.dimension(1)) {
+ expected_row_major = tensor_row_major(b, col_offset, row_offset, d);
+
+ }
+ if (twod_patch_row_major(b, patchId, c, r, d) != expected_row_major) {
+ std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl;
+ }
+ VERIFY_IS_EQUAL(twod_patch_row_major(b, patchId, c, r, d), expected_row_major);
+ // Check that ColMajor and RowMajor agree.
+ VERIFY_IS_EQUAL(expected_col_major, expected_row_major);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ sycl_device.deallocate(gpu_data_col_major);
+ sycl_device.deallocate(gpu_data_row_major);
+ sycl_device.deallocate(gpu_data_single_patch_col_major);
+ sycl_device.deallocate(gpu_data_single_patch_row_major);
+ sycl_device.deallocate(gpu_data_entire_image_patch_col_major);
+ sycl_device.deallocate(gpu_data_entire_image_patch_row_major);
+ sycl_device.deallocate(gpu_data_twod_patch_col_major);
+ sycl_device.deallocate(gpu_data_twod_patch_row_major);
+
+}
+
+
+// Verifies VALID padding (no padding) with incrementing values.
+template <typename DataType, typename IndexType>
+static void test_patch_padding_valid_sycl(const Eigen::SyclDevice& sycl_device){
+ IndexType input_depth = 3;
+ IndexType input_rows = 3;
+ IndexType input_cols = 3;
+ IndexType input_batches = 1;
+ IndexType ksize = 2; // Corresponds to the Rows and Cols for tensor.extract_image_patches<>.
+ IndexType stride = 2; // Only same stride is supported.
+
+ array<IndexType, 4> tensorColMajorRange = {{input_depth, input_rows, input_cols, input_batches}};
+ array<IndexType, 4> tensorRowMajorRange = {{input_batches, input_cols, input_rows, input_depth}};
+ Tensor<DataType, 4, DataLayout,IndexType> tensor_col_major(tensorColMajorRange);
+ Tensor<DataType, 4, RowMajor,IndexType> tensor_row_major(tensorRowMajorRange);
+
+ DataType* gpu_data_col_major = static_cast<DataType*>(sycl_device.allocate(tensor_col_major.size()*sizeof(DataType)));
+ DataType* gpu_data_row_major = static_cast<DataType*>(sycl_device.allocate(tensor_row_major.size()*sizeof(DataType)));
+ TensorMap<Tensor<DataType, 4, ColMajor, IndexType>> gpu_col_major(gpu_data_col_major, tensorColMajorRange);
+ TensorMap<Tensor<DataType, 4, RowMajor, IndexType>> gpu_row_major(gpu_data_row_major, tensorRowMajorRange);
+
+ sycl_device.memcpyHostToDevice(gpu_data_col_major, tensor_col_major.data(),(tensor_col_major.size())*sizeof(DataType));
+ gpu_row_major.device(sycl_device)=gpu_col_major.swap_layout();
+ sycl_device.memcpyDeviceToHost(tensor_row_major.data(), gpu_data_row_major, (tensor_col_major.size())*sizeof(DataType));
+
+ VERIFY_IS_EQUAL(tensor_col_major.dimension(0), tensor_row_major.dimension(3));
+ VERIFY_IS_EQUAL(tensor_col_major.dimension(1), tensor_row_major.dimension(2));
+ VERIFY_IS_EQUAL(tensor_col_major.dimension(2), tensor_row_major.dimension(1));
+ VERIFY_IS_EQUAL(tensor_col_major.dimension(3), tensor_row_major.dimension(0));
+
+ // Initializes tensor with incrementing numbers.
+ for (IndexType i = 0; i < tensor_col_major.size(); ++i) {
+ tensor_col_major.data()[i] = i + 1;
+ }
+ // ColMajor
+ array<IndexType, 5> patchColMajorTensorRange={{input_depth, ksize, ksize, 1, input_batches}};
+ Tensor<DataType, 5, DataLayout,IndexType> result_col_major(patchColMajorTensorRange);
+ size_t patchTensorBuffSize =result_col_major.size()*sizeof(DataType);
+ DataType* gpu_data_result_col_major = static_cast<DataType*>(sycl_device.allocate(patchTensorBuffSize));
+ TensorMap<Tensor<DataType, 5, DataLayout,IndexType>> gpu_result_col_major(gpu_data_result_col_major, patchColMajorTensorRange);
+ gpu_result_col_major.device(sycl_device)=gpu_col_major.extract_image_patches(ksize, ksize, stride, stride, 1, 1, PADDING_VALID);
+ sycl_device.memcpyDeviceToHost(result_col_major.data(), gpu_data_result_col_major, patchTensorBuffSize);
+
+ VERIFY_IS_EQUAL(result_col_major.dimension(0), input_depth); // depth
+ VERIFY_IS_EQUAL(result_col_major.dimension(1), ksize); // kernel rows
+ VERIFY_IS_EQUAL(result_col_major.dimension(2), ksize); // kernel cols
+ VERIFY_IS_EQUAL(result_col_major.dimension(3), 1); // number of patches
+ VERIFY_IS_EQUAL(result_col_major.dimension(4), input_batches); // number of batches
+
+ // RowMajor
+ array<IndexType, 5> patchRowMajorTensorRange={{input_batches, 1, ksize, ksize, input_depth }};
+ Tensor<DataType, 5, RowMajor,IndexType> result_row_major(patchRowMajorTensorRange);
+ patchTensorBuffSize =result_row_major.size()*sizeof(DataType);
+ DataType* gpu_data_result_row_major = static_cast<DataType*>(sycl_device.allocate(patchTensorBuffSize));
+ TensorMap<Tensor<DataType, 5, RowMajor,IndexType>> gpu_result_row_major(gpu_data_result_row_major, patchRowMajorTensorRange);
+ gpu_result_row_major.device(sycl_device)=gpu_row_major.extract_image_patches(ksize, ksize, stride, stride, 1, 1, PADDING_VALID);
+ sycl_device.memcpyDeviceToHost(result_row_major.data(), gpu_data_result_row_major, patchTensorBuffSize);
+
+ VERIFY_IS_EQUAL(result_col_major.dimension(0), result_row_major.dimension(4));
+ VERIFY_IS_EQUAL(result_col_major.dimension(1), result_row_major.dimension(3));
+ VERIFY_IS_EQUAL(result_col_major.dimension(2), result_row_major.dimension(2));
+ VERIFY_IS_EQUAL(result_col_major.dimension(3), result_row_major.dimension(1));
+ VERIFY_IS_EQUAL(result_col_major.dimension(4), result_row_major.dimension(0));
+
+ // No padding is carried out.
+ IndexType row_padding = 0;
+ IndexType col_padding = 0;
+
+ for (IndexType i = 0; (i+stride+ksize-1) < input_rows; i += stride) { // input rows
+ for (IndexType j = 0; (j+stride+ksize-1) < input_cols; j += stride) { // input cols
+ IndexType patchId = i+input_rows*j;
+ for (IndexType r = 0; r < ksize; ++r) { // patch rows
+ for (IndexType c = 0; c < ksize; ++c) { // patch cols
+ for (IndexType d = 0; d < input_depth; ++d) { // depth
+ for (IndexType b = 0; b < input_batches; ++b) { // batch
+ DataType expected_col_major = 0.0f;
+ DataType expected_row_major = 0.0f;
+ IndexType row_offset = r + i - row_padding;
+ IndexType col_offset = c + j - col_padding;
+ if (row_offset >= 0 && col_offset >= 0 && row_offset < input_rows && col_offset < input_cols) {
+ expected_col_major = tensor_col_major(d, row_offset, col_offset, b);
+ expected_row_major = tensor_row_major(b, col_offset, row_offset, d);
+ }
+ // ColMajor
+ if (result_col_major(d, r, c, patchId, b) != expected_col_major) {
+ std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl;
+ }
+ VERIFY_IS_EQUAL(result_col_major(d, r, c, patchId, b), expected_col_major);
+ // RowMajor
+ if (result_row_major(b, patchId, c, r, d) != expected_row_major) {
+ std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl;
+ }
+ VERIFY_IS_EQUAL(result_row_major(b, patchId, c, r, d), expected_row_major);
+ // Check that ColMajor and RowMajor agree.
+ VERIFY_IS_EQUAL(expected_col_major, expected_row_major);
+ }
+ }
+ }
+ }
+ }
+ }
+ sycl_device.deallocate(gpu_data_col_major);
+ sycl_device.deallocate(gpu_data_row_major);
+ sycl_device.deallocate(gpu_data_result_col_major);
+ sycl_device.deallocate(gpu_data_result_row_major);
+}
+
+// Verifies VALID padding (no padding) when every input element has the same value.
+template <typename DataType, typename IndexType>
+static void test_patch_padding_valid_same_value_sycl(const Eigen::SyclDevice& sycl_device){
+ IndexType input_depth = 1;
+ IndexType input_rows = 5;
+ IndexType input_cols = 5;
+ IndexType input_batches = 2;
+ IndexType ksize = 3; // Corresponds to the Rows and Cols for tensor.extract_image_patches<>.
+ IndexType stride = 2; // The same stride is used for rows and columns.
+ // ColMajor
+
+ array<IndexType, 4> tensorColMajorRange = {{input_depth, input_rows, input_cols, input_batches}};
+ array<IndexType, 4> tensorRowMajorRange = {{input_batches, input_cols, input_rows, input_depth}};
+ Tensor<DataType, 4, DataLayout,IndexType> tensor_col_major(tensorColMajorRange);
+ Tensor<DataType, 4, RowMajor,IndexType> tensor_row_major(tensorRowMajorRange);
+
+ DataType* gpu_data_col_major = static_cast<DataType*>(sycl_device.allocate(tensor_col_major.size()*sizeof(DataType)));
+ DataType* gpu_data_row_major = static_cast<DataType*>(sycl_device.allocate(tensor_row_major.size()*sizeof(DataType)));
+ TensorMap<Tensor<DataType, 4, ColMajor, IndexType>> gpu_col_major(gpu_data_col_major, tensorColMajorRange);
+ TensorMap<Tensor<DataType, 4, RowMajor, IndexType>> gpu_row_major(gpu_data_row_major, tensorRowMajorRange);
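+ // Fill the input with a single constant; every in-bounds element of every extracted patch
+ // must then equal that constant (11.0f).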
+ gpu_col_major.device(sycl_device)=gpu_col_major.constant(11.0f);
+ gpu_row_major.device(sycl_device)=gpu_col_major.swap_layout();
+ sycl_device.memcpyDeviceToHost(tensor_col_major.data(), gpu_data_col_major, (tensor_col_major.size())*sizeof(DataType));
+ sycl_device.memcpyDeviceToHost(tensor_row_major.data(), gpu_data_row_major, (tensor_row_major.size())*sizeof(DataType));
+ VERIFY_IS_EQUAL(tensor_col_major.dimension(0), tensor_row_major.dimension(3));
+ VERIFY_IS_EQUAL(tensor_col_major.dimension(1), tensor_row_major.dimension(2));
+ VERIFY_IS_EQUAL(tensor_col_major.dimension(2), tensor_row_major.dimension(1));
+ VERIFY_IS_EQUAL(tensor_col_major.dimension(3), tensor_row_major.dimension(0));
+
+ array<IndexType, 5> patchColMajorTensorRange={{input_depth, ksize, ksize, 4, input_batches}};
+ Tensor<DataType, 5, DataLayout,IndexType> result_col_major(patchColMajorTensorRange);
+ size_t patchTensorBuffSize =result_col_major.size()*sizeof(DataType);
+ DataType* gpu_data_result_col_major = static_cast<DataType*>(sycl_device.allocate(patchTensorBuffSize));
+ TensorMap<Tensor<DataType, 5, DataLayout,IndexType>> gpu_result_col_major(gpu_data_result_col_major, patchColMajorTensorRange);
+ gpu_result_col_major.device(sycl_device)=gpu_col_major.extract_image_patches(ksize, ksize, stride, stride, 1, 1, PADDING_VALID);
+ sycl_device.memcpyDeviceToHost(result_col_major.data(), gpu_data_result_col_major, patchTensorBuffSize);
+
+ VERIFY_IS_EQUAL(result_col_major.dimension(0), input_depth); // depth
+ VERIFY_IS_EQUAL(result_col_major.dimension(1), ksize); // kernel rows
+ VERIFY_IS_EQUAL(result_col_major.dimension(2), ksize); // kernel cols
+ VERIFY_IS_EQUAL(result_col_major.dimension(3), 4); // number of patches
+ VERIFY_IS_EQUAL(result_col_major.dimension(4), input_batches); // number of batches
+
+ // RowMajor
+ array<IndexType, 5> patchRowMajorTensorRange={{input_batches, 4, ksize, ksize, input_depth }};
+ Tensor<DataType, 5, RowMajor,IndexType> result_row_major(patchRowMajorTensorRange);
+ patchTensorBuffSize =result_row_major.size()*sizeof(DataType);
+ DataType* gpu_data_result_row_major = static_cast<DataType*>(sycl_device.allocate(patchTensorBuffSize));
+ TensorMap<Tensor<DataType, 5, RowMajor,IndexType>> gpu_result_row_major(gpu_data_result_row_major, patchRowMajorTensorRange);
+ gpu_result_row_major.device(sycl_device)=gpu_row_major.extract_image_patches(ksize, ksize, stride, stride, 1, 1, PADDING_VALID);
+ sycl_device.memcpyDeviceToHost(result_row_major.data(), gpu_data_result_row_major, patchTensorBuffSize);
+
+ VERIFY_IS_EQUAL(result_col_major.dimension(0), result_row_major.dimension(4));
+ VERIFY_IS_EQUAL(result_col_major.dimension(1), result_row_major.dimension(3));
+ VERIFY_IS_EQUAL(result_col_major.dimension(2), result_row_major.dimension(2));
+ VERIFY_IS_EQUAL(result_col_major.dimension(3), result_row_major.dimension(1));
+ VERIFY_IS_EQUAL(result_col_major.dimension(4), result_row_major.dimension(0));
+
+ // No padding is carried out.
+ IndexType row_padding = 0;
+ IndexType col_padding = 0;
+
+ for (IndexType i = 0; (i+stride+ksize-1) <= input_rows; i += stride) { // input rows
+ for (IndexType j = 0; (j+stride+ksize-1) <= input_cols; j += stride) { // input cols
+ IndexType patchId = i+input_rows*j;
+ for (IndexType r = 0; r < ksize; ++r) { // patch rows
+ for (IndexType c = 0; c < ksize; ++c) { // patch cols
+ for (IndexType d = 0; d < input_depth; ++d) { // depth
+ for (IndexType b = 0; b < input_batches; ++b) { // batch
+ DataType expected_col_major = 0.0f;
+ DataType expected_row_major = 0.0f;
+ IndexType row_offset = r + i - row_padding;
+ IndexType col_offset = c + j - col_padding;
+ if (row_offset >= 0 && col_offset >= 0 && row_offset < input_rows && col_offset < input_cols) {
+ expected_col_major = tensor_col_major(d, row_offset, col_offset, b);
+ expected_row_major = tensor_row_major(b, col_offset, row_offset, d);
+ }
+ // ColMajor
+ if (result_col_major(d, r, c, patchId, b) != expected_col_major) {
+ std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl;
+ }
+ VERIFY_IS_EQUAL(result_col_major(d, r, c, patchId, b), expected_col_major);
+ // RowMajor
+ if (result_row_major(b, patchId, c, r, d) != expected_row_major) {
+ std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl;
+ }
+ VERIFY_IS_EQUAL(result_row_major(b, patchId, c, r, d), expected_row_major);
+ // Check that ColMajor and RowMajor agree.
+ VERIFY_IS_EQUAL(expected_col_major, expected_row_major);
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+// Verifies SAME padding.
+template <typename DataType, typename IndexType>
+static void test_patch_padding_same_sycl(const Eigen::SyclDevice& sycl_device){
+ IndexType input_depth = 3;
+ IndexType input_rows = 4;
+ IndexType input_cols = 2;
+ IndexType input_batches = 1;
+ IndexType ksize = 2; // Corresponds to the Rows and Cols for tensor.extract_image_patches<>.
+ IndexType stride = 2; // The same stride is used for rows and columns.
+
+ // ColMajor
+ array<IndexType, 4> tensorColMajorRange = {{input_depth, input_rows, input_cols, input_batches}};
+ array<IndexType, 4> tensorRowMajorRange = {{input_batches, input_cols, input_rows, input_depth}};
+ Tensor<DataType, 4, DataLayout,IndexType> tensor_col_major(tensorColMajorRange);
+ Tensor<DataType, 4, RowMajor,IndexType> tensor_row_major(tensorRowMajorRange);
+
+ DataType* gpu_data_col_major = static_cast<DataType*>(sycl_device.allocate(tensor_col_major.size()*sizeof(DataType)));
+ DataType* gpu_data_row_major = static_cast<DataType*>(sycl_device.allocate(tensor_row_major.size()*sizeof(DataType)));
+ TensorMap<Tensor<DataType, 4, ColMajor, IndexType>> gpu_col_major(gpu_data_col_major, tensorColMajorRange);
+ TensorMap<Tensor<DataType, 4, RowMajor, IndexType>> gpu_row_major(gpu_data_row_major, tensorRowMajorRange);
+
+ // Initializes the tensor with incrementing numbers before it is copied to the device,
+ // so the device-side patch extraction sees the same data as the host-side checks.
+ for (IndexType i = 0; i < tensor_col_major.size(); ++i) {
+ tensor_col_major.data()[i] = i + 1;
+ }
+
+ sycl_device.memcpyHostToDevice(gpu_data_col_major, tensor_col_major.data(),(tensor_col_major.size())*sizeof(DataType));
+ gpu_row_major.device(sycl_device)=gpu_col_major.swap_layout();
+ sycl_device.memcpyDeviceToHost(tensor_row_major.data(), gpu_data_row_major, (tensor_col_major.size())*sizeof(DataType));
+
+ VERIFY_IS_EQUAL(tensor_col_major.dimension(0), tensor_row_major.dimension(3));
+ VERIFY_IS_EQUAL(tensor_col_major.dimension(1), tensor_row_major.dimension(2));
+ VERIFY_IS_EQUAL(tensor_col_major.dimension(2), tensor_row_major.dimension(1));
+ VERIFY_IS_EQUAL(tensor_col_major.dimension(3), tensor_row_major.dimension(0));
+
+ array<IndexType, 5> patchColMajorTensorRange={{input_depth, ksize, ksize, 2, input_batches}};
+ Tensor<DataType, 5, DataLayout,IndexType> result_col_major(patchColMajorTensorRange);
+ size_t patchTensorBuffSize =result_col_major.size()*sizeof(DataType);
+ DataType* gpu_data_result_col_major = static_cast<DataType*>(sycl_device.allocate(patchTensorBuffSize));
+ TensorMap<Tensor<DataType, 5, DataLayout,IndexType>> gpu_result_col_major(gpu_data_result_col_major, patchColMajorTensorRange);
+ gpu_result_col_major.device(sycl_device)=gpu_col_major.extract_image_patches(ksize, ksize, stride, stride, PADDING_SAME);
+ sycl_device.memcpyDeviceToHost(result_col_major.data(), gpu_data_result_col_major, patchTensorBuffSize);
+
+ VERIFY_IS_EQUAL(result_col_major.dimension(0), input_depth); // depth
+ VERIFY_IS_EQUAL(result_col_major.dimension(1), ksize); // kernel rows
+ VERIFY_IS_EQUAL(result_col_major.dimension(2), ksize); // kernel cols
+ VERIFY_IS_EQUAL(result_col_major.dimension(3), 2); // number of patches
+ VERIFY_IS_EQUAL(result_col_major.dimension(4), input_batches); // number of batches
+
+ // RowMajor
+
+ array<IndexType, 5> patchRowMajorTensorRange={{input_batches, 2, ksize, ksize, input_depth }};
+ Tensor<DataType, 5, RowMajor,IndexType> result_row_major(patchRowMajorTensorRange);
+ patchTensorBuffSize =result_row_major.size()*sizeof(DataType);
+ DataType* gpu_data_result_row_major = static_cast<DataType*>(sycl_device.allocate(patchTensorBuffSize));
+ TensorMap<Tensor<DataType, 5, RowMajor,IndexType>> gpu_result_row_major(gpu_data_result_row_major, patchRowMajorTensorRange);
+ gpu_result_row_major.device(sycl_device)=gpu_row_major.extract_image_patches(ksize, ksize, stride, stride, PADDING_SAME);
+ sycl_device.memcpyDeviceToHost(result_row_major.data(), gpu_data_result_row_major, patchTensorBuffSize);
+
+ VERIFY_IS_EQUAL(result_col_major.dimension(0), result_row_major.dimension(4));
+ VERIFY_IS_EQUAL(result_col_major.dimension(1), result_row_major.dimension(3));
+ VERIFY_IS_EQUAL(result_col_major.dimension(2), result_row_major.dimension(2));
+ VERIFY_IS_EQUAL(result_col_major.dimension(3), result_row_major.dimension(1));
+ VERIFY_IS_EQUAL(result_col_major.dimension(4), result_row_major.dimension(0));
+
+ // Based on the calculation described in TensorTraits.h, padding happens to be 0.
+ IndexType row_padding = 0;
+ IndexType col_padding = 0;
+
+ for (IndexType i = 0; (i+stride+ksize-1) <= input_rows; i += stride) { // input rows
+ for (IndexType j = 0; (j+stride+ksize-1) <= input_cols; j += stride) { // input cols
+ IndexType patchId = i+input_rows*j;
+ for (IndexType r = 0; r < ksize; ++r) { // patch rows
+ for (IndexType c = 0; c < ksize; ++c) { // patch cols
+ for (IndexType d = 0; d < input_depth; ++d) { // depth
+ for (IndexType b = 0; b < input_batches; ++b) { // batch
+ DataType expected_col_major = 0.0f;
+ DataType expected_row_major = 0.0f;
+ IndexType row_offset = r*stride + i - row_padding;
+ IndexType col_offset = c*stride + j - col_padding;
+ if (row_offset >= 0 && col_offset >= 0 && row_offset < input_rows && col_offset < input_cols) {
+ expected_col_major = tensor_col_major(d, row_offset, col_offset, b);
+ expected_row_major = tensor_row_major(b, col_offset, row_offset, d);
+ }
+ // ColMajor
+ if (result_col_major(d, r, c, patchId, b) != expected_col_major) {
+ std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl;
+ }
+ VERIFY_IS_EQUAL(result_col_major(d, r, c, patchId, b), expected_col_major);
+ // RowMajor
+ if (result_row_major(b, patchId, c, r, d) != expected_row_major) {
+ std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl;
+ }
+ VERIFY_IS_EQUAL(result_row_major(b, patchId, c, r, d), expected_row_major);
+ // Check that ColMajor and RowMajor agree.
+ VERIFY_IS_EQUAL(expected_col_major, expected_row_major);
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+
+template <typename DataType, typename IndexType>
+static void test_patch_no_extra_dim_sycl(const Eigen::SyclDevice& sycl_device){
+
+ IndexType sizeDim1 = 2;
+ IndexType sizeDim2 = 3;
+ IndexType sizeDim3 = 5;
+
+ // ColMajor
+ array<IndexType, 3> tensorColMajorRange = {{sizeDim1, sizeDim2, sizeDim3}};
+ array<IndexType, 3> tensorRowMajorRange = {{sizeDim3, sizeDim2, sizeDim1}};
+ Tensor<DataType, 3, DataLayout,IndexType> tensor_col_major(tensorColMajorRange);
+ tensor_col_major.setRandom();
+ Tensor<DataType, 3, RowMajor,IndexType> tensor_row_major(tensorRowMajorRange);
+
+ DataType* gpu_data_col_major = static_cast<DataType*>(sycl_device.allocate(tensor_col_major.size()*sizeof(DataType)));
+ DataType* gpu_data_row_major = static_cast<DataType*>(sycl_device.allocate(tensor_row_major.size()*sizeof(DataType)));
+ TensorMap<Tensor<DataType, 3, ColMajor, IndexType>> gpu_col_major(gpu_data_col_major, tensorColMajorRange);
+ TensorMap<Tensor<DataType, 3, RowMajor, IndexType>> gpu_row_major(gpu_data_row_major, tensorRowMajorRange);
+
+ sycl_device.memcpyHostToDevice(gpu_data_col_major, tensor_col_major.data(),(tensor_col_major.size())*sizeof(DataType));
+ gpu_row_major.device(sycl_device)=gpu_col_major.swap_layout();
+ sycl_device.memcpyDeviceToHost(tensor_row_major.data(), gpu_data_row_major, (tensor_row_major.size())*sizeof(DataType));
+
+ VERIFY_IS_EQUAL(tensor_col_major.dimension(0), tensor_row_major.dimension(2));
+ VERIFY_IS_EQUAL(tensor_col_major.dimension(1), tensor_row_major.dimension(1));
+ VERIFY_IS_EQUAL(tensor_col_major.dimension(2), tensor_row_major.dimension(0));
+
+
+ // Single pixel patch: ColMajor
+ array<IndexType, 4> patchColMajorTensorRange={{sizeDim1, 1, 1, sizeDim2*sizeDim3}};
+ Tensor<DataType, 4, DataLayout,IndexType> single_patch_col_major(patchColMajorTensorRange);
+ size_t patchTensorBuffSize =single_patch_col_major.size()*sizeof(DataType);
+ DataType* gpu_data_single_patch_col_major = static_cast<DataType*>(sycl_device.allocate(patchTensorBuffSize));
+ TensorMap<Tensor<DataType, 4, DataLayout,IndexType>> gpu_single_patch_col_major(gpu_data_single_patch_col_major, patchColMajorTensorRange);
+ gpu_single_patch_col_major.device(sycl_device)=gpu_col_major.extract_image_patches(1, 1);
+ sycl_device.memcpyDeviceToHost(single_patch_col_major.data(), gpu_data_single_patch_col_major, patchTensorBuffSize);
+
+ VERIFY_IS_EQUAL(single_patch_col_major.dimension(0), sizeDim1);
+ VERIFY_IS_EQUAL(single_patch_col_major.dimension(1), 1);
+ VERIFY_IS_EQUAL(single_patch_col_major.dimension(2), 1);
+ VERIFY_IS_EQUAL(single_patch_col_major.dimension(3), sizeDim2*sizeDim3);
+
+ // Single pixel patch: RowMajor
+ array<IndexType, 4> patchRowMajorTensorRange={{sizeDim2*sizeDim3, 1, 1, sizeDim1}};
+ Tensor<DataType, 4, RowMajor,IndexType> single_patch_row_major(patchRowMajorTensorRange);
+ patchTensorBuffSize =single_patch_row_major.size()*sizeof(DataType);
+ DataType* gpu_data_single_patch_row_major = static_cast<DataType*>(sycl_device.allocate(patchTensorBuffSize));
+ TensorMap<Tensor<DataType, 4, RowMajor,IndexType>> gpu_single_patch_row_major(gpu_data_single_patch_row_major, patchRowMajorTensorRange);
+ gpu_single_patch_row_major.device(sycl_device)=gpu_row_major.extract_image_patches(1, 1);
+ sycl_device.memcpyDeviceToHost(single_patch_row_major.data(), gpu_data_single_patch_row_major, patchTensorBuffSize);
+
+ VERIFY_IS_EQUAL(single_patch_row_major.dimension(0), sizeDim2*sizeDim3);
+ VERIFY_IS_EQUAL(single_patch_row_major.dimension(1), 1);
+ VERIFY_IS_EQUAL(single_patch_row_major.dimension(2), 1);
+ VERIFY_IS_EQUAL(single_patch_row_major.dimension(3), sizeDim1);
+
+ for (IndexType i = 0; i < tensor_col_major.size(); ++i) {
+ // ColMajor
+ if (tensor_col_major.data()[i] != single_patch_col_major.data()[i]) {
+ std::cout << "Mismatch detected at index " << i << " : " << tensor_col_major.data()[i] << " vs " << single_patch_col_major.data()[i] << std::endl;
+ }
+ VERIFY_IS_EQUAL(single_patch_col_major.data()[i], tensor_col_major.data()[i]);
+ // RowMajor
+ if (tensor_row_major.data()[i] != single_patch_row_major.data()[i]) {
+ std::cout << "Mismatch detected at index " << i << " : "
+ << tensor_col_major.data()[i] << " vs "
+ << single_patch_row_major.data()[i] << std::endl;
+ }
+ VERIFY_IS_EQUAL(single_patch_row_major.data()[i],
+ tensor_row_major.data()[i]);
+ VERIFY_IS_EQUAL(tensor_col_major.data()[i], tensor_row_major.data()[i]);
+ VERIFY_IS_EQUAL(single_patch_col_major.data()[i],
+ single_patch_row_major.data()[i]);
+ }
+
+ // Entire image patch: ColMajor
+ patchColMajorTensorRange={{sizeDim1, sizeDim2, sizeDim3, sizeDim2*sizeDim3}};
+ Tensor<DataType, 4, DataLayout,IndexType> entire_image_patch_col_major(patchColMajorTensorRange);
+ patchTensorBuffSize =entire_image_patch_col_major.size()*sizeof(DataType);
+ DataType* gpu_data_entire_image_patch_col_major = static_cast<DataType*>(sycl_device.allocate(patchTensorBuffSize));
+ TensorMap<Tensor<DataType, 4, DataLayout,IndexType>> gpu_entire_image_patch_col_major(gpu_data_entire_image_patch_col_major, patchColMajorTensorRange);
+ gpu_entire_image_patch_col_major.device(sycl_device)=gpu_col_major.extract_image_patches(3, 5);
+ sycl_device.memcpyDeviceToHost(entire_image_patch_col_major.data(), gpu_data_entire_image_patch_col_major, patchTensorBuffSize);
+
+ VERIFY_IS_EQUAL(entire_image_patch_col_major.dimension(0), 2);
+ VERIFY_IS_EQUAL(entire_image_patch_col_major.dimension(1), 3);
+ VERIFY_IS_EQUAL(entire_image_patch_col_major.dimension(2), 5);
+ VERIFY_IS_EQUAL(entire_image_patch_col_major.dimension(3), 3*5);
+
+ // Entire image patch: RowMajor
+ patchRowMajorTensorRange={{sizeDim2*sizeDim3, sizeDim3, sizeDim2, sizeDim1}};
+ Tensor<DataType, 4, RowMajor,IndexType> entire_image_patch_row_major(patchRowMajorTensorRange);
+ patchTensorBuffSize =entire_image_patch_row_major.size()*sizeof(DataType);
+ DataType* gpu_data_entire_image_patch_row_major = static_cast<DataType*>(sycl_device.allocate(patchTensorBuffSize));
+ TensorMap<Tensor<DataType, 4, RowMajor,IndexType>> gpu_entire_image_patch_row_major(gpu_data_entire_image_patch_row_major, patchRowMajorTensorRange);
+ gpu_entire_image_patch_row_major.device(sycl_device)=gpu_row_major.extract_image_patches(3, 5);
+ sycl_device.memcpyDeviceToHost(entire_image_patch_row_major.data(), gpu_data_entire_image_patch_row_major, patchTensorBuffSize);
+
+ VERIFY_IS_EQUAL(entire_image_patch_row_major.dimension(0), 3*5);
+ VERIFY_IS_EQUAL(entire_image_patch_row_major.dimension(1), 5);
+ VERIFY_IS_EQUAL(entire_image_patch_row_major.dimension(2), 3);
+ VERIFY_IS_EQUAL(entire_image_patch_row_major.dimension(3), 2);
+
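+ // The 3x5 patch is centred on each input pixel of the 3x5 image, so the expected values
+ // below are offset by 1 row and 2 columns (hence the r-1+i / c-2+j indices).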
+ for (IndexType i = 0; i < 3; ++i) {
+ for (IndexType j = 0; j < 5; ++j) {
+ IndexType patchId = i+3*j;
+ for (IndexType r = 0; r < 3; ++r) {
+ for (IndexType c = 0; c < 5; ++c) {
+ for (IndexType d = 0; d < 2; ++d) {
+ DataType expected_col_major = 0.0f;
+ DataType expected_row_major = 0.0f;
+ if (r-1+i >= 0 && c-2+j >= 0 && r-1+i < 3 && c-2+j < 5) {
+ expected_col_major = tensor_col_major(d, r-1+i, c-2+j);
+ expected_row_major = tensor_row_major(c-2+j, r-1+i, d);
+ }
+ // ColMajor
+ if (entire_image_patch_col_major(d, r, c, patchId) != expected_col_major) {
+ std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << std::endl;
+ }
+ VERIFY_IS_EQUAL(entire_image_patch_col_major(d, r, c, patchId), expected_col_major);
+ // RowMajor
+ if (entire_image_patch_row_major(patchId, c, r, d) !=
+ expected_row_major) {
+ std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << std::endl;
+ }
+ VERIFY_IS_EQUAL(entire_image_patch_row_major(patchId, c, r, d),
+ expected_row_major);
+ // Check that ColMajor and RowMajor agree.
+ VERIFY_IS_EQUAL(expected_col_major, expected_row_major);
+ }
+ }
+ }
+ }
+ }
+
+ // 2D patch: ColMajor
+ patchColMajorTensorRange={{sizeDim1, 2, 2, sizeDim2*sizeDim3}};
+ Tensor<DataType, 4, DataLayout,IndexType> twod_patch_col_major(patchColMajorTensorRange);
+ patchTensorBuffSize =twod_patch_col_major.size()*sizeof(DataType);
+ DataType* gpu_data_twod_patch_col_major = static_cast<DataType*>(sycl_device.allocate(patchTensorBuffSize));
+ TensorMap<Tensor<DataType, 4, DataLayout,IndexType>> gpu_twod_patch_col_major(gpu_data_twod_patch_col_major, patchColMajorTensorRange);
+ gpu_twod_patch_col_major.device(sycl_device)=gpu_col_major.extract_image_patches(2, 2);
+ sycl_device.memcpyDeviceToHost(twod_patch_col_major.data(), gpu_data_twod_patch_col_major, patchTensorBuffSize);
+
+ VERIFY_IS_EQUAL(twod_patch_col_major.dimension(0), 2);
+ VERIFY_IS_EQUAL(twod_patch_col_major.dimension(1), 2);
+ VERIFY_IS_EQUAL(twod_patch_col_major.dimension(2), 2);
+ VERIFY_IS_EQUAL(twod_patch_col_major.dimension(3), 3*5);
+
+ // 2D patch: RowMajor
+ patchRowMajorTensorRange={{sizeDim2*sizeDim3, 2, 2, sizeDim1}};
+ Tensor<DataType, 4, RowMajor,IndexType> twod_patch_row_major(patchRowMajorTensorRange);
+ patchTensorBuffSize =twod_patch_row_major.size()*sizeof(DataType);
+ DataType* gpu_data_twod_patch_row_major = static_cast<DataType*>(sycl_device.allocate(patchTensorBuffSize));
+ TensorMap<Tensor<DataType, 4, RowMajor,IndexType>> gpu_twod_patch_row_major(gpu_data_twod_patch_row_major, patchRowMajorTensorRange);
+ gpu_twod_patch_row_major.device(sycl_device)=gpu_row_major.extract_image_patches(2, 2);
+ sycl_device.memcpyDeviceToHost(twod_patch_row_major.data(), gpu_data_twod_patch_row_major, patchTensorBuffSize);
+ VERIFY_IS_EQUAL(twod_patch_row_major.dimension(0), 3*5);
+ VERIFY_IS_EQUAL(twod_patch_row_major.dimension(1), 2);
+ VERIFY_IS_EQUAL(twod_patch_row_major.dimension(2), 2);
+ VERIFY_IS_EQUAL(twod_patch_row_major.dimension(3), 2);
+
+ // Based on the calculation described in TensorTraits.h, padding happens to be 0.
+ IndexType row_padding = 0;
+ IndexType col_padding = 0;
+ IndexType stride = 1;
+
+ for (IndexType i = 0; i < 3; ++i) {
+ for (IndexType j = 0; j < 5; ++j) {
+ IndexType patchId = i+3*j;
+ for (IndexType r = 0; r < 2; ++r) {
+ for (IndexType c = 0; c < 2; ++c) {
+ for (IndexType d = 0; d < 2; ++d) {
+ DataType expected_col_major = 0.0f;
+ DataType expected_row_major = 0.0f;
+ IndexType row_offset = r*stride + i - row_padding;
+ IndexType col_offset = c*stride + j - col_padding;
+ // ColMajor
+ if (row_offset >= 0 && col_offset >= 0 && row_offset < tensor_col_major.dimension(1) && col_offset < tensor_col_major.dimension(2)) {
+ expected_col_major = tensor_col_major(d, row_offset, col_offset);
+ }
+ if (twod_patch_col_major(d, r, c, patchId) != expected_col_major) {
+ std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << std::endl;
+ }
+ VERIFY_IS_EQUAL(twod_patch_col_major(d, r, c, patchId), expected_col_major);
+ // RowMajor
+ if (row_offset >= 0 && col_offset >= 0 && row_offset < tensor_row_major.dimension(1) && col_offset < tensor_row_major.dimension(0)) {
+ expected_row_major = tensor_row_major(col_offset, row_offset, d);
+ }
+ if (twod_patch_row_major(patchId, c, r, d) != expected_row_major) {
+ std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << std::endl;
+ }
+ VERIFY_IS_EQUAL(twod_patch_row_major(patchId, c, r, d), expected_row_major);
+ // Check that ColMajor and RowMajor agree.
+ VERIFY_IS_EQUAL(expected_col_major, expected_row_major);
+ }
+ }
+ }
+ }
+ }
+
+ sycl_device.deallocate(gpu_data_col_major);
+ sycl_device.deallocate(gpu_data_row_major);
+ sycl_device.deallocate(gpu_data_single_patch_col_major);
+ sycl_device.deallocate(gpu_data_single_patch_row_major);
+ sycl_device.deallocate(gpu_data_entire_image_patch_col_major);
+ sycl_device.deallocate(gpu_data_entire_image_patch_row_major);
+ sycl_device.deallocate(gpu_data_twod_patch_col_major);
+ sycl_device.deallocate(gpu_data_twod_patch_row_major);
+}
+
+template <typename DataType, typename IndexType>
+static void test_imagenet_patches_sycl(const Eigen::SyclDevice& sycl_device)
+{
+ // Test the code on typical configurations used by the 'imagenet' benchmarks at
+ // https://github.com/soumith/convnet-benchmarks
+ // ColMajor
+ IndexType sizeDim1 = 3;
+ IndexType sizeDim2 = 128;
+ IndexType sizeDim3 = 128;
+ IndexType sizeDim4 = 16;
+ array<IndexType, 4> tensorColMajorRange = {{sizeDim1, sizeDim2, sizeDim3, sizeDim4}};
+ Tensor<DataType, 4, DataLayout,IndexType> l_in_col_major(tensorColMajorRange);
+ l_in_col_major.setRandom();
+
+ DataType* gpu_data_l_in_col_major = static_cast<DataType*>(sycl_device.allocate(l_in_col_major.size()*sizeof(DataType)));
+ TensorMap<Tensor<DataType, 4, ColMajor, IndexType>> gpu_l_in_col_major(gpu_data_l_in_col_major, tensorColMajorRange);
+
+ sycl_device.memcpyHostToDevice(gpu_data_l_in_col_major, l_in_col_major.data(),(l_in_col_major.size())*sizeof(DataType));
+
+ array<IndexType, 5> patchTensorRange={{sizeDim1, 11, 11, sizeDim2*sizeDim3, sizeDim4}};
+ Tensor<DataType, 5, DataLayout,IndexType> l_out_col_major(patchTensorRange);
+ size_t patchTensorBuffSize =l_out_col_major.size()*sizeof(DataType);
+ DataType* gpu_data_l_out_col_major = static_cast<DataType*>(sycl_device.allocate(patchTensorBuffSize));
+ TensorMap<Tensor<DataType, 5, DataLayout,IndexType>> gpu_l_out_col_major(gpu_data_l_out_col_major, patchTensorRange);
+ gpu_l_out_col_major.device(sycl_device)=gpu_l_in_col_major.extract_image_patches(11, 11);
+ sycl_device.memcpyDeviceToHost(l_out_col_major.data(), gpu_data_l_out_col_major, patchTensorBuffSize);
+
+ VERIFY_IS_EQUAL(l_out_col_major.dimension(0), sizeDim1);
+ VERIFY_IS_EQUAL(l_out_col_major.dimension(1), 11);
+ VERIFY_IS_EQUAL(l_out_col_major.dimension(2), 11);
+ VERIFY_IS_EQUAL(l_out_col_major.dimension(3), sizeDim2*sizeDim3);
+ VERIFY_IS_EQUAL(l_out_col_major.dimension(4), sizeDim4);
+
+ // RowMajor
+ patchTensorRange={{sizeDim4, sizeDim2*sizeDim3, 11, 11, sizeDim1}};
+ Tensor<DataType, 5, RowMajor,IndexType> l_out_row_major(patchTensorRange);
+ patchTensorBuffSize =l_out_row_major.size()*sizeof(DataType);
+ DataType* gpu_data_l_out_row_major = static_cast<DataType*>(sycl_device.allocate(patchTensorBuffSize));
+ TensorMap<Tensor<DataType, 5, RowMajor,IndexType>> gpu_l_out_row_major(gpu_data_l_out_row_major, patchTensorRange);
+ gpu_l_out_row_major.device(sycl_device)=gpu_l_in_col_major.swap_layout().extract_image_patches(11, 11);
+ sycl_device.memcpyDeviceToHost(l_out_row_major.data(), gpu_data_l_out_row_major, patchTensorBuffSize);
+
+ VERIFY_IS_EQUAL(l_out_row_major.dimension(0), sizeDim4);
+ VERIFY_IS_EQUAL(l_out_row_major.dimension(1), sizeDim2*sizeDim3);
+ VERIFY_IS_EQUAL(l_out_row_major.dimension(2), 11);
+ VERIFY_IS_EQUAL(l_out_row_major.dimension(3), 11);
+ VERIFY_IS_EQUAL(l_out_row_major.dimension(4), sizeDim1);
+
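+ // The 11x11 patches are centred on each input pixel, so the expected values are offset
+ // by (11-1)/2 = 5 rows and columns (hence the r-5+i / c-5+j indices).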
+ for (IndexType b = 0; b < 16; ++b) {
+ for (IndexType i = 0; i < 128; ++i) {
+ for (IndexType j = 0; j < 128; ++j) {
+ IndexType patchId = i+128*j;
+ for (IndexType c = 0; c < 11; ++c) {
+ for (IndexType r = 0; r < 11; ++r) {
+ for (IndexType d = 0; d < 3; ++d) {
+ DataType expected = 0.0f;
+ if (r-5+i >= 0 && c-5+j >= 0 && r-5+i < 128 && c-5+j < 128) {
+ expected = l_in_col_major(d, r-5+i, c-5+j, b);
+ }
+ // ColMajor
+ if (l_out_col_major(d, r, c, patchId, b) != expected) {
+ std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl;
+ }
+ VERIFY_IS_EQUAL(l_out_col_major(d, r, c, patchId, b), expected);
+ // RowMajor
+ if (l_out_row_major(b, patchId, c, r, d) !=
+ expected) {
+ std::cout << "Mismatch detected at index i=" << i << " j=" << j
+ << " r=" << r << " c=" << c << " d=" << d << " b=" << b
+ << std::endl;
+ }
+ VERIFY_IS_EQUAL(l_out_row_major(b, patchId, c, r, d),
+ expected);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // ColMajor
+ sycl_device.deallocate(gpu_data_l_in_col_major);
+ sycl_device.deallocate(gpu_data_l_out_col_major);
+ sizeDim1 = 16;
+ sizeDim2 = 64;
+ sizeDim3 = 64;
+ sizeDim4 = 32;
+ tensorColMajorRange = {{sizeDim1, sizeDim2, sizeDim3, sizeDim4}};
+ l_in_col_major.resize(tensorColMajorRange);
+ l_in_col_major.setRandom();
+ gpu_data_l_in_col_major = static_cast<DataType*>(sycl_device.allocate(l_in_col_major.size()*sizeof(DataType)));
+ TensorMap<Tensor<DataType, 4, ColMajor, IndexType>>gpu_l_in_col_major_resize1(gpu_data_l_in_col_major, tensorColMajorRange);
+
+ patchTensorRange={{sizeDim1, 9, 9, sizeDim2*sizeDim3, sizeDim4}};
+ l_out_col_major.resize(patchTensorRange);
+ patchTensorBuffSize =l_out_col_major.size()*sizeof(DataType);
+ gpu_data_l_out_col_major = static_cast<DataType*>(sycl_device.allocate(patchTensorBuffSize));
+ TensorMap<Tensor<DataType, 5, DataLayout,IndexType>>gpu_l_out_col_major_resize1(gpu_data_l_out_col_major, patchTensorRange);
+ sycl_device.memcpyHostToDevice(gpu_data_l_in_col_major, l_in_col_major.data(),(l_in_col_major.size())*sizeof(DataType));
+ gpu_l_out_col_major_resize1.device(sycl_device)=gpu_l_in_col_major_resize1.extract_image_patches(9, 9);
+ sycl_device.memcpyDeviceToHost(l_out_col_major.data(), gpu_data_l_out_col_major, patchTensorBuffSize);
+ VERIFY_IS_EQUAL(l_out_col_major.dimension(0), 16);
+ VERIFY_IS_EQUAL(l_out_col_major.dimension(1), 9);
+ VERIFY_IS_EQUAL(l_out_col_major.dimension(2), 9);
+ VERIFY_IS_EQUAL(l_out_col_major.dimension(3), 64*64);
+ VERIFY_IS_EQUAL(l_out_col_major.dimension(4), 32);
+
+ // RowMajor
+ sycl_device.deallocate(gpu_data_l_out_row_major);
+ patchTensorRange={{sizeDim4, sizeDim2*sizeDim3, 9, 9 ,sizeDim1}};
+ l_out_row_major.resize(patchTensorRange);
+ patchTensorBuffSize =l_out_row_major.size()*sizeof(DataType);
+ gpu_data_l_out_row_major = static_cast<DataType*>(sycl_device.allocate(patchTensorBuffSize));
+ TensorMap<Tensor<DataType, 5, RowMajor,IndexType>>gpu_l_out_row_major_resize1(gpu_data_l_out_row_major, patchTensorRange);
+ gpu_l_out_row_major_resize1.device(sycl_device)=gpu_l_in_col_major_resize1.swap_layout().extract_image_patches(9, 9);
+ sycl_device.memcpyDeviceToHost(l_out_row_major.data(), gpu_data_l_out_row_major, patchTensorBuffSize);
+
+ VERIFY_IS_EQUAL(l_out_row_major.dimension(0), 32);
+ VERIFY_IS_EQUAL(l_out_row_major.dimension(1), 64*64);
+ VERIFY_IS_EQUAL(l_out_row_major.dimension(2), 9);
+ VERIFY_IS_EQUAL(l_out_row_major.dimension(3), 9);
+ VERIFY_IS_EQUAL(l_out_row_major.dimension(4), 16);
+
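+ // Same check for the 9x9 patches: expected values are offset by (9-1)/2 = 4 rows and columns.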
+ for (IndexType b = 0; b < 32; ++b) {
+ for (IndexType i = 0; i < 64; ++i) {
+ for (IndexType j = 0; j < 64; ++j) {
+ IndexType patchId = i+64*j;
+ for (IndexType c = 0; c < 9; ++c) {
+ for (IndexType r = 0; r < 9; ++r) {
+ for (IndexType d = 0; d < 16; ++d) {
+ DataType expected = 0.0f;
+ if (r-4+i >= 0 && c-4+j >= 0 && r-4+i < 64 && c-4+j < 64) {
+ expected = l_in_col_major(d, r-4+i, c-4+j, b);
+ }
+ // ColMajor
+ if (l_out_col_major(d, r, c, patchId, b) != expected) {
+ std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl;
+ }
+ VERIFY_IS_EQUAL(l_out_col_major(d, r, c, patchId, b), expected);
+ // RowMajor
+ if (l_out_row_major(b, patchId, c, r, d) != expected) {
+ std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl;
+ }
+ VERIFY_IS_EQUAL(l_out_row_major(b, patchId, c, r, d), expected);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // ColMajor
+
+ sycl_device.deallocate(gpu_data_l_in_col_major);
+ sycl_device.deallocate(gpu_data_l_out_col_major);
+ sizeDim1 = 32;
+ sizeDim2 = 16;
+ sizeDim3 = 16;
+ sizeDim4 = 32;
+ tensorColMajorRange = {{sizeDim1, sizeDim2, sizeDim3, sizeDim4}};
+ l_in_col_major.resize(tensorColMajorRange);
+ l_in_col_major.setRandom();
+ gpu_data_l_in_col_major = static_cast<DataType*>(sycl_device.allocate(l_in_col_major.size()*sizeof(DataType)));
+ TensorMap<Tensor<DataType, 4, ColMajor, IndexType>>gpu_l_in_col_major_resize2(gpu_data_l_in_col_major, tensorColMajorRange);
+
+ patchTensorRange={{sizeDim1, 7, 7, sizeDim2*sizeDim3, sizeDim4}};
+ l_out_col_major.resize(patchTensorRange);
+ patchTensorBuffSize =l_out_col_major.size()*sizeof(DataType);
+ gpu_data_l_out_col_major = static_cast<DataType*>(sycl_device.allocate(patchTensorBuffSize));
+ TensorMap<Tensor<DataType, 5, DataLayout,IndexType>>gpu_l_out_col_major_resize2(gpu_data_l_out_col_major, patchTensorRange);
+ sycl_device.memcpyHostToDevice(gpu_data_l_in_col_major, l_in_col_major.data(),(l_in_col_major.size())*sizeof(DataType));
+ gpu_l_out_col_major_resize2.device(sycl_device)=gpu_l_in_col_major_resize2.extract_image_patches(7, 7);
+ sycl_device.memcpyDeviceToHost(l_out_col_major.data(), gpu_data_l_out_col_major, patchTensorBuffSize);
+
+ VERIFY_IS_EQUAL(l_out_col_major.dimension(0), 32);
+ VERIFY_IS_EQUAL(l_out_col_major.dimension(1), 7);
+ VERIFY_IS_EQUAL(l_out_col_major.dimension(2), 7);
+ VERIFY_IS_EQUAL(l_out_col_major.dimension(3), 16*16);
+ VERIFY_IS_EQUAL(l_out_col_major.dimension(4), 32);
+
+ // RowMajor
+ sycl_device.deallocate(gpu_data_l_out_row_major);
+ patchTensorRange={{sizeDim4, sizeDim2*sizeDim3, 7, 7 ,sizeDim1}};
+ l_out_row_major.resize(patchTensorRange);
+ patchTensorBuffSize =l_out_row_major.size()*sizeof(DataType);
+ gpu_data_l_out_row_major = static_cast<DataType*>(sycl_device.allocate(patchTensorBuffSize));
+ TensorMap<Tensor<DataType, 5, RowMajor,IndexType>>gpu_l_out_row_major_resize2(gpu_data_l_out_row_major, patchTensorRange);
+ gpu_l_out_row_major_resize2.device(sycl_device)=gpu_l_in_col_major_resize2.swap_layout().extract_image_patches(7, 7);
+ sycl_device.memcpyDeviceToHost(l_out_row_major.data(), gpu_data_l_out_row_major, patchTensorBuffSize);
+
+ VERIFY_IS_EQUAL(l_out_row_major.dimension(0), 32);
+ VERIFY_IS_EQUAL(l_out_row_major.dimension(1), 16*16);
+ VERIFY_IS_EQUAL(l_out_row_major.dimension(2), 7);
+ VERIFY_IS_EQUAL(l_out_row_major.dimension(3), 7);
+ VERIFY_IS_EQUAL(l_out_row_major.dimension(4), 32);
+
+ for (IndexType b = 0; b < 32; ++b) {
+ for (IndexType i = 0; i < 16; ++i) {
+ for (IndexType j = 0; j < 16; ++j) {
+ IndexType patchId = i+16*j;
+ for (IndexType c = 0; c < 7; ++c) {
+ for (IndexType r = 0; r < 7; ++r) {
+ for (IndexType d = 0; d < 32; ++d) {
+ DataType expected = 0.0f;
+ if (r-3+i >= 0 && c-3+j >= 0 && r-3+i < 16 && c-3+j < 16) {
+ expected = l_in_col_major(d, r-3+i, c-3+j, b);
+ }
+ // ColMajor
+ if (l_out_col_major(d, r, c, patchId, b) != expected) {
+ std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl;
+ }
+ VERIFY_IS_EQUAL(l_out_col_major(d, r, c, patchId, b), expected);
+ // RowMajor
+ if (l_out_row_major(b, patchId, c, r, d) != expected) {
+ std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl;
+ }
+ VERIFY_IS_EQUAL(l_out_row_major(b, patchId, c, r, d), expected);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // ColMajor
+ sycl_device.deallocate(gpu_data_l_in_col_major);
+ sycl_device.deallocate(gpu_data_l_out_col_major);
+ sizeDim1 = 64;
+ sizeDim2 = 13;
+ sizeDim3 = 13;
+ sizeDim4 = 32;
+ tensorColMajorRange = {{sizeDim1, sizeDim2, sizeDim3, sizeDim4}};
+ l_in_col_major.resize(tensorColMajorRange);
+ l_in_col_major.setRandom();
+ gpu_data_l_in_col_major = static_cast<DataType*>(sycl_device.allocate(l_in_col_major.size()*sizeof(DataType)));
+ TensorMap<Tensor<DataType, 4, ColMajor, IndexType>>gpu_l_in_col_major_resize3(gpu_data_l_in_col_major, tensorColMajorRange);
+
+ patchTensorRange={{sizeDim1, 3, 3, sizeDim2*sizeDim3, sizeDim4}};
+ l_out_col_major.resize(patchTensorRange);
+ patchTensorBuffSize =l_out_col_major.size()*sizeof(DataType);
+ gpu_data_l_out_col_major = static_cast<DataType*>(sycl_device.allocate(patchTensorBuffSize));
+ TensorMap<Tensor<DataType, 5, DataLayout,IndexType>>gpu_l_out_col_major_resize3(gpu_data_l_out_col_major, patchTensorRange);
+ sycl_device.memcpyHostToDevice(gpu_data_l_in_col_major, l_in_col_major.data(),(l_in_col_major.size())*sizeof(DataType));
+ gpu_l_out_col_major_resize3.device(sycl_device)=gpu_l_in_col_major_resize3.extract_image_patches(3, 3);
+ sycl_device.memcpyDeviceToHost(l_out_col_major.data(), gpu_data_l_out_col_major, patchTensorBuffSize);
+
+ VERIFY_IS_EQUAL(l_out_col_major.dimension(0), 64);
+ VERIFY_IS_EQUAL(l_out_col_major.dimension(1), 3);
+ VERIFY_IS_EQUAL(l_out_col_major.dimension(2), 3);
+ VERIFY_IS_EQUAL(l_out_col_major.dimension(3), 13*13);
+ VERIFY_IS_EQUAL(l_out_col_major.dimension(4), 32);
+
+ // RowMajor
+ sycl_device.deallocate(gpu_data_l_out_row_major);
+ patchTensorRange={{sizeDim4, sizeDim2*sizeDim3, 3, 3 ,sizeDim1}};
+ l_out_row_major.resize(patchTensorRange);
+ patchTensorBuffSize =l_out_row_major.size()*sizeof(DataType);
+ gpu_data_l_out_row_major = static_cast<DataType*>(sycl_device.allocate(patchTensorBuffSize));
+ TensorMap<Tensor<DataType, 5, RowMajor,IndexType>>gpu_l_out_row_major_resize3(gpu_data_l_out_row_major, patchTensorRange);
+ gpu_l_out_row_major_resize3.device(sycl_device)=gpu_l_in_col_major_resize3.swap_layout().extract_image_patches(3, 3);
+ sycl_device.memcpyDeviceToHost(l_out_row_major.data(), gpu_data_l_out_row_major, patchTensorBuffSize);
+
+ VERIFY_IS_EQUAL(l_out_row_major.dimension(0), 32);
+ VERIFY_IS_EQUAL(l_out_row_major.dimension(1), 13*13);
+ VERIFY_IS_EQUAL(l_out_row_major.dimension(2), 3);
+ VERIFY_IS_EQUAL(l_out_row_major.dimension(3), 3);
+ VERIFY_IS_EQUAL(l_out_row_major.dimension(4), 64);
+
+ for (IndexType b = 0; b < 32; ++b) {
+ for (IndexType i = 0; i < 13; ++i) {
+ for (IndexType j = 0; j < 13; ++j) {
+ IndexType patchId = i+13*j;
+ for (IndexType c = 0; c < 3; ++c) {
+ for (IndexType r = 0; r < 3; ++r) {
+ for (IndexType d = 0; d < 64; ++d) {
+ DataType expected = 0.0f;
+ if (r-1+i >= 0 && c-1+j >= 0 && r-1+i < 13 && c-1+j < 13) {
+ expected = l_in_col_major(d, r-1+i, c-1+j, b);
+ }
+ // ColMajor
+ if (l_out_col_major(d, r, c, patchId, b) != expected) {
+ std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl;
+ }
+ VERIFY_IS_EQUAL(l_out_col_major(d, r, c, patchId, b), expected);
+ // RowMajor
+ if (l_out_row_major(b, patchId, c, r, d) != expected) {
+ std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl;
+ }
+ VERIFY_IS_EQUAL(l_out_row_major(b, patchId, c, r, d), expected);
+ }
+ }
+ }
+ }
+ }
+ }
+ sycl_device.deallocate(gpu_data_l_in_col_major);
+ sycl_device.deallocate(gpu_data_l_out_col_major);
+ sycl_device.deallocate(gpu_data_l_out_row_major);
+}
+
+
+template<typename DataType, typename dev_Selector> void sycl_tensor_image_patch_test_per_device(dev_Selector s){
+ QueueInterface queueInterface(s);
+ auto sycl_device = Eigen::SyclDevice(&queueInterface);
+ test_simple_image_patch_sycl<DataType, int64_t>(sycl_device);
+ test_patch_padding_valid_sycl<DataType, int64_t>(sycl_device);
+ test_patch_padding_valid_same_value_sycl<DataType, int64_t>(sycl_device);
+ test_patch_padding_same_sycl<DataType, int64_t>(sycl_device);
+ test_patch_no_extra_dim_sycl<DataType, int64_t>(sycl_device);
+ test_imagenet_patches_sycl<DataType, int64_t>(sycl_device);
+}
+
+EIGEN_DECLARE_TEST(cxx11_tensor_image_patch_sycl)
+{
+ for (const auto& device : Eigen::get_sycl_supported_devices()) {
+ CALL_SUBTEST(sycl_tensor_image_patch_test_per_device<float>(device));
+ }
+}
diff --git a/unsupported/test/cxx11_tensor_index_list.cpp b/unsupported/test/cxx11_tensor_index_list.cpp
index 4cf5df666..2166532c8 100644
--- a/unsupported/test/cxx11_tensor_index_list.cpp
+++ b/unsupported/test/cxx11_tensor_index_list.cpp
@@ -22,9 +22,9 @@ static void test_static_index_list()
VERIFY_IS_EQUAL(internal::array_get<0>(reduction_axis), 0);
VERIFY_IS_EQUAL(internal::array_get<1>(reduction_axis), 1);
VERIFY_IS_EQUAL(internal::array_get<2>(reduction_axis), 2);
- VERIFY_IS_EQUAL(static_cast<DenseIndex>(reduction_axis[0]), 0);
- VERIFY_IS_EQUAL(static_cast<DenseIndex>(reduction_axis[1]), 1);
- VERIFY_IS_EQUAL(static_cast<DenseIndex>(reduction_axis[2]), 2);
+ VERIFY_IS_EQUAL(static_cast<Index>(reduction_axis[0]), 0);
+ VERIFY_IS_EQUAL(static_cast<Index>(reduction_axis[1]), 1);
+ VERIFY_IS_EQUAL(static_cast<Index>(reduction_axis[2]), 2);
EIGEN_STATIC_ASSERT((internal::array_get<0>(reduction_axis) == 0), YOU_MADE_A_PROGRAMMING_MISTAKE);
EIGEN_STATIC_ASSERT((internal::array_get<1>(reduction_axis) == 1), YOU_MADE_A_PROGRAMMING_MISTAKE);
@@ -167,19 +167,18 @@ static void test_type2indexpair_list()
typedef Eigen::IndexPairList<Eigen::type2indexpair<0,10>> Dims0;
typedef Eigen::IndexPairList<Eigen::type2indexpair<0,10>, Eigen::type2indexpair<1,11>, Eigen::type2indexpair<2,12>> Dims2_a;
- typedef Eigen::IndexPairList<Eigen::type2indexpair<0,10>, Eigen::IndexPair<DenseIndex>, Eigen::type2indexpair<2,12>> Dims2_b;
- typedef Eigen::IndexPairList<Eigen::IndexPair<DenseIndex>, Eigen::type2indexpair<1,11>, Eigen::IndexPair<DenseIndex>> Dims2_c;
+ typedef Eigen::IndexPairList<Eigen::type2indexpair<0,10>, Eigen::IndexPair<Index>, Eigen::type2indexpair<2,12>> Dims2_b;
+ typedef Eigen::IndexPairList<Eigen::IndexPair<Index>, Eigen::type2indexpair<1,11>, Eigen::IndexPair<Index>> Dims2_c;
- Dims0 d0;
Dims2_a d2_a;
Dims2_b d2_b;
- d2_b.set(1, Eigen::IndexPair<DenseIndex>(1,11));
+ d2_b.set(1, Eigen::IndexPair<Index>(1,11));
Dims2_c d2_c;
- d2_c.set(0, Eigen::IndexPair<DenseIndex>(Eigen::IndexPair<DenseIndex>(0,10)));
- d2_c.set(1, Eigen::IndexPair<DenseIndex>(1,11)); // setting type2indexpair to correct value.
- d2_c.set(2, Eigen::IndexPair<DenseIndex>(2,12));
+ d2_c.set(0, Eigen::IndexPair<Index>(Eigen::IndexPair<Index>(0,10)));
+ d2_c.set(1, Eigen::IndexPair<Index>(1,11)); // setting type2indexpair to correct value.
+ d2_c.set(2, Eigen::IndexPair<Index>(2,12));
VERIFY_IS_EQUAL(d2_a[0].first, 0);
VERIFY_IS_EQUAL(d2_a[0].second, 10);
@@ -278,9 +277,9 @@ static void test_dynamic_index_list()
VERIFY_IS_EQUAL(internal::array_get<0>(reduction_axis), 2);
VERIFY_IS_EQUAL(internal::array_get<1>(reduction_axis), 1);
VERIFY_IS_EQUAL(internal::array_get<2>(reduction_axis), 0);
- VERIFY_IS_EQUAL(static_cast<DenseIndex>(reduction_axis[0]), 2);
- VERIFY_IS_EQUAL(static_cast<DenseIndex>(reduction_axis[1]), 1);
- VERIFY_IS_EQUAL(static_cast<DenseIndex>(reduction_axis[2]), 0);
+ VERIFY_IS_EQUAL(static_cast<Index>(reduction_axis[0]), 2);
+ VERIFY_IS_EQUAL(static_cast<Index>(reduction_axis[1]), 1);
+ VERIFY_IS_EQUAL(static_cast<Index>(reduction_axis[2]), 0);
Tensor<float, 1> result = tensor.sum(reduction_axis);
for (int i = 0; i < result.size(); ++i) {
@@ -310,10 +309,10 @@ static void test_mixed_index_list()
VERIFY_IS_EQUAL(internal::array_get<1>(reduction_axis), 1);
VERIFY_IS_EQUAL(internal::array_get<2>(reduction_axis), 2);
VERIFY_IS_EQUAL(internal::array_get<3>(reduction_axis), 3);
- VERIFY_IS_EQUAL(static_cast<DenseIndex>(reduction_axis[0]), 0);
- VERIFY_IS_EQUAL(static_cast<DenseIndex>(reduction_axis[1]), 1);
- VERIFY_IS_EQUAL(static_cast<DenseIndex>(reduction_axis[2]), 2);
- VERIFY_IS_EQUAL(static_cast<DenseIndex>(reduction_axis[3]), 3);
+ VERIFY_IS_EQUAL(static_cast<Index>(reduction_axis[0]), 0);
+ VERIFY_IS_EQUAL(static_cast<Index>(reduction_axis[1]), 1);
+ VERIFY_IS_EQUAL(static_cast<Index>(reduction_axis[2]), 2);
+ VERIFY_IS_EQUAL(static_cast<Index>(reduction_axis[3]), 3);
typedef IndexList<type2index<0>, int, type2index<2>, int> ReductionIndices;
ReductionIndices reduction_indices;
@@ -373,7 +372,7 @@ static void test_dim_check()
#endif
-void test_cxx11_tensor_index_list()
+EIGEN_DECLARE_TEST(cxx11_tensor_index_list)
{
#ifdef EIGEN_HAS_INDEX_LIST
CALL_SUBTEST(test_static_index_list());
diff --git a/unsupported/test/cxx11_tensor_inflation.cpp b/unsupported/test/cxx11_tensor_inflation.cpp
index 4997935e9..75089e856 100644
--- a/unsupported/test/cxx11_tensor_inflation.cpp
+++ b/unsupported/test/cxx11_tensor_inflation.cpp
@@ -74,7 +74,7 @@ static void test_simple_inflation()
}
}
-void test_cxx11_tensor_inflation()
+EIGEN_DECLARE_TEST(cxx11_tensor_inflation)
{
CALL_SUBTEST(test_simple_inflation<ColMajor>());
CALL_SUBTEST(test_simple_inflation<RowMajor>());
diff --git a/unsupported/test/cxx11_tensor_inflation_sycl.cpp b/unsupported/test/cxx11_tensor_inflation_sycl.cpp
new file mode 100644
index 000000000..521ae0cc3
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_inflation_sycl.cpp
@@ -0,0 +1,136 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2016
+// Mehdi Goli Codeplay Software Ltd.
+// Ralph Potter Codeplay Software Ltd.
+// Luke Iwanski Codeplay Software Ltd.
+// Contact: <eigen@codeplay.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#define EIGEN_TEST_NO_LONGDOUBLE
+#define EIGEN_TEST_NO_COMPLEX
+
+#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int64_t
+#define EIGEN_USE_SYCL
+
+#include "main.h"
+#include <unsupported/Eigen/CXX11/Tensor>
+
+using Eigen::Tensor;
+
+// Inflation: for each dimension, the inflated size is ((dim - 1) * stride[dim]) + 1.
+//
+// For example, a 1-D vector of size 3 with values (4, 4, 4) and an inflation stride of 3
+// becomes a tensor of size (3 - 1) * 3 + 1 = 7 with values
+// (4, 0, 0, 4, 0, 0, 4).
+
+template <typename DataType, int DataLayout, typename IndexType>
+void test_simple_inflation_sycl(const Eigen::SyclDevice &sycl_device) {
+
+
+ IndexType sizeDim1 = 2;
+ IndexType sizeDim2 = 3;
+ IndexType sizeDim3 = 5;
+ IndexType sizeDim4 = 7;
+ array<IndexType, 4> tensorRange = {{sizeDim1, sizeDim2, sizeDim3, sizeDim4}};
+ Tensor<DataType, 4, DataLayout,IndexType> tensor(tensorRange);
+ Tensor<DataType, 4, DataLayout,IndexType> no_stride(tensorRange);
+ tensor.setRandom();
+
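+ // With unit strides, inflation is a no-op: the output has the same dimensions and values as the input.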
+ array<IndexType, 4> strides;
+ strides[0] = 1;
+ strides[1] = 1;
+ strides[2] = 1;
+ strides[3] = 1;
+
+
+ const size_t tensorBuffSize =tensor.size()*sizeof(DataType);
+ DataType* gpu_data_tensor = static_cast<DataType*>(sycl_device.allocate(tensorBuffSize));
+ DataType* gpu_data_no_stride = static_cast<DataType*>(sycl_device.allocate(tensorBuffSize));
+
+ TensorMap<Tensor<DataType, 4, DataLayout,IndexType>> gpu_tensor(gpu_data_tensor, tensorRange);
+ TensorMap<Tensor<DataType, 4, DataLayout,IndexType>> gpu_no_stride(gpu_data_no_stride, tensorRange);
+
+ sycl_device.memcpyHostToDevice(gpu_data_tensor, tensor.data(), tensorBuffSize);
+ gpu_no_stride.device(sycl_device)=gpu_tensor.inflate(strides);
+ sycl_device.memcpyDeviceToHost(no_stride.data(), gpu_data_no_stride, tensorBuffSize);
+
+ VERIFY_IS_EQUAL(no_stride.dimension(0), sizeDim1);
+ VERIFY_IS_EQUAL(no_stride.dimension(1), sizeDim2);
+ VERIFY_IS_EQUAL(no_stride.dimension(2), sizeDim3);
+ VERIFY_IS_EQUAL(no_stride.dimension(3), sizeDim4);
+
+ for (IndexType i = 0; i < 2; ++i) {
+ for (IndexType j = 0; j < 3; ++j) {
+ for (IndexType k = 0; k < 5; ++k) {
+ for (IndexType l = 0; l < 7; ++l) {
+ VERIFY_IS_EQUAL(tensor(i,j,k,l), no_stride(i,j,k,l));
+ }
+ }
+ }
+ }
+
+
+ strides[0] = 2;
+ strides[1] = 4;
+ strides[2] = 2;
+ strides[3] = 3;
+
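+ // Expected inflated sizes, from ((dim - 1) * stride) + 1:
+ // (2-1)*2+1 = 3, (3-1)*4+1 = 9, (5-1)*2+1 = 9, (7-1)*3+1 = 19.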
+ IndexType inflatedSizeDim1 = 3;
+ IndexType inflatedSizeDim2 = 9;
+ IndexType inflatedSizeDim3 = 9;
+ IndexType inflatedSizeDim4 = 19;
+ array<IndexType, 4> inflatedTensorRange = {{inflatedSizeDim1, inflatedSizeDim2, inflatedSizeDim3, inflatedSizeDim4}};
+
+ Tensor<DataType, 4, DataLayout, IndexType> inflated(inflatedTensorRange);
+
+ const size_t inflatedTensorBuffSize =inflated.size()*sizeof(DataType);
+ DataType* gpu_data_inflated = static_cast<DataType*>(sycl_device.allocate(inflatedTensorBuffSize));
+ TensorMap<Tensor<DataType, 4, DataLayout, IndexType>> gpu_inflated(gpu_data_inflated, inflatedTensorRange);
+ gpu_inflated.device(sycl_device)=gpu_tensor.inflate(strides);
+ sycl_device.memcpyDeviceToHost(inflated.data(), gpu_data_inflated, inflatedTensorBuffSize);
+
+ VERIFY_IS_EQUAL(inflated.dimension(0), inflatedSizeDim1);
+ VERIFY_IS_EQUAL(inflated.dimension(1), inflatedSizeDim2);
+ VERIFY_IS_EQUAL(inflated.dimension(2), inflatedSizeDim3);
+ VERIFY_IS_EQUAL(inflated.dimension(3), inflatedSizeDim4);
+
+ for (IndexType i = 0; i < inflatedSizeDim1; ++i) {
+ for (IndexType j = 0; j < inflatedSizeDim2; ++j) {
+ for (IndexType k = 0; k < inflatedSizeDim3; ++k) {
+ for (IndexType l = 0; l < inflatedSizeDim4; ++l) {
+ if (i % strides[0] == 0 &&
+ j % strides[1] == 0 &&
+ k % strides[2] == 0 &&
+ l % strides[3] == 0) {
+ VERIFY_IS_EQUAL(inflated(i,j,k,l),
+ tensor(i/strides[0], j/strides[1], k/strides[2], l/strides[3]));
+ } else {
+ VERIFY_IS_EQUAL(0, inflated(i,j,k,l));
+ }
+ }
+ }
+ }
+ }
+ sycl_device.deallocate(gpu_data_tensor);
+ sycl_device.deallocate(gpu_data_no_stride);
+ sycl_device.deallocate(gpu_data_inflated);
+}
+
+template<typename DataType, typename dev_Selector> void sycl_inflation_test_per_device(dev_Selector s){
+ QueueInterface queueInterface(s);
+ auto sycl_device = Eigen::SyclDevice(&queueInterface);
+ test_simple_inflation_sycl<DataType, RowMajor, int64_t>(sycl_device);
+ test_simple_inflation_sycl<DataType, ColMajor, int64_t>(sycl_device);
+}
+EIGEN_DECLARE_TEST(cxx11_tensor_inflation_sycl)
+{
+ for (const auto& device :Eigen::get_sycl_supported_devices()) {
+ CALL_SUBTEST(sycl_inflation_test_per_device<float>(device));
+ }
+}
diff --git a/unsupported/test/cxx11_tensor_intdiv.cpp b/unsupported/test/cxx11_tensor_intdiv.cpp
index 8e2b70b75..d18a05ec4 100644
--- a/unsupported/test/cxx11_tensor_intdiv.cpp
+++ b/unsupported/test/cxx11_tensor_intdiv.cpp
@@ -135,7 +135,7 @@ void test_specific() {
VERIFY_IS_EQUAL(result, result_op);
}
-void test_cxx11_tensor_intdiv()
+EIGEN_DECLARE_TEST(cxx11_tensor_intdiv)
{
CALL_SUBTEST_1(test_signed_32bit());
CALL_SUBTEST_2(test_unsigned_32bit());
diff --git a/unsupported/test/cxx11_tensor_io.cpp b/unsupported/test/cxx11_tensor_io.cpp
index 489960529..2c638f9bf 100644
--- a/unsupported/test/cxx11_tensor_io.cpp
+++ b/unsupported/test/cxx11_tensor_io.cpp
@@ -119,7 +119,7 @@ static void test_output_const()
}
-void test_cxx11_tensor_io()
+EIGEN_DECLARE_TEST(cxx11_tensor_io)
{
CALL_SUBTEST(test_output_0d<ColMajor>());
CALL_SUBTEST(test_output_0d<RowMajor>());
diff --git a/unsupported/test/cxx11_tensor_layout_swap.cpp b/unsupported/test/cxx11_tensor_layout_swap.cpp
index ae297a9da..efb333360 100644
--- a/unsupported/test/cxx11_tensor_layout_swap.cpp
+++ b/unsupported/test/cxx11_tensor_layout_swap.cpp
@@ -54,7 +54,7 @@ static void test_swap_as_lvalue()
}
-void test_cxx11_tensor_layout_swap()
+EIGEN_DECLARE_TEST(cxx11_tensor_layout_swap)
{
CALL_SUBTEST(test_simple_swap());
CALL_SUBTEST(test_swap_as_lvalue());
diff --git a/unsupported/test/cxx11_tensor_layout_swap_sycl.cpp b/unsupported/test/cxx11_tensor_layout_swap_sycl.cpp
new file mode 100644
index 000000000..9546b911c
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_layout_swap_sycl.cpp
@@ -0,0 +1,126 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2016
+// Mehdi Goli Codeplay Software Ltd.
+// Ralph Potter Codeplay Software Ltd.
+// Luke Iwanski Codeplay Software Ltd.
+// Contact: <eigen@codeplay.com>
+// Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#define EIGEN_TEST_NO_LONGDOUBLE
+#define EIGEN_TEST_NO_COMPLEX
+
+#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int64_t
+#define EIGEN_USE_SYCL
+
+#include "main.h"
+
+#include <Eigen/CXX11/Tensor>
+
+using Eigen::Tensor;
+
+template <typename DataType, typename IndexType>
+static void test_simple_swap_sycl(const Eigen::SyclDevice& sycl_device)
+{
+ IndexType sizeDim1 = 2;
+ IndexType sizeDim2 = 3;
+ IndexType sizeDim3 = 7;
+ array<IndexType, 3> tensorColRange = {{sizeDim1, sizeDim2, sizeDim3}};
+ array<IndexType, 3> tensorRowRange = {{sizeDim3, sizeDim2, sizeDim1}};
+
+
+ Tensor<DataType, 3, ColMajor, IndexType> tensor1(tensorColRange);
+ Tensor<DataType, 3, RowMajor, IndexType> tensor2(tensorRowRange);
+ tensor1.setRandom();
+
+ DataType* gpu_data1 = static_cast<DataType*>(sycl_device.allocate(tensor1.size()*sizeof(DataType)));
+ DataType* gpu_data2 = static_cast<DataType*>(sycl_device.allocate(tensor2.size()*sizeof(DataType)));
+ TensorMap<Tensor<DataType, 3, ColMajor, IndexType>> gpu1(gpu_data1, tensorColRange);
+ TensorMap<Tensor<DataType, 3, RowMajor, IndexType>> gpu2(gpu_data2, tensorRowRange);
+
+ sycl_device.memcpyHostToDevice(gpu_data1, tensor1.data(),(tensor1.size())*sizeof(DataType));
+ gpu2.device(sycl_device)=gpu1.swap_layout();
+ sycl_device.memcpyDeviceToHost(tensor2.data(), gpu_data2,(tensor2.size())*sizeof(DataType));
+
+ // Equivalent host-only test:
+ //   Tensor<float, 3, ColMajor> tensor(2,3,7);
+ //   tensor.setRandom();
+ //   Tensor<float, 3, RowMajor> tensor2 = tensor.swap_layout();
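+ // swap_layout() reverses the dimension order, so tensor1(i,j,k) must equal tensor2(k,j,i).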
+ VERIFY_IS_EQUAL(tensor1.dimension(0), tensor2.dimension(2));
+ VERIFY_IS_EQUAL(tensor1.dimension(1), tensor2.dimension(1));
+ VERIFY_IS_EQUAL(tensor1.dimension(2), tensor2.dimension(0));
+
+ for (IndexType i = 0; i < 2; ++i) {
+ for (IndexType j = 0; j < 3; ++j) {
+ for (IndexType k = 0; k < 7; ++k) {
+ VERIFY_IS_EQUAL(tensor1(i,j,k), tensor2(k,j,i));
+ }
+ }
+ }
+ sycl_device.deallocate(gpu_data1);
+ sycl_device.deallocate(gpu_data2);
+}
+
+template <typename DataType, typename IndexType>
+static void test_swap_as_lvalue_sycl(const Eigen::SyclDevice& sycl_device)
+{
+
+ IndexType sizeDim1 = 2;
+ IndexType sizeDim2 = 3;
+ IndexType sizeDim3 = 7;
+ array<IndexType, 3> tensorColRange = {{sizeDim1, sizeDim2, sizeDim3}};
+ array<IndexType, 3> tensorRowRange = {{sizeDim3, sizeDim2, sizeDim1}};
+
+ Tensor<DataType, 3, ColMajor, IndexType> tensor1(tensorColRange);
+ Tensor<DataType, 3, RowMajor, IndexType> tensor2(tensorRowRange);
+ tensor1.setRandom();
+
+ DataType* gpu_data1 = static_cast<DataType*>(sycl_device.allocate(tensor1.size()*sizeof(DataType)));
+ DataType* gpu_data2 = static_cast<DataType*>(sycl_device.allocate(tensor2.size()*sizeof(DataType)));
+ TensorMap<Tensor<DataType, 3, ColMajor, IndexType>> gpu1(gpu_data1, tensorColRange);
+ TensorMap<Tensor<DataType, 3, RowMajor, IndexType>> gpu2(gpu_data2, tensorRowRange);
+
+ sycl_device.memcpyHostToDevice(gpu_data1, tensor1.data(),(tensor1.size())*sizeof(DataType));
+ gpu2.swap_layout().device(sycl_device)=gpu1;
+ sycl_device.memcpyDeviceToHost(tensor2.data(), gpu_data2,(tensor2.size())*sizeof(DataType));
+
+ // Equivalent host-only test:
+ //   Tensor<float, 3, ColMajor> tensor(2,3,7);
+ //   tensor.setRandom();
+ //   Tensor<float, 3, RowMajor> tensor2(7,3,2);
+ //   tensor2.swap_layout() = tensor;
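+ // Assigning through swap_layout() writes gpu1 into tensor2 with the dimension order
+ // reversed, so the same element-wise relation tensor1(i,j,k) == tensor2(k,j,i) holds.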
+ VERIFY_IS_EQUAL(tensor1.dimension(0), tensor2.dimension(2));
+ VERIFY_IS_EQUAL(tensor1.dimension(1), tensor2.dimension(1));
+ VERIFY_IS_EQUAL(tensor1.dimension(2), tensor2.dimension(0));
+
+ for (IndexType i = 0; i < 2; ++i) {
+ for (IndexType j = 0; j < 3; ++j) {
+ for (IndexType k = 0; k < 7; ++k) {
+ VERIFY_IS_EQUAL(tensor1(i,j,k), tensor2(k,j,i));
+ }
+ }
+ }
+ sycl_device.deallocate(gpu_data1);
+ sycl_device.deallocate(gpu_data2);
+}
+
+
+template<typename DataType, typename dev_Selector> void sycl_tensor_layout_swap_test_per_device(dev_Selector s){
+ QueueInterface queueInterface(s);
+ auto sycl_device = Eigen::SyclDevice(&queueInterface);
+ test_simple_swap_sycl<DataType, int64_t>(sycl_device);
+ test_swap_as_lvalue_sycl<DataType, int64_t>(sycl_device);
+}
+EIGEN_DECLARE_TEST(cxx11_tensor_layout_swap_sycl)
+{
+ for (const auto& device :Eigen::get_sycl_supported_devices()) {
+ CALL_SUBTEST(sycl_tensor_layout_swap_test_per_device<float>(device));
+ }
+}
diff --git a/unsupported/test/cxx11_tensor_lvalue.cpp b/unsupported/test/cxx11_tensor_lvalue.cpp
index 071f5b406..6ba9a212d 100644
--- a/unsupported/test/cxx11_tensor_lvalue.cpp
+++ b/unsupported/test/cxx11_tensor_lvalue.cpp
@@ -36,7 +36,7 @@ static void test_compound_assignment()
}
-void test_cxx11_tensor_lvalue()
+EIGEN_DECLARE_TEST(cxx11_tensor_lvalue)
{
CALL_SUBTEST(test_compound_assignment());
}
diff --git a/unsupported/test/cxx11_tensor_map.cpp b/unsupported/test/cxx11_tensor_map.cpp
index 3db0ee7c0..ce608aca7 100644
--- a/unsupported/test/cxx11_tensor_map.cpp
+++ b/unsupported/test/cxx11_tensor_map.cpp
@@ -265,7 +265,7 @@ static void test_casting()
VERIFY_IS_EQUAL(sum1, 861);
}
-void test_cxx11_tensor_map()
+EIGEN_DECLARE_TEST(cxx11_tensor_map)
{
CALL_SUBTEST(test_0d());
CALL_SUBTEST(test_1d());
diff --git a/unsupported/test/cxx11_tensor_math.cpp b/unsupported/test/cxx11_tensor_math.cpp
index 61c742a16..82a1a26d8 100644
--- a/unsupported/test/cxx11_tensor_math.cpp
+++ b/unsupported/test/cxx11_tensor_math.cpp
@@ -39,7 +39,7 @@ static void test_sigmoid()
}
-void test_cxx11_tensor_math()
+EIGEN_DECLARE_TEST(cxx11_tensor_math)
{
CALL_SUBTEST(test_tanh());
CALL_SUBTEST(test_sigmoid());
diff --git a/unsupported/test/cxx11_tensor_mixed_indices.cpp b/unsupported/test/cxx11_tensor_mixed_indices.cpp
index 4fba6fdd1..ee2616fd7 100644
--- a/unsupported/test/cxx11_tensor_mixed_indices.cpp
+++ b/unsupported/test/cxx11_tensor_mixed_indices.cpp
@@ -47,7 +47,7 @@ static void test_simple()
}
-void test_cxx11_tensor_mixed_indices()
+EIGEN_DECLARE_TEST(cxx11_tensor_mixed_indices)
{
CALL_SUBTEST(test_simple());
}
diff --git a/unsupported/test/cxx11_tensor_morphing.cpp b/unsupported/test/cxx11_tensor_morphing.cpp
index f7de43110..4cbe15b63 100644
--- a/unsupported/test/cxx11_tensor_morphing.cpp
+++ b/unsupported/test/cxx11_tensor_morphing.cpp
@@ -41,6 +41,28 @@ static void test_simple_reshape()
}
}
+template <typename>
+static void test_static_reshape() {
+#if defined(EIGEN_HAS_INDEX_LIST)
+ using Eigen::type2index;
+
+ Tensor<float, 5> tensor(2, 3, 1, 7, 1);
+ tensor.setRandom();
+
+ // New dimensions: [2, 3, 7]
+ Eigen::IndexList<type2index<2>, type2index<3>, type2index<7>> dim;
+ Tensor<float, 3> reshaped = tensor.reshape(dim);
+
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 7; ++k) {
+ VERIFY_IS_EQUAL(tensor(i, j, 0, k, 0), reshaped(i, j, k));
+ }
+ }
+ }
+#endif
+}
+
template<typename>
static void test_reshape_in_expr() {
MatrixXf m1(2,3*5*7*11);
@@ -459,9 +481,10 @@ static void test_composition()
}
-void test_cxx11_tensor_morphing()
+EIGEN_DECLARE_TEST(cxx11_tensor_morphing)
{
CALL_SUBTEST_1(test_simple_reshape<void>());
+ CALL_SUBTEST_1(test_static_reshape<void>());
CALL_SUBTEST_1(test_reshape_in_expr<void>());
CALL_SUBTEST_1(test_reshape_as_lvalue<void>());
diff --git a/unsupported/test/cxx11_tensor_morphing_sycl.cpp b/unsupported/test/cxx11_tensor_morphing_sycl.cpp
index 9b521bc6b..93dabe3ec 100644
--- a/unsupported/test/cxx11_tensor_morphing_sycl.cpp
+++ b/unsupported/test/cxx11_tensor_morphing_sycl.cpp
@@ -15,7 +15,7 @@
#define EIGEN_TEST_NO_LONGDOUBLE
#define EIGEN_TEST_NO_COMPLEX
-#define EIGEN_TEST_FUNC cxx11_tensor_morphing_sycl
+
#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int64_t
#define EIGEN_USE_SYCL
@@ -240,7 +240,7 @@ template<typename DataType, typename dev_Selector> void sycl_morphing_test_per_d
test_strided_slice_write_sycl<DataType, ColMajor, int64_t>(sycl_device);
test_strided_slice_write_sycl<DataType, RowMajor, int64_t>(sycl_device);
}
-void test_cxx11_tensor_morphing_sycl()
+EIGEN_DECLARE_TEST(cxx11_tensor_morphing_sycl)
{
for (const auto& device :Eigen::get_sycl_supported_devices()) {
CALL_SUBTEST(sycl_morphing_test_per_device<float>(device));
diff --git a/unsupported/test/cxx11_tensor_move.cpp b/unsupported/test/cxx11_tensor_move.cpp
new file mode 100644
index 000000000..0ab2b7786
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_move.cpp
@@ -0,0 +1,81 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2017 Viktor Csomor <viktor.csomor@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+
+#include <Eigen/CXX11/Tensor>
+#include <utility>
+
+using Eigen::Tensor;
+using Eigen::RowMajor;
+
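+// Maps the flat index i in [0, 8) to the (x, y, z) coordinates of a 2x2x2 tensor.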
+static void calc_indices(int i, int& x, int& y, int& z)
+{
+ x = i / 4;
+ y = (i % 4) / 2;
+ z = i % 2;
+}
+
+static void test_move()
+{
+ int x;
+ int y;
+ int z;
+
+ Tensor<int,3> tensor1(2, 2, 2);
+ Tensor<int,3,RowMajor> tensor2(2, 2, 2);
+
+ for (int i = 0; i < 8; i++)
+ {
+ calc_indices(i, x, y, z);
+ tensor1(x,y,z) = i;
+ tensor2(x,y,z) = 2 * i;
+ }
+
+ // Invokes the move constructor.
+ Tensor<int,3> moved_tensor1 = std::move(tensor1);
+ Tensor<int,3,RowMajor> moved_tensor2 = std::move(tensor2);
+
+ VERIFY_IS_EQUAL(tensor1.size(), 0);
+ VERIFY_IS_EQUAL(tensor2.size(), 0);
+
+ for (int i = 0; i < 8; i++)
+ {
+ calc_indices(i, x, y, z);
+ VERIFY_IS_EQUAL(moved_tensor1(x,y,z), i);
+ VERIFY_IS_EQUAL(moved_tensor2(x,y,z), 2 * i);
+ }
+
+ Tensor<int,3> moved_tensor3(2,2,2);
+ Tensor<int,3,RowMajor> moved_tensor4(2,2,2);
+
+ moved_tensor3.setZero();
+ moved_tensor4.setZero();
+
+ // Invokes the move assignment operator.
+ moved_tensor3 = std::move(moved_tensor1);
+ moved_tensor4 = std::move(moved_tensor2);
+
+ VERIFY_IS_EQUAL(moved_tensor1.size(), 8);
+ VERIFY_IS_EQUAL(moved_tensor2.size(), 8);
+
+ for (int i = 0; i < 8; i++)
+ {
+ calc_indices(i, x, y, z);
+ VERIFY_IS_EQUAL(moved_tensor1(x,y,z), 0);
+ VERIFY_IS_EQUAL(moved_tensor2(x,y,z), 0);
+ VERIFY_IS_EQUAL(moved_tensor3(x,y,z), i);
+ VERIFY_IS_EQUAL(moved_tensor4(x,y,z), 2 * i);
+ }
+}
+
+EIGEN_DECLARE_TEST(cxx11_tensor_move)
+{
+ CALL_SUBTEST(test_move());
+}
diff --git a/unsupported/test/cxx11_tensor_notification.cpp b/unsupported/test/cxx11_tensor_notification.cpp
index 183ef02c1..f63fee013 100644
--- a/unsupported/test/cxx11_tensor_notification.cpp
+++ b/unsupported/test/cxx11_tensor_notification.cpp
@@ -65,7 +65,7 @@ static void test_notification_multiple()
VERIFY_IS_EQUAL(counter, 4);
}
-void test_cxx11_tensor_notification()
+EIGEN_DECLARE_TEST(cxx11_tensor_notification)
{
CALL_SUBTEST(test_notification_single());
CALL_SUBTEST(test_notification_multiple());
diff --git a/unsupported/test/cxx11_tensor_of_complex.cpp b/unsupported/test/cxx11_tensor_of_complex.cpp
index e9d1b2d3c..99e18076a 100644
--- a/unsupported/test/cxx11_tensor_of_complex.cpp
+++ b/unsupported/test/cxx11_tensor_of_complex.cpp
@@ -94,7 +94,7 @@ static void test_contractions()
}
-void test_cxx11_tensor_of_complex()
+EIGEN_DECLARE_TEST(cxx11_tensor_of_complex)
{
CALL_SUBTEST(test_additions());
CALL_SUBTEST(test_abs());
diff --git a/unsupported/test/cxx11_tensor_of_const_values.cpp b/unsupported/test/cxx11_tensor_of_const_values.cpp
index f179a0c21..344d678ef 100644
--- a/unsupported/test/cxx11_tensor_of_const_values.cpp
+++ b/unsupported/test/cxx11_tensor_of_const_values.cpp
@@ -97,7 +97,7 @@ static void test_plus_equal()
}
-void test_cxx11_tensor_of_const_values()
+EIGEN_DECLARE_TEST(cxx11_tensor_of_const_values)
{
CALL_SUBTEST(test_assign());
CALL_SUBTEST(test_plus());
diff --git a/unsupported/test/cxx11_tensor_of_float16_cuda.cu b/unsupported/test/cxx11_tensor_of_float16_gpu.cu
index 908a5e5a9..4d74e6138 100644
--- a/unsupported/test/cxx11_tensor_of_float16_cuda.cu
+++ b/unsupported/test/cxx11_tensor_of_float16_gpu.cu
@@ -9,21 +9,19 @@
#define EIGEN_TEST_NO_LONGDOUBLE
#define EIGEN_TEST_NO_COMPLEX
-#define EIGEN_TEST_FUNC cxx11_tensor_of_float16_cuda
+
#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int
#define EIGEN_USE_GPU
-#if defined __CUDACC_VER__ && __CUDACC_VER__ >= 70500
-#include <cuda_fp16.h>
-#endif
#include "main.h"
#include <unsupported/Eigen/CXX11/Tensor>
+
using Eigen::Tensor;
template<typename>
-void test_cuda_numext() {
- Eigen::CudaStreamDevice stream;
+void test_gpu_numext() {
+ Eigen::GpuStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
int num_elem = 101;
@@ -59,11 +57,11 @@ void test_cuda_numext() {
}
-#ifdef EIGEN_HAS_CUDA_FP16
+#ifdef EIGEN_HAS_GPU_FP16
template<typename>
-void test_cuda_conversion() {
- Eigen::CudaStreamDevice stream;
+void test_gpu_conversion() {
+ Eigen::GpuStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
int num_elem = 101;
@@ -97,8 +95,8 @@ void test_cuda_conversion() {
}
template<typename>
-void test_cuda_unary() {
- Eigen::CudaStreamDevice stream;
+void test_gpu_unary() {
+ Eigen::GpuStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
int num_elem = 101;
@@ -134,8 +132,8 @@ void test_cuda_unary() {
}
template<typename>
-void test_cuda_elementwise() {
- Eigen::CudaStreamDevice stream;
+void test_gpu_elementwise() {
+ Eigen::GpuStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
int num_elem = 101;
@@ -176,8 +174,8 @@ void test_cuda_elementwise() {
}
template<typename>
-void test_cuda_trancendental() {
- Eigen::CudaStreamDevice stream;
+void test_gpu_trancendental() {
+ Eigen::GpuStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
int num_elem = 101;
@@ -249,7 +247,7 @@ void test_cuda_trancendental() {
}
for (int i = 0; i < num_elem; ++i) {
std::cout << "Checking elemwise log " << i << " input = " << input2(i) << " full = " << full_prec2(i) << " half = " << half_prec2(i) << std::endl;
- if(std::abs(input2(i)-1.f)<0.05f) // log lacks accurary nearby 1
+ if(std::abs(input2(i)-1.f)<0.05f) // log lacks accuracy nearby 1
VERIFY_IS_APPROX(full_prec2(i)+Eigen::half(0.1f), half_prec2(i)+Eigen::half(0.1f));
else
VERIFY_IS_APPROX(full_prec2(i), half_prec2(i));
@@ -270,8 +268,8 @@ void test_cuda_trancendental() {
}
template<typename>
-void test_cuda_contractions() {
- Eigen::CudaStreamDevice stream;
+void test_gpu_contractions() {
+ Eigen::GpuStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
int rows = 23;
int cols = 23;
@@ -321,12 +319,12 @@ void test_cuda_contractions() {
}
template<typename>
-void test_cuda_reductions(int size1, int size2, int redux) {
+void test_gpu_reductions(int size1, int size2, int redux) {
std::cout << "Reducing " << size1 << " by " << size2
<< " tensor along dim " << redux << std::endl;
- Eigen::CudaStreamDevice stream;
+ Eigen::GpuStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
int num_elem = size1*size2;
int result_size = (redux == 1 ? size1 : size2);
@@ -370,20 +368,20 @@ void test_cuda_reductions(int size1, int size2, int redux) {
}
template<typename>
-void test_cuda_reductions() {
- test_cuda_reductions<void>(13, 13, 0);
- test_cuda_reductions<void>(13, 13, 1);
+void test_gpu_reductions() {
+ test_gpu_reductions<void>(13, 13, 0);
+ test_gpu_reductions<void>(13, 13, 1);
- test_cuda_reductions<void>(35, 36, 0);
- test_cuda_reductions<void>(35, 36, 1);
+ test_gpu_reductions<void>(35, 36, 0);
+ test_gpu_reductions<void>(35, 36, 1);
- test_cuda_reductions<void>(36, 35, 0);
- test_cuda_reductions<void>(36, 35, 1);
+ test_gpu_reductions<void>(36, 35, 0);
+ test_gpu_reductions<void>(36, 35, 1);
}
template<typename>
-void test_cuda_full_reductions() {
- Eigen::CudaStreamDevice stream;
+void test_gpu_full_reductions() {
+ Eigen::GpuStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
int size = 13;
int num_elem = size*size;
@@ -431,9 +429,9 @@ void test_cuda_full_reductions() {
}
template<typename>
-void test_cuda_forced_evals() {
+void test_gpu_forced_evals() {
- Eigen::CudaStreamDevice stream;
+ Eigen::GpuStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
int num_elem = 101;
@@ -481,20 +479,20 @@ void test_cuda_forced_evals() {
#endif
-void test_cxx11_tensor_of_float16_cuda()
+EIGEN_DECLARE_TEST(cxx11_tensor_of_float16_gpu)
{
- CALL_SUBTEST_1(test_cuda_numext<void>());
-
-#ifdef EIGEN_HAS_CUDA_FP16
- CALL_SUBTEST_1(test_cuda_conversion<void>());
- CALL_SUBTEST_1(test_cuda_unary<void>());
- CALL_SUBTEST_1(test_cuda_elementwise<void>());
- CALL_SUBTEST_1(test_cuda_trancendental<void>());
- CALL_SUBTEST_2(test_cuda_contractions<void>());
- CALL_SUBTEST_3(test_cuda_reductions<void>());
- CALL_SUBTEST_4(test_cuda_full_reductions<void>());
- CALL_SUBTEST_5(test_cuda_forced_evals<void>());
+ CALL_SUBTEST_1(test_gpu_numext<void>());
+
+#ifdef EIGEN_HAS_GPU_FP16
+ CALL_SUBTEST_1(test_gpu_conversion<void>());
+ CALL_SUBTEST_1(test_gpu_unary<void>());
+ CALL_SUBTEST_1(test_gpu_elementwise<void>());
+ CALL_SUBTEST_1(test_gpu_trancendental<void>());
+ CALL_SUBTEST_2(test_gpu_contractions<void>());
+ CALL_SUBTEST_3(test_gpu_reductions<void>());
+ CALL_SUBTEST_4(test_gpu_full_reductions<void>());
+ CALL_SUBTEST_5(test_gpu_forced_evals<void>());
#else
- std::cout << "Half floats are not supported by this version of cuda: skipping the test" << std::endl;
+ std::cout << "Half floats are not supported by this version of gpu: skipping the test" << std::endl;
#endif
}
diff --git a/unsupported/test/cxx11_tensor_of_strings.cpp b/unsupported/test/cxx11_tensor_of_strings.cpp
index 4ef9aed91..159656276 100644
--- a/unsupported/test/cxx11_tensor_of_strings.cpp
+++ b/unsupported/test/cxx11_tensor_of_strings.cpp
@@ -141,7 +141,7 @@ static void test_initialization()
}
-void test_cxx11_tensor_of_strings()
+EIGEN_DECLARE_TEST(cxx11_tensor_of_strings)
{
// Beware: none of this is likely to ever work on a GPU.
CALL_SUBTEST(test_assign());
diff --git a/unsupported/test/cxx11_tensor_padding.cpp b/unsupported/test/cxx11_tensor_padding.cpp
index ffa19896e..b8a329deb 100644
--- a/unsupported/test/cxx11_tensor_padding.cpp
+++ b/unsupported/test/cxx11_tensor_padding.cpp
@@ -84,7 +84,7 @@ static void test_padded_expr()
}
}
-void test_cxx11_tensor_padding()
+EIGEN_DECLARE_TEST(cxx11_tensor_padding)
{
CALL_SUBTEST(test_simple_padding<ColMajor>());
CALL_SUBTEST(test_simple_padding<RowMajor>());
diff --git a/unsupported/test/cxx11_tensor_padding_sycl.cpp b/unsupported/test/cxx11_tensor_padding_sycl.cpp
index dc748b73e..727a9ffd7 100644
--- a/unsupported/test/cxx11_tensor_padding_sycl.cpp
+++ b/unsupported/test/cxx11_tensor_padding_sycl.cpp
@@ -15,7 +15,7 @@
#define EIGEN_TEST_NO_LONGDOUBLE
#define EIGEN_TEST_NO_COMPLEX
-#define EIGEN_TEST_FUNC cxx11_tensor_padding_sycl
+
#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int64_t
#define EIGEN_USE_SYCL
@@ -149,7 +149,7 @@ template<typename DataType, typename dev_Selector> void sycl_padding_test_per_de
test_padded_expr<DataType, ColMajor, int64_t>(sycl_device);
}
-void test_cxx11_tensor_padding_sycl()
+EIGEN_DECLARE_TEST(cxx11_tensor_padding_sycl)
{
for (const auto& device :Eigen::get_sycl_supported_devices()) {
CALL_SUBTEST(sycl_padding_test_per_device<float>(device));
diff --git a/unsupported/test/cxx11_tensor_patch.cpp b/unsupported/test/cxx11_tensor_patch.cpp
index 434359730..498ab8ca7 100644
--- a/unsupported/test/cxx11_tensor_patch.cpp
+++ b/unsupported/test/cxx11_tensor_patch.cpp
@@ -164,7 +164,7 @@ static void test_simple_patch()
}
}
-void test_cxx11_tensor_patch()
+EIGEN_DECLARE_TEST(cxx11_tensor_patch)
{
CALL_SUBTEST(test_simple_patch<ColMajor>());
CALL_SUBTEST(test_simple_patch<RowMajor>());
diff --git a/unsupported/test/cxx11_tensor_patch_sycl.cpp b/unsupported/test/cxx11_tensor_patch_sycl.cpp
new file mode 100644
index 000000000..7f92bec78
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_patch_sycl.cpp
@@ -0,0 +1,249 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2016
+// Mehdi Goli Codeplay Software Ltd.
+// Ralph Potter Codeplay Software Ltd.
+// Luke Iwanski Codeplay Software Ltd.
+// Contact: <eigen@codeplay.com>
+// Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#define EIGEN_TEST_NO_LONGDOUBLE
+#define EIGEN_TEST_NO_COMPLEX
+
+#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int64_t
+#define EIGEN_USE_SYCL
+
+#include "main.h"
+
+#include <Eigen/CXX11/Tensor>
+
+using Eigen::Tensor;
+
+template <typename DataType, int DataLayout, typename IndexType>
+static void test_simple_patch_sycl(const Eigen::SyclDevice& sycl_device){
+
+ IndexType sizeDim1 = 2;
+ IndexType sizeDim2 = 3;
+ IndexType sizeDim3 = 5;
+ IndexType sizeDim4 = 7;
+ array<IndexType, 4> tensorRange = {{sizeDim1, sizeDim2, sizeDim3, sizeDim4}};
+ array<IndexType, 5> patchTensorRange;
+ if (DataLayout == ColMajor) {
+ patchTensorRange = {{1, 1, 1, 1, sizeDim1*sizeDim2*sizeDim3*sizeDim4}};
+ }else{
+ patchTensorRange = {{sizeDim1*sizeDim2*sizeDim3*sizeDim4,1, 1, 1, 1}};
+ }
+
+ Tensor<DataType, 4, DataLayout,IndexType> tensor(tensorRange);
+ Tensor<DataType, 5, DataLayout,IndexType> no_patch(patchTensorRange);
+
+ tensor.setRandom();
+
+ array<ptrdiff_t, 4> patch_dims;
+ patch_dims[0] = 1;
+ patch_dims[1] = 1;
+ patch_dims[2] = 1;
+ patch_dims[3] = 1;
+
+ const size_t tensorBuffSize =tensor.size()*sizeof(DataType);
+ size_t patchTensorBuffSize =no_patch.size()*sizeof(DataType);
+ DataType* gpu_data_tensor = static_cast<DataType*>(sycl_device.allocate(tensorBuffSize));
+ DataType* gpu_data_no_patch = static_cast<DataType*>(sycl_device.allocate(patchTensorBuffSize));
+
+ TensorMap<Tensor<DataType, 4, DataLayout,IndexType>> gpu_tensor(gpu_data_tensor, tensorRange);
+ TensorMap<Tensor<DataType, 5, DataLayout,IndexType>> gpu_no_patch(gpu_data_no_patch, patchTensorRange);
+
+ sycl_device.memcpyHostToDevice(gpu_data_tensor, tensor.data(), tensorBuffSize);
+ gpu_no_patch.device(sycl_device)=gpu_tensor.extract_patches(patch_dims);
+ sycl_device.memcpyDeviceToHost(no_patch.data(), gpu_data_no_patch, patchTensorBuffSize);
+
+ if (DataLayout == ColMajor) {
+ VERIFY_IS_EQUAL(no_patch.dimension(0), 1);
+ VERIFY_IS_EQUAL(no_patch.dimension(1), 1);
+ VERIFY_IS_EQUAL(no_patch.dimension(2), 1);
+ VERIFY_IS_EQUAL(no_patch.dimension(3), 1);
+ VERIFY_IS_EQUAL(no_patch.dimension(4), tensor.size());
+ } else {
+ VERIFY_IS_EQUAL(no_patch.dimension(0), tensor.size());
+ VERIFY_IS_EQUAL(no_patch.dimension(1), 1);
+ VERIFY_IS_EQUAL(no_patch.dimension(2), 1);
+ VERIFY_IS_EQUAL(no_patch.dimension(3), 1);
+ VERIFY_IS_EQUAL(no_patch.dimension(4), 1);
+ }
+
+ for (int i = 0; i < tensor.size(); ++i) {
+ VERIFY_IS_EQUAL(tensor.data()[i], no_patch.data()[i]);
+ }
+
+ patch_dims[0] = 2;
+ patch_dims[1] = 3;
+ patch_dims[2] = 5;
+ patch_dims[3] = 7;
+
+ if (DataLayout == ColMajor) {
+ patchTensorRange = {{sizeDim1,sizeDim2,sizeDim3,sizeDim4,1}};
+ }else{
+ patchTensorRange = {{1,sizeDim1,sizeDim2,sizeDim3,sizeDim4}};
+ }
+ Tensor<DataType, 5, DataLayout,IndexType> single_patch(patchTensorRange);
+ patchTensorBuffSize =single_patch.size()*sizeof(DataType);
+ DataType* gpu_data_single_patch = static_cast<DataType*>(sycl_device.allocate(patchTensorBuffSize));
+ TensorMap<Tensor<DataType, 5, DataLayout,IndexType>> gpu_single_patch(gpu_data_single_patch, patchTensorRange);
+
+ gpu_single_patch.device(sycl_device)=gpu_tensor.extract_patches(patch_dims);
+ sycl_device.memcpyDeviceToHost(single_patch.data(), gpu_data_single_patch, patchTensorBuffSize);
+
+ if (DataLayout == ColMajor) {
+ VERIFY_IS_EQUAL(single_patch.dimension(0), 2);
+ VERIFY_IS_EQUAL(single_patch.dimension(1), 3);
+ VERIFY_IS_EQUAL(single_patch.dimension(2), 5);
+ VERIFY_IS_EQUAL(single_patch.dimension(3), 7);
+ VERIFY_IS_EQUAL(single_patch.dimension(4), 1);
+ } else {
+ VERIFY_IS_EQUAL(single_patch.dimension(0), 1);
+ VERIFY_IS_EQUAL(single_patch.dimension(1), 2);
+ VERIFY_IS_EQUAL(single_patch.dimension(2), 3);
+ VERIFY_IS_EQUAL(single_patch.dimension(3), 5);
+ VERIFY_IS_EQUAL(single_patch.dimension(4), 7);
+ }
+
+ for (int i = 0; i < tensor.size(); ++i) {
+ VERIFY_IS_EQUAL(tensor.data()[i], single_patch.data()[i]);
+ }
+ patch_dims[0] = 1;
+ patch_dims[1] = 2;
+ patch_dims[2] = 2;
+ patch_dims[3] = 1;
+
+ if (DataLayout == ColMajor) {
+ patchTensorRange = {{1,2,2,1,2*2*4*7}};
+ }else{
+ patchTensorRange = {{2*2*4*7, 1, 2,2,1}};
+ }
+ Tensor<DataType, 5, DataLayout,IndexType> twod_patch(patchTensorRange);
+ patchTensorBuffSize =twod_patch.size()*sizeof(DataType);
+ DataType* gpu_data_twod_patch = static_cast<DataType*>(sycl_device.allocate(patchTensorBuffSize));
+ TensorMap<Tensor<DataType, 5, DataLayout,IndexType>> gpu_twod_patch(gpu_data_twod_patch, patchTensorRange);
+
+ gpu_twod_patch.device(sycl_device)=gpu_tensor.extract_patches(patch_dims);
+ sycl_device.memcpyDeviceToHost(twod_patch.data(), gpu_data_twod_patch, patchTensorBuffSize);
+
+ if (DataLayout == ColMajor) {
+ VERIFY_IS_EQUAL(twod_patch.dimension(0), 1);
+ VERIFY_IS_EQUAL(twod_patch.dimension(1), 2);
+ VERIFY_IS_EQUAL(twod_patch.dimension(2), 2);
+ VERIFY_IS_EQUAL(twod_patch.dimension(3), 1);
+ VERIFY_IS_EQUAL(twod_patch.dimension(4), 2*2*4*7);
+ } else {
+ VERIFY_IS_EQUAL(twod_patch.dimension(0), 2*2*4*7);
+ VERIFY_IS_EQUAL(twod_patch.dimension(1), 1);
+ VERIFY_IS_EQUAL(twod_patch.dimension(2), 2);
+ VERIFY_IS_EQUAL(twod_patch.dimension(3), 2);
+ VERIFY_IS_EQUAL(twod_patch.dimension(4), 1);
+ }
+
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 2; ++j) {
+ for (int k = 0; k < 4; ++k) {
+ for (int l = 0; l < 7; ++l) {
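+          // patch_loc: flat index of the patch anchored at (i, j, k, l), counted
+          // over the 2*2*4*7 valid positions in the tensor's storage order.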
+ int patch_loc;
+ if (DataLayout == ColMajor) {
+ patch_loc = i + 2 * (j + 2 * (k + 4 * l));
+ } else {
+ patch_loc = l + 7 * (k + 4 * (j + 2 * i));
+ }
+ for (int x = 0; x < 2; ++x) {
+ for (int y = 0; y < 2; ++y) {
+ if (DataLayout == ColMajor) {
+ VERIFY_IS_EQUAL(tensor(i,j+x,k+y,l), twod_patch(0,x,y,0,patch_loc));
+ } else {
+ VERIFY_IS_EQUAL(tensor(i,j+x,k+y,l), twod_patch(patch_loc,0,x,y,0));
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ patch_dims[0] = 1;
+ patch_dims[1] = 2;
+ patch_dims[2] = 3;
+ patch_dims[3] = 5;
+
+ if (DataLayout == ColMajor) {
+ patchTensorRange = {{1,2,3,5,2*2*3*3}};
+ }else{
+ patchTensorRange = {{2*2*3*3, 1, 2,3,5}};
+ }
+ Tensor<DataType, 5, DataLayout,IndexType> threed_patch(patchTensorRange);
+ patchTensorBuffSize =threed_patch.size()*sizeof(DataType);
+ DataType* gpu_data_threed_patch = static_cast<DataType*>(sycl_device.allocate(patchTensorBuffSize));
+ TensorMap<Tensor<DataType, 5, DataLayout,IndexType>> gpu_threed_patch(gpu_data_threed_patch, patchTensorRange);
+
+ gpu_threed_patch.device(sycl_device)=gpu_tensor.extract_patches(patch_dims);
+ sycl_device.memcpyDeviceToHost(threed_patch.data(), gpu_data_threed_patch, patchTensorBuffSize);
+
+ if (DataLayout == ColMajor) {
+ VERIFY_IS_EQUAL(threed_patch.dimension(0), 1);
+ VERIFY_IS_EQUAL(threed_patch.dimension(1), 2);
+ VERIFY_IS_EQUAL(threed_patch.dimension(2), 3);
+ VERIFY_IS_EQUAL(threed_patch.dimension(3), 5);
+ VERIFY_IS_EQUAL(threed_patch.dimension(4), 2*2*3*3);
+ } else {
+ VERIFY_IS_EQUAL(threed_patch.dimension(0), 2*2*3*3);
+ VERIFY_IS_EQUAL(threed_patch.dimension(1), 1);
+ VERIFY_IS_EQUAL(threed_patch.dimension(2), 2);
+ VERIFY_IS_EQUAL(threed_patch.dimension(3), 3);
+ VERIFY_IS_EQUAL(threed_patch.dimension(4), 5);
+ }
+
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 2; ++j) {
+ for (int k = 0; k < 3; ++k) {
+ for (int l = 0; l < 3; ++l) {
+ int patch_loc;
+ if (DataLayout == ColMajor) {
+ patch_loc = i + 2 * (j + 2 * (k + 3 * l));
+ } else {
+ patch_loc = l + 3 * (k + 3 * (j + 2 * i));
+ }
+ for (int x = 0; x < 2; ++x) {
+ for (int y = 0; y < 3; ++y) {
+ for (int z = 0; z < 5; ++z) {
+ if (DataLayout == ColMajor) {
+ VERIFY_IS_EQUAL(tensor(i,j+x,k+y,l+z), threed_patch(0,x,y,z,patch_loc));
+ } else {
+ VERIFY_IS_EQUAL(tensor(i,j+x,k+y,l+z), threed_patch(patch_loc,0,x,y,z));
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ sycl_device.deallocate(gpu_data_tensor);
+ sycl_device.deallocate(gpu_data_no_patch);
+ sycl_device.deallocate(gpu_data_single_patch);
+ sycl_device.deallocate(gpu_data_twod_patch);
+ sycl_device.deallocate(gpu_data_threed_patch);
+}
+
+template<typename DataType, typename dev_Selector> void sycl_tensor_patch_test_per_device(dev_Selector s){
+ QueueInterface queueInterface(s);
+ auto sycl_device = Eigen::SyclDevice(&queueInterface);
+ test_simple_patch_sycl<DataType, RowMajor, int64_t>(sycl_device);
+ test_simple_patch_sycl<DataType, ColMajor, int64_t>(sycl_device);
+}
+EIGEN_DECLARE_TEST(cxx11_tensor_patch_sycl)
+{
+ for (const auto& device :Eigen::get_sycl_supported_devices()) {
+ CALL_SUBTEST(sycl_tensor_patch_test_per_device<float>(device));
+ }
+}
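As a reminder of the shape convention the dimension checks above rely on (patch extents first and patch index last in ColMajor, the reverse in RowMajor), here is a host-only sketch; the function name is illustrative and only the Eigen::Tensor header is assumed:

  #include <unsupported/Eigen/CXX11/Tensor>
  #include <cassert>

  static void patch_shape_sketch() {
    Eigen::Tensor<float, 4> t(2, 3, 5, 7);                 // ColMajor by default
    t.setRandom();
    Eigen::array<ptrdiff_t, 4> patch_dims = {{1, 2, 2, 1}};
    Eigen::Tensor<float, 5> patches = t.extract_patches(patch_dims);
    // The first four dimensions are the patch extents; the last one indexes
    // the 2 * (3-2+1) * (5-2+1) * 7 = 2*2*4*7 valid patch positions.
    assert(patches.dimension(0) == 1 && patches.dimension(1) == 2);
    assert(patches.dimension(2) == 2 && patches.dimension(3) == 1);
    assert(patches.dimension(4) == 2 * 2 * 4 * 7);
  }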
diff --git a/unsupported/test/cxx11_tensor_random.cpp b/unsupported/test/cxx11_tensor_random.cpp
index 0f3dc5787..4740d5811 100644
--- a/unsupported/test/cxx11_tensor_random.cpp
+++ b/unsupported/test/cxx11_tensor_random.cpp
@@ -70,7 +70,7 @@ static void test_custom()
}
}
-void test_cxx11_tensor_random()
+EIGEN_DECLARE_TEST(cxx11_tensor_random)
{
CALL_SUBTEST(test_default());
CALL_SUBTEST(test_normal());
diff --git a/unsupported/test/cxx11_tensor_random_cuda.cu b/unsupported/test/cxx11_tensor_random_gpu.cu
index b3be199e1..090986ebc 100644
--- a/unsupported/test/cxx11_tensor_random_cuda.cu
+++ b/unsupported/test/cxx11_tensor_random_gpu.cu
@@ -9,18 +9,16 @@
#define EIGEN_TEST_NO_LONGDOUBLE
#define EIGEN_TEST_NO_COMPLEX
-#define EIGEN_TEST_FUNC cxx11_tensor_random_cuda
+
#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int
#define EIGEN_USE_GPU
-#if defined __CUDACC_VER__ && __CUDACC_VER__ >= 70500
-#include <cuda_fp16.h>
-#endif
#include "main.h"
#include <Eigen/CXX11/Tensor>
+#include <Eigen/CXX11/src/Tensor/TensorGpuHipCudaDefines.h>
-void test_cuda_random_uniform()
+void test_gpu_random_uniform()
{
Tensor<float, 2> out(72,97);
out.setZero();
@@ -28,24 +26,24 @@ void test_cuda_random_uniform()
std::size_t out_bytes = out.size() * sizeof(float);
float* d_out;
- cudaMalloc((void**)(&d_out), out_bytes);
+ gpuMalloc((void**)(&d_out), out_bytes);
- Eigen::CudaStreamDevice stream;
+ Eigen::GpuStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<float, 2> > gpu_out(d_out, 72,97);
gpu_out.device(gpu_device) = gpu_out.random();
- assert(cudaMemcpyAsync(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
- assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
+ assert(gpuMemcpyAsync(out.data(), d_out, out_bytes, gpuMemcpyDeviceToHost, gpu_device.stream()) == gpuSuccess);
+ assert(gpuStreamSynchronize(gpu_device.stream()) == gpuSuccess);
- // For now we just check thes code doesn't crash.
+ // For now we just check this code doesn't crash.
// TODO: come up with a valid test of randomness
}
-void test_cuda_random_normal()
+void test_gpu_random_normal()
{
Tensor<float, 2> out(72,97);
out.setZero();
@@ -53,9 +51,9 @@ void test_cuda_random_normal()
std::size_t out_bytes = out.size() * sizeof(float);
float* d_out;
- cudaMalloc((void**)(&d_out), out_bytes);
+ gpuMalloc((void**)(&d_out), out_bytes);
- Eigen::CudaStreamDevice stream;
+ Eigen::GpuStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<float, 2> > gpu_out(d_out, 72,97);
@@ -63,8 +61,8 @@ void test_cuda_random_normal()
Eigen::internal::NormalRandomGenerator<float> gen(true);
gpu_out.device(gpu_device) = gpu_out.random(gen);
- assert(cudaMemcpyAsync(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
- assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
+ assert(gpuMemcpyAsync(out.data(), d_out, out_bytes, gpuMemcpyDeviceToHost, gpu_device.stream()) == gpuSuccess);
+ assert(gpuStreamSynchronize(gpu_device.stream()) == gpuSuccess);
}
static void test_complex()
@@ -80,9 +78,9 @@ static void test_complex()
}
-void test_cxx11_tensor_random_cuda()
+EIGEN_DECLARE_TEST(cxx11_tensor_random_gpu)
{
- CALL_SUBTEST(test_cuda_random_uniform());
- CALL_SUBTEST(test_cuda_random_normal());
+ CALL_SUBTEST(test_gpu_random_uniform());
+ CALL_SUBTEST(test_gpu_random_normal());
CALL_SUBTEST(test_complex());
}
diff --git a/unsupported/test/cxx11_tensor_reduction.cpp b/unsupported/test/cxx11_tensor_reduction.cpp
index 1490ec3da..9458f4e4e 100644
--- a/unsupported/test/cxx11_tensor_reduction.cpp
+++ b/unsupported/test/cxx11_tensor_reduction.cpp
@@ -53,20 +53,20 @@ static void test_trivial_reductions() {
}
}
-template <int DataLayout>
+template <typename Scalar,int DataLayout>
static void test_simple_reductions() {
- Tensor<float, 4, DataLayout> tensor(2, 3, 5, 7);
+ Tensor<Scalar, 4, DataLayout> tensor(2, 3, 5, 7);
tensor.setRandom();
array<ptrdiff_t, 2> reduction_axis2;
reduction_axis2[0] = 1;
reduction_axis2[1] = 3;
- Tensor<float, 2, DataLayout> result = tensor.sum(reduction_axis2);
+ Tensor<Scalar, 2, DataLayout> result = tensor.sum(reduction_axis2);
VERIFY_IS_EQUAL(result.dimension(0), 2);
VERIFY_IS_EQUAL(result.dimension(1), 5);
for (int i = 0; i < 2; ++i) {
for (int j = 0; j < 5; ++j) {
- float sum = 0.0f;
+ Scalar sum = Scalar(0.0f);
for (int k = 0; k < 3; ++k) {
for (int l = 0; l < 7; ++l) {
sum += tensor(i, k, j, l);
@@ -77,7 +77,7 @@ static void test_simple_reductions() {
}
{
- Tensor<float, 0, DataLayout> sum1 = tensor.sum();
+ Tensor<Scalar, 0, DataLayout> sum1 = tensor.sum();
VERIFY_IS_EQUAL(sum1.rank(), 0);
array<ptrdiff_t, 4> reduction_axis4;
@@ -85,7 +85,7 @@ static void test_simple_reductions() {
reduction_axis4[1] = 1;
reduction_axis4[2] = 2;
reduction_axis4[3] = 3;
- Tensor<float, 0, DataLayout> sum2 = tensor.sum(reduction_axis4);
+ Tensor<Scalar, 0, DataLayout> sum2 = tensor.sum(reduction_axis4);
VERIFY_IS_EQUAL(sum2.rank(), 0);
VERIFY_IS_APPROX(sum1(), sum2());
@@ -98,7 +98,7 @@ static void test_simple_reductions() {
VERIFY_IS_EQUAL(result.dimension(1), 7);
for (int i = 0; i < 3; ++i) {
for (int j = 0; j < 7; ++j) {
- float prod = 1.0f;
+ Scalar prod = Scalar(1.0f);
for (int k = 0; k < 2; ++k) {
for (int l = 0; l < 5; ++l) {
prod *= tensor(k, i, l, j);
@@ -109,7 +109,7 @@ static void test_simple_reductions() {
}
{
- Tensor<float, 0, DataLayout> prod1 = tensor.prod();
+ Tensor<Scalar, 0, DataLayout> prod1 = tensor.prod();
VERIFY_IS_EQUAL(prod1.rank(), 0);
array<ptrdiff_t, 4> reduction_axis4;
@@ -117,7 +117,7 @@ static void test_simple_reductions() {
reduction_axis4[1] = 1;
reduction_axis4[2] = 2;
reduction_axis4[3] = 3;
- Tensor<float, 0, DataLayout> prod2 = tensor.prod(reduction_axis4);
+ Tensor<Scalar, 0, DataLayout> prod2 = tensor.prod(reduction_axis4);
VERIFY_IS_EQUAL(prod2.rank(), 0);
VERIFY_IS_APPROX(prod1(), prod2());
@@ -130,7 +130,7 @@ static void test_simple_reductions() {
VERIFY_IS_EQUAL(result.dimension(1), 7);
for (int i = 0; i < 3; ++i) {
for (int j = 0; j < 7; ++j) {
- float max_val = std::numeric_limits<float>::lowest();
+ Scalar max_val = std::numeric_limits<Scalar>::lowest();
for (int k = 0; k < 2; ++k) {
for (int l = 0; l < 5; ++l) {
max_val = (std::max)(max_val, tensor(k, i, l, j));
@@ -141,7 +141,7 @@ static void test_simple_reductions() {
}
{
- Tensor<float, 0, DataLayout> max1 = tensor.maximum();
+ Tensor<Scalar, 0, DataLayout> max1 = tensor.maximum();
VERIFY_IS_EQUAL(max1.rank(), 0);
array<ptrdiff_t, 4> reduction_axis4;
@@ -149,7 +149,7 @@ static void test_simple_reductions() {
reduction_axis4[1] = 1;
reduction_axis4[2] = 2;
reduction_axis4[3] = 3;
- Tensor<float, 0, DataLayout> max2 = tensor.maximum(reduction_axis4);
+ Tensor<Scalar, 0, DataLayout> max2 = tensor.maximum(reduction_axis4);
VERIFY_IS_EQUAL(max2.rank(), 0);
VERIFY_IS_APPROX(max1(), max2());
@@ -162,7 +162,7 @@ static void test_simple_reductions() {
VERIFY_IS_EQUAL(result.dimension(1), 7);
for (int i = 0; i < 5; ++i) {
for (int j = 0; j < 7; ++j) {
- float min_val = (std::numeric_limits<float>::max)();
+ Scalar min_val = (std::numeric_limits<Scalar>::max)();
for (int k = 0; k < 2; ++k) {
for (int l = 0; l < 3; ++l) {
min_val = (std::min)(min_val, tensor(k, l, i, j));
@@ -173,7 +173,7 @@ static void test_simple_reductions() {
}
{
- Tensor<float, 0, DataLayout> min1 = tensor.minimum();
+ Tensor<Scalar, 0, DataLayout> min1 = tensor.minimum();
VERIFY_IS_EQUAL(min1.rank(), 0);
array<ptrdiff_t, 4> reduction_axis4;
@@ -181,7 +181,7 @@ static void test_simple_reductions() {
reduction_axis4[1] = 1;
reduction_axis4[2] = 2;
reduction_axis4[3] = 3;
- Tensor<float, 0, DataLayout> min2 = tensor.minimum(reduction_axis4);
+ Tensor<Scalar, 0, DataLayout> min2 = tensor.minimum(reduction_axis4);
VERIFY_IS_EQUAL(min2.rank(), 0);
VERIFY_IS_APPROX(min1(), min2());
@@ -194,7 +194,7 @@ static void test_simple_reductions() {
VERIFY_IS_EQUAL(result.dimension(1), 7);
for (int i = 0; i < 5; ++i) {
for (int j = 0; j < 7; ++j) {
- float sum = 0.0f;
+ Scalar sum = Scalar(0.0f);
int count = 0;
for (int k = 0; k < 2; ++k) {
for (int l = 0; l < 3; ++l) {
@@ -207,7 +207,7 @@ static void test_simple_reductions() {
}
{
- Tensor<float, 0, DataLayout> mean1 = tensor.mean();
+ Tensor<Scalar, 0, DataLayout> mean1 = tensor.mean();
VERIFY_IS_EQUAL(mean1.rank(), 0);
array<ptrdiff_t, 4> reduction_axis4;
@@ -215,7 +215,7 @@ static void test_simple_reductions() {
reduction_axis4[1] = 1;
reduction_axis4[2] = 2;
reduction_axis4[3] = 3;
- Tensor<float, 0, DataLayout> mean2 = tensor.mean(reduction_axis4);
+ Tensor<Scalar, 0, DataLayout> mean2 = tensor.mean(reduction_axis4);
VERIFY_IS_EQUAL(mean2.rank(), 0);
VERIFY_IS_APPROX(mean1(), mean2());
@@ -386,7 +386,7 @@ static void test_static_dims() {
expected = (std::max)(expected, in(i, k, j, l));
}
}
- VERIFY_IS_APPROX(out(i, j), expected);
+ VERIFY_IS_EQUAL(out(i, j), expected);
}
}
}
@@ -417,7 +417,7 @@ static void test_innermost_last_dims() {
expected = (std::max)(expected, in(l, k, i, j));
}
}
- VERIFY_IS_APPROX(out(i, j), expected);
+ VERIFY_IS_EQUAL(out(i, j), expected);
}
}
}
@@ -448,7 +448,7 @@ static void test_innermost_first_dims() {
expected = (std::max)(expected, in(i, j, k, l));
}
}
- VERIFY_IS_APPROX(out(i, j), expected);
+ VERIFY_IS_EQUAL(out(i, j), expected);
}
}
}
@@ -479,16 +479,36 @@ static void test_reduce_middle_dims() {
expected = (std::max)(expected, in(i, k, l, j));
}
}
- VERIFY_IS_APPROX(out(i, j), expected);
+ VERIFY_IS_EQUAL(out(i, j), expected);
+ }
+ }
+}
+
+static void test_sum_accuracy() {
+ Tensor<float, 3> tensor(101, 101, 101);
+ for (float prescribed_mean : {1.0f, 10.0f, 100.0f, 1000.0f, 10000.0f}) {
+ tensor.setRandom();
+ tensor += tensor.constant(prescribed_mean);
+
+ Tensor<float, 0> sum = tensor.sum();
+ double expected_sum = 0.0;
+ for (int i = 0; i < 101; ++i) {
+ for (int j = 0; j < 101; ++j) {
+ for (int k = 0; k < 101; ++k) {
+ expected_sum += static_cast<double>(tensor(i, j, k));
+ }
+ }
}
+ VERIFY_IS_APPROX(sum(), static_cast<float>(expected_sum));
}
}
-void test_cxx11_tensor_reduction() {
+EIGEN_DECLARE_TEST(cxx11_tensor_reduction) {
CALL_SUBTEST(test_trivial_reductions<ColMajor>());
CALL_SUBTEST(test_trivial_reductions<RowMajor>());
- CALL_SUBTEST(test_simple_reductions<ColMajor>());
- CALL_SUBTEST(test_simple_reductions<RowMajor>());
+ CALL_SUBTEST(( test_simple_reductions<float,ColMajor>() ));
+ CALL_SUBTEST(( test_simple_reductions<float,RowMajor>() ));
+ CALL_SUBTEST(( test_simple_reductions<Eigen::half,ColMajor>() ));
CALL_SUBTEST(test_reductions_in_expr<ColMajor>());
CALL_SUBTEST(test_reductions_in_expr<RowMajor>());
CALL_SUBTEST(test_full_reductions<ColMajor>());
@@ -505,4 +525,5 @@ void test_cxx11_tensor_reduction() {
CALL_SUBTEST(test_innermost_first_dims<RowMajor>());
CALL_SUBTEST(test_reduce_middle_dims<ColMajor>());
CALL_SUBTEST(test_reduce_middle_dims<RowMajor>());
+ CALL_SUBTEST(test_sum_accuracy());
}
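The double accumulator in test_sum_accuracy is the point of that test: a naive float running sum of roughly a million values centred on a large mean drops low-order bits, so the tensor reduction is compared against a double-precision reference. A minimal, Eigen-free illustration of the two accumulators:

  #include <vector>

  static float naive_float_sum(const std::vector<float>& v) {
    float s = 0.0f;            // float accumulator: rounding error grows with the mean
    for (float x : v) s += x;
    return s;
  }

  static double reference_sum(const std::vector<float>& v) {
    double s = 0.0;            // double accumulator: precise enough to act as ground truth
    for (float x : v) s += x;
    return s;
  }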
diff --git a/unsupported/test/cxx11_tensor_reduction_cuda.cu b/unsupported/test/cxx11_tensor_reduction_gpu.cu
index 6858b43a7..122ac946b 100644
--- a/unsupported/test/cxx11_tensor_reduction_cuda.cu
+++ b/unsupported/test/cxx11_tensor_reduction_gpu.cu
@@ -9,12 +9,9 @@
#define EIGEN_TEST_NO_LONGDOUBLE
#define EIGEN_TEST_NO_COMPLEX
-#define EIGEN_TEST_FUNC cxx11_tensor_reduction_cuda
+
#define EIGEN_USE_GPU
-#if defined __CUDACC_VER__ && __CUDACC_VER__ >= 70500
-#include <cuda_fp16.h>
-#endif
#include "main.h"
#include <unsupported/Eigen/CXX11/Tensor>
@@ -22,7 +19,7 @@
template<typename Type, int DataLayout>
static void test_full_reductions() {
- Eigen::CudaStreamDevice stream;
+ Eigen::GpuStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
const int num_rows = internal::random<int>(1024, 5*1024);
@@ -70,7 +67,7 @@ static void test_first_dim_reductions() {
Tensor<Type, 2, DataLayout> redux = in.sum(red_axis);
// Create device
- Eigen::CudaStreamDevice stream;
+ Eigen::GpuStreamDevice stream;
Eigen::GpuDevice dev(&stream);
// Create data(T)
@@ -110,7 +107,7 @@ static void test_last_dim_reductions() {
Tensor<Type, 2, DataLayout> redux = in.sum(red_axis);
// Create device
- Eigen::CudaStreamDevice stream;
+ Eigen::GpuStreamDevice stream;
Eigen::GpuDevice dev(&stream);
// Create data
@@ -137,7 +134,7 @@ static void test_last_dim_reductions() {
}
-void test_cxx11_tensor_reduction_cuda() {
+EIGEN_DECLARE_TEST(cxx11_tensor_reduction_gpu) {
CALL_SUBTEST_1((test_full_reductions<float, ColMajor>()));
CALL_SUBTEST_1((test_full_reductions<double, ColMajor>()));
CALL_SUBTEST_2((test_full_reductions<float, RowMajor>()));
diff --git a/unsupported/test/cxx11_tensor_reduction_sycl.cpp b/unsupported/test/cxx11_tensor_reduction_sycl.cpp
index 440d48bca..f526299c6 100644
--- a/unsupported/test/cxx11_tensor_reduction_sycl.cpp
+++ b/unsupported/test/cxx11_tensor_reduction_sycl.cpp
@@ -13,7 +13,7 @@
#define EIGEN_TEST_NO_LONGDOUBLE
#define EIGEN_TEST_NO_COMPLEX
-#define EIGEN_TEST_FUNC cxx11_tensor_reduction_sycl
+
#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int64_t
#define EIGEN_USE_SYCL
@@ -174,7 +174,7 @@ template<typename DataType> void sycl_reduction_test_per_device(const cl::sycl::
test_first_dim_reductions_max_sycl<DataType, ColMajor, int64_t>(sycl_device);
test_last_dim_reductions_sum_sycl<DataType, ColMajor, int64_t>(sycl_device);
}
-void test_cxx11_tensor_reduction_sycl() {
+EIGEN_DECLARE_TEST(cxx11_tensor_reduction_sycl) {
for (const auto& device :Eigen::get_sycl_supported_devices()) {
CALL_SUBTEST(sycl_reduction_test_per_device<float>(device));
}
diff --git a/unsupported/test/cxx11_tensor_ref.cpp b/unsupported/test/cxx11_tensor_ref.cpp
index c8f105e3d..7dbd0478c 100644
--- a/unsupported/test/cxx11_tensor_ref.cpp
+++ b/unsupported/test/cxx11_tensor_ref.cpp
@@ -235,7 +235,7 @@ static void test_nested_ops_with_ref()
}
-void test_cxx11_tensor_ref()
+EIGEN_DECLARE_TEST(cxx11_tensor_ref)
{
CALL_SUBTEST(test_simple_lvalue_ref());
CALL_SUBTEST(test_simple_rvalue_ref());
diff --git a/unsupported/test/cxx11_tensor_reverse.cpp b/unsupported/test/cxx11_tensor_reverse.cpp
index b35b8d29e..5e44ec007 100644
--- a/unsupported/test/cxx11_tensor_reverse.cpp
+++ b/unsupported/test/cxx11_tensor_reverse.cpp
@@ -179,7 +179,7 @@ static void test_expr_reverse(bool LValue)
}
-void test_cxx11_tensor_reverse()
+EIGEN_DECLARE_TEST(cxx11_tensor_reverse)
{
CALL_SUBTEST(test_simple_reverse<ColMajor>());
CALL_SUBTEST(test_simple_reverse<RowMajor>());
diff --git a/unsupported/test/cxx11_tensor_reverse_sycl.cpp b/unsupported/test/cxx11_tensor_reverse_sycl.cpp
index 2f5484484..77c2235d1 100644
--- a/unsupported/test/cxx11_tensor_reverse_sycl.cpp
+++ b/unsupported/test/cxx11_tensor_reverse_sycl.cpp
@@ -13,7 +13,7 @@
#define EIGEN_TEST_NO_LONGDOUBLE
#define EIGEN_TEST_NO_COMPLEX
-#define EIGEN_TEST_FUNC cxx11_tensor_reverse_sycl
+
#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int64_t
#define EIGEN_USE_SYCL
@@ -214,7 +214,7 @@ template<typename DataType> void sycl_reverse_test_per_device(const cl::sycl::de
test_expr_reverse<DataType, RowMajor, int64_t>(sycl_device, true);
test_expr_reverse<DataType, ColMajor, int64_t>(sycl_device, true);
}
-void test_cxx11_tensor_reverse_sycl() {
+EIGEN_DECLARE_TEST(cxx11_tensor_reverse_sycl) {
for (const auto& device :Eigen::get_sycl_supported_devices()) {
CALL_SUBTEST(sycl_reverse_test_per_device<float>(device));
}
diff --git a/unsupported/test/cxx11_tensor_roundings.cpp b/unsupported/test/cxx11_tensor_roundings.cpp
index 2c26151ab..83b592384 100644
--- a/unsupported/test/cxx11_tensor_roundings.cpp
+++ b/unsupported/test/cxx11_tensor_roundings.cpp
@@ -54,7 +54,7 @@ static void test_float_ceiling()
}
}
-void test_cxx11_tensor_roundings()
+EIGEN_DECLARE_TEST(cxx11_tensor_roundings)
{
CALL_SUBTEST(test_float_rounding());
CALL_SUBTEST(test_float_ceiling());
diff --git a/unsupported/test/cxx11_tensor_scan.cpp b/unsupported/test/cxx11_tensor_scan.cpp
index af59aa3ef..dccee9e84 100644
--- a/unsupported/test/cxx11_tensor_scan.cpp
+++ b/unsupported/test/cxx11_tensor_scan.cpp
@@ -98,7 +98,7 @@ static void test_tensor_maps() {
}
}
-void test_cxx11_tensor_scan() {
+EIGEN_DECLARE_TEST(cxx11_tensor_scan) {
CALL_SUBTEST((test_1d_scan<ColMajor, float, true>()));
CALL_SUBTEST((test_1d_scan<ColMajor, float, false>()));
CALL_SUBTEST((test_1d_scan<RowMajor, float, true>()));
diff --git a/unsupported/test/cxx11_tensor_scan_cuda.cu b/unsupported/test/cxx11_tensor_scan_gpu.cu
index 5f146f3c9..770a144f1 100644
--- a/unsupported/test/cxx11_tensor_scan_cuda.cu
+++ b/unsupported/test/cxx11_tensor_scan_gpu.cu
@@ -9,21 +9,20 @@
#define EIGEN_TEST_NO_LONGDOUBLE
#define EIGEN_TEST_NO_COMPLEX
-#define EIGEN_TEST_FUNC cxx11_tensor_scan_cuda
+
#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int
#define EIGEN_USE_GPU
-#if defined __CUDACC_VER__ && __CUDACC_VER__ >= 70500
-#include <cuda_fp16.h>
-#endif
#include "main.h"
#include <unsupported/Eigen/CXX11/Tensor>
+#include <Eigen/CXX11/src/Tensor/TensorGpuHipCudaDefines.h>
+
using Eigen::Tensor;
typedef Tensor<float, 1>::DimensionPair DimPair;
template<int DataLayout>
-void test_cuda_cumsum(int m_size, int k_size, int n_size)
+void test_gpu_cumsum(int m_size, int k_size, int n_size)
{
std::cout << "Testing for (" << m_size << "," << k_size << "," << n_size << ")" << std::endl;
Tensor<float, 3, DataLayout> t_input(m_size, k_size, n_size);
@@ -38,12 +37,12 @@ void test_cuda_cumsum(int m_size, int k_size, int n_size)
float* d_t_input;
float* d_t_result;
- cudaMalloc((void**)(&d_t_input), t_input_bytes);
- cudaMalloc((void**)(&d_t_result), t_result_bytes);
+ gpuMalloc((void**)(&d_t_input), t_input_bytes);
+ gpuMalloc((void**)(&d_t_result), t_result_bytes);
- cudaMemcpy(d_t_input, t_input.data(), t_input_bytes, cudaMemcpyHostToDevice);
+ gpuMemcpy(d_t_input, t_input.data(), t_input_bytes, gpuMemcpyHostToDevice);
- Eigen::CudaStreamDevice stream;
+ Eigen::GpuStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<float, 3, DataLayout> >
@@ -54,7 +53,7 @@ void test_cuda_cumsum(int m_size, int k_size, int n_size)
gpu_t_result.device(gpu_device) = gpu_t_input.cumsum(1);
t_result = t_input.cumsum(1);
- cudaMemcpy(t_result_gpu.data(), d_t_result, t_result_bytes, cudaMemcpyDeviceToHost);
+ gpuMemcpy(t_result_gpu.data(), d_t_result, t_result_bytes, gpuMemcpyDeviceToHost);
for (DenseIndex i = 0; i < t_result.size(); i++) {
if (fabs(t_result(i) - t_result_gpu(i)) < 1e-4f) {
continue;
@@ -67,13 +66,13 @@ void test_cuda_cumsum(int m_size, int k_size, int n_size)
assert(false);
}
- cudaFree((void*)d_t_input);
- cudaFree((void*)d_t_result);
+ gpuFree((void*)d_t_input);
+ gpuFree((void*)d_t_result);
}
-void test_cxx11_tensor_scan_cuda()
+EIGEN_DECLARE_TEST(cxx11_tensor_scan_gpu)
{
- CALL_SUBTEST_1(test_cuda_cumsum<ColMajor>(128, 128, 128));
- CALL_SUBTEST_2(test_cuda_cumsum<RowMajor>(128, 128, 128));
+ CALL_SUBTEST_1(test_gpu_cumsum<ColMajor>(128, 128, 128));
+ CALL_SUBTEST_2(test_gpu_cumsum<RowMajor>(128, 128, 128));
}
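For reference, cumsum(1) yields running sums along dimension 1; the host-side result the GPU output is checked against behaves as in this sketch (function name is illustrative; only the Eigen::Tensor header is assumed):

  #include <unsupported/Eigen/CXX11/Tensor>
  #include <cassert>
  #include <cmath>

  static void cumsum_sketch() {
    Eigen::Tensor<float, 3> t(4, 5, 6);
    t.setRandom();
    Eigen::Tensor<float, 3> c = t.cumsum(1);     // prefix sums along dimension 1
    float s = 0.0f;
    for (int j = 0; j < 5; ++j) {
      s += t(2, j, 3);
      assert(std::fabs(s - c(2, j, 3)) < 1e-4f); // same tolerance as the test above
    }
  }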
diff --git a/unsupported/test/cxx11_tensor_shuffling.cpp b/unsupported/test/cxx11_tensor_shuffling.cpp
index d11444a14..2ec85d2d4 100644
--- a/unsupported/test/cxx11_tensor_shuffling.cpp
+++ b/unsupported/test/cxx11_tensor_shuffling.cpp
@@ -81,12 +81,12 @@ static void test_expr_shuffling()
Tensor<float, 4, DataLayout> expected;
expected = tensor.shuffle(shuffles);
- Tensor<float, 4, DataLayout> result(5,7,3,2);
+ Tensor<float, 4, DataLayout> result(5, 7, 3, 2);
- array<int, 4> src_slice_dim{{2,3,1,7}};
- array<int, 4> src_slice_start{{0,0,0,0}};
- array<int, 4> dst_slice_dim{{1,7,3,2}};
- array<int, 4> dst_slice_start{{0,0,0,0}};
+ array<ptrdiff_t, 4> src_slice_dim{{2, 3, 1, 7}};
+ array<ptrdiff_t, 4> src_slice_start{{0, 0, 0, 0}};
+ array<ptrdiff_t, 4> dst_slice_dim{{1, 7, 3, 2}};
+ array<ptrdiff_t, 4> dst_slice_start{{0, 0, 0, 0}};
for (int i = 0; i < 5; ++i) {
result.slice(dst_slice_start, dst_slice_dim) =
@@ -215,7 +215,7 @@ static void test_shuffle_unshuffle()
}
-void test_cxx11_tensor_shuffling()
+EIGEN_DECLARE_TEST(cxx11_tensor_shuffling)
{
CALL_SUBTEST(test_simple_shuffling<ColMajor>());
CALL_SUBTEST(test_simple_shuffling<RowMajor>());
diff --git a/unsupported/test/cxx11_tensor_shuffling_sycl.cpp b/unsupported/test/cxx11_tensor_shuffling_sycl.cpp
index c88db7c72..0e8cc3bd2 100644
--- a/unsupported/test/cxx11_tensor_shuffling_sycl.cpp
+++ b/unsupported/test/cxx11_tensor_shuffling_sycl.cpp
@@ -15,7 +15,7 @@
#define EIGEN_TEST_NO_LONGDOUBLE
#define EIGEN_TEST_NO_COMPLEX
-#define EIGEN_TEST_FUNC cxx11_tensor_shuffling_sycl
+
#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int64_t
#define EIGEN_USE_SYCL
@@ -111,7 +111,7 @@ template<typename DataType, typename dev_Selector> void sycl_shuffling_test_per_
test_simple_shuffling_sycl<DataType, ColMajor, int64_t>(sycl_device);
}
-void test_cxx11_tensor_shuffling_sycl()
+EIGEN_DECLARE_TEST(cxx11_tensor_shuffling_sycl)
{
for (const auto& device :Eigen::get_sycl_supported_devices()) {
CALL_SUBTEST(sycl_shuffling_test_per_device<float>(device));
diff --git a/unsupported/test/cxx11_tensor_simple.cpp b/unsupported/test/cxx11_tensor_simple.cpp
index 5a0d339ef..6d70f5435 100644
--- a/unsupported/test/cxx11_tensor_simple.cpp
+++ b/unsupported/test/cxx11_tensor_simple.cpp
@@ -316,7 +316,7 @@ static void test_resize()
VERIFY_IS_EQUAL(epsilon.size(), 3*5*7);
}
-void test_cxx11_tensor_simple()
+EIGEN_DECLARE_TEST(cxx11_tensor_simple)
{
CALL_SUBTEST(test_0d());
CALL_SUBTEST(test_1d());
diff --git a/unsupported/test/cxx11_tensor_striding.cpp b/unsupported/test/cxx11_tensor_striding.cpp
index 935b908cc..aefdfa9b4 100644
--- a/unsupported/test/cxx11_tensor_striding.cpp
+++ b/unsupported/test/cxx11_tensor_striding.cpp
@@ -110,7 +110,7 @@ static void test_striding_as_lvalue()
}
-void test_cxx11_tensor_striding()
+EIGEN_DECLARE_TEST(cxx11_tensor_striding)
{
CALL_SUBTEST(test_simple_striding<ColMajor>());
CALL_SUBTEST(test_simple_striding<RowMajor>());
diff --git a/unsupported/test/cxx11_tensor_striding_sycl.cpp b/unsupported/test/cxx11_tensor_striding_sycl.cpp
index 603c3746f..d3b1fa77c 100644
--- a/unsupported/test/cxx11_tensor_striding_sycl.cpp
+++ b/unsupported/test/cxx11_tensor_striding_sycl.cpp
@@ -13,7 +13,7 @@
#define EIGEN_TEST_NO_LONGDOUBLE
#define EIGEN_TEST_NO_COMPLEX
-#define EIGEN_TEST_FUNC cxx11_tensor_striding_sycl
+
#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int64_t
#define EIGEN_USE_SYCL
@@ -196,7 +196,7 @@ template <typename Dev_selector> void tensorStridingPerDevice(Dev_selector& s){
test_striding_as_lvalue<float, RowMajor, int64_t>(sycl_device);
}
-void test_cxx11_tensor_striding_sycl() {
+EIGEN_DECLARE_TEST(cxx11_tensor_striding_sycl) {
for (const auto& device :Eigen::get_sycl_supported_devices()) {
CALL_SUBTEST(tensorStridingPerDevice(device));
}
diff --git a/unsupported/test/cxx11_tensor_sugar.cpp b/unsupported/test/cxx11_tensor_sugar.cpp
index 2f56eb495..2ca5c47db 100644
--- a/unsupported/test/cxx11_tensor_sugar.cpp
+++ b/unsupported/test/cxx11_tensor_sugar.cpp
@@ -73,7 +73,7 @@ static void test_scalar_sugar_sub_div() {
}
}
-void test_cxx11_tensor_sugar()
+EIGEN_DECLARE_TEST(cxx11_tensor_sugar)
{
CALL_SUBTEST(test_comparison_sugar());
CALL_SUBTEST(test_scalar_sugar_add_mul());
diff --git a/unsupported/test/cxx11_tensor_sycl.cpp b/unsupported/test/cxx11_tensor_sycl.cpp
index 5cd0f4c71..9357bed02 100644
--- a/unsupported/test/cxx11_tensor_sycl.cpp
+++ b/unsupported/test/cxx11_tensor_sycl.cpp
@@ -15,7 +15,7 @@
#define EIGEN_TEST_NO_LONGDOUBLE
#define EIGEN_TEST_NO_COMPLEX
-#define EIGEN_TEST_FUNC cxx11_tensor_sycl
+
#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int64_t
#define EIGEN_USE_SYCL
@@ -269,7 +269,7 @@ template<typename DataType, typename dev_Selector> void sycl_computing_test_per_
test_sycl_cast<DataType, int, ColMajor, int64_t>(sycl_device);
}
-void test_cxx11_tensor_sycl() {
+EIGEN_DECLARE_TEST(cxx11_tensor_sycl) {
for (const auto& device :Eigen::get_sycl_supported_devices()) {
CALL_SUBTEST(sycl_computing_test_per_device<float>(device));
}
diff --git a/unsupported/test/cxx11_tensor_symmetry.cpp b/unsupported/test/cxx11_tensor_symmetry.cpp
index d680e9b3b..fed269a9a 100644
--- a/unsupported/test/cxx11_tensor_symmetry.cpp
+++ b/unsupported/test/cxx11_tensor_symmetry.cpp
@@ -801,7 +801,7 @@ static void test_tensor_randacc()
}
}
-void test_cxx11_tensor_symmetry()
+EIGEN_DECLARE_TEST(cxx11_tensor_symmetry)
{
CALL_SUBTEST(test_symgroups_static());
CALL_SUBTEST(test_symgroups_dynamic());
diff --git a/unsupported/test/cxx11_tensor_thread_pool.cpp b/unsupported/test/cxx11_tensor_thread_pool.cpp
index 2ef665f30..6d8e58214 100644
--- a/unsupported/test/cxx11_tensor_thread_pool.cpp
+++ b/unsupported/test/cxx11_tensor_thread_pool.cpp
@@ -16,6 +16,25 @@
using Eigen::Tensor;
+class TestAllocator : public Allocator {
+ public:
+ ~TestAllocator() override {}
+ EIGEN_DEVICE_FUNC void* allocate(size_t num_bytes) const override {
+ const_cast<TestAllocator*>(this)->alloc_count_++;
+ return internal::aligned_malloc(num_bytes);
+ }
+ EIGEN_DEVICE_FUNC void deallocate(void* buffer) const override {
+ const_cast<TestAllocator*>(this)->dealloc_count_++;
+ internal::aligned_free(buffer);
+ }
+
+ int alloc_count() const { return alloc_count_; }
+ int dealloc_count() const { return dealloc_count_; }
+
+ private:
+ int alloc_count_ = 0;
+ int dealloc_count_ = 0;
+};
void test_multithread_elementwise()
{
@@ -232,6 +251,60 @@ void test_multithread_contraction_agrees_with_singlethread() {
}
}
+// Apply Sqrt to all output elements.
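+// The contraction evaluator invokes the kernel once for every finished block of
+// the output, so the element-wise sqrt is fused into the contraction instead of
+// requiring a second pass over the result.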
+struct SqrtOutputKernel {
+ template <typename Index, typename Scalar>
+ EIGEN_ALWAYS_INLINE void operator()(
+ const internal::blas_data_mapper<Scalar, Index, ColMajor>& output_mapper,
+ const TensorContractionParams&, Index, Index, Index num_rows,
+ Index num_cols) const {
+ for (int i = 0; i < num_rows; ++i) {
+ for (int j = 0; j < num_cols; ++j) {
+ output_mapper(i, j) = std::sqrt(output_mapper(i, j));
+ }
+ }
+ }
+};
+
+template <int DataLayout>
+static void test_multithread_contraction_with_output_kernel() {
+ typedef Tensor<float, 1>::DimensionPair DimPair;
+
+ const int num_threads = internal::random<int>(2, 11);
+ ThreadPool threads(num_threads);
+ Eigen::ThreadPoolDevice device(&threads, num_threads);
+
+ Tensor<float, 4, DataLayout> t_left(30, 50, 8, 31);
+ Tensor<float, 5, DataLayout> t_right(8, 31, 7, 20, 10);
+ Tensor<float, 5, DataLayout> t_result(30, 50, 7, 20, 10);
+
+ t_left.setRandom();
+ t_right.setRandom();
+  // Fill t_result with garbage to verify that the contraction overwrites the output memory.
+ t_result.setRandom();
+
+ // Add a little offset so that the results won't be close to zero.
+ t_left += t_left.constant(1.0f);
+ t_right += t_right.constant(1.0f);
+
+ typedef Map<Eigen::Matrix<float, Dynamic, Dynamic, DataLayout>> MapXf;
+ MapXf m_left(t_left.data(), 1500, 248);
+ MapXf m_right(t_right.data(), 248, 1400);
+ Eigen::Matrix<float, Dynamic, Dynamic, DataLayout> m_result(1500, 1400);
+
+ // this contraction should be equivalent to a single matrix multiplication
+ Eigen::array<DimPair, 2> dims({{DimPair(2, 0), DimPair(3, 1)}});
+
+ // compute results by separate methods
+ t_result.device(device) = t_left.contract(t_right, dims, SqrtOutputKernel());
+
+ m_result = m_left * m_right;
+
+ for (Index i = 0; i < t_result.dimensions().TotalSize(); i++) {
+ VERIFY(&t_result.data()[i] != &m_result.data()[i]);
+ VERIFY_IS_APPROX(t_result.data()[i], std::sqrt(m_result.data()[i]));
+ }
+}
template<int DataLayout>
void test_full_contraction() {
@@ -320,14 +393,14 @@ void test_multithread_random()
}
template<int DataLayout>
-void test_multithread_shuffle()
+void test_multithread_shuffle(Allocator* allocator)
{
Tensor<float, 4, DataLayout> tensor(17,5,7,11);
tensor.setRandom();
const int num_threads = internal::random<int>(2, 11);
ThreadPool threads(num_threads);
- Eigen::ThreadPoolDevice device(&threads, num_threads);
+ Eigen::ThreadPoolDevice device(&threads, num_threads, allocator);
Tensor<float, 4, DataLayout> shuffle(7,5,11,17);
array<ptrdiff_t, 4> shuffles = {{2,1,3,0}};
@@ -344,8 +417,23 @@ void test_multithread_shuffle()
}
}
+void test_threadpool_allocate(TestAllocator* allocator)
+{
+ const int num_threads = internal::random<int>(2, 11);
+ const int num_allocs = internal::random<int>(2, 11);
+ ThreadPool threads(num_threads);
+ Eigen::ThreadPoolDevice device(&threads, num_threads, allocator);
-void test_cxx11_tensor_thread_pool()
+ for (int a = 0; a < num_allocs; ++a) {
+ void* ptr = device.allocate(512);
+ device.deallocate(ptr);
+ }
+ VERIFY(allocator != NULL);
+ VERIFY_IS_EQUAL(allocator->alloc_count(), num_allocs);
+ VERIFY_IS_EQUAL(allocator->dealloc_count(), num_allocs);
+}
+
+EIGEN_DECLARE_TEST(cxx11_tensor_thread_pool)
{
CALL_SUBTEST_1(test_multithread_elementwise());
CALL_SUBTEST_1(test_multithread_compound_assignment());
@@ -355,6 +443,8 @@ void test_cxx11_tensor_thread_pool()
CALL_SUBTEST_3(test_multithread_contraction_agrees_with_singlethread<ColMajor>());
CALL_SUBTEST_3(test_multithread_contraction_agrees_with_singlethread<RowMajor>());
+ CALL_SUBTEST_3(test_multithread_contraction_with_output_kernel<ColMajor>());
+ CALL_SUBTEST_3(test_multithread_contraction_with_output_kernel<RowMajor>());
// Exercise various cases that have been problematic in the past.
CALL_SUBTEST_4(test_contraction_corner_cases<ColMajor>());
@@ -368,6 +458,9 @@ void test_cxx11_tensor_thread_pool()
CALL_SUBTEST_6(test_memcpy());
CALL_SUBTEST_6(test_multithread_random());
- CALL_SUBTEST_6(test_multithread_shuffle<ColMajor>());
- CALL_SUBTEST_6(test_multithread_shuffle<RowMajor>());
+
+ TestAllocator test_allocator;
+ CALL_SUBTEST_6(test_multithread_shuffle<ColMajor>(NULL));
+ CALL_SUBTEST_6(test_multithread_shuffle<RowMajor>(&test_allocator));
+ CALL_SUBTEST_6(test_threadpool_allocate(&test_allocator));
}
diff --git a/unsupported/test/cxx11_tensor_trace.cpp b/unsupported/test/cxx11_tensor_trace.cpp
new file mode 100644
index 000000000..0cb23060e
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_trace.cpp
@@ -0,0 +1,171 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2017 Gagan Goel <gagan.nith@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+
+#include <Eigen/CXX11/Tensor>
+
+using Eigen::Tensor;
+using Eigen::array;
+
+template <int DataLayout>
+static void test_0D_trace() {
+ Tensor<float, 0, DataLayout> tensor;
+ tensor.setRandom();
+ array<ptrdiff_t, 0> dims;
+ Tensor<float, 0, DataLayout> result = tensor.trace(dims);
+ VERIFY_IS_EQUAL(result(), tensor());
+}
+
+
+template <int DataLayout>
+static void test_all_dimensions_trace() {
+ Tensor<float, 3, DataLayout> tensor1(5, 5, 5);
+ tensor1.setRandom();
+ Tensor<float, 0, DataLayout> result1 = tensor1.trace();
+ VERIFY_IS_EQUAL(result1.rank(), 0);
+ float sum = 0.0f;
+ for (int i = 0; i < 5; ++i) {
+ sum += tensor1(i, i, i);
+ }
+ VERIFY_IS_EQUAL(result1(), sum);
+
+ Tensor<float, 5, DataLayout> tensor2(7, 7, 7, 7, 7);
+ array<ptrdiff_t, 5> dims = { { 2, 1, 0, 3, 4 } };
+ Tensor<float, 0, DataLayout> result2 = tensor2.trace(dims);
+ VERIFY_IS_EQUAL(result2.rank(), 0);
+ sum = 0.0f;
+ for (int i = 0; i < 7; ++i) {
+ sum += tensor2(i, i, i, i, i);
+ }
+ VERIFY_IS_EQUAL(result2(), sum);
+}
+
+
+template <int DataLayout>
+static void test_simple_trace() {
+ Tensor<float, 3, DataLayout> tensor1(3, 5, 3);
+ tensor1.setRandom();
+ array<ptrdiff_t, 2> dims1 = { { 0, 2 } };
+ Tensor<float, 1, DataLayout> result1 = tensor1.trace(dims1);
+ VERIFY_IS_EQUAL(result1.rank(), 1);
+ VERIFY_IS_EQUAL(result1.dimension(0), 5);
+ float sum = 0.0f;
+ for (int i = 0; i < 5; ++i) {
+ sum = 0.0f;
+ for (int j = 0; j < 3; ++j) {
+ sum += tensor1(j, i, j);
+ }
+ VERIFY_IS_EQUAL(result1(i), sum);
+ }
+
+ Tensor<float, 4, DataLayout> tensor2(5, 5, 7, 7);
+ tensor2.setRandom();
+ array<ptrdiff_t, 2> dims2 = { { 2, 3 } };
+ Tensor<float, 2, DataLayout> result2 = tensor2.trace(dims2);
+ VERIFY_IS_EQUAL(result2.rank(), 2);
+ VERIFY_IS_EQUAL(result2.dimension(0), 5);
+ VERIFY_IS_EQUAL(result2.dimension(1), 5);
+ for (int i = 0; i < 5; ++i) {
+ for (int j = 0; j < 5; ++j) {
+ sum = 0.0f;
+ for (int k = 0; k < 7; ++k) {
+ sum += tensor2(i, j, k, k);
+ }
+ VERIFY_IS_EQUAL(result2(i, j), sum);
+ }
+ }
+
+ array<ptrdiff_t, 2> dims3 = { { 1, 0 } };
+ Tensor<float, 2, DataLayout> result3 = tensor2.trace(dims3);
+ VERIFY_IS_EQUAL(result3.rank(), 2);
+ VERIFY_IS_EQUAL(result3.dimension(0), 7);
+ VERIFY_IS_EQUAL(result3.dimension(1), 7);
+ for (int i = 0; i < 7; ++i) {
+ for (int j = 0; j < 7; ++j) {
+ sum = 0.0f;
+ for (int k = 0; k < 5; ++k) {
+ sum += tensor2(k, k, i, j);
+ }
+ VERIFY_IS_EQUAL(result3(i, j), sum);
+ }
+ }
+
+ Tensor<float, 5, DataLayout> tensor3(3, 7, 3, 7, 3);
+ tensor3.setRandom();
+ array<ptrdiff_t, 3> dims4 = { { 0, 2, 4 } };
+ Tensor<float, 2, DataLayout> result4 = tensor3.trace(dims4);
+ VERIFY_IS_EQUAL(result4.rank(), 2);
+ VERIFY_IS_EQUAL(result4.dimension(0), 7);
+ VERIFY_IS_EQUAL(result4.dimension(1), 7);
+ for (int i = 0; i < 7; ++i) {
+ for (int j = 0; j < 7; ++j) {
+ sum = 0.0f;
+ for (int k = 0; k < 3; ++k) {
+ sum += tensor3(k, i, k, j, k);
+ }
+ VERIFY_IS_EQUAL(result4(i, j), sum);
+ }
+ }
+
+ Tensor<float, 5, DataLayout> tensor4(3, 7, 4, 7, 5);
+ tensor4.setRandom();
+ array<ptrdiff_t, 2> dims5 = { { 1, 3 } };
+ Tensor<float, 3, DataLayout> result5 = tensor4.trace(dims5);
+ VERIFY_IS_EQUAL(result5.rank(), 3);
+ VERIFY_IS_EQUAL(result5.dimension(0), 3);
+ VERIFY_IS_EQUAL(result5.dimension(1), 4);
+ VERIFY_IS_EQUAL(result5.dimension(2), 5);
+ for (int i = 0; i < 3; ++i) {
+ for (int j = 0; j < 4; ++j) {
+ for (int k = 0; k < 5; ++k) {
+ sum = 0.0f;
+ for (int l = 0; l < 7; ++l) {
+ sum += tensor4(i, l, j, l, k);
+ }
+ VERIFY_IS_EQUAL(result5(i, j, k), sum);
+ }
+ }
+ }
+}
+
+
+template<int DataLayout>
+static void test_trace_in_expr() {
+ Tensor<float, 4, DataLayout> tensor(2, 3, 5, 3);
+ tensor.setRandom();
+ array<ptrdiff_t, 2> dims = { { 1, 3 } };
+ Tensor<float, 2, DataLayout> result(2, 5);
+ result = result.constant(1.0f) - tensor.trace(dims);
+ VERIFY_IS_EQUAL(result.rank(), 2);
+ VERIFY_IS_EQUAL(result.dimension(0), 2);
+ VERIFY_IS_EQUAL(result.dimension(1), 5);
+ float sum = 0.0f;
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 5; ++j) {
+ sum = 0.0f;
+ for (int k = 0; k < 3; ++k) {
+ sum += tensor(i, k, j, k);
+ }
+ VERIFY_IS_EQUAL(result(i, j), 1.0f - sum);
+ }
+ }
+}
+
+
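+// Each sub-test is instantiated for both ColMajor and RowMajor storage orders.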
+EIGEN_DECLARE_TEST(cxx11_tensor_trace) {
+ CALL_SUBTEST(test_0D_trace<ColMajor>());
+ CALL_SUBTEST(test_0D_trace<RowMajor>());
+ CALL_SUBTEST(test_all_dimensions_trace<ColMajor>());
+ CALL_SUBTEST(test_all_dimensions_trace<RowMajor>());
+ CALL_SUBTEST(test_simple_trace<ColMajor>());
+ CALL_SUBTEST(test_simple_trace<RowMajor>());
+ CALL_SUBTEST(test_trace_in_expr<ColMajor>());
+ CALL_SUBTEST(test_trace_in_expr<RowMajor>());
+}
\ No newline at end of file
diff --git a/unsupported/test/cxx11_tensor_uint128.cpp b/unsupported/test/cxx11_tensor_uint128.cpp
index d2a1e8673..07691df98 100644
--- a/unsupported/test/cxx11_tensor_uint128.cpp
+++ b/unsupported/test/cxx11_tensor_uint128.cpp
@@ -144,7 +144,7 @@ void test_misc2() {
#endif
-void test_cxx11_tensor_uint128()
+EIGEN_DECLARE_TEST(cxx11_tensor_uint128)
{
#ifdef EIGEN_NO_INT128
// Skip the test on compilers that don't support 128bit integers natively
diff --git a/unsupported/test/cxx11_tensor_volume_patch.cpp b/unsupported/test/cxx11_tensor_volume_patch.cpp
index ca6840f3b..3aa363eb5 100644
--- a/unsupported/test/cxx11_tensor_volume_patch.cpp
+++ b/unsupported/test/cxx11_tensor_volume_patch.cpp
@@ -105,7 +105,7 @@ static void test_entire_volume_patch()
}
}
-void test_cxx11_tensor_volume_patch()
+EIGEN_DECLARE_TEST(cxx11_tensor_volume_patch)
{
CALL_SUBTEST(test_single_voxel_patch());
CALL_SUBTEST(test_entire_volume_patch());
diff --git a/unsupported/test/cxx11_tensor_volume_patch_sycl.cpp b/unsupported/test/cxx11_tensor_volume_patch_sycl.cpp
new file mode 100644
index 000000000..ca7994fd9
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_volume_patch_sycl.cpp
@@ -0,0 +1,222 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2016
+// Mehdi Goli Codeplay Software Ltd.
+// Ralph Potter Codeplay Software Ltd.
+// Luke Iwanski Codeplay Software Ltd.
+// Contact: <eigen@codeplay.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#define EIGEN_TEST_NO_LONGDOUBLE
+#define EIGEN_TEST_NO_COMPLEX
+
+#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int64_t
+#define EIGEN_USE_SYCL
+
+#include "main.h"
+#include <unsupported/Eigen/CXX11/Tensor>
+
+using Eigen::Tensor;
+static const int DataLayout = ColMajor;
+
+template <typename DataType, typename IndexType>
+static void test_single_voxel_patch_sycl(const Eigen::SyclDevice& sycl_device)
+{
+
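+ // Upload a random ColMajor tensor, build its RowMajor counterpart on the
+ // device via swap_layout(), extract 1x1x1 volume patches from both layouts,
+ // and check that every patch equals the corresponding input element.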
+ IndexType sizeDim0 = 4;
+ IndexType sizeDim1 = 2;
+ IndexType sizeDim2 = 3;
+ IndexType sizeDim3 = 5;
+ IndexType sizeDim4 = 7;
+ array<IndexType, 5> tensorColMajorRange = {{sizeDim0, sizeDim1, sizeDim2, sizeDim3, sizeDim4}};
+ array<IndexType, 5> tensorRowMajorRange = {{sizeDim4, sizeDim3, sizeDim2, sizeDim1, sizeDim0}};
+ Tensor<DataType, 5, DataLayout, IndexType> tensor_col_major(tensorColMajorRange);
+ Tensor<DataType, 5, RowMajor, IndexType> tensor_row_major(tensorRowMajorRange);
+ tensor_col_major.setRandom();
+
+
+ DataType* gpu_data_col_major = static_cast<DataType*>(sycl_device.allocate(tensor_col_major.size()*sizeof(DataType)));
+ DataType* gpu_data_row_major = static_cast<DataType*>(sycl_device.allocate(tensor_row_major.size()*sizeof(DataType)));
+ TensorMap<Tensor<DataType, 5, ColMajor, IndexType>> gpu_col_major(gpu_data_col_major, tensorColMajorRange);
+ TensorMap<Tensor<DataType, 5, RowMajor, IndexType>> gpu_row_major(gpu_data_row_major, tensorRowMajorRange);
+
+ sycl_device.memcpyHostToDevice(gpu_data_col_major, tensor_col_major.data(),(tensor_col_major.size())*sizeof(DataType));
+ gpu_row_major.device(sycl_device)=gpu_col_major.swap_layout();
+
+
+ // single voxel patch: ColMajor
+ array<IndexType, 6> patchColMajorTensorRange={{sizeDim0,1, 1, 1, sizeDim1*sizeDim2*sizeDim3, sizeDim4}};
+ Tensor<DataType, 6, DataLayout,IndexType> single_voxel_patch_col_major(patchColMajorTensorRange);
+ size_t patchTensorBuffSize =single_voxel_patch_col_major.size()*sizeof(DataType);
+ DataType* gpu_data_single_voxel_patch_col_major = static_cast<DataType*>(sycl_device.allocate(patchTensorBuffSize));
+ TensorMap<Tensor<DataType, 6, DataLayout,IndexType>> gpu_single_voxel_patch_col_major(gpu_data_single_voxel_patch_col_major, patchColMajorTensorRange);
+ gpu_single_voxel_patch_col_major.device(sycl_device)=gpu_col_major.extract_volume_patches(1, 1, 1);
+ sycl_device.memcpyDeviceToHost(single_voxel_patch_col_major.data(), gpu_data_single_voxel_patch_col_major, patchTensorBuffSize);
+
+
+ VERIFY_IS_EQUAL(single_voxel_patch_col_major.dimension(0), 4);
+ VERIFY_IS_EQUAL(single_voxel_patch_col_major.dimension(1), 1);
+ VERIFY_IS_EQUAL(single_voxel_patch_col_major.dimension(2), 1);
+ VERIFY_IS_EQUAL(single_voxel_patch_col_major.dimension(3), 1);
+ VERIFY_IS_EQUAL(single_voxel_patch_col_major.dimension(4), 2 * 3 * 5);
+ VERIFY_IS_EQUAL(single_voxel_patch_col_major.dimension(5), 7);
+
+ array<IndexType, 6> patchRowMajorTensorRange={{sizeDim4, sizeDim1*sizeDim2*sizeDim3, 1, 1, 1, sizeDim0}};
+ Tensor<DataType, 6, RowMajor,IndexType> single_voxel_patch_row_major(patchRowMajorTensorRange);
+ patchTensorBuffSize =single_voxel_patch_row_major.size()*sizeof(DataType);
+ DataType* gpu_data_single_voxel_patch_row_major = static_cast<DataType*>(sycl_device.allocate(patchTensorBuffSize));
+ TensorMap<Tensor<DataType, 6, RowMajor,IndexType>> gpu_single_voxel_patch_row_major(gpu_data_single_voxel_patch_row_major, patchRowMajorTensorRange);
+ gpu_single_voxel_patch_row_major.device(sycl_device)=gpu_row_major.extract_volume_patches(1, 1, 1);
+ sycl_device.memcpyDeviceToHost(single_voxel_patch_row_major.data(), gpu_data_single_voxel_patch_row_major, patchTensorBuffSize);
+
+ VERIFY_IS_EQUAL(single_voxel_patch_row_major.dimension(0), 7);
+ VERIFY_IS_EQUAL(single_voxel_patch_row_major.dimension(1), 2 * 3 * 5);
+ VERIFY_IS_EQUAL(single_voxel_patch_row_major.dimension(2), 1);
+ VERIFY_IS_EQUAL(single_voxel_patch_row_major.dimension(3), 1);
+ VERIFY_IS_EQUAL(single_voxel_patch_row_major.dimension(4), 1);
+ VERIFY_IS_EQUAL(single_voxel_patch_row_major.dimension(5), 4);
+
+ sycl_device.memcpyDeviceToHost(tensor_row_major.data(), gpu_data_row_major, (tensor_col_major.size())*sizeof(DataType));
+ for (IndexType i = 0; i < tensor_col_major.size(); ++i) {
+ VERIFY_IS_EQUAL(tensor_col_major.data()[i], single_voxel_patch_col_major.data()[i]);
+ VERIFY_IS_EQUAL(tensor_row_major.data()[i], single_voxel_patch_row_major.data()[i]);
+ VERIFY_IS_EQUAL(tensor_col_major.data()[i], tensor_row_major.data()[i]);
+ }
+
+
+ sycl_device.deallocate(gpu_data_col_major);
+ sycl_device.deallocate(gpu_data_row_major);
+ sycl_device.deallocate(gpu_data_single_voxel_patch_col_major);
+ sycl_device.deallocate(gpu_data_single_voxel_patch_row_major);
+}
+
+template <typename DataType, typename IndexType>
+static void test_entire_volume_patch_sycl(const Eigen::SyclDevice& sycl_device)
+{
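+ // Extract patches as large as the whole input volume; each patch is then a
+ // shifted copy of the input, zero-padded where it extends past the boundary
+ // (see the reference check below).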
+ const int depth = 4;
+ const int patch_z = 2;
+ const int patch_y = 3;
+ const int patch_x = 5;
+ const int batch = 7;
+
+ array<IndexType, 5> tensorColMajorRange = {{depth, patch_z, patch_y, patch_x, batch}};
+ array<IndexType, 5> tensorRowMajorRange = {{batch, patch_x, patch_y, patch_z, depth}};
+ Tensor<DataType, 5, DataLayout,IndexType> tensor_col_major(tensorColMajorRange);
+ Tensor<DataType, 5, RowMajor,IndexType> tensor_row_major(tensorRowMajorRange);
+ tensor_col_major.setRandom();
+
+
+ DataType* gpu_data_col_major = static_cast<DataType*>(sycl_device.allocate(tensor_col_major.size()*sizeof(DataType)));
+ DataType* gpu_data_row_major = static_cast<DataType*>(sycl_device.allocate(tensor_row_major.size()*sizeof(DataType)));
+ TensorMap<Tensor<DataType, 5, ColMajor, IndexType>> gpu_col_major(gpu_data_col_major, tensorColMajorRange);
+ TensorMap<Tensor<DataType, 5, RowMajor, IndexType>> gpu_row_major(gpu_data_row_major, tensorRowMajorRange);
+
+ sycl_device.memcpyHostToDevice(gpu_data_col_major, tensor_col_major.data(),(tensor_col_major.size())*sizeof(DataType));
+ gpu_row_major.device(sycl_device)=gpu_col_major.swap_layout();
+ sycl_device.memcpyDeviceToHost(tensor_row_major.data(), gpu_data_row_major, (tensor_col_major.size())*sizeof(DataType));
+
+
+ // entire volume patch: ColMajor
+ array<IndexType, 6> patchColMajorTensorRange={{depth,patch_z, patch_y, patch_x, patch_z*patch_y*patch_x, batch}};
+ Tensor<DataType, 6, DataLayout,IndexType> entire_volume_patch_col_major(patchColMajorTensorRange);
+ size_t patchTensorBuffSize =entire_volume_patch_col_major.size()*sizeof(DataType);
+ DataType* gpu_data_entire_volume_patch_col_major = static_cast<DataType*>(sycl_device.allocate(patchTensorBuffSize));
+ TensorMap<Tensor<DataType, 6, DataLayout,IndexType>> gpu_entire_volume_patch_col_major(gpu_data_entire_volume_patch_col_major, patchColMajorTensorRange);
+ gpu_entire_volume_patch_col_major.device(sycl_device)=gpu_col_major.extract_volume_patches(patch_z, patch_y, patch_x);
+ sycl_device.memcpyDeviceToHost(entire_volume_patch_col_major.data(), gpu_data_entire_volume_patch_col_major, patchTensorBuffSize);
+
+
+// Tensor<float, 5> tensor(depth, patch_z, patch_y, patch_x, batch);
+// tensor.setRandom();
+// Tensor<float, 5, RowMajor> tensor_row_major = tensor.swap_layout();
+
+ //Tensor<float, 6> entire_volume_patch;
+ //entire_volume_patch = tensor.extract_volume_patches(patch_z, patch_y, patch_x);
+ VERIFY_IS_EQUAL(entire_volume_patch_col_major.dimension(0), depth);
+ VERIFY_IS_EQUAL(entire_volume_patch_col_major.dimension(1), patch_z);
+ VERIFY_IS_EQUAL(entire_volume_patch_col_major.dimension(2), patch_y);
+ VERIFY_IS_EQUAL(entire_volume_patch_col_major.dimension(3), patch_x);
+ VERIFY_IS_EQUAL(entire_volume_patch_col_major.dimension(4), patch_z * patch_y * patch_x);
+ VERIFY_IS_EQUAL(entire_volume_patch_col_major.dimension(5), batch);
+
+// Tensor<float, 6, RowMajor> entire_volume_patch_row_major;
+ //entire_volume_patch_row_major = tensor_row_major.extract_volume_patches(patch_z, patch_y, patch_x);
+
+ array<IndexType, 6> patchRowMajorTensorRange={{batch,patch_z*patch_y*patch_x, patch_x, patch_y, patch_z, depth}};
+ Tensor<DataType, 6, RowMajor,IndexType> entire_volume_patch_row_major(patchRowMajorTensorRange);
+ patchTensorBuffSize =entire_volume_patch_row_major.size()*sizeof(DataType);
+ DataType* gpu_data_entire_volume_patch_row_major = static_cast<DataType*>(sycl_device.allocate(patchTensorBuffSize));
+ TensorMap<Tensor<DataType, 6, RowMajor,IndexType>> gpu_entire_volume_patch_row_major(gpu_data_entire_volume_patch_row_major, patchRowMajorTensorRange);
+ gpu_entire_volume_patch_row_major.device(sycl_device)=gpu_row_major.extract_volume_patches(patch_z, patch_y, patch_x);
+ sycl_device.memcpyDeviceToHost(entire_volume_patch_row_major.data(), gpu_data_entire_volume_patch_row_major, patchTensorBuffSize);
+
+
+ VERIFY_IS_EQUAL(entire_volume_patch_row_major.dimension(0), batch);
+ VERIFY_IS_EQUAL(entire_volume_patch_row_major.dimension(1), patch_z * patch_y * patch_x);
+ VERIFY_IS_EQUAL(entire_volume_patch_row_major.dimension(2), patch_x);
+ VERIFY_IS_EQUAL(entire_volume_patch_row_major.dimension(3), patch_y);
+ VERIFY_IS_EQUAL(entire_volume_patch_row_major.dimension(4), patch_z);
+ VERIFY_IS_EQUAL(entire_volume_patch_row_major.dimension(5), depth);
+
+ const int dz = patch_z - 1;
+ const int dy = patch_y - 1;
+ const int dx = patch_x - 1;
+
+ const int forward_pad_z = dz - dz / 2;
+ const int forward_pad_y = dy - dy / 2;
+ const int forward_pad_x = dx - dx / 2;
+
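+ // Reference check: map each patch coordinate back into the input volume via
+ // the forward padding offsets; coordinates that fall outside the input
+ // correspond to zero padding, so the expected value stays 0.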
+ for (int pz = 0; pz < patch_z; pz++) {
+ for (int py = 0; py < patch_y; py++) {
+ for (int px = 0; px < patch_x; px++) {
+ const int patchId = pz + patch_z * (py + px * patch_y);
+ for (int z = 0; z < patch_z; z++) {
+ for (int y = 0; y < patch_y; y++) {
+ for (int x = 0; x < patch_x; x++) {
+ for (int b = 0; b < batch; b++) {
+ for (int d = 0; d < depth; d++) {
+ float expected = 0.0f;
+ float expected_row_major = 0.0f;
+ const int eff_z = z - forward_pad_z + pz;
+ const int eff_y = y - forward_pad_y + py;
+ const int eff_x = x - forward_pad_x + px;
+ if (eff_z >= 0 && eff_y >= 0 && eff_x >= 0 &&
+ eff_z < patch_z && eff_y < patch_y && eff_x < patch_x) {
+ expected = tensor_col_major(d, eff_z, eff_y, eff_x, b);
+ expected_row_major = tensor_row_major(b, eff_x, eff_y, eff_z, d);
+ }
+ VERIFY_IS_EQUAL(entire_volume_patch_col_major(d, z, y, x, patchId, b), expected);
+ VERIFY_IS_EQUAL(entire_volume_patch_row_major(b, patchId, x, y, z, d), expected_row_major);
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ sycl_device.deallocate(gpu_data_col_major);
+ sycl_device.deallocate(gpu_data_row_major);
+ sycl_device.deallocate(gpu_data_entire_volume_patch_col_major);
+ sycl_device.deallocate(gpu_data_entire_volume_patch_row_major);
+}
+
+
+
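+// Create one SYCL queue per device and run both patch tests on it.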
+template <typename DataType, typename dev_Selector>
+void sycl_tensor_volume_patch_test_per_device(dev_Selector s) {
+ QueueInterface queueInterface(s);
+ auto sycl_device = Eigen::SyclDevice(&queueInterface);
+ std::cout << "Running on " << s.template get_info<cl::sycl::info::device::name>() << std::endl;
+ test_single_voxel_patch_sycl<DataType, int64_t>(sycl_device);
+ test_entire_volume_patch_sycl<DataType, int64_t>(sycl_device);
+}
+EIGEN_DECLARE_TEST(cxx11_tensor_volume_patch_sycl)
+{
+ for (const auto& device : Eigen::get_sycl_supported_devices()) {
+  CALL_SUBTEST(sycl_tensor_volume_patch_test_per_device<float>(device));
+ }
+}
diff --git a/unsupported/test/dgmres.cpp b/unsupported/test/dgmres.cpp
index 2b11807c8..5f63161b2 100644
--- a/unsupported/test/dgmres.cpp
+++ b/unsupported/test/dgmres.cpp
@@ -9,7 +9,7 @@
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#include "../../test/sparse_solver.h"
-#include <Eigen/src/IterativeSolvers/DGMRES.h>
+#include <unsupported/Eigen/IterativeSolvers>
template<typename T> void test_dgmres_T()
{
@@ -24,7 +24,7 @@ template<typename T> void test_dgmres_T()
//CALL_SUBTEST( check_sparse_square_solving(dgmres_colmajor_ssor) );
}
-void test_dgmres()
+EIGEN_DECLARE_TEST(dgmres)
{
CALL_SUBTEST_1(test_dgmres_T<double>());
CALL_SUBTEST_2(test_dgmres_T<std::complex<double> >());
diff --git a/unsupported/test/forward_adolc.cpp b/unsupported/test/forward_adolc.cpp
index 866db8e86..14a909d3b 100644
--- a/unsupported/test/forward_adolc.cpp
+++ b/unsupported/test/forward_adolc.cpp
@@ -35,7 +35,7 @@ struct TestFunc1
int m_inputs, m_values;
TestFunc1() : m_inputs(InputsAtCompileTime), m_values(ValuesAtCompileTime) {}
- TestFunc1(int inputs, int values) : m_inputs(inputs), m_values(values) {}
+ TestFunc1(int inputs_, int values_) : m_inputs(inputs_), m_values(values_) {}
int inputs() const { return m_inputs; }
int values() const { return m_values; }
@@ -119,7 +119,7 @@ template<typename Func> void adolc_forward_jacobian(const Func& f)
VERIFY_IS_APPROX(j, jref);
}
-void test_forward_adolc()
+EIGEN_DECLARE_TEST(forward_adolc)
{
adtl::setNumDir(NUMBER_DIRECTIONS);
@@ -132,7 +132,7 @@ void test_forward_adolc()
}
{
- // simple instanciation tests
+ // simple instantiation tests
Matrix<adtl::adouble,2,1> x;
foo(x);
Matrix<adtl::adouble,Dynamic,Dynamic> A(4,4);;
diff --git a/unsupported/test/gmres.cpp b/unsupported/test/gmres.cpp
index f2969116b..8d2254b5b 100644
--- a/unsupported/test/gmres.cpp
+++ b/unsupported/test/gmres.cpp
@@ -24,7 +24,7 @@ template<typename T> void test_gmres_T()
//CALL_SUBTEST( check_sparse_square_solving(gmres_colmajor_ssor) );
}
-void test_gmres()
+EIGEN_DECLARE_TEST(gmres)
{
CALL_SUBTEST_1(test_gmres_T<double>());
CALL_SUBTEST_2(test_gmres_T<std::complex<double> >());
diff --git a/unsupported/test/kronecker_product.cpp b/unsupported/test/kronecker_product.cpp
index e770049e5..b5b764c65 100644
--- a/unsupported/test/kronecker_product.cpp
+++ b/unsupported/test/kronecker_product.cpp
@@ -9,6 +9,7 @@
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
#ifdef EIGEN_TEST_PART_1
#include "sparse.h"
@@ -83,7 +84,7 @@ void check_sparse_kronecker_product(const MatrixType& ab)
}
-void test_kronecker_product()
+EIGEN_DECLARE_TEST(kronecker_product)
{
// DM = dense matrix; SM = sparse matrix
@@ -95,7 +96,7 @@ void test_kronecker_product()
SM_a.insert(1,0) = DM_a.coeffRef(1,0) = -0.9076572187376921;
SM_a.insert(1,1) = DM_a.coeffRef(1,1) = 0.6469156566545853;
SM_a.insert(1,2) = DM_a.coeffRef(1,2) = -0.3658010398782789;
-
+
MatrixXd DM_b(3,2);
SparseMatrix<double> SM_b(3,2);
SM_b.insert(0,0) = DM_b.coeffRef(0,0) = 0.9004440976767099;
@@ -165,7 +166,7 @@ void test_kronecker_product()
SM_a.insert(0,3) = -0.2;
SM_a.insert(2,4) = 0.3;
SM_a.finalize();
-
+
SM_b.insert(0,0) = 0.4;
SM_b.insert(2,1) = -0.5;
SM_b.finalize();
@@ -183,7 +184,7 @@ void test_kronecker_product()
DM_b2.resize(4,8);
DM_ab2 = kroneckerProduct(DM_a2,DM_b2);
CALL_SUBTEST(check_dimension(DM_ab2,10*4,9*8));
-
+
for(int i = 0; i < g_repeat; i++)
{
double density = Eigen::internal::random<double>(0.01,0.5);
@@ -196,35 +197,35 @@ void test_kronecker_product()
MatrixXf dA(ra,ca), dB(rb,cb), dC;
initSparse(density, dA, sA);
initSparse(density, dB, sB);
-
+
sC = kroneckerProduct(sA,sB);
dC = kroneckerProduct(dA,dB);
VERIFY_IS_APPROX(MatrixXf(sC),dC);
-
+
sC = kroneckerProduct(sA.transpose(),sB);
dC = kroneckerProduct(dA.transpose(),dB);
VERIFY_IS_APPROX(MatrixXf(sC),dC);
-
+
sC = kroneckerProduct(sA.transpose(),sB.transpose());
dC = kroneckerProduct(dA.transpose(),dB.transpose());
VERIFY_IS_APPROX(MatrixXf(sC),dC);
-
+
sC = kroneckerProduct(sA,sB.transpose());
dC = kroneckerProduct(dA,dB.transpose());
VERIFY_IS_APPROX(MatrixXf(sC),dC);
-
+
sC2 = kroneckerProduct(sA,sB);
dC = kroneckerProduct(dA,dB);
VERIFY_IS_APPROX(MatrixXf(sC2),dC);
-
+
sC2 = kroneckerProduct(dA,sB);
dC = kroneckerProduct(dA,dB);
VERIFY_IS_APPROX(MatrixXf(sC2),dC);
-
+
sC2 = kroneckerProduct(sA,dB);
dC = kroneckerProduct(dA,dB);
VERIFY_IS_APPROX(MatrixXf(sC2),dC);
-
+
sC2 = kroneckerProduct(2*sA,sB);
dC = kroneckerProduct(2*dA,dB);
VERIFY_IS_APPROX(MatrixXf(sC2),dC);
@@ -236,11 +237,10 @@ void test_kronecker_product()
#ifdef EIGEN_TEST_PART_2
// simply check that for a dense kronecker product, sparse module is not needed
-
#include "main.h"
#include <Eigen/KroneckerProduct>
-void test_kronecker_product()
+EIGEN_DECLARE_TEST(kronecker_product)
{
MatrixXd a(2,2), b(3,3), c;
a.setRandom();
diff --git a/unsupported/test/levenberg_marquardt.cpp b/unsupported/test/levenberg_marquardt.cpp
index 64f168c16..7f9a81cd3 100644
--- a/unsupported/test/levenberg_marquardt.cpp
+++ b/unsupported/test/levenberg_marquardt.cpp
@@ -1445,7 +1445,7 @@ void testNistEckerle4(void)
VERIFY_IS_APPROX(x[2], 4.5154121844E+02);
}
-void test_levenberg_marquardt()
+EIGEN_DECLARE_TEST(levenberg_marquardt)
{
// Tests using the examples provided by (c)minpack
CALL_SUBTEST(testLmder1());
diff --git a/unsupported/test/matrix_exponential.cpp b/unsupported/test/matrix_exponential.cpp
index 50dec083d..b032cbf1d 100644
--- a/unsupported/test/matrix_exponential.cpp
+++ b/unsupported/test/matrix_exponential.cpp
@@ -119,7 +119,7 @@ void randomTest(const MatrixType& m, double tol)
}
}
-void test_matrix_exponential()
+EIGEN_DECLARE_TEST(matrix_exponential)
{
CALL_SUBTEST_2(test2dRotation<double>(1e-13));
CALL_SUBTEST_1(test2dRotation<float>(2e-5)); // was 1e-5, relaxed for clang 2.8 / linux / x86-64
diff --git a/unsupported/test/matrix_function.cpp b/unsupported/test/matrix_function.cpp
index 7c9b68a3c..2049b8ba0 100644
--- a/unsupported/test/matrix_function.cpp
+++ b/unsupported/test/matrix_function.cpp
@@ -23,9 +23,8 @@ inline bool test_isApprox_abs(const Type1& a, const Type2& b)
// Returns a matrix with eigenvalues clustered around 0, 1 and 2.
template<typename MatrixType>
-MatrixType randomMatrixWithRealEivals(const typename MatrixType::Index size)
+MatrixType randomMatrixWithRealEivals(const Index size)
{
- typedef typename MatrixType::Index Index;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
MatrixType diag = MatrixType::Zero(size, size);
@@ -42,16 +41,15 @@ template <typename MatrixType, int IsComplex = NumTraits<typename internal::trai
struct randomMatrixWithImagEivals
{
// Returns a matrix with eigenvalues clustered around 0 and +/- i.
- static MatrixType run(const typename MatrixType::Index size);
+ static MatrixType run(const Index size);
};
// Partial specialization for real matrices
template<typename MatrixType>
struct randomMatrixWithImagEivals<MatrixType, 0>
{
- static MatrixType run(const typename MatrixType::Index size)
+ static MatrixType run(const Index size)
{
- typedef typename MatrixType::Index Index;
typedef typename MatrixType::Scalar Scalar;
MatrixType diag = MatrixType::Zero(size, size);
Index i = 0;
@@ -77,9 +75,8 @@ struct randomMatrixWithImagEivals<MatrixType, 0>
template<typename MatrixType>
struct randomMatrixWithImagEivals<MatrixType, 1>
{
- static MatrixType run(const typename MatrixType::Index size)
+ static MatrixType run(const Index size)
{
- typedef typename MatrixType::Index Index;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
const Scalar imagUnit(0, 1);
@@ -171,7 +168,6 @@ void testMatrixType(const MatrixType& m)
{
// Matrices with clustered eigenvalue lead to different code paths
// in MatrixFunction.h and are thus useful for testing.
- typedef typename MatrixType::Index Index;
const Index size = m.rows();
for (int i = 0; i < g_repeat; i++) {
@@ -181,7 +177,7 @@ void testMatrixType(const MatrixType& m)
}
}
-void test_matrix_function()
+EIGEN_DECLARE_TEST(matrix_function)
{
CALL_SUBTEST_1(testMatrixType(Matrix<float,1,1>()));
CALL_SUBTEST_2(testMatrixType(Matrix3cf()));
diff --git a/unsupported/test/matrix_power.cpp b/unsupported/test/matrix_power.cpp
index 7ccfacfdf..fa52d256e 100644
--- a/unsupported/test/matrix_power.cpp
+++ b/unsupported/test/matrix_power.cpp
@@ -150,7 +150,7 @@ typedef Matrix<double,3,3,RowMajor> Matrix3dRowMajor;
typedef Matrix<long double,3,3> Matrix3e;
typedef Matrix<long double,Dynamic,Dynamic> MatrixXe;
-void test_matrix_power()
+EIGEN_DECLARE_TEST(matrix_power)
{
CALL_SUBTEST_2(test2dRotation<double>(1e-13));
CALL_SUBTEST_1(test2dRotation<float>(2e-5)); // was 1e-5, relaxed for clang 2.8 / linux / x86-64
diff --git a/unsupported/test/matrix_square_root.cpp b/unsupported/test/matrix_square_root.cpp
index ea541e1ea..034f29217 100644
--- a/unsupported/test/matrix_square_root.cpp
+++ b/unsupported/test/matrix_square_root.cpp
@@ -18,7 +18,7 @@ void testMatrixSqrt(const MatrixType& m)
VERIFY_IS_APPROX(sqrtA * sqrtA, A);
}
-void test_matrix_square_root()
+EIGEN_DECLARE_TEST(matrix_square_root)
{
for (int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1(testMatrixSqrt(Matrix3cf()));
diff --git a/unsupported/test/minres.cpp b/unsupported/test/minres.cpp
index 8b300b78a..2eb40fef6 100644
--- a/unsupported/test/minres.cpp
+++ b/unsupported/test/minres.cpp
@@ -36,7 +36,7 @@ template<typename T> void test_minres_T()
}
-void test_minres()
+EIGEN_DECLARE_TEST(minres)
{
CALL_SUBTEST_1(test_minres_T<double>());
// CALL_SUBTEST_2(test_minres_T<std::complex<double> >());
diff --git a/unsupported/test/mpreal_support.cpp b/unsupported/test/mpreal_support.cpp
index 685e7ea45..4a25e993c 100644
--- a/unsupported/test/mpreal_support.cpp
+++ b/unsupported/test/mpreal_support.cpp
@@ -7,7 +7,7 @@
using namespace mpfr;
using namespace Eigen;
-void test_mpreal_support()
+EIGEN_DECLARE_TEST(mpreal_support)
{
// set precision to 256 bits (double has only 53 bits)
mpreal::set_default_prec(256);
diff --git a/unsupported/test/openglsupport.cpp b/unsupported/test/openglsupport.cpp
index 706a816f7..eadd7f985 100644
--- a/unsupported/test/openglsupport.cpp
+++ b/unsupported/test/openglsupport.cpp
@@ -106,7 +106,7 @@ GLint createShader(const char* vtx, const char* frg)
return prg_id;
}
-void test_openglsupport()
+EIGEN_DECLARE_TEST(openglsupport)
{
int argc = 0;
glutInit(&argc, 0);
@@ -318,10 +318,6 @@ void test_openglsupport()
GLint prg_id = createShader(vtx,frg);
- typedef Vector2d Vector2d;
- typedef Vector3d Vector3d;
- typedef Vector4d Vector4d;
-
VERIFY_UNIFORM(dv,v2d, Vector2d);
VERIFY_UNIFORM(dv,v3d, Vector3d);
VERIFY_UNIFORM(dv,v4d, Vector4d);
diff --git a/unsupported/test/polynomialsolver.cpp b/unsupported/test/polynomialsolver.cpp
index 7ad4aa69d..50c74f797 100644
--- a/unsupported/test/polynomialsolver.cpp
+++ b/unsupported/test/polynomialsolver.cpp
@@ -30,7 +30,6 @@ struct increment_if_fixed_size
template<int Deg, typename POLYNOMIAL, typename SOLVER>
bool aux_evalSolver( const POLYNOMIAL& pols, SOLVER& psolve )
{
- typedef typename POLYNOMIAL::Index Index;
typedef typename POLYNOMIAL::Scalar Scalar;
typedef typename POLYNOMIAL::RealScalar RealScalar;
@@ -191,7 +190,7 @@ void polynomialsolver(int deg)
realRoots );
}
-void test_polynomialsolver()
+EIGEN_DECLARE_TEST(polynomialsolver)
{
for(int i = 0; i < g_repeat; i++)
{
diff --git a/unsupported/test/polynomialutils.cpp b/unsupported/test/polynomialutils.cpp
index 5fc968402..8ff451996 100644
--- a/unsupported/test/polynomialutils.cpp
+++ b/unsupported/test/polynomialutils.cpp
@@ -101,7 +101,7 @@ template<typename _Scalar> void CauchyBounds_scalar()
internal::random<int>(18,26) )) );
}
-void test_polynomialutils()
+EIGEN_DECLARE_TEST(polynomialutils)
{
for(int i = 0; i < g_repeat; i++)
{
diff --git a/unsupported/test/sparse_extra.cpp b/unsupported/test/sparse_extra.cpp
index 4f6723d6d..51ba83b13 100644
--- a/unsupported/test/sparse_extra.cpp
+++ b/unsupported/test/sparse_extra.cpp
@@ -8,10 +8,10 @@
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-// import basic and product tests for deprectaed DynamicSparseMatrix
+// import basic and product tests for deprecated DynamicSparseMatrix
#define EIGEN_NO_DEPRECATED_WARNING
-#include "sparse_basic.cpp"
#include "sparse_product.cpp"
+#include "sparse_basic.cpp"
#include <Eigen/SparseExtra>
template<typename SetterType,typename DenseType, typename Scalar, int Options>
@@ -142,7 +142,7 @@ void check_marketio()
VERIFY_IS_EQUAL(DenseMatrix(m1),DenseMatrix(m2));
}
-void test_sparse_extra()
+EIGEN_DECLARE_TEST(sparse_extra)
{
for(int i = 0; i < g_repeat; i++) {
int s = Eigen::internal::random<int>(1,50);
diff --git a/unsupported/test/special_functions.cpp b/unsupported/test/special_functions.cpp
index 057fb3e92..50dae6c93 100644
--- a/unsupported/test/special_functions.cpp
+++ b/unsupported/test/special_functions.cpp
@@ -335,10 +335,144 @@ template<typename ArrayType> void array_special_functions()
ArrayType test = betainc(a, b + one, x) + eps;
verify_component_wise(test, expected););
}
-#endif
+#endif // EIGEN_HAS_C99_MATH
+
+ // Test Bessel function i0e. Reference results obtained with SciPy.
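+ // i0e(x) = exp(-|x|) * i0(x) is the exponentially scaled modified Bessel
+ // function of order zero; it is even in x and equals 1 at x = 0.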
+ {
+ ArrayType x(21);
+ ArrayType expected(21);
+ ArrayType res(21);
+
+ x << -20.0, -18.0, -16.0, -14.0, -12.0, -10.0, -8.0, -6.0, -4.0, -2.0, 0.0,
+ 2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0, 18.0, 20.0;
+
+ expected << 0.0897803118848, 0.0947062952128, 0.100544127361,
+ 0.107615251671, 0.116426221213, 0.127833337163, 0.143431781857,
+ 0.16665743264, 0.207001921224, 0.308508322554, 1.0, 0.308508322554,
+ 0.207001921224, 0.16665743264, 0.143431781857, 0.127833337163,
+ 0.116426221213, 0.107615251671, 0.100544127361, 0.0947062952128,
+ 0.0897803118848;
+
+ CALL_SUBTEST(res = i0e(x);
+ verify_component_wise(res, expected););
+ }
+
+ // Test Bessel function i1e. Reference results obtained with SciPy.
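+ // i1e(x) = exp(-|x|) * i1(x) is the exponentially scaled modified Bessel
+ // function of order one; i1 is odd, hence the sign change across x = 0.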
+ {
+ ArrayType x(21);
+ ArrayType expected(21);
+ ArrayType res(21);
+
+ x << -20.0, -18.0, -16.0, -14.0, -12.0, -10.0, -8.0, -6.0, -4.0, -2.0, 0.0,
+ 2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0, 18.0, 20.0;
+
+ expected << -0.0875062221833, -0.092036796872, -0.0973496147565,
+ -0.103697667463, -0.11146429929, -0.121262681384, -0.134142493293,
+ -0.152051459309, -0.178750839502, -0.215269289249, 0.0, 0.215269289249,
+ 0.178750839502, 0.152051459309, 0.134142493293, 0.121262681384,
+ 0.11146429929, 0.103697667463, 0.0973496147565, 0.092036796872,
+ 0.0875062221833;
+
+ CALL_SUBTEST(res = i1e(x);
+ verify_component_wise(res, expected););
+ }
+
+ /* Code to generate the data for the following two test cases.
+ import mpmath
+ import numpy as np
+
+ N = 5
+ np.random.seed(3)
+
+ a = np.logspace(-2, 3, 6)
+ a = np.ravel(np.tile(np.reshape(a, [-1, 1]), [1, N]))
+ x = np.random.gamma(a, 1.0)
+ x = np.maximum(x, np.finfo(np.float32).tiny)
+
+ def igamma(a, x):
+ return mpmath.gammainc(a, 0, x, regularized=True)
+
+ def igamma_der_a(a, x):
+ res = mpmath.diff(lambda a_prime: igamma(a_prime, x), a)
+ return np.float64(res)
+
+ def gamma_sample_der_alpha(a, x):
+ igamma_x = igamma(a, x)
+ def igammainv_of_igamma(a_prime):
+ return mpmath.findroot(lambda x_prime: igamma(a_prime, x_prime) -
+ igamma_x, x, solver='newton')
+ return np.float64(mpmath.diff(igammainv_of_igamma, a))
+
+ v_igamma_der_a = np.vectorize(igamma_der_a)(a, x)
+ v_gamma_sample_der_alpha = np.vectorize(gamma_sample_der_alpha)(a, x)
+ */
+
+#if EIGEN_HAS_C99_MATH
+ // Test igamma_der_a
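+ // igamma_der_a(a, x) is the derivative of the regularized lower incomplete
+ // gamma function P(a, x) with respect to the parameter a (see the mpmath
+ // reference code in the comment above).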
+ {
+ ArrayType a(30);
+ ArrayType x(30);
+ ArrayType res(30);
+ ArrayType v(30);
+
+ a << 0.01, 0.01, 0.01, 0.01, 0.01, 0.1, 0.1, 0.1, 0.1, 0.1, 1.0, 1.0, 1.0,
+ 1.0, 1.0, 10.0, 10.0, 10.0, 10.0, 10.0, 100.0, 100.0, 100.0, 100.0,
+ 100.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0;
+
+ x << 1.25668890405e-26, 1.17549435082e-38, 1.20938905072e-05,
+ 1.17549435082e-38, 1.17549435082e-38, 5.66572070696e-16,
+ 0.0132865061065, 0.0200034203853, 6.29263709118e-17, 1.37160367764e-06,
+ 0.333412038288, 1.18135687766, 0.580629033777, 0.170631439426,
+ 0.786686768458, 7.63873279537, 13.1944344379, 11.896042354,
+ 10.5830172417, 10.5020942233, 92.8918587747, 95.003720371,
+ 86.3715926467, 96.0330217672, 82.6389930677, 968.702906754,
+ 969.463546828, 1001.79726022, 955.047416547, 1044.27458568;
+
+ v << -32.7256441441, -36.4394150514, -9.66467612263, -36.4394150514,
+ -36.4394150514, -1.0891900302, -2.66351229645, -2.48666868596,
+ -0.929700494428, -3.56327722764, -0.455320135314, -0.391437214323,
+ -0.491352055991, -0.350454834292, -0.471773162921, -0.104084440522,
+ -0.0723646747909, -0.0992828975532, -0.121638215446, -0.122619605294,
+ -0.0317670267286, -0.0359974812869, -0.0154359225363, -0.0375775365921,
+ -0.00794899153653, -0.00777303219211, -0.00796085782042,
+ -0.0125850719397, -0.00455500206958, -0.00476436993148;
+
+ CALL_SUBTEST(res = igamma_der_a(a, x); verify_component_wise(res, v););
+ }
+
+ // Test gamma_sample_der_alpha
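+ // gamma_sample_der_alpha(alpha, sample) is the derivative of a Gamma(alpha, 1)
+ // sample with respect to alpha, obtained by implicitly differentiating
+ // P(alpha, sample(alpha)) = const, matching the mpmath reference above.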
+ {
+ ArrayType alpha(30);
+ ArrayType sample(30);
+ ArrayType res(30);
+ ArrayType v(30);
+
+ alpha << 0.01, 0.01, 0.01, 0.01, 0.01, 0.1, 0.1, 0.1, 0.1, 0.1, 1.0, 1.0,
+ 1.0, 1.0, 1.0, 10.0, 10.0, 10.0, 10.0, 10.0, 100.0, 100.0, 100.0, 100.0,
+ 100.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0;
+
+ sample << 1.25668890405e-26, 1.17549435082e-38, 1.20938905072e-05,
+ 1.17549435082e-38, 1.17549435082e-38, 5.66572070696e-16,
+ 0.0132865061065, 0.0200034203853, 6.29263709118e-17, 1.37160367764e-06,
+ 0.333412038288, 1.18135687766, 0.580629033777, 0.170631439426,
+ 0.786686768458, 7.63873279537, 13.1944344379, 11.896042354,
+ 10.5830172417, 10.5020942233, 92.8918587747, 95.003720371,
+ 86.3715926467, 96.0330217672, 82.6389930677, 968.702906754,
+ 969.463546828, 1001.79726022, 955.047416547, 1044.27458568;
+
+ v << 7.42424742367e-23, 1.02004297287e-34, 0.0130155240738,
+ 1.02004297287e-34, 1.02004297287e-34, 1.96505168277e-13, 0.525575786243,
+ 0.713903991771, 2.32077561808e-14, 0.000179348049886, 0.635500453302,
+ 1.27561284917, 0.878125852156, 0.41565819538, 1.03606488534,
+ 0.885964824887, 1.16424049334, 1.10764479598, 1.04590810812,
+ 1.04193666963, 0.965193152414, 0.976217589464, 0.93008035061,
+ 0.98153216096, 0.909196397698, 0.98434963993, 0.984738050206,
+ 1.00106492525, 0.97734200649, 1.02198794179;
+
+ CALL_SUBTEST(res = gamma_sample_der_alpha(alpha, sample);
+ verify_component_wise(res, v););
+ }
+#endif // EIGEN_HAS_C99_MATH
}
-void test_special_functions()
+EIGEN_DECLARE_TEST(special_functions)
{
CALL_SUBTEST_1(array_special_functions<ArrayXf>());
CALL_SUBTEST_2(array_special_functions<ArrayXd>());
diff --git a/unsupported/test/splines.cpp b/unsupported/test/splines.cpp
index 3be020434..88ec87b97 100644
--- a/unsupported/test/splines.cpp
+++ b/unsupported/test/splines.cpp
@@ -268,7 +268,7 @@ void check_global_interpolation_with_derivatives2d()
}
}
-void test_splines()
+EIGEN_DECLARE_TEST(splines)
{
for (int i = 0; i < g_repeat; ++i)
{