Diffstat
-rw-r--r--CMakeLists.txt29
-rw-r--r--Eigen/Core17
-rw-r--r--Eigen/Geometry5
-rw-r--r--Eigen/OrderingMethods3
-rw-r--r--Eigen/Sparse2
-rw-r--r--Eigen/SparseCholesky8
-rw-r--r--Eigen/src/Cholesky/LDLT.h59
-rw-r--r--Eigen/src/Cholesky/LLT.h47
-rw-r--r--Eigen/src/Core/Array.h102
-rw-r--r--Eigen/src/Core/AssignEvaluator.h3
-rwxr-xr-xEigen/src/Core/Assign_MKL.h20
-rw-r--r--Eigen/src/Core/Block.h60
-rw-r--r--Eigen/src/Core/ConditionEstimator.h2
-rw-r--r--Eigen/src/Core/CoreEvaluators.h91
-rw-r--r--Eigen/src/Core/CwiseBinaryOp.h24
-rw-r--r--Eigen/src/Core/CwiseNullaryOp.h10
-rw-r--r--Eigen/src/Core/CwiseUnaryView.h2
-rw-r--r--Eigen/src/Core/DenseBase.h16
-rw-r--r--Eigen/src/Core/DiagonalMatrix.h24
-rw-r--r--Eigen/src/Core/GeneralProduct.h11
-rw-r--r--Eigen/src/Core/GenericPacketMath.h138
-rw-r--r--Eigen/src/Core/GlobalFunctions.h5
-rw-r--r--Eigen/src/Core/IO.h22
-rw-r--r--Eigen/src/Core/IndexedView.h2
-rw-r--r--Eigen/src/Core/Matrix.h135
-rw-r--r--Eigen/src/Core/MatrixBase.h5
-rw-r--r--Eigen/src/Core/NestByValue.h69
-rw-r--r--Eigen/src/Core/PlainObjectBase.h83
-rw-r--r--Eigen/src/Core/Product.h15
-rw-r--r--Eigen/src/Core/ProductEvaluators.h73
-rw-r--r--Eigen/src/Core/Redux.h10
-rw-r--r--Eigen/src/Core/Ref.h5
-rw-r--r--Eigen/src/Core/Reshaped.h2
-rw-r--r--Eigen/src/Core/Reverse.h10
-rw-r--r--Eigen/src/Core/SelfAdjointView.h13
-rw-r--r--Eigen/src/Core/Solve.h2
-rw-r--r--Eigen/src/Core/SolverBase.h41
-rw-r--r--Eigen/src/Core/Swap.h9
-rw-r--r--Eigen/src/Core/Transpose.h43
-rw-r--r--Eigen/src/Core/TriangularMatrix.h13
-rw-r--r--Eigen/src/Core/VectorBlock.h8
-rw-r--r--Eigen/src/Core/VectorwiseOp.h46
-rw-r--r--Eigen/src/Core/Visitor.h35
-rw-r--r--Eigen/src/Core/arch/AVX/Complex.h28
-rw-r--r--Eigen/src/Core/arch/AVX/MathFunctions.h316
-rw-r--r--Eigen/src/Core/arch/AVX/PacketMath.h221
-rw-r--r--Eigen/src/Core/arch/AVX/TypeCasting.h10
-rw-r--r--Eigen/src/Core/arch/AVX512/Complex.h488
-rw-r--r--Eigen/src/Core/arch/AVX512/MathFunctions.h26
-rw-r--r--Eigen/src/Core/arch/AVX512/PacketMath.h309
-rw-r--r--Eigen/src/Core/arch/AltiVec/Complex.h16
-rw-r--r--Eigen/src/Core/arch/AltiVec/MathFunctions.h267
-rwxr-xr-xEigen/src/Core/arch/AltiVec/PacketMath.h167
-rw-r--r--Eigen/src/Core/arch/Default/GenericPacketMathFunctions.h471
-rw-r--r--Eigen/src/Core/arch/Default/Settings.h2
-rw-r--r--Eigen/src/Core/arch/GPU/PacketMath.h124
-rw-r--r--Eigen/src/Core/arch/GPU/PacketMathHalf.h217
-rw-r--r--Eigen/src/Core/arch/MSA/Complex.h4
-rw-r--r--Eigen/src/Core/arch/MSA/MathFunctions.h4
-rw-r--r--Eigen/src/Core/arch/MSA/PacketMath.h6
-rw-r--r--Eigen/src/Core/arch/NEON/Complex.h32
-rw-r--r--Eigen/src/Core/arch/NEON/MathFunctions.h170
-rw-r--r--Eigen/src/Core/arch/NEON/PacketMath.h69
-rw-r--r--Eigen/src/Core/arch/NEON/TypeCasting.h8
-rw-r--r--Eigen/src/Core/arch/SSE/Complex.h25
-rw-r--r--Eigen/src/Core/arch/SSE/MathFunctions.h417
-rwxr-xr-xEigen/src/Core/arch/SSE/PacketMath.h181
-rw-r--r--Eigen/src/Core/arch/SSE/TypeCasting.h7
-rw-r--r--Eigen/src/Core/arch/SYCL/InteropHeaders.h2
-rw-r--r--Eigen/src/Core/arch/ZVector/Complex.h4
-rwxr-xr-xEigen/src/Core/arch/ZVector/PacketMath.h6
-rw-r--r--Eigen/src/Core/functors/AssignmentFunctors.h11
-rw-r--r--Eigen/src/Core/functors/NullaryFunctors.h39
-rw-r--r--Eigen/src/Core/functors/UnaryFunctors.h76
-rw-r--r--Eigen/src/Core/products/GeneralBlockPanelKernel.h1371
-rw-r--r--Eigen/src/Core/products/GeneralMatrixMatrix.h14
-rw-r--r--Eigen/src/Core/products/Parallelizer.h3
-rw-r--r--Eigen/src/Core/products/SelfadjointMatrixMatrix.h23
-rwxr-xr-xEigen/src/Core/util/BlasUtil.h12
-rw-r--r--Eigen/src/Core/util/ConfigureVectorization.h39
-rw-r--r--Eigen/src/Core/util/ForwardDeclarations.h7
-rw-r--r--Eigen/src/Core/util/IndexedViewHelper.h4
-rw-r--r--Eigen/src/Core/util/Macros.h98
-rw-r--r--Eigen/src/Core/util/Memory.h15
-rwxr-xr-xEigen/src/Core/util/Meta.h33
-rw-r--r--Eigen/src/Core/util/StaticAssert.h3
-rw-r--r--Eigen/src/Core/util/XprHelper.h3
-rw-r--r--Eigen/src/Eigenvalues/ComplexSchur.h9
-rw-r--r--Eigen/src/Eigenvalues/EigenSolver.h2
-rw-r--r--Eigen/src/Eigenvalues/RealQZ.h13
-rw-r--r--Eigen/src/Eigenvalues/RealSchur.h15
-rw-r--r--Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h9
-rw-r--r--Eigen/src/Geometry/OrthoMethods.h5
-rw-r--r--Eigen/src/Geometry/Transform.h42
-rw-r--r--Eigen/src/Geometry/arch/Geometry_SSE.h39
-rw-r--r--Eigen/src/Householder/HouseholderSequence.h18
-rw-r--r--Eigen/src/IterativeLinearSolvers/IncompleteCholesky.h12
-rw-r--r--Eigen/src/IterativeLinearSolvers/IncompleteLUT.h9
-rw-r--r--Eigen/src/LU/Determinant.h43
-rw-r--r--Eigen/src/LU/FullPivLU.h58
-rw-r--r--Eigen/src/LU/PartialPivLU.h112
-rw-r--r--Eigen/src/LU/arch/Inverse_SSE.h4
-rw-r--r--Eigen/src/OrderingMethods/Amd.h24
-rw-r--r--Eigen/src/OrderingMethods/Ordering.h4
-rw-r--r--Eigen/src/PardisoSupport/PardisoSupport.h1
-rw-r--r--Eigen/src/QR/ColPivHouseholderQR.h52
-rw-r--r--Eigen/src/QR/CompleteOrthogonalDecomposition.h106
-rw-r--r--Eigen/src/QR/FullPivHouseholderQR.h72
-rw-r--r--Eigen/src/QR/HouseholderQR.h54
-rw-r--r--Eigen/src/SPQRSupport/SuiteSparseQRSupport.h26
-rw-r--r--Eigen/src/SVD/BDCSVD.h5
-rw-r--r--Eigen/src/SVD/JacobiSVD.h1
-rw-r--r--Eigen/src/SVD/SVDBase.h72
-rw-r--r--Eigen/src/SparseCholesky/SimplicialCholesky.h12
-rw-r--r--Eigen/src/SparseCholesky/SimplicialCholesky_impl.h39
-rw-r--r--Eigen/src/SparseCore/CompressedStorage.h16
-rw-r--r--Eigen/src/SparseCore/SparseAssign.h33
-rw-r--r--Eigen/src/SparseCore/SparseCompressedBase.h35
-rw-r--r--Eigen/src/SparseCore/SparseMatrix.h111
-rw-r--r--Eigen/src/SparseCore/SparseUtil.h8
-rw-r--r--Eigen/src/SparseQR/SparseQR.h22
-rw-r--r--Eigen/src/plugins/ArrayCwiseUnaryOps.h42
-rw-r--r--Eigen/src/plugins/BlockMethods.h337
-rw-r--r--Eigen/src/plugins/CommonCwiseUnaryOps.h14
-rw-r--r--bench/BenchTimer.h4
-rw-r--r--bench/bench_gemm.cpp83
-rw-r--r--bench/perf_monitoring/changesets.txt67
-rwxr-xr-xbench/perf_monitoring/make_plot.sh18
-rw-r--r--bench/perf_monitoring/resources/chart_footer.html14
-rwxr-xr-xbench/perf_monitoring/run.sh5
-rw-r--r--blas/common.h6
-rw-r--r--blas/double.cpp2
-rw-r--r--blas/level1_cplx_impl.h8
-rw-r--r--blas/level1_impl.h4
-rw-r--r--blas/single.cpp2
-rw-r--r--cmake/EigenTesting.cmake45
-rw-r--r--doc/A05_PortingFrom2To3.dox299
-rw-r--r--doc/AsciiQuickReference.txt11
-rw-r--r--doc/CMakeLists.txt2
-rw-r--r--doc/CoeffwiseMathFunctionsTable.dox36
-rw-r--r--doc/Doxyfile.in5
-rw-r--r--doc/InsideEigenExample.dox5
-rw-r--r--doc/Manual.dox3
-rw-r--r--doc/Overview.dox2
-rw-r--r--doc/Pitfalls.dox84
-rw-r--r--doc/PreprocessorDirectives.dox2
-rw-r--r--doc/QuickReference.dox11
-rw-r--r--doc/StlContainers.dox41
-rw-r--r--doc/StructHavingEigenMembers.dox81
-rw-r--r--doc/TopicLazyEvaluation.dox76
-rw-r--r--doc/TutorialMatrixClass.dox30
-rw-r--r--doc/TutorialSTL.dox66
-rw-r--r--doc/TutorialSlicingIndexing.dox2
-rw-r--r--doc/UnalignedArrayAssert.dox40
-rw-r--r--doc/eigendoxy.css7
-rw-r--r--doc/eigendoxy_footer.html.in26
-rw-r--r--doc/snippets/Array_initializer_list_23_cxx11.cpp5
-rw-r--r--doc/snippets/Array_initializer_list_vector_cxx11.cpp2
-rw-r--r--doc/snippets/Array_variadic_ctor_cxx11.cpp3
-rw-r--r--doc/snippets/Matrix_initializer_list_23_cxx11.cpp5
-rw-r--r--doc/snippets/Matrix_initializer_list_vector_cxx11.cpp2
-rw-r--r--doc/snippets/Matrix_variadic_ctor_cxx11.cpp3
-rw-r--r--doc/snippets/Tutorial_std_sort_rows_cxx11.cpp (renamed from doc/snippets/Tutorial_std_sort_rows.cpp)0
-rw-r--r--failtest/CMakeLists.txt13
-rw-r--r--failtest/initializer_list_1.cpp14
-rw-r--r--failtest/initializer_list_2.cpp16
-rw-r--r--lapack/CMakeLists.txt1
-rw-r--r--test/CMakeLists.txt13
-rw-r--r--test/adjoint.cpp18
-rw-r--r--test/array_cwise.cpp88
-rw-r--r--test/array_reverse.cpp55
-rw-r--r--test/bdcsvd.cpp2
-rw-r--r--test/bicgstab.cpp8
-rw-r--r--test/block.cpp21
-rw-r--r--test/boostmultiprec.cpp1
-rw-r--r--test/cholesky.cpp65
-rw-r--r--test/conjugate_gradient.cpp4
-rw-r--r--test/constructor.cpp14
-rw-r--r--test/ctorleak.cpp20
-rw-r--r--test/dense_storage.cpp33
-rw-r--r--test/diagonal_matrix_variadic_ctor.cpp185
-rw-r--r--test/dynalloc.cpp2
-rw-r--r--test/eigensolver_generic.cpp96
-rwxr-xr-xtest/geo_transformations.cpp65
-rw-r--r--test/incomplete_cholesky.cpp14
-rw-r--r--test/indexed_view.cpp10
-rw-r--r--test/initializer_list_construction.cpp385
-rw-r--r--test/inverse.cpp17
-rw-r--r--test/jacobisvd.cpp19
-rw-r--r--test/lu.cpp85
-rw-r--r--test/main.h71
-rw-r--r--test/nestbyvalue.cpp37
-rw-r--r--test/nullary.cpp63
-rw-r--r--test/numext.cpp5
-rw-r--r--test/packetmath.cpp328
-rw-r--r--test/product_notemporary.cpp42
-rw-r--r--test/product_trsolve.cpp13
-rw-r--r--test/qr.cpp16
-rw-r--r--test/qr_colpivoting.cpp64
-rw-r--r--test/qr_fullpivoting.cpp16
-rw-r--r--test/ref.cpp12
-rw-r--r--test/reshape.cpp48
-rw-r--r--test/simplicial_cholesky.cpp8
-rw-r--r--test/solverbase.h36
-rw-r--r--test/sparse.h8
-rw-r--r--test/sparse_basic.cpp27
-rw-r--r--test/sparse_solvers.cpp13
-rw-r--r--test/sparseqr.cpp31
-rw-r--r--test/svd_common.h27
-rw-r--r--test/symbolic_index.cpp38
-rw-r--r--test/triangular.cpp16
-rw-r--r--test/type_alias.cpp48
-rw-r--r--test/umeyama.cpp2
-rw-r--r--test/vectorization_logic.cpp21
-rw-r--r--test/vectorwiseop.cpp35
-rw-r--r--test/zerosized.cpp9
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorBase.h23
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h2
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorContractionBlocking.h4
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorContractionGpu.h9
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorContractionMapper.h31
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h447
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorConversion.h127
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorDeviceThreadPool.h10
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h1
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorForcedEval.h25
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorGenerator.h98
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h36
-rw-r--r--unsupported/Eigen/CXX11/src/ThreadPool/EventCount.h187
-rw-r--r--unsupported/Eigen/CXX11/src/ThreadPool/NonBlockingThreadPool.h11
-rw-r--r--unsupported/Eigen/CXX11/src/ThreadPool/RunQueue.h66
-rw-r--r--unsupported/Eigen/CXX11/src/ThreadPool/ThreadLocal.h10
-rw-r--r--unsupported/Eigen/CXX11/src/util/CXX11Workarounds.h6
-rw-r--r--unsupported/Eigen/CXX11/src/util/EmulateArray.h20
-rw-r--r--unsupported/Eigen/CXX11/src/util/EmulateCXX11Meta.h16
-rw-r--r--unsupported/Eigen/src/Eigenvalues/ArpackSelfAdjointEigenSolver.h21
-rw-r--r--unsupported/Eigen/src/EulerAngles/EulerSystem.h42
-rw-r--r--unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h4
-rw-r--r--unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h16
-rw-r--r--unsupported/Eigen/src/MatrixFunctions/MatrixPower.h13
-rw-r--r--unsupported/Eigen/src/Polynomials/Companion.h3
-rw-r--r--unsupported/Eigen/src/Polynomials/PolynomialSolver.h28
-rw-r--r--unsupported/Eigen/src/Skyline/SkylineInplaceLU.h2
-rw-r--r--unsupported/Eigen/src/Skyline/SkylineMatrix.h2
-rw-r--r--unsupported/Eigen/src/Skyline/SkylineMatrixBase.h2
-rw-r--r--unsupported/Eigen/src/Skyline/SkylineStorage.h2
-rw-r--r--unsupported/Eigen/src/SparseExtra/RandomSetter.h6
-rw-r--r--unsupported/Eigen/src/Splines/Spline.h2
-rw-r--r--unsupported/test/CMakeLists.txt2
-rw-r--r--unsupported/test/EulerAngles.cpp8
-rw-r--r--unsupported/test/cxx11_eventcount.cpp10
-rw-r--r--unsupported/test/cxx11_tensor_executor.cpp87
-rw-r--r--unsupported/test/cxx11_tensor_fft.cpp8
-rw-r--r--unsupported/test/cxx11_tensor_forced_eval.cpp2
-rw-r--r--unsupported/test/cxx11_tensor_generator.cpp6
-rw-r--r--unsupported/test/cxx11_tensor_gpu.cu8
-rw-r--r--unsupported/test/matrix_power.cpp40
-rw-r--r--unsupported/test/polynomialsolver.cpp21
258 files changed, 9026 insertions, 3986 deletions
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 45cbc75ee..76e083314 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -250,7 +250,7 @@ if(NOT MSVC)
option(EIGEN_TEST_AVX512 "Enable/Disable AVX512 in tests/examples" OFF)
if(EIGEN_TEST_AVX512)
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mavx512f -DEIGEN_ENABLE_AVX512")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mavx512f -mfma -DEIGEN_ENABLE_AVX512")
if (NOT "${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fabi-version=6")
endif()
@@ -350,6 +350,19 @@ else(NOT MSVC)
endif(NOT CMAKE_CL_64)
message(STATUS "Enabling SSE2 in tests/examples")
endif(EIGEN_TEST_SSE2)
+
+ option(EIGEN_TEST_AVX "Enable/Disable AVX in tests/examples" OFF)
+ if(EIGEN_TEST_AVX)
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /arch:AVX")
+ message(STATUS "Enabling AVX in tests/examples")
+ endif()
+
+ option(EIGEN_TEST_FMA "Enable/Disable FMA/AVX2 in tests/examples" OFF)
+ if(EIGEN_TEST_FMA AND NOT EIGEN_TEST_NEON)
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /arch:AVX2")
+ message(STATUS "Enabling FMA/AVX2 in tests/examples")
+ endif()
+
endif(NOT MSVC)
option(EIGEN_TEST_NO_EXPLICIT_VECTORIZATION "Disable explicit vectorization in tests/examples" OFF)
@@ -456,6 +469,8 @@ if(BUILD_TESTING)
else()
add_subdirectory(test EXCLUDE_FROM_ALL)
endif()
+
+ add_subdirectory(failtest)
endif()
if(EIGEN_LEAVE_TEST_IN_ALL_TARGET)
@@ -506,11 +521,6 @@ message(STATUS "")
message(STATUS "Configured Eigen ${EIGEN_VERSION_NUMBER}")
message(STATUS "")
-option(EIGEN_FAILTEST "Enable failtests." OFF)
-if(EIGEN_FAILTEST)
- add_subdirectory(failtest)
-endif()
-
string(TOLOWER "${CMAKE_GENERATOR}" cmake_generator_tolower)
if(cmake_generator_tolower MATCHES "makefile")
message(STATUS "Some things you can do now:")
@@ -527,8 +537,10 @@ if(cmake_generator_tolower MATCHES "makefile")
message(STATUS " | Or:")
message(STATUS " | cmake . -DINCLUDE_INSTALL_DIR=yourdir")
message(STATUS "make doc | Generate the API documentation, requires Doxygen & LaTeX")
- message(STATUS "make check | Build and run the unit-tests. Read this page:")
- message(STATUS " | http://eigen.tuxfamily.org/index.php?title=Tests")
+ if(BUILD_TESTING)
+ message(STATUS "make check | Build and run the unit-tests. Read this page:")
+ message(STATUS " | http://eigen.tuxfamily.org/index.php?title=Tests")
+ endif()
message(STATUS "make blas | Build BLAS library (not the same thing as Eigen)")
message(STATUS "make uninstall| Removes files installed by make install")
message(STATUS "--------------+--------------------------------------------------------------")
@@ -555,7 +567,6 @@ if (NOT CMAKE_VERSION VERSION_LESS 3.0)
# Imported target support
add_library (eigen INTERFACE)
add_library (Eigen3::Eigen ALIAS eigen)
-
target_compile_definitions (eigen INTERFACE ${EIGEN_DEFINITIONS})
target_include_directories (eigen INTERFACE
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}>
diff --git a/Eigen/Core b/Eigen/Core
index 0c09f7c79..759b1bb80 100644
--- a/Eigen/Core
+++ b/Eigen/Core
@@ -153,35 +153,40 @@ using std::ptrdiff_t;
#if defined EIGEN_VECTORIZE_AVX512
#include "src/Core/arch/SSE/PacketMath.h"
+ #include "src/Core/arch/SSE/TypeCasting.h"
+ #include "src/Core/arch/SSE/Complex.h"
#include "src/Core/arch/AVX/PacketMath.h"
+ #include "src/Core/arch/AVX/TypeCasting.h"
+ #include "src/Core/arch/AVX/Complex.h"
#include "src/Core/arch/AVX512/PacketMath.h"
+ #include "src/Core/arch/AVX512/Complex.h"
#include "src/Core/arch/SSE/MathFunctions.h"
#include "src/Core/arch/AVX/MathFunctions.h"
#include "src/Core/arch/AVX512/MathFunctions.h"
#elif defined EIGEN_VECTORIZE_AVX
// Use AVX for floats and doubles, SSE for integers
#include "src/Core/arch/SSE/PacketMath.h"
+ #include "src/Core/arch/SSE/TypeCasting.h"
#include "src/Core/arch/SSE/Complex.h"
- #include "src/Core/arch/SSE/MathFunctions.h"
#include "src/Core/arch/AVX/PacketMath.h"
- #include "src/Core/arch/AVX/MathFunctions.h"
- #include "src/Core/arch/AVX/Complex.h"
#include "src/Core/arch/AVX/TypeCasting.h"
- #include "src/Core/arch/SSE/TypeCasting.h"
+ #include "src/Core/arch/AVX/Complex.h"
+ #include "src/Core/arch/SSE/MathFunctions.h"
+ #include "src/Core/arch/AVX/MathFunctions.h"
#elif defined EIGEN_VECTORIZE_SSE
#include "src/Core/arch/SSE/PacketMath.h"
+ #include "src/Core/arch/SSE/TypeCasting.h"
#include "src/Core/arch/SSE/MathFunctions.h"
#include "src/Core/arch/SSE/Complex.h"
- #include "src/Core/arch/SSE/TypeCasting.h"
#elif defined(EIGEN_VECTORIZE_ALTIVEC) || defined(EIGEN_VECTORIZE_VSX)
#include "src/Core/arch/AltiVec/PacketMath.h"
#include "src/Core/arch/AltiVec/MathFunctions.h"
#include "src/Core/arch/AltiVec/Complex.h"
#elif defined EIGEN_VECTORIZE_NEON
#include "src/Core/arch/NEON/PacketMath.h"
+ #include "src/Core/arch/NEON/TypeCasting.h"
#include "src/Core/arch/NEON/MathFunctions.h"
#include "src/Core/arch/NEON/Complex.h"
- #include "src/Core/arch/NEON/TypeCasting.h"
#elif defined EIGEN_VECTORIZE_ZVECTOR
#include "src/Core/arch/ZVector/PacketMath.h"
#include "src/Core/arch/ZVector/MathFunctions.h"
diff --git a/Eigen/Geometry b/Eigen/Geometry
index 04aa316cb..16b4bd6e1 100644
--- a/Eigen/Geometry
+++ b/Eigen/Geometry
@@ -49,9 +49,8 @@
#include "src/Geometry/AlignedBox.h"
#include "src/Geometry/Umeyama.h"
-// Use the SSE optimized version whenever possible. At the moment the
-// SSE version doesn't compile when AVX is enabled
-#if defined EIGEN_VECTORIZE_SSE && !defined EIGEN_VECTORIZE_AVX
+// Use the SSE optimized version whenever possible.
+#if defined EIGEN_VECTORIZE_SSE
#include "src/Geometry/arch/Geometry_SSE.h"
#endif
diff --git a/Eigen/OrderingMethods b/Eigen/OrderingMethods
index d8ea36193..29691a62b 100644
--- a/Eigen/OrderingMethods
+++ b/Eigen/OrderingMethods
@@ -63,10 +63,7 @@
* \endcode
*/
-#ifndef EIGEN_MPL2_ONLY
#include "src/OrderingMethods/Amd.h"
-#endif
-
#include "src/OrderingMethods/Ordering.h"
#include "src/Core/util/ReenableStupidWarnings.h"
diff --git a/Eigen/Sparse b/Eigen/Sparse
index 136e681a1..a2ef7a665 100644
--- a/Eigen/Sparse
+++ b/Eigen/Sparse
@@ -25,9 +25,7 @@
#include "SparseCore"
#include "OrderingMethods"
-#ifndef EIGEN_MPL2_ONLY
#include "SparseCholesky"
-#endif
#include "SparseLU"
#include "SparseQR"
#include "IterativeLinearSolvers"
diff --git a/Eigen/SparseCholesky b/Eigen/SparseCholesky
index b6a320c40..d2b1f1276 100644
--- a/Eigen/SparseCholesky
+++ b/Eigen/SparseCholesky
@@ -30,16 +30,8 @@
* \endcode
*/
-#ifdef EIGEN_MPL2_ONLY
-#error The SparseCholesky module has nothing to offer in MPL2 only mode
-#endif
-
#include "src/SparseCholesky/SimplicialCholesky.h"
-
-#ifndef EIGEN_MPL2_ONLY
#include "src/SparseCholesky/SimplicialCholesky_impl.h"
-#endif
-
#include "src/Core/util/ReenableStupidWarnings.h"
#endif // EIGEN_SPARSECHOLESKY_MODULE_H
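The hunks above drop the EIGEN_MPL2_ONLY guards around the AMD ordering and the simplicial Cholesky factorization, so the SparseCholesky module is assumed to be usable regardless of that flag. A minimal usage sketch of the module (the matrix values below are illustrative only):

    #include <Eigen/Sparse>
    #include <iostream>
    #include <vector>

    int main() {
      // Build a small symmetric positive-definite tridiagonal matrix.
      std::vector<Eigen::Triplet<double> > coeffs;
      for (int i = 0; i < 5; ++i) coeffs.push_back(Eigen::Triplet<double>(i, i, 4.0));
      for (int i = 0; i + 1 < 5; ++i) {
        coeffs.push_back(Eigen::Triplet<double>(i, i + 1, -1.0));
        coeffs.push_back(Eigen::Triplet<double>(i + 1, i, -1.0));
      }
      Eigen::SparseMatrix<double> A(5, 5);
      A.setFromTriplets(coeffs.begin(), coeffs.end());

      // Simplicial LDLT factorization provided by the SparseCholesky module.
      Eigen::SimplicialLDLT<Eigen::SparseMatrix<double> > solver(A);
      Eigen::VectorXd x = solver.solve(Eigen::VectorXd::Ones(5));
      std::cout << x.transpose() << std::endl;
    }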
diff --git a/Eigen/src/Cholesky/LDLT.h b/Eigen/src/Cholesky/LDLT.h
index 2dfeac333..67e97ffb8 100644
--- a/Eigen/src/Cholesky/LDLT.h
+++ b/Eigen/src/Cholesky/LDLT.h
@@ -16,6 +16,15 @@
namespace Eigen {
namespace internal {
+ template<typename _MatrixType, int _UpLo> struct traits<LDLT<_MatrixType, _UpLo> >
+ : traits<_MatrixType>
+ {
+ typedef MatrixXpr XprKind;
+ typedef SolverStorage StorageKind;
+ typedef int StorageIndex;
+ enum { Flags = 0 };
+ };
+
template<typename MatrixType, int UpLo> struct LDLT_Traits;
// PositiveSemiDef means positive semi-definite and non-zero; same for NegativeSemiDef
@@ -48,20 +57,19 @@ namespace internal {
* \sa MatrixBase::ldlt(), SelfAdjointView::ldlt(), class LLT
*/
template<typename _MatrixType, int _UpLo> class LDLT
+ : public SolverBase<LDLT<_MatrixType, _UpLo> >
{
public:
typedef _MatrixType MatrixType;
+ typedef SolverBase<LDLT> Base;
+ friend class SolverBase<LDLT>;
+
+ EIGEN_GENERIC_PUBLIC_INTERFACE(LDLT)
enum {
- RowsAtCompileTime = MatrixType::RowsAtCompileTime,
- ColsAtCompileTime = MatrixType::ColsAtCompileTime,
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
UpLo = _UpLo
};
- typedef typename MatrixType::Scalar Scalar;
- typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
- typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
- typedef typename MatrixType::StorageIndex StorageIndex;
typedef Matrix<Scalar, RowsAtCompileTime, 1, 0, MaxRowsAtCompileTime, 1> TmpMatrixType;
typedef Transpositions<RowsAtCompileTime, MaxRowsAtCompileTime> TranspositionType;
@@ -180,6 +188,7 @@ template<typename _MatrixType, int _UpLo> class LDLT
return m_sign == internal::NegativeSemiDef || m_sign == internal::ZeroSign;
}
+ #ifdef EIGEN_PARSED_BY_DOXYGEN
/** \returns a solution x of \f$ A x = b \f$ using the current decomposition of A.
*
* This function also supports in-place solves using the syntax <tt>x = decompositionObject.solve(x)</tt> .
@@ -197,13 +206,8 @@ template<typename _MatrixType, int _UpLo> class LDLT
*/
template<typename Rhs>
inline const Solve<LDLT, Rhs>
- solve(const MatrixBase<Rhs>& b) const
- {
- eigen_assert(m_isInitialized && "LDLT is not initialized.");
- eigen_assert(m_matrix.rows()==b.rows()
- && "LDLT::solve(): invalid number of rows of the right hand side matrix b");
- return Solve<LDLT, Rhs>(*this, b.derived());
- }
+ solve(const MatrixBase<Rhs>& b) const;
+ #endif
template<typename Derived>
bool solveInPlace(MatrixBase<Derived> &bAndX) const;
@@ -259,6 +263,9 @@ template<typename _MatrixType, int _UpLo> class LDLT
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename RhsType, typename DstType>
void _solve_impl(const RhsType &rhs, DstType &dst) const;
+
+ template<bool Conjugate, typename RhsType, typename DstType>
+ void _solve_impl_transposed(const RhsType &rhs, DstType &dst) const;
#endif
protected:
@@ -304,7 +311,8 @@ template<> struct ldlt_inplace<Lower>
if (size <= 1)
{
transpositions.setIdentity();
- if (numext::real(mat.coeff(0,0)) > static_cast<RealScalar>(0) ) sign = PositiveSemiDef;
+ if(size==0) sign = ZeroSign;
+ else if (numext::real(mat.coeff(0,0)) > static_cast<RealScalar>(0) ) sign = PositiveSemiDef;
else if (numext::real(mat.coeff(0,0)) < static_cast<RealScalar>(0)) sign = NegativeSemiDef;
else sign = ZeroSign;
return true;
@@ -558,14 +566,22 @@ template<typename _MatrixType, int _UpLo>
template<typename RhsType, typename DstType>
void LDLT<_MatrixType,_UpLo>::_solve_impl(const RhsType &rhs, DstType &dst) const
{
- eigen_assert(rhs.rows() == rows());
+ _solve_impl_transposed<true>(rhs, dst);
+}
+
+template<typename _MatrixType,int _UpLo>
+template<bool Conjugate, typename RhsType, typename DstType>
+void LDLT<_MatrixType,_UpLo>::_solve_impl_transposed(const RhsType &rhs, DstType &dst) const
+{
// dst = P b
dst = m_transpositions * rhs;
// dst = L^-1 (P b)
- matrixL().solveInPlace(dst);
+ // dst = L^-*T (P b)
+ matrixL().template conjugateIf<!Conjugate>().solveInPlace(dst);
- // dst = D^-1 (L^-1 P b)
+ // dst = D^-* (L^-1 P b)
+ // dst = D^-1 (L^-*T P b)
// more precisely, use pseudo-inverse of D (see bug 241)
using std::abs;
const typename Diagonal<const MatrixType>::RealReturnType vecD(vectorD());
@@ -577,7 +593,6 @@ void LDLT<_MatrixType,_UpLo>::_solve_impl(const RhsType &rhs, DstType &dst) cons
// Moreover, Lapack's xSYTRS routines use 0 for the tolerance.
// Using numeric_limits::min() gives us more robustness to denormals.
RealScalar tolerance = (std::numeric_limits<RealScalar>::min)();
-
for (Index i = 0; i < vecD.size(); ++i)
{
if(abs(vecD(i)) > tolerance)
@@ -586,10 +601,12 @@ void LDLT<_MatrixType,_UpLo>::_solve_impl(const RhsType &rhs, DstType &dst) cons
dst.row(i).setZero();
}
- // dst = L^-T (D^-1 L^-1 P b)
- matrixU().solveInPlace(dst);
+ // dst = L^-* (D^-* L^-1 P b)
+ // dst = L^-T (D^-1 L^-*T P b)
+ matrixL().transpose().template conjugateIf<Conjugate>().solveInPlace(dst);
- // dst = P^-1 (L^-T D^-1 L^-1 P b) = A^-1 b
+ // dst = P^T (L^-* D^-* L^-1 P b) = A^-1 b
+ // dst = P^-T (L^-T D^-1 L^-*T P b) = A^-1 b
dst = m_transpositions.transpose() * dst;
}
#endif
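LDLT now derives from SolverBase, which supplies the solve() member previously defined inline, and the new _solve_impl_transposed enables transposed/adjoint solves; the analogous LLT changes that follow use the same pattern. A minimal sketch of the public interface, assuming the SolverBase-provided adjoint() accessor:

    #include <Eigen/Dense>
    #include <iostream>

    int main() {
      Eigen::MatrixXd A = Eigen::MatrixXd::Random(4, 4);
      A = A + A.transpose() + 8.0 * Eigen::MatrixXd::Identity(4, 4);  // symmetric, well conditioned
      Eigen::VectorXd b = Eigen::VectorXd::Ones(4);

      Eigen::LDLT<Eigen::MatrixXd> ldlt(A);
      Eigen::VectorXd x = ldlt.solve(b);            // A x = b
      // Assumed to route through _solve_impl_transposed<Conjugate>() above:
      Eigen::VectorXd y = ldlt.adjoint().solve(b);  // A^* y = b (== x here: A is real symmetric)
      std::cout << (A * x - b).norm() << " " << (x - y).norm() << std::endl;
    }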
diff --git a/Eigen/src/Cholesky/LLT.h b/Eigen/src/Cholesky/LLT.h
index 868766365..5876966e6 100644
--- a/Eigen/src/Cholesky/LLT.h
+++ b/Eigen/src/Cholesky/LLT.h
@@ -13,6 +13,16 @@
namespace Eigen {
namespace internal{
+
+template<typename _MatrixType, int _UpLo> struct traits<LLT<_MatrixType, _UpLo> >
+ : traits<_MatrixType>
+{
+ typedef MatrixXpr XprKind;
+ typedef SolverStorage StorageKind;
+ typedef int StorageIndex;
+ enum { Flags = 0 };
+};
+
template<typename MatrixType, int UpLo> struct LLT_Traits;
}
@@ -54,18 +64,17 @@ template<typename MatrixType, int UpLo> struct LLT_Traits;
* \sa MatrixBase::llt(), SelfAdjointView::llt(), class LDLT
*/
template<typename _MatrixType, int _UpLo> class LLT
+ : public SolverBase<LLT<_MatrixType, _UpLo> >
{
public:
typedef _MatrixType MatrixType;
+ typedef SolverBase<LLT> Base;
+ friend class SolverBase<LLT>;
+
+ EIGEN_GENERIC_PUBLIC_INTERFACE(LLT)
enum {
- RowsAtCompileTime = MatrixType::RowsAtCompileTime,
- ColsAtCompileTime = MatrixType::ColsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
};
- typedef typename MatrixType::Scalar Scalar;
- typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
- typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
- typedef typename MatrixType::StorageIndex StorageIndex;
enum {
PacketSize = internal::packet_traits<Scalar>::size,
@@ -129,6 +138,7 @@ template<typename _MatrixType, int _UpLo> class LLT
return Traits::getL(m_matrix);
}
+ #ifdef EIGEN_PARSED_BY_DOXYGEN
/** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A.
*
* Since this LLT class assumes anyway that the matrix A is invertible, the solution
@@ -141,13 +151,8 @@ template<typename _MatrixType, int _UpLo> class LLT
*/
template<typename Rhs>
inline const Solve<LLT, Rhs>
- solve(const MatrixBase<Rhs>& b) const
- {
- eigen_assert(m_isInitialized && "LLT is not initialized.");
- eigen_assert(m_matrix.rows()==b.rows()
- && "LLT::solve(): invalid number of rows of the right hand side matrix b");
- return Solve<LLT, Rhs>(*this, b.derived());
- }
+ solve(const MatrixBase<Rhs>& b) const;
+ #endif
template<typename Derived>
void solveInPlace(const MatrixBase<Derived> &bAndX) const;
@@ -205,6 +210,9 @@ template<typename _MatrixType, int _UpLo> class LLT
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename RhsType, typename DstType>
void _solve_impl(const RhsType &rhs, DstType &dst) const;
+
+ template<bool Conjugate, typename RhsType, typename DstType>
+ void _solve_impl_transposed(const RhsType &rhs, DstType &dst) const;
#endif
protected:
@@ -476,8 +484,17 @@ template<typename _MatrixType,int _UpLo>
template<typename RhsType, typename DstType>
void LLT<_MatrixType,_UpLo>::_solve_impl(const RhsType &rhs, DstType &dst) const
{
- dst = rhs;
- solveInPlace(dst);
+ _solve_impl_transposed<true>(rhs, dst);
+}
+
+template<typename _MatrixType,int _UpLo>
+template<bool Conjugate, typename RhsType, typename DstType>
+void LLT<_MatrixType,_UpLo>::_solve_impl_transposed(const RhsType &rhs, DstType &dst) const
+{
+ dst = rhs;
+
+ matrixL().template conjugateIf<!Conjugate>().solveInPlace(dst);
+ matrixU().template conjugateIf<!Conjugate>().solveInPlace(dst);
}
#endif
diff --git a/Eigen/src/Core/Array.h b/Eigen/src/Core/Array.h
index e10020d4f..ee12d96fc 100644
--- a/Eigen/src/Core/Array.h
+++ b/Eigen/src/Core/Array.h
@@ -153,8 +153,6 @@ class Array
: Base(std::move(other))
{
Base::_check_template_params();
- if (RowsAtCompileTime!=Dynamic && ColsAtCompileTime!=Dynamic)
- Base::_set_noalias(other);
}
EIGEN_DEVICE_FUNC
Array& operator=(Array&& other) EIGEN_NOEXCEPT_IF(std::is_nothrow_move_assignable<Scalar>::value)
@@ -180,6 +178,46 @@ class Array
Base::_check_template_params();
this->template _init2<T0,T1>(val0, val1);
}
+
+ #if EIGEN_HAS_CXX11
+ /** \copydoc PlainObjectBase(const Scalar& a0, const Scalar& a1, const Scalar& a2, const Scalar& a3, const ArgTypes&... args)
+ *
+ * Example: \include Array_variadic_ctor_cxx11.cpp
+ * Output: \verbinclude Array_variadic_ctor_cxx11.out
+ *
+ * \sa Array(const std::initializer_list<std::initializer_list<Scalar>>&)
+ * \sa Array(Scalar), Array(Scalar,Scalar)
+ */
+ template <typename... ArgTypes>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Array(const Scalar& a0, const Scalar& a1, const Scalar& a2, const Scalar& a3, const ArgTypes&... args)
+ : Base(a0, a1, a2, a3, args...) {}
+
+ /** \brief Constructs an array and initializes it from the coefficients given as initializer-lists grouped by row. \cpp11
+ *
+ * In the general case, the constructor takes a list of rows, each row being represented as a list of coefficients:
+ *
+ * Example: \include Array_initializer_list_23_cxx11.cpp
+ * Output: \verbinclude Array_initializer_list_23_cxx11.out
+ *
+ * Each of the inner initializer lists must contain the exact same number of elements, otherwise an assertion is triggered.
+ *
+ * In the case of a compile-time column 1D array, implicit transposition from a single row is allowed.
+ * Therefore <code> Array<int,Dynamic,1>{{1,2,3,4,5}}</code> is legal and the more verbose syntax
+ * <code>Array<int,Dynamic,1>{{1},{2},{3},{4},{5}}</code> can be avoided:
+ *
+ * Example: \include Array_initializer_list_vector_cxx11.cpp
+ * Output: \verbinclude Array_initializer_list_vector_cxx11.out
+ *
+ * In the case of fixed-sized arrays, the initializer list sizes must exactly match the array sizes,
+ * and implicit transposition is allowed for compile-time 1D arrays only.
+ *
+ * \sa Array(const Scalar& a0, const Scalar& a1, const Scalar& a2, const Scalar& a3, const ArgTypes&... args)
+ */
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Array(const std::initializer_list<std::initializer_list<Scalar> >& list) : Base(list) {}
+ #endif // end EIGEN_HAS_CXX11
+
#else
/** \brief Constructs a fixed-sized array initialized with coefficients starting at \a data */
EIGEN_DEVICE_FUNC explicit Array(const Scalar *data);
@@ -191,7 +229,8 @@ class Array
*/
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE explicit Array(Index dim);
- /** constructs an initialized 1x1 Array with the given coefficient */
+ /** constructs an initialized 1x1 Array with the given coefficient
+ * \sa const Scalar& a0, const Scalar& a1, const Scalar& a2, const Scalar& a3, const ArgTypes&... args */
Array(const Scalar& value);
/** constructs an uninitialized array with \a rows rows and \a cols columns.
*
@@ -199,11 +238,14 @@ class Array
* it is redundant to pass these parameters, so one should use the default constructor
* Array() instead. */
Array(Index rows, Index cols);
- /** constructs an initialized 2D vector with given coefficients */
+ /** constructs an initialized 2D vector with given coefficients
+ * \sa Array(const Scalar& a0, const Scalar& a1, const Scalar& a2, const Scalar& a3, const ArgTypes&... args) */
Array(const Scalar& val0, const Scalar& val1);
- #endif
+ #endif // end EIGEN_PARSED_BY_DOXYGEN
- /** constructs an initialized 3D vector with given coefficients */
+ /** constructs an initialized 3D vector with given coefficients
+ * \sa Array(const Scalar& a0, const Scalar& a1, const Scalar& a2, const Scalar& a3, const ArgTypes&... args)
+ */
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Array(const Scalar& val0, const Scalar& val1, const Scalar& val2)
{
@@ -213,7 +255,9 @@ class Array
m_storage.data()[1] = val1;
m_storage.data()[2] = val2;
}
- /** constructs an initialized 4D vector with given coefficients */
+ /** constructs an initialized 4D vector with given coefficients
+ * \sa Array(const Scalar& a0, const Scalar& a1, const Scalar& a2, const Scalar& a3, const ArgTypes&... args)
+ */
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Array(const Scalar& val0, const Scalar& val1, const Scalar& val2, const Scalar& val3)
{
@@ -260,7 +304,7 @@ class Array
/** \defgroup arraytypedefs Global array typedefs
* \ingroup Core_Module
*
- * Eigen defines several typedef shortcuts for most common 1D and 2D array types.
+ * %Eigen defines several typedef shortcuts for most common 1D and 2D array types.
*
* The general patterns are the following:
*
@@ -273,6 +317,12 @@ class Array
* There are also \c ArraySizeType which are self-explanatory. For example, \c Array4cf is
* a fixed-size 1D array of 4 complex floats.
*
+ * With \cpp11, template alias are also defined for common sizes.
+ * They follow the same pattern as above except that the scalar type suffix is replaced by a
+ * template parameter, i.e.:
+ * - `ArrayRowsCols<Type>` where `Rows` and `Cols` can be \c 2,\c 3,\c 4, or \c X for fixed or dynamic size.
+ * - `ArraySize<Type>` where `Size` can be \c 2,\c 3,\c 4 or \c X for fixed or dynamic size 1D arrays.
+ *
* \sa class Array
*/
@@ -305,9 +355,43 @@ EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(std::complex<double>, cd)
#undef EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES
#undef EIGEN_MAKE_ARRAY_TYPEDEFS
+#undef EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS
+
+#if EIGEN_HAS_CXX11
+
+#define EIGEN_MAKE_ARRAY_TYPEDEFS(Size, SizeSuffix) \
+/** \ingroup arraytypedefs */ \
+/** \brief \cpp11 */ \
+template <typename Type> \
+using Array##SizeSuffix##SizeSuffix = Array<Type, Size, Size>; \
+/** \ingroup arraytypedefs */ \
+/** \brief \cpp11 */ \
+template <typename Type> \
+using Array##SizeSuffix = Array<Type, Size, 1>;
+
+#define EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(Size) \
+/** \ingroup arraytypedefs */ \
+/** \brief \cpp11 */ \
+template <typename Type> \
+using Array##Size##X = Array<Type, Size, Dynamic>; \
+/** \ingroup arraytypedefs */ \
+/** \brief \cpp11 */ \
+template <typename Type> \
+using Array##X##Size = Array<Type, Dynamic, Size>;
+
+EIGEN_MAKE_ARRAY_TYPEDEFS(2, 2)
+EIGEN_MAKE_ARRAY_TYPEDEFS(3, 3)
+EIGEN_MAKE_ARRAY_TYPEDEFS(4, 4)
+EIGEN_MAKE_ARRAY_TYPEDEFS(Dynamic, X)
+EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(2)
+EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(3)
+EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(4)
-#undef EIGEN_MAKE_ARRAY_TYPEDEFS_LARGE
+#undef EIGEN_MAKE_ARRAY_TYPEDEFS
+#undef EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS
+#endif // EIGEN_HAS_CXX11
+
#define EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, SizeSuffix) \
using Eigen::Matrix##SizeSuffix##TypeSuffix; \
using Eigen::Vector##SizeSuffix##TypeSuffix; \
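A brief sketch of the constructors and C++11 template aliases documented above (requires EIGEN_HAS_CXX11; Array33 and ArrayX are among the aliases generated by the typedef macros in this hunk):

    #include <Eigen/Dense>
    #include <iostream>

    int main() {
      // Fixed-size 2x3 array initialized row by row via nested initializer lists.
      Eigen::Array<int, 2, 3> a{{1, 2, 3}, {4, 5, 6}};

      // Compile-time 1D arrays accept the single-row shorthand described above.
      Eigen::Array<int, Eigen::Dynamic, 1> v{{1, 2, 3, 4, 5}};

      // C++11 template aliases: ArrayRowsCols<Type> and ArraySize<Type>.
      Eigen::Array33<float> m = Eigen::Array33<float>::Constant(1.5f);
      Eigen::ArrayX<double> d = Eigen::ArrayX<double>::Zero(4);

      std::cout << a << "\n" << v.transpose() << "\n" << m << "\n" << d.transpose() << std::endl;
    }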
diff --git a/Eigen/src/Core/AssignEvaluator.h b/Eigen/src/Core/AssignEvaluator.h
index 79575e1b4..229e25854 100644
--- a/Eigen/src/Core/AssignEvaluator.h
+++ b/Eigen/src/Core/AssignEvaluator.h
@@ -611,7 +611,8 @@ public:
typedef typename AssignmentTraits::PacketType PacketType;
- EIGEN_DEVICE_FUNC generic_dense_assignment_kernel(DstEvaluatorType &dst, const SrcEvaluatorType &src, const Functor &func, DstXprType& dstExpr)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ generic_dense_assignment_kernel(DstEvaluatorType &dst, const SrcEvaluatorType &src, const Functor &func, DstXprType& dstExpr)
: m_dst(dst), m_src(src), m_functor(func), m_dstExpr(dstExpr)
{
#ifdef EIGEN_DEBUG_ASSIGN
diff --git a/Eigen/src/Core/Assign_MKL.h b/Eigen/src/Core/Assign_MKL.h
index 6866095bf..c6140d185 100755
--- a/Eigen/src/Core/Assign_MKL.h
+++ b/Eigen/src/Core/Assign_MKL.h
@@ -68,16 +68,16 @@ class vml_assign_traits
#define EIGEN_PP_EXPAND(ARG) ARG
#if !defined (EIGEN_FAST_MATH) || (EIGEN_FAST_MATH != 1)
-#define EIGEN_VMLMODE_EXPAND_LA , VML_HA
+#define EIGEN_VMLMODE_EXPAND_xLA , VML_HA
#else
-#define EIGEN_VMLMODE_EXPAND_LA , VML_LA
+#define EIGEN_VMLMODE_EXPAND_xLA , VML_LA
#endif
-#define EIGEN_VMLMODE_EXPAND__
+#define EIGEN_VMLMODE_EXPAND_x_
-#define EIGEN_VMLMODE_PREFIX_LA vm
-#define EIGEN_VMLMODE_PREFIX__ v
-#define EIGEN_VMLMODE_PREFIX(VMLMODE) EIGEN_CAT(EIGEN_VMLMODE_PREFIX_,VMLMODE)
+#define EIGEN_VMLMODE_PREFIX_xLA vm
+#define EIGEN_VMLMODE_PREFIX_x_ v
+#define EIGEN_VMLMODE_PREFIX(VMLMODE) EIGEN_CAT(EIGEN_VMLMODE_PREFIX_x,VMLMODE)
#define EIGEN_MKL_VML_DECLARE_UNARY_CALL(EIGENOP, VMLOP, EIGENTYPE, VMLTYPE, VMLMODE) \
template< typename DstXprType, typename SrcXprNested> \
@@ -89,7 +89,7 @@ class vml_assign_traits
eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols()); \
if(vml_assign_traits<DstXprType,SrcXprNested>::Traversal==LinearTraversal) { \
VMLOP(dst.size(), (const VMLTYPE*)src.nestedExpression().data(), \
- (VMLTYPE*)dst.data() EIGEN_PP_EXPAND(EIGEN_VMLMODE_EXPAND_##VMLMODE) ); \
+ (VMLTYPE*)dst.data() EIGEN_PP_EXPAND(EIGEN_VMLMODE_EXPAND_x##VMLMODE) ); \
} else { \
const Index outerSize = dst.outerSize(); \
for(Index outer = 0; outer < outerSize; ++outer) { \
@@ -97,7 +97,7 @@ class vml_assign_traits
&(src.nestedExpression().coeffRef(0, outer)); \
EIGENTYPE *dst_ptr = dst.IsRowMajor ? &(dst.coeffRef(outer,0)) : &(dst.coeffRef(0, outer)); \
VMLOP( dst.innerSize(), (const VMLTYPE*)src_ptr, \
- (VMLTYPE*)dst_ptr EIGEN_PP_EXPAND(EIGEN_VMLMODE_EXPAND_##VMLMODE)); \
+ (VMLTYPE*)dst_ptr EIGEN_PP_EXPAND(EIGEN_VMLMODE_EXPAND_x##VMLMODE)); \
} \
} \
} \
@@ -152,7 +152,7 @@ EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL(ceil, Ceil, _)
if(vml_assign_traits<DstXprType,SrcXprNested>::Traversal==LinearTraversal) \
{ \
VMLOP( dst.size(), (const VMLTYPE*)src.lhs().data(), exponent, \
- (VMLTYPE*)dst.data() EIGEN_PP_EXPAND(EIGEN_VMLMODE_EXPAND_##VMLMODE) ); \
+ (VMLTYPE*)dst.data() EIGEN_PP_EXPAND(EIGEN_VMLMODE_EXPAND_x##VMLMODE) ); \
} else { \
const Index outerSize = dst.outerSize(); \
for(Index outer = 0; outer < outerSize; ++outer) { \
@@ -160,7 +160,7 @@ EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL(ceil, Ceil, _)
&(src.lhs().coeffRef(0, outer)); \
EIGENTYPE *dst_ptr = dst.IsRowMajor ? &(dst.coeffRef(outer,0)) : &(dst.coeffRef(0, outer)); \
VMLOP( dst.innerSize(), (const VMLTYPE*)src_ptr, exponent, \
- (VMLTYPE*)dst_ptr EIGEN_PP_EXPAND(EIGEN_VMLMODE_EXPAND_##VMLMODE)); \
+ (VMLTYPE*)dst_ptr EIGEN_PP_EXPAND(EIGEN_VMLMODE_EXPAND_x##VMLMODE)); \
} \
} \
} \
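The inserted `x` appears to keep the pasted macro names from containing a double underscore (the old EIGEN_VMLMODE_EXPAND__ / EIGEN_VMLMODE_PREFIX__ did), since identifiers with `__` anywhere in them are reserved in C++. A standalone sketch of the token-pasting pattern, using hypothetical DEMO_* names:

    #include <cstdio>

    void call(int base, int accuracy = 0) { std::printf("%d %d\n", base, accuracy); }

    // The mode tag is either "LA" or "_"; pasting it onto a prefix ending in 'x'
    // yields DEMO_EXPAND_xLA or DEMO_EXPAND_x_, never an identifier with "__".
    #define DEMO_EXPAND_xLA , 1   // high-accuracy mode appends an extra argument
    #define DEMO_EXPAND_x_        // default mode expands to nothing
    #define DEMO_CALL(MODE) call(42 DEMO_EXPAND_x##MODE)

    int main() {
      DEMO_CALL(LA);  // expands to call(42 , 1)
      DEMO_CALL(_);   // expands to call(42)
    }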
diff --git a/Eigen/src/Core/Block.h b/Eigen/src/Core/Block.h
index 11de45c2e..6e938ea58 100644
--- a/Eigen/src/Core/Block.h
+++ b/Eigen/src/Core/Block.h
@@ -114,8 +114,8 @@ template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel> class
/** Column or Row constructor
*/
- EIGEN_DEVICE_FUNC
- inline Block(XprType& xpr, Index i) : Impl(xpr,i)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Block(XprType& xpr, Index i) : Impl(xpr,i)
{
eigen_assert( (i>=0) && (
((BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) && i<xpr.rows())
@@ -124,8 +124,8 @@ template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel> class
/** Fixed-size constructor
*/
- EIGEN_DEVICE_FUNC
- inline Block(XprType& xpr, Index startRow, Index startCol)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Block(XprType& xpr, Index startRow, Index startCol)
: Impl(xpr, startRow, startCol)
{
EIGEN_STATIC_ASSERT(RowsAtCompileTime!=Dynamic && ColsAtCompileTime!=Dynamic,THIS_METHOD_IS_ONLY_FOR_FIXED_SIZE)
@@ -135,8 +135,8 @@ template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel> class
/** Dynamic-size constructor
*/
- EIGEN_DEVICE_FUNC
- inline Block(XprType& xpr,
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Block(XprType& xpr,
Index startRow, Index startCol,
Index blockRows, Index blockCols)
: Impl(xpr, startRow, startCol, blockRows, blockCols)
@@ -159,10 +159,10 @@ class BlockImpl<XprType, BlockRows, BlockCols, InnerPanel, Dense>
public:
typedef Impl Base;
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl)
- EIGEN_DEVICE_FUNC inline BlockImpl(XprType& xpr, Index i) : Impl(xpr,i) {}
- EIGEN_DEVICE_FUNC inline BlockImpl(XprType& xpr, Index startRow, Index startCol) : Impl(xpr, startRow, startCol) {}
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE BlockImpl(XprType& xpr, Index i) : Impl(xpr,i) {}
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE BlockImpl(XprType& xpr, Index startRow, Index startCol) : Impl(xpr, startRow, startCol) {}
EIGEN_DEVICE_FUNC
- inline BlockImpl(XprType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
+ EIGEN_STRONG_INLINE BlockImpl(XprType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
: Impl(xpr, startRow, startCol, blockRows, blockCols) {}
};
@@ -294,22 +294,22 @@ template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel, bool H
EIGEN_DEVICE_FUNC inline Index outerStride() const;
#endif
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const typename internal::remove_all<XprTypeNested>::type& nestedExpression() const
{
return m_xpr;
}
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
XprType& nestedExpression() { return m_xpr; }
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
StorageIndex startRow() const
{
return m_startRow.value();
}
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
StorageIndex startCol() const
{
return m_startCol.value();
@@ -342,8 +342,8 @@ class BlockImpl_dense<XprType,BlockRows,BlockCols, InnerPanel,true>
/** Column or Row constructor
*/
- EIGEN_DEVICE_FUNC
- inline BlockImpl_dense(XprType& xpr, Index i)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ BlockImpl_dense(XprType& xpr, Index i)
: Base(xpr.data() + i * ( ((BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) && (!XprTypeIsRowMajor))
|| ((BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) && ( XprTypeIsRowMajor)) ? xpr.innerStride() : xpr.outerStride()),
BlockRows==1 ? 1 : xpr.rows(),
@@ -357,8 +357,8 @@ class BlockImpl_dense<XprType,BlockRows,BlockCols, InnerPanel,true>
/** Fixed-size constructor
*/
- EIGEN_DEVICE_FUNC
- inline BlockImpl_dense(XprType& xpr, Index startRow, Index startCol)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ BlockImpl_dense(XprType& xpr, Index startRow, Index startCol)
: Base(xpr.data()+xpr.innerStride()*(XprTypeIsRowMajor?startCol:startRow) + xpr.outerStride()*(XprTypeIsRowMajor?startRow:startCol)),
m_xpr(xpr), m_startRow(startRow), m_startCol(startCol)
{
@@ -367,8 +367,8 @@ class BlockImpl_dense<XprType,BlockRows,BlockCols, InnerPanel,true>
/** Dynamic-size constructor
*/
- EIGEN_DEVICE_FUNC
- inline BlockImpl_dense(XprType& xpr,
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ BlockImpl_dense(XprType& xpr,
Index startRow, Index startCol,
Index blockRows, Index blockCols)
: Base(xpr.data()+xpr.innerStride()*(XprTypeIsRowMajor?startCol:startRow) + xpr.outerStride()*(XprTypeIsRowMajor?startRow:startCol), blockRows, blockCols),
@@ -377,18 +377,18 @@ class BlockImpl_dense<XprType,BlockRows,BlockCols, InnerPanel,true>
init();
}
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const typename internal::remove_all<XprTypeNested>::type& nestedExpression() const
{
return m_xpr;
}
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
XprType& nestedExpression() { return m_xpr; }
/** \sa MapBase::innerStride() */
- EIGEN_DEVICE_FUNC
- inline Index innerStride() const
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Index innerStride() const
{
return internal::traits<BlockType>::HasSameStorageOrderAsXprType
? m_xpr.innerStride()
@@ -396,19 +396,19 @@ class BlockImpl_dense<XprType,BlockRows,BlockCols, InnerPanel,true>
}
/** \sa MapBase::outerStride() */
- EIGEN_DEVICE_FUNC
- inline Index outerStride() const
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Index outerStride() const
{
return m_outerStride;
}
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
StorageIndex startRow() const
{
return m_startRow.value();
}
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
StorageIndex startCol() const
{
return m_startCol.value();
@@ -422,8 +422,8 @@ class BlockImpl_dense<XprType,BlockRows,BlockCols, InnerPanel,true>
#ifndef EIGEN_PARSED_BY_DOXYGEN
/** \internal used by allowAligned() */
- EIGEN_DEVICE_FUNC
- inline BlockImpl_dense(XprType& xpr, const Scalar* data, Index blockRows, Index blockCols)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ BlockImpl_dense(XprType& xpr, const Scalar* data, Index blockRows, Index blockCols)
: Base(data, blockRows, blockCols), m_xpr(xpr)
{
init();
@@ -431,7 +431,7 @@ class BlockImpl_dense<XprType,BlockRows,BlockCols, InnerPanel,true>
#endif
protected:
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
void init()
{
m_outerStride = internal::traits<BlockType>::HasSameStorageOrderAsXprType
diff --git a/Eigen/src/Core/ConditionEstimator.h b/Eigen/src/Core/ConditionEstimator.h
index aa7efdc76..51a2e5f1b 100644
--- a/Eigen/src/Core/ConditionEstimator.h
+++ b/Eigen/src/Core/ConditionEstimator.h
@@ -160,7 +160,7 @@ rcond_estimate_helper(typename Decomposition::RealScalar matrix_norm, const Deco
{
typedef typename Decomposition::RealScalar RealScalar;
eigen_assert(dec.rows() == dec.cols());
- if (dec.rows() == 0) return RealScalar(1);
+ if (dec.rows() == 0) return NumTraits<RealScalar>::infinity();
if (matrix_norm == RealScalar(0)) return RealScalar(0);
if (dec.rows() == 1) return RealScalar(1);
const RealScalar inverse_matrix_norm = rcond_invmatrix_L1_norm_estimate(dec);
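With this change an empty (0x0) decomposition reports an infinite reciprocal condition number instead of 1. A short sketch of the rcond() estimate this helper feeds (LLT shown; the empty-matrix case is noted rather than exercised):

    #include <Eigen/Dense>
    #include <iostream>

    int main() {
      Eigen::MatrixXd A = Eigen::MatrixXd::Random(4, 4);
      A = A * A.transpose() + Eigen::MatrixXd::Identity(4, 4);  // symmetric positive definite
      Eigen::LLT<Eigen::MatrixXd> llt(A);
      // rcond() is backed by rcond_estimate_helper(); after the change above,
      // a 0x0 decomposition would report an infinite estimate rather than 1.
      std::cout << "rcond estimate: " << llt.rcond() << std::endl;
    }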
diff --git a/Eigen/src/Core/CoreEvaluators.h b/Eigen/src/Core/CoreEvaluators.h
index d5da5cdec..670fa77b5 100644
--- a/Eigen/src/Core/CoreEvaluators.h
+++ b/Eigen/src/Core/CoreEvaluators.h
@@ -90,7 +90,8 @@ template<typename T>
struct evaluator : public unary_evaluator<T>
{
typedef unary_evaluator<T> Base;
- EIGEN_DEVICE_FUNC explicit evaluator(const T& xpr) : Base(xpr) {}
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ explicit evaluator(const T& xpr) : Base(xpr) {}
};
@@ -99,7 +100,7 @@ template<typename T>
struct evaluator<const T>
: evaluator<T>
{
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
explicit evaluator(const T& xpr) : evaluator<T>(xpr) {}
};
@@ -134,21 +135,25 @@ private:
// this helper permits to completely eliminate m_outerStride if it is known at compiletime.
template<typename Scalar,int OuterStride> class plainobjectbase_evaluator_data {
public:
- EIGEN_DEVICE_FUNC plainobjectbase_evaluator_data(const Scalar* ptr, Index outerStride) : data(ptr)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ plainobjectbase_evaluator_data(const Scalar* ptr, Index outerStride) : data(ptr)
{
#ifndef EIGEN_INTERNAL_DEBUGGING
EIGEN_UNUSED_VARIABLE(outerStride);
#endif
eigen_internal_assert(outerStride==OuterStride);
}
- EIGEN_DEVICE_FUNC Index outerStride() const { return OuterStride; }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Index outerStride() const { return OuterStride; }
const Scalar *data;
};
template<typename Scalar> class plainobjectbase_evaluator_data<Scalar,Dynamic> {
public:
- EIGEN_DEVICE_FUNC plainobjectbase_evaluator_data(const Scalar* ptr, Index outerStride) : data(ptr), m_outerStride(outerStride) {}
- EIGEN_DEVICE_FUNC Index outerStride() const { return m_outerStride; }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ plainobjectbase_evaluator_data(const Scalar* ptr, Index outerStride) : data(ptr), m_outerStride(outerStride) {}
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Index outerStride() const { return m_outerStride; }
const Scalar *data;
protected:
Index m_outerStride;
@@ -179,13 +184,15 @@ struct evaluator<PlainObjectBase<Derived> >
: RowsAtCompileTime
};
- EIGEN_DEVICE_FUNC evaluator()
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ evaluator()
: m_d(0,OuterStrideAtCompileTime)
{
EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
}
- EIGEN_DEVICE_FUNC explicit evaluator(const PlainObjectType& m)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ explicit evaluator(const PlainObjectType& m)
: m_d(m.data(),IsVectorAtCompileTime ? 0 : m.outerStride())
{
EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
@@ -268,9 +275,11 @@ struct evaluator<Matrix<Scalar, Rows, Cols, Options, MaxRows, MaxCols> >
{
typedef Matrix<Scalar, Rows, Cols, Options, MaxRows, MaxCols> XprType;
- EIGEN_DEVICE_FUNC evaluator() {}
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ evaluator() {}
- EIGEN_DEVICE_FUNC explicit evaluator(const XprType& m)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ explicit evaluator(const XprType& m)
: evaluator<PlainObjectBase<XprType> >(m)
{ }
};
@@ -281,9 +290,11 @@ struct evaluator<Array<Scalar, Rows, Cols, Options, MaxRows, MaxCols> >
{
typedef Array<Scalar, Rows, Cols, Options, MaxRows, MaxCols> XprType;
- EIGEN_DEVICE_FUNC evaluator() {}
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ evaluator() {}
- EIGEN_DEVICE_FUNC explicit evaluator(const XprType& m)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ explicit evaluator(const XprType& m)
: evaluator<PlainObjectBase<XprType> >(m)
{ }
};
@@ -302,7 +313,8 @@ struct unary_evaluator<Transpose<ArgType>, IndexBased>
Alignment = evaluator<ArgType>::Alignment
};
- EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& t) : m_argImpl(t.nestedExpression()) {}
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ explicit unary_evaluator(const XprType& t) : m_argImpl(t.nestedExpression()) {}
typedef typename XprType::Scalar Scalar;
typedef typename XprType::CoeffReturnType CoeffReturnType;
@@ -712,7 +724,8 @@ struct evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> XprType;
typedef binary_evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs> > Base;
- EIGEN_DEVICE_FUNC explicit evaluator(const XprType& xpr) : Base(xpr) {}
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ explicit evaluator(const XprType& xpr) : Base(xpr) {}
};
template<typename BinaryOp, typename Lhs, typename Rhs>
@@ -740,7 +753,8 @@ struct binary_evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs>, IndexBased, IndexBase
Alignment = EIGEN_PLAIN_ENUM_MIN(evaluator<Lhs>::Alignment,evaluator<Rhs>::Alignment)
};
- EIGEN_DEVICE_FUNC explicit binary_evaluator(const XprType& xpr) : m_d(xpr)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ explicit binary_evaluator(const XprType& xpr) : m_d(xpr)
{
EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<BinaryOp>::Cost);
EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
@@ -877,7 +891,8 @@ struct mapbase_evaluator : evaluator_base<Derived>
CoeffReadCost = NumTraits<Scalar>::ReadCost
};
- EIGEN_DEVICE_FUNC explicit mapbase_evaluator(const XprType& map)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ explicit mapbase_evaluator(const XprType& map)
: m_data(const_cast<PointerType>(map.data())),
m_innerStride(map.innerStride()),
m_outerStride(map.outerStride())
@@ -941,10 +956,10 @@ struct mapbase_evaluator : evaluator_base<Derived>
internal::pstoret<Scalar, PacketType, StoreMode>(m_data + index * m_innerStride.value(), x);
}
protected:
- EIGEN_DEVICE_FUNC
- inline Index rowStride() const { return XprType::IsRowMajor ? m_outerStride.value() : m_innerStride.value(); }
- EIGEN_DEVICE_FUNC
- inline Index colStride() const { return XprType::IsRowMajor ? m_innerStride.value() : m_outerStride.value(); }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Index rowStride() const { return XprType::IsRowMajor ? m_outerStride.value() : m_innerStride.value(); }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Index colStride() const { return XprType::IsRowMajor ? m_innerStride.value() : m_outerStride.value(); }
PointerType m_data;
const internal::variable_if_dynamic<Index, XprType::InnerStrideAtCompileTime> m_innerStride;
@@ -997,7 +1012,8 @@ struct evaluator<Ref<PlainObjectType, RefOptions, StrideType> >
Alignment = evaluator<Map<PlainObjectType, RefOptions, StrideType> >::Alignment
};
- EIGEN_DEVICE_FUNC explicit evaluator(const XprType& ref)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ explicit evaluator(const XprType& ref)
: mapbase_evaluator<XprType, PlainObjectType>(ref)
{ }
};
@@ -1052,7 +1068,8 @@ struct evaluator<Block<ArgType, BlockRows, BlockCols, InnerPanel> >
Alignment = EIGEN_PLAIN_ENUM_MIN(evaluator<ArgType>::Alignment, Alignment0)
};
typedef block_evaluator<ArgType, BlockRows, BlockCols, InnerPanel> block_evaluator_type;
- EIGEN_DEVICE_FUNC explicit evaluator(const XprType& block) : block_evaluator_type(block)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ explicit evaluator(const XprType& block) : block_evaluator_type(block)
{
EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
}
@@ -1065,7 +1082,8 @@ struct block_evaluator<ArgType, BlockRows, BlockCols, InnerPanel, /*HasDirectAcc
{
typedef Block<ArgType, BlockRows, BlockCols, InnerPanel> XprType;
- EIGEN_DEVICE_FUNC explicit block_evaluator(const XprType& block)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ explicit block_evaluator(const XprType& block)
: unary_evaluator<XprType>(block)
{}
};
@@ -1076,7 +1094,8 @@ struct unary_evaluator<Block<ArgType, BlockRows, BlockCols, InnerPanel>, IndexBa
{
typedef Block<ArgType, BlockRows, BlockCols, InnerPanel> XprType;
- EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& block)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ explicit unary_evaluator(const XprType& block)
: m_argImpl(block.nestedExpression()),
m_startRow(block.startRow()),
m_startCol(block.startCol()),
@@ -1176,7 +1195,8 @@ struct block_evaluator<ArgType, BlockRows, BlockCols, InnerPanel, /* HasDirectAc
typedef Block<ArgType, BlockRows, BlockCols, InnerPanel> XprType;
typedef typename XprType::Scalar Scalar;
- EIGEN_DEVICE_FUNC explicit block_evaluator(const XprType& block)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ explicit block_evaluator(const XprType& block)
: mapbase_evaluator<XprType, typename XprType::PlainObject>(block)
{
// TODO: for the 3.3 release, this should be turned to an internal assertion, but let's keep it as is for the beta lifetime
@@ -1204,7 +1224,8 @@ struct evaluator<Select<ConditionMatrixType, ThenMatrixType, ElseMatrixType> >
Alignment = EIGEN_PLAIN_ENUM_MIN(evaluator<ThenMatrixType>::Alignment, evaluator<ElseMatrixType>::Alignment)
};
- EIGEN_DEVICE_FUNC explicit evaluator(const XprType& select)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ explicit evaluator(const XprType& select)
: m_conditionImpl(select.conditionMatrix()),
m_thenImpl(select.thenMatrix()),
m_elseImpl(select.elseMatrix())
@@ -1261,7 +1282,8 @@ struct unary_evaluator<Replicate<ArgType, RowFactor, ColFactor> >
Alignment = evaluator<ArgTypeNestedCleaned>::Alignment
};
- EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& replicate)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ explicit unary_evaluator(const XprType& replicate)
: m_arg(replicate.nestedExpression()),
m_argImpl(m_arg),
m_rows(replicate.nestedExpression().rows()),
@@ -1341,7 +1363,8 @@ struct evaluator_wrapper_base
Alignment = evaluator<ArgType>::Alignment
};
- EIGEN_DEVICE_FUNC explicit evaluator_wrapper_base(const ArgType& arg) : m_argImpl(arg) {}
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ explicit evaluator_wrapper_base(const ArgType& arg) : m_argImpl(arg) {}
typedef typename ArgType::Scalar Scalar;
typedef typename ArgType::CoeffReturnType CoeffReturnType;
@@ -1408,7 +1431,8 @@ struct unary_evaluator<MatrixWrapper<TArgType> >
{
typedef MatrixWrapper<TArgType> XprType;
- EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& wrapper)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ explicit unary_evaluator(const XprType& wrapper)
: evaluator_wrapper_base<MatrixWrapper<TArgType> >(wrapper.nestedExpression())
{ }
};
@@ -1419,7 +1443,8 @@ struct unary_evaluator<ArrayWrapper<TArgType> >
{
typedef ArrayWrapper<TArgType> XprType;
- EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& wrapper)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ explicit unary_evaluator(const XprType& wrapper)
: evaluator_wrapper_base<ArrayWrapper<TArgType> >(wrapper.nestedExpression())
{ }
};
@@ -1461,7 +1486,8 @@ struct unary_evaluator<Reverse<ArgType, Direction> >
Alignment = 0 // FIXME in some rare cases, Alignment could be preserved, like a Vector4f.
};
- EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& reverse)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ explicit unary_evaluator(const XprType& reverse)
: m_argImpl(reverse.nestedExpression()),
m_rows(ReverseRow ? reverse.nestedExpression().rows() : 1),
m_cols(ReverseCol ? reverse.nestedExpression().cols() : 1)
@@ -1568,7 +1594,8 @@ struct evaluator<Diagonal<ArgType, DiagIndex> >
Alignment = 0
};
- EIGEN_DEVICE_FUNC explicit evaluator(const XprType& diagonal)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ explicit evaluator(const XprType& diagonal)
: m_argImpl(diagonal.nestedExpression()),
m_index(diagonal.index())
{ }
diff --git a/Eigen/src/Core/CwiseBinaryOp.h b/Eigen/src/Core/CwiseBinaryOp.h
index bf2632d9e..8b8de8382 100644
--- a/Eigen/src/Core/CwiseBinaryOp.h
+++ b/Eigen/src/Core/CwiseBinaryOp.h
@@ -100,8 +100,14 @@ class CwiseBinaryOp :
typedef typename internal::remove_reference<LhsNested>::type _LhsNested;
typedef typename internal::remove_reference<RhsNested>::type _RhsNested;
- EIGEN_DEVICE_FUNC
- EIGEN_STRONG_INLINE CwiseBinaryOp(const Lhs& aLhs, const Rhs& aRhs, const BinaryOp& func = BinaryOp())
+#if EIGEN_COMP_MSVC && EIGEN_HAS_CXX11
+ //Required for Visual Studio or the Copy constructor will probably not get inlined!
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ CwiseBinaryOp(const CwiseBinaryOp<BinaryOp,LhsType,RhsType>&) = default;
+#endif
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ CwiseBinaryOp(const Lhs& aLhs, const Rhs& aRhs, const BinaryOp& func = BinaryOp())
: m_lhs(aLhs), m_rhs(aRhs), m_functor(func)
{
EIGEN_CHECK_BINARY_COMPATIBILIY(BinaryOp,typename Lhs::Scalar,typename Rhs::Scalar);
@@ -110,16 +116,16 @@ class CwiseBinaryOp :
eigen_assert(aLhs.rows() == aRhs.rows() && aLhs.cols() == aRhs.cols());
}
- EIGEN_DEVICE_FUNC
- EIGEN_STRONG_INLINE Index rows() const {
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Index rows() const {
// return the fixed size type if available to enable compile time optimizations
if (internal::traits<typename internal::remove_all<LhsNested>::type>::RowsAtCompileTime==Dynamic)
return m_rhs.rows();
else
return m_lhs.rows();
}
- EIGEN_DEVICE_FUNC
- EIGEN_STRONG_INLINE Index cols() const {
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Index cols() const {
// return the fixed size type if available to enable compile time optimizations
if (internal::traits<typename internal::remove_all<LhsNested>::type>::ColsAtCompileTime==Dynamic)
return m_rhs.cols();
@@ -128,13 +134,13 @@ class CwiseBinaryOp :
}
/** \returns the left hand side nested expression */
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const _LhsNested& lhs() const { return m_lhs; }
/** \returns the right hand side nested expression */
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const _RhsNested& rhs() const { return m_rhs; }
/** \returns the functor representing the binary operation */
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const BinaryOp& functor() const { return m_functor; }
protected:
diff --git a/Eigen/src/Core/CwiseNullaryOp.h b/Eigen/src/Core/CwiseNullaryOp.h
index d149abe93..ef708197b 100644
--- a/Eigen/src/Core/CwiseNullaryOp.h
+++ b/Eigen/src/Core/CwiseNullaryOp.h
@@ -239,7 +239,7 @@ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::RandomA
DenseBase<Derived>::LinSpaced(Sequential_t, Index size, const Scalar& low, const Scalar& high)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
- return DenseBase<Derived>::NullaryExpr(size, internal::linspaced_op<Scalar,PacketScalar>(low,high,size));
+ return DenseBase<Derived>::NullaryExpr(size, internal::linspaced_op<Scalar>(low,high,size));
}
/** \deprecated because of accuracy loss. In Eigen 3.3, it is an alias for LinSpaced(const Scalar&,const Scalar&)
@@ -252,7 +252,7 @@ DenseBase<Derived>::LinSpaced(Sequential_t, const Scalar& low, const Scalar& hig
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived)
- return DenseBase<Derived>::NullaryExpr(Derived::SizeAtCompileTime, internal::linspaced_op<Scalar,PacketScalar>(low,high,Derived::SizeAtCompileTime));
+ return DenseBase<Derived>::NullaryExpr(Derived::SizeAtCompileTime, internal::linspaced_op<Scalar>(low,high,Derived::SizeAtCompileTime));
}
/**
@@ -283,7 +283,7 @@ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::RandomA
DenseBase<Derived>::LinSpaced(Index size, const Scalar& low, const Scalar& high)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
- return DenseBase<Derived>::NullaryExpr(size, internal::linspaced_op<Scalar,PacketScalar>(low,high,size));
+ return DenseBase<Derived>::NullaryExpr(size, internal::linspaced_op<Scalar>(low,high,size));
}
/**
@@ -296,7 +296,7 @@ DenseBase<Derived>::LinSpaced(const Scalar& low, const Scalar& high)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived)
- return DenseBase<Derived>::NullaryExpr(Derived::SizeAtCompileTime, internal::linspaced_op<Scalar,PacketScalar>(low,high,Derived::SizeAtCompileTime));
+ return DenseBase<Derived>::NullaryExpr(Derived::SizeAtCompileTime, internal::linspaced_op<Scalar>(low,high,Derived::SizeAtCompileTime));
}
/** \returns true if all coefficients in this matrix are approximately equal to \a val, to within precision \a prec */
@@ -398,7 +398,7 @@ template<typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setLinSpaced(Index newSize, const Scalar& low, const Scalar& high)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
- return derived() = Derived::NullaryExpr(newSize, internal::linspaced_op<Scalar,PacketScalar>(low,high,newSize));
+ return derived() = Derived::NullaryExpr(newSize, internal::linspaced_op<Scalar>(low,high,newSize));
}
/**
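// ---- Editor's example (not part of the diff) ----
// The linspaced_op functor above now takes only the Scalar type; the user-facing
// DenseBase::LinSpaced API is unchanged. A minimal usage sketch:
#include <Eigen/Dense>
#include <iostream>

int main() {
  // Five evenly spaced values from 0 to 1: 0, 0.25, 0.5, 0.75, 1.
  Eigen::VectorXd v = Eigen::VectorXd::LinSpaced(5, 0.0, 1.0);
  std::cout << v.transpose() << "\n";
  return 0;
}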
diff --git a/Eigen/src/Core/CwiseUnaryView.h b/Eigen/src/Core/CwiseUnaryView.h
index 271033056..21cf5ea9e 100644
--- a/Eigen/src/Core/CwiseUnaryView.h
+++ b/Eigen/src/Core/CwiseUnaryView.h
@@ -81,7 +81,7 @@ class CwiseUnaryView : public CwiseUnaryViewImpl<ViewOp, MatrixType, typename in
/** \returns the nested expression */
typename internal::remove_reference<MatrixTypeNested>::type&
- nestedExpression() { return m_matrix.const_cast_derived(); }
+ nestedExpression() { return m_matrix; }
protected:
MatrixTypeNested m_matrix;
diff --git a/Eigen/src/Core/DenseBase.h b/Eigen/src/Core/DenseBase.h
index 2a0927317..2289fe41f 100644
--- a/Eigen/src/Core/DenseBase.h
+++ b/Eigen/src/Core/DenseBase.h
@@ -40,7 +40,7 @@ static inline void check_DenseIndex_is_signed() {
*/
template<typename Derived> class DenseBase
#ifndef EIGEN_PARSED_BY_DOXYGEN
- : public DenseCoeffsBase<Derived>
+ : public DenseCoeffsBase<Derived, internal::accessors_level<Derived>::value>
#else
: public DenseCoeffsBase<Derived,DirectWriteAccessors>
#endif // not EIGEN_PARSED_BY_DOXYGEN
@@ -71,7 +71,7 @@ template<typename Derived> class DenseBase
typedef Scalar value_type;
typedef typename NumTraits<Scalar>::Real RealScalar;
- typedef DenseCoeffsBase<Derived> Base;
+ typedef DenseCoeffsBase<Derived, internal::accessors_level<Derived>::value> Base;
using Base::derived;
using Base::const_cast_derived;
@@ -150,8 +150,8 @@ template<typename Derived> class DenseBase
* \sa SizeAtCompileTime, MaxRowsAtCompileTime, MaxColsAtCompileTime
*/
- IsVectorAtCompileTime = internal::traits<Derived>::MaxRowsAtCompileTime == 1
- || internal::traits<Derived>::MaxColsAtCompileTime == 1,
+ IsVectorAtCompileTime = internal::traits<Derived>::RowsAtCompileTime == 1
+ || internal::traits<Derived>::ColsAtCompileTime == 1,
/**< This is set to true if either the number of rows or the number of
* columns is known at compile-time to be equal to 1. Indeed, in that case,
* we are dealing with a column-vector (if there is only one column) or with
@@ -266,9 +266,9 @@ template<typename Derived> class DenseBase
/** \internal Represents a matrix with all coefficients equal to one another*/
typedef CwiseNullaryOp<internal::scalar_constant_op<Scalar>,PlainObject> ConstantReturnType;
/** \internal \deprecated Represents a vector with linearly spaced coefficients that allows sequential access only. */
- typedef CwiseNullaryOp<internal::linspaced_op<Scalar,PacketScalar>,PlainObject> SequentialLinSpacedReturnType;
+ typedef CwiseNullaryOp<internal::linspaced_op<Scalar>,PlainObject> SequentialLinSpacedReturnType;
/** \internal Represents a vector with linearly spaced coefficients that allows random access. */
- typedef CwiseNullaryOp<internal::linspaced_op<Scalar,PacketScalar>,PlainObject> RandomAccessLinSpacedReturnType;
+ typedef CwiseNullaryOp<internal::linspaced_op<Scalar>,PlainObject> RandomAccessLinSpacedReturnType;
/** \internal the return type of MatrixBase::eigenvalues() */
typedef Matrix<typename NumTraits<typename internal::traits<Derived>::Scalar>::Real, internal::traits<Derived>::ColsAtCompileTime, 1> EigenvaluesReturnType;
@@ -415,7 +415,7 @@ template<typename Derived> class DenseBase
*
*/
template<typename OtherDerived>
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
void swap(const DenseBase<OtherDerived>& other)
{
EIGEN_STATIC_ASSERT(!OtherDerived::IsPlainObjectBase,THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY);
@@ -427,7 +427,7 @@ template<typename Derived> class DenseBase
*
*/
template<typename OtherDerived>
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
void swap(PlainObjectBase<OtherDerived>& other)
{
eigen_assert(rows()==other.rows() && cols()==other.cols());
diff --git a/Eigen/src/Core/DiagonalMatrix.h b/Eigen/src/Core/DiagonalMatrix.h
index afab2f1b6..542685c65 100644
--- a/Eigen/src/Core/DiagonalMatrix.h
+++ b/Eigen/src/Core/DiagonalMatrix.h
@@ -178,6 +178,30 @@ class DiagonalMatrix
EIGEN_DEVICE_FUNC
inline DiagonalMatrix(const Scalar& x, const Scalar& y, const Scalar& z) : m_diagonal(x,y,z) {}
+ #if EIGEN_HAS_CXX11
+ /** \brief Construct a diagonal matrix with fixed size from an arbitrary number of coefficients. \cpp11
+ *
+ * There exist C++98 analogue constructors for fixed-size diagonal matrices having 2 or 3 coefficients.
+ *
+ * \warning To construct a diagonal matrix of fixed size, the number of values passed to this
+ * constructor must match the fixed dimension of \c *this.
+ *
+ * \sa DiagonalMatrix(const Scalar&, const Scalar&)
+ * \sa DiagonalMatrix(const Scalar&, const Scalar&, const Scalar&)
+ */
+ template <typename... ArgTypes>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ DiagonalMatrix(const Scalar& a0, const Scalar& a1, const Scalar& a2, const ArgTypes&... args)
+ : m_diagonal(a0, a1, a2, args...) {}
+
+ /** \brief Constructs a DiagonalMatrix and initializes it with the coefficients given by an initializer list of initializer
+ * lists \cpp11
+ */
+ EIGEN_DEVICE_FUNC
+ explicit EIGEN_STRONG_INLINE DiagonalMatrix(const std::initializer_list<std::initializer_list<Scalar>>& list)
+ : m_diagonal(list) {}
+ #endif // EIGEN_HAS_CXX11
+
/** Copy constructor. */
template<typename OtherDerived>
EIGEN_DEVICE_FUNC
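// ---- Editor's example (not part of the diff) ----
// Sketch of the variadic C++11 constructor added above, assuming a C++11 compiler; the
// diagonal-times-vector product below is the long-standing DiagonalMatrix API.
#include <Eigen/Dense>

int main() {
  // One coefficient per diagonal entry.
  Eigen::DiagonalMatrix<double, 4> d(1.0, 2.0, 3.0, 4.0);
  Eigen::Vector4d x(1.0, 1.0, 1.0, 1.0);
  Eigen::Vector4d y = d * x;   // y = (1, 2, 3, 4): coefficient-wise scaling
  (void)y;
  return 0;
}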
diff --git a/Eigen/src/Core/GeneralProduct.h b/Eigen/src/Core/GeneralProduct.h
index 43f3b84c8..bf7ef54b5 100644
--- a/Eigen/src/Core/GeneralProduct.h
+++ b/Eigen/src/Core/GeneralProduct.h
@@ -239,7 +239,7 @@ template<> struct gemv_dense_selector<OnTheRight,ColMajor,true>
// on, the other hand it is good for the cache to pack the vector anyways...
EvalToDestAtCompileTime = (ActualDest::InnerStrideAtCompileTime==1),
ComplexByReal = (NumTraits<LhsScalar>::IsComplex) && (!NumTraits<RhsScalar>::IsComplex),
- MightCannotUseDest = (!EvalToDestAtCompileTime) || ComplexByReal
+ MightCannotUseDest = ((!EvalToDestAtCompileTime) || ComplexByReal) && (ActualDest::MaxSizeAtCompileTime!=0)
};
typedef const_blas_data_mapper<LhsScalar,Index,ColMajor> LhsMapper;
@@ -326,7 +326,7 @@ template<> struct gemv_dense_selector<OnTheRight,RowMajor,true>
enum {
// FIXME find a way to allow an inner stride on the result if packet_traits<Scalar>::size==1
// on, the other hand it is good for the cache to pack the vector anyways...
- DirectlyUseRhs = ActualRhsTypeCleaned::InnerStrideAtCompileTime==1
+ DirectlyUseRhs = ActualRhsTypeCleaned::InnerStrideAtCompileTime==1 || ActualRhsTypeCleaned::MaxSizeAtCompileTime==0
};
gemv_static_vector_if<RhsScalar,ActualRhsTypeCleaned::SizeAtCompileTime,ActualRhsTypeCleaned::MaxSizeAtCompileTime,!DirectlyUseRhs> static_rhs;
@@ -396,8 +396,8 @@ template<> struct gemv_dense_selector<OnTheRight,RowMajor,false>
*/
template<typename Derived>
template<typename OtherDerived>
-EIGEN_DEVICE_FUNC
-inline const Product<Derived, OtherDerived>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+const Product<Derived, OtherDerived>
MatrixBase<Derived>::operator*(const MatrixBase<OtherDerived> &other) const
{
// A note regarding the function declaration: In MSVC, this function will sometimes
@@ -439,8 +439,9 @@ MatrixBase<Derived>::operator*(const MatrixBase<OtherDerived> &other) const
*/
template<typename Derived>
template<typename OtherDerived>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const Product<Derived,OtherDerived,LazyProduct>
-EIGEN_DEVICE_FUNC MatrixBase<Derived>::lazyProduct(const MatrixBase<OtherDerived> &other) const
+MatrixBase<Derived>::lazyProduct(const MatrixBase<OtherDerived> &other) const
{
enum {
ProductIsValid = Derived::ColsAtCompileTime==Dynamic
diff --git a/Eigen/src/Core/GenericPacketMath.h b/Eigen/src/Core/GenericPacketMath.h
index da1350f1b..04a321b9f 100644
--- a/Eigen/src/Core/GenericPacketMath.h
+++ b/Eigen/src/Core/GenericPacketMath.h
@@ -56,6 +56,7 @@ struct default_packet_traits
HasConj = 1,
HasSetLinear = 1,
HasBlend = 0,
+ HasReduxp = 1,
HasDiv = 0,
HasSqrt = 0,
@@ -151,15 +152,18 @@ pcast(const SrcPacket& a, const SrcPacket& /*b*/, const SrcPacket& /*c*/, const
return static_cast<TgtPacket>(a);
}
+/** \internal \returns reinterpret_cast<Target>(a) */
+template <typename Target, typename Packet>
+EIGEN_DEVICE_FUNC inline Target
+preinterpret(const Packet& a); /* { return reinterpret_cast<const Target&>(a); } */
+
/** \internal \returns a + b (coeff-wise) */
template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
-padd(const Packet& a,
- const Packet& b) { return a+b; }
+padd(const Packet& a, const Packet& b) { return a+b; }
/** \internal \returns a - b (coeff-wise) */
template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
-psub(const Packet& a,
- const Packet& b) { return a-b; }
+psub(const Packet& a, const Packet& b) { return a-b; }
/** \internal \returns -a (coeff-wise) */
template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
@@ -172,23 +176,19 @@ pconj(const Packet& a) { return numext::conj(a); }
/** \internal \returns a * b (coeff-wise) */
template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
-pmul(const Packet& a,
- const Packet& b) { return a*b; }
+pmul(const Packet& a, const Packet& b) { return a*b; }
/** \internal \returns a / b (coeff-wise) */
template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
-pdiv(const Packet& a,
- const Packet& b) { return a/b; }
+pdiv(const Packet& a, const Packet& b) { return a/b; }
/** \internal \returns the min of \a a and \a b (coeff-wise) */
template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
-pmin(const Packet& a,
- const Packet& b) { return numext::mini(a, b); }
+pmin(const Packet& a, const Packet& b) { return numext::mini(a, b); }
/** \internal \returns the max of \a a and \a b (coeff-wise) */
template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
-pmax(const Packet& a,
- const Packet& b) { return numext::maxi(a, b); }
+pmax(const Packet& a, const Packet& b) { return numext::maxi(a, b); }
/** \internal \returns the absolute value of \a a */
template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
@@ -212,7 +212,72 @@ pxor(const Packet& a, const Packet& b) { return a ^ b; }
/** \internal \returns the bitwise andnot of \a a and \a b */
template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
-pandnot(const Packet& a, const Packet& b) { return a & (!b); }
+pandnot(const Packet& a, const Packet& b) { return a & (~b); }
+
+/** \internal \returns ones */
+template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
+ptrue(const Packet& /*a*/) { Packet b; memset((void*)&b, 0xff, sizeof(b)); return b;}
+
+template <typename RealScalar>
+EIGEN_DEVICE_FUNC inline std::complex<RealScalar> ptrue(const std::complex<RealScalar>& /*a*/) {
+ RealScalar b;
+ b = ptrue(b);
+ return std::complex<RealScalar>(b, b);
+}
+
+/** \internal \returns the bitwise not of \a a */
+template <typename Packet> EIGEN_DEVICE_FUNC inline Packet
+pnot(const Packet& a) { return pxor(ptrue(a), a);}
+
+/** \internal \returns \a a shifted by N bits to the right */
+template<int N> EIGEN_DEVICE_FUNC inline int
+pshiftright(const int& a) { return a >> N; }
+template<int N> EIGEN_DEVICE_FUNC inline long int
+pshiftright(const long int& a) { return a >> N; }
+
+/** \internal \returns \a a shifted by N bits to the left */
+template<int N> EIGEN_DEVICE_FUNC inline int
+pshiftleft(const int& a) { return a << N; }
+template<int N> EIGEN_DEVICE_FUNC inline long int
+pshiftleft(const long int& a) { return a << N; }
+
+/** \internal \returns the significand and exponent of the underlying floating point numbers
+ * See https://en.cppreference.com/w/cpp/numeric/math/frexp
+ */
+template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
+pfrexp(const Packet &a, Packet &exponent) { return std::frexp(a,&exponent); }
+
+/** \internal \returns a * 2^exponent
+ * See https://en.cppreference.com/w/cpp/numeric/math/ldexp
+ */
+template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
+pldexp(const Packet &a, const Packet &exponent) { return std::ldexp(a,exponent); }
+
+/** \internal \returns zeros */
+template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
+pzero(const Packet& a) { return pxor(a,a); }
+
+/** \internal \returns the bits of \a a or \a b according to the input bit mask \a mask */
+template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
+pselect(const Packet& mask, const Packet& a, const Packet& b) {
+ return por(pand(a,mask),pandnot(b,mask));
+}
+
+/** \internal \returns a <= b as a bit mask */
+template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
+pcmp_le(const Packet& a, const Packet& b) { return a<=b ? ptrue(a) : pzero(a); }
+
+/** \internal \returns a < b as a bit mask */
+template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
+pcmp_lt(const Packet& a, const Packet& b) { return a<b ? ptrue(a) : pzero(a); }
+
+/** \internal \returns a == b as a bit mask */
+template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
+pcmp_eq(const Packet& a, const Packet& b) { return a==b ? ptrue(a) : pzero(a); }
+
+/** \internal \returns a < b or a==NaN or b==NaN as a bit mask */
+template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
+pcmp_lt_or_nan(const Packet& a, const Packet& b) { return pnot(pcmp_le(b,a)); }
/** \internal \returns a packet version of \a *from, from must be 16 bytes aligned */
template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
@@ -226,6 +291,10 @@ ploadu(const typename unpacket_traits<Packet>::type* from) { return *from; }
template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
pset1(const typename unpacket_traits<Packet>::type& a) { return a; }
+/** \internal \returns a packet with constant coefficients set from bits */
+template<typename Packet,typename BitsType> EIGEN_DEVICE_FUNC inline Packet
+pset1frombits(BitsType a);
+
/** \internal \returns a packet with constant coefficients \a a[0], e.g.: (a[0],a[0],a[0],a[0]) */
template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
pload1(const typename unpacket_traits<Packet>::type *a) { return pset1<Packet>(*a); }
@@ -339,18 +408,39 @@ typename conditional<(unpacket_traits<Packet>::size%8)==0,typename unpacket_trai
predux_half_dowto4(const Packet& a)
{ return a; }
-/** \internal \returns the product of the elements of \a a*/
+/** \internal \returns the product of the elements of \a a */
template<typename Packet> EIGEN_DEVICE_FUNC inline typename unpacket_traits<Packet>::type predux_mul(const Packet& a)
{ return a; }
-/** \internal \returns the min of the elements of \a a*/
+/** \internal \returns the min of the elements of \a a */
template<typename Packet> EIGEN_DEVICE_FUNC inline typename unpacket_traits<Packet>::type predux_min(const Packet& a)
{ return a; }
-/** \internal \returns the max of the elements of \a a*/
+/** \internal \returns the max of the elements of \a a */
template<typename Packet> EIGEN_DEVICE_FUNC inline typename unpacket_traits<Packet>::type predux_max(const Packet& a)
{ return a; }
+/** \internal \returns true if all coeffs of \a a mean "true"
+ * It is supposed to be called on values returned by pcmp_*.
+ */
+// not needed yet
+// template<typename Packet> EIGEN_DEVICE_FUNC inline bool predux_all(const Packet& a)
+// { return bool(a); }
+
+/** \internal \returns true if any coeff of \a a means "true"
+ * It is supposed to be called on values returned by pcmp_*.
+ */
+template<typename Packet> EIGEN_DEVICE_FUNC inline bool predux_any(const Packet& a)
+{
+ // Dirty but generic implementation where "true" is assumed to be non-zero and all the same.
+ // It is expected that "true" is either:
+ // - Scalar(1)
+ // - bits full of ones (NaN for floats),
+ // - or the first bit equal to 1 (1 for ints, smallest denormal for floats).
+ // For all these cases, taking the sum is just fine, and this boils down to a no-op for scalars.
+ return bool(predux(a));
+}
+
/** \internal \returns the reversed elements of \a a*/
template<typename Packet> EIGEN_DEVICE_FUNC inline Packet preverse(const Packet& a)
{ return a; }
@@ -597,6 +687,22 @@ pinsertlast(const Packet& a, typename unpacket_traits<Packet>::type b)
return pblend(mask, pset1<Packet>(b), a);
}
+/***************************************************************************
+ * Some generic implementations to be used by implementors
+***************************************************************************/
+
+/** Default implementation of pfrexp for float.
+ * It is expected to be called by implementers of template<> pfrexp.
+ */
+template<typename Packet> EIGEN_STRONG_INLINE Packet
+pfrexp_float(const Packet& a, Packet& exponent);
+
+/** Default implementation of pldexp for float.
+ * It is expected to be called by implementers of template<> pldexp.
+ */
+template<typename Packet> EIGEN_STRONG_INLINE Packet
+pldexp_float(Packet a, Packet exponent);
+
} // end namespace internal
} // end namespace Eigen
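// ---- Editor's example (not part of the diff) ----
// The generic pselect above picks the bits of `a` where the mask is all-ones and the bits of
// `b` where it is all-zeros, i.e. (a & mask) | (b & ~mask). The snippet mirrors that identity
// on plain uint32_t values; no Eigen internals are called, the packet names only appear in comments.
#include <cstdint>
#include <cassert>

static std::uint32_t select_by_mask(std::uint32_t mask, std::uint32_t a, std::uint32_t b) {
  return (a & mask) | (b & ~mask);   // por(pand(a,mask), pandnot(b,mask))
}

int main() {
  const std::uint32_t all_ones  = 0xFFFFFFFFu;  // what ptrue produces
  const std::uint32_t all_zeros = 0u;           // what pzero produces
  assert(select_by_mask(all_ones,  0xABCD0000u, 0x1234u) == 0xABCD0000u); // mask true  -> a
  assert(select_by_mask(all_zeros, 0xABCD0000u, 0x1234u) == 0x1234u);     // mask false -> b
  return 0;
}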
diff --git a/Eigen/src/Core/GlobalFunctions.h b/Eigen/src/Core/GlobalFunctions.h
index 563df6e84..71377cee5 100644
--- a/Eigen/src/Core/GlobalFunctions.h
+++ b/Eigen/src/Core/GlobalFunctions.h
@@ -66,6 +66,11 @@ namespace Eigen
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(sinh,scalar_sinh_op,hyperbolic sine,\sa ArrayBase::sinh)
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(cosh,scalar_cosh_op,hyperbolic cosine,\sa ArrayBase::cosh)
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(tanh,scalar_tanh_op,hyperbolic tangent,\sa ArrayBase::tanh)
+#if EIGEN_HAS_CXX11_MATH
+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(asinh,scalar_asinh_op,inverse hyperbolic sine,\sa ArrayBase::asinh)
+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(acosh,scalar_acosh_op,inverse hyperbolic cosine,\sa ArrayBase::acosh)
+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(atanh,scalar_atanh_op,inverse hyperbolic tangent,\sa ArrayBase::atanh)
+#endif
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(logistic,scalar_logistic_op,logistic function,\sa ArrayBase::logistic)
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(lgamma,scalar_lgamma_op,natural logarithm of the gamma function,\sa ArrayBase::lgamma)
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(digamma,scalar_digamma_op,derivative of lgamma,\sa ArrayBase::digamma)
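// ---- Editor's example (not part of the diff) ----
// Sketch of the coefficient-wise inverse hyperbolic functions declared above; they are only
// available when EIGEN_HAS_CXX11_MATH is enabled, and the free functions live in namespace Eigen.
#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::ArrayXd a = Eigen::ArrayXd::LinSpaced(5, 0.1, 0.9);
  Eigen::ArrayXd b = Eigen::atanh(a);   // coefficient-wise inverse hyperbolic tangent
  Eigen::ArrayXd c = Eigen::asinh(a);   // coefficient-wise inverse hyperbolic sine
  std::cout << b.transpose() << "\n" << c.transpose() << "\n";
  return 0;
}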
diff --git a/Eigen/src/Core/IO.h b/Eigen/src/Core/IO.h
index da7fd6cce..063511f24 100644
--- a/Eigen/src/Core/IO.h
+++ b/Eigen/src/Core/IO.h
@@ -41,6 +41,7 @@ std::ostream & print_matrix(std::ostream & s, const Derived& _m, const IOFormat&
* - \b rowSuffix string printed at the end of each row
* - \b matPrefix string printed at the beginning of the matrix
* - \b matSuffix string printed at the end of the matrix
+ * - \b fill character printed to fill the empty space in aligned columns
*
* Example: \include IOFormat.cpp
* Output: \verbinclude IOFormat.out
@@ -53,9 +54,9 @@ struct IOFormat
IOFormat(int _precision = StreamPrecision, int _flags = 0,
const std::string& _coeffSeparator = " ",
const std::string& _rowSeparator = "\n", const std::string& _rowPrefix="", const std::string& _rowSuffix="",
- const std::string& _matPrefix="", const std::string& _matSuffix="")
+ const std::string& _matPrefix="", const std::string& _matSuffix="", const char _fill=' ')
: matPrefix(_matPrefix), matSuffix(_matSuffix), rowPrefix(_rowPrefix), rowSuffix(_rowSuffix), rowSeparator(_rowSeparator),
- rowSpacer(""), coeffSeparator(_coeffSeparator), precision(_precision), flags(_flags)
+ rowSpacer(""), coeffSeparator(_coeffSeparator), fill(_fill), precision(_precision), flags(_flags)
{
// TODO check if rowPrefix, rowSuffix or rowSeparator contains a newline
// don't add rowSpacer if columns are not to be aligned
@@ -71,6 +72,7 @@ struct IOFormat
std::string matPrefix, matSuffix;
std::string rowPrefix, rowSuffix, rowSeparator, rowSpacer;
std::string coeffSeparator;
+ char fill;
int precision;
int flags;
};
@@ -176,18 +178,26 @@ std::ostream & print_matrix(std::ostream & s, const Derived& _m, const IOFormat&
width = std::max<Index>(width, Index(sstr.str().length()));
}
}
+ std::streamsize old_width = s.width();
+ char old_fill_character = s.fill();
s << fmt.matPrefix;
for(Index i = 0; i < m.rows(); ++i)
{
if (i)
s << fmt.rowSpacer;
s << fmt.rowPrefix;
- if(width) s.width(width);
+ if(width) {
+ s.fill(fmt.fill);
+ s.width(width);
+ }
s << m.coeff(i, 0);
for(Index j = 1; j < m.cols(); ++j)
{
s << fmt.coeffSeparator;
- if (width) s.width(width);
+ if(width) {
+ s.fill(fmt.fill);
+ s.width(width);
+ }
s << m.coeff(i, j);
}
s << fmt.rowSuffix;
@@ -196,6 +206,10 @@ std::ostream & print_matrix(std::ostream & s, const Derived& _m, const IOFormat&
}
s << fmt.matSuffix;
if(explicit_precision) s.precision(old_precision);
+ if(width) {
+ s.fill(old_fill_character);
+ s.width(old_width);
+ }
return s;
}
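// ---- Editor's example (not part of the diff) ----
// Sketch of the new `fill` parameter: it is the last constructor argument of IOFormat and
// pads the space used to align coefficients when column alignment is enabled.
#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::Matrix2d m;
  m << 1, 22,
       333, 4;
  // precision, flags, coeff separator, row separator, row prefix/suffix, mat prefix/suffix, fill
  Eigen::IOFormat padded(Eigen::StreamPrecision, 0, ", ", "\n", "[", "]", "", "", '.');
  std::cout << m.format(padded) << "\n";
  return 0;
}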
diff --git a/Eigen/src/Core/IndexedView.h b/Eigen/src/Core/IndexedView.h
index 3485d8f46..377f8a5cc 100644
--- a/Eigen/src/Core/IndexedView.h
+++ b/Eigen/src/Core/IndexedView.h
@@ -132,7 +132,7 @@ public:
/** \returns the nested expression */
typename internal::remove_reference<XprType>::type&
- nestedExpression() { return m_xpr.const_cast_derived(); }
+ nestedExpression() { return m_xpr; }
/** \returns a const reference to the object storing/generating the row indices */
const RowIndices& rowIndices() const { return m_rowIndices; }
diff --git a/Eigen/src/Core/Matrix.h b/Eigen/src/Core/Matrix.h
index 90c336d8c..4b714328c 100644
--- a/Eigen/src/Core/Matrix.h
+++ b/Eigen/src/Core/Matrix.h
@@ -255,29 +255,27 @@ class Matrix
*
* \sa resize(Index,Index)
*/
- EIGEN_DEVICE_FUNC
- EIGEN_STRONG_INLINE Matrix() : Base()
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Matrix() : Base()
{
Base::_check_template_params();
EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
}
// FIXME is it still needed
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
explicit Matrix(internal::constructor_without_unaligned_array_assert)
: Base(internal::constructor_without_unaligned_array_assert())
{ Base::_check_template_params(); EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED }
#if EIGEN_HAS_RVALUE_REFERENCES
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
Matrix(Matrix&& other) EIGEN_NOEXCEPT_IF(std::is_nothrow_move_constructible<Scalar>::value)
: Base(std::move(other))
{
Base::_check_template_params();
- if (RowsAtCompileTime!=Dynamic && ColsAtCompileTime!=Dynamic)
- Base::_set_noalias(other);
}
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
Matrix& operator=(Matrix&& other) EIGEN_NOEXCEPT_IF(std::is_nothrow_move_assignable<Scalar>::value)
{
other.swap(*this);
@@ -289,20 +287,59 @@ class Matrix
// This constructor is for both 1x1 matrices and dynamic vectors
template<typename T>
- EIGEN_DEVICE_FUNC
- EIGEN_STRONG_INLINE explicit Matrix(const T& x)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ explicit Matrix(const T& x)
{
Base::_check_template_params();
Base::template _init1<T>(x);
}
template<typename T0, typename T1>
- EIGEN_DEVICE_FUNC
- EIGEN_STRONG_INLINE Matrix(const T0& x, const T1& y)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Matrix(const T0& x, const T1& y)
{
Base::_check_template_params();
Base::template _init2<T0,T1>(x, y);
}
+
+ #if EIGEN_HAS_CXX11
+ /** \copydoc PlainObjectBase(const Scalar&, const Scalar&, const Scalar&, const Scalar&, const ArgTypes&...)
+ *
+ * Example: \include Matrix_variadic_ctor_cxx11.cpp
+ * Output: \verbinclude Matrix_variadic_ctor_cxx11.out
+ *
+ * \sa Matrix(const std::initializer_list<std::initializer_list<Scalar>>&)
+ */
+ template <typename... ArgTypes>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Matrix(const Scalar& a0, const Scalar& a1, const Scalar& a2, const Scalar& a3, const ArgTypes&... args)
+ : Base(a0, a1, a2, a3, args...) {}
+
+ /** \brief Constructs a Matrix and initializes it from the coefficients given as initializer-lists grouped by row. \cpp11
+ *
+ * In the general case, the constructor takes a list of rows, each row being represented as a list of coefficients:
+ *
+ * Example: \include Matrix_initializer_list_23_cxx11.cpp
+ * Output: \verbinclude Matrix_initializer_list_23_cxx11.out
+ *
+ * Each of the inner initializer lists must contain the exact same number of elements, otherwise an assertion is triggered.
+ *
+ * In the case of a compile-time column vector, implicit transposition from a single row is allowed.
+ * Therefore <code>VectorXd{{1,2,3,4,5}}</code> is legal and the more verbose syntax
+ * <code>RowVectorXd{{1},{2},{3},{4},{5}}</code> can be avoided:
+ *
+ * Example: \include Matrix_initializer_list_vector_cxx11.cpp
+ * Output: \verbinclude Matrix_initializer_list_vector_cxx11.out
+ *
+ * In the case of fixed-size matrices, the initializer list sizes must exactly match the matrix sizes,
+ * and implicit transposition is allowed for compile-time vectors only.
+ *
+ * \sa Matrix(const Scalar&, const Scalar&, const Scalar&, const Scalar&, const ArgTypes&...)
+ */
+ EIGEN_DEVICE_FUNC
+ explicit EIGEN_STRONG_INLINE Matrix(const std::initializer_list<std::initializer_list<Scalar>>& list) : Base(list) {}
+ #endif // end EIGEN_HAS_CXX11
+
#else
/** \brief Constructs a fixed-sized matrix initialized with coefficients starting at \a data */
EIGEN_DEVICE_FUNC
@@ -321,7 +358,8 @@ class Matrix
* \c EIGEN_INITIALIZE_MATRICES_BY_{ZERO,\c NAN} macros (see \ref TopicPreprocessorDirectives).
*/
EIGEN_STRONG_INLINE explicit Matrix(Index dim);
- /** \brief Constructs an initialized 1x1 matrix with the given coefficient */
+ /** \brief Constructs an initialized 1x1 matrix with the given coefficient
+ * \sa Matrix(const Scalar&, const Scalar&, const Scalar&, const Scalar&, const ArgTypes&...) */
Matrix(const Scalar& x);
/** \brief Constructs an uninitialized matrix with \a rows rows and \a cols columns.
*
@@ -338,11 +376,14 @@ class Matrix
EIGEN_DEVICE_FUNC
Matrix(Index rows, Index cols);
- /** \brief Constructs an initialized 2D vector with given coefficients */
+ /** \brief Constructs an initialized 2D vector with given coefficients
+ * \sa Matrix(const Scalar&, const Scalar&, const Scalar&, const Scalar&, const ArgTypes&...) */
Matrix(const Scalar& x, const Scalar& y);
- #endif
+ #endif // end EIGEN_PARSED_BY_DOXYGEN
- /** \brief Constructs an initialized 3D vector with given coefficients */
+ /** \brief Constructs an initialized 3D vector with given coefficients
+ * \sa Matrix(const Scalar&, const Scalar&, const Scalar&, const Scalar&, const ArgTypes&...)
+ */
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Matrix(const Scalar& x, const Scalar& y, const Scalar& z)
{
@@ -352,7 +393,9 @@ class Matrix
m_storage.data()[1] = y;
m_storage.data()[2] = z;
}
- /** \brief Constructs an initialized 4D vector with given coefficients */
+ /** \brief Constructs an initialized 4D vector with given coefficients
+ * \sa Matrix(const Scalar&, const Scalar&, const Scalar&, const Scalar&, const ArgTypes&...)
+ */
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Matrix(const Scalar& x, const Scalar& y, const Scalar& z, const Scalar& w)
{
@@ -407,7 +450,7 @@ class Matrix
*
* \ingroup Core_Module
*
- * Eigen defines several typedef shortcuts for most common matrix and vector types.
+ * %Eigen defines several typedef shortcuts for most common matrix and vector types.
*
* The general patterns are the following:
*
@@ -419,6 +462,15 @@ class Matrix
*
* There are also \c VectorSizeType and \c RowVectorSizeType which are self-explanatory. For example, \c Vector4cf is
* a fixed-size vector of 4 complex floats.
+ *
+ * With \cpp11, template aliases are also defined for common sizes.
+ * They follow the same pattern as above except that the scalar type suffix is replaced by a
+ * template parameter, i.e.:
+ * - `MatrixSize<Type>` where `Size` can be \c 2,\c 3,\c 4 for fixed size square matrices or \c X for dynamic size.
+ * - `MatrixXSize<Type>` and `MatrixSizeX<Type>` where `Size` can be \c 2,\c 3,\c 4 for hybrid dynamic/fixed matrices.
+ * - `VectorSize<Type>` and `RowVectorSize<Type>` for column and row vectors.
+ *
+ * With \cpp11, you can also use fully generic column and row vector types: `Vector<Type,Size>` and `RowVector<Type,Size>`.
*
* \sa class Matrix
*/
@@ -456,6 +508,55 @@ EIGEN_MAKE_TYPEDEFS_ALL_SIZES(std::complex<double>, cd)
#undef EIGEN_MAKE_TYPEDEFS
#undef EIGEN_MAKE_FIXED_TYPEDEFS
+#if EIGEN_HAS_CXX11
+
+#define EIGEN_MAKE_TYPEDEFS(Size, SizeSuffix) \
+/** \ingroup matrixtypedefs */ \
+/** \brief \cpp11 */ \
+template <typename Type> \
+using Matrix##SizeSuffix = Matrix<Type, Size, Size>; \
+/** \ingroup matrixtypedefs */ \
+/** \brief \cpp11 */ \
+template <typename Type> \
+using Vector##SizeSuffix = Matrix<Type, Size, 1>; \
+/** \ingroup matrixtypedefs */ \
+/** \brief \cpp11 */ \
+template <typename Type> \
+using RowVector##SizeSuffix = Matrix<Type, 1, Size>;
+
+#define EIGEN_MAKE_FIXED_TYPEDEFS(Size) \
+/** \ingroup matrixtypedefs */ \
+/** \brief \cpp11 */ \
+template <typename Type> \
+using Matrix##Size##X = Matrix<Type, Size, Dynamic>; \
+/** \ingroup matrixtypedefs */ \
+/** \brief \cpp11 */ \
+template <typename Type> \
+using Matrix##X##Size = Matrix<Type, Dynamic, Size>;
+
+EIGEN_MAKE_TYPEDEFS(2, 2)
+EIGEN_MAKE_TYPEDEFS(3, 3)
+EIGEN_MAKE_TYPEDEFS(4, 4)
+EIGEN_MAKE_TYPEDEFS(Dynamic, X)
+EIGEN_MAKE_FIXED_TYPEDEFS(2)
+EIGEN_MAKE_FIXED_TYPEDEFS(3)
+EIGEN_MAKE_FIXED_TYPEDEFS(4)
+
+/** \ingroup matrixtypedefs
+ * \brief \cpp11 */
+template <typename Type, int Size>
+using Vector = Matrix<Type, Size, 1>;
+
+/** \ingroup matrixtypedefs
+ * \brief \cpp11 */
+template <typename Type, int Size>
+using RowVector = Matrix<Type, 1, Size>;
+
+#undef EIGEN_MAKE_TYPEDEFS
+#undef EIGEN_MAKE_FIXED_TYPEDEFS
+
+#endif // EIGEN_HAS_CXX11
+
} // end namespace Eigen
#endif // EIGEN_MATRIX_H
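// ---- Editor's example (not part of the diff) ----
// Sketch combining the C++11 initializer-list constructor and the new template aliases
// (Matrix3<T>, MatrixX<T>, Vector<T,N>) declared above; requires a C++11 compiler.
#include <Eigen/Dense>

int main() {
  Eigen::Matrix3<float>    A = Eigen::Matrix3<float>::Identity();
  Eigen::Vector<double, 5> v{{1.0, 2.0, 3.0, 4.0, 5.0}};   // single row, implicitly transposed
  Eigen::MatrixX<double>   B{{1.0, 2.0, 3.0},
                             {4.0, 5.0, 6.0}};             // 2x3, one nested list per row
  (void)A; (void)v; (void)B;
  return 0;
}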
diff --git a/Eigen/src/Core/MatrixBase.h b/Eigen/src/Core/MatrixBase.h
index 596cdd133..4744e5cc4 100644
--- a/Eigen/src/Core/MatrixBase.h
+++ b/Eigen/src/Core/MatrixBase.h
@@ -468,6 +468,11 @@ template<typename Derived> class MatrixBase
const MatrixFunctionReturnValue<Derived> matrixFunction(StemFunction f) const;
EIGEN_MATRIX_FUNCTION(MatrixFunctionReturnValue, cosh, hyperbolic cosine)
EIGEN_MATRIX_FUNCTION(MatrixFunctionReturnValue, sinh, hyperbolic sine)
+#if EIGEN_HAS_CXX11_MATH
+ EIGEN_MATRIX_FUNCTION(MatrixFunctionReturnValue, atanh, inverse hyperbolic tangent)
+ EIGEN_MATRIX_FUNCTION(MatrixFunctionReturnValue, acosh, inverse hyperbolic cosine)
+ EIGEN_MATRIX_FUNCTION(MatrixFunctionReturnValue, asinh, inverse hyperbolic sine)
+#endif
EIGEN_MATRIX_FUNCTION(MatrixFunctionReturnValue, cos, cosine)
EIGEN_MATRIX_FUNCTION(MatrixFunctionReturnValue, sin, sine)
EIGEN_MATRIX_FUNCTION(MatrixSquareRootReturnValue, sqrt, square root)
diff --git a/Eigen/src/Core/NestByValue.h b/Eigen/src/Core/NestByValue.h
index 01cf192e9..239bbba63 100644
--- a/Eigen/src/Core/NestByValue.h
+++ b/Eigen/src/Core/NestByValue.h
@@ -16,7 +16,11 @@ namespace Eigen {
namespace internal {
template<typename ExpressionType>
struct traits<NestByValue<ExpressionType> > : public traits<ExpressionType>
-{};
+{
+ enum {
+ Flags = traits<ExpressionType>::Flags & ~NestByRefBit
+ };
+};
}
/** \class NestByValue
@@ -43,55 +47,11 @@ template<typename ExpressionType> class NestByValue
EIGEN_DEVICE_FUNC inline Index rows() const { return m_expression.rows(); }
EIGEN_DEVICE_FUNC inline Index cols() const { return m_expression.cols(); }
- EIGEN_DEVICE_FUNC inline Index outerStride() const { return m_expression.outerStride(); }
- EIGEN_DEVICE_FUNC inline Index innerStride() const { return m_expression.innerStride(); }
-
- EIGEN_DEVICE_FUNC inline const CoeffReturnType coeff(Index row, Index col) const
- {
- return m_expression.coeff(row, col);
- }
-
- EIGEN_DEVICE_FUNC inline Scalar& coeffRef(Index row, Index col)
- {
- return m_expression.const_cast_derived().coeffRef(row, col);
- }
-
- EIGEN_DEVICE_FUNC inline const CoeffReturnType coeff(Index index) const
- {
- return m_expression.coeff(index);
- }
-
- EIGEN_DEVICE_FUNC inline Scalar& coeffRef(Index index)
- {
- return m_expression.const_cast_derived().coeffRef(index);
- }
-
- template<int LoadMode>
- EIGEN_DEVICE_FUNC inline const PacketScalar packet(Index row, Index col) const
- {
- return m_expression.template packet<LoadMode>(row, col);
- }
-
- template<int LoadMode>
- EIGEN_DEVICE_FUNC inline void writePacket(Index row, Index col, const PacketScalar& x)
- {
- m_expression.const_cast_derived().template writePacket<LoadMode>(row, col, x);
- }
-
- template<int LoadMode>
- EIGEN_DEVICE_FUNC inline const PacketScalar packet(Index index) const
- {
- return m_expression.template packet<LoadMode>(index);
- }
-
- template<int LoadMode>
- EIGEN_DEVICE_FUNC inline void writePacket(Index index, const PacketScalar& x)
- {
- m_expression.const_cast_derived().template writePacket<LoadMode>(index, x);
- }
EIGEN_DEVICE_FUNC operator const ExpressionType&() const { return m_expression; }
+ EIGEN_DEVICE_FUNC const ExpressionType& nestedExpression() const { return m_expression; }
+
protected:
const ExpressionType m_expression;
};
@@ -105,6 +65,21 @@ DenseBase<Derived>::nestByValue() const
return NestByValue<Derived>(derived());
}
+namespace internal {
+
+// Evaluator of NestByValue -> forward to the evaluator of the nested expression
+template<typename ArgType>
+struct evaluator<NestByValue<ArgType> >
+ : public evaluator<ArgType>
+{
+ typedef evaluator<ArgType> Base;
+
+ EIGEN_DEVICE_FUNC explicit evaluator(const NestByValue<ArgType>& xpr)
+ : Base(xpr.nestedExpression())
+ {}
+};
+}
+
} // end namespace Eigen
#endif // EIGEN_NESTBYVALUE_H
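// ---- Editor's example (not part of the diff) ----
// With the evaluator added above, NestByValue becomes an empty shell that forwards to the
// evaluator of the wrapped expression; nestByValue() itself is unchanged user-level API.
#include <Eigen/Dense>

int main() {
  Eigen::MatrixXd a = Eigen::MatrixXd::Random(3, 3);
  Eigen::MatrixXd b = Eigen::MatrixXd::Random(3, 3);
  // The (a + b) expression is copied into the wrapper instead of being held by reference.
  Eigen::MatrixXd c = (a + b).nestByValue();
  (void)c;
  return 0;
}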
diff --git a/Eigen/src/Core/PlainObjectBase.h b/Eigen/src/Core/PlainObjectBase.h
index f551dabb0..6de78fd2f 100644
--- a/Eigen/src/Core/PlainObjectBase.h
+++ b/Eigen/src/Core/PlainObjectBase.h
@@ -526,6 +526,71 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
// EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
}
+ #if EIGEN_HAS_CXX11
+ /** \brief Construct a row or column vector with fixed size from an arbitrary number of coefficients. \cpp11
+ *
+ * \only_for_vectors
+ *
+ * This constructor is for 1D arrays or vectors with more than 4 coefficients.
+ * There exist C++98 analogue constructors for fixed-size arrays/vectors having 1, 2, 3, or 4 coefficients.
+ *
+ * \warning To construct a column (resp. row) vector of fixed length, the number of values passed to this
+ * constructor must match the fixed number of rows (resp. columns) of \c *this.
+ */
+ template <typename... ArgTypes>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ PlainObjectBase(const Scalar& a0, const Scalar& a1, const Scalar& a2, const Scalar& a3, const ArgTypes&... args)
+ : m_storage()
+ {
+ _check_template_params();
+ EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(PlainObjectBase, sizeof...(args) + 4);
+ m_storage.data()[0] = a0;
+ m_storage.data()[1] = a1;
+ m_storage.data()[2] = a2;
+ m_storage.data()[3] = a3;
+ int i = 4;
+ auto x = {(m_storage.data()[i++] = args, 0)...};
+ static_cast<void>(x);
+ }
+
+ /** \brief Constructs a Matrix or Array and initializes it with the coefficients given by an initializer list of initializer
+ * lists \cpp11
+ */
+ EIGEN_DEVICE_FUNC
+ explicit EIGEN_STRONG_INLINE PlainObjectBase(const std::initializer_list<std::initializer_list<Scalar>>& list)
+ : m_storage()
+ {
+ _check_template_params();
+
+ size_t list_size = 0;
+ if (list.begin() != list.end()) {
+ list_size = list.begin()->size();
+ }
+
+ // This is to allow syntax like VectorXi {{1, 2, 3, 4}}
+ if (ColsAtCompileTime == 1 && list.size() == 1) {
+ eigen_assert(list_size == static_cast<size_t>(RowsAtCompileTime) || RowsAtCompileTime == Dynamic);
+ resize(list_size, ColsAtCompileTime);
+ std::copy(list.begin()->begin(), list.begin()->end(), m_storage.data());
+ } else {
+ eigen_assert(list.size() == static_cast<size_t>(RowsAtCompileTime) || RowsAtCompileTime == Dynamic);
+ eigen_assert(list_size == static_cast<size_t>(ColsAtCompileTime) || ColsAtCompileTime == Dynamic);
+ resize(list.size(), list_size);
+
+ Index row_index = 0;
+ for (const std::initializer_list<Scalar>& row : list) {
+ eigen_assert(list_size == row.size());
+ Index col_index = 0;
+ for (const Scalar& e : row) {
+ coeffRef(row_index, col_index) = e;
+ ++col_index;
+ }
+ ++row_index;
+ }
+ }
+ }
+ #endif // end EIGEN_HAS_CXX11
+
/** \sa PlainObjectBase::operator=(const EigenBase<OtherDerived>&) */
template<typename OtherDerived>
EIGEN_DEVICE_FUNC
@@ -737,8 +802,10 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE void _init2(Index rows, Index cols, typename internal::enable_if<Base::SizeAtCompileTime!=2,T0>::type* = 0)
{
- EIGEN_STATIC_ASSERT(bool(NumTraits<T0>::IsInteger) &&
- bool(NumTraits<T1>::IsInteger),
+ const bool t0_is_integer_alike = internal::is_valid_index_type<T0>::value;
+ const bool t1_is_integer_alike = internal::is_valid_index_type<T1>::value;
+ EIGEN_STATIC_ASSERT(t0_is_integer_alike &&
+ t1_is_integer_alike,
FLOATING_POINT_ARGUMENT_PASSED__INTEGER_WAS_EXPECTED)
resize(rows,cols);
}
@@ -773,9 +840,9 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
&& ((!internal::is_same<typename internal::traits<Derived>::XprKind,ArrayXpr>::value || Base::SizeAtCompileTime==Dynamic)),T>::type* = 0)
{
// NOTE MSVC 2008 complains if we directly put bool(NumTraits<T>::IsInteger) as the EIGEN_STATIC_ASSERT argument.
- const bool is_integer = NumTraits<T>::IsInteger;
- EIGEN_UNUSED_VARIABLE(is_integer);
- EIGEN_STATIC_ASSERT(is_integer,
+ const bool is_integer_alike = internal::is_valid_index_type<T>::value;
+ EIGEN_UNUSED_VARIABLE(is_integer_alike);
+ EIGEN_STATIC_ASSERT(is_integer_alike,
FLOATING_POINT_ARGUMENT_PASSED__INTEGER_WAS_EXPECTED)
resize(size);
}
@@ -882,7 +949,7 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
* of same type it is enough to swap the data pointers.
*/
template<typename OtherDerived>
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
void swap(DenseBase<OtherDerived> & other)
{
enum { SwapPointers = internal::is_same<Derived, OtherDerived>::value && Base::SizeAtCompileTime==Dynamic };
@@ -893,7 +960,7 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
* \brief const version forwarded to DenseBase::swap
*/
template<typename OtherDerived>
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
void swap(DenseBase<OtherDerived> const & other)
{ Base::swap(other.derived()); }
@@ -1027,7 +1094,7 @@ template<typename MatrixTypeA, typename MatrixTypeB, bool SwapPointers>
struct matrix_swap_impl
{
EIGEN_DEVICE_FUNC
- static inline void run(MatrixTypeA& a, MatrixTypeB& b)
+ static EIGEN_STRONG_INLINE void run(MatrixTypeA& a, MatrixTypeB& b)
{
a.base().swap(b);
}
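// ---- Editor's example (not part of the diff) ----
// Sketch of the initializer-list constructor added above, including the single-nested-row
// shortcut mentioned in its body ("VectorXi {{1, 2, 3, 4}}"); requires a C++11 compiler.
#include <Eigen/Dense>
#include <cassert>

int main() {
  Eigen::VectorXi    v{{1, 2, 3, 4}};   // one inner list -> column vector of size 4
  Eigen::RowVectorXi r{{1, 2, 3, 4}};   // for a row vector the single inner list gives the columns
  Eigen::MatrixXi    m{{1, 2},
                       {3, 4}};         // general case: one inner list per row
  assert(v.size() == 4 && r.size() == 4 && m.rows() == 2 && m.cols() == 2);
  return 0;
}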
diff --git a/Eigen/src/Core/Product.h b/Eigen/src/Core/Product.h
index 70790dbd4..13d5662df 100644
--- a/Eigen/src/Core/Product.h
+++ b/Eigen/src/Core/Product.h
@@ -90,18 +90,23 @@ class Product : public ProductImpl<_Lhs,_Rhs,Option,
typedef typename internal::remove_all<LhsNested>::type LhsNestedCleaned;
typedef typename internal::remove_all<RhsNested>::type RhsNestedCleaned;
- EIGEN_DEVICE_FUNC Product(const Lhs& lhs, const Rhs& rhs) : m_lhs(lhs), m_rhs(rhs)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Product(const Lhs& lhs, const Rhs& rhs) : m_lhs(lhs), m_rhs(rhs)
{
eigen_assert(lhs.cols() == rhs.rows()
&& "invalid matrix product"
&& "if you wanted a coeff-wise or a dot product use the respective explicit functions");
}
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rows() const { return m_lhs.rows(); }
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index cols() const { return m_rhs.cols(); }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Index rows() const { return m_lhs.rows(); }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Index cols() const { return m_rhs.cols(); }
- EIGEN_DEVICE_FUNC const LhsNestedCleaned& lhs() const { return m_lhs; }
- EIGEN_DEVICE_FUNC const RhsNestedCleaned& rhs() const { return m_rhs; }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const LhsNestedCleaned& lhs() const { return m_lhs; }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const RhsNestedCleaned& rhs() const { return m_rhs; }
protected:
diff --git a/Eigen/src/Core/ProductEvaluators.h b/Eigen/src/Core/ProductEvaluators.h
index 246bca3e5..d53dc30a3 100644
--- a/Eigen/src/Core/ProductEvaluators.h
+++ b/Eigen/src/Core/ProductEvaluators.h
@@ -411,35 +411,58 @@ struct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,CoeffBasedProductMode>
call_assignment_no_alias(dst, lhs.lazyProduct(rhs), internal::sub_assign_op<typename Dst::Scalar,Scalar>());
}
- // Catch "dst {,+,-}= (s*A)*B" and evaluate it lazily by moving out the scalar factor:
- // dst {,+,-}= s * (A.lazyProduct(B))
- // This is a huge benefit for heap-allocated matrix types as it save one costly allocation.
- // For them, this strategy is also faster than simply by-passing the heap allocation through
- // stack allocation.
- // For fixed sizes matrices, this is less obvious, it is sometimes x2 faster, but sometimes x3 slower,
- // and the behavior depends also a lot on the compiler... so let's be conservative and enable them for dynamic-size only,
- // that is when coming from generic_product_impl<...,GemmProduct> in file GeneralMatrixMatrix.h
- template<typename Dst, typename Scalar1, typename Scalar2, typename Plain1, typename Xpr2, typename Func>
+ // This is a special evaluation path called from generic_product_impl<...,GemmProduct> in file GeneralMatrixMatrix.h
+ // This variant tries to extract scalar multiples from both the LHS and RHS and factor them out. For instance:
+ // dst {,+,-}= (s1*A)*(B*s2)
+ // will be rewritten as:
+ // dst {,+,-}= (s1*s2) * (A.lazyProduct(B))
+ // There are at least four benefits of doing so:
+ // 1 - huge performance gain for heap-allocated matrix types as it saves costly allocations.
+ // 2 - it is faster than simply by-passing the heap allocation through stack allocation.
+ // 3 - it makes this fallback consistent with the heavy GEMM routine.
+ // 4 - it fully by-passes huge stack allocation attempts when multiplying huge fixed-size matrices.
+ // (see https://stackoverflow.com/questions/54738495)
+ // For small fixed-size matrices, however, the gains are less obvious: it is sometimes x2 faster, but sometimes x3 slower,
+ // and the behavior also depends a lot on the compiler... This is why this re-writing strategy is currently
+ // enabled only when falling back from the main GEMM.
+ template<typename Dst, typename Func>
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
- void eval_dynamic(Dst& dst, const CwiseBinaryOp<internal::scalar_product_op<Scalar1,Scalar2>,
- const CwiseNullaryOp<internal::scalar_constant_op<Scalar1>, Plain1>, Xpr2>& lhs, const Rhs& rhs, const Func &func)
+ void eval_dynamic(Dst& dst, const Lhs& lhs, const Rhs& rhs, const Func &func)
{
- call_restricted_packet_assignment_no_alias(dst, lhs.lhs().functor().m_other * lhs.rhs().lazyProduct(rhs), func);
+ enum {
+ HasScalarFactor = blas_traits<Lhs>::HasScalarFactor || blas_traits<Rhs>::HasScalarFactor,
+ ConjLhs = blas_traits<Lhs>::NeedToConjugate,
+ ConjRhs = blas_traits<Rhs>::NeedToConjugate
+ };
+ // FIXME: in c++11 this should be auto, and extractScalarFactor should also return auto
+ // this is important for real*complex_mat
+ Scalar actualAlpha = blas_traits<Lhs>::extractScalarFactor(lhs)
+ * blas_traits<Rhs>::extractScalarFactor(rhs);
+ eval_dynamic_impl(dst,
+ blas_traits<Lhs>::extract(lhs).template conjugateIf<ConjLhs>(),
+ blas_traits<Rhs>::extract(rhs).template conjugateIf<ConjRhs>(),
+ func,
+ actualAlpha,
+ typename conditional<HasScalarFactor,true_type,false_type>::type());
}
- // Here, we we always have LhsT==Lhs, but we need to make it a template type to make the above
- // overload more specialized.
- template<typename Dst, typename LhsT, typename Func>
+protected:
+
+ template<typename Dst, typename LhsT, typename RhsT, typename Func, typename Scalar>
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
- void eval_dynamic(Dst& dst, const LhsT& lhs, const Rhs& rhs, const Func &func)
+ void eval_dynamic_impl(Dst& dst, const LhsT& lhs, const RhsT& rhs, const Func &func, const Scalar& s /* == 1 */, false_type)
{
+ EIGEN_UNUSED_VARIABLE(s);
+ eigen_internal_assert(s==Scalar(1));
call_restricted_packet_assignment_no_alias(dst, lhs.lazyProduct(rhs), func);
}
-
-
-// template<typename Dst>
-// static inline void scaleAndAddTo(Dst& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha)
-// { dst.noalias() += alpha * lhs.lazyProduct(rhs); }
+
+ template<typename Dst, typename LhsT, typename RhsT, typename Func, typename Scalar>
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ void eval_dynamic_impl(Dst& dst, const LhsT& lhs, const RhsT& rhs, const Func &func, const Scalar& s, true_type)
+ {
+ call_restricted_packet_assignment_no_alias(dst, s * lhs.lazyProduct(rhs), func);
+ }
};
// This specialization enforces the use of a coefficient-based evaluation strategy
@@ -582,7 +605,8 @@ struct product_evaluator<Product<Lhs, Rhs, LazyProduct>, ProductTag, DenseShape,
* which is why we don't set the LinearAccessBit.
* TODO: this seems possible when the result is a vector
*/
- EIGEN_DEVICE_FUNC const CoeffReturnType coeff(Index index) const
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const CoeffReturnType coeff(Index index) const
{
const Index row = (RowsAtCompileTime == 1 || MaxRowsAtCompileTime==1) ? 0 : index;
const Index col = (RowsAtCompileTime == 1 || MaxRowsAtCompileTime==1) ? index : 0;
@@ -590,6 +614,7 @@ struct product_evaluator<Product<Lhs, Rhs, LazyProduct>, ProductTag, DenseShape,
}
template<int LoadMode, typename PacketType>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const PacketType packet(Index row, Index col) const
{
PacketType res;
@@ -601,6 +626,7 @@ struct product_evaluator<Product<Lhs, Rhs, LazyProduct>, ProductTag, DenseShape,
}
template<int LoadMode, typename PacketType>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const PacketType packet(Index index) const
{
const Index row = (RowsAtCompileTime == 1 || MaxRowsAtCompileTime==1) ? 0 : index;
@@ -629,7 +655,8 @@ struct product_evaluator<Product<Lhs, Rhs, DefaultProduct>, LazyCoeffBasedProduc
enum {
Flags = Base::Flags | EvalBeforeNestingBit
};
- EIGEN_DEVICE_FUNC explicit product_evaluator(const XprType& xpr)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ explicit product_evaluator(const XprType& xpr)
: Base(BaseProduct(xpr.lhs(),xpr.rhs()))
{}
};
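// ---- Editor's example (not part of the diff) ----
// Sketch of the kind of expression the new eval_dynamic path rewrites when the product falls
// back from the GEMM kernel: the scalar factors are extracted and applied once, on top of a
// lazy product, instead of materializing s1*A and B*s2 as temporaries.
#include <Eigen/Dense>

int main() {
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(8, 8);
  Eigen::MatrixXd B = Eigen::MatrixXd::Random(8, 8);
  Eigen::MatrixXd C = Eigen::MatrixXd::Zero(8, 8);

  C.noalias() += (2.0 * A) * (B * 0.5);                // what the user writes
  Eigen::MatrixXd D = (2.0 * 0.5) * A.lazyProduct(B);  // the equivalent, rewritten form
  (void)C; (void)D;
  return 0;
}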
diff --git a/Eigen/src/Core/Redux.h b/Eigen/src/Core/Redux.h
index 720b6030c..2eef5abc5 100644
--- a/Eigen/src/Core/Redux.h
+++ b/Eigen/src/Core/Redux.h
@@ -359,7 +359,8 @@ class redux_evaluator : public internal::evaluator<_XprType>
typedef internal::evaluator<_XprType> Base;
public:
typedef _XprType XprType;
- EIGEN_DEVICE_FUNC explicit redux_evaluator(const XprType &xpr) : Base(xpr) {}
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ explicit redux_evaluator(const XprType &xpr) : Base(xpr) {}
typedef typename XprType::Scalar Scalar;
typedef typename XprType::CoeffReturnType CoeffReturnType;
@@ -375,11 +376,12 @@ public:
InnerSizeAtCompileTime = XprType::InnerSizeAtCompileTime
};
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
CoeffReturnType coeffByOuterInner(Index outer, Index inner) const
{ return Base::coeff(IsRowMajor ? outer : inner, IsRowMajor ? inner : outer); }
template<int LoadMode, typename PacketType>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
PacketType packetByOuterInner(Index outer, Index inner) const
{ return Base::template packet<LoadMode,PacketType>(IsRowMajor ? outer : inner, IsRowMajor ? inner : outer); }
@@ -397,6 +399,8 @@ public:
* The template parameter \a BinaryOp is the type of the functor \a func which must be
* an associative operator. Both current C++98 and C++11 functor styles are handled.
*
+ * \warning the matrix must not be empty, otherwise an assertion is triggered.
+ *
* \sa DenseBase::sum(), DenseBase::minCoeff(), DenseBase::maxCoeff(), MatrixBase::colwise(), MatrixBase::rowwise()
*/
template<typename Derived>
@@ -415,6 +419,7 @@ DenseBase<Derived>::redux(const Func& func) const
}
/** \returns the minimum of all coefficients of \c *this.
+ * \warning the matrix must not be empty, otherwise an assertion is triggered.
* \warning the result is undefined if \c *this contains NaN.
*/
template<typename Derived>
@@ -425,6 +430,7 @@ DenseBase<Derived>::minCoeff() const
}
/** \returns the maximum of all coefficients of \c *this.
+ * \warning the matrix must not be empty, otherwise an assertion is triggered.
* \warning the result is undefined if \c *this contains NaN.
*/
template<typename Derived>
diff --git a/Eigen/src/Core/Ref.h b/Eigen/src/Core/Ref.h
index ac9502bc4..172c8ffb6 100644
--- a/Eigen/src/Core/Ref.h
+++ b/Eigen/src/Core/Ref.h
@@ -28,12 +28,13 @@ struct traits<Ref<_PlainObjectType, _Options, _StrideType> >
template<typename Derived> struct match {
enum {
+ IsVectorAtCompileTime = PlainObjectType::IsVectorAtCompileTime || Derived::IsVectorAtCompileTime,
HasDirectAccess = internal::has_direct_access<Derived>::ret,
- StorageOrderMatch = PlainObjectType::IsVectorAtCompileTime || Derived::IsVectorAtCompileTime || ((PlainObjectType::Flags&RowMajorBit)==(Derived::Flags&RowMajorBit)),
+ StorageOrderMatch = IsVectorAtCompileTime || ((PlainObjectType::Flags&RowMajorBit)==(Derived::Flags&RowMajorBit)),
InnerStrideMatch = int(StrideType::InnerStrideAtCompileTime)==int(Dynamic)
|| int(StrideType::InnerStrideAtCompileTime)==int(Derived::InnerStrideAtCompileTime)
|| (int(StrideType::InnerStrideAtCompileTime)==0 && int(Derived::InnerStrideAtCompileTime)==1),
- OuterStrideMatch = Derived::IsVectorAtCompileTime
+ OuterStrideMatch = IsVectorAtCompileTime
|| int(StrideType::OuterStrideAtCompileTime)==int(Dynamic) || int(StrideType::OuterStrideAtCompileTime)==int(Derived::OuterStrideAtCompileTime),
// NOTE, this indirection of evaluator<Derived>::Alignment is needed
// to workaround a very strange bug in MSVC related to the instantiation
diff --git a/Eigen/src/Core/Reshaped.h b/Eigen/src/Core/Reshaped.h
index b7bd1b292..c955815e6 100644
--- a/Eigen/src/Core/Reshaped.h
+++ b/Eigen/src/Core/Reshaped.h
@@ -191,7 +191,7 @@ class ReshapedImpl_dense<XprType,Rows,Cols,Order,false>
/** \returns the nested expression */
EIGEN_DEVICE_FUNC
typename internal::remove_reference<XprType>::type&
- nestedExpression() { return m_xpr.const_cast_derived(); }
+ nestedExpression() { return m_xpr; }
protected:
diff --git a/Eigen/src/Core/Reverse.h b/Eigen/src/Core/Reverse.h
index 8b6b3ab03..853093923 100644
--- a/Eigen/src/Core/Reverse.h
+++ b/Eigen/src/Core/Reverse.h
@@ -171,8 +171,10 @@ struct vectorwise_reverse_inplace_impl<Vertical>
template<typename ExpressionType>
static void run(ExpressionType &xpr)
{
+ const int HalfAtCompileTime = ExpressionType::RowsAtCompileTime==Dynamic?Dynamic:ExpressionType::RowsAtCompileTime/2;
Index half = xpr.rows()/2;
- xpr.topRows(half).swap(xpr.bottomRows(half).colwise().reverse());
+ xpr.topRows(fix<HalfAtCompileTime>(half))
+ .swap(xpr.bottomRows(fix<HalfAtCompileTime>(half)).colwise().reverse());
}
};
@@ -182,8 +184,10 @@ struct vectorwise_reverse_inplace_impl<Horizontal>
template<typename ExpressionType>
static void run(ExpressionType &xpr)
{
+ const int HalfAtCompileTime = ExpressionType::ColsAtCompileTime==Dynamic?Dynamic:ExpressionType::ColsAtCompileTime/2;
Index half = xpr.cols()/2;
- xpr.leftCols(half).swap(xpr.rightCols(half).rowwise().reverse());
+ xpr.leftCols(fix<HalfAtCompileTime>(half))
+ .swap(xpr.rightCols(fix<HalfAtCompileTime>(half)).rowwise().reverse());
}
};
@@ -203,7 +207,7 @@ struct vectorwise_reverse_inplace_impl<Horizontal>
template<typename ExpressionType, int Direction>
EIGEN_DEVICE_FUNC void VectorwiseOp<ExpressionType,Direction>::reverseInPlace()
{
- internal::vectorwise_reverse_inplace_impl<Direction>::run(_expression().const_cast_derived());
+ internal::vectorwise_reverse_inplace_impl<Direction>::run(m_matrix);
}
} // end namespace Eigen
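// ---- Editor's example (not part of the diff) ----
// The vectorwise in-place reverse whose implementation is adjusted above (the half-size blocks
// now carry a compile-time size through fix<>); the user-level calls are unchanged.
#include <Eigen/Dense>

int main() {
  Eigen::Matrix4d m = Eigen::Matrix4d::Random();
  m.colwise().reverseInPlace();   // reverses each column, i.e. flips the row order
  m.rowwise().reverseInPlace();   // reverses each row, i.e. flips the column order
  (void)m;
  return 0;
}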
diff --git a/Eigen/src/Core/SelfAdjointView.h b/Eigen/src/Core/SelfAdjointView.h
index 2cf3fa1ef..2173799d9 100644
--- a/Eigen/src/Core/SelfAdjointView.h
+++ b/Eigen/src/Core/SelfAdjointView.h
@@ -61,6 +61,7 @@ template<typename _MatrixType, unsigned int UpLo> class SelfAdjointView
typedef typename internal::traits<SelfAdjointView>::Scalar Scalar;
typedef typename MatrixType::StorageIndex StorageIndex;
typedef typename internal::remove_all<typename MatrixType::ConjugateReturnType>::type MatrixConjugateReturnType;
+ typedef SelfAdjointView<typename internal::add_const<MatrixType>::type, UpLo> ConstSelfAdjointView;
enum {
Mode = internal::traits<SelfAdjointView>::Mode,
@@ -197,6 +198,18 @@ template<typename _MatrixType, unsigned int UpLo> class SelfAdjointView
inline const ConjugateReturnType conjugate() const
{ return ConjugateReturnType(m_matrix.conjugate()); }
+ /** \returns an expression of the complex conjugate of \c *this if Cond==true,
+ * returns \c *this otherwise.
+ */
+ template<bool Cond>
+ EIGEN_DEVICE_FUNC
+ inline typename internal::conditional<Cond,ConjugateReturnType,ConstSelfAdjointView>::type
+ conjugateIf() const
+ {
+ typedef typename internal::conditional<Cond,ConjugateReturnType,ConstSelfAdjointView>::type ReturnType;
+ return ReturnType(m_matrix.template conjugateIf<Cond>());
+ }
+
typedef SelfAdjointView<const typename MatrixType::AdjointReturnType,TransposeMode> AdjointReturnType;
/** \sa MatrixBase::adjoint() const */
EIGEN_DEVICE_FUNC
diff --git a/Eigen/src/Core/Solve.h b/Eigen/src/Core/Solve.h
index 2bf940a26..ec4b4a987 100644
--- a/Eigen/src/Core/Solve.h
+++ b/Eigen/src/Core/Solve.h
@@ -19,7 +19,7 @@ template<typename Decomposition, typename RhsType, typename StorageKind> class S
*
* \brief Pseudo expression representing a solving operation
*
- * \tparam Decomposition the type of the matrix or decomposion object
+ * \tparam Decomposition the type of the matrix or decomposition object
* \tparam Rhstype the type of the right-hand side
*
* This class represents an expression of A.solve(B)
diff --git a/Eigen/src/Core/SolverBase.h b/Eigen/src/Core/SolverBase.h
index 702a5485c..501461042 100644
--- a/Eigen/src/Core/SolverBase.h
+++ b/Eigen/src/Core/SolverBase.h
@@ -14,8 +14,35 @@ namespace Eigen {
namespace internal {
+template<typename Derived>
+struct solve_assertion {
+ template<bool Transpose_, typename Rhs>
+ static void run(const Derived& solver, const Rhs& b) { solver.template _check_solve_assertion<Transpose_>(b); }
+};
+
+template<typename Derived>
+struct solve_assertion<Transpose<Derived> >
+{
+ typedef Transpose<Derived> type;
+
+ template<bool Transpose_, typename Rhs>
+ static void run(const type& transpose, const Rhs& b)
+ {
+ internal::solve_assertion<typename internal::remove_all<Derived>::type>::template run<true>(transpose.nestedExpression(), b);
+ }
+};
+template<typename Scalar, typename Derived>
+struct solve_assertion<CwiseUnaryOp<Eigen::internal::scalar_conjugate_op<Scalar>, const Transpose<Derived> > >
+{
+ typedef CwiseUnaryOp<Eigen::internal::scalar_conjugate_op<Scalar>, const Transpose<Derived> > type;
+ template<bool Transpose_, typename Rhs>
+ static void run(const type& adjoint, const Rhs& b)
+ {
+ internal::solve_assertion<typename internal::remove_all<Transpose<Derived> >::type>::template run<true>(adjoint.nestedExpression(), b);
+ }
+};
} // end namespace internal
/** \class SolverBase
@@ -35,7 +62,7 @@ namespace internal {
*
 * \warning Currently, any other usage of transpose() and adjoint() is not supported and will produce compilation errors.
*
- * \sa class PartialPivLU, class FullPivLU
+ * \sa class PartialPivLU, class FullPivLU, class HouseholderQR, class ColPivHouseholderQR, class FullPivHouseholderQR, class CompleteOrthogonalDecomposition, class LLT, class LDLT, class SVDBase
*/
template<typename Derived>
class SolverBase : public EigenBase<Derived>
@@ -46,6 +73,9 @@ class SolverBase : public EigenBase<Derived>
typedef typename internal::traits<Derived>::Scalar Scalar;
typedef Scalar CoeffReturnType;
+ template<typename Derived_>
+ friend struct internal::solve_assertion;
+
enum {
RowsAtCompileTime = internal::traits<Derived>::RowsAtCompileTime,
ColsAtCompileTime = internal::traits<Derived>::ColsAtCompileTime,
@@ -75,7 +105,7 @@ class SolverBase : public EigenBase<Derived>
inline const Solve<Derived, Rhs>
solve(const MatrixBase<Rhs>& b) const
{
- eigen_assert(derived().rows()==b.rows() && "solve(): invalid number of rows of the right hand side matrix b");
+ internal::solve_assertion<typename internal::remove_all<Derived>::type>::template run<false>(derived(), b);
return Solve<Derived, Rhs>(derived(), b.derived());
}
@@ -113,6 +143,13 @@ class SolverBase : public EigenBase<Derived>
}
protected:
+
+ template<bool Transpose_, typename Rhs>
+ void _check_solve_assertion(const Rhs& b) const {
+ EIGEN_ONLY_USED_FOR_DEBUG(b);
+ eigen_assert(derived().m_isInitialized && "Solver is not initialized.");
+ eigen_assert((Transpose_?derived().cols():derived().rows())==b.rows() && "SolverBase::solve(): invalid number of rows of the right hand side matrix b");
+ }
};
namespace internal {
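
Note: with the new solve_assertion dispatch, the right-hand-side size check follows transpose()/adjoint() through to the underlying decomposition, comparing against cols() instead of rows() when solving the transposed system. A small sketch (assumes this patch; PartialPivLU is one of the SolverBase-derived decompositions):

    #include <Eigen/Dense>
    int main() {
      Eigen::MatrixXd A = Eigen::MatrixXd::Random(4,4);
      Eigen::VectorXd b = Eigen::VectorXd::Random(4);
      Eigen::PartialPivLU<Eigen::MatrixXd> lu(A);
      Eigen::VectorXd x = lu.solve(b);             // asserts b.rows() == A.rows()
      Eigen::VectorXd y = lu.transpose().solve(b); // asserts b.rows() == A.cols()
      Eigen::VectorXd z = lu.adjoint().solve(b);   // same check on the adjoint path
      return 0;
    }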
diff --git a/Eigen/src/Core/Swap.h b/Eigen/src/Core/Swap.h
index d70200918..180a4e5ad 100644
--- a/Eigen/src/Core/Swap.h
+++ b/Eigen/src/Core/Swap.h
@@ -30,12 +30,13 @@ public:
typedef typename Base::DstXprType DstXprType;
typedef swap_assign_op<Scalar> Functor;
- EIGEN_DEVICE_FUNC generic_dense_assignment_kernel(DstEvaluatorTypeT &dst, const SrcEvaluatorTypeT &src, const Functor &func, DstXprType& dstExpr)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ generic_dense_assignment_kernel(DstEvaluatorTypeT &dst, const SrcEvaluatorTypeT &src, const Functor &func, DstXprType& dstExpr)
: Base(dst, src, func, dstExpr)
{}
template<int StoreMode, int LoadMode, typename PacketType>
- void assignPacket(Index row, Index col)
+ EIGEN_STRONG_INLINE void assignPacket(Index row, Index col)
{
PacketType tmp = m_src.template packet<LoadMode,PacketType>(row,col);
const_cast<SrcEvaluatorTypeT&>(m_src).template writePacket<LoadMode>(row,col, m_dst.template packet<StoreMode,PacketType>(row,col));
@@ -43,7 +44,7 @@ public:
}
template<int StoreMode, int LoadMode, typename PacketType>
- void assignPacket(Index index)
+ EIGEN_STRONG_INLINE void assignPacket(Index index)
{
PacketType tmp = m_src.template packet<LoadMode,PacketType>(index);
const_cast<SrcEvaluatorTypeT&>(m_src).template writePacket<LoadMode>(index, m_dst.template packet<StoreMode,PacketType>(index));
@@ -52,7 +53,7 @@ public:
// TODO find a simple way not to have to copy/paste this function from generic_dense_assignment_kernel, by simple I mean no CRTP (Gael)
template<int StoreMode, int LoadMode, typename PacketType>
- void assignPacketByOuterInner(Index outer, Index inner)
+ EIGEN_STRONG_INLINE void assignPacketByOuterInner(Index outer, Index inner)
{
Index row = Base::rowIndexByOuterInner(outer, inner);
Index col = Base::colIndexByOuterInner(outer, inner);
diff --git a/Eigen/src/Core/Transpose.h b/Eigen/src/Core/Transpose.h
index d7c204579..c513f7f7c 100644
--- a/Eigen/src/Core/Transpose.h
+++ b/Eigen/src/Core/Transpose.h
@@ -61,25 +61,27 @@ template<typename MatrixType> class Transpose
typedef typename internal::remove_all<MatrixType>::type NestedExpression;
EIGEN_DEVICE_FUNC
- explicit inline Transpose(MatrixType& matrix) : m_matrix(matrix) {}
+ explicit EIGEN_STRONG_INLINE Transpose(MatrixType& matrix) : m_matrix(matrix) {}
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Transpose)
- EIGEN_DEVICE_FUNC inline Index rows() const { return m_matrix.cols(); }
- EIGEN_DEVICE_FUNC inline Index cols() const { return m_matrix.rows(); }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Index rows() const { return m_matrix.cols(); }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Index cols() const { return m_matrix.rows(); }
/** \returns the nested expression */
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const typename internal::remove_all<MatrixTypeNested>::type&
nestedExpression() const { return m_matrix; }
/** \returns the nested expression */
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
typename internal::remove_reference<MatrixTypeNested>::type&
nestedExpression() { return m_matrix; }
/** \internal */
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
void resize(Index nrows, Index ncols) {
m_matrix.resize(ncols,nrows);
}
@@ -123,8 +125,10 @@ template<typename MatrixType> class TransposeImpl<MatrixType,Dense>
EIGEN_DENSE_PUBLIC_INTERFACE(Transpose<MatrixType>)
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(TransposeImpl)
- EIGEN_DEVICE_FUNC inline Index innerStride() const { return derived().nestedExpression().innerStride(); }
- EIGEN_DEVICE_FUNC inline Index outerStride() const { return derived().nestedExpression().outerStride(); }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Index innerStride() const { return derived().nestedExpression().innerStride(); }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Index outerStride() const { return derived().nestedExpression().outerStride(); }
typedef typename internal::conditional<
internal::is_lvalue<MatrixType>::value,
@@ -132,18 +136,20 @@ template<typename MatrixType> class TransposeImpl<MatrixType,Dense>
const Scalar
>::type ScalarWithConstIfNotLvalue;
- EIGEN_DEVICE_FUNC inline ScalarWithConstIfNotLvalue* data() { return derived().nestedExpression().data(); }
- EIGEN_DEVICE_FUNC inline const Scalar* data() const { return derived().nestedExpression().data(); }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ ScalarWithConstIfNotLvalue* data() { return derived().nestedExpression().data(); }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const Scalar* data() const { return derived().nestedExpression().data(); }
// FIXME: shall we keep the const version of coeffRef?
- EIGEN_DEVICE_FUNC
- inline const Scalar& coeffRef(Index rowId, Index colId) const
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const Scalar& coeffRef(Index rowId, Index colId) const
{
return derived().nestedExpression().coeffRef(colId, rowId);
}
- EIGEN_DEVICE_FUNC
- inline const Scalar& coeffRef(Index index) const
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const Scalar& coeffRef(Index index) const
{
return derived().nestedExpression().coeffRef(index);
}
@@ -169,7 +175,8 @@ template<typename MatrixType> class TransposeImpl<MatrixType,Dense>
*
* \sa transposeInPlace(), adjoint() */
template<typename Derived>
-EIGEN_DEVICE_FUNC inline Transpose<Derived>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+Transpose<Derived>
DenseBase<Derived>::transpose()
{
return TransposeReturnType(derived());
@@ -181,7 +188,8 @@ DenseBase<Derived>::transpose()
*
* \sa transposeInPlace(), adjoint() */
template<typename Derived>
-EIGEN_DEVICE_FUNC inline typename DenseBase<Derived>::ConstTransposeReturnType
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+typename DenseBase<Derived>::ConstTransposeReturnType
DenseBase<Derived>::transpose() const
{
return ConstTransposeReturnType(derived());
@@ -392,7 +400,8 @@ struct checkTransposeAliasing_impl<Derived, OtherDerived, false>
template<typename Dst, typename Src>
void check_for_aliasing(const Dst &dst, const Src &src)
{
- internal::checkTransposeAliasing_impl<Dst, Src>::run(dst, src);
+ if((!Dst::IsVectorAtCompileTime) && dst.rows()>1 && dst.cols()>1)
+ internal::checkTransposeAliasing_impl<Dst, Src>::run(dst, src);
}
} // end namespace internal
diff --git a/Eigen/src/Core/TriangularMatrix.h b/Eigen/src/Core/TriangularMatrix.h
index 521de6160..cf3532f06 100644
--- a/Eigen/src/Core/TriangularMatrix.h
+++ b/Eigen/src/Core/TriangularMatrix.h
@@ -198,6 +198,7 @@ template<typename _MatrixType, unsigned int _Mode> class TriangularView
typedef typename internal::traits<TriangularView>::MatrixTypeNestedNonRef MatrixTypeNestedNonRef;
typedef typename internal::remove_all<typename MatrixType::ConjugateReturnType>::type MatrixConjugateReturnType;
+ typedef TriangularView<typename internal::add_const<MatrixType>::type, _Mode> ConstTriangularView;
public:
@@ -243,6 +244,18 @@ template<typename _MatrixType, unsigned int _Mode> class TriangularView
inline const ConjugateReturnType conjugate() const
{ return ConjugateReturnType(m_matrix.conjugate()); }
+ /** \returns an expression of the complex conjugate of \c *this if Cond==true,
+ * returns \c *this otherwise.
+ */
+ template<bool Cond>
+ EIGEN_DEVICE_FUNC
+ inline typename internal::conditional<Cond,ConjugateReturnType,ConstTriangularView>::type
+ conjugateIf() const
+ {
+ typedef typename internal::conditional<Cond,ConjugateReturnType,ConstTriangularView>::type ReturnType;
+ return ReturnType(m_matrix.template conjugateIf<Cond>());
+ }
+
typedef TriangularView<const typename MatrixType::AdjointReturnType,TransposeMode> AdjointReturnType;
/** \sa MatrixBase::adjoint() const */
EIGEN_DEVICE_FUNC
diff --git a/Eigen/src/Core/VectorBlock.h b/Eigen/src/Core/VectorBlock.h
index 0ede5d58e..71c5b95ee 100644
--- a/Eigen/src/Core/VectorBlock.h
+++ b/Eigen/src/Core/VectorBlock.h
@@ -71,8 +71,8 @@ template<typename VectorType, int Size> class VectorBlock
/** Dynamic-size constructor
*/
- EIGEN_DEVICE_FUNC
- inline VectorBlock(VectorType& vector, Index start, Index size)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ VectorBlock(VectorType& vector, Index start, Index size)
: Base(vector,
IsColVector ? start : 0, IsColVector ? 0 : start,
IsColVector ? size : 1, IsColVector ? 1 : size)
@@ -82,8 +82,8 @@ template<typename VectorType, int Size> class VectorBlock
/** Fixed-size constructor
*/
- EIGEN_DEVICE_FUNC
- inline VectorBlock(VectorType& vector, Index start)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ VectorBlock(VectorType& vector, Index start)
: Base(vector, IsColVector ? start : 0, IsColVector ? 0 : start)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(VectorBlock);
diff --git a/Eigen/src/Core/VectorwiseOp.h b/Eigen/src/Core/VectorwiseOp.h
index a88b6e736..db0b9f8c4 100644
--- a/Eigen/src/Core/VectorwiseOp.h
+++ b/Eigen/src/Core/VectorwiseOp.h
@@ -173,6 +173,14 @@ struct member_redux {
* Example: \include MatrixBase_colwise_iterator_cxx11.cpp
* Output: \verbinclude MatrixBase_colwise_iterator_cxx11.out
*
+ * For a partial reduction on an empty input, the following rules apply.
+ * For the sake of clarity, let's consider a vertical reduction:
+ * - If the number of columns is zero, then a 1x0 row-major vector expression is returned.
+ * - Otherwise, if the number of rows is zero, then
+ * - a row vector of zeros is returned for sum-like reductions (sum, squaredNorm, norm, etc.)
+ * - a row vector of ones is returned for a product reduction (e.g., <code>MatrixXd(0,n).colwise().prod()</code>)
+ * - an assert is triggered for all other reductions (minCoeff,maxCoeff,redux(bin_op))
+ *
* \sa DenseBase::colwise(), DenseBase::rowwise(), class PartialReduxExpr
*/
template<typename ExpressionType, int Direction> class VectorwiseOp
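
Note: the rules documented above can be summarized with a short sketch (illustrative, assumes this patch):

    #include <Eigen/Dense>
    int main() {
      Eigen::MatrixXd a(0,3);                     // zero rows, three columns
      Eigen::RowVectorXd s = a.colwise().sum();   // [0 0 0]
      Eigen::RowVectorXd p = a.colwise().prod();  // [1 1 1]
      // a.colwise().minCoeff();                  // would trigger an assertion
      Eigen::MatrixXd b(3,0);                     // zero columns
      Eigen::RowVectorXd e = b.colwise().sum();   // 1x0 expression
      return int(s.size() + p.size() + e.size()); // 3 + 3 + 0
    }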
@@ -294,13 +302,19 @@ template<typename ExpressionType, int Direction> class VectorwiseOp
* The template parameter \a BinaryOp is the type of the functor
* of the custom redux operator. Note that func must be an associative operator.
*
+ * \warning the size along the reduction direction must be strictly positive,
+ * otherwise an assertion is triggered.
+ *
* \sa class VectorwiseOp, DenseBase::colwise(), DenseBase::rowwise()
*/
template<typename BinaryOp>
EIGEN_DEVICE_FUNC
const typename ReduxReturnType<BinaryOp>::Type
redux(const BinaryOp& func = BinaryOp()) const
- { return typename ReduxReturnType<BinaryOp>::Type(_expression(), internal::member_redux<BinaryOp,Scalar>(func)); }
+ {
+ eigen_assert(redux_length()>0 && "you are using an empty matrix");
+ return typename ReduxReturnType<BinaryOp>::Type(_expression(), internal::member_redux<BinaryOp,Scalar>(func));
+ }
typedef typename ReturnType<internal::member_minCoeff>::Type MinCoeffReturnType;
typedef typename ReturnType<internal::member_maxCoeff>::Type MaxCoeffReturnType;
@@ -325,6 +339,9 @@ template<typename ExpressionType, int Direction> class VectorwiseOp
/** \returns a row (or column) vector expression of the smallest coefficient
* of each column (or row) of the referenced expression.
*
+ * \warning the size along the reduction direction must be strictly positive,
+ * otherwise an assertion is triggered.
+ *
* \warning the result is undefined if \c *this contains NaN.
*
* Example: \include PartialRedux_minCoeff.cpp
@@ -333,11 +350,17 @@ template<typename ExpressionType, int Direction> class VectorwiseOp
* \sa DenseBase::minCoeff() */
EIGEN_DEVICE_FUNC
const MinCoeffReturnType minCoeff() const
- { return MinCoeffReturnType(_expression()); }
+ {
+ eigen_assert(redux_length()>0 && "you are using an empty matrix");
+ return MinCoeffReturnType(_expression());
+ }
/** \returns a row (or column) vector expression of the largest coefficient
* of each column (or row) of the referenced expression.
*
+ * \warning the size along the reduction direction must be strictly positive,
+ * otherwise an assertion is triggered.
+ *
* \warning the result is undefined if \c *this contains NaN.
*
* Example: \include PartialRedux_maxCoeff.cpp
@@ -346,7 +369,10 @@ template<typename ExpressionType, int Direction> class VectorwiseOp
* \sa DenseBase::maxCoeff() */
EIGEN_DEVICE_FUNC
const MaxCoeffReturnType maxCoeff() const
- { return MaxCoeffReturnType(_expression()); }
+ {
+ eigen_assert(redux_length()>0 && "you are using an empty matrix");
+ return MaxCoeffReturnType(_expression());
+ }
/** \returns a row (or column) vector expression of the squared norm
* of each column (or row) of the referenced expression.
@@ -531,7 +557,7 @@ template<typename ExpressionType, int Direction> class VectorwiseOp
EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
EIGEN_STATIC_ASSERT_SAME_XPR_KIND(ExpressionType, OtherDerived)
//eigen_assert((m_matrix.isNull()) == (other.isNull())); FIXME
- return const_cast<ExpressionType&>(m_matrix = extendedTo(other.derived()));
+ return m_matrix = extendedTo(other.derived());
}
/** Adds the vector \a other to each subvector of \c *this */
@@ -541,7 +567,7 @@ template<typename ExpressionType, int Direction> class VectorwiseOp
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
EIGEN_STATIC_ASSERT_SAME_XPR_KIND(ExpressionType, OtherDerived)
- return const_cast<ExpressionType&>(m_matrix += extendedTo(other.derived()));
+ return m_matrix += extendedTo(other.derived());
}
/** Subtracts the vector \a other from each subvector of \c *this */
@@ -551,7 +577,7 @@ template<typename ExpressionType, int Direction> class VectorwiseOp
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
EIGEN_STATIC_ASSERT_SAME_XPR_KIND(ExpressionType, OtherDerived)
- return const_cast<ExpressionType&>(m_matrix -= extendedTo(other.derived()));
+ return m_matrix -= extendedTo(other.derived());
}
/** Multiplies each subvector of \c *this by the vector \a other */
@@ -563,7 +589,7 @@ template<typename ExpressionType, int Direction> class VectorwiseOp
EIGEN_STATIC_ASSERT_ARRAYXPR(ExpressionType)
EIGEN_STATIC_ASSERT_SAME_XPR_KIND(ExpressionType, OtherDerived)
m_matrix *= extendedTo(other.derived());
- return const_cast<ExpressionType&>(m_matrix);
+ return m_matrix;
}
/** Divides each subvector of \c *this by the vector \a other */
@@ -575,7 +601,7 @@ template<typename ExpressionType, int Direction> class VectorwiseOp
EIGEN_STATIC_ASSERT_ARRAYXPR(ExpressionType)
EIGEN_STATIC_ASSERT_SAME_XPR_KIND(ExpressionType, OtherDerived)
m_matrix /= extendedTo(other.derived());
- return const_cast<ExpressionType&>(m_matrix);
+ return m_matrix;
}
/** Returns the expression of the sum of the vector \a other to each subvector of \c *this */
@@ -690,6 +716,10 @@ template<typename ExpressionType, int Direction> class VectorwiseOp
const HNormalizedReturnType hnormalized() const;
protected:
+ Index redux_length() const
+ {
+ return Direction==Vertical ? m_matrix.rows() : m_matrix.cols();
+ }
ExpressionTypeNested m_matrix;
};
diff --git a/Eigen/src/Core/Visitor.h b/Eigen/src/Core/Visitor.h
index 54c1883d9..f67d83bd1 100644
--- a/Eigen/src/Core/Visitor.h
+++ b/Eigen/src/Core/Visitor.h
@@ -40,6 +40,14 @@ struct visitor_impl<Visitor, Derived, 1>
}
};
+// This specialization enables visitors on empty matrices at compile-time
+template<typename Visitor, typename Derived>
+struct visitor_impl<Visitor, Derived, 0> {
+ EIGEN_DEVICE_FUNC
+ static inline void run(const Derived &/*mat*/, Visitor& /*visitor*/)
+ {}
+};
+
template<typename Visitor, typename Derived>
struct visitor_impl<Visitor, Derived, Dynamic>
{
@@ -98,6 +106,8 @@ protected:
*
* \note compared to one or two \em for \em loops, visitors offer automatic
* unrolling for small fixed size matrix.
+ *
+ * \note if the matrix is empty, then the visitor is left unchanged.
*
* \sa minCoeff(Index*,Index*), maxCoeff(Index*,Index*), DenseBase::redux()
*/
@@ -106,6 +116,9 @@ template<typename Visitor>
EIGEN_DEVICE_FUNC
void DenseBase<Derived>::visit(Visitor& visitor) const
{
+ if(size()==0)
+ return;
+
typedef typename internal::visitor_evaluator<Derived> ThisEvaluator;
ThisEvaluator thisEval(derived());
@@ -124,6 +137,8 @@ namespace internal {
template <typename Derived>
struct coeff_visitor
{
+ // default initialization to avoid countless invalid maybe-uninitialized warnings by gcc
+ coeff_visitor() : row(-1), col(-1), res(0) {}
typedef typename Derived::Scalar Scalar;
Index row, col;
Scalar res;
@@ -196,6 +211,9 @@ struct functor_traits<max_coeff_visitor<Scalar> > {
/** \fn DenseBase<Derived>::minCoeff(IndexType* rowId, IndexType* colId) const
* \returns the minimum of all coefficients of *this and puts in *row and *col its location.
+ *
+ * \warning the matrix must not be empty, otherwise an assertion is triggered.
+ *
* \warning the result is undefined if \c *this contains NaN.
*
* \sa DenseBase::minCoeff(Index*), DenseBase::maxCoeff(Index*,Index*), DenseBase::visit(), DenseBase::minCoeff()
@@ -206,6 +224,8 @@ EIGEN_DEVICE_FUNC
typename internal::traits<Derived>::Scalar
DenseBase<Derived>::minCoeff(IndexType* rowId, IndexType* colId) const
{
+ eigen_assert(this->rows()>0 && this->cols()>0 && "you are using an empty matrix");
+
internal::min_coeff_visitor<Derived> minVisitor;
this->visit(minVisitor);
*rowId = minVisitor.row;
@@ -214,6 +234,9 @@ DenseBase<Derived>::minCoeff(IndexType* rowId, IndexType* colId) const
}
/** \returns the minimum of all coefficients of *this and puts in *index its location.
+ *
+ * \warning the matrix must not be empty, otherwise an assertion is triggered.
+ *
* \warning the result is undefined if \c *this contains NaN.
*
* \sa DenseBase::minCoeff(IndexType*,IndexType*), DenseBase::maxCoeff(IndexType*,IndexType*), DenseBase::visit(), DenseBase::minCoeff()
@@ -224,6 +247,8 @@ EIGEN_DEVICE_FUNC
typename internal::traits<Derived>::Scalar
DenseBase<Derived>::minCoeff(IndexType* index) const
{
+ eigen_assert(this->rows()>0 && this->cols()>0 && "you are using an empty matrix");
+
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
internal::min_coeff_visitor<Derived> minVisitor;
this->visit(minVisitor);
@@ -233,6 +258,9 @@ DenseBase<Derived>::minCoeff(IndexType* index) const
/** \fn DenseBase<Derived>::maxCoeff(IndexType* rowId, IndexType* colId) const
* \returns the maximum of all coefficients of *this and puts in *row and *col its location.
+ *
+ * \warning the matrix must not be empty, otherwise an assertion is triggered.
+ *
* \warning the result is undefined if \c *this contains NaN.
*
* \sa DenseBase::minCoeff(IndexType*,IndexType*), DenseBase::visit(), DenseBase::maxCoeff()
@@ -243,6 +271,8 @@ EIGEN_DEVICE_FUNC
typename internal::traits<Derived>::Scalar
DenseBase<Derived>::maxCoeff(IndexType* rowPtr, IndexType* colPtr) const
{
+ eigen_assert(this->rows()>0 && this->cols()>0 && "you are using an empty matrix");
+
internal::max_coeff_visitor<Derived> maxVisitor;
this->visit(maxVisitor);
*rowPtr = maxVisitor.row;
@@ -251,6 +281,9 @@ DenseBase<Derived>::maxCoeff(IndexType* rowPtr, IndexType* colPtr) const
}
/** \returns the maximum of all coefficients of *this and puts in *index its location.
+ *
+ * \warning the matrix must not be empty, otherwise an assertion is triggered.
+ *
* \warning the result is undefined if \c *this contains NaN.
*
 * \sa DenseBase::maxCoeff(IndexType*,IndexType*), DenseBase::minCoeff(IndexType*,IndexType*), DenseBase::visit(), DenseBase::maxCoeff()
@@ -261,6 +294,8 @@ EIGEN_DEVICE_FUNC
typename internal::traits<Derived>::Scalar
DenseBase<Derived>::maxCoeff(IndexType* index) const
{
+ eigen_assert(this->rows()>0 && this->cols()>0 && "you are using an empty matrix");
+
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
internal::max_coeff_visitor<Derived> maxVisitor;
this->visit(maxVisitor);
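
Note: taken together, visit() now silently does nothing on empty matrices while the coefficient-locating reductions assert. A brief sketch (assumes this patch; the Counter visitor is illustrative):

    #include <Eigen/Dense>
    struct Counter {
      int n = 0;
      void init(const float&, Eigen::Index, Eigen::Index) { ++n; }
      void operator()(const float&, Eigen::Index, Eigen::Index) { ++n; }
    };
    int main() {
      Eigen::MatrixXf m(0,3);
      Counter counter;
      m.visit(counter);             // no-op: counter.n stays 0
      Eigen::Index r, c;
      // m.maxCoeff(&r, &c);        // would trigger the new assertion
      (void)r; (void)c;
      return counter.n;
    }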
diff --git a/Eigen/src/Core/arch/AVX/Complex.h b/Eigen/src/Core/arch/AVX/Complex.h
index 7fa61969d..5b8ff59bd 100644
--- a/Eigen/src/Core/arch/AVX/Complex.h
+++ b/Eigen/src/Core/arch/AVX/Complex.h
@@ -22,6 +22,7 @@ struct Packet4cf
__m256 v;
};
+#ifndef EIGEN_VECTORIZE_AVX512
template<> struct packet_traits<std::complex<float> > : default_packet_traits
{
typedef Packet4cf type;
@@ -44,8 +45,9 @@ template<> struct packet_traits<std::complex<float> > : default_packet_traits
HasSetLinear = 0
};
};
+#endif
-template<> struct unpacket_traits<Packet4cf> { typedef std::complex<float> type; enum {size=4, alignment=Aligned32}; typedef Packet2cf half; };
+template<> struct unpacket_traits<Packet4cf> { typedef std::complex<float> type; enum {size=4, alignment=Aligned32, vectorizable=true}; typedef Packet2cf half; };
template<> EIGEN_STRONG_INLINE Packet4cf padd<Packet4cf>(const Packet4cf& a, const Packet4cf& b) { return Packet4cf(_mm256_add_ps(a.v,b.v)); }
template<> EIGEN_STRONG_INLINE Packet4cf psub<Packet4cf>(const Packet4cf& a, const Packet4cf& b) { return Packet4cf(_mm256_sub_ps(a.v,b.v)); }
@@ -67,10 +69,18 @@ template<> EIGEN_STRONG_INLINE Packet4cf pmul<Packet4cf>(const Packet4cf& a, con
return Packet4cf(result);
}
+template <>
+EIGEN_STRONG_INLINE Packet4cf pcmp_eq(const Packet4cf& a, const Packet4cf& b) {
+ __m256 eq = _mm256_cmp_ps(a.v, b.v, _CMP_EQ_OQ);
+ return Packet4cf(_mm256_and_ps(eq, _mm256_permute_ps(eq, 0xb1)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet4cf ptrue<Packet4cf>(const Packet4cf& a) { return Packet4cf(ptrue(Packet8f(a.v))); }
+template<> EIGEN_STRONG_INLINE Packet4cf pnot<Packet4cf>(const Packet4cf& a) { return Packet4cf(pnot(Packet8f(a.v))); }
template<> EIGEN_STRONG_INLINE Packet4cf pand <Packet4cf>(const Packet4cf& a, const Packet4cf& b) { return Packet4cf(_mm256_and_ps(a.v,b.v)); }
template<> EIGEN_STRONG_INLINE Packet4cf por <Packet4cf>(const Packet4cf& a, const Packet4cf& b) { return Packet4cf(_mm256_or_ps(a.v,b.v)); }
template<> EIGEN_STRONG_INLINE Packet4cf pxor <Packet4cf>(const Packet4cf& a, const Packet4cf& b) { return Packet4cf(_mm256_xor_ps(a.v,b.v)); }
-template<> EIGEN_STRONG_INLINE Packet4cf pandnot<Packet4cf>(const Packet4cf& a, const Packet4cf& b) { return Packet4cf(_mm256_andnot_ps(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet4cf pandnot<Packet4cf>(const Packet4cf& a, const Packet4cf& b) { return Packet4cf(_mm256_andnot_ps(b.v,a.v)); }
template<> EIGEN_STRONG_INLINE Packet4cf pload <Packet4cf>(const std::complex<float>* from) { EIGEN_DEBUG_ALIGNED_LOAD return Packet4cf(pload<Packet8f>(&numext::real_ref(*from))); }
template<> EIGEN_STRONG_INLINE Packet4cf ploadu<Packet4cf>(const std::complex<float>* from) { EIGEN_DEBUG_UNALIGNED_LOAD return Packet4cf(ploadu<Packet8f>(&numext::real_ref(*from))); }
@@ -228,6 +238,7 @@ struct Packet2cd
__m256d v;
};
+#ifndef EIGEN_VECTORIZE_AVX512
template<> struct packet_traits<std::complex<double> > : default_packet_traits
{
typedef Packet2cd type;
@@ -250,8 +261,9 @@ template<> struct packet_traits<std::complex<double> > : default_packet_traits
HasSetLinear = 0
};
};
+#endif
-template<> struct unpacket_traits<Packet2cd> { typedef std::complex<double> type; enum {size=2, alignment=Aligned32}; typedef Packet1cd half; };
+template<> struct unpacket_traits<Packet2cd> { typedef std::complex<double> type; enum {size=2, alignment=Aligned32, vectorizable=true}; typedef Packet1cd half; };
template<> EIGEN_STRONG_INLINE Packet2cd padd<Packet2cd>(const Packet2cd& a, const Packet2cd& b) { return Packet2cd(_mm256_add_pd(a.v,b.v)); }
template<> EIGEN_STRONG_INLINE Packet2cd psub<Packet2cd>(const Packet2cd& a, const Packet2cd& b) { return Packet2cd(_mm256_sub_pd(a.v,b.v)); }
@@ -272,10 +284,18 @@ template<> EIGEN_STRONG_INLINE Packet2cd pmul<Packet2cd>(const Packet2cd& a, con
return Packet2cd(_mm256_addsub_pd(even, odd));
}
+template <>
+EIGEN_STRONG_INLINE Packet2cd pcmp_eq(const Packet2cd& a, const Packet2cd& b) {
+ __m256d eq = _mm256_cmp_pd(a.v, b.v, _CMP_EQ_OQ);
+ return Packet2cd(pand(eq, _mm256_permute_pd(eq, 0x5)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet2cd ptrue<Packet2cd>(const Packet2cd& a) { return Packet2cd(ptrue(Packet4d(a.v))); }
+template<> EIGEN_STRONG_INLINE Packet2cd pnot<Packet2cd>(const Packet2cd& a) { return Packet2cd(pnot(Packet4d(a.v))); }
template<> EIGEN_STRONG_INLINE Packet2cd pand <Packet2cd>(const Packet2cd& a, const Packet2cd& b) { return Packet2cd(_mm256_and_pd(a.v,b.v)); }
template<> EIGEN_STRONG_INLINE Packet2cd por <Packet2cd>(const Packet2cd& a, const Packet2cd& b) { return Packet2cd(_mm256_or_pd(a.v,b.v)); }
template<> EIGEN_STRONG_INLINE Packet2cd pxor <Packet2cd>(const Packet2cd& a, const Packet2cd& b) { return Packet2cd(_mm256_xor_pd(a.v,b.v)); }
-template<> EIGEN_STRONG_INLINE Packet2cd pandnot<Packet2cd>(const Packet2cd& a, const Packet2cd& b) { return Packet2cd(_mm256_andnot_pd(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet2cd pandnot<Packet2cd>(const Packet2cd& a, const Packet2cd& b) { return Packet2cd(_mm256_andnot_pd(b.v,a.v)); }
template<> EIGEN_STRONG_INLINE Packet2cd pload <Packet2cd>(const std::complex<double>* from)
{ EIGEN_DEBUG_ALIGNED_LOAD return Packet2cd(pload<Packet4d>((const double*)from)); }
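
Note: the pandnot operand swap above reflects a convention change: pandnot(a,b) now computes a & ~b, whereas _mm256_andnot_ps(x,y) computes (~x) & y, hence the reversed arguments. A raw-intrinsics sketch of the same pattern (assumes AVX; the helper name is illustrative):

    #include <immintrin.h>
    // a & ~b, expressed with the AVX andnot instruction, which negates its first operand
    static inline __m256 andnot_sketch(__m256 a, __m256 b) {
      return _mm256_andnot_ps(b, a);
    }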
diff --git a/Eigen/src/Core/arch/AVX/MathFunctions.h b/Eigen/src/Core/arch/AVX/MathFunctions.h
index 6af67ce2d..9f375ed98 100644
--- a/Eigen/src/Core/arch/AVX/MathFunctions.h
+++ b/Eigen/src/Core/arch/AVX/MathFunctions.h
@@ -10,7 +10,7 @@
#ifndef EIGEN_MATH_FUNCTIONS_AVX_H
#define EIGEN_MATH_FUNCTIONS_AVX_H
-/* The sin, cos, exp, and log functions of this file are loosely derived from
+/* The sin and cos functions of this file are loosely derived from
* Julien Pommier's sse math library: http://gruntthepeon.free.fr/ssemath/
*/
@@ -18,187 +18,22 @@ namespace Eigen {
namespace internal {
-inline Packet8i pshiftleft(Packet8i v, int n)
-{
-#ifdef EIGEN_VECTORIZE_AVX2
- return _mm256_slli_epi32(v, n);
-#else
- __m128i lo = _mm_slli_epi32(_mm256_extractf128_si256(v, 0), n);
- __m128i hi = _mm_slli_epi32(_mm256_extractf128_si256(v, 1), n);
- return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
-#endif
-}
-
-inline Packet8f pshiftright(Packet8f v, int n)
-{
-#ifdef EIGEN_VECTORIZE_AVX2
- return _mm256_cvtepi32_ps(_mm256_srli_epi32(_mm256_castps_si256(v), n));
-#else
- __m128i lo = _mm_srli_epi32(_mm256_extractf128_si256(_mm256_castps_si256(v), 0), n);
- __m128i hi = _mm_srli_epi32(_mm256_extractf128_si256(_mm256_castps_si256(v), 1), n);
- return _mm256_cvtepi32_ps(_mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1));
-#endif
-}
-
-// Sine function
-// Computes sin(x) by wrapping x to the interval [-Pi/4,3*Pi/4] and
-// evaluating interpolants in [-Pi/4,Pi/4] or [Pi/4,3*Pi/4]. The interpolants
-// are (anti-)symmetric and thus have only odd/even coefficients
template <>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8f
psin<Packet8f>(const Packet8f& _x) {
- Packet8f x = _x;
-
- // Some useful values.
- _EIGEN_DECLARE_CONST_Packet8i(one, 1);
- _EIGEN_DECLARE_CONST_Packet8f(one, 1.0f);
- _EIGEN_DECLARE_CONST_Packet8f(two, 2.0f);
- _EIGEN_DECLARE_CONST_Packet8f(one_over_four, 0.25f);
- _EIGEN_DECLARE_CONST_Packet8f(one_over_pi, 3.183098861837907e-01f);
- _EIGEN_DECLARE_CONST_Packet8f(neg_pi_first, -3.140625000000000e+00f);
- _EIGEN_DECLARE_CONST_Packet8f(neg_pi_second, -9.670257568359375e-04f);
- _EIGEN_DECLARE_CONST_Packet8f(neg_pi_third, -6.278329571784980e-07f);
- _EIGEN_DECLARE_CONST_Packet8f(four_over_pi, 1.273239544735163e+00f);
-
- // Map x from [-Pi/4,3*Pi/4] to z in [-1,3] and subtract the shifted period.
- Packet8f z = pmul(x, p8f_one_over_pi);
- Packet8f shift = _mm256_floor_ps(padd(z, p8f_one_over_four));
- x = pmadd(shift, p8f_neg_pi_first, x);
- x = pmadd(shift, p8f_neg_pi_second, x);
- x = pmadd(shift, p8f_neg_pi_third, x);
- z = pmul(x, p8f_four_over_pi);
-
- // Make a mask for the entries that need flipping, i.e. wherever the shift
- // is odd.
- Packet8i shift_ints = _mm256_cvtps_epi32(shift);
- Packet8i shift_isodd = _mm256_castps_si256(_mm256_and_ps(_mm256_castsi256_ps(shift_ints), _mm256_castsi256_ps(p8i_one)));
- Packet8i sign_flip_mask = pshiftleft(shift_isodd, 31);
-
- // Create a mask for which interpolant to use, i.e. if z > 1, then the mask
- // is set to ones for that entry.
- Packet8f ival_mask = _mm256_cmp_ps(z, p8f_one, _CMP_GT_OQ);
-
- // Evaluate the polynomial for the interval [1,3] in z.
- _EIGEN_DECLARE_CONST_Packet8f(coeff_right_0, 9.999999724233232e-01f);
- _EIGEN_DECLARE_CONST_Packet8f(coeff_right_2, -3.084242535619928e-01f);
- _EIGEN_DECLARE_CONST_Packet8f(coeff_right_4, 1.584991525700324e-02f);
- _EIGEN_DECLARE_CONST_Packet8f(coeff_right_6, -3.188805084631342e-04f);
- Packet8f z_minus_two = psub(z, p8f_two);
- Packet8f z_minus_two2 = pmul(z_minus_two, z_minus_two);
- Packet8f right = pmadd(p8f_coeff_right_6, z_minus_two2, p8f_coeff_right_4);
- right = pmadd(right, z_minus_two2, p8f_coeff_right_2);
- right = pmadd(right, z_minus_two2, p8f_coeff_right_0);
-
- // Evaluate the polynomial for the interval [-1,1] in z.
- _EIGEN_DECLARE_CONST_Packet8f(coeff_left_1, 7.853981525427295e-01f);
- _EIGEN_DECLARE_CONST_Packet8f(coeff_left_3, -8.074536727092352e-02f);
- _EIGEN_DECLARE_CONST_Packet8f(coeff_left_5, 2.489871967827018e-03f);
- _EIGEN_DECLARE_CONST_Packet8f(coeff_left_7, -3.587725841214251e-05f);
- Packet8f z2 = pmul(z, z);
- Packet8f left = pmadd(p8f_coeff_left_7, z2, p8f_coeff_left_5);
- left = pmadd(left, z2, p8f_coeff_left_3);
- left = pmadd(left, z2, p8f_coeff_left_1);
- left = pmul(left, z);
-
- // Assemble the results, i.e. select the left and right polynomials.
- left = _mm256_andnot_ps(ival_mask, left);
- right = _mm256_and_ps(ival_mask, right);
- Packet8f res = _mm256_or_ps(left, right);
+ return psin_float(_x);
+}
- // Flip the sign on the odd intervals and return the result.
- res = _mm256_xor_ps(res, _mm256_castsi256_ps(sign_flip_mask));
- return res;
+template <>
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8f
+pcos<Packet8f>(const Packet8f& _x) {
+ return pcos_float(_x);
}
-// Natural logarithm
-// Computes log(x) as log(2^e * m) = C*e + log(m), where the constant C =log(2)
-// and m is in the range [sqrt(1/2),sqrt(2)). In this range, the logarithm can
-// be easily approximated by a polynomial centered on m=1 for stability.
-// TODO(gonnet): Further reduce the interval allowing for lower-degree
-// polynomial interpolants -> ... -> profit!
template <>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8f
plog<Packet8f>(const Packet8f& _x) {
- Packet8f x = _x;
- _EIGEN_DECLARE_CONST_Packet8f(1, 1.0f);
- _EIGEN_DECLARE_CONST_Packet8f(half, 0.5f);
- _EIGEN_DECLARE_CONST_Packet8f(126f, 126.0f);
-
- _EIGEN_DECLARE_CONST_Packet8f_FROM_INT(inv_mant_mask, ~0x7f800000);
-
- // The smallest non denormalized float number.
- _EIGEN_DECLARE_CONST_Packet8f_FROM_INT(min_norm_pos, 0x00800000);
- _EIGEN_DECLARE_CONST_Packet8f_FROM_INT(minus_inf, 0xff800000);
-
- // Polynomial coefficients.
- _EIGEN_DECLARE_CONST_Packet8f(cephes_SQRTHF, 0.707106781186547524f);
- _EIGEN_DECLARE_CONST_Packet8f(cephes_log_p0, 7.0376836292E-2f);
- _EIGEN_DECLARE_CONST_Packet8f(cephes_log_p1, -1.1514610310E-1f);
- _EIGEN_DECLARE_CONST_Packet8f(cephes_log_p2, 1.1676998740E-1f);
- _EIGEN_DECLARE_CONST_Packet8f(cephes_log_p3, -1.2420140846E-1f);
- _EIGEN_DECLARE_CONST_Packet8f(cephes_log_p4, +1.4249322787E-1f);
- _EIGEN_DECLARE_CONST_Packet8f(cephes_log_p5, -1.6668057665E-1f);
- _EIGEN_DECLARE_CONST_Packet8f(cephes_log_p6, +2.0000714765E-1f);
- _EIGEN_DECLARE_CONST_Packet8f(cephes_log_p7, -2.4999993993E-1f);
- _EIGEN_DECLARE_CONST_Packet8f(cephes_log_p8, +3.3333331174E-1f);
- _EIGEN_DECLARE_CONST_Packet8f(cephes_log_q1, -2.12194440e-4f);
- _EIGEN_DECLARE_CONST_Packet8f(cephes_log_q2, 0.693359375f);
-
- Packet8f invalid_mask = _mm256_cmp_ps(x, _mm256_setzero_ps(), _CMP_NGE_UQ); // not greater equal is true if x is NaN
- Packet8f iszero_mask = _mm256_cmp_ps(x, _mm256_setzero_ps(), _CMP_EQ_OQ);
-
- // Truncate input values to the minimum positive normal.
- x = pmax(x, p8f_min_norm_pos);
-
- Packet8f emm0 = pshiftright(x,23);
- Packet8f e = _mm256_sub_ps(emm0, p8f_126f);
-
- // Set the exponents to -1, i.e. x are in the range [0.5,1).
- x = _mm256_and_ps(x, p8f_inv_mant_mask);
- x = _mm256_or_ps(x, p8f_half);
-
- // part2: Shift the inputs from the range [0.5,1) to [sqrt(1/2),sqrt(2))
- // and shift by -1. The values are then centered around 0, which improves
- // the stability of the polynomial evaluation.
- // if( x < SQRTHF ) {
- // e -= 1;
- // x = x + x - 1.0;
- // } else { x = x - 1.0; }
- Packet8f mask = _mm256_cmp_ps(x, p8f_cephes_SQRTHF, _CMP_LT_OQ);
- Packet8f tmp = _mm256_and_ps(x, mask);
- x = psub(x, p8f_1);
- e = psub(e, _mm256_and_ps(p8f_1, mask));
- x = padd(x, tmp);
-
- Packet8f x2 = pmul(x, x);
- Packet8f x3 = pmul(x2, x);
-
- // Evaluate the polynomial approximant of degree 8 in three parts, probably
- // to improve instruction-level parallelism.
- Packet8f y, y1, y2;
- y = pmadd(p8f_cephes_log_p0, x, p8f_cephes_log_p1);
- y1 = pmadd(p8f_cephes_log_p3, x, p8f_cephes_log_p4);
- y2 = pmadd(p8f_cephes_log_p6, x, p8f_cephes_log_p7);
- y = pmadd(y, x, p8f_cephes_log_p2);
- y1 = pmadd(y1, x, p8f_cephes_log_p5);
- y2 = pmadd(y2, x, p8f_cephes_log_p8);
- y = pmadd(y, x3, y1);
- y = pmadd(y, x3, y2);
- y = pmul(y, x3);
-
- // Add the logarithm of the exponent back to the result of the interpolation.
- y1 = pmul(e, p8f_cephes_log_q1);
- tmp = pmul(x2, p8f_half);
- y = padd(y, y1);
- x = psub(x, tmp);
- y2 = pmul(e, p8f_cephes_log_q2);
- x = padd(x, y);
- x = padd(x, y2);
-
- // Filter out invalid inputs, i.e. negative arg will be NAN, 0 will be -INF.
- return _mm256_or_ps(
- _mm256_andnot_ps(iszero_mask, _mm256_or_ps(x, invalid_mask)),
- _mm256_and_ps(iszero_mask, p8f_minus_inf));
+ return plog_float(_x);
}
// Exponential function. Works by writing "x = m*log(2) + r" where
@@ -207,62 +42,7 @@ plog<Packet8f>(const Packet8f& _x) {
template <>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8f
pexp<Packet8f>(const Packet8f& _x) {
- _EIGEN_DECLARE_CONST_Packet8f(1, 1.0f);
- _EIGEN_DECLARE_CONST_Packet8f(half, 0.5f);
- _EIGEN_DECLARE_CONST_Packet8f(127, 127.0f);
-
- _EIGEN_DECLARE_CONST_Packet8f(exp_hi, 88.3762626647950f);
- _EIGEN_DECLARE_CONST_Packet8f(exp_lo, -88.3762626647949f);
-
- _EIGEN_DECLARE_CONST_Packet8f(cephes_LOG2EF, 1.44269504088896341f);
-
- _EIGEN_DECLARE_CONST_Packet8f(cephes_exp_p0, 1.9875691500E-4f);
- _EIGEN_DECLARE_CONST_Packet8f(cephes_exp_p1, 1.3981999507E-3f);
- _EIGEN_DECLARE_CONST_Packet8f(cephes_exp_p2, 8.3334519073E-3f);
- _EIGEN_DECLARE_CONST_Packet8f(cephes_exp_p3, 4.1665795894E-2f);
- _EIGEN_DECLARE_CONST_Packet8f(cephes_exp_p4, 1.6666665459E-1f);
- _EIGEN_DECLARE_CONST_Packet8f(cephes_exp_p5, 5.0000001201E-1f);
-
- // Clamp x.
- Packet8f x = pmax(pmin(_x, p8f_exp_hi), p8f_exp_lo);
-
- // Express exp(x) as exp(m*ln(2) + r), start by extracting
- // m = floor(x/ln(2) + 0.5).
- Packet8f m = _mm256_floor_ps(pmadd(x, p8f_cephes_LOG2EF, p8f_half));
-
-// Get r = x - m*ln(2). If no FMA instructions are available, m*ln(2) is
-// subtracted out in two parts, m*C1+m*C2 = m*ln(2), to avoid accumulating
-// truncation errors. Note that we don't use the "pmadd" function here to
-// ensure that a precision-preserving FMA instruction is used.
-#ifdef EIGEN_VECTORIZE_FMA
- _EIGEN_DECLARE_CONST_Packet8f(nln2, -0.6931471805599453f);
- Packet8f r = _mm256_fmadd_ps(m, p8f_nln2, x);
-#else
- _EIGEN_DECLARE_CONST_Packet8f(cephes_exp_C1, 0.693359375f);
- _EIGEN_DECLARE_CONST_Packet8f(cephes_exp_C2, -2.12194440e-4f);
- Packet8f r = psub(x, pmul(m, p8f_cephes_exp_C1));
- r = psub(r, pmul(m, p8f_cephes_exp_C2));
-#endif
-
- Packet8f r2 = pmul(r, r);
-
- // TODO(gonnet): Split into odd/even polynomials and try to exploit
- // instruction-level parallelism.
- Packet8f y = p8f_cephes_exp_p0;
- y = pmadd(y, r, p8f_cephes_exp_p1);
- y = pmadd(y, r, p8f_cephes_exp_p2);
- y = pmadd(y, r, p8f_cephes_exp_p3);
- y = pmadd(y, r, p8f_cephes_exp_p4);
- y = pmadd(y, r, p8f_cephes_exp_p5);
- y = pmadd(y, r2, r);
- y = padd(y, p8f_1);
-
- // Build emm0 = 2^m.
- Packet8i emm0 = _mm256_cvttps_epi32(padd(m, p8f_127));
- emm0 = pshiftleft(emm0, 23);
-
- // Return 2^m * exp(r).
- return pmax(pmul(y, _mm256_castsi256_ps(emm0)), _x);
+ return pexp_float(_x);
}
// Hyperbolic Tangent function.
@@ -274,82 +54,8 @@ ptanh<Packet8f>(const Packet8f& x) {
template <>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4d
-pexp<Packet4d>(const Packet4d& _x) {
- Packet4d x = _x;
-
- _EIGEN_DECLARE_CONST_Packet4d(1, 1.0);
- _EIGEN_DECLARE_CONST_Packet4d(2, 2.0);
- _EIGEN_DECLARE_CONST_Packet4d(half, 0.5);
-
- _EIGEN_DECLARE_CONST_Packet4d(exp_hi, 709.437);
- _EIGEN_DECLARE_CONST_Packet4d(exp_lo, -709.436139303);
-
- _EIGEN_DECLARE_CONST_Packet4d(cephes_LOG2EF, 1.4426950408889634073599);
-
- _EIGEN_DECLARE_CONST_Packet4d(cephes_exp_p0, 1.26177193074810590878e-4);
- _EIGEN_DECLARE_CONST_Packet4d(cephes_exp_p1, 3.02994407707441961300e-2);
- _EIGEN_DECLARE_CONST_Packet4d(cephes_exp_p2, 9.99999999999999999910e-1);
-
- _EIGEN_DECLARE_CONST_Packet4d(cephes_exp_q0, 3.00198505138664455042e-6);
- _EIGEN_DECLARE_CONST_Packet4d(cephes_exp_q1, 2.52448340349684104192e-3);
- _EIGEN_DECLARE_CONST_Packet4d(cephes_exp_q2, 2.27265548208155028766e-1);
- _EIGEN_DECLARE_CONST_Packet4d(cephes_exp_q3, 2.00000000000000000009e0);
-
- _EIGEN_DECLARE_CONST_Packet4d(cephes_exp_C1, 0.693145751953125);
- _EIGEN_DECLARE_CONST_Packet4d(cephes_exp_C2, 1.42860682030941723212e-6);
- _EIGEN_DECLARE_CONST_Packet4i(1023, 1023);
-
- Packet4d tmp, fx;
-
- // clamp x
- x = pmax(pmin(x, p4d_exp_hi), p4d_exp_lo);
- // Express exp(x) as exp(g + n*log(2)).
- fx = pmadd(p4d_cephes_LOG2EF, x, p4d_half);
-
- // Get the integer modulus of log(2), i.e. the "n" described above.
- fx = _mm256_floor_pd(fx);
-
- // Get the remainder modulo log(2), i.e. the "g" described above. Subtract
- // n*log(2) out in two steps, i.e. n*C1 + n*C2, C1+C2=log2 to get the last
- // digits right.
- tmp = pmul(fx, p4d_cephes_exp_C1);
- Packet4d z = pmul(fx, p4d_cephes_exp_C2);
- x = psub(x, tmp);
- x = psub(x, z);
-
- Packet4d x2 = pmul(x, x);
-
- // Evaluate the numerator polynomial of the rational interpolant.
- Packet4d px = p4d_cephes_exp_p0;
- px = pmadd(px, x2, p4d_cephes_exp_p1);
- px = pmadd(px, x2, p4d_cephes_exp_p2);
- px = pmul(px, x);
-
- // Evaluate the denominator polynomial of the rational interpolant.
- Packet4d qx = p4d_cephes_exp_q0;
- qx = pmadd(qx, x2, p4d_cephes_exp_q1);
- qx = pmadd(qx, x2, p4d_cephes_exp_q2);
- qx = pmadd(qx, x2, p4d_cephes_exp_q3);
-
- // I don't really get this bit, copied from the SSE2 routines, so...
- // TODO(gonnet): Figure out what is going on here, perhaps find a better
- // rational interpolant?
- x = _mm256_div_pd(px, psub(qx, px));
- x = pmadd(p4d_2, x, p4d_1);
-
- // Build e=2^n by constructing the exponents in a 128-bit vector and
- // shifting them to where they belong in double-precision values.
- __m128i emm0 = _mm256_cvtpd_epi32(fx);
- emm0 = _mm_add_epi32(emm0, p4i_1023);
- emm0 = _mm_shuffle_epi32(emm0, _MM_SHUFFLE(3, 1, 2, 0));
- __m128i lo = _mm_slli_epi64(emm0, 52);
- __m128i hi = _mm_slli_epi64(_mm_srli_epi64(emm0, 32), 52);
- __m256i e = _mm256_insertf128_si256(_mm256_setzero_si256(), lo, 0);
- e = _mm256_insertf128_si256(e, hi, 1);
-
- // Construct the result 2^n * exp(g) = e * x. The max is used to catch
- // non-finite values in the input.
- return pmax(pmul(x, _mm256_castsi256_pd(e)), _x);
+pexp<Packet4d>(const Packet4d& x) {
+ return pexp_double(x);
}
// Functions for sqrt.
diff --git a/Eigen/src/Core/arch/AVX/PacketMath.h b/Eigen/src/Core/arch/AVX/PacketMath.h
index 774e64981..f88e36024 100644
--- a/Eigen/src/Core/arch/AVX/PacketMath.h
+++ b/Eigen/src/Core/arch/AVX/PacketMath.h
@@ -18,11 +18,11 @@ namespace internal {
#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8
#endif
-#ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS
-#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS (2*sizeof(void*))
+#if !defined(EIGEN_VECTORIZE_AVX512) && !defined(EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS)
+#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 16
#endif
-#ifdef __FMA__
+#ifdef EIGEN_VECTORIZE_FMA
#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
#define EIGEN_HAS_SINGLE_INSTRUCTION_MADD
#endif
@@ -63,7 +63,7 @@ template<> struct packet_traits<float> : default_packet_traits
HasDiv = 1,
HasSin = EIGEN_FAST_MATH,
- HasCos = 0,
+ HasCos = EIGEN_FAST_MATH,
HasLog = 1,
HasExp = 1,
HasSqrt = 1,
@@ -113,14 +113,29 @@ template<> struct packet_traits<int> : default_packet_traits
};
*/
-template<> struct unpacket_traits<Packet8f> { typedef float type; typedef Packet4f half; enum {size=8, alignment=Aligned32}; };
-template<> struct unpacket_traits<Packet4d> { typedef double type; typedef Packet2d half; enum {size=4, alignment=Aligned32}; };
-template<> struct unpacket_traits<Packet8i> { typedef int type; typedef Packet4i half; enum {size=8, alignment=Aligned32}; };
+template<> struct unpacket_traits<Packet8f> {
+ typedef float type;
+ typedef Packet4f half;
+ typedef Packet8i integer_packet;
+ enum {size=8, alignment=Aligned32, vectorizable=true};
+};
+template<> struct unpacket_traits<Packet4d> {
+ typedef double type;
+ typedef Packet2d half;
+ enum {size=4, alignment=Aligned32, vectorizable=true};
+};
+template<> struct unpacket_traits<Packet8i> { typedef int type; typedef Packet4i half; enum {size=8, alignment=Aligned32, vectorizable=false}; };
template<> EIGEN_STRONG_INLINE Packet8f pset1<Packet8f>(const float& from) { return _mm256_set1_ps(from); }
template<> EIGEN_STRONG_INLINE Packet4d pset1<Packet4d>(const double& from) { return _mm256_set1_pd(from); }
template<> EIGEN_STRONG_INLINE Packet8i pset1<Packet8i>(const int& from) { return _mm256_set1_epi32(from); }
+template<> EIGEN_STRONG_INLINE Packet8f pset1frombits<Packet8f>(unsigned int from) { return _mm256_castsi256_ps(pset1<Packet8i>(from)); }
+
+template<> EIGEN_STRONG_INLINE Packet8f pzero(const Packet8f& /*a*/) { return _mm256_setzero_ps(); }
+template<> EIGEN_STRONG_INLINE Packet4d pzero(const Packet4d& /*a*/) { return _mm256_setzero_pd(); }
+template<> EIGEN_STRONG_INLINE Packet8i pzero(const Packet8i& /*a*/) { return _mm256_setzero_si256(); }
+
template<> EIGEN_STRONG_INLINE Packet8f pload1<Packet8f>(const float* from) { return _mm256_broadcast_ss(from); }
template<> EIGEN_STRONG_INLINE Packet4d pload1<Packet4d>(const double* from) { return _mm256_broadcast_sd(from); }
@@ -129,6 +144,15 @@ template<> EIGEN_STRONG_INLINE Packet4d plset<Packet4d>(const double& a) { retur
template<> EIGEN_STRONG_INLINE Packet8f padd<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_add_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet4d padd<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_add_pd(a,b); }
+template<> EIGEN_STRONG_INLINE Packet8i padd<Packet8i>(const Packet8i& a, const Packet8i& b) {
+#ifdef EIGEN_VECTORIZE_AVX2
+ return _mm256_add_epi32(a,b);
+#else
+ __m128i lo = _mm_add_epi32(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
+ __m128i hi = _mm_add_epi32(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
+ return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
+#endif
+}
template<> EIGEN_STRONG_INLINE Packet8f psub<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_sub_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet4d psub<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_sub_pd(a,b); }
@@ -157,13 +181,14 @@ template<> EIGEN_STRONG_INLINE Packet8i pdiv<Packet8i>(const Packet8i& /*a*/, co
return pset1<Packet8i>(0);
}
-#ifdef __FMA__
+#ifdef EIGEN_VECTORIZE_FMA
template<> EIGEN_STRONG_INLINE Packet8f pmadd(const Packet8f& a, const Packet8f& b, const Packet8f& c) {
-#if ( EIGEN_COMP_GNUC_STRICT || (EIGEN_COMP_CLANG && (EIGEN_COMP_CLANG<308)) )
- // clang stupidly generates a vfmadd213ps instruction plus some vmovaps on registers,
- // and gcc stupidly generates a vfmadd132ps instruction,
- // so let's enforce it to generate a vfmadd231ps instruction since the most common use case is to accumulate
- // the result of the product.
+#if ( (EIGEN_COMP_GNUC_STRICT && EIGEN_COMP_GNUC<80) || (EIGEN_COMP_CLANG) )
+ // Clang stupidly generates a vfmadd213ps instruction plus some vmovaps on registers,
+ // and even register spilling with clang>=6.0 (bug 1637).
+ // Gcc stupidly generates a vfmadd132ps instruction.
+ // So let's enforce it to generate a vfmadd231ps instruction since the most common use
+ // case is to accumulate the result of the product.
Packet8f res = c;
__asm__("vfmadd231ps %[a], %[b], %[c]" : [c] "+x" (res) : [a] "x" (a), [b] "x" (b));
return res;
@@ -172,7 +197,7 @@ template<> EIGEN_STRONG_INLINE Packet8f pmadd(const Packet8f& a, const Packet8f&
#endif
}
template<> EIGEN_STRONG_INLINE Packet4d pmadd(const Packet4d& a, const Packet4d& b, const Packet4d& c) {
-#if ( EIGEN_COMP_GNUC_STRICT || (EIGEN_COMP_CLANG && (EIGEN_COMP_CLANG<308)) )
+#if ( (EIGEN_COMP_GNUC_STRICT && EIGEN_COMP_GNUC<80) || (EIGEN_COMP_CLANG) )
// see above
Packet4d res = c;
__asm__("vfmadd231pd %[a], %[b], %[c]" : [c] "+x" (res) : [a] "x" (a), [b] "x" (b));
@@ -184,21 +209,69 @@ template<> EIGEN_STRONG_INLINE Packet4d pmadd(const Packet4d& a, const Packet4d&
#endif
template<> EIGEN_STRONG_INLINE Packet8f pmin<Packet8f>(const Packet8f& a, const Packet8f& b) {
+#if EIGEN_COMP_GNUC && EIGEN_COMP_GNUC < 63
+ // There appears to be a bug in GCC, by which the optimizer may flip
+ // the argument order in calls to _mm_min_ps/_mm_max_ps, so we have to
+ // resort to inline ASM here. This is supposed to be fixed in gcc6.3,
+ // see also: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=72867
+ Packet8f res;
+ asm("vminps %[a], %[b], %[res]" : [res] "=x" (res) : [a] "x" (a), [b] "x" (b));
+ return res;
+#else
// Arguments are swapped to match NaN propagation behavior of std::min.
return _mm256_min_ps(b,a);
+#endif
}
template<> EIGEN_STRONG_INLINE Packet4d pmin<Packet4d>(const Packet4d& a, const Packet4d& b) {
+#if EIGEN_COMP_GNUC && EIGEN_COMP_GNUC < 63
+ // See pmin above
+ Packet4d res;
+ asm("vminpd %[a], %[b], %[res]" : [res] "=x" (res) : [a] "x" (a), [b] "x" (b));
+ return res;
+#else
// Arguments are swapped to match NaN propagation behavior of std::min.
return _mm256_min_pd(b,a);
+#endif
}
template<> EIGEN_STRONG_INLINE Packet8f pmax<Packet8f>(const Packet8f& a, const Packet8f& b) {
+#if EIGEN_COMP_GNUC && EIGEN_COMP_GNUC < 63
+ // See pmin above
+ Packet8f res;
+ asm("vmaxps %[a], %[b], %[res]" : [res] "=x" (res) : [a] "x" (a), [b] "x" (b));
+ return res;
+#else
// Arguments are swapped to match NaN propagation behavior of std::max.
return _mm256_max_ps(b,a);
+#endif
}
template<> EIGEN_STRONG_INLINE Packet4d pmax<Packet4d>(const Packet4d& a, const Packet4d& b) {
+#if EIGEN_COMP_GNUC && EIGEN_COMP_GNUC < 63
+ // See pmin above
+ Packet4d res;
+ asm("vmaxpd %[a], %[b], %[res]" : [res] "=x" (res) : [a] "x" (a), [b] "x" (b));
+ return res;
+#else
// Arguments are swapped to match NaN propagation behavior of std::max.
return _mm256_max_pd(b,a);
+#endif
}
+
+template<> EIGEN_STRONG_INLINE Packet8f pcmp_le(const Packet8f& a, const Packet8f& b) { return _mm256_cmp_ps(a,b,_CMP_LE_OQ); }
+template<> EIGEN_STRONG_INLINE Packet8f pcmp_lt(const Packet8f& a, const Packet8f& b) { return _mm256_cmp_ps(a,b,_CMP_LT_OQ); }
+template<> EIGEN_STRONG_INLINE Packet8f pcmp_eq(const Packet8f& a, const Packet8f& b) { return _mm256_cmp_ps(a,b,_CMP_EQ_OQ); }
+template<> EIGEN_STRONG_INLINE Packet4d pcmp_eq(const Packet4d& a, const Packet4d& b) { return _mm256_cmp_pd(a,b,_CMP_EQ_OQ); }
+template<> EIGEN_STRONG_INLINE Packet8f pcmp_lt_or_nan(const Packet8f& a, const Packet8f& b) { return _mm256_cmp_ps(a, b, _CMP_NGE_UQ); }
+
+template<> EIGEN_STRONG_INLINE Packet8i pcmp_eq(const Packet8i& a, const Packet8i& b) {
+#ifdef EIGEN_VECTORIZE_AVX2
+ return _mm256_cmpeq_epi32(a,b);
+#else
+ __m128i lo = _mm_cmpeq_epi32(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
+ __m128i hi = _mm_cmpeq_epi32(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
+ return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
+#endif
+}
+
template<> EIGEN_STRONG_INLINE Packet8f pround<Packet8f>(const Packet8f& a) { return _mm256_round_ps(a, _MM_FROUND_CUR_DIRECTION); }
template<> EIGEN_STRONG_INLINE Packet4d pround<Packet4d>(const Packet4d& a) { return _mm256_round_pd(a, _MM_FROUND_CUR_DIRECTION); }
@@ -208,17 +281,101 @@ template<> EIGEN_STRONG_INLINE Packet4d pceil<Packet4d>(const Packet4d& a) { ret
template<> EIGEN_STRONG_INLINE Packet8f pfloor<Packet8f>(const Packet8f& a) { return _mm256_floor_ps(a); }
template<> EIGEN_STRONG_INLINE Packet4d pfloor<Packet4d>(const Packet4d& a) { return _mm256_floor_pd(a); }
+
+template<> EIGEN_STRONG_INLINE Packet8i ptrue<Packet8i>(const Packet8i& a) {
+#ifdef EIGEN_VECTORIZE_AVX2
+ // vpcmpeqd has lower latency than the more general vcmpps
+ return _mm256_cmpeq_epi32(a,a);
+#else
+ const __m256 b = _mm256_castsi256_ps(a);
+ return _mm256_castps_si256(_mm256_cmp_ps(b,b,_CMP_TRUE_UQ));
+#endif
+}
+
+template<> EIGEN_STRONG_INLINE Packet8f ptrue<Packet8f>(const Packet8f& a) {
+#ifdef EIGEN_VECTORIZE_AVX2
+ // vpcmpeqd has lower latency than the more general vcmpps
+ const __m256i b = _mm256_castps_si256(a);
+ return _mm256_castsi256_ps(_mm256_cmpeq_epi32(b,b));
+#else
+ return _mm256_cmp_ps(a,a,_CMP_TRUE_UQ);
+#endif
+}
+
+template<> EIGEN_STRONG_INLINE Packet4d ptrue<Packet4d>(const Packet4d& a) {
+#ifdef EIGEN_VECTORIZE_AVX2
+ // vpcmpeqq has lower latency than the more general vcmppd
+ const __m256i b = _mm256_castpd_si256(a);
+ return _mm256_castsi256_pd(_mm256_cmpeq_epi64(b,b));
+#else
+ return _mm256_cmp_pd(a,a,_CMP_TRUE_UQ);
+#endif
+}
+
template<> EIGEN_STRONG_INLINE Packet8f pand<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_and_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet4d pand<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_and_pd(a,b); }
+template<> EIGEN_STRONG_INLINE Packet8i pand<Packet8i>(const Packet8i& a, const Packet8i& b) {
+#ifdef EIGEN_VECTORIZE_AVX2
+ return _mm256_and_si256(a,b);
+#else
+ return _mm256_castps_si256(_mm256_and_ps(_mm256_castsi256_ps(a),_mm256_castsi256_ps(b)));
+#endif
+}
template<> EIGEN_STRONG_INLINE Packet8f por<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_or_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet4d por<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_or_pd(a,b); }
+template<> EIGEN_STRONG_INLINE Packet8i por<Packet8i>(const Packet8i& a, const Packet8i& b) {
+#ifdef EIGEN_VECTORIZE_AVX2
+ return _mm256_or_si256(a,b);
+#else
+ return _mm256_castps_si256(_mm256_or_ps(_mm256_castsi256_ps(a),_mm256_castsi256_ps(b)));
+#endif
+}
template<> EIGEN_STRONG_INLINE Packet8f pxor<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_xor_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet4d pxor<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_xor_pd(a,b); }
+template<> EIGEN_STRONG_INLINE Packet8i pxor<Packet8i>(const Packet8i& a, const Packet8i& b) {
+#ifdef EIGEN_VECTORIZE_AVX2
+ return _mm256_xor_si256(a,b);
+#else
+ return _mm256_castps_si256(_mm256_xor_ps(_mm256_castsi256_ps(a),_mm256_castsi256_ps(b)));
+#endif
+}
+
+template<> EIGEN_STRONG_INLINE Packet8f pandnot<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_andnot_ps(b,a); }
+template<> EIGEN_STRONG_INLINE Packet4d pandnot<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_andnot_pd(b,a); }
+template<> EIGEN_STRONG_INLINE Packet8i pandnot<Packet8i>(const Packet8i& a, const Packet8i& b) {
+#ifdef EIGEN_VECTORIZE_AVX2
+ return _mm256_andnot_si256(b,a);
+#else
+ return _mm256_castps_si256(_mm256_andnot_ps(_mm256_castsi256_ps(b),_mm256_castsi256_ps(a)));
+#endif
+}
-template<> EIGEN_STRONG_INLINE Packet8f pandnot<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_andnot_ps(a,b); }
-template<> EIGEN_STRONG_INLINE Packet4d pandnot<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_andnot_pd(a,b); }
+template<> EIGEN_STRONG_INLINE Packet8f pselect<Packet8f>(const Packet8f& mask, const Packet8f& a, const Packet8f& b)
+{ return _mm256_blendv_ps(b,a,mask); }
+template<> EIGEN_STRONG_INLINE Packet4d pselect<Packet4d>(const Packet4d& mask, const Packet4d& a, const Packet4d& b)
+{ return _mm256_blendv_pd(b,a,mask); }
+
+template<int N> EIGEN_STRONG_INLINE Packet8i pshiftright(Packet8i a) {
+#ifdef EIGEN_VECTORIZE_AVX2
+ return _mm256_srli_epi32(a, N);
+#else
+ __m128i lo = _mm_srli_epi32(_mm256_extractf128_si256(a, 0), N);
+ __m128i hi = _mm_srli_epi32(_mm256_extractf128_si256(a, 1), N);
+ return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
+#endif
+}
+
+template<int N> EIGEN_STRONG_INLINE Packet8i pshiftleft(Packet8i a) {
+#ifdef EIGEN_VECTORIZE_AVX2
+ return _mm256_slli_epi32(a, N);
+#else
+ __m128i lo = _mm_slli_epi32(_mm256_extractf128_si256(a, 0), N);
+ __m128i hi = _mm_slli_epi32(_mm256_extractf128_si256(a, 1), N);
+ return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
+#endif
+}
template<> EIGEN_STRONG_INLINE Packet8f pload<Packet8f>(const float* from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_ps(from); }
template<> EIGEN_STRONG_INLINE Packet4d pload<Packet4d>(const double* from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_pd(from); }
@@ -363,6 +520,28 @@ template<> EIGEN_STRONG_INLINE Packet4d pabs(const Packet4d& a)
return _mm256_and_pd(a,mask);
}
+template<> EIGEN_STRONG_INLINE Packet8f pfrexp<Packet8f>(const Packet8f& a, Packet8f& exponent) {
+ return pfrexp_float(a,exponent);
+}
+
+template<> EIGEN_STRONG_INLINE Packet8f pldexp<Packet8f>(const Packet8f& a, const Packet8f& exponent) {
+ return pldexp_float(a,exponent);
+}
+
+template<> EIGEN_STRONG_INLINE Packet4d pldexp<Packet4d>(const Packet4d& a, const Packet4d& exponent) {
+ // Build e=2^n by constructing the exponents in a 128-bit vector and
+ // shifting them to where they belong in double-precision values.
+ Packet4i cst_1023 = pset1<Packet4i>(1023);
+ __m128i emm0 = _mm256_cvtpd_epi32(exponent);
+ emm0 = _mm_add_epi32(emm0, cst_1023);
+ emm0 = _mm_shuffle_epi32(emm0, _MM_SHUFFLE(3, 1, 2, 0));
+ __m128i lo = _mm_slli_epi64(emm0, 52);
+ __m128i hi = _mm_slli_epi64(_mm_srli_epi64(emm0, 32), 52);
+ __m256i e = _mm256_insertf128_si256(_mm256_setzero_si256(), lo, 0);
+ e = _mm256_insertf128_si256(e, hi, 1);
+ return pmul(a,_mm256_castsi256_pd(e));
+}
+
// preduxp should be ok
// FIXME: why is this ok? why isn't the simply implementation working as expected?
template<> EIGEN_STRONG_INLINE Packet8f preduxp<Packet8f>(const Packet8f* vecs)
@@ -459,6 +638,16 @@ template<> EIGEN_STRONG_INLINE double predux_max<Packet4d>(const Packet4d& a)
return pfirst(_mm256_max_pd(tmp, _mm256_shuffle_pd(tmp, tmp, 1)));
}
+// not needed yet
+// template<> EIGEN_STRONG_INLINE bool predux_all(const Packet8f& x)
+// {
+// return _mm256_movemask_ps(x)==0xFF;
+// }
+
+template<> EIGEN_STRONG_INLINE bool predux_any(const Packet8f& x)
+{
+ return _mm256_movemask_ps(x)!=0;
+}
template<int Offset>
struct palign_impl<Offset,Packet8f>
diff --git a/Eigen/src/Core/arch/AVX/TypeCasting.h b/Eigen/src/Core/arch/AVX/TypeCasting.h
index 83bfdc604..7d2e1e67f 100644
--- a/Eigen/src/Core/arch/AVX/TypeCasting.h
+++ b/Eigen/src/Core/arch/AVX/TypeCasting.h
@@ -37,13 +37,21 @@ struct type_casting_traits<int, float> {
template<> EIGEN_STRONG_INLINE Packet8i pcast<Packet8f, Packet8i>(const Packet8f& a) {
- return _mm256_cvtps_epi32(a);
+ return _mm256_cvttps_epi32(a);
}
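Switching from _mm256_cvtps_epi32 to _mm256_cvttps_epi32 changes the conversion from round-to-nearest to truncation toward zero, matching a C++ float-to-int cast. A scalar sketch of the difference (illustration only; both helper names are hypothetical):

#include <cmath>
int pcast_ref(float x)    { return static_cast<int>(x); }                  // truncate: 1.7f -> 1
int old_rounding(float x) { return static_cast<int>(std::nearbyint(x)); }  // round:    1.7f -> 2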
template<> EIGEN_STRONG_INLINE Packet8f pcast<Packet8i, Packet8f>(const Packet8i& a) {
return _mm256_cvtepi32_ps(a);
}
+template<> EIGEN_STRONG_INLINE Packet8i preinterpret<Packet8i,Packet8f>(const Packet8f& a) {
+ return _mm256_castps_si256(a);
+}
+
+template<> EIGEN_STRONG_INLINE Packet8f preinterpret<Packet8f,Packet8i>(const Packet8i& a) {
+ return _mm256_castsi256_ps(a);
+}
+
} // end namespace internal
} // end namespace Eigen
diff --git a/Eigen/src/Core/arch/AVX512/Complex.h b/Eigen/src/Core/arch/AVX512/Complex.h
new file mode 100644
index 000000000..9a89dd01f
--- /dev/null
+++ b/Eigen/src/Core/arch/AVX512/Complex.h
@@ -0,0 +1,488 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2018 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_COMPLEX_AVX512_H
+#define EIGEN_COMPLEX_AVX512_H
+
+namespace Eigen {
+
+namespace internal {
+
+//---------- float ----------
+struct Packet8cf
+{
+ EIGEN_STRONG_INLINE Packet8cf() {}
+ EIGEN_STRONG_INLINE explicit Packet8cf(const __m512& a) : v(a) {}
+ __m512 v;
+};
+
+template<> struct packet_traits<std::complex<float> > : default_packet_traits
+{
+ typedef Packet8cf type;
+ typedef Packet4cf half;
+ enum {
+ Vectorizable = 1,
+ AlignedOnScalar = 1,
+ size = 8,
+ HasHalfPacket = 1,
+
+ HasAdd = 1,
+ HasSub = 1,
+ HasMul = 1,
+ HasDiv = 1,
+ HasNegate = 1,
+ HasAbs = 0,
+ HasAbs2 = 0,
+ HasMin = 0,
+ HasMax = 0,
+ HasSetLinear = 0,
+ HasReduxp = 0
+ };
+};
+
+template<> struct unpacket_traits<Packet8cf> {
+ typedef std::complex<float> type;
+ enum {
+ size = 8,
+ alignment=unpacket_traits<Packet16f>::alignment,
+ vectorizable=true
+ };
+ typedef Packet4cf half;
+};
+
+template<> EIGEN_STRONG_INLINE Packet8cf ptrue<Packet8cf>(const Packet8cf& a) { return Packet8cf(ptrue(Packet16f(a.v))); }
+template<> EIGEN_STRONG_INLINE Packet8cf pnot<Packet8cf>(const Packet8cf& a) { return Packet8cf(pnot(Packet16f(a.v))); }
+template<> EIGEN_STRONG_INLINE Packet8cf padd<Packet8cf>(const Packet8cf& a, const Packet8cf& b) { return Packet8cf(_mm512_add_ps(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet8cf psub<Packet8cf>(const Packet8cf& a, const Packet8cf& b) { return Packet8cf(_mm512_sub_ps(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet8cf pnegate(const Packet8cf& a)
+{
+ return Packet8cf(pnegate(a.v));
+}
+template<> EIGEN_STRONG_INLINE Packet8cf pconj(const Packet8cf& a)
+{
+ const __m512 mask = _mm512_castsi512_ps(_mm512_setr_epi32(
+ 0x00000000,0x80000000,0x00000000,0x80000000,0x00000000,0x80000000,0x00000000,0x80000000,
+ 0x00000000,0x80000000,0x00000000,0x80000000,0x00000000,0x80000000,0x00000000,0x80000000));
+ return Packet8cf(pxor(a.v,mask));
+}
+
+template<> EIGEN_STRONG_INLINE Packet8cf pmul<Packet8cf>(const Packet8cf& a, const Packet8cf& b)
+{
+ __m512 tmp2 = _mm512_mul_ps(_mm512_movehdup_ps(a.v), _mm512_permute_ps(b.v, _MM_SHUFFLE(2,3,0,1)));
+ return Packet8cf(_mm512_fmaddsub_ps(_mm512_moveldup_ps(a.v), b.v, tmp2));
+}
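The moveldup/movehdup plus fmaddsub sequence evaluates the standard complex product lane by lane. A scalar reference (a sketch; cmul_ref is an illustrative name):

#include <complex>
std::complex<float> cmul_ref(std::complex<float> a, std::complex<float> b) {
  return { a.real()*b.real() - a.imag()*b.imag(),    // even slots: subtract
           a.real()*b.imag() + a.imag()*b.real() };  // odd slots:  add
}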
+
+template<> EIGEN_STRONG_INLINE Packet8cf pand <Packet8cf>(const Packet8cf& a, const Packet8cf& b) { return Packet8cf(pand(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet8cf por <Packet8cf>(const Packet8cf& a, const Packet8cf& b) { return Packet8cf(por(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet8cf pxor <Packet8cf>(const Packet8cf& a, const Packet8cf& b) { return Packet8cf(pxor(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet8cf pandnot<Packet8cf>(const Packet8cf& a, const Packet8cf& b) { return Packet8cf(pandnot(a.v,b.v)); }
+
+template <>
+EIGEN_STRONG_INLINE Packet8cf pcmp_eq(const Packet8cf& a, const Packet8cf& b) {
+ __m512 eq = pcmp_eq<Packet16f>(a.v, b.v);
+ return Packet8cf(pand(eq, _mm512_permute_ps(eq, 0xB1)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet8cf pload <Packet8cf>(const std::complex<float>* from) { EIGEN_DEBUG_ALIGNED_LOAD return Packet8cf(pload<Packet16f>(&numext::real_ref(*from))); }
+template<> EIGEN_STRONG_INLINE Packet8cf ploadu<Packet8cf>(const std::complex<float>* from) { EIGEN_DEBUG_UNALIGNED_LOAD return Packet8cf(ploadu<Packet16f>(&numext::real_ref(*from))); }
+
+
+template<> EIGEN_STRONG_INLINE Packet8cf pset1<Packet8cf>(const std::complex<float>& from)
+{
+ return Packet8cf(_mm512_castpd_ps(pload1<Packet8d>((const double*)(const void*)&from)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet8cf ploaddup<Packet8cf>(const std::complex<float>* from)
+{
+ return Packet8cf( _mm512_castpd_ps( ploaddup<Packet8d>((const double*)(const void*)from )) );
+}
+template<> EIGEN_STRONG_INLINE Packet8cf ploadquad<Packet8cf>(const std::complex<float>* from)
+{
+ return Packet8cf( _mm512_castpd_ps( ploadquad<Packet8d>((const double*)(const void*)from )) );
+}
+
+template<> EIGEN_STRONG_INLINE void pstore <std::complex<float> >(std::complex<float>* to, const Packet8cf& from) { EIGEN_DEBUG_ALIGNED_STORE pstore(&numext::real_ref(*to), from.v); }
+template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<float> >(std::complex<float>* to, const Packet8cf& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu(&numext::real_ref(*to), from.v); }
+
+template<> EIGEN_DEVICE_FUNC inline Packet8cf pgather<std::complex<float>, Packet8cf>(const std::complex<float>* from, Index stride)
+{
+ return Packet8cf(_mm512_castpd_ps(pgather<double,Packet8d>((const double*)(const void*)from, stride)));
+}
+
+template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<float>, Packet8cf>(std::complex<float>* to, const Packet8cf& from, Index stride)
+{
+ pscatter((double*)(void*)to, _mm512_castps_pd(from.v), stride);
+}
+
+template<> EIGEN_STRONG_INLINE std::complex<float> pfirst<Packet8cf>(const Packet8cf& a)
+{
+ return pfirst(Packet2cf(_mm512_castps512_ps128(a.v)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet8cf preverse(const Packet8cf& a) {
+ return Packet8cf(_mm512_castsi512_ps(
+ _mm512_permutexvar_epi64( _mm512_set_epi32(0, 0, 0, 1, 0, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7),
+ _mm512_castps_si512(a.v))));
+}
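preverse permutes 64-bit lanes (one per complex value) in reverse order, so each (real, imag) pair stays intact while the eight values are reversed. A scalar sketch of the lane pattern (illustration only):

#include <array>
#include <complex>
std::array<std::complex<float>,8> preverse_ref(const std::array<std::complex<float>,8>& a) {
  std::array<std::complex<float>,8> r;
  for (int i = 0; i < 8; ++i) r[i] = a[7 - i];  // reverse complex values, keep re/im together
  return r;
}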
+
+template<> EIGEN_STRONG_INLINE std::complex<float> predux<Packet8cf>(const Packet8cf& a)
+{
+ return predux(padd(Packet4cf(extract256<0>(a.v)),
+ Packet4cf(extract256<1>(a.v))));
+}
+
+template<> EIGEN_STRONG_INLINE std::complex<float> predux_mul<Packet8cf>(const Packet8cf& a)
+{
+ return predux_mul(pmul(Packet4cf(extract256<0>(a.v)),
+ Packet4cf(extract256<1>(a.v))));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4cf predux_half_dowto4<Packet8cf>(const Packet8cf& a) {
+ __m256 lane0 = extract256<0>(a.v);
+ __m256 lane1 = extract256<1>(a.v);
+ __m256 res = _mm256_add_ps(lane0, lane1);
+ return Packet4cf(res);
+}
+
+template<int Offset>
+struct palign_impl<Offset,Packet8cf>
+{
+ static EIGEN_STRONG_INLINE void run(Packet8cf& first, const Packet8cf& second)
+ {
+ if (Offset==0) return;
+ palign_impl<Offset*2,Packet16f>::run(first.v, second.v);
+ }
+};
+
+template<> struct conj_helper<Packet8cf, Packet8cf, false,true>
+{
+ EIGEN_STRONG_INLINE Packet8cf pmadd(const Packet8cf& x, const Packet8cf& y, const Packet8cf& c) const
+ { return padd(pmul(x,y),c); }
+
+ EIGEN_STRONG_INLINE Packet8cf pmul(const Packet8cf& a, const Packet8cf& b) const
+ {
+ return internal::pmul(a, pconj(b));
+ }
+};
+
+template<> struct conj_helper<Packet8cf, Packet8cf, true,false>
+{
+ EIGEN_STRONG_INLINE Packet8cf pmadd(const Packet8cf& x, const Packet8cf& y, const Packet8cf& c) const
+ { return padd(pmul(x,y),c); }
+
+ EIGEN_STRONG_INLINE Packet8cf pmul(const Packet8cf& a, const Packet8cf& b) const
+ {
+ return internal::pmul(pconj(a), b);
+ }
+};
+
+template<> struct conj_helper<Packet8cf, Packet8cf, true,true>
+{
+ EIGEN_STRONG_INLINE Packet8cf pmadd(const Packet8cf& x, const Packet8cf& y, const Packet8cf& c) const
+ { return padd(pmul(x,y),c); }
+
+ EIGEN_STRONG_INLINE Packet8cf pmul(const Packet8cf& a, const Packet8cf& b) const
+ {
+ return pconj(internal::pmul(a, b));
+ }
+};
+
+EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet8cf,Packet16f)
+
+template<> EIGEN_STRONG_INLINE Packet8cf pdiv<Packet8cf>(const Packet8cf& a, const Packet8cf& b)
+{
+ Packet8cf num = pmul(a, pconj(b));
+ __m512 tmp = _mm512_mul_ps(b.v, b.v);
+ __m512 tmp2 = _mm512_shuffle_ps(tmp,tmp,0xB1);
+ __m512 denom = _mm512_add_ps(tmp, tmp2);
+ return Packet8cf(_mm512_div_ps(num.v, denom));
+}
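pdiv relies on the identity a/b = a*conj(b) / |b|^2; the shuffle duplicates re^2 + im^2 into both slots of each lane before the division. A scalar reference (sketch only; cdiv_ref is an illustrative name):

#include <complex>
std::complex<float> cdiv_ref(std::complex<float> a, std::complex<float> b) {
  float denom = b.real()*b.real() + b.imag()*b.imag();  // |b|^2, shared by both slots
  std::complex<float> num = a * std::conj(b);           // a * conj(b)
  return { num.real()/denom, num.imag()/denom };
}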
+
+template<> EIGEN_STRONG_INLINE Packet8cf pcplxflip<Packet8cf>(const Packet8cf& x)
+{
+ return Packet8cf(_mm512_shuffle_ps(x.v, x.v, _MM_SHUFFLE(2, 3, 0 ,1)));
+}
+
+//---------- double ----------
+struct Packet4cd
+{
+ EIGEN_STRONG_INLINE Packet4cd() {}
+ EIGEN_STRONG_INLINE explicit Packet4cd(const __m512d& a) : v(a) {}
+ __m512d v;
+};
+
+template<> struct packet_traits<std::complex<double> > : default_packet_traits
+{
+ typedef Packet4cd type;
+ typedef Packet2cd half;
+ enum {
+ Vectorizable = 1,
+ AlignedOnScalar = 0,
+ size = 4,
+ HasHalfPacket = 1,
+
+ HasAdd = 1,
+ HasSub = 1,
+ HasMul = 1,
+ HasDiv = 1,
+ HasNegate = 1,
+ HasAbs = 0,
+ HasAbs2 = 0,
+ HasMin = 0,
+ HasMax = 0,
+ HasSetLinear = 0,
+ HasReduxp = 0
+ };
+};
+
+template<> struct unpacket_traits<Packet4cd> {
+ typedef std::complex<double> type;
+ enum {
+ size = 4,
+ alignment = unpacket_traits<Packet8d>::alignment,
+ vectorizable=true
+ };
+ typedef Packet2cd half;
+};
+
+template<> EIGEN_STRONG_INLINE Packet4cd padd<Packet4cd>(const Packet4cd& a, const Packet4cd& b) { return Packet4cd(_mm512_add_pd(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet4cd psub<Packet4cd>(const Packet4cd& a, const Packet4cd& b) { return Packet4cd(_mm512_sub_pd(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet4cd pnegate(const Packet4cd& a) { return Packet4cd(pnegate(a.v)); }
+template<> EIGEN_STRONG_INLINE Packet4cd pconj(const Packet4cd& a)
+{
+ const __m512d mask = _mm512_castsi512_pd(
+ _mm512_set_epi32(0x80000000,0x0,0x0,0x0,0x80000000,0x0,0x0,0x0,
+ 0x80000000,0x0,0x0,0x0,0x80000000,0x0,0x0,0x0));
+ return Packet4cd(pxor(a.v,mask));
+}
+
+template<> EIGEN_STRONG_INLINE Packet4cd pmul<Packet4cd>(const Packet4cd& a, const Packet4cd& b)
+{
+ __m512d tmp1 = _mm512_shuffle_pd(a.v,a.v,0x0);
+ __m512d tmp2 = _mm512_shuffle_pd(a.v,a.v,0xFF);
+ __m512d tmp3 = _mm512_shuffle_pd(b.v,b.v,0x55);
+ __m512d odd = _mm512_mul_pd(tmp2, tmp3);
+ return Packet4cd(_mm512_fmaddsub_pd(tmp1, b.v, odd));
+}
+
+template<> EIGEN_STRONG_INLINE Packet4cd ptrue<Packet4cd>(const Packet4cd& a) { return Packet4cd(ptrue(Packet8d(a.v))); }
+template<> EIGEN_STRONG_INLINE Packet4cd pnot<Packet4cd>(const Packet4cd& a) { return Packet4cd(pnot(Packet8d(a.v))); }
+template<> EIGEN_STRONG_INLINE Packet4cd pand <Packet4cd>(const Packet4cd& a, const Packet4cd& b) { return Packet4cd(pand(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet4cd por <Packet4cd>(const Packet4cd& a, const Packet4cd& b) { return Packet4cd(por(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet4cd pxor <Packet4cd>(const Packet4cd& a, const Packet4cd& b) { return Packet4cd(pxor(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet4cd pandnot<Packet4cd>(const Packet4cd& a, const Packet4cd& b) { return Packet4cd(pandnot(a.v,b.v)); }
+
+template <>
+EIGEN_STRONG_INLINE Packet4cd pcmp_eq(const Packet4cd& a, const Packet4cd& b) {
+ __m512d eq = pcmp_eq<Packet8d>(a.v, b.v);
+ return Packet4cd(pand(eq, _mm512_permute_pd(eq, 0x55)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet4cd pload <Packet4cd>(const std::complex<double>* from)
+{ EIGEN_DEBUG_ALIGNED_LOAD return Packet4cd(pload<Packet8d>((const double*)from)); }
+template<> EIGEN_STRONG_INLINE Packet4cd ploadu<Packet4cd>(const std::complex<double>* from)
+{ EIGEN_DEBUG_UNALIGNED_LOAD return Packet4cd(ploadu<Packet8d>((const double*)from)); }
+
+template<> EIGEN_STRONG_INLINE Packet4cd pset1<Packet4cd>(const std::complex<double>& from)
+{
+ #ifdef EIGEN_VECTORIZE_AVX512DQ
+ return Packet4cd(_mm512_broadcast_f64x2(pset1<Packet1cd>(from).v));
+ #else
+ return Packet4cd(_mm512_castps_pd(_mm512_broadcast_f32x4( _mm_castpd_ps(pset1<Packet1cd>(from).v))));
+ #endif
+}
+
+template<> EIGEN_STRONG_INLINE Packet4cd ploaddup<Packet4cd>(const std::complex<double>* from) {
+ return Packet4cd(_mm512_insertf64x4(
+ _mm512_castpd256_pd512(ploaddup<Packet2cd>(from).v), ploaddup<Packet2cd>(from+1).v, 1));
+}
+
+template<> EIGEN_STRONG_INLINE void pstore <std::complex<double> >(std::complex<double> * to, const Packet4cd& from) { EIGEN_DEBUG_ALIGNED_STORE pstore((double*)to, from.v); }
+template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<double> >(std::complex<double> * to, const Packet4cd& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu((double*)to, from.v); }
+
+template<> EIGEN_DEVICE_FUNC inline Packet4cd pgather<std::complex<double>, Packet4cd>(const std::complex<double>* from, Index stride)
+{
+ return Packet4cd(_mm512_insertf64x4(_mm512_castpd256_pd512(
+ _mm256_insertf128_pd(_mm256_castpd128_pd256(ploadu<Packet1cd>(from+0*stride).v), ploadu<Packet1cd>(from+1*stride).v,1)),
+ _mm256_insertf128_pd(_mm256_castpd128_pd256(ploadu<Packet1cd>(from+2*stride).v), ploadu<Packet1cd>(from+3*stride).v,1), 1));
+}
+
+template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<double>, Packet4cd>(std::complex<double>* to, const Packet4cd& from, Index stride)
+{
+ __m512i fromi = _mm512_castpd_si512(from.v);
+ double* tod = (double*)(void*)to;
+ _mm_storeu_pd(tod+0*stride, _mm_castsi128_pd(_mm512_extracti32x4_epi32(fromi,0)) );
+ _mm_storeu_pd(tod+2*stride, _mm_castsi128_pd(_mm512_extracti32x4_epi32(fromi,1)) );
+ _mm_storeu_pd(tod+4*stride, _mm_castsi128_pd(_mm512_extracti32x4_epi32(fromi,2)) );
+ _mm_storeu_pd(tod+6*stride, _mm_castsi128_pd(_mm512_extracti32x4_epi32(fromi,3)) );
+}
+
+template<> EIGEN_STRONG_INLINE std::complex<double> pfirst<Packet4cd>(const Packet4cd& a)
+{
+ __m128d low = extract128<0>(a.v);
+ EIGEN_ALIGN16 double res[2];
+ _mm_store_pd(res, low);
+ return std::complex<double>(res[0],res[1]);
+}
+
+template<> EIGEN_STRONG_INLINE Packet4cd preverse(const Packet4cd& a) {
+ return Packet4cd(_mm512_shuffle_f64x2(a.v, a.v, EIGEN_SSE_SHUFFLE_MASK(3,2,1,0)));
+}
+
+template<> EIGEN_STRONG_INLINE std::complex<double> predux<Packet4cd>(const Packet4cd& a)
+{
+ return predux(padd(Packet2cd(_mm512_extractf64x4_pd(a.v,0)),
+ Packet2cd(_mm512_extractf64x4_pd(a.v,1))));
+}
+
+template<> EIGEN_STRONG_INLINE std::complex<double> predux_mul<Packet4cd>(const Packet4cd& a)
+{
+ return predux_mul(pmul(Packet2cd(_mm512_extractf64x4_pd(a.v,0)),
+ Packet2cd(_mm512_extractf64x4_pd(a.v,1))));
+}
+
+template<int Offset>
+struct palign_impl<Offset,Packet4cd>
+{
+ static EIGEN_STRONG_INLINE void run(Packet4cd& first, const Packet4cd& second)
+ {
+ if (Offset==0) return;
+ palign_impl<Offset*2,Packet8d>::run(first.v, second.v);
+ }
+};
+
+template<> struct conj_helper<Packet4cd, Packet4cd, false,true>
+{
+ EIGEN_STRONG_INLINE Packet4cd pmadd(const Packet4cd& x, const Packet4cd& y, const Packet4cd& c) const
+ { return padd(pmul(x,y),c); }
+
+ EIGEN_STRONG_INLINE Packet4cd pmul(const Packet4cd& a, const Packet4cd& b) const
+ {
+ return internal::pmul(a, pconj(b));
+ }
+};
+
+template<> struct conj_helper<Packet4cd, Packet4cd, true,false>
+{
+ EIGEN_STRONG_INLINE Packet4cd pmadd(const Packet4cd& x, const Packet4cd& y, const Packet4cd& c) const
+ { return padd(pmul(x,y),c); }
+
+ EIGEN_STRONG_INLINE Packet4cd pmul(const Packet4cd& a, const Packet4cd& b) const
+ {
+ return internal::pmul(pconj(a), b);
+ }
+};
+
+template<> struct conj_helper<Packet4cd, Packet4cd, true,true>
+{
+ EIGEN_STRONG_INLINE Packet4cd pmadd(const Packet4cd& x, const Packet4cd& y, const Packet4cd& c) const
+ { return padd(pmul(x,y),c); }
+
+ EIGEN_STRONG_INLINE Packet4cd pmul(const Packet4cd& a, const Packet4cd& b) const
+ {
+ return pconj(internal::pmul(a, b));
+ }
+};
+
+EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet4cd,Packet8d)
+
+template<> EIGEN_STRONG_INLINE Packet4cd pdiv<Packet4cd>(const Packet4cd& a, const Packet4cd& b)
+{
+ Packet4cd num = pmul(a, pconj(b));
+ __m512d tmp = _mm512_mul_pd(b.v, b.v);
+ __m512d denom = padd(_mm512_permute_pd(tmp,0x55), tmp);
+ return Packet4cd(_mm512_div_pd(num.v, denom));
+}
+
+template<> EIGEN_STRONG_INLINE Packet4cd pcplxflip<Packet4cd>(const Packet4cd& x)
+{
+ return Packet4cd(_mm512_permute_pd(x.v,0x55));
+}
+
+EIGEN_DEVICE_FUNC inline void
+ptranspose(PacketBlock<Packet8cf,4>& kernel) {
+ PacketBlock<Packet8d,4> pb;
+
+ pb.packet[0] = _mm512_castps_pd(kernel.packet[0].v);
+ pb.packet[1] = _mm512_castps_pd(kernel.packet[1].v);
+ pb.packet[2] = _mm512_castps_pd(kernel.packet[2].v);
+ pb.packet[3] = _mm512_castps_pd(kernel.packet[3].v);
+ ptranspose(pb);
+ kernel.packet[0].v = _mm512_castpd_ps(pb.packet[0]);
+ kernel.packet[1].v = _mm512_castpd_ps(pb.packet[1]);
+ kernel.packet[2].v = _mm512_castpd_ps(pb.packet[2]);
+ kernel.packet[3].v = _mm512_castpd_ps(pb.packet[3]);
+}
+
+EIGEN_DEVICE_FUNC inline void
+ptranspose(PacketBlock<Packet8cf,8>& kernel) {
+ PacketBlock<Packet8d,8> pb;
+
+ pb.packet[0] = _mm512_castps_pd(kernel.packet[0].v);
+ pb.packet[1] = _mm512_castps_pd(kernel.packet[1].v);
+ pb.packet[2] = _mm512_castps_pd(kernel.packet[2].v);
+ pb.packet[3] = _mm512_castps_pd(kernel.packet[3].v);
+ pb.packet[4] = _mm512_castps_pd(kernel.packet[4].v);
+ pb.packet[5] = _mm512_castps_pd(kernel.packet[5].v);
+ pb.packet[6] = _mm512_castps_pd(kernel.packet[6].v);
+ pb.packet[7] = _mm512_castps_pd(kernel.packet[7].v);
+ ptranspose(pb);
+ kernel.packet[0].v = _mm512_castpd_ps(pb.packet[0]);
+ kernel.packet[1].v = _mm512_castpd_ps(pb.packet[1]);
+ kernel.packet[2].v = _mm512_castpd_ps(pb.packet[2]);
+ kernel.packet[3].v = _mm512_castpd_ps(pb.packet[3]);
+ kernel.packet[4].v = _mm512_castpd_ps(pb.packet[4]);
+ kernel.packet[5].v = _mm512_castpd_ps(pb.packet[5]);
+ kernel.packet[6].v = _mm512_castpd_ps(pb.packet[6]);
+ kernel.packet[7].v = _mm512_castpd_ps(pb.packet[7]);
+}
+
+EIGEN_DEVICE_FUNC inline void
+ptranspose(PacketBlock<Packet4cd,4>& kernel) {
+ __m512d T0 = _mm512_shuffle_f64x2(kernel.packet[0].v, kernel.packet[1].v, EIGEN_SSE_SHUFFLE_MASK(0,1,0,1)); // [a0 a1 b0 b1]
+ __m512d T1 = _mm512_shuffle_f64x2(kernel.packet[0].v, kernel.packet[1].v, EIGEN_SSE_SHUFFLE_MASK(2,3,2,3)); // [a2 a3 b2 b3]
+ __m512d T2 = _mm512_shuffle_f64x2(kernel.packet[2].v, kernel.packet[3].v, EIGEN_SSE_SHUFFLE_MASK(0,1,0,1)); // [c0 c1 d0 d1]
+ __m512d T3 = _mm512_shuffle_f64x2(kernel.packet[2].v, kernel.packet[3].v, EIGEN_SSE_SHUFFLE_MASK(2,3,2,3)); // [c2 c3 d2 d3]
+
+ kernel.packet[3] = Packet4cd(_mm512_shuffle_f64x2(T1, T3, EIGEN_SSE_SHUFFLE_MASK(1,3,1,3))); // [a3 b3 c3 d3]
+ kernel.packet[2] = Packet4cd(_mm512_shuffle_f64x2(T1, T3, EIGEN_SSE_SHUFFLE_MASK(0,2,0,2))); // [a2 b2 c2 d2]
+ kernel.packet[1] = Packet4cd(_mm512_shuffle_f64x2(T0, T2, EIGEN_SSE_SHUFFLE_MASK(1,3,1,3))); // [a1 b1 c1 d1]
+ kernel.packet[0] = Packet4cd(_mm512_shuffle_f64x2(T0, T2, EIGEN_SSE_SHUFFLE_MASK(0,2,0,2))); // [a0 b0 c0 d0]
+}
+
+template<> EIGEN_STRONG_INLINE Packet8cf pinsertfirst(const Packet8cf& a, std::complex<float> b)
+{
+ Packet2cf tmp = Packet2cf(_mm512_extractf32x4_ps(a.v,0));
+ tmp = pinsertfirst(tmp, b);
+ return Packet8cf( _mm512_insertf32x4(a.v, tmp.v, 0) );
+}
+
+template<> EIGEN_STRONG_INLINE Packet4cd pinsertfirst(const Packet4cd& a, std::complex<double> b)
+{
+ return Packet4cd(_mm512_castsi512_pd( _mm512_inserti32x4(_mm512_castpd_si512(a.v), _mm_castpd_si128(pset1<Packet1cd>(b).v), 0) ));
+}
+
+template<> EIGEN_STRONG_INLINE Packet8cf pinsertlast(const Packet8cf& a, std::complex<float> b)
+{
+ Packet2cf tmp = Packet2cf(_mm512_extractf32x4_ps(a.v,3) );
+ tmp = pinsertlast(tmp, b);
+ return Packet8cf( _mm512_insertf32x4(a.v, tmp.v, 3) );
+}
+
+template<> EIGEN_STRONG_INLINE Packet4cd pinsertlast(const Packet4cd& a, std::complex<double> b)
+{
+ return Packet4cd(_mm512_castsi512_pd( _mm512_inserti32x4(_mm512_castpd_si512(a.v), _mm_castpd_si128(pset1<Packet1cd>(b).v), 3) ));
+}
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_COMPLEX_AVX512_H
diff --git a/Eigen/src/Core/arch/AVX512/MathFunctions.h b/Eigen/src/Core/arch/AVX512/MathFunctions.h
index 93c5ec43f..c2158c538 100644
--- a/Eigen/src/Core/arch/AVX512/MathFunctions.h
+++ b/Eigen/src/Core/arch/AVX512/MathFunctions.h
@@ -47,6 +47,7 @@ plog<Packet16f>(const Packet16f& _x) {
// The smallest non denormalized float number.
_EIGEN_DECLARE_CONST_Packet16f_FROM_INT(min_norm_pos, 0x00800000);
_EIGEN_DECLARE_CONST_Packet16f_FROM_INT(minus_inf, 0xff800000);
+ _EIGEN_DECLARE_CONST_Packet16f_FROM_INT(pos_inf, 0x7f800000);
_EIGEN_DECLARE_CONST_Packet16f_FROM_INT(nan, 0x7fc00000);
// Polynomial coefficients.
@@ -116,10 +117,16 @@ plog<Packet16f>(const Packet16f& _x) {
x = padd(x, y);
x = padd(x, y2);
- // Filter out invalid inputs, i.e. negative arg will be NAN, 0 will be -INF.
+ __mmask16 pos_inf_mask = _mm512_cmp_ps_mask(_x,p16f_pos_inf,_CMP_EQ_OQ);
+ // Filter out invalid inputs, i.e.:
+ // - negative arg will be NAN,
+ // - 0 will be -INF,
+ // - +INF will be +INF.
return _mm512_mask_blend_ps(iszero_mask,
- _mm512_mask_blend_ps(invalid_mask, x, p16f_nan),
- p16f_minus_inf);
+ _mm512_mask_blend_ps(invalid_mask,
+ _mm512_mask_blend_ps(pos_inf_mask,x,p16f_pos_inf),
+ p16f_nan),
+ p16f_minus_inf);
}
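The nested blends apply the special-value policy with this precedence: zero inputs first, then negative/NaN inputs, then +INF. A scalar sketch of the intended mapping (assuming invalid_mask covers negative and NaN arguments, as in the surrounding code; the helper name is hypothetical):

#include <cmath>
#include <limits>
float plog_specials_ref(float x, float computed_log) {
  if (x == 0.0f)                 return -std::numeric_limits<float>::infinity();
  if (x < 0.0f || std::isnan(x)) return  std::numeric_limits<float>::quiet_NaN();
  if (std::isinf(x))             return  std::numeric_limits<float>::infinity();
  return computed_log;
}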
#endif
@@ -373,6 +380,19 @@ EIGEN_STRONG_INLINE Packet16f prsqrt<Packet16f>(const Packet16f& x) {
#endif
#endif
+
+template <>
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet16f
+psin<Packet16f>(const Packet16f& _x) {
+ return psin_float(_x);
+}
+
+template <>
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet16f
+pcos<Packet16f>(const Packet16f& _x) {
+ return pcos_float(_x);
+}
+
} // end namespace internal
} // end namespace Eigen
diff --git a/Eigen/src/Core/arch/AVX512/PacketMath.h b/Eigen/src/Core/arch/AVX512/PacketMath.h
index 86cefba92..60b723b08 100644
--- a/Eigen/src/Core/arch/AVX512/PacketMath.h
+++ b/Eigen/src/Core/arch/AVX512/PacketMath.h
@@ -19,10 +19,10 @@ namespace internal {
#endif
#ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS
-#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS (2*sizeof(void*))
+#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 32
#endif
-#ifdef __FMA__
+#ifdef EIGEN_VECTORIZE_FMA
#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
#define EIGEN_HAS_SINGLE_INSTRUCTION_MADD
#endif
@@ -55,7 +55,9 @@ template<> struct packet_traits<float> : default_packet_traits
size = 16,
HasHalfPacket = 1,
HasBlend = 0,
-#if EIGEN_GNUC_AT_LEAST(5, 3) || EIGEN_COMP_CLANG
+ HasSin = EIGEN_FAST_MATH,
+ HasCos = EIGEN_FAST_MATH,
+#if EIGEN_GNUC_AT_LEAST(5, 3) || (!EIGEN_COMP_GNUC_STRICT)
#ifdef EIGEN_VECTORIZE_AVX512DQ
HasLog = 1,
#endif
@@ -75,7 +77,7 @@ template<> struct packet_traits<double> : default_packet_traits
AlignedOnScalar = 1,
size = 8,
HasHalfPacket = 1,
-#if EIGEN_GNUC_AT_LEAST(5, 3)
+#if EIGEN_GNUC_AT_LEAST(5, 3) || (!EIGEN_COMP_GNUC_STRICT)
HasSqrt = EIGEN_FAST_MATH,
HasRsqrt = EIGEN_FAST_MATH,
#endif
@@ -99,19 +101,20 @@ template <>
struct unpacket_traits<Packet16f> {
typedef float type;
typedef Packet8f half;
- enum { size = 16, alignment=Aligned64 };
+ typedef Packet16i integer_packet;
+ enum { size = 16, alignment=Aligned64, vectorizable=true };
};
template <>
struct unpacket_traits<Packet8d> {
typedef double type;
typedef Packet4d half;
- enum { size = 8, alignment=Aligned64 };
+ enum { size = 8, alignment=Aligned64, vectorizable=true };
};
template <>
struct unpacket_traits<Packet16i> {
typedef int type;
typedef Packet8i half;
- enum { size = 16, alignment=Aligned64 };
+ enum { size = 16, alignment=Aligned64, vectorizable=false };
};
template <>
@@ -128,12 +131,17 @@ EIGEN_STRONG_INLINE Packet16i pset1<Packet16i>(const int& from) {
}
template <>
+EIGEN_STRONG_INLINE Packet16f pset1frombits<Packet16f>(unsigned int from) {
+ return _mm512_castsi512_ps(_mm512_set1_epi32(from));
+}
+
+template <>
EIGEN_STRONG_INLINE Packet16f pload1<Packet16f>(const float* from) {
return _mm512_broadcastss_ps(_mm_load_ps1(from));
}
template <>
EIGEN_STRONG_INLINE Packet8d pload1<Packet8d>(const double* from) {
- return _mm512_broadcastsd_pd(_mm_load_pd1(from));
+ return _mm512_set1_pd(*from);
}
template <>
@@ -159,6 +167,11 @@ EIGEN_STRONG_INLINE Packet8d padd<Packet8d>(const Packet8d& a,
const Packet8d& b) {
return _mm512_add_pd(a, b);
}
+template <>
+EIGEN_STRONG_INLINE Packet16i padd<Packet16i>(const Packet16i& a,
+ const Packet16i& b) {
+ return _mm512_add_epi32(a, b);
+}
template <>
EIGEN_STRONG_INLINE Packet16f psub<Packet16f>(const Packet16f& a,
@@ -170,6 +183,11 @@ EIGEN_STRONG_INLINE Packet8d psub<Packet8d>(const Packet8d& a,
const Packet8d& b) {
return _mm512_sub_pd(a, b);
}
+template <>
+EIGEN_STRONG_INLINE Packet16i psub<Packet16i>(const Packet16i& a,
+ const Packet16i& b) {
+ return _mm512_sub_epi32(a, b);
+}
template <>
EIGEN_STRONG_INLINE Packet16f pnegate(const Packet16f& a) {
@@ -203,6 +221,11 @@ EIGEN_STRONG_INLINE Packet8d pmul<Packet8d>(const Packet8d& a,
const Packet8d& b) {
return _mm512_mul_pd(a, b);
}
+template <>
+EIGEN_STRONG_INLINE Packet16i pmul<Packet16i>(const Packet16i& a,
+ const Packet16i& b) {
+ return _mm512_mul_epi32(a, b);
+}
template <>
EIGEN_STRONG_INLINE Packet16f pdiv<Packet16f>(const Packet16f& a,
@@ -215,7 +238,7 @@ EIGEN_STRONG_INLINE Packet8d pdiv<Packet8d>(const Packet8d& a,
return _mm512_div_pd(a, b);
}
-#ifdef __FMA__
+#ifdef EIGEN_VECTORIZE_FMA
template <>
EIGEN_STRONG_INLINE Packet16f pmadd(const Packet16f& a, const Packet16f& b,
const Packet16f& c) {
@@ -254,30 +277,92 @@ EIGEN_STRONG_INLINE Packet8d pmax<Packet8d>(const Packet8d& a,
return _mm512_max_pd(b, a);
}
-template <>
-EIGEN_STRONG_INLINE Packet16f pand<Packet16f>(const Packet16f& a,
- const Packet16f& b) {
#ifdef EIGEN_VECTORIZE_AVX512DQ
- return _mm512_and_ps(a, b);
+template<int I_> EIGEN_STRONG_INLINE Packet8f extract256(Packet16f x) { return _mm512_extractf32x8_ps(x,I_); }
+template<int I_> EIGEN_STRONG_INLINE Packet2d extract128(Packet8d x) { return _mm512_extractf64x2_pd(x,I_); }
+EIGEN_STRONG_INLINE Packet16f cat256(Packet8f a, Packet8f b) { return _mm512_insertf32x8(_mm512_castps256_ps512(a),b,1); }
#else
- Packet16f res = _mm512_undefined_ps();
- Packet4f lane0_a = _mm512_extractf32x4_ps(a, 0);
- Packet4f lane0_b = _mm512_extractf32x4_ps(b, 0);
- res = _mm512_insertf32x4(res, _mm_and_ps(lane0_a, lane0_b), 0);
+// AVX512F does not define _mm512_extractf32x8_ps to extract a __m256 from a __m512
+template<int I_> EIGEN_STRONG_INLINE Packet8f extract256(Packet16f x) {
+ return _mm256_castsi256_ps(_mm512_extracti64x4_epi64( _mm512_castps_si512(x),I_));
+}
- Packet4f lane1_a = _mm512_extractf32x4_ps(a, 1);
- Packet4f lane1_b = _mm512_extractf32x4_ps(b, 1);
- res = _mm512_insertf32x4(res, _mm_and_ps(lane1_a, lane1_b), 1);
+// AVX512F does not define _mm512_extractf64x2_pd to extract a __m128d from a __m512d
+template<int I_> EIGEN_STRONG_INLINE Packet2d extract128(Packet8d x) {
+ return _mm_castsi128_pd(_mm512_extracti32x4_epi32( _mm512_castpd_si512(x),I_));
+}
- Packet4f lane2_a = _mm512_extractf32x4_ps(a, 2);
- Packet4f lane2_b = _mm512_extractf32x4_ps(b, 2);
- res = _mm512_insertf32x4(res, _mm_and_ps(lane2_a, lane2_b), 2);
+EIGEN_STRONG_INLINE Packet16f cat256(Packet8f a, Packet8f b) {
+ return _mm512_castsi512_ps(_mm512_inserti64x4(_mm512_castsi256_si512(_mm256_castps_si256(a)),
+ _mm256_castps_si256(b),1));
+}
+#endif
- Packet4f lane3_a = _mm512_extractf32x4_ps(a, 3);
- Packet4f lane3_b = _mm512_extractf32x4_ps(b, 3);
- res = _mm512_insertf32x4(res, _mm_and_ps(lane3_a, lane3_b), 3);
+template<> EIGEN_STRONG_INLINE Packet16f pcmp_le(const Packet16f& a, const Packet16f& b) {
+ __mmask16 mask = _mm512_cmp_ps_mask(a, b, _CMP_LE_OQ);
+ return _mm512_castsi512_ps(
+ _mm512_mask_set1_epi32(_mm512_set1_epi32(0), mask, 0xffffffffu));
+}
- return res;
+template<> EIGEN_STRONG_INLINE Packet16f pcmp_lt(const Packet16f& a, const Packet16f& b) {
+ __mmask16 mask = _mm512_cmp_ps_mask(a, b, _CMP_LT_OQ);
+ return _mm512_castsi512_ps(
+ _mm512_mask_set1_epi32(_mm512_set1_epi32(0), mask, 0xffffffffu));
+}
+
+template<> EIGEN_STRONG_INLINE Packet16f pcmp_lt_or_nan(const Packet16f& a, const Packet16f& b) {
+ __mmask16 mask = _mm512_cmp_ps_mask(a, b, _CMP_NGT_UQ);
+ return _mm512_castsi512_ps(
+ _mm512_mask_set1_epi32(_mm512_set1_epi32(0), mask, 0xffffffffu));
+}
+
+template<> EIGEN_STRONG_INLINE Packet16i pcmp_eq(const Packet16i& a, const Packet16i& b) {
+ __mmask16 mask = _mm512_cmp_epi32_mask(a, b, _MM_CMPINT_EQ);
+ return _mm512_mask_set1_epi32(_mm512_set1_epi32(0), mask, 0xffffffffu);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet16f pcmp_eq(const Packet16f& a, const Packet16f& b) {
+ __mmask16 mask = _mm512_cmp_ps_mask(a, b, _CMP_EQ_OQ);
+ return _mm512_castsi512_ps(
+ _mm512_mask_set1_epi32(_mm512_set1_epi32(0), mask, 0xffffffffu));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet8d pcmp_eq(const Packet8d& a, const Packet8d& b) {
+ __mmask8 mask = _mm512_cmp_pd_mask(a, b, _CMP_EQ_OQ);
+ return _mm512_castsi512_pd(
+ _mm512_mask_set1_epi64(_mm512_set1_epi64(0), mask, 0xffffffffffffffffu));
+}
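AVX512 comparisons produce a __mmask16/__mmask8 bitmask, while Eigen's pcmp_* contract is a full-width vector of all-ones/all-zeros lanes; _mm512_mask_set1_epi32(zero, mask, 0xffffffff) performs that expansion. A scalar sketch for 16 lanes (illustrative helper, not part of the patch):

#include <array>
#include <cstdint>
std::array<std::uint32_t,16> mask_to_lanes(std::uint16_t mask) {
  std::array<std::uint32_t,16> r{};
  for (int i = 0; i < 16; ++i)
    r[i] = ((mask >> i) & 1) ? 0xffffffffu : 0u;  // bit i -> lane i, all-ones or all-zeros
  return r;
}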
+
+template <>
+EIGEN_STRONG_INLINE Packet16i ptrue<Packet16i>(const Packet16i& /*a*/) {
+ return _mm512_set1_epi32(0xffffffffu);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet16f ptrue<Packet16f>(const Packet16f& a) {
+ return _mm512_castsi512_ps(ptrue<Packet16i>(_mm512_castps_si512(a)));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet8d ptrue<Packet8d>(const Packet8d& a) {
+ return _mm512_castsi512_pd(ptrue<Packet16i>(_mm512_castpd_si512(a)));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet16i pand<Packet16i>(const Packet16i& a,
+ const Packet16i& b) {
+ return _mm512_and_si512(a,b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet16f pand<Packet16f>(const Packet16f& a,
+ const Packet16f& b) {
+#ifdef EIGEN_VECTORIZE_AVX512DQ
+ return _mm512_and_ps(a, b);
+#else
+ return _mm512_castsi512_ps(pand(_mm512_castps_si512(a),_mm512_castps_si512(b)));
#endif
}
template <>
@@ -298,30 +383,18 @@ EIGEN_STRONG_INLINE Packet8d pand<Packet8d>(const Packet8d& a,
return res;
#endif
}
+
+template <>
+EIGEN_STRONG_INLINE Packet16i por<Packet16i>(const Packet16i& a, const Packet16i& b) {
+ return _mm512_or_si512(a, b);
+}
+
template <>
-EIGEN_STRONG_INLINE Packet16f por<Packet16f>(const Packet16f& a,
- const Packet16f& b) {
+EIGEN_STRONG_INLINE Packet16f por<Packet16f>(const Packet16f& a, const Packet16f& b) {
#ifdef EIGEN_VECTORIZE_AVX512DQ
return _mm512_or_ps(a, b);
#else
- Packet16f res = _mm512_undefined_ps();
- Packet4f lane0_a = _mm512_extractf32x4_ps(a, 0);
- Packet4f lane0_b = _mm512_extractf32x4_ps(b, 0);
- res = _mm512_insertf32x4(res, _mm_or_ps(lane0_a, lane0_b), 0);
-
- Packet4f lane1_a = _mm512_extractf32x4_ps(a, 1);
- Packet4f lane1_b = _mm512_extractf32x4_ps(b, 1);
- res = _mm512_insertf32x4(res, _mm_or_ps(lane1_a, lane1_b), 1);
-
- Packet4f lane2_a = _mm512_extractf32x4_ps(a, 2);
- Packet4f lane2_b = _mm512_extractf32x4_ps(b, 2);
- res = _mm512_insertf32x4(res, _mm_or_ps(lane2_a, lane2_b), 2);
-
- Packet4f lane3_a = _mm512_extractf32x4_ps(a, 3);
- Packet4f lane3_b = _mm512_extractf32x4_ps(b, 3);
- res = _mm512_insertf32x4(res, _mm_or_ps(lane3_a, lane3_b), 3);
-
- return res;
+ return _mm512_castsi512_ps(por(_mm512_castps_si512(a),_mm512_castps_si512(b)));
#endif
}
@@ -331,109 +404,59 @@ EIGEN_STRONG_INLINE Packet8d por<Packet8d>(const Packet8d& a,
#ifdef EIGEN_VECTORIZE_AVX512DQ
return _mm512_or_pd(a, b);
#else
- Packet8d res = _mm512_undefined_pd();
- Packet4d lane0_a = _mm512_extractf64x4_pd(a, 0);
- Packet4d lane0_b = _mm512_extractf64x4_pd(b, 0);
- res = _mm512_insertf64x4(res, _mm256_or_pd(lane0_a, lane0_b), 0);
-
- Packet4d lane1_a = _mm512_extractf64x4_pd(a, 1);
- Packet4d lane1_b = _mm512_extractf64x4_pd(b, 1);
- res = _mm512_insertf64x4(res, _mm256_or_pd(lane1_a, lane1_b), 1);
-
- return res;
+ return _mm512_castsi512_pd(por(_mm512_castpd_si512(a),_mm512_castpd_si512(b)));
#endif
}
template <>
-EIGEN_STRONG_INLINE Packet16f pxor<Packet16f>(const Packet16f& a,
- const Packet16f& b) {
+EIGEN_STRONG_INLINE Packet16i pxor<Packet16i>(const Packet16i& a, const Packet16i& b) {
+ return _mm512_xor_si512(a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet16f pxor<Packet16f>(const Packet16f& a, const Packet16f& b) {
#ifdef EIGEN_VECTORIZE_AVX512DQ
return _mm512_xor_ps(a, b);
#else
- Packet16f res = _mm512_undefined_ps();
- Packet4f lane0_a = _mm512_extractf32x4_ps(a, 0);
- Packet4f lane0_b = _mm512_extractf32x4_ps(b, 0);
- res = _mm512_insertf32x4(res, _mm_xor_ps(lane0_a, lane0_b), 0);
-
- Packet4f lane1_a = _mm512_extractf32x4_ps(a, 1);
- Packet4f lane1_b = _mm512_extractf32x4_ps(b, 1);
- res = _mm512_insertf32x4(res, _mm_xor_ps(lane1_a, lane1_b), 1);
-
- Packet4f lane2_a = _mm512_extractf32x4_ps(a, 2);
- Packet4f lane2_b = _mm512_extractf32x4_ps(b, 2);
- res = _mm512_insertf32x4(res, _mm_xor_ps(lane2_a, lane2_b), 2);
-
- Packet4f lane3_a = _mm512_extractf32x4_ps(a, 3);
- Packet4f lane3_b = _mm512_extractf32x4_ps(b, 3);
- res = _mm512_insertf32x4(res, _mm_xor_ps(lane3_a, lane3_b), 3);
-
- return res;
+ return _mm512_castsi512_ps(pxor(_mm512_castps_si512(a),_mm512_castps_si512(b)));
#endif
}
+
template <>
-EIGEN_STRONG_INLINE Packet8d pxor<Packet8d>(const Packet8d& a,
- const Packet8d& b) {
+EIGEN_STRONG_INLINE Packet8d pxor<Packet8d>(const Packet8d& a, const Packet8d& b) {
#ifdef EIGEN_VECTORIZE_AVX512DQ
return _mm512_xor_pd(a, b);
#else
- Packet8d res = _mm512_undefined_pd();
- Packet4d lane0_a = _mm512_extractf64x4_pd(a, 0);
- Packet4d lane0_b = _mm512_extractf64x4_pd(b, 0);
- res = _mm512_insertf64x4(res, _mm256_xor_pd(lane0_a, lane0_b), 0);
-
- Packet4d lane1_a = _mm512_extractf64x4_pd(a, 1);
- Packet4d lane1_b = _mm512_extractf64x4_pd(b, 1);
- res = _mm512_insertf64x4(res, _mm256_xor_pd(lane1_a, lane1_b), 1);
-
- return res;
+ return _mm512_castsi512_pd(pxor(_mm512_castpd_si512(a),_mm512_castpd_si512(b)));
#endif
}
template <>
-EIGEN_STRONG_INLINE Packet16f pandnot<Packet16f>(const Packet16f& a,
- const Packet16f& b) {
+EIGEN_STRONG_INLINE Packet16i pandnot<Packet16i>(const Packet16i& a, const Packet16i& b) {
+ return _mm512_andnot_si512(b, a);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet16f pandnot<Packet16f>(const Packet16f& a, const Packet16f& b) {
#ifdef EIGEN_VECTORIZE_AVX512DQ
- return _mm512_andnot_ps(a, b);
+ return _mm512_andnot_ps(b, a);
#else
- Packet16f res = _mm512_undefined_ps();
- Packet4f lane0_a = _mm512_extractf32x4_ps(a, 0);
- Packet4f lane0_b = _mm512_extractf32x4_ps(b, 0);
- res = _mm512_insertf32x4(res, _mm_andnot_ps(lane0_a, lane0_b), 0);
-
- Packet4f lane1_a = _mm512_extractf32x4_ps(a, 1);
- Packet4f lane1_b = _mm512_extractf32x4_ps(b, 1);
- res = _mm512_insertf32x4(res, _mm_andnot_ps(lane1_a, lane1_b), 1);
-
- Packet4f lane2_a = _mm512_extractf32x4_ps(a, 2);
- Packet4f lane2_b = _mm512_extractf32x4_ps(b, 2);
- res = _mm512_insertf32x4(res, _mm_andnot_ps(lane2_a, lane2_b), 2);
-
- Packet4f lane3_a = _mm512_extractf32x4_ps(a, 3);
- Packet4f lane3_b = _mm512_extractf32x4_ps(b, 3);
- res = _mm512_insertf32x4(res, _mm_andnot_ps(lane3_a, lane3_b), 3);
-
- return res;
+ return _mm512_castsi512_ps(pandnot(_mm512_castps_si512(a),_mm512_castps_si512(b)));
#endif
}
template <>
-EIGEN_STRONG_INLINE Packet8d pandnot<Packet8d>(const Packet8d& a,
- const Packet8d& b) {
+EIGEN_STRONG_INLINE Packet8d pandnot<Packet8d>(const Packet8d& a,const Packet8d& b) {
#ifdef EIGEN_VECTORIZE_AVX512DQ
- return _mm512_andnot_pd(a, b);
+ return _mm512_andnot_pd(b, a);
#else
- Packet8d res = _mm512_undefined_pd();
- Packet4d lane0_a = _mm512_extractf64x4_pd(a, 0);
- Packet4d lane0_b = _mm512_extractf64x4_pd(b, 0);
- res = _mm512_insertf64x4(res, _mm256_andnot_pd(lane0_a, lane0_b), 0);
-
- Packet4d lane1_a = _mm512_extractf64x4_pd(a, 1);
- Packet4d lane1_b = _mm512_extractf64x4_pd(b, 1);
- res = _mm512_insertf64x4(res, _mm256_andnot_pd(lane1_a, lane1_b), 1);
-
- return res;
+ return _mm512_castsi512_pd(pandnot(_mm512_castpd_si512(a),_mm512_castpd_si512(b)));
#endif
}
+template<int N> EIGEN_STRONG_INLINE Packet16i pshiftleft(Packet16i a) {
+ return _mm512_slli_epi32(a, N);
+}
+
template <>
EIGEN_STRONG_INLINE Packet16f pload<Packet16f>(const float* from) {
EIGEN_DEBUG_ALIGNED_LOAD return _mm512_load_ps(from);
@@ -475,6 +498,7 @@ EIGEN_STRONG_INLINE Packet16f ploaddup<Packet16f>(const float* from) {
}
#ifdef EIGEN_VECTORIZE_AVX512DQ
+// FIXME: this does not look optimal, better load a Packet4d and shuffle...
// Loads 4 doubles from memory and returns the packet {a0, a0, a1, a1, a2, a2, a3,
// a3}
template <>
@@ -502,21 +526,17 @@ EIGEN_STRONG_INLINE Packet8d ploaddup<Packet8d>(const double* from) {
// {a0, a0, a0, a0, a1, a1, a1, a1, a2, a2, a2, a2, a3, a3, a3, a3}
template <>
EIGEN_STRONG_INLINE Packet16f ploadquad<Packet16f>(const float* from) {
- Packet16f tmp = _mm512_undefined_ps();
- tmp = _mm512_insertf32x4(tmp, _mm_load_ps1(from), 0);
- tmp = _mm512_insertf32x4(tmp, _mm_load_ps1(from + 1), 1);
- tmp = _mm512_insertf32x4(tmp, _mm_load_ps1(from + 2), 2);
- tmp = _mm512_insertf32x4(tmp, _mm_load_ps1(from + 3), 3);
- return tmp;
+ Packet16f tmp = _mm512_castps128_ps512(ploadu<Packet4f>(from));
+ const Packet16i scatter_mask = _mm512_set_epi32(3,3,3,3, 2,2,2,2, 1,1,1,1, 0,0,0,0);
+ return _mm512_permutexvar_ps(scatter_mask, tmp);
}
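The rewritten ploadquad loads the four floats once and replicates each of them four times with a single permute, using the lane-order index pattern {0,0,0,0, 1,1,1,1, 2,2,2,2, 3,3,3,3}. A scalar sketch of the resulting packet (illustration only):

#include <array>
std::array<float,16> ploadquad_ref(const float* from) {
  std::array<float,16> r;
  for (int i = 0; i < 16; ++i) r[i] = from[i / 4];  // {a0 x4, a1 x4, a2 x4, a3 x4}
  return r;
}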
+
// Loads 2 doubles from memory and returns the packet
// {a0, a0, a0, a0, a1, a1, a1, a1}
template <>
EIGEN_STRONG_INLINE Packet8d ploadquad<Packet8d>(const double* from) {
- __m128d tmp0 = _mm_load_pd1(from);
- __m256d lane0 = _mm256_broadcastsd_pd(tmp0);
- __m128d tmp1 = _mm_load_pd1(from + 1);
- __m256d lane1 = _mm256_broadcastsd_pd(tmp1);
+ __m256d lane0 = _mm256_set1_pd(*from);
+ __m256d lane1 = _mm256_set1_pd(*(from+1));
__m512d tmp = _mm512_undefined_pd();
tmp = _mm512_insertf64x4(tmp, lane0, 0);
return _mm512_insertf64x4(tmp, lane1, 1);
@@ -981,6 +1001,13 @@ EIGEN_STRONG_INLINE double predux_max<Packet8d>(const Packet8d& a) {
return pfirst(_mm256_max_pd(res, _mm256_shuffle_pd(res, res, 1)));
}
+template<> EIGEN_STRONG_INLINE bool predux_any(const Packet16f& x)
+{
+ Packet16i xi = _mm512_castps_si512(x);
+ __mmask16 tmp = _mm512_test_epi32_mask(xi,xi);
+ return !_mm512_kortestz(tmp,tmp);
+}
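_mm512_test_epi32_mask sets mask bit i when lane i has any nonzero bit, and _mm512_kortestz returns 1 only when the mask is all zero, so the negation reports whether any lane is set. A scalar sketch (illustration only; the helper name is hypothetical):

#include <array>
#include <cstdint>
bool predux_any_ref(const std::array<std::uint32_t,16>& x) {
  for (std::uint32_t v : x)
    if (v != 0) return true;  // at least one lane is non-zero
  return false;
}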
+
template <int Offset>
struct palign_impl<Offset, Packet16f> {
static EIGEN_STRONG_INLINE void run(Packet16f& first,
@@ -1322,6 +1349,22 @@ template<> EIGEN_STRONG_INLINE Packet8d pinsertlast(const Packet8d& a, double b)
return _mm512_mask_broadcastsd_pd(a, (1<<7), _mm_load_sd(&b));
}
+template<> EIGEN_STRONG_INLINE Packet16i pcast<Packet16f, Packet16i>(const Packet16f& a) {
+ return _mm512_cvttps_epi32(a);
+}
+
+template<> EIGEN_STRONG_INLINE Packet16f pcast<Packet16i, Packet16f>(const Packet16i& a) {
+ return _mm512_cvtepi32_ps(a);
+}
+
+template<> EIGEN_STRONG_INLINE Packet16i preinterpret<Packet16i,Packet16f>(const Packet16f& a) {
+ return _mm512_castps_si512(a);
+}
+
+template<> EIGEN_STRONG_INLINE Packet16f preinterpret<Packet16f,Packet16i>(const Packet16i& a) {
+ return _mm512_castsi512_ps(a);
+}
+
} // end namespace internal
} // end namespace Eigen
diff --git a/Eigen/src/Core/arch/AltiVec/Complex.h b/Eigen/src/Core/arch/AltiVec/Complex.h
index 3e665730c..440d058d8 100644
--- a/Eigen/src/Core/arch/AltiVec/Complex.h
+++ b/Eigen/src/Core/arch/AltiVec/Complex.h
@@ -60,7 +60,7 @@ template<> struct packet_traits<std::complex<float> > : default_packet_traits
};
};
-template<> struct unpacket_traits<Packet2cf> { typedef std::complex<float> type; enum {size=2, alignment=Aligned16}; typedef Packet2cf half; };
+template<> struct unpacket_traits<Packet2cf> { typedef std::complex<float> type; enum {size=2, alignment=Aligned16, vectorizable=true}; typedef Packet2cf half; };
template<> EIGEN_STRONG_INLINE Packet2cf pset1<Packet2cf>(const std::complex<float>& from)
{
@@ -82,14 +82,14 @@ template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<float> >(std::complex<f
template<> EIGEN_DEVICE_FUNC inline Packet2cf pgather<std::complex<float>, Packet2cf>(const std::complex<float>* from, Index stride)
{
- std::complex<float> EIGEN_ALIGN16 af[2];
+ EIGEN_ALIGN16 std::complex<float> af[2];
af[0] = from[0*stride];
af[1] = from[1*stride];
return pload<Packet2cf>(af);
}
template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<float>, Packet2cf>(std::complex<float>* to, const Packet2cf& from, Index stride)
{
- std::complex<float> EIGEN_ALIGN16 af[2];
+ EIGEN_ALIGN16 std::complex<float> af[2];
pstore<std::complex<float> >((std::complex<float> *) af, from);
to[0*stride] = af[0];
to[1*stride] = af[1];
@@ -128,7 +128,7 @@ template<> EIGEN_STRONG_INLINE void prefetch<std::complex<float> >(const std::co
template<> EIGEN_STRONG_INLINE std::complex<float> pfirst<Packet2cf>(const Packet2cf& a)
{
- std::complex<float> EIGEN_ALIGN16 res[2];
+ EIGEN_ALIGN16 std::complex<float> res[2];
pstore((float *)&res, a.v);
return res[0];
@@ -286,7 +286,7 @@ template<> struct packet_traits<std::complex<double> > : default_packet_traits
};
};
-template<> struct unpacket_traits<Packet1cd> { typedef std::complex<double> type; enum {size=1, alignment=Aligned16}; typedef Packet1cd half; };
+template<> struct unpacket_traits<Packet1cd> { typedef std::complex<double> type; enum {size=1, alignment=Aligned16, vectorizable=true}; typedef Packet1cd half; };
template<> EIGEN_STRONG_INLINE Packet1cd pload <Packet1cd>(const std::complex<double>* from) { return Packet1cd(pload<Packet2d>((const double*)from)); }
template<> EIGEN_STRONG_INLINE Packet1cd ploadu<Packet1cd>(const std::complex<double>* from) { return Packet1cd(ploadu<Packet2d>((const double*)from)); }
@@ -298,14 +298,14 @@ template<> EIGEN_STRONG_INLINE Packet1cd pset1<Packet1cd>(const std::complex<dou
template<> EIGEN_DEVICE_FUNC inline Packet1cd pgather<std::complex<double>, Packet1cd>(const std::complex<double>* from, Index stride)
{
- std::complex<double> EIGEN_ALIGN16 af[2];
+ EIGEN_ALIGN16 std::complex<double> af[2];
af[0] = from[0*stride];
af[1] = from[1*stride];
return pload<Packet1cd>(af);
}
template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<double>, Packet1cd>(std::complex<double>* to, const Packet1cd& from, Index stride)
{
- std::complex<double> EIGEN_ALIGN16 af[2];
+ EIGEN_ALIGN16 std::complex<double> af[2];
pstore<std::complex<double> >(af, from);
to[0*stride] = af[0];
to[1*stride] = af[1];
@@ -345,7 +345,7 @@ template<> EIGEN_STRONG_INLINE void prefetch<std::complex<double> >(const std::c
template<> EIGEN_STRONG_INLINE std::complex<double> pfirst<Packet1cd>(const Packet1cd& a)
{
- std::complex<double> EIGEN_ALIGN16 res[2];
+ EIGEN_ALIGN16 std::complex<double> res[2];
pstore<std::complex<double> >(res, a);
return res[0];
diff --git a/Eigen/src/Core/arch/AltiVec/MathFunctions.h b/Eigen/src/Core/arch/AltiVec/MathFunctions.h
index c5e4bede7..81097e668 100644
--- a/Eigen/src/Core/arch/AltiVec/MathFunctions.h
+++ b/Eigen/src/Core/arch/AltiVec/MathFunctions.h
@@ -9,191 +9,37 @@
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-/* The sin, cos, exp, and log functions of this file come from
- * Julien Pommier's sse math library: http://gruntthepeon.free.fr/ssemath/
- */
-
#ifndef EIGEN_MATH_FUNCTIONS_ALTIVEC_H
#define EIGEN_MATH_FUNCTIONS_ALTIVEC_H
+#include "../Default/GenericPacketMathFunctions.h"
+
namespace Eigen {
namespace internal {
-static _EIGEN_DECLARE_CONST_Packet4f(1 , 1.0f);
-static _EIGEN_DECLARE_CONST_Packet4f(half, 0.5f);
-static _EIGEN_DECLARE_CONST_Packet4i(0x7f, 0x7f);
-static _EIGEN_DECLARE_CONST_Packet4i(23, 23);
-
-static _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(inv_mant_mask, ~0x7f800000);
-
-/* the smallest non denormalized float number */
-static _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(min_norm_pos, 0x00800000);
-static _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(minus_inf, 0xff800000); // -1.f/0.f
-static _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(minus_nan, 0xffffffff);
-
-/* natural logarithm computed for 4 simultaneous float
- return NaN for x <= 0
-*/
-static _EIGEN_DECLARE_CONST_Packet4f(cephes_SQRTHF, 0.707106781186547524f);
-static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p0, 7.0376836292E-2f);
-static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p1, - 1.1514610310E-1f);
-static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p2, 1.1676998740E-1f);
-static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p3, - 1.2420140846E-1f);
-static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p4, + 1.4249322787E-1f);
-static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p5, - 1.6668057665E-1f);
-static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p6, + 2.0000714765E-1f);
-static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p7, - 2.4999993993E-1f);
-static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p8, + 3.3333331174E-1f);
-static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_q1, -2.12194440e-4f);
-static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_q2, 0.693359375f);
-
-static _EIGEN_DECLARE_CONST_Packet4f(exp_hi, 88.3762626647950f);
-static _EIGEN_DECLARE_CONST_Packet4f(exp_lo, -88.3762626647949f);
-
-static _EIGEN_DECLARE_CONST_Packet4f(cephes_LOG2EF, 1.44269504088896341f);
-static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_C1, 0.693359375f);
-static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_C2, -2.12194440e-4f);
-
-static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p0, 1.9875691500E-4f);
-static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p1, 1.3981999507E-3f);
-static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p2, 8.3334519073E-3f);
-static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p3, 4.1665795894E-2f);
-static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p4, 1.6666665459E-1f);
-static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p5, 5.0000001201E-1f);
-
-#ifdef __VSX__
-static _EIGEN_DECLARE_CONST_Packet2d(1 , 1.0);
-static _EIGEN_DECLARE_CONST_Packet2d(2 , 2.0);
-static _EIGEN_DECLARE_CONST_Packet2d(half, 0.5);
-
-static _EIGEN_DECLARE_CONST_Packet2d(exp_hi, 709.437);
-static _EIGEN_DECLARE_CONST_Packet2d(exp_lo, -709.436139303);
-
-static _EIGEN_DECLARE_CONST_Packet2d(cephes_LOG2EF, 1.4426950408889634073599);
-
-static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p0, 1.26177193074810590878e-4);
-static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p1, 3.02994407707441961300e-2);
-static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p2, 9.99999999999999999910e-1);
-
-static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q0, 3.00198505138664455042e-6);
-static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q1, 2.52448340349684104192e-3);
-static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q2, 2.27265548208155028766e-1);
-static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q3, 2.00000000000000000009e0);
-
-static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_C1, 0.693145751953125);
-static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_C2, 1.42860682030941723212e-6);
-
-#ifdef __POWER8_VECTOR__
-static Packet2l p2l_1023 = { 1023, 1023 };
-static Packet2ul p2ul_52 = { 52, 52 };
-#endif
-
-#endif
-
template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
Packet4f plog<Packet4f>(const Packet4f& _x)
{
- Packet4f x = _x;
-
- Packet4i emm0;
-
- /* isvalid_mask is 0 if x < 0 or x is NaN. */
- Packet4ui isvalid_mask = reinterpret_cast<Packet4ui>(vec_cmpge(x, p4f_ZERO));
- Packet4ui iszero_mask = reinterpret_cast<Packet4ui>(vec_cmpeq(x, p4f_ZERO));
-
- x = pmax(x, p4f_min_norm_pos); /* cut off denormalized stuff */
- emm0 = vec_sr(reinterpret_cast<Packet4i>(x),
- reinterpret_cast<Packet4ui>(p4i_23));
-
- /* keep only the fractional part */
- x = pand(x, p4f_inv_mant_mask);
- x = por(x, p4f_half);
-
- emm0 = psub(emm0, p4i_0x7f);
- Packet4f e = padd(vec_ctf(emm0, 0), p4f_1);
-
- /* part2:
- if( x < SQRTHF ) {
- e -= 1;
- x = x + x - 1.0;
- } else { x = x - 1.0; }
- */
- Packet4f mask = reinterpret_cast<Packet4f>(vec_cmplt(x, p4f_cephes_SQRTHF));
- Packet4f tmp = pand(x, mask);
- x = psub(x, p4f_1);
- e = psub(e, pand(p4f_1, mask));
- x = padd(x, tmp);
-
- Packet4f x2 = pmul(x,x);
- Packet4f x3 = pmul(x2,x);
-
- Packet4f y, y1, y2;
- y = pmadd(p4f_cephes_log_p0, x, p4f_cephes_log_p1);
- y1 = pmadd(p4f_cephes_log_p3, x, p4f_cephes_log_p4);
- y2 = pmadd(p4f_cephes_log_p6, x, p4f_cephes_log_p7);
- y = pmadd(y , x, p4f_cephes_log_p2);
- y1 = pmadd(y1, x, p4f_cephes_log_p5);
- y2 = pmadd(y2, x, p4f_cephes_log_p8);
- y = pmadd(y, x3, y1);
- y = pmadd(y, x3, y2);
- y = pmul(y, x3);
-
- y1 = pmul(e, p4f_cephes_log_q1);
- tmp = pmul(x2, p4f_half);
- y = padd(y, y1);
- x = psub(x, tmp);
- y2 = pmul(e, p4f_cephes_log_q2);
- x = padd(x, y);
- x = padd(x, y2);
- // negative arg will be NAN, 0 will be -INF
- x = vec_sel(x, p4f_minus_inf, iszero_mask);
- x = vec_sel(p4f_minus_nan, x, isvalid_mask);
- return x;
+ return plog_float(_x);
}
template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
Packet4f pexp<Packet4f>(const Packet4f& _x)
{
- Packet4f x = _x;
-
- Packet4f tmp, fx;
- Packet4i emm0;
-
- // clamp x
- x = pmax(pmin(x, p4f_exp_hi), p4f_exp_lo);
-
- // express exp(x) as exp(g + n*log(2))
- fx = pmadd(x, p4f_cephes_LOG2EF, p4f_half);
-
- fx = pfloor(fx);
-
- tmp = pmul(fx, p4f_cephes_exp_C1);
- Packet4f z = pmul(fx, p4f_cephes_exp_C2);
- x = psub(x, tmp);
- x = psub(x, z);
-
- z = pmul(x,x);
-
- Packet4f y = p4f_cephes_exp_p0;
- y = pmadd(y, x, p4f_cephes_exp_p1);
- y = pmadd(y, x, p4f_cephes_exp_p2);
- y = pmadd(y, x, p4f_cephes_exp_p3);
- y = pmadd(y, x, p4f_cephes_exp_p4);
- y = pmadd(y, x, p4f_cephes_exp_p5);
- y = pmadd(y, z, x);
- y = padd(y, p4f_1);
+ return pexp_float(_x);
+}
- // build 2^n
- emm0 = vec_cts(fx, 0);
- emm0 = vec_add(emm0, p4i_0x7f);
- emm0 = vec_sl(emm0, reinterpret_cast<Packet4ui>(p4i_23));
+template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
+Packet4f psin<Packet4f>(const Packet4f& _x)
+{
+ return psin_float(_x);
+}
- // Altivec's max & min operators just drop silent NaNs. Check NaNs in
- // inputs and return them unmodified.
- Packet4ui isnumber_mask = reinterpret_cast<Packet4ui>(vec_cmpeq(_x, _x));
- return vec_sel(_x, pmax(pmul(y, reinterpret_cast<Packet4f>(emm0)), _x),
- isnumber_mask);
+template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
+Packet4f pcos<Packet4f>(const Packet4f& _x)
+{
+ return pcos_float(_x);
}
#ifndef EIGEN_COMP_CLANG
@@ -225,93 +71,10 @@ Packet2d psqrt<Packet2d>(const Packet2d& x)
return vec_sqrt(x);
}
-// VSX support varies between different compilers and even different
-// versions of the same compiler. For gcc version >= 4.9.3, we can use
-// vec_cts to efficiently convert Packet2d to Packet2l. Otherwise, use
-// a slow version that works with older compilers.
-// Update: apparently vec_cts/vec_ctf intrinsics for 64-bit doubles
-// are buggy, https://gcc.gnu.org/bugzilla/show_bug.cgi?id=70963
-static inline Packet2l ConvertToPacket2l(const Packet2d& x) {
-#if EIGEN_GNUC_AT_LEAST(5, 4) || \
- (EIGEN_GNUC_AT(6, 1) && __GNUC_PATCHLEVEL__ >= 1)
- return vec_cts(x, 0); // TODO: check clang version.
-#else
- double tmp[2];
- memcpy(tmp, &x, sizeof(tmp));
- Packet2l l = { static_cast<long long>(tmp[0]),
- static_cast<long long>(tmp[1]) };
- return l;
-#endif
-}
-
template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
Packet2d pexp<Packet2d>(const Packet2d& _x)
{
- Packet2d x = _x;
-
- Packet2d tmp, fx;
- Packet2l emm0;
-
- // clamp x
- x = pmax(pmin(x, p2d_exp_hi), p2d_exp_lo);
-
- /* express exp(x) as exp(g + n*log(2)) */
- fx = pmadd(x, p2d_cephes_LOG2EF, p2d_half);
-
- fx = pfloor(fx);
-
- tmp = pmul(fx, p2d_cephes_exp_C1);
- Packet2d z = pmul(fx, p2d_cephes_exp_C2);
- x = psub(x, tmp);
- x = psub(x, z);
-
- Packet2d x2 = pmul(x,x);
-
- Packet2d px = p2d_cephes_exp_p0;
- px = pmadd(px, x2, p2d_cephes_exp_p1);
- px = pmadd(px, x2, p2d_cephes_exp_p2);
- px = pmul (px, x);
-
- Packet2d qx = p2d_cephes_exp_q0;
- qx = pmadd(qx, x2, p2d_cephes_exp_q1);
- qx = pmadd(qx, x2, p2d_cephes_exp_q2);
- qx = pmadd(qx, x2, p2d_cephes_exp_q3);
-
- x = pdiv(px,psub(qx,px));
- x = pmadd(p2d_2,x,p2d_1);
-
- // build 2^n
- emm0 = ConvertToPacket2l(fx);
-
-#ifdef __POWER8_VECTOR__
- emm0 = vec_add(emm0, p2l_1023);
- emm0 = vec_sl(emm0, p2ul_52);
-#else
- // Code is a bit complex for POWER7. There is actually a
- // vec_xxsldi intrinsic but it is not supported by some gcc versions.
- // So we shift (52-32) bits and do a word swap with zeros.
- _EIGEN_DECLARE_CONST_Packet4i(1023, 1023);
- _EIGEN_DECLARE_CONST_Packet4i(20, 20); // 52 - 32
-
- Packet4i emm04i = reinterpret_cast<Packet4i>(emm0);
- emm04i = vec_add(emm04i, p4i_1023);
- emm04i = vec_sl(emm04i, reinterpret_cast<Packet4ui>(p4i_20));
- static const Packet16uc perm = {
- 0x14, 0x15, 0x16, 0x17, 0x00, 0x01, 0x02, 0x03,
- 0x1c, 0x1d, 0x1e, 0x1f, 0x08, 0x09, 0x0a, 0x0b };
-#ifdef _BIG_ENDIAN
- emm0 = reinterpret_cast<Packet2l>(vec_perm(p4i_ZERO, emm04i, perm));
-#else
- emm0 = reinterpret_cast<Packet2l>(vec_perm(emm04i, p4i_ZERO, perm));
-#endif
-
-#endif
-
- // Altivec's max & min operators just drop silent NaNs. Check NaNs in
- // inputs and return them unmodified.
- Packet2ul isnumber_mask = reinterpret_cast<Packet2ul>(vec_cmpeq(_x, _x));
- return vec_sel(_x, pmax(pmul(x, reinterpret_cast<Packet2d>(emm0)), _x),
- isnumber_mask);
+ return pexp_double(_x);
}
#endif
diff --git a/Eigen/src/Core/arch/AltiVec/PacketMath.h b/Eigen/src/Core/arch/AltiVec/PacketMath.h
index 7f4e90f75..9535724eb 100755
--- a/Eigen/src/Core/arch/AltiVec/PacketMath.h
+++ b/Eigen/src/Core/arch/AltiVec/PacketMath.h
@@ -146,9 +146,9 @@ template<> struct packet_traits<float> : default_packet_traits
HasMin = 1,
HasMax = 1,
HasAbs = 1,
- HasSin = 0,
- HasCos = 0,
- HasLog = 0,
+ HasSin = EIGEN_FAST_MATH,
+ HasCos = EIGEN_FAST_MATH,
+ HasLog = 1,
HasExp = 1,
#ifdef __VSX__
HasSqrt = 1,
@@ -187,8 +187,19 @@ template<> struct packet_traits<int> : default_packet_traits
};
-template<> struct unpacket_traits<Packet4f> { typedef float type; enum {size=4, alignment=Aligned16}; typedef Packet4f half; };
-template<> struct unpacket_traits<Packet4i> { typedef int type; enum {size=4, alignment=Aligned16}; typedef Packet4i half; };
+template<> struct unpacket_traits<Packet4f>
+{
+ typedef float type;
+ typedef Packet4f half;
+ typedef Packet4i integer_packet;
+ enum {size=4, alignment=Aligned16, vectorizable=true};
+};
+template<> struct unpacket_traits<Packet4i>
+{
+ typedef int type;
+ typedef Packet4i half;
+ enum {size=4, alignment=Aligned16, vectorizable=false};
+};
inline std::ostream & operator <<(std::ostream & s, const Packet16uc & v)
{
@@ -285,6 +296,11 @@ template<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int& from) {
Packet4i v = {from, from, from, from};
return v;
}
+
+template<> EIGEN_STRONG_INLINE Packet4f pset1frombits<Packet4f>(unsigned int from) {
+ return reinterpret_cast<Packet4f>(pset1<Packet4i>(from));
+}
+
template<> EIGEN_STRONG_INLINE void
pbroadcast4<Packet4f>(const float *a,
Packet4f& a0, Packet4f& a1, Packet4f& a2, Packet4f& a3)
@@ -308,7 +324,7 @@ pbroadcast4<Packet4i>(const int *a,
template<> EIGEN_DEVICE_FUNC inline Packet4f pgather<float, Packet4f>(const float* from, Index stride)
{
- float EIGEN_ALIGN16 af[4];
+ EIGEN_ALIGN16 float af[4];
af[0] = from[0*stride];
af[1] = from[1*stride];
af[2] = from[2*stride];
@@ -317,7 +333,7 @@ template<> EIGEN_DEVICE_FUNC inline Packet4f pgather<float, Packet4f>(const floa
}
template<> EIGEN_DEVICE_FUNC inline Packet4i pgather<int, Packet4i>(const int* from, Index stride)
{
- int EIGEN_ALIGN16 ai[4];
+ EIGEN_ALIGN16 int ai[4];
ai[0] = from[0*stride];
ai[1] = from[1*stride];
ai[2] = from[2*stride];
@@ -326,7 +342,7 @@ template<> EIGEN_DEVICE_FUNC inline Packet4i pgather<int, Packet4i>(const int* f
}
template<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet4f>(float* to, const Packet4f& from, Index stride)
{
- float EIGEN_ALIGN16 af[4];
+ EIGEN_ALIGN16 float af[4];
pstore<float>(af, from);
to[0*stride] = af[0];
to[1*stride] = af[1];
@@ -335,7 +351,7 @@ template<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet4f>(float* to, co
}
template<> EIGEN_DEVICE_FUNC inline void pscatter<int, Packet4i>(int* to, const Packet4i& from, Index stride)
{
- int EIGEN_ALIGN16 ai[4];
+ EIGEN_ALIGN16 int ai[4];
pstore<int>((int *)ai, from);
to[0*stride] = ai[0];
to[1*stride] = ai[1];
@@ -414,6 +430,15 @@ template<> EIGEN_STRONG_INLINE Packet4f pmax<Packet4f>(const Packet4f& a, const
}
template<> EIGEN_STRONG_INLINE Packet4i pmax<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_max(a, b); }
+template<> EIGEN_STRONG_INLINE Packet4f pcmp_le(const Packet4f& a, const Packet4f& b) { return reinterpret_cast<Packet4f>(vec_cmple(a,b)); }
+template<> EIGEN_STRONG_INLINE Packet4f pcmp_lt(const Packet4f& a, const Packet4f& b) { return reinterpret_cast<Packet4f>(vec_cmplt(a,b)); }
+template<> EIGEN_STRONG_INLINE Packet4f pcmp_eq(const Packet4f& a, const Packet4f& b) { return reinterpret_cast<Packet4f>(vec_cmpeq(a,b)); }
+template<> EIGEN_STRONG_INLINE Packet4f pcmp_lt_or_nan(const Packet4f& a, const Packet4f& b) {
+ Packet4f c = reinterpret_cast<Packet4f>(vec_cmpge(a,b));
+ return vec_nor(c,c);
+}
+template<> EIGEN_STRONG_INLINE Packet4i pcmp_eq(const Packet4i& a, const Packet4i& b) { return reinterpret_cast<Packet4i>(vec_cmpeq(a,b)); }
+
template<> EIGEN_STRONG_INLINE Packet4f pand<Packet4f>(const Packet4f& a, const Packet4f& b) { return vec_and(a, b); }
template<> EIGEN_STRONG_INLINE Packet4i pand<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_and(a, b); }
@@ -426,6 +451,10 @@ template<> EIGEN_STRONG_INLINE Packet4i pxor<Packet4i>(const Packet4i& a, const
template<> EIGEN_STRONG_INLINE Packet4f pandnot<Packet4f>(const Packet4f& a, const Packet4f& b) { return vec_and(a, vec_nor(b, b)); }
template<> EIGEN_STRONG_INLINE Packet4i pandnot<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_and(a, vec_nor(b, b)); }
+template<> EIGEN_STRONG_INLINE Packet4f pselect(const Packet4f& mask, const Packet4f& a, const Packet4f& b) {
+ return vec_sel(b, a, mask);
+}
+
template<> EIGEN_STRONG_INLINE Packet4f pround<Packet4f>(const Packet4f& a) { return vec_round(a); }
template<> EIGEN_STRONG_INLINE Packet4f pceil<Packet4f>(const Packet4f& a) { return vec_ceil(a); }
template<> EIGEN_STRONG_INLINE Packet4f pfloor<Packet4f>(const Packet4f& a) { return vec_floor(a); }
@@ -536,8 +565,8 @@ template<> EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const Packet4f&
template<> EIGEN_STRONG_INLINE void prefetch<float>(const float* addr) { EIGEN_PPC_PREFETCH(addr); }
template<> EIGEN_STRONG_INLINE void prefetch<int>(const int* addr) { EIGEN_PPC_PREFETCH(addr); }
-template<> EIGEN_STRONG_INLINE float pfirst<Packet4f>(const Packet4f& a) { float EIGEN_ALIGN16 x; vec_ste(a, 0, &x); return x; }
-template<> EIGEN_STRONG_INLINE int pfirst<Packet4i>(const Packet4i& a) { int EIGEN_ALIGN16 x; vec_ste(a, 0, &x); return x; }
+template<> EIGEN_STRONG_INLINE float pfirst<Packet4f>(const Packet4f& a) { EIGEN_ALIGN16 float x; vec_ste(a, 0, &x); return x; }
+template<> EIGEN_STRONG_INLINE int pfirst<Packet4i>(const Packet4i& a) { EIGEN_ALIGN16 int x; vec_ste(a, 0, &x); return x; }
template<> EIGEN_STRONG_INLINE Packet4f preverse(const Packet4f& a)
{
@@ -550,6 +579,19 @@ template<> EIGEN_STRONG_INLINE Packet4i preverse(const Packet4i& a)
template<> EIGEN_STRONG_INLINE Packet4f pabs(const Packet4f& a) { return vec_abs(a); }
template<> EIGEN_STRONG_INLINE Packet4i pabs(const Packet4i& a) { return vec_abs(a); }
+template<int N> EIGEN_STRONG_INLINE Packet4i pshiftright(Packet4i a)
+{ return vec_sr(a,reinterpret_cast<Packet4ui>(pset1<Packet4i>(N))); }
+template<int N> EIGEN_STRONG_INLINE Packet4i pshiftleft(Packet4i a)
+{ return vec_sl(a,reinterpret_cast<Packet4ui>(pset1<Packet4i>(N))); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pfrexp<Packet4f>(const Packet4f& a, Packet4f& exponent) {
+ return pfrexp_float(a,exponent);
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f pldexp<Packet4f>(const Packet4f& a, const Packet4f& exponent) {
+ return pldexp_float(a,exponent);
+}
+
template<> EIGEN_STRONG_INLINE float predux<Packet4f>(const Packet4f& a)
{
Packet4f b, sum;
@@ -678,6 +720,11 @@ template<> EIGEN_STRONG_INLINE int predux_max<Packet4i>(const Packet4i& a)
return pfirst(res);
}
+template<> EIGEN_STRONG_INLINE bool predux_any(const Packet4f& x)
+{
+ return vec_any_ne(x, pzero(x));
+}
+
template<int Offset>
struct palign_impl<Offset,Packet4f>
{
@@ -771,6 +818,43 @@ template<> EIGEN_STRONG_INLINE Packet4f pblend(const Selector<4>& ifPacket, cons
}
+template <>
+struct type_casting_traits<float, int> {
+ enum {
+ VectorizedCast = 1,
+ SrcCoeffRatio = 1,
+ TgtCoeffRatio = 1
+ };
+};
+
+template <>
+struct type_casting_traits<int, float> {
+ enum {
+ VectorizedCast = 1,
+ SrcCoeffRatio = 1,
+ TgtCoeffRatio = 1
+ };
+};
+
+
+template<> EIGEN_STRONG_INLINE Packet4i pcast<Packet4f, Packet4i>(const Packet4f& a) {
+ return vec_cts(a,0);
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f pcast<Packet4i, Packet4f>(const Packet4i& a) {
+ return vec_ctf(a,0);
+}
+
+template<> EIGEN_STRONG_INLINE Packet4i preinterpret<Packet4i,Packet4f>(const Packet4f& a) {
+ return reinterpret_cast<Packet4i>(a);
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f preinterpret<Packet4f,Packet4i>(const Packet4i& a) {
+ return reinterpret_cast<Packet4f>(a);
+}
+
+
+
//---------- double ----------
#ifdef __VSX__
typedef __vector double Packet2d;
@@ -837,7 +921,7 @@ template<> struct packet_traits<double> : default_packet_traits
};
};
-template<> struct unpacket_traits<Packet2d> { typedef double type; enum {size=2, alignment=Aligned16}; typedef Packet2d half; };
+template<> struct unpacket_traits<Packet2d> { typedef double type; enum {size=2, alignment=Aligned16, vectorizable=true}; typedef Packet2d half; };
inline std::ostream & operator <<(std::ostream & s, const Packet2l & v)
{
@@ -901,14 +985,14 @@ pbroadcast4<Packet2d>(const double *a,
template<> EIGEN_DEVICE_FUNC inline Packet2d pgather<double, Packet2d>(const double* from, Index stride)
{
- double EIGEN_ALIGN16 af[2];
+ EIGEN_ALIGN16 double af[2];
af[0] = from[0*stride];
af[1] = from[1*stride];
return pload<Packet2d>(af);
}
template<> EIGEN_DEVICE_FUNC inline void pscatter<double, Packet2d>(double* to, const Packet2d& from, Index stride)
{
- double EIGEN_ALIGN16 af[2];
+ EIGEN_ALIGN16 double af[2];
pstore<double>(af, from);
to[0*stride] = af[0];
to[1*stride] = af[1];
@@ -980,7 +1064,7 @@ template<> EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const Packet2d&
template<> EIGEN_STRONG_INLINE void prefetch<double>(const double* addr) { EIGEN_PPC_PREFETCH(addr); }
-template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { double EIGEN_ALIGN16 x[2]; pstore<double>(x, a); return x[0]; }
+template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { EIGEN_ALIGN16 double x[2]; pstore<double>(x, a); return x[0]; }
template<> EIGEN_STRONG_INLINE Packet2d preverse(const Packet2d& a)
{
@@ -988,6 +1072,59 @@ template<> EIGEN_STRONG_INLINE Packet2d preverse(const Packet2d& a)
}
template<> EIGEN_STRONG_INLINE Packet2d pabs(const Packet2d& a) { return vec_abs(a); }
+// VSX support varies between different compilers and even different
+// versions of the same compiler. For gcc version >= 4.9.3, we can use
+// vec_cts to efficiently convert Packet2d to Packet2l. Otherwise, use
+// a slow version that works with older compilers.
+// Update: apparently vec_cts/vec_ctf intrinsics for 64-bit doubles
+// are buggy, https://gcc.gnu.org/bugzilla/show_bug.cgi?id=70963
+static inline Packet2l ConvertToPacket2l(const Packet2d& x) {
+#if EIGEN_GNUC_AT_LEAST(5, 4) || \
+ (EIGEN_GNUC_AT(6, 1) && __GNUC_PATCHLEVEL__ >= 1)
+ return vec_cts(x, 0); // TODO: check clang version.
+#else
+ double tmp[2];
+ memcpy(tmp, &x, sizeof(tmp));
+ Packet2l l = { static_cast<long long>(tmp[0]),
+ static_cast<long long>(tmp[1]) };
+ return l;
+#endif
+}
+
+template<> EIGEN_STRONG_INLINE Packet2d pldexp<Packet2d>(const Packet2d& a, const Packet2d& exponent) {
+
+ // build 2^n
+ Packet2l emm0 = ConvertToPacket2l(exponent);
+
+#ifdef __POWER8_VECTOR__
+ const Packet2l p2l_1023 = { 1023, 1023 };
+ const Packet2ul p2ul_52 = { 52, 52 };
+ emm0 = vec_add(emm0, p2l_1023);
+ emm0 = vec_sl(emm0, p2ul_52);
+#else
+ // Code is a bit complex for POWER7. There is actually a
+ // vec_xxsldi intrinsic but it is not supported by some gcc versions.
+ // So we shift (52-32) bits and do a word swap with zeros.
+ const Packet4i p4i_1023 = pset1<Packet4i>(1023);
+ const Packet4i p4i_20 = pset1<Packet4i>(20); // 52 - 32
+
+ Packet4i emm04i = reinterpret_cast<Packet4i>(emm0);
+ emm04i = vec_add(emm04i, p4i_1023);
+ emm04i = vec_sl(emm04i, reinterpret_cast<Packet4ui>(p4i_20));
+ static const Packet16uc perm = {
+ 0x14, 0x15, 0x16, 0x17, 0x00, 0x01, 0x02, 0x03,
+ 0x1c, 0x1d, 0x1e, 0x1f, 0x08, 0x09, 0x0a, 0x0b };
+#ifdef _BIG_ENDIAN
+ emm0 = reinterpret_cast<Packet2l>(vec_perm(p4i_ZERO, emm04i, perm));
+#else
+ emm0 = reinterpret_cast<Packet2l>(vec_perm(emm04i, p4i_ZERO, perm));
+#endif
+
+#endif
+
+ return pmul(a, reinterpret_cast<Packet2d>(emm0));
+}
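+// Illustrative check (a sketch, not from the original patch): for exponent = 3.0
+// the construction above yields the 64-bit pattern (3 + 1023) << 52, i.e. the
+// IEEE754 encoding of 2^3 = 8.0, so pldexp(a, 3.0) returns a * 8.0.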
+
template<> EIGEN_STRONG_INLINE double predux<Packet2d>(const Packet2d& a)
{
Packet2d b, sum;
diff --git a/Eigen/src/Core/arch/Default/GenericPacketMathFunctions.h b/Eigen/src/Core/arch/Default/GenericPacketMathFunctions.h
new file mode 100644
index 000000000..452b4c806
--- /dev/null
+++ b/Eigen/src/Core/arch/Default/GenericPacketMathFunctions.h
@@ -0,0 +1,471 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2007 Julien Pommier
+// Copyright (C) 2014 Pedro Gonnet (pedro.gonnet@gmail.com)
+// Copyright (C) 2009-2019 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+/* The exp and log functions of this file initially come from
+ * Julien Pommier's sse math library: http://gruntthepeon.free.fr/ssemath/
+ */
+
+namespace Eigen {
+namespace internal {
+
+template<typename Packet> EIGEN_STRONG_INLINE Packet
+pfrexp_float(const Packet& a, Packet& exponent) {
+ typedef typename unpacket_traits<Packet>::integer_packet PacketI;
+ const Packet cst_126f = pset1<Packet>(126.0f);
+ const Packet cst_half = pset1<Packet>(0.5f);
+ const Packet cst_inv_mant_mask = pset1frombits<Packet>(~0x7f800000u);
+ exponent = psub(pcast<PacketI,Packet>(pshiftright<23>(preinterpret<PacketI>(a))), cst_126f);
+ return por(pand(a, cst_inv_mant_mask), cst_half);
+}
+
+template<typename Packet> EIGEN_STRONG_INLINE Packet
+pldexp_float(Packet a, Packet exponent)
+{
+ typedef typename unpacket_traits<Packet>::integer_packet PacketI;
+ const Packet cst_127 = pset1<Packet>(127.f);
+ // return a * 2^exponent
+ PacketI ei = pcast<Packet,PacketI>(padd(exponent, cst_127));
+ return pmul(a, preinterpret<Packet>(pshiftleft<23>(ei)));
+}
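+// Illustrative example (assumes normal, finite inputs): on each lane these two
+// helpers behave like the C library frexp/ldexp, e.g. pfrexp_float maps 12.0f to
+// the mantissa 0.75f and exponent 4.0f (12 = 0.75 * 2^4), while
+// pldexp_float(0.75f, 4.0f) rebuilds 12.0f by adding the 127 bias and shifting
+// the exponent into bits 23..30 of the IEEE754 representation.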
+
+// Natural logarithm
+// Computes log(x) as log(2^e * m) = C*e + log(m), where the constant C = log(2)
+// and m is in the range [sqrt(1/2),sqrt(2)). In this range, the logarithm can
+// be easily approximated by a polynomial centered on m=1 for stability.
+// TODO(gonnet): Further reduce the interval allowing for lower-degree
+// polynomial interpolants -> ... -> profit!
+template <typename Packet>
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+EIGEN_UNUSED
+Packet plog_float(const Packet _x)
+{
+ Packet x = _x;
+
+ const Packet cst_1 = pset1<Packet>(1.0f);
+ const Packet cst_half = pset1<Packet>(0.5f);
+ // The smallest positive normalized (non-denormal) float number.
+ const Packet cst_min_norm_pos = pset1frombits<Packet>( 0x00800000u);
+ const Packet cst_minus_inf = pset1frombits<Packet>( 0xff800000u);
+ const Packet cst_pos_inf = pset1frombits<Packet>( 0x7f800000u);
+
+ // Polynomial coefficients.
+ const Packet cst_cephes_SQRTHF = pset1<Packet>(0.707106781186547524f);
+ const Packet cst_cephes_log_p0 = pset1<Packet>(7.0376836292E-2f);
+ const Packet cst_cephes_log_p1 = pset1<Packet>(-1.1514610310E-1f);
+ const Packet cst_cephes_log_p2 = pset1<Packet>(1.1676998740E-1f);
+ const Packet cst_cephes_log_p3 = pset1<Packet>(-1.2420140846E-1f);
+ const Packet cst_cephes_log_p4 = pset1<Packet>(+1.4249322787E-1f);
+ const Packet cst_cephes_log_p5 = pset1<Packet>(-1.6668057665E-1f);
+ const Packet cst_cephes_log_p6 = pset1<Packet>(+2.0000714765E-1f);
+ const Packet cst_cephes_log_p7 = pset1<Packet>(-2.4999993993E-1f);
+ const Packet cst_cephes_log_p8 = pset1<Packet>(+3.3333331174E-1f);
+ const Packet cst_cephes_log_q1 = pset1<Packet>(-2.12194440e-4f);
+ const Packet cst_cephes_log_q2 = pset1<Packet>(0.693359375f);
+
+ // Truncate input values to the minimum positive normal.
+ x = pmax(x, cst_min_norm_pos);
+
+ Packet e;
+ // extract the significand in the range [0.5,1) and the exponent
+ x = pfrexp(x,e);
+
+ // part2: Shift the inputs from the range [0.5,1) to [sqrt(1/2),sqrt(2))
+ // and shift by -1. The values are then centered around 0, which improves
+ // the stability of the polynomial evaluation.
+ // if( x < SQRTHF ) {
+ // e -= 1;
+ // x = x + x - 1.0;
+ // } else { x = x - 1.0; }
+ Packet mask = pcmp_lt(x, cst_cephes_SQRTHF);
+ Packet tmp = pand(x, mask);
+ x = psub(x, cst_1);
+ e = psub(e, pand(cst_1, mask));
+ x = padd(x, tmp);
+
+ Packet x2 = pmul(x, x);
+ Packet x3 = pmul(x2, x);
+
+ // Evaluate the polynomial approximant of degree 8 in three parts, probably
+ // to improve instruction-level parallelism.
+ Packet y, y1, y2;
+ y = pmadd(cst_cephes_log_p0, x, cst_cephes_log_p1);
+ y1 = pmadd(cst_cephes_log_p3, x, cst_cephes_log_p4);
+ y2 = pmadd(cst_cephes_log_p6, x, cst_cephes_log_p7);
+ y = pmadd(y, x, cst_cephes_log_p2);
+ y1 = pmadd(y1, x, cst_cephes_log_p5);
+ y2 = pmadd(y2, x, cst_cephes_log_p8);
+ y = pmadd(y, x3, y1);
+ y = pmadd(y, x3, y2);
+ y = pmul(y, x3);
+
+ // Add the logarithm of the exponent back to the result of the interpolation.
+ y1 = pmul(e, cst_cephes_log_q1);
+ tmp = pmul(x2, cst_half);
+ y = padd(y, y1);
+ x = psub(x, tmp);
+ y2 = pmul(e, cst_cephes_log_q2);
+ x = padd(x, y);
+ x = padd(x, y2);
+
+ Packet invalid_mask = pcmp_lt_or_nan(_x, pzero(_x));
+ Packet iszero_mask = pcmp_eq(_x,pzero(_x));
+ Packet pos_inf_mask = pcmp_eq(_x,cst_pos_inf);
+ // Filter out invalid inputs, i.e.:
+ // - negative arg will be NAN
+ // - 0 will be -INF
+ // - +INF will be +INF
+ return pselect(iszero_mask, cst_minus_inf,
+ por(pselect(pos_inf_mask,cst_pos_inf,x), invalid_mask));
+}
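+// Worked example (illustrative only): for x = 8.0f, pfrexp yields m = 0.5 and
+// e = 4; since 0.5 < sqrt(1/2) the branchless adjustment gives e = 3 and the
+// centered argument 2*0.5 - 1 = 0, so the polynomial contributes log(1) = 0 and
+// the result is e*(q1 + q2) = 3*log(2), i.e. log(8).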
+
+// Exponential function. Works by writing "x = m*log(2) + r" where
+// "m = floor(x/log(2)+1/2)" and "r" is the remainder. The result is then
+// "exp(x) = 2^m*exp(r)" where exp(r) is in the range [-1,1).
+template <typename Packet>
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+EIGEN_UNUSED
+Packet pexp_float(const Packet _x)
+{
+ const Packet cst_1 = pset1<Packet>(1.0f);
+ const Packet cst_half = pset1<Packet>(0.5f);
+ const Packet cst_exp_hi = pset1<Packet>( 88.3762626647950f);
+ const Packet cst_exp_lo = pset1<Packet>(-88.3762626647949f);
+
+ const Packet cst_cephes_LOG2EF = pset1<Packet>(1.44269504088896341f);
+ const Packet cst_cephes_exp_p0 = pset1<Packet>(1.9875691500E-4f);
+ const Packet cst_cephes_exp_p1 = pset1<Packet>(1.3981999507E-3f);
+ const Packet cst_cephes_exp_p2 = pset1<Packet>(8.3334519073E-3f);
+ const Packet cst_cephes_exp_p3 = pset1<Packet>(4.1665795894E-2f);
+ const Packet cst_cephes_exp_p4 = pset1<Packet>(1.6666665459E-1f);
+ const Packet cst_cephes_exp_p5 = pset1<Packet>(5.0000001201E-1f);
+
+ // Clamp x.
+ Packet x = pmax(pmin(_x, cst_exp_hi), cst_exp_lo);
+
+ // Express exp(x) as exp(m*ln(2) + r), start by extracting
+ // m = floor(x/ln(2) + 0.5).
+ Packet m = pfloor(pmadd(x, cst_cephes_LOG2EF, cst_half));
+
+ // Get r = x - m*ln(2). If no FMA instructions are available, m*ln(2) is
+ // subtracted out in two parts, m*C1+m*C2 = m*ln(2), to avoid accumulating
+ // truncation errors.
+ Packet r;
+#ifdef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
+ const Packet cst_nln2 = pset1<Packet>(-0.6931471805599453f);
+ r = pmadd(m, cst_nln2, x);
+#else
+ const Packet cst_cephes_exp_C1 = pset1<Packet>(0.693359375f);
+ const Packet cst_cephes_exp_C2 = pset1<Packet>(-2.12194440e-4f);
+ r = psub(x, pmul(m, cst_cephes_exp_C1));
+ r = psub(r, pmul(m, cst_cephes_exp_C2));
+#endif
+
+ Packet r2 = pmul(r, r);
+
+ // TODO(gonnet): Split into odd/even polynomials and try to exploit
+ // instruction-level parallelism.
+ Packet y = cst_cephes_exp_p0;
+ y = pmadd(y, r, cst_cephes_exp_p1);
+ y = pmadd(y, r, cst_cephes_exp_p2);
+ y = pmadd(y, r, cst_cephes_exp_p3);
+ y = pmadd(y, r, cst_cephes_exp_p4);
+ y = pmadd(y, r, cst_cephes_exp_p5);
+ y = pmadd(y, r2, r);
+ y = padd(y, cst_1);
+
+ // Return 2^m * exp(r).
+ return pmax(pldexp(y,m), _x);
+}
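+// Worked example (illustrative only): for x = 1.0f the reduction gives
+// m = floor(1/ln(2) + 0.5) = 1 and r = 1 - ln(2) ~= 0.3069, so the result is
+// pldexp(exp(r), 1) ~= 2 * 1.3591 ~= 2.7183, with exp(r) evaluated by the
+// polynomial above.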
+
+template <typename Packet>
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+EIGEN_UNUSED
+Packet pexp_double(const Packet _x)
+{
+ Packet x = _x;
+
+ const Packet cst_1 = pset1<Packet>(1.0);
+ const Packet cst_2 = pset1<Packet>(2.0);
+ const Packet cst_half = pset1<Packet>(0.5);
+
+ const Packet cst_exp_hi = pset1<Packet>(709.437);
+ const Packet cst_exp_lo = pset1<Packet>(-709.436139303);
+
+ const Packet cst_cephes_LOG2EF = pset1<Packet>(1.4426950408889634073599);
+ const Packet cst_cephes_exp_p0 = pset1<Packet>(1.26177193074810590878e-4);
+ const Packet cst_cephes_exp_p1 = pset1<Packet>(3.02994407707441961300e-2);
+ const Packet cst_cephes_exp_p2 = pset1<Packet>(9.99999999999999999910e-1);
+ const Packet cst_cephes_exp_q0 = pset1<Packet>(3.00198505138664455042e-6);
+ const Packet cst_cephes_exp_q1 = pset1<Packet>(2.52448340349684104192e-3);
+ const Packet cst_cephes_exp_q2 = pset1<Packet>(2.27265548208155028766e-1);
+ const Packet cst_cephes_exp_q3 = pset1<Packet>(2.00000000000000000009e0);
+ const Packet cst_cephes_exp_C1 = pset1<Packet>(0.693145751953125);
+ const Packet cst_cephes_exp_C2 = pset1<Packet>(1.42860682030941723212e-6);
+
+ Packet tmp, fx;
+
+ // clamp x
+ x = pmax(pmin(x, cst_exp_hi), cst_exp_lo);
+ // Express exp(x) as exp(g + n*log(2)).
+ fx = pmadd(cst_cephes_LOG2EF, x, cst_half);
+
+ // Get the integer modulus of log(2), i.e. the "n" described above.
+ fx = pfloor(fx);
+
+ // Get the remainder modulo log(2), i.e. the "g" described above. Subtract
+ // n*log(2) out in two steps, i.e. n*C1 + n*C2, C1+C2=log2 to get the last
+ // digits right.
+ tmp = pmul(fx, cst_cephes_exp_C1);
+ Packet z = pmul(fx, cst_cephes_exp_C2);
+ x = psub(x, tmp);
+ x = psub(x, z);
+
+ Packet x2 = pmul(x, x);
+
+ // Evaluate the numerator polynomial of the rational interpolant.
+ Packet px = cst_cephes_exp_p0;
+ px = pmadd(px, x2, cst_cephes_exp_p1);
+ px = pmadd(px, x2, cst_cephes_exp_p2);
+ px = pmul(px, x);
+
+ // Evaluate the denominator polynomial of the rational interpolant.
+ Packet qx = cst_cephes_exp_q0;
+ qx = pmadd(qx, x2, cst_cephes_exp_q1);
+ qx = pmadd(qx, x2, cst_cephes_exp_q2);
+ qx = pmadd(qx, x2, cst_cephes_exp_q3);
+
+ // Evaluate the rational interpolant: with px odd and qx even in x, the two
+ // lines below compute 1 + 2*px/(qx-px) = (qx+px)/(qx-px), the classic Cephes
+ // Pade-style approximation of exp(g).
+ // TODO(gonnet): Perhaps find a better rational interpolant?
+ x = pdiv(px, psub(qx, px));
+ x = pmadd(cst_2, x, cst_1);
+
+ // Construct the result 2^n * exp(g) = e * x. The max is used to catch
+ // non-finite values in the input.
+ return pmax(pldexp(x,fx), _x);
+}
+
+// The following code is inspired by the following stack-overflow answer:
+// https://stackoverflow.com/questions/30463616/payne-hanek-algorithm-implementation-in-c/30465751#30465751
+// It has been largely optimized:
+// - Bypass calls to frexp.
+// - Aligned loads of the required 96 bits of 2/pi. This is accomplished by
+//   (1) balancing the mantissa and exponent so that the required bits of 2/pi
+//   are aligned on 8 bits, and (2) replicating the storage of the bits of 2/pi.
+// - Avoid a branch in rounding and extraction of the remaining fractional part.
+// Overall, I measured a speed-up higher than x2 on x86-64.
+inline float trig_reduce_huge (float xf, int *quadrant)
+{
+ using Eigen::numext::int32_t;
+ using Eigen::numext::uint32_t;
+ using Eigen::numext::int64_t;
+ using Eigen::numext::uint64_t;
+
+ const double pio2_62 = 3.4061215800865545e-19; // pi/2 * 2^-62
+ const uint64_t zero_dot_five = uint64_t(1) << 61; // 0.5 in 2.62-bit fixed-point format
+
+ // 192 bits of 2/pi for Payne-Hanek reduction
+ // Bits are introduced in packets of 8 to enable aligned reads.
+ static const uint32_t two_over_pi [] =
+ {
+ 0x00000028, 0x000028be, 0x0028be60, 0x28be60db,
+ 0xbe60db93, 0x60db9391, 0xdb939105, 0x9391054a,
+ 0x91054a7f, 0x054a7f09, 0x4a7f09d5, 0x7f09d5f4,
+ 0x09d5f47d, 0xd5f47d4d, 0xf47d4d37, 0x7d4d3770,
+ 0x4d377036, 0x377036d8, 0x7036d8a5, 0x36d8a566,
+ 0xd8a5664f, 0xa5664f10, 0x664f10e4, 0x4f10e410,
+ 0x10e41000, 0xe4100000
+ };
+
+ uint32_t xi = numext::as_uint(xf);
+ // Below, -118 = -126 + 8.
+ // -126 is to get the exponent,
+ // +8 is to enable alignment of 2/pi's bits on 8 bits.
+ // This is possible because the fractional part of x has only 24 meaningful bits.
+ uint32_t e = (xi >> 23) - 118;
+ // Extract the mantissa and shift it to align it wrt the exponent
+ xi = ((xi & 0x007fffffu)| 0x00800000u) << (e & 0x7);
+
+ uint32_t i = e >> 3;
+ uint32_t twoopi_1 = two_over_pi[i-1];
+ uint32_t twoopi_2 = two_over_pi[i+3];
+ uint32_t twoopi_3 = two_over_pi[i+7];
+
+ // Compute x * 2/pi in 2.62-bit fixed-point format.
+ uint64_t p;
+ p = uint64_t(xi) * twoopi_3;
+ p = uint64_t(xi) * twoopi_2 + (p >> 32);
+ p = (uint64_t(xi * twoopi_1) << 32) + p;
+
+ // Round to nearest: add 0.5 and extract integral part.
+ uint64_t q = (p + zero_dot_five) >> 62;
+ *quadrant = int(q);
+ // Now it remains to compute "r = x - q*pi/2" with high accuracy. Since we
+ // have p = x/(pi/2) with high accuracy, we can more efficiently compute r as
+ //   r = (p-q)*pi/2,
+ // where the product can be carried out with sufficient accuracy using double precision.
+ p -= q<<62;
+ return float(double(int64_t(p)) * pio2_62);
+}
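+// Usage note (illustrative only): for a lane with a huge input x, this scalar
+// fallback returns r in [-pi/4, pi/4] and sets *quadrant such that
+// x ~= quadrant*pi/2 + r; psincos_float below only takes this path for lanes
+// whose magnitude exceeds huge_th.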
+
+template<bool ComputeSine,typename Packet>
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+EIGEN_UNUSED
+#if EIGEN_GNUC_AT_LEAST(4,4)
+__attribute__((optimize("-fno-unsafe-math-optimizations")))
+#endif
+Packet psincos_float(const Packet& _x)
+{
+// Workaround -ffast-math aggressive optimizations
+// See bug 1674
+#if EIGEN_COMP_CLANG && defined(EIGEN_VECTORIZE_SSE)
+#define EIGEN_SINCOS_DONT_OPT(X) __asm__ ("" : "+x" (X));
+#else
+#define EIGEN_SINCOS_DONT_OPT(X)
+#endif
+
+ typedef typename unpacket_traits<Packet>::integer_packet PacketI;
+
+ const Packet cst_2oPI = pset1<Packet>(0.636619746685028076171875f); // 2/PI
+ const Packet cst_rounding_magic = pset1<Packet>(12582912); // 1.5*2^23 for rounding
+ const PacketI csti_1 = pset1<PacketI>(1);
+ const Packet cst_sign_mask = pset1frombits<Packet>(0x80000000u);
+
+ Packet x = pabs(_x);
+
+ // Scale x by 2/Pi to find the quadrant index of x.
+ Packet y = pmul(x, cst_2oPI);
+
+ // Rounding trick to find the nearest integer: adding the magic constant
+ // 1.5*2^23 makes the float addition round y to the nearest integer (the
+ // mantissa can no longer hold fractional bits), and the integer then sits in
+ // the low mantissa bits of the sum.
+ Packet y_round = padd(y, cst_rounding_magic);
+ EIGEN_SINCOS_DONT_OPT(y_round)
+ PacketI y_int = preinterpret<PacketI>(y_round); // the last 23 bits represent the integer (if abs(x)<2^24)
+ y = psub(y_round, cst_rounding_magic); // nearest integer to x*2/pi
+
+ // Reduce x by y quadrants to get: -Pi/4 <= x <= +Pi/4
+ // using "Extended precision modular arithmetic"
+ #if defined(EIGEN_HAS_SINGLE_INSTRUCTION_MADD)
+ // This version requires true FMA for high accuracy
+ // It provides a max error of 1 ULP up to the thresholds below (with absolute error < 5.9605e-08):
+ const float huge_th = ComputeSine ? 117435.992f : 71476.0625f;
+ x = pmadd(y, pset1<Packet>(-1.57079601287841796875f), x);
+ x = pmadd(y, pset1<Packet>(-3.1391647326017846353352069854736328125e-07f), x);
+ x = pmadd(y, pset1<Packet>(-5.390302529957764765544681040410068817436695098876953125e-15f), x);
+ #else
+ // Without true FMA, the previous set of coefficients maintains 1 ULP accuracy
+ // up to x<15.7 (for sin), but accuracy is immediately lost for x>15.7.
+ // We thus use one more iteration to maintain 2 ULPs up to reasonably large inputs.
+
+ // The following set of coefficients maintains 1 ULP up to 9.43 and 14.16 for sin
+ // and cos respectively, and 2 ULP up to the thresholds below:
+ const float huge_th = ComputeSine ? 25966.f : 18838.f;
+ x = pmadd(y, pset1<Packet>(-1.5703125), x); // = 0xbfc90000
+ EIGEN_SINCOS_DONT_OPT(x)
+ x = pmadd(y, pset1<Packet>(-0.000483989715576171875), x); // = 0xb9fdc000
+ EIGEN_SINCOS_DONT_OPT(x)
+ x = pmadd(y, pset1<Packet>(1.62865035235881805419921875e-07), x); // = 0x342ee000
+ x = pmadd(y, pset1<Packet>(5.5644315544167710640977020375430583953857421875e-11), x); // = 0x2e74b9ee
+
+ // For the record, the following set of coefficients maintains 2 ULP up
+ // to a slightly larger range:
+ // const float huge_th = ComputeSine ? 51981.f : 39086.125f;
+ // but it slightly fails to maintain 1ULP for two values of sin below pi.
+ // x = pmadd(y, pset1<Packet>(-3.140625/2.), x);
+ // x = pmadd(y, pset1<Packet>(-0.00048351287841796875), x);
+ // x = pmadd(y, pset1<Packet>(-3.13855707645416259765625e-07), x);
+ // x = pmadd(y, pset1<Packet>(-6.0771006282767103812147979624569416046142578125e-11), x);
+
+ // For the record, with only 3 iterations it is possible to maintain
+ // 1 ULP up to 3PI (maybe more) and 2ULP up to 255.
+ // The coefficients are: 0xbfc90f80, 0xb7354480, 0x2e74b9ee
+ #endif
+
+ if(predux_any(pcmp_le(pset1<Packet>(huge_th),pabs(_x))))
+ {
+ const int PacketSize = unpacket_traits<Packet>::size;
+ EIGEN_ALIGN_TO_BOUNDARY(sizeof(Packet)) float vals[PacketSize];
+ EIGEN_ALIGN_TO_BOUNDARY(sizeof(Packet)) float x_cpy[PacketSize];
+ EIGEN_ALIGN_TO_BOUNDARY(sizeof(Packet)) int y_int2[PacketSize];
+ pstoreu(vals, pabs(_x));
+ pstoreu(x_cpy, x);
+ pstoreu(y_int2, y_int);
+ for(int k=0; k<PacketSize;++k)
+ {
+ float val = vals[k];
+ if(val>=huge_th && (numext::isfinite)(val))
+ x_cpy[k] = trig_reduce_huge(val,&y_int2[k]);
+ }
+ x = ploadu<Packet>(x_cpy);
+ y_int = ploadu<PacketI>(y_int2);
+ }
+
+ // Compute the sign to apply to the polynomial.
+ // sin: sign = second_bit(y_int) xor signbit(_x)
+ // cos: sign = second_bit(y_int+1)
+ Packet sign_bit = ComputeSine ? pxor(_x, preinterpret<Packet>(pshiftleft<30>(y_int)))
+ : preinterpret<Packet>(pshiftleft<30>(padd(y_int,csti_1)));
+ sign_bit = pand(sign_bit, cst_sign_mask); // clear all but the leftmost (sign) bit
+
+ // Get the polynomial selection mask from the parity (lowest bit) of y_int.
+ // We'll calculate both (sin and cos) polynomials and then select from the two.
+ Packet poly_mask = preinterpret<Packet>(pcmp_eq(pand(y_int, csti_1), pzero(y_int)));
+
+ Packet x2 = pmul(x,x);
+
+ // Evaluate the cos(x) polynomial. (-Pi/4 <= x <= Pi/4)
+ Packet y1 = pset1<Packet>(2.4372266125283204019069671630859375e-05f);
+ y1 = pmadd(y1, x2, pset1<Packet>(-0.00138865201734006404876708984375f ));
+ y1 = pmadd(y1, x2, pset1<Packet>(0.041666619479656219482421875f ));
+ y1 = pmadd(y1, x2, pset1<Packet>(-0.5f));
+ y1 = pmadd(y1, x2, pset1<Packet>(1.f));
+
+ // Evaluate the sin(x) polynomial. (-Pi/4 <= x <= Pi/4)
+ // octave/matlab code to compute those coefficients:
+ // x = (0:0.0001:pi/4)';
+ // A = [x.^3 x.^5 x.^7];
+ // w = ((1.-(x/(pi/4)).^2).^5)*2000+1; # weights trading relative accuracy
+ // c = (A'*diag(w)*A)\(A'*diag(w)*(sin(x)-x)); # weighted LS, linear coeff forced to 1
+ // printf('%.64f\n %.64f\n%.64f\n', c(3), c(2), c(1))
+ //
+ Packet y2 = pset1<Packet>(-0.0001959234114083702898469196984621021329076029360294342041015625f);
+ y2 = pmadd(y2, x2, pset1<Packet>( 0.0083326873655616851693794799871284340042620897293090820312500000f));
+ y2 = pmadd(y2, x2, pset1<Packet>(-0.1666666203982298255503735617821803316473960876464843750000000000f));
+ y2 = pmul(y2, x2);
+ y2 = pmadd(y2, x, x);
+
+ // Select the correct result from the two polynomials.
+ y = ComputeSine ? pselect(poly_mask,y2,y1)
+ : pselect(poly_mask,y1,y2);
+
+ // Update the sign and filter huge inputs
+ return pxor(y, sign_bit);
+
+#undef EIGEN_SINCOS_DONT_OPT
+}
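+// Worked example (illustrative only): for _x = 3*pi/4, y = _x*2/pi = 1.5 rounds
+// to the quadrant index y_int = 2 and the reduced argument becomes x = -pi/4.
+// For sine, the sign is bit 1 of y_int (1) xor signbit(_x) (0), i.e. negative,
+// and the even quadrant selects the sine polynomial: -sin(-pi/4) = 0.7071 = sin(3*pi/4).
+// For cosine, the sign comes from bit 1 of y_int+1 = 3 (also negative) and the
+// cosine polynomial is selected: -cos(-pi/4) = -0.7071 = cos(3*pi/4).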
+
+template<typename Packet>
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+EIGEN_UNUSED
+Packet psin_float(const Packet& x)
+{
+ return psincos_float<true>(x);
+}
+
+template<typename Packet>
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+EIGEN_UNUSED
+Packet pcos_float(const Packet& x)
+{
+ return psincos_float<false>(x);
+}
+
+} // end namespace internal
+} // end namespace Eigen
diff --git a/Eigen/src/Core/arch/Default/Settings.h b/Eigen/src/Core/arch/Default/Settings.h
index 097373c84..a5c3ada4c 100644
--- a/Eigen/src/Core/arch/Default/Settings.h
+++ b/Eigen/src/Core/arch/Default/Settings.h
@@ -21,7 +21,7 @@
* it does not correspond to the number of iterations or the number of instructions
*/
#ifndef EIGEN_UNROLLING_LIMIT
-#define EIGEN_UNROLLING_LIMIT 100
+#define EIGEN_UNROLLING_LIMIT 110
#endif
/** Defines the threshold between a "small" and a "large" matrix.
diff --git a/Eigen/src/Core/arch/GPU/PacketMath.h b/Eigen/src/Core/arch/GPU/PacketMath.h
index ddf37b9c1..cd4615a45 100644
--- a/Eigen/src/Core/arch/GPU/PacketMath.h
+++ b/Eigen/src/Core/arch/GPU/PacketMath.h
@@ -53,6 +53,7 @@ template<> struct packet_traits<float> : default_packet_traits
HasBetaInc = 1,
HasBlend = 0,
+ HasFloor = 1,
};
};
@@ -86,12 +87,13 @@ template<> struct packet_traits<double> : default_packet_traits
HasBetaInc = 1,
HasBlend = 0,
+ HasFloor = 1,
};
};
-template<> struct unpacket_traits<float4> { typedef float type; enum {size=4, alignment=Aligned16}; typedef float4 half; };
-template<> struct unpacket_traits<double2> { typedef double type; enum {size=2, alignment=Aligned16}; typedef double2 half; };
+template<> struct unpacket_traits<float4> { typedef float type; enum {size=4, alignment=Aligned16, vectorizable=true}; typedef float4 half; };
+template<> struct unpacket_traits<double2> { typedef double type; enum {size=2, alignment=Aligned16, vectorizable=true}; typedef double2 half; };
template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pset1<float4>(const float& from) {
return make_float4(from, from, from, from);
@@ -100,6 +102,117 @@ template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 pset1<double2>(const do
return make_double2(from, from);
}
+namespace {
+
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float bitwise_and(const float& a,
+ const float& b) {
+ return __int_as_float(__float_as_int(a) & __float_as_int(b));
+}
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double bitwise_and(const double& a,
+ const double& b) {
+ return __longlong_as_double(__double_as_longlong(a) &
+ __double_as_longlong(b));
+}
+
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float bitwise_or(const float& a,
+ const float& b) {
+ return __int_as_float(__float_as_int(a) | __float_as_int(b));
+}
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double bitwise_or(const double& a,
+ const double& b) {
+ return __longlong_as_double(__double_as_longlong(a) |
+ __double_as_longlong(b));
+}
+
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float bitwise_xor(const float& a,
+ const float& b) {
+ return __int_as_float(__float_as_int(a) ^ __float_as_int(b));
+}
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double bitwise_xor(const double& a,
+ const double& b) {
+ return __longlong_as_double(__double_as_longlong(a) ^
+ __double_as_longlong(b));
+}
+
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float bitwise_andnot(const float& a,
+ const float& b) {
+ return __int_as_float(__float_as_int(a) & ~__float_as_int(b));
+}
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double bitwise_andnot(const double& a,
+ const double& b) {
+ return __longlong_as_double(__double_as_longlong(a) &
+ ~__double_as_longlong(b));
+}
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float eq_mask(const float& a,
+ const float& b) {
+ return __int_as_float(a == b ? 0xffffffffu : 0u);
+}
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double eq_mask(const double& a,
+ const double& b) {
+ return __longlong_as_double(a == b ? 0xffffffffffffffffull : 0ull);
+}
+
+} // namespace
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pand<float4>(const float4& a,
+ const float4& b) {
+ return make_float4(bitwise_and(a.x, b.x), bitwise_and(a.y, b.y),
+ bitwise_and(a.z, b.z), bitwise_and(a.w, b.w));
+}
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 pand<double2>(const double2& a,
+ const double2& b) {
+ return make_double2(bitwise_and(a.x, b.x), bitwise_and(a.y, b.y));
+}
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 por<float4>(const float4& a,
+ const float4& b) {
+ return make_float4(bitwise_or(a.x, b.x), bitwise_or(a.y, b.y),
+ bitwise_or(a.z, b.z), bitwise_or(a.w, b.w));
+}
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 por<double2>(const double2& a,
+ const double2& b) {
+ return make_double2(bitwise_or(a.x, b.x), bitwise_or(a.y, b.y));
+}
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pxor<float4>(const float4& a,
+ const float4& b) {
+ return make_float4(bitwise_xor(a.x, b.x), bitwise_xor(a.y, b.y),
+ bitwise_xor(a.z, b.z), bitwise_xor(a.w, b.w));
+}
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 pxor<double2>(const double2& a,
+ const double2& b) {
+ return make_double2(bitwise_xor(a.x, b.x), bitwise_xor(a.y, b.y));
+}
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pandnot<float4>(const float4& a,
+ const float4& b) {
+ return make_float4(bitwise_andnot(a.x, b.x), bitwise_andnot(a.y, b.y),
+ bitwise_andnot(a.z, b.z), bitwise_andnot(a.w, b.w));
+}
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2
+pandnot<double2>(const double2& a, const double2& b) {
+ return make_double2(bitwise_andnot(a.x, b.x), bitwise_andnot(a.y, b.y));
+}
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pcmp_eq<float4>(const float4& a,
+ const float4& b) {
+ return make_float4(eq_mask(a.x, b.x), eq_mask(a.y, b.y), eq_mask(a.z, b.z),
+ eq_mask(a.w, b.w));
+}
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2
+pcmp_eq<double2>(const double2& a, const double2& b) {
+ return make_double2(eq_mask(a.x, b.x), eq_mask(a.y, b.y));
+}
template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 plset<float4>(const float& a) {
return make_float4(a, a+1, a+2, a+3);
@@ -297,6 +410,13 @@ template<> EIGEN_DEVICE_FUNC inline double2 pabs<double2>(const double2& a) {
return make_double2(fabs(a.x), fabs(a.y));
}
+template<> EIGEN_DEVICE_FUNC inline float4 pfloor<float4>(const float4& a) {
+ return make_float4(floorf(a.x), floorf(a.y), floorf(a.z), floorf(a.w));
+}
+template<> EIGEN_DEVICE_FUNC inline double2 pfloor<double2>(const double2& a) {
+ return make_double2(floor(a.x), floor(a.y));
+}
+
EIGEN_DEVICE_FUNC inline void
ptranspose(PacketBlock<float4,4>& kernel) {
float tmp = kernel.packet[0].y;
diff --git a/Eigen/src/Core/arch/GPU/PacketMathHalf.h b/Eigen/src/Core/arch/GPU/PacketMathHalf.h
index 8787adcde..869fa7ec6 100644
--- a/Eigen/src/Core/arch/GPU/PacketMathHalf.h
+++ b/Eigen/src/Core/arch/GPU/PacketMathHalf.h
@@ -30,6 +30,7 @@ template<> struct packet_traits<Eigen::half> : default_packet_traits
size=2,
HasHalfPacket = 0,
HasAdd = 1,
+ HasSub = 1,
HasMul = 1,
HasDiv = 1,
HasSqrt = 1,
@@ -41,7 +42,7 @@ template<> struct packet_traits<Eigen::half> : default_packet_traits
};
};
-template<> struct unpacket_traits<half2> { typedef Eigen::half type; enum {size=2, alignment=Aligned16}; typedef half2 half; };
+template<> struct unpacket_traits<half2> { typedef Eigen::half type; enum {size=2, alignment=Aligned16, vectorizable=true}; typedef half2 half; };
template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pset1<half2>(const Eigen::half& from) {
@@ -137,12 +138,22 @@ template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Eigen::half pfirst<half2>(const
}
template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pabs<half2>(const half2& a) {
- half2 result;
- unsigned temp = *(reinterpret_cast<const unsigned*>(&(a)));
- *(reinterpret_cast<unsigned*>(&(result))) = temp & 0x7FFF7FFF;
- return result;
+ half a1 = __low2half(a);
+ half a2 = __high2half(a);
+ half result1 = half_impl::raw_uint16_to_half(a1.x & 0x7FFF);
+ half result2 = half_impl::raw_uint16_to_half(a2.x & 0x7FFF);
+ return __halves2half2(result1, result2);
}
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 ptrue<half2>(const half2& a) {
+ half true_half = half_impl::raw_uint16_to_half(0xffffu);
+ return pset1<half2>(true_half);
+}
+
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pzero<half2>(const half2& a) {
+ half false_half = half_impl::raw_uint16_to_half(0x0000u);
+ return pset1<half2>(false_half);
+}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void
ptranspose(PacketBlock<half2,2>& kernel) {
@@ -171,6 +182,68 @@ template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 plset<half2>(const Eigen:
#endif
}
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pcmp_eq<half2>(const half2& a,
+ const half2& b) {
+ half true_half = half_impl::raw_uint16_to_half(0xffffu);
+ half false_half = half_impl::raw_uint16_to_half(0x0000u);
+ half a1 = __low2half(a);
+ half a2 = __high2half(a);
+ half b1 = __low2half(b);
+ half b2 = __high2half(b);
+ half eq1 = __half2float(a1) == __half2float(b1) ? true_half : false_half;
+ half eq2 = __half2float(a2) == __half2float(b2) ? true_half : false_half;
+ return __halves2half2(eq1, eq2);
+}
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pand<half2>(const half2& a,
+ const half2& b) {
+ half a1 = __low2half(a);
+ half a2 = __high2half(a);
+ half b1 = __low2half(b);
+ half b2 = __high2half(b);
+ half result1 = half_impl::raw_uint16_to_half(a1.x & b1.x);
+ half result2 = half_impl::raw_uint16_to_half(a2.x & b2.x);
+ return __halves2half2(result1, result2);
+}
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 por<half2>(const half2& a,
+ const half2& b) {
+ half a1 = __low2half(a);
+ half a2 = __high2half(a);
+ half b1 = __low2half(b);
+ half b2 = __high2half(b);
+ half result1 = half_impl::raw_uint16_to_half(a1.x | b1.x);
+ half result2 = half_impl::raw_uint16_to_half(a2.x | b2.x);
+ return __halves2half2(result1, result2);
+}
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pxor<half2>(const half2& a,
+ const half2& b) {
+ half a1 = __low2half(a);
+ half a2 = __high2half(a);
+ half b1 = __low2half(b);
+ half b2 = __high2half(b);
+ half result1 = half_impl::raw_uint16_to_half(a1.x ^ b1.x);
+ half result2 = half_impl::raw_uint16_to_half(a2.x ^ b2.x);
+ return __halves2half2(result1, result2);
+}
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pandnot<half2>(const half2& a,
+ const half2& b) {
+ half a1 = __low2half(a);
+ half a2 = __high2half(a);
+ half b1 = __low2half(b);
+ half b2 = __high2half(b);
+ half result1 = half_impl::raw_uint16_to_half(a1.x & ~b1.x);
+ half result2 = half_impl::raw_uint16_to_half(a2.x & ~b2.x);
+ return __halves2half2(result1, result2);
+}
+
template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 padd<half2>(const half2& a, const half2& b) {
#if defined(EIGEN_HIP_DEVICE_COMPILE)
@@ -500,6 +573,7 @@ struct packet_traits<half> : default_packet_traits {
HasAdd = 1,
HasSub = 1,
HasMul = 1,
+ HasDiv = 1,
HasNegate = 1,
HasAbs = 0,
HasAbs2 = 0,
@@ -507,7 +581,6 @@ struct packet_traits<half> : default_packet_traits {
HasMax = 0,
HasConj = 0,
HasSetLinear = 0,
- HasDiv = 0,
HasSqrt = 0,
HasRsqrt = 0,
HasExp = 0,
@@ -517,7 +590,7 @@ struct packet_traits<half> : default_packet_traits {
};
-template<> struct unpacket_traits<Packet16h> { typedef Eigen::half type; enum {size=16, alignment=Aligned32}; typedef Packet16h half; };
+template<> struct unpacket_traits<Packet16h> { typedef Eigen::half type; enum {size=16, alignment=Aligned32, vectorizable=true}; typedef Packet16h half; };
template<> EIGEN_STRONG_INLINE Packet16h pset1<Packet16h>(const Eigen::half& from) {
Packet16h result;
@@ -640,6 +713,36 @@ EIGEN_STRONG_INLINE Packet16h float2half(const Packet16f& a) {
#endif
}
+template<> EIGEN_STRONG_INLINE Packet16h pnot(const Packet16h& a) {
+ Packet16h r; r.x = _mm256_xor_si256(a.x, pcmp_eq(a.x, a.x)); return r;
+}
+
+template<> EIGEN_STRONG_INLINE Packet16h ptrue(const Packet16h& a) {
+ Packet16h r; r.x = Packet8i(ptrue(a.x)); return r;
+}
+
+template<> EIGEN_STRONG_INLINE Packet16h por(const Packet16h& a,const Packet16h& b) {
+ // in some cases Packet8i is a wrapper around __m256i, so we need to
+ // cast to Packet8i to call the correct overload.
+ Packet16h r; r.x = por(Packet8i(a.x),Packet8i(b.x)); return r;
+}
+template<> EIGEN_STRONG_INLINE Packet16h pxor(const Packet16h& a,const Packet16h& b) {
+ Packet16h r; r.x = pxor(Packet8i(a.x),Packet8i(b.x)); return r;
+}
+template<> EIGEN_STRONG_INLINE Packet16h pand(const Packet16h& a,const Packet16h& b) {
+ Packet16h r; r.x = pand(Packet8i(a.x),Packet8i(b.x)); return r;
+}
+template<> EIGEN_STRONG_INLINE Packet16h pandnot(const Packet16h& a,const Packet16h& b) {
+ Packet16h r; r.x = pandnot(Packet8i(a.x),Packet8i(b.x)); return r;
+}
+
+template<> EIGEN_STRONG_INLINE Packet16h pcmp_eq(const Packet16h& a,const Packet16h& b) {
+ Packet16f af = half2float(a);
+ Packet16f bf = half2float(b);
+ Packet16f rf = pcmp_eq(af, bf);
+ return float2half(rf);
+}
+
template<> EIGEN_STRONG_INLINE Packet16h pnegate(const Packet16h& a) {
// FIXME we could do that with bit manipulation
Packet16f af = half2float(a);
@@ -668,6 +771,13 @@ template<> EIGEN_STRONG_INLINE Packet16h pmul<Packet16h>(const Packet16h& a, con
return float2half(rf);
}
+template<> EIGEN_STRONG_INLINE Packet16h pdiv<Packet16h>(const Packet16h& a, const Packet16h& b) {
+ Packet16f af = half2float(a);
+ Packet16f bf = half2float(b);
+ Packet16f rf = pdiv(af, bf);
+ return float2half(rf);
+}
+
template<> EIGEN_STRONG_INLINE half predux<Packet16h>(const Packet16h& from) {
Packet16f from_float = half2float(from);
return half(predux(from_float));
@@ -952,6 +1062,7 @@ struct packet_traits<Eigen::half> : default_packet_traits {
HasAdd = 1,
HasSub = 1,
HasMul = 1,
+ HasDiv = 1,
HasNegate = 1,
HasAbs = 0,
HasAbs2 = 0,
@@ -959,7 +1070,6 @@ struct packet_traits<Eigen::half> : default_packet_traits {
HasMax = 0,
HasConj = 0,
HasSetLinear = 0,
- HasDiv = 0,
HasSqrt = 0,
HasRsqrt = 0,
HasExp = 0,
@@ -969,7 +1079,7 @@ struct packet_traits<Eigen::half> : default_packet_traits {
};
-template<> struct unpacket_traits<Packet8h> { typedef Eigen::half type; enum {size=8, alignment=Aligned16}; typedef Packet8h half; };
+template<> struct unpacket_traits<Packet8h> { typedef Eigen::half type; enum {size=8, alignment=Aligned16, vectorizable=true}; typedef Packet8h half; };
template<> EIGEN_STRONG_INLINE Packet8h pset1<Packet8h>(const Eigen::half& from) {
Packet8h result;
@@ -1063,6 +1173,32 @@ EIGEN_STRONG_INLINE Packet8h float2half(const Packet8f& a) {
#endif
}
+template<> EIGEN_STRONG_INLINE Packet8h ptrue(const Packet8h& a) {
+ Packet8h r; r.x = _mm_cmpeq_epi32(a.x, a.x); return r;
+}
+
+template<> EIGEN_STRONG_INLINE Packet8h por(const Packet8h& a,const Packet8h& b) {
+ // in some cases Packet4i is a wrapper around __m128i, so here we call the
+ // integer SSE intrinsics directly rather than going through the Packet4i overloads:
+ Packet8h r; r.x = _mm_or_si128(a.x,b.x); return r;
+}
+template<> EIGEN_STRONG_INLINE Packet8h pxor(const Packet8h& a,const Packet8h& b) {
+ Packet8h r; r.x = _mm_xor_si128(a.x,b.x); return r;
+}
+template<> EIGEN_STRONG_INLINE Packet8h pand(const Packet8h& a,const Packet8h& b) {
+ Packet8h r; r.x = _mm_and_si128(a.x,b.x); return r;
+}
+template<> EIGEN_STRONG_INLINE Packet8h pandnot(const Packet8h& a,const Packet8h& b) {
+ Packet8h r; r.x = _mm_andnot_si128(b.x,a.x); return r;
+}
+
+template<> EIGEN_STRONG_INLINE Packet8h pcmp_eq(const Packet8h& a,const Packet8h& b) {
+ Packet8f af = half2float(a);
+ Packet8f bf = half2float(b);
+ Packet8f rf = pcmp_eq(af, bf);
+ return float2half(rf);
+}
+
template<> EIGEN_STRONG_INLINE Packet8h pconj(const Packet8h& a) { return a; }
template<> EIGEN_STRONG_INLINE Packet8h pnegate(const Packet8h& a) {
@@ -1093,6 +1229,13 @@ template<> EIGEN_STRONG_INLINE Packet8h pmul<Packet8h>(const Packet8h& a, const
return float2half(rf);
}
+template<> EIGEN_STRONG_INLINE Packet8h pdiv<Packet8h>(const Packet8h& a, const Packet8h& b) {
+ Packet8f af = half2float(a);
+ Packet8f bf = half2float(b);
+ Packet8f rf = pdiv(af, bf);
+ return float2half(rf);
+}
+
template<> EIGEN_STRONG_INLINE Packet8h pgather<Eigen::half, Packet8h>(const Eigen::half* from, Index stride)
{
Packet8h result;
@@ -1279,9 +1422,10 @@ struct packet_traits<Eigen::half> : default_packet_traits {
AlignedOnScalar = 1,
size = 4,
HasHalfPacket = 0,
- HasAdd = 0,
- HasSub = 0,
- HasMul = 0,
+ HasAdd = 1,
+ HasSub = 1,
+ HasMul = 1,
+ HasDiv = 1,
HasNegate = 0,
HasAbs = 0,
HasAbs2 = 0,
@@ -1289,7 +1433,6 @@ struct packet_traits<Eigen::half> : default_packet_traits {
HasMax = 0,
HasConj = 0,
HasSetLinear = 0,
- HasDiv = 0,
HasSqrt = 0,
HasRsqrt = 0,
HasExp = 0,
@@ -1299,7 +1442,7 @@ struct packet_traits<Eigen::half> : default_packet_traits {
};
-template<> struct unpacket_traits<Packet4h> { typedef Eigen::half type; enum {size=4, alignment=Aligned16}; typedef Packet4h half; };
+template<> struct unpacket_traits<Packet4h> { typedef Eigen::half type; enum {size=4, alignment=Aligned16, vectorizable=true}; typedef Packet4h half; };
template<> EIGEN_STRONG_INLINE Packet4h pset1<Packet4h>(const Eigen::half& from) {
Packet4h result;
@@ -1336,6 +1479,29 @@ template<> EIGEN_STRONG_INLINE Packet4h padd<Packet4h>(const Packet4h& a, const
return result;
}
+template<> EIGEN_STRONG_INLINE Packet4h psub<Packet4h>(const Packet4h& a, const Packet4h& b) {
+ __int64_t a64 = _mm_cvtm64_si64(a.x);
+ __int64_t b64 = _mm_cvtm64_si64(b.x);
+
+ Eigen::half h[4];
+
+ Eigen::half ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64));
+ Eigen::half hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64));
+ h[0] = ha - hb;
+ ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 16));
+ hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 16));
+ h[1] = ha - hb;
+ ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 32));
+ hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 32));
+ h[2] = ha - hb;
+ ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 48));
+ hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 48));
+ h[3] = ha - hb;
+ Packet4h result;
+ result.x = _mm_set_pi16(h[3].x, h[2].x, h[1].x, h[0].x);
+ return result;
+}
+
template<> EIGEN_STRONG_INLINE Packet4h pmul<Packet4h>(const Packet4h& a, const Packet4h& b) {
__int64_t a64 = _mm_cvtm64_si64(a.x);
__int64_t b64 = _mm_cvtm64_si64(b.x);
@@ -1359,6 +1525,29 @@ template<> EIGEN_STRONG_INLINE Packet4h pmul<Packet4h>(const Packet4h& a, const
return result;
}
+template<> EIGEN_STRONG_INLINE Packet4h pdiv<Packet4h>(const Packet4h& a, const Packet4h& b) {
+ __int64_t a64 = _mm_cvtm64_si64(a.x);
+ __int64_t b64 = _mm_cvtm64_si64(b.x);
+
+ Eigen::half h[4];
+
+ Eigen::half ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64));
+ Eigen::half hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64));
+ h[0] = ha / hb;
+ ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 16));
+ hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 16));
+ h[1] = ha / hb;
+ ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 32));
+ hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 32));
+ h[2] = ha / hb;
+ ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 48));
+ hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 48));
+ h[3] = ha / hb;
+ Packet4h result;
+ result.x = _mm_set_pi16(h[3].x, h[2].x, h[1].x, h[0].x);
+ return result;
+}
+
template<> EIGEN_STRONG_INLINE Packet4h pload<Packet4h>(const Eigen::half* from) {
Packet4h result;
result.x = _mm_cvtsi64_m64(*reinterpret_cast<const __int64_t*>(from));
diff --git a/Eigen/src/Core/arch/MSA/Complex.h b/Eigen/src/Core/arch/MSA/Complex.h
index 9a45cf51e..fa64d3564 100644
--- a/Eigen/src/Core/arch/MSA/Complex.h
+++ b/Eigen/src/Core/arch/MSA/Complex.h
@@ -127,7 +127,7 @@ struct packet_traits<std::complex<float> > : default_packet_traits {
template <>
struct unpacket_traits<Packet2cf> {
typedef std::complex<float> type;
- enum { size = 2, alignment = Aligned16 };
+ enum { size = 2, alignment = Aligned16, vectorizable=true };
typedef Packet2cf half;
};
@@ -500,7 +500,7 @@ struct packet_traits<std::complex<double> > : default_packet_traits {
template <>
struct unpacket_traits<Packet1cd> {
typedef std::complex<double> type;
- enum { size = 1, alignment = Aligned16 };
+ enum { size = 1, alignment = Aligned16, vectorizable=true };
typedef Packet1cd half;
};
diff --git a/Eigen/src/Core/arch/MSA/MathFunctions.h b/Eigen/src/Core/arch/MSA/MathFunctions.h
index 98e23e36f..f5181b90e 100644
--- a/Eigen/src/Core/arch/MSA/MathFunctions.h
+++ b/Eigen/src/Core/arch/MSA/MathFunctions.h
@@ -261,7 +261,7 @@ Packet4f psincos_inner_msa_float(const Packet4f& _x) {
// x's from odd-numbered octants will translate to octant -1: [-Pi/4, 0].
// Adjustment for odd-numbered octants: octant = (octant + 1) & (~1).
Packet4i y_int1 = __builtin_msa_addvi_w(y_int, 1);
- Packet4i y_int2 = (Packet4i)__builtin_msa_bclri_w((Packet4ui)y_int1, 0);
+ Packet4i y_int2 = (Packet4i)__builtin_msa_bclri_w((Packet4ui)y_int1, 0); // bclri = bit-clear
y = __builtin_msa_ffint_s_w(y_int2);
// Compute the sign to apply to the polynomial.
@@ -305,7 +305,7 @@ Packet4f psincos_inner_msa_float(const Packet4f& _x) {
// Update the sign.
sign_mask = pxor(sign_mask, (Packet4i)y);
- y = (Packet4f)__builtin_msa_binsli_w((v4u32)y, (v4u32)sign_mask, 0);
+ y = (Packet4f)__builtin_msa_binsli_w((v4u32)y, (v4u32)sign_mask, 0); // binsli = bit-insert-left
return y;
}
diff --git a/Eigen/src/Core/arch/MSA/PacketMath.h b/Eigen/src/Core/arch/MSA/PacketMath.h
index 094c874ee..a97156a84 100644
--- a/Eigen/src/Core/arch/MSA/PacketMath.h
+++ b/Eigen/src/Core/arch/MSA/PacketMath.h
@@ -117,14 +117,14 @@ struct packet_traits<int32_t> : default_packet_traits {
template <>
struct unpacket_traits<Packet4f> {
typedef float type;
- enum { size = 4, alignment = Aligned16 };
+ enum { size = 4, alignment = Aligned16, vectorizable=true };
typedef Packet4f half;
};
template <>
struct unpacket_traits<Packet4i> {
typedef int32_t type;
- enum { size = 4, alignment = Aligned16 };
+ enum { size = 4, alignment = Aligned16, vectorizable=true };
typedef Packet4i half;
};
@@ -925,7 +925,7 @@ struct packet_traits<double> : default_packet_traits {
template <>
struct unpacket_traits<Packet2d> {
typedef double type;
- enum { size = 2, alignment = Aligned16 };
+ enum { size = 2, alignment = Aligned16, vectorizable=true };
typedef Packet2d half;
};
diff --git a/Eigen/src/Core/arch/NEON/Complex.h b/Eigen/src/Core/arch/NEON/Complex.h
index 306a309be..f6c5c211c 100644
--- a/Eigen/src/Core/arch/NEON/Complex.h
+++ b/Eigen/src/Core/arch/NEON/Complex.h
@@ -62,7 +62,7 @@ template<> struct packet_traits<std::complex<float> > : default_packet_traits
};
};
-template<> struct unpacket_traits<Packet2cf> { typedef std::complex<float> type; enum {size=2, alignment=Aligned16}; typedef Packet2cf half; };
+template<> struct unpacket_traits<Packet2cf> { typedef std::complex<float> type; enum {size=2, alignment=Aligned16, vectorizable=true}; typedef Packet2cf half; };
template<> EIGEN_STRONG_INLINE Packet2cf pset1<Packet2cf>(const std::complex<float>& from)
{
@@ -101,6 +101,18 @@ template<> EIGEN_STRONG_INLINE Packet2cf pmul<Packet2cf>(const Packet2cf& a, con
return Packet2cf(vaddq_f32(v1, v2));
}
+template<> EIGEN_STRONG_INLINE Packet2cf pcmp_eq(const Packet2cf& a, const Packet2cf& b)
+{
+ // Compare real and imaginary parts of a and b to get the mask vector:
+ // [re(a[0])==re(b[0]), im(a[0])==im(b[0]), re(a[1])==re(b[1]), im(a[1])==im(b[1])]
+ Packet4f eq = pcmp_eq<Packet4f>(a.v, b.v);
+ // Swap real/imag elements in the mask to get:
+ // [im(a[0])==im(b[0]), re(a[0])==re(b[0]), im(a[1])==im(b[1]), re(a[1])==re(b[1])]
+ Packet4f eq_swapped = vrev64q_f32(eq);
+ // Return re(a)==re(b) && im(a)==im(b) by computing bitwise AND of eq and eq_swapped
+ return Packet2cf(pand<Packet4f>(eq, eq_swapped));
+}
+
template<> EIGEN_STRONG_INLINE Packet2cf pand <Packet2cf>(const Packet2cf& a, const Packet2cf& b)
{
return Packet2cf(vreinterpretq_f32_u32(vandq_u32(vreinterpretq_u32_f32(a.v),vreinterpretq_u32_f32(b.v))));
@@ -146,7 +158,7 @@ template<> EIGEN_STRONG_INLINE void prefetch<std::complex<float> >(const std::co
template<> EIGEN_STRONG_INLINE std::complex<float> pfirst<Packet2cf>(const Packet2cf& a)
{
- std::complex<float> EIGEN_ALIGN16 x[2];
+ EIGEN_ALIGN16 std::complex<float> x[2];
vst1q_f32((float *)x, a.v);
return x[0];
}
@@ -328,7 +340,7 @@ template<> struct packet_traits<std::complex<double> > : default_packet_traits
};
};
-template<> struct unpacket_traits<Packet1cd> { typedef std::complex<double> type; enum {size=1, alignment=Aligned16}; typedef Packet1cd half; };
+template<> struct unpacket_traits<Packet1cd> { typedef std::complex<double> type; enum {size=1, alignment=Aligned16, vectorizable=true}; typedef Packet1cd half; };
template<> EIGEN_STRONG_INLINE Packet1cd pload<Packet1cd>(const std::complex<double>* from) { EIGEN_DEBUG_ALIGNED_LOAD return Packet1cd(pload<Packet2d>((const double*)from)); }
template<> EIGEN_STRONG_INLINE Packet1cd ploadu<Packet1cd>(const std::complex<double>* from) { EIGEN_DEBUG_UNALIGNED_LOAD return Packet1cd(ploadu<Packet2d>((const double*)from)); }
@@ -361,6 +373,18 @@ template<> EIGEN_STRONG_INLINE Packet1cd pmul<Packet1cd>(const Packet1cd& a, con
return Packet1cd(vaddq_f64(v1, v2));
}
+template<> EIGEN_STRONG_INLINE Packet1cd pcmp_eq(const Packet1cd& a, const Packet1cd& b)
+{
+ // Compare real and imaginary parts of a and b to get the mask vector:
+ // [re(a)==re(b), im(a)==im(b)]
+ Packet2d eq = pcmp_eq<Packet2d>(a.v, b.v);
+ // Swap real/imag elements in the mask to get:
+ // [im(a)==im(b), re(a)==re(b)]
+ Packet2d eq_swapped = vreinterpretq_f64_u32(vrev64q_u32(vreinterpretq_u32_f64(eq)));
+ // Return re(a)==re(b) & im(a)==im(b) by computing bitwise AND of eq and eq_swapped
+ return Packet1cd(pand<Packet2d>(eq, eq_swapped));
+}
+
template<> EIGEN_STRONG_INLINE Packet1cd pand <Packet1cd>(const Packet1cd& a, const Packet1cd& b)
{
return Packet1cd(vreinterpretq_f64_u64(vandq_u64(vreinterpretq_u64_f64(a.v),vreinterpretq_u64_f64(b.v))));
@@ -401,7 +425,7 @@ template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<double>, Packet1c
template<> EIGEN_STRONG_INLINE std::complex<double> pfirst<Packet1cd>(const Packet1cd& a)
{
- std::complex<double> EIGEN_ALIGN16 res;
+ EIGEN_ALIGN16 std::complex<double> res;
pstore<std::complex<double> >(&res, a);
return res;
diff --git a/Eigen/src/Core/arch/NEON/MathFunctions.h b/Eigen/src/Core/arch/NEON/MathFunctions.h
index c48c61023..2e7d0e944 100644
--- a/Eigen/src/Core/arch/NEON/MathFunctions.h
+++ b/Eigen/src/Core/arch/NEON/MathFunctions.h
@@ -5,175 +5,37 @@
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-/* The sin, cos, exp, and log functions of this file come from
- * Julien Pommier's sse math library: http://gruntthepeon.free.fr/ssemath/
- */
-
#ifndef EIGEN_MATH_FUNCTIONS_NEON_H
#define EIGEN_MATH_FUNCTIONS_NEON_H
+#include "../Default/GenericPacketMathFunctions.h"
+
namespace Eigen {
namespace internal {
template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
-Packet4f pexp<Packet4f>(const Packet4f& _x)
+Packet4f pexp<Packet4f>(const Packet4f& x)
{
- Packet4f x = _x;
- Packet4f tmp, fx;
-
- _EIGEN_DECLARE_CONST_Packet4f(1 , 1.0f);
- _EIGEN_DECLARE_CONST_Packet4f(half, 0.5f);
- _EIGEN_DECLARE_CONST_Packet4i(0x7f, 0x7f);
- _EIGEN_DECLARE_CONST_Packet4f(exp_hi, 88.3762626647950f);
- _EIGEN_DECLARE_CONST_Packet4f(exp_lo, -88.3762626647949f);
- _EIGEN_DECLARE_CONST_Packet4f(cephes_LOG2EF, 1.44269504088896341f);
- _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_C1, 0.693359375f);
- _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_C2, -2.12194440e-4f);
- _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p0, 1.9875691500E-4f);
- _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p1, 1.3981999507E-3f);
- _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p2, 8.3334519073E-3f);
- _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p3, 4.1665795894E-2f);
- _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p4, 1.6666665459E-1f);
- _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p5, 5.0000001201E-1f);
-
- x = vminq_f32(x, p4f_exp_hi);
- x = vmaxq_f32(x, p4f_exp_lo);
-
- /* express exp(x) as exp(g + n*log(2)) */
- fx = vmlaq_f32(p4f_half, x, p4f_cephes_LOG2EF);
-
- /* perform a floorf */
- tmp = vcvtq_f32_s32(vcvtq_s32_f32(fx));
-
- /* if greater, substract 1 */
- Packet4ui mask = vcgtq_f32(tmp, fx);
- mask = vandq_u32(mask, vreinterpretq_u32_f32(p4f_1));
-
- fx = vsubq_f32(tmp, vreinterpretq_f32_u32(mask));
-
- tmp = vmulq_f32(fx, p4f_cephes_exp_C1);
- Packet4f z = vmulq_f32(fx, p4f_cephes_exp_C2);
- x = vsubq_f32(x, tmp);
- x = vsubq_f32(x, z);
-
- Packet4f y = vmulq_f32(p4f_cephes_exp_p0, x);
- z = vmulq_f32(x, x);
- y = vaddq_f32(y, p4f_cephes_exp_p1);
- y = vmulq_f32(y, x);
- y = vaddq_f32(y, p4f_cephes_exp_p2);
- y = vmulq_f32(y, x);
- y = vaddq_f32(y, p4f_cephes_exp_p3);
- y = vmulq_f32(y, x);
- y = vaddq_f32(y, p4f_cephes_exp_p4);
- y = vmulq_f32(y, x);
- y = vaddq_f32(y, p4f_cephes_exp_p5);
-
- y = vmulq_f32(y, z);
- y = vaddq_f32(y, x);
- y = vaddq_f32(y, p4f_1);
-
- /* build 2^n */
- int32x4_t mm;
- mm = vcvtq_s32_f32(fx);
- mm = vaddq_s32(mm, p4i_0x7f);
- mm = vshlq_n_s32(mm, 23);
- Packet4f pow2n = vreinterpretq_f32_s32(mm);
-
- y = vmulq_f32(y, pow2n);
- return y;
+ return pexp_float(x);
}
template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
-Packet4f plog<Packet4f>(const Packet4f& _x)
+Packet4f plog<Packet4f>(const Packet4f& x)
{
- Packet4f x = _x;
- _EIGEN_DECLARE_CONST_Packet4f(1 , 1.0f);
- _EIGEN_DECLARE_CONST_Packet4f(half, 0.5f);
- _EIGEN_DECLARE_CONST_Packet4i(0x7f, 0x7f);
-
- _EIGEN_DECLARE_CONST_Packet4i(inv_mant_mask, ~0x7f800000);
-
- /* natural logarithm computed for 4 simultaneous float
- return NaN for x <= 0
- */
- _EIGEN_DECLARE_CONST_Packet4f(cephes_SQRTHF, 0.707106781186547524f);
- _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p0, 7.0376836292E-2f);
- _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p1, - 1.1514610310E-1f);
- _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p2, 1.1676998740E-1f);
- _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p3, - 1.2420140846E-1f);
- _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p4, + 1.4249322787E-1f);
- _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p5, - 1.6668057665E-1f);
- _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p6, + 2.0000714765E-1f);
- _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p7, - 2.4999993993E-1f);
- _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p8, + 3.3333331174E-1f);
- _EIGEN_DECLARE_CONST_Packet4f(cephes_log_q1, -2.12194440e-4f);
- _EIGEN_DECLARE_CONST_Packet4f(cephes_log_q2, 0.693359375f);
-
- x = vmaxq_f32(x, vdupq_n_f32(0)); /* force flush to zero on denormal values */
- Packet4ui invalid_mask = vcleq_f32(x, vdupq_n_f32(0));
-
- Packet4i ux = vreinterpretq_s32_f32(x);
-
- Packet4i emm0 = vshrq_n_s32(ux, 23);
-
- /* keep only the fractional part */
- ux = vandq_s32(ux, p4i_inv_mant_mask);
- ux = vorrq_s32(ux, vreinterpretq_s32_f32(p4f_half));
- x = vreinterpretq_f32_s32(ux);
-
- emm0 = vsubq_s32(emm0, p4i_0x7f);
- Packet4f e = vcvtq_f32_s32(emm0);
-
- e = vaddq_f32(e, p4f_1);
-
- /* part2:
- if( x < SQRTHF ) {
- e -= 1;
- x = x + x - 1.0;
- } else { x = x - 1.0; }
- */
- Packet4ui mask = vcltq_f32(x, p4f_cephes_SQRTHF);
- Packet4f tmp = vreinterpretq_f32_u32(vandq_u32(vreinterpretq_u32_f32(x), mask));
- x = vsubq_f32(x, p4f_1);
- e = vsubq_f32(e, vreinterpretq_f32_u32(vandq_u32(vreinterpretq_u32_f32(p4f_1), mask)));
- x = vaddq_f32(x, tmp);
-
- Packet4f z = vmulq_f32(x,x);
-
- Packet4f y = p4f_cephes_log_p0;
- y = vmulq_f32(y, x);
- y = vaddq_f32(y, p4f_cephes_log_p1);
- y = vmulq_f32(y, x);
- y = vaddq_f32(y, p4f_cephes_log_p2);
- y = vmulq_f32(y, x);
- y = vaddq_f32(y, p4f_cephes_log_p3);
- y = vmulq_f32(y, x);
- y = vaddq_f32(y, p4f_cephes_log_p4);
- y = vmulq_f32(y, x);
- y = vaddq_f32(y, p4f_cephes_log_p5);
- y = vmulq_f32(y, x);
- y = vaddq_f32(y, p4f_cephes_log_p6);
- y = vmulq_f32(y, x);
- y = vaddq_f32(y, p4f_cephes_log_p7);
- y = vmulq_f32(y, x);
- y = vaddq_f32(y, p4f_cephes_log_p8);
- y = vmulq_f32(y, x);
-
- y = vmulq_f32(y, z);
-
- tmp = vmulq_f32(e, p4f_cephes_log_q1);
- y = vaddq_f32(y, tmp);
-
+ return plog_float(x);
+}
- tmp = vmulq_f32(z, p4f_half);
- y = vsubq_f32(y, tmp);
+template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
+Packet4f psin<Packet4f>(const Packet4f& x)
+{
+ return psin_float(x);
+}
- tmp = vmulq_f32(e, p4f_cephes_log_q2);
- x = vaddq_f32(x, y);
- x = vaddq_f32(x, tmp);
- x = vreinterpretq_f32_u32(vorrq_u32(vreinterpretq_u32_f32(x), invalid_mask)); // negative arg will be NAN
- return x;
+template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
+Packet4f pcos<Packet4f>(const Packet4f& x)
+{
+ return pcos_float(x);
}
} // end namespace internal
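With the NEON kernels now simply forwarding to the shared generic implementations (pexp_float, plog_float, psin_float, pcos_float), the packet paths are exercised through ordinary vectorized array expressions. A hedged usage sketch, assuming Eigen's headers are on the include path and vectorization is enabled (psin/pcos only kick in under EIGEN_FAST_MATH on NEON):

#include <Eigen/Core>
#include <iostream>

int main() {
  // exp/log/sin/cos on float arrays dispatch to the pexp/plog/psin/pcos
  // packet kernels when a SIMD backend such as NEON is active.
  Eigen::ArrayXf x = Eigen::ArrayXf::LinSpaced(8, 0.1f, 1.0f);
  Eigen::ArrayXf y = x.exp() + x.log() + x.sin() + x.cos();
  std::cout << y.transpose() << "\n";
}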
diff --git a/Eigen/src/Core/arch/NEON/PacketMath.h b/Eigen/src/Core/arch/NEON/PacketMath.h
index 010739380..e8b351849 100644
--- a/Eigen/src/Core/arch/NEON/PacketMath.h
+++ b/Eigen/src/Core/arch/NEON/PacketMath.h
@@ -108,10 +108,11 @@ template<> struct packet_traits<float> : default_packet_traits
size = 4,
HasHalfPacket=0, // Packet2f intrinsics not implemented yet
- HasDiv = 1,
+ HasDiv = 1,
+ HasFloor = 1,
// FIXME check the Has*
- HasSin = 0,
- HasCos = 0,
+ HasSin = EIGEN_FAST_MATH,
+ HasCos = EIGEN_FAST_MATH,
HasLog = 1,
HasExp = 1,
HasSqrt = 0
@@ -139,12 +140,25 @@ EIGEN_STRONG_INLINE void vst1q_f32(float* to, float32x4_t from) { ::vst1q
EIGEN_STRONG_INLINE void vst1_f32 (float* to, float32x2_t from) { ::vst1_f32 ((float32_t*)to,from); }
#endif
-template<> struct unpacket_traits<Packet4f> { typedef float type; enum {size=4, alignment=Aligned16}; typedef Packet4f half; };
-template<> struct unpacket_traits<Packet4i> { typedef int32_t type; enum {size=4, alignment=Aligned16}; typedef Packet4i half; };
+template<> struct unpacket_traits<Packet4f>
+{
+ typedef float type;
+ typedef Packet4f half;
+ typedef Packet4i integer_packet;
+ enum {size=4, alignment=Aligned16, vectorizable=true};
+};
+template<> struct unpacket_traits<Packet4i>
+{
+ typedef int32_t type;
+ typedef Packet4i half;
+ enum {size=4, alignment=Aligned16, vectorizable=true};
+};
template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float& from) { return vdupq_n_f32(from); }
template<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int32_t& from) { return vdupq_n_s32(from); }
+template<> EIGEN_STRONG_INLINE Packet4f pset1frombits<Packet4f>(unsigned int from) { return vreinterpretq_f32_u32(vdupq_n_u32(from)); }
+
template<> EIGEN_STRONG_INLINE Packet4f plset<Packet4f>(const float& a)
{
const float f[] = {0, 1, 2, 3};
@@ -249,6 +263,25 @@ template<> EIGEN_STRONG_INLINE Packet4i pmin<Packet4i>(const Packet4i& a, const
template<> EIGEN_STRONG_INLINE Packet4f pmax<Packet4f>(const Packet4f& a, const Packet4f& b) { return vmaxq_f32(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pmax<Packet4i>(const Packet4i& a, const Packet4i& b) { return vmaxq_s32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4f pcmp_le(const Packet4f& a, const Packet4f& b) { return vreinterpretq_f32_u32(vcleq_f32(a,b)); }
+template<> EIGEN_STRONG_INLINE Packet4f pcmp_lt(const Packet4f& a, const Packet4f& b) { return vreinterpretq_f32_u32(vcltq_f32(a,b)); }
+template<> EIGEN_STRONG_INLINE Packet4f pcmp_eq(const Packet4f& a, const Packet4f& b) { return vreinterpretq_f32_u32(vceqq_f32(a,b)); }
+template<> EIGEN_STRONG_INLINE Packet4f pcmp_lt_or_nan(const Packet4f& a, const Packet4f& b) { return vreinterpretq_f32_u32(vmvnq_u32(vcgeq_f32(a,b))); }
+
+template<> EIGEN_STRONG_INLINE Packet4i pcmp_eq(const Packet4i& a, const Packet4i& b) { return vreinterpretq_s32_u32(vceqq_s32(a,b)); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pfloor<Packet4f>(const Packet4f& a)
+{
+ const Packet4f cst_1 = pset1<Packet4f>(1.0f);
+ /* perform a floorf */
+ Packet4f tmp = vcvtq_f32_s32(vcvtq_s32_f32(a));
+
+ /* if greater, subtract 1 */
+ Packet4ui mask = vcgtq_f32(tmp, a);
+ mask = vandq_u32(mask, vreinterpretq_u32_f32(cst_1));
+ return vsubq_f32(tmp, vreinterpretq_f32_u32(mask));
+}
+
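The pfloor added above uses a truncate-and-correct scheme: convert to integer (which truncates toward zero), then subtract 1 whenever the truncated value overshoots a negative input. A scalar sketch of the same idea (the helper name floor_via_trunc is illustrative; it is only valid while the input fits in int32 range):

#include <cstdint>
#include <iostream>

static float floor_via_trunc(float a) {
  float tmp = static_cast<float>(static_cast<int32_t>(a)); // truncation toward zero
  return (tmp > a) ? tmp - 1.0f : tmp;                     // correct negative inputs
}

int main() {
  std::cout << floor_via_trunc(2.7f) << " " << floor_via_trunc(-2.3f) << "\n"; // 2 -3
}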
// Logical Operations are not supported for float, so we have to reinterpret casts using NEON intrinsics
template<> EIGEN_STRONG_INLINE Packet4f pand<Packet4f>(const Packet4f& a, const Packet4f& b)
{
@@ -274,6 +307,9 @@ template<> EIGEN_STRONG_INLINE Packet4f pandnot<Packet4f>(const Packet4f& a, con
}
template<> EIGEN_STRONG_INLINE Packet4i pandnot<Packet4i>(const Packet4i& a, const Packet4i& b) { return vbicq_s32(a,b); }
+template<int N> EIGEN_STRONG_INLINE Packet4i pshiftright(Packet4i a) { return vshrq_n_s32(a,N); }
+template<int N> EIGEN_STRONG_INLINE Packet4i pshiftleft(Packet4i a) { return vshlq_n_s32(a,N); }
+
template<> EIGEN_STRONG_INLINE Packet4f pload<Packet4f>(const float* from) { EIGEN_DEBUG_ALIGNED_LOAD return vld1q_f32(from); }
template<> EIGEN_STRONG_INLINE Packet4i pload<Packet4i>(const int32_t* from) { EIGEN_DEBUG_ALIGNED_LOAD return vld1q_s32(from); }
@@ -339,8 +375,8 @@ template<> EIGEN_STRONG_INLINE void prefetch<float> (const float* addr) { EI
template<> EIGEN_STRONG_INLINE void prefetch<int32_t>(const int32_t* addr) { EIGEN_ARM_PREFETCH(addr); }
// FIXME only store the 2 first elements ?
-template<> EIGEN_STRONG_INLINE float pfirst<Packet4f>(const Packet4f& a) { float EIGEN_ALIGN16 x[4]; vst1q_f32(x, a); return x[0]; }
-template<> EIGEN_STRONG_INLINE int32_t pfirst<Packet4i>(const Packet4i& a) { int32_t EIGEN_ALIGN16 x[4]; vst1q_s32(x, a); return x[0]; }
+template<> EIGEN_STRONG_INLINE float pfirst<Packet4f>(const Packet4f& a) { EIGEN_ALIGN16 float x[4]; vst1q_f32(x, a); return x[0]; }
+template<> EIGEN_STRONG_INLINE int32_t pfirst<Packet4i>(const Packet4i& a) { EIGEN_ALIGN16 int32_t x[4]; vst1q_s32(x, a); return x[0]; }
template<> EIGEN_STRONG_INLINE Packet4f preverse(const Packet4f& a) {
float32x2_t a_lo, a_hi;
@@ -364,6 +400,14 @@ template<> EIGEN_STRONG_INLINE Packet4i preverse(const Packet4i& a) {
template<> EIGEN_STRONG_INLINE Packet4f pabs(const Packet4f& a) { return vabsq_f32(a); }
template<> EIGEN_STRONG_INLINE Packet4i pabs(const Packet4i& a) { return vabsq_s32(a); }
+template<> EIGEN_STRONG_INLINE Packet4f pfrexp<Packet4f>(const Packet4f& a, Packet4f& exponent) {
+ return pfrexp_float(a,exponent);
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f pldexp<Packet4f>(const Packet4f& a, const Packet4f& exponent) {
+ return pldexp_float(a,exponent);
+}
+
template<> EIGEN_STRONG_INLINE float predux<Packet4f>(const Packet4f& a)
{
float32x2_t a_lo, a_hi, sum;
@@ -507,6 +551,13 @@ template<> EIGEN_STRONG_INLINE int32_t predux_max<Packet4i>(const Packet4i& a)
return vget_lane_s32(max, 0);
}
+template<> EIGEN_STRONG_INLINE bool predux_any(const Packet4f& x)
+{
+ uint32x2_t tmp = vorr_u32(vget_low_u32( vreinterpretq_u32_f32(x)),
+ vget_high_u32(vreinterpretq_u32_f32(x)));
+ return vget_lane_u32(vpmax_u32(tmp,tmp),0);
+}
+
// this PALIGN_NEON business is to work around a bug in LLVM Clang 3.0 causing incorrect compilation errors,
// see bug 347 and this LLVM bug: http://llvm.org/bugs/show_bug.cgi?id=11074
#define PALIGN_NEON(Offset,Type,Command) \
@@ -606,7 +657,7 @@ template<> struct packet_traits<double> : default_packet_traits
};
};
-template<> struct unpacket_traits<Packet2d> { typedef double type; enum {size=2, alignment=Aligned16}; typedef Packet2d half; };
+template<> struct unpacket_traits<Packet2d> { typedef double type; enum {size=2, alignment=Aligned16, vectorizable=true}; typedef Packet2d half; };
template<> EIGEN_STRONG_INLINE Packet2d pset1<Packet2d>(const double& from) { return vdupq_n_f64(from); }
@@ -660,6 +711,8 @@ template<> EIGEN_STRONG_INLINE Packet2d pandnot<Packet2d>(const Packet2d& a, con
return vreinterpretq_f64_u64(vbicq_u64(vreinterpretq_u64_f64(a),vreinterpretq_u64_f64(b)));
}
+template<> EIGEN_STRONG_INLINE Packet2d pcmp_eq(const Packet2d& a, const Packet2d& b) { return vreinterpretq_f64_u64(vceqq_f64(a,b)); }
+
template<> EIGEN_STRONG_INLINE Packet2d pload<Packet2d>(const double* from) { EIGEN_DEBUG_ALIGNED_LOAD return vld1q_f64(from); }
template<> EIGEN_STRONG_INLINE Packet2d ploadu<Packet2d>(const double* from) { EIGEN_DEBUG_UNALIGNED_LOAD return vld1q_f64(from); }
diff --git a/Eigen/src/Core/arch/NEON/TypeCasting.h b/Eigen/src/Core/arch/NEON/TypeCasting.h
index 95d1fd0e4..20dbe1332 100644
--- a/Eigen/src/Core/arch/NEON/TypeCasting.h
+++ b/Eigen/src/Core/arch/NEON/TypeCasting.h
@@ -41,6 +41,14 @@ template<> EIGEN_STRONG_INLINE Packet4f pcast<Packet4i, Packet4f>(const Packet4i
return vcvtq_f32_s32(a);
}
+template<> EIGEN_STRONG_INLINE Packet4i preinterpret<Packet4i,Packet4f>(const Packet4f& a) {
+ return vreinterpretq_s32_f32(a);
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f preinterpret<Packet4f,Packet4i>(const Packet4i& a) {
+ return vreinterpretq_f32_s32(a);
+}
+
} // end namespace internal
} // end namespace Eigen
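The new preinterpret specializations reinterpret a packet's bits without changing them, in contrast to pcast which converts values. A scalar analogy in plain C++ (memcpy plays the role of the bit reinterpretation, static_cast the role of the value conversion):

#include <cstdint>
#include <cstring>
#include <iostream>

int main() {
  float f = 1.0f;

  // "pcast"-like: value conversion, 1.0f -> 1
  int32_t casted = static_cast<int32_t>(f);

  // "preinterpret"-like: same bits viewed as int32, 1.0f -> 0x3f800000
  int32_t reinterpreted;
  std::memcpy(&reinterpreted, &f, sizeof f);

  std::cout << casted << " vs 0x" << std::hex << reinterpreted << "\n";
}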
diff --git a/Eigen/src/Core/arch/SSE/Complex.h b/Eigen/src/Core/arch/SSE/Complex.h
index d075043ce..f39988eac 100644
--- a/Eigen/src/Core/arch/SSE/Complex.h
+++ b/Eigen/src/Core/arch/SSE/Complex.h
@@ -50,7 +50,7 @@ template<> struct packet_traits<std::complex<float> > : default_packet_traits
};
#endif
-template<> struct unpacket_traits<Packet2cf> { typedef std::complex<float> type; enum {size=2, alignment=Aligned16}; typedef Packet2cf half; };
+template<> struct unpacket_traits<Packet2cf> { typedef std::complex<float> type; enum {size=2, alignment=Aligned16, vectorizable=true}; typedef Packet2cf half; };
template<> EIGEN_STRONG_INLINE Packet2cf padd<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(_mm_add_ps(a.v,b.v)); }
template<> EIGEN_STRONG_INLINE Packet2cf psub<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(_mm_sub_ps(a.v,b.v)); }
@@ -82,10 +82,13 @@ template<> EIGEN_STRONG_INLINE Packet2cf pmul<Packet2cf>(const Packet2cf& a, con
#endif
}
+template<> EIGEN_STRONG_INLINE Packet2cf ptrue <Packet2cf>(const Packet2cf& a) { return Packet2cf(ptrue(Packet4f(a.v))); }
+template<> EIGEN_STRONG_INLINE Packet2cf pnot <Packet2cf>(const Packet2cf& a) { return Packet2cf(pnot(Packet4f(a.v))); }
+
template<> EIGEN_STRONG_INLINE Packet2cf pand <Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(_mm_and_ps(a.v,b.v)); }
template<> EIGEN_STRONG_INLINE Packet2cf por <Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(_mm_or_ps(a.v,b.v)); }
template<> EIGEN_STRONG_INLINE Packet2cf pxor <Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(_mm_xor_ps(a.v,b.v)); }
-template<> EIGEN_STRONG_INLINE Packet2cf pandnot<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(_mm_andnot_ps(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet2cf pandnot<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(_mm_andnot_ps(b.v,a.v)); }
template<> EIGEN_STRONG_INLINE Packet2cf pload <Packet2cf>(const std::complex<float>* from) { EIGEN_DEBUG_ALIGNED_LOAD return Packet2cf(pload<Packet4f>(&numext::real_ref(*from))); }
template<> EIGEN_STRONG_INLINE Packet2cf ploadu<Packet2cf>(const std::complex<float>* from) { EIGEN_DEBUG_UNALIGNED_LOAD return Packet2cf(ploadu<Packet4f>(&numext::real_ref(*from))); }
@@ -280,7 +283,7 @@ template<> struct packet_traits<std::complex<double> > : default_packet_traits
};
#endif
-template<> struct unpacket_traits<Packet1cd> { typedef std::complex<double> type; enum {size=1, alignment=Aligned16}; typedef Packet1cd half; };
+template<> struct unpacket_traits<Packet1cd> { typedef std::complex<double> type; enum {size=1, alignment=Aligned16, vectorizable=true}; typedef Packet1cd half; };
template<> EIGEN_STRONG_INLINE Packet1cd padd<Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(_mm_add_pd(a.v,b.v)); }
template<> EIGEN_STRONG_INLINE Packet1cd psub<Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(_mm_sub_pd(a.v,b.v)); }
@@ -305,10 +308,12 @@ template<> EIGEN_STRONG_INLINE Packet1cd pmul<Packet1cd>(const Packet1cd& a, con
#endif
}
+template<> EIGEN_STRONG_INLINE Packet1cd ptrue <Packet1cd>(const Packet1cd& a) { return Packet1cd(ptrue(Packet2d(a.v))); }
+template<> EIGEN_STRONG_INLINE Packet1cd pnot <Packet1cd>(const Packet1cd& a) { return Packet1cd(pnot(Packet2d(a.v))); }
template<> EIGEN_STRONG_INLINE Packet1cd pand <Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(_mm_and_pd(a.v,b.v)); }
template<> EIGEN_STRONG_INLINE Packet1cd por <Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(_mm_or_pd(a.v,b.v)); }
template<> EIGEN_STRONG_INLINE Packet1cd pxor <Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(_mm_xor_pd(a.v,b.v)); }
-template<> EIGEN_STRONG_INLINE Packet1cd pandnot<Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(_mm_andnot_pd(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet1cd pandnot<Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(_mm_andnot_pd(b.v,a.v)); }
// FIXME force unaligned load, this is a temporary fix
template<> EIGEN_STRONG_INLINE Packet1cd pload <Packet1cd>(const std::complex<double>* from)
@@ -439,6 +444,18 @@ ptranspose(PacketBlock<Packet2cf,2>& kernel) {
kernel.packet[1].v = tmp;
}
+template<> EIGEN_STRONG_INLINE Packet2cf pcmp_eq(const Packet2cf& a, const Packet2cf& b)
+{
+ __m128 eq = _mm_cmpeq_ps(a.v, b.v);
+ return Packet2cf(pand<Packet4f>(eq, vec4f_swizzle1(eq, 1, 0, 3, 2)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet1cd pcmp_eq(const Packet1cd& a, const Packet1cd& b)
+{
+ __m128d eq = _mm_cmpeq_pd(a.v, b.v);
+ return Packet1cd(pand<Packet2d>(eq, vec2d_swizzle1(eq, 1, 0)));
+}
+
template<> EIGEN_STRONG_INLINE Packet2cf pblend(const Selector<2>& ifPacket, const Packet2cf& thenPacket, const Packet2cf& elsePacket) {
__m128d result = pblend<Packet2d>(ifPacket, _mm_castps_pd(thenPacket.v), _mm_castps_pd(elsePacket.v));
return Packet2cf(_mm_castpd_ps(result));
diff --git a/Eigen/src/Core/arch/SSE/MathFunctions.h b/Eigen/src/Core/arch/SSE/MathFunctions.h
index 4af2c6cae..0d491ab88 100644
--- a/Eigen/src/Core/arch/SSE/MathFunctions.h
+++ b/Eigen/src/Core/arch/SSE/MathFunctions.h
@@ -8,13 +8,15 @@
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-/* The sin, cos, exp, and log functions of this file come from
+/* The sin and cos functions of this file come from
* Julien Pommier's sse math library: http://gruntthepeon.free.fr/ssemath/
*/
#ifndef EIGEN_MATH_FUNCTIONS_SSE_H
#define EIGEN_MATH_FUNCTIONS_SSE_H
+#include "../Default/GenericPacketMathFunctions.h"
+
namespace Eigen {
namespace internal {
@@ -22,424 +24,31 @@ namespace internal {
template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
Packet4f plog<Packet4f>(const Packet4f& _x)
{
- Packet4f x = _x;
- _EIGEN_DECLARE_CONST_Packet4f(1 , 1.0f);
- _EIGEN_DECLARE_CONST_Packet4f(half, 0.5f);
- _EIGEN_DECLARE_CONST_Packet4i(0x7f, 0x7f);
-
- _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(inv_mant_mask, ~0x7f800000);
-
- /* the smallest non denormalized float number */
- _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(min_norm_pos, 0x00800000);
- _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(minus_inf, 0xff800000);//-1.f/0.f);
-
- /* natural logarithm computed for 4 simultaneous float
- return NaN for x <= 0
- */
- _EIGEN_DECLARE_CONST_Packet4f(cephes_SQRTHF, 0.707106781186547524f);
- _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p0, 7.0376836292E-2f);
- _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p1, - 1.1514610310E-1f);
- _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p2, 1.1676998740E-1f);
- _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p3, - 1.2420140846E-1f);
- _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p4, + 1.4249322787E-1f);
- _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p5, - 1.6668057665E-1f);
- _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p6, + 2.0000714765E-1f);
- _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p7, - 2.4999993993E-1f);
- _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p8, + 3.3333331174E-1f);
- _EIGEN_DECLARE_CONST_Packet4f(cephes_log_q1, -2.12194440e-4f);
- _EIGEN_DECLARE_CONST_Packet4f(cephes_log_q2, 0.693359375f);
-
-
- Packet4i emm0;
-
- Packet4f invalid_mask = _mm_cmpnge_ps(x, _mm_setzero_ps()); // not greater equal is true if x is NaN
- Packet4f iszero_mask = _mm_cmpeq_ps(x, _mm_setzero_ps());
-
- x = pmax(x, p4f_min_norm_pos); /* cut off denormalized stuff */
- emm0 = _mm_srli_epi32(_mm_castps_si128(x), 23);
-
- /* keep only the fractional part */
- x = _mm_and_ps(x, p4f_inv_mant_mask);
- x = _mm_or_ps(x, p4f_half);
-
- emm0 = _mm_sub_epi32(emm0, p4i_0x7f);
- Packet4f e = padd(Packet4f(_mm_cvtepi32_ps(emm0)), p4f_1);
-
- /* part2:
- if( x < SQRTHF ) {
- e -= 1;
- x = x + x - 1.0;
- } else { x = x - 1.0; }
- */
- Packet4f mask = _mm_cmplt_ps(x, p4f_cephes_SQRTHF);
- Packet4f tmp = pand(x, mask);
- x = psub(x, p4f_1);
- e = psub(e, pand(p4f_1, mask));
- x = padd(x, tmp);
-
- Packet4f x2 = pmul(x,x);
- Packet4f x3 = pmul(x2,x);
-
- Packet4f y, y1, y2;
- y = pmadd(p4f_cephes_log_p0, x, p4f_cephes_log_p1);
- y1 = pmadd(p4f_cephes_log_p3, x, p4f_cephes_log_p4);
- y2 = pmadd(p4f_cephes_log_p6, x, p4f_cephes_log_p7);
- y = pmadd(y , x, p4f_cephes_log_p2);
- y1 = pmadd(y1, x, p4f_cephes_log_p5);
- y2 = pmadd(y2, x, p4f_cephes_log_p8);
- y = pmadd(y, x3, y1);
- y = pmadd(y, x3, y2);
- y = pmul(y, x3);
-
- y1 = pmul(e, p4f_cephes_log_q1);
- tmp = pmul(x2, p4f_half);
- y = padd(y, y1);
- x = psub(x, tmp);
- y2 = pmul(e, p4f_cephes_log_q2);
- x = padd(x, y);
- x = padd(x, y2);
- // negative arg will be NAN, 0 will be -INF
- return _mm_or_ps(_mm_andnot_ps(iszero_mask, _mm_or_ps(x, invalid_mask)),
- _mm_and_ps(iszero_mask, p4f_minus_inf));
+ return plog_float(_x);
}
template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
Packet4f pexp<Packet4f>(const Packet4f& _x)
{
- Packet4f x = _x;
- _EIGEN_DECLARE_CONST_Packet4f(1 , 1.0f);
- _EIGEN_DECLARE_CONST_Packet4f(half, 0.5f);
- _EIGEN_DECLARE_CONST_Packet4i(0x7f, 0x7f);
-
-
- _EIGEN_DECLARE_CONST_Packet4f(exp_hi, 88.3762626647950f);
- _EIGEN_DECLARE_CONST_Packet4f(exp_lo, -88.3762626647949f);
-
- _EIGEN_DECLARE_CONST_Packet4f(cephes_LOG2EF, 1.44269504088896341f);
- _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_C1, 0.693359375f);
- _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_C2, -2.12194440e-4f);
-
- _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p0, 1.9875691500E-4f);
- _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p1, 1.3981999507E-3f);
- _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p2, 8.3334519073E-3f);
- _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p3, 4.1665795894E-2f);
- _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p4, 1.6666665459E-1f);
- _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p5, 5.0000001201E-1f);
-
- Packet4f tmp, fx;
- Packet4i emm0;
-
- // clamp x
- x = pmax(pmin(x, p4f_exp_hi), p4f_exp_lo);
-
- /* express exp(x) as exp(g + n*log(2)) */
- fx = pmadd(x, p4f_cephes_LOG2EF, p4f_half);
-
-#ifdef EIGEN_VECTORIZE_SSE4_1
- fx = _mm_floor_ps(fx);
-#else
- emm0 = _mm_cvttps_epi32(fx);
- tmp = _mm_cvtepi32_ps(emm0);
- /* if greater, substract 1 */
- Packet4f mask = _mm_cmpgt_ps(tmp, fx);
- mask = _mm_and_ps(mask, p4f_1);
- fx = psub(tmp, mask);
-#endif
-
- tmp = pmul(fx, p4f_cephes_exp_C1);
- Packet4f z = pmul(fx, p4f_cephes_exp_C2);
- x = psub(x, tmp);
- x = psub(x, z);
-
- z = pmul(x,x);
-
- Packet4f y = p4f_cephes_exp_p0;
- y = pmadd(y, x, p4f_cephes_exp_p1);
- y = pmadd(y, x, p4f_cephes_exp_p2);
- y = pmadd(y, x, p4f_cephes_exp_p3);
- y = pmadd(y, x, p4f_cephes_exp_p4);
- y = pmadd(y, x, p4f_cephes_exp_p5);
- y = pmadd(y, z, x);
- y = padd(y, p4f_1);
-
- // build 2^n
- emm0 = _mm_cvttps_epi32(fx);
- emm0 = _mm_add_epi32(emm0, p4i_0x7f);
- emm0 = _mm_slli_epi32(emm0, 23);
- return pmax(pmul(y, Packet4f(_mm_castsi128_ps(emm0))), _x);
+ return pexp_float(_x);
}
+
template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
-Packet2d pexp<Packet2d>(const Packet2d& _x)
+Packet2d pexp<Packet2d>(const Packet2d& x)
{
- Packet2d x = _x;
-
- _EIGEN_DECLARE_CONST_Packet2d(1 , 1.0);
- _EIGEN_DECLARE_CONST_Packet2d(2 , 2.0);
- _EIGEN_DECLARE_CONST_Packet2d(half, 0.5);
-
- _EIGEN_DECLARE_CONST_Packet2d(exp_hi, 709.437);
- _EIGEN_DECLARE_CONST_Packet2d(exp_lo, -709.436139303);
-
- _EIGEN_DECLARE_CONST_Packet2d(cephes_LOG2EF, 1.4426950408889634073599);
-
- _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p0, 1.26177193074810590878e-4);
- _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p1, 3.02994407707441961300e-2);
- _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p2, 9.99999999999999999910e-1);
-
- _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q0, 3.00198505138664455042e-6);
- _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q1, 2.52448340349684104192e-3);
- _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q2, 2.27265548208155028766e-1);
- _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q3, 2.00000000000000000009e0);
-
- _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_C1, 0.693145751953125);
- _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_C2, 1.42860682030941723212e-6);
- static const __m128i p4i_1023_0 = _mm_setr_epi32(1023, 1023, 0, 0);
-
- Packet2d tmp, fx;
- Packet4i emm0;
-
- // clamp x
- x = pmax(pmin(x, p2d_exp_hi), p2d_exp_lo);
- /* express exp(x) as exp(g + n*log(2)) */
- fx = pmadd(p2d_cephes_LOG2EF, x, p2d_half);
-
-#ifdef EIGEN_VECTORIZE_SSE4_1
- fx = _mm_floor_pd(fx);
-#else
- emm0 = _mm_cvttpd_epi32(fx);
- tmp = _mm_cvtepi32_pd(emm0);
- /* if greater, substract 1 */
- Packet2d mask = _mm_cmpgt_pd(tmp, fx);
- mask = _mm_and_pd(mask, p2d_1);
- fx = psub(tmp, mask);
-#endif
-
- tmp = pmul(fx, p2d_cephes_exp_C1);
- Packet2d z = pmul(fx, p2d_cephes_exp_C2);
- x = psub(x, tmp);
- x = psub(x, z);
-
- Packet2d x2 = pmul(x,x);
-
- Packet2d px = p2d_cephes_exp_p0;
- px = pmadd(px, x2, p2d_cephes_exp_p1);
- px = pmadd(px, x2, p2d_cephes_exp_p2);
- px = pmul (px, x);
-
- Packet2d qx = p2d_cephes_exp_q0;
- qx = pmadd(qx, x2, p2d_cephes_exp_q1);
- qx = pmadd(qx, x2, p2d_cephes_exp_q2);
- qx = pmadd(qx, x2, p2d_cephes_exp_q3);
-
- x = pdiv(px,psub(qx,px));
- x = pmadd(p2d_2,x,p2d_1);
-
- // build 2^n
- emm0 = _mm_cvttpd_epi32(fx);
- emm0 = _mm_add_epi32(emm0, p4i_1023_0);
- emm0 = _mm_slli_epi32(emm0, 20);
- emm0 = _mm_shuffle_epi32(emm0, _MM_SHUFFLE(1,2,0,3));
- return pmax(pmul(x, Packet2d(_mm_castsi128_pd(emm0))), _x);
+ return pexp_double(x);
}
-/* evaluation of 4 sines at once, using SSE2 intrinsics.
-
- The code is the exact rewriting of the cephes sinf function.
- Precision is excellent as long as x < 8192 (I did not bother to
- take into account the special handling they have for greater values
- -- it does not return garbage for arguments over 8192, though, but
- the extra precision is missing).
-
- Note that it is such that sinf((float)M_PI) = 8.74e-8, which is the
- surprising but correct result.
-*/
-
template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
Packet4f psin<Packet4f>(const Packet4f& _x)
{
- Packet4f x = _x;
- _EIGEN_DECLARE_CONST_Packet4f(1 , 1.0f);
- _EIGEN_DECLARE_CONST_Packet4f(half, 0.5f);
-
- _EIGEN_DECLARE_CONST_Packet4i(1, 1);
- _EIGEN_DECLARE_CONST_Packet4i(not1, ~1);
- _EIGEN_DECLARE_CONST_Packet4i(2, 2);
- _EIGEN_DECLARE_CONST_Packet4i(4, 4);
-
- _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(sign_mask, 0x80000000);
-
- _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP1,-0.78515625f);
- _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP2, -2.4187564849853515625e-4f);
- _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP3, -3.77489497744594108e-8f);
- _EIGEN_DECLARE_CONST_Packet4f(sincof_p0, -1.9515295891E-4f);
- _EIGEN_DECLARE_CONST_Packet4f(sincof_p1, 8.3321608736E-3f);
- _EIGEN_DECLARE_CONST_Packet4f(sincof_p2, -1.6666654611E-1f);
- _EIGEN_DECLARE_CONST_Packet4f(coscof_p0, 2.443315711809948E-005f);
- _EIGEN_DECLARE_CONST_Packet4f(coscof_p1, -1.388731625493765E-003f);
- _EIGEN_DECLARE_CONST_Packet4f(coscof_p2, 4.166664568298827E-002f);
- _EIGEN_DECLARE_CONST_Packet4f(cephes_FOPI, 1.27323954473516f); // 4 / M_PI
-
- Packet4f xmm1, xmm2, xmm3, sign_bit, y;
-
- Packet4i emm0, emm2;
- sign_bit = x;
- /* take the absolute value */
- x = pabs(x);
-
- /* take the modulo */
-
- /* extract the sign bit (upper one) */
- sign_bit = _mm_and_ps(sign_bit, p4f_sign_mask);
-
- /* scale by 4/Pi */
- y = pmul(x, p4f_cephes_FOPI);
-
- /* store the integer part of y in mm0 */
- emm2 = _mm_cvttps_epi32(y);
- /* j=(j+1) & (~1) (see the cephes sources) */
- emm2 = _mm_add_epi32(emm2, p4i_1);
- emm2 = _mm_and_si128(emm2, p4i_not1);
- y = _mm_cvtepi32_ps(emm2);
- /* get the swap sign flag */
- emm0 = _mm_and_si128(emm2, p4i_4);
- emm0 = _mm_slli_epi32(emm0, 29);
- /* get the polynom selection mask
- there is one polynom for 0 <= x <= Pi/4
- and another one for Pi/4<x<=Pi/2
-
- Both branches will be computed.
- */
- emm2 = _mm_and_si128(emm2, p4i_2);
- emm2 = _mm_cmpeq_epi32(emm2, _mm_setzero_si128());
-
- Packet4f swap_sign_bit = _mm_castsi128_ps(emm0);
- Packet4f poly_mask = _mm_castsi128_ps(emm2);
- sign_bit = _mm_xor_ps(sign_bit, swap_sign_bit);
-
- /* The magic pass: "Extended precision modular arithmetic"
- x = ((x - y * DP1) - y * DP2) - y * DP3; */
- xmm1 = pmul(y, p4f_minus_cephes_DP1);
- xmm2 = pmul(y, p4f_minus_cephes_DP2);
- xmm3 = pmul(y, p4f_minus_cephes_DP3);
- x = padd(x, xmm1);
- x = padd(x, xmm2);
- x = padd(x, xmm3);
-
- /* Evaluate the first polynom (0 <= x <= Pi/4) */
- y = p4f_coscof_p0;
- Packet4f z = _mm_mul_ps(x,x);
-
- y = pmadd(y, z, p4f_coscof_p1);
- y = pmadd(y, z, p4f_coscof_p2);
- y = pmul(y, z);
- y = pmul(y, z);
- Packet4f tmp = pmul(z, p4f_half);
- y = psub(y, tmp);
- y = padd(y, p4f_1);
-
- /* Evaluate the second polynom (Pi/4 <= x <= 0) */
-
- Packet4f y2 = p4f_sincof_p0;
- y2 = pmadd(y2, z, p4f_sincof_p1);
- y2 = pmadd(y2, z, p4f_sincof_p2);
- y2 = pmul(y2, z);
- y2 = pmul(y2, x);
- y2 = padd(y2, x);
-
- /* select the correct result from the two polynoms */
- y2 = _mm_and_ps(poly_mask, y2);
- y = _mm_andnot_ps(poly_mask, y);
- y = _mm_or_ps(y,y2);
- /* update the sign */
- return _mm_xor_ps(y, sign_bit);
+ return psin_float(_x);
}
-/* almost the same as psin */
template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
Packet4f pcos<Packet4f>(const Packet4f& _x)
{
- Packet4f x = _x;
- _EIGEN_DECLARE_CONST_Packet4f(1 , 1.0f);
- _EIGEN_DECLARE_CONST_Packet4f(half, 0.5f);
-
- _EIGEN_DECLARE_CONST_Packet4i(1, 1);
- _EIGEN_DECLARE_CONST_Packet4i(not1, ~1);
- _EIGEN_DECLARE_CONST_Packet4i(2, 2);
- _EIGEN_DECLARE_CONST_Packet4i(4, 4);
-
- _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP1,-0.78515625f);
- _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP2, -2.4187564849853515625e-4f);
- _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP3, -3.77489497744594108e-8f);
- _EIGEN_DECLARE_CONST_Packet4f(sincof_p0, -1.9515295891E-4f);
- _EIGEN_DECLARE_CONST_Packet4f(sincof_p1, 8.3321608736E-3f);
- _EIGEN_DECLARE_CONST_Packet4f(sincof_p2, -1.6666654611E-1f);
- _EIGEN_DECLARE_CONST_Packet4f(coscof_p0, 2.443315711809948E-005f);
- _EIGEN_DECLARE_CONST_Packet4f(coscof_p1, -1.388731625493765E-003f);
- _EIGEN_DECLARE_CONST_Packet4f(coscof_p2, 4.166664568298827E-002f);
- _EIGEN_DECLARE_CONST_Packet4f(cephes_FOPI, 1.27323954473516f); // 4 / M_PI
-
- Packet4f xmm1, xmm2, xmm3, y;
- Packet4i emm0, emm2;
-
- x = pabs(x);
-
- /* scale by 4/Pi */
- y = pmul(x, p4f_cephes_FOPI);
-
- /* get the integer part of y */
- emm2 = _mm_cvttps_epi32(y);
- /* j=(j+1) & (~1) (see the cephes sources) */
- emm2 = _mm_add_epi32(emm2, p4i_1);
- emm2 = _mm_and_si128(emm2, p4i_not1);
- y = _mm_cvtepi32_ps(emm2);
-
- emm2 = _mm_sub_epi32(emm2, p4i_2);
-
- /* get the swap sign flag */
- emm0 = _mm_andnot_si128(emm2, p4i_4);
- emm0 = _mm_slli_epi32(emm0, 29);
- /* get the polynom selection mask */
- emm2 = _mm_and_si128(emm2, p4i_2);
- emm2 = _mm_cmpeq_epi32(emm2, _mm_setzero_si128());
-
- Packet4f sign_bit = _mm_castsi128_ps(emm0);
- Packet4f poly_mask = _mm_castsi128_ps(emm2);
-
- /* The magic pass: "Extended precision modular arithmetic"
- x = ((x - y * DP1) - y * DP2) - y * DP3; */
- xmm1 = pmul(y, p4f_minus_cephes_DP1);
- xmm2 = pmul(y, p4f_minus_cephes_DP2);
- xmm3 = pmul(y, p4f_minus_cephes_DP3);
- x = padd(x, xmm1);
- x = padd(x, xmm2);
- x = padd(x, xmm3);
-
- /* Evaluate the first polynom (0 <= x <= Pi/4) */
- y = p4f_coscof_p0;
- Packet4f z = pmul(x,x);
-
- y = pmadd(y,z,p4f_coscof_p1);
- y = pmadd(y,z,p4f_coscof_p2);
- y = pmul(y, z);
- y = pmul(y, z);
- Packet4f tmp = _mm_mul_ps(z, p4f_half);
- y = psub(y, tmp);
- y = padd(y, p4f_1);
-
- /* Evaluate the second polynom (Pi/4 <= x <= 0) */
- Packet4f y2 = p4f_sincof_p0;
- y2 = pmadd(y2, z, p4f_sincof_p1);
- y2 = pmadd(y2, z, p4f_sincof_p2);
- y2 = pmul(y2, z);
- y2 = pmadd(y2, x, x);
-
- /* select the correct result from the two polynoms */
- y2 = _mm_and_ps(poly_mask, y2);
- y = _mm_andnot_ps(poly_mask, y);
- y = _mm_or_ps(y,y2);
-
- /* update the sign */
- return _mm_xor_ps(y, sign_bit);
+ return pcos_float(_x);
}
#if EIGEN_FAST_MATH
@@ -482,11 +91,11 @@ Packet2d psqrt<Packet2d>(const Packet2d& x) { return _mm_sqrt_pd(x); }
template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
Packet4f prsqrt<Packet4f>(const Packet4f& _x) {
- _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(inf, 0x7f800000);
- _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(nan, 0x7fc00000);
+ _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(inf, 0x7f800000u);
+ _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(nan, 0x7fc00000u);
_EIGEN_DECLARE_CONST_Packet4f(one_point_five, 1.5f);
_EIGEN_DECLARE_CONST_Packet4f(minus_half, -0.5f);
- _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(flt_min, 0x00800000);
+ _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(flt_min, 0x00800000u);
Packet4f neg_half = pmul(_x, p4f_minus_half);
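For context, the one_point_five and minus_half constants in the prsqrt hunk above are the coefficients of one Newton-Raphson refinement step applied to an approximate reciprocal square root. A scalar sketch of that iteration (the helper name rsqrt_refine is illustrative, not the exact Eigen code path):

#include <cmath>
#include <iostream>

// One Newton-Raphson step for y ~= 1/sqrt(x):
//   y_{n+1} = y_n * (1.5 - 0.5 * x * y_n * y_n)
static float rsqrt_refine(float x, float y) {
  return y * (1.5f + (-0.5f * x) * y * y);
}

int main() {
  float x = 2.0f;
  float y = 0.7f;          // crude initial guess for 1/sqrt(2) ~= 0.7071
  y = rsqrt_refine(x, y);  // one refinement step
  std::cout << y << " vs " << 1.0f / std::sqrt(x) << "\n";
}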
diff --git a/Eigen/src/Core/arch/SSE/PacketMath.h b/Eigen/src/Core/arch/SSE/PacketMath.h
index 99d55d5e9..9c3750af0 100755
--- a/Eigen/src/Core/arch/SSE/PacketMath.h
+++ b/Eigen/src/Core/arch/SSE/PacketMath.h
@@ -18,11 +18,13 @@ namespace internal {
#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8
#endif
-#ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS
+#if !defined(EIGEN_VECTORIZE_AVX) && !defined(EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS)
+// 32 bits => 8 registers
+// 64 bits => 16 registers
#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS (2*sizeof(void*))
#endif
-#ifdef __FMA__
+#ifdef EIGEN_VECTORIZE_FMA
#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
#define EIGEN_HAS_SINGLE_INSTRUCTION_MADD 1
#endif
@@ -61,20 +63,22 @@ template<> struct is_arithmetic<__m128> { enum { value = true }; };
template<> struct is_arithmetic<__m128i> { enum { value = true }; };
template<> struct is_arithmetic<__m128d> { enum { value = true }; };
+#define EIGEN_SSE_SHUFFLE_MASK(p,q,r,s) ((s)<<6|(r)<<4|(q)<<2|(p))
+
#define vec4f_swizzle1(v,p,q,r,s) \
- (_mm_castsi128_ps(_mm_shuffle_epi32( _mm_castps_si128(v), ((s)<<6|(r)<<4|(q)<<2|(p)))))
+ (_mm_castsi128_ps(_mm_shuffle_epi32( _mm_castps_si128(v), EIGEN_SSE_SHUFFLE_MASK(p,q,r,s))))
#define vec4i_swizzle1(v,p,q,r,s) \
- (_mm_shuffle_epi32( v, ((s)<<6|(r)<<4|(q)<<2|(p))))
+ (_mm_shuffle_epi32( v, EIGEN_SSE_SHUFFLE_MASK(p,q,r,s)))
#define vec2d_swizzle1(v,p,q) \
- (_mm_castsi128_pd(_mm_shuffle_epi32( _mm_castpd_si128(v), ((q*2+1)<<6|(q*2)<<4|(p*2+1)<<2|(p*2)))))
+ (_mm_castsi128_pd(_mm_shuffle_epi32( _mm_castpd_si128(v), EIGEN_SSE_SHUFFLE_MASK(2*p,2*p+1,2*q,2*q+1))))
#define vec4f_swizzle2(a,b,p,q,r,s) \
- (_mm_shuffle_ps( (a), (b), ((s)<<6|(r)<<4|(q)<<2|(p))))
+ (_mm_shuffle_ps( (a), (b), EIGEN_SSE_SHUFFLE_MASK(p,q,r,s)))
#define vec4i_swizzle2(a,b,p,q,r,s) \
- (_mm_castps_si128( (_mm_shuffle_ps( _mm_castsi128_ps(a), _mm_castsi128_ps(b), ((s)<<6|(r)<<4|(q)<<2|(p))))))
+ (_mm_castps_si128( (_mm_shuffle_ps( _mm_castsi128_ps(a), _mm_castsi128_ps(b), EIGEN_SSE_SHUFFLE_MASK(p,q,r,s)))))
#define _EIGEN_DECLARE_CONST_Packet4f(NAME,X) \
const Packet4f p4f_##NAME = pset1<Packet4f>(X)
@@ -83,7 +87,7 @@ template<> struct is_arithmetic<__m128d> { enum { value = true }; };
const Packet2d p2d_##NAME = pset1<Packet2d>(X)
#define _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(NAME,X) \
- const Packet4f p4f_##NAME = _mm_castsi128_ps(pset1<Packet4i>(X))
+ const Packet4f p4f_##NAME = pset1frombits<Packet4f>(X)
#define _EIGEN_DECLARE_CONST_Packet4i(NAME,X) \
const Packet4i p4i_##NAME = pset1<Packet4i>(X)
@@ -110,12 +114,12 @@ template<> struct packet_traits<float> : default_packet_traits
HasSqrt = 1,
HasRsqrt = 1,
HasTanh = EIGEN_FAST_MATH,
- HasBlend = 1
+ HasBlend = 1,
+ HasFloor = 1
#ifdef EIGEN_VECTORIZE_SSE4_1
,
HasRound = 1,
- HasFloor = 1,
HasCeil = 1
#endif
};
@@ -158,9 +162,22 @@ template<> struct packet_traits<int> : default_packet_traits
};
};
-template<> struct unpacket_traits<Packet4f> { typedef float type; enum {size=4, alignment=Aligned16}; typedef Packet4f half; };
-template<> struct unpacket_traits<Packet2d> { typedef double type; enum {size=2, alignment=Aligned16}; typedef Packet2d half; };
-template<> struct unpacket_traits<Packet4i> { typedef int type; enum {size=4, alignment=Aligned16}; typedef Packet4i half; };
+template<> struct unpacket_traits<Packet4f> {
+ typedef float type;
+ typedef Packet4f half;
+ typedef Packet4i integer_packet;
+ enum {size=4, alignment=Aligned16, vectorizable=true};
+};
+template<> struct unpacket_traits<Packet2d> {
+ typedef double type;
+ typedef Packet2d half;
+ enum {size=2, alignment=Aligned16, vectorizable=true};
+};
+template<> struct unpacket_traits<Packet4i> {
+ typedef int type;
+ typedef Packet4i half;
+ enum {size=4, alignment=Aligned16, vectorizable=false};
+};
#ifndef EIGEN_VECTORIZE_AVX
template<> struct scalar_div_cost<float,true> { enum { value = 7 }; };
@@ -180,6 +197,12 @@ template<> EIGEN_STRONG_INLINE Packet2d pset1<Packet2d>(const double& from) { re
template<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int& from) { return _mm_set1_epi32(from); }
#endif
+template<> EIGEN_STRONG_INLINE Packet4f pset1frombits<Packet4f>(unsigned int from) { return _mm_castsi128_ps(pset1<Packet4i>(from)); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pzero(const Packet4f& /*a*/) { return _mm_setzero_ps(); }
+template<> EIGEN_STRONG_INLINE Packet2d pzero(const Packet2d& /*a*/) { return _mm_setzero_pd(); }
+template<> EIGEN_STRONG_INLINE Packet4i pzero(const Packet4i& /*a*/) { return _mm_setzero_si128(); }
+
// GCC generates a shufps instruction for _mm_set1_ps/_mm_load1_ps instead of the more efficient pshufd instruction.
// However, using inrinsics for pset1 makes gcc to generate crappy code in some cases (see bug 203)
// Using inline assembly is also not an option because then gcc fails to reorder properly the instructions.
@@ -245,19 +268,24 @@ template<> EIGEN_STRONG_INLINE Packet2d pdiv<Packet2d>(const Packet2d& a, const
// for some weird raisons, it has to be overloaded for packet of integers
template<> EIGEN_STRONG_INLINE Packet4i pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c) { return padd(pmul(a,b), c); }
-#ifdef __FMA__
+#ifdef EIGEN_VECTORIZE_FMA
template<> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c) { return _mm_fmadd_ps(a,b,c); }
template<> EIGEN_STRONG_INLINE Packet2d pmadd(const Packet2d& a, const Packet2d& b, const Packet2d& c) { return _mm_fmadd_pd(a,b,c); }
#endif
template<> EIGEN_STRONG_INLINE Packet4f pmin<Packet4f>(const Packet4f& a, const Packet4f& b) {
-#if EIGEN_COMP_GNUC
+#if EIGEN_COMP_GNUC && EIGEN_COMP_GNUC < 63
// There appears to be a bug in GCC, by which the optimizer may
// flip the argument order in calls to _mm_min_ps, so we have to
// resort to inline ASM here. This is supposed to be fixed in gcc6.3,
// see also: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=72867
+ #ifdef EIGEN_VECTORIZE_AVX
+ Packet4f res;
+ asm("vminps %[a], %[b], %[res]" : [res] "=x" (res) : [a] "x" (a), [b] "x" (b));
+ #else
Packet4f res = b;
asm("minps %[a], %[res]" : [res] "+x" (res) : [a] "x" (a));
+ #endif
return res;
#else
// Arguments are reversed to match NaN propagation behavior of std::min.
@@ -265,13 +293,18 @@ template<> EIGEN_STRONG_INLINE Packet4f pmin<Packet4f>(const Packet4f& a, const
#endif
}
template<> EIGEN_STRONG_INLINE Packet2d pmin<Packet2d>(const Packet2d& a, const Packet2d& b) {
-#if EIGEN_COMP_GNUC
+#if EIGEN_COMP_GNUC && EIGEN_COMP_GNUC < 63
// There appears to be a bug in GCC, by which the optimizer may
// flip the argument order in calls to _mm_min_pd, so we have to
// resort to inline ASM here. This is supposed to be fixed in gcc6.3,
// see also: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=72867
+ #ifdef EIGEN_VECTORIZE_AVX
+ Packet2d res;
+ asm("vminpd %[a], %[b], %[res]" : [res] "=x" (res) : [a] "x" (a), [b] "x" (b));
+ #else
Packet2d res = b;
asm("minpd %[a], %[res]" : [res] "+x" (res) : [a] "x" (a));
+ #endif
return res;
#else
// Arguments are reversed to match NaN propagation behavior of std::min.
@@ -290,13 +323,18 @@ template<> EIGEN_STRONG_INLINE Packet4i pmin<Packet4i>(const Packet4i& a, const
}
template<> EIGEN_STRONG_INLINE Packet4f pmax<Packet4f>(const Packet4f& a, const Packet4f& b) {
-#if EIGEN_COMP_GNUC
+#if EIGEN_COMP_GNUC && EIGEN_COMP_GNUC < 63
// There appears to be a bug in GCC, by which the optimizer may
// flip the argument order in calls to _mm_max_ps, so we have to
// resort to inline ASM here. This is supposed to be fixed in gcc6.3,
// see also: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=72867
+ #ifdef EIGEN_VECTORIZE_AVX
+ Packet4f res;
+ asm("vmaxps %[a], %[b], %[res]" : [res] "=x" (res) : [a] "x" (a), [b] "x" (b));
+ #else
Packet4f res = b;
asm("maxps %[a], %[res]" : [res] "+x" (res) : [a] "x" (a));
+ #endif
return res;
#else
// Arguments are reversed to match NaN propagation behavior of std::max.
@@ -304,13 +342,18 @@ template<> EIGEN_STRONG_INLINE Packet4f pmax<Packet4f>(const Packet4f& a, const
#endif
}
template<> EIGEN_STRONG_INLINE Packet2d pmax<Packet2d>(const Packet2d& a, const Packet2d& b) {
-#if EIGEN_COMP_GNUC
+#if EIGEN_COMP_GNUC && EIGEN_COMP_GNUC < 63
// There appears to be a bug in GCC, by which the optimizer may
// flip the argument order in calls to _mm_max_pd, so we have to
// resort to inline ASM here. This is supposed to be fixed in gcc6.3,
// see also: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=72867
+ #ifdef EIGEN_VECTORIZE_AVX
+ Packet2d res;
+ asm("vmaxpd %[a], %[b], %[res]" : [res] "=x" (res) : [a] "x" (a), [b] "x" (b));
+ #else
Packet2d res = b;
asm("maxpd %[a], %[res]" : [res] "+x" (res) : [a] "x" (a));
+ #endif
return res;
#else
// Arguments are reversed to match NaN propagation behavior of std::max.
@@ -328,16 +371,24 @@ template<> EIGEN_STRONG_INLINE Packet4i pmax<Packet4i>(const Packet4i& a, const
#endif
}
-#ifdef EIGEN_VECTORIZE_SSE4_1
-template<> EIGEN_STRONG_INLINE Packet4f pround<Packet4f>(const Packet4f& a) { return _mm_round_ps(a, 0); }
-template<> EIGEN_STRONG_INLINE Packet2d pround<Packet2d>(const Packet2d& a) { return _mm_round_pd(a, 0); }
-
-template<> EIGEN_STRONG_INLINE Packet4f pceil<Packet4f>(const Packet4f& a) { return _mm_ceil_ps(a); }
-template<> EIGEN_STRONG_INLINE Packet2d pceil<Packet2d>(const Packet2d& a) { return _mm_ceil_pd(a); }
-
-template<> EIGEN_STRONG_INLINE Packet4f pfloor<Packet4f>(const Packet4f& a) { return _mm_floor_ps(a); }
-template<> EIGEN_STRONG_INLINE Packet2d pfloor<Packet2d>(const Packet2d& a) { return _mm_floor_pd(a); }
-#endif
+template<> EIGEN_STRONG_INLINE Packet4f pcmp_le(const Packet4f& a, const Packet4f& b) { return _mm_cmple_ps(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4f pcmp_lt(const Packet4f& a, const Packet4f& b) { return _mm_cmplt_ps(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4f pcmp_eq(const Packet4f& a, const Packet4f& b) { return _mm_cmpeq_ps(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i pcmp_eq(const Packet4i& a, const Packet4i& b) { return _mm_cmpeq_epi32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2d pcmp_eq(const Packet2d& a, const Packet2d& b) { return _mm_cmpeq_pd(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4f pcmp_lt_or_nan(const Packet4f& a, const Packet4f& b) { return _mm_cmpnge_ps(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet4i ptrue<Packet4i>(const Packet4i& a) { return _mm_cmpeq_epi32(a, a); }
+template<> EIGEN_STRONG_INLINE Packet4f
+ptrue<Packet4f>(const Packet4f& a) {
+ Packet4i b = _mm_castps_si128(a);
+ return _mm_castsi128_ps(_mm_cmpeq_epi32(b, b));
+}
+template<> EIGEN_STRONG_INLINE Packet2d
+ptrue<Packet2d>(const Packet2d& a) {
+ Packet4i b = _mm_castpd_si128(a);
+ return _mm_castsi128_pd(_mm_cmpeq_epi32(b, b));
+}
template<> EIGEN_STRONG_INLINE Packet4f pand<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_and_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pand<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_and_pd(a,b); }
@@ -351,9 +402,47 @@ template<> EIGEN_STRONG_INLINE Packet4f pxor<Packet4f>(const Packet4f& a, const
template<> EIGEN_STRONG_INLINE Packet2d pxor<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_xor_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pxor<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_xor_si128(a,b); }
-template<> EIGEN_STRONG_INLINE Packet4f pandnot<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_andnot_ps(a,b); }
-template<> EIGEN_STRONG_INLINE Packet2d pandnot<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_andnot_pd(a,b); }
-template<> EIGEN_STRONG_INLINE Packet4i pandnot<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_andnot_si128(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4f pandnot<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_andnot_ps(b,a); }
+template<> EIGEN_STRONG_INLINE Packet2d pandnot<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_andnot_pd(b,a); }
+template<> EIGEN_STRONG_INLINE Packet4i pandnot<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_andnot_si128(b,a); }
+
+template<int N> EIGEN_STRONG_INLINE Packet4i pshiftright(Packet4i a) { return _mm_srli_epi32(a,N); }
+template<int N> EIGEN_STRONG_INLINE Packet4i pshiftleft(Packet4i a) { return _mm_slli_epi32(a,N); }
+
+#ifdef EIGEN_VECTORIZE_SSE4_1
+template<> EIGEN_STRONG_INLINE Packet4f pround<Packet4f>(const Packet4f& a) { return _mm_round_ps(a, 0); }
+template<> EIGEN_STRONG_INLINE Packet2d pround<Packet2d>(const Packet2d& a) { return _mm_round_pd(a, 0); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pceil<Packet4f>(const Packet4f& a) { return _mm_ceil_ps(a); }
+template<> EIGEN_STRONG_INLINE Packet2d pceil<Packet2d>(const Packet2d& a) { return _mm_ceil_pd(a); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pfloor<Packet4f>(const Packet4f& a) { return _mm_floor_ps(a); }
+template<> EIGEN_STRONG_INLINE Packet2d pfloor<Packet2d>(const Packet2d& a) { return _mm_floor_pd(a); }
+#else
+template<> EIGEN_STRONG_INLINE Packet4f pfloor<Packet4f>(const Packet4f& a)
+{
+ const Packet4f cst_1 = pset1<Packet4f>(1.0f);
+ Packet4i emm0 = _mm_cvttps_epi32(a);
+ Packet4f tmp = _mm_cvtepi32_ps(emm0);
+ /* if greater, subtract 1 */
+ Packet4f mask = _mm_cmpgt_ps(tmp, a);
+ mask = pand(mask, cst_1);
+ return psub(tmp, mask);
+}
+
+// WARNING: this pfloor implementation only makes sense for small inputs;
+// it is currently only used by pexp and is not exposed through HasFloor.
+template<> EIGEN_STRONG_INLINE Packet2d pfloor<Packet2d>(const Packet2d& a)
+{
+ const Packet2d cst_1 = pset1<Packet2d>(1.0);
+ Packet4i emm0 = _mm_cvttpd_epi32(a);
+ Packet2d tmp = _mm_cvtepi32_pd(emm0);
+ /* if greater, subtract 1 */
+ Packet2d mask = _mm_cmpgt_pd(tmp, a);
+ mask = pand(mask, cst_1);
+ return psub(tmp, mask);
+}
+#endif
template<> EIGEN_STRONG_INLINE Packet4f pload<Packet4f>(const float* from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_ps(from); }
template<> EIGEN_STRONG_INLINE Packet2d pload<Packet2d>(const double* from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_pd(from); }
@@ -517,6 +606,23 @@ template<> EIGEN_STRONG_INLINE Packet4i pabs(const Packet4i& a)
#endif
}
+template<> EIGEN_STRONG_INLINE Packet4f pfrexp<Packet4f>(const Packet4f& a, Packet4f& exponent) {
+ return pfrexp_float(a,exponent);
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f pldexp<Packet4f>(const Packet4f& a, const Packet4f& exponent) {
+ return pldexp_float(a,exponent);
+}
+
+template<> EIGEN_STRONG_INLINE Packet2d pldexp<Packet2d>(const Packet2d& a, const Packet2d& exponent) {
+ const Packet4i cst_1023_0 = _mm_setr_epi32(1023, 1023, 0, 0);
+ Packet4i emm0 = _mm_cvttpd_epi32(exponent);
+ emm0 = padd(emm0, cst_1023_0);
+ emm0 = _mm_slli_epi32(emm0, 20);
+ emm0 = _mm_shuffle_epi32(emm0, _MM_SHUFFLE(1,2,0,3));
+ return pmul(a, Packet2d(_mm_castsi128_pd(emm0)));
+}
+
// with AVX, the default implementations based on pload1 are faster
#ifndef __AVX__
template<> EIGEN_STRONG_INLINE void
@@ -718,6 +824,17 @@ template<> EIGEN_STRONG_INLINE int predux_max<Packet4i>(const Packet4i& a)
#endif // EIGEN_VECTORIZE_SSE4_1
}
+// not needed yet
+// template<> EIGEN_STRONG_INLINE bool predux_all(const Packet4f& x)
+// {
+// return _mm_movemask_ps(x) == 0xF;
+// }
+
+template<> EIGEN_STRONG_INLINE bool predux_any(const Packet4f& x)
+{
+ return _mm_movemask_ps(x) != 0x0;
+}
+
#if EIGEN_COMP_GNUC
// template <> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c)
// {
@@ -921,7 +1038,7 @@ template<> EIGEN_STRONG_INLINE Packet2d pinsertlast(const Packet2d& a, double b)
}
// Scalar path for pmadd with FMA to ensure consistency with vectorized path.
-#ifdef __FMA__
+#ifdef EIGEN_VECTORIZE_FMA
template<> EIGEN_STRONG_INLINE float pmadd(const float& a, const float& b, const float& c) {
return ::fmaf(a,b,c);
}
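The pandnot argument swaps in this file reflect a mismatch of conventions: Eigen's pandnot(a,b) is defined as a & ~b, while the _mm_andnot_* intrinsics compute (~first) & second, so implementing the former requires calling the latter with the arguments reversed. A scalar sanity check of the two orderings (helper names are illustrative):

#include <cstdint>
#include <cassert>

// Eigen's convention: pandnot(a,b) == a & ~b.
static uint32_t pandnot_scalar(uint32_t a, uint32_t b) { return a & ~b; }

// The SSE intrinsic convention: andnot(x,y) == (~x) & y.
static uint32_t mm_andnot_scalar(uint32_t x, uint32_t y) { return (~x) & y; }

int main() {
  uint32_t a = 0xF0F0F0F0u, b = 0x0FF00FF0u;
  // Calling the intrinsic-style helper with swapped arguments matches pandnot(a,b).
  assert(pandnot_scalar(a, b) == mm_andnot_scalar(b, a));
  return 0;
}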
diff --git a/Eigen/src/Core/arch/SSE/TypeCasting.h b/Eigen/src/Core/arch/SSE/TypeCasting.h
index c6ca8c716..f607366f0 100644
--- a/Eigen/src/Core/arch/SSE/TypeCasting.h
+++ b/Eigen/src/Core/arch/SSE/TypeCasting.h
@@ -69,6 +69,13 @@ template<> EIGEN_STRONG_INLINE Packet2d pcast<Packet4f, Packet2d>(const Packet4f
return _mm_cvtps_pd(a);
}
+template<> EIGEN_STRONG_INLINE Packet4i preinterpret<Packet4i,Packet4f>(const Packet4f& a) {
+ return _mm_castps_si128(a);
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f preinterpret<Packet4f,Packet4i>(const Packet4i& a) {
+ return _mm_castsi128_ps(a);
+}
} // end namespace internal
diff --git a/Eigen/src/Core/arch/SYCL/InteropHeaders.h b/Eigen/src/Core/arch/SYCL/InteropHeaders.h
index c1da40d14..294cb101a 100644
--- a/Eigen/src/Core/arch/SYCL/InteropHeaders.h
+++ b/Eigen/src/Core/arch/SYCL/InteropHeaders.h
@@ -88,7 +88,7 @@ SYCL_ARITHMETIC(cl::sycl::cl_double2)
#define SYCL_UNPACKET_TRAITS(packet_type, unpacket_type, lengths)\
template<> struct unpacket_traits<packet_type> {\
typedef unpacket_type type;\
- enum {size=lengths, alignment=Aligned16};\
+ enum {size=lengths, alignment=Aligned16, vectorizable=true};\
typedef packet_type half;\
};
SYCL_UNPACKET_TRAITS(cl::sycl::cl_float4, float, 4)
diff --git a/Eigen/src/Core/arch/ZVector/Complex.h b/Eigen/src/Core/arch/ZVector/Complex.h
index 95aba428f..167c3ee4c 100644
--- a/Eigen/src/Core/arch/ZVector/Complex.h
+++ b/Eigen/src/Core/arch/ZVector/Complex.h
@@ -91,8 +91,8 @@ template<> struct packet_traits<std::complex<double> > : default_packet_traits
};
};
-template<> struct unpacket_traits<Packet2cf> { typedef std::complex<float> type; enum {size=2, alignment=Aligned16}; typedef Packet2cf half; };
-template<> struct unpacket_traits<Packet1cd> { typedef std::complex<double> type; enum {size=1, alignment=Aligned16}; typedef Packet1cd half; };
+template<> struct unpacket_traits<Packet2cf> { typedef std::complex<float> type; enum {size=2, alignment=Aligned16, vectorizable=true}; typedef Packet2cf half; };
+template<> struct unpacket_traits<Packet1cd> { typedef std::complex<double> type; enum {size=1, alignment=Aligned16, vectorizable=true}; typedef Packet1cd half; };
/* Forward declaration */
EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet2cf,2>& kernel);
diff --git a/Eigen/src/Core/arch/ZVector/PacketMath.h b/Eigen/src/Core/arch/ZVector/PacketMath.h
index 0b37f4992..c8e90f1a8 100755
--- a/Eigen/src/Core/arch/ZVector/PacketMath.h
+++ b/Eigen/src/Core/arch/ZVector/PacketMath.h
@@ -239,9 +239,9 @@ template<> struct packet_traits<double> : default_packet_traits
};
};
-template<> struct unpacket_traits<Packet4i> { typedef int type; enum {size=4, alignment=Aligned16}; typedef Packet4i half; };
-template<> struct unpacket_traits<Packet4f> { typedef float type; enum {size=4, alignment=Aligned16}; typedef Packet4f half; };
-template<> struct unpacket_traits<Packet2d> { typedef double type; enum {size=2, alignment=Aligned16}; typedef Packet2d half; };
+template<> struct unpacket_traits<Packet4i> { typedef int type; enum {size=4, alignment=Aligned16, vectorizable=true}; typedef Packet4i half; };
+template<> struct unpacket_traits<Packet4f> { typedef float type; enum {size=4, alignment=Aligned16, vectorizable=true}; typedef Packet4f half; };
+template<> struct unpacket_traits<Packet2d> { typedef double type; enum {size=2, alignment=Aligned16, vectorizable=true}; typedef Packet2d half; };
/* Forward declaration */
EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet4f,4>& kernel);
diff --git a/Eigen/src/Core/functors/AssignmentFunctors.h b/Eigen/src/Core/functors/AssignmentFunctors.h
index 9765cc763..bf64ef4ed 100644
--- a/Eigen/src/Core/functors/AssignmentFunctors.h
+++ b/Eigen/src/Core/functors/AssignmentFunctors.h
@@ -157,7 +157,16 @@ template<typename Scalar>
struct functor_traits<swap_assign_op<Scalar> > {
enum {
Cost = 3 * NumTraits<Scalar>::ReadCost,
- PacketAccess = packet_traits<Scalar>::Vectorizable
+ PacketAccess =
+ #if defined(EIGEN_VECTORIZE_AVX) && EIGEN_COMP_CLANG && (EIGEN_COMP_CLANG<800 || defined(__apple_build_version__))
+ // This is a partial workaround for a bug in clang generating bad code
+ // when mixing 256/512 bits loads and 128 bits moves.
+ // See http://eigen.tuxfamily.org/bz/show_bug.cgi?id=1684
+ // https://bugs.llvm.org/show_bug.cgi?id=40815
+ 0
+ #else
+ packet_traits<Scalar>::Vectorizable
+ #endif
};
};
diff --git a/Eigen/src/Core/functors/NullaryFunctors.h b/Eigen/src/Core/functors/NullaryFunctors.h
index b03be0269..16b645f91 100644
--- a/Eigen/src/Core/functors/NullaryFunctors.h
+++ b/Eigen/src/Core/functors/NullaryFunctors.h
@@ -37,26 +37,27 @@ template<typename Scalar>
struct functor_traits<scalar_identity_op<Scalar> >
{ enum { Cost = NumTraits<Scalar>::AddCost, PacketAccess = false, IsRepeatable = true }; };
-template <typename Scalar, typename Packet, bool IsInteger> struct linspaced_op_impl;
+template <typename Scalar, bool IsInteger> struct linspaced_op_impl;
-template <typename Scalar, typename Packet>
-struct linspaced_op_impl<Scalar,Packet,/*IsInteger*/false>
+template <typename Scalar>
+struct linspaced_op_impl<Scalar,/*IsInteger*/false>
{
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+
linspaced_op_impl(const Scalar& low, const Scalar& high, Index num_steps) :
- m_low(low), m_high(high), m_size1(num_steps==1 ? 1 : num_steps-1), m_step(num_steps==1 ? Scalar() : (high-low)/Scalar(num_steps-1)),
+ m_low(low), m_high(high), m_size1(num_steps==1 ? 1 : num_steps-1), m_step(num_steps==1 ? Scalar() : (high-low)/RealScalar(num_steps-1)),
m_flip(numext::abs(high)<numext::abs(low))
{}
template<typename IndexType>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (IndexType i) const {
- typedef typename NumTraits<Scalar>::Real RealScalar;
if(m_flip)
return (i==0)? m_low : (m_high - RealScalar(m_size1-i)*m_step);
else
return (i==m_size1)? m_high : (m_low + RealScalar(i)*m_step);
}
- template<typename IndexType>
+ template<typename Packet, typename IndexType>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(IndexType i) const
{
// Principle:
@@ -86,8 +87,8 @@ struct linspaced_op_impl<Scalar,Packet,/*IsInteger*/false>
const bool m_flip;
};
-template <typename Scalar, typename Packet>
-struct linspaced_op_impl<Scalar,Packet,/*IsInteger*/true>
+template <typename Scalar>
+struct linspaced_op_impl<Scalar,/*IsInteger*/true>
{
linspaced_op_impl(const Scalar& low, const Scalar& high, Index num_steps) :
m_low(low),
@@ -115,8 +116,8 @@ struct linspaced_op_impl<Scalar,Packet,/*IsInteger*/true>
// Forward declaration (we default to random access which does not really give
// us a speed gain when using packet access but it allows to use the functor in
// nested expressions).
-template <typename Scalar, typename PacketType> struct linspaced_op;
-template <typename Scalar, typename PacketType> struct functor_traits< linspaced_op<Scalar,PacketType> >
+template <typename Scalar> struct linspaced_op;
+template <typename Scalar> struct functor_traits< linspaced_op<Scalar> >
{
enum
{
@@ -126,7 +127,7 @@ template <typename Scalar, typename PacketType> struct functor_traits< linspaced
IsRepeatable = true
};
};
-template <typename Scalar, typename PacketType> struct linspaced_op
+template <typename Scalar> struct linspaced_op
{
linspaced_op(const Scalar& low, const Scalar& high, Index num_steps)
: impl((num_steps==1 ? high : low),high,num_steps)
@@ -136,11 +137,11 @@ template <typename Scalar, typename PacketType> struct linspaced_op
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (IndexType i) const { return impl(i); }
template<typename Packet,typename IndexType>
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(IndexType i) const { return impl.packetOp(i); }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(IndexType i) const { return impl.template packetOp<Packet>(i); }
// This proxy object handles the actual required temporaries and the different
// implementations (integer vs. floating point).
- const linspaced_op_impl<Scalar,PacketType,NumTraits<Scalar>::IsInteger> impl;
+ const linspaced_op_impl<Scalar,NumTraits<Scalar>::IsInteger> impl;
};
// Linear access is automatically determined from the operator() prototypes available for the given functor.
@@ -166,12 +167,12 @@ struct has_unary_operator<scalar_identity_op<Scalar>,IndexType> { enum { value =
template<typename Scalar,typename IndexType>
struct has_binary_operator<scalar_identity_op<Scalar>,IndexType> { enum { value = 1}; };
-template<typename Scalar, typename PacketType,typename IndexType>
-struct has_nullary_operator<linspaced_op<Scalar,PacketType>,IndexType> { enum { value = 0}; };
-template<typename Scalar, typename PacketType,typename IndexType>
-struct has_unary_operator<linspaced_op<Scalar,PacketType>,IndexType> { enum { value = 1}; };
-template<typename Scalar, typename PacketType,typename IndexType>
-struct has_binary_operator<linspaced_op<Scalar,PacketType>,IndexType> { enum { value = 0}; };
+template<typename Scalar,typename IndexType>
+struct has_nullary_operator<linspaced_op<Scalar>,IndexType> { enum { value = 0}; };
+template<typename Scalar,typename IndexType>
+struct has_unary_operator<linspaced_op<Scalar>,IndexType> { enum { value = 1}; };
+template<typename Scalar,typename IndexType>
+struct has_binary_operator<linspaced_op<Scalar>,IndexType> { enum { value = 0}; };
template<typename Scalar,typename IndexType>
struct has_nullary_operator<scalar_random_op<Scalar>,IndexType> { enum { value = 1}; };
diff --git a/Eigen/src/Core/functors/UnaryFunctors.h b/Eigen/src/Core/functors/UnaryFunctors.h
index 0c2d2cfca..03f167ac9 100644
--- a/Eigen/src/Core/functors/UnaryFunctors.h
+++ b/Eigen/src/Core/functors/UnaryFunctors.h
@@ -117,7 +117,15 @@ template<typename Scalar>
struct functor_traits<scalar_conjugate_op<Scalar> >
{
enum {
- Cost = NumTraits<Scalar>::IsComplex ? NumTraits<Scalar>::AddCost : 0,
+ Cost = 0,
+ // Yes, the cost is zero even for complexes because in most cases where
+ // the cost is used, conjugation turns out to be a no-op. Some examples:
+ //   cost(a*conj(b)) == cost(a*b)
+ //   cost(a+conj(b)) == cost(a+b)
+ //   etc.
+ // If we don't set it to zero, then:
+ //   A.conjugate().lazyProduct(B.conjugate())
+ // would evaluate (bake) its operands into temporaries. We definitely don't want that!
PacketAccess = packet_traits<Scalar>::HasConj
};
};
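The rationale above is about expression nesting: a non-zero cost would make the product evaluator consider conjugated operands expensive and evaluate them into temporaries before a lazy product. A small usage illustration (standard Eigen API, nothing specific to this patch):

    Eigen::MatrixXcf A = Eigen::MatrixXcf::Random(64, 64);
    Eigen::MatrixXcf B = Eigen::MatrixXcf::Random(64, 64);
    // With Cost == 0 the two conjugate views stay lazy and are folded into the
    // product kernel; they are not materialized ("baked") into temporaries first.
    Eigen::MatrixXcf C = A.conjugate().lazyProduct(B.conjugate());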
@@ -548,6 +556,23 @@ struct functor_traits<scalar_tanh_op<Scalar> > {
};
};
+#if EIGEN_HAS_CXX11_MATH
+/** \internal
+ * \brief Template functor to compute the atanh of a scalar
+ * \sa class CwiseUnaryOp, ArrayBase::atanh()
+ */
+template <typename Scalar>
+struct scalar_atanh_op {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_atanh_op)
+ EIGEN_DEVICE_FUNC inline const Scalar operator()(const Scalar& a) const { return numext::atanh(a); }
+};
+
+template <typename Scalar>
+struct functor_traits<scalar_atanh_op<Scalar> > {
+ enum { Cost = 5 * NumTraits<Scalar>::MulCost, PacketAccess = false };
+};
+#endif
+
/** \internal
* \brief Template functor to compute the sinh of a scalar
* \sa class CwiseUnaryOp, ArrayBase::sinh()
@@ -567,6 +592,23 @@ struct functor_traits<scalar_sinh_op<Scalar> >
};
};
+#if EIGEN_HAS_CXX11_MATH
+/** \internal
+ * \brief Template functor to compute the asinh of a scalar
+ * \sa class CwiseUnaryOp, ArrayBase::asinh()
+ */
+template <typename Scalar>
+struct scalar_asinh_op {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_asinh_op)
+ EIGEN_DEVICE_FUNC inline const Scalar operator()(const Scalar& a) const { return numext::asinh(a); }
+};
+
+template <typename Scalar>
+struct functor_traits<scalar_asinh_op<Scalar> > {
+ enum { Cost = 5 * NumTraits<Scalar>::MulCost, PacketAccess = false };
+};
+#endif
+
/** \internal
* \brief Template functor to compute the cosh of a scalar
* \sa class CwiseUnaryOp, ArrayBase::cosh()
@@ -586,6 +628,23 @@ struct functor_traits<scalar_cosh_op<Scalar> >
};
};
+#if EIGEN_HAS_CXX11_MATH
+/** \internal
+ * \brief Template functor to compute the acosh of a scalar
+ * \sa class CwiseUnaryOp, ArrayBase::acosh()
+ */
+template <typename Scalar>
+struct scalar_acosh_op {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_acosh_op)
+ EIGEN_DEVICE_FUNC inline const Scalar operator()(const Scalar& a) const { return numext::acosh(a); }
+};
+
+template <typename Scalar>
+struct functor_traits<scalar_acosh_op<Scalar> > {
+ enum { Cost = 5 * NumTraits<Scalar>::MulCost, PacketAccess = false };
+};
+#endif
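The three functors above back element-wise inverse hyperbolic functions. A short usage sketch, assuming the corresponding ArrayBase methods (referenced in the \sa tags) are wired up elsewhere in this change:

    Eigen::ArrayXd x = Eigen::ArrayXd::LinSpaced(5, 0.1, 0.5);
    Eigen::ArrayXd a = x.atanh();          // element-wise, via scalar_atanh_op (needs |x| < 1)
    Eigen::ArrayXd b = x.asinh();          // via scalar_asinh_op
    Eigen::ArrayXd c = (x + 1.0).acosh();  // via scalar_acosh_op (needs arguments >= 1)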
+
/** \internal
* \brief Template functor to compute the inverse of a scalar
* \sa class CwiseUnaryOp, Cwise::inverse()
@@ -598,9 +657,13 @@ struct scalar_inverse_op {
EIGEN_DEVICE_FUNC inline const Packet packetOp(const Packet& a) const
{ return internal::pdiv(pset1<Packet>(Scalar(1)),a); }
};
-template<typename Scalar>
-struct functor_traits<scalar_inverse_op<Scalar> >
-{ enum { Cost = NumTraits<Scalar>::MulCost, PacketAccess = packet_traits<Scalar>::HasDiv }; };
+template <typename Scalar>
+struct functor_traits<scalar_inverse_op<Scalar> > {
+ enum {
+ PacketAccess = packet_traits<Scalar>::HasDiv,
+ Cost = scalar_div_cost<Scalar, PacketAccess>::value
+ };
+};
/** \internal
* \brief Template functor to compute the square of a scalar
@@ -864,8 +927,9 @@ template <>
struct scalar_logistic_op<float> {
EIGEN_EMPTY_STRUCT_CTOR(scalar_logistic_op)
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float operator()(const float& x) const {
- const float one = 1.0f;
- return one / (one + numext::exp(-x));
+ if (x < -18.0f) return 0.0f;
+ else if (x > 18.0f) return 1.0f;
+ else return 1.0f / (1.0f + numext::exp(-x));
}
template <typename Packet> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
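A quick check of the new clamp thresholds in single precision: exp(-18) ≈ 1.5e-8. For x > 18 the exact sigmoid is at least 1/(1 + 1.5e-8) ≈ 1 - 1.5e-8, and the float spacing just below 1 is 2^-24 ≈ 6.0e-8, so the nearest representable float is exactly 1.0f. For x < -18 the exact value is at most ~1.5e-8, so returning 0.0f introduces at most that much absolute error while skipping the exp() call entirely.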
diff --git a/Eigen/src/Core/products/GeneralBlockPanelKernel.h b/Eigen/src/Core/products/GeneralBlockPanelKernel.h
index e7cab4720..fdd0ec0e9 100644
--- a/Eigen/src/Core/products/GeneralBlockPanelKernel.h
+++ b/Eigen/src/Core/products/GeneralBlockPanelKernel.h
@@ -15,7 +15,13 @@ namespace Eigen {
namespace internal {
-template<typename _LhsScalar, typename _RhsScalar, bool _ConjLhs=false, bool _ConjRhs=false>
+enum PacketSizeType {
+ PacketFull = 0,
+ PacketHalf,
+ PacketQuarter
+};
+
+template<typename _LhsScalar, typename _RhsScalar, bool _ConjLhs=false, bool _ConjRhs=false, int Arch=Architecture::Target, int _PacketSize=PacketFull>
class gebp_traits;
@@ -101,6 +107,16 @@ void evaluateProductBlockingSizesHeuristic(Index& k, Index& m, Index& n, Index n
// at the register level. This small horizontal panel has to stay within L1 cache.
std::ptrdiff_t l1, l2, l3;
manage_caching_sizes(GetAction, &l1, &l2, &l3);
+ #ifdef EIGEN_VECTORIZE_AVX512
+ // We still need to find a proper rationale for this factor, but without the
+ // adjustment, AVX512 performance is noticeably worse (around 20% slower).
+ // One reason is that with increasing packet size, the blocking size k
+ // has to become quite small if we want one lhs panel to fit within L1.
+ // For instance, with the 3pX4 kernel and double, the combined lhs+rhs panel size is
+ // k*(3*64 + 4*8) bytes; with l1=32kB and k%8==0, this gives k=144.
+ // This is quite small for good reuse of the accumulation registers.
+ l1 *= 4;
+ #endif
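Unpacking the arithmetic in the comment above: for the 3pX4 double kernel under AVX512, each value of k consumes 3*64 = 192 bytes of lhs (three 8-double packets) plus 4*8 = 32 bytes of rhs, i.e. 224 bytes per k. With l1 = 32 kB that allows k <= 32768/224 ≈ 146, which rounded down to a multiple of 8 gives k = 144. Quadrupling l1 lets k grow roughly fourfold, amortizing the panel loads much better over the 12 accumulation registers of the 3pX4 kernel.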
if (num_threads > 1) {
typedef typename Traits::ResScalar ResScalar;
@@ -337,6 +353,61 @@ inline void computeProductBlockingSizes(Index& k, Index& m, Index& n, Index num_
// #define CJMADD(CJ,A,B,C,T) T = B; T = CJ.pmul(A,T); C = padd(C,T);
#endif
+template <typename RhsPacket, typename RhsPacketx4, int registers_taken>
+struct RhsPanelHelper {
+ private:
+ static const int remaining_registers = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS - registers_taken;
+ public:
+ typedef typename conditional<remaining_registers>=4, RhsPacketx4, RhsPacket>::type type;
+};
+
+template <typename Packet>
+struct QuadPacket
+{
+ Packet B_0, B1, B2, B3;
+ const Packet& get(const FixedInt<0>&) const { return B_0; }
+ const Packet& get(const FixedInt<1>&) const { return B1; }
+ const Packet& get(const FixedInt<2>&) const { return B2; }
+ const Packet& get(const FixedInt<3>&) const { return B3; }
+};
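RhsPanelHelper picks the rhs operand type for a kernel based on how many vector registers remain once the accumulators and lhs packets are accounted for, and QuadPacket is the 4-wide panel it selects when registers are plentiful; its lanes are read with the compile-time tags fix<0>..fix<3>, so madd never branches at run time. A small illustration (RhsP and RhsP4 are placeholder names, not Eigen types):

    // remaining = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS - 15   (15 is what the 3pX4 kernel passes; see RhsPanel15 below)
    // remaining >= 4  ->  type is RhsP4: broadcast four rhs coeffs once, then madd(..., fix<0>) .. madd(..., fix<3>)
    // remaining <  4  ->  type is RhsP : fall back to a single packet, refreshed per lane via updateRhs()
    typedef Eigen::internal::RhsPanelHelper<RhsP, RhsP4, 15>::type Panel;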
+
+template <int N, typename T1, typename T2, typename T3>
+struct packet_conditional { typedef T3 type; };
+
+template <typename T1, typename T2, typename T3>
+struct packet_conditional<PacketFull, T1, T2, T3> { typedef T1 type; };
+
+template <typename T1, typename T2, typename T3>
+struct packet_conditional<PacketHalf, T1, T2, T3> { typedef T2 type; };
+
+#define PACKET_DECL_COND_PREFIX(prefix, name, packet_size) \
+ typedef typename packet_conditional<packet_size, \
+ typename packet_traits<name ## Scalar>::type, \
+ typename packet_traits<name ## Scalar>::half, \
+ typename unpacket_traits<typename packet_traits<name ## Scalar>::half>::half>::type \
+ prefix ## name ## Packet
+
+#define PACKET_DECL_COND(name, packet_size) \
+ typedef typename packet_conditional<packet_size, \
+ typename packet_traits<name ## Scalar>::type, \
+ typename packet_traits<name ## Scalar>::half, \
+ typename unpacket_traits<typename packet_traits<name ## Scalar>::half>::half>::type \
+ name ## Packet
+
+#define PACKET_DECL_COND_SCALAR_PREFIX(prefix, packet_size) \
+ typedef typename packet_conditional<packet_size, \
+ typename packet_traits<Scalar>::type, \
+ typename packet_traits<Scalar>::half, \
+ typename unpacket_traits<typename packet_traits<Scalar>::half>::half>::type \
+ prefix ## ScalarPacket
+
+#define PACKET_DECL_COND_SCALAR(packet_size) \
+ typedef typename packet_conditional<packet_size, \
+ typename packet_traits<Scalar>::type, \
+ typename packet_traits<Scalar>::half, \
+ typename unpacket_traits<typename packet_traits<Scalar>::half>::half>::type \
+ ScalarPacket
+
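Since the token pasting is easy to misread, here is what one of the macros, PACKET_DECL_COND_PREFIX(_, Lhs, _PacketSize), expands to; the three branches select the full-, half-, and quarter-size packet for the scalar type:

    typedef typename packet_conditional<_PacketSize,
        typename packet_traits<LhsScalar>::type,                                  // PacketFull
        typename packet_traits<LhsScalar>::half,                                  // PacketHalf
        typename unpacket_traits<typename packet_traits<LhsScalar>::half>::half   // PacketQuarter
      >::type _LhsPacket;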
/* Vectorization logic
* real*real: unpack rhs to constant packets, ...
*
@@ -347,7 +418,7 @@ inline void computeProductBlockingSizes(Index& k, Index& m, Index& n, Index num_
* cplx*real : unpack rhs to constant packets, ...
* real*cplx : load lhs as (a0,a0,a1,a1), and mul as usual
*/
-template<typename _LhsScalar, typename _RhsScalar, bool _ConjLhs, bool _ConjRhs>
+template<typename _LhsScalar, typename _RhsScalar, bool _ConjLhs, bool _ConjRhs, int Arch, int _PacketSize>
class gebp_traits
{
public:
@@ -355,13 +426,17 @@ public:
typedef _RhsScalar RhsScalar;
typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;
+ PACKET_DECL_COND_PREFIX(_, Lhs, _PacketSize);
+ PACKET_DECL_COND_PREFIX(_, Rhs, _PacketSize);
+ PACKET_DECL_COND_PREFIX(_, Res, _PacketSize);
+
enum {
ConjLhs = _ConjLhs,
ConjRhs = _ConjRhs,
- Vectorizable = packet_traits<LhsScalar>::Vectorizable && packet_traits<RhsScalar>::Vectorizable,
- LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
- RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,
- ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1,
+ Vectorizable = unpacket_traits<_LhsPacket>::vectorizable && unpacket_traits<_RhsPacket>::vectorizable,
+ LhsPacketSize = Vectorizable ? unpacket_traits<_LhsPacket>::size : 1,
+ RhsPacketSize = Vectorizable ? unpacket_traits<_RhsPacket>::size : 1,
+ ResPacketSize = Vectorizable ? unpacket_traits<_ResPacket>::size : 1,
NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS,
@@ -370,10 +445,12 @@ public:
// register block size along the M direction (currently, this one cannot be modified)
default_mr = (EIGEN_PLAIN_ENUM_MIN(16,NumberOfRegisters)/2/nr)*LhsPacketSize,
-#if defined(EIGEN_HAS_SINGLE_INSTRUCTION_MADD) && !defined(EIGEN_VECTORIZE_ALTIVEC) && !defined(EIGEN_VECTORIZE_VSX)
- // we assume 16 registers
+#if defined(EIGEN_HAS_SINGLE_INSTRUCTION_MADD) && !defined(EIGEN_VECTORIZE_ALTIVEC) && !defined(EIGEN_VECTORIZE_VSX) \
+ && ((!EIGEN_COMP_MSVC) || (EIGEN_COMP_MSVC>=1914))
+ // we assume 16 registers or more
// See bug 992, if the scalar type is not vectorizable but that EIGEN_HAS_SINGLE_INSTRUCTION_MADD is defined,
// then using 3*LhsPacketSize triggers non-implemented paths in syrk.
+ // Bug 1515: MSVC prior to v19.14 suffers from register spilling here.
mr = Vectorizable ? 3*LhsPacketSize : default_mr,
#else
mr = default_mr,
@@ -383,38 +460,41 @@ public:
RhsProgress = 1
};
- typedef typename packet_traits<LhsScalar>::type _LhsPacket;
- typedef typename packet_traits<RhsScalar>::type _RhsPacket;
- typedef typename packet_traits<ResScalar>::type _ResPacket;
typedef typename conditional<Vectorizable,_LhsPacket,LhsScalar>::type LhsPacket;
typedef typename conditional<Vectorizable,_RhsPacket,RhsScalar>::type RhsPacket;
typedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;
typedef LhsPacket LhsPacket4Packing;
+ typedef QuadPacket<RhsPacket> RhsPacketx4;
typedef ResPacket AccPacket;
EIGEN_STRONG_INLINE void initAcc(AccPacket& p)
{
p = pset1<ResPacket>(ResScalar(0));
}
-
- EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1, RhsPacket& b2, RhsPacket& b3)
- {
- pbroadcast4(b, b0, b1, b2, b3);
- }
-
-// EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1)
-// {
-// pbroadcast2(b, b0, b1);
-// }
-
+
template<typename RhsPacketType>
EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacketType& dest) const
{
dest = pset1<RhsPacketType>(*b);
}
-
+
+ EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacketx4& dest) const
+ {
+ pbroadcast4(b, dest.B_0, dest.B1, dest.B2, dest.B3);
+ }
+
+ template<typename RhsPacketType>
+ EIGEN_STRONG_INLINE void updateRhs(const RhsScalar* b, RhsPacketType& dest) const
+ {
+ loadRhs(b, dest);
+ }
+
+ EIGEN_STRONG_INLINE void updateRhs(const RhsScalar*, RhsPacketx4&) const
+ {
+ }
+
EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, RhsPacket& dest) const
{
dest = ploadquad<RhsPacket>(b);
@@ -432,8 +512,8 @@ public:
dest = ploadu<LhsPacketType>(a);
}
- template<typename LhsPacketType, typename RhsPacketType, typename AccPacketType>
- EIGEN_STRONG_INLINE void madd(const LhsPacketType& a, const RhsPacketType& b, AccPacketType& c, AccPacketType& tmp) const
+ template<typename LhsPacketType, typename RhsPacketType, typename AccPacketType, typename LaneIdType>
+ EIGEN_STRONG_INLINE void madd(const LhsPacketType& a, const RhsPacketType& b, AccPacketType& c, RhsPacketType& tmp, const LaneIdType&) const
{
conj_helper<LhsPacketType,RhsPacketType,ConjLhs,ConjRhs> cj;
// It would be a lot cleaner to call pmadd all the time. Unfortunately if we
@@ -448,6 +528,12 @@ public:
#endif
}
+ template<typename LhsPacketType, typename AccPacketType, typename LaneIdType>
+ EIGEN_STRONG_INLINE void madd(const LhsPacketType& a, const RhsPacketx4& b, AccPacketType& c, RhsPacket& tmp, const LaneIdType& lane) const
+ {
+ madd(a, b.get(lane), c, tmp, lane);
+ }
+
EIGEN_STRONG_INLINE void acc(const AccPacket& c, const ResPacket& alpha, ResPacket& r) const
{
r = pmadd(c,alpha,r);
@@ -461,21 +547,25 @@ public:
};
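To summarize the new rhs interface introduced above: loadRhs on a RhsPacketx4 broadcasts four consecutive rhs coefficients in one shot, updateRhs is a no-op for the x4 panel (its lanes are already loaded) but re-broadcasts for the single-packet fallback, and madd takes a compile-time lane tag. A sketch of one kernel step driving it, mirroring the EIGEN_GEBP_ONESTEP macro further down:

    traits.loadRhs(blB + (0+4*K)*Traits::RhsProgress, rhs_panel);   // 4 coeffs at once (or 1, for the fallback type)
    traits.madd(A0, rhs_panel, C0, T0, fix<0>);
    traits.updateRhs(blB + (1+4*K)*Traits::RhsProgress, rhs_panel); // no-op when rhs_panel is a RhsPacketx4
    traits.madd(A0, rhs_panel, C1, T0, fix<1>);
    // ... and likewise for lanes 2 and 3.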
-template<typename RealScalar, bool _ConjLhs>
-class gebp_traits<std::complex<RealScalar>, RealScalar, _ConjLhs, false>
+template<typename RealScalar, bool _ConjLhs, int Arch, int _PacketSize>
+class gebp_traits<std::complex<RealScalar>, RealScalar, _ConjLhs, false, Arch, _PacketSize>
{
public:
typedef std::complex<RealScalar> LhsScalar;
typedef RealScalar RhsScalar;
typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;
+ PACKET_DECL_COND_PREFIX(_, Lhs, _PacketSize);
+ PACKET_DECL_COND_PREFIX(_, Rhs, _PacketSize);
+ PACKET_DECL_COND_PREFIX(_, Res, _PacketSize);
+
enum {
ConjLhs = _ConjLhs,
ConjRhs = false,
- Vectorizable = packet_traits<LhsScalar>::Vectorizable && packet_traits<RhsScalar>::Vectorizable,
- LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
- RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,
- ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1,
+ Vectorizable = unpacket_traits<_LhsPacket>::vectorizable && unpacket_traits<_RhsPacket>::vectorizable,
+ LhsPacketSize = Vectorizable ? unpacket_traits<_LhsPacket>::size : 1,
+ RhsPacketSize = Vectorizable ? unpacket_traits<_RhsPacket>::size : 1,
+ ResPacketSize = Vectorizable ? unpacket_traits<_ResPacket>::size : 1,
NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS,
nr = 4,
@@ -490,15 +580,13 @@ public:
RhsProgress = 1
};
- typedef typename packet_traits<LhsScalar>::type _LhsPacket;
- typedef typename packet_traits<RhsScalar>::type _RhsPacket;
- typedef typename packet_traits<ResScalar>::type _ResPacket;
-
typedef typename conditional<Vectorizable,_LhsPacket,LhsScalar>::type LhsPacket;
typedef typename conditional<Vectorizable,_RhsPacket,RhsScalar>::type RhsPacket;
typedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;
typedef LhsPacket LhsPacket4Packing;
+ typedef QuadPacket<RhsPacket> RhsPacketx4;
+
typedef ResPacket AccPacket;
EIGEN_STRONG_INLINE void initAcc(AccPacket& p)
@@ -506,42 +594,64 @@ public:
p = pset1<ResPacket>(ResScalar(0));
}
- EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacket& dest) const
+ template<typename RhsPacketType>
+ EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacketType& dest) const
{
- dest = pset1<RhsPacket>(*b);
+ dest = pset1<RhsPacketType>(*b);
+ }
+
+ EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacketx4& dest) const
+ {
+ pbroadcast4(b, dest.B_0, dest.B1, dest.B2, dest.B3);
+ }
+
+ template<typename RhsPacketType>
+ EIGEN_STRONG_INLINE void updateRhs(const RhsScalar* b, RhsPacketType& dest) const
+ {
+ loadRhs(b, dest);
}
+
+ EIGEN_STRONG_INLINE void updateRhs(const RhsScalar*, RhsPacketx4&) const
+ {}
EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, RhsPacket& dest) const
{
- dest = pset1<RhsPacket>(*b);
+ loadRhsQuad_impl(b,dest, typename conditional<RhsPacketSize==16,true_type,false_type>::type());
}
- EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacket& dest) const
+ EIGEN_STRONG_INLINE void loadRhsQuad_impl(const RhsScalar* b, RhsPacket& dest, const true_type&) const
{
- dest = pload<LhsPacket>(a);
+ // FIXME we can do better!
+ // what we want here is a ploadheight
+ RhsScalar tmp[4] = {b[0],b[0],b[1],b[1]};
+ dest = ploadquad<RhsPacket>(tmp);
}
- EIGEN_STRONG_INLINE void loadLhsUnaligned(const LhsScalar* a, LhsPacket& dest) const
+ EIGEN_STRONG_INLINE void loadRhsQuad_impl(const RhsScalar* b, RhsPacket& dest, const false_type&) const
{
- dest = ploadu<LhsPacket>(a);
+ eigen_internal_assert(RhsPacketSize<=8);
+ dest = pset1<RhsPacket>(*b);
}
- EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1, RhsPacket& b2, RhsPacket& b3)
+ EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacket& dest) const
{
- pbroadcast4(b, b0, b1, b2, b3);
+ dest = pload<LhsPacket>(a);
}
-
-// EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1)
-// {
-// pbroadcast2(b, b0, b1);
-// }
- EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp) const
+ template<typename LhsPacketType>
+ EIGEN_STRONG_INLINE void loadLhsUnaligned(const LhsScalar* a, LhsPacketType& dest) const
+ {
+ dest = ploadu<LhsPacketType>(a);
+ }
+
+ template <typename LhsPacketType, typename RhsPacketType, typename AccPacketType, typename LaneIdType>
+ EIGEN_STRONG_INLINE void madd(const LhsPacketType& a, const RhsPacketType& b, AccPacketType& c, RhsPacketType& tmp, const LaneIdType&) const
{
madd_impl(a, b, c, tmp, typename conditional<Vectorizable,true_type,false_type>::type());
}
- EIGEN_STRONG_INLINE void madd_impl(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp, const true_type&) const
+ template <typename LhsPacketType, typename RhsPacketType, typename AccPacketType>
+ EIGEN_STRONG_INLINE void madd_impl(const LhsPacketType& a, const RhsPacketType& b, AccPacketType& c, RhsPacketType& tmp, const true_type&) const
{
#ifdef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
EIGEN_UNUSED_VARIABLE(tmp);
@@ -556,13 +666,20 @@ public:
c += a * b;
}
- EIGEN_STRONG_INLINE void acc(const AccPacket& c, const ResPacket& alpha, ResPacket& r) const
+ template<typename LhsPacketType, typename AccPacketType, typename LaneIdType>
+ EIGEN_STRONG_INLINE void madd(const LhsPacketType& a, const RhsPacketx4& b, AccPacketType& c, RhsPacket& tmp, const LaneIdType& lane) const
+ {
+ madd(a, b.get(lane), c, tmp, lane);
+ }
+
+ template <typename ResPacketType, typename AccPacketType>
+ EIGEN_STRONG_INLINE void acc(const AccPacketType& c, const ResPacketType& alpha, ResPacketType& r) const
{
+ conj_helper<ResPacketType,ResPacketType,ConjLhs,false> cj;
r = cj.pmadd(c,alpha,r);
}
protected:
- conj_helper<ResPacket,ResPacket,ConjLhs,false> cj;
};
template<typename Packet>
@@ -581,13 +698,57 @@ DoublePacket<Packet> padd(const DoublePacket<Packet> &a, const DoublePacket<Pack
return res;
}
+// Note that for DoublePacket<RealPacket> the "4" in "downto4"
+// corresponds to the number of complexes, so it means "8"
+// in terms of real coefficients.
+
template<typename Packet>
-const DoublePacket<Packet>& predux_half_dowto4(const DoublePacket<Packet> &a)
+const DoublePacket<Packet>&
+predux_half_dowto4(const DoublePacket<Packet> &a,
+ typename enable_if<unpacket_traits<Packet>::size<=8>::type* = 0)
{
return a;
}
-template<typename Packet> struct unpacket_traits<DoublePacket<Packet> > { typedef DoublePacket<Packet> half; };
+template<typename Packet>
+DoublePacket<typename unpacket_traits<Packet>::half>
+predux_half_dowto4(const DoublePacket<Packet> &a,
+ typename enable_if<unpacket_traits<Packet>::size==16>::type* = 0)
+{
+ // yes, that's pretty hackish :(
+ DoublePacket<typename unpacket_traits<Packet>::half> res;
+ typedef std::complex<typename unpacket_traits<Packet>::type> Cplx;
+ typedef typename packet_traits<Cplx>::type CplxPacket;
+ res.first = predux_half_dowto4(CplxPacket(a.first)).v;
+ res.second = predux_half_dowto4(CplxPacket(a.second)).v;
+ return res;
+}
+
+// same here, "quad" actually means "8" in terms of real coefficients
+template<typename Scalar, typename RealPacket>
+void loadQuadToDoublePacket(const Scalar* b, DoublePacket<RealPacket>& dest,
+ typename enable_if<unpacket_traits<RealPacket>::size<=8>::type* = 0)
+{
+ dest.first = pset1<RealPacket>(real(*b));
+ dest.second = pset1<RealPacket>(imag(*b));
+}
+
+template<typename Scalar, typename RealPacket>
+void loadQuadToDoublePacket(const Scalar* b, DoublePacket<RealPacket>& dest,
+ typename enable_if<unpacket_traits<RealPacket>::size==16>::type* = 0)
+{
+ // yes, that's pretty hackish too :(
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+ RealScalar r[4] = {real(b[0]), real(b[0]), real(b[1]), real(b[1])};
+ RealScalar i[4] = {imag(b[0]), imag(b[0]), imag(b[1]), imag(b[1])};
+ dest.first = ploadquad<RealPacket>(r);
+ dest.second = ploadquad<RealPacket>(i);
+}
+
+
+template<typename Packet> struct unpacket_traits<DoublePacket<Packet> > {
+ typedef DoublePacket<typename unpacket_traits<Packet>::half> half;
+};
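To make the naming note above concrete: with AVX-512 floats, a DoublePacket<Packet16f> carries two Packet16f halves holding the partial real- and imaginary-part products for 8 complex result coefficients (8 complexes = 16 real coefficients). The size==16 overload of predux_half_dowto4 folds each half, yielding a DoublePacket<Packet8f>, i.e. 4 complexes or 8 real coefficients — hence "downto4" effectively meaning 8 here. (Packet16f/Packet8f are the usual AVX-512/AVX float packet types; the numbers are for illustration only.)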
// template<typename Packet>
// DoublePacket<Packet> pmadd(const DoublePacket<Packet> &a, const DoublePacket<Packet> &b)
// {
@@ -597,8 +758,8 @@ template<typename Packet> struct unpacket_traits<DoublePacket<Packet> > { typede
// return res;
// }
-template<typename RealScalar, bool _ConjLhs, bool _ConjRhs>
-class gebp_traits<std::complex<RealScalar>, std::complex<RealScalar>, _ConjLhs, _ConjRhs >
+template<typename RealScalar, bool _ConjLhs, bool _ConjRhs, int Arch, int _PacketSize>
+class gebp_traits<std::complex<RealScalar>, std::complex<RealScalar>, _ConjLhs, _ConjRhs, Arch, _PacketSize >
{
public:
typedef std::complex<RealScalar> Scalar;
@@ -606,15 +767,21 @@ public:
typedef std::complex<RealScalar> RhsScalar;
typedef std::complex<RealScalar> ResScalar;
+ PACKET_DECL_COND_PREFIX(_, Lhs, _PacketSize);
+ PACKET_DECL_COND_PREFIX(_, Rhs, _PacketSize);
+ PACKET_DECL_COND_PREFIX(_, Res, _PacketSize);
+ PACKET_DECL_COND(Real, _PacketSize);
+ PACKET_DECL_COND_SCALAR(_PacketSize);
+
enum {
ConjLhs = _ConjLhs,
ConjRhs = _ConjRhs,
- Vectorizable = packet_traits<RealScalar>::Vectorizable
- && packet_traits<Scalar>::Vectorizable,
- RealPacketSize = Vectorizable ? packet_traits<RealScalar>::size : 1,
- ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1,
- LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
- RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,
+ Vectorizable = unpacket_traits<RealPacket>::vectorizable
+ && unpacket_traits<ScalarPacket>::vectorizable,
+ ResPacketSize = Vectorizable ? unpacket_traits<_ResPacket>::size : 1,
+ LhsPacketSize = Vectorizable ? unpacket_traits<_LhsPacket>::size : 1,
+ RhsPacketSize = Vectorizable ? unpacket_traits<RhsScalar>::size : 1,
+ RealPacketSize = Vectorizable ? unpacket_traits<RealPacket>::size : 1,
// FIXME: should depend on NumberOfRegisters
nr = 4,
@@ -624,15 +791,16 @@ public:
RhsProgress = 1
};
- typedef typename packet_traits<RealScalar>::type RealPacket;
- typedef typename packet_traits<Scalar>::type ScalarPacket;
- typedef DoublePacket<RealPacket> DoublePacketType;
+ typedef DoublePacket<RealPacket> DoublePacketType;
typedef typename conditional<Vectorizable,ScalarPacket,Scalar>::type LhsPacket4Packing;
typedef typename conditional<Vectorizable,RealPacket, Scalar>::type LhsPacket;
typedef typename conditional<Vectorizable,DoublePacketType,Scalar>::type RhsPacket;
typedef typename conditional<Vectorizable,ScalarPacket,Scalar>::type ResPacket;
typedef typename conditional<Vectorizable,DoublePacketType,Scalar>::type AccPacket;
+
+ // this actually holds 8 packets!
+ typedef QuadPacket<RhsPacket> RhsPacketx4;
EIGEN_STRONG_INLINE void initAcc(Scalar& p) { p = Scalar(0); }
@@ -643,51 +811,49 @@ public:
}
// Scalar path
- EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, ResPacket& dest) const
+ EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, ScalarPacket& dest) const
{
- dest = pset1<ResPacket>(*b);
+ dest = pset1<ScalarPacket>(*b);
}
// Vectorized path
- EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, DoublePacketType& dest) const
+ template<typename RealPacketType>
+ EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, DoublePacket<RealPacketType>& dest) const
{
- dest.first = pset1<RealPacket>(real(*b));
- dest.second = pset1<RealPacket>(imag(*b));
+ dest.first = pset1<RealPacketType>(real(*b));
+ dest.second = pset1<RealPacketType>(imag(*b));
}
-
- EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, ResPacket& dest) const
+
+ EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacketx4& dest) const
{
- loadRhs(b,dest);
+ loadRhs(b, dest.B_0);
+ loadRhs(b + 1, dest.B1);
+ loadRhs(b + 2, dest.B2);
+ loadRhs(b + 3, dest.B3);
}
- EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, DoublePacketType& dest) const
+
+ // Scalar path
+ EIGEN_STRONG_INLINE void updateRhs(const RhsScalar* b, ScalarPacket& dest) const
{
- eigen_internal_assert(unpacket_traits<ScalarPacket>::size<=4);
- loadRhs(b,dest);
+ loadRhs(b, dest);
}
-
- EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1, RhsPacket& b2, RhsPacket& b3)
+
+ // Vectorized path
+ template<typename RealPacketType>
+ EIGEN_STRONG_INLINE void updateRhs(const RhsScalar* b, DoublePacket<RealPacketType>& dest) const
{
- // FIXME not sure that's the best way to implement it!
- loadRhs(b+0, b0);
- loadRhs(b+1, b1);
- loadRhs(b+2, b2);
- loadRhs(b+3, b3);
+ loadRhs(b, dest);
}
+
+ EIGEN_STRONG_INLINE void updateRhs(const RhsScalar*, RhsPacketx4&) const {}
- // Vectorized path
- EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, DoublePacketType& b0, DoublePacketType& b1)
+ EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, ResPacket& dest) const
{
- // FIXME not sure that's the best way to implement it!
- loadRhs(b+0, b0);
- loadRhs(b+1, b1);
+ loadRhs(b,dest);
}
-
- // Scalar path
- EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsScalar& b0, RhsScalar& b1)
+ EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, DoublePacketType& dest) const
{
- // FIXME not sure that's the best way to implement it!
- loadRhs(b+0, b0);
- loadRhs(b+1, b1);
+ loadQuadToDoublePacket(b,dest);
}
// nothing special here
@@ -696,47 +862,59 @@ public:
dest = pload<LhsPacket>((const typename unpacket_traits<LhsPacket>::type*)(a));
}
- EIGEN_STRONG_INLINE void loadLhsUnaligned(const LhsScalar* a, LhsPacket& dest) const
+ template<typename LhsPacketType>
+ EIGEN_STRONG_INLINE void loadLhsUnaligned(const LhsScalar* a, LhsPacketType& dest) const
{
- dest = ploadu<LhsPacket>((const typename unpacket_traits<LhsPacket>::type*)(a));
+ dest = ploadu<LhsPacketType>((const typename unpacket_traits<LhsPacketType>::type*)(a));
}
- EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, DoublePacketType& c, RhsPacket& /*tmp*/) const
+ template<typename LhsPacketType, typename RhsPacketType, typename ResPacketType, typename TmpType, typename LaneIdType>
+ EIGEN_STRONG_INLINE
+ typename enable_if<!is_same<RhsPacketType,RhsPacketx4>::value>::type
+ madd(const LhsPacketType& a, const RhsPacketType& b, DoublePacket<ResPacketType>& c, TmpType& /*tmp*/, const LaneIdType&) const
{
c.first = padd(pmul(a,b.first), c.first);
c.second = padd(pmul(a,b.second),c.second);
}
- EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, ResPacket& c, RhsPacket& /*tmp*/) const
+ template<typename LaneIdType>
+ EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, ResPacket& c, RhsPacket& /*tmp*/, const LaneIdType&) const
{
c = cj.pmadd(a,b,c);
}
+
+ template<typename LhsPacketType, typename AccPacketType, typename LaneIdType>
+ EIGEN_STRONG_INLINE void madd(const LhsPacketType& a, const RhsPacketx4& b, AccPacketType& c, RhsPacket& tmp, const LaneIdType& lane) const
+ {
+ madd(a, b.get(lane), c, tmp, lane);
+ }
EIGEN_STRONG_INLINE void acc(const Scalar& c, const Scalar& alpha, Scalar& r) const { r += alpha * c; }
- EIGEN_STRONG_INLINE void acc(const DoublePacketType& c, const ResPacket& alpha, ResPacket& r) const
+ template<typename RealPacketType, typename ResPacketType>
+ EIGEN_STRONG_INLINE void acc(const DoublePacket<RealPacketType>& c, const ResPacketType& alpha, ResPacketType& r) const
{
// assemble c
- ResPacket tmp;
+ ResPacketType tmp;
if((!ConjLhs)&&(!ConjRhs))
{
- tmp = pcplxflip(pconj(ResPacket(c.second)));
- tmp = padd(ResPacket(c.first),tmp);
+ tmp = pcplxflip(pconj(ResPacketType(c.second)));
+ tmp = padd(ResPacketType(c.first),tmp);
}
else if((!ConjLhs)&&(ConjRhs))
{
- tmp = pconj(pcplxflip(ResPacket(c.second)));
- tmp = padd(ResPacket(c.first),tmp);
+ tmp = pconj(pcplxflip(ResPacketType(c.second)));
+ tmp = padd(ResPacketType(c.first),tmp);
}
else if((ConjLhs)&&(!ConjRhs))
{
- tmp = pcplxflip(ResPacket(c.second));
- tmp = padd(pconj(ResPacket(c.first)),tmp);
+ tmp = pcplxflip(ResPacketType(c.second));
+ tmp = padd(pconj(ResPacketType(c.first)),tmp);
}
else if((ConjLhs)&&(ConjRhs))
{
- tmp = pcplxflip(ResPacket(c.second));
- tmp = psub(pconj(ResPacket(c.first)),tmp);
+ tmp = pcplxflip(ResPacketType(c.second));
+ tmp = psub(pconj(ResPacketType(c.first)),tmp);
}
r = pmadd(tmp,alpha,r);
@@ -746,8 +924,8 @@ protected:
conj_helper<LhsScalar,RhsScalar,ConjLhs,ConjRhs> cj;
};
-template<typename RealScalar, bool _ConjRhs>
-class gebp_traits<RealScalar, std::complex<RealScalar>, false, _ConjRhs >
+template<typename RealScalar, bool _ConjRhs, int Arch, int _PacketSize>
+class gebp_traits<RealScalar, std::complex<RealScalar>, false, _ConjRhs, Arch, _PacketSize >
{
public:
typedef std::complex<RealScalar> Scalar;
@@ -755,14 +933,25 @@ public:
typedef Scalar RhsScalar;
typedef Scalar ResScalar;
+ PACKET_DECL_COND_PREFIX(_, Lhs, _PacketSize);
+ PACKET_DECL_COND_PREFIX(_, Rhs, _PacketSize);
+ PACKET_DECL_COND_PREFIX(_, Res, _PacketSize);
+ PACKET_DECL_COND_PREFIX(_, Real, _PacketSize);
+ PACKET_DECL_COND_SCALAR_PREFIX(_, _PacketSize);
+
+#undef PACKET_DECL_COND_SCALAR_PREFIX
+#undef PACKET_DECL_COND_PREFIX
+#undef PACKET_DECL_COND_SCALAR
+#undef PACKET_DECL_COND
+
enum {
ConjLhs = false,
ConjRhs = _ConjRhs,
- Vectorizable = packet_traits<RealScalar>::Vectorizable
- && packet_traits<Scalar>::Vectorizable,
- LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
- RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,
- ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1,
+ Vectorizable = unpacket_traits<_RealPacket>::vectorizable
+ && unpacket_traits<_ScalarPacket>::vectorizable,
+ LhsPacketSize = Vectorizable ? unpacket_traits<_LhsPacket>::size : 1,
+ RhsPacketSize = Vectorizable ? unpacket_traits<_RhsPacket>::size : 1,
+ ResPacketSize = Vectorizable ? unpacket_traits<_ResPacket>::size : 1,
NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS,
// FIXME: should depend on NumberOfRegisters
@@ -773,15 +962,11 @@ public:
RhsProgress = 1
};
- typedef typename packet_traits<LhsScalar>::type _LhsPacket;
- typedef typename packet_traits<RhsScalar>::type _RhsPacket;
- typedef typename packet_traits<ResScalar>::type _ResPacket;
-
typedef typename conditional<Vectorizable,_LhsPacket,LhsScalar>::type LhsPacket;
typedef typename conditional<Vectorizable,_RhsPacket,RhsScalar>::type RhsPacket;
typedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;
typedef LhsPacket LhsPacket4Packing;
-
+ typedef QuadPacket<RhsPacket> RhsPacketx4;
typedef ResPacket AccPacket;
EIGEN_STRONG_INLINE void initAcc(AccPacket& p)
@@ -789,22 +974,25 @@ public:
p = pset1<ResPacket>(ResScalar(0));
}
- EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacket& dest) const
+ template<typename RhsPacketType>
+ EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacketType& dest) const
{
- dest = pset1<RhsPacket>(*b);
+ dest = pset1<RhsPacketType>(*b);
}
-
- void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1, RhsPacket& b2, RhsPacket& b3)
+
+ EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacketx4& dest) const
{
- pbroadcast4(b, b0, b1, b2, b3);
+ pbroadcast4(b, dest.B_0, dest.B1, dest.B2, dest.B3);
}
-
-// EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1)
-// {
-// // FIXME not sure that's the best way to implement it!
-// b0 = pload1<RhsPacket>(b+0);
-// b1 = pload1<RhsPacket>(b+1);
-// }
+
+ template<typename RhsPacketType>
+ EIGEN_STRONG_INLINE void updateRhs(const RhsScalar* b, RhsPacketType& dest) const
+ {
+ loadRhs(b, dest);
+ }
+
+ EIGEN_STRONG_INLINE void updateRhs(const RhsScalar*, RhsPacketx4&) const
+ {}
EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacket& dest) const
{
@@ -813,21 +1001,23 @@ public:
EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, RhsPacket& dest) const
{
- eigen_internal_assert(unpacket_traits<RhsPacket>::size<=4);
- loadRhs(b,dest);
+ dest = ploadquad<RhsPacket>(b);
}
- EIGEN_STRONG_INLINE void loadLhsUnaligned(const LhsScalar* a, LhsPacket& dest) const
+ template<typename LhsPacketType>
+ EIGEN_STRONG_INLINE void loadLhsUnaligned(const LhsScalar* a, LhsPacketType& dest) const
{
- dest = ploaddup<LhsPacket>(a);
+ dest = ploaddup<LhsPacketType>(a);
}
- EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp) const
+ template <typename LhsPacketType, typename RhsPacketType, typename AccPacketType, typename LaneIdType>
+ EIGEN_STRONG_INLINE void madd(const LhsPacketType& a, const RhsPacketType& b, AccPacketType& c, RhsPacketType& tmp, const LaneIdType&) const
{
madd_impl(a, b, c, tmp, typename conditional<Vectorizable,true_type,false_type>::type());
}
- EIGEN_STRONG_INLINE void madd_impl(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp, const true_type&) const
+ template <typename LhsPacketType, typename RhsPacketType, typename AccPacketType>
+ EIGEN_STRONG_INLINE void madd_impl(const LhsPacketType& a, const RhsPacketType& b, AccPacketType& c, RhsPacketType& tmp, const true_type&) const
{
#ifdef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
EIGEN_UNUSED_VARIABLE(tmp);
@@ -843,16 +1033,166 @@ public:
c += a * b;
}
- EIGEN_STRONG_INLINE void acc(const AccPacket& c, const ResPacket& alpha, ResPacket& r) const
+ template<typename LhsPacketType, typename AccPacketType, typename LaneIdType>
+ EIGEN_STRONG_INLINE void madd(const LhsPacketType& a, const RhsPacketx4& b, AccPacketType& c, RhsPacket& tmp, const LaneIdType& lane) const
+ {
+ madd(a, b.get(lane), c, tmp, lane);
+ }
+
+ template <typename ResPacketType, typename AccPacketType>
+ EIGEN_STRONG_INLINE void acc(const AccPacketType& c, const ResPacketType& alpha, ResPacketType& r) const
{
+ conj_helper<ResPacketType,ResPacketType,false,ConjRhs> cj;
r = cj.pmadd(alpha,c,r);
}
protected:
- conj_helper<ResPacket,ResPacket,false,ConjRhs> cj;
+
+};
+
+
+#if EIGEN_ARCH_ARM64 && defined EIGEN_VECTORIZE_NEON
+
+template<>
+struct gebp_traits <float, float, false, false,Architecture::NEON,PacketFull>
+ : gebp_traits<float,float,false,false,Architecture::Generic,PacketFull>
+{
+ typedef float RhsPacket;
+
+ typedef float32x4_t RhsPacketx4;
+
+ EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacket& dest) const
+ {
+ dest = *b;
+ }
+
+ EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacketx4& dest) const
+ {
+ dest = vld1q_f32(b);
+ }
+
+ EIGEN_STRONG_INLINE void updateRhs(const RhsScalar* b, RhsPacket& dest) const
+ {
+ dest = *b;
+ }
+
+ EIGEN_STRONG_INLINE void updateRhs(const RhsScalar* b, RhsPacketx4& dest) const
+ {}
+
+ EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, RhsPacket& dest) const
+ {
+ loadRhs(b,dest);
+ }
+
+ EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& /*tmp*/, const FixedInt<0>&) const
+ {
+ c = vfmaq_n_f32(c, a, b);
+ }
+
+ // NOTE: Template parameter inference failed when compiled with Android NDK:
+ // "candidate template ignored: could not match 'FixedInt<N>' against 'Eigen::internal::FixedInt<0>".
+
+ EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacketx4& b, AccPacket& c, RhsPacket& /*tmp*/, const FixedInt<0>&) const
+ { madd_helper<0>(a, b, c); }
+ EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacketx4& b, AccPacket& c, RhsPacket& /*tmp*/, const FixedInt<1>&) const
+ { madd_helper<1>(a, b, c); }
+ EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacketx4& b, AccPacket& c, RhsPacket& /*tmp*/, const FixedInt<2>&) const
+ { madd_helper<2>(a, b, c); }
+ EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacketx4& b, AccPacket& c, RhsPacket& /*tmp*/, const FixedInt<3>&) const
+ { madd_helper<3>(a, b, c); }
+
+ private:
+ template<int LaneID>
+ EIGEN_STRONG_INLINE void madd_helper(const LhsPacket& a, const RhsPacketx4& b, AccPacket& c) const
+ {
+ #if EIGEN_COMP_GNUC_STRICT && !(EIGEN_GNUC_AT_LEAST(9,0))
+ // workaround gcc issue https://gcc.gnu.org/bugzilla/show_bug.cgi?id=89101
+ // vfmaq_laneq_f32 is implemented through a costly dup
+ if(LaneID==0) asm("fmla %0.4s, %1.4s, %2.s[0]\n" : "+w" (c) : "w" (a), "w" (b) : );
+ else if(LaneID==1) asm("fmla %0.4s, %1.4s, %2.s[1]\n" : "+w" (c) : "w" (a), "w" (b) : );
+ else if(LaneID==2) asm("fmla %0.4s, %1.4s, %2.s[2]\n" : "+w" (c) : "w" (a), "w" (b) : );
+ else if(LaneID==3) asm("fmla %0.4s, %1.4s, %2.s[3]\n" : "+w" (c) : "w" (a), "w" (b) : );
+ #else
+ c = vfmaq_laneq_f32(c, a, b, LaneID);
+ #endif
+ }
+};
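For reference, vfmaq_laneq_f32(c, a, b, LaneID) computes c[j] += a[j] * b[LaneID] for j = 0..3, i.e. one fused multiply-add of the whole lhs packet by a single rhs lane, which is exactly one column of the 1x4 micro-kernel. The inline-asm branch above emits the same fmla instruction directly because pre-9 GCC lowers the intrinsic through an extra dup (see the linked bug).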
+
+
+template<>
+struct gebp_traits <double, double, false, false,Architecture::NEON>
+ : gebp_traits<double,double,false,false,Architecture::Generic>
+{
+ typedef double RhsPacket;
+
+ struct RhsPacketx4 {
+ float64x2_t B_0, B_1;
+ };
+
+ EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacket& dest) const
+ {
+ dest = *b;
+ }
+
+ EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacketx4& dest) const
+ {
+ dest.B_0 = vld1q_f64(b);
+ dest.B_1 = vld1q_f64(b+2);
+ }
+
+ EIGEN_STRONG_INLINE void updateRhs(const RhsScalar* b, RhsPacket& dest) const
+ {
+ loadRhs(b,dest);
+ }
+
+ EIGEN_STRONG_INLINE void updateRhs(const RhsScalar* b, RhsPacketx4& dest) const
+ {}
+
+ EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, RhsPacket& dest) const
+ {
+ loadRhs(b,dest);
+ }
+
+ EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& /*tmp*/, const FixedInt<0>&) const
+ {
+ c = vfmaq_n_f64(c, a, b);
+ }
+
+ // NOTE: Template parameter inference failed when compiled with Android NDK:
+ // "candidate template ignored: could not match 'FixedInt<N>' against 'Eigen::internal::FixedInt<0>".
+
+ EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacketx4& b, AccPacket& c, RhsPacket& /*tmp*/, const FixedInt<0>&) const
+ { madd_helper<0>(a, b, c); }
+ EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacketx4& b, AccPacket& c, RhsPacket& /*tmp*/, const FixedInt<1>&) const
+ { madd_helper<1>(a, b, c); }
+ EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacketx4& b, AccPacket& c, RhsPacket& /*tmp*/, const FixedInt<2>&) const
+ { madd_helper<2>(a, b, c); }
+ EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacketx4& b, AccPacket& c, RhsPacket& /*tmp*/, const FixedInt<3>&) const
+ { madd_helper<3>(a, b, c); }
+
+ private:
+ template <int LaneID>
+ EIGEN_STRONG_INLINE void madd_helper(const LhsPacket& a, const RhsPacketx4& b, AccPacket& c) const
+ {
+ #if EIGEN_COMP_GNUC_STRICT && !(EIGEN_GNUC_AT_LEAST(9,0))
+ // workaround gcc issue https://gcc.gnu.org/bugzilla/show_bug.cgi?id=89101
+ // vfmaq_laneq_f64 is implemented through a costly dup
+ if(LaneID==0) asm("fmla %0.2d, %1.2d, %2.d[0]\n" : "+w" (c) : "w" (a), "w" (b.B_0) : );
+ else if(LaneID==1) asm("fmla %0.2d, %1.2d, %2.d[1]\n" : "+w" (c) : "w" (a), "w" (b.B_0) : );
+ else if(LaneID==2) asm("fmla %0.2d, %1.2d, %2.d[0]\n" : "+w" (c) : "w" (a), "w" (b.B_1) : );
+ else if(LaneID==3) asm("fmla %0.2d, %1.2d, %2.d[1]\n" : "+w" (c) : "w" (a), "w" (b.B_1) : );
+ #else
+ if(LaneID==0) c = vfmaq_laneq_f64(c, a, b.B_0, 0);
+ else if(LaneID==1) c = vfmaq_laneq_f64(c, a, b.B_0, 1);
+ else if(LaneID==2) c = vfmaq_laneq_f64(c, a, b.B_1, 0);
+ else if(LaneID==3) c = vfmaq_laneq_f64(c, a, b.B_1, 1);
+ #endif
+ }
};
-/* optimized GEneral packed Block * packed Panel product kernel
+#endif
+
+/* optimized General packed Block * packed Panel product kernel
*
* Mixing type logic: C += A * B
* | A | B | comments
@@ -862,26 +1202,47 @@ protected:
template<typename LhsScalar, typename RhsScalar, typename Index, typename DataMapper, int mr, int nr, bool ConjugateLhs, bool ConjugateRhs>
struct gebp_kernel
{
- typedef gebp_traits<LhsScalar,RhsScalar,ConjugateLhs,ConjugateRhs> Traits;
+ typedef gebp_traits<LhsScalar,RhsScalar,ConjugateLhs,ConjugateRhs,Architecture::Target> Traits;
+ typedef gebp_traits<LhsScalar,RhsScalar,ConjugateLhs,ConjugateRhs,Architecture::Target,PacketHalf> HalfTraits;
+ typedef gebp_traits<LhsScalar,RhsScalar,ConjugateLhs,ConjugateRhs,Architecture::Target,PacketQuarter> QuarterTraits;
+
typedef typename Traits::ResScalar ResScalar;
typedef typename Traits::LhsPacket LhsPacket;
typedef typename Traits::RhsPacket RhsPacket;
typedef typename Traits::ResPacket ResPacket;
typedef typename Traits::AccPacket AccPacket;
+ typedef typename Traits::RhsPacketx4 RhsPacketx4;
+
+ typedef typename RhsPanelHelper<RhsPacket, RhsPacketx4, 15>::type RhsPanel15;
+
+ typedef gebp_traits<RhsScalar,LhsScalar,ConjugateRhs,ConjugateLhs,Architecture::Target> SwappedTraits;
- typedef gebp_traits<RhsScalar,LhsScalar,ConjugateRhs,ConjugateLhs> SwappedTraits;
typedef typename SwappedTraits::ResScalar SResScalar;
typedef typename SwappedTraits::LhsPacket SLhsPacket;
typedef typename SwappedTraits::RhsPacket SRhsPacket;
typedef typename SwappedTraits::ResPacket SResPacket;
typedef typename SwappedTraits::AccPacket SAccPacket;
+ typedef typename HalfTraits::LhsPacket LhsPacketHalf;
+ typedef typename HalfTraits::RhsPacket RhsPacketHalf;
+ typedef typename HalfTraits::ResPacket ResPacketHalf;
+ typedef typename HalfTraits::AccPacket AccPacketHalf;
+
+ typedef typename QuarterTraits::LhsPacket LhsPacketQuarter;
+ typedef typename QuarterTraits::RhsPacket RhsPacketQuarter;
+ typedef typename QuarterTraits::ResPacket ResPacketQuarter;
+ typedef typename QuarterTraits::AccPacket AccPacketQuarter;
+
typedef typename DataMapper::LinearMapper LinearMapper;
enum {
Vectorizable = Traits::Vectorizable,
LhsProgress = Traits::LhsProgress,
+ LhsProgressHalf = HalfTraits::LhsProgress,
+ LhsProgressQuarter = QuarterTraits::LhsProgress,
RhsProgress = Traits::RhsProgress,
+ RhsProgressHalf = HalfTraits::RhsProgress,
+ RhsProgressQuarter = QuarterTraits::RhsProgress,
ResPacketSize = Traits::ResPacketSize
};
@@ -892,11 +1253,11 @@ struct gebp_kernel
};
template<typename LhsScalar, typename RhsScalar, typename Index, typename DataMapper, int mr, int nr, bool ConjugateLhs, bool ConjugateRhs,
- int SwappedLhsProgress = gebp_traits<RhsScalar,LhsScalar,ConjugateRhs,ConjugateLhs>::LhsProgress>
+int SwappedLhsProgress = gebp_traits<RhsScalar,LhsScalar,ConjugateRhs,ConjugateLhs,Architecture::Target>::LhsProgress>
struct last_row_process_16_packets
{
- typedef gebp_traits<LhsScalar,RhsScalar,ConjugateLhs,ConjugateRhs> Traits;
- typedef gebp_traits<RhsScalar,LhsScalar,ConjugateRhs,ConjugateLhs> SwappedTraits;
+ typedef gebp_traits<LhsScalar,RhsScalar,ConjugateLhs,ConjugateRhs,Architecture::Target> Traits;
+ typedef gebp_traits<RhsScalar,LhsScalar,ConjugateRhs,ConjugateLhs,Architecture::Target> SwappedTraits;
typedef typename Traits::ResScalar ResScalar;
typedef typename SwappedTraits::LhsPacket SLhsPacket;
@@ -924,8 +1285,8 @@ struct last_row_process_16_packets
template<typename LhsScalar, typename RhsScalar, typename Index, typename DataMapper, int mr, int nr, bool ConjugateLhs, bool ConjugateRhs>
struct last_row_process_16_packets<LhsScalar, RhsScalar, Index, DataMapper, mr, nr, ConjugateLhs, ConjugateRhs, 16> {
- typedef gebp_traits<LhsScalar,RhsScalar,ConjugateLhs,ConjugateRhs> Traits;
- typedef gebp_traits<RhsScalar,LhsScalar,ConjugateRhs,ConjugateLhs> SwappedTraits;
+ typedef gebp_traits<LhsScalar,RhsScalar,ConjugateLhs,ConjugateRhs,Architecture::Target> Traits;
+ typedef gebp_traits<RhsScalar,LhsScalar,ConjugateRhs,ConjugateLhs,Architecture::Target> SwappedTraits;
typedef typename Traits::ResScalar ResScalar;
typedef typename SwappedTraits::LhsPacket SLhsPacket;
@@ -957,7 +1318,7 @@ struct last_row_process_16_packets<LhsScalar, RhsScalar, Index, DataMapper, mr,
SRhsPacketQuarter b0;
straits.loadLhsUnaligned(blB, a0);
straits.loadRhs(blA, b0);
- straits.madd(a0,b0,c0,b0);
+ straits.madd(a0,b0,c0,b0, fix<0>);
blB += SwappedTraits::LhsProgress/4;
blA += 1;
}
@@ -971,6 +1332,219 @@ struct last_row_process_16_packets<LhsScalar, RhsScalar, Index, DataMapper, mr,
}
};
+template<int nr, Index LhsProgress, Index RhsProgress, typename LhsScalar, typename RhsScalar, typename ResScalar, typename AccPacket, typename LhsPacket, typename RhsPacket, typename ResPacket, typename GEBPTraits, typename LinearMapper, typename DataMapper>
+struct lhs_process_one_packet
+{
+ typedef typename GEBPTraits::RhsPacketx4 RhsPacketx4;
+
+ EIGEN_STRONG_INLINE void peeled_kc_onestep(Index K, const LhsScalar* blA, const RhsScalar* blB, GEBPTraits traits, LhsPacket *A0, RhsPacketx4 *rhs_panel, RhsPacket *T0, AccPacket *C0, AccPacket *C1, AccPacket *C2, AccPacket *C3)
+ {
+ EIGEN_ASM_COMMENT("begin step of gebp micro kernel 1X4");
+ EIGEN_ASM_COMMENT("Note: these asm comments work around bug 935!");
+ traits.loadLhs(&blA[(0+1*K)*LhsProgress], *A0);
+ traits.loadRhs(&blB[(0+4*K)*RhsProgress], *rhs_panel);
+ traits.madd(*A0, *rhs_panel, *C0, *T0, fix<0>);
+ traits.madd(*A0, *rhs_panel, *C1, *T0, fix<1>);
+ traits.madd(*A0, *rhs_panel, *C2, *T0, fix<2>);
+ traits.madd(*A0, *rhs_panel, *C3, *T0, fix<3>);
+ #if EIGEN_GNUC_AT_LEAST(6,0) && defined(EIGEN_VECTORIZE_SSE)
+ __asm__ ("" : "+x,m" (*A0));
+ #endif
+ EIGEN_ASM_COMMENT("end step of gebp micro kernel 1X4");
+ }
+
+ EIGEN_STRONG_INLINE void operator()(
+ const DataMapper& res, const LhsScalar* blockA, const RhsScalar* blockB, ResScalar alpha,
+ Index peelStart, Index peelEnd, Index strideA, Index strideB, Index offsetA, Index offsetB,
+ int prefetch_res_offset, Index peeled_kc, Index pk, Index cols, Index depth, Index packet_cols4)
+ {
+ GEBPTraits traits;
+
+ // loops on each largest micro horizontal panel of lhs
+ // (LhsProgress x depth)
+ for(Index i=peelStart; i<peelEnd; i+=LhsProgress)
+ {
+ // loops on each largest micro vertical panel of rhs (depth * nr)
+ for(Index j2=0; j2<packet_cols4; j2+=nr)
+ {
+ // We select a LhsProgress x nr micro block of res
+ // which is entirely stored into 1 x nr registers.
+
+ const LhsScalar* blA = &blockA[i*strideA+offsetA*(LhsProgress)];
+ prefetch(&blA[0]);
+
+ // gets res block as register
+ AccPacket C0, C1, C2, C3;
+ traits.initAcc(C0);
+ traits.initAcc(C1);
+ traits.initAcc(C2);
+ traits.initAcc(C3);
+ // To improve instruction pipelining, let's double the accumulation registers:
+ //  even k will accumulate in C*, while odd k will accumulate in D*.
+ // This trick is crucial to get good performance with FMA; otherwise it is
+ // actually faster to perform separate MUL+ADD operations because of the naturally
+ // better instruction-level parallelism (a generic sketch of this trick follows after this struct).
+ AccPacket D0, D1, D2, D3;
+ traits.initAcc(D0);
+ traits.initAcc(D1);
+ traits.initAcc(D2);
+ traits.initAcc(D3);
+
+ LinearMapper r0 = res.getLinearMapper(i, j2 + 0);
+ LinearMapper r1 = res.getLinearMapper(i, j2 + 1);
+ LinearMapper r2 = res.getLinearMapper(i, j2 + 2);
+ LinearMapper r3 = res.getLinearMapper(i, j2 + 3);
+
+ r0.prefetch(prefetch_res_offset);
+ r1.prefetch(prefetch_res_offset);
+ r2.prefetch(prefetch_res_offset);
+ r3.prefetch(prefetch_res_offset);
+
+ // performs "inner" products
+ const RhsScalar* blB = &blockB[j2*strideB+offsetB*nr];
+ prefetch(&blB[0]);
+ LhsPacket A0, A1;
+
+ for(Index k=0; k<peeled_kc; k+=pk)
+ {
+ EIGEN_ASM_COMMENT("begin gebp micro kernel 1/half/quarterX4");
+ RhsPacketx4 rhs_panel;
+ RhsPacket T0;
+
+ internal::prefetch(blB+(48+0));
+ peeled_kc_onestep(0, blA, blB, traits, &A0, &rhs_panel, &T0, &C0, &C1, &C2, &C3);
+ peeled_kc_onestep(1, blA, blB, traits, &A1, &rhs_panel, &T0, &D0, &D1, &D2, &D3);
+ peeled_kc_onestep(2, blA, blB, traits, &A0, &rhs_panel, &T0, &C0, &C1, &C2, &C3);
+ peeled_kc_onestep(3, blA, blB, traits, &A1, &rhs_panel, &T0, &D0, &D1, &D2, &D3);
+ internal::prefetch(blB+(48+16));
+ peeled_kc_onestep(4, blA, blB, traits, &A0, &rhs_panel, &T0, &C0, &C1, &C2, &C3);
+ peeled_kc_onestep(5, blA, blB, traits, &A1, &rhs_panel, &T0, &D0, &D1, &D2, &D3);
+ peeled_kc_onestep(6, blA, blB, traits, &A0, &rhs_panel, &T0, &C0, &C1, &C2, &C3);
+ peeled_kc_onestep(7, blA, blB, traits, &A1, &rhs_panel, &T0, &D0, &D1, &D2, &D3);
+
+ blB += pk*4*RhsProgress;
+ blA += pk*LhsProgress;
+
+ EIGEN_ASM_COMMENT("end gebp micro kernel 1/half/quarterX4");
+ }
+ C0 = padd(C0,D0);
+ C1 = padd(C1,D1);
+ C2 = padd(C2,D2);
+ C3 = padd(C3,D3);
+
+ // process remaining peeled loop
+ for(Index k=peeled_kc; k<depth; k++)
+ {
+ RhsPacketx4 rhs_panel;
+ RhsPacket T0;
+ peeled_kc_onestep(0, blA, blB, traits, &A0, &rhs_panel, &T0, &C0, &C1, &C2, &C3);
+ blB += 4*RhsProgress;
+ blA += LhsProgress;
+ }
+
+ ResPacket R0, R1;
+ ResPacket alphav = pset1<ResPacket>(alpha);
+
+ R0 = r0.template loadPacket<ResPacket>(0);
+ R1 = r1.template loadPacket<ResPacket>(0);
+ traits.acc(C0, alphav, R0);
+ traits.acc(C1, alphav, R1);
+ r0.storePacket(0, R0);
+ r1.storePacket(0, R1);
+
+ R0 = r2.template loadPacket<ResPacket>(0);
+ R1 = r3.template loadPacket<ResPacket>(0);
+ traits.acc(C2, alphav, R0);
+ traits.acc(C3, alphav, R1);
+ r2.storePacket(0, R0);
+ r3.storePacket(0, R1);
+ }
+
+ // Deal with remaining columns of the rhs
+ for(Index j2=packet_cols4; j2<cols; j2++)
+ {
+ // One column at a time
+ const LhsScalar* blA = &blockA[i*strideA+offsetA*(LhsProgress)];
+ prefetch(&blA[0]);
+
+ // gets res block as register
+ AccPacket C0;
+ traits.initAcc(C0);
+
+ LinearMapper r0 = res.getLinearMapper(i, j2);
+
+ // performs "inner" products
+ const RhsScalar* blB = &blockB[j2*strideB+offsetB];
+ LhsPacket A0;
+
+ for(Index k= 0; k<peeled_kc; k+=pk)
+ {
+ EIGEN_ASM_COMMENT("begin gebp micro kernel 1/half/quarterX1");
+ RhsPacket B_0;
+
+#define EIGEN_GEBGP_ONESTEP(K) \
+ do { \
+ EIGEN_ASM_COMMENT("begin step of gebp micro kernel 1/half/quarterX1"); \
+ EIGEN_ASM_COMMENT("Note: these asm comments work around bug 935!"); \
+ /* FIXME: why unaligned???? */ \
+ traits.loadLhsUnaligned(&blA[(0+1*K)*LhsProgress], A0); \
+ traits.loadRhs(&blB[(0+K)*RhsProgress], B_0); \
+ traits.madd(A0, B_0, C0, B_0, fix<0>); \
+ EIGEN_ASM_COMMENT("end step of gebp micro kernel 1/half/quarterX1"); \
+ } while(false);
+
+ EIGEN_GEBGP_ONESTEP(0);
+ EIGEN_GEBGP_ONESTEP(1);
+ EIGEN_GEBGP_ONESTEP(2);
+ EIGEN_GEBGP_ONESTEP(3);
+ EIGEN_GEBGP_ONESTEP(4);
+ EIGEN_GEBGP_ONESTEP(5);
+ EIGEN_GEBGP_ONESTEP(6);
+ EIGEN_GEBGP_ONESTEP(7);
+
+ blB += pk*RhsProgress;
+ blA += pk*LhsProgress;
+
+ EIGEN_ASM_COMMENT("end gebp micro kernel 1/half/quarterX1");
+ }
+
+ // process remaining peeled loop
+ for(Index k=peeled_kc; k<depth; k++)
+ {
+ RhsPacket B_0;
+ EIGEN_GEBGP_ONESTEP(0);
+ blB += RhsProgress;
+ blA += LhsProgress;
+ }
+#undef EIGEN_GEBGP_ONESTEP
+ ResPacket R0;
+ ResPacket alphav = pset1<ResPacket>(alpha);
+ R0 = r0.template loadPacket<ResPacket>(0);
+ traits.acc(C0, alphav, R0);
+ r0.storePacket(0, R0);
+ }
+ }
+ }
+};
+
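The dual-accumulator comment inside lhs_process_one_packet refers to a standard way of breaking the loop-carried dependency on a single FMA accumulator. A generic sketch of the pattern (illustration only, not Eigen code):

    // acc0 and acc1 accumulate independent chains, so consecutive FMAs can overlap in the pipeline.
    acc0 = acc1 = zero;
    for (k = 0; k + 1 < n; k += 2) {
      acc0 = fma(a[k],   b[k],   acc0);   // even iterations
      acc1 = fma(a[k+1], b[k+1], acc1);   // odd iterations, independent of acc0
    }
    result = acc0 + acc1;                 // merged once at the end, just like C* = padd(C*, D*) above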
+template<int nr, Index LhsProgress, Index RhsProgress, typename LhsScalar, typename RhsScalar, typename ResScalar, typename AccPacket, typename LhsPacket, typename RhsPacket, typename ResPacket, typename GEBPTraits, typename LinearMapper, typename DataMapper>
+struct lhs_process_fraction_of_packet : lhs_process_one_packet<nr, LhsProgress, RhsProgress, LhsScalar, RhsScalar, ResScalar, AccPacket, LhsPacket, RhsPacket, ResPacket, GEBPTraits, LinearMapper, DataMapper>
+{
+
+EIGEN_STRONG_INLINE void peeled_kc_onestep(Index K, const LhsScalar* blA, const RhsScalar* blB, GEBPTraits traits, LhsPacket *A0, RhsPacket *B_0, RhsPacket *B1, RhsPacket *B2, RhsPacket *B3, AccPacket *C0, AccPacket *C1, AccPacket *C2, AccPacket *C3)
+ {
+ EIGEN_ASM_COMMENT("begin step of gebp micro kernel 1X4");
+ EIGEN_ASM_COMMENT("Note: these asm comments work around bug 935!");
+ traits.loadLhsUnaligned(&blA[(0+1*K)*(LhsProgress)], *A0);
+ traits.broadcastRhs(&blB[(0+4*K)*RhsProgress], *B_0, *B1, *B2, *B3);
+ traits.madd(*A0, *B_0, *C0, *B_0);
+ traits.madd(*A0, *B1, *C1, *B1);
+ traits.madd(*A0, *B2, *C2, *B2);
+ traits.madd(*A0, *B3, *C3, *B3);
+ EIGEN_ASM_COMMENT("end step of gebp micro kernel 1X4");
+ }
+};
+
template<typename LhsScalar, typename RhsScalar, typename Index, typename DataMapper, int mr, int nr, bool ConjugateLhs, bool ConjugateRhs>
EIGEN_DONT_INLINE
void gebp_kernel<LhsScalar,RhsScalar,Index,DataMapper,mr,nr,ConjugateLhs,ConjugateRhs>
@@ -987,10 +1561,12 @@ void gebp_kernel<LhsScalar,RhsScalar,Index,DataMapper,mr,nr,ConjugateLhs,Conjuga
Index packet_cols4 = nr>=4 ? (cols/4) * 4 : 0;
const Index peeled_mc3 = mr>=3*Traits::LhsProgress ? (rows/(3*LhsProgress))*(3*LhsProgress) : 0;
const Index peeled_mc2 = mr>=2*Traits::LhsProgress ? peeled_mc3+((rows-peeled_mc3)/(2*LhsProgress))*(2*LhsProgress) : 0;
- const Index peeled_mc1 = mr>=1*Traits::LhsProgress ? (rows/(1*LhsProgress))*(1*LhsProgress) : 0;
+ const Index peeled_mc1 = mr>=1*Traits::LhsProgress ? peeled_mc2+((rows-peeled_mc2)/(1*LhsProgress))*(1*LhsProgress) : 0;
+ const Index peeled_mc_half = mr>=LhsProgressHalf ? peeled_mc1+((rows-peeled_mc1)/(LhsProgressHalf))*(LhsProgressHalf) : 0;
+ const Index peeled_mc_quarter = mr>=LhsProgressQuarter ? peeled_mc_half+((rows-peeled_mc_half)/(LhsProgressQuarter))*(LhsProgressQuarter) : 0;
enum { pk = 8 }; // NOTE Such a large peeling factor is important for large matrices (~ +5% when >1000 on Haswell)
const Index peeled_kc = depth & ~(pk-1);
- const Index prefetch_res_offset = 32/sizeof(ResScalar);
+ const int prefetch_res_offset = 32/sizeof(ResScalar);
// const Index depth2 = depth & ~1;
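A toy walk-through of the new row-peeling cascade, assuming AVX-512 floats where the full/half/quarter packets hold 16/8/4 elements, mr = 3*LhsProgress = 48, and rows = 100 (sizes for illustration only): peeled_mc3 = 96 (two 48-row blocks); peeled_mc2 and peeled_mc1 stay at 96, since the 4 remaining rows fit neither a 32- nor a 16-row step; peeled_mc_half also stays at 96; and peeled_mc_quarter = 100, so the last 4 rows are handled with quarter-size packets instead of falling through to the scalar tail.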
//---------- Process 3 * LhsProgress rows at once ----------
@@ -1048,36 +1624,48 @@ void gebp_kernel<LhsScalar,RhsScalar,Index,DataMapper,mr,nr,ConjugateLhs,Conjuga
for(Index k=0; k<peeled_kc; k+=pk)
{
EIGEN_ASM_COMMENT("begin gebp micro kernel 3pX4");
- RhsPacket B_0, T0;
+ // 15 registers are taken (12 for acc, 2 for lhs).
+ RhsPanel15 rhs_panel;
+ RhsPacket T0;
LhsPacket A2;
-
-#define EIGEN_GEBP_ONESTEP(K) \
- do { \
- EIGEN_ASM_COMMENT("begin step of gebp micro kernel 3pX4"); \
+ #if EIGEN_COMP_GNUC_STRICT && EIGEN_ARCH_ARM64 && defined(EIGEN_VECTORIZE_NEON) && !(EIGEN_GNUC_AT_LEAST(9,0))
+ // see http://eigen.tuxfamily.org/bz/show_bug.cgi?id=1633
+ // without this workaround A0, A1, and A2 are loaded in the same register,
+ // which is not good for pipelining
+ #define EIGEN_GEBP_3PX4_REGISTER_ALLOC_WORKAROUND __asm__ ("" : "+w,m" (A0), "+w,m" (A1), "+w,m" (A2));
+ #else
+ #define EIGEN_GEBP_3PX4_REGISTER_ALLOC_WORKAROUND
+ #endif
+#define EIGEN_GEBP_ONESTEP(K) \
+ do { \
+ EIGEN_ASM_COMMENT("begin step of gebp micro kernel 3pX4"); \
EIGEN_ASM_COMMENT("Note: these asm comments work around bug 935!"); \
- internal::prefetch(blA+(3*K+16)*LhsProgress); \
- if (EIGEN_ARCH_ARM || EIGEN_ARCH_MIPS) { internal::prefetch(blB+(4*K+16)*RhsProgress); } /* Bug 953 */ \
- traits.loadLhs(&blA[(0+3*K)*LhsProgress], A0); \
- traits.loadLhs(&blA[(1+3*K)*LhsProgress], A1); \
- traits.loadLhs(&blA[(2+3*K)*LhsProgress], A2); \
- traits.loadRhs(blB + (0+4*K)*Traits::RhsProgress, B_0); \
- traits.madd(A0, B_0, C0, T0); \
- traits.madd(A1, B_0, C4, T0); \
- traits.madd(A2, B_0, C8, B_0); \
- traits.loadRhs(blB + (1+4*K)*Traits::RhsProgress, B_0); \
- traits.madd(A0, B_0, C1, T0); \
- traits.madd(A1, B_0, C5, T0); \
- traits.madd(A2, B_0, C9, B_0); \
- traits.loadRhs(blB + (2+4*K)*Traits::RhsProgress, B_0); \
- traits.madd(A0, B_0, C2, T0); \
- traits.madd(A1, B_0, C6, T0); \
- traits.madd(A2, B_0, C10, B_0); \
- traits.loadRhs(blB + (3+4*K)*Traits::RhsProgress, B_0); \
- traits.madd(A0, B_0, C3 , T0); \
- traits.madd(A1, B_0, C7, T0); \
- traits.madd(A2, B_0, C11, B_0); \
- EIGEN_ASM_COMMENT("end step of gebp micro kernel 3pX4"); \
- } while(false)
+ internal::prefetch(blA + (3 * K + 16) * LhsProgress); \
+ if (EIGEN_ARCH_ARM || EIGEN_ARCH_MIPS) { \
+ internal::prefetch(blB + (4 * K + 16) * RhsProgress); \
+ } /* Bug 953 */ \
+ traits.loadLhs(&blA[(0 + 3 * K) * LhsProgress], A0); \
+ traits.loadLhs(&blA[(1 + 3 * K) * LhsProgress], A1); \
+ traits.loadLhs(&blA[(2 + 3 * K) * LhsProgress], A2); \
+ EIGEN_GEBP_3PX4_REGISTER_ALLOC_WORKAROUND \
+ traits.loadRhs(blB + (0+4*K) * Traits::RhsProgress, rhs_panel); \
+ traits.madd(A0, rhs_panel, C0, T0, fix<0>); \
+ traits.madd(A1, rhs_panel, C4, T0, fix<0>); \
+ traits.madd(A2, rhs_panel, C8, T0, fix<0>); \
+ traits.updateRhs(blB + (1+4*K) * Traits::RhsProgress, rhs_panel); \
+ traits.madd(A0, rhs_panel, C1, T0, fix<1>); \
+ traits.madd(A1, rhs_panel, C5, T0, fix<1>); \
+ traits.madd(A2, rhs_panel, C9, T0, fix<1>); \
+ traits.updateRhs(blB + (2+4*K) * Traits::RhsProgress, rhs_panel); \
+ traits.madd(A0, rhs_panel, C2, T0, fix<2>); \
+ traits.madd(A1, rhs_panel, C6, T0, fix<2>); \
+ traits.madd(A2, rhs_panel, C10, T0, fix<2>); \
+ traits.updateRhs(blB + (3+4*K) * Traits::RhsProgress, rhs_panel); \
+ traits.madd(A0, rhs_panel, C3, T0, fix<3>); \
+ traits.madd(A1, rhs_panel, C7, T0, fix<3>); \
+ traits.madd(A2, rhs_panel, C11, T0, fix<3>); \
+ EIGEN_ASM_COMMENT("end step of gebp micro kernel 3pX4"); \
+ } while (false)
internal::prefetch(blB);
EIGEN_GEBP_ONESTEP(0);
@@ -1097,7 +1685,8 @@ void gebp_kernel<LhsScalar,RhsScalar,Index,DataMapper,mr,nr,ConjugateLhs,Conjuga
// process remaining peeled loop
for(Index k=peeled_kc; k<depth; k++)
{
- RhsPacket B_0, T0;
+ RhsPanel15 rhs_panel;
+ RhsPacket T0;
LhsPacket A2;
EIGEN_GEBP_ONESTEP(0);
blB += 4*RhsProgress;
@@ -1177,20 +1766,20 @@ void gebp_kernel<LhsScalar,RhsScalar,Index,DataMapper,mr,nr,ConjugateLhs,Conjuga
{
EIGEN_ASM_COMMENT("begin gebp micro kernel 3pX1");
RhsPacket B_0;
-#define EIGEN_GEBGP_ONESTEP(K) \
- do { \
- EIGEN_ASM_COMMENT("begin step of gebp micro kernel 3pX1"); \
+#define EIGEN_GEBGP_ONESTEP(K) \
+ do { \
+ EIGEN_ASM_COMMENT("begin step of gebp micro kernel 3pX1"); \
EIGEN_ASM_COMMENT("Note: these asm comments work around bug 935!"); \
- traits.loadLhs(&blA[(0+3*K)*LhsProgress], A0); \
- traits.loadLhs(&blA[(1+3*K)*LhsProgress], A1); \
- traits.loadLhs(&blA[(2+3*K)*LhsProgress], A2); \
- traits.loadRhs(&blB[(0+K)*RhsProgress], B_0); \
- traits.madd(A0, B_0, C0, B_0); \
- traits.madd(A1, B_0, C4, B_0); \
- traits.madd(A2, B_0, C8, B_0); \
- EIGEN_ASM_COMMENT("end step of gebp micro kernel 3pX1"); \
- } while(false)
-
+ traits.loadLhs(&blA[(0 + 3 * K) * LhsProgress], A0); \
+ traits.loadLhs(&blA[(1 + 3 * K) * LhsProgress], A1); \
+ traits.loadLhs(&blA[(2 + 3 * K) * LhsProgress], A2); \
+ traits.loadRhs(&blB[(0 + K) * RhsProgress], B_0); \
+ traits.madd(A0, B_0, C0, B_0, fix<0>); \
+ traits.madd(A1, B_0, C4, B_0, fix<0>); \
+ traits.madd(A2, B_0, C8, B_0, fix<0>); \
+ EIGEN_ASM_COMMENT("end step of gebp micro kernel 3pX1"); \
+ } while (false)
+
EIGEN_GEBGP_ONESTEP(0);
EIGEN_GEBGP_ONESTEP(1);
EIGEN_GEBGP_ONESTEP(2);
@@ -1279,26 +1868,34 @@ void gebp_kernel<LhsScalar,RhsScalar,Index,DataMapper,mr,nr,ConjugateLhs,Conjuga
for(Index k=0; k<peeled_kc; k+=pk)
{
EIGEN_ASM_COMMENT("begin gebp micro kernel 2pX4");
- RhsPacket B_0, B1, B2, B3, T0;
+ RhsPacketx4 rhs_panel;
+ RhsPacket T0;
+
+ // NOTE: the begin/end asm comments below work around bug 935!
+ // but they are not enough for gcc>=6 without FMA (bug 1637)
+ #if EIGEN_GNUC_AT_LEAST(6,0) && defined(EIGEN_VECTORIZE_SSE)
+ #define EIGEN_GEBP_2PX4_SPILLING_WORKAROUND __asm__ ("" : [a0] "+x,m" (A0),[a1] "+x,m" (A1));
+ #else
+ #define EIGEN_GEBP_2PX4_SPILLING_WORKAROUND
+ #endif
+#define EIGEN_GEBGP_ONESTEP(K) \
+ do { \
+ EIGEN_ASM_COMMENT("begin step of gebp micro kernel 2pX4"); \
+ traits.loadLhs(&blA[(0 + 2 * K) * LhsProgress], A0); \
+ traits.loadLhs(&blA[(1 + 2 * K) * LhsProgress], A1); \
+ traits.loadRhs(&blB[(0 + 4 * K) * RhsProgress], rhs_panel); \
+ traits.madd(A0, rhs_panel, C0, T0, fix<0>); \
+ traits.madd(A1, rhs_panel, C4, T0, fix<0>); \
+ traits.madd(A0, rhs_panel, C1, T0, fix<1>); \
+ traits.madd(A1, rhs_panel, C5, T0, fix<1>); \
+ traits.madd(A0, rhs_panel, C2, T0, fix<2>); \
+ traits.madd(A1, rhs_panel, C6, T0, fix<2>); \
+ traits.madd(A0, rhs_panel, C3, T0, fix<3>); \
+ traits.madd(A1, rhs_panel, C7, T0, fix<3>); \
+ EIGEN_GEBP_2PX4_SPILLING_WORKAROUND \
+ EIGEN_ASM_COMMENT("end step of gebp micro kernel 2pX4"); \
+ } while (false)
- #define EIGEN_GEBGP_ONESTEP(K) \
- do { \
- EIGEN_ASM_COMMENT("begin step of gebp micro kernel 2pX4"); \
- EIGEN_ASM_COMMENT("Note: these asm comments work around bug 935!"); \
- traits.loadLhs(&blA[(0+2*K)*LhsProgress], A0); \
- traits.loadLhs(&blA[(1+2*K)*LhsProgress], A1); \
- traits.broadcastRhs(&blB[(0+4*K)*RhsProgress], B_0, B1, B2, B3); \
- traits.madd(A0, B_0, C0, T0); \
- traits.madd(A1, B_0, C4, B_0); \
- traits.madd(A0, B1, C1, T0); \
- traits.madd(A1, B1, C5, B1); \
- traits.madd(A0, B2, C2, T0); \
- traits.madd(A1, B2, C6, B2); \
- traits.madd(A0, B3, C3, T0); \
- traits.madd(A1, B3, C7, B3); \
- EIGEN_ASM_COMMENT("end step of gebp micro kernel 2pX4"); \
- } while(false)
-
internal::prefetch(blB+(48+0));
EIGEN_GEBGP_ONESTEP(0);
EIGEN_GEBGP_ONESTEP(1);
@@ -1318,7 +1915,8 @@ void gebp_kernel<LhsScalar,RhsScalar,Index,DataMapper,mr,nr,ConjugateLhs,Conjuga
// process remaining peeled loop
for(Index k=peeled_kc; k<depth; k++)
{
- RhsPacket B_0, B1, B2, B3, T0;
+ RhsPacketx4 rhs_panel;
+ RhsPacket T0;
EIGEN_GEBGP_ONESTEP(0);
blB += 4*RhsProgress;
blA += 2*Traits::LhsProgress;
@@ -1389,8 +1987,8 @@ void gebp_kernel<LhsScalar,RhsScalar,Index,DataMapper,mr,nr,ConjugateLhs,Conjuga
traits.loadLhs(&blA[(0+2*K)*LhsProgress], A0); \
traits.loadLhs(&blA[(1+2*K)*LhsProgress], A1); \
traits.loadRhs(&blB[(0+K)*RhsProgress], B_0); \
- traits.madd(A0, B_0, C0, B1); \
- traits.madd(A1, B_0, C4, B_0); \
+ traits.madd(A0, B_0, C0, B1, fix<0>); \
+ traits.madd(A1, B_0, C4, B_0, fix<0>); \
EIGEN_ASM_COMMENT("end step of gebp micro kernel 2pX1"); \
} while(false)
@@ -1434,174 +2032,29 @@ void gebp_kernel<LhsScalar,RhsScalar,Index,DataMapper,mr,nr,ConjugateLhs,Conjuga
//---------- Process 1 * LhsProgress rows at once ----------
if(mr>=1*Traits::LhsProgress)
{
- // loops on each largest micro horizontal panel of lhs (1*LhsProgress x depth)
- for(Index i=peeled_mc2; i<peeled_mc1; i+=1*LhsProgress)
- {
- // loops on each largest micro vertical panel of rhs (depth * nr)
- for(Index j2=0; j2<packet_cols4; j2+=nr)
- {
- // We select a 1*Traits::LhsProgress x nr micro block of res which is entirely
- // stored into 1 x nr registers.
-
- const LhsScalar* blA = &blockA[i*strideA+offsetA*(1*Traits::LhsProgress)];
- prefetch(&blA[0]);
-
- // gets res block as register
- AccPacket C0, C1, C2, C3;
- traits.initAcc(C0);
- traits.initAcc(C1);
- traits.initAcc(C2);
- traits.initAcc(C3);
-
- LinearMapper r0 = res.getLinearMapper(i, j2 + 0);
- LinearMapper r1 = res.getLinearMapper(i, j2 + 1);
- LinearMapper r2 = res.getLinearMapper(i, j2 + 2);
- LinearMapper r3 = res.getLinearMapper(i, j2 + 3);
-
- r0.prefetch(prefetch_res_offset);
- r1.prefetch(prefetch_res_offset);
- r2.prefetch(prefetch_res_offset);
- r3.prefetch(prefetch_res_offset);
-
- // performs "inner" products
- const RhsScalar* blB = &blockB[j2*strideB+offsetB*nr];
- prefetch(&blB[0]);
- LhsPacket A0;
-
- for(Index k=0; k<peeled_kc; k+=pk)
- {
- EIGEN_ASM_COMMENT("begin gebp micro kernel 1pX4");
- RhsPacket B_0, B1, B2, B3;
-
-#define EIGEN_GEBGP_ONESTEP(K) \
- do { \
- EIGEN_ASM_COMMENT("begin step of gebp micro kernel 1pX4"); \
- EIGEN_ASM_COMMENT("Note: these asm comments work around bug 935!"); \
- traits.loadLhs(&blA[(0+1*K)*LhsProgress], A0); \
- traits.broadcastRhs(&blB[(0+4*K)*RhsProgress], B_0, B1, B2, B3); \
- traits.madd(A0, B_0, C0, B_0); \
- traits.madd(A0, B1, C1, B1); \
- traits.madd(A0, B2, C2, B2); \
- traits.madd(A0, B3, C3, B3); \
- EIGEN_ASM_COMMENT("end step of gebp micro kernel 1pX4"); \
- } while(false)
-
- internal::prefetch(blB+(48+0));
- EIGEN_GEBGP_ONESTEP(0);
- EIGEN_GEBGP_ONESTEP(1);
- EIGEN_GEBGP_ONESTEP(2);
- EIGEN_GEBGP_ONESTEP(3);
- internal::prefetch(blB+(48+16));
- EIGEN_GEBGP_ONESTEP(4);
- EIGEN_GEBGP_ONESTEP(5);
- EIGEN_GEBGP_ONESTEP(6);
- EIGEN_GEBGP_ONESTEP(7);
-
- blB += pk*4*RhsProgress;
- blA += pk*1*LhsProgress;
-
- EIGEN_ASM_COMMENT("end gebp micro kernel 1pX4");
- }
- // process remaining peeled loop
- for(Index k=peeled_kc; k<depth; k++)
- {
- RhsPacket B_0, B1, B2, B3;
- EIGEN_GEBGP_ONESTEP(0);
- blB += 4*RhsProgress;
- blA += 1*LhsProgress;
- }
-#undef EIGEN_GEBGP_ONESTEP
-
- ResPacket R0, R1;
- ResPacket alphav = pset1<ResPacket>(alpha);
-
- R0 = r0.template loadPacket<ResPacket>(0 * Traits::ResPacketSize);
- R1 = r1.template loadPacket<ResPacket>(0 * Traits::ResPacketSize);
- traits.acc(C0, alphav, R0);
- traits.acc(C1, alphav, R1);
- r0.storePacket(0 * Traits::ResPacketSize, R0);
- r1.storePacket(0 * Traits::ResPacketSize, R1);
-
- R0 = r2.template loadPacket<ResPacket>(0 * Traits::ResPacketSize);
- R1 = r3.template loadPacket<ResPacket>(0 * Traits::ResPacketSize);
- traits.acc(C2, alphav, R0);
- traits.acc(C3, alphav, R1);
- r2.storePacket(0 * Traits::ResPacketSize, R0);
- r3.storePacket(0 * Traits::ResPacketSize, R1);
- }
-
- // Deal with remaining columns of the rhs
- for(Index j2=packet_cols4; j2<cols; j2++)
- {
- // One column at a time
- const LhsScalar* blA = &blockA[i*strideA+offsetA*(1*Traits::LhsProgress)];
- prefetch(&blA[0]);
-
- // gets res block as register
- AccPacket C0;
- traits.initAcc(C0);
-
- LinearMapper r0 = res.getLinearMapper(i, j2);
-
- // performs "inner" products
- const RhsScalar* blB = &blockB[j2*strideB+offsetB];
- LhsPacket A0;
-
- for(Index k=0; k<peeled_kc; k+=pk)
- {
- EIGEN_ASM_COMMENT("begin gebp micro kernel 1pX1");
- RhsPacket B_0;
-
-#define EIGEN_GEBGP_ONESTEP(K) \
- do { \
- EIGEN_ASM_COMMENT("begin step of gebp micro kernel 1pX1"); \
- EIGEN_ASM_COMMENT("Note: these asm comments work around bug 935!"); \
- traits.loadLhs(&blA[(0+1*K)*LhsProgress], A0); \
- traits.loadRhs(&blB[(0+K)*RhsProgress], B_0); \
- traits.madd(A0, B_0, C0, B_0); \
- EIGEN_ASM_COMMENT("end step of gebp micro kernel 1pX1"); \
- } while(false);
-
- EIGEN_GEBGP_ONESTEP(0);
- EIGEN_GEBGP_ONESTEP(1);
- EIGEN_GEBGP_ONESTEP(2);
- EIGEN_GEBGP_ONESTEP(3);
- EIGEN_GEBGP_ONESTEP(4);
- EIGEN_GEBGP_ONESTEP(5);
- EIGEN_GEBGP_ONESTEP(6);
- EIGEN_GEBGP_ONESTEP(7);
-
- blB += pk*RhsProgress;
- blA += pk*1*Traits::LhsProgress;
-
- EIGEN_ASM_COMMENT("end gebp micro kernel 1pX1");
- }
-
- // process remaining peeled loop
- for(Index k=peeled_kc; k<depth; k++)
- {
- RhsPacket B_0;
- EIGEN_GEBGP_ONESTEP(0);
- blB += RhsProgress;
- blA += 1*Traits::LhsProgress;
- }
-#undef EIGEN_GEBGP_ONESTEP
- ResPacket R0;
- ResPacket alphav = pset1<ResPacket>(alpha);
- R0 = r0.template loadPacket<ResPacket>(0 * Traits::ResPacketSize);
- traits.acc(C0, alphav, R0);
- r0.storePacket(0 * Traits::ResPacketSize, R0);
- }
- }
+ lhs_process_one_packet<nr, LhsProgress, RhsProgress, LhsScalar, RhsScalar, ResScalar, AccPacket, LhsPacket, RhsPacket, ResPacket, Traits, LinearMapper, DataMapper> p;
+ p(res, blockA, blockB, alpha, peeled_mc2, peeled_mc1, strideA, strideB, offsetA, offsetB, prefetch_res_offset, peeled_kc, pk, cols, depth, packet_cols4);
+ }
+ //---------- Process LhsProgressHalf rows at once ----------
+ if((LhsProgressHalf < LhsProgress) && mr>=LhsProgressHalf)
+ {
+ lhs_process_fraction_of_packet<nr, LhsProgressHalf, RhsProgressHalf, LhsScalar, RhsScalar, ResScalar, AccPacketHalf, LhsPacketHalf, RhsPacketHalf, ResPacketHalf, HalfTraits, LinearMapper, DataMapper> p;
+ p(res, blockA, blockB, alpha, peeled_mc1, peeled_mc_half, strideA, strideB, offsetA, offsetB, prefetch_res_offset, peeled_kc, pk, cols, depth, packet_cols4);
+ }
+ //---------- Process LhsProgressQuarter rows at once ----------
+ if((LhsProgressQuarter < LhsProgressHalf) && mr>=LhsProgressQuarter)
+ {
+ lhs_process_fraction_of_packet<nr, LhsProgressQuarter, RhsProgressQuarter, LhsScalar, RhsScalar, ResScalar, AccPacketQuarter, LhsPacketQuarter, RhsPacketQuarter, ResPacketQuarter, QuarterTraits, LinearMapper, DataMapper> p;
+ p(res, blockA, blockB, alpha, peeled_mc_half, peeled_mc_quarter, strideA, strideB, offsetA, offsetB, prefetch_res_offset, peeled_kc, pk, cols, depth, packet_cols4);
}
//---------- Process remaining rows, 1 at once ----------
- if(peeled_mc1<rows)
+ if(peeled_mc_quarter<rows)
{
// loop on each panel of the rhs
for(Index j2=0; j2<packet_cols4; j2+=nr)
{
// loop on each row of the lhs (1*LhsProgress x depth)
- for(Index i=peeled_mc1; i<rows; i+=1)
+ for(Index i=peeled_mc_quarter; i<rows; i+=1)
{
const LhsScalar* blA = &blockA[i*strideA+offsetA];
prefetch(&blA[0]);
@@ -1614,7 +2067,7 @@ void gebp_kernel<LhsScalar,RhsScalar,Index,DataMapper,mr,nr,ConjugateLhs,Conjuga
const int SResPacketQuarterSize = unpacket_traits<typename unpacket_traits<typename unpacket_traits<SResPacket>::half>::half>::size;
if ((SwappedTraits::LhsProgress % 4) == 0 &&
(SwappedTraits::LhsProgress<=16) &&
- (SwappedTraits::LhsProgress!=8 || SResPacketHalfSize==nr) &&
+ (SwappedTraits::LhsProgress!=8 || SResPacketHalfSize==nr) &&
(SwappedTraits::LhsProgress!=16 || SResPacketQuarterSize==nr))
{
SAccPacket C0, C1, C2, C3;
@@ -1638,15 +2091,15 @@ void gebp_kernel<LhsScalar,RhsScalar,Index,DataMapper,mr,nr,ConjugateLhs,Conjuga
straits.loadRhsQuad(blA+0*spk, B_0);
straits.loadRhsQuad(blA+1*spk, B_1);
- straits.madd(A0,B_0,C0,B_0);
- straits.madd(A1,B_1,C1,B_1);
+ straits.madd(A0,B_0,C0,B_0, fix<0>);
+ straits.madd(A1,B_1,C1,B_1, fix<0>);
straits.loadLhsUnaligned(blB+2*SwappedTraits::LhsProgress, A0);
straits.loadLhsUnaligned(blB+3*SwappedTraits::LhsProgress, A1);
straits.loadRhsQuad(blA+2*spk, B_0);
straits.loadRhsQuad(blA+3*spk, B_1);
- straits.madd(A0,B_0,C2,B_0);
- straits.madd(A1,B_1,C3,B_1);
+ straits.madd(A0,B_0,C2,B_0, fix<0>);
+ straits.madd(A1,B_1,C3,B_1, fix<0>);
blB += 4*SwappedTraits::LhsProgress;
blA += 4*spk;
@@ -1659,7 +2112,7 @@ void gebp_kernel<LhsScalar,RhsScalar,Index,DataMapper,mr,nr,ConjugateLhs,Conjuga
straits.loadLhsUnaligned(blB, A0);
straits.loadRhsQuad(blA, B_0);
- straits.madd(A0,B_0,C0,B_0);
+ straits.madd(A0,B_0,C0,B_0, fix<0>);
blB += SwappedTraits::LhsProgress;
blA += spk;
@@ -1669,7 +2122,7 @@ void gebp_kernel<LhsScalar,RhsScalar,Index,DataMapper,mr,nr,ConjugateLhs,Conjuga
// Special case where we have to first reduce the accumulation register C0
typedef typename conditional<SwappedTraits::LhsProgress>=8,typename unpacket_traits<SResPacket>::half,SResPacket>::type SResPacketHalf;
typedef typename conditional<SwappedTraits::LhsProgress>=8,typename unpacket_traits<SLhsPacket>::half,SLhsPacket>::type SLhsPacketHalf;
- typedef typename conditional<SwappedTraits::LhsProgress>=8,typename unpacket_traits<SLhsPacket>::half,SRhsPacket>::type SRhsPacketHalf;
+ typedef typename conditional<SwappedTraits::LhsProgress>=8,typename unpacket_traits<SRhsPacket>::half,SRhsPacket>::type SRhsPacketHalf;
typedef typename conditional<SwappedTraits::LhsProgress>=8,typename unpacket_traits<SAccPacket>::half,SAccPacket>::type SAccPacketHalf;
SResPacketHalf R = res.template gatherPacket<SResPacketHalf>(i, j2);
@@ -1683,7 +2136,7 @@ void gebp_kernel<LhsScalar,RhsScalar,Index,DataMapper,mr,nr,ConjugateLhs,Conjuga
straits.loadLhsUnaligned(blB, a0);
straits.loadRhs(blA, b0);
SAccPacketHalf c0 = predux_half_dowto4(C0);
- straits.madd(a0,b0,c0,b0);
+ straits.madd(a0,b0,c0,b0, fix<0>);
straits.acc(c0, alphav, R);
}
else
@@ -1699,7 +2152,7 @@ void gebp_kernel<LhsScalar,RhsScalar,Index,DataMapper,mr,nr,ConjugateLhs,Conjuga
// template form, so that LhsProgress < 16 paths don't
// fail to compile
last_row_process_16_packets<LhsScalar, RhsScalar, Index, DataMapper, mr, nr, ConjugateLhs, ConjugateRhs> p;
- p(res, straits, blA, blB, depth, endk, i, j2,alpha, C0);
+ p(res, straits, blA, blB, depth, endk, i, j2,alpha, C0);
}
else
{
@@ -1744,7 +2197,7 @@ void gebp_kernel<LhsScalar,RhsScalar,Index,DataMapper,mr,nr,ConjugateLhs,Conjuga
for(Index j2=packet_cols4; j2<cols; j2++)
{
// loop on each row of the lhs (1*LhsProgress x depth)
- for(Index i=peeled_mc1; i<rows; i+=1)
+ for(Index i=peeled_mc_quarter; i<rows; i+=1)
{
const LhsScalar* blA = &blockA[i*strideA+offsetA];
prefetch(&blA[0]);
@@ -1791,7 +2244,13 @@ template<typename Scalar, typename Index, typename DataMapper, int Pack1, int Pa
EIGEN_DONT_INLINE void gemm_pack_lhs<Scalar, Index, DataMapper, Pack1, Pack2, Packet, ColMajor, Conjugate, PanelMode>
::operator()(Scalar* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride, Index offset)
{
- enum { PacketSize = unpacket_traits<Packet>::size };
+ typedef typename unpacket_traits<Packet>::half HalfPacket;
+ typedef typename unpacket_traits<typename unpacket_traits<Packet>::half>::half QuarterPacket;
+ enum { PacketSize = unpacket_traits<Packet>::size,
+ HalfPacketSize = unpacket_traits<HalfPacket>::size,
+ QuarterPacketSize = unpacket_traits<QuarterPacket>::size,
+ HasHalf = (int)HalfPacketSize < (int)PacketSize,
+ HasQuarter = (int)QuarterPacketSize < (int)HalfPacketSize};
EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK LHS");
EIGEN_UNUSED_VARIABLE(stride);
@@ -1803,9 +2262,12 @@ EIGEN_DONT_INLINE void gemm_pack_lhs<Scalar, Index, DataMapper, Pack1, Pack2, Pa
const Index peeled_mc3 = Pack1>=3*PacketSize ? (rows/(3*PacketSize))*(3*PacketSize) : 0;
const Index peeled_mc2 = Pack1>=2*PacketSize ? peeled_mc3+((rows-peeled_mc3)/(2*PacketSize))*(2*PacketSize) : 0;
- const Index peeled_mc1 = Pack1>=1*PacketSize ? (rows/(1*PacketSize))*(1*PacketSize) : 0;
- const Index peeled_mc0 = Pack2>=1*PacketSize ? peeled_mc1
- : Pack2>1 ? (rows/Pack2)*Pack2 : 0;
+ const Index peeled_mc1 = Pack1>=1*PacketSize ? peeled_mc2+((rows-peeled_mc2)/(1*PacketSize))*(1*PacketSize) : 0;
+ const Index peeled_mc_half = Pack1>=HalfPacketSize ? peeled_mc1+((rows-peeled_mc1)/(HalfPacketSize))*(HalfPacketSize) : 0;
+ const Index peeled_mc_quarter = Pack1>=QuarterPacketSize ? (rows/(QuarterPacketSize))*(QuarterPacketSize) : 0;
+ const Index last_lhs_progress = rows > peeled_mc_quarter ? (rows - peeled_mc_quarter) & ~1 : 0;
+ const Index peeled_mc0 = Pack2>=PacketSize ? peeled_mc_quarter
+ : Pack2>1 && last_lhs_progress ? (rows/last_lhs_progress)*last_lhs_progress : 0;
Index i=0;
@@ -1864,20 +2326,60 @@ EIGEN_DONT_INLINE void gemm_pack_lhs<Scalar, Index, DataMapper, Pack1, Pack2, Pa
if(PanelMode) count += (1*PacketSize) * (stride-offset-depth);
}
}
- // Pack scalars
+ // Pack half packets
+ if(HasHalf && Pack1>=HalfPacketSize)
+ {
+ for(; i<peeled_mc_half; i+=HalfPacketSize)
+ {
+ if(PanelMode) count += (HalfPacketSize) * offset;
+
+ for(Index k=0; k<depth; k++)
+ {
+ HalfPacket A;
+ A = lhs.template loadPacket<HalfPacket>(i+0*(HalfPacketSize), k);
+ pstoreu(blockA+count, cj.pconj(A));
+ count+=HalfPacketSize;
+ }
+ if(PanelMode) count += (HalfPacketSize) * (stride-offset-depth);
+ }
+ }
+ // Pack quarter packets
+ if(HasQuarter && Pack1>=QuarterPacketSize)
+ {
+ for(; i<peeled_mc_quarter; i+=QuarterPacketSize)
+ {
+ if(PanelMode) count += (QuarterPacketSize) * offset;
+
+ for(Index k=0; k<depth; k++)
+ {
+ QuarterPacket A;
+ A = lhs.template loadPacket<QuarterPacket>(i+0*(QuarterPacketSize), k);
+ pstoreu(blockA+count, cj.pconj(A));
+ count+=QuarterPacketSize;
+ }
+ if(PanelMode) count += (QuarterPacketSize) * (stride-offset-depth);
+ }
+ }
+  // Pack2 may be *smaller* than PacketSize -- that happens for
+  // products like real * complex, where we have to advance the lhs by
+  // half the usual progress in order to duplicate those operands and
+  // address both the real and imaginary parts on the rhs. This portion
+  // packs those half-progress rows until they match the count expected
+  // by the last peeling loop at this point (for the rhs).
if(Pack2<PacketSize && Pack2>1)
{
- for(; i<peeled_mc0; i+=Pack2)
+ for(; i<peeled_mc0; i+=last_lhs_progress)
{
- if(PanelMode) count += Pack2 * offset;
+ if(PanelMode) count += last_lhs_progress * offset;
for(Index k=0; k<depth; k++)
- for(Index w=0; w<Pack2; w++)
+ for(Index w=0; w<last_lhs_progress; w++)
blockA[count++] = cj(lhs(i+w, k));
- if(PanelMode) count += Pack2 * (stride-offset-depth);
+ if(PanelMode) count += last_lhs_progress * (stride-offset-depth);
}
}
+ // Pack scalars
for(; i<rows; i++)
{
if(PanelMode) count += offset;
@@ -1898,7 +2400,13 @@ template<typename Scalar, typename Index, typename DataMapper, int Pack1, int Pa
EIGEN_DONT_INLINE void gemm_pack_lhs<Scalar, Index, DataMapper, Pack1, Pack2, Packet, RowMajor, Conjugate, PanelMode>
::operator()(Scalar* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride, Index offset)
{
- enum { PacketSize = unpacket_traits<Packet>::size };
+ typedef typename unpacket_traits<Packet>::half HalfPacket;
+ typedef typename unpacket_traits<typename unpacket_traits<Packet>::half>::half QuarterPacket;
+ enum { PacketSize = unpacket_traits<Packet>::size,
+ HalfPacketSize = unpacket_traits<HalfPacket>::size,
+ QuarterPacketSize = unpacket_traits<QuarterPacket>::size,
+ HasHalf = (int)HalfPacketSize < (int)PacketSize,
+ HasQuarter = (int)QuarterPacketSize < (int)HalfPacketSize};
EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK LHS");
EIGEN_UNUSED_VARIABLE(stride);
@@ -1906,37 +2414,51 @@ EIGEN_DONT_INLINE void gemm_pack_lhs<Scalar, Index, DataMapper, Pack1, Pack2, Pa
eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));
conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
Index count = 0;
+ bool gone_half = false, gone_quarter = false, gone_last = false;
-// const Index peeled_mc3 = Pack1>=3*PacketSize ? (rows/(3*PacketSize))*(3*PacketSize) : 0;
-// const Index peeled_mc2 = Pack1>=2*PacketSize ? peeled_mc3+((rows-peeled_mc3)/(2*PacketSize))*(2*PacketSize) : 0;
-// const Index peeled_mc1 = Pack1>=1*PacketSize ? (rows/(1*PacketSize))*(1*PacketSize) : 0;
-
- int pack = Pack1;
Index i = 0;
+ int pack = Pack1;
+ int psize = PacketSize;
while(pack>0)
{
Index remaining_rows = rows-i;
- Index peeled_mc = i+(remaining_rows/pack)*pack;
+ Index peeled_mc = gone_last ? Pack2>1 ? (rows/pack)*pack : 0 : i+(remaining_rows/pack)*pack;
+ Index starting_pos = i;
for(; i<peeled_mc; i+=pack)
{
if(PanelMode) count += pack * offset;
- const Index peeled_k = (depth/PacketSize)*PacketSize;
Index k=0;
- if(pack>=PacketSize)
+ if(pack>=psize && psize >= QuarterPacketSize)
{
- for(; k<peeled_k; k+=PacketSize)
+ const Index peeled_k = (depth/psize)*psize;
+ for(; k<peeled_k; k+=psize)
{
- for (Index m = 0; m < pack; m += PacketSize)
+ for (Index m = 0; m < pack; m += psize)
{
- PacketBlock<Packet> kernel;
- for (int p = 0; p < PacketSize; ++p) kernel.packet[p] = lhs.template loadPacket<Packet>(i+p+m, k);
- ptranspose(kernel);
- for (int p = 0; p < PacketSize; ++p) pstore(blockA+count+m+(pack)*p, cj.pconj(kernel.packet[p]));
+ if (psize == PacketSize) {
+ PacketBlock<Packet> kernel;
+ for (int p = 0; p < psize; ++p) kernel.packet[p] = lhs.template loadPacket<Packet>(i+p+m, k);
+ ptranspose(kernel);
+ for (int p = 0; p < psize; ++p) pstore(blockA+count+m+(pack)*p, cj.pconj(kernel.packet[p]));
+ } else if (HasHalf && psize == HalfPacketSize) {
+ gone_half = true;
+ PacketBlock<HalfPacket> kernel_half;
+ for (int p = 0; p < psize; ++p) kernel_half.packet[p] = lhs.template loadPacket<HalfPacket>(i+p+m, k);
+ ptranspose(kernel_half);
+ for (int p = 0; p < psize; ++p) pstore(blockA+count+m+(pack)*p, cj.pconj(kernel_half.packet[p]));
+ } else if (HasQuarter && psize == QuarterPacketSize) {
+ gone_quarter = true;
+ PacketBlock<QuarterPacket> kernel_quarter;
+ for (int p = 0; p < psize; ++p) kernel_quarter.packet[p] = lhs.template loadPacket<QuarterPacket>(i+p+m, k);
+ ptranspose(kernel_quarter);
+ for (int p = 0; p < psize; ++p) pstore(blockA+count+m+(pack)*p, cj.pconj(kernel_quarter.packet[p]));
+ }
}
- count += PacketSize*pack;
+ count += psize*pack;
}
}
+
for(; k<depth; k++)
{
Index w=0;
@@ -1959,9 +2481,28 @@ EIGEN_DONT_INLINE void gemm_pack_lhs<Scalar, Index, DataMapper, Pack1, Pack2, Pa
if(PanelMode) count += pack * (stride-offset-depth);
}
- pack -= PacketSize;
- if(pack<Pack2 && (pack+PacketSize)!=Pack2)
- pack = Pack2;
+ pack -= psize;
+ Index left = rows - i;
+ if (pack <= 0) {
+ if (!gone_last &&
+ (starting_pos == i || left >= psize/2 || left >= psize/4) &&
+ ((psize/2 == HalfPacketSize && HasHalf && !gone_half) ||
+ (psize/2 == QuarterPacketSize && HasQuarter && !gone_quarter))) {
+ psize /= 2;
+ pack = psize;
+ continue;
+ }
+      // Pack2 may be *smaller* than PacketSize -- that happens for
+      // products like real * complex, where we have to advance the lhs by
+      // half the usual progress in order to duplicate those operands and
+      // address both the real and imaginary parts on the rhs. This portion
+      // packs those half-progress rows until they match the count expected
+      // by the last peeling loop at this point (for the rhs).
+ if (Pack2 < PacketSize && !gone_last) {
+ gone_last = true;
+ psize = pack = left & ~1;
+ }
+ }
}
for(; i<rows; i++)
diff --git a/Eigen/src/Core/products/GeneralMatrixMatrix.h b/Eigen/src/Core/products/GeneralMatrixMatrix.h
index f49abcad5..90c9c4647 100644
--- a/Eigen/src/Core/products/GeneralMatrixMatrix.h
+++ b/Eigen/src/Core/products/GeneralMatrixMatrix.h
@@ -469,6 +469,20 @@ struct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct>
if(a_lhs.cols()==0 || a_lhs.rows()==0 || a_rhs.cols()==0)
return;
+  // Fall back to GEMV if either the lhs or rhs is a runtime vector
+ if (dst.cols() == 1)
+ {
+ typename Dest::ColXpr dst_vec(dst.col(0));
+ return internal::generic_product_impl<Lhs,typename Rhs::ConstColXpr,DenseShape,DenseShape,GemvProduct>
+ ::scaleAndAddTo(dst_vec, a_lhs, a_rhs.col(0), alpha);
+ }
+ else if (dst.rows() == 1)
+ {
+ typename Dest::RowXpr dst_vec(dst.row(0));
+ return internal::generic_product_impl<typename Lhs::ConstRowXpr,Rhs,DenseShape,DenseShape,GemvProduct>
+ ::scaleAndAddTo(dst_vec, a_lhs.row(0), a_rhs, alpha);
+ }
+
typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(a_lhs);
typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(a_rhs);
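The two early returns above reroute degenerate GEMM calls to the GEMV kernels. A minimal usage sketch of the user-visible situation (ordinary Eigen code, nothing in it is specific to this patch):

// Sketch: a product whose destination has a single column at runtime is now
// dispatched to the matrix-vector (GEMV) path instead of the blocked GEMM kernel.
#include <Eigen/Dense>
int main() {
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(100, 50);
  Eigen::MatrixXd b = Eigen::MatrixXd::Random(50, 1);   // dynamically sized as a column vector
  Eigen::MatrixXd c(100, 1);
  c.noalias() = A * b;   // dst.cols()==1 -> scaleAndAddTo falls back to the GemvProduct path
  return c.rows() == 100 ? 0 : 1;
}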
diff --git a/Eigen/src/Core/products/Parallelizer.h b/Eigen/src/Core/products/Parallelizer.h
index 92e9b0d9f..e01e798f1 100644
--- a/Eigen/src/Core/products/Parallelizer.h
+++ b/Eigen/src/Core/products/Parallelizer.h
@@ -21,7 +21,8 @@ namespace internal {
/** \internal */
inline void manage_multi_threading(Action action, int* v)
{
- static EIGEN_UNUSED int m_maxThreads = -1;
+ static int m_maxThreads = -1;
+ EIGEN_UNUSED_VARIABLE(m_maxThreads);
if(action==SetAction)
{
diff --git a/Eigen/src/Core/products/SelfadjointMatrixMatrix.h b/Eigen/src/Core/products/SelfadjointMatrixMatrix.h
index c84c71609..673073601 100644
--- a/Eigen/src/Core/products/SelfadjointMatrixMatrix.h
+++ b/Eigen/src/Core/products/SelfadjointMatrixMatrix.h
@@ -45,14 +45,23 @@ struct symm_pack_lhs
}
void operator()(Scalar* blockA, const Scalar* _lhs, Index lhsStride, Index cols, Index rows)
{
- enum { PacketSize = packet_traits<Scalar>::size };
+ typedef typename unpacket_traits<typename packet_traits<Scalar>::type>::half HalfPacket;
+ typedef typename unpacket_traits<typename unpacket_traits<typename packet_traits<Scalar>::type>::half>::half QuarterPacket;
+ enum { PacketSize = packet_traits<Scalar>::size,
+ HalfPacketSize = unpacket_traits<HalfPacket>::size,
+ QuarterPacketSize = unpacket_traits<QuarterPacket>::size,
+ HasHalf = (int)HalfPacketSize < (int)PacketSize,
+ HasQuarter = (int)QuarterPacketSize < (int)HalfPacketSize};
+
const_blas_data_mapper<Scalar,Index,StorageOrder> lhs(_lhs,lhsStride);
Index count = 0;
//Index peeled_mc3 = (rows/Pack1)*Pack1;
const Index peeled_mc3 = Pack1>=3*PacketSize ? (rows/(3*PacketSize))*(3*PacketSize) : 0;
const Index peeled_mc2 = Pack1>=2*PacketSize ? peeled_mc3+((rows-peeled_mc3)/(2*PacketSize))*(2*PacketSize) : 0;
- const Index peeled_mc1 = Pack1>=1*PacketSize ? (rows/(1*PacketSize))*(1*PacketSize) : 0;
+ const Index peeled_mc1 = Pack1>=1*PacketSize ? peeled_mc2+((rows-peeled_mc2)/(1*PacketSize))*(1*PacketSize) : 0;
+ const Index peeled_mc_half = Pack1>=HalfPacketSize ? peeled_mc1+((rows-peeled_mc1)/(HalfPacketSize))*(HalfPacketSize) : 0;
+ const Index peeled_mc_quarter = Pack1>=QuarterPacketSize ? peeled_mc_half+((rows-peeled_mc_half)/(QuarterPacketSize))*(QuarterPacketSize) : 0;
if(Pack1>=3*PacketSize)
for(Index i=0; i<peeled_mc3; i+=3*PacketSize)
@@ -66,8 +75,16 @@ struct symm_pack_lhs
for(Index i=peeled_mc2; i<peeled_mc1; i+=1*PacketSize)
pack<1*PacketSize>(blockA, lhs, cols, i, count);
+ if(HasHalf && Pack1>=HalfPacketSize)
+ for(Index i=peeled_mc1; i<peeled_mc_half; i+=HalfPacketSize)
+ pack<HalfPacketSize>(blockA, lhs, cols, i, count);
+
+ if(HasQuarter && Pack1>=QuarterPacketSize)
+ for(Index i=peeled_mc_half; i<peeled_mc_quarter; i+=QuarterPacketSize)
+ pack<QuarterPacketSize>(blockA, lhs, cols, i, count);
+
// do the same with mr==1
- for(Index i=peeled_mc1; i<rows; i++)
+ for(Index i=peeled_mc_quarter; i<rows; i++)
{
for(Index k=0; k<i; k++)
blockA[count++] = lhs(i, k); // normal
diff --git a/Eigen/src/Core/util/BlasUtil.h b/Eigen/src/Core/util/BlasUtil.h
index a32630ed7..e6689c656 100755
--- a/Eigen/src/Core/util/BlasUtil.h
+++ b/Eigen/src/Core/util/BlasUtil.h
@@ -274,7 +274,8 @@ template<typename XprType> struct blas_traits
HasUsableDirectAccess = ( (int(XprType::Flags)&DirectAccessBit)
&& ( bool(XprType::IsVectorAtCompileTime)
|| int(inner_stride_at_compile_time<XprType>::ret) == 1)
- ) ? 1 : 0
+ ) ? 1 : 0,
+ HasScalarFactor = false
};
typedef typename conditional<bool(HasUsableDirectAccess),
ExtractType,
@@ -306,6 +307,9 @@ template<typename Scalar, typename NestedXpr, typename Plain>
struct blas_traits<CwiseBinaryOp<scalar_product_op<Scalar>, const CwiseNullaryOp<scalar_constant_op<Scalar>,Plain>, NestedXpr> >
: blas_traits<NestedXpr>
{
+ enum {
+ HasScalarFactor = true
+ };
typedef blas_traits<NestedXpr> Base;
typedef CwiseBinaryOp<scalar_product_op<Scalar>, const CwiseNullaryOp<scalar_constant_op<Scalar>,Plain>, NestedXpr> XprType;
typedef typename Base::ExtractType ExtractType;
@@ -317,6 +321,9 @@ template<typename Scalar, typename NestedXpr, typename Plain>
struct blas_traits<CwiseBinaryOp<scalar_product_op<Scalar>, NestedXpr, const CwiseNullaryOp<scalar_constant_op<Scalar>,Plain> > >
: blas_traits<NestedXpr>
{
+ enum {
+ HasScalarFactor = true
+ };
typedef blas_traits<NestedXpr> Base;
typedef CwiseBinaryOp<scalar_product_op<Scalar>, NestedXpr, const CwiseNullaryOp<scalar_constant_op<Scalar>,Plain> > XprType;
typedef typename Base::ExtractType ExtractType;
@@ -335,6 +342,9 @@ template<typename Scalar, typename NestedXpr>
struct blas_traits<CwiseUnaryOp<scalar_opposite_op<Scalar>, NestedXpr> >
: blas_traits<NestedXpr>
{
+ enum {
+ HasScalarFactor = true
+ };
typedef blas_traits<NestedXpr> Base;
typedef CwiseUnaryOp<scalar_opposite_op<Scalar>, NestedXpr> XprType;
typedef typename Base::ExtractType ExtractType;
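The new HasScalarFactor flag lets callers of blas_traits detect at compile time that an operand carries a scalar factor (s*X, X*s or -X). The user-visible behaviour it supports is sketched below: such factors are folded into the product's alpha rather than evaluated into temporaries.

// Sketch: both scalar factors below are stripped by blas_traits and merged into
// the GEMM alpha, so neither (2*A) nor (-B) is materialized as a temporary.
#include <Eigen/Dense>
int main() {
  Eigen::MatrixXf A = Eigen::MatrixXf::Random(64, 64);
  Eigen::MatrixXf B = Eigen::MatrixXf::Random(64, 64);
  Eigen::MatrixXf C(64, 64);
  C.noalias() = (2.0f * A) * (-B);   // executed as one GEMM call with alpha = -2
  return 0;
}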
diff --git a/Eigen/src/Core/util/ConfigureVectorization.h b/Eigen/src/Core/util/ConfigureVectorization.h
index 263604597..b00d8b038 100644
--- a/Eigen/src/Core/util/ConfigureVectorization.h
+++ b/Eigen/src/Core/util/ConfigureVectorization.h
@@ -10,13 +10,6 @@
#ifndef EIGEN_CONFIGURE_VECTORIZATION_H
#define EIGEN_CONFIGURE_VECTORIZATION_H
-// FIXME: not sure why this is needed, perhaps it is not needed anymore.
-#ifdef __NVCC__
- #ifndef EIGEN_DONT_VECTORIZE
- #define EIGEN_DONT_VECTORIZE
- #endif
-#endif
-
//------------------------------------------------------------------------------------------
// Static and dynamic alignment control
//
@@ -36,10 +29,15 @@
*
* If we made alignment depend on whether or not EIGEN_VECTORIZE is defined, it would be impossible to link
* vectorized and non-vectorized code.
+ *
+ * FIXME: this code can be cleaned up once we switch to proper C++11 only.
*/
#if (defined EIGEN_CUDACC)
#define EIGEN_ALIGN_TO_BOUNDARY(n) __align__(n)
#define EIGEN_ALIGNOF(x) __alignof(x)
+#elif EIGEN_HAS_ALIGNAS
+ #define EIGEN_ALIGN_TO_BOUNDARY(n) alignas(n)
+ #define EIGEN_ALIGNOF(x) alignof(x)
#elif EIGEN_COMP_GNUC || EIGEN_COMP_PGI || EIGEN_COMP_IBM || EIGEN_COMP_ARM
#define EIGEN_ALIGN_TO_BOUNDARY(n) __attribute__((aligned(n)))
#define EIGEN_ALIGNOF(x) __alignof(x)
@@ -51,12 +49,18 @@
#define EIGEN_ALIGN_TO_BOUNDARY(n) __attribute__((aligned(n)))
#define EIGEN_ALIGNOF(x) __alignof(x)
#else
- #error Please tell me what is the equivalent of __attribute__((aligned(n))) and __alignof(x) for your compiler
+ #error Please tell me what is the equivalent of alignas(n) and alignof(x) for your compiler
#endif
// If the user explicitly disable vectorization, then we also disable alignment
#if defined(EIGEN_DONT_VECTORIZE)
- #define EIGEN_IDEAL_MAX_ALIGN_BYTES 0
+ #if defined(EIGEN_GPUCC)
+ // GPU code is always vectorized and requires memory alignment for
+ // statically allocated buffers.
+ #define EIGEN_IDEAL_MAX_ALIGN_BYTES 16
+ #else
+ #define EIGEN_IDEAL_MAX_ALIGN_BYTES 0
+ #endif
#elif defined(__AVX512F__)
// 64 bytes static alignment is preferred only if really required
#define EIGEN_IDEAL_MAX_ALIGN_BYTES 64
@@ -183,8 +187,6 @@
//----------------------------------------------------------------------
-
-
// if alignment is disabled, then disable vectorization. Note: EIGEN_MAX_ALIGN_BYTES is the proper check, it takes into
// account both the user's will (EIGEN_MAX_ALIGN_BYTES,EIGEN_DONT_ALIGN) and our own platform checks
#if EIGEN_MAX_ALIGN_BYTES==0
@@ -211,7 +213,7 @@
#endif
-#ifndef EIGEN_DONT_VECTORIZE
+#if !(defined(EIGEN_DONT_VECTORIZE) || defined(EIGEN_GPUCC))
#if defined (EIGEN_SSE2_ON_NON_MSVC_BUT_NOT_OLD_GCC) || defined(EIGEN_SSE2_ON_MSVC_2008_OR_LATER)
@@ -253,10 +255,19 @@
#define EIGEN_VECTORIZE_SSE4_1
#define EIGEN_VECTORIZE_SSE4_2
#endif
- #ifdef __FMA__
+ #if defined(__FMA__) || (EIGEN_COMP_MSVC && defined(__AVX2__))
+ // MSVC does not expose a switch dedicated for FMA
+ // For MSVC, AVX2 => FMA
#define EIGEN_VECTORIZE_FMA
#endif
#if defined(__AVX512F__)
+ #ifndef EIGEN_VECTORIZE_FMA
+ #if EIGEN_COMP_GNUC
+ #error Please add -mfma to your compiler flags: compiling with -mavx512f alone without SSE/AVX FMA is not supported (bug 1638).
+ #else
+ #error Please enable FMA in your compiler flags (e.g. -mfma): compiling with AVX512 alone without SSE/AVX FMA is not supported (bug 1638).
+ #endif
+ #endif
#define EIGEN_VECTORIZE_AVX512
#define EIGEN_VECTORIZE_AVX2
#define EIGEN_VECTORIZE_AVX
@@ -375,7 +386,7 @@
#endif
#if defined(EIGEN_HAS_CUDA_FP16)
- #include <host_defines.h>
+ #include <cuda_runtime_api.h>
#include <cuda_fp16.h>
#endif
diff --git a/Eigen/src/Core/util/ForwardDeclarations.h b/Eigen/src/Core/util/ForwardDeclarations.h
index 3ab3a5f50..050d15e96 100644
--- a/Eigen/src/Core/util/ForwardDeclarations.h
+++ b/Eigen/src/Core/util/ForwardDeclarations.h
@@ -47,11 +47,7 @@ template<typename T> struct NumTraits;
template<typename Derived> struct EigenBase;
template<typename Derived> class DenseBase;
template<typename Derived> class PlainObjectBase;
-
-
-template<typename Derived,
- int Level = internal::accessors_level<Derived>::value >
-class DenseCoeffsBase;
+template<typename Derived, int Level> class DenseCoeffsBase;
template<typename _Scalar, int _Rows, int _Cols,
int _Options = AutoAlign |
@@ -260,6 +256,7 @@ template<typename MatrixType> class HouseholderQR;
template<typename MatrixType> class ColPivHouseholderQR;
template<typename MatrixType> class FullPivHouseholderQR;
template<typename MatrixType> class CompleteOrthogonalDecomposition;
+template<typename MatrixType> class SVDBase;
template<typename MatrixType, int QRPreconditioner = ColPivHouseholderQRPreconditioner> class JacobiSVD;
template<typename MatrixType> class BDCSVD;
template<typename MatrixType, int UpLo = Lower> class LLT;
diff --git a/Eigen/src/Core/util/IndexedViewHelper.h b/Eigen/src/Core/util/IndexedViewHelper.h
index 40e16fdb4..1cda85060 100644
--- a/Eigen/src/Core/util/IndexedViewHelper.h
+++ b/Eigen/src/Core/util/IndexedViewHelper.h
@@ -23,7 +23,7 @@ struct symbolic_last_tag {};
* Can be used as a parameter to Eigen::seq and Eigen::seqN functions to symbolically reference the last element/row/columns
* of the underlying vector or matrix once passed to DenseBase::operator()(const RowIndices&, const ColIndices&).
*
- * This symbolic placeholder support standard arithmetic operation.
+ * This symbolic placeholder supports standard arithmetic operations.
*
* A typical usage example would be:
* \code
@@ -44,7 +44,7 @@ static const symbolic::SymbolExpr<internal::symbolic_last_tag> last; // PLEASE u
* reference the last+1 element/row/columns of the underlying vector or matrix once
* passed to DenseBase::operator()(const RowIndices&, const ColIndices&).
*
- * This symbolic placeholder support standard arithmetic operation.
+ * This symbolic placeholder supports standard arithmetic operations.
* It is essentially an alias to last+fix<1>.
*
* \sa last
diff --git a/Eigen/src/Core/util/Macros.h b/Eigen/src/Core/util/Macros.h
index a7c6f50c3..ce3633388 100644
--- a/Eigen/src/Core/util/Macros.h
+++ b/Eigen/src/Core/util/Macros.h
@@ -129,16 +129,21 @@
#define EIGEN_COMP_MSVC_STRICT 0
#endif
-/// \internal EIGEN_COMP_IBM set to 1 if the compiler is IBM XL C++
-#if defined(__IBMCPP__) || defined(__xlc__)
- #define EIGEN_COMP_IBM 1
+/// \internal EIGEN_COMP_IBM set to xlc version if the compiler is IBM XL C++
+// XLC version
+// 3.1 0x0301
+// 4.5 0x0405
+// 5.0 0x0500
+// 12.1 0x0C01
+#if defined(__IBMCPP__) || defined(__xlc__) || defined(__ibmxl__)
+ #define EIGEN_COMP_IBM __xlC__
#else
#define EIGEN_COMP_IBM 0
#endif
-/// \internal EIGEN_COMP_PGI set to 1 if the compiler is Portland Group Compiler
+/// \internal EIGEN_COMP_PGI set to PGI version if the compiler is Portland Group Compiler
#if defined(__PGI)
- #define EIGEN_COMP_PGI 1
+ #define EIGEN_COMP_PGI (__PGIC__*100+__PGIC_MINOR__)
#else
#define EIGEN_COMP_PGI 0
#endif
@@ -347,9 +352,17 @@
#define EIGEN_OS_WIN_STRICT 0
#endif
-/// \internal EIGEN_OS_SUN set to 1 if the OS is SUN
+/// \internal EIGEN_OS_SUN set to __SUNPRO_C if the OS is SUN
+// compiler solaris __SUNPRO_C
+// version studio
+// 5.7 10 0x570
+// 5.8 11 0x580
+// 5.9 12 0x590
+// 5.10 12.1 0x5100
+// 5.11 12.2 0x5110
+// 5.12 12.3 0x5120
#if (defined(sun) || defined(__sun)) && !(defined(__SVR4) || defined(__svr4__))
- #define EIGEN_OS_SUN 1
+ #define EIGEN_OS_SUN __SUNPRO_C
#else
#define EIGEN_OS_SUN 0
#endif
@@ -495,13 +508,33 @@
#define EIGEN_HAS_STATIC_ARRAY_TEMPLATE 0
#endif
-#if EIGEN_MAX_CPP_VER>=11 && (defined(__cplusplus) && (__cplusplus >= 201103L) || EIGEN_COMP_MSVC >= 1900)
+
+// The macro EIGEN_COMP_CXXVER defines the c++ version expected by the compiler.
+// For instance, if compiling with gcc and -std=c++17, then EIGEN_COMP_CXXVER
+// is defined to 17.
+#if (defined(__cplusplus) && (__cplusplus > 201402L) || EIGEN_COMP_MSVC >= 1914)
+#define EIGEN_COMP_CXXVER 17
+#elif (defined(__cplusplus) && (__cplusplus > 201103L) || EIGEN_COMP_MSVC >= 1910)
+#define EIGEN_COMP_CXXVER 14
+#elif (defined(__cplusplus) && (__cplusplus >= 201103L) || EIGEN_COMP_MSVC >= 1900)
+#define EIGEN_COMP_CXXVER 11
+#else
+#define EIGEN_COMP_CXXVER 03
+#endif
+
+
+// The macros EIGEN_HAS_CXX?? define a rough estimate of the available c++ features,
+// but in practice we should not rely on them; rather, rely on the availability of
+// individual features as defined later.
+// This is why there is no EIGEN_HAS_CXX17.
+// FIXME: get rid of EIGEN_HAS_CXX14 and maybe even EIGEN_HAS_CXX11.
+#if EIGEN_MAX_CPP_VER>=11 && EIGEN_COMP_CXXVER>=11
#define EIGEN_HAS_CXX11 1
#else
#define EIGEN_HAS_CXX11 0
#endif
-#if EIGEN_MAX_CPP_VER>=14 && (defined(__cplusplus) && (__cplusplus > 201103L) || EIGEN_COMP_MSVC >= 1910)
+#if EIGEN_MAX_CPP_VER>=14 && EIGEN_COMP_CXXVER>=14
#define EIGEN_HAS_CXX14 1
#else
#define EIGEN_HAS_CXX14 0
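A minimal sketch of consuming the new macro; it assumes the patched Macros.h above, since EIGEN_COMP_CXXVER did not exist before this change:

// Sketch: printing the C++ dialect Eigen detected for this translation unit.
#include <Eigen/Core>
#include <cstdio>
int main() {
#if EIGEN_COMP_CXXVER >= 14
  std::printf("Eigen sees C++%d (EIGEN_HAS_CXX14=%d)\n", EIGEN_COMP_CXXVER, EIGEN_HAS_CXX14);
#else
  std::printf("Eigen sees C++%d\n", EIGEN_COMP_CXXVER);
#endif
  return 0;
}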
@@ -546,6 +579,22 @@
#endif
#endif
+#ifndef EIGEN_HAS_ALIGNAS
+#if EIGEN_MAX_CPP_VER>=11 && EIGEN_HAS_CXX11 && \
+ ( __has_feature(cxx_alignas) \
+ || EIGEN_HAS_CXX14 \
+ || (EIGEN_COMP_MSVC >= 1800) \
+ || (EIGEN_GNUC_AT_LEAST(4,8)) \
+ || (EIGEN_COMP_CLANG>=305) \
+ || (EIGEN_COMP_ICC>=1500) \
+ || (EIGEN_COMP_PGI>=1500) \
+ || (EIGEN_COMP_SUNCC>=0x5130))
+#define EIGEN_HAS_ALIGNAS 1
+#else
+#define EIGEN_HAS_ALIGNAS 0
+#endif
+#endif
+
// Does the compiler support type_traits?
// - full support of type traits was added only to GCC 5.1.0.
// - 20150626 corresponds to the last release of 4.x libstdc++
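With EIGEN_HAS_ALIGNAS set, the EIGEN_ALIGN_TO_BOUNDARY macro from ConfigureVectorization.h (changed earlier in this patch) expands to standard alignas(n) instead of a compiler-specific attribute. A short sketch, assuming a C++11 build where the feature test above succeeds:

// Sketch: requesting 32-byte alignment through Eigen's portable macro.
#include <Eigen/Core>
struct PacketBuffer {
  EIGEN_ALIGN_TO_BOUNDARY(32) float data[8];   // expands to alignas(32) when EIGEN_HAS_ALIGNAS is 1
};
static_assert(alignof(PacketBuffer) >= 32, "expected at least 32-byte alignment");
int main() {
  PacketBuffer buf;
  (void)buf;
  return 0;
}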
@@ -649,6 +698,23 @@
#endif
#endif
+// NOTE: the required Apple clang version is very conservative
+// and it could be that XCode 9 works just fine.
+// NOTE: the MSVC version is based on https://en.cppreference.com/w/cpp/compiler_support
+// and not tested.
+#ifndef EIGEN_HAS_CXX17_OVERALIGN
+#if EIGEN_MAX_CPP_VER>=17 && EIGEN_COMP_CXXVER>=17 && ( \
+ (EIGEN_COMP_MSVC >= 1912) \
+ || (EIGEN_GNUC_AT_LEAST(7,0)) \
+ || ((!defined(__apple_build_version__)) && (EIGEN_COMP_CLANG>=500)) \
+ || (( defined(__apple_build_version__)) && (__apple_build_version__>=10000000)) \
+ )
+#define EIGEN_HAS_CXX17_OVERALIGN 1
+#else
+#define EIGEN_HAS_CXX17_OVERALIGN 0
+#endif
+#endif
+
#if defined(EIGEN_CUDACC) && EIGEN_HAS_CONSTEXPR
// While available already with c++11, this is useful mostly starting with c++14 and relaxed constexpr rules
#if defined(__NVCC__)
@@ -742,10 +808,6 @@
// All functions callable from CUDA/HIP code must be qualified with __device__
#ifdef EIGEN_GPUCC
- #ifndef EIGEN_DONT_VECTORIZE
- #define EIGEN_DONT_VECTORIZE
- #endif
-
#define EIGEN_DEVICE_FUNC __host__ __device__
#else
#define EIGEN_DEVICE_FUNC
@@ -841,7 +903,7 @@
// Suppresses 'unused variable' warnings.
namespace Eigen {
namespace internal {
- template<typename T> EIGEN_DEVICE_FUNC void ignore_unused_variable(const T&) {}
+ template<typename T> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void ignore_unused_variable(const T&) {}
}
}
#define EIGEN_UNUSED_VARIABLE(var) Eigen::internal::ignore_unused_variable(var);
@@ -1035,7 +1097,7 @@ namespace Eigen {
#endif
#define EIGEN_MAKE_SCALAR_BINARY_OP_ONTHERIGHT(METHOD,OPNAME) \
- template <typename T> EIGEN_DEVICE_FUNC inline \
+ template <typename T> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
EIGEN_MSVC10_WORKAROUND_BINARYOP_RETURN_TYPE(const EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(Derived,typename internal::promote_scalar_arg<Scalar EIGEN_COMMA T EIGEN_COMMA EIGEN_SCALAR_BINARY_SUPPORTED(OPNAME,Scalar,T)>::type,OPNAME))\
(METHOD)(const T& scalar) const { \
typedef typename internal::promote_scalar_arg<Scalar,T,EIGEN_SCALAR_BINARY_SUPPORTED(OPNAME,Scalar,T)>::type PromotedT; \
@@ -1044,7 +1106,7 @@ namespace Eigen {
}
#define EIGEN_MAKE_SCALAR_BINARY_OP_ONTHELEFT(METHOD,OPNAME) \
- template <typename T> EIGEN_DEVICE_FUNC inline friend \
+ template <typename T> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE friend \
EIGEN_MSVC10_WORKAROUND_BINARYOP_RETURN_TYPE(const EIGEN_SCALAR_BINARYOP_EXPR_RETURN_TYPE(typename internal::promote_scalar_arg<Scalar EIGEN_COMMA T EIGEN_COMMA EIGEN_SCALAR_BINARY_SUPPORTED(OPNAME,T,Scalar)>::type,Derived,OPNAME)) \
(METHOD)(const T& scalar, const StorageBaseType& matrix) { \
typedef typename internal::promote_scalar_arg<Scalar,T,EIGEN_SCALAR_BINARY_SUPPORTED(OPNAME,T,Scalar)>::type PromotedT; \
@@ -1093,9 +1155,9 @@ namespace Eigen {
# define EIGEN_NOEXCEPT
# define EIGEN_NOEXCEPT_IF(x)
# define EIGEN_NO_THROW throw()
-# if EIGEN_COMP_MSVC
+# if EIGEN_COMP_MSVC || EIGEN_COMP_CXXVER>=17
// MSVC does not support exception specifications (warning C4290),
- // and they are deprecated in c++11 anyway.
+ // and they are deprecated in c++11 anyway. This is even an error in c++17.
# define EIGEN_EXCEPTION_SPEC(X) throw()
# else
# define EIGEN_EXCEPTION_SPEC(X) throw(X)
diff --git a/Eigen/src/Core/util/Memory.h b/Eigen/src/Core/util/Memory.h
index 87b538658..1b12544d2 100644
--- a/Eigen/src/Core/util/Memory.h
+++ b/Eigen/src/Core/util/Memory.h
@@ -360,7 +360,7 @@ template<typename T, bool Align> EIGEN_DEVICE_FUNC inline T* conditional_aligned
template<typename T> EIGEN_DEVICE_FUNC inline void aligned_delete(T *ptr, std::size_t size)
{
destruct_elements_of_array<T>(ptr, size);
- aligned_free(ptr);
+ Eigen::internal::aligned_free(ptr);
}
/** \internal Deletes objects constructed with conditional_aligned_new
@@ -768,6 +768,17 @@ template<typename T> void swap(scoped_array<T> &a,scoped_array<T> &b)
*** Implementation of EIGEN_MAKE_ALIGNED_OPERATOR_NEW [_IF] ***
*****************************************************************************/
+#if EIGEN_HAS_CXX17_OVERALIGN
+
+// C++17 -> no need to bother about alignment anymore :)
+
+#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_NOTHROW(NeedsToAlign)
+#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign)
+#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(Scalar,Size)
+
+#else
+
#if EIGEN_MAX_ALIGN_BYTES!=0
#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_NOTHROW(NeedsToAlign) \
void* operator new(std::size_t size, const std::nothrow_t&) EIGEN_NO_THROW { \
@@ -810,6 +821,8 @@ template<typename T> void swap(scoped_array<T> &a,scoped_array<T> &b)
((EIGEN_MAX_ALIGN_BYTES>=32) && ((sizeof(Scalar)*(Size))%(EIGEN_MAX_ALIGN_BYTES/2)==0)) || \
((EIGEN_MAX_ALIGN_BYTES>=64) && ((sizeof(Scalar)*(Size))%(EIGEN_MAX_ALIGN_BYTES/4)==0)) )))
+#endif
+
/****************************************************************************/
/** \class aligned_allocator
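Since EIGEN_HAS_CXX17_OVERALIGN makes the aligned operator-new macros above expand to nothing, structs holding fixed-size vectorizable members can simply rely on C++17 over-aligned allocation. A hedged sketch of the user-facing consequence:

// Sketch: in a C++17 build with over-aligned new, the macro below is a no-op;
// operator new already honours the alignment of the fixed-size member.
#include <Eigen/Dense>
struct Node {
  Eigen::Vector4d weights;            // fixed-size, potentially over-aligned member
  EIGEN_MAKE_ALIGNED_OPERATOR_NEW     // still needed for pre-C++17 builds
};
int main() {
  Node* n = new Node();
  delete n;
  return 0;
}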
diff --git a/Eigen/src/Core/util/Meta.h b/Eigen/src/Core/util/Meta.h
index 1415b3fc1..8fcb18a94 100755
--- a/Eigen/src/Core/util/Meta.h
+++ b/Eigen/src/Core/util/Meta.h
@@ -636,8 +636,41 @@ template<> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC
bool not_equal_strict(const double& x,const double& y) { return std::not_equal_to<double>()(x,y); }
#endif
+/** \internal extract the bits of the float \a x */
+inline unsigned int as_uint(float x)
+{
+ unsigned int ret;
+ std::memcpy(&ret, &x, sizeof(float));
+ return ret;
+}
+
} // end namespace numext
} // end namespace Eigen
+// Define portable (u)int{32,64} types
+#if EIGEN_HAS_CXX11
+#include <cstdint>
+namespace Eigen {
+namespace numext {
+typedef std::uint32_t uint32_t;
+typedef std::int32_t int32_t;
+typedef std::uint64_t uint64_t;
+typedef std::int64_t int64_t;
+}
+}
+#else
+// Without c++11, all compilers able to compile Eigen also
+// provide the C99 stdint.h header file.
+#include <stdint.h>
+namespace Eigen {
+namespace numext {
+typedef ::uint32_t uint32_t;
+typedef ::int32_t int32_t;
+typedef ::uint64_t uint64_t;
+typedef ::int64_t int64_t;
+}
+}
+#endif
+
#endif // EIGEN_META_H
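The new numext::as_uint helper is a memcpy-based bit cast for floats, so it avoids undefined type punning. A small sketch of what it gives access to:

// Sketch: inspecting the IEEE-754 fields of a float through numext::as_uint.
#include <Eigen/Core>
#include <cstdio>
int main() {
  const float x = -1.5f;
  const unsigned int bits = Eigen::numext::as_uint(x);
  std::printf("sign=%u exponent=0x%02x mantissa=0x%06x\n",
              bits >> 31, (bits >> 23) & 0xffu, bits & 0x7fffffu);   // 1, 0x7f, 0x400000
  return 0;
}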
diff --git a/Eigen/src/Core/util/StaticAssert.h b/Eigen/src/Core/util/StaticAssert.h
index b2f95153e..67714e444 100644
--- a/Eigen/src/Core/util/StaticAssert.h
+++ b/Eigen/src/Core/util/StaticAssert.h
@@ -104,7 +104,8 @@
STORAGE_INDEX_MUST_MATCH=1,
CHOLMOD_SUPPORTS_DOUBLE_PRECISION_ONLY=1,
SELFADJOINTVIEW_ACCEPTS_UPPER_AND_LOWER_MODE_ONLY=1,
- INVALID_TEMPLATE_PARAMETER=1
+ INVALID_TEMPLATE_PARAMETER=1,
+ GPU_TENSOR_CONTRACTION_DOES_NOT_SUPPORT_OUTPUT_KERNELS=1
};
};
diff --git a/Eigen/src/Core/util/XprHelper.h b/Eigen/src/Core/util/XprHelper.h
index 836ff4711..91c2e42e4 100644
--- a/Eigen/src/Core/util/XprHelper.h
+++ b/Eigen/src/Core/util/XprHelper.h
@@ -184,7 +184,8 @@ template<typename T> struct unpacket_traits
enum
{
size = 1,
- alignment = 1
+ alignment = 1,
+ vectorizable = false
};
};
diff --git a/Eigen/src/Eigenvalues/ComplexSchur.h b/Eigen/src/Eigenvalues/ComplexSchur.h
index b8b3490c6..fc71468f8 100644
--- a/Eigen/src/Eigenvalues/ComplexSchur.h
+++ b/Eigen/src/Eigenvalues/ComplexSchur.h
@@ -300,10 +300,13 @@ typename ComplexSchur<MatrixType>::ComplexScalar ComplexSchur<MatrixType>::compu
ComplexScalar trace = t.coeff(0,0) + t.coeff(1,1);
ComplexScalar eival1 = (trace + disc) / RealScalar(2);
ComplexScalar eival2 = (trace - disc) / RealScalar(2);
-
- if(numext::norm1(eival1) > numext::norm1(eival2))
+ RealScalar eival1_norm = numext::norm1(eival1);
+ RealScalar eival2_norm = numext::norm1(eival2);
+ // A division by zero can only occur if eival1==eival2==0.
+    // In this case, det==0, and all we have to do is check that eival2_norm!=0
+ if(eival1_norm > eival2_norm)
eival2 = det / eival1;
- else
+ else if(eival2_norm!=RealScalar(0))
eival1 = det / eival2;
// choose the eigenvalue closest to the bottom entry of the diagonal
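The guarded division above relies on eival1*eival2 == det for the 2x2 block: recomputing the smaller eigenvalue as det divided by the larger one avoids the cancellation in the quadratic formula, and when both norms are zero det is zero too, so no division is needed. A tiny numerical sketch of the cancellation being avoided (illustrative numbers only):

// Sketch: for trace t and determinant d, the roots are (t +/- sqrt(t*t - 4*d))/2.
// When |d| << |t|^2 the "minus" root cancels catastrophically, while d/root_big does not.
#include <complex>
#include <cstdio>
int main() {
  const std::complex<double> t(1.0, 0.0), d(1e-20, 0.0);
  const std::complex<double> disc = std::sqrt(t * t - 4.0 * d);
  const std::complex<double> big = (t + disc) / 2.0;
  const std::complex<double> naive = (t - disc) / 2.0;   // rounds to 0: all digits lost
  const std::complex<double> safe = d / big;              // ~1e-20, as expected
  std::printf("naive=%g  safe=%g\n", naive.real(), safe.real());
  return 0;
}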
diff --git a/Eigen/src/Eigenvalues/EigenSolver.h b/Eigen/src/Eigenvalues/EigenSolver.h
index 997bebe7b..572b29e4e 100644
--- a/Eigen/src/Eigenvalues/EigenSolver.h
+++ b/Eigen/src/Eigenvalues/EigenSolver.h
@@ -110,7 +110,7 @@ template<typename _MatrixType> class EigenSolver
*
* \sa compute() for an example.
*/
- EigenSolver() : m_eivec(), m_eivalues(), m_isInitialized(false), m_realSchur(), m_matT(), m_tmp() {}
+ EigenSolver() : m_eivec(), m_eivalues(), m_isInitialized(false), m_eigenvectorsOk(false), m_realSchur(), m_matT(), m_tmp() {}
/** \brief Default constructor with memory preallocation
*
diff --git a/Eigen/src/Eigenvalues/RealQZ.h b/Eigen/src/Eigenvalues/RealQZ.h
index e2b37f40e..509130184 100644
--- a/Eigen/src/Eigenvalues/RealQZ.h
+++ b/Eigen/src/Eigenvalues/RealQZ.h
@@ -90,8 +90,9 @@ namespace Eigen {
m_Z(size, size),
m_workspace(size*2),
m_maxIters(400),
- m_isInitialized(false)
- { }
+ m_isInitialized(false),
+ m_computeQZ(true)
+ {}
/** \brief Constructor; computes real QZ decomposition of given matrices
*
@@ -108,9 +109,11 @@ namespace Eigen {
m_Z(A.rows(),A.cols()),
m_workspace(A.rows()*2),
m_maxIters(400),
- m_isInitialized(false) {
- compute(A, B, computeQZ);
- }
+ m_isInitialized(false),
+ m_computeQZ(true)
+ {
+ compute(A, B, computeQZ);
+ }
/** \brief Returns matrix Q in the QZ decomposition.
*
diff --git a/Eigen/src/Eigenvalues/RealSchur.h b/Eigen/src/Eigenvalues/RealSchur.h
index aca8a8279..7304ef344 100644
--- a/Eigen/src/Eigenvalues/RealSchur.h
+++ b/Eigen/src/Eigenvalues/RealSchur.h
@@ -236,7 +236,7 @@ template<typename _MatrixType> class RealSchur
typedef Matrix<Scalar,3,1> Vector3s;
Scalar computeNormOfT();
- Index findSmallSubdiagEntry(Index iu);
+ Index findSmallSubdiagEntry(Index iu, const Scalar& considerAsZero);
void splitOffTwoRows(Index iu, bool computeU, const Scalar& exshift);
void computeShift(Index iu, Index iter, Scalar& exshift, Vector3s& shiftInfo);
void initFrancisQRStep(Index il, Index iu, const Vector3s& shiftInfo, Index& im, Vector3s& firstHouseholderVector);
@@ -307,12 +307,16 @@ RealSchur<MatrixType>& RealSchur<MatrixType>::computeFromHessenberg(const HessMa
Index totalIter = 0; // iteration count for whole matrix
Scalar exshift(0); // sum of exceptional shifts
Scalar norm = computeNormOfT();
+ // sub-diagonal entries smaller than considerAsZero will be treated as zero.
+ // We use eps^2 to enable more precision in small eigenvalues.
+ Scalar considerAsZero = numext::maxi<Scalar>( norm * numext::abs2(NumTraits<Scalar>::epsilon()),
+ (std::numeric_limits<Scalar>::min)() );
if(norm!=Scalar(0))
{
while (iu >= 0)
{
- Index il = findSmallSubdiagEntry(iu);
+ Index il = findSmallSubdiagEntry(iu,considerAsZero);
// Check for convergence
if (il == iu) // One root found
@@ -369,14 +373,17 @@ inline typename MatrixType::Scalar RealSchur<MatrixType>::computeNormOfT()
/** \internal Look for single small sub-diagonal element and returns its index */
template<typename MatrixType>
-inline Index RealSchur<MatrixType>::findSmallSubdiagEntry(Index iu)
+inline Index RealSchur<MatrixType>::findSmallSubdiagEntry(Index iu, const Scalar& considerAsZero)
{
using std::abs;
Index res = iu;
while (res > 0)
{
Scalar s = abs(m_matT.coeff(res-1,res-1)) + abs(m_matT.coeff(res,res));
- if (abs(m_matT.coeff(res,res-1)) <= NumTraits<Scalar>::epsilon() * s)
+
+ s = numext::maxi<Scalar>(s * NumTraits<Scalar>::epsilon(), considerAsZero);
+
+ if (abs(m_matT.coeff(res,res-1)) <= s)
break;
res--;
}
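A worked sketch of what the new absolute threshold evaluates to in double precision (the norm value is made up; eps is machine epsilon for double):

// Sketch: for double, eps ~= 2.22e-16, so eps^2 ~= 4.93e-32. With ||T|| = 1e3 the
// absolute floor is ~4.93e-29, clamped from below by the smallest normal double.
#include <algorithm>
#include <limits>
#include <cstdio>
int main() {
  const double eps  = std::numeric_limits<double>::epsilon();
  const double norm = 1e3;  // hypothetical norm of T
  const double considerAsZero = std::max(norm * eps * eps, std::numeric_limits<double>::min());
  std::printf("considerAsZero = %g\n", considerAsZero);   // ~4.93e-29
  return 0;
}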
diff --git a/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h b/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h
index f95606206..9bbce652f 100644
--- a/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h
+++ b/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h
@@ -122,7 +122,8 @@ template<typename _MatrixType> class SelfAdjointEigenSolver
m_eivalues(),
m_subdiag(),
m_info(InvalidInput),
- m_isInitialized(false)
+ m_isInitialized(false),
+ m_eigenvectorsOk(false)
{ }
/** \brief Constructor, pre-allocates memory for dynamic-size matrices.
@@ -142,7 +143,8 @@ template<typename _MatrixType> class SelfAdjointEigenSolver
: m_eivec(size, size),
m_eivalues(size),
m_subdiag(size > 1 ? size - 1 : 1),
- m_isInitialized(false)
+ m_isInitialized(false),
+ m_eigenvectorsOk(false)
{}
/** \brief Constructor; computes eigendecomposition of given matrix.
@@ -166,7 +168,8 @@ template<typename _MatrixType> class SelfAdjointEigenSolver
: m_eivec(matrix.rows(), matrix.cols()),
m_eivalues(matrix.cols()),
m_subdiag(matrix.rows() > 1 ? matrix.rows() - 1 : 1),
- m_isInitialized(false)
+ m_isInitialized(false),
+ m_eigenvectorsOk(false)
{
compute(matrix.derived(), options);
}
diff --git a/Eigen/src/Geometry/OrthoMethods.h b/Eigen/src/Geometry/OrthoMethods.h
index a035e6310..524aebe1b 100644
--- a/Eigen/src/Geometry/OrthoMethods.h
+++ b/Eigen/src/Geometry/OrthoMethods.h
@@ -27,9 +27,10 @@ namespace Eigen {
template<typename Derived>
template<typename OtherDerived>
#ifndef EIGEN_PARSED_BY_DOXYGEN
-EIGEN_DEVICE_FUNC inline typename MatrixBase<Derived>::template cross_product_return_type<OtherDerived>::type
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+typename MatrixBase<Derived>::template cross_product_return_type<OtherDerived>::type
#else
-inline typename MatrixBase<Derived>::PlainObject
+typename MatrixBase<Derived>::PlainObject
#endif
MatrixBase<Derived>::cross(const MatrixBase<OtherDerived>& other) const
{
diff --git a/Eigen/src/Geometry/Transform.h b/Eigen/src/Geometry/Transform.h
index 75991aaed..3090351a0 100644
--- a/Eigen/src/Geometry/Transform.h
+++ b/Eigen/src/Geometry/Transform.h
@@ -97,6 +97,9 @@ template<int Mode> struct transform_make_affine;
* - #AffineCompact: the transformation is stored as a (Dim)x(Dim+1) matrix.
* - #Projective: the transformation is stored as a (Dim+1)^2 matrix
* without any assumption.
+ * - #Isometry: same as #Affine with the additional assumption that
+ * the linear part represents a rotation. This assumption is exploited
+ * to speed up some functions such as inverse() and rotation().
* \tparam _Options has the same meaning as in class Matrix. It allows to specify DontAlign and/or RowMajor.
* These Options are passed directly to the underlying matrix type.
*
@@ -252,11 +255,11 @@ protected:
public:
/** Default constructor without initialization of the meaningful coefficients.
- * If Mode==Affine, then the last row is set to [0 ... 0 1] */
+ * If Mode==Affine or Mode==Isometry, then the last row is set to [0 ... 0 1] */
EIGEN_DEVICE_FUNC inline Transform()
{
check_template_params();
- internal::transform_make_affine<(int(Mode)==Affine) ? Affine : AffineCompact>::run(m_matrix);
+ internal::transform_make_affine<(int(Mode)==Affine || int(Mode)==Isometry) ? Affine : AffineCompact>::run(m_matrix);
}
EIGEN_DEVICE_FUNC inline Transform(const Transform& other)
@@ -602,7 +605,9 @@ public:
template<typename Derived>
EIGEN_DEVICE_FUNC inline Transform operator*(const RotationBase<Derived,Dim>& r) const;
- EIGEN_DEVICE_FUNC const LinearMatrixType rotation() const;
+ typedef typename internal::conditional<int(Mode)==Isometry,ConstLinearPart,const LinearMatrixType>::type RotationReturnType;
+ EIGEN_DEVICE_FUNC RotationReturnType rotation() const;
+
template<typename RotationMatrixType, typename ScalingMatrixType>
EIGEN_DEVICE_FUNC
void computeRotationScaling(RotationMatrixType *rotation, ScalingMatrixType *scaling) const;
@@ -1046,20 +1051,43 @@ EIGEN_DEVICE_FUNC inline Transform<Scalar,Dim,Mode,Options> Transform<Scalar,Dim
*** Special functions ***
************************/
+namespace internal {
+template<int Mode> struct transform_rotation_impl {
+ template<typename TransformType>
+ EIGEN_DEVICE_FUNC static inline
+ const typename TransformType::LinearMatrixType run(const TransformType& t)
+ {
+ typedef typename TransformType::LinearMatrixType LinearMatrixType;
+ LinearMatrixType result;
+ t.computeRotationScaling(&result, (LinearMatrixType*)0);
+ return result;
+ }
+};
+template<> struct transform_rotation_impl<Isometry> {
+ template<typename TransformType>
+ EIGEN_DEVICE_FUNC static inline
+ typename TransformType::ConstLinearPart run(const TransformType& t)
+ {
+ return t.linear();
+ }
+};
+}
/** \returns the rotation part of the transformation
*
+ * If Mode==Isometry, then this method is an alias for linear(),
+ * otherwise it calls computeRotationScaling() to extract the rotation
+  * through an SVD decomposition.
*
* \svd_module
*
* \sa computeRotationScaling(), computeScalingRotation(), class SVD
*/
template<typename Scalar, int Dim, int Mode, int Options>
-EIGEN_DEVICE_FUNC const typename Transform<Scalar,Dim,Mode,Options>::LinearMatrixType
+EIGEN_DEVICE_FUNC
+typename Transform<Scalar,Dim,Mode,Options>::RotationReturnType
Transform<Scalar,Dim,Mode,Options>::rotation() const
{
- LinearMatrixType result;
- computeRotationScaling(&result, (LinearMatrixType*)0);
- return result;
+ return internal::transform_rotation_impl<Mode>::run(*this);
}
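For context, the practical effect of the new RotationReturnType: on an Isometry transform, rotation() now returns a lightweight view of the linear block instead of running the SVD-based extraction, while the other modes keep the old behavior. A minimal usage sketch (illustrative only, not part of the patch):

```cpp
#include <Eigen/Geometry>
#include <iostream>

int main()
{
  Eigen::Isometry3d iso = Eigen::Isometry3d::Identity();
  iso.rotate(Eigen::AngleAxisd(0.5, Eigen::Vector3d::UnitZ()));

  Eigen::Affine3d aff = Eigen::Affine3d::Identity();
  aff.rotate(Eigen::AngleAxisd(0.5, Eigen::Vector3d::UnitZ()));
  aff.scale(2.0);

  // Isometry mode: rotation() is now an alias for linear(), no copy, no SVD.
  std::cout << iso.rotation() << "\n\n";
  // Affine mode: rotation() still strips the scaling via computeRotationScaling().
  std::cout << aff.rotation() << "\n";
}
```

The speedup relies on the Isometry assumption that the linear part already is a rotation, which the dispatch on transform_rotation_impl<Isometry> exploits.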
diff --git a/Eigen/src/Geometry/arch/Geometry_SSE.h b/Eigen/src/Geometry/arch/Geometry_SSE.h
index d4346aa1c..108cc9f8e 100644
--- a/Eigen/src/Geometry/arch/Geometry_SSE.h
+++ b/Eigen/src/Geometry/arch/Geometry_SSE.h
@@ -25,10 +25,12 @@ struct quat_product<Architecture::SSE, Derived, OtherDerived, float>
};
static inline Quaternion<float> run(const QuaternionBase<Derived>& _a, const QuaternionBase<OtherDerived>& _b)
{
+ evaluator<typename Derived::Coefficients> ae(_a.coeffs());
+ evaluator<typename OtherDerived::Coefficients> be(_b.coeffs());
Quaternion<float> res;
const Packet4f mask = _mm_setr_ps(0.f,0.f,0.f,-0.f);
- Packet4f a = _a.coeffs().template packet<AAlignment>(0);
- Packet4f b = _b.coeffs().template packet<BAlignment>(0);
+ Packet4f a = ae.template packet<AAlignment,Packet4f>(0);
+ Packet4f b = be.template packet<BAlignment,Packet4f>(0);
Packet4f s1 = pmul(vec4f_swizzle1(a,1,2,0,2),vec4f_swizzle1(b,2,0,1,2));
Packet4f s2 = pmul(vec4f_swizzle1(a,3,3,3,1),vec4f_swizzle1(b,0,1,2,1));
pstoret<float,Packet4f,ResAlignment>(
@@ -50,9 +52,10 @@ struct quat_conj<Architecture::SSE, Derived, float>
};
static inline Quaternion<float> run(const QuaternionBase<Derived>& q)
{
+ evaluator<typename Derived::Coefficients> qe(q.coeffs());
Quaternion<float> res;
- const __m128 mask = _mm_setr_ps(-0.f,-0.f,-0.f,0.f);
- pstoret<float,Packet4f,ResAlignment>(&res.x(), _mm_xor_ps(mask, q.coeffs().template packet<traits<Derived>::Alignment>(0)));
+ const Packet4f mask = _mm_setr_ps(-0.f,-0.f,-0.f,0.f);
+ pstoret<float,Packet4f,ResAlignment>(&res.x(), pxor(mask, qe.template packet<traits<Derived>::Alignment,Packet4f>(0)));
return res;
}
};
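The rewritten conjugate kernel flips the x, y, z lanes with a single packet XOR against a mask holding only sign bits, rather than loading a separately defined aligned constant. The trick in isolation, with raw SSE intrinsics (not Eigen code):

```cpp
#include <xmmintrin.h>
#include <cstdio>

int main()
{
  // Quaternion conjugate: (x, y, z, w) -> (-x, -y, -z, w).
  // XOR with -0.0f toggles only the sign bit, so one pxor negates exactly
  // the lanes selected by the mask and leaves the last lane untouched.
  __m128 q    = _mm_setr_ps(1.f, 2.f, 3.f, 4.f);
  __m128 mask = _mm_setr_ps(-0.f, -0.f, -0.f, 0.f);
  __m128 conj = _mm_xor_ps(q, mask);

  float out[4];
  _mm_storeu_ps(out, conj);
  std::printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]); // -1 -2 -3 4
}
```

Expressing the operation through the generic pxor wrapper also lets the same code path serve other SIMD backends.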
@@ -67,12 +70,14 @@ struct cross3_impl<Architecture::SSE,VectorLhs,VectorRhs,float,true>
static inline typename plain_matrix_type<VectorLhs>::type
run(const VectorLhs& lhs, const VectorRhs& rhs)
{
- __m128 a = lhs.template packet<traits<VectorLhs>::Alignment>(0);
- __m128 b = rhs.template packet<traits<VectorRhs>::Alignment>(0);
- __m128 mul1=_mm_mul_ps(vec4f_swizzle1(a,1,2,0,3),vec4f_swizzle1(b,2,0,1,3));
- __m128 mul2=_mm_mul_ps(vec4f_swizzle1(a,2,0,1,3),vec4f_swizzle1(b,1,2,0,3));
+ evaluator<VectorLhs> lhs_eval(lhs);
+ evaluator<VectorRhs> rhs_eval(rhs);
+ Packet4f a = lhs_eval.template packet<traits<VectorLhs>::Alignment,Packet4f>(0);
+ Packet4f b = rhs_eval.template packet<traits<VectorRhs>::Alignment,Packet4f>(0);
+ Packet4f mul1 = pmul(vec4f_swizzle1(a,1,2,0,3),vec4f_swizzle1(b,2,0,1,3));
+ Packet4f mul2 = pmul(vec4f_swizzle1(a,2,0,1,3),vec4f_swizzle1(b,1,2,0,3));
typename plain_matrix_type<VectorLhs>::type res;
- pstoret<float,Packet4f,ResAlignment>(&res.x(),_mm_sub_ps(mul1,mul2));
+ pstoret<float,Packet4f,ResAlignment>(&res.x(),psub(mul1,mul2));
return res;
}
};
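The cross product kernel keeps the classic shuffle formulation but now goes through the generic pmul/psub wrappers and evaluator-based packet loads. The index pattern behind the two swizzled products, shown as plain scalar code for reference (assumption: 3-component float vectors, no SIMD):

```cpp
#include <array>
#include <cstdio>

// Scalar rendition of the swizzle-based cross product:
// mul1 = a(1,2,0) * b(2,0,1), mul2 = a(2,0,1) * b(1,2,0), result = mul1 - mul2.
static std::array<float,3> cross3(const std::array<float,3>& a, const std::array<float,3>& b)
{
  const std::array<float,3> mul1{ a[1]*b[2], a[2]*b[0], a[0]*b[1] };
  const std::array<float,3> mul2{ a[2]*b[1], a[0]*b[2], a[1]*b[0] };
  return { mul1[0]-mul2[0], mul1[1]-mul2[1], mul1[2]-mul2[2] };
}

int main()
{
  const auto c = cross3({1,0,0}, {0,1,0});
  std::printf("%g %g %g\n", c[0], c[1], c[2]); // 0 0 1
}
```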
@@ -94,9 +99,12 @@ struct quat_product<Architecture::SSE, Derived, OtherDerived, double>
Quaternion<double> res;
+ evaluator<typename Derived::Coefficients> ae(_a.coeffs());
+ evaluator<typename OtherDerived::Coefficients> be(_b.coeffs());
+
const double* a = _a.coeffs().data();
- Packet2d b_xy = _b.coeffs().template packet<BAlignment>(0);
- Packet2d b_zw = _b.coeffs().template packet<BAlignment>(2);
+ Packet2d b_xy = be.template packet<BAlignment,Packet2d>(0);
+ Packet2d b_zw = be.template packet<BAlignment,Packet2d>(2);
Packet2d a_xx = pset1<Packet2d>(a[0]);
Packet2d a_yy = pset1<Packet2d>(a[1]);
Packet2d a_zz = pset1<Packet2d>(a[2]);
@@ -145,11 +153,12 @@ struct quat_conj<Architecture::SSE, Derived, double>
};
static inline Quaternion<double> run(const QuaternionBase<Derived>& q)
{
+ evaluator<typename Derived::Coefficients> qe(q.coeffs());
Quaternion<double> res;
- const __m128d mask0 = _mm_setr_pd(-0.,-0.);
- const __m128d mask2 = _mm_setr_pd(-0.,0.);
- pstoret<double,Packet2d,ResAlignment>(&res.x(), _mm_xor_pd(mask0, q.coeffs().template packet<traits<Derived>::Alignment>(0)));
- pstoret<double,Packet2d,ResAlignment>(&res.z(), _mm_xor_pd(mask2, q.coeffs().template packet<traits<Derived>::Alignment>(2)));
+ const Packet2d mask0 = _mm_setr_pd(-0.,-0.);
+ const Packet2d mask2 = _mm_setr_pd(-0.,0.);
+ pstoret<double,Packet2d,ResAlignment>(&res.x(), pxor(mask0, qe.template packet<traits<Derived>::Alignment,Packet2d>(0)));
+ pstoret<double,Packet2d,ResAlignment>(&res.z(), pxor(mask2, qe.template packet<traits<Derived>::Alignment,Packet2d>(2)));
return res;
}
};
diff --git a/Eigen/src/Householder/HouseholderSequence.h b/Eigen/src/Householder/HouseholderSequence.h
index e62befcb6..9318c281f 100644
--- a/Eigen/src/Householder/HouseholderSequence.h
+++ b/Eigen/src/Householder/HouseholderSequence.h
@@ -156,6 +156,12 @@ template<typename VectorsType, typename CoeffsType, int Side> class HouseholderS
Side
> TransposeReturnType;
+ typedef HouseholderSequence<
+ typename internal::add_const<VectorsType>::type,
+ typename internal::add_const<CoeffsType>::type,
+ Side
+ > ConstHouseholderSequence;
+
/** \brief Constructor.
* \param[in] v %Matrix containing the essential parts of the Householder vectors
* \param[in] h Vector containing the Householder coefficients
@@ -244,6 +250,18 @@ template<typename VectorsType, typename CoeffsType, int Side> class HouseholderS
.setShift(m_shift);
}
+ /** \returns an expression of the complex conjugate of \c *this if Cond==true,
+ * returns \c *this otherwise.
+ */
+ template<bool Cond>
+ EIGEN_DEVICE_FUNC
+ inline typename internal::conditional<Cond,ConjugateReturnType,ConstHouseholderSequence>::type
+ conjugateIf() const
+ {
+ typedef typename internal::conditional<Cond,ConjugateReturnType,ConstHouseholderSequence>::type ReturnType;
+ return ReturnType(m_vectors.template conjugateIf<Cond>(), m_coeffs.template conjugateIf<Cond>());
+ }
+
/** \brief Adjoint (conjugate transpose) of the Householder sequence. */
AdjointReturnType adjoint() const
{
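conjugateIf<Cond>() appears throughout this changeset as the compile-time switch that lets one code path serve both the transposed and the adjoint variant; HouseholderSequence simply forwards it to its vectors and coefficients. A minimal sketch of the idiom, assuming the conjugateIf() members added elsewhere in this changeset to dense and triangular expressions:

```cpp
#include <Eigen/Dense>
#include <iostream>

// One code path for both L^T x = b (Conjugate == false) and L^* x = b
// (Conjugate == true); the branch is resolved at compile time.
template<bool Conjugate>
Eigen::VectorXcd lowerTransposedSolve(const Eigen::MatrixXcd& L, const Eigen::VectorXcd& b)
{
  return L.triangularView<Eigen::Lower>()
          .transpose()
          .conjugateIf<Conjugate>()
          .solve(b);
}

int main()
{
  Eigen::MatrixXcd L = Eigen::MatrixXcd::Random(4,4);
  Eigen::VectorXcd b = Eigen::VectorXcd::Random(4);
  std::cout << lowerTransposedSolve<false>(L, b).transpose() << "\n";
  std::cout << lowerTransposedSolve<true>(L, b).transpose() << "\n";
}
```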
diff --git a/Eigen/src/IterativeLinearSolvers/IncompleteCholesky.h b/Eigen/src/IterativeLinearSolvers/IncompleteCholesky.h
index e45c272b4..e5d0308ec 100644
--- a/Eigen/src/IterativeLinearSolvers/IncompleteCholesky.h
+++ b/Eigen/src/IterativeLinearSolvers/IncompleteCholesky.h
@@ -41,13 +41,7 @@ namespace Eigen {
* the info() method, then you can either increase the initial shift, or better use another preconditioning technique.
*
*/
-template <typename Scalar, int _UpLo = Lower, typename _OrderingType =
-#ifndef EIGEN_MPL2_ONLY
-AMDOrdering<int>
-#else
-NaturalOrdering<int>
-#endif
->
+template <typename Scalar, int _UpLo = Lower, typename _OrderingType = AMDOrdering<int> >
class IncompleteCholesky : public SparseSolverBase<IncompleteCholesky<Scalar,_UpLo,_OrderingType> >
{
protected:
@@ -76,12 +70,12 @@ class IncompleteCholesky : public SparseSolverBase<IncompleteCholesky<Scalar,_Up
*
* \sa IncompleteCholesky(const MatrixType&)
*/
- IncompleteCholesky() : m_initialShift(1e-3),m_factorizationIsOk(false) {}
+ IncompleteCholesky() : m_initialShift(1e-3),m_analysisIsOk(false),m_factorizationIsOk(false) {}
/** Constructor computing the incomplete factorization for the given matrix \a matrix.
*/
template<typename MatrixType>
- IncompleteCholesky(const MatrixType& matrix) : m_initialShift(1e-3),m_factorizationIsOk(false)
+ IncompleteCholesky(const MatrixType& matrix) : m_initialShift(1e-3),m_analysisIsOk(false),m_factorizationIsOk(false)
{
compute(matrix);
}
diff --git a/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h b/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h
index 43bd8e8f6..09436cb67 100644
--- a/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h
+++ b/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h
@@ -225,7 +225,6 @@ void IncompleteLUT<Scalar,StorageIndex>::analyzePattern(const _MatrixType& amat)
// Compute the Fill-reducing permutation
// Since ILUT does not perform any numerical pivoting,
// it is highly preferable to keep the diagonal through symmetric permutations.
-#ifndef EIGEN_MPL2_ONLY
// To this end, let's symmetrize the pattern and perform AMD on it.
SparseMatrix<Scalar,ColMajor, StorageIndex> mat1 = amat;
SparseMatrix<Scalar,ColMajor, StorageIndex> mat2 = amat.transpose();
@@ -235,14 +234,6 @@ void IncompleteLUT<Scalar,StorageIndex>::analyzePattern(const _MatrixType& amat)
AMDOrdering<StorageIndex> ordering;
ordering(AtA,m_P);
m_Pinv = m_P.inverse(); // cache the inverse permutation
-#else
- // If AMD is not available, (MPL2-only), then let's use the slower COLAMD routine.
- SparseMatrix<Scalar,ColMajor, StorageIndex> mat1 = amat;
- COLAMDOrdering<StorageIndex> ordering;
- ordering(mat1,m_Pinv);
- m_P = m_Pinv.inverse();
-#endif
-
m_analysisIsOk = true;
m_factorizationIsOk = false;
m_isInitialized = true;
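With the MPL2-only branch removed, ILUT unconditionally symmetrizes the pattern and applies AMD, which keeps the diagonal under the resulting symmetric permutation. A standalone sketch with the public OrderingMethods API (the matrix here is a made-up example):

```cpp
#include <Eigen/Sparse>
#include <Eigen/OrderingMethods>
#include <iostream>

int main()
{
  typedef Eigen::SparseMatrix<double, Eigen::ColMajor, int> Sparse;

  Sparse A(5,5);
  A.insert(0,0) = 4; A.insert(1,1) = 4; A.insert(2,2) = 4;
  A.insert(3,3) = 4; A.insert(4,4) = 4;
  A.insert(0,4) = 1; A.insert(4,1) = 1; A.insert(2,0) = 1;
  A.makeCompressed();

  // Symmetrize the pattern; the values do not matter for the ordering.
  Sparse At  = A.transpose();
  Sparse AtA = A + At;

  // AMD fill-reducing ordering on the symmetric pattern.
  Eigen::AMDOrdering<int> ordering;
  Eigen::PermutationMatrix<Eigen::Dynamic, Eigen::Dynamic, int> P;
  ordering(AtA, P);

  std::cout << P.indices().transpose() << "\n";
}
```

Since AMD is now MPL2-licensed within Eigen (see the Amd.h relicensing below), the COLAMD fallback and the EIGEN_MPL2_ONLY guard are no longer needed here.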
diff --git a/Eigen/src/LU/Determinant.h b/Eigen/src/LU/Determinant.h
index 6af63a6e7..3a41e6fcb 100644
--- a/Eigen/src/LU/Determinant.h
+++ b/Eigen/src/LU/Determinant.h
@@ -23,15 +23,6 @@ inline const typename Derived::Scalar bruteforce_det3_helper
* (matrix.coeff(1,b) * matrix.coeff(2,c) - matrix.coeff(1,c) * matrix.coeff(2,b));
}
-template<typename Derived>
-EIGEN_DEVICE_FUNC
-const typename Derived::Scalar bruteforce_det4_helper
-(const MatrixBase<Derived>& matrix, int j, int k, int m, int n)
-{
- return (matrix.coeff(j,0) * matrix.coeff(k,1) - matrix.coeff(k,0) * matrix.coeff(j,1))
- * (matrix.coeff(m,2) * matrix.coeff(n,3) - matrix.coeff(n,2) * matrix.coeff(m,3));
-}
-
template<typename Derived,
int DeterminantType = Derived::RowsAtCompileTime
> struct determinant_impl
@@ -75,16 +66,34 @@ template<typename Derived> struct determinant_impl<Derived, 3>
template<typename Derived> struct determinant_impl<Derived, 4>
{
+ typedef typename traits<Derived>::Scalar Scalar;
static EIGEN_DEVICE_FUNC
- typename traits<Derived>::Scalar run(const Derived& m)
+ Scalar run(const Derived& m)
+ {
+ Scalar d2_01 = det2(m, 0, 1);
+ Scalar d2_02 = det2(m, 0, 2);
+ Scalar d2_03 = det2(m, 0, 3);
+ Scalar d2_12 = det2(m, 1, 2);
+ Scalar d2_13 = det2(m, 1, 3);
+ Scalar d2_23 = det2(m, 2, 3);
+ Scalar d3_0 = det3(m, 1,d2_23, 2,d2_13, 3,d2_12);
+ Scalar d3_1 = det3(m, 0,d2_23, 2,d2_03, 3,d2_02);
+ Scalar d3_2 = det3(m, 0,d2_13, 1,d2_03, 3,d2_01);
+ Scalar d3_3 = det3(m, 0,d2_12, 1,d2_02, 2,d2_01);
+ return internal::pmadd(-m(0,3),d3_0, m(1,3)*d3_1) +
+ internal::pmadd(-m(2,3),d3_2, m(3,3)*d3_3);
+ }
+protected:
+ static EIGEN_DEVICE_FUNC
+ Scalar det2(const Derived& m, Index i0, Index i1)
+ {
+ return m(i0,0) * m(i1,1) - m(i1,0) * m(i0,1);
+ }
+
+ static EIGEN_DEVICE_FUNC
+ Scalar det3(const Derived& m, Index i0, const Scalar& d0, Index i1, const Scalar& d1, Index i2, const Scalar& d2)
{
- // trick by Martin Costabel to compute 4x4 det with only 30 muls
- return bruteforce_det4_helper(m,0,1,2,3)
- - bruteforce_det4_helper(m,0,2,1,3)
- + bruteforce_det4_helper(m,0,3,1,2)
- + bruteforce_det4_helper(m,1,2,0,3)
- - bruteforce_det4_helper(m,1,3,0,2)
- + bruteforce_det4_helper(m,2,3,0,1);
+ return internal::pmadd(m(i0,2), d0, internal::pmadd(-m(i1,2), d1, m(i2,2)*d2));
}
};
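The new 4x4 path is an explicit Laplace expansion along the last column: the six 2x2 minors of the first two columns are computed once, combined into four 3x3 cofactors, and the products are chained through pmadd so FMA hardware can fuse them. The same recurrence in scalar form, with std::fma standing in for pmadd (assumption: plain double storage, not Eigen packets):

```cpp
#include <cmath>
#include <cstdio>

// 2x2 minor from rows i0,i1 and columns 0,1.
static double det2(const double m[4][4], int i0, int i1)
{
  return m[i0][0]*m[i1][1] - m[i1][0]*m[i0][1];
}

// 3x3 cofactor from rows i0,i1,i2 and columns 0,1,2, given their 2x2 minors.
static double det3(const double m[4][4], int i0, double d0, int i1, double d1, int i2, double d2)
{
  return std::fma(m[i0][2], d0, std::fma(-m[i1][2], d1, m[i2][2]*d2));
}

double det4(const double m[4][4])
{
  const double d2_01 = det2(m,0,1), d2_02 = det2(m,0,2), d2_03 = det2(m,0,3);
  const double d2_12 = det2(m,1,2), d2_13 = det2(m,1,3), d2_23 = det2(m,2,3);
  const double d3_0 = det3(m, 1,d2_23, 2,d2_13, 3,d2_12);
  const double d3_1 = det3(m, 0,d2_23, 2,d2_03, 3,d2_02);
  const double d3_2 = det3(m, 0,d2_13, 1,d2_03, 3,d2_01);
  const double d3_3 = det3(m, 0,d2_12, 1,d2_02, 2,d2_01);
  // Laplace expansion along the fourth column.
  return std::fma(-m[0][3], d3_0, m[1][3]*d3_1) + std::fma(-m[2][3], d3_2, m[3][3]*d3_3);
}

int main()
{
  const double I[4][4] = {{1,0,0,0},{0,1,0,0},{0,0,1,0},{0,0,0,1}};
  std::printf("%g\n", det4(I)); // 1
}
```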
diff --git a/Eigen/src/LU/FullPivLU.h b/Eigen/src/LU/FullPivLU.h
index 344ec8926..ef93ec5eb 100644
--- a/Eigen/src/LU/FullPivLU.h
+++ b/Eigen/src/LU/FullPivLU.h
@@ -18,6 +18,7 @@ template<typename _MatrixType> struct traits<FullPivLU<_MatrixType> >
{
typedef MatrixXpr XprKind;
typedef SolverStorage StorageKind;
+ typedef int StorageIndex;
enum { Flags = 0 };
};
@@ -62,9 +63,9 @@ template<typename _MatrixType> class FullPivLU
public:
typedef _MatrixType MatrixType;
typedef SolverBase<FullPivLU> Base;
+ friend class SolverBase<FullPivLU>;
EIGEN_GENERIC_PUBLIC_INTERFACE(FullPivLU)
- // FIXME StorageIndex defined in EIGEN_GENERIC_PUBLIC_INTERFACE should be int
enum {
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
@@ -218,6 +219,7 @@ template<typename _MatrixType> class FullPivLU
return internal::image_retval<FullPivLU>(*this, originalMatrix);
}
+ #ifdef EIGEN_PARSED_BY_DOXYGEN
/** \return a solution x to the equation Ax=b, where A is the matrix of which
* *this is the LU decomposition.
*
@@ -237,14 +239,10 @@ template<typename _MatrixType> class FullPivLU
*
* \sa TriangularView::solve(), kernel(), inverse()
*/
- // FIXME this is a copy-paste of the base-class member to add the isInitialized assertion.
template<typename Rhs>
inline const Solve<FullPivLU, Rhs>
- solve(const MatrixBase<Rhs>& b) const
- {
- eigen_assert(m_isInitialized && "LU is not initialized.");
- return Solve<FullPivLU, Rhs>(*this, b.derived());
- }
+ solve(const MatrixBase<Rhs>& b) const;
+ #endif
/** \returns an estimate of the reciprocal condition number of the matrix of which \c *this is
the LU decomposition.
@@ -529,8 +527,8 @@ void FullPivLU<MatrixType>::computeInPlace()
m_nonzero_pivots = k;
for(Index i = k; i < size; ++i)
{
- m_rowsTranspositions.coeffRef(i) = i;
- m_colsTranspositions.coeffRef(i) = i;
+ m_rowsTranspositions.coeffRef(i) = internal::convert_index<StorageIndex>(i);
+ m_colsTranspositions.coeffRef(i) = internal::convert_index<StorageIndex>(i);
}
break;
}
@@ -541,8 +539,8 @@ void FullPivLU<MatrixType>::computeInPlace()
// Now that we've found the pivot, we need to apply the row/col swaps to
// bring it to the location (k,k).
- m_rowsTranspositions.coeffRef(k) = row_of_biggest_in_corner;
- m_colsTranspositions.coeffRef(k) = col_of_biggest_in_corner;
+ m_rowsTranspositions.coeffRef(k) = internal::convert_index<StorageIndex>(row_of_biggest_in_corner);
+ m_colsTranspositions.coeffRef(k) = internal::convert_index<StorageIndex>(col_of_biggest_in_corner);
if(k != row_of_biggest_in_corner) {
m_lu.row(k).swap(m_lu.row(row_of_biggest_in_corner));
++number_of_transpositions;
@@ -755,7 +753,6 @@ void FullPivLU<_MatrixType>::_solve_impl(const RhsType &rhs, DstType &dst) const
const Index rows = this->rows(),
cols = this->cols(),
nonzero_pivots = this->rank();
- eigen_assert(rhs.rows() == rows);
const Index smalldim = (std::min)(rows, cols);
if(nonzero_pivots == 0)
@@ -805,7 +802,6 @@ void FullPivLU<_MatrixType>::_solve_impl_transposed(const RhsType &rhs, DstType
const Index rows = this->rows(), cols = this->cols(),
nonzero_pivots = this->rank();
- eigen_assert(rhs.rows() == cols);
const Index smalldim = (std::min)(rows, cols);
if(nonzero_pivots == 0)
@@ -819,29 +815,19 @@ void FullPivLU<_MatrixType>::_solve_impl_transposed(const RhsType &rhs, DstType
// Step 1
c = permutationQ().inverse() * rhs;
- if (Conjugate) {
- // Step 2
- m_lu.topLeftCorner(nonzero_pivots, nonzero_pivots)
- .template triangularView<Upper>()
- .adjoint()
- .solveInPlace(c.topRows(nonzero_pivots));
- // Step 3
- m_lu.topLeftCorner(smalldim, smalldim)
- .template triangularView<UnitLower>()
- .adjoint()
- .solveInPlace(c.topRows(smalldim));
- } else {
- // Step 2
- m_lu.topLeftCorner(nonzero_pivots, nonzero_pivots)
- .template triangularView<Upper>()
- .transpose()
- .solveInPlace(c.topRows(nonzero_pivots));
- // Step 3
- m_lu.topLeftCorner(smalldim, smalldim)
- .template triangularView<UnitLower>()
- .transpose()
- .solveInPlace(c.topRows(smalldim));
- }
+ // Step 2
+ m_lu.topLeftCorner(nonzero_pivots, nonzero_pivots)
+ .template triangularView<Upper>()
+ .transpose()
+ .template conjugateIf<Conjugate>()
+ .solveInPlace(c.topRows(nonzero_pivots));
+
+ // Step 3
+ m_lu.topLeftCorner(smalldim, smalldim)
+ .template triangularView<UnitLower>()
+ .transpose()
+ .template conjugateIf<Conjugate>()
+ .solveInPlace(c.topRows(smalldim));
// Step 4
PermutationPType invp = permutationP().inverse().eval();
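The folded code path implements the factor-by-factor solve of the transposed system: with P A Q = L U one has A^T = Q U^T L^T P, so the right-hand side is permuted by Q^{-1}, back-substituted through U^T then L^T, and finally permuted by P^{-1}. The same steps written against the public FullPivLU API (square, full-rank, real case for brevity):

```cpp
#include <Eigen/LU>
#include <iostream>

int main()
{
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(5,5);
  Eigen::VectorXd b = Eigen::VectorXd::Random(5);

  Eigen::FullPivLU<Eigen::MatrixXd> lu(A);
  Eigen::MatrixXd LU = lu.matrixLU();        // packed L (unit lower) and U (upper)

  // Step 1: undo the column permutation Q.
  Eigen::VectorXd c = lu.permutationQ().inverse() * b;
  // Step 2: solve U^T y = c.
  LU.triangularView<Eigen::Upper>().transpose().solveInPlace(c);
  // Step 3: solve L^T z = y (unit diagonal).
  LU.triangularView<Eigen::UnitLower>().transpose().solveInPlace(c);
  // Step 4: x = P^{-1} z.
  Eigen::VectorXd x = lu.permutationP().inverse() * c;

  std::cout << (A.transpose()*x - b).norm() << "\n"; // ~0
}
```

The adjoint variant only differs by the conjugateIf<true> wrappers, which is exactly what the single code path above now expresses.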
diff --git a/Eigen/src/LU/PartialPivLU.h b/Eigen/src/LU/PartialPivLU.h
index bfcd2c95b..b8938013a 100644
--- a/Eigen/src/LU/PartialPivLU.h
+++ b/Eigen/src/LU/PartialPivLU.h
@@ -19,6 +19,7 @@ template<typename _MatrixType> struct traits<PartialPivLU<_MatrixType> >
{
typedef MatrixXpr XprKind;
typedef SolverStorage StorageKind;
+ typedef int StorageIndex;
typedef traits<_MatrixType> BaseTraits;
enum {
Flags = BaseTraits::Flags & RowMajorBit,
@@ -79,8 +80,9 @@ template<typename _MatrixType> class PartialPivLU
typedef _MatrixType MatrixType;
typedef SolverBase<PartialPivLU> Base;
+ friend class SolverBase<PartialPivLU>;
+
EIGEN_GENERIC_PUBLIC_INTERFACE(PartialPivLU)
- // FIXME StorageIndex defined in EIGEN_GENERIC_PUBLIC_INTERFACE should be int
enum {
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
@@ -152,6 +154,7 @@ template<typename _MatrixType> class PartialPivLU
return m_p;
}
+ #ifdef EIGEN_PARSED_BY_DOXYGEN
/** This method returns the solution x to the equation Ax=b, where A is the matrix of which
* *this is the LU decomposition.
*
@@ -169,14 +172,10 @@ template<typename _MatrixType> class PartialPivLU
*
* \sa TriangularView::solve(), inverse(), computeInverse()
*/
- // FIXME this is a copy-paste of the base-class member to add the isInitialized assertion.
template<typename Rhs>
inline const Solve<PartialPivLU, Rhs>
- solve(const MatrixBase<Rhs>& b) const
- {
- eigen_assert(m_isInitialized && "PartialPivLU is not initialized.");
- return Solve<PartialPivLU, Rhs>(*this, b.derived());
- }
+ solve(const MatrixBase<Rhs>& b) const;
+ #endif
/** \returns an estimate of the reciprocal condition number of the matrix of which \c *this is
the LU decomposition.
@@ -231,8 +230,6 @@ template<typename _MatrixType> class PartialPivLU
* Step 3: replace c by the solution x to Ux = c.
*/
- eigen_assert(rhs.rows() == m_lu.rows());
-
// Step 1
dst = permutationP() * rhs;
@@ -246,26 +243,21 @@ template<typename _MatrixType> class PartialPivLU
template<bool Conjugate, typename RhsType, typename DstType>
EIGEN_DEVICE_FUNC
void _solve_impl_transposed(const RhsType &rhs, DstType &dst) const {
- /* The decomposition PA = LU can be rewritten as A = P^{-1} L U.
+ /* The decomposition PA = LU can be rewritten as A^T = U^T L^T P.
* So we proceed as follows:
- * Step 1: compute c = Pb.
- * Step 2: replace c by the solution x to Lx = c.
- * Step 3: replace c by the solution x to Ux = c.
+ * Step 1: compute c as the solution to U^T c = b
+ * Step 2: replace c by the solution x to L^T x = c.
+ * Step 3: update x = P^-1 x.
*/
eigen_assert(rhs.rows() == m_lu.cols());
- if (Conjugate) {
- // Step 1
- dst = m_lu.template triangularView<Upper>().adjoint().solve(rhs);
- // Step 2
- m_lu.template triangularView<UnitLower>().adjoint().solveInPlace(dst);
- } else {
- // Step 1
- dst = m_lu.template triangularView<Upper>().transpose().solve(rhs);
- // Step 2
- m_lu.template triangularView<UnitLower>().transpose().solveInPlace(dst);
- }
+ // Step 1
+ dst = m_lu.template triangularView<Upper>().transpose()
+ .template conjugateIf<Conjugate>().solve(rhs);
+ // Step 2
+ m_lu.template triangularView<UnitLower>().transpose()
+ .template conjugateIf<Conjugate>().solveInPlace(dst);
// Step 3
dst = permutationP().transpose() * dst;
}
@@ -339,17 +331,18 @@ PartialPivLU<MatrixType>::PartialPivLU(EigenBase<InputType>& matrix)
namespace internal {
/** \internal This is the blocked version of fullpivlu_unblocked() */
-template<typename Scalar, int StorageOrder, typename PivIndex>
+template<typename Scalar, int StorageOrder, typename PivIndex, int SizeAtCompileTime=Dynamic>
struct partial_lu_impl
{
- // FIXME add a stride to Map, so that the following mapping becomes easier,
- // another option would be to create an expression being able to automatically
- // warp any Map, Matrix, and Block expressions as a unique type, but since that's exactly
- // a Map + stride, why not adding a stride to Map, and convenient ctors from a Matrix,
- // and Block.
- typedef Map<Matrix<Scalar, Dynamic, Dynamic, StorageOrder> > MapLU;
- typedef Block<MapLU, Dynamic, Dynamic> MatrixType;
- typedef Block<MatrixType,Dynamic,Dynamic> BlockType;
+ static const int UnBlockedBound = 16;
+ static const bool UnBlockedAtCompileTime = SizeAtCompileTime!=Dynamic && SizeAtCompileTime<=UnBlockedBound;
+ static const int ActualSizeAtCompileTime = UnBlockedAtCompileTime ? SizeAtCompileTime : Dynamic;
+ // Remaining rows and columns at compile-time:
+ static const int RRows = SizeAtCompileTime==2 ? 1 : Dynamic;
+ static const int RCols = SizeAtCompileTime==2 ? 1 : Dynamic;
+ typedef Matrix<Scalar, ActualSizeAtCompileTime, ActualSizeAtCompileTime, StorageOrder> MatrixType;
+ typedef Ref<MatrixType> MatrixTypeRef;
+ typedef Ref<Matrix<Scalar, Dynamic, Dynamic, StorageOrder> > BlockType;
typedef typename MatrixType::RealScalar RealScalar;
/** \internal performs the LU decomposition in-place of the matrix \a lu
@@ -362,19 +355,22 @@ struct partial_lu_impl
*
* \returns The index of the first pivot which is exactly zero if any, or a negative number otherwise.
*/
- static Index unblocked_lu(MatrixType& lu, PivIndex* row_transpositions, PivIndex& nb_transpositions)
+ static Index unblocked_lu(MatrixTypeRef& lu, PivIndex* row_transpositions, PivIndex& nb_transpositions)
{
typedef scalar_score_coeff_op<Scalar> Scoring;
typedef typename Scoring::result_type Score;
const Index rows = lu.rows();
const Index cols = lu.cols();
const Index size = (std::min)(rows,cols);
+ // For small compile-time matrices it is worth processing the last row separately:
+ // speedup: +100% for 2x2, +10% for others.
+ const Index endk = UnBlockedAtCompileTime ? size-1 : size;
nb_transpositions = 0;
Index first_zero_pivot = -1;
- for(Index k = 0; k < size; ++k)
+ for(Index k = 0; k < endk; ++k)
{
- Index rrows = rows-k-1;
- Index rcols = cols-k-1;
+ int rrows = internal::convert_index<int>(rows-k-1);
+ int rcols = internal::convert_index<int>(cols-k-1);
Index row_of_biggest_in_col;
Score biggest_in_corner
@@ -391,9 +387,7 @@ struct partial_lu_impl
++nb_transpositions;
}
- // FIXME shall we introduce a safe quotient expression in cas 1/lu.coeff(k,k)
- // overflow but not the actual quotient?
- lu.col(k).tail(rrows) /= lu.coeff(k,k);
+ lu.col(k).tail(fix<RRows>(rrows)) /= lu.coeff(k,k);
}
else if(first_zero_pivot==-1)
{
@@ -403,8 +397,18 @@ struct partial_lu_impl
}
if(k<rows-1)
- lu.bottomRightCorner(rrows,rcols).noalias() -= lu.col(k).tail(rrows) * lu.row(k).tail(rcols);
+ lu.bottomRightCorner(fix<RRows>(rrows),fix<RCols>(rcols)).noalias() -= lu.col(k).tail(fix<RRows>(rrows)) * lu.row(k).tail(fix<RCols>(rcols));
}
+
+ // special handling of the last entry
+ if(UnBlockedAtCompileTime)
+ {
+ Index k = endk;
+ row_transpositions[k] = PivIndex(k);
+ if (Scoring()(lu(k, k)) == Score(0) && first_zero_pivot == -1)
+ first_zero_pivot = k;
+ }
+
return first_zero_pivot;
}
@@ -425,13 +429,12 @@ struct partial_lu_impl
*/
static Index blocked_lu(Index rows, Index cols, Scalar* lu_data, Index luStride, PivIndex* row_transpositions, PivIndex& nb_transpositions, Index maxBlockSize=256)
{
- MapLU lu1(lu_data,StorageOrder==RowMajor?rows:luStride,StorageOrder==RowMajor?luStride:cols);
- MatrixType lu(lu1,0,0,rows,cols);
+ MatrixTypeRef lu = MatrixType::Map(lu_data,rows, cols, OuterStride<>(luStride));
const Index size = (std::min)(rows,cols);
// if the matrix is too small, no blocking:
- if(size<=16)
+ if(UnBlockedAtCompileTime || size<=UnBlockedBound)
{
return unblocked_lu(lu, row_transpositions, nb_transpositions);
}
@@ -457,12 +460,12 @@ struct partial_lu_impl
// A00 | A01 | A02
// lu = A_0 | A_1 | A_2 = A10 | A11 | A12
// A20 | A21 | A22
- BlockType A_0(lu,0,0,rows,k);
- BlockType A_2(lu,0,k+bs,rows,tsize);
- BlockType A11(lu,k,k,bs,bs);
- BlockType A12(lu,k,k+bs,bs,tsize);
- BlockType A21(lu,k+bs,k,trows,bs);
- BlockType A22(lu,k+bs,k+bs,trows,tsize);
+ BlockType A_0 = lu.block(0,0,rows,k);
+ BlockType A_2 = lu.block(0,k+bs,rows,tsize);
+ BlockType A11 = lu.block(k,k,bs,bs);
+ BlockType A12 = lu.block(k,k+bs,bs,tsize);
+ BlockType A21 = lu.block(k+bs,k,trows,bs);
+ BlockType A22 = lu.block(k+bs,k+bs,trows,tsize);
PivIndex nb_transpositions_in_panel;
// recursively call the blocked LU algorithm on [A11^T A21^T]^T
@@ -505,7 +508,9 @@ void partial_lu_inplace(MatrixType& lu, TranspositionType& row_transpositions, t
eigen_assert((&row_transpositions.coeffRef(1)-&row_transpositions.coeffRef(0)) == 1);
partial_lu_impl
- <typename MatrixType::Scalar, MatrixType::Flags&RowMajorBit?RowMajor:ColMajor, typename TranspositionType::StorageIndex>
+ < typename MatrixType::Scalar, MatrixType::Flags&RowMajorBit?RowMajor:ColMajor,
+ typename TranspositionType::StorageIndex,
+ EIGEN_SIZE_MIN_PREFER_FIXED(MatrixType::RowsAtCompileTime,MatrixType::ColsAtCompileTime)>
::blocked_lu(lu.rows(), lu.cols(), &lu.coeffRef(0,0), lu.outerStride(), &row_transpositions.coeffRef(0), nb_transpositions);
}
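Two things change in the LU kernel plumbing: the Map+Block pair is replaced by Ref<>, and the inner tail/block expressions receive their sizes through fix<N>(n), so that for small fixed-size inputs (SizeAtCompileTime <= 16, and especially 2x2) the expressions become statically sized and get unrolled. A short sketch of fix<> on the public API (unrelated data, for illustration; C++14 spelling shown, use fix<3>() before C++14):

```cpp
#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::VectorXd v = Eigen::VectorXd::LinSpaced(8, 1.0, 8.0);

  // Run-time length: the tail expression has Dynamic size.
  std::cout << v.tail(3).transpose() << "\n";

  // Compile-time length: the tail expression is fixed-size, enabling unrolling.
  std::cout << v.tail(Eigen::fix<3>).transpose() << "\n";

  // "Possibly fixed" form used by the kernel above: fixed when N != Dynamic,
  // otherwise the runtime value is used.
  std::cout << v.tail(Eigen::fix<3>(3)).transpose() << "\n";
}
```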
@@ -519,7 +524,10 @@ void PartialPivLU<MatrixType>::compute()
// the row permutation is stored as int indices, so just to be sure:
eigen_assert(m_lu.rows()<NumTraits<int>::highest());
- m_l1_norm = m_lu.cwiseAbs().colwise().sum().maxCoeff();
+ if(m_lu.cols()>0)
+ m_l1_norm = m_lu.cwiseAbs().colwise().sum().maxCoeff();
+ else
+ m_l1_norm = RealScalar(0);
eigen_assert(m_lu.rows() == m_lu.cols() && "PartialPivLU is only for square (and moreover invertible) matrices");
const Index size = m_lu.rows();
diff --git a/Eigen/src/LU/arch/Inverse_SSE.h b/Eigen/src/LU/arch/Inverse_SSE.h
index ebb64a62b..4dce2ef20 100644
--- a/Eigen/src/LU/arch/Inverse_SSE.h
+++ b/Eigen/src/LU/arch/Inverse_SSE.h
@@ -44,7 +44,7 @@ struct compute_inverse_size4<Architecture::SSE, float, MatrixType, ResultType>
static void run(const MatrixType& mat, ResultType& result)
{
ActualMatrixType matrix(mat);
- EIGEN_ALIGN16 const unsigned int _Sign_PNNP[4] = { 0x00000000, 0x80000000, 0x80000000, 0x00000000 };
+ const Packet4f p4f_sign_PNNP = _mm_castsi128_ps(_mm_set_epi32(0x00000000, 0x80000000, 0x80000000, 0x00000000));
// Load the full matrix into registers
__m128 _L1 = matrix.template packet<MatrixAlignment>( 0);
@@ -139,7 +139,7 @@ struct compute_inverse_size4<Architecture::SSE, float, MatrixType, ResultType>
iC = _mm_sub_ps(iC, _mm_mul_ps(_mm_shuffle_ps(A,A,0xB1), _mm_shuffle_ps(DC,DC,0x66)));
rd = _mm_shuffle_ps(rd,rd,0);
- rd = _mm_xor_ps(rd, _mm_load_ps((float*)_Sign_PNNP));
+ rd = _mm_xor_ps(rd, p4f_sign_PNNP);
// iB = C*|B| - D*B#*A
iB = _mm_sub_ps(_mm_mul_ps(C,_mm_shuffle_ps(dB,dB,0)), iB);
diff --git a/Eigen/src/OrderingMethods/Amd.h b/Eigen/src/OrderingMethods/Amd.h
index f91ecb24e..7ca3f33b1 100644
--- a/Eigen/src/OrderingMethods/Amd.h
+++ b/Eigen/src/OrderingMethods/Amd.h
@@ -2,32 +2,22 @@
// for linear algebra.
//
// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
/*
-
NOTE: this routine has been adapted from the CSparse library:
Copyright (c) 2006, Timothy A. Davis.
http://www.suitesparse.com
-CSparse is free software; you can redistribute it and/or
-modify it under the terms of the GNU Lesser General Public
-License as published by the Free Software Foundation; either
-version 2.1 of the License, or (at your option) any later version.
-
-CSparse is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-Lesser General Public License for more details.
-
-You should have received a copy of the GNU Lesser General Public
-License along with this Module; if not, write to the Free Software
-Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-
+The author of CSparse, Timothy A. Davis, has executed a license with Google LLC
+to permit distribution of this code and derivative works as part of Eigen under
+the Mozilla Public License v. 2.0, as stated at the top of this file.
*/
-#include "../Core/util/NonMPL2.h"
-
#ifndef EIGEN_SPARSE_AMD_H
#define EIGEN_SPARSE_AMD_H
diff --git a/Eigen/src/OrderingMethods/Ordering.h b/Eigen/src/OrderingMethods/Ordering.h
index 34dbef487..8791158be 100644
--- a/Eigen/src/OrderingMethods/Ordering.h
+++ b/Eigen/src/OrderingMethods/Ordering.h
@@ -38,8 +38,6 @@ void ordering_helper_at_plus_a(const MatrixType& A, MatrixType& symmat)
}
-#ifndef EIGEN_MPL2_ONLY
-
/** \ingroup OrderingMethods_Module
* \class AMDOrdering
*
@@ -81,8 +79,6 @@ class AMDOrdering
}
};
-#endif // EIGEN_MPL2_ONLY
-
/** \ingroup OrderingMethods_Module
* \class NaturalOrdering
*
diff --git a/Eigen/src/PardisoSupport/PardisoSupport.h b/Eigen/src/PardisoSupport/PardisoSupport.h
index fb2ba04b4..07006b5c4 100644
--- a/Eigen/src/PardisoSupport/PardisoSupport.h
+++ b/Eigen/src/PardisoSupport/PardisoSupport.h
@@ -123,6 +123,7 @@ class PardisoImpl : public SparseSolverBase<Derived>
};
PardisoImpl()
+ : m_analysisIsOk(false), m_factorizationIsOk(false)
{
eigen_assert((sizeof(StorageIndex) >= sizeof(_INTEGER_t) && sizeof(StorageIndex) <= 8) && "Non-supported index type");
m_iparm.setZero();
diff --git a/Eigen/src/QR/ColPivHouseholderQR.h b/Eigen/src/QR/ColPivHouseholderQR.h
index 1faa3442e..9b677e9bf 100644
--- a/Eigen/src/QR/ColPivHouseholderQR.h
+++ b/Eigen/src/QR/ColPivHouseholderQR.h
@@ -17,6 +17,9 @@ namespace internal {
template<typename _MatrixType> struct traits<ColPivHouseholderQR<_MatrixType> >
: traits<_MatrixType>
{
+ typedef MatrixXpr XprKind;
+ typedef SolverStorage StorageKind;
+ typedef int StorageIndex;
enum { Flags = 0 };
};
@@ -46,20 +49,19 @@ template<typename _MatrixType> struct traits<ColPivHouseholderQR<_MatrixType> >
* \sa MatrixBase::colPivHouseholderQr()
*/
template<typename _MatrixType> class ColPivHouseholderQR
+ : public SolverBase<ColPivHouseholderQR<_MatrixType> >
{
public:
typedef _MatrixType MatrixType;
+ typedef SolverBase<ColPivHouseholderQR> Base;
+ friend class SolverBase<ColPivHouseholderQR>;
+
+ EIGEN_GENERIC_PUBLIC_INTERFACE(ColPivHouseholderQR)
enum {
- RowsAtCompileTime = MatrixType::RowsAtCompileTime,
- ColsAtCompileTime = MatrixType::ColsAtCompileTime,
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
};
- typedef typename MatrixType::Scalar Scalar;
- typedef typename MatrixType::RealScalar RealScalar;
- // FIXME should be int
- typedef typename MatrixType::StorageIndex StorageIndex;
typedef typename internal::plain_diag_type<MatrixType>::type HCoeffsType;
typedef PermutationMatrix<ColsAtCompileTime, MaxColsAtCompileTime> PermutationType;
typedef typename internal::plain_row_type<MatrixType, Index>::type IntRowVectorType;
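This hunk is the recurring refactoring applied to all the QR classes (and to SVDBase below): traits gain SolverStorage and an int StorageIndex, the class derives from SolverBase, EIGEN_GENERIC_PUBLIC_INTERFACE supplies the common typedefs, solve() moves to the base class (the local copy is kept only for doxygen), and a _solve_impl_transposed is added. The user-visible benefit is that transposed and adjoint solves reuse an existing factorization; a sketch, assuming the SolverBase entry points wired up by this changeset:

```cpp
#include <Eigen/QR>
#include <iostream>

int main()
{
  Eigen::MatrixXcd A = Eigen::MatrixXcd::Random(6,6);
  Eigen::VectorXcd b = Eigen::VectorXcd::Random(6);

  Eigen::ColPivHouseholderQR<Eigen::MatrixXcd> qr(A);

  Eigen::VectorXcd x  = qr.solve(b);             // A   x = b
  Eigen::VectorXcd xt = qr.transpose().solve(b); // A^T x = b, _solve_impl_transposed<false>
  Eigen::VectorXcd xa = qr.adjoint().solve(b);   // A^* x = b, _solve_impl_transposed<true>

  std::cout << (A*x - b).norm() << " "
            << (A.transpose()*xt - b).norm() << " "
            << (A.adjoint()*xa - b).norm() << "\n"; // all ~0
}
```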
@@ -156,6 +158,7 @@ template<typename _MatrixType> class ColPivHouseholderQR
computeInPlace();
}
+ #ifdef EIGEN_PARSED_BY_DOXYGEN
/** This method finds a solution x to the equation Ax=b, where A is the matrix of which
* *this is the QR decomposition, if any exists.
*
@@ -172,11 +175,8 @@ template<typename _MatrixType> class ColPivHouseholderQR
*/
template<typename Rhs>
inline const Solve<ColPivHouseholderQR, Rhs>
- solve(const MatrixBase<Rhs>& b) const
- {
- eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized.");
- return Solve<ColPivHouseholderQR, Rhs>(*this, b.derived());
- }
+ solve(const MatrixBase<Rhs>& b) const;
+ #endif
HouseholderSequenceType householderQ() const;
HouseholderSequenceType matrixQ() const
@@ -417,6 +417,9 @@ template<typename _MatrixType> class ColPivHouseholderQR
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename RhsType, typename DstType>
void _solve_impl(const RhsType &rhs, DstType &dst) const;
+
+ template<bool Conjugate, typename RhsType, typename DstType>
+ void _solve_impl_transposed(const RhsType &rhs, DstType &dst) const;
#endif
protected:
@@ -583,8 +586,6 @@ template<typename _MatrixType>
template<typename RhsType, typename DstType>
void ColPivHouseholderQR<_MatrixType>::_solve_impl(const RhsType &rhs, DstType &dst) const
{
- eigen_assert(rhs.rows() == rows());
-
const Index nonzero_pivots = nonzeroPivots();
if(nonzero_pivots == 0)
@@ -604,6 +605,31 @@ void ColPivHouseholderQR<_MatrixType>::_solve_impl(const RhsType &rhs, DstType &
for(Index i = 0; i < nonzero_pivots; ++i) dst.row(m_colsPermutation.indices().coeff(i)) = c.row(i);
for(Index i = nonzero_pivots; i < cols(); ++i) dst.row(m_colsPermutation.indices().coeff(i)).setZero();
}
+
+template<typename _MatrixType>
+template<bool Conjugate, typename RhsType, typename DstType>
+void ColPivHouseholderQR<_MatrixType>::_solve_impl_transposed(const RhsType &rhs, DstType &dst) const
+{
+ const Index nonzero_pivots = nonzeroPivots();
+
+ if(nonzero_pivots == 0)
+ {
+ dst.setZero();
+ return;
+ }
+
+ typename RhsType::PlainObject c(m_colsPermutation.transpose()*rhs);
+
+ m_qr.topLeftCorner(nonzero_pivots, nonzero_pivots)
+ .template triangularView<Upper>()
+ .transpose().template conjugateIf<Conjugate>()
+ .solveInPlace(c.topRows(nonzero_pivots));
+
+ dst.topRows(nonzero_pivots) = c.topRows(nonzero_pivots);
+ dst.bottomRows(rows()-nonzero_pivots).setZero();
+
+ dst.applyOnTheLeft(householderQ().setLength(nonzero_pivots).template conjugateIf<!Conjugate>() );
+}
#endif
namespace internal {
diff --git a/Eigen/src/QR/CompleteOrthogonalDecomposition.h b/Eigen/src/QR/CompleteOrthogonalDecomposition.h
index 03017a375..2fc3c871a 100644
--- a/Eigen/src/QR/CompleteOrthogonalDecomposition.h
+++ b/Eigen/src/QR/CompleteOrthogonalDecomposition.h
@@ -16,6 +16,9 @@ namespace internal {
template <typename _MatrixType>
struct traits<CompleteOrthogonalDecomposition<_MatrixType> >
: traits<_MatrixType> {
+ typedef MatrixXpr XprKind;
+ typedef SolverStorage StorageKind;
+ typedef int StorageIndex;
enum { Flags = 0 };
};
@@ -44,19 +47,21 @@ struct traits<CompleteOrthogonalDecomposition<_MatrixType> >
*
* \sa MatrixBase::completeOrthogonalDecomposition()
*/
-template <typename _MatrixType>
-class CompleteOrthogonalDecomposition {
+template <typename _MatrixType> class CompleteOrthogonalDecomposition
+ : public SolverBase<CompleteOrthogonalDecomposition<_MatrixType> >
+{
public:
typedef _MatrixType MatrixType;
+ typedef SolverBase<CompleteOrthogonalDecomposition> Base;
+
+ template<typename Derived>
+ friend struct internal::solve_assertion;
+
+ EIGEN_GENERIC_PUBLIC_INTERFACE(CompleteOrthogonalDecomposition)
enum {
- RowsAtCompileTime = MatrixType::RowsAtCompileTime,
- ColsAtCompileTime = MatrixType::ColsAtCompileTime,
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
};
- typedef typename MatrixType::Scalar Scalar;
- typedef typename MatrixType::RealScalar RealScalar;
- typedef typename MatrixType::StorageIndex StorageIndex;
typedef typename internal::plain_diag_type<MatrixType>::type HCoeffsType;
typedef PermutationMatrix<ColsAtCompileTime, MaxColsAtCompileTime>
PermutationType;
@@ -131,9 +136,9 @@ class CompleteOrthogonalDecomposition {
m_temp(matrix.cols())
{
computeInPlace();
- }
-
+ }
+ #ifdef EIGEN_PARSED_BY_DOXYGEN
/** This method computes the minimum-norm solution X to a least squares
* problem \f[\mathrm{minimize} \|A X - B\|, \f] where \b A is the matrix of
* which \c *this is the complete orthogonal decomposition.
@@ -145,11 +150,8 @@ class CompleteOrthogonalDecomposition {
*/
template <typename Rhs>
inline const Solve<CompleteOrthogonalDecomposition, Rhs> solve(
- const MatrixBase<Rhs>& b) const {
- eigen_assert(m_cpqr.m_isInitialized &&
- "CompleteOrthogonalDecomposition is not initialized.");
- return Solve<CompleteOrthogonalDecomposition, Rhs>(*this, b.derived());
- }
+ const MatrixBase<Rhs>& b) const;
+ #endif
HouseholderSequenceType householderQ(void) const;
HouseholderSequenceType matrixQ(void) const { return m_cpqr.householderQ(); }
@@ -158,8 +160,8 @@ class CompleteOrthogonalDecomposition {
*/
MatrixType matrixZ() const {
MatrixType Z = MatrixType::Identity(m_cpqr.cols(), m_cpqr.cols());
- applyZAdjointOnTheLeftInPlace(Z);
- return Z.adjoint();
+ applyZOnTheLeftInPlace<false>(Z);
+ return Z;
}
/** \returns a reference to the matrix where the complete orthogonal
@@ -275,6 +277,7 @@ class CompleteOrthogonalDecomposition {
*/
inline const Inverse<CompleteOrthogonalDecomposition> pseudoInverse() const
{
+ eigen_assert(m_cpqr.m_isInitialized && "CompleteOrthogonalDecomposition is not initialized.");
return Inverse<CompleteOrthogonalDecomposition>(*this);
}
@@ -368,6 +371,9 @@ class CompleteOrthogonalDecomposition {
#ifndef EIGEN_PARSED_BY_DOXYGEN
template <typename RhsType, typename DstType>
void _solve_impl(const RhsType& rhs, DstType& dst) const;
+
+ template<bool Conjugate, typename RhsType, typename DstType>
+ void _solve_impl_transposed(const RhsType &rhs, DstType &dst) const;
#endif
protected:
@@ -375,8 +381,22 @@ class CompleteOrthogonalDecomposition {
EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
}
+ template<bool Transpose_, typename Rhs>
+ void _check_solve_assertion(const Rhs& b) const {
+ EIGEN_ONLY_USED_FOR_DEBUG(b);
+ eigen_assert(m_cpqr.m_isInitialized && "CompleteOrthogonalDecomposition is not initialized.");
+ eigen_assert((Transpose_?derived().cols():derived().rows())==b.rows() && "CompleteOrthogonalDecomposition::solve(): invalid number of rows of the right hand side matrix b");
+ }
+
void computeInPlace();
+ /** Overwrites \b rhs with \f$ \mathbf{Z} * \mathbf{rhs} \f$ or
+ * \f$ \mathbf{\overline Z} * \mathbf{rhs} \f$ if \c Conjugate
+ * is set to \c true.
+ */
+ template <bool Conjugate, typename Rhs>
+ void applyZOnTheLeftInPlace(Rhs& rhs) const;
+
/** Overwrites \b rhs with \f$ \mathbf{Z}^* * \mathbf{rhs} \f$.
*/
template <typename Rhs>
@@ -465,13 +485,35 @@ void CompleteOrthogonalDecomposition<MatrixType>::computeInPlace()
}
template <typename MatrixType>
+template <bool Conjugate, typename Rhs>
+void CompleteOrthogonalDecomposition<MatrixType>::applyZOnTheLeftInPlace(
+ Rhs& rhs) const {
+ const Index cols = this->cols();
+ const Index nrhs = rhs.cols();
+ const Index rank = this->rank();
+ Matrix<typename Rhs::Scalar, Dynamic, 1> temp((std::max)(cols, nrhs));
+ for (Index k = rank-1; k >= 0; --k) {
+ if (k != rank - 1) {
+ rhs.row(k).swap(rhs.row(rank - 1));
+ }
+ rhs.middleRows(rank - 1, cols - rank + 1)
+ .applyHouseholderOnTheLeft(
+ matrixQTZ().row(k).tail(cols - rank).transpose().template conjugateIf<!Conjugate>(), zCoeffs().template conjugateIf<Conjugate>()(k),
+ &temp(0));
+ if (k != rank - 1) {
+ rhs.row(k).swap(rhs.row(rank - 1));
+ }
+ }
+}
+
+template <typename MatrixType>
template <typename Rhs>
void CompleteOrthogonalDecomposition<MatrixType>::applyZAdjointOnTheLeftInPlace(
Rhs& rhs) const {
const Index cols = this->cols();
const Index nrhs = rhs.cols();
const Index rank = this->rank();
- Matrix<typename MatrixType::Scalar, Dynamic, 1> temp((std::max)(cols, nrhs));
+ Matrix<typename Rhs::Scalar, Dynamic, 1> temp((std::max)(cols, nrhs));
for (Index k = 0; k < rank; ++k) {
if (k != rank - 1) {
rhs.row(k).swap(rhs.row(rank - 1));
@@ -491,8 +533,6 @@ template <typename _MatrixType>
template <typename RhsType, typename DstType>
void CompleteOrthogonalDecomposition<_MatrixType>::_solve_impl(
const RhsType& rhs, DstType& dst) const {
- eigen_assert(rhs.rows() == this->rows());
-
const Index rank = this->rank();
if (rank == 0) {
dst.setZero();
@@ -520,6 +560,34 @@ void CompleteOrthogonalDecomposition<_MatrixType>::_solve_impl(
// Undo permutation to get x = P^{-1} * y.
dst = colsPermutation() * dst;
}
+
+template<typename _MatrixType>
+template<bool Conjugate, typename RhsType, typename DstType>
+void CompleteOrthogonalDecomposition<_MatrixType>::_solve_impl_transposed(const RhsType &rhs, DstType &dst) const
+{
+ const Index rank = this->rank();
+
+ if (rank == 0) {
+ dst.setZero();
+ return;
+ }
+
+ typename RhsType::PlainObject c(colsPermutation().transpose()*rhs);
+
+ if (rank < cols()) {
+ applyZOnTheLeftInPlace<!Conjugate>(c);
+ }
+
+ matrixT().topLeftCorner(rank, rank)
+ .template triangularView<Upper>()
+ .transpose().template conjugateIf<Conjugate>()
+ .solveInPlace(c.topRows(rank));
+
+ dst.topRows(rank) = c.topRows(rank);
+ dst.bottomRows(rows()-rank).setZero();
+
+ dst.applyOnTheLeft(householderQ().setLength(rank).template conjugateIf<!Conjugate>() );
+}
#endif
namespace internal {
diff --git a/Eigen/src/QR/FullPivHouseholderQR.h b/Eigen/src/QR/FullPivHouseholderQR.h
index c31e47cc4..d0664a1d8 100644
--- a/Eigen/src/QR/FullPivHouseholderQR.h
+++ b/Eigen/src/QR/FullPivHouseholderQR.h
@@ -18,6 +18,9 @@ namespace internal {
template<typename _MatrixType> struct traits<FullPivHouseholderQR<_MatrixType> >
: traits<_MatrixType>
{
+ typedef MatrixXpr XprKind;
+ typedef SolverStorage StorageKind;
+ typedef int StorageIndex;
enum { Flags = 0 };
};
@@ -55,20 +58,19 @@ struct traits<FullPivHouseholderQRMatrixQReturnType<MatrixType> >
* \sa MatrixBase::fullPivHouseholderQr()
*/
template<typename _MatrixType> class FullPivHouseholderQR
+ : public SolverBase<FullPivHouseholderQR<_MatrixType> >
{
public:
typedef _MatrixType MatrixType;
+ typedef SolverBase<FullPivHouseholderQR> Base;
+ friend class SolverBase<FullPivHouseholderQR>;
+
+ EIGEN_GENERIC_PUBLIC_INTERFACE(FullPivHouseholderQR)
enum {
- RowsAtCompileTime = MatrixType::RowsAtCompileTime,
- ColsAtCompileTime = MatrixType::ColsAtCompileTime,
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
};
- typedef typename MatrixType::Scalar Scalar;
- typedef typename MatrixType::RealScalar RealScalar;
- // FIXME should be int
- typedef typename MatrixType::StorageIndex StorageIndex;
typedef internal::FullPivHouseholderQRMatrixQReturnType<MatrixType> MatrixQReturnType;
typedef typename internal::plain_diag_type<MatrixType>::type HCoeffsType;
typedef Matrix<StorageIndex, 1,
@@ -156,6 +158,7 @@ template<typename _MatrixType> class FullPivHouseholderQR
computeInPlace();
}
+ #ifdef EIGEN_PARSED_BY_DOXYGEN
/** This method finds a solution x to the equation Ax=b, where A is the matrix of which
* \c *this is the QR decomposition.
*
@@ -173,11 +176,8 @@ template<typename _MatrixType> class FullPivHouseholderQR
*/
template<typename Rhs>
inline const Solve<FullPivHouseholderQR, Rhs>
- solve(const MatrixBase<Rhs>& b) const
- {
- eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized.");
- return Solve<FullPivHouseholderQR, Rhs>(*this, b.derived());
- }
+ solve(const MatrixBase<Rhs>& b) const;
+ #endif
/** \returns Expression object representing the matrix Q
*/
@@ -396,6 +396,9 @@ template<typename _MatrixType> class FullPivHouseholderQR
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename RhsType, typename DstType>
void _solve_impl(const RhsType &rhs, DstType &dst) const;
+
+ template<bool Conjugate, typename RhsType, typename DstType>
+ void _solve_impl_transposed(const RhsType &rhs, DstType &dst) const;
#endif
protected:
@@ -498,15 +501,15 @@ void FullPivHouseholderQR<MatrixType>::computeInPlace()
m_nonzero_pivots = k;
for(Index i = k; i < size; i++)
{
- m_rows_transpositions.coeffRef(i) = i;
- m_cols_transpositions.coeffRef(i) = i;
+ m_rows_transpositions.coeffRef(i) = internal::convert_index<StorageIndex>(i);
+ m_cols_transpositions.coeffRef(i) = internal::convert_index<StorageIndex>(i);
m_hCoeffs.coeffRef(i) = Scalar(0);
}
break;
}
- m_rows_transpositions.coeffRef(k) = row_of_biggest_in_corner;
- m_cols_transpositions.coeffRef(k) = col_of_biggest_in_corner;
+ m_rows_transpositions.coeffRef(k) = internal::convert_index<StorageIndex>(row_of_biggest_in_corner);
+ m_cols_transpositions.coeffRef(k) = internal::convert_index<StorageIndex>(col_of_biggest_in_corner);
if(k != row_of_biggest_in_corner) {
m_qr.row(k).tail(cols-k).swap(m_qr.row(row_of_biggest_in_corner).tail(cols-k));
++number_of_transpositions;
@@ -540,7 +543,6 @@ template<typename _MatrixType>
template<typename RhsType, typename DstType>
void FullPivHouseholderQR<_MatrixType>::_solve_impl(const RhsType &rhs, DstType &dst) const
{
- eigen_assert(rhs.rows() == rows());
const Index l_rank = rank();
// FIXME introduce nonzeroPivots() and use it here. and more generally,
@@ -553,7 +555,7 @@ void FullPivHouseholderQR<_MatrixType>::_solve_impl(const RhsType &rhs, DstType
typename RhsType::PlainObject c(rhs);
- Matrix<Scalar,1,RhsType::ColsAtCompileTime> temp(rhs.cols());
+ Matrix<typename RhsType::Scalar,1,RhsType::ColsAtCompileTime> temp(rhs.cols());
for (Index k = 0; k < l_rank; ++k)
{
Index remainingSize = rows()-k;
@@ -570,6 +572,42 @@ void FullPivHouseholderQR<_MatrixType>::_solve_impl(const RhsType &rhs, DstType
for(Index i = 0; i < l_rank; ++i) dst.row(m_cols_permutation.indices().coeff(i)) = c.row(i);
for(Index i = l_rank; i < cols(); ++i) dst.row(m_cols_permutation.indices().coeff(i)).setZero();
}
+
+template<typename _MatrixType>
+template<bool Conjugate, typename RhsType, typename DstType>
+void FullPivHouseholderQR<_MatrixType>::_solve_impl_transposed(const RhsType &rhs, DstType &dst) const
+{
+ const Index l_rank = rank();
+
+ if(l_rank == 0)
+ {
+ dst.setZero();
+ return;
+ }
+
+ typename RhsType::PlainObject c(m_cols_permutation.transpose()*rhs);
+
+ m_qr.topLeftCorner(l_rank, l_rank)
+ .template triangularView<Upper>()
+ .transpose().template conjugateIf<Conjugate>()
+ .solveInPlace(c.topRows(l_rank));
+
+ dst.topRows(l_rank) = c.topRows(l_rank);
+ dst.bottomRows(rows()-l_rank).setZero();
+
+ Matrix<Scalar, 1, DstType::ColsAtCompileTime> temp(dst.cols());
+ const Index size = (std::min)(rows(), cols());
+ for (Index k = size-1; k >= 0; --k)
+ {
+ Index remainingSize = rows()-k;
+
+ dst.bottomRightCorner(remainingSize, dst.cols())
+ .applyHouseholderOnTheLeft(m_qr.col(k).tail(remainingSize-1).template conjugateIf<!Conjugate>(),
+ m_hCoeffs.template conjugateIf<Conjugate>().coeff(k), &temp.coeffRef(0));
+
+ dst.row(k).swap(dst.row(m_rows_transpositions.coeff(k)));
+ }
+}
#endif
namespace internal {
diff --git a/Eigen/src/QR/HouseholderQR.h b/Eigen/src/QR/HouseholderQR.h
index 33cb9c8ff..801739fbd 100644
--- a/Eigen/src/QR/HouseholderQR.h
+++ b/Eigen/src/QR/HouseholderQR.h
@@ -14,6 +14,18 @@
namespace Eigen {
+namespace internal {
+template<typename _MatrixType> struct traits<HouseholderQR<_MatrixType> >
+ : traits<_MatrixType>
+{
+ typedef MatrixXpr XprKind;
+ typedef SolverStorage StorageKind;
+ typedef int StorageIndex;
+ enum { Flags = 0 };
+};
+
+} // end namespace internal
+
/** \ingroup QR_Module
*
*
@@ -42,20 +54,19 @@ namespace Eigen {
* \sa MatrixBase::householderQr()
*/
template<typename _MatrixType> class HouseholderQR
+ : public SolverBase<HouseholderQR<_MatrixType> >
{
public:
typedef _MatrixType MatrixType;
+ typedef SolverBase<HouseholderQR> Base;
+ friend class SolverBase<HouseholderQR>;
+
+ EIGEN_GENERIC_PUBLIC_INTERFACE(HouseholderQR)
enum {
- RowsAtCompileTime = MatrixType::RowsAtCompileTime,
- ColsAtCompileTime = MatrixType::ColsAtCompileTime,
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
};
- typedef typename MatrixType::Scalar Scalar;
- typedef typename MatrixType::RealScalar RealScalar;
- // FIXME should be int
- typedef typename MatrixType::StorageIndex StorageIndex;
typedef Matrix<Scalar, RowsAtCompileTime, RowsAtCompileTime, (MatrixType::Flags&RowMajorBit) ? RowMajor : ColMajor, MaxRowsAtCompileTime, MaxRowsAtCompileTime> MatrixQType;
typedef typename internal::plain_diag_type<MatrixType>::type HCoeffsType;
typedef typename internal::plain_row_type<MatrixType>::type RowVectorType;
@@ -121,6 +132,7 @@ template<typename _MatrixType> class HouseholderQR
computeInPlace();
}
+ #ifdef EIGEN_PARSED_BY_DOXYGEN
/** This method finds a solution x to the equation Ax=b, where A is the matrix of which
* *this is the QR decomposition, if any exists.
*
@@ -137,11 +149,8 @@ template<typename _MatrixType> class HouseholderQR
*/
template<typename Rhs>
inline const Solve<HouseholderQR, Rhs>
- solve(const MatrixBase<Rhs>& b) const
- {
- eigen_assert(m_isInitialized && "HouseholderQR is not initialized.");
- return Solve<HouseholderQR, Rhs>(*this, b.derived());
- }
+ solve(const MatrixBase<Rhs>& b) const;
+ #endif
/** This method returns an expression of the unitary matrix Q as a sequence of Householder transformations.
*
@@ -214,6 +223,9 @@ template<typename _MatrixType> class HouseholderQR
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename RhsType, typename DstType>
void _solve_impl(const RhsType &rhs, DstType &dst) const;
+
+ template<bool Conjugate, typename RhsType, typename DstType>
+ void _solve_impl_transposed(const RhsType &rhs, DstType &dst) const;
#endif
protected:
@@ -349,7 +361,6 @@ template<typename RhsType, typename DstType>
void HouseholderQR<_MatrixType>::_solve_impl(const RhsType &rhs, DstType &dst) const
{
const Index rank = (std::min)(rows(), cols());
- eigen_assert(rhs.rows() == rows());
typename RhsType::PlainObject c(rhs);
@@ -362,6 +373,25 @@ void HouseholderQR<_MatrixType>::_solve_impl(const RhsType &rhs, DstType &dst) c
dst.topRows(rank) = c.topRows(rank);
dst.bottomRows(cols()-rank).setZero();
}
+
+template<typename _MatrixType>
+template<bool Conjugate, typename RhsType, typename DstType>
+void HouseholderQR<_MatrixType>::_solve_impl_transposed(const RhsType &rhs, DstType &dst) const
+{
+ const Index rank = (std::min)(rows(), cols());
+
+ typename RhsType::PlainObject c(rhs);
+
+ m_qr.topLeftCorner(rank, rank)
+ .template triangularView<Upper>()
+ .transpose().template conjugateIf<Conjugate>()
+ .solveInPlace(c.topRows(rank));
+
+ dst.topRows(rank) = c.topRows(rank);
+ dst.bottomRows(rows()-rank).setZero();
+
+ dst.applyOnTheLeft(householderQ().setLength(rank).template conjugateIf<!Conjugate>() );
+}
#endif
/** Performs the QR factorization of the given matrix \a matrix. The result of
diff --git a/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h b/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h
index 1a5c5254e..013c7ae7a 100644
--- a/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h
+++ b/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h
@@ -74,13 +74,35 @@ class SPQR : public SparseSolverBase<SPQR<_MatrixType> >
};
public:
SPQR()
- : m_ordering(SPQR_ORDERING_DEFAULT), m_allow_tol(SPQR_DEFAULT_TOL), m_tolerance (NumTraits<Scalar>::epsilon()), m_useDefaultThreshold(true)
+ : m_analysisIsOk(false),
+ m_factorizationIsOk(false),
+ m_isRUpToDate(false),
+ m_ordering(SPQR_ORDERING_DEFAULT),
+ m_allow_tol(SPQR_DEFAULT_TOL),
+ m_tolerance (NumTraits<Scalar>::epsilon()),
+ m_cR(0),
+ m_E(0),
+ m_H(0),
+ m_HPinv(0),
+ m_HTau(0),
+ m_useDefaultThreshold(true)
{
cholmod_l_start(&m_cc);
}
explicit SPQR(const _MatrixType& matrix)
- : m_ordering(SPQR_ORDERING_DEFAULT), m_allow_tol(SPQR_DEFAULT_TOL), m_tolerance (NumTraits<Scalar>::epsilon()), m_useDefaultThreshold(true)
+ : m_analysisIsOk(false),
+ m_factorizationIsOk(false),
+ m_isRUpToDate(false),
+ m_ordering(SPQR_ORDERING_DEFAULT),
+ m_allow_tol(SPQR_DEFAULT_TOL),
+ m_tolerance (NumTraits<Scalar>::epsilon()),
+ m_cR(0),
+ m_E(0),
+ m_H(0),
+ m_HPinv(0),
+ m_HTau(0),
+ m_useDefaultThreshold(true)
{
cholmod_l_start(&m_cc);
compute(matrix);
diff --git a/Eigen/src/SVD/BDCSVD.h b/Eigen/src/SVD/BDCSVD.h
index 4daa9dd21..e3fddacbc 100644
--- a/Eigen/src/SVD/BDCSVD.h
+++ b/Eigen/src/SVD/BDCSVD.h
@@ -39,6 +39,7 @@ namespace internal {
template<typename _MatrixType>
struct traits<BDCSVD<_MatrixType> >
+ : traits<_MatrixType>
{
typedef _MatrixType MatrixType;
};
@@ -110,7 +111,7 @@ public:
* The default constructor is useful in cases in which the user intends to
* perform decompositions via BDCSVD::compute(const MatrixType&).
*/
- BDCSVD() : m_algoswap(16), m_numIters(0)
+ BDCSVD() : m_algoswap(16), m_isTranspose(false), m_compU(false), m_compV(false), m_numIters(0)
{}
@@ -1006,7 +1007,7 @@ void BDCSVD<MatrixType>::perturbCol0
#ifdef EIGEN_BDCSVD_SANITY_CHECKS
assert((std::isfinite)(tmp));
#endif
- zhat(k) = col0(k) > Literal(0) ? tmp : -tmp;
+ zhat(k) = col0(k) > Literal(0) ? RealScalar(tmp) : RealScalar(-tmp);
}
}
}
diff --git a/Eigen/src/SVD/JacobiSVD.h b/Eigen/src/SVD/JacobiSVD.h
index 1c7c80376..2b6891105 100644
--- a/Eigen/src/SVD/JacobiSVD.h
+++ b/Eigen/src/SVD/JacobiSVD.h
@@ -425,6 +425,7 @@ struct svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner, true>
template<typename _MatrixType, int QRPreconditioner>
struct traits<JacobiSVD<_MatrixType,QRPreconditioner> >
+ : traits<_MatrixType>
{
typedef _MatrixType MatrixType;
};
diff --git a/Eigen/src/SVD/SVDBase.h b/Eigen/src/SVD/SVDBase.h
index 429414797..68df48921 100644
--- a/Eigen/src/SVD/SVDBase.h
+++ b/Eigen/src/SVD/SVDBase.h
@@ -17,6 +17,18 @@
#define EIGEN_SVDBASE_H
namespace Eigen {
+
+namespace internal {
+template<typename Derived> struct traits<SVDBase<Derived> >
+ : traits<Derived>
+{
+ typedef MatrixXpr XprKind;
+ typedef SolverStorage StorageKind;
+ typedef int StorageIndex;
+ enum { Flags = 0 };
+};
+}
+
/** \ingroup SVD_Module
*
*
@@ -44,15 +56,18 @@ namespace Eigen {
* terminate in finite (and reasonable) time.
* \sa class BDCSVD, class JacobiSVD
*/
-template<typename Derived>
-class SVDBase
+template<typename Derived> class SVDBase
+ : public SolverBase<SVDBase<Derived> >
{
+public:
+
+ template<typename Derived_>
+ friend struct internal::solve_assertion;
-public:
typedef typename internal::traits<Derived>::MatrixType MatrixType;
typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
- typedef typename MatrixType::StorageIndex StorageIndex;
+ typedef typename Eigen::internal::traits<SVDBase>::StorageIndex StorageIndex;
typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
enum {
RowsAtCompileTime = MatrixType::RowsAtCompileTime,
@@ -180,8 +195,10 @@ public:
RealScalar threshold() const
{
eigen_assert(m_isInitialized || m_usePrescribedThreshold);
+ // this temporary is needed to work around an MSVC issue
+ Index diagSize = (std::max<Index>)(1,m_diagSize);
return m_usePrescribedThreshold ? m_prescribedThreshold
- : (std::max<Index>)(1,m_diagSize)*NumTraits<Scalar>::epsilon();
+ : diagSize*NumTraits<Scalar>::epsilon();
}
/** \returns true if \a U (full or thin) is asked for in this SVD decomposition */
@@ -192,6 +209,7 @@ public:
inline Index rows() const { return m_rows; }
inline Index cols() const { return m_cols; }
+ #ifdef EIGEN_PARSED_BY_DOXYGEN
/** \returns a (least squares) solution of \f$ A x = b \f$ using the current SVD decomposition of A.
*
* \param b the right-hand-side of the equation to solve.
@@ -203,16 +221,15 @@ public:
*/
template<typename Rhs>
inline const Solve<Derived, Rhs>
- solve(const MatrixBase<Rhs>& b) const
- {
- eigen_assert(m_isInitialized && "SVD is not initialized.");
- eigen_assert(computeU() && computeV() && "SVD::solve() requires both unitaries U and V to be computed (thin unitaries suffice).");
- return Solve<Derived, Rhs>(derived(), b.derived());
- }
-
+ solve(const MatrixBase<Rhs>& b) const;
+ #endif
+
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename RhsType, typename DstType>
void _solve_impl(const RhsType &rhs, DstType &dst) const;
+
+ template<bool Conjugate, typename RhsType, typename DstType>
+ void _solve_impl_transposed(const RhsType &rhs, DstType &dst) const;
#endif
protected:
@@ -221,6 +238,14 @@ protected:
{
EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
}
+
+ template<bool Transpose_, typename Rhs>
+ void _check_solve_assertion(const Rhs& b) const {
+ EIGEN_ONLY_USED_FOR_DEBUG(b);
+ eigen_assert(m_isInitialized && "SVD is not initialized.");
+ eigen_assert(computeU() && computeV() && "SVDBase::solve(): Both unitaries U and V are required to be computed (thin unitaries suffice).");
+ eigen_assert((Transpose_?cols():rows())==b.rows() && "SVDBase::solve(): invalid number of rows of the right hand side matrix b");
+ }
// return true if already allocated
bool allocate(Index rows, Index cols, unsigned int computationOptions) ;
@@ -243,6 +268,10 @@ protected:
: m_isInitialized(false),
m_isAllocated(false),
m_usePrescribedThreshold(false),
+ m_computeFullU(false),
+ m_computeThinU(false),
+ m_computeFullV(false),
+ m_computeThinV(false),
m_computationOptions(0),
m_rows(-1), m_cols(-1), m_diagSize(0)
{
@@ -257,17 +286,30 @@ template<typename Derived>
template<typename RhsType, typename DstType>
void SVDBase<Derived>::_solve_impl(const RhsType &rhs, DstType &dst) const
{
- eigen_assert(rhs.rows() == rows());
-
// A = U S V^*
// So A^{-1} = V S^{-1} U^*
- Matrix<Scalar, Dynamic, RhsType::ColsAtCompileTime, 0, MatrixType::MaxRowsAtCompileTime, RhsType::MaxColsAtCompileTime> tmp;
+ Matrix<typename RhsType::Scalar, Dynamic, RhsType::ColsAtCompileTime, 0, MatrixType::MaxRowsAtCompileTime, RhsType::MaxColsAtCompileTime> tmp;
Index l_rank = rank();
tmp.noalias() = m_matrixU.leftCols(l_rank).adjoint() * rhs;
tmp = m_singularValues.head(l_rank).asDiagonal().inverse() * tmp;
dst = m_matrixV.leftCols(l_rank) * tmp;
}
+
+template<typename Derived>
+template<bool Conjugate, typename RhsType, typename DstType>
+void SVDBase<Derived>::_solve_impl_transposed(const RhsType &rhs, DstType &dst) const
+{
+ // A = U S V^*
+ // So A^{-*} = U S^{-1} V^*
+ // And A^{-T} = U_conj S^{-1} V^T
+ Matrix<typename RhsType::Scalar, Dynamic, RhsType::ColsAtCompileTime, 0, MatrixType::MaxRowsAtCompileTime, RhsType::MaxColsAtCompileTime> tmp;
+ Index l_rank = rank();
+
+ tmp.noalias() = m_matrixV.leftCols(l_rank).transpose().template conjugateIf<Conjugate>() * rhs;
+ tmp = m_singularValues.head(l_rank).asDiagonal().inverse() * tmp;
+ dst = m_matrixU.template conjugateIf<!Conjugate>().leftCols(l_rank) * tmp;
+}
#endif
template<typename MatrixType>
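For context, a minimal usage sketch (not part of the patch) of the forward and transposed solve paths implemented above; it assumes SVDBase now exposes the SolverBase-style adjoint()/transpose() solve API, which is what the new _solve_impl_transposed hook suggests.

```cpp
// Sketch only: least-squares solves with BDCSVD; the adjoint() solve path is assumed
// to be enabled by the _solve_impl_transposed hook added in this patch.
#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(6, 4);
  Eigen::VectorXd b = Eigen::VectorXd::Random(6);
  Eigen::VectorXd c = Eigen::VectorXd::Random(4);

  Eigen::BDCSVD<Eigen::MatrixXd> svd(A, Eigen::ComputeThinU | Eigen::ComputeThinV);
  Eigen::VectorXd x = svd.solve(b);            // x = V S^{-1} U^* b
  Eigen::VectorXd y = svd.adjoint().solve(c);  // y = U S^{-1} V^* c (assumed API)

  std::cout << (A * x - b).norm() << " " << (A.adjoint() * y - c).norm() << "\n";
}
```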
diff --git a/Eigen/src/SparseCholesky/SimplicialCholesky.h b/Eigen/src/SparseCholesky/SimplicialCholesky.h
index b9ca94bc3..1ee4fad5d 100644
--- a/Eigen/src/SparseCholesky/SimplicialCholesky.h
+++ b/Eigen/src/SparseCholesky/SimplicialCholesky.h
@@ -80,11 +80,19 @@ class SimplicialCholeskyBase : public SparseSolverBase<Derived>
/** Default constructor */
SimplicialCholeskyBase()
- : m_info(Success), m_shiftOffset(0), m_shiftScale(1)
+ : m_info(Success),
+ m_factorizationIsOk(false),
+ m_analysisIsOk(false),
+ m_shiftOffset(0),
+ m_shiftScale(1)
{}
explicit SimplicialCholeskyBase(const MatrixType& matrix)
- : m_info(Success), m_shiftOffset(0), m_shiftScale(1)
+ : m_info(Success),
+ m_factorizationIsOk(false),
+ m_analysisIsOk(false),
+ m_shiftOffset(0),
+ m_shiftScale(1)
{
derived().compute(matrix);
}
diff --git a/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h b/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h
index 0aa92f8bc..7275db2cc 100644
--- a/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h
+++ b/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h
@@ -2,46 +2,21 @@
// for linear algebra.
//
// Copyright (C) 2008-2012 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
/*
-
NOTE: these functions have been adapted from the LDL library:
LDL Copyright (c) 2005 by Timothy A. Davis. All Rights Reserved.
-LDL License:
-
- Your use or distribution of LDL or any modified version of
- LDL implies that you agree to this License.
-
- This library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- This library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with this library; if not, write to the Free Software
- Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
- USA
-
- Permission is hereby granted to use or copy this program under the
- terms of the GNU LGPL, provided that the Copyright, this License,
- and the Availability of the original version is retained on all copies.
- User documentation of any code that uses this code or any modified
- version of this code must cite the Copyright, this License, the
- Availability note, and "Used by permission." Permission to modify
- the code and to distribute modified code is granted, provided the
- Copyright, this License, and the Availability note are retained,
- and a notice that the code was modified is included.
+The author of LDL, Timothy A. Davis, has executed a license with Google LLC
+to permit distribution of this code and derivative works as part of Eigen under
+the Mozilla Public License v. 2.0, as stated at the top of this file.
*/
-#include "../Core/util/NonMPL2.h"
-
#ifndef EIGEN_SIMPLICIAL_CHOLESKY_IMPL_H
#define EIGEN_SIMPLICIAL_CHOLESKY_IMPL_H
diff --git a/Eigen/src/SparseCore/CompressedStorage.h b/Eigen/src/SparseCore/CompressedStorage.h
index d89fa0dae..acd986fab 100644
--- a/Eigen/src/SparseCore/CompressedStorage.h
+++ b/Eigen/src/SparseCore/CompressedStorage.h
@@ -207,6 +207,22 @@ class CompressedStorage
return m_values[id];
}
+ void moveChunk(Index from, Index to, Index chunkSize)
+ {
+ eigen_internal_assert(to+chunkSize <= m_size);
+ if(to>from && from+chunkSize>to)
+ {
+ // move backward
+ internal::smart_memmove(m_values+from, m_values+from+chunkSize, m_values+to);
+ internal::smart_memmove(m_indices+from, m_indices+from+chunkSize, m_indices+to);
+ }
+ else
+ {
+ internal::smart_copy(m_values+from, m_values+from+chunkSize, m_values+to);
+ internal::smart_copy(m_indices+from, m_indices+from+chunkSize, m_indices+to);
+ }
+ }
+
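A standalone sketch of the move-vs-copy decision made by moveChunk above, using std::memmove/std::copy in place of Eigen's internal smart_memmove/smart_copy; the helper move_chunk and the POD-only restriction are illustrative assumptions, not library API.

```cpp
// Sketch only (POD-like element types): shift `n` elements from `from` to `to`
// within one buffer, mirroring the overlap test used by moveChunk above.
#include <algorithm>
#include <cstring>
#include <vector>

template <typename T>
void move_chunk(std::vector<T>& buf, std::size_t from, std::size_t to, std::size_t n) {
  if (to > from && from + n > to)
    // destination overlaps the tail of the source: a memmove-style backward move is needed
    std::memmove(buf.data() + to, buf.data() + from, n * sizeof(T));
  else
    // disjoint (or backward) move: a plain element-wise copy is enough
    std::copy(buf.data() + from, buf.data() + from + n, buf.data() + to);
}
```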
void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
{
Index k = 0;
diff --git a/Eigen/src/SparseCore/SparseAssign.h b/Eigen/src/SparseCore/SparseAssign.h
index 71452e75e..905485c88 100644
--- a/Eigen/src/SparseCore/SparseAssign.h
+++ b/Eigen/src/SparseCore/SparseAssign.h
@@ -246,35 +246,22 @@ struct Assignment<DstXprType, SrcXprType, Functor, Diagonal2Sparse>
{
typedef typename DstXprType::StorageIndex StorageIndex;
typedef typename DstXprType::Scalar Scalar;
- typedef Array<StorageIndex,Dynamic,1> ArrayXI;
- typedef Array<Scalar,Dynamic,1> ArrayXS;
- template<int Options>
- static void run(SparseMatrix<Scalar,Options,StorageIndex> &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
- {
- Index dstRows = src.rows();
- Index dstCols = src.cols();
- if((dst.rows()!=dstRows) || (dst.cols()!=dstCols))
- dst.resize(dstRows, dstCols);
- Index size = src.diagonal().size();
- dst.makeCompressed();
- dst.resizeNonZeros(size);
- Map<ArrayXI>(dst.innerIndexPtr(), size).setLinSpaced(0,StorageIndex(size)-1);
- Map<ArrayXI>(dst.outerIndexPtr(), size+1).setLinSpaced(0,StorageIndex(size));
- Map<ArrayXS>(dst.valuePtr(), size) = src.diagonal();
- }
+ template<int Options, typename AssignFunc>
+ static void run(SparseMatrix<Scalar,Options,StorageIndex> &dst, const SrcXprType &src, const AssignFunc &func)
+ { dst.assignDiagonal(src.diagonal(), func); }
template<typename DstDerived>
static void run(SparseMatrixBase<DstDerived> &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
- {
- dst.diagonal() = src.diagonal();
- }
+ { dst.derived().diagonal() = src.diagonal(); }
- static void run(DstXprType &dst, const SrcXprType &src, const internal::add_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
- { dst.diagonal() += src.diagonal(); }
+ template<typename DstDerived>
+ static void run(SparseMatrixBase<DstDerived> &dst, const SrcXprType &src, const internal::add_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
+ { dst.derived().diagonal() += src.diagonal(); }
- static void run(DstXprType &dst, const SrcXprType &src, const internal::sub_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
- { dst.diagonal() -= src.diagonal(); }
+ template<typename DstDerived>
+ static void run(SparseMatrixBase<DstDerived> &dst, const SrcXprType &src, const internal::sub_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
+ { dst.derived().diagonal() -= src.diagonal(); }
};
} // end namespace internal
diff --git a/Eigen/src/SparseCore/SparseCompressedBase.h b/Eigen/src/SparseCore/SparseCompressedBase.h
index e0b3c22b6..6a2c7a8ce 100644
--- a/Eigen/src/SparseCore/SparseCompressedBase.h
+++ b/Eigen/src/SparseCore/SparseCompressedBase.h
@@ -128,6 +128,28 @@ class SparseCompressedBase
protected:
/** Default constructor. Do nothing. */
SparseCompressedBase() {}
+
+ /** \internal return the index of the coefficient at (row,col), or the index just before it if that coefficient does not exist.
+ * This is an analogue of std::lower_bound.
+ */
+ internal::LowerBoundIndex lower_bound(Index row, Index col) const
+ {
+ eigen_internal_assert(row>=0 && row<this->rows() && col>=0 && col<this->cols());
+
+ const Index outer = Derived::IsRowMajor ? row : col;
+ const Index inner = Derived::IsRowMajor ? col : row;
+
+ Index start = this->outerIndexPtr()[outer];
+ Index end = this->isCompressed() ? this->outerIndexPtr()[outer+1] : this->outerIndexPtr()[outer] + this->innerNonZeroPtr()[outer];
+ eigen_assert(end>=start && "you are using a non finalized sparse matrix or written coefficient does not exist");
+ internal::LowerBoundIndex p;
+ p.value = std::lower_bound(this->innerIndexPtr()+start, this->innerIndexPtr()+end,inner) - this->innerIndexPtr();
+ p.found = (p.value<end) && (this->innerIndexPtr()[p.value]==inner);
+ return p;
+ }
+
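For illustration, the same search written against plain CSR arrays, returning a (position, found) pair like the LowerBoundIndex struct introduced in SparseUtil.h later in this diff; lower_bound_csr and its parameters are hypothetical names, not Eigen API.

```cpp
// Sketch only: binary search for column `inner` within row `outer` of a CSR matrix.
#include <algorithm>
#include <cstddef>
#include <utility>
#include <vector>

std::pair<std::ptrdiff_t, bool>
lower_bound_csr(const std::vector<int>& outerIndex,  // row starts, size rows+1 (compressed)
                const std::vector<int>& innerIndex,  // sorted column index of each stored value
                int outer, int inner) {
  const int start = outerIndex[outer];
  const int end   = outerIndex[outer + 1];
  auto it = std::lower_bound(innerIndex.begin() + start, innerIndex.begin() + end, inner);
  const std::ptrdiff_t p = it - innerIndex.begin();
  return { p, p < end && innerIndex[p] == inner };
}
```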
+ friend struct internal::evaluator<SparseCompressedBase<Derived> >;
+
private:
template<typename OtherDerived> explicit SparseCompressedBase(const SparseCompressedBase<OtherDerived>&);
};
@@ -333,17 +355,8 @@ protected:
Index find(Index row, Index col) const
{
- eigen_internal_assert(row>=0 && row<m_matrix->rows() && col>=0 && col<m_matrix->cols());
-
- const Index outer = Derived::IsRowMajor ? row : col;
- const Index inner = Derived::IsRowMajor ? col : row;
-
- Index start = m_matrix->outerIndexPtr()[outer];
- Index end = m_matrix->isCompressed() ? m_matrix->outerIndexPtr()[outer+1] : m_matrix->outerIndexPtr()[outer] + m_matrix->innerNonZeroPtr()[outer];
- eigen_assert(end>=start && "you are using a non finalized sparse matrix or written coefficient does not exist");
- const Index p = std::lower_bound(m_matrix->innerIndexPtr()+start, m_matrix->innerIndexPtr()+end,inner) - m_matrix->innerIndexPtr();
-
- return ((p<end) && (m_matrix->innerIndexPtr()[p]==inner)) ? p : Dynamic;
+ internal::LowerBoundIndex p = m_matrix->lower_bound(row,col);
+ return p.found ? p.value : Dynamic;
}
const Derived *m_matrix;
diff --git a/Eigen/src/SparseCore/SparseMatrix.h b/Eigen/src/SparseCore/SparseMatrix.h
index eedae47e8..63dd1cc32 100644
--- a/Eigen/src/SparseCore/SparseMatrix.h
+++ b/Eigen/src/SparseCore/SparseMatrix.h
@@ -99,6 +99,8 @@ class SparseMatrix
typedef SparseCompressedBase<SparseMatrix> Base;
using Base::convert_index;
friend class SparseVector<_Scalar,0,_StorageIndex>;
+ template<typename, typename, typename, typename, typename>
+ friend struct internal::Assignment;
public:
using Base::isCompressed;
using Base::nonZeros;
@@ -502,7 +504,7 @@ class SparseMatrix
m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
}
}
-
+
/** Suppresses all nonzeros which are \b much \b smaller \b than \a reference under the tolerance \a epsilon */
void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
{
@@ -895,6 +897,113 @@ public:
m_data.index(p) = convert_index(inner);
return (m_data.value(p) = Scalar(0));
}
+protected:
+ struct IndexPosPair {
+ IndexPosPair(Index a_i, Index a_p) : i(a_i), p(a_p) {}
+ Index i;
+ Index p;
+ };
+
+ /** \internal assign \a diagXpr to the diagonal of \c *this
+ * There are different strategies:
+ * 1 - if *this is overwritten (Func==assign_op) or *this is empty, then we can treat *this as a dense vector expression.
+ * 2 - otherwise, for each diagonal coeff,
+ * 2.a - if it already exists, then we update it,
+ * 2.b - otherwise, if *this is uncompressed and the current inner-vector has room for at least 1 element, then we perform an in-place insertion.
+ * 2.c - otherwise, we'll have to reallocate and copy everything, so instead of doing so for each new element, the entry is recorded in a std::vector.
+ * 3 - at the end, if some entries failed to be inserted in-place, then we allocate a new buffer, copy each chunk at the right position, and insert the new elements.
+ *
+ * TODO: some piece of code could be isolated and reused for a general in-place update strategy.
+ * TODO: if we start to defer the insertion of some elements (i.e., case 2.c executed once),
+ * then it *might* be better to disable case 2.b since those entries will have to be copied anyway.
+ */
+ template<typename DiagXpr, typename Func>
+ void assignDiagonal(const DiagXpr diagXpr, const Func& assignFunc)
+ {
+ Index n = diagXpr.size();
+
+ const bool overwrite = internal::is_same<Func, internal::assign_op<Scalar,Scalar> >::value;
+ if(overwrite)
+ {
+ if((this->rows()!=n) || (this->cols()!=n))
+ this->resize(n, n);
+ }
+
+ if(m_data.size()==0 || overwrite)
+ {
+ typedef Array<StorageIndex,Dynamic,1> ArrayXI;
+ this->makeCompressed();
+ this->resizeNonZeros(n);
+ Eigen::Map<ArrayXI>(this->innerIndexPtr(), n).setLinSpaced(0,StorageIndex(n)-1);
+ Eigen::Map<ArrayXI>(this->outerIndexPtr(), n+1).setLinSpaced(0,StorageIndex(n));
+ Eigen::Map<Array<Scalar,Dynamic,1> > values = this->coeffs();
+ values.setZero();
+ internal::call_assignment_no_alias(values, diagXpr, assignFunc);
+ }
+ else
+ {
+ bool isComp = isCompressed();
+ internal::evaluator<DiagXpr> diaEval(diagXpr);
+ std::vector<IndexPosPair> newEntries;
+
+ // 1 - try in-place update and record insertion failures
+ for(Index i = 0; i<n; ++i)
+ {
+ internal::LowerBoundIndex lb = this->lower_bound(i,i);
+ Index p = lb.value;
+ if(lb.found)
+ {
+ // the coeff already exists
+ assignFunc.assignCoeff(m_data.value(p), diaEval.coeff(i));
+ }
+ else if((!isComp) && m_innerNonZeros[i] < (m_outerIndex[i+1]-m_outerIndex[i]))
+ {
+ // non compressed mode with local room for inserting one element
+ m_data.moveChunk(p, p+1, m_outerIndex[i]+m_innerNonZeros[i]-p);
+ m_innerNonZeros[i]++;
+ m_data.value(p) = Scalar(0);
+ m_data.index(p) = StorageIndex(i);
+ assignFunc.assignCoeff(m_data.value(p), diaEval.coeff(i));
+ }
+ else
+ {
+ // defer insertion
+ newEntries.push_back(IndexPosPair(i,p));
+ }
+ }
+ // 2 - insert deferred entries
+ Index n_entries = Index(newEntries.size());
+ if(n_entries>0)
+ {
+ Storage newData(m_data.size()+n_entries);
+ Index prev_p = 0;
+ Index prev_i = 0;
+ for(Index k=0; k<n_entries;++k)
+ {
+ Index i = newEntries[k].i;
+ Index p = newEntries[k].p;
+ internal::smart_copy(m_data.valuePtr()+prev_p, m_data.valuePtr()+p, newData.valuePtr()+prev_p+k);
+ internal::smart_copy(m_data.indexPtr()+prev_p, m_data.indexPtr()+p, newData.indexPtr()+prev_p+k);
+ for(Index j=prev_i;j<i;++j)
+ m_outerIndex[j+1] += k;
+ if(!isComp)
+ m_innerNonZeros[i]++;
+ prev_p = p;
+ prev_i = i;
+ newData.value(p+k) = Scalar(0);
+ newData.index(p+k) = StorageIndex(i);
+ assignFunc.assignCoeff(newData.value(p+k), diaEval.coeff(i));
+ }
+ {
+ internal::smart_copy(m_data.valuePtr()+prev_p, m_data.valuePtr()+m_data.size(), newData.valuePtr()+prev_p+n_entries);
+ internal::smart_copy(m_data.indexPtr()+prev_p, m_data.indexPtr()+m_data.size(), newData.indexPtr()+prev_p+n_entries);
+ for(Index j=prev_i+1;j<=m_outerSize;++j)
+ m_outerIndex[j] += n_entries;
+ }
+ m_data.swap(newData);
+ }
+ }
+ }
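A hedged usage sketch of the user-facing operation this member is assumed to serve: a compound diagonal update on a SparseMatrix, dispatched through the Diagonal2Sparse assignment specialization shown earlier in this diff.

```cpp
// Sketch only: a compound diagonal update on a SparseMatrix. Existing diagonal entries
// are updated in place (case 2.a); missing ones are inserted in place when there is room
// (2.b) or through a single reallocation at the end (2.c / 3).
#include <Eigen/Sparse>
#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::SparseMatrix<double> S(4, 4);
  S.insert(0, 0) = 1.0;                 // only part of the diagonal is stored initially
  S.insert(2, 1) = 5.0;
  S.makeCompressed();

  Eigen::VectorXd d = Eigen::VectorXd::Constant(4, 3.0);
  S += d.asDiagonal();                  // assumed to route through assignDiagonal(...)

  std::cout << Eigen::MatrixXd(S) << "\n";
}
```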
private:
static void check_template_parameters()
diff --git a/Eigen/src/SparseCore/SparseUtil.h b/Eigen/src/SparseCore/SparseUtil.h
index 74df0d496..ceb936887 100644
--- a/Eigen/src/SparseCore/SparseUtil.h
+++ b/Eigen/src/SparseCore/SparseUtil.h
@@ -140,6 +140,14 @@ struct SparseSelfAdjointShape { static std::string debugName() { return "SparseS
template<> struct glue_shapes<SparseShape,SelfAdjointShape> { typedef SparseSelfAdjointShape type; };
template<> struct glue_shapes<SparseShape,TriangularShape > { typedef SparseTriangularShape type; };
+// Return type of SparseCompressedBase::lower_bound.
+struct LowerBoundIndex {
+ LowerBoundIndex() : value(-1), found(false) {}
+ LowerBoundIndex(Index val, bool ok) : value(val), found(ok) {}
+ Index value;
+ bool found;
+};
+
} // end namespace internal
/** \ingroup SparseCore_Module
diff --git a/Eigen/src/SparseQR/SparseQR.h b/Eigen/src/SparseQR/SparseQR.h
index 1a28389e8..d1fb96f5c 100644
--- a/Eigen/src/SparseQR/SparseQR.h
+++ b/Eigen/src/SparseQR/SparseQR.h
@@ -41,15 +41,16 @@ namespace internal {
/**
* \ingroup SparseQR_Module
* \class SparseQR
- * \brief Sparse left-looking rank-revealing QR factorization
+ * \brief Sparse left-looking QR factorization with numerical column pivoting
*
- * This class implements a left-looking rank-revealing QR decomposition
- * of sparse matrices. When a column has a norm less than a given tolerance
+ * This class implements a left-looking QR decomposition of sparse matrices
+ * with numerical column pivoting.
+ * When a column has a norm less than a given tolerance
* it is implicitly permuted to the end. The QR factorization thus obtained is
* given by A*P = Q*R where R is upper triangular or trapezoidal.
*
* P is the column permutation which is the product of the fill-reducing and the
- * rank-revealing permutations. Use colsPermutation() to get it.
+ * numerical permutations. Use colsPermutation() to get it.
*
* Q is the orthogonal matrix represented as products of Householder reflectors.
* Use matrixQ() to get an expression and matrixQ().adjoint() to get the adjoint.
@@ -64,6 +65,17 @@ namespace internal {
*
* \implsparsesolverconcept
*
+ * The numerical pivoting strategy and default threshold are the same as in SuiteSparse QR, and are
+ * detailed in the following paper:
+ * <i>
+ * Tim Davis, "Algorithm 915, SuiteSparseQR: Multifrontal Multithreaded Rank-Revealing
+ * Sparse QR Factorization", ACM Trans. on Math. Soft. 38(1), 2011.
+ * </i>
+ * Even though it is qualified as "rank-revealing", this strategy might fail for some
+ * rank-deficient problems. When this class is used to solve linear or least-squares problems
+ * it is thus strongly recommended to check the accuracy of the computed solution. If it
+ * fails, it usually helps to increase the threshold with setPivotThreshold.
+ *
* \warning The input sparse matrix A must be in compressed mode (see SparseMatrix::makeCompressed()).
* \warning For complex matrices matrixQ().transpose() will actually return the adjoint matrix.
*
@@ -331,7 +343,7 @@ void SparseQR<MatrixType,OrderingType>::analyzePattern(const MatrixType& mat)
m_R.resize(m, n);
m_Q.resize(m, diagSize);
- // Allocate space for nonzero elements : rough estimation
+ // Allocate space for nonzero elements: rough estimation
m_R.reserve(2*mat.nonZeros()); //FIXME Get a more accurate estimation through symbolic factorization with the etree
m_Q.reserve(2*mat.nonZeros());
m_hcoeffs.resize(diagSize);
diff --git a/Eigen/src/plugins/ArrayCwiseUnaryOps.h b/Eigen/src/plugins/ArrayCwiseUnaryOps.h
index e928db467..2f99ee0b2 100644
--- a/Eigen/src/plugins/ArrayCwiseUnaryOps.h
+++ b/Eigen/src/plugins/ArrayCwiseUnaryOps.h
@@ -23,6 +23,11 @@ typedef CwiseUnaryOp<internal::scalar_atan_op<Scalar>, const Derived> AtanReturn
typedef CwiseUnaryOp<internal::scalar_tanh_op<Scalar>, const Derived> TanhReturnType;
typedef CwiseUnaryOp<internal::scalar_logistic_op<Scalar>, const Derived> LogisticReturnType;
typedef CwiseUnaryOp<internal::scalar_sinh_op<Scalar>, const Derived> SinhReturnType;
+#if EIGEN_HAS_CXX11_MATH
+typedef CwiseUnaryOp<internal::scalar_atanh_op<Scalar>, const Derived> AtanhReturnType;
+typedef CwiseUnaryOp<internal::scalar_asinh_op<Scalar>, const Derived> AsinhReturnType;
+typedef CwiseUnaryOp<internal::scalar_acosh_op<Scalar>, const Derived> AcoshReturnType;
+#endif
typedef CwiseUnaryOp<internal::scalar_cosh_op<Scalar>, const Derived> CoshReturnType;
typedef CwiseUnaryOp<internal::scalar_square_op<Scalar>, const Derived> SquareReturnType;
typedef CwiseUnaryOp<internal::scalar_cube_op<Scalar>, const Derived> CubeReturnType;
@@ -327,7 +332,7 @@ sinh() const
* Example: \include Cwise_cosh.cpp
* Output: \verbinclude Cwise_cosh.out
*
- * \sa <a href="group__CoeffwiseMathFunctions.html#cwisetable_cosh">Math functions</a>, tan(), sinh(), cosh()
+ * \sa <a href="group__CoeffwiseMathFunctions.html#cwisetable_cosh">Math functions</a>, tanh(), sinh(), cosh()
*/
EIGEN_DEVICE_FUNC
inline const CoshReturnType
@@ -336,6 +341,41 @@ cosh() const
return CoshReturnType(derived());
}
+#if EIGEN_HAS_CXX11_MATH
+/** \returns an expression of the coefficient-wise inverse hyperbolic tangent of *this.
+ *
+ * \sa <a href="group__CoeffwiseMathFunctions.html#cwisetable_atanh">Math functions</a>, atanh(), asinh(), acosh()
+ */
+EIGEN_DEVICE_FUNC
+inline const AtanhReturnType
+atanh() const
+{
+ return AtanhReturnType(derived());
+}
+
+/** \returns an expression of the coefficient-wise inverse hyperbolic sine of *this.
+ *
+ * \sa <a href="group__CoeffwiseMathFunctions.html#cwisetable_asinh">Math functions</a>, atanh(), asinh(), acosh()
+ */
+EIGEN_DEVICE_FUNC
+inline const AsinhReturnType
+asinh() const
+{
+ return AsinhReturnType(derived());
+}
+
+/** \returns an expression of the coefficient-wise inverse hyperbolic cosine of *this.
+ *
+ * \sa <a href="group__CoeffwiseMathFunctions.html#cwisetable_acosh">Math functions</a>, atanh(), asinh(), acosh()
+ */
+EIGEN_DEVICE_FUNC
+inline const AcoshReturnType
+acosh() const
+{
+ return AcoshReturnType(derived());
+}
+#endif
+
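A small usage sketch of the new coefficient-wise inverse hyperbolic functions added above, assuming a compiler configuration where EIGEN_HAS_CXX11_MATH is enabled.

```cpp
// Sketch only: the new Array API (atanh/asinh/acosh) introduced by this patch.
#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::ArrayXd a = Eigen::ArrayXd::LinSpaced(5, -0.9, 0.9);
  std::cout << a.atanh().transpose() << "\n"                 // inverse hyperbolic tangent, |x| < 1
            << a.asinh().transpose() << "\n"                 // inverse hyperbolic sine, any real x
            << (1.0 + a.abs()).acosh().transpose() << "\n";  // inverse hyperbolic cosine, x >= 1
}
```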
/** \returns an expression of the coefficient-wise logistic of *this.
*/
EIGEN_DEVICE_FUNC
diff --git a/Eigen/src/plugins/BlockMethods.h b/Eigen/src/plugins/BlockMethods.h
index ef620ab7a..935a604b6 100644
--- a/Eigen/src/plugins/BlockMethods.h
+++ b/Eigen/src/plugins/BlockMethods.h
@@ -87,11 +87,11 @@ EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL
/// \sa class Block, fix, fix<N>(int)
///
template<typename NRowsType, typename NColsType>
-EIGEN_DEVICE_FUNC
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
#ifndef EIGEN_PARSED_BY_DOXYGEN
-inline typename FixedBlockXpr<internal::get_fixed_value<NRowsType>::value,internal::get_fixed_value<NColsType>::value>::Type
+typename FixedBlockXpr<internal::get_fixed_value<NRowsType>::value,internal::get_fixed_value<NColsType>::value>::Type
#else
-inline typename FixedBlockXpr<...,...>::Type
+typename FixedBlockXpr<...,...>::Type
#endif
block(Index startRow, Index startCol, NRowsType blockRows, NColsType blockCols)
{
@@ -101,11 +101,11 @@ block(Index startRow, Index startCol, NRowsType blockRows, NColsType blockCols)
/// This is the const version of block(Index,Index,NRowsType,NColsType)
template<typename NRowsType, typename NColsType>
-EIGEN_DEVICE_FUNC
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
#ifndef EIGEN_PARSED_BY_DOXYGEN
-inline const typename ConstFixedBlockXpr<internal::get_fixed_value<NRowsType>::value,internal::get_fixed_value<NColsType>::value>::Type
+const typename ConstFixedBlockXpr<internal::get_fixed_value<NRowsType>::value,internal::get_fixed_value<NColsType>::value>::Type
#else
-inline const typename ConstFixedBlockXpr<...,...>::Type
+const typename ConstFixedBlockXpr<...,...>::Type
#endif
block(Index startRow, Index startCol, NRowsType blockRows, NColsType blockCols) const
{
@@ -133,11 +133,11 @@ EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL
/// \sa block(Index,Index,NRowsType,NColsType), class Block
///
template<typename NRowsType, typename NColsType>
-EIGEN_DEVICE_FUNC
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
#ifndef EIGEN_PARSED_BY_DOXYGEN
-inline typename FixedBlockXpr<internal::get_fixed_value<NRowsType>::value,internal::get_fixed_value<NColsType>::value>::Type
+typename FixedBlockXpr<internal::get_fixed_value<NRowsType>::value,internal::get_fixed_value<NColsType>::value>::Type
#else
-inline typename FixedBlockXpr<...,...>::Type
+typename FixedBlockXpr<...,...>::Type
#endif
topRightCorner(NRowsType cRows, NColsType cCols)
{
@@ -147,11 +147,11 @@ topRightCorner(NRowsType cRows, NColsType cCols)
/// This is the const version of topRightCorner(NRowsType, NColsType).
template<typename NRowsType, typename NColsType>
-EIGEN_DEVICE_FUNC
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
#ifndef EIGEN_PARSED_BY_DOXYGEN
-inline const typename ConstFixedBlockXpr<internal::get_fixed_value<NRowsType>::value,internal::get_fixed_value<NColsType>::value>::Type
+const typename ConstFixedBlockXpr<internal::get_fixed_value<NRowsType>::value,internal::get_fixed_value<NColsType>::value>::Type
#else
-inline const typename ConstFixedBlockXpr<...,...>::Type
+const typename ConstFixedBlockXpr<...,...>::Type
#endif
topRightCorner(NRowsType cRows, NColsType cCols) const
{
@@ -172,16 +172,16 @@ EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL
/// \sa class Block, block<int,int>(Index,Index)
///
template<int CRows, int CCols>
-EIGEN_DEVICE_FUNC
-inline typename FixedBlockXpr<CRows,CCols>::Type topRightCorner()
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+typename FixedBlockXpr<CRows,CCols>::Type topRightCorner()
{
return typename FixedBlockXpr<CRows,CCols>::Type(derived(), 0, cols() - CCols);
}
/// This is the const version of topRightCorner<int, int>().
template<int CRows, int CCols>
-EIGEN_DEVICE_FUNC
-inline const typename ConstFixedBlockXpr<CRows,CCols>::Type topRightCorner() const
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+const typename ConstFixedBlockXpr<CRows,CCols>::Type topRightCorner() const
{
return typename ConstFixedBlockXpr<CRows,CCols>::Type(derived(), 0, cols() - CCols);
}
@@ -206,14 +206,16 @@ EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL
/// \sa class Block
///
template<int CRows, int CCols>
-inline typename FixedBlockXpr<CRows,CCols>::Type topRightCorner(Index cRows, Index cCols)
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+typename FixedBlockXpr<CRows,CCols>::Type topRightCorner(Index cRows, Index cCols)
{
return typename FixedBlockXpr<CRows,CCols>::Type(derived(), 0, cols() - cCols, cRows, cCols);
}
/// This is the const version of topRightCorner<int, int>(Index, Index).
template<int CRows, int CCols>
-inline const typename ConstFixedBlockXpr<CRows,CCols>::Type topRightCorner(Index cRows, Index cCols) const
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+const typename ConstFixedBlockXpr<CRows,CCols>::Type topRightCorner(Index cRows, Index cCols) const
{
return typename ConstFixedBlockXpr<CRows,CCols>::Type(derived(), 0, cols() - cCols, cRows, cCols);
}
@@ -238,11 +240,11 @@ EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL
/// \sa block(Index,Index,NRowsType,NColsType), class Block
///
template<typename NRowsType, typename NColsType>
-EIGEN_DEVICE_FUNC
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
#ifndef EIGEN_PARSED_BY_DOXYGEN
-inline typename FixedBlockXpr<internal::get_fixed_value<NRowsType>::value,internal::get_fixed_value<NColsType>::value>::Type
+typename FixedBlockXpr<internal::get_fixed_value<NRowsType>::value,internal::get_fixed_value<NColsType>::value>::Type
#else
-inline typename FixedBlockXpr<...,...>::Type
+typename FixedBlockXpr<...,...>::Type
#endif
topLeftCorner(NRowsType cRows, NColsType cCols)
{
@@ -252,11 +254,11 @@ topLeftCorner(NRowsType cRows, NColsType cCols)
/// This is the const version of topLeftCorner(Index, Index).
template<typename NRowsType, typename NColsType>
-EIGEN_DEVICE_FUNC
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
#ifndef EIGEN_PARSED_BY_DOXYGEN
-inline const typename ConstFixedBlockXpr<internal::get_fixed_value<NRowsType>::value,internal::get_fixed_value<NColsType>::value>::Type
+const typename ConstFixedBlockXpr<internal::get_fixed_value<NRowsType>::value,internal::get_fixed_value<NColsType>::value>::Type
#else
-inline const typename ConstFixedBlockXpr<...,...>::Type
+const typename ConstFixedBlockXpr<...,...>::Type
#endif
topLeftCorner(NRowsType cRows, NColsType cCols) const
{
@@ -276,16 +278,16 @@ EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL
/// \sa block(Index,Index,NRowsType,NColsType), class Block
///
template<int CRows, int CCols>
-EIGEN_DEVICE_FUNC
-inline typename FixedBlockXpr<CRows,CCols>::Type topLeftCorner()
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+typename FixedBlockXpr<CRows,CCols>::Type topLeftCorner()
{
return typename FixedBlockXpr<CRows,CCols>::Type(derived(), 0, 0);
}
/// This is the const version of topLeftCorner<int, int>().
template<int CRows, int CCols>
-EIGEN_DEVICE_FUNC
-inline const typename ConstFixedBlockXpr<CRows,CCols>::Type topLeftCorner() const
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+const typename ConstFixedBlockXpr<CRows,CCols>::Type topLeftCorner() const
{
return typename ConstFixedBlockXpr<CRows,CCols>::Type(derived(), 0, 0);
}
@@ -310,14 +312,16 @@ EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL
/// \sa class Block
///
template<int CRows, int CCols>
-inline typename FixedBlockXpr<CRows,CCols>::Type topLeftCorner(Index cRows, Index cCols)
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+typename FixedBlockXpr<CRows,CCols>::Type topLeftCorner(Index cRows, Index cCols)
{
return typename FixedBlockXpr<CRows,CCols>::Type(derived(), 0, 0, cRows, cCols);
}
/// This is the const version of topLeftCorner<int, int>(Index, Index).
template<int CRows, int CCols>
-inline const typename ConstFixedBlockXpr<CRows,CCols>::Type topLeftCorner(Index cRows, Index cCols) const
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+const typename ConstFixedBlockXpr<CRows,CCols>::Type topLeftCorner(Index cRows, Index cCols) const
{
return typename ConstFixedBlockXpr<CRows,CCols>::Type(derived(), 0, 0, cRows, cCols);
}
@@ -342,11 +346,11 @@ EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL
/// \sa block(Index,Index,NRowsType,NColsType), class Block
///
template<typename NRowsType, typename NColsType>
-EIGEN_DEVICE_FUNC
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
#ifndef EIGEN_PARSED_BY_DOXYGEN
-inline typename FixedBlockXpr<internal::get_fixed_value<NRowsType>::value,internal::get_fixed_value<NColsType>::value>::Type
+typename FixedBlockXpr<internal::get_fixed_value<NRowsType>::value,internal::get_fixed_value<NColsType>::value>::Type
#else
-inline typename FixedBlockXpr<...,...>::Type
+typename FixedBlockXpr<...,...>::Type
#endif
bottomRightCorner(NRowsType cRows, NColsType cCols)
{
@@ -357,11 +361,11 @@ bottomRightCorner(NRowsType cRows, NColsType cCols)
/// This is the const version of bottomRightCorner(NRowsType, NColsType).
template<typename NRowsType, typename NColsType>
-EIGEN_DEVICE_FUNC
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
#ifndef EIGEN_PARSED_BY_DOXYGEN
-inline const typename ConstFixedBlockXpr<internal::get_fixed_value<NRowsType>::value,internal::get_fixed_value<NColsType>::value>::Type
+const typename ConstFixedBlockXpr<internal::get_fixed_value<NRowsType>::value,internal::get_fixed_value<NColsType>::value>::Type
#else
-inline const typename ConstFixedBlockXpr<...,...>::Type
+const typename ConstFixedBlockXpr<...,...>::Type
#endif
bottomRightCorner(NRowsType cRows, NColsType cCols) const
{
@@ -382,16 +386,16 @@ EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL
/// \sa block(Index,Index,NRowsType,NColsType), class Block
///
template<int CRows, int CCols>
-EIGEN_DEVICE_FUNC
-inline typename FixedBlockXpr<CRows,CCols>::Type bottomRightCorner()
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+typename FixedBlockXpr<CRows,CCols>::Type bottomRightCorner()
{
return typename FixedBlockXpr<CRows,CCols>::Type(derived(), rows() - CRows, cols() - CCols);
}
/// This is the const version of bottomRightCorner<int, int>().
template<int CRows, int CCols>
-EIGEN_DEVICE_FUNC
-inline const typename ConstFixedBlockXpr<CRows,CCols>::Type bottomRightCorner() const
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+const typename ConstFixedBlockXpr<CRows,CCols>::Type bottomRightCorner() const
{
return typename ConstFixedBlockXpr<CRows,CCols>::Type(derived(), rows() - CRows, cols() - CCols);
}
@@ -416,14 +420,16 @@ EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL
/// \sa class Block
///
template<int CRows, int CCols>
-inline typename FixedBlockXpr<CRows,CCols>::Type bottomRightCorner(Index cRows, Index cCols)
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+typename FixedBlockXpr<CRows,CCols>::Type bottomRightCorner(Index cRows, Index cCols)
{
return typename FixedBlockXpr<CRows,CCols>::Type(derived(), rows() - cRows, cols() - cCols, cRows, cCols);
}
/// This is the const version of bottomRightCorner<int, int>(Index, Index).
template<int CRows, int CCols>
-inline const typename ConstFixedBlockXpr<CRows,CCols>::Type bottomRightCorner(Index cRows, Index cCols) const
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+const typename ConstFixedBlockXpr<CRows,CCols>::Type bottomRightCorner(Index cRows, Index cCols) const
{
return typename ConstFixedBlockXpr<CRows,CCols>::Type(derived(), rows() - cRows, cols() - cCols, cRows, cCols);
}
@@ -448,11 +454,11 @@ EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL
/// \sa block(Index,Index,NRowsType,NColsType), class Block
///
template<typename NRowsType, typename NColsType>
-EIGEN_DEVICE_FUNC
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
#ifndef EIGEN_PARSED_BY_DOXYGEN
-inline typename FixedBlockXpr<internal::get_fixed_value<NRowsType>::value,internal::get_fixed_value<NColsType>::value>::Type
+typename FixedBlockXpr<internal::get_fixed_value<NRowsType>::value,internal::get_fixed_value<NColsType>::value>::Type
#else
-inline typename FixedBlockXpr<...,...>::Type
+typename FixedBlockXpr<...,...>::Type
#endif
bottomLeftCorner(NRowsType cRows, NColsType cCols)
{
@@ -463,11 +469,11 @@ bottomLeftCorner(NRowsType cRows, NColsType cCols)
/// This is the const version of bottomLeftCorner(NRowsType, NColsType).
template<typename NRowsType, typename NColsType>
-EIGEN_DEVICE_FUNC
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
#ifndef EIGEN_PARSED_BY_DOXYGEN
-inline typename ConstFixedBlockXpr<internal::get_fixed_value<NRowsType>::value,internal::get_fixed_value<NColsType>::value>::Type
+typename ConstFixedBlockXpr<internal::get_fixed_value<NRowsType>::value,internal::get_fixed_value<NColsType>::value>::Type
#else
-inline typename ConstFixedBlockXpr<...,...>::Type
+typename ConstFixedBlockXpr<...,...>::Type
#endif
bottomLeftCorner(NRowsType cRows, NColsType cCols) const
{
@@ -488,16 +494,16 @@ EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL
/// \sa block(Index,Index,NRowsType,NColsType), class Block
///
template<int CRows, int CCols>
-EIGEN_DEVICE_FUNC
-inline typename FixedBlockXpr<CRows,CCols>::Type bottomLeftCorner()
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+typename FixedBlockXpr<CRows,CCols>::Type bottomLeftCorner()
{
return typename FixedBlockXpr<CRows,CCols>::Type(derived(), rows() - CRows, 0);
}
/// This is the const version of bottomLeftCorner<int, int>().
template<int CRows, int CCols>
-EIGEN_DEVICE_FUNC
-inline const typename ConstFixedBlockXpr<CRows,CCols>::Type bottomLeftCorner() const
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+const typename ConstFixedBlockXpr<CRows,CCols>::Type bottomLeftCorner() const
{
return typename ConstFixedBlockXpr<CRows,CCols>::Type(derived(), rows() - CRows, 0);
}
@@ -522,14 +528,16 @@ EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL
/// \sa class Block
///
template<int CRows, int CCols>
-inline typename FixedBlockXpr<CRows,CCols>::Type bottomLeftCorner(Index cRows, Index cCols)
+EIGEN_STRONG_INLINE
+typename FixedBlockXpr<CRows,CCols>::Type bottomLeftCorner(Index cRows, Index cCols)
{
return typename FixedBlockXpr<CRows,CCols>::Type(derived(), rows() - cRows, 0, cRows, cCols);
}
/// This is the const version of bottomLeftCorner<int, int>(Index, Index).
template<int CRows, int CCols>
-inline const typename ConstFixedBlockXpr<CRows,CCols>::Type bottomLeftCorner(Index cRows, Index cCols) const
+EIGEN_STRONG_INLINE
+const typename ConstFixedBlockXpr<CRows,CCols>::Type bottomLeftCorner(Index cRows, Index cCols) const
{
return typename ConstFixedBlockXpr<CRows,CCols>::Type(derived(), rows() - cRows, 0, cRows, cCols);
}
@@ -553,11 +561,11 @@ EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(row-major)
/// \sa block(Index,Index,NRowsType,NColsType), class Block
///
template<typename NRowsType>
-EIGEN_DEVICE_FUNC
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
#ifndef EIGEN_PARSED_BY_DOXYGEN
-inline typename NRowsBlockXpr<internal::get_fixed_value<NRowsType>::value>::Type
+typename NRowsBlockXpr<internal::get_fixed_value<NRowsType>::value>::Type
#else
-inline typename NRowsBlockXpr<...>::Type
+typename NRowsBlockXpr<...>::Type
#endif
topRows(NRowsType n)
{
@@ -567,11 +575,11 @@ topRows(NRowsType n)
/// This is the const version of topRows(NRowsType).
template<typename NRowsType>
-EIGEN_DEVICE_FUNC
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
#ifndef EIGEN_PARSED_BY_DOXYGEN
-inline const typename ConstNRowsBlockXpr<internal::get_fixed_value<NRowsType>::value>::Type
+const typename ConstNRowsBlockXpr<internal::get_fixed_value<NRowsType>::value>::Type
#else
-inline const typename ConstNRowsBlockXpr<...>::Type
+const typename ConstNRowsBlockXpr<...>::Type
#endif
topRows(NRowsType n) const
{
@@ -595,16 +603,16 @@ EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(row-major)
/// \sa block(Index,Index,NRowsType,NColsType), class Block
///
template<int N>
-EIGEN_DEVICE_FUNC
-inline typename NRowsBlockXpr<N>::Type topRows(Index n = N)
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+typename NRowsBlockXpr<N>::Type topRows(Index n = N)
{
return typename NRowsBlockXpr<N>::Type(derived(), 0, 0, n, cols());
}
/// This is the const version of topRows<int>().
template<int N>
-EIGEN_DEVICE_FUNC
-inline typename ConstNRowsBlockXpr<N>::Type topRows(Index n = N) const
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+typename ConstNRowsBlockXpr<N>::Type topRows(Index n = N) const
{
return typename ConstNRowsBlockXpr<N>::Type(derived(), 0, 0, n, cols());
}
@@ -628,11 +636,11 @@ EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(row-major)
/// \sa block(Index,Index,NRowsType,NColsType), class Block
///
template<typename NRowsType>
-EIGEN_DEVICE_FUNC
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
#ifndef EIGEN_PARSED_BY_DOXYGEN
-inline typename NRowsBlockXpr<internal::get_fixed_value<NRowsType>::value>::Type
+typename NRowsBlockXpr<internal::get_fixed_value<NRowsType>::value>::Type
#else
-inline typename NRowsBlockXpr<...>::Type
+typename NRowsBlockXpr<...>::Type
#endif
bottomRows(NRowsType n)
{
@@ -642,11 +650,11 @@ bottomRows(NRowsType n)
/// This is the const version of bottomRows(NRowsType).
template<typename NRowsType>
-EIGEN_DEVICE_FUNC
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
#ifndef EIGEN_PARSED_BY_DOXYGEN
-inline const typename ConstNRowsBlockXpr<internal::get_fixed_value<NRowsType>::value>::Type
+const typename ConstNRowsBlockXpr<internal::get_fixed_value<NRowsType>::value>::Type
#else
-inline const typename ConstNRowsBlockXpr<...>::Type
+const typename ConstNRowsBlockXpr<...>::Type
#endif
bottomRows(NRowsType n) const
{
@@ -670,16 +678,16 @@ EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(row-major)
/// \sa block(Index,Index,NRowsType,NColsType), class Block
///
template<int N>
-EIGEN_DEVICE_FUNC
-inline typename NRowsBlockXpr<N>::Type bottomRows(Index n = N)
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+typename NRowsBlockXpr<N>::Type bottomRows(Index n = N)
{
return typename NRowsBlockXpr<N>::Type(derived(), rows() - n, 0, n, cols());
}
/// This is the const version of bottomRows<int>().
template<int N>
-EIGEN_DEVICE_FUNC
-inline typename ConstNRowsBlockXpr<N>::Type bottomRows(Index n = N) const
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+typename ConstNRowsBlockXpr<N>::Type bottomRows(Index n = N) const
{
return typename ConstNRowsBlockXpr<N>::Type(derived(), rows() - n, 0, n, cols());
}
@@ -704,11 +712,11 @@ EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(row-major)
/// \sa block(Index,Index,NRowsType,NColsType), class Block
///
template<typename NRowsType>
-EIGEN_DEVICE_FUNC
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
#ifndef EIGEN_PARSED_BY_DOXYGEN
-inline typename NRowsBlockXpr<internal::get_fixed_value<NRowsType>::value>::Type
+typename NRowsBlockXpr<internal::get_fixed_value<NRowsType>::value>::Type
#else
-inline typename NRowsBlockXpr<...>::Type
+typename NRowsBlockXpr<...>::Type
#endif
middleRows(Index startRow, NRowsType n)
{
@@ -718,11 +726,11 @@ middleRows(Index startRow, NRowsType n)
/// This is the const version of middleRows(Index,NRowsType).
template<typename NRowsType>
-EIGEN_DEVICE_FUNC
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
#ifndef EIGEN_PARSED_BY_DOXYGEN
-inline const typename ConstNRowsBlockXpr<internal::get_fixed_value<NRowsType>::value>::Type
+const typename ConstNRowsBlockXpr<internal::get_fixed_value<NRowsType>::value>::Type
#else
-inline const typename ConstNRowsBlockXpr<...>::Type
+const typename ConstNRowsBlockXpr<...>::Type
#endif
middleRows(Index startRow, NRowsType n) const
{
@@ -747,16 +755,16 @@ EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(row-major)
/// \sa block(Index,Index,NRowsType,NColsType), class Block
///
template<int N>
-EIGEN_DEVICE_FUNC
-inline typename NRowsBlockXpr<N>::Type middleRows(Index startRow, Index n = N)
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+typename NRowsBlockXpr<N>::Type middleRows(Index startRow, Index n = N)
{
return typename NRowsBlockXpr<N>::Type(derived(), startRow, 0, n, cols());
}
/// This is the const version of middleRows<int>().
template<int N>
-EIGEN_DEVICE_FUNC
-inline typename ConstNRowsBlockXpr<N>::Type middleRows(Index startRow, Index n = N) const
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+typename ConstNRowsBlockXpr<N>::Type middleRows(Index startRow, Index n = N) const
{
return typename ConstNRowsBlockXpr<N>::Type(derived(), startRow, 0, n, cols());
}
@@ -780,11 +788,11 @@ EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(column-major)
/// \sa block(Index,Index,NRowsType,NColsType), class Block
///
template<typename NColsType>
-EIGEN_DEVICE_FUNC
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
#ifndef EIGEN_PARSED_BY_DOXYGEN
-inline typename NColsBlockXpr<internal::get_fixed_value<NColsType>::value>::Type
+typename NColsBlockXpr<internal::get_fixed_value<NColsType>::value>::Type
#else
-inline typename NColsBlockXpr<...>::Type
+typename NColsBlockXpr<...>::Type
#endif
leftCols(NColsType n)
{
@@ -794,11 +802,11 @@ leftCols(NColsType n)
/// This is the const version of leftCols(NColsType).
template<typename NColsType>
-EIGEN_DEVICE_FUNC
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
#ifndef EIGEN_PARSED_BY_DOXYGEN
-inline const typename ConstNColsBlockXpr<internal::get_fixed_value<NColsType>::value>::Type
+const typename ConstNColsBlockXpr<internal::get_fixed_value<NColsType>::value>::Type
#else
-inline const typename ConstNColsBlockXpr<...>::Type
+const typename ConstNColsBlockXpr<...>::Type
#endif
leftCols(NColsType n) const
{
@@ -822,16 +830,16 @@ EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(column-major)
/// \sa block(Index,Index,NRowsType,NColsType), class Block
///
template<int N>
-EIGEN_DEVICE_FUNC
-inline typename NColsBlockXpr<N>::Type leftCols(Index n = N)
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+typename NColsBlockXpr<N>::Type leftCols(Index n = N)
{
return typename NColsBlockXpr<N>::Type(derived(), 0, 0, rows(), n);
}
/// This is the const version of leftCols<int>().
template<int N>
-EIGEN_DEVICE_FUNC
-inline typename ConstNColsBlockXpr<N>::Type leftCols(Index n = N) const
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+typename ConstNColsBlockXpr<N>::Type leftCols(Index n = N) const
{
return typename ConstNColsBlockXpr<N>::Type(derived(), 0, 0, rows(), n);
}
@@ -855,11 +863,11 @@ EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(column-major)
/// \sa block(Index,Index,NRowsType,NColsType), class Block
///
template<typename NColsType>
-EIGEN_DEVICE_FUNC
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
#ifndef EIGEN_PARSED_BY_DOXYGEN
-inline typename NColsBlockXpr<internal::get_fixed_value<NColsType>::value>::Type
+typename NColsBlockXpr<internal::get_fixed_value<NColsType>::value>::Type
#else
-inline typename NColsBlockXpr<...>::Type
+typename NColsBlockXpr<...>::Type
#endif
rightCols(NColsType n)
{
@@ -869,11 +877,11 @@ rightCols(NColsType n)
/// This is the const version of rightCols(NColsType).
template<typename NColsType>
-EIGEN_DEVICE_FUNC
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
#ifndef EIGEN_PARSED_BY_DOXYGEN
-inline const typename ConstNColsBlockXpr<internal::get_fixed_value<NColsType>::value>::Type
+const typename ConstNColsBlockXpr<internal::get_fixed_value<NColsType>::value>::Type
#else
-inline const typename ConstNColsBlockXpr<...>::Type
+const typename ConstNColsBlockXpr<...>::Type
#endif
rightCols(NColsType n) const
{
@@ -897,16 +905,16 @@ EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(column-major)
/// \sa block(Index,Index,NRowsType,NColsType), class Block
///
template<int N>
-EIGEN_DEVICE_FUNC
-inline typename NColsBlockXpr<N>::Type rightCols(Index n = N)
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+typename NColsBlockXpr<N>::Type rightCols(Index n = N)
{
return typename NColsBlockXpr<N>::Type(derived(), 0, cols() - n, rows(), n);
}
/// This is the const version of rightCols<int>().
template<int N>
-EIGEN_DEVICE_FUNC
-inline typename ConstNColsBlockXpr<N>::Type rightCols(Index n = N) const
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+typename ConstNColsBlockXpr<N>::Type rightCols(Index n = N) const
{
return typename ConstNColsBlockXpr<N>::Type(derived(), 0, cols() - n, rows(), n);
}
@@ -931,11 +939,11 @@ EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(column-major)
/// \sa block(Index,Index,NRowsType,NColsType), class Block
///
template<typename NColsType>
-EIGEN_DEVICE_FUNC
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
#ifndef EIGEN_PARSED_BY_DOXYGEN
-inline typename NColsBlockXpr<internal::get_fixed_value<NColsType>::value>::Type
+typename NColsBlockXpr<internal::get_fixed_value<NColsType>::value>::Type
#else
-inline typename NColsBlockXpr<...>::Type
+typename NColsBlockXpr<...>::Type
#endif
middleCols(Index startCol, NColsType numCols)
{
@@ -945,11 +953,11 @@ middleCols(Index startCol, NColsType numCols)
/// This is the const version of middleCols(Index,NColsType).
template<typename NColsType>
-EIGEN_DEVICE_FUNC
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
#ifndef EIGEN_PARSED_BY_DOXYGEN
-inline const typename ConstNColsBlockXpr<internal::get_fixed_value<NColsType>::value>::Type
+const typename ConstNColsBlockXpr<internal::get_fixed_value<NColsType>::value>::Type
#else
-inline const typename ConstNColsBlockXpr<...>::Type
+const typename ConstNColsBlockXpr<...>::Type
#endif
middleCols(Index startCol, NColsType numCols) const
{
@@ -974,16 +982,16 @@ EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(column-major)
/// \sa block(Index,Index,NRowsType,NColsType), class Block
///
template<int N>
-EIGEN_DEVICE_FUNC
-inline typename NColsBlockXpr<N>::Type middleCols(Index startCol, Index n = N)
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+typename NColsBlockXpr<N>::Type middleCols(Index startCol, Index n = N)
{
return typename NColsBlockXpr<N>::Type(derived(), 0, startCol, rows(), n);
}
/// This is the const version of middleCols<int>().
template<int N>
-EIGEN_DEVICE_FUNC
-inline typename ConstNColsBlockXpr<N>::Type middleCols(Index startCol, Index n = N) const
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+typename ConstNColsBlockXpr<N>::Type middleCols(Index startCol, Index n = N) const
{
return typename ConstNColsBlockXpr<N>::Type(derived(), 0, startCol, rows(), n);
}
@@ -1015,16 +1023,16 @@ EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL
/// \sa block(Index,Index,NRowsType,NColsType), class Block
///
template<int NRows, int NCols>
-EIGEN_DEVICE_FUNC
-inline typename FixedBlockXpr<NRows,NCols>::Type block(Index startRow, Index startCol)
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+typename FixedBlockXpr<NRows,NCols>::Type block(Index startRow, Index startCol)
{
return typename FixedBlockXpr<NRows,NCols>::Type(derived(), startRow, startCol);
}
/// This is the const version of block<>(Index, Index).
template<int NRows, int NCols>
-EIGEN_DEVICE_FUNC
-inline const typename ConstFixedBlockXpr<NRows,NCols>::Type block(Index startRow, Index startCol) const
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+const typename ConstFixedBlockXpr<NRows,NCols>::Type block(Index startRow, Index startCol) const
{
return typename ConstFixedBlockXpr<NRows,NCols>::Type(derived(), startRow, startCol);
}
@@ -1061,8 +1069,8 @@ EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL
/// \sa block(Index,Index,NRowsType,NColsType), class Block
///
template<int NRows, int NCols>
-EIGEN_DEVICE_FUNC
-inline typename FixedBlockXpr<NRows,NCols>::Type block(Index startRow, Index startCol,
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+typename FixedBlockXpr<NRows,NCols>::Type block(Index startRow, Index startCol,
Index blockRows, Index blockCols)
{
return typename FixedBlockXpr<NRows,NCols>::Type(derived(), startRow, startCol, blockRows, blockCols);
@@ -1070,7 +1078,8 @@ inline typename FixedBlockXpr<NRows,NCols>::Type block(Index startRow, Index sta
/// This is the const version of block<>(Index, Index, Index, Index).
template<int NRows, int NCols>
-inline const typename ConstFixedBlockXpr<NRows,NCols>::Type block(Index startRow, Index startCol,
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+const typename ConstFixedBlockXpr<NRows,NCols>::Type block(Index startRow, Index startCol,
Index blockRows, Index blockCols) const
{
return typename ConstFixedBlockXpr<NRows,NCols>::Type(derived(), startRow, startCol, blockRows, blockCols);
@@ -1084,15 +1093,15 @@ inline const typename ConstFixedBlockXpr<NRows,NCols>::Type block(Index startRow
EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(column-major)
/**
* \sa row(), class Block */
-EIGEN_DEVICE_FUNC
-inline ColXpr col(Index i)
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ColXpr col(Index i)
{
return ColXpr(derived(), i);
}
/// This is the const version of col().
-EIGEN_DEVICE_FUNC
-inline ConstColXpr col(Index i) const
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ConstColXpr col(Index i) const
{
return ConstColXpr(derived(), i);
}
@@ -1105,15 +1114,15 @@ inline ConstColXpr col(Index i) const
EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(row-major)
/**
* \sa col(), class Block */
-EIGEN_DEVICE_FUNC
-inline RowXpr row(Index i)
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+RowXpr row(Index i)
{
return RowXpr(derived(), i);
}
/// This is the const version of row().
-EIGEN_DEVICE_FUNC
-inline ConstRowXpr row(Index i) const
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ConstRowXpr row(Index i) const
{
return ConstRowXpr(derived(), i);
}
@@ -1140,11 +1149,11 @@ inline ConstRowXpr row(Index i) const
/// \sa block(Index,Index,NRowsType,NColsType), fix<N>, fix<N>(int), class Block
///
template<typename NType>
-EIGEN_DEVICE_FUNC
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
#ifndef EIGEN_PARSED_BY_DOXYGEN
-inline typename FixedSegmentReturnType<internal::get_fixed_value<NType>::value>::Type
+typename FixedSegmentReturnType<internal::get_fixed_value<NType>::value>::Type
#else
-inline typename FixedSegmentReturnType<...>::Type
+typename FixedSegmentReturnType<...>::Type
#endif
segment(Index start, NType n)
{
@@ -1156,11 +1165,11 @@ segment(Index start, NType n)
/// This is the const version of segment(Index,NType).
template<typename NType>
-EIGEN_DEVICE_FUNC
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
#ifndef EIGEN_PARSED_BY_DOXYGEN
-inline const typename ConstFixedSegmentReturnType<internal::get_fixed_value<NType>::value>::Type
+const typename ConstFixedSegmentReturnType<internal::get_fixed_value<NType>::value>::Type
#else
-inline const typename ConstFixedSegmentReturnType<...>::Type
+const typename ConstFixedSegmentReturnType<...>::Type
#endif
segment(Index start, NType n) const
{
@@ -1190,11 +1199,11 @@ segment(Index start, NType n) const
/// \sa class Block, block(Index,Index)
///
template<typename NType>
-EIGEN_DEVICE_FUNC
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
#ifndef EIGEN_PARSED_BY_DOXYGEN
-inline typename FixedSegmentReturnType<internal::get_fixed_value<NType>::value>::Type
+typename FixedSegmentReturnType<internal::get_fixed_value<NType>::value>::Type
#else
-inline typename FixedSegmentReturnType<...>::Type
+typename FixedSegmentReturnType<...>::Type
#endif
head(NType n)
{
@@ -1205,11 +1214,11 @@ head(NType n)
/// This is the const version of head(NType).
template<typename NType>
-EIGEN_DEVICE_FUNC
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
#ifndef EIGEN_PARSED_BY_DOXYGEN
-inline const typename ConstFixedSegmentReturnType<internal::get_fixed_value<NType>::value>::Type
+const typename ConstFixedSegmentReturnType<internal::get_fixed_value<NType>::value>::Type
#else
-inline const typename ConstFixedSegmentReturnType<...>::Type
+const typename ConstFixedSegmentReturnType<...>::Type
#endif
head(NType n) const
{
@@ -1239,11 +1248,11 @@ head(NType n) const
/// \sa class Block, block(Index,Index)
///
template<typename NType>
-EIGEN_DEVICE_FUNC
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
#ifndef EIGEN_PARSED_BY_DOXYGEN
-inline typename FixedSegmentReturnType<internal::get_fixed_value<NType>::value>::Type
+typename FixedSegmentReturnType<internal::get_fixed_value<NType>::value>::Type
#else
-inline typename FixedSegmentReturnType<...>::Type
+typename FixedSegmentReturnType<...>::Type
#endif
tail(NType n)
{
@@ -1254,11 +1263,11 @@ tail(NType n)
/// This is the const version of tail(Index).
template<typename NType>
-EIGEN_DEVICE_FUNC
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
#ifndef EIGEN_PARSED_BY_DOXYGEN
-inline const typename ConstFixedSegmentReturnType<internal::get_fixed_value<NType>::value>::Type
+const typename ConstFixedSegmentReturnType<internal::get_fixed_value<NType>::value>::Type
#else
-inline const typename ConstFixedSegmentReturnType<...>::Type
+const typename ConstFixedSegmentReturnType<...>::Type
#endif
tail(NType n) const
{
@@ -1284,8 +1293,8 @@ tail(NType n) const
/// \sa segment(Index,NType), class Block
///
template<int N>
-EIGEN_DEVICE_FUNC
-inline typename FixedSegmentReturnType<N>::Type segment(Index start, Index n = N)
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+typename FixedSegmentReturnType<N>::Type segment(Index start, Index n = N)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return typename FixedSegmentReturnType<N>::Type(derived(), start, n);
@@ -1293,8 +1302,8 @@ inline typename FixedSegmentReturnType<N>::Type segment(Index start, Index n = N
/// This is the const version of segment<int>(Index).
template<int N>
-EIGEN_DEVICE_FUNC
-inline typename ConstFixedSegmentReturnType<N>::Type segment(Index start, Index n = N) const
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+typename ConstFixedSegmentReturnType<N>::Type segment(Index start, Index n = N) const
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return typename ConstFixedSegmentReturnType<N>::Type(derived(), start, n);
@@ -1316,8 +1325,8 @@ inline typename ConstFixedSegmentReturnType<N>::Type segment(Index start, Index
/// \sa head(NType), class Block
///
template<int N>
-EIGEN_DEVICE_FUNC
-inline typename FixedSegmentReturnType<N>::Type head(Index n = N)
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+typename FixedSegmentReturnType<N>::Type head(Index n = N)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return typename FixedSegmentReturnType<N>::Type(derived(), 0, n);
@@ -1325,8 +1334,8 @@ inline typename FixedSegmentReturnType<N>::Type head(Index n = N)
/// This is the const version of head<int>().
template<int N>
-EIGEN_DEVICE_FUNC
-inline typename ConstFixedSegmentReturnType<N>::Type head(Index n = N) const
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+typename ConstFixedSegmentReturnType<N>::Type head(Index n = N) const
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return typename ConstFixedSegmentReturnType<N>::Type(derived(), 0, n);
@@ -1348,8 +1357,8 @@ inline typename ConstFixedSegmentReturnType<N>::Type head(Index n = N) const
/// \sa tail(NType), class Block
///
template<int N>
-EIGEN_DEVICE_FUNC
-inline typename FixedSegmentReturnType<N>::Type tail(Index n = N)
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+typename FixedSegmentReturnType<N>::Type tail(Index n = N)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return typename FixedSegmentReturnType<N>::Type(derived(), size() - n);
@@ -1357,8 +1366,8 @@ inline typename FixedSegmentReturnType<N>::Type tail(Index n = N)
/// This is the const version of tail<int>.
template<int N>
-EIGEN_DEVICE_FUNC
-inline typename ConstFixedSegmentReturnType<N>::Type tail(Index n = N) const
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+typename ConstFixedSegmentReturnType<N>::Type tail(Index n = N) const
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return typename ConstFixedSegmentReturnType<N>::Type(derived(), size() - n);
@@ -1367,18 +1376,21 @@ inline typename ConstFixedSegmentReturnType<N>::Type tail(Index n = N) const
/// \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
/// is col-major (resp. row-major).
///
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
InnerVectorReturnType innerVector(Index outer)
{ return InnerVectorReturnType(derived(), outer); }
/// \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
/// is col-major (resp. row-major). Read-only.
///
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const ConstInnerVectorReturnType innerVector(Index outer) const
{ return ConstInnerVectorReturnType(derived(), outer); }
/// \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
/// is col-major (resp. row-major).
///
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
InnerVectorsReturnType
innerVectors(Index outerStart, Index outerSize)
{
@@ -1391,6 +1403,7 @@ innerVectors(Index outerStart, Index outerSize)
/// \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
/// is col-major (resp. row-major). Read-only.
///
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const ConstInnerVectorsReturnType
innerVectors(Index outerStart, Index outerSize) const
{
@@ -1404,7 +1417,7 @@ innerVectors(Index outerStart, Index outerSize) const
* \sa subVectors()
*/
template<DirectionType Direction>
-EIGEN_DEVICE_FUNC
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
typename internal::conditional<Direction==Vertical,ColXpr,RowXpr>::type
subVector(Index i)
{
@@ -1413,7 +1426,7 @@ subVector(Index i)
/** This is the const version of subVector(Index) */
template<DirectionType Direction>
-EIGEN_DEVICE_FUNC
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
typename internal::conditional<Direction==Vertical,ConstColXpr,ConstRowXpr>::type
subVector(Index i) const
{
@@ -1424,7 +1437,7 @@ subVector(Index i) const
* \sa subVector(Index)
*/
template<DirectionType Direction>
-EIGEN_DEVICE_FUNC
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
Index subVectors() const
{ return (Direction==Vertical)?cols():rows(); }
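
Editorial note: the hunks above only change inlining attributes, but since they touch the fixed-size head/tail helpers and the Direction-templated subVector API, here is a small, purely illustrative usage sketch (it assumes an Eigen version exposing these plugin methods on DenseBase):
\code
// Illustrative only: exercises the block helpers declared in the plugin above.
#include <Eigen/Core>
#include <iostream>

int main()
{
  Eigen::VectorXf v = Eigen::VectorXf::LinSpaced(6, 0.f, 5.f);
  std::cout << v.head<2>().transpose() << "\n";  // fixed-size head: first 2 coefficients
  std::cout << v.tail(3).transpose()   << "\n";  // dynamic-size tail: last 3 coefficients

  Eigen::MatrixXf M = Eigen::MatrixXf::Random(3, 4);
  std::cout << M.subVector<Eigen::Vertical>(1) << "\n";        // the 2nd column
  std::cout << M.subVectors<Eigen::Horizontal>() << " rows\n"; // number of horizontal sub-vectors
  return 0;
}
\endcode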
diff --git a/Eigen/src/plugins/CommonCwiseUnaryOps.h b/Eigen/src/plugins/CommonCwiseUnaryOps.h
index 89f4faaac..5418dc415 100644
--- a/Eigen/src/plugins/CommonCwiseUnaryOps.h
+++ b/Eigen/src/plugins/CommonCwiseUnaryOps.h
@@ -76,6 +76,20 @@ conjugate() const
return ConjugateReturnType(derived());
}
+/// \returns an expression of the complex conjugate of \c *this if Cond==true, returns derived() otherwise.
+///
+EIGEN_DOC_UNARY_ADDONS(conjugate,complex conjugate)
+///
+/// \sa conjugate()
+template<bool Cond>
+EIGEN_DEVICE_FUNC
+inline typename internal::conditional<Cond,ConjugateReturnType,const Derived&>::type
+conjugateIf() const
+{
+ typedef typename internal::conditional<Cond,ConjugateReturnType,const Derived&>::type ReturnType;
+ return ReturnType(derived());
+}
+
/// \returns a read-only expression of the real part of \c *this.
///
EIGEN_DOC_UNARY_ADDONS(real,real part function)
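
Editorial note: a hedged sketch of how the new conjugateIf<Cond>() helper introduced above could be used; the name and semantics come from the hunk, the wrapper function is made up for illustration:
\code
#include <Eigen/Core>
#include <complex>
#include <iostream>

// Conjugation is selected at compile time: conjugateIf<true>() behaves like conjugate(),
// conjugateIf<false>() simply returns the expression unchanged (const Derived&).
template<bool Conj>
std::complex<float> maybeConjugatedDot(const Eigen::VectorXcf& a, const Eigen::VectorXcf& b)
{
  return (a.conjugateIf<Conj>().transpose() * b).value();
}

int main()
{
  Eigen::VectorXcf a = Eigen::VectorXcf::Random(4), b = Eigen::VectorXcf::Random(4);
  std::cout << maybeConjugatedDot<true>(a, b)  << "\n"   // same as a.dot(b)
            << maybeConjugatedDot<false>(a, b) << "\n";  // same as a.transpose() * b
  return 0;
}
\endcode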
diff --git a/bench/BenchTimer.h b/bench/BenchTimer.h
index ea28496b7..8a0dbbe81 100644
--- a/bench/BenchTimer.h
+++ b/bench/BenchTimer.h
@@ -28,11 +28,15 @@
#endif
static void escape(void *p) {
+#if EIGEN_COMP_GNUC || EIGEN_COMP_CLANG
asm volatile("" : : "g"(p) : "memory");
+#endif
}
static void clobber() {
+#if EIGEN_COMP_GNUC || EIGEN_COMP_CLANG
asm volatile("" : : : "memory");
+#endif
}
#include <Eigen/Core>
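
Editorial note: escape()/clobber() are the usual optimizer barriers for micro-benchmarks; a minimal sketch of their intended use, assuming GCC or Clang as the new guard requires:
\code
// Minimal sketch, GCC/Clang only: the empty asm statements make the compiler
// believe the data is observed, so the benchmarked loop cannot be elided.
static void escape(void* p) { asm volatile("" : : "g"(p) : "memory"); }
static void clobber()       { asm volatile("" : : : "memory"); }

void bench_axpy(const float* x, float* y, int n, float alpha)
{
  for (int i = 0; i < n; ++i)
    y[i] += alpha * x[i];
  escape(y);   // pretend y escapes: the result must actually be computed
  clobber();   // pretend all memory is read/written: stores cannot be dropped
}
\endcode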
diff --git a/bench/bench_gemm.cpp b/bench/bench_gemm.cpp
index 688d99c4a..78ca1cd13 100644
--- a/bench/bench_gemm.cpp
+++ b/bench/bench_gemm.cpp
@@ -11,8 +11,9 @@
//
#include <iostream>
-#include <Eigen/Core>
#include <bench/BenchTimer.h>
+#include <Eigen/Core>
+
using namespace std;
using namespace Eigen;
@@ -30,10 +31,22 @@ using namespace Eigen;
#define SCALARB SCALAR
#endif
+#ifdef ROWMAJ_A
+const int opt_A = RowMajor;
+#else
+const int opt_A = ColMajor;
+#endif
+
+#ifdef ROWMAJ_B
+const int opt_B = RowMajor;
+#else
+const int opt_B = ColMajor;
+#endif
+
typedef SCALAR Scalar;
typedef NumTraits<Scalar>::Real RealScalar;
-typedef Matrix<SCALARA,Dynamic,Dynamic> A;
-typedef Matrix<SCALARB,Dynamic,Dynamic> B;
+typedef Matrix<SCALARA,Dynamic,Dynamic,opt_A> A;
+typedef Matrix<SCALARB,Dynamic,Dynamic,opt_B> B;
typedef Matrix<Scalar,Dynamic,Dynamic> C;
typedef Matrix<RealScalar,Dynamic,Dynamic> M;
@@ -58,45 +71,61 @@ static char lower = 'L';
static char right = 'R';
static int intone = 1;
-void blas_gemm(const MatrixXf& a, const MatrixXf& b, MatrixXf& c)
+#ifdef ROWMAJ_A
+const char transA = trans;
+#else
+const char transA = notrans;
+#endif
+
+#ifdef ROWMAJ_B
+const char transB = trans;
+#else
+const char transB = notrans;
+#endif
+
+template<typename A,typename B>
+void blas_gemm(const A& a, const B& b, MatrixXf& c)
{
int M = c.rows(); int N = c.cols(); int K = a.cols();
- int lda = a.rows(); int ldb = b.rows(); int ldc = c.rows();
+ int lda = a.outerStride(); int ldb = b.outerStride(); int ldc = c.rows();
- sgemm_(&notrans,&notrans,&M,&N,&K,&fone,
+ sgemm_(&transA,&transB,&M,&N,&K,&fone,
const_cast<float*>(a.data()),&lda,
const_cast<float*>(b.data()),&ldb,&fone,
c.data(),&ldc);
}
-EIGEN_DONT_INLINE void blas_gemm(const MatrixXd& a, const MatrixXd& b, MatrixXd& c)
+template<typename A,typename B>
+void blas_gemm(const A& a, const B& b, MatrixXd& c)
{
int M = c.rows(); int N = c.cols(); int K = a.cols();
- int lda = a.rows(); int ldb = b.rows(); int ldc = c.rows();
+ int lda = a.outerStride(); int ldb = b.outerStride(); int ldc = c.rows();
- dgemm_(&notrans,&notrans,&M,&N,&K,&done,
+ dgemm_(&transA,&transB,&M,&N,&K,&done,
const_cast<double*>(a.data()),&lda,
const_cast<double*>(b.data()),&ldb,&done,
c.data(),&ldc);
}
-void blas_gemm(const MatrixXcf& a, const MatrixXcf& b, MatrixXcf& c)
+template<typename A,typename B>
+void blas_gemm(const A& a, const B& b, MatrixXcf& c)
{
int M = c.rows(); int N = c.cols(); int K = a.cols();
- int lda = a.rows(); int ldb = b.rows(); int ldc = c.rows();
+ int lda = a.outerStride(); int ldb = b.outerStride(); int ldc = c.rows();
- cgemm_(&notrans,&notrans,&M,&N,&K,(float*)&cfone,
+ cgemm_(&transA,&transB,&M,&N,&K,(float*)&cfone,
const_cast<float*>((const float*)a.data()),&lda,
const_cast<float*>((const float*)b.data()),&ldb,(float*)&cfone,
(float*)c.data(),&ldc);
}
-void blas_gemm(const MatrixXcd& a, const MatrixXcd& b, MatrixXcd& c)
+template<typename A,typename B>
+void blas_gemm(const A& a, const B& b, MatrixXcd& c)
{
int M = c.rows(); int N = c.cols(); int K = a.cols();
- int lda = a.rows(); int ldb = b.rows(); int ldc = c.rows();
+ int lda = a.outerStride(); int ldb = b.outerStride(); int ldc = c.rows();
- zgemm_(&notrans,&notrans,&M,&N,&K,(double*)&cdone,
+ zgemm_(&transA,&transB,&M,&N,&K,(double*)&cdone,
const_cast<double*>((const double*)a.data()),&lda,
const_cast<double*>((const double*)b.data()),&ldb,(double*)&cdone,
(double*)c.data(),&ldc);
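
Editorial note: the idea behind the transA/transB and outerStride() changes above is that a row-major matrix stored with leading dimension outerStride() is exactly the column-major storage of its transpose. A hedged sketch (the sizes are arbitrary):
\code
// Sketch of the storage-order mapping assumed by the templated blas_gemm wrappers.
#include <Eigen/Core>

typedef Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor> RowMajorMatrixXf;

void example()
{
  RowMajorMatrixXf A(64, 128);
  int  lda = int(A.outerStride()); // for a row-major matrix this is the number of columns
  char opA = 'T';                  // BLAS sees A.data() as the column-major matrix A^T,
                                   // so requesting 'T' recovers A itself without copying
  (void)lda; (void)opA;
}
\endcode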
@@ -112,6 +141,7 @@ void matlab_cplx_cplx(const M& ar, const M& ai, const M& br, const M& bi, M& cr,
cr.noalias() -= ai * bi;
ci.noalias() += ar * bi;
ci.noalias() += ai * br;
+ // [cr ci] += [ar ai] * br + [-ai ar] * bi
}
void matlab_real_cplx(const M& a, const M& br, const M& bi, M& cr, M& ci)
@@ -126,6 +156,8 @@ void matlab_cplx_real(const M& ar, const M& ai, const M& b, M& cr, M& ci)
ci.noalias() += ai * b;
}
+
+
template<typename A, typename B, typename C>
EIGEN_DONT_INLINE void gemm(const A& a, const B& b, C& c)
{
@@ -179,8 +211,8 @@ int main(int argc, char ** argv)
}
else if(argv[i][1]=='t')
{
+ tries = atoi(argv[++i]);
++i;
- tries = atoi(argv[i++]);
}
else if(argv[i][1]=='p')
{
@@ -216,7 +248,7 @@ int main(int argc, char ** argv)
std::cout << "Matrix sizes = " << m << "x" << p << " * " << p << "x" << n << "\n";
std::ptrdiff_t mc(m), nc(n), kc(p);
internal::computeProductBlockingSizes<Scalar,Scalar>(kc, mc, nc);
- std::cout << "blocking size (mc x kc) = " << mc << " x " << kc << "\n";
+ std::cout << "blocking size (mc x kc) = " << mc << " x " << kc << " x " << nc << "\n";
C r = c;
@@ -240,7 +272,7 @@ int main(int argc, char ** argv)
blas_gemm(a,b,r);
c.noalias() += a * b;
if(!r.isApprox(c)) {
- std::cout << r - c << "\n";
+ std::cout << (r - c).norm()/r.norm() << "\n";
std::cerr << "Warning, your product is crap!\n\n";
}
#else
@@ -249,7 +281,7 @@ int main(int argc, char ** argv)
gemm(a,b,c);
r.noalias() += a.cast<Scalar>() .lazyProduct( b.cast<Scalar>() );
if(!r.isApprox(c)) {
- std::cout << r - c << "\n";
+ std::cout << (r - c).norm()/r.norm() << "\n";
std::cerr << "Warning, your product is crap!\n\n";
}
}
@@ -263,6 +295,9 @@ int main(int argc, char ** argv)
std::cout << "blas real " << tblas.best(REAL_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/tblas.best(REAL_TIMER))*1e-9 << " GFLOPS \t(" << tblas.total(REAL_TIMER) << "s)\n";
#endif
+ // warm start
+ if(b.norm()+a.norm()==123.554) std::cout << "\n";
+
BenchTimer tmt;
c = rc;
BENCH(tmt, tries, rep, gemm(a,b,c));
@@ -285,11 +320,11 @@ int main(int argc, char ** argv)
if(1.*m*n*p<30*30*30)
{
- BenchTimer tmt;
- c = rc;
- BENCH(tmt, tries, rep, c.noalias()+=a.lazyProduct(b));
- std::cout << "lazy cpu " << tmt.best(CPU_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/tmt.best(CPU_TIMER))*1e-9 << " GFLOPS \t(" << tmt.total(CPU_TIMER) << "s)\n";
- std::cout << "lazy real " << tmt.best(REAL_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/tmt.best(REAL_TIMER))*1e-9 << " GFLOPS \t(" << tmt.total(REAL_TIMER) << "s)\n";
+ BenchTimer tmt;
+ c = rc;
+ BENCH(tmt, tries, rep, c.noalias()+=a.lazyProduct(b));
+ std::cout << "lazy cpu " << tmt.best(CPU_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/tmt.best(CPU_TIMER))*1e-9 << " GFLOPS \t(" << tmt.total(CPU_TIMER) << "s)\n";
+ std::cout << "lazy real " << tmt.best(REAL_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/tmt.best(REAL_TIMER))*1e-9 << " GFLOPS \t(" << tmt.total(REAL_TIMER) << "s)\n";
}
#ifdef DECOUPLED
diff --git a/bench/perf_monitoring/changesets.txt b/bench/perf_monitoring/changesets.txt
index 960699c04..647825c0f 100644
--- a/bench/perf_monitoring/changesets.txt
+++ b/bench/perf_monitoring/changesets.txt
@@ -10,7 +10,7 @@
5908:f8ee3c721251 # improve packing with ptranspose
#5921:ca808bb456b0 # merge
#5927:8b1001f9e3ac
-5937:5a4ca1ad8c53 # New gebp kernel handling up to 3 packets x 4 register-level blocks
+5937:5a4ca1ad8c53 # New gebp kernel: up to 3 packets x 4 register-level blocks
#5949:f3488f4e45b2 # merge
#5969:e09031dccfd9 # Disable 3pX4 kernel on Altivec
#5992:4a429f5e0483 # merge
@@ -24,7 +24,7 @@ before-evaluators
#6726:ff2d2388e7b9 # merge default to tensors
#6742:0cbd6195e829 # merge default to tensors
#6747:853d2bafeb8f # Generalized the gebp apis
-6765:71584fd55762 # Made the blocking computation aware of the l3 cache; Also optimized the blocking parameters to take into account the number of threads used for a computation
+6765:71584fd55762 # Made the blocking computation aware of the l3 cache;<br/> Also optimized the blocking parameters to take<br/> into account the number of threads used for a computation.
6781:9cc5a931b2c6 # generalized gemv
6792:f6e1daab600a # ensured that contractions that can be reduced to a matrix vector product
#6844:039efd86b75c # merge tensor
@@ -38,34 +38,57 @@ before-evaluators
6933:52572e60b5d3 # blocking size strategy
6937:c8c042f286b2 # avoid redundant pack_rhs
6981:7e5d6f78da59 # dynamic loop swapping
-6984:45f26866c091 # rm dynamic loop swapping, adjust lhs's micro panel height to fully exploit L1 cache
-6986:a675d05b6f8f # blocking heuristic: block on the rhs in L1 if the lhs fit in L1.
-7013:f875e75f07e5 # organize a little our default cache sizes, and use a saner default L1 outside of x86 (10% faster on Nexus 5)
-7015:8aad8f35c955 # Refactor computeProductBlockingSizes to make room for the possibility of using lookup tables
+6984:45f26866c091 # rm dynamic loop swapping,<br/> adjust lhs's micro panel height to fully exploit L1 cache
+6986:a675d05b6f8f # blocking heuristic:<br/> block on the rhs in L1 if the lhs fit in L1.
+7013:f875e75f07e5 # organize a little our default cache sizes,<br/> and use a saner default L1 outside of x86 (10% faster on Nexus 5)
+7015:8aad8f35c955 # Refactor computeProductBlockingSizes to make room<br/> for the possibility of using lookup tables
7016:a58d253e8c91 # Polish lookup tables generation
-7018:9b27294a8186 # actual_panel_rows computation should always be resilient to parameters not consistent with the known L1 cache size, see comment
-7019:c758b1e2c073 # Provide a empirical lookup table for blocking sizes measured on a Nexus 5. Only for float, only for Android on ARM 32bit for now.
-7085:627e039fba68 # Bug 986: add support for coefficient-based product with 0 depth.
-7098:b6f1db9cf9ec # Bug 992: don't select a 3p GEMM path with non-vectorizable scalar types, this hits unsupported paths in symm/triangular products code
+7018:9b27294a8186 # actual_panel_rows computation should always be resilient<br/> to parameters not consistent with the known L1 cache size, see comment
+7019:c758b1e2c073 # Provide an empirical lookup table for blocking sizes measured on a Nexus 5.<br/> Only for float, only for Android on ARM 32bit for now.
+7085:627e039fba68 # Bug 986: add support for coefficient-based<br/> product with 0 depth.
+7098:b6f1db9cf9ec # Bug 992: don't select a 3p GEMM path with non-SIMD scalar types.
7591:09a8e2186610 # 3.3-alpha1
7650:b0f3c8f43025 # help clang inlining
-7708:dfc6ab9d9458 # Improve numerical accuracy in LLT and triangular solve by using true scalar divisions (instead of x * (1/y))
-#8744:74b789ada92a # Improved the matrix multiplication blocking in the case where mr is not a power of 2 (e.g on Haswell CPUs)
-8789:efcb912e4356 # Made the index type a template parameter to evaluateProductBlockingSizes. Use numext::mini and numext::maxi instead of std::min/std::max to compute blocking sizes
-8972:81d53c711775 # Don't optimize the processing of the last rows of a matrix matrix product in cases that violate the assumptions made by the optimized code path
+7708:dfc6ab9d9458 # Improve numerical accuracy in LLT and triangular solve<br/> by using true scalar divisions (instead of x * (1/y))
+#8744:74b789ada92a # Improved the matrix multiplication blocking in the case<br/> where mr is not a power of 2 (e.g on Haswell CPUs)
+8789:efcb912e4356 # Made the index type a template parameter to evaluateProductBlockingSizes.<br/> Use numext::mini and numext::maxi instead of <br/> std::min/std::max to compute blocking sizes.
+8972:81d53c711775 # Don't optimize the processing of the last rows of<br/> a matrix matrix product in cases that violate<br/> the assumptions made by the optimized code path.
8985:d935df21a082 # Remove the rotating kernel.
8988:6c2dc56e73b3 # Bug 256: enable vectorization with unaligned loads/stores.
-9148:b8b8c421e36c # Relax mixing-type constraints for binary coefficient-wise operators
+9148:b8b8c421e36c # Relax mixing-type constraints for binary coeff-wise operators
9174:d228bc282ac9 # merge
-9212:c90098affa7b # Fix performance regression introduced in changeset 8aad8f35c955
-9213:9f1c14e4694b # Fix performance regression in dgemm introduced by changeset 81d53c711775
+9175:abc7a3600098 # Include the cost of stores in unrolling
+9212:c90098affa7b # Fix perf regression introduced in changeset 8aad8f35c955
+9213:9f1c14e4694b # Fix perf regression in dgemm introduced by changeset 81d53c711775
9361:69d418c06999 # 3.3-beta2
+9445:f27ff0ad77a3 # Optimize expression matching 'd?=a-b*c' as 'd?=a; d?=b*c;'
9583:bef509908b9d # 3.3-rc1
+9593:2f24280cf59a # Bug 1311: fix alignment logic in some cases<br/> of (scalar*small).lazyProduct(small)
+9722:040d861b88b5 # Disabled part of the matrix matrix peeling code<br/> that's incompatible with 512 bit registers
9792:26667be4f70b # 3.3.0
+9891:41260bdfc23b # Fix a performance regression in (mat*mat)*vec<br/> for which mat*mat was evaluated multiple times.
9942:b1d3eba60130 # Operators += and -= do not resize!
-9943:79bb9887afd4 # Ease compiler job to generate clean and efficient code in mat*vec
-9946:2213991340ea # Complete rewrite of column-major-matrix * vector product to deliver higher performance of modern CPU.
-9955:630471c3298c # Improve performance of row-major-dense-matrix * vector products for recent CPUs. (this is the next changeset fixing a typo)
+9943:79bb9887afd4 # Ease compiler generating clean and efficient code in mat*vec
+9946:2213991340ea # Complete rewrite of column-major-matrix * vector product<br/> to deliver higher performance of modern CPU.
+9955:630471c3298c # Improve performance of row-major-dense-matrix * vector products<br/> for recent CPUs.
9975:2eeed9de710c # Revert vec/y to vec*(1/y) in row-major TRSM
-
-
+10442:e3f17da72a40 # Bug 1435: fix aliasing issue in expressions like: A = C - B*A;
+10735:6913f0cf7d06 # Adds missing EIGEN_STRONG_INLINE to support MSVC<br/> properly inlining small vector calculations
+10943:4db388d946bd # Bug 1562: optimize evaluation of small products<br/> of the form s*A*B by rewriting them as: s*(A.lazyProduct(B))<br/> to save a costly temporary.<br/> Measured speedup from 2x to 5x.
+10961:5007ff66c9f6 # Introduce the macro ei_declare_local_nested_eval to<br/> help allocating on the stack local temporaries via alloca,<br/> and let outer-products makes a good use of it.
+11083:30a528a984bb # Bug 1578: Improve prefetching in matrix multiplication on MIPS.
+11533:71609c41e9f8 # PR 526: Speed up multiplication of small, dynamically sized matrices
+11545:6d348dc9b092 # Vectorize row-by-row gebp loop iterations on 16 packets as well
+11579:efda481cbd7a # Bug 1624: improve matrix-matrix product on ARM 64, 20% speedup
+11606:b8d3f548a9d9 # do not read buffers out of bounds
+11638:22f9cc0079bd # Implement AVX512 vectorization of std::complex<float/double>
+11642:9f52fde03483 # Bug 1636: fix gemm performance issue with gcc>=6 and no FMA
+11648:81172653b67b # Bug 1515: disable gebp's 3pX4 micro kernel<br/> for MSVC<=19.14 because of register spilling.
+11654:b81188e099f3 # fix EIGEN_GEBP_2PX4_SPILLING_WORKAROUND<br/> for non vectorized type, and non x86/64 target
+11664:71546f1a9f0c # enable spilling workaround on architectures with SSE/AVX
+11669:b500fef42ced # Artificially increase l1-blocking size for AVX512.<br/> +10% speedup with current kernels.
+11683:2ea2960f1c7f # Make code compile again for older compilers.
+11753:556fb4ceb654 # Bug: 1633: refactor gebp kernel and optimize for neon
+11761:cefc1ba05596 # Bug 1661: fix regression in GEBP and AVX512
+11763:1e41e70fe97b # GEBP: cleanup logic to choose between<br/> 4 packets or 1 packet (=209bf81aa3f3+fix)
+11803:d95b5d78598b # gebp: Add new ½ and ¼ packet rows per (peeling) round on the lhs \ No newline at end of file
diff --git a/bench/perf_monitoring/make_plot.sh b/bench/perf_monitoring/make_plot.sh
index ca9fa9662..65aaf66f9 100755
--- a/bench/perf_monitoring/make_plot.sh
+++ b/bench/perf_monitoring/make_plot.sh
@@ -64,8 +64,11 @@ do
i=0
while read line2
do
- if [ ! -z '$line2' ]; then
- echo '{"r":'$i',"v":'`echo $line2 | cut -f $col -d ' '`'},' >> $WHAT.html
+ if [ ! -z "$line2" ]; then
+ val=`echo $line2 | cut -s -f $col -d ' '`
+ if [ -n "$val" ]; then # skip build failures
+ echo '{"r":'$i',"v":'$val'},' >> $WHAT.html
+ fi
fi
((i++))
done < $WHAT.out
@@ -84,6 +87,17 @@ do
done < $WHAT.out
echo '];' >> $WHAT.html
+echo 'var changesets_details = [' >> $WHAT.html
+while read line2
+do
+ if [ ! -z "$line2" ]; then
+ num=`echo "$line2" | cut -f 1 -d ' '`
+ comment=`grep ":$num" changesets.txt | cut -f 2 -d '#'`
+ echo '"'"$comment"'",' >> $WHAT.html
+ fi
+done < $WHAT.out
+echo '];' >> $WHAT.html
+
echo 'var changesets_count = [' >> $WHAT.html
i=0
while read line2
diff --git a/bench/perf_monitoring/resources/chart_footer.html b/bench/perf_monitoring/resources/chart_footer.html
index 8acc69f14..e8ef0a270 100644
--- a/bench/perf_monitoring/resources/chart_footer.html
+++ b/bench/perf_monitoring/resources/chart_footer.html
@@ -14,12 +14,16 @@
.tickFormat(function(d){return changesets[d]})
.rotateLabels(-90);
- chart.y(function(datum){ return datum.v; })
- .yAxis.options({
- axisLabel: customSettings.YLABEL || 'GFlops'/*,
- tickFormat: function(val){ return d3.format('.0f')(val) + ' GFlops'; }*/
- });
+ chart.y(function(datum){ return datum.v; })
+ .yAxis.options({
+ axisLabel: customSettings.YLABEL || 'GFlops'/*,
+ tickFormat: function(val){ return d3.format('.0f')(val) + ' GFlops'; }*/
+ });
+ chart.tooltip.headerFormatter(function(d) { return changesets[d]
+ + ' <p style="font-weight:normal;text-align: left;">'
+ + changesets_details[d] + "</p>"; });
+
//chart.useInteractiveGuideline(true);
d3.select('#chart').datum(data).call(chart);
var plot = d3.select('#chart > g');
diff --git a/bench/perf_monitoring/run.sh b/bench/perf_monitoring/run.sh
index 4e8f73c7f..3022adfd1 100755
--- a/bench/perf_monitoring/run.sh
+++ b/bench/perf_monitoring/run.sh
@@ -148,9 +148,10 @@ make_backup $WORKING_DIR_PREFIX"c"$bench
cut -f1 -d"#" < changesets.txt | grep -E '[[:alnum:]]' | while read rev
do
if [ ! -z '$rev' ]; then
- echo "Testing rev $rev"
+ rev2=`echo $rev | cut -f 2 -d':'`
+ echo "Testing rev $rev, $rev2"
cd eigen_src
- hg up -C $rev > /dev/null
+ hg up -C $rev2 > /dev/null
actual_rev=`hg identify | cut -f1 -d' '`
cd ..
diff --git a/blas/common.h b/blas/common.h
index 960c09cc6..a9b697842 100644
--- a/blas/common.h
+++ b/blas/common.h
@@ -166,6 +166,10 @@ T* copy_back(T* x_cpy, T* x, int n, int incx)
return x_cpy;
}
-#define EIGEN_BLAS_FUNC(X) EIGEN_CAT(SCALAR_SUFFIX,X##_)
+#ifndef EIGEN_BLAS_FUNC_SUFFIX
+#define EIGEN_BLAS_FUNC_SUFFIX _
+#endif
+
+#define EIGEN_BLAS_FUNC(X) EIGEN_CAT(SCALAR_SUFFIX, EIGEN_CAT(X, EIGEN_BLAS_FUNC_SUFFIX))
#endif // EIGEN_BLAS_COMMON_H
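
Editorial note: to make the new suffix handling concrete, here is a hedged, self-contained illustration of how the macro composes BLAS symbol names; SCALAR_SUFFIX and the EIGEN_CAT helpers are redefined locally just for the example:
\code
// Illustration only: mimics the macros above to show the expansion.
#define EIGEN_CAT2(a,b) a ## b
#define EIGEN_CAT(a,b)  EIGEN_CAT2(a,b)

#define SCALAR_SUFFIX d           // as set when compiling the double precision unit
#define EIGEN_BLAS_FUNC_SUFFIX _  // default; define it empty for ABIs without a trailing underscore
#define EIGEN_BLAS_FUNC(X) EIGEN_CAT(SCALAR_SUFFIX, EIGEN_CAT(X, EIGEN_BLAS_FUNC_SUFFIX))

// EIGEN_BLAS_FUNC(axpy) expands to daxpy_ :
int EIGEN_BLAS_FUNC(axpy)(int* n, double* alpha, double* x, int* incx, double* y, int* incy);
\endcode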
diff --git a/blas/double.cpp b/blas/double.cpp
index 295b1d1f2..eb2e57307 100644
--- a/blas/double.cpp
+++ b/blas/double.cpp
@@ -19,7 +19,7 @@
#include "level2_real_impl.h"
#include "level3_impl.h"
-double BLASFUNC(dsdot)(int* n, float* x, int* incx, float* y, int* incy)
+double EIGEN_BLAS_FUNC(sdot)(int* n, float* x, int* incx, float* y, int* incy)
{
if(*n<=0) return 0;
diff --git a/blas/level1_cplx_impl.h b/blas/level1_cplx_impl.h
index 719f5bac9..4ac457175 100644
--- a/blas/level1_cplx_impl.h
+++ b/blas/level1_cplx_impl.h
@@ -25,7 +25,7 @@ namespace Eigen {
// computes the sum of magnitudes of all vector elements or, for a complex vector x, the sum
// res = |Rex1| + |Imx1| + |Rex2| + |Imx2| + ... + |Rexn| + |Imxn|, where x is a vector of order n
-RealScalar EIGEN_CAT(EIGEN_CAT(REAL_SCALAR_SUFFIX,SCALAR_SUFFIX),asum_)(int *n, RealScalar *px, int *incx)
+RealScalar EIGEN_CAT(REAL_SCALAR_SUFFIX, EIGEN_BLAS_FUNC(asum))(int *n, RealScalar *px, int *incx)
{
// std::cerr << "__asum " << *n << " " << *incx << "\n";
Complex* x = reinterpret_cast<Complex*>(px);
@@ -81,7 +81,7 @@ int EIGEN_BLAS_FUNC(dotuw)(int *n, RealScalar *px, int *incx, RealScalar *py, in
return 0;
}
-RealScalar EIGEN_CAT(EIGEN_CAT(REAL_SCALAR_SUFFIX,SCALAR_SUFFIX),nrm2_)(int *n, RealScalar *px, int *incx)
+RealScalar EIGEN_CAT(REAL_SCALAR_SUFFIX, EIGEN_BLAS_FUNC(nrm2))(int *n, RealScalar *px, int *incx)
{
// std::cerr << "__nrm2 " << *n << " " << *incx << "\n";
if(*n<=0) return 0;
@@ -94,7 +94,7 @@ RealScalar EIGEN_CAT(EIGEN_CAT(REAL_SCALAR_SUFFIX,SCALAR_SUFFIX),nrm2_)(int *n,
return make_vector(x,*n,*incx).stableNorm();
}
-int EIGEN_CAT(EIGEN_CAT(SCALAR_SUFFIX,REAL_SCALAR_SUFFIX),rot_)(int *n, RealScalar *px, int *incx, RealScalar *py, int *incy, RealScalar *pc, RealScalar *ps)
+int EIGEN_BLAS_FUNC(EIGEN_CAT(REAL_SCALAR_SUFFIX, rot))(int *n, RealScalar *px, int *incx, RealScalar *py, int *incy, RealScalar *pc, RealScalar *ps)
{
if(*n<=0) return 0;
@@ -117,7 +117,7 @@ int EIGEN_CAT(EIGEN_CAT(SCALAR_SUFFIX,REAL_SCALAR_SUFFIX),rot_)(int *n, RealScal
return 0;
}
-int EIGEN_CAT(EIGEN_CAT(SCALAR_SUFFIX,REAL_SCALAR_SUFFIX),scal_)(int *n, RealScalar *palpha, RealScalar *px, int *incx)
+int EIGEN_BLAS_FUNC(EIGEN_CAT(REAL_SCALAR_SUFFIX, scal))(int *n, RealScalar *palpha, RealScalar *px, int *incx)
{
if(*n<=0) return 0;
diff --git a/blas/level1_impl.h b/blas/level1_impl.h
index 6e7f8c976..d3ee03477 100644
--- a/blas/level1_impl.h
+++ b/blas/level1_impl.h
@@ -51,7 +51,7 @@ int EIGEN_BLAS_FUNC(copy)(int *n, RealScalar *px, int *incx, RealScalar *py, int
return 0;
}
-int EIGEN_CAT(EIGEN_CAT(i,SCALAR_SUFFIX),amax_)(int *n, RealScalar *px, int *incx)
+int EIGEN_CAT(i, EIGEN_BLAS_FUNC(amax))(int *n, RealScalar *px, int *incx)
{
if(*n<=0) return 0;
Scalar* x = reinterpret_cast<Scalar*>(px);
@@ -62,7 +62,7 @@ int EIGEN_CAT(EIGEN_CAT(i,SCALAR_SUFFIX),amax_)(int *n, RealScalar *px, int *inc
return int(ret)+1;
}
-int EIGEN_CAT(EIGEN_CAT(i,SCALAR_SUFFIX),amin_)(int *n, RealScalar *px, int *incx)
+int EIGEN_CAT(i, EIGEN_BLAS_FUNC(amin))(int *n, RealScalar *px, int *incx)
{
if(*n<=0) return 0;
Scalar* x = reinterpret_cast<Scalar*>(px);
diff --git a/blas/single.cpp b/blas/single.cpp
index 20ea57d5c..e66879aea 100644
--- a/blas/single.cpp
+++ b/blas/single.cpp
@@ -18,5 +18,5 @@
#include "level2_real_impl.h"
#include "level3_impl.h"
-float BLASFUNC(sdsdot)(int* n, float* alpha, float* x, int* incx, float* y, int* incy)
+float EIGEN_BLAS_FUNC(dsdot)(int* n, float* alpha, float* x, int* incx, float* y, int* incy)
{ return double(*alpha) + BLASFUNC(dsdot)(n, x, incx, y, incy); }
diff --git a/cmake/EigenTesting.cmake b/cmake/EigenTesting.cmake
index 35deed509..8cb2d5492 100644
--- a/cmake/EigenTesting.cmake
+++ b/cmake/EigenTesting.cmake
@@ -334,37 +334,32 @@ endmacro(ei_add_test_sycl)
# note that the test runner for these is CMake itself, when passed -DEIGEN_FAILTEST=ON
# so here we're just running CMake commands immediately, we're not adding any targets.
macro(ei_add_failtest testname)
- get_property(EIGEN_FAILTEST_FAILURE_COUNT GLOBAL PROPERTY EIGEN_FAILTEST_FAILURE_COUNT)
- get_property(EIGEN_FAILTEST_COUNT GLOBAL PROPERTY EIGEN_FAILTEST_COUNT)
- message(STATUS "Checking failtest: ${testname}")
- set(filename "${testname}.cpp")
- file(READ "${filename}" test_source)
+ set(test_target_ok ${testname}_ok)
+ set(test_target_ko ${testname}_ko)
- try_compile(succeeds_when_it_should_fail
- "${CMAKE_CURRENT_BINARY_DIR}"
- "${CMAKE_CURRENT_SOURCE_DIR}/${filename}"
- COMPILE_DEFINITIONS "-DEIGEN_SHOULD_FAIL_TO_BUILD")
- if (succeeds_when_it_should_fail)
- message(STATUS "FAILED: ${testname} build succeeded when it should have failed")
- endif()
+ # Add executables
+ add_executable(${test_target_ok} ${testname}.cpp)
+ add_executable(${test_target_ko} ${testname}.cpp)
- try_compile(succeeds_when_it_should_succeed
- "${CMAKE_CURRENT_BINARY_DIR}"
- "${CMAKE_CURRENT_SOURCE_DIR}/${filename}"
- COMPILE_DEFINITIONS)
- if (NOT succeeds_when_it_should_succeed)
- message(STATUS "FAILED: ${testname} build failed when it should have succeeded")
- endif()
+ # Remove them from the normal build process
+ set_target_properties(${test_target_ok} ${test_target_ko} PROPERTIES
+ EXCLUDE_FROM_ALL TRUE
+ EXCLUDE_FROM_DEFAULT_BUILD TRUE)
- if (succeeds_when_it_should_fail OR NOT succeeds_when_it_should_succeed)
- math(EXPR EIGEN_FAILTEST_FAILURE_COUNT ${EIGEN_FAILTEST_FAILURE_COUNT}+1)
- endif()
+ # Configure the failing test
+ target_compile_definitions(${test_target_ko} PRIVATE EIGEN_SHOULD_FAIL_TO_BUILD)
- math(EXPR EIGEN_FAILTEST_COUNT ${EIGEN_FAILTEST_COUNT}+1)
+ # Add the tests to ctest.
+ add_test(NAME ${test_target_ok}
+ COMMAND ${CMAKE_COMMAND} --build . --target ${test_target_ok} --config $<CONFIGURATION>
+ WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
+ add_test(NAME ${test_target_ko}
+ COMMAND ${CMAKE_COMMAND} --build . --target ${test_target_ko} --config $<CONFIGURATION>
+ WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
- set_property(GLOBAL PROPERTY EIGEN_FAILTEST_FAILURE_COUNT ${EIGEN_FAILTEST_FAILURE_COUNT})
- set_property(GLOBAL PROPERTY EIGEN_FAILTEST_COUNT ${EIGEN_FAILTEST_COUNT})
+ # Expect the second test to fail
+ set_tests_properties(${test_target_ko} PROPERTIES WILL_FAIL TRUE)
endmacro(ei_add_failtest)
# print a summary of the different options
diff --git a/doc/A05_PortingFrom2To3.dox b/doc/A05_PortingFrom2To3.dox
deleted file mode 100644
index 51555f996..000000000
--- a/doc/A05_PortingFrom2To3.dox
+++ /dev/null
@@ -1,299 +0,0 @@
-namespace Eigen {
-
-/** \page Eigen2ToEigen3 Porting from Eigen2 to Eigen3
-
-This page lists the most important API changes between Eigen2 and Eigen3,
-and gives tips to help porting your application from Eigen2 to Eigen3.
-
-\eigenAutoToc
-
-\section CompatibilitySupport Eigen2 compatibility support
-
-Up to version 3.2 %Eigen provides <a href="http://eigen.tuxfamily.org/dox-3.2/Eigen2SupportModes.html">Eigen2 support modes</a>. These are removed now, because they were barely used anymore and became hard to maintain after internal re-designs.
-You can still use them by first <a href="http://eigen.tuxfamily.org/dox-3.2/Eigen2ToEigen3.html">porting your code to Eigen 3.2</a>.
-
-\section Using The USING_PART_OF_NAMESPACE_EIGEN macro
-
-The USING_PART_OF_NAMESPACE_EIGEN macro has been removed. In Eigen 3, just do:
-\code
-using namespace Eigen;
-\endcode
-
-\section ComplexDot Dot products over complex numbers
-
-This is the single trickiest change between Eigen 2 and Eigen 3. It only affects code using \c std::complex numbers as scalar type.
-
-Eigen 2's dot product was linear in the first variable. Eigen 3's dot product is linear in the second variable. In other words, the Eigen 2 code \code x.dot(y) \endcode is equivalent to the Eigen 3 code \code y.dot(x) \endcode In yet other words, dot products are complex-conjugated in Eigen 3 compared to Eigen 2. The switch to the new convention was commanded by common usage, especially with the notation \f$ x^Ty \f$ for dot products of column-vectors.
-
-\section VectorBlocks Vector blocks
-
-<table class="manual">
-<tr><th>Eigen 2</th><th>Eigen 3</th></th>
-<tr><td>\code
-vector.start(length)
-vector.start<length>()
-vector.end(length)
-vector.end<length>()
-\endcode</td><td>\code
-vector.head(length)
-vector.head<length>()
-vector.tail(length)
-vector.tail<length>()
-\endcode</td></tr>
-</table>
-
-
-\section Corners Matrix Corners
-
-<table class="manual">
-<tr><th>Eigen 2</th><th>Eigen 3</th></th>
-<tr><td>\code
-matrix.corner(TopLeft,r,c)
-matrix.corner(TopRight,r,c)
-matrix.corner(BottomLeft,r,c)
-matrix.corner(BottomRight,r,c)
-matrix.corner<r,c>(TopLeft)
-matrix.corner<r,c>(TopRight)
-matrix.corner<r,c>(BottomLeft)
-matrix.corner<r,c>(BottomRight)
-\endcode</td><td>\code
-matrix.topLeftCorner(r,c)
-matrix.topRightCorner(r,c)
-matrix.bottomLeftCorner(r,c)
-matrix.bottomRightCorner(r,c)
-matrix.topLeftCorner<r,c>()
-matrix.topRightCorner<r,c>()
-matrix.bottomLeftCorner<r,c>()
-matrix.bottomRightCorner<r,c>()
-\endcode</td>
-</tr>
-</table>
-
-Notice that Eigen3 also provides these new convenience methods: topRows(), bottomRows(), leftCols(), rightCols(). See in class DenseBase.
-
-\section CoefficientWiseOperations Coefficient wise operations
-
-In Eigen2, coefficient wise operations which have no proper mathematical definition (as a coefficient wise product)
-were achieved using the .cwise() prefix, e.g.:
-\code a.cwise() * b \endcode
-In Eigen3 this .cwise() prefix has been superseded by a new kind of matrix type called
-Array for which all operations are performed coefficient wise. You can easily view a matrix as an array and vice versa using
-the MatrixBase::array() and ArrayBase::matrix() functions respectively. Here is an example:
-\code
-Vector4f a, b, c;
-c = a.array() * b.array();
-\endcode
-Note that the .array() function is not at all a synonym of the deprecated .cwise() prefix.
-While the .cwise() prefix changed the behavior of the following operator, the array() function performs
-a permanent conversion to the array world. Therefore, for binary operations such as the coefficient wise product,
-both sides must be converted to an \em array as in the above example. On the other hand, when you
-concatenate multiple coefficient wise operations you only have to do the conversion once, e.g.:
-\code
-Vector4f a, b, c;
-c = a.array().abs().pow(3) * b.array().abs().sin();
-\endcode
-With Eigen2 you would have written:
-\code
-c = (a.cwise().abs().cwise().pow(3)).cwise() * (b.cwise().abs().cwise().sin());
-\endcode
-
-\section PartAndExtract Triangular and self-adjoint matrices
-
-In Eigen 2 you had to play with the part, extract, and marked functions to deal with triangular and selfadjoint matrices. In Eigen 3, all these functions have been removed in favor of the concept of \em views:
-
-<table class="manual">
-<tr><th>Eigen 2</th><th>Eigen 3</th></tr>
-<tr><td>\code
-A.part<UpperTriangular>();
-A.part<StrictlyLowerTriangular>(); \endcode</td>
-<td>\code
-A.triangularView<Upper>()
-A.triangularView<StrictlyLower>()\endcode</td></tr>
-<tr><td>\code
-A.extract<UpperTriangular>();
-A.extract<StrictlyLowerTriangular>();\endcode</td>
-<td>\code
-A.triangularView<Upper>()
-A.triangularView<StrictlyLower>()\endcode</td></tr>
-<tr><td>\code
-A.marked<UpperTriangular>();
-A.marked<StrictlyLowerTriangular>();\endcode</td>
-<td>\code
-A.triangularView<Upper>()
-A.triangularView<StrictlyLower>()\endcode</td></tr>
-<tr><td colspan="2"></td></tr>
-<tr><td>\code
-A.part<SelfAdfjoint|UpperTriangular>();
-A.extract<SelfAdfjoint|LowerTriangular>();\endcode</td>
-<td>\code
-A.selfadjointView<Upper>()
-A.selfadjointView<Lower>()\endcode</td></tr>
-<tr><td colspan="2"></td></tr>
-<tr><td>\code
-UpperTriangular
-LowerTriangular
-UnitUpperTriangular
-UnitLowerTriangular
-StrictlyUpperTriangular
-StrictlyLowerTriangular
-\endcode</td><td>\code
-Upper
-Lower
-UnitUpper
-UnitLower
-StrictlyUpper
-StrictlyLower
-\endcode</td>
-</tr>
-</table>
-
-\sa class TriangularView, class SelfAdjointView
-
-\section TriangularSolveInPlace Triangular in-place solving
-
-<table class="manual">
-<tr><th>Eigen 2</th><th>Eigen 3</th></tr>
-<tr><td>\code A.triangularSolveInPlace<XxxTriangular>(Y);\endcode</td><td>\code A.triangularView<Xxx>().solveInPlace(Y);\endcode</td></tr>
-</table>
-
-
-\section Decompositions Matrix decompositions
-
-Some of Eigen 2's matrix decompositions have been renamed in Eigen 3, while some others have been removed and are replaced by other decompositions in Eigen 3.
-
-<table class="manual">
- <tr>
- <th>Eigen 2</th>
- <th>Eigen 3</th>
- <th>Notes</th>
- </tr>
- <tr>
- <td>LU</td>
- <td>FullPivLU</td>
- <td class="alt">See also the new PartialPivLU, it's much faster</td>
- </tr>
- <tr>
- <td>QR</td>
- <td>HouseholderQR</td>
- <td class="alt">See also the new ColPivHouseholderQR, it's more reliable</td>
- </tr>
- <tr>
- <td>SVD</td>
- <td>JacobiSVD</td>
- <td class="alt">We currently don't have a bidiagonalizing SVD; of course this is planned.</td>
- </tr>
- <tr>
- <td>EigenSolver and friends</td>
- <td>\code #include<Eigen/Eigenvalues> \endcode </td>
- <td class="alt">Moved to separate module</td>
- </tr>
-</table>
-
-\section LinearSolvers Linear solvers
-
-<table class="manual">
-<tr><th>Eigen 2</th><th>Eigen 3</th><th>Notes</th></tr>
-<tr><td>\code A.lu();\endcode</td>
-<td>\code A.fullPivLu();\endcode</td>
-<td class="alt">Now A.lu() returns a PartialPivLU</td></tr>
-<tr><td>\code A.lu().solve(B,&X);\endcode</td>
-<td>\code X = A.lu().solve(B);
- X = A.fullPivLu().solve(B);\endcode</td>
-<td class="alt">The returned by value is fully optimized</td></tr>
-<tr><td>\code A.llt().solve(B,&X);\endcode</td>
-<td>\code X = A.llt().solve(B);
- X = A.selfadjointView<Lower>.llt().solve(B);
- X = A.selfadjointView<Upper>.llt().solve(B);\endcode</td>
-<td class="alt">The returned by value is fully optimized and \n
-the selfadjointView API allows you to select the \n
-triangular part to work on (default is lower part)</td></tr>
-<tr><td>\code A.llt().solveInPlace(B);\endcode</td>
-<td>\code B = A.llt().solve(B);
- B = A.selfadjointView<Lower>.llt().solve(B);
- B = A.selfadjointView<Upper>.llt().solve(B);\endcode</td>
-<td class="alt">In place solving</td></tr>
-<tr><td>\code A.ldlt().solve(B,&X);\endcode</td>
-<td>\code X = A.ldlt().solve(B);
- X = A.selfadjointView<Lower>.ldlt().solve(B);
- X = A.selfadjointView<Upper>.ldlt().solve(B);\endcode</td>
-<td class="alt">The returned by value is fully optimized and \n
-the selfadjointView API allows you to select the \n
-triangular part to work on</td></tr>
-</table>
-
-\section GeometryModule Changes in the Geometry module
-
-The Geometry module is the one that changed the most. If you rely heavily on it, it's probably a good idea to use the <a href="http://eigen.tuxfamily.org/dox-3.2/Eigen2SupportModes.html">"Eigen 2 support modes"</a> to perform your migration.
-
-\section Transform The Transform class
-
-In Eigen 2, the Transform class didn't really know whether it was a projective or affine transformation. In Eigen 3, it takes a new \a Mode template parameter, which indicates whether it's \a Projective or \a Affine transform. There is no default value.
-
-The Transform3f (etc) typedefs are no more. In Eigen 3, the Transform typedefs explicitly refer to the \a Projective and \a Affine modes:
-
-<table class="manual">
-<tr><th>Eigen 2</th><th>Eigen 3</th><th>Notes</th></tr>
-<tr>
- <td> Transform3f </td>
- <td> Affine3f or Projective3f </td>
- <td> Of course 3f is just an example here </td>
-</tr>
-</table>
-
-
-\section LazyVsNoalias Lazy evaluation and noalias
-
-In Eigen all operations are performed in a lazy fashion except the matrix products which are always evaluated into a temporary by default.
-In Eigen2, lazy evaluation could be enforced by tagging a product using the .lazy() function. However, in complex expressions it was not
-easy to determine where to put the lazy() function. In Eigen3, the lazy() feature has been superseded by the MatrixBase::noalias() function
-which can be used on the left hand side of an assignment when no aliasing can occur. Here is an example:
-\code
-MatrixXf a, b, c;
-...
-c.noalias() += 2 * a.transpose() * b;
-\endcode
-However, the noalias mechanism does not cover all the features of the old .lazy(). Indeed, in some extremely rare cases,
-it might be useful to explicit request for a lay product, i.e., for a product which will be evaluated one coefficient at once, on request,
-just like any other expressions. To this end you can use the MatrixBase::lazyProduct() function, however we strongly discourage you to
-use it unless you are sure of what you are doing, i.e., you have rigourosly measured a speed improvement.
-
-\section AlignMacros Alignment-related macros
-
-The EIGEN_ALIGN_128 macro has been renamed to EIGEN_ALIGN16. Don't be surprised, it's just that we switched to counting in bytes ;-)
-
-The \link TopicPreprocessorDirectivesPerformance EIGEN_DONT_ALIGN \endlink option still exists in Eigen 3, but it has a new cousin: \link TopicPreprocessorDirectivesPerformance EIGEN_DONT_ALIGN_STATICALLY.\endlink It allows to get rid of all static alignment issues while keeping alignment of dynamic-size heap-allocated arrays. Vectorization of statically allocated arrays is still preserved (unless you define \link TopicPreprocessorDirectivesPerformance EIGEN_UNALIGNED_VECTORIZE \endlink =0), at the cost of unaligned memory stores.
-
-\section AlignedMap Aligned Map objects
-
-A common issue with Eigen 2 was that when mapping an array with Map, there was no way to tell Eigen that your array was aligned. There was a ForceAligned option but it didn't mean that; it was just confusing and has been removed.
-
-New in Eigen3 is the #Aligned option. See the documentation of class Map. Use it like this:
-\code
-Map<Vector4f, Aligned> myMappedVector(some_aligned_array);
-\endcode
-There also are related convenience static methods, which actually are the preferred way as they take care of such things as constness:
-\code
-result = Vector4f::MapAligned(some_aligned_array);
-\endcode
-
-\section StdContainers STL Containers
-
-In Eigen2, <tt>\#include\<Eigen/StdVector\></tt> tweaked std::vector to automatically align elements. The problem was that that was quite invasive. In Eigen3, we only override standard behavior if you use Eigen::aligned_allocator<T> as your allocator type. So for example, if you use std::vector<Matrix4f>, you need to do the following change (note that aligned_allocator is under namespace Eigen):
-
-<table class="manual">
-<tr><th>Eigen 2</th><th>Eigen 3</th></tr>
-<tr>
- <td> \code std::vector<Matrix4f> \endcode </td>
- <td> \code std::vector<Matrix4f, aligned_allocator<Matrix4f> > \endcode </td>
-</tr>
-</table>
-
-\section eiPrefix Internal ei_ prefix
-
-In Eigen2, global internal functions and structures were prefixed by \c ei_. In Eigen3, they all have been moved into the more explicit \c internal namespace. So, e.g., \c ei_sqrt(x) now becomes \c internal::sqrt(x). Of course it is not recommended to rely on Eigen's internal features.
-
-
-
-*/
-
-}
diff --git a/doc/AsciiQuickReference.txt b/doc/AsciiQuickReference.txt
index 0ca54cef3..18b4446c6 100644
--- a/doc/AsciiQuickReference.txt
+++ b/doc/AsciiQuickReference.txt
@@ -50,6 +50,12 @@ VectorXi::LinSpaced(((hi-low)/step)+1, // low:step:hi
// Matrix slicing and blocks. All expressions listed here are read/write.
// Templated size versions are faster. Note that Matlab is 1-based (a size N
// vector is x(1)...x(N)).
+/******************************************************************************/
+/* PLEASE HELP US IMPROVING THIS SECTION */
+/* Eigen 3.4 supports a much improved API for sub-matrices, including, */
+/* slicing and indexing from arrays: */
+/* http://eigen.tuxfamily.org/dox-devel/group__TutorialSlicingIndexing.html */
+/******************************************************************************/
// Eigen // Matlab
x.head(n) // x(1:n)
x.head<n>() // x(1:n)
@@ -88,6 +94,11 @@ R.row(i) = P.col(j); // R(i, :) = P(:, j)
R.col(j1).swap(mat1.col(j2)); // R(:, [j1 j2]) = R(:, [j2, j1])
// Views, transpose, etc;
+/******************************************************************************/
+/* PLEASE HELP US IMPROVING THIS SECTION */
+/* Eigen 3.4 supports a new API for reshaping: */
+/* http://eigen.tuxfamily.org/dox-devel/group__TutorialReshape.html */
+/******************************************************************************/
// Eigen // Matlab
R.adjoint() // R'
R.transpose() // R.' or conj(R') // Read-write
diff --git a/doc/CMakeLists.txt b/doc/CMakeLists.txt
index aa36d7891..f344ae572 100644
--- a/doc/CMakeLists.txt
+++ b/doc/CMakeLists.txt
@@ -106,7 +106,7 @@ add_custom_target(doc ALL
COMMAND doxygen Doxyfile-unsupported
COMMAND ${CMAKE_COMMAND} -E copy ${Eigen_BINARY_DIR}/doc/html/group__TopicUnalignedArrayAssert.html ${Eigen_BINARY_DIR}/doc/html/TopicUnalignedArrayAssert.html
COMMAND ${CMAKE_COMMAND} -E rename html eigen-doc
- COMMAND ${CMAKE_COMMAND} -E remove eigen-doc/eigen-doc.tgz
+ COMMAND ${CMAKE_COMMAND} -E remove eigen-doc/eigen-doc.tgz eigen-doc/unsupported/_formulas.log eigen-doc/_formulas.log
COMMAND ${CMAKE_COMMAND} -E tar cfz eigen-doc.tgz eigen-doc
COMMAND ${CMAKE_COMMAND} -E rename eigen-doc.tgz eigen-doc/eigen-doc.tgz
COMMAND ${CMAKE_COMMAND} -E rename eigen-doc html
diff --git a/doc/CoeffwiseMathFunctionsTable.dox b/doc/CoeffwiseMathFunctionsTable.dox
index af1251226..080e056e1 100644
--- a/doc/CoeffwiseMathFunctionsTable.dox
+++ b/doc/CoeffwiseMathFunctionsTable.dox
@@ -321,6 +321,42 @@ This also means that, unless specified, if the function \c std::foo is available
<td></td>
</tr>
<tr>
+ <td class="code">
+ \anchor cwisetable_asinh
+ a.\link ArrayBase::asinh asinh\endlink(); \n
+ \link Eigen::asinh asinh\endlink(a);
+ </td>
+ <td>computes inverse hyperbolic sine</td>
+ <td class="code">
+ using <a href="http://en.cppreference.com/w/cpp/numeric/math/asinh">std::asinh</a>; \n
+ asinh(a[i]);</td>
+ <td></td>
+</tr>
+<tr>
+ <td class="code">
+ \anchor cwisetable_acosh
+ a.\link ArrayBase::acosh acosh\endlink(); \n
+ \link Eigen::acosh acosh\endlink(a);
+ </td>
+ <td>computes inverse hyperbolic cosine</td>
+ <td class="code">
+ using <a href="http://en.cppreference.com/w/cpp/numeric/math/acosh">std::acosh</a>; \n
+ acosh(a[i]);</td>
+ <td></td>
+</tr>
+<tr>
+ <td class="code">
+ \anchor cwisetable_atanh
+ a.\link ArrayBase::atanh atanh\endlink(); \n
+ \link Eigen::atanh atanh\endlink(a);
+ </td>
+ <td>computes inverse hyperbolic tangent</td>
+ <td class="code">
+ using <a href="http://en.cppreference.com/w/cpp/numeric/math/atanh">std::atanh</a>; \n
+ atanh(a[i]);</td>
+ <td></td>
+</tr>
+<tr>
<th colspan="4">Nearest integer floating point operations</th>
</tr>
<tr>
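
Editorial note: a quick coefficient-wise usage sketch for the three table entries added above, assuming an Eigen version where Array::asinh/acosh/atanh are available:
\code
#include <Eigen/Core>
#include <iostream>

int main()
{
  Eigen::ArrayXf a = Eigen::ArrayXf::LinSpaced(4, 1.5f, 3.0f);
  std::cout << a.asinh()           << "\n\n"  // inverse hyperbolic sine, coefficient-wise
            << a.acosh()           << "\n\n"  // inverse hyperbolic cosine, valid since a >= 1
            << a.inverse().atanh() << "\n";   // inverse hyperbolic tangent, arguments in (-1,1)
  return 0;
}
\endcode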
diff --git a/doc/Doxyfile.in b/doc/Doxyfile.in
index 3ebbeb812..72120f1f1 100644
--- a/doc/Doxyfile.in
+++ b/doc/Doxyfile.in
@@ -1592,6 +1592,8 @@ PREDEFINED = EIGEN_EMPTY_STRUCT \
EIGEN_QT_SUPPORT \
EIGEN_STRONG_INLINE=inline \
EIGEN_DEVICE_FUNC= \
+ EIGEN_HAS_CXX11=1 \
+ EIGEN_HAS_CXX11_MATH=1 \
"EIGEN_MAKE_CWISE_BINARY_OP(METHOD,FUNCTOR)=template<typename OtherDerived> const CwiseBinaryOp<FUNCTOR<Scalar>, const Derived, const OtherDerived> METHOD(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const;" \
"EIGEN_CWISE_PRODUCT_RETURN_TYPE(LHS,RHS)=CwiseBinaryOp<internal::scalar_product_op<LHS::Scalar,RHS::Scalar>, const LHS, const RHS>"\
"EIGEN_CAT2(a,b)= a ## b"\
@@ -1610,6 +1612,9 @@ PREDEFINED = EIGEN_EMPTY_STRUCT \
EXPAND_AS_DEFINED = EIGEN_MAKE_TYPEDEFS \
EIGEN_MAKE_FIXED_TYPEDEFS \
EIGEN_MAKE_TYPEDEFS_ALL_SIZES \
+ EIGEN_MAKE_ARRAY_TYPEDEFS \
+ EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS \
+ EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES \
EIGEN_CWISE_UNOP_RETURN_TYPE \
EIGEN_CWISE_BINOP_RETURN_TYPE \
EIGEN_CURRENT_STORAGE_BASE_CLASS \
diff --git a/doc/InsideEigenExample.dox b/doc/InsideEigenExample.dox
index ed053c69d..ea2275bf2 100644
--- a/doc/InsideEigenExample.dox
+++ b/doc/InsideEigenExample.dox
@@ -212,6 +212,11 @@ Thus, the operator+ hasn't performed any actual computation. To summarize, the o
\section Assignment The assignment
+<div class="warningbox">
+<strong>PLEASE HELP US IMPROVING THIS SECTION.</strong>
+This page reflects how %Eigen worked until 3.2, but since %Eigen 3.3 the assignment is more sophisticated, as it involves an Assignment expression and the creation of so-called evaluators, which are responsible for the evaluation of each kind of expression.
+</div>
+
At this point, the expression \a v + \a w has finished evaluating, so, in the process of compiling the line of code
\code
u = v + w;
diff --git a/doc/Manual.dox b/doc/Manual.dox
index 0992cda9f..84f0db645 100644
--- a/doc/Manual.dox
+++ b/doc/Manual.dox
@@ -15,7 +15,6 @@ namespace Eigen {
/** \page UserManual_Generalities General topics
- - \subpage Eigen2ToEigen3
- \subpage TopicFunctionTakingEigenTypes
- \subpage TopicPreprocessorDirectives
- \subpage TopicAssertions
@@ -108,7 +107,7 @@ namespace Eigen {
/** \addtogroup CoeffwiseMathFunctions
\ingroup DenseMatrixManipulation_chapter */
-/** \addtogroup SparseQuickRefPage
+/** \addtogroup QuickRefPage
\ingroup DenseMatrixManipulation_chapter */
diff --git a/doc/Overview.dox b/doc/Overview.dox
index dbb49bd21..43a12871e 100644
--- a/doc/Overview.dox
+++ b/doc/Overview.dox
@@ -4,8 +4,6 @@ namespace Eigen {
This is the API documentation for Eigen3. You can <a href="eigen-doc.tgz">download</a> it as a tgz archive for offline reading.
-You're already an Eigen2 user? Here is a \link Eigen2ToEigen3 Eigen2 to Eigen3 guide \endlink to help porting your application.
-
For a first contact with Eigen, the best place is to have a look at the \link GettingStarted getting started \endlink page that show you how to write and compile your first program with Eigen.
Then, the \b quick \b reference \b pages give you a quite complete description of the API in a very condensed format that is specially useful to recall the syntax of a particular feature, or to have a quick look at the API. They currently cover the two following feature sets, and more will come in the future:
diff --git a/doc/Pitfalls.dox b/doc/Pitfalls.dox
index 3f395053d..fda402572 100644
--- a/doc/Pitfalls.dox
+++ b/doc/Pitfalls.dox
@@ -7,14 +7,30 @@ namespace Eigen {
See this \link TopicTemplateKeyword page \endlink.
+
\section TopicPitfalls_aliasing Aliasing
Don't miss this \link TopicAliasing page \endlink on aliasing,
especially if you got wrong results in statements where the destination appears on the right hand side of the expression.
+
+\section TopicPitfalls_alignment_issue Alignment Issues (runtime assertion)
+
+%Eigen does explicit vectorization, and while that is appreciated by many users, that also leads to some issues in special situations where data alignment is compromised.
+Indeed, prior to C++17, C++ did not have quite good enough support for explicit data alignment.
+In that case your program hits an assertion failure (that is, a "controlled crash") with a message that tells you to consult this page:
+\code
+http://eigen.tuxfamily.org/dox/group__TopicUnalignedArrayAssert.html
+\endcode
+Have a look at \link TopicUnalignedArrayAssert it \endlink and see for yourself if that's something that you can cope with.
+It contains detailed information about how to deal with each known cause for that issue.
+
+Now what if you don't care about vectorization and so don't want to be annoyed with these alignment issues? Then read \link getrid how to get rid of them \endlink.
+
+
\section TopicPitfalls_auto_keyword C++11 and the auto keyword
-In short: do not use the auto keywords with Eigen's expressions, unless you are 100% sure about what you are doing. In particular, do not use the auto keyword as a replacement for a Matrix<> type. Here is an example:
+In short: do not use the auto keyword with %Eigen's expressions, unless you are 100% sure about what you are doing. In particular, do not use the auto keyword as a replacement for a \c Matrix<> type. Here is an example:
\code
MatrixXd A, B;
@@ -22,23 +38,81 @@ auto C = A*B;
for(...) { ... w = C * v; ...}
\endcode
-In this example, the type of C is not a MatrixXd but an abstract expression representing a matrix product and storing references to A and B. Therefore, the product of A*B will be carried out multiple times, once per iteration of the for loop. Moreover, if the coefficients of A or B change during the iteration, then C will evaluate to different values.
+In this example, the type of C is not a \c MatrixXd but an abstract expression representing a matrix product and storing references to \c A and \c B.
+Therefore, the product of \c A*B will be carried out multiple times, once per iteration of the for loop.
+Moreover, if the coefficients of A or B change during the iteration, then C will evaluate to different values.
Here is another example leading to a segfault:
\code
auto C = ((A+B).eval()).transpose();
// do something with C
\endcode
-The problem is that eval() returns a temporary object (in this case a MatrixXd) which is then referenced by the Transpose<> expression. However, this temporary is deleted right after the first line, and there the C expression reference a dead object. The same issue might occur when sub expressions are automatically evaluated by Eigen as in the following example:
+The problem is that \c eval() returns a temporary object (in this case a \c MatrixXd) which is then referenced by the \c Transpose<> expression.
+However, this temporary is deleted right after the first line, and then the \c C expression references a dead object.
+One possible fix consists in applying \c eval() on the whole expression:
+\code
+auto C = (A+B).transpose().eval();
+\endcode
+
+The same issue might occur when sub expressions are automatically evaluated by %Eigen as in the following example:
\code
VectorXd u, v;
auto C = u + (A*v).normalized();
// do something with C
\endcode
-where the normalized() method has to evaluate the expensive product A*v to avoid evaluating it twice. On the other hand, the following example is perfectly fine:
+Here the \c normalized() method has to evaluate the expensive product \c A*v to avoid evaluating it twice.
+Again, one possible fix is to call \c .eval() on the whole expression:
\code
auto C = (u + (A*v).normalized()).eval();
\endcode
-In this case, C will be a regular VectorXd object.
+In this case, \c C will be a regular \c VectorXd object.
+Note that DenseBase::eval() is smart enough to avoid copies when the underlying expression is already a plain \c Matrix<>.
+
+
+\section TopicPitfalls_header_issues Header Issues (failure to compile)
+
+With all libraries, one must check the documentation for which header to include.
+The same is true with %Eigen, but slightly worse: with %Eigen, a method in a class may require an additional <code>#include</code> over what the class itself requires!
+For example, if you want to use the \c cross() method on a vector (it computes a cross-product) then you need to:
+\code
+#include<Eigen/Geometry>
+\endcode
+We try to always document this, but do tell us if we forgot an occurrence.
+
+
+\section TopicPitfalls_ternary_operator Ternary operator
+
+In short: avoid the use of the ternary operator <code>(COND ? THEN : ELSE)</code> with %Eigen's expressions for the \c THEN and \c ELSE statements.
+To see why, let's consider the following example:
+\code
+Vector3f A;
+A << 1, 2, 3;
+Vector3f B = ((1 < 0) ? (A.reverse()) : A);
+\endcode
+This example will return <code>B = 3, 2, 1</code>. Do you see why?
+The reason is that in c++ the type of the \c ELSE statement is inferred from the type of the \c THEN expression such that both match.
+Since \c THEN is a <code>Reverse<Vector3f></code>, the \c ELSE statement A is converted to a <code>Reverse<Vector3f></code>, and the compiler thus generates:
+\code
+Vector3f B = ((1 < 0) ? (A.reverse()) : Reverse<Vector3f>(A));
+\endcode
+In this very particular case, a workaround would be to call A.reverse().eval() for the \c THEN statement, but the safest and fastest approach is really to avoid this ternary operator with %Eigen's expressions and use an if/else construct.
+
+
+\section TopicPitfalls_pass_by_value Pass-by-value
+
+If you don't know why passing-by-value is wrong with %Eigen, read this \link TopicPassingByValue page \endlink first.
+
+While you may be extremely careful to make sure that all of your code that explicitly uses %Eigen types is pass-by-reference, you have to watch out for templates which define the argument types at compile time.
+
+If a template has a function that takes arguments pass-by-value, and the relevant template parameter ends up being an %Eigen type, then you will of course have the same alignment problems that you would in an explicitly defined function passing %Eigen types by reference.
+
+Using %Eigen types with other third party libraries or even the STL can present the same problem.
+<code>boost::bind</code> for example uses pass-by-value to store arguments in the returned functor.
+This will of course be a problem.
+
+There are at least two ways around this:
+ - If the value you are passing is guaranteed to be around for the life of the functor, you can use boost::ref() to wrap the value as you pass it to boost::bind. Generally this is not a solution for values on the stack: if the functor ever gets passed to a lower or independent scope, the object may be gone by the time it is used.
+ - The other option is to make your functions take a reference counted pointer like boost::shared_ptr as the argument. This avoids needing to worry about managing the lifetime of the object being passed.
+
*/
}
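
Editorial note: the pass-by-value paragraphs above can be made concrete with a small sketch; it uses std::bind/std::cref rather than the boost equivalents mentioned in the text, and the function names are made up for illustration:
\code
#include <Eigen/Core>
#include <functional>

// Risky pre-C++17: the by-value copy of a fixed-size vectorizable type may end up unaligned.
float sumByValue(Eigen::Vector4f v)      { return v.sum(); }
// Safe: the argument is passed by const reference, no copy is made.
float sumByRef(const Eigen::Vector4f& v) { return v.sum(); }

int main()
{
  Eigen::Vector4f v = Eigen::Vector4f::Ones();
  // std::cref stores only a reference inside the functor, so no by-value Eigen member is created;
  // v must of course outlive the functor.
  auto f = std::bind(sumByRef, std::cref(v));
  return static_cast<int>(f());
}
\endcode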
diff --git a/doc/PreprocessorDirectives.dox b/doc/PreprocessorDirectives.dox
index 7e9e30c4b..ffd2c660c 100644
--- a/doc/PreprocessorDirectives.dox
+++ b/doc/PreprocessorDirectives.dox
@@ -117,7 +117,7 @@ run time. However, these assertions do cost time and can thus be turned off.
Define it to 0 to disable.
- \b \c EIGEN_UNROLLING_LIMIT - defines the size of a loop to enable meta unrolling. Set it to zero to disable
unrolling. The size of a loop here is expressed in %Eigen's own notion of "number of FLOPS", it does not
- correspond to the number of iterations or the number of instructions. The default is value 100.
+ correspond to the number of iterations or the number of instructions. The default value is 110.
- \b \c EIGEN_STACK_ALLOCATION_LIMIT - defines the maximum bytes for a buffer to be allocated on the stack. For internal
temporary buffers, dynamic memory allocation is employed as a fall back. For fixed-size matrices or arrays, exceeding
this threshold raises a compile time assertion. Use 0 to set no limit. Default is 128 KB.
diff --git a/doc/QuickReference.dox b/doc/QuickReference.dox
index 18c90a2a9..9c8e6fb4a 100644
--- a/doc/QuickReference.dox
+++ b/doc/QuickReference.dox
@@ -529,6 +529,12 @@ if((array1 < array2).any()) ... // if there exist a pair i,j such that array1(i,
<a href="#" class="top">top</a>\section QuickRef_Blocks Sub-matrices
+<div class="warningbox">
+<strong>PLEASE HELP US IMPROVING THIS SECTION.</strong>
+%Eigen 3.4 supports a much improved API for sub-matrices, including,
+slicing and indexing from arrays: \ref TutorialSlicingIndexing
+</div>
+
Read-write access to a \link DenseBase::col(Index) column \endlink
or a \link DenseBase::row(Index) row \endlink of a matrix (or array):
\code
@@ -584,6 +590,11 @@ Read-write access to sub-matrices:</td></tr>
<a href="#" class="top">top</a>\section QuickRef_Misc Miscellaneous operations
+<div class="warningbox">
+<strong>PLEASE HELP US IMPROVING THIS SECTION.</strong>
+%Eigen 3.4 supports a new API for reshaping: \ref TutorialReshape
+</div>
+
\subsection QuickRef_Reverse Reverse
Vectors, rows, and/or columns of a matrix can be reversed (see DenseBase::reverse(), DenseBase::reverseInPlace(), VectorwiseOp::reverse()).
\code
diff --git a/doc/StlContainers.dox b/doc/StlContainers.dox
index e0f8714a9..665a54793 100644
--- a/doc/StlContainers.dox
+++ b/doc/StlContainers.dox
@@ -6,31 +6,39 @@ namespace Eigen {
\section StlContainers_summary Executive summary
-Using STL containers on \ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen types", or classes having members of such types, requires taking the following two steps:
+If you're compiling in \cpp17 mode only with a sufficiently recent compiler (e.g., GCC>=7, clang>=5, MSVC>=19.12), then everything is taken care of by the compiler and you can stop reading.
-\li A 16-byte-aligned allocator must be used. Eigen does provide one ready for use: aligned_allocator.
-\li If you want to use the std::vector container, you need to \#include <Eigen/StdVector>.
+Otherwise, using STL containers on \ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen types", or classes having members of such types, requires the use of an over-aligned allocator.
+That is, an allocator capable of allocating buffers with 16, 32, or even 64 bytes alignment.
+%Eigen does provide one ready for use: aligned_allocator.
-These issues arise only with \ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen types" and \ref TopicStructHavingEigenMembers "structures having such Eigen objects as member". For other Eigen types, such as Vector3f or MatrixXd, no special care is needed when using STL containers.
+Prior to \cpp11, if you want to use the `std::vector` container, then you also have to `#include <Eigen/StdVector>`.
+
+These issues arise only with \ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen types" and \ref TopicStructHavingEigenMembers "structures having such Eigen objects as member".
+For other %Eigen types, such as Vector3f or MatrixXd, no special care is needed when using STL containers.
\section allocator Using an aligned allocator
-STL containers take an optional template parameter, the allocator type. When using STL containers on \ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen types", you need tell the container to use an allocator that will always allocate memory at 16-byte-aligned locations. Fortunately, Eigen does provide such an allocator: Eigen::aligned_allocator.
+STL containers take an optional template parameter, the allocator type. When using STL containers on \ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen types", you need to tell the container to use an allocator that will always allocate memory at 16-byte-aligned (or more) locations. Fortunately, %Eigen does provide such an allocator: Eigen::aligned_allocator.
For example, instead of
\code
-std::map<int, Eigen::Vector4f>
+std::map<int, Eigen::Vector4d>
\endcode
you need to use
\code
-std::map<int, Eigen::Vector4f, std::less<int>,
- Eigen::aligned_allocator<std::pair<const int, Eigen::Vector4f> > >
+std::map<int, Eigen::Vector4d, std::less<int>,
+ Eigen::aligned_allocator<std::pair<const int, Eigen::Vector4d> > >
\endcode
-Note that the third parameter "std::less<int>" is just the default value, but we have to include it because we want to specify the fourth parameter, which is the allocator type.
+Note that the third parameter `std::less<int>` is just the default value, but we have to include it because we want to specify the fourth parameter, which is the allocator type.
\section StlContainers_vector The case of std::vector
-The situation with std::vector was even worse (explanation below) so we had to specialize it for the Eigen::aligned_allocator type. In practice you \b must use the Eigen::aligned_allocator (not another aligned allocator), \b and \#include <Eigen/StdVector>.
+This section is for c++98/03 users only. \cpp11 (or above) users can stop reading here.
+
+So in c++98/03, the situation with `std::vector` is more complicated because of a bug in the standard (explanation below).
+To work around the issue, we had to specialize it for the Eigen::aligned_allocator type.
+In practice you \b must use the Eigen::aligned_allocator (not another aligned allocator), \b and \#include <Eigen/StdVector>.
Here is an example:
\code
@@ -39,12 +47,16 @@ Here is an example:
std::vector<Eigen::Vector4f,Eigen::aligned_allocator<Eigen::Vector4f> >
\endcode
+<span class="note">\b Explanation: The `resize()` method of `std::vector` takes a `value_type` argument (defaulting to `value_type()`). So with `std::vector<Eigen::Vector4d>`, some Eigen::Vector4d objects will be passed by value, which discards any alignment modifiers, so an Eigen::Vector4d can be created at an unaligned location.
+In order to avoid that, the only solution we saw was to specialize `std::vector` to make it work on a slight modification of, here, Eigen::Vector4d, that is able to deal properly with this situation.
+</span>
+
\subsection vector_spec An alternative - specializing std::vector for Eigen types
As an alternative to the recommended approach described above, you have the option to specialize std::vector for Eigen types requiring alignment.
-The advantage is that you won't need to declare std::vector all over with Eigen::allocator. One drawback on the other hand side is that
-the specialization needs to be defined before all code pieces in which e.g. std::vector<Vector2d> is used. Otherwise, without knowing the specialization
-the compiler will compile that particular instance with the default std::allocator and you program is most likely to crash.
+The advantage is that you won't need to declare std::vector all over with Eigen::aligned_allocator. One drawback, on the other hand, is that
+the specialization needs to be defined before all code pieces in which e.g. `std::vector<Vector2d>` is used. Otherwise, without knowing the specialization
+the compiler will compile that particular instance with the default `std::allocator` and your program is most likely to crash.
Here is an example:
\code
@@ -54,8 +66,7 @@ EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION(Matrix2d)
std::vector<Eigen::Vector2d>
\endcode
-<span class="note">\b Explanation: The resize() method of std::vector takes a value_type argument (defaulting to value_type()). So with std::vector<Eigen::Vector4f>, some Eigen::Vector4f objects will be passed by value, which discards any alignment modifiers, so a Eigen::Vector4f can be created at an unaligned location. In order to avoid that, the only solution we saw was to specialize std::vector to make it work on a slight modification of, here, Eigen::Vector4f, that is able to deal properly with this situation.
-</span>
+
*/
diff --git a/doc/StructHavingEigenMembers.dox b/doc/StructHavingEigenMembers.dox
index 7fbed0eb0..87016cdc9 100644
--- a/doc/StructHavingEigenMembers.dox
+++ b/doc/StructHavingEigenMembers.dox
@@ -6,7 +6,12 @@ namespace Eigen {
\section StructHavingEigenMembers_summary Executive Summary
-If you define a structure having members of \ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen types", you must overload its "operator new" so that it generates 16-bytes-aligned pointers. Fortunately, %Eigen provides you with a macro EIGEN_MAKE_ALIGNED_OPERATOR_NEW that does that for you.
+
+If you define a structure having members of \ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen types", you must ensure that calling operator new on it allocates properly aligned buffers.
+If you're compiling in \cpp17 mode only with a sufficiently recent compiler (e.g., GCC>=7, clang>=5, MSVC>=19.12), then everything is taken care of by the compiler and you can stop reading.
+
+Otherwise, you have to overload its `operator new` so that it generates properly aligned pointers (e.g., 32-byte-aligned for Vector4d and AVX).
+Fortunately, %Eigen provides you with a macro `EIGEN_MAKE_ALIGNED_OPERATOR_NEW` that does that for you.
\section StructHavingEigenMembers_what What kind of code needs to be changed?
@@ -29,13 +34,13 @@ In other words: you have a class that has as a member a \ref TopicFixedSizeVecto
\section StructHavingEigenMembers_how How should such code be modified?
-Very easy, you just need to put a EIGEN_MAKE_ALIGNED_OPERATOR_NEW macro in a public part of your class, like this:
+Very easy, you just need to put an `EIGEN_MAKE_ALIGNED_OPERATOR_NEW` macro in a public part of your class, like this:
\code
class Foo
{
...
- Eigen::Vector2d v;
+ Eigen::Vector4d v;
...
public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
@@ -46,7 +51,9 @@ public:
Foo *foo = new Foo;
\endcode
-This macro makes "new Foo" always return an aligned pointer.
+This macro makes `new Foo` always return an aligned pointer.
+
+In \cpp17, this macro is empty.
If this approach is too intrusive, see also the \ref StructHavingEigenMembers_othersolutions "other solutions".
@@ -58,7 +65,7 @@ OK let's say that your code looks like this:
class Foo
{
...
- Eigen::Vector2d v;
+ Eigen::Vector4d v;
...
};
@@ -67,45 +74,59 @@ class Foo
Foo *foo = new Foo;
\endcode
-A Eigen::Vector2d consists of 2 doubles, which is 128 bits. Which is exactly the size of a SSE packet, which makes it possible to use SSE for all sorts of operations on this vector. But SSE instructions (at least the ones that %Eigen uses, which are the fast ones) require 128-bit alignment. Otherwise you get a segmentation fault.
+An Eigen::Vector4d consists of 4 doubles, which is 256 bits.
+This is exactly the size of an AVX register, which makes it possible to use AVX for all sorts of operations on this vector.
+But AVX instructions (at least the ones that %Eigen uses, which are the fast ones) require 256-bit alignment.
+Otherwise you get a segmentation fault.
-For this reason, Eigen takes care by itself to require 128-bit alignment for Eigen::Vector2d, by doing two things:
-\li Eigen requires 128-bit alignment for the Eigen::Vector2d's array (of 2 doubles). With GCC, this is done with a __attribute__ ((aligned(16))).
-\li Eigen overloads the "operator new" of Eigen::Vector2d so it will always return 128-bit aligned pointers.
+For this reason, %Eigen takes care by itself to require 256-bit alignment for Eigen::Vector4d, by doing two things:
+\li %Eigen requires 256-bit alignment for the Eigen::Vector4d's array (of 4 doubles). With \cpp11 this is done with the <a href="https://en.cppreference.com/w/cpp/keyword/alignas">alignas</a> keyword, or with compiler extensions in c++98/03.
+\li %Eigen overloads the `operator new` of Eigen::Vector4d so it will always return 256-bit aligned pointers. (removed in \cpp17)
-Thus, normally, you don't have to worry about anything, Eigen handles alignment for you...
+Thus, normally, you don't have to worry about anything, %Eigen handles alignment of operator new for you...
-... except in one case. When you have a class Foo like above, and you dynamically allocate a new Foo as above, then, since Foo doesn't have aligned "operator new", the returned pointer foo is not necessarily 128-bit aligned.
+... except in one case. When you have a `class Foo` like above, and you dynamically allocate a new `Foo` as above, then, since `Foo` doesn't have aligned `operator new`, the returned pointer foo is not necessarily 256-bit aligned.
-The alignment attribute of the member v is then relative to the start of the class, foo. If the foo pointer wasn't aligned, then foo->v won't be aligned either!
+The alignment attribute of the member `v` is then relative to the start of the class `Foo`. If the `foo` pointer wasn't aligned, then `foo->v` won't be aligned either!
-The solution is to let class Foo have an aligned "operator new", as we showed in the previous section.
+The solution is to let `class Foo` have an aligned `operator new`, as we showed in the previous section.
+
+This explanation also holds for SSE/NEON/MSA/Altivec/VSX targets, which require 16-byte alignment, and for AVX512, which requires 64-byte alignment for fixed-size objects whose size is a multiple of 64 bytes (e.g., Eigen::Matrix4d).
\section StructHavingEigenMembers_movetotop Should I then put all the members of Eigen types at the beginning of my class?
-That's not required. Since Eigen takes care of declaring 128-bit alignment, all members that need it are automatically 128-bit aligned relatively to the class. So code like this works fine:
+That's not required. Since %Eigen takes care of declaring adequate alignment, all members that need it are automatically aligned relative to the class. So code like this works fine:
\code
class Foo
{
double x;
- Eigen::Vector2d v;
+ Eigen::Vector4d v;
public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
};
\endcode
+That said, as usual, it is recommended to sort the members so that alignment does not waste memory.
+In the above example, with AVX, the compiler will have to reserve 24 empty bytes between `x` and `v`.
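+
+For instance (an illustrative sketch, assuming the 32-byte alignment required by AVX; the exact sizes below follow from that assumption), grouping the scalar members together avoids interleaving padding:
+\code
+struct Padded {            // 96 bytes with AVX: 24 padding bytes after each double
+  double x;
+  Eigen::Vector4d v;
+  double y;
+  EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+};
+struct Compact {           // 64 bytes with AVX: the two doubles share the trailing padding
+  Eigen::Vector4d v;
+  double x, y;
+  EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+};
+\endcode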
+
+
\section StructHavingEigenMembers_dynamicsize What about dynamic-size matrices and vectors?
Dynamic-size matrices and vectors, such as Eigen::VectorXd, allocate dynamically their own array of coefficients, so they take care of requiring absolute alignment automatically. So they don't cause this issue. The issue discussed here is only with \ref TopicFixedSizeVectorizable "fixed-size vectorizable matrices and vectors".
+
\section StructHavingEigenMembers_bugineigen So is this a bug in Eigen?
-No, it's not our bug. It's more like an inherent problem of the C++98 language specification, and seems to be taken care of in the upcoming language revision: <a href="http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2341.pdf">see this document</a>.
+No, it's not our bug. It's more like an inherent problem of the c++ language specification that has been solved in c++17 through the feature known as <a href="http://wg21.link/p0035r4">dynamic memory allocation for over-aligned data</a>.
+
-\section StructHavingEigenMembers_conditional What if I want to do this conditionnally (depending on template parameters) ?
+\section StructHavingEigenMembers_conditional What if I want to do this conditionally (depending on template parameters) ?
-For this situation, we offer the macro EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign). It will generate aligned operators like EIGEN_MAKE_ALIGNED_OPERATOR_NEW if NeedsToAlign is true. It will generate operators with the default alignment if NeedsToAlign is false.
+For this situation, we offer the macro `EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign)`.
+It will generate aligned operators like `EIGEN_MAKE_ALIGNED_OPERATOR_NEW` if `NeedsToAlign` is true.
+It will generate operators with the default alignment if `NeedsToAlign` is false.
+In \cpp17, this macro is empty.
Example:
@@ -130,7 +151,7 @@ Foo<3> *foo3 = new Foo<3>; // foo3 has only the system default alignment guarant
\section StructHavingEigenMembers_othersolutions Other solutions
-In case putting the EIGEN_MAKE_ALIGNED_OPERATOR_NEW macro everywhere is too intrusive, there exists at least two other solutions.
+In case putting the `EIGEN_MAKE_ALIGNED_OPERATOR_NEW` macro everywhere is too intrusive, there exist at least two other solutions.
\subsection othersolutions1 Disabling alignment
@@ -139,22 +160,13 @@ The first is to disable alignment requirement for the fixed size members:
class Foo
{
...
- Eigen::Matrix<double,2,1,Eigen::DontAlign> v;
+ Eigen::Matrix<double,4,1,Eigen::DontAlign> v;
...
};
\endcode
-This has for effect to disable vectorization when using \c v.
-If a function of Foo uses it several times, then it still possible to re-enable vectorization by copying it into an aligned temporary vector:
-\code
-void Foo::bar()
-{
- Eigen::Vector2d av(v);
- // use av instead of v
- ...
- // if av changed, then do:
- v = av;
-}
-\endcode
+Such a `v` is fully compatible with an aligned Eigen::Vector4d.
+Its only effect is to make loads/stores to `v` more expensive (usually only slightly, but that is hardware dependent).
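+
+For instance (an illustrative sketch), such an unaligned member mixes freely with regular, aligned %Eigen objects:
+\code
+Eigen::Matrix<double,4,1,Eigen::DontAlign> v;
+Eigen::Vector4d w = 2.0 * v + Eigen::Vector4d::Ones(); // v is read through unaligned loads
+v = w;                                                 // and written back through unaligned stores
+\endcode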
+
\subsection othersolutions2 Private structure
@@ -164,7 +176,7 @@ The second consist in storing the fixed-size objects into a private struct which
struct Foo_d
{
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
- Vector2d v;
+ Vector4d v;
...
};
@@ -183,7 +195,8 @@ private:
};
\endcode
-The clear advantage here is that the class Foo remains unchanged regarding alignment issues. The drawback is that a heap allocation will be required whatsoever.
+The clear advantage here is that the class `Foo` remains unchanged regarding alignment issues.
+The drawback is that an additional heap allocation will be required in any case.
*/
diff --git a/doc/TopicLazyEvaluation.dox b/doc/TopicLazyEvaluation.dox
index b7820e3e6..d2a704f13 100644
--- a/doc/TopicLazyEvaluation.dox
+++ b/doc/TopicLazyEvaluation.dox
@@ -2,63 +2,95 @@ namespace Eigen {
/** \page TopicLazyEvaluation Lazy Evaluation and Aliasing
-Executive summary: Eigen has intelligent compile-time mechanisms to enable lazy evaluation and removing temporaries where appropriate.
+Executive summary: %Eigen has intelligent compile-time mechanisms to enable lazy evaluation and removing temporaries where appropriate.
It will handle aliasing automatically in most cases, for example with matrix products. The automatic behavior can be overridden
manually by using the MatrixBase::eval() and MatrixBase::noalias() methods.
When you write a line of code involving a complex expression such as
-\code mat1 = mat2 + mat3 * (mat4 + mat5); \endcode
+\code mat1 = mat2 + mat3 * (mat4 + mat5);
+\endcode
-Eigen determines automatically, for each sub-expression, whether to evaluate it into a temporary variable. Indeed, in certain cases it is better to evaluate immediately a sub-expression into a temporary variable, while in other cases it is better to avoid that.
+%Eigen determines automatically, for each sub-expression, whether to evaluate it into a temporary variable. Indeed, in certain cases it is better to evaluate a sub-expression into a temporary variable, while in other cases it is better to avoid that.
A traditional math library without expression templates always evaluates all sub-expressions into temporaries. So with this code,
-\code vec1 = vec2 + vec3; \endcode
+\code vec1 = vec2 + vec3;
+\endcode
a traditional library would evaluate \c vec2 + vec3 into a temporary \c vec4 and then copy \c vec4 into \c vec1. This is of course inefficient: the arrays are traversed twice, so there are a lot of useless load/store operations.
-Expression-templates-based libraries can avoid evaluating sub-expressions into temporaries, which in many cases results in large speed improvements. This is called <i>lazy evaluation</i> as an expression is getting evaluated as late as possible, instead of immediately. However, most other expression-templates-based libraries <i>always</i> choose lazy evaluation. There are two problems with that: first, lazy evaluation is not always a good choice for performance; second, lazy evaluation can be very dangerous, for example with matrix products: doing <tt>matrix = matrix*matrix</tt> gives a wrong result if the matrix product is lazy-evaluated, because of the way matrix product works.
+Expression-templates-based libraries can avoid evaluating sub-expressions into temporaries, which in many cases results in large speed improvements.
+This is called <i>lazy evaluation</i> as an expression is getting evaluated as late as possible.
+In %Eigen <b>all expressions are lazy-evaluated</b>.
+More precisely, an expression starts to be evaluated once it is assigned to a matrix.
+Until then nothing happens beyond constructing the abstract expression tree.
+In contrast to most other expression-templates-based libraries, however, <b>%Eigen might choose to evaluate some sub-expressions into temporaries</b>.
+There are two reasons for that: first, pure lazy evaluation is not always a good choice for performance; second, pure lazy evaluation can be very dangerous, for example with matrix products: doing <tt>mat = mat*mat</tt> gives a wrong result if the matrix product is directly evaluated within the destination matrix, because of the way matrix product works.
-For these reasons, Eigen has intelligent compile-time mechanisms to determine automatically when to use lazy evaluation, and when on the contrary it should evaluate immediately into a temporary variable.
+For these reasons, %Eigen has intelligent compile-time mechanisms to determine automatically which sub-expression should be evaluated into a temporary variable.
So in the basic example,
-\code matrix1 = matrix2 + matrix3; \endcode
+\code mat1 = mat2 + mat3;
+\endcode
-Eigen chooses lazy evaluation. Thus the arrays are traversed only once, producing optimized code. If you really want to force immediate evaluation, use \link MatrixBase::eval() eval()\endlink:
+%Eigen chooses not to introduce any temporary. Thus the arrays are traversed only once, producing optimized code.
+If you really want to force immediate evaluation, use \link MatrixBase::eval() eval()\endlink:
-\code matrix1 = (matrix2 + matrix3).eval(); \endcode
+\code mat1 = (mat2 + mat3).eval();
+\endcode
Here is now a more involved example:
-\code matrix1 = -matrix2 + matrix3 + 5 * matrix4; \endcode
+\code mat1 = -mat2 + mat3 + 5 * mat4;
+\endcode
-Eigen chooses lazy evaluation at every stage in that example, which is clearly the correct choice. In fact, lazy evaluation is the "default choice" and Eigen will choose it except in a few circumstances.
+Here again %Eigen won't introduce any temporary, thus producing a single <b>fused</b> evaluation loop, which is clearly the correct choice.
-<b>The first circumstance</b> in which Eigen chooses immediate evaluation, is when it sees an assignment <tt>a = b;</tt> and the expression \c b has the evaluate-before-assigning \link flags flag\endlink. The most important example of such an expression is the \link Product matrix product expression\endlink. For example, when you do
+\section TopicLazyEvaluationWhichExpr Which sub-expressions are evaluated into temporaries?
-\code matrix = matrix * matrix; \endcode
+The default evaluation strategy is to fuse the operations in a single loop, and %Eigen will choose it except in a few circumstances.
-Eigen first evaluates <tt>matrix * matrix</tt> into a temporary matrix, and then copies it into the original \c matrix. This guarantees a correct result as we saw above that lazy evaluation gives wrong results with matrix products. It also doesn't cost much, as the cost of the matrix product itself is much higher.
+<b>The first circumstance</b> in which %Eigen chooses to evaluate a sub-expression is when it sees an assignment <tt>a = b;</tt> and the expression \c b has the evaluate-before-assigning \link flags flag\endlink.
+The most important example of such an expression is the \link Product matrix product expression\endlink. For example, when you do
+
+\code mat = mat * mat;
+\endcode
+
+%Eigen will evaluate <tt>mat * mat</tt> into a temporary matrix, and then copy it into the original \c mat.
+This guarantees a correct result as we saw above that lazy evaluation gives wrong results with matrix products.
+It also doesn't cost much, as the cost of the matrix product itself is much higher.
+Note that this temporary is introduced at evaluation time only, that is, within operator= in this example.
+The expression <tt>mat * mat</tt> still returns an abstract product type.
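+
+For instance (an illustrative sketch):
+\code
+MatrixXd A = MatrixXd::Random(4,4), B;
+auto expr = A * A; // nothing is computed here: expr is an abstract product expression
+B = expr;          // the product is evaluated now, into a temporary created inside operator=
+\endcode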
What if you know that the result does no alias the operand of the product and want to force lazy evaluation? Then use \link MatrixBase::noalias() .noalias()\endlink instead. Here is an example:
-\code matrix1.noalias() = matrix2 * matrix2; \endcode
+\code mat1.noalias() = mat2 * mat2;
+\endcode
-Here, since we know that matrix2 is not the same matrix as matrix1, we know that lazy evaluation is not dangerous, so we may force lazy evaluation. Concretely, the effect of noalias() here is to bypass the evaluate-before-assigning \link flags flag\endlink.
+Here, since we know that mat2 is not the same matrix as mat1, we know that lazy evaluation is not dangerous, so we may force lazy evaluation. Concretely, the effect of noalias() here is to bypass the evaluate-before-assigning \link flags flag\endlink.
-<b>The second circumstance</b> in which Eigen chooses immediate evaluation, is when it sees a nested expression such as <tt>a + b</tt> where \c b is already an expression having the evaluate-before-nesting \link flags flag\endlink. Again, the most important example of such an expression is the \link Product matrix product expression\endlink. For example, when you do
+<b>The second circumstance</b> in which %Eigen chooses to evaluate a sub-expression, is when it sees a nested expression such as <tt>a + b</tt> where \c b is already an expression having the evaluate-before-nesting \link flags flag\endlink.
+Again, the most important example of such an expression is the \link Product matrix product expression\endlink.
+For example, when you do
-\code matrix1 = matrix2 + matrix3 * matrix4; \endcode
+\code mat1 = mat2 * mat3 + mat4 * mat5;
+\endcode
-the product <tt>matrix3 * matrix4</tt> gets evaluated immediately into a temporary matrix. Indeed, experiments showed that it is often beneficial for performance to evaluate immediately matrix products when they are nested into bigger expressions.
+the products <tt>mat2 * mat3</tt> and <tt>mat4 * mat5</tt> get evaluated separately into temporary matrices before being summed up in <tt>mat1</tt>.
+Indeed, to be efficient, matrix products need to be evaluated into a destination matrix at hand, and not as simple "dot products".
+For small matrices, however, you might want to enforce a "dot-product" based lazy evaluation with lazyProduct() (see the sketch below).
+Again, it is important to understand that those temporaries are created at evaluation time only, that is in operator =.
+See TopicPitfalls_auto_keyword for common pitfalls regarding this remark.
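+
+A minimal sketch of such a lazy, "dot-product" style evaluation (reusing the matrices of the example above):
+\code mat1.noalias() = mat2.lazyProduct(mat3) + mat4.lazyProduct(mat5); // fused evaluation, no temporaries
+\endcode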
-<b>The third circumstance</b> in which Eigen chooses immediate evaluation, is when its cost model shows that the total cost of an operation is reduced if a sub-expression gets evaluated into a temporary. Indeed, in certain cases, an intermediate result is sufficiently costly to compute and is reused sufficiently many times, that is worth "caching". Here is an example:
+<b>The third circumstance</b> in which %Eigen chooses to evaluate a sub-expression, is when its cost model shows that the total cost of an operation is reduced if a sub-expression gets evaluated into a temporary.
+Indeed, in certain cases, an intermediate result is sufficiently costly to compute and is reused sufficiently many times, that it is worth "caching". Here is an example:
-\code matrix1 = matrix2 * (matrix3 + matrix4); \endcode
+\code mat1 = mat2 * (mat3 + mat4);
+\endcode
-Here, provided the matrices have at least 2 rows and 2 columns, each coefficienct of the expression <tt>matrix3 + matrix4</tt> is going to be used several times in the matrix product. Instead of computing the sum every time, it is much better to compute it once and store it in a temporary variable. Eigen understands this and evaluates <tt>matrix3 + matrix4</tt> into a temporary variable before evaluating the product.
+Here, provided the matrices have at least 2 rows and 2 columns, each coefficient of the expression <tt>mat3 + mat4</tt> is going to be used several times in the matrix product. Instead of computing the sum every time, it is much better to compute it once and store it in a temporary variable. %Eigen understands this and evaluates <tt>mat3 + mat4</tt> into a temporary variable before evaluating the product.
*/
diff --git a/doc/TutorialMatrixClass.dox b/doc/TutorialMatrixClass.dox
index 7ea0cd789..2c452220f 100644
--- a/doc/TutorialMatrixClass.dox
+++ b/doc/TutorialMatrixClass.dox
@@ -101,13 +101,41 @@ Matrix3f a(3,3);
\endcode
and is a no-operation.
-Finally, we also offer some constructors to initialize the coefficients of small fixed-size vectors up to size 4:
+Matrices and vectors can also be initialized from lists of coefficients.
+Prior to C++11, this feature is limited to small fixed-size column or row vectors up to size 4:
\code
Vector2d a(5.0, 6.0);
Vector3d b(5.0, 6.0, 7.0);
Vector4d c(5.0, 6.0, 7.0, 8.0);
\endcode
+If C++11 is enabled, fixed-size column or row vectors of arbitrary size can be initialized by passing an arbitrary number of coefficients:
+\code
+Vector2i a(1, 2); // A column vector containing the elements {1, 2}
+Matrix<int, 5, 1> b {1, 2, 3, 4, 5};   // A column-vector containing the elements {1, 2, 3, 4, 5}
+Matrix<int, 1, 5> c = {1, 2, 3, 4, 5}; // A row-vector containing the elements {1, 2, 3, 4, 5}
+\endcode
+
+In the general case of matrices and vectors with either fixed or runtime sizes,
+coefficients have to be grouped by rows and passed as an initializer list of initializer lists (\link Matrix::Matrix(const std::initializer_list<std::initializer_list<Scalar>>&) details \endlink):
+\code
+MatrixXi a { // construct a 2x2 matrix
+ {1, 2}, // first row
+ {3, 4} // second row
+};
+Matrix<double, 2, 3> b {
+ {2, 3, 4},
+ {5, 6, 7},
+};
+\endcode
+
+For column or row vectors, implicit transposition is allowed.
+This means that a column vector can be initialized from a single row:
+\code
+VectorXd a {{1.5, 2.5, 3.5}}; // A column-vector with 3 coefficients
+RowVectorXd b {{1.0, 2.0, 3.0, 4.0}}; // A row-vector with 4 coefficients
+\endcode
+
\section TutorialMatrixCoeffAccessors Coefficient accessors
The primary coefficient accessors and mutators in Eigen are the overloaded parenthesis operators.
diff --git a/doc/TutorialSTL.dox b/doc/TutorialSTL.dox
new file mode 100644
index 000000000..9a825bc48
--- /dev/null
+++ b/doc/TutorialSTL.dox
@@ -0,0 +1,66 @@
+namespace Eigen {
+
+/** \eigenManualPage TutorialSTL STL iterators and algorithms
+
+Since version 3.4, %Eigen's dense matrices and arrays provide STL-compatible iterators.
+As demonstrated below, this makes them naturally compatible with range-for-loops and STL's algorithms.
+
+\eigenAutoToc
+
+\section TutorialSTLVectors Iterating over 1D arrays and vectors
+
+Any dense 1D expression exposes the pair of `begin()`/`end()` methods to iterate over it.
+
+This directly enables c++11 range-based for loops:
+<table class="example">
+<tr><th>Example:</th><th>Output:</th></tr>
+<tr><td>
+\include Tutorial_range_for_loop_1d_cxx11.cpp
+</td>
+<td>
+\verbinclude Tutorial_range_for_loop_1d_cxx11.out
+</td></tr></table>
+
+One-dimensional expressions can also easily be passed to STL algorithms:
+<table class="example">
+<tr><th>Example:</th><th>Output:</th></tr>
+<tr><td>
+\include Tutorial_std_sort.cpp
+</td>
+<td>
+\verbinclude Tutorial_std_sort.out
+</td></tr></table>
+
+Similar to `std::vector`, 1D expressions also expose the pair of `cbegin()`/`cend()` methods to conveniently get const iterators on a non-const object.
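+
+For instance (an illustrative sketch), this makes read-only traversal through STL algorithms straightforward:
+\code
+VectorXd v = VectorXd::LinSpaced(5, 0.0, 1.0);
+double s = std::accumulate(v.cbegin(), v.cend(), 0.0); // sum of the coefficients, without modifying v
+\endcode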
+
+\section TutorialSTLMatrices Iterating over coefficients of 2D arrays and matrices
+
+STL iterators are intrinsically designed to iterate over 1D structures.
+This is why `begin()/end()` methods are disabled for 2D expressions.
+Iterating over all coefficients of a 2D expression is still easily accomplished by creating a 1D linear view through `reshaped()`:
+<table class="example">
+<tr><th>Example:</th><th>Output:</th></tr>
+<tr><td>
+\include Tutorial_range_for_loop_2d_cxx11.cpp
+</td>
+<td>
+\verbinclude Tutorial_range_for_loop_2d_cxx11.out
+</td></tr></table>
+
+\section TutorialSTLRowsColumns Iterating over rows or columns of 2D arrays and matrices
+
+It is also possible to get iterators over rows or columns of 2D expressions.
+Those are available through the `rowwise()` and `colwise()` proxies.
+Here is an example sorting each row of a matrix:
+<table class="example">
+<tr><th>Example:</th><th>Output:</th></tr>
+<tr><td>
+\include Tutorial_std_sort_rows_cxx11.cpp
+</td>
+<td>
+\verbinclude Tutorial_std_sort_rows_cxx11.out
+</td></tr></table>
+
+*/
+
+}
diff --git a/doc/TutorialSlicingIndexing.dox b/doc/TutorialSlicingIndexing.dox
index 3b60eac6e..98ace43e4 100644
--- a/doc/TutorialSlicingIndexing.dox
+++ b/doc/TutorialSlicingIndexing.dox
@@ -2,7 +2,7 @@ namespace Eigen {
/** \eigenManualPage TutorialSlicingIndexing Slicing and Indexing
-This pape presents the numerous possibilities offered by `operator()` to index sub-set of rows and columns.
+This page presents the numerous possibilities offered by `operator()` to index sub-sets of rows and columns.
This API has been introduced in %Eigen 3.4.
It supports all the feature proposed by the \link TutorialBlockOperations block API \endlink, and much more.
In particular, it supports \b slicing that consists in taking a set of rows, columns, or elements, uniformly spaced within a matrix or indexed from an array of indices.
diff --git a/doc/UnalignedArrayAssert.dox b/doc/UnalignedArrayAssert.dox
index 8676faa1b..410c8a58f 100644
--- a/doc/UnalignedArrayAssert.dox
+++ b/doc/UnalignedArrayAssert.dox
@@ -12,7 +12,9 @@ is explained here: http://eigen.tuxfamily.org/dox-devel/group__TopicUnalignedArr
**** READ THIS WEB PAGE !!! ****"' failed.
</pre>
-There are 4 known causes for this issue. Please read on to understand them and learn how to fix them.
+There are 4 known causes for this issue.
+If you can target \cpp17 only with a recent compiler (e.g., GCC>=7, clang>=5, MSVC>=19.12), then you're lucky: enabling c++17 should be enough (if not, please <a href="http://eigen.tuxfamily.org/bz/">report</a> to us).
+Otherwise, please read on to understand those issues and learn how to fix them.
\eigenAutoToc
@@ -35,7 +37,7 @@ If you have code like this,
class Foo
{
//...
- Eigen::Vector2d v;
+ Eigen::Vector4d v;
//...
};
//...
@@ -44,27 +46,27 @@ Foo *foo = new Foo;
then you need to read this separate page: \ref TopicStructHavingEigenMembers "Structures Having Eigen Members".
-Note that here, Eigen::Vector2d is only used as an example, more generally the issue arises for all \ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen types".
+Note that here, Eigen::Vector4d is only used as an example, more generally the issue arises for all \ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen types".
\section c2 Cause 2: STL Containers or manual memory allocation
If you use STL Containers such as std::vector, std::map, ..., with %Eigen objects, or with classes containing %Eigen objects, like this,
\code
-std::vector<Eigen::Matrix2f> my_vector;
-struct my_class { ... Eigen::Matrix2f m; ... };
+std::vector<Eigen::Matrix2d> my_vector;
+struct my_class { ... Eigen::Matrix2d m; ... };
std::map<int, my_class> my_map;
\endcode
then you need to read this separate page: \ref TopicStlContainers "Using STL Containers with Eigen".
-Note that here, Eigen::Matrix2f is only used as an example, more generally the issue arises for all \ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen types" and \ref TopicStructHavingEigenMembers "structures having such Eigen objects as member".
+Note that here, Eigen::Matrix2d is only used as an example, more generally the issue arises for all \ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen types" and \ref TopicStructHavingEigenMembers "structures having such Eigen objects as member".
-The same issue will be exhibited by any classes/functions by-passing operator new to allocate memory, that is, by performing custom memory allocation followed by calls to the placement new operator. This is for instance typically the case of \c std::make_shared or \c std::allocate_shared for which is the solution is to use an \ref aligned_allocator "aligned allocator" as detailed in the \ref TopicStlContainers "solution for STL containers".
+The same issue will be exhibited by any classes/functions by-passing operator new to allocate memory, that is, by performing custom memory allocation followed by calls to the placement new operator. This is for instance typically the case of `std::make_shared` or `std::allocate_shared`, for which the solution is to use an \ref aligned_allocator "aligned allocator" as detailed in the \ref TopicStlContainers "solution for STL containers".
\section c3 Cause 3: Passing Eigen objects by value
-If some function in your code is getting an Eigen object passed by value, like this,
+If some function in your code is getting an %Eigen object passed by value, like this,
\code
void func(Eigen::Vector4d v);
@@ -90,11 +92,13 @@ then you need to read this separate page: \ref TopicWrongStackAlignment "Compile
Note that here, Eigen::Quaternionf is only used as an example, more generally the issue arises for all \ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen types".
+
\section explanation General explanation of this assertion
-\ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen objects" must absolutely be created at 16-byte-aligned locations, otherwise SIMD instructions addressing them will crash.
+\ref TopicFixedSizeVectorizable "Fixed-size vectorizable Eigen objects" must absolutely be created at properly aligned locations, otherwise SIMD instructions addressing them will crash.
+For instance, SSE/NEON/MSA/Altivec/VSX targets will require 16-byte alignment, whereas AVX and AVX512 targets may require up to 32- and 64-byte alignment, respectively.
-Eigen normally takes care of these alignment issues for you, by setting an alignment attribute on them and by overloading their "operator new".
+%Eigen normally takes care of these alignment issues for you, by setting an alignment attribute on them and by overloading their `operator new`.
However there are a few corner cases where these alignment settings get overridden: they are the possible causes for this assertion.
@@ -102,22 +106,22 @@ However there are a few corner cases where these alignment settings get overridd
Three possibilities:
<ul>
- <li>Use the \c DontAlign option to Matrix, Array, Quaternion, etc. objects that gives you trouble. This way Eigen won't try to align them, and thus won"t assume any special alignment. On the down side, you will pay the cost of unaligned loads/stores for them, but on modern CPUs, the overhead is either null or marginal. See \link StructHavingEigenMembers_othersolutions here \endlink for an example.</li>
- <li>Define \link TopicPreprocessorDirectivesPerformance EIGEN_DONT_ALIGN_STATICALLY \endlink. That disables all 16-byte (and above) static alignment code, while keeping 16-byte (or above) heap alignment. This has the effect of
+  <li>Use the \c DontAlign option to Matrix, Array, Quaternion, etc. objects that give you trouble. This way %Eigen won't try to over-align them, and thus won't assume any special alignment. On the down side, you will pay the cost of unaligned loads/stores for them, but on modern CPUs, the overhead is either null or marginal. See \link StructHavingEigenMembers_othersolutions here \endlink for an example.</li>
+ <li>Define \link TopicPreprocessorDirectivesPerformance EIGEN_MAX_STATIC_ALIGN_BYTES \endlink to 0. That disables all 16-byte (and above) static alignment code, while keeping 16-byte (or above) heap alignment. This has the effect of
vectorizing fixed-size objects (like Matrix4d) through unaligned stores (as controlled by \link TopicPreprocessorDirectivesPerformance EIGEN_UNALIGNED_VECTORIZE \endlink), while keeping unchanged the vectorization of dynamic-size objects
- (like MatrixXd). But do note that this breaks ABI compatibility with the default behavior of static alignment.</li>
- <li>Or define both \link TopicPreprocessorDirectivesPerformance EIGEN_DONT_VECTORIZE \endlink and EIGEN_DISABLE_UNALIGNED_ARRAY_ASSERT. This keeps the
- 16-byte alignment code and thus preserves ABI compatibility, but completely disables vectorization.</li>
+    (like MatrixXd). On systems where 64-byte alignment is in effect (e.g., with AVX512), you might also define it to 16 to disable only the 32- and 64-byte over-alignment. But do note that this breaks ABI compatibility with the default behavior of static alignment.</li>
+ <li>Or define both \link TopicPreprocessorDirectivesPerformance EIGEN_DONT_VECTORIZE \endlink and `EIGEN_DISABLE_UNALIGNED_ARRAY_ASSERT`. This keeps the
+ 16-byte (or above) alignment code and thus preserves ABI compatibility, but completely disables vectorization.</li>
</ul>
-If you want to know why defining EIGEN_DONT_VECTORIZE does not by itself disable 16-byte alignment and the assertion, here's the explanation:
+If you want to know why defining `EIGEN_DONT_VECTORIZE` does not by itself disable 16-byte (or above) alignment and the assertion, here's the explanation:
It doesn't disable the assertion, because otherwise code that runs fine without vectorization would suddenly crash when enabling vectorization.
-It doesn't disable 16-byte alignment, because that would mean that vectorized and non-vectorized code are not mutually ABI-compatible. This ABI compatibility is very important, even for people who develop only an in-house application, as for instance one may want to have in the same application a vectorized path and a non-vectorized path.
+It doesn't disable 16-byte (or above) alignment, because that would mean that vectorized and non-vectorized code are not mutually ABI-compatible. This ABI compatibility is very important, even for people who develop only an in-house application, as for instance one may want to have in the same application a vectorized path and a non-vectorized path.
\section checkmycode How can I check my code is safe regarding alignment issues?
-Unfortunately, there is no possibility in C++ to detect any of the aforementioned shortcoming at compile time (though static analysers are becoming more and more powerful and could detect some of them).
+Unfortunately, there is no possibility in c++ to detect any of the aforementioned shortcomings at compile time (though static analyzers are becoming more and more powerful and could detect some of them).
Even at runtime, all we can do is to catch invalid unaligned allocation and trigger the explicit assertion mentioned at the beginning of this page.
Therefore, if your program runs fine on a given system with some given compilation flags, then this does not guarantee that your code is safe. For instance, on most 64 bits systems buffer are aligned on 16 bytes boundary and so, if you do not enable AVX instruction set, then your code will run fine. On the other hand, the same code may assert if moving to a more exotic platform, or enabling AVX instructions that required 32 bytes alignment by default.
diff --git a/doc/eigendoxy.css b/doc/eigendoxy.css
index 6147c7154..6148655f3 100644
--- a/doc/eigendoxy.css
+++ b/doc/eigendoxy.css
@@ -188,6 +188,13 @@ span.cpp11,span.cpp14,span.cpp17 {
font-weight: bold;
}
+div.warningbox {
+ max-width:60em;
+ border-style: solid solid solid solid;
+ border-color: red;
+ border-width: 3px;
+}
+
/**** old Eigen's styles ****/
diff --git a/doc/eigendoxy_footer.html.in b/doc/eigendoxy_footer.html.in
index 9ac0596cb..94f2bab71 100644
--- a/doc/eigendoxy_footer.html.in
+++ b/doc/eigendoxy_footer.html.in
@@ -17,18 +17,22 @@ $generatedby &#160;<a href="http://www.doxygen.org/index.html">
</small></address>
<!--END !GENERATE_TREEVIEW-->
-<!-- Piwik -->
+<!-- Matomo -->
<script type="text/javascript">
-var pkBaseURL = (("https:" == document.location.protocol) ? "https://stats.sylphide-consulting.com/piwik/" : "http://stats.sylphide-consulting.com/piwik/");
-document.write(unescape("%3Cscript src='" + pkBaseURL + "piwik.js' type='text/javascript'%3E%3C/script%3E"));
-</script><script type="text/javascript">
-try {
-var piwikTracker = Piwik.getTracker(pkBaseURL + "piwik.php", 20);
-piwikTracker.trackPageView();
-piwikTracker.enableLinkTracking();
-} catch( err ) {}
-</script><noscript><p><img src="http://stats.sylphide-consulting.com/piwik/piwik.php?idsite=20" style="border:0" alt="" /></p></noscript>
-<!-- End Piwik Tracking Code -->
+ var _paq = _paq || [];
+ /* tracker methods like "setCustomDimension" should be called before "trackPageView" */
+ _paq.push(['trackPageView']);
+ _paq.push(['enableLinkTracking']);
+ (function() {
+ var u="//stats.sylphide-consulting.com/matomo/";
+ _paq.push(['setTrackerUrl', u+'piwik.php']);
+ _paq.push(['setSiteId', '20']);
+ var d=document, g=d.createElement('script'), s=d.getElementsByTagName('script')[0];
+ g.type='text/javascript'; g.async=true; g.defer=true; g.src=u+'piwik.js'; s.parentNode.insertBefore(g,s);
+ })();
+</script>
+<noscript><p><img src="//stats.sylphide-consulting.com/matomo/piwik.php?idsite=20&rec=1" style="border:0;" alt="" /></p></noscript>
+<!-- End Matomo Code -->
</body>
</html>
diff --git a/doc/snippets/Array_initializer_list_23_cxx11.cpp b/doc/snippets/Array_initializer_list_23_cxx11.cpp
new file mode 100644
index 000000000..1ea32dd80
--- /dev/null
+++ b/doc/snippets/Array_initializer_list_23_cxx11.cpp
@@ -0,0 +1,5 @@
+ArrayXXi a {
+ {1, 2, 3},
+ {3, 4, 5}
+};
+cout << a << endl; \ No newline at end of file
diff --git a/doc/snippets/Array_initializer_list_vector_cxx11.cpp b/doc/snippets/Array_initializer_list_vector_cxx11.cpp
new file mode 100644
index 000000000..e38b61e95
--- /dev/null
+++ b/doc/snippets/Array_initializer_list_vector_cxx11.cpp
@@ -0,0 +1,2 @@
+Array<int, Dynamic, 1> v {{1, 2, 3, 4, 5}};
+cout << v << endl; \ No newline at end of file
diff --git a/doc/snippets/Array_variadic_ctor_cxx11.cpp b/doc/snippets/Array_variadic_ctor_cxx11.cpp
new file mode 100644
index 000000000..234c7a720
--- /dev/null
+++ b/doc/snippets/Array_variadic_ctor_cxx11.cpp
@@ -0,0 +1,3 @@
+Array<int, 1, 6> a(1, 2, 3, 4, 5, 6);
+Array<int, 3, 1> b {1, 2, 3};
+cout << a << "\n\n" << b << endl; \ No newline at end of file
diff --git a/doc/snippets/Matrix_initializer_list_23_cxx11.cpp b/doc/snippets/Matrix_initializer_list_23_cxx11.cpp
new file mode 100644
index 000000000..d338d0253
--- /dev/null
+++ b/doc/snippets/Matrix_initializer_list_23_cxx11.cpp
@@ -0,0 +1,5 @@
+MatrixXd m {
+ {1, 2, 3},
+ {4, 5, 6}
+};
+cout << m << endl; \ No newline at end of file
diff --git a/doc/snippets/Matrix_initializer_list_vector_cxx11.cpp b/doc/snippets/Matrix_initializer_list_vector_cxx11.cpp
new file mode 100644
index 000000000..8872e2cf3
--- /dev/null
+++ b/doc/snippets/Matrix_initializer_list_vector_cxx11.cpp
@@ -0,0 +1,2 @@
+VectorXi v {{1, 2}};
+cout << v << endl; \ No newline at end of file
diff --git a/doc/snippets/Matrix_variadic_ctor_cxx11.cpp b/doc/snippets/Matrix_variadic_ctor_cxx11.cpp
new file mode 100644
index 000000000..fcb4ccf88
--- /dev/null
+++ b/doc/snippets/Matrix_variadic_ctor_cxx11.cpp
@@ -0,0 +1,3 @@
+Matrix<int, 1, 6> a(1, 2, 3, 4, 5, 6);
+Matrix<int, 3, 1> b {1, 2, 3};
+cout << a << "\n\n" << b << endl; \ No newline at end of file
diff --git a/doc/snippets/Tutorial_std_sort_rows.cpp b/doc/snippets/Tutorial_std_sort_rows_cxx11.cpp
index fdd850d13..fdd850d13 100644
--- a/doc/snippets/Tutorial_std_sort_rows.cpp
+++ b/doc/snippets/Tutorial_std_sort_rows_cxx11.cpp
diff --git a/failtest/CMakeLists.txt b/failtest/CMakeLists.txt
index 1a73f05e6..256e541e2 100644
--- a/failtest/CMakeLists.txt
+++ b/failtest/CMakeLists.txt
@@ -1,4 +1,3 @@
-message(STATUS "Running the failtests")
ei_add_failtest("failtest_sanity_check")
@@ -64,12 +63,8 @@ ei_add_failtest("bdcsvd_int")
ei_add_failtest("eigensolver_int")
ei_add_failtest("eigensolver_cplx")
-if (EIGEN_FAILTEST_FAILURE_COUNT)
- message(FATAL_ERROR
- "${EIGEN_FAILTEST_FAILURE_COUNT} out of ${EIGEN_FAILTEST_COUNT} failtests FAILED. "
- "To debug these failures, manually compile these programs in ${CMAKE_CURRENT_SOURCE_DIR}, "
- "with and without #define EIGEN_SHOULD_FAIL_TO_BUILD.")
-else()
- message(STATUS "Failtest SUCCESS: all ${EIGEN_FAILTEST_COUNT} failtests passed.")
- message(STATUS "")
+if(EIGEN_TEST_CXX11)
+ ei_add_failtest("initializer_list_1")
+ ei_add_failtest("initializer_list_2")
endif()
+
diff --git a/failtest/initializer_list_1.cpp b/failtest/initializer_list_1.cpp
new file mode 100644
index 000000000..92dfd1f65
--- /dev/null
+++ b/failtest/initializer_list_1.cpp
@@ -0,0 +1,14 @@
+#include "../Eigen/Core"
+
+#ifdef EIGEN_SHOULD_FAIL_TO_BUILD
+#define ROWS Dynamic
+#else
+#define ROWS 3
+#endif
+
+using namespace Eigen;
+
+int main()
+{
+ Matrix<int, ROWS, 1> {1, 2, 3};
+}
diff --git a/failtest/initializer_list_2.cpp b/failtest/initializer_list_2.cpp
new file mode 100644
index 000000000..1996050a7
--- /dev/null
+++ b/failtest/initializer_list_2.cpp
@@ -0,0 +1,16 @@
+#include "../Eigen/Core"
+
+#ifdef EIGEN_SHOULD_FAIL_TO_BUILD
+#define ROWS Dynamic
+#define COLS Dynamic
+#else
+#define ROWS 3
+#define COLS 1
+#endif
+
+using namespace Eigen;
+
+int main()
+{
+ Matrix<int, ROWS, COLS> {1, 2, 3};
+}
diff --git a/lapack/CMakeLists.txt b/lapack/CMakeLists.txt
index 522ba8a2b..9b0d8638c 100644
--- a/lapack/CMakeLists.txt
+++ b/lapack/CMakeLists.txt
@@ -136,6 +136,7 @@ if(EXISTS ${eigen_full_path_to_testing_lapack})
add_subdirectory(testing/MATGEN)
add_subdirectory(testing/LIN)
add_subdirectory(testing/EIG)
+ cmake_policy(SET CMP0026 OLD)
macro(add_lapack_test output input target)
set(TEST_INPUT "${LAPACK_SOURCE_DIR}/testing/${input}")
set(TEST_OUTPUT "${LAPACK_BINARY_DIR}/testing/${output}")
diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt
index f215d97cd..8c58f2a33 100644
--- a/test/CMakeLists.txt
+++ b/test/CMakeLists.txt
@@ -164,6 +164,7 @@ ei_add_test(sizeof)
ei_add_test(dynalloc)
ei_add_test(nomalloc)
ei_add_test(first_aligned)
+ei_add_test(type_alias)
ei_add_test(nullary)
ei_add_test(mixingtypes)
ei_add_test(packetmath "-DEIGEN_FAST_MATH=1")
@@ -268,6 +269,7 @@ ei_add_test(sparselu)
ei_add_test(sparseqr)
ei_add_test(umeyama)
ei_add_test(nesting_ops "${CMAKE_CXX_FLAGS_DEBUG}")
+ei_add_test(nestbyvalue)
ei_add_test(zerosized)
ei_add_test(dontalign)
ei_add_test(evaluators)
@@ -286,6 +288,10 @@ ei_add_test(half_float)
ei_add_test(array_of_string)
ei_add_test(num_dimensions)
ei_add_test(stl_iterators)
+if(EIGEN_TEST_CXX11)
+ ei_add_test(initializer_list_construction)
+ ei_add_test(diagonal_matrix_variadic_ctor)
+endif()
add_executable(bug1213 bug1213.cpp bug1213_main.cpp)
@@ -359,7 +365,7 @@ if(EIGEN_TEST_EIGEN2)
endif()
# boost MP unit test
-find_package(Boost)
+find_package(Boost 1.53.0)
if(Boost_FOUND)
include_directories(${Boost_INCLUDE_DIRS})
ei_add_test(boostmultiprec "" "${Boost_LIBRARIES}")
@@ -439,11 +445,6 @@ if (EIGEN_TEST_HIP)
endif(EIGEN_TEST_HIP)
-
-
-file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/failtests)
-add_test(NAME failtests WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/failtests COMMAND ${CMAKE_COMMAND} ${Eigen_SOURCE_DIR} -G "${CMAKE_GENERATOR}" -DEIGEN_FAILTEST=ON)
-
option(EIGEN_TEST_BUILD_DOCUMENTATION "Test building the doxygen documentation" OFF)
IF(EIGEN_TEST_BUILD_DOCUMENTATION)
add_dependencies(buildtests doc)
diff --git a/test/adjoint.cpp b/test/adjoint.cpp
index 4e1e4b5e8..4c4f98bb9 100644
--- a/test/adjoint.cpp
+++ b/test/adjoint.cpp
@@ -143,6 +143,9 @@ template<typename MatrixType> void adjoint(const MatrixType& m)
RealVectorType rv1 = RealVectorType::Random(rows);
VERIFY_IS_APPROX(v1.dot(rv1.template cast<Scalar>()), v1.dot(rv1));
VERIFY_IS_APPROX(rv1.template cast<Scalar>().dot(v1), rv1.dot(v1));
+
+ VERIFY( is_same_type(m1,m1.template conjugateIf<false>()) );
+ VERIFY( is_same_type(m1.conjugate(),m1.template conjugateIf<true>()) );
}
template<int>
@@ -171,6 +174,21 @@ void adjoint_extra()
c = MatrixXd::Ones(10,10) * 1.0 + c;
c = c + MatrixXd::Ones(10,10) .cwiseProduct( MatrixXd::Zero(10,10) );
c = MatrixXd::Ones(10,10) * MatrixXd::Zero(10,10);
+
+ // regression for bug 1646
+ for (int j = 0; j < 10; ++j) {
+ c.col(j).head(j) = c.row(j).head(j);
+ }
+
+ for (int j = 0; j < 10; ++j) {
+ c.col(j) = c.row(j);
+ }
+
+ a.conservativeResize(1,1);
+ a = a.transpose();
+
+ a.conservativeResize(0,0);
+ a = a.transpose();
}
EIGEN_DECLARE_TEST(adjoint)
diff --git a/test/array_cwise.cpp b/test/array_cwise.cpp
index 84e46665b..9e4adb701 100644
--- a/test/array_cwise.cpp
+++ b/test/array_cwise.cpp
@@ -92,15 +92,30 @@ template<typename ArrayType> void array(const ArrayType& m)
ArrayType::RowsAtCompileTime==Dynamic?2:ArrayType::RowsAtCompileTime,
ArrayType::ColsAtCompileTime==Dynamic?2:ArrayType::ColsAtCompileTime,
ArrayType::Options> FixedArrayType;
- FixedArrayType f1(s1);
- VERIFY_IS_APPROX(f1, FixedArrayType::Constant(s1));
- FixedArrayType f2(numext::real(s1));
- VERIFY_IS_APPROX(f2, FixedArrayType::Constant(numext::real(s1)));
- FixedArrayType f3((int)100*numext::real(s1));
- VERIFY_IS_APPROX(f3, FixedArrayType::Constant((int)100*numext::real(s1)));
- f1.setRandom();
- FixedArrayType f4(f1.data());
- VERIFY_IS_APPROX(f4, f1);
+ {
+ FixedArrayType f1(s1);
+ VERIFY_IS_APPROX(f1, FixedArrayType::Constant(s1));
+ FixedArrayType f2(numext::real(s1));
+ VERIFY_IS_APPROX(f2, FixedArrayType::Constant(numext::real(s1)));
+ FixedArrayType f3((int)100*numext::real(s1));
+ VERIFY_IS_APPROX(f3, FixedArrayType::Constant((int)100*numext::real(s1)));
+ f1.setRandom();
+ FixedArrayType f4(f1.data());
+ VERIFY_IS_APPROX(f4, f1);
+ }
+ #if EIGEN_HAS_CXX11
+ {
+ FixedArrayType f1{s1};
+ VERIFY_IS_APPROX(f1, FixedArrayType::Constant(s1));
+ FixedArrayType f2{numext::real(s1)};
+ VERIFY_IS_APPROX(f2, FixedArrayType::Constant(numext::real(s1)));
+ FixedArrayType f3{(int)100*numext::real(s1)};
+ VERIFY_IS_APPROX(f3, FixedArrayType::Constant((int)100*numext::real(s1)));
+ f1.setRandom();
+ FixedArrayType f4{f1.data()};
+ VERIFY_IS_APPROX(f4, f1);
+ }
+ #endif
// pow
VERIFY_IS_APPROX(m1.pow(2), m1.square());
@@ -120,10 +135,51 @@ template<typename ArrayType> void array(const ArrayType& m)
// Check possible conflicts with 1D ctor
typedef Array<Scalar, Dynamic, 1> OneDArrayType;
- OneDArrayType o1(rows);
- VERIFY(o1.size()==rows);
- OneDArrayType o4((int)rows);
- VERIFY(o4.size()==rows);
+ {
+ OneDArrayType o1(rows);
+ VERIFY(o1.size()==rows);
+ OneDArrayType o2(static_cast<int>(rows));
+ VERIFY(o2.size()==rows);
+ }
+ #if EIGEN_HAS_CXX11
+ {
+ OneDArrayType o1{rows};
+ VERIFY(o1.size()==rows);
+ OneDArrayType o4{int(rows)};
+ VERIFY(o4.size()==rows);
+ }
+ #endif
+ // Check possible conflicts with 2D ctor
+ typedef Array<Scalar, Dynamic, Dynamic> TwoDArrayType;
+ typedef Array<Scalar, 2, 1> ArrayType2;
+ {
+ TwoDArrayType o1(rows,cols);
+ VERIFY(o1.rows()==rows);
+ VERIFY(o1.cols()==cols);
+ TwoDArrayType o2(static_cast<int>(rows),static_cast<int>(cols));
+ VERIFY(o2.rows()==rows);
+ VERIFY(o2.cols()==cols);
+
+ ArrayType2 o3(rows,cols);
+ VERIFY(o3(0)==Scalar(rows) && o3(1)==Scalar(cols));
+ ArrayType2 o4(static_cast<int>(rows),static_cast<int>(cols));
+ VERIFY(o4(0)==Scalar(rows) && o4(1)==Scalar(cols));
+ }
+ #if EIGEN_HAS_CXX11
+ {
+ TwoDArrayType o1{rows,cols};
+ VERIFY(o1.rows()==rows);
+ VERIFY(o1.cols()==cols);
+ TwoDArrayType o2{int(rows),int(cols)};
+ VERIFY(o2.rows()==rows);
+ VERIFY(o2.cols()==cols);
+
+ ArrayType2 o3{rows,cols};
+ VERIFY(o3(0)==Scalar(rows) && o3(1)==Scalar(cols));
+ ArrayType2 o4{int(rows),int(cols)};
+ VERIFY(o4(0)==Scalar(rows) && o4(1)==Scalar(cols));
+ }
+ #endif
}
template<typename ArrayType> void comparisons(const ArrayType& m)
@@ -231,6 +287,11 @@ template<typename ArrayType> void array_real(const ArrayType& m)
VERIFY_IS_APPROX(m1.sinh(), sinh(m1));
VERIFY_IS_APPROX(m1.cosh(), cosh(m1));
VERIFY_IS_APPROX(m1.tanh(), tanh(m1));
+#if EIGEN_HAS_CXX11_MATH
+ VERIFY_IS_APPROX(m1.tanh().atanh(), atanh(tanh(m1)));
+ VERIFY_IS_APPROX(m1.sinh().asinh(), asinh(sinh(m1)));
+ VERIFY_IS_APPROX(m1.cosh().acosh(), acosh(cosh(m1)));
+#endif
VERIFY_IS_APPROX(m1.logistic(), logistic(m1));
VERIFY_IS_APPROX(m1.arg(), arg(m1));
@@ -462,6 +523,7 @@ EIGEN_DECLARE_TEST(array_cwise)
CALL_SUBTEST_4( array(ArrayXXcf(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
CALL_SUBTEST_5( array(ArrayXXf(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
CALL_SUBTEST_6( array(ArrayXXi(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
+ CALL_SUBTEST_6( array(Array<Index,Dynamic,Dynamic>(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
}
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1( comparisons(Array<float, 1, 1>()) );
diff --git a/test/array_reverse.cpp b/test/array_reverse.cpp
index e23159def..c77528a5b 100644
--- a/test/array_reverse.cpp
+++ b/test/array_reverse.cpp
@@ -132,6 +132,60 @@ void array_reverse_extra()
VERIFY(x.reverse() == y);
}
+// Simpler version of reverseInPlace leveraging a bug
+// in clang 6/7 with -O2 and AVX or AVX512 enabled.
+// This simpler version ensures that the clang bug is not simply hidden
+// through mis-inlining of reverseInPlace or other minor changes.
+template<typename MatrixType>
+EIGEN_DONT_INLINE
+void bug1684_job1(MatrixType& m1, MatrixType& m2)
+{
+ m2 = m1;
+ m2.col(0).swap(m2.col(3));
+ m2.col(1).swap(m2.col(2));
+}
+
+template<typename MatrixType>
+EIGEN_DONT_INLINE
+void bug1684_job2(MatrixType& m1, MatrixType& m2)
+{
+ m2 = m1; // load m1/m2 in AVX registers
+ m1.col(0) = m2.col(3); // perform 128 bits moves
+ m1.col(1) = m2.col(2);
+ m1.col(2) = m2.col(1);
+ m1.col(3) = m2.col(0);
+}
+
+template<typename MatrixType>
+EIGEN_DONT_INLINE
+void bug1684_job3(MatrixType& m1, MatrixType& m2)
+{
+ m2 = m1;
+ Vector4f tmp;
+ tmp = m2.col(0);
+ m2.col(0) = m2.col(3);
+ m2.col(3) = tmp;
+ tmp = m2.col(1);
+ m2.col(1) = m2.col(2);
+ m2.col(2) = tmp;
+
+}
+
+template<int>
+void bug1684()
+{
+ Matrix4f m1 = Matrix4f::Random();
+ Matrix4f m2 = Matrix4f::Random();
+ bug1684_job1(m1,m2);
+ VERIFY_IS_APPROX(m2, m1.rowwise().reverse().eval());
+ bug1684_job2(m1,m2);
+ VERIFY_IS_APPROX(m2, m1.rowwise().reverse().eval());
+  // This one still fails after our swap's workaround,
+ // but I expect users not to implement their own swap.
+ // bug1684_job3(m1,m2);
+ // VERIFY_IS_APPROX(m2, m1.rowwise().reverse().eval());
+}
+
EIGEN_DECLARE_TEST(array_reverse)
{
for(int i = 0; i < g_repeat; i++) {
@@ -144,6 +198,7 @@ EIGEN_DECLARE_TEST(array_reverse)
CALL_SUBTEST_7( reverse(MatrixXcd(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
CALL_SUBTEST_8( reverse(Matrix<float, 100, 100>()) );
CALL_SUBTEST_9( reverse(Matrix<float,Dynamic,Dynamic,RowMajor>(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
+ CALL_SUBTEST_3( bug1684<0>() );
}
CALL_SUBTEST_3( array_reverse_extra<0>() );
}
diff --git a/test/bdcsvd.cpp b/test/bdcsvd.cpp
index 3065ff015..85a80d6bb 100644
--- a/test/bdcsvd.cpp
+++ b/test/bdcsvd.cpp
@@ -46,6 +46,8 @@ void bdcsvd_method()
VERIFY_RAISES_ASSERT(m.bdcSvd().matrixU());
VERIFY_RAISES_ASSERT(m.bdcSvd().matrixV());
VERIFY_IS_APPROX(m.bdcSvd(ComputeFullU|ComputeFullV).solve(m), m);
+ VERIFY_IS_APPROX(m.bdcSvd(ComputeFullU|ComputeFullV).transpose().solve(m), m);
+ VERIFY_IS_APPROX(m.bdcSvd(ComputeFullU|ComputeFullV).adjoint().solve(m), m);
}
// compare the Singular values returned with Jacobi and Bdc
diff --git a/test/bicgstab.cpp b/test/bicgstab.cpp
index 89d6a45ef..59c4b501c 100644
--- a/test/bicgstab.cpp
+++ b/test/bicgstab.cpp
@@ -10,11 +10,11 @@
#include "sparse_solver.h"
#include <Eigen/IterativeLinearSolvers>
-template<typename T, typename I> void test_bicgstab_T()
+template<typename T, typename I_> void test_bicgstab_T()
{
- BiCGSTAB<SparseMatrix<T,0,I>, DiagonalPreconditioner<T> > bicgstab_colmajor_diag;
- BiCGSTAB<SparseMatrix<T,0,I>, IdentityPreconditioner > bicgstab_colmajor_I;
- BiCGSTAB<SparseMatrix<T,0,I>, IncompleteLUT<T,I> > bicgstab_colmajor_ilut;
+ BiCGSTAB<SparseMatrix<T,0,I_>, DiagonalPreconditioner<T> > bicgstab_colmajor_diag;
+ BiCGSTAB<SparseMatrix<T,0,I_>, IdentityPreconditioner > bicgstab_colmajor_I;
+ BiCGSTAB<SparseMatrix<T,0,I_>, IncompleteLUT<T,I_> > bicgstab_colmajor_ilut;
//BiCGSTAB<SparseMatrix<T>, SSORPreconditioner<T> > bicgstab_colmajor_ssor;
bicgstab_colmajor_diag.setTolerance(NumTraits<T>::epsilon()*4);
diff --git a/test/block.cpp b/test/block.cpp
index 27b60d778..84124aba6 100644
--- a/test/block.cpp
+++ b/test/block.cpp
@@ -227,6 +227,16 @@ template<typename MatrixType> void block(const MatrixType& m)
VERIFY_IS_APPROX( (m1+m1).template subVector<Vertical>(c1), (m1+m1).col(c1) );
VERIFY_IS_EQUAL( m1.template subVectors<Horizontal>(), m1.rows() );
VERIFY_IS_EQUAL( m1.template subVectors<Vertical>(), m1.cols() );
+
+ if (rows>=2 || cols>=2) {
+ VERIFY_IS_EQUAL( int(m1.middleCols(0,0).IsRowMajor), int(m1.IsRowMajor) );
+ VERIFY_IS_EQUAL( m1.middleCols(0,0).outerSize(), m1.IsRowMajor ? rows : 0);
+ VERIFY_IS_EQUAL( m1.middleCols(0,0).innerSize(), m1.IsRowMajor ? 0 : rows);
+
+ VERIFY_IS_EQUAL( int(m1.middleRows(0,0).IsRowMajor), int(m1.IsRowMajor) );
+ VERIFY_IS_EQUAL( m1.middleRows(0,0).outerSize(), m1.IsRowMajor ? 0 : cols);
+ VERIFY_IS_EQUAL( m1.middleRows(0,0).innerSize(), m1.IsRowMajor ? cols : 0);
+ }
}
@@ -287,11 +297,14 @@ EIGEN_DECLARE_TEST(block)
{
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1( block(Matrix<float, 1, 1>()) );
+ CALL_SUBTEST_1( block(Matrix<float, 1, Dynamic>(internal::random(2,50))) );
+ CALL_SUBTEST_1( block(Matrix<float, Dynamic, 1>(internal::random(2,50))) );
CALL_SUBTEST_2( block(Matrix4d()) );
- CALL_SUBTEST_3( block(MatrixXcf(3, 3)) );
- CALL_SUBTEST_4( block(MatrixXi(8, 12)) );
- CALL_SUBTEST_5( block(MatrixXcd(20, 20)) );
- CALL_SUBTEST_6( block(MatrixXf(20, 20)) );
+ CALL_SUBTEST_3( block(MatrixXcf(internal::random(2,50), internal::random(2,50))) );
+ CALL_SUBTEST_4( block(MatrixXi(internal::random(2,50), internal::random(2,50))) );
+ CALL_SUBTEST_5( block(MatrixXcd(internal::random(2,50), internal::random(2,50))) );
+ CALL_SUBTEST_6( block(MatrixXf(internal::random(2,50), internal::random(2,50))) );
+ CALL_SUBTEST_7( block(Matrix<int,Dynamic,Dynamic,RowMajor>(internal::random(2,50), internal::random(2,50))) );
CALL_SUBTEST_8( block(Matrix<float,Dynamic,4>(3, 4)) );
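
The new empty-block checks in block.cpp above rely on Eigen's convention that outerSize()/innerSize() follow the storage order: for column-major storage the outer dimension is the number of columns and the inner dimension the number of rows, and the roles swap for row-major. A short sketch of that convention (illustrative only, not part of the patch):

#include <Eigen/Dense>
#include <iostream>
int main()
{
  Eigen::MatrixXf cm(5, 3);  // column-major by default
  Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor> rm(5, 3);
  std::cout << cm.outerSize() << " " << cm.innerSize() << "\n";  // 3 5
  std::cout << rm.outerSize() << " " << rm.innerSize() << "\n";  // 5 3
}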
diff --git a/test/boostmultiprec.cpp b/test/boostmultiprec.cpp
index 579a6fd25..1d1441ae2 100644
--- a/test/boostmultiprec.cpp
+++ b/test/boostmultiprec.cpp
@@ -66,6 +66,7 @@
#undef isnan
#undef isinf
#undef isfinite
+#undef I
#include <boost/multiprecision/cpp_dec_float.hpp>
#include <boost/multiprecision/number.hpp>
diff --git a/test/cholesky.cpp b/test/cholesky.cpp
index b871351e0..0b1a7b45b 100644
--- a/test/cholesky.cpp
+++ b/test/cholesky.cpp
@@ -7,18 +7,16 @@
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-#ifndef EIGEN_NO_ASSERTION_CHECKING
-#define EIGEN_NO_ASSERTION_CHECKING
-#endif
-
#define TEST_ENABLE_TEMPORARY_TRACKING
#include "main.h"
#include <Eigen/Cholesky>
#include <Eigen/QR>
+#include "solverbase.h"
template<typename MatrixType, int UpLo>
typename MatrixType::RealScalar matrix_l1_norm(const MatrixType& m) {
+ if(m.cols()==0) return typename MatrixType::RealScalar(0);
MatrixType symm = m.template selfadjointView<UpLo>();
return symm.cwiseAbs().colwise().sum().maxCoeff();
}
@@ -80,15 +78,17 @@ template<typename MatrixType> void cholesky(const MatrixType& m)
}
{
+ STATIC_CHECK(( internal::is_same<typename LLT<MatrixType,Lower>::StorageIndex,int>::value ));
+ STATIC_CHECK(( internal::is_same<typename LLT<MatrixType,Upper>::StorageIndex,int>::value ));
+
SquareMatrixType symmUp = symm.template triangularView<Upper>();
SquareMatrixType symmLo = symm.template triangularView<Lower>();
LLT<SquareMatrixType,Lower> chollo(symmLo);
VERIFY_IS_APPROX(symm, chollo.reconstructedMatrix());
- vecX = chollo.solve(vecB);
- VERIFY_IS_APPROX(symm * vecX, vecB);
- matX = chollo.solve(matB);
- VERIFY_IS_APPROX(symm * matX, matB);
+
+ check_solverbase<VectorType, VectorType>(symm, chollo, rows, rows, 1);
+ check_solverbase<MatrixType, MatrixType>(symm, chollo, rows, cols, rows);
const MatrixType symmLo_inverse = chollo.solve(MatrixType::Identity(rows,cols));
RealScalar rcond = (RealScalar(1) / matrix_l1_norm<MatrixType, Lower>(symmLo)) /
@@ -96,7 +96,7 @@ template<typename MatrixType> void cholesky(const MatrixType& m)
RealScalar rcond_est = chollo.rcond();
// Verify that the estimated condition number is within a factor of 10 of the
// truth.
- VERIFY(rcond_est > rcond / 10 && rcond_est < rcond * 10);
+ VERIFY(rcond_est >= rcond / 10 && rcond_est <= rcond * 10);
// test the upper mode
LLT<SquareMatrixType,Upper> cholup(symmUp);
@@ -112,12 +112,12 @@ template<typename MatrixType> void cholesky(const MatrixType& m)
rcond = (RealScalar(1) / matrix_l1_norm<MatrixType, Upper>(symmUp)) /
matrix_l1_norm<MatrixType, Upper>(symmUp_inverse);
rcond_est = cholup.rcond();
- VERIFY(rcond_est > rcond / 10 && rcond_est < rcond * 10);
+ VERIFY(rcond_est >= rcond / 10 && rcond_est <= rcond * 10);
MatrixType neg = -symmLo;
chollo.compute(neg);
- VERIFY(chollo.info()==NumericalIssue);
+ VERIFY(neg.size()==0 || chollo.info()==NumericalIssue);
VERIFY_IS_APPROX(MatrixType(chollo.matrixL().transpose().conjugate()), MatrixType(chollo.matrixU()));
VERIFY_IS_APPROX(MatrixType(chollo.matrixU().transpose().conjugate()), MatrixType(chollo.matrixL()));
@@ -142,6 +142,9 @@ template<typename MatrixType> void cholesky(const MatrixType& m)
// LDLT
{
+ STATIC_CHECK(( internal::is_same<typename LDLT<MatrixType,Lower>::StorageIndex,int>::value ));
+ STATIC_CHECK(( internal::is_same<typename LDLT<MatrixType,Upper>::StorageIndex,int>::value ));
+
int sign = internal::random<int>()%2 ? 1 : -1;
if(sign == -1)
@@ -155,10 +158,9 @@ template<typename MatrixType> void cholesky(const MatrixType& m)
LDLT<SquareMatrixType,Lower> ldltlo(symmLo);
VERIFY(ldltlo.info()==Success);
VERIFY_IS_APPROX(symm, ldltlo.reconstructedMatrix());
- vecX = ldltlo.solve(vecB);
- VERIFY_IS_APPROX(symm * vecX, vecB);
- matX = ldltlo.solve(matB);
- VERIFY_IS_APPROX(symm * matX, matB);
+
+ check_solverbase<VectorType, VectorType>(symm, ldltlo, rows, rows, 1);
+ check_solverbase<MatrixType, MatrixType>(symm, ldltlo, rows, cols, rows);
const MatrixType symmLo_inverse = ldltlo.solve(MatrixType::Identity(rows,cols));
RealScalar rcond = (RealScalar(1) / matrix_l1_norm<MatrixType, Lower>(symmLo)) /
@@ -166,7 +168,7 @@ template<typename MatrixType> void cholesky(const MatrixType& m)
RealScalar rcond_est = ldltlo.rcond();
// Verify that the estimated condition number is within a factor of 10 of the
// truth.
- VERIFY(rcond_est > rcond / 10 && rcond_est < rcond * 10);
+ VERIFY(rcond_est >= rcond / 10 && rcond_est <= rcond * 10);
LDLT<SquareMatrixType,Upper> ldltup(symmUp);
@@ -183,7 +185,7 @@ template<typename MatrixType> void cholesky(const MatrixType& m)
rcond = (RealScalar(1) / matrix_l1_norm<MatrixType, Upper>(symmUp)) /
matrix_l1_norm<MatrixType, Upper>(symmUp_inverse);
rcond_est = ldltup.rcond();
- VERIFY(rcond_est > rcond / 10 && rcond_est < rcond * 10);
+ VERIFY(rcond_est >= rcond / 10 && rcond_est <= rcond * 10);
VERIFY_IS_APPROX(MatrixType(ldltlo.matrixL().transpose().conjugate()), MatrixType(ldltlo.matrixU()));
VERIFY_IS_APPROX(MatrixType(ldltlo.matrixU().transpose().conjugate()), MatrixType(ldltlo.matrixL()));
@@ -312,10 +314,9 @@ template<typename MatrixType> void cholesky_cplx(const MatrixType& m)
LLT<RealMatrixType,Lower> chollo(symmLo);
VERIFY_IS_APPROX(symm, chollo.reconstructedMatrix());
- vecX = chollo.solve(vecB);
- VERIFY_IS_APPROX(symm * vecX, vecB);
-// matX = chollo.solve(matB);
-// VERIFY_IS_APPROX(symm * matX, matB);
+
+ check_solverbase<VectorType, VectorType>(symm, chollo, rows, rows, 1);
+ //check_solverbase<MatrixType, MatrixType>(symm, chollo, rows, cols, rows);
}
// LDLT
@@ -332,10 +333,9 @@ template<typename MatrixType> void cholesky_cplx(const MatrixType& m)
LDLT<RealMatrixType,Lower> ldltlo(symmLo);
VERIFY(ldltlo.info()==Success);
VERIFY_IS_APPROX(symm, ldltlo.reconstructedMatrix());
- vecX = ldltlo.solve(vecB);
- VERIFY_IS_APPROX(symm * vecX, vecB);
-// matX = ldltlo.solve(matB);
-// VERIFY_IS_APPROX(symm * matX, matB);
+
+ check_solverbase<VectorType, VectorType>(symm, ldltlo, rows, rows, 1);
+ //check_solverbase<MatrixType, MatrixType>(symm, ldltlo, rows, cols, rows);
}
}
@@ -476,16 +476,20 @@ template<typename MatrixType> void cholesky_verify_assert()
VERIFY_RAISES_ASSERT(llt.matrixL())
VERIFY_RAISES_ASSERT(llt.matrixU())
VERIFY_RAISES_ASSERT(llt.solve(tmp))
- VERIFY_RAISES_ASSERT(llt.solveInPlace(&tmp))
+ VERIFY_RAISES_ASSERT(llt.transpose().solve(tmp))
+ VERIFY_RAISES_ASSERT(llt.adjoint().solve(tmp))
+ VERIFY_RAISES_ASSERT(llt.solveInPlace(tmp))
LDLT<MatrixType> ldlt;
VERIFY_RAISES_ASSERT(ldlt.matrixL())
- VERIFY_RAISES_ASSERT(ldlt.permutationP())
+ VERIFY_RAISES_ASSERT(ldlt.transpositionsP())
VERIFY_RAISES_ASSERT(ldlt.vectorD())
VERIFY_RAISES_ASSERT(ldlt.isPositive())
VERIFY_RAISES_ASSERT(ldlt.isNegative())
VERIFY_RAISES_ASSERT(ldlt.solve(tmp))
- VERIFY_RAISES_ASSERT(ldlt.solveInPlace(&tmp))
+ VERIFY_RAISES_ASSERT(ldlt.transpose().solve(tmp))
+ VERIFY_RAISES_ASSERT(ldlt.adjoint().solve(tmp))
+ VERIFY_RAISES_ASSERT(ldlt.solveInPlace(tmp))
}
EIGEN_DECLARE_TEST(cholesky)
@@ -507,6 +511,11 @@ EIGEN_DECLARE_TEST(cholesky)
CALL_SUBTEST_6( cholesky_cplx(MatrixXcd(s,s)) );
TEST_SET_BUT_UNUSED_VARIABLE(s)
}
+ // empty matrix, regression test for Bug 785:
+ CALL_SUBTEST_2( cholesky(MatrixXd(0,0)) );
+
+ // This does not work yet:
+ // CALL_SUBTEST_2( cholesky(Matrix<double,0,0>()) );
CALL_SUBTEST_4( cholesky_verify_assert<Matrix3f>() );
CALL_SUBTEST_7( cholesky_verify_assert<Matrix3d>() );
diff --git a/test/conjugate_gradient.cpp b/test/conjugate_gradient.cpp
index 47a4ca707..b076a126b 100644
--- a/test/conjugate_gradient.cpp
+++ b/test/conjugate_gradient.cpp
@@ -10,9 +10,9 @@
#include "sparse_solver.h"
#include <Eigen/IterativeLinearSolvers>
-template<typename T, typename I> void test_conjugate_gradient_T()
+template<typename T, typename I_> void test_conjugate_gradient_T()
{
- typedef SparseMatrix<T,0,I> SparseMatrixType;
+ typedef SparseMatrix<T,0,I_> SparseMatrixType;
ConjugateGradient<SparseMatrixType, Lower > cg_colmajor_lower_diag;
ConjugateGradient<SparseMatrixType, Upper > cg_colmajor_upper_diag;
ConjugateGradient<SparseMatrixType, Lower|Upper> cg_colmajor_loup_diag;
diff --git a/test/constructor.cpp b/test/constructor.cpp
index 1dd3bc3c0..ffd5e802a 100644
--- a/test/constructor.cpp
+++ b/test/constructor.cpp
@@ -20,6 +20,8 @@ template<typename MatrixType> struct Wrapper
inline operator MatrixType& () { return m_mat; }
};
+enum my_sizes { M = 12, N = 7};
+
template<typename MatrixType> void ctor_init1(const MatrixType& m)
{
// Check logic in PlainObjectBase::_init1
@@ -81,4 +83,16 @@ EIGEN_DECLARE_TEST(constructor)
Array<float,3,3> a(123);
VERIFY_IS_EQUAL(a(4), 123.f);
}
+ {
+ MatrixXi m1(M,N);
+ VERIFY_IS_EQUAL(m1.rows(),M);
+ VERIFY_IS_EQUAL(m1.cols(),N);
+ ArrayXXi a1(M,N);
+ VERIFY_IS_EQUAL(a1.rows(),M);
+ VERIFY_IS_EQUAL(a1.cols(),N);
+ VectorXi v1(M);
+ VERIFY_IS_EQUAL(v1.size(),M);
+ ArrayXi a2(M);
+ VERIFY_IS_EQUAL(a2.size(),M);
+ }
}
diff --git a/test/ctorleak.cpp b/test/ctorleak.cpp
index 7202e90dd..73904176b 100644
--- a/test/ctorleak.cpp
+++ b/test/ctorleak.cpp
@@ -8,7 +8,7 @@ struct Foo
static Index object_limit;
int dummy;
- Foo()
+ Foo() : dummy(0)
{
#ifdef EIGEN_EXCEPTIONS
// TODO: Is this the correct way to handle this?
@@ -37,22 +37,33 @@ EIGEN_DECLARE_TEST(ctorleak)
{
typedef Matrix<Foo, Dynamic, Dynamic> MatrixX;
typedef Matrix<Foo, Dynamic, 1> VectorX;
+
Foo::object_count = 0;
for(int i = 0; i < g_repeat; i++) {
Index rows = internal::random<Index>(2,EIGEN_TEST_MAX_SIZE), cols = internal::random<Index>(2,EIGEN_TEST_MAX_SIZE);
- Foo::object_limit = internal::random<Index>(0, rows*cols - 2);
+ Foo::object_limit = rows*cols;
+ {
+ MatrixX r(rows, cols);
+ Foo::object_limit = r.size()+internal::random<Index>(0, rows*cols - 2);
std::cout << "object_limit =" << Foo::object_limit << std::endl;
#ifdef EIGEN_EXCEPTIONS
try
{
#endif
- std::cout << "\nMatrixX m(" << rows << ", " << cols << ");\n";
- MatrixX m(rows, cols);
+ if(internal::random<bool>()) {
+ std::cout << "\nMatrixX m(" << rows << ", " << cols << ");\n";
+ MatrixX m(rows, cols);
+ }
+ else {
+ std::cout << "\nMatrixX m(r);\n";
+ MatrixX m(r);
+ }
#ifdef EIGEN_EXCEPTIONS
VERIFY(false); // not reached if exceptions are enabled
}
catch (const Foo::Fail&) { /* ignore */ }
#endif
+ }
VERIFY_IS_EQUAL(Index(0), Foo::object_count);
{
@@ -66,4 +77,5 @@ EIGEN_DECLARE_TEST(ctorleak)
}
VERIFY_IS_EQUAL(Index(0), Foo::object_count);
}
+ std::cout << "\n";
}
diff --git a/test/dense_storage.cpp b/test/dense_storage.cpp
index 1150ec52b..7fa25859d 100644
--- a/test/dense_storage.cpp
+++ b/test/dense_storage.cpp
@@ -52,6 +52,32 @@ void dense_storage_assignment()
VERIFY_IS_EQUAL(raw_reference[i], raw_copied_reference[i]);
}
+template<typename T, int Size, std::size_t Alignment>
+void dense_storage_alignment()
+{
+ #if EIGEN_HAS_ALIGNAS
+
+ struct alignas(Alignment) Empty1 {};
+ VERIFY_IS_EQUAL(std::alignment_of<Empty1>::value, Alignment);
+
+ struct EIGEN_ALIGN_TO_BOUNDARY(Alignment) Empty2 {};
+ VERIFY_IS_EQUAL(std::alignment_of<Empty2>::value, Alignment);
+
+ struct Nested1 { EIGEN_ALIGN_TO_BOUNDARY(Alignment) T data[Size]; };
+ VERIFY_IS_EQUAL(std::alignment_of<Nested1>::value, Alignment);
+
+ VERIFY_IS_EQUAL( (std::alignment_of<internal::plain_array<T,Size,AutoAlign,Alignment> >::value), Alignment);
+
+ const std::size_t default_alignment = internal::compute_default_alignment<T,Size>::value;
+
+ VERIFY_IS_EQUAL( (std::alignment_of<DenseStorage<T,Size,1,1,AutoAlign> >::value), default_alignment);
+ VERIFY_IS_EQUAL( (std::alignment_of<Matrix<T,Size,1,AutoAlign> >::value), default_alignment);
+ struct Nested2 { Matrix<T,Size,1,AutoAlign> mat; };
+ VERIFY_IS_EQUAL(std::alignment_of<Nested2>::value, default_alignment);
+
+ #endif
+}
+
EIGEN_DECLARE_TEST(dense_storage)
{
dense_storage_copy<int,Dynamic,Dynamic>();
@@ -72,5 +98,10 @@ EIGEN_DECLARE_TEST(dense_storage)
dense_storage_assignment<float,Dynamic,Dynamic>();
dense_storage_assignment<float,Dynamic,3>();
dense_storage_assignment<float,4,Dynamic>();
- dense_storage_assignment<float,4,3>();
+ dense_storage_assignment<float,4,3>();
+
+ dense_storage_alignment<float,16,8>();
+ dense_storage_alignment<float,16,16>();
+ dense_storage_alignment<float,16,32>();
+ dense_storage_alignment<float,16,64>();
}
diff --git a/test/diagonal_matrix_variadic_ctor.cpp b/test/diagonal_matrix_variadic_ctor.cpp
new file mode 100644
index 000000000..fbc8f8470
--- /dev/null
+++ b/test/diagonal_matrix_variadic_ctor.cpp
@@ -0,0 +1,185 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2019 David Tellenbach <david.tellenbach@tellnotes.org>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#define EIGEN_NO_STATIC_ASSERT
+
+#include "main.h"
+
+template <typename Scalar>
+void assertionTest()
+{
+ typedef DiagonalMatrix<Scalar, 5> DiagMatrix5;
+ typedef DiagonalMatrix<Scalar, 7> DiagMatrix7;
+ typedef DiagonalMatrix<Scalar, Dynamic> DiagMatrixX;
+
+ Scalar raw[6];
+ for (int i = 0; i < 6; ++i) {
+ raw[i] = internal::random<Scalar>();
+ }
+
+ VERIFY_RAISES_ASSERT((DiagMatrix5{raw[0], raw[1], raw[2], raw[3]}));
+ VERIFY_RAISES_ASSERT((DiagMatrix5{raw[0], raw[1], raw[3]}));
+ VERIFY_RAISES_ASSERT((DiagMatrix7{raw[0], raw[1], raw[2], raw[3]}));
+
+ VERIFY_RAISES_ASSERT((DiagMatrixX {
+ {raw[0], raw[1], raw[2]},
+ {raw[3], raw[4], raw[5]}
+ }));
+}
+
+#define VERIFY_IMPLICIT_CONVERSION_3(DIAGTYPE, V0, V1, V2) \
+ DIAGTYPE d(V0, V1, V2); \
+ DIAGTYPE::DenseMatrixType Dense = d.toDenseMatrix(); \
+ VERIFY_IS_APPROX(Dense(0, 0), (Scalar)V0); \
+ VERIFY_IS_APPROX(Dense(1, 1), (Scalar)V1); \
+ VERIFY_IS_APPROX(Dense(2, 2), (Scalar)V2);
+
+#define VERIFY_IMPLICIT_CONVERSION_4(DIAGTYPE, V0, V1, V2, V3) \
+ DIAGTYPE d(V0, V1, V2, V3); \
+ DIAGTYPE::DenseMatrixType Dense = d.toDenseMatrix(); \
+ VERIFY_IS_APPROX(Dense(0, 0), (Scalar)V0); \
+ VERIFY_IS_APPROX(Dense(1, 1), (Scalar)V1); \
+ VERIFY_IS_APPROX(Dense(2, 2), (Scalar)V2); \
+ VERIFY_IS_APPROX(Dense(3, 3), (Scalar)V3);
+
+#define VERIFY_IMPLICIT_CONVERSION_5(DIAGTYPE, V0, V1, V2, V3, V4) \
+ DIAGTYPE d(V0, V1, V2, V3, V4); \
+ DIAGTYPE::DenseMatrixType Dense = d.toDenseMatrix(); \
+ VERIFY_IS_APPROX(Dense(0, 0), (Scalar)V0); \
+ VERIFY_IS_APPROX(Dense(1, 1), (Scalar)V1); \
+ VERIFY_IS_APPROX(Dense(2, 2), (Scalar)V2); \
+ VERIFY_IS_APPROX(Dense(3, 3), (Scalar)V3); \
+ VERIFY_IS_APPROX(Dense(4, 4), (Scalar)V4);
+
+template<typename Scalar>
+void constructorTest()
+{
+ typedef DiagonalMatrix<Scalar, 0> DiagonalMatrix0;
+ typedef DiagonalMatrix<Scalar, 3> DiagonalMatrix3;
+ typedef DiagonalMatrix<Scalar, 4> DiagonalMatrix4;
+ typedef DiagonalMatrix<Scalar, Dynamic> DiagonalMatrixX;
+
+ Scalar raw[7];
+ for (int k = 0; k < 7; ++k) raw[k] = internal::random<Scalar>();
+
+ // Fixed-sized matrices
+ {
+ DiagonalMatrix0 a {{}};
+ VERIFY(a.rows() == 0);
+ VERIFY(a.cols() == 0);
+ typename DiagonalMatrix0::DenseMatrixType m = a.toDenseMatrix();
+ for (Index k = 0; k < a.rows(); ++k) VERIFY(m(k, k) == raw[k]);
+ }
+ {
+ DiagonalMatrix3 a {{raw[0], raw[1], raw[2]}};
+ VERIFY(a.rows() == 3);
+ VERIFY(a.cols() == 3);
+ typename DiagonalMatrix3::DenseMatrixType m = a.toDenseMatrix();
+ for (Index k = 0; k < a.rows(); ++k) VERIFY(m(k, k) == raw[k]);
+ }
+ {
+ DiagonalMatrix4 a {{raw[0], raw[1], raw[2], raw[3]}};
+ VERIFY(a.rows() == 4);
+ VERIFY(a.cols() == 4);
+ typename DiagonalMatrix4::DenseMatrixType m = a.toDenseMatrix();
+ for (Index k = 0; k < a.rows(); ++k) VERIFY(m(k, k) == raw[k]);
+ }
+
+ // dynamically sized matrices
+ {
+ DiagonalMatrixX a{{}};
+ VERIFY(a.rows() == 0);
+ VERIFY(a.cols() == 0);
+ typename DiagonalMatrixX::DenseMatrixType m = a.toDenseMatrix();
+ for (Index k = 0; k < a.rows(); ++k) VERIFY(m(k, k) == raw[k]);
+ }
+ {
+ DiagonalMatrixX a{{raw[0], raw[1], raw[2], raw[3], raw[4], raw[5], raw[6]}};
+ VERIFY(a.rows() == 7);
+ VERIFY(a.cols() == 7);
+ typename DiagonalMatrixX::DenseMatrixType m = a.toDenseMatrix();
+ for (Index k = 0; k < a.rows(); ++k) VERIFY(m(k, k) == raw[k]);
+ }
+}
+
+template<>
+void constructorTest<float>()
+{
+ typedef float Scalar;
+
+ typedef DiagonalMatrix<Scalar, 0> DiagonalMatrix0;
+ typedef DiagonalMatrix<Scalar, 3> DiagonalMatrix3;
+ typedef DiagonalMatrix<Scalar, 4> DiagonalMatrix4;
+ typedef DiagonalMatrix<Scalar, 5> DiagonalMatrix5;
+ typedef DiagonalMatrix<Scalar, Dynamic> DiagonalMatrixX;
+
+ Scalar raw[7];
+ for (int k = 0; k < 7; ++k) raw[k] = internal::random<Scalar>();
+
+ // Fixed-sized matrices
+ {
+ DiagonalMatrix0 a {{}};
+ VERIFY(a.rows() == 0);
+ VERIFY(a.cols() == 0);
+ typename DiagonalMatrix0::DenseMatrixType m = a.toDenseMatrix();
+ for (Index k = 0; k < a.rows(); ++k) VERIFY(m(k, k) == raw[k]);
+ }
+ {
+ DiagonalMatrix3 a {{raw[0], raw[1], raw[2]}};
+ VERIFY(a.rows() == 3);
+ VERIFY(a.cols() == 3);
+ typename DiagonalMatrix3::DenseMatrixType m = a.toDenseMatrix();
+ for (Index k = 0; k < a.rows(); ++k) VERIFY(m(k, k) == raw[k]);
+ }
+ {
+ DiagonalMatrix4 a {{raw[0], raw[1], raw[2], raw[3]}};
+ VERIFY(a.rows() == 4);
+ VERIFY(a.cols() == 4);
+ typename DiagonalMatrix4::DenseMatrixType m = a.toDenseMatrix();
+ for (Index k = 0; k < a.rows(); ++k) VERIFY(m(k, k) == raw[k]);
+ }
+
+ // dynamically sized matrices
+ {
+ DiagonalMatrixX a{{}};
+ VERIFY(a.rows() == 0);
+ VERIFY(a.cols() == 0);
+ typename DiagonalMatrixX::DenseMatrixType m = a.toDenseMatrix();
+ for (Index k = 0; k < a.rows(); ++k) VERIFY(m(k, k) == raw[k]);
+ }
+ {
+ DiagonalMatrixX a{{raw[0], raw[1], raw[2], raw[3], raw[4], raw[5], raw[6]}};
+ VERIFY(a.rows() == 7);
+ VERIFY(a.cols() == 7);
+ typename DiagonalMatrixX::DenseMatrixType m = a.toDenseMatrix();
+ for (Index k = 0; k < a.rows(); ++k) VERIFY(m(k, k) == raw[k]);
+ }
+ { VERIFY_IMPLICIT_CONVERSION_3(DiagonalMatrix3, 1.2647, 2.56f, -3); }
+ { VERIFY_IMPLICIT_CONVERSION_4(DiagonalMatrix4, 1.2647, 2.56f, -3, 3.23f); }
+ { VERIFY_IMPLICIT_CONVERSION_5(DiagonalMatrix5, 1.2647, 2.56f, -3, 3.23f, 2); }
+}
+
+EIGEN_DECLARE_TEST(diagonal_matrix_variadic_ctor)
+{
+ CALL_SUBTEST_1(assertionTest<unsigned char>());
+ CALL_SUBTEST_1(assertionTest<float>());
+ CALL_SUBTEST_1(assertionTest<Index>());
+ CALL_SUBTEST_1(assertionTest<int>());
+ CALL_SUBTEST_1(assertionTest<long int>());
+ CALL_SUBTEST_1(assertionTest<std::ptrdiff_t>());
+ CALL_SUBTEST_1(assertionTest<std::complex<double>>());
+
+ CALL_SUBTEST_2(constructorTest<unsigned char>());
+ CALL_SUBTEST_2(constructorTest<float>());
+ CALL_SUBTEST_2(constructorTest<Index>());
+ CALL_SUBTEST_2(constructorTest<int>());
+ CALL_SUBTEST_2(constructorTest<long int>());
+ CALL_SUBTEST_2(constructorTest<std::ptrdiff_t>());
+ CALL_SUBTEST_2(constructorTest<std::complex<double>>());
+}
diff --git a/test/dynalloc.cpp b/test/dynalloc.cpp
index 1c74866ba..23c90a7b5 100644
--- a/test/dynalloc.cpp
+++ b/test/dynalloc.cpp
@@ -107,7 +107,7 @@ template<typename T> void check_custom_new_delete()
delete[] t;
}
-#if EIGEN_MAX_ALIGN_BYTES>0
+#if EIGEN_MAX_ALIGN_BYTES>0 && (!EIGEN_HAS_CXX17_OVERALIGN)
{
T* t = static_cast<T *>((T::operator new)(sizeof(T)));
(T::operator delete)(t, sizeof(T));
diff --git a/test/eigensolver_generic.cpp b/test/eigensolver_generic.cpp
index e0e435151..7adb98665 100644
--- a/test/eigensolver_generic.cpp
+++ b/test/eigensolver_generic.cpp
@@ -12,6 +12,21 @@
#include <limits>
#include <Eigen/Eigenvalues>
+template<typename EigType,typename MatType>
+void check_eigensolver_for_given_mat(const EigType &eig, const MatType& a)
+{
+ typedef typename NumTraits<typename MatType::Scalar>::Real RealScalar;
+ typedef Matrix<RealScalar, MatType::RowsAtCompileTime, 1> RealVectorType;
+ typedef typename std::complex<RealScalar> Complex;
+ Index n = a.rows();
+ VERIFY_IS_EQUAL(eig.info(), Success);
+ VERIFY_IS_APPROX(a * eig.pseudoEigenvectors(), eig.pseudoEigenvectors() * eig.pseudoEigenvalueMatrix());
+ VERIFY_IS_APPROX(a.template cast<Complex>() * eig.eigenvectors(),
+ eig.eigenvectors() * eig.eigenvalues().asDiagonal());
+ VERIFY_IS_APPROX(eig.eigenvectors().colwise().norm(), RealVectorType::Ones(n).transpose());
+ VERIFY_IS_APPROX(a.eigenvalues(), eig.eigenvalues());
+}
+
template<typename MatrixType> void eigensolver(const MatrixType& m)
{
/* this test covers the following files:
@@ -22,8 +37,7 @@ template<typename MatrixType> void eigensolver(const MatrixType& m)
typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
- typedef Matrix<RealScalar, MatrixType::RowsAtCompileTime, 1> RealVectorType;
- typedef typename std::complex<typename NumTraits<typename MatrixType::Scalar>::Real> Complex;
+ typedef typename std::complex<RealScalar> Complex;
MatrixType a = MatrixType::Random(rows,cols);
MatrixType a1 = MatrixType::Random(rows,cols);
@@ -36,12 +50,7 @@ template<typename MatrixType> void eigensolver(const MatrixType& m)
(ei0.pseudoEigenvectors().template cast<Complex>()) * (ei0.eigenvalues().asDiagonal()));
EigenSolver<MatrixType> ei1(a);
- VERIFY_IS_EQUAL(ei1.info(), Success);
- VERIFY_IS_APPROX(a * ei1.pseudoEigenvectors(), ei1.pseudoEigenvectors() * ei1.pseudoEigenvalueMatrix());
- VERIFY_IS_APPROX(a.template cast<Complex>() * ei1.eigenvectors(),
- ei1.eigenvectors() * ei1.eigenvalues().asDiagonal());
- VERIFY_IS_APPROX(ei1.eigenvectors().colwise().norm(), RealVectorType::Ones(rows).transpose());
- VERIFY_IS_APPROX(a.eigenvalues(), ei1.eigenvalues());
+ CALL_SUBTEST( check_eigensolver_for_given_mat(ei1,a) );
EigenSolver<MatrixType> ei2;
ei2.setMaxIterations(RealSchur<MatrixType>::m_maxIterationsPerRow * rows).compute(a);
@@ -67,7 +76,7 @@ template<typename MatrixType> void eigensolver(const MatrixType& m)
// Test matrix with NaN
a(0,0) = std::numeric_limits<typename MatrixType::RealScalar>::quiet_NaN();
EigenSolver<MatrixType> eiNaN(a);
- VERIFY_IS_EQUAL(eiNaN.info(), NoConvergence);
+ VERIFY_IS_NOT_EQUAL(eiNaN.info(), Success);
}
// regression test for bug 1098
@@ -100,6 +109,19 @@ template<typename MatrixType> void eigensolver_verify_assert(const MatrixType& m
VERIFY_RAISES_ASSERT(eig.pseudoEigenvectors());
}
+
+template<typename CoeffType>
+Matrix<typename CoeffType::Scalar,Dynamic,Dynamic>
+make_companion(const CoeffType& coeffs)
+{
+ Index n = coeffs.size()-1;
+ Matrix<typename CoeffType::Scalar,Dynamic,Dynamic> res(n,n);
+ res.setZero();
+ res.row(0) = -coeffs.tail(n) / coeffs(0);
+ res.diagonal(-1).setOnes();
+ return res;
+}
+
template<int>
void eigensolver_generic_extra()
{
@@ -126,6 +148,62 @@ void eigensolver_generic_extra()
VERIFY_IS_APPROX((a * eig.eigenvectors()).norm()+1., 1.);
VERIFY_IS_APPROX((eig.eigenvectors() * eig.eigenvalues().asDiagonal()).norm()+1., 1.);
}
+
+ // regression test for bug 933
+ {
+ {
+ VectorXd coeffs(5); coeffs << 1, -3, -175, -225, 2250;
+ MatrixXd C = make_companion(coeffs);
+ EigenSolver<MatrixXd> eig(C);
+ CALL_SUBTEST( check_eigensolver_for_given_mat(eig,C) );
+ }
+ {
+ // this test is tricky because it requires high accuracy for the smallest eigenvalues
+ VectorXd coeffs(5); coeffs << 6.154671e-15, -1.003870e-10, -9.819570e-01, 3.995715e+03, 2.211511e+08;
+ MatrixXd C = make_companion(coeffs);
+ EigenSolver<MatrixXd> eig(C);
+ CALL_SUBTEST( check_eigensolver_for_given_mat(eig,C) );
+ Index n = C.rows();
+ for(Index i=0;i<n;++i)
+ {
+ typedef std::complex<double> Complex;
+ MatrixXcd ac = C.cast<Complex>();
+ ac.diagonal().array() -= eig.eigenvalues()(i);
+ VectorXd sv = ac.jacobiSvd().singularValues();
+ // comparing to sv(0) is not enough here to catch the "bug",
+ // the hard-coded 1.0 is important!
+ VERIFY_IS_MUCH_SMALLER_THAN(sv(n-1), 1.0);
+ }
+ }
+ }
+ // regression test for bug 1557
+ {
+ // this test is interesting because it contains zeros on the diagonal.
+ MatrixXd A_bug1557(3,3);
+ A_bug1557 << 0, 0, 0, 1, 0, 0.5887907064808635127, 0, 1, 0;
+ EigenSolver<MatrixXd> eig(A_bug1557);
+ CALL_SUBTEST( check_eigensolver_for_given_mat(eig,A_bug1557) );
+ }
+
+ // regression test for bug 1174
+ {
+ Index n = 12;
+ MatrixXf A_bug1174(n,n);
+ A_bug1174 << 262144, 0, 0, 262144, 786432, 0, 0, 0, 0, 0, 0, 786432,
+ 262144, 0, 0, 262144, 786432, 0, 0, 0, 0, 0, 0, 786432,
+ 262144, 0, 0, 262144, 786432, 0, 0, 0, 0, 0, 0, 786432,
+ 262144, 0, 0, 262144, 786432, 0, 0, 0, 0, 0, 0, 786432,
+ 0, 262144, 262144, 0, 0, 262144, 262144, 262144, 262144, 262144, 262144, 0,
+ 0, 262144, 262144, 0, 0, 262144, 262144, 262144, 262144, 262144, 262144, 0,
+ 0, 262144, 262144, 0, 0, 262144, 262144, 262144, 262144, 262144, 262144, 0,
+ 0, 262144, 262144, 0, 0, 262144, 262144, 262144, 262144, 262144, 262144, 0,
+ 0, 262144, 262144, 0, 0, 262144, 262144, 262144, 262144, 262144, 262144, 0,
+ 0, 262144, 262144, 0, 0, 262144, 262144, 262144, 262144, 262144, 262144, 0,
+ 0, 262144, 262144, 0, 0, 262144, 262144, 262144, 262144, 262144, 262144, 0,
+ 0, 262144, 262144, 0, 0, 262144, 262144, 262144, 262144, 262144, 262144, 0;
+ EigenSolver<MatrixXf> eig(A_bug1174);
+ CALL_SUBTEST( check_eigensolver_for_given_mat(eig,A_bug1174) );
+ }
}
EIGEN_DECLARE_TEST(eigensolver_generic)
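
make_companion() above builds the companion matrix of a polynomial given by its coefficients; its eigenvalues are the polynomial's roots, which is what the bug 933 regression cases rely on. A self-contained sketch for p(x) = x^2 - 3x + 2, whose roots are 1 and 2 (illustrative only, not part of the patch):

#include <Eigen/Eigenvalues>
#include <iostream>
int main()
{
  Eigen::Vector3d coeffs;
  coeffs << 1, -3, 2;                         // p(x) = x^2 - 3x + 2
  Eigen::Index n = coeffs.size() - 1;
  Eigen::MatrixXd C = Eigen::MatrixXd::Zero(n, n);
  C.row(0) = -coeffs.tail(n).transpose() / coeffs(0);
  C.diagonal(-1).setOnes();                   // C = [[3, -2], [1, 0]]
  Eigen::EigenSolver<Eigen::MatrixXd> eig(C);
  std::cout << eig.eigenvalues().transpose() << "\n";  // the roots 1 and 2 (in some order)
}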
diff --git a/test/geo_transformations.cpp b/test/geo_transformations.cpp
index bf920696b..c72267955 100755
--- a/test/geo_transformations.cpp
+++ b/test/geo_transformations.cpp
@@ -612,6 +612,66 @@ template<typename Scalar, int Dim, int Options> void transform_products()
VERIFY_IS_APPROX((ac*p).matrix(), a_m*p_m);
}
+template<typename Scalar, int Mode, int Options> void transformations_no_scale()
+{
+ /* this test covers the following files:
+ Cross.h, Quaternion.h, Transform.h
+ */
+ typedef Matrix<Scalar,3,1> Vector3;
+ typedef Matrix<Scalar,4,1> Vector4;
+ typedef Quaternion<Scalar> Quaternionx;
+ typedef AngleAxis<Scalar> AngleAxisx;
+ typedef Transform<Scalar,3,Mode,Options> Transform3;
+ typedef Translation<Scalar,3> Translation3;
+ typedef Matrix<Scalar,4,4> Matrix4;
+
+ Vector3 v0 = Vector3::Random(),
+ v1 = Vector3::Random();
+
+ Transform3 t0, t1, t2;
+
+ Scalar a = internal::random<Scalar>(-Scalar(EIGEN_PI), Scalar(EIGEN_PI));
+
+ Quaternionx q1, q2;
+
+ q1 = AngleAxisx(a, v0.normalized());
+
+ t0 = Transform3::Identity();
+ VERIFY_IS_APPROX(t0.matrix(), Transform3::MatrixType::Identity());
+
+ t0.setIdentity();
+ t1.setIdentity();
+ v1 = Vector3::Ones();
+ t0.linear() = q1.toRotationMatrix();
+ t0.pretranslate(v0);
+ t1.linear() = q1.conjugate().toRotationMatrix();
+ t1.translate(-v0);
+
+ VERIFY((t0 * t1).matrix().isIdentity(test_precision<Scalar>()));
+
+ t1.fromPositionOrientationScale(v0, q1, v1);
+ VERIFY_IS_APPROX(t1.matrix(), t0.matrix());
+ VERIFY_IS_APPROX(t1*v1, t0*v1);
+
+ // translation * vector
+ t0.setIdentity();
+ t0.translate(v0);
+ VERIFY_IS_APPROX((t0 * v1).template head<3>(), Translation3(v0) * v1);
+
+ // Conversion to matrix.
+ Transform3 t3;
+ t3.linear() = q1.toRotationMatrix();
+ t3.translation() = v1;
+ Matrix4 m3 = t3.matrix();
+ VERIFY((m3 * m3.inverse()).isIdentity(test_precision<Scalar>()));
+ // Verify implicit last row is initialized.
+ VERIFY_IS_APPROX(Vector4(m3.row(3)), Vector4(0.0, 0.0, 0.0, 1.0));
+
+ VERIFY_IS_APPROX(t3.rotation(), t3.linear());
+ if(Mode==Isometry)
+ VERIFY(t3.rotation().data()==t3.linear().data());
+}
+
EIGEN_DECLARE_TEST(geo_transformations)
{
for(int i = 0; i < g_repeat; i++) {
@@ -625,7 +685,7 @@ EIGEN_DECLARE_TEST(geo_transformations)
CALL_SUBTEST_3(( transformations<double,Projective,AutoAlign>() ));
CALL_SUBTEST_3(( transformations<double,Projective,DontAlign>() ));
CALL_SUBTEST_3(( transform_alignment<double>() ));
-
+
CALL_SUBTEST_4(( transformations<float,Affine,RowMajor|AutoAlign>() ));
CALL_SUBTEST_4(( non_projective_only<float,Affine,RowMajor>() ));
@@ -641,5 +701,8 @@ EIGEN_DECLARE_TEST(geo_transformations)
CALL_SUBTEST_8(( transform_associativity<double,2,ColMajor>(Rotation2D<double>(internal::random<double>()*double(EIGEN_PI))) ));
CALL_SUBTEST_8(( transform_associativity<double,3,ColMajor>(Quaterniond::UnitRandom()) ));
+
+ CALL_SUBTEST_9(( transformations_no_scale<double,Affine,AutoAlign>() ));
+ CALL_SUBTEST_9(( transformations_no_scale<double,Isometry,AutoAlign>() ));
}
}
diff --git a/test/incomplete_cholesky.cpp b/test/incomplete_cholesky.cpp
index 68fe7d507..ecc17f5c3 100644
--- a/test/incomplete_cholesky.cpp
+++ b/test/incomplete_cholesky.cpp
@@ -12,14 +12,14 @@
#include <Eigen/IterativeLinearSolvers>
#include <unsupported/Eigen/IterativeSolvers>
-template<typename T, typename I> void test_incomplete_cholesky_T()
+template<typename T, typename I_> void test_incomplete_cholesky_T()
{
- typedef SparseMatrix<T,0,I> SparseMatrixType;
- ConjugateGradient<SparseMatrixType, Lower, IncompleteCholesky<T, Lower, AMDOrdering<I> > > cg_illt_lower_amd;
- ConjugateGradient<SparseMatrixType, Lower, IncompleteCholesky<T, Lower, NaturalOrdering<I> > > cg_illt_lower_nat;
- ConjugateGradient<SparseMatrixType, Upper, IncompleteCholesky<T, Upper, AMDOrdering<I> > > cg_illt_upper_amd;
- ConjugateGradient<SparseMatrixType, Upper, IncompleteCholesky<T, Upper, NaturalOrdering<I> > > cg_illt_upper_nat;
- ConjugateGradient<SparseMatrixType, Upper|Lower, IncompleteCholesky<T, Lower, AMDOrdering<I> > > cg_illt_uplo_amd;
+ typedef SparseMatrix<T,0,I_> SparseMatrixType;
+ ConjugateGradient<SparseMatrixType, Lower, IncompleteCholesky<T, Lower, AMDOrdering<I_> > > cg_illt_lower_amd;
+ ConjugateGradient<SparseMatrixType, Lower, IncompleteCholesky<T, Lower, NaturalOrdering<I_> > > cg_illt_lower_nat;
+ ConjugateGradient<SparseMatrixType, Upper, IncompleteCholesky<T, Upper, AMDOrdering<I_> > > cg_illt_upper_amd;
+ ConjugateGradient<SparseMatrixType, Upper, IncompleteCholesky<T, Upper, NaturalOrdering<I_> > > cg_illt_upper_nat;
+ ConjugateGradient<SparseMatrixType, Upper|Lower, IncompleteCholesky<T, Lower, AMDOrdering<I_> > > cg_illt_uplo_amd;
CALL_SUBTEST( check_sparse_spd_solving(cg_illt_lower_amd) );
diff --git a/test/indexed_view.cpp b/test/indexed_view.cpp
index 6518642df..5f1e01fc8 100644
--- a/test/indexed_view.cpp
+++ b/test/indexed_view.cpp
@@ -95,7 +95,11 @@ void check_indexed_view()
ArrayXd a = ArrayXd::LinSpaced(n,0,n-1);
Array<double,1,Dynamic> b = a.transpose();
- ArrayXXi A = ArrayXXi::NullaryExpr(n,n, std::ptr_fun(encode));
+ #if EIGEN_COMP_CXXVER>=14
+ ArrayXXi A = ArrayXXi::NullaryExpr(n,n, std::ref(encode));
+ #else
+ ArrayXXi A = ArrayXXi::NullaryExpr(n,n, std::ptr_fun(&encode));
+ #endif
for(Index i=0; i<n; ++i)
for(Index j=0; j<n; ++j)
@@ -335,8 +339,8 @@ void check_indexed_view()
VERIFY_IS_APPROX( A(B.RowsAtCompileTime, 1), A(4,1) );
VERIFY_IS_APPROX( A(B.RowsAtCompileTime-1, B.ColsAtCompileTime-1), A(3,3) );
VERIFY_IS_APPROX( A(B.RowsAtCompileTime, B.ColsAtCompileTime), A(4,4) );
- const Index I = 3, J = 4;
- VERIFY_IS_APPROX( A(I,J), A(3,4) );
+ const Index I_ = 3, J_ = 4;
+ VERIFY_IS_APPROX( A(I_,J_), A(3,4) );
}
// check extended block API
diff --git a/test/initializer_list_construction.cpp b/test/initializer_list_construction.cpp
new file mode 100644
index 000000000..b84e9ba72
--- /dev/null
+++ b/test/initializer_list_construction.cpp
@@ -0,0 +1,385 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2019 David Tellenbach <david.tellenbach@tellnotes.org>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#define EIGEN_NO_STATIC_ASSERT
+
+#include "main.h"
+
+template<typename Scalar, bool is_integer = NumTraits<Scalar>::IsInteger>
+struct TestMethodDispatching {
+ static void run() {}
+};
+
+template<typename Scalar>
+struct TestMethodDispatching<Scalar, 1> {
+ static void run()
+ {
+ {
+ Matrix<Scalar, Dynamic, Dynamic> m {3, 4};
+ Array<Scalar, Dynamic, Dynamic> a {3, 4};
+ VERIFY(m.rows() == 3);
+ VERIFY(m.cols() == 4);
+ VERIFY(a.rows() == 3);
+ VERIFY(a.cols() == 4);
+ }
+ {
+ Matrix<Scalar, 1, 2> m {3, 4};
+ Array<Scalar, 1, 2> a {3, 4};
+ VERIFY(m(0) == 3);
+ VERIFY(m(1) == 4);
+ VERIFY(a(0) == 3);
+ VERIFY(a(1) == 4);
+ }
+ {
+ Matrix<Scalar, 2, 1> m {3, 4};
+ Array<Scalar, 2, 1> a {3, 4};
+ VERIFY(m(0) == 3);
+ VERIFY(m(1) == 4);
+ VERIFY(a(0) == 3);
+ VERIFY(a(1) == 4);
+ }
+ }
+};
+
+template<typename Vec4, typename Vec5> void fixedsizeVariadicVectorConstruction2()
+{
+ {
+ Vec4 ref = Vec4::Random();
+ Vec4 v{ ref[0], ref[1], ref[2], ref[3] };
+ VERIFY_IS_APPROX(v, ref);
+ VERIFY_IS_APPROX(v, (Vec4( ref[0], ref[1], ref[2], ref[3] )));
+ VERIFY_IS_APPROX(v, (Vec4({ref[0], ref[1], ref[2], ref[3]})));
+
+ Vec4 v2 = { ref[0], ref[1], ref[2], ref[3] };
+ VERIFY_IS_APPROX(v2, ref);
+ }
+ {
+ Vec5 ref = Vec5::Random();
+ Vec5 v{ ref[0], ref[1], ref[2], ref[3], ref[4] };
+ VERIFY_IS_APPROX(v, ref);
+ VERIFY_IS_APPROX(v, (Vec5( ref[0], ref[1], ref[2], ref[3], ref[4] )));
+ VERIFY_IS_APPROX(v, (Vec5({ref[0], ref[1], ref[2], ref[3], ref[4]})));
+
+ Vec5 v2 = { ref[0], ref[1], ref[2], ref[3], ref[4] };
+ VERIFY_IS_APPROX(v2, ref);
+ }
+}
+
+#define CHECK_MIXSCALAR_V5_APPROX(V, A0, A1, A2, A3, A4) { \
+ VERIFY_IS_APPROX(V[0], Scalar(A0) ); \
+ VERIFY_IS_APPROX(V[1], Scalar(A1) ); \
+ VERIFY_IS_APPROX(V[2], Scalar(A2) ); \
+ VERIFY_IS_APPROX(V[3], Scalar(A3) ); \
+ VERIFY_IS_APPROX(V[4], Scalar(A4) ); \
+}
+
+#define CHECK_MIXSCALAR_V5(VEC5, A0, A1, A2, A3, A4) { \
+ typedef VEC5::Scalar Scalar; \
+ VEC5 v = { A0 , A1 , A2 , A3 , A4 }; \
+ CHECK_MIXSCALAR_V5_APPROX(v, A0 , A1 , A2 , A3 , A4); \
+}
+
+template<int> void fixedsizeVariadicVectorConstruction3()
+{
+ typedef Matrix<double,5,1> Vec5;
+ typedef Array<float,5,1> Arr5;
+ CHECK_MIXSCALAR_V5(Vec5, 1, 2., -3, 4.121, 5.53252);
+ CHECK_MIXSCALAR_V5(Arr5, 1, 2., 3.12f, 4.121, 5.53252);
+}
+
+template<typename Scalar> void fixedsizeVariadicVectorConstruction()
+{
+ CALL_SUBTEST(( fixedsizeVariadicVectorConstruction2<Matrix<Scalar,4,1>, Matrix<Scalar,5,1> >() ));
+ CALL_SUBTEST(( fixedsizeVariadicVectorConstruction2<Matrix<Scalar,1,4>, Matrix<Scalar,1,5> >() ));
+ CALL_SUBTEST(( fixedsizeVariadicVectorConstruction2<Array<Scalar,4,1>, Array<Scalar,5,1> >() ));
+ CALL_SUBTEST(( fixedsizeVariadicVectorConstruction2<Array<Scalar,1,4>, Array<Scalar,1,5> >() ));
+}
+
+
+template<typename Scalar> void initializerListVectorConstruction()
+{
+ Scalar raw[4];
+ for(int k = 0; k < 4; ++k) {
+ raw[k] = internal::random<Scalar>();
+ }
+ {
+ Matrix<Scalar, 4, 1> m { {raw[0]}, {raw[1]},{raw[2]},{raw[3]} };
+ Array<Scalar, 4, 1> a { {raw[0]}, {raw[1]}, {raw[2]}, {raw[3]} };
+ for(int k = 0; k < 4; ++k) {
+ VERIFY(m(k) == raw[k]);
+ }
+ for(int k = 0; k < 4; ++k) {
+ VERIFY(a(k) == raw[k]);
+ }
+ VERIFY_IS_EQUAL(m, (Matrix<Scalar,4,1>({ {raw[0]}, {raw[1]}, {raw[2]}, {raw[3]} })));
+ VERIFY((a == (Array<Scalar,4,1>({ {raw[0]}, {raw[1]}, {raw[2]}, {raw[3]} }))).all());
+ }
+ {
+ Matrix<Scalar, 1, 4> m { {raw[0], raw[1], raw[2], raw[3]} };
+ Array<Scalar, 1, 4> a { {raw[0], raw[1], raw[2], raw[3]} };
+ for(int k = 0; k < 4; ++k) {
+ VERIFY(m(k) == raw[k]);
+ }
+ for(int k = 0; k < 4; ++k) {
+ VERIFY(a(k) == raw[k]);
+ }
+ VERIFY_IS_EQUAL(m, (Matrix<Scalar, 1, 4>({{raw[0],raw[1],raw[2],raw[3]}})));
+ VERIFY((a == (Array<Scalar, 1, 4>({{raw[0],raw[1],raw[2],raw[3]}}))).all());
+ }
+ {
+ Matrix<Scalar, 4, Dynamic> m { {raw[0]}, {raw[1]}, {raw[2]}, {raw[3]} };
+ Array<Scalar, 4, Dynamic> a { {raw[0]}, {raw[1]}, {raw[2]}, {raw[3]} };
+ for(int k=0; k < 4; ++k) {
+ VERIFY(m(k) == raw[k]);
+ }
+ for(int k=0; k < 4; ++k) {
+ VERIFY(a(k) == raw[k]);
+ }
+ VERIFY_IS_EQUAL(m, (Matrix<Scalar, 4, Dynamic>({ {raw[0]}, {raw[1]}, {raw[2]}, {raw[3]} })));
+ VERIFY((a == (Array<Scalar, 4, Dynamic>({ {raw[0]}, {raw[1]}, {raw[2]}, {raw[3]} }))).all());
+ }
+ {
+ Matrix<Scalar, Dynamic, 4> m {{raw[0],raw[1],raw[2],raw[3]}};
+ Array<Scalar, Dynamic, 4> a {{raw[0],raw[1],raw[2],raw[3]}};
+ for(int k=0; k < 4; ++k) {
+ VERIFY(m(k) == raw[k]);
+ }
+ for(int k=0; k < 4; ++k) {
+ VERIFY(a(k) == raw[k]);
+ }
+ VERIFY_IS_EQUAL(m, (Matrix<Scalar, Dynamic, 4>({{raw[0],raw[1],raw[2],raw[3]}})));
+ VERIFY((a == (Array<Scalar, Dynamic, 4>({{raw[0],raw[1],raw[2],raw[3]}}))).all());
+ }
+}
+
+template<typename Scalar> void initializerListMatrixConstruction()
+{
+ const Index RowsAtCompileTime = 5;
+ const Index ColsAtCompileTime = 4;
+ const Index SizeAtCompileTime = RowsAtCompileTime * ColsAtCompileTime;
+
+ Scalar raw[SizeAtCompileTime];
+ for (int i = 0; i < SizeAtCompileTime; ++i) {
+ raw[i] = internal::random<Scalar>();
+ }
+ {
+ Matrix<Scalar, Dynamic, Dynamic> m {};
+ VERIFY(m.cols() == 0);
+ VERIFY(m.rows() == 0);
+ VERIFY_IS_EQUAL(m, (Matrix<Scalar, Dynamic, Dynamic>()));
+ }
+ {
+ Matrix<Scalar, 5, 4> m {
+ {raw[0], raw[1], raw[2], raw[3]},
+ {raw[4], raw[5], raw[6], raw[7]},
+ {raw[8], raw[9], raw[10], raw[11]},
+ {raw[12], raw[13], raw[14], raw[15]},
+ {raw[16], raw[17], raw[18], raw[19]}
+ };
+
+ Matrix<Scalar, 5, 4> m2;
+ m2 << raw[0], raw[1], raw[2], raw[3],
+ raw[4], raw[5], raw[6], raw[7],
+ raw[8], raw[9], raw[10], raw[11],
+ raw[12], raw[13], raw[14], raw[15],
+ raw[16], raw[17], raw[18], raw[19];
+
+ int k = 0;
+ for(int i = 0; i < RowsAtCompileTime; ++i) {
+ for (int j = 0; j < ColsAtCompileTime; ++j) {
+ VERIFY(m(i, j) == raw[k]);
+ ++k;
+ }
+ }
+ VERIFY_IS_EQUAL(m, m2);
+ }
+ {
+ Matrix<Scalar, Dynamic, Dynamic> m{
+ {raw[0], raw[1], raw[2], raw[3]},
+ {raw[4], raw[5], raw[6], raw[7]},
+ {raw[8], raw[9], raw[10], raw[11]},
+ {raw[12], raw[13], raw[14], raw[15]},
+ {raw[16], raw[17], raw[18], raw[19]}
+ };
+
+ VERIFY(m.cols() == 4);
+ VERIFY(m.rows() == 5);
+ int k = 0;
+ for(int i = 0; i < RowsAtCompileTime; ++i) {
+ for (int j = 0; j < ColsAtCompileTime; ++j) {
+ VERIFY(m(i, j) == raw[k]);
+ ++k;
+ }
+ }
+
+ Matrix<Scalar, Dynamic, Dynamic> m2(RowsAtCompileTime, ColsAtCompileTime);
+ k = 0;
+ for(int i = 0; i < RowsAtCompileTime; ++i) {
+ for (int j = 0; j < ColsAtCompileTime; ++j) {
+ m2(i, j) = raw[k];
+ ++k;
+ }
+ }
+ VERIFY_IS_EQUAL(m, m2);
+ }
+}
+
+template<typename Scalar> void initializerListArrayConstruction()
+{
+ const Index RowsAtCompileTime = 5;
+ const Index ColsAtCompileTime = 4;
+ const Index SizeAtCompileTime = RowsAtCompileTime * ColsAtCompileTime;
+
+ Scalar raw[SizeAtCompileTime];
+ for (int i = 0; i < SizeAtCompileTime; ++i) {
+ raw[i] = internal::random<Scalar>();
+ }
+ {
+ Array<Scalar, Dynamic, Dynamic> a {};
+ VERIFY(a.cols() == 0);
+ VERIFY(a.rows() == 0);
+ }
+ {
+ Array<Scalar, 5, 4> m {
+ {raw[0], raw[1], raw[2], raw[3]},
+ {raw[4], raw[5], raw[6], raw[7]},
+ {raw[8], raw[9], raw[10], raw[11]},
+ {raw[12], raw[13], raw[14], raw[15]},
+ {raw[16], raw[17], raw[18], raw[19]}
+ };
+
+ Array<Scalar, 5, 4> m2;
+ m2 << raw[0], raw[1], raw[2], raw[3],
+ raw[4], raw[5], raw[6], raw[7],
+ raw[8], raw[9], raw[10], raw[11],
+ raw[12], raw[13], raw[14], raw[15],
+ raw[16], raw[17], raw[18], raw[19];
+
+ int k = 0;
+ for(int i = 0; i < RowsAtCompileTime; ++i) {
+ for (int j = 0; j < ColsAtCompileTime; ++j) {
+ VERIFY(m(i, j) == raw[k]);
+ ++k;
+ }
+ }
+ VERIFY_IS_APPROX(m, m2);
+ }
+ {
+ Array<Scalar, Dynamic, Dynamic> m {
+ {raw[0], raw[1], raw[2], raw[3]},
+ {raw[4], raw[5], raw[6], raw[7]},
+ {raw[8], raw[9], raw[10], raw[11]},
+ {raw[12], raw[13], raw[14], raw[15]},
+ {raw[16], raw[17], raw[18], raw[19]}
+ };
+
+ VERIFY(m.cols() == 4);
+ VERIFY(m.rows() == 5);
+ int k = 0;
+ for(int i = 0; i < RowsAtCompileTime; ++i) {
+ for (int j = 0; j < ColsAtCompileTime; ++j) {
+ VERIFY(m(i, j) == raw[k]);
+ ++k;
+ }
+ }
+
+ Array<Scalar, Dynamic, Dynamic> m2(RowsAtCompileTime, ColsAtCompileTime);
+ k = 0;
+ for(int i = 0; i < RowsAtCompileTime; ++i) {
+ for (int j = 0; j < ColsAtCompileTime; ++j) {
+ m2(i, j) = raw[k];
+ ++k;
+ }
+ }
+ VERIFY_IS_APPROX(m, m2);
+ }
+}
+
+template<typename Scalar> void dynamicVectorConstruction()
+{
+ const Index size = 4;
+ Scalar raw[size];
+ for (int i = 0; i < size; ++i) {
+ raw[i] = internal::random<Scalar>();
+ }
+
+ typedef Matrix<Scalar, Dynamic, 1> VectorX;
+
+ {
+ VectorX v {{raw[0], raw[1], raw[2], raw[3]}};
+ for (int i = 0; i < size; ++i) {
+ VERIFY(v(i) == raw[i]);
+ }
+ VERIFY(v.rows() == size);
+ VERIFY(v.cols() == 1);
+ VERIFY_IS_EQUAL(v, (VectorX {{raw[0], raw[1], raw[2], raw[3]}}));
+ }
+
+ {
+ VERIFY_RAISES_ASSERT((VectorX {raw[0], raw[1], raw[2], raw[3]}));
+ }
+ {
+ VERIFY_RAISES_ASSERT((VectorX {
+ {raw[0], raw[1], raw[2], raw[3]},
+ {raw[0], raw[1], raw[2], raw[3]},
+ }));
+ }
+}
+
+EIGEN_DECLARE_TEST(initializer_list_construction)
+{
+ CALL_SUBTEST_1(initializerListVectorConstruction<unsigned char>());
+ CALL_SUBTEST_1(initializerListVectorConstruction<float>());
+ CALL_SUBTEST_1(initializerListVectorConstruction<double>());
+ CALL_SUBTEST_1(initializerListVectorConstruction<int>());
+ CALL_SUBTEST_1(initializerListVectorConstruction<long int>());
+ CALL_SUBTEST_1(initializerListVectorConstruction<std::ptrdiff_t>());
+ CALL_SUBTEST_1(initializerListVectorConstruction<std::complex<double>>());
+ CALL_SUBTEST_1(initializerListVectorConstruction<std::complex<float>>());
+
+ CALL_SUBTEST_2(initializerListMatrixConstruction<unsigned char>());
+ CALL_SUBTEST_2(initializerListMatrixConstruction<float>());
+ CALL_SUBTEST_2(initializerListMatrixConstruction<double>());
+ CALL_SUBTEST_2(initializerListMatrixConstruction<int>());
+ CALL_SUBTEST_2(initializerListMatrixConstruction<long int>());
+ CALL_SUBTEST_2(initializerListMatrixConstruction<std::ptrdiff_t>());
+ CALL_SUBTEST_2(initializerListMatrixConstruction<std::complex<double>>());
+ CALL_SUBTEST_2(initializerListMatrixConstruction<std::complex<float>>());
+
+ CALL_SUBTEST_3(initializerListArrayConstruction<unsigned char>());
+ CALL_SUBTEST_3(initializerListArrayConstruction<float>());
+ CALL_SUBTEST_3(initializerListArrayConstruction<double>());
+ CALL_SUBTEST_3(initializerListArrayConstruction<int>());
+ CALL_SUBTEST_3(initializerListArrayConstruction<long int>());
+ CALL_SUBTEST_3(initializerListArrayConstruction<std::ptrdiff_t>());
+ CALL_SUBTEST_3(initializerListArrayConstruction<std::complex<double>>());
+ CALL_SUBTEST_3(initializerListArrayConstruction<std::complex<float>>());
+
+ CALL_SUBTEST_4(fixedsizeVariadicVectorConstruction<unsigned char>());
+ CALL_SUBTEST_4(fixedsizeVariadicVectorConstruction<float>());
+ CALL_SUBTEST_4(fixedsizeVariadicVectorConstruction<double>());
+ CALL_SUBTEST_4(fixedsizeVariadicVectorConstruction<int>());
+ CALL_SUBTEST_4(fixedsizeVariadicVectorConstruction<long int>());
+ CALL_SUBTEST_4(fixedsizeVariadicVectorConstruction<std::ptrdiff_t>());
+ CALL_SUBTEST_4(fixedsizeVariadicVectorConstruction<std::complex<double>>());
+ CALL_SUBTEST_4(fixedsizeVariadicVectorConstruction<std::complex<float>>());
+ CALL_SUBTEST_4(fixedsizeVariadicVectorConstruction3<0>());
+
+ CALL_SUBTEST_5(TestMethodDispatching<int>::run());
+ CALL_SUBTEST_5(TestMethodDispatching<long int>::run());
+
+ CALL_SUBTEST_6(dynamicVectorConstruction<unsigned char>());
+ CALL_SUBTEST_6(dynamicVectorConstruction<float>());
+ CALL_SUBTEST_6(dynamicVectorConstruction<double>());
+ CALL_SUBTEST_6(dynamicVectorConstruction<int>());
+ CALL_SUBTEST_6(dynamicVectorConstruction<long int>());
+ CALL_SUBTEST_6(dynamicVectorConstruction<std::ptrdiff_t>());
+ CALL_SUBTEST_6(dynamicVectorConstruction<std::complex<double>>());
+ CALL_SUBTEST_6(dynamicVectorConstruction<std::complex<float>>());
+}
\ No newline at end of file
diff --git a/test/inverse.cpp b/test/inverse.cpp
index 8754cb7e5..99f9e0c9b 100644
--- a/test/inverse.cpp
+++ b/test/inverse.cpp
@@ -105,6 +105,22 @@ template<typename MatrixType> void inverse(const MatrixType& m)
}
}
+template<typename Scalar>
+void inverse_zerosized()
+{
+ Matrix<Scalar,Dynamic,Dynamic> A(0,0);
+ {
+ Matrix<Scalar,0,1> b, x;
+ x = A.inverse() * b;
+ }
+ {
+ Matrix<Scalar,Dynamic,Dynamic> b(0,1), x;
+ x = A.inverse() * b;
+ VERIFY_IS_EQUAL(x.rows(), 0);
+ VERIFY_IS_EQUAL(x.cols(), 1);
+ }
+}
+
EIGEN_DECLARE_TEST(inverse)
{
int s = 0;
@@ -118,6 +134,7 @@ EIGEN_DECLARE_TEST(inverse)
s = internal::random<int>(50,320);
CALL_SUBTEST_5( inverse(MatrixXf(s,s)) );
TEST_SET_BUT_UNUSED_VARIABLE(s)
+ CALL_SUBTEST_5( inverse_zerosized<float>() );
s = internal::random<int>(25,100);
CALL_SUBTEST_6( inverse(MatrixXcd(s,s)) );
diff --git a/test/jacobisvd.cpp b/test/jacobisvd.cpp
index f9a59e0e7..89484d971 100644
--- a/test/jacobisvd.cpp
+++ b/test/jacobisvd.cpp
@@ -67,6 +67,23 @@ void jacobisvd_method()
VERIFY_RAISES_ASSERT(m.jacobiSvd().matrixU());
VERIFY_RAISES_ASSERT(m.jacobiSvd().matrixV());
VERIFY_IS_APPROX(m.jacobiSvd(ComputeFullU|ComputeFullV).solve(m), m);
+ VERIFY_IS_APPROX(m.jacobiSvd(ComputeFullU|ComputeFullV).transpose().solve(m), m);
+ VERIFY_IS_APPROX(m.jacobiSvd(ComputeFullU|ComputeFullV).adjoint().solve(m), m);
+}
+
+namespace Foo {
+// older compiler require a default constructor for Bar
+// cf: https://stackoverflow.com/questions/7411515/
+class Bar {public: Bar() {}};
+bool operator<(const Bar&, const Bar&) { return true; }
+}
+// regression test for a very strange MSVC issue: simply including
+// SVDBase.h breaks std::max used with a custom scalar type
+void msvc_workaround()
+{
+ const Foo::Bar a;
+ const Foo::Bar b;
+ std::max EIGEN_NOT_A_MACRO (a,b);
}
EIGEN_DECLARE_TEST(jacobisvd)
@@ -122,4 +139,6 @@ EIGEN_DECLARE_TEST(jacobisvd)
CALL_SUBTEST_9( svd_preallocate<void>() );
CALL_SUBTEST_2( svd_underoverflow<void>() );
+
+ msvc_workaround();
}
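
The msvc_workaround() test above interposes EIGEN_NOT_A_MACRO between std::max and its argument list so that a function-like max macro (as defined, e.g., by windows.h without NOMINMAX) cannot expand. The more common spelling of the same guard parenthesizes the function name; a small sketch of that idiom (illustrative only, not part of the patch):

#include <algorithm>
#include <iostream>
// Simulate a header that defines a function-like max macro.
#define max(a, b) ((a) > (b) ? (a) : (b))
int main()
{
  int x = 1, y = 2;
  // std::max(x, y) would be broken by the macro; the extra parentheses keep
  // the 'max' token from being followed by '(' and so suppress the expansion.
  std::cout << (std::max)(x, y) << "\n";  // prints 2
}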
diff --git a/test/lu.cpp b/test/lu.cpp
index effde6060..1bbadcbf0 100644
--- a/test/lu.cpp
+++ b/test/lu.cpp
@@ -9,6 +9,7 @@
#include "main.h"
#include <Eigen/LU>
+#include "solverbase.h"
using namespace std;
template<typename MatrixType>
@@ -18,6 +19,8 @@ typename MatrixType::RealScalar matrix_l1_norm(const MatrixType& m) {
template<typename MatrixType> void lu_non_invertible()
{
+ STATIC_CHECK(( internal::is_same<typename FullPivLU<MatrixType>::StorageIndex,int>::value ));
+
typedef typename MatrixType::RealScalar RealScalar;
/* this test covers the following files:
LU.h
@@ -90,42 +93,24 @@ template<typename MatrixType> void lu_non_invertible()
VERIFY(!lu.isInjective());
VERIFY(!lu.isInvertible());
VERIFY(!lu.isSurjective());
- VERIFY((m1 * m1kernel).isMuchSmallerThan(m1));
+ VERIFY_IS_MUCH_SMALLER_THAN((m1 * m1kernel), m1);
VERIFY(m1image.fullPivLu().rank() == rank);
VERIFY_IS_APPROX(m1 * m1.adjoint() * m1image, m1image);
+ check_solverbase<CMatrixType, MatrixType>(m1, lu, rows, cols, cols2);
+
m2 = CMatrixType::Random(cols,cols2);
m3 = m1*m2;
m2 = CMatrixType::Random(cols,cols2);
// test that the code, which does resize(), may be applied to an xpr
m2.block(0,0,m2.rows(),m2.cols()) = lu.solve(m3);
VERIFY_IS_APPROX(m3, m1*m2);
-
- // test solve with transposed
- m3 = MatrixType::Random(rows,cols2);
- m2 = m1.transpose()*m3;
- m3 = MatrixType::Random(rows,cols2);
- lu.template _solve_impl_transposed<false>(m2, m3);
- VERIFY_IS_APPROX(m2, m1.transpose()*m3);
- m3 = MatrixType::Random(rows,cols2);
- m3 = lu.transpose().solve(m2);
- VERIFY_IS_APPROX(m2, m1.transpose()*m3);
-
- // test solve with conjugate transposed
- m3 = MatrixType::Random(rows,cols2);
- m2 = m1.adjoint()*m3;
- m3 = MatrixType::Random(rows,cols2);
- lu.template _solve_impl_transposed<true>(m2, m3);
- VERIFY_IS_APPROX(m2, m1.adjoint()*m3);
- m3 = MatrixType::Random(rows,cols2);
- m3 = lu.adjoint().solve(m2);
- VERIFY_IS_APPROX(m2, m1.adjoint()*m3);
}
template<typename MatrixType> void lu_invertible()
{
/* this test covers the following files:
- LU.h
+ FullPivLU.h
*/
typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
Index size = MatrixType::RowsAtCompileTime;
@@ -148,10 +133,12 @@ template<typename MatrixType> void lu_invertible()
VERIFY(lu.isSurjective());
VERIFY(lu.isInvertible());
VERIFY(lu.image(m1).fullPivLu().isInvertible());
+
+ check_solverbase<MatrixType, MatrixType>(m1, lu, size, size, size);
+
+ MatrixType m1_inverse = lu.inverse();
m3 = MatrixType::Random(size,size);
m2 = lu.solve(m3);
- VERIFY_IS_APPROX(m3, m1*m2);
- MatrixType m1_inverse = lu.inverse();
VERIFY_IS_APPROX(m2, m1_inverse*m3);
RealScalar rcond = (RealScalar(1) / matrix_l1_norm(m1)) / matrix_l1_norm(m1_inverse);
@@ -160,63 +147,37 @@ template<typename MatrixType> void lu_invertible()
// truth.
VERIFY(rcond_est > rcond / 10 && rcond_est < rcond * 10);
- // test solve with transposed
- lu.template _solve_impl_transposed<false>(m3, m2);
- VERIFY_IS_APPROX(m3, m1.transpose()*m2);
- m3 = MatrixType::Random(size,size);
- m3 = lu.transpose().solve(m2);
- VERIFY_IS_APPROX(m2, m1.transpose()*m3);
-
- // test solve with conjugate transposed
- lu.template _solve_impl_transposed<true>(m3, m2);
- VERIFY_IS_APPROX(m3, m1.adjoint()*m2);
- m3 = MatrixType::Random(size,size);
- m3 = lu.adjoint().solve(m2);
- VERIFY_IS_APPROX(m2, m1.adjoint()*m3);
-
// Regression test for Bug 302
MatrixType m4 = MatrixType::Random(size,size);
VERIFY_IS_APPROX(lu.solve(m3*m4), lu.solve(m3)*m4);
}
-template<typename MatrixType> void lu_partial_piv()
+template<typename MatrixType> void lu_partial_piv(Index size = MatrixType::ColsAtCompileTime)
{
/* this test covers the following files:
PartialPivLU.h
*/
typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
- Index size = internal::random<Index>(1,4);
MatrixType m1(size, size), m2(size, size), m3(size, size);
m1.setRandom();
PartialPivLU<MatrixType> plu(m1);
+ STATIC_CHECK(( internal::is_same<typename PartialPivLU<MatrixType>::StorageIndex,int>::value ));
+
VERIFY_IS_APPROX(m1, plu.reconstructedMatrix());
+ check_solverbase<MatrixType, MatrixType>(m1, plu, size, size, size);
+
+ MatrixType m1_inverse = plu.inverse();
m3 = MatrixType::Random(size,size);
m2 = plu.solve(m3);
- VERIFY_IS_APPROX(m3, m1*m2);
- MatrixType m1_inverse = plu.inverse();
VERIFY_IS_APPROX(m2, m1_inverse*m3);
RealScalar rcond = (RealScalar(1) / matrix_l1_norm(m1)) / matrix_l1_norm(m1_inverse);
const RealScalar rcond_est = plu.rcond();
// Verify that the estimate is within a factor of 10 of the truth.
VERIFY(rcond_est > rcond / 10 && rcond_est < rcond * 10);
-
- // test solve with transposed
- plu.template _solve_impl_transposed<false>(m3, m2);
- VERIFY_IS_APPROX(m3, m1.transpose()*m2);
- m3 = MatrixType::Random(size,size);
- m3 = plu.transpose().solve(m2);
- VERIFY_IS_APPROX(m2, m1.transpose()*m3);
-
- // test solve with conjugate transposed
- plu.template _solve_impl_transposed<true>(m3, m2);
- VERIFY_IS_APPROX(m3, m1.adjoint()*m2);
- m3 = MatrixType::Random(size,size);
- m3 = plu.adjoint().solve(m2);
- VERIFY_IS_APPROX(m2, m1.adjoint()*m3);
}
template<typename MatrixType> void lu_verify_assert()
@@ -230,6 +191,8 @@ template<typename MatrixType> void lu_verify_assert()
VERIFY_RAISES_ASSERT(lu.kernel())
VERIFY_RAISES_ASSERT(lu.image(tmp))
VERIFY_RAISES_ASSERT(lu.solve(tmp))
+ VERIFY_RAISES_ASSERT(lu.transpose().solve(tmp))
+ VERIFY_RAISES_ASSERT(lu.adjoint().solve(tmp))
VERIFY_RAISES_ASSERT(lu.determinant())
VERIFY_RAISES_ASSERT(lu.rank())
VERIFY_RAISES_ASSERT(lu.dimensionOfKernel())
@@ -242,6 +205,8 @@ template<typename MatrixType> void lu_verify_assert()
VERIFY_RAISES_ASSERT(plu.matrixLU())
VERIFY_RAISES_ASSERT(plu.permutationP())
VERIFY_RAISES_ASSERT(plu.solve(tmp))
+ VERIFY_RAISES_ASSERT(plu.transpose().solve(tmp))
+ VERIFY_RAISES_ASSERT(plu.adjoint().solve(tmp))
VERIFY_RAISES_ASSERT(plu.determinant())
VERIFY_RAISES_ASSERT(plu.inverse())
}
@@ -252,9 +217,13 @@ EIGEN_DECLARE_TEST(lu)
CALL_SUBTEST_1( lu_non_invertible<Matrix3f>() );
CALL_SUBTEST_1( lu_invertible<Matrix3f>() );
CALL_SUBTEST_1( lu_verify_assert<Matrix3f>() );
+ CALL_SUBTEST_1( lu_partial_piv<Matrix3f>() );
CALL_SUBTEST_2( (lu_non_invertible<Matrix<double, 4, 6> >()) );
CALL_SUBTEST_2( (lu_verify_assert<Matrix<double, 4, 6> >()) );
+ CALL_SUBTEST_2( lu_partial_piv<Matrix2d>() );
+ CALL_SUBTEST_2( lu_partial_piv<Matrix4d>() );
+ CALL_SUBTEST_2( (lu_partial_piv<Matrix<double,6,6> >()) );
CALL_SUBTEST_3( lu_non_invertible<MatrixXf>() );
CALL_SUBTEST_3( lu_invertible<MatrixXf>() );
@@ -262,7 +231,7 @@ EIGEN_DECLARE_TEST(lu)
CALL_SUBTEST_4( lu_non_invertible<MatrixXd>() );
CALL_SUBTEST_4( lu_invertible<MatrixXd>() );
- CALL_SUBTEST_4( lu_partial_piv<MatrixXd>() );
+ CALL_SUBTEST_4( lu_partial_piv<MatrixXd>(internal::random<int>(1,EIGEN_TEST_MAX_SIZE)) );
CALL_SUBTEST_4( lu_verify_assert<MatrixXd>() );
CALL_SUBTEST_5( lu_non_invertible<MatrixXcf>() );
@@ -271,7 +240,7 @@ EIGEN_DECLARE_TEST(lu)
CALL_SUBTEST_6( lu_non_invertible<MatrixXcd>() );
CALL_SUBTEST_6( lu_invertible<MatrixXcd>() );
- CALL_SUBTEST_6( lu_partial_piv<MatrixXcd>() );
+ CALL_SUBTEST_6( lu_partial_piv<MatrixXcd>(internal::random<int>(1,EIGEN_TEST_MAX_SIZE)) );
CALL_SUBTEST_6( lu_verify_assert<MatrixXcd>() );
CALL_SUBTEST_7(( lu_non_invertible<Matrix<float,Dynamic,16> >() ));
diff --git a/test/main.h b/test/main.h
index 36784b1f4..1fe631ca9 100644
--- a/test/main.h
+++ b/test/main.h
@@ -17,6 +17,7 @@
#include <sstream>
#include <vector>
#include <typeinfo>
+#include <functional>
// The following includes of STL headers have to be done _before_ the
// definition of macros min() and max(). The reason is that many STL
@@ -96,6 +97,8 @@
#define FORBIDDEN_IDENTIFIER (this_identifier_is_forbidden_to_avoid_clashes) this_identifier_is_forbidden_to_avoid_clashes
// B0 is defined in POSIX header termios.h
#define B0 FORBIDDEN_IDENTIFIER
+// `I` may be defined by complex.h:
+#define I FORBIDDEN_IDENTIFIER
// Unit tests calling Eigen's blas library must preserve the default blocking size
// to avoid troubles.
@@ -389,6 +392,13 @@ inline void verify_impl(bool condition, const char *testname, const char *file,
namespace Eigen {
+template<typename T1,typename T2>
+typename internal::enable_if<internal::is_same<T1,T2>::value,bool>::type
+is_same_type(const T1&, const T2&)
+{
+ return true;
+}
+
template<typename T> inline typename NumTraits<T>::Real test_precision() { return NumTraits<T>::dummy_precision(); }
template<> inline float test_precision<float>() { return 1e-3f; }
template<> inline double test_precision<double>() { return 1e-6; }
@@ -397,37 +407,29 @@ template<> inline float test_precision<std::complex<float> >() { return test_pre
template<> inline double test_precision<std::complex<double> >() { return test_precision<double>(); }
template<> inline long double test_precision<std::complex<long double> >() { return test_precision<long double>(); }
-inline bool test_isApprox(const short& a, const short& b)
-{ return internal::isApprox(a, b, test_precision<short>()); }
-inline bool test_isApprox(const unsigned short& a, const unsigned short& b)
-{ return internal::isApprox(a, b, test_precision<unsigned short>()); }
-inline bool test_isApprox(const unsigned int& a, const unsigned int& b)
-{ return internal::isApprox(a, b, test_precision<unsigned int>()); }
-inline bool test_isApprox(const long& a, const long& b)
-{ return internal::isApprox(a, b, test_precision<long>()); }
-inline bool test_isApprox(const unsigned long& a, const unsigned long& b)
-{ return internal::isApprox(a, b, test_precision<unsigned long>()); }
-
-inline bool test_isApprox(const int& a, const int& b)
-{ return internal::isApprox(a, b, test_precision<int>()); }
-inline bool test_isMuchSmallerThan(const int& a, const int& b)
-{ return internal::isMuchSmallerThan(a, b, test_precision<int>()); }
-inline bool test_isApproxOrLessThan(const int& a, const int& b)
-{ return internal::isApproxOrLessThan(a, b, test_precision<int>()); }
-
-inline bool test_isApprox(const float& a, const float& b)
-{ return internal::isApprox(a, b, test_precision<float>()); }
-inline bool test_isMuchSmallerThan(const float& a, const float& b)
-{ return internal::isMuchSmallerThan(a, b, test_precision<float>()); }
-inline bool test_isApproxOrLessThan(const float& a, const float& b)
-{ return internal::isApproxOrLessThan(a, b, test_precision<float>()); }
-
-inline bool test_isApprox(const double& a, const double& b)
-{ return internal::isApprox(a, b, test_precision<double>()); }
-inline bool test_isMuchSmallerThan(const double& a, const double& b)
-{ return internal::isMuchSmallerThan(a, b, test_precision<double>()); }
-inline bool test_isApproxOrLessThan(const double& a, const double& b)
-{ return internal::isApproxOrLessThan(a, b, test_precision<double>()); }
+#define EIGEN_TEST_SCALAR_TEST_OVERLOAD(TYPE) \
+ inline bool test_isApprox(TYPE a, TYPE b) \
+ { return internal::isApprox(a, b, test_precision<TYPE>()); } \
+ inline bool test_isMuchSmallerThan(TYPE a, TYPE b) \
+ { return internal::isMuchSmallerThan(a, b, test_precision<TYPE>()); } \
+ inline bool test_isApproxOrLessThan(TYPE a, TYPE b) \
+ { return internal::isApproxOrLessThan(a, b, test_precision<TYPE>()); }
+
+EIGEN_TEST_SCALAR_TEST_OVERLOAD(short)
+EIGEN_TEST_SCALAR_TEST_OVERLOAD(unsigned short)
+EIGEN_TEST_SCALAR_TEST_OVERLOAD(int)
+EIGEN_TEST_SCALAR_TEST_OVERLOAD(unsigned int)
+EIGEN_TEST_SCALAR_TEST_OVERLOAD(long)
+EIGEN_TEST_SCALAR_TEST_OVERLOAD(unsigned long)
+#if EIGEN_HAS_CXX11
+EIGEN_TEST_SCALAR_TEST_OVERLOAD(long long)
+EIGEN_TEST_SCALAR_TEST_OVERLOAD(unsigned long long)
+#endif
+EIGEN_TEST_SCALAR_TEST_OVERLOAD(float)
+EIGEN_TEST_SCALAR_TEST_OVERLOAD(double)
+EIGEN_TEST_SCALAR_TEST_OVERLOAD(half)
+
+#undef EIGEN_TEST_SCALAR_TEST_OVERLOAD
#ifndef EIGEN_TEST_NO_COMPLEX
inline bool test_isApprox(const std::complex<float>& a, const std::complex<float>& b)
@@ -464,13 +466,6 @@ inline bool test_isApproxOrLessThan(const long double& a, const long double& b)
{ return internal::isApproxOrLessThan(a, b, test_precision<long double>()); }
#endif // EIGEN_TEST_NO_LONGDOUBLE
-inline bool test_isApprox(const half& a, const half& b)
-{ return internal::isApprox(a, b, test_precision<half>()); }
-inline bool test_isMuchSmallerThan(const half& a, const half& b)
-{ return internal::isMuchSmallerThan(a, b, test_precision<half>()); }
-inline bool test_isApproxOrLessThan(const half& a, const half& b)
-{ return internal::isApproxOrLessThan(a, b, test_precision<half>()); }
-
// test_relative_error returns the relative difference between a and b as a real scalar as used in isApprox.
template<typename T1,typename T2>
typename NumTraits<typename T1::RealScalar>::NonInteger test_relative_error(const EigenBase<T1> &a, const EigenBase<T2> &b)
diff --git a/test/nestbyvalue.cpp b/test/nestbyvalue.cpp
new file mode 100644
index 000000000..c5356bc24
--- /dev/null
+++ b/test/nestbyvalue.cpp
@@ -0,0 +1,37 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2019 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#define TEST_ENABLE_TEMPORARY_TRACKING
+
+#include "main.h"
+
+typedef NestByValue<MatrixXd> CpyMatrixXd;
+typedef CwiseBinaryOp<internal::scalar_sum_op<double,double>,const CpyMatrixXd,const CpyMatrixXd> XprType;
+
+XprType get_xpr_with_temps(const MatrixXd& a)
+{
+ MatrixXd t1 = a.rowwise().reverse();
+ MatrixXd t2 = a+a;
+ return t1.nestByValue() + t2.nestByValue();
+}
+
+EIGEN_DECLARE_TEST(nestbyvalue)
+{
+ for(int i = 0; i < g_repeat; i++) {
+ Index rows = internal::random<Index>(1,EIGEN_TEST_MAX_SIZE);
+ Index cols = internal::random<Index>(1,EIGEN_TEST_MAX_SIZE);
+ MatrixXd a = MatrixXd(rows,cols);
+ nb_temporaries = 0;
+ XprType x = get_xpr_with_temps(a);
+ VERIFY_IS_EQUAL(nb_temporaries,6);
+ MatrixXd b = x;
+ VERIFY_IS_EQUAL(nb_temporaries,6+1);
+ VERIFY_IS_APPROX(b, a.rowwise().reverse().eval() + (a+a).eval());
+ }
+}
diff --git a/test/nullary.cpp b/test/nullary.cpp
index 12b9e122f..9b25ea4f3 100644
--- a/test/nullary.cpp
+++ b/test/nullary.cpp
@@ -70,7 +70,7 @@ void testVectorType(const VectorType& base)
Scalar high = internal::random<Scalar>(-500,500);
Scalar low = (size == 1 ? high : internal::random<Scalar>(-500,500));
- if (low>high) std::swap(low,high);
+ if (numext::real(low)>numext::real(high)) std::swap(low,high);
// check low==high
if(internal::random<float>(0.f,1.f)<0.05f)
@@ -79,7 +79,7 @@ void testVectorType(const VectorType& base)
else if(size>2 && std::numeric_limits<RealScalar>::max_exponent10>0 && internal::random<float>(0.f,1.f)<0.1f)
low = -internal::random<Scalar>(1,2) * RealScalar(std::pow(RealScalar(10),std::numeric_limits<RealScalar>::max_exponent10/2));
- const Scalar step = ((size == 1) ? 1 : (high-low)/(size-1));
+ const Scalar step = ((size == 1) ? 1 : (high-low)/RealScalar(size-1));
// check whether the result yields what we expect it to do
VectorType m(base);
@@ -89,21 +89,22 @@ void testVectorType(const VectorType& base)
{
VectorType n(size);
for (int i=0; i<size; ++i)
- n(i) = low+i*step;
+ n(i) = low+RealScalar(i)*step;
VERIFY_IS_APPROX(m,n);
CALL_SUBTEST( check_extremity_accuracy(m, low, high) );
}
- if((!NumTraits<Scalar>::IsInteger) || ((high-low)>=size && (Index(high-low)%(size-1))==0) || (Index(high-low+1)<size && (size%Index(high-low+1))==0))
+ RealScalar range_length = numext::real(high-low);
+ if((!NumTraits<Scalar>::IsInteger) || (range_length>=size && (Index(range_length)%(size-1))==0) || (Index(range_length+1)<size && (size%Index(range_length+1))==0))
{
VectorType n(size);
- if((!NumTraits<Scalar>::IsInteger) || (high-low>=size))
+ if((!NumTraits<Scalar>::IsInteger) || (range_length>=size))
for (int i=0; i<size; ++i)
- n(i) = size==1 ? low : (low + ((high-low)*Scalar(i))/(size-1));
+ n(i) = size==1 ? low : (low + ((high-low)*Scalar(i))/RealScalar(size-1));
else
for (int i=0; i<size; ++i)
- n(i) = size==1 ? low : low + Scalar((double(high-low+1)*double(i))/double(size));
+ n(i) = size==1 ? low : low + Scalar((double(range_length+1)*double(i))/double(size));
VERIFY_IS_APPROX(m,n);
// random access version
@@ -116,12 +117,12 @@ void testVectorType(const VectorType& base)
CALL_SUBTEST( check_extremity_accuracy(m, low, high) );
}
- VERIFY( m(m.size()-1) <= high );
- VERIFY( (m.array() <= high).all() );
- VERIFY( (m.array() >= low).all() );
+ VERIFY( numext::real(m(m.size()-1)) <= numext::real(high) );
+ VERIFY( (m.array().real() <= numext::real(high)).all() );
+ VERIFY( (m.array().real() >= numext::real(low)).all() );
- VERIFY( m(m.size()-1) >= low );
+ VERIFY( numext::real(m(m.size()-1)) >= numext::real(low) );
if(size>=1)
{
VERIFY( internal::isApprox(m(0),low) );
@@ -135,7 +136,7 @@ void testVectorType(const VectorType& base)
col_vector.setLinSpaced(size,low,high);
// when using the extended precision (e.g., FPU) the relative error might exceed 1 bit
// when computing the squared sum in isApprox, thus the 2x factor.
- VERIFY( row_vector.isApprox(col_vector.transpose(), Scalar(2)*NumTraits<Scalar>::epsilon()));
+ VERIFY( row_vector.isApprox(col_vector.transpose(), RealScalar(2)*NumTraits<Scalar>::epsilon()));
Matrix<Scalar,Dynamic,1> size_changer(size+50);
size_changer.setLinSpaced(size,low,high);
@@ -157,18 +158,18 @@ void testVectorType(const VectorType& base)
{
Index n0 = VectorType::SizeAtCompileTime==Dynamic ? 0 : VectorType::SizeAtCompileTime;
low = internal::random<Scalar>();
- m = VectorType::LinSpaced(n0,low,low-1);
+ m = VectorType::LinSpaced(n0,low,low-RealScalar(1));
VERIFY(m.size()==n0);
if(VectorType::SizeAtCompileTime==Dynamic)
{
VERIFY_IS_EQUAL(VectorType::LinSpaced(n0,0,Scalar(n0-1)).sum(),Scalar(0));
- VERIFY_IS_EQUAL(VectorType::LinSpaced(n0,low,low-1).sum(),Scalar(0));
+ VERIFY_IS_EQUAL(VectorType::LinSpaced(n0,low,low-RealScalar(1)).sum(),Scalar(0));
}
m.setLinSpaced(n0,0,Scalar(n0-1));
VERIFY(m.size()==n0);
- m.setLinSpaced(n0,low,low-1);
+ m.setLinSpaced(n0,low,low-RealScalar(1));
VERIFY(m.size()==n0);
// empty range only:
@@ -178,16 +179,16 @@ void testVectorType(const VectorType& base)
if(NumTraits<Scalar>::IsInteger)
{
- VERIFY_IS_APPROX( VectorType::LinSpaced(size,low,Scalar(low+size-1)), VectorType::LinSpaced(size,Scalar(low+size-1),low).reverse() );
+ VERIFY_IS_APPROX( VectorType::LinSpaced(size,low,low+Scalar(size-1)), VectorType::LinSpaced(size,low+Scalar(size-1),low).reverse() );
if(VectorType::SizeAtCompileTime==Dynamic)
{
// Check negative multiplicator path:
for(Index k=1; k<5; ++k)
- VERIFY_IS_APPROX( VectorType::LinSpaced(size,low,Scalar(low+(size-1)*k)), VectorType::LinSpaced(size,Scalar(low+(size-1)*k),low).reverse() );
+ VERIFY_IS_APPROX( VectorType::LinSpaced(size,low,low+Scalar((size-1)*k)), VectorType::LinSpaced(size,low+Scalar((size-1)*k),low).reverse() );
// Check negative divisor path:
for(Index k=1; k<5; ++k)
- VERIFY_IS_APPROX( VectorType::LinSpaced(size*k,low,Scalar(low+size-1)), VectorType::LinSpaced(size*k,Scalar(low+size-1),low).reverse() );
+ VERIFY_IS_APPROX( VectorType::LinSpaced(size*k,low,low+Scalar(size-1)), VectorType::LinSpaced(size*k,low+Scalar(size-1),low).reverse() );
}
}
}
@@ -247,6 +248,14 @@ void bug79()
}
template<int>
+void bug1630()
+{
+ Array4d x4 = Array4d::LinSpaced(0.0, 1.0);
+ Array3d x3(Array4d::LinSpaced(0.0, 1.0).head(3));
+ VERIFY_IS_APPROX(x4.head(3), x3);
+}
+
+template<int>
void nullary_overflow()
{
// Check possible overflow issue
@@ -272,10 +281,10 @@ void nullary_internal_logic()
VERIFY(( internal::has_binary_operator<internal::scalar_identity_op<double> >::value ));
VERIFY(( !internal::functor_has_linear_access<internal::scalar_identity_op<double> >::ret ));
- VERIFY(( !internal::has_nullary_operator<internal::linspaced_op<float,float> >::value ));
- VERIFY(( internal::has_unary_operator<internal::linspaced_op<float,float> >::value ));
- VERIFY(( !internal::has_binary_operator<internal::linspaced_op<float,float> >::value ));
- VERIFY(( internal::functor_has_linear_access<internal::linspaced_op<float,float> >::ret ));
+ VERIFY(( !internal::has_nullary_operator<internal::linspaced_op<float> >::value ));
+ VERIFY(( internal::has_unary_operator<internal::linspaced_op<float> >::value ));
+ VERIFY(( !internal::has_binary_operator<internal::linspaced_op<float> >::value ));
+ VERIFY(( internal::functor_has_linear_access<internal::linspaced_op<float> >::ret ));
// Regression unit test for a weird MSVC bug.
// Search "nullary_wrapper_workaround_msvc" in CoreEvaluators.h for the details.
@@ -296,10 +305,10 @@ void nullary_internal_logic()
VERIFY(( !internal::has_binary_operator<internal::scalar_constant_op<float> >::value ));
VERIFY(( internal::functor_has_linear_access<internal::scalar_constant_op<float> >::ret ));
- VERIFY(( !internal::has_nullary_operator<internal::linspaced_op<int,int> >::value ));
- VERIFY(( internal::has_unary_operator<internal::linspaced_op<int,int> >::value ));
- VERIFY(( !internal::has_binary_operator<internal::linspaced_op<int,int> >::value ));
- VERIFY(( internal::functor_has_linear_access<internal::linspaced_op<int,int> >::ret ));
+ VERIFY(( !internal::has_nullary_operator<internal::linspaced_op<int> >::value ));
+ VERIFY(( internal::has_unary_operator<internal::linspaced_op<int> >::value ));
+ VERIFY(( !internal::has_binary_operator<internal::linspaced_op<int> >::value ));
+ VERIFY(( internal::functor_has_linear_access<internal::linspaced_op<int> >::ret ));
}
}
@@ -310,6 +319,7 @@ EIGEN_DECLARE_TEST(nullary)
CALL_SUBTEST_3( testMatrixType(MatrixXf(internal::random<int>(1,300),internal::random<int>(1,300))) );
for(int i = 0; i < g_repeat*10; i++) {
+ CALL_SUBTEST_3( testVectorType(VectorXcd(internal::random<int>(1,30000))) );
CALL_SUBTEST_4( testVectorType(VectorXd(internal::random<int>(1,30000))) );
CALL_SUBTEST_5( testVectorType(Vector4d()) ); // regression test for bug 232
CALL_SUBTEST_6( testVectorType(Vector3d()) );
@@ -325,6 +335,7 @@ EIGEN_DECLARE_TEST(nullary)
}
CALL_SUBTEST_6( bug79<0>() );
+ CALL_SUBTEST_6( bug1630<0>() );
CALL_SUBTEST_9( nullary_overflow<0>() );
CALL_SUBTEST_10( nullary_internal_logic<0>() );
}
diff --git a/test/numext.cpp b/test/numext.cpp
index 6307f5979..8c6447d40 100644
--- a/test/numext.cpp
+++ b/test/numext.cpp
@@ -12,6 +12,7 @@
template<typename T>
void check_abs() {
typedef typename NumTraits<T>::Real Real;
+ Real zero(0);
if(NumTraits<T>::IsSigned)
VERIFY_IS_EQUAL(numext::abs(-T(1)), T(1));
@@ -26,9 +27,9 @@ void check_abs() {
if(NumTraits<T>::IsSigned)
{
VERIFY_IS_EQUAL(numext::abs(x), numext::abs(-x));
- VERIFY( numext::abs(-x) >= Real(0));
+ VERIFY( numext::abs(-x) >= zero );
}
- VERIFY( numext::abs(x) >= Real(0));
+ VERIFY( numext::abs(x) >= zero );
VERIFY_IS_APPROX( numext::abs2(x), numext::abs2(numext::abs(x)) );
}
}
diff --git a/test/packetmath.cpp b/test/packetmath.cpp
index babb7c20e..4906f6eb0 100644
--- a/test/packetmath.cpp
+++ b/test/packetmath.cpp
@@ -10,6 +10,7 @@
#include "main.h"
#include "unsupported/Eigen/SpecialFunctions"
+#include <typeinfo>
#if defined __GNUC__ && __GNUC__>=6
#pragma GCC diagnostic ignored "-Wignored-attributes"
@@ -22,9 +23,52 @@ const bool g_vectorize_sse = true;
const bool g_vectorize_sse = false;
#endif
+bool g_first_pass = true;
+
namespace Eigen {
namespace internal {
+
template<typename T> T negate(const T& x) { return -x; }
+
+template<typename T>
+Map<const Array<unsigned char,sizeof(T),1> >
+bits(const T& x) {
+ return Map<const Array<unsigned char,sizeof(T),1> >(reinterpret_cast<const unsigned char *>(&x));
+}
+
+// The following implement bitwise operations on floating point types
+template<typename T,typename Bits,typename Func>
+T apply_bit_op(Bits a, Bits b, Func f) {
+ Array<unsigned char,sizeof(T),1> res;
+ for(Index i=0; i<res.size();++i) res[i] = f(a[i],b[i]);
+ return *reinterpret_cast<T*>(&res);
+}
+
+#define EIGEN_TEST_MAKE_BITWISE2(OP,FUNC,T) \
+ template<> T EIGEN_CAT(p,OP)(const T& a,const T& b) { \
+ return apply_bit_op<T>(bits(a),bits(b),FUNC); \
+ }
+
+#define EIGEN_TEST_MAKE_BITWISE(OP,FUNC) \
+ EIGEN_TEST_MAKE_BITWISE2(OP,FUNC,float) \
+ EIGEN_TEST_MAKE_BITWISE2(OP,FUNC,double) \
+ EIGEN_TEST_MAKE_BITWISE2(OP,FUNC,half) \
+ EIGEN_TEST_MAKE_BITWISE2(OP,FUNC,std::complex<float>) \
+ EIGEN_TEST_MAKE_BITWISE2(OP,FUNC,std::complex<double>)
+
+EIGEN_TEST_MAKE_BITWISE(xor,std::bit_xor<unsigned char>())
+EIGEN_TEST_MAKE_BITWISE(and,std::bit_and<unsigned char>())
+EIGEN_TEST_MAKE_BITWISE(or, std::bit_or<unsigned char>())
+struct bit_andnot{
+ template<typename T> T
+ operator()(T a, T b) const { return a & (~b); }
+};
+EIGEN_TEST_MAKE_BITWISE(andnot, bit_andnot())
+template<typename T>
+bool biteq(T a, T b) {
+ return (bits(a) == bits(b)).all();
+}
+
}
}
@@ -52,7 +96,7 @@ template<typename Scalar> bool areApprox(const Scalar* a, const Scalar* b, int s
{
for (int i=0; i<size; ++i)
{
- if (a[i]!=b[i] && !internal::isApprox(a[i],b[i]))
+ if ((!internal::biteq(a[i],b[i])) && a[i]!=b[i] && !internal::isApprox(a[i],b[i]))
{
std::cout << "ref: [" << Map<const Matrix<Scalar,1,Dynamic> >(a,size) << "]" << " != vec: [" << Map<const Matrix<Scalar,1,Dynamic> >(b,size) << "]\n";
return false;
@@ -109,14 +153,18 @@ struct packet_helper<false,Packet>
#define REF_MUL(a,b) ((a)*(b))
#define REF_DIV(a,b) ((a)/(b))
-template<typename Scalar> void packetmath()
+template<typename Scalar,typename Packet> void packetmath()
{
using std::abs;
typedef internal::packet_traits<Scalar> PacketTraits;
- typedef typename PacketTraits::type Packet;
- const int PacketSize = PacketTraits::size;
+ const int PacketSize = internal::unpacket_traits<Packet>::size;
typedef typename NumTraits<Scalar>::Real RealScalar;
+ if (g_first_pass)
+ std::cerr << "=== Testing packet of type '" << typeid(Packet).name()
+ << "' and scalar type '" << typeid(Scalar).name()
+ << "' and size '" << PacketSize << "' ===\n" ;
+
const int max_size = PacketSize > 4 ? PacketSize : 4;
const int size = PacketSize*max_size;
EIGEN_ALIGN_MAX Scalar data1[size];
@@ -190,6 +238,9 @@ template<typename Scalar> void packetmath()
CHECK_CWISE2_IF(PacketTraits::HasMul, REF_MUL, internal::pmul);
CHECK_CWISE2_IF(PacketTraits::HasDiv, REF_DIV, internal::pdiv);
+ CHECK_CWISE1(internal::pnot, internal::pnot);
+ CHECK_CWISE1(internal::pzero, internal::pzero);
+ CHECK_CWISE1(internal::ptrue, internal::ptrue);
CHECK_CWISE1(internal::negate, internal::pnegate);
CHECK_CWISE1(numext::conj, internal::pconj);
@@ -254,7 +305,7 @@ template<typename Scalar> void packetmath()
ref[0] += data1[i];
VERIFY(isApproxAbs(ref[0], internal::predux(internal::pload<Packet>(data1)), refvalue) && "internal::predux");
- if(PacketSize==8 && internal::unpacket_traits<typename internal::unpacket_traits<Packet>::half>::size ==4) // so far, predux_half_dowto4 is only required in such a case
+ if(PacketSize==8 && internal::unpacket_traits<typename internal::unpacket_traits<Packet>::half>::size ==4) // so far, predux_half_downto4 is only required in such a case
{
int HalfPacketSize = PacketSize>4 ? PacketSize/2 : PacketSize;
for (int i=0; i<HalfPacketSize; ++i)
@@ -270,15 +321,18 @@ template<typename Scalar> void packetmath()
ref[0] *= data1[i];
VERIFY(internal::isApprox(ref[0], internal::predux_mul(internal::pload<Packet>(data1))) && "internal::predux_mul");
- for (int j=0; j<PacketSize; ++j)
+ if (PacketTraits::HasReduxp)
{
- ref[j] = Scalar(0);
- for (int i=0; i<PacketSize; ++i)
- ref[j] += data1[i+j*PacketSize];
- packets[j] = internal::pload<Packet>(data1+j*PacketSize);
+ for (int j=0; j<PacketSize; ++j)
+ {
+ ref[j] = Scalar(0);
+ for (int i=0; i<PacketSize; ++i)
+ ref[j] += data1[i+j*PacketSize];
+ packets[j] = internal::pload<Packet>(data1+j*PacketSize);
+ }
+ internal::pstore(data2, internal::preduxp(packets));
+ VERIFY(areApproxAbs(ref, data2, PacketSize, refvalue) && "internal::preduxp");
}
- internal::pstore(data2, internal::preduxp(packets));
- VERIFY(areApproxAbs(ref, data2, PacketSize, refvalue) && "internal::preduxp");
for (int i=0; i<PacketSize; ++i)
ref[i] = data1[PacketSize-i-1];
@@ -332,19 +386,41 @@ template<typename Scalar> void packetmath()
internal::pstore(data2, internal::pinsertlast(internal::pload<Packet>(data1),s));
VERIFY(areApprox(ref, data2, PacketSize) && "internal::pinsertlast");
}
+
+ {
+ for (int i=0; i<PacketSize; ++i)
+ {
+ data1[i] = internal::random<Scalar>();
+ unsigned char v = internal::random<bool>() ? 0xff : 0;
+ char* bytes = (char*)(data1+PacketSize+i);
+ for(int k=0; k<int(sizeof(Scalar)); ++k)
+ bytes[k] = v;
+ }
+ CHECK_CWISE2_IF(true, internal::por, internal::por);
+ CHECK_CWISE2_IF(true, internal::pxor, internal::pxor);
+ CHECK_CWISE2_IF(true, internal::pand, internal::pand);
+ CHECK_CWISE2_IF(true, internal::pandnot, internal::pandnot);
+ }
+
+ {
+ for (int i = 0; i < PacketSize; ++i) {
+ data1[i] = internal::random<Scalar>();
+ data2[i] = (i % 2) ? data1[i] : Scalar(0);
+ }
+ CHECK_CWISE2_IF(true, internal::pcmp_eq, internal::pcmp_eq);
+ }
}
-template<typename Scalar> void packetmath_real()
+template<typename Scalar,typename Packet> void packetmath_real()
{
using std::abs;
typedef internal::packet_traits<Scalar> PacketTraits;
- typedef typename PacketTraits::type Packet;
- const int PacketSize = PacketTraits::size;
+ const int PacketSize = internal::unpacket_traits<Packet>::size;
const int size = PacketSize*4;
- EIGEN_ALIGN_MAX Scalar data1[PacketTraits::size*4];
- EIGEN_ALIGN_MAX Scalar data2[PacketTraits::size*4];
- EIGEN_ALIGN_MAX Scalar ref[PacketTraits::size*4];
+ EIGEN_ALIGN_MAX Scalar data1[PacketSize*4];
+ EIGEN_ALIGN_MAX Scalar data2[PacketSize*4];
+ EIGEN_ALIGN_MAX Scalar ref[PacketSize*4];
for (int i=0; i<size; ++i)
{
@@ -379,7 +455,7 @@ template<typename Scalar> void packetmath_real()
data2[i] = internal::random<Scalar>(-1,1) * std::pow(Scalar(10), internal::random<Scalar>(-6,6));
}
CHECK_CWISE1_IF(PacketTraits::HasTanh, std::tanh, internal::ptanh);
- if(PacketTraits::HasExp && PacketTraits::size>=2)
+ if(PacketTraits::HasExp && PacketSize>=2)
{
data1[0] = std::numeric_limits<Scalar>::quiet_NaN();
data1[1] = std::numeric_limits<Scalar>::epsilon();
@@ -455,10 +531,11 @@ template<typename Scalar> void packetmath_real()
CHECK_CWISE1_IF(internal::packet_traits<Scalar>::HasErfc, std::erfc, internal::perfc);
#endif
- if(PacketTraits::HasLog && PacketTraits::size>=2)
+ if(PacketSize>=2)
{
data1[0] = std::numeric_limits<Scalar>::quiet_NaN();
data1[1] = std::numeric_limits<Scalar>::epsilon();
+ if(PacketTraits::HasLog)
{
packet_helper<PacketTraits::HasLog,Packet> h;
h.store(data2, internal::plog(h.load(data1)));
@@ -486,29 +563,77 @@ template<typename Scalar> void packetmath_real()
data1[0] = Scalar(-1.0f);
h.store(data2, internal::plog(h.load(data1)));
VERIFY((numext::isnan)(data2[0]));
+
+ data1[0] = std::numeric_limits<Scalar>::infinity();
+ h.store(data2, internal::plog(h.load(data1)));
+ VERIFY((numext::isinf)(data2[0]));
}
+ if(PacketTraits::HasSqrt)
{
packet_helper<PacketTraits::HasSqrt,Packet> h;
data1[0] = Scalar(-1.0f);
+ data1[1] = -std::numeric_limits<Scalar>::denorm_min();
h.store(data2, internal::psqrt(h.load(data1)));
VERIFY((numext::isnan)(data2[0]));
VERIFY((numext::isnan)(data2[1]));
}
+ if(PacketTraits::HasCos)
+ {
+ packet_helper<PacketTraits::HasCos,Packet> h;
+ for(Scalar k = 1; k<Scalar(10000)/std::numeric_limits<Scalar>::epsilon(); k*=2)
+ {
+ for(int k1=0;k1<=1; ++k1)
+ {
+ data1[0] = (2*k+k1 )*Scalar(EIGEN_PI)/2 * internal::random<Scalar>(0.8,1.2);
+ data1[1] = (2*k+2+k1)*Scalar(EIGEN_PI)/2 * internal::random<Scalar>(0.8,1.2);
+ h.store(data2, internal::pcos(h.load(data1)));
+ h.store(data2+PacketSize, internal::psin(h.load(data1)));
+ VERIFY(data2[0]<=Scalar(1.) && data2[0]>=Scalar(-1.));
+ VERIFY(data2[1]<=Scalar(1.) && data2[1]>=Scalar(-1.));
+ VERIFY(data2[PacketSize+0]<=Scalar(1.) && data2[PacketSize+0]>=Scalar(-1.));
+ VERIFY(data2[PacketSize+1]<=Scalar(1.) && data2[PacketSize+1]>=Scalar(-1.));
+
+ VERIFY_IS_APPROX(numext::abs2(data2[0])+numext::abs2(data2[PacketSize+0]), Scalar(1));
+ VERIFY_IS_APPROX(numext::abs2(data2[1])+numext::abs2(data2[PacketSize+1]), Scalar(1));
+ }
+ }
+
+ data1[0] = std::numeric_limits<Scalar>::infinity();
+ data1[1] = -std::numeric_limits<Scalar>::infinity();
+ h.store(data2, internal::psin(h.load(data1)));
+ VERIFY((numext::isnan)(data2[0]));
+ VERIFY((numext::isnan)(data2[1]));
+
+ h.store(data2, internal::pcos(h.load(data1)));
+ VERIFY((numext::isnan)(data2[0]));
+ VERIFY((numext::isnan)(data2[1]));
+
+ data1[0] = std::numeric_limits<Scalar>::quiet_NaN();
+ h.store(data2, internal::psin(h.load(data1)));
+ VERIFY((numext::isnan)(data2[0]));
+ h.store(data2, internal::pcos(h.load(data1)));
+ VERIFY((numext::isnan)(data2[0]));
+
+ data1[0] = -Scalar(0.);
+ h.store(data2, internal::psin(h.load(data1)));
+ VERIFY( internal::biteq(data2[0], data1[0]) );
+ h.store(data2, internal::pcos(h.load(data1)));
+ VERIFY_IS_EQUAL(data2[0], Scalar(1));
+ }
}
}
-template<typename Scalar> void packetmath_notcomplex()
+template<typename Scalar,typename Packet> void packetmath_notcomplex()
{
using std::abs;
typedef internal::packet_traits<Scalar> PacketTraits;
- typedef typename PacketTraits::type Packet;
- const int PacketSize = PacketTraits::size;
+ const int PacketSize = internal::unpacket_traits<Packet>::size;
- EIGEN_ALIGN_MAX Scalar data1[PacketTraits::size*4];
- EIGEN_ALIGN_MAX Scalar data2[PacketTraits::size*4];
- EIGEN_ALIGN_MAX Scalar ref[PacketTraits::size*4];
+ EIGEN_ALIGN_MAX Scalar data1[PacketSize*4];
+ EIGEN_ALIGN_MAX Scalar data2[PacketSize*4];
+ EIGEN_ALIGN_MAX Scalar ref[PacketSize*4];
- Array<Scalar,Dynamic,1>::Map(data1, PacketTraits::size*4).setRandom();
+ Array<Scalar,Dynamic,1>::Map(data1, PacketSize*4).setRandom();
ref[0] = data1[0];
for (int i=0; i<PacketSize; ++i)
@@ -531,13 +656,34 @@ template<typename Scalar> void packetmath_notcomplex()
ref[i] = data1[0]+Scalar(i);
internal::pstore(data2, internal::plset<Packet>(data1[0]));
VERIFY(areApprox(ref, data2, PacketSize) && "internal::plset");
+
+ {
+ unsigned char* data1_bits = reinterpret_cast<unsigned char*>(data1);
+ // predux_all - not needed yet
+ // for (unsigned int i=0; i<PacketSize*sizeof(Scalar); ++i) data1_bits[i] = 0xff;
+ // VERIFY(internal::predux_all(internal::pload<Packet>(data1)) && "internal::predux_all(1111)");
+ // for(int k=0; k<PacketSize; ++k)
+ // {
+ // for (unsigned int i=0; i<sizeof(Scalar); ++i) data1_bits[k*sizeof(Scalar)+i] = 0x0;
+ // VERIFY( (!internal::predux_all(internal::pload<Packet>(data1))) && "internal::predux_all(0101)");
+ // for (unsigned int i=0; i<sizeof(Scalar); ++i) data1_bits[k*sizeof(Scalar)+i] = 0xff;
+ // }
+
+ // predux_any
+ for (unsigned int i=0; i<PacketSize*sizeof(Scalar); ++i) data1_bits[i] = 0x0;
+ VERIFY( (!internal::predux_any(internal::pload<Packet>(data1))) && "internal::predux_any(0000)");
+ for(int k=0; k<PacketSize; ++k)
+ {
+ for (unsigned int i=0; i<sizeof(Scalar); ++i) data1_bits[k*sizeof(Scalar)+i] = 0xff;
+ VERIFY( internal::predux_any(internal::pload<Packet>(data1)) && "internal::predux_any(0101)");
+ for (unsigned int i=0; i<sizeof(Scalar); ++i) data1_bits[k*sizeof(Scalar)+i] = 0x00;
+ }
+ }
}
-template<typename Scalar,bool ConjLhs,bool ConjRhs> void test_conj_helper(Scalar* data1, Scalar* data2, Scalar* ref, Scalar* pval)
+template<typename Scalar,typename Packet,bool ConjLhs,bool ConjRhs> void test_conj_helper(Scalar* data1, Scalar* data2, Scalar* ref, Scalar* pval)
{
- typedef internal::packet_traits<Scalar> PacketTraits;
- typedef typename PacketTraits::type Packet;
- const int PacketSize = PacketTraits::size;
+ const int PacketSize = internal::unpacket_traits<Packet>::size;
internal::conj_if<ConjLhs> cj0;
internal::conj_if<ConjRhs> cj1;
@@ -562,11 +708,9 @@ template<typename Scalar,bool ConjLhs,bool ConjRhs> void test_conj_helper(Scalar
VERIFY(areApprox(ref, pval, PacketSize) && "conj_helper pmadd");
}
-template<typename Scalar> void packetmath_complex()
+template<typename Scalar,typename Packet> void packetmath_complex()
{
- typedef internal::packet_traits<Scalar> PacketTraits;
- typedef typename PacketTraits::type Packet;
- const int PacketSize = PacketTraits::size;
+ const int PacketSize = internal::unpacket_traits<Packet>::size;
const int size = PacketSize*4;
EIGEN_ALIGN_MAX Scalar data1[PacketSize*4];
@@ -580,10 +724,10 @@ template<typename Scalar> void packetmath_complex()
data2[i] = internal::random<Scalar>() * Scalar(1e2);
}
- test_conj_helper<Scalar,false,false> (data1,data2,ref,pval);
- test_conj_helper<Scalar,false,true> (data1,data2,ref,pval);
- test_conj_helper<Scalar,true,false> (data1,data2,ref,pval);
- test_conj_helper<Scalar,true,true> (data1,data2,ref,pval);
+ test_conj_helper<Scalar,Packet,false,false> (data1,data2,ref,pval);
+ test_conj_helper<Scalar,Packet,false,true> (data1,data2,ref,pval);
+ test_conj_helper<Scalar,Packet,true,false> (data1,data2,ref,pval);
+ test_conj_helper<Scalar,Packet,true,true> (data1,data2,ref,pval);
{
for(int i=0;i<PacketSize;++i)
@@ -593,12 +737,10 @@ template<typename Scalar> void packetmath_complex()
}
}
-template<typename Scalar> void packetmath_scatter_gather()
+template<typename Scalar,typename Packet> void packetmath_scatter_gather()
{
- typedef internal::packet_traits<Scalar> PacketTraits;
- typedef typename PacketTraits::type Packet;
typedef typename NumTraits<Scalar>::Real RealScalar;
- const int PacketSize = PacketTraits::size;
+ const int PacketSize = internal::unpacket_traits<Packet>::size;
EIGEN_ALIGN_MAX Scalar data1[PacketSize];
RealScalar refvalue = 0;
for (int i=0; i<PacketSize; ++i) {
@@ -630,30 +772,86 @@ template<typename Scalar> void packetmath_scatter_gather()
}
}
+
+template<
+ typename Scalar,
+ typename PacketType,
+ bool IsComplex = NumTraits<Scalar>::IsComplex,
+ bool IsInteger = NumTraits<Scalar>::IsInteger>
+struct runall;
+
+template<typename Scalar,typename PacketType>
+struct runall<Scalar,PacketType,false,false> { // i.e. float or double
+ static void run() {
+ packetmath<Scalar,PacketType>();
+ packetmath_scatter_gather<Scalar,PacketType>();
+ packetmath_notcomplex<Scalar,PacketType>();
+ packetmath_real<Scalar,PacketType>();
+ }
+};
+
+template<typename Scalar,typename PacketType>
+struct runall<Scalar,PacketType,false,true> { // i.e. int
+ static void run() {
+ packetmath<Scalar,PacketType>();
+ packetmath_scatter_gather<Scalar,PacketType>();
+ packetmath_notcomplex<Scalar,PacketType>();
+ }
+};
+
+template<typename Scalar,typename PacketType>
+struct runall<Scalar,PacketType,true,false> { // i.e. complex
+ static void run() {
+ packetmath<Scalar,PacketType>();
+ packetmath_scatter_gather<Scalar,PacketType>();
+ packetmath_complex<Scalar,PacketType>();
+ }
+};
+
+template<
+ typename Scalar,
+ typename PacketType = typename internal::packet_traits<Scalar>::type,
+ bool Vectorized = internal::packet_traits<Scalar>::Vectorizable,
+ bool HasHalf = !internal::is_same<typename internal::unpacket_traits<PacketType>::half,PacketType>::value >
+struct runner;
+
+template<typename Scalar,typename PacketType>
+struct runner<Scalar,PacketType,true,true>
+{
+ static void run() {
+ runall<Scalar,PacketType>::run();
+ runner<Scalar,typename internal::unpacket_traits<PacketType>::half>::run();
+ }
+};
+
+template<typename Scalar,typename PacketType>
+struct runner<Scalar,PacketType,true,false>
+{
+ static void run() {
+ runall<Scalar,PacketType>::run();
+ runall<Scalar,Scalar>::run();
+ }
+};
+
+template<typename Scalar,typename PacketType>
+struct runner<Scalar,PacketType,false,false>
+{
+ static void run() {
+ runall<Scalar,PacketType>::run();
+ }
+};
+
EIGEN_DECLARE_TEST(packetmath)
{
+ g_first_pass = true;
for(int i = 0; i < g_repeat; i++) {
- CALL_SUBTEST_1( packetmath<float>() );
- CALL_SUBTEST_2( packetmath<double>() );
- CALL_SUBTEST_3( packetmath<int>() );
- CALL_SUBTEST_4( packetmath<std::complex<float> >() );
- CALL_SUBTEST_5( packetmath<std::complex<double> >() );
- CALL_SUBTEST_6( packetmath<half>() );
-
- CALL_SUBTEST_1( packetmath_notcomplex<float>() );
- CALL_SUBTEST_2( packetmath_notcomplex<double>() );
- CALL_SUBTEST_3( packetmath_notcomplex<int>() );
-
- CALL_SUBTEST_1( packetmath_real<float>() );
- CALL_SUBTEST_2( packetmath_real<double>() );
-
- CALL_SUBTEST_4( packetmath_complex<std::complex<float> >() );
- CALL_SUBTEST_5( packetmath_complex<std::complex<double> >() );
-
- CALL_SUBTEST_1( packetmath_scatter_gather<float>() );
- CALL_SUBTEST_2( packetmath_scatter_gather<double>() );
- CALL_SUBTEST_3( packetmath_scatter_gather<int>() );
- CALL_SUBTEST_4( packetmath_scatter_gather<std::complex<float> >() );
- CALL_SUBTEST_5( packetmath_scatter_gather<std::complex<double> >() );
+
+ CALL_SUBTEST_1( runner<float>::run() );
+ CALL_SUBTEST_2( runner<double>::run() );
+ CALL_SUBTEST_3( runner<int>::run() );
+ CALL_SUBTEST_4( runner<std::complex<float> >::run() );
+ CALL_SUBTEST_5( runner<std::complex<double> >::run() );
+ CALL_SUBTEST_6(( packetmath<half,internal::packet_traits<half>::type>() ));
+ g_first_pass = false;
}
}
diff --git a/test/product_notemporary.cpp b/test/product_notemporary.cpp
index dffb07608..20cb7c080 100644
--- a/test/product_notemporary.cpp
+++ b/test/product_notemporary.cpp
@@ -11,6 +11,37 @@
#include "main.h"
+template<typename Dst, typename Lhs, typename Rhs>
+void check_scalar_multiple3(Dst &dst, const Lhs& A, const Rhs& B)
+{
+ VERIFY_EVALUATION_COUNT( (dst.noalias() = A * B), 0);
+ VERIFY_IS_APPROX( dst, (A.eval() * B.eval()).eval() );
+ VERIFY_EVALUATION_COUNT( (dst.noalias() += A * B), 0);
+ VERIFY_IS_APPROX( dst, 2*(A.eval() * B.eval()).eval() );
+ VERIFY_EVALUATION_COUNT( (dst.noalias() -= A * B), 0);
+ VERIFY_IS_APPROX( dst, (A.eval() * B.eval()).eval() );
+}
+
+template<typename Dst, typename Lhs, typename Rhs, typename S2>
+void check_scalar_multiple2(Dst &dst, const Lhs& A, const Rhs& B, S2 s2)
+{
+ CALL_SUBTEST( check_scalar_multiple3(dst, A, B) );
+ CALL_SUBTEST( check_scalar_multiple3(dst, A, -B) );
+ CALL_SUBTEST( check_scalar_multiple3(dst, A, s2*B) );
+ CALL_SUBTEST( check_scalar_multiple3(dst, A, B*s2) );
+ CALL_SUBTEST( check_scalar_multiple3(dst, A, (B*s2).conjugate()) );
+}
+
+template<typename Dst, typename Lhs, typename Rhs, typename S1, typename S2>
+void check_scalar_multiple1(Dst &dst, const Lhs& A, const Rhs& B, S1 s1, S2 s2)
+{
+ CALL_SUBTEST( check_scalar_multiple2(dst, A, B, s2) );
+ CALL_SUBTEST( check_scalar_multiple2(dst, -A, B, s2) );
+ CALL_SUBTEST( check_scalar_multiple2(dst, s1*A, B, s2) );
+ CALL_SUBTEST( check_scalar_multiple2(dst, A*s1, B, s2) );
+ CALL_SUBTEST( check_scalar_multiple2(dst, (A*s1).conjugate(), B, s2) );
+}
+
template<typename MatrixType> void product_notemporary(const MatrixType& m)
{
/* This test checks the number of temporaries created
@@ -105,7 +136,9 @@ template<typename MatrixType> void product_notemporary(const MatrixType& m)
VERIFY_EVALUATION_COUNT( m3.noalias() = m1.block(r0,r0,r1,r1).template triangularView<UnitUpper>() * m2.block(r0,c0,r1,c1), 1);
// Zero temporaries for lazy products ...
+ m3.setRandom(rows,cols);
VERIFY_EVALUATION_COUNT( Scalar tmp = 0; tmp += Scalar(RealScalar(1)) / (m3.transpose().lazyProduct(m3)).diagonal().sum(), 0 );
+ VERIFY_EVALUATION_COUNT( m3.noalias() = m1.conjugate().lazyProduct(m2.conjugate()), 0);
  // ... and no temporaries even for deeply (>=2) nested products
VERIFY_EVALUATION_COUNT( Scalar tmp = 0; tmp += Scalar(RealScalar(1)) / (m3.transpose() * m3).diagonal().sum(), 0 );
@@ -148,6 +181,15 @@ template<typename MatrixType> void product_notemporary(const MatrixType& m)
// Check nested products
VERIFY_EVALUATION_COUNT( cvres.noalias() = m1.adjoint() * m1 * cv1, 1 );
VERIFY_EVALUATION_COUNT( rvres.noalias() = rv1 * (m1 * m2.adjoint()), 1 );
+
+ // exhaustively check all scalar multiple combinations:
+ {
+ // Generic path:
+ check_scalar_multiple1(m3, m1, m2, s1, s2);
+ // Force fall back to coeff-based:
+ typename ColMajorMatrixType::BlockXpr m3_blck = m3.block(r0,r0,1,1);
+ check_scalar_multiple1(m3_blck, m1.block(r0,c0,1,1), m2.block(c0,r0,1,1), s1, s2);
+ }
}
EIGEN_DECLARE_TEST(product_notemporary)
diff --git a/test/product_trsolve.cpp b/test/product_trsolve.cpp
index 0c22cccf6..c927cb635 100644
--- a/test/product_trsolve.cpp
+++ b/test/product_trsolve.cpp
@@ -71,6 +71,19 @@ template<typename Scalar,int Size, int Cols> void trsolve(int size=Size,int cols
int c = internal::random<int>(0,cols-1);
VERIFY_TRSM(rmLhs.template triangularView<Lower>(), rmRhs.col(c));
VERIFY_TRSM(cmLhs.template triangularView<Lower>(), rmRhs.col(c));
+
+ if(Size==Dynamic)
+ {
+ cmLhs.resize(0,0);
+ cmRhs.resize(0,cmRhs.cols());
+ Matrix<Scalar,Size,Cols,colmajor> res = cmLhs.template triangularView<Lower>().solve(cmRhs);
+ VERIFY_IS_EQUAL(res.rows(),0);
+ VERIFY_IS_EQUAL(res.cols(),cmRhs.cols());
+ res = cmRhs;
+ cmLhs.template triangularView<Lower>().solveInPlace(res);
+ VERIFY_IS_EQUAL(res.rows(),0);
+ VERIFY_IS_EQUAL(res.cols(),cmRhs.cols());
+ }
}
EIGEN_DECLARE_TEST(product_trsolve)
diff --git a/test/qr.cpp b/test/qr.cpp
index 4799aa9ef..c38e3439b 100644
--- a/test/qr.cpp
+++ b/test/qr.cpp
@@ -9,6 +9,7 @@
#include "main.h"
#include <Eigen/QR>
+#include "solverbase.h"
template<typename MatrixType> void qr(const MatrixType& m)
{
@@ -41,11 +42,7 @@ template<typename MatrixType, int Cols2> void qr_fixedsize()
VERIFY_IS_APPROX(m1, qr.householderQ() * r);
- Matrix<Scalar,Cols,Cols2> m2 = Matrix<Scalar,Cols,Cols2>::Random(Cols,Cols2);
- Matrix<Scalar,Rows,Cols2> m3 = m1*m2;
- m2 = Matrix<Scalar,Cols,Cols2>::Random(Cols,Cols2);
- m2 = qr.solve(m3);
- VERIFY_IS_APPROX(m3, m1*m2);
+ check_solverbase<Matrix<Scalar,Cols,Cols2>, Matrix<Scalar,Rows,Cols2> >(m1, qr, Rows, Cols, Cols2);
}
template<typename MatrixType> void qr_invertible()
@@ -57,6 +54,8 @@ template<typename MatrixType> void qr_invertible()
typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
typedef typename MatrixType::Scalar Scalar;
+ STATIC_CHECK(( internal::is_same<typename HouseholderQR<MatrixType>::StorageIndex,int>::value ));
+
int size = internal::random<int>(10,50);
MatrixType m1(size, size), m2(size, size), m3(size, size);
@@ -70,9 +69,8 @@ template<typename MatrixType> void qr_invertible()
}
HouseholderQR<MatrixType> qr(m1);
- m3 = MatrixType::Random(size,size);
- m2 = qr.solve(m3);
- VERIFY_IS_APPROX(m3, m1*m2);
+
+ check_solverbase<MatrixType, MatrixType>(m1, qr, size, size, size);
// now construct a matrix with prescribed determinant
m1.setZero();
@@ -95,6 +93,8 @@ template<typename MatrixType> void qr_verify_assert()
HouseholderQR<MatrixType> qr;
VERIFY_RAISES_ASSERT(qr.matrixQR())
VERIFY_RAISES_ASSERT(qr.solve(tmp))
+ VERIFY_RAISES_ASSERT(qr.transpose().solve(tmp))
+ VERIFY_RAISES_ASSERT(qr.adjoint().solve(tmp))
VERIFY_RAISES_ASSERT(qr.householderQ())
VERIFY_RAISES_ASSERT(qr.absDeterminant())
VERIFY_RAISES_ASSERT(qr.logAbsDeterminant())
diff --git a/test/qr_colpivoting.cpp b/test/qr_colpivoting.cpp
index d224a9436..a563b5470 100644
--- a/test/qr_colpivoting.cpp
+++ b/test/qr_colpivoting.cpp
@@ -11,9 +11,12 @@
#include "main.h"
#include <Eigen/QR>
#include <Eigen/SVD>
+#include "solverbase.h"
template <typename MatrixType>
void cod() {
+ STATIC_CHECK(( internal::is_same<typename CompleteOrthogonalDecomposition<MatrixType>::StorageIndex,int>::value ));
+
Index rows = internal::random<Index>(2, EIGEN_TEST_MAX_SIZE);
Index cols = internal::random<Index>(2, EIGEN_TEST_MAX_SIZE);
Index cols2 = internal::random<Index>(2, EIGEN_TEST_MAX_SIZE);
@@ -46,12 +49,12 @@ void cod() {
MatrixType c = q * t * z * cod.colsPermutation().inverse();
VERIFY_IS_APPROX(matrix, c);
+ check_solverbase<MatrixType, MatrixType>(matrix, cod, rows, cols, cols2);
+
+ // Verify that we get the same minimum-norm solution as the SVD.
MatrixType exact_solution = MatrixType::Random(cols, cols2);
MatrixType rhs = matrix * exact_solution;
MatrixType cod_solution = cod.solve(rhs);
- VERIFY_IS_APPROX(rhs, matrix * cod_solution);
-
- // Verify that we get the same minimum-norm solution as the SVD.
JacobiSVD<MatrixType> svd(matrix, ComputeThinU | ComputeThinV);
MatrixType svd_solution = svd.solve(rhs);
VERIFY_IS_APPROX(cod_solution, svd_solution);
@@ -77,13 +80,13 @@ void cod_fixedsize() {
VERIFY(cod.isSurjective() == (rank == Cols));
VERIFY(cod.isInvertible() == (cod.isInjective() && cod.isSurjective()));
+ check_solverbase<Matrix<Scalar, Cols, Cols2>, Matrix<Scalar, Rows, Cols2> >(matrix, cod, Rows, Cols, Cols2);
+
+ // Verify that we get the same minimum-norm solution as the SVD.
Matrix<Scalar, Cols, Cols2> exact_solution;
exact_solution.setRandom(Cols, Cols2);
Matrix<Scalar, Rows, Cols2> rhs = matrix * exact_solution;
Matrix<Scalar, Cols, Cols2> cod_solution = cod.solve(rhs);
- VERIFY_IS_APPROX(rhs, matrix * cod_solution);
-
- // Verify that we get the same minimum-norm solution as the SVD.
JacobiSVD<MatrixType> svd(matrix, ComputeFullU | ComputeFullV);
Matrix<Scalar, Cols, Cols2> svd_solution = svd.solve(rhs);
VERIFY_IS_APPROX(cod_solution, svd_solution);
@@ -93,6 +96,8 @@ template<typename MatrixType> void qr()
{
using std::sqrt;
+ STATIC_CHECK(( internal::is_same<typename ColPivHouseholderQR<MatrixType>::StorageIndex,int>::value ));
+
Index rows = internal::random<Index>(2,EIGEN_TEST_MAX_SIZE), cols = internal::random<Index>(2,EIGEN_TEST_MAX_SIZE), cols2 = internal::random<Index>(2,EIGEN_TEST_MAX_SIZE);
Index rank = internal::random<Index>(1, (std::min)(rows, cols)-1);
@@ -133,13 +138,10 @@ template<typename MatrixType> void qr()
VERIFY_IS_APPROX_OR_LESS_THAN(y, x);
}
- MatrixType m2 = MatrixType::Random(cols,cols2);
- MatrixType m3 = m1*m2;
- m2 = MatrixType::Random(cols,cols2);
- m2 = qr.solve(m3);
- VERIFY_IS_APPROX(m3, m1*m2);
+ check_solverbase<MatrixType, MatrixType>(m1, qr, rows, cols, cols2);
{
+ MatrixType m2, m3;
Index size = rows;
do {
m1 = MatrixType::Random(size,size);
@@ -173,11 +175,8 @@ template<typename MatrixType, int Cols2> void qr_fixedsize()
Matrix<Scalar,Rows,Cols> c = qr.householderQ() * r * qr.colsPermutation().inverse();
VERIFY_IS_APPROX(m1, c);
- Matrix<Scalar,Cols,Cols2> m2 = Matrix<Scalar,Cols,Cols2>::Random(Cols,Cols2);
- Matrix<Scalar,Rows,Cols2> m3 = m1*m2;
- m2 = Matrix<Scalar,Cols,Cols2>::Random(Cols,Cols2);
- m2 = qr.solve(m3);
- VERIFY_IS_APPROX(m3, m1*m2);
+ check_solverbase<Matrix<Scalar,Cols,Cols2>, Matrix<Scalar,Rows,Cols2> >(m1, qr, Rows, Cols, Cols2);
+
// Verify that the absolute value of the diagonal elements in R are
  // non-increasing until they reach the singularity threshold.
RealScalar threshold =
@@ -264,9 +263,8 @@ template<typename MatrixType> void qr_invertible()
}
ColPivHouseholderQR<MatrixType> qr(m1);
- m3 = MatrixType::Random(size,size);
- m2 = qr.solve(m3);
- //VERIFY_IS_APPROX(m3, m1*m2);
+
+ check_solverbase<MatrixType, MatrixType>(m1, qr, size, size, size);
// now construct a matrix with prescribed determinant
m1.setZero();
@@ -286,6 +284,8 @@ template<typename MatrixType> void qr_verify_assert()
ColPivHouseholderQR<MatrixType> qr;
VERIFY_RAISES_ASSERT(qr.matrixQR())
VERIFY_RAISES_ASSERT(qr.solve(tmp))
+ VERIFY_RAISES_ASSERT(qr.transpose().solve(tmp))
+ VERIFY_RAISES_ASSERT(qr.adjoint().solve(tmp))
VERIFY_RAISES_ASSERT(qr.householderQ())
VERIFY_RAISES_ASSERT(qr.dimensionOfKernel())
VERIFY_RAISES_ASSERT(qr.isInjective())
@@ -296,6 +296,25 @@ template<typename MatrixType> void qr_verify_assert()
VERIFY_RAISES_ASSERT(qr.logAbsDeterminant())
}
+template<typename MatrixType> void cod_verify_assert()
+{
+ MatrixType tmp;
+
+ CompleteOrthogonalDecomposition<MatrixType> cod;
+ VERIFY_RAISES_ASSERT(cod.matrixQTZ())
+ VERIFY_RAISES_ASSERT(cod.solve(tmp))
+ VERIFY_RAISES_ASSERT(cod.transpose().solve(tmp))
+ VERIFY_RAISES_ASSERT(cod.adjoint().solve(tmp))
+ VERIFY_RAISES_ASSERT(cod.householderQ())
+ VERIFY_RAISES_ASSERT(cod.dimensionOfKernel())
+ VERIFY_RAISES_ASSERT(cod.isInjective())
+ VERIFY_RAISES_ASSERT(cod.isSurjective())
+ VERIFY_RAISES_ASSERT(cod.isInvertible())
+ VERIFY_RAISES_ASSERT(cod.pseudoInverse())
+ VERIFY_RAISES_ASSERT(cod.absDeterminant())
+ VERIFY_RAISES_ASSERT(cod.logAbsDeterminant())
+}
+
EIGEN_DECLARE_TEST(qr_colpivoting)
{
for(int i = 0; i < g_repeat; i++) {
@@ -330,6 +349,13 @@ EIGEN_DECLARE_TEST(qr_colpivoting)
CALL_SUBTEST_6(qr_verify_assert<MatrixXcf>());
CALL_SUBTEST_3(qr_verify_assert<MatrixXcd>());
+ CALL_SUBTEST_7(cod_verify_assert<Matrix3f>());
+ CALL_SUBTEST_8(cod_verify_assert<Matrix3d>());
+ CALL_SUBTEST_1(cod_verify_assert<MatrixXf>());
+ CALL_SUBTEST_2(cod_verify_assert<MatrixXd>());
+ CALL_SUBTEST_6(cod_verify_assert<MatrixXcf>());
+ CALL_SUBTEST_3(cod_verify_assert<MatrixXcd>());
+
// Test problem size constructors
CALL_SUBTEST_9(ColPivHouseholderQR<MatrixXf>(10, 20));
diff --git a/test/qr_fullpivoting.cpp b/test/qr_fullpivoting.cpp
index 150b4256c..f2d8cb33e 100644
--- a/test/qr_fullpivoting.cpp
+++ b/test/qr_fullpivoting.cpp
@@ -10,9 +10,12 @@
#include "main.h"
#include <Eigen/QR>
+#include "solverbase.h"
template<typename MatrixType> void qr()
{
+ STATIC_CHECK(( internal::is_same<typename FullPivHouseholderQR<MatrixType>::StorageIndex,int>::value ));
+
static const int Rows = MatrixType::RowsAtCompileTime, Cols = MatrixType::ColsAtCompileTime;
Index max_size = EIGEN_TEST_MAX_SIZE;
Index min_size = numext::maxi(1,EIGEN_TEST_MAX_SIZE/10);
@@ -48,13 +51,10 @@ template<typename MatrixType> void qr()
MatrixType tmp;
VERIFY_IS_APPROX(tmp.noalias() = qr.matrixQ() * r, (qr.matrixQ() * r).eval());
- MatrixType m2 = MatrixType::Random(cols,cols2);
- MatrixType m3 = m1*m2;
- m2 = MatrixType::Random(cols,cols2);
- m2 = qr.solve(m3);
- VERIFY_IS_APPROX(m3, m1*m2);
+ check_solverbase<MatrixType, MatrixType>(m1, qr, rows, cols, cols2);
{
+ MatrixType m2, m3;
Index size = rows;
do {
m1 = MatrixType::Random(size,size);
@@ -93,9 +93,7 @@ template<typename MatrixType> void qr_invertible()
VERIFY(qr.isInvertible());
VERIFY(qr.isSurjective());
- m3 = MatrixType::Random(size,size);
- m2 = qr.solve(m3);
- VERIFY_IS_APPROX(m3, m1*m2);
+ check_solverbase<MatrixType, MatrixType>(m1, qr, size, size, size);
// now construct a matrix with prescribed determinant
m1.setZero();
@@ -115,6 +113,8 @@ template<typename MatrixType> void qr_verify_assert()
FullPivHouseholderQR<MatrixType> qr;
VERIFY_RAISES_ASSERT(qr.matrixQR())
VERIFY_RAISES_ASSERT(qr.solve(tmp))
+ VERIFY_RAISES_ASSERT(qr.transpose().solve(tmp))
+ VERIFY_RAISES_ASSERT(qr.adjoint().solve(tmp))
VERIFY_RAISES_ASSERT(qr.matrixQ())
VERIFY_RAISES_ASSERT(qr.dimensionOfKernel())
VERIFY_RAISES_ASSERT(qr.isInjective())
diff --git a/test/ref.cpp b/test/ref.cpp
index 250135bdb..c0b6ffdcf 100644
--- a/test/ref.cpp
+++ b/test/ref.cpp
@@ -102,10 +102,14 @@ template<typename VectorType> void ref_vector(const VectorType& m)
Index i = internal::random<Index>(0,size-1);
Index bsize = internal::random<Index>(1,size-i);
- RefMat rm0 = v1;
- VERIFY_IS_EQUAL(rm0, v1);
- RefDynMat rv1 = v1;
- VERIFY_IS_EQUAL(rv1, v1);
+ { RefMat rm0 = v1; VERIFY_IS_EQUAL(rm0, v1); }
+ { RefMat rm0 = v1.block(0,0,size,1); VERIFY_IS_EQUAL(rm0, v1); }
+ { RefDynMat rv1 = v1; VERIFY_IS_EQUAL(rv1, v1); }
+ { RefDynMat rv1 = v1.block(0,0,size,1); VERIFY_IS_EQUAL(rv1, v1); }
+ { VERIFY_RAISES_ASSERT( RefMat rm0 = v1.block(0, 0, size, 0); EIGEN_UNUSED_VARIABLE(rm0); ); }
+ if(VectorType::SizeAtCompileTime!=1)
+ { VERIFY_RAISES_ASSERT( RefDynMat rv1 = v1.block(0, 0, size, 0); EIGEN_UNUSED_VARIABLE(rv1); ); }
+
RefDynMat rv2 = v1.segment(i,bsize);
VERIFY_IS_EQUAL(rv2, v1.segment(i,bsize));
rv2.setOnes();
diff --git a/test/reshape.cpp b/test/reshape.cpp
index 14a02bb3b..7b16742a2 100644
--- a/test/reshape.cpp
+++ b/test/reshape.cpp
@@ -49,10 +49,25 @@ void check_auto_reshape4x4(MatType m)
VERIFY(is_same_eq(m.template reshaped<Order>(AutoSize, fix< 1> ), m.template reshaped<Order>(v16, fix< 1>)));
}
+template <typename MatType>
+void check_direct_access_reshape4x4(MatType , internal::FixedInt<RowMajorBit>) {}
+
+template <typename MatType>
+void check_direct_access_reshape4x4(MatType m, internal::FixedInt<0>) {
+ VERIFY_IS_EQUAL(m.reshaped( 1, 16).data(), m.data());
+ VERIFY_IS_EQUAL(m.reshaped( 1, 16).innerStride(), 1);
+
+ VERIFY_IS_EQUAL(m.reshaped( 2, 8).data(), m.data());
+ VERIFY_IS_EQUAL(m.reshaped( 2, 8).innerStride(), 1);
+ VERIFY_IS_EQUAL(m.reshaped( 2, 8).outerStride(), 2);
+}
+
// just test a 4x4 matrix, enumerate all combinations manually
template <typename MatType>
void reshape4x4(MatType m)
{
+ typedef typename MatType::Scalar Scalar;
+
internal::VariableAndFixedInt<MatType::SizeAtCompileTime==Dynamic?-1: 1> v1( 1);
internal::VariableAndFixedInt<MatType::SizeAtCompileTime==Dynamic?-1: 2> v2( 2);
internal::VariableAndFixedInt<MatType::SizeAtCompileTime==Dynamic?-1: 4> v4( 4);
@@ -124,12 +139,7 @@ void reshape4x4(MatType m)
check_auto_reshape4x4<ColMajor> (m.transpose());
check_auto_reshape4x4<AutoOrder>(m.transpose());
- VERIFY_IS_EQUAL(m.reshaped( 1, 16).data(), m.data());
- VERIFY_IS_EQUAL(m.reshaped( 1, 16).innerStride(), 1);
-
- VERIFY_IS_EQUAL(m.reshaped( 2, 8).data(), m.data());
- VERIFY_IS_EQUAL(m.reshaped( 2, 8).innerStride(), 1);
- VERIFY_IS_EQUAL(m.reshaped( 2, 8).outerStride(), 2);
+ check_direct_access_reshape4x4(m,fix<MatType::Flags&RowMajorBit>);
if((MatType::Flags&RowMajorBit)==0)
{
@@ -150,8 +160,8 @@ void reshape4x4(MatType m)
VERIFY_IS_EQUAL( m28r1, m28r2);
VERIFY(is_same_eq(m.reshaped(v16,fix<1>), m.reshaped()));
- VERIFY_IS_EQUAL(m.reshaped(16,1), m.reshaped());
- VERIFY_IS_EQUAL(m.reshaped(1,16), m.reshaped().transpose());
+ VERIFY_IS_EQUAL(m.reshaped(16,1).eval(), m.reshaped().eval());
+ VERIFY_IS_EQUAL(m.reshaped(1,16).eval(), m.reshaped().transpose().eval());
VERIFY_IS_EQUAL(m.reshaped().reshaped(2,8), m.reshaped(2,8));
VERIFY_IS_EQUAL(m.reshaped().reshaped(4,4), m.reshaped(4,4));
VERIFY_IS_EQUAL(m.reshaped().reshaped(8,2), m.reshaped(8,2));
@@ -163,12 +173,30 @@ void reshape4x4(MatType m)
VERIFY(is_same_eq(m.reshaped(AutoSize,fix<1>), m.reshaped()));
VERIFY_IS_EQUAL(m.template reshaped<RowMajor>(fix<1>,AutoSize), m.transpose().reshaped().transpose());
+
+ // check assignment
+ {
+ Matrix<Scalar,Dynamic,1> m1x(m.size()); m1x.setRandom();
+ VERIFY_IS_APPROX(m.reshaped() = m1x, m1x);
+ VERIFY_IS_APPROX(m, m1x.reshaped(4,4));
+
+ Matrix<Scalar,Dynamic,Dynamic> m28(2,8); m28.setRandom();
+ VERIFY_IS_APPROX(m.reshaped(2,8) = m28, m28);
+ VERIFY_IS_APPROX(m, m28.reshaped(4,4));
+ VERIFY_IS_APPROX(m.template reshaped<RowMajor>(2,8) = m28, m28);
+
+ Matrix<Scalar,Dynamic,Dynamic> m24(2,4); m24.setRandom();
+ VERIFY_IS_APPROX(m(seq(0,last,2),all).reshaped(2,4) = m24, m24);
+
+ // check constness:
+ m.reshaped(2,8).nestedExpression() = m;
+ }
}
EIGEN_DECLARE_TEST(reshape)
{
- typedef Matrix<int,Dynamic,Dynamic> RowMatrixXi;
- typedef Matrix<int,4,4> RowMatrix4i;
+ typedef Matrix<int,Dynamic,Dynamic,RowMajor> RowMatrixXi;
+ typedef Matrix<int,4,4,RowMajor> RowMatrix4i;
MatrixXi mx = MatrixXi::Random(4, 4);
Matrix4i m4 = Matrix4i::Random(4, 4);
RowMatrixXi rmx = RowMatrixXi::Random(4, 4);
diff --git a/test/simplicial_cholesky.cpp b/test/simplicial_cholesky.cpp
index 314b903e2..e3c31e3ba 100644
--- a/test/simplicial_cholesky.cpp
+++ b/test/simplicial_cholesky.cpp
@@ -9,17 +9,17 @@
#include "sparse_solver.h"
-template<typename T, typename I> void test_simplicial_cholesky_T()
+template<typename T, typename I_> void test_simplicial_cholesky_T()
{
- typedef SparseMatrix<T,0,I> SparseMatrixType;
+ typedef SparseMatrix<T,0,I_> SparseMatrixType;
SimplicialCholesky<SparseMatrixType, Lower> chol_colmajor_lower_amd;
SimplicialCholesky<SparseMatrixType, Upper> chol_colmajor_upper_amd;
SimplicialLLT< SparseMatrixType, Lower> llt_colmajor_lower_amd;
SimplicialLLT< SparseMatrixType, Upper> llt_colmajor_upper_amd;
SimplicialLDLT< SparseMatrixType, Lower> ldlt_colmajor_lower_amd;
SimplicialLDLT< SparseMatrixType, Upper> ldlt_colmajor_upper_amd;
- SimplicialLDLT< SparseMatrixType, Lower, NaturalOrdering<I> > ldlt_colmajor_lower_nat;
- SimplicialLDLT< SparseMatrixType, Upper, NaturalOrdering<I> > ldlt_colmajor_upper_nat;
+ SimplicialLDLT< SparseMatrixType, Lower, NaturalOrdering<I_> > ldlt_colmajor_lower_nat;
+ SimplicialLDLT< SparseMatrixType, Upper, NaturalOrdering<I_> > ldlt_colmajor_upper_nat;
check_sparse_spd_solving(chol_colmajor_lower_amd);
check_sparse_spd_solving(chol_colmajor_upper_amd);
diff --git a/test/solverbase.h b/test/solverbase.h
new file mode 100644
index 000000000..13c09593a
--- /dev/null
+++ b/test/solverbase.h
@@ -0,0 +1,36 @@
+#ifndef TEST_SOLVERBASE_H
+#define TEST_SOLVERBASE_H
+
+template<typename DstType, typename RhsType, typename MatrixType, typename SolverType>
+void check_solverbase(const MatrixType& matrix, const SolverType& solver, Index rows, Index cols, Index cols2)
+{
+ // solve
+ DstType m2 = DstType::Random(cols,cols2);
+ RhsType m3 = matrix*m2;
+ DstType solver_solution = DstType::Random(cols,cols2);
+ solver._solve_impl(m3, solver_solution);
+ VERIFY_IS_APPROX(m3, matrix*solver_solution);
+ solver_solution = DstType::Random(cols,cols2);
+ solver_solution = solver.solve(m3);
+ VERIFY_IS_APPROX(m3, matrix*solver_solution);
+ // test solve with transposed
+ m3 = RhsType::Random(rows,cols2);
+ m2 = matrix.transpose()*m3;
+ RhsType solver_solution2 = RhsType::Random(rows,cols2);
+ solver.template _solve_impl_transposed<false>(m2, solver_solution2);
+ VERIFY_IS_APPROX(m2, matrix.transpose()*solver_solution2);
+ solver_solution2 = RhsType::Random(rows,cols2);
+ solver_solution2 = solver.transpose().solve(m2);
+ VERIFY_IS_APPROX(m2, matrix.transpose()*solver_solution2);
+ // test solve with conjugate transposed
+ m3 = RhsType::Random(rows,cols2);
+ m2 = matrix.adjoint()*m3;
+ solver_solution2 = RhsType::Random(rows,cols2);
+ solver.template _solve_impl_transposed<true>(m2, solver_solution2);
+ VERIFY_IS_APPROX(m2, matrix.adjoint()*solver_solution2);
+ solver_solution2 = RhsType::Random(rows,cols2);
+ solver_solution2 = solver.adjoint().solve(m2);
+ VERIFY_IS_APPROX(m2, matrix.adjoint()*solver_solution2);
+}
+
+#endif // TEST_SOLVERBASE_H
diff --git a/test/sparse.h b/test/sparse.h
index 9912e1e24..df471b4e2 100644
--- a/test/sparse.h
+++ b/test/sparse.h
@@ -14,7 +14,7 @@
#include "main.h"
-#if EIGEN_GNUC_AT_LEAST(4,0) && !defined __ICC && !defined(__clang__)
+#if EIGEN_HAS_CXX11
#ifdef min
#undef min
@@ -24,11 +24,9 @@
#undef max
#endif
-#include <tr1/unordered_map>
+#include <unordered_map>
#define EIGEN_UNORDERED_MAP_SUPPORT
-namespace std {
- using std::tr1::unordered_map;
-}
+
#endif
#ifdef EIGEN_GOOGLEHASH_SUPPORT
diff --git a/test/sparse_basic.cpp b/test/sparse_basic.cpp
index 30d3f1bba..9e735b38b 100644
--- a/test/sparse_basic.cpp
+++ b/test/sparse_basic.cpp
@@ -546,7 +546,7 @@ template<typename SparseMatrixType> void sparse_basic(const SparseMatrixType& re
{
DenseVector d = DenseVector::Random(rows);
DenseMatrix refMat2 = d.asDiagonal();
- SparseMatrixType m2(rows, rows);
+ SparseMatrixType m2;
m2 = d.asDiagonal();
VERIFY_IS_APPROX(m2, refMat2);
SparseMatrixType m3(d.asDiagonal());
@@ -554,6 +554,28 @@ template<typename SparseMatrixType> void sparse_basic(const SparseMatrixType& re
refMat2 += d.asDiagonal();
m2 += d.asDiagonal();
VERIFY_IS_APPROX(m2, refMat2);
+ m2.setZero(); m2 += d.asDiagonal();
+ refMat2.setZero(); refMat2 += d.asDiagonal();
+ VERIFY_IS_APPROX(m2, refMat2);
+ m2.setZero(); m2 -= d.asDiagonal();
+ refMat2.setZero(); refMat2 -= d.asDiagonal();
+ VERIFY_IS_APPROX(m2, refMat2);
+
+ initSparse<Scalar>(density, refMat2, m2);
+ m2.makeCompressed();
+ m2 += d.asDiagonal();
+ refMat2 += d.asDiagonal();
+ VERIFY_IS_APPROX(m2, refMat2);
+
+ initSparse<Scalar>(density, refMat2, m2);
+ m2.makeCompressed();
+ VectorXi res(rows);
+ for(Index i=0; i<rows; ++i)
+ res(i) = internal::random<int>(0,3);
+ m2.reserve(res);
+ m2 -= d.asDiagonal();
+ refMat2 -= d.asDiagonal();
+ VERIFY_IS_APPROX(m2, refMat2);
}
// test conservative resize
@@ -658,7 +680,8 @@ void big_sparse_triplet(Index rows, Index cols, double density) {
{
Index r = internal::random<Index>(0,rows-1);
Index c = internal::random<Index>(0,cols-1);
- Scalar v = internal::random<Scalar>();
+ // use positive values to prevent numerical cancellation errors in sum
+ Scalar v = numext::abs(internal::random<Scalar>());
triplets.push_back(TripletType(r,c,v));
sum += v;
}
diff --git a/test/sparse_solvers.cpp b/test/sparse_solvers.cpp
index aaf3d39c9..3b7cd7788 100644
--- a/test/sparse_solvers.cpp
+++ b/test/sparse_solvers.cpp
@@ -98,6 +98,19 @@ template<typename Scalar> void sparse_solvers(int rows, int cols)
initSparse<Scalar>(density, refMat2, m2, ForceNonZeroDiag|MakeLowerTriangular, &zeroCoords, &nonzeroCoords);
VERIFY_IS_APPROX(refMat2.template triangularView<Lower>().solve(vec2),
m2.template triangularView<Lower>().solve(vec3));
+
+ // test empty triangular matrix
+ {
+ m2.resize(0,0);
+ refMatB.resize(0,refMatB.cols());
+ DenseMatrix res = m2.template triangularView<Lower>().solve(refMatB);
+ VERIFY_IS_EQUAL(res.rows(),0);
+ VERIFY_IS_EQUAL(res.cols(),refMatB.cols());
+ res = refMatB;
+ m2.template triangularView<Lower>().solveInPlace(res);
+ VERIFY_IS_EQUAL(res.rows(),0);
+ VERIFY_IS_EQUAL(res.cols(),refMatB.cols());
+ }
}
}
diff --git a/test/sparseqr.cpp b/test/sparseqr.cpp
index 3ffe62314..3576cc626 100644
--- a/test/sparseqr.cpp
+++ b/test/sparseqr.cpp
@@ -43,6 +43,7 @@ int generate_sparse_rectangular_problem(MatrixType& A, DenseMat& dA, int maxRows
template<typename Scalar> void test_sparseqr_scalar()
{
+ typedef typename NumTraits<Scalar>::Real RealScalar;
typedef SparseMatrix<Scalar,ColMajor> MatrixType;
typedef Matrix<Scalar,Dynamic,Dynamic> DenseMat;
typedef Matrix<Scalar,Dynamic,1> DenseVector;
@@ -91,14 +92,34 @@ template<typename Scalar> void test_sparseqr_scalar()
exit(0);
return;
}
-
- VERIFY_IS_APPROX(A * x, b);
-
- //Compare with a dense QR solver
+
+ // Compare with a dense QR solver
ColPivHouseholderQR<DenseMat> dqr(dA);
refX = dqr.solve(b);
- VERIFY_IS_EQUAL(dqr.rank(), solver.rank());
+ bool rank_deficient = A.cols()>A.rows() || dqr.rank()<A.cols();
+ if(rank_deficient)
+ {
+ // rank deficient problem -> we might have to increase the threshold
+ // to get a correct solution.
+ RealScalar th = RealScalar(20)*dA.colwise().norm().maxCoeff()*(A.rows()+A.cols()) * NumTraits<RealScalar>::epsilon();
+ for(Index k=0; (k<16) && !test_isApprox(A*x,b); ++k)
+ {
+ th *= RealScalar(10);
+ solver.setPivotThreshold(th);
+ solver.compute(A);
+ x = solver.solve(b);
+ }
+ }
+
+ VERIFY_IS_APPROX(A * x, b);
+
+ // For rank-deficient problems, the estimated rank might
+ // be slightly off, so let's only raise a warning in such cases.
+ if(rank_deficient) ++g_test_level;
+ VERIFY_IS_EQUAL(solver.rank(), dqr.rank());
+ if(rank_deficient) --g_test_level;
+
if(solver.rank()==A.cols()) // full rank
VERIFY_IS_APPROX(x, refX);
// else
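The retry loop added in this hunk handles rank-deficient systems by growing the QR pivot threshold geometrically until the residual test passes. A minimal standalone sketch of the same idea, with a hypothetical helper, arbitrary starting threshold and tolerance, not part of the patch:

#include <Eigen/Sparse>
#include <Eigen/SparseQR>

// Solve A*x = b with SparseQR, growing the pivot threshold until the
// residual is acceptable (useful when A is rank deficient).
Eigen::VectorXd solve_with_threshold_retry(const Eigen::SparseMatrix<double>& A,
                                           const Eigen::VectorXd& b)
{
  Eigen::SparseQR<Eigen::SparseMatrix<double>, Eigen::COLAMDOrdering<int> > qr(A);
  Eigen::VectorXd x = qr.solve(b);
  double th = 1e-12;  // arbitrary starting threshold (assumption)
  for (int k = 0; k < 16 && (A * x - b).norm() > 1e-9 * b.norm(); ++k) {
    th *= 10.0;               // grow the threshold geometrically
    qr.setPivotThreshold(th);
    qr.compute(A);            // refactorize with the new threshold
    x = qr.solve(b);
  }
  return x;
}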
diff --git a/test/svd_common.h b/test/svd_common.h
index cba066593..5c0f2a0e4 100644
--- a/test/svd_common.h
+++ b/test/svd_common.h
@@ -17,6 +17,7 @@
#endif
#include "svd_fill.h"
+#include "solverbase.h"
// Check that the matrix m is properly reconstructed and that the U and V factors are unitary
// The SVD must have already been computed.
@@ -219,12 +220,33 @@ void svd_min_norm(const MatrixType& m, unsigned int computationOptions)
VERIFY_IS_APPROX(x21, x3);
}
+template<typename MatrixType, typename SolverType>
+void svd_test_solvers(const MatrixType& m, const SolverType& solver) {
+ Index rows, cols, cols2;
+
+ rows = m.rows();
+ cols = m.cols();
+
+ if(MatrixType::ColsAtCompileTime==Dynamic)
+ {
+ cols2 = internal::random<int>(2,EIGEN_TEST_MAX_SIZE);
+ }
+ else
+ {
+ cols2 = cols;
+ }
+ typedef Matrix<typename MatrixType::Scalar, MatrixType::ColsAtCompileTime, MatrixType::ColsAtCompileTime> CMatrixType;
+ check_solverbase<CMatrixType, MatrixType>(m, solver, rows, cols, cols2);
+}
+
// Check full, compare_to_full, least_square, and min_norm for all possible compute-options
template<typename SvdType, typename MatrixType>
void svd_test_all_computation_options(const MatrixType& m, bool full_only)
{
// if (QRPreconditioner == NoQRPreconditioner && m.rows() != m.cols())
// return;
+ STATIC_CHECK(( internal::is_same<typename SvdType::StorageIndex,int>::value ));
+
SvdType fullSvd(m, ComputeFullU|ComputeFullV);
CALL_SUBTEST(( svd_check_full(m, fullSvd) ));
CALL_SUBTEST(( svd_least_square<SvdType>(m, ComputeFullU | ComputeFullV) ));
@@ -234,6 +256,9 @@ void svd_test_all_computation_options(const MatrixType& m, bool full_only)
// remark #111: statement is unreachable
#pragma warning disable 111
#endif
+
+ svd_test_solvers(m, fullSvd);
+
if(full_only)
return;
@@ -448,6 +473,8 @@ void svd_verify_assert(const MatrixType& m)
VERIFY_RAISES_ASSERT(svd.singularValues())
VERIFY_RAISES_ASSERT(svd.matrixV())
VERIFY_RAISES_ASSERT(svd.solve(rhs))
+ VERIFY_RAISES_ASSERT(svd.transpose().solve(rhs))
+ VERIFY_RAISES_ASSERT(svd.adjoint().solve(rhs))
MatrixType a = MatrixType::Zero(rows, cols);
a.setZero();
svd.compute(a, 0);
diff --git a/test/symbolic_index.cpp b/test/symbolic_index.cpp
index ea73e99e9..b114cbb95 100644
--- a/test/symbolic_index.cpp
+++ b/test/symbolic_index.cpp
@@ -19,44 +19,6 @@
#include "main.h"
-template<typename T>
-bool match(const T& xpr, std::string ref, std::string str_xpr = "") {
- EIGEN_UNUSED_VARIABLE(str_xpr);
- std::stringstream str;
- str << xpr;
- if(!(str.str() == ref))
- std::cout << str_xpr << "\n" << xpr << "\n\n";
- return str.str() == ref;
-}
-
-#define MATCH(X,R) match(X, R, #X)
-
-template<typename T1,typename T2>
-typename internal::enable_if<internal::is_same<T1,T2>::value,bool>::type
-is_same_fixed(const T1& a, const T2& b)
-{
- return (Index(a) == Index(b));
-}
-
-template<typename T1,typename T2>
-bool is_same_seq(const T1& a, const T2& b)
-{
- bool ok = a.first()==b.first() && a.size() == b.size() && Index(a.incrObject())==Index(b.incrObject());;
- if(!ok)
- {
- std::cerr << "seqN(" << a.first() << ", " << a.size() << ", " << Index(a.incrObject()) << ") != ";
- std::cerr << "seqN(" << b.first() << ", " << b.size() << ", " << Index(b.incrObject()) << ")\n";
- }
- return ok;
-}
-
-template<typename T1,typename T2>
-typename internal::enable_if<internal::is_same<T1,T2>::value,bool>::type
-is_same_type(const T1&, const T2&)
-{
- return true;
-}
-
template<typename T1,typename T2>
bool is_same_symb(const T1& a, const T2& b, Index size)
{
diff --git a/test/triangular.cpp b/test/triangular.cpp
index 99ef1dcda..0fca5e3b9 100644
--- a/test/triangular.cpp
+++ b/test/triangular.cpp
@@ -129,6 +129,22 @@ template<typename MatrixType> void triangular_square(const MatrixType& m)
VERIFY_IS_APPROX(m1.template selfadjointView<Upper>().diagonal(), m1.diagonal());
+ m3.setRandom();
+ const MatrixType& m3c(m3);
+ VERIFY( is_same_type(m3c.template triangularView<Lower>(),m3.template triangularView<Lower>().template conjugateIf<false>()) );
+ VERIFY( is_same_type(m3c.template triangularView<Lower>().conjugate(),m3.template triangularView<Lower>().template conjugateIf<true>()) );
+ VERIFY_IS_APPROX(m3.template triangularView<Lower>().template conjugateIf<true>().toDenseMatrix(),
+ m3.conjugate().template triangularView<Lower>().toDenseMatrix());
+ VERIFY_IS_APPROX(m3.template triangularView<Lower>().template conjugateIf<false>().toDenseMatrix(),
+ m3.template triangularView<Lower>().toDenseMatrix());
+
+ VERIFY( is_same_type(m3c.template selfadjointView<Lower>(),m3.template selfadjointView<Lower>().template conjugateIf<false>()) );
+ VERIFY( is_same_type(m3c.template selfadjointView<Lower>().conjugate(),m3.template selfadjointView<Lower>().template conjugateIf<true>()) );
+ VERIFY_IS_APPROX(m3.template selfadjointView<Lower>().template conjugateIf<true>().toDenseMatrix(),
+ m3.conjugate().template selfadjointView<Lower>().toDenseMatrix());
+ VERIFY_IS_APPROX(m3.template selfadjointView<Lower>().template conjugateIf<false>().toDenseMatrix(),
+ m3.template selfadjointView<Lower>().toDenseMatrix());
+
}
diff --git a/test/type_alias.cpp b/test/type_alias.cpp
new file mode 100644
index 000000000..9a6616c72
--- /dev/null
+++ b/test/type_alias.cpp
@@ -0,0 +1,48 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2019 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+
+EIGEN_DECLARE_TEST(type_alias)
+{
+ using namespace internal;
+
+ // To warm up, some basic checks:
+ STATIC_CHECK((is_same<MatrixXd,Matrix<double,Dynamic,Dynamic> >::value));
+ STATIC_CHECK((is_same<Matrix2f,Matrix<float,2,2> >::value));
+ STATIC_CHECK((is_same<Array33i,Array<int,3,3> >::value));
+
+#if EIGEN_HAS_CXX11
+
+ STATIC_CHECK((is_same<MatrixX<double>, MatrixXd>::value));
+ STATIC_CHECK((is_same<MatrixX<int>, MatrixXi>::value));
+ STATIC_CHECK((is_same<Matrix2<int>, Matrix2i>::value));
+ STATIC_CHECK((is_same<Matrix2X<float>, Matrix2Xf>::value));
+ STATIC_CHECK((is_same<MatrixX4<double>, MatrixX4d>::value));
+ STATIC_CHECK((is_same<VectorX<int>, VectorXi>::value));
+ STATIC_CHECK((is_same<Vector2<float>, Vector2f>::value));
+ STATIC_CHECK((is_same<RowVectorX<int>, RowVectorXi>::value));
+ STATIC_CHECK((is_same<RowVector2<float>, RowVector2f>::value));
+
+ STATIC_CHECK((is_same<ArrayXX<float>, ArrayXXf>::value));
+ STATIC_CHECK((is_same<Array33<int>, Array33i>::value));
+ STATIC_CHECK((is_same<Array2X<float>, Array2Xf>::value));
+ STATIC_CHECK((is_same<ArrayX4<double>, ArrayX4d>::value));
+ STATIC_CHECK((is_same<ArrayX<double>, ArrayXd>::value));
+ STATIC_CHECK((is_same<Array4<double>, Array4d>::value));
+
+ STATIC_CHECK((is_same<Vector<float,3>, Vector3f>::value));
+ STATIC_CHECK((is_same<Vector<int,Dynamic>, VectorXi>::value));
+ STATIC_CHECK((is_same<RowVector<float,3>, RowVector3f>::value));
+ STATIC_CHECK((is_same<RowVector<int,Dynamic>, RowVectorXi>::value));
+
+#else
+ std::cerr << "WARNING: c++11 type aliases not tested.\n";
+#endif
+}
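The aliases checked above are mostly useful in generic code where the scalar type is a template parameter. A small usage sketch, assuming C++11 and an Eigen version that provides these aliases (hypothetical function, not part of the patch):

#include <Eigen/Dense>

// Generic least-squares helper written against the scalar-parameterized
// aliases tested above (MatrixX<T>, VectorX<T>); requires EIGEN_HAS_CXX11.
template <typename Scalar>
Eigen::VectorX<Scalar> solve_normal_equations(const Eigen::MatrixX<Scalar>& A,
                                              const Eigen::VectorX<Scalar>& b)
{
  // Solve (A^T A) x = A^T b via LDLT; illustrative only.
  return (A.transpose() * A).ldlt().solve(A.transpose() * b);
}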
diff --git a/test/umeyama.cpp b/test/umeyama.cpp
index 1590a0a81..170c28a61 100644
--- a/test/umeyama.cpp
+++ b/test/umeyama.cpp
@@ -27,7 +27,7 @@ Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> randMatrixUnitary(int size)
MatrixType Q;
int max_tries = 40;
- double is_unitary = false;
+ bool is_unitary = false;
while (!is_unitary && max_tries > 0)
{
diff --git a/test/vectorization_logic.cpp b/test/vectorization_logic.cpp
index e2146eef3..4bf3b3db2 100644
--- a/test/vectorization_logic.cpp
+++ b/test/vectorization_logic.cpp
@@ -187,18 +187,19 @@ struct vectorization_logic
VERIFY(test_assign(Matrix33c().row(2),Matrix33c().row(1)+Matrix33c().row(1),
LinearTraversal,CompleteUnrolling));
VERIFY(test_assign(Vector3(),Vector3()+Vector3(),
- EIGEN_UNALIGNED_VECTORIZE ? (HalfPacketSize==1 ? InnerVectorizedTraversal : LinearVectorizedTraversal) : (HalfPacketSize==1 ? InnerVectorizedTraversal : LinearTraversal), CompleteUnrolling));
+ sizeof(Scalar)==16 ? InnerVectorizedTraversal : (EIGEN_UNALIGNED_VECTORIZE ? LinearVectorizedTraversal : LinearTraversal), CompleteUnrolling));
VERIFY(test_assign(Matrix33c().col(0),Matrix33c().col(1)+Matrix33c().col(1),
- EIGEN_UNALIGNED_VECTORIZE ? (HalfPacketSize==1 ? InnerVectorizedTraversal : LinearVectorizedTraversal) : (HalfPacketSize==1 ? SliceVectorizedTraversal : LinearTraversal),
- ((!EIGEN_UNALIGNED_VECTORIZE) && HalfPacketSize==1) ? NoUnrolling : CompleteUnrolling));
+ EIGEN_UNALIGNED_VECTORIZE ? (sizeof(Scalar)==16 ? InnerVectorizedTraversal : LinearVectorizedTraversal)
+ : (sizeof(Scalar)==16 ? SliceVectorizedTraversal : LinearTraversal),
+ ((!EIGEN_UNALIGNED_VECTORIZE) && (sizeof(Scalar)==16)) ? NoUnrolling : CompleteUnrolling));
VERIFY(test_assign(Matrix3(),Matrix3().cwiseProduct(Matrix3()),
LinearVectorizedTraversal,CompleteUnrolling));
VERIFY(test_assign(Matrix<Scalar,17,17>(),Matrix<Scalar,17,17>()+Matrix<Scalar,17,17>(),
- HalfPacketSize==1 ? InnerVectorizedTraversal :
+ sizeof(Scalar)==16 ? InnerVectorizedTraversal :
EIGEN_UNALIGNED_VECTORIZE ? LinearVectorizedTraversal :
- LinearTraversal,
+ LinearTraversal,
NoUnrolling));
VERIFY(test_assign(Matrix11(), Matrix11()+Matrix11(),InnerVectorizedTraversal,CompleteUnrolling));
@@ -290,10 +291,6 @@ struct vectorization_logic_half
typedef Matrix<Scalar,5*PacketSize,7,ColMajor> Matrix57;
typedef Matrix<Scalar,3*PacketSize,5,ColMajor> Matrix35;
typedef Matrix<Scalar,5*PacketSize,7,DontAlign|ColMajor> Matrix57u;
-// typedef Matrix<Scalar,(Matrix11::Flags&RowMajorBit)?16:4*PacketSize,(Matrix11::Flags&RowMajorBit)?4*PacketSize:16> Matrix44;
-// typedef Matrix<Scalar,(Matrix11::Flags&RowMajorBit)?16:4*PacketSize,(Matrix11::Flags&RowMajorBit)?4*PacketSize:16,DontAlign|EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION> Matrix44u;
-// typedef Matrix<Scalar,4*PacketSize,4*PacketSize,ColMajor> Matrix44c;
-// typedef Matrix<Scalar,4*PacketSize,4*PacketSize,RowMajor> Matrix44r;
typedef Matrix<Scalar,
(PacketSize==16 ? 8 : PacketSize==8 ? 4 : PacketSize==4 ? 2 : PacketSize==2 ? 1 : /*PacketSize==1 ?*/ 1),
@@ -351,13 +348,15 @@ struct vectorization_logic_half
VERIFY(test_assign(Matrix33c().row(2),Matrix33c().row(1)+Matrix33c().row(1),
LinearTraversal,CompleteUnrolling));
VERIFY(test_assign(Matrix33c().col(0),Matrix33c().col(1)+Matrix33c().col(1),
- EIGEN_UNALIGNED_VECTORIZE ? (PacketSize==1 ? InnerVectorizedTraversal : LinearVectorizedTraversal) : LinearTraversal,CompleteUnrolling));
+ EIGEN_UNALIGNED_VECTORIZE ? (sizeof(Scalar)==16 ? InnerVectorizedTraversal : LinearVectorizedTraversal)
+ : (sizeof(Scalar)==16 ? SliceVectorizedTraversal : LinearTraversal),
+ ((!EIGEN_UNALIGNED_VECTORIZE) && (sizeof(Scalar)==16)) ? NoUnrolling : CompleteUnrolling));
VERIFY(test_assign(Matrix3(),Matrix3().cwiseQuotient(Matrix3()),
PacketTraits::HasDiv ? LinearVectorizedTraversal : LinearTraversal,CompleteUnrolling));
VERIFY(test_assign(Matrix<Scalar,17,17>(),Matrix<Scalar,17,17>()+Matrix<Scalar,17,17>(),
- EIGEN_UNALIGNED_VECTORIZE ? (PacketSize==1 ? InnerVectorizedTraversal : LinearVectorizedTraversal) : LinearTraversal,
+ sizeof(Scalar)==16 ? InnerVectorizedTraversal : (EIGEN_UNALIGNED_VECTORIZE ? LinearVectorizedTraversal : LinearTraversal),
NoUnrolling));
VERIFY(test_assign(Matrix11(),Matrix<Scalar,17,17>().template block<PacketSize,PacketSize>(2,3)+Matrix<Scalar,17,17>().template block<PacketSize,PacketSize>(8,4),
diff --git a/test/vectorwiseop.cpp b/test/vectorwiseop.cpp
index 37dbcf970..8ee58841a 100644
--- a/test/vectorwiseop.cpp
+++ b/test/vectorwiseop.cpp
@@ -134,6 +134,7 @@ template<typename MatrixType> void vectorwiseop_matrix(const MatrixType& m)
typedef Matrix<Scalar, 1, MatrixType::ColsAtCompileTime> RowVectorType;
typedef Matrix<RealScalar, MatrixType::RowsAtCompileTime, 1> RealColVectorType;
typedef Matrix<RealScalar, 1, MatrixType::ColsAtCompileTime> RealRowVectorType;
+ typedef Matrix<Scalar,Dynamic,Dynamic> MatrixX;
Index rows = m.rows();
Index cols = m.cols();
@@ -149,6 +150,19 @@ template<typename MatrixType> void vectorwiseop_matrix(const MatrixType& m)
RealColVectorType rcres;
RealRowVectorType rrres;
+ // test broadcast assignment
+ m2 = m1;
+ m2.colwise() = colvec;
+ for(Index j=0; j<cols; ++j)
+ VERIFY_IS_APPROX(m2.col(j), colvec);
+ m2.rowwise() = rowvec;
+ for(Index i=0; i<rows; ++i)
+ VERIFY_IS_APPROX(m2.row(i), rowvec);
+ if(rows>1)
+ VERIFY_RAISES_ASSERT(m2.colwise() = colvec.transpose());
+ if(cols>1)
+ VERIFY_RAISES_ASSERT(m2.rowwise() = rowvec.transpose());
+
// test addition
m2 = m1;
@@ -247,6 +261,26 @@ template<typename MatrixType> void vectorwiseop_matrix(const MatrixType& m)
m1 = m1.rowwise() - (m1.colwise().sum()/RealScalar(m1.rows()));
VERIFY_IS_APPROX( m1, m2 );
VERIFY_EVALUATION_COUNT( m2 = (m1.rowwise() - m1.colwise().sum()/RealScalar(m1.rows())), (MatrixType::RowsAtCompileTime!=1 ? 1 : 0) );
+
+ // test empty expressions
+ VERIFY_IS_APPROX(m1.matrix().middleCols(0,0).rowwise().sum().eval(), MatrixX::Zero(rows,1));
+ VERIFY_IS_APPROX(m1.matrix().middleRows(0,0).colwise().sum().eval(), MatrixX::Zero(1,cols));
+ VERIFY_IS_APPROX(m1.matrix().middleCols(0,fix<0>).rowwise().sum().eval(), MatrixX::Zero(rows,1));
+ VERIFY_IS_APPROX(m1.matrix().middleRows(0,fix<0>).colwise().sum().eval(), MatrixX::Zero(1,cols));
+
+ VERIFY_IS_APPROX(m1.matrix().middleCols(0,0).rowwise().prod().eval(), MatrixX::Ones(rows,1));
+ VERIFY_IS_APPROX(m1.matrix().middleRows(0,0).colwise().prod().eval(), MatrixX::Ones(1,cols));
+ VERIFY_IS_APPROX(m1.matrix().middleCols(0,fix<0>).rowwise().prod().eval(), MatrixX::Ones(rows,1));
+ VERIFY_IS_APPROX(m1.matrix().middleRows(0,fix<0>).colwise().prod().eval(), MatrixX::Ones(1,cols));
+
+ VERIFY_IS_APPROX(m1.matrix().middleCols(0,0).rowwise().squaredNorm().eval(), MatrixX::Zero(rows,1));
+
+ VERIFY_RAISES_ASSERT(m1.real().middleCols(0,0).rowwise().minCoeff().eval());
+ VERIFY_RAISES_ASSERT(m1.real().middleRows(0,0).colwise().maxCoeff().eval());
+ VERIFY_IS_EQUAL(m1.real().middleRows(0,0).rowwise().maxCoeff().eval().rows(),0);
+ VERIFY_IS_EQUAL(m1.real().middleCols(0,0).colwise().maxCoeff().eval().cols(),0);
+ VERIFY_IS_EQUAL(m1.real().middleRows(0,fix<0>).rowwise().maxCoeff().eval().rows(),0);
+ VERIFY_IS_EQUAL(m1.real().middleCols(0,fix<0>).colwise().maxCoeff().eval().cols(),0);
}
EIGEN_DECLARE_TEST(vectorwiseop)
@@ -256,6 +290,7 @@ EIGEN_DECLARE_TEST(vectorwiseop)
CALL_SUBTEST_3( vectorwiseop_array(ArrayXXf(3, 4)) );
CALL_SUBTEST_4( vectorwiseop_matrix(Matrix4cf()) );
CALL_SUBTEST_5( vectorwiseop_matrix(Matrix4f()) );
+ CALL_SUBTEST_5( vectorwiseop_matrix(Vector4f()) );
CALL_SUBTEST_5( vectorwiseop_matrix(Matrix<float,4,5>()) );
CALL_SUBTEST_6( vectorwiseop_matrix(MatrixXd(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
CALL_SUBTEST_7( vectorwiseop_matrix(VectorXd(internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
diff --git a/test/zerosized.cpp b/test/zerosized.cpp
index edd1f6925..07afd0f86 100644
--- a/test/zerosized.cpp
+++ b/test/zerosized.cpp
@@ -16,9 +16,18 @@ template<typename MatrixType> void zeroReduction(const MatrixType& m) {
VERIFY(!m.any());
VERIFY(m.prod()==1);
VERIFY(m.sum()==0);
+ VERIFY(m.norm()==0);
+ VERIFY(m.squaredNorm()==0);
VERIFY(m.count()==0);
VERIFY(m.allFinite());
VERIFY(!m.hasNaN());
+ VERIFY_RAISES_ASSERT( m.minCoeff() );
+ VERIFY_RAISES_ASSERT( m.maxCoeff() );
+ Index i,j;
+ VERIFY_RAISES_ASSERT( m.minCoeff(&i,&j) );
+ VERIFY_RAISES_ASSERT( m.maxCoeff(&i,&j) );
+ VERIFY_RAISES_ASSERT( m.reshaped().minCoeff(&i) );
+ VERIFY_RAISES_ASSERT( m.reshaped().maxCoeff(&i) );
}
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorBase.h b/unsupported/Eigen/CXX11/src/Tensor/TensorBase.h
index 7d9afa685..dbacf494e 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorBase.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorBase.h
@@ -244,9 +244,11 @@ class TensorBase<Derived, ReadOnlyAccessors>
}
EIGEN_DEVICE_FUNC
- EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_conjugate_op<Scalar>, const Derived>
+ EIGEN_STRONG_INLINE const typename internal::conditional<NumTraits<CoeffReturnType>::IsComplex,
+ TensorCwiseUnaryOp<internal::scalar_conjugate_op<Scalar>, const Derived>,
+ Derived>::type
conjugate() const {
- return unaryExpr(internal::scalar_conjugate_op<Scalar>());
+ return choose(Cond<NumTraits<CoeffReturnType>::IsComplex>(), unaryExpr(internal::scalar_conjugate_op<Scalar>()), derived());
}
EIGEN_DEVICE_FUNC
@@ -339,10 +341,13 @@ class TensorBase<Derived, ReadOnlyAccessors>
return cwiseMin(constant(threshold));
}
- template <typename NewType> EIGEN_DEVICE_FUNC
- EIGEN_STRONG_INLINE const TensorConversionOp<NewType, const Derived>
+ template<typename NewType>
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const typename internal::conditional<internal::is_same<NewType, CoeffReturnType>::value,
+ Derived,
+ TensorConversionOp<NewType, const Derived> >::type
cast() const {
- return TensorConversionOp<NewType, const Derived>(derived());
+ return choose(Cond<internal::is_same<NewType, CoeffReturnType>::value>(), derived(), TensorConversionOp<NewType, const Derived>(derived()));
}
EIGEN_DEVICE_FUNC
@@ -628,26 +633,26 @@ class TensorBase<Derived, ReadOnlyAccessors>
}
template <typename Dims> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
- const TensorReductionOp<internal::AndReducer, const Dims, const TensorConversionOp<bool, const Derived> >
+ const TensorReductionOp<internal::AndReducer, const Dims, const typename internal::conditional<internal::is_same<bool, CoeffReturnType>::value, Derived, TensorConversionOp<bool, const Derived> >::type >
all(const Dims& dims) const {
return cast<bool>().reduce(dims, internal::AndReducer());
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
- const TensorReductionOp<internal::AndReducer, const DimensionList<Index, NumDimensions>, const TensorConversionOp<bool, const Derived> >
+ const TensorReductionOp<internal::AndReducer, const DimensionList<Index, NumDimensions>, const typename internal::conditional<internal::is_same<bool, CoeffReturnType>::value, Derived, TensorConversionOp<bool, const Derived> >::type >
all() const {
DimensionList<Index, NumDimensions> in_dims;
return cast<bool>().reduce(in_dims, internal::AndReducer());
}
template <typename Dims> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
- const TensorReductionOp<internal::OrReducer, const Dims, const TensorConversionOp<bool, const Derived> >
+ const TensorReductionOp<internal::OrReducer, const Dims, const typename internal::conditional<internal::is_same<bool, CoeffReturnType>::value, Derived, TensorConversionOp<bool, const Derived> >::type >
any(const Dims& dims) const {
return cast<bool>().reduce(dims, internal::OrReducer());
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
- const TensorReductionOp<internal::OrReducer, const DimensionList<Index, NumDimensions>, const TensorConversionOp<bool, const Derived> >
+ const TensorReductionOp<internal::OrReducer, const DimensionList<Index, NumDimensions>, const typename internal::conditional<internal::is_same<bool, CoeffReturnType>::value, Derived, TensorConversionOp<bool, const Derived> >::type >
any() const {
DimensionList<Index, NumDimensions> in_dims;
return cast<bool>().reduce(in_dims, internal::OrReducer());
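The conjugate(), cast(), all(), and any() changes above all rely on the same idiom: the return type is computed with internal::conditional so that a no-op request (conjugating a real tensor, casting to the same type) returns the expression itself instead of wrapping it, and choose(Cond<...>(), a, b) picks the matching branch at compile time. A minimal analogue in standard C++, using hypothetical names that are not Eigen's actual definitions:

#include <type_traits>

// Tag-dispatched overload pair: only the branch matching the tag is returned.
template <bool B> struct CondTag {};

template <class T1, class T2>
T1 choose_branch(CondTag<true>, const T1& if_true, const T2&) { return if_true; }

template <class T1, class T2>
T2 choose_branch(CondTag<false>, const T1&, const T2& if_false) { return if_false; }

// Return the wrapped expression only when an operation is actually needed;
// otherwise return the plain argument, with a matching return type.
template <bool NeedsOp, class Wrapped, class Plain>
typename std::conditional<NeedsOp, Wrapped, Plain>::type
select_expression(const Wrapped& wrapped, const Plain& plain)
{
  return choose_branch(CondTag<NeedsOp>(), wrapped, plain);
}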
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h
index 61a4e1a3a..6ca881f27 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h
@@ -102,7 +102,7 @@ struct traits<TensorContractionOp<Dimensions, LhsXprType, RhsXprType, OutputKern
typedef typename remove_reference<RhsNested>::type _RhsNested;
// From NumDims below.
- static const int NumDimensions = traits<RhsXprType>::NumDimensions + traits<RhsXprType>::NumDimensions - 2 * array_size<Dimensions>::value;
+ static const int NumDimensions = traits<LhsXprType>::NumDimensions + traits<RhsXprType>::NumDimensions - 2 * array_size<Dimensions>::value;
static const int Layout = traits<LhsXprType>::Layout;
typedef typename conditional<Pointer_type_promotion<typename LhsXprType::Scalar, Scalar>::val,
typename traits<LhsXprType>::PointerType, typename traits<RhsXprType>::PointerType>::type PointerType;
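The one-line fix above corrects the rank bookkeeping for asymmetric contractions: the result has lhs-rank + rhs-rank minus two dimensions per contracted index pair, so a 3-D tensor contracted with a 4-D tensor over one pair yields 3 + 4 - 2*1 = 5 dimensions, whereas the old expression counted the rhs rank twice. A small sketch of what that number corresponds to in user code, assuming the unsupported Tensor module:

#include <unsupported/Eigen/CXX11/Tensor>

void contraction_rank_example()
{
  Eigen::Tensor<float, 3> lhs(2, 3, 4);
  Eigen::Tensor<float, 4> rhs(4, 5, 6, 7);
  lhs.setRandom();
  rhs.setRandom();
  // Contract lhs dimension 2 against rhs dimension 0.
  Eigen::array<Eigen::IndexPair<int>, 1> dims = { Eigen::IndexPair<int>(2, 0) };
  // Result rank: 3 + 4 - 2*1 = 5, with dimensions 2 x 3 x 5 x 6 x 7.
  Eigen::Tensor<float, 5> result = lhs.contract(rhs, dims);
}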
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionBlocking.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionBlocking.h
index 71fd19774..c51f3f8dd 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionBlocking.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionBlocking.h
@@ -51,6 +51,10 @@ class TensorContractionBlocking {
else {
computeProductBlockingSizes<LhsScalar, RhsScalar, 1>(kc_, nc_, mc_, num_threads);
}
+
+ const int rhs_packet_size = internal::packet_traits<RhsScalar>::size;
+ kc_ = (rhs_packet_size <= 8 || kc_ <= rhs_packet_size) ?
+ kc_ : (kc_ / rhs_packet_size) * rhs_packet_size;
}
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE StorageIndex kc() const { return kc_; }
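The adjustment above keeps the depth blocking parameter kc_ a multiple of the rhs packet size once that packet size exceeds 8, matching the GEMM kernel's assumption about the k dimension. The rounding rule in isolation, as a hypothetical standalone helper:

// Round a depth block size down to a multiple of the packet size, but leave
// it untouched for small packets or when the block already fits in one packet.
inline long round_down_block_depth(long kc, int packet_size)
{
  if (packet_size <= 8 || kc <= packet_size)
    return kc;
  return (kc / packet_size) * packet_size;
}
// e.g. round_down_block_depth(100, 16) == 96, round_down_block_depth(12, 16) == 12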
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionGpu.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionGpu.h
index 056665749..5d19652e6 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionGpu.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionGpu.h
@@ -1219,9 +1219,6 @@ template<typename Indices, typename LeftArgType, typename RightArgType, typename
struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType, OutputKernelType>, GpuDevice> :
public TensorContractionEvaluatorBase<TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType, OutputKernelType>, GpuDevice> > {
- static_assert(std::is_same<OutputKernelType, const NoOpOutputKernel>::value,
- "GPU tensor contraction does not support output kernels.");
-
typedef GpuDevice Device;
typedef TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType, OutputKernelType>, Device> Self;
@@ -1274,7 +1271,11 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
typedef typename RightEvaluator::Dimensions RightDimensions;
EIGEN_DEVICE_FUNC TensorEvaluator(const XprType& op, const Device& device) :
- Base(op, device) {}
+ Base(op, device)
+ {
+ EIGEN_STATIC_ASSERT( (internal::is_same<OutputKernelType, const NoOpOutputKernel>::value),
+ GPU_TENSOR_CONTRACTION_DOES_NOT_SUPPORT_OUTPUT_KERNELS);
+ }
// We need to redefine this method to make nvcc happy
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* data) {
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionMapper.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionMapper.h
index 2d3b69128..142492603 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionMapper.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionMapper.h
@@ -120,6 +120,7 @@ class SimpleTensorContractionMapper {
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Index computeIndex(Index row, Index col) const {
const bool left = (side == Lhs);
+ EIGEN_UNUSED_VARIABLE(left); // annoying bug in g++8.1: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=85963
Index nocontract_val = left ? row : col;
Index linidx = 0;
for (int i = static_cast<int>(array_size<nocontract_t>::value) - 1; i > 0; i--) {
@@ -158,6 +159,7 @@ class SimpleTensorContractionMapper {
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE IndexPair<Index> computeIndexPair(Index row, Index col, const Index distance) const {
const bool left = (side == Lhs);
+ EIGEN_UNUSED_VARIABLE(left); // annoying bug in g++8.1: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=85963
Index nocontract_val[2] = {left ? row : col, left ? row + distance : col};
Index linidx[2] = {0, 0};
if (array_size<typename Tensor::Dimensions>::value > array_size<contract_t>::value) {
@@ -239,8 +241,10 @@ class BaseTensorContractionMapper : public SimpleTensorContractionMapper<Scalar,
ParentMapper(tensor, nocontract_strides, ij_strides, contract_strides, k_strides) { }
template <typename PacketT,int AlignmentType>
- EIGEN_DEVICE_FUNC
- EIGEN_STRONG_INLINE PacketT load(Index i, Index j) const {
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ typename internal::enable_if<internal::unpacket_traits<PacketT>::size==packet_size,PacketT>::type
+ load(Index i, Index j) const
+ {
// whole method makes column major assumption
// don't need to add offsets for now (because operator handles that)
@@ -282,6 +286,29 @@ class BaseTensorContractionMapper : public SimpleTensorContractionMapper<Scalar,
}
template <typename PacketT,int AlignmentType>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ typename internal::enable_if<internal::unpacket_traits<PacketT>::size!=packet_size,PacketT>::type
+ load(Index i, Index j) const
+ {
+ const Index requested_packet_size = internal::unpacket_traits<PacketT>::size;
+ EIGEN_ALIGN_MAX Scalar data[requested_packet_size];
+
+ const IndexPair<Index> indexPair = this->computeIndexPair(i, j, requested_packet_size - 1);
+ const Index first = indexPair.first;
+ const Index lastIdx = indexPair.second;
+
+ data[0] = this->m_tensor.coeff(first);
+ for (Index k = 1; k < requested_packet_size - 1; k += 2) {
+ const IndexPair<Index> internal_pair = this->computeIndexPair(i + k, j, 1);
+ data[k] = this->m_tensor.coeff(internal_pair.first);
+ data[k + 1] = this->m_tensor.coeff(internal_pair.second);
+ }
+ data[requested_packet_size - 1] = this->m_tensor.coeff(lastIdx);
+
+ return pload<PacketT>(data);
+ }
+
+ template <typename PacketT,int AlignmentType>
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE PacketT loadPacket(Index i, Index j) const {
return this->load<PacketT,AlignmentType>(i,j);
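The second load overload added above serves packet types whose size differs from the mapper's nominal packet_size: it gathers the coefficients one by one into an aligned scratch buffer and loads the packet from there. A simplified sketch of that gather-then-pload pattern, with a hypothetical accessor; the real mapper resolves tensor indices through computeIndexPair rather than contiguous offsets:

#include <Eigen/Core>

// Gather N scalars from an element accessor into aligned scratch memory,
// then load them as a single packet.
template <typename Packet, typename CoeffFn>
Packet gather_load(CoeffFn coeff, Eigen::Index first)
{
  typedef typename Eigen::internal::unpacket_traits<Packet>::type Scalar;
  const int N = Eigen::internal::unpacket_traits<Packet>::size;
  EIGEN_ALIGN_MAX Scalar data[N];
  for (int k = 0; k < N; ++k)
    data[k] = coeff(first + k);   // element-wise gather (simplified indexing)
  return Eigen::internal::pload<Packet>(data);
}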
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
index 24ba3e431..adf57c892 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
@@ -208,6 +208,26 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
Index nm = divup(nm0, gm);
Index nn = divup(nn0, gn);
+ // If there is enough concurrency in the sharding dimension, we choose not
+ // to parallelize by the other dimension, and execute all kernels in sync
+ // mode. This reduces parallelism from nm x nn down to nn
+ // (shard_by_col==true) or nm (shard_by_col==false).
+ const Index sharding_dim_tasks = shard_by_col ? nn : nm;
+ const int num_worker_threads = this->m_device.numThreadsInPool();
+
+ // With a small number of threads we want to make sure that we do not reduce
+ // parallelism too much. With a large number of threads we trade maximum
+ // parallelism for better memory locality.
+ const float oversharding_factor =
+ num_worker_threads <= 4 ? 8.0 :
+ num_worker_threads <= 8 ? 4.0 :
+ num_worker_threads <= 16 ? 2.0 :
+ num_worker_threads <= 32 ? 1.0 :
+ num_worker_threads <= 64 ? 0.8 : /* num_worker_threads > 64 */ 0.6;
+
+ const bool parallelize_by_sharding_dim_only =
+ sharding_dim_tasks >= oversharding_factor * num_worker_threads;
+
// Last but not least, decide whether we want to issue both lhs and rhs
// packing in parallel; or issue lhs packing first, and then issue rhs
// packing when lhs packing completes (for !shard_by_col lhs and rhs are
@@ -223,10 +243,13 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
// But don't do it if we will use each rhs only once. Locality seems to be
// more important in this case.
if ((shard_by_col ? nm : nn) == 1) parallel_pack = false;
+ // Also don't get in the way of the parallelize_by_sharding_dim_only
+ // optimization.
+ if (parallelize_by_sharding_dim_only) parallel_pack = false;
- #define CONTEXT_ARGS \
+#define CONTEXT_ARGS \
(this, num_threads, buffer, m, n, k, bm, bn, bk, nm, nn, nk, gm, gn, nm0, \
- nn0, shard_by_col, parallel_pack) \
+ nn0, shard_by_col, parallel_pack, parallelize_by_sharding_dim_only) \
.run()
TENSOR_CONTRACTION_DISPATCH(Context, Alignment, CONTEXT_ARGS);
@@ -260,7 +283,7 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
Context(const Self* self, int num_threads, Scalar* buffer, Index tm, Index tn,
Index tk, Index bm, Index bn, Index bk, Index nm, Index nn, Index nk,
Index gm, Index gn, Index nm0, Index nn0, bool shard_by_col,
- bool parallel_pack)
+ bool parallel_pack, bool parallelize_by_sharding_dim_only)
: device_(self->m_device),
lhs_(self->m_leftImpl, self->m_left_nocontract_strides,
self->m_i_strides, self->m_left_contracting_strides,
@@ -275,6 +298,7 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
num_threads_(num_threads),
shard_by_col_(shard_by_col),
parallel_pack_(parallel_pack),
+ parallelize_by_sharding_dim_only_(parallelize_by_sharding_dim_only),
m_(tm),
n_(tn),
k_(tk),
@@ -289,6 +313,9 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
nm0_(nm0),
nn0_(nn0)
{
+ // These two options are mutually exclusive.
+ eigen_assert(!(parallel_pack && parallelize_by_sharding_dim_only));
+
for (Index x = 0; x < P; x++) {
// Normal number of notifications for k slice switch is
// nm_ + nn_ + nm_ * nn_. However, first P - 1 slices will receive only
@@ -335,6 +362,42 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
mem += rhs_size;
}
}
+
+ if (parallelize_by_sharding_dim_only_) {
+ const int num_worker_threads = device_.numThreadsInPool();
+
+ if (shard_by_col) {
+ can_use_thread_local_packed_ = new std::atomic<bool>[nn_];
+ for (int i = 0; i < nn_; ++i)
+ can_use_thread_local_packed_[i].store(true,
+ std::memory_order_relaxed);
+
+ Index num_blocks = num_worker_threads * gn_;
+ thread_local_packed_mem_ = device_.allocate(num_blocks * rhs_size);
+ mem = static_cast<char*>(thread_local_packed_mem_);
+
+ thread_local_packed_rhs_.resize(num_blocks, nullptr);
+ for (Index i = 0; i < num_blocks; ++i) {
+ thread_local_packed_rhs_[i] = reinterpret_cast<RhsScalar*>(mem);
+ mem += rhs_size;
+ }
+ } else {
+ can_use_thread_local_packed_ = new std::atomic<bool>[nm_];
+ for (int i = 0; i < nm_; ++i)
+ can_use_thread_local_packed_[i].store(true,
+ std::memory_order_relaxed);
+
+ Index num_blocks = num_worker_threads * gm_;
+ thread_local_packed_mem_ = device_.allocate(num_blocks * lhs_size);
+ mem = static_cast<char*>(thread_local_packed_mem_);
+
+ thread_local_packed_lhs_.resize(num_blocks, nullptr);
+ for (Index i = 0; i < num_blocks; ++i) {
+ thread_local_packed_lhs_[i] = reinterpret_cast<LhsScalar*>(mem);
+ mem += lhs_size;
+ }
+ }
+ }
}
~Context() {
@@ -343,6 +406,10 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
delete[] state_kernel_[x];
}
device_.deallocate(packed_mem_);
+ if (parallelize_by_sharding_dim_only_) {
+ device_.deallocate(thread_local_packed_mem_);
+ delete[] can_use_thread_local_packed_;
+ }
}
void run() {
@@ -368,6 +435,7 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
const int num_threads_;
const bool shard_by_col_;
const bool parallel_pack_;
+ const bool parallelize_by_sharding_dim_only_;
// Matrix sizes.
const Index m_;
const Index n_;
@@ -426,6 +494,36 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
void* packed_mem_;
std::vector<LhsScalar*> packed_lhs_[P - 1];
std::vector<RhsScalar*> packed_rhs_[P - 1];
+
+ // If we choose to parallelize only by the sharding dimension, each thread
+ // will have its own "thread local" (not a C++ thread_local storage) memory
+ // for packed_lhs or packed_rhs (shard_by_col = false or true). This memory
+ // can't be passed to a kernel that might execute on a different thread.
+ //
+ // In practice, when we are ready to pack memory for the sharding dimension
+ // (rhs if shard_by_col==true) of the K-th slice, all kernels for the K-1
+ // slice are already computed (99% of the time), so we can pack data into the
+ // thread-local storage and guarantee that all the kernels will be executed
+ // immediately in the same thread. This significantly increases the L1 cache
+ // hit ratio and reduces pressure on the memory bus.
+ //
+ // It's still possible that a kernel for the K-th slice will be ready before
+ // completion of the K-1 kernel, so we have to allocate "global" packed_lhs_
+ // and packed_rhs_ to allow kernels to be executed later on a thread
+ // different from the thread that was used for packing.
+ void* thread_local_packed_mem_;
+
+ // Only one of these will be initialized, depending on the shard_by_col value.
+ std::vector<LhsScalar*> thread_local_packed_lhs_;
+ std::vector<RhsScalar*> thread_local_packed_rhs_;
+
+ // After a particular shard of the K-th slice missed its thread-local
+ // execution opportunity (the K-1 slice didn't complete its kernels), we can
+ // no longer schedule the K+1 and following slices in thread-local mode,
+ // because there is no longer a guarantee that previous kernels were executed
+ // sequentially in the same thread (array size is nn_ or nm_).
+ std::atomic<bool>* can_use_thread_local_packed_;
+
std::atomic<uint8_t>** state_kernel_[P];
// state_switch_ is frequently modified by worker threads, while other
// fields are read-only after constructor. Let's move it to a separate cache
@@ -434,22 +532,96 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
std::atomic<Index> state_packing_ready_[P];
std::atomic<Index> state_switch_[P];
+ LhsScalar* packed_lhs(Index m, Index k, Index m1, bool use_thread_local) {
+ if (use_thread_local) {
+ eigen_assert(!shard_by_col_);
+
+ Index base_idx = gm_ * device_.currentThreadId();
+ Index grain_idx = m1 - m * gm_;
+ Index block_idx = base_idx + grain_idx;
+
+ return thread_local_packed_lhs_[block_idx];
+ } else {
+ return packed_lhs_[k % (P - 1)][m1];
+ }
+ }
+
+ RhsScalar* packed_rhs(Index n, Index k, Index n1, bool use_thread_local) {
+ if (use_thread_local) {
+ eigen_assert(shard_by_col_);
+
+ Index base_idx = gn_ * device_.currentThreadId();
+ Index grain_idx = n1 - n * gn_;
+ Index block_idx = base_idx + grain_idx;
+
+ return thread_local_packed_rhs_[block_idx];
+ } else {
+ return packed_rhs_[k % (P - 1)][n1];
+ }
+ }
+
+ // In the following two methods (pack_lhs and pack_rhs), if we know for sure
+ // that we'll be able to call a kernel with the packed data immediately,
+ // without submitting it to the thread pool, we can use thread-local memory
+ // for the packed data.
+ //
+ // We can only reliably check this if we are running all kernels in sync mode
+ // (parallelizing only by the sharding dim). If the kernel for m==0 (n==0) is
+ // ready to run, it's guaranteed that all kernels with larger values of m (n)
+ // are also ready, because we execute them in the same order for all K slices.
+
void pack_lhs(Index m, Index k) {
+ bool use_thread_local = false;
+
+ if (parallelize_by_sharding_dim_only_ && !shard_by_col_ &&
+ can_use_thread_local_packed_[m].load(std::memory_order_relaxed)) {
+ if (state_kernel_[k % P][m][0].load(std::memory_order_relaxed) == 1) {
+ use_thread_local = true;
+ } else {
+ // If we can't guarantee that all kernels in the `k` slice will be
+ // executed sequentially in the current thread, it's no longer safe to
+ // use thread-local memory in the following slices along the k dimension.
+ eigen_assert(k > 0);
+ can_use_thread_local_packed_[m].store(false,
+ std::memory_order_relaxed);
+ }
+ }
+
const Index mend = m * gm_ + gm(m);
for (Index m1 = m * gm_; m1 < mend; m1++)
- TensorContractionKernel::packLhs(packed_lhs_[k % (P - 1)][m1],
+ TensorContractionKernel::packLhs(packed_lhs(m, k, m1, use_thread_local),
lhs_.getSubMapper(m1 * bm_, k * bk_),
bk(k), bm(m1));
if (!parallel_pack_ && shard_by_col_) {
+ assert(!use_thread_local);
signal_packing(k);
} else {
signal_switch(k + 1);
- for (Index n = nn_ - 1; n >= 0; n--) signal_kernel(m, n, k, n == 0);
+ for (Index n = nn_ - 1; n >= 0; n--) {
+ bool sync = parallelize_by_sharding_dim_only_ || n == 0;
+ signal_kernel(m, n, k, sync, use_thread_local);
+ }
}
}
void pack_rhs(Index n, Index k) {
+ bool use_thread_local = false;
+
+ if (parallelize_by_sharding_dim_only_ && shard_by_col_ &&
+ can_use_thread_local_packed_[n].load(std::memory_order_relaxed)) {
+ if (state_kernel_[k % P][0][n].load(std::memory_order_relaxed) == 1) {
+ use_thread_local = true;
+ } else {
+ // If we can't guarantee that all kernels in the `k` slice will be
+ // executed sequentially in the current thread, it's no longer safe to
+ // use thread-local memory in the following slices along the k dimension.
+ eigen_assert(k > 0);
+ can_use_thread_local_packed_[n].store(false,
+ std::memory_order_relaxed);
+ }
+ }
+
const Index nend = n * gn_ + gn(n);
for (Index n1 = n * gn_; n1 < nend; n1++) {
if (k == 0) {
@@ -462,20 +634,24 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
// deadlocks.
memset(buffer_ + n1 * bn_ * m_, 0, bn(n1) * m_ * sizeof(Scalar));
}
- TensorContractionKernel::packRhs(packed_rhs_[k % (P - 1)][n1],
+ TensorContractionKernel::packRhs(packed_rhs(n, k, n1, use_thread_local),
rhs_.getSubMapper(k * bk_, n1 * bn_),
bk(k), bn(n1));
}
if (parallel_pack_ || shard_by_col_) {
signal_switch(k + 1);
- for (Index m = nm_ - 1; m >= 0; m--) signal_kernel(m, n, k, m == 0);
+ for (Index m = nm_ - 1; m >= 0; m--) {
+ bool sync = parallelize_by_sharding_dim_only_ || m == 0;
+ signal_kernel(m, n, k, sync, use_thread_local);
+ }
} else {
+ assert(!use_thread_local);
signal_packing(k);
}
}
- void kernel(Index m, Index n, Index k) {
+ void kernel(Index m, Index n, Index k, bool use_thread_local) {
// Note: order of iteration matters here. Iteration over m is innermost
// because we want to reuse the same packed rhs in consecutive tasks
// (rhs fits into L2$ while lhs only into L3$).
@@ -486,8 +662,10 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
for (Index m1 = m * gm_; m1 < mend; m1++) {
const auto output_mapper = output_.getSubMapper(m1 * bm_, n1 * bn_);
TensorContractionKernel::invoke(
- output_mapper, packed_lhs_[k % (P - 1)][m1],
- packed_rhs_[k % (P - 1)][n1], bm(m1), bk(k), bn(n1), Scalar(1));
+ output_mapper,
+ packed_lhs(m, k, m1, !shard_by_col_ && use_thread_local),
+ packed_rhs(n, k, n1, shard_by_col_ && use_thread_local), bm(m1),
+ bk(k), bn(n1), Scalar(1));
// We are done with the last task for the [m1, n1] block.
if (k + 1 == nk_) {
@@ -501,8 +679,10 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
for (Index n1 = n * gn_; n1 < nend; n1++) {
const auto output_mapper = output_.getSubMapper(m1 * bm_, n1 * bn_);
TensorContractionKernel::invoke(
- output_mapper, packed_lhs_[k % (P - 1)][m1],
- packed_rhs_[k % (P - 1)][n1], bm(m1), bk(k), bn(n1), Scalar(1));
+ output_mapper,
+ packed_lhs(m, k, m1, !shard_by_col_ && use_thread_local),
+ packed_rhs(n, k, n1, shard_by_col_ && use_thread_local), bm(m1),
+ bk(k), bn(n1), Scalar(1));
// We are done with the last task for the [m1, n1] block.
if (k + 1 == nk_) {
@@ -511,7 +691,7 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
}
}
}
- signal_kernel(m, n, k + 1, false);
+ signal_kernel(m, n, k + 1, /*sync=*/false, /*use_thread_local=*/false);
signal_switch(k + 2);
}
@@ -524,16 +704,23 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
enqueue_packing(k, shard_by_col_);
}
- void signal_kernel(Index m, Index n, Index k, bool sync) {
+ void signal_kernel(Index m, Index n, Index k, bool sync,
+ bool use_thread_local) {
std::atomic<uint8_t>* state = &state_kernel_[k % P][m][n];
Index s = state->load();
eigen_assert(s > 0);
- if (s != 1 && state->fetch_sub(1) != 1) return;
+ if (s != 1 && state->fetch_sub(1) != 1) {
+ eigen_assert(!use_thread_local);
+ return;
+ }
state->store(parallel_pack_ ? 3 : 2, std::memory_order_relaxed);
- if (sync)
- kernel(m, n, k);
- else
- device_.enqueueNoNotification([=]() { kernel(m, n, k); });
+ if (sync) {
+ kernel(m, n, k, use_thread_local);
+ } else {
+ eigen_assert(!use_thread_local);
+ device_.enqueueNoNotification(
+ [=]() { kernel(m, n, k, use_thread_local); });
+ }
}
void signal_switch(Index k, Index v = 1) {
@@ -589,7 +776,26 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
[=]() { enqueue_packing_helper(mid, end, k, rhs); });
end = mid;
}
- enqueue_packing_helper(start, end, k, rhs);
+
+ // Decide if we want to run the first packing task (start == 0) in
+ // async mode when we parallelize only by the sharding dim:
+ // (1) pack_lhs and pack_rhs call signal_switch before completing
+ // all calls to signal_kernel, which in sync mode might lead
+ // to the execution of the first kernel of the k+1 slice before
+ // completing a call to the last kernel of the k slice.
+ // (2) all pack tasks for the sharded dim must be executed in a thread
+ // pool.
+ bool pack_async =
+ (start == 0) &&
+ (parallelize_by_sharding_dim_only_ && shard_by_col_ == rhs) &&
+ (k > 0 || device_.currentThreadId() < 0);
+
+ if (pack_async) {
+ device_.enqueueNoNotification(
+ [=]() { enqueue_packing_helper(start, end, k, rhs); });
+ } else {
+ enqueue_packing_helper(start, end, k, rhs);
+ }
}
}
@@ -756,6 +962,36 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
}
}
+ template <int Alignment>
+ EIGEN_STRONG_INLINE void addAllToBuffer(size_t n, const Scalar* src_buf0,
+ const Scalar* src_buf1,
+ const Scalar* src_buf2,
+ Scalar* dst_buf) const {
+ using ::Eigen::internal::padd;
+ using ::Eigen::internal::pload;
+ using ::Eigen::internal::ploadt;
+ using ::Eigen::internal::pstoret;
+
+ const int output_packet_size =
+ internal::unpacket_traits<PacketReturnType>::size;
+
+ size_t i = 0;
+ const size_t num_packets = n / output_packet_size;
+ for (; i < output_packet_size * num_packets; i += output_packet_size) {
+ const auto src_val0 = pload<PacketReturnType>(src_buf0 + i);
+ const auto src_val1 = pload<PacketReturnType>(src_buf1 + i);
+ const auto src_val2 = pload<PacketReturnType>(src_buf2 + i);
+
+ const auto dst_val = ploadt<PacketReturnType, Alignment>(dst_buf + i);
+ const auto sum = padd(padd(dst_val, src_val0), padd(src_val1, src_val2));
+
+ pstoret<Scalar, PacketReturnType, Alignment>(dst_buf + i, sum);
+ }
+ for (; i < n; ++i) {
+ dst_buf[i] += src_buf0[i] + src_buf1[i] + src_buf2[i];
+ }
+ }
+
// Decide whether we want to shard m x k x n contraction over the inner
// (contraction) dimension (k).
static bool shardByInnerDim(Index m, Index n, Index k, int num_threads,
@@ -788,48 +1024,147 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
const Index m = this->m_i_size;
const Index n = this->m_j_size;
const Index k = this->m_k_size;
- // The underlying GEMM kernel assumes that k is a multiple of 8 and
- // subtle breakage occurs if this is violated.
- Index block_size = 8 * divup<Index>(k, 8 * num_threads);
- Index num_blocks = divup<Index>(k, block_size);
- // we use 'result' for the first block's partial result.
- MaxSizeVector<Scalar*> block_buffers(num_blocks - 1);
- Barrier barrier(internal::convert_index<int>(num_blocks));
- auto process_block = [=, &barrier](Scalar* buf, Index begin, Index end) {
- ::memset(buf, 0, m * n * sizeof(Scalar));
+
+ // We will compute partial results into buffers of this size.
+ const Index buffer_size_bytes = m * n * sizeof(Scalar);
+
+ // The underlying GEMM kernel assumes that k is a multiple of
+ // the packet size and subtle breakage occurs if this is violated.
+ const Index packet_size = internal::packet_traits<RhsScalar>::size;
+
+ const auto round_up = [=](Index index) -> Index {
+ const Index kmultiple = packet_size <= 8 ? 8 : packet_size;
+ return divup<Index>(index, kmultiple) * kmultiple;
+ };
+
+ // The cost model doesn't capture well the cost associated with constructing
+ // tensor contraction mappers and computing loop bounds in gemm_pack_lhs and
+ // gemm_pack_rhs, so we specify a minimum desired block size.
+ const Index target_block_size = round_up(divup<Index>(k, num_threads));
+ const Index desired_min_block_size = 12 * packet_size;
+
+ const Index block_size = numext::mini<Index>(
+ k, numext::maxi<Index>(desired_min_block_size, target_block_size));
+ const Index num_blocks = divup<Index>(k, block_size);
+
+ // Compute the block size, accounting for a potentially incomplete last block.
+ const auto actual_block_size = [=](Index block_idx) -> Index {
+ return block_idx + 1 < num_blocks
+ ? block_size
+ : k + block_size - block_size * num_blocks;
+ };
+
+ // We compute partial gemm results in parallel, and to get the final result
+ // we need to add them all together. For a large number of threads (>= 48)
+ // this adds a very expensive sequential step at the end.
+ //
+ // We split [0, num_blocks) into small ranges, and when a task for a
+ // block finishes its partial gemm computation, it checks if it was the last
+ // gemm in its range, and if so, it adds up all blocks of the range.
+ //
+ // After all tasks finish, we only need to add these pre-aggregated blocks.
+
+ // Compute the range size, accounting for a potentially incomplete last range.
+ const auto actual_range_size = [=](Index num_ranges, Index range_size,
+ Index range_idx) -> Index {
+ eigen_assert(range_idx < num_ranges);
+ return range_idx + 1 < num_ranges
+ ? range_size
+ : num_blocks + range_size - range_size * num_ranges;
+ };
+
+ // For now we use just a single level of ranges to compute pre-aggregated
+ // partial sums, but in general we can use more layers to compute tree
+ // aggregation in parallel and reduce the size of the sequential step.
+ //
+ // TODO(ezhulenev): Add multilevel tree aggregation? Probably only makes
+ // sense if the number of threads is >= ~128?
+ static const Index l0_size = 4;
+ const Index l0_ranges = divup<Index>(num_blocks, l0_size);
+
+ // Keep count of pending gemm tasks for each l0 range.
+ MaxSizeVector<std::atomic<int>> l0_state(l0_ranges);
+ for (int i = 0; i < l0_ranges; ++i) {
+ const Index num_pending_tasks = actual_range_size(l0_ranges, l0_size, i);
+ l0_state.emplace_back(internal::convert_index<int>(num_pending_tasks));
+ }
+
+ MaxSizeVector<Scalar*> block_buffers(num_blocks);
+
+ auto process_block = [&, this](Index block_idx, Index begin, Index end) {
+ Scalar* buf = block_buffers[block_idx];
+ ::memset(buf, 0, buffer_size_bytes);
+
TENSOR_CONTRACTION_DISPATCH(
this->template evalGemmPartialWithoutOutputKernel, Alignment,
- (buf, begin, end, this->m_device.numThreads()));
- barrier.Notify();
- };
- Index start = 0;
- for (Index blocks_left = num_blocks; blocks_left > 0; --blocks_left) {
- // The underlying GEMM kernel assumes that k is a multiple of packet size
- // (currently largest packet size is 8) and subtle breakage occurs if
- // this is violated.
- block_size = 8 * divup<Index>(k - start, 8 * blocks_left);
- Scalar* buf;
- if (start == 0) {
- buf = result;
- } else {
- buf = static_cast<Scalar*>(
- this->m_device.allocate(m * n * sizeof(Scalar)));
- block_buffers.push_back(buf);
- }
- Index end = start + block_size;
- if (end > k) {
- end = k;
+ (buf, begin, end,
+ /*num_threads=*/internal::convert_index<int>(num_blocks)));
+
+ // Check if it was the last task in l0 range.
+ const Index l0_index = block_idx / l0_size;
+ const int v = l0_state[l0_index].fetch_sub(1);
+ eigen_assert(v >= 1);
+
+ // If we processed the last block of the range, we can aggregate all
+ // partial results into the first block of the range.
+ if (v == 1) {
+ const Index rng_size = actual_range_size(l0_ranges, l0_size, l0_index);
+ const Index dst_block_idx = l0_index * l0_size;
+
+ if (rng_size == l0_size) {
+ addAllToBuffer<Alignment>(
+ m * n,
+ /*src_buf0=*/block_buffers[dst_block_idx + 1],
+ /*src_buf1=*/block_buffers[dst_block_idx + 2],
+ /*src_buf2=*/block_buffers[dst_block_idx + 3],
+ /*dst_buf= */ block_buffers[dst_block_idx]);
+ } else {
+ // Aggregate blocks of potentially incomplete last range.
+ for (int i = 1; i < rng_size; ++i) {
+ addToBuffer<Alignment>(m * n,
+ /*src_buf=*/block_buffers[dst_block_idx + i],
+ /*dst_buf=*/block_buffers[dst_block_idx]);
+ }
+ }
}
- this->m_device.enqueueNoNotification(
- [=, &process_block]() { process_block(buf, start, end); });
- start = end;
+ };
+
+ Barrier barrier(internal::convert_index<int>(num_blocks));
+ for (Index block_idx = 0; block_idx < num_blocks; ++block_idx) {
+ Scalar* buf = block_idx == 0
+ ? result
+ : static_cast<Scalar*>(
+ this->m_device.allocate(buffer_size_bytes));
+ block_buffers.push_back(buf);
+
+ Index block_start = block_idx * block_size;
+ Index block_end = block_start + actual_block_size(block_idx);
+
+ this->m_device.enqueueNoNotification([=, &barrier, &process_block]() {
+ process_block(block_idx, block_start, block_end);
+ barrier.Notify();
+ });
}
barrier.Wait();
- // Add other partial results into first partial result.
- for (const auto& buf : block_buffers) {
- addToBuffer<Alignment>(m * n, buf, result);
- this->m_device.deallocate(buf);
+ // Aggregate partial sums from l0 ranges.
+ Index l0_index = 1;
+ for (; l0_index + 2 < l0_ranges; l0_index += 3) {
+ addAllToBuffer<Alignment>(
+ m * n,
+ /*src_buf0=*/block_buffers[(l0_index + 0) * l0_size],
+ /*src_buf1=*/block_buffers[(l0_index + 1) * l0_size],
+ /*src_buf2=*/block_buffers[(l0_index + 2) * l0_size],
+ /*dst_buf= */block_buffers[0]);
+ }
+ for (; l0_index < l0_ranges; ++l0_index) {
+ addToBuffer<Alignment>(m * n, block_buffers[l0_index * l0_size],
+ block_buffers[0]);
+ }
+
+ // Don't forget to deallocate ALL temporary buffers.
+ for (Index i = 1; i < num_blocks; ++i) {
+ this->m_device.deallocate(block_buffers[i]);
}
// Finally call output kernel with finalized output buffer.
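The partial-sum handling above groups the num_blocks per-block buffers into ranges of l0_size = 4: the last task to finish in a range folds the other buffers of that range into the range's first buffer, so the final sequential step only adds one pre-aggregated buffer per range. The same two-level reduction over plain arrays, as a minimal single-threaded sketch with hypothetical code, threading and packet math omitted:

#include <algorithm>
#include <cstddef>
#include <vector>

// Fold each range of l0_size partial buffers into the range's first buffer,
// then fold the range leaders into buffer 0 (the final sequential step).
void reduce_partial_buffers(std::vector<std::vector<float> >& bufs, int l0_size = 4)
{
  const int num_blocks = static_cast<int>(bufs.size());
  const int l0_ranges  = (num_blocks + l0_size - 1) / l0_size;  // divup
  for (int r = 0; r < l0_ranges; ++r) {
    const int first = r * l0_size;
    const int last  = std::min(num_blocks, first + l0_size);
    for (int i = first + 1; i < last; ++i)
      for (std::size_t j = 0; j < bufs[first].size(); ++j)
        bufs[first][j] += bufs[i][j];
  }
  for (int r = 1; r < l0_ranges; ++r)
    for (std::size_t j = 0; j < bufs[0].size(); ++j)
      bufs[0][j] += bufs[r * l0_size][j];
}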
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorConversion.h b/unsupported/Eigen/CXX11/src/Tensor/TensorConversion.h
index 1f613d3c7..938fd0f34 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorConversion.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorConversion.h
@@ -32,7 +32,7 @@ struct traits<TensorConversionOp<TargetType, XprType> >
static const int NumDimensions = traits<XprType>::NumDimensions;
static const int Layout = traits<XprType>::Layout;
enum { Flags = 0 };
- typedef typename TypeConversion<Scalar, typename traits<XprType>::PointerType>::type PointerType;
+ typedef typename TypeConversion<Scalar, typename traits<XprType>::PointerType>::type PointerType;
};
template<typename TargetType, typename XprType>
@@ -177,6 +177,81 @@ template <typename Eval, typename Scalar> struct ConversionSubExprEval<true, Eva
}
};
+namespace internal {
+
+template <typename SrcType, typename TargetType, bool IsSameT>
+struct CoeffConv {
+ template <typename ArgType, typename Device>
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TargetType run(const TensorEvaluator<ArgType, Device>& impl, Index index) {
+ internal::scalar_cast_op<SrcType, TargetType> converter;
+ return converter(impl.coeff(index));
+ }
+};
+
+template <typename SrcType, typename TargetType>
+struct CoeffConv<SrcType, TargetType, true> {
+ template <typename ArgType, typename Device>
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TargetType run(const TensorEvaluator<ArgType, Device>& impl, Index index) {
+ return impl.coeff(index);
+ }
+};
+
+template <typename SrcPacket, typename TargetPacket, int LoadMode, bool ActuallyVectorize, bool IsSameT>
+struct PacketConv {
+ typedef typename internal::unpacket_traits<SrcPacket>::type SrcType;
+ typedef typename internal::unpacket_traits<TargetPacket>::type TargetType;
+
+ static const int PacketSize = internal::unpacket_traits<TargetPacket>::size;
+
+ template <typename ArgType, typename Device>
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TargetPacket run(const TensorEvaluator<ArgType, Device>& impl, Index index) {
+ internal::scalar_cast_op<SrcType, TargetType> converter;
+ EIGEN_ALIGN_MAX typename internal::remove_const<TargetType>::type values[PacketSize];
+ for (int i = 0; i < PacketSize; ++i) {
+ values[i] = converter(impl.coeff(index+i));
+ }
+ TargetPacket rslt = internal::pload<TargetPacket>(values);
+ return rslt;
+ }
+};
+
+template <typename SrcPacket, typename TargetPacket, int LoadMode, bool IsSameT>
+struct PacketConv<SrcPacket, TargetPacket, LoadMode, true, IsSameT> {
+ typedef typename internal::unpacket_traits<SrcPacket>::type SrcType;
+ typedef typename internal::unpacket_traits<TargetPacket>::type TargetType;
+
+ template <typename ArgType, typename Device>
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TargetPacket run(const TensorEvaluator<ArgType, Device>& impl, Index index) {
+ const int SrcCoeffRatio = internal::type_casting_traits<SrcType, TargetType>::SrcCoeffRatio;
+ const int TgtCoeffRatio = internal::type_casting_traits<SrcType, TargetType>::TgtCoeffRatio;
+ PacketConverter<TensorEvaluator<ArgType, Device>, SrcPacket, TargetPacket,
+ SrcCoeffRatio, TgtCoeffRatio> converter(impl);
+ return converter.template packet<LoadMode>(index);
+ }
+};
+
+template <typename SrcPacket, typename TargetPacket, int LoadMode>
+struct PacketConv<SrcPacket, TargetPacket, LoadMode, /*ActuallyVectorize=*/false, /*IsSameT=*/true> {
+ typedef typename internal::unpacket_traits<TargetPacket>::type TargetType;
+ static const int PacketSize = internal::unpacket_traits<TargetPacket>::size;
+
+ template <typename ArgType, typename Device>
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TargetPacket run(const TensorEvaluator<ArgType, Device>& impl, Index index) {
+ EIGEN_ALIGN_MAX typename internal::remove_const<TargetType>::type values[PacketSize];
+ for (int i = 0; i < PacketSize; ++i) values[i] = impl.coeff(index+i);
+ return internal::pload<TargetPacket>(values);
+ }
+};
+
+template <typename SrcPacket, typename TargetPacket, int LoadMode>
+struct PacketConv<SrcPacket, TargetPacket, LoadMode, /*ActuallyVectorize=*/true, /*IsSameT=*/true> {
+ template <typename ArgType, typename Device>
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TargetPacket run(const TensorEvaluator<ArgType, Device>& impl, Index index) {
+ return impl.template packet<LoadMode>(index);
+ }
+};
+
+} // namespace internal
// Eval as rvalue
template<typename TargetType, typename ArgType, typename Device>
@@ -191,6 +266,7 @@ struct TensorEvaluator<const TensorConversionOp<TargetType, ArgType>, Device>
typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
typedef typename PacketType<SrcType, Device>::type PacketSourceType;
static const int PacketSize = PacketType<CoeffReturnType, Device>::size;
+ static const bool IsSameType = internal::is_same<TargetType, SrcType>::value;
enum {
IsAligned = false,
@@ -210,7 +286,7 @@ struct TensorEvaluator<const TensorConversionOp<TargetType, ArgType>, Device>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* data)
{
- return ConversionSubExprEval<internal::is_same<TargetType, SrcType>::value, TensorEvaluator<ArgType, Device>, Scalar>::run(m_impl, data);
+ return ConversionSubExprEval<IsSameType, TensorEvaluator<ArgType, Device>, Scalar>::run(m_impl, data);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup()
@@ -220,16 +296,23 @@ struct TensorEvaluator<const TensorConversionOp<TargetType, ArgType>, Device>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
{
- internal::scalar_cast_op<SrcType, TargetType> converter;
- return converter(m_impl.coeff(index));
+ return internal::CoeffConv<SrcType, TargetType, IsSameType>::run(m_impl,index);
}
template<int LoadMode>
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
- {
- const bool Vectorizable = TensorEvaluator<ArgType, Device>::PacketAccess &
- internal::type_casting_traits<SrcType, TargetType>::VectorizedCast;
- return PacketConv<LoadMode, Vectorizable>::run(m_impl, index);
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType
+ packet(Index index) const {
+ // If we are not going to do the cast, we just need to check that the base
+ // TensorEvaluator has packet access. Otherwise we also need to make sure
+ // that we have an implementation of the vectorized cast.
+ const bool Vectorizable =
+ IsSameType
+ ? TensorEvaluator<ArgType, Device>::PacketAccess
+ : TensorEvaluator<ArgType, Device>::PacketAccess &
+ internal::type_casting_traits<SrcType, TargetType>::VectorizedCast;
+
+ return internal::PacketConv<PacketSourceType, PacketReturnType, LoadMode,
+ Vectorizable, IsSameType>::run(m_impl, index);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
@@ -252,31 +335,7 @@ struct TensorEvaluator<const TensorConversionOp<TargetType, ArgType>, Device>
/// required by sycl in order to extract the sycl accessor
const TensorEvaluator<ArgType, Device>& impl() const { return m_impl; }
- protected:
- template <int LoadMode, bool ActuallyVectorize>
- struct PacketConv {
- static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType run(const TensorEvaluator<ArgType, Device>& impl, Index index) {
- internal::scalar_cast_op<SrcType, TargetType> converter;
- EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize];
- for (int i = 0; i < PacketSize; ++i) {
- values[i] = converter(impl.coeff(index+i));
- }
- PacketReturnType rslt = internal::pload<PacketReturnType>(values);
- return rslt;
- }
- };
-
- template <int LoadMode>
- struct PacketConv<LoadMode, true> {
- static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType run(const TensorEvaluator<ArgType, Device>& impl, Index index) {
- const int SrcCoeffRatio = internal::type_casting_traits<SrcType, TargetType>::SrcCoeffRatio;
- const int TgtCoeffRatio = internal::type_casting_traits<SrcType, TargetType>::TgtCoeffRatio;
- PacketConverter<TensorEvaluator<ArgType, Device>, PacketSourceType, PacketReturnType,
- SrcCoeffRatio, TgtCoeffRatio> converter(impl);
- return converter.template packet<LoadMode>(index);
- }
- };
-
+ protected:
TensorEvaluator<ArgType, Device> m_impl;
};
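For context, a minimal sketch (not part of the patch; the tensor names and sizes are illustrative only) of the kind of expression that exercises this dispatch: cast<>() builds a TensorConversionOp, and its evaluator selects a PacketConv specialization from IsSameType and the Vectorizable flag computed in packet() above.

#include <unsupported/Eigen/CXX11/Tensor>

void conversion_example() {
  Eigen::Tensor<float, 2> a(4, 4);
  a.setRandom();
  // Different scalar types: IsSameType == false; the cast is vectorized only
  // if type_casting_traits<float, double> declares VectorizedCast, otherwise
  // the scalar CoeffConv/PacketConv fallback above is used.
  Eigen::Tensor<double, 2> b = a.cast<double>();
  // Same scalar type: IsSameType == true and packets are forwarded unchanged.
  Eigen::Tensor<float, 2> c = a.cast<float>();
  (void)b; (void)c;
}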
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceThreadPool.h b/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceThreadPool.h
index bb330a77b..b43db40c8 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceThreadPool.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceThreadPool.h
@@ -87,13 +87,13 @@ struct ThreadPoolDevice {
const size_t kMinBlockSize = 32768;
typedef TensorCostModel<ThreadPoolDevice> CostModel;
const size_t num_threads = CostModel::numThreads(n, TensorOpCost(1.0, 1.0, 0), 4);
- if (n <= kMinBlockSize || num_threads == 1) {
+ if (n <= kMinBlockSize || num_threads < 2) {
::memcpy(dst, src, n);
} else {
const char* src_ptr = static_cast<const char*>(src);
char* dst_ptr = static_cast<char*>(dst);
const size_t blocksize = (n + (num_threads - 1)) / num_threads;
- Barrier barrier(num_threads - 1);
+ Barrier barrier(static_cast<int>(num_threads - 1));
 // Launch the remaining blocks on worker threads.
for (size_t i = 1; i < num_threads; ++i) {
enqueue_with_barrier(&barrier, [n, i, src_ptr, dst_ptr, blocksize] {
@@ -122,6 +122,12 @@ struct ThreadPoolDevice {
return num_threads_;
}
+ // Number of threads available in the underlying thread pool. This number can
+ // be different from the value returned by numThreads().
+ EIGEN_STRONG_INLINE int numThreadsInPool() const {
+ return pool_->NumThreads();
+ }
+
EIGEN_STRONG_INLINE size_t firstLevelCacheSize() const {
return l1CacheSize();
}
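As a reading aid for the hunk above, a standalone sketch (hypothetical helper, not Eigen API) of the partitioning arithmetic: n bytes are split into num_threads chunks of size ceil(n / num_threads); chunk 0 is copied by the calling thread and the remaining chunks are handed to workers, synchronized through a Barrier sized num_threads - 1.

#include <algorithm>
#include <cstddef>
#include <utility>
#include <vector>

std::vector<std::pair<std::size_t, std::size_t>> split_copy(std::size_t n,
                                                            std::size_t num_threads) {
  // blocksize = ceil(n / num_threads); the last block may be shorter.
  const std::size_t blocksize = (n + num_threads - 1) / num_threads;
  std::vector<std::pair<std::size_t, std::size_t>> blocks;  // (offset, length)
  for (std::size_t i = 0; i < num_threads; ++i) {
    const std::size_t begin = i * blocksize;
    if (begin >= n) break;
    blocks.emplace_back(begin, std::min(blocksize, n - begin));
  }
  return blocks;  // block 0 goes to the caller, the rest to worker threads.
}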
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h b/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h
index 1c44541bd..e2ff11129 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h
@@ -325,7 +325,6 @@ class TensorExecutor<Expression, GpuDevice, Vectorizable, Tileable> {
static void run(const Expression& expr, const GpuDevice& device);
};
-
#if defined(EIGEN_GPUCC)
template <typename Evaluator, typename StorageIndex, bool Vectorizable>
struct EigenMetaKernelEval {
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorForcedEval.h b/unsupported/Eigen/CXX11/src/Tensor/TensorForcedEval.h
index 78068be35..74b905329 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorForcedEval.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorForcedEval.h
@@ -90,14 +90,21 @@ struct TensorEvaluator<const TensorForcedEvalOp<ArgType>, Device>
static const int PacketSize = PacketType<CoeffReturnType, Device>::size;
enum {
- IsAligned = true,
- PacketAccess = (PacketType<CoeffReturnType, Device>::size > 1),
- BlockAccess = false,
+ IsAligned = true,
+ PacketAccess = (PacketType<CoeffReturnType, Device>::size > 1),
+ BlockAccess = internal::is_arithmetic<CoeffReturnType>::value,
PreferBlockAccess = false,
- Layout = TensorEvaluator<ArgType, Device>::Layout,
- RawAccess = true
+ Layout = TensorEvaluator<ArgType, Device>::Layout,
+ RawAccess = true
};
+ typedef typename internal::TensorBlock<
+ CoeffReturnType, Index, internal::traits<ArgType>::NumDimensions, Layout>
+ TensorBlock;
+ typedef typename internal::TensorBlockReader<
+ CoeffReturnType, Index, internal::traits<ArgType>::NumDimensions, Layout>
+ TensorBlockReader;
+
EIGEN_DEVICE_FUNC TensorEvaluator(const XprType& op, const Device& device)
/// op_ is used for sycl
: m_impl(op.expression(), device), m_op(op.expression()), m_device(device), m_buffer(NULL)
@@ -139,6 +146,14 @@ struct TensorEvaluator<const TensorForcedEvalOp<ArgType>, Device>
return internal::ploadt<PacketReturnType, LoadMode>(m_buffer + index);
}
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void getResourceRequirements(
+ std::vector<internal::TensorOpResourceRequirements>*) const {}
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void block(TensorBlock* block) const {
+ assert(m_buffer != NULL);
+ TensorBlockReader::Run(block, m_buffer);
+ }
+
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
return TensorOpCost(sizeof(CoeffReturnType), 0, 0, vectorized, PacketSize);
}
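A sketch of an expression that reaches this code path (it mirrors the test_execute_broadcasting_of_forced_eval test added near the end of this patch): .eval() inserts a TensorForcedEvalOp, its evaluator materializes src.square() into m_buffer, and packet/block reads are then served directly from that buffer. BlockAccess is only advertised when CoeffReturnType is an arithmetic type, per the enum change above.

#include <unsupported/Eigen/CXX11/Tensor>

void forced_eval_example() {
  Eigen::Tensor<float, 2> src(8, 8);
  src.setRandom();
  Eigen::array<Eigen::Index, 2> bcast = {{2, 3}};
  // The forced evaluation of src.square() is what the new block() method reads.
  Eigen::Tensor<float, 2> dst = src.square().eval().broadcast(bcast);
  (void)dst;
}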
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorGenerator.h b/unsupported/Eigen/CXX11/src/Tensor/TensorGenerator.h
index ac66f9cf1..204a6fd33 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorGenerator.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorGenerator.h
@@ -89,17 +89,22 @@ struct TensorEvaluator<const TensorGeneratorOp<Generator, ArgType>, Device>
typedef typename XprType::CoeffReturnType CoeffReturnType;
typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
enum {
- IsAligned = false,
- PacketAccess = (PacketType<CoeffReturnType, Device>::size > 1),
- BlockAccess = false,
- PreferBlockAccess = false,
- Layout = TensorEvaluator<ArgType, Device>::Layout,
- CoordAccess = false, // to be implemented
- RawAccess = false
+ IsAligned = false,
+ PacketAccess = (PacketType<CoeffReturnType, Device>::size > 1),
+ BlockAccess = true,
+ PreferBlockAccess = true,
+ Layout = TensorEvaluator<ArgType, Device>::Layout,
+ CoordAccess = false, // to be implemented
+ RawAccess = false
};
+ typedef internal::TensorIntDivisor<Index> IndexDivisor;
+
+ typedef internal::TensorBlock<CoeffReturnType, Index, NumDims, Layout>
+ TensorBlock;
+
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
- : m_generator(op.generator())
+ : m_device(device), m_generator(op.generator())
#ifdef EIGEN_USE_SYCL
, m_argImpl(op.expression(), device)
#endif
@@ -111,11 +116,13 @@ struct TensorEvaluator<const TensorGeneratorOp<Generator, ArgType>, Device>
m_strides[0] = 1;
for (int i = 1; i < NumDims; ++i) {
m_strides[i] = m_strides[i - 1] * m_dimensions[i - 1];
+ if (m_strides[i] != 0) m_fast_strides[i] = IndexDivisor(m_strides[i]);
}
} else {
m_strides[NumDims - 1] = 1;
for (int i = NumDims - 2; i >= 0; --i) {
m_strides[i] = m_strides[i + 1] * m_dimensions[i + 1];
+ if (m_strides[i] != 0) m_fast_strides[i] = IndexDivisor(m_strides[i]);
}
}
}
@@ -150,6 +157,75 @@ struct TensorEvaluator<const TensorGeneratorOp<Generator, ArgType>, Device>
return rslt;
}
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void getResourceRequirements(
+ std::vector<internal::TensorOpResourceRequirements>* resources) const {
+ Eigen::Index block_total_size_max = numext::maxi<Eigen::Index>(
+ 1, m_device.firstLevelCacheSize() / sizeof(Scalar));
+ resources->push_back(internal::TensorOpResourceRequirements(
+ internal::kSkewedInnerDims, block_total_size_max));
+ }
+
+ struct BlockIteratorState {
+ Index stride;
+ Index span;
+ Index size;
+ Index count;
+ };
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void block(
+ TensorBlock* output_block) const {
+ if (NumDims <= 0) return;
+
+ static const bool is_col_major =
+ static_cast<int>(Layout) == static_cast<int>(ColMajor);
+
+ // Compute spatial coordinates for the first block element.
+ array<Index, NumDims> coords;
+ extract_coordinates(output_block->first_coeff_index(), coords);
+ array<Index, NumDims> initial_coords = coords;
+
+ CoeffReturnType* data = output_block->data();
+ Index offset = 0;
+
+ // Initialize output block iterator state. Dimensions in this array are
+ // always in inner-most -> outer-most order (column-major layout).
+ array<BlockIteratorState, NumDims> it;
+ for (Index i = 0; i < NumDims; ++i) {
+ const Index dim = is_col_major ? i : NumDims - 1 - i;
+ it[i].size = output_block->block_sizes()[dim];
+ it[i].stride = output_block->block_strides()[dim];
+ it[i].span = it[i].stride * (it[i].size - 1);
+ it[i].count = 0;
+ }
+ eigen_assert(it[0].stride == 1);
+
+ while (it[NumDims - 1].count < it[NumDims - 1].size) {
+ // Generate data for the inner-most dimension.
+ for (Index i = 0; i < it[0].size; ++i) {
+ *(data + offset + i) = m_generator(coords);
+ coords[is_col_major ? 0 : NumDims - 1]++;
+ }
+ coords[is_col_major ? 0 : NumDims - 1] =
+ initial_coords[is_col_major ? 0 : NumDims - 1];
+
+ // For the 1d tensor we need to generate only one inner-most dimension.
+ if (NumDims == 1) break;
+
+ // Update offset.
+ for (Index i = 1; i < NumDims; ++i) {
+ if (++it[i].count < it[i].size) {
+ offset += it[i].stride;
+ coords[is_col_major ? i : NumDims - 1 - i]++;
+ break;
+ }
+ if (i != NumDims - 1) it[i].count = 0;
+ coords[is_col_major ? i : NumDims - 1 - i] =
+ initial_coords[is_col_major ? i : NumDims - 1 - i];
+ offset -= it[i].span;
+ }
+ }
+ }
+
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
costPerCoeff(bool) const {
// TODO(rmlarsen): This is just a placeholder. Define interface to make
@@ -170,14 +246,14 @@ struct TensorEvaluator<const TensorGeneratorOp<Generator, ArgType>, Device>
void extract_coordinates(Index index, array<Index, NumDims>& coords) const {
if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
for (int i = NumDims - 1; i > 0; --i) {
- const Index idx = index / m_strides[i];
+ const Index idx = index / m_fast_strides[i];
index -= idx * m_strides[i];
coords[i] = idx;
}
coords[0] = index;
} else {
for (int i = 0; i < NumDims - 1; ++i) {
- const Index idx = index / m_strides[i];
+ const Index idx = index / m_fast_strides[i];
index -= idx * m_strides[i];
coords[i] = idx;
}
@@ -185,8 +261,10 @@ struct TensorEvaluator<const TensorGeneratorOp<Generator, ArgType>, Device>
}
}
+ const Device& m_device;
Dimensions m_dimensions;
array<Index, NumDims> m_strides;
+ array<IndexDivisor, NumDims> m_fast_strides;
Generator m_generator;
#ifdef EIGEN_USE_SYCL
TensorEvaluator<ArgType, Device> m_argImpl;
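The new m_fast_strides member only accelerates the divisions in extract_coordinates(); the decomposition itself is the usual stride arithmetic. A plain standalone sketch (hypothetical helper, not Eigen API) for the column-major case:

#include <array>
#include <cstddef>
#include <cstdint>

template <std::size_t NumDims>
std::array<std::int64_t, NumDims> extract_coords_col_major(
    std::int64_t index, const std::array<std::int64_t, NumDims>& strides) {
  std::array<std::int64_t, NumDims> coords{};
  for (std::size_t i = NumDims - 1; i > 0; --i) {
    const std::int64_t idx = index / strides[i];  // TensorIntDivisor speeds this up
    index -= idx * strides[i];
    coords[i] = idx;
  }
  coords[0] = index;  // the inner-most dimension has stride 1
  return coords;
}

// E.g. for dimensions {4, 5, 6} the column-major strides are {1, 4, 20}, and
// index 53 = 2*20 + 3*4 + 1 decomposes into coords {1, 3, 2}.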
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h b/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h
index 50fa0cb2e..bb63433fe 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h
@@ -402,25 +402,25 @@ struct OuterReducer {
#if defined(EIGEN_USE_GPU) && (defined(EIGEN_GPUCC))
-template <int B, int N, typename S, typename R, typename I>
-__global__ void FullReductionKernel(R, const S, I, typename S::CoeffReturnType*, unsigned int*);
+template <int B, int N, typename S, typename R, typename I_>
+__global__ void FullReductionKernel(R, const S, I_, typename S::CoeffReturnType*, unsigned int*);
#if defined(EIGEN_HAS_GPU_FP16)
-template <typename S, typename R, typename I>
-__global__ void ReductionInitFullReduxKernelHalfFloat(R, const S, I, half2*);
-template <int B, int N, typename S, typename R, typename I>
-__global__ void FullReductionKernelHalfFloat(R, const S, I, half*, half2*);
-template <int NPT, typename S, typename R, typename I>
-__global__ void InnerReductionKernelHalfFloat(R, const S, I, I, half*);
+template <typename S, typename R, typename I_>
+__global__ void ReductionInitFullReduxKernelHalfFloat(R, const S, I_, half2*);
+template <int B, int N, typename S, typename R, typename I_>
+__global__ void FullReductionKernelHalfFloat(R, const S, I_, half*, half2*);
+template <int NPT, typename S, typename R, typename I_>
+__global__ void InnerReductionKernelHalfFloat(R, const S, I_, I_, half*);
#endif
-template <int NPT, typename S, typename R, typename I>
-__global__ void InnerReductionKernel(R, const S, I, I, typename S::CoeffReturnType*);
+template <int NPT, typename S, typename R, typename I_>
+__global__ void InnerReductionKernel(R, const S, I_, I_, typename S::CoeffReturnType*);
-template <int NPT, typename S, typename R, typename I>
-__global__ void OuterReductionKernel(R, const S, I, I, typename S::CoeffReturnType*);
+template <int NPT, typename S, typename R, typename I_>
+__global__ void OuterReductionKernel(R, const S, I_, I_, typename S::CoeffReturnType*);
#endif
template <typename Self, typename Op,
@@ -1114,15 +1114,15 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>,
template <typename S, typename O, bool V> friend struct internal::FullReducerShard;
#endif
#if defined(EIGEN_USE_GPU) && (defined(EIGEN_GPUCC))
- template <int B, int N, typename S, typename R, typename I> KERNEL_FRIEND void internal::FullReductionKernel(R, const S, I, typename S::CoeffReturnType*, unsigned int*);
+ template <int B, int N, typename S, typename R, typename I_> KERNEL_FRIEND void internal::FullReductionKernel(R, const S, I_, typename S::CoeffReturnType*, unsigned int*);
#if defined(EIGEN_HAS_GPU_FP16)
- template <typename S, typename R, typename I> KERNEL_FRIEND void internal::ReductionInitFullReduxKernelHalfFloat(R, const S, I, half2*);
- template <int B, int N, typename S, typename R, typename I> KERNEL_FRIEND void internal::FullReductionKernelHalfFloat(R, const S, I, half*, half2*);
- template <int NPT, typename S, typename R, typename I> KERNEL_FRIEND void internal::InnerReductionKernelHalfFloat(R, const S, I, I, half*);
+ template <typename S, typename R, typename I_> KERNEL_FRIEND void internal::ReductionInitFullReduxKernelHalfFloat(R, const S, I_, half2*);
+ template <int B, int N, typename S, typename R, typename I_> KERNEL_FRIEND void internal::FullReductionKernelHalfFloat(R, const S, I_, half*, half2*);
+ template <int NPT, typename S, typename R, typename I_> KERNEL_FRIEND void internal::InnerReductionKernelHalfFloat(R, const S, I_, I_, half*);
#endif
- template <int NPT, typename S, typename R, typename I> KERNEL_FRIEND void internal::InnerReductionKernel(R, const S, I, I, typename S::CoeffReturnType*);
+ template <int NPT, typename S, typename R, typename I_> KERNEL_FRIEND void internal::InnerReductionKernel(R, const S, I_, I_, typename S::CoeffReturnType*);
- template <int NPT, typename S, typename R, typename I> KERNEL_FRIEND void internal::OuterReductionKernel(R, const S, I, I, typename S::CoeffReturnType*);
+ template <int NPT, typename S, typename R, typename I_> KERNEL_FRIEND void internal::OuterReductionKernel(R, const S, I_, I_, typename S::CoeffReturnType*);
#endif
#if defined(EIGEN_USE_SYCL)
diff --git a/unsupported/Eigen/CXX11/src/ThreadPool/EventCount.h b/unsupported/Eigen/CXX11/src/ThreadPool/EventCount.h
index 7a9ebe40a..8b3b210b1 100644
--- a/unsupported/Eigen/CXX11/src/ThreadPool/EventCount.h
+++ b/unsupported/Eigen/CXX11/src/ThreadPool/EventCount.h
@@ -20,7 +20,8 @@ namespace Eigen {
// if (predicate)
// return act();
// EventCount::Waiter& w = waiters[my_index];
-// ec.Prewait(&w);
+// if (!ec.Prewait(&w))
+// return act();
// if (predicate) {
// ec.CancelWait(&w);
// return act();
@@ -50,78 +51,78 @@ class EventCount {
public:
class Waiter;
- EventCount(MaxSizeVector<Waiter>& waiters) : waiters_(waiters) {
+ EventCount(MaxSizeVector<Waiter>& waiters)
+ : state_(kStackMask), waiters_(waiters) {
eigen_plain_assert(waiters.size() < (1 << kWaiterBits) - 1);
- // Initialize epoch to something close to overflow to test overflow.
- state_ = kStackMask | (kEpochMask - kEpochInc * waiters.size() * 2);
}
~EventCount() {
// Ensure there are no waiters.
- eigen_plain_assert((state_.load() & (kStackMask | kWaiterMask)) == kStackMask);
+ eigen_plain_assert(state_.load() == kStackMask);
}
// Prewait prepares for waiting.
- // After calling this function the thread must re-check the wait predicate
- // and call either CancelWait or CommitWait passing the same Waiter object.
- void Prewait(Waiter* w) {
- w->epoch = state_.fetch_add(kWaiterInc, std::memory_order_relaxed);
- std::atomic_thread_fence(std::memory_order_seq_cst);
+ // If Prewait returns true, the thread must re-check the wait predicate
+ // and then call either CancelWait or CommitWait.
+ // Otherwise, the thread should assume the predicate may be true
+ // and must not call CancelWait/CommitWait (there was a concurrent Notify call).
+ bool Prewait() {
+ uint64_t state = state_.load(std::memory_order_relaxed);
+ for (;;) {
+ CheckState(state);
+ uint64_t newstate = state + kWaiterInc;
+ if ((state & kSignalMask) != 0) {
+ // Consume the signal and cancel waiting.
+ newstate -= kSignalInc + kWaiterInc;
+ }
+ CheckState(newstate);
+ if (state_.compare_exchange_weak(state, newstate,
+ std::memory_order_seq_cst))
+ return (state & kSignalMask) == 0;
+ }
}
- // CommitWait commits waiting.
+ // CommitWait commits waiting after Prewait.
void CommitWait(Waiter* w) {
+ eigen_plain_assert((w->epoch & ~kEpochMask) == 0);
w->state = Waiter::kNotSignaled;
- // Modification epoch of this waiter.
- uint64_t epoch =
- (w->epoch & kEpochMask) +
- (((w->epoch & kWaiterMask) >> kWaiterShift) << kEpochShift);
+ const uint64_t me = (w - &waiters_[0]) | w->epoch;
uint64_t state = state_.load(std::memory_order_seq_cst);
for (;;) {
- if (int64_t((state & kEpochMask) - epoch) < 0) {
- // The preceding waiter has not decided on its fate. Wait until it
- // calls either CancelWait or CommitWait, or is notified.
- EIGEN_THREAD_YIELD();
- state = state_.load(std::memory_order_seq_cst);
- continue;
+ CheckState(state, true);
+ uint64_t newstate;
+ if ((state & kSignalMask) != 0) {
+ // Consume the signal and return immediately.
+ newstate = state - kWaiterInc - kSignalInc;
+ } else {
+ // Remove this thread from pre-wait counter and add to the waiter stack.
+ newstate = ((state & kWaiterMask) - kWaiterInc) | me;
+ w->next.store(state & (kStackMask | kEpochMask),
+ std::memory_order_relaxed);
}
- // We've already been notified.
- if (int64_t((state & kEpochMask) - epoch) > 0) return;
- // Remove this thread from prewait counter and add it to the waiter list.
- eigen_plain_assert((state & kWaiterMask) != 0);
- uint64_t newstate = state - kWaiterInc + kEpochInc;
- newstate = (newstate & ~kStackMask) | (w - &waiters_[0]);
- if ((state & kStackMask) == kStackMask)
- w->next.store(nullptr, std::memory_order_relaxed);
- else
- w->next.store(&waiters_[state & kStackMask], std::memory_order_relaxed);
+ CheckState(newstate);
if (state_.compare_exchange_weak(state, newstate,
- std::memory_order_release))
- break;
+ std::memory_order_acq_rel)) {
+ if ((state & kSignalMask) == 0) {
+ w->epoch += kEpochInc;
+ Park(w);
+ }
+ return;
+ }
}
- Park(w);
}
// CancelWait cancels effects of the previous Prewait call.
- void CancelWait(Waiter* w) {
- uint64_t epoch =
- (w->epoch & kEpochMask) +
- (((w->epoch & kWaiterMask) >> kWaiterShift) << kEpochShift);
+ void CancelWait() {
uint64_t state = state_.load(std::memory_order_relaxed);
for (;;) {
- if (int64_t((state & kEpochMask) - epoch) < 0) {
- // The preceding waiter has not decided on its fate. Wait until it
- // calls either CancelWait or CommitWait, or is notified.
- EIGEN_THREAD_YIELD();
- state = state_.load(std::memory_order_relaxed);
- continue;
- }
- // We've already been notified.
- if (int64_t((state & kEpochMask) - epoch) > 0) return;
- // Remove this thread from prewait counter.
- eigen_plain_assert((state & kWaiterMask) != 0);
- if (state_.compare_exchange_weak(state, state - kWaiterInc + kEpochInc,
- std::memory_order_relaxed))
+ CheckState(state, true);
+ uint64_t newstate = state - kWaiterInc;
+ // Also take away a signal if any.
+ if ((state & kSignalMask) != 0) newstate -= kSignalInc;
+ CheckState(newstate);
+ if (state_.compare_exchange_weak(state, newstate,
+ std::memory_order_acq_rel))
return;
}
}
@@ -132,35 +133,33 @@ class EventCount {
std::atomic_thread_fence(std::memory_order_seq_cst);
uint64_t state = state_.load(std::memory_order_acquire);
for (;;) {
+ CheckState(state);
+ const uint64_t waiters = (state & kWaiterMask) >> kWaiterShift;
+ const uint64_t signals = (state & kSignalMask) >> kSignalShift;
// Easy case: no waiters.
- if ((state & kStackMask) == kStackMask && (state & kWaiterMask) == 0)
- return;
- uint64_t waiters = (state & kWaiterMask) >> kWaiterShift;
+ if ((state & kStackMask) == kStackMask && waiters == signals) return;
uint64_t newstate;
if (notifyAll) {
- // Reset prewait counter and empty wait list.
- newstate = (state & kEpochMask) + (kEpochInc * waiters) + kStackMask;
- } else if (waiters) {
+ // Empty wait stack and set signal to number of pre-wait threads.
+ newstate =
+ (state & kWaiterMask) | (waiters << kSignalShift) | kStackMask;
+ } else if (signals < waiters) {
// There is a thread in pre-wait state, unblock it.
- newstate = state + kEpochInc - kWaiterInc;
+ newstate = state + kSignalInc;
} else {
// Pop a waiter from list and unpark it.
Waiter* w = &waiters_[state & kStackMask];
- Waiter* wnext = w->next.load(std::memory_order_relaxed);
- uint64_t next = kStackMask;
- if (wnext != nullptr) next = wnext - &waiters_[0];
- // Note: we don't add kEpochInc here. ABA problem on the lock-free stack
- // can't happen because a waiter is re-pushed onto the stack only after
- // it was in the pre-wait state which inevitably leads to epoch
- // increment.
- newstate = (state & kEpochMask) + next;
+ uint64_t next = w->next.load(std::memory_order_relaxed);
+ newstate = (state & (kWaiterMask | kSignalMask)) | next;
}
+ CheckState(newstate);
if (state_.compare_exchange_weak(state, newstate,
- std::memory_order_acquire)) {
- if (!notifyAll && waiters) return; // unblocked pre-wait thread
+ std::memory_order_acq_rel)) {
+ if (!notifyAll && (signals < waiters))
+ return; // unblocked pre-wait thread
if ((state & kStackMask) == kStackMask) return;
Waiter* w = &waiters_[state & kStackMask];
- if (!notifyAll) w->next.store(nullptr, std::memory_order_relaxed);
+ if (!notifyAll) w->next.store(kStackMask, std::memory_order_relaxed);
Unpark(w);
return;
}
@@ -171,11 +170,11 @@ class EventCount {
friend class EventCount;
// Align to 128 byte boundary to prevent false sharing with other Waiter
// objects in the same vector.
- EIGEN_ALIGN_TO_BOUNDARY(128) std::atomic<Waiter*> next;
+ EIGEN_ALIGN_TO_BOUNDARY(128) std::atomic<uint64_t> next;
std::mutex mu;
std::condition_variable cv;
- uint64_t epoch;
- unsigned state;
+ uint64_t epoch = 0;
+ unsigned state = kNotSignaled;
enum {
kNotSignaled,
kWaiting,
@@ -185,23 +184,41 @@ class EventCount {
private:
// State_ layout:
- // - low kStackBits is a stack of waiters committed wait.
+ // - low kWaiterBits is a stack of waiters that have committed waiting
+ // (indexes in waiters_ array are used as stack elements,
+ // kStackMask means empty stack).
// - next kWaiterBits is count of waiters in prewait state.
- // - next kEpochBits is modification counter.
- static const uint64_t kStackBits = 16;
- static const uint64_t kStackMask = (1ull << kStackBits) - 1;
- static const uint64_t kWaiterBits = 16;
- static const uint64_t kWaiterShift = 16;
+ // - next kWaiterBits is count of pending signals.
+ // - remaining bits are ABA counter for the stack.
+ // (stored in Waiter node and incremented on push).
+ static const uint64_t kWaiterBits = 14;
+ static const uint64_t kStackMask = (1ull << kWaiterBits) - 1;
+ static const uint64_t kWaiterShift = kWaiterBits;
static const uint64_t kWaiterMask = ((1ull << kWaiterBits) - 1)
<< kWaiterShift;
- static const uint64_t kWaiterInc = 1ull << kWaiterBits;
- static const uint64_t kEpochBits = 32;
- static const uint64_t kEpochShift = 32;
+ static const uint64_t kWaiterInc = 1ull << kWaiterShift;
+ static const uint64_t kSignalShift = 2 * kWaiterBits;
+ static const uint64_t kSignalMask = ((1ull << kWaiterBits) - 1)
+ << kSignalShift;
+ static const uint64_t kSignalInc = 1ull << kSignalShift;
+ static const uint64_t kEpochShift = 3 * kWaiterBits;
+ static const uint64_t kEpochBits = 64 - kEpochShift;
static const uint64_t kEpochMask = ((1ull << kEpochBits) - 1) << kEpochShift;
static const uint64_t kEpochInc = 1ull << kEpochShift;
std::atomic<uint64_t> state_;
MaxSizeVector<Waiter>& waiters_;
+ static void CheckState(uint64_t state, bool waiter = false) {
+ static_assert(kEpochBits >= 20, "not enough bits to prevent ABA problem");
+ const uint64_t waiters = (state & kWaiterMask) >> kWaiterShift;
+ const uint64_t signals = (state & kSignalMask) >> kSignalShift;
+ eigen_plain_assert(waiters >= signals);
+ eigen_plain_assert(waiters < (1 << kWaiterBits) - 1);
+ eigen_plain_assert(!waiter || waiters > 0);
+ (void)waiters;
+ (void)signals;
+ }
+
void Park(Waiter* w) {
std::unique_lock<std::mutex> lock(w->mu);
while (w->state != Waiter::kSignaled) {
@@ -210,10 +227,10 @@ class EventCount {
}
}
- void Unpark(Waiter* waiters) {
- Waiter* next = nullptr;
- for (Waiter* w = waiters; w; w = next) {
- next = w->next.load(std::memory_order_relaxed);
+ void Unpark(Waiter* w) {
+ for (Waiter* next; w; w = next) {
+ uint64_t wnext = w->next.load(std::memory_order_relaxed) & kStackMask;
+ next = wnext == kStackMask ? nullptr : &waiters_[wnext];
unsigned state;
{
std::unique_lock<std::mutex> lock(w->mu);
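With kWaiterBits = 14, the new state_ word packs four fields: the waiter-stack top in bits 0-13 (kStackMask denotes an empty stack), the pre-wait counter in bits 14-27, the signal counter in bits 28-41, and a 22-bit ABA epoch in the remaining bits. A compilable sketch of the decoding, with the constants re-declared locally for illustration:

#include <cstdint>

struct EcState {
  std::uint64_t stack;    // index of the top waiter; kStackMask means empty
  std::uint64_t waiters;  // number of threads in pre-wait state
  std::uint64_t signals;  // number of pending signals
  std::uint64_t epoch;    // ABA counter for the waiter stack
};

inline EcState decode_state(std::uint64_t state) {
  const std::uint64_t kWaiterBits  = 14;
  const std::uint64_t kStackMask   = (1ull << kWaiterBits) - 1;
  const std::uint64_t kWaiterShift = kWaiterBits;
  const std::uint64_t kWaiterMask  = ((1ull << kWaiterBits) - 1) << kWaiterShift;
  const std::uint64_t kSignalShift = 2 * kWaiterBits;
  const std::uint64_t kSignalMask  = ((1ull << kWaiterBits) - 1) << kSignalShift;
  const std::uint64_t kEpochShift  = 3 * kWaiterBits;
  return EcState{state & kStackMask,
                 (state & kWaiterMask) >> kWaiterShift,
                 (state & kSignalMask) >> kSignalShift,
                 state >> kEpochShift};
}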
diff --git a/unsupported/Eigen/CXX11/src/ThreadPool/NonBlockingThreadPool.h b/unsupported/Eigen/CXX11/src/ThreadPool/NonBlockingThreadPool.h
index 8fafcdab5..9e54254c1 100644
--- a/unsupported/Eigen/CXX11/src/ThreadPool/NonBlockingThreadPool.h
+++ b/unsupported/Eigen/CXX11/src/ThreadPool/NonBlockingThreadPool.h
@@ -29,6 +29,7 @@ class ThreadPoolTempl : public Eigen::ThreadPoolInterface {
thread_data_(num_threads),
all_coprimes_(num_threads),
waiters_(num_threads),
+ global_steal_partition_(EncodePartition(0, num_threads_)),
blocked_(0),
spinning_(0),
done_(false),
@@ -237,6 +238,7 @@ class ThreadPoolTempl : public Eigen::ThreadPoolInterface {
MaxSizeVector<ThreadData> thread_data_;
MaxSizeVector<MaxSizeVector<unsigned>> all_coprimes_;
MaxSizeVector<EventCount::Waiter> waiters_;
+ unsigned global_steal_partition_;
std::atomic<unsigned> blocked_;
std::atomic<bool> spinning_;
std::atomic<bool> done_;
@@ -354,6 +356,9 @@ class ThreadPoolTempl : public Eigen::ThreadPoolInterface {
Task LocalSteal() {
PerThread* pt = GetPerThread();
unsigned partition = GetStealPartition(pt->thread_id);
+ // If the thread's steal partition is the same as the global partition, there
+ // is no need to go through the steal loop twice.
+ if (global_steal_partition_ == partition) return Task();
unsigned start, limit;
DecodePartition(partition, &start, &limit);
AssertBounds(start, limit);
@@ -374,11 +379,11 @@ class ThreadPoolTempl : public Eigen::ThreadPoolInterface {
eigen_plain_assert(!t->f);
// We already did best-effort emptiness check in Steal, so prepare for
// blocking.
- ec_.Prewait(waiter);
+ if (!ec_.Prewait()) return true;
// Now do a reliable emptiness check.
int victim = NonEmptyQueueIndex();
if (victim != -1) {
- ec_.CancelWait(waiter);
+ ec_.CancelWait();
if (cancelled_) {
return false;
} else {
@@ -392,7 +397,7 @@ class ThreadPoolTempl : public Eigen::ThreadPoolInterface {
blocked_++;
// TODO is blocked_ required to be unsigned?
if (done_ && blocked_ == static_cast<unsigned>(num_threads_)) {
- ec_.CancelWait(waiter);
+ ec_.CancelWait();
// Almost done, but need to re-check queues.
// Consider that all queues are empty and all worker threads are preempted
// right after incrementing blocked_ above. Now a free-standing thread
diff --git a/unsupported/Eigen/CXX11/src/ThreadPool/RunQueue.h b/unsupported/Eigen/CXX11/src/ThreadPool/RunQueue.h
index 73928c1d4..a9ae05fc6 100644
--- a/unsupported/Eigen/CXX11/src/ThreadPool/RunQueue.h
+++ b/unsupported/Eigen/CXX11/src/ThreadPool/RunQueue.h
@@ -150,30 +150,11 @@ class RunQueue {
// Size returns current queue size.
// Can be called by any thread at any time.
- unsigned Size() const {
- // Emptiness plays critical role in thread pool blocking. So we go to great
- // effort to not produce false positives (claim non-empty queue as empty).
- for (;;) {
- // Capture a consistent snapshot of front/tail.
- unsigned front = front_.load(std::memory_order_acquire);
- unsigned back = back_.load(std::memory_order_acquire);
- unsigned front1 = front_.load(std::memory_order_relaxed);
- if (front != front1) continue;
- int size = (front & kMask2) - (back & kMask2);
- // Fix overflow.
- if (size < 0) size += 2 * kSize;
- // Order of modification in push/pop is crafted to make the queue look
- // larger than it is during concurrent modifications. E.g. pop can
- // decrement size before the corresponding push has incremented it.
- // So the computed size can be up to kSize + 1, fix it.
- if (size > static_cast<int>(kSize)) size = kSize;
- return size;
- }
- }
+ unsigned Size() const { return SizeOrNotEmpty<true>(); }
// Empty tests whether container is empty.
// Can be called by any thread at any time.
- bool Empty() const { return Size() == 0; }
+ bool Empty() const { return SizeOrNotEmpty<false>() == 0; }
// Delete all the elements from the queue.
void Flush() {
@@ -206,6 +187,49 @@ class RunQueue {
std::atomic<unsigned> back_;
Elem array_[kSize];
+ // SizeOrNotEmpty returns current queue size; if NeedSizeEstimate is false,
+ // only whether the size is 0 is guaranteed to be correct.
+ // Can be called by any thread at any time.
+ template<bool NeedSizeEstimate>
+ unsigned SizeOrNotEmpty() const {
+ // Emptiness plays a critical role in thread pool blocking. So we go to great
+ // effort to not produce false positives (claim non-empty queue as empty).
+ unsigned front = front_.load(std::memory_order_acquire);
+ for (;;) {
+ // Capture a consistent snapshot of front/tail.
+ unsigned back = back_.load(std::memory_order_acquire);
+ unsigned front1 = front_.load(std::memory_order_relaxed);
+ if (front != front1) {
+ front = front1;
+ std::atomic_thread_fence(std::memory_order_acquire);
+ continue;
+ }
+ if (NeedSizeEstimate) {
+ return CalculateSize(front, back);
+ } else {
+ // This value will be 0 if the queue is empty, and undefined otherwise.
+ unsigned maybe_zero = ((front ^ back) & kMask2);
+ // The size estimate and the maybe_zero check must agree on whether the
+ // queue is empty or non-empty.
+ eigen_assert((CalculateSize(front, back) == 0) == (maybe_zero == 0));
+ return maybe_zero;
+ }
+ }
+ }
+
+ EIGEN_ALWAYS_INLINE
+ unsigned CalculateSize(unsigned front, unsigned back) const {
+ int size = (front & kMask2) - (back & kMask2);
+ // Fix overflow.
+ if (size < 0) size += 2 * kSize;
+ // Order of modification in push/pop is crafted to make the queue look
+ // larger than it is during concurrent modifications. E.g. push can
+ // increment size before the corresponding pop has decremented it.
+ // So the computed size can be up to kSize + 1, fix it.
+ if (size > static_cast<int>(kSize)) size = kSize;
+ return static_cast<unsigned>(size);
+ }
+
RunQueue(const RunQueue&) = delete;
void operator=(const RunQueue&) = delete;
};
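The CalculateSize() helper factored out above works on counters taken modulo 2*kSize (kMask2 = 2*kSize - 1): a negative masked difference means the counters wrapped, and a transient value of kSize + 1 can be observed during concurrent push/pop, hence the clamp. A standalone sketch with illustrative values (kSize is derived from a template parameter in the real RunQueue):

#include <cassert>

unsigned calculate_size(unsigned front, unsigned back,
                        unsigned kSize, unsigned kMask2) {
  int size = static_cast<int>(front & kMask2) - static_cast<int>(back & kMask2);
  if (size < 0) size += 2 * static_cast<int>(kSize);  // counters wrap modulo 2*kSize
  if (size > static_cast<int>(kSize)) size = kSize;   // transient over-count during push/pop
  return static_cast<unsigned>(size);
}

void calculate_size_example() {
  const unsigned kSize = 1024, kMask2 = 2 * kSize - 1;
  assert(calculate_size(5, 5, kSize, kMask2) == 0);         // empty queue
  assert(calculate_size(2050, 2040, kSize, kMask2) == 10);  // 10 elements
  assert(calculate_size(3, 2045, kSize, kMask2) == 6);      // wrapped counters
}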
diff --git a/unsupported/Eigen/CXX11/src/ThreadPool/ThreadLocal.h b/unsupported/Eigen/CXX11/src/ThreadPool/ThreadLocal.h
index 7229839ac..696c2d03b 100644
--- a/unsupported/Eigen/CXX11/src/ThreadPool/ThreadLocal.h
+++ b/unsupported/Eigen/CXX11/src/ThreadPool/ThreadLocal.h
@@ -10,6 +10,14 @@
#ifndef EIGEN_CXX11_THREADPOOL_THREAD_LOCAL_H
#define EIGEN_CXX11_THREADPOOL_THREAD_LOCAL_H
+#ifdef EIGEN_AVOID_THREAD_LOCAL
+
+#ifdef EIGEN_THREAD_LOCAL
+#undef EIGEN_THREAD_LOCAL
+#endif
+
+#else
+
#if EIGEN_MAX_CPP_VER >= 11 && \
((EIGEN_COMP_GNUC && EIGEN_GNUC_AT_LEAST(4, 8)) || \
__has_feature(cxx_thread_local) || \
@@ -52,4 +60,6 @@
#endif
#endif // defined(__ANDROID__) && defined(__clang__)
+#endif // EIGEN_AVOID_THREAD_LOCAL
+
#endif // EIGEN_CXX11_THREADPOOL_THREAD_LOCAL_H
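The new opt-out is driven purely by the preprocessor; a client that wants to avoid thread_local (for example on platforms where it is unreliable) would define the macro before including any thread-pool header, roughly as follows:

// Sketch of client-side usage of the new macro.
#define EIGEN_AVOID_THREAD_LOCAL
#include <unsupported/Eigen/CXX11/ThreadPool>
// EIGEN_THREAD_LOCAL is now left undefined, so the thread-pool code uses its
// alternative, non-thread_local code paths.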
diff --git a/unsupported/Eigen/CXX11/src/util/CXX11Workarounds.h b/unsupported/Eigen/CXX11/src/util/CXX11Workarounds.h
index fe4d22803..f1c0284ea 100644
--- a/unsupported/Eigen/CXX11/src/util/CXX11Workarounds.h
+++ b/unsupported/Eigen/CXX11/src/util/CXX11Workarounds.h
@@ -47,9 +47,9 @@ namespace internal {
*/
-template<std::size_t I, class T> constexpr inline T& array_get(std::vector<T>& a) { return a[I]; }
-template<std::size_t I, class T> constexpr inline T&& array_get(std::vector<T>&& a) { return a[I]; }
-template<std::size_t I, class T> constexpr inline T const& array_get(std::vector<T> const& a) { return a[I]; }
+template<std::size_t I_, class T> constexpr inline T& array_get(std::vector<T>& a) { return a[I_]; }
+template<std::size_t I_, class T> constexpr inline T&& array_get(std::vector<T>&& a) { return a[I_]; }
+template<std::size_t I_, class T> constexpr inline T const& array_get(std::vector<T> const& a) { return a[I_]; }
/* Suppose you have a template of the form
* template<typename T> struct X;
diff --git a/unsupported/Eigen/CXX11/src/util/EmulateArray.h b/unsupported/Eigen/CXX11/src/util/EmulateArray.h
index 39c255791..834b20b55 100644
--- a/unsupported/Eigen/CXX11/src/util/EmulateArray.h
+++ b/unsupported/Eigen/CXX11/src/util/EmulateArray.h
@@ -197,13 +197,13 @@ EIGEN_DEVICE_FUNC bool operator==(const array<T,N>& lhs, const array<T,N>& rhs)
namespace internal {
-template<std::size_t I, class T, std::size_t N>
+template<std::size_t I_, class T, std::size_t N>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T& array_get(array<T,N>& a) {
- return a[I];
+ return a[I_];
}
-template<std::size_t I, class T, std::size_t N>
+template<std::size_t I_, class T, std::size_t N>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const T& array_get(const array<T,N>& a) {
- return a[I];
+ return a[I_];
}
template<class T, std::size_t N> struct array_size<array<T,N> > {
@@ -240,16 +240,16 @@ namespace internal {
* this may not be constexpr
*/
#if defined(__GLIBCXX__) && __GLIBCXX__ < 20120322
-#define STD_GET_ARR_HACK a._M_instance[I]
+#define STD_GET_ARR_HACK a._M_instance[I_]
#elif defined(_LIBCPP_VERSION)
-#define STD_GET_ARR_HACK a.__elems_[I]
+#define STD_GET_ARR_HACK a.__elems_[I_]
#else
-#define STD_GET_ARR_HACK std::template get<I, T, N>(a)
+#define STD_GET_ARR_HACK std::template get<I_, T, N>(a)
#endif
-template<std::size_t I, class T, std::size_t N> constexpr inline T& array_get(std::array<T,N>& a) { return (T&) STD_GET_ARR_HACK; }
-template<std::size_t I, class T, std::size_t N> constexpr inline T&& array_get(std::array<T,N>&& a) { return (T&&) STD_GET_ARR_HACK; }
-template<std::size_t I, class T, std::size_t N> constexpr inline T const& array_get(std::array<T,N> const& a) { return (T const&) STD_GET_ARR_HACK; }
+template<std::size_t I_, class T, std::size_t N> constexpr inline T& array_get(std::array<T,N>& a) { return (T&) STD_GET_ARR_HACK; }
+template<std::size_t I_, class T, std::size_t N> constexpr inline T&& array_get(std::array<T,N>&& a) { return (T&&) STD_GET_ARR_HACK; }
+template<std::size_t I_, class T, std::size_t N> constexpr inline T const& array_get(std::array<T,N> const& a) { return (T const&) STD_GET_ARR_HACK; }
#undef STD_GET_ARR_HACK
diff --git a/unsupported/Eigen/CXX11/src/util/EmulateCXX11Meta.h b/unsupported/Eigen/CXX11/src/util/EmulateCXX11Meta.h
index 8a536faf6..d02d86f85 100644
--- a/unsupported/Eigen/CXX11/src/util/EmulateCXX11Meta.h
+++ b/unsupported/Eigen/CXX11/src/util/EmulateCXX11Meta.h
@@ -166,13 +166,13 @@ array<t, n> repeat(t v) {
return array;
}
-template<std::size_t I, class Head, class Tail>
+template<std::size_t I_, class Head, class Tail>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename Head::type array_get(type_list<Head, Tail>&) {
- return get<I, type_list<Head, Tail> >::value;
+ return get<I_, type_list<Head, Tail> >::value;
}
-template<std::size_t I, class Head, class Tail>
+template<std::size_t I_, class Head, class Tail>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename Head::type array_get(const type_list<Head, Tail>&) {
- return get<I, type_list<Head, Tail> >::value;
+ return get<I_, type_list<Head, Tail> >::value;
}
template <class NList>
@@ -200,13 +200,13 @@ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE t array_prod(const std::vector<t>& a) {
}
-template<std::size_t I, class T>
+template<std::size_t I_, class T>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T& array_get(std::vector<T>& a) {
- return a[I];
+ return a[I_];
}
-template<std::size_t I, class T>
+template<std::size_t I_, class T>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const T& array_get(const std::vector<T>& a) {
- return a[I];
+ return a[I_];
}
struct sum_op {
diff --git a/unsupported/Eigen/src/Eigenvalues/ArpackSelfAdjointEigenSolver.h b/unsupported/Eigen/src/Eigenvalues/ArpackSelfAdjointEigenSolver.h
index 3c6cfb1e3..0fbd84772 100644
--- a/unsupported/Eigen/src/Eigenvalues/ArpackSelfAdjointEigenSolver.h
+++ b/unsupported/Eigen/src/Eigenvalues/ArpackSelfAdjointEigenSolver.h
@@ -3,24 +3,9 @@
//
// Copyright (C) 2012 David Harmon <dharmon@gmail.com>
//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_ARPACKGENERALIZEDSELFADJOINTEIGENSOLVER_H
#define EIGEN_ARPACKGENERALIZEDSELFADJOINTEIGENSOLVER_H
diff --git a/unsupported/Eigen/src/EulerAngles/EulerSystem.h b/unsupported/Eigen/src/EulerAngles/EulerSystem.h
index 88acabcf8..2a833b0a4 100644
--- a/unsupported/Eigen/src/EulerAngles/EulerSystem.h
+++ b/unsupported/Eigen/src/EulerAngles/EulerSystem.h
@@ -177,9 +177,9 @@ namespace Eigen
// I, J, K are the pivot indexes permutation for the rotation matrix, that match this Euler system.
// They are used in this class converters.
// They are always different from each other, and their possible values are: 0, 1, or 2.
- I = AlphaAxisAbs - 1,
- J = (AlphaAxisAbs - 1 + 1 + IsOdd)%3,
- K = (AlphaAxisAbs - 1 + 2 - IsOdd)%3
+ I_ = AlphaAxisAbs - 1,
+ J_ = (AlphaAxisAbs - 1 + 1 + IsOdd)%3,
+ K_ = (AlphaAxisAbs - 1 + 2 - IsOdd)%3
;
// TODO: Get @mat parameter in form that avoids double evaluation.
@@ -194,24 +194,24 @@ namespace Eigen
const Scalar plusMinus = IsEven? 1 : -1;
const Scalar minusPlus = IsOdd? 1 : -1;
- const Scalar Rsum = sqrt((mat(I,I) * mat(I,I) + mat(I,J) * mat(I,J) + mat(J,K) * mat(J,K) + mat(K,K) * mat(K,K))/2);
- res[1] = atan2(plusMinus * mat(I,K), Rsum);
+ const Scalar Rsum = sqrt((mat(I_,I_) * mat(I_,I_) + mat(I_,J_) * mat(I_,J_) + mat(J_,K_) * mat(J_,K_) + mat(K_,K_) * mat(K_,K_))/2);
+ res[1] = atan2(plusMinus * mat(I_,K_), Rsum);
// There is a singularity when cos(beta) == 0
if(Rsum > 4 * NumTraits<Scalar>::epsilon()) {// cos(beta) != 0
- res[0] = atan2(minusPlus * mat(J, K), mat(K, K));
- res[2] = atan2(minusPlus * mat(I, J), mat(I, I));
+ res[0] = atan2(minusPlus * mat(J_, K_), mat(K_, K_));
+ res[2] = atan2(minusPlus * mat(I_, J_), mat(I_, I_));
}
- else if(plusMinus * mat(I, K) > 0) {// cos(beta) == 0 and sin(beta) == 1
- Scalar spos = mat(J, I) + plusMinus * mat(K, J); // 2*sin(alpha + plusMinus * gamma
- Scalar cpos = mat(J, J) + minusPlus * mat(K, I); // 2*cos(alpha + plusMinus * gamma)
+ else if(plusMinus * mat(I_, K_) > 0) {// cos(beta) == 0 and sin(beta) == 1
+ Scalar spos = mat(J_, I_) + plusMinus * mat(K_, J_); // 2*sin(alpha + plusMinus * gamma)
+ Scalar cpos = mat(J_, J_) + minusPlus * mat(K_, I_); // 2*cos(alpha + plusMinus * gamma)
Scalar alphaPlusMinusGamma = atan2(spos, cpos);
res[0] = alphaPlusMinusGamma;
res[2] = 0;
}
else {// cos(beta) == 0 and sin(beta) == -1
- Scalar sneg = plusMinus * (mat(K, J) + minusPlus * mat(J, I)); // 2*sin(alpha + minusPlus*gamma)
- Scalar cneg = mat(J, J) + plusMinus * mat(K, I); // 2*cos(alpha + minusPlus*gamma)
+ Scalar sneg = plusMinus * (mat(K_, J_) + minusPlus * mat(J_, I_)); // 2*sin(alpha + minusPlus*gamma)
+ Scalar cneg = mat(J_, J_) + plusMinus * mat(K_, I_); // 2*cos(alpha + minusPlus*gamma)
Scalar alphaMinusPlusBeta = atan2(sneg, cneg);
res[0] = alphaMinusPlusBeta;
res[2] = 0;
@@ -230,24 +230,24 @@ namespace Eigen
const Scalar plusMinus = IsEven? 1 : -1;
const Scalar minusPlus = IsOdd? 1 : -1;
- const Scalar Rsum = sqrt((mat(I, J) * mat(I, J) + mat(I, K) * mat(I, K) + mat(J, I) * mat(J, I) + mat(K, I) * mat(K, I)) / 2);
+ const Scalar Rsum = sqrt((mat(I_, J_) * mat(I_, J_) + mat(I_, K_) * mat(I_, K_) + mat(J_, I_) * mat(J_, I_) + mat(K_, I_) * mat(K_, I_)) / 2);
- res[1] = atan2(Rsum, mat(I, I));
+ res[1] = atan2(Rsum, mat(I_, I_));
// There is a singularity when sin(beta) == 0
if(Rsum > 4 * NumTraits<Scalar>::epsilon()) {// sin(beta) != 0
- res[0] = atan2(mat(J, I), minusPlus * mat(K, I));
- res[2] = atan2(mat(I, J), plusMinus * mat(I, K));
+ res[0] = atan2(mat(J_, I_), minusPlus * mat(K_, I_));
+ res[2] = atan2(mat(I_, J_), plusMinus * mat(I_, K_));
}
- else if(mat(I, I) > 0) {// sin(beta) == 0 and cos(beta) == 1
- Scalar spos = plusMinus * mat(K, J) + minusPlus * mat(J, K); // 2*sin(alpha + gamma)
- Scalar cpos = mat(J, J) + mat(K, K); // 2*cos(alpha + gamma)
+ else if(mat(I_, I_) > 0) {// sin(beta) == 0 and cos(beta) == 1
+ Scalar spos = plusMinus * mat(K_, J_) + minusPlus * mat(J_, K_); // 2*sin(alpha + gamma)
+ Scalar cpos = mat(J_, J_) + mat(K_, K_); // 2*cos(alpha + gamma)
res[0] = atan2(spos, cpos);
res[2] = 0;
}
else {// sin(beta) == 0 and cos(beta) == -1
- Scalar sneg = plusMinus * mat(K, J) + plusMinus * mat(J, K); // 2*sin(alpha - gamma)
- Scalar cneg = mat(J, J) - mat(K, K); // 2*cos(alpha - gamma)
+ Scalar sneg = plusMinus * mat(K_, J_) + plusMinus * mat(J_, K_); // 2*sin(alpha - gamma)
+ Scalar cneg = mat(J_, J_) - mat(K_, K_); // 2*cos(alpha - gamma)
res[0] = atan2(sneg, cneg);
res[2] = 0;
}
diff --git a/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h b/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h
index 46f2720d0..cc12ab62b 100644
--- a/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h
+++ b/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h
@@ -72,10 +72,10 @@ MatrixType MatrixFunctionAtomic<MatrixType>::compute(const MatrixType& A)
MatrixType F = m_f(avgEival, 0) * MatrixType::Identity(rows, rows);
MatrixType P = Ashifted;
MatrixType Fincr;
- for (Index s = 1; s < 1.1 * rows + 10; s++) { // upper limit is fairly arbitrary
+ for (Index s = 1; double(s) < 1.1 * double(rows) + 10.0; s++) { // upper limit is fairly arbitrary
Fincr = m_f(avgEival, static_cast<int>(s)) * P;
F += Fincr;
- P = Scalar(RealScalar(1.0/(s + 1))) * P * Ashifted;
+ P = Scalar(RealScalar(1)/RealScalar(s + 1)) * P * Ashifted;
// test whether Taylor series converged
const RealScalar F_norm = F.cwiseAbs().rowwise().sum().maxCoeff();
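For reference, the loop above accumulates the Taylor expansion of f about the shift avgEival (written σ below): F starts at f(σ)·I, the recursion keeps P equal to (A − σI)^s / s!, and each Fincr adds the next term, so

F \;=\; \sum_{s \ge 0} \frac{f^{(s)}(\sigma)}{s!}\,(A - \sigma I)^s .

The modified loop bound only performs the comparison s < 1.1*rows + 10 in double to avoid implicit conversion warnings; the truncation criterion itself is unchanged.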
diff --git a/unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h b/unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h
index 79f3f957c..e917013e0 100644
--- a/unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h
+++ b/unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h
@@ -62,8 +62,8 @@ void matrix_log_compute_2x2(const MatrixType& A, MatrixType& result)
else
{
// computation in previous branch is inaccurate if A(1,1) \approx A(0,0)
- int unwindingNumber = static_cast<int>(ceil((imag(logA11 - logA00) - RealScalar(EIGEN_PI)) / RealScalar(2*EIGEN_PI)));
- result(0,1) = A(0,1) * (numext::log1p(y/A(0,0)) + Scalar(0,2*EIGEN_PI*unwindingNumber)) / y;
+ RealScalar unwindingNumber = ceil((imag(logA11 - logA00) - RealScalar(EIGEN_PI)) / RealScalar(2*EIGEN_PI));
+ result(0,1) = A(0,1) * (numext::log1p(y/A(0,0)) + Scalar(0,RealScalar(2*EIGEN_PI)*unwindingNumber)) / y;
}
}
@@ -135,7 +135,8 @@ void matrix_log_compute_pade(MatrixType& result, const MatrixType& T, int degree
const int minPadeDegree = 3;
const int maxPadeDegree = 11;
assert(degree >= minPadeDegree && degree <= maxPadeDegree);
-
+ // FIXME: this triggers float-conversion warnings when they are enabled.
+ // Either manually convert each value, or disable the warning locally.
const RealScalar nodes[][maxPadeDegree] = {
{ 0.1127016653792583114820734600217600L, 0.5000000000000000000000000000000000L, // degree 3
0.8872983346207416885179265399782400L },
@@ -232,12 +233,13 @@ void matrix_log_compute_big(const MatrixType& A, MatrixType& result)
int degree;
MatrixType T = A, sqrtT;
- int maxPadeDegree = matrix_log_max_pade_degree<Scalar>::value;
- const RealScalar maxNormForPade = maxPadeDegree<= 5? 5.3149729967117310e-1L: // single precision
+ const int maxPadeDegree = matrix_log_max_pade_degree<Scalar>::value;
+ const RealScalar maxNormForPade = RealScalar(
+ maxPadeDegree<= 5? 5.3149729967117310e-1L: // single precision
maxPadeDegree<= 7? 2.6429608311114350e-1L: // double precision
maxPadeDegree<= 8? 2.32777776523703892094e-1L: // extended precision
maxPadeDegree<=10? 1.05026503471351080481093652651105e-1L: // double-double
- 1.1880960220216759245467951592883642e-1L; // quadruple precision
+ 1.1880960220216759245467951592883642e-1L); // quadruple precision
while (true) {
RealScalar normTminusI = (T - MatrixType::Identity(T.rows(), T.rows())).cwiseAbs().colwise().sum().maxCoeff();
@@ -254,7 +256,7 @@ void matrix_log_compute_big(const MatrixType& A, MatrixType& result)
}
matrix_log_compute_pade(result, T, degree);
- result *= pow(RealScalar(2), numberOfSquareRoots);
+ result *= pow(RealScalar(2), RealScalar(numberOfSquareRoots)); // TODO replace by bitshift if possible
}
/** \ingroup MatrixFunctions_Module
diff --git a/unsupported/Eigen/src/MatrixFunctions/MatrixPower.h b/unsupported/Eigen/src/MatrixFunctions/MatrixPower.h
index 95f6fbca8..d7672d7c9 100644
--- a/unsupported/Eigen/src/MatrixFunctions/MatrixPower.h
+++ b/unsupported/Eigen/src/MatrixFunctions/MatrixPower.h
@@ -160,11 +160,11 @@ template<typename MatrixType>
void MatrixPowerAtomic<MatrixType>::computePade(int degree, const MatrixType& IminusT, ResultType& res) const
{
int i = 2*degree;
- res = (m_p-degree) / (2*i-2) * IminusT;
+ res = (m_p-RealScalar(degree)) / RealScalar(2*i-2) * IminusT;
for (--i; i; --i) {
res = (MatrixType::Identity(IminusT.rows(), IminusT.cols()) + res).template triangularView<Upper>()
- .solve((i==1 ? -m_p : i&1 ? (-m_p-i/2)/(2*i) : (m_p-i/2)/(2*i-2)) * IminusT).eval();
+ .solve((i==1 ? -m_p : i&1 ? (-m_p-RealScalar(i/2))/RealScalar(2*i) : (m_p-RealScalar(i/2))/RealScalar(2*i-2)) * IminusT).eval();
}
res += MatrixType::Identity(IminusT.rows(), IminusT.cols());
}
@@ -194,11 +194,12 @@ void MatrixPowerAtomic<MatrixType>::computeBig(ResultType& res) const
{
using std::ldexp;
const int digits = std::numeric_limits<RealScalar>::digits;
- const RealScalar maxNormForPade = digits <= 24? 4.3386528e-1L // single precision
+ const RealScalar maxNormForPade = RealScalar(
+ digits <= 24? 4.3386528e-1L // single precision
: digits <= 53? 2.789358995219730e-1L // double precision
: digits <= 64? 2.4471944416607995472e-1L // extended precision
: digits <= 106? 1.1016843812851143391275867258512e-1L // double-double
- : 9.134603732914548552537150753385375e-2L; // quadruple precision
+ : 9.134603732914548552537150753385375e-2L); // quadruple precision
MatrixType IminusT, sqrtT, T = m_A.template triangularView<Upper>();
RealScalar normIminusT;
int degree, degree2, numberOfSquareRoots = 0;
@@ -296,8 +297,8 @@ MatrixPowerAtomic<MatrixType>::computeSuperDiag(const ComplexScalar& curr, const
ComplexScalar logCurr = log(curr);
ComplexScalar logPrev = log(prev);
- int unwindingNumber = ceil((numext::imag(logCurr - logPrev) - RealScalar(EIGEN_PI)) / RealScalar(2*EIGEN_PI));
- ComplexScalar w = numext::log1p((curr-prev)/prev)/RealScalar(2) + ComplexScalar(0, EIGEN_PI*unwindingNumber);
+ RealScalar unwindingNumber = ceil((numext::imag(logCurr - logPrev) - RealScalar(EIGEN_PI)) / RealScalar(2*EIGEN_PI));
+ ComplexScalar w = numext::log1p((curr-prev)/prev)/RealScalar(2) + ComplexScalar(0, RealScalar(EIGEN_PI)*unwindingNumber);
return RealScalar(2) * exp(RealScalar(0.5) * p * (logCurr + logPrev)) * sinh(p * w) / (curr - prev);
}
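Both here and in matrix_log_compute_2x2 above, the integer-valued quantity is the standard unwinding number; it is now computed and stored as a RealScalar only to avoid int-to-real conversion warnings. In the usual notation,

\mathcal{U}(z) \;=\; \left\lceil \frac{\operatorname{Im}(z) - \pi}{2\pi} \right\rceil ,
\qquad \log\!\left(e^{z}\right) \;=\; z - 2\pi i\,\mathcal{U}(z),

with z = log(curr) − log(prev), which corrects the branch of the logarithm when the two eigenvalues straddle the cut.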
diff --git a/unsupported/Eigen/src/Polynomials/Companion.h b/unsupported/Eigen/src/Polynomials/Companion.h
index 126be783b..6ab8f9714 100644
--- a/unsupported/Eigen/src/Polynomials/Companion.h
+++ b/unsupported/Eigen/src/Polynomials/Companion.h
@@ -75,8 +75,7 @@ class companion
void setPolynomial( const VectorType& poly )
{
const Index deg = poly.size()-1;
- m_monic = Scalar(-1)/poly[deg] * poly.head(deg);
- //m_bl_diag.setIdentity( deg-1 );
+ m_monic = -poly.head(deg)/poly[deg];
m_bl_diag.setOnes(deg-1);
}
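The simplified expression computes the same coefficients as before: those of the polynomial made monic by dividing through by the leading coefficient. For p(x) = a_d x^d + ... + a_1 x + a_0 this is

\texttt{m\_monic}_i \;=\; -\,\frac{a_i}{a_d}, \qquad i = 0, \dots, d-1,

which, up to the class's internal storage layout, is the last column of the companion matrix whose eigenvalues are the roots of p.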
diff --git a/unsupported/Eigen/src/Polynomials/PolynomialSolver.h b/unsupported/Eigen/src/Polynomials/PolynomialSolver.h
index 788594247..5e0ecbb43 100644
--- a/unsupported/Eigen/src/Polynomials/PolynomialSolver.h
+++ b/unsupported/Eigen/src/Polynomials/PolynomialSolver.h
@@ -126,7 +126,7 @@ class PolynomialSolverBase
for( Index i=0; i<m_roots.size(); ++i )
{
- if( abs( m_roots[i].imag() ) < absImaginaryThreshold )
+ if( abs( m_roots[i].imag() ) <= absImaginaryThreshold )
{
if( !hasArealRoot )
{
@@ -144,10 +144,10 @@ class PolynomialSolverBase
}
}
}
- else
+ else if(!hasArealRoot)
{
if( abs( m_roots[i].imag() ) < abs( m_roots[res].imag() ) ){
- res = i; }
+ res = i;}
}
}
return numext::real_ref(m_roots[res]);
@@ -167,7 +167,7 @@ class PolynomialSolverBase
for( Index i=0; i<m_roots.size(); ++i )
{
- if( abs( m_roots[i].imag() ) < absImaginaryThreshold )
+ if( abs( m_roots[i].imag() ) <= absImaginaryThreshold )
{
if( !hasArealRoot )
{
@@ -340,6 +340,7 @@ class PolynomialSolver : public PolynomialSolverBase<_Scalar,_Deg>
typedef typename internal::conditional<NumTraits<Scalar>::IsComplex,
ComplexEigenSolver<CompanionMatrixType>,
EigenSolver<CompanionMatrixType> >::type EigenSolverType;
+ typedef typename internal::conditional<NumTraits<Scalar>::IsComplex, Scalar, std::complex<Scalar> >::type ComplexScalar;
public:
/** Computes the complex roots of a new polynomial. */
@@ -354,6 +355,25 @@ class PolynomialSolver : public PolynomialSolverBase<_Scalar,_Deg>
companion.balance();
m_eigenSolver.compute( companion.denseMatrix() );
m_roots = m_eigenSolver.eigenvalues();
+ // Clean up noise in the imaginary part of real roots:
+ // if the imaginary part is rather small compared to the real part
+ // and cancelling the imaginary part yields a smaller evaluation,
+ // then it is safe to keep the real part only.
+ RealScalar coarse_prec = RealScalar(std::pow(4,poly.size()+1))*NumTraits<RealScalar>::epsilon();
+ for(Index i = 0; i<m_roots.size(); ++i)
+ {
+ if( internal::isMuchSmallerThan(numext::abs(numext::imag(m_roots[i])),
+ numext::abs(numext::real(m_roots[i])),
+ coarse_prec) )
+ {
+ ComplexScalar as_real_root = ComplexScalar(numext::real(m_roots[i]));
+ if( numext::abs(poly_eval(poly, as_real_root))
+ <= numext::abs(poly_eval(poly, m_roots[i])))
+ {
+ m_roots[i] = as_real_root;
+ }
+ }
+ }
}
else if(poly.size () == 2)
{
diff --git a/unsupported/Eigen/src/Skyline/SkylineInplaceLU.h b/unsupported/Eigen/src/Skyline/SkylineInplaceLU.h
index bda057a85..6d0370d5b 100644
--- a/unsupported/Eigen/src/Skyline/SkylineInplaceLU.h
+++ b/unsupported/Eigen/src/Skyline/SkylineInplaceLU.h
@@ -349,4 +349,4 @@ bool SkylineInplaceLU<MatrixType>::solve(const MatrixBase<BDerived> &b, MatrixBa
} // end namespace Eigen
-#endif // EIGEN_SKYLINELU_H
+#endif // EIGEN_SKYLINEINPLACELU_H
diff --git a/unsupported/Eigen/src/Skyline/SkylineMatrix.h b/unsupported/Eigen/src/Skyline/SkylineMatrix.h
index f77d79a04..7c7eace7f 100644
--- a/unsupported/Eigen/src/Skyline/SkylineMatrix.h
+++ b/unsupported/Eigen/src/Skyline/SkylineMatrix.h
@@ -859,4 +859,4 @@ protected:
} // end namespace Eigen
-#endif // EIGEN_SkylineMatrix_H
+#endif // EIGEN_SKYLINEMATRIX_H
diff --git a/unsupported/Eigen/src/Skyline/SkylineMatrixBase.h b/unsupported/Eigen/src/Skyline/SkylineMatrixBase.h
index b3a237230..753c1b33d 100644
--- a/unsupported/Eigen/src/Skyline/SkylineMatrixBase.h
+++ b/unsupported/Eigen/src/Skyline/SkylineMatrixBase.h
@@ -209,4 +209,4 @@ protected:
} // end namespace Eigen
-#endif // EIGEN_SkylineMatrixBase_H
+#endif // EIGEN_SKYLINEMATRIXBASE_H
diff --git a/unsupported/Eigen/src/Skyline/SkylineStorage.h b/unsupported/Eigen/src/Skyline/SkylineStorage.h
index 378a8deb4..cc7514f12 100644
--- a/unsupported/Eigen/src/Skyline/SkylineStorage.h
+++ b/unsupported/Eigen/src/Skyline/SkylineStorage.h
@@ -256,4 +256,4 @@ public:
} // end namespace Eigen
-#endif // EIGEN_COMPRESSED_STORAGE_H
+#endif // EIGEN_SKYLINE_STORAGE_H
diff --git a/unsupported/Eigen/src/SparseExtra/RandomSetter.h b/unsupported/Eigen/src/SparseExtra/RandomSetter.h
index ee97299af..7542cf764 100644
--- a/unsupported/Eigen/src/SparseExtra/RandomSetter.h
+++ b/unsupported/Eigen/src/SparseExtra/RandomSetter.h
@@ -249,10 +249,10 @@ class RandomSetter
}
}
// prefix sum
- Index count = 0;
+ StorageIndex count = 0;
for (Index j=0; j<mp_target->outerSize(); ++j)
{
- Index tmp = positions[j];
+ StorageIndex tmp = positions[j];
mp_target->outerIndexPtr()[j] = count;
positions[j] = count;
count += tmp;
@@ -281,7 +281,7 @@ class RandomSetter
mp_target->innerIndexPtr()[i+1] = mp_target->innerIndexPtr()[i];
--i;
}
- mp_target->innerIndexPtr()[i+1] = inner;
+ mp_target->innerIndexPtr()[i+1] = internal::convert_index<StorageIndex>(inner);
mp_target->valuePtr()[i+1] = it->second.value;
}
}
diff --git a/unsupported/Eigen/src/Splines/Spline.h b/unsupported/Eigen/src/Splines/Spline.h
index c1cf5b7e4..79edd52ce 100644
--- a/unsupported/Eigen/src/Splines/Spline.h
+++ b/unsupported/Eigen/src/Splines/Spline.h
@@ -191,7 +191,7 @@ namespace Eigen
DenseIndex span(Scalar u) const;
/**
- * \brief Computes the spang within the provided knot vector in which u is falling.
+ * \brief Computes the span within the provided knot vector in which u falls.
**/
static DenseIndex Span(typename SplineTraits<Spline>::Scalar u, DenseIndex degree, const typename SplineTraits<Spline>::KnotVectorType& knots);
diff --git a/unsupported/test/CMakeLists.txt b/unsupported/test/CMakeLists.txt
index cda658e0e..e8e1dc832 100644
--- a/unsupported/test/CMakeLists.txt
+++ b/unsupported/test/CMakeLists.txt
@@ -258,7 +258,7 @@ if(CUDA_FOUND AND EIGEN_TEST_CUDA)
set(EIGEN_CUDA_RELAXED_CONSTEXPR "--relaxed-constexpr")
endif()
- if( (NOT EIGEN_TEST_CXX11) OR (CMAKE_VERSION VERSION_LESS 3.3))
+ if(( (NOT EIGEN_TEST_CXX11) OR (CMAKE_VERSION VERSION_LESS 3.3)) AND EIGEN_TEST_CXX11)
set(EIGEN_CUDA_CXX11_FLAG "-std=c++11")
else()
# otherwise the flag has already been added because of the above set(CMAKE_CXX_STANDARD 11)
diff --git a/unsupported/test/EulerAngles.cpp b/unsupported/test/EulerAngles.cpp
index 67533e364..4ddb5a2e8 100644
--- a/unsupported/test/EulerAngles.cpp
+++ b/unsupported/test/EulerAngles.cpp
@@ -72,9 +72,9 @@ void verify_euler(const EulerAngles<Scalar, EulerSystem>& e)
}
}
- const Vector3 I = EulerAnglesType::AlphaAxisVector();
- const Vector3 J = EulerAnglesType::BetaAxisVector();
- const Vector3 K = EulerAnglesType::GammaAxisVector();
+ const Vector3 I_ = EulerAnglesType::AlphaAxisVector();
+ const Vector3 J_ = EulerAnglesType::BetaAxisVector();
+ const Vector3 K_ = EulerAnglesType::GammaAxisVector();
// Is approx checks
VERIFY(e.isApprox(e));
@@ -97,7 +97,7 @@ void verify_euler(const EulerAngles<Scalar, EulerSystem>& e)
VERIFY_APPROXED_RANGE(betaRangeStart, ebis.beta(), betaRangeEnd);
VERIFY_APPROXED_RANGE(-PI, ebis.gamma(), PI);
- const Matrix3 mbis(AngleAxisType(ebis.alpha(), I) * AngleAxisType(ebis.beta(), J) * AngleAxisType(ebis.gamma(), K));
+ const Matrix3 mbis(AngleAxisType(ebis.alpha(), I_) * AngleAxisType(ebis.beta(), J_) * AngleAxisType(ebis.gamma(), K_));
VERIFY_IS_APPROX(Scalar(mbis.determinant()), ONE);
VERIFY_IS_APPROX(mbis, ebis.toRotationMatrix());
/*std::cout << "===================\n" <<
diff --git a/unsupported/test/cxx11_eventcount.cpp b/unsupported/test/cxx11_eventcount.cpp
index 2f1418684..3ca8598c7 100644
--- a/unsupported/test/cxx11_eventcount.cpp
+++ b/unsupported/test/cxx11_eventcount.cpp
@@ -30,11 +30,11 @@ static void test_basic_eventcount()
EventCount ec(waiters);
EventCount::Waiter& w = waiters[0];
ec.Notify(false);
- ec.Prewait(&w);
+ VERIFY(ec.Prewait());
ec.Notify(true);
ec.CommitWait(&w);
- ec.Prewait(&w);
- ec.CancelWait(&w);
+ VERIFY(ec.Prewait());
+ ec.CancelWait();
}
// Fake bounded counter-based queue.
@@ -112,7 +112,7 @@ static void test_stress_eventcount()
unsigned idx = rand_reentrant(&rnd) % kQueues;
if (queues[idx].Pop()) continue;
j--;
- ec.Prewait(&w);
+ if (!ec.Prewait()) continue;
bool empty = true;
for (int q = 0; q < kQueues; q++) {
if (!queues[q].Empty()) {
@@ -121,7 +121,7 @@ static void test_stress_eventcount()
}
}
if (!empty) {
- ec.CancelWait(&w);
+ ec.CancelWait();
continue;
}
ec.CommitWait(&w);
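These hunks track the updated EventCount interface exercised by the test: Prewait() no longer takes a waiter and reports success through its return value, and CancelWait() takes no argument. A sketch of the consumer-side pattern the stress test follows (hypothetical helper; the meaning of a false Prewait() is inferred only from the `continue` in the test above):

// Returns true if the caller actually blocked and was woken by a Notify().
template <typename Queue>
bool waitForWork(EventCount& ec, EventCount::Waiter* w, Queue& queue) {
  if (!ec.Prewait()) return false;  // registration did not take effect; caller re-polls the queues
  if (!queue.Empty()) {             // work showed up between the poll and Prewait()
    ec.CancelWait();                // deregister without blocking
    return false;
  }
  ec.CommitWait(w);                 // block until notified
  return true;
}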
diff --git a/unsupported/test/cxx11_tensor_executor.cpp b/unsupported/test/cxx11_tensor_executor.cpp
index 18c87b35e..162dab7b8 100644
--- a/unsupported/test/cxx11_tensor_executor.cpp
+++ b/unsupported/test/cxx11_tensor_executor.cpp
@@ -452,6 +452,81 @@ static void test_execute_slice_lvalue(Device d)
}
}
+template <typename T, int NumDims, typename Device, bool Vectorizable,
+ bool Tileable, int Layout>
+static void test_execute_broadcasting_of_forced_eval(Device d)
+{
+ static constexpr int Options = 0 | Layout;
+
+ auto dims = RandomDims<NumDims>(1, 10);
+ Tensor<T, NumDims, Options, Index> src(dims);
+ src.setRandom();
+
+ const auto broadcasts = RandomDims<NumDims>(1, 7);
+ const auto expr = src.square().eval().broadcast(broadcasts);
+
+ // We assume that broadcasting on a default device is tested and correct, so
+ // we can rely on it to verify correctness of tensor executor and tiling.
+ Tensor<T, NumDims, Options, Index> golden;
+ golden = expr;
+
+ // Now do the broadcasting using configured tensor executor.
+ Tensor<T, NumDims, Options, Index> dst(golden.dimensions());
+
+ using Assign = TensorAssignOp<decltype(dst), const decltype(expr)>;
+ using Executor =
+ internal::TensorExecutor<const Assign, Device, Vectorizable, Tileable>;
+
+ Executor::run(Assign(dst, expr), d);
+
+ for (Index i = 0; i < dst.dimensions().TotalSize(); ++i) {
+ VERIFY_IS_EQUAL(dst.coeff(i), golden.coeff(i));
+ }
+}
+
+template<typename T, int NumDims>
+struct DummyGenerator {
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
+ T operator()(const array <Index, NumDims>& dims) const {
+ T result = static_cast<T>(0);
+ for (int i = 0; i < NumDims; ++i) {
+ result += static_cast<T>((i + 1) * dims[i]);
+ }
+ return result;
+ }
+};
+
+template <typename T, int NumDims, typename Device, bool Vectorizable,
+ bool Tileable, int Layout>
+static void test_execute_generator_op(Device d)
+{
+ static constexpr int Options = 0 | Layout;
+
+ auto dims = RandomDims<NumDims>(20, 30);
+ Tensor<T, NumDims, Options, Index> src(dims);
+ src.setRandom();
+
+ const auto expr = src.generate(DummyGenerator<T, NumDims>());
+
+ // We assume that generator on a default device is tested and correct, so
+ // we can rely on it to verify correctness of tensor executor and tiling.
+ Tensor<T, NumDims, Options, Index> golden;
+ golden = expr;
+
+  // Now evaluate the generator expression using the configured tensor executor.
+ Tensor<T, NumDims, Options, Index> dst(golden.dimensions());
+
+ using Assign = TensorAssignOp<decltype(dst), const decltype(expr)>;
+ using Executor =
+ internal::TensorExecutor<const Assign, Device, Vectorizable, Tileable>;
+
+ Executor::run(Assign(dst, expr), d);
+
+ for (Index i = 0; i < dst.dimensions().TotalSize(); ++i) {
+ VERIFY_IS_EQUAL(dst.coeff(i), golden.coeff(i));
+ }
+}
+
#define CALL_SUBTEST_PART(PART) \
CALL_SUBTEST_##PART
@@ -528,8 +603,18 @@ EIGEN_DECLARE_TEST(cxx11_tensor_executor) {
CALL_SUBTEST_COMBINATIONS(11, test_execute_slice_lvalue, float, 4);
CALL_SUBTEST_COMBINATIONS(11, test_execute_slice_lvalue, float, 5);
+ CALL_SUBTEST_COMBINATIONS(12, test_execute_broadcasting_of_forced_eval, float, 2);
+ CALL_SUBTEST_COMBINATIONS(12, test_execute_broadcasting_of_forced_eval, float, 3);
+ CALL_SUBTEST_COMBINATIONS(12, test_execute_broadcasting_of_forced_eval, float, 4);
+ CALL_SUBTEST_COMBINATIONS(12, test_execute_broadcasting_of_forced_eval, float, 5);
+
+ CALL_SUBTEST_COMBINATIONS(13, test_execute_generator_op, float, 2);
+ CALL_SUBTEST_COMBINATIONS(13, test_execute_generator_op, float, 3);
+ CALL_SUBTEST_COMBINATIONS(13, test_execute_generator_op, float, 4);
+ CALL_SUBTEST_COMBINATIONS(13, test_execute_generator_op, float, 5);
+
// Force CMake to split this test.
- // EIGEN_SUFFIXES;1;2;3;4;5;6;7;8;9;10;11
+ // EIGEN_SUFFIXES;1;2;3;4;5;6;7;8;9;10;11;12;13
}
#undef CALL_SUBTEST_COMBINATIONS
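test_execute_generator_op drives TensorGeneratorOp through the executor; outside the test harness the same expression is built with Tensor::generate() and a functor mapping coordinates to values, as DummyGenerator does above. A small self-contained sketch, reusing the coordinate formula of Generator2D from cxx11_tensor_generator.cpp:

#include <unsupported/Eigen/CXX11/Tensor>

struct AffineGenerator {
  // Called once per output coordinate; returns the value for that position.
  float operator()(const Eigen::array<Eigen::Index, 2>& coords) const {
    return 3.0f * coords[0] + 11.0f * coords[1];
  }
};

int main() {
  Eigen::Tensor<float, 2> shape_only(4, 5);  // values are ignored, only the dimensions matter
  Eigen::Tensor<float, 2> filled = shape_only.generate(AffineGenerator());
  return filled(2, 3) == 3.0f * 2 + 11.0f * 3 ? 0 : 1;
}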
diff --git a/unsupported/test/cxx11_tensor_fft.cpp b/unsupported/test/cxx11_tensor_fft.cpp
index 4e4c9c4ec..641486a4a 100644
--- a/unsupported/test/cxx11_tensor_fft.cpp
+++ b/unsupported/test/cxx11_tensor_fft.cpp
@@ -228,9 +228,6 @@ template <typename RealScalar>
static void test_fft_non_power_of_2_round_trip(int exponent) {
int n = (1 << exponent) + 1;
- // The dimension type needs to be at least 8 bytes long for the
- // Tensor constructor to work. On Windows, long is only 4 bytes long,
- // so use long long here to force the usage of a 8 bytes integer type.
Eigen::DSizes<std::int64_t, 1> dimensions;
dimensions[0] = n;
const DSizes<std::int64_t, 1> arr = dimensions;
@@ -249,7 +246,9 @@ static void test_fft_non_power_of_2_round_trip(int exponent) {
forward.template fft<RealPart, FFT_REVERSE>(fft);
for (int i = 0; i < n; ++i) {
- VERIFY_IS_APPROX(input[i], output[i]);
+ RealScalar tol = test_precision<RealScalar>() *
+ (std::abs(input[i]) + std::abs(output[i]) + 1);
+ VERIFY_IS_APPROX_OR_LESS_THAN(std::abs(input[i] - output[i]), tol);
}
}
@@ -301,4 +300,5 @@ EIGEN_DECLARE_TEST(cxx11_tensor_fft) {
test_fft_real_input_energy<RowMajor, double, false, Eigen::BothParts, FFT_FORWARD, 4>();
test_fft_non_power_of_2_round_trip<float>(7);
+ test_fft_non_power_of_2_round_trip<double>(7);
}
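The round-trip check now measures the absolute difference against a tolerance scaled by the magnitudes involved, instead of relying on VERIFY_IS_APPROX for values that may be large or close to zero after a non-power-of-2 FFT. The same mixed absolute/relative criterion written as a standalone helper (a sketch, not the test macro itself):

#include <cmath>

// Treat x and y as equal when their difference is small relative to their
// magnitudes; the +1 term makes the check absolute for values near zero.
template <typename Real>
bool approxEqual(Real x, Real y, Real eps) {
  return std::abs(x - y) <= eps * (std::abs(x) + std::abs(y) + Real(1));
}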
diff --git a/unsupported/test/cxx11_tensor_forced_eval.cpp b/unsupported/test/cxx11_tensor_forced_eval.cpp
index f76e2ea97..a21a02bec 100644
--- a/unsupported/test/cxx11_tensor_forced_eval.cpp
+++ b/unsupported/test/cxx11_tensor_forced_eval.cpp
@@ -61,7 +61,7 @@ static void test_const()
Eigen::array<int, 2> bcast;
bcast[0] = 3;
bcast[1] = 1;
- const TensorMap<Tensor<const float, 2> > input_tensor(input.data(), 3, 3);
+ const TensorMap<const Tensor<float, 2> > input_tensor(input.data(), 3, 3);
Tensor<float, 2> output_tensor= (input_tensor - input_tensor.maximum(depth_dim).eval().reshape(dims2d).broadcast(bcast));
for (int i = 0; i < 3; ++i) {
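The forced-eval fix moves the const qualifier from the scalar onto the mapped tensor type, i.e. the read-only view is expressed as TensorMap<const Tensor<float, 2>> rather than a Tensor with a const element type. A short usage sketch of the patched form (assumed to compile against the current Tensor module):

#include <unsupported/Eigen/CXX11/Tensor>

int main() {
  Eigen::Tensor<float, 2> storage(3, 3);
  storage.setConstant(1.0f);

  // Read-only view over existing data; const is on the Tensor, not the scalar.
  Eigen::TensorMap<const Eigen::Tensor<float, 2> > view(storage.data(), 3, 3);

  Eigen::Tensor<float, 2> doubled = view + view;  // expressions on the view work as usual
  return doubled(0, 0) == 2.0f ? 0 : 1;
}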
diff --git a/unsupported/test/cxx11_tensor_generator.cpp b/unsupported/test/cxx11_tensor_generator.cpp
index ee5e29b77..6dcf676bb 100644
--- a/unsupported/test/cxx11_tensor_generator.cpp
+++ b/unsupported/test/cxx11_tensor_generator.cpp
@@ -42,11 +42,11 @@ struct Generator2D {
template <int DataLayout>
static void test_2D()
{
- Tensor<float, 2> matrix(5, 7);
+ Tensor<float, 2> matrix(512, 512);
Tensor<float, 2> result = matrix.generate(Generator2D());
- for (int i = 0; i < 5; ++i) {
- for (int j = 0; j < 5; ++j) {
+ for (int i = 0; i < 512; ++i) {
+ for (int j = 0; j < 512; ++j) {
VERIFY_IS_EQUAL(result(i, j), 3*i + 11*j);
}
}
diff --git a/unsupported/test/cxx11_tensor_gpu.cu b/unsupported/test/cxx11_tensor_gpu.cu
index 14fc0bd04..94625e6a3 100644
--- a/unsupported/test/cxx11_tensor_gpu.cu
+++ b/unsupported/test/cxx11_tensor_gpu.cu
@@ -17,6 +17,8 @@
#include <unsupported/Eigen/CXX11/src/Tensor/TensorGpuHipCudaDefines.h>
+#define EIGEN_GPU_TEST_C99_MATH EIGEN_HAS_CXX11
+
using Eigen::Tensor;
void test_gpu_nullary() {
@@ -617,6 +619,7 @@ void test_gpu_convolution_3d()
}
+#if EIGEN_GPU_TEST_C99_MATH
template <typename Scalar>
void test_gpu_lgamma(const Scalar stddev)
{
@@ -655,6 +658,7 @@ void test_gpu_lgamma(const Scalar stddev)
gpuFree(d_in);
gpuFree(d_out);
}
+#endif
template <typename Scalar>
void test_gpu_digamma()
@@ -986,6 +990,7 @@ void test_gpu_igammac()
gpuFree(d_out);
}
+#if EIGEN_GPU_TEST_C99_MATH
template <typename Scalar>
void test_gpu_erf(const Scalar stddev)
{
@@ -1063,6 +1068,7 @@ void test_gpu_erfc(const Scalar stddev)
gpuFree(d_in);
gpuFree(d_out);
}
+#endif
template <typename Scalar>
void test_gpu_betainc()
@@ -1494,7 +1500,7 @@ EIGEN_DECLARE_TEST(cxx11_tensor_gpu)
CALL_SUBTEST_3(test_gpu_convolution_3d<RowMajor>());
#endif
-#if __cplusplus > 199711L
+#if EIGEN_GPU_TEST_C99_MATH
// std::erf, std::erfc, and so on were only added in c++11. We use them
// as a golden reference to validate the results produced by Eigen. Therefore
// we can only run these tests if we use a c++11 compiler.
diff --git a/unsupported/test/matrix_power.cpp b/unsupported/test/matrix_power.cpp
index fa52d256e..dbaf9dbdf 100644
--- a/unsupported/test/matrix_power.cpp
+++ b/unsupported/test/matrix_power.cpp
@@ -19,7 +19,7 @@ void test2dRotation(const T& tol)
MatrixPower<Matrix<T,2,2> > Apow(A);
for (int i=0; i<=20; ++i) {
- angle = std::pow(T(10), (i-10) / T(5.));
+ angle = std::pow(T(10), T(i-10) / T(5.));
c = std::cos(angle);
s = std::sin(angle);
B << c, s, -s, c;
@@ -61,7 +61,7 @@ void test3dRotation(const T& tol)
for (int i=0; i<=20; ++i) {
v = Matrix<T,3,1>::Random();
v.normalize();
- angle = std::pow(T(10), (i-10) / T(5.));
+ angle = std::pow(T(10), T(i-10) / T(5.));
VERIFY(AngleAxis<T>(angle, v).matrix().isApprox(AngleAxis<T>(1,v).matrix().pow(angle), tol));
}
}
@@ -153,52 +153,52 @@ typedef Matrix<long double,Dynamic,Dynamic> MatrixXe;
EIGEN_DECLARE_TEST(matrix_power)
{
CALL_SUBTEST_2(test2dRotation<double>(1e-13));
- CALL_SUBTEST_1(test2dRotation<float>(2e-5)); // was 1e-5, relaxed for clang 2.8 / linux / x86-64
+ CALL_SUBTEST_1(test2dRotation<float>(2e-5f)); // was 1e-5, relaxed for clang 2.8 / linux / x86-64
CALL_SUBTEST_9(test2dRotation<long double>(1e-13L));
CALL_SUBTEST_2(test2dHyperbolicRotation<double>(1e-14));
- CALL_SUBTEST_1(test2dHyperbolicRotation<float>(1e-5));
+ CALL_SUBTEST_1(test2dHyperbolicRotation<float>(1e-5f));
CALL_SUBTEST_9(test2dHyperbolicRotation<long double>(1e-14L));
CALL_SUBTEST_10(test3dRotation<double>(1e-13));
- CALL_SUBTEST_11(test3dRotation<float>(1e-5));
+ CALL_SUBTEST_11(test3dRotation<float>(1e-5f));
CALL_SUBTEST_12(test3dRotation<long double>(1e-13L));
CALL_SUBTEST_2(testGeneral(Matrix2d(), 1e-13));
CALL_SUBTEST_7(testGeneral(Matrix3dRowMajor(), 1e-13));
CALL_SUBTEST_3(testGeneral(Matrix4cd(), 1e-13));
CALL_SUBTEST_4(testGeneral(MatrixXd(8,8), 2e-12));
- CALL_SUBTEST_1(testGeneral(Matrix2f(), 1e-4));
- CALL_SUBTEST_5(testGeneral(Matrix3cf(), 1e-4));
- CALL_SUBTEST_8(testGeneral(Matrix4f(), 1e-4));
- CALL_SUBTEST_6(testGeneral(MatrixXf(2,2), 1e-3)); // see bug 614
+ CALL_SUBTEST_1(testGeneral(Matrix2f(), 1e-4f));
+ CALL_SUBTEST_5(testGeneral(Matrix3cf(), 1e-4f));
+ CALL_SUBTEST_8(testGeneral(Matrix4f(), 1e-4f));
+ CALL_SUBTEST_6(testGeneral(MatrixXf(2,2), 1e-3f)); // see bug 614
CALL_SUBTEST_9(testGeneral(MatrixXe(7,7), 1e-13L));
CALL_SUBTEST_10(testGeneral(Matrix3d(), 1e-13));
- CALL_SUBTEST_11(testGeneral(Matrix3f(), 1e-4));
+ CALL_SUBTEST_11(testGeneral(Matrix3f(), 1e-4f));
CALL_SUBTEST_12(testGeneral(Matrix3e(), 1e-13L));
CALL_SUBTEST_2(testSingular(Matrix2d(), 1e-13));
CALL_SUBTEST_7(testSingular(Matrix3dRowMajor(), 1e-13));
CALL_SUBTEST_3(testSingular(Matrix4cd(), 1e-13));
CALL_SUBTEST_4(testSingular(MatrixXd(8,8), 2e-12));
- CALL_SUBTEST_1(testSingular(Matrix2f(), 1e-4));
- CALL_SUBTEST_5(testSingular(Matrix3cf(), 1e-4));
- CALL_SUBTEST_8(testSingular(Matrix4f(), 1e-4));
- CALL_SUBTEST_6(testSingular(MatrixXf(2,2), 1e-3));
+ CALL_SUBTEST_1(testSingular(Matrix2f(), 1e-4f));
+ CALL_SUBTEST_5(testSingular(Matrix3cf(), 1e-4f));
+ CALL_SUBTEST_8(testSingular(Matrix4f(), 1e-4f));
+ CALL_SUBTEST_6(testSingular(MatrixXf(2,2), 1e-3f));
CALL_SUBTEST_9(testSingular(MatrixXe(7,7), 1e-13L));
CALL_SUBTEST_10(testSingular(Matrix3d(), 1e-13));
- CALL_SUBTEST_11(testSingular(Matrix3f(), 1e-4));
+ CALL_SUBTEST_11(testSingular(Matrix3f(), 1e-4f));
CALL_SUBTEST_12(testSingular(Matrix3e(), 1e-13L));
CALL_SUBTEST_2(testLogThenExp(Matrix2d(), 1e-13));
CALL_SUBTEST_7(testLogThenExp(Matrix3dRowMajor(), 1e-13));
CALL_SUBTEST_3(testLogThenExp(Matrix4cd(), 1e-13));
CALL_SUBTEST_4(testLogThenExp(MatrixXd(8,8), 2e-12));
- CALL_SUBTEST_1(testLogThenExp(Matrix2f(), 1e-4));
- CALL_SUBTEST_5(testLogThenExp(Matrix3cf(), 1e-4));
- CALL_SUBTEST_8(testLogThenExp(Matrix4f(), 1e-4));
- CALL_SUBTEST_6(testLogThenExp(MatrixXf(2,2), 1e-3));
+ CALL_SUBTEST_1(testLogThenExp(Matrix2f(), 1e-4f));
+ CALL_SUBTEST_5(testLogThenExp(Matrix3cf(), 1e-4f));
+ CALL_SUBTEST_8(testLogThenExp(Matrix4f(), 1e-4f));
+ CALL_SUBTEST_6(testLogThenExp(MatrixXf(2,2), 1e-3f));
CALL_SUBTEST_9(testLogThenExp(MatrixXe(7,7), 1e-13L));
CALL_SUBTEST_10(testLogThenExp(Matrix3d(), 1e-13));
- CALL_SUBTEST_11(testLogThenExp(Matrix3f(), 1e-4));
+ CALL_SUBTEST_11(testLogThenExp(Matrix3f(), 1e-4f));
CALL_SUBTEST_12(testLogThenExp(Matrix3e(), 1e-13L));
}
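The literal changes here keep the tolerance and the exponent computation in the scalar type under test: 1e-4f is already a float, so nothing is narrowed when it binds to the const T& parameter, and T(i-10) / T(5.) performs the division entirely in T instead of mixing an int with T. A tiny illustration of the call-site difference (hypothetical function, for illustration only):

template <typename T>
void check(const T& tol) { (void)tol; }

void caller() {
  check<float>(1e-4);   // double literal, converted to float to bind to const float&
  check<float>(1e-4f);  // float literal, no conversion needed
}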
diff --git a/unsupported/test/polynomialsolver.cpp b/unsupported/test/polynomialsolver.cpp
index 50c74f797..4ff9bda5a 100644
--- a/unsupported/test/polynomialsolver.cpp
+++ b/unsupported/test/polynomialsolver.cpp
@@ -26,6 +26,16 @@ struct increment_if_fixed_size
}
}
+template<typename PolynomialType>
+PolynomialType polyder(const PolynomialType& p)
+{
+ typedef typename PolynomialType::Scalar Scalar;
+ PolynomialType res(p.size());
+ for(Index i=1; i<p.size(); ++i)
+ res[i-1] = p[i]*Scalar(i);
+ res[p.size()-1] = 0.;
+ return res;
+}
template<int Deg, typename POLYNOMIAL, typename SOLVER>
bool aux_evalSolver( const POLYNOMIAL& pols, SOLVER& psolve )
@@ -44,10 +54,17 @@ bool aux_evalSolver( const POLYNOMIAL& pols, SOLVER& psolve )
psolve.compute( pols );
const RootsType& roots( psolve.roots() );
EvalRootsType evr( deg );
+ POLYNOMIAL pols_der = polyder(pols);
+ EvalRootsType der( deg );
for( int i=0; i<roots.size(); ++i ){
- evr[i] = std::abs( poly_eval( pols, roots[i] ) ); }
+ evr[i] = std::abs( poly_eval( pols, roots[i] ) );
+ der[i] = numext::maxi(RealScalar(1.), std::abs( poly_eval( pols_der, roots[i] ) ));
+ }
- bool evalToZero = evr.isZero( test_precision<Scalar>() );
+  // We need to divide by the magnitude of the derivative because, when the
+  // derivative is large, a very small error in the value of the root yields
+  // a very large error in the polynomial evaluation.
+ bool evalToZero = (evr.cwiseQuotient(der)).isZero( test_precision<Scalar>() );
if( !evalToZero )
{
cerr << "WRONG root: " << endl;