-rw-r--r--  CMakeLists.txt | 8
-rw-r--r--  Eigen/Core | 16
-rw-r--r--  Eigen/src/Core/Matrix.h | 22
-rw-r--r--  Eigen/src/Core/PermutationMatrix.h | 2
-rw-r--r--  Eigen/src/Core/PlainObjectBase.h | 30
-rw-r--r--  Eigen/src/Core/Ref.h | 18
-rw-r--r--  Eigen/src/Core/arch/AltiVec/Complex.h | 11
-rw-r--r--  Eigen/src/Core/util/Macros.h | 12
-rw-r--r--  Eigen/src/Core/util/Memory.h | 102
-rw-r--r--  Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h | 18
-rw-r--r--  Eigen/src/IterativeLinearSolvers/BiCGSTAB.h | 1
-rw-r--r--  Eigen/src/SparseCholesky/SimplicialCholesky.h | 42
-rw-r--r--  Eigen/src/SparseCore/ConservativeSparseSparseProduct.h | 94
-rw-r--r--  Eigen/src/SparseCore/SparseMatrixBase.h | 4
-rw-r--r--  Eigen/src/SparseCore/SparseVector.h | 14
-rw-r--r--  Eigen/src/SparseQR/SparseQR.h | 19
-rw-r--r--  blas/level3_impl.h | 2
-rw-r--r--  cmake/FindEigen3.cmake | 9
-rw-r--r--  doc/CMakeLists.txt | 8
-rw-r--r--  doc/Doxyfile.in | 10
-rw-r--r--  doc/PreprocessorDirectives.dox | 15
-rw-r--r--  doc/examples/CMakeLists.txt | 4
-rw-r--r--  doc/snippets/CMakeLists.txt | 6
-rw-r--r--  doc/special_examples/CMakeLists.txt | 16
-rw-r--r--  test/CMakeLists.txt | 8
-rw-r--r--  test/basicstuff.cpp | 34
-rw-r--r--  test/ctorleak.cpp | 51
-rw-r--r--  test/dynalloc.cpp | 33
-rw-r--r--  test/eigensolver_selfadjoint.cpp | 43
-rw-r--r--  test/main.h | 20
-rw-r--r--  test/simplicial_cholesky.cpp | 41
-rw-r--r--  test/sparseqr.cpp | 2
-rw-r--r--  unsupported/Eigen/MPRealSupport | 10
-rw-r--r--  unsupported/Eigen/src/IterativeSolvers/GMRES.h | 156
-rw-r--r--  unsupported/Eigen/src/LevenbergMarquardt/LevenbergMarquardt.h | 8
-rw-r--r--  unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h | 4
-rw-r--r--  unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h | 12
-rw-r--r--  unsupported/Eigen/src/SparseExtra/MarketIO.h | 9
-rw-r--r--  unsupported/doc/examples/CMakeLists.txt | 4
-rw-r--r--  unsupported/doc/snippets/CMakeLists.txt | 4
-rw-r--r--  unsupported/test/mpreal/mpreal.h | 4
41 files changed, 630 insertions, 296 deletions
diff --git a/CMakeLists.txt b/CMakeLists.txt
index cc3988196..dac091b49 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -119,7 +119,7 @@ endmacro(ei_add_cxx_compiler_flag)
if(NOT MSVC)
# We assume that other compilers are partly compatible with GNUCC
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fexceptions")
+# set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fexceptions")
set(CMAKE_CXX_FLAGS_DEBUG "-g3")
set(CMAKE_CXX_FLAGS_RELEASE "-g0 -O2")
@@ -301,6 +301,12 @@ if(EIGEN_TEST_NO_EXPLICIT_ALIGNMENT)
message(STATUS "Disabling alignment in tests/examples")
endif()
+option(EIGEN_TEST_NO_EXCEPTIONS "Disables C++ exceptions" OFF)
+if(EIGEN_TEST_NO_EXCEPTIONS)
+ ei_add_cxx_compiler_flag("-fno-exceptions")
+ message(STATUS "Disabling exceptions in tests/examples")
+endif()
+
option(EIGEN_TEST_C++0x "Enables all C++0x features." OFF)
include_directories(${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR})
diff --git a/Eigen/Core b/Eigen/Core
index 44c2c09c9..239bc6990 100644
--- a/Eigen/Core
+++ b/Eigen/Core
@@ -49,6 +49,14 @@
#define EIGEN_USING_STD_MATH(FUNC) using std::FUNC;
#endif
+#if (defined(_CPPUNWIND) || defined(__EXCEPTIONS)) && !defined(__CUDA_ARCH__) && !defined(EIGEN_EXCEPTIONS)
+ #define EIGEN_EXCEPTIONS
+#endif
+
+#ifdef EIGEN_EXCEPTIONS
+ #include <new>
+#endif
+
// then include this file where all our macros are defined. It's really important to do it first because
// it's where we do all the alignment settings (platform detection and honoring the user's will if he
// defined e.g. EIGEN_DONT_ALIGN) so it needs to be done before we do anything with vectorization.
@@ -216,14 +224,6 @@
#include <intrin.h>
#endif
-#if (defined(_CPPUNWIND) || defined(__EXCEPTIONS)) && !defined(__CUDA_ARCH__)
- #define EIGEN_EXCEPTIONS
-#endif
-
-#ifdef EIGEN_EXCEPTIONS
- #include <new>
-#endif
-
/** \brief Namespace containing all symbols from the %Eigen library. */
namespace Eigen {
diff --git a/Eigen/src/Core/Matrix.h b/Eigen/src/Core/Matrix.h
index eb70c1129..1daaabb07 100644
--- a/Eigen/src/Core/Matrix.h
+++ b/Eigen/src/Core/Matrix.h
@@ -262,9 +262,15 @@ class Matrix
/** \brief Constructs a vector or row-vector with given dimension. \only_for_vectors
*
- * Note that this is only useful for dynamic-size vectors. For fixed-size vectors,
- * it is redundant to pass the dimension here, so it makes more sense to use the default
- * constructor Matrix() instead.
+ * This is useful for dynamic-size vectors. For fixed-size vectors,
+ * it is redundant to pass this parameter, so one should use the default constructor
+ * Matrix() instead.
+ *
+ * \warning This constructor is disabled for fixed-size \c 1x1 matrices. For instance,
+ * calling Matrix<double,1,1>(1) will call the initialization constructor: Matrix(const Scalar&).
+ * For fixed-size \c 1x1 matrices it is therefore recommended to use the default
+ * constructor Matrix() instead, especially when using one of the non-standard
+ * \c EIGEN_INITIALIZE_MATRICES_BY_{ZERO,\c NAN} macros (see \ref TopicPreprocessorDirectives).
*/
EIGEN_STRONG_INLINE explicit Matrix(Index dim);
/** \brief Constructs an initialized 1x1 matrix with the given coefficient */
@@ -273,9 +279,17 @@ class Matrix
*
* This is useful for dynamic-size matrices. For fixed-size matrices,
* it is redundant to pass these parameters, so one should use the default constructor
- * Matrix() instead. */
+ * Matrix() instead.
+ *
+ * \warning This constructor is disabled for fixed-size \c 1x2 and \c 2x1 vectors. For instance,
+ * calling Vector2f(2,1) will call the initialization constructor: Matrix(const Scalar& x, const Scalar& y).
+ * For fixed-size \c 1x2 or \c 2x1 vectors it is therefore recommended to use the default
+ * constructor Matrix() instead, especially when using one of the non-standard
+ * \c EIGEN_INITIALIZE_MATRICES_BY_{ZERO,\c NAN} macros (see \ref TopicPreprocessorDirectives).
+ */
EIGEN_DEVICE_FUNC
Matrix(Index rows, Index cols);
+
/** \brief Constructs an initialized 2D vector with given coefficients */
Matrix(const Scalar& x, const Scalar& y);
#endif
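
For illustration, a minimal stand-alone sketch of the constructor behaviour documented above (not part of the patch; it only assumes Eigen is on the include path):

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::VectorXd dyn(2);                 // dynamic-size: allocates an uninitialized 2-vector
  Eigen::Matrix<double,1,1> one(1.0);     // fixed-size 1x1: the argument is the coefficient value
  Eigen::Vector2f xy(2.0f, 1.0f);         // fixed-size 2x1: arguments are the coefficients [2, 1]
  std::cout << dyn.size() << " " << one(0) << " " << xy.transpose() << std::endl;
  return 0;
}
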
diff --git a/Eigen/src/Core/PermutationMatrix.h b/Eigen/src/Core/PermutationMatrix.h
index 84d64edb3..31e0697a1 100644
--- a/Eigen/src/Core/PermutationMatrix.h
+++ b/Eigen/src/Core/PermutationMatrix.h
@@ -265,7 +265,7 @@ class PermutationBase : public EigenBase<Derived>
*
* \param SizeAtCompileTime the number of rows/cols, or Dynamic
* \param MaxSizeAtCompileTime the maximum number of rows/cols, or Dynamic. This optional parameter defaults to SizeAtCompileTime. Most of the time, you should not have to specify it.
- * \param StorageIndexType the interger type of the indices
+ * \param StorageIndexType the integer type of the indices
*
* This class represents a permutation matrix, internally stored as a vector of integers.
*
diff --git a/Eigen/src/Core/PlainObjectBase.h b/Eigen/src/Core/PlainObjectBase.h
index 8fd18b69d..3637b6256 100644
--- a/Eigen/src/Core/PlainObjectBase.h
+++ b/Eigen/src/Core/PlainObjectBase.h
@@ -702,6 +702,7 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
FLOATING_POINT_ARGUMENT_PASSED__INTEGER_WAS_EXPECTED)
resize(nbRows,nbCols);
}
+
template<typename T0, typename T1>
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE void _init2(const Scalar& val0, const Scalar& val1, typename internal::enable_if<Base::SizeAtCompileTime==2,T0>::type* = 0)
@@ -710,12 +711,27 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
m_storage.data()[0] = val0;
m_storage.data()[1] = val1;
}
+
+ template<typename T0, typename T1>
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE void _init2(const Index& val0, const Index& val1,
+ typename internal::enable_if< (!internal::is_same<Index,Scalar>::value)
+ && (internal::is_same<T0,Index>::value)
+ && (internal::is_same<T1,Index>::value)
+ && Base::SizeAtCompileTime==2,T1>::type* = 0)
+ {
+ EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(PlainObjectBase, 2)
+ m_storage.data()[0] = Scalar(val0);
+ m_storage.data()[1] = Scalar(val1);
+ }
template<typename T>
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE void _init1(Index size, typename internal::enable_if<Base::SizeAtCompileTime!=1 || !internal::is_convertible<T, Scalar>::value,T>::type* = 0)
{
- EIGEN_STATIC_ASSERT(bool(NumTraits<T>::IsInteger),
+ // NOTE MSVC 2008 complains if we directly put bool(NumTraits<T>::IsInteger) as the EIGEN_STATIC_ASSERT argument.
+ const bool is_integer = NumTraits<T>::IsInteger;
+ EIGEN_STATIC_ASSERT(is_integer,
FLOATING_POINT_ARGUMENT_PASSED__INTEGER_WAS_EXPECTED)
resize(size);
}
@@ -726,6 +742,18 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(PlainObjectBase, 1)
m_storage.data()[0] = val0;
}
+
+ template<typename T>
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE void _init1(const Index& val0,
+ typename internal::enable_if< (!internal::is_same<Index,Scalar>::value)
+ && (internal::is_same<Index,T>::value)
+ && Base::SizeAtCompileTime==1
+ && internal::is_convertible<T, Scalar>::value,T*>::type* = 0)
+ {
+ EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(PlainObjectBase, 1)
+ m_storage.data()[0] = Scalar(val0);
+ }
template<typename T>
EIGEN_DEVICE_FUNC
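
A small sketch of what the new _init1/_init2 overloads enable, assuming a build that includes this patch: integer Index arguments passed to a fixed-size 1x1 or 2x1 object whose Scalar type differs from Index are treated as coefficient values rather than sizes, mirroring the basicstuff.cpp additions further below.

#include <Eigen/Dense>
#include <cassert>

int main()
{
  Eigen::DenseIndex i = 2, j = 1;
  Eigen::Vector2f v(i, j);                          // coefficients become 2.f and 1.f, no resize
  Eigen::Matrix<float,1,1> s(Eigen::DenseIndex(3)); // coefficient becomes 3.f
  assert(v(0) == 2.f && v(1) == 1.f && s(0) == 3.f);
  return 0;
}
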
diff --git a/Eigen/src/Core/Ref.h b/Eigen/src/Core/Ref.h
index fe493641d..6390a8b64 100644
--- a/Eigen/src/Core/Ref.h
+++ b/Eigen/src/Core/Ref.h
@@ -15,17 +15,17 @@ namespace Eigen {
/** \class Ref
* \ingroup Core_Module
*
- * \brief A matrix or vector expression mapping an existing expressions
+ * \brief A matrix or vector expression mapping an existing expression
*
* \tparam PlainObjectType the equivalent matrix type of the mapped data
* \tparam Options specifies whether the pointer is \c #Aligned, or \c #Unaligned.
* The default is \c #Unaligned.
* \tparam StrideType optionally specifies strides. By default, Ref implies a contiguous storage along the inner dimension (inner stride==1),
- * but accept a variable outer stride (leading dimension).
+ * but accepts a variable outer stride (leading dimension).
* This can be overridden by specifying strides.
* The type passed here must be a specialization of the Stride template, see examples below.
*
- * This class permits to write non template functions taking Eigen's object as parameters while limiting the number of copies.
+ * This class provides a way to write non-template functions taking Eigen objects as parameters while limiting the number of copies.
* A Ref<> object can represent either a const expression or a l-value:
* \code
* // in-out argument:
@@ -35,10 +35,10 @@ namespace Eigen {
* void foo2(const Ref<const VectorXf>& x);
* \endcode
*
- * In the in-out case, the input argument must satisfies the constraints of the actual Ref<> type, otherwise a compilation issue will be triggered.
+ * In the in-out case, the input argument must satisfy the constraints of the actual Ref<> type, otherwise a compilation issue will be triggered.
* By default, a Ref<VectorXf> can reference any dense vector expression of float having a contiguous memory layout.
- * Likewise, a Ref<MatrixXf> can reference any column major dense matrix expression of float whose column's elements are contiguously stored with
- * the possibility to have a constant space inbetween each column, i.e.: the inner stride mmust be equal to 1, but the outer-stride (or leading dimension),
+ * Likewise, a Ref<MatrixXf> can reference any column-major dense matrix expression of float whose column's elements are contiguously stored with
+ * the possibility to have a constant space in-between each column, i.e. the inner stride must be equal to 1, but the outer stride (or leading dimension)
* can be greater than the number of rows.
*
* In the const case, if the input expression does not match the above requirement, then it is evaluated into a temporary before being passed to the function.
@@ -54,15 +54,15 @@ namespace Eigen {
* foo2(A.col().segment(2,4)); // No temporary
* \endcode
*
- * The range of inputs that can be referenced without temporary can be enlarged using the last two template parameter.
+ * The range of inputs that can be referenced without temporary can be enlarged using the last two template parameters.
* Here is an example accepting an innerstride!=1:
* \code
* // in-out argument:
* void foo3(Ref<VectorXf,0,InnerStride<> > x);
* foo3(A.row()); // OK
* \endcode
- * The downside here is that the function foo3 might be significantly slower than foo1 because it won't be able to exploit vectorization, and will involved more
- * expensive address computations even if the input is contiguously stored in memory. To overcome this issue, one might propose to overloads internally calling a
+ * The downside here is that the function foo3 might be significantly slower than foo1 because it won't be able to exploit vectorization, and will involve more
+ * expensive address computations even if the input is contiguously stored in memory. To overcome this issue, one might propose to overload internally calling a
* template function, e.g.:
* \code
* // in the .h:
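
As a self-contained companion to the documentation above, a minimal sketch of the two Ref flavours; the function names are illustrative, not Eigen API:

#include <Eigen/Dense>
#include <iostream>
using namespace Eigen;

// in-out argument: x is modified in place, no copy for contiguous inputs
void scale2(Ref<VectorXf> x) { x *= 2.f; }

// read-only argument: non-matching expressions are evaluated into a temporary first
float first(const Ref<const VectorXf>& x) { return x(0); }

int main()
{
  VectorXf v = VectorXf::Ones(4);
  scale2(v);                            // references v directly, v becomes [2,2,2,2]
  std::cout << first(2.f * v) << "\n";  // the product is evaluated into a temporary; prints 4
  return 0;
}
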
diff --git a/Eigen/src/Core/arch/AltiVec/Complex.h b/Eigen/src/Core/arch/AltiVec/Complex.h
index 73fa62643..13b874d0c 100644
--- a/Eigen/src/Core/arch/AltiVec/Complex.h
+++ b/Eigen/src/Core/arch/AltiVec/Complex.h
@@ -16,13 +16,14 @@ namespace internal {
static Packet4ui p4ui_CONJ_XOR = vec_mergeh((Packet4ui)p4i_ZERO, (Packet4ui)p4f_ZERO_);//{ 0x00000000, 0x80000000, 0x00000000, 0x80000000 };
static Packet16uc p16uc_COMPLEX_RE = vec_sld((Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 0), (Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 2), 8);//{ 0,1,2,3, 0,1,2,3, 8,9,10,11, 8,9,10,11 };
-static Packet16uc p16uc_COMPLEX_IM = vec_sld((Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 1), (Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 3), 8);//{ 4,5,6,7, 4,5,6,7, 12,13,14,15, 12,13,14,15 };
+static Packet16uc p16uc_COMPLEX_IM = vec_sld(p16uc_DUPLICATE, (Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 3), 8);//{ 4,5,6,7, 4,5,6,7, 12,13,14,15, 12,13,14,15 };
static Packet16uc p16uc_COMPLEX_REV = vec_sld(p16uc_REVERSE, p16uc_REVERSE, 8);//{ 4,5,6,7, 0,1,2,3, 12,13,14,15, 8,9,10,11 };
static Packet16uc p16uc_COMPLEX_REV2 = vec_sld(p16uc_FORWARD, p16uc_FORWARD, 8);//{ 8,9,10,11, 12,13,14,15, 0,1,2,3, 4,5,6,7 };
-static Packet16uc p16uc_PSET_HI = (Packet16uc) vec_mergeh((Packet4ui) vec_splat((Packet4ui)p16uc_FORWARD, 0), (Packet4ui) vec_splat((Packet4ui)p16uc_FORWARD, 1));//{ 0,1,2,3, 4,5,6,7, 0,1,2,3, 4,5,6,7 };
-static Packet16uc p16uc_PSET_LO = (Packet16uc) vec_mergeh((Packet4ui) vec_splat((Packet4ui)p16uc_FORWARD, 2), (Packet4ui) vec_splat((Packet4ui)p16uc_FORWARD, 3));//{ 8,9,10,11, 12,13,14,15, 8,9,10,11, 12,13,14,15 };
-static Packet16uc p16uc_COMPLEX_TRANSPOSE_0 = { 0,1,2,3, 4,5,6,7, 16,17,18,19, 20,21,22,23};
-static Packet16uc p16uc_COMPLEX_TRANSPOSE_1 = { 8,9,10,11, 12,13,14,15, 24,25,26,27, 28,29,30,31};
+static Packet16uc p16uc_PSET_HI = (Packet16uc) vec_mergeh((Packet4ui)p16uc_COMPLEX_RE, (Packet4ui)p16uc_COMPLEX_IM);//{ 0,1,2,3, 4,5,6,7, 0,1,2,3, 4,5,6,7 };
+static Packet16uc p16uc_PSET_LO = (Packet16uc) vec_mergel((Packet4ui)p16uc_COMPLEX_RE, (Packet4ui)p16uc_COMPLEX_IM);//{ 8,9,10,11, 12,13,14,15, 8,9,10,11, 12,13,14,15 };
+static Packet16uc p16uc_COMPLEX_MASK16 = vec_sld((Packet16uc)p4i_ZERO, vec_splat((Packet16uc) vec_abs(p4i_MINUS16), 3), 8);//{ 0,0,0,0, 0,0,0,0, 16,16,16,16, 16,16,16,16};
+static Packet16uc p16uc_COMPLEX_TRANSPOSE_0 = vec_add(p16uc_PSET_HI, p16uc_COMPLEX_MASK16);//{ 0,1,2,3, 4,5,6,7, 16,17,18,19, 20,21,22,23};
+static Packet16uc p16uc_COMPLEX_TRANSPOSE_1 = vec_add(p16uc_PSET_LO, p16uc_COMPLEX_MASK16);//{ 8,9,10,11, 12,13,14,15, 24,25,26,27, 28,29,30,31};
//---------- float ----------
struct Packet2cf
diff --git a/Eigen/src/Core/util/Macros.h b/Eigen/src/Core/util/Macros.h
index f84d20397..d029e0c6c 100644
--- a/Eigen/src/Core/util/Macros.h
+++ b/Eigen/src/Core/util/Macros.h
@@ -493,4 +493,16 @@ namespace Eigen {
const RHS \
>
+#ifdef EIGEN_EXCEPTIONS
+# define EIGEN_THROW_X(X) throw X
+# define EIGEN_THROW throw
+# define EIGEN_TRY try
+# define EIGEN_CATCH(X) catch (X)
+#else
+# define EIGEN_THROW_X(X) std::abort()
+# define EIGEN_THROW std::abort()
+# define EIGEN_TRY if (true)
+# define EIGEN_CATCH(X) else
+#endif
+
#endif // EIGEN_MACROS_H
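
An illustrative use of the new macros in user code, assuming an Eigen build that includes this patch; checked_malloc is a made-up helper, not an Eigen function. The same source compiles with and without -fno-exceptions:

#include <Eigen/Core>
#include <new>      // std::bad_alloc
#include <cstdlib>  // std::malloc, std::free, std::abort

inline void* checked_malloc(std::size_t n)
{
  void* p = std::malloc(n);
  if (p == 0)
    EIGEN_THROW_X(std::bad_alloc());  // throws, or calls std::abort() when exceptions are disabled
  return p;
}

int main()
{
  void* p = checked_malloc(64);
  std::free(p);
  return 0;
}
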
diff --git a/Eigen/src/Core/util/Memory.h b/Eigen/src/Core/util/Memory.h
index dd9285714..30133ba67 100644
--- a/Eigen/src/Core/util/Memory.h
+++ b/Eigen/src/Core/util/Memory.h
@@ -64,7 +64,7 @@
// Currently, let's include it only on unix systems:
#if defined(__unix__) || defined(__unix)
#include <unistd.h>
- #if ((defined __QNXNTO__) || (defined _GNU_SOURCE) || ((defined _XOPEN_SOURCE) && (_XOPEN_SOURCE >= 600))) && (defined _POSIX_ADVISORY_INFO) && (_POSIX_ADVISORY_INFO > 0)
+ #if ((defined __QNXNTO__) || (defined _GNU_SOURCE) || (defined __PGI) || ((defined _XOPEN_SOURCE) && (_XOPEN_SOURCE >= 600))) && (defined _POSIX_ADVISORY_INFO) && (_POSIX_ADVISORY_INFO > 0)
#define EIGEN_HAS_POSIX_MEMALIGN 1
#endif
#endif
@@ -338,15 +338,6 @@ template<> inline void* conditional_aligned_realloc<false>(void* ptr, size_t new
*** Construction/destruction of array elements ***
*****************************************************************************/
-/** \internal Constructs the elements of an array.
- * The \a size parameter tells on how many objects to call the constructor of T.
- */
-template<typename T> inline T* construct_elements_of_array(T *ptr, size_t size)
-{
- for (size_t i=0; i < size; ++i) ::new (ptr + i) T;
- return ptr;
-}
-
/** \internal Destructs the elements of an array.
* The \a size parameters tells on how many objects to call the destructor of T.
*/
@@ -357,6 +348,24 @@ template<typename T> inline void destruct_elements_of_array(T *ptr, size_t size)
while(size) ptr[--size].~T();
}
+/** \internal Constructs the elements of an array.
+ * The \a size parameter tells on how many objects to call the constructor of T.
+ */
+template<typename T> inline T* construct_elements_of_array(T *ptr, size_t size)
+{
+ size_t i;
+ EIGEN_TRY
+ {
+ for (i = 0; i < size; ++i) ::new (ptr + i) T;
+ return ptr;
+ }
+ EIGEN_CATCH(...)
+ {
+ destruct_elements_of_array(ptr, i);
+ EIGEN_THROW;
+ }
+}
+
/*****************************************************************************
*** Implementation of aligned new/delete-like functions ***
*****************************************************************************/
@@ -376,14 +385,30 @@ template<typename T> inline T* aligned_new(size_t size)
{
check_size_for_overflow<T>(size);
T *result = reinterpret_cast<T*>(aligned_malloc(sizeof(T)*size));
- return construct_elements_of_array(result, size);
+ EIGEN_TRY
+ {
+ return construct_elements_of_array(result, size);
+ }
+ EIGEN_CATCH(...)
+ {
+ aligned_free(result);
+ EIGEN_THROW;
+ }
}
template<typename T, bool Align> inline T* conditional_aligned_new(size_t size)
{
check_size_for_overflow<T>(size);
T *result = reinterpret_cast<T*>(conditional_aligned_malloc<Align>(sizeof(T)*size));
- return construct_elements_of_array(result, size);
+ EIGEN_TRY
+ {
+ return construct_elements_of_array(result, size);
+ }
+ EIGEN_CATCH(...)
+ {
+ conditional_aligned_free<Align>(result);
+ EIGEN_THROW;
+ }
}
/** \internal Deletes objects constructed with aligned_new
@@ -412,7 +437,17 @@ template<typename T, bool Align> inline T* conditional_aligned_realloc_new(T* pt
destruct_elements_of_array(pts+new_size, old_size-new_size);
T *result = reinterpret_cast<T*>(conditional_aligned_realloc<Align>(reinterpret_cast<void*>(pts), sizeof(T)*new_size, sizeof(T)*old_size));
if(new_size > old_size)
- construct_elements_of_array(result+old_size, new_size-old_size);
+ {
+ EIGEN_TRY
+ {
+ construct_elements_of_array(result+old_size, new_size-old_size);
+ }
+ EIGEN_CATCH(...)
+ {
+ conditional_aligned_free<Align>(result);
+ EIGEN_THROW;
+ }
+ }
return result;
}
@@ -422,7 +457,17 @@ template<typename T, bool Align> inline T* conditional_aligned_new_auto(size_t s
check_size_for_overflow<T>(size);
T *result = reinterpret_cast<T*>(conditional_aligned_malloc<Align>(sizeof(T)*size));
if(NumTraits<T>::RequireInitialization)
- construct_elements_of_array(result, size);
+ {
+ EIGEN_TRY
+ {
+ construct_elements_of_array(result, size);
+ }
+ EIGEN_CATCH(...)
+ {
+ conditional_aligned_free<Align>(result);
+ EIGEN_THROW;
+ }
+ }
return result;
}
@@ -434,7 +479,17 @@ template<typename T, bool Align> inline T* conditional_aligned_realloc_new_auto(
destruct_elements_of_array(pts+new_size, old_size-new_size);
T *result = reinterpret_cast<T*>(conditional_aligned_realloc<Align>(reinterpret_cast<void*>(pts), sizeof(T)*new_size, sizeof(T)*old_size));
if(NumTraits<T>::RequireInitialization && (new_size > old_size))
- construct_elements_of_array(result+old_size, new_size-old_size);
+ {
+ EIGEN_TRY
+ {
+ construct_elements_of_array(result+old_size, new_size-old_size);
+ }
+ EIGEN_CATCH(...)
+ {
+ conditional_aligned_free<Align>(result);
+ EIGEN_THROW;
+ }
+ }
return result;
}
@@ -634,20 +689,11 @@ template<typename T> class aligned_stack_memory_handler
*****************************************************************************/
#if EIGEN_ALIGN
- #ifdef EIGEN_EXCEPTIONS
- #define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_NOTHROW(NeedsToAlign) \
+ #define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_NOTHROW(NeedsToAlign) \
void* operator new(size_t size, const std::nothrow_t&) throw() { \
- try { return Eigen::internal::conditional_aligned_malloc<NeedsToAlign>(size); } \
- catch (...) { return 0; } \
- return 0; \
+ EIGEN_TRY { return Eigen::internal::conditional_aligned_malloc<NeedsToAlign>(size); } \
+ EIGEN_CATCH (...) { return 0; } \
}
- #else
- #define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_NOTHROW(NeedsToAlign) \
- void* operator new(size_t size, const std::nothrow_t&) throw() { \
- return Eigen::internal::conditional_aligned_malloc<NeedsToAlign>(size); \
- }
- #endif
-
#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign) \
void *operator new(size_t size) { \
return Eigen::internal::conditional_aligned_malloc<NeedsToAlign>(size); \
@@ -657,6 +703,8 @@ template<typename T> class aligned_stack_memory_handler
} \
void operator delete(void * ptr) throw() { Eigen::internal::conditional_aligned_free<NeedsToAlign>(ptr); } \
void operator delete[](void * ptr) throw() { Eigen::internal::conditional_aligned_free<NeedsToAlign>(ptr); } \
+ void operator delete(void * ptr, std::size_t /* sz */) throw() { Eigen::internal::conditional_aligned_free<NeedsToAlign>(ptr); } \
+ void operator delete[](void * ptr, std::size_t /* sz */) throw() { Eigen::internal::conditional_aligned_free<NeedsToAlign>(ptr); } \
/* in-place new and delete. since (at least afaik) there is no actual */ \
/* memory allocated we can safely let the default implementation handle */ \
/* this particular case. */ \
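
A rough sketch of the guarantee these changes provide. It uses the internal aligned_new/aligned_delete helpers for illustration only; the Throws type is made up and exceptions are assumed to be enabled:

#include <Eigen/Core>
#include <cassert>

struct Throws
{
  static int live, limit;
  Throws()  { if (live >= limit) throw 1; ++live; }  // the fourth construction throws below
  ~Throws() { --live; }
};
int Throws::live = 0;
int Throws::limit = 3;

int main()
{
  try
  {
    // The constructor of the fourth element throws: the elements already built are
    // destroyed and the aligned buffer is released before the exception propagates.
    Throws* p = Eigen::internal::aligned_new<Throws>(8);
    Eigen::internal::aligned_delete(p, 8);  // not reached in this run
  }
  catch (int) { /* expected */ }
  assert(Throws::live == 0);  // nothing leaked, nothing left constructed
  return 0;
}
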
diff --git a/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h b/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h
index fc8ecaa6f..a6bbdac6b 100644
--- a/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h
+++ b/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h
@@ -605,7 +605,6 @@ template<typename SolverType> struct direct_selfadjoint_eigenvalues<SolverType,3
if(computeEigenvectors)
{
Scalar safeNorm2 = Eigen::NumTraits<Scalar>::epsilon();
- safeNorm2 *= safeNorm2;
if((eivals(2)-eivals(0))<=Eigen::NumTraits<Scalar>::epsilon())
{
eivecs.setIdentity();
@@ -619,7 +618,7 @@ template<typename SolverType> struct direct_selfadjoint_eigenvalues<SolverType,3
Scalar d0 = eivals(2) - eivals(1);
Scalar d1 = eivals(1) - eivals(0);
int k = d0 > d1 ? 2 : 0;
- d0 = d0 > d1 ? d1 : d0;
+ d0 = d0 > d1 ? d0 : d1;
tmp.diagonal().array () -= eivals(k);
VectorType cross;
@@ -627,19 +626,25 @@ template<typename SolverType> struct direct_selfadjoint_eigenvalues<SolverType,3
n = (cross = tmp.row(0).cross(tmp.row(1))).squaredNorm();
if(n>safeNorm2)
+ {
eivecs.col(k) = cross / sqrt(n);
+ }
else
{
n = (cross = tmp.row(0).cross(tmp.row(2))).squaredNorm();
if(n>safeNorm2)
+ {
eivecs.col(k) = cross / sqrt(n);
+ }
else
{
n = (cross = tmp.row(1).cross(tmp.row(2))).squaredNorm();
if(n>safeNorm2)
+ {
eivecs.col(k) = cross / sqrt(n);
+ }
else
{
// the input matrix and/or the eigenvaues probably contains some inf/NaN,
@@ -659,12 +664,16 @@ template<typename SolverType> struct direct_selfadjoint_eigenvalues<SolverType,3
tmp.diagonal().array() -= eivals(1);
if(d0<=Eigen::NumTraits<Scalar>::epsilon())
+ {
eivecs.col(1) = eivecs.col(k).unitOrthogonal();
+ }
else
{
- n = (cross = eivecs.col(k).cross(tmp.row(0).normalized())).squaredNorm();
+ n = (cross = eivecs.col(k).cross(tmp.row(0))).squaredNorm();
if(n>safeNorm2)
+ {
eivecs.col(1) = cross / sqrt(n);
+ }
else
{
n = (cross = eivecs.col(k).cross(tmp.row(1))).squaredNorm();
@@ -678,13 +687,14 @@ template<typename SolverType> struct direct_selfadjoint_eigenvalues<SolverType,3
else
{
// we should never reach this point,
- // if so the last two eigenvalues are likely to ve very closed to each other
+ // if so the last two eigenvalues are likely to be very close to each other
eivecs.col(1) = eivecs.col(k).unitOrthogonal();
}
}
}
// make sure that eivecs[1] is orthogonal to eivecs[2]
+ // FIXME: this step should not be needed
Scalar d = eivecs.col(1).dot(eivecs.col(k));
eivecs.col(1) = (eivecs.col(1) - d * eivecs.col(k)).normalized();
}
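
A brief usage sketch of the closed-form 3x3 path touched above, fed the kind of degenerate input (a zeroed row/column) that the changes are meant to keep stable; the matrix values are arbitrary:

#include <Eigen/Eigenvalues>
#include <iostream>
using namespace Eigen;

int main()
{
  Matrix3f A;
  A << 2, 1, 0,
       1, 2, 0,
       0, 0, 0;                       // symmetric, with a zeroed row/column
  SelfAdjointEigenSolver<Matrix3f> es;
  es.computeDirect(A);                // direct 3x3 algorithm modified by this patch
  std::cout << es.eigenvalues().transpose() << "\n" << es.eigenvectors() << "\n";
  return es.info() == Success ? 0 : 1;
}
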
diff --git a/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h b/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h
index dc524c225..27824b9d5 100644
--- a/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h
+++ b/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h
@@ -39,7 +39,6 @@ bool bicgstab(const MatrixType& mat, const Rhs& rhs, Dest& x,
int maxIters = iters;
int n = mat.cols();
- x = precond.solve(x);
VectorType r = rhs - mat * x;
VectorType r0 = r;
diff --git a/Eigen/src/SparseCholesky/SimplicialCholesky.h b/Eigen/src/SparseCholesky/SimplicialCholesky.h
index 21c7e0b39..1abd31304 100644
--- a/Eigen/src/SparseCholesky/SimplicialCholesky.h
+++ b/Eigen/src/SparseCholesky/SimplicialCholesky.h
@@ -37,6 +37,7 @@ class SimplicialCholeskyBase : internal::noncopyable
{
public:
typedef typename internal::traits<Derived>::MatrixType MatrixType;
+ typedef typename internal::traits<Derived>::OrderingType OrderingType;
enum { UpLo = internal::traits<Derived>::UpLo };
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
@@ -240,15 +241,16 @@ class SimplicialCholeskyBase : internal::noncopyable
RealScalar m_shiftScale;
};
-template<typename _MatrixType, int _UpLo = Lower> class SimplicialLLT;
-template<typename _MatrixType, int _UpLo = Lower> class SimplicialLDLT;
-template<typename _MatrixType, int _UpLo = Lower> class SimplicialCholesky;
+template<typename _MatrixType, int _UpLo = Lower, typename _Ordering = AMDOrdering<typename _MatrixType::Index> > class SimplicialLLT;
+template<typename _MatrixType, int _UpLo = Lower, typename _Ordering = AMDOrdering<typename _MatrixType::Index> > class SimplicialLDLT;
+template<typename _MatrixType, int _UpLo = Lower, typename _Ordering = AMDOrdering<typename _MatrixType::Index> > class SimplicialCholesky;
namespace internal {
-template<typename _MatrixType, int _UpLo> struct traits<SimplicialLLT<_MatrixType,_UpLo> >
+template<typename _MatrixType, int _UpLo, typename _Ordering> struct traits<SimplicialLLT<_MatrixType,_UpLo,_Ordering> >
{
typedef _MatrixType MatrixType;
+ typedef _Ordering OrderingType;
enum { UpLo = _UpLo };
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::Index Index;
@@ -259,9 +261,10 @@ template<typename _MatrixType, int _UpLo> struct traits<SimplicialLLT<_MatrixTyp
static inline MatrixU getU(const MatrixType& m) { return m.adjoint(); }
};
-template<typename _MatrixType,int _UpLo> struct traits<SimplicialLDLT<_MatrixType,_UpLo> >
+template<typename _MatrixType,int _UpLo, typename _Ordering> struct traits<SimplicialLDLT<_MatrixType,_UpLo,_Ordering> >
{
typedef _MatrixType MatrixType;
+ typedef _Ordering OrderingType;
enum { UpLo = _UpLo };
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::Index Index;
@@ -272,9 +275,10 @@ template<typename _MatrixType,int _UpLo> struct traits<SimplicialLDLT<_MatrixTyp
static inline MatrixU getU(const MatrixType& m) { return m.adjoint(); }
};
-template<typename _MatrixType, int _UpLo> struct traits<SimplicialCholesky<_MatrixType,_UpLo> >
+template<typename _MatrixType, int _UpLo, typename _Ordering> struct traits<SimplicialCholesky<_MatrixType,_UpLo,_Ordering> >
{
typedef _MatrixType MatrixType;
+ typedef _Ordering OrderingType;
enum { UpLo = _UpLo };
};
@@ -294,11 +298,12 @@ template<typename _MatrixType, int _UpLo> struct traits<SimplicialCholesky<_Matr
* \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
* \tparam _UpLo the triangular part that will be used for the computations. It can be Lower
* or Upper. Default is Lower.
+ * \tparam _Ordering The ordering method to use, either AMDOrdering<> or NaturalOrdering<>. Default is AMDOrdering<>
*
- * \sa class SimplicialLDLT
+ * \sa class SimplicialLDLT, class AMDOrdering, class NaturalOrdering
*/
-template<typename _MatrixType, int _UpLo>
- class SimplicialLLT : public SimplicialCholeskyBase<SimplicialLLT<_MatrixType,_UpLo> >
+template<typename _MatrixType, int _UpLo, typename _Ordering>
+ class SimplicialLLT : public SimplicialCholeskyBase<SimplicialLLT<_MatrixType,_UpLo,_Ordering> >
{
public:
typedef _MatrixType MatrixType;
@@ -382,11 +387,12 @@ public:
* \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
* \tparam _UpLo the triangular part that will be used for the computations. It can be Lower
* or Upper. Default is Lower.
+ * \tparam _Ordering The ordering method to use, either AMDOrdering<> or NaturalOrdering<>. Default is AMDOrdering<>
*
- * \sa class SimplicialLLT
+ * \sa class SimplicialLLT, class AMDOrdering, class NaturalOrdering
*/
-template<typename _MatrixType, int _UpLo>
- class SimplicialLDLT : public SimplicialCholeskyBase<SimplicialLDLT<_MatrixType,_UpLo> >
+template<typename _MatrixType, int _UpLo, typename _Ordering>
+ class SimplicialLDLT : public SimplicialCholeskyBase<SimplicialLDLT<_MatrixType,_UpLo,_Ordering> >
{
public:
typedef _MatrixType MatrixType;
@@ -467,8 +473,8 @@ public:
*
* \sa class SimplicialLDLT, class SimplicialLLT
*/
-template<typename _MatrixType, int _UpLo>
- class SimplicialCholesky : public SimplicialCholeskyBase<SimplicialCholesky<_MatrixType,_UpLo> >
+template<typename _MatrixType, int _UpLo, typename _Ordering>
+ class SimplicialCholesky : public SimplicialCholeskyBase<SimplicialCholesky<_MatrixType,_UpLo,_Ordering> >
{
public:
typedef _MatrixType MatrixType;
@@ -612,15 +618,13 @@ void SimplicialCholeskyBase<Derived>::ordering(const MatrixType& a, CholMatrixTy
{
eigen_assert(a.rows()==a.cols());
const Index size = a.rows();
- // TODO allows to configure the permutation
// Note that amd compute the inverse permutation
{
CholMatrixType C;
C = a.template selfadjointView<UpLo>();
- // remove diagonal entries:
- // seems not to be needed
- // C.prune(keep_diag());
- internal::minimum_degree_ordering(C, m_Pinv);
+
+ OrderingType ordering;
+ ordering(C,m_Pinv);
}
if(m_Pinv.size()>0)
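
A short usage sketch of the new ordering parameter (not part of the patch; the matrix values are arbitrary):

#include <Eigen/Sparse>
#include <Eigen/Dense>
using namespace Eigen;

int main()
{
  typedef SparseMatrix<double> SpMat;
  SpMat A(3,3);
  A.insert(0,0) = 4; A.insert(1,1) = 4; A.insert(2,2) = 4;
  A.insert(1,0) = 1; A.insert(0,1) = 1;
  A.makeCompressed();
  VectorXd b = VectorXd::Ones(3);

  SimplicialLDLT<SpMat> amd(A);                                        // default: AMD ordering
  SimplicialLDLT<SpMat, Lower, NaturalOrdering<SpMat::Index> > nat(A); // identity permutation
  VectorXd x1 = amd.solve(b), x2 = nat.solve(b);
  return (x1 - x2).norm() < 1e-10 ? 0 : 1;  // both orderings give the same solution
}
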
diff --git a/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h b/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h
index f6824a895..815fdb6d8 100644
--- a/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h
+++ b/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h
@@ -15,7 +15,7 @@ namespace Eigen {
namespace internal {
template<typename Lhs, typename Rhs, typename ResultType>
-static void conservative_sparse_sparse_product_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+static void conservative_sparse_sparse_product_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res, bool sortedInsertion = false)
{
typedef typename remove_all<Lhs>::type::Scalar Scalar;
typedef typename remove_all<Lhs>::type::Index Index;
@@ -24,10 +24,10 @@ static void conservative_sparse_sparse_product_impl(const Lhs& lhs, const Rhs& r
Index rows = lhs.innerSize();
Index cols = rhs.outerSize();
eigen_assert(lhs.outerSize() == rhs.innerSize());
-
- std::vector<bool> mask(rows,false);
- Matrix<Scalar,Dynamic,1> values(rows);
- Matrix<Index,Dynamic,1> indices(rows);
+
+ ei_declare_aligned_stack_constructed_variable(bool, mask, rows, 0);
+ ei_declare_aligned_stack_constructed_variable(Scalar, values, rows, 0);
+ ei_declare_aligned_stack_constructed_variable(Index, indices, rows, 0);
// estimate the number of non zero entries
// given a rhs column containing Y non zeros, we assume that the respective Y columns
@@ -77,53 +77,51 @@ static void conservative_sparse_sparse_product_impl(const Lhs& lhs, const Rhs& r
values[i] += x * y;
}
}
-
- // unordered insertion
- for(Index k=0; k<nnz; ++k)
+ if(!sortedInsertion)
{
- Index i = indices[k];
- res.insertBackByOuterInnerUnordered(j,i) = values[i];
- mask[i] = false;
- }
-
-#if 0
- // alternative ordered insertion code:
-
- Index t200 = rows/(log2(200)*1.39);
- Index t = (rows*100)/139;
-
- // FIXME reserve nnz non zeros
- // FIXME implement fast sort algorithms for very small nnz
- // if the result is sparse enough => use a quick sort
- // otherwise => loop through the entire vector
- // In order to avoid to perform an expensive log2 when the
- // result is clearly very sparse we use a linear bound up to 200.
- //if((nnz<200 && nnz<t200) || nnz * log2(nnz) < t)
- //res.startVec(j);
- if(true)
- {
- if(nnz>1) std::sort(indices.data(),indices.data()+nnz);
+ // unordered insertion
for(Index k=0; k<nnz; ++k)
{
Index i = indices[k];
- res.insertBackByOuterInner(j,i) = values[i];
+ res.insertBackByOuterInnerUnordered(j,i) = values[i];
mask[i] = false;
}
}
else
{
- // dense path
- for(Index i=0; i<rows; ++i)
+ // alternative ordered insertion code:
+ const Index t200 = rows/11; // 11 == (log2(200)*1.39)
+ const Index t = (rows*100)/139;
+
+ // FIXME reserve nnz non zeros
+ // FIXME implement faster sorting algorithms for very small nnz
+ // if the result is sparse enough => use a quick sort
+ // otherwise => loop through the entire vector
+ // In order to avoid performing an expensive log2 when the
+ // result is clearly very sparse we use a linear bound up to 200.
+ if((nnz<200 && nnz<t200) || nnz * log2(nnz) < t)
{
- if(mask[i])
+ if(nnz>1) std::sort(indices,indices+nnz);
+ for(Index k=0; k<nnz; ++k)
{
- mask[i] = false;
+ Index i = indices[k];
res.insertBackByOuterInner(j,i) = values[i];
+ mask[i] = false;
+ }
+ }
+ else
+ {
+ // dense path
+ for(Index i=0; i<rows; ++i)
+ {
+ if(mask[i])
+ {
+ mask[i] = false;
+ res.insertBackByOuterInner(j,i) = values[i];
+ }
}
}
}
-#endif
-
}
res.finalize();
}
@@ -148,12 +146,24 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,C
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::Index> RowMajorMatrix;
- typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::Index> ColMajorMatrix;
+ typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::Index> ColMajorMatrixAux;
+ typedef typename sparse_eval<ColMajorMatrixAux,ResultType::RowsAtCompileTime,ResultType::ColsAtCompileTime>::type ColMajorMatrix;
+
ColMajorMatrix resCol(lhs.rows(),rhs.cols());
- internal::conservative_sparse_sparse_product_impl<Lhs,Rhs,ColMajorMatrix>(lhs, rhs, resCol);
- // sort the non zeros:
- RowMajorMatrix resRow(resCol);
- res = resRow;
+ // FIXME, the following heuristic is probably not very good.
+ if(lhs.rows()>=rhs.cols())
+ {
+ // perform sorted insertion
+ internal::conservative_sparse_sparse_product_impl<Lhs,Rhs,ColMajorMatrix>(lhs, rhs, resCol, true);
+ res.swap(resCol);
+ }
+ else
+ {
+ // resort to transposition to sort the entries
+ internal::conservative_sparse_sparse_product_impl<Lhs,Rhs,ColMajorMatrix>(lhs, rhs, resCol, false);
+ RowMajorMatrix resRow(resCol);
+ res = resRow;
+ }
}
};
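
At the user level, the heuristic above is exercised simply by a column-major sparse*sparse product; a small sketch with arbitrary values:

#include <Eigen/Sparse>
#include <iostream>
#include <vector>
using namespace Eigen;

int main()
{
  typedef SparseMatrix<double> SpMat;   // column-major storage
  typedef Triplet<double> T;
  std::vector<T> ta, tb;
  for (int i = 0; i < 8; ++i)
  {
    ta.push_back(T(i, i, 1.0));         // entries of a 50x8 "tall" factor
    tb.push_back(T(i, 7 - i, 2.0));     // entries of an 8x40 "wide" factor
  }
  SpMat A(50, 8), B(8, 40);
  A.setFromTriplets(ta.begin(), ta.end());
  B.setFromTriplets(tb.begin(), tb.end());
  SpMat C = A * B;  // lhs.rows() >= rhs.cols(): the sorted-insertion branch above is taken
  std::cout << C.nonZeros() << "\n";
  return 0;
}
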
diff --git a/Eigen/src/SparseCore/SparseMatrixBase.h b/Eigen/src/SparseCore/SparseMatrixBase.h
index 4c5563755..aff4c8b6f 100644
--- a/Eigen/src/SparseCore/SparseMatrixBase.h
+++ b/Eigen/src/SparseCore/SparseMatrixBase.h
@@ -291,7 +291,9 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
/** sparse * dense (returns a dense object unless it is an outer product) */
template<typename OtherDerived>
const typename SparseDenseProductReturnType<Derived,OtherDerived>::Type
- operator*(const MatrixBase<OtherDerived> &other) const;
+ operator*(const MatrixBase<OtherDerived> &other) const
+ { return typename SparseDenseProductReturnType<Derived,OtherDerived>::Type(derived(), other.derived()); }
+
#else // EIGEN_TEST_EVALUATORS
// sparse * diagonal
template<typename OtherDerived>
diff --git a/Eigen/src/SparseCore/SparseVector.h b/Eigen/src/SparseCore/SparseVector.h
index ed78d7c1e..0f9aa9dd1 100644
--- a/Eigen/src/SparseCore/SparseVector.h
+++ b/Eigen/src/SparseCore/SparseVector.h
@@ -109,7 +109,7 @@ class SparseVector
inline Scalar& coeffRef(Index row, Index col)
{
eigen_assert(IsColVector ? (col==0 && row>=0 && row<m_size) : (row==0 && col>=0 && col<m_size));
- return coeff(IsColVector ? row : col);
+ return coeffRef(IsColVector ? row : col);
}
/** \returns a reference to the coefficient value at given index \a i
@@ -151,6 +151,18 @@ class SparseVector
m_data.append(0, i);
return m_data.value(m_data.size()-1);
}
+
+ Scalar& insertBackByOuterInnerUnordered(Index outer, Index inner)
+ {
+ EIGEN_UNUSED_VARIABLE(outer);
+ eigen_assert(outer==0);
+ return insertBackUnordered(inner);
+ }
+ inline Scalar& insertBackUnordered(Index i)
+ {
+ m_data.append(0, i);
+ return m_data.value(m_data.size()-1);
+ }
inline Scalar& insert(Index row, Index col)
{
diff --git a/Eigen/src/SparseQR/SparseQR.h b/Eigen/src/SparseQR/SparseQR.h
index e8d7b8607..6be569533 100644
--- a/Eigen/src/SparseQR/SparseQR.h
+++ b/Eigen/src/SparseQR/SparseQR.h
@@ -75,7 +75,7 @@ class SparseQR
typedef Matrix<Scalar, Dynamic, 1> ScalarVector;
typedef PermutationMatrix<Dynamic, Dynamic, Index> PermutationType;
public:
- SparseQR () : m_isInitialized(false), m_analysisIsok(false), m_lastError(""), m_useDefaultThreshold(true),m_isQSorted(false)
+ SparseQR () : m_isInitialized(false), m_analysisIsok(false), m_lastError(""), m_useDefaultThreshold(true),m_isQSorted(false),m_isEtreeOk(false)
{ }
/** Construct a QR factorization of the matrix \a mat.
@@ -84,7 +84,7 @@ class SparseQR
*
* \sa compute()
*/
- SparseQR(const MatrixType& mat) : m_isInitialized(false), m_analysisIsok(false), m_lastError(""), m_useDefaultThreshold(true),m_isQSorted(false)
+ SparseQR(const MatrixType& mat) : m_isInitialized(false), m_analysisIsok(false), m_lastError(""), m_useDefaultThreshold(true),m_isQSorted(false),m_isEtreeOk(false)
{
compute(mat);
}
@@ -262,6 +262,7 @@ class SparseQR
IndexVector m_etree; // Column elimination tree
IndexVector m_firstRowElt; // First element in each row
bool m_isQSorted; // whether Q is sorted or not
+ bool m_isEtreeOk; // whether the elimination tree matches the initial input matrix
template <typename, typename > friend struct SparseQR_QProduct;
template <typename > friend struct SparseQRMatrixQReturnType;
@@ -297,6 +298,7 @@ void SparseQR<MatrixType,OrderingType>::analyzePattern(const MatrixType& mat)
// Compute the column elimination tree of the permuted matrix
m_outputPerm_c = m_perm_c.inverse();
internal::coletree(mat, m_etree, m_firstRowElt, m_outputPerm_c.indices().data());
+ m_isEtreeOk = true;
m_R.resize(m, n);
m_Q.resize(m, diagSize);
@@ -330,6 +332,15 @@ void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat)
Index nzcolR, nzcolQ; // Number of nonzero for the current column of R and Q
ScalarVector tval(m); // The dense vector used to compute the current column
RealScalar pivotThreshold = m_threshold;
+
+ m_R.setZero();
+ m_Q.setZero();
+ if(!m_isEtreeOk)
+ {
+ m_outputPerm_c = m_perm_c.inverse();
+ internal::coletree(mat, m_etree, m_firstRowElt, m_outputPerm_c.indices().data());
+ m_isEtreeOk = true;
+ }
m_pmat = mat;
m_pmat.uncompress(); // To have the innerNonZeroPtr allocated
@@ -447,7 +458,7 @@ void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat)
}
} // End update current column
- Scalar tau = 0;
+ Scalar tau = RealScalar(0);
RealScalar beta = 0;
if(nonzeroCol < diagSize)
@@ -461,7 +472,6 @@ void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat)
for (Index itq = 1; itq < nzcolQ; ++itq) sqrNorm += numext::abs2(tval(Qidx(itq)));
if(sqrNorm == RealScalar(0) && numext::imag(c0) == RealScalar(0))
{
- tau = RealScalar(0);
beta = numext::real(c0);
tval(Qidx(0)) = 1;
}
@@ -514,6 +524,7 @@ void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat)
// Recompute the column elimination tree
internal::coletree(m_pmat, m_etree, m_firstRowElt, m_pivotperm.indices().data());
+ m_isEtreeOk = false;
}
}
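
A usage sketch of the scenario the m_isEtreeOk flag addresses: factorizing a second matrix with the same sparsity pattern after column pivoting may have altered the elimination tree. Names and values below are illustrative:

#include <Eigen/Sparse>
#include <vector>
using namespace Eigen;

int main()
{
  typedef SparseMatrix<double> SpMat;
  typedef Triplet<double> T;
  std::vector<T> t;
  for (int i = 0; i < 6; ++i)
  {
    t.push_back(T(i, i, 2.0));
    if (i > 0) t.push_back(T(i, i - 1, 1.0));
  }
  SpMat A(6, 6);
  A.setFromTriplets(t.begin(), t.end());

  SparseQR<SpMat, COLAMDOrdering<int> > qr;
  qr.analyzePattern(A);        // builds the column elimination tree once
  qr.factorize(A);             // may permute columns and invalidate that tree

  SpMat A2 = 2.0 * A;          // same pattern, new values
  qr.factorize(A2);            // the flag above triggers recomputation of the tree
  VectorXd x = qr.solve(VectorXd::Ones(6));
  return (qr.info() == Success && x.size() == 6) ? 0 : 1;
}
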
diff --git a/blas/level3_impl.h b/blas/level3_impl.h
index 07dbc22ff..a05872666 100644
--- a/blas/level3_impl.h
+++ b/blas/level3_impl.h
@@ -56,7 +56,7 @@ int EIGEN_BLAS_FUNC(gemm)(char *opa, char *opb, int *m, int *n, int *k, RealScal
else matrix(c, *m, *n, *ldc) *= beta;
}
- internal::gemm_blocking_space<ColMajor,Scalar,Scalar,Dynamic,Dynamic,Dynamic> blocking(*m,*n,*k);
+ internal::gemm_blocking_space<ColMajor,Scalar,Scalar,Dynamic,Dynamic,Dynamic> blocking(*m,*n,*k,true);
int code = OP(*opa) | (OP(*opb) << 2);
func[code](*m, *n, *k, a, *lda, b, *ldb, c, *ldc, alpha, blocking, 0);
diff --git a/cmake/FindEigen3.cmake b/cmake/FindEigen3.cmake
index 9c546a05d..cea1afeab 100644
--- a/cmake/FindEigen3.cmake
+++ b/cmake/FindEigen3.cmake
@@ -9,6 +9,12 @@
# EIGEN3_FOUND - system has eigen lib with correct version
# EIGEN3_INCLUDE_DIR - the eigen include directory
# EIGEN3_VERSION - eigen version
+#
+# This module reads hints about search locations from
+# the following environment variables:
+#
+# EIGEN3_ROOT
+# EIGEN3_ROOT_DIR
# Copyright (c) 2006, 2007 Montel Laurent, <montel@kde.org>
# Copyright (c) 2008, 2009 Gael Guennebaud, <g.gael@free.fr>
@@ -62,6 +68,9 @@ if (EIGEN3_INCLUDE_DIR)
else (EIGEN3_INCLUDE_DIR)
find_path(EIGEN3_INCLUDE_DIR NAMES signature_of_eigen3_matrix_library
+ HINTS
+ ENV EIGEN3_ROOT
+ ENV EIGEN3_ROOT_DIR
PATHS
${CMAKE_INSTALL_PREFIX}/include
${KDE4_INCLUDE_DIR}
diff --git a/doc/CMakeLists.txt b/doc/CMakeLists.txt
index 8a493031c..1b8aaf9aa 100644
--- a/doc/CMakeLists.txt
+++ b/doc/CMakeLists.txt
@@ -10,12 +10,20 @@ if(CMAKE_COMPILER_IS_GNUCXX)
endif(CMAKE_SYSTEM_NAME MATCHES Linux)
endif(CMAKE_COMPILER_IS_GNUCXX)
+option(EIGEN_INTERNAL_DOCUMENTATION "Build internal documentation" OFF)
+
+
# Set some Doxygen flags
set(EIGEN_DOXY_PROJECT_NAME "Eigen")
set(EIGEN_DOXY_OUTPUT_DIRECTORY_SUFFIX "")
set(EIGEN_DOXY_INPUT "\"${Eigen_SOURCE_DIR}/Eigen\" \"${Eigen_SOURCE_DIR}/doc\"")
set(EIGEN_DOXY_HTML_COLORSTYLE_HUE "220")
set(EIGEN_DOXY_TAGFILES "")
+if(EIGEN_INTERNAL_DOCUMENTATION)
+ set(EIGEN_DOXY_INTERNAL "YES")
+else(EIGEN_INTERNAL_DOCUMENTATION)
+ set(EIGEN_DOXY_INTERNAL "NO")
+endif(EIGEN_INTERNAL_DOCUMENTATION)
configure_file(
${CMAKE_CURRENT_SOURCE_DIR}/Doxyfile.in
diff --git a/doc/Doxyfile.in b/doc/Doxyfile.in
index 2ceee5e20..5d82add72 100644
--- a/doc/Doxyfile.in
+++ b/doc/Doxyfile.in
@@ -460,7 +460,7 @@ HIDE_IN_BODY_DOCS = NO
# to NO (the default) then the documentation will be excluded.
# Set it to YES to include the internal documentation.
-INTERNAL_DOCS = NO
+INTERNAL_DOCS = ${EIGEN_DOXY_INTERNAL}
# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate
# file names in lower-case letters. If set to YES upper-case letters are also
@@ -480,7 +480,7 @@ HIDE_SCOPE_NAMES = NO
# will put a list of the files that are included by a file in the documentation
# of that file.
-SHOW_INCLUDE_FILES = NO
+SHOW_INCLUDE_FILES = ${EIGEN_DOXY_INTERNAL}
# If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen
# will list include files with double quotes in the documentation
@@ -546,7 +546,7 @@ STRICT_PROTO_MATCHING = NO
# disable (NO) the todo list. This list is created by putting \todo
# commands in the documentation.
-GENERATE_TODOLIST = NO
+GENERATE_TODOLIST = ${EIGEN_DOXY_INTERNAL}
# The GENERATE_TESTLIST tag can be used to enable (YES) or
# disable (NO) the test list. This list is created by putting \test
@@ -558,13 +558,13 @@ GENERATE_TESTLIST = NO
# disable (NO) the bug list. This list is created by putting \bug
# commands in the documentation.
-GENERATE_BUGLIST = NO
+GENERATE_BUGLIST = ${EIGEN_DOXY_INTERNAL}
# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or
# disable (NO) the deprecated list. This list is created by putting
# \deprecated commands in the documentation.
-GENERATE_DEPRECATEDLIST= NO
+GENERATE_DEPRECATEDLIST= ${EIGEN_DOXY_INTERNAL}
# The ENABLED_SECTIONS tag can be used to enable conditional
# documentation sections, marked by \if sectionname ... \endif.
diff --git a/doc/PreprocessorDirectives.dox b/doc/PreprocessorDirectives.dox
index e67991fb7..4be2167ef 100644
--- a/doc/PreprocessorDirectives.dox
+++ b/doc/PreprocessorDirectives.dox
@@ -27,10 +27,23 @@ are doing.
Defaults to the %IOFormat constructed by the default constructor IOFormat::IOFormat().
- \b EIGEN_INITIALIZE_MATRICES_BY_ZERO - if defined, all entries of newly constructed matrices and arrays are
initialized to zero, as are new entries in matrices and arrays after resizing. Not defined by default.
+ \warning The unary (resp. binary) constructor of \c 1x1 (resp. \c 2x1 or \c 1x2) fixed-size matrices is
+ always interpreted as an initialization constructor where the argument(s) are the coefficient values
+ and not the sizes. For instance, \code Vector2d v(2,1); \endcode will create a vector with coefficients [2,1],
+ and \b not a \c 2x1 vector initialized with zeros (i.e., [0,0]). If such cases might occur, then it is
+ recommended to use the default constructor with an explicit call to resize:
+ \code
+ Matrix<?,SizeAtCompileTime,1> v;
+ v.resize(size);
+ Matrix<?,RowsAtCompileTime,ColsAtCompileTime> m;
+ m.resize(rows,cols);
+ \endcode
- \b EIGEN_INITIALIZE_MATRICES_BY_NAN - if defined, all entries of newly constructed matrices and arrays are
initialized to NaN, as are new entries in matrices and arrays after resizing. This option is especially
useful for debugging purpose, though a memory tool like <a href="http://valgrind.org/">valgrind</a> is
preferable. Not defined by default.
+ \warning See the documentation of \c EIGEN_INITIALIZE_MATRICES_BY_ZERO for a discussion of the limitations
+ of these macros when applied to \c 1x1, \c 1x2, and \c 2x1 fixed-size matrices.
- \b EIGEN_NO_AUTOMATIC_RESIZING - if defined, the matrices (or arrays) on both sides of an assignment
<tt>a = b</tt> have to be of the same size; otherwise, %Eigen automatically resizes \c a so that it is of
the correct size. Not defined by default.
@@ -61,6 +74,8 @@ run time. However, these assertions do cost time and can thus be turned off.
expect that any objects passed to it are aligned. This will turn off vectorization. Not defined by default.
- \b EIGEN_DONT_ALIGN_STATICALLY - disables alignment of arrays on the stack. Not defined by default, unless
\c EIGEN_DONT_ALIGN is defined.
+ - \b EIGEN_DONT_PARALLELIZE - if defined, this disables multi-threading. This is only relevant if you enabled OpenMP.
+ See \ref TopicMultiThreading for details.
- \b EIGEN_DONT_VECTORIZE - disables explicit vectorization when defined. Not defined by default, unless
alignment is disabled by %Eigen's platform test or the user defining \c EIGEN_DONT_ALIGN.
- \b EIGEN_FAST_MATH - enables some optimizations which might affect the accuracy of the result. This currently
diff --git a/doc/examples/CMakeLists.txt b/doc/examples/CMakeLists.txt
index 71b272a15..08cf8efd7 100644
--- a/doc/examples/CMakeLists.txt
+++ b/doc/examples/CMakeLists.txt
@@ -6,12 +6,10 @@ foreach(example_src ${examples_SRCS})
if(EIGEN_STANDARD_LIBRARIES_TO_LINK_TO)
target_link_libraries(${example} ${EIGEN_STANDARD_LIBRARIES_TO_LINK_TO})
endif()
- get_target_property(example_executable
- ${example} LOCATION)
add_custom_command(
TARGET ${example}
POST_BUILD
- COMMAND ${example_executable}
+ COMMAND ${example}
ARGS >${CMAKE_CURRENT_BINARY_DIR}/${example}.out
)
add_dependencies(all_examples ${example})
diff --git a/doc/snippets/CMakeLists.txt b/doc/snippets/CMakeLists.txt
index 92a22ea61..1135900cf 100644
--- a/doc/snippets/CMakeLists.txt
+++ b/doc/snippets/CMakeLists.txt
@@ -14,12 +14,10 @@ foreach(snippet_src ${snippets_SRCS})
if(EIGEN_STANDARD_LIBRARIES_TO_LINK_TO)
target_link_libraries(${compile_snippet_target} ${EIGEN_STANDARD_LIBRARIES_TO_LINK_TO})
endif()
- get_target_property(compile_snippet_executable
- ${compile_snippet_target} LOCATION)
add_custom_command(
TARGET ${compile_snippet_target}
POST_BUILD
- COMMAND ${compile_snippet_executable}
+ COMMAND ${compile_snippet_target}
ARGS >${CMAKE_CURRENT_BINARY_DIR}/${snippet}.out
)
add_dependencies(all_snippets ${compile_snippet_target})
@@ -27,4 +25,4 @@ foreach(snippet_src ${snippets_SRCS})
PROPERTIES OBJECT_DEPENDS ${snippet_src})
endforeach(snippet_src)
-ei_add_target_property(compile_tut_arithmetic_transpose_aliasing COMPILE_FLAGS -DEIGEN_NO_DEBUG)
\ No newline at end of file
+ei_add_target_property(compile_tut_arithmetic_transpose_aliasing COMPILE_FLAGS -DEIGEN_NO_DEBUG)
diff --git a/doc/special_examples/CMakeLists.txt b/doc/special_examples/CMakeLists.txt
index 45e2339e3..aab80a55d 100644
--- a/doc/special_examples/CMakeLists.txt
+++ b/doc/special_examples/CMakeLists.txt
@@ -1,4 +1,3 @@
-
if(NOT EIGEN_TEST_NOQT)
find_package(Qt4)
if(QT4_FOUND)
@@ -6,17 +5,16 @@ if(NOT EIGEN_TEST_NOQT)
endif()
endif(NOT EIGEN_TEST_NOQT)
-
if(QT4_FOUND)
add_executable(Tutorial_sparse_example Tutorial_sparse_example.cpp Tutorial_sparse_example_details.cpp)
target_link_libraries(Tutorial_sparse_example ${EIGEN_STANDARD_LIBRARIES_TO_LINK_TO} ${QT_QTCORE_LIBRARY} ${QT_QTGUI_LIBRARY})
-
+
add_custom_command(
TARGET Tutorial_sparse_example
POST_BUILD
COMMAND Tutorial_sparse_example ARGS ${CMAKE_CURRENT_BINARY_DIR}/../html/Tutorial_sparse_example.jpeg
)
-
+
add_dependencies(all_examples Tutorial_sparse_example)
endif(QT4_FOUND)
@@ -26,15 +24,11 @@ if(EIGEN_COMPILER_SUPPORT_CPP11)
target_link_libraries(random_cpp11 ${EIGEN_STANDARD_LIBRARIES_TO_LINK_TO})
add_dependencies(all_examples random_cpp11)
ei_add_target_property(random_cpp11 COMPILE_FLAGS "-std=c++11")
-
- get_target_property(random_cpp11_exec
- random_cpp11 LOCATION)
+
add_custom_command(
TARGET random_cpp11
POST_BUILD
- COMMAND ${random_cpp11_exec}
+ COMMAND random_cpp11
ARGS >${CMAKE_CURRENT_BINARY_DIR}/random_cpp11.out
)
-
-
-endif()
\ No newline at end of file
+endif()
diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt
index 7555aaa38..6446b8bb0 100644
--- a/test/CMakeLists.txt
+++ b/test/CMakeLists.txt
@@ -158,7 +158,9 @@ ei_add_test(basicstuff)
ei_add_test(linearstructure)
ei_add_test(integer_types)
ei_add_test(unalignedcount)
-ei_add_test(exceptions)
+if(NOT EIGEN_TEST_NO_EXCEPTIONS)
+ ei_add_test(exceptions)
+endif()
ei_add_test(redux)
ei_add_test(visitor)
ei_add_test(block)
@@ -242,7 +244,9 @@ ei_add_test(nesting_ops "${CMAKE_CXX_FLAGS_DEBUG}")
ei_add_test(zerosized)
ei_add_test(dontalign)
ei_add_test(evaluators)
-ei_add_test(sizeoverflow)
+if(NOT EIGEN_TEST_NO_EXCEPTIONS)
+ ei_add_test(sizeoverflow)
+endif()
ei_add_test(prec_inverse_4x4)
ei_add_test(vectorwiseop)
ei_add_test(special_numbers)
diff --git a/test/basicstuff.cpp b/test/basicstuff.cpp
index 56370d591..3d7d74d4e 100644
--- a/test/basicstuff.cpp
+++ b/test/basicstuff.cpp
@@ -183,6 +183,7 @@ void fixedSizeMatrixConstruction()
Scalar raw[4];
for(int k=0; k<4; ++k)
raw[k] = internal::random<Scalar>();
+
{
Matrix<Scalar,4,1> m(raw);
Array<Scalar,4,1> a(raw);
@@ -200,18 +201,40 @@ void fixedSizeMatrixConstruction()
VERIFY((a==Array<Scalar,3,1>(raw[0],raw[1],raw[2])).all());
}
{
- Matrix<Scalar,2,1> m(raw);
- Array<Scalar,2,1> a(raw);
+ Matrix<Scalar,2,1> m(raw), m2( (DenseIndex(raw[0])), (DenseIndex(raw[1])) );
+ Array<Scalar,2,1> a(raw), a2( (DenseIndex(raw[0])), (DenseIndex(raw[1])) );
for(int k=0; k<2; ++k) VERIFY(m(k) == raw[k]);
for(int k=0; k<2; ++k) VERIFY(a(k) == raw[k]);
VERIFY_IS_EQUAL(m,(Matrix<Scalar,2,1>(raw[0],raw[1])));
VERIFY((a==Array<Scalar,2,1>(raw[0],raw[1])).all());
+ for(int k=0; k<2; ++k) VERIFY(m2(k) == DenseIndex(raw[k]));
+ for(int k=0; k<2; ++k) VERIFY(a2(k) == DenseIndex(raw[k]));
+ }
+ {
+ Matrix<Scalar,1,2> m(raw),
+ m2( (DenseIndex(raw[0])), (DenseIndex(raw[1])) ),
+ m3( (int(raw[0])), (int(raw[1])) ),
+ m4( (float(raw[0])), (float(raw[1])) );
+ Array<Scalar,1,2> a(raw), a2( (DenseIndex(raw[0])), (DenseIndex(raw[1])) );
+ for(int k=0; k<2; ++k) VERIFY(m(k) == raw[k]);
+ for(int k=0; k<2; ++k) VERIFY(a(k) == raw[k]);
+ VERIFY_IS_EQUAL(m,(Matrix<Scalar,1,2>(raw[0],raw[1])));
+ VERIFY((a==Array<Scalar,1,2>(raw[0],raw[1])).all());
+ for(int k=0; k<2; ++k) VERIFY(m2(k) == DenseIndex(raw[k]));
+ for(int k=0; k<2; ++k) VERIFY(a2(k) == DenseIndex(raw[k]));
+ for(int k=0; k<2; ++k) VERIFY(m3(k) == int(raw[k]));
+ for(int k=0; k<2; ++k) VERIFY(m4(k) == float(raw[k]));
}
{
- Matrix<Scalar,1,1> m(raw);
- Array<Scalar,1,1> a(raw);
+ Matrix<Scalar,1,1> m(raw), m1(raw[0]), m2( (DenseIndex(raw[0])) ), m3( (int(raw[0])) );
+ Array<Scalar,1,1> a(raw), a1(raw[0]), a2( (DenseIndex(raw[0])) );
VERIFY(m(0) == raw[0]);
VERIFY(a(0) == raw[0]);
+ VERIFY(m1(0) == raw[0]);
+ VERIFY(a1(0) == raw[0]);
+ VERIFY(m2(0) == DenseIndex(raw[0]));
+ VERIFY(a2(0) == DenseIndex(raw[0]));
+ VERIFY(m3(0) == int(raw[0]));
VERIFY_IS_EQUAL(m,(Matrix<Scalar,1,1>(raw[0])));
VERIFY((a==Array<Scalar,1,1>(raw[0])).all());
}
@@ -233,9 +256,10 @@ void test_basicstuff()
}
CALL_SUBTEST_1(fixedSizeMatrixConstruction<unsigned char>());
- CALL_SUBTEST_1(fixedSizeMatrixConstruction<double>());
+ CALL_SUBTEST_1(fixedSizeMatrixConstruction<float>());
CALL_SUBTEST_1(fixedSizeMatrixConstruction<double>());
CALL_SUBTEST_1(fixedSizeMatrixConstruction<int>());
+ CALL_SUBTEST_1(fixedSizeMatrixConstruction<long int>());
CALL_SUBTEST_1(fixedSizeMatrixConstruction<std::ptrdiff_t>());
CALL_SUBTEST_2(casting());
diff --git a/test/ctorleak.cpp b/test/ctorleak.cpp
new file mode 100644
index 000000000..145d91be4
--- /dev/null
+++ b/test/ctorleak.cpp
@@ -0,0 +1,51 @@
+#include "main.h"
+
+#include <exception> // std::exception
+
+struct Foo
+{
+ static unsigned object_count;
+ static unsigned object_limit;
+ int dummy;
+
+ Foo()
+ {
+#ifdef EIGEN_EXCEPTIONS
+ // TODO: Is this the correct way to handle this?
+ if (Foo::object_count > Foo::object_limit) { throw Foo::Fail(); }
+#endif
+ ++Foo::object_count;
+ }
+
+ ~Foo()
+ {
+ --Foo::object_count;
+ }
+
+ class Fail : public std::exception {};
+};
+
+unsigned Foo::object_count = 0;
+unsigned Foo::object_limit = 0;
+
+
+void test_ctorleak()
+{
+ typedef DenseIndex Index;
+ Foo::object_count = 0;
+ for(int i = 0; i < g_repeat; i++) {
+ Index rows = internal::random<Index>(2,EIGEN_TEST_MAX_SIZE), cols = internal::random<Index>(2,EIGEN_TEST_MAX_SIZE);
+ Foo::object_limit = internal::random(0, rows*cols - 2);
+#ifdef EIGEN_EXCEPTIONS
+ try
+ {
+#endif
+ Matrix<Foo, Dynamic, Dynamic> m(rows, cols);
+#ifdef EIGEN_EXCEPTIONS
+ VERIFY(false); // not reached if exceptions are enabled
+ }
+ catch (const Foo::Fail&) { /* ignore */ }
+#endif
+ }
+ VERIFY_IS_EQUAL(static_cast<unsigned>(0), Foo::object_count);
+}
diff --git a/test/dynalloc.cpp b/test/dynalloc.cpp
index c98cc80f0..1190eb9cd 100644
--- a/test/dynalloc.cpp
+++ b/test/dynalloc.cpp
@@ -55,7 +55,7 @@ void check_aligned_new()
void check_aligned_stack_alloc()
{
- for(int i = 1; i < 1000; i++)
+ for(int i = 1; i < 400; i++)
{
ei_declare_aligned_stack_constructed_variable(float,p,i,0);
VERIFY(size_t(p)%ALIGNMENT==0);
@@ -93,6 +93,32 @@ template<typename T> void check_dynaligned()
}
}
+template<typename T> void check_custom_new_delete()
+{
+ {
+ T* t = new T;
+ delete t;
+ }
+
+ {
+ std::size_t N = internal::random<std::size_t>(1,10);
+ T* t = new T[N];
+ delete[] t;
+ }
+
+#ifdef EIGEN_ALIGN
+ {
+ T* t = static_cast<T *>((T::operator new)(sizeof(T)));
+ (T::operator delete)(t, sizeof(T));
+ }
+
+ {
+ T* t = static_cast<T *>((T::operator new)(sizeof(T)));
+ (T::operator delete)(t);
+ }
+#endif
+}
+
void test_dynalloc()
{
// low level dynamic memory allocation
@@ -109,6 +135,11 @@ void test_dynalloc()
CALL_SUBTEST(check_dynaligned<Vector4d>() );
CALL_SUBTEST(check_dynaligned<Vector4i>() );
CALL_SUBTEST(check_dynaligned<Vector8f>() );
+
+ CALL_SUBTEST( check_custom_new_delete<Vector4f>() );
+ CALL_SUBTEST( check_custom_new_delete<Vector2f>() );
+ CALL_SUBTEST( check_custom_new_delete<Matrix4f>() );
+ CALL_SUBTEST( check_custom_new_delete<MatrixXi>() );
}
// check static allocation, who knows ?
diff --git a/test/eigensolver_selfadjoint.cpp b/test/eigensolver_selfadjoint.cpp
index 06a6a8654..3851f9df2 100644
--- a/test/eigensolver_selfadjoint.cpp
+++ b/test/eigensolver_selfadjoint.cpp
@@ -29,7 +29,21 @@ template<typename MatrixType> void selfadjointeigensolver(const MatrixType& m)
MatrixType a = MatrixType::Random(rows,cols);
MatrixType a1 = MatrixType::Random(rows,cols);
MatrixType symmA = a.adjoint() * a + a1.adjoint() * a1;
+ MatrixType symmC = symmA;
+
+ // randomly nullify some rows/columns
+ {
+ Index count = 1;//internal::random<Index>(-cols,cols);
+ for(Index k=0; k<count; ++k)
+ {
+ Index i = internal::random<Index>(0,cols-1);
+ symmA.row(i).setZero();
+ symmA.col(i).setZero();
+ }
+ }
+
symmA.template triangularView<StrictlyUpper>().setZero();
+ symmC.template triangularView<StrictlyUpper>().setZero();
MatrixType b = MatrixType::Random(rows,cols);
MatrixType b1 = MatrixType::Random(rows,cols);
@@ -40,7 +54,7 @@ template<typename MatrixType> void selfadjointeigensolver(const MatrixType& m)
SelfAdjointEigenSolver<MatrixType> eiDirect;
eiDirect.computeDirect(symmA);
// generalized eigen pb
- GeneralizedSelfAdjointEigenSolver<MatrixType> eiSymmGen(symmA, symmB);
+ GeneralizedSelfAdjointEigenSolver<MatrixType> eiSymmGen(symmC, symmB);
VERIFY_IS_EQUAL(eiSymm.info(), Success);
VERIFY((symmA.template selfadjointView<Lower>() * eiSymm.eigenvectors()).isApprox(
@@ -57,27 +71,28 @@ template<typename MatrixType> void selfadjointeigensolver(const MatrixType& m)
VERIFY_IS_APPROX(eiSymm.eigenvalues(), eiSymmNoEivecs.eigenvalues());
// generalized eigen problem Ax = lBx
- eiSymmGen.compute(symmA, symmB,Ax_lBx);
+ eiSymmGen.compute(symmC, symmB,Ax_lBx);
VERIFY_IS_EQUAL(eiSymmGen.info(), Success);
- VERIFY((symmA.template selfadjointView<Lower>() * eiSymmGen.eigenvectors()).isApprox(
+ VERIFY((symmC.template selfadjointView<Lower>() * eiSymmGen.eigenvectors()).isApprox(
symmB.template selfadjointView<Lower>() * (eiSymmGen.eigenvectors() * eiSymmGen.eigenvalues().asDiagonal()), largerEps));
// generalized eigen problem BAx = lx
- eiSymmGen.compute(symmA, symmB,BAx_lx);
+ eiSymmGen.compute(symmC, symmB,BAx_lx);
VERIFY_IS_EQUAL(eiSymmGen.info(), Success);
- VERIFY((symmB.template selfadjointView<Lower>() * (symmA.template selfadjointView<Lower>() * eiSymmGen.eigenvectors())).isApprox(
+ VERIFY((symmB.template selfadjointView<Lower>() * (symmC.template selfadjointView<Lower>() * eiSymmGen.eigenvectors())).isApprox(
(eiSymmGen.eigenvectors() * eiSymmGen.eigenvalues().asDiagonal()), largerEps));
// generalized eigen problem ABx = lx
- eiSymmGen.compute(symmA, symmB,ABx_lx);
+ eiSymmGen.compute(symmC, symmB,ABx_lx);
VERIFY_IS_EQUAL(eiSymmGen.info(), Success);
- VERIFY((symmA.template selfadjointView<Lower>() * (symmB.template selfadjointView<Lower>() * eiSymmGen.eigenvectors())).isApprox(
+ VERIFY((symmC.template selfadjointView<Lower>() * (symmB.template selfadjointView<Lower>() * eiSymmGen.eigenvectors())).isApprox(
(eiSymmGen.eigenvectors() * eiSymmGen.eigenvalues().asDiagonal()), largerEps));
+ eiSymm.compute(symmC);
MatrixType sqrtSymmA = eiSymm.operatorSqrt();
- VERIFY_IS_APPROX(MatrixType(symmA.template selfadjointView<Lower>()), sqrtSymmA*sqrtSymmA);
- VERIFY_IS_APPROX(sqrtSymmA, symmA.template selfadjointView<Lower>()*eiSymm.operatorInverseSqrt());
+ VERIFY_IS_APPROX(MatrixType(symmC.template selfadjointView<Lower>()), sqrtSymmA*sqrtSymmA);
+ VERIFY_IS_APPROX(sqrtSymmA, symmC.template selfadjointView<Lower>()*eiSymm.operatorInverseSqrt());
MatrixType id = MatrixType::Identity(rows, cols);
VERIFY_IS_APPROX(id.template selfadjointView<Lower>().operatorNorm(), RealScalar(1));
@@ -95,9 +110,9 @@ template<typename MatrixType> void selfadjointeigensolver(const MatrixType& m)
VERIFY_RAISES_ASSERT(eiSymmUninitialized.operatorInverseSqrt());
// test Tridiagonalization's methods
- Tridiagonalization<MatrixType> tridiag(symmA);
+ Tridiagonalization<MatrixType> tridiag(symmC);
// FIXME tridiag.matrixQ().adjoint() does not work
- VERIFY_IS_APPROX(MatrixType(symmA.template selfadjointView<Lower>()), tridiag.matrixQ() * tridiag.matrixT().eval() * MatrixType(tridiag.matrixQ()).adjoint());
+ VERIFY_IS_APPROX(MatrixType(symmC.template selfadjointView<Lower>()), tridiag.matrixQ() * tridiag.matrixT().eval() * MatrixType(tridiag.matrixQ()).adjoint());
// Test computation of eigenvalues from tridiagonal matrix
if(rows > 1)
@@ -111,8 +126,8 @@ template<typename MatrixType> void selfadjointeigensolver(const MatrixType& m)
if (rows > 1)
{
// Test matrix with NaN
- symmA(0,0) = std::numeric_limits<typename MatrixType::RealScalar>::quiet_NaN();
- SelfAdjointEigenSolver<MatrixType> eiSymmNaN(symmA);
+ symmC(0,0) = std::numeric_limits<typename MatrixType::RealScalar>::quiet_NaN();
+ SelfAdjointEigenSolver<MatrixType> eiSymmNaN(symmC);
VERIFY_IS_EQUAL(eiSymmNaN.info(), NoConvergence);
}
}
@@ -122,8 +137,10 @@ void test_eigensolver_selfadjoint()
int s = 0;
for(int i = 0; i < g_repeat; i++) {
// very important to test 3x3 and 2x2 matrices since we provide special paths for them
+ CALL_SUBTEST_1( selfadjointeigensolver(Matrix2f()) );
CALL_SUBTEST_1( selfadjointeigensolver(Matrix2d()) );
CALL_SUBTEST_1( selfadjointeigensolver(Matrix3f()) );
+ CALL_SUBTEST_1( selfadjointeigensolver(Matrix3d()) );
CALL_SUBTEST_2( selfadjointeigensolver(Matrix4d()) );
s = internal::random<int>(1,EIGEN_TEST_MAX_SIZE/4);
CALL_SUBTEST_3( selfadjointeigensolver(MatrixXf(s,s)) );
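
The split between symmA and symmC makes the eigensolver run on a rank-deficient input: zeroing row and column i of a symmetric matrix keeps it symmetric but introduces a zero eigenvalue, while the unmodified symmC remains available for the generalized problems, operatorSqrt/operatorInverseSqrt and the Tridiagonalization checks. A small illustration of the singular case now covered (not part of the test itself):

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::Matrix3d A;
  A << 4, 1, 2,
       1, 3, 0,
       2, 0, 5;
  A.row(1).setZero();
  A.col(1).setZero();                // still symmetric, now singular
  Eigen::SelfAdjointEigenSolver<Eigen::Matrix3d> eig(A);
  std::cout << eig.eigenvalues().transpose() << "\n";  // one eigenvalue is (numerically) zero
}
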
diff --git a/test/main.h b/test/main.h
index e89b5a305..b8854a1c3 100644
--- a/test/main.h
+++ b/test/main.h
@@ -79,6 +79,10 @@ namespace Eigen
#define EIGEN_DEFAULT_IO_FORMAT IOFormat(4, 0, " ", "\n", "", "", "", "")
+#if (defined(_CPPUNWIND) || defined(__EXCEPTIONS)) && !defined(__CUDA_ARCH__)
+ #define EIGEN_EXCEPTIONS
+#endif
+
#ifndef EIGEN_NO_ASSERTION_CHECKING
namespace Eigen
@@ -120,13 +124,14 @@ namespace Eigen
if(report_on_cerr_on_assert_failure) \
std::cerr << #a << " " __FILE__ << "(" << __LINE__ << ")\n"; \
Eigen::no_more_assert = true; \
- throw Eigen::eigen_assert_exception(); \
+ EIGEN_THROW_X(Eigen::eigen_assert_exception()); \
} \
else if (Eigen::internal::push_assert) \
{ \
eigen_assert_list.push_back(std::string(EI_PP_MAKE_STRING(__FILE__) " (" EI_PP_MAKE_STRING(__LINE__) ") : " #a) ); \
}
+ #ifdef EIGEN_EXCEPTIONS
#define VERIFY_RAISES_ASSERT(a) \
{ \
Eigen::no_more_assert = false; \
@@ -145,6 +150,7 @@ namespace Eigen
Eigen::report_on_cerr_on_assert_failure = true; \
Eigen::internal::push_assert = false; \
}
+ #endif //EIGEN_EXCEPTIONS
#elif !defined(__CUDACC__) // EIGEN_DEBUG_ASSERTS
// see bug 89. The copy_bool here is working around a bug in gcc <= 4.3
@@ -155,9 +161,10 @@ namespace Eigen
if(report_on_cerr_on_assert_failure) \
eigen_plain_assert(a); \
else \
- throw Eigen::eigen_assert_exception(); \
+ EIGEN_THROW_X(Eigen::eigen_assert_exception()); \
}
- #define VERIFY_RAISES_ASSERT(a) { \
+ #ifdef EIGEN_EXCEPTIONS
+ #define VERIFY_RAISES_ASSERT(a) { \
Eigen::no_more_assert = false; \
Eigen::report_on_cerr_on_assert_failure = false; \
try { \
@@ -167,9 +174,14 @@ namespace Eigen
catch (Eigen::eigen_assert_exception&) { VERIFY(true); } \
Eigen::report_on_cerr_on_assert_failure = true; \
}
-
+ #endif //EIGEN_EXCEPTIONS
#endif // EIGEN_DEBUG_ASSERTS
+#ifndef VERIFY_RAISES_ASSERT
+ #define VERIFY_RAISES_ASSERT(a) \
+ std::cout << "Can't VERIFY_RAISES_ASSERT( " #a " ) with exceptions disabled\n";
+#endif
+
#if !defined(__CUDACC__)
#define EIGEN_USE_CUSTOM_ASSERT
#endif
diff --git a/test/simplicial_cholesky.cpp b/test/simplicial_cholesky.cpp
index e93a52e9c..786468421 100644
--- a/test/simplicial_cholesky.cpp
+++ b/test/simplicial_cholesky.cpp
@@ -11,26 +11,31 @@
template<typename T> void test_simplicial_cholesky_T()
{
- SimplicialCholesky<SparseMatrix<T>, Lower> chol_colmajor_lower;
- SimplicialCholesky<SparseMatrix<T>, Upper> chol_colmajor_upper;
- SimplicialLLT<SparseMatrix<T>, Lower> llt_colmajor_lower;
- SimplicialLDLT<SparseMatrix<T>, Upper> llt_colmajor_upper;
- SimplicialLDLT<SparseMatrix<T>, Lower> ldlt_colmajor_lower;
- SimplicialLDLT<SparseMatrix<T>, Upper> ldlt_colmajor_upper;
+ SimplicialCholesky<SparseMatrix<T>, Lower> chol_colmajor_lower_amd;
+ SimplicialCholesky<SparseMatrix<T>, Upper> chol_colmajor_upper_amd;
+ SimplicialLLT<SparseMatrix<T>, Lower> llt_colmajor_lower_amd;
+ SimplicialLLT<SparseMatrix<T>, Upper> llt_colmajor_upper_amd;
+ SimplicialLDLT<SparseMatrix<T>, Lower> ldlt_colmajor_lower_amd;
+ SimplicialLDLT<SparseMatrix<T>, Upper> ldlt_colmajor_upper_amd;
+ SimplicialLDLT<SparseMatrix<T>, Lower, NaturalOrdering<int> > ldlt_colmajor_lower_nat;
+ SimplicialLDLT<SparseMatrix<T>, Upper, NaturalOrdering<int> > ldlt_colmajor_upper_nat;
- check_sparse_spd_solving(chol_colmajor_lower);
- check_sparse_spd_solving(chol_colmajor_upper);
- check_sparse_spd_solving(llt_colmajor_lower);
- check_sparse_spd_solving(llt_colmajor_upper);
- check_sparse_spd_solving(ldlt_colmajor_lower);
- check_sparse_spd_solving(ldlt_colmajor_upper);
+ check_sparse_spd_solving(chol_colmajor_lower_amd);
+ check_sparse_spd_solving(chol_colmajor_upper_amd);
+ check_sparse_spd_solving(llt_colmajor_lower_amd);
+ check_sparse_spd_solving(llt_colmajor_upper_amd);
+ check_sparse_spd_solving(ldlt_colmajor_lower_amd);
+ check_sparse_spd_solving(ldlt_colmajor_upper_amd);
- check_sparse_spd_determinant(chol_colmajor_lower);
- check_sparse_spd_determinant(chol_colmajor_upper);
- check_sparse_spd_determinant(llt_colmajor_lower);
- check_sparse_spd_determinant(llt_colmajor_upper);
- check_sparse_spd_determinant(ldlt_colmajor_lower);
- check_sparse_spd_determinant(ldlt_colmajor_upper);
+ check_sparse_spd_determinant(chol_colmajor_lower_amd);
+ check_sparse_spd_determinant(chol_colmajor_upper_amd);
+ check_sparse_spd_determinant(llt_colmajor_lower_amd);
+ check_sparse_spd_determinant(llt_colmajor_upper_amd);
+ check_sparse_spd_determinant(ldlt_colmajor_lower_amd);
+ check_sparse_spd_determinant(ldlt_colmajor_upper_amd);
+
+ check_sparse_spd_solving(ldlt_colmajor_lower_nat);
+ check_sparse_spd_solving(ldlt_colmajor_upper_nat);
}
void test_simplicial_cholesky()
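
The _nat instantiations cover the third template argument of the Simplicial* solvers, which selects the fill-reducing ordering; by default it is AMDOrdering, and NaturalOrdering<int> disables the permutation entirely. A short usage sketch:

#include <Eigen/SparseCholesky>
#include <Eigen/OrderingMethods>

typedef Eigen::SparseMatrix<double> SpMat;

// default fill-reducing ordering (AMD)
Eigen::SimplicialLDLT<SpMat, Eigen::Lower> ldlt_amd;
// keep the matrix in its natural order, no permutation
Eigen::SimplicialLDLT<SpMat, Eigen::Lower, Eigen::NaturalOrdering<int> > ldlt_nat;
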
diff --git a/test/sparseqr.cpp b/test/sparseqr.cpp
index 1fe4a98ee..8e6887dd3 100644
--- a/test/sparseqr.cpp
+++ b/test/sparseqr.cpp
@@ -54,6 +54,8 @@ template<typename Scalar> void test_sparseqr_scalar()
b = dA * DenseVector::Random(A.cols());
solver.compute(A);
+ if(internal::random<float>(0,1)>0.5)
+ solver.factorize(A); // this checks that calling analyzePattern is not needed if the pattern does not change.
if (solver.info() != Success)
{
std::cerr << "sparse QR factorization failed\n";
diff --git a/unsupported/Eigen/MPRealSupport b/unsupported/Eigen/MPRealSupport
index 0584e470e..630e0aa77 100644
--- a/unsupported/Eigen/MPRealSupport
+++ b/unsupported/Eigen/MPRealSupport
@@ -88,9 +88,9 @@ int main()
inline static Real epsilon (const Real& x) { return mpfr::machine_epsilon(x); }
inline static Real dummy_precision()
- {
- unsigned int weak_prec = ((mpfr::mpreal::get_default_prec()-1) * 90) / 100;
- return mpfr::machine_epsilon(weak_prec);
+ {
+ mpfr_prec_t weak_prec = ((mpfr::mpreal::get_default_prec()-1) * 90) / 100;
+ return mpfr::machine_epsilon(weak_prec);
}
};
@@ -159,7 +159,8 @@ int main()
{
if(rows==0 || cols==0 || depth==0)
return;
+
mpreal acc1(0,mpfr_get_prec(blockA[0].mpfr_srcptr())),
tmp (0,mpfr_get_prec(blockA[0].mpfr_srcptr()));
diff --git a/unsupported/Eigen/src/IterativeSolvers/GMRES.h b/unsupported/Eigen/src/IterativeSolvers/GMRES.h
index c8c84069e..67498705b 100644
--- a/unsupported/Eigen/src/IterativeSolvers/GMRES.h
+++ b/unsupported/Eigen/src/IterativeSolvers/GMRES.h
@@ -11,7 +11,7 @@
#ifndef EIGEN_GMRES_H
#define EIGEN_GMRES_H
-namespace Eigen {
+namespace Eigen {
namespace internal {
@@ -27,11 +27,11 @@ namespace internal {
* \param iters on input: maximum number of iterations to perform
* on output: number of iterations performed
* \param restart number of iterations for a restart
- * \param tol_error on input: residual tolerance
+ * \param tol_error on input: relative residual tolerance
* on output: residuum achieved
*
- * \sa IterativeMethods::bicgstab()
- *
+ * \sa IterativeMethods::bicgstab()
+ *
*
* For references, please see:
*
@@ -70,18 +70,24 @@ bool gmres(const MatrixType & mat, const Rhs & rhs, Dest & x, const Precondition
const int m = mat.rows();
- VectorType p0 = rhs - mat*x;
+ // residual and preconditioned residual
+ const VectorType p0 = rhs - mat*x;
VectorType r0 = precond.solve(p0);
-
+
+ const RealScalar r0Norm = r0.norm();
+
// is initial guess already good enough?
- if(abs(r0.norm()) < tol) {
- return true;
+ if(r0Norm == 0) {
+ tol_error=0;
+ return true;
}
+ // storage for Hessenberg matrix and Householder data
+ FMatrixType H = FMatrixType::Zero(m, restart + 1);
VectorType w = VectorType::Zero(restart + 1);
-
- FMatrixType H = FMatrixType::Zero(m, restart + 1); // Hessenberg matrix
VectorType tau = VectorType::Zero(restart + 1);
+
+ // storage for Jacobi rotations
std::vector < JacobiRotation < Scalar > > G(restart);
// generate first Householder vector
@@ -112,11 +118,10 @@ bool gmres(const MatrixType & mat, const Rhs & rhs, Dest & x, const Precondition
}
if (v.tail(m - k).norm() != 0.0) {
-
if (k <= restart) {
// generate new Householder vector
- VectorType e(m - k - 1);
+ VectorType e(m - k - 1);
RealScalar beta;
v.tail(m - k).makeHouseholder(e, tau.coeffRef(k), beta);
H.col(k).tail(m - k - 1) = e;
@@ -125,78 +130,77 @@ bool gmres(const MatrixType & mat, const Rhs & rhs, Dest & x, const Precondition
v.tail(m - k).applyHouseholderOnTheLeft(H.col(k).tail(m - k - 1), tau.coeffRef(k), workspace.data());
}
- }
+ }
- if (k > 1) {
- for (int i = 0; i < k - 1; ++i) {
- // apply old Givens rotations to v
- v.applyOnTheLeft(i, i + 1, G[i].adjoint());
- }
- }
+ if (k > 1) {
+ for (int i = 0; i < k - 1; ++i) {
+ // apply old Givens rotations to v
+ v.applyOnTheLeft(i, i + 1, G[i].adjoint());
+ }
+ }
- if (k<m && v(k) != (Scalar) 0) {
- // determine next Givens rotation
- G[k - 1].makeGivens(v(k - 1), v(k));
+ if (k<m && v(k) != (Scalar) 0) {
- // apply Givens rotation to v and w
- v.applyOnTheLeft(k - 1, k, G[k - 1].adjoint());
- w.applyOnTheLeft(k - 1, k, G[k - 1].adjoint());
+ // determine next Givens rotation
+ G[k - 1].makeGivens(v(k - 1), v(k));
- }
+ // apply Givens rotation to v and w
+ v.applyOnTheLeft(k - 1, k, G[k - 1].adjoint());
+ w.applyOnTheLeft(k - 1, k, G[k - 1].adjoint());
+ }
- // insert coefficients into upper matrix triangle
- H.col(k - 1).head(k) = v.head(k);
+ // insert coefficients into upper matrix triangle
+ H.col(k - 1).head(k) = v.head(k);
- bool stop=(k==m || abs(w(k)) < tol || iters == maxIters);
+ bool stop=(k==m || abs(w(k)) < tol * r0Norm || iters == maxIters);
- if (stop || k == restart) {
+ if (stop || k == restart) {
- // solve upper triangular system
- VectorType y = w.head(k);
- H.topLeftCorner(k, k).template triangularView < Eigen::Upper > ().solveInPlace(y);
+ // solve upper triangular system
+ VectorType y = w.head(k);
+ H.topLeftCorner(k, k).template triangularView < Eigen::Upper > ().solveInPlace(y);
- // use Horner-like scheme to calculate solution vector
- VectorType x_new = y(k - 1) * VectorType::Unit(m, k - 1);
+ // use Horner-like scheme to calculate solution vector
+ VectorType x_new = y(k - 1) * VectorType::Unit(m, k - 1);
- // apply Householder reflection H_{k} to x_new
- x_new.tail(m - k + 1).applyHouseholderOnTheLeft(H.col(k - 1).tail(m - k), tau.coeffRef(k - 1), workspace.data());
+ // apply Householder reflection H_{k} to x_new
+ x_new.tail(m - k + 1).applyHouseholderOnTheLeft(H.col(k - 1).tail(m - k), tau.coeffRef(k - 1), workspace.data());
- for (int i = k - 2; i >= 0; --i) {
- x_new += y(i) * VectorType::Unit(m, i);
- // apply Householder reflection H_{i} to x_new
- x_new.tail(m - i).applyHouseholderOnTheLeft(H.col(i).tail(m - i - 1), tau.coeffRef(i), workspace.data());
- }
+ for (int i = k - 2; i >= 0; --i) {
+ x_new += y(i) * VectorType::Unit(m, i);
+ // apply Householder reflection H_{i} to x_new
+ x_new.tail(m - i).applyHouseholderOnTheLeft(H.col(i).tail(m - i - 1), tau.coeffRef(i), workspace.data());
+ }
- x += x_new;
+ x += x_new;
- if (stop) {
- return true;
- } else {
- k=0;
+ if (stop) {
+ return true;
+ } else {
+ k=0;
- // reset data for a restart r0 = rhs - mat * x;
- VectorType p0=mat*x;
- VectorType p1=precond.solve(p0);
- r0 = rhs - p1;
-// r0_sqnorm = r0.squaredNorm();
- w = VectorType::Zero(restart + 1);
- H = FMatrixType::Zero(m, restart + 1);
- tau = VectorType::Zero(restart + 1);
+ // reset data for restart
+ const VectorType p0 = rhs - mat*x;
+ r0 = precond.solve(p0);
- // generate first Householder vector
- RealScalar beta;
- r0.makeHouseholder(e, tau.coeffRef(0), beta);
- w(0)=(Scalar) beta;
- H.bottomLeftCorner(m - 1, 1) = e;
+ // clear Hessenberg matrix and Householder data
+ H = FMatrixType::Zero(m, restart + 1);
+ w = VectorType::Zero(restart + 1);
+ tau = VectorType::Zero(restart + 1);
- }
+ // generate first Householder vector
+ RealScalar beta;
+ r0.makeHouseholder(e, tau.coeffRef(0), beta);
+ w(0)=(Scalar) beta;
+ H.bottomLeftCorner(m - 1, 1) = e;
- }
+ }
+ }
}
-
+
return false;
}
@@ -230,7 +234,7 @@ struct traits<GMRES<_MatrixType,_Preconditioner> >
* The maximal number of iterations and tolerance value can be controlled via the setMaxIterations()
* and setTolerance() methods. The defaults are the size of the problem for the maximal number of iterations
* and NumTraits<Scalar>::epsilon() for the tolerance.
- *
+ *
* This class can be used as the direct solver classes. Here is a typical usage example:
* \code
* int n = 10000;
@@ -244,7 +248,7 @@ struct traits<GMRES<_MatrixType,_Preconditioner> >
* // update b, and solve again
* x = solver.solve(b);
* \endcode
- *
+ *
* By default the iterations start with x=0 as an initial guess of the solution.
* One can control the start using the solveWithGuess() method. Here is a step by
* step execution example starting with a random guess and printing the evolution
@@ -260,7 +264,7 @@ struct traits<GMRES<_MatrixType,_Preconditioner> >
* } while (solver.info()!=Success && i<100);
* \endcode
* Note that such a step by step execution is slightly slower.
- *
+ *
* \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner
*/
template< typename _MatrixType, typename _Preconditioner>
@@ -272,10 +276,10 @@ class GMRES : public IterativeSolverBase<GMRES<_MatrixType,_Preconditioner> >
using Base::m_iterations;
using Base::m_info;
using Base::m_isInitialized;
-
+
private:
int m_restart;
-
+
public:
typedef _MatrixType MatrixType;
typedef typename MatrixType::Scalar Scalar;
@@ -289,10 +293,10 @@ public:
GMRES() : Base(), m_restart(30) {}
/** Initialize the solver with matrix \a A for further \c Ax=b solving.
- *
+ *
* This constructor is a shortcut for the default constructor followed
* by a call to compute().
- *
+ *
* \warning this class stores a reference to the matrix A as well as some
* precomputed values that depend on it. Therefore, if \a A is changed
* this class becomes invalid. Call compute() to update it with the new
@@ -301,16 +305,16 @@ public:
GMRES(const MatrixType& A) : Base(A), m_restart(30) {}
~GMRES() {}
-
+
/** Get the number of iterations after that a restart is performed.
*/
int get_restart() { return m_restart; }
-
+
/** Set the number of iterations after that a restart is performed.
* \param restart number of iterations for a restart, default is 30.
*/
void set_restart(const int restart) { m_restart=restart; }
-
+
/** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A
* \a x0 as an initial solution.
*
@@ -326,17 +330,17 @@ public:
return internal::solve_retval_with_guess
<GMRES, Rhs, Guess>(*this, b.derived(), x0);
}
-
+
/** \internal */
template<typename Rhs,typename Dest>
void _solveWithGuess(const Rhs& b, Dest& x) const
- {
+ {
bool failed = false;
for(int j=0; j<b.cols(); ++j)
{
m_iterations = Base::maxIterations();
m_error = Base::m_tolerance;
-
+
typename Dest::ColXpr xj(x,j);
if(!internal::gmres(*mp_matrix, b.col(j), xj, Base::m_preconditioner, m_iterations, m_restart, m_error))
failed = true;
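
With r0Norm factored into the stopping test, the iteration now terminates on a relative criterion, ||r_k|| <= tol * ||r_0|| for the preconditioned residual, and a zero initial residual is reported as immediate convergence. A brief usage sketch of the solver with an explicit restart length and tolerance:

#include <Eigen/Sparse>
#include <unsupported/Eigen/IterativeSolvers>

void solve_gmres(const Eigen::SparseMatrix<double>& A,
                 const Eigen::VectorXd& b, Eigen::VectorXd& x)
{
  Eigen::GMRES<Eigen::SparseMatrix<double> > gmres;
  gmres.set_restart(30);       // restart length, as documented above
  gmres.setTolerance(1e-8);    // interpreted relative to the initial residual
  gmres.compute(A);
  x = gmres.solve(b);
}
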
diff --git a/unsupported/Eigen/src/LevenbergMarquardt/LevenbergMarquardt.h b/unsupported/Eigen/src/LevenbergMarquardt/LevenbergMarquardt.h
index 51dd1d3c4..7cebe4e06 100644
--- a/unsupported/Eigen/src/LevenbergMarquardt/LevenbergMarquardt.h
+++ b/unsupported/Eigen/src/LevenbergMarquardt/LevenbergMarquardt.h
@@ -144,11 +144,13 @@ class LevenbergMarquardt : internal::no_assignment_operator
/** Sets the default parameters */
void resetParameters()
- {
+ {
+ using std::sqrt;
+
m_factor = 100.;
m_maxfev = 400;
- m_ftol = std::sqrt(NumTraits<RealScalar>::epsilon());
- m_xtol = std::sqrt(NumTraits<RealScalar>::epsilon());
+ m_ftol = sqrt(NumTraits<RealScalar>::epsilon());
+ m_xtol = sqrt(NumTraits<RealScalar>::epsilon());
m_gtol = 0. ;
m_epsfcn = 0. ;
}
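
The switch to an unqualified sqrt after a using-declaration lets argument-dependent lookup pick up overloads for custom scalar types (for instance mpfr::sqrt for mpreal), while built-in types still resolve to std::sqrt. A reduced sketch of the lookup pattern, with a hypothetical helper name:

#include <cmath>
#include <Eigen/Core>

template <typename Scalar>
Scalar default_tolerance()
{
  using std::sqrt;                                   // fallback for built-in types
  return sqrt(Eigen::NumTraits<Scalar>::epsilon());  // unqualified call: ADL may pick a better overload
}
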
diff --git a/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h b/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h
index 05df5a102..160120d03 100644
--- a/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h
+++ b/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h
@@ -176,8 +176,8 @@ void matrix_exp_pade17(const MatrixType &A, MatrixType &U, MatrixType &V)
const MatrixType A4 = A2 * A2;
const MatrixType A6 = A4 * A2;
const MatrixType A8 = A4 * A4;
- V = b[17] * m_tmp1 + b[15] * A6 + b[13] * A4 + b[11] * A2; // used for temporary storage
- matrixType tmp = A8 * V;
+ V = b[17] * A8 + b[15] * A6 + b[13] * A4 + b[11] * A2; // used for temporary storage
+ MatrixType tmp = A8 * V;
tmp += b[9] * A8 + b[7] * A6 + b[5] * A4 + b[3] * A2
+ b[1] * MatrixType::Identity(A.rows(), A.cols());
U.noalias() = A * tmp;
diff --git a/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h b/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h
index bfeb26fc9..ecb8dccf4 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h
@@ -55,8 +55,8 @@ public:
Parameters()
: factor(Scalar(100.))
, maxfev(400)
- , ftol(std::sqrt(NumTraits<Scalar>::epsilon()))
- , xtol(std::sqrt(NumTraits<Scalar>::epsilon()))
+ , ftol(sqrt_(NumTraits<Scalar>::epsilon()))
+ , xtol(sqrt_(NumTraits<Scalar>::epsilon()))
, gtol(Scalar(0.))
, epsfcn(Scalar(0.)) {}
Scalar factor;
@@ -72,7 +72,7 @@ public:
LevenbergMarquardtSpace::Status lmder1(
FVectorType &x,
- const Scalar tol = std::sqrt(NumTraits<Scalar>::epsilon())
+ const Scalar tol = sqrt_(NumTraits<Scalar>::epsilon())
);
LevenbergMarquardtSpace::Status minimize(FVectorType &x);
@@ -83,12 +83,12 @@ public:
FunctorType &functor,
FVectorType &x,
Index *nfev,
- const Scalar tol = std::sqrt(NumTraits<Scalar>::epsilon())
+ const Scalar tol = sqrt_(NumTraits<Scalar>::epsilon())
);
LevenbergMarquardtSpace::Status lmstr1(
FVectorType &x,
- const Scalar tol = std::sqrt(NumTraits<Scalar>::epsilon())
+ const Scalar tol = sqrt_(NumTraits<Scalar>::epsilon())
);
LevenbergMarquardtSpace::Status minimizeOptimumStorage(FVectorType &x);
@@ -109,6 +109,8 @@ public:
Scalar lm_param(void) { return par; }
private:
+ static Scalar sqrt_(const Scalar& x) { using std::sqrt; return sqrt(x); }
+
FunctorType &functor;
Index n;
Index m;
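
The static sqrt_ helper exists because the same trick is needed in default arguments, where a using-declaration cannot appear; wrapping the unqualified call once keeps the declarations readable. A reduced form of the pattern, with hypothetical names:

#include <cmath>
#include <Eigen/Core>

template <typename Scalar>
struct SolverLike
{
  static Scalar sqrt_(const Scalar& x) { using std::sqrt; return sqrt(x); }

  // a default argument may call the static helper, but could not contain
  // a using-declaration itself
  void run(Scalar tol = sqrt_(Eigen::NumTraits<Scalar>::epsilon())) { (void)tol; }
};
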
diff --git a/unsupported/Eigen/src/SparseExtra/MarketIO.h b/unsupported/Eigen/src/SparseExtra/MarketIO.h
index 1c40d3f7c..25ff4228d 100644
--- a/unsupported/Eigen/src/SparseExtra/MarketIO.h
+++ b/unsupported/Eigen/src/SparseExtra/MarketIO.h
@@ -133,6 +133,7 @@ template<typename SparseMatrixType>
bool loadMarket(SparseMatrixType& mat, const std::string& filename)
{
typedef typename SparseMatrixType::Scalar Scalar;
+ typedef typename SparseMatrixType::Index Index;
std::ifstream input(filename.c_str(),std::ios::in);
if(!input)
return false;
@@ -142,11 +143,11 @@ bool loadMarket(SparseMatrixType& mat, const std::string& filename)
bool readsizes = false;
- typedef Triplet<Scalar,int> T;
+ typedef Triplet<Scalar,Index> T;
std::vector<T> elements;
- int M(-1), N(-1), NNZ(-1);
- int count = 0;
+ Index M(-1), N(-1), NNZ(-1);
+ Index count = 0;
while(input.getline(buffer, maxBuffersize))
{
// skip comments
@@ -169,7 +170,7 @@ bool loadMarket(SparseMatrixType& mat, const std::string& filename)
}
else
{
- int i(-1), j(-1);
+ Index i(-1), j(-1);
Scalar value;
if( internal::GetMarketLine(line, M, N, i, j, value) )
{
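
Switching the triplet and counter types from int to the matrix's Index type lets loadMarket handle matrices whose dimensions or nonzero counts do not fit in an int. A usage sketch with a wider index type (the long int choice here is only an example):

#include <string>
#include <unsupported/Eigen/SparseExtra>

typedef Eigen::SparseMatrix<double, Eigen::ColMajor, long int> BigSpMat;

bool load(BigSpMat& m, const std::string& path)
{
  return Eigen::loadMarket(m, path);   // triplets now use BigSpMat's index type
}
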
diff --git a/unsupported/doc/examples/CMakeLists.txt b/unsupported/doc/examples/CMakeLists.txt
index 978f9afd8..c47646dfc 100644
--- a/unsupported/doc/examples/CMakeLists.txt
+++ b/unsupported/doc/examples/CMakeLists.txt
@@ -10,12 +10,10 @@ FOREACH(example_src ${examples_SRCS})
if(EIGEN_STANDARD_LIBRARIES_TO_LINK_TO)
target_link_libraries(example_${example} ${EIGEN_STANDARD_LIBRARIES_TO_LINK_TO})
endif()
- GET_TARGET_PROPERTY(example_executable
- example_${example} LOCATION)
ADD_CUSTOM_COMMAND(
TARGET example_${example}
POST_BUILD
- COMMAND ${example_executable}
+ COMMAND example_${example}
ARGS >${CMAKE_CURRENT_BINARY_DIR}/${example}.out
)
ADD_DEPENDENCIES(unsupported_examples example_${example})
diff --git a/unsupported/doc/snippets/CMakeLists.txt b/unsupported/doc/snippets/CMakeLists.txt
index 4a4157933..f0c5cc2a8 100644
--- a/unsupported/doc/snippets/CMakeLists.txt
+++ b/unsupported/doc/snippets/CMakeLists.txt
@@ -14,12 +14,10 @@ FOREACH(snippet_src ${snippets_SRCS})
if(EIGEN_STANDARD_LIBRARIES_TO_LINK_TO)
target_link_libraries(${compile_snippet_target} ${EIGEN_STANDARD_LIBRARIES_TO_LINK_TO})
endif()
- GET_TARGET_PROPERTY(compile_snippet_executable
- ${compile_snippet_target} LOCATION)
ADD_CUSTOM_COMMAND(
TARGET ${compile_snippet_target}
POST_BUILD
- COMMAND ${compile_snippet_executable}
+ COMMAND ${compile_snippet_target}
ARGS >${CMAKE_CURRENT_BINARY_DIR}/${snippet}.out
)
ADD_DEPENDENCIES(unsupported_snippets ${compile_snippet_target})
diff --git a/unsupported/test/mpreal/mpreal.h b/unsupported/test/mpreal/mpreal.h
index 4c29d2ac2..dddda7dd9 100644
--- a/unsupported/test/mpreal/mpreal.h
+++ b/unsupported/test/mpreal/mpreal.h
@@ -1698,7 +1698,7 @@ inline bool isregular(const mpreal& op){ return (mpfr_regular_p(op.mpfr_srcpt
//////////////////////////////////////////////////////////////////////////
// Type Converters
-inline bool mpreal::toBool (mp_rnd_t mode) const { return mpfr_zero_p (mpfr_srcptr()) == 0; }
+inline bool mpreal::toBool (mp_rnd_t /*mode*/) const { return mpfr_zero_p (mpfr_srcptr()) == 0; }
inline long mpreal::toLong (mp_rnd_t mode) const { return mpfr_get_si (mpfr_srcptr(), mode); }
inline unsigned long mpreal::toULong (mp_rnd_t mode) const { return mpfr_get_ui (mpfr_srcptr(), mode); }
inline float mpreal::toFloat (mp_rnd_t mode) const { return mpfr_get_flt(mpfr_srcptr(), mode); }
@@ -3070,4 +3070,4 @@ namespace std
}
-#endif /* __MPREAL_H__ */ \ No newline at end of file
+#endif /* __MPREAL_H__ */