-rw-r--r--Eigen/Core1
-rw-r--r--Eigen/SparseCore3
-rw-r--r--Eigen/src/Core/Block.h2
-rw-r--r--Eigen/src/Core/arch/AVX/PacketMath.h2
-rw-r--r--Eigen/src/Core/arch/NEON/MathFunctions.h91
-rw-r--r--Eigen/src/Core/arch/NEON/PacketMath.h2
-rwxr-xr-xEigen/src/Core/arch/SSE/PacketMath.h2
-rw-r--r--Eigen/src/Core/util/Constants.h13
-rw-r--r--Eigen/src/Core/util/Macros.h2
-rw-r--r--Eigen/src/IterativeLinearSolvers/BiCGSTAB.h8
-rw-r--r--Eigen/src/IterativeLinearSolvers/ConjugateGradient.h26
-rw-r--r--Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h53
-rw-r--r--Eigen/src/SparseCore/MappedSparseMatrix.h172
-rw-r--r--Eigen/src/SparseCore/SparseBlock.h17
-rw-r--r--Eigen/src/SparseCore/SparseCompressedBase.h199
-rw-r--r--Eigen/src/SparseCore/SparseMap.h239
-rw-r--r--Eigen/src/SparseCore/SparseMatrix.h131
-rw-r--r--Eigen/src/SparseCore/SparseRef.h192
-rw-r--r--Eigen/src/SparseCore/SparseTranspose.h34
-rw-r--r--Eigen/src/SparseCore/SparseUtil.h6
-rw-r--r--blas/common.h3
-rw-r--r--blas/level2_impl.h20
-rw-r--r--blas/xerbla.cpp4
-rw-r--r--failtest/CMakeLists.txt6
-rw-r--r--failtest/sparse_ref_1.cpp18
-rw-r--r--failtest/sparse_ref_2.cpp15
-rw-r--r--failtest/sparse_ref_3.cpp15
-rw-r--r--failtest/sparse_ref_4.cpp15
-rw-r--r--failtest/sparse_ref_5.cpp16
-rw-r--r--test/CMakeLists.txt1
-rw-r--r--test/conjugate_gradient.cpp6
-rw-r--r--test/sparse_basic.cpp20
-rw-r--r--test/sparse_ref.cpp99
-rw-r--r--test/sparse_solver.h10
-rw-r--r--unsupported/Eigen/src/IterativeSolvers/DGMRES.h2
-rw-r--r--unsupported/Eigen/src/IterativeSolvers/GMRES.h19
-rw-r--r--unsupported/Eigen/src/IterativeSolvers/MINRES.h26
-rw-r--r--unsupported/test/minres.cpp2
38 files changed, 1117 insertions, 375 deletions
diff --git a/Eigen/Core b/Eigen/Core
index 0263caf20..216428e97 100644
--- a/Eigen/Core
+++ b/Eigen/Core
@@ -305,6 +305,7 @@ using std::ptrdiff_t;
#include "src/Core/arch/AltiVec/Complex.h"
#elif defined EIGEN_VECTORIZE_NEON
#include "src/Core/arch/NEON/PacketMath.h"
+ #include "src/Core/arch/NEON/MathFunctions.h"
#include "src/Core/arch/NEON/Complex.h"
#endif
diff --git a/Eigen/SparseCore b/Eigen/SparseCore
index d5c0f6271..48ed967b8 100644
--- a/Eigen/SparseCore
+++ b/Eigen/SparseCore
@@ -31,9 +31,12 @@
#include "src/SparseCore/SparseAssign.h"
#include "src/SparseCore/CompressedStorage.h"
#include "src/SparseCore/AmbiVector.h"
+#include "src/SparseCore/SparseCompressedBase.h"
#include "src/SparseCore/SparseMatrix.h"
+#include "src/SparseCore/SparseMap.h"
#include "src/SparseCore/MappedSparseMatrix.h"
#include "src/SparseCore/SparseVector.h"
+#include "src/SparseCore/SparseRef.h"
#include "src/SparseCore/SparseCwiseUnaryOp.h"
#include "src/SparseCore/SparseCwiseBinaryOp.h"
#include "src/SparseCore/SparseTranspose.h"
diff --git a/Eigen/src/Core/Block.h b/Eigen/src/Core/Block.h
index 9cf9d5432..5f6307206 100644
--- a/Eigen/src/Core/Block.h
+++ b/Eigen/src/Core/Block.h
@@ -87,7 +87,7 @@ struct traits<Block<XprType, BlockRows, BlockCols, InnerPanel> > : traits<XprTyp
// FIXME, this traits is rather specialized for dense object and it needs to be cleaned further
FlagsLvalueBit = is_lvalue<XprType>::value ? LvalueBit : 0,
FlagsRowMajorBit = IsRowMajor ? RowMajorBit : 0,
- Flags = (traits<XprType>::Flags & DirectAccessBit) | FlagsLvalueBit | FlagsRowMajorBit
+ Flags = (traits<XprType>::Flags & (DirectAccessBit | (InnerPanel?CompressedAccessBit:0))) | FlagsLvalueBit | FlagsRowMajorBit
// FIXME DirectAccessBit should not be handled by expressions
};
};
diff --git a/Eigen/src/Core/arch/AVX/PacketMath.h b/Eigen/src/Core/arch/AVX/PacketMath.h
index e66d50649..be66a502a 100644
--- a/Eigen/src/Core/arch/AVX/PacketMath.h
+++ b/Eigen/src/Core/arch/AVX/PacketMath.h
@@ -135,7 +135,7 @@ template<> EIGEN_STRONG_INLINE Packet8i pdiv<Packet8i>(const Packet8i& /*a*/, co
return pset1<Packet8i>(0);
}
-#ifdef EIGEN_VECTORIZE_FMA
+#ifdef __FMA__
template<> EIGEN_STRONG_INLINE Packet8f pmadd(const Packet8f& a, const Packet8f& b, const Packet8f& c) {
#if EIGEN_COMP_GNUC || EIGEN_COMP_CLANG
// clang stupidly generates a vfmadd213ps instruction plus some vmovaps on registers,
diff --git a/Eigen/src/Core/arch/NEON/MathFunctions.h b/Eigen/src/Core/arch/NEON/MathFunctions.h
new file mode 100644
index 000000000..6bb05bb92
--- /dev/null
+++ b/Eigen/src/Core/arch/NEON/MathFunctions.h
@@ -0,0 +1,91 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+/* The sin, cos, exp, and log functions of this file come from
+ * Julien Pommier's sse math library: http://gruntthepeon.free.fr/ssemath/
+ */
+
+#ifndef EIGEN_MATH_FUNCTIONS_NEON_H
+#define EIGEN_MATH_FUNCTIONS_NEON_H
+
+namespace Eigen {
+
+namespace internal {
+
+template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
+Packet4f pexp<Packet4f>(const Packet4f& _x)
+{
+ Packet4f x = _x;
+ Packet4f tmp, fx;
+
+ _EIGEN_DECLARE_CONST_Packet4f(1 , 1.0f);
+ _EIGEN_DECLARE_CONST_Packet4f(half, 0.5f);
+ _EIGEN_DECLARE_CONST_Packet4i(0x7f, 0x7f);
+ _EIGEN_DECLARE_CONST_Packet4f(exp_hi, 88.3762626647950f);
+ _EIGEN_DECLARE_CONST_Packet4f(exp_lo, -88.3762626647949f);
+ _EIGEN_DECLARE_CONST_Packet4f(cephes_LOG2EF, 1.44269504088896341f);
+ _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_C1, 0.693359375f);
+ _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_C2, -2.12194440e-4f);
+ _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p0, 1.9875691500E-4f);
+ _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p1, 1.3981999507E-3f);
+ _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p2, 8.3334519073E-3f);
+ _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p3, 4.1665795894E-2f);
+ _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p4, 1.6666665459E-1f);
+ _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p5, 5.0000001201E-1f);
+
+ x = vminq_f32(x, p4f_exp_hi);
+ x = vmaxq_f32(x, p4f_exp_lo);
+
+ /* express exp(x) as exp(g + n*log(2)) */
+ fx = vmlaq_f32(p4f_half, x, p4f_cephes_LOG2EF);
+
+ /* perform a floorf */
+ tmp = vcvtq_f32_s32(vcvtq_s32_f32(fx));
+
+ /* if greater, subtract 1 */
+ Packet4ui mask = vcgtq_f32(tmp, fx);
+ mask = vandq_u32(mask, vreinterpretq_u32_f32(p4f_1));
+
+ fx = vsubq_f32(tmp, vreinterpretq_f32_u32(mask));
+
+ tmp = vmulq_f32(fx, p4f_cephes_exp_C1);
+ Packet4f z = vmulq_f32(fx, p4f_cephes_exp_C2);
+ x = vsubq_f32(x, tmp);
+ x = vsubq_f32(x, z);
+
+ Packet4f y = vmulq_f32(p4f_cephes_exp_p0, x);
+ z = vmulq_f32(x, x);
+ y = vaddq_f32(y, p4f_cephes_exp_p1);
+ y = vmulq_f32(y, x);
+ y = vaddq_f32(y, p4f_cephes_exp_p2);
+ y = vmulq_f32(y, x);
+ y = vaddq_f32(y, p4f_cephes_exp_p3);
+ y = vmulq_f32(y, x);
+ y = vaddq_f32(y, p4f_cephes_exp_p4);
+ y = vmulq_f32(y, x);
+ y = vaddq_f32(y, p4f_cephes_exp_p5);
+
+ y = vmulq_f32(y, z);
+ y = vaddq_f32(y, x);
+ y = vaddq_f32(y, p4f_1);
+
+ /* build 2^n */
+ int32x4_t mm;
+ mm = vcvtq_s32_f32(fx);
+ mm = vaddq_s32(mm, p4i_0x7f);
+ mm = vshlq_n_s32(mm, 23);
+ Packet4f pow2n = vreinterpretq_f32_s32(mm);
+
+ y = vmulq_f32(y, pow2n);
+ return y;
+}
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_MATH_FUNCTIONS_NEON_H
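The new pexp kernel above is only reached once HasExp is turned on for NEON in the next hunk; for reference, a minimal sketch (assuming a build with EIGEN_VECTORIZE_NEON defined) where an array-wise exp() dispatches to it:

    #include <Eigen/Core>
    #include <iostream>

    int main()
    {
      // Coefficient-wise exp() is vectorized through internal::pexp<Packet4f>
      // on NEON builds; other targets fall back to their own packet or scalar paths.
      Eigen::ArrayXf x = Eigen::ArrayXf::LinSpaced(8, -1.0f, 1.0f);
      Eigen::ArrayXf y = x.exp();
      std::cout << y.transpose() << std::endl;
      return 0;
    }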
diff --git a/Eigen/src/Core/arch/NEON/PacketMath.h b/Eigen/src/Core/arch/NEON/PacketMath.h
index 9afd86bec..559682cf7 100644
--- a/Eigen/src/Core/arch/NEON/PacketMath.h
+++ b/Eigen/src/Core/arch/NEON/PacketMath.h
@@ -88,7 +88,7 @@ template<> struct packet_traits<float> : default_packet_traits
HasSin = 0,
HasCos = 0,
HasLog = 0,
- HasExp = 0,
+ HasExp = 1,
HasSqrt = 0
};
};
diff --git a/Eigen/src/Core/arch/SSE/PacketMath.h b/Eigen/src/Core/arch/SSE/PacketMath.h
index 3befd4c25..898cb9ab0 100755
--- a/Eigen/src/Core/arch/SSE/PacketMath.h
+++ b/Eigen/src/Core/arch/SSE/PacketMath.h
@@ -231,7 +231,7 @@ template<> EIGEN_STRONG_INLINE Packet4i pdiv<Packet4i>(const Packet4i& /*a*/, co
// for some weird reasons, it has to be overloaded for packet of integers
template<> EIGEN_STRONG_INLINE Packet4i pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c) { return padd(pmul(a,b), c); }
-#ifdef EIGEN_VECTORIZE_FMA
+#ifdef __FMA__
template<> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c) { return _mm_fmadd_ps(a,b,c); }
template<> EIGEN_STRONG_INLINE Packet2d pmadd(const Packet2d& a, const Packet2d& b, const Packet2d& c) { return _mm_fmadd_pd(a,b,c); }
#endif
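Both the AVX and SSE hunks above gate the fused pmadd overloads on __FMA__ instead of EIGEN_VECTORIZE_FMA, i.e. on whether the compiler itself advertises FMA (e.g. gcc/clang with -mfma). A small sketch of the internal packet API involved, assuming an SSE-enabled x86 build where Packet4f is defined:

    #include <Eigen/Core>

    int main()
    {
      using namespace Eigen::internal;
      Packet4f a = pset1<Packet4f>(1.5f);
      Packet4f b = pset1<Packet4f>(2.0f);
      Packet4f c = pset1<Packet4f>(0.25f);
      // a*b + c: a single fused instruction when __FMA__ is defined,
      // otherwise the generic padd(pmul(a, b), c) fallback.
      Packet4f r = pmadd(a, b, c);
      float out[4];
      pstoreu(out, r);
      return out[0] == 3.25f ? 0 : 1;
    }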
diff --git a/Eigen/src/Core/util/Constants.h b/Eigen/src/Core/util/Constants.h
index 9b40093f0..d1855b50b 100644
--- a/Eigen/src/Core/util/Constants.h
+++ b/Eigen/src/Core/util/Constants.h
@@ -163,6 +163,19 @@ const unsigned int NestByRefBit = 0x100;
* \sa \ref RowMajorBit, \ref TopicStorageOrders */
const unsigned int NoPreferredStorageOrderBit = 0x200;
+/** \ingroup flags
+ *
+ * Means that the underlying coefficients can be accessed through pointers to the sparse (un)compressed storage format,
+ * that is, the expression provides:
+ * \code
+ inline const Scalar* valuePtr() const;
+ inline const Index* innerIndexPtr() const;
+ inline const Index* outerIndexPtr() const;
+ inline const Index* innerNonZeroPtr() const;
+ \endcode
+ */
+const unsigned int CompressedAccessBit = 0x400;
+
// list of flags that are inherited by default
const unsigned int HereditaryBits = RowMajorBit
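The four accessors listed in the CompressedAccessBit documentation map onto the usual compressed (CSC/CSR) arrays; a minimal sketch of reading them from a SparseMatrix, which always advertises this bit after the SparseMatrix.h change further down (the default int index type is assumed):

    #include <Eigen/SparseCore>
    #include <iostream>

    int main()
    {
      Eigen::SparseMatrix<double> A(4, 4);      // column-major (CSC) by default
      A.insert(0, 0) = 1.0;
      A.insert(2, 1) = 2.0;
      A.insert(3, 3) = 3.0;
      A.makeCompressed();                       // innerNonZeroPtr() is now 0 (compressed mode)

      const double* values = A.valuePtr();      // the nonZeros() stored values
      const int*    inner  = A.innerIndexPtr(); // row index of each stored value
      const int*    outer  = A.outerIndexPtr(); // start of each column in the two arrays above

      for (int j = 0; j < A.outerSize(); ++j)
        for (int p = outer[j]; p < outer[j + 1]; ++p)
          std::cout << "(" << inner[p] << "," << j << ") = " << values[p] << "\n";
      return 0;
    }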
diff --git a/Eigen/src/Core/util/Macros.h b/Eigen/src/Core/util/Macros.h
index dc5f13673..07c528a2f 100644
--- a/Eigen/src/Core/util/Macros.h
+++ b/Eigen/src/Core/util/Macros.h
@@ -396,7 +396,7 @@
/** Allows to disable some optimizations which might affect the accuracy of the result.
* Such optimization are enabled by default, and set EIGEN_FAST_MATH to 0 to disable them.
* They currently include:
- * - single precision Cwise::sin() and Cwise::cos() when SSE vectorization is enabled.
+ * - single precision ArrayBase::sin() and ArrayBase::cos() when SSE vectorization is enabled.
*/
#ifndef EIGEN_FAST_MATH
#define EIGEN_FAST_MATH 1
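The option documented above is a plain preprocessor switch; to opt out of the accuracy-affecting optimizations it has to be defined before any Eigen header is included:

    // Disable the fast-math approximations (e.g. the vectorized single-precision
    // sin()/cos() paths) for this translation unit.
    #define EIGEN_FAST_MATH 0
    #include <Eigen/Core>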
diff --git a/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h b/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h
index 224fe913f..31a43cb56 100644
--- a/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h
+++ b/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h
@@ -139,11 +139,7 @@ struct traits<BiCGSTAB<_MatrixType,_Preconditioner> >
* \include BiCGSTAB_simple.cpp
*
* By default the iterations start with x=0 as an initial guess of the solution.
- * One can control the start using the solveWithGuess() method. Here is a step by
- * step execution example starting with a random guess and printing the evolution
- * of the estimated error:
- * \include BiCGSTAB_step_by_step.cpp
- * Note that such a step by step execution is slightly slower.
+ * One can control the start using the solveWithGuess() method.
*
* \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner
*/
@@ -193,7 +189,7 @@ public:
m_error = Base::m_tolerance;
typename Dest::ColXpr xj(x,j);
- if(!internal::bicgstab(*mp_matrix, b.col(j), xj, Base::m_preconditioner, m_iterations, m_error))
+ if(!internal::bicgstab(mp_matrix, b.col(j), xj, Base::m_preconditioner, m_iterations, m_error))
failed = true;
}
m_info = failed ? NumericalIssue
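The step-by-step snippet dropped from the doxygen above remains a valid way to drive the solver; as a reference, a sketch of the solveWithGuess() loop it described (A and b are placeholders for the user's system):

    #include <Eigen/Sparse>
    #include <iostream>

    void solve_step_by_step(const Eigen::SparseMatrix<double>& A, const Eigen::VectorXd& b)
    {
      Eigen::BiCGSTAB<Eigen::SparseMatrix<double> > solver(A);
      Eigen::VectorXd x = Eigen::VectorXd::Random(A.cols());  // user-provided initial guess
      solver.setMaxIterations(1);
      int i = 0;
      do {
        x = solver.solveWithGuess(b, x);                      // one iteration per call
        std::cout << i << " : " << solver.error() << std::endl;
        ++i;
      } while (solver.info() != Eigen::Success && i < 100);
    }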
diff --git a/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h b/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h
index b5ef6d60f..1e819fc9f 100644
--- a/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h
+++ b/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h
@@ -113,8 +113,8 @@ struct traits<ConjugateGradient<_MatrixType,_UpLo,_Preconditioner> >
* The matrix A must be selfadjoint. The matrix A and the vectors x and b can be either dense or sparse.
*
* \tparam _MatrixType the type of the matrix A, can be a dense or a sparse matrix.
- * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower
- * or Upper. Default is Lower.
+ * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower,
+ * Upper, or Lower|Upper, in which case the full matrix entries will be considered. Default is Lower.
* \tparam _Preconditioner the type of the preconditioner. Default is DiagonalPreconditioner
*
* The maximal number of iterations and tolerance value can be controlled via the setMaxIterations()
@@ -137,20 +137,7 @@ struct traits<ConjugateGradient<_MatrixType,_UpLo,_Preconditioner> >
* \endcode
*
* By default the iterations start with x=0 as an initial guess of the solution.
- * One can control the start using the solveWithGuess() method. Here is a step by
- * step execution example starting with a random guess and printing the evolution
- * of the estimated error:
- * * \code
- * x = VectorXd::Random(n);
- * cg.setMaxIterations(1);
- * int i = 0;
- * do {
- * x = cg.solveWithGuess(b,x);
- * std::cout << i << " : " << cg.error() << std::endl;
- * ++i;
- * } while (cg.info()!=Success && i<100);
- * \endcode
- * Note that such a step by step excution is slightly slower.
+ * One can control the start using the solveWithGuess() method.
*
* \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner
*/
@@ -197,6 +184,10 @@ public:
template<typename Rhs,typename Dest>
void _solve_with_guess_impl(const Rhs& b, Dest& x) const
{
+ typedef typename internal::conditional<UpLo==(Lower|Upper),
+ Ref<const MatrixType>&,
+ SparseSelfAdjointView<const Ref<const MatrixType>, UpLo>
+ >::type MatrixWrapperType;
m_iterations = Base::maxIterations();
m_error = Base::m_tolerance;
@@ -206,8 +197,7 @@ public:
m_error = Base::m_tolerance;
typename Dest::ColXpr xj(x,j);
- internal::conjugate_gradient(mp_matrix->template selfadjointView<UpLo>(), b.col(j), xj,
- Base::m_preconditioner, m_iterations, m_error);
+ internal::conjugate_gradient(MatrixWrapperType(mp_matrix), b.col(j), xj, Base::m_preconditioner, m_iterations, m_error);
}
m_isInitialized = true;
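With the extended _UpLo documentation above, Lower|Upper makes the solver work on the full matrix directly (the Ref<const MatrixType>& branch of MatrixWrapperType) instead of going through a SparseSelfAdjointView of one triangle; a minimal usage sketch, assuming A is selfadjoint and fully stored:

    #include <Eigen/Sparse>

    Eigen::VectorXd solve_spd(const Eigen::SparseMatrix<double>& A, const Eigen::VectorXd& b)
    {
      // Both triangles of A are stored, so Lower|Upper skips the selfadjoint wrapper.
      Eigen::ConjugateGradient<Eigen::SparseMatrix<double>, Eigen::Lower | Eigen::Upper> cg;
      cg.compute(A);
      return cg.solve(b);
    }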
diff --git a/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h b/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h
index f33c868bb..6f48075b4 100644
--- a/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h
+++ b/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h
@@ -37,7 +37,7 @@ public:
/** Default constructor. */
IterativeSolverBase()
- : mp_matrix(0)
+ : m_dummy(0,0), mp_matrix(m_dummy)
{
init();
}
@@ -52,10 +52,11 @@ public:
* this class becomes invalid. Call compute() to update it with the new
* matrix A, or modify a copy of A.
*/
- explicit IterativeSolverBase(const MatrixType& A)
+ template<typename SparseMatrixDerived>
+ explicit IterativeSolverBase(const SparseMatrixBase<SparseMatrixDerived>& A)
{
init();
- compute(A);
+ compute(A.derived());
}
~IterativeSolverBase() {}
@@ -65,9 +66,11 @@ public:
* Currently, this function mostly calls analyzePattern on the preconditioner. In the future
* we might, for instance, implement column reordering for faster matrix vector products.
*/
- Derived& analyzePattern(const MatrixType& A)
+ template<typename SparseMatrixDerived>
+ Derived& analyzePattern(const SparseMatrixBase<SparseMatrixDerived>& A)
{
- m_preconditioner.analyzePattern(A);
+ grab(A);
+ m_preconditioner.analyzePattern(mp_matrix);
m_isInitialized = true;
m_analysisIsOk = true;
m_info = Success;
@@ -83,11 +86,12 @@ public:
* this class becomes invalid. Call compute() to update it with the new
* matrix A, or modify a copy of A.
*/
- Derived& factorize(const MatrixType& A)
+ template<typename SparseMatrixDerived>
+ Derived& factorize(const SparseMatrixBase<SparseMatrixDerived>& A)
{
eigen_assert(m_analysisIsOk && "You must first call analyzePattern()");
- mp_matrix = &A;
- m_preconditioner.factorize(A);
+ grab(A);
+ m_preconditioner.factorize(mp_matrix);
m_factorizationIsOk = true;
m_info = Success;
return derived();
@@ -103,10 +107,11 @@ public:
* this class becomes invalid. Call compute() to update it with the new
* matrix A, or modify a copy of A.
*/
- Derived& compute(const MatrixType& A)
+ template<typename SparseMatrixDerived>
+ Derived& compute(const SparseMatrixBase<SparseMatrixDerived>& A)
{
- mp_matrix = &A;
- m_preconditioner.compute(A);
+ grab(A);
+ m_preconditioner.compute(mp_matrix);
m_isInitialized = true;
m_analysisIsOk = true;
m_factorizationIsOk = true;
@@ -115,9 +120,9 @@ public:
}
/** \internal */
- Index rows() const { return mp_matrix ? mp_matrix->rows() : 0; }
+ Index rows() const { return mp_matrix.rows(); }
/** \internal */
- Index cols() const { return mp_matrix ? mp_matrix->cols() : 0; }
+ Index cols() const { return mp_matrix.cols(); }
/** \returns the tolerance threshold used by the stopping criteria */
RealScalar tolerance() const { return m_tolerance; }
@@ -135,13 +140,18 @@ public:
/** \returns a read-only reference to the preconditioner. */
const Preconditioner& preconditioner() const { return m_preconditioner; }
- /** \returns the max number of iterations */
+ /** \returns the max number of iterations.
+ * It is either the value set by setMaxIterations() or, by default,
+ * twice the number of columns of the matrix.
+ */
int maxIterations() const
{
- return (mp_matrix && m_maxIterations<0) ? mp_matrix->cols() : m_maxIterations;
+ return (m_maxIterations<0) ? 2*mp_matrix.cols() : m_maxIterations;
}
- /** Sets the max number of iterations */
+ /** Sets the max number of iterations.
+ * Default is twice the number of columns of the matrix.
+ */
Derived& setMaxIterations(int maxIters)
{
m_maxIterations = maxIters;
@@ -210,7 +220,16 @@ protected:
m_maxIterations = -1;
m_tolerance = NumTraits<Scalar>::epsilon();
}
- const MatrixType* mp_matrix;
+
+ template<typename SparseMatrixDerived>
+ void grab(const SparseMatrixBase<SparseMatrixDerived> &A)
+ {
+ mp_matrix.~Ref<const MatrixType>();
+ ::new (&mp_matrix) Ref<const MatrixType>(A);
+ }
+
+ MatrixType m_dummy;
+ Ref<const MatrixType> mp_matrix;
Preconditioner m_preconditioner;
int m_maxIterations;
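grab() above re-seats the Ref<const MatrixType> member in place with an explicit destructor call followed by placement new, since Ref itself is not assignable. A standalone sketch of that idiom, using dense types for brevity (Holder and its members are illustrative names, not Eigen API):

    #include <Eigen/Dense>
    #include <new>

    struct Holder
    {
      typedef Eigen::Ref<const Eigen::MatrixXd> RefType;

      Eigen::MatrixXd m_dummy;   // empty matrix the Ref can bind to initially
      RefType m_ref;

      Holder() : m_dummy(0, 0), m_ref(m_dummy) {}

      template<typename Derived>
      void grab(const Eigen::MatrixBase<Derived>& A)
      {
        m_ref.~RefType();                     // drop the current binding
        ::new (&m_ref) RefType(A.derived());  // rebind in place to the new expression
      }
    };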
diff --git a/Eigen/src/SparseCore/MappedSparseMatrix.h b/Eigen/src/SparseCore/MappedSparseMatrix.h
index 2852c669a..533479fd0 100644
--- a/Eigen/src/SparseCore/MappedSparseMatrix.h
+++ b/Eigen/src/SparseCore/MappedSparseMatrix.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@@ -10,9 +10,10 @@
#ifndef EIGEN_MAPPED_SPARSEMATRIX_H
#define EIGEN_MAPPED_SPARSEMATRIX_H
-namespace Eigen {
+namespace Eigen {
-/** \class MappedSparseMatrix
+/** \deprecated Use Map<SparseMatrix<> >
+ * \class MappedSparseMatrix
*
* \brief Sparse matrix
*
@@ -25,179 +26,38 @@ namespace internal {
template<typename _Scalar, int _Flags, typename _Index>
struct traits<MappedSparseMatrix<_Scalar, _Flags, _Index> > : traits<SparseMatrix<_Scalar, _Flags, _Index> >
{};
-}
+} // end namespace internal
template<typename _Scalar, int _Flags, typename _Index>
class MappedSparseMatrix
- : public SparseMatrixBase<MappedSparseMatrix<_Scalar, _Flags, _Index> >
+ : public Map<SparseMatrix<_Scalar, _Flags, _Index> >
{
- public:
- EIGEN_SPARSE_PUBLIC_INTERFACE(MappedSparseMatrix)
- enum { IsRowMajor = Base::IsRowMajor };
-
- protected:
-
- Index m_outerSize;
- Index m_innerSize;
- Index m_nnz;
- Index* m_outerIndex;
- Index* m_innerIndices;
- Scalar* m_values;
+ typedef Map<SparseMatrix<_Scalar, _Flags, _Index> > Base;
public:
-
- inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
- inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }
- inline Index innerSize() const { return m_innerSize; }
- inline Index outerSize() const { return m_outerSize; }
- bool isCompressed() const { return true; }
-
- //----------------------------------------
- // direct access interface
- inline const Scalar* valuePtr() const { return m_values; }
- inline Scalar* valuePtr() { return m_values; }
-
- inline const Index* innerIndexPtr() const { return m_innerIndices; }
- inline Index* innerIndexPtr() { return m_innerIndices; }
-
- inline const Index* outerIndexPtr() const { return m_outerIndex; }
- inline Index* outerIndexPtr() { return m_outerIndex; }
- //----------------------------------------
-
- inline Scalar coeff(Index row, Index col) const
- {
- const Index outer = IsRowMajor ? row : col;
- const Index inner = IsRowMajor ? col : row;
-
- Index start = m_outerIndex[outer];
- Index end = m_outerIndex[outer+1];
- if (start==end)
- return Scalar(0);
- else if (end>0 && inner==m_innerIndices[end-1])
- return m_values[end-1];
- // ^^ optimization: let's first check if it is the last coefficient
- // (very common in high level algorithms)
+ typedef typename Base::Index Index;
+ typedef typename Base::Scalar Scalar;
- const Index* r = std::lower_bound(&m_innerIndices[start],&m_innerIndices[end-1],inner);
- const Index id = r-&m_innerIndices[0];
- return ((*r==inner) && (id<end)) ? m_values[id] : Scalar(0);
- }
-
- inline Scalar& coeffRef(Index row, Index col)
- {
- const Index outer = IsRowMajor ? row : col;
- const Index inner = IsRowMajor ? col : row;
-
- Index start = m_outerIndex[outer];
- Index end = m_outerIndex[outer+1];
- eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
- eigen_assert(end>start && "coeffRef cannot be called on a zero coefficient");
- Index* r = std::lower_bound(&m_innerIndices[start],&m_innerIndices[end],inner);
- const Index id = r-&m_innerIndices[0];
- eigen_assert((*r==inner) && (id<end) && "coeffRef cannot be called on a zero coefficient");
- return m_values[id];
- }
-
- class InnerIterator;
- class ReverseInnerIterator;
-
- /** \returns the number of non zero coefficients */
- inline Index nonZeros() const { return m_nnz; }
-
- inline MappedSparseMatrix(Index rows, Index cols, Index nnz, Index* outerIndexPtr, Index* innerIndexPtr, Scalar* valuePtr)
- : m_outerSize(IsRowMajor?rows:cols), m_innerSize(IsRowMajor?cols:rows), m_nnz(nnz), m_outerIndex(outerIndexPtr),
- m_innerIndices(innerIndexPtr), m_values(valuePtr)
+ inline MappedSparseMatrix(Index rows, Index cols, Index nnz, Index* outerIndexPtr, Index* innerIndexPtr, Scalar* valuePtr, Index* innerNonZeroPtr = 0)
+ : Base(rows, cols, nnz, outerIndexPtr, innerIndexPtr, valuePtr, innerNonZeroPtr)
{}
/** Empty destructor */
inline ~MappedSparseMatrix() {}
};
-template<typename Scalar, int _Flags, typename _Index>
-class MappedSparseMatrix<Scalar,_Flags,_Index>::InnerIterator
-{
- public:
- InnerIterator(const MappedSparseMatrix& mat, Index outer)
- : m_matrix(mat),
- m_outer(outer),
- m_id(mat.outerIndexPtr()[outer]),
- m_start(m_id),
- m_end(mat.outerIndexPtr()[outer+1])
- {}
-
- inline InnerIterator& operator++() { m_id++; return *this; }
-
- inline Scalar value() const { return m_matrix.valuePtr()[m_id]; }
- inline Scalar& valueRef() { return const_cast<Scalar&>(m_matrix.valuePtr()[m_id]); }
-
- inline Index index() const { return m_matrix.innerIndexPtr()[m_id]; }
- inline Index row() const { return IsRowMajor ? m_outer : index(); }
- inline Index col() const { return IsRowMajor ? index() : m_outer; }
-
- inline operator bool() const { return (m_id < m_end) && (m_id>=m_start); }
-
- protected:
- const MappedSparseMatrix& m_matrix;
- const Index m_outer;
- Index m_id;
- const Index m_start;
- const Index m_end;
-};
-
-template<typename Scalar, int _Flags, typename _Index>
-class MappedSparseMatrix<Scalar,_Flags,_Index>::ReverseInnerIterator
-{
- public:
- ReverseInnerIterator(const MappedSparseMatrix& mat, Index outer)
- : m_matrix(mat),
- m_outer(outer),
- m_id(mat.outerIndexPtr()[outer+1]),
- m_start(mat.outerIndexPtr()[outer]),
- m_end(m_id)
- {}
-
- inline ReverseInnerIterator& operator--() { m_id--; return *this; }
-
- inline Scalar value() const { return m_matrix.valuePtr()[m_id-1]; }
- inline Scalar& valueRef() { return const_cast<Scalar&>(m_matrix.valuePtr()[m_id-1]); }
-
- inline Index index() const { return m_matrix.innerIndexPtr()[m_id-1]; }
- inline Index row() const { return IsRowMajor ? m_outer : index(); }
- inline Index col() const { return IsRowMajor ? index() : m_outer; }
-
- inline operator bool() const { return (m_id <= m_end) && (m_id>m_start); }
-
- protected:
- const MappedSparseMatrix& m_matrix;
- const Index m_outer;
- Index m_id;
- const Index m_start;
- const Index m_end;
-};
-
namespace internal {
template<typename _Scalar, int _Options, typename _Index>
struct evaluator<MappedSparseMatrix<_Scalar,_Options,_Index> >
- : evaluator_base<MappedSparseMatrix<_Scalar,_Options,_Index> >
+ : evaluator<SparseCompressedBase<MappedSparseMatrix<_Scalar,_Options,_Index> > >
{
- typedef MappedSparseMatrix<_Scalar,_Options,_Index> MappedSparseMatrixType;
- typedef typename MappedSparseMatrixType::InnerIterator InnerIterator;
- typedef typename MappedSparseMatrixType::ReverseInnerIterator ReverseInnerIterator;
-
- enum {
- CoeffReadCost = NumTraits<_Scalar>::ReadCost,
- Flags = MappedSparseMatrixType::Flags
- };
-
- evaluator() : m_matrix(0) {}
- explicit evaluator(const MappedSparseMatrixType &mat) : m_matrix(&mat) {}
-
- operator MappedSparseMatrixType&() { return m_matrix->const_cast_derived(); }
- operator const MappedSparseMatrixType&() const { return *m_matrix; }
+ typedef MappedSparseMatrix<_Scalar,_Options,_Index> XprType;
+ typedef evaluator<SparseCompressedBase<XprType> > Base;
- const MappedSparseMatrixType *m_matrix;
+ evaluator() : Base() {}
+ explicit evaluator(const XprType &mat) : Base(mat) {}
};
}
diff --git a/Eigen/src/SparseCore/SparseBlock.h b/Eigen/src/SparseCore/SparseBlock.h
index 9e4da2057..6b8ade5aa 100644
--- a/Eigen/src/SparseCore/SparseBlock.h
+++ b/Eigen/src/SparseCore/SparseBlock.h
@@ -74,7 +74,7 @@ namespace internal {
template<typename SparseMatrixType, int BlockRows, int BlockCols>
class sparse_matrix_block_impl
- : public SparseMatrixBase<Block<SparseMatrixType,BlockRows,BlockCols,true> >
+ : public SparseCompressedBase<Block<SparseMatrixType,BlockRows,BlockCols,true> >
{
typedef typename internal::remove_all<typename SparseMatrixType::Nested>::type _MatrixTypeNested;
typedef Block<SparseMatrixType, BlockRows, BlockCols, true> BlockType;
@@ -172,19 +172,24 @@ public:
}
inline const Scalar* valuePtr() const
- { return m_matrix.valuePtr() + m_matrix.outerIndexPtr()[m_outerStart]; }
+ { return m_matrix.valuePtr(); }
inline Scalar* valuePtr()
- { return m_matrix.const_cast_derived().valuePtr() + m_matrix.outerIndexPtr()[m_outerStart]; }
+ { return m_matrix.const_cast_derived().valuePtr(); }
inline const Index* innerIndexPtr() const
- { return m_matrix.innerIndexPtr() + m_matrix.outerIndexPtr()[m_outerStart]; }
+ { return m_matrix.innerIndexPtr(); }
inline Index* innerIndexPtr()
- { return m_matrix.const_cast_derived().innerIndexPtr() + m_matrix.outerIndexPtr()[m_outerStart]; }
+ { return m_matrix.const_cast_derived().innerIndexPtr(); }
inline const Index* outerIndexPtr() const
{ return m_matrix.outerIndexPtr() + m_outerStart; }
inline Index* outerIndexPtr()
{ return m_matrix.const_cast_derived().outerIndexPtr() + m_outerStart; }
+
+ inline const Index* innerNonZeroPtr() const
+ { return isCompressed() ? 0 : m_matrix.innerNonZeroPtr(); }
+ inline Index* innerNonZeroPtr()
+ { return isCompressed() ? 0 : m_matrix.const_cast_derived().innerNonZeroPtr(); }
Index nonZeros() const
{
@@ -196,6 +201,8 @@ public:
else
return Map<const Matrix<Index,OuterSize,1> >(m_matrix.innerNonZeroPtr()+m_outerStart, m_outerSize.value()).sum();
}
+
+ bool isCompressed() const { return m_matrix.innerNonZeroPtr()==0; }
const Scalar& lastCoeff() const
{
diff --git a/Eigen/src/SparseCore/SparseCompressedBase.h b/Eigen/src/SparseCore/SparseCompressedBase.h
new file mode 100644
index 000000000..00658181e
--- /dev/null
+++ b/Eigen/src/SparseCore/SparseCompressedBase.h
@@ -0,0 +1,199 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2015 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSE_COMPRESSED_BASE_H
+#define EIGEN_SPARSE_COMPRESSED_BASE_H
+
+namespace Eigen {
+
+template<typename Derived> class SparseCompressedBase;
+
+namespace internal {
+
+template<typename Derived>
+struct traits<SparseCompressedBase<Derived> > : traits<Derived>
+{};
+
+} // end namespace internal
+
+template<typename Derived>
+class SparseCompressedBase
+ : public SparseMatrixBase<Derived>
+{
+ public:
+ typedef SparseMatrixBase<Derived> Base;
+ _EIGEN_SPARSE_PUBLIC_INTERFACE(SparseCompressedBase)
+ using Base::operator=;
+ using Base::IsRowMajor;
+
+ class InnerIterator;
+ class ReverseInnerIterator;
+
+ /** \returns a const pointer to the array of values.
+ * This function is aimed at interoperability with other libraries.
+ * \sa innerIndexPtr(), outerIndexPtr() */
+ inline const Scalar* valuePtr() const { return derived().valuePtr(); }
+ /** \returns a non-const pointer to the array of values.
+ * This function is aimed at interoperability with other libraries.
+ * \sa innerIndexPtr(), outerIndexPtr() */
+ inline Scalar* valuePtr() { return derived().valuePtr(); }
+
+ /** \returns a const pointer to the array of inner indices.
+ * This function is aimed at interoperability with other libraries.
+ * \sa valuePtr(), outerIndexPtr() */
+ inline const Index* innerIndexPtr() const { return derived().innerIndexPtr(); }
+ /** \returns a non-const pointer to the array of inner indices.
+ * This function is aimed at interoperability with other libraries.
+ * \sa valuePtr(), outerIndexPtr() */
+ inline Index* innerIndexPtr() { return derived().innerIndexPtr(); }
+
+ /** \returns a const pointer to the array of the starting positions of the inner vectors.
+ * This function is aimed at interoperability with other libraries.
+ * \sa valuePtr(), innerIndexPtr() */
+ inline const Index* outerIndexPtr() const { return derived().outerIndexPtr(); }
+ /** \returns a non-const pointer to the array of the starting positions of the inner vectors.
+ * This function is aimed at interoperability with other libraries.
+ * \sa valuePtr(), innerIndexPtr() */
+ inline Index* outerIndexPtr() { return derived().outerIndexPtr(); }
+
+ /** \returns a const pointer to the array of the number of non zeros of the inner vectors.
+ * This function is aimed at interoperability with other libraries.
+ * \warning it returns the null pointer 0 in compressed mode */
+ inline const Index* innerNonZeroPtr() const { return derived().innerNonZeroPtr(); }
+ /** \returns a non-const pointer to the array of the number of non zeros of the inner vectors.
+ * This function is aimed at interoperability with other libraries.
+ * \warning it returns the null pointer 0 in compressed mode */
+ inline Index* innerNonZeroPtr() { return derived().innerNonZeroPtr(); }
+
+ /** \returns whether \c *this is in compressed form. */
+ inline bool isCompressed() const { return innerNonZeroPtr()==0; }
+
+};
+
+template<typename Derived>
+class SparseCompressedBase<Derived>::InnerIterator
+{
+ public:
+ InnerIterator(const SparseCompressedBase& mat, Index outer)
+ : m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(outer), m_id(mat.outerIndexPtr()[outer])
+ {
+ if(mat.isCompressed())
+ m_end = mat.outerIndexPtr()[outer+1];
+ else
+ m_end = m_id + mat.innerNonZeroPtr()[outer];
+ }
+
+ inline InnerIterator& operator++() { m_id++; return *this; }
+
+ inline const Scalar& value() const { return m_values[m_id]; }
+ inline Scalar& valueRef() { return const_cast<Scalar&>(m_values[m_id]); }
+
+ inline Index index() const { return m_indices[m_id]; }
+ inline Index outer() const { return m_outer; }
+ inline Index row() const { return IsRowMajor ? m_outer : index(); }
+ inline Index col() const { return IsRowMajor ? index() : m_outer; }
+
+ inline operator bool() const { return (m_id < m_end); }
+
+ protected:
+ const Scalar* m_values;
+ const Index* m_indices;
+ const Index m_outer;
+ Index m_id;
+ Index m_end;
+ private:
+ // If you get here, then you're not using the right InnerIterator type, e.g.:
+ // SparseMatrix<double,RowMajor> A;
+ // SparseMatrix<double>::InnerIterator it(A,0);
+ template<typename T> InnerIterator(const SparseMatrixBase<T>&,Index outer);
+};
+
+template<typename Derived>
+class SparseCompressedBase<Derived>::ReverseInnerIterator
+{
+ public:
+ ReverseInnerIterator(const SparseCompressedBase& mat, Index outer)
+ : m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(outer), m_start(mat.outerIndexPtr()[outer])
+ {
+ if(mat.isCompressed())
+ m_id = mat.outerIndexPtr()[outer+1];
+ else
+ m_id = m_start + mat.innerNonZeroPtr()[outer];
+ }
+
+ inline ReverseInnerIterator& operator--() { --m_id; return *this; }
+
+ inline const Scalar& value() const { return m_values[m_id-1]; }
+ inline Scalar& valueRef() { return const_cast<Scalar&>(m_values[m_id-1]); }
+
+ inline Index index() const { return m_indices[m_id-1]; }
+ inline Index outer() const { return m_outer; }
+ inline Index row() const { return IsRowMajor ? m_outer : index(); }
+ inline Index col() const { return IsRowMajor ? index() : m_outer; }
+
+ inline operator bool() const { return (m_id > m_start); }
+
+ protected:
+ const Scalar* m_values;
+ const Index* m_indices;
+ const Index m_outer;
+ Index m_id;
+ const Index m_start;
+};
+
+namespace internal {
+
+template<typename Derived>
+struct evaluator<SparseCompressedBase<Derived> >
+ : evaluator_base<Derived>
+{
+ typedef typename Derived::Scalar Scalar;
+ typedef typename Derived::Index Index;
+ typedef typename Derived::InnerIterator InnerIterator;
+ typedef typename Derived::ReverseInnerIterator ReverseInnerIterator;
+
+ enum {
+ CoeffReadCost = NumTraits<Scalar>::ReadCost,
+ Flags = Derived::Flags
+ };
+
+ evaluator() : m_matrix(0) {}
+ explicit evaluator(const Derived &mat) : m_matrix(&mat) {}
+
+ operator Derived&() { return m_matrix->const_cast_derived(); }
+ operator const Derived&() const { return *m_matrix; }
+
+ typedef typename DenseCoeffsBase<Derived,ReadOnlyAccessors>::CoeffReturnType CoeffReturnType;
+ Scalar coeff(Index row, Index col) const
+ { return m_matrix->coeff(row,col); }
+
+ Scalar& coeffRef(Index row, Index col)
+ {
+ eigen_internal_assert(row>=0 && row<m_matrix->rows() && col>=0 && col<m_matrix->cols());
+
+ const Index outer = Derived::IsRowMajor ? row : col;
+ const Index inner = Derived::IsRowMajor ? col : row;
+
+ Index start = m_matrix->outerIndexPtr()[outer];
+ Index end = m_matrix->isCompressed() ? m_matrix->outerIndexPtr()[outer+1] : m_matrix->outerIndexPtr()[outer] + m_matrix->innerNonZeroPtr()[outer];
+ eigen_assert(end>start && "you are using a non finalized sparse matrix or written coefficient does not exist");
+ const Index p = std::lower_bound(m_matrix->innerIndexPtr()+start, m_matrix->innerIndexPtr()+end,inner)
+ - m_matrix->innerIndexPtr();
+ eigen_assert((p<end) && (m_matrix->innerIndexPtr()[p]==inner) && "written coefficient does not exist");
+ return m_matrix->const_cast_derived().valuePtr()[p];
+ }
+
+ const Derived *m_matrix;
+};
+
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSE_COMPRESSED_BASE_H
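The InnerIterator and ReverseInnerIterator defined here are now shared by SparseMatrix, Map<SparseMatrix>, Ref and compressed blocks; the traversal pattern itself is unchanged:

    #include <Eigen/SparseCore>

    double sum_nonzeros(const Eigen::SparseMatrix<double>& A)
    {
      double sum = 0;
      for (int k = 0; k < A.outerSize(); ++k)
        for (Eigen::SparseMatrix<double>::InnerIterator it(A, k); it; ++it)
          sum += it.value();   // it.row(), it.col() and it.index() are also available
      return sum;
    }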
diff --git a/Eigen/src/SparseCore/SparseMap.h b/Eigen/src/SparseCore/SparseMap.h
new file mode 100644
index 000000000..72dedb1ec
--- /dev/null
+++ b/Eigen/src/SparseCore/SparseMap.h
@@ -0,0 +1,239 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2015 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSE_MAP_H
+#define EIGEN_SPARSE_MAP_H
+
+namespace Eigen {
+
+namespace internal {
+
+template<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>
+struct traits<Map<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> >
+ : public traits<SparseMatrix<MatScalar,MatOptions,MatIndex> >
+{
+ typedef SparseMatrix<MatScalar,MatOptions,MatIndex> PlainObjectType;
+ typedef traits<PlainObjectType> TraitsBase;
+ enum {
+ Flags = TraitsBase::Flags & (~NestByRefBit)
+ };
+};
+
+template<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>
+struct traits<Map<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> >
+ : public traits<SparseMatrix<MatScalar,MatOptions,MatIndex> >
+{
+ typedef SparseMatrix<MatScalar,MatOptions,MatIndex> PlainObjectType;
+ typedef traits<PlainObjectType> TraitsBase;
+ enum {
+ Flags = TraitsBase::Flags & (~ (NestByRefBit | LvalueBit))
+ };
+};
+
+} // end namespace internal
+
+template<typename Derived,
+ int Level = internal::accessors_level<Derived>::has_write_access ? WriteAccessors : ReadOnlyAccessors
+> class SparseMapBase;
+
+template<typename Derived>
+class SparseMapBase<Derived,ReadOnlyAccessors>
+ : public SparseCompressedBase<Derived>
+{
+ public:
+ typedef SparseCompressedBase<Derived> Base;
+ typedef typename Base::Scalar Scalar;
+ typedef typename Base::Index Index;
+ enum { IsRowMajor = Base::IsRowMajor };
+ using Base::operator=;
+ protected:
+
+ typedef typename internal::conditional<
+ bool(internal::is_lvalue<Derived>::value),
+ Scalar *, const Scalar *>::type ScalarPointer;
+ typedef typename internal::conditional<
+ bool(internal::is_lvalue<Derived>::value),
+ Index *, const Index *>::type IndexPointer;
+
+ Index m_outerSize;
+ Index m_innerSize;
+ Index m_nnz;
+ IndexPointer m_outerIndex;
+ IndexPointer m_innerIndices;
+ ScalarPointer m_values;
+ IndexPointer m_innerNonZeros;
+
+ public:
+
+ inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
+ inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }
+ inline Index innerSize() const { return m_innerSize; }
+ inline Index outerSize() const { return m_outerSize; }
+
+ bool isCompressed() const { return m_innerNonZeros==0; }
+
+ //----------------------------------------
+ // direct access interface
+ inline const Scalar* valuePtr() const { return m_values; }
+ inline const Index* innerIndexPtr() const { return m_innerIndices; }
+ inline const Index* outerIndexPtr() const { return m_outerIndex; }
+ inline const Index* innerNonZeroPtr() const { return m_innerNonZeros; }
+ //----------------------------------------
+
+ inline Scalar coeff(Index row, Index col) const
+ {
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
+
+ Index start = m_outerIndex[outer];
+ Index end = isCompressed() ? m_outerIndex[outer+1] : start + m_innerNonZeros[outer];
+ if (start==end)
+ return Scalar(0);
+ else if (end>0 && inner==m_innerIndices[end-1])
+ return m_values[end-1];
+ // ^^ optimization: let's first check if it is the last coefficient
+ // (very common in high level algorithms)
+
+ const Index* r = std::lower_bound(&m_innerIndices[start],&m_innerIndices[end-1],inner);
+ const Index id = r-&m_innerIndices[0];
+ return ((*r==inner) && (id<end)) ? m_values[id] : Scalar(0);
+ }
+
+ /** \returns the number of non zero coefficients */
+ inline Index nonZeros() const { return m_nnz; }
+
+ inline SparseMapBase(Index rows, Index cols, Index nnz, IndexPointer outerIndexPtr, IndexPointer innerIndexPtr,
+ ScalarPointer valuePtr, IndexPointer innerNonZerosPtr = 0)
+ : m_outerSize(IsRowMajor?rows:cols), m_innerSize(IsRowMajor?cols:rows), m_nnz(nnz), m_outerIndex(outerIndexPtr),
+ m_innerIndices(innerIndexPtr), m_values(valuePtr), m_innerNonZeros(innerNonZerosPtr)
+ {}
+
+ /** Empty destructor */
+ inline ~SparseMapBase() {}
+};
+
+template<typename Derived>
+class SparseMapBase<Derived,WriteAccessors>
+ : public SparseMapBase<Derived,ReadOnlyAccessors>
+{
+ typedef MapBase<Derived, ReadOnlyAccessors> ReadOnlyMapBase;
+
+ public:
+ typedef SparseMapBase<Derived, ReadOnlyAccessors> Base;
+ typedef typename Base::Scalar Scalar;
+ typedef typename Base::Index Index;
+ enum { IsRowMajor = Base::IsRowMajor };
+
+ using Base::operator=;
+
+ public:
+
+ //----------------------------------------
+ // direct access interface
+ using Base::valuePtr;
+ using Base::innerIndexPtr;
+ using Base::outerIndexPtr;
+ using Base::innerNonZeroPtr;
+ inline Scalar* valuePtr() { return Base::m_values; }
+ inline Index* innerIndexPtr() { return Base::m_innerIndices; }
+ inline Index* outerIndexPtr() { return Base::m_outerIndex; }
+ inline Index* innerNonZeroPtr() { return Base::m_innerNonZeros; }
+ //----------------------------------------
+
+ inline Scalar& coeffRef(Index row, Index col)
+ {
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
+
+ Index start = Base::m_outerIndex[outer];
+ Index end = Base::isCompressed() ? Base::m_outerIndex[outer+1] : start + Base::m_innerNonZeros[outer];
+ eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
+ eigen_assert(end>start && "coeffRef cannot be called on a zero coefficient");
+ Index* r = std::lower_bound(&Base::m_innerIndices[start],&Base::m_innerIndices[end],inner);
+ const Index id = r - &Base::m_innerIndices[0];
+ eigen_assert((*r==inner) && (id<end) && "coeffRef cannot be called on a zero coefficient");
+ return const_cast<Scalar*>(Base::m_values)[id];
+ }
+
+ inline SparseMapBase(Index rows, Index cols, Index nnz, Index* outerIndexPtr, Index* innerIndexPtr,
+ Scalar* valuePtr, Index* innerNonZerosPtr = 0)
+ : Base(rows, cols, nnz, outerIndexPtr, innerIndexPtr, valuePtr, innerNonZerosPtr)
+ {}
+
+ /** Empty destructor */
+ inline ~SparseMapBase() {}
+};
+
+template<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>
+class Map<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType>
+ : public SparseMapBase<Map<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> >
+{
+ public:
+ typedef SparseMapBase<Map> Base;
+ _EIGEN_SPARSE_PUBLIC_INTERFACE(Map)
+ enum { IsRowMajor = Base::IsRowMajor };
+
+ public:
+
+ inline Map(Index rows, Index cols, Index nnz, Index* outerIndexPtr,
+ Index* innerIndexPtr, Scalar* valuePtr, Index* innerNonZerosPtr = 0)
+ : Base(rows, cols, nnz, outerIndexPtr, innerIndexPtr, valuePtr, innerNonZerosPtr)
+ {}
+
+ /** Empty destructor */
+ inline ~Map() {}
+};
+
+template<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>
+class Map<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType>
+ : public SparseMapBase<Map<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> >
+{
+ public:
+ typedef SparseMapBase<Map> Base;
+ _EIGEN_SPARSE_PUBLIC_INTERFACE(Map)
+ enum { IsRowMajor = Base::IsRowMajor };
+
+ public:
+
+ inline Map(Index rows, Index cols, Index nnz, const Index* outerIndexPtr,
+ const Index* innerIndexPtr, const Scalar* valuePtr, const Index* innerNonZerosPtr = 0)
+ : Base(rows, cols, nnz, outerIndexPtr, innerIndexPtr, valuePtr, innerNonZerosPtr)
+ {}
+
+ /** Empty destructor */
+ inline ~Map() {}
+};
+
+namespace internal {
+
+template<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>
+struct evaluator<Map<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> >
+ : evaluator<SparseCompressedBase<Map<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> > >
+{
+ typedef evaluator<SparseCompressedBase<Map<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> > > Base;
+ typedef Map<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> XprType;
+ evaluator() : Base() {}
+ explicit evaluator(const XprType &mat) : Base(mat) {}
+};
+
+template<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>
+struct evaluator<Map<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> >
+ : evaluator<SparseCompressedBase<Map<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> > >
+{
+ typedef evaluator<SparseCompressedBase<Map<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> > > Base;
+ typedef Map<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> XprType;
+ evaluator() : Base() {}
+ explicit evaluator(const XprType &mat) : Base(mat) {}
+};
+
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSE_MAP_H
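The new Map specializations let existing compressed storage be viewed as an Eigen sparse matrix without copying; a minimal sketch over hand-written CSC arrays (the default int index type is assumed):

    #include <Eigen/SparseCore>
    #include <iostream>

    int main()
    {
      // 3x3 column-major (CSC) matrix with 4 non-zeros:
      // [1 0 2]
      // [0 3 0]
      // [0 0 4]
      int    outer[4]  = {0, 1, 2, 4};   // column starts (size cols+1)
      int    inner[4]  = {0, 1, 0, 2};   // row index of each stored value
      double values[4] = {1.0, 3.0, 2.0, 4.0};

      Eigen::Map<Eigen::SparseMatrix<double> > M(3, 3, 4, outer, inner, values);
      std::cout << M.coeff(0, 2) << " " << M.nonZeros() << std::endl;  // prints: 2 4
      return 0;
    }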
diff --git a/Eigen/src/SparseCore/SparseMatrix.h b/Eigen/src/SparseCore/SparseMatrix.h
index 93677c786..74b4c6a9d 100644
--- a/Eigen/src/SparseCore/SparseMatrix.h
+++ b/Eigen/src/SparseCore/SparseMatrix.h
@@ -51,7 +51,7 @@ struct traits<SparseMatrix<_Scalar, _Options, _Index> >
ColsAtCompileTime = Dynamic,
MaxRowsAtCompileTime = Dynamic,
MaxColsAtCompileTime = Dynamic,
- Flags = _Options | NestByRefBit | LvalueBit,
+ Flags = _Options | NestByRefBit | LvalueBit | CompressedAccessBit,
SupportedAccessPatterns = InnerRandomAccessPattern
};
};
@@ -90,16 +90,20 @@ struct traits<Diagonal<const SparseMatrix<_Scalar, _Options, _Index>, DiagIndex>
template<typename _Scalar, int _Options, typename _Index>
class SparseMatrix
- : public SparseMatrixBase<SparseMatrix<_Scalar, _Options, _Index> >
+ : public SparseCompressedBase<SparseMatrix<_Scalar, _Options, _Index> >
{
public:
- EIGEN_SPARSE_PUBLIC_INTERFACE(SparseMatrix)
+ typedef SparseCompressedBase<SparseMatrix> Base;
+ using Base::isCompressed;
+ _EIGEN_SPARSE_PUBLIC_INTERFACE(SparseMatrix)
EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseMatrix, +=)
EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseMatrix, -=)
typedef MappedSparseMatrix<Scalar,Flags> Map;
typedef Diagonal<SparseMatrix> DiagonalReturnType;
typedef Diagonal<const SparseMatrix> ConstDiagonalReturnType;
+ typedef typename Base::InnerIterator InnerIterator;
+ typedef typename Base::ReverseInnerIterator ReverseInnerIterator;
using Base::IsRowMajor;
@@ -123,9 +127,6 @@ class SparseMatrix
public:
- /** \returns whether \c *this is in compressed form. */
- inline bool isCompressed() const { return m_innerNonZeros==0; }
-
/** \returns the number of rows of the matrix */
inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
/** \returns the number of columns of the matrix */
@@ -241,9 +242,6 @@ class SparseMatrix
public:
- class InnerIterator;
- class ReverseInnerIterator;
-
/** Removes all non zeros but keep allocated memory */
inline void setZero()
{
@@ -875,76 +873,6 @@ private:
};
};
-template<typename Scalar, int _Options, typename _Index>
-class SparseMatrix<Scalar,_Options,_Index>::InnerIterator
-{
- public:
- InnerIterator(const SparseMatrix& mat, Index outer)
- : m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(outer), m_id(mat.m_outerIndex[outer])
- {
- if(mat.isCompressed())
- m_end = mat.m_outerIndex[outer+1];
- else
- m_end = m_id + mat.m_innerNonZeros[outer];
- }
-
- inline InnerIterator& operator++() { m_id++; return *this; }
-
- inline const Scalar& value() const { return m_values[m_id]; }
- inline Scalar& valueRef() { return const_cast<Scalar&>(m_values[m_id]); }
-
- inline Index index() const { return m_indices[m_id]; }
- inline Index outer() const { return m_outer; }
- inline Index row() const { return IsRowMajor ? m_outer : index(); }
- inline Index col() const { return IsRowMajor ? index() : m_outer; }
-
- inline operator bool() const { return (m_id < m_end); }
-
- protected:
- const Scalar* m_values;
- const Index* m_indices;
- const Index m_outer;
- Index m_id;
- Index m_end;
- private:
- // If you get here, then you're not using the right InnerIterator type, e.g.:
- // SparseMatrix<double,RowMajor> A;
- // SparseMatrix<double>::InnerIterator it(A,0);
- template<typename T> InnerIterator(const SparseMatrixBase<T>&,Index outer);
-};
-
-template<typename Scalar, int _Options, typename _Index>
-class SparseMatrix<Scalar,_Options,_Index>::ReverseInnerIterator
-{
- public:
- ReverseInnerIterator(const SparseMatrix& mat, Index outer)
- : m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(outer), m_start(mat.m_outerIndex[outer])
- {
- if(mat.isCompressed())
- m_id = mat.m_outerIndex[outer+1];
- else
- m_id = m_start + mat.m_innerNonZeros[outer];
- }
-
- inline ReverseInnerIterator& operator--() { --m_id; return *this; }
-
- inline const Scalar& value() const { return m_values[m_id-1]; }
- inline Scalar& valueRef() { return const_cast<Scalar&>(m_values[m_id-1]); }
-
- inline Index index() const { return m_indices[m_id-1]; }
- inline Index outer() const { return m_outer; }
- inline Index row() const { return IsRowMajor ? m_outer : index(); }
- inline Index col() const { return IsRowMajor ? index() : m_outer; }
-
- inline operator bool() const { return (m_id > m_start); }
-
- protected:
- const Scalar* m_values;
- const Index* m_indices;
- const Index m_outer;
- Index m_id;
- const Index m_start;
-};
namespace internal {
@@ -1075,6 +1003,10 @@ EIGEN_DONT_INLINE SparseMatrix<Scalar,_Options,_Index>& SparseMatrix<Scalar,_Opt
EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
+ #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
+ EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
+ #endif
+
const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
if (needToTranspose)
{
@@ -1277,45 +1209,12 @@ namespace internal {
template<typename _Scalar, int _Options, typename _Index>
struct evaluator<SparseMatrix<_Scalar,_Options,_Index> >
- : evaluator_base<SparseMatrix<_Scalar,_Options,_Index> >
+ : evaluator<SparseCompressedBase<SparseMatrix<_Scalar,_Options,_Index> > >
{
- typedef _Scalar Scalar;
- typedef _Index Index;
+ typedef evaluator<SparseCompressedBase<SparseMatrix<_Scalar,_Options,_Index> > > Base;
typedef SparseMatrix<_Scalar,_Options,_Index> SparseMatrixType;
- typedef typename SparseMatrixType::InnerIterator InnerIterator;
- typedef typename SparseMatrixType::ReverseInnerIterator ReverseInnerIterator;
-
- enum {
- CoeffReadCost = NumTraits<_Scalar>::ReadCost,
- Flags = SparseMatrixType::Flags
- };
-
- evaluator() : m_matrix(0) {}
- explicit evaluator(const SparseMatrixType &mat) : m_matrix(&mat) {}
-
- operator SparseMatrixType&() { return m_matrix->const_cast_derived(); }
- operator const SparseMatrixType&() const { return *m_matrix; }
-
- typedef typename DenseCoeffsBase<SparseMatrixType,ReadOnlyAccessors>::CoeffReturnType CoeffReturnType;
- Scalar coeff(Index row, Index col) const
- { return m_matrix->coeff(row,col); }
-
- Scalar& coeffRef(Index row, Index col)
- {
- eigen_internal_assert(row>=0 && row<m_matrix->rows() && col>=0 && col<m_matrix->cols());
-
- const Index outer = SparseMatrixType::IsRowMajor ? row : col;
- const Index inner = SparseMatrixType::IsRowMajor ? col : row;
-
- Index start = m_matrix->outerIndexPtr()[outer];
- Index end = m_matrix->isCompressed() ? m_matrix->outerIndexPtr()[outer+1] : m_matrix->outerIndexPtr()[outer] + m_matrix->innerNonZeroPtr()[outer];
- eigen_assert(end>start && "you are using a non finalized sparse matrix or written coefficient does not exist");
- const Index p = m_matrix->data().searchLowerIndex(start,end-1,inner);
- eigen_assert((p<end) && (m_matrix->data().index(p)==inner) && "written coefficient does not exist");
- return m_matrix->const_cast_derived().data().value(p);
- }
-
- const SparseMatrixType *m_matrix;
+ evaluator() : Base() {}
+ explicit evaluator(const SparseMatrixType &mat) : Base(mat) {}
};
}
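The EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN hook added to the converting assignment above gives tests a way to count how often that deep-copy path is taken (test/sparse_ref.cpp in this patch relies on it); a sketch, with the counter name being illustrative:

    // Must be defined before including any Eigen sparse header.
    static long int nb_temporaries = 0;
    #define EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN { nb_temporaries++; }

    #include <Eigen/SparseCore>
    #include <cassert>

    int main()
    {
      Eigen::SparseMatrix<double> A(10, 10);               // column-major
      A.insert(3, 4) = 1.0;
      Eigen::SparseMatrix<double, Eigen::RowMajor> B(10, 10);
      B = A;   // storage-order conversion goes through the hooked operator=
      assert(nb_temporaries == 1);
      return 0;
    }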
diff --git a/Eigen/src/SparseCore/SparseRef.h b/Eigen/src/SparseCore/SparseRef.h
new file mode 100644
index 000000000..2ca039323
--- /dev/null
+++ b/Eigen/src/SparseCore/SparseRef.h
@@ -0,0 +1,192 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2015 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSE_REF_H
+#define EIGEN_SPARSE_REF_H
+
+namespace Eigen {
+
+namespace internal {
+
+template<typename Derived> class SparseRefBase;
+
+template<typename MatScalar, int MatOptions, typename MatIndex, int _Options, typename _StrideType>
+struct traits<Ref<SparseMatrix<MatScalar,MatOptions,MatIndex>, _Options, _StrideType> >
+ : public traits<SparseMatrix<MatScalar,MatOptions,MatIndex> >
+{
+ typedef SparseMatrix<MatScalar,MatOptions,MatIndex> PlainObjectType;
+ enum {
+ Options = _Options,
+ Flags = traits<SparseMatrix<MatScalar,MatOptions,MatIndex> >::Flags | CompressedAccessBit | NestByRefBit
+ };
+
+ template<typename Derived> struct match {
+ enum {
+ StorageOrderMatch = PlainObjectType::IsVectorAtCompileTime || Derived::IsVectorAtCompileTime || ((PlainObjectType::Flags&RowMajorBit)==(Derived::Flags&RowMajorBit)),
+ MatchAtCompileTime = (Derived::Flags&CompressedAccessBit) && StorageOrderMatch
+ };
+ typedef typename internal::conditional<MatchAtCompileTime,internal::true_type,internal::false_type>::type type;
+ };
+
+};
+
+template<typename MatScalar, int MatOptions, typename MatIndex, int _Options, typename _StrideType>
+struct traits<Ref<const SparseMatrix<MatScalar,MatOptions,MatIndex>, _Options, _StrideType> >
+ : public traits<Ref<SparseMatrix<MatScalar,MatOptions,MatIndex>, _Options, _StrideType> >
+{
+ enum {
+ Flags = (traits<SparseMatrix<MatScalar,MatOptions,MatIndex> >::Flags | CompressedAccessBit | NestByRefBit) & ~LvalueBit
+ };
+};
+
+template<typename Derived>
+struct traits<SparseRefBase<Derived> > : public traits<Derived> {};
+
+template<typename Derived> class SparseRefBase
+ : public SparseMapBase<Derived>
+{
+public:
+
+ typedef SparseMapBase<Derived> Base;
+ _EIGEN_SPARSE_PUBLIC_INTERFACE(SparseRefBase)
+
+ SparseRefBase()
+ : Base(RowsAtCompileTime==Dynamic?0:RowsAtCompileTime,ColsAtCompileTime==Dynamic?0:ColsAtCompileTime, 0, 0, 0, 0, 0)
+ {}
+
+protected:
+
+
+ template<typename Expression>
+ void construct(Expression& expr)
+ {
+ ::new (static_cast<Base*>(this)) Base(expr.rows(), expr.cols(), expr.nonZeros(), expr.outerIndexPtr(), expr.innerIndexPtr(), expr.valuePtr(), expr.innerNonZeroPtr());
+ }
+};
+
+} // namespace internal
+
+template<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>
+class Ref<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType >
+ : public internal::SparseRefBase<Ref<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType > >
+{
+ typedef SparseMatrix<MatScalar,MatOptions,MatIndex> PlainObjectType;
+ typedef internal::traits<Ref> Traits;
+ template<int OtherOptions>
+ inline Ref(const SparseMatrix<MatScalar,OtherOptions,MatIndex>& expr);
+ template<int OtherOptions>
+ inline Ref(const MappedSparseMatrix<MatScalar,OtherOptions,MatIndex>& expr);
+ public:
+
+ typedef internal::SparseRefBase<Ref> Base;
+ _EIGEN_SPARSE_PUBLIC_INTERFACE(Ref)
+
+
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
+ template<int OtherOptions>
+ inline Ref(SparseMatrix<MatScalar,OtherOptions,MatIndex>& expr)
+ {
+ EIGEN_STATIC_ASSERT(bool(Traits::template match<SparseMatrix<MatScalar,OtherOptions,MatIndex> >::MatchAtCompileTime), STORAGE_LAYOUT_DOES_NOT_MATCH);
+ Base::construct(expr.derived());
+ }
+
+ template<int OtherOptions>
+ inline Ref(MappedSparseMatrix<MatScalar,OtherOptions,MatIndex>& expr)
+ {
+ EIGEN_STATIC_ASSERT(bool(Traits::template match<SparseMatrix<MatScalar,OtherOptions,MatIndex> >::MatchAtCompileTime), STORAGE_LAYOUT_DOES_NOT_MATCH);
+ Base::construct(expr.derived());
+ }
+
+ template<typename Derived>
+ inline Ref(const SparseCompressedBase<Derived>& expr)
+ #else
+ template<typename Derived>
+ inline Ref(SparseCompressedBase<Derived>& expr)
+ #endif
+ {
+ EIGEN_STATIC_ASSERT(bool(internal::is_lvalue<Derived>::value), THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY);
+ EIGEN_STATIC_ASSERT(bool(Traits::template match<Derived>::MatchAtCompileTime), STORAGE_LAYOUT_DOES_NOT_MATCH);
+ Base::construct(expr.const_cast_derived());
+ }
+};
+
+// this is the const ref version
+template<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>
+class Ref<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType>
+ : public internal::SparseRefBase<Ref<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> >
+{
+ typedef SparseMatrix<MatScalar,MatOptions,MatIndex> TPlainObjectType;
+ typedef internal::traits<Ref> Traits;
+ public:
+
+ typedef internal::SparseRefBase<Ref> Base;
+ _EIGEN_SPARSE_PUBLIC_INTERFACE(Ref)
+
+ template<typename Derived>
+ inline Ref(const SparseMatrixBase<Derived>& expr)
+ {
+ construct(expr.derived(), typename Traits::template match<Derived>::type());
+ }
+
+ inline Ref(const Ref& other) : Base(other) {
+ // copy constructor shall not copy the m_object, to avoid unnecessary malloc and copy
+ }
+
+ template<typename OtherRef>
+ inline Ref(const RefBase<OtherRef>& other) {
+ construct(other.derived(), typename Traits::template match<OtherRef>::type());
+ }
+
+ protected:
+
+ template<typename Expression>
+ void construct(const Expression& expr,internal::true_type)
+ {
+ Base::construct(expr);
+ }
+
+ template<typename Expression>
+ void construct(const Expression& expr, internal::false_type)
+ {
+ m_object = expr;
+ Base::construct(m_object);
+ }
+
+ protected:
+ TPlainObjectType m_object;
+};
+
+
+namespace internal {
+
+template<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>
+struct evaluator<Ref<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> >
+ : evaluator<SparseCompressedBase<Ref<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> > >
+{
+ typedef evaluator<SparseCompressedBase<Ref<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> > > Base;
+ typedef Ref<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> XprType;
+ evaluator() : Base() {}
+ explicit evaluator(const XprType &mat) : Base(mat) {}
+};
+
+template<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>
+struct evaluator<Ref<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> >
+ : evaluator<SparseCompressedBase<Ref<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> > >
+{
+ typedef evaluator<SparseCompressedBase<Ref<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> > > Base;
+ typedef Ref<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> XprType;
+ evaluator() : Base() {}
+ explicit evaluator(const XprType &mat) : Base(mat) {}
+};
+
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSE_REF_H
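A minimal usage sketch of the sparse Ref<> classes defined above (the function names accept_rw/accept_ro and the 10x10 matrix are illustrative only; the copy/no-copy behaviour mirrors the call_ref_1/call_ref_2 checks in test/sparse_ref.cpp further down in this patch):

```cpp
#include <Eigen/SparseCore>
#include <iostream>

// Binds in place to any writable compressed-storage object (SparseMatrix,
// Map<SparseMatrix>, ...); a mismatching storage order would not compile.
void accept_rw(Eigen::Ref<Eigen::SparseMatrix<double> > m)
{
  if(m.nonZeros() > 0)
    m.valuePtr()[0] *= 2.0;   // writes go straight into the referenced buffers
}

// Read-only variant: binds in place when the storage matches, otherwise it
// evaluates the expression into the internal m_object temporary seen above.
void accept_ro(const Eigen::Ref<const Eigen::SparseMatrix<double> >& m)
{
  std::cout << m.nonZeros() << " nonzeros\n";
}

int main()
{
  Eigen::SparseMatrix<double> A(10,10);
  A.insert(2,3) = 4.0;
  A.makeCompressed();
  accept_rw(A);       // no copy
  accept_ro(A);       // no copy
  accept_ro(A + A);   // one temporary, as in the call_ref_2(A+A, ...) test below
}
```

The writable Ref binds only to objects that already own compatible compressed storage, whereas the const Ref silently falls back to evaluation, which is why the read-only overload is the safer default for generic function arguments.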
diff --git a/Eigen/src/SparseCore/SparseTranspose.h b/Eigen/src/SparseCore/SparseTranspose.h
index c3d2d1a16..37ce7b0d5 100644
--- a/Eigen/src/SparseCore/SparseTranspose.h
+++ b/Eigen/src/SparseCore/SparseTranspose.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2008-2015 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@@ -12,13 +12,41 @@
namespace Eigen {
+namespace internal {
+ template<typename MatrixType,int CompressedAccess=int(MatrixType::Flags&CompressedAccessBit)>
+ class SparseTransposeImpl
+ : public SparseMatrixBase<Transpose<MatrixType> >
+ {};
+
+ template<typename MatrixType>
+ class SparseTransposeImpl<MatrixType,CompressedAccessBit>
+ : public SparseCompressedBase<Transpose<MatrixType> >
+ {
+ typedef SparseCompressedBase<Transpose<MatrixType> > Base;
+ public:
+ using Base::derived;
+ typedef typename Base::Scalar Scalar;
+ typedef typename Base::Index Index;
+
+ inline const Scalar* valuePtr() const { return derived().nestedExpression().valuePtr(); }
+ inline const Index* innerIndexPtr() const { return derived().nestedExpression().innerIndexPtr(); }
+ inline const Index* outerIndexPtr() const { return derived().nestedExpression().outerIndexPtr(); }
+ inline const Index* innerNonZeroPtr() const { return derived().nestedExpression().innerNonZeroPtr(); }
+
+ inline Scalar* valuePtr() { return derived().nestedExpression().valuePtr(); }
+ inline Index* innerIndexPtr() { return derived().nestedExpression().innerIndexPtr(); }
+ inline Index* outerIndexPtr() { return derived().nestedExpression().outerIndexPtr(); }
+ inline Index* innerNonZeroPtr() { return derived().nestedExpression().innerNonZeroPtr(); }
+ };
+}
+
// Implement nonZeros() for transpose. I'm not sure that's the best approach for that.
// Perhaps it should be implemented in Transpose<> itself.
template<typename MatrixType> class TransposeImpl<MatrixType,Sparse>
- : public SparseMatrixBase<Transpose<MatrixType> >
+ : public internal::SparseTransposeImpl<MatrixType>
{
protected:
- typedef SparseMatrixBase<Transpose<MatrixType> > Base;
+ typedef internal::SparseTransposeImpl<MatrixType> Base;
public:
inline typename MatrixType::Index nonZeros() const { return Base::derived().nestedExpression().nonZeros(); }
};
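Because the CompressedAccessBit specialization above simply forwards the nested expression's buffers, the transpose of a compressed column-major matrix behaves as a compressed row-major view of the same storage. A small sketch of what this enables, mirroring the B.transpose()/Br.transpose() checks in test/sparse_ref.cpp below (the 4x4 matrix is made up):

```cpp
#include <Eigen/SparseCore>
#include <cassert>

int main()
{
  Eigen::SparseMatrix<float> A(4,4);   // column-major
  A.insert(1,2) = 3.f;
  A.makeCompressed();

  // The transpose of a compressed matrix can now be mapped as a compressed
  // row-major object without any copy: it aliases A's own buffers.
  Eigen::Ref<const Eigen::SparseMatrix<float,Eigen::RowMajor> > At(A.transpose());
  assert(At.valuePtr() == A.valuePtr());
  assert(At.nonZeros() == A.nonZeros());
  return 0;
}
```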
diff --git a/Eigen/src/SparseCore/SparseUtil.h b/Eigen/src/SparseCore/SparseUtil.h
index 8de227b88..ba4803646 100644
--- a/Eigen/src/SparseCore/SparseUtil.h
+++ b/Eigen/src/SparseCore/SparseUtil.h
@@ -43,8 +43,7 @@ EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(Derived, -=) \
EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, *=) \
EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, /=)
-#define _EIGEN_SPARSE_PUBLIC_INTERFACE(Derived, BaseClass) \
- typedef BaseClass Base; \
+#define _EIGEN_SPARSE_PUBLIC_INTERFACE(Derived) \
typedef typename Eigen::internal::traits<Derived >::Scalar Scalar; \
typedef typename Eigen::NumTraits<Scalar>::Real RealScalar; \
typedef typename Eigen::internal::nested<Derived >::type Nested; \
@@ -59,7 +58,8 @@ EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, /=)
using Base::const_cast_derived;
#define EIGEN_SPARSE_PUBLIC_INTERFACE(Derived) \
- _EIGEN_SPARSE_PUBLIC_INTERFACE(Derived, Eigen::SparseMatrixBase<Derived >)
+ typedef Eigen::SparseMatrixBase<Derived > Base; \
+ _EIGEN_SPARSE_PUBLIC_INTERFACE(Derived)
const int CoherentAccessPattern = 0x1;
const int InnerRandomAccessPattern = 0x2 | CoherentAccessPattern;
diff --git a/blas/common.h b/blas/common.h
index c39cc63a8..5ecb153e2 100644
--- a/blas/common.h
+++ b/blas/common.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2009-2015 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@@ -13,7 +13,6 @@
#include <Eigen/Core>
#include <Eigen/Jacobi>
-#include <iostream>
#include <complex>
#ifndef SCALAR
diff --git a/blas/level2_impl.h b/blas/level2_impl.h
index 233c7b753..e604fe611 100644
--- a/blas/level2_impl.h
+++ b/blas/level2_impl.h
@@ -9,6 +9,20 @@
#include "common.h"
+template<typename Index, typename Scalar, int StorageOrder, bool ConjugateLhs, bool ConjugateRhs>
+struct general_matrix_vector_product_wrapper
+{
+ static void run(Index rows, Index cols,const Scalar *lhs, Index lhsStride, const Scalar *rhs, Index rhsIncr, Scalar* res, Index resIncr, Scalar alpha)
+ {
+ typedef internal::const_blas_data_mapper<Scalar,Index,StorageOrder> LhsMapper;
+ typedef internal::const_blas_data_mapper<Scalar,Index,RowMajor> RhsMapper;
+
+ internal::general_matrix_vector_product
+ <Index,Scalar,LhsMapper,StorageOrder,ConjugateLhs,Scalar,RhsMapper,ConjugateRhs>::run(
+ rows, cols, LhsMapper(lhs, lhsStride), RhsMapper(rhs, rhsIncr), res, resIncr, alpha);
+ }
+};
+
int EIGEN_BLAS_FUNC(gemv)(char *opa, int *m, int *n, RealScalar *palpha, RealScalar *pa, int *lda, RealScalar *pb, int *incb, RealScalar *pbeta, RealScalar *pc, int *incc)
{
typedef void (*functype)(int, int, const Scalar *, int, const Scalar *, int , Scalar *, int, Scalar);
@@ -20,9 +34,9 @@ int EIGEN_BLAS_FUNC(gemv)(char *opa, int *m, int *n, RealScalar *palpha, RealSca
for(int k=0; k<4; ++k)
func[k] = 0;
- func[NOTR] = (internal::general_matrix_vector_product<int,Scalar,ColMajor,false,Scalar,false>::run);
- func[TR ] = (internal::general_matrix_vector_product<int,Scalar,RowMajor,false,Scalar,false>::run);
- func[ADJ ] = (internal::general_matrix_vector_product<int,Scalar,RowMajor,Conj, Scalar,false>::run);
+ func[NOTR] = (general_matrix_vector_product_wrapper<int,Scalar,ColMajor,false,false>::run);
+ func[TR ] = (general_matrix_vector_product_wrapper<int,Scalar,RowMajor,false,false>::run);
+ func[ADJ ] = (general_matrix_vector_product_wrapper<int,Scalar,RowMajor,Conj ,false>::run);
init = true;
}
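For readers less familiar with the kernel being dispatched here: it accumulates res += alpha * op(A) * x. The sketch below expresses the NOTR case with the public Map API only; the array data is made up and nothing in it touches the internal mapper types.

```cpp
#include <Eigen/Core>
#include <iostream>

int main()
{
  const int m = 3, n = 2;
  double a[m*n] = {1, 2, 3,  4, 5, 6};  // column-major 3x2 matrix, lda == 3
  double x[n]   = {1, 1};
  double y[m]   = {0, 0, 0};
  double alpha  = 2.0;

  Eigen::Map<Eigen::MatrixXd> A(a, m, n);
  Eigen::Map<Eigen::VectorXd> X(x, n), Y(y, m);
  Y += alpha * A * X;                       // NOTR case of gemv
  std::cout << Y.transpose() << std::endl;  // prints 10 14 18
}
```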
diff --git a/blas/xerbla.cpp b/blas/xerbla.cpp
index 0422f79b7..8775b88cd 100644
--- a/blas/xerbla.cpp
+++ b/blas/xerbla.cpp
@@ -1,5 +1,5 @@
-#include <iostream>
+#include <stdio.h>
#if (defined __GNUC__) && (!defined __MINGW32__)
#define EIGEN_WEAK_LINKING __attribute__ ((weak))
@@ -14,7 +14,7 @@ extern "C"
EIGEN_WEAK_LINKING int xerbla_(const char * msg, int *info, int)
{
- std::cerr << "Eigen BLAS ERROR #" << *info << ": " << msg << "\n";
+ printf("Eigen BLAS ERROR #%i: %s\n", *info, msg );
return 0;
}
diff --git a/failtest/CMakeLists.txt b/failtest/CMakeLists.txt
index d2fea7bdc..c8795a344 100644
--- a/failtest/CMakeLists.txt
+++ b/failtest/CMakeLists.txt
@@ -41,6 +41,12 @@ ei_add_failtest("ref_5")
ei_add_failtest("swap_1")
ei_add_failtest("swap_2")
+ei_add_failtest("sparse_ref_1")
+ei_add_failtest("sparse_ref_2")
+ei_add_failtest("sparse_ref_3")
+ei_add_failtest("sparse_ref_4")
+ei_add_failtest("sparse_ref_5")
+
if (EIGEN_FAILTEST_FAILURE_COUNT)
message(FATAL_ERROR
"${EIGEN_FAILTEST_FAILURE_COUNT} out of ${EIGEN_FAILTEST_COUNT} failtests FAILED. "
diff --git a/failtest/sparse_ref_1.cpp b/failtest/sparse_ref_1.cpp
new file mode 100644
index 000000000..d78d1f9b1
--- /dev/null
+++ b/failtest/sparse_ref_1.cpp
@@ -0,0 +1,18 @@
+#include "../Eigen/Sparse"
+
+#ifdef EIGEN_SHOULD_FAIL_TO_BUILD
+#define CV_QUALIFIER const
+#else
+#define CV_QUALIFIER
+#endif
+
+using namespace Eigen;
+
+void call_ref(Ref<SparseMatrix<float> > a) { }
+
+int main()
+{
+ SparseMatrix<float> a(10,10);
+ CV_QUALIFIER SparseMatrix<float>& ac(a);
+ call_ref(ac);
+}
diff --git a/failtest/sparse_ref_2.cpp b/failtest/sparse_ref_2.cpp
new file mode 100644
index 000000000..46c9440c2
--- /dev/null
+++ b/failtest/sparse_ref_2.cpp
@@ -0,0 +1,15 @@
+#include "../Eigen/Sparse"
+
+using namespace Eigen;
+
+void call_ref(Ref<SparseMatrix<float> > a) { }
+
+int main()
+{
+ SparseMatrix<float> A(10,10);
+#ifdef EIGEN_SHOULD_FAIL_TO_BUILD
+ call_ref(A.row(3));
+#else
+ call_ref(A.col(3));
+#endif
+}
diff --git a/failtest/sparse_ref_3.cpp b/failtest/sparse_ref_3.cpp
new file mode 100644
index 000000000..a9949b552
--- /dev/null
+++ b/failtest/sparse_ref_3.cpp
@@ -0,0 +1,15 @@
+#include "../Eigen/Sparse"
+
+using namespace Eigen;
+
+#ifdef EIGEN_SHOULD_FAIL_TO_BUILD
+void call_ref(Ref<SparseMatrix<float> > a) { }
+#else
+void call_ref(const Ref<const SparseMatrix<float> > &a) { }
+#endif
+
+int main()
+{
+ SparseMatrix<float> a(10,10);
+ call_ref(a+a);
+}
diff --git a/failtest/sparse_ref_4.cpp b/failtest/sparse_ref_4.cpp
new file mode 100644
index 000000000..57bb6a1fc
--- /dev/null
+++ b/failtest/sparse_ref_4.cpp
@@ -0,0 +1,15 @@
+#include "../Eigen/Sparse"
+
+using namespace Eigen;
+
+void call_ref(Ref<SparseMatrix<float> > a) {}
+
+int main()
+{
+ SparseMatrix<float> A(10,10);
+#ifdef EIGEN_SHOULD_FAIL_TO_BUILD
+ call_ref(A.transpose());
+#else
+ call_ref(A);
+#endif
+}
diff --git a/failtest/sparse_ref_5.cpp b/failtest/sparse_ref_5.cpp
new file mode 100644
index 000000000..4478f6f2f
--- /dev/null
+++ b/failtest/sparse_ref_5.cpp
@@ -0,0 +1,16 @@
+#include "../Eigen/Sparse"
+
+using namespace Eigen;
+
+void call_ref(Ref<SparseMatrix<float> > a) { }
+
+int main()
+{
+ SparseMatrix<float> a(10,10);
+ SparseMatrixBase<SparseMatrix<float> > &ac(a);
+#ifdef EIGEN_SHOULD_FAIL_TO_BUILD
+ call_ref(ac);
+#else
+ call_ref(ac.derived());
+#endif
+}
diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt
index f57d8ce36..168749634 100644
--- a/test/CMakeLists.txt
+++ b/test/CMakeLists.txt
@@ -228,6 +228,7 @@ ei_add_test(stddeque)
ei_add_test(sparse_basic)
ei_add_test(sparse_vector)
ei_add_test(sparse_product)
+ei_add_test(sparse_ref)
ei_add_test(sparse_solvers)
ei_add_test(sparse_permutations)
ei_add_test(simplicial_cholesky)
diff --git a/test/conjugate_gradient.cpp b/test/conjugate_gradient.cpp
index 869051b31..019cc4d64 100644
--- a/test/conjugate_gradient.cpp
+++ b/test/conjugate_gradient.cpp
@@ -12,13 +12,15 @@
template<typename T> void test_conjugate_gradient_T()
{
- ConjugateGradient<SparseMatrix<T>, Lower> cg_colmajor_lower_diag;
- ConjugateGradient<SparseMatrix<T>, Upper> cg_colmajor_upper_diag;
+ ConjugateGradient<SparseMatrix<T>, Lower > cg_colmajor_lower_diag;
+ ConjugateGradient<SparseMatrix<T>, Upper > cg_colmajor_upper_diag;
+ ConjugateGradient<SparseMatrix<T>, Lower|Upper> cg_colmajor_loup_diag;
ConjugateGradient<SparseMatrix<T>, Lower, IdentityPreconditioner> cg_colmajor_lower_I;
ConjugateGradient<SparseMatrix<T>, Upper, IdentityPreconditioner> cg_colmajor_upper_I;
CALL_SUBTEST( check_sparse_spd_solving(cg_colmajor_lower_diag) );
CALL_SUBTEST( check_sparse_spd_solving(cg_colmajor_upper_diag) );
+ CALL_SUBTEST( check_sparse_spd_solving(cg_colmajor_loup_diag) );
CALL_SUBTEST( check_sparse_spd_solving(cg_colmajor_lower_I) );
CALL_SUBTEST( check_sparse_spd_solving(cg_colmajor_upper_I) );
}
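The new Lower|Upper mode lets ConjugateGradient work on the full stored matrix instead of a selfadjoint view of one triangular half. A usage sketch (the tridiagonal SPD matrix below is made up for illustration):

```cpp
#include <Eigen/Sparse>
#include <iostream>

int main()
{
  typedef Eigen::SparseMatrix<double> SpMat;
  const int n = 100;

  // Simple SPD test matrix: tridiagonal with 2 on the diagonal and -1 off it.
  SpMat A(n,n);
  A.reserve(Eigen::VectorXi::Constant(n,3));
  for(int i=0; i<n; ++i)
  {
    A.insert(i,i) = 2.0;
    if(i>0)   A.insert(i,i-1) = -1.0;
    if(i<n-1) A.insert(i,i+1) = -1.0;
  }
  A.makeCompressed();
  Eigen::VectorXd b = Eigen::VectorXd::Ones(n);

  // Lower|Upper: the full stored matrix is used directly.
  Eigen::ConjugateGradient<SpMat, Eigen::Lower|Eigen::Upper> cg(A);
  Eigen::VectorXd x = cg.solve(b);
  std::cout << "#iterations: " << cg.iterations()
            << ", estimated error: " << cg.error() << std::endl;
}
```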
diff --git a/test/sparse_basic.cpp b/test/sparse_basic.cpp
index 097959c84..a19de296a 100644
--- a/test/sparse_basic.cpp
+++ b/test/sparse_basic.cpp
@@ -408,6 +408,26 @@ template<typename SparseMatrixType> void sparse_basic(const SparseMatrixType& re
m.setFromTriplets(triplets.begin(), triplets.end());
VERIFY_IS_APPROX(m, refMat);
}
+
+ // test Map
+ {
+ DenseMatrix refMat2(rows, cols), refMat3(rows, cols);
+ SparseMatrixType m2(rows, cols), m3(rows, cols);
+ initSparse<Scalar>(density, refMat2, m2);
+ initSparse<Scalar>(density, refMat3, m3);
+ {
+ Map<SparseMatrixType> mapMat2(m2.rows(), m2.cols(), m2.nonZeros(), m2.outerIndexPtr(), m2.innerIndexPtr(), m2.valuePtr(), m2.innerNonZeroPtr());
+ Map<SparseMatrixType> mapMat3(m3.rows(), m3.cols(), m3.nonZeros(), m3.outerIndexPtr(), m3.innerIndexPtr(), m3.valuePtr(), m3.innerNonZeroPtr());
+ VERIFY_IS_APPROX(mapMat2+mapMat3, refMat2+refMat3);
+ VERIFY_IS_APPROX(mapMat2+mapMat3, refMat2+refMat3);
+ }
+ {
+ MappedSparseMatrix<Scalar,SparseMatrixType::Options,Index> mapMat2(m2.rows(), m2.cols(), m2.nonZeros(), m2.outerIndexPtr(), m2.innerIndexPtr(), m2.valuePtr(), m2.innerNonZeroPtr());
+ MappedSparseMatrix<Scalar,SparseMatrixType::Options,Index> mapMat3(m3.rows(), m3.cols(), m3.nonZeros(), m3.outerIndexPtr(), m3.innerIndexPtr(), m3.valuePtr(), m3.innerNonZeroPtr());
+ VERIFY_IS_APPROX(mapMat2+mapMat3, refMat2+refMat3);
+ VERIFY_IS_APPROX(mapMat2+mapMat3, refMat2+refMat3);
+ }
+ }
// test triangularView
{
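The Map<SparseMatrixType> test above wraps buffers taken from an existing SparseMatrix; the same constructor also accepts raw user-owned arrays. A standalone sketch (the 3x3 CSC data is made up):

```cpp
#include <Eigen/SparseCore>
#include <iostream>

int main()
{
  // CSC storage of [[1 0 2],[0 3 0],[0 0 4]]: 3 columns, 4 nonzeros.
  int    outer[4]  = {0, 1, 2, 4};   // start of each column in inner/values
  int    inner[4]  = {0, 1, 0, 2};   // row index of each stored value
  double values[4] = {1.0, 3.0, 2.0, 4.0};

  // Last argument: no innerNonZero array, i.e. fully compressed storage.
  Eigen::Map<Eigen::SparseMatrix<double> > M(3, 3, 4, outer, inner, values, 0);

  std::cout << M.toDense() << std::endl;  // read through the usual sparse API
  M.valuePtr()[3] = 5.0;                  // writes land directly in 'values'
}
```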
diff --git a/test/sparse_ref.cpp b/test/sparse_ref.cpp
new file mode 100644
index 000000000..e7380ba21
--- /dev/null
+++ b/test/sparse_ref.cpp
@@ -0,0 +1,99 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2015 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// This unit test cannot be easily written to work with EIGEN_DEFAULT_TO_ROW_MAJOR
+#ifdef EIGEN_DEFAULT_TO_ROW_MAJOR
+#undef EIGEN_DEFAULT_TO_ROW_MAJOR
+#endif
+
+static long int nb_temporaries;
+
+inline void on_temporary_creation() {
+ // here's a great place to set a breakpoint when debugging failures in this test!
+ nb_temporaries++;
+}
+
+#define EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN { on_temporary_creation(); }
+
+#include "main.h"
+#include <Eigen/SparseCore>
+
+#define VERIFY_EVALUATION_COUNT(XPR,N) {\
+ nb_temporaries = 0; \
+ XPR; \
+ if(nb_temporaries!=N) std::cerr << "nb_temporaries == " << nb_temporaries << "\n"; \
+ VERIFY( (#XPR) && nb_temporaries==N ); \
+ }
+
+template<typename PlainObjectType> void check_const_correctness(const PlainObjectType&)
+{
+ // verify that a ref-to-const doesn't have the LvalueBit set
+ typedef typename internal::add_const<PlainObjectType>::type ConstPlainObjectType;
+ VERIFY( !(internal::traits<Ref<ConstPlainObjectType> >::Flags & LvalueBit) );
+ VERIFY( !(internal::traits<Ref<ConstPlainObjectType, Aligned> >::Flags & LvalueBit) );
+ VERIFY( !(Ref<ConstPlainObjectType>::Flags & LvalueBit) );
+ VERIFY( !(Ref<ConstPlainObjectType, Aligned>::Flags & LvalueBit) );
+}
+
+template<typename B>
+EIGEN_DONT_INLINE void call_ref_1(Ref<SparseMatrix<float> > a, const B &b) { VERIFY_IS_EQUAL(a.toDense(),b.toDense()); }
+
+template<typename B>
+EIGEN_DONT_INLINE void call_ref_2(const Ref<const SparseMatrix<float> >& a, const B &b) { VERIFY_IS_EQUAL(a.toDense(),b.toDense()); }
+
+void call_ref()
+{
+// SparseVector<std::complex<float> > ca = VectorXcf::Random(10).sparseView();
+// SparseVector<float> a = VectorXf::Random(10).sparseView();
+ SparseMatrix<float> A = MatrixXf::Random(10,10).sparseView(0.5,1);
+ SparseMatrix<float,RowMajor> B = MatrixXf::Random(10,10).sparseView(0.5,1);
+ const SparseMatrix<float>& Ac(A);
+ Block<SparseMatrix<float> > Ab(A,0,1, 3,3);
+ const Block<SparseMatrix<float> > Abc(A,0,1,3,3);
+
+
+ VERIFY_EVALUATION_COUNT( call_ref_1(A, A), 0);
+// VERIFY_EVALUATION_COUNT( call_ref_1(Ac, Ac), 0); // does not compile on purpose
+ VERIFY_EVALUATION_COUNT( call_ref_2(A, A), 0);
+ VERIFY_EVALUATION_COUNT( call_ref_2(A.transpose(), A.transpose()), 1);
+ VERIFY_EVALUATION_COUNT( call_ref_2(Ac,Ac), 0);
+ VERIFY_EVALUATION_COUNT( call_ref_2(A+A,2*Ac), 1);
+ VERIFY_EVALUATION_COUNT( call_ref_2(B, B), 1);
+ VERIFY_EVALUATION_COUNT( call_ref_2(B.transpose(), B.transpose()), 0);
+ VERIFY_EVALUATION_COUNT( call_ref_2(A*A, A*A), 1);
+
+ Ref<SparseMatrix<float> > Ar(A);
+ VERIFY_IS_APPROX(Ar+Ar, A+A);
+ VERIFY_EVALUATION_COUNT( call_ref_1(Ar, A), 0);
+ VERIFY_EVALUATION_COUNT( call_ref_2(Ar, A), 0);
+
+ Ref<SparseMatrix<float,RowMajor> > Br(B);
+ VERIFY_EVALUATION_COUNT( call_ref_1(Br.transpose(), Br.transpose()), 0);
+ VERIFY_EVALUATION_COUNT( call_ref_2(Br, Br), 1);
+ VERIFY_EVALUATION_COUNT( call_ref_2(Br.transpose(), Br.transpose()), 0);
+
+ Ref<const SparseMatrix<float> > Arc(A);
+// VERIFY_EVALUATION_COUNT( call_ref_1(Arc, Arc), 0); // does not compile on purpose
+ VERIFY_EVALUATION_COUNT( call_ref_2(Arc, Arc), 0);
+
+ VERIFY_EVALUATION_COUNT( call_ref_2(A.middleCols(1,3), A.middleCols(1,3)), 0);
+
+ VERIFY_EVALUATION_COUNT( call_ref_2(A.col(2), A.col(2)), 0);
+
+ VERIFY_EVALUATION_COUNT( call_ref_2(A.block(1,1,3,3), A.block(1,1,3,3)), 1); // should be 0 (only the outer start/nonzero-count arrays would need to be allocated)
+}
+
+void test_sparse_ref()
+{
+ for(int i = 0; i < g_repeat; i++) {
+ CALL_SUBTEST_1( check_const_correctness(SparseMatrix<float>()) );
+ CALL_SUBTEST_1( check_const_correctness(SparseMatrix<double,RowMajor>()) );
+ CALL_SUBTEST_2( call_ref() );
+ }
+}
diff --git a/test/sparse_solver.h b/test/sparse_solver.h
index ee350d561..6cf99e0b0 100644
--- a/test/sparse_solver.h
+++ b/test/sparse_solver.h
@@ -32,7 +32,7 @@ void check_sparse_solving(Solver& solver, const typename Solver::MatrixType& A,
x = solver.solve(b);
if (solver.info() != Success)
{
- std::cerr << "sparse solver testing: solving failed\n";
+ std::cerr << "sparse solver testing: solving failed (" << typeid(Solver).name() << ")\n";
return;
}
VERIFY(oldb.isApprox(b) && "sparse solver testing: the rhs should not be modified!");
@@ -75,7 +75,8 @@ void check_sparse_solving(Solver& solver, const typename Solver::MatrixType& A,
xm = solver.solve(bm);
if (solver.info() != Success)
{
- std::cerr << "sparse solver testing: solving failed\n";
+ std::cerr << "sparse solver testing: solving with a Map failed\n";
+ exit(0);
return;
}
VERIFY(oldb.isApprox(bm) && "sparse solver testing: the rhs should not be modified!");
@@ -194,7 +195,10 @@ int generate_sparse_spd_problem(Solver& , typename Solver::MatrixType& A, typena
dA = dM * dM.adjoint();
halfA.resize(size,size);
- halfA.template selfadjointView<Solver::UpLo>().rankUpdate(M);
+ if(Solver::UpLo==(Lower|Upper))
+ halfA = A;
+ else
+ halfA.template selfadjointView<Solver::UpLo>().rankUpdate(M);
return size;
}
diff --git a/unsupported/Eigen/src/IterativeSolvers/DGMRES.h b/unsupported/Eigen/src/IterativeSolvers/DGMRES.h
index 0e1b7d977..52eb65a2f 100644
--- a/unsupported/Eigen/src/IterativeSolvers/DGMRES.h
+++ b/unsupported/Eigen/src/IterativeSolvers/DGMRES.h
@@ -150,7 +150,7 @@ class DGMRES : public IterativeSolverBase<DGMRES<_MatrixType,_Preconditioner> >
m_error = Base::m_tolerance;
typename Dest::ColXpr xj(x,j);
- dgmres(*mp_matrix, b.col(j), xj, Base::m_preconditioner);
+ dgmres(mp_matrix, b.col(j), xj, Base::m_preconditioner);
}
m_info = failed ? NumericalIssue
: m_error <= Base::m_tolerance ? Success
diff --git a/unsupported/Eigen/src/IterativeSolvers/GMRES.h b/unsupported/Eigen/src/IterativeSolvers/GMRES.h
index cd15ce0bf..30f82b52e 100644
--- a/unsupported/Eigen/src/IterativeSolvers/GMRES.h
+++ b/unsupported/Eigen/src/IterativeSolvers/GMRES.h
@@ -250,21 +250,8 @@ struct traits<GMRES<_MatrixType,_Preconditioner> >
* \endcode
*
* By default the iterations start with x=0 as an initial guess of the solution.
- * One can control the start using the solveWithGuess() method. Here is a step by
- * step execution example starting with a random guess and printing the evolution
- * of the estimated error:
- * * \code
- * x = VectorXd::Random(n);
- * solver.setMaxIterations(1);
- * int i = 0;
- * do {
- * x = solver.solveWithGuess(b,x);
- * std::cout << i << " : " << solver.error() << std::endl;
- * ++i;
- * } while (solver.info()!=Success && i<100);
- * \endcode
- * Note that such a step by step excution is slightly slower.
- *
+ * One can control the start using the solveWithGuess() method.
+ *
* \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner
*/
template< typename _MatrixType, typename _Preconditioner>
@@ -327,7 +314,7 @@ public:
m_error = Base::m_tolerance;
typename Dest::ColXpr xj(x,j);
- if(!internal::gmres(*mp_matrix, b.col(j), xj, Base::m_preconditioner, m_iterations, m_restart, m_error))
+ if(!internal::gmres(mp_matrix, b.col(j), xj, Base::m_preconditioner, m_iterations, m_restart, m_error))
failed = true;
}
m_info = failed ? NumericalIssue
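Although the step-by-step documentation example is dropped, solveWithGuess() remains the way to warm-start the iterations. A minimal sketch (the diagonal test matrix is made up, and x0 stands for any previously computed estimate):

```cpp
#include <Eigen/Sparse>
#include <unsupported/Eigen/IterativeSolvers>

int main()
{
  const int n = 50;
  Eigen::SparseMatrix<double> A(n,n);
  for(int i=0; i<n; ++i)
    A.insert(i,i) = i + 1.0;            // trivially invertible diagonal matrix

  Eigen::VectorXd b  = Eigen::VectorXd::Ones(n);
  Eigen::VectorXd x0 = Eigen::VectorXd::Zero(n);  // any previously computed estimate

  Eigen::GMRES<Eigen::SparseMatrix<double> > solver(A);
  Eigen::VectorXd x = solver.solveWithGuess(b, x0);
  return solver.info() == Eigen::Success ? 0 : 1;
}
```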
diff --git a/unsupported/Eigen/src/IterativeSolvers/MINRES.h b/unsupported/Eigen/src/IterativeSolvers/MINRES.h
index aaf42c78a..c4d969a72 100644
--- a/unsupported/Eigen/src/IterativeSolvers/MINRES.h
+++ b/unsupported/Eigen/src/IterativeSolvers/MINRES.h
@@ -165,8 +165,8 @@ namespace Eigen {
* The vectors x and b can be either dense or sparse.
*
* \tparam _MatrixType the type of the sparse matrix A, can be a dense or a sparse matrix.
- * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower
- * or Upper. Default is Lower.
+ * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower,
+ * Upper, or Lower|Upper in which case the full matrix entries will be considered. Default is Lower.
* \tparam _Preconditioner the type of the preconditioner. Default is DiagonalPreconditioner
*
* The maximal number of iterations and tolerance value can be controlled via the setMaxIterations()
@@ -189,20 +189,7 @@ namespace Eigen {
* \endcode
*
* By default the iterations start with x=0 as an initial guess of the solution.
- * One can control the start using the solveWithGuess() method. Here is a step by
- * step execution example starting with a random guess and printing the evolution
- * of the estimated error:
- * * \code
- * x = VectorXd::Random(n);
- * mr.setMaxIterations(1);
- * int i = 0;
- * do {
- * x = mr.solveWithGuess(b,x);
- * std::cout << i << " : " << mr.error() << std::endl;
- * ++i;
- * } while (mr.info()!=Success && i<100);
- * \endcode
- * Note that such a step by step excution is slightly slower.
+ * One can control the start using the solveWithGuess() method.
*
* \sa class ConjugateGradient, BiCGSTAB, SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner
*/
@@ -250,6 +237,11 @@ namespace Eigen {
template<typename Rhs,typename Dest>
void _solve_with_guess_impl(const Rhs& b, Dest& x) const
{
+ typedef typename internal::conditional<UpLo==(Lower|Upper),
+ Ref<const MatrixType>&,
+ SparseSelfAdjointView<const Ref<const MatrixType>, UpLo>
+ >::type MatrixWrapperType;
+
m_iterations = Base::maxIterations();
m_error = Base::m_tolerance;
@@ -259,7 +251,7 @@ namespace Eigen {
m_error = Base::m_tolerance;
typename Dest::ColXpr xj(x,j);
- internal::minres(mp_matrix->template selfadjointView<UpLo>(), b.col(j), xj,
+ internal::minres(MatrixWrapperType(mp_matrix), b.col(j), xj,
Base::m_preconditioner, m_iterations, m_error);
}
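A short usage sketch of the new Lower|Upper option, mirroring the minres_colmajor_uplo_diag test added below (the symmetric indefinite diagonal system is made up for illustration):

```cpp
#include <Eigen/Sparse>
#include <unsupported/Eigen/IterativeSolvers>
#include <iostream>

int main()
{
  const int n = 100;
  Eigen::SparseMatrix<double> A(n,n);
  for(int i=0; i<n; ++i)
    A.insert(i,i) = (i%2==0) ? 1.0 : -1.0;  // symmetric but indefinite

  Eigen::VectorXd b = Eigen::VectorXd::Random(n);

  // Lower|Upper: the stored matrix is used as-is, no selfadjoint view is
  // wrapped around one triangular half (cf. the MatrixWrapperType branch above).
  Eigen::MINRES<Eigen::SparseMatrix<double>, Eigen::Lower|Eigen::Upper> mr(A);
  Eigen::VectorXd x = mr.solve(b);
  std::cout << "#iterations: " << mr.iterations()
            << ", estimated error: " << mr.error() << std::endl;
}
```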
diff --git a/unsupported/test/minres.cpp b/unsupported/test/minres.cpp
index 81b762c37..8b300b78a 100644
--- a/unsupported/test/minres.cpp
+++ b/unsupported/test/minres.cpp
@@ -21,6 +21,7 @@ template<typename T> void test_minres_T()
// Diagonal preconditioner
MINRES<SparseMatrix<T>, Lower, DiagonalPreconditioner<T> > minres_colmajor_lower_diag;
MINRES<SparseMatrix<T>, Upper, DiagonalPreconditioner<T> > minres_colmajor_upper_diag;
+ MINRES<SparseMatrix<T>, Lower|Upper, DiagonalPreconditioner<T> > minres_colmajor_uplo_diag;
// call tests for SPD matrix
CALL_SUBTEST( check_sparse_spd_solving(minres_colmajor_lower_I) );
@@ -28,6 +29,7 @@ template<typename T> void test_minres_T()
CALL_SUBTEST( check_sparse_spd_solving(minres_colmajor_lower_diag) );
CALL_SUBTEST( check_sparse_spd_solving(minres_colmajor_upper_diag) );
+ CALL_SUBTEST( check_sparse_spd_solving(minres_colmajor_uplo_diag) );
// TO DO: symmetric semi-definite matrix
// TO DO: symmetric indefinite matrix