author     Konstantinos Margaritis <markos@freevec.org>  2014-10-22 10:42:18 +0300
committer  Konstantinos Margaritis <markos@freevec.org>  2014-10-22 10:42:18 +0300
commit     fcb3573d17826e0aabffea04c1ba816ec1448581 (patch)
tree       88a1f83762619bb1d23f1369b7dc30112afd0838
parent     fae4fd7a26ee31fbf1eaa5d3581916ccee3c004f (diff)
parent     cf09c5f687c171d2e8b760358a4d48da1c010ce0 (diff)
Merged eigen/eigen into default
-rw-r--r--  Eigen/src/Cholesky/LDLT.h                                    |  4
-rw-r--r--  Eigen/src/Core/Diagonal.h                                    |  5
-rw-r--r--  Eigen/src/Core/Fuzzy.h                                       |  3
-rw-r--r--  Eigen/src/Core/GenericPacketMath.h                           |  4
-rw-r--r--  Eigen/src/Core/MathFunctions.h                               | 16
-rw-r--r--  Eigen/src/Core/StableNorm.h                                  | 10
-rw-r--r--  Eigen/src/Core/arch/SSE/MathFunctions.h                      |  6
-rw-r--r--  Eigen/src/Core/functors/BinaryFunctors.h                     |  6
-rw-r--r--  Eigen/src/Core/util/Macros.h                                 |  5
-rw-r--r--  Eigen/src/Eigenvalues/EigenSolver.h                          |  6
-rw-r--r--  Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h               |  3
-rw-r--r--  Eigen/src/Geometry/Quaternion.h                              |  3
-rw-r--r--  Eigen/src/Geometry/Rotation2D.h                              |  6
-rw-r--r--  Eigen/src/SVD/JacobiSVD.h                                    |  3
-rw-r--r--  Eigen/src/SparseCore/SparseFuzzy.h                           |  3
-rw-r--r--  Eigen/src/SparseCore/SparseVector.h                          |  5
-rw-r--r--  Eigen/src/SparseQR/SparseQR.h                                |  9
-rw-r--r--  test/CMakeLists.txt                                          |  2
-rw-r--r--  test/cholesky.cpp                                            | 12
-rw-r--r--  test/geo_transformations.cpp                                 | 17
-rw-r--r--  test/main.h                                                  | 20
-rw-r--r--  test/packetmath.cpp                                          | 22
-rw-r--r--  test/sparse_solver.h                                         | 28
-rw-r--r--  test/stable_norm.cpp                                         | 20
-rw-r--r--  unsupported/Eigen/src/AutoDiff/AutoDiffScalar.h              |  1
-rw-r--r--  unsupported/Eigen/src/BDCSVD/BDCSVD.h                        |  8
-rw-r--r--  unsupported/Eigen/src/IterativeSolvers/IncompleteCholesky.h  |  3
27 files changed, 148 insertions, 82 deletions
diff --git a/Eigen/src/Cholesky/LDLT.h b/Eigen/src/Cholesky/LDLT.h
index b4016cd6c..5acbf4651 100644
--- a/Eigen/src/Cholesky/LDLT.h
+++ b/Eigen/src/Cholesky/LDLT.h
@@ -433,6 +433,7 @@ LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::compute(const MatrixType& a)
m_transpositions.resize(size);
m_isInitialized = false;
m_temporary.resize(size);
+ m_sign = internal::ZeroSign;
internal::ldlt_inplace<UpLo>::unblocked(m_matrix, m_transpositions, m_temporary, m_sign);
@@ -487,11 +488,10 @@ void LDLT<_MatrixType,_UpLo>::_solve_impl(const RhsType &rhs, DstType &dst) cons
// dst = D^-1 (L^-1 P b)
// more precisely, use pseudo-inverse of D (see bug 241)
using std::abs;
- EIGEN_USING_STD_MATH(max);
const typename Diagonal<const MatrixType>::RealReturnType vecD(vectorD());
// In some previous versions, tolerance was set to the max of 1/highest and the maximal diagonal entry * epsilon
// as motivated by LAPACK's xGELSS:
- // RealScalar tolerance = (max)(vectorD.array().abs().maxCoeff() *NumTraits<RealScalar>::epsilon(),RealScalar(1) / NumTraits<RealScalar>::highest());
+ // RealScalar tolerance = numext::maxi(vectorD.array().abs().maxCoeff() *NumTraits<RealScalar>::epsilon(),RealScalar(1) / NumTraits<RealScalar>::highest());
// However, LDLT is not rank revealing, and so adjusting the tolerance wrt the highest
// diagonal element is not well justified and leads to numerical issues in some cases.
// Moreover, Lapack's xSYTRS routines use 0 for the tolerance.
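
The m_sign reset added in compute() above matters when a single LDLT object is reused for several matrices, which is exactly what the updated cholesky test further down starts doing. A minimal sketch of that reuse pattern (hypothetical values; the printed flags follow the definiteness checks in the test):

    #include <Eigen/Cholesky>
    #include <iostream>

    int main()
    {
      Eigen::Matrix2d a, b;
      a << 1, 0, 0, -1;   // indefinite
      b << 0, 0, 0, 1;    // positive semi-definite

      Eigen::LDLT<Eigen::Matrix2d> ldlt(2);  // preallocated, then reused
      ldlt.compute(a);
      std::cout << ldlt.isPositive() << " " << ldlt.isNegative() << "\n";  // 0 0
      ldlt.compute(b);  // must not inherit the sign cached for 'a'
      std::cout << ldlt.isPositive() << " " << ldlt.isNegative() << "\n";  // 1 0
      return 0;
    }
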
diff --git a/Eigen/src/Core/Diagonal.h b/Eigen/src/Core/Diagonal.h
index 6ffc0c762..26a58d664 100644
--- a/Eigen/src/Core/Diagonal.h
+++ b/Eigen/src/Core/Diagonal.h
@@ -77,9 +77,8 @@ template<typename MatrixType, int _DiagIndex> class Diagonal
EIGEN_DEVICE_FUNC
inline Index rows() const
{
- EIGEN_USING_STD_MATH(min);
- return m_index.value()<0 ? (min)(Index(m_matrix.cols()),Index(m_matrix.rows()+m_index.value()))
- : (min)(Index(m_matrix.rows()),Index(m_matrix.cols()-m_index.value()));
+ return m_index.value()<0 ? numext::mini(Index(m_matrix.cols()),Index(m_matrix.rows()+m_index.value()))
+ : numext::mini(Index(m_matrix.rows()),Index(m_matrix.cols()-m_index.value()));
}
EIGEN_DEVICE_FUNC
diff --git a/Eigen/src/Core/Fuzzy.h b/Eigen/src/Core/Fuzzy.h
index 8cd069a0d..3e403a09d 100644
--- a/Eigen/src/Core/Fuzzy.h
+++ b/Eigen/src/Core/Fuzzy.h
@@ -22,10 +22,9 @@ struct isApprox_selector
EIGEN_DEVICE_FUNC
static bool run(const Derived& x, const OtherDerived& y, const typename Derived::RealScalar& prec)
{
- EIGEN_USING_STD_MATH(min);
typename internal::nested_eval<Derived,2>::type nested(x);
typename internal::nested_eval<OtherDerived,2>::type otherNested(y);
- return (nested - otherNested).cwiseAbs2().sum() <= prec * prec * (min)(nested.cwiseAbs2().sum(), otherNested.cwiseAbs2().sum());
+ return (nested - otherNested).cwiseAbs2().sum() <= prec * prec * numext::mini(nested.cwiseAbs2().sum(), otherNested.cwiseAbs2().sum());
}
};
diff --git a/Eigen/src/Core/GenericPacketMath.h b/Eigen/src/Core/GenericPacketMath.h
index a1fcb82fb..065ccf7ac 100644
--- a/Eigen/src/Core/GenericPacketMath.h
+++ b/Eigen/src/Core/GenericPacketMath.h
@@ -126,12 +126,12 @@ pdiv(const Packet& a,
/** \internal \returns the min of \a a and \a b (coeff-wise) */
template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
pmin(const Packet& a,
- const Packet& b) { EIGEN_USING_STD_MATH(min); return (min)(a, b); }
+ const Packet& b) { return numext::mini(a, b); }
/** \internal \returns the max of \a a and \a b (coeff-wise) */
template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
pmax(const Packet& a,
- const Packet& b) { EIGEN_USING_STD_MATH(max); return (max)(a, b); }
+ const Packet& b) { return numext::maxi(a, b); }
/** \internal \returns the absolute value of \a a */
template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
diff --git a/Eigen/src/Core/MathFunctions.h b/Eigen/src/Core/MathFunctions.h
index 73859b0ee..4c5fc1cae 100644
--- a/Eigen/src/Core/MathFunctions.h
+++ b/Eigen/src/Core/MathFunctions.h
@@ -591,6 +591,22 @@ inline EIGEN_MATHFUNC_RETVAL(random, Scalar) random()
****************************************************************************/
namespace numext {
+
+template<typename T>
+EIGEN_DEVICE_FUNC
+inline T mini(const T& x, const T& y)
+{
+ EIGEN_USING_STD_MATH(min);
+ return min EIGEN_NOT_A_MACRO (x,y);
+}
+
+template<typename T>
+EIGEN_DEVICE_FUNC
+inline T maxi(const T& x, const T& y)
+{
+ EIGEN_USING_STD_MATH(max);
+ return max EIGEN_NOT_A_MACRO (x,y);
+}
template<typename Scalar>
EIGEN_DEVICE_FUNC
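
The new numext::mini / numext::maxi helpers centralize the std::min / std::max dance (EIGEN_USING_STD_MATH plus parenthesized calls) that the rest of this changeset removes from call sites. A minimal caller sketch, assuming only what is added above:

    #include <Eigen/Core>
    #include <cassert>

    int main()
    {
      // Both arguments must have the same type T; the helpers forward to
      // std::min / std::max while remaining usable in device code.
      double a = 3.0, b = 7.0;
      assert(Eigen::numext::mini(a, b) == 3.0);
      assert(Eigen::numext::maxi(a, b) == 7.0);
      return 0;
    }
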
diff --git a/Eigen/src/Core/StableNorm.h b/Eigen/src/Core/StableNorm.h
index 64d43e1b1..0b7e39827 100644
--- a/Eigen/src/Core/StableNorm.h
+++ b/Eigen/src/Core/StableNorm.h
@@ -17,7 +17,6 @@ namespace internal {
template<typename ExpressionType, typename Scalar>
inline void stable_norm_kernel(const ExpressionType& bl, Scalar& ssq, Scalar& scale, Scalar& invScale)
{
- using std::max;
Scalar maxCoeff = bl.cwiseAbs().maxCoeff();
if(maxCoeff>scale)
@@ -58,8 +57,6 @@ blueNorm_impl(const EigenBase<Derived>& _vec)
typedef typename Derived::RealScalar RealScalar;
typedef typename Derived::Index Index;
using std::pow;
- EIGEN_USING_STD_MATH(min);
- EIGEN_USING_STD_MATH(max);
using std::sqrt;
using std::abs;
const Derived& vec(_vec.derived());
@@ -136,8 +133,8 @@ blueNorm_impl(const EigenBase<Derived>& _vec)
}
else
return sqrt(amed);
- asml = (min)(abig, amed);
- abig = (max)(abig, amed);
+ asml = numext::mini(abig, amed);
+ abig = numext::maxi(abig, amed);
if(asml <= abig*relerr)
return abig;
else
@@ -160,7 +157,6 @@ template<typename Derived>
inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
MatrixBase<Derived>::stableNorm() const
{
- EIGEN_USING_STD_MATH(min);
using std::sqrt;
const Index blockSize = 4096;
RealScalar scale(0);
@@ -174,7 +170,7 @@ MatrixBase<Derived>::stableNorm() const
if (bi>0)
internal::stable_norm_kernel(this->head(bi), ssq, scale, invScale);
for (; bi<n; bi+=blockSize)
- internal::stable_norm_kernel(this->segment(bi,(min)(blockSize, n - bi)).template forceAlignedAccessIf<Alignment>(), ssq, scale, invScale);
+ internal::stable_norm_kernel(this->segment(bi,numext::mini(blockSize, n - bi)).template forceAlignedAccessIf<Alignment>(), ssq, scale, invScale);
return scale * sqrt(ssq);
}
diff --git a/Eigen/src/Core/arch/SSE/MathFunctions.h b/Eigen/src/Core/arch/SSE/MathFunctions.h
index 8f78b3a6c..9ffba5b41 100644
--- a/Eigen/src/Core/arch/SSE/MathFunctions.h
+++ b/Eigen/src/Core/arch/SSE/MathFunctions.h
@@ -52,7 +52,7 @@ Packet4f plog<Packet4f>(const Packet4f& _x)
Packet4i emm0;
- Packet4f invalid_mask = _mm_cmplt_ps(x, _mm_setzero_ps());
+ Packet4f invalid_mask = _mm_cmpnge_ps(x, _mm_setzero_ps()); // not greater equal is true if x is NaN
Packet4f iszero_mask = _mm_cmpeq_ps(x, _mm_setzero_ps());
x = pmax(x, p4f_min_norm_pos); /* cut off denormalized stuff */
@@ -167,7 +167,7 @@ Packet4f pexp<Packet4f>(const Packet4f& _x)
emm0 = _mm_cvttps_epi32(fx);
emm0 = _mm_add_epi32(emm0, p4i_0x7f);
emm0 = _mm_slli_epi32(emm0, 23);
- return pmul(y, Packet4f(_mm_castsi128_ps(emm0)));
+ return pmax(pmul(y, Packet4f(_mm_castsi128_ps(emm0))), _x);
}
template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
Packet2d pexp<Packet2d>(const Packet2d& _x)
@@ -241,7 +241,7 @@ Packet2d pexp<Packet2d>(const Packet2d& _x)
emm0 = _mm_add_epi32(emm0, p4i_1023_0);
emm0 = _mm_slli_epi32(emm0, 20);
emm0 = _mm_shuffle_epi32(emm0, _MM_SHUFFLE(1,2,0,3));
- return pmul(x, Packet2d(_mm_castsi128_pd(emm0)));
+ return pmax(pmul(x, Packet2d(_mm_castsi128_pd(emm0))), _x);
}
/* evaluation of 4 sines at once, using SSE2 intrinsics.
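
Both pexp changes and the plog mask change are about NaN propagation: the vectorized paths are brought in line with the scalar functions, as the new packetmath checks further down verify. A scalar-level sketch of the behaviour being matched (standard <cmath> semantics only):

    #include <cassert>
    #include <cmath>
    #include <limits>

    int main()
    {
      const float nan = std::numeric_limits<float>::quiet_NaN();
      assert(std::isnan(std::exp(nan)));    // pexp(NaN) must stay NaN
      assert(std::isnan(std::log(nan)));    // plog(NaN) must stay NaN
      assert(std::isnan(std::log(-1.0f)));  // plog of a negative value is NaN
      return 0;
    }
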
diff --git a/Eigen/src/Core/functors/BinaryFunctors.h b/Eigen/src/Core/functors/BinaryFunctors.h
index 157d075a7..9c96181c7 100644
--- a/Eigen/src/Core/functors/BinaryFunctors.h
+++ b/Eigen/src/Core/functors/BinaryFunctors.h
@@ -115,7 +115,7 @@ struct functor_traits<scalar_conj_product_op<LhsScalar,RhsScalar> > {
*/
template<typename Scalar> struct scalar_min_op {
EIGEN_EMPTY_STRUCT_CTOR(scalar_min_op)
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { EIGEN_USING_STD_MATH(min); return (min)(a, b); }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { return numext::mini(a, b); }
template<typename Packet>
EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const
{ return internal::pmin(a,b); }
@@ -138,7 +138,7 @@ struct functor_traits<scalar_min_op<Scalar> > {
*/
template<typename Scalar> struct scalar_max_op {
EIGEN_EMPTY_STRUCT_CTOR(scalar_max_op)
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { EIGEN_USING_STD_MATH(max); return (max)(a, b); }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { return numext::maxi(a, b); }
template<typename Packet>
EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const
{ return internal::pmax(a,b); }
@@ -164,8 +164,6 @@ template<typename Scalar> struct scalar_hypot_op {
// typedef typename NumTraits<Scalar>::Real result_type;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& _x, const Scalar& _y) const
{
- EIGEN_USING_STD_MATH(max);
- EIGEN_USING_STD_MATH(min);
using std::sqrt;
Scalar p, qp;
if(_x>_y)
diff --git a/Eigen/src/Core/util/Macros.h b/Eigen/src/Core/util/Macros.h
index f9b908e22..c09854951 100644
--- a/Eigen/src/Core/util/Macros.h
+++ b/Eigen/src/Core/util/Macros.h
@@ -86,6 +86,11 @@
#define EIGEN_ALIGN 0
#endif
+
+// This macro can be used to prevent macro expansion, e.g.:
+// std::max EIGEN_NOT_A_MACRO (a,b)
+#define EIGEN_NOT_A_MACRO
+
// EIGEN_ALIGN_STATICALLY is the true test whether we want to align arrays on the stack or not. It takes into account both the user choice to explicitly disable
// alignment (EIGEN_DONT_ALIGN_STATICALLY) and the architecture config (EIGEN_ARCH_WANTS_STACK_ALIGNMENT). Henceforth, only EIGEN_ALIGN_STATICALLY should be used.
#if EIGEN_ARCH_WANTS_STACK_ALIGNMENT && !defined(EIGEN_DONT_ALIGN_STATICALLY)
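
EIGEN_NOT_A_MACRO expands to nothing; placing it between a function name and its argument list keeps a function-style macro of the same name (as defined by e.g. <windows.h>) from firing, which is what numext::mini/maxi rely on. A self-contained sketch of the trick (the local min macro stands in for the problematic one):

    #include <algorithm>

    // Stand-ins: the problematic macro and the empty guard from this diff.
    #define min(a, b) ((a) < (b) ? (a) : (b))
    #define EIGEN_NOT_A_MACRO

    int main()
    {
      int x = 1, y = 2;
      int m = (std::min)(x, y);                   // classic parenthesized workaround
      int n = std::min EIGEN_NOT_A_MACRO (x, y);  // workaround used by numext::mini/maxi
      return (m == 1 && n == 1) ? 0 : 1;
    }
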
diff --git a/Eigen/src/Eigenvalues/EigenSolver.h b/Eigen/src/Eigenvalues/EigenSolver.h
index 8a83b85bb..9372021ff 100644
--- a/Eigen/src/Eigenvalues/EigenSolver.h
+++ b/Eigen/src/Eigenvalues/EigenSolver.h
@@ -368,7 +368,6 @@ EigenSolver<MatrixType>::compute(const MatrixType& matrix, bool computeEigenvect
{
using std::sqrt;
using std::abs;
- using std::max;
using numext::isfinite;
eigen_assert(matrix.cols() == matrix.rows());
@@ -409,7 +408,7 @@ EigenSolver<MatrixType>::compute(const MatrixType& matrix, bool computeEigenvect
{
Scalar t0 = m_matT.coeff(i+1, i);
Scalar t1 = m_matT.coeff(i, i+1);
- Scalar maxval = (max)(abs(p),(max)(abs(t0),abs(t1)));
+ Scalar maxval = numext::maxi(abs(p),numext::maxi(abs(t0),abs(t1)));
t0 /= maxval;
t1 /= maxval;
Scalar p0 = p/maxval;
@@ -600,8 +599,7 @@ void EigenSolver<MatrixType>::doComputeEigenvectors()
}
// Overflow control
- EIGEN_USING_STD_MATH(max);
- Scalar t = (max)(abs(m_matT.coeff(i,n-1)),abs(m_matT.coeff(i,n)));
+ Scalar t = numext::maxi(abs(m_matT.coeff(i,n-1)),abs(m_matT.coeff(i,n)));
if ((eps * t) * t > Scalar(1))
m_matT.block(i, n-1, size-i, 2) /= t;
diff --git a/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h b/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h
index 1dd2ab45b..54f60b197 100644
--- a/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h
+++ b/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h
@@ -732,7 +732,6 @@ struct direct_selfadjoint_eigenvalues<SolverType,2,false>
EIGEN_DEVICE_FUNC
static inline void run(SolverType& solver, const MatrixType& mat, int options)
{
- EIGEN_USING_STD_MATH(max)
EIGEN_USING_STD_MATH(sqrt);
eigen_assert(mat.cols() == 2 && mat.cols() == mat.rows());
@@ -746,7 +745,7 @@ struct direct_selfadjoint_eigenvalues<SolverType,2,false>
// map the matrix coefficients to [-1:1] to avoid over- and underflow.
Scalar scale = mat.cwiseAbs().maxCoeff();
- scale = (max)(scale,Scalar(1));
+ scale = numext::maxi(scale,Scalar(1));
MatrixType scaledMat = mat / scale;
// Compute the eigenvalues
diff --git a/Eigen/src/Geometry/Quaternion.h b/Eigen/src/Geometry/Quaternion.h
index 216e5b12f..508eba767 100644
--- a/Eigen/src/Geometry/Quaternion.h
+++ b/Eigen/src/Geometry/Quaternion.h
@@ -571,7 +571,6 @@ template<class Derived>
template<typename Derived1, typename Derived2>
inline Derived& QuaternionBase<Derived>::setFromTwoVectors(const MatrixBase<Derived1>& a, const MatrixBase<Derived2>& b)
{
- EIGEN_USING_STD_MATH(max);
using std::sqrt;
Vector3 v0 = a.normalized();
Vector3 v1 = b.normalized();
@@ -587,7 +586,7 @@ inline Derived& QuaternionBase<Derived>::setFromTwoVectors(const MatrixBase<Deri
// which yields a singular value problem
if (c < Scalar(-1)+NumTraits<Scalar>::dummy_precision())
{
- c = (max)(c,Scalar(-1));
+ c = numext::maxi(c,Scalar(-1));
Matrix<Scalar,2,3> m; m << v0.transpose(), v1.transpose();
JacobiSVD<Matrix<Scalar,2,3> > svd(m, ComputeFullV);
Vector3 axis = svd.matrixV().col(2);
diff --git a/Eigen/src/Geometry/Rotation2D.h b/Eigen/src/Geometry/Rotation2D.h
index 776c36144..4feb3d4d2 100644
--- a/Eigen/src/Geometry/Rotation2D.h
+++ b/Eigen/src/Geometry/Rotation2D.h
@@ -71,11 +71,11 @@ public:
inline Scalar& angle() { return m_angle; }
/** \returns the inverse rotation */
- inline Rotation2D inverse() const { return -m_angle; }
+ inline Rotation2D inverse() const { return Rotation2D(-m_angle); }
/** Concatenates two rotations */
inline Rotation2D operator*(const Rotation2D& other) const
- { return m_angle + other.m_angle; }
+ { return Rotation2D(m_angle + other.m_angle); }
/** Concatenates two rotations */
inline Rotation2D& operator*=(const Rotation2D& other)
@@ -93,7 +93,7 @@ public:
* parameter \a t. It is in fact equivalent to a linear interpolation.
*/
inline Rotation2D slerp(const Scalar& t, const Rotation2D& other) const
- { return m_angle * (1-t) + other.angle() * t; }
+ { return Rotation2D(m_angle * (1-t) + other.angle() * t); }
/** \returns \c *this with scalar type casted to \a NewScalarType
*
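
The three Rotation2D members now return an explicitly constructed Rotation2D instead of a bare angle, so the angle-to-rotation conversion no longer happens implicitly; slerp is, as the comment says, plain linear interpolation of the two angles. A short usage sketch mirroring the new geo_transformations checks (hypothetical angles):

    #include <Eigen/Geometry>
    #include <cassert>
    #include <cmath>

    int main()
    {
      Eigen::Rotation2D<double> r0(0.3), r1(1.2);
      assert(std::abs((r0 * r0.inverse()).angle()) < 1e-12);       // composition with the inverse
      assert(std::abs(r0.slerp(0.0, r1).angle() - 0.3) < 1e-12);   // t = 0 returns r0's angle
      assert(std::abs(r0.slerp(1.0, r1).angle() - 1.2) < 1e-12);   // t = 1 returns r1's angle
      assert(std::abs(r0.slerp(0.5, r1).angle() - 0.75) < 1e-12);  // midpoint of the angles
      return 0;
    }
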
diff --git a/Eigen/src/SVD/JacobiSVD.h b/Eigen/src/SVD/JacobiSVD.h
index 7a8aa8d3f..0f7e5b8fe 100644
--- a/Eigen/src/SVD/JacobiSVD.h
+++ b/Eigen/src/SVD/JacobiSVD.h
@@ -723,8 +723,7 @@ JacobiSVD<MatrixType, QRPreconditioner>::compute(const MatrixType& matrix, unsig
// if this 2x2 sub-matrix is not diagonal already...
// notice that this comparison will evaluate to false if any NaN is involved, ensuring that NaN's don't
// keep us iterating forever. Similarly, small denormal numbers are considered zero.
- EIGEN_USING_STD_MATH(max);
- RealScalar threshold = (max)(considerAsZero, precision * (max)(abs(m_workMatrix.coeff(p,p)),
+ RealScalar threshold = numext::maxi(considerAsZero, precision * numext::maxi(abs(m_workMatrix.coeff(p,p)),
abs(m_workMatrix.coeff(q,q))));
// We compare both values to threshold instead of calling max to be robust to NaN (See bug 791)
if(abs(m_workMatrix.coeff(p,q))>threshold || abs(m_workMatrix.coeff(q,p)) > threshold)
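
The comment about bug 791 is worth spelling out: a NaN never compares greater than the threshold, and std::max with a NaN operand can swallow the finite value, so testing each entry separately is what keeps the sweep going. A small sketch of the pitfall (plain standard library, hypothetical values):

    #include <algorithm>
    #include <iostream>
    #include <limits>

    int main()
    {
      const double nan = std::numeric_limits<double>::quiet_NaN();
      const double threshold = 1e-12, big = 2.0;
      std::cout << (std::max(nan, big) > threshold) << "\n";          // 0: max(NaN, big) is NaN here
      std::cout << ((nan > threshold) || (big > threshold)) << "\n";  // 1: the finite entry still triggers
      return 0;
    }
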
diff --git a/Eigen/src/SparseCore/SparseFuzzy.h b/Eigen/src/SparseCore/SparseFuzzy.h
index a76c1a5e0..7d47eb94d 100644
--- a/Eigen/src/SparseCore/SparseFuzzy.h
+++ b/Eigen/src/SparseCore/SparseFuzzy.h
@@ -16,13 +16,12 @@ template<typename Derived>
template<typename OtherDerived>
bool SparseMatrixBase<Derived>::isApprox(const SparseMatrixBase<OtherDerived>& other, const RealScalar &prec) const
{
- using std::min;
const typename internal::nested_eval<Derived,2,PlainObject>::type actualA(derived());
typename internal::conditional<bool(IsRowMajor)==bool(OtherDerived::IsRowMajor),
const typename internal::nested_eval<OtherDerived,2,PlainObject>::type,
const PlainObject>::type actualB(other.derived());
- return (actualA - actualB).squaredNorm() <= prec * prec * (min)(actualA.squaredNorm(), actualB.squaredNorm());
+ return (actualA - actualB).squaredNorm() <= prec * prec * numext::mini(actualA.squaredNorm(), actualB.squaredNorm());
}
} // end namespace Eigen
diff --git a/Eigen/src/SparseCore/SparseVector.h b/Eigen/src/SparseCore/SparseVector.h
index dfbb2be2e..8b696a476 100644
--- a/Eigen/src/SparseCore/SparseVector.h
+++ b/Eigen/src/SparseCore/SparseVector.h
@@ -386,6 +386,11 @@ class SparseVector<Scalar,_Options,_Index>::InnerIterator
const internal::CompressedStorage<Scalar,Index>& m_data;
Index m_id;
const Index m_end;
+ private:
+ // If you get here, then you're not using the right InnerIterator type, e.g.:
+ // SparseMatrix<double,RowMajor> A;
+ // SparseMatrix<double>::InnerIterator it(A,0);
+ template<typename T> InnerIterator(const SparseMatrixBase<T>&,Index outer=0);
};
template<typename Scalar, int _Options, typename _Index>
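
The private constructor added to SparseVector's InnerIterator is there to turn the mix-up described in the new comment into a compile-time error rather than a silent conversion. The intended pattern is to take the iterator from the container's own type, e.g.:

    #include <Eigen/Sparse>

    int main()
    {
      typedef Eigen::SparseMatrix<double, Eigen::RowMajor> RowMat;
      RowMat A(3, 3);
      A.insert(0, 1) = 1.0;

      // RowMat::InnerIterator walks the inner dimension (a row) of A:
      for (RowMat::InnerIterator it(A, 0); it; ++it)
        (void)it.value();  // visits the 1.0 stored at column 1

      // Misuse from the new comment (wrong iterator type for A):
      //   Eigen::SparseMatrix<double>::InnerIterator it(A, 0);
      return 0;
    }
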
diff --git a/Eigen/src/SparseQR/SparseQR.h b/Eigen/src/SparseQR/SparseQR.h
index f6a9af672..133211488 100644
--- a/Eigen/src/SparseQR/SparseQR.h
+++ b/Eigen/src/SparseQR/SparseQR.h
@@ -325,7 +325,6 @@ template <typename MatrixType, typename OrderingType>
void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat)
{
using std::abs;
- using std::max;
eigen_assert(m_analysisIsok && "analyzePattern() should be called before this step");
Index m = mat.rows();
@@ -377,7 +376,9 @@ void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat)
if(m_useDefaultThreshold)
{
RealScalar max2Norm = 0.0;
- for (int j = 0; j < n; j++) max2Norm = (max)(max2Norm, m_pmat.col(j).norm());
+ for (int j = 0; j < n; j++) max2Norm = numext::maxi(max2Norm, m_pmat.col(j).norm());
+ if(max2Norm==RealScalar(0))
+ max2Norm = RealScalar(1);
pivotThreshold = 20 * (m + n) * max2Norm * NumTraits<RealScalar>::epsilon();
}
@@ -386,7 +387,7 @@ void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat)
Index nonzeroCol = 0; // Record the number of valid pivots
m_Q.startVec(0);
-
+
// Left looking rank-revealing QR factorization: compute a column of R and Q at a time
for (Index col = 0; col < n; ++col)
{
@@ -554,7 +555,7 @@ void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat)
m_R.finalize();
m_R.makeCompressed();
m_isQSorted = false;
-
+
m_nonzeropivots = nonzeroCol;
if(nonzeroCol<n)
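
The max2Norm guard keeps the default pivoting threshold positive when every column of the input is zero; otherwise the epsilon-based formula would collapse to 0. The formula written out for doubles (hypothetical dimensions):

    #include <iostream>
    #include <limits>

    int main()
    {
      const int m = 100, n = 80;            // hypothetical matrix dimensions
      double max2Norm = 0.0;                // largest column 2-norm (all-zero matrix)
      if (max2Norm == 0.0) max2Norm = 1.0;  // the new guard
      const double eps = std::numeric_limits<double>::epsilon();
      const double pivotThreshold = 20 * (m + n) * max2Norm * eps;
      std::cout << pivotThreshold << "\n";  // about 8e-13 instead of exactly 0
      return 0;
    }
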
diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt
index 4132e6481..63ed2c7a4 100644
--- a/test/CMakeLists.txt
+++ b/test/CMakeLists.txt
@@ -306,7 +306,7 @@ endif()
option(EIGEN_TEST_NVCC "Enable NVCC support in unit tests" OFF)
if(EIGEN_TEST_NVCC)
-find_package(CUDA)
+find_package(CUDA 5.0)
if(CUDA_FOUND)
set(CUDA_PROPAGATE_HOST_FLAGS OFF)
diff --git a/test/cholesky.cpp b/test/cholesky.cpp
index a883192ab..33e32a322 100644
--- a/test/cholesky.cpp
+++ b/test/cholesky.cpp
@@ -316,33 +316,35 @@ template<typename MatrixType> void cholesky_definiteness(const MatrixType& m)
{
eigen_assert(m.rows() == 2 && m.cols() == 2);
MatrixType mat;
+ LDLT<MatrixType> ldlt(2);
+
{
mat << 1, 0, 0, -1;
- LDLT<MatrixType> ldlt(mat);
+ ldlt.compute(mat);
VERIFY(!ldlt.isNegative());
VERIFY(!ldlt.isPositive());
}
{
mat << 1, 2, 2, 1;
- LDLT<MatrixType> ldlt(mat);
+ ldlt.compute(mat);
VERIFY(!ldlt.isNegative());
VERIFY(!ldlt.isPositive());
}
{
mat << 0, 0, 0, 0;
- LDLT<MatrixType> ldlt(mat);
+ ldlt.compute(mat);
VERIFY(ldlt.isNegative());
VERIFY(ldlt.isPositive());
}
{
mat << 0, 0, 0, 1;
- LDLT<MatrixType> ldlt(mat);
+ ldlt.compute(mat);
VERIFY(!ldlt.isNegative());
VERIFY(ldlt.isPositive());
}
{
mat << -1, 0, 0, 0;
- LDLT<MatrixType> ldlt(mat);
+ ldlt.compute(mat);
VERIFY(ldlt.isNegative());
VERIFY(!ldlt.isPositive());
}
diff --git a/test/geo_transformations.cpp b/test/geo_transformations.cpp
index 1768d7b8a..e189217eb 100644
--- a/test/geo_transformations.cpp
+++ b/test/geo_transformations.cpp
@@ -98,7 +98,7 @@ template<typename Scalar, int Mode, int Options> void transformations()
Matrix3 matrot1, m;
Scalar a = internal::random<Scalar>(-Scalar(M_PI), Scalar(M_PI));
- Scalar s0 = internal::random<Scalar>();
+ Scalar s0 = internal::random<Scalar>(), s1 = internal::random<Scalar>();
VERIFY_IS_APPROX(v0, AngleAxisx(a, v0.normalized()) * v0);
VERIFY_IS_APPROX(-v0, AngleAxisx(Scalar(M_PI), v0.unitOrthogonal()) * v0);
@@ -394,10 +394,21 @@ template<typename Scalar, int Mode, int Options> void transformations()
Rotation2D<double> r2d1d = r2d1.template cast<double>();
VERIFY_IS_APPROX(r2d1d.template cast<Scalar>(),r2d1);
- t20 = Translation2(v20) * (Rotation2D<Scalar>(s0) * Eigen::Scaling(s0));
- t21 = Translation2(v20) * Rotation2D<Scalar>(s0) * Eigen::Scaling(s0);
+ Rotation2D<Scalar> R0(s0), R1(s1);
+
+ t20 = Translation2(v20) * (R0 * Eigen::Scaling(s0));
+ t21 = Translation2(v20) * R0 * Eigen::Scaling(s0);
+ VERIFY_IS_APPROX(t20,t21);
+
+ t20 = Translation2(v20) * (R0 * R0.inverse() * Eigen::Scaling(s0));
+ t21 = Translation2(v20) * Eigen::Scaling(s0);
VERIFY_IS_APPROX(t20,t21);
+ VERIFY_IS_APPROX(s0, (R0.slerp(0, R1)).angle());
+ VERIFY_IS_APPROX(s1, (R0.slerp(1, R1)).angle());
+ VERIFY_IS_APPROX(s0, (R0.slerp(0.5, R0)).angle());
+ VERIFY_IS_APPROX(Scalar(0), (R0.slerp(0.5, R0.inverse())).angle());
+
// check basic features
{
Rotation2D<Scalar> r1; // default ctor
diff --git a/test/main.h b/test/main.h
index 371c7e602..adf4adb31 100644
--- a/test/main.h
+++ b/test/main.h
@@ -432,6 +432,26 @@ void randomPermutationVector(PermutationVectorType& v, typename PermutationVecto
}
}
+template<typename T> bool isNotNaN(const T& x)
+{
+ return x==x;
+}
+
+template<typename T> bool isNaN(const T& x)
+{
+ return x!=x;
+}
+
+template<typename T> bool isInf(const T& x)
+{
+ return x > NumTraits<T>::highest();
+}
+
+template<typename T> bool isMinusInf(const T& x)
+{
+ return x < NumTraits<T>::lowest();
+}
+
} // end namespace Eigen
template<typename T> struct GetDifferentType;
diff --git a/test/packetmath.cpp b/test/packetmath.cpp
index e716d6d9a..ee0502f69 100644
--- a/test/packetmath.cpp
+++ b/test/packetmath.cpp
@@ -297,6 +297,12 @@ template<typename Scalar> void packetmath_real()
data2[i] = internal::random<Scalar>(-87,88);
}
CHECK_CWISE1_IF(internal::packet_traits<Scalar>::HasExp, std::exp, internal::pexp);
+ {
+ data1[0] = std::numeric_limits<Scalar>::quiet_NaN();
+ packet_helper<internal::packet_traits<Scalar>::HasExp,Packet> h;
+ h.store(data2, internal::pexp(h.load(data1)));
+ VERIFY(isNaN(data2[0]));
+ }
for (int i=0; i<size; ++i)
{
@@ -305,8 +311,22 @@ template<typename Scalar> void packetmath_real()
}
if(internal::random<float>(0,1)<0.1)
data1[internal::random<int>(0, PacketSize)] = 0;
- CHECK_CWISE1_IF(internal::packet_traits<Scalar>::HasLog, std::log, internal::plog);
CHECK_CWISE1_IF(internal::packet_traits<Scalar>::HasSqrt, std::sqrt, internal::psqrt);
+ CHECK_CWISE1_IF(internal::packet_traits<Scalar>::HasLog, std::log, internal::plog);
+ {
+ data1[0] = std::numeric_limits<Scalar>::quiet_NaN();
+ packet_helper<internal::packet_traits<Scalar>::HasLog,Packet> h;
+ h.store(data2, internal::plog(h.load(data1)));
+ VERIFY(isNaN(data2[0]));
+ data1[0] = -1.0f;
+ h.store(data2, internal::plog(h.load(data1)));
+ VERIFY(isNaN(data2[0]));
+#if !EIGEN_FAST_MATH
+ h.store(data2, internal::psqrt(h.load(data1)));
+ VERIFY(isNaN(data2[0]));
+ VERIFY(isNaN(data2[1]));
+#endif
+ }
}
template<typename Scalar> void packetmath_notcomplex()
diff --git a/test/sparse_solver.h b/test/sparse_solver.h
index b10eaebcc..ee350d561 100644
--- a/test/sparse_solver.h
+++ b/test/sparse_solver.h
@@ -15,6 +15,7 @@ void check_sparse_solving(Solver& solver, const typename Solver::MatrixType& A,
{
typedef typename Solver::MatrixType Mat;
typedef typename Mat::Scalar Scalar;
+ typedef typename Mat::Index Index;
DenseRhs refX = dA.lu().solve(db);
{
@@ -35,8 +36,8 @@ void check_sparse_solving(Solver& solver, const typename Solver::MatrixType& A,
return;
}
VERIFY(oldb.isApprox(b) && "sparse solver testing: the rhs should not be modified!");
-
VERIFY(x.isApprox(refX,test_precision<Scalar>()));
+
x.setZero();
// test the analyze/factorize API
solver.analyzePattern(A);
@@ -54,8 +55,31 @@ void check_sparse_solving(Solver& solver, const typename Solver::MatrixType& A,
return;
}
VERIFY(oldb.isApprox(b) && "sparse solver testing: the rhs should not be modified!");
-
VERIFY(x.isApprox(refX,test_precision<Scalar>()));
+
+
+ x.setZero();
+ // test with Map
+ MappedSparseMatrix<Scalar,Mat::Options,Index> Am(A.rows(), A.cols(), A.nonZeros(), const_cast<Index*>(A.outerIndexPtr()), const_cast<Index*>(A.innerIndexPtr()), const_cast<Scalar*>(A.valuePtr()));
+ solver.compute(Am);
+ if (solver.info() != Success)
+ {
+ std::cerr << "sparse solver testing: factorization failed (check_sparse_solving)\n";
+ exit(0);
+ return;
+ }
+ DenseRhs dx(refX);
+ dx.setZero();
+ Map<DenseRhs> xm(dx.data(), dx.rows(), dx.cols());
+ Map<const DenseRhs> bm(db.data(), db.rows(), db.cols());
+ xm = solver.solve(bm);
+ if (solver.info() != Success)
+ {
+ std::cerr << "sparse solver testing: solving failed\n";
+ return;
+ }
+ VERIFY(oldb.isApprox(bm) && "sparse solver testing: the rhs should not be modified!");
+ VERIFY(xm.isApprox(refX,test_precision<Scalar>()));
}
// test dense Block as the result and rhs:
diff --git a/test/stable_norm.cpp b/test/stable_norm.cpp
index 6cd65c64a..650f62a8a 100644
--- a/test/stable_norm.cpp
+++ b/test/stable_norm.cpp
@@ -9,26 +9,6 @@
#include "main.h"
-template<typename T> bool isNotNaN(const T& x)
-{
- return x==x;
-}
-
-template<typename T> bool isNaN(const T& x)
-{
- return x!=x;
-}
-
-template<typename T> bool isInf(const T& x)
-{
- return x > NumTraits<T>::highest();
-}
-
-template<typename T> bool isMinusInf(const T& x)
-{
- return x < NumTraits<T>::lowest();
-}
-
// workaround aggressive optimization in ICC
template<typename T> EIGEN_DONT_INLINE T sub(T a, T b) { return a - b; }
diff --git a/unsupported/Eigen/src/AutoDiff/AutoDiffScalar.h b/unsupported/Eigen/src/AutoDiff/AutoDiffScalar.h
index 590797973..8336c2644 100644
--- a/unsupported/Eigen/src/AutoDiff/AutoDiffScalar.h
+++ b/unsupported/Eigen/src/AutoDiff/AutoDiffScalar.h
@@ -593,7 +593,6 @@ inline const AutoDiffScalar<Matrix<typename internal::traits<DerTypeA>::Scalar,D
atan2(const AutoDiffScalar<DerTypeA>& a, const AutoDiffScalar<DerTypeB>& b)
{
using std::atan2;
- using std::max;
typedef typename internal::traits<DerTypeA>::Scalar Scalar;
typedef AutoDiffScalar<Matrix<Scalar,Dynamic,1> > PlainADS;
PlainADS ret;
diff --git a/unsupported/Eigen/src/BDCSVD/BDCSVD.h b/unsupported/Eigen/src/BDCSVD/BDCSVD.h
index e2551cf88..ac71b0aa8 100644
--- a/unsupported/Eigen/src/BDCSVD/BDCSVD.h
+++ b/unsupported/Eigen/src/BDCSVD/BDCSVD.h
@@ -649,7 +649,6 @@ void BDCSVD<MatrixType>::computeSingVals(const ArrayXr& col0, const ArrayXr& dia
{
using std::abs;
using std::swap;
- using std::max;
Index n = col0.size();
Index actual_n = n;
@@ -728,7 +727,7 @@ void BDCSVD<MatrixType>::computeSingVals(const ArrayXr& col0, const ArrayXr& dia
// rational interpolation: fit a function of the form a / mu + b through the two previous
// iterates and use its zero to compute the next iterate
bool useBisection = fPrev*fCur>0;
- while (fCur!=0 && abs(muCur - muPrev) > 8 * NumTraits<RealScalar>::epsilon() * (max)(abs(muCur), abs(muPrev)) && abs(fCur - fPrev)>NumTraits<RealScalar>::epsilon() && !useBisection)
+ while (fCur!=0 && abs(muCur - muPrev) > 8 * NumTraits<RealScalar>::epsilon() * numext::maxi(abs(muCur), abs(muPrev)) && abs(fCur - fPrev)>NumTraits<RealScalar>::epsilon() && !useBisection)
{
++m_numIters;
@@ -779,7 +778,7 @@ void BDCSVD<MatrixType>::computeSingVals(const ArrayXr& col0, const ArrayXr& dia
#endif
eigen_internal_assert(fLeft * fRight < 0);
- while (rightShifted - leftShifted > 2 * NumTraits<RealScalar>::epsilon() * (max)(abs(leftShifted), abs(rightShifted)))
+ while (rightShifted - leftShifted > 2 * NumTraits<RealScalar>::epsilon() * numext::maxi(abs(leftShifted), abs(rightShifted)))
{
RealScalar midShifted = (leftShifted + rightShifted) / 2;
RealScalar fMid = secularEq(midShifted, col0, diag, perm, diagShifted, shift);
@@ -981,7 +980,6 @@ void BDCSVD<MatrixType>::deflation(Index firstCol, Index lastCol, Index k, Index
{
using std::sqrt;
using std::abs;
- using std::max;
const Index length = lastCol + 1 - firstCol;
Block<MatrixXr,Dynamic,1> col0(m_computed, firstCol+shift, firstCol+shift, length, 1);
@@ -990,7 +988,7 @@ void BDCSVD<MatrixType>::deflation(Index firstCol, Index lastCol, Index k, Index
RealScalar maxDiag = diag.tail((std::max)(Index(1),length-1)).cwiseAbs().maxCoeff();
RealScalar epsilon_strict = NumTraits<RealScalar>::epsilon() * maxDiag;
- RealScalar epsilon_coarse = 8 * NumTraits<RealScalar>::epsilon() * (max)(col0.cwiseAbs().maxCoeff(), maxDiag);
+ RealScalar epsilon_coarse = 8 * NumTraits<RealScalar>::epsilon() * numext::maxi(col0.cwiseAbs().maxCoeff(), maxDiag);
#ifdef EIGEN_BDCSVD_SANITY_CHECKS
assert(m_naiveU.allFinite());
diff --git a/unsupported/Eigen/src/IterativeSolvers/IncompleteCholesky.h b/unsupported/Eigen/src/IterativeSolvers/IncompleteCholesky.h
index dd43de6b3..35cfa315d 100644
--- a/unsupported/Eigen/src/IterativeSolvers/IncompleteCholesky.h
+++ b/unsupported/Eigen/src/IterativeSolvers/IncompleteCholesky.h
@@ -126,7 +126,6 @@ template<typename _MatrixType>
void IncompleteCholesky<Scalar,_UpLo, OrderingType>::factorize(const _MatrixType& mat)
{
using std::sqrt;
- using std::min;
eigen_assert(m_analysisIsOk && "analyzePattern() should be called first");
// Dropping strategies : Keep only the p largest elements per column, where p is the number of elements in the column of the original matrix. Other strategies will be added
@@ -160,7 +159,7 @@ void IncompleteCholesky<Scalar,_UpLo, OrderingType>::factorize(const _MatrixType
for (int j = 0; j < n; j++){
for (int k = colPtr[j]; k < colPtr[j+1]; k++)
vals[k] /= (m_scal(j) * m_scal(rowIdx[k]));
- mindiag = (min)(vals[colPtr[j]], mindiag);
+ mindiag = numext::mini(vals[colPtr[j]], mindiag);
}
if(mindiag < Scalar(0.)) m_shift = m_shift - mindiag;