author    Gael Guennebaud <g.gael@free.fr>    2009-07-28 17:35:07 +0200
committer Gael Guennebaud <g.gael@free.fr>    2009-07-28 17:35:07 +0200
commit    54804eb62642ab1be510e41db9b573c6f6151bf2 (patch)
tree      76eda2dedb4a66be0072425ebd110546211f1f71
parent    264fe82c655a26f3c3ab5057684dbc51cf533056 (diff)
parent    562864bcfb363f603f40ce716c49539fcd1565d3 (diff)
synch with main branch
-rw-r--r--  CTestConfig.cmake                      |   6
-rw-r--r--  Eigen/Core                             |   1
-rw-r--r--  Eigen/QR                               |   2
-rw-r--r--  Eigen/src/Cholesky/LDLT.h              |  22
-rw-r--r--  Eigen/src/Core/Block.h                 |   4
-rw-r--r--  Eigen/src/Core/Dot.h                   | 148
-rw-r--r--  Eigen/src/Core/Functors.h              |   3
-rw-r--r--  Eigen/src/Core/MapBase.h               |  10
-rw-r--r--  Eigen/src/Core/Matrix.h                |  10
-rw-r--r--  Eigen/src/Core/MatrixBase.h            |   1
-rw-r--r--  Eigen/src/Core/NumTraits.h             |   8
-rw-r--r--  Eigen/src/Core/Product.h               |  12
-rw-r--r--  Eigen/src/Core/StableNorm.h            | 194
-rw-r--r--  Eigen/src/QR/SelfAdjointEigenSolver.h  |  30
-rw-r--r--  Eigen/src/SVD/SVD.h                    |  13
-rw-r--r--  bench/bench_norm.cpp                   | 344
-rw-r--r--  test/adjoint.cpp                       |   4
-rw-r--r--  test/mixingtypes.cpp                   |   4
-rw-r--r--  test/product_large.cpp                 |   6
19 files changed, 623 insertions, 199 deletions
diff --git a/CTestConfig.cmake b/CTestConfig.cmake
index 263cda480..0ccd5a1ad 100644
--- a/CTestConfig.cmake
+++ b/CTestConfig.cmake
@@ -5,9 +5,9 @@
## ENABLE_TESTING()
## INCLUDE(CTest)
set(CTEST_PROJECT_NAME "Eigen")
-set(CTEST_NIGHTLY_START_TIME "05:00:00 UTC")
+set(CTEST_NIGHTLY_START_TIME "06:00:00 UTC")
set(CTEST_DROP_METHOD "http")
-set(CTEST_DROP_SITE "my.cdash.org")
-set(CTEST_DROP_LOCATION "/submit.php?project=Eigen")
+set(CTEST_DROP_SITE "eigen.tuxfamily.org")
+set(CTEST_DROP_LOCATION "/CDash/submit.php?project=Eigen")
set(CTEST_DROP_SITE_CDASH TRUE)
diff --git a/Eigen/Core b/Eigen/Core
index 15dae4af0..463529123 100644
--- a/Eigen/Core
+++ b/Eigen/Core
@@ -161,6 +161,7 @@ namespace Eigen {
#include "src/Core/CwiseNullaryOp.h"
#include "src/Core/CwiseUnaryView.h"
#include "src/Core/Dot.h"
+#include "src/Core/StableNorm.h"
#include "src/Core/MapBase.h"
#include "src/Core/Map.h"
#include "src/Core/Block.h"
diff --git a/Eigen/QR b/Eigen/QR
index 97907d1e5..5f36d0987 100644
--- a/Eigen/QR
+++ b/Eigen/QR
@@ -41,7 +41,7 @@ namespace Eigen {
// declare all classes for a given matrix type
#define EIGEN_QR_MODULE_INSTANTIATE_TYPE(MATRIXTYPE,PREFIX) \
- PREFIX template class QR<MATRIXTYPE>; \
+ PREFIX template class HouseholderQR<MATRIXTYPE>; \
PREFIX template class Tridiagonalization<MATRIXTYPE>; \
PREFIX template class HessenbergDecomposition<MATRIXTYPE>; \
PREFIX template class SelfAdjointEigenSolver<MATRIXTYPE>
diff --git a/Eigen/src/Cholesky/LDLT.h b/Eigen/src/Cholesky/LDLT.h
index 18029bab8..200b428af 100644
--- a/Eigen/src/Cholesky/LDLT.h
+++ b/Eigen/src/Cholesky/LDLT.h
@@ -62,7 +62,7 @@ template<typename MatrixType> class LDLT
typedef Matrix<int, MatrixType::RowsAtCompileTime, 1> IntColVectorType;
typedef Matrix<int, 1, MatrixType::RowsAtCompileTime> IntRowVectorType;
- /**
+ /**
* \brief Default Constructor.
*
* The default constructor is useful in cases in which the user intends to
@@ -83,7 +83,7 @@ template<typename MatrixType> class LDLT
inline TriangularView<MatrixType, UnitLowerTriangular> matrixL(void) const
{
ei_assert(m_isInitialized && "LDLT is not initialized.");
- return m_matrix;
+ return m_matrix;
}
/** \returns a vector of integers, whose size is the number of rows of the matrix being decomposed,
@@ -97,24 +97,24 @@ template<typename MatrixType> class LDLT
}
/** \returns the coefficients of the diagonal matrix D */
- inline Diagonal<MatrixType,0> vectorD(void) const
- {
+ inline Diagonal<MatrixType,0> vectorD(void) const
+ {
ei_assert(m_isInitialized && "LDLT is not initialized.");
- return m_matrix.diagonal();
+ return m_matrix.diagonal();
}
/** \returns true if the matrix is positive (semidefinite) */
- inline bool isPositive(void) const
- {
+ inline bool isPositive(void) const
+ {
ei_assert(m_isInitialized && "LDLT is not initialized.");
- return m_sign == 1;
+ return m_sign == 1;
}
/** \returns true if the matrix is negative (semidefinite) */
- inline bool isNegative(void) const
- {
+ inline bool isNegative(void) const
+ {
ei_assert(m_isInitialized && "LDLT is not initialized.");
- return m_sign == -1;
+ return m_sign == -1;
}
template<typename RhsDerived, typename ResultType>
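As a reminder of how the accessors documented in the hunk above are used, here is a small usage sketch written against the Eigen 2 API of this period (not part of the patch; the matrix values are arbitrary):

#include <iostream>
#include <Eigen/Core>
#include <Eigen/Cholesky>
using namespace Eigen;

int main()
{
  Matrix2d a;
  a << 2, 1,
       1, 3;                         // symmetric positive definite
  LDLT<Matrix2d> ldlt(a);            // compute A = L D L^T (with pivoting)
  std::cout << "D = " << ldlt.vectorD().transpose() << "\n";
  std::cout << "positive: " << ldlt.isPositive() << "\n";   // prints 1 here
  return 0;
}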
diff --git a/Eigen/src/Core/Block.h b/Eigen/src/Core/Block.h
index ffe065e8b..cf7730170 100644
--- a/Eigen/src/Core/Block.h
+++ b/Eigen/src/Core/Block.h
@@ -33,8 +33,8 @@
* \param MatrixType the type of the object in which we are taking a block
* \param BlockRows the number of rows of the block we are taking at compile time (optional)
* \param BlockCols the number of columns of the block we are taking at compile time (optional)
- * \param _PacketAccess allows to enforce aligned loads and stores if set to ForceAligned.
- * The default is AsRequested. This parameter is internaly used by Eigen
+ * \param _PacketAccess allows to enforce aligned loads and stores if set to \b ForceAligned.
+ * The default is \b AsRequested. This parameter is internally used by Eigen
* in expressions such as \code mat.block() += other; \endcode and most of
* the time this is the only way it is used.
* \param _DirectAccessStatus \internal used for partial specialization
diff --git a/Eigen/src/Core/Dot.h b/Eigen/src/Core/Dot.h
index c5f2e8505..9e84d72bb 100644
--- a/Eigen/src/Core/Dot.h
+++ b/Eigen/src/Core/Dot.h
@@ -292,154 +292,6 @@ inline typename NumTraits<typename ei_traits<Derived>::Scalar>::Real MatrixBase<
return ei_sqrt(squaredNorm());
}
-/** \returns the \em l2 norm of \c *this using a numerically more stable
- * algorithm.
- *
- * \sa norm(), dot(), squaredNorm(), blueNorm()
- */
-template<typename Derived>
-inline typename NumTraits<typename ei_traits<Derived>::Scalar>::Real
-MatrixBase<Derived>::stableNorm() const
-{
- return this->cwise().abs().redux(ei_scalar_hypot_op<RealScalar>());
-}
-
-/** \internal Computes ibeta^iexp by binary expansion of iexp,
- * exact if ibeta is the machine base */
-template<typename T> inline T bexp(int ibeta, int iexp)
-{
- T tbeta = T(ibeta);
- T res = 1.0;
- int n = iexp;
- if (n<0)
- {
- n = - n;
- tbeta = 1.0/tbeta;
- }
- for(;;)
- {
- if ((n % 2)==0)
- res = res * tbeta;
- n = n/2;
- if (n==0) return res;
- tbeta = tbeta*tbeta;
- }
- return res;
-}
-
-/** \returns the \em l2 norm of \c *this using the Blue's algorithm.
- * A Portable Fortran Program to Find the Euclidean Norm of a Vector,
- * ACM TOMS, Vol 4, Issue 1, 1978.
- *
- * \sa norm(), dot(), squaredNorm(), stableNorm()
- */
-template<typename Derived>
-inline typename NumTraits<typename ei_traits<Derived>::Scalar>::Real
-MatrixBase<Derived>::blueNorm() const
-{
- static int nmax;
- static Scalar b1, b2, s1m, s2m, overfl, rbig, relerr;
- int n;
- Scalar ax, abig, amed, asml;
-
- if(nmax <= 0)
- {
- int nbig, ibeta, it, iemin, iemax, iexp;
- Scalar abig, eps;
- // This program calculates the machine-dependent constants
- // bl, b2, slm, s2m, relerr overfl, nmax
- // from the "basic" machine-dependent numbers
- // nbig, ibeta, it, iemin, iemax, rbig.
- // The following define the basic machine-dependent constants.
- // For portability, the PORT subprograms "ilmaeh" and "rlmach"
- // are used. For any specific computer, each of the assignment
- // statements can be replaced
- nbig = std::numeric_limits<int>::max(); // largest integer
- ibeta = NumTraits<Scalar>::Base; // base for floating-point numbers
- it = NumTraits<Scalar>::Mantissa; // number of base-beta digits in mantissa
- iemin = std::numeric_limits<Scalar>::min_exponent; // minimum exponent
- iemax = std::numeric_limits<Scalar>::max_exponent; // maximum exponent
- rbig = std::numeric_limits<Scalar>::max(); // largest floating-point number
-
- // Check the basic machine-dependent constants.
- if(iemin > 1 - 2*it || 1+it>iemax || (it==2 && ibeta<5)
- || (it<=4 && ibeta <= 3 ) || it<2)
- {
- ei_assert(false && "the algorithm cannot be guaranteed on this computer");
- }
- iexp = -((1-iemin)/2);
- b1 = bexp<Scalar>(ibeta, iexp); // lower boundary of midrange
- iexp = (iemax + 1 - it)/2;
- b2 = bexp<Scalar>(ibeta,iexp); // upper boundary of midrange
-
- iexp = (2-iemin)/2;
- s1m = bexp<Scalar>(ibeta,iexp); // scaling factor for lower range
- iexp = - ((iemax+it)/2);
- s2m = bexp<Scalar>(ibeta,iexp); // scaling factor for upper range
-
- overfl = rbig*s2m; // overfow boundary for abig
- eps = bexp<Scalar>(ibeta, 1-it);
- relerr = ei_sqrt(eps); // tolerance for neglecting asml
- abig = 1.0/eps - 1.0;
- if (Scalar(nbig)>abig) nmax = abig; // largest safe n
- else nmax = nbig;
- }
- n = size();
- if(n==0)
- return 0;
- asml = Scalar(0);
- amed = Scalar(0);
- abig = Scalar(0);
- for(int j=0; j<n; ++j)
- {
- ax = ei_abs(coeff(j));
- if(ax > b2) abig += ei_abs2(ax*s2m);
- else if(ax < b1) asml += ei_abs2(ax*s1m);
- else amed += ei_abs2(ax);
- }
- if(abig > Scalar(0))
- {
- abig = ei_sqrt(abig);
- if(abig > overfl)
- {
- ei_assert(false && "overflow");
- return rbig;
- }
- if(amed > Scalar(0))
- {
- abig = abig/s2m;
- amed = ei_sqrt(amed);
- }
- else
- {
- return abig/s2m;
- }
-
- }
- else if(asml > Scalar(0))
- {
- if (amed > Scalar(0))
- {
- abig = ei_sqrt(amed);
- amed = ei_sqrt(asml) / s1m;
- }
- else
- {
- return ei_sqrt(asml)/s1m;
- }
- }
- else
- {
- return ei_sqrt(amed);
- }
- asml = std::min(abig, amed);
- abig = std::max(abig, amed);
- if(asml <= abig*relerr)
- return abig;
- else
- return abig * ei_sqrt(Scalar(1) + ei_abs2(asml/abig));
-}
-
/** \returns an expression of the quotient of *this by its own norm.
*
* \only_for_vectors
diff --git a/Eigen/src/Core/Functors.h b/Eigen/src/Core/Functors.h
index 89badb353..a4c9604df 100644
--- a/Eigen/src/Core/Functors.h
+++ b/Eigen/src/Core/Functors.h
@@ -124,9 +124,6 @@ template<typename Scalar> struct ei_scalar_hypot_op EIGEN_EMPTY_STRUCT {
// typedef typename NumTraits<Scalar>::Real result_type;
EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& _x, const Scalar& _y) const
{
-// typedef typename NumTraits<T>::Real RealScalar;
-// RealScalar _x = ei_abs(x);
-// RealScalar _y = ei_abs(y);
Scalar p = std::max(_x, _y);
Scalar q = std::min(_x, _y);
Scalar qp = q/p;
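The functor above is the building block of hypotNorm(): by factoring out the larger of the two magnitudes, only a ratio no greater than one is ever squared, so no intermediate result overflows. A standalone restatement of the idea in plain C++ (a sketch, not Eigen's code):

#include <algorithm>
#include <cmath>

// Overflow-safe hypot: sqrt(x^2 + y^2) without squaring the large operand.
double safe_hypot(double x, double y)
{
  double ax = std::abs(x), ay = std::abs(y);
  double p = std::max(ax, ay);
  if (p == 0.0)
    return 0.0;                      // both inputs are zero
  double qp = std::min(ax, ay) / p;  // ratio in [0, 1]
  return p * std::sqrt(1.0 + qp * qp);
}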
diff --git a/Eigen/src/Core/MapBase.h b/Eigen/src/Core/MapBase.h
index 721f2d476..e643144ff 100644
--- a/Eigen/src/Core/MapBase.h
+++ b/Eigen/src/Core/MapBase.h
@@ -173,8 +173,14 @@ template<typename Derived> class MapBase
using Base::operator=;
using Base::operator*=;
- using Base::operator+=;
- using Base::operator-=;
+
+ template<typename Lhs,typename Rhs>
+ Derived& operator+=(const Flagged<Product<Lhs,Rhs,CacheFriendlyProduct>, 0, EvalBeforeNestingBit | EvalBeforeAssigningBit>& other)
+ { return Base::operator+=(other); }
+
+ template<typename Lhs,typename Rhs>
+ Derived& operator-=(const Flagged<Product<Lhs,Rhs,CacheFriendlyProduct>, 0, EvalBeforeNestingBit | EvalBeforeAssigningBit>& other)
+ { return Base::operator-=(other); }
template<typename OtherDerived>
Derived& operator+=(const MatrixBase<OtherDerived>& other)
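The using-declarations are replaced here by explicit overloads for the cache-friendly product expression, so that a Map, i.e. a matrix view over externally owned memory, can still accumulate such a product in place. A usage sketch written against the Eigen API of this period (the std::vector merely stands in for an external buffer):

#include <vector>
#include <Eigen/Core>
using namespace Eigen;

int main()
{
  const int n = 8;
  std::vector<float> buffer(n * n, 0.f);   // externally owned storage
  Map<MatrixXf> m(&buffer[0], n, n);       // view it as an n x n matrix
  MatrixXf a = MatrixXf::Ones(n, n);
  MatrixXf b = MatrixXf::Identity(n, n);
  m += a * b;                              // accumulate the product into the mapped memory
  m -= a * b;
  return 0;
}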
diff --git a/Eigen/src/Core/Matrix.h b/Eigen/src/Core/Matrix.h
index d5c508128..f21364c70 100644
--- a/Eigen/src/Core/Matrix.h
+++ b/Eigen/src/Core/Matrix.h
@@ -571,10 +571,16 @@ class Matrix
template<typename OtherDerived>
EIGEN_STRONG_INLINE Matrix& _set(const MatrixBase<OtherDerived>& other)
{
- _resize_to_match(other);
- return Base::operator=(other);
+ _set_selector(other.derived(), typename ei_meta_if<(int(OtherDerived::Flags) & EvalBeforeAssigningBit), ei_meta_true, ei_meta_false>::ret());
+ return *this;
}
+ template<typename OtherDerived>
+ EIGEN_STRONG_INLINE void _set_selector(const OtherDerived& other, const ei_meta_true&) { _set_noalias(other.eval()); }
+
+ template<typename OtherDerived>
+ EIGEN_STRONG_INLINE void _set_selector(const OtherDerived& other, const ei_meta_false&) { _set_noalias(other); }
+
/** \internal Like _set() but additionally makes the assumption that no aliasing effect can happen (which
* is the case when creating a new matrix) so one can enforce lazy evaluation.
*
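_set() now branches at compile time on the EvalBeforeAssigningBit of the right-hand side: expressions carrying that flag (such as products) are evaluated into a temporary before the copy, which is what makes aliased assignments like m = m * other work (see the new test in test/product_large.cpp below). A minimal standalone illustration of the tag-dispatch pattern (the names are illustrative, not Eigen's):

#include <iostream>

struct eval_first {};   // plays the role of ei_meta_true
struct copy_direct {};  // plays the role of ei_meta_false

template<bool Flag> struct select_tag       { typedef copy_direct type; };
template<>          struct select_tag<true> { typedef eval_first  type; };

template<typename Expr>
void assign_impl(const Expr&, eval_first)  { std::cout << "evaluate into a temporary, then copy\n"; }
template<typename Expr>
void assign_impl(const Expr&, copy_direct) { std::cout << "copy coefficient by coefficient (lazy)\n"; }

// In Eigen the flag comes from Expr::Flags & EvalBeforeAssigningBit;
// here it is passed explicitly to keep the sketch short.
template<bool EvalBeforeAssigning, typename Expr>
void assign(const Expr& e)
{
  assign_impl(e, typename select_tag<EvalBeforeAssigning>::type());
}

int main()
{
  double expr = 0;
  assign<true>(expr);   // product-like expression
  assign<false>(expr);  // plain expression
  return 0;
}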
diff --git a/Eigen/src/Core/MatrixBase.h b/Eigen/src/Core/MatrixBase.h
index 3df518d1a..ac2b2532f 100644
--- a/Eigen/src/Core/MatrixBase.h
+++ b/Eigen/src/Core/MatrixBase.h
@@ -437,6 +437,7 @@ template<typename Derived> class MatrixBase
RealScalar norm() const;
RealScalar stableNorm() const;
RealScalar blueNorm() const;
+ RealScalar hypotNorm() const;
const PlainMatrixType normalized() const;
void normalize();
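With this addition MatrixBase exposes four Euclidean norms: norm() (fast, may overflow or underflow), stableNorm() (blockwise rescaling), blueNorm() (Blue's algorithm) and hypotNorm() (chained hypot calls, slowest). A usage sketch against the API introduced by this patch; the values are chosen only to trigger overflow in float:

#include <iostream>
#include <Eigen/Core>
using namespace Eigen;

int main()
{
  // Squaring 1e20f overflows float, so the plain norm() returns inf
  // while the safe variants recover roughly 1.732e20.
  Vector3f v(1e20f, 1e20f, 1e20f);
  std::cout << v.norm()       << "\n"    // inf
            << v.stableNorm() << "\n"    // ~1.732e20
            << v.blueNorm()   << "\n"    // ~1.732e20
            << v.hypotNorm()  << "\n";   // ~1.732e20
  return 0;
}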
diff --git a/Eigen/src/Core/NumTraits.h b/Eigen/src/Core/NumTraits.h
index dec07a036..24afe54c5 100644
--- a/Eigen/src/Core/NumTraits.h
+++ b/Eigen/src/Core/NumTraits.h
@@ -70,9 +70,7 @@ template<> struct NumTraits<float>
HasFloatingPoint = 1,
ReadCost = 1,
AddCost = 1,
- MulCost = 1,
- Base = 2,
- Mantissa = 23
+ MulCost = 1
};
};
@@ -85,9 +83,7 @@ template<> struct NumTraits<double>
HasFloatingPoint = 1,
ReadCost = 1,
AddCost = 1,
- MulCost = 1,
- Base = 2,
- Mantissa = 52
+ MulCost = 1
};
};
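The Base and Mantissa entries are removed because the same machine parameters are available portably from std::numeric_limits, which is what the new blueNorm() implementation queries. A quick standalone check (note that digits counts the implicit leading bit, hence 24/53 rather than the 23/52 explicit mantissa bits listed above):

#include <iostream>
#include <limits>

int main()
{
  std::cout << "float : radix = " << std::numeric_limits<float>::radix
            << ", digits = "      << std::numeric_limits<float>::digits  << "\n";  // 2, 24
  std::cout << "double: radix = " << std::numeric_limits<double>::radix
            << ", digits = "      << std::numeric_limits<double>::digits << "\n";  // 2, 53
  return 0;
}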
diff --git a/Eigen/src/Core/Product.h b/Eigen/src/Core/Product.h
index 402f597d2..b46440ec0 100644
--- a/Eigen/src/Core/Product.h
+++ b/Eigen/src/Core/Product.h
@@ -287,6 +287,18 @@ MatrixBase<Derived>::operator*(const MatrixBase<OtherDerived> &other) const
return typename ProductReturnType<Derived,OtherDerived>::Type(derived(), other.derived());
}
+/** replaces \c *this by \c *this * \a other.
+ *
+ * \returns a reference to \c *this
+ */
+template<typename Derived>
+template<typename OtherDerived>
+inline Derived &
+MatrixBase<Derived>::operator*=(const MatrixBase<OtherDerived> &other)
+{
+ return derived() = derived() * other.derived();
+}
+
/***************************************************************************
* Normal product .coeff() implementation (with meta-unrolling)
***************************************************************************/
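Note that operator*= is written as derived() = derived() * other, so it relies on the aliasing handling in Matrix::_set() shown above: the product expression carries EvalBeforeAssigningBit and is therefore evaluated into a temporary before it overwrites *this. A short usage sketch (Eigen API of this period):

#include <Eigen/Core>
using namespace Eigen;

int main()
{
  MatrixXf m = MatrixXf::Ones(3, 3);
  MatrixXf q = MatrixXf::Identity(3, 3);
  m *= q;   // equivalent to m = m * q;
  m *= m;   // safe: the product is evaluated into a temporary before the copy
  return 0;
}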
diff --git a/Eigen/src/Core/StableNorm.h b/Eigen/src/Core/StableNorm.h
new file mode 100644
index 000000000..22809633d
--- /dev/null
+++ b/Eigen/src/Core/StableNorm.h
@@ -0,0 +1,194 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Gael Guennebaud <g.gael@free.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_STABLENORM_H
+#define EIGEN_STABLENORM_H
+
+template<typename ExpressionType, typename Scalar>
+inline void ei_stable_norm_kernel(const ExpressionType& bl, Scalar& ssq, Scalar& scale, Scalar& invScale)
+{
+ Scalar max = bl.cwise().abs().maxCoeff();
+ if (max>scale)
+ {
+ ssq = ssq * ei_abs2(scale/max);
+ scale = max;
+ invScale = Scalar(1)/scale;
+ }
+ // TODO if the max is much much smaller than the current scale,
+ // then we can neglect this sub vector
+ ssq += (bl*invScale).squaredNorm();
+}
+
+/** \returns the \em l2 norm of \c *this avoiding underflow and overflow.
+ * This version uses a blockwise two-pass algorithm:
+ * 1 - find the absolute largest coefficient \c s
+ * 2 - compute \f$ s \Vert \frac{*this}{s} \Vert \f$ in a standard way
+ *
+ * For architecture/scalar types supporting vectorization, this version
+ * is faster than blueNorm(). Otherwise the blueNorm() is much faster.
+ *
+ * \sa norm(), blueNorm(), hypotNorm()
+ */
+template<typename Derived>
+inline typename NumTraits<typename ei_traits<Derived>::Scalar>::Real
+MatrixBase<Derived>::stableNorm() const
+{
+ const int blockSize = 4096;
+ RealScalar scale = 0;
+ RealScalar invScale;
+ RealScalar ssq = 0; // sum of square
+ enum {
+ Alignment = (int(Flags)&DirectAccessBit) || (int(Flags)&AlignedBit) ? ForceAligned : AsRequested
+ };
+ int n = size();
+ int bi=0;
+ if ((int(Flags)&DirectAccessBit) && !(int(Flags)&AlignedBit))
+ {
+ bi = ei_alignmentOffset(&const_cast_derived().coeffRef(0), n);
+ if (bi>0)
+ ei_stable_norm_kernel(start(bi), ssq, scale, invScale);
+ }
+ for (; bi<n; bi+=blockSize)
+ ei_stable_norm_kernel(VectorBlock<Derived,Dynamic,Alignment>(derived(),bi,std::min(blockSize, n - bi)), ssq, scale, invScale);
+ return scale * ei_sqrt(ssq);
+}
+
+/** \returns the \em l2 norm of \c *this using Blue's algorithm.
+ * A Portable Fortran Program to Find the Euclidean Norm of a Vector,
+ * ACM TOMS, Vol 4, Issue 1, 1978.
+ *
+ * For architecture/scalar types without vectorization, this version
+ * is much faster than stableNorm(). Otherwise the stableNorm() is faster.
+ *
+ * \sa norm(), stableNorm(), hypotNorm()
+ */
+template<typename Derived>
+inline typename NumTraits<typename ei_traits<Derived>::Scalar>::Real
+MatrixBase<Derived>::blueNorm() const
+{
+ static int nmax = -1;
+ static RealScalar b1, b2, s1m, s2m, overfl, rbig, relerr;
+ if(nmax <= 0)
+ {
+ int nbig, ibeta, it, iemin, iemax, iexp;
+ RealScalar abig, eps;
+ // This program calculates the machine-dependent constants
+ // bl, b2, slm, s2m, relerr overfl, nmax
+ // from the "basic" machine-dependent numbers
+ // nbig, ibeta, it, iemin, iemax, rbig.
+ // The following define the basic machine-dependent constants.
+ // For portability, the PORT subprograms "ilmaeh" and "rlmach"
+ // are used. For any specific computer, each of the assignment
+ // statements can be replaced
+ nbig = std::numeric_limits<int>::max(); // largest integer
+ ibeta = std::numeric_limits<RealScalar>::radix; // base for floating-point numbers
+ it = std::numeric_limits<RealScalar>::digits; // number of base-beta digits in mantissa
+ iemin = std::numeric_limits<RealScalar>::min_exponent; // minimum exponent
+ iemax = std::numeric_limits<RealScalar>::max_exponent; // maximum exponent
+ rbig = std::numeric_limits<RealScalar>::max(); // largest floating-point number
+
+ // Check the basic machine-dependent constants.
+ if(iemin > 1 - 2*it || 1+it>iemax || (it==2 && ibeta<5)
+ || (it<=4 && ibeta <= 3 ) || it<2)
+ {
+ ei_assert(false && "the algorithm cannot be guaranteed on this computer");
+ }
+ iexp = -((1-iemin)/2);
+ b1 = std::pow(double(ibeta),iexp); // lower boundary of midrange
+ iexp = (iemax + 1 - it)/2;
+ b2 = std::pow(double(ibeta),iexp); // upper boundary of midrange
+
+ iexp = (2-iemin)/2;
+ s1m = std::pow(double(ibeta),iexp); // scaling factor for lower range
+ iexp = - ((iemax+it)/2);
+ s2m = std::pow(double(ibeta),iexp); // scaling factor for upper range
+
+ overfl = rbig*s2m; // overflow boundary for abig
+ eps = std::pow(double(ibeta), 1-it);
+ relerr = ei_sqrt(eps); // tolerance for neglecting asml
+ abig = 1.0/eps - 1.0;
+ if (RealScalar(nbig)>abig) nmax = abig; // largest safe n
+ else nmax = nbig;
+ }
+ int n = size();
+ RealScalar ab2 = b2 / RealScalar(n);
+ RealScalar asml = RealScalar(0);
+ RealScalar amed = RealScalar(0);
+ RealScalar abig = RealScalar(0);
+ for(int j=0; j<n; ++j)
+ {
+ RealScalar ax = ei_abs(coeff(j));
+ if(ax > ab2) abig += ei_abs2(ax*s2m);
+ else if(ax < b1) asml += ei_abs2(ax*s1m);
+ else amed += ei_abs2(ax);
+ }
+ if(abig > RealScalar(0))
+ {
+ abig = ei_sqrt(abig);
+ if(abig > overfl)
+ {
+ ei_assert(false && "overflow");
+ return rbig;
+ }
+ if(amed > RealScalar(0))
+ {
+ abig = abig/s2m;
+ amed = ei_sqrt(amed);
+ }
+ else
+ return abig/s2m;
+ }
+ else if(asml > RealScalar(0))
+ {
+ if (amed > RealScalar(0))
+ {
+ abig = ei_sqrt(amed);
+ amed = ei_sqrt(asml) / s1m;
+ }
+ else
+ return ei_sqrt(asml)/s1m;
+ }
+ else
+ return ei_sqrt(amed);
+ asml = std::min(abig, amed);
+ abig = std::max(abig, amed);
+ if(asml <= abig*relerr)
+ return abig;
+ else
+ return abig * ei_sqrt(RealScalar(1) + ei_abs2(asml/abig));
+}
+
+/** \returns the \em l2 norm of \c *this avoiding underflow and overflow.
+ * This version uses a concatenation of hypot() calls, and it is very slow.
+ *
+ * \sa norm(), stableNorm()
+ */
+template<typename Derived>
+inline typename NumTraits<typename ei_traits<Derived>::Scalar>::Real
+MatrixBase<Derived>::hypotNorm() const
+{
+ return this->cwise().abs().redux(ei_scalar_hypot_op<RealScalar>());
+}
+
+#endif // EIGEN_STABLENORM_H
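The kernel at the top of this new file keeps, per block, a running scale (the largest magnitude seen so far) and a sum of squares of coefficients divided by that scale, so no squared term can overflow or completely underflow. The same rescaling idea in its classic element-by-element form, as in LAPACK's *nrm2 and the lapackNorm reference in the benchmark below, looks like this (a plain-C++ sketch, not the Eigen implementation):

#include <cmath>
#include <cstddef>

// Overflow/underflow-safe Euclidean norm of x[0..n-1]:
// maintain ssq = sum((x[i]/scale)^2) with scale = max |x[i]| seen so far.
double scaled_norm(const double* x, std::size_t n)
{
  double scale = 0.0, ssq = 1.0;
  for (std::size_t i = 0; i < n; ++i)
  {
    double ax = std::fabs(x[i]);
    if (ax > scale)
    {
      ssq = 1.0 + ssq * (scale / ax) * (scale / ax);  // rescale the old sum
      scale = ax;
    }
    else if (ax > 0.0)
      ssq += (ax / scale) * (ax / scale);
  }
  return scale * std::sqrt(ssq);                      // 0 when all entries are 0
}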
diff --git a/Eigen/src/QR/SelfAdjointEigenSolver.h b/Eigen/src/QR/SelfAdjointEigenSolver.h
index b003fb4d7..167fedebf 100644
--- a/Eigen/src/QR/SelfAdjointEigenSolver.h
+++ b/Eigen/src/QR/SelfAdjointEigenSolver.h
@@ -383,18 +383,24 @@ static void ei_tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, int st
int kn1 = (k+1)*n;
#endif
// let's do the product manually to avoid the need of temporaries...
- for (int i=0; i<n; ++i)
- {
- #ifdef EIGEN_DEFAULT_TO_ROW_MAJOR
- Scalar matrixQ_i_k = matrixQ[i*n+k];
- matrixQ[i*n+k] = c * matrixQ_i_k - s * matrixQ[i*n+k+1];
- matrixQ[i*n+k+1] = s * matrixQ_i_k + c * matrixQ[i*n+k+1];
- #else
- Scalar matrixQ_i_k = matrixQ[i+kn];
- matrixQ[i+kn] = c * matrixQ_i_k - s * matrixQ[i+kn1];
- matrixQ[i+kn1] = s * matrixQ_i_k + c * matrixQ[i+kn1];
- #endif
- }
+ Matrix<Scalar,Dynamic,1> aux = Map<Matrix<Scalar,Dynamic,1>, ForceAligned >(matrixQ+kn,n);
+ Map<Matrix<Scalar,Dynamic,1>, ForceAligned >(matrixQ+kn,n)
+ = Map<Matrix<Scalar,Dynamic,1>, ForceAligned >(matrixQ+kn,n) * c - s * Map<Matrix<Scalar,Dynamic,1> >(matrixQ+kn1,n);
+
+ Map<Matrix<Scalar,Dynamic,1>, ForceAligned >(matrixQ+kn1,n)
+ = Map<Matrix<Scalar,Dynamic,1>, ForceAligned >(matrixQ+kn1,n) * c - s * aux;//Map<Matrix<Scalar,Dynamic,1> >(matrixQ+kn,n);
+// for (int i=0; i<n; ++i)
+// {
+// #ifdef EIGEN_DEFAULT_TO_ROW_MAJOR
+// Scalar matrixQ_i_k = matrixQ[i*n+k];
+// matrixQ[i*n+k] = c * matrixQ_i_k - s * matrixQ[i*n+k+1];
+// matrixQ[i*n+k+1] = s * matrixQ_i_k + c * matrixQ[i*n+k+1];
+// #else
+// Scalar matrixQ_i_k = matrixQ[i+kn];
+// matrixQ[i+kn] = c * matrixQ_i_k - s * matrixQ[i+kn1];
+// matrixQ[i+kn1] = s * matrixQ_i_k + c * matrixQ[i+kn1];
+// #endif
+// }
}
}
}
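The replacement above updates columns k and k+1 of Q with two vector expressions, keeping a copy of column k in aux because it is overwritten before the second update reads it. Spelled out coefficient-wise, this is the plane rotation of the commented-out scalar loop kept above for reference (rotation sign conventions differ between implementations):

// Apply a plane (Givens) rotation with cosine c and sine s to the two
// length-n columns qk and qk1 of Q, stored as contiguous arrays.
void apply_givens(double* qk, double* qk1, int n, double c, double s)
{
  for (int i = 0; i < n; ++i)
  {
    double t = qk[i];                // save: qk[i] is overwritten first
    qk[i]  = c * t - s * qk1[i];
    qk1[i] = s * t + c * qk1[i];
  }
}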
diff --git a/Eigen/src/SVD/SVD.h b/Eigen/src/SVD/SVD.h
index f9f9feb89..b68d1c834 100644
--- a/Eigen/src/SVD/SVD.h
+++ b/Eigen/src/SVD/SVD.h
@@ -125,7 +125,7 @@ template<typename MatrixType> class SVD
{
return (b >= Scalar(0.0) ? ei_abs(a) : -ei_abs(a));
}
-
+
protected:
/** \internal */
MatrixUType m_matU;
@@ -254,11 +254,14 @@ void SVD<MatrixType>::compute(const MatrixType& matrix)
if (g != Scalar(0.0))
{
g = Scalar(1.0)/g;
- for (j=l; j<n; j++)
+ if (m-l)
{
- s = A.col(i).end(m-l).dot(A.col(j).end(m-l));
- f = (s/A(i,i))*g;
- A.col(j).end(m-i) += f * A.col(i).end(m-i);
+ for (j=l; j<n; j++)
+ {
+ s = A.col(i).end(m-l).dot(A.col(j).end(m-l));
+ f = (s/A(i,i))*g;
+ A.col(j).end(m-i) += f * A.col(i).end(m-i);
+ }
}
A.col(i).end(m-i) *= g;
}
diff --git a/bench/bench_norm.cpp b/bench/bench_norm.cpp
new file mode 100644
index 000000000..7a3dc2e68
--- /dev/null
+++ b/bench/bench_norm.cpp
@@ -0,0 +1,344 @@
+#include <typeinfo>
+#include <Eigen/Array>
+#include "BenchTimer.h"
+using namespace Eigen;
+using namespace std;
+
+template<typename T>
+EIGEN_DONT_INLINE typename T::Scalar sqsumNorm(const T& v)
+{
+ return v.norm();
+}
+
+template<typename T>
+EIGEN_DONT_INLINE typename T::Scalar hypotNorm(const T& v)
+{
+ return v.hypotNorm();
+}
+
+template<typename T>
+EIGEN_DONT_INLINE typename T::Scalar blueNorm(const T& v)
+{
+ return v.blueNorm();
+}
+
+template<typename T>
+EIGEN_DONT_INLINE typename T::Scalar lapackNorm(T& v)
+{
+ typedef typename T::Scalar Scalar;
+ int n = v.size();
+ Scalar scale = 0;
+ Scalar ssq = 1;
+ for (int i=0;i<n;++i)
+ {
+ Scalar ax = ei_abs(v.coeff(i));
+ if (scale >= ax)
+ {
+ ssq += ei_abs2(ax/scale);
+ }
+ else
+ {
+ ssq = Scalar(1) + ssq * ei_abs2(scale/ax);
+ scale = ax;
+ }
+ }
+ return scale * ei_sqrt(ssq);
+}
+
+template<typename T>
+EIGEN_DONT_INLINE typename T::Scalar twopassNorm(T& v)
+{
+ typedef typename T::Scalar Scalar;
+ Scalar s = v.cwise().abs().maxCoeff();
+ return s*(v/s).norm();
+}
+
+template<typename T>
+EIGEN_DONT_INLINE typename T::Scalar bl2passNorm(T& v)
+{
+ return v.stableNorm();
+}
+
+template<typename T>
+EIGEN_DONT_INLINE typename T::Scalar divacNorm(T& v)
+{
+ int n =v.size() / 2;
+ for (int i=0;i<n;++i)
+ v(i) = v(2*i)*v(2*i) + v(2*i+1)*v(2*i+1);
+ n = n/2;
+ while (n>0)
+ {
+ for (int i=0;i<n;++i)
+ v(i) = v(2*i) + v(2*i+1);
+ n = n/2;
+ }
+ return ei_sqrt(v(0));
+}
+
+#ifdef EIGEN_VECTORIZE
+Packet4f ei_plt(const Packet4f& a, Packet4f& b) { return _mm_cmplt_ps(a,b); }
+Packet2d ei_plt(const Packet2d& a, Packet2d& b) { return _mm_cmplt_pd(a,b); }
+
+Packet4f ei_pandnot(const Packet4f& a, Packet4f& b) { return _mm_andnot_ps(a,b); }
+Packet2d ei_pandnot(const Packet2d& a, Packet2d& b) { return _mm_andnot_pd(a,b); }
+#endif
+
+template<typename T>
+EIGEN_DONT_INLINE typename T::Scalar pblueNorm(const T& v)
+{
+ #ifndef EIGEN_VECTORIZE
+ return v.blueNorm();
+ #else
+ typedef typename T::Scalar Scalar;
+
+ static int nmax = 0;
+ static Scalar b1, b2, s1m, s2m, overfl, rbig, relerr;
+ int n;
+
+ if(nmax <= 0)
+ {
+ int nbig, ibeta, it, iemin, iemax, iexp;
+ Scalar abig, eps;
+
+ nbig = std::numeric_limits<int>::max(); // largest integer
+ ibeta = std::numeric_limits<Scalar>::radix; //NumTraits<Scalar>::Base; // base for floating-point numbers
+ it = std::numeric_limits<Scalar>::digits; //NumTraits<Scalar>::Mantissa; // number of base-beta digits in mantissa
+ iemin = std::numeric_limits<Scalar>::min_exponent; // minimum exponent
+ iemax = std::numeric_limits<Scalar>::max_exponent; // maximum exponent
+ rbig = std::numeric_limits<Scalar>::max(); // largest floating-point number
+
+ // Check the basic machine-dependent constants.
+ if(iemin > 1 - 2*it || 1+it>iemax || (it==2 && ibeta<5)
+ || (it<=4 && ibeta <= 3 ) || it<2)
+ {
+ ei_assert(false && "the algorithm cannot be guaranteed on this computer");
+ }
+ iexp = -((1-iemin)/2);
+ b1 = std::pow(ibeta, iexp); // lower boundary of midrange
+ iexp = (iemax + 1 - it)/2;
+ b2 = std::pow(ibeta,iexp); // upper boundary of midrange
+
+ iexp = (2-iemin)/2;
+ s1m = std::pow(ibeta,iexp); // scaling factor for lower range
+ iexp = - ((iemax+it)/2);
+ s2m = std::pow(ibeta,iexp); // scaling factor for upper range
+
+ overfl = rbig*s2m; // overflow boundary for abig
+ eps = std::pow(ibeta, 1-it);
+ relerr = ei_sqrt(eps); // tolerance for neglecting asml
+ abig = 1.0/eps - 1.0;
+ if (Scalar(nbig)>abig) nmax = abig; // largest safe n
+ else nmax = nbig;
+ }
+
+ typedef typename ei_packet_traits<Scalar>::type Packet;
+ const int ps = ei_packet_traits<Scalar>::size;
+ Packet pasml = ei_pset1(Scalar(0));
+ Packet pamed = ei_pset1(Scalar(0));
+ Packet pabig = ei_pset1(Scalar(0));
+ Packet ps2m = ei_pset1(s2m);
+ Packet ps1m = ei_pset1(s1m);
+ Packet pb2 = ei_pset1(b2);
+ Packet pb1 = ei_pset1(b1);
+ for(int j=0; j<v.size(); j+=ps)
+ {
+ Packet ax = ei_pabs(v.template packet<Aligned>(j));
+ Packet ax_s2m = ei_pmul(ax,ps2m);
+ Packet ax_s1m = ei_pmul(ax,ps1m);
+ Packet maskBig = ei_plt(pb2,ax);
+ Packet maskSml = ei_plt(ax,pb1);
+
+// Packet maskMed = ei_pand(maskSml,maskBig);
+// Packet scale = ei_pset1(Scalar(0));
+// scale = ei_por(scale, ei_pand(maskBig,ps2m));
+// scale = ei_por(scale, ei_pand(maskSml,ps1m));
+// scale = ei_por(scale, ei_pandnot(ei_pset1(Scalar(1)),maskMed));
+// ax = ei_pmul(ax,scale);
+// ax = ei_pmul(ax,ax);
+// pabig = ei_padd(pabig, ei_pand(maskBig, ax));
+// pasml = ei_padd(pasml, ei_pand(maskSml, ax));
+// pamed = ei_padd(pamed, ei_pandnot(ax,maskMed));
+
+
+ pabig = ei_padd(pabig, ei_pand(maskBig, ei_pmul(ax_s2m,ax_s2m)));
+ pasml = ei_padd(pasml, ei_pand(maskSml, ei_pmul(ax_s1m,ax_s1m)));
+ pamed = ei_padd(pamed, ei_pandnot(ei_pmul(ax,ax),ei_pand(maskSml,maskBig)));
+ }
+ Scalar abig = ei_predux(pabig);
+ Scalar asml = ei_predux(pasml);
+ Scalar amed = ei_predux(pamed);
+ if(abig > Scalar(0))
+ {
+ abig = ei_sqrt(abig);
+ if(abig > overfl)
+ {
+ ei_assert(false && "overflow");
+ return rbig;
+ }
+ if(amed > Scalar(0))
+ {
+ abig = abig/s2m;
+ amed = ei_sqrt(amed);
+ }
+ else
+ {
+ return abig/s2m;
+ }
+
+ }
+ else if(asml > Scalar(0))
+ {
+ if (amed > Scalar(0))
+ {
+ abig = ei_sqrt(amed);
+ amed = ei_sqrt(asml) / s1m;
+ }
+ else
+ {
+ return ei_sqrt(asml)/s1m;
+ }
+ }
+ else
+ {
+ return ei_sqrt(amed);
+ }
+ asml = std::min(abig, amed);
+ abig = std::max(abig, amed);
+ if(asml <= abig*relerr)
+ return abig;
+ else
+ return abig * ei_sqrt(Scalar(1) + ei_abs2(asml/abig));
+ #endif
+}
+
+#define BENCH_PERF(NRM) { \
+ Eigen::BenchTimer tf, td, tcf; tf.reset(); td.reset(); tcf.reset();\
+ for (int k=0; k<tries; ++k) { \
+ tf.start(); \
+ for (int i=0; i<iters; ++i) NRM(vf); \
+ tf.stop(); \
+ } \
+ for (int k=0; k<tries; ++k) { \
+ td.start(); \
+ for (int i=0; i<iters; ++i) NRM(vd); \
+ td.stop(); \
+ } \
+ for (int k=0; k<std::max(1,tries/3); ++k) { \
+ tcf.start(); \
+ for (int i=0; i<iters; ++i) NRM(vcf); \
+ tcf.stop(); \
+ } \
+ std::cout << #NRM << "\t" << tf.value() << " " << td.value() << " " << tcf.value() << "\n"; \
+}
+
+void check_accuracy(double basef, double based, int s)
+{
+ double yf = basef * ei_abs(ei_random<double>());
+ double yd = based * ei_abs(ei_random<double>());
+ VectorXf vf = VectorXf::Ones(s) * yf;
+ VectorXd vd = VectorXd::Ones(s) * yd;
+
+ std::cout << "reference\t" << ei_sqrt(double(s))*yf << "\t" << ei_sqrt(double(s))*yd << "\n";
+ std::cout << "sqsumNorm\t" << sqsumNorm(vf) << "\t" << sqsumNorm(vd) << "\n";
+ std::cout << "hypotNorm\t" << hypotNorm(vf) << "\t" << hypotNorm(vd) << "\n";
+ std::cout << "blueNorm\t" << blueNorm(vf) << "\t" << blueNorm(vd) << "\n";
+ std::cout << "pblueNorm\t" << pblueNorm(vf) << "\t" << pblueNorm(vd) << "\n";
+ std::cout << "lapackNorm\t" << lapackNorm(vf) << "\t" << lapackNorm(vd) << "\n";
+ std::cout << "twopassNorm\t" << twopassNorm(vf) << "\t" << twopassNorm(vd) << "\n";
+ std::cout << "bl2passNorm\t" << bl2passNorm(vf) << "\t" << bl2passNorm(vd) << "\n";
+}
+
+void check_accuracy_var(int ef0, int ef1, int ed0, int ed1, int s)
+{
+ VectorXf vf(s);
+ VectorXd vd(s);
+ for (int i=0; i<s; ++i)
+ {
+ vf[i] = ei_abs(ei_random<double>()) * std::pow(double(10), ei_random<int>(ef0,ef1));
+ vd[i] = ei_abs(ei_random<double>()) * std::pow(double(10), ei_random<int>(ed0,ed1));
+ }
+
+ //std::cout << "reference\t" << ei_sqrt(double(s))*yf << "\t" << ei_sqrt(double(s))*yd << "\n";
+ std::cout << "sqsumNorm\t" << sqsumNorm(vf) << "\t" << sqsumNorm(vd) << "\t" << sqsumNorm(vf.cast<long double>()) << "\t" << sqsumNorm(vd.cast<long double>()) << "\n";
+ std::cout << "hypotNorm\t" << hypotNorm(vf) << "\t" << hypotNorm(vd) << "\t" << hypotNorm(vf.cast<long double>()) << "\t" << hypotNorm(vd.cast<long double>()) << "\n";
+ std::cout << "blueNorm\t" << blueNorm(vf) << "\t" << blueNorm(vd) << "\t" << blueNorm(vf.cast<long double>()) << "\t" << blueNorm(vd.cast<long double>()) << "\n";
+ std::cout << "pblueNorm\t" << pblueNorm(vf) << "\t" << pblueNorm(vd) << "\t" << blueNorm(vf.cast<long double>()) << "\t" << blueNorm(vd.cast<long double>()) << "\n";
+ std::cout << "lapackNorm\t" << lapackNorm(vf) << "\t" << lapackNorm(vd) << "\t" << lapackNorm(vf.cast<long double>()) << "\t" << lapackNorm(vd.cast<long double>()) << "\n";
+ std::cout << "twopassNorm\t" << twopassNorm(vf) << "\t" << twopassNorm(vd) << "\t" << twopassNorm(vf.cast<long double>()) << "\t" << twopassNorm(vd.cast<long double>()) << "\n";
+// std::cout << "bl2passNorm\t" << bl2passNorm(vf) << "\t" << bl2passNorm(vd) << "\t" << bl2passNorm(vf.cast<long double>()) << "\t" << bl2passNorm(vd.cast<long double>()) << "\n";
+}
+
+int main(int argc, char** argv)
+{
+ int tries = 10;
+ int iters = 100000;
+ double y = 1.1345743233455785456788e12 * ei_random<double>();
+ VectorXf v = VectorXf::Ones(1024) * y;
+
+// return 0;
+ int s = 10000;
+ double basef_ok = 1.1345743233455785456788e15;
+ double based_ok = 1.1345743233455785456788e95;
+
+ double basef_under = 1.1345743233455785456788e-27;
+ double based_under = 1.1345743233455785456788e-303;
+
+ double basef_over = 1.1345743233455785456788e+27;
+ double based_over = 1.1345743233455785456788e+302;
+
+ std::cout.precision(20);
+
+ std::cerr << "\nNo under/overflow:\n";
+ check_accuracy(basef_ok, based_ok, s);
+
+ std::cerr << "\nUnderflow:\n";
+ check_accuracy(basef_under, based_under, s);
+
+ std::cerr << "\nOverflow:\n";
+ check_accuracy(basef_over, based_over, s);
+
+ std::cerr << "\nVarying (over):\n";
+ for (int k=0; k<1; ++k)
+ {
+ check_accuracy_var(20,27,190,302,s);
+ std::cout << "\n";
+ }
+
+ std::cerr << "\nVarying (under):\n";
+ for (int k=0; k<1; ++k)
+ {
+ check_accuracy_var(-27,20,-302,-190,s);
+ std::cout << "\n";
+ }
+
+ std::cout.precision(4);
+ std::cerr << "Performance (out of cache):\n";
+ {
+ int iters = 1;
+ VectorXf vf = VectorXf::Random(1024*1024*32) * y;
+ VectorXd vd = VectorXd::Random(1024*1024*32) * y;
+ VectorXcf vcf = VectorXcf::Random(1024*1024*32) * y;
+ BENCH_PERF(sqsumNorm);
+ BENCH_PERF(blueNorm);
+// BENCH_PERF(pblueNorm);
+// BENCH_PERF(lapackNorm);
+// BENCH_PERF(hypotNorm);
+// BENCH_PERF(twopassNorm);
+ BENCH_PERF(bl2passNorm);
+ }
+
+ std::cerr << "\nPerformance (in cache):\n";
+ {
+ int iters = 100000;
+ VectorXf vf = VectorXf::Random(512) * y;
+ VectorXd vd = VectorXd::Random(512) * y;
+ VectorXcf vcf = VectorXcf::Random(512) * y;
+ BENCH_PERF(sqsumNorm);
+ BENCH_PERF(blueNorm);
+// BENCH_PERF(pblueNorm);
+// BENCH_PERF(lapackNorm);
+// BENCH_PERF(hypotNorm);
+// BENCH_PERF(twopassNorm);
+ BENCH_PERF(bl2passNorm);
+ }
+}
diff --git a/test/adjoint.cpp b/test/adjoint.cpp
index 1f4aa7427..47e1bd740 100644
--- a/test/adjoint.cpp
+++ b/test/adjoint.cpp
@@ -76,8 +76,8 @@ template<typename MatrixType> void adjoint(const MatrixType& m)
{
VERIFY_IS_MUCH_SMALLER_THAN(vzero.norm(), static_cast<RealScalar>(1));
VERIFY_IS_APPROX(v1.norm(), v1.stableNorm());
- // NOTE disabled because it currently compiles for float and double only
- // VERIFY_IS_APPROX(v1.blueNorm(), v1.stableNorm());
+ VERIFY_IS_APPROX(v1.blueNorm(), v1.stableNorm());
+ VERIFY_IS_APPROX(v1.hypotNorm(), v1.stableNorm());
}
// check compatibility of dot and adjoint
diff --git a/test/mixingtypes.cpp b/test/mixingtypes.cpp
index ec8a0f190..d14232bd4 100644
--- a/test/mixingtypes.cpp
+++ b/test/mixingtypes.cpp
@@ -82,7 +82,7 @@ template<int SizeAtCompileType> void mixingtypes(int size = SizeAtCompileType)
void test_mixingtypes()
{
// check that our operator new is indeed called:
- CALL_SUBTEST(mixingtypes<3>());
- CALL_SUBTEST(mixingtypes<4>());
+ CALL_SUBTEST(mixingtypes<3>(3));
+ CALL_SUBTEST(mixingtypes<4>(4));
CALL_SUBTEST(mixingtypes<Dynamic>(20));
}
diff --git a/test/product_large.cpp b/test/product_large.cpp
index 77ae7b587..9b53e7b89 100644
--- a/test/product_large.cpp
+++ b/test/product_large.cpp
@@ -42,4 +42,10 @@ void test_product_large()
m = (v+v).asDiagonal() * m;
VERIFY_IS_APPROX(m, MatrixXf::Constant(N,3,2));
}
+
+ {
+ // test deferred resizing in Matrix::operator=
+ MatrixXf a = MatrixXf::Random(10,4), b = MatrixXf::Random(4,10), c = a;
+ VERIFY_IS_APPROX((a = a * b), (c * b).eval());
+ }
}