 Eigen/src/Core/Dot.h        | 138
 Eigen/src/Core/MatrixBase.h |   5
 Eigen/src/Core/NumTraits.h  |   8
 test/adjoint.cpp            |  33
 4 files changed, 170 insertions(+), 14 deletions(-)
diff --git a/Eigen/src/Core/Dot.h b/Eigen/src/Core/Dot.h
index 3861984da..4f185ea5b 100644
--- a/Eigen/src/Core/Dot.h
+++ b/Eigen/src/Core/Dot.h
@@ -295,7 +295,7 @@ inline typename NumTraits<typename ei_traits<Derived>::Scalar>::Real MatrixBase<
/** \returns the \em l2 norm of \c *this using a numerically more stable
* algorithm.
*
- * \sa norm(), dot(), squaredNorm()
+ * \sa norm(), dot(), squaredNorm(), blueNorm()
*/
template<typename Derived>
inline typename NumTraits<typename ei_traits<Derived>::Scalar>::Real
@@ -304,6 +304,142 @@ MatrixBase<Derived>::stableNorm() const
return this->cwise().abs().redux(ei_scalar_hypot_op<RealScalar>());
}
+/** \internal Computes ibeta^iexp by binary expansion of iexp,
+ * exact if ibeta is the machine base */
+template<typename T> inline T bexp(int ibeta, int iexp)
+{
+ T tbeta = T(ibeta);
+ T res = 1.0;
+ int n = iexp;
+ if (n<0)
+ {
+ n = - n;
+ tbeta = 1.0/tbeta;
+ }
+ for(;;)
+ {
+ if (n % 2) // multiply when the current bit of iexp is set
+ res = res * tbeta;
+ n = n/2;
+ if (n==0) return res;
+ tbeta = tbeta*tbeta;
+ }
+ return res;
+}
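+// For instance, bexp<float>(2,-3) multiplies the exact powers 2^-1 and 2^-2 of the
+// machine base and returns 0.125f with no rounding error.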
+
+/** \returns the \em l2 norm of \c *this using Blue's algorithm.
+ * See J. L. Blue, "A Portable Fortran Program to Find the Euclidean Norm of a Vector",
+ * ACM TOMS, Vol 4, Issue 1, 1978.
+ *
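+ * The coefficients are accumulated in three bins (small, mid and big values) and
+ * rescaled so that squaring them can neither overflow nor underflow; the partial
+ * sums are combined at the end.
+ *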
+ * \sa norm(), dot(), squaredNorm(), stableNorm()
+ */
+template<typename Derived>
+inline typename NumTraits<typename ei_traits<Derived>::Scalar>::Real
+MatrixBase<Derived>::blueNorm() const
+{
+ static int nmax;
+ static Scalar b1, b2, s1m, s2m, overfl, rbig, relerr;
+ int n;
+ Scalar ax, abig, amed, asml;
+
+ if(nmax <= 0)
+ {
+ int nbig, ibeta, it, iemin, iemax, iexp;
+ Scalar abig, eps;
+ // This program calculates the machine-dependent constants
+ // b1, b2, s1m, s2m, relerr, overfl, nmax
+ // from the "basic" machine-dependent numbers
+ // nbig, ibeta, it, iemin, iemax, rbig.
+ // The following define the basic machine-dependent constants.
+ // For portability, the PORT subprograms "i1mach" and "r1mach"
+ // are used. For any specific computer, each of the assignment
+ // statements can be replaced by constants valid for that machine.
+ nbig = std::numeric_limits<int>::max(); // largest integer
+ ibeta = NumTraits<Scalar>::Base; // base for floating-point numbers
+ it = NumTraits<Scalar>::Mantissa; // number of base-beta digits in mantissa
+ iemin = std::numeric_limits<Scalar>::min_exponent; // minimum exponent
+ iemax = std::numeric_limits<Scalar>::max_exponent; // maximum exponent
+ rbig = std::numeric_limits<Scalar>::max(); // largest floating-point number
+
+ // Check the basic machine-dependent constants.
+ if(iemin > 1 - 2*it || 1+it>iemax || (it==2 && ibeta<5)
+ || (it<=4 && ibeta <= 3 ) || it<2)
+ {
+ ei_assert(false && "the algorithm cannot be guaranteed on this computer");
+ }
+ iexp = -((1-iemin)/2);
+ b1 = bexp<Scalar>(ibeta, iexp); // lower boundary of midrange
+ iexp = (iemax + 1 - it)/2;
+ b2 = bexp<Scalar>(ibeta,iexp); // upper boundary of midrange
+
+ iexp = (2-iemin)/2;
+ s1m = bexp<Scalar>(ibeta,iexp); // scaling factor for lower range
+ iexp = - ((iemax+it)/2);
+ s2m = bexp<Scalar>(ibeta,iexp); // scaling factor for upper range
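+ // For IEEE single precision with the NumTraits above (ibeta=2, it=23,
+ // iemin=-125, iemax=128) this yields b1=2^-63, b2=2^53, s1m=2^63 and s2m=2^-75.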
+
+ overfl = rbig*s2m; // overflow boundary for abig
+ eps = bexp<Scalar>(ibeta, 1-it);
+ relerr = ei_sqrt(eps); // tolerance for neglecting asml
+ abig = 1.0/eps - 1.0;
+ if (Scalar(nbig)>abig) nmax = int(abig); // largest safe n
+ else nmax = nbig;
+ }
+ n = size();
+ if(n==0)
+ return 0;
+ asml = Scalar(0);
+ amed = Scalar(0);
+ abig = Scalar(0);
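+ // Sort the squared values into three accumulators: entries above b2 are scaled
+ // down by s2m into abig, entries below b1 are scaled up by s1m into asml, and
+ // midrange entries go unscaled into amed, so no square can overflow or underflow.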
+ for(int j=0; j<n; ++j)
+ {
+ ax = ei_abs(coeff(j));
+ if(ax > b2) abig += ei_abs2(ax*s2m);
+ else if(ax < b1) asml += ei_abs2(ax*s1m);
+ else amed += ei_abs2(ax);
+ }
+ if(abig > Scalar(0))
+ {
+ abig = ei_sqrt(abig);
+ if(abig > overfl)
+ {
+ ei_assert(false && "overflow");
+ return rbig;
+ }
+ if(amed > Scalar(0))
+ {
+ abig = abig/s2m;
+ amed = ei_sqrt(amed);
+ }
+ else
+ {
+ return abig/s2m;
+ }
+
+ }
+ else if(asml > Scalar(0))
+ {
+ if (amed > Scalar(0))
+ {
+ abig = ei_sqrt(amed);
+ amed = ei_sqrt(asml) / s1m;
+ }
+ else
+ {
+ return ei_sqrt(asml)/s1m;
+ }
+ }
+ else
+ {
+ return ei_sqrt(amed);
+ }
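+ // Combine the two non-zero accumulators, neglecting the smaller one when it
+ // cannot affect the result at the working precision.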
+ asml = std::min(abig, amed);
+ abig = std::max(abig, amed);
+ if(asml <= abig*relerr)
+ return abig;
+ else
+ return abig * ei_sqrt(Scalar(1) + ei_abs2(asml/abig));
+}
+
/** \returns an expression of the quotient of *this by its own norm.
*
* \only_for_vectors
diff --git a/Eigen/src/Core/MatrixBase.h b/Eigen/src/Core/MatrixBase.h
index 2a49ae620..b8273ca22 100644
--- a/Eigen/src/Core/MatrixBase.h
+++ b/Eigen/src/Core/MatrixBase.h
@@ -381,8 +381,9 @@ template<typename Derived> class MatrixBase
template<typename OtherDerived>
Scalar dot(const MatrixBase<OtherDerived>& other) const;
RealScalar squaredNorm() const;
- RealScalar norm() const;
- RealScalar stableNorm() const;
+ RealScalar norm() const;
+ RealScalar stableNorm() const;
+ RealScalar blueNorm() const;
const PlainMatrixType normalized() const;
void normalize();
diff --git a/Eigen/src/Core/NumTraits.h b/Eigen/src/Core/NumTraits.h
index 24afe54c5..dec07a036 100644
--- a/Eigen/src/Core/NumTraits.h
+++ b/Eigen/src/Core/NumTraits.h
@@ -70,7 +70,9 @@ template<> struct NumTraits<float>
HasFloatingPoint = 1,
ReadCost = 1,
AddCost = 1,
- MulCost = 1
+ MulCost = 1,
+ Base = 2,
+ Mantissa = 23
};
};
@@ -83,7 +85,9 @@ template<> struct NumTraits<double>
HasFloatingPoint = 1,
ReadCost = 1,
AddCost = 1,
- MulCost = 1
+ MulCost = 1,
+ Base = 2,
+ Mantissa = 52
};
};
diff --git a/test/adjoint.cpp b/test/adjoint.cpp
index 965e4d256..14ee44a0f 100644
--- a/test/adjoint.cpp
+++ b/test/adjoint.cpp
@@ -76,6 +76,7 @@ template<typename MatrixType> void adjoint(const MatrixType& m)
{
VERIFY_IS_MUCH_SMALLER_THAN(vzero.norm(), static_cast<RealScalar>(1));
VERIFY_IS_APPROX(v1.norm(), v1.stableNorm());
+ VERIFY_IS_APPROX(v1.blueNorm(), v1.stableNorm());
}
// check compatibility of dot and adjoint
@@ -113,15 +114,29 @@ template<typename MatrixType> void adjoint(const MatrixType& m)
void test_adjoint()
{
- for(int i = 0; i < g_repeat; i++) {
- CALL_SUBTEST( adjoint(Matrix<float, 1, 1>()) );
- CALL_SUBTEST( adjoint(Matrix3d()) );
- CALL_SUBTEST( adjoint(Matrix4f()) );
- CALL_SUBTEST( adjoint(MatrixXcf(4, 4)) );
- CALL_SUBTEST( adjoint(MatrixXi(8, 12)) );
- CALL_SUBTEST( adjoint(MatrixXf(21, 21)) );
- }
+// for(int i = 0; i < g_repeat; i++) {
+// CALL_SUBTEST( adjoint(Matrix<float, 1, 1>()) );
+// CALL_SUBTEST( adjoint(Matrix3d()) );
+// CALL_SUBTEST( adjoint(Matrix4f()) );
+// CALL_SUBTEST( adjoint(MatrixXcf(4, 4)) );
+// CALL_SUBTEST( adjoint(MatrixXi(8, 12)) );
+// CALL_SUBTEST( adjoint(MatrixXf(21, 21)) );
+// }
// test a large matrix only once
- CALL_SUBTEST( adjoint(Matrix<float, 100, 100>()) );
+// CALL_SUBTEST( adjoint(Matrix<float, 100, 100>()) );
+ for(int i = 0; i < g_repeat; i++)
+ {
+ std::cerr.precision(20);
+ int s = 1000000;
+ double y = 1.131242353467546478463457843445677435233e23 * ei_abs(ei_random<double>());
+ VectorXf v = VectorXf::Ones(s) * y;
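+ // y*y typically exceeds FLT_MAX, so v.norm() overflows in single precision,
+ // while v.stableNorm() and v.blueNorm() should stay close to sqrt(s)*y.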
+// Vector4f x(v.segment(0,s/4).blueNorm(), v.segment(s/4+1,s/4).blueNorm(),
+// v.segment((s/2)+1,s/4).blueNorm(), v.segment(3*s/4+1,s - 3*s/4-1).blueNorm());
+// std::cerr << v.norm() << " == " << v.stableNorm() << " == " << v.blueNorm() << " == " << x.norm() << "\n";
+ std::cerr << v.norm() << "\n" << v.stableNorm() << "\n" << v.blueNorm() << "\n" << ei_sqrt(double(s)) * y << "\n\n\n";
+
+// VectorXd d = VectorXd::Ones(s) * y;//v.cast<double>();
+// std::cerr << d.norm() << "\n" << d.stableNorm() << "\n" << d.blueNorm() << "\n" << ei_sqrt(double(s)) * y << "\n\n\n";
+ }
}