path: root/unsupported/Eigen/src/NonLinearOptimization
author     Benoit Jacob <jacob.benoit.1@gmail.com>    2010-05-30 16:00:58 -0400
committer  Benoit Jacob <jacob.benoit.1@gmail.com>    2010-05-30 16:00:58 -0400
commit     aaaade4b3d66d67d2c08af3372c3965e7255b2e8 (patch)
tree       76dfaefb014333b2f98c6db660454771655ea8b7 /unsupported/Eigen/src/NonLinearOptimization
parent     faa3ff3be6a02b57c6cb05edc87375e54ab96606 (diff)
The Index types change, as discussed on the list (too long to explain here).
Diffstat (limited to 'unsupported/Eigen/src/NonLinearOptimization')
-rw-r--r--  unsupported/Eigen/src/NonLinearOptimization/HybridNonLinearSolver.h  |  30
-rw-r--r--  unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h      |  38
-rw-r--r--  unsupported/Eigen/src/NonLinearOptimization/chkder.h                  |  10
-rw-r--r--  unsupported/Eigen/src/NonLinearOptimization/covar.h                   |   8
-rw-r--r--  unsupported/Eigen/src/NonLinearOptimization/dogleg.h                  |   6
-rw-r--r--  unsupported/Eigen/src/NonLinearOptimization/fdjac1.h                  |  16
-rw-r--r--  unsupported/Eigen/src/NonLinearOptimization/lmpar.h                   |  24
-rw-r--r--  unsupported/Eigen/src/NonLinearOptimization/qrsolv.h                  |   8
-rw-r--r--  unsupported/Eigen/src/NonLinearOptimization/r1mpyq.h                  |  12
-rw-r--r--  unsupported/Eigen/src/NonLinearOptimization/r1updt.h                  |   8
-rw-r--r--  unsupported/Eigen/src/NonLinearOptimization/rwupdt.h                  |   8
11 files changed, 96 insertions(+), 72 deletions(-)
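
Editor's note: the pattern applied throughout this patch is to replace plain int with Eigen's DenseIndex, exposed inside each class or routine through a local typedef DenseIndex Index, for every size, counter, and loop variable in the NonLinearOptimization module. The sketch below only illustrates that pattern; the ExampleSolver class and scaleInPlace method are hypothetical names for illustration and are not part of the patch, and it assumes DenseIndex matches the index type returned by Matrix::size().

// Illustrative sketch only (hypothetical ExampleSolver, not part of the patch):
// sizes, counters and loop variables use Eigen's DenseIndex instead of int.
#include <Eigen/Core>

template<typename FunctorType, typename Scalar = double>
class ExampleSolver
{
public:
    typedef Eigen::DenseIndex Index;                      // was: plain int everywhere
    typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> FVectorType;

    ExampleSolver(FunctorType &_functor) : functor(_functor), nfev(0) {}

    // scale x component-wise by diag, counting one "function evaluation"
    void scaleInPlace(FVectorType &x, const FVectorType &diag)
    {
        const Index n = x.size();                         // size() returns an Index, not an int
        for (Index j = 0; j < n; ++j)                     // loop counters use Index as well
            x[j] *= diag[j];
        ++nfev;
    }

    Index nfev;                                           // evaluation counters become Index too
private:
    FunctorType &functor;
};

Where a standard algorithm would otherwise mix int and Index, the patch spells out the template argument explicitly, as in std::max<Index>(0, j-mu) in fdjac1.h.
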
diff --git a/unsupported/Eigen/src/NonLinearOptimization/HybridNonLinearSolver.h b/unsupported/Eigen/src/NonLinearOptimization/HybridNonLinearSolver.h
index d75b1407c..aba31b238 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/HybridNonLinearSolver.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/HybridNonLinearSolver.h
@@ -56,6 +56,8 @@ template<typename FunctorType, typename Scalar=double>
class HybridNonLinearSolver
{
public:
+ typedef DenseIndex Index;
+
HybridNonLinearSolver(FunctorType &_functor)
: functor(_functor) { nfev=njev=iter = 0; fnorm= 0.; useExternalScaling=false;}
@@ -68,10 +70,10 @@ public:
, nb_of_superdiagonals(-1)
, epsfcn(Scalar(0.)) {}
Scalar factor;
- int maxfev; // maximum number of function evaluation
+ Index maxfev; // maximum number of function evaluation
Scalar xtol;
- int nb_of_subdiagonals;
- int nb_of_superdiagonals;
+ Index nb_of_subdiagonals;
+ Index nb_of_superdiagonals;
Scalar epsfcn;
};
typedef Matrix< Scalar, Dynamic, 1 > FVectorType;
@@ -102,24 +104,24 @@ public:
FVectorType fvec, qtf, diag;
JacobianType fjac;
UpperTriangularType R;
- int nfev;
- int njev;
- int iter;
+ Index nfev;
+ Index njev;
+ Index iter;
Scalar fnorm;
bool useExternalScaling;
private:
FunctorType &functor;
- int n;
+ Index n;
Scalar sum;
bool sing;
Scalar temp;
Scalar delta;
bool jeval;
- int ncsuc;
+ Index ncsuc;
Scalar ratio;
Scalar pnorm, xnorm, fnorm1;
- int nslow1, nslow2;
- int ncfail;
+ Index nslow1, nslow2;
+ Index ncfail;
Scalar actred, prered;
FVectorType wa1, wa2, wa3, wa4;
};
@@ -169,7 +171,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveInit(FVectorType &x)
if (n <= 0 || parameters.xtol < 0. || parameters.maxfev <= 0 || parameters.factor <= 0. )
return HybridNonLinearSolverSpace::ImproperInputParameters;
if (useExternalScaling)
- for (int j = 0; j < n; ++j)
+ for (Index j = 0; j < n; ++j)
if (diag[j] <= 0.)
return HybridNonLinearSolverSpace::ImproperInputParameters;
@@ -196,7 +198,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveOneStep(FVectorType &x)
{
assert(x.size()==n); // check the caller is not cheating us
- int j;
+ Index j;
std::vector<PlanarRotation<Scalar> > v_givens(n), w_givens(n);
jeval = true;
@@ -408,7 +410,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiffInit(FVectorType &
if (n <= 0 || parameters.xtol < 0. || parameters.maxfev <= 0 || parameters.nb_of_subdiagonals< 0 || parameters.nb_of_superdiagonals< 0 || parameters.factor <= 0. )
return HybridNonLinearSolverSpace::ImproperInputParameters;
if (useExternalScaling)
- for (int j = 0; j < n; ++j)
+ for (Index j = 0; j < n; ++j)
if (diag[j] <= 0.)
return HybridNonLinearSolverSpace::ImproperInputParameters;
@@ -435,7 +437,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiffOneStep(FVectorType
{
assert(x.size()==n); // check the caller is not cheating us
- int j;
+ Index j;
std::vector<PlanarRotation<Scalar> > v_givens(n), w_givens(n);
jeval = true;
diff --git a/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h b/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h
index 3d5b6ea70..63eb66738 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h
@@ -63,6 +63,8 @@ public:
LevenbergMarquardt(FunctorType &_functor)
: functor(_functor) { nfev = njev = iter = 0; fnorm = gnorm = 0.; useExternalScaling=false; }
+ typedef DenseIndex Index;
+
struct Parameters {
Parameters()
: factor(Scalar(100.))
@@ -72,7 +74,7 @@ public:
, gtol(Scalar(0.))
, epsfcn(Scalar(0.)) {}
Scalar factor;
- int maxfev; // maximum number of function evaluation
+ Index maxfev; // maximum number of function evaluation
Scalar ftol;
Scalar xtol;
Scalar gtol;
@@ -94,7 +96,7 @@ public:
static LevenbergMarquardtSpace::Status lmdif1(
FunctorType &functor,
FVectorType &x,
- int *nfev,
+ Index *nfev,
const Scalar tol = ei_sqrt(NumTraits<Scalar>::epsilon())
);
@@ -113,17 +115,17 @@ public:
FVectorType fvec, qtf, diag;
JacobianType fjac;
PermutationMatrix<Dynamic,Dynamic> permutation;
- int nfev;
- int njev;
- int iter;
+ Index nfev;
+ Index njev;
+ Index iter;
Scalar fnorm, gnorm;
bool useExternalScaling;
Scalar lm_param(void) { return par; }
private:
FunctorType &functor;
- int n;
- int m;
+ Index n;
+ Index m;
FVectorType wa1, wa2, wa3, wa4;
Scalar par, sum;
@@ -194,7 +196,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeInit(FVectorType &x)
return LevenbergMarquardtSpace::ImproperInputParameters;
if (useExternalScaling)
- for (int j = 0; j < n; ++j)
+ for (Index j = 0; j < n; ++j)
if (diag[j] <= 0.)
return LevenbergMarquardtSpace::ImproperInputParameters;
@@ -219,7 +221,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOneStep(FVectorType &x)
assert(x.size()==n); // check the caller is not cheating us
/* calculate the jacobian matrix. */
- int df_ret = functor.df(x, fjac);
+ Index df_ret = functor.df(x, fjac);
if (df_ret<0)
return LevenbergMarquardtSpace::UserAsked;
if (df_ret>0)
@@ -237,7 +239,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOneStep(FVectorType &x)
/* to the norms of the columns of the initial jacobian. */
if (iter == 1) {
if (!useExternalScaling)
- for (int j = 0; j < n; ++j)
+ for (Index j = 0; j < n; ++j)
diag[j] = (wa2[j]==0.)? 1. : wa2[j];
/* on the first iteration, calculate the norm of the scaled x */
@@ -257,7 +259,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOneStep(FVectorType &x)
/* compute the norm of the scaled gradient. */
gnorm = 0.;
if (fnorm != 0.)
- for (int j = 0; j < n; ++j)
+ for (Index j = 0; j < n; ++j)
if (wa2[permutation.indices()[j]] != 0.)
gnorm = std::max(gnorm, ei_abs( fjac.col(j).head(j+1).dot(qtf.head(j+1)/fnorm) / wa2[permutation.indices()[j]]));
@@ -410,7 +412,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageInit(FVectorType
return LevenbergMarquardtSpace::ImproperInputParameters;
if (useExternalScaling)
- for (int j = 0; j < n; ++j)
+ for (Index j = 0; j < n; ++j)
if (diag[j] <= 0.)
return LevenbergMarquardtSpace::ImproperInputParameters;
@@ -435,7 +437,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageOneStep(FVectorTyp
{
assert(x.size()==n); // check the caller is not cheating us
- int i, j;
+ Index i, j;
bool sing;
/* compute the qr factorization of the jacobian matrix */
@@ -444,7 +446,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageOneStep(FVectorTyp
/* n components in qtf. */
qtf.fill(0.);
fjac.fill(0.);
- int rownb = 2;
+ Index rownb = 2;
for (i = 0; i < m; ++i) {
if (functor.df(x, wa3, rownb) < 0) return LevenbergMarquardtSpace::UserAsked;
ei_rwupdt<Scalar>(fjac, wa3, qtf, fvec[i]);
@@ -471,7 +473,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageOneStep(FVectorTyp
fjac.diagonal() = qrfac.hCoeffs();
permutation = qrfac.colsPermutation();
// TODO : avoid this:
- for(int ii=0; ii< fjac.cols(); ii++) fjac.col(ii).segment(ii+1, fjac.rows()-ii-1) *= fjac(ii,ii); // rescale vectors
+ for(Index ii=0; ii< fjac.cols(); ii++) fjac.col(ii).segment(ii+1, fjac.rows()-ii-1) *= fjac(ii,ii); // rescale vectors
for (j = 0; j < n; ++j) {
if (fjac(j,j) != 0.) {
@@ -623,12 +625,12 @@ LevenbergMarquardtSpace::Status
LevenbergMarquardt<FunctorType,Scalar>::lmdif1(
FunctorType &functor,
FVectorType &x,
- int *nfev,
+ Index *nfev,
const Scalar tol
)
{
- int n = x.size();
- int m = functor.values();
+ Index n = x.size();
+ Index m = functor.values();
/* check the input parameters for errors. */
if (n <= 0 || m < n || tol < 0.)
diff --git a/unsupported/Eigen/src/NonLinearOptimization/chkder.h b/unsupported/Eigen/src/NonLinearOptimization/chkder.h
index 591e8bef7..4cb4fbdef 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/chkder.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/chkder.h
@@ -13,17 +13,19 @@ void ei_chkder(
Matrix< Scalar, Dynamic, 1 > &err
)
{
+ typedef DenseIndex Index;
+
const Scalar eps = ei_sqrt(NumTraits<Scalar>::epsilon());
const Scalar epsf = chkder_factor * NumTraits<Scalar>::epsilon();
const Scalar epslog = chkder_log10e * ei_log(eps);
Scalar temp;
- const int m = fvec.size(), n = x.size();
+ const Index m = fvec.size(), n = x.size();
if (mode != 2) {
/* mode = 1. */
xp.resize(n);
- for (int j = 0; j < n; ++j) {
+ for (Index j = 0; j < n; ++j) {
temp = eps * ei_abs(x[j]);
if (temp == 0.)
temp = eps;
@@ -33,13 +35,13 @@ void ei_chkder(
else {
/* mode = 2. */
err.setZero(m);
- for (int j = 0; j < n; ++j) {
+ for (Index j = 0; j < n; ++j) {
temp = ei_abs(x[j]);
if (temp == 0.)
temp = 1.;
err += temp * fjac.col(j);
}
- for (int i = 0; i < m; ++i) {
+ for (Index i = 0; i < m; ++i) {
temp = 1.;
if (fvec[i] != 0. && fvecp[i] != 0. && ei_abs(fvecp[i] - fvec[i]) >= epsf * ei_abs(fvec[i]))
temp = eps * ei_abs((fvecp[i] - fvec[i]) / eps - err[i]) / (ei_abs(fvec[i]) + ei_abs(fvecp[i]));
diff --git a/unsupported/Eigen/src/NonLinearOptimization/covar.h b/unsupported/Eigen/src/NonLinearOptimization/covar.h
index 7cfaa22d4..104898a30 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/covar.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/covar.h
@@ -5,13 +5,15 @@ void ei_covar(
const VectorXi &ipvt,
Scalar tol = ei_sqrt(NumTraits<Scalar>::epsilon()) )
{
+ typedef DenseIndex Index;
+
/* Local variables */
- int i, j, k, l, ii, jj;
- int sing;
+ Index i, j, k, l, ii, jj;
+ bool sing;
Scalar temp;
/* Function Body */
- const int n = r.cols();
+ const Index n = r.cols();
const Scalar tolr = tol * ei_abs(r(0,0));
Matrix< Scalar, Dynamic, 1 > wa(n);
assert(ipvt.size()==n);
diff --git a/unsupported/Eigen/src/NonLinearOptimization/dogleg.h b/unsupported/Eigen/src/NonLinearOptimization/dogleg.h
index 9c1d38dea..ab01d5c47 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/dogleg.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/dogleg.h
@@ -7,15 +7,17 @@ void ei_dogleg(
Scalar delta,
Matrix< Scalar, Dynamic, 1 > &x)
{
+ typedef DenseIndex Index;
+
/* Local variables */
- int i, j;
+ Index i, j;
Scalar sum, temp, alpha, bnorm;
Scalar gnorm, qnorm;
Scalar sgnorm;
/* Function Body */
const Scalar epsmch = NumTraits<Scalar>::epsilon();
- const int n = qrfac.cols();
+ const Index n = qrfac.cols();
assert(n==qtb.size());
assert(n==x.size());
assert(n==diag.size());
diff --git a/unsupported/Eigen/src/NonLinearOptimization/fdjac1.h b/unsupported/Eigen/src/NonLinearOptimization/fdjac1.h
index 3dc1e8070..74cf53b90 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/fdjac1.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/fdjac1.h
@@ -1,24 +1,26 @@
template<typename FunctorType, typename Scalar>
-int ei_fdjac1(
+DenseIndex ei_fdjac1(
const FunctorType &Functor,
Matrix< Scalar, Dynamic, 1 > &x,
Matrix< Scalar, Dynamic, 1 > &fvec,
Matrix< Scalar, Dynamic, Dynamic > &fjac,
- int ml, int mu,
+ DenseIndex ml, DenseIndex mu,
Scalar epsfcn)
{
+ typedef DenseIndex Index;
+
/* Local variables */
Scalar h;
- int j, k;
+ Index j, k;
Scalar eps, temp;
- int msum;
+ Index msum;
int iflag;
- int start, length;
+ Index start, length;
/* Function Body */
const Scalar epsmch = NumTraits<Scalar>::epsilon();
- const int n = x.size();
+ const Index n = x.size();
assert(fvec.size()==n);
Matrix< Scalar, Dynamic, 1 > wa1(n);
Matrix< Scalar, Dynamic, 1 > wa2(n);
@@ -57,7 +59,7 @@ int ei_fdjac1(
h = eps * ei_abs(wa2[j]);
if (h == 0.) h = eps;
fjac.col(j).setZero();
- start = std::max(0,j-mu);
+ start = std::max<Index>(0,j-mu);
length = std::min(n-1, j+ml) - start + 1;
fjac.col(j).segment(start, length) = ( wa1.segment(start, length)-fvec.segment(start, length))/h;
}
diff --git a/unsupported/Eigen/src/NonLinearOptimization/lmpar.h b/unsupported/Eigen/src/NonLinearOptimization/lmpar.h
index 850011912..27138de96 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/lmpar.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/lmpar.h
@@ -9,11 +9,13 @@ void ei_lmpar(
Scalar &par,
Matrix< Scalar, Dynamic, 1 > &x)
{
+ typedef DenseIndex Index;
+
/* Local variables */
- int i, j, l;
+ Index i, j, l;
Scalar fp;
Scalar parc, parl;
- int iter;
+ Index iter;
Scalar temp, paru;
Scalar gnorm;
Scalar dxnorm;
@@ -21,7 +23,7 @@ void ei_lmpar(
/* Function Body */
const Scalar dwarf = std::numeric_limits<Scalar>::min();
- const int n = r.cols();
+ const Index n = r.cols();
assert(n==diag.size());
assert(n==qtb.size());
assert(n==x.size());
@@ -30,7 +32,7 @@ void ei_lmpar(
/* compute and store in x the gauss-newton direction. if the */
/* jacobian is rank-deficient, obtain a least squares solution. */
- int nsing = n-1;
+ Index nsing = n-1;
wa1 = qtb;
for (j = 0; j < n; ++j) {
if (r(j,j) == 0. && nsing == n-1)
@@ -163,11 +165,13 @@ void ei_lmpar2(
Matrix< Scalar, Dynamic, 1 > &x)
{
+ typedef DenseIndex Index;
+
/* Local variables */
- int j;
+ Index j;
Scalar fp;
Scalar parc, parl;
- int iter;
+ Index iter;
Scalar temp, paru;
Scalar gnorm;
Scalar dxnorm;
@@ -175,7 +179,7 @@ void ei_lmpar2(
/* Function Body */
const Scalar dwarf = std::numeric_limits<Scalar>::min();
- const int n = qr.matrixQR().cols();
+ const Index n = qr.matrixQR().cols();
assert(n==diag.size());
assert(n==qtb.size());
@@ -184,8 +188,8 @@ void ei_lmpar2(
/* compute and store in x the gauss-newton direction. if the */
/* jacobian is rank-deficient, obtain a least squares solution. */
-// const int rank = qr.nonzeroPivots(); // exactly double(0.)
- const int rank = qr.rank(); // use a threshold
+// const Index rank = qr.nonzeroPivots(); // exactly double(0.)
+ const Index rank = qr.rank(); // use a threshold
wa1 = qtb;
wa1.tail(n-rank).setZero();
qr.matrixQR().topLeftCorner(rank, rank).template triangularView<Upper>().solveInPlace(wa1.head(rank));
@@ -262,7 +266,7 @@ void ei_lmpar2(
for (j = 0; j < n; ++j) {
wa1[j] /= sdiag[j];
temp = wa1[j];
- for (int i = j+1; i < n; ++i)
+ for (Index i = j+1; i < n; ++i)
wa1[i] -= s(i,j) * temp;
}
temp = wa1.blueNorm();
diff --git a/unsupported/Eigen/src/NonLinearOptimization/qrsolv.h b/unsupported/Eigen/src/NonLinearOptimization/qrsolv.h
index 205d934bd..bce8a4441 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/qrsolv.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/qrsolv.h
@@ -11,10 +11,12 @@ void ei_qrsolv(
Matrix< Scalar, Dynamic, 1 > &sdiag)
{
+ typedef DenseIndex Index;
+
/* Local variables */
- int i, j, k, l;
+ Index i, j, k, l;
Scalar temp;
- int n = s.cols();
+ Index n = s.cols();
Matrix< Scalar, Dynamic, 1 > wa(n);
PlanarRotation<Scalar> givens;
@@ -67,7 +69,7 @@ void ei_qrsolv(
/* solve the triangular system for z. if the system is */
/* singular, then obtain a least squares solution. */
- int nsing;
+ Index nsing;
for (nsing=0; nsing<n && sdiag[nsing]!=0; nsing++);
wa.tail(n-nsing).setZero();
diff --git a/unsupported/Eigen/src/NonLinearOptimization/r1mpyq.h b/unsupported/Eigen/src/NonLinearOptimization/r1mpyq.h
index 855cb7a1b..ad319d9eb 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/r1mpyq.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/r1mpyq.h
@@ -2,18 +2,20 @@
// TODO : move this to GivensQR once there's such a thing in Eigen
template <typename Scalar>
-void ei_r1mpyq(int m, int n, Scalar *a, const std::vector<PlanarRotation<Scalar> > &v_givens, const std::vector<PlanarRotation<Scalar> > &w_givens)
+void ei_r1mpyq(DenseIndex m, DenseIndex n, Scalar *a, const std::vector<PlanarRotation<Scalar> > &v_givens, const std::vector<PlanarRotation<Scalar> > &w_givens)
{
+ typedef DenseIndex Index;
+
/* apply the first set of givens rotations to a. */
- for (int j = n-2; j>=0; --j)
- for (int i = 0; i<m; ++i) {
+ for (Index j = n-2; j>=0; --j)
+ for (Index i = 0; i<m; ++i) {
Scalar temp = v_givens[j].c() * a[i+m*j] - v_givens[j].s() * a[i+m*(n-1)];
a[i+m*(n-1)] = v_givens[j].s() * a[i+m*j] + v_givens[j].c() * a[i+m*(n-1)];
a[i+m*j] = temp;
}
/* apply the second set of givens rotations to a. */
- for (int j = 0; j<n-1; ++j)
- for (int i = 0; i<m; ++i) {
+ for (Index j = 0; j<n-1; ++j)
+ for (Index i = 0; i<m; ++i) {
Scalar temp = w_givens[j].c() * a[i+m*j] + w_givens[j].s() * a[i+m*(n-1)];
a[i+m*(n-1)] = -w_givens[j].s() * a[i+m*j] + w_givens[j].c() * a[i+m*(n-1)];
a[i+m*j] = temp;
diff --git a/unsupported/Eigen/src/NonLinearOptimization/r1updt.h b/unsupported/Eigen/src/NonLinearOptimization/r1updt.h
index 3d8978837..e01d02910 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/r1updt.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/r1updt.h
@@ -9,10 +9,12 @@ void ei_r1updt(
Matrix< Scalar, Dynamic, 1> &w,
bool *sing)
{
+ typedef DenseIndex Index;
+
/* Local variables */
- const int m = s.rows();
- const int n = s.cols();
- int i, j=1;
+ const Index m = s.rows();
+ const Index n = s.cols();
+ Index i, j=1;
Scalar temp;
PlanarRotation<Scalar> givens;
diff --git a/unsupported/Eigen/src/NonLinearOptimization/rwupdt.h b/unsupported/Eigen/src/NonLinearOptimization/rwupdt.h
index b0bf72923..aa0bf7d0f 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/rwupdt.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/rwupdt.h
@@ -6,7 +6,9 @@ void ei_rwupdt(
Matrix< Scalar, Dynamic, 1> &b,
Scalar alpha)
{
- const int n = r.cols();
+ typedef DenseIndex Index;
+
+ const Index n = r.cols();
assert(r.rows()>=n);
std::vector<PlanarRotation<Scalar> > givens(n);
@@ -14,12 +16,12 @@ void ei_rwupdt(
Scalar temp, rowj;
/* Function Body */
- for (int j = 0; j < n; ++j) {
+ for (Index j = 0; j < n; ++j) {
rowj = w[j];
/* apply the previous transformations to */
/* r(i,j), i=0,1,...,j-1, and to w(j). */
- for (int i = 0; i < j; ++i) {
+ for (Index i = 0; i < j; ++i) {
temp = givens[i].c() * r(i,j) + givens[i].s() * rowj;
rowj = -givens[i].s() * r(i,j) + givens[i].c() * rowj;
r(i,j) = temp;