author     Gael Guennebaud <g.gael@free.fr>  2011-07-21 11:19:36 +0200
committer  Gael Guennebaud <g.gael@free.fr>  2011-07-21 11:19:36 +0200
commit     22bff949c898e7c340a415d771145c0bd22317d6 (patch)
tree       be0fc956a220db15d4d7622fc580777f563ea643 /unsupported/Eigen/src/NonLinearOptimization
parent     d4bd8bddb5e9f968ffcbdfff5936934e3d706684 (diff)
protect calls to min and max with parentheses to make Eigen compatible with default windows.h
Diffstat (limited to 'unsupported/Eigen/src/NonLinearOptimization')
-rw-r--r--  unsupported/Eigen/src/NonLinearOptimization/HybridNonLinearSolver.h | 14
-rw-r--r--  unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h    | 12
-rw-r--r--  unsupported/Eigen/src/NonLinearOptimization/dogleg.h                |  2
-rw-r--r--  unsupported/Eigen/src/NonLinearOptimization/fdjac1.h                |  4
-rw-r--r--  unsupported/Eigen/src/NonLinearOptimization/lmpar.h                 | 28
5 files changed, 30 insertions(+), 30 deletions(-)
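
For context: unless NOMINMAX is defined before <windows.h> is included, that header
defines function-like min and max macros, so a bare call such as std::min(a, b) is
mangled by the preprocessor. Wrapping the name in parentheses, (std::min)(a, b),
suppresses expansion, because a function-like macro only expands when its name is
immediately followed by an opening parenthesis. Below is a minimal self-contained
sketch of the failure mode and the fix; the macro definition merely mimics a default
windows.h and is not part of this patch.

    #include <algorithm>
    #include <iostream>

    // Mimic what <windows.h> does when NOMINMAX is not defined.
    #define min(a, b) (((a) < (b)) ? (a) : (b))

    int main()
    {
        double delta = 2.0, pnorm = 1.5;

        // delta = std::min(delta, pnorm);
        //   The preprocessor sees 'min' followed by '(' and expands the macro,
        //   yielding "std::(((delta) < (pnorm)) ? ...)" -- a compile error.

        delta = (std::min)(delta, pnorm);
        //   Here 'min' is followed by ')', so the macro is not expanded and
        //   the real std::min function template is called.

        std::cout << delta << '\n';  // prints 1.5
        return 0;
    }

The parenthesized form compiles whether or not the macros are defined, which is why
the patch applies it at every call site rather than requiring users to define
NOMINMAX before including Eigen headers.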
diff --git a/unsupported/Eigen/src/NonLinearOptimization/HybridNonLinearSolver.h b/unsupported/Eigen/src/NonLinearOptimization/HybridNonLinearSolver.h
index 5327d5901..f1fdf50b9 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/HybridNonLinearSolver.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/HybridNonLinearSolver.h
@@ -255,7 +255,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveOneStep(FVectorType &x)
/* on the first iteration, adjust the initial step bound. */
if (iter == 1)
- delta = std::min(delta,pnorm);
+ delta = (std::min)(delta,pnorm);
/* evaluate the function at x + p and calculate its norm. */
if ( functor(wa2, wa4) < 0)
@@ -289,7 +289,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveOneStep(FVectorType &x)
ncfail = 0;
++ncsuc;
if (ratio >= Scalar(.5) || ncsuc > 1)
- delta = std::max(delta, pnorm / Scalar(.5));
+ delta = (std::max)(delta, pnorm / Scalar(.5));
if (internal::abs(ratio - 1.) <= Scalar(.1)) {
delta = pnorm / Scalar(.5);
}
@@ -322,7 +322,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveOneStep(FVectorType &x)
/* tests for termination and stringent tolerances. */
if (nfev >= parameters.maxfev)
return HybridNonLinearSolverSpace::TooManyFunctionEvaluation;
- if (Scalar(.1) * std::max(Scalar(.1) * delta, pnorm) <= NumTraits<Scalar>::epsilon() * xnorm)
+ if (Scalar(.1) * (std::max)(Scalar(.1) * delta, pnorm) <= NumTraits<Scalar>::epsilon() * xnorm)
return HybridNonLinearSolverSpace::TolTooSmall;
if (nslow2 == 5)
return HybridNonLinearSolverSpace::NotMakingProgressJacobian;
@@ -449,7 +449,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiffOneStep(FVectorType
/* calculate the jacobian matrix. */
if (internal::fdjac1(functor, x, fvec, fjac, parameters.nb_of_subdiagonals, parameters.nb_of_superdiagonals, parameters.epsfcn) <0)
return HybridNonLinearSolverSpace::UserAsked;
- nfev += std::min(parameters.nb_of_subdiagonals+parameters.nb_of_superdiagonals+ 1, n);
+ nfev += (std::min)(parameters.nb_of_subdiagonals+parameters.nb_of_superdiagonals+ 1, n);
wa2 = fjac.colwise().blueNorm();
@@ -496,7 +496,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiffOneStep(FVectorType
/* on the first iteration, adjust the initial step bound. */
if (iter == 1)
- delta = std::min(delta,pnorm);
+ delta = (std::min)(delta,pnorm);
/* evaluate the function at x + p and calculate its norm. */
if ( functor(wa2, wa4) < 0)
@@ -530,7 +530,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiffOneStep(FVectorType
ncfail = 0;
++ncsuc;
if (ratio >= Scalar(.5) || ncsuc > 1)
- delta = std::max(delta, pnorm / Scalar(.5));
+ delta = (std::max)(delta, pnorm / Scalar(.5));
if (internal::abs(ratio - 1.) <= Scalar(.1)) {
delta = pnorm / Scalar(.5);
}
@@ -563,7 +563,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiffOneStep(FVectorType
/* tests for termination and stringent tolerances. */
if (nfev >= parameters.maxfev)
return HybridNonLinearSolverSpace::TooManyFunctionEvaluation;
- if (Scalar(.1) * std::max(Scalar(.1) * delta, pnorm) <= NumTraits<Scalar>::epsilon() * xnorm)
+ if (Scalar(.1) * (std::max)(Scalar(.1) * delta, pnorm) <= NumTraits<Scalar>::epsilon() * xnorm)
return HybridNonLinearSolverSpace::TolTooSmall;
if (nslow2 == 5)
return HybridNonLinearSolverSpace::NotMakingProgressJacobian;
diff --git a/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h b/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h
index c43df3f7a..0ae681b1c 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h
@@ -263,7 +263,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOneStep(FVectorType &x)
if (fnorm != 0.)
for (Index j = 0; j < n; ++j)
if (wa2[permutation.indices()[j]] != 0.)
- gnorm = std::max(gnorm, internal::abs( fjac.col(j).head(j+1).dot(qtf.head(j+1)/fnorm) / wa2[permutation.indices()[j]]));
+ gnorm = (std::max)(gnorm, internal::abs( fjac.col(j).head(j+1).dot(qtf.head(j+1)/fnorm) / wa2[permutation.indices()[j]]));
/* test for convergence of the gradient norm. */
if (gnorm <= parameters.gtol)
@@ -285,7 +285,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOneStep(FVectorType &x)
/* on the first iteration, adjust the initial step bound. */
if (iter == 1)
- delta = std::min(delta,pnorm);
+ delta = (std::min)(delta,pnorm);
/* evaluate the function at x + p and calculate its norm. */
if ( functor(wa2, wa4) < 0)
@@ -321,7 +321,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOneStep(FVectorType &x)
if (Scalar(.1) * fnorm1 >= fnorm || temp < Scalar(.1))
temp = Scalar(.1);
/* Computing MIN */
- delta = temp * std::min(delta, pnorm / Scalar(.1));
+ delta = temp * (std::min)(delta, pnorm / Scalar(.1));
par /= temp;
} else if (!(par != 0. && ratio < Scalar(.75))) {
delta = pnorm / Scalar(.5);
@@ -510,7 +510,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageOneStep(FVectorTyp
if (fnorm != 0.)
for (j = 0; j < n; ++j)
if (wa2[permutation.indices()[j]] != 0.)
- gnorm = std::max(gnorm, internal::abs( fjac.col(j).head(j+1).dot(qtf.head(j+1)/fnorm) / wa2[permutation.indices()[j]]));
+ gnorm = (std::max)(gnorm, internal::abs( fjac.col(j).head(j+1).dot(qtf.head(j+1)/fnorm) / wa2[permutation.indices()[j]]));
/* test for convergence of the gradient norm. */
if (gnorm <= parameters.gtol)
@@ -532,7 +532,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageOneStep(FVectorTyp
/* on the first iteration, adjust the initial step bound. */
if (iter == 1)
- delta = std::min(delta,pnorm);
+ delta = (std::min)(delta,pnorm);
/* evaluate the function at x + p and calculate its norm. */
if ( functor(wa2, wa4) < 0)
@@ -568,7 +568,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageOneStep(FVectorTyp
if (Scalar(.1) * fnorm1 >= fnorm || temp < Scalar(.1))
temp = Scalar(.1);
/* Computing MIN */
- delta = temp * std::min(delta, pnorm / Scalar(.1));
+ delta = temp * (std::min)(delta, pnorm / Scalar(.1));
par /= temp;
} else if (!(par != 0. && ratio < Scalar(.75))) {
delta = pnorm / Scalar(.5);
diff --git a/unsupported/Eigen/src/NonLinearOptimization/dogleg.h b/unsupported/Eigen/src/NonLinearOptimization/dogleg.h
index fffd9e0be..cbdcf4b71 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/dogleg.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/dogleg.h
@@ -93,7 +93,7 @@ algo_end:
/* form appropriate convex combination of the gauss-newton */
/* direction and the scaled gradient direction. */
- temp = (1.-alpha) * std::min(sgnorm,delta);
+ temp = (1.-alpha) * (std::min)(sgnorm,delta);
x = temp * wa1 + alpha * x;
}
diff --git a/unsupported/Eigen/src/NonLinearOptimization/fdjac1.h b/unsupported/Eigen/src/NonLinearOptimization/fdjac1.h
index 887b76fa1..0a26c2061 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/fdjac1.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/fdjac1.h
@@ -26,7 +26,7 @@ DenseIndex fdjac1(
Matrix< Scalar, Dynamic, 1 > wa1(n);
Matrix< Scalar, Dynamic, 1 > wa2(n);
- eps = sqrt(std::max(epsfcn,epsmch));
+ eps = sqrt((std::max)(epsfcn,epsmch));
msum = ml + mu + 1;
if (msum >= n) {
/* computation of dense approximate jacobian. */
@@ -61,7 +61,7 @@ DenseIndex fdjac1(
if (h == 0.) h = eps;
fjac.col(j).setZero();
start = std::max<Index>(0,j-mu);
- length = std::min(n-1, j+ml) - start + 1;
+ length = (std::min)(n-1, j+ml) - start + 1;
fjac.col(j).segment(start, length) = ( wa1.segment(start, length)-fvec.segment(start, length))/h;
}
}
diff --git a/unsupported/Eigen/src/NonLinearOptimization/lmpar.h b/unsupported/Eigen/src/NonLinearOptimization/lmpar.h
index a6bbc50ba..62f4aabc9 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/lmpar.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/lmpar.h
@@ -91,12 +91,12 @@ void lmpar(
gnorm = wa1.stableNorm();
paru = gnorm / delta;
if (paru == 0.)
- paru = dwarf / std::min(delta,Scalar(0.1));
+ paru = dwarf / (std::min)(delta,Scalar(0.1));
/* if the input par lies outside of the interval (parl,paru), */
/* set par to the closer endpoint. */
- par = std::max(par,parl);
- par = std::min(par,paru);
+ par = (std::max)(par,parl);
+ par = (std::min)(par,paru);
if (par == 0.)
par = gnorm / dxnorm;
@@ -106,7 +106,7 @@ void lmpar(
/* evaluate the function at the current value of par. */
if (par == 0.)
- par = std::max(dwarf,Scalar(.001) * paru); /* Computing MAX */
+ par = (std::max)(dwarf,Scalar(.001) * paru); /* Computing MAX */
wa1 = sqrt(par)* diag;
Matrix< Scalar, Dynamic, 1 > sdiag(n);
@@ -139,13 +139,13 @@ void lmpar(
/* depending on the sign of the function, update parl or paru. */
if (fp > 0.)
- parl = std::max(parl,par);
+ parl = (std::max)(parl,par);
if (fp < 0.)
- paru = std::min(paru,par);
+ paru = (std::min)(paru,par);
/* compute an improved estimate for par. */
/* Computing MAX */
- par = std::max(parl,par+parc);
+ par = (std::max)(parl,par+parc);
/* end of an iteration. */
}
@@ -227,12 +227,12 @@ void lmpar2(
gnorm = wa1.stableNorm();
paru = gnorm / delta;
if (paru == 0.)
- paru = dwarf / std::min(delta,Scalar(0.1));
+ paru = dwarf / (std::min)(delta,Scalar(0.1));
/* if the input par lies outside of the interval (parl,paru), */
/* set par to the closer endpoint. */
- par = std::max(par,parl);
- par = std::min(par,paru);
+ par = (std::max)(par,parl);
+ par = (std::min)(par,paru);
if (par == 0.)
par = gnorm / dxnorm;
@@ -243,7 +243,7 @@ void lmpar2(
/* evaluate the function at the current value of par. */
if (par == 0.)
- par = std::max(dwarf,Scalar(.001) * paru); /* Computing MAX */
+ par = (std::max)(dwarf,Scalar(.001) * paru); /* Computing MAX */
wa1 = sqrt(par)* diag;
Matrix< Scalar, Dynamic, 1 > sdiag(n);
@@ -275,12 +275,12 @@ void lmpar2(
/* depending on the sign of the function, update parl or paru. */
if (fp > 0.)
- parl = std::max(parl,par);
+ parl = (std::max)(parl,par);
if (fp < 0.)
- paru = std::min(paru,par);
+ paru = (std::min)(paru,par);
/* compute an improved estimate for par. */
- par = std::max(parl,par+parc);
+ par = (std::max)(parl,par+parc);
}
if (iter == 0)
par = 0.;