author    Gael Guennebaud <g.gael@free.fr>  2011-07-21 11:19:36 +0200
committer Gael Guennebaud <g.gael@free.fr>  2011-07-21 11:19:36 +0200
commit    22bff949c898e7c340a415d771145c0bd22317d6 (patch)
tree      be0fc956a220db15d4d7622fc580777f563ea643 /unsupported
parent    d4bd8bddb5e9f968ffcbdfff5936934e3d706684 (diff)
protect calls to min and max with parentheses to make Eigen compatible with default windows.h
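Why the parentheses help: by default, windows.h defines min and max as function-like macros (unless NOMINMAX is defined), so a call such as std::min(a, b) is rewritten by the preprocessor before the compiler ever sees it. A function-like macro is expanded only when its name is immediately followed by '(', so writing (std::min)(a, b) suppresses the expansion while still calling the same function. A minimal self-contained sketch of the failure mode; the #defines below mimic what windows.h does rather than including it:

    #include <algorithm>

    // Stand-ins for the macros windows.h defines when NOMINMAX is not set.
    #define min(a, b) (((a) < (b)) ? (a) : (b))
    #define max(a, b) (((a) > (b)) ? (a) : (b))

    int main()
    {
        int x = 1, y = 2;

        // int bad = std::min(x, y);  // error: the preprocessor rewrites this
                                      // to std::(((x) < (y)) ? (x) : (y))
        int lo = (std::min)(x, y);    // OK: 'min' is not followed by '(',
                                      // so the macro is not expanded
        int hi = (std::max)(x, y);
        return (lo == 1 && hi == 2) ? 0 : 1;
    }

The same trick applies to member functions such as std::numeric_limits<double>::max, hence the (std::numeric_limits<double>::max)() rewrite in the BVH test below. Defining NOMINMAX before including windows.h would also remove the macros, but a library header cannot impose that on its users, which is why Eigen parenthesizes on its side.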
Diffstat (limited to 'unsupported')
-rw-r--r--  unsupported/Eigen/MPRealSupport                                      |  2
-rw-r--r--  unsupported/Eigen/src/BVH/BVAlgorithms.h                             |  8
-rw-r--r--  unsupported/Eigen/src/IterativeSolvers/ConstrainedConjGrad.h         |  4
-rw-r--r--  unsupported/Eigen/src/IterativeSolvers/IterationController.h         |  2
-rw-r--r--  unsupported/Eigen/src/MatrixFunctions/MatrixFunctionAtomic.h         |  4
-rw-r--r--  unsupported/Eigen/src/NonLinearOptimization/HybridNonLinearSolver.h  | 14
-rw-r--r--  unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h     | 12
-rw-r--r--  unsupported/Eigen/src/NonLinearOptimization/dogleg.h                 |  2
-rw-r--r--  unsupported/Eigen/src/NonLinearOptimization/fdjac1.h                 |  4
-rw-r--r--  unsupported/Eigen/src/NonLinearOptimization/lmpar.h                  | 28
-rw-r--r--  unsupported/Eigen/src/NumericalDiff/NumericalDiff.h                  |  2
-rw-r--r--  unsupported/Eigen/src/Skyline/SkylineStorage.h                       | 10
-rw-r--r--  unsupported/Eigen/src/SparseExtra/UmfPackSupport.h                   |  4
-rw-r--r--  unsupported/test/BVH.cpp                                             |  8
-rw-r--r--  unsupported/test/FFTW.cpp                                            |  2
15 files changed, 53 insertions, 53 deletions
diff --git a/unsupported/Eigen/MPRealSupport b/unsupported/Eigen/MPRealSupport
index 140dabecc..974137a4c 100644
--- a/unsupported/Eigen/MPRealSupport
+++ b/unsupported/Eigen/MPRealSupport
@@ -148,7 +148,7 @@ int main()
inline bool isApprox(const mpfr::mpreal& a, const mpfr::mpreal& b, const mpfr::mpreal& prec)
{
- return mpfr::abs(a - b) <= mpfr::min(mpfr::abs(a), mpfr::abs(b)) * prec;
+ return mpfr::abs(a - b) <= (mpfr::min)(mpfr::abs(a), mpfr::abs(b)) * prec;
}
inline bool isApproxOrLessThan(const mpfr::mpreal& a, const mpfr::mpreal& b, const mpfr::mpreal& prec)
diff --git a/unsupported/Eigen/src/BVH/BVAlgorithms.h b/unsupported/Eigen/src/BVH/BVAlgorithms.h
index 2412680e7..e6fdf4737 100644
--- a/unsupported/Eigen/src/BVH/BVAlgorithms.h
+++ b/unsupported/Eigen/src/BVH/BVAlgorithms.h
@@ -178,7 +178,7 @@ typename Minimizer::Scalar minimize_helper(const BVH &tree, Minimizer &minimizer
todo.pop();
for(; oBegin != oEnd; ++oBegin) //go through child objects
- minimum = std::min(minimum, minimizer.minimumOnObject(*oBegin));
+ minimum = (std::min)(minimum, minimizer.minimumOnObject(*oBegin));
for(; vBegin != vEnd; ++vBegin) { //go through child volumes
Scalar val = minimizer.minimumOnVolume(tree.getVolume(*vBegin));
@@ -274,12 +274,12 @@ typename Minimizer::Scalar BVMinimize(const BVH1 &tree1, const BVH2 &tree2, Mini
for(; oBegin1 != oEnd1; ++oBegin1) { //go through child objects of first tree
for(oCur2 = oBegin2; oCur2 != oEnd2; ++oCur2) {//go through child objects of second tree
- minimum = std::min(minimum, minimizer.minimumOnObjectObject(*oBegin1, *oCur2));
+ minimum = (std::min)(minimum, minimizer.minimumOnObjectObject(*oBegin1, *oCur2));
}
for(vCur2 = vBegin2; vCur2 != vEnd2; ++vCur2) { //go through child volumes of second tree
Helper2 helper(*oBegin1, minimizer);
- minimum = std::min(minimum, internal::minimize_helper(tree2, helper, *vCur2, minimum));
+ minimum = (std::min)(minimum, internal::minimize_helper(tree2, helper, *vCur2, minimum));
}
}
@@ -288,7 +288,7 @@ typename Minimizer::Scalar BVMinimize(const BVH1 &tree1, const BVH2 &tree2, Mini
for(oCur2 = oBegin2; oCur2 != oEnd2; ++oCur2) {//go through child objects of second tree
Helper1 helper(*oCur2, minimizer);
- minimum = std::min(minimum, internal::minimize_helper(tree1, helper, *vBegin1, minimum));
+ minimum = (std::min)(minimum, internal::minimize_helper(tree1, helper, *vBegin1, minimum));
}
for(vCur2 = vBegin2; vCur2 != vEnd2; ++vCur2) { //go through child volumes of second tree
diff --git a/unsupported/Eigen/src/IterativeSolvers/ConstrainedConjGrad.h b/unsupported/Eigen/src/IterativeSolvers/ConstrainedConjGrad.h
index f4851d316..4d8e183ee 100644
--- a/unsupported/Eigen/src/IterativeSolvers/ConstrainedConjGrad.h
+++ b/unsupported/Eigen/src/IterativeSolvers/ConstrainedConjGrad.h
@@ -172,7 +172,7 @@ void constrained_cg(const TMatrix& A, const CMatrix& C, VectorX& x,
if (iter.noiseLevel() > 0 && transition) std::cerr << "CCG: transition\n";
if (transition || iter.first()) gamma = 0.0;
- else gamma = std::max(0.0, (rho - old_z.dot(z)) / rho_1);
+ else gamma = (std::max)(0.0, (rho - old_z.dot(z)) / rho_1);
p = z + gamma*p;
++iter;
@@ -185,7 +185,7 @@ void constrained_cg(const TMatrix& A, const CMatrix& C, VectorX& x,
{
Scalar bb = C.row(i).dot(p) - f[i];
if (bb > 0.0)
- lambda = std::min(lambda, (f.coeff(i)-C.row(i).dot(x)) / bb);
+ lambda = (std::min)(lambda, (f.coeff(i)-C.row(i).dot(x)) / bb);
}
}
x += lambda * p;
diff --git a/unsupported/Eigen/src/IterativeSolvers/IterationController.h b/unsupported/Eigen/src/IterativeSolvers/IterationController.h
index 385d8c546..a65793cd5 100644
--- a/unsupported/Eigen/src/IterativeSolvers/IterationController.h
+++ b/unsupported/Eigen/src/IterativeSolvers/IterationController.h
@@ -141,7 +141,7 @@ class IterationController
bool converged(double nr)
{
m_res = internal::abs(nr);
- m_resminreach = std::min(m_resminreach, m_res);
+ m_resminreach = (std::min)(m_resminreach, m_res);
return converged();
}
template<typename VectorType> bool converged(const VectorType &v)
diff --git a/unsupported/Eigen/src/MatrixFunctions/MatrixFunctionAtomic.h b/unsupported/Eigen/src/MatrixFunctions/MatrixFunctionAtomic.h
index 87dc64ce1..d08766921 100644
--- a/unsupported/Eigen/src/MatrixFunctions/MatrixFunctionAtomic.h
+++ b/unsupported/Eigen/src/MatrixFunctions/MatrixFunctionAtomic.h
@@ -127,10 +127,10 @@ bool MatrixFunctionAtomic<MatrixType>::taylorConverged(Index s, const MatrixType
for (Index r = 0; r < n; r++) {
RealScalar mx = 0;
for (Index i = 0; i < n; i++)
- mx = std::max(mx, std::abs(m_f(m_Ashifted(i, i) + m_avgEival, static_cast<int>(s+r))));
+ mx = (std::max)(mx, std::abs(m_f(m_Ashifted(i, i) + m_avgEival, static_cast<int>(s+r))));
if (r != 0)
rfactorial *= RealScalar(r);
- delta = std::max(delta, mx / rfactorial);
+ delta = (std::max)(delta, mx / rfactorial);
}
const RealScalar P_norm = P.cwiseAbs().rowwise().sum().maxCoeff();
if (m_mu * delta * P_norm < NumTraits<Scalar>::epsilon() * F_norm)
diff --git a/unsupported/Eigen/src/NonLinearOptimization/HybridNonLinearSolver.h b/unsupported/Eigen/src/NonLinearOptimization/HybridNonLinearSolver.h
index 5327d5901..f1fdf50b9 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/HybridNonLinearSolver.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/HybridNonLinearSolver.h
@@ -255,7 +255,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveOneStep(FVectorType &x)
/* on the first iteration, adjust the initial step bound. */
if (iter == 1)
- delta = std::min(delta,pnorm);
+ delta = (std::min)(delta,pnorm);
/* evaluate the function at x + p and calculate its norm. */
if ( functor(wa2, wa4) < 0)
@@ -289,7 +289,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveOneStep(FVectorType &x)
ncfail = 0;
++ncsuc;
if (ratio >= Scalar(.5) || ncsuc > 1)
- delta = std::max(delta, pnorm / Scalar(.5));
+ delta = (std::max)(delta, pnorm / Scalar(.5));
if (internal::abs(ratio - 1.) <= Scalar(.1)) {
delta = pnorm / Scalar(.5);
}
@@ -322,7 +322,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveOneStep(FVectorType &x)
/* tests for termination and stringent tolerances. */
if (nfev >= parameters.maxfev)
return HybridNonLinearSolverSpace::TooManyFunctionEvaluation;
- if (Scalar(.1) * std::max(Scalar(.1) * delta, pnorm) <= NumTraits<Scalar>::epsilon() * xnorm)
+ if (Scalar(.1) * (std::max)(Scalar(.1) * delta, pnorm) <= NumTraits<Scalar>::epsilon() * xnorm)
return HybridNonLinearSolverSpace::TolTooSmall;
if (nslow2 == 5)
return HybridNonLinearSolverSpace::NotMakingProgressJacobian;
@@ -449,7 +449,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiffOneStep(FVectorType
/* calculate the jacobian matrix. */
if (internal::fdjac1(functor, x, fvec, fjac, parameters.nb_of_subdiagonals, parameters.nb_of_superdiagonals, parameters.epsfcn) <0)
return HybridNonLinearSolverSpace::UserAsked;
- nfev += std::min(parameters.nb_of_subdiagonals+parameters.nb_of_superdiagonals+ 1, n);
+ nfev += (std::min)(parameters.nb_of_subdiagonals+parameters.nb_of_superdiagonals+ 1, n);
wa2 = fjac.colwise().blueNorm();
@@ -496,7 +496,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiffOneStep(FVectorType
/* on the first iteration, adjust the initial step bound. */
if (iter == 1)
- delta = std::min(delta,pnorm);
+ delta = (std::min)(delta,pnorm);
/* evaluate the function at x + p and calculate its norm. */
if ( functor(wa2, wa4) < 0)
@@ -530,7 +530,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiffOneStep(FVectorType
ncfail = 0;
++ncsuc;
if (ratio >= Scalar(.5) || ncsuc > 1)
- delta = std::max(delta, pnorm / Scalar(.5));
+ delta = (std::max)(delta, pnorm / Scalar(.5));
if (internal::abs(ratio - 1.) <= Scalar(.1)) {
delta = pnorm / Scalar(.5);
}
@@ -563,7 +563,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiffOneStep(FVectorType
/* tests for termination and stringent tolerances. */
if (nfev >= parameters.maxfev)
return HybridNonLinearSolverSpace::TooManyFunctionEvaluation;
- if (Scalar(.1) * std::max(Scalar(.1) * delta, pnorm) <= NumTraits<Scalar>::epsilon() * xnorm)
+ if (Scalar(.1) * (std::max)(Scalar(.1) * delta, pnorm) <= NumTraits<Scalar>::epsilon() * xnorm)
return HybridNonLinearSolverSpace::TolTooSmall;
if (nslow2 == 5)
return HybridNonLinearSolverSpace::NotMakingProgressJacobian;
diff --git a/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h b/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h
index c43df3f7a..0ae681b1c 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h
@@ -263,7 +263,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOneStep(FVectorType &x)
if (fnorm != 0.)
for (Index j = 0; j < n; ++j)
if (wa2[permutation.indices()[j]] != 0.)
- gnorm = std::max(gnorm, internal::abs( fjac.col(j).head(j+1).dot(qtf.head(j+1)/fnorm) / wa2[permutation.indices()[j]]));
+ gnorm = (std::max)(gnorm, internal::abs( fjac.col(j).head(j+1).dot(qtf.head(j+1)/fnorm) / wa2[permutation.indices()[j]]));
/* test for convergence of the gradient norm. */
if (gnorm <= parameters.gtol)
@@ -285,7 +285,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOneStep(FVectorType &x)
/* on the first iteration, adjust the initial step bound. */
if (iter == 1)
- delta = std::min(delta,pnorm);
+ delta = (std::min)(delta,pnorm);
/* evaluate the function at x + p and calculate its norm. */
if ( functor(wa2, wa4) < 0)
@@ -321,7 +321,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOneStep(FVectorType &x)
if (Scalar(.1) * fnorm1 >= fnorm || temp < Scalar(.1))
temp = Scalar(.1);
/* Computing MIN */
- delta = temp * std::min(delta, pnorm / Scalar(.1));
+ delta = temp * (std::min)(delta, pnorm / Scalar(.1));
par /= temp;
} else if (!(par != 0. && ratio < Scalar(.75))) {
delta = pnorm / Scalar(.5);
@@ -510,7 +510,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageOneStep(FVectorTyp
if (fnorm != 0.)
for (j = 0; j < n; ++j)
if (wa2[permutation.indices()[j]] != 0.)
- gnorm = std::max(gnorm, internal::abs( fjac.col(j).head(j+1).dot(qtf.head(j+1)/fnorm) / wa2[permutation.indices()[j]]));
+ gnorm = (std::max)(gnorm, internal::abs( fjac.col(j).head(j+1).dot(qtf.head(j+1)/fnorm) / wa2[permutation.indices()[j]]));
/* test for convergence of the gradient norm. */
if (gnorm <= parameters.gtol)
@@ -532,7 +532,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageOneStep(FVectorTyp
/* on the first iteration, adjust the initial step bound. */
if (iter == 1)
- delta = std::min(delta,pnorm);
+ delta = (std::min)(delta,pnorm);
/* evaluate the function at x + p and calculate its norm. */
if ( functor(wa2, wa4) < 0)
@@ -568,7 +568,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageOneStep(FVectorTyp
if (Scalar(.1) * fnorm1 >= fnorm || temp < Scalar(.1))
temp = Scalar(.1);
/* Computing MIN */
- delta = temp * std::min(delta, pnorm / Scalar(.1));
+ delta = temp * (std::min)(delta, pnorm / Scalar(.1));
par /= temp;
} else if (!(par != 0. && ratio < Scalar(.75))) {
delta = pnorm / Scalar(.5);
diff --git a/unsupported/Eigen/src/NonLinearOptimization/dogleg.h b/unsupported/Eigen/src/NonLinearOptimization/dogleg.h
index fffd9e0be..cbdcf4b71 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/dogleg.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/dogleg.h
@@ -93,7 +93,7 @@ algo_end:
/* form appropriate convex combination of the gauss-newton */
/* direction and the scaled gradient direction. */
- temp = (1.-alpha) * std::min(sgnorm,delta);
+ temp = (1.-alpha) * (std::min)(sgnorm,delta);
x = temp * wa1 + alpha * x;
}
diff --git a/unsupported/Eigen/src/NonLinearOptimization/fdjac1.h b/unsupported/Eigen/src/NonLinearOptimization/fdjac1.h
index 887b76fa1..0a26c2061 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/fdjac1.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/fdjac1.h
@@ -26,7 +26,7 @@ DenseIndex fdjac1(
Matrix< Scalar, Dynamic, 1 > wa1(n);
Matrix< Scalar, Dynamic, 1 > wa2(n);
- eps = sqrt(std::max(epsfcn,epsmch));
+ eps = sqrt((std::max)(epsfcn,epsmch));
msum = ml + mu + 1;
if (msum >= n) {
/* computation of dense approximate jacobian. */
@@ -61,7 +61,7 @@ DenseIndex fdjac1(
if (h == 0.) h = eps;
fjac.col(j).setZero();
start = std::max<Index>(0,j-mu);
- length = std::min(n-1, j+ml) - start + 1;
+ length = (std::min)(n-1, j+ml) - start + 1;
fjac.col(j).segment(start, length) = ( wa1.segment(start, length)-fvec.segment(start, length))/h;
}
}
diff --git a/unsupported/Eigen/src/NonLinearOptimization/lmpar.h b/unsupported/Eigen/src/NonLinearOptimization/lmpar.h
index a6bbc50ba..62f4aabc9 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/lmpar.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/lmpar.h
@@ -91,12 +91,12 @@ void lmpar(
gnorm = wa1.stableNorm();
paru = gnorm / delta;
if (paru == 0.)
- paru = dwarf / std::min(delta,Scalar(0.1));
+ paru = dwarf / (std::min)(delta,Scalar(0.1));
/* if the input par lies outside of the interval (parl,paru), */
/* set par to the closer endpoint. */
- par = std::max(par,parl);
- par = std::min(par,paru);
+ par = (std::max)(par,parl);
+ par = (std::min)(par,paru);
if (par == 0.)
par = gnorm / dxnorm;
@@ -106,7 +106,7 @@ void lmpar(
/* evaluate the function at the current value of par. */
if (par == 0.)
- par = std::max(dwarf,Scalar(.001) * paru); /* Computing MAX */
+ par = (std::max)(dwarf,Scalar(.001) * paru); /* Computing MAX */
wa1 = sqrt(par)* diag;
Matrix< Scalar, Dynamic, 1 > sdiag(n);
@@ -139,13 +139,13 @@ void lmpar(
/* depending on the sign of the function, update parl or paru. */
if (fp > 0.)
- parl = std::max(parl,par);
+ parl = (std::max)(parl,par);
if (fp < 0.)
- paru = std::min(paru,par);
+ paru = (std::min)(paru,par);
/* compute an improved estimate for par. */
/* Computing MAX */
- par = std::max(parl,par+parc);
+ par = (std::max)(parl,par+parc);
/* end of an iteration. */
}
@@ -227,12 +227,12 @@ void lmpar2(
gnorm = wa1.stableNorm();
paru = gnorm / delta;
if (paru == 0.)
- paru = dwarf / std::min(delta,Scalar(0.1));
+ paru = dwarf / (std::min)(delta,Scalar(0.1));
/* if the input par lies outside of the interval (parl,paru), */
/* set par to the closer endpoint. */
- par = std::max(par,parl);
- par = std::min(par,paru);
+ par = (std::max)(par,parl);
+ par = (std::min)(par,paru);
if (par == 0.)
par = gnorm / dxnorm;
@@ -243,7 +243,7 @@ void lmpar2(
/* evaluate the function at the current value of par. */
if (par == 0.)
- par = std::max(dwarf,Scalar(.001) * paru); /* Computing MAX */
+ par = (std::max)(dwarf,Scalar(.001) * paru); /* Computing MAX */
wa1 = sqrt(par)* diag;
Matrix< Scalar, Dynamic, 1 > sdiag(n);
@@ -275,12 +275,12 @@ void lmpar2(
/* depending on the sign of the function, update parl or paru. */
if (fp > 0.)
- parl = std::max(parl,par);
+ parl = (std::max)(parl,par);
if (fp < 0.)
- paru = std::min(paru,par);
+ paru = (std::min)(paru,par);
/* compute an improved estimate for par. */
- par = std::max(parl,par+parc);
+ par = (std::max)(parl,par+parc);
}
if (iter == 0)
par = 0.;
diff --git a/unsupported/Eigen/src/NumericalDiff/NumericalDiff.h b/unsupported/Eigen/src/NumericalDiff/NumericalDiff.h
index dbf27c481..52dc0ec01 100644
--- a/unsupported/Eigen/src/NumericalDiff/NumericalDiff.h
+++ b/unsupported/Eigen/src/NumericalDiff/NumericalDiff.h
@@ -80,7 +80,7 @@ public:
Scalar h;
int nfev=0;
const typename InputType::Index n = _x.size();
- const Scalar eps = internal::sqrt((std::max(epsfcn,NumTraits<Scalar>::epsilon() )));
+ const Scalar eps = internal::sqrt(((std::max)(epsfcn,NumTraits<Scalar>::epsilon() )));
ValueType val1, val2;
InputType x = _x;
// TODO : we should do this only if the size is not already known
diff --git a/unsupported/Eigen/src/Skyline/SkylineStorage.h b/unsupported/Eigen/src/Skyline/SkylineStorage.h
index 13f3e0ced..62806bfb6 100644
--- a/unsupported/Eigen/src/Skyline/SkylineStorage.h
+++ b/unsupported/Eigen/src/Skyline/SkylineStorage.h
@@ -221,11 +221,11 @@ protected:
Index* upperProfile = new Index[upperProfileSize];
Index* lowerProfile = new Index[lowerProfileSize];
- Index copyDiagSize = std::min(diagSize, m_diagSize);
- Index copyUpperSize = std::min(upperSize, m_upperSize);
- Index copyLowerSize = std::min(lowerSize, m_lowerSize);
- Index copyUpperProfileSize = std::min(upperProfileSize, m_upperProfileSize);
- Index copyLowerProfileSize = std::min(lowerProfileSize, m_lowerProfileSize);
+ Index copyDiagSize = (std::min)(diagSize, m_diagSize);
+ Index copyUpperSize = (std::min)(upperSize, m_upperSize);
+ Index copyLowerSize = (std::min)(lowerSize, m_lowerSize);
+ Index copyUpperProfileSize = (std::min)(upperProfileSize, m_upperProfileSize);
+ Index copyLowerProfileSize = (std::min)(lowerProfileSize, m_lowerProfileSize);
// copy
memcpy(diag, m_diag, copyDiagSize * sizeof (Scalar));
diff --git a/unsupported/Eigen/src/SparseExtra/UmfPackSupport.h b/unsupported/Eigen/src/SparseExtra/UmfPackSupport.h
index df6b3cdf9..beb18f6cd 100644
--- a/unsupported/Eigen/src/SparseExtra/UmfPackSupport.h
+++ b/unsupported/Eigen/src/SparseExtra/UmfPackSupport.h
@@ -295,10 +295,10 @@ void SparseLU<MatrixType,UmfPack>::extractData() const
umfpack_get_lunz(&lnz, &unz, &rows, &cols, &nz_udiag, m_numeric, Scalar());
// allocate data
- m_l.resize(rows,std::min(rows,cols));
+ m_l.resize(rows,(std::min)(rows,cols));
m_l.resizeNonZeros(lnz);
- m_u.resize(std::min(rows,cols),cols);
+ m_u.resize((std::min)(rows,cols),cols);
m_u.resizeNonZeros(unz);
m_p.resize(rows);
diff --git a/unsupported/test/BVH.cpp b/unsupported/test/BVH.cpp
index ba5871e66..d773afb77 100644
--- a/unsupported/test/BVH.cpp
+++ b/unsupported/test/BVH.cpp
@@ -143,10 +143,10 @@ struct TreeTest
VectorType pt = VectorType::Random();
BallPointStuff<Dim> i1(pt), i2(pt);
- double m1 = std::numeric_limits<double>::max(), m2 = m1;
+ double m1 = (std::numeric_limits<double>::max)(), m2 = m1;
for(int i = 0; i < (int)b.size(); ++i)
- m1 = std::min(m1, i1.minimumOnObject(b[i]));
+ m1 = (std::min)(m1, i1.minimumOnObject(b[i]));
m2 = BVMinimize(tree, i2);
@@ -194,11 +194,11 @@ struct TreeTest
BallPointStuff<Dim> i1, i2;
- double m1 = std::numeric_limits<double>::max(), m2 = m1;
+ double m1 = (std::numeric_limits<double>::max)(), m2 = m1;
for(int i = 0; i < (int)b.size(); ++i)
for(int j = 0; j < (int)v.size(); ++j)
- m1 = std::min(m1, i1.minimumOnObjectObject(b[i], v[j]));
+ m1 = (std::min)(m1, i1.minimumOnObjectObject(b[i], v[j]));
m2 = BVMinimize(tree, vTree, i2);
diff --git a/unsupported/test/FFTW.cpp b/unsupported/test/FFTW.cpp
index d3676005a..c535fd219 100644
--- a/unsupported/test/FFTW.cpp
+++ b/unsupported/test/FFTW.cpp
@@ -70,7 +70,7 @@ complex<long double> promote(long double x) { return complex<long double>( x);
{
long double totalpower=0;
long double difpower=0;
- size_t n = min( buf1.size(),buf2.size() );
+ size_t n = (min)( buf1.size(),buf2.size() );
for (size_t k=0;k<n;++k) {
totalpower += (norm( buf1[k] ) + norm(buf2[k]) )/2.;
difpower += norm(buf1[k] - buf2[k]);