| author    | Jitse Niesen <jitse@maths.leeds.ac.uk> | 2013-02-02 22:04:42 +0000 |
|-----------|----------------------------------------|---------------------------|
| committer | Jitse Niesen <jitse@maths.leeds.ac.uk> | 2013-02-02 22:04:42 +0000 |
| commit    | 14e2ab02b58ba07e2be6b003bc178a0ec5e1015b (patch)  | |
| tree      | dcddfd386dd5b428beaee6ef14803b4eb00de212          | |
| parent    | 35647b01334370d0ddfd5d2d3376d4200947b007 (diff)   | |
Replace assert() by eigen_assert() (fixes bug #548).
28 files changed, 61 insertions, 61 deletions
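Background for the change: `eigen_assert` is Eigen's own assertion macro. By default it falls back to the standard `assert`, but unlike a raw `assert` it respects `EIGEN_NO_DEBUG` (and `NDEBUG`), and it can be redefined by the user before any Eigen header is included, for example to throw an exception instead of aborting. Replacing the remaining raw `assert` calls makes these checks honour that mechanism. The sketch below is not part of the patch; it is a minimal, hypothetical client-side example of overriding `eigen_assert`, using a deliberately non-square `determinant()` call to trip exactly the check changed in Eigen/src/LU/Determinant.h below:

```cpp
// Minimal sketch (assumed client code, not part of this commit).
// Redefining eigen_assert before including any Eigen header routes
// failed internal checks through an exception instead of abort().
#include <sstream>
#include <stdexcept>

#define eigen_assert(x)                                         \
  do {                                                          \
    if (!(x)) {                                                 \
      std::ostringstream oss;                                   \
      oss << "Eigen assertion failed: " << #x;                  \
      throw std::runtime_error(oss.str());                      \
    }                                                           \
  } while (false)

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::MatrixXd m(2, 3);            // deliberately non-square
  try {
    m.determinant();                  // hits eigen_assert(rows() == cols())
  } catch (const std::runtime_error& e) {
    std::cerr << e.what() << '\n';    // caught instead of aborting
  }
  return 0;
}
```

Note that throwing from an assertion handler like this is mainly useful for testing: a failed `eigen_assert` signals a programming error, and the affected objects are not guaranteed to be left in a usable state.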
diff --git a/Eigen/src/Eigenvalues/ComplexEigenSolver.h b/Eigen/src/Eigenvalues/ComplexEigenSolver.h
index 95c70aecb..238e08925 100644
--- a/Eigen/src/Eigenvalues/ComplexEigenSolver.h
+++ b/Eigen/src/Eigenvalues/ComplexEigenSolver.h
@@ -252,7 +252,7 @@ ComplexEigenSolver<MatrixType>&
 ComplexEigenSolver<MatrixType>::compute(const MatrixType& matrix, bool computeEigenvectors)
 {
   // this code is inspired from Jampack
-  assert(matrix.cols() == matrix.rows());
+  eigen_assert(matrix.cols() == matrix.rows());
 
   // Do a complex Schur decomposition, A = U T U^*
   // The eigenvalues are on the diagonal of T.
diff --git a/Eigen/src/Eigenvalues/ComplexSchur_MKL.h b/Eigen/src/Eigenvalues/ComplexSchur_MKL.h
index ada7a24e3..91496ae5b 100644
--- a/Eigen/src/Eigenvalues/ComplexSchur_MKL.h
+++ b/Eigen/src/Eigenvalues/ComplexSchur_MKL.h
@@ -49,7 +49,7 @@ ComplexSchur<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> >::compute(const Matri
   typedef MatrixType::RealScalar RealScalar; \
   typedef std::complex<RealScalar> ComplexScalar; \
 \
-  assert(matrix.cols() == matrix.rows()); \
+  eigen_assert(matrix.cols() == matrix.rows()); \
 \
   m_matUisUptodate = false; \
   if(matrix.cols() == 1) \
diff --git a/Eigen/src/Eigenvalues/EigenSolver.h b/Eigen/src/Eigenvalues/EigenSolver.h
index 43d0ffa76..a80f88eb0 100644
--- a/Eigen/src/Eigenvalues/EigenSolver.h
+++ b/Eigen/src/Eigenvalues/EigenSolver.h
@@ -366,7 +366,7 @@ EigenSolver<MatrixType>::compute(const MatrixType& matrix, bool computeEigenvect
 {
   using std::sqrt;
   using std::abs;
-  assert(matrix.cols() == matrix.rows());
+  eigen_assert(matrix.cols() == matrix.rows());
 
   // Reduce to real Schur form.
   m_realSchur.compute(matrix, computeEigenvectors);
diff --git a/Eigen/src/Eigenvalues/HessenbergDecomposition.h b/Eigen/src/Eigenvalues/HessenbergDecomposition.h
index b8378b08a..ebd8ae908 100644
--- a/Eigen/src/Eigenvalues/HessenbergDecomposition.h
+++ b/Eigen/src/Eigenvalues/HessenbergDecomposition.h
@@ -291,7 +291,7 @@ template<typename _MatrixType> class HessenbergDecomposition
 template<typename MatrixType>
 void HessenbergDecomposition<MatrixType>::_compute(MatrixType& matA, CoeffVectorType& hCoeffs, VectorType& temp)
 {
-  assert(matA.rows()==matA.cols());
+  eigen_assert(matA.rows()==matA.cols());
   Index n = matA.rows();
   temp.resize(n);
   for (Index i = 0; i<n-1; ++i)
diff --git a/Eigen/src/Eigenvalues/RealQZ.h b/Eigen/src/Eigenvalues/RealQZ.h
index dcaa9fbd6..5706eeebe 100644
--- a/Eigen/src/Eigenvalues/RealQZ.h
+++ b/Eigen/src/Eigenvalues/RealQZ.h
@@ -559,7 +559,7 @@ namespace Eigen {
 
       const Index dim = A_in.cols();
 
-      assert (A_in.rows()==dim && A_in.cols()==dim
+      eigen_assert (A_in.rows()==dim && A_in.cols()==dim
           && B_in.rows()==dim && B_in.cols()==dim
           && "Need square matrices of the same dimension");
 
diff --git a/Eigen/src/Eigenvalues/RealSchur.h b/Eigen/src/Eigenvalues/RealSchur.h
index 7680f9929..a770b3187 100644
--- a/Eigen/src/Eigenvalues/RealSchur.h
+++ b/Eigen/src/Eigenvalues/RealSchur.h
@@ -245,7 +245,7 @@ template<typename _MatrixType> class RealSchur
 template<typename MatrixType>
 RealSchur<MatrixType>& RealSchur<MatrixType>::compute(const MatrixType& matrix, bool computeU)
 {
-  assert(matrix.cols() == matrix.rows());
+  eigen_assert(matrix.cols() == matrix.rows());
   Index maxIters = m_maxIters;
   if (maxIters == -1)
     maxIters = m_maxIterationsPerRow * matrix.rows();
@@ -467,8 +467,8 @@ inline void RealSchur<MatrixType>::initFrancisQRStep(Index il, Index iu, const V
 template<typename MatrixType>
 inline void RealSchur<MatrixType>::performFrancisQRStep(Index il, Index im, Index iu, bool computeU, const Vector3s& firstHouseholderVector, Scalar* workspace)
 {
-  assert(im >= il);
-  assert(im <= iu-2);
+  eigen_assert(im >= il);
+  eigen_assert(im <= iu-2);
 
   const Index size = m_matT.cols();
 
diff --git a/Eigen/src/Eigenvalues/RealSchur_MKL.h b/Eigen/src/Eigenvalues/RealSchur_MKL.h
index 960ec3c76..ad9736460 100644
--- a/Eigen/src/Eigenvalues/RealSchur_MKL.h
+++ b/Eigen/src/Eigenvalues/RealSchur_MKL.h
@@ -48,7 +48,7 @@ RealSchur<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> >::compute(const Matrix<E
   typedef MatrixType::Scalar Scalar; \
   typedef MatrixType::RealScalar RealScalar; \
 \
-  assert(matrix.cols() == matrix.rows()); \
+  eigen_assert(matrix.cols() == matrix.rows()); \
 \
   lapack_int n = matrix.cols(), sdim, info; \
   lapack_int lda = matrix.outerStride(); \
diff --git a/Eigen/src/LU/Determinant.h b/Eigen/src/LU/Determinant.h
index d862c5d77..bb8e78a8a 100644
--- a/Eigen/src/LU/Determinant.h
+++ b/Eigen/src/LU/Determinant.h
@@ -91,7 +91,7 @@ template<typename Derived> struct determinant_impl<Derived, 4>
 template<typename Derived>
 inline typename internal::traits<Derived>::Scalar MatrixBase<Derived>::determinant() const
 {
-  assert(rows() == cols());
+  eigen_assert(rows() == cols());
   typedef typename internal::nested<Derived,Base::RowsAtCompileTime>::type Nested;
   return internal::determinant_impl<typename internal::remove_all<Nested>::type>::run(derived());
 }
diff --git a/unsupported/Eigen/src/IterativeSolvers/DGMRES.h b/unsupported/Eigen/src/IterativeSolvers/DGMRES.h
index 952aba15e..2ea0eccf1 100644
--- a/unsupported/Eigen/src/IterativeSolvers/DGMRES.h
+++ b/unsupported/Eigen/src/IterativeSolvers/DGMRES.h
@@ -38,7 +38,7 @@ struct traits<DGMRES<_MatrixType,_Preconditioner> >
 template <typename VectorType, typename IndexType>
 void sortWithPermutation (VectorType& vec, IndexType& perm, typename IndexType::Scalar& ncut)
 {
-  assert(vec.size() == perm.size());
+  eigen_assert(vec.size() == perm.size());
   typedef typename IndexType::Scalar Index;
   typedef typename VectorType::Scalar Scalar;
   Index n = vec.size();
@@ -525,4 +525,4 @@ struct solve_retval<DGMRES<_MatrixType, _Preconditioner>, Rhs>
 
 } // end namespace internal
 } // end namespace Eigen
-#endif
\ No newline at end of file
+#endif
diff --git a/unsupported/Eigen/src/IterativeSolvers/MINRES.h b/unsupported/Eigen/src/IterativeSolvers/MINRES.h
index 6bc1b8f5d..0e56342a8 100644
--- a/unsupported/Eigen/src/IterativeSolvers/MINRES.h
+++ b/unsupported/Eigen/src/IterativeSolvers/MINRES.h
@@ -52,7 +52,7 @@ namespace Eigen {
         VectorType w_new(precond.solve(v_new)); // initialize w_new
         // RealScalar beta; // will be initialized inside loop
         RealScalar beta_new2(v_new.dot(w_new));
-        assert(beta_new2 >= 0 && "PRECONDITIONER IS NOT POSITIVE DEFINITE");
+        eigen_assert(beta_new2 >= 0 && "PRECONDITIONER IS NOT POSITIVE DEFINITE");
         RealScalar beta_new(sqrt(beta_new2));
         const RealScalar beta_one(beta_new);
         v_new /= beta_new;
@@ -91,7 +91,7 @@ namespace Eigen {
            v_new -= alpha*v; // overwrite v_new
            w_new = precond.solve(v_new); // overwrite w_new
            beta_new2 = v_new.dot(w_new); // compute beta_new
-           assert(beta_new2 >= 0 && "PRECONDITIONER IS NOT POSITIVE DEFINITE");
+           eigen_assert(beta_new2 >= 0 && "PRECONDITIONER IS NOT POSITIVE DEFINITE");
            beta_new = sqrt(beta_new2); // compute beta_new
            v_new /= beta_new; // overwrite v_new for next iteration
            w_new /= beta_new; // overwrite w_new for next iteration
diff --git a/unsupported/Eigen/src/IterativeSolvers/Scaling.h b/unsupported/Eigen/src/IterativeSolvers/Scaling.h
index ed4ee48c8..4fd439202 100644
--- a/unsupported/Eigen/src/IterativeSolvers/Scaling.h
+++ b/unsupported/Eigen/src/IterativeSolvers/Scaling.h
@@ -73,7 +73,7 @@ class IterScaling
   {
     int m = mat.rows();
     int n = mat.cols();
-    assert((m>0 && m == n) && "Please give a non - empty matrix");
+    eigen_assert((m>0 && m == n) && "Please give a non - empty matrix");
     m_left.resize(m);
     m_right.resize(n);
     m_left.setOnes();
@@ -182,4 +182,4 @@ class IterScaling
     int m_maxits; // Maximum number of iterations allowed
 };
 }
-#endif
\ No newline at end of file
+#endif
diff --git a/unsupported/Eigen/src/LevenbergMarquardt/LMcovar.h b/unsupported/Eigen/src/LevenbergMarquardt/LMcovar.h
index 3210587e4..32d3ad518 100644
--- a/unsupported/Eigen/src/LevenbergMarquardt/LMcovar.h
+++ b/unsupported/Eigen/src/LevenbergMarquardt/LMcovar.h
@@ -33,7 +33,7 @@ void covar(
     const Index n = r.cols();
     const Scalar tolr = tol * abs(r(0,0));
     Matrix< Scalar, Dynamic, 1 > wa(n);
-    assert(ipvt.size()==n);
+    eigen_assert(ipvt.size()==n);
 
     /* form the inverse of r in the full upper triangle of r. */
     l = -1;
diff --git a/unsupported/Eigen/src/LevenbergMarquardt/LMonestep.h b/unsupported/Eigen/src/LevenbergMarquardt/LMonestep.h
index aa0c02668..51ade866c 100644
--- a/unsupported/Eigen/src/LevenbergMarquardt/LMonestep.h
+++ b/unsupported/Eigen/src/LevenbergMarquardt/LMonestep.h
@@ -26,7 +26,7 @@ LevenbergMarquardt<FunctorType>::minimizeOneStep(FVectorType &x)
     RealScalar temp, temp1,temp2;
     RealScalar ratio;
     RealScalar pnorm, xnorm, fnorm1, actred, dirder, prered;
-    assert(x.size()==n); // check the caller is not cheating us
+    eigen_assert(x.size()==n); // check the caller is not cheating us
 
    temp = 0.0; xnorm = 0.0;
    /* calculate the jacobian matrix. */
@@ -176,4 +176,4 @@ LevenbergMarquardt<FunctorType>::minimizeOneStep(FVectorType &x)
 
 } // end namespace Eigen
 
-#endif // EIGEN_LMONESTEP_H
\ No newline at end of file
+#endif // EIGEN_LMONESTEP_H
diff --git a/unsupported/Eigen/src/LevenbergMarquardt/LMpar.h b/unsupported/Eigen/src/LevenbergMarquardt/LMpar.h
index dc60ca05a..af76a9799 100644
--- a/unsupported/Eigen/src/LevenbergMarquardt/LMpar.h
+++ b/unsupported/Eigen/src/LevenbergMarquardt/LMpar.h
@@ -45,8 +45,8 @@ namespace internal {
     /* Function Body */
     const Scalar dwarf = (std::numeric_limits<Scalar>::min)();
     const Index n = qr.matrixQR().cols();
-    assert(n==diag.size());
-    assert(n==qtb.size());
+    eigen_assert(n==diag.size());
+    eigen_assert(n==qtb.size());
 
     VectorType wa1, wa2;
 
diff --git a/unsupported/Eigen/src/LevenbergMarquardt/LevenbergMarquardt.h b/unsupported/Eigen/src/LevenbergMarquardt/LevenbergMarquardt.h
index e45e73ab5..bf2423ef0 100644
--- a/unsupported/Eigen/src/LevenbergMarquardt/LevenbergMarquardt.h
+++ b/unsupported/Eigen/src/LevenbergMarquardt/LevenbergMarquardt.h
@@ -257,7 +257,7 @@ LevenbergMarquardt<FunctorType>::minimizeInit(FVectorType &x)
     // m_fjac.reserve(VectorXi::Constant(n,5)); // FIXME Find a better alternative
     if (!m_useExternalScaling)
         m_diag.resize(n);
-    assert( (!m_useExternalScaling || m_diag.size()==n) || "When m_useExternalScaling is set, the caller must provide a valid 'm_diag'");
+    eigen_assert( (!m_useExternalScaling || m_diag.size()==n) || "When m_useExternalScaling is set, the caller must provide a valid 'm_diag'");
     m_qtf.resize(n);
 
     /* Function Body */
diff --git a/unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h b/unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h
index 166393f00..b00e5e921 100644
--- a/unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h
+++ b/unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h
@@ -237,7 +237,7 @@ void MatrixLogarithmAtomic<MatrixType>::computePade3(MatrixType& result, const M
             0.8872983346207416885179265399782400L };
   const RealScalar weights[] = { 0.2777777777777777777777777777777778L, 0.4444444444444444444444444444444444L,
             0.2777777777777777777777777777777778L };
-  assert(degree <= maxPadeDegree);
+  eigen_assert(degree <= maxPadeDegree);
   MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
   result.setZero(T.rows(), T.rows());
   for (int k = 0; k < degree; ++k)
@@ -253,7 +253,7 @@ void MatrixLogarithmAtomic<MatrixType>::computePade4(MatrixType& result, const M
             0.6699905217924281324013328795516223L, 0.9305681557970262876119732444464048L };
   const RealScalar weights[] = { 0.1739274225687269286865319746109997L, 0.3260725774312730713134680253890003L,
             0.3260725774312730713134680253890003L, 0.1739274225687269286865319746109997L };
-  assert(degree <= maxPadeDegree);
+  eigen_assert(degree <= maxPadeDegree);
   MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
   result.setZero(T.rows(), T.rows());
   for (int k = 0; k < degree; ++k)
@@ -271,7 +271,7 @@ void MatrixLogarithmAtomic<MatrixType>::computePade5(MatrixType& result, const M
   const RealScalar weights[] = { 0.1184634425280945437571320203599587L, 0.2393143352496832340206457574178191L,
             0.2844444444444444444444444444444444L, 0.2393143352496832340206457574178191L,
             0.1184634425280945437571320203599587L };
-  assert(degree <= maxPadeDegree);
+  eigen_assert(degree <= maxPadeDegree);
   MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
   result.setZero(T.rows(), T.rows());
   for (int k = 0; k < degree; ++k)
@@ -289,7 +289,7 @@ void MatrixLogarithmAtomic<MatrixType>::computePade6(MatrixType& result, const M
   const RealScalar weights[] = { 0.0856622461895851725201480710863665L, 0.1803807865240693037849167569188581L,
             0.2339569672863455236949351719947755L, 0.2339569672863455236949351719947755L,
             0.1803807865240693037849167569188581L, 0.0856622461895851725201480710863665L };
-  assert(degree <= maxPadeDegree);
+  eigen_assert(degree <= maxPadeDegree);
   MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
   result.setZero(T.rows(), T.rows());
   for (int k = 0; k < degree; ++k)
@@ -309,7 +309,7 @@ void MatrixLogarithmAtomic<MatrixType>::computePade7(MatrixType& result, const M
             0.1909150252525594724751848877444876L, 0.2089795918367346938775510204081633L,
             0.1909150252525594724751848877444876L, 0.1398526957446383339507338857118898L,
             0.0647424830844348466353057163395410L };
-  assert(degree <= maxPadeDegree);
+  eigen_assert(degree <= maxPadeDegree);
   MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
   result.setZero(T.rows(), T.rows());
   for (int k = 0; k < degree; ++k)
@@ -329,7 +329,7 @@ void MatrixLogarithmAtomic<MatrixType>::computePade8(MatrixType& result, const M
             0.1568533229389436436689811009933007L, 0.1813418916891809914825752246385978L,
             0.1813418916891809914825752246385978L, 0.1568533229389436436689811009933007L,
             0.1111905172266872352721779972131204L, 0.0506142681451881295762656771549811L };
-  assert(degree <= maxPadeDegree);
+  eigen_assert(degree <= maxPadeDegree);
   MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
   result.setZero(T.rows(), T.rows());
   for (int k = 0; k < degree; ++k)
@@ -351,7 +351,7 @@ void MatrixLogarithmAtomic<MatrixType>::computePade9(MatrixType& result, const M
             0.1651196775006298815822625346434870L, 0.1561735385200014200343152032922218L,
             0.1303053482014677311593714347093164L, 0.0903240803474287020292360156214564L,
             0.0406371941807872059859460790552618L };
-  assert(degree <= maxPadeDegree);
+  eigen_assert(degree <= maxPadeDegree);
   MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
   result.setZero(T.rows(), T.rows());
   for (int k = 0; k < degree; ++k)
@@ -373,7 +373,7 @@ void MatrixLogarithmAtomic<MatrixType>::computePade10(MatrixType& result, const
             0.1477621123573764350869464973256692L, 0.1477621123573764350869464973256692L,
             0.1346333596549981775456134607847347L, 0.1095431812579910219977674671140816L,
             0.0747256745752902965728881698288487L, 0.0333356721543440687967844049466659L };
-  assert(degree <= maxPadeDegree);
+  eigen_assert(degree <= maxPadeDegree);
   MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
   result.setZero(T.rows(), T.rows());
   for (int k = 0; k < degree; ++k)
@@ -397,7 +397,7 @@ void MatrixLogarithmAtomic<MatrixType>::computePade11(MatrixType& result, const
             0.1314022722551233310903444349452546L, 0.1165968822959952399592618524215876L,
             0.0931451054638671257130488207158280L, 0.0627901847324523123173471496119701L,
             0.0278342835580868332413768602212743L };
-  assert(degree <= maxPadeDegree);
+  eigen_assert(degree <= maxPadeDegree);
   MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
   result.setZero(T.rows(), T.rows());
   for (int k = 0; k < degree; ++k)
diff --git a/unsupported/Eigen/src/NonLinearOptimization/HybridNonLinearSolver.h b/unsupported/Eigen/src/NonLinearOptimization/HybridNonLinearSolver.h
index b190827b3..5b24b4619 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/HybridNonLinearSolver.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/HybridNonLinearSolver.h
@@ -150,7 +150,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveInit(FVectorType &x)
     fjac.resize(n, n);
     if (!useExternalScaling)
         diag.resize(n);
-    assert( (!useExternalScaling || diag.size()==n) || "When useExternalScaling is set, the caller must provide a valid 'diag'");
+    eigen_assert( (!useExternalScaling || diag.size()==n) || "When useExternalScaling is set, the caller must provide a valid 'diag'");
 
     /* Function Body */
     nfev = 0;
@@ -187,7 +187,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveOneStep(FVectorType &x)
 {
     using std::abs;
 
-    assert(x.size()==n); // check the caller is not cheating us
+    eigen_assert(x.size()==n); // check the caller is not cheating us
 
     Index j;
     std::vector<JacobiRotation<Scalar> > v_givens(n), w_givens(n);
@@ -390,7 +390,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiffInit(FVectorType &
     fvec.resize(n);
     if (!useExternalScaling)
         diag.resize(n);
-    assert( (!useExternalScaling || diag.size()==n) || "When useExternalScaling is set, the caller must provide a valid 'diag'");
+    eigen_assert( (!useExternalScaling || diag.size()==n) || "When useExternalScaling is set, the caller must provide a valid 'diag'");
 
     /* Function Body */
     nfev = 0;
diff --git a/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h b/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h
index 4b1a2d0fb..3d0a9c8fc 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h
@@ -172,7 +172,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeInit(FVectorType &x)
     fjac.resize(m, n);
    if (!useExternalScaling)
         diag.resize(n);
-    assert( (!useExternalScaling || diag.size()==n) || "When useExternalScaling is set, the caller must provide a valid 'diag'");
+    eigen_assert( (!useExternalScaling || diag.size()==n) || "When useExternalScaling is set, the caller must provide a valid 'diag'");
     qtf.resize(n);
 
     /* Function Body */
@@ -209,7 +209,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOneStep(FVectorType &x)
     using std::abs;
     using std::sqrt;
 
-    assert(x.size()==n); // check the caller is not cheating us
+    eigen_assert(x.size()==n); // check the caller is not cheating us
 
     /* calculate the jacobian matrix. */
     Index df_ret = functor.df(x, fjac);
@@ -391,7 +391,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageInit(FVectorType
     fjac.resize(n, n);
     if (!useExternalScaling)
         diag.resize(n);
-    assert( (!useExternalScaling || diag.size()==n) || "When useExternalScaling is set, the caller must provide a valid 'diag'");
+    eigen_assert( (!useExternalScaling || diag.size()==n) || "When useExternalScaling is set, the caller must provide a valid 'diag'");
     qtf.resize(n);
 
     /* Function Body */
@@ -429,7 +429,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageOneStep(FVectorTyp
     using std::abs;
     using std::sqrt;
 
-    assert(x.size()==n); // check the caller is not cheating us
+    eigen_assert(x.size()==n); // check the caller is not cheating us
 
     Index i, j;
     bool sing;
diff --git a/unsupported/Eigen/src/NonLinearOptimization/covar.h b/unsupported/Eigen/src/NonLinearOptimization/covar.h
index c2fb79441..68260d191 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/covar.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/covar.h
@@ -20,7 +20,7 @@ void covar(
     const Index n = r.cols();
     const Scalar tolr = tol * abs(r(0,0));
     Matrix< Scalar, Dynamic, 1 > wa(n);
-    assert(ipvt.size()==n);
+    eigen_assert(ipvt.size()==n);
 
     /* form the inverse of r in the full upper triangle of r. */
     l = -1;
diff --git a/unsupported/Eigen/src/NonLinearOptimization/dogleg.h b/unsupported/Eigen/src/NonLinearOptimization/dogleg.h
index 57dbc8bfb..4210958e7 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/dogleg.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/dogleg.h
@@ -24,9 +24,9 @@ void dogleg(
     /* Function Body */
     const Scalar epsmch = NumTraits<Scalar>::epsilon();
     const Index n = qrfac.cols();
-    assert(n==qtb.size());
-    assert(n==x.size());
-    assert(n==diag.size());
+    eigen_assert(n==qtb.size());
+    eigen_assert(n==x.size());
+    eigen_assert(n==diag.size());
     Matrix< Scalar, Dynamic, 1 > wa1(n), wa2(n);
 
     /* first, calculate the gauss-newton direction. */
diff --git a/unsupported/Eigen/src/NonLinearOptimization/fdjac1.h b/unsupported/Eigen/src/NonLinearOptimization/fdjac1.h
index 05947936e..bb7cf267b 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/fdjac1.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/fdjac1.h
@@ -27,7 +27,7 @@ DenseIndex fdjac1(
     /* Function Body */
     const Scalar epsmch = NumTraits<Scalar>::epsilon();
     const Index n = x.size();
-    assert(fvec.size()==n);
+    eigen_assert(fvec.size()==n);
     Matrix< Scalar, Dynamic, 1 > wa1(n);
     Matrix< Scalar, Dynamic, 1 > wa2(n);
 
diff --git a/unsupported/Eigen/src/NonLinearOptimization/lmpar.h b/unsupported/Eigen/src/NonLinearOptimization/lmpar.h
index 834407c5a..4c17d4cdf 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/lmpar.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/lmpar.h
@@ -29,9 +29,9 @@ void lmpar(
     /* Function Body */
     const Scalar dwarf = (std::numeric_limits<Scalar>::min)();
     const Index n = r.cols();
-    assert(n==diag.size());
-    assert(n==qtb.size());
-    assert(n==x.size());
+    eigen_assert(n==diag.size());
+    eigen_assert(n==qtb.size());
+    eigen_assert(n==x.size());
 
     Matrix< Scalar, Dynamic, 1 > wa1, wa2;
 
@@ -187,8 +187,8 @@ void lmpar2(
     /* Function Body */
     const Scalar dwarf = (std::numeric_limits<Scalar>::min)();
     const Index n = qr.matrixQR().cols();
-    assert(n==diag.size());
-    assert(n==qtb.size());
+    eigen_assert(n==diag.size());
+    eigen_assert(n==qtb.size());
 
     Matrix< Scalar, Dynamic, 1 > wa1, wa2;
 
diff --git a/unsupported/Eigen/src/NonLinearOptimization/r1updt.h b/unsupported/Eigen/src/NonLinearOptimization/r1updt.h
index 55fae5ae8..f28766061 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/r1updt.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/r1updt.h
@@ -24,10 +24,10 @@ void r1updt(
 
     // r1updt had a broader usecase, but we dont use it here. And, more
     // importantly, we can not test it.
-    assert(m==n);
-    assert(u.size()==m);
-    assert(v.size()==n);
-    assert(w.size()==n);
+    eigen_assert(m==n);
+    eigen_assert(u.size()==m);
+    eigen_assert(v.size()==n);
+    eigen_assert(w.size()==n);
 
     /* move the nontrivial part of the last column of s into w. */
     w[n-1] = s(n-1,n-1);
diff --git a/unsupported/Eigen/src/NonLinearOptimization/rwupdt.h b/unsupported/Eigen/src/NonLinearOptimization/rwupdt.h
index 9ce079e22..6ebf8563f 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/rwupdt.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/rwupdt.h
@@ -12,7 +12,7 @@ void rwupdt(
     typedef DenseIndex Index;
 
     const Index n = r.cols();
-    assert(r.rows()>=n);
+    eigen_assert(r.rows()>=n);
     std::vector<JacobiRotation<Scalar> > givens(n);
 
     /* Local variables */
diff --git a/unsupported/Eigen/src/NumericalDiff/NumericalDiff.h b/unsupported/Eigen/src/NumericalDiff/NumericalDiff.h
index 7ee30e18c..ea5d8bc27 100644
--- a/unsupported/Eigen/src/NumericalDiff/NumericalDiff.h
+++ b/unsupported/Eigen/src/NumericalDiff/NumericalDiff.h
@@ -86,7 +86,7 @@ public:
                 // do nothing
                 break;
             default:
-                assert(false);
+                eigen_assert(false);
         };
 
         // Function Body
@@ -112,7 +112,7 @@ public:
                     jac.col(j) = (val2-val1)/(2*h);
                     break;
                 default:
-                    assert(false);
+                    eigen_assert(false);
             };
         }
         return nfev;
diff --git a/unsupported/Eigen/src/Polynomials/PolynomialSolver.h b/unsupported/Eigen/src/Polynomials/PolynomialSolver.h
index fba8fc910..ad486f08e 100644
--- a/unsupported/Eigen/src/Polynomials/PolynomialSolver.h
+++ b/unsupported/Eigen/src/Polynomials/PolynomialSolver.h
@@ -344,7 +344,7 @@ class PolynomialSolver : public PolynomialSolverBase<_Scalar,_Deg>
     template< typename OtherPolynomial >
     void compute( const OtherPolynomial& poly )
     {
-      assert( Scalar(0) != poly[poly.size()-1] );
+      eigen_assert( Scalar(0) != poly[poly.size()-1] );
       internal::companion<Scalar,_Deg> companion( poly );
       companion.balance();
       m_eigenSolver.compute( companion.denseMatrix() );
@@ -376,7 +376,7 @@ class PolynomialSolver<_Scalar,1> : public PolynomialSolverBase<_Scalar,1>
     template< typename OtherPolynomial >
    void compute( const OtherPolynomial& poly )
     {
-      assert( Scalar(0) != poly[poly.size()-1] );
+      eigen_assert( Scalar(0) != poly[poly.size()-1] );
       m_roots[0] = -poly[0]/poly[poly.size()-1];
     }
 
diff --git a/unsupported/Eigen/src/Polynomials/PolynomialUtils.h b/unsupported/Eigen/src/Polynomials/PolynomialUtils.h
index 5a9ab110e..27d4e9f91 100644
--- a/unsupported/Eigen/src/Polynomials/PolynomialUtils.h
+++ b/unsupported/Eigen/src/Polynomials/PolynomialUtils.h
@@ -78,7 +78,7 @@ typename NumTraits<typename Polynomial::Scalar>::Real cauchy_max_bound( const Po
   typedef typename Polynomial::Scalar Scalar;
   typedef typename NumTraits<Scalar>::Real Real;
 
-  assert( Scalar(0) != poly[poly.size()-1] );
+  eigen_assert( Scalar(0) != poly[poly.size()-1] );
  const Scalar inv_leading_coeff = Scalar(1)/poly[poly.size()-1];
   Real cb(0);
 
diff --git a/unsupported/Eigen/src/SparseExtra/MarketIO.h b/unsupported/Eigen/src/SparseExtra/MarketIO.h
index de958de9f..7aafce928 100644
--- a/unsupported/Eigen/src/SparseExtra/MarketIO.h
+++ b/unsupported/Eigen/src/SparseExtra/MarketIO.h
@@ -116,7 +116,7 @@ inline bool getMarketHeader(const std::string& filename, int& sym, bool& iscompl
 
   std::string line;
   // The matrix header is always the first line in the file
-  std::getline(in, line); assert(in.good());
+  std::getline(in, line); eigen_assert(in.good());
   std::stringstream fmtline(line);
   std::string substr[5];
 
@@ -200,11 +200,11 @@ bool loadMarketVector(VectorType& vec, const std::string& filename)
   int n(0), col(0);
   do
   { // Skip comments
-    std::getline(in, line); assert(in.good());
+    std::getline(in, line); eigen_assert(in.good());
   } while (line[0] == '%');
   std::istringstream newline(line);
   newline >> n >> col;
-  assert(n>0 && col>0);
+  eigen_assert(n>0 && col>0);
   vec.resize(n);
   int i = 0;
   Scalar value;