From 89f468671dea2cc1dc37cdf75bbc7c7e56749bac Mon Sep 17 00:00:00 2001
From: Benoit Jacob
Date: Wed, 17 Dec 2008 14:30:01 +0000
Subject: * replace postfix ++ by prefix ++ wherever that makes sense in Eigen/
 * fix some "unused variable" warnings in the tests; there remains a libstdc++
   "deprecated" warning which I haven't looked much into

---
 Eigen/src/Array/AllAndAny.h           |  8 ++++----
 Eigen/src/Core/Assign.h               | 20 +++++++++----------
 Eigen/src/Core/CacheFriendlyProduct.h | 20 +++++++++----------
 Eigen/src/Core/CommaInitializer.h     |  2 +-
 Eigen/src/Core/CwiseNullaryOp.h       | 14 +++++++-------
 Eigen/src/Core/DiagonalMatrix.h       |  6 +++---
 Eigen/src/Core/Dot.h                  |  8 ++++----
 Eigen/src/Core/Fuzzy.h                |  6 +++---
 Eigen/src/Core/IO.h                   |  8 ++++----
 Eigen/src/Core/Part.h                 | 36 +++++++++++++++++------------------
 Eigen/src/Core/Product.h              |  6 +++---
 Eigen/src/Core/Redux.h                |  6 +++---
 Eigen/src/Core/Sum.h                  | 10 +++++-----
 Eigen/src/Core/Visitor.h              |  6 +++---
 Eigen/src/Core/util/Macros.h          |  2 +-
 Eigen/src/LU/LU.h                     | 34 ++++++++++++++++-----------------
 Eigen/src/QR/EigenSolver.h            | 36 +++++++++++++++++------------------
 Eigen/src/QR/QR.h                     |  2 +-
 Eigen/src/QR/SelfAdjointEigenSolver.h |  2 +-
 Eigen/src/QR/Tridiagonalization.h     |  2 +-
 Eigen/src/Regression/Regression.h     | 12 ++++++------
 Eigen/src/SVD/SVD.h                   | 34 ++++++++++++++++-----------------
 Eigen/src/Sparse/AmbiVector.h         |  8 ++++----
 Eigen/src/Sparse/RandomSetter.h       |  4 ++--
 Eigen/src/Sparse/SparseLDLT.h         | 20 +++++++++----------
 Eigen/src/Sparse/SparseMatrix.h       |  8 ++++----
 26 files changed, 160 insertions(+), 160 deletions(-)

diff --git a/Eigen/src/Array/AllAndAny.h b/Eigen/src/Array/AllAndAny.h
index cc4560b2f..ac2760f1a 100644
--- a/Eigen/src/Array/AllAndAny.h
+++ b/Eigen/src/Array/AllAndAny.h
@@ -99,8 +99,8 @@ inline bool MatrixBase::all(void) const
      >::run(derived());
   else
   {
-    for(int j = 0; j < cols(); j++)
-      for(int i = 0; i < rows(); i++)
+    for(int j = 0; j < cols(); ++j)
+      for(int i = 0; i < rows(); ++i)
         if (!coeff(i, j)) return false;
     return true;
   }
@@ -123,8 +123,8 @@ inline bool MatrixBase::any(void) const
      >::run(derived());
   else
   {
-    for(int j = 0; j < cols(); j++)
-      for(int i = 0; i < rows(); i++)
+    for(int j = 0; j < cols(); ++j)
+      for(int i = 0; i < rows(); ++i)
         if (coeff(i, j)) return true;
     return false;
   }
diff --git a/Eigen/src/Core/Assign.h b/Eigen/src/Core/Assign.h
index 11f35c5e7..bcb3e9e7e 100644
--- a/Eigen/src/Core/Assign.h
+++ b/Eigen/src/Core/Assign.h
@@ -214,8 +214,8 @@ struct ei_assign_impl
   {
     const int innerSize = dst.innerSize();
     const int outerSize = dst.outerSize();
-    for(int j = 0; j < outerSize; j++)
-      for(int i = 0; i < innerSize; i++)
+    for(int j = 0; j < outerSize; ++j)
+      for(int i = 0; i < innerSize; ++i)
       {
        if(int(Derived1::Flags)&RowMajorBit)
          dst.copyCoeff(j, i, src);
@@ -243,7 +243,7 @@ struct ei_assign_impl
     const bool rowMajor = int(Derived1::Flags)&RowMajorBit;
     const int innerSize = rowMajor ?
Derived1::ColsAtCompileTime : Derived1::RowsAtCompileTime; const int outerSize = dst.outerSize(); - for(int j = 0; j < outerSize; j++) + for(int j = 0; j < outerSize; ++j) ei_assign_novec_InnerUnrolling ::run(dst, src, j); } @@ -261,7 +261,7 @@ struct ei_assign_impl const int innerSize = dst.innerSize(); const int outerSize = dst.outerSize(); const int packetSize = ei_packet_traits::size; - for(int j = 0; j < outerSize; j++) + for(int j = 0; j < outerSize; ++j) for(int i = 0; i < innerSize; i+=packetSize) { if(int(Derived1::Flags)&RowMajorBit) @@ -290,7 +290,7 @@ struct ei_assign_impl const bool rowMajor = int(Derived1::Flags)&RowMajorBit; const int innerSize = rowMajor ? Derived1::ColsAtCompileTime : Derived1::RowsAtCompileTime; const int outerSize = dst.outerSize(); - for(int j = 0; j < outerSize; j++) + for(int j = 0; j < outerSize; ++j) ei_assign_innervec_InnerUnrolling ::run(dst, src, j); } @@ -311,7 +311,7 @@ struct ei_assign_impl : ei_alignmentOffset(&dst.coeffRef(0), size); const int alignedEnd = alignedStart + ((size-alignedStart)/packetSize)*packetSize; - for(int index = 0; index < alignedStart; index++) + for(int index = 0; index < alignedStart; ++index) dst.copyCoeff(index, src); for(int index = alignedStart; index < alignedEnd; index += packetSize) @@ -319,7 +319,7 @@ struct ei_assign_impl dst.template copyPacket::SrcAlignment>(index, src); } - for(int index = alignedEnd; index < size; index++) + for(int index = alignedEnd; index < size; ++index) dst.copyCoeff(index, src); } }; @@ -355,12 +355,12 @@ struct ei_assign_impl int alignedStart = ei_assign_traits::DstIsAligned ? 0 : ei_alignmentOffset(&dst.coeffRef(0), innerSize); - for(int i = 0; i < outerSize; i++) + for(int i = 0; i < outerSize; ++i) { const int alignedEnd = alignedStart + ((innerSize-alignedStart) & ~packetAlignedMask); // do the non-vectorizable part of the assignment - for (int index = 0; index } // do the non-vectorizable part of the assignment - for (int index = alignedEnd; indexalignedStart) @@ -493,7 +493,7 @@ static EIGEN_DONT_INLINE void ei_cache_friendly_product_colmajor_times_vector( } // end explicit vectorization /* process remaining coeffs (or all if there is no explicit vectorization) */ - for (int j=alignedSize; jalignedStart) @@ -732,7 +732,7 @@ static EIGEN_DONT_INLINE void ei_cache_friendly_product_rowmajor_times_vector( // process remaining scalars // FIXME this loop get vectorized by the compiler ! 
- for (int j=alignedSize; j bool MatrixBase::isApproxToConstant (const Scalar& value, RealScalar prec) const { - for(int j = 0; j < cols(); j++) - for(int i = 0; i < rows(); i++) + for(int j = 0; j < cols(); ++j) + for(int i = 0; i < rows(); ++i) if(!ei_isApprox(coeff(i, j), value, prec)) return false; return true; @@ -330,8 +330,8 @@ template bool MatrixBase::isZero (RealScalar prec) const { - for(int j = 0; j < cols(); j++) - for(int i = 0; i < rows(); i++) + for(int j = 0; j < cols(); ++j) + for(int i = 0; i < rows(); ++i) if(!ei_isMuchSmallerThan(coeff(i, j), static_cast(1), prec)) return false; return true; @@ -499,9 +499,9 @@ template bool MatrixBase::isIdentity (RealScalar prec) const { - for(int j = 0; j < cols(); j++) + for(int j = 0; j < cols(); ++j) { - for(int i = 0; i < rows(); i++) + for(int i = 0; i < rows(); ++i) { if(i == j) { @@ -534,7 +534,7 @@ struct ei_setIdentity_impl { m.setZero(); const int size = std::min(m.rows(), m.cols()); - for(int i = 0; i < size; i++) m.coeffRef(i,i) = typename Derived::Scalar(1); + for(int i = 0; i < size; ++i) m.coeffRef(i,i) = typename Derived::Scalar(1); return m; } }; diff --git a/Eigen/src/Core/DiagonalMatrix.h b/Eigen/src/Core/DiagonalMatrix.h index e09797eaf..25fa26953 100644 --- a/Eigen/src/Core/DiagonalMatrix.h +++ b/Eigen/src/Core/DiagonalMatrix.h @@ -123,13 +123,13 @@ bool MatrixBase::isDiagonal { if(cols() != rows()) return false; RealScalar maxAbsOnDiagonal = static_cast(-1); - for(int j = 0; j < cols(); j++) + for(int j = 0; j < cols(); ++j) { RealScalar absOnDiagonal = ei_abs(coeff(j,j)); if(absOnDiagonal > maxAbsOnDiagonal) maxAbsOnDiagonal = absOnDiagonal; } - for(int j = 0; j < cols(); j++) - for(int i = 0; i < j; i++) + for(int j = 0; j < cols(); ++j) + for(int i = 0; i < j; ++i) { if(!ei_isMuchSmallerThan(coeff(i, j), maxAbsOnDiagonal, prec)) return false; if(!ei_isMuchSmallerThan(coeff(j, i), maxAbsOnDiagonal, prec)) return false; diff --git a/Eigen/src/Core/Dot.h b/Eigen/src/Core/Dot.h index e700b76ae..c4703adc3 100644 --- a/Eigen/src/Core/Dot.h +++ b/Eigen/src/Core/Dot.h @@ -156,7 +156,7 @@ struct ei_dot_impl ei_assert(v1.size()>0 && "you are using a non initialized vector"); Scalar res; res = v1.coeff(0) * ei_conj(v2.coeff(0)); - for(int i = 1; i < v1.size(); i++) + for(int i = 1; i < v1.size(); ++i) res += v1.coeff(i) * ei_conj(v2.coeff(i)); return res; } @@ -211,7 +211,7 @@ struct ei_dot_impl } // do the remainder of the vector - for(int index = alignedSize; index < size; index++) + for(int index = alignedSize; index < size; ++index) { res += v1.coeff(index) * v2.coeff(index); } @@ -370,11 +370,11 @@ template bool MatrixBase::isUnitary(RealScalar prec) const { typename Derived::Nested nested(derived()); - for(int i = 0; i < cols(); i++) + for(int i = 0; i < cols(); ++i) { if(!ei_isApprox(nested.col(i).squaredNorm(), static_cast(1), prec)) return false; - for(int j = 0; j < i; j++) + for(int j = 0; j < i; ++j) if(!ei_isMuchSmallerThan(nested.col(i).dot(nested.col(j)), static_cast(1), prec)) return false; } diff --git a/Eigen/src/Core/Fuzzy.h b/Eigen/src/Core/Fuzzy.h index 18150cc6d..128554296 100644 --- a/Eigen/src/Core/Fuzzy.h +++ b/Eigen/src/Core/Fuzzy.h @@ -202,7 +202,7 @@ struct ei_fuzzy_selector ei_assert(self.rows() == other.rows() && self.cols() == other.cols()); typename Derived::Nested nested(self); typename OtherDerived::Nested otherNested(other); - for(int i = 0; i < self.cols(); i++) + for(int i = 0; i < self.cols(); ++i) if((nested.col(i) - otherNested.col(i)).squaredNorm() > 
std::min(nested.col(i).squaredNorm(), otherNested.col(i).squaredNorm()) * prec * prec) return false; @@ -211,7 +211,7 @@ struct ei_fuzzy_selector static bool isMuchSmallerThan(const Derived& self, const RealScalar& other, RealScalar prec) { typename Derived::Nested nested(self); - for(int i = 0; i < self.cols(); i++) + for(int i = 0; i < self.cols(); ++i) if(nested.col(i).squaredNorm() > ei_abs2(other * prec)) return false; return true; @@ -222,7 +222,7 @@ struct ei_fuzzy_selector ei_assert(self.rows() == other.rows() && self.cols() == other.cols()); typename Derived::Nested nested(self); typename OtherDerived::Nested otherNested(other); - for(int i = 0; i < self.cols(); i++) + for(int i = 0; i < self.cols(); ++i) if(nested.col(i).squaredNorm() > otherNested.col(i).squaredNorm() * prec * prec) return false; return true; diff --git a/Eigen/src/Core/IO.h b/Eigen/src/Core/IO.h index 6e6d02ad4..ca00cae3d 100644 --- a/Eigen/src/Core/IO.h +++ b/Eigen/src/Core/IO.h @@ -129,8 +129,8 @@ std::ostream & ei_print_matrix(std::ostream & s, const MatrixBase & _m, if (fmt.flags & AlignCols) { // compute the largest width - for(int j = 1; j < m.cols(); j++) - for(int i = 0; i < m.rows(); i++) + for(int j = 1; j < m.cols(); ++j) + for(int i = 0; i < m.rows(); ++i) { std::stringstream sstr; sstr.precision(fmt.precision); @@ -140,14 +140,14 @@ std::ostream & ei_print_matrix(std::ostream & s, const MatrixBase & _m, } s.precision(fmt.precision); s << fmt.matPrefix; - for(int i = 0; i < m.rows(); i++) + for(int i = 0; i < m.rows(); ++i) { if (i) s << fmt.rowSpacer; s << fmt.rowPrefix; if(width) s.width(width); s << m.coeff(i, 0); - for(int j = 1; j < m.cols(); j++) + for(int j = 1; j < m.cols(); ++j) { s << fmt.coeffSeparator; if (width) s.width(width); diff --git a/Eigen/src/Core/Part.h b/Eigen/src/Core/Part.h index 3928f51d3..3cb55fe1d 100644 --- a/Eigen/src/Core/Part.h +++ b/Eigen/src/Core/Part.h @@ -219,8 +219,8 @@ struct ei_part_assignment_impl { inline static void run(Derived1 &dst, const Derived2 &src) { - for(int j = 0; j < dst.cols(); j++) - for(int i = 0; i <= j; i++) + for(int j = 0; j < dst.cols(); ++j) + for(int i = 0; i <= j; ++i) dst.copyCoeff(i, j, src); } }; @@ -230,8 +230,8 @@ struct ei_part_assignment_impl { inline static void run(Derived1 &dst, const Derived2 &src) { - for(int j = 0; j < dst.cols(); j++) - for(int i = j; i < dst.rows(); i++) + for(int j = 0; j < dst.cols(); ++j) + for(int i = j; i < dst.rows(); ++i) dst.copyCoeff(i, j, src); } }; @@ -241,8 +241,8 @@ struct ei_part_assignment_impl { inline static void run(Derived1 &dst, const Derived2 &src) { - for(int j = 0; j < dst.cols(); j++) - for(int i = 0; i < j; i++) + for(int j = 0; j < dst.cols(); ++j) + for(int i = 0; i < j; ++i) dst.copyCoeff(i, j, src); } }; @@ -251,8 +251,8 @@ struct ei_part_assignment_impl { inline static void run(Derived1 &dst, const Derived2 &src) { - for(int j = 0; j < dst.cols(); j++) - for(int i = j+1; i < dst.rows(); i++) + for(int j = 0; j < dst.cols(); ++j) + for(int i = j+1; i < dst.rows(); ++i) dst.copyCoeff(i, j, src); } }; @@ -261,9 +261,9 @@ struct ei_part_assignment_impl { inline static void run(Derived1 &dst, const Derived2 &src) { - for(int j = 0; j < dst.cols(); j++) + for(int j = 0; j < dst.cols(); ++j) { - for(int i = 0; i < j; i++) + for(int i = 0; i < j; ++i) dst.coeffRef(j, i) = ei_conj(dst.coeffRef(i, j) = src.coeff(i, j)); dst.coeffRef(j, j) = ei_real(src.coeff(j, j)); } @@ -312,14 +312,14 @@ bool MatrixBase::isUpper(RealScalar prec) const { if(cols() != rows()) return false; RealScalar 
maxAbsOnUpperPart = static_cast(-1); - for(int j = 0; j < cols(); j++) - for(int i = 0; i <= j; i++) + for(int j = 0; j < cols(); ++j) + for(int i = 0; i <= j; ++i) { RealScalar absValue = ei_abs(coeff(i,j)); if(absValue > maxAbsOnUpperPart) maxAbsOnUpperPart = absValue; } - for(int j = 0; j < cols()-1; j++) - for(int i = j+1; i < rows(); i++) + for(int j = 0; j < cols()-1; ++j) + for(int i = j+1; i < rows(); ++i) if(!ei_isMuchSmallerThan(coeff(i, j), maxAbsOnUpperPart, prec)) return false; return true; } @@ -334,14 +334,14 @@ bool MatrixBase::isLower(RealScalar prec) const { if(cols() != rows()) return false; RealScalar maxAbsOnLowerPart = static_cast(-1); - for(int j = 0; j < cols(); j++) - for(int i = j; i < rows(); i++) + for(int j = 0; j < cols(); ++j) + for(int i = j; i < rows(); ++i) { RealScalar absValue = ei_abs(coeff(i,j)); if(absValue > maxAbsOnLowerPart) maxAbsOnLowerPart = absValue; } - for(int j = 1; j < cols(); j++) - for(int i = 0; i < j; i++) + for(int j = 1; j < cols(); ++j) + for(int i = 0; i < j; ++i) if(!ei_isMuchSmallerThan(coeff(i, j), maxAbsOnLowerPart, prec)) return false; return true; } diff --git a/Eigen/src/Core/Product.h b/Eigen/src/Core/Product.h index b464304f8..a844470a7 100644 --- a/Eigen/src/Core/Product.h +++ b/Eigen/src/Core/Product.h @@ -337,7 +337,7 @@ struct ei_product_coeff_impl { ei_assert(lhs.cols()>0 && "you are using a non initialized matrix"); res = lhs.coeff(row, 0) * rhs.coeff(0, col); - for(int i = 1; i < lhs.cols(); i++) + for(int i = 1; i < lhs.cols(); ++i) res += lhs.coeff(row, i) * rhs.coeff(i, col); } }; @@ -495,7 +495,7 @@ struct ei_product_packet_impl0 && "you are using a non initialized matrix"); res = ei_pmul(ei_pset1(lhs.coeff(row, 0)),rhs.template packet(0, col)); - for(int i = 1; i < lhs.cols(); i++) + for(int i = 1; i < lhs.cols(); ++i) res = ei_pmadd(ei_pset1(lhs.coeff(row, i)), rhs.template packet(i, col), res); } }; @@ -507,7 +507,7 @@ struct ei_product_packet_impl0 && "you are using a non initialized matrix"); res = ei_pmul(lhs.template packet(row, 0), ei_pset1(rhs.coeff(0, col))); - for(int i = 1; i < lhs.cols(); i++) + for(int i = 1; i < lhs.cols(); ++i) res = ei_pmadd(lhs.template packet(row, i), ei_pset1(rhs.coeff(i, col)), res); } }; diff --git a/Eigen/src/Core/Redux.h b/Eigen/src/Core/Redux.h index 7dec894ff..734ef1929 100644 --- a/Eigen/src/Core/Redux.h +++ b/Eigen/src/Core/Redux.h @@ -68,10 +68,10 @@ struct ei_redux_impl ei_assert(mat.rows()>0 && mat.cols()>0 && "you are using a non initialized matrix"); Scalar res; res = mat.coeff(0,0); - for(int i = 1; i < mat.rows(); i++) + for(int i = 1; i < mat.rows(); ++i) res = func(res, mat.coeff(i, 0)); - for(int j = 1; j < mat.cols(); j++) - for(int i = 0; i < mat.rows(); i++) + for(int j = 1; j < mat.cols(); ++j) + for(int i = 0; i < mat.rows(); ++i) res = func(res, mat.coeff(i, j)); return res; } diff --git a/Eigen/src/Core/Sum.h b/Eigen/src/Core/Sum.h index 4247bffcc..45ef62205 100644 --- a/Eigen/src/Core/Sum.h +++ b/Eigen/src/Core/Sum.h @@ -168,10 +168,10 @@ struct ei_sum_impl ei_assert(mat.rows()>0 && mat.cols()>0 && "you are using a non initialized matrix"); Scalar res; res = mat.coeff(0, 0); - for(int i = 1; i < mat.rows(); i++) + for(int i = 1; i < mat.rows(); ++i) res += mat.coeff(i, 0); - for(int j = 1; j < mat.cols(); j++) - for(int i = 0; i < mat.rows(); i++) + for(int j = 1; j < mat.cols(); ++j) + for(int i = 0; i < mat.rows(); ++i) res += mat.coeff(i, j); return res; } @@ -217,10 +217,10 @@ struct ei_sum_impl res = Scalar(0); } - for(int index = 0; index < 
alignedStart; index++) + for(int index = 0; index < alignedStart; ++index) res += mat.coeff(index); - for(int index = alignedEnd; index < size; index++) + for(int index = alignedEnd; index < size; ++index) res += mat.coeff(index); return res; diff --git a/Eigen/src/Core/Visitor.h b/Eigen/src/Core/Visitor.h index 041aa9445..a9ef5c861 100644 --- a/Eigen/src/Core/Visitor.h +++ b/Eigen/src/Core/Visitor.h @@ -55,10 +55,10 @@ struct ei_visitor_impl inline static void run(const Derived& mat, Visitor& visitor) { visitor.init(mat.coeff(0,0), 0, 0); - for(int i = 1; i < mat.rows(); i++) + for(int i = 1; i < mat.rows(); ++i) visitor(mat.coeff(i, 0), i, 0); - for(int j = 1; j < mat.cols(); j++) - for(int i = 0; i < mat.rows(); i++) + for(int j = 1; j < mat.cols(); ++j) + for(int i = 0; i < mat.rows(); ++i) visitor(mat.coeff(i, j), i, j); } }; diff --git a/Eigen/src/Core/util/Macros.h b/Eigen/src/Core/util/Macros.h index 274d59057..4d6905964 100644 --- a/Eigen/src/Core/util/Macros.h +++ b/Eigen/src/Core/util/Macros.h @@ -107,7 +107,7 @@ using Eigen::ei_cos; #endif #if (defined __GNUC__) -#define EIGEN_ALIGN_128 __attribute__ ((aligned(16))) +#define EIGEN_ALIGN_128 __attribute__((aligned(16))) #elif (defined _MSC_VER) #define EIGEN_ALIGN_128 __declspec(align(16)) #else diff --git a/Eigen/src/LU/LU.h b/Eigen/src/LU/LU.h index 1e5ced47c..526ea488a 100644 --- a/Eigen/src/LU/LU.h +++ b/Eigen/src/LU/LU.h @@ -312,7 +312,7 @@ LU::LU(const MatrixType& matrix) int number_of_transpositions = 0; RealScalar biggest = RealScalar(0); - for(int k = 0; k < size; k++) + for(int k = 0; k < size; ++k) { int row_of_biggest_in_corner, col_of_biggest_in_corner; RealScalar biggest_in_corner; @@ -326,11 +326,11 @@ LU::LU(const MatrixType& matrix) cols_transpositions.coeffRef(k) = col_of_biggest_in_corner; if(k != row_of_biggest_in_corner) { m_lu.row(k).swap(m_lu.row(row_of_biggest_in_corner)); - number_of_transpositions++; + ++number_of_transpositions; } if(k != col_of_biggest_in_corner) { m_lu.col(k).swap(m_lu.col(col_of_biggest_in_corner)); - number_of_transpositions++; + ++number_of_transpositions; } if(k==0) biggest = biggest_in_corner; @@ -339,21 +339,21 @@ LU::LU(const MatrixType& matrix) if(k= 0; k--) std::swap(m_p.coeffRef(k), m_p.coeffRef(rows_transpositions.coeff(k))); - for(int k = 0; k < matrix.cols(); k++) m_q.coeffRef(k) = k; - for(int k = 0; k < size; k++) + for(int k = 0; k < matrix.cols(); ++k) m_q.coeffRef(k) = k; + for(int k = 0; k < size; ++k) std::swap(m_q.coeffRef(k), m_q.coeffRef(cols_transpositions.coeff(k))); m_det_pq = (number_of_transpositions%2) ? -1 : 1; - for(m_rank = 0; m_rank < size; m_rank++) + for(m_rank = 0; m_rank < size; ++m_rank) if(ei_isMuchSmallerThan(m_lu.diagonal().coeff(m_rank), m_lu.diagonal().coeff(0))) break; } @@ -374,7 +374,7 @@ void LU::computeKernel(KernelResultType *result) const /* Let us use the following lemma: * * Lemma: If the matrix A has the LU decomposition PAQ = LU, - * then Ker A = Q( Ker U ). + * then Ker A = Q(Ker U). * * Proof: trivial: just keep in mind that P, Q, L are invertible. 
*/ @@ -395,10 +395,10 @@ void LU::computeKernel(KernelResultType *result) const .template marked() .solveTriangularInPlace(y); - for(int i = 0; i < m_rank; i++) + for(int i = 0; i < m_rank; ++i) result->row(m_q.coeff(i)) = y.row(i); - for(int i = m_rank; i < cols; i++) result->row(m_q.coeff(i)).setZero(); - for(int k = 0; k < dimker; k++) result->coeffRef(m_q.coeff(m_rank+k), k) = Scalar(1); + for(int i = m_rank; i < cols; ++i) result->row(m_q.coeff(i)).setZero(); + for(int k = 0; k < dimker; ++k) result->coeffRef(m_q.coeff(m_rank+k), k) = Scalar(1); } template @@ -432,7 +432,7 @@ bool LU::solve( typename OtherDerived::Eval c(b.rows(), b.cols()); // Step 1 - for(int i = 0; i < rows; i++) c.row(m_p.coeff(i)) = b.row(i); + for(int i = 0; i < rows; ++i) c.row(m_p.coeff(i)) = b.row(i); // Step 2 Matrix::solve( { // is c is in the image of U ? RealScalar biggest_in_c = c.corner(TopLeft, m_rank, c.cols()).cwise().abs().maxCoeff(); - for(int col = 0; col < c.cols(); col++) - for(int row = m_rank; row < c.rows(); row++) + for(int col = 0; col < c.cols(); ++col) + for(int row = m_rank; row < c.rows(); ++row) if(!ei_isMuchSmallerThan(c.coeff(row,col), biggest_in_c)) return false; } @@ -464,8 +464,8 @@ bool LU::solve( // Step 4 result->resize(m_lu.cols(), b.cols()); - for(int i = 0; i < m_rank; i++) result->row(m_q.coeff(i)) = d.row(i); - for(int i = m_rank; i < m_lu.cols(); i++) result->row(m_q.coeff(i)).setZero(); + for(int i = 0; i < m_rank; ++i) result->row(m_q.coeff(i)) = d.row(i); + for(int i = m_rank; i < m_lu.cols(); ++i) result->row(m_q.coeff(i)).setZero(); return true; } diff --git a/Eigen/src/QR/EigenSolver.h b/Eigen/src/QR/EigenSolver.h index 33dcd6daa..cd818a975 100644 --- a/Eigen/src/QR/EigenSolver.h +++ b/Eigen/src/QR/EigenSolver.h @@ -122,7 +122,7 @@ MatrixType EigenSolver::pseudoEigenvalueMatrix() const { int n = m_eivec.cols(); MatrixType matD = MatrixType::Zero(n,n); - for (int i=0; i::pseudoEigenvalueMatrix() const { matD.template block<2,2>(i,i) << ei_real(m_eivalues.coeff(i)), ei_imag(m_eivalues.coeff(i)), -ei_imag(m_eivalues.coeff(i)), ei_real(m_eivalues.coeff(i)); - i++; + ++i; } } return matD; @@ -145,7 +145,7 @@ typename EigenSolver::EigenvectorType EigenSolver::eigen { int n = m_eivec.cols(); EigenvectorType matV(n,n); - for (int j=0; j::EigenvectorType EigenSolver::eigen else { // we have a pair of complex eigen values - for (int i=0; i::orthes(MatrixType& matH, RealVectorType& ort) int low = 0; int high = n-1; - for (int m = low+1; m <= high-1; m++) + for (int m = low+1; m <= high-1; ++m) { // Scale column. 
RealScalar scale = matH.block(m, m-1, high-m+1, 1).cwise().abs().sum(); @@ -290,7 +290,7 @@ void EigenSolver::hqr2(MatrixType& matH) // FIXME to be efficient the following would requires a triangular reduxion code // Scalar norm = matH.upper().cwise().abs().sum() + matH.corner(BottomLeft,n,n).diagonal().cwise().abs().sum(); Scalar norm = 0.0; - for (int j = 0; j < nn; j++) + for (int j = 0; j < nn; ++j) { // FIXME what's the purpose of the following since the condition is always false if ((j < low) || (j > high)) @@ -361,7 +361,7 @@ void EigenSolver::hqr2(MatrixType& matH) q = q / r; // Row modification - for (int j = n-1; j < nn; j++) + for (int j = n-1; j < nn; ++j) { z = matH.coeff(n-1,j); matH.coeffRef(n-1,j) = q * z + p * matH.coeff(n,j); @@ -369,7 +369,7 @@ void EigenSolver::hqr2(MatrixType& matH) } // Column modification - for (int i = 0; i <= n; i++) + for (int i = 0; i <= n; ++i) { z = matH.coeff(i,n-1); matH.coeffRef(i,n-1) = q * z + p * matH.coeff(i,n); @@ -377,7 +377,7 @@ void EigenSolver::hqr2(MatrixType& matH) } // Accumulate transformations - for (int i = low; i <= high; i++) + for (int i = low; i <= high; ++i) { z = m_eivec.coeff(i,n-1); m_eivec.coeffRef(i,n-1) = q * z + p * m_eivec.coeff(i,n); @@ -410,7 +410,7 @@ void EigenSolver::hqr2(MatrixType& matH) if (iter == 10) { exshift += x; - for (int i = low; i <= n; i++) + for (int i = low; i <= n; ++i) matH.coeffRef(i,i) -= x; s = ei_abs(matH.coeff(n,n-1)) + ei_abs(matH.coeff(n-1,n-2)); x = y = 0.75 * s; @@ -428,7 +428,7 @@ void EigenSolver::hqr2(MatrixType& matH) if (y < x) s = -s; s = x - w / ((y - x) / 2.0 + s); - for (int i = low; i <= n; i++) + for (int i = low; i <= n; ++i) matH.coeffRef(i,i) -= s; exshift += s; x = y = w = 0.964; @@ -463,7 +463,7 @@ void EigenSolver::hqr2(MatrixType& matH) m--; } - for (int i = m+2; i <= n; i++) + for (int i = m+2; i <= n; ++i) { matH.coeffRef(i,i-2) = 0.0; if (i > m+2) @@ -471,7 +471,7 @@ void EigenSolver::hqr2(MatrixType& matH) } // Double QR step involving rows l:n and columns m:n - for (int k = m; k <= n-1; k++) + for (int k = m; k <= n-1; ++k) { int notlast = (k != n-1); if (k != m) { @@ -510,7 +510,7 @@ void EigenSolver::hqr2(MatrixType& matH) r = r / p; // Row modification - for (int j = k; j < nn; j++) + for (int j = k; j < nn; ++j) { p = matH.coeff(k,j) + q * matH.coeff(k+1,j); if (notlast) @@ -523,7 +523,7 @@ void EigenSolver::hqr2(MatrixType& matH) } // Column modification - for (int i = 0; i <= std::min(n,k+3); i++) + for (int i = 0; i <= std::min(n,k+3); ++i) { p = x * matH.coeff(i,k) + y * matH.coeff(i,k+1); if (notlast) @@ -536,7 +536,7 @@ void EigenSolver::hqr2(MatrixType& matH) } // Accumulate transformations - for (int i = low; i <= high; i++) + for (int i = low; i <= high; ++i) { p = x * m_eivec.coeff(i,k) + y * m_eivec.coeff(i,k+1); if (notlast) @@ -686,7 +686,7 @@ void EigenSolver::hqr2(MatrixType& matH) } // Vectors of isolated roots - for (int i = 0; i < nn; i++) + for (int i = 0; i < nn; ++i) { // FIXME again what's the purpose of this test ? // in this algo low==0 and high==nn-1 !! 
diff --git a/Eigen/src/QR/QR.h b/Eigen/src/QR/QR.h index c3fe96718..94b817a02 100644 --- a/Eigen/src/QR/QR.h +++ b/Eigen/src/QR/QR.h @@ -87,7 +87,7 @@ void QR::_compute(const MatrixType& matrix) int rows = matrix.rows(); int cols = matrix.cols(); - for (int k = 0; k < cols; k++) + for (int k = 0; k < cols; ++k) { int remainingSize = rows-k; diff --git a/Eigen/src/QR/SelfAdjointEigenSolver.h b/Eigen/src/QR/SelfAdjointEigenSolver.h index e57b52ed5..05060063c 100644 --- a/Eigen/src/QR/SelfAdjointEigenSolver.h +++ b/Eigen/src/QR/SelfAdjointEigenSolver.h @@ -202,7 +202,7 @@ void SelfAdjointEigenSolver::compute(const MatrixType& matrix, bool // Sort eigenvalues and corresponding vectors. // TODO make the sort optional ? // TODO use a better sort algorithm !! - for (int i = 0; i < n-1; i++) + for (int i = 0; i < n-1; ++i) { int k; m_eivalues.segment(i,n-i).minCoeff(&k); diff --git a/Eigen/src/QR/Tridiagonalization.h b/Eigen/src/QR/Tridiagonalization.h index a4fa32ed4..ee7f7b84b 100644 --- a/Eigen/src/QR/Tridiagonalization.h +++ b/Eigen/src/QR/Tridiagonalization.h @@ -268,7 +268,7 @@ void Tridiagonalization::_compute(MatrixType& matA, CoeffVectorType& * if we remove the specialization of Block for Matrix then it is even worse, much worse ! */ #ifdef EIGEN_NEVER_DEFINED for (int j1=i+1; j1 m(numPoints, size); if(funcOfOthers>0) - for(int i = 0; i < numPoints; i++) + for(int i = 0; i < numPoints; ++i) m.row(i).start(funcOfOthers) = points[i]->start(funcOfOthers); if(funcOfOthersend(size-funcOfOthers-1); - for(int i = 0; i < numPoints; i++) + for(int i = 0; i < numPoints; ++i) m.row(i).coeffRef(size-1) = Scalar(1); VectorType v(size); v.setZero(); - for(int i = 0; i < numPoints; i++) + for(int i = 0; i < numPoints; ++i) v += m.row(i).adjoint() * points[i]->coeff(funcOfOthers); ei_assert((m.adjoint()*m).lu().solve(v, result)); @@ -170,14 +170,14 @@ void fitHyperplane(int numPoints, // compute the mean of the data VectorType mean = VectorType::Zero(size); - for(int i = 0; i < numPoints; i++) + for(int i = 0; i < numPoints; ++i) mean += *(points[i]); mean /= numPoints; // compute the covariance matrix CovMatrixType covMat = CovMatrixType::Zero(size, size); VectorType remean = VectorType::Zero(size); - for(int i = 0; i < numPoints; i++) + for(int i = 0; i < numPoints; ++i) { VectorType diff = (*(points[i]) - mean).conjugate(); covMat += diff * diff.adjoint(); diff --git a/Eigen/src/SVD/SVD.h b/Eigen/src/SVD/SVD.h index debdc7606..988316649 100644 --- a/Eigen/src/SVD/SVD.h +++ b/Eigen/src/SVD/SVD.h @@ -115,7 +115,7 @@ void SVD::compute(const MatrixType& matrix) // in s and the super-diagonal elements in e. int nct = std::min(m-1,n); int nrt = std::max(0,std::min(n-2,m)); - for (k = 0; k < std::max(nct,nrt); k++) + for (k = 0; k < std::max(nct,nrt); ++k) { if (k < nct) { @@ -132,7 +132,7 @@ void SVD::compute(const MatrixType& matrix) m_sigma[k] = -m_sigma[k]; } - for (j = k+1; j < n; j++) + for (j = k+1; j < n; ++j) { if ((k < nct) && (m_sigma[k] != 0.0)) { @@ -168,7 +168,7 @@ void SVD::compute(const MatrixType& matrix) { // Apply the transformation. work.end(m-k-1) = matA.corner(BottomRight,m-k-1,n-k-1) * e.end(n-k-1); - for (j = k+1; j < n; j++) + for (j = k+1; j < n; ++j) matA.col(j).end(m-k-1) += (-e[j]/e[k+1]) * work.end(m-k-1); } @@ -192,7 +192,7 @@ void SVD::compute(const MatrixType& matrix) // If required, generate U. 
if (wantu) { - for (j = nct; j < nu; j++) + for (j = nct; j < nu; ++j) { m_matU.col(j).setZero(); m_matU(j,j) = 1.0; @@ -201,7 +201,7 @@ void SVD::compute(const MatrixType& matrix) { if (m_sigma[k] != 0.0) { - for (j = k+1; j < nu; j++) + for (j = k+1; j < nu; ++j) { Scalar t = m_matU.col(k).end(m-k).dot(m_matU.col(j).end(m-k)); // FIXME is it really a dot product we want ? t = -t/m_matU(k,k); @@ -227,7 +227,7 @@ void SVD::compute(const MatrixType& matrix) { if ((k < nrt) & (e[k] != 0.0)) { - for (j = k+1; j < nu; j++) + for (j = k+1; j < nu; ++j) { Scalar t = m_matV.col(k).end(n-k-1).dot(m_matV.col(j).end(n-k-1)); // FIXME is it really a dot product we want ? t = -t/m_matV(k+1,k); @@ -302,7 +302,7 @@ void SVD::compute(const MatrixType& matrix) k = ks; } } - k++; + ++k; // Perform the task indicated by kase. switch (kase) @@ -326,7 +326,7 @@ void SVD::compute(const MatrixType& matrix) } if (wantv) { - for (i = 0; i < n; i++) + for (i = 0; i < n; ++i) { t = cs*m_matV(i,j) + sn*m_matV(i,p-1); m_matV(i,p-1) = -sn*m_matV(i,j) + cs*m_matV(i,p-1); @@ -342,7 +342,7 @@ void SVD::compute(const MatrixType& matrix) { Scalar f(e[k-1]); e[k-1] = 0.0; - for (j = k; j < p; j++) + for (j = k; j < p; ++j) { Scalar t(hypot(m_sigma[j],f)); Scalar cs( m_sigma[j]/t); @@ -352,7 +352,7 @@ void SVD::compute(const MatrixType& matrix) e[j] = cs*e[j]; if (wantu) { - for (i = 0; i < m; i++) + for (i = 0; i < m; ++i) { t = cs*m_matU(i,j) + sn*m_matU(i,k-1); m_matU(i,k-1) = -sn*m_matU(i,j) + cs*m_matU(i,k-1); @@ -390,7 +390,7 @@ void SVD::compute(const MatrixType& matrix) // Chase zeros. - for (j = k; j < p-1; j++) + for (j = k; j < p-1; ++j) { Scalar t = hypot(f,g); Scalar cs = f/t; @@ -403,7 +403,7 @@ void SVD::compute(const MatrixType& matrix) m_sigma[j+1] = cs*m_sigma[j+1]; if (wantv) { - for (i = 0; i < n; i++) + for (i = 0; i < n; ++i) { t = cs*m_matV(i,j) + sn*m_matV(i,j+1); m_matV(i,j+1) = -sn*m_matV(i,j) + cs*m_matV(i,j+1); @@ -420,7 +420,7 @@ void SVD::compute(const MatrixType& matrix) e[j+1] = cs*e[j+1]; if (wantu && (j < m-1)) { - for (i = 0; i < m; i++) + for (i = 0; i < m; ++i) { t = cs*m_matU(i,j) + sn*m_matU(i,j+1); m_matU(i,j+1) = -sn*m_matU(i,j) + cs*m_matU(i,j+1); @@ -456,7 +456,7 @@ void SVD::compute(const MatrixType& matrix) m_matV.col(k).swap(m_matV.col(k+1)); if (wantu && (k < m-1)) m_matU.col(k).swap(m_matU.col(k+1)); - k++; + ++k; } iter = 0; p--; @@ -473,12 +473,12 @@ SVD& SVD::sort() int mv = m_matV.rows(); int n = m_matU.cols(); - for (int i=0; i p) { @@ -520,7 +520,7 @@ bool SVD::solve(const MatrixBase &b, ResultType* resul { Matrix aux = m_matU.transpose() * b.col(j); - for (int i = 0; i ::coeffRef(int i) // this is the first element m_llStart = 0; m_llCurrent = 0; - m_llSize++; + ++m_llSize; llElements[0].value = Scalar(0); llElements[0].index = i; llElements[0].next = -1; @@ -216,7 +216,7 @@ Scalar& AmbiVector::coeffRef(int i) el.index = i; el.next = m_llStart; m_llStart = m_llSize; - m_llSize++; + ++m_llSize; m_llCurrent = m_llStart; return el.value; } @@ -246,7 +246,7 @@ Scalar& AmbiVector::coeffRef(int i) el.index = i; el.next = llElements[m_llCurrent].next; llElements[m_llCurrent].next = m_llSize; - m_llSize++; + ++m_llSize; return el.value; } } @@ -332,7 +332,7 @@ class AmbiVector<_Scalar>::Iterator if (m_isDense) { do { - m_cachedIndex++; + ++m_cachedIndex; } while (m_cachedIndex> 1; } KeyType ik = (1<<(OuterPacketBits+m_keyBitsOffset)); @@ -183,7 +183,7 @@ class RandomSetter for (typename HashMapType::iterator it = m_hashmaps[k].begin(); it!=end; ++it) { const int outer = 
it->first & keyBitsMask; - positions[outer]++; + ++positions[outer]; } } // prefix sum diff --git a/Eigen/src/Sparse/SparseLDLT.h b/Eigen/src/Sparse/SparseLDLT.h index 66e31aa64..b29148864 100644 --- a/Eigen/src/Sparse/SparseLDLT.h +++ b/Eigen/src/Sparse/SparseLDLT.h @@ -203,10 +203,10 @@ void SparseLDLT::_symbolic(const MatrixType& a) if (P) { /* If P is present then compute Pinv, the inverse of P */ - for (int k = 0; k < size; k++) + for (int k = 0; k < size; ++k) Pinv[P[k]] = k; } - for (int k = 0; k < size; k++) + for (int k = 0; k < size; ++k) { /* L(k,:) pattern: all nodes reachable in etree from nz in A(0:k-1,k) */ m_parent[k] = -1; /* parent of k is not yet known */ @@ -214,7 +214,7 @@ void SparseLDLT::_symbolic(const MatrixType& a) m_nonZerosPerCol[k] = 0; /* count of nonzeros in column k of L */ int kk = P ? P[k] : k; /* kth original, or permuted, column */ int p2 = Ap[kk+1]; - for (int p = Ap[kk]; p < p2; p++) + for (int p = Ap[kk]; p < p2; ++p) { /* A (i,k) is nonzero (original or permuted A) */ int i = Pinv ? Pinv[Ai[p]] : Ai[p]; @@ -226,7 +226,7 @@ void SparseLDLT::_symbolic(const MatrixType& a) /* find parent of i if not yet determined */ if (m_parent[i] == -1) m_parent[i] = k; - m_nonZerosPerCol[i]++; /* L (k,i) is nonzero */ + ++m_nonZerosPerCol[i]; /* L (k,i) is nonzero */ tags[i] = k; /* mark i as visited */ } } @@ -234,7 +234,7 @@ void SparseLDLT::_symbolic(const MatrixType& a) } /* construct Lp index array from m_nonZerosPerCol column counts */ Lp[0] = 0; - for (int k = 0; k < size; k++) + for (int k = 0; k < size; ++k) Lp[k+1] = Lp[k] + m_nonZerosPerCol[k]; m_matrix.resizeNonZeros(Lp[size]); @@ -265,7 +265,7 @@ bool SparseLDLT::_numeric(const MatrixType& a) const int* Pinv = 0; bool ok = true; - for (int k = 0; k < size; k++) + for (int k = 0; k < size; ++k) { /* compute nonzero pattern of kth row of L, in topological order */ y[k] = 0.0; /* Y(0:k) is now all zero */ @@ -274,7 +274,7 @@ bool SparseLDLT::_numeric(const MatrixType& a) m_nonZerosPerCol[k] = 0; /* count of nonzeros in column k of L */ int kk = (P) ? (P[k]) : (k); /* kth original, or permuted, column */ int p2 = Ap[kk+1]; - for (int p = Ap[kk]; p < p2; p++) + for (int p = Ap[kk]; p < p2; ++p) { int i = Pinv ? 
Pinv[Ai[p]] : Ai[p]; /* get A(i,k) */ if (i <= k) @@ -293,20 +293,20 @@ bool SparseLDLT::_numeric(const MatrixType& a) /* compute numerical values kth row of L (a sparse triangular solve) */ m_diag[k] = y[k]; /* get D(k,k) and clear Y(k) */ y[k] = 0.0; - for (; top < size; top++) + for (; top < size; ++top) { int i = pattern[top]; /* pattern[top:n-1] is pattern of L(:,k) */ Scalar yi = y[i]; /* get and clear Y(i) */ y[i] = 0.0; int p2 = Lp[i] + m_nonZerosPerCol[i]; int p; - for (p = Lp[i]; p < p2; p++) + for (p = Lp[i]; p < p2; ++p) y[Li[p]] -= Lx[p] * yi; Scalar l_ki = yi / m_diag[i]; /* the nonzero entry L(k,i) */ m_diag[k] -= l_ki * yi; Li[p] = k; /* store L(k,i) in column form of L */ Lx[p] = l_ki; - m_nonZerosPerCol[i]++; /* increment count of nonzeros in col i */ + ++m_nonZerosPerCol[i]; /* increment count of nonzeros in col i */ } if (m_diag[k] == 0.0) { diff --git a/Eigen/src/Sparse/SparseMatrix.h b/Eigen/src/Sparse/SparseMatrix.h index 94407259f..8abe96e6d 100644 --- a/Eigen/src/Sparse/SparseMatrix.h +++ b/Eigen/src/Sparse/SparseMatrix.h @@ -163,7 +163,7 @@ class SparseMatrix } assert(m_outerIndex[outer+1] == m_data.size()); int id = m_outerIndex[outer+1]; - m_outerIndex[outer+1]++; + ++m_outerIndex[outer+1]; m_data.append(0, inner); return m_data.value(id); @@ -192,7 +192,7 @@ class SparseMatrix assert(m_outerIndex[outer+1] == m_data.size() && "invalid outer index"); int startId = m_outerIndex[outer]; int id = m_outerIndex[outer+1]-1; - m_outerIndex[outer+1]++; + ++m_outerIndex[outer+1]; m_data.resize(id+2); while ( (id >= startId) && (m_data.index(id) > inner) ) @@ -212,7 +212,7 @@ class SparseMatrix // find the last filled column while (i>=0 && m_outerIndex[i]==0) --i; - i++; + ++i; while (i<=m_outerSize) { m_outerIndex[i] = size; @@ -299,7 +299,7 @@ class SparseMatrix // FIXME the above copy could be merged with that pass for (int j=0; j