-rw-r--r--  bench/BenchSparseUtil.h     |  20
-rw-r--r--  bench/sparse_cholesky.cpp   | 215
-rw-r--r--  bench/sparse_lu.cpp         | 112
-rw-r--r--  bench/sparse_product.cpp    |  50
-rw-r--r--  bench/sparse_transpose.cpp  | 104
-rw-r--r--  bench/sparse_trisolver.cpp  |  42
6 files changed, 525 insertions, 18 deletions
diff --git a/bench/BenchSparseUtil.h b/bench/BenchSparseUtil.h
index 35c9a5263..26a2f47f2 100644
--- a/bench/BenchSparseUtil.h
+++ b/bench/BenchSparseUtil.h
@@ -72,3 +72,23 @@ void eiToMtl(const EigenSparseMatrix& src, MtlSparse& dst)
ins[it.index()][j] = it.value();
}
#endif
+
+#ifdef CSPARSE
+extern "C" {
+#include "cs.h"
+}
+void eiToCSparse(const EigenSparseMatrix& src, cs* &dst)
+{
+ cs* aux = cs_spalloc (0, 0, 1, 1, 1);
+ for (int j=0; j<src.cols(); ++j)
+ for (EigenSparseMatrix::InnerIterator it(src.derived(), j); it; ++it)
+ if (!cs_entry(aux, it.index(), j, it.value()))
+ {
+ std::cout << "cs_entry error\n";
+ exit(2);
+ }
+ dst = cs_compress(aux);
+// cs_spfree(aux);
+}
+
+#endif
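
Note: the eiToCSparse helper above follows the standard CSparse workflow: allocate a triplet container with cs_spalloc, accumulate entries with cs_entry, then convert to compressed-column storage with cs_compress. A minimal standalone sketch of that workflow (illustrative only, not taken from the patch; it assumes cs.h is on the include path):

    extern "C" {
    #include "cs.h"
    }
    #include <cassert>

    // Build a small 2x2 matrix the same way eiToCSparse does.
    cs* tinyCSparseExample()
    {
      cs* t = cs_spalloc(0, 0, 1, 1, 1);  // empty triplet container, grows on demand
      assert(cs_entry(t, 0, 0,  2.0));    // cs_entry(T, row, col, value), returns 0 on failure
      assert(cs_entry(t, 1, 0, -1.0));
      assert(cs_entry(t, 1, 1,  3.0));
      cs* c = cs_compress(t);             // triplets -> compressed-column form
      cs_spfree(t);                       // the triplet copy is no longer needed
      return c;                           // caller releases the result with cs_spfree
    }
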
diff --git a/bench/sparse_cholesky.cpp b/bench/sparse_cholesky.cpp
new file mode 100644
index 000000000..d1d29c152
--- /dev/null
+++ b/bench/sparse_cholesky.cpp
@@ -0,0 +1,215 @@
+#define EIGEN_TAUCS_SUPPORT
+#define EIGEN_CHOLMOD_SUPPORT
+#include <Eigen/Sparse>
+
+// g++ -DSIZE=10000 -DDENSITY=0.001 sparse_cholesky.cpp -I.. -DDENSEMATRIX -O3 -g0 -DNDEBUG -DNBTRIES=1 -I /home/gael/Coding/LinearAlgebra/taucs_full/src/ -I/home/gael/Coding/LinearAlgebra/taucs_full/build/linux/ -L/home/gael/Coding/LinearAlgebra/taucs_full/lib/linux/ -ltaucs /home/gael/Coding/LinearAlgebra/GotoBLAS/libgoto.a -lpthread -I /home/gael/Coding/LinearAlgebra/SuiteSparse/CHOLMOD/Include/ $CHOLLIB -I /home/gael/Coding/LinearAlgebra/SuiteSparse/UFconfig/ /home/gael/Coding/LinearAlgebra/SuiteSparse/CCOLAMD/Lib/libccolamd.a /home/gael/Coding/LinearAlgebra/SuiteSparse/CHOLMOD/Lib/libcholmod.a -lmetis /home/gael/Coding/LinearAlgebra/SuiteSparse/AMD/Lib/libamd.a /home/gael/Coding/LinearAlgebra/SuiteSparse/CAMD/Lib/libcamd.a /home/gael/Coding/LinearAlgebra/SuiteSparse/CCOLAMD/Lib/libccolamd.a /home/gael/Coding/LinearAlgebra/SuiteSparse/COLAMD/Lib/libcolamd.a -llapack && ./a.out
+
+#define NOGMM
+#define NOMTL
+
+#ifndef SIZE
+#define SIZE 10
+#endif
+
+#ifndef DENSITY
+#define DENSITY 0.01
+#endif
+
+#ifndef REPEAT
+#define REPEAT 1
+#endif
+
+#include "BenchSparseUtil.h"
+
+#ifndef MINDENSITY
+#define MINDENSITY 0.0004
+#endif
+
+#ifndef NBTRIES
+#define NBTRIES 10
+#endif
+
+#define BENCH(X) \
+ timer.reset(); \
+ for (int _j=0; _j<NBTRIES; ++_j) { \
+ timer.start(); \
+ for (int _k=0; _k<REPEAT; ++_k) { \
+ X \
+ } timer.stop(); }
+
+// typedef SparseMatrix<Scalar,Upper> EigenSparseTriMatrix;
+typedef SparseMatrix<Scalar,SelfAdjoint|Lower> EigenSparseSelfAdjointMatrix;
+
+void fillSpdMatrix(float density, int rows, int cols, EigenSparseSelfAdjointMatrix& dst)
+{
+ dst.startFill(rows*cols*density);
+ for(int j = 0; j < cols; j++)
+ {
+ dst.fill(j,j) = ei_random<Scalar>(10,20);
+ for(int i = j+1; i < rows; i++)
+ {
+ Scalar v = (ei_random<float>(0,1) < density) ? ei_random<Scalar>() : 0;
+ if (v!=0)
+ dst.fill(i,j) = v;
+ }
+
+ }
+ dst.endFill();
+}
+
+#include <Eigen/Cholesky>
+
+template<int Backend>
+void doEigen(const char* name, const EigenSparseSelfAdjointMatrix& sm1, int flags = 0)
+{
+ std::cout << name << "..." << std::flush;
+ BenchTimer timer;
+ timer.start();
+ SparseLLT<EigenSparseSelfAdjointMatrix,Backend> chol(sm1, flags);
+ timer.stop();
+ std::cout << ":\t" << timer.value() << endl;
+
+ std::cout << " nnz: " << sm1.nonZeros() << " => " << chol.matrixL().nonZeros() << "\n";
+ //std::cout << "sparse\n" << chol.matrixL() << "%\n";
+}
+
+int main(int argc, char *argv[])
+{
+ int rows = SIZE;
+ int cols = SIZE;
+ float density = DENSITY;
+ BenchTimer timer;
+
+ VectorXf b = VectorXf::Random(cols);
+ VectorXf x = VectorXf::Random(cols);
+
+ bool densedone = false;
+
+ //for (float density = DENSITY; density>=MINDENSITY; density*=0.5)
+// float density = 0.5;
+ {
+ EigenSparseSelfAdjointMatrix sm1(rows, cols);
+ std::cout << "Generate sparse matrix (might take a while)...\n";
+ fillSpdMatrix(density, rows, cols, sm1);
+ std::cout << "DONE\n\n";
+
+ // dense matrices
+ #ifdef DENSEMATRIX
+ if (!densedone)
+ {
+ densedone = true;
+ std::cout << "Eigen Dense\t" << density*100 << "%\n";
+ DenseMatrix m1(rows,cols);
+ eiToDense(sm1, m1);
+ m1 = (m1 + m1.transpose()).eval();
+ m1.diagonal() *= 0.5;
+
+// BENCH(LLT<DenseMatrix> chol(m1);)
+// std::cout << "dense:\t" << timer.value() << endl;
+
+ BenchTimer timer;
+ timer.start();
+ LLT<DenseMatrix> chol(m1);
+ timer.stop();
+ std::cout << "dense:\t" << timer.value() << endl;
+ int count = 0;
+ for (int j=0; j<cols; ++j)
+ for (int i=j; i<rows; ++i)
+ if (!ei_isMuchSmallerThan(ei_abs(chol.matrixL()(i,j)), 0.1))
+ count++;
+ std::cout << "dense: " << "nnz = " << count << "\n";
+ std::cout << "dense:\n" << m1 << "\n\n" << chol.matrixL() << endl;
+ }
+ #endif
+
+ // eigen sparse matrices
+ doEigen<Eigen::DefaultBackend>("Eigen/Sparse", sm1, Eigen::IncompleteFactorization);
+
+ #ifdef EIGEN_CHOLMOD_SUPPORT
+ doEigen<Eigen::Cholmod>("Eigen/Cholmod", sm1, Eigen::IncompleteFactorization);
+ #endif
+
+ #ifdef EIGEN_TAUCS_SUPPORT
+ doEigen<Eigen::Taucs>("Eigen/Taucs", sm1, Eigen::IncompleteFactorization);
+ #endif
+
+ #if 0
+ // TAUCS
+ {
+ taucs_ccs_matrix A = sm1.asTaucsMatrix();
+
+ //BENCH(taucs_ccs_matrix* chol = taucs_ccs_factor_llt(&A, 0, 0);)
+// BENCH(taucs_supernodal_factor_to_ccs(taucs_ccs_factor_llt_ll(&A));)
+// std::cout << "taucs:\t" << timer.value() << endl;
+
+ taucs_ccs_matrix* chol = taucs_ccs_factor_llt(&A, 0, 0);
+
+ for (int j=0; j<cols; ++j)
+ {
+ for (int i=chol->colptr[j]; i<chol->colptr[j+1]; ++i)
+ std::cout << chol->values.d[i] << " ";
+ }
+ }
+
+ // CHOLMOD
+ #ifdef EIGEN_CHOLMOD_SUPPORT
+ {
+ cholmod_common c;
+ cholmod_start (&c);
+ cholmod_sparse A;
+ cholmod_factor *L;
+
+ A = sm1.asCholmodMatrix();
+ BenchTimer timer;
+// timer.reset();
+ timer.start();
+ std::vector<int> perm(cols);
+// std::vector<int> set(ncols);
+ for (int i=0; i<cols; ++i)
+ perm[i] = i;
+// c.nmethods = 1;
+// c.method[0] = 1;
+
+ c.nmethods = 1;
+ c.method [0].ordering = CHOLMOD_NATURAL;
+ c.postorder = 0;
+ c.final_ll = 1;
+
+ L = cholmod_analyze_p(&A, &perm[0], &perm[0], cols, &c);
+ timer.stop();
+ std::cout << "cholmod/analyze:\t" << timer.value() << endl;
+ timer.reset();
+ timer.start();
+ cholmod_factorize(&A, L, &c);
+ timer.stop();
+ std::cout << "cholmod/factorize:\t" << timer.value() << endl;
+
+ cholmod_sparse* cholmat = cholmod_factor_to_sparse(L, &c);
+
+ cholmod_print_factor(L, "Factors", &c);
+
+ cholmod_print_sparse(cholmat, "Chol", &c);
+ cholmod_write_sparse(stdout, cholmat, 0, 0, &c);
+//
+// cholmod_print_sparse(&A, "A", &c);
+// cholmod_write_sparse(stdout, &A, 0, 0, &c);
+
+
+// for (int j=0; j<cols; ++j)
+// {
+// for (int i=chol->colptr[j]; i<chol->colptr[j+1]; ++i)
+// std::cout << chol->values.s[i] << " ";
+// }
+ }
+ #endif
+
+ #endif
+
+
+
+ }
+
+
+ return 0;
+}
+
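
Note: the BENCH(X) macro used by these benchmark files simply wraps X in an NBTRIES x REPEAT timing loop around a BenchTimer. As a rough illustration (not part of the patch), a call such as BENCH(sm3 = sm1.transpose();) expands to:

    timer.reset();
    for (int _j = 0; _j < NBTRIES; ++_j) {
      timer.start();
      for (int _k = 0; _k < REPEAT; ++_k) {
        sm3 = sm1.transpose();
      }
      timer.stop();
    }

timer.value() is then printed; how the NBTRIES runs are aggregated (best run versus accumulated time) is up to BenchTimer. Note also that several call sites below pass an explicit for (int k=0; k<REPEAT; ++k) loop as X, so with REPEAT greater than 1 those measurements cover REPEAT*REPEAT evaluations per timed run.
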
diff --git a/bench/sparse_lu.cpp b/bench/sparse_lu.cpp
new file mode 100644
index 000000000..35e7e8a3a
--- /dev/null
+++ b/bench/sparse_lu.cpp
@@ -0,0 +1,112 @@
+
+// g++ -I.. sparse_lu.cpp -O3 -g0 -I /usr/include/superlu/ -lsuperlu -lgfortran -DSIZE=1000 -DDENSITY=.05 && ./a.out
+
+// #define EIGEN_TAUCS_SUPPORT
+// #define EIGEN_CHOLMOD_SUPPORT
+#define EIGEN_SUPERLU_SUPPORT
+#include <Eigen/Sparse>
+
+#define NOGMM
+#define NOMTL
+
+#ifndef SIZE
+#define SIZE 10
+#endif
+
+#ifndef DENSITY
+#define DENSITY 0.01
+#endif
+
+#ifndef REPEAT
+#define REPEAT 1
+#endif
+
+#include "BenchSparseUtil.h"
+
+#ifndef MINDENSITY
+#define MINDENSITY 0.0004
+#endif
+
+#ifndef NBTRIES
+#define NBTRIES 10
+#endif
+
+#define BENCH(X) \
+ timer.reset(); \
+ for (int _j=0; _j<NBTRIES; ++_j) { \
+ timer.start(); \
+ for (int _k=0; _k<REPEAT; ++_k) { \
+ X \
+ } timer.stop(); }
+
+typedef Matrix<Scalar,Dynamic,1> VectorX;
+
+#include <Eigen/LU>
+
+int main(int argc, char *argv[])
+{
+ int rows = SIZE;
+ int cols = SIZE;
+ float density = DENSITY;
+ BenchTimer timer;
+
+ VectorX b = VectorX::Random(cols);
+ VectorX x = VectorX::Random(cols);
+
+ bool densedone = false;
+
+ //for (float density = DENSITY; density>=MINDENSITY; density*=0.5)
+// float density = 0.5;
+ {
+ EigenSparseMatrix sm1(rows, cols);
+ fillMatrix(density, rows, cols, sm1);
+
+ // dense matrices
+ #ifdef DENSEMATRIX
+ if (!densedone)
+ {
+ densedone = true;
+ std::cout << "Eigen Dense\t" << density*100 << "%\n";
+ DenseMatrix m1(rows,cols);
+ eiToDense(sm1, m1);
+
+ BenchTimer timer;
+ timer.start();
+ LU<DenseMatrix> lu(m1);
+ timer.stop();
+ std::cout << "Eigen/dense:\t" << timer.value() << endl;
+
+ timer.reset();
+ timer.start();
+ lu.solve(b,&x);
+ timer.stop();
+ std::cout << " solve:\t" << timer.value() << endl;
+// std::cout << b.transpose() << "\n";
+ std::cout << x.transpose() << "\n";
+ }
+ #endif
+
+ // eigen sparse matrices
+ {
+ x.setZero();
+ BenchTimer timer;
+ timer.start();
+ SparseLU<EigenSparseMatrix,SuperLU> lu(sm1);
+ timer.stop();
+ std::cout << "Eigen/SuperLU:\t" << timer.value() << endl;
+
+ timer.reset();
+ timer.start();
+ lu.solve(b,&x);
+ timer.stop();
+ std::cout << " solve:\t" << timer.value() << endl;
+
+ std::cout << x.transpose() << "\n";
+
+ }
+
+ }
+
+ return 0;
+}
+
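
Note: a quick way to sanity-check either solve above is to look at the residual of the computed x. An illustrative snippet (not part of the patch) that reuses eiToDense from BenchSparseUtil.h, so it does not rely on a sparse-times-dense product being available in this version:

    DenseMatrix d1(rows, cols);
    eiToDense(sm1, d1);     // densify A once, purely for checking
    std::cout << "  ||A*x - b|| = " << (d1 * x - b).norm() << "\n";
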
diff --git a/bench/sparse_product.cpp b/bench/sparse_product.cpp
index edeb08c5d..dfb4b4335 100644
--- a/bench/sparse_product.cpp
+++ b/bench/sparse_product.cpp
@@ -1,8 +1,8 @@
//g++ -O3 -g0 -DNDEBUG sparse_product.cpp -I.. -I/home/gael/Coding/LinearAlgebra/mtl4/ -DDENSITY=0.005 -DSIZE=10000 && ./a.out
//g++ -O3 -g0 -DNDEBUG sparse_product.cpp -I.. -I/home/gael/Coding/LinearAlgebra/mtl4/ -DDENSITY=0.05 -DSIZE=2000 && ./a.out
-// -DNOGMM -DNOMTL
-
+// -DNOGMM -DNOMTL -DCSPARSE
+// -I /home/gael/Coding/LinearAlgebra/CSparse/Include/ /home/gael/Coding/LinearAlgebra/CSparse/Lib/libcsparse.a
#ifndef SIZE
#define SIZE 10000
#endif
@@ -33,6 +33,22 @@
X \
} timer.stop(); }
+
+#ifdef CSPARSE
+cs* cs_sorted_multiply(const cs* a, const cs* b)
+{
+ cs* A = cs_transpose (a, 1) ;
+ cs* B = cs_transpose (b, 1) ;
+ cs* D = cs_multiply (B,A) ; /* D = B'*A' */
+ cs_spfree (A) ;
+ cs_spfree (B) ;
+ cs_dropzeros (D) ; /* drop zeros from D */
+ cs* C = cs_transpose (D, 1) ; /* C = D', so that C is sorted */
+ cs_spfree (D) ;
+ return C;
+}
+#endif
+
int main(int argc, char *argv[])
{
int rows = SIZE;
@@ -87,13 +103,15 @@ int main(int argc, char *argv[])
// eigen sparse matrices
{
- std::cout << "Eigen sparse\t" << density*100 << "%\n";
+ std::cout << "Eigen sparse\t" << sm1.nonZeros()/float(sm1.rows()*sm1.cols())*100 << "% * "
+ << sm2.nonZeros()/float(sm2.rows()*sm2.cols())*100 << "%\n";
// timer.reset();
// timer.start();
BENCH(for (int k=0; k<REPEAT; ++k) sm3 = sm1 * sm2;)
// timer.stop();
std::cout << " a * b:\t" << timer.value() << endl;
+// std::cout << sm3 << "\n";
timer.reset();
timer.start();
@@ -120,6 +138,32 @@ int main(int argc, char *argv[])
std::cout << " a * b' :\t" << timer.value() << endl;
}
+ // CSparse
+ #ifdef CSPARSE
+ {
+ std::cout << "CSparse \t" << density*100 << "%\n";
+ cs *m1, *m2, *m3;
+ eiToCSparse(sm1, m1);
+ eiToCSparse(sm2, m2);
+
+ timer.reset();
+ timer.start();
+ for (int k=0; k<REPEAT; ++k)
+ {
+ m3 = cs_sorted_multiply(m1, m2);
+ if (!m3)
+ {
+ std::cerr << "cs_multiply failed\n";
+// break;
+ }
+// cs_print(m3, 0);
+ cs_spfree(m3);
+ }
+ timer.stop();
+ std::cout << " a * b:\t" << timer.value() << endl;
+ }
+ #endif
+
// GMM++
#ifndef NOGMM
{
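
Note on the cs_sorted_multiply helper added above: cs_multiply does not guarantee sorted row indices within each column, so the helper computes (b'*a')' instead of calling cs_multiply(a, b) directly; the final cs_transpose both restores the product a*b and leaves every column sorted, at the cost of the extra transposes, which are included in the timed loop.
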
diff --git a/bench/sparse_transpose.cpp b/bench/sparse_transpose.cpp
new file mode 100644
index 000000000..c9aacf5f1
--- /dev/null
+++ b/bench/sparse_transpose.cpp
@@ -0,0 +1,104 @@
+
+//g++ -O3 -g0 -DNDEBUG sparse_transpose.cpp -I.. -I/home/gael/Coding/LinearAlgebra/mtl4/ -DDENSITY=0.005 -DSIZE=10000 && ./a.out
+// -DNOGMM -DNOMTL
+// -DCSPARSE -I /home/gael/Coding/LinearAlgebra/CSparse/Include/ /home/gael/Coding/LinearAlgebra/CSparse/Lib/libcsparse.a
+
+#ifndef SIZE
+#define SIZE 10000
+#endif
+
+#ifndef DENSITY
+#define DENSITY 0.01
+#endif
+
+#ifndef REPEAT
+#define REPEAT 1
+#endif
+
+#include "BenchSparseUtil.h"
+
+#ifndef MINDENSITY
+#define MINDENSITY 0.0004
+#endif
+
+#ifndef NBTRIES
+#define NBTRIES 10
+#endif
+
+#define BENCH(X) \
+ timer.reset(); \
+ for (int _j=0; _j<NBTRIES; ++_j) { \
+ timer.start(); \
+ for (int _k=0; _k<REPEAT; ++_k) { \
+ X \
+ } timer.stop(); }
+
+int main(int argc, char *argv[])
+{
+ int rows = SIZE;
+ int cols = SIZE;
+ float density = DENSITY;
+
+ EigenSparseMatrix sm1(rows,cols), sm3(rows,cols);
+
+ BenchTimer timer;
+ for (float density = DENSITY; density>=MINDENSITY; density*=0.5)
+ {
+ fillMatrix(density, rows, cols, sm1);
+
+ // dense matrices
+ #ifdef DENSEMATRIX
+ {
+ DenseMatrix m1(rows,cols), m3(rows,cols);
+ eiToDense(sm1, m1);
+ BENCH(for (int k=0; k<REPEAT; ++k) m3 = m1.transpose();)
+ std::cout << " Eigen dense:\t" << timer.value() << endl;
+ }
+ #endif
+
+ std::cout << "Non zeros: " << sm1.nonZeros()/float(sm1.rows()*sm1.cols())*100 << "%\n";
+
+ // eigen sparse matrices
+ {
+ BENCH(for (int k=0; k<REPEAT; ++k) sm3 = sm1.transpose();)
+ std::cout << " Eigen:\t" << timer.value() << endl;
+ }
+
+ // CSparse
+ #ifdef CSPARSE
+ {
+ cs *m1, *m3;
+ eiToCSparse(sm1, m1);
+
+ BENCH(for (int k=0; k<REPEAT; ++k) { m3 = cs_transpose(m1,1); cs_spfree(m3);})
+ std::cout << " CSparse:\t" << timer.value() << endl;
+ }
+ #endif
+
+ // GMM++
+ #ifndef NOGMM
+ {
+ GmmDynSparse gmmT3(rows,cols);
+ GmmSparse m1(rows,cols), m3(rows,cols);
+ eiToGmm(sm1, m1);
+ BENCH(for (int k=0; k<REPEAT; ++k) gmm::copy(gmm::transposed(m1),m3);)
+ std::cout << " GMM:\t\t" << timer.value() << endl;
+ }
+ #endif
+
+ // MTL4
+ #ifndef NOMTL
+ {
+ MtlSparse m1(rows,cols), m3(rows,cols);
+ eiToMtl(sm1, m1);
+ BENCH(for (int k=0; k<REPEAT; ++k) m3 = trans(m1);)
+ std::cout << " MTL4:\t\t" << timer.value() << endl;
+ }
+ #endif
+
+ std::cout << "\n\n";
+ }
+
+ return 0;
+}
+
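
Note on the CSparse transpose benchmark above: the second argument of cs_transpose chooses whether the numerical values are copied (nonzero) or only the sparsity pattern is transposed (0); passing 1 keeps the comparison consistent with the Eigen, GMM++ and MTL4 variants, which all copy the values as well.
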
diff --git a/bench/sparse_trisolver.cpp b/bench/sparse_trisolver.cpp
index c6b29e88d..021433043 100644
--- a/bench/sparse_trisolver.cpp
+++ b/bench/sparse_trisolver.cpp
@@ -2,6 +2,7 @@
//g++ -O3 -g0 -DNDEBUG sparse_product.cpp -I.. -I/home/gael/Coding/LinearAlgebra/mtl4/ -DDENSITY=0.005 -DSIZE=10000 && ./a.out
//g++ -O3 -g0 -DNDEBUG sparse_product.cpp -I.. -I/home/gael/Coding/LinearAlgebra/mtl4/ -DDENSITY=0.05 -DSIZE=2000 && ./a.out
// -DNOGMM -DNOMTL
+// -I /home/gael/Coding/LinearAlgebra/CSparse/Include/ /home/gael/Coding/LinearAlgebra/CSparse/Lib/libcsparse.a
#ifndef SIZE
#define SIZE 10000
@@ -60,8 +61,9 @@ int main(int argc, char *argv[])
BenchTimer timer;
#if 1
EigenSparseTriMatrix sm1(rows,cols);
- VectorXf b = VectorXf::Random(cols);
- VectorXf x = VectorXf::Random(cols);
+ typedef Matrix<Scalar,Dynamic,1> DenseVector;
+ DenseVector b = DenseVector::Random(cols);
+ DenseVector x = DenseVector::Random(cols);
bool densedone = false;
@@ -81,13 +83,13 @@ int main(int argc, char *argv[])
eiToDense(sm1, m1);
m2 = m1;
- BENCH(x = m1.marked<Upper>().inverseProduct(b);)
+ BENCH(x = m1.marked<Upper>().solveTriangular(b);)
std::cout << " colmajor^-1 * b:\t" << timer.value() << endl;
- std::cerr << x.transpose() << "\n";
+// std::cerr << x.transpose() << "\n";
- BENCH(x = m2.marked<Upper>().inverseProduct(b);)
+ BENCH(x = m2.marked<Upper>().solveTriangular(b);)
std::cout << " rowmajor^-1 * b:\t" << timer.value() << endl;
- std::cerr << x.transpose() << "\n";
+// std::cerr << x.transpose() << "\n";
}
#endif
@@ -96,13 +98,13 @@ int main(int argc, char *argv[])
std::cout << "Eigen sparse\t" << density*100 << "%\n";
EigenSparseTriMatrixRow sm2 = sm1;
- BENCH(x = sm1.inverseProduct(b);)
+ BENCH(x = sm1.solveTriangular(b);)
std::cout << " colmajor^-1 * b:\t" << timer.value() << endl;
- std::cerr << x.transpose() << "\n";
+// std::cerr << x.transpose() << "\n";
- BENCH(x = sm2.inverseProduct(b);)
+ BENCH(x = sm2.solveTriangular(b);)
std::cout << " rowmajor^-1 * b:\t" << timer.value() << endl;
- std::cerr << x.transpose() << "\n";
+// std::cerr << x.transpose() << "\n";
// x = b;
// BENCH(sm1.inverseProductInPlace(x);)
@@ -115,6 +117,18 @@ int main(int argc, char *argv[])
// std::cerr << x.transpose() << "\n";
}
+ // CSparse
+ #ifdef CSPARSE
+ {
+ std::cout << "CSparse \t" << density*100 << "%\n";
+ cs *m1;
+ eiToCSparse(sm1, m1);
+
+ BENCH(x = b; if (!cs_lsolve (m1, x.data())){std::cerr << "cs_lsolve failed\n"; break;}; )
+ std::cout << " colmajor^-1 * b:\t" << timer.value() << endl;
+ }
+ #endif
+
// GMM++
#ifndef NOGMM
{
@@ -130,13 +144,13 @@ int main(int argc, char *argv[])
gmmX = gmmB;
BENCH(gmm::upper_tri_solve(m1, gmmX, false);)
std::cout << " colmajor^-1 * b:\t" << timer.value() << endl;
- std::cerr << Map<Matrix<Scalar,Dynamic,1> >(&gmmX[0], cols).transpose() << "\n";
+// std::cerr << Map<Matrix<Scalar,Dynamic,1> >(&gmmX[0], cols).transpose() << "\n";
gmmX = gmmB;
BENCH(gmm::upper_tri_solve(m2, gmmX, false);)
timer.stop();
std::cout << " rowmajor^-1 * b:\t" << timer.value() << endl;
- std::cerr << Map<Matrix<Scalar,Dynamic,1> >(&gmmX[0], cols).transpose() << "\n";
+// std::cerr << Map<Matrix<Scalar,Dynamic,1> >(&gmmX[0], cols).transpose() << "\n";
}
#endif
@@ -162,7 +176,7 @@ int main(int argc, char *argv[])
#endif
-
+ std::cout << "\n\n";
}
#endif
@@ -199,8 +213,6 @@ int main(int argc, char *argv[])
}
#endif
- std::cout << "\n\n";
-
return 0;
}
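
Note: in the CSparse block added to sparse_trisolver.cpp, cs_lsolve solves a lower-triangular system in place, overwriting its right-hand side, which is why b is copied into x before the call. An illustrative standalone wrapper (hypothetical, not from the patch), assuming L is a compressed-column lower-triangular cs* with double values and b has matching size:

    #include <iostream>
    #include <vector>

    std::vector<double> solveLower(const cs* L, const std::vector<double>& b)
    {
      std::vector<double> x(b);        // cs_lsolve overwrites the rhs in place
      if (!cs_lsolve(L, &x[0]))        // returns 0 on failure (e.g. null arguments)
        std::cerr << "cs_lsolve failed\n";
      return x;                        // on success x solves L * x = b
    }
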