// This file is part of Eigen, a lightweight C++ template library
// for linear algebra. Eigen itself is part of the KDE project.
//
// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 3 of the License, or (at your option) any later version.
//
// Alternatively, you can redistribute it and/or
// modify it under the terms of the GNU General Public License as
// published by the Free Software Foundation; either version 2 of
// the License, or (at your option) any later version.
//
// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License and a copy of the GNU General Public License along with
// Eigen. If not, see <http://www.gnu.org/licenses/>.
#ifndef EIGEN_SPARSEPRODUCT_H #define EIGEN_SPARSEPRODUCT_H // sparse product return type specialization template struct ProductReturnType { typedef typename ei_traits::Scalar Scalar; enum { LhsRowMajor = ei_traits::Flags & RowMajorBit, RhsRowMajor = ei_traits::Flags & RowMajorBit, TransposeRhs = (!LhsRowMajor) && RhsRowMajor, TransposeLhs = LhsRowMajor && (!RhsRowMajor) }; // FIXME if we transpose let's evaluate to a LinkedVectorMatrix since it is the // type of the temporary to perform the transpose op typedef typename ei_meta_if, typename ei_nested::type>::ret LhsNested; typedef typename ei_meta_if, typename ei_nested::type>::ret RhsNested; typedef Product::type, typename ei_unconst::type, SparseProduct> Type; }; template struct ei_traits > { // clean the nested types: typedef typename ei_unconst::type>::type _LhsNested; typedef typename ei_unconst::type>::type _RhsNested; typedef typename _LhsNested::Scalar Scalar; enum { LhsCoeffReadCost = _LhsNested::CoeffReadCost, RhsCoeffReadCost = _RhsNested::CoeffReadCost, LhsFlags = _LhsNested::Flags, RhsFlags = _RhsNested::Flags, RowsAtCompileTime = _LhsNested::RowsAtCompileTime, ColsAtCompileTime = _RhsNested::ColsAtCompileTime, InnerSize = EIGEN_ENUM_MIN(_LhsNested::ColsAtCompileTime, _RhsNested::RowsAtCompileTime), MaxRowsAtCompileTime = _LhsNested::MaxRowsAtCompileTime, MaxColsAtCompileTime = _RhsNested::MaxColsAtCompileTime, LhsRowMajor = LhsFlags & RowMajorBit, RhsRowMajor = RhsFlags & RowMajorBit, EvalToRowMajor = (RhsFlags & LhsFlags & RowMajorBit), RemovedBits = ~(EvalToRowMajor ? 
0 : RowMajorBit), Flags = (int(LhsFlags | RhsFlags) & HereditaryBits & RemovedBits) | EvalBeforeAssigningBit | EvalBeforeNestingBit, CoeffReadCost = Dynamic }; }; template class Product : ei_no_assignment_operator, public MatrixBase > { public: EIGEN_GENERIC_PUBLIC_INTERFACE(Product) private: typedef typename ei_traits::_LhsNested _LhsNested; typedef typename ei_traits::_RhsNested _RhsNested; public: template inline Product(const Lhs& lhs, const Rhs& rhs) : m_lhs(lhs), m_rhs(rhs) { ei_assert(lhs.cols() == rhs.rows()); } Scalar coeff(int, int) const { ei_assert(false && "eigen internal error"); } Scalar& coeffRef(int, int) { ei_assert(false && "eigen internal error"); } inline int rows() const { return m_lhs.rows(); } inline int cols() const { return m_rhs.cols(); } const _LhsNested& lhs() const { return m_lhs; } const _LhsNested& rhs() const { return m_rhs; } protected: const LhsNested m_lhs; const RhsNested m_rhs; }; template::Flags&RowMajorBit, int RhsStorageOrder = ei_traits::Flags&RowMajorBit, int ResStorageOrder = ei_traits::Flags&RowMajorBit> struct ei_sparse_product_selector; template struct ei_sparse_product_selector { typedef typename ei_traits::type>::Scalar Scalar; struct ListEl { int next; int index; Scalar value; }; static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res) { // make sure to call innerSize/outerSize since we fake the storage order. 
int rows = lhs.innerSize(); int cols = rhs.outerSize(); int size = lhs.outerSize(); ei_assert(size == rhs.rows()); // allocate a temporary buffer Scalar* buffer = new Scalar[rows]; // estimate the number of non zero entries float ratioLhs = float(lhs.nonZeros())/float(lhs.rows()*lhs.cols()); float avgNnzPerRhsColumn = float(rhs.nonZeros())/float(cols); float ratioRes = std::min(ratioLhs * avgNnzPerRhsColumn, 1.f); res.resize(rows, cols); res.startFill(ratioRes*rows*cols); for (int j=0; j0.1) { // dense path, the scalar * columns products are accumulated into a dense column Scalar* __restrict__ tmp = buffer; // set to zero for (int k=0; k(buffer); // sparse path, the scalar * columns products are accumulated into a linked list int tmp_size = 0; int tmp_start = -1; for (typename Rhs::InnerIterator rhsIt(rhs, j); rhsIt; ++rhsIt) { int tmp_el = tmp_start; for (typename Lhs::InnerIterator lhsIt(lhs, rhsIt.index()); lhsIt; ++lhsIt) { Scalar v = lhsIt.value() * rhsIt.value(); int id = lhsIt.index(); if (tmp_size==0) { tmp_start = 0; tmp_el = 0; tmp_size++; tmp[0].value = v; tmp[0].index = id; tmp[0].next = -1; } else if (id= 0 && tmp[nextel].index<=id) { tmp_el = nextel; nextel = tmp[nextel].next; } if (tmp[tmp_el].index==id) { tmp[tmp_el].value += v; } else { tmp[tmp_size].value = v; tmp[tmp_size].index = id; tmp[tmp_size].next = tmp[tmp_el].next; tmp[tmp_el].next = tmp_size; tmp_size++; } } } } int k = tmp_start; while (k>=0) { if (tmp[k].value!=0) res.fill(tmp[k].index, j) = tmp[k].value; k = tmp[k].next; } } } res.endFill(); } }; template struct ei_sparse_product_selector { typedef SparseMatrix SparseTemporaryType; static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res) { SparseTemporaryType _res(res.rows(), res.cols()); ei_sparse_product_selector::run(lhs, rhs, _res); res = _res; } }; template struct ei_sparse_product_selector { static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res) { // let's transpose the product and fake the matrices are column 
major ei_sparse_product_selector::run(rhs, lhs, res); } }; template struct ei_sparse_product_selector { typedef SparseMatrix SparseTemporaryType; static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res) { // let's transpose the product and fake the matrices are column major ei_sparse_product_selector::run(rhs, lhs, res); } }; // NOTE eventually let's transpose one argument even in this case since it might be expensive if // the result is not dense. // template // struct ei_sparse_product_selector // { // static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res) // { // // trivial product as lhs.row/rhs.col dot products // // loop over the preferred order of the result // } // }; // NOTE the 2 others cases (col row *) must never occurs since they are caught // by ProductReturnType which transform it to (col col *) by evaluating rhs. template template inline Derived& MatrixBase::lazyAssign(const Product& product) { // std::cout << "sparse product to dense\n"; ei_sparse_product_selector< typename ei_cleantype::type, typename ei_cleantype::type, typename ei_cleantype::type>::run(product.lhs(),product.rhs(),derived()); return derived(); } template template inline Derived& SparseMatrixBase::operator=(const Product& product) { // std::cout << "sparse product to sparse\n"; ei_sparse_product_selector< typename ei_cleantype::type, typename ei_cleantype::type, Derived>::run(product.lhs(),product.rhs(),derived()); return derived(); } #endif // EIGEN_SPARSEPRODUCT_H