// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2011 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSESPARSEPRODUCTWITHPRUNING_H
#define EIGEN_SPARSESPARSEPRODUCTWITHPRUNING_H
namespace Eigen {
namespace internal {
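// Usage sketch (illustration only; the public API is defined elsewhere in Eigen):
// this kernel is normally reached through the pruned() member of a sparse product
// expression, e.g.
//
//   Eigen::SparseMatrix<double> A, B;                  // filled elsewhere
//   Eigen::SparseMatrix<double> C = (A * B).pruned();  // discard numerically tiny entries
//
// pruned() also accepts an explicit reference value and epsilon; the resulting
// tolerance is what gets forwarded to the helpers below.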
// perform a pseudo in-place sparse * sparse product assuming all matrices are col major
template<typename Lhs, typename Rhs, typename ResultType>
static void sparse_sparse_product_with_pruning_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res, const typename ResultType::RealScalar& tolerance)
{
// return sparse_sparse_product_with_pruning_impl2(lhs,rhs,res);
typedef typename remove_all<Lhs>::type::Scalar Scalar;
typedef typename remove_all<Lhs>::type::Index Index;
// make sure to call innerSize/outerSize since we fake the storage order.
Index rows = lhs.innerSize();
Index cols = rhs.outerSize();
//int size = lhs.outerSize();
eigen_assert(lhs.outerSize() == rhs.innerSize());
// allocate a temporary buffer
AmbiVector<Scalar,Index> tempVector(rows);
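  // AmbiVector is Eigen's internal hybrid accumulator for one result column: depending on
  // the expected density passed to init() below, it stores the coefficients either in a
  // plain dense buffer or in a sparse, list-like structure.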
  // estimate the number of non zero entries
  // given a rhs column containing Y non zeros, we assume that the corresponding Y columns
  // of the lhs differ from each other by about one non zero on average, so the product of
  // one rhs column with the lhs has roughly X+Y non zeros, where X is the average number
  // of non zeros per column of the lhs.
  // Therefore, we estimate nnz(lhs*rhs) to be about nnz(lhs) + nnz(rhs)
Index estimated_nnz_prod = lhs.nonZeros() + rhs.nonZeros();
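  // e.g. if lhs holds 5000 non zeros and rhs holds 3000, we reserve room for 8000
  // coefficients in res; this is only a heuristic, not an upper bound.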
// mimics a resizeByInnerOuter:
if(ResultType::IsRowMajor)
res.resize(cols, rows);
else
res.resize(rows, cols);
res.reserve(estimated_nnz_prod);
double ratioColRes = double(estimated_nnz_prod)/double(lhs.rows()*rhs.cols());
for (Index j=0; j<cols; ++j)
{
    // FIXME: a more accurate, per-column determination of the nnz ratio for the current
    // column j of res would be:
    //double ratioColRes = (double(rhs.innerVector(j).nonZeros()) + double(lhs.nonZeros())/double(lhs.cols()))/double(lhs.rows());
tempVector.init(ratioColRes);
tempVector.setZero();
for (typename Rhs::InnerIterator rhsIt(rhs, j); rhsIt; ++rhsIt)
{
// FIXME should be written like this: tmp += rhsIt.value() * lhs.col(rhsIt.index())
tempVector.restart();
Scalar x = rhsIt.value();
for (typename Lhs::InnerIterator lhsIt(lhs, rhsIt.index()); lhsIt; ++lhsIt)
{
tempVector.coeffRef(lhsIt.index()) += lhsIt.value() * x;
}
}
res.startVec(j);
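    // copy the accumulated column into res; the tolerance given to the AmbiVector
    // iterator is what implements the pruning: entries that are negligible with
    // respect to it are skipped and never inserted into the result.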
for (typename AmbiVector<Scalar,Index>::Iterator it(tempVector,tolerance); it; ++it)
res.insertBackByOuterInner(j,it.index()) = it.value();
}
res.finalize();
}
template<typename Lhs, typename Rhs, typename ResultType,
int LhsStorageOrder = traits<Lhs>::Flags&RowMajorBit,
int RhsStorageOrder = traits<Rhs>::Flags&RowMajorBit,
int ResStorageOrder = traits<ResultType>::Flags&RowMajorBit>
struct sparse_sparse_product_with_pruning_selector;
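// The three trailing template parameters encode the storage order of the lhs, the rhs and
// the result (RowMajorBit set or not); each supported combination gets its own
// specialization below, all of which reduce the work to the col-major kernel above.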
template<typename Lhs, typename Rhs, typename ResultType>
struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,ColMajor,ColMajor,ColMajor>
{
typedef typename traits<typename remove_all<Lhs>::type>::Scalar Scalar;
typedef typename ResultType::RealScalar RealScalar;
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
{
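    // build the product into a fresh temporary and swap it into res: swapping sparse
    // matrices is cheap, and the temporary keeps res untouched until the end, which is
    // safe in case res aliases lhs or rhs.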
typename remove_all<ResultType>::type _res(res.rows(), res.cols());
internal::sparse_sparse_product_with_pruning_impl<Lhs,Rhs,ResultType>(lhs, rhs, _res, tolerance);
res.swap(_res);
}
};
template<typename Lhs, typename Rhs, typename ResultType>
struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,ColMajor,ColMajor,RowMajor>
{
typedef typename ResultType::RealScalar RealScalar;
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
{
// we need a col-major matrix to hold the result
typedef SparseMatrix<typename ResultType::Scalar> SparseTemporaryType;
SparseTemporaryType _res(res.rows(), res.cols());
internal::sparse_sparse_product_with_pruning_impl<Lhs,Rhs,SparseTemporaryType>(lhs, rhs, _res, tolerance);
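    // the assignment below copies the col-major temporary into res, performing the
    // storage-order conversion to row-major in the process.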
res = _res;
}
};
template<typename Lhs, typename Rhs, typename ResultType>
struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,RowMajor,RowMajor,RowMajor>
{
typedef typename ResultType::RealScalar RealScalar;
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
{
    // swap the operands to transpose the product: (lhs*rhs)^T = rhs^T * lhs^T, and since
    // the row-major inputs and output are read by the kernel as col-major (i.e. transposed),
    // calling the col-major kernel on (rhs,lhs) directly yields the desired row-major result.
typename remove_all<ResultType>::type _res(res.rows(), res.cols());
internal::sparse_sparse_product_with_pruning_impl<Rhs,Lhs,ResultType>(rhs, lhs, _res, tolerance);
res.swap(_res);
}
};
template<typename Lhs, typename Rhs, typename ResultType>
struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,RowMajor,RowMajor,ColMajor>
{
typedef typename ResultType::RealScalar RealScalar;
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
{
typedef SparseMatrix<typename ResultType::Scalar,ColMajor> ColMajorMatrix;
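    // evaluate both operands into col-major copies so that the col-major kernel above can
    // be reused; this costs two temporary conversions.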
ColMajorMatrix colLhs(lhs);
ColMajorMatrix colRhs(rhs);
internal::sparse_sparse_product_with_pruning_impl<ColMajorMatrix,ColMajorMatrix,ResultType>(colLhs, colRhs, res, tolerance);
// let's transpose the product to get a column x column product
// typedef SparseMatrix<typename ResultType::Scalar> SparseTemporaryType;
// SparseTemporaryType _res(res.cols(), res.rows());
// sparse_sparse_product_with_pruning_impl<Rhs,Lhs,SparseTemporaryType>(rhs, lhs, _res);
// res = _res.transpose();
}
};
// NOTE: the two remaining cases, (col,row,*), must never occur since they are caught by
// ProductReturnType which transforms them into (col,col,*) by evaluating the rhs.
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_SPARSESPARSEPRODUCTWITHPRUNING_H