author     Rasmus Munk Larsen <rmlarsen@google.com>   2016-04-14 13:57:35 -0700
committer  Rasmus Munk Larsen <rmlarsen@google.com>   2016-04-14 13:57:35 -0700
commit     235e83aba608cf3d94b033bfbf551f8c136a3fab (patch)
tree       7b011fee8fe18b605320c69e75995cf8521fbdf4 /unsupported/Eigen/CXX11/src/Tensor/TensorBroadcasting.h
parent     3551dea887ce60756c28796e83bb7c080f2b2782 (diff)
Eigen cost model part 1. This implements a basic recursive framework to estimate the cost of evaluating tensor expressions.
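As the new costPerCoeff() in the diff below illustrates, each evaluator adds its own per-coefficient arithmetic to the cost reported by its argument evaluator (m_impl), so the estimate composes recursively over the expression tree. A minimal sketch of that recursive shape, using illustrative stand-in names (OpCost and SquareOpEvaluator are not the actual types this patch introduces):

    // Sketch: every evaluator reports its own per-coefficient cost plus
    // the recursively computed cost of its child expression.
    struct OpCost {
      double bytes_loaded, bytes_stored, compute_cycles;
      OpCost operator+(const OpCost& rhs) const {
        return OpCost{bytes_loaded + rhs.bytes_loaded,
                      bytes_stored + rhs.bytes_stored,
                      compute_cycles + rhs.compute_cycles};
      }
    };

    template <typename Input>
    struct SquareOpEvaluator {  // hypothetical unary node: x -> x * x
      Input input;
      OpCost costPerCoeff() const {
        // one multiply of our own, plus whatever the child costs
        return input.costPerCoeff() + OpCost{0, 0, 1};
      }
    };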
Diffstat (limited to 'unsupported/Eigen/CXX11/src/Tensor/TensorBroadcasting.h')
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorBroadcasting.h | 51
1 file changed, 36 insertions(+), 15 deletions(-)
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorBroadcasting.h b/unsupported/Eigen/CXX11/src/Tensor/TensorBroadcasting.h
index b6e6db12a..c771496e2 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorBroadcasting.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorBroadcasting.h
@@ -101,6 +101,9 @@ struct TensorEvaluator<const TensorBroadcastingOp<Broadcast, ArgType>, Device>
  typedef DSizes<Index, NumDims> Dimensions;
  typedef typename XprType::Scalar Scalar;
  typedef typename TensorEvaluator<ArgType, Device>::Dimensions InputDimensions;
+  typedef typename XprType::CoeffReturnType CoeffReturnType;
+  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
+  static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;

  enum {
    IsAligned = false,
@@ -140,9 +143,6 @@ struct TensorEvaluator<const TensorBroadcastingOp<Broadcast, ArgType>, Device>
    }
  }

-  typedef typename XprType::CoeffReturnType CoeffReturnType;
-  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
-
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* /*data*/) {
@@ -247,9 +247,8 @@ struct TensorEvaluator<const TensorBroadcastingOp<Broadcast, ArgType>, Device>
  template<int LoadMode>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packetColMajor(Index index) const
  {
-    const int packetSize = internal::unpacket_traits<PacketReturnType>::size;
-    EIGEN_STATIC_ASSERT(packetSize > 1, YOU_MADE_A_PROGRAMMING_MISTAKE)
-    eigen_assert(index+packetSize-1 < dimensions().TotalSize());
+    EIGEN_STATIC_ASSERT(PacketSize > 1, YOU_MADE_A_PROGRAMMING_MISTAKE)
+    eigen_assert(index+PacketSize-1 < dimensions().TotalSize());

    const Index originalIndex = index;

@@ -284,12 +283,12 @@ struct TensorEvaluator<const TensorBroadcastingOp<Broadcast, ArgType>, Device>

    // Todo: this could be extended to the second dimension if we're not
    // broadcasting alongside the first dimension, and so on.
-    if (innermostLoc + packetSize <= m_impl.dimensions()[0]) {
+    if (innermostLoc + PacketSize <= m_impl.dimensions()[0]) {
      return m_impl.template packet<Unaligned>(inputIndex);
    } else {
-      EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[packetSize];
+      EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize];
      values[0] = m_impl.coeff(inputIndex);
-      for (int i = 1; i < packetSize; ++i) {
+      for (int i = 1; i < PacketSize; ++i) {
        values[i] = coeffColMajor(originalIndex+i);
      }
      PacketReturnType rslt = internal::pload<PacketReturnType>(values);
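The guarded load above is the broadcasting fast path: a full packet is read in one shot only when it cannot run past the end of the innermost input dimension; otherwise the coefficients are gathered one at a time (each with its input index recomputed) into an aligned buffer, and the packet is loaded from there. A standalone sketch of the same pattern, with illustrative names and a fixed packet width:

    // Sketch only (not Eigen code): broadcast a length-inputSize buffer
    // along the innermost dimension, producing 4 coefficients at a time.
    static const int kPacket = 4;

    void broadcastPacket(const float* input, int inputSize,
                         int outputIndex, float out[kPacket]) {
      const int innermostLoc = outputIndex % inputSize;
      if (innermostLoc + kPacket <= inputSize) {
        // Fast path: the whole packet maps to contiguous input memory.
        for (int i = 0; i < kPacket; ++i) out[i] = input[innermostLoc + i];
      } else {
        // Fallback: the packet straddles the broadcast boundary, so each
        // coefficient's input index wraps modulo the input size.
        for (int i = 0; i < kPacket; ++i)
          out[i] = input[(innermostLoc + i) % inputSize];
      }
    }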
@@ -300,9 +299,8 @@ struct TensorEvaluator<const TensorBroadcastingOp<Broadcast, ArgType>, Device>
  template<int LoadMode>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packetRowMajor(Index index) const
  {
-    const int packetSize = internal::unpacket_traits<PacketReturnType>::size;
-    EIGEN_STATIC_ASSERT(packetSize > 1, YOU_MADE_A_PROGRAMMING_MISTAKE)
-    eigen_assert(index+packetSize-1 < dimensions().TotalSize());
+    EIGEN_STATIC_ASSERT(PacketSize > 1, YOU_MADE_A_PROGRAMMING_MISTAKE)
+    eigen_assert(index+PacketSize-1 < dimensions().TotalSize());

    const Index originalIndex = index;

@@ -337,12 +335,12 @@ struct TensorEvaluator<const TensorBroadcastingOp<Broadcast, ArgType>, Device>

    // Todo: this could be extended to the second dimension if we're not
    // broadcasting alongside the first dimension, and so on.
-    if (innermostLoc + packetSize <= m_impl.dimensions()[NumDims-1]) {
+    if (innermostLoc + PacketSize <= m_impl.dimensions()[NumDims-1]) {
      return m_impl.template packet<Unaligned>(inputIndex);
    } else {
-      EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[packetSize];
+      EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize];
      values[0] = m_impl.coeff(inputIndex);
-      for (int i = 1; i < packetSize; ++i) {
+      for (int i = 1; i < PacketSize; ++i) {
        values[i] = coeffRowMajor(originalIndex+i);
      }
      PacketReturnType rslt = internal::pload<PacketReturnType>(values);
@@ -350,6 +348,29 @@ struct TensorEvaluator<const TensorBroadcastingOp<Broadcast, ArgType>, Device>
    }
  }

+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
+  costPerCoeff(bool vectorized) const {
+    double compute_cost = TensorOpCost::AddCost<Index>();
+    if (NumDims > 0) {
+      for (int i = NumDims - 1; i > 0; --i) {
+        compute_cost += TensorOpCost::DivCost<Index>();
+        if (internal::index_statically_eq<Broadcast>()(i, 1)) {
+          compute_cost +=
+              TensorOpCost::MulCost<Index>() + TensorOpCost::AddCost<Index>();
+        } else {
+          if (!internal::index_statically_eq<InputDimensions>()(i, 1)) {
+            compute_cost += TensorOpCost::MulCost<Index>() +
+                            TensorOpCost::ModCost<Index>() +
+                            TensorOpCost::AddCost<Index>();
+          }
+        }
+        compute_cost +=
+            TensorOpCost::MulCost<Index>() + TensorOpCost::AddCost<Index>();
+      }
+    }
+    return m_impl.costPerCoeff(vectorized) +
+           TensorOpCost(0, 0, compute_cost, vectorized, PacketSize);
+  }

EIGEN_DEVICE_FUNC Scalar* data() const { return NULL; }
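Downstream, a device-specific scheduler can fold the returned TensorOpCost into its launch decisions. A hypothetical consumer of the new interface, where the helper name, the threshold, and the total_cost() accessor are assumptions for illustration rather than part of this patch:

    // Hypothetical use: estimate whether an expression of numCoeffs
    // coefficients is expensive enough to be worth parallelizing.
    template <typename Evaluator>
    bool worthParallelizing(const Evaluator& eval, double numCoeffs) {
      const TensorOpCost cost = eval.costPerCoeff(/*vectorized=*/true);
      // total_cost() is assumed to aggregate load/store/compute costs.
      return numCoeffs * cost.total_cost() > 1e5;  // assumed launch overhead
    }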