path: root/unsupported/Eigen/CXX11/src/Tensor/Tensor.h
author    Benoit Steiner <benoit.steiner.goog@gmail.com>  2014-04-28 10:32:27 -0700
committer Benoit Steiner <benoit.steiner.goog@gmail.com>  2014-04-28 10:32:27 -0700
commit    c0f2cb016e60b7dbde1d5946f42234a709a711f9 (patch)
tree      346d5beb917ea586a6a463312606cf794c91da75 /unsupported/Eigen/CXX11/src/Tensor/Tensor.h
parent    450d0c3de044c9f32fa2f37fee821f6e390df382 (diff)
Extended support for Tensors:
* Added the ability to map a region of memory to a tensor.
* Added basic support for unary and binary coefficient-wise expressions, such as addition or square root.
* Provided an emulation layer to make it possible to compile the code with compilers (such as nvcc) that don't support C++11.
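As a rough illustration of the features described above, the sketch below constructs two tensors with the variadic constructor from this patch, fills them through the variadic operator(), combines them with coefficient-wise expressions, and maps an existing buffer as a tensor. The include path, the TensorMap class name, the sqrt() spelling, and the map constructor signature are assumptions based on later versions of the unsupported Tensor module, not something shown in this diff.

// Illustrative sketch only; TensorMap, sqrt() and the include path are
// assumed names, the rest mirrors the API visible in the patch below.
#include <unsupported/Eigen/CXX11/Tensor>

int main()
{
  // Rank-2 float tensors, sized via the variadic constructor
  // (available when EIGEN_HAS_VARIADIC_TEMPLATES is defined).
  Eigen::Tensor<float, 2> a(3, 4);
  Eigen::Tensor<float, 2> b(3, 4);
  for (int i = 0; i < 3; ++i)
    for (int j = 0; j < 4; ++j) {
      a(i, j) = 1.0f;   // variadic operator() from the patch
      b(i, j) = 4.0f;
    }

  // Binary and unary coefficient-wise expressions, evaluated through the
  // expression operator= added in this commit.
  Eigen::Tensor<float, 2> c(3, 4);
  c = a + b;            // addition
  Eigen::Tensor<float, 2> d(3, 4);
  d = b.sqrt();         // square root (spelling assumed)

  // Mapping a region of existing memory as a tensor (class name assumed).
  float buffer[12];
  Eigen::TensorMap<Eigen::Tensor<float, 2> > m(buffer, 3, 4);
  m = a + b;

  return 0;
}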
Diffstat (limited to 'unsupported/Eigen/CXX11/src/Tensor/Tensor.h')
-rw-r--r--   unsupported/Eigen/CXX11/src/Tensor/Tensor.h   156
1 file changed, 82 insertions, 74 deletions
diff --git a/unsupported/Eigen/CXX11/src/Tensor/Tensor.h b/unsupported/Eigen/CXX11/src/Tensor/Tensor.h
index c6216e14c..7b8f14c6d 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/Tensor.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/Tensor.h
@@ -57,28 +57,16 @@ namespace Eigen {
*
* \ref TopicStorageOrders
*/
-template<typename Scalar_, std::size_t NumIndices_, int Options_ = 0>
-class Tensor;
namespace internal {
-template<typename Scalar_, std::size_t NumIndices_, int Options_>
-struct traits<Tensor<Scalar_, NumIndices_, Options_>>
-{
- typedef Scalar_ Scalar;
- typedef Dense StorageKind;
- typedef DenseIndex Index;
- enum {
- Options = Options_
- };
-};
template<typename Index, std::size_t NumIndices, std::size_t n, bool RowMajor>
struct tensor_index_linearization_helper
{
- constexpr static inline Index run(std::array<Index, NumIndices> const& indices, std::array<Index, NumIndices> const& dimensions)
+ static inline Index run(array<Index, NumIndices> const& indices, array<Index, NumIndices> const& dimensions)
{
- return std_array_get<RowMajor ? n : (NumIndices - n - 1)>(indices) +
- std_array_get<RowMajor ? n : (NumIndices - n - 1)>(dimensions) *
+ return array_get<RowMajor ? n : (NumIndices - n - 1)>(indices) +
+ array_get<RowMajor ? n : (NumIndices - n - 1)>(dimensions) *
tensor_index_linearization_helper<Index, NumIndices, n - 1, RowMajor>::run(indices, dimensions);
}
};
@@ -86,39 +74,40 @@ struct tensor_index_linearization_helper
template<typename Index, std::size_t NumIndices, bool RowMajor>
struct tensor_index_linearization_helper<Index, NumIndices, 0, RowMajor>
{
- constexpr static inline Index run(std::array<Index, NumIndices> const& indices, std::array<Index, NumIndices> const&)
+ static inline Index run(array<Index, NumIndices> const& indices, array<Index, NumIndices> const&)
{
- return std_array_get<RowMajor ? 0 : NumIndices - 1>(indices);
+ return array_get<RowMajor ? 0 : NumIndices - 1>(indices);
}
};
/* Forward-declaration required for the symmetry support. */
template<typename Tensor_, typename Symmetry_, int Flags = 0> class tensor_symmetry_value_setter;
+
} // end namespace internal
template<typename Scalar_, std::size_t NumIndices_, int Options_>
-class Tensor
+class Tensor : public TensorBase<Tensor<Scalar_, NumIndices_, Options_> >
{
- static_assert(NumIndices_ >= 1, "A tensor must have at least one index.");
-
public:
typedef Tensor<Scalar_, NumIndices_, Options_> Self;
+ typedef TensorBase<Tensor<Scalar_, NumIndices_, Options_> > Base;
+ typedef typename Eigen::internal::nested<Self>::type Nested;
typedef typename internal::traits<Self>::StorageKind StorageKind;
typedef typename internal::traits<Self>::Index Index;
- typedef typename internal::traits<Self>::Scalar Scalar;
+ typedef Scalar_ Scalar;
typedef typename internal::packet_traits<Scalar>::type PacketScalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
- typedef Self DenseType;
+ typedef typename Base::CoeffReturnType CoeffReturnType;
- constexpr static int Options = Options_;
- constexpr static std::size_t NumIndices = NumIndices_;
+ static const int Options = Options_;
+ static const std::size_t NumIndices = NumIndices_;
protected:
TensorStorage<Scalar, NumIndices, Dynamic, Options> m_storage;
public:
EIGEN_STRONG_INLINE Index dimension(std::size_t n) const { return m_storage.dimensions()[n]; }
- EIGEN_STRONG_INLINE std::array<Index, NumIndices> dimensions() const { return m_storage.dimensions(); }
+ EIGEN_STRONG_INLINE array<Index, NumIndices> dimensions() const { return m_storage.dimensions(); }
EIGEN_STRONG_INLINE Index size() const { return internal::array_prod(m_storage.dimensions()); }
EIGEN_STRONG_INLINE Scalar *data() { return m_storage.data(); }
EIGEN_STRONG_INLINE const Scalar *data() const { return m_storage.data(); }
@@ -129,29 +118,17 @@ class Tensor
inline Self& base() { return *this; }
inline const Self& base() const { return *this; }
- void setZero()
- {
- // FIXME: until we have implemented packet access and the
- // expression engine w.r.t. nullary ops, use this
- // as a kludge. Only works with POD types, but for
- // any standard usage, this shouldn't be a problem
- memset((void *)data(), 0, size() * sizeof(Scalar));
- }
-
- inline Self& operator=(Self const& other)
- {
- m_storage = other.m_storage;
- return *this;
- }
-
+#ifdef EIGEN_HAS_VARIADIC_TEMPLATES
template<typename... IndexTypes>
inline const Scalar& coeff(Index firstIndex, Index secondIndex, IndexTypes... otherIndices) const
{
- static_assert(sizeof...(otherIndices) + 2 == NumIndices, "Number of indices used to access a tensor coefficient must be equal to the rank of the tensor.");
- return coeff(std::array<Index, NumIndices>{{firstIndex, secondIndex, otherIndices...}});
+ // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
+ EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
+ return coeff(array<Index, NumIndices>{{firstIndex, secondIndex, otherIndices...}});
}
+#endif
- inline const Scalar& coeff(const std::array<Index, NumIndices>& indices) const
+ inline const Scalar& coeff(const array<Index, NumIndices>& indices) const
{
eigen_internal_assert(checkIndexRange(indices));
return m_storage.data()[linearizedIndex(indices)];
@@ -163,14 +140,17 @@ class Tensor
return m_storage.data()[index];
}
+#ifdef EIGEN_HAS_VARIADIC_TEMPLATES
template<typename... IndexTypes>
inline Scalar& coeffRef(Index firstIndex, Index secondIndex, IndexTypes... otherIndices)
{
- static_assert(sizeof...(otherIndices) + 2 == NumIndices, "Number of indices used to access a tensor coefficient must be equal to the rank of the tensor.");
- return coeffRef(std::array<Index, NumIndices>{{firstIndex, secondIndex, otherIndices...}});
+ // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
+ EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
+ return coeffRef(array<Index, NumIndices>{{firstIndex, secondIndex, otherIndices...}});
}
+#endif
- inline Scalar& coeffRef(const std::array<Index, NumIndices>& indices)
+ inline Scalar& coeffRef(const array<Index, NumIndices>& indices)
{
eigen_internal_assert(checkIndexRange(indices));
return m_storage.data()[linearizedIndex(indices)];
@@ -182,14 +162,17 @@ class Tensor
return m_storage.data()[index];
}
+#ifdef EIGEN_HAS_VARIADIC_TEMPLATES
template<typename... IndexTypes>
inline const Scalar& operator()(Index firstIndex, Index secondIndex, IndexTypes... otherIndices) const
{
- static_assert(sizeof...(otherIndices) + 2 == NumIndices, "Number of indices used to access a tensor coefficient must be equal to the rank of the tensor.");
- return this->operator()(std::array<Index, NumIndices>{{firstIndex, secondIndex, otherIndices...}});
+ // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
+ EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
+ return this->operator()(array<Index, NumIndices>{{firstIndex, secondIndex, otherIndices...}});
}
+#endif
- inline const Scalar& operator()(const std::array<Index, NumIndices>& indices) const
+ inline const Scalar& operator()(const array<Index, NumIndices>& indices) const
{
eigen_assert(checkIndexRange(indices));
return coeff(indices);
@@ -203,18 +186,22 @@ class Tensor
inline const Scalar& operator[](Index index) const
{
- static_assert(NumIndices == 1, "The bracket operator is only for vectors, use the parenthesis operator instead.");
+ // The bracket operator is only for vectors, use the parenthesis operator instead.
+ EIGEN_STATIC_ASSERT(NumIndices == 1, YOU_MADE_A_PROGRAMMING_MISTAKE);
return coeff(index);
}
+#ifdef EIGEN_HAS_VARIADIC_TEMPLATES
template<typename... IndexTypes>
inline Scalar& operator()(Index firstIndex, Index secondIndex, IndexTypes... otherIndices)
{
- static_assert(sizeof...(otherIndices) + 2 == NumIndices, "Number of indices used to access a tensor coefficient must be equal to the rank of the tensor.");
- return operator()(std::array<Index, NumIndices>{{firstIndex, secondIndex, otherIndices...}});
+ // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
+ EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
+ return operator()(array<Index, NumIndices>{{firstIndex, secondIndex, otherIndices...}});
}
+#endif
- inline Scalar& operator()(const std::array<Index, NumIndices>& indices)
+ inline Scalar& operator()(const array<Index, NumIndices>& indices)
{
eigen_assert(checkIndexRange(indices));
return coeffRef(indices);
@@ -228,47 +215,70 @@ class Tensor
inline Scalar& operator[](Index index)
{
- static_assert(NumIndices == 1, "The bracket operator is only for vectors, use the parenthesis operator instead.");
+ // The bracket operator is only for vectors, use the parenthesis operator instead
+ EIGEN_STATIC_ASSERT(NumIndices == 1, YOU_MADE_A_PROGRAMMING_MISTAKE)
return coeffRef(index);
}
- inline Tensor()
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Tensor()
: m_storage()
{
}
- inline Tensor(const Self& other)
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Tensor(const Self& other)
: m_storage(other.m_storage)
{
}
- inline Tensor(Self&& other)
- : m_storage(other.m_storage)
- {
- }
+#ifdef EIGEN_HAVE_RVALUE_REFERENCES
+// inline Tensor(Self&& other)
+// : m_storage(other.m_storage)
+// {
+// }
+#endif
+#ifdef EIGEN_HAS_VARIADIC_TEMPLATES
template<typename... IndexTypes>
inline Tensor(Index firstDimension, IndexTypes... otherDimensions)
: m_storage()
{
- static_assert(sizeof...(otherDimensions) + 1 == NumIndices, "Number of dimensions used to construct a tensor must be equal to the rank of the tensor.");
- resize(std::array<Index, NumIndices>{{firstDimension, otherDimensions...}});
+ // The number of dimensions used to construct a tensor must be equal to the rank of the tensor.
+ EIGEN_STATIC_ASSERT(sizeof...(otherDimensions) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
+ resize(array<Index, NumIndices>{{firstDimension, otherDimensions...}});
}
+#endif
- inline Tensor(std::array<Index, NumIndices> dimensions)
+ inline Tensor(const array<Index, NumIndices>& dimensions)
: m_storage(internal::array_prod(dimensions), dimensions)
{
EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
}
+
+ template<typename OtherDerived>
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Tensor& operator=(const OtherDerived& other)
+ {
+ // FIXME: we need to resize the tensor to fix the dimensions of the other.
+ // Unfortunately this isn't possible yet when the rhs is an expression.
+ // resize(other.dimensions());
+ internal::TensorAssign<Tensor, const OtherDerived>::run(*this, other);
+ return *this;
+ }
+
+#ifdef EIGEN_HAS_VARIADIC_TEMPLATES
template<typename... IndexTypes>
void resize(Index firstDimension, IndexTypes... otherDimensions)
{
- static_assert(sizeof...(otherDimensions) + 1 == NumIndices, "Number of dimensions used to resize a tensor must be equal to the rank of the tensor.");
- resize(std::array<Index, NumIndices>{{firstDimension, otherDimensions...}});
+ // The number of dimensions used to resize a tensor must be equal to the rank of the tensor.
+ EIGEN_STATIC_ASSERT(sizeof...(otherDimensions) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
+ resize(array<Index, NumIndices>{{firstDimension, otherDimensions...}});
}
+#endif
- void resize(const std::array<Index, NumIndices>& dimensions)
+ void resize(const array<Index, NumIndices>& dimensions)
{
std::size_t i;
Index size = Index(1);
@@ -285,20 +295,22 @@ class Tensor
#endif
}
+#ifdef EIGEN_HAS_VARIADIC_TEMPLATES
template<typename Symmetry_, typename... IndexTypes>
internal::tensor_symmetry_value_setter<Self, Symmetry_> symCoeff(const Symmetry_& symmetry, Index firstIndex, IndexTypes... otherIndices)
{
- return symCoeff(symmetry, std::array<Index, NumIndices>{{firstIndex, otherIndices...}});
+ return symCoeff(symmetry, array<Index, NumIndices>{{firstIndex, otherIndices...}});
}
template<typename Symmetry_, typename... IndexTypes>
- internal::tensor_symmetry_value_setter<Self, Symmetry_> symCoeff(const Symmetry_& symmetry, std::array<Index, NumIndices> const& indices)
+ internal::tensor_symmetry_value_setter<Self, Symmetry_> symCoeff(const Symmetry_& symmetry, array<Index, NumIndices> const& indices)
{
return internal::tensor_symmetry_value_setter<Self, Symmetry_>(*this, symmetry, indices);
}
+#endif
protected:
- bool checkIndexRange(const std::array<Index, NumIndices>& indices) const
+ bool checkIndexRange(const array<Index, NumIndices>& indices) const
{
using internal::array_apply_and_reduce;
using internal::array_zip_and_reduce;
@@ -313,7 +325,7 @@ class Tensor
array_zip_and_reduce<logical_and_op, lesser_op>(indices, m_storage.dimensions());
}
- inline Index linearizedIndex(const std::array<Index, NumIndices>& indices) const
+ inline Index linearizedIndex(const array<Index, NumIndices>& indices) const
{
return internal::tensor_index_linearization_helper<Index, NumIndices, NumIndices - 1, Options&RowMajor>::run(indices, m_storage.dimensions());
}
@@ -322,7 +334,3 @@ class Tensor
} // end namespace Eigen
#endif // EIGEN_CXX11_TENSOR_TENSOR_H
-
-/*
- * kate: space-indent on; indent-width 2; mixedindent off; indent-mode cstyle;
- */
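
For reference, the recursion in internal::tensor_index_linearization_helper above unrolls, in the column-major case (Options & RowMajor == 0), to i0 + d0*(i1 + d1*(i2 + ...)), with the first index varying fastest. The following standalone, non-Eigen sketch computes the same linearized index iteratively; it uses std::array rather than the array emulation layer introduced by this patch.

#include <array>
#include <cstddef>
#include <iostream>

// Iterative equivalent of tensor_index_linearization_helper for the
// column-major case: start from the recursion's base case (the last index)
// and unwind the nested multiply-adds.
template <typename Index, std::size_t NumIndices>
Index column_major_linear_index(const std::array<Index, NumIndices>& indices,
                                const std::array<Index, NumIndices>& dims)
{
  Index linear = indices[NumIndices - 1];
  for (std::size_t n = NumIndices - 1; n > 0; --n)
    linear = indices[n - 1] + dims[n - 1] * linear;
  return linear;
}

int main()
{
  std::array<long, 3> dims    = {{2, 3, 4}};
  std::array<long, 3> indices = {{1, 2, 3}};
  // 1 + 2*(2 + 3*3) = 23
  std::cout << column_major_linear_index(indices, dims) << "\n";
  return 0;
}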