author     Benoit Steiner <benoit.steiner.goog@gmail.com>  2014-04-28 10:32:27 -0700
committer  Benoit Steiner <benoit.steiner.goog@gmail.com>  2014-04-28 10:32:27 -0700
commit     c0f2cb016e60b7dbde1d5946f42234a709a711f9 (patch)
tree       346d5beb917ea586a6a463312606cf794c91da75
parent     450d0c3de044c9f32fa2f37fee821f6e390df382 (diff)
Extended support for Tensors:

* Added the ability to map a region of memory to a tensor
* Added basic support for unary and binary coefficient-wise expressions, such
  as addition or square root
* Provided an emulation layer to make it possible to compile the code with
  compilers (such as nvcc) that don't support C++11
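Taken together, the pieces in this patch allow usage along the following lines.
This is a minimal illustrative sketch, not part of the commit; it assumes a
C++11 compiler and that the Eigen root (including unsupported/) is on the
include path, as in the tests enabled below.

    #include <Eigen/CXX11/Tensor>

    int main()
    {
      Eigen::Tensor<float, 2> a(2, 3), b(2, 3);
      a.setConstant(1.0f);              // initializers come from TensorBase
      b.setRandom();

      Eigen::Tensor<float, 2> c(2, 3);  // destination must be pre-sized for now
      c = a + b;                        // binary coefficient-wise expression
      c = a.cwiseSqrt();                // unary coefficient-wise expression

      float raw[6] = {0, 1, 2, 3, 4, 5};
      Eigen::TensorMap<Eigen::Tensor<float, 2> > m(raw, 2, 3);  // maps raw, no copy
      c = m + a;                        // maps and tensors mix in expressions
      return 0;
    }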
-rw-r--r--  Eigen/src/Core/util/Macros.h                                   |   5
-rw-r--r--  unsupported/Eigen/CXX11/Core                                   |  14
-rw-r--r--  unsupported/Eigen/CXX11/Tensor                                 |  27
-rw-r--r--  unsupported/Eigen/CXX11/src/Core/util/CXX11Meta.h              |  24
-rw-r--r--  unsupported/Eigen/CXX11/src/Core/util/CXX11Workarounds.h       |  16
-rw-r--r--  unsupported/Eigen/CXX11/src/Core/util/EmulateCXX11Meta.h       | 184
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/Tensor.h                    | 156
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorAssign.h              |  52
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorBase.h                |  82
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorEvaluator.h           | 127
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorExpr.h                | 161
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h |  27
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorMap.h                 | 101
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorStorage.h             |  52
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorTraits.h              | 122
-rw-r--r--  unsupported/test/CMakeLists.txt                                |   5
-rw-r--r--  unsupported/test/cxx11_tensor_simple.cpp                       |   2
17 files changed, 1028 insertions, 129 deletions
diff --git a/Eigen/src/Core/util/Macros.h b/Eigen/src/Core/util/Macros.h
index bfd6ba7de..3a928001e 100644
--- a/Eigen/src/Core/util/Macros.h
+++ b/Eigen/src/Core/util/Macros.h
@@ -121,6 +121,11 @@
#define EIGEN_HAVE_RVALUE_REFERENCES
#endif
+// Does the compiler support variadic templates?
+#if __cplusplus > 199711L
+#define EIGEN_HAS_VARIADIC_TEMPLATES 1
+#endif
+
/** Allows to disable some optimizations which might affect the accuracy of the result.
* Such optimization are enabled by default, and set EIGEN_FAST_MATH to 0 to disable them.
* They currently include:
diff --git a/unsupported/Eigen/CXX11/Core b/unsupported/Eigen/CXX11/Core
index 4dc4ab224..bba3d578d 100644
--- a/unsupported/Eigen/CXX11/Core
+++ b/unsupported/Eigen/CXX11/Core
@@ -2,6 +2,7 @@
// for linear algebra.
//
// Copyright (C) 2013 Christian Seiler <christian@iwakd.de>
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@@ -21,20 +22,23 @@
* module. Note that at this stage, you should not need to include
* this module directly.
*
+ * It also provides a limited fallback for compilers that don't support
+ * CXX11 yet, such as nvcc.
+ *
* \code
* #include <Eigen/CXX11/Core>
* \endcode
*/
-#include <array>
-
+// Emulate the cxx11 functionality that we need if the compiler doesn't support it.
+#if __cplusplus <= 199711L
+#include "src/Core/util/EmulateCXX11Meta.h"
+#else
#include "src/Core/util/CXX11Workarounds.h"
#include "src/Core/util/CXX11Meta.h"
+#endif
#include <Eigen/src/Core/util/ReenableStupidWarnings.h>
#endif // EIGEN_CXX11_CORE_MODULE
-/*
- * kate: space-indent on; indent-width 2; mixedindent off; indent-mode cstyle;
- */
diff --git a/unsupported/Eigen/CXX11/Tensor b/unsupported/Eigen/CXX11/Tensor
index f2c5129b3..f554c204a 100644
--- a/unsupported/Eigen/CXX11/Tensor
+++ b/unsupported/Eigen/CXX11/Tensor
@@ -10,9 +10,10 @@
#ifndef EIGEN_CXX11_TENSOR_MODULE
#define EIGEN_CXX11_TENSOR_MODULE
-#include <Eigen/CXX11/Core>
+#include "Eigen/src/Core/util/StaticAssert.h"
+#include "unsupported/Eigen/CXX11/Core"
-#include <Eigen/src/Core/util/DisableStupidWarnings.h>
+#include "Eigen/src/Core/util/DisableStupidWarnings.h"
/** \defgroup CXX11_Tensor_Module Tensor Module
*
@@ -27,13 +28,21 @@
#include <cstddef>
#include <cstring>
-#include "src/Tensor/TensorStorage.h"
-#include "src/Tensor/Tensor.h"
+#include "Eigen/Core"
-#include <Eigen/src/Core/util/ReenableStupidWarnings.h>
+#include "unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h"
+#include "unsupported/Eigen/CXX11/src/Tensor/TensorTraits.h"
-#endif // EIGEN_CXX11_TENSOR_MODULE
+#include "unsupported/Eigen/CXX11/src/Tensor/TensorBase.h"
+
+#include "unsupported/Eigen/CXX11/src/Tensor/TensorEvaluator.h"
+#include "unsupported/Eigen/CXX11/src/Tensor/TensorExpr.h"
+#include "unsupported/Eigen/CXX11/src/Tensor/TensorAssign.h"
+
+#include "unsupported/Eigen/CXX11/src/Tensor/TensorStorage.h"
+#include "unsupported/Eigen/CXX11/src/Tensor/Tensor.h"
+#include "unsupported/Eigen/CXX11/src/Tensor/TensorMap.h"
-/*
- * kate: space-indent on; indent-width 2; mixedindent off; indent-mode cstyle;
- */
+#include "Eigen/src/Core/util/ReenableStupidWarnings.h"
+
+#endif // EIGEN_CXX11_TENSOR_MODULE
diff --git a/unsupported/Eigen/CXX11/src/Core/util/CXX11Meta.h b/unsupported/Eigen/CXX11/src/Core/util/CXX11Meta.h
index 618e2eb7b..47f06b1b5 100644
--- a/unsupported/Eigen/CXX11/src/Core/util/CXX11Meta.h
+++ b/unsupported/Eigen/CXX11/src/Core/util/CXX11Meta.h
@@ -317,7 +317,7 @@ constexpr inline decltype(reduce<sum_op, Ts...>::run((*((Ts*)0))...)) arg_sum(Ts
template<typename Array, int... n>
constexpr inline Array h_array_reverse(Array arr, numeric_list<int, n...>)
{
- return {{std_array_get<sizeof...(n) - n - 1>(arr)...}};
+ return {{array_get<sizeof...(n) - n - 1>(arr)...}};
}
template<typename T, std::size_t N>
@@ -335,9 +335,9 @@ constexpr inline std::array<T, N> array_reverse(std::array<T, N> arr)
// an infinite loop)
template<typename Reducer, typename T, std::size_t N, std::size_t n = N - 1>
struct h_array_reduce {
- constexpr static inline auto run(std::array<T, N> arr) -> decltype(Reducer::run(h_array_reduce<Reducer, T, N, n - 1>::run(arr), std_array_get<n>(arr)))
+ constexpr static inline auto run(std::array<T, N> arr) -> decltype(Reducer::run(h_array_reduce<Reducer, T, N, n - 1>::run(arr), array_get<n>(arr)))
{
- return Reducer::run(h_array_reduce<Reducer, T, N, n - 1>::run(arr), std_array_get<n>(arr));
+ return Reducer::run(h_array_reduce<Reducer, T, N, n - 1>::run(arr), array_get<n>(arr));
}
};
@@ -346,7 +346,7 @@ struct h_array_reduce<Reducer, T, N, 0>
{
constexpr static inline T run(std::array<T, N> arr)
{
- return std_array_get<0>(arr);
+ return array_get<0>(arr);
}
};
@@ -375,7 +375,7 @@ constexpr inline auto array_prod(std::array<T, N> arr) -> decltype(array_reduce<
template<typename Op, typename A, typename B, std::size_t N, int... n>
constexpr inline std::array<decltype(Op::run(A(), B())),N> h_array_zip(std::array<A, N> a, std::array<B, N> b, numeric_list<int, n...>)
{
- return std::array<decltype(Op::run(A(), B())),N>{{ Op::run(std_array_get<n>(a), std_array_get<n>(b))... }};
+ return std::array<decltype(Op::run(A(), B())),N>{{ Op::run(array_get<n>(a), array_get<n>(b))... }};
}
template<typename Op, typename A, typename B, std::size_t N>
@@ -387,9 +387,9 @@ constexpr inline std::array<decltype(Op::run(A(), B())),N> array_zip(std::array<
/* zip an array and reduce the result */
template<typename Reducer, typename Op, typename A, typename B, std::size_t N, int... n>
-constexpr inline auto h_array_zip_and_reduce(std::array<A, N> a, std::array<B, N> b, numeric_list<int, n...>) -> decltype(reduce<Reducer, typename id_numeric<int,n,decltype(Op::run(A(), B()))>::type...>::run(Op::run(std_array_get<n>(a), std_array_get<n>(b))...))
+constexpr inline auto h_array_zip_and_reduce(std::array<A, N> a, std::array<B, N> b, numeric_list<int, n...>) -> decltype(reduce<Reducer, typename id_numeric<int,n,decltype(Op::run(A(), B()))>::type...>::run(Op::run(array_get<n>(a), array_get<n>(b))...))
{
- return reduce<Reducer, typename id_numeric<int,n,decltype(Op::run(A(), B()))>::type...>::run(Op::run(std_array_get<n>(a), std_array_get<n>(b))...);
+ return reduce<Reducer, typename id_numeric<int,n,decltype(Op::run(A(), B()))>::type...>::run(Op::run(array_get<n>(a), array_get<n>(b))...);
}
template<typename Reducer, typename Op, typename A, typename B, std::size_t N>
@@ -403,7 +403,7 @@ constexpr inline auto array_zip_and_reduce(std::array<A, N> a, std::array<B, N>
template<typename Op, typename A, std::size_t N, int... n>
constexpr inline std::array<decltype(Op::run(A())),N> h_array_apply(std::array<A, N> a, numeric_list<int, n...>)
{
- return std::array<decltype(Op::run(A())),N>{{ Op::run(std_array_get<n>(a))... }};
+ return std::array<decltype(Op::run(A())),N>{{ Op::run(array_get<n>(a))... }};
}
template<typename Op, typename A, std::size_t N>
@@ -415,9 +415,9 @@ constexpr inline std::array<decltype(Op::run(A())),N> array_apply(std::array<A,
/* apply stuff to an array and reduce */
template<typename Reducer, typename Op, typename A, std::size_t N, int... n>
-constexpr inline auto h_array_apply_and_reduce(std::array<A, N> arr, numeric_list<int, n...>) -> decltype(reduce<Reducer, typename id_numeric<int,n,decltype(Op::run(A()))>::type...>::run(Op::run(std_array_get<n>(arr))...))
+constexpr inline auto h_array_apply_and_reduce(std::array<A, N> arr, numeric_list<int, n...>) -> decltype(reduce<Reducer, typename id_numeric<int,n,decltype(Op::run(A()))>::type...>::run(Op::run(array_get<n>(arr))...))
{
- return reduce<Reducer, typename id_numeric<int,n,decltype(Op::run(A()))>::type...>::run(Op::run(std_array_get<n>(arr))...);
+ return reduce<Reducer, typename id_numeric<int,n,decltype(Op::run(A()))>::type...>::run(Op::run(array_get<n>(arr))...);
}
template<typename Reducer, typename Op, typename A, std::size_t N>
@@ -497,7 +497,3 @@ InstType instantiate_by_c_array(ArrType* arr)
} // end namespace Eigen
#endif // EIGEN_CXX11META_H
-
-/*
- * kate: space-indent on; indent-width 2; mixedindent off; indent-mode cstyle;
- */
diff --git a/unsupported/Eigen/CXX11/src/Core/util/CXX11Workarounds.h b/unsupported/Eigen/CXX11/src/Core/util/CXX11Workarounds.h
index 356ae10cf..77207f453 100644
--- a/unsupported/Eigen/CXX11/src/Core/util/CXX11Workarounds.h
+++ b/unsupported/Eigen/CXX11/src/Core/util/CXX11Workarounds.h
@@ -40,8 +40,18 @@
#error This library needs at least a C++11 compliant compiler. If you use g++/clang, please enable the -std=c++11 compiler flag. (-std=c++0x on older versions.)
#endif
+using std::array;
+
namespace Eigen {
+// Use std::array as Eigen array
+/*template <typename T, size_t N>
+struct array : public std::array<T, N> {
+ array() = default;
+ array(const std::initializer_list<T>& a);// : std::array<T, N>(a) {};
+ array(const std::array<T, N>& a);
+};*/
+
namespace internal {
/* std::get is only constexpr in C++14, not yet in C++11
@@ -60,9 +70,9 @@ namespace internal {
#define STD_GET_ARR_HACK std::template get<I, T, N>(a)
#endif
-template<std::size_t I, class T, std::size_t N> constexpr inline T& std_array_get(std::array<T,N>& a) { return (T&) STD_GET_ARR_HACK; }
-template<std::size_t I, class T, std::size_t N> constexpr inline T&& std_array_get(std::array<T,N>&& a) { return (T&&) STD_GET_ARR_HACK; }
-template<std::size_t I, class T, std::size_t N> constexpr inline T const& std_array_get(std::array<T,N> const& a) { return (T const&) STD_GET_ARR_HACK; }
+template<std::size_t I, class T, std::size_t N> constexpr inline T& array_get(std::array<T,N>& a) { return (T&) STD_GET_ARR_HACK; }
+template<std::size_t I, class T, std::size_t N> constexpr inline T&& array_get(std::array<T,N>&& a) { return (T&&) STD_GET_ARR_HACK; }
+template<std::size_t I, class T, std::size_t N> constexpr inline T const& array_get(std::array<T,N> const& a) { return (T const&) STD_GET_ARR_HACK; }
#undef STD_GET_ARR_HACK
diff --git a/unsupported/Eigen/CXX11/src/Core/util/EmulateCXX11Meta.h b/unsupported/Eigen/CXX11/src/Core/util/EmulateCXX11Meta.h
new file mode 100644
index 000000000..76fcba5b4
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Core/util/EmulateCXX11Meta.h
@@ -0,0 +1,184 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_EMULATE_CXX11_META_H
+#define EIGEN_EMULATE_CXX11_META_H
+
+
+namespace Eigen {
+
+// The array class is only available starting with cxx11. Emulate our own here
+// if needed
+template <typename T, size_t n> class array {
+ public:
+ T& operator[] (size_t index) { return values[index]; }
+ const T& operator[] (size_t index) const { return values[index]; }
+
+ T values[n];
+};
+
+
+namespace internal {
+
+/** \internal
+ * \file CXX11/Core/util/EmulateCXX11Meta.h
+ * This file emulates a subset of the functionality provided by CXXMeta.h for
+ * compilers that don't yet support cxx11 such as nvcc.
+ */
+
+struct empty_list { static const std::size_t count = 0; };
+
+template<typename T, typename Tail=empty_list> struct type_list {
+ T head;
+ Tail tail;
+ static const std::size_t count = 1 + Tail::count;
+};
+
+struct null_type { };
+
+template<typename T1 = null_type, typename T2 = null_type, typename T3 = null_type, typename T4 = null_type, typename T5 = null_type>
+struct make_type_list {
+ typedef typename make_type_list<T2, T3, T4, T5>::type tailresult;
+
+ typedef type_list<T1, tailresult> type;
+};
+
+template<> struct make_type_list<> {
+ typedef empty_list type;
+};
+
+
+
+template <typename T, T n>
+struct type2val {
+ static const T value = n;
+};
+
+
+template<typename T, size_t n, T V> struct gen_numeric_list_repeated;
+
+template<typename T, T V> struct gen_numeric_list_repeated<T, 1, V> {
+ typedef typename make_type_list<type2val<T, V> >::type type;
+};
+
+template<typename T, T V> struct gen_numeric_list_repeated<T, 2, V> {
+ typedef typename make_type_list<type2val<T, V>, type2val<T, V> >::type type;
+};
+
+template<typename T, T V> struct gen_numeric_list_repeated<T, 3, V> {
+ typedef typename make_type_list<type2val<T, V>, type2val<T, V>, type2val<T, V> >::type type;
+};
+
+template<typename T, T V> struct gen_numeric_list_repeated<T, 4, V> {
+ typedef typename make_type_list<type2val<T, V>, type2val<T, V>, type2val<T, V>, type2val<T, V> >::type type;
+};
+
+template<typename T, T V> struct gen_numeric_list_repeated<T, 5, V> {
+ typedef typename make_type_list<type2val<T, V>, type2val<T, V>, type2val<T, V>, type2val<T, V>, type2val<T, V> >::type type;
+};
+
+
+
+template<int n, typename t>
+array<t, n> repeat(t v) {
+  array<t, n> res;  // the emulated array has no fill(), so assign element-wise
+  for (int i = 0; i < n; ++i) { res[i] = v; }
+  return res;
+}
+
+template<std::size_t n, typename t>
+t array_prod(const array<t, n>& a) {
+ t prod = 1;
+ for (size_t i = 0; i < n; ++i) { prod *= a[i]; }
+ return prod;
+}
+template<typename t>
+t array_prod(const array<t, 0>& /*a*/) {
+ return 0;
+}
+
+template<std::size_t I, class T, std::size_t N> inline T& array_get(array<T,N>& a) {
+ return a[I];
+}
+template<std::size_t I, class T, std::size_t N> inline const T& array_get(const array<T,N>& a) {
+ return a[I];
+}
+
+struct sum_op {
+  template<typename A, typename B> static inline A run(A a, B b) { return a + b; }
+};
+struct product_op {
+  template<typename A, typename B> static inline A run(A a, B b) { return a * b; }
+};
+
+struct logical_and_op {
+ template<typename A, typename B> static inline bool run(A a, B b) { return a && b; }
+};
+struct logical_or_op {
+ template<typename A, typename B> static inline bool run(A a, B b) { return a || b; }
+};
+
+struct equal_op {
+ template<typename A, typename B> static inline bool run(A a, B b) { return a == b; }
+};
+struct not_equal_op {
+ template<typename A, typename B> static inline bool run(A a, B b) { return a != b; }
+};
+struct lesser_op {
+ template<typename A, typename B> static inline bool run(A a, B b) { return a < b; }
+};
+struct lesser_equal_op {
+ template<typename A, typename B> static inline bool run(A a, B b) { return a <= b; }
+};
+
+struct greater_op {
+ template<typename A, typename B> static inline bool run(A a, B b) { return a > b; }
+};
+struct greater_equal_op {
+ template<typename A, typename B> static inline bool run(A a, B b) { return a >= b; }
+};
+
+struct not_op {
+ template<typename A> static inline bool run(A a) { return !a; }
+};
+struct negation_op {
+ template<typename A> static inline bool run(A a) { return -a; }
+};
+struct greater_equal_zero_op {
+ template<typename A> static inline bool run(A a) { return a >= 0; }
+};
+
+
+template<typename Reducer, typename Op, typename A, std::size_t N>
+inline bool array_apply_and_reduce(const array<A, N>& a) {
+ EIGEN_STATIC_ASSERT(N >= 2, YOU_MADE_A_PROGRAMMING_MISTAKE)
+ bool result = Reducer::run(Op::run(a[0]), Op::run(a[1]));
+ for (size_t i = 2; i < N; ++i) {
+ result = Reducer::run(result, Op::run(a[i]));
+ }
+ return result;
+}
+
+template<typename Reducer, typename Op, typename A, typename B, std::size_t N>
+inline bool array_zip_and_reduce(const array<A, N>& a, const array<B, N>& b) {
+ EIGEN_STATIC_ASSERT(N >= 2, YOU_MADE_A_PROGRAMMING_MISTAKE)
+ bool result = Reducer::run(Op::run(a[0], b[0]), Op::run(a[1], b[1]));
+ for (size_t i = 2; i < N; ++i) {
+ result = Reducer::run(result, Op::run(a[i], b[i]));
+ }
+ return result;
+}
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+
+
+#endif // EIGEN_EMULATE_CXX11_META_H
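In a pre-C++11 build, the header above is included instead of CXX11Meta.h, and
the minimal Eigen::array defined there stands in for std::array. A small
illustrative sketch of that code path (hypothetical values; brace
initialization is unavailable without C++11, so elements are set one by one):

    #include <Eigen/Core>
    #include <Eigen/CXX11/Core>

    void emulated_array_example()
    {
      Eigen::array<Eigen::DenseIndex, 3> dims;
      dims[0] = 2; dims[1] = 3; dims[2] = 4;
      // array_prod multiplies the extents: 2 * 3 * 4 = 24 coefficients.
      Eigen::DenseIndex total = Eigen::internal::array_prod(dims);
      (void)total;
    }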
diff --git a/unsupported/Eigen/CXX11/src/Tensor/Tensor.h b/unsupported/Eigen/CXX11/src/Tensor/Tensor.h
index c6216e14c..7b8f14c6d 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/Tensor.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/Tensor.h
@@ -57,28 +57,16 @@ namespace Eigen {
*
* \ref TopicStorageOrders
*/
-template<typename Scalar_, std::size_t NumIndices_, int Options_ = 0>
-class Tensor;
namespace internal {
-template<typename Scalar_, std::size_t NumIndices_, int Options_>
-struct traits<Tensor<Scalar_, NumIndices_, Options_>>
-{
- typedef Scalar_ Scalar;
- typedef Dense StorageKind;
- typedef DenseIndex Index;
- enum {
- Options = Options_
- };
-};
template<typename Index, std::size_t NumIndices, std::size_t n, bool RowMajor>
struct tensor_index_linearization_helper
{
- constexpr static inline Index run(std::array<Index, NumIndices> const& indices, std::array<Index, NumIndices> const& dimensions)
+ static inline Index run(array<Index, NumIndices> const& indices, array<Index, NumIndices> const& dimensions)
{
- return std_array_get<RowMajor ? n : (NumIndices - n - 1)>(indices) +
- std_array_get<RowMajor ? n : (NumIndices - n - 1)>(dimensions) *
+ return array_get<RowMajor ? n : (NumIndices - n - 1)>(indices) +
+ array_get<RowMajor ? n : (NumIndices - n - 1)>(dimensions) *
tensor_index_linearization_helper<Index, NumIndices, n - 1, RowMajor>::run(indices, dimensions);
}
};
@@ -86,39 +74,40 @@ struct tensor_index_linearization_helper
template<typename Index, std::size_t NumIndices, bool RowMajor>
struct tensor_index_linearization_helper<Index, NumIndices, 0, RowMajor>
{
- constexpr static inline Index run(std::array<Index, NumIndices> const& indices, std::array<Index, NumIndices> const&)
+ static inline Index run(array<Index, NumIndices> const& indices, array<Index, NumIndices> const&)
{
- return std_array_get<RowMajor ? 0 : NumIndices - 1>(indices);
+ return array_get<RowMajor ? 0 : NumIndices - 1>(indices);
}
};
/* Forward-declaration required for the symmetry support. */
template<typename Tensor_, typename Symmetry_, int Flags = 0> class tensor_symmetry_value_setter;
+
} // end namespace internal
template<typename Scalar_, std::size_t NumIndices_, int Options_>
-class Tensor
+class Tensor : public TensorBase<Tensor<Scalar_, NumIndices_, Options_> >
{
- static_assert(NumIndices_ >= 1, "A tensor must have at least one index.");
-
public:
typedef Tensor<Scalar_, NumIndices_, Options_> Self;
+ typedef TensorBase<Tensor<Scalar_, NumIndices_, Options_> > Base;
+ typedef typename Eigen::internal::nested<Self>::type Nested;
typedef typename internal::traits<Self>::StorageKind StorageKind;
typedef typename internal::traits<Self>::Index Index;
- typedef typename internal::traits<Self>::Scalar Scalar;
+ typedef Scalar_ Scalar;
typedef typename internal::packet_traits<Scalar>::type PacketScalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
- typedef Self DenseType;
+ typedef typename Base::CoeffReturnType CoeffReturnType;
- constexpr static int Options = Options_;
- constexpr static std::size_t NumIndices = NumIndices_;
+ static const int Options = Options_;
+ static const std::size_t NumIndices = NumIndices_;
protected:
TensorStorage<Scalar, NumIndices, Dynamic, Options> m_storage;
public:
EIGEN_STRONG_INLINE Index dimension(std::size_t n) const { return m_storage.dimensions()[n]; }
- EIGEN_STRONG_INLINE std::array<Index, NumIndices> dimensions() const { return m_storage.dimensions(); }
+ EIGEN_STRONG_INLINE array<Index, NumIndices> dimensions() const { return m_storage.dimensions(); }
EIGEN_STRONG_INLINE Index size() const { return internal::array_prod(m_storage.dimensions()); }
EIGEN_STRONG_INLINE Scalar *data() { return m_storage.data(); }
EIGEN_STRONG_INLINE const Scalar *data() const { return m_storage.data(); }
@@ -129,29 +118,17 @@ class Tensor
inline Self& base() { return *this; }
inline const Self& base() const { return *this; }
- void setZero()
- {
- // FIXME: until we have implemented packet access and the
- // expression engine w.r.t. nullary ops, use this
- // as a kludge. Only works with POD types, but for
- // any standard usage, this shouldn't be a problem
- memset((void *)data(), 0, size() * sizeof(Scalar));
- }
-
- inline Self& operator=(Self const& other)
- {
- m_storage = other.m_storage;
- return *this;
- }
-
+#ifdef EIGEN_HAS_VARIADIC_TEMPLATES
template<typename... IndexTypes>
inline const Scalar& coeff(Index firstIndex, Index secondIndex, IndexTypes... otherIndices) const
{
- static_assert(sizeof...(otherIndices) + 2 == NumIndices, "Number of indices used to access a tensor coefficient must be equal to the rank of the tensor.");
- return coeff(std::array<Index, NumIndices>{{firstIndex, secondIndex, otherIndices...}});
+ // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
+ EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
+ return coeff(array<Index, NumIndices>{{firstIndex, secondIndex, otherIndices...}});
}
+#endif
- inline const Scalar& coeff(const std::array<Index, NumIndices>& indices) const
+ inline const Scalar& coeff(const array<Index, NumIndices>& indices) const
{
eigen_internal_assert(checkIndexRange(indices));
return m_storage.data()[linearizedIndex(indices)];
@@ -163,14 +140,17 @@ class Tensor
return m_storage.data()[index];
}
+#ifdef EIGEN_HAS_VARIADIC_TEMPLATES
template<typename... IndexTypes>
inline Scalar& coeffRef(Index firstIndex, Index secondIndex, IndexTypes... otherIndices)
{
- static_assert(sizeof...(otherIndices) + 2 == NumIndices, "Number of indices used to access a tensor coefficient must be equal to the rank of the tensor.");
- return coeffRef(std::array<Index, NumIndices>{{firstIndex, secondIndex, otherIndices...}});
+ // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
+ EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
+ return coeffRef(array<Index, NumIndices>{{firstIndex, secondIndex, otherIndices...}});
}
+#endif
- inline Scalar& coeffRef(const std::array<Index, NumIndices>& indices)
+ inline Scalar& coeffRef(const array<Index, NumIndices>& indices)
{
eigen_internal_assert(checkIndexRange(indices));
return m_storage.data()[linearizedIndex(indices)];
@@ -182,14 +162,17 @@ class Tensor
return m_storage.data()[index];
}
+#ifdef EIGEN_HAS_VARIADIC_TEMPLATES
template<typename... IndexTypes>
inline const Scalar& operator()(Index firstIndex, Index secondIndex, IndexTypes... otherIndices) const
{
- static_assert(sizeof...(otherIndices) + 2 == NumIndices, "Number of indices used to access a tensor coefficient must be equal to the rank of the tensor.");
- return this->operator()(std::array<Index, NumIndices>{{firstIndex, secondIndex, otherIndices...}});
+ // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
+ EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
+ return this->operator()(array<Index, NumIndices>{{firstIndex, secondIndex, otherIndices...}});
}
+#endif
- inline const Scalar& operator()(const std::array<Index, NumIndices>& indices) const
+ inline const Scalar& operator()(const array<Index, NumIndices>& indices) const
{
eigen_assert(checkIndexRange(indices));
return coeff(indices);
@@ -203,18 +186,22 @@ class Tensor
inline const Scalar& operator[](Index index) const
{
- static_assert(NumIndices == 1, "The bracket operator is only for vectors, use the parenthesis operator instead.");
+ // The bracket operator is only for vectors, use the parenthesis operator instead.
+ EIGEN_STATIC_ASSERT(NumIndices == 1, YOU_MADE_A_PROGRAMMING_MISTAKE);
return coeff(index);
}
+#ifdef EIGEN_HAS_VARIADIC_TEMPLATES
template<typename... IndexTypes>
inline Scalar& operator()(Index firstIndex, Index secondIndex, IndexTypes... otherIndices)
{
- static_assert(sizeof...(otherIndices) + 2 == NumIndices, "Number of indices used to access a tensor coefficient must be equal to the rank of the tensor.");
- return operator()(std::array<Index, NumIndices>{{firstIndex, secondIndex, otherIndices...}});
+ // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
+ EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
+ return operator()(array<Index, NumIndices>{{firstIndex, secondIndex, otherIndices...}});
}
+#endif
- inline Scalar& operator()(const std::array<Index, NumIndices>& indices)
+ inline Scalar& operator()(const array<Index, NumIndices>& indices)
{
eigen_assert(checkIndexRange(indices));
return coeffRef(indices);
@@ -228,47 +215,70 @@ class Tensor
inline Scalar& operator[](Index index)
{
- static_assert(NumIndices == 1, "The bracket operator is only for vectors, use the parenthesis operator instead.");
+ // The bracket operator is only for vectors, use the parenthesis operator instead
+ EIGEN_STATIC_ASSERT(NumIndices == 1, YOU_MADE_A_PROGRAMMING_MISTAKE)
return coeffRef(index);
}
- inline Tensor()
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Tensor()
: m_storage()
{
}
- inline Tensor(const Self& other)
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Tensor(const Self& other)
: m_storage(other.m_storage)
{
}
- inline Tensor(Self&& other)
- : m_storage(other.m_storage)
- {
- }
+#ifdef EIGEN_HAVE_RVALUE_REFERENCES
+// inline Tensor(Self&& other)
+// : m_storage(other.m_storage)
+// {
+// }
+#endif
+#ifdef EIGEN_HAS_VARIADIC_TEMPLATES
template<typename... IndexTypes>
inline Tensor(Index firstDimension, IndexTypes... otherDimensions)
: m_storage()
{
- static_assert(sizeof...(otherDimensions) + 1 == NumIndices, "Number of dimensions used to construct a tensor must be equal to the rank of the tensor.");
- resize(std::array<Index, NumIndices>{{firstDimension, otherDimensions...}});
+ // The number of dimensions used to construct a tensor must be equal to the rank of the tensor.
+ EIGEN_STATIC_ASSERT(sizeof...(otherDimensions) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
+ resize(array<Index, NumIndices>{{firstDimension, otherDimensions...}});
}
+#endif
- inline Tensor(std::array<Index, NumIndices> dimensions)
+ inline Tensor(const array<Index, NumIndices>& dimensions)
: m_storage(internal::array_prod(dimensions), dimensions)
{
EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
}
+
+ template<typename OtherDerived>
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Tensor& operator=(const OtherDerived& other)
+ {
+ // FIXME: we need to resize the tensor to fix the dimensions of the other.
+ // Unfortunately this isn't possible yet when the rhs is an expression.
+ // resize(other.dimensions());
+ internal::TensorAssign<Tensor, const OtherDerived>::run(*this, other);
+ return *this;
+ }
+
+#ifdef EIGEN_HAS_VARIADIC_TEMPLATES
template<typename... IndexTypes>
void resize(Index firstDimension, IndexTypes... otherDimensions)
{
- static_assert(sizeof...(otherDimensions) + 1 == NumIndices, "Number of dimensions used to resize a tensor must be equal to the rank of the tensor.");
- resize(std::array<Index, NumIndices>{{firstDimension, otherDimensions...}});
+ // The number of dimensions used to resize a tensor must be equal to the rank of the tensor.
+ EIGEN_STATIC_ASSERT(sizeof...(otherDimensions) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
+ resize(array<Index, NumIndices>{{firstDimension, otherDimensions...}});
}
+#endif
- void resize(const std::array<Index, NumIndices>& dimensions)
+ void resize(const array<Index, NumIndices>& dimensions)
{
std::size_t i;
Index size = Index(1);
@@ -285,20 +295,22 @@ class Tensor
#endif
}
+#ifdef EIGEN_HAS_VARIADIC_TEMPLATES
template<typename Symmetry_, typename... IndexTypes>
internal::tensor_symmetry_value_setter<Self, Symmetry_> symCoeff(const Symmetry_& symmetry, Index firstIndex, IndexTypes... otherIndices)
{
- return symCoeff(symmetry, std::array<Index, NumIndices>{{firstIndex, otherIndices...}});
+ return symCoeff(symmetry, array<Index, NumIndices>{{firstIndex, otherIndices...}});
}
template<typename Symmetry_, typename... IndexTypes>
- internal::tensor_symmetry_value_setter<Self, Symmetry_> symCoeff(const Symmetry_& symmetry, std::array<Index, NumIndices> const& indices)
+ internal::tensor_symmetry_value_setter<Self, Symmetry_> symCoeff(const Symmetry_& symmetry, array<Index, NumIndices> const& indices)
{
return internal::tensor_symmetry_value_setter<Self, Symmetry_>(*this, symmetry, indices);
}
+#endif
protected:
- bool checkIndexRange(const std::array<Index, NumIndices>& indices) const
+ bool checkIndexRange(const array<Index, NumIndices>& indices) const
{
using internal::array_apply_and_reduce;
using internal::array_zip_and_reduce;
@@ -313,7 +325,7 @@ class Tensor
array_zip_and_reduce<logical_and_op, lesser_op>(indices, m_storage.dimensions());
}
- inline Index linearizedIndex(const std::array<Index, NumIndices>& indices) const
+ inline Index linearizedIndex(const array<Index, NumIndices>& indices) const
{
return internal::tensor_index_linearization_helper<Index, NumIndices, NumIndices - 1, Options&RowMajor>::run(indices, m_storage.dimensions());
}
@@ -322,7 +334,3 @@ class Tensor
} // end namespace Eigen
#endif // EIGEN_CXX11_TENSOR_TENSOR_H
-
-/*
- * kate: space-indent on; indent-width 2; mixedindent off; indent-mode cstyle;
- */
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorAssign.h b/unsupported/Eigen/CXX11/src/Tensor/TensorAssign.h
new file mode 100644
index 000000000..f1df827f9
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorAssign.h
@@ -0,0 +1,52 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_ASSIGN_H
+#define EIGEN_CXX11_TENSOR_TENSOR_ASSIGN_H
+
+
+namespace Eigen {
+
+/** \class TensorAssign
+ * \ingroup CXX11_Tensor_Module
+ *
+ * \brief The tensor assignment class.
+ *
+ * This class is responsible for triggering the evaluation of the expressions
+ * used on the lhs and rhs of an assignment operator, and for copying the
+ * result of the evaluation of the rhs expression to the address computed
+ * during the evaluation of the lhs expression.
+ *
+ * TODO: vectorization. For now the code only uses scalars
+ * TODO: parallelisation using multithreading on cpu, or kernels on gpu.
+ */
+namespace internal {
+
+template<typename Derived1, typename Derived2>
+struct TensorAssign
+{
+ typedef typename Derived1::Index Index;
+ EIGEN_DEVICE_FUNC
+ static inline void run(Derived1& dst, const Derived2& src)
+ {
+ TensorEvaluator<Derived1> evalDst(dst);
+ TensorEvaluator<Derived2> evalSrc(src);
+ const Index size = dst.size();
+ for(Index i = 0; i < size; ++i) {
+ evalDst.coeffRef(i) = evalSrc.coeff(i);
+ }
+ }
+};
+
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_ASSIGN_H
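One practical consequence of the flat copy loop above: the destination must
already have the right total size, since run() cannot resize it (see the FIXME
in Tensor::operator= in Tensor.h above). A minimal sketch of the contract,
illustrative only:

    #include <Eigen/CXX11/Tensor>

    void assign_example()
    {
      Eigen::Tensor<int, 1> src(5), dst(5);  // dst pre-sized: run() cannot resize it
      src.setConstant(7);
      dst = src + src;  // rhs expression; operator= forwards to TensorAssign<...>::run
    }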
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorBase.h b/unsupported/Eigen/CXX11/src/Tensor/TensorBase.h
new file mode 100644
index 000000000..0b9f32f7f
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorBase.h
@@ -0,0 +1,82 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_BASE_H
+#define EIGEN_CXX11_TENSOR_TENSOR_BASE_H
+
+namespace Eigen {
+
+/** \class TensorBase
+ * \ingroup CXX11_Tensor_Module
+ *
+ * \brief The tensor base class.
+ *
+ * This class is the common parent of the Tensor and TensorMap class, thus
+ * making it possible to use either class interchangeably in expressions.
+ */
+
+template<typename Derived>
+class TensorBase
+{
+ public:
+ typedef typename internal::traits<Derived>::Scalar Scalar;
+ typedef typename internal::traits<Derived>::Index Index;
+ typedef Scalar CoeffReturnType;
+
+ Derived& setZero() {
+ return setConstant(Scalar(0));
+ }
+
+ Derived& setConstant(const Scalar& val) {
+ Scalar* data = derived().data();
+ for (int i = 0; i < derived().size(); ++i) {
+ data[i] = val;
+ }
+ return derived();
+ }
+
+ Derived& setRandom() {
+ Scalar* data = derived().data();
+ for (int i = 0; i < derived().size(); ++i) {
+ data[i] = internal::random_default_impl<Scalar, false, false>::run();
+ }
+ return derived();
+ }
+
+ // Coefficient-wise unary operators
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_opposite_op<Scalar>, const Derived>
+ operator-() const { return derived(); }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_sqrt_op<Scalar>, const Derived>
+ cwiseSqrt() const { return derived(); }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_abs_op<Scalar>, const Derived>
+ cwiseAbs() const { return derived(); }
+
+ // Coefficient-wise binary operators.
+ template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TensorCwiseBinaryOp<internal::scalar_sum_op<Scalar>, const Derived, const OtherDerived>
+ operator+(const OtherDerived& other) const {
+ return TensorCwiseBinaryOp<internal::scalar_sum_op<Scalar>, const Derived, const OtherDerived>(derived(), other.derived());
+ }
+
+ protected:
+ template <typename OtherDerived> friend class TensorBase;
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Derived& derived() { return *static_cast<Derived*>(this); }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const Derived& derived() const { return *static_cast<const Derived*>(this); }
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_BASE_H
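The operators declared above return lightweight expression objects; nothing is
computed until the expression is assigned to a concrete tensor. A brief
sketch, illustrative only:

    #include <Eigen/CXX11/Tensor>

    void base_ops_example()
    {
      Eigen::Tensor<float, 2> t(2, 2), u(2, 2);
      t.setRandom();
      u = -t;            // scalar_opposite_op wrapped in a TensorCwiseUnaryOp
      u = t.cwiseAbs();  // scalar_abs_op, evaluated by TensorAssign on assignment
    }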
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorEvaluator.h b/unsupported/Eigen/CXX11/src/Tensor/TensorEvaluator.h
new file mode 100644
index 000000000..f4f10eff5
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorEvaluator.h
@@ -0,0 +1,127 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_EVALUATOR_H
+#define EIGEN_CXX11_TENSOR_TENSOR_EVALUATOR_H
+
+namespace Eigen {
+
+/** \class TensorEvaluator
+ * \ingroup CXX11_Tensor_Module
+ *
+ * \brief The tensor evaluator classes.
+ *
+ * These classes are responsible for the evaluation of the tensor expression.
+ *
+ * TODO: add support for more types of expressions, in particular expressions
+ * leading to lvalues (slicing, reshaping, etc...)
+ * TODO: add support for vectorization
+ */
+
+
+template<typename Derived>
+struct TensorEvaluator
+{
+ typedef typename Derived::Index Index;
+ typedef typename Derived::Scalar Scalar;
+ typedef typename Derived::Scalar& CoeffReturnType;
+ //typedef typename Derived::PacketScalar PacketScalar;
+ typedef TensorEvaluator<Derived> nestedType;
+
+ TensorEvaluator(Derived& m)
+ : m_data(const_cast<Scalar*>(m.data()))
+ { }
+
+ CoeffReturnType coeff(Index index) const {
+ return m_data[index];
+ }
+
+ Scalar& coeffRef(Index index) {
+ return m_data[index];
+ }
+
+ // to do: vectorized evaluation.
+ /* template<int LoadMode>
+ PacketReturnType packet(Index index) const
+ {
+ return ploadt<PacketScalar, LoadMode>(m_data + index);
+ }
+
+ template<int StoreMode>
+ void writePacket(Index index, const PacketScalar& x)
+ {
+ return pstoret<Scalar, PacketScalar, StoreMode>(const_cast<Scalar*>(m_data) + index, x);
+ }*/
+
+ protected:
+ Scalar* m_data;
+};
+
+
+
+
+// -------------------- CwiseUnaryOp --------------------
+
+template<typename UnaryOp, typename ArgType>
+struct TensorEvaluator<const TensorCwiseUnaryOp<UnaryOp, ArgType> >
+{
+ typedef TensorCwiseUnaryOp<UnaryOp, ArgType> XprType;
+ typedef TensorEvaluator<ArgType> nestedType;
+
+ TensorEvaluator(const XprType& op)
+ : m_functor(op.functor()),
+ m_argImpl(op.nestedExpression())
+ { }
+
+ typedef typename XprType::Index Index;
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+
+ CoeffReturnType coeff(Index index) const
+ {
+ return m_functor(m_argImpl.coeff(index));
+ }
+
+ private:
+ const UnaryOp m_functor;
+ typename TensorEvaluator<ArgType>::nestedType m_argImpl;
+};
+
+
+// -------------------- CwiseBinaryOp --------------------
+
+template<typename BinaryOp, typename LeftArgType, typename RightArgType>
+struct TensorEvaluator<const TensorCwiseBinaryOp<BinaryOp, LeftArgType, RightArgType> >
+{
+ typedef TensorCwiseBinaryOp<BinaryOp, LeftArgType, RightArgType> XprType;
+ typedef TensorEvaluator<LeftArgType> leftType;
+ typedef TensorEvaluator<RightArgType> rightType;
+
+ TensorEvaluator(const XprType& op)
+ : m_functor(op.functor()),
+ m_leftImpl(op.lhsExpression()),
+ m_rightImpl(op.rhsExpression())
+ { }
+
+ typedef typename XprType::Index Index;
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+
+ CoeffReturnType coeff(Index index) const
+ {
+ return m_functor(m_leftImpl.coeff(index), m_rightImpl.coeff(index));
+ }
+
+ private:
+ const BinaryOp m_functor;
+ typename TensorEvaluator<LeftArgType>::nestedType m_leftImpl;
+ typename TensorEvaluator<RightArgType>::nestedType m_rightImpl;
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_EVALUATOR_H
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorExpr.h b/unsupported/Eigen/CXX11/src/Tensor/TensorExpr.h
new file mode 100644
index 000000000..5a45cec31
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorExpr.h
@@ -0,0 +1,161 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_EXPR_H
+#define EIGEN_CXX11_TENSOR_TENSOR_EXPR_H
+
+namespace Eigen {
+
+/** \class TensorExpr
+ * \ingroup CXX11_Tensor_Module
+ *
+ * \brief Tensor expression classes.
+ *
+ * The TensorCwiseUnaryOp class represents an expression where a unary operator
+ * (e.g. cwiseSqrt) is applied to an expression.
+ *
+ * The TensorCwiseBinaryOp class represents an expression where a binary operator
+ * (e.g. addition) is applied to a lhs and a rhs expression.
+ *
+ */
+
+namespace internal {
+template<typename UnaryOp, typename XprType>
+struct traits<TensorCwiseUnaryOp<UnaryOp, XprType> >
+ : traits<XprType>
+{
+ typedef typename result_of<
+ UnaryOp(typename XprType::Scalar)
+ >::type Scalar;
+ typedef typename XprType::Nested XprTypeNested;
+ typedef typename remove_reference<XprTypeNested>::type _XprTypeNested;
+};
+
+template<typename UnaryOp, typename XprType>
+struct eval<TensorCwiseUnaryOp<UnaryOp, XprType>, Eigen::Dense>
+{
+ typedef const TensorCwiseUnaryOp<UnaryOp, XprType>& type;
+};
+
+template<typename UnaryOp, typename XprType>
+struct nested<TensorCwiseUnaryOp<UnaryOp, XprType>, 1, typename eval<TensorCwiseUnaryOp<UnaryOp, XprType> >::type>
+{
+ typedef TensorCwiseUnaryOp<UnaryOp, XprType> type;
+};
+
+} // end namespace internal
+
+
+
+template<typename UnaryOp, typename XprType>
+class TensorCwiseUnaryOp
+{
+ public:
+ typedef typename Eigen::internal::traits<TensorCwiseUnaryOp>::Scalar Scalar;
+ typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+ typedef typename Eigen::internal::nested<TensorCwiseUnaryOp>::type Nested;
+ typedef typename Eigen::internal::traits<TensorCwiseUnaryOp>::StorageKind StorageKind;
+ typedef typename Eigen::internal::traits<TensorCwiseUnaryOp>::Index Index;
+
+ inline TensorCwiseUnaryOp(const XprType& xpr, const UnaryOp& func = UnaryOp())
+ : m_xpr(xpr), m_functor(func) {}
+
+ EIGEN_DEVICE_FUNC
+ const UnaryOp& functor() const { return m_functor; }
+
+ /** \returns the nested expression */
+ EIGEN_DEVICE_FUNC
+ const typename internal::remove_all<typename XprType::Nested>::type&
+ nestedExpression() const { return m_xpr; }
+
+ /** \returns the nested expression */
+ EIGEN_DEVICE_FUNC
+ typename internal::remove_all<typename XprType::Nested>::type&
+ nestedExpression() { return m_xpr.const_cast_derived(); }
+
+ protected:
+ typename XprType::Nested m_xpr;
+ const UnaryOp m_functor;
+};
+
+
+namespace internal {
+template<typename BinaryOp, typename LhsXprType, typename RhsXprType>
+struct traits<TensorCwiseBinaryOp<BinaryOp, LhsXprType, RhsXprType> >
+{
+ // Type promotion to handle the case where the types of the lhs and the rhs are different.
+ typedef typename result_of<
+ BinaryOp(
+ typename LhsXprType::Scalar,
+ typename RhsXprType::Scalar
+ )
+ >::type Scalar;
+ typedef typename promote_storage_type<typename traits<LhsXprType>::StorageKind,
+ typename traits<RhsXprType>::StorageKind>::ret StorageKind;
+ typedef typename promote_index_type<typename traits<LhsXprType>::Index,
+ typename traits<RhsXprType>::Index>::type Index;
+ typedef typename LhsXprType::Nested LhsNested;
+ typedef typename RhsXprType::Nested RhsNested;
+ typedef typename remove_reference<LhsNested>::type _LhsNested;
+ typedef typename remove_reference<RhsNested>::type _RhsNested;
+};
+
+template<typename BinaryOp, typename LhsXprType, typename RhsXprType>
+struct eval<TensorCwiseBinaryOp<BinaryOp, LhsXprType, RhsXprType>, Eigen::Dense>
+{
+ typedef const TensorCwiseBinaryOp<BinaryOp, LhsXprType, RhsXprType>& type;
+};
+
+template<typename BinaryOp, typename LhsXprType, typename RhsXprType>
+struct nested<TensorCwiseBinaryOp<BinaryOp, LhsXprType, RhsXprType>, 1, typename eval<TensorCwiseBinaryOp<BinaryOp, LhsXprType, RhsXprType> >::type>
+{
+ typedef TensorCwiseBinaryOp<BinaryOp, LhsXprType, RhsXprType> type;
+};
+
+} // end namespace internal
+
+
+
+template<typename BinaryOp, typename LhsXprType, typename RhsXprType>
+class TensorCwiseBinaryOp
+{
+ public:
+ typedef typename Eigen::internal::traits<TensorCwiseBinaryOp>::Scalar Scalar;
+ typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
+ typedef typename internal::promote_storage_type<typename LhsXprType::CoeffReturnType,
+ typename RhsXprType::CoeffReturnType>::ret CoeffReturnType;
+ typedef typename Eigen::internal::nested<TensorCwiseBinaryOp>::type Nested;
+ typedef typename Eigen::internal::traits<TensorCwiseBinaryOp>::StorageKind StorageKind;
+ typedef typename Eigen::internal::traits<TensorCwiseBinaryOp>::Index Index;
+
+ inline TensorCwiseBinaryOp(const LhsXprType& lhs, const RhsXprType& rhs, const BinaryOp& func = BinaryOp())
+ : m_lhs_xpr(lhs), m_rhs_xpr(rhs), m_functor(func) {}
+
+ EIGEN_DEVICE_FUNC
+ const BinaryOp& functor() const { return m_functor; }
+
+ /** \returns the nested expressions */
+ EIGEN_DEVICE_FUNC
+ const typename internal::remove_all<typename LhsXprType::Nested>::type&
+ lhsExpression() const { return m_lhs_xpr; }
+
+ EIGEN_DEVICE_FUNC
+ const typename internal::remove_all<typename RhsXprType::Nested>::type&
+ rhsExpression() const { return m_rhs_xpr; }
+
+ protected:
+ typename LhsXprType::Nested m_lhs_xpr;
+ typename RhsXprType::Nested m_rhs_xpr;
+ const BinaryOp m_functor;
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_EXPR_H
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h b/unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h
new file mode 100644
index 000000000..dc97764f0
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h
@@ -0,0 +1,27 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_FORWARD_DECLARATIONS_H
+#define EIGEN_CXX11_TENSOR_TENSOR_FORWARD_DECLARATIONS_H
+
+namespace Eigen {
+
+template<typename Scalar_, std::size_t NumIndices_, int Options_ = 0> class Tensor;
+template<typename PlainObjectType> class TensorMap;
+template<typename Derived> class TensorBase;
+
+template<typename UnaryOp, typename XprType> class TensorCwiseUnaryOp;
+template<typename BinaryOp, typename LeftXprType, typename RightXprType> class TensorCwiseBinaryOp;
+
+// Move to internal?
+template<typename Derived> struct TensorEvaluator;
+
+} // end namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_FORWARD_DECLARATIONS_H
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorMap.h b/unsupported/Eigen/CXX11/src/Tensor/TensorMap.h
new file mode 100644
index 000000000..7dec1e08d
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorMap.h
@@ -0,0 +1,101 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_MAP_H
+#define EIGEN_CXX11_TENSOR_TENSOR_MAP_H
+
+namespace Eigen {
+
+template<int InnerStrideAtCompileTime, int OuterStrideAtCompileTime> class Stride;
+
+
+/** \class TensorMap
+ * \ingroup CXX11_Tensor_Module
+ *
+ * \brief A tensor expression mapping an existing array of data.
+ *
+ */
+
+template<typename PlainObjectType> class TensorMap : public TensorBase<TensorMap<PlainObjectType> >
+{
+ public:
+ typedef TensorMap<PlainObjectType> Self;
+ typedef typename PlainObjectType::Base Base;
+ typedef typename Eigen::internal::nested<Self>::type Nested;
+ typedef typename internal::traits<PlainObjectType>::StorageKind StorageKind;
+ typedef typename internal::traits<PlainObjectType>::Index Index;
+ typedef typename internal::traits<PlainObjectType>::Scalar Scalar;
+ typedef typename internal::packet_traits<Scalar>::type PacketScalar;
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+ typedef typename Base::CoeffReturnType CoeffReturnType;
+
+ /* typedef typename internal::conditional<
+ bool(internal::is_lvalue<PlainObjectType>::value),
+ Scalar *,
+ const Scalar *>::type
+ PointerType;*/
+ typedef Scalar* PointerType;
+ typedef PointerType PointerArgType;
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE TensorMap(PointerArgType dataPtr, Index firstDimension) : m_data(dataPtr), m_dimensions({{firstDimension}}) {
+ // The number of dimensions used to construct a tensor must be equal to the rank of the tensor.
+ EIGEN_STATIC_ASSERT(1 == PlainObjectType::NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
+ }
+
+#ifdef EIGEN_HAS_VARIADIC_TEMPLATES
+ template<typename... IndexTypes> EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE TensorMap(PointerArgType dataPtr, Index firstDimension, IndexTypes... otherDimensions) : m_data(dataPtr), m_dimensions({{firstDimension, otherDimensions...}}) {
+ // The number of dimensions used to construct a tensor must be equal to the rank of the tensor.
+ EIGEN_STATIC_ASSERT(sizeof...(otherDimensions) + 1 == PlainObjectType::NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
+ }
+#endif
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Index dimension(Index n) const { return m_dimensions[n]; }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Index size() const { return internal::array_prod(m_dimensions); }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Scalar* data() { return m_data; }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const Scalar* data() const { return m_data; }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const Scalar& operator()(Index index) const
+ {
+ eigen_internal_assert(index >= 0 && index < size());
+ return m_data[index];
+ }
+
+#ifdef EIGEN_HAS_VARIADIC_TEMPLATES
+ template<typename... IndexTypes> EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Scalar& operator()(Index firstIndex, IndexTypes... otherIndices)
+ {
+ static_assert(sizeof...(otherIndices) + 1 == PlainObjectType::NumIndices, "Number of indices used to access a tensor coefficient must be equal to the rank of the tensor.");
+ const Index index = internal::tensor_index_linearization_helper<Index, PlainObjectType::NumIndices, PlainObjectType::NumIndices - 1, PlainObjectType::Options&RowMajor>::run(array<Index, PlainObjectType::NumIndices>{{firstIndex, otherIndices...}}, m_dimensions);
+ return m_data[index];
+ }
+#endif
+
+ template<typename OtherDerived>
+ EIGEN_DEVICE_FUNC
+ Self& operator=(const OtherDerived& other)
+ {
+ internal::TensorAssign<Self, const OtherDerived>::run(*this, other);
+ return *this;
+ }
+
+ private:
+ typename PlainObjectType::Scalar* m_data;
+ array<DenseIndex, PlainObjectType::NumIndices> m_dimensions;
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_MAP_H
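A short sketch of the mapping workflow this class enables (a C++11 build is
assumed, since both the multi-dimension constructor and the variadic
operator() are guarded by EIGEN_HAS_VARIADIC_TEMPLATES; illustrative only):

    #include <Eigen/CXX11/Tensor>

    void map_example()
    {
      float buffer[6] = {0};
      Eigen::TensorMap<Eigen::Tensor<float, 2> > m(buffer, 2, 3);
      // Writes go straight to the mapped memory; with the default
      // column-major layout, (1, 2) linearizes to 1 + 2 * 2 = buffer[5].
      m(1, 2) = 42.0f;
    }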
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorStorage.h b/unsupported/Eigen/CXX11/src/Tensor/TensorStorage.h
index a34600ee6..503d7cfd6 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorStorage.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorStorage.h
@@ -37,14 +37,19 @@ template<typename T, std::size_t NumIndices_, int Options_>
class TensorStorage<T, NumIndices_, Dynamic, Options_, void>
: public TensorStorage<T, NumIndices_, Dynamic, Options_, typename internal::gen_numeric_list_repeated<DenseIndex, NumIndices_, Dynamic>::type>
{
- typedef TensorStorage<T, NumIndices_, Dynamic, Options_, typename internal::gen_numeric_list_repeated<DenseIndex, NumIndices_, Dynamic>::type> Base_;
+ typedef TensorStorage<T, NumIndices_, Dynamic, Options_, typename internal::gen_numeric_list_repeated<DenseIndex, NumIndices_, Dynamic>::type> Base_;
+
public:
- TensorStorage() = default;
- TensorStorage(const TensorStorage<T, NumIndices_, Dynamic, Options_, void>&) = default;
- TensorStorage(TensorStorage<T, NumIndices_, Dynamic, Options_, void>&&) = default;
+ TensorStorage() { }
+ TensorStorage(const TensorStorage<T, NumIndices_, Dynamic, Options_, void>& other) : Base_(other) { }
+
+#ifdef EIGEN_HAVE_RVALUE_REFERENCES
+// TensorStorage(TensorStorage<T, NumIndices_, Dynamic, Options_, void>&&) = default;
+#endif
TensorStorage(internal::constructor_without_unaligned_array_assert) : Base_(internal::constructor_without_unaligned_array_assert()) {}
- TensorStorage(DenseIndex size, const std::array<DenseIndex, NumIndices_>& dimensions) : Base_(size, dimensions) {}
- TensorStorage<T, NumIndices_, Dynamic, Options_, void>& operator=(const TensorStorage<T, NumIndices_, Dynamic, Options_, void>&) = default;
+ TensorStorage(DenseIndex size, const array<DenseIndex, NumIndices_>& dimensions) : Base_(size, dimensions) {}
+
+ // TensorStorage<T, NumIndices_, Dynamic, Options_, void>& operator=(const TensorStorage<T, NumIndices_, Dynamic, Options_, void>&) = default;
};
// pure dynamic
@@ -52,17 +57,17 @@ template<typename T, std::size_t NumIndices_, int Options_>
class TensorStorage<T, NumIndices_, Dynamic, Options_, typename internal::gen_numeric_list_repeated<DenseIndex, NumIndices_, Dynamic>::type>
{
T *m_data;
- std::array<DenseIndex, NumIndices_> m_dimensions;
+ array<DenseIndex, NumIndices_> m_dimensions;
typedef TensorStorage<T, NumIndices_, Dynamic, Options_, typename internal::gen_numeric_list_repeated<DenseIndex, NumIndices_, Dynamic>::type> Self_;
public:
- TensorStorage() : m_data(0), m_dimensions(internal::template repeat<NumIndices_, DenseIndex>(0)) {}
+ TensorStorage() : m_data(0), m_dimensions() {}
TensorStorage(internal::constructor_without_unaligned_array_assert)
: m_data(0), m_dimensions(internal::template repeat<NumIndices_, DenseIndex>(0)) {}
- TensorStorage(DenseIndex size, const std::array<DenseIndex, NumIndices_>& dimensions)
- : m_data(internal::conditional_aligned_new_auto<T,(Options_&DontAlign)==0>(size)), m_dimensions(dimensions)
- { EIGEN_INTERNAL_TENSOR_STORAGE_CTOR_PLUGIN }
- TensorStorage(const Self_& other)
+ TensorStorage(DenseIndex size, const array<DenseIndex, NumIndices_>& dimensions)
+ : m_data(internal::conditional_aligned_new_auto<T,(Options_&DontAlign)==0>(size)), m_dimensions(dimensions)
+ { EIGEN_INTERNAL_TENSOR_STORAGE_CTOR_PLUGIN }
+ TensorStorage(const Self_& other)
: m_data(internal::conditional_aligned_new_auto<T,(Options_&DontAlign)==0>(internal::array_prod(other.m_dimensions)))
, m_dimensions(other.m_dimensions)
{
@@ -76,28 +81,34 @@ class TensorStorage<T, NumIndices_, Dynamic, Options_, typename internal::gen_nu
}
return *this;
}
- TensorStorage(Self_&& other)
+
+#ifdef EIGEN_HAVE_RVALUE_REFERENCES
+/* TensorStorage(Self_&& other)
: m_data(std::move(other.m_data)), m_dimensions(std::move(other.m_dimensions))
{
other.m_data = nullptr;
}
+*/
Self_& operator=(Self_&& other)
{
using std::swap;
swap(m_data, other.m_data);
swap(m_dimensions, other.m_dimensions);
return *this;
- }
+ }
+#endif
+
~TensorStorage() { internal::conditional_aligned_delete_auto<T,(Options_&DontAlign)==0>(m_data, internal::array_prod(m_dimensions)); }
void swap(Self_& other)
{ std::swap(m_data,other.m_data); std::swap(m_dimensions,other.m_dimensions); }
- std::array<DenseIndex, NumIndices_> dimensions(void) const {return m_dimensions;}
- void conservativeResize(DenseIndex size, const std::array<DenseIndex, NumIndices_>& nbDimensions)
+ const array<DenseIndex, NumIndices_>& dimensions() const {return m_dimensions;}
+
+ void conservativeResize(DenseIndex size, const array<DenseIndex, NumIndices_>& nbDimensions)
{
m_data = internal::conditional_aligned_realloc_new_auto<T,(Options_&DontAlign)==0>(m_data, size, internal::array_prod(m_dimensions));
m_dimensions = nbDimensions;
}
- void resize(DenseIndex size, const std::array<DenseIndex, NumIndices_>& nbDimensions)
+ void resize(DenseIndex size, const array<DenseIndex, NumIndices_>& nbDimensions)
{
if(size != internal::array_prod(m_dimensions))
{
@@ -110,8 +121,9 @@ class TensorStorage<T, NumIndices_, Dynamic, Options_, typename internal::gen_nu
}
m_dimensions = nbDimensions;
}
- const T *data() const { return m_data; }
+
T *data() { return m_data; }
+ const T *data() const { return m_data; }
};
// TODO: implement fixed-size stuff
@@ -119,7 +131,3 @@ class TensorStorage<T, NumIndices_, Dynamic, Options_, typename internal::gen_nu
} // end namespace Eigen
#endif // EIGEN_CXX11_TENSOR_TENSORSTORAGE_H
-
-/*
- * kate: space-indent on; indent-width 2; mixedindent off; indent-mode cstyle;
- */
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorTraits.h b/unsupported/Eigen/CXX11/src/Tensor/TensorTraits.h
new file mode 100644
index 000000000..53b4ea444
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorTraits.h
@@ -0,0 +1,122 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_TRAITS_H
+#define EIGEN_CXX11_TENSOR_TENSOR_TRAITS_H
+
+namespace Eigen {
+namespace internal {
+
+
+template<typename Scalar, int Options>
+class compute_tensor_flags
+{
+ enum {
+ is_dynamic_size_storage = 1,
+
+ aligned_bit =
+ (
+ ((Options&DontAlign)==0) && (
+#if EIGEN_ALIGN_STATICALLY
+ (!is_dynamic_size_storage)
+#else
+ 0
+#endif
+ ||
+#if EIGEN_ALIGN
+ is_dynamic_size_storage
+#else
+ 0
+#endif
+ )
+ ) ? AlignedBit : 0,
+ packet_access_bit = packet_traits<Scalar>::Vectorizable && aligned_bit ? PacketAccessBit : 0
+ };
+
+ public:
+ enum { ret = packet_access_bit | aligned_bit};
+};
+
+
+template<typename Scalar_, std::size_t NumIndices_, int Options_>
+struct traits<Tensor<Scalar_, NumIndices_, Options_> >
+{
+ typedef Scalar_ Scalar;
+ typedef Dense StorageKind;
+ typedef DenseIndex Index;
+ enum {
+ Options = Options_,
+ Flags = compute_tensor_flags<Scalar_, Options_>::ret,
+ };
+};
+
+
+template<typename PlainObjectType>
+struct traits<TensorMap<PlainObjectType> >
+ : public traits<PlainObjectType>
+{
+ typedef traits<PlainObjectType> BaseTraits;
+ typedef typename BaseTraits::Scalar Scalar;
+ typedef typename BaseTraits::StorageKind StorageKind;
+ typedef typename BaseTraits::Index Index;
+};
+
+
+template<typename _Scalar, std::size_t NumIndices_, int Options_>
+struct eval<Tensor<_Scalar, NumIndices_, Options_>, Eigen::Dense>
+{
+ typedef const Tensor<_Scalar, NumIndices_, Options_>& type;
+};
+
+template<typename _Scalar, std::size_t NumIndices_, int Options_>
+struct eval<const Tensor<_Scalar, NumIndices_, Options_>, Eigen::Dense>
+{
+ typedef const Tensor<_Scalar, NumIndices_, Options_>& type;
+};
+
+template<typename PlainObjectType>
+struct eval<TensorMap<PlainObjectType>, Eigen::Dense>
+{
+ typedef const TensorMap<PlainObjectType>& type;
+};
+
+template<typename PlainObjectType>
+struct eval<const TensorMap<PlainObjectType>, Eigen::Dense>
+{
+ typedef const TensorMap<PlainObjectType>& type;
+};
+
+template <typename Scalar_, std::size_t NumIndices_, int Options_>
+struct nested<Tensor<Scalar_, NumIndices_, Options_>, 1, typename eval<Tensor<Scalar_, NumIndices_, Options_> >::type>
+{
+ typedef const Tensor<Scalar_, NumIndices_, Options_>& type;
+};
+
+template <typename Scalar_, std::size_t NumIndices_, int Options_>
+struct nested<const Tensor<Scalar_, NumIndices_, Options_>, 1, typename eval<const Tensor<Scalar_, NumIndices_, Options_> >::type>
+{
+ typedef const Tensor<Scalar_, NumIndices_, Options_>& type;
+};
+
+template <typename PlainObjectType>
+struct nested<TensorMap<PlainObjectType>, 1, typename eval<TensorMap<PlainObjectType> >::type>
+{
+ typedef const TensorMap<PlainObjectType>& type;
+};
+
+template <typename PlainObjectType>
+struct nested<const TensorMap<PlainObjectType>, 1, typename eval<TensorMap<PlainObjectType> >::type>
+{
+ typedef const TensorMap<PlainObjectType>& type;
+};
+
+} // end namespace internal
+} // end namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_TRAITS_H
diff --git a/unsupported/test/CMakeLists.txt b/unsupported/test/CMakeLists.txt
index 0a6c56c19..31583d3ca 100644
--- a/unsupported/test/CMakeLists.txt
+++ b/unsupported/test/CMakeLists.txt
@@ -93,7 +93,7 @@ ei_add_test(minres)
ei_add_test(levenberg_marquardt)
ei_add_test(bdcsvd)
-option(EIGEN_TEST_CXX11 "Enable testing of C++11 features (e.g. Tensor module)." OFF)
+option(EIGEN_TEST_CXX11 "Enable testing of C++11 features (e.g. Tensor module)." ON)
if(EIGEN_TEST_CXX11)
# FIXME: add C++11 compiler switch in some portable way
# (MSVC doesn't need any for example, so this will
@@ -101,4 +101,7 @@ if(EIGEN_TEST_CXX11)
ei_add_test(cxx11_meta "-std=c++0x")
ei_add_test(cxx11_tensor_simple "-std=c++0x")
ei_add_test(cxx11_tensor_symmetry "-std=c++0x")
+ ei_add_test(cxx11_tensor_assign "-std=c++0x")
+ ei_add_test(cxx11_tensor_expr "-std=c++0x")
+ ei_add_test(cxx11_tensor_map "-std=c++0x")
endif()
diff --git a/unsupported/test/cxx11_tensor_simple.cpp b/unsupported/test/cxx11_tensor_simple.cpp
index ea512c9cc..1f76033ea 100644
--- a/unsupported/test/cxx11_tensor_simple.cpp
+++ b/unsupported/test/cxx11_tensor_simple.cpp
@@ -163,7 +163,7 @@ static void test_3d()
VERIFY_IS_EQUAL((epsilon(0,2,1)), -1);
VERIFY_IS_EQUAL((epsilon(1,0,2)), -1);
- std::array<Eigen::DenseIndex, 3> dims{{2,3,4}};
+ array<Eigen::DenseIndex, 3> dims{{2,3,4}};
Tensor<int, 3> t1(dims);
Tensor<int, 3, RowMajor> t2(dims);