[cgit navigation: about | summary | refs | log | tree | commit | diff | homepage]
diff options
context:
space:
mode:
-rw-r--r--unsupported/Eigen/CXX11/Tensor40
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/Tensor.h314
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorStorage.h126
-rw-r--r--unsupported/test/CMakeLists.txt1
-rw-r--r--unsupported/test/cxx11_tensor_simple.cpp271
5 files changed, 752 insertions, 0 deletions
diff --git a/unsupported/Eigen/CXX11/Tensor b/unsupported/Eigen/CXX11/Tensor
new file mode 100644
index 000000000..083d8c0a7
--- /dev/null
+++ b/unsupported/Eigen/CXX11/Tensor
@@ -0,0 +1,40 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_MODULE
+#define EIGEN_CXX11_TENSOR_MODULE
+
+#include <Eigen/CXX11/Core>
+
+#include <Eigen/src/Core/util/DisableStupidWarnings.h>
+
+/** \defgroup CXX11_Tensor_Module Tensor Module
+ *
+ * This module provides a Tensor class for storing arbitrarily indexed
+ * objects.
+ *
+ * \code
+ * #include <Eigen/CXX11/Tensor>
+ * \endcode
+ */
+
+#include <cstddef>
+#include <cstring>
+#include <utility>
+
+#include "src/Tensor/TensorStorage.h"
+#include "src/Tensor/Tensor.h"
+
+#include <Eigen/src/Core/util/ReenableStupidWarnings.h>
+
+#endif // EIGEN_CXX11_TENSOR_MODULE
+
+/*
+ * kate: space-indent on; indent-width 2; mixedindent off; indent-mode cstyle;
+ */
diff --git a/unsupported/Eigen/CXX11/src/Tensor/Tensor.h b/unsupported/Eigen/CXX11/src/Tensor/Tensor.h
new file mode 100644
index 000000000..c40905af4
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/Tensor.h
@@ -0,0 +1,314 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSOR_H
+#define EIGEN_CXX11_TENSOR_TENSOR_H
+
+namespace Eigen {
+
+/** \class Tensor
+ * \ingroup CXX11_Tensor_Module
+ *
+ * \brief The tensor class.
+ *
+ * The %Tensor class is the work-horse for all \em dense tensors within Eigen.
+ *
+ * The %Tensor class encompasses only dynamic-size objects so far.
+ *
+ * The first two template parameters are required:
+ * \tparam Scalar_ \anchor tensor_tparam_scalar Numeric type, e.g. float, double, int or std::complex<float>.
+ * User defined scalar types are supported as well (see \ref user_defined_scalars "here").
+ * \tparam NumIndices_ Number of indices (i.e. rank of the tensor)
+ *
+ * The remaining template parameters are optional -- in most cases you don't have to worry about them.
+ * \tparam Options_ \anchor tensor_tparam_options A combination of either \b #RowMajor or \b #ColMajor, and of either
+ * \b #AutoAlign or \b #DontAlign.
+ * The former controls \ref TopicStorageOrders "storage order", and defaults to column-major. The latter controls alignment, which is required
+ * for vectorization. It defaults to aligning tensors. Note that tensors currently do not support any operations that profit from vectorization.
+ * Support for such operations (i.e. adding two tensors etc.) is planned.
+ *
+ * You can access elements of tensors using normal subscripting:
+ *
+ * \code
+ * Eigen::Tensor<double, 4> t(10, 10, 10, 10);
+ * t(0, 1, 2, 3) = 42.0;
+ * \endcode
+ *
+ * This class can be extended with the help of the plugin mechanism described on the page
+ * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_TENSOR_PLUGIN.
+ *
+ * <i><b>Some notes:</b></i>
+ *
+ * <dl>
+ * <dt><b>Relation to other parts of Eigen:</b></dt>
+ * <dd>The midterm developement goal for this class is to have a similar hierarchy as Eigen uses for matrices, so that
+ * taking blocks or using tensors in expressions is easily possible, including an interface with the vector/matrix code
+ * by providing .asMatrix() and .asVector() (or similar) methods for rank 2 and 1 tensors. However, currently, the %Tensor
+ * class does not provide any of these features and is only available as a stand-alone class that just allows for
+ * coefficient access. Also, when fixed-size tensors are implemented, the number of template arguments is likely to
+ * change dramatically.</dd>
+ * </dl>
+ *
+ * \ref TopicStorageOrders
+ */
+template<typename Scalar_, std::size_t NumIndices_, int Options_ = 0>
+class Tensor;
+
+namespace internal {
+template<typename Scalar_, std::size_t NumIndices_, int Options_>
+struct traits<Tensor<Scalar_, NumIndices_, Options_>>
+{
+ typedef Scalar_ Scalar;
+ typedef Dense StorageKind;
+ typedef DenseIndex Index;
+ enum {
+ Options = Options_
+ };
+};
+
+template<typename Index, std::size_t NumIndices, std::size_t n, bool RowMajor>
+struct tensor_index_linearization_helper
+{
+ constexpr static inline Index run(std::array<Index, NumIndices> const& indices, std::array<Index, NumIndices> const& dimensions)
+ {
+ return std_array_get<RowMajor ? n : (NumIndices - n - 1)>(indices) +
+ std_array_get<RowMajor ? n : (NumIndices - n - 1)>(dimensions) *
+ tensor_index_linearization_helper<Index, NumIndices, n - 1, RowMajor>::run(indices, dimensions);
+ }
+};
+
+template<typename Index, std::size_t NumIndices, bool RowMajor>
+struct tensor_index_linearization_helper<Index, NumIndices, 0, RowMajor>
+{
+ constexpr static inline Index run(std::array<Index, NumIndices> const& indices, std::array<Index, NumIndices> const&)
+ {
+ return std_array_get<RowMajor ? 0 : NumIndices - 1>(indices);
+ }
+};
+} // end namespace internal
+
+template<typename Scalar_, std::size_t NumIndices_, int Options_>
+class Tensor
+{
+ static_assert(NumIndices_ >= 1, "A tensor must have at least one index.");
+
+ public:
+ typedef Tensor<Scalar_, NumIndices_, Options_> Self;
+ typedef typename internal::traits<Self>::StorageKind StorageKind;
+ typedef typename internal::traits<Self>::Index Index;
+ typedef typename internal::traits<Self>::Scalar Scalar;
+ typedef typename internal::packet_traits<Scalar>::type PacketScalar;
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+ typedef Self DenseType;
+
+ constexpr static int Options = Options_;
+ constexpr static std::size_t NumIndices = NumIndices_;
+
+ protected:
+ TensorStorage<Scalar, NumIndices, Dynamic, Options> m_storage;
+
+ public:
+ EIGEN_STRONG_INLINE Index dimension(std::size_t n) const { return m_storage.dimensions()[n]; }
+ EIGEN_STRONG_INLINE std::array<Index, NumIndices> dimensions() const { return m_storage.dimensions(); }
+ EIGEN_STRONG_INLINE Index size() const { return internal::array_prod(m_storage.dimensions()); }
+ EIGEN_STRONG_INLINE Scalar *data() { return m_storage.data(); }
+ EIGEN_STRONG_INLINE const Scalar *data() const { return m_storage.data(); }
+
+ // This makes EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
+ // work, because that uses base().coeffRef() - and we don't yet
+ // implement a similar class hierarchy
+ inline Self& base() { return *this; }
+ inline const Self& base() const { return *this; }
+
+ void setZero()
+ {
+ // FIXME: until we have implemented packet access and the
+ // expression engine w.r.t. nullary ops, use this
+ // as a kludge. Only works with POD types, but for
+ // any standard usage, this shouldn't be a problem
+ memset((void *)data(), 0, size() * sizeof(Scalar));
+ }
+
+ inline Self& operator=(Self const& other)
+ {
+ m_storage = other.m_storage;
+ return *this;
+ }
+
+ template<typename... IndexTypes>
+ inline const Scalar& coeff(Index firstIndex, Index secondIndex, IndexTypes... otherIndices) const
+ {
+ static_assert(sizeof...(otherIndices) + 2 == NumIndices, "Number of indices used to access a tensor coefficient must be equal to the rank of the tensor.");
+ return coeff(std::array<Index, NumIndices>{{firstIndex, secondIndex, otherIndices...}});
+ }
+
+ inline const Scalar& coeff(const std::array<Index, NumIndices>& indices) const
+ {
+ eigen_internal_assert(checkIndexRange(indices));
+ return m_storage.data()[linearizedIndex(indices)];
+ }
+
+ inline const Scalar& coeff(Index index) const
+ {
+ eigen_internal_assert(index >= 0 && index < size());
+ return m_storage.data()[index];
+ }
+
+ template<typename... IndexTypes>
+ inline Scalar& coeffRef(Index firstIndex, Index secondIndex, IndexTypes... otherIndices)
+ {
+ static_assert(sizeof...(otherIndices) + 2 == NumIndices, "Number of indices used to access a tensor coefficient must be equal to the rank of the tensor.");
+ return coeffRef(std::array<Index, NumIndices>{{firstIndex, secondIndex, otherIndices...}});
+ }
+
+ inline Scalar& coeffRef(const std::array<Index, NumIndices>& indices)
+ {
+ eigen_internal_assert(checkIndexRange(indices));
+ return m_storage.data()[linearizedIndex(indices)];
+ }
+
+ inline Scalar& coeffRef(Index index)
+ {
+ eigen_internal_assert(index >= 0 && index < size());
+ return m_storage.data()[index];
+ }
+
+ template<typename... IndexTypes>
+ inline const Scalar& operator()(Index firstIndex, Index secondIndex, IndexTypes... otherIndices) const
+ {
+ static_assert(sizeof...(otherIndices) + 2 == NumIndices, "Number of indices used to access a tensor coefficient must be equal to the rank of the tensor.");
+ return this->operator()(std::array<Index, NumIndices>{{firstIndex, secondIndex, otherIndices...}});
+ }
+
+ inline const Scalar& operator()(const std::array<Index, NumIndices>& indices) const
+ {
+ eigen_assert(checkIndexRange(indices));
+ return coeff(indices);
+ }
+
+    // Linear-index access (const). Use eigen_assert, not eigen_internal_assert,
+    // so this public accessor range-checks in debug builds exactly like its
+    // non-const counterpart and the multi-index operator() overloads.
+    inline const Scalar& operator()(Index index) const
+    {
+      eigen_assert(index >= 0 && index < size());
+      return coeff(index);
+    }
+
+ inline const Scalar& operator[](Index index) const
+ {
+ static_assert(NumIndices == 1, "The bracket operator is only for vectors, use the parenthesis operator instead.");
+ return coeff(index);
+ }
+
+ template<typename... IndexTypes>
+ inline Scalar& operator()(Index firstIndex, Index secondIndex, IndexTypes... otherIndices)
+ {
+ static_assert(sizeof...(otherIndices) + 2 == NumIndices, "Number of indices used to access a tensor coefficient must be equal to the rank of the tensor.");
+ return operator()(std::array<Index, NumIndices>{{firstIndex, secondIndex, otherIndices...}});
+ }
+
+ inline Scalar& operator()(const std::array<Index, NumIndices>& indices)
+ {
+ eigen_assert(checkIndexRange(indices));
+ return coeffRef(indices);
+ }
+
+ inline Scalar& operator()(Index index)
+ {
+ eigen_assert(index >= 0 && index < size());
+ return coeffRef(index);
+ }
+
+ inline Scalar& operator[](Index index)
+ {
+ static_assert(NumIndices == 1, "The bracket operator is only for vectors, use the parenthesis operator instead.");
+ return coeffRef(index);
+ }
+
+ inline Tensor()
+ : m_storage()
+ {
+ }
+
+ inline Tensor(const Self& other)
+ : m_storage(other.m_storage)
+ {
+ }
+
+    // Move constructor: steal the other tensor's storage.
+    // NOTE: a named rvalue reference is an lvalue, so writing
+    // m_storage(other.m_storage) would select TensorStorage's COPY
+    // constructor (allocating and copying every coefficient). The explicit
+    // std::move is required to invoke the move constructor, which transfers
+    // the data pointer and nulls it in 'other'.
+    inline Tensor(Self&& other)
+      : m_storage(std::move(other.m_storage))
+    {
+    }
+
+ template<typename... IndexTypes>
+ inline Tensor(Index firstDimension, IndexTypes... otherDimensions)
+ : m_storage()
+ {
+ static_assert(sizeof...(otherDimensions) + 1 == NumIndices, "Number of dimensions used to construct a tensor must be equal to the rank of the tensor.");
+ resize(std::array<Index, NumIndices>{{firstDimension, otherDimensions...}});
+ }
+
+ inline Tensor(std::array<Index, NumIndices> dimensions)
+ : m_storage(internal::array_prod(dimensions), dimensions)
+ {
+ EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
+ }
+
+ template<typename... IndexTypes>
+ void resize(Index firstDimension, IndexTypes... otherDimensions)
+ {
+ static_assert(sizeof...(otherDimensions) + 1 == NumIndices, "Number of dimensions used to resize a tensor must be equal to the rank of the tensor.");
+ resize(std::array<Index, NumIndices>{{firstDimension, otherDimensions...}});
+ }
+
+ void resize(const std::array<Index, NumIndices>& dimensions)
+ {
+ std::size_t i;
+ Index size = Index(1);
+ for (i = 0; i < NumIndices; i++) {
+ internal::check_rows_cols_for_overflow<Dynamic>::run(size, dimensions[i]);
+ size *= dimensions[i];
+ }
+ #ifdef EIGEN_INITIALIZE_COEFFS
+ bool size_changed = size != this->size();
+ m_storage.resize(size, dimensions);
+ if(size_changed) EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
+ #else
+ m_storage.resize(size, dimensions);
+ #endif
+ }
+
+ protected:
+ bool checkIndexRange(const std::array<Index, NumIndices>& indices) const
+ {
+ using internal::array_apply_and_reduce;
+ using internal::array_zip_and_reduce;
+ using internal::greater_equal_zero_op;
+ using internal::logical_and_op;
+ using internal::lesser_op;
+
+ return
+ // check whether the indices are all >= 0
+ array_apply_and_reduce<logical_and_op, greater_equal_zero_op>(indices) &&
+ // check whether the indices fit in the dimensions
+ array_zip_and_reduce<logical_and_op, lesser_op>(indices, m_storage.dimensions());
+ }
+
+ inline Index linearizedIndex(const std::array<Index, NumIndices>& indices) const
+ {
+ return internal::tensor_index_linearization_helper<Index, NumIndices, NumIndices - 1, Options&RowMajor>::run(indices, m_storage.dimensions());
+ }
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_H
+
+/*
+ * kate: space-indent on; indent-width 2; mixedindent off; indent-mode cstyle;
+ */
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorStorage.h b/unsupported/Eigen/CXX11/src/Tensor/TensorStorage.h
new file mode 100644
index 000000000..50040147d
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorStorage.h
@@ -0,0 +1,126 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSOR_TENSORSTORAGE_H
+#define EIGEN_CXX11_TENSOR_TENSORSTORAGE_H
+
+#ifdef EIGEN_TENSOR_STORAGE_CTOR_PLUGIN
+ #define EIGEN_INTERNAL_TENSOR_STORAGE_CTOR_PLUGIN EIGEN_TENSOR_STORAGE_CTOR_PLUGIN;
+#else
+ #define EIGEN_INTERNAL_TENSOR_STORAGE_CTOR_PLUGIN
+#endif
+
+namespace Eigen {
+
+/** \internal
+ *
+ * \class TensorStorage
+ * \ingroup CXX11_Tensor_Module
+ *
+ * \brief Stores the data of a tensor
+ *
+ * This class stores the data of fixed-size, dynamic-size or mixed tensors
+ * in a way as compact as possible.
+ *
+ * \sa Tensor
+ */
+template<typename T, std::size_t NumIndices_, DenseIndex Size, int Options_, typename Dimensions = void> class TensorStorage;
+
+// pure-dynamic, but without specification of all dimensions explicitly
+template<typename T, std::size_t NumIndices_, int Options_>
+class TensorStorage<T, NumIndices_, Dynamic, Options_, void>
+ : public TensorStorage<T, NumIndices_, Dynamic, Options_, typename internal::gen_numeric_list_repeated<DenseIndex, NumIndices_, Dynamic>::type>
+{
+ typedef TensorStorage<T, NumIndices_, Dynamic, Options_, typename internal::gen_numeric_list_repeated<DenseIndex, NumIndices_, Dynamic>::type> Base_;
+ public:
+ TensorStorage() = default;
+ TensorStorage(const TensorStorage<T, NumIndices_, Dynamic, Options_, void>&) = default;
+ TensorStorage(TensorStorage<T, NumIndices_, Dynamic, Options_, void>&&) = default;
+ TensorStorage(internal::constructor_without_unaligned_array_assert) : Base_(internal::constructor_without_unaligned_array_assert()) {}
+ TensorStorage(DenseIndex size, const std::array<DenseIndex, NumIndices_>& dimensions) : Base_(size, dimensions) {}
+ TensorStorage<T, NumIndices_, Dynamic, Options_, void>& operator=(const TensorStorage<T, NumIndices_, Dynamic, Options_, void>&) = default;
+};
+
+// pure dynamic
+template<typename T, std::size_t NumIndices_, int Options_>
+class TensorStorage<T, NumIndices_, Dynamic, Options_, typename internal::gen_numeric_list_repeated<DenseIndex, NumIndices_, Dynamic>::type>
+{
+ T *m_data;
+ std::array<DenseIndex, NumIndices_> m_dimensions;
+
+ typedef TensorStorage<T, NumIndices_, Dynamic, Options_, typename internal::gen_numeric_list_repeated<DenseIndex, NumIndices_, Dynamic>::type> Self_;
+ public:
+ TensorStorage() : m_data(0), m_dimensions(internal::template repeat<NumIndices_, DenseIndex>(0)) {}
+ TensorStorage(internal::constructor_without_unaligned_array_assert)
+ : m_data(0), m_dimensions(internal::template repeat<NumIndices_, DenseIndex>(0)) {}
+ TensorStorage(DenseIndex size, const std::array<DenseIndex, NumIndices_>& dimensions)
+ : m_data(internal::conditional_aligned_new_auto<T,(Options_&DontAlign)==0>(size)), m_dimensions(dimensions)
+ { EIGEN_INTERNAL_TENSOR_STORAGE_CTOR_PLUGIN }
+ TensorStorage(const Self_& other)
+ : m_data(internal::conditional_aligned_new_auto<T,(Options_&DontAlign)==0>(internal::array_prod(other.m_dimensions)))
+ , m_dimensions(other.m_dimensions)
+ {
+ internal::smart_copy(other.m_data, other.m_data+internal::array_prod(other.m_dimensions), m_data);
+ }
+ Self_& operator=(const Self_& other)
+ {
+ if (this != &other) {
+ Self_ tmp(other);
+ this->swap(tmp);
+ }
+ return *this;
+ }
+ TensorStorage(Self_&& other)
+ : m_data(std::move(other.m_data)), m_dimensions(std::move(other.m_dimensions))
+ {
+ other.m_data = nullptr;
+ }
+ Self_& operator=(Self_&& other)
+ {
+ using std::swap;
+ swap(m_data, other.m_data);
+ swap(m_dimensions, other.m_dimensions);
+ return *this;
+ }
+ ~TensorStorage() { internal::conditional_aligned_delete_auto<T,(Options_&DontAlign)==0>(m_data, internal::array_prod(m_dimensions)); }
+ void swap(Self_& other)
+ { std::swap(m_data,other.m_data); std::swap(m_dimensions,other.m_dimensions); }
+ std::array<DenseIndex, NumIndices_> dimensions(void) const {return m_dimensions;}
+ void conservativeResize(DenseIndex size, const std::array<DenseIndex, NumIndices_>& nbDimensions)
+ {
+ m_data = internal::conditional_aligned_realloc_new_auto<T,(Options_&DontAlign)==0>(m_data, size, internal::array_prod(m_dimensions));
+ m_dimensions = nbDimensions;
+ }
+    // Destructive resize: if the total element count changes, free the old
+    // buffer and allocate a fresh (uninitialized) one; existing coefficients
+    // are NOT preserved (use conservativeResize for that). Always records the
+    // new per-dimension sizes.
+    void resize(DenseIndex size, const std::array<DenseIndex, NumIndices_>& nbDimensions)
+    {
+      if(size != internal::array_prod(m_dimensions))
+      {
+        internal::conditional_aligned_delete_auto<T,(Options_&DontAlign)==0>(m_data, internal::array_prod(m_dimensions));
+        if (size)
+          m_data = internal::conditional_aligned_new_auto<T,(Options_&DontAlign)==0>(size);
+        else
+          m_data = 0;
+        // Run the tensor-storage ctor plugin (defined at the top of this
+        // file), not EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN: the dense
+        // macro belongs to Core/DenseStorage.h and a user defining only
+        // EIGEN_TENSOR_STORAGE_CTOR_PLUGIN would never see it fire here.
+        EIGEN_INTERNAL_TENSOR_STORAGE_CTOR_PLUGIN
+      }
+      m_dimensions = nbDimensions;
+    }
+ const T *data() const { return m_data; }
+ T *data() { return m_data; }
+};
+
+// TODO: implement fixed-size stuff
+
+} // end namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSORSTORAGE_H
+
+/*
+ * kate: space-indent on; indent-width 2; mixedindent off; indent-mode cstyle;
+ */
diff --git a/unsupported/test/CMakeLists.txt b/unsupported/test/CMakeLists.txt
index 841cab9d7..61e8f56fd 100644
--- a/unsupported/test/CMakeLists.txt
+++ b/unsupported/test/CMakeLists.txt
@@ -99,4 +99,5 @@ if(EIGEN_TEST_CXX11)
# (MSVC doesn't need any for example, so this will
# clash there)
ei_add_test(cxx11_meta "-std=c++0x")
+ ei_add_test(cxx11_tensor_simple "-std=c++0x")
endif()
diff --git a/unsupported/test/cxx11_tensor_simple.cpp b/unsupported/test/cxx11_tensor_simple.cpp
new file mode 100644
index 000000000..6875a4e58
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_simple.cpp
@@ -0,0 +1,271 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+
+#include <Eigen/CXX11/Tensor>
+
+using Eigen::Tensor;
+using Eigen::RowMajor;
+
+static void test_1d()
+{
+ Tensor<int, 1> vec1(6);
+ Tensor<int, 1, RowMajor> vec2(6);
+ Tensor<int, 1> vec3;
+ Tensor<int, 1, RowMajor> vec4;
+
+ vec3.resize(6);
+ vec4.resize(6);
+
+ vec1(0) = 4; vec2(0) = 0; vec3(0) = 5;
+ vec1(1) = 8; vec2(1) = 1; vec3(1) = 4;
+ vec1(2) = 15; vec2(2) = 2; vec3(2) = 3;
+ vec1(3) = 16; vec2(3) = 3; vec3(3) = 2;
+ vec1(4) = 23; vec2(4) = 4; vec3(4) = 1;
+ vec1(5) = 42; vec2(5) = 5; vec3(5) = 0;
+ vec4.setZero();
+
+ VERIFY_IS_EQUAL((vec1.size()), 6);
+ VERIFY_IS_EQUAL((vec1.dimensions()[0]), 6);
+
+ VERIFY_IS_EQUAL((vec1[0]), 4);
+ VERIFY_IS_EQUAL((vec1[1]), 8);
+ VERIFY_IS_EQUAL((vec1[2]), 15);
+ VERIFY_IS_EQUAL((vec1[3]), 16);
+ VERIFY_IS_EQUAL((vec1[4]), 23);
+ VERIFY_IS_EQUAL((vec1[5]), 42);
+
+ VERIFY_IS_EQUAL((vec2[0]), 0);
+ VERIFY_IS_EQUAL((vec2[1]), 1);
+ VERIFY_IS_EQUAL((vec2[2]), 2);
+ VERIFY_IS_EQUAL((vec2[3]), 3);
+ VERIFY_IS_EQUAL((vec2[4]), 4);
+ VERIFY_IS_EQUAL((vec2[5]), 5);
+
+ VERIFY_IS_EQUAL((vec3[0]), 5);
+ VERIFY_IS_EQUAL((vec3[1]), 4);
+ VERIFY_IS_EQUAL((vec3[2]), 3);
+ VERIFY_IS_EQUAL((vec3[3]), 2);
+ VERIFY_IS_EQUAL((vec3[4]), 1);
+ VERIFY_IS_EQUAL((vec3[5]), 0);
+
+ VERIFY_IS_EQUAL((vec4[0]), 0);
+ VERIFY_IS_EQUAL((vec4[1]), 0);
+ VERIFY_IS_EQUAL((vec4[2]), 0);
+ VERIFY_IS_EQUAL((vec4[3]), 0);
+ VERIFY_IS_EQUAL((vec4[4]), 0);
+ VERIFY_IS_EQUAL((vec4[5]), 0);
+
+ Tensor<int, 1> vec5(vec1);
+
+ VERIFY_IS_EQUAL((vec5(0)), 4);
+ VERIFY_IS_EQUAL((vec5(1)), 8);
+ VERIFY_IS_EQUAL((vec5(2)), 15);
+ VERIFY_IS_EQUAL((vec5(3)), 16);
+ VERIFY_IS_EQUAL((vec5(4)), 23);
+ VERIFY_IS_EQUAL((vec5(5)), 42);
+
+ VERIFY_IS_EQUAL((vec5.data()[0]), 4);
+ VERIFY_IS_EQUAL((vec5.data()[1]), 8);
+ VERIFY_IS_EQUAL((vec5.data()[2]), 15);
+ VERIFY_IS_EQUAL((vec5.data()[3]), 16);
+ VERIFY_IS_EQUAL((vec5.data()[4]), 23);
+ VERIFY_IS_EQUAL((vec5.data()[5]), 42);
+}
+
+static void test_2d()
+{
+ Tensor<int, 2> mat1(2,3);
+ Tensor<int, 2, RowMajor> mat2(2,3);
+
+ mat1(0,0) = 0;
+ mat1(0,1) = 1;
+ mat1(0,2) = 2;
+ mat1(1,0) = 3;
+ mat1(1,1) = 4;
+ mat1(1,2) = 5;
+
+ mat2(0,0) = 0;
+ mat2(0,1) = 1;
+ mat2(0,2) = 2;
+ mat2(1,0) = 3;
+ mat2(1,1) = 4;
+ mat2(1,2) = 5;
+
+ VERIFY_IS_EQUAL((mat1.size()), 6);
+ VERIFY_IS_EQUAL((mat1.dimensions()[0]), 2);
+ VERIFY_IS_EQUAL((mat1.dimensions()[1]), 3);
+
+ VERIFY_IS_EQUAL((mat2.size()), 6);
+ VERIFY_IS_EQUAL((mat2.dimensions()[0]), 2);
+ VERIFY_IS_EQUAL((mat2.dimensions()[1]), 3);
+
+ VERIFY_IS_EQUAL((mat1.data()[0]), 0);
+ VERIFY_IS_EQUAL((mat1.data()[1]), 3);
+ VERIFY_IS_EQUAL((mat1.data()[2]), 1);
+ VERIFY_IS_EQUAL((mat1.data()[3]), 4);
+ VERIFY_IS_EQUAL((mat1.data()[4]), 2);
+ VERIFY_IS_EQUAL((mat1.data()[5]), 5);
+
+ VERIFY_IS_EQUAL((mat2.data()[0]), 0);
+ VERIFY_IS_EQUAL((mat2.data()[1]), 1);
+ VERIFY_IS_EQUAL((mat2.data()[2]), 2);
+ VERIFY_IS_EQUAL((mat2.data()[3]), 3);
+ VERIFY_IS_EQUAL((mat2.data()[4]), 4);
+ VERIFY_IS_EQUAL((mat2.data()[5]), 5);
+}
+
+static void test_3d()
+{
+ Tensor<int, 3> epsilon(3,3,3);
+ epsilon.setZero();
+ epsilon(0,1,2) = epsilon(2,0,1) = epsilon(1,2,0) = 1;
+ epsilon(2,1,0) = epsilon(0,2,1) = epsilon(1,0,2) = -1;
+
+ VERIFY_IS_EQUAL((epsilon.size()), 27);
+ VERIFY_IS_EQUAL((epsilon.dimensions()[0]), 3);
+ VERIFY_IS_EQUAL((epsilon.dimensions()[1]), 3);
+ VERIFY_IS_EQUAL((epsilon.dimensions()[2]), 3);
+
+ VERIFY_IS_EQUAL((epsilon(0,0,0)), 0);
+ VERIFY_IS_EQUAL((epsilon(0,0,1)), 0);
+ VERIFY_IS_EQUAL((epsilon(0,0,2)), 0);
+ VERIFY_IS_EQUAL((epsilon(0,1,0)), 0);
+ VERIFY_IS_EQUAL((epsilon(0,1,1)), 0);
+ VERIFY_IS_EQUAL((epsilon(0,2,0)), 0);
+ VERIFY_IS_EQUAL((epsilon(0,2,2)), 0);
+ VERIFY_IS_EQUAL((epsilon(1,0,0)), 0);
+ VERIFY_IS_EQUAL((epsilon(1,0,1)), 0);
+ VERIFY_IS_EQUAL((epsilon(1,1,0)), 0);
+ VERIFY_IS_EQUAL((epsilon(1,1,1)), 0);
+ VERIFY_IS_EQUAL((epsilon(1,1,2)), 0);
+ VERIFY_IS_EQUAL((epsilon(1,2,1)), 0);
+ VERIFY_IS_EQUAL((epsilon(1,2,2)), 0);
+ VERIFY_IS_EQUAL((epsilon(2,0,0)), 0);
+ VERIFY_IS_EQUAL((epsilon(2,0,2)), 0);
+ VERIFY_IS_EQUAL((epsilon(2,1,1)), 0);
+ VERIFY_IS_EQUAL((epsilon(2,1,2)), 0);
+ VERIFY_IS_EQUAL((epsilon(2,2,0)), 0);
+ VERIFY_IS_EQUAL((epsilon(2,2,1)), 0);
+ VERIFY_IS_EQUAL((epsilon(2,2,2)), 0);
+
+ VERIFY_IS_EQUAL((epsilon(0,1,2)), 1);
+ VERIFY_IS_EQUAL((epsilon(2,0,1)), 1);
+ VERIFY_IS_EQUAL((epsilon(1,2,0)), 1);
+ VERIFY_IS_EQUAL((epsilon(2,1,0)), -1);
+ VERIFY_IS_EQUAL((epsilon(0,2,1)), -1);
+ VERIFY_IS_EQUAL((epsilon(1,0,2)), -1);
+
+ std::array<Eigen::DenseIndex, 3> dims{{2,3,4}};
+ Tensor<int, 3> t1(dims);
+ Tensor<int, 3, RowMajor> t2(dims);
+
+ VERIFY_IS_EQUAL((t1.size()), 24);
+ VERIFY_IS_EQUAL((t1.dimensions()[0]), 2);
+ VERIFY_IS_EQUAL((t1.dimensions()[1]), 3);
+ VERIFY_IS_EQUAL((t1.dimensions()[2]), 4);
+
+ VERIFY_IS_EQUAL((t2.size()), 24);
+ VERIFY_IS_EQUAL((t2.dimensions()[0]), 2);
+ VERIFY_IS_EQUAL((t2.dimensions()[1]), 3);
+ VERIFY_IS_EQUAL((t2.dimensions()[2]), 4);
+
+ for (int i = 0; i < 2; i++) {
+ for (int j = 0; j < 3; j++) {
+ for (int k = 0; k < 4; k++) {
+ t1(i, j, k) = 100 * i + 10 * j + k;
+ t2(i, j, k) = 100 * i + 10 * j + k;
+ }
+ }
+ }
+
+ VERIFY_IS_EQUAL((t1.data()[0]), 0);
+ VERIFY_IS_EQUAL((t1.data()[1]), 100);
+ VERIFY_IS_EQUAL((t1.data()[2]), 10);
+ VERIFY_IS_EQUAL((t1.data()[3]), 110);
+ VERIFY_IS_EQUAL((t1.data()[4]), 20);
+ VERIFY_IS_EQUAL((t1.data()[5]), 120);
+ VERIFY_IS_EQUAL((t1.data()[6]), 1);
+ VERIFY_IS_EQUAL((t1.data()[7]), 101);
+ VERIFY_IS_EQUAL((t1.data()[8]), 11);
+ VERIFY_IS_EQUAL((t1.data()[9]), 111);
+ VERIFY_IS_EQUAL((t1.data()[10]), 21);
+ VERIFY_IS_EQUAL((t1.data()[11]), 121);
+ VERIFY_IS_EQUAL((t1.data()[12]), 2);
+ VERIFY_IS_EQUAL((t1.data()[13]), 102);
+ VERIFY_IS_EQUAL((t1.data()[14]), 12);
+ VERIFY_IS_EQUAL((t1.data()[15]), 112);
+ VERIFY_IS_EQUAL((t1.data()[16]), 22);
+ VERIFY_IS_EQUAL((t1.data()[17]), 122);
+ VERIFY_IS_EQUAL((t1.data()[18]), 3);
+ VERIFY_IS_EQUAL((t1.data()[19]), 103);
+ VERIFY_IS_EQUAL((t1.data()[20]), 13);
+ VERIFY_IS_EQUAL((t1.data()[21]), 113);
+ VERIFY_IS_EQUAL((t1.data()[22]), 23);
+ VERIFY_IS_EQUAL((t1.data()[23]), 123);
+
+ VERIFY_IS_EQUAL((t2.data()[0]), 0);
+ VERIFY_IS_EQUAL((t2.data()[1]), 1);
+ VERIFY_IS_EQUAL((t2.data()[2]), 2);
+ VERIFY_IS_EQUAL((t2.data()[3]), 3);
+ VERIFY_IS_EQUAL((t2.data()[4]), 10);
+ VERIFY_IS_EQUAL((t2.data()[5]), 11);
+ VERIFY_IS_EQUAL((t2.data()[6]), 12);
+ VERIFY_IS_EQUAL((t2.data()[7]), 13);
+ VERIFY_IS_EQUAL((t2.data()[8]), 20);
+ VERIFY_IS_EQUAL((t2.data()[9]), 21);
+ VERIFY_IS_EQUAL((t2.data()[10]), 22);
+ VERIFY_IS_EQUAL((t2.data()[11]), 23);
+ VERIFY_IS_EQUAL((t2.data()[12]), 100);
+ VERIFY_IS_EQUAL((t2.data()[13]), 101);
+ VERIFY_IS_EQUAL((t2.data()[14]), 102);
+ VERIFY_IS_EQUAL((t2.data()[15]), 103);
+ VERIFY_IS_EQUAL((t2.data()[16]), 110);
+ VERIFY_IS_EQUAL((t2.data()[17]), 111);
+ VERIFY_IS_EQUAL((t2.data()[18]), 112);
+ VERIFY_IS_EQUAL((t2.data()[19]), 113);
+ VERIFY_IS_EQUAL((t2.data()[20]), 120);
+ VERIFY_IS_EQUAL((t2.data()[21]), 121);
+ VERIFY_IS_EQUAL((t2.data()[22]), 122);
+ VERIFY_IS_EQUAL((t2.data()[23]), 123);
+}
+
+static void test_simple_assign()
+{
+ Tensor<int, 3> epsilon(3,3,3);
+ epsilon.setZero();
+ epsilon(0,1,2) = epsilon(2,0,1) = epsilon(1,2,0) = 1;
+ epsilon(2,1,0) = epsilon(0,2,1) = epsilon(1,0,2) = -1;
+
+ Tensor<int, 3> e2(2,3,1);
+ e2.setZero();
+ VERIFY_IS_EQUAL((e2(1,2,0)), 0);
+
+ e2 = epsilon;
+ VERIFY_IS_EQUAL((e2(1,2,0)), 1);
+ VERIFY_IS_EQUAL((e2(0,1,2)), 1);
+ VERIFY_IS_EQUAL((e2(2,0,1)), 1);
+ VERIFY_IS_EQUAL((e2(2,1,0)), -1);
+ VERIFY_IS_EQUAL((e2(0,2,1)), -1);
+ VERIFY_IS_EQUAL((e2(1,0,2)), -1);
+}
+
+void test_cxx11_tensor_simple()
+{
+ CALL_SUBTEST(test_1d());
+ CALL_SUBTEST(test_2d());
+ CALL_SUBTEST(test_3d());
+ CALL_SUBTEST(test_simple_assign());
+}
+
+/*
+ * kate: space-indent on; indent-width 2; mixedindent off; indent-mode cstyle;
+ */