path: root/unsupported/test
author     Eugene Zhulenev <ezhulenev@google.com>  2018-08-08 16:57:58 -0700
committer  Eugene Zhulenev <ezhulenev@google.com>  2018-08-08 16:57:58 -0700
commit     1c8b9e10a791cb43b4f730dcb5d7889099cc1c68 (patch)
tree       a62a2f74c6e2bec8367a01272743260ec7f54cef /unsupported/test
parent     1b0373ae10687ecc51ad9a0bfd46aa4ee116ade1 (diff)
parent     131ed1191fa5ccbe0265fcfccfc685642c388192 (diff)
Merged with upstream eigen
Diffstat (limited to 'unsupported/test')
-rw-r--r--  unsupported/test/cxx11_tensor_concatenation.cpp |  8
-rw-r--r--  unsupported/test/cxx11_tensor_thread_pool.cpp   | 45
-rw-r--r--  unsupported/test/cxx11_tensor_trace.cpp         | 16
3 files changed, 56 insertions(+), 13 deletions(-)
diff --git a/unsupported/test/cxx11_tensor_concatenation.cpp b/unsupported/test/cxx11_tensor_concatenation.cpp
index 9189a609b..f53515b4e 100644
--- a/unsupported/test/cxx11_tensor_concatenation.cpp
+++ b/unsupported/test/cxx11_tensor_concatenation.cpp
@@ -50,7 +50,13 @@ static void test_static_dimension_failure()
.reshape(Tensor<int, 3>::Dimensions(2, 3, 1))
.concatenate(right, 0);
Tensor<int, 2, DataLayout> alternative = left
- .concatenate(right.reshape(Tensor<int, 2>::Dimensions{{{2, 3}}}), 0);
+ // Clang breaks on the {{{...}}} form with an ambiguous-call error between
+ // the copy constructor and the variadic DSizes constructor added for
+ // #ifndef EIGEN_EMULATE_CXX11_META_H.
+ // Solution: either change the code to
+ //   Tensor<int, 2>::Dimensions{{2, 3}}
+ //   or Tensor<int, 2>::Dimensions{Tensor<int, 2>::Dimensions{{2, 3}}}
+ .concatenate(right.reshape(Tensor<int, 2>::Dimensions{{2, 3}}), 0);
}
template<int DataLayout>
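For reference, here is a minimal standalone sketch of the two initialization forms discussed in the comment above. It is not part of the patch; the include path and variable names are illustrative, and the failing form is only quoted in a comment.

// Minimal sketch of the Dimensions initialization forms discussed above.
// Assumes only the Eigen unsupported Tensor module.
#include <unsupported/Eigen/CXX11/Tensor>

int main() {
  using Eigen::Tensor;

  // The triple-brace form can be parsed either as a braced copy of another
  // Dimensions object or as a call to the variadic DSizes constructor, which
  // some Clang versions reject as ambiguous:
  //   Tensor<int, 2>::Dimensions{{{2, 3}}};   // may fail to compile on Clang
  //
  // The double-brace form passes a single braced array argument unambiguously:
  Tensor<int, 2>::Dimensions dims{{2, 3}};

  // ...or spell out the copy explicitly, as the comment suggests:
  Tensor<int, 2>::Dimensions dims2{Tensor<int, 2>::Dimensions{{2, 3}}};

  (void)dims;
  (void)dims2;
  return 0;
}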
diff --git a/unsupported/test/cxx11_tensor_thread_pool.cpp b/unsupported/test/cxx11_tensor_thread_pool.cpp
index 20a197f2b..5c3aae482 100644
--- a/unsupported/test/cxx11_tensor_thread_pool.cpp
+++ b/unsupported/test/cxx11_tensor_thread_pool.cpp
@@ -16,6 +16,25 @@
using Eigen::Tensor;
+class TestAllocator : public Allocator {
+ public:
+ ~TestAllocator() override {}
+ EIGEN_DEVICE_FUNC void* allocate(size_t num_bytes) const override {
+ const_cast<TestAllocator*>(this)->alloc_count_++;
+ return internal::aligned_malloc(num_bytes);
+ }
+ EIGEN_DEVICE_FUNC void deallocate(void* buffer) const override {
+ const_cast<TestAllocator*>(this)->dealloc_count_++;
+ internal::aligned_free(buffer);
+ }
+
+ int alloc_count() const { return alloc_count_; }
+ int dealloc_count() const { return dealloc_count_; }
+
+ private:
+ int alloc_count_ = 0;
+ int dealloc_count_ = 0;
+};
void test_multithread_elementwise()
{
@@ -374,14 +393,14 @@ void test_multithread_random()
}
template<int DataLayout>
-void test_multithread_shuffle()
+void test_multithread_shuffle(Allocator* allocator)
{
Tensor<float, 4, DataLayout> tensor(17,5,7,11);
tensor.setRandom();
const int num_threads = internal::random<int>(2, 11);
ThreadPool threads(num_threads);
- Eigen::ThreadPoolDevice device(&threads, num_threads);
+ Eigen::ThreadPoolDevice device(&threads, num_threads, allocator);
Tensor<float, 4, DataLayout> shuffle(7,5,11,17);
array<ptrdiff_t, 4> shuffles = {{2,1,3,0}};
@@ -398,6 +417,21 @@ void test_multithread_shuffle()
}
}
+void test_threadpool_allocate(TestAllocator* allocator)
+{
+ const int num_threads = internal::random<int>(2, 11);
+ const int num_allocs = internal::random<int>(2, 11);
+ ThreadPool threads(num_threads);
+ Eigen::ThreadPoolDevice device(&threads, num_threads, allocator);
+
+ for (int a = 0; a < num_allocs; ++a) {
+ void* ptr = device.allocate(512);
+ device.deallocate(ptr);
+ }
+ VERIFY(allocator != nullptr);
+ VERIFY_IS_EQUAL(allocator->alloc_count(), num_allocs);
+ VERIFY_IS_EQUAL(allocator->dealloc_count(), num_allocs);
+}
EIGEN_DECLARE_TEST(cxx11_tensor_thread_pool)
{
@@ -424,6 +458,9 @@ EIGEN_DECLARE_TEST(cxx11_tensor_thread_pool)
CALL_SUBTEST_6(test_memcpy());
CALL_SUBTEST_6(test_multithread_random());
- CALL_SUBTEST_6(test_multithread_shuffle<ColMajor>());
- CALL_SUBTEST_6(test_multithread_shuffle<RowMajor>());
+
+ TestAllocator test_allocator;
+ CALL_SUBTEST_6(test_multithread_shuffle<ColMajor>(nullptr));
+ CALL_SUBTEST_6(test_multithread_shuffle<RowMajor>(&test_allocator));
+ CALL_SUBTEST_6(test_threadpool_allocate(&test_allocator));
}
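For reference, a minimal standalone sketch of the ThreadPoolDevice allocator hook that the new test exercises. It is not part of the patch; the CountingAllocator name, pool size, and include path are illustrative, and the counting mirrors TestAllocator above.

// Minimal sketch of routing ThreadPoolDevice allocations through a custom
// Allocator (assumes Eigen's unsupported Tensor module with thread support).
#define EIGEN_USE_THREADS
#include <unsupported/Eigen/CXX11/Tensor>
#include <cassert>
#include <cstddef>

// A counting allocator in the spirit of TestAllocator above.
class CountingAllocator : public Eigen::Allocator {
 public:
  void* allocate(size_t num_bytes) const override {
    ++allocs_;
    return Eigen::internal::aligned_malloc(num_bytes);
  }
  void deallocate(void* buffer) const override {
    ++deallocs_;
    Eigen::internal::aligned_free(buffer);
  }
  mutable int allocs_ = 0;
  mutable int deallocs_ = 0;
};

int main() {
  CountingAllocator allocator;
  Eigen::ThreadPool pool(4);
  // Passing an Allocator* routes every device.allocate()/deallocate() call
  // through the custom hooks instead of aligned_malloc/aligned_free.
  Eigen::ThreadPoolDevice device(&pool, 4, &allocator);

  void* buffer = device.allocate(512);
  device.deallocate(buffer);

  assert(allocator.allocs_ == 1 && allocator.deallocs_ == 1);
  return 0;
}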
diff --git a/unsupported/test/cxx11_tensor_trace.cpp b/unsupported/test/cxx11_tensor_trace.cpp
index 1579bc1eb..0cb23060e 100644
--- a/unsupported/test/cxx11_tensor_trace.cpp
+++ b/unsupported/test/cxx11_tensor_trace.cpp
@@ -37,7 +37,7 @@ static void test_all_dimensions_trace() {
VERIFY_IS_EQUAL(result1(), sum);
Tensor<float, 5, DataLayout> tensor2(7, 7, 7, 7, 7);
- array<ptrdiff_t, 5> dims({{2, 1, 0, 3, 4}});
+ array<ptrdiff_t, 5> dims = { { 2, 1, 0, 3, 4 } };
Tensor<float, 0, DataLayout> result2 = tensor2.trace(dims);
VERIFY_IS_EQUAL(result2.rank(), 0);
sum = 0.0f;
@@ -52,7 +52,7 @@ template <int DataLayout>
static void test_simple_trace() {
Tensor<float, 3, DataLayout> tensor1(3, 5, 3);
tensor1.setRandom();
- array<ptrdiff_t, 2> dims1({{0, 2}});
+ array<ptrdiff_t, 2> dims1 = { { 0, 2 } };
Tensor<float, 1, DataLayout> result1 = tensor1.trace(dims1);
VERIFY_IS_EQUAL(result1.rank(), 1);
VERIFY_IS_EQUAL(result1.dimension(0), 5);
@@ -67,7 +67,7 @@ static void test_simple_trace() {
Tensor<float, 4, DataLayout> tensor2(5, 5, 7, 7);
tensor2.setRandom();
- array<ptrdiff_t, 2> dims2({{2, 3}});
+ array<ptrdiff_t, 2> dims2 = { { 2, 3 } };
Tensor<float, 2, DataLayout> result2 = tensor2.trace(dims2);
VERIFY_IS_EQUAL(result2.rank(), 2);
VERIFY_IS_EQUAL(result2.dimension(0), 5);
@@ -82,7 +82,7 @@ static void test_simple_trace() {
}
}
- array<ptrdiff_t, 2> dims3({{1, 0}});
+ array<ptrdiff_t, 2> dims3 = { { 1, 0 } };
Tensor<float, 2, DataLayout> result3 = tensor2.trace(dims3);
VERIFY_IS_EQUAL(result3.rank(), 2);
VERIFY_IS_EQUAL(result3.dimension(0), 7);
@@ -99,7 +99,7 @@ static void test_simple_trace() {
Tensor<float, 5, DataLayout> tensor3(3, 7, 3, 7, 3);
tensor3.setRandom();
- array<ptrdiff_t, 3> dims4({{0, 2, 4}});
+ array<ptrdiff_t, 3> dims4 = { { 0, 2, 4 } };
Tensor<float, 2, DataLayout> result4 = tensor3.trace(dims4);
VERIFY_IS_EQUAL(result4.rank(), 2);
VERIFY_IS_EQUAL(result4.dimension(0), 7);
@@ -116,7 +116,7 @@ static void test_simple_trace() {
Tensor<float, 5, DataLayout> tensor4(3, 7, 4, 7, 5);
tensor4.setRandom();
- array<ptrdiff_t, 2> dims5({{1, 3}});
+ array<ptrdiff_t, 2> dims5 = { { 1, 3 } };
Tensor<float, 3, DataLayout> result5 = tensor4.trace(dims5);
VERIFY_IS_EQUAL(result5.rank(), 3);
VERIFY_IS_EQUAL(result5.dimension(0), 3);
@@ -140,7 +140,7 @@ template<int DataLayout>
static void test_trace_in_expr() {
Tensor<float, 4, DataLayout> tensor(2, 3, 5, 3);
tensor.setRandom();
- array<ptrdiff_t, 2> dims({{1, 3}});
+ array<ptrdiff_t, 2> dims = { { 1, 3 } };
Tensor<float, 2, DataLayout> result(2, 5);
result = result.constant(1.0f) - tensor.trace(dims);
VERIFY_IS_EQUAL(result.rank(), 2);
@@ -168,4 +168,4 @@ EIGEN_DECLARE_TEST(cxx11_tensor_trace) {
CALL_SUBTEST(test_simple_trace<RowMajor>());
CALL_SUBTEST(test_trace_in_expr<ColMajor>());
CALL_SUBTEST(test_trace_in_expr<RowMajor>());
-}
+}
\ No newline at end of file
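For reference, a minimal standalone sketch of the trace reduction these tests exercise, mirroring test_simple_trace above. It is not part of the patch; the dimensions and include path are illustrative.

// Minimal sketch of Tensor::trace as exercised by cxx11_tensor_trace.cpp.
// Assumes Eigen's unsupported Tensor module.
#include <unsupported/Eigen/CXX11/Tensor>
#include <cstddef>
#include <iostream>

int main() {
  using Eigen::Tensor;

  // Tracing dimensions 0 and 2 of a 3x5x3 tensor yields a rank-1 tensor of
  // size 5: result(k) = sum_i tensor(i, k, i), as in test_simple_trace.
  Tensor<float, 3> tensor(3, 5, 3);
  tensor.setRandom();

  Eigen::array<ptrdiff_t, 2> dims = { { 0, 2 } };
  Tensor<float, 1> result = tensor.trace(dims);

  std::cout << "rank: " << result.rank()
            << ", size: " << result.dimension(0) << std::endl;
  return 0;
}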