author    Antonio Sánchez <cantonios@google.com>      2020-05-28 17:40:15 +0000
committer Rasmus Munk Larsen <rmlarsen@google.com>    2020-05-28 17:40:15 +0000
commit    8719b9c5bc1a97e62d675c02495ed72dda6fae73 (patch)
tree      3c91fd5b4bc0d08eda6ccbba28dbea3da117de42 /unsupported/test
parent    8e1df5b08280f07a8814719fdbbeaf6fababd2dc (diff)
Disable test for 32-bit systems (e.g. ARM, i386)
Neither i386 nor 32-bit ARM defines __uint128_t. On most systems, if __uint128_t is defined, then the macro __SIZEOF_INT128__ is defined as well. See https://stackoverflow.com/questions/18531782/how-to-know-if-uint128-t-is-defined
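
The detection the patch relies on can be exercised outside of Eigen. Below is a minimal standalone sketch of that pattern; the macro name NO_INT128_SKETCH and the main() harness are illustrative only and not part of Eigen:

// Sketch: mirrors the guard added in cxx11_tensor_uint128.cpp.
// GCC and Clang predefine __SIZEOF_INT128__ exactly when __int128 / __uint128_t
// exist, which rules out 32-bit targets such as i386 and 32-bit ARM.
#include <cstdio>

#if defined(_MSC_VER) || !defined(__SIZEOF_INT128__)
#define NO_INT128_SKETCH   // hypothetical stand-in for EIGEN_NO_INT128
#else
typedef __uint128_t uint128_t;
static_assert(sizeof(uint128_t) == 16, "__uint128_t should be 16 bytes wide");
#endif

int main() {
#ifdef NO_INT128_SKETCH
  std::printf("no 128-bit integer support; the uint128 test would be skipped\n");
#else
  std::printf("128-bit integers available (%zu bytes)\n", sizeof(uint128_t));
#endif
  return 0;
}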
Diffstat (limited to 'unsupported/test')
-rw-r--r--  unsupported/test/cxx11_tensor_complex_cwise_ops_gpu.cu | 2 ++
-rw-r--r--  unsupported/test/cxx11_tensor_fft.cpp                  | 8 ++++----
-rw-r--r--  unsupported/test/cxx11_tensor_morphing.cpp             | 4 ++--
-rw-r--r--  unsupported/test/cxx11_tensor_of_float16_gpu.cu        | 6 +++---
-rw-r--r--  unsupported/test/cxx11_tensor_uint128.cpp              | 2 +-
5 files changed, 12 insertions(+), 10 deletions(-)
diff --git a/unsupported/test/cxx11_tensor_complex_cwise_ops_gpu.cu b/unsupported/test/cxx11_tensor_complex_cwise_ops_gpu.cu
index f2a2a6cfa..99447b21d 100644
--- a/unsupported/test/cxx11_tensor_complex_cwise_ops_gpu.cu
+++ b/unsupported/test/cxx11_tensor_complex_cwise_ops_gpu.cu
@@ -77,6 +77,8 @@ void test_cuda_complex_cwise_ops() {
gpu_out.device(gpu_device) = -gpu_in1;
expected = -a;
break;
+ case NbOps:
+ break;
}
assert(cudaMemcpyAsync(actual.data(), d_out, complex_bytes, cudaMemcpyDeviceToHost,
gpu_device.stream()) == cudaSuccess);
diff --git a/unsupported/test/cxx11_tensor_fft.cpp b/unsupported/test/cxx11_tensor_fft.cpp
index 641486a4a..2e1008eca 100644
--- a/unsupported/test/cxx11_tensor_fft.cpp
+++ b/unsupported/test/cxx11_tensor_fft.cpp
@@ -228,10 +228,10 @@ template <typename RealScalar>
static void test_fft_non_power_of_2_round_trip(int exponent) {
int n = (1 << exponent) + 1;
- Eigen::DSizes<std::int64_t, 1> dimensions;
+ Eigen::DSizes<ptrdiff_t, 1> dimensions;
dimensions[0] = n;
- const DSizes<std::int64_t, 1> arr = dimensions;
- Tensor<RealScalar, 1, ColMajor, std::int64_t> input;
+ const DSizes<ptrdiff_t, 1> arr = dimensions;
+ Tensor<RealScalar, 1, ColMajor, ptrdiff_t> input;
input.resize(arr);
input.setRandom();
@@ -242,7 +242,7 @@ static void test_fft_non_power_of_2_round_trip(int exponent) {
Tensor<std::complex<RealScalar>, 1, ColMajor> forward =
input.template fft<BothParts, FFT_FORWARD>(fft);
- Tensor<RealScalar, 1, ColMajor, std::int64_t> output =
+ Tensor<RealScalar, 1, ColMajor, ptrdiff_t> output =
forward.template fft<RealPart, FFT_REVERSE>(fft);
for (int i = 0; i < n; ++i) {
diff --git a/unsupported/test/cxx11_tensor_morphing.cpp b/unsupported/test/cxx11_tensor_morphing.cpp
index f01b95357..59f4c34b3 100644
--- a/unsupported/test/cxx11_tensor_morphing.cpp
+++ b/unsupported/test/cxx11_tensor_morphing.cpp
@@ -51,8 +51,8 @@ static void test_static_reshape() {
// New dimensions: [2, 3, 7]
Eigen::IndexList<type2index<2>, type2index<3>, type2index<7>> dim;
- Tensor<float, 3> reshaped = tensor.reshape(static_cast<Eigen::DSizes<long,3>>(dim));
-
+ Tensor<float, 3> reshaped = tensor.reshape(static_cast<Eigen::DSizes<ptrdiff_t,3>>(dim));
+
for (int i = 0; i < 2; ++i) {
for (int j = 0; j < 3; ++j) {
diff --git a/unsupported/test/cxx11_tensor_of_float16_gpu.cu b/unsupported/test/cxx11_tensor_of_float16_gpu.cu
index 4d74e6138..c55676c76 100644
--- a/unsupported/test/cxx11_tensor_of_float16_gpu.cu
+++ b/unsupported/test/cxx11_tensor_of_float16_gpu.cu
@@ -64,7 +64,7 @@ void test_gpu_conversion() {
Eigen::GpuStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
int num_elem = 101;
-
+
float* d_float = (float*)gpu_device.allocate(num_elem * sizeof(float));
Eigen::half* d_half = (Eigen::half*)gpu_device.allocate(num_elem * sizeof(Eigen::half));
float* d_conv = (float*)gpu_device.allocate(num_elem * sizeof(float));
@@ -322,7 +322,7 @@ template<typename>
void test_gpu_reductions(int size1, int size2, int redux) {
std::cout << "Reducing " << size1 << " by " << size2
- << " tensor along dim " << redux << std::endl;
+ << " tensor along dim " << redux << std::endl;
Eigen::GpuStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
@@ -346,7 +346,7 @@ void test_gpu_reductions(int size1, int size2, int redux) {
gpu_float1.device(gpu_device) = gpu_float1.random() * 2.0f;
gpu_float2.device(gpu_device) = gpu_float2.random() * 2.0f;
- Eigen::array<int, 1> redux_dim = {{redux}};
+ Eigen::array<int, 1> redux_dim = {redux};
gpu_res_float.device(gpu_device) = gpu_float1.sum(redux_dim).cast<Eigen::half>();
gpu_res_half.device(gpu_device) = gpu_float1.cast<Eigen::half>().sum(redux_dim);
diff --git a/unsupported/test/cxx11_tensor_uint128.cpp b/unsupported/test/cxx11_tensor_uint128.cpp
index 07691df98..46fceaa19 100644
--- a/unsupported/test/cxx11_tensor_uint128.cpp
+++ b/unsupported/test/cxx11_tensor_uint128.cpp
@@ -12,7 +12,7 @@
#include <Eigen/CXX11/Tensor>
-#if EIGEN_COMP_MSVC
+#if EIGEN_COMP_MSVC || !defined(__SIZEOF_INT128__)
#define EIGEN_NO_INT128
#else
typedef __uint128_t uint128_t;
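
With the guard above, the 128-bit checks compile out entirely on affected targets. A hedged, self-contained sketch of that skip pattern follows (plain C++; test_uint128_sketch and the printed messages are illustrative, not the actual Eigen test harness):

#include <cstdint>
#include <cstdio>

#if defined(_MSC_VER) || !defined(__SIZEOF_INT128__)
#define NO_INT128_SKETCH   // same detection as above; hypothetical macro name
#endif

static void test_uint128_sketch() {
#ifdef NO_INT128_SKETCH
  // i386 / 32-bit ARM: the type does not exist, so there is nothing to test.
  std::printf("skipping: __uint128_t not available on this target\n");
#else
  __uint128_t a = (__uint128_t)UINT64_MAX + 1;  // 2^64, needs more than 64 bits
  std::printf("high 64 bits: %llu\n", (unsigned long long)(a >> 64));  // prints 1
#endif
}

int main() { test_uint128_sketch(); return 0; }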