// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2016 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#define EIGEN_TEST_NO_LONGDOUBLE

#define EIGEN_USE_GPU

#include "main.h"
#include <unsupported/Eigen/CXX11/Tensor>

using Eigen::Tensor;

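// Runs the complex coefficient-wise ops (+, -, *, /, unary -) on the GPU for
// tensors of std::complex<T> and checks each element against the same
// operation computed on the host with plain std::complex arithmetic.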
template<typename T>
void test_cuda_complex_cwise_ops() {
  const int kNumItems = 2;
  std::size_t complex_bytes = kNumItems * sizeof(std::complex<T>);

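  // Raw device buffers for the two inputs and the result.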
  std::complex<T>* d_in1;
  std::complex<T>* d_in2;
  std::complex<T>* d_out;
  cudaMalloc((void**)(&d_in1), complex_bytes);
  cudaMalloc((void**)(&d_in2), complex_bytes);
  cudaMalloc((void**)(&d_out), complex_bytes);

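  // A GpuDevice backed by its own CUDA stream; expressions assigned through
  // .device(gpu_device) are evaluated asynchronously on that stream.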
  Eigen::GpuStreamDevice stream;
  Eigen::GpuDevice gpu_device(&stream);

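  // View the device buffers as rank-1 tensors of length kNumItems so they can
  // be used in Eigen tensor expressions.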
  Eigen::TensorMap<Eigen::Tensor<std::complex<T>, 1, 0, int>, Eigen::Aligned> gpu_in1(
      d_in1, kNumItems);
  Eigen::TensorMap<Eigen::Tensor<std::complex<T>, 1, 0, int>, Eigen::Aligned> gpu_in2(
      d_in2, kNumItems);
  Eigen::TensorMap<Eigen::Tensor<std::complex<T>, 1, 0, int>, Eigen::Aligned> gpu_out(
      d_out, kNumItems);

  const std::complex<T> a(3.14f, 2.7f);
  const std::complex<T> b(-10.6f, 1.4f);

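  // Fill both inputs with fixed complex constants directly on the device.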
  gpu_in1.device(gpu_device) = gpu_in1.constant(a);
  gpu_in2.device(gpu_device) = gpu_in2.constant(b);

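  // The coefficient-wise operations exercised by the loop below.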
  enum CwiseOp {
    Add = 0,
    Sub,
    Mul,
    Div,
    Neg,
    NbOps
  };

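  // For each op: evaluate the expression on the GPU, copy the result back to
  // the host, and compare every element with the expected host-side value.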
  Tensor<std::complex<T>, 1, 0, int> actual(kNumItems);
  for (int op = Add; op < NbOps; op++) {
    std::complex<T> expected;
    switch (static_cast<CwiseOp>(op)) {
      case Add:
        gpu_out.device(gpu_device) = gpu_in1 + gpu_in2;
        expected = a + b;
        break;
      case Sub:
        gpu_out.device(gpu_device) = gpu_in1 - gpu_in2;
        expected = a - b;
        break;
      case Mul:
        gpu_out.device(gpu_device) = gpu_in1 * gpu_in2;
        expected = a * b;
        break;
      case Div:
        gpu_out.device(gpu_device) = gpu_in1 / gpu_in2;
        expected = a / b;
        break;
      case Neg:
        gpu_out.device(gpu_device) = -gpu_in1;
        expected = -a;
        break;
      case NbOps:
        break;
    }
    // Use VERIFY (not assert) so the copy and synchronization still execute
    // when the test is built with NDEBUG defined.
    VERIFY(cudaMemcpyAsync(actual.data(), d_out, complex_bytes, cudaMemcpyDeviceToHost,
                           gpu_device.stream()) == cudaSuccess);
    VERIFY(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);

    for (int i = 0; i < kNumItems; ++i) {
      VERIFY_IS_APPROX(actual(i), expected);
    }
  }

  cudaFree(d_in1);
  cudaFree(d_in2);
  cudaFree(d_out);
}


EIGEN_DECLARE_TEST(test_cxx11_tensor_complex_cwise_ops)
{
  CALL_SUBTEST(test_cuda_complex_cwise_ops<float>());
  CALL_SUBTEST(test_cuda_complex_cwise_ops<double>());
}