aboutsummaryrefslogtreecommitdiffhomepage
path: root/unsupported/test/cxx11_tensor_contraction_mkldnn.cpp
blob: 5a905c0cf0ed74d0c9dcce4a75cd10c7acb9a270 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2018 Eugene Zhulenev <ezhulenev@google.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#include "main.h"

// Nothing to test here if we do not have mkldnn enabled.
#if defined(EIGEN_USE_MKLDNN)

#include <Eigen/CXX11/Tensor>

using Eigen::array;
using Eigen::ColMajor;
using Eigen::Tensor;
using Eigen::Index;
using Eigen::internal::blas_data_mapper;
using Eigen::internal::mkldnn_gemm_kernel;
using Eigen::internal::mkldnn_gemm_pack;

// Builds an array of NumDims random dimensions, each drawn uniformly from
// the inclusive range [min_dim, max_dim].
template <int NumDims>
static array<Index, NumDims> RandomDims(int min_dim = 1, int max_dim = 20) {
  array<Index, NumDims> result;
  for (int d = 0; d < NumDims; ++d) {
    result[d] = internal::random<int>(min_dim, max_dim);
  }
  return result;
}

// Packing with mkldnn_gemm_pack is the same as taking a slice of 2 dimensional
// Tensor.
template <typename Scalar>
static void test_mkldnn_gemm_pack() {
  static const int Options = 0 | ColMajor;

  typedef blas_data_mapper<Scalar, Index, ColMajor> DataMapper;
  typedef mkldnn_gemm_pack<Scalar, Index, DataMapper, ColMajor> MkldnnGemmPack;
  typedef Tensor<Scalar, 2, Options, Index> Tensor2d;

  array<Index, 2> dims = RandomDims<2>(1, 500);

  // Create a tensor initialized with random data.
  Tensor2d src(dims);
  src.setRandom();

  // Pick a random slice of src tensor.
  array<Index, 2> slice_start = RandomDims<2>(0, 250);
  array<Index, 2> slice_size = RandomDims<2>(100, 500);
  // Make sure that slice start + size do not overflow tensor dims.
  for (int i = 0; i < 2; ++i) {
    slice_start[i] = numext::mini(dims[i] - 1, slice_start[i]);
    slice_size[i] = numext::mini(slice_size[i], dims[i] - slice_start[i]);
  }

  // Prepare tensors for packing and slicing results.
  Tensor2d pack_dst(slice_size[0], slice_size[1]);
  Tensor2d slice_dst(slice_size[0], slice_size[1]);

  // Pack memory using mkldnn_gemm_pack.
  DataMapper data_mapper(src.data(), dims[0]);
  MkldnnGemmPack gemm_pack;
  gemm_pack(pack_dst.data(),
            data_mapper.getSubMapper(slice_start[0], slice_start[1]),
            slice_size[0], slice_size[1]);
  // Slice the source tensor.
  slice_dst = src.slice(slice_start, slice_size);

  // Verify that dst tensors are equal.
  VERIFY_IS_EQUAL(pack_dst.dimensions().TotalSize(),
                  slice_dst.dimensions().TotalSize());
  for (Index i = 0; i < pack_dst.dimensions().TotalSize(); ++i) {
    Scalar packed = pack_dst.coeff(i);
    Scalar sliced = slice_dst.coeff(i);
    VERIFY_IS_EQUAL(packed, sliced);
  }
}
// Verifies that a matmul computed by the mkldnn gemm kernel matches a
// reference product computed with Eigen::Matrix on the same random data.
template <typename Scalar>
static void test_mkldnn_gemm_kernel() {
  static const int Options = 0 | ColMajor;

  typedef Tensor<Scalar, 2, Options, Index> Tensor2d;

  // Random problem size: lhs is (m x k), rhs is (k x n), output is (m x n).
  int m = internal::random<int>(1, 100);
  int n = internal::random<int>(1, 100);
  int k = internal::random<int>(1, 100);

  Tensor2d lhs(m, k);
  lhs.setRandom();

  Tensor2d rhs(k, n);
  rhs.setRandom();

  // Compute matmul with mkldnn gemm kernel.
  typedef blas_data_mapper<Scalar, Index, ColMajor> OutputMapper;
  typedef mkldnn_gemm_kernel<Scalar, Index, OutputMapper, ColMajor>
      MkldnnGemmKernel;

  // The kernel accumulates into the output, so start from zero.
  Tensor2d mkldnn_result(m, n);
  mkldnn_result.setZero();

  // Column-major output: leading dimension (stride) is m.
  OutputMapper output_mapper(mkldnn_result.data(), m);
  MkldnnGemmKernel gemm_kernel;
  gemm_kernel(output_mapper, lhs.data(), rhs.data(), m, k, n, /*alpha*/ 1.0);

  // Compute matmul with Eigen::Matrix.
  typedef Eigen::Matrix<Scalar, Dynamic, Dynamic, ColMajor> Matrix;
  typedef Map<Eigen::Matrix<Scalar, Dynamic, Dynamic, ColMajor> > MatrixMap;

  // Map the tensor buffers as matrices so no data is copied.
  MatrixMap lhs_mat(lhs.data(), m, k);
  MatrixMap rhs_mat(rhs.data(), k, n);
  Matrix matmul_result(m, n);
  matmul_result.setZero();

  matmul_result = lhs_mat * rhs_mat;

  static const float error_threshold = 1e-4f;

  // Verify that results are equal.
  for (Index i = 0; i < m * n; ++i) {
    // mkldnn_result is addressed linearly; convert i to (row, col) for the
    // column-major reference matrix.
    Scalar gemm = mkldnn_result(i);
    Scalar matmul = matmul_result(i % m, i / m);
    // Only compare values whose magnitude is above the threshold: near-zero
    // results are dominated by rounding noise and isApprox's relative test
    // is unreliable there. (std::abs) is parenthesized to suppress macro /
    // ADL interference.
    if ((std::abs)(gemm) > error_threshold &&
        (std::abs)(matmul) > error_threshold) {
      // Print the mismatching pair before VERIFY fires, to ease debugging.
      if (!Eigen::internal::isApprox(gemm, matmul, error_threshold))
        std::cout << "gemm=" << gemm << " matmul=" << matmul << std::endl;
      VERIFY(Eigen::internal::isApprox(gemm, matmul, error_threshold));
    }
  }
}

// Register both subtests; float is the only scalar type the mkldnn gemm
// path is exercised with here.
EIGEN_DECLARE_TEST(cxx11_tensor_contraction_mkldnn) {
  CALL_SUBTEST(test_mkldnn_gemm_pack<float>());
  CALL_SUBTEST(test_mkldnn_gemm_kernel<float>());
}
#else
// mkldnn support is disabled at build time: register an empty test so the
// test binary still builds and links.
EIGEN_DECLARE_TEST(cxx11_tensor_contraction_mkldnn) {}
#endif  // EIGEN_USE_MKLDNN