path: root/tensorflow/core/kernels/concat_lib_cpu.cc
/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#define EIGEN_USE_THREADS

#include "tensorflow/core/kernels/concat_lib_cpu.h"
#include <vector>
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/kernels/concat_lib.h"

namespace tensorflow {

namespace {
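// Copier used by ConcatCPUImpl to move a contiguous run of `n` elements from
// an input slice into the output. For element types that are memcpy-safe it
// performs a single memcpy; otherwise it falls back to element-wise
// assignment. `input_index` is unused here but is part of the copier
// interface that ConcatCPUImpl expects.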
template <typename T>
struct MemCpyCopier {
  inline void Copy(T* dst, const T* src, int input_index, size_t n) {
    if (DataTypeCanUseMemcpy(DataTypeToEnum<T>::v())) {
      memcpy(dst, src, n * sizeof(T));
    } else {
      for (size_t k = 0; k < n; ++k) {
        *dst++ = *src++;
      }
    }
  }
};
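// ResourceHandle is never memcpy-safe, so its specialization always copies
// element by element.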
template <>
struct MemCpyCopier<ResourceHandle> {
  inline void Copy(ResourceHandle* dst, const ResourceHandle* src,
                   int input_index, size_t n) {
    for (size_t k = 0; k < n; ++k) {
      *dst++ = *src++;
    }
  }
};

}  // namespace

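// Concatenates the 2-D views in `inputs` along their second dimension into
// `output`, sharding the copy work across threads via ConcatCPUImpl. Strings
// are given an artificially large cost_per_unit so that the work is spread
// over separate threads; all other types use sizeof(T) as the per-element
// cost.
//
// A sketch of a call site (the identifiers below are illustrative; the real
// caller is the Concat op kernel): each input tensor is reshaped to a 2-D
// view whose second dimension spans the concatenation axis and everything
// after it, and those views are passed here together with a matching 2-D
// view of the output.
//
//   std::vector<std::unique_ptr<typename TTypes<float, 2>::ConstMatrix>> in;
//   in.emplace_back(new typename TTypes<float, 2>::ConstMatrix(
//       t0.shaped<float, 2>({rows, cols0})));
//   in.emplace_back(new typename TTypes<float, 2>::ConstMatrix(
//       t1.shaped<float, 2>({rows, cols1})));
//   auto out = output_tensor->shaped<float, 2>({rows, cols0 + cols1});
//   ConcatCPU<float>(context->device(), in, &out);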
template <typename T>
void ConcatCPU(DeviceBase* d,
               const std::vector<
                   std::unique_ptr<typename TTypes<T, 2>::ConstMatrix>>& inputs,
               typename TTypes<T, 2>::Matrix* output) {
  if (std::is_same<T, string>::value) {
    // Use a large cost here to force strings to be handled by separate
    // threads.
    ConcatCPUImpl<T>(d, inputs, 100000, MemCpyCopier<T>(), output);
  } else {
    ConcatCPUImpl<T>(d, inputs, sizeof(T) /* cost_per_unit */,
                     MemCpyCopier<T>(), output);
  }
}

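// Explicit instantiations of ConcatCPU for every type registered for CPU,
// plus the quantized types and bfloat16.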
#define REGISTER(T)                                                            \
  template void ConcatCPU<T>(                                                  \
      DeviceBase*,                                                             \
      const std::vector<std::unique_ptr<typename TTypes<T, 2>::ConstMatrix>>&, \
      typename TTypes<T, 2>::Matrix* output);
TF_CALL_ALL_TYPES(REGISTER)
REGISTER(quint8)
REGISTER(qint8)
REGISTER(quint16)
REGISTER(qint16)
REGISTER(qint32)
REGISTER(bfloat16)

#if defined(IS_MOBILE_PLATFORM) && !defined(SUPPORT_SELECTIVE_REGISTRATION) && \
    !defined(__ANDROID_TYPES_FULL__)
// Primarily used for SavedModel support on mobile. Register string here only
// if __ANDROID_TYPES_FULL__ is not defined, as that configuration already
// registers strings.
REGISTER(string);
#endif  // defined(IS_MOBILE_PLATFORM) &&
        // !defined(SUPPORT_SELECTIVE_REGISTRATION) &&
        // !defined(__ANDROID_TYPES_FULL__)

#ifdef TENSORFLOW_USE_SYCL
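// SYCL counterpart of ConcatCPU. Only the GPU number types (excluding half)
// are instantiated below, so a plain sizeof(T) cost_per_unit suffices.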
template <typename T>
void ConcatSYCL(
    const Eigen::SyclDevice& d,
    const std::vector<std::unique_ptr<typename TTypes<T, 2>::ConstMatrix>>&
        inputs,
    typename TTypes<T, 2>::Matrix* output) {
  ConcatSYCLImpl<T>(d, inputs, sizeof(T) /* cost_per_unit */,
                    MemCpyCopier<T>(), output);
}
#define REGISTER_SYCL(T)                                                      \
  template void ConcatSYCL<T>(                                                \
      const Eigen::SyclDevice&,                                               \
      const std::vector<std::unique_ptr<typename TTypes<T, 2>::ConstMatrix>>&,\
      typename TTypes<T, 2>::Matrix* output);

TF_CALL_GPU_NUMBER_TYPES_NO_HALF(REGISTER_SYCL)

#undef REGISTER_SYCL
#endif  // TENSORFLOW_USE_SYCL
}  // namespace tensorflow