1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
|
// See docs in ../ops/nn_ops.cc.
#define EIGEN_USE_THREADS
#include "tensorflow/core/framework/numeric_op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/kernels/bias_op.h"
#include "tensorflow/core/public/tensor.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
namespace tensorflow {
// Shorthand aliases for the Eigen devices on which the Bias functor is
// specialized; referenced by BiasOp and the kernel registrations below.
typedef Eigen::ThreadPoolDevice CPUDevice;
typedef Eigen::GpuDevice GPUDevice;
template <typename Device, typename T>
class BiasOp : public BinaryOp<T> {
 public:
  explicit BiasOp(OpKernelConstruction* context) : BinaryOp<T>(context) {}

  // Validates the operands, allocates the output tensor, and dispatches to
  // the rank-specialized helper that performs the broadcasted addition.
  void Compute(OpKernelContext* context) override {
    const Tensor& input = context->input(0);
    const Tensor& bias = context->input(1);

    // The input must have rank >= 2 and the bias must be a vector whose
    // length matches the input's trailing dimension.
    OP_REQUIRES(context, TensorShapeUtils::IsMatrixOrHigher(input.shape()),
                errors::InvalidArgument("Input tensor must be at least 2D: ",
                                        input.shape().DebugString()));
    OP_REQUIRES(context, TensorShapeUtils::IsVector(bias.shape()),
                errors::InvalidArgument("Biases must be 1D: ",
                                        bias.shape().DebugString()));
    const int rank = input.shape().dims();
    OP_REQUIRES(
        context, bias.shape().dim_size(0) == input.shape().dim_size(rank - 1),
        errors::InvalidArgument(
            "Must provide as many biases as the last dimension "
            "of the input tensor: ",
            bias.shape().DebugString(), " vs. ", input.shape().DebugString()));

    Tensor* output = nullptr;
    OP_REQUIRES_OK(context,
                   context->allocate_output(0, input.shape(), &output));

    // The Bias functor is templated on the tensor rank, so map the runtime
    // rank onto the matching compile-time instantiation.
    if (rank == 2) {
      Compute<2>(context, input, bias, output);
    } else if (rank == 3) {
      Compute<3>(context, input, bias, output);
    } else if (rank == 4) {
      Compute<4>(context, input, bias, output);
    } else if (rank == 5) {
      Compute<5>(context, input, bias, output);
    } else {
      OP_REQUIRES(context, false,
                  errors::InvalidArgument("Only ranks up to 5 supported: ",
                                          input.shape().DebugString()));
    }
  }

  // Adds the bias vector to `input` (rank NDIMS), writing into `output`
  // via the device-specific Bias functor.
  template <int NDIMS>
  void Compute(OpKernelContext* ctx, const Tensor& input, const Tensor& bias,
               Tensor* output) {
    functor::Bias<Device, T, NDIMS> functor;
    functor(ctx->eigen_device<Device>(), input.tensor<T, NDIMS>(),
            bias.vec<T>(), output->tensor<T, NDIMS>());
  }
};
// Registration of the CPU implementations: one BiasAdd kernel per
// supported numeric element type T.
#define REGISTER_KERNEL(type) \
REGISTER_KERNEL_BUILDER( \
Name("BiasAdd").Device(DEVICE_CPU).TypeConstraint<type>("T"), \
BiasOp<CPUDevice, type>);
TF_CALL_NUMBER_TYPES(REGISTER_KERNEL);
#undef REGISTER_KERNEL
#if GOOGLE_CUDA
// Forward declarations of the functor specializations for GPU.
namespace functor {
// Declares the operator() of the GPU Bias functor for a given (T, Dims)
// pair and references its explicit instantiation, which is compiled in the
// separate CUDA translation unit (bias_op_gpu.cu.cc — presumably; the
// definition is not visible in this file).
#define DECLARE_GPU_SPEC(T, Dims) \
template <> \
void Bias<GPUDevice, T, Dims>::operator()( \
const GPUDevice& d, typename TTypes<T, Dims>::ConstTensor input, \
typename TTypes<T>::ConstVec bias, \
typename TTypes<T, Dims>::Tensor output); \
extern template struct Bias<GPUDevice, T, Dims>;
// Declares the specializations for every input rank BiasOp dispatches on
// (2 through 5, matching the switch in BiasOp::Compute).
#define DECLARE_GPU_SPECS(T) \
DECLARE_GPU_SPEC(T, 2); \
DECLARE_GPU_SPEC(T, 3); \
DECLARE_GPU_SPEC(T, 4); \
DECLARE_GPU_SPEC(T, 5);
TF_CALL_GPU_NUMBER_TYPES(DECLARE_GPU_SPECS);
}  // namespace functor
// Registration of the GPU implementations.
#define REGISTER_GPU_KERNEL(type) \
REGISTER_KERNEL_BUILDER( \
Name("BiasAdd").Device(DEVICE_GPU).TypeConstraint<type>("T"), \
BiasOp<GPUDevice, type>);
TF_CALL_GPU_NUMBER_TYPES(REGISTER_GPU_KERNEL);
#endif  // GOOGLE_CUDA
} // namespace tensorflow
|