#ifndef TENSORFLOW_KERNELS_RELU_OP_H_
#define TENSORFLOW_KERNELS_RELU_OP_H_
// Functor definitions for ReluOp, ReluGradOp, Relu6Op, and Relu6GradOp;
// must be compilable by nvcc.

#include "tensorflow/core/framework/tensor_types.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"

namespace tensorflow {
namespace functor {
// Functor used by ReluOp to do the computations.
template <typename Device, typename T>
struct Relu {
// Computes Relu activation.
//
// features: any shape.
// activations: same shape as "features".
void operator()(const Device& d, typename TTypes<T>::ConstTensor features,
typename TTypes<T>::Tensor activations) {
activations.device(d) = features.cwiseMax(static_cast<T>(0));
}
};
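
// Usage sketch (illustrative; the actual kernel wrappers live alongside
// this header in relu_op.cc): an element-wise OpKernel would typically
// invoke this functor from its compute method roughly as
//
//   functor::Relu<Device, T> relu;
//   relu(context->eigen_device<Device>(), input.flat<T>(),
//        output->flat<T>());
//
// where `context`, `input`, and `output` are the usual OpKernelContext*,
// input Tensor, and pre-allocated output Tensor.
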
// Functor used by ReluGradOp to do the computations.
template <typename Device, typename T>
struct ReluGrad {
// Computes ReluGrad backprops.
//
// gradients: gradients backpropagated to the Relu op.
// features: inputs that were passed to the Relu op.
// backprops: gradients to backpropagate to the Relu inputs.
void operator()(const Device& d, typename TTypes<T>::ConstTensor gradients,
typename TTypes<T>::ConstTensor features,
typename TTypes<T>::Tensor backprops) {
// NOTE: When the activation is exactly zero, we arbitrarily choose to not
// propagate the associated gradient value.
backprops.device(d) =
gradients * (features > features.constant(static_cast<T>(0)));
}
};
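
// Worked example (illustrative): with features = [-1, 0, 2] and
// gradients = [g0, g1, g2], the mask (features > 0) evaluates to
// [0, 0, 1], so backprops = [0, 0, g2]; the gradient flows only where
// the input was strictly positive.
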
// Functor used by Relu6Op to do the computations.
template <typename Device, typename T>
struct Relu6 {
// Computes Relu6 activation.
//
// features: any shape.
// activations: same shape as "features".
void operator()(const Device& d, typename TTypes<T>::ConstTensor features,
typename TTypes<T>::Tensor activations) {
activations.device(d) =
features.cwiseMax(static_cast<T>(0)).cwiseMin(static_cast<T>(6));
}
};
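
// For example (illustrative), Relu6 maps [-2, 3, 9] to [0, 3, 6]:
// negative inputs clamp to 0 and inputs above 6 clamp to 6.
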
// Functor used by Relu6GradOp to do the computations.
template <typename Device, typename T>
struct Relu6Grad {
// Computes Relu6Grad backprops.
//
// gradients: gradients backpropagated to the Relu6 op.
// features: inputs that were passed to the Relu6 op.
// backprops: gradients to backpropagate to the Relu6 inputs.
void operator()(const Device& d, typename TTypes<T>::ConstTensor gradients,
typename TTypes<T>::ConstTensor features,
typename TTypes<T>::Tensor backprops) {
// NOTE: When the activation is exactly zero or six, we
// arbitrarily choose to not propagate the associated gradient
// value.
backprops.device(d) = gradients *
(features > features.constant(static_cast<T>(0))) *
(features < features.constant(static_cast<T>(6)));
}
};
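
// Worked example (illustrative): with features = [-1, 0, 3, 6, 9], the
// combined mask (features > 0) * (features < 6) evaluates to
// [0, 0, 1, 0, 0], so only the entry strictly inside (0, 6) receives its
// incoming gradient.
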
}  // namespace functor
}  // namespace tensorflow

#endif  // TENSORFLOW_KERNELS_RELU_OP_H_
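
// Standalone sketch (hypothetical test harness, not part of TensorFlow):
// exercising the Relu functor on the Eigen default device. Assumes this
// header is reachable as tensorflow/core/kernels/relu_op.h and that Eigen
// is on the include path.
//
//   #include "tensorflow/core/kernels/relu_op.h"
//
//   int main() {
//     alignas(64) float in[4] = {-1.f, 0.f, 2.f, 7.f};
//     alignas(64) float out[4];
//     tensorflow::TTypes<float>::ConstTensor features(in, 4);
//     tensorflow::TTypes<float>::Tensor activations(out, 4);
//     Eigen::DefaultDevice d;
//     tensorflow::functor::Relu<Eigen::DefaultDevice, float>()(
//         d, features, activations);
//     // out now holds {0, 0, 2, 7}.
//     return 0;
//   }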