aboutsummaryrefslogtreecommitdiffhomepage
path: root/tensorflow/cc/gradients
diff options
context:
space:
mode:
authorGravatar KB Sriram <kbsriram@gmail.com>2018-02-22 07:21:39 -0800
committerGravatar KB Sriram <kbsriram@gmail.com>2018-02-23 06:46:42 -0800
commit7d095f1bccc9d923fe64420e552268d220160488 (patch)
tree813252707e9e469410e92bd0dcc2a3ab0fae9cea /tensorflow/cc/gradients
parent9054c9b2ac303cbd1538166d0821f389cbc75894 (diff)
C++ gradients for MaxPool3D, AvgPool and AvgPool3D
Resolves tensorflow/tensorflow#17195
Diffstat (limited to 'tensorflow/cc/gradients')
-rw-r--r--tensorflow/cc/gradients/nn_grad.cc64
-rw-r--r--tensorflow/cc/gradients/nn_grad_test.cc44
2 files changed, 105 insertions, 3 deletions
diff --git a/tensorflow/cc/gradients/nn_grad.cc b/tensorflow/cc/gradients/nn_grad.cc
index 13a3bba5e6..63a67f09f6 100644
--- a/tensorflow/cc/gradients/nn_grad.cc
+++ b/tensorflow/cc/gradients/nn_grad.cc
@@ -196,6 +196,70 @@ Status MaxPoolGradV2Helper(const Scope& scope, const Operation& op,
}
REGISTER_GRADIENT_OP("MaxPoolV2", MaxPoolGradV2Helper);
+// Gradient helper for MaxPool3D. Mirrors MaxPoolGradV2Helper above: the
+// forward op's pooling attributes are copied and handed, together with the
+// forward input, forward output and the incoming gradient, to MaxPool3DGrad.
+Status MaxPool3DGradHelper(const Scope& scope, const Operation& op,
+ const std::vector<Output>& grad_inputs,
+ std::vector<Output>* grad_outputs) {
+ std::vector<int32> ksize;
+ std::vector<int32> strides;
+ string padding;
+ string data_format;
+ // op.output(0).node() is the forward MaxPool3D node itself; read the
+ // attributes it was constructed with so the gradient op pools over
+ // exactly the same windows.
+ auto attrs = op.output(0).node()->attrs();
+ TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "ksize", &ksize));
+ TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "strides", &strides));
+ TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "padding", &padding));
+ TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "data_format", &data_format));
+ MaxPool3DGrad::Attrs grad_attrs;
+ grad_attrs.DataFormat(data_format);
+ // Unlike the AvgPool gradients below, MaxPool3DGrad needs the original
+ // input and output values (not just the shape) in addition to the
+ // incoming gradient.
+ auto dx = MaxPool3DGrad(scope, op.input(0), op.output(0), grad_inputs[0],
+ ksize, strides, padding, grad_attrs);
+ grad_outputs->push_back(dx);
+ // Propagate any error recorded on the scope while building the subgraph.
+ return scope.status();
+}
+REGISTER_GRADIENT_OP("MaxPool3D", MaxPool3DGradHelper);
+
+// Gradient helper for AvgPool. Copies the forward op's pooling attributes
+// and builds an internal::AvgPoolGrad node; average pooling's gradient only
+// needs the shape of the forward input, not its values.
+Status AvgPoolGradHelper(const Scope& scope, const Operation& op,
+ const std::vector<Output>& grad_inputs,
+ std::vector<Output>* grad_outputs) {
+ std::vector<int32> ksize;
+ std::vector<int32> strides;
+ string padding;
+ string data_format;
+ // op.output(0).node() is the forward AvgPool node; reuse its attributes
+ // so the gradient is computed over identical windows.
+ auto attrs = op.output(0).node()->attrs();
+ TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "ksize", &ksize));
+ TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "strides", &strides));
+ TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "padding", &padding));
+ TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "data_format", &data_format));
+ internal::AvgPoolGrad::Attrs grad_attrs;
+ grad_attrs.DataFormat(data_format);
+ // AvgPoolGrad is a hidden op (internal namespace); it takes the forward
+ // input's shape plus the incoming gradient.
+ auto dx =
+ internal::AvgPoolGrad(scope, Shape(scope, op.input(0)), grad_inputs[0],
+ ksize, strides, padding, grad_attrs);
+ grad_outputs->push_back(dx);
+ // Propagate any error recorded on the scope while building the subgraph.
+ return scope.status();
+}
+REGISTER_GRADIENT_OP("AvgPool", AvgPoolGradHelper);
+
+// Gradient helper for AvgPool3D. Same structure as AvgPoolGradHelper: the
+// forward op's pooling attributes are reused, and AvgPool3DGrad consumes the
+// forward input's shape plus the incoming gradient.
+Status AvgPool3DGradHelper(const Scope& scope, const Operation& op,
+ const std::vector<Output>& grad_inputs,
+ std::vector<Output>* grad_outputs) {
+ std::vector<int32> ksize;
+ std::vector<int32> strides;
+ string padding;
+ string data_format;
+ // op.output(0).node() is the forward AvgPool3D node; copy its attributes
+ // so the gradient pools over identical windows.
+ auto attrs = op.output(0).node()->attrs();
+ TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "ksize", &ksize));
+ TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "strides", &strides));
+ TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "padding", &padding));
+ TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "data_format", &data_format));
+ AvgPool3DGrad::Attrs grad_attrs;
+ grad_attrs.DataFormat(data_format);
+ auto dx = AvgPool3DGrad(scope, Shape(scope, op.input(0)), grad_inputs[0],
+ ksize, strides, padding, grad_attrs);
+ grad_outputs->push_back(dx);
+ // Propagate any error recorded on the scope while building the subgraph.
+ return scope.status();
+}
+REGISTER_GRADIENT_OP("AvgPool3D", AvgPool3DGradHelper);
+
Status LRNGradHelper(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs){
diff --git a/tensorflow/cc/gradients/nn_grad_test.cc b/tensorflow/cc/gradients/nn_grad_test.cc
index 0cfe5f6e3c..c4eba7ecb0 100644
--- a/tensorflow/cc/gradients/nn_grad_test.cc
+++ b/tensorflow/cc/gradients/nn_grad_test.cc
@@ -31,8 +31,11 @@ using ops::Elu;
using ops::L2Loss;
using ops::LogSoftmax;
using ops::LRN;
+using ops::AvgPool;
+using ops::AvgPool3D;
using ops::MaxPool;
using ops::MaxPoolV2;
+using ops::MaxPool3D;
using ops::Placeholder;
using ops::Relu;
using ops::Relu6;
@@ -70,9 +73,9 @@ class NNGradTest : public ::testing::Test {
// Sets tensor with random values, ensuring that the max value is largest by
// a reasonable amount.
- // This is an issue for MaxPool and MaxPoolV2, in which perturbations by the
- // numeric gradient computation in the gradient checker can change the max
- // value if values are too close together.
+ // This is an issue for MaxPool, MaxPoolV2 and MaxPool3D, in which
+ // perturbations by the numeric gradient computation in the gradient checker
+ // can change the max value if values are too close together.
template <typename T>
void SetRandomValuesWithBumpedMax(Tensor* tensor) {
auto tensor_flat = tensor->flat<T>();
@@ -203,6 +206,41 @@ TEST_F(NNGradTest, MaxPoolGradV2Helper) {
RunTest(x, x_init_value, y, y_shape);
}
+// Checks the registered MaxPool3D gradient against the numeric gradient.
+// A single 3x3x3 VALID window with matching strides reduces the whole
+// 3x3x3 volume to one output element.
+TEST_F(NNGradTest, MaxPool3DGradHelper) {
+ TensorShape x_shape({1, 3, 3, 3, 1});
+ TensorShape y_shape({1, 1, 1, 1, 1});
+ auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
+ // Setup window and strides so that we only do one MaxPool3D.
+ const std::vector<int> ksize{1, 3, 3, 3, 1};
+ const std::vector<int> strides{1, 3, 3, 3, 1};
+ auto y = MaxPool3D(scope_, x, ksize, strides, "VALID");
+ Tensor x_init_value = Tensor(DT_FLOAT, x_shape);
+ // Bump the max value so the checker's perturbations cannot change which
+ // element is the max (see SetRandomValuesWithBumpedMax).
+ SetRandomValuesWithBumpedMax<float>(&x_init_value);
+ RunTest(x, x_init_value, y, y_shape);
+}
+
+// Checks the registered AvgPool gradient against the numeric gradient.
+// No bumped-max initialization is needed: averaging is smooth, so small
+// perturbations cannot change which elements contribute.
+TEST_F(NNGradTest, AvgPoolGradHelper) {
+ TensorShape x_shape({1, 2, 2, 1});
+ TensorShape y_shape({1, 1, 1, 1});
+ auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
+ // Setup window and strides so that we only do one AvgPool.
+ const std::vector<int> ksize{1, 2, 2, 1};
+ const std::vector<int> strides{1, 2, 2, 1};
+ auto y = AvgPool(scope_, x, ksize, strides, "SAME");
+ RunTest(x, x_shape, y, y_shape);
+}
+
+// Checks the registered AvgPool3D gradient against the numeric gradient.
+// As with AvgPool above, random initialization suffices since averaging
+// has no discrete argmax to perturb.
+TEST_F(NNGradTest, AvgPool3DGradHelper) {
+ TensorShape x_shape({1, 3, 3, 3, 1});
+ TensorShape y_shape({1, 1, 1, 1, 1});
+ auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
+ // Setup window and strides so that we only do one AvgPool3D.
+ const std::vector<int> ksize{1, 3, 3, 3, 1};
+ const std::vector<int> strides{1, 3, 3, 3, 1};
+ auto y = AvgPool3D(scope_, x, ksize, strides, "SAME");
+ RunTest(x, x_shape, y, y_shape);
+}
+
TEST_F(NNGradTest, LRN){
TensorShape x_shape({1, 1, 2, 1});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));