author    Akshay Modi <nareshmodi@google.com>  2018-06-18 11:48:36 -0700
committer TensorFlower Gardener <gardener@tensorflow.org>  2018-06-18 11:55:03 -0700
commit    148b4381fd0259cae441e459ec8ebe2c5d557722 (patch)
tree      c66c96ea6c60c63385b528dce195af802b8acf3b /tensorflow/cc
parent    fc03fbff3dd7a58fa4f16226df4ada1f21f8b53f (diff)
Automated g4 rollback of changelist 201011811
PiperOrigin-RevId: 201033171
Diffstat (limited to 'tensorflow/cc')
-rw-r--r--  tensorflow/cc/gradients/math_grad.cc       1
-rw-r--r--  tensorflow/cc/gradients/nn_grad.cc        47
-rw-r--r--  tensorflow/cc/gradients/nn_grad_test.cc   84
3 files changed, 16 insertions, 116 deletions
diff --git a/tensorflow/cc/gradients/math_grad.cc b/tensorflow/cc/gradients/math_grad.cc
index 35a01e0341..52c177212a 100644
--- a/tensorflow/cc/gradients/math_grad.cc
+++ b/tensorflow/cc/gradients/math_grad.cc
@@ -38,7 +38,6 @@ REGISTER_NO_GRADIENT_OP("NotEqual");
REGISTER_NO_GRADIENT_OP("LogicalAnd");
REGISTER_NO_GRADIENT_OP("LogicalOr");
REGISTER_NO_GRADIENT_OP("LogicalNot");
-REGISTER_NO_GRADIENT_OP("Floor");
// Conjugate helper function returns the conjugate of an Output if it
// is complex valued.
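Note on the pattern this hunk touches: gradient functions in tensorflow/cc/gradients are plain helpers bound to an op name through REGISTER_GRADIENT_OP, while ops without a gradient are marked with REGISTER_NO_GRADIENT_OP (the deleted line above removes such a no-gradient registration for "Floor"). A minimal sketch of the pattern follows; "MyOp", "MyOtherOp", and MyOpGradHelper are hypothetical names for illustration only, not part of this change:

    #include <vector>
    #include "tensorflow/cc/framework/grad_op_registry.h"

    namespace tensorflow {
    namespace ops {
    namespace {

    Status MyOpGradHelper(const Scope& scope, const Operation& op,
                          const std::vector<Output>& grad_inputs,
                          std::vector<Output>* grad_outputs) {
      // Placeholder body: pass the incoming gradient through unchanged.
      // Real helpers compute dx from the op's inputs/outputs, as in the
      // SoftplusGradHelper removed in the next file.
      grad_outputs->push_back(grad_inputs[0]);
      return scope.status();
    }
    REGISTER_GRADIENT_OP("MyOp", MyOpGradHelper);

    // An op with no defined gradient is registered like this instead.
    REGISTER_NO_GRADIENT_OP("MyOtherOp");

    }  // anonymous namespace
    }  // namespace ops
    }  // namespace tensorflow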
diff --git a/tensorflow/cc/gradients/nn_grad.cc b/tensorflow/cc/gradients/nn_grad.cc
index c73482d5f4..0cb3132e94 100644
--- a/tensorflow/cc/gradients/nn_grad.cc
+++ b/tensorflow/cc/gradients/nn_grad.cc
@@ -255,53 +255,6 @@ Status LRNGradHelper(const Scope& scope, const Operation& op,
}
REGISTER_GRADIENT_OP("LRN", LRNGradHelper);
-Status SoftplusGradHelper(const Scope& scope, const Operation& op,
- const std::vector<Output>& grad_inputs,
- std::vector<Output>* grad_outputs) {
- auto dx = internal::SoftplusGrad(scope, grad_inputs[0], op.input(0));
- grad_outputs->push_back(dx);
- return scope.status();
-}
-REGISTER_GRADIENT_OP("Softplus", SoftplusGradHelper);
-
-Status SoftsignGradHelper(const Scope& scope, const Operation& op,
- const std::vector<Output>& grad_inputs,
- std::vector<Output>* grad_outputs) {
- auto dx = internal::SoftsignGrad(scope, grad_inputs[0], op.input(0));
- grad_outputs->push_back(dx);
- return scope.status();
-}
-REGISTER_GRADIENT_OP("Softsign", SoftsignGradHelper);
-
-Status FractionalAvgPoolGradHelper(const Scope& scope, const Operation& op,
- const std::vector<Output>& grad_inputs,
- std::vector<Output>* grad_outputs) {
- bool overlapping;
- TF_RETURN_IF_ERROR(
- GetNodeAttr(op.output(0).node()->attrs(), "overlapping", &overlapping));
- auto dx = internal::FractionalAvgPoolGrad(
- scope, Shape(scope, op.input(0), Shape::OutType(DT_INT64)),
- grad_inputs[0], op.output(1), op.output(2),
- internal::FractionalAvgPoolGrad::Overlapping(overlapping));
- grad_outputs->push_back(dx);
- return scope.status();
-}
-REGISTER_GRADIENT_OP("FractionalAvgPool", FractionalAvgPoolGradHelper);
-
-Status FractionalMaxPoolGradHelper(const Scope& scope, const Operation& op,
- const std::vector<Output>& grad_inputs,
- std::vector<Output>* grad_outputs) {
- bool overlapping;
- TF_RETURN_IF_ERROR(
- GetNodeAttr(op.output(0).node()->attrs(), "overlapping", &overlapping));
- auto dx = internal::FractionalMaxPoolGrad(
- scope, op.input(0), op.output(0), grad_inputs[0], op.output(1),
- op.output(2), internal::FractionalMaxPoolGrad::Overlapping(overlapping));
- grad_outputs->push_back(dx);
- return scope.status();
-}
-REGISTER_GRADIENT_OP("FractionalMaxPool", FractionalMaxPoolGradHelper);
-
} // anonymous namespace
} // namespace ops
} // namespace tensorflow
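For orientation, a registered helper such as the removed SoftplusGradHelper is only ever invoked through the gradient registry, typically via AddSymbolicGradients from tensorflow/cc/framework/gradients.h. A hedged sketch of that call path, with the setup around it purely illustrative (after this rollback the "Softplus" lookup fails, since its helper is no longer registered):

    #include <vector>
    #include "tensorflow/cc/framework/gradients.h"
    #include "tensorflow/cc/framework/scope.h"
    #include "tensorflow/cc/ops/standard_ops.h"

    // Illustrative only: builds y = Softplus(x) and asks the gradient
    // registry for dy/dx. AddSymbolicGradients consults the registry
    // populated by the REGISTER_GRADIENT_OP calls above.
    tensorflow::Status SoftplusGradLookup() {
      tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
      auto x = tensorflow::ops::Placeholder(scope, tensorflow::DT_FLOAT);
      auto y = tensorflow::ops::Softplus(scope, x);
      std::vector<tensorflow::Output> grads;
      // With the registration deleted, this returns an error for
      // "Softplus" instead of adding a gradient node.
      return tensorflow::AddSymbolicGradients(scope, {y}, {x}, &grads);
    }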
diff --git a/tensorflow/cc/gradients/nn_grad_test.cc b/tensorflow/cc/gradients/nn_grad_test.cc
index b4d457a9d1..c4eba7ecb0 100644
--- a/tensorflow/cc/gradients/nn_grad_test.cc
+++ b/tensorflow/cc/gradients/nn_grad_test.cc
@@ -28,8 +28,6 @@ namespace {
using ops::BiasAdd;
using ops::Conv2D;
using ops::Elu;
-using ops::FractionalAvgPool;
-using ops::FractionalMaxPool;
using ops::L2Loss;
using ops::LogSoftmax;
using ops::LRN;
@@ -43,8 +41,6 @@ using ops::Relu;
using ops::Relu6;
using ops::Selu;
using ops::Softmax;
-using ops::Softplus;
-using ops::Softsign;
class NNGradTest : public ::testing::Test {
protected:
@@ -75,30 +71,22 @@ class NNGradTest : public ::testing::Test {
EXPECT_LT(max_error, 1e-3);
}
- // Sets tensor with random values, ensuring that every pair of elements are at
- // least a reasonable amount apart.
- // This is an issue for max pooling operations, in which perturbations by the
- // numeric gradient computation in the gradient checker can change the max
- // value if a pool has values that are too close together.
+ // Sets tensor with random values, ensuring that the max value is largest by
+ // a reasonable amount.
+ // This is an issue for MaxPool, MaxPoolV2 and MaxPool3D, in which
+ // perturbations by the numeric gradient computation in the gradient checker
+ // can change the max value if values are too close together.
template <typename T>
- void SetRandomValuesForMaxPooling(Tensor* tensor) {
+ void SetRandomValuesWithBumpedMax(Tensor* tensor) {
auto tensor_flat = tensor->flat<T>();
- // First set the array to an increasing sequence of values spaced
- // a reasonable amount apart
- T cur = 0;
- for (size_t i = 0; i < tensor->NumElements(); i++) {
- tensor_flat(i) = cur;
- cur += 5e-2;
- }
- // Fischer-Yates shuffle the array
- for (size_t i = tensor->NumElements() - 1; i >= 1; i--) {
- // j <- random integer 0 <= j <= i
- size_t j = random::New64() % (i + 1);
- // swap values at i, j
- T tmp = tensor_flat(i);
- tensor_flat(i) = tensor_flat(j);
- tensor_flat(j) = tmp;
+ tensor_flat.setRandom();
+ int32 max_index = 0;
+ for (size_t i = 1; i < tensor->NumElements(); i++) {
+ if (tensor_flat(i) > tensor_flat(max_index)) {
+ max_index = i;
+ }
}
+ tensor_flat(max_index) += 1e-2;
}
Scope scope_;
@@ -201,7 +189,7 @@ TEST_F(NNGradTest, MaxPoolGradHelper) {
const std::vector<int> strides{1, 2, 2, 1};
auto y = MaxPool(scope_, x, ksize, strides, "VALID");
Tensor x_init_value = Tensor(DT_FLOAT, x_shape);
- SetRandomValuesForMaxPooling<float>(&x_init_value);
+ SetRandomValuesWithBumpedMax<float>(&x_init_value);
RunTest(x, x_init_value, y, y_shape);
}
@@ -214,7 +202,7 @@ TEST_F(NNGradTest, MaxPoolGradV2Helper) {
Tensor strides = test::AsTensor<int>({1, 2, 2, 1}, {4});
auto y = MaxPoolV2(scope_, x, ksize, strides, "VALID");
Tensor x_init_value = Tensor(DT_FLOAT, x_shape);
- SetRandomValuesForMaxPooling<float>(&x_init_value);
+ SetRandomValuesWithBumpedMax<float>(&x_init_value);
RunTest(x, x_init_value, y, y_shape);
}
@@ -227,7 +215,7 @@ TEST_F(NNGradTest, MaxPool3DGradHelper) {
const std::vector<int> strides{1, 3, 3, 3, 1};
auto y = MaxPool3D(scope_, x, ksize, strides, "VALID");
Tensor x_init_value = Tensor(DT_FLOAT, x_shape);
- SetRandomValuesForMaxPooling<float>(&x_init_value);
+ SetRandomValuesWithBumpedMax<float>(&x_init_value);
RunTest(x, x_init_value, y, y_shape);
}
@@ -260,45 +248,5 @@ TEST_F(NNGradTest, LRN){
RunTest(x, x_shape, y, x_shape);
}
-TEST_F(NNGradTest, SoftplusGrad) {
- TensorShape shape({3, 7});
- auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
- auto y = Softplus(scope_, x);
- RunTest(x, shape, y, shape);
-}
-
-TEST_F(NNGradTest, SoftsignGrad) {
- TensorShape shape({3, 7});
- auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
- auto y = Softsign(scope_, x);
- RunTest(x, shape, y, shape);
-}
-
-TEST_F(NNGradTest, FractionalAvgPoolGradHelper) {
- TensorShape x_shape({1, 3, 7, 1});
- auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
- // Force consistent pooling regions for unit testing.
- auto y = FractionalAvgPool(
- scope_, x, {1, 1.2, 1.9, 1},
- FractionalAvgPool::Deterministic(true).Overlapping(true).Seed(1).Seed2(
- 2));
- TensorShape y_shape({1, 2, 3, 1});
- RunTest(x, x_shape, y.output, y_shape);
-}
-
-TEST_F(NNGradTest, FractionalMaxPoolGradHelper) {
- TensorShape x_shape({1, 3, 7, 1});
- auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
- // Force consistent pooling regions for unit testing.
- auto y = FractionalMaxPool(
- scope_, x, {1, 1.2, 1.9, 1},
- FractionalMaxPool::Deterministic(true).Overlapping(true).Seed(1).Seed2(
- 2));
- Tensor x_init_value = Tensor(DT_FLOAT, x_shape);
- SetRandomValuesForMaxPooling<float>(&x_init_value);
- TensorShape y_shape({1, 2, 3, 1});
- RunTest(x, x_init_value, y.output, y_shape);
-}
-
} // namespace
} // namespace tensorflow
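A closing note on the new SetRandomValuesWithBumpedMax helper: numeric gradient checking perturbs each input element by a small delta, and when two candidates in a max-pooling window sit closer together than that delta, the perturbation itself can flip which element is the max, corrupting the finite-difference estimate. Bumping the winning element well clear of its neighbors keeps the argmax stable. A standalone sketch of the failure and the fix (delta and the bump size here are illustrative, not the gradient checker's actual values):

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    // Max-pool over a whole window.
    float MaxPool(const std::vector<float>& v) {
      return *std::max_element(v.begin(), v.end());
    }

    // Central-difference estimate of d(MaxPool)/d(v[i]).
    float NumericGrad(std::vector<float> v, int i, float delta) {
      v[i] += delta;
      float hi = MaxPool(v);
      v[i] -= 2 * delta;
      float lo = MaxPool(v);
      return (hi - lo) / (2 * delta);
    }

    int main() {
      const float delta = 1e-2f;
      // Two values closer together than delta: perturbing index 0 by
      // delta flips which element is the max, so the estimate comes out
      // near 0.45 instead of the true gradient of 0.
      std::vector<float> close = {0.500f, 0.501f};
      std::printf("close pair: d/dv[0] = %.3f\n",
                  NumericGrad(close, 0, delta));
      // Bump the max well past delta (what SetRandomValuesWithBumpedMax
      // does) and the estimate is the clean indicator gradient of 0.
      std::vector<float> bumped = close;
      bumped[1] += 1e-1f;
      std::printf("bumped max: d/dv[0] = %.3f\n",
                  NumericGrad(bumped, 0, delta));
      return 0;
    }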