path: root/tensorflow/cc
author    Shanqing Cai <cais@google.com>  2018-03-12 19:33:52 -0700
committer TensorFlower Gardener <gardener@tensorflow.org>  2018-03-12 19:37:39 -0700
commit 7144571f2fc59c8705e4e3d7b922fa0ebf44f3fa (patch)
tree   b14683f826541c183c1bb783265e13b565469fbb /tensorflow/cc
parent 2bda52d485c9715dcd17f49526cea7890e091cb8 (diff)
Merge changes from github.
PiperOrigin-RevId: 188817194
Diffstat (limited to 'tensorflow/cc')
-rw-r--r--  tensorflow/cc/gradients/nn_grad.cc       64
-rw-r--r--  tensorflow/cc/gradients/nn_grad_test.cc  44
-rw-r--r--  tensorflow/cc/profiler/profiler.h         6
3 files changed, 108 insertions, 6 deletions
diff --git a/tensorflow/cc/gradients/nn_grad.cc b/tensorflow/cc/gradients/nn_grad.cc
index 9b732421e5..0cb3132e94 100644
--- a/tensorflow/cc/gradients/nn_grad.cc
+++ b/tensorflow/cc/gradients/nn_grad.cc
@@ -182,6 +182,70 @@ Status MaxPoolGradV2Helper(const Scope& scope, const Operation& op,
}
REGISTER_GRADIENT_OP("MaxPoolV2", MaxPoolGradV2Helper);
+Status MaxPool3DGradHelper(const Scope& scope, const Operation& op,
+ const std::vector<Output>& grad_inputs,
+ std::vector<Output>* grad_outputs) {
+ std::vector<int32> ksize;
+ std::vector<int32> strides;
+ string padding;
+ string data_format;
+ auto attrs = op.output(0).node()->attrs();
+ TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "ksize", &ksize));
+ TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "strides", &strides));
+ TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "padding", &padding));
+ TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "data_format", &data_format));
+ MaxPool3DGrad::Attrs grad_attrs;
+ auto dx = MaxPool3DGrad(scope, op.input(0), op.output(0), grad_inputs[0],
+ ksize, strides, padding,
+ grad_attrs.DataFormat(data_format));
+ grad_outputs->push_back(dx);
+ return scope.status();
+}
+REGISTER_GRADIENT_OP("MaxPool3D", MaxPool3DGradHelper);
+
+Status AvgPoolGradHelper(const Scope& scope, const Operation& op,
+ const std::vector<Output>& grad_inputs,
+ std::vector<Output>* grad_outputs) {
+ std::vector<int32> ksize;
+ std::vector<int32> strides;
+ string padding;
+ string data_format;
+ auto attrs = op.output(0).node()->attrs();
+ TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "ksize", &ksize));
+ TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "strides", &strides));
+ TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "padding", &padding));
+ TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "data_format", &data_format));
+ internal::AvgPoolGrad::Attrs grad_attrs;
+ auto dx =
+ internal::AvgPoolGrad(scope, Shape(scope, op.input(0)), grad_inputs[0],
+ ksize, strides, padding,
+ grad_attrs.DataFormat(data_format));
+ grad_outputs->push_back(dx);
+ return scope.status();
+}
+REGISTER_GRADIENT_OP("AvgPool", AvgPoolGradHelper);
+
+Status AvgPool3DGradHelper(const Scope& scope, const Operation& op,
+ const std::vector<Output>& grad_inputs,
+ std::vector<Output>* grad_outputs) {
+ std::vector<int32> ksize;
+ std::vector<int32> strides;
+ string padding;
+ string data_format;
+ auto attrs = op.output(0).node()->attrs();
+ TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "ksize", &ksize));
+ TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "strides", &strides));
+ TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "padding", &padding));
+ TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "data_format", &data_format));
+ AvgPool3DGrad::Attrs grad_attrs;
+ auto dx = AvgPool3DGrad(scope, Shape(scope, op.input(0)), grad_inputs[0],
+ ksize, strides, padding,
+ grad_attrs.DataFormat(data_format));
+ grad_outputs->push_back(dx);
+ return scope.status();
+}
+REGISTER_GRADIENT_OP("AvgPool3D", AvgPool3DGradHelper);
+
Status LRNGradHelper(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
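The three helpers added above all follow the same pattern: read the forward op's attributes with GetNodeAttr, build the corresponding *Grad op, and register the function under the forward op's name via REGISTER_GRADIENT_OP. Once registered, the gradient is looked up by op name whenever symbolic gradients are requested through the C++ API. Below is a minimal sketch of that lookup path, not part of this commit, assuming the standard tensorflow/cc headers and a MaxPool3D forward op:

#include "tensorflow/cc/framework/gradients.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/standard_ops.h"

using namespace tensorflow;
using namespace tensorflow::ops;

int main() {
  Scope scope = Scope::NewRootScope();
  TensorShape x_shape({1, 3, 3, 3, 1});
  auto x = Placeholder(scope, DT_FLOAT, Placeholder::Shape(x_shape));
  // Forward op whose gradient function is registered above.
  auto y = MaxPool3D(scope, x, {1, 3, 3, 3, 1}, {1, 3, 3, 3, 1}, "VALID");
  // AddSymbolicGradients looks up "MaxPool3D" in the gradient registry
  // (populated by REGISTER_GRADIENT_OP) and wires MaxPool3DGrad into the graph.
  std::vector<Output> grads;
  TF_CHECK_OK(AddSymbolicGradients(scope, {y}, {x}, &grads));
  // grads[0] holds d(y)/d(x); feed x and fetch grads[0] with a ClientSession.
  return 0;
}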
diff --git a/tensorflow/cc/gradients/nn_grad_test.cc b/tensorflow/cc/gradients/nn_grad_test.cc
index 0cfe5f6e3c..c4eba7ecb0 100644
--- a/tensorflow/cc/gradients/nn_grad_test.cc
+++ b/tensorflow/cc/gradients/nn_grad_test.cc
@@ -31,8 +31,11 @@ using ops::Elu;
using ops::L2Loss;
using ops::LogSoftmax;
using ops::LRN;
+using ops::AvgPool;
+using ops::AvgPool3D;
using ops::MaxPool;
using ops::MaxPoolV2;
+using ops::MaxPool3D;
using ops::Placeholder;
using ops::Relu;
using ops::Relu6;
@@ -70,9 +73,9 @@ class NNGradTest : public ::testing::Test {
// Sets tensor with random values, ensuring that the max value is largest by
// a reasonable amount.
- // This is an issue for MaxPool and MaxPoolV2, in which perturbations by the
- // numeric gradient computation in the gradient checker can change the max
- // value if values are too close together.
+ // This is an issue for MaxPool, MaxPoolV2 and MaxPool3D, in which
+ // perturbations by the numeric gradient computation in the gradient checker
+ // can change the max value if values are too close together.
template <typename T>
void SetRandomValuesWithBumpedMax(Tensor* tensor) {
auto tensor_flat = tensor->flat<T>();
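The hunk above cuts off before the body of SetRandomValuesWithBumpedMax. A hypothetical sketch of the bumped-max idea the comment describes follows; the fixture's actual margin and implementation may differ:

#include "tensorflow/core/framework/tensor.h"

// Fill with random values, then raise the current maximum by a margin so
// that the small perturbations made by the numeric gradient checker cannot
// change which element is the max.
template <typename T>
void FillWithBumpedMax(tensorflow::Tensor* tensor) {
  auto flat = tensor->flat<T>();
  flat.setRandom();
  int max_index = 0;
  for (int i = 1; i < flat.size(); ++i) {
    if (flat(i) > flat(max_index)) max_index = i;
  }
  flat(max_index) = flat(max_index) + static_cast<T>(1);  // Hypothetical margin.
}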
@@ -203,6 +206,41 @@ TEST_F(NNGradTest, MaxPoolGradV2Helper) {
RunTest(x, x_init_value, y, y_shape);
}
+TEST_F(NNGradTest, MaxPool3DGradHelper) {
+ TensorShape x_shape({1, 3, 3, 3, 1});
+ TensorShape y_shape({1, 1, 1, 1, 1});
+ auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
+ // Set up window and strides so that we only do one MaxPool3D.
+ const std::vector<int> ksize{1, 3, 3, 3, 1};
+ const std::vector<int> strides{1, 3, 3, 3, 1};
+ auto y = MaxPool3D(scope_, x, ksize, strides, "VALID");
+ Tensor x_init_value = Tensor(DT_FLOAT, x_shape);
+ SetRandomValuesWithBumpedMax<float>(&x_init_value);
+ RunTest(x, x_init_value, y, y_shape);
+}
+
+TEST_F(NNGradTest, AvgPoolGradHelper) {
+ TensorShape x_shape({1, 2, 2, 1});
+ TensorShape y_shape({1, 1, 1, 1});
+ auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
+ // Set up window and strides so that we only do one AvgPool.
+ const std::vector<int> ksize{1, 2, 2, 1};
+ const std::vector<int> strides{1, 2, 2, 1};
+ auto y = AvgPool(scope_, x, ksize, strides, "SAME");
+ RunTest(x, x_shape, y, y_shape);
+}
+
+TEST_F(NNGradTest, AvgPool3DGradHelper) {
+ TensorShape x_shape({1, 3, 3, 3, 1});
+ TensorShape y_shape({1, 1, 1, 1, 1});
+ auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
+ // Set up window and strides so that we only do one AvgPool3D.
+ const std::vector<int> ksize{1, 3, 3, 3, 1};
+ const std::vector<int> strides{1, 3, 3, 3, 1};
+ auto y = AvgPool3D(scope_, x, ksize, strides, "SAME");
+ RunTest(x, x_shape, y, y_shape);
+}
+
TEST_F(NNGradTest, LRN) {
TensorShape x_shape({1, 1, 2, 1});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
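Each RunTest call in the new tests delegates to the fixture, which compares a numerically estimated Jacobian against the symbolic one via ComputeGradientError from tensorflow/cc/framework/gradient_checker.h. A hedged standalone equivalent of the AvgPool check is sketched below; the tolerance is illustrative, not the fixture's exact bound:

#include "tensorflow/cc/framework/gradient_checker.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/standard_ops.h"

using namespace tensorflow;
using namespace tensorflow::ops;

void CheckAvgPoolGradient() {
  Scope scope = Scope::NewRootScope();
  TensorShape x_shape({1, 2, 2, 1});
  TensorShape y_shape({1, 1, 1, 1});
  auto x = Placeholder(scope, DT_FLOAT, Placeholder::Shape(x_shape));
  auto y = AvgPool(scope, x, {1, 2, 2, 1}, {1, 2, 2, 1}, "SAME");
  float max_error;
  // Compares the numeric Jacobian against the symbolic gradient produced
  // by the AvgPoolGradHelper registered in nn_grad.cc.
  TF_CHECK_OK((ComputeGradientError<float, float, float>(
      scope, {x}, {x_shape}, {y}, {y_shape}, &max_error)));
  CHECK_LT(max_error, 1e-3);  // Illustrative tolerance.
}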
diff --git a/tensorflow/cc/profiler/profiler.h b/tensorflow/cc/profiler/profiler.h
index 6077c45c58..64edbb5766 100644
--- a/tensorflow/cc/profiler/profiler.h
+++ b/tensorflow/cc/profiler/profiler.h
@@ -61,18 +61,18 @@ class Profiler {
/// Adds tracing information `run_meta` to profiler. A `run_meta` is
/// generated by a TensorFlow session run call. `step` is the key
/// to the `run_meta`. When calling ProfileXXX methods, caller can specify
- /// `step` in `options` to seletively profile the corresponding `run_meta`.
+ /// `step` in `options` to selectively profile the corresponding `run_meta`.
/// Multiple different `run_meta` can be keyed by the same `step` in order
/// to group them together.
void AddStep(int64 step, const RunMetadata& run_meta);
/// Profiles the model by organizing nodes in graph structure.
- /// Each node is an op and the nodes are contected by the op inputs/outputs.
+ /// Each node is an op and the nodes are connected by the op inputs/outputs.
GraphNodeProto ProfileGraph(const Options& options);
/// Profiles the model by organizing nodes in name scope structure.
/// Each node is an op, and nodes are organized by the ops' name
- /// scope, similar to a filesystem tree.
+ /// scope, similar to a file system tree.
/// E.g. /foo is the root of operation /foo/matmul_1 and /foo/conv_2.
GraphNodeProto ProfileNameScope(const Options& options);
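A minimal usage sketch for the Profiler methods documented above; constructing a tfprof Options instance is elided because its argument list is long, so `opts` is assumed to be pre-built by the caller:

#include "tensorflow/cc/profiler/profiler.h"

void ProfileOneStep(const tensorflow::GraphDef& graph_def,
                    const tensorflow::RunMetadata& run_meta,
                    const tensorflow::tfprof::Options& opts) {
  tensorflow::tfprof::Profiler profiler(graph_def);
  // Key this run's metadata by step 0; multiple RunMetadata can share a
  // step to group them together.
  profiler.AddStep(0, run_meta);
  // View the profile organized by graph structure (op inputs/outputs)...
  auto by_graph = profiler.ProfileGraph(opts);
  // ...or by name scope, like a file system tree.
  auto by_scope = profiler.ProfileNameScope(opts);
}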