| author    | Akshay Modi <nareshmodi@google.com>                     | 2018-03-06 14:09:03 -0800 |
|-----------|---------------------------------------------------------|---------------------------|
| committer | Akshay Modi <nareshmodi@google.com>                     | 2018-03-06 14:09:03 -0800 |
| commit    | 69c36c2e6de93e93fe47eb1276369e272e0ce70f (patch)        |                           |
| tree      | c5809ec63fd7cf5053cf9fd2e52cc65e9d41c4a7 /tensorflow/cc |                           |
| parent    | 1f441c191f9a6d8f27b32b1c19c55f76aaf9e387 (diff)         |                           |
| parent    | e79a3d99e43b797036d0e35ab9b332e371108a5d (diff)         |                           |
Merge commit for internal changes
* Fixed a simple merge issue in tf/contrib/timeseries/python/timeseries/BUILD
Diffstat (limited to 'tensorflow/cc')
-rw-r--r-- | tensorflow/cc/framework/cc_op_gen.cc | 3
-rw-r--r-- | tensorflow/cc/gradients/nn_grad.cc   | 59
2 files changed, 23 insertions, 39 deletions
diff --git a/tensorflow/cc/framework/cc_op_gen.cc b/tensorflow/cc/framework/cc_op_gen.cc
index a40ad1ffc3..39893f5ccd 100644
--- a/tensorflow/cc/framework/cc_op_gen.cc
+++ b/tensorflow/cc/framework/cc_op_gen.cc
@@ -697,7 +697,8 @@ string OpInfo::GetOpAttrStruct() const {
     attr_comment = MakeComment(attr_comment, "    ");
 
     strings::StrAppend(&setters, attr_comment);
-    strings::StrAppend(&setters, "    Attrs ", attr_func_def, " x) {\n");
+    strings::StrAppend(&setters, "    TF_MUST_USE_RESULT Attrs ", attr_func_def,
+                       " x) {\n");
     strings::StrAppend(&setters, "      Attrs ret = *this;\n");
     strings::StrAppend(&setters, "      ret.", api_def_attr.rename_to(),
                        "_ = x;\n");
diff --git a/tensorflow/cc/gradients/nn_grad.cc b/tensorflow/cc/gradients/nn_grad.cc
index 63a67f09f6..1c23f3257e 100644
--- a/tensorflow/cc/gradients/nn_grad.cc
+++ b/tensorflow/cc/gradients/nn_grad.cc
@@ -48,8 +48,8 @@ Status SoftmaxGrad(const Scope& scope, const Operation& op,
 REGISTER_GRADIENT_OP("Softmax", SoftmaxGrad);
 
 Status LogSoftmaxGrad(const Scope& scope, const Operation& op,
-                    const std::vector<Output>& grad_inputs,
-                    std::vector<Output>* grad_outputs) {
+                      const std::vector<Output>& grad_inputs,
+                      std::vector<Output>* grad_outputs) {
   auto softmax = Exp(scope, op.output(0));
   auto sum = Sum(scope, grad_inputs[0], {1}, Sum::KeepDims(true));
   auto mul = Mul(scope, sum, softmax);
@@ -107,11 +107,10 @@ Status BiasAddGradHelper(const Scope& scope, const Operation& op,
                          const std::vector<Output>& grad_inputs,
                          std::vector<Output>* grad_outputs) {
   string data_format;
-  BiasAddGrad::Attrs input_attrs;
   TF_RETURN_IF_ERROR(
       GetNodeAttr(op.output(0).node()->attrs(), "data_format", &data_format));
-  input_attrs.DataFormat(data_format);
-  auto dx_1 = BiasAddGrad(scope, grad_inputs[0], input_attrs);
+  auto dx_1 =
+      BiasAddGrad(scope, grad_inputs[0], BiasAddGrad::DataFormat(data_format));
   grad_outputs->push_back(Identity(scope, grad_inputs[0]));
   grad_outputs->push_back(dx_1);
   return scope.status();
@@ -130,19 +129,16 @@ Status Conv2DGrad(const Scope& scope, const Operation& op,
   TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "padding", &padding));
   TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "strides", &strides));
   TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "use_cudnn_on_gpu", &use_cudnn_on_gpu));
-  Conv2DBackpropInput::Attrs input_attrs;
-  input_attrs.DataFormat(data_format);
-  input_attrs.UseCudnnOnGpu(use_cudnn_on_gpu);
-  auto dx_1 = Conv2DBackpropInput(scope, Shape(scope, op.input(0)),
-                                  op.input(1), grad_inputs[0],
-                                  strides, padding, input_attrs);
+  auto dx_1 = Conv2DBackpropInput(scope, Shape(scope, op.input(0)), op.input(1),
+                                  grad_inputs[0], strides, padding,
+                                  Conv2DBackpropInput::DataFormat(data_format)
+                                      .UseCudnnOnGpu(use_cudnn_on_gpu));
   grad_outputs->push_back(dx_1);
-  Conv2DBackpropFilter::Attrs filter_attrs;
-  filter_attrs.DataFormat(data_format);
-  filter_attrs.UseCudnnOnGpu(use_cudnn_on_gpu);
-  auto dx_2 = Conv2DBackpropFilter(scope, op.input(0),
-                                   Shape(scope, op.input(1)), grad_inputs[0],
-                                   strides, padding, filter_attrs);
+  auto dx_2 =
+      Conv2DBackpropFilter(scope, op.input(0), Shape(scope, op.input(1)),
+                           grad_inputs[0], strides, padding,
+                           Conv2DBackpropFilter::DataFormat(data_format)
+                               .UseCudnnOnGpu(use_cudnn_on_gpu));
   grad_outputs->push_back(dx_2);
   return scope.status();
 }
@@ -160,13 +156,9 @@ Status MaxPoolGradHelper(const Scope& scope, const Operation& op,
   TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "ksize", &ksize));
   TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "padding", &padding));
   TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "strides", &strides));
-  internal::MaxPoolGrad::Attrs grad_attrs;
-  grad_attrs.DataFormat(data_format);
-  auto dx = internal::MaxPoolGrad(scope, op.input(0),
-                                  op.output(0),
-                                  grad_inputs[0],
-                                  ksize, strides,
-                                  padding, grad_attrs);
+  auto dx = internal::MaxPoolGrad(
+      scope, op.input(0), op.output(0), grad_inputs[0], ksize, strides, padding,
+      internal::MaxPoolGrad::DataFormat(data_format));
   grad_outputs->push_back(dx);
   return scope.status();
 }
@@ -180,15 +172,9 @@ Status MaxPoolGradV2Helper(const Scope& scope, const Operation& op,
   auto attrs = op.output(0).node()->attrs();
   TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "data_format", &data_format));
   TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "padding", &padding));
-  MaxPoolGradV2::Attrs grad_attrs;
-  grad_attrs.DataFormat(data_format);
-  auto dx = MaxPoolGradV2(scope, op.input(0),
-                          op.output(0),
-                          grad_inputs[0],
-                          op.input(1),
-                          op.input(2),
-                          padding,
-                          grad_attrs);
+  auto dx = MaxPoolGradV2(scope, op.input(0), op.output(0), grad_inputs[0],
+                          op.input(1), op.input(2), padding,
+                          MaxPoolGradV2::DataFormat(data_format));
   grad_outputs->push_back(dx);
   grad_outputs->push_back(NoGradient());
   grad_outputs->push_back(NoGradient());
@@ -262,11 +248,8 @@ REGISTER_GRADIENT_OP("AvgPool3D", AvgPool3DGradHelper);
 
 Status LRNGradHelper(const Scope& scope, const Operation& op,
                      const std::vector<Output>& grad_inputs,
-                     std::vector<Output>* grad_outputs){
-  internal::LRNGrad::Attrs grad_attrs;
-
-  auto dx = internal::LRNGrad(scope, grad_inputs[0], op.input(0), op.output(0),
-                              grad_attrs);
+                     std::vector<Output>* grad_outputs) {
+  auto dx = internal::LRNGrad(scope, grad_inputs[0], op.input(0), op.output(0));
   grad_outputs->push_back(dx);
   return scope.status();
 }
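Taken together, the two files change for the same reason: the generated Attrs setters return a modified copy (the `Attrs ret = *this; ... ret.<attr>_ = x;` body visible in the first hunk), so the old gradient helpers that called a setter as a bare statement, e.g. `input_attrs.DataFormat(data_format);`, left the attribute at its default value. Marking the generated setters `TF_MUST_USE_RESULT` turns that mistake into a compiler diagnostic, and the gradient helpers now build the attributes inline with the generated static helpers. Below is a minimal sketch of the two styles; it is not part of the commit, assumes a TF 1.x C++ build with the generated ops headers available, and the tensor contents and the "NHWC" value are made up for illustration.

```cpp
// Sketch only (not from this commit): contrasts the discarded-setter pattern
// that TF_MUST_USE_RESULT now flags with the chained static-helper pattern
// that nn_grad.cc switches to. Assumes the TF 1.x C++ API is available.
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/standard_ops.h"

using namespace tensorflow;
using namespace tensorflow::ops;

int main() {
  Scope scope = Scope::NewRootScope();

  // Hypothetical incoming gradient; values made up for illustration.
  auto grad = Const(scope, {{1.0f, 2.0f}, {3.0f, 4.0f}});

  // Old pattern (removed by this commit): the setter returns a modified copy,
  // so calling it as a statement silently discards the attribute.
  //   BiasAddGrad::Attrs attrs;
  //   attrs.DataFormat("NHWC");  // result dropped -- attrs is unchanged
  //   auto dx = BiasAddGrad(scope, grad, attrs);

  // New pattern (as in the updated BiasAddGradHelper): chain the generated
  // static helpers and pass the resulting Attrs inline.
  auto dx = BiasAddGrad(scope, grad, BiasAddGrad::DataFormat("NHWC"));

  return scope.ok() ? 0 : 1;
}
```

Because each setter returns a fresh Attrs, the chained form composes cleanly (e.g. `Conv2DBackpropInput::DataFormat(data_format).UseCudnnOnGpu(use_cudnn_on_gpu)` in the Conv2DGrad hunk) and removes the need for named Attrs locals in the gradient helpers.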