Correct op::Attr usage in C++ gradient implementations.
author    A. Unique TensorFlower <gardener@tensorflow.org>
Mon, 5 Mar 2018 22:45:28 +0000 (14:45 -0800)
committer TensorFlower Gardener <gardener@tensorflow.org>
Mon, 5 Mar 2018 22:49:23 +0000 (14:49 -0800)
Also enabled TF_MUST_USE_RESULT for the generated Attr API, so we
can catch any new errors early.

Fixes #17360

PiperOrigin-RevId: 187925761
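
For context on the bug being corrected: the generated per-op Attrs setters return a
modified copy rather than mutating in place, so the old call sites in nn_grad.cc that
invoked a setter on a local Attrs object and discarded the result were silent no-ops
(the op ran with default attributes). A minimal sketch of the before/after call-site
pattern, based on the BiasAddGrad change below; it assumes the TensorFlow C++ API
headers and is illustrative rather than a standalone program:

    // Sketch of the call-site fix, mirroring the BiasAddGrad hunk in this commit.
    // Assumes the TensorFlow C++ API (tensorflow/cc); not a standalone program.
    #include <string>
    #include "tensorflow/cc/ops/standard_ops.h"

    using tensorflow::Output;
    using tensorflow::Scope;
    namespace ops = tensorflow::ops;

    Output BiasGradExample(const Scope& scope, const Output& grad,
                           const std::string& data_format) {
      // Before: the setter returns a modified copy, so the attribute was
      // silently dropped and the op used its default data_format.
      //   ops::BiasAddGrad::Attrs attrs;
      //   attrs.DataFormat(data_format);   // returned Attrs discarded -> no-op
      //   auto dx = ops::BiasAddGrad(scope, grad, attrs);

      // After: build the Attrs inline; further setters can be chained, and
      // TF_MUST_USE_RESULT now flags any discarded setter result.
      auto dx = ops::BiasAddGrad(scope, grad,
                                 ops::BiasAddGrad::DataFormat(data_format));
      return dx;
    }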

tensorflow/cc/framework/cc_op_gen.cc
tensorflow/cc/gradients/nn_grad.cc

diff --git a/tensorflow/cc/framework/cc_op_gen.cc b/tensorflow/cc/framework/cc_op_gen.cc
index a40ad1f..39893f5 100644
--- a/tensorflow/cc/framework/cc_op_gen.cc
+++ b/tensorflow/cc/framework/cc_op_gen.cc
@@ -697,7 +697,8 @@ string OpInfo::GetOpAttrStruct() const {
     attr_comment = MakeComment(attr_comment, "    ");
 
     strings::StrAppend(&setters, attr_comment);
-    strings::StrAppend(&setters, "    Attrs ", attr_func_def, " x) {\n");
+    strings::StrAppend(&setters, "    TF_MUST_USE_RESULT Attrs ", attr_func_def,
+                       " x) {\n");
     strings::StrAppend(&setters, "      Attrs ret = *this;\n");
     strings::StrAppend(&setters, "      ret.", api_def_attr.rename_to(),
                        "_ = x;\n");
diff --git a/tensorflow/cc/gradients/nn_grad.cc b/tensorflow/cc/gradients/nn_grad.cc
index 13a3bba..9b73242 100644
--- a/tensorflow/cc/gradients/nn_grad.cc
+++ b/tensorflow/cc/gradients/nn_grad.cc
@@ -48,8 +48,8 @@ Status SoftmaxGrad(const Scope& scope, const Operation& op,
 REGISTER_GRADIENT_OP("Softmax", SoftmaxGrad);
 
 Status LogSoftmaxGrad(const Scope& scope, const Operation& op,
-                   const std::vector<Output>& grad_inputs,
-                   std::vector<Output>* grad_outputs) {
+                      const std::vector<Output>& grad_inputs,
+                      std::vector<Output>* grad_outputs) {
   auto softmax = Exp(scope, op.output(0));
   auto sum = Sum(scope, grad_inputs[0], {1}, Sum::KeepDims(true));
   auto mul = Mul(scope, sum, softmax);
@@ -107,11 +107,10 @@ Status BiasAddGradHelper(const Scope& scope, const Operation& op,
                          const std::vector<Output>& grad_inputs,
                          std::vector<Output>* grad_outputs) {
   string data_format;
-  BiasAddGrad::Attrs input_attrs;
   TF_RETURN_IF_ERROR(
       GetNodeAttr(op.output(0).node()->attrs(), "data_format", &data_format));
-  input_attrs.DataFormat(data_format);
-  auto dx_1 = BiasAddGrad(scope, grad_inputs[0], input_attrs);
+  auto dx_1 =
+      BiasAddGrad(scope, grad_inputs[0], BiasAddGrad::DataFormat(data_format));
   grad_outputs->push_back(Identity(scope, grad_inputs[0]));
   grad_outputs->push_back(dx_1);
   return scope.status();
@@ -130,19 +129,16 @@ Status Conv2DGrad(const Scope& scope, const Operation& op,
   TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "padding", &padding));
   TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "strides", &strides));
   TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "use_cudnn_on_gpu", &use_cudnn_on_gpu));
-  Conv2DBackpropInput::Attrs input_attrs;
-  input_attrs.DataFormat(data_format);
-  input_attrs.UseCudnnOnGpu(use_cudnn_on_gpu);
-  auto dx_1 = Conv2DBackpropInput(scope, Shape(scope, op.input(0)),
-                                  op.input(1), grad_inputs[0],
-                                  strides, padding, input_attrs);
+  auto dx_1 = Conv2DBackpropInput(scope, Shape(scope, op.input(0)), op.input(1),
+                                  grad_inputs[0], strides, padding,
+                                  Conv2DBackpropInput::DataFormat(data_format)
+                                      .UseCudnnOnGpu(use_cudnn_on_gpu));
   grad_outputs->push_back(dx_1);
-  Conv2DBackpropFilter::Attrs filter_attrs;
-  filter_attrs.DataFormat(data_format);
-  filter_attrs.UseCudnnOnGpu(use_cudnn_on_gpu);
-  auto dx_2 = Conv2DBackpropFilter(scope, op.input(0),
-                                   Shape(scope, op.input(1)), grad_inputs[0],
-                                   strides, padding, filter_attrs);
+  auto dx_2 =
+      Conv2DBackpropFilter(scope, op.input(0), Shape(scope, op.input(1)),
+                           grad_inputs[0], strides, padding,
+                           Conv2DBackpropFilter::DataFormat(data_format)
+                               .UseCudnnOnGpu(use_cudnn_on_gpu));
   grad_outputs->push_back(dx_2);
   return scope.status();
 }
@@ -160,13 +156,9 @@ Status MaxPoolGradHelper(const Scope& scope, const Operation& op,
   TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "ksize", &ksize));
   TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "padding", &padding));
   TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "strides", &strides));
-  internal::MaxPoolGrad::Attrs grad_attrs;
-  grad_attrs.DataFormat(data_format);
-  auto dx = internal::MaxPoolGrad(scope, op.input(0),
-                                  op.output(0),
-                                  grad_inputs[0],
-                                  ksize, strides,
-                                  padding, grad_attrs);
+  auto dx = internal::MaxPoolGrad(
+      scope, op.input(0), op.output(0), grad_inputs[0], ksize, strides, padding,
+      internal::MaxPoolGrad::DataFormat(data_format));
   grad_outputs->push_back(dx);
   return scope.status();
 }
@@ -180,15 +172,9 @@ Status MaxPoolGradV2Helper(const Scope& scope, const Operation& op,
   auto attrs = op.output(0).node()->attrs();
   TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "data_format", &data_format));
   TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "padding", &padding));
-  MaxPoolGradV2::Attrs grad_attrs;
-  grad_attrs.DataFormat(data_format);
-  auto dx = MaxPoolGradV2(scope, op.input(0),
-                          op.output(0),
-                          grad_inputs[0],
-                          op.input(1),
-                          op.input(2),
-                          padding,
-                          grad_attrs);
+  auto dx = MaxPoolGradV2(scope, op.input(0), op.output(0), grad_inputs[0],
+                          op.input(1), op.input(2), padding,
+                          MaxPoolGradV2::DataFormat(data_format));
   grad_outputs->push_back(dx);
   grad_outputs->push_back(NoGradient());
   grad_outputs->push_back(NoGradient());
@@ -198,11 +184,8 @@ REGISTER_GRADIENT_OP("MaxPoolV2", MaxPoolGradV2Helper);
 
 Status LRNGradHelper(const Scope& scope, const Operation& op,
                      const std::vector<Output>& grad_inputs,
-                     std::vector<Output>* grad_outputs){
-  internal::LRNGrad::Attrs grad_attrs;
-
-  auto dx = internal::LRNGrad(scope, grad_inputs[0], op.input(0), op.output(0),
-                              grad_attrs);
+                     std::vector<Output>* grad_outputs) {
+  auto dx = internal::LRNGrad(scope, grad_inputs[0], op.input(0), op.output(0));
   grad_outputs->push_back(dx);
   return scope.status();
 }