Rename distributed_apply to _distributed_apply in OptimizerV2 to match the Optimizer base class.

author     A. Unique TensorFlower <gardener@tensorflow.org>
           Fri, 30 Mar 2018 18:12:24 +0000 (11:12 -0700)
committer  TensorFlower Gardener <gardener@tensorflow.org>
           Fri, 30 Mar 2018 18:14:41 +0000 (11:14 -0700)

PiperOrigin-RevId: 191089407

tensorflow/contrib/optimizer_v2/optimizer_v2.py

index 471992f..25d1957 100644
@@ -866,7 +866,7 @@ class OptimizerV2(optimizer_v1.Optimizer):
       raise ValueError("No gradients provided for any variable: %s." %
                        ([str(v) for _, v in grads_and_vars],))
     return distribute_lib.get_tower_context().merge_call(
-        self.distributed_apply, filtered, global_step=global_step, name=name)
+        self._distributed_apply, filtered, global_step=global_step, name=name)
 
   def _get_or_create_state(self, var_list=None):
     """Either looks up or creates `_OptimizerV2State`.
@@ -899,7 +899,7 @@ class OptimizerV2(optimizer_v1.Optimizer):
       self._per_graph_state[graph_key] = per_graph_state
     return per_graph_state
 
-  def distributed_apply(self, distribution, grads_and_vars, global_step, name):
+  def _distributed_apply(self, distribution, grads_and_vars, global_step, name):
     """`apply_gradients` for use with a `DistributionStrategy`."""
     reduced_grads = distribution.batch_reduce("sum", grads_and_vars)
     var_list = [v for _, v in grads_and_vars]
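
For context, the call site in the first hunk follows the DistributionStrategy merge pattern: per-tower code calls merge_call() to switch into cross-tower context, and the function it hands over (now the private _distributed_apply) receives the strategy object as its first argument, reduces the per-tower gradients, and applies the update once. The sketch below imitates that hand-off with a toy stand-in; ToyStrategy, its dict-based "variable", and the fixed SGD step are illustrative assumptions, not TensorFlow's real distribute_lib API.

# A minimal, self-contained sketch of the merge_call pattern, assuming a
# hypothetical ToyStrategy in place of TensorFlow's DistributionStrategy.

class ToyStrategy:
    """Toy stand-in for a DistributionStrategy running two towers."""

    def batch_reduce(self, reduction, grads_and_vars):
        # Sum each variable's per-tower gradients ("sum" reduction).
        assert reduction == "sum"
        return [sum(per_tower_grads) for per_tower_grads, _ in grads_and_vars]

    def merge_call(self, fn, *args, **kwargs):
        # Leave per-tower context and call fn once, cross-tower, with the
        # strategy as its first argument -- the same shape as the diff's
        # merge_call(self._distributed_apply, ...) call site.
        return fn(self, *args, **kwargs)


def _distributed_apply(distribution, grads_and_vars, global_step, name):
    # Cross-tower half of apply_gradients: reduce, then update each variable.
    reduced_grads = distribution.batch_reduce("sum", grads_and_vars)
    var_list = [v for _, v in grads_and_vars]
    for grad, var in zip(reduced_grads, var_list):
        # Toy SGD step; the real optimizer defers to _apply_dense and friends.
        var["value"] -= 0.1 * grad
    return name


strategy = ToyStrategy()
w = {"value": 1.0}
# Each entry pairs a tuple of per-tower gradients with its variable.
filtered = [((0.5, 0.25), w)]
strategy.merge_call(_distributed_apply, filtered, global_step=None, name="apply")
print(w["value"])  # 1.0 - 0.1 * (0.5 + 0.25) = 0.925

Note that the rename is purely an API-surface change: merge_call is handed the bound method object itself, so adding the leading underscore removes the method from the public interface without altering the dispatch.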