Rename `distribute` to `train_distribute` parameter in `RunConfig` to clarify that the distribution strategy applies only to training.
author: Priya Gupta <priyag@google.com>
Wed, 4 Apr 2018 22:06:08 +0000 (15:06 -0700)
committer: TensorFlower Gardener <gardener@tensorflow.org>
Wed, 4 Apr 2018 22:08:48 +0000 (15:08 -0700)
PiperOrigin-RevId: 191654161

tensorflow/contrib/distribute/README.md
tensorflow/contrib/distribute/python/estimator_integration_test.py
tensorflow/contrib/distribute/python/examples/simple_estimator_example.py
tensorflow/contrib/distribute/python/examples/simple_tfkeras_example.py
tensorflow/contrib/learn/python/learn/estimators/run_config.py
tensorflow/python/estimator/estimator.py
tensorflow/python/estimator/run_config.py
tensorflow/tools/api/golden/tensorflow.estimator.-run-config.pbtxt

index 4af51be..28483f4 100644 (file)
@@ -77,7 +77,7 @@ parameter of `Estimator`.
 
 ```python
 distribution = tf.contrib.distribute.MirroredStrategy()
-config = tf.estimator.RunConfig(distribute=distribution)
+config = tf.estimator.RunConfig(train_distribute=distribution)
 classifier = tf.estimator.Estimator(model_fn=model_fn, config=config)
 classifier.train(input_fn=input_fn)
 ```
index 9be186a..2b49b8f 100644 (file)
@@ -95,7 +95,7 @@ class DNNLinearCombinedClassifierIntegrationTest(test.TestCase,
         # TODO(isaprykin): Work around the colocate_with error.
         dnn_optimizer=adagrad.AdagradOptimizer(0.001),
         linear_optimizer=adagrad.AdagradOptimizer(0.001),
-        config=run_config.RunConfig(distribute=distribution))
+        config=run_config.RunConfig(train_distribute=distribution))
 
     num_steps = 10
     estimator.train(train_input_fn, steps=num_steps)
index 5d6e02b..00c25c7 100644 (file)
@@ -59,7 +59,7 @@ def build_model_fn_optimizer():
 def main(_):
   distribution = tf.contrib.distribute.MirroredStrategy(
       ["/device:GPU:0", "/device:GPU:1"])
-  config = tf.estimator.RunConfig(distribute=distribution)
+  config = tf.estimator.RunConfig(train_distribute=distribution)
 
   def input_fn():
     features = tf.data.Dataset.from_tensors([[1.]]).repeat(10)
index e714255..b872242 100644 (file)
@@ -41,7 +41,7 @@ def main(args):
 
   strategy = tf.contrib.distribute.MirroredStrategy(
       ['/device:GPU:0', '/device:GPU:1'])
-  config = tf.estimator.RunConfig(distribute=strategy)
+  config = tf.estimator.RunConfig(train_distribute=strategy)
   optimizer = tf.train.GradientDescentOptimizer(0.2)
 
   model = tf.keras.Sequential()
index f3500bf..8c85c43 100644 (file)
@@ -298,7 +298,7 @@ class RunConfig(ClusterConfig, core_run_config.RunConfig):
     #   core_run_config.RunConfig.__init__(self)
     # so instead of breaking compatibility with that assumption, we
     # just manually initialize this field:
-    self._distribute = None
+    self._train_distribute = None
 
     gpu_options = config_pb2.GPUOptions(
         per_process_gpu_memory_fraction=gpu_memory_fraction)
index ab69a09..4d3eff7 100644 (file)
@@ -188,7 +188,7 @@ class Estimator(object):
       self._config = config
 
     # The distribute field contains an instance of DistributionStrategy.
-    self._distribution = self._config.distribute
+    self._distribution = self._config.train_distribute
 
     # Model directory.
     model_dir = compat_internal.path_to_str(model_dir)
index 41415b8..f62c9ce 100644 (file)
@@ -44,7 +44,7 @@ _DEFAULT_REPLACEABLE_LIST = [
     'keep_checkpoint_max',
     'keep_checkpoint_every_n_hours',
     'log_step_count_steps',
-    'distribute'
+    'train_distribute'
 ]
 
 _SAVE_CKPT_ERR = (
@@ -302,7 +302,7 @@ class RunConfig(object):
                keep_checkpoint_max=5,
                keep_checkpoint_every_n_hours=10000,
                log_step_count_steps=100,
-               distribute=None):
+               train_distribute=None):
     """Constructs a RunConfig.
 
     All distributed training related properties `cluster_spec`, `is_chief`,
@@ -426,10 +426,10 @@ class RunConfig(object):
         the feature.
       log_step_count_steps: The frequency, in number of global steps, that the
         global step/sec and the loss will be logged during training.
-      distribute: an optional instance of
+      train_distribute: an optional instance of
         `tf.contrib.distribute.DistributionStrategy`. If specified,
-        then Estimator will distribute the user's model according to the policy
-        specified by that strategy.
+        then Estimator will distribute the user's model during training,
+        according to the policy specified by that strategy.
 
     Raises:
       ValueError: If both `save_checkpoints_steps` and `save_checkpoints_secs`
@@ -466,7 +466,7 @@ class RunConfig(object):
         keep_checkpoint_max=keep_checkpoint_max,
         keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours,
         log_step_count_steps=log_step_count_steps,
-        distribute=distribute)
+        train_distribute=train_distribute)
 
     self._init_distributed_setting_from_environment_var(tf_config)
 
@@ -678,10 +678,10 @@ class RunConfig(object):
     return self._service
 
   @property
-  def distribute(self):
+  def train_distribute(self):
     """Returns the optional `tf.contrib.distribute.DistributionStrategy` object.
     """
-    return self._distribute
+    return self._train_distribute
 
   def replace(self, **kwargs):
     """Returns a new instance of `RunConfig` replacing specified properties.
@@ -697,7 +697,7 @@ class RunConfig(object):
       - `keep_checkpoint_max`,
       - `keep_checkpoint_every_n_hours`,
       - `log_step_count_steps`,
-      - `distribute`.
+      - `train_distribute`.
 
     In addition, either `save_checkpoints_steps` or `save_checkpoints_secs`
     can be set (should not be both).
index 759ff75..05e603e 100644 (file)
@@ -7,10 +7,6 @@ tf_class {
     mtype: "<type \'property\'>"
   }
   member {
-    name: "distribute"
-    mtype: "<type \'property\'>"
-  }
-  member {
     name: "evaluation_master"
     mtype: "<type \'property\'>"
   }
@@ -82,9 +78,13 @@ tf_class {
     name: "tf_random_seed"
     mtype: "<type \'property\'>"
   }
+  member {
+    name: "train_distribute"
+    mtype: "<type \'property\'>"
+  }
   member_method {
     name: "__init__"
-    argspec: "args=[\'self\', \'model_dir\', \'tf_random_seed\', \'save_summary_steps\', \'save_checkpoints_steps\', \'save_checkpoints_secs\', \'session_config\', \'keep_checkpoint_max\', \'keep_checkpoint_every_n_hours\', \'log_step_count_steps\', \'distribute\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'100\', \'<object object instance>\', \'<object object instance>\', \'None\', \'5\', \'10000\', \'100\', \'None\'], "
+    argspec: "args=[\'self\', \'model_dir\', \'tf_random_seed\', \'save_summary_steps\', \'save_checkpoints_steps\', \'save_checkpoints_secs\', \'session_config\', \'keep_checkpoint_max\', \'keep_checkpoint_every_n_hours\', \'log_step_count_steps\', \'train_distribute\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'100\', \'<object object instance>\', \'<object object instance>\', \'None\', \'5\', \'10000\', \'100\', \'None\'], "
   }
   member_method {
     name: "replace"