```python
distribution = tf.contrib.distribute.MirroredStrategy()
-config = tf.estimator.RunConfig(distribute=distribution)
+config = tf.estimator.RunConfig(train_distribute=distribution)
classifier = tf.estimator.Estimator(model_fn=model_fn, config=config)
classifier.train(input_fn=input_fn)
```
# TODO(isaprykin): Work around the colocate_with error.
dnn_optimizer=adagrad.AdagradOptimizer(0.001),
linear_optimizer=adagrad.AdagradOptimizer(0.001),
- config=run_config.RunConfig(distribute=distribution))
+ config=run_config.RunConfig(train_distribute=distribution))
num_steps = 10
estimator.train(train_input_fn, steps=num_steps)
def main(_):
distribution = tf.contrib.distribute.MirroredStrategy(
["/device:GPU:0", "/device:GPU:1"])
- config = tf.estimator.RunConfig(distribute=distribution)
+ config = tf.estimator.RunConfig(train_distribute=distribution)
def input_fn():
features = tf.data.Dataset.from_tensors([[1.]]).repeat(10)
strategy = tf.contrib.distribute.MirroredStrategy(
['/device:GPU:0', '/device:GPU:1'])
- config = tf.estimator.RunConfig(distribute=strategy)
+ config = tf.estimator.RunConfig(train_distribute=strategy)
optimizer = tf.train.GradientDescentOptimizer(0.2)
model = tf.keras.Sequential()
# core_run_config.RunConfig.__init__(self)
# so instead of breaking compatibility with that assumption, we
# just manually initialize this field:
- self._distribute = None
+ self._train_distribute = None
gpu_options = config_pb2.GPUOptions(
per_process_gpu_memory_fraction=gpu_memory_fraction)
self._config = config
- # The distribute field contains an instance of DistributionStrategy.
+ # The train_distribute field contains an instance of DistributionStrategy.
- self._distribution = self._config.distribute
+ self._distribution = self._config.train_distribute
# Model directory.
model_dir = compat_internal.path_to_str(model_dir)
'keep_checkpoint_max',
'keep_checkpoint_every_n_hours',
'log_step_count_steps',
- 'distribute'
+ 'train_distribute'
]
_SAVE_CKPT_ERR = (
keep_checkpoint_max=5,
keep_checkpoint_every_n_hours=10000,
log_step_count_steps=100,
- distribute=None):
+ train_distribute=None):
"""Constructs a RunConfig.
All distributed training related properties `cluster_spec`, `is_chief`,
the feature.
log_step_count_steps: The frequency, in number of global steps, that the
global step/sec and the loss will be logged during training.
- distribute: an optional instance of
+ train_distribute: an optional instance of
`tf.contrib.distribute.DistributionStrategy`. If specified,
- then Estimator will distribute the user's model according to the policy
- specified by that strategy.
+ then Estimator will distribute the user's model during training,
+ according to the policy specified by that strategy.
Raises:
ValueError: If both `save_checkpoints_steps` and `save_checkpoints_secs`
keep_checkpoint_max=keep_checkpoint_max,
keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours,
log_step_count_steps=log_step_count_steps,
- distribute=distribute)
+ train_distribute=train_distribute)
self._init_distributed_setting_from_environment_var(tf_config)
return self._service
@property
- def distribute(self):
+ def train_distribute(self):
"""Returns the optional `tf.contrib.distribute.DistributionStrategy` object.
"""
- return self._distribute
+ return self._train_distribute
def replace(self, **kwargs):
"""Returns a new instance of `RunConfig` replacing specified properties.
- `keep_checkpoint_max`,
- `keep_checkpoint_every_n_hours`,
- `log_step_count_steps`,
- - `distribute`.
+ - `train_distribute`.
In addition, either `save_checkpoints_steps` or `save_checkpoints_secs`
can be set (should not be both).
mtype: "<type \'property\'>"
}
member {
- name: "distribute"
- mtype: "<type \'property\'>"
- }
- member {
name: "evaluation_master"
mtype: "<type \'property\'>"
}
name: "tf_random_seed"
mtype: "<type \'property\'>"
}
+ member {
+ name: "train_distribute"
+ mtype: "<type \'property\'>"
+ }
member_method {
name: "__init__"
- argspec: "args=[\'self\', \'model_dir\', \'tf_random_seed\', \'save_summary_steps\', \'save_checkpoints_steps\', \'save_checkpoints_secs\', \'session_config\', \'keep_checkpoint_max\', \'keep_checkpoint_every_n_hours\', \'log_step_count_steps\', \'distribute\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'100\', \'<object object instance>\', \'<object object instance>\', \'None\', \'5\', \'10000\', \'100\', \'None\'], "
+ argspec: "args=[\'self\', \'model_dir\', \'tf_random_seed\', \'save_summary_steps\', \'save_checkpoints_steps\', \'save_checkpoints_secs\', \'session_config\', \'keep_checkpoint_max\', \'keep_checkpoint_every_n_hours\', \'log_step_count_steps\', \'train_distribute\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'100\', \'<object object instance>\', \'<object object instance>\', \'None\', \'5\', \'10000\', \'100\', \'None\'], "
}
member_method {
name: "replace"