TpuStrategy -> TPUStrategy
author: Jonathan Hseu <jhseu@google.com>
Tue, 17 Apr 2018 23:24:42 +0000 (16:24 -0700)
committer: TensorFlower Gardener <gardener@tensorflow.org>
Tue, 17 Apr 2018 23:27:25 +0000 (16:27 -0700)
PiperOrigin-RevId: 193275991

tensorflow/contrib/distribute/python/combinations.py
tensorflow/contrib/distribute/python/tpu_strategy.py

index 1f66997..946310a 100644 (file)
@@ -266,7 +266,7 @@ one_device_strategy = NamedDistribution(
     "OneDeviceCPU", one_device_strategy.OneDeviceStrategy("/cpu:0"),
     None)
 tpu_strategy = NamedDistribution(
-    "TPU", tpu_strategy.TpuStrategy(), required_tpu=True)
+    "TPU", tpu_strategy.TPUStrategy(), required_tpu=True)
 mirrored_strategy_with_gpu_and_cpu = NamedDistribution(
     "MirroredCPUAndGPU",
     mirrored_strategy.MirroredStrategy(["/gpu:0", "/cpu:0"]), 1)
index 0ac307d..804217b 100644 (file)
@@ -32,10 +32,10 @@ from tensorflow.python.ops import control_flow_ops
 
 
 # TODO(isaprykin):  Consider whether inheriting is really appropriate.
-class TpuStrategy(one_device_strategy.OneDeviceStrategy):
+class TPUStrategy(one_device_strategy.OneDeviceStrategy):
 
   def __init__(self, master=None, iterations=None, model_dir=None):
-    super(TpuStrategy, self).__init__('/cpu:0')
+    super(TPUStrategy, self).__init__('/cpu:0')
 
   def _call_for_each_tower(self, fn, *args, **kwargs):
     kwargs.pop('run_concurrently', None)