Change the MomentumOptimizer lambda so it has the same named argument
(learning_rate) as the MomentumOptimizer constructor.

author     A. Unique TensorFlower <gardener@tensorflow.org>
           Thu, 22 Feb 2018 18:59:57 +0000 (10:59 -0800)
committer  TensorFlower Gardener <gardener@tensorflow.org>
           Thu, 22 Feb 2018 19:03:46 +0000 (11:03 -0800)

PiperOrigin-RevId: 186642325

tensorflow/contrib/layers/python/layers/optimizers.py
tensorflow/contrib/layers/python/layers/optimizers_test.py

diff --git a/tensorflow/contrib/layers/python/layers/optimizers.py b/tensorflow/contrib/layers/python/layers/optimizers.py
index cdceea6..69d927e 100644
--- a/tensorflow/contrib/layers/python/layers/optimizers.py
+++ b/tensorflow/contrib/layers/python/layers/optimizers.py
@@ -41,7 +41,7 @@ OPTIMIZER_CLS_NAMES = {
     "Adagrad": train.AdagradOptimizer,
     "Adam": train.AdamOptimizer,
     "Ftrl": train.FtrlOptimizer,
-    "Momentum": lambda lr: train.MomentumOptimizer(lr, momentum=0.9),
+    "Momentum": lambda learning_rate: train.MomentumOptimizer(learning_rate, momentum=0.9),  # pylint: disable=line-too-long
     "RMSProp": train.RMSPropOptimizer,
     "SGD": train.GradientDescentOptimizer,
 }
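
The parameter name matters because optimize_loss, when given a string such as
"Momentum", looks the factory up in OPTIMIZER_CLS_NAMES and calls it with
learning_rate as a keyword argument; a lambda whose only parameter is named lr
would then raise a TypeError. A minimal standalone sketch of the mismatch (the
factory functions below are stand-ins for illustration, not the library code):

    # Stand-ins for the old and new "Momentum" entries in OPTIMIZER_CLS_NAMES.
    def momentum_factory_old(lr):                # old lambda: parameter named `lr`
        return ("MomentumOptimizer", lr, 0.9)

    def momentum_factory_new(learning_rate):     # new lambda: parameter named `learning_rate`
        return ("MomentumOptimizer", learning_rate, 0.9)

    # optimize_loss constructs string-named optimizers roughly as
    # OPTIMIZER_CLS_NAMES[name](learning_rate=lr), i.e. by keyword.
    try:
        momentum_factory_old(learning_rate=0.1)
    except TypeError as e:
        print("old lambda:", e)                  # unexpected keyword argument 'learning_rate'
    print("new lambda:", momentum_factory_new(learning_rate=0.1))
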
diff --git a/tensorflow/contrib/layers/python/layers/optimizers_test.py b/tensorflow/contrib/layers/python/layers/optimizers_test.py
index 1ea25bd..a4461a2 100644
--- a/tensorflow/contrib/layers/python/layers/optimizers_test.py
+++ b/tensorflow/contrib/layers/python/layers/optimizers_test.py
@@ -61,7 +61,8 @@ class OptimizersTest(test.TestCase):
     optimizers = [
         "SGD", gradient_descent.GradientDescentOptimizer,
         gradient_descent.GradientDescentOptimizer(learning_rate=0.1),
-        lambda lr: gradient_descent.GradientDescentOptimizer(learning_rate=lr)
+        lambda lr: gradient_descent.GradientDescentOptimizer(learning_rate=lr),
+        "Momentum"
     ]
     for optimizer in optimizers:
       with ops.Graph().as_default() as g:
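
With "Momentum" added to the list above, the test now also exercises the
string-lookup path. A hedged usage sketch of what that path looks like to a
caller (TF 1.x contrib API; the tiny placeholder model is an assumption made
for illustration, not taken from the test):

    # Hedged usage sketch (TF 1.x): after this change, "Momentum" can be
    # passed to optimize_loss as a string, just like "SGD" or "Adam".
    import tensorflow as tf

    x = tf.placeholder(tf.float32, [None, 1])
    y = tf.placeholder(tf.float32, [None, 1])
    w = tf.get_variable("w", [1, 1])
    loss = tf.reduce_mean(tf.square(tf.matmul(x, w) - y))
    global_step = tf.train.get_or_create_global_step()

    # optimize_loss looks up "Momentum" in OPTIMIZER_CLS_NAMES and builds
    # MomentumOptimizer(learning_rate=0.1, momentum=0.9) via the fixed lambda.
    train_op = tf.contrib.layers.optimize_loss(
        loss, global_step, learning_rate=0.1, optimizer="Momentum")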