remove unused parameters in optimizer tests (#18084)
authorJongsoo Park <jongsoo@fb.com>
Sat, 16 Mar 2019 01:02:53 +0000 (18:02 -0700)
committerFacebook Github Bot <facebook-github-bot@users.noreply.github.com>
Sat, 16 Mar 2019 01:06:15 +0000 (18:06 -0700)
Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/18084

The data_strategy parameter (a Hypothesis st.data() strategy) was unused in several of the optimizer unit tests; this removes it from those tests' given decorators and signatures.
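
For context, a minimal sketch of the pattern being cleaned up (the test names and bodies below are hypothetical, not taken from this PR): a Hypothesis st.data() argument only does anything when the test body calls data.draw(...), so a test that never draws from it can drop the parameter without changing what gets generated.

    import numpy as np
    from hypothesis import given, strategies as st

    # Before: `data` is injected by st.data() but never drawn from, so it
    # only adds an unused argument to every generated example.
    @given(lr=st.floats(min_value=0.01, max_value=0.99,
                        allow_nan=False, allow_infinity=False),
           data=st.data())
    def test_lr_with_unused_strategy(lr, data):
        lr = np.array([lr], dtype=np.float32)
        assert lr.shape == (1,)

    # After: identical generated inputs, one fewer parameter.
    @given(lr=st.floats(min_value=0.01, max_value=0.99,
                        allow_nan=False, allow_infinity=False))
    def test_lr_without_unused_strategy(lr):
        lr = np.array([lr], dtype=np.float32)
        assert lr.shape == (1,)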

Reviewed By: hyuen

Differential Revision: D14487830

fbshipit-source-id: d757cd06aa2965f4c0570a4a18ba090b98820ef4

caffe2/python/operator_test/adadelta_test.py
caffe2/python/operator_test/adagrad_test.py
caffe2/python/operator_test/wngrad_test.py

diff --git a/caffe2/python/operator_test/adadelta_test.py b/caffe2/python/operator_test/adadelta_test.py
index ab8065d..94709dc 100644
@@ -144,10 +144,8 @@ class TestAdadelta(serial.SerializedTestCase):
                              allow_nan=False, allow_infinity=False),
            decay=st.floats(min_value=0.01, max_value=0.99,
                              allow_nan=False, allow_infinity=False),
-           data_strategy=st.data(),
            **hu.gcs)
-    def test_sparse_adadelta_empty(self, inputs, lr, epsilon, decay,
-                                  data_strategy, gc, dc):
+    def test_sparse_adadelta_empty(self, inputs, lr, epsilon, decay, gc, dc):
         param, moment, moment_delta = inputs
         moment = np.abs(moment)
         lr = np.array([lr], dtype=np.float32)
diff --git a/caffe2/python/operator_test/adagrad_test.py b/caffe2/python/operator_test/adagrad_test.py
index 0ae49d4..5287b60 100644
@@ -134,10 +134,9 @@ class TestAdagrad(serial.SerializedTestCase):
         epsilon=st.floats(
             min_value=0.01, max_value=0.99, allow_nan=False, allow_infinity=False
         ),
-        data_strategy=st.data(),
         **hu.gcs
     )
-    def test_sparse_adagrad_empty(self, inputs, lr, epsilon, data_strategy, gc, dc):
+    def test_sparse_adagrad_empty(self, inputs, lr, epsilon, gc, dc):
         param, momentum = inputs
         grad = np.empty(shape=(0,) + param.shape[1:], dtype=np.float32)
 
@@ -176,10 +175,9 @@ class TestAdagrad(serial.SerializedTestCase):
         epsilon=st.floats(
             min_value=0.01, max_value=0.99, allow_nan=False, allow_infinity=False
         ),
-        data_strategy=st.data(),
         **hu.gcs
     )
-    def test_row_wise_sparse_adagrad(self, inputs, lr, epsilon, data_strategy, gc, dc):
+    def test_row_wise_sparse_adagrad(self, inputs, lr, epsilon, gc, dc):
         adagrad_sparse_test_helper(
             self,
             inputs,
@@ -200,11 +198,10 @@ class TestAdagrad(serial.SerializedTestCase):
         epsilon=st.floats(
             min_value=0.01, max_value=0.99, allow_nan=False, allow_infinity=False
         ),
-        data_strategy=st.data(),
         **hu.gcs
     )
     def test_row_wise_sparse_adagrad_empty(
-        self, inputs, lr, epsilon, data_strategy, gc, dc
+        self, inputs, lr, epsilon, gc, dc
     ):
         param, momentum = inputs
         grad = np.empty(shape=(0,) + param.shape[1:], dtype=np.float32)
diff --git a/caffe2/python/operator_test/wngrad_test.py b/caffe2/python/operator_test/wngrad_test.py
index 7b4e81c..1556299 100644
@@ -182,10 +182,8 @@ class TestWngrad(serial.SerializedTestCase):
                         allow_nan=False, allow_infinity=False),
            epsilon=st.floats(min_value=0.01, max_value=0.99,
                              allow_nan=False, allow_infinity=False),
-           data_strategy=st.data(),
            **hu.gcs_cpu_only)
-    def test_sparse_wngrad_empty(self, inputs, seq_b, lr, epsilon,
-                                  data_strategy, gc, dc):
+    def test_sparse_wngrad_empty(self, inputs, seq_b, lr, epsilon, gc, dc):
         param = inputs[0]
         seq_b = np.array([seq_b, ], dtype=np.float32)
         lr = np.array([lr], dtype=np.float32)