Re-apply CL 194140820, which reverts #18251 (convolution change).
author    Patrick Nguyen <drpng@google.com>
Wed, 2 May 2018 00:48:36 +0000 (17:48 -0700)
committer TensorFlower Gardener <gardener@tensorflow.org>
Wed, 2 May 2018 00:51:07 +0000 (17:51 -0700)
PiperOrigin-RevId: 195027049

tensorflow/contrib/layers/python/layers/layers.py
tensorflow/contrib/layers/python/layers/layers_test.py
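
For orientation, a minimal sketch (not part of the commit; assumes TensorFlow 1.x with tf.contrib available, names and shapes are illustrative) of how the layers behave once this revert is in: convolution2d and convolution3d are again plain aliases of convolution, so the dimensionality is inferred from the input rank, and the conv_dims rank check that raised "Convolution expects input with rank N, got M" is gone.

    # Hedged sketch of post-revert behavior of the tf.contrib.layers aliases.
    import tensorflow as tf

    images = tf.random_uniform((5, 7, 9, 3), seed=1)        # rank-4 input
    net2d = tf.contrib.layers.convolution2d(images, 32, [3, 3])       # rank 4 -> Convolution2D

    volumes = tf.random_uniform((5, 6, 7, 9, 3), seed=1)    # rank-5 input
    net3d = tf.contrib.layers.convolution3d(volumes, 32, [3, 3, 3])   # rank 5 -> Convolution3D

    # With conv_dims removed, convolution3d(images, 32, 3) no longer raises a
    # ValueError on a rank-4 input; it simply runs a 2-D convolution.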

diff --git a/tensorflow/contrib/layers/python/layers/layers.py b/tensorflow/contrib/layers/python/layers/layers.py
index 2f3e576..25c3b1e 100644
--- a/tensorflow/contrib/layers/python/layers/layers.py
+++ b/tensorflow/contrib/layers/python/layers/layers.py
@@ -932,8 +932,7 @@ def convolution(inputs,
                 variables_collections=None,
                 outputs_collections=None,
                 trainable=True,
-                scope=None,
-                conv_dims=None):
+                scope=None):
   """Adds an N-D convolution followed by an optional batch_norm layer.
 
   It is required that 1 <= N <= 3.
@@ -994,10 +993,6 @@ def convolution(inputs,
     trainable: If `True` also add variables to the graph collection
       `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
     scope: Optional scope for `variable_scope`.
-    conv_dims: Optional convolution dimensionality, when set it would use the
-      corresponding convolution (e.g. 2 for Conv 2D, 3 for Conv 3D, ..). When
-      leaved to None it would select the convolution dimensionality based on
-      the input rank (i.e. Conv ND, with N = input_rank - 2).
 
   Returns:
     A tensor representing the output of the operation.
@@ -1020,9 +1015,6 @@ def convolution(inputs,
     inputs = ops.convert_to_tensor(inputs)
     input_rank = inputs.get_shape().ndims
 
-    if conv_dims is not None and conv_dims + 2 != input_rank:
-      raise ValueError('Convolution expects input with rank %d, got %d' %
-                       (conv_dims + 2, input_rank))
     if input_rank == 3:
       layer_class = convolutional_layers.Convolution1D
     elif input_rank == 4:
@@ -1069,134 +1061,10 @@ def convolution(inputs,
       outputs = activation_fn(outputs)
     return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
 
-@add_arg_scope
-def convolution1d(inputs,
-                  num_outputs,
-                  kernel_size,
-                  stride=1,
-                  padding='SAME',
-                  data_format=None,
-                  rate=1,
-                  activation_fn=nn.relu,
-                  normalizer_fn=None,
-                  normalizer_params=None,
-                  weights_initializer=initializers.xavier_initializer(),
-                  weights_regularizer=None,
-                  biases_initializer=init_ops.zeros_initializer(),
-                  biases_regularizer=None,
-                  reuse=None,
-                  variables_collections=None,
-                  outputs_collections=None,
-                  trainable=True,
-                  scope=None):
-  return convolution(inputs,
-                     num_outputs,
-                     kernel_size,
-                     stride,
-                     padding,
-                     data_format,
-                     rate,
-                     activation_fn,
-                     normalizer_fn,
-                     normalizer_params,
-                     weights_initializer,
-                     weights_regularizer,
-                     biases_initializer,
-                     biases_regularizer,
-                     reuse,
-                     variables_collections,
-                     outputs_collections,
-                     trainable,
-                     scope,
-                     conv_dims=1)
-
-convolution1d.__doc__ = convolution.__doc__
 
-@add_arg_scope
-def convolution2d(inputs,
-                  num_outputs,
-                  kernel_size,
-                  stride=1,
-                  padding='SAME',
-                  data_format=None,
-                  rate=1,
-                  activation_fn=nn.relu,
-                  normalizer_fn=None,
-                  normalizer_params=None,
-                  weights_initializer=initializers.xavier_initializer(),
-                  weights_regularizer=None,
-                  biases_initializer=init_ops.zeros_initializer(),
-                  biases_regularizer=None,
-                  reuse=None,
-                  variables_collections=None,
-                  outputs_collections=None,
-                  trainable=True,
-                  scope=None):
-  return convolution(inputs,
-                     num_outputs,
-                     kernel_size,
-                     stride,
-                     padding,
-                     data_format,
-                     rate,
-                     activation_fn,
-                     normalizer_fn,
-                     normalizer_params,
-                     weights_initializer,
-                     weights_regularizer,
-                     biases_initializer,
-                     biases_regularizer,
-                     reuse,
-                     variables_collections,
-                     outputs_collections,
-                     trainable,
-                     scope,
-                     conv_dims=2)
-
-convolution2d.__doc__ = convolution.__doc__
+convolution2d = convolution
+convolution3d = convolution
 
-@add_arg_scope
-def convolution3d(inputs,
-                  num_outputs,
-                  kernel_size,
-                  stride=1,
-                  padding='SAME',
-                  data_format=None,
-                  rate=1,
-                  activation_fn=nn.relu,
-                  normalizer_fn=None,
-                  normalizer_params=None,
-                  weights_initializer=initializers.xavier_initializer(),
-                  weights_regularizer=None,
-                  biases_initializer=init_ops.zeros_initializer(),
-                  biases_regularizer=None,
-                  reuse=None,
-                  variables_collections=None,
-                  outputs_collections=None,
-                  trainable=True,
-                  scope=None):
-  return convolution(inputs,
-                     num_outputs,
-                     kernel_size,
-                     stride,
-                     padding,
-                     data_format,
-                     rate,
-                     activation_fn,
-                     normalizer_fn,
-                     normalizer_params,
-                     weights_initializer,
-                     weights_regularizer,
-                     biases_initializer,
-                     biases_regularizer,
-                     reuse,
-                     variables_collections,
-                     outputs_collections,
-                     trainable,
-                     scope,
-                     conv_dims=3)
-
-convolution3d.__doc__ = convolution.__doc__
 
 @add_arg_scope
 def convolution2d_in_plane(
@@ -1543,7 +1411,7 @@ def dense_to_sparse(tensor, eos_token=0, outputs_collections=None, scope=None):
   Args:
      tensor: An `int` `Tensor` to be converted to a `Sparse`.
      eos_token: An integer.
-       It is part of the target label that signifies the end of a sentence.
+       It is part of the target label that signfies the end of a sentence.
      outputs_collections: Collection to add the outputs.
      scope: Optional scope for name_scope.
   """
@@ -1687,7 +1555,7 @@ def _inner_flatten(inputs, new_rank, output_collections=None, scope=None):
     output_collections: Collection to which the outputs will be added.
     scope: Optional scope for `name_scope`.
   Returns:
-    A `Tensor` or `SparseTensor` containing the same values as `inputs`, but
+    A `Tensor` or `SparseTensor` conataining the same values as `inputs`, but
     with innermost dimensions flattened to obtain rank `new_rank`.
 
   Raises:
diff --git a/tensorflow/contrib/layers/python/layers/layers_test.py b/tensorflow/contrib/layers/python/layers/layers_test.py
index b01fd5d..997f910 100644
--- a/tensorflow/contrib/layers/python/layers/layers_test.py
+++ b/tensorflow/contrib/layers/python/layers/layers_test.py
@@ -310,17 +310,6 @@ class BiasAddTest(test.TestCase):
 
 class ConvolutionTest(test.TestCase):
 
-  def testInvalidShape(self):
-    with self.test_session():
-      images_2d = random_ops.random_uniform((5, 7, 9, 3), seed=1)
-      with self.assertRaisesRegexp(
-          ValueError, 'Convolution expects input with rank 5, got 4'):
-        layers_lib.convolution3d(images_2d, 32, 3)
-      images_3d = random_ops.random_uniform((5, 6, 7, 9, 3), seed=1)
-      with self.assertRaisesRegexp(
-          ValueError, 'Convolution expects input with rank 4, got 5'):
-        layers_lib.convolution2d(images_3d, 32, 3)
-
   def testInvalidDataFormat(self):
     height, width = 7, 9
     with self.test_session():
@@ -3166,7 +3155,7 @@ class RepeatTests(test.TestCase):
     with self.test_session():
       images = np.random.uniform(size=(5, height, width, 3)).astype(np.float32)
       output = _layers.repeat(images, 3, layers_lib.conv2d, 32, [3, 3])
-      self.assertEqual(output.op.name, 'Repeat/convolution2d_3/Relu')
+      self.assertEqual(output.op.name, 'Repeat/convolution_3/Relu')
       self.assertListEqual(output.get_shape().as_list(), [5, 3, 3, 32])
 
   def testRepeatWithScope(self):
@@ -3760,7 +3749,7 @@ class StackTests(test.TestCase):
           layers_lib.convolution2d, [10, 20, 30],
           kernel_size=[3, 3],
           padding='SAME')
-      self.assertEqual(output.op.name, 'Stack/convolution2d_3/Relu')
+      self.assertEqual(output.op.name, 'Stack/convolution_3/Relu')
       self.assertListEqual(output.get_shape().as_list(), [5, 3, 3, 30])
 
   def testStackWithScope(self):
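
As a rough illustration of the test expectation changes above (a sketch assuming TensorFlow 1.x contrib layers; input shapes are illustrative): repeat and stack derive each iteration's scope from the layer function's __name__, and with convolution2d aliased back to convolution that name is simply 'convolution', hence 'Repeat/convolution_3/Relu' and 'Stack/convolution_3/Relu'.

    # Hedged sketch of where the expected op names in the updated tests come from.
    import numpy as np
    import tensorflow as tf

    images = tf.constant(np.random.uniform(size=(5, 3, 3, 3)).astype(np.float32))

    output = tf.contrib.layers.repeat(images, 3, tf.contrib.layers.conv2d, 32, [3, 3])
    print(output.op.name)    # expected: Repeat/convolution_3/Relu

    stacked = tf.contrib.layers.stack(images, tf.contrib.layers.convolution2d, [10, 20, 30],
                                      kernel_size=[3, 3], padding='SAME')
    print(stacked.op.name)   # expected: Stack/convolution_3/Relu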