variables_collections=None,
outputs_collections=None,
trainable=True,
- scope=None,
- conv_dims=None):
+ scope=None):
"""Adds an N-D convolution followed by an optional batch_norm layer.
It is required that 1 <= N <= 3.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
scope: Optional scope for `variable_scope`.
- conv_dims: Optional convolution dimensionality, when set it would use the
- corresponding convolution (e.g. 2 for Conv 2D, 3 for Conv 3D, ..). When
- leaved to None it would select the convolution dimensionality based on
- the input rank (i.e. Conv ND, with N = input_rank - 2).
Returns:
A tensor representing the output of the operation.
inputs = ops.convert_to_tensor(inputs)
input_rank = inputs.get_shape().ndims
- if conv_dims is not None and conv_dims + 2 != input_rank:
- raise ValueError('Convolution expects input with rank %d, got %d' %
- (conv_dims + 2, input_rank))
if input_rank == 3:
layer_class = convolutional_layers.Convolution1D
elif input_rank == 4:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
-@add_arg_scope
-def convolution1d(inputs,
- num_outputs,
- kernel_size,
- stride=1,
- padding='SAME',
- data_format=None,
- rate=1,
- activation_fn=nn.relu,
- normalizer_fn=None,
- normalizer_params=None,
- weights_initializer=initializers.xavier_initializer(),
- weights_regularizer=None,
- biases_initializer=init_ops.zeros_initializer(),
- biases_regularizer=None,
- reuse=None,
- variables_collections=None,
- outputs_collections=None,
- trainable=True,
- scope=None):
- return convolution(inputs,
- num_outputs,
- kernel_size,
- stride,
- padding,
- data_format,
- rate,
- activation_fn,
- normalizer_fn,
- normalizer_params,
- weights_initializer,
- weights_regularizer,
- biases_initializer,
- biases_regularizer,
- reuse,
- variables_collections,
- outputs_collections,
- trainable,
- scope,
- conv_dims=1)
-
-convolution1d.__doc__ = convolution.__doc__
-@add_arg_scope
-def convolution2d(inputs,
- num_outputs,
- kernel_size,
- stride=1,
- padding='SAME',
- data_format=None,
- rate=1,
- activation_fn=nn.relu,
- normalizer_fn=None,
- normalizer_params=None,
- weights_initializer=initializers.xavier_initializer(),
- weights_regularizer=None,
- biases_initializer=init_ops.zeros_initializer(),
- biases_regularizer=None,
- reuse=None,
- variables_collections=None,
- outputs_collections=None,
- trainable=True,
- scope=None):
- return convolution(inputs,
- num_outputs,
- kernel_size,
- stride,
- padding,
- data_format,
- rate,
- activation_fn,
- normalizer_fn,
- normalizer_params,
- weights_initializer,
- weights_regularizer,
- biases_initializer,
- biases_regularizer,
- reuse,
- variables_collections,
- outputs_collections,
- trainable,
- scope,
- conv_dims=2)
-
-convolution2d.__doc__ = convolution.__doc__
+convolution2d = convolution
+convolution3d = convolution
-@add_arg_scope
-def convolution3d(inputs,
- num_outputs,
- kernel_size,
- stride=1,
- padding='SAME',
- data_format=None,
- rate=1,
- activation_fn=nn.relu,
- normalizer_fn=None,
- normalizer_params=None,
- weights_initializer=initializers.xavier_initializer(),
- weights_regularizer=None,
- biases_initializer=init_ops.zeros_initializer(),
- biases_regularizer=None,
- reuse=None,
- variables_collections=None,
- outputs_collections=None,
- trainable=True,
- scope=None):
- return convolution(inputs,
- num_outputs,
- kernel_size,
- stride,
- padding,
- data_format,
- rate,
- activation_fn,
- normalizer_fn,
- normalizer_params,
- weights_initializer,
- weights_regularizer,
- biases_initializer,
- biases_regularizer,
- reuse,
- variables_collections,
- outputs_collections,
- trainable,
- scope,
- conv_dims=3)
-
-convolution3d.__doc__ = convolution.__doc__
@add_arg_scope
def convolution2d_in_plane(
Args:
tensor: An `int` `Tensor` to be converted to a `Sparse`.
eos_token: An integer.
- It is part of the target label that signifies the end of a sentence.
+ It is part of the target label that signifies the end of a sentence.
outputs_collections: Collection to add the outputs.
scope: Optional scope for name_scope.
"""
output_collections: Collection to which the outputs will be added.
scope: Optional scope for `name_scope`.
Returns:
- A `Tensor` or `SparseTensor` containing the same values as `inputs`, but
+ A `Tensor` or `SparseTensor` containing the same values as `inputs`, but
with innermost dimensions flattened to obtain rank `new_rank`.
Raises:
class ConvolutionTest(test.TestCase):
- def testInvalidShape(self):
- with self.test_session():
- images_2d = random_ops.random_uniform((5, 7, 9, 3), seed=1)
- with self.assertRaisesRegexp(
- ValueError, 'Convolution expects input with rank 5, got 4'):
- layers_lib.convolution3d(images_2d, 32, 3)
- images_3d = random_ops.random_uniform((5, 6, 7, 9, 3), seed=1)
- with self.assertRaisesRegexp(
- ValueError, 'Convolution expects input with rank 4, got 5'):
- layers_lib.convolution2d(images_3d, 32, 3)
-
def testInvalidDataFormat(self):
height, width = 7, 9
with self.test_session():
with self.test_session():
images = np.random.uniform(size=(5, height, width, 3)).astype(np.float32)
output = _layers.repeat(images, 3, layers_lib.conv2d, 32, [3, 3])
- self.assertEqual(output.op.name, 'Repeat/convolution2d_3/Relu')
+ self.assertEqual(output.op.name, 'Repeat/convolution_3/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, 3, 3, 32])
def testRepeatWithScope(self):
layers_lib.convolution2d, [10, 20, 30],
kernel_size=[3, 3],
padding='SAME')
- self.assertEqual(output.op.name, 'Stack/convolution2d_3/Relu')
+ self.assertEqual(output.op.name, 'Stack/convolution_3/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, 3, 3, 30])
def testStackWithScope(self):