path: root/tensorflow/contrib/layers
author    Yifei Feng <yifeif@google.com>    2018-05-24 19:12:26 -0700
committer TensorFlower Gardener <gardener@tensorflow.org>    2018-05-24 19:15:01 -0700
commit b59833c3fd91511b33255369016868e4ae6cda2e (patch)
tree   ecbd70cfd3abb5d934f6eb4b7280a35e8589f5cf /tensorflow/contrib/layers
parent 2b99d9cbc7166efedaff9eee11744348da30fc8a (diff)
Merge changes from github.
Revert #18413. Too many internal test failures due to the name scope change caused by this change.
Revert #18192. Cannot use re2::StringPiece internally. Need alternative for set call. Will pull and clean this up in a separate change.

PiperOrigin-RevId: 197991247
Diffstat (limited to 'tensorflow/contrib/layers')
-rw-r--r--  tensorflow/contrib/layers/python/layers/layers.py       | 142
-rw-r--r--  tensorflow/contrib/layers/python/layers/layers_test.py  |  15
2 files changed, 150 insertions(+), 7 deletions(-)
diff --git a/tensorflow/contrib/layers/python/layers/layers.py b/tensorflow/contrib/layers/python/layers/layers.py
index f708da6693..b7194ae333 100644
--- a/tensorflow/contrib/layers/python/layers/layers.py
+++ b/tensorflow/contrib/layers/python/layers/layers.py
@@ -932,7 +932,8 @@ def convolution(inputs,
variables_collections=None,
outputs_collections=None,
trainable=True,
- scope=None):
+ scope=None,
+ conv_dims=None):
"""Adds an N-D convolution followed by an optional batch_norm layer.
It is required that 1 <= N <= 3.
@@ -993,6 +994,10 @@ def convolution(inputs,
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
scope: Optional scope for `variable_scope`.
+ conv_dims: Optional convolution dimensionality. When set, the corresponding
+ convolution is used (e.g. 2 for Conv 2D, 3 for Conv 3D, ..). When left as
+ None, the convolution dimensionality is selected based on the input rank
+ (i.e. Conv ND, with N = input_rank - 2).
Returns:
A tensor representing the output of the operation.
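A minimal usage sketch of the new conv_dims argument (the tensor shapes here are illustrative, not taken from this change):

import tensorflow as tf
from tensorflow.contrib import layers

# Rank-4 NHWC input: batch, height, width, channels.
images = tf.random_uniform((5, 7, 9, 3))
# conv_dims=2 pins the layer to a 2-D convolution explicitly.
net = layers.convolution(images, num_outputs=32, kernel_size=3, conv_dims=2)
# Left as None, the dimensionality is inferred from the input rank
# (N = input_rank - 2), so this call also builds a 2-D convolution.
net_inferred = layers.convolution(images, num_outputs=32, kernel_size=3)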
@@ -1015,6 +1020,9 @@ def convolution(inputs,
inputs = ops.convert_to_tensor(inputs)
input_rank = inputs.get_shape().ndims
+ if conv_dims is not None and conv_dims + 2 != input_rank:
+ raise ValueError('Convolution expects input with rank %d, got %d' %
+ (conv_dims + 2, input_rank))
if input_rank == 3:
layer_class = convolutional_layers.Convolution1D
elif input_rank == 4:
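Note that this validation fires at graph-construction time, since the rank comes from inputs.get_shape().ndims, so a mismatched conv_dims fails before a session ever runs. A small sketch of the failure mode (shapes illustrative):

import tensorflow as tf
from tensorflow.contrib import layers

images = tf.random_uniform((5, 7, 9, 3))  # rank 4
try:
  # conv_dims=3 expects a rank-5 input (batch + 3 spatial dims + channels).
  layers.convolution(images, num_outputs=32, kernel_size=3, conv_dims=3)
except ValueError as err:
  print(err)  # Convolution expects input with rank 5, got 4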
@@ -1061,10 +1069,134 @@ def convolution(inputs,
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
+@add_arg_scope
+def convolution1d(inputs,
+ num_outputs,
+ kernel_size,
+ stride=1,
+ padding='SAME',
+ data_format=None,
+ rate=1,
+ activation_fn=nn.relu,
+ normalizer_fn=None,
+ normalizer_params=None,
+ weights_initializer=initializers.xavier_initializer(),
+ weights_regularizer=None,
+ biases_initializer=init_ops.zeros_initializer(),
+ biases_regularizer=None,
+ reuse=None,
+ variables_collections=None,
+ outputs_collections=None,
+ trainable=True,
+ scope=None):
+ return convolution(inputs,
+ num_outputs,
+ kernel_size,
+ stride,
+ padding,
+ data_format,
+ rate,
+ activation_fn,
+ normalizer_fn,
+ normalizer_params,
+ weights_initializer,
+ weights_regularizer,
+ biases_initializer,
+ biases_regularizer,
+ reuse,
+ variables_collections,
+ outputs_collections,
+ trainable,
+ scope,
+ conv_dims=1)
+
+convolution1d.__doc__ = convolution.__doc__
-convolution2d = convolution
-convolution3d = convolution
+@add_arg_scope
+def convolution2d(inputs,
+ num_outputs,
+ kernel_size,
+ stride=1,
+ padding='SAME',
+ data_format=None,
+ rate=1,
+ activation_fn=nn.relu,
+ normalizer_fn=None,
+ normalizer_params=None,
+ weights_initializer=initializers.xavier_initializer(),
+ weights_regularizer=None,
+ biases_initializer=init_ops.zeros_initializer(),
+ biases_regularizer=None,
+ reuse=None,
+ variables_collections=None,
+ outputs_collections=None,
+ trainable=True,
+ scope=None):
+ return convolution(inputs,
+ num_outputs,
+ kernel_size,
+ stride,
+ padding,
+ data_format,
+ rate,
+ activation_fn,
+ normalizer_fn,
+ normalizer_params,
+ weights_initializer,
+ weights_regularizer,
+ biases_initializer,
+ biases_regularizer,
+ reuse,
+ variables_collections,
+ outputs_collections,
+ trainable,
+ scope,
+ conv_dims=2)
+
+convolution2d.__doc__ = convolution.__doc__
+@add_arg_scope
+def convolution3d(inputs,
+ num_outputs,
+ kernel_size,
+ stride=1,
+ padding='SAME',
+ data_format=None,
+ rate=1,
+ activation_fn=nn.relu,
+ normalizer_fn=None,
+ normalizer_params=None,
+ weights_initializer=initializers.xavier_initializer(),
+ weights_regularizer=None,
+ biases_initializer=init_ops.zeros_initializer(),
+ biases_regularizer=None,
+ reuse=None,
+ variables_collections=None,
+ outputs_collections=None,
+ trainable=True,
+ scope=None):
+ return convolution(inputs,
+ num_outputs,
+ kernel_size,
+ stride,
+ padding,
+ data_format,
+ rate,
+ activation_fn,
+ normalizer_fn,
+ normalizer_params,
+ weights_initializer,
+ weights_regularizer,
+ biases_initializer,
+ biases_regularizer,
+ reuse,
+ variables_collections,
+ outputs_collections,
+ trainable,
+ scope,
+ conv_dims=3)
+
+convolution3d.__doc__ = convolution.__doc__
@add_arg_scope
def convolution2d_in_plane(
@@ -1411,7 +1543,7 @@ def dense_to_sparse(tensor, eos_token=0, outputs_collections=None, scope=None):
Args:
tensor: An `int` `Tensor` to be converted to a `Sparse`.
eos_token: An integer.
- It is part of the target label that signfies the end of a sentence.
+ It is part of the target label that signifies the end of a sentence.
outputs_collections: Collection to add the outputs.
scope: Optional scope for name_scope.
"""
@@ -1555,7 +1687,7 @@ def _inner_flatten(inputs, new_rank, output_collections=None, scope=None):
output_collections: Collection to which the outputs will be added.
scope: Optional scope for `name_scope`.
Returns:
- A `Tensor` or `SparseTensor` conataining the same values as `inputs`, but
+ A `Tensor` or `SparseTensor` containing the same values as `inputs`, but
with innermost dimensions flattened to obtain rank `new_rank`.
Raises:
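With convolution2d and convolution3d now defined as real functions that forward to convolution with conv_dims fixed (rather than bare aliases), each wrapper carries its own __name__ while sharing convolution's docstring. A brief sketch of the per-rank entry points (shapes illustrative):

import tensorflow as tf
from tensorflow.contrib import layers

signals = tf.random_uniform((5, 20, 3))       # rank 3 -> 1-D convolution
volumes = tf.random_uniform((5, 6, 7, 9, 3))  # rank 5 -> 3-D convolution
# Assuming the new convolution1d is exported like its 2d/3d siblings.
net1 = layers.convolution1d(signals, num_outputs=32, kernel_size=3)
net3 = layers.convolution3d(volumes, num_outputs=32, kernel_size=3)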
diff --git a/tensorflow/contrib/layers/python/layers/layers_test.py b/tensorflow/contrib/layers/python/layers/layers_test.py
index 997f910a2a..b01fd5d5c9 100644
--- a/tensorflow/contrib/layers/python/layers/layers_test.py
+++ b/tensorflow/contrib/layers/python/layers/layers_test.py
@@ -310,6 +310,17 @@ class BiasAddTest(test.TestCase):
class ConvolutionTest(test.TestCase):
+ def testInvalidShape(self):
+ with self.test_session():
+ images_2d = random_ops.random_uniform((5, 7, 9, 3), seed=1)
+ with self.assertRaisesRegexp(
+ ValueError, 'Convolution expects input with rank 5, got 4'):
+ layers_lib.convolution3d(images_2d, 32, 3)
+ images_3d = random_ops.random_uniform((5, 6, 7, 9, 3), seed=1)
+ with self.assertRaisesRegexp(
+ ValueError, 'Convolution expects input with rank 4, got 5'):
+ layers_lib.convolution2d(images_3d, 32, 3)
+
def testInvalidDataFormat(self):
height, width = 7, 9
with self.test_session():
@@ -3155,7 +3166,7 @@ class RepeatTests(test.TestCase):
with self.test_session():
images = np.random.uniform(size=(5, height, width, 3)).astype(np.float32)
output = _layers.repeat(images, 3, layers_lib.conv2d, 32, [3, 3])
- self.assertEqual(output.op.name, 'Repeat/convolution_3/Relu')
+ self.assertEqual(output.op.name, 'Repeat/convolution2d_3/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, 3, 3, 32])
def testRepeatWithScope(self):
@@ -3749,7 +3760,7 @@ class StackTests(test.TestCase):
layers_lib.convolution2d, [10, 20, 30],
kernel_size=[3, 3],
padding='SAME')
- self.assertEqual(output.op.name, 'Stack/convolution_3/Relu')
+ self.assertEqual(output.op.name, 'Stack/convolution2d_3/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, 3, 3, 30])
def testStackWithScope(self):
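The two expected-name updates above follow from the wrapper change: repeat and stack derive their default scope from the layer function's __name__, and convolution2d is now a distinct function rather than an alias of convolution, so the generated scopes read convolution2d_<i>. A sketch of the observable effect (shapes illustrative):

import numpy as np
import tensorflow as tf
from tensorflow.contrib import layers

images = tf.constant(np.zeros((5, 3, 3, 3), np.float32))
net = layers.repeat(images, 3, layers.conv2d, 32, [3, 3])
print(net.op.name)  # Repeat/convolution2d_3/Relu (was Repeat/convolution_3/Relu)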