From 1e2566f029ec24ab1208a9d65c6295b0ce499940 Mon Sep 17 00:00:00 2001 From: Francois Chollet Date: Mon, 13 Aug 2018 11:45:12 -0700 Subject: Replace applications tests with lighter integration test. Also remove MobileNetV2 due to a compatibility issue (will be re-enabled in the next version). PiperOrigin-RevId: 208517089 --- tensorflow/python/keras/BUILD | 116 ++---------------------------------------- 1 file changed, 5 insertions(+), 111 deletions(-) (limited to 'tensorflow/python/keras/BUILD') diff --git a/tensorflow/python/keras/BUILD b/tensorflow/python/keras/BUILD index e04d0e93e2..7eb7884d1d 100755 --- a/tensorflow/python/keras/BUILD +++ b/tensorflow/python/keras/BUILD @@ -296,109 +296,15 @@ py_test( ) py_test( - name = "densenet_test", - size = "large", - srcs = ["applications/densenet_test.py"], - srcs_version = "PY2AND3", - tags = ["nomsan"], # times out, http://b/78650237 - deps = [ - ":keras", - "//tensorflow/python:client_testlib", - "//third_party/py/numpy", - ], -) - -py_test( - name = "inception_resnet_v2_test", - size = "medium", - srcs = ["applications/inception_resnet_v2_test.py"], - srcs_version = "PY2AND3", - deps = [ - ":keras", - "//tensorflow/python:client_testlib", - "//third_party/py/numpy", - ], -) - -py_test( - name = "inception_v3_test", - size = "medium", - srcs = ["applications/inception_v3_test.py"], - srcs_version = "PY2AND3", - deps = [ - ":keras", - "//tensorflow/python:client_testlib", - "//third_party/py/numpy", - ], -) - -py_test( - name = "mobilenet_test", - size = "medium", - srcs = ["applications/mobilenet_test.py"], - srcs_version = "PY2AND3", - deps = [ - ":keras", - "//tensorflow/python:client_testlib", - "//third_party/py/numpy", - ], -) - -py_test( - name = "nasnet_test", - size = "large", - srcs = ["applications/nasnet_test.py"], - srcs_version = "PY2AND3", - tags = ["nomsan"], # times out, http://b/78573625 - deps = [ - ":keras", - "//tensorflow/python:client_testlib", - "//third_party/py/numpy", - ], -) - -py_test( - name = "resnet50_test", - size = "medium", - srcs = ["applications/resnet50_test.py"], - srcs_version = "PY2AND3", - deps = [ - ":keras", - "//tensorflow/python:client_testlib", - ], -) - -py_test( - name = "vgg16_test", - size = "small", - srcs = ["applications/vgg16_test.py"], - srcs_version = "PY2AND3", - deps = [ - ":keras", - "//tensorflow/python:client_testlib", - ], -) - -py_test( - name = "vgg19_test", - size = "small", - srcs = ["applications/vgg19_test.py"], - srcs_version = "PY2AND3", - deps = [ - ":keras", - "//tensorflow/python:client_testlib", - ], -) - -py_test( - name = "xception_test", - size = "medium", - srcs = ["applications/xception_test.py"], + name = "applications_test", + size = "enormous", + srcs = ["applications/applications_test.py"], + shard_count = 2, srcs_version = "PY2AND3", deps = [ ":keras", "//tensorflow/python:client_testlib", - "//third_party/py/numpy", + "@absl_py//absl/testing:parameterized", ], ) @@ -718,18 +624,6 @@ cuda_py_test( ], ) -py_test( - name = "imagenet_utils_test", - size = "small", - srcs = ["applications/imagenet_utils_test.py"], - srcs_version = "PY2AND3", - deps = [ - ":keras", - "//tensorflow/python:client_testlib", - "//third_party/py/numpy", - ], -) - py_test( name = "image_test", size = "medium", -- cgit v1.2.3 From a05c7b0e0953bf76ccf71ffbff2076150b1fe709 Mon Sep 17 00:00:00 2001 From: "A. Unique TensorFlower" Date: Tue, 14 Aug 2018 16:37:01 -0700 Subject: Add an `implementation` argument to `tf.keras.layers.LocallyConnected2D` and `tf.keras.layers.LocallyConnected1D`. 
The new mode (`implementation=2`) performs the forward pass as a single dense
matrix multiplication, allowing dramatic speedups in certain scenarios (but
worse performance in others; see the docstring). The option also makes it
possible to use `padding=same`.

PiperOrigin-RevId: 208737074
---
 tensorflow/python/keras/BUILD                      |  15 +-
 tensorflow/python/keras/layers/local.py            | 340 +++++++++++++--
 tensorflow/python/keras/layers/local_test.py       | 461 ++++++++++++++++-----
 tensorflow/python/keras/utils/conv_utils.py        | 166 ++++++++
 tensorflow/python/keras/utils/conv_utils_test.py   | 232 +++++++++++
 ...orflow.keras.layers.-locally-connected1-d.pbtxt |   2 +-
 ...orflow.keras.layers.-locally-connected2-d.pbtxt |   2 +-
 7 files changed, 1094 insertions(+), 124 deletions(-)
 create mode 100644 tensorflow/python/keras/utils/conv_utils_test.py
(limited to 'tensorflow/python/keras/BUILD')

diff --git a/tensorflow/python/keras/BUILD b/tensorflow/python/keras/BUILD
index 7eb7884d1d..fa1ec51aa7 100755
--- a/tensorflow/python/keras/BUILD
+++ b/tensorflow/python/keras/BUILD
@@ -399,7 +399,7 @@ py_test(

 py_test(
     name = "local_test",
-    size = "medium",
+    size = "large",
     srcs = ["layers/local_test.py"],
     srcs_version = "PY2AND3",
     deps = [
@@ -624,6 +624,19 @@ cuda_py_test(
     ],
 )

+py_test(
+    name = "conv_utils_test",
+    size = "small",
+    srcs = ["utils/conv_utils_test.py"],
+    srcs_version = "PY2AND3",
+    deps = [
+        ":keras",
+        "//tensorflow/python:client_testlib",
+        "//third_party/py/numpy",
+        "@absl_py//absl/testing:parameterized",
+    ],
+)
+
 py_test(
     name = "image_test",
     size = "medium",
diff --git a/tensorflow/python/keras/layers/local.py b/tensorflow/python/keras/layers/local.py
index 0ebafe07cc..33d09a1660 100644
--- a/tensorflow/python/keras/layers/local.py
+++ b/tensorflow/python/keras/layers/local.py
@@ -85,6 +85,28 @@ class LocallyConnected1D(Layer):
         the output of the layer (its "activation")..
       kernel_constraint: Constraint function applied to the kernel matrix.
       bias_constraint: Constraint function applied to the bias vector.
+      implementation: implementation mode, either `1` or `2`.
+        `1` loops over input spatial locations to perform the forward pass.
+        It is memory-efficient but performs a lot of (small) ops.
+
+        `2` stores layer weights in a dense but sparsely-populated 2D matrix
+        and implements the forward pass as a single matrix-multiply. It uses
+        a lot of RAM but performs few (large) ops.
+
+        Depending on the inputs, layer parameters, hardware, and
+        `tf.executing_eagerly()`, one implementation can be dramatically
+        faster (e.g. 50X) than the other.
+
+        It is recommended to benchmark both in the setting of interest to
+        pick the most efficient one (in terms of speed and memory usage).
+
+        The following scenarios could benefit from setting `implementation=2`:
+        - eager execution;
+        - inference;
+        - running on CPU;
+        - large amount of RAM available;
+        - small models (few filters, small kernel);
+        - using `padding=same` (only possible with `implementation=2`).
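+
+        Example (an illustrative sketch only; the sizes are arbitrary and
+        the faster mode depends on hardware and model, as noted above):
+
+        ```python
+        # `implementation=2` also unlocks `padding='same'`: the output
+        # keeps the input's temporal length.
+        layer = LocallyConnected1D(4, 3, padding='same', implementation=2)
+        ```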
Input shape: 3D tensor with shape: `(batch_size, steps, input_dim)` @@ -109,15 +131,17 @@ class LocallyConnected1D(Layer): activity_regularizer=None, kernel_constraint=None, bias_constraint=None, + implementation=1, **kwargs): super(LocallyConnected1D, self).__init__(**kwargs) self.filters = filters self.kernel_size = conv_utils.normalize_tuple(kernel_size, 1, 'kernel_size') self.strides = conv_utils.normalize_tuple(strides, 1, 'strides') self.padding = conv_utils.normalize_padding(padding) - if self.padding != 'valid': + if self.padding != 'valid' and implementation == 1: raise ValueError('Invalid border mode for LocallyConnected1D ' - '(only "valid" is supported): ' + padding) + '(only "valid" is supported if implementation is 1): ' + + padding) self.data_format = conv_utils.normalize_data_format(data_format) self.activation = activations.get(activation) self.use_bias = use_bias @@ -128,6 +152,7 @@ class LocallyConnected1D(Layer): self.activity_regularizer = regularizers.get(activity_regularizer) self.kernel_constraint = constraints.get(kernel_constraint) self.bias_constraint = constraints.get(bias_constraint) + self.implementation = implementation self.input_spec = InputSpec(ndim=3) @tf_utils.shape_type_conversion @@ -142,14 +167,45 @@ class LocallyConnected1D(Layer): 'Found shape:', input_shape) self.output_length = conv_utils.conv_output_length( input_length, self.kernel_size[0], self.padding, self.strides[0]) - self.kernel_shape = (self.output_length, self.kernel_size[0] * input_dim, - self.filters) - self.kernel = self.add_weight( - shape=self.kernel_shape, - initializer=self.kernel_initializer, - name='kernel', - regularizer=self.kernel_regularizer, - constraint=self.kernel_constraint) + + if self.implementation == 1: + self.kernel_shape = (self.output_length, self.kernel_size[0] * input_dim, + self.filters) + + self.kernel = self.add_weight( + shape=self.kernel_shape, + initializer=self.kernel_initializer, + name='kernel', + regularizer=self.kernel_regularizer, + constraint=self.kernel_constraint) + + elif self.implementation == 2: + if self.data_format == 'channels_first': + self.kernel_shape = (input_dim, input_length, + self.filters, self.output_length) + else: + self.kernel_shape = (input_length, input_dim, + self.output_length, self.filters) + + self.kernel = self.add_weight(shape=self.kernel_shape, + initializer=self.kernel_initializer, + name='kernel', + regularizer=self.kernel_regularizer, + constraint=self.kernel_constraint) + + self.kernel_mask = get_locallyconnected_mask( + input_shape=(input_length,), + kernel_shape=self.kernel_size, + strides=self.strides, + padding=self.padding, + data_format=self.data_format, + dtype=self.kernel.dtype + ) + + else: + raise ValueError('Unrecognized implementation mode: %d.' + % self.implementation) + if self.use_bias: self.bias = self.add_weight( shape=(self.output_length, self.filters), @@ -182,8 +238,17 @@ class LocallyConnected1D(Layer): return (input_shape[0], length, self.filters) def call(self, inputs): - output = K.local_conv(inputs, self.kernel, self.kernel_size, self.strides, - (self.output_length,), self.data_format) + if self.implementation == 1: + output = K.local_conv(inputs, self.kernel, self.kernel_size, self.strides, + (self.output_length,), self.data_format) + + elif self.implementation == 2: + output = local_conv_matmul(inputs, self.kernel, self.kernel_mask, + self.compute_output_shape(inputs.shape)) + + else: + raise ValueError('Unrecognized implementation mode: %d.' 
+                       % self.implementation)

     if self.use_bias:
       output = K.bias_add(output, self.bias, data_format=self.data_format)
@@ -220,7 +285,9 @@ class LocallyConnected1D(Layer):
         'kernel_constraint':
             constraints.serialize(self.kernel_constraint),
         'bias_constraint':
-            constraints.serialize(self.bias_constraint)
+            constraints.serialize(self.bias_constraint),
+        'implementation':
+            self.implementation
     }
     base_config = super(LocallyConnected1D, self).get_config()
     return dict(list(base_config.items()) + list(config.items()))
@@ -284,9 +351,31 @@ class LocallyConnected2D(Layer):
         the `kernel` weights matrix.
       bias_regularizer: Regularizer function applied to the bias vector.
       activity_regularizer: Regularizer function applied to
-        the output of the layer (its "activation")..
+        the output of the layer (its "activation").
       kernel_constraint: Constraint function applied to the kernel matrix.
       bias_constraint: Constraint function applied to the bias vector.
+      implementation: implementation mode, either `1` or `2`.
+        `1` loops over input spatial locations to perform the forward pass.
+        It is memory-efficient but performs a lot of (small) ops.
+
+        `2` stores layer weights in a dense but sparsely-populated 2D matrix
+        and implements the forward pass as a single matrix-multiply. It uses
+        a lot of RAM but performs few (large) ops.
+
+        Depending on the inputs, layer parameters, hardware, and
+        `tf.executing_eagerly()`, one implementation can be dramatically
+        faster (e.g. 50X) than the other.
+
+        It is recommended to benchmark both in the setting of interest to
+        pick the most efficient one (in terms of speed and memory usage).
+
+        The following scenarios could benefit from setting `implementation=2`:
+        - eager execution;
+        - inference;
+        - running on CPU;
+        - large amount of RAM available;
+        - small models (few filters, small kernel);
+        - using `padding=same` (only possible with `implementation=2`).
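+
+        Example (an illustrative sketch; the sizes are arbitrary):
+
+        ```python
+        # With `implementation=2`, `padding='same'` becomes available: a
+        # `(32, 32, 3)` input yields a `(32, 32, 8)` output.
+        layer = LocallyConnected2D(8, (3, 3), padding='same',
+                                   implementation=2)
+        ```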
Input shape: 4D tensor with shape: @@ -317,15 +406,17 @@ class LocallyConnected2D(Layer): activity_regularizer=None, kernel_constraint=None, bias_constraint=None, + implementation=1, **kwargs): super(LocallyConnected2D, self).__init__(**kwargs) self.filters = filters self.kernel_size = conv_utils.normalize_tuple(kernel_size, 2, 'kernel_size') self.strides = conv_utils.normalize_tuple(strides, 2, 'strides') self.padding = conv_utils.normalize_padding(padding) - if self.padding != 'valid': + if self.padding != 'valid' and implementation == 1: raise ValueError('Invalid border mode for LocallyConnected2D ' - '(only "valid" is supported): ' + padding) + '(only "valid" is supported if implementation is 1): ' + + padding) self.data_format = conv_utils.normalize_data_format(data_format) self.activation = activations.get(activation) self.use_bias = use_bias @@ -336,6 +427,7 @@ class LocallyConnected2D(Layer): self.activity_regularizer = regularizers.get(activity_regularizer) self.kernel_constraint = constraints.get(kernel_constraint) self.bias_constraint = constraints.get(bias_constraint) + self.implementation = implementation self.input_spec = InputSpec(ndim=4) @tf_utils.shape_type_conversion @@ -357,15 +449,47 @@ class LocallyConnected2D(Layer): self.padding, self.strides[1]) self.output_row = output_row self.output_col = output_col - self.kernel_shape = ( - output_row * output_col, - self.kernel_size[0] * self.kernel_size[1] * input_filter, self.filters) - self.kernel = self.add_weight( - shape=self.kernel_shape, - initializer=self.kernel_initializer, - name='kernel', - regularizer=self.kernel_regularizer, - constraint=self.kernel_constraint) + + if self.implementation == 1: + self.kernel_shape = ( + output_row * output_col, + self.kernel_size[0] * self.kernel_size[1] * input_filter, + self.filters) + + self.kernel = self.add_weight( + shape=self.kernel_shape, + initializer=self.kernel_initializer, + name='kernel', + regularizer=self.kernel_regularizer, + constraint=self.kernel_constraint) + + elif self.implementation == 2: + if self.data_format == 'channels_first': + self.kernel_shape = (input_filter, input_row, input_col, + self.filters, self.output_row, self.output_col) + else: + self.kernel_shape = (input_row, input_col, input_filter, + self.output_row, self.output_col, self.filters) + + self.kernel = self.add_weight(shape=self.kernel_shape, + initializer=self.kernel_initializer, + name='kernel', + regularizer=self.kernel_regularizer, + constraint=self.kernel_constraint) + + self.kernel_mask = get_locallyconnected_mask( + input_shape=(input_row, input_col), + kernel_shape=self.kernel_size, + strides=self.strides, + padding=self.padding, + data_format=self.data_format, + dtype=self.kernel.dtype + ) + + else: + raise ValueError('Unrecognized implementation mode: %d.' 
+                       % self.implementation)
+
     if self.use_bias:
       self.bias = self.add_weight(
           shape=(output_row, output_col, self.filters),
@@ -401,8 +525,18 @@
     return (input_shape[0], rows, cols, self.filters)

   def call(self, inputs):
-    output = K.local_conv(inputs, self.kernel, self.kernel_size, self.strides,
-                          (self.output_row, self.output_col), self.data_format)
+    if self.implementation == 1:
+      output = K.local_conv(inputs, self.kernel, self.kernel_size,
+                            self.strides, (self.output_row, self.output_col),
+                            self.data_format)
+
+    elif self.implementation == 2:
+      output = local_conv_matmul(inputs, self.kernel, self.kernel_mask,
+                                 self.compute_output_shape(inputs.shape))
+
+    else:
+      raise ValueError('Unrecognized implementation mode: %d.'
+                       % self.implementation)

     if self.use_bias:
       output = K.bias_add(output, self.bias, data_format=self.data_format)
@@ -439,7 +573,157 @@
         'kernel_constraint':
             constraints.serialize(self.kernel_constraint),
         'bias_constraint':
-            constraints.serialize(self.bias_constraint)
+            constraints.serialize(self.bias_constraint),
+        'implementation':
+            self.implementation
     }
     base_config = super(LocallyConnected2D, self).get_config()
     return dict(list(base_config.items()) + list(config.items()))
+
+
+def get_locallyconnected_mask(input_shape,
+                              kernel_shape,
+                              strides,
+                              padding,
+                              data_format,
+                              dtype):
+  """Return a mask representing connectivity of a locally-connected operation.
+
+  This method returns a masking tensor of 0s and 1s (of type `dtype`) that,
+  when element-wise multiplied with a fully-connected weight tensor, masks out
+  the weights between disconnected input-output pairs and thus implements
+  local connectivity through a sparse fully-connected weight tensor.
+
+  Assume an unshared convolution with given parameters is applied to an input
+  having N spatial dimensions with `input_shape = (d_in1, ..., d_inN)`
+  to produce an output with spatial shape `(d_out1, ..., d_outN)` (determined
+  by layer parameters such as `strides`).
+
+  This method returns a mask which can be broadcast-multiplied (element-wise)
+  with a 2*(N+1)-D weight matrix (equivalent to a fully-connected layer
+  between (N+1)-D activations, with N spatial + 1 channel dimensions for both
+  input and output) to make it perform an unshared convolution with given
+  `kernel_shape`, `strides`, `padding` and `data_format`.
+
+  Arguments:
+    input_shape: tuple of size N: `(d_in1, ..., d_inN)`,
+      spatial shape of the input.
+    kernel_shape: tuple of size N, spatial shape of the convolutional kernel
+      / receptive field.
+    strides: tuple of size N, strides along each spatial dimension.
+    padding: type of padding, string `"same"` or `"valid"`.
+    data_format: a string, `"channels_first"` or `"channels_last"`.
+    dtype: type of the layer operation, e.g. `tf.float64`.
+
+  Returns:
+    a `dtype`-tensor of shape
+    `(1, d_in1, ..., d_inN, 1, d_out1, ..., d_outN)`
+    if `data_format == "channels_first"`, or
+    `(d_in1, ..., d_inN, 1, d_out1, ..., d_outN, 1)`
+    if `data_format == "channels_last"`.
+
+  Raises:
+    ValueError: if `data_format` is neither `"channels_first"` nor
+      `"channels_last"`.
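+
+  Example (a small illustrative case; shapes follow the rules above):
+
+  ```python
+  mask = get_locallyconnected_mask((3,), (2,), (1,), 'valid',
+                                   'channels_last', 'float32')
+  # `mask` has shape (3, 1, 2, 1): mask[i, 0, j, 0] is 1 iff input position
+  # i lies in the receptive field of output position j (j <= i <= j + 1).
+  ```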
+ """ + mask = conv_utils.conv_kernel_mask( + input_shape=input_shape, + kernel_shape=kernel_shape, + strides=strides, + padding=padding + ) + + ndims = int(mask.ndim / 2) + mask = K.variable(mask, dtype) + + if data_format == 'channels_first': + mask = K.expand_dims(mask, 0) + mask = K.expand_dims(mask, - ndims - 1) + + elif data_format == 'channels_last': + mask = K.expand_dims(mask, ndims) + mask = K.expand_dims(mask, -1) + + else: + raise ValueError('Unrecognized data_format: ' + str(data_format)) + + return mask + + +def local_conv_matmul(inputs, kernel, kernel_mask, output_shape): + """Apply N-D convolution with un-shared weights using a single matmul call. + + This method outputs `inputs . (kernel * kernel_mask)` + (with `.` standing for matrix-multiply and `*` for element-wise multiply) + and requires a precomputed `kernel_mask` to zero-out weights in `kernel` and + hence perform the same operation as a convolution with un-shared + (the remaining entries in `kernel`) weights. It also does the necessary + reshapes to make `inputs` and `kernel` 2-D and `output` (N+2)-D. + + Arguments: + inputs: (N+2)-D tensor with shape + `(batch_size, channels_in, d_in1, ..., d_inN)` + or + `(batch_size, d_in1, ..., d_inN, channels_in)`. + kernel: the unshared weights for N-D convolution, + an (N+2)-D tensor of shape: + `(d_in1, ..., d_inN, channels_in, d_out2, ..., d_outN, channels_out)` + or + `(channels_in, d_in1, ..., d_inN, channels_out, d_out2, ..., d_outN)`, + with the ordering of channels and spatial dimensions matching + that of the input. + Each entry is the weight between a particular input and + output location, similarly to a fully-connected weight matrix. + kernel_mask: a float 0/1 mask tensor of shape: + `(d_in1, ..., d_inN, 1, d_out2, ..., d_outN, 1)` + or + `(1, d_in1, ..., d_inN, 1, d_out2, ..., d_outN)`, + with the ordering of singleton and spatial dimensions + matching that of the input. + Mask represents the connectivity pattern of the layer and is + precomputed elsewhere based on layer parameters: stride, + padding, and the receptive field shape. + output_shape: a tuple of (N+2) elements representing the output shape: + `(batch_size, channels_out, d_out1, ..., d_outN)` + or + `(batch_size, d_out1, ..., d_outN, channels_out)`, + with the ordering of channels and spatial dimensions matching that of + the input. + + Returns: + Output (N+2)-D tensor with shape `output_shape`. + """ + inputs_flat = K.reshape(inputs, (K.shape(inputs)[0], -1)) + + kernel = kernel_mask * kernel + kernel = make_2d(kernel, split_dim=K.ndim(kernel) // 2) + + output_flat = K.math_ops.sparse_matmul(inputs_flat, kernel, b_is_sparse=True) + output = K.reshape(output_flat, + [K.shape(output_flat)[0],] + output_shape.as_list()[1:]) + return output + + +def make_2d(tensor, split_dim): + """Reshapes an N-dimensional tensor into a 2D tensor. + + Dimensions before (excluding) and after (including) `split_dim` are grouped + together. + + Arguments: + tensor: a tensor of shape `(d0, ..., d(N-1))`. + split_dim: an integer from 1 to N-1, index of the dimension to group + dimensions before (excluding) and after (including). + + Returns: + Tensor of shape + `(d0 * ... * d(split_dim-1), d(split_dim) * ... * d(N-1))`. 
+ """ + shape = K.array_ops.shape(tensor) + in_dims = shape[:split_dim] + out_dims = shape[split_dim:] + + in_size = K.math_ops.reduce_prod(in_dims) + out_size = K.math_ops.reduce_prod(out_dims) + + return K.array_ops.reshape(tensor, (in_size, out_size)) diff --git a/tensorflow/python/keras/layers/local_test.py b/tensorflow/python/keras/layers/local_test.py index 9639e0251f..4781bcae07 100644 --- a/tensorflow/python/keras/layers/local_test.py +++ b/tensorflow/python/keras/layers/local_test.py @@ -24,6 +24,7 @@ from tensorflow.python import keras from tensorflow.python.framework import test_util as tf_test_util from tensorflow.python.keras import testing_utils from tensorflow.python.platform import test +from tensorflow.python.training.rmsprop import RMSPropOptimizer class LocallyConnectedLayersTest(test.TestCase): @@ -36,21 +37,30 @@ class LocallyConnectedLayersTest(test.TestCase): filter_length = 3 filters = 4 - for padding in ['valid']: + for padding in ['valid', 'same']: for strides in [1]: if padding == 'same' and strides != 1: continue for data_format in ['channels_first', 'channels_last']: - testing_utils.layer_test( - keras.layers.LocallyConnected1D, - kwargs={ - 'filters': filters, - 'kernel_size': filter_length, - 'padding': padding, - 'strides': strides, - 'data_format': data_format - }, - input_shape=(num_samples, num_steps, input_dim)) + for implementation in [1, 2]: + kwargs = { + 'filters': filters, + 'kernel_size': filter_length, + 'padding': padding, + 'strides': strides, + 'data_format': data_format, + 'implementation': implementation + } + + if padding == 'same' and implementation == 1: + self.assertRaises(ValueError, + keras.layers.LocallyConnected1D, + **kwargs) + else: + testing_utils.layer_test( + keras.layers.LocallyConnected1D, + kwargs=kwargs, + input_shape=(num_samples, num_steps, input_dim)) def test_locallyconnected_1d_regularization(self): num_samples = 2 @@ -59,38 +69,47 @@ class LocallyConnectedLayersTest(test.TestCase): filter_length = 3 filters = 4 for data_format in ['channels_first', 'channels_last']: - kwargs = { - 'filters': filters, - 'kernel_size': filter_length, - 'kernel_regularizer': 'l2', - 'bias_regularizer': 'l2', - 'activity_regularizer': 'l2', - 'data_format': data_format - } - - with self.test_session(): - layer = keras.layers.LocallyConnected1D(**kwargs) - layer.build((num_samples, num_steps, input_dim)) - self.assertEqual(len(layer.losses), 2) - layer( - keras.backend.variable(np.ones((num_samples, - num_steps, - input_dim)))) - self.assertEqual(len(layer.losses), 3) - - k_constraint = keras.constraints.max_norm(0.01) - b_constraint = keras.constraints.max_norm(0.01) - kwargs = { - 'filters': filters, - 'kernel_size': filter_length, - 'kernel_constraint': k_constraint, - 'bias_constraint': b_constraint, - } - with self.test_session(): - layer = keras.layers.LocallyConnected1D(**kwargs) - layer.build((num_samples, num_steps, input_dim)) - self.assertEqual(layer.kernel.constraint, k_constraint) - self.assertEqual(layer.bias.constraint, b_constraint) + for padding in ['valid', 'same']: + for implementation in [1, 2]: + kwargs = { + 'filters': filters, + 'kernel_size': filter_length, + 'kernel_regularizer': 'l2', + 'bias_regularizer': 'l2', + 'activity_regularizer': 'l2', + 'data_format': data_format, + 'implementation': implementation, + 'padding': padding + } + + if padding == 'same' and implementation == 1: + self.assertRaises(ValueError, + keras.layers.LocallyConnected1D, + **kwargs) + else: + with self.test_session(): + layer = 
keras.layers.LocallyConnected1D(**kwargs) + layer.build((num_samples, num_steps, input_dim)) + self.assertEqual(len(layer.losses), 2) + layer( + keras.backend.variable(np.ones((num_samples, + num_steps, + input_dim)))) + self.assertEqual(len(layer.losses), 3) + + k_constraint = keras.constraints.max_norm(0.01) + b_constraint = keras.constraints.max_norm(0.01) + kwargs = { + 'filters': filters, + 'kernel_size': filter_length, + 'kernel_constraint': k_constraint, + 'bias_constraint': b_constraint, + } + with self.test_session(): + layer = keras.layers.LocallyConnected1D(**kwargs) + layer.build((num_samples, num_steps, input_dim)) + self.assertEqual(layer.kernel.constraint, k_constraint) + self.assertEqual(layer.bias.constraint, b_constraint) @tf_test_util.run_in_graph_and_eager_modes def test_locallyconnected_2d(self): @@ -100,23 +119,32 @@ class LocallyConnectedLayersTest(test.TestCase): num_row = 6 num_col = 10 - for padding in ['valid']: + for padding in ['valid', 'same']: for strides in [(1, 1), (2, 2)]: - if padding == 'same' and strides != (1, 1): - continue + for implementation in [1, 2]: + if padding == 'same' and strides != (1, 1): + continue - testing_utils.layer_test( - keras.layers.LocallyConnected2D, - kwargs={ - 'filters': filters, - 'kernel_size': 3, - 'padding': padding, - 'kernel_regularizer': 'l2', - 'bias_regularizer': 'l2', - 'strides': strides, - 'data_format': 'channels_last' - }, - input_shape=(num_samples, num_row, num_col, stack_size)) + kwargs = { + 'filters': filters, + 'kernel_size': 3, + 'padding': padding, + 'kernel_regularizer': 'l2', + 'bias_regularizer': 'l2', + 'strides': strides, + 'data_format': 'channels_last', + 'implementation': implementation + } + + if padding == 'same' and implementation == 1: + self.assertRaises(ValueError, + keras.layers.LocallyConnected2D, + **kwargs) + else: + testing_utils.layer_test( + keras.layers.LocallyConnected2D, + kwargs=kwargs, + input_shape=(num_samples, num_row, num_col, stack_size)) @tf_test_util.run_in_graph_and_eager_modes def test_locallyconnected_2d_channels_first(self): @@ -126,14 +154,25 @@ class LocallyConnectedLayersTest(test.TestCase): num_row = 6 num_col = 10 - testing_utils.layer_test( - keras.layers.LocallyConnected2D, - kwargs={ + for implementation in [1, 2]: + for padding in ['valid', 'same']: + kwargs = { 'filters': filters, 'kernel_size': 3, - 'data_format': 'channels_first' - }, - input_shape=(num_samples, num_row, num_col, stack_size)) + 'data_format': 'channels_first', + 'implementation': implementation, + 'padding': padding + } + + if padding == 'same' and implementation == 1: + self.assertRaises(ValueError, + keras.layers.LocallyConnected2D, + **kwargs) + else: + testing_utils.layer_test( + keras.layers.LocallyConnected2D, + kwargs=kwargs, + input_shape=(num_samples, num_row, num_col, stack_size)) def test_locallyconnected_2d_regularization(self): num_samples = 8 @@ -141,35 +180,271 @@ class LocallyConnectedLayersTest(test.TestCase): stack_size = 4 num_row = 6 num_col = 10 - kwargs = { - 'filters': filters, - 'kernel_size': 3, - 'kernel_regularizer': 'l2', - 'bias_regularizer': 'l2', - 'activity_regularizer': 'l2', - } - with self.test_session(): - layer = keras.layers.LocallyConnected2D(**kwargs) - layer.build((num_samples, num_row, num_col, stack_size)) - self.assertEqual(len(layer.losses), 2) - layer( - keras.backend.variable( - np.ones((num_samples, num_row, num_col, stack_size)))) - self.assertEqual(len(layer.losses), 3) - - k_constraint = keras.constraints.max_norm(0.01) - b_constraint = 
keras.constraints.max_norm(0.01) - kwargs = { - 'filters': filters, - 'kernel_size': 3, - 'kernel_constraint': k_constraint, - 'bias_constraint': b_constraint, - } - with self.test_session(): - layer = keras.layers.LocallyConnected2D(**kwargs) - layer.build((num_samples, num_row, num_col, stack_size)) - self.assertEqual(layer.kernel.constraint, k_constraint) - self.assertEqual(layer.bias.constraint, b_constraint) + for implementation in [1, 2]: + for padding in ['valid', 'same']: + kwargs = { + 'filters': filters, + 'kernel_size': 3, + 'kernel_regularizer': 'l2', + 'bias_regularizer': 'l2', + 'activity_regularizer': 'l2', + 'implementation': implementation, + 'padding': padding + } + + if padding == 'same' and implementation == 1: + self.assertRaises(ValueError, + keras.layers.LocallyConnected2D, + **kwargs) + else: + with self.test_session(): + layer = keras.layers.LocallyConnected2D(**kwargs) + layer.build((num_samples, num_row, num_col, stack_size)) + self.assertEqual(len(layer.losses), 2) + layer( + keras.backend.variable( + np.ones((num_samples, num_row, num_col, stack_size)))) + self.assertEqual(len(layer.losses), 3) + + k_constraint = keras.constraints.max_norm(0.01) + b_constraint = keras.constraints.max_norm(0.01) + kwargs = { + 'filters': filters, + 'kernel_size': 3, + 'kernel_constraint': k_constraint, + 'bias_constraint': b_constraint, + } + with self.test_session(): + layer = keras.layers.LocallyConnected2D(**kwargs) + layer.build((num_samples, num_row, num_col, stack_size)) + self.assertEqual(layer.kernel.constraint, k_constraint) + self.assertEqual(layer.bias.constraint, b_constraint) + + @tf_test_util.run_in_graph_and_eager_modes + def test_locallyconnected_implementation(self): + n_train = 4 + n_classes = 3 + n_epochs = 2 + + np.random.seed(1) + targets = np.random.randint(0, n_classes, (n_train,)) + + for width in [1, 17]: + for height in [16]: + for filters in [2]: + for data_format in ['channels_first', 'channels_last']: + inputs = get_inputs(data_format, filters, height, n_train, width) + + for kernel_x in [(3,)]: + for kernel_y in [()] if width == 1 else [(2,)]: + for stride_x in [(1,)]: + for stride_y in [()] if width == 1 else [(3,)]: + for layers in [2]: + kwargs = { + 'layers': layers, + 'filters': filters, + 'kernel_size': kernel_x + kernel_y, + 'strides': stride_x + stride_y, + 'data_format': data_format, + 'n_classes': n_classes, + 'input_shape': inputs.shape + } + + model_1 = get_model(implementation=1, **kwargs) + model_2 = get_model(implementation=2, **kwargs) + + copy_model_weights(model_2, model_1) + + # Compare outputs at initialization. + out_1 = model_1.call(inputs) + out_2 = model_2.call(inputs) + self.assertAllCloseAccordingToType(out_1, out_2, + rtol=1e-5, atol=1e-5) + + # Train. + model_1.fit(x=inputs, + y=targets, + epochs=n_epochs, + batch_size=n_train) + + model_2.fit(x=inputs, + y=targets, + epochs=n_epochs, + batch_size=n_train) + + # Compare outputs after a few training steps. 
+ out_1 = model_1.call(inputs) + out_2 = model_2.call(inputs) + self.assertAllCloseAccordingToType(out_1, out_2, + rtol=1e-5, atol=1e-5) + + @tf_test_util.run_in_graph_and_eager_modes + def test_make_2d(self): + input_shapes = [ + (0,), + (0, 0), + (1,), + (2,), + (3,), + (1, 0), + (0, 3), + (1, 1), + (1, 2), + (3, 1), + (2, 2), + (3, 3), + (1, 0, 1), + (5, 2, 3), + (3, 5, 6, 7, 0), + (3, 2, 2, 4, 4), + (1, 2, 3, 4, 7, 2), + ] + np.random.seed(1) + + for input_shape in input_shapes: + inputs = np.random.normal(0, 1, input_shape) + inputs_tf = keras.backend.variable(inputs) + + split_dim = np.random.randint(0, inputs.ndim + 1) + shape_2d = (int(np.prod(inputs.shape[:split_dim])), + int(np.prod(inputs.shape[split_dim:]))) + inputs_2d = np.reshape(inputs, shape_2d) + + inputs_2d_tf = keras.layers.local.make_2d(inputs_tf, split_dim) + inputs_2d_tf = keras.backend.get_value(inputs_2d_tf) + + self.assertAllCloseAccordingToType(inputs_2d, inputs_2d_tf) + + +def get_inputs(data_format, filters, height, n_train, width): + if data_format == 'channels_first': + if width == 1: + input_shape = (filters, height) + else: + input_shape = (filters, height, width) + + elif data_format == 'channels_last': + if width == 1: + input_shape = (height, filters) + else: + input_shape = (height, width, filters) + + else: + raise NotImplementedError(data_format) + + inputs = np.random.normal(0, 1, + (n_train,) + input_shape).astype(np.float32) + return inputs + + +def xent(y_true, y_pred): + y_true = keras.backend.cast( + keras.backend.reshape(y_true, (-1,)), + keras.backend.dtypes_module.int32) + + return keras.backend.nn.sparse_softmax_cross_entropy_with_logits( + labels=y_true, + logits=y_pred) + + +def get_model(implementation, + filters, + kernel_size, + strides, + layers, + n_classes, + data_format, + input_shape): + model = keras.Sequential() + + if len(kernel_size) == 1: + lc_layer = keras.layers.LocallyConnected1D + elif len(kernel_size) == 2: + lc_layer = keras.layers.LocallyConnected2D + else: + raise NotImplementedError(kernel_size) + + for _ in range(layers): + model.add(lc_layer( + padding='valid', + kernel_initializer=keras.initializers.random_normal(), + bias_initializer=keras.initializers.random_normal(), + filters=filters, + strides=strides, + kernel_size=kernel_size, + activation=keras.activations.relu, + data_format=data_format, + implementation=implementation)) + + model.add(keras.layers.Flatten()) + model.add(keras.layers.Dense(n_classes)) + model.compile( + optimizer=RMSPropOptimizer(0.01), + metrics=[keras.metrics.categorical_accuracy], + loss=xent + ) + model.build(input_shape) + return model + + +def copy_lc_weights(lc_layer_2_from, lc_layer_1_to): + lc_2_kernel, lc_2_bias = lc_layer_2_from.weights + lc_2_kernel_masked = lc_2_kernel * lc_layer_2_from.kernel_mask + + data_format = lc_layer_2_from.data_format + + if data_format == 'channels_first': + if isinstance(lc_layer_2_from, keras.layers.LocallyConnected1D): + permutation = (3, 0, 1, 2) + elif isinstance(lc_layer_2_from, keras.layers.LocallyConnected2D): + permutation = (4, 5, 0, 1, 2, 3) + else: + raise NotImplementedError(lc_layer_2_from) + + elif data_format == 'channels_last': + if isinstance(lc_layer_2_from, keras.layers.LocallyConnected1D): + permutation = (2, 0, 1, 3) + elif isinstance(lc_layer_2_from, keras.layers.LocallyConnected2D): + permutation = (3, 4, 0, 1, 2, 5) + else: + raise NotImplementedError(lc_layer_2_from) + + else: + raise NotImplementedError(data_format) + + lc_2_kernel_masked = keras.backend.permute_dimensions( 
+ lc_2_kernel_masked, permutation) + + lc_2_kernel_mask = keras.backend.math_ops.not_equal( + lc_2_kernel_masked, 0) + lc_2_kernel_flat = keras.backend.array_ops.boolean_mask( + lc_2_kernel_masked, lc_2_kernel_mask) + lc_2_kernel_reshaped = keras.backend.reshape(lc_2_kernel_flat, + lc_layer_1_to.kernel.shape) + + lc_2_kernel_reshaped = keras.backend.get_value(lc_2_kernel_reshaped) + lc_2_bias = keras.backend.get_value(lc_2_bias) + + lc_layer_1_to.set_weights([lc_2_kernel_reshaped, lc_2_bias]) + + +def copy_model_weights(model_2_from, model_1_to): + for l in range(len(model_2_from.layers)): + layer_2_from = model_2_from.layers[l] + layer_1_to = model_1_to.layers[l] + + if isinstance(layer_2_from, (keras.layers.LocallyConnected2D, + keras.layers.LocallyConnected1D)): + copy_lc_weights(layer_2_from, layer_1_to) + + elif isinstance(layer_2_from, keras.layers.Dense): + weights_2, bias_2 = layer_2_from.weights + weights_2 = keras.backend.get_value(weights_2) + bias_2 = keras.backend.get_value(bias_2) + layer_1_to.set_weights([weights_2, bias_2]) + + else: + continue if __name__ == '__main__': diff --git a/tensorflow/python/keras/utils/conv_utils.py b/tensorflow/python/keras/utils/conv_utils.py index 5419e7ae05..3a176c3316 100644 --- a/tensorflow/python/keras/utils/conv_utils.py +++ b/tensorflow/python/keras/utils/conv_utils.py @@ -18,6 +18,7 @@ from __future__ import absolute_import from __future__ import division from __future__ import print_function +import itertools import numpy as np from six.moves import range # pylint: disable=redefined-builtin @@ -199,3 +200,168 @@ def convert_kernel(kernel): no_flip = (slice(None, None), slice(None, None)) slices[-2:] = no_flip return np.copy(kernel[slices]) + + +def conv_kernel_mask(input_shape, kernel_shape, strides, padding): + """Compute a mask representing the connectivity of a convolution operation. + + Assume a convolution with given parameters is applied to an input having N + spatial dimensions with `input_shape = (d_in1, ..., d_inN)` to produce an + output with shape `(d_out1, ..., d_outN)`. This method returns a boolean array + of shape `(d_in1, ..., d_inN, d_out1, ..., d_outN)` with `True` entries + indicating pairs of input and output locations that are connected by a weight. + + Example: + ```python + >>> input_shape = (4,) + >>> kernel_shape = (2,) + >>> strides = (1,) + >>> padding = "valid" + >>> conv_kernel_mask(input_shape, kernel_shape, strides, padding) + array([[ True, False, False], + [ True, True, False], + [False, True, True], + [False, False, True]], dtype=bool) + ``` + where rows and columns correspond to inputs and outputs respectively. + + + Args: + input_shape: tuple of size N: `(d_in1, ..., d_inN)`, + spatial shape of the input. + kernel_shape: tuple of size N, spatial shape of the convolutional kernel + / receptive field. + strides: tuple of size N, strides along each spatial dimension. + padding: type of padding, string `"same"` or `"valid"`. + + Returns: + A boolean 2N-D `np.ndarray` of shape + `(d_in1, ..., d_inN, d_out1, ..., d_outN)`, where `(d_out1, ..., d_outN)` + is the spatial shape of the output. `True` entries in the mask represent + pairs of input-output locations that are connected by a weight. + + Raises: + ValueError: if `input_shape`, `kernel_shape` and `strides` don't have the + same number of dimensions. + NotImplementedError: if `padding` is not in {`"same"`, `"valid"`}. + """ + if padding not in {'same', 'valid'}: + raise NotImplementedError('Padding type %s not supported. 
' + 'Only "valid" and "same" ' + 'are implemented.' % padding) + + in_dims = len(input_shape) + if isinstance(kernel_shape, int): + kernel_shape = (kernel_shape,) * in_dims + if isinstance(strides, int): + strides = (strides,) * in_dims + + kernel_dims = len(kernel_shape) + stride_dims = len(strides) + if kernel_dims != in_dims or stride_dims != in_dims: + raise ValueError('Number of strides, input and kernel dimensions must all ' + 'match. Received: %d, %d, %d.' % + (stride_dims, in_dims, kernel_dims)) + + output_shape = conv_output_shape(input_shape, kernel_shape, strides, padding) + + mask_shape = input_shape + output_shape + mask = np.zeros(mask_shape, np.bool) + + output_axes_ticks = [range(dim) for dim in output_shape] + for output_position in itertools.product(*output_axes_ticks): + input_axes_ticks = conv_connected_inputs(input_shape, + kernel_shape, + output_position, + strides, + padding) + for input_position in itertools.product(*input_axes_ticks): + mask[input_position + output_position] = True + + return mask + + +def conv_connected_inputs(input_shape, + kernel_shape, + output_position, + strides, + padding): + """Return locations of the input connected to an output position. + + Assume a convolution with given parameters is applied to an input having N + spatial dimensions with `input_shape = (d_in1, ..., d_inN)`. This method + returns N ranges specifying the input region that was convolved with the + kernel to produce the output at position + `output_position = (p_out1, ..., p_outN)`. + + Example: + ```python + >>> input_shape = (4, 4) + >>> kernel_shape = (2, 1) + >>> output_position = (1, 1) + >>> strides = (1, 1) + >>> padding = "valid" + >>> conv_connected_inputs(input_shape, kernel_shape, output_position, + >>> strides, padding) + [xrange(1, 3), xrange(1, 2)] + ``` + Args: + input_shape: tuple of size N: `(d_in1, ..., d_inN)`, + spatial shape of the input. + kernel_shape: tuple of size N, spatial shape of the convolutional kernel + / receptive field. + output_position: tuple of size N: `(p_out1, ..., p_outN)`, + a single position in the output of the convolution. + strides: tuple of size N, strides along each spatial dimension. + padding: type of padding, string `"same"` or `"valid"`. + + Returns: + N ranges `[[p_in_left1, ..., p_in_right1], ..., + [p_in_leftN, ..., p_in_rightN]]` specifying the region in the + input connected to output_position. + """ + ranges = [] + + ndims = len(input_shape) + for d in range(ndims): + left_shift = int(kernel_shape[d] / 2) + right_shift = kernel_shape[d] - left_shift + + center = output_position[d] * strides[d] + + if padding == 'valid': + center += left_shift + + start = max(0, center - left_shift) + end = min(input_shape[d], center + right_shift) + + ranges.append(range(start, end)) + + return ranges + + +def conv_output_shape(input_shape, kernel_shape, strides, padding): + """Return the output shape of an N-D convolution. + + Forces dimensions where input is empty (size 0) to remain empty. + + Args: + input_shape: tuple of size N: `(d_in1, ..., d_inN)`, + spatial shape of the input. + kernel_shape: tuple of size N, spatial shape of the convolutional kernel + / receptive field. + strides: tuple of size N, strides along each spatial dimension. + padding: type of padding, string `"same"` or `"valid"`. + + Returns: + tuple of size N: `(d_out1, ..., d_outN)`, spatial shape of the output. 
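+
+  Example (illustrative; values follow `conv_output_length` above):
+
+  ```python
+  conv_output_shape((8, 8), (3, 3), (2, 2), 'valid')  # -> (3, 3)
+  conv_output_shape((8, 8), (3, 3), (2, 2), 'same')   # -> (4, 4)
+  ```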
+ """ + dims = range(len(kernel_shape)) + output_shape = [conv_output_length(input_shape[d], + kernel_shape[d], + padding, + strides[d]) + for d in dims] + output_shape = tuple([0 if input_shape[d] == 0 else output_shape[d] + for d in dims]) + return output_shape diff --git a/tensorflow/python/keras/utils/conv_utils_test.py b/tensorflow/python/keras/utils/conv_utils_test.py new file mode 100644 index 0000000000..eb2a360bfd --- /dev/null +++ b/tensorflow/python/keras/utils/conv_utils_test.py @@ -0,0 +1,232 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for conv_utils.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import itertools + +from absl.testing import parameterized +import numpy as np + +from tensorflow.python.keras.utils import conv_utils +from tensorflow.python.platform import test + + +def _get_const_output_shape(input_shape, dim): + return tuple([min(d, dim) for d in input_shape]) + + +input_shapes = [ + (0,), + (0, 0), + (1,), + (2,), + (3,), + (1, 0), + (0, 3), + (1, 1), + (1, 2), + (3, 1), + (2, 2), + (3, 3), + (1, 0, 1), + (5, 2, 3), + (3, 5, 6, 7, 0), + (3, 2, 2, 4, 4), + (1, 2, 3, 4, 7, 2), +] + + +@parameterized.parameters(input_shapes) +class TestConvUtils(test.TestCase, parameterized.TestCase): + + def test_conv_kernel_mask_fc(self, *input_shape): + padding = 'valid' + kernel_shape = input_shape + ndims = len(input_shape) + strides = (1,) * ndims + output_shape = _get_const_output_shape(input_shape, dim=1) + mask = np.ones(input_shape + output_shape, np.bool) + self.assertAllEqual( + mask, + conv_utils.conv_kernel_mask( + input_shape, + kernel_shape, + strides, + padding + ) + ) + + def test_conv_kernel_mask_diag(self, *input_shape): + ndims = len(input_shape) + kernel_shape = (1,) * ndims + strides = (1,) * ndims + + for padding in ['valid', 'same']: + mask = np.identity(int(np.prod(input_shape)), np.bool) + mask = np.reshape(mask, input_shape * 2) + self.assertAllEqual( + mask, + conv_utils.conv_kernel_mask( + input_shape, + kernel_shape, + strides, + padding + ) + ) + + def test_conv_kernel_mask_full_stride(self, *input_shape): + padding = 'valid' + ndims = len(input_shape) + kernel_shape = (1,) * ndims + strides = tuple([max(d, 1) for d in input_shape]) + output_shape = _get_const_output_shape(input_shape, dim=1) + + mask = np.zeros(input_shape + output_shape, np.bool) + if all(d > 0 for d in mask.shape): + mask[(0,) * len(output_shape)] = True + + self.assertAllEqual( + mask, + conv_utils.conv_kernel_mask( + input_shape, + kernel_shape, + strides, + padding + ) + ) + + def test_conv_kernel_mask_almost_full_stride(self, *input_shape): + padding = 'valid' + ndims = len(input_shape) + kernel_shape = (1,) * ndims + strides = tuple([max(d - 1, 1) for d in input_shape]) + output_shape = _get_const_output_shape(input_shape, dim=2) + + mask = 
np.zeros(input_shape + output_shape, np.bool) + if all(d > 0 for d in mask.shape): + for in_position in itertools.product(*[[0, d - 1] for d in input_shape]): + out_position = tuple([min(p, 1) for p in in_position]) + mask[in_position + out_position] = True + + self.assertAllEqual( + mask, + conv_utils.conv_kernel_mask( + input_shape, + kernel_shape, + strides, + padding + ) + ) + + def test_conv_kernel_mask_rect_kernel(self, *input_shape): + padding = 'valid' + ndims = len(input_shape) + strides = (1,) * ndims + + for d in range(ndims): + kernel_shape = [1] * ndims + kernel_shape[d] = input_shape[d] + + output_shape = list(input_shape) + output_shape[d] = min(1, input_shape[d]) + + mask = np.identity(int(np.prod(input_shape)), np.bool) + mask = np.reshape(mask, input_shape * 2) + + for p in itertools.product(*[range(input_shape[dim]) + for dim in range(ndims)]): + p = list(p) + p[d] = slice(None) + mask[p * 2] = True + + mask = np.take(mask, range(0, min(1, input_shape[d])), ndims + d) + + self.assertAllEqual( + mask, + conv_utils.conv_kernel_mask( + input_shape, + kernel_shape, + strides, + padding + ) + ) + + def test_conv_kernel_mask_wrong_padding(self, *input_shape): + ndims = len(input_shape) + kernel_shape = (1,) * ndims + strides = (1,) * ndims + + conv_utils.conv_kernel_mask( + input_shape, + kernel_shape, + strides, + 'valid' + ) + + conv_utils.conv_kernel_mask( + input_shape, + kernel_shape, + strides, + 'same' + ) + + self.assertRaises(NotImplementedError, + conv_utils.conv_kernel_mask, + input_shape, kernel_shape, strides, 'full') + + def test_conv_kernel_mask_wrong_dims(self, *input_shape): + kernel_shape = 1 + strides = 1 + + conv_utils.conv_kernel_mask( + input_shape, + kernel_shape, + strides, + 'valid' + ) + + ndims = len(input_shape) + + kernel_shape = (2,) * (ndims + 1) + self.assertRaises(ValueError, + conv_utils.conv_kernel_mask, + input_shape, kernel_shape, strides, 'same') + + strides = (1,) * ndims + self.assertRaises(ValueError, + conv_utils.conv_kernel_mask, + input_shape, kernel_shape, strides, 'valid') + + kernel_shape = (1,) * ndims + strides = (2,) * (ndims - 1) + self.assertRaises(ValueError, + conv_utils.conv_kernel_mask, + input_shape, kernel_shape, strides, 'valid') + + strides = (2,) * ndims + conv_utils.conv_kernel_mask( + input_shape, + kernel_shape, + strides, + 'valid' + ) + + +if __name__ == '__main__': + test.main() diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-locally-connected1-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-locally-connected1-d.pbtxt index f754fa1da8..ff19dcc3a3 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-locally-connected1-d.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-locally-connected1-d.pbtxt @@ -82,7 +82,7 @@ tf_class { } member_method { name: "__init__" - argspec: "args=[\'self\', \'filters\', \'kernel_size\', \'strides\', \'padding\', \'data_format\', \'activation\', \'use_bias\', \'kernel_initializer\', \'bias_initializer\', \'kernel_regularizer\', \'bias_regularizer\', \'activity_regularizer\', \'kernel_constraint\', \'bias_constraint\'], varargs=None, keywords=kwargs, defaults=[\'1\', \'valid\', \'None\', \'None\', \'True\', \'glorot_uniform\', \'zeros\', \'None\', \'None\', \'None\', \'None\', \'None\'], " + argspec: "args=[\'self\', \'filters\', \'kernel_size\', \'strides\', \'padding\', \'data_format\', \'activation\', \'use_bias\', \'kernel_initializer\', \'bias_initializer\', \'kernel_regularizer\', \'bias_regularizer\', 
\'activity_regularizer\', \'kernel_constraint\', \'bias_constraint\', \'implementation\'], varargs=None, keywords=kwargs, defaults=[\'1\', \'valid\', \'None\', \'None\', \'True\', \'glorot_uniform\', \'zeros\', \'None\', \'None\', \'None\', \'None\', \'None\', \'1\'], " } member_method { name: "add_loss" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-locally-connected2-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-locally-connected2-d.pbtxt index c9516b8f07..3c278fead6 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-locally-connected2-d.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-locally-connected2-d.pbtxt @@ -82,7 +82,7 @@ tf_class { } member_method { name: "__init__" - argspec: "args=[\'self\', \'filters\', \'kernel_size\', \'strides\', \'padding\', \'data_format\', \'activation\', \'use_bias\', \'kernel_initializer\', \'bias_initializer\', \'kernel_regularizer\', \'bias_regularizer\', \'activity_regularizer\', \'kernel_constraint\', \'bias_constraint\'], varargs=None, keywords=kwargs, defaults=[\'(1, 1)\', \'valid\', \'None\', \'None\', \'True\', \'glorot_uniform\', \'zeros\', \'None\', \'None\', \'None\', \'None\', \'None\'], " + argspec: "args=[\'self\', \'filters\', \'kernel_size\', \'strides\', \'padding\', \'data_format\', \'activation\', \'use_bias\', \'kernel_initializer\', \'bias_initializer\', \'kernel_regularizer\', \'bias_regularizer\', \'activity_regularizer\', \'kernel_constraint\', \'bias_constraint\', \'implementation\'], varargs=None, keywords=kwargs, defaults=[\'(1, 1)\', \'valid\', \'None\', \'None\', \'True\', \'glorot_uniform\', \'zeros\', \'None\', \'None\', \'None\', \'None\', \'None\', \'1\'], " } member_method { name: "add_loss" -- cgit v1.2.3
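
A minimal end-to-end sketch of the new `implementation` argument (illustrative
only: the model, sizes, and optimizer are arbitrary, and which mode is faster
depends on hardware, as the docstrings above note):

```python
import numpy as np
import tensorflow as tf

# `implementation=2` computes the forward pass as one large masked matmul
# and, unlike `implementation=1`, accepts `padding='same'`.
model = tf.keras.Sequential([
    tf.keras.layers.LocallyConnected2D(8, (3, 3), padding='same',
                                       implementation=2,
                                       input_shape=(32, 32, 3)),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(10),
])
model.compile(optimizer='rmsprop', loss='mse')
out = model.predict(np.random.rand(4, 32, 32, 3).astype(np.float32))
print(out.shape)  # (4, 10)
```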