 tensorflow/python/keras/backend.py                                              |  64
 tensorflow/python/keras/callbacks.py                                            | 101
 tensorflow/python/keras/callbacks_test.py                                       | 118
 tensorflow/python/keras/layers/convolutional.py                                 |  14
 tensorflow/python/keras/layers/convolutional_test.py                            |  36
 tensorflow/python/kernel_tests/rnn_test.py                                      |   4
 tensorflow/tools/api/golden/v1/tensorflow.keras.backend.pbtxt                   |   2
 tensorflow/tools/api/golden/v1/tensorflow.keras.callbacks.-early-stopping.pbtxt |   6
 tensorflow/tools/api/golden/v1/tensorflow.keras.callbacks.-tensor-board.pbtxt   |   2
 tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-up-sampling2-d.pbtxt    |   2
 tensorflow/tools/api/golden/v2/tensorflow.keras.backend.pbtxt                   |   2
 tensorflow/tools/api/golden/v2/tensorflow.keras.callbacks.-early-stopping.pbtxt |   6
 tensorflow/tools/api/golden/v2/tensorflow.keras.callbacks.-tensor-board.pbtxt   |   2
 tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-up-sampling2-d.pbtxt    |   2
 14 files changed, 296 insertions(+), 65 deletions(-)
diff --git a/tensorflow/python/keras/backend.py b/tensorflow/python/keras/backend.py
index 13f52fbae7..7509ef9c59 100644
--- a/tensorflow/python/keras/backend.py
+++ b/tensorflow/python/keras/backend.py
@@ -2338,7 +2338,8 @@ def permute_dimensions(x, pattern):
@tf_export('keras.backend.resize_images')
-def resize_images(x, height_factor, width_factor, data_format):
+def resize_images(x, height_factor, width_factor, data_format,
+ interpolation='nearest'):
"""Resizes the images contained in a 4D tensor.
Arguments:
@@ -2346,40 +2347,55 @@ def resize_images(x, height_factor, width_factor, data_format):
height_factor: Positive integer.
width_factor: Positive integer.
data_format: One of `"channels_first"`, `"channels_last"`.
+ interpolation: A string, one of `nearest` or `bilinear`.
Returns:
A tensor.
Raises:
- ValueError: if `data_format` is neither
- `channels_last` or `channels_first`.
+ ValueError: in case of incorrect value for
+ `data_format` or `interpolation`.
"""
if data_format == 'channels_first':
- original_shape = int_shape(x)
- new_shape = array_ops.shape(x)[2:]
- new_shape *= constant_op.constant(
- np.array([height_factor, width_factor]).astype('int32'))
+ rows, cols = 2, 3
+ elif data_format == 'channels_last':
+ rows, cols = 1, 2
+ else:
+ raise ValueError('Invalid `data_format` argument: %s' % (data_format,))
+
+ original_shape = int_shape(x)
+ new_shape = array_ops.shape(x)[rows:cols + 1]
+ new_shape *= constant_op.constant(
+ np.array([height_factor, width_factor], dtype='int32'))
+
+ if data_format == 'channels_first':
x = permute_dimensions(x, [0, 2, 3, 1])
+ if interpolation == 'nearest':
x = image_ops.resize_nearest_neighbor(x, new_shape)
+ elif interpolation == 'bilinear':
+ x = image_ops.resize_bilinear(x, new_shape)
+ else:
+ raise ValueError('interpolation should be one '
+ 'of "nearest" or "bilinear".')
+ if data_format == 'channels_first':
x = permute_dimensions(x, [0, 3, 1, 2])
- x.set_shape((None, None, original_shape[2] * height_factor
- if original_shape[2] is not None else None,
- original_shape[3] * width_factor
- if original_shape[3] is not None else None))
- return x
- elif data_format == 'channels_last':
- original_shape = int_shape(x)
- new_shape = array_ops.shape(x)[1:3]
- new_shape *= constant_op.constant(
- np.array([height_factor, width_factor]).astype('int32'))
- x = image_ops.resize_nearest_neighbor(x, new_shape)
- x.set_shape((None, original_shape[1] * height_factor
- if original_shape[1] is not None else None,
- original_shape[2] * width_factor
- if original_shape[2] is not None else None, None))
- return x
+
+ if original_shape[rows] is None:
+ new_height = None
else:
- raise ValueError('Invalid data_format: ' + str(data_format))
+ new_height = original_shape[rows] * height_factor
+
+ if original_shape[cols] is None:
+ new_width = None
+ else:
+ new_width = original_shape[cols] * width_factor
+
+ if data_format == 'channels_first':
+ output_shape = (None, None, new_height, new_width)
+ else:
+ output_shape = (None, new_height, new_width, None)
+ x.set_shape(output_shape)
+ return x
@tf_export('keras.backend.resize_volumes')
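
Usage note: a minimal sketch, assuming the hunk above is applied, of the new `interpolation` argument to `keras.backend.resize_images`. The tensor shape and scale factors below are illustrative, not taken from the commit.

```python
# Illustrative only: exercising resize_images with interpolation='bilinear'.
import numpy as np
import tensorflow as tf

K = tf.keras.backend
x = K.variable(np.random.rand(2, 3, 4, 5))  # (batch, rows, cols, channels)
y = K.resize_images(x, 2, 2, data_format='channels_last',
                    interpolation='bilinear')
# set_shape merges the doubled spatial dims with the known static dims.
print(K.int_shape(y))  # (2, 6, 8, 5)
```
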
diff --git a/tensorflow/python/keras/callbacks.py b/tensorflow/python/keras/callbacks.py
index 3d6000f223..4c12c83a4c 100644
--- a/tensorflow/python/keras/callbacks.py
+++ b/tensorflow/python/keras/callbacks.py
@@ -24,6 +24,7 @@ from collections import Iterable
from collections import OrderedDict
import copy
import csv
+import io
import json
import math
import os
@@ -606,24 +607,28 @@ class EarlyStopping(Callback):
"""Stop training when a monitored quantity has stopped improving.
Arguments:
- monitor: quantity to be monitored.
- min_delta: minimum change in the monitored quantity
+ monitor: Quantity to be monitored.
+ min_delta: Minimum change in the monitored quantity
to qualify as an improvement, i.e. an absolute
change of less than min_delta will count as no
improvement.
- patience: number of epochs with no improvement
+ patience: Number of epochs with no improvement
after which training will be stopped.
verbose: verbosity mode.
- mode: one of {auto, min, max}. In `min` mode,
+ mode: One of `{"auto", "min", "max"}`. In `min` mode,
training will stop when the quantity
monitored has stopped decreasing; in `max`
mode it will stop when the quantity
monitored has stopped increasing; in `auto`
mode, the direction is automatically inferred
from the name of the monitored quantity.
- baseline: baseline value for the monitored quantity.
+ baseline: Baseline value for the monitored quantity.
Training will stop if the model doesn't show improvement over the
baseline.
+ restore_best_weights: Whether to restore model weights from
+ the epoch with the best value of the monitored quantity.
+ If False, the model weights obtained at the last step of
+ training are used.
"""
def __init__(self,
@@ -632,7 +637,8 @@ class EarlyStopping(Callback):
patience=0,
verbose=0,
mode='auto',
- baseline=None):
+ baseline=None,
+ restore_best_weights=False):
super(EarlyStopping, self).__init__()
self.monitor = monitor
@@ -642,6 +648,8 @@ class EarlyStopping(Callback):
self.min_delta = abs(min_delta)
self.wait = 0
self.stopped_epoch = 0
+ self.restore_best_weights = restore_best_weights
+ self.best_weights = None
if mode not in ['auto', 'min', 'max']:
logging.warning('EarlyStopping mode %s is unknown, '
@@ -673,25 +681,37 @@ class EarlyStopping(Callback):
self.best = np.Inf if self.monitor_op == np.less else -np.Inf
def on_epoch_end(self, epoch, logs=None):
- current = logs.get(self.monitor)
+ current = self.get_monitor_value(logs)
if current is None:
- logging.warning('Early stopping conditioned on metric `%s` '
- 'which is not available. Available metrics are: %s',
- self.monitor, ','.join(list(logs.keys())))
return
if self.monitor_op(current - self.min_delta, self.best):
self.best = current
self.wait = 0
+ if self.restore_best_weights:
+ self.best_weights = self.model.get_weights()
else:
self.wait += 1
if self.wait >= self.patience:
self.stopped_epoch = epoch
self.model.stop_training = True
+ if self.restore_best_weights:
+ if self.verbose > 0:
+ print('Restoring model weights from the end of the best epoch.')
+ self.model.set_weights(self.best_weights)
def on_train_end(self, logs=None):
if self.stopped_epoch > 0 and self.verbose > 0:
print('Epoch %05d: early stopping' % (self.stopped_epoch + 1))
+ def get_monitor_value(self, logs):
+ logs = logs or {}
+ monitor_value = logs.get(self.monitor)
+ if monitor_value is None:
+ logging.warning('Early stopping conditioned on metric `%s` '
+ 'which is not available. Available metrics are: %s',
+ self.monitor, ','.join(list(logs.keys())))
+ return monitor_value
+
@tf_export('keras.callbacks.RemoteMonitor')
class RemoteMonitor(Callback):
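
Usage note: a hedged sketch of the new `restore_best_weights` flag, assuming the EarlyStopping changes above; the tiny model and random data are placeholders.

```python
# Illustrative only: a toy model to show restore_best_weights=True.
import numpy as np
import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(8,))])
model.compile(optimizer='sgd', loss='mse')

early_stop = tf.keras.callbacks.EarlyStopping(
    monitor='val_loss', patience=2, restore_best_weights=True, verbose=1)

x, y = np.random.rand(64, 8), np.random.rand(64, 1)
# If val_loss fails to improve for 2 consecutive epochs, training stops
# and the weights from the best epoch (not the last one) are restored.
model.fit(x, y, validation_split=0.25, epochs=50, callbacks=[early_stop])
```
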
@@ -839,6 +859,12 @@ class TensorBoard(Callback):
`embeddings_layer_names`. Numpy array (if the model has a single
input) or list of Numpy arrays (if the model has multiple inputs).
Learn [more about embeddings](https://www.tensorflow.org/programmers_guide/embedding)
+      update_freq: `'batch'`, `'epoch'`, or an integer. When using `'batch'`,
+        writes the losses and metrics to TensorBoard after each batch.
+        The same applies for `'epoch'`. If set to an integer, say `1000`,
+ the callback will write the metrics and losses to TensorBoard every
+ 1000 samples. Note that writing too frequently to TensorBoard
+ can slow down your training.
Raises:
ValueError: If histogram_freq is set and no validation data is provided.
@@ -862,7 +888,8 @@ class TensorBoard(Callback):
embeddings_freq=0,
embeddings_layer_names=None,
embeddings_metadata=None,
- embeddings_data=None):
+ embeddings_data=None,
+ update_freq='epoch'):
super(TensorBoard, self).__init__()
self.log_dir = log_dir
self.histogram_freq = histogram_freq
@@ -882,6 +909,12 @@ class TensorBoard(Callback):
self.embeddings_layer_names = embeddings_layer_names
self.embeddings_metadata = embeddings_metadata
self.embeddings_data = embeddings_data
+ if update_freq == 'batch':
+ self.update_freq = 1
+ else:
+ self.update_freq = update_freq
+ self._samples_seen = 0
+ self._samples_seen_at_last_write = 0
def _init_writer(self):
"""Sets file writer."""
@@ -1045,13 +1078,17 @@ class TensorBoard(Callback):
# use v2 summary ops
with self.writer.as_default(), summary_ops_v2.always_record_summaries():
for name, value in logs.items():
- summary_ops_v2.scalar(name, value.item(), step=step)
+ if isinstance(value, np.ndarray):
+ value = value.item()
+ summary_ops_v2.scalar(name, value, step=step)
else:
# use FileWriter from v1 summary
for name, value in logs.items():
+ if isinstance(value, np.ndarray):
+ value = value.item()
summary = tf_summary.Summary()
summary_value = summary.value.add()
- summary_value.simple_value = value.item()
+ summary_value.simple_value = value
summary_value.tag = name
self.writer.add_summary(summary, step)
self.writer.flush()
@@ -1076,10 +1113,14 @@ class TensorBoard(Callback):
"""Writes scalar summaries for metrics on every training batch."""
# Don't output batch_size and batch number as Tensorboard summaries
logs = logs or {}
- batch_logs = {('batch_' + k): v
- for k, v in logs.items()
- if k not in ['batch', 'size', 'num_steps']}
- self._write_custom_summaries(self._total_batches_seen, batch_logs)
+ self._samples_seen += logs.get('size', 1)
+ samples_seen_since = self._samples_seen - self._samples_seen_at_last_write
+ if self.update_freq != 'epoch' and samples_seen_since >= self.update_freq:
+ batch_logs = {('batch_' + k): v
+ for k, v in logs.items()
+ if k not in ['batch', 'size', 'num_steps']}
+ self._write_custom_summaries(self._total_batches_seen, batch_logs)
+ self._samples_seen_at_last_write = self._samples_seen
self._total_batches_seen += 1
def on_epoch_begin(self, epoch, logs=None):
@@ -1103,7 +1144,11 @@ class TensorBoard(Callback):
logs = {('epoch_' + k): v
for k, v in logs.items()
if k not in ['batch', 'size', 'num_steps']}
- self._write_custom_summaries(epoch, logs)
+ if self.update_freq == 'epoch':
+ step = epoch
+ else:
+ step = self._samples_seen
+ self._write_custom_summaries(step, logs)
# pop the histogram summary op after each epoch
if self.histogram_freq:
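
Usage note: the three `update_freq` modes introduced above, as a sketch; the log directory is an arbitrary placeholder.

```python
# Illustrative: the three accepted values for update_freq.
import tensorflow as tf

tb_epoch = tf.keras.callbacks.TensorBoard('/tmp/logs', update_freq='epoch')
tb_batch = tf.keras.callbacks.TensorBoard('/tmp/logs', update_freq='batch')
# Integer mode counts *samples*, not batches: with update_freq=1000 a
# summary is written once at least 1000 samples have been seen since
# the last write.
tb_n = tf.keras.callbacks.TensorBoard('/tmp/logs', update_freq=1000)
```
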
@@ -1309,7 +1354,12 @@ class CSVLogger(Callback):
self.writer = None
self.keys = None
self.append_header = True
- self.file_flags = 'b' if six.PY2 and os.name == 'nt' else ''
+ if six.PY2:
+ self.file_flags = 'b'
+ self._open_args = {}
+ else:
+ self.file_flags = ''
+ self._open_args = {'newline': '\n'}
super(CSVLogger, self).__init__()
def on_train_begin(self, logs=None):
@@ -1317,9 +1367,12 @@ class CSVLogger(Callback):
if os.path.exists(self.filename):
with open(self.filename, 'r' + self.file_flags) as f:
self.append_header = not bool(len(f.readline()))
- self.csv_file = open(self.filename, 'a' + self.file_flags)
+ mode = 'a'
else:
- self.csv_file = open(self.filename, 'w' + self.file_flags)
+ mode = 'w'
+ self.csv_file = io.open(self.filename,
+ mode + self.file_flags,
+ **self._open_args)
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
@@ -1345,9 +1398,13 @@ class CSVLogger(Callback):
class CustomDialect(csv.excel):
delimiter = self.sep
+ fieldnames = ['epoch'] + self.keys
+ if six.PY2:
+ fieldnames = [unicode(x) for x in fieldnames]
+
self.writer = csv.DictWriter(
self.csv_file,
- fieldnames=['epoch'] + self.keys,
+ fieldnames=fieldnames,
dialect=CustomDialect)
if self.append_header:
self.writer.writeheader()
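
Usage note: a sketch of `CSVLogger`, whose file handling the hunks above rework; the path, model, and data are placeholders.

```python
# Illustrative CSVLogger usage; append=True takes the 'a'-mode path
# above, and on Python 3 the file is opened via io.open(newline='\n').
import numpy as np
import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
model.compile(optimizer='sgd', loss='mse')

csv_logger = tf.keras.callbacks.CSVLogger('/tmp/training.log', append=True)
model.fit(np.random.rand(32, 4), np.random.rand(32, 1),
          epochs=2, verbose=0, callbacks=[csv_logger])
```
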
diff --git a/tensorflow/python/keras/callbacks_test.py b/tensorflow/python/keras/callbacks_test.py
index 467bc4cdc4..bb85347033 100644
--- a/tensorflow/python/keras/callbacks_test.py
+++ b/tensorflow/python/keras/callbacks_test.py
@@ -313,6 +313,42 @@ class KerasCallbacksTest(test.TestCase):
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) >= patience
+ def test_EarlyStopping_final_weights_when_restoring_model_weights(self):
+
+ class DummyModel(object):
+
+ def __init__(self):
+ self.stop_training = False
+ self.weights = -1
+
+ def get_weights(self):
+ return self.weights
+
+ def set_weights(self, weights):
+ self.weights = weights
+
+ def set_weight_to_epoch(self, epoch):
+ self.weights = epoch
+
+ early_stop = keras.callbacks.EarlyStopping(monitor='val_loss',
+ patience=2,
+ restore_best_weights=True)
+ early_stop.model = DummyModel()
+ losses = [0.2, 0.15, 0.1, 0.11, 0.12]
+    # The best configuration is in epoch 2 (loss = 0.1000).
+ epochs_trained = 0
+ early_stop.on_train_begin()
+ for epoch in range(len(losses)):
+ epochs_trained += 1
+ early_stop.model.set_weight_to_epoch(epoch=epoch)
+ early_stop.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})
+ if early_stop.model.stop_training:
+ break
+    # The best configuration is in epoch 2 (loss = 0.1000); although
+    # patience = 2 allows training to continue for two more epochs,
+    # restoring the best weights means we end up with those of epoch 2.
+ self.assertEqual(early_stop.model.get_weights(), 2)
+
def test_RemoteMonitor(self):
if requests is None:
return
@@ -534,11 +570,15 @@ class KerasCallbacksTest(test.TestCase):
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
- epochs=1,
+ epochs=2,
verbose=0)
with open(filepath) as csvfile:
- output = ' '.join(csvfile.readlines())
+ list_lines = csvfile.readlines()
+ for line in list_lines:
+ assert line.count(sep) == 4
+ assert len(list_lines) == 5
+ output = ' '.join(list_lines)
assert len(re.findall('epoch', output)) == 1
os.remove(filepath)
@@ -1115,11 +1155,11 @@ class KerasCallbacksTest(test.TestCase):
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
- tb_cbk = keras.callbacks.TensorBoard(temp_dir)
+ tb_cbk = keras.callbacks.TensorBoard(temp_dir, update_freq='batch')
tb_cbk.writer = FileWriterStub(temp_dir)
for batch in range(5):
- tb_cbk.on_batch_end(batch, {'acc': np.float32(batch)})
+ tb_cbk.on_batch_end(batch, {'acc': batch})
self.assertEqual(tb_cbk.writer.batches_logged, [0, 1, 2, 3, 4])
self.assertEqual(tb_cbk.writer.summary_values, [0., 1., 2., 3., 4.])
self.assertEqual(tb_cbk.writer.summary_tags, ['batch_acc'] * 5)
@@ -1147,14 +1187,17 @@ class KerasCallbacksTest(test.TestCase):
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
- tb_cbk = keras.callbacks.TensorBoard(temp_dir)
+ tb_cbk = keras.callbacks.TensorBoard(temp_dir, update_freq='batch')
tb_cbk.writer = FileWriterStub(temp_dir)
- tb_cbk.on_batch_end(0, {'acc': np.float32(5.0)})
- tb_cbk.on_epoch_end(0, {'acc': np.float32(10.0)})
+ tb_cbk.on_batch_end(0, {'acc': 5.0})
batch_step, batch_summary = tb_cbk.writer.batch_summary
self.assertEqual(batch_step, 0)
self.assertEqual(batch_summary.value[0].simple_value, 5.0)
+
+ tb_cbk = keras.callbacks.TensorBoard(temp_dir, update_freq='epoch')
+ tb_cbk.writer = FileWriterStub(temp_dir)
+ tb_cbk.on_epoch_end(0, {'acc': 10.0})
epoch_step, epoch_summary = tb_cbk.writer.epoch_summary
self.assertEqual(epoch_step, 0)
self.assertEqual(epoch_summary.value[0].simple_value, 10.0)
@@ -1192,6 +1235,66 @@ class KerasCallbacksTest(test.TestCase):
self.assertTrue(os.path.exists(temp_dir))
+ def test_TensorBoard_update_freq(self):
+
+ class FileWriterStub(object):
+
+ def __init__(self, logdir, graph=None):
+ self.logdir = logdir
+ self.graph = graph
+ self.batch_summaries = []
+ self.epoch_summaries = []
+
+ def add_summary(self, summary, step):
+ if 'batch_' in summary.value[0].tag:
+ self.batch_summaries.append((step, summary))
+ elif 'epoch_' in summary.value[0].tag:
+ self.epoch_summaries.append((step, summary))
+
+ def flush(self):
+ pass
+
+ def close(self):
+ pass
+
+ temp_dir = self.get_temp_dir()
+ self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
+
+ # Epoch mode
+ tb_cbk = keras.callbacks.TensorBoard(temp_dir, update_freq='epoch')
+ tb_cbk.writer = FileWriterStub(temp_dir)
+
+ tb_cbk.on_batch_end(0, {'acc': 5.0, 'size': 1})
+ self.assertEqual(tb_cbk.writer.batch_summaries, [])
+ tb_cbk.on_epoch_end(0, {'acc': 10.0, 'size': 1})
+ self.assertEqual(len(tb_cbk.writer.epoch_summaries), 1)
+
+ # Batch mode
+ tb_cbk = keras.callbacks.TensorBoard(temp_dir, update_freq='batch')
+ tb_cbk.writer = FileWriterStub(temp_dir)
+
+ tb_cbk.on_batch_end(0, {'acc': 5.0, 'size': 1})
+ self.assertEqual(len(tb_cbk.writer.batch_summaries), 1)
+ tb_cbk.on_batch_end(0, {'acc': 5.0, 'size': 1})
+ self.assertEqual(len(tb_cbk.writer.batch_summaries), 2)
+ self.assertFalse(tb_cbk.writer.epoch_summaries)
+
+ # Integer mode
+ tb_cbk = keras.callbacks.TensorBoard(temp_dir, update_freq=20)
+ tb_cbk.writer = FileWriterStub(temp_dir)
+
+ tb_cbk.on_batch_end(0, {'acc': 5.0, 'size': 10})
+ self.assertFalse(tb_cbk.writer.batch_summaries)
+ tb_cbk.on_batch_end(0, {'acc': 5.0, 'size': 10})
+ self.assertEqual(len(tb_cbk.writer.batch_summaries), 1)
+ tb_cbk.on_batch_end(0, {'acc': 5.0, 'size': 10})
+ self.assertEqual(len(tb_cbk.writer.batch_summaries), 1)
+ tb_cbk.on_batch_end(0, {'acc': 5.0, 'size': 10})
+ self.assertEqual(len(tb_cbk.writer.batch_summaries), 2)
+ tb_cbk.on_batch_end(0, {'acc': 10.0, 'size': 10})
+ self.assertEqual(len(tb_cbk.writer.batch_summaries), 2)
+ self.assertFalse(tb_cbk.writer.epoch_summaries)
+
def test_RemoteMonitorWithJsonPayload(self):
if requests is None:
self.skipTest('`requests` required to run this test')
@@ -1226,6 +1329,7 @@ class KerasCallbacksTest(test.TestCase):
def test_fit_generator_with_callback(self):
class TestCallback(keras.callbacks.Callback):
+
def set_model(self, model):
# Check the model operations for the optimizer operations that
# the _make_train_function adds under a named scope for the
diff --git a/tensorflow/python/keras/layers/convolutional.py b/tensorflow/python/keras/layers/convolutional.py
index 8f5872385c..58024677ee 100644
--- a/tensorflow/python/keras/layers/convolutional.py
+++ b/tensorflow/python/keras/layers/convolutional.py
@@ -1951,6 +1951,7 @@ class UpSampling2D(Layer):
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
+ interpolation: A string, one of `nearest` or `bilinear`.
Input shape:
4D tensor with shape:
@@ -1967,10 +1968,18 @@ class UpSampling2D(Layer):
`(batch, channels, upsampled_rows, upsampled_cols)`
"""
- def __init__(self, size=(2, 2), data_format=None, **kwargs):
+ def __init__(self,
+ size=(2, 2),
+ data_format=None,
+ interpolation='nearest',
+ **kwargs):
super(UpSampling2D, self).__init__(**kwargs)
self.data_format = conv_utils.normalize_data_format(data_format)
self.size = conv_utils.normalize_tuple(size, 2, 'size')
+ if interpolation not in {'nearest', 'bilinear'}:
+ raise ValueError('`interpolation` argument should be one of `"nearest"` '
+ 'or `"bilinear"`.')
+ self.interpolation = interpolation
self.input_spec = InputSpec(ndim=4)
def compute_output_shape(self, input_shape):
@@ -1992,7 +2001,8 @@ class UpSampling2D(Layer):
def call(self, inputs):
return backend.resize_images(
- inputs, self.size[0], self.size[1], self.data_format)
+ inputs, self.size[0], self.size[1], self.data_format,
+ interpolation=self.interpolation)
def get_config(self):
config = {'size': self.size, 'data_format': self.data_format}
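
Usage note: a sketch of `UpSampling2D` with the new `interpolation` option, assuming the layer changes above; shapes are illustrative.

```python
# Illustrative: bilinear upsampling doubles the spatial dimensions.
import numpy as np
import tensorflow as tf

layer = tf.keras.layers.UpSampling2D(size=(2, 2), interpolation='bilinear')
x = np.random.rand(1, 3, 4, 2).astype('float32')  # channels_last
y = layer(tf.constant(x))
print(y.shape)  # (1, 6, 8, 2)
```
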
diff --git a/tensorflow/python/keras/layers/convolutional_test.py b/tensorflow/python/keras/layers/convolutional_test.py
index f88d632ab5..bdc175b8b9 100644
--- a/tensorflow/python/keras/layers/convolutional_test.py
+++ b/tensorflow/python/keras/layers/convolutional_test.py
@@ -790,6 +790,42 @@ class UpSamplingTest(test.TestCase):
np.testing.assert_allclose(np_output, expected_out)
@tf_test_util.run_in_graph_and_eager_modes
+ def test_upsampling_2d_bilinear(self):
+ num_samples = 2
+ stack_size = 2
+ input_num_row = 11
+ input_num_col = 12
+ for data_format in ['channels_first', 'channels_last']:
+ if data_format == 'channels_first':
+ inputs = np.random.rand(num_samples, stack_size, input_num_row,
+ input_num_col)
+ else:
+ inputs = np.random.rand(num_samples, input_num_row, input_num_col,
+ stack_size)
+
+ testing_utils.layer_test(keras.layers.UpSampling2D,
+ kwargs={'size': (2, 2),
+ 'data_format': data_format,
+ 'interpolation': 'bilinear'},
+ input_shape=inputs.shape)
+
+ if not context.executing_eagerly():
+ for length_row in [2]:
+ for length_col in [2, 3]:
+ layer = keras.layers.UpSampling2D(
+ size=(length_row, length_col),
+ data_format=data_format)
+ layer.build(inputs.shape)
+ outputs = layer(keras.backend.variable(inputs))
+ np_output = keras.backend.eval(outputs)
+ if data_format == 'channels_first':
+ self.assertEqual(np_output.shape[2], length_row * input_num_row)
+ self.assertEqual(np_output.shape[3], length_col * input_num_col)
+ else:
+ self.assertEqual(np_output.shape[1], length_row * input_num_row)
+ self.assertEqual(np_output.shape[2], length_col * input_num_col)
+
+ @tf_test_util.run_in_graph_and_eager_modes
def test_upsampling_3d(self):
num_samples = 2
stack_size = 2
diff --git a/tensorflow/python/kernel_tests/rnn_test.py b/tensorflow/python/kernel_tests/rnn_test.py
index 2f6963f6b8..907e1277a9 100644
--- a/tensorflow/python/kernel_tests/rnn_test.py
+++ b/tensorflow/python/kernel_tests/rnn_test.py
@@ -571,8 +571,8 @@ class RNNTest(test.TestCase):
cell.set_weights(tf_weights)
[tf_out, tf_state] = sess.run([tf_out, tf_state], {inputs: x_train})
- self.assertAllClose(tf_out, k_out)
- self.assertAllClose(tf_state, k_state)
+ self.assertAllClose(tf_out, k_out, atol=1e-5)
+ self.assertAllClose(tf_state, k_state, atol=1e-5)
def testBasicLSTMCellInterchangeWithLSTMCell(self):
with self.session(graph=ops_lib.Graph()) as sess:
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.backend.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.backend.pbtxt
index 9feb7c09b8..5f0dfd7ae7 100644
--- a/tensorflow/tools/api/golden/v1/tensorflow.keras.backend.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.backend.pbtxt
@@ -386,7 +386,7 @@ tf_module {
}
member_method {
name: "resize_images"
- argspec: "args=[\'x\', \'height_factor\', \'width_factor\', \'data_format\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'x\', \'height_factor\', \'width_factor\', \'data_format\', \'interpolation\'], varargs=None, keywords=None, defaults=[\'nearest\'], "
}
member_method {
name: "resize_volumes"
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.callbacks.-early-stopping.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.callbacks.-early-stopping.pbtxt
index f71292856c..ed0f37647f 100644
--- a/tensorflow/tools/api/golden/v1/tensorflow.keras.callbacks.-early-stopping.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.callbacks.-early-stopping.pbtxt
@@ -5,7 +5,11 @@ tf_class {
is_instance: "<type \'object\'>"
member_method {
name: "__init__"
- argspec: "args=[\'self\', \'monitor\', \'min_delta\', \'patience\', \'verbose\', \'mode\', \'baseline\'], varargs=None, keywords=None, defaults=[\'val_loss\', \'0\', \'0\', \'0\', \'auto\', \'None\'], "
+ argspec: "args=[\'self\', \'monitor\', \'min_delta\', \'patience\', \'verbose\', \'mode\', \'baseline\', \'restore_best_weights\'], varargs=None, keywords=None, defaults=[\'val_loss\', \'0\', \'0\', \'0\', \'auto\', \'None\', \'False\'], "
+ }
+ member_method {
+ name: "get_monitor_value"
+ argspec: "args=[\'self\', \'logs\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "on_batch_begin"
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.callbacks.-tensor-board.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.callbacks.-tensor-board.pbtxt
index e58ba18c1c..e9d53b7225 100644
--- a/tensorflow/tools/api/golden/v1/tensorflow.keras.callbacks.-tensor-board.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.callbacks.-tensor-board.pbtxt
@@ -5,7 +5,7 @@ tf_class {
is_instance: "<type \'object\'>"
member_method {
name: "__init__"
- argspec: "args=[\'self\', \'log_dir\', \'histogram_freq\', \'batch_size\', \'write_graph\', \'write_grads\', \'write_images\', \'embeddings_freq\', \'embeddings_layer_names\', \'embeddings_metadata\', \'embeddings_data\'], varargs=None, keywords=None, defaults=[\'./logs\', \'0\', \'32\', \'True\', \'False\', \'False\', \'0\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'log_dir\', \'histogram_freq\', \'batch_size\', \'write_graph\', \'write_grads\', \'write_images\', \'embeddings_freq\', \'embeddings_layer_names\', \'embeddings_metadata\', \'embeddings_data\', \'update_freq\'], varargs=None, keywords=None, defaults=[\'./logs\', \'0\', \'32\', \'True\', \'False\', \'False\', \'0\', \'None\', \'None\', \'None\', \'epoch\'], "
}
member_method {
name: "on_batch_begin"
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-up-sampling2-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-up-sampling2-d.pbtxt
index 40a56a0c94..b05e5ec84d 100644
--- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-up-sampling2-d.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-up-sampling2-d.pbtxt
@@ -82,7 +82,7 @@ tf_class {
}
member_method {
name: "__init__"
- argspec: "args=[\'self\', \'size\', \'data_format\'], varargs=None, keywords=kwargs, defaults=[\'(2, 2)\', \'None\'], "
+ argspec: "args=[\'self\', \'size\', \'data_format\', \'interpolation\'], varargs=None, keywords=kwargs, defaults=[\'(2, 2)\', \'None\', \'nearest\'], "
}
member_method {
name: "add_loss"
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.backend.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.backend.pbtxt
index 9feb7c09b8..5f0dfd7ae7 100644
--- a/tensorflow/tools/api/golden/v2/tensorflow.keras.backend.pbtxt
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.backend.pbtxt
@@ -386,7 +386,7 @@ tf_module {
}
member_method {
name: "resize_images"
- argspec: "args=[\'x\', \'height_factor\', \'width_factor\', \'data_format\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'x\', \'height_factor\', \'width_factor\', \'data_format\', \'interpolation\'], varargs=None, keywords=None, defaults=[\'nearest\'], "
}
member_method {
name: "resize_volumes"
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.callbacks.-early-stopping.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.callbacks.-early-stopping.pbtxt
index f71292856c..ed0f37647f 100644
--- a/tensorflow/tools/api/golden/v2/tensorflow.keras.callbacks.-early-stopping.pbtxt
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.callbacks.-early-stopping.pbtxt
@@ -5,7 +5,11 @@ tf_class {
is_instance: "<type \'object\'>"
member_method {
name: "__init__"
- argspec: "args=[\'self\', \'monitor\', \'min_delta\', \'patience\', \'verbose\', \'mode\', \'baseline\'], varargs=None, keywords=None, defaults=[\'val_loss\', \'0\', \'0\', \'0\', \'auto\', \'None\'], "
+ argspec: "args=[\'self\', \'monitor\', \'min_delta\', \'patience\', \'verbose\', \'mode\', \'baseline\', \'restore_best_weights\'], varargs=None, keywords=None, defaults=[\'val_loss\', \'0\', \'0\', \'0\', \'auto\', \'None\', \'False\'], "
+ }
+ member_method {
+ name: "get_monitor_value"
+ argspec: "args=[\'self\', \'logs\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "on_batch_begin"
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.callbacks.-tensor-board.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.callbacks.-tensor-board.pbtxt
index e58ba18c1c..e9d53b7225 100644
--- a/tensorflow/tools/api/golden/v2/tensorflow.keras.callbacks.-tensor-board.pbtxt
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.callbacks.-tensor-board.pbtxt
@@ -5,7 +5,7 @@ tf_class {
is_instance: "<type \'object\'>"
member_method {
name: "__init__"
- argspec: "args=[\'self\', \'log_dir\', \'histogram_freq\', \'batch_size\', \'write_graph\', \'write_grads\', \'write_images\', \'embeddings_freq\', \'embeddings_layer_names\', \'embeddings_metadata\', \'embeddings_data\'], varargs=None, keywords=None, defaults=[\'./logs\', \'0\', \'32\', \'True\', \'False\', \'False\', \'0\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'log_dir\', \'histogram_freq\', \'batch_size\', \'write_graph\', \'write_grads\', \'write_images\', \'embeddings_freq\', \'embeddings_layer_names\', \'embeddings_metadata\', \'embeddings_data\', \'update_freq\'], varargs=None, keywords=None, defaults=[\'./logs\', \'0\', \'32\', \'True\', \'False\', \'False\', \'0\', \'None\', \'None\', \'None\', \'epoch\'], "
}
member_method {
name: "on_batch_begin"
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-up-sampling2-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-up-sampling2-d.pbtxt
index 40a56a0c94..b05e5ec84d 100644
--- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-up-sampling2-d.pbtxt
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-up-sampling2-d.pbtxt
@@ -82,7 +82,7 @@ tf_class {
}
member_method {
name: "__init__"
- argspec: "args=[\'self\', \'size\', \'data_format\'], varargs=None, keywords=kwargs, defaults=[\'(2, 2)\', \'None\'], "
+ argspec: "args=[\'self\', \'size\', \'data_format\', \'interpolation\'], varargs=None, keywords=kwargs, defaults=[\'(2, 2)\', \'None\', \'nearest\'], "
}
member_method {
name: "add_loss"