path: root/tensorflow/contrib/keras
author     Benoit Steiner <bsteiner@google.com>              2017-05-10 21:12:21 -0700
committer  TensorFlower Gardener <gardener@tensorflow.org>   2017-05-11 11:02:28 -0700
commit     ee112cff56081fb9d0b74c987a8935acc360b05c (patch)
tree       6026d8b42ccc09d9c0d1b2d091916cfcb4f5a057 /tensorflow/contrib/keras
parent     27c89207d2f31fe4b4b42c789b96d62cde4e2133 (diff)
Merge changes from github.
PiperOrigin-RevId: 155709893
Diffstat (limited to 'tensorflow/contrib/keras')
-rw-r--r--  tensorflow/contrib/keras/python/keras/datasets/imdb.py            2
-rw-r--r--  tensorflow/contrib/keras/python/keras/datasets/reuters.py         2
-rw-r--r--  tensorflow/contrib/keras/python/keras/engine/topology.py          2
-rw-r--r--  tensorflow/contrib/keras/python/keras/engine/training.py          2
-rw-r--r--  tensorflow/contrib/keras/python/keras/layers/normalization.py     2
-rw-r--r--  tensorflow/contrib/keras/python/keras/models.py                   2
-rw-r--r--  tensorflow/contrib/keras/python/keras/preprocessing/sequence.py   2
7 files changed, 7 insertions, 7 deletions
diff --git a/tensorflow/contrib/keras/python/keras/datasets/imdb.py b/tensorflow/contrib/keras/python/keras/datasets/imdb.py
index bafd92aca6..5c087fe63f 100644
--- a/tensorflow/contrib/keras/python/keras/datasets/imdb.py
+++ b/tensorflow/contrib/keras/python/keras/datasets/imdb.py
@@ -41,7 +41,7 @@ def load_data(path='imdb.npz',
num_words: max number of words to include. Words are ranked
by how often they occur (in the training set) and only
the most frequent words are kept
- skip_top: skip the top N most frequently occuring words
+ skip_top: skip the top N most frequently occurring words
(which may not be informative).
maxlen: truncate sequences after this length.
seed: random seed for sample shuffling.
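
Note: the corrected `skip_top` docstring above belongs to `imdb.load_data`. A minimal usage sketch, assuming the contrib import path shown in this diff; the argument values are illustrative only:

    from tensorflow.contrib.keras.python.keras.datasets import imdb

    # Keep the 10,000 most frequent words, but drop the 20 most
    # frequent ones (the stop-word-like tokens skip_top targets).
    (x_train, y_train), (x_test, y_test) = imdb.load_data(
        num_words=10000, skip_top=20, maxlen=None, seed=113)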
diff --git a/tensorflow/contrib/keras/python/keras/datasets/reuters.py b/tensorflow/contrib/keras/python/keras/datasets/reuters.py
index 81e940a846..b1c22fee63 100644
--- a/tensorflow/contrib/keras/python/keras/datasets/reuters.py
+++ b/tensorflow/contrib/keras/python/keras/datasets/reuters.py
@@ -43,7 +43,7 @@ def load_data(path='reuters.npz',
num_words: max number of words to include. Words are ranked
by how often they occur (in the training set) and only
the most frequent words are kept
- skip_top: skip the top N most frequently occuring words
+ skip_top: skip the top N most frequently occurring words
(which may not be informative).
maxlen: truncate sequences after this length.
test_split: Fraction of the dataset to be used as test data.
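
Note: `reuters.load_data` shares the `num_words`/`skip_top` semantics with `imdb.load_data`; `test_split` additionally carves out a held-out fraction. A sketch with illustrative values:

    from tensorflow.contrib.keras.python.keras.datasets import reuters

    # Reserve 20% of the corpus as test data.
    (x_train, y_train), (x_test, y_test) = reuters.load_data(
        num_words=10000, skip_top=20, test_split=0.2)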
diff --git a/tensorflow/contrib/keras/python/keras/engine/topology.py b/tensorflow/contrib/keras/python/keras/engine/topology.py
index 0336fc4bf4..3d9ed51a1c 100644
--- a/tensorflow/contrib/keras/python/keras/engine/topology.py
+++ b/tensorflow/contrib/keras/python/keras/engine/topology.py
@@ -649,7 +649,7 @@ class Layer(tf_base_layers.Layer):
'but was passed an input_mask: ' + str(mask))
# masking not explicitly supported: return None as mask
return None
- # if masking is explictly supported, by default
+ # if masking is explicitly supported, by default
# carry over the input mask
return mask
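
Note: the hunk above documents the default `Layer.compute_mask` behavior: raise if masking is unsupported but a mask arrives, otherwise carry the input mask through. A sketch of a layer opting in to that carry-over branch; `ScaleLayer` is a hypothetical example, not part of the library:

    from tensorflow.contrib.keras.python.keras.engine.topology import Layer

    class ScaleLayer(Layer):
      """Elementwise op that keeps timesteps aligned, so the default
      compute_mask() can safely carry the input mask over."""

      def __init__(self, factor=2.0, **kwargs):
        super(ScaleLayer, self).__init__(**kwargs)
        self.factor = factor
        # Declaring mask support triggers the carry-over branch above.
        self.supports_masking = True

      def call(self, inputs):
        return inputs * self.factor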
diff --git a/tensorflow/contrib/keras/python/keras/engine/training.py b/tensorflow/contrib/keras/python/keras/engine/training.py
index ba6201713e..96d1c2f262 100644
--- a/tensorflow/contrib/keras/python/keras/engine/training.py
+++ b/tensorflow/contrib/keras/python/keras/engine/training.py
@@ -245,7 +245,7 @@ def _check_array_lengths(inputs, targets, weights):
def _check_loss_and_target_compatibility(targets, loss_fns, output_shapes):
- """Does validation on the compatiblity of targets and loss functions.
+ """Does validation on the compatibility of targets and loss functions.
This helps prevent users from using loss functions incorrectly.
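
Note: among the misuses `_check_loss_and_target_compatibility` guards against is pairing integer class-id targets with `categorical_crossentropy`. A sketch of the two target layouts, assuming standard Keras loss semantics:

    import numpy as np

    # Integer class ids, shape (batch, 1): pair these with
    # sparse_categorical_crossentropy, NOT categorical_crossentropy.
    sparse_targets = np.array([[0], [2], [1]])

    # One-hot targets, shape (batch, num_classes): what
    # categorical_crossentropy expects.
    one_hot_targets = np.array([[1, 0, 0],
                                [0, 0, 1],
                                [0, 1, 0]])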
diff --git a/tensorflow/contrib/keras/python/keras/layers/normalization.py b/tensorflow/contrib/keras/python/keras/layers/normalization.py
index df77401aee..ea229fdce1 100644
--- a/tensorflow/contrib/keras/python/keras/layers/normalization.py
+++ b/tensorflow/contrib/keras/python/keras/layers/normalization.py
@@ -169,7 +169,7 @@ class BatchNormalization(Layer):
def normalize_inference():
if needs_broadcasting:
- # In this case we must explictly broadcast all parameters.
+ # In this case we must explicitly broadcast all parameters.
broadcast_moving_mean = K.reshape(self.moving_mean, broadcast_shape)
broadcast_moving_variance = K.reshape(self.moving_variance,
broadcast_shape)
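
Note: the `K.reshape` calls above implement the explicit broadcast: per-channel statistics of shape `(C,)` are reshaped to align with the N-dimensional input. A numpy sketch of the same arithmetic, assuming channels-last 4D inputs:

    import numpy as np

    x = np.random.rand(2, 4, 4, 3)   # (N, H, W, C), channels last
    moving_mean = np.zeros(3)        # per-channel statistic, shape (C,)
    moving_variance = np.ones(3)
    epsilon = 1e-3

    # Reshape (C,) -> (1, 1, 1, C) so subtraction and division
    # broadcast over N, H, W -- the explicit broadcast the comment
    # in the hunk refers to.
    broadcast_shape = (1, 1, 1, 3)
    x_norm = ((x - moving_mean.reshape(broadcast_shape)) /
              np.sqrt(moving_variance.reshape(broadcast_shape) + epsilon))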
diff --git a/tensorflow/contrib/keras/python/keras/models.py b/tensorflow/contrib/keras/python/keras/models.py
index 52456a4bb5..1c041091fc 100644
--- a/tensorflow/contrib/keras/python/keras/models.py
+++ b/tensorflow/contrib/keras/python/keras/models.py
@@ -221,7 +221,7 @@ def load_model(filepath, custom_objects=None):
obj: object, dict, or list.
Returns:
- The same structure, where occurences
+ The same structure, where occurrences
of a custom object name have been replaced
with the custom object.
"""
diff --git a/tensorflow/contrib/keras/python/keras/preprocessing/sequence.py b/tensorflow/contrib/keras/python/keras/preprocessing/sequence.py
index 5a24a63b01..692a359ead 100644
--- a/tensorflow/contrib/keras/python/keras/preprocessing/sequence.py
+++ b/tensorflow/contrib/keras/python/keras/preprocessing/sequence.py
@@ -156,7 +156,7 @@ def skipgrams(sequence,
of word indices (integers). If using a `sampling_table`,
word indices are expected to match the rank
of the words in a reference dataset (e.g. 10 would encode
- the 10-th most frequently occuring token).
+ the 10-th most frequently occurring token).
Note that index 0 is expected to be a non-word and will be skipped.
vocabulary_size: int. maximum possible word index + 1
window_size: int. actually half-window.
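
Note: a usage sketch for `skipgrams` with a sampling table, whose frequency-rank indexing is what the corrected line documents; the token ids are illustrative:

    from tensorflow.contrib.keras.python.keras.preprocessing import sequence

    vocabulary_size = 10
    # Rank-ordered ids: 1 is the most frequent token, 0 is reserved
    # as a non-word and skipped.
    sent = [1, 4, 2, 7, 3]

    # make_sampling_table(size) returns per-rank keep probabilities,
    # matching the rank-indexing convention described above.
    table = sequence.make_sampling_table(vocabulary_size)
    couples, labels = sequence.skipgrams(
        sent, vocabulary_size, window_size=2, sampling_table=table)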