path: root/tensorflow/examples
author    Mark Daoust <markdaoust@google.com>    2017-12-06 09:08:09 -0800
committer TensorFlower Gardener <gardener@tensorflow.org>    2017-12-06 09:12:34 -0800
commit    f79c39e9c8291787718015318b396bd11ff7ae71 (patch)
tree      32b2671e071af327ff2eb3cf4875d786cc84b676 /tensorflow/examples
parent    7ac7aa868406a5d9b03e4101509ac80e011b91c7 (diff)
Use sparse xent to avoid softmax_v2 warning in examples/learn
`tf.nn.softmax_cross_entropy_with_logits` and `tf.losses.softmax_cross_entropy` both throw the warning. Almost everywhere they are used, they can simply be replaced by `tf.losses.sparse_softmax_cross_entropy`.

PiperOrigin-RevId: 178105702
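The replacement is a drop-in equivalence: `tf.losses.sparse_softmax_cross_entropy` takes integer class ids directly, so the manual `tf.one_hot` conversion (and with it the code path that triggers the `softmax_v2` deprecation warning) disappears. Below is a minimal sketch of the before/after, assuming TF 1.x and three classes as in the iris examples; the tensor values and variable names are illustrative, not taken from the patch:

```python
import numpy as np
import tensorflow as tf

labels = tf.constant([0, 2, 1])  # integer class ids
logits = tf.constant(np.random.randn(3, 3).astype(np.float32))

# Before: manual one-hot conversion feeding the dense loss.
onehot_labels = tf.one_hot(labels, 3, 1, 0)
dense_loss = tf.losses.softmax_cross_entropy(
    onehot_labels=onehot_labels, logits=logits)

# After: integer labels go in directly; no tf.one_hot needed.
sparse_loss = tf.losses.sparse_softmax_cross_entropy(
    labels=labels, logits=logits)

with tf.Session() as sess:
    dense, sparse = sess.run([dense_loss, sparse_loss])
    # Same mean loss, same gradients w.r.t. logits.
    assert np.isclose(dense, sparse)
```

The sparse variant only applies when each example has exactly one hard integer label; for genuinely soft or one-hot label tensors, the usual fix for the warning is the v2 API with `tf.stop_gradient` applied to the labels instead.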
Diffstat (limited to 'tensorflow/examples')
-rw-r--r--  tensorflow/examples/learn/iris_custom_decay_dnn.py              6
-rw-r--r--  tensorflow/examples/learn/iris_custom_model.py                  6
-rw-r--r--  tensorflow/examples/learn/mnist.py                              4
-rw-r--r--  tensorflow/examples/learn/multiple_gpu.py                       6
-rwxr-xr-x  tensorflow/examples/learn/resnet.py                             4
-rw-r--r--  tensorflow/examples/learn/text_classification.py                4
-rw-r--r--  tensorflow/examples/learn/text_classification_character_cnn.py  4
-rw-r--r--  tensorflow/examples/learn/text_classification_character_rnn.py  4
-rw-r--r--  tensorflow/examples/learn/text_classification_cnn.py            4
9 files changed, 9 insertions(+), 33 deletions(-)
diff --git a/tensorflow/examples/learn/iris_custom_decay_dnn.py b/tensorflow/examples/learn/iris_custom_decay_dnn.py
index 072357e51c..4a219694d1 100644
--- a/tensorflow/examples/learn/iris_custom_decay_dnn.py
+++ b/tensorflow/examples/learn/iris_custom_decay_dnn.py
@@ -46,12 +46,8 @@ def my_model(features, labels, mode):
}
return tf.estimator.EstimatorSpec(mode, predictions=predictions)
- # Convert the labels to a one-hot tensor of shape (length of features, 3) and
- # with a on-value of 1 for each one-hot vector of length 3.
- onehot_labels = tf.one_hot(labels, 3, 1, 0)
# Compute loss.
- loss = tf.losses.softmax_cross_entropy(
- onehot_labels=onehot_labels, logits=logits)
+ loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
# Create training op with exponentially decaying learning rate.
if mode == tf.estimator.ModeKeys.TRAIN:
diff --git a/tensorflow/examples/learn/iris_custom_model.py b/tensorflow/examples/learn/iris_custom_model.py
index 471a99ba76..c6bdb86ba5 100644
--- a/tensorflow/examples/learn/iris_custom_model.py
+++ b/tensorflow/examples/learn/iris_custom_model.py
@@ -47,12 +47,8 @@ def my_model(features, labels, mode):
}
return tf.estimator.EstimatorSpec(mode, predictions=predictions)
- # Convert the labels to a one-hot tensor of shape (length of features, 3) and
- # with a on-value of 1 for each one-hot vector of length 3.
- onehot_labels = tf.one_hot(labels, 3, 1, 0)
# Compute loss.
- loss = tf.losses.softmax_cross_entropy(
- onehot_labels=onehot_labels, logits=logits)
+ loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
# Create training op.
if mode == tf.estimator.ModeKeys.TRAIN:
diff --git a/tensorflow/examples/learn/mnist.py b/tensorflow/examples/learn/mnist.py
index 88425ea0d0..98819b20bf 100644
--- a/tensorflow/examples/learn/mnist.py
+++ b/tensorflow/examples/learn/mnist.py
@@ -77,9 +77,7 @@ def conv_model(features, labels, mode):
return tf.estimator.EstimatorSpec(mode, predictions=predictions)
# Compute loss.
- onehot_labels = tf.one_hot(tf.cast(labels, tf.int32), N_DIGITS, 1, 0)
- loss = tf.losses.softmax_cross_entropy(
- onehot_labels=onehot_labels, logits=logits)
+ loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
# Create training op.
if mode == tf.estimator.ModeKeys.TRAIN:
diff --git a/tensorflow/examples/learn/multiple_gpu.py b/tensorflow/examples/learn/multiple_gpu.py
index a294950a38..3bad22ddf6 100644
--- a/tensorflow/examples/learn/multiple_gpu.py
+++ b/tensorflow/examples/learn/multiple_gpu.py
@@ -65,12 +65,8 @@ def my_model(features, labels, mode):
}
return tf.estimator.EstimatorSpec(mode, predictions=predictions)
- # Convert the labels to a one-hot tensor of shape (length of features, 3)
- # and with a on-value of 1 for each one-hot vector of length 3.
- onehot_labels = tf.one_hot(labels, 3, 1, 0)
# Compute loss.
- loss = tf.losses.softmax_cross_entropy(
- onehot_labels=onehot_labels, logits=logits)
+ loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
# Create training op.
if mode == tf.estimator.ModeKeys.TRAIN:
diff --git a/tensorflow/examples/learn/resnet.py b/tensorflow/examples/learn/resnet.py
index 1e0966475b..9542e55250 100755
--- a/tensorflow/examples/learn/resnet.py
+++ b/tensorflow/examples/learn/resnet.py
@@ -151,9 +151,7 @@ def res_net_model(features, labels, mode):
return tf.estimator.EstimatorSpec(mode, predictions=predictions)
# Compute loss.
- onehot_labels = tf.one_hot(tf.cast(labels, tf.int32), N_DIGITS, 1, 0)
- loss = tf.losses.softmax_cross_entropy(
- onehot_labels=onehot_labels, logits=logits)
+ loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
# Create training op.
if mode == tf.estimator.ModeKeys.TRAIN:
diff --git a/tensorflow/examples/learn/text_classification.py b/tensorflow/examples/learn/text_classification.py
index ba89c532be..eb117c39a1 100644
--- a/tensorflow/examples/learn/text_classification.py
+++ b/tensorflow/examples/learn/text_classification.py
@@ -46,9 +46,7 @@ def estimator_spec_for_softmax_classification(
'prob': tf.nn.softmax(logits)
})
- onehot_labels = tf.one_hot(labels, MAX_LABEL, 1, 0)
- loss = tf.losses.softmax_cross_entropy(
- onehot_labels=onehot_labels, logits=logits)
+ loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
diff --git a/tensorflow/examples/learn/text_classification_character_cnn.py b/tensorflow/examples/learn/text_classification_character_cnn.py
index 363ff00362..afda170e2a 100644
--- a/tensorflow/examples/learn/text_classification_character_cnn.py
+++ b/tensorflow/examples/learn/text_classification_character_cnn.py
@@ -88,9 +88,7 @@ def char_cnn_model(features, labels, mode):
'prob': tf.nn.softmax(logits)
})
- onehot_labels = tf.one_hot(labels, MAX_LABEL, 1, 0)
- loss = tf.losses.softmax_cross_entropy(
- onehot_labels=onehot_labels, logits=logits)
+ loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
diff --git a/tensorflow/examples/learn/text_classification_character_rnn.py b/tensorflow/examples/learn/text_classification_character_rnn.py
index 86adc056ad..15733821fb 100644
--- a/tensorflow/examples/learn/text_classification_character_rnn.py
+++ b/tensorflow/examples/learn/text_classification_character_rnn.py
@@ -59,9 +59,7 @@ def char_rnn_model(features, labels, mode):
'prob': tf.nn.softmax(logits)
})
- onehot_labels = tf.one_hot(labels, MAX_LABEL, 1, 0)
- loss = tf.losses.softmax_cross_entropy(
- onehot_labels=onehot_labels, logits=logits)
+ loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
diff --git a/tensorflow/examples/learn/text_classification_cnn.py b/tensorflow/examples/learn/text_classification_cnn.py
index be262285a3..9e21aee87f 100644
--- a/tensorflow/examples/learn/text_classification_cnn.py
+++ b/tensorflow/examples/learn/text_classification_cnn.py
@@ -87,9 +87,7 @@ def cnn_model(features, labels, mode):
'prob': tf.nn.softmax(logits)
})
- onehot_labels = tf.one_hot(labels, MAX_LABEL, 1, 0)
- loss = tf.losses.softmax_cross_entropy(
- onehot_labels=onehot_labels, logits=logits)
+ loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())