author    Neal Wu <wun@google.com>  2017-03-20 14:33:54 -0800
committer TensorFlower Gardener <gardener@tensorflow.org>  2017-03-20 15:51:03 -0700
commit    af62b5ccb9d06381096d18418920d06390d90be9 (patch)
tree      e484c801873549e025cb9cf0121e2f22b8ee8b04
parent    159f66df31234c77dd509876adddbd7280e445c2 (diff)
Fixed the order of arguments for softmax_loss_function in two places, including a semantic code change.
Change: 150685391
 tensorflow/contrib/seq2seq/python/ops/loss.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/tensorflow/contrib/seq2seq/python/ops/loss.py b/tensorflow/contrib/seq2seq/python/ops/loss.py
index bb87111266..e14f07bc09 100644
--- a/tensorflow/contrib/seq2seq/python/ops/loss.py
+++ b/tensorflow/contrib/seq2seq/python/ops/loss.py
@@ -48,7 +48,7 @@ def sequence_loss(logits, targets, weights,
timesteps.
average_across_batch: If set, sum the cost across the batch dimension and
divide the returned cost by the batch size.
- softmax_loss_function: Function (inputs-batch, labels-batch) -> loss-batch
+ softmax_loss_function: Function (labels-batch, inputs-batch) -> loss-batch
to be used instead of the standard softmax (the default if this is None).
name: Optional name for this operation, defaults to "sequence_loss".
@@ -76,7 +76,7 @@ def sequence_loss(logits, targets, weights,
crossent = nn_ops.sparse_softmax_cross_entropy_with_logits(
labels=targets, logits=probs_flat)
else:
- crossent = softmax_loss_function(probs_flat, targets)
+ crossent = softmax_loss_function(targets, probs_flat)
crossent = crossent * array_ops.reshape(weights, [-1])
if average_across_timesteps and average_across_batch:
crossent = math_ops.reduce_sum(crossent)
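
For reference, a minimal sketch of a custom softmax_loss_function under the corrected calling convention (labels first, then logits), matching tf.nn.sparse_softmax_cross_entropy_with_logits. This assumes TF 1.x with this patch applied; the name my_softmax_loss and the toy shapes are illustrative, not part of the patch:

    import tensorflow as tf

    def my_softmax_loss(labels, logits):
        # With this patch, sequence_loss invokes the custom loss as
        # softmax_loss_function(labels, logits) -- labels first. Inside
        # sequence_loss, labels arrive flattened to [batch * seq_len] and
        # logits to [batch * seq_len, num_classes].
        return tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=labels, logits=logits)

    # Toy shapes for illustration only.
    batch_size, seq_len, vocab_size = 4, 10, 1000
    logits = tf.random_normal([batch_size, seq_len, vocab_size])
    targets = tf.random_uniform(
        [batch_size, seq_len], maxval=vocab_size, dtype=tf.int32)
    weights = tf.ones([batch_size, seq_len])

    loss = tf.contrib.seq2seq.sequence_loss(
        logits, targets, weights, softmax_loss_function=my_softmax_loss)

Any caller that previously passed a function written for the old (inputs, labels) order must swap its parameters to match; this is the semantic change the commit message refers to.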