author	Martin Wicke <wicke@google.com>	2017-01-04 21:25:34 -0800
committer	TensorFlower Gardener <gardener@tensorflow.org>	2017-01-04 21:46:08 -0800
commit	333dc32ff79af21484695157f3d141dc776f7c02 (patch)
tree	b379bcaa56bfa54d12ea839fb7e62ab163490743 /tensorflow/python/kernel_tests
parent	d9541696b068cfcc1fab66b03d0b8d605b64f14d (diff)
Change arg order for {softmax,sparse_softmax,sigmoid}_cross_entropy_with_logits to be (labels, logits), and force use of named args to avoid accidents.
Change: 143629623
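The practical effect for call sites: the labels/logits pair must now be passed by keyword, with labels first. A minimal before/after sketch (tensor values are illustrative only; tf.nn.sparse_softmax_cross_entropy_with_logits is the public alias of the nn_ops function exercised in the tests below):

    import tensorflow as tf

    logits = tf.constant([[0., 1.], [2., 3.]])
    labels = tf.constant([0, 1])

    # Before this change: positional (logits, labels).
    # loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits, labels)

    # After this change: named arguments, (labels, logits) order.
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=labels, logits=logits)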
Diffstat (limited to 'tensorflow/python/kernel_tests')
-rw-r--r--  tensorflow/python/kernel_tests/sparse_xent_op_test.py | 19
-rw-r--r--  tensorflow/python/kernel_tests/xent_op_test.py        |  5
2 files changed, 14 insertions(+), 10 deletions(-)
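How the "force use of named args" part works is not shown in this diff (it lives in the op definitions, not the tests). Since TensorFlow still supported Python 2 at the time, keyword-only parameters could not be declared with Python 3's bare-* syntax; a sentinel guard is the usual Python 2-compatible pattern. A sketch of the idea, with an illustrative body and error message rather than the library's exact internals:

    def sparse_softmax_cross_entropy_with_logits(
        _sentinel=None,  # catches the first positional argument, if any
        labels=None, logits=None, name=None):
      # Any positional call lands in _sentinel; reject it outright.
      if _sentinel is not None:
        raise ValueError(
            "Only call sparse_softmax_cross_entropy_with_logits with named "
            "arguments (labels=..., logits=..., ...)")
      # Actual loss computation follows here; elided in this sketch.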
diff --git a/tensorflow/python/kernel_tests/sparse_xent_op_test.py b/tensorflow/python/kernel_tests/sparse_xent_op_test.py
index ef94af54fe..d2a815a0d7 100644
--- a/tensorflow/python/kernel_tests/sparse_xent_op_test.py
+++ b/tensorflow/python/kernel_tests/sparse_xent_op_test.py
@@ -141,25 +141,26 @@ class SparseXentTest(test.TestCase):
with self.test_session(use_gpu=True):
with self.assertRaisesRegexp(ValueError, ".*Rank mismatch:*"):
nn_ops.sparse_softmax_cross_entropy_with_logits(
- [[0., 1.], [2., 3.], [2., 3.]], [[0, 2]])
+ labels=[[0, 2]], logits=[[0., 1.], [2., 3.], [2., 3.]])
def testScalar(self):
with self.test_session(use_gpu=True):
with self.assertRaisesRegexp(ValueError, ".*Logits cannot be scalars*"):
nn_ops.sparse_softmax_cross_entropy_with_logits(
- constant_op.constant(1.0), constant_op.constant(0))
+ labels=constant_op.constant(0), logits=constant_op.constant(1.0))
def testLabelsPlaceholderScalar(self):
with self.test_session(use_gpu=True):
labels = array_ops.placeholder(np.int32)
- y = nn_ops.sparse_softmax_cross_entropy_with_logits([[7.]], labels)
+ y = nn_ops.sparse_softmax_cross_entropy_with_logits(
+ labels=labels, logits=[[7.]])
with self.assertRaisesOpError("labels must be 1-D"):
y.eval(feed_dict={labels: 0})
def testVector(self):
with self.test_session(use_gpu=True):
loss = nn_ops.sparse_softmax_cross_entropy_with_logits(
- constant_op.constant([1.0]), constant_op.constant(0))
+ labels=constant_op.constant(0), logits=constant_op.constant([1.0]))
self.assertAllClose(0.0, loss.eval())
def testFloat(self):
@@ -191,7 +192,8 @@ class SparseXentTest(test.TestCase):
shape=[3, 4],
dtype=dtypes.float64,
name="f")
- x = nn_ops.sparse_softmax_cross_entropy_with_logits(f, l, name="xent")
+ x = nn_ops.sparse_softmax_cross_entropy_with_logits(
+ labels=l, logits=f, name="xent")
err = gradient_checker.compute_gradient_error(f, [3, 4], x, [3])
print("cross entropy gradient err = ", err)
self.assertLess(err, 5e-8)
@@ -201,7 +203,8 @@ class SparseXentTest(test.TestCase):
# manually reshape loss
np_loss = np.reshape(np_loss, np.array(labels).shape)
with self.test_session(use_gpu=True) as sess:
- loss = nn_ops.sparse_softmax_cross_entropy_with_logits(features, labels)
+ loss = nn_ops.sparse_softmax_cross_entropy_with_logits(
+ labels=labels, logits=features)
backprop = loss.op.inputs[0].op.outputs[1]
tf_loss, tf_backprop = sess.run([loss, backprop])
self.assertAllCloseAccordingToType(np_loss, tf_loss)
@@ -225,7 +228,7 @@ class SparseXentTest(test.TestCase):
labels = array_ops.placeholder(dtypes.int32, shape=[None, 1])
logits = array_ops.placeholder(dtypes.float32, shape=[None, 3])
ce = nn_ops.sparse_softmax_cross_entropy_with_logits(
- logits, array_ops.squeeze(labels))
+ labels=array_ops.squeeze(labels), logits=logits)
labels_v2 = np.zeros((1, 1), dtype=np.int32)
logits_v2 = np.random.randn(1, 3)
sess.run([ce], feed_dict={labels: labels_v2, logits: logits_v2})
@@ -243,7 +246,7 @@ def _sparse_vs_dense_xent_benchmark_dense(labels, logits):
array_ops.stack([length]), 1.0, 0.0)
target = array_ops.reshape(target, array_ops.stack([-1, num_entries]))
crossent = nn_ops.softmax_cross_entropy_with_logits(
- logits, target, name="SequenceLoss/CrossEntropy")
+ labels=target, logits=logits, name="SequenceLoss/CrossEntropy")
crossent_sum = math_ops.reduce_sum(crossent)
grads = gradients_impl.gradients([crossent_sum], [logits])[0]
diff --git a/tensorflow/python/kernel_tests/xent_op_test.py b/tensorflow/python/kernel_tests/xent_op_test.py
index ac56f567ce..e1e0566124 100644
--- a/tensorflow/python/kernel_tests/xent_op_test.py
+++ b/tensorflow/python/kernel_tests/xent_op_test.py
@@ -57,7 +57,7 @@ class XentTest(test.TestCase):
np_loss, _ = self._npXent(np_features, np_labels, dim=dim)
with self.test_session(use_gpu=use_gpu) as sess:
loss = nn_ops.softmax_cross_entropy_with_logits(
- np_features, np_labels, dim=dim)
+ labels=np_labels, logits=np_features, dim=dim)
tf_loss = sess.run(loss)
print("np_loss:", np_loss)
print("tf_loss:", tf_loss)
@@ -166,7 +166,8 @@ class XentTest(test.TestCase):
shape=[3, 4],
dtype=dtypes.float64,
name="f")
- x = nn_ops.softmax_cross_entropy_with_logits(f, l, name="xent")
+ x = nn_ops.softmax_cross_entropy_with_logits(labels=l, logits=f,
+ name="xent")
err = gradient_checker.compute_gradient_error(f, [3, 4], x, [3])
print("cross entropy gradient err = ", err)
self.assertLess(err, 5e-8)