Diffstat (limited to 'tensorflow/contrib/layers/python/ops/loss_ops_test.py')
-rw-r--r--  tensorflow/contrib/layers/python/ops/loss_ops_test.py  281
1 file changed, 106 insertions, 175 deletions
diff --git a/tensorflow/contrib/layers/python/ops/loss_ops_test.py b/tensorflow/contrib/layers/python/ops/loss_ops_test.py
index 48f49989cf..1453af5331 100644
--- a/tensorflow/contrib/layers/python/ops/loss_ops_test.py
+++ b/tensorflow/contrib/layers/python/ops/loss_ops_test.py
@@ -21,6 +21,10 @@ from __future__ import print_function
 import numpy as np
 import tensorflow as tf
 
+from tensorflow.contrib.layers.python.framework import tensor_util
+
+pi = 3.14
+indiana_pi = 3.2  # https://en.wikipedia.org/wiki/Indiana_Pi_Bill
 
 class ReduceBatchSumTest(tf.test.TestCase):
 
@@ -89,72 +93,6 @@ class ReduceBatchSumTest(tf.test.TestCase):
         self.assertAllClose(expected_result, actual_result.eval())
 
 
-class ReduceBatchMeanTest(tf.test.TestCase):
-
-  def testDimensionNone(self):
-    with self.test_session():
-      input_array = np.array([
-          [1.0, 2.0],
-          [-1.0, -2.0]
-      ], dtype=np.float32)
-      placeholder_vec = tf.placeholder(tf.float32, name="placeholder_vec")
-      expected_result = np.array([1.5, -1.5])
-      actual_result = tf.contrib.layers.reduce_batch_mean(placeholder_vec)
-      self.assertEqual(actual_result.get_shape().as_list(), [None])
-      self.assertAllClose(expected_result, actual_result.eval(feed_dict={
-          placeholder_vec: input_array
-      }))
-
-  def testDimension0(self):
-    with self.test_session():
-      input_vec = tf.constant(2.0)
-      with self.assertRaises(ValueError):
-        tf.contrib.layers.reduce_batch_mean(input_vec)
-
-  def testDimension1(self):
-    with self.test_session():
-      input_vec = tf.constant([1.0, 2.0])
-      expected_result = np.array([1.0, 2.0])
-      actual_result = tf.contrib.layers.reduce_batch_mean(input_vec)
-      self.assertAllClose(expected_result, actual_result.eval())
-
-  def testDimension2(self):
-    with self.test_session():
-      input_vec = tf.constant([
-          [1.0, 2.0],
-          [-1.0, -2.0]
-      ])
-      expected_result = np.array([1.5, -1.5])
-      actual_result = tf.contrib.layers.reduce_batch_mean(input_vec)
-      self.assertAllClose(expected_result, actual_result.eval())
-
-  def testReturnShape(self):
-    with self.test_session():
-      input_vec = tf.constant([
-          [1.0, 2.0],
-          [-1.0, -2.0]
-      ])
-      expected_result = np.array([3.0, -3.0])
-      actual_result = tf.contrib.layers.reduce_batch_mean(input_vec)
-      self.assertShapeEqual(expected_result, actual_result)
-
-  def testDimensionN(self):
-    with self.test_session():
-      input_vec = tf.constant([
-          [
-              [1.0, 2.0],
-              [3.0, 4.0]
-          ],
-          [
-              [5.0, 6.0],
-              [7.0, 8.0]
-          ]
-      ])
-      expected_result = np.array([2.5, 6.5])
-      actual_result = tf.contrib.layers.reduce_batch_mean(input_vec)
-      self.assertAllClose(expected_result, actual_result.eval())
-
-
 class AbsoluteLossTest(tf.test.TestCase):
 
   def _getTestVectors(self):
@@ -191,7 +129,7 @@ class SquaredLossTest(tf.test.TestCase):
     target = tf.constant([1.0, 0.0, 3.0, 2.0], shape=[2, 2], name="target")
     predicted = tf.constant([1.1, -0.2, 3.3, 1.6], shape=[2, 2],
                             name="predicted")
-    expected_loss = np.array([0.01, 0.04, 0.09, 0.16]).reshape(2, 2)
+    expected_loss = np.array([0.005, 0.02, 0.045, 0.08]).reshape(2, 2)
     return target, predicted, expected_loss
 
   def testSquaredLoss(self):
@@ -250,114 +188,108 @@ class SumSquaredLossTest(tf.test.TestCase):
         tf.contrib.layers.sum_squared_loss(incompatible_shape, target)
 
 
-class MeanAbsoluteLossTest(tf.test.TestCase):
-
-  def _getTestVectors(self):
-    target = tf.constant([[0.0, 1.0, 2.0],
-                          [3.0, 2.0, 4.0]],
-                         shape=[2, 3],
-                         name="target")
-    predicted = tf.constant([[3.0, -3.0, 0.0],
-                             [1.0, 2.0, 0.0]],
-                            shape=[2, 3],
-                            name="predicted")
-    expected_loss = np.array([3.0, 2.0])
-    return target, predicted, expected_loss
-
-  def testMeanAbsoluteLoss(self):
-    with self.test_session():
-      target, predicted, expected_loss = self._getTestVectors()
-      result = tf.contrib.layers.mean_absolute_loss(predicted, target)
-      self.assertAllClose(expected_loss, result.eval())
-
-  def testMeanAbsoluteLossReturnShape(self):
-    with self.test_session():
-      target, predicted, expected_loss = self._getTestVectors()
-      result = tf.contrib.layers.mean_absolute_loss(predicted, target)
-      self.assertShapeEqual(expected_loss, result)
+class ScalarAbsoluteLossTest(tf.test.TestCase):
 
-  def testInvalidShapesValueError(self):
+  def testScalarAbsoluteLoss(self):
     with self.test_session():
-      target = tf.constant([1.0, 0.0, 3.0, 2.0], shape=[2, 2], name="target")
-      incompatible_shape = tf.constant([0.0, 1.1], shape=[2],
-                                       name="incompatible_shape")
-      with self.assertRaises(ValueError):
-        tf.contrib.layers.mean_absolute_loss(incompatible_shape, target)
-
-
-class MeanSquaredLossTest(tf.test.TestCase):
-
-  def _getTestVectors(self):
-    target = tf.constant([[0.0, 1.0, 2.0],
-                          [3.0, 2.0, 4.0]],
-                         shape=[2, 3],
-                         name="target")
-    predicted = tf.constant([[3.0, -3.0, 0.0],
-                             [1.0, 2.0, 0.0]],
-                            shape=[2, 3],
-                            name="predicted")
-    expected_loss = np.array([9.666667, 6.666667])
-    return target, predicted, expected_loss
-
-  def testMeanSquaredLoss(self):
-    with self.test_session():
-      target, predicted, expected_loss = self._getTestVectors()
-      result = tf.contrib.layers.mean_squared_loss(predicted, target)
-      self.assertAllClose(expected_loss, result.eval())
-
-  def testMeanSquaredLossReturnShape(self):
-    with self.test_session():
-      target, predicted, expected_loss = self._getTestVectors()
-      result = tf.contrib.layers.mean_squared_loss(predicted, target)
-      self.assertShapeEqual(expected_loss, result)
-
-  def testInvalidShapesValueError(self):
+      actual = tf.constant([pi], name="pi")
+      actual_placeholder = tf.placeholder(tf.float32)
+      label = tf.constant([indiana_pi], name="lbl")
+      label_placeholder = tf.placeholder(tf.float32, name="lbl_ph")
+      expected_loss = abs(indiana_pi - pi)
+
+      # Both shapes are set.
+      both_shapes_loss = tf.contrib.layers.scalar_absolute_loss(actual, label)
+      tf.initialize_all_variables().run()
+      np.testing.assert_almost_equal(
+          both_shapes_loss.eval(), expected_loss, decimal=6)
+
+      # No shape for 'actual' - check that the loss layer can be created.
+      no_actual_shape_loss = tf.contrib.layers.scalar_absolute_loss(
+          actual_placeholder, label)
+      tf.initialize_all_variables().run()
+      np.testing.assert_almost_equal(
+          no_actual_shape_loss.eval({actual_placeholder: [pi]}),
+          expected_loss, decimal=6)
+
+      # No shape for 'label' - check that the loss layer can be created.
+      no_label_shape_loss = tf.contrib.layers.scalar_absolute_loss(
+          actual, label_placeholder)
+      tf.initialize_all_variables().run()
+      np.testing.assert_almost_equal(
+          no_label_shape_loss.eval({label_placeholder: [indiana_pi]}),
+          expected_loss, decimal=6)
+
+      # No shapes.
+      no_shape_loss = tf.contrib.layers.scalar_absolute_loss(
+          actual_placeholder, label_placeholder)
+      tf.initialize_all_variables().run()
+      np.testing.assert_almost_equal(
+          no_shape_loss.eval({label_placeholder: [indiana_pi],
+                              actual_placeholder: [pi]}),
+          expected_loss, decimal=6)
+
+      # Evaluate the previous one again, but this time with different
+      # (matching) shapes. This should still work.
+      np.testing.assert_almost_equal(
+          no_shape_loss.eval({label_placeholder: [indiana_pi, indiana_pi],
+                              actual_placeholder: [pi, pi]}),
+          expected_loss, decimal=6)
+
+
+class ScalarSquaredLossTest(tf.test.TestCase):
+
+  def testScalarSquaredLoss(self):
     with self.test_session():
-      target = tf.constant([1.0, 0.0, 3.0, 2.0], shape=[2, 2], name="target")
-      incompatible_shape = tf.constant([0.0, 1.1], shape=[2],
-                                       name="incompatible_shape")
-      with self.assertRaises(ValueError):
-        tf.contrib.layers.mean_squared_loss(incompatible_shape, target)
-
-
-class RootMeanSquaredLossTest(tf.test.TestCase):
-
-  def _getTestVectors(self):
-    target = tf.constant([[0.0, 1.0, 2.0],
-                          [3.0, 2.0, 4.0]],
-                         shape=[2, 3],
-                         name="target")
-    predicted = tf.constant([[3.0, -3.0, 0.0],
-                             [1.0, 2.0, 0.0]],
-                            shape=[2, 3],
-                            name="predicted")
-    expected_loss = np.array([3.109126, 2.5819889])
-    return target, predicted, expected_loss
-
-  def testRootMeanSquaredLoss(self):
-    with self.test_session():
-      target, predicted, expected_loss = self._getTestVectors()
-      result = tf.contrib.layers.root_mean_squared_loss(predicted, target)
-      self.assertAllClose(expected_loss, result.eval())
-
-  def testRootMeanSquaredLossReturnShape(self):
-    with self.test_session():
-      target, predicted, expected_loss = self._getTestVectors()
-      result = tf.contrib.layers.root_mean_squared_loss(predicted, target)
-      self.assertShapeEqual(expected_loss, result)
-
-  def testInvalidShapesValueError(self):
-    with self.test_session():
-      target = tf.constant([1.0, 0.0, 3.0, 2.0], shape=[2, 2], name="target")
-      incompatible_shape = tf.constant([0.0, 1.1], shape=[2],
-                                       name="incompatible_shape")
-      with self.assertRaises(ValueError):
-        tf.contrib.layers.root_mean_squared_loss(incompatible_shape, target)
-
-
-class MeanScalarLogisticLossTest(tf.test.TestCase):
-
-  def _get_mean_sigmoid_logistic_loss(self, logit, target):
+      actual = tf.constant([pi], name="pi")
+      actual_placeholder = tf.placeholder(tf.float32)
+      label = tf.constant([indiana_pi], name="lbl")
+      label_placeholder = tf.placeholder(tf.float32, name="lbl_ph")
+      expected_loss = (indiana_pi - pi) * (indiana_pi - pi) / 2
+
+      # Both shapes are set.
+      both_shapes_loss = tf.contrib.layers.scalar_squared_loss(actual, label)
+      tf.initialize_all_variables().run()
+      np.testing.assert_almost_equal(
+          both_shapes_loss.eval(), expected_loss, decimal=6)
+
+      # No shape for 'actual' - check that the loss layer can be created.
+      no_actual_shape_loss = tf.contrib.layers.scalar_squared_loss(
+          actual_placeholder, label)
+      tf.initialize_all_variables().run()
+      np.testing.assert_almost_equal(
+          no_actual_shape_loss.eval({actual_placeholder: [pi]}),
+          expected_loss, decimal=6)
+
+      # No shape for 'label' - check that the loss layer can be created.
+      no_label_shape_loss = tf.contrib.layers.scalar_squared_loss(
+          actual, label_placeholder)
+      tf.initialize_all_variables().run()
+      np.testing.assert_almost_equal(
+          no_label_shape_loss.eval({label_placeholder: [indiana_pi]}),
+          expected_loss,
+          decimal=6)
+
+      # No shapes.
+      no_shape_loss = tf.contrib.layers.scalar_squared_loss(
+          actual_placeholder, label_placeholder)
+      tf.initialize_all_variables().run()
+      np.testing.assert_almost_equal(
+          no_shape_loss.eval({label_placeholder: [indiana_pi],
+                              actual_placeholder: [pi]}),
+          expected_loss, decimal=6)
+
+      # Evaluate the previous one again, but this time with different
+      # (matching) shapes. This should still work.
+      np.testing.assert_almost_equal(
+          no_shape_loss.eval({label_placeholder: [indiana_pi, indiana_pi],
+                              actual_placeholder: [pi, pi]}),
+          expected_loss, decimal=6)
+
+
+class ScalarLogisticLossTest(tf.test.TestCase):
+
+  def _expected_loss(self, logit, target):
     sigmoid = 1.0 / (1.0 + np.exp(-logit))
     logistic_loss = (target * -np.log(sigmoid)) - (
         (1.0 - target) * np.log(1.0 - sigmoid))
@@ -365,14 +297,13 @@ class MeanScalarLogisticLossTest(tf.test.TestCase):
 
     return np.sum(batch_losses) / len(batch_losses)
 
-  def test_mean__scalar_logistic_loss(self):
+  def test_scalar_logistic_loss(self):
     logit = np.array([[9.45, -42], [4.2, 1], [-0.6, 20]])
     target = np.array([[0.8, 0.9], [0.45, 0.99999], [0.1, 0.0006]])
-    expected_loss = self._get_mean_sigmoid_logistic_loss(logit, target)
     with self.test_session():
       result = tf.contrib.layers.scalar_logistic_loss(
           tf.constant(logit), tf.constant(target))
-      self.assertAllClose(expected_loss, result.eval())
+      self.assertAllClose(self._expected_loss(logit, target), result.eval())
 
 
 if __name__ == "__main__":
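Editor's note: for anyone verifying the updated expectations by hand, the change to SquaredLossTest halves the per-element squared error (the new values satisfy (predicted - target)**2 / 2), and the new scalar-loss tests reduce to plain arithmetic on pi = 3.14 and indiana_pi = 3.2. A minimal NumPy sketch of those checks; the helper name halved_squared_loss is ours for illustration, since the diff shows only the test side, not the op's implementation:

import numpy as np

# Per-element halved squared error: reproduces the new expected_loss
# in SquaredLossTest. (Hypothetical helper; not a TensorFlow API.)
def halved_squared_loss(predicted, target):
  return (predicted - target) ** 2 / 2.0

target = np.array([1.0, 0.0, 3.0, 2.0]).reshape(2, 2)
predicted = np.array([1.1, -0.2, 3.3, 1.6]).reshape(2, 2)
print(halved_squared_loss(predicted, target))
# -> approximately [[0.005, 0.02], [0.045, 0.08]]

# The scalar-loss expectations are elementary arithmetic:
pi, indiana_pi = 3.14, 3.2
print(abs(indiana_pi - pi))         # scalar_absolute_loss: ~0.06
print((indiana_pi - pi) ** 2 / 2)   # scalar_squared_loss: ~0.0018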