Diffstat (limited to 'tensorflow/contrib/linear_optimizer/python/kernel_tests/sdca_ops_test.py')
-rw-r--r--  tensorflow/contrib/linear_optimizer/python/kernel_tests/sdca_ops_test.py  47
1 file changed, 44 insertions(+), 3 deletions(-)
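A note on the constants asserted in the new testSimpleLogisticNoL2 below (an illustrative aside, not part of the commit): before any minimize() step all model weights are zero, so every example is predicted at 0.5, and the mean logistic loss is -log(0.5) = ln(2) ≈ 0.693147. Because symmetric_l2_regularization is 0, the reported regularized_loss starts out equal to the unregularized loss; per the in-test comment, the optimizer only behaves as if L2 were 1 internally. A minimal sketch of that arithmetic in plain Python:

import math

# With all weights initialized to zero, the logistic model outputs
# p = sigmoid(0) = 0.5 for every example, so the per-example log loss is
# -log(0.5) regardless of the label.
p = 1.0 / (1.0 + math.exp(-0.0))   # 0.5
per_example_loss = -math.log(p)    # ln(2)

# The mean over the two examples in the test is the same value, which is the
# 0.693147 asserted for both unregularized_loss and regularized_loss.
print(round(per_example_loss, 6))  # 0.693147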
diff --git a/tensorflow/contrib/linear_optimizer/python/kernel_tests/sdca_ops_test.py b/tensorflow/contrib/linear_optimizer/python/kernel_tests/sdca_ops_test.py
index 13968457f7..27da0b4f55 100644
--- a/tensorflow/contrib/linear_optimizer/python/kernel_tests/sdca_ops_test.py
+++ b/tensorflow/contrib/linear_optimizer/python/kernel_tests/sdca_ops_test.py
@@ -92,6 +92,7 @@ def make_variable_dict(max_age, max_gender):
return dict(sparse_features_weights=[age_weights, gender_weights],
dense_features_weights=[])
+
def make_dense_variable_dict(num_dense_features, num_examples):
feature_weights = ([
tf.Variable(tf.zeros([1],
@@ -130,6 +131,7 @@ def tearDown():
pass
+# TODO(katsiapis): Add tests that exercise L1 and Shrinking.
class SdcaOptimizerTest(TensorFlowTestCase):
def _single_threaded_test_session(self):
@@ -180,6 +182,44 @@ class SdcaOptimizerTest(TensorFlowTestCase):
rtol=1e-2,
atol=1e-2)
+ def testSimpleLogisticNoL2(self):
+ # Same as test above (so comments from above apply) but without an L2.
+ # The algorithm should behave as if we have an L2 of 1 in optimization but
+ # 0 in regularized_loss.
+ example_protos = [
+ make_example_proto(
+ {'age': [0],
+ 'gender': [0]}, 0),
+ make_example_proto(
+ {'age': [1],
+ 'gender': [1]}, 1),
+ ]
+ example_weights = [1.0, 1.0]
+ with self._single_threaded_test_session():
+ examples = make_example_dict(example_protos, example_weights)
+ variables = make_variable_dict(1, 1)
+ options = dict(symmetric_l2_regularization=0,
+ symmetric_l1_regularization=0,
+ loss_type='logistic_loss')
+
+ lr = SdcaModel(CONTAINER, examples, variables, options)
+ tf.initialize_all_variables().run()
+ unregularized_loss = lr.unregularized_loss(examples)
+ loss = lr.regularized_loss(examples)
+ predictions = lr.predictions(examples)
+ self.assertAllClose(0.693147, unregularized_loss.eval())
+ self.assertAllClose(0.693147, loss.eval())
+ for _ in xrange(5):
+ lr.minimize().run()
+ self.assertAllClose(0.411608, unregularized_loss.eval(), rtol=0.11)
+ self.assertAllClose(0.371705, loss.eval(), atol=0.01)
+ predicted_labels = get_binary_predictions_for_logistic(predictions)
+ self.assertAllEqual([0, 1], predicted_labels.eval())
+ self.assertAllClose(0.01,
+ lr.approximate_duality_gap().eval(),
+ rtol=1e-2,
+ atol=1e-2)
+
def testSomeUnweightedExamples(self):
# Setup test data with 4 examples, but should produce the same
# results as testSimple.
@@ -272,10 +312,11 @@ class SdcaOptimizerTest(TensorFlowTestCase):
lr = SdcaModel(CONTAINER, examples, variables, options)
tf.initialize_all_variables().run()
self.assertAllClose([0.5, 0.5], lr.predictions(examples).eval())
- with self.assertRaisesOpError(
- 'No weighted examples in 2 training examples'):
- lr.minimize().run()
+ lr.minimize().run()
self.assertAllClose([0.5, 0.5], lr.predictions(examples).eval())
+ with self.assertRaisesOpError(
+ 'No examples found or all examples have zero weight.'):
+ lr.approximate_duality_gap().eval()
def testDuplicateExampleIds(self):
# Setup test data with 1 positive, and 1 negative example.