Diffstat (limited to 'tensorflow/contrib/solvers/python/kernel_tests/linear_equations_test.py')
-rw-r--r--  tensorflow/contrib/solvers/python/kernel_tests/linear_equations_test.py  |  63
1 file changed, 49 insertions(+), 14 deletions(-)
diff --git a/tensorflow/contrib/solvers/python/kernel_tests/linear_equations_test.py b/tensorflow/contrib/solvers/python/kernel_tests/linear_equations_test.py
index 930df2414b..a1282847be 100644
--- a/tensorflow/contrib/solvers/python/kernel_tests/linear_equations_test.py
+++ b/tensorflow/contrib/solvers/python/kernel_tests/linear_equations_test.py
@@ -45,32 +45,67 @@ def _get_linear_equations_tests(dtype_, use_static_shape_, shape_):
         low=-1.0, high=1.0,
         size=np.prod(shape_)).reshape(shape_).astype(dtype_)
     # Make a selfadjoint, positive definite.
     a_np = np.dot(a_np.T, a_np)
+    # Jacobi preconditioner.
+    jacobi_np = np.zeros_like(a_np)
+    jacobi_np[range(a_np.shape[0]), range(a_np.shape[1])] = (
+        1.0 / a_np.diagonal())
     rhs_np = np.random.uniform(
         low=-1.0, high=1.0, size=shape_[0]).astype(dtype_)
+    x_np = np.zeros_like(rhs_np)
     tol = 1e-6 if dtype_ == np.float64 else 1e-3
     max_iter = 20
     with self.test_session() as sess:
       if use_static_shape_:
         a = constant_op.constant(a_np)
         rhs = constant_op.constant(rhs_np)
+        x = constant_op.constant(x_np)
+        jacobi = constant_op.constant(jacobi_np)
       else:
         a = array_ops.placeholder(dtype_)
         rhs = array_ops.placeholder(dtype_)
+        x = array_ops.placeholder(dtype_)
+        jacobi = array_ops.placeholder(dtype_)
       operator = util.create_operator(a)
-      cg_graph = linear_equations.conjugate_gradient(
-          operator, rhs, tol=tol, max_iter=max_iter)
-      if use_static_shape_:
-        cg_val = sess.run(cg_graph)
-      else:
-        cg_val = sess.run(cg_graph, feed_dict={a: a_np, rhs: rhs_np})
-      norm_r0 = np.linalg.norm(rhs_np)
-      norm_r = np.sqrt(cg_val.gamma)
-      self.assertLessEqual(norm_r, tol * norm_r0)
-      # Validate that we get an equally small residual norm with numpy
-      # using the computed solution.
-      r_np = rhs_np - np.dot(a_np, cg_val.x)
-      norm_r_np = np.linalg.norm(r_np)
-      self.assertLessEqual(norm_r_np, tol * norm_r0)
+      preconditioners = [
+          None, util.identity_operator(a),
+          util.create_operator(jacobi)
+      ]
+      cg_results = []
+      for preconditioner in preconditioners:
+        cg_graph = linear_equations.conjugate_gradient(
+            operator,
+            rhs,
+            preconditioner=preconditioner,
+            x=x,
+            tol=tol,
+            max_iter=max_iter)
+        if use_static_shape_:
+          cg_val = sess.run(cg_graph)
+        else:
+          cg_val = sess.run(
+              cg_graph,
+              feed_dict={
+                  a: a_np,
+                  rhs: rhs_np,
+                  x: x_np,
+                  jacobi: jacobi_np
+              })
+        norm_r0 = np.linalg.norm(rhs_np)
+        norm_r = np.linalg.norm(cg_val.r)
+        self.assertLessEqual(norm_r, tol * norm_r0)
+        # Validate that we get an equally small residual norm with numpy
+        # using the computed solution.
+        r_np = rhs_np - np.dot(a_np, cg_val.x)
+        norm_r_np = np.linalg.norm(r_np)
+        self.assertLessEqual(norm_r_np, tol * norm_r0)
+        cg_results.append(cg_val)
+      # Validate that we get the same results using the identity
+      # preconditioner and None.
+      self.assertEqual(cg_results[0].i, cg_results[1].i)
+      self.assertAlmostEqual(cg_results[0].gamma, cg_results[1].gamma)
+      self.assertAllClose(cg_results[0].r, cg_results[1].r, rtol=tol)
+      self.assertAllClose(cg_results[0].x, cg_results[1].x, rtol=tol)
+      self.assertAllClose(cg_results[0].p, cg_results[1].p, rtol=tol)
 
   return [test_conjugate_gradient]
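
Note on the changed assertion: the old test measured the residual norm as np.sqrt(cg_val.gamma), which is only valid without a preconditioner, since in preconditioned CG gamma tracks the inner product r'Mr of the residual with the preconditioned residual and equals ||r||^2 only when M is the identity; the new test therefore reads cg_val.r directly. The sketch below is not part of the commit: it is a minimal NumPy-only rendition of the Jacobi-preconditioned CG iteration that the updated test exercises, and every name in it (conjugate_gradient_np, etc.) is purely illustrative.

import numpy as np


def conjugate_gradient_np(a, rhs, preconditioner=None, x=None,
                          tol=1e-6, max_iter=20):
  # Preconditioned CG for a self-adjoint positive-definite `a`; mirrors the
  # quantities (i, x, r, p, gamma) that the op under test returns.
  m = preconditioner if preconditioner is not None else np.eye(rhs.shape[0])
  x = np.zeros_like(rhs) if x is None else x
  r = rhs - a.dot(x)    # residual
  z = m.dot(r)          # preconditioned residual
  p = z.copy()          # search direction
  gamma = r.dot(z)      # r'Mr; equals ||r||^2 only when m is the identity
  i = 0
  for i in range(max_iter):
    if np.linalg.norm(r) <= tol * np.linalg.norm(rhs):
      break
    ap = a.dot(p)
    alpha = gamma / p.dot(ap)
    x = x + alpha * p
    r = r - alpha * ap
    z = m.dot(r)
    gamma_new = r.dot(z)
    p = z + (gamma_new / gamma) * p
    gamma = gamma_new
  return i, x, r, p, gamma


# Same setup as the test: random SPD matrix, Jacobi (inverse-diagonal)
# preconditioner, zero initial guess.
np.random.seed(0)
shape = (7, 7)
a_np = np.random.uniform(low=-1.0, high=1.0, size=shape)
a_np = np.dot(a_np.T, a_np)
jacobi_np = np.diag(1.0 / a_np.diagonal())
rhs_np = np.random.uniform(low=-1.0, high=1.0, size=shape[0])
i, x, r, p, gamma = conjugate_gradient_np(
    a_np, rhs_np, preconditioner=jacobi_np, tol=1e-6, max_iter=20)
print(i, np.linalg.norm(rhs_np - a_np.dot(x)) / np.linalg.norm(rhs_np))

The printed relative residual is the NumPy counterpart of the test's norm_r_np <= tol * norm_r0 assertion.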