author ImSheridan <xiaoyudong0512@gmail.com> 2018-06-04 12:50:12 +0800
committer Gunhan Gulsoy <gunan@google.com> 2018-06-03 21:50:12 -0700
commit 63dafb7f5dbef4da63e095595a49f5d5d7258af9 (patch)
tree 4541b20582d87587029b8fb536e3badd8b3ee63b
parent 320d8056af7799ab20e339757cf379963148425a (diff)
Fix print function with tf_logging.info to keep consistency (#18423)
* Fix print function with tf_logging.info to keep consistency
* Fix minor typo
* Fix pylint errors
* Fix minor pylint errors
* Fix lint error
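For context, tf_logging (tensorflow.python.platform.tf_logging) is a thin wrapper over Python's standard logging module, so extra positional arguments are interpreted as %-style format arguments rather than being concatenated the way print does. A minimal sketch of the calling conventions this change relies on (the values below are illustrative only, not taken from the tests):

    import numpy as np
    from tensorflow.python.platform import tf_logging

    value = np.float32(0.001)
    err = 1.5e-4

    # Lazy %-style formatting: the message is rendered only if INFO is enabled.
    tf_logging.info("actual = %s", value)

    # A pre-formatted string also works, at the cost of eager formatting.
    tf_logging.info("betainc gradient err = %g " % err)

Passing a bare value as a second argument without a matching format specifier (e.g. tf_logging.info("actual = ", value)) would be silently dropped by the logging machinery, which is why the replacements below use explicit specifiers.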
-rw-r--r-- tensorflow/contrib/fused_conv/python/ops/fused_conv2d_bias_activation_op_test.py 11
-rw-r--r-- tensorflow/python/kernel_tests/betainc_op_test.py 4
-rw-r--r-- tensorflow/python/kernel_tests/conv_ops_test.py 32
-rw-r--r-- tensorflow/python/kernel_tests/pooling_ops_test.py 4
-rw-r--r-- tensorflow/tools/quantization/quantize_graph_test.py 12
5 files changed, 32 insertions, 31 deletions
diff --git a/tensorflow/contrib/fused_conv/python/ops/fused_conv2d_bias_activation_op_test.py b/tensorflow/contrib/fused_conv/python/ops/fused_conv2d_bias_activation_op_test.py
index 3d0ed89932..4d62ac65ff 100644
--- a/tensorflow/contrib/fused_conv/python/ops/fused_conv2d_bias_activation_op_test.py
+++ b/tensorflow/contrib/fused_conv/python/ops/fused_conv2d_bias_activation_op_test.py
@@ -289,8 +289,8 @@ class FusedConv2DBiasActivationTest(test.TestCase):
conv = tensors[i]
value = values[i]
ref_value = ref_values[i]
- print("expected = ", ref_value)
- print("actual = ", value)
+ tf_logging.info("expected = ", ref_value)
+ tf_logging.info("actual = ", value)
tol = 1e-5
if value.dtype == np.float16:
tol = 1e-3
@@ -831,7 +831,8 @@ class FusedConvInt8Tests(test.TestCase):
vertical_stride, padding_type)
output_width = CalculateConvolvedOutputDim(input_width, filter_width,
horizontal_stride, padding_type)
- print("output_height=", output_height, ", output_width=", output_width)
+ tf_logging.info("output_height=", output_height, ", output_width=",
+ output_width)
side_input, _, _ = gen_array_ops.quantize_v2(
random_ops.random_uniform(
@@ -866,8 +867,8 @@ class FusedConvInt8Tests(test.TestCase):
with self.test_session(use_gpu=True) as sess:
actual_y, expected_y = sess.run([actual, expected])
- print("actual_y = ", actual_y)
- print("expected_y = ", expected_y)
+ tf_logging.info("actual_y = ", actual_y)
+ tf_logging.info("expected_y = ", expected_y)
self.assertTrue(np.array_equal(actual_y, expected_y))
def testFusedConvInt8(self):
diff --git a/tensorflow/python/kernel_tests/betainc_op_test.py b/tensorflow/python/kernel_tests/betainc_op_test.py
index 08b03f8518..16fdedac41 100644
--- a/tensorflow/python/kernel_tests/betainc_op_test.py
+++ b/tensorflow/python/kernel_tests/betainc_op_test.py
@@ -172,7 +172,7 @@ class BetaincTest(test.TestCase):
tf_gout_t = math_ops.betainc(tf_ga_s, tf_gb_s, tf_gx_s)
err = gradient_checker.compute_gradient_error(
[tf_gx_s], [gx_s.shape], tf_gout_t, gx_s.shape)
- print("betainc gradient err = %g " % err)
+ tf_logging.info("betainc gradient err = %g " % err)
self.assertLess(err, err_tolerance)
# Test broadcast gradient
@@ -181,7 +181,7 @@ class BetaincTest(test.TestCase):
tf_gout_t = math_ops.betainc(tf_ga_s, tf_gb_s, tf_gx_s)
err = gradient_checker.compute_gradient_error(
[tf_gx_s], [()], tf_gout_t, ga_s.shape)
- print("betainc gradient err = %g " % err)
+ tf_logging.info("betainc gradient err = %g " % err)
self.assertLess(err, err_tolerance)
diff --git a/tensorflow/python/kernel_tests/conv_ops_test.py b/tensorflow/python/kernel_tests/conv_ops_test.py
index a291bef0ad..450428707d 100644
--- a/tensorflow/python/kernel_tests/conv_ops_test.py
+++ b/tensorflow/python/kernel_tests/conv_ops_test.py
@@ -312,8 +312,8 @@ class Conv2DTest(test.TestCase):
expected_values = self.evaluate(expected_results)
computed_values = self.evaluate(computed_results)
for e_value, c_value in zip(expected_values, computed_values):
- print("expected = ", e_value)
- print("actual = ", c_value)
+ tf_logging.info("expected = ", e_value)
+ tf_logging.info("actual = ", c_value)
self.assertAllClose(
e_value.flatten(), c_value.flatten(), atol=tolerance, rtol=1e-4)
@@ -337,8 +337,8 @@ class Conv2DTest(test.TestCase):
for i in range(len(tensors)):
conv = tensors[i]
value = values[i]
- print("expected = ", expected)
- print("actual = ", value)
+ tf_logging.info("expected = ", expected)
+ tf_logging.info("actual = ", value)
tol = 1e-5
if value.dtype == np.float16:
tol = 1e-3
@@ -547,8 +547,8 @@ class Conv2DTest(test.TestCase):
# "values" consists of two tensors for two backprops
value = self.evaluate(conv)
self.assertShapeEqual(value, conv)
- print("expected = ", expected)
- print("actual = ", value)
+ tf_logging.info("expected = ", expected)
+ tf_logging.info("actual = ", value)
self.assertArrayNear(expected, value.flatten(), err)
def _CompareBackpropInput(self, input_sizes, filter_sizes, output_sizes,
@@ -723,8 +723,8 @@ class Conv2DTest(test.TestCase):
data_format=data_format)
value = self.evaluate(conv)
self.assertShapeEqual(value, conv)
- print("expected = ", expected)
- print("actual = ", value)
+ tf_logging.info("expected = ", expected)
+ tf_logging.info("actual = ", value)
self.assertArrayNear(expected, value.flatten(), 1e-5)
def _CompareBackFilter(self, input_sizes, filter_sizes, output_sizes,
@@ -912,8 +912,8 @@ class Conv2DTest(test.TestCase):
value_2 = sess.run(conv_2)
self.assertShapeEqual(value, conv)
self.assertShapeEqual(value_2, conv_2)
- print("expected = ", value_2)
- print("actual = ", value)
+ tf_logging.info("expected = ", value_2)
+ tf_logging.info("actual = ", value)
self.assertArrayNear(value_2.flatten(), value.flatten(), err)
# Testing for backprops
@@ -965,8 +965,8 @@ class Conv2DTest(test.TestCase):
value_2 = sess.run(conv_2)
self.assertShapeEqual(value, conv)
self.assertShapeEqual(value_2, conv_2)
- print("expected = ", value_2)
- print("actual = ", value)
+ tf_logging.info("expected = ", value_2)
+ tf_logging.info("actual = ", value)
self.assertArrayNear(value_2.flatten(), value.flatten(), err)
def testConv2D2x2Depth3ValidBackpropFilterStride1x1Dilation2x1(self):
@@ -1178,7 +1178,7 @@ class Conv2DTest(test.TestCase):
# since fp16 numerical gradients are too imprecise.
err = np.fabs(jacob_t - reference_jacob_t).max()
- print("conv_2d gradient error = ", err)
+ tf_logging.info("conv_2d gradient error = ", err)
self.assertLess(err, 0.002)
def testInputGradientValidPaddingStrideOne(self):
@@ -1546,7 +1546,7 @@ class DepthwiseConv2DTest(test.TestCase):
conv = nn_impl.depthwise_conv2d(
t1, t2, strides=[1, stride, stride, 1], padding=padding)
value = sess.run(conv)
- print("value = ", value)
+ tf_logging.info("value = ", value)
self.assertArrayNear(expected, np.ravel(value), 1e-5)
self.assertShapeEqual(value, conv)
@@ -1668,7 +1668,7 @@ class SeparableConv2DTest(test.TestCase):
conv = array_ops.transpose(conv, [0, 2, 3, 1])
value = sess.run(conv)
- print("value = ", value)
+ tf_logging.info("value = ", value)
self.assertArrayNear(expected, np.ravel(value), 1e-5)
self.assertShapeEqual(value, conv)
@@ -1826,7 +1826,7 @@ class Conv2DBenchmark(test.Benchmark):
wall_time = time.time() - start
self.report_benchmark(
name="conv_stack_iter_%d" % iter_index, wall_time=wall_time)
- print("conv_stack_iter_%d: %.4f" % (iter_index, wall_time))
+ tf_logging.info("conv_stack_iter_%d: %.4f" % (iter_index, wall_time))
def GetInceptionFwdTest(input_size, filter_size, stride, padding,
diff --git a/tensorflow/python/kernel_tests/pooling_ops_test.py b/tensorflow/python/kernel_tests/pooling_ops_test.py
index a0c372db7d..e95c729715 100644
--- a/tensorflow/python/kernel_tests/pooling_ops_test.py
+++ b/tensorflow/python/kernel_tests/pooling_ops_test.py
@@ -947,7 +947,7 @@ class PoolingTest(test.TestCase):
output_sizes,
x_init_value=x_init_value,
delta=1e-2)
- print("%s gradient error = " % func_name, err)
+ tf_logging.info("%s gradient error = " % func_name, err)
self.assertLess(err, err_tolerance)
def _ConstructAndTestSecondGradient(self,
@@ -1024,7 +1024,7 @@ class PoolingTest(test.TestCase):
input_sizes,
x_init_value=x_init_value,
delta=1e-2)
- print("%s second-order gradient error = " % func_name, err)
+ tf_logging.info("%s second-order gradient error = " % func_name, err)
self.assertLess(err, err_tolerance)
def _testMaxPoolGradValidPadding1_1(self, data_format, use_gpu):
diff --git a/tensorflow/tools/quantization/quantize_graph_test.py b/tensorflow/tools/quantization/quantize_graph_test.py
index df71840b64..92bb5127da 100644
--- a/tensorflow/tools/quantization/quantize_graph_test.py
+++ b/tensorflow/tools/quantization/quantize_graph_test.py
@@ -119,8 +119,8 @@ def are_tensors_near(a, b, tolerance):
flat_a = a.flatten()
flat_b = b.flatten()
if len(flat_a) != len(flat_b):
- print("Tensors are different sizes: " + str(len(flat_a)) + " vs " + str(
- len(flat_b)))
+ tf_logging.info("Tensors are different sizes: " + str(len(flat_a)) + " vs "
+ + str(len(flat_b)))
return False
value_count = len(flat_a)
how_many_different = 0
@@ -140,10 +140,10 @@ def are_tensors_near(a, b, tolerance):
if how_many_different == 0:
return True
else:
- print("Tensors have {0} different values ({1}%), with mean difference"
- " {2} and mean absolute difference {3}".format(
- how_many_different, proportion_different * 100, mean_difference,
- mean_abs_difference))
+ tf_logging.info("Tensors have {0} different values ({1}%), with mean"
+ " difference {2} and mean absolute difference {3}".format(
+ how_many_different, proportion_different * 100,
+ mean_difference, mean_abs_difference))
return False
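For reference, the are_tensors_near helper touched in the last two hunks compares two tensors element-wise against a tolerance and logs the statistics shown above. A self-contained sketch of that check, reconstructed from the hunks (the original's element-wise loop is replaced by equivalent numpy operations, and the mean statistics are assumed to be taken over all elements, as the names suggest):

    import numpy as np
    from tensorflow.python.platform import tf_logging

    def are_tensors_near(a, b, tolerance):
      """Returns True if no element of a and b differs by more than tolerance."""
      flat_a = a.flatten()
      flat_b = b.flatten()
      if len(flat_a) != len(flat_b):
        tf_logging.info("Tensors are different sizes: " + str(len(flat_a)) +
                        " vs " + str(len(flat_b)))
        return False
      difference = flat_a - flat_b
      how_many_different = int(np.sum(np.abs(difference) > tolerance))
      if how_many_different == 0:
        return True
      proportion_different = how_many_different / float(len(flat_a))
      mean_difference = float(np.mean(difference))
      mean_abs_difference = float(np.mean(np.abs(difference)))
      tf_logging.info("Tensors have {0} different values ({1}%), with mean"
                      " difference {2} and mean absolute difference {3}".format(
                          how_many_different, proportion_different * 100,
                          mean_difference, mean_abs_difference))
      return False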