author    Anna R <annarev@google.com>                      2018-02-28 14:50:02 -0800
committer TensorFlower Gardener <gardener@tensorflow.org> 2018-02-28 14:53:59 -0800
commit    91d49c7d98114da4e4647c62d9f9b69119296b69 (patch)
tree      f471ce837b915650f1128a57d505ac2c46dc51da /tensorflow/python/kernel_tests
parent    f28e4d6faf94c08464f430f9cd01ef32dde6ad46 (diff)
Removing underscore prefixes from hidden generated Python functions.
PiperOrigin-RevId: 187386941
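
The change is a mechanical rename: generated op wrappers such as gen_array_ops._snapshot lose their leading underscore, and call sites switch to gen_array_ops.snapshot. A minimal before/after sketch in the style of the tests below, assuming a TF 1.x build at or after this revision (the gen_* modules are internal, not public API, and SnapshotRenameExample is a hypothetical test class, not part of this commit):

    from tensorflow.python.framework import constant_op
    from tensorflow.python.framework import dtypes
    from tensorflow.python.ops import gen_array_ops
    from tensorflow.python.platform import test


    class SnapshotRenameExample(test.TestCase):

      def testSnapshot(self):
        with self.test_session():
          x = constant_op.constant([0, 1, 2, 3], dtype=dtypes.int32)
          # Before this commit: y = gen_array_ops._snapshot(x)
          y = gen_array_ops.snapshot(x)  # same generated op, renamed wrapper
          self.assertAllEqual(y.eval(), [0, 1, 2, 3])


    if __name__ == "__main__":
      test.main()

Every hunk below follows this pattern; only the wrapper names change, not the op behavior.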
Diffstat (limited to 'tensorflow/python/kernel_tests')
-rw-r--r--  tensorflow/python/kernel_tests/array_ops_test.py              |  2
-rw-r--r--  tensorflow/python/kernel_tests/batchtospace_op_test.py        |  2
-rw-r--r--  tensorflow/python/kernel_tests/bcast_ops_test.py              |  4
-rw-r--r--  tensorflow/python/kernel_tests/checkpoint_ops_test.py         | 34
-rw-r--r--  tensorflow/python/kernel_tests/concat_op_test.py              | 22
-rw-r--r--  tensorflow/python/kernel_tests/control_flow_ops_py_test.py    | 10
-rw-r--r--  tensorflow/python/kernel_tests/cwise_ops_test.py              | 10
-rw-r--r--  tensorflow/python/kernel_tests/determinant_op_test.py         |  2
-rw-r--r--  tensorflow/python/kernel_tests/fractional_avg_pool_op_test.py | 10
-rw-r--r--  tensorflow/python/kernel_tests/fractional_max_pool_op_test.py | 28
-rw-r--r--  tensorflow/python/kernel_tests/matrix_exponential_op_test.py  | 12
-rw-r--r--  tensorflow/python/kernel_tests/matrix_logarithm_op_test.py    | 14
-rw-r--r--  tensorflow/python/kernel_tests/pooling_ops_test.py            | 86
-rw-r--r--  tensorflow/python/kernel_tests/save_restore_ops_test.py       |  7
-rw-r--r--  tensorflow/python/kernel_tests/scalar_test.py                 |  4
-rw-r--r--  tensorflow/python/kernel_tests/spacetobatch_op_test.py        |  4
-rw-r--r--  tensorflow/python/kernel_tests/sparse_xent_op_test.py         | 13
-rw-r--r--  tensorflow/python/kernel_tests/stack_ops_test.py              | 82
-rw-r--r--  tensorflow/python/kernel_tests/tensor_array_ops_test.py       |  2
-rw-r--r--  tensorflow/python/kernel_tests/unique_op_test.py              |  6
-rw-r--r--  tensorflow/python/kernel_tests/variable_ops_test.py           | 24
-rw-r--r--  tensorflow/python/kernel_tests/variables_test.py              |  2
-rw-r--r--  tensorflow/python/kernel_tests/xent_op_test.py                | 12
23 files changed, 193 insertions, 199 deletions
diff --git a/tensorflow/python/kernel_tests/array_ops_test.py b/tensorflow/python/kernel_tests/array_ops_test.py
index 365cf72108..d35f62b186 100644
--- a/tensorflow/python/kernel_tests/array_ops_test.py
+++ b/tensorflow/python/kernel_tests/array_ops_test.py
@@ -1223,7 +1223,7 @@ class SnapshotOpTest(test_util.TensorFlowTestCase):
for dtype in [dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64]:
with self.test_session(use_gpu=True):
x = constant_op.constant([0, 1, 2, 3], dtype=dtype)
- y = gen_array_ops._snapshot(x)
+ y = gen_array_ops.snapshot(x)
self.assertAllEqual(y.eval(), [0, 1, 2, 3])
diff --git a/tensorflow/python/kernel_tests/batchtospace_op_test.py b/tensorflow/python/kernel_tests/batchtospace_op_test.py
index 0c802476a0..6143cd3baa 100644
--- a/tensorflow/python/kernel_tests/batchtospace_op_test.py
+++ b/tensorflow/python/kernel_tests/batchtospace_op_test.py
@@ -44,7 +44,7 @@ class CppOpImpl(object):
@staticmethod
def batch_to_space(*args, **kwargs):
- return gen_array_ops._batch_to_space(*args, **kwargs)
+ return gen_array_ops.batch_to_space(*args, **kwargs)
class BatchToSpaceDepthToSpace(test.TestCase, PythonOpImpl):
diff --git a/tensorflow/python/kernel_tests/bcast_ops_test.py b/tensorflow/python/kernel_tests/bcast_ops_test.py
index 9e51234605..cb46fcb007 100644
--- a/tensorflow/python/kernel_tests/bcast_ops_test.py
+++ b/tensorflow/python/kernel_tests/bcast_ops_test.py
@@ -20,8 +20,8 @@ from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
-from tensorflow.python.ops.gen_array_ops import _broadcast_args
from tensorflow.python.ops.gen_array_ops import _broadcast_gradient_args
+from tensorflow.python.ops.gen_array_ops import broadcast_args
from tensorflow.python.platform import test
@@ -29,7 +29,7 @@ class BcastOpsTest(test.TestCase):
def _GetBroadcastShape(self, xs, ys):
with self.test_session() as sess:
- return sess.run(_broadcast_args(xs, ys))
+ return sess.run(broadcast_args(xs, ys))
def _GetGradientArgs(self, xs, ys):
with self.test_session() as sess:
diff --git a/tensorflow/python/kernel_tests/checkpoint_ops_test.py b/tensorflow/python/kernel_tests/checkpoint_ops_test.py
index a786d0a47e..7f147ba53a 100644
--- a/tensorflow/python/kernel_tests/checkpoint_ops_test.py
+++ b/tensorflow/python/kernel_tests/checkpoint_ops_test.py
@@ -50,7 +50,7 @@ class GenerateVocabRemappingTest(test.TestCase):
def test_generate_remapping_with_no_vocab_changes(self):
"""Tests where vocab does not change at all."""
- remapping, num_present = gen_checkpoint_ops._generate_vocab_remapping(
+ remapping, num_present = gen_checkpoint_ops.generate_vocab_remapping(
new_vocab_file=self.old_vocab_file,
old_vocab_file=self.old_vocab_file,
num_new_vocab=3,
@@ -63,7 +63,7 @@ class GenerateVocabRemappingTest(test.TestCase):
def test_generate_remapping_with_shifted_vocab(self):
"""Tests where vocab is the same, but shifted / ordered differently."""
- remapping, num_present = gen_checkpoint_ops._generate_vocab_remapping(
+ remapping, num_present = gen_checkpoint_ops.generate_vocab_remapping(
new_vocab_file=self.new_vocab_file,
old_vocab_file=self.old_vocab_file,
num_new_vocab=3,
@@ -76,7 +76,7 @@ class GenerateVocabRemappingTest(test.TestCase):
def test_generate_remapping_with_offset(self):
"""Tests offset and num_new_vocab logic."""
- remapping, num_present = gen_checkpoint_ops._generate_vocab_remapping(
+ remapping, num_present = gen_checkpoint_ops.generate_vocab_remapping(
new_vocab_file=self.new_vocab_file,
old_vocab_file=self.old_vocab_file,
num_new_vocab=1,
@@ -89,7 +89,7 @@ class GenerateVocabRemappingTest(test.TestCase):
def test_generate_remapping_with_old_vocab_size(self):
"""Tests where old_vocab_size is specified."""
- remapping, num_present = gen_checkpoint_ops._generate_vocab_remapping(
+ remapping, num_present = gen_checkpoint_ops.generate_vocab_remapping(
new_vocab_file=self.new_vocab_file,
old_vocab_file=self.old_vocab_file,
num_new_vocab=3,
@@ -132,7 +132,7 @@ class LoadAndRemapMatrixTest(test.TestCase):
# No column remapping, new weight matrix has second row, then first row.
row_remapping = [1, 0]
- remapped_matrix = gen_checkpoint_ops._load_and_remap_matrix(
+ remapped_matrix = gen_checkpoint_ops.load_and_remap_matrix(
ckpt_path=[self.bundle_file],
old_tensor_name=self.old_tensor_name,
row_remapping=row_remapping,
@@ -147,7 +147,7 @@ class LoadAndRemapMatrixTest(test.TestCase):
# No row remapping, new weight matrix has third col, then first col.
row_remapping = list(range(self.old_num_rows))
col_remapping = [2, 0]
- remapped_matrix = gen_checkpoint_ops._load_and_remap_matrix(
+ remapped_matrix = gen_checkpoint_ops.load_and_remap_matrix(
ckpt_path=[self.bundle_file],
old_tensor_name=self.old_tensor_name,
row_remapping=row_remapping,
@@ -162,7 +162,7 @@ class LoadAndRemapMatrixTest(test.TestCase):
# Both row and column remappings.
row_remapping = [1, 0, 4]
col_remapping = [1, 15]
- remapped_matrix = gen_checkpoint_ops._load_and_remap_matrix(
+ remapped_matrix = gen_checkpoint_ops.load_and_remap_matrix(
ckpt_path=[self.bundle_file],
old_tensor_name=self.old_tensor_name,
row_remapping=row_remapping,
@@ -177,7 +177,7 @@ class LoadAndRemapMatrixTest(test.TestCase):
def test_load_and_remap_with_init(self):
"""Tests the op's load and remap where there are missing entries."""
init_val = 42
- remapped_matrix = gen_checkpoint_ops._load_and_remap_matrix(
+ remapped_matrix = gen_checkpoint_ops.load_and_remap_matrix(
ckpt_path=[self.bundle_file],
old_tensor_name=self.old_tensor_name,
row_remapping=[2, -1, 0],
@@ -196,7 +196,7 @@ class LoadAndRemapMatrixTest(test.TestCase):
"""Tests when all the rows are missing and need to be initialized."""
num_rows = 7
initializing_values = [42] * num_rows * self.old_num_cols
- remapped_matrix = gen_checkpoint_ops._load_and_remap_matrix(
+ remapped_matrix = gen_checkpoint_ops.load_and_remap_matrix(
ckpt_path=[self.bundle_file],
old_tensor_name=self.old_tensor_name,
row_remapping=[-1] * num_rows,
@@ -214,7 +214,7 @@ class LoadAndRemapMatrixTest(test.TestCase):
num_rows = 7
num_cols = 4
initializing_values = [42] * num_rows * num_cols
- remapped_matrix = gen_checkpoint_ops._load_and_remap_matrix(
+ remapped_matrix = gen_checkpoint_ops.load_and_remap_matrix(
ckpt_path=[self.bundle_file],
old_tensor_name=self.old_tensor_name,
row_remapping=[-1] * num_rows,
@@ -235,7 +235,7 @@ class LoadAndRemapMatrixTest(test.TestCase):
invalid_remapping = [1, 0, 0, 0, 1, 2]
# Invalid row remapping.
- remapped_matrix = gen_checkpoint_ops._load_and_remap_matrix(
+ remapped_matrix = gen_checkpoint_ops.load_and_remap_matrix(
ckpt_path=[self.bundle_file],
old_tensor_name=self.old_tensor_name,
row_remapping=invalid_remapping,
@@ -247,7 +247,7 @@ class LoadAndRemapMatrixTest(test.TestCase):
remapped_matrix.eval()
# Invalid column remapping.
- remapped_matrix = gen_checkpoint_ops._load_and_remap_matrix(
+ remapped_matrix = gen_checkpoint_ops.load_and_remap_matrix(
ckpt_path=[self.bundle_file],
old_tensor_name=self.old_tensor_name,
row_remapping=list(range(self.old_num_rows)),
@@ -260,7 +260,7 @@ class LoadAndRemapMatrixTest(test.TestCase):
def test_load_and_remap_incorrect_initializing_values(self):
"""Tests that errors are raised with incorrect number of init values."""
- remapped_matrix = gen_checkpoint_ops._load_and_remap_matrix(
+ remapped_matrix = gen_checkpoint_ops.load_and_remap_matrix(
ckpt_path=[self.bundle_file],
old_tensor_name=self.old_tensor_name,
row_remapping=[2, -1, 0],
@@ -275,7 +275,7 @@ class LoadAndRemapMatrixTest(test.TestCase):
with self.test_session(), self.assertRaises(errors.InvalidArgumentError):
remapped_matrix.eval()
- remapped_matrix = gen_checkpoint_ops._load_and_remap_matrix(
+ remapped_matrix = gen_checkpoint_ops.load_and_remap_matrix(
ckpt_path=[self.bundle_file],
old_tensor_name=self.old_tensor_name,
row_remapping=[2, -1, 0],
@@ -314,7 +314,7 @@ class LoadAndRemapMatrixWithMaxRowsTest(test.TestCase):
num_rows, num_cols = np_value.shape
# Tests loading the entire tensor (except reversed).
- remapped_matrix = gen_checkpoint_ops._load_and_remap_matrix(
+ remapped_matrix = gen_checkpoint_ops.load_and_remap_matrix(
ckpt_path=ckpt_path,
old_tensor_name=old_tensor_name,
# Simply reverses the rows of the matrix.
@@ -332,7 +332,7 @@ class LoadAndRemapMatrixWithMaxRowsTest(test.TestCase):
self.assertGreater(num_rows, 2)
prefix_rows = 2
suffix_rows = 3
- remapped_matrix = gen_checkpoint_ops._load_and_remap_matrix(
+ remapped_matrix = gen_checkpoint_ops.load_and_remap_matrix(
ckpt_path=ckpt_path,
old_tensor_name=old_tensor_name,
# Reverses the rows of the matrix, then prepends and appends
@@ -353,7 +353,7 @@ class LoadAndRemapMatrixWithMaxRowsTest(test.TestCase):
# Tests when everything is taken from initializing_values.
new_rows = 7
initializing_values = [42] * new_rows * num_cols
- remapped_matrix = gen_checkpoint_ops._load_and_remap_matrix(
+ remapped_matrix = gen_checkpoint_ops.load_and_remap_matrix(
ckpt_path=ckpt_path,
old_tensor_name=old_tensor_name,
# Nothing is loaded from the old tensor.
diff --git a/tensorflow/python/kernel_tests/concat_op_test.py b/tensorflow/python/kernel_tests/concat_op_test.py
index 127bc6bb20..81c6a4aa6e 100644
--- a/tensorflow/python/kernel_tests/concat_op_test.py
+++ b/tensorflow/python/kernel_tests/concat_op_test.py
@@ -526,7 +526,7 @@ class ConcatOpTest(test.TestCase):
with self.test_session(use_gpu=True):
t1 = []
t2 = []
- output = gen_array_ops._concat_v2([t1, t2], 0).eval()
+ output = gen_array_ops.concat_v2([t1, t2], 0).eval()
self.assertFalse(output) # Checks that output is empty
def testConcatInvalidAxis(self):
@@ -534,20 +534,20 @@ class ConcatOpTest(test.TestCase):
with self.test_session(use_gpu=True):
t1 = [1]
t2 = [2]
- gen_array_ops._concat_v2([t1, t2], 1).eval()
+ gen_array_ops.concat_v2([t1, t2], 1).eval()
def testConcatNegativeAxis(self):
with self.test_session(use_gpu=True):
t1 = [[1, 2, 3], [4, 5, 6]]
t2 = [[7, 8, 9], [10, 11, 12]]
- c = gen_array_ops._concat_v2([t1, t2], -2)
+ c = gen_array_ops.concat_v2([t1, t2], -2)
self.assertEqual([4, 3], c.get_shape().as_list())
output = c.eval()
self.assertAllEqual([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]],
output)
- c = gen_array_ops._concat_v2([t1, t2], -1)
+ c = gen_array_ops.concat_v2([t1, t2], -1)
self.assertEqual([2, 6], c.get_shape().as_list())
output = c.eval()
self.assertAllEqual([[1, 2, 3, 7, 8, 9], [4, 5, 6, 10, 11, 12]], output)
@@ -615,7 +615,7 @@ class ConcatOffsetTest(test.TestCase):
s0 = constant_op.constant([2, 3, 5], dtypes.int32)
s1 = constant_op.constant([2, 7, 5], dtypes.int32)
s2 = constant_op.constant([2, 20, 5], dtypes.int32)
- off = gen_array_ops._concat_offset(cdim, [s0, s1, s2])
+ off = gen_array_ops.concat_offset(cdim, [s0, s1, s2])
ans = sess.run(off)
self.assertAllEqual(ans, [[0, 0, 0], [0, 3, 0], [0, 10, 0]])
@@ -624,7 +624,7 @@ class ConcatOffsetTest(test.TestCase):
cdim = constant_op.constant(1, dtypes.int32)
s0 = constant_op.constant([[2, 3, 5]], dtypes.int32)
s1 = constant_op.constant([[2, 7, 5]], dtypes.int32)
- off = gen_array_ops._concat_offset(cdim, [s0, s1])
+ off = gen_array_ops.concat_offset(cdim, [s0, s1])
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
r"should be a vector"):
sess.run(off)
@@ -634,7 +634,7 @@ class ConcatOffsetTest(test.TestCase):
cdim = constant_op.constant(4, dtypes.int32)
s0 = constant_op.constant([2, 3, 5], dtypes.int32)
s1 = constant_op.constant([2, 7, 5], dtypes.int32)
- off = gen_array_ops._concat_offset(cdim, [s0, s1])
+ off = gen_array_ops.concat_offset(cdim, [s0, s1])
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
r"Concat dim is out of range: 4 vs. 3"):
sess.run(off)
@@ -644,7 +644,7 @@ class ConcatOffsetTest(test.TestCase):
cdim = constant_op.constant(1, dtypes.int32)
s0 = constant_op.constant([2, 3, 5], dtypes.int32)
s1 = constant_op.constant([2, 7, 5, 10], dtypes.int32)
- off = gen_array_ops._concat_offset(cdim, [s0, s1])
+ off = gen_array_ops.concat_offset(cdim, [s0, s1])
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
r"should contain 3 elem"):
sess.run(off)
@@ -654,7 +654,7 @@ class ConcatOffsetTest(test.TestCase):
cdim = constant_op.constant(1, dtypes.int32)
s0 = constant_op.constant([2, 3, 5], dtypes.int32)
s1 = constant_op.constant([2, 7, 10], dtypes.int32)
- off = gen_array_ops._concat_offset(cdim, [s0, s1])
+ off = gen_array_ops.concat_offset(cdim, [s0, s1])
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
r"All dimensions except 1 must match. Input 1 has shape \[2 7 10\] "
@@ -667,7 +667,7 @@ class ConcatOffsetTest(test.TestCase):
s0 = constant_op.constant([2, 3, 5], dtypes.int32)
s1 = constant_op.constant([2, 7, 5], dtypes.int32)
s2 = constant_op.constant([2, 20, 5], dtypes.int32)
- off = gen_array_ops._concat_offset(cdim, [s0, s1, s2])
+ off = gen_array_ops.concat_offset(cdim, [s0, s1, s2])
ans = sess.run(off)
self.assertAllEqual(ans, [[0, 0, 0], [0, 3, 0], [0, 10, 0]])
@@ -675,7 +675,7 @@ class ConcatOffsetTest(test.TestCase):
s0 = constant_op.constant([2, 3, 5], dtypes.int32)
s1 = constant_op.constant([1, 3, 5], dtypes.int32)
s2 = constant_op.constant([3, 3, 5], dtypes.int32)
- off = gen_array_ops._concat_offset(cdim, [s0, s1, s2])
+ off = gen_array_ops.concat_offset(cdim, [s0, s1, s2])
ans = sess.run(off)
self.assertAllEqual(ans, [[0, 0, 0], [2, 0, 0], [3, 0, 0]])
diff --git a/tensorflow/python/kernel_tests/control_flow_ops_py_test.py b/tensorflow/python/kernel_tests/control_flow_ops_py_test.py
index 58f38650eb..b429fa5c42 100644
--- a/tensorflow/python/kernel_tests/control_flow_ops_py_test.py
+++ b/tensorflow/python/kernel_tests/control_flow_ops_py_test.py
@@ -552,7 +552,7 @@ class ControlFlowTest(test.TestCase):
def testCondRef(self):
with self.test_session():
- x = gen_state_ops._variable(
+ x = gen_state_ops.variable(
shape=[1],
dtype=dtypes.float32,
name="x",
@@ -580,7 +580,7 @@ class ControlFlowTest(test.TestCase):
def testUninitializedRefIdentity(self):
with self.test_session() as sess:
- v = gen_state_ops._variable(
+ v = gen_state_ops.variable(
shape=[1],
dtype=dtypes.float32,
name="v",
@@ -1620,7 +1620,7 @@ class ControlFlowTest(test.TestCase):
def testWhileStack_1(self):
with self.test_session():
- s = gen_data_flow_ops._stack_v2(-1, dtypes.int32, stack_name="foo")
+ s = gen_data_flow_ops.stack_v2(-1, dtypes.int32, stack_name="foo")
i = constant_op.constant(0)
def c(i):
@@ -1629,7 +1629,7 @@ class ControlFlowTest(test.TestCase):
def b(i):
ni = math_ops.add(i, 1)
ni = control_flow_ops.with_dependencies(
- [gen_data_flow_ops._stack_push_v2(s, i)], ni)
+ [gen_data_flow_ops.stack_push_v2(s, i)], ni)
return ni
r = control_flow_ops.while_loop(c, b, [i], parallel_iterations=1)
@@ -1641,7 +1641,7 @@ class ControlFlowTest(test.TestCase):
def b1(i, x):
ni = math_ops.subtract(i, 1)
- nx = x + gen_data_flow_ops._stack_pop_v2(s, dtypes.int32)
+ nx = x + gen_data_flow_ops.stack_pop_v2(s, dtypes.int32)
return [ni, nx]
_, rx = control_flow_ops.while_loop(
diff --git a/tensorflow/python/kernel_tests/cwise_ops_test.py b/tensorflow/python/kernel_tests/cwise_ops_test.py
index 0d9b46c30d..8db0bb6f0d 100644
--- a/tensorflow/python/kernel_tests/cwise_ops_test.py
+++ b/tensorflow/python/kernel_tests/cwise_ops_test.py
@@ -495,11 +495,11 @@ class UnaryOpTest(test.TestCase):
dtype_tols = [(np.float32, 5e-4), (np.float64, 1e-6), (np.complex64, 5e-4),
(np.complex128, 1e-6)]
op_range = [
- (gen_math_ops._reciprocal_grad, [-2, 2]),
- (gen_math_ops._rsqrt_grad, [0.1, 3]),
- (gen_math_ops._sigmoid_grad, [-2, 2]),
- (gen_math_ops._sqrt_grad, [0.1, 3]),
- (gen_math_ops._tanh_grad, [-2, 2]),
+ (gen_math_ops.reciprocal_grad, [-2, 2]),
+ (gen_math_ops.rsqrt_grad, [0.1, 3]),
+ (gen_math_ops.sigmoid_grad, [-2, 2]),
+ (gen_math_ops.sqrt_grad, [0.1, 3]),
+ (gen_math_ops.tanh_grad, [-2, 2]),
]
def rand(dtype):
diff --git a/tensorflow/python/kernel_tests/determinant_op_test.py b/tensorflow/python/kernel_tests/determinant_op_test.py
index 222038b22e..a52b2c0dc3 100644
--- a/tensorflow/python/kernel_tests/determinant_op_test.py
+++ b/tensorflow/python/kernel_tests/determinant_op_test.py
@@ -65,7 +65,7 @@ class DeterminantOpTest(test.TestCase):
self._compareDeterminantBase(matrix_x,
linalg_ops.matrix_determinant(matrix_x))
self._compareLogDeterminantBase(
- matrix_x, gen_linalg_ops._log_matrix_determinant(matrix_x))
+ matrix_x, gen_linalg_ops.log_matrix_determinant(matrix_x))
def testBasic(self):
# 2x2 matrices
diff --git a/tensorflow/python/kernel_tests/fractional_avg_pool_op_test.py b/tensorflow/python/kernel_tests/fractional_avg_pool_op_test.py
index feec9934e4..faac7d8365 100644
--- a/tensorflow/python/kernel_tests/fractional_avg_pool_op_test.py
+++ b/tensorflow/python/kernel_tests/fractional_avg_pool_op_test.py
@@ -347,7 +347,7 @@ class FractionalAvgPoolGradTest(test.TestCase):
Two types of tests for FractionalAvgPoolGrad.
1) Test fractional_avg_pool_grad() directly.
- This type of test relies on gen_nn_ops._avg_pool_grad() returns the
+ This type of test relies on gen_nn_ops.avg_pool_grad() returns the
correct result. For example:
* input_tensor_shape = (1, 10, 10, 1)
* window_size = (1, 2, 2, 1)
@@ -404,13 +404,13 @@ class FractionalAvgPoolGradTest(test.TestCase):
num_elements *= dim_size
output_backprop = (self._PRNG.rand(num_elements) *
1000).reshape(output_data.shape)
- input_backprop_tensor = gen_nn_ops._avg_pool_grad(
+ input_backprop_tensor = gen_nn_ops.avg_pool_grad(
input_tensor.get_shape(), output_backprop, window_size,
stride_size, padding)
input_backprop = input_backprop_tensor.eval()
row_seq = list(range(0, num_rows + 1, row_window_size))
col_seq = list(range(0, num_cols + 1, col_window_size))
- fap_input_backprop_tensor = gen_nn_ops._fractional_avg_pool_grad(
+ fap_input_backprop_tensor = gen_nn_ops.fractional_avg_pool_grad(
input_tensor.get_shape(),
output_backprop,
row_seq,
@@ -443,7 +443,7 @@ class FractionalAvgPoolGradTest(test.TestCase):
num_elements *= dim_size
output_backprop = (self._PRNG.rand(num_elements) *
1000).reshape(output_data.shape)
- input_backprop_tensor = gen_nn_ops._avg_pool_grad(
+ input_backprop_tensor = gen_nn_ops.avg_pool_grad(
input_tensor.get_shape(), output_backprop, window_size,
stride_size, padding)
input_backprop = input_backprop_tensor.eval()
@@ -451,7 +451,7 @@ class FractionalAvgPoolGradTest(test.TestCase):
col_seq = list(range(0, num_cols, col_window_size - 1))
row_seq[-1] += 1
col_seq[-1] += 1
- fap_input_backprop_tensor = gen_nn_ops._fractional_avg_pool_grad(
+ fap_input_backprop_tensor = gen_nn_ops.fractional_avg_pool_grad(
input_tensor.get_shape(),
output_backprop,
row_seq,
diff --git a/tensorflow/python/kernel_tests/fractional_max_pool_op_test.py b/tensorflow/python/kernel_tests/fractional_max_pool_op_test.py
index 5983ae7759..6477c9ebc4 100644
--- a/tensorflow/python/kernel_tests/fractional_max_pool_op_test.py
+++ b/tensorflow/python/kernel_tests/fractional_max_pool_op_test.py
@@ -318,7 +318,7 @@ class FractionalMaxPoolGradTest(test.TestCase):
Two types of tests for FractionalMaxPoolGrad.
1) Test fractional_max_pool_grad() directly.
- This type of test relies on gen_nn_ops._max_pool_grad() returns the correct
+ This type of test relies on gen_nn_ops.max_pool_grad() returns the correct
result. For example:
* input_tensor_shape = (1, 10, 10, 1)
* window_size = (1, 2, 2, 1)
@@ -384,16 +384,13 @@ class FractionalMaxPoolGradTest(test.TestCase):
stride_size, padding)
output_data = output_tensor.eval()
output_backprop = self._PRNG.randint(100, size=output_data.shape)
- input_backprop_tensor = gen_nn_ops._max_pool_grad(input_tensor,
- output_tensor,
- output_backprop,
- window_size,
- stride_size,
- padding)
+ input_backprop_tensor = gen_nn_ops.max_pool_grad(
+ input_tensor, output_tensor, output_backprop, window_size,
+ stride_size, padding)
input_backprop = input_backprop_tensor.eval()
row_seq = list(range(0, num_rows + 1, row_window_size))
col_seq = list(range(0, num_cols + 1, col_window_size))
- fmp_input_backprop_tensor = gen_nn_ops._fractional_max_pool_grad(
+ fmp_input_backprop_tensor = gen_nn_ops.fractional_max_pool_grad(
input_tensor,
output_tensor,
output_backprop,
@@ -422,18 +419,15 @@ class FractionalMaxPoolGradTest(test.TestCase):
stride_size, padding)
output_data = output_tensor.eval()
output_backprop = self._PRNG.randint(100, size=output_data.shape)
- input_backprop_tensor = gen_nn_ops._max_pool_grad(input_tensor,
- output_tensor,
- output_backprop,
- window_size,
- stride_size,
- padding)
+ input_backprop_tensor = gen_nn_ops.max_pool_grad(
+ input_tensor, output_tensor, output_backprop, window_size,
+ stride_size, padding)
input_backprop = input_backprop_tensor.eval()
row_seq = list(range(0, num_rows, row_window_size - 1))
col_seq = list(range(0, num_cols, col_window_size - 1))
row_seq[-1] += 1
col_seq[-1] += 1
- fmp_input_backprop_tensor = gen_nn_ops._fractional_max_pool_grad(
+ fmp_input_backprop_tensor = gen_nn_ops.fractional_max_pool_grad(
input_tensor,
output_tensor,
output_backprop,
@@ -591,7 +585,7 @@ class FractionalMaxPoolGradTest(test.TestCase):
output_tensor = constant_op.constant(
output_data_not_overlapping, shape=output_size)
grad = constant_op.constant(output_backprop, shape=output_size)
- r = gen_nn_ops._fractional_max_pool_grad(
+ r = gen_nn_ops.fractional_max_pool_grad(
input_tensor,
output_tensor,
grad,
@@ -606,7 +600,7 @@ class FractionalMaxPoolGradTest(test.TestCase):
# Test when overlapping is True
output_tensor = constant_op.constant(
output_data_overlapping, shape=output_size)
- r = gen_nn_ops._fractional_max_pool_grad(
+ r = gen_nn_ops.fractional_max_pool_grad(
input_tensor, output_tensor, grad, row_seq, col_seq, overlapping=True)
input_backprop_overlapping = r.eval()
self.assertShapeEqual(
diff --git a/tensorflow/python/kernel_tests/matrix_exponential_op_test.py b/tensorflow/python/kernel_tests/matrix_exponential_op_test.py
index 6203a412d7..a0c66c77d8 100644
--- a/tensorflow/python/kernel_tests/matrix_exponential_op_test.py
+++ b/tensorflow/python/kernel_tests/matrix_exponential_op_test.py
@@ -48,7 +48,7 @@ class ExponentialOpTest(test.TestCase):
def _verifyExponential(self, x, np_type):
inp = x.astype(np_type)
with self.test_session(use_gpu=True):
- tf_ans = gen_linalg_ops._matrix_exponential(inp)
+ tf_ans = gen_linalg_ops.matrix_exponential(inp)
if x.size == 0:
np_ans = np.empty(x.shape, dtype=np_type)
else:
@@ -116,13 +116,13 @@ class ExponentialOpTest(test.TestCase):
# When the exponential of a non-square matrix is attempted we should return
# an error
with self.assertRaises(ValueError):
- gen_linalg_ops._matrix_exponential(np.array([[1., 2., 3.], [3., 4., 5.]]))
+ gen_linalg_ops.matrix_exponential(np.array([[1., 2., 3.], [3., 4., 5.]]))
def testWrongDimensions(self):
# The input to the exponential should be at least a 2-dimensional tensor.
tensor3 = constant_op.constant([1., 2.])
with self.assertRaises(ValueError):
- gen_linalg_ops._matrix_exponential(tensor3)
+ gen_linalg_ops.matrix_exponential(tensor3)
def testEmpty(self):
self._verifyExponentialReal(np.empty([0, 2, 2]))
@@ -143,8 +143,8 @@ class ExponentialOpTest(test.TestCase):
with self.test_session(use_gpu=True) as sess:
matrix1 = random_ops.random_normal([5, 5], seed=42)
matrix2 = random_ops.random_normal([5, 5], seed=42)
- expm1 = gen_linalg_ops._matrix_exponential(matrix1)
- expm2 = gen_linalg_ops._matrix_exponential(matrix2)
+ expm1 = gen_linalg_ops.matrix_exponential(matrix1)
+ expm2 = gen_linalg_ops.matrix_exponential(matrix2)
expm = sess.run([expm1, expm2])
self.assertAllEqual(expm[0], expm[1])
@@ -180,7 +180,7 @@ class MatrixExponentialBenchmark(test.Benchmark):
session.Session() as sess, \
ops.device("/cpu:0"):
matrix = self._GenerateMatrix(shape)
- expm = gen_linalg_ops._matrix_exponential(matrix)
+ expm = gen_linalg_ops.matrix_exponential(matrix)
variables.global_variables_initializer().run()
self.run_op_benchmark(
sess,
diff --git a/tensorflow/python/kernel_tests/matrix_logarithm_op_test.py b/tensorflow/python/kernel_tests/matrix_logarithm_op_test.py
index 18ed59828c..24edc4f59f 100644
--- a/tensorflow/python/kernel_tests/matrix_logarithm_op_test.py
+++ b/tensorflow/python/kernel_tests/matrix_logarithm_op_test.py
@@ -39,8 +39,8 @@ class LogarithmOpTest(test.TestCase):
inp = x.astype(np_type)
with self.test_session(use_gpu=True):
# Verify that expm(logm(A)) == A.
- tf_ans = gen_linalg_ops._matrix_exponential(
- gen_linalg_ops._matrix_logarithm(inp))
+ tf_ans = gen_linalg_ops.matrix_exponential(
+ gen_linalg_ops.matrix_logarithm(inp))
out = tf_ans.eval()
self.assertAllClose(inp, out, rtol=1e-4, atol=1e-3)
@@ -85,14 +85,14 @@ class LogarithmOpTest(test.TestCase):
# When the logarithm of a non-square matrix is attempted we should return
# an error
with self.assertRaises(ValueError):
- gen_linalg_ops._matrix_logarithm(
+ gen_linalg_ops.matrix_logarithm(
np.array([[1., 2., 3.], [3., 4., 5.]], dtype=np.complex64))
def testWrongDimensions(self):
# The input to the logarithm should be at least a 2-dimensional tensor.
tensor3 = constant_op.constant([1., 2.], dtype=dtypes.complex64)
with self.assertRaises(ValueError):
- gen_linalg_ops._matrix_logarithm(tensor3)
+ gen_linalg_ops.matrix_logarithm(tensor3)
def testEmpty(self):
self._verifyLogarithmComplex(np.empty([0, 2, 2], dtype=np.complex64))
@@ -115,8 +115,8 @@ class LogarithmOpTest(test.TestCase):
random_ops.random_normal([5, 5], seed=42), dtypes.complex64)
matrix2 = math_ops.cast(
random_ops.random_normal([5, 5], seed=42), dtypes.complex64)
- logm1 = gen_linalg_ops._matrix_logarithm(matrix1)
- logm2 = gen_linalg_ops._matrix_logarithm(matrix2)
+ logm1 = gen_linalg_ops.matrix_logarithm(matrix1)
+ logm2 = gen_linalg_ops.matrix_logarithm(matrix2)
logm = sess.run([logm1, logm2])
self.assertAllEqual(logm[0], logm[1])
@@ -152,7 +152,7 @@ class MatrixLogarithmBenchmark(test.Benchmark):
session.Session() as sess, \
ops.device("/cpu:0"):
matrix = self._GenerateMatrix(shape)
- logm = gen_linalg_ops._matrix_logarithm(matrix)
+ logm = gen_linalg_ops.matrix_logarithm(matrix)
variables.global_variables_initializer().run()
self.run_op_benchmark(
sess,
diff --git a/tensorflow/python/kernel_tests/pooling_ops_test.py b/tensorflow/python/kernel_tests/pooling_ops_test.py
index 4466beeec9..a0ac355b60 100644
--- a/tensorflow/python/kernel_tests/pooling_ops_test.py
+++ b/tensorflow/python/kernel_tests/pooling_ops_test.py
@@ -405,7 +405,7 @@ class PoolingTest(test.TestCase):
for v2 in [True, False]:
self._VerifyValues(
- gen_nn_ops._max_pool_v2,
+ gen_nn_ops.max_pool_v2,
input_sizes=[1, 3, 3, 3],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
@@ -427,7 +427,7 @@ class PoolingTest(test.TestCase):
for v2 in [True, False]:
self._VerifyValues(
- gen_nn_ops._max_pool_v2,
+ gen_nn_ops.max_pool_v2,
input_sizes=[1, 2, 3, 3],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
@@ -456,7 +456,7 @@ class PoolingTest(test.TestCase):
for v2 in [True, False]:
self._VerifyValues(
- gen_nn_ops._max_pool_v2,
+ gen_nn_ops.max_pool_v2,
input_sizes=[1, 2, 2, 1],
ksize=[1, 1, 2, 1],
strides=[1, 1, 1, 1],
@@ -485,7 +485,7 @@ class PoolingTest(test.TestCase):
for v2 in [True, False]:
self._VerifyValues(
- gen_nn_ops._max_pool_v2,
+ gen_nn_ops.max_pool_v2,
input_sizes=[1, 4, 4, 1],
ksize=[1, 2, 2, 1],
strides=[1, 1, 2, 1],
@@ -494,7 +494,7 @@ class PoolingTest(test.TestCase):
use_gpu=use_gpu,
v2=v2)
self._VerifyValues(
- gen_nn_ops._max_pool_v2,
+ gen_nn_ops.max_pool_v2,
input_sizes=[1, 4, 4, 1],
ksize=[1, 2, 2, 1],
strides=[1, 2, 1, 1],
@@ -519,7 +519,7 @@ class PoolingTest(test.TestCase):
for v2 in [True, False]:
self._VerifyValues(
- gen_nn_ops._max_pool_v2,
+ gen_nn_ops.max_pool_v2,
input_sizes=[1, 4, 4, 4],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
@@ -554,7 +554,7 @@ class PoolingTest(test.TestCase):
for v2 in [True, False]:
self._VerifyValues(
- gen_nn_ops._max_pool_v2,
+ gen_nn_ops.max_pool_v2,
input_sizes=[1, 8, 8, 8],
ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1],
@@ -565,7 +565,7 @@ class PoolingTest(test.TestCase):
def _testMaxPoolEmptyInput(self, use_gpu):
self._VerifyValues(
- gen_nn_ops._max_pool_v2,
+ gen_nn_ops.max_pool_v2,
input_sizes=[0, 8, 8, 8],
ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1],
@@ -600,7 +600,7 @@ class PoolingTest(test.TestCase):
for v2 in [True, False]:
self._VerifyValues(
- gen_nn_ops._max_pool_v2,
+ gen_nn_ops.max_pool_v2,
input_sizes=[1, 1, 1, 10],
ksize=[1, 1, 1, 2],
strides=[1, 1, 1, 2],
@@ -626,7 +626,7 @@ class PoolingTest(test.TestCase):
for v2 in [True, False]:
self._VerifyValues(
- gen_nn_ops._max_pool_v2,
+ gen_nn_ops.max_pool_v2,
input_sizes=[1, 2, 2, 6],
ksize=[1, 1, 1, 3],
strides=[1, 1, 1, 3],
@@ -648,7 +648,7 @@ class PoolingTest(test.TestCase):
for v2 in [True, False]:
self._VerifyValues(
- gen_nn_ops._max_pool_v2,
+ gen_nn_ops.max_pool_v2,
input_sizes=[1, 7, 7, 1],
ksize=[1, 2, 2, 1],
strides=[1, 3, 3, 1],
@@ -689,7 +689,7 @@ class PoolingTest(test.TestCase):
for v2 in [True, False]:
self._VerifyValues(
- gen_nn_ops._max_pool_v2,
+ gen_nn_ops.max_pool_v2,
input_sizes=[1, 3, 3, 1],
ksize=[1, 1, 1, 1],
strides=[1, 2, 2, 1],
@@ -699,7 +699,7 @@ class PoolingTest(test.TestCase):
v2=v2)
self._VerifyValues(
- gen_nn_ops._max_pool_v2,
+ gen_nn_ops.max_pool_v2,
input_sizes=[1, 4, 4, 1],
ksize=[1, 1, 1, 1],
strides=[1, 2, 2, 1],
@@ -764,8 +764,8 @@ class PoolingTest(test.TestCase):
_, argmax_op = nn_ops.max_pool_with_argmax(t, ksize, strides, padding)
argmax = argmax_op.eval()
grad_in = constant_op.constant(tensor_output, shape=output_shape)
- out_op = gen_nn_ops._max_pool_grad_with_argmax(t, grad_in, argmax,
- ksize, strides, padding)
+ out_op = gen_nn_ops.max_pool_grad_with_argmax(t, grad_in, argmax, ksize,
+ strides, padding)
gpu_val = out_op.eval()
self.assertShapeEqual(gpu_val, out_op)
with self.test_session(use_gpu=False):
@@ -773,8 +773,8 @@ class PoolingTest(test.TestCase):
out_op = nn_ops.max_pool(t, ksize, strides, padding)
orig_out = out_op.eval()
grad_in = constant_op.constant(tensor_output, shape=output_shape)
- out_op = gen_nn_ops._max_pool_grad(t, orig_out, grad_in, ksize, strides,
- padding)
+ out_op = gen_nn_ops.max_pool_grad(t, orig_out, grad_in, ksize, strides,
+ padding)
cpu_val = out_op.eval()
self.assertShapeEqual(cpu_val, out_op)
# The CPU version accumulates its gradient on fp16, so it's less
@@ -793,7 +793,7 @@ class PoolingTest(test.TestCase):
_, argmax_op = nn_ops.max_pool_with_argmax(t, ksize, strides, padding)
argmax = argmax_op.eval()
grad_in = constant_op.constant(tensor_input, shape=input_shape)
- out_op = gen_nn_ops._max_pool_grad_grad_with_argmax(
+ out_op = gen_nn_ops.max_pool_grad_grad_with_argmax(
t, grad_in, argmax, ksize, strides, padding)
gpu_val = out_op.eval()
self.assertShapeEqual(gpu_val, out_op)
@@ -802,8 +802,8 @@ class PoolingTest(test.TestCase):
out_op = nn_ops.max_pool(t, ksize, strides, padding)
orig_out = out_op.eval()
grad_in = constant_op.constant(tensor_input, shape=input_shape)
- out_op = gen_nn_ops._max_pool_grad_grad(t, orig_out, grad_in, ksize,
- strides, padding)
+ out_op = gen_nn_ops.max_pool_grad_grad(t, orig_out, grad_in, ksize,
+ strides, padding)
cpu_val = out_op.eval()
self.assertShapeEqual(cpu_val, out_op)
# The CPU version accumulates its gradient on fp16, so it's less
@@ -842,7 +842,7 @@ class PoolingTest(test.TestCase):
t = constant_op.constant(tensor_input, shape=[1, 2, 2, 1])
argmax = constant_op.constant(
tensor_argmax, shape=[1, 2, 2, 1], dtype=dtypes.int64)
- out_op = gen_nn_ops._max_pool_grad_with_argmax(
+ out_op = gen_nn_ops.max_pool_grad_with_argmax(
orig_in,
t,
argmax,
@@ -865,7 +865,7 @@ class PoolingTest(test.TestCase):
t = constant_op.constant(tensor_input, shape=[1, 3, 3, 1])
argmax = constant_op.constant(
tensor_argmax, shape=[1, 2, 2, 1], dtype=dtypes.int64)
- out_op = gen_nn_ops._max_pool_grad_grad_with_argmax(
+ out_op = gen_nn_ops.max_pool_grad_grad_with_argmax(
orig_in,
t,
argmax,
@@ -1029,7 +1029,7 @@ class PoolingTest(test.TestCase):
self.assertLess(err, err_tolerance)
def _testMaxPoolGradValidPadding1_1(self, data_format, use_gpu):
- for pool_func in [gen_nn_ops._max_pool_v2, nn_ops.max_pool]:
+ for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
self._ConstructAndTestGradient(
pool_func,
input_sizes=[1, 3, 3, 1],
@@ -1043,7 +1043,7 @@ class PoolingTest(test.TestCase):
use_gpu=use_gpu)
def _testMaxPoolGradValidPadding2_1_6(self, data_format, use_gpu):
- for pool_func in [gen_nn_ops._max_pool_v2, nn_ops.max_pool]:
+ for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
self._ConstructAndTestGradient(
pool_func,
input_sizes=[2, 6, 6, 3],
@@ -1057,7 +1057,7 @@ class PoolingTest(test.TestCase):
use_gpu=use_gpu)
def _testMaxPoolGradValidPadding2_1_7(self, data_format, use_gpu):
- for pool_func in [gen_nn_ops._max_pool_v2, nn_ops.max_pool]:
+ for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
self._ConstructAndTestGradient(
pool_func,
input_sizes=[2, 7, 7, 3],
@@ -1071,7 +1071,7 @@ class PoolingTest(test.TestCase):
use_gpu=use_gpu)
def _testMaxPoolGradValidPadding1_2(self, data_format, use_gpu):
- for pool_func in [gen_nn_ops._max_pool_v2, nn_ops.max_pool]:
+ for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
self._ConstructAndTestGradient(
pool_func,
input_sizes=[1, 3, 3, 1],
@@ -1085,7 +1085,7 @@ class PoolingTest(test.TestCase):
use_gpu=use_gpu)
def _testMaxPoolGradValidPadding2_2(self, data_format, use_gpu):
- for pool_func in [gen_nn_ops._max_pool_v2, nn_ops.max_pool]:
+ for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
self._ConstructAndTestGradient(
pool_func,
input_sizes=[2, 2, 2, 3],
@@ -1099,7 +1099,7 @@ class PoolingTest(test.TestCase):
use_gpu=use_gpu)
def _testMaxPoolGradSamePadding1_1(self, data_format, use_gpu):
- for pool_func in [gen_nn_ops._max_pool_v2, nn_ops.max_pool]:
+ for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
self._ConstructAndTestGradient(
pool_func,
input_sizes=[2, 2, 4, 3],
@@ -1113,7 +1113,7 @@ class PoolingTest(test.TestCase):
use_gpu=use_gpu)
def _testMaxPoolGradSamePadding1_2(self, data_format, use_gpu):
- for pool_func in [gen_nn_ops._max_pool_v2, nn_ops.max_pool]:
+ for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
self._ConstructAndTestGradient(
pool_func,
input_sizes=[2, 2, 4, 3],
@@ -1127,7 +1127,7 @@ class PoolingTest(test.TestCase):
use_gpu=use_gpu)
def _testMaxPoolGradSamePadding2_1(self, data_format, use_gpu):
- for pool_func in [gen_nn_ops._max_pool_v2, nn_ops.max_pool]:
+ for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
self._ConstructAndTestGradient(
pool_func,
input_sizes=[2, 2, 4, 3],
@@ -1141,7 +1141,7 @@ class PoolingTest(test.TestCase):
use_gpu=use_gpu)
def _testMaxPoolGradSamePadding2_2(self, data_format, use_gpu):
- for pool_func in [gen_nn_ops._max_pool_v2, nn_ops.max_pool]:
+ for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
self._ConstructAndTestGradient(
pool_func,
input_sizes=[2, 2, 4, 3],
@@ -1155,7 +1155,7 @@ class PoolingTest(test.TestCase):
use_gpu=use_gpu)
def _testMaxPoolGradSamePadding3_1(self, data_format, use_gpu):
- for pool_func in [gen_nn_ops._max_pool_v2, nn_ops.max_pool]:
+ for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
self._ConstructAndTestGradient(
pool_func,
input_sizes=[1, 7, 7, 1],
@@ -1199,7 +1199,7 @@ class PoolingTest(test.TestCase):
Returns:
A Tensor.
"""
- pool_func = gen_nn_ops.max_pool_grad_v2 if v2 else gen_nn_ops._max_pool_grad
+ pool_func = gen_nn_ops.max_pool_grad_v2 if v2 else gen_nn_ops.max_pool_grad
return pool_func(orig_input, orig_output, grad,
[1, window_rows, window_cols, 1],
[1, row_stride, col_stride, 1], padding)
@@ -1208,7 +1208,7 @@ class PoolingTest(test.TestCase):
expected_input_backprop, input_sizes, output_sizes,
window_rows, window_cols, row_stride, col_stride,
padding, use_gpu, v2):
- pool_func = gen_nn_ops._max_pool_v2 if v2 else nn_ops.max_pool
+ pool_func = gen_nn_ops.max_pool_v2 if v2 else nn_ops.max_pool
with self.test_session(use_gpu=use_gpu):
input_tensor = constant_op.constant(input_data, shape=input_sizes)
output_tensor = pool_func(input_tensor, [1, window_rows, window_cols, 1],
@@ -1504,7 +1504,7 @@ class PoolingTest(test.TestCase):
self._testMaxPoolGradDirectWithNans2_2()
def _testMaxPoolGradGradValidPadding1_1(self, data_format, use_gpu):
- for pool_func in [gen_nn_ops._max_pool_v2, nn_ops.max_pool]:
+ for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
self._ConstructAndTestSecondGradient(
pool_func,
input_sizes=[1, 3, 3, 1],
@@ -1518,7 +1518,7 @@ class PoolingTest(test.TestCase):
use_gpu=use_gpu)
def _testMaxPoolGradGradValidPadding2_1_6(self, data_format, use_gpu):
- for pool_func in [gen_nn_ops._max_pool_v2, nn_ops.max_pool]:
+ for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
self._ConstructAndTestSecondGradient(
pool_func,
input_sizes=[2, 6, 6, 3],
@@ -1532,7 +1532,7 @@ class PoolingTest(test.TestCase):
use_gpu=use_gpu)
def _testMaxPoolGradGradValidPadding2_1_7(self, data_format, use_gpu):
- for pool_func in [gen_nn_ops._max_pool_v2, nn_ops.max_pool]:
+ for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
self._ConstructAndTestSecondGradient(
pool_func,
input_sizes=[2, 7, 7, 3],
@@ -1546,7 +1546,7 @@ class PoolingTest(test.TestCase):
use_gpu=use_gpu)
def _testMaxPoolGradGradValidPadding2_2(self, data_format, use_gpu):
- for pool_func in [gen_nn_ops._max_pool_v2, nn_ops.max_pool]:
+ for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
self._ConstructAndTestSecondGradient(
pool_func,
input_sizes=[2, 2, 2, 3],
@@ -1560,7 +1560,7 @@ class PoolingTest(test.TestCase):
use_gpu=use_gpu)
def _testMaxPoolGradGradSamePadding1_1(self, data_format, use_gpu):
- for pool_func in [gen_nn_ops._max_pool_v2, nn_ops.max_pool]:
+ for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
self._ConstructAndTestSecondGradient(
pool_func,
input_sizes=[2, 2, 4, 3],
@@ -1574,7 +1574,7 @@ class PoolingTest(test.TestCase):
use_gpu=use_gpu)
def _testMaxPoolGradGradSamePadding2_1(self, data_format, use_gpu):
- for pool_func in [gen_nn_ops._max_pool_v2, nn_ops.max_pool]:
+ for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
self._ConstructAndTestSecondGradient(
pool_func,
input_sizes=[2, 2, 4, 3],
@@ -1588,7 +1588,7 @@ class PoolingTest(test.TestCase):
use_gpu=use_gpu)
def _testMaxPoolGradGradSamePadding2_2(self, data_format, use_gpu):
- for pool_func in [gen_nn_ops._max_pool_v2, nn_ops.max_pool]:
+ for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
self._ConstructAndTestSecondGradient(
pool_func,
input_sizes=[2, 2, 4, 3],
@@ -1602,7 +1602,7 @@ class PoolingTest(test.TestCase):
use_gpu=use_gpu)
def _testMaxPoolGradGradSamePadding3_1(self, data_format, use_gpu):
- for pool_func in [gen_nn_ops._max_pool_v2, nn_ops.max_pool]:
+ for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
self._ConstructAndTestSecondGradient(
pool_func,
input_sizes=[1, 7, 7, 1],
@@ -1644,7 +1644,7 @@ class PoolingTest(test.TestCase):
Returns:
A Tensor.
"""
- return gen_nn_ops._max_pool_grad_grad(
+ return gen_nn_ops.max_pool_grad_grad(
orig_input, orig_output, grad, [1, window_rows, window_cols, 1],
[1, row_stride, col_stride, 1], padding)
diff --git a/tensorflow/python/kernel_tests/save_restore_ops_test.py b/tensorflow/python/kernel_tests/save_restore_ops_test.py
index 1bdfa9ebd8..cb9aa1e34d 100644
--- a/tensorflow/python/kernel_tests/save_restore_ops_test.py
+++ b/tensorflow/python/kernel_tests/save_restore_ops_test.py
@@ -31,11 +31,10 @@ class ShardedFileOpsTest(test.TestCase):
with session.Session(
target="", config=config_pb2.ConfigProto(device_count={"CPU": 2})):
self.assertEqual(
- gen_io_ops._sharded_filename("foo", 4, 100).eval(),
+ gen_io_ops.sharded_filename("foo", 4, 100).eval(),
b"foo-00004-of-00100")
self.assertEqual(
- gen_io_ops._sharded_filespec("foo", 100).eval(),
- b"foo-?????-of-00100")
+ gen_io_ops.sharded_filespec("foo", 100).eval(), b"foo-?????-of-00100")
class ShapeInferenceTest(test.TestCase):
@@ -53,7 +52,7 @@ class ShapeInferenceTest(test.TestCase):
[dtypes.float32, dtypes.float32])
def testRestoreSlice(self):
- op = gen_io_ops._restore_slice("model", "var", "3 4 0,1:-", dtypes.float32)
+ op = gen_io_ops.restore_slice("model", "var", "3 4 0,1:-", dtypes.float32)
self.assertEqual([1, 4], op.get_shape())
diff --git a/tensorflow/python/kernel_tests/scalar_test.py b/tensorflow/python/kernel_tests/scalar_test.py
index e65241981e..0d8fd23294 100644
--- a/tensorflow/python/kernel_tests/scalar_test.py
+++ b/tensorflow/python/kernel_tests/scalar_test.py
@@ -92,11 +92,11 @@ class ScalarTest(test.TestCase):
self.check(array_ops.reshape, (7, 1), 'sizes input must be 1-D', [7])
def testShardedFilename(self):
- self.check(gen_io_ops._sharded_filename, ('foo', 4, [100]),
+ self.check(gen_io_ops.sharded_filename, ('foo', 4, [100]),
'must be a scalar', b'foo-00004-of-00100')
def testShardedFilespec(self):
- self.check(gen_io_ops._sharded_filespec, ('foo', [100]), 'must be a scalar',
+ self.check(gen_io_ops.sharded_filespec, ('foo', [100]), 'must be a scalar',
b'foo-?????-of-00100')
def testUnsortedSegmentSum(self):
diff --git a/tensorflow/python/kernel_tests/spacetobatch_op_test.py b/tensorflow/python/kernel_tests/spacetobatch_op_test.py
index b943dfa4e5..2a9232b6ae 100644
--- a/tensorflow/python/kernel_tests/spacetobatch_op_test.py
+++ b/tensorflow/python/kernel_tests/spacetobatch_op_test.py
@@ -86,11 +86,11 @@ class CppOpImpl(object):
@staticmethod
def space_to_batch(*args, **kwargs):
- return gen_array_ops._space_to_batch(*args, **kwargs)
+ return gen_array_ops.space_to_batch(*args, **kwargs)
@staticmethod
def batch_to_space(*args, **kwargs):
- return gen_array_ops._batch_to_space(*args, **kwargs)
+ return gen_array_ops.batch_to_space(*args, **kwargs)
class SpaceToBatchTest(test.TestCase, PythonOpImpl):
diff --git a/tensorflow/python/kernel_tests/sparse_xent_op_test.py b/tensorflow/python/kernel_tests/sparse_xent_op_test.py
index cd5b711a0e..a841fe83a7 100644
--- a/tensorflow/python/kernel_tests/sparse_xent_op_test.py
+++ b/tensorflow/python/kernel_tests/sparse_xent_op_test.py
@@ -64,7 +64,7 @@ class SparseXentTest(test.TestCase):
def _testXent(self, np_features, np_labels):
np_loss, np_backprop = self._npXent(np_features, np_labels)
with self.test_session(use_gpu=True) as sess:
- loss, backprop = gen_nn_ops._sparse_softmax_cross_entropy_with_logits(
+ loss, backprop = gen_nn_ops.sparse_softmax_cross_entropy_with_logits(
np_features, np_labels)
tf_loss, tf_backprop = sess.run([loss, backprop])
self.assertAllCloseAccordingToType(np_loss, tf_loss)
@@ -73,7 +73,7 @@ class SparseXentTest(test.TestCase):
def testSingleClass(self):
for label_dtype in np.int32, np.int64:
with self.test_session(use_gpu=True) as sess:
- loss, backprop = gen_nn_ops._sparse_softmax_cross_entropy_with_logits(
+ loss, backprop = gen_nn_ops.sparse_softmax_cross_entropy_with_logits(
np.array([[1.], [-1.], [0.]]).astype(np.float32),
np.array([0, 0, 0]).astype(label_dtype))
tf_loss, tf_backprop = sess.run([loss, backprop])
@@ -87,8 +87,9 @@ class SparseXentTest(test.TestCase):
if test.is_built_with_cuda() and test.is_gpu_available():
with self.test_session(use_gpu=True) as sess:
- loss, backprop = (gen_nn_ops._sparse_softmax_cross_entropy_with_logits(
- features, labels))
+ loss, backprop = (
+ gen_nn_ops.sparse_softmax_cross_entropy_with_logits(
+ features, labels))
tf_loss, tf_backprop = sess.run([loss, backprop])
self.assertAllClose(
[[np.nan] * 4, [0.25, 0.25, 0.25, -0.75],
@@ -100,8 +101,8 @@ class SparseXentTest(test.TestCase):
[np.nan, 1.3862, 3.4420, np.nan], tf_loss, rtol=1e-3, atol=1e-3)
with self.test_session(use_gpu=False) as sess:
- loss, backprop = (gen_nn_ops._sparse_softmax_cross_entropy_with_logits(
- features, labels))
+ loss, backprop = (
+ gen_nn_ops.sparse_softmax_cross_entropy_with_logits(features, labels))
with self.assertRaisesOpError("Received a label value of"):
sess.run([loss, backprop])
diff --git a/tensorflow/python/kernel_tests/stack_ops_test.py b/tensorflow/python/kernel_tests/stack_ops_test.py
index aa409336f5..afd2eaffab 100644
--- a/tensorflow/python/kernel_tests/stack_ops_test.py
+++ b/tensorflow/python/kernel_tests/stack_ops_test.py
@@ -34,11 +34,11 @@ class StackOpTest(test.TestCase):
def _testStackPushPop(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
- h = gen_data_flow_ops._stack_v2(
+ h = gen_data_flow_ops.stack_v2(
-1, elem_type=dtypes.float32, stack_name="foo")
- c = gen_data_flow_ops._stack_push_v2(h, [[4.0, 5.0]])
+ c = gen_data_flow_ops.stack_push_v2(h, [[4.0, 5.0]])
with ops.control_dependencies([c]):
- c1 = gen_data_flow_ops._stack_pop_v2(h, dtypes.float32)
+ c1 = gen_data_flow_ops.stack_pop_v2(h, dtypes.float32)
self.assertAllClose([[4.0, 5.0]], c1.eval())
def testStackPushPop(self):
@@ -49,11 +49,11 @@ class StackOpTest(test.TestCase):
with self.test_session(use_gpu=use_gpu):
a = np.arange(2000)
x = constant_op.constant(a, dtype=dtypes.float32)
- h = gen_data_flow_ops._stack_v2(
+ h = gen_data_flow_ops.stack_v2(
-1, elem_type=dtypes.float32, stack_name="foo")
- c = gen_data_flow_ops._stack_push_v2(h, x, swap_memory=True)
+ c = gen_data_flow_ops.stack_push_v2(h, x, swap_memory=True)
with ops.control_dependencies([c]):
- c1 = gen_data_flow_ops._stack_pop_v2(h, dtypes.float32)
+ c1 = gen_data_flow_ops.stack_pop_v2(h, dtypes.float32)
self.assertAllClose(a, c1.eval())
def testStackPushPopSwap(self):
@@ -63,7 +63,7 @@ class StackOpTest(test.TestCase):
def _testStackWhileSwap(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
n = constant_op.constant(0)
- h = gen_data_flow_ops._stack_v2(
+ h = gen_data_flow_ops.stack_v2(
-1, elem_type=dtypes.float32, stack_name="foo")
def c(x):
@@ -72,7 +72,7 @@ class StackOpTest(test.TestCase):
def b(x):
with ops.control_dependencies([x]):
a = constant_op.constant(np.ones(2000), dtype=dtypes.float32)
- v = gen_data_flow_ops._stack_push_v2(h, a, swap_memory=True)
+ v = gen_data_flow_ops.stack_push_v2(h, a, swap_memory=True)
with ops.control_dependencies([v]):
return math_ops.add(x, 1)
@@ -86,7 +86,7 @@ class StackOpTest(test.TestCase):
def b1(x, y):
nx = math_ops.subtract(x, 1)
- ny = y + gen_data_flow_ops._stack_pop_v2(h, dtypes.float32)
+ ny = y + gen_data_flow_ops.stack_pop_v2(h, dtypes.float32)
return [nx, ny]
_, ry = control_flow_ops.while_loop(
@@ -99,16 +99,16 @@ class StackOpTest(test.TestCase):
def _testMultiStack(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
- h1 = gen_data_flow_ops._stack_v2(
+ h1 = gen_data_flow_ops.stack_v2(
-1, elem_type=dtypes.float32, stack_name="foo")
- c1 = gen_data_flow_ops._stack_push_v2(h1, 4.0)
+ c1 = gen_data_flow_ops.stack_push_v2(h1, 4.0)
with ops.control_dependencies([c1]):
- c1 = gen_data_flow_ops._stack_pop_v2(h1, dtypes.float32)
- h2 = gen_data_flow_ops._stack_v2(
+ c1 = gen_data_flow_ops.stack_pop_v2(h1, dtypes.float32)
+ h2 = gen_data_flow_ops.stack_v2(
-1, elem_type=dtypes.float32, stack_name="bar")
- c2 = gen_data_flow_ops._stack_push_v2(h2, 5.0)
+ c2 = gen_data_flow_ops.stack_push_v2(h2, 5.0)
with ops.control_dependencies([c2]):
- c2 = gen_data_flow_ops._stack_pop_v2(h2, dtypes.float32)
+ c2 = gen_data_flow_ops.stack_pop_v2(h2, dtypes.float32)
r = c1 + c2
self.assertAllClose(9.0, r.eval())
@@ -119,17 +119,17 @@ class StackOpTest(test.TestCase):
def _testSameNameStacks(self, use_gpu):
"""Different stacks with the same name do not interfere."""
with self.test_session(use_gpu=use_gpu) as sess:
- h1 = gen_data_flow_ops._stack_v2(
+ h1 = gen_data_flow_ops.stack_v2(
-1, elem_type=dtypes.float32, stack_name="foo")
- h2 = gen_data_flow_ops._stack_v2(
+ h2 = gen_data_flow_ops.stack_v2(
-1, elem_type=dtypes.float32, stack_name="foo")
- c1 = gen_data_flow_ops._stack_push_v2(h1, 4.0)
+ c1 = gen_data_flow_ops.stack_push_v2(h1, 4.0)
with ops.control_dependencies([c1]):
- c2 = gen_data_flow_ops._stack_push_v2(h2, 5.0)
+ c2 = gen_data_flow_ops.stack_push_v2(h2, 5.0)
with ops.control_dependencies([c2]):
- pop1 = gen_data_flow_ops._stack_pop_v2(h1, dtypes.float32)
- pop2 = gen_data_flow_ops._stack_pop_v2(h2, dtypes.float32)
+ pop1 = gen_data_flow_ops.stack_pop_v2(h1, dtypes.float32)
+ pop2 = gen_data_flow_ops.stack_pop_v2(h2, dtypes.float32)
out1, out2 = sess.run([pop1, pop2])
self.assertAllClose(out1, 4.0)
@@ -141,9 +141,9 @@ class StackOpTest(test.TestCase):
def _testCloseStack(self, use_gpu):
with self.test_session(use_gpu=use_gpu) as sess:
- h = gen_data_flow_ops._stack_v2(
+ h = gen_data_flow_ops.stack_v2(
-1, elem_type=dtypes.float32, stack_name="foo")
- c1 = gen_data_flow_ops._stack_close_v2(h)
+ c1 = gen_data_flow_ops.stack_close_v2(h)
sess.run(c1)
def testCloseStack(self):
@@ -152,11 +152,11 @@ class StackOpTest(test.TestCase):
def _testPushCloseStack(self, use_gpu):
with self.test_session(use_gpu=use_gpu) as sess:
- h = gen_data_flow_ops._stack_v2(
+ h = gen_data_flow_ops.stack_v2(
-1, elem_type=dtypes.float32, stack_name="foo")
- c = gen_data_flow_ops._stack_push_v2(h, [[4.0, 5.0]])
+ c = gen_data_flow_ops.stack_push_v2(h, [[4.0, 5.0]])
with ops.control_dependencies([c]):
- c1 = gen_data_flow_ops._stack_close_v2(h)
+ c1 = gen_data_flow_ops.stack_close_v2(h)
sess.run(c1)
def testPushCloseStack(self):
@@ -170,9 +170,9 @@ class StackOpRefTest(test.TestCase):
def _testStackPushPop(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
h = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo")
- c = gen_data_flow_ops._stack_push(h, [[4.0, 5.0]])
+ c = gen_data_flow_ops.stack_push(h, [[4.0, 5.0]])
with ops.control_dependencies([c]):
- c1 = gen_data_flow_ops._stack_pop(h, dtypes.float32)
+ c1 = gen_data_flow_ops.stack_pop(h, dtypes.float32)
self.assertAllClose([[4.0, 5.0]], c1.eval())
def testStackPushPop(self):
@@ -184,9 +184,9 @@ class StackOpRefTest(test.TestCase):
a = np.arange(2000)
x = constant_op.constant(a, dtype=dtypes.float32)
h = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo")
- c = gen_data_flow_ops._stack_push(h, x, swap_memory=True)
+ c = gen_data_flow_ops.stack_push(h, x, swap_memory=True)
with ops.control_dependencies([c]):
- c1 = gen_data_flow_ops._stack_pop(h, dtypes.float32)
+ c1 = gen_data_flow_ops.stack_pop(h, dtypes.float32)
self.assertAllClose(a, c1.eval())
def testStackPushPopSwap(self):
@@ -196,13 +196,13 @@ class StackOpRefTest(test.TestCase):
def _testMultiStack(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
h1 = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo")
- c1 = gen_data_flow_ops._stack_push(h1, 4.0)
+ c1 = gen_data_flow_ops.stack_push(h1, 4.0)
with ops.control_dependencies([c1]):
- c1 = gen_data_flow_ops._stack_pop(h1, dtypes.float32)
+ c1 = gen_data_flow_ops.stack_pop(h1, dtypes.float32)
h2 = gen_data_flow_ops._stack(dtypes.float32, stack_name="bar")
- c2 = gen_data_flow_ops._stack_push(h2, 5.0)
+ c2 = gen_data_flow_ops.stack_push(h2, 5.0)
with ops.control_dependencies([c2]):
- c2 = gen_data_flow_ops._stack_pop(h2, dtypes.float32)
+ c2 = gen_data_flow_ops.stack_pop(h2, dtypes.float32)
r = c1 + c2
self.assertAllClose(9.0, r.eval())
@@ -217,7 +217,7 @@ class StackOpRefTest(test.TestCase):
def b(x):
with ops.control_dependencies([x]):
a = constant_op.constant(np.ones(2000), dtype=dtypes.float32)
- v = gen_data_flow_ops._stack_push(h, a, swap_memory=True)
+ v = gen_data_flow_ops.stack_push(h, a, swap_memory=True)
with ops.control_dependencies([v]):
return math_ops.add(x, 1)
@@ -231,7 +231,7 @@ class StackOpRefTest(test.TestCase):
def b1(x, y):
nx = math_ops.subtract(x, 1)
- ny = y + gen_data_flow_ops._stack_pop(h, dtypes.float32)
+ ny = y + gen_data_flow_ops.stack_pop(h, dtypes.float32)
return [nx, ny]
_, ry = control_flow_ops.while_loop(
@@ -249,9 +249,9 @@ class StackOpRefTest(test.TestCase):
def _testSameNameStacks(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
h1 = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo")
- c1 = gen_data_flow_ops._stack_push(h1, 4.0)
+ c1 = gen_data_flow_ops.stack_push(h1, 4.0)
h2 = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo")
- c2 = gen_data_flow_ops._stack_push(h2, 5.0)
+ c2 = gen_data_flow_ops.stack_push(h2, 5.0)
_ = c1 + c2
self.assertNotEqual(h1.eval()[1], h2.eval()[1])
@@ -262,7 +262,7 @@ class StackOpRefTest(test.TestCase):
def _testCloseStack(self, use_gpu):
with self.test_session(use_gpu=use_gpu) as sess:
h = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo")
- c1 = gen_data_flow_ops._stack_close(h)
+ c1 = gen_data_flow_ops.stack_close(h)
sess.run(c1)
def testCloseStack(self):
@@ -272,9 +272,9 @@ class StackOpRefTest(test.TestCase):
def _testPushCloseStack(self, use_gpu):
with self.test_session(use_gpu=use_gpu) as sess:
h = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo")
- c = gen_data_flow_ops._stack_push(h, [[4.0, 5.0]])
+ c = gen_data_flow_ops.stack_push(h, [[4.0, 5.0]])
with ops.control_dependencies([c]):
- c1 = gen_data_flow_ops._stack_close(h)
+ c1 = gen_data_flow_ops.stack_close(h)
sess.run(c1)
def testPushCloseStack(self):
diff --git a/tensorflow/python/kernel_tests/tensor_array_ops_test.py b/tensorflow/python/kernel_tests/tensor_array_ops_test.py
index aad2443eea..8f09f3d78b 100644
--- a/tensorflow/python/kernel_tests/tensor_array_ops_test.py
+++ b/tensorflow/python/kernel_tests/tensor_array_ops_test.py
@@ -437,7 +437,7 @@ class TensorArrayTest(test.TestCase):
# Test reading wrong datatype, which is only possible in graph mode
if context.in_graph_mode():
- r0_bad = gen_data_flow_ops._tensor_array_read_v3(
+ r0_bad = gen_data_flow_ops.tensor_array_read_v3(
handle=w0.handle, index=0, dtype=dtypes.float64, flow_in=w0.flow)
with self.assertRaisesOpError(
"TensorArray dtype is float but Op requested dtype double."):
diff --git a/tensorflow/python/kernel_tests/unique_op_test.py b/tensorflow/python/kernel_tests/unique_op_test.py
index 6366d2e181..173d95b258 100644
--- a/tensorflow/python/kernel_tests/unique_op_test.py
+++ b/tensorflow/python/kernel_tests/unique_op_test.py
@@ -66,9 +66,9 @@ class UniqueTest(test.TestCase):
for dtype in [np.int32, np.int64]:
x = np.array([[1, 0, 0], [1, 0, 0], [2, 0, 0]])
with self.test_session() as sess:
- y0, idx0 = gen_array_ops._unique_v2(x, axis=np.array([0], dtype))
+ y0, idx0 = gen_array_ops.unique_v2(x, axis=np.array([0], dtype))
tf_y0, tf_idx0 = sess.run([y0, idx0])
- y1, idx1 = gen_array_ops._unique_v2(x, axis=np.array([1], dtype))
+ y1, idx1 = gen_array_ops.unique_v2(x, axis=np.array([1], dtype))
tf_y1, tf_idx1 = sess.run([y1, idx1])
self.assertAllEqual(tf_y0, np.array([[1, 0, 0], [2, 0, 0]]))
self.assertAllEqual(tf_idx0, np.array([0, 0, 1]))
@@ -80,7 +80,7 @@ class UniqueTest(test.TestCase):
# by default, the axis will be wrapped to allow `axis=None`.
x = np.random.randint(2, high=10, size=7000)
with self.test_session() as sess:
- y, idx = gen_array_ops._unique_v2(x, axis=np.array([], np.int32))
+ y, idx = gen_array_ops.unique_v2(x, axis=np.array([], np.int32))
tf_y, tf_idx = sess.run([y, idx])
self.assertEqual(len(x), len(tf_idx))
diff --git a/tensorflow/python/kernel_tests/variable_ops_test.py b/tensorflow/python/kernel_tests/variable_ops_test.py
index 79071029fd..cf369c0718 100644
--- a/tensorflow/python/kernel_tests/variable_ops_test.py
+++ b/tensorflow/python/kernel_tests/variable_ops_test.py
@@ -165,26 +165,26 @@ class VariableOpTest(test.TestCase):
def testTemporaryVariable(self):
with self.test_session(use_gpu=True):
- var = gen_state_ops._temporary_variable(
+ var = gen_state_ops.temporary_variable(
[1, 2], dtypes.float32, var_name="foo")
var = state_ops.assign(var, [[4.0, 5.0]])
var = state_ops.assign_add(var, [[6.0, 7.0]])
- final = gen_state_ops._destroy_temporary_variable(var, var_name="foo")
+ final = gen_state_ops.destroy_temporary_variable(var, var_name="foo")
self.assertAllClose([[10.0, 12.0]], final.eval())
def testDestroyNonexistentTemporaryVariable(self):
with self.test_session(use_gpu=True):
- var = gen_state_ops._temporary_variable([1, 2], dtypes.float32)
- final = gen_state_ops._destroy_temporary_variable(var, var_name="bad")
+ var = gen_state_ops.temporary_variable([1, 2], dtypes.float32)
+ final = gen_state_ops.destroy_temporary_variable(var, var_name="bad")
with self.assertRaises(errors.NotFoundError):
final.eval()
def testDuplicateTemporaryVariable(self):
with self.test_session(use_gpu=True):
- var1 = gen_state_ops._temporary_variable(
+ var1 = gen_state_ops.temporary_variable(
[1, 2], dtypes.float32, var_name="dup")
var1 = state_ops.assign(var1, [[1.0, 2.0]])
- var2 = gen_state_ops._temporary_variable(
+ var2 = gen_state_ops.temporary_variable(
[1, 2], dtypes.float32, var_name="dup")
var2 = state_ops.assign(var2, [[3.0, 4.0]])
final = var1 + var2
@@ -193,25 +193,25 @@ class VariableOpTest(test.TestCase):
def testDestroyTemporaryVariableTwice(self):
with self.test_session(use_gpu=True):
- var = gen_state_ops._temporary_variable([1, 2], dtypes.float32)
- val1 = gen_state_ops._destroy_temporary_variable(var, var_name="dup")
- val2 = gen_state_ops._destroy_temporary_variable(var, var_name="dup")
+ var = gen_state_ops.temporary_variable([1, 2], dtypes.float32)
+ val1 = gen_state_ops.destroy_temporary_variable(var, var_name="dup")
+ val2 = gen_state_ops.destroy_temporary_variable(var, var_name="dup")
final = val1 + val2
with self.assertRaises(errors.NotFoundError):
final.eval()
def testTemporaryVariableNoLeak(self):
with self.test_session(use_gpu=True):
- var = gen_state_ops._temporary_variable(
+ var = gen_state_ops.temporary_variable(
[1, 2], dtypes.float32, var_name="bar")
final = array_ops.identity(var)
final.eval()
def testTwoTemporaryVariablesNoLeaks(self):
with self.test_session(use_gpu=True):
- var1 = gen_state_ops._temporary_variable(
+ var1 = gen_state_ops.temporary_variable(
[1, 2], dtypes.float32, var_name="var1")
- var2 = gen_state_ops._temporary_variable(
+ var2 = gen_state_ops.temporary_variable(
[1, 2], dtypes.float32, var_name="var2")
final = var1 + var2
final.eval()
diff --git a/tensorflow/python/kernel_tests/variables_test.py b/tensorflow/python/kernel_tests/variables_test.py
index b16c8c002c..27599868b7 100644
--- a/tensorflow/python/kernel_tests/variables_test.py
+++ b/tensorflow/python/kernel_tests/variables_test.py
@@ -687,7 +687,7 @@ class VariableContainerTest(test.TestCase):
v1 = variables.Variable([1])
with ops.container("l2"):
v2 = variables.Variable([2])
- special_v = gen_state_ops._variable(
+ special_v = gen_state_ops.variable(
shape=[1],
dtype=dtypes.float32,
name="VariableInL3",
diff --git a/tensorflow/python/kernel_tests/xent_op_test.py b/tensorflow/python/kernel_tests/xent_op_test.py
index e152f02d8e..e3e120a4eb 100644
--- a/tensorflow/python/kernel_tests/xent_op_test.py
+++ b/tensorflow/python/kernel_tests/xent_op_test.py
@@ -48,7 +48,7 @@ class XentTest(test.TestCase):
def _testXent(self, np_features, np_labels, use_gpu=False):
np_loss, np_backprop = self._npXent(np_features, np_labels)
with self.test_session(use_gpu=use_gpu) as sess:
- loss, backprop = gen_nn_ops._softmax_cross_entropy_with_logits(
+ loss, backprop = gen_nn_ops.softmax_cross_entropy_with_logits(
np_features, np_labels)
tf_loss, tf_backprop = sess.run([loss, backprop])
self.assertAllCloseAccordingToType(np_loss, tf_loss)
@@ -71,7 +71,7 @@ class XentTest(test.TestCase):
def _testSingleClass(self, use_gpu=False):
for dtype in np.float16, np.float32:
with self.test_session(use_gpu=use_gpu) as sess:
- loss, backprop = gen_nn_ops._softmax_cross_entropy_with_logits(
+ loss, backprop = gen_nn_ops.softmax_cross_entropy_with_logits(
np.array([[1.], [-1.], [0.]]).astype(dtype),
np.array([[-1.], [0.], [1.]]).astype(dtype))
tf_loss, tf_backprop = sess.run([loss, backprop])
@@ -89,7 +89,7 @@ class XentTest(test.TestCase):
np_labels = np.array([[[0., 0., 0., 1.]], [[0., .5, .5,
0.]]]).astype(dtype)
self.assertRaisesRegexp(ValueError, "must be rank 2",
- gen_nn_ops._softmax_cross_entropy_with_logits,
+ gen_nn_ops.softmax_cross_entropy_with_logits,
np_features, np_labels)
def testNpXent(self):
@@ -131,14 +131,14 @@ class XentTest(test.TestCase):
def testShapeMismatch(self):
with self.test_session():
with self.assertRaises(ValueError):
- gen_nn_ops._softmax_cross_entropy_with_logits(
+ gen_nn_ops.softmax_cross_entropy_with_logits(
[[0., 1.], [2., 3.]], [[0., 1., 0.], [1., 0., 0.]])
def testNotMatrix(self):
with self.test_session():
with self.assertRaises(ValueError):
- gen_nn_ops._softmax_cross_entropy_with_logits([0., 1., 2., 3.],
- [0., 1., 0., 1.])
+ gen_nn_ops.softmax_cross_entropy_with_logits([0., 1., 2., 3.],
+ [0., 1., 0., 1.])
def testHalf(self):
self._testAll(