author    Justine Tunney <jart@google.com>    2016-12-14 16:30:24 -0800
committer TensorFlower Gardener <gardener@tensorflow.org>    2016-12-14 16:43:13 -0800
commit    5866e065bc95c1d7de8a27413b368016941889a6 (patch)
tree      55b7db600e38b3a799ab39053cd99e61204f840b
parent    38a664cd961762e64899187a31a1b86cbe5a992e (diff)
Remove hourglass imports from kernel_tests
Change: 142080137
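
An "hourglass" import is a module under tensorflow/python that imports the whole `tensorflow` package and reaches through it, so the dependency graph narrows to a single waist and every BUILD target ends up depending on all of TensorFlow. This change replaces such imports with direct imports of the specific op modules and adds the matching fine-grained deps to each BUILD target. A minimal sketch of the rewrite, mirroring the rnn.py hunk below (cell_fw, cell_bw, and inputs are illustrative placeholders, not names from this diff):

    # Before: hourglass import; the module pulls in all of TensorFlow.
    import tensorflow as tf
    outputs, state_fw, state_bw = tf.nn.bidirectional_rnn(cell_fw, cell_bw, inputs)

    # After: import only the modules actually used, matching the
    # fine-grained BUILD deps added in this change.
    from tensorflow.python.ops import array_ops
    from tensorflow.python.ops import rnn
    outputs, state_fw, state_bw = rnn.bidirectional_rnn(cell_fw, cell_bw, inputs)
    merged = array_ops.concat_v2(outputs, 2)  # concat outputs, as in the hunk
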
-rw-r--r--  tensorflow/contrib/bayesflow/BUILD | 12
-rw-r--r--  tensorflow/contrib/copy_graph/BUILD | 6
-rw-r--r--  tensorflow/contrib/crf/BUILD | 7
-rw-r--r--  tensorflow/contrib/cudnn_rnn/BUILD | 10
-rw-r--r--  tensorflow/contrib/distributions/BUILD | 21
-rw-r--r--  tensorflow/contrib/factorization/BUILD | 9
-rw-r--r--  tensorflow/contrib/ffmpeg/BUILD | 4
-rw-r--r--  tensorflow/contrib/framework/BUILD | 23
-rw-r--r--  tensorflow/contrib/graph_editor/BUILD | 4
-rw-r--r--  tensorflow/contrib/grid_rnn/BUILD | 6
-rw-r--r--  tensorflow/contrib/image/BUILD | 10
-rw-r--r--  tensorflow/contrib/input_pipeline/BUILD | 11
-rw-r--r--  tensorflow/contrib/integrate/BUILD | 8
-rw-r--r--  tensorflow/contrib/labeled_tensor/BUILD | 23
-rw-r--r--  tensorflow/contrib/layers/BUILD | 29
-rw-r--r--  tensorflow/contrib/layers/python/layers/target_column.py | 105
-rw-r--r--  tensorflow/contrib/learn/BUILD | 63
-rw-r--r--  tensorflow/contrib/learn/python/learn/datasets/BUILD | 5
-rw-r--r--  tensorflow/contrib/legacy_seq2seq/BUILD | 12
-rw-r--r--  tensorflow/contrib/linalg/BUILD | 9
-rw-r--r--  tensorflow/contrib/linear_optimizer/BUILD | 17
-rw-r--r--  tensorflow/contrib/lookup/BUILD | 11
-rw-r--r--  tensorflow/contrib/losses/BUILD | 9
-rw-r--r--  tensorflow/contrib/metrics/BUILD | 16
-rw-r--r--  tensorflow/contrib/ndlstm/BUILD | 2
-rw-r--r--  tensorflow/contrib/opt/BUILD | 10
-rw-r--r--  tensorflow/contrib/quantization/BUILD | 6
-rw-r--r--  tensorflow/contrib/rnn/BUILD | 18
-rw-r--r--  tensorflow/contrib/rnn/python/ops/rnn.py | 10
-rw-r--r--  tensorflow/contrib/seq2seq/BUILD | 11
-rw-r--r--  tensorflow/contrib/session_bundle/BUILD | 18
-rw-r--r--  tensorflow/contrib/tensor_forest/BUILD | 37
-rw-r--r--  tensorflow/contrib/tensor_forest/hybrid/BUILD | 47
-rw-r--r--  tensorflow/contrib/testing/BUILD | 4
-rw-r--r--  tensorflow/contrib/tfprof/BUILD | 1
-rw-r--r--  tensorflow/contrib/training/BUILD | 18
-rw-r--r--  tensorflow/contrib/util/BUILD | 6
-rw-r--r--  tensorflow/core/BUILD | 2
-rw-r--r--  tensorflow/examples/how_tos/reading_data/BUILD | 1
-rw-r--r--  tensorflow/examples/image_retraining/BUILD | 5
-rw-r--r--  tensorflow/examples/tutorials/mnist/BUILD | 5
-rw-r--r--  tensorflow/examples/tutorials/monitors/BUILD | 1
-rw-r--r--  tensorflow/g3doc/how_tos/adding_an_op/BUILD | 3
-rw-r--r--  tensorflow/python/BUILD | 286
-rw-r--r--  tensorflow/python/build_defs.bzl | 11
-rw-r--r--  tensorflow/python/debug/BUILD | 54
-rw-r--r--  tensorflow/python/kernel_tests/BUILD | 1228
-rw-r--r--  tensorflow/python/kernel_tests/argmax_op_test.py | 47
-rw-r--r--  tensorflow/python/kernel_tests/array_ops_test.py | 270
-rw-r--r--  tensorflow/python/kernel_tests/as_string_op_test.py | 150
-rw-r--r--  tensorflow/python/kernel_tests/atrous_conv2d_test.py | 80
-rw-r--r--  tensorflow/python/kernel_tests/atrous_convolution_test.py | 57
-rw-r--r--  tensorflow/python/kernel_tests/attention_ops_test.py | 167
-rw-r--r--  tensorflow/python/kernel_tests/barrier_ops_test.py | 257
-rw-r--r--  tensorflow/python/kernel_tests/basic_gpu_test.py | 176
-rw-r--r--  tensorflow/python/kernel_tests/batch_matmul_op_test.py | 33
-rw-r--r--  tensorflow/python/kernel_tests/batchtospace_op_test.py | 114
-rw-r--r--  tensorflow/python/kernel_tests/bcast_ops_test.py | 8
-rw-r--r--  tensorflow/python/kernel_tests/benchmark_test.py | 59
-rw-r--r--  tensorflow/python/kernel_tests/betainc_op_test.py | 69
-rw-r--r--  tensorflow/python/kernel_tests/bias_op_test.py | 141
-rw-r--r--  tensorflow/python/kernel_tests/bitcast_op_test.py | 31
-rw-r--r--  tensorflow/python/kernel_tests/candidate_sampler_ops_test.py | 72
-rw-r--r--  tensorflow/python/kernel_tests/cast_op_test.py | 112
-rw-r--r--  tensorflow/python/kernel_tests/check_ops_test.py | 635
-rw-r--r--  tensorflow/python/kernel_tests/cholesky_op_test.py | 74
-rw-r--r--  tensorflow/python/kernel_tests/clip_ops_test.py | 166
-rw-r--r--  tensorflow/python/kernel_tests/concat_op_test.py | 348
-rw-r--r--  tensorflow/python/kernel_tests/conditional_accumulator_test.py | 147
-rw-r--r--  tensorflow/python/kernel_tests/confusion_matrix_test.py | 77
-rw-r--r--  tensorflow/python/kernel_tests/constant_op_test.py | 464
-rw-r--r--  tensorflow/python/kernel_tests/control_flow_ops_py_test.py | 2032
-rw-r--r--  tensorflow/python/kernel_tests/conv1d_test.py | 32
-rw-r--r--  tensorflow/python/kernel_tests/conv2d_backprop_filter_grad_test.py | 42
-rw-r--r--  tensorflow/python/kernel_tests/conv2d_transpose_test.py | 108
-rw-r--r--  tensorflow/python/kernel_tests/conv3d_backprop_filter_v2_grad_test.py | 41
-rw-r--r--  tensorflow/python/kernel_tests/conv3d_transpose_test.py | 56
-rw-r--r--  tensorflow/python/kernel_tests/conv_ops_3d_test.py | 655
-rw-r--r--  tensorflow/python/kernel_tests/conv_ops_test.py | 1105
-rw-r--r--  tensorflow/python/kernel_tests/cross_grad_test.py | 20
-rw-r--r--  tensorflow/python/kernel_tests/ctc_decoder_ops_test.py | 153
-rw-r--r--  tensorflow/python/kernel_tests/ctc_loss_op_test.py | 83
-rw-r--r--  tensorflow/python/kernel_tests/cwise_ops_test.py | 1255
-rw-r--r--  tensorflow/python/kernel_tests/decode_csv_op_test.py | 68
-rw-r--r--  tensorflow/python/kernel_tests/decode_image_op_test.py | 46
-rw-r--r--  tensorflow/python/kernel_tests/decode_png_op_test.py | 30
-rw-r--r--  tensorflow/python/kernel_tests/decode_raw_op_test.py | 28
-rw-r--r--  tensorflow/python/kernel_tests/denormal_test.py | 16
-rw-r--r--  tensorflow/python/kernel_tests/dense_update_ops_no_tsan_test.py | 51
-rw-r--r--  tensorflow/python/kernel_tests/dense_update_ops_test.py | 86
-rw-r--r--  tensorflow/python/kernel_tests/depthtospace_op_test.py | 34
-rw-r--r--  tensorflow/python/kernel_tests/depthwise_conv_op_test.py | 214
-rw-r--r--  tensorflow/python/kernel_tests/determinant_op_test.py | 34
-rw-r--r--  tensorflow/python/kernel_tests/diag_op_test.py | 171
-rw-r--r--  tensorflow/python/kernel_tests/division_future_test.py | 17
-rw-r--r--  tensorflow/python/kernel_tests/division_past_test.py | 17
-rw-r--r--  tensorflow/python/kernel_tests/draw_bounding_box_op_test.py | 32
-rw-r--r--  tensorflow/python/kernel_tests/dynamic_partition_op_test.py | 62
-rw-r--r--  tensorflow/python/kernel_tests/dynamic_stitch_op_test.py | 130
-rw-r--r--  tensorflow/python/kernel_tests/edit_distance_op_test.py | 95
-rw-r--r--  tensorflow/python/kernel_tests/embedding_ops_test.py | 375
-rw-r--r--  tensorflow/python/kernel_tests/extract_image_patches_grad_test.py | 34
-rw-r--r--  tensorflow/python/kernel_tests/extract_image_patches_op_test.py | 65
-rw-r--r--  tensorflow/python/kernel_tests/fft_ops_test.py | 66
-rw-r--r--  tensorflow/python/kernel_tests/fifo_queue_test.py | 487
-rw-r--r--  tensorflow/python/kernel_tests/fractional_avg_pool_op_test.py | 74
-rw-r--r--  tensorflow/python/kernel_tests/fractional_max_pool_op_test.py | 94
-rw-r--r--  tensorflow/python/kernel_tests/functional_ops_test.py | 305
-rw-r--r--  tensorflow/python/kernel_tests/gather_nd_op_test.py | 161
-rw-r--r--  tensorflow/python/kernel_tests/gather_op_test.py | 54
-rw-r--r--  tensorflow/python/kernel_tests/gradient_correctness_test.py | 22
-rw-r--r--  tensorflow/python/kernel_tests/identity_op_py_test.py | 33
-rw-r--r--  tensorflow/python/kernel_tests/in_topk_op_test.py | 17
-rw-r--r--  tensorflow/python/kernel_tests/init_ops_test.py | 295
-rw-r--r--  tensorflow/python/kernel_tests/io_ops_test.py | 74
-rw-r--r--  tensorflow/python/kernel_tests/large_concat_op_test.py | 17
-rw-r--r--  tensorflow/python/kernel_tests/linalg_grad_test.py | 59
-rw-r--r--  tensorflow/python/kernel_tests/linalg_ops_test.py | 120
-rw-r--r--  tensorflow/python/kernel_tests/listdiff_op_test.py | 31
-rw-r--r--  tensorflow/python/kernel_tests/logging_ops_test.py | 56
-rw-r--r--  tensorflow/python/kernel_tests/losses_test.py | 1168
-rw-r--r--  tensorflow/python/kernel_tests/lrn_op_test.py | 83
-rw-r--r--  tensorflow/python/kernel_tests/matmul_op_test.py | 157
-rw-r--r--  tensorflow/python/kernel_tests/matrix_band_part_op_test.py | 22
-rw-r--r--  tensorflow/python/kernel_tests/matrix_inverse_op_test.py | 35
-rw-r--r--  tensorflow/python/kernel_tests/matrix_solve_ls_op_test.py | 36
-rw-r--r--  tensorflow/python/kernel_tests/matrix_solve_op_test.py | 39
-rw-r--r--  tensorflow/python/kernel_tests/matrix_triangular_solve_op_test.py | 40
-rw-r--r--  tensorflow/python/kernel_tests/metrics_test.py | 2129
-rw-r--r--  tensorflow/python/kernel_tests/morphological_ops_test.py | 476
-rw-r--r--  tensorflow/python/kernel_tests/multinomial_op_test.py | 89
-rw-r--r--  tensorflow/python/kernel_tests/numerics_test.py | 65
-rw-r--r--  tensorflow/python/kernel_tests/one_hot_op_test.py | 308
-rw-r--r--  tensorflow/python/kernel_tests/pack_op_test.py | 223
-rw-r--r--  tensorflow/python/kernel_tests/pad_op_test.py | 130
-rw-r--r--  tensorflow/python/kernel_tests/padding_fifo_queue_test.py | 561
-rw-r--r--  tensorflow/python/kernel_tests/parameterized_truncated_normal_op_test.py | 39
-rw-r--r--  tensorflow/python/kernel_tests/parsing_ops_test.py | 752
-rw-r--r--  tensorflow/python/kernel_tests/partitioned_variables_test.py | 533
-rw-r--r--  tensorflow/python/kernel_tests/pool_test.py | 112
-rw-r--r--  tensorflow/python/kernel_tests/pooling_ops_3d_test.py | 390
-rw-r--r--  tensorflow/python/kernel_tests/pooling_ops_test.py | 1129
-rw-r--r--  tensorflow/python/kernel_tests/priority_queue_test.py | 135
-rw-r--r--  tensorflow/python/kernel_tests/py_func_test.py | 128
-rw-r--r--  tensorflow/python/kernel_tests/qr_op_test.py | 32
-rw-r--r--  tensorflow/python/kernel_tests/random_crop_test.py | 21
-rw-r--r--  tensorflow/python/kernel_tests/random_gamma_test.py | 90
-rw-r--r--  tensorflow/python/kernel_tests/random_ops_test.py | 128
-rw-r--r--  tensorflow/python/kernel_tests/random_shuffle_queue_test.py | 383
-rw-r--r--  tensorflow/python/kernel_tests/reader_ops_test.py | 213
-rw-r--r--  tensorflow/python/kernel_tests/reduce_join_op_test.py | 169
-rw-r--r--  tensorflow/python/kernel_tests/reduction_ops_test.py | 227
-rw-r--r--  tensorflow/python/kernel_tests/relu_op_test.py | 244
-rw-r--r--  tensorflow/python/kernel_tests/reshape_op_test.py | 60
-rw-r--r--  tensorflow/python/kernel_tests/reverse_sequence_op_test.py | 107
-rw-r--r--  tensorflow/python/kernel_tests/rnn_test.py | 1254
-rw-r--r--  tensorflow/python/kernel_tests/save_restore_ops_test.py | 24
-rw-r--r--  tensorflow/python/kernel_tests/scalar_strict_test.py | 58
-rw-r--r--  tensorflow/python/kernel_tests/scan_ops_test.py | 95
-rw-r--r--  tensorflow/python/kernel_tests/scatter_nd_ops_test.py | 201
-rw-r--r--  tensorflow/python/kernel_tests/scatter_ops_test.py | 68
-rw-r--r--  tensorflow/python/kernel_tests/segment_reduction_ops_test.py | 286
-rw-r--r--  tensorflow/python/kernel_tests/self_adjoint_eig_op_test.py | 43
-rw-r--r--  tensorflow/python/kernel_tests/session_ops_test.py | 179
-rw-r--r--  tensorflow/python/kernel_tests/sets_test.py | 1019
-rw-r--r--  tensorflow/python/kernel_tests/shape_ops_test.py | 279
-rw-r--r--  tensorflow/python/kernel_tests/slice_op_test.py | 124
-rw-r--r--  tensorflow/python/kernel_tests/softmax_op_test.py | 56
-rw-r--r--  tensorflow/python/kernel_tests/softplus_op_test.py | 50
-rw-r--r--  tensorflow/python/kernel_tests/softsign_op_test.py | 32
-rw-r--r--  tensorflow/python/kernel_tests/spacetobatch_op_test.py | 328
-rw-r--r--  tensorflow/python/kernel_tests/spacetodepth_op_test.py | 120
-rw-r--r--  tensorflow/python/kernel_tests/sparse_add_op_test.py | 79
-rw-r--r--  tensorflow/python/kernel_tests/sparse_concat_op_test.py | 109
-rw-r--r--  tensorflow/python/kernel_tests/sparse_conditional_accumulator_test.py | 226
-rw-r--r--  tensorflow/python/kernel_tests/sparse_matmul_op_test.py | 153
-rw-r--r--  tensorflow/python/kernel_tests/sparse_ops_test.py | 280
-rw-r--r--  tensorflow/python/kernel_tests/sparse_reorder_op_test.py | 55
-rw-r--r--  tensorflow/python/kernel_tests/sparse_reshape_op_test.py | 114
-rw-r--r--  tensorflow/python/kernel_tests/sparse_serialization_ops_test.py | 116
-rw-r--r--  tensorflow/python/kernel_tests/sparse_split_op_test.py | 128
-rw-r--r--  tensorflow/python/kernel_tests/sparse_tensor_dense_matmul_grad_test.py | 26
-rw-r--r--  tensorflow/python/kernel_tests/sparse_tensor_dense_matmul_op_test.py | 175
-rw-r--r--  tensorflow/python/kernel_tests/sparse_tensors_map_ops_test.py | 122
-rw-r--r--  tensorflow/python/kernel_tests/sparse_to_dense_op_py_test.py | 83
-rw-r--r--  tensorflow/python/kernel_tests/sparse_xent_op_test.py | 176
-rw-r--r--  tensorflow/python/kernel_tests/sparsemask_op_test.py | 24
-rw-r--r--  tensorflow/python/kernel_tests/split_op_test.py | 75
-rw-r--r--  tensorflow/python/kernel_tests/stack_ops_test.py | 83
-rw-r--r--  tensorflow/python/kernel_tests/string_join_op_test.py | 20
-rw-r--r--  tensorflow/python/kernel_tests/string_split_op_test.py | 58
-rw-r--r--  tensorflow/python/kernel_tests/string_to_hash_bucket_op_test.py | 52
-rw-r--r--  tensorflow/python/kernel_tests/string_to_number_op_test.py | 62
-rw-r--r--  tensorflow/python/kernel_tests/substr_op_test.py | 110
-rw-r--r--  tensorflow/python/kernel_tests/summary_audio_op_test.py | 17
-rw-r--r--  tensorflow/python/kernel_tests/summary_image_op_test.py | 37
-rw-r--r--  tensorflow/python/kernel_tests/summary_ops_test.py | 60
-rw-r--r--  tensorflow/python/kernel_tests/summary_tensor_op_test.py | 48
-rw-r--r--  tensorflow/python/kernel_tests/svd_op_test.py | 54
-rw-r--r--  tensorflow/python/kernel_tests/template_test.py | 95
-rw-r--r--  tensorflow/python/kernel_tests/tensor_array_ops_test.py | 645
-rw-r--r--  tensorflow/python/kernel_tests/topk_op_test.py | 64
-rw-r--r--  tensorflow/python/kernel_tests/trace_op_test.py | 10
-rw-r--r--  tensorflow/python/kernel_tests/transpose_op_test.py | 151
-rw-r--r--  tensorflow/python/kernel_tests/unique_op_test.py | 20
-rw-r--r--  tensorflow/python/kernel_tests/unpack_op_test.py | 107
-rw-r--r--  tensorflow/python/kernel_tests/variable_ops_test.py | 148
-rw-r--r--  tensorflow/python/kernel_tests/variable_scope_test.py | 814
-rw-r--r--  tensorflow/python/kernel_tests/variables_test.py | 325
-rw-r--r--  tensorflow/python/kernel_tests/where_op_test.py | 31
-rw-r--r--  tensorflow/python/kernel_tests/xent_op_test.py | 52
-rw-r--r--  tensorflow/python/kernel_tests/zero_division_test.py | 22
-rw-r--r--  tensorflow/python/ops/losses/BUILD | 1
-rw-r--r--  tensorflow/python/saved_model/BUILD | 12
-rw-r--r--  tensorflow/python/saved_model/example/BUILD | 3
-rw-r--r--  tensorflow/python/tools/BUILD | 7
-rw-r--r--  tensorflow/tensorboard/backend/BUILD | 3
-rw-r--r--  tensorflow/tensorboard/scripts/BUILD | 1
-rw-r--r--  tensorflow/tools/quantization/BUILD | 4
-rw-r--r--  tensorflow/tools/test/BUILD | 6
220 files changed, 19790 insertions, 15062 deletions
diff --git a/tensorflow/contrib/bayesflow/BUILD b/tensorflow/contrib/bayesflow/BUILD
index e533cc51cb..466039bbe9 100644
--- a/tensorflow/contrib/bayesflow/BUILD
+++ b/tensorflow/contrib/bayesflow/BUILD
@@ -15,6 +15,18 @@ py_library(
name = "bayesflow_py",
srcs = ["__init__.py"] + glob(["python/ops/*.py"]),
srcs_version = "PY2AND3",
+ deps = [
+ "//tensorflow/contrib/distributions:distributions_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:check_ops",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:math_ops",
+ "//tensorflow/python:nn",
+ "//tensorflow/python:platform",
+ "//tensorflow/python:training",
+ "//tensorflow/python:util",
+ "//tensorflow/python:variable_scope",
+ ],
)
cuda_py_test(
diff --git a/tensorflow/contrib/copy_graph/BUILD b/tensorflow/contrib/copy_graph/BUILD
index 5a775c2022..a168681635 100644
--- a/tensorflow/contrib/copy_graph/BUILD
+++ b/tensorflow/contrib/copy_graph/BUILD
@@ -15,6 +15,11 @@ py_library(
"python/util/copy_elements.py",
],
srcs_version = "PY2AND3",
+ deps = [
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:session",
+ "//tensorflow/python:variables",
+ ],
)
py_test(
@@ -24,6 +29,7 @@ py_test(
deps = [
":copy_graph_py",
"//tensorflow:tensorflow_py",
+ "//tensorflow/contrib/framework:framework_py",
"//tensorflow/python:framework_test_lib",
"//tensorflow/python:platform_test",
],
diff --git a/tensorflow/contrib/crf/BUILD b/tensorflow/contrib/crf/BUILD
index 33c1323b48..b4185d474d 100644
--- a/tensorflow/contrib/crf/BUILD
+++ b/tensorflow/contrib/crf/BUILD
@@ -14,6 +14,13 @@ py_library(
name = "crf_py",
srcs = ["__init__.py"] + glob(["python/ops/*.py"]),
srcs_version = "PY2AND3",
+ deps = [
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:math_ops",
+ "//tensorflow/python:rnn",
+ "//tensorflow/python:variable_scope",
+ ],
)
cuda_py_tests(
diff --git a/tensorflow/contrib/cudnn_rnn/BUILD b/tensorflow/contrib/cudnn_rnn/BUILD
index 8a36286eef..2d85806d4e 100644
--- a/tensorflow/contrib/cudnn_rnn/BUILD
+++ b/tensorflow/contrib/cudnn_rnn/BUILD
@@ -51,6 +51,14 @@ py_library(
visibility = ["//visibility:public"],
deps = [
":cudnn_rnn_ops",
+ "//tensorflow/contrib/util:util_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:control_flow_ops",
+ "//tensorflow/python:framework",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:platform",
+ "//tensorflow/python:state_ops",
+ "//tensorflow/python:training",
],
)
@@ -61,8 +69,10 @@ cuda_py_test(
additional_deps = [
":cudnn_rnn_py",
"//tensorflow:tensorflow_py",
+ "//tensorflow/python:framework_for_generated_wrappers",
"//tensorflow/python:framework_test_lib",
"//tensorflow/python:platform_test",
+ "//tensorflow/python:variables",
],
tags = [
"manual",
diff --git a/tensorflow/contrib/distributions/BUILD b/tensorflow/contrib/distributions/BUILD
index 30fdea8694..13ce4c5946 100644
--- a/tensorflow/contrib/distributions/BUILD
+++ b/tensorflow/contrib/distributions/BUILD
@@ -17,6 +17,7 @@ cuda_py_tests(
additional_deps = [
":distributions_py",
"//tensorflow:tensorflow_py",
+ "//tensorflow/python:framework",
"//tensorflow/python:framework_test_lib",
"//tensorflow/python:platform_test",
],
@@ -100,6 +101,22 @@ py_library(
name = "distributions_py",
srcs = ["__init__.py"] + glob(["python/ops/*.py"]),
srcs_version = "PY2AND3",
+ deps = [
+ "//tensorflow/contrib/framework:framework_py",
+ "//tensorflow/contrib/linalg:linalg_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:check_ops",
+ "//tensorflow/python:control_flow_ops",
+ "//tensorflow/python:data_flow_ops",
+ "//tensorflow/python:framework",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:linalg_ops",
+ "//tensorflow/python:math_ops",
+ "//tensorflow/python:nn",
+ "//tensorflow/python:nn_ops",
+ "//tensorflow/python:random_ops",
+ "//tensorflow/python:special_math_ops",
+ ],
)
cuda_py_tests(
@@ -142,6 +159,7 @@ cuda_py_tests(
additional_deps = [
":distributions_py",
"//tensorflow:tensorflow_py",
+ "//tensorflow/python:framework",
"//tensorflow/python:framework_test_lib",
"//tensorflow/python:platform_test",
],
@@ -324,6 +342,7 @@ cuda_py_tests(
size = "small",
srcs = ["python/kernel_tests/kullback_leibler_test.py"],
additional_deps = [
+ ":distributions_py",
"//tensorflow:tensorflow_py",
"//tensorflow/python:platform_test",
],
@@ -369,6 +388,7 @@ cuda_py_tests(
additional_deps = [
":distributions_py",
"//tensorflow:tensorflow_py",
+ "//tensorflow/python:framework",
"//tensorflow/python:framework_test_lib",
"//tensorflow/python:platform_test",
],
@@ -381,6 +401,7 @@ cuda_py_tests(
additional_deps = [
":distributions_py",
"//tensorflow:tensorflow_py",
+ "//tensorflow/python:framework",
"//tensorflow/python:framework_test_lib",
"//tensorflow/python:platform_test",
],
diff --git a/tensorflow/contrib/factorization/BUILD b/tensorflow/contrib/factorization/BUILD
index e8bef0ced9..9658fec152 100644
--- a/tensorflow/contrib/factorization/BUILD
+++ b/tensorflow/contrib/factorization/BUILD
@@ -30,6 +30,13 @@ py_library(
deps = [
":gen_clustering_ops",
":gen_factorization_ops",
+ "//tensorflow/contrib/learn",
+ "//tensorflow/contrib/util:util_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:control_flow_ops",
+ "//tensorflow/python:embedding_ops",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:platform",
],
)
@@ -105,8 +112,10 @@ tf_py_test(
"python/ops/gmm_ops_test.py",
],
additional_deps = [
+ ":factorization_py",
"//tensorflow:tensorflow_py",
"//tensorflow/python:framework_test_lib",
+ "//tensorflow/python:platform",
"//tensorflow/python:platform_test",
],
)
diff --git a/tensorflow/contrib/ffmpeg/BUILD b/tensorflow/contrib/ffmpeg/BUILD
index e146b53025..99b69de30c 100644
--- a/tensorflow/contrib/ffmpeg/BUILD
+++ b/tensorflow/contrib/ffmpeg/BUILD
@@ -121,6 +121,10 @@ py_library(
deps = [
":decode_audio_op_py",
":encode_audio_op_py",
+ "//tensorflow/python:errors",
+ "//tensorflow/python:framework",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:platform",
],
)
diff --git a/tensorflow/contrib/framework/BUILD b/tensorflow/contrib/framework/BUILD
index 8534ecdeab..44017c82ea 100644
--- a/tensorflow/contrib/framework/BUILD
+++ b/tensorflow/contrib/framework/BUILD
@@ -32,6 +32,23 @@ py_library(
srcs_version = "PY2AND3",
deps = [
":gen_variable_ops",
+ "//tensorflow/contrib/util:util_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:control_flow_ops",
+ "//tensorflow/python:framework",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:init_ops",
+ "//tensorflow/python:io_ops",
+ "//tensorflow/python:logging_ops",
+ "//tensorflow/python:math_ops",
+ "//tensorflow/python:platform",
+ "//tensorflow/python:state_ops",
+ "//tensorflow/python:state_ops_gen",
+ "//tensorflow/python:tensor_array_ops",
+ "//tensorflow/python:training",
+ "//tensorflow/python:util",
+ "//tensorflow/python:variable_scope",
+ "//tensorflow/python:variables",
],
)
@@ -104,7 +121,11 @@ py_test(
name = "experimental_test",
srcs = ["python/framework/experimental_test.py"],
srcs_version = "PY2AND3",
- deps = ["//tensorflow:tensorflow_py"],
+ deps = [
+ ":framework_py",
+ "//tensorflow:tensorflow_py",
+ "//tensorflow/python:platform",
+ ],
)
py_test(
diff --git a/tensorflow/contrib/graph_editor/BUILD b/tensorflow/contrib/graph_editor/BUILD
index 0ae04f208d..1cb8e2ffb5 100644
--- a/tensorflow/contrib/graph_editor/BUILD
+++ b/tensorflow/contrib/graph_editor/BUILD
@@ -20,6 +20,10 @@ py_library(
"util.py",
],
srcs_version = "PY2AND3",
+ deps = [
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ ],
)
filegroup(
diff --git a/tensorflow/contrib/grid_rnn/BUILD b/tensorflow/contrib/grid_rnn/BUILD
index 021f852e66..8ae5f31f36 100644
--- a/tensorflow/contrib/grid_rnn/BUILD
+++ b/tensorflow/contrib/grid_rnn/BUILD
@@ -14,6 +14,12 @@ py_library(
name = "grid_rnn_py",
srcs = ["__init__.py"] + glob(["python/ops/*.py"]),
srcs_version = "PY2AND3",
+ deps = [
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:math_ops",
+ "//tensorflow/python:nn",
+ "//tensorflow/python:variable_scope",
+ ],
)
cuda_py_tests(
diff --git a/tensorflow/contrib/image/BUILD b/tensorflow/contrib/image/BUILD
index 557c75acf4..fcffbe7855 100644
--- a/tensorflow/contrib/image/BUILD
+++ b/tensorflow/contrib/image/BUILD
@@ -45,7 +45,15 @@ py_library(
],
data = [":python/ops/_image_ops.so"],
srcs_version = "PY2AND3",
- deps = [":image_ops"],
+ deps = [
+ ":image_ops",
+ "//tensorflow/contrib/util:util_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:framework",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:math_ops",
+ "//tensorflow/python:platform",
+ ],
)
cuda_py_test(
diff --git a/tensorflow/contrib/input_pipeline/BUILD b/tensorflow/contrib/input_pipeline/BUILD
index fa6b1cb69e..a9cf9160b3 100644
--- a/tensorflow/contrib/input_pipeline/BUILD
+++ b/tensorflow/contrib/input_pipeline/BUILD
@@ -55,7 +55,14 @@ py_library(
srcs = glob(["python/ops/*.py"]),
data = [":python/ops/_input_pipeline_ops.so"],
srcs_version = "PY2AND3",
- deps = [":input_pipeline_ops"],
+ deps = [
+ ":input_pipeline_ops",
+ "//tensorflow/contrib/util:util_py",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:math_ops",
+ "//tensorflow/python:platform",
+ "//tensorflow/python:variable_scope",
+ ],
)
py_test(
@@ -68,6 +75,8 @@ py_test(
"//tensorflow:tensorflow_py",
"//tensorflow/python:framework_test_lib",
"//tensorflow/python:platform_test",
+ "//tensorflow/python:state_ops",
+ "//tensorflow/python:variables",
],
)
diff --git a/tensorflow/contrib/integrate/BUILD b/tensorflow/contrib/integrate/BUILD
index 1e6db75d21..1df5f0a9f0 100644
--- a/tensorflow/contrib/integrate/BUILD
+++ b/tensorflow/contrib/integrate/BUILD
@@ -14,6 +14,14 @@ py_library(
"python/ops/odes.py",
],
srcs_version = "PY2AND3",
+ deps = [
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:control_flow_ops",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:math_ops",
+ "//tensorflow/python:tensor_array_ops",
+ "//tensorflow/python:util",
+ ],
)
py_test(
diff --git a/tensorflow/contrib/labeled_tensor/BUILD b/tensorflow/contrib/labeled_tensor/BUILD
index 41b7c778a5..709ab0e4aa 100644
--- a/tensorflow/contrib/labeled_tensor/BUILD
+++ b/tensorflow/contrib/labeled_tensor/BUILD
@@ -33,6 +33,9 @@ py_library(
srcs_version = "PY2AND3",
deps = [
":_typecheck",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:math_ops",
],
)
@@ -55,6 +58,7 @@ py_test(
],
srcs_version = "PY2AND3",
deps = [
+ ":_typecheck",
":core",
":test_util",
"//tensorflow:tensorflow_py",
@@ -66,7 +70,11 @@ py_library(
srcs = ["python/ops/io_ops.py"],
srcs_version = "PY2AND3",
deps = [
+ ":_typecheck",
":core",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:parsing_ops",
],
)
@@ -78,6 +86,7 @@ py_test(
],
srcs_version = "PY2AND3",
deps = [
+ ":core",
":io_ops",
":ops",
":test_util",
@@ -91,6 +100,7 @@ py_library(
srcs_version = "PY2AND3",
deps = [
":core",
+ "//tensorflow/python:nn",
],
)
@@ -102,6 +112,7 @@ py_test(
],
srcs_version = "PY2AND3",
deps = [
+ ":core",
":nn",
":test_util",
"//tensorflow:tensorflow_py",
@@ -113,7 +124,14 @@ py_library(
srcs = ["python/ops/ops.py"],
srcs_version = "PY2AND3",
deps = [
+ ":_typecheck",
":core",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:math_ops",
+ "//tensorflow/python:numerics",
+ "//tensorflow/python:random_ops",
+ "//tensorflow/python:training",
],
)
@@ -125,6 +143,7 @@ py_test(
],
srcs_version = "PY2AND3",
deps = [
+ ":core",
":ops",
":test_util",
"//tensorflow:tensorflow_py",
@@ -136,8 +155,10 @@ py_library(
srcs = ["python/ops/sugar.py"],
srcs_version = "PY2AND3",
deps = [
+ ":_typecheck",
":core",
":ops",
+ "//tensorflow/python:framework_for_generated_wrappers",
],
)
@@ -149,6 +170,8 @@ py_test(
],
srcs_version = "PY2AND3",
deps = [
+ ":core",
+ ":ops",
":sugar",
":test_util",
"//tensorflow:tensorflow_py",
diff --git a/tensorflow/contrib/layers/BUILD b/tensorflow/contrib/layers/BUILD
index 9ce48c23f7..5484866fdb 100644
--- a/tensorflow/contrib/layers/BUILD
+++ b/tensorflow/contrib/layers/BUILD
@@ -99,6 +99,33 @@ py_library(
deps = [
":bucketization_op",
":sparse_feature_cross_op",
+ "//tensorflow/contrib/framework:framework_py",
+ "//tensorflow/contrib/lookup:lookup_py",
+ "//tensorflow/contrib/losses:losses_py",
+ "//tensorflow/contrib/metrics:metrics_py",
+ "//tensorflow/contrib/util:util_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:check_ops",
+ "//tensorflow/python:clip_ops",
+ "//tensorflow/python:control_flow_ops",
+ "//tensorflow/python:embedding_ops",
+ "//tensorflow/python:framework",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:init_ops",
+ "//tensorflow/python:layers",
+ "//tensorflow/python:math_ops",
+ "//tensorflow/python:nn",
+ "//tensorflow/python:nn_ops",
+ "//tensorflow/python:parsing_ops",
+ "//tensorflow/python:platform",
+ "//tensorflow/python:random_ops",
+ "//tensorflow/python:sparse_ops",
+ "//tensorflow/python:standard_ops",
+ "//tensorflow/python:string_ops",
+ "//tensorflow/python:training",
+ "//tensorflow/python:util",
+ "//tensorflow/python:variable_scope",
+ "//tensorflow/python:variables",
],
)
@@ -109,6 +136,7 @@ cuda_py_test(
additional_deps = [
":layers_py",
"//tensorflow:tensorflow_py",
+ "//tensorflow/python:control_flow_ops",
"//tensorflow/python:framework_test_lib",
"//tensorflow/python:platform_test",
],
@@ -187,6 +215,7 @@ py_test(
":layers_py",
"//tensorflow:tensorflow_py",
"//tensorflow/python:framework_test_lib",
+ "//tensorflow/python:init_ops",
"//tensorflow/python:platform_test",
],
)
diff --git a/tensorflow/contrib/layers/python/layers/target_column.py b/tensorflow/contrib/layers/python/layers/target_column.py
index 1d56217b07..40be09979d 100644
--- a/tensorflow/contrib/layers/python/layers/target_column.py
+++ b/tensorflow/contrib/layers/python/layers/target_column.py
@@ -20,9 +20,9 @@ from __future__ import print_function
import six
-from tensorflow.contrib import losses
-from tensorflow.contrib import metrics as metrics_lib
from tensorflow.contrib.framework import deprecated
+from tensorflow.contrib.losses.python.losses import loss_ops
+from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
@@ -31,8 +31,7 @@ from tensorflow.python.ops import nn
@deprecated(
- "2016-11-12",
- "This file will be removed after the deprecation date."
+ "2016-11-12", "This file will be removed after the deprecation date."
"Please switch to "
"third_party/tensorflow/contrib/learn/python/learn/estimators/head.py")
def regression_target(label_name=None,
@@ -51,17 +50,18 @@ def regression_target(label_name=None,
Returns:
An instance of _TargetColumn
"""
- return _RegressionTargetColumn(loss_fn=_mean_squared_loss,
- label_name=label_name,
- weight_column_name=weight_column_name,
- label_dimension=label_dimension)
+ return _RegressionTargetColumn(
+ loss_fn=_mean_squared_loss,
+ label_name=label_name,
+ weight_column_name=weight_column_name,
+ label_dimension=label_dimension)
+
# TODO(zakaria): Add logistic_regression_target
@deprecated(
- "2016-11-12",
- "This file will be removed after the deprecation date."
+ "2016-11-12", "This file will be removed after the deprecation date."
"Please switch to "
"third_party/tensorflow/contrib/learn/python/learn/estimators/head.py")
def multi_class_target(n_classes, label_name=None, weight_column_name=None):
@@ -89,15 +89,15 @@ def multi_class_target(n_classes, label_name=None, weight_column_name=None):
loss_fn = _log_loss_with_two_classes
else:
loss_fn = _softmax_cross_entropy_loss
- return _MultiClassTargetColumn(loss_fn=loss_fn,
- n_classes=n_classes,
- label_name=label_name,
- weight_column_name=weight_column_name)
+ return _MultiClassTargetColumn(
+ loss_fn=loss_fn,
+ n_classes=n_classes,
+ label_name=label_name,
+ weight_column_name=weight_column_name)
@deprecated(
- "2016-11-12",
- "This file will be removed after the deprecation date."
+ "2016-11-12", "This file will be removed after the deprecation date."
"Please switch to "
"third_party/tensorflow/contrib/learn/python/learn/estimators/head.py")
def binary_svm_target(label_name=None, weight_column_name=None):
@@ -116,13 +116,12 @@ def binary_svm_target(label_name=None, weight_column_name=None):
An instance of _TargetColumn.
"""
- return _BinarySvmTargetColumn(label_name=label_name,
- weight_column_name=weight_column_name)
+ return _BinarySvmTargetColumn(
+ label_name=label_name, weight_column_name=weight_column_name)
@deprecated(
- "2016-11-12",
- "This file will be removed after the deprecation date."
+ "2016-11-12", "This file will be removed after the deprecation date."
"Please switch to "
"third_party/tensorflow/contrib/learn/python/learn/estimators/head.py")
class ProblemType(object):
@@ -148,8 +147,8 @@ class _TargetColumn(object):
ValueError: if loss_fn or n_classes are missing.
"""
- def __init__(self, loss_fn, num_label_columns, label_name,
- weight_column_name, problem_type):
+ def __init__(self, loss_fn, num_label_columns, label_name, weight_column_name,
+ problem_type):
if not loss_fn:
raise ValueError("loss_fn must be provided")
if num_label_columns is None: # n_classes can be 0
@@ -186,8 +185,7 @@ class _TargetColumn(object):
return None
else:
return array_ops.reshape(
- math_ops.to_float(features[self._weight_column_name]),
- shape=(-1,))
+ math_ops.to_float(features[self._weight_column_name]), shape=(-1,))
@property
def problem_type(self):
@@ -254,10 +252,9 @@ class _TargetColumn(object):
if weight_tensor is None:
return math_ops.reduce_mean(loss_unweighted, name="loss")
loss_weighted = self._weighted_loss(loss_unweighted, weight_tensor)
- return math_ops.div(
- math_ops.reduce_sum(loss_weighted),
- math_ops.to_float(math_ops.reduce_sum(weight_tensor)),
- name="loss")
+ return math_ops.div(math_ops.reduce_sum(loss_weighted),
+ math_ops.to_float(math_ops.reduce_sum(weight_tensor)),
+ name="loss")
class _RegressionTargetColumn(_TargetColumn):
@@ -278,11 +275,12 @@ class _RegressionTargetColumn(_TargetColumn):
def get_eval_ops(self, features, logits, labels, metrics=None):
loss = self.loss(logits, labels, features)
- result = {"loss": metrics_lib.streaming_mean(loss)}
+ result = {"loss": metric_ops.streaming_mean(loss)}
if metrics:
predictions = self.logits_to_predictions(logits, proba=False)
- result.update(_run_metrics(predictions, labels, metrics,
- self.get_weight_tensor(features)))
+ result.update(
+ _run_metrics(predictions, labels, metrics,
+ self.get_weight_tensor(features)))
return result
@@ -316,13 +314,13 @@ class _MultiClassTargetColumn(_TargetColumn):
def get_eval_ops(self, features, logits, labels, metrics=None):
loss = self.loss(logits, labels, features)
- result = {"loss": metrics_lib.streaming_mean(loss)}
+ result = {"loss": metric_ops.streaming_mean(loss)}
# Adds default metrics.
if metrics is None:
# TODO(b/29366811): This currently results in both an "accuracy" and an
# "accuracy/threshold_0.500000_mean" metric for binary classification.
- metrics = {("accuracy", "classes"): metrics_lib.streaming_accuracy}
+ metrics = {("accuracy", "classes"): metric_ops.streaming_accuracy}
predictions = math_ops.sigmoid(logits)
labels_float = math_ops.to_float(labels)
@@ -354,12 +352,14 @@ class _MultiClassTargetColumn(_TargetColumn):
"form.".format(name))
if class_metrics:
class_predictions = self.logits_to_predictions(logits, proba=False)
- result.update(_run_metrics(class_predictions, labels, class_metrics,
- self.get_weight_tensor(features)))
+ result.update(
+ _run_metrics(class_predictions, labels, class_metrics,
+ self.get_weight_tensor(features)))
if proba_metrics:
predictions = self.logits_to_predictions(logits, proba=True)
- result.update(_run_metrics(predictions, labels, proba_metrics,
- self.get_weight_tensor(features)))
+ result.update(
+ _run_metrics(predictions, labels, proba_metrics,
+ self.get_weight_tensor(features)))
return result
@@ -367,6 +367,7 @@ class _BinarySvmTargetColumn(_MultiClassTargetColumn):
"""_TargetColumn for binary classification using SVMs."""
def __init__(self, label_name, weight_column_name):
+
def loss_fn(logits, target):
check_shape_op = control_flow_ops.Assert(
math_ops.less_equal(array_ops.rank(target), 2),
@@ -374,7 +375,7 @@ class _BinarySvmTargetColumn(_MultiClassTargetColumn):
with ops.control_dependencies([check_shape_op]):
target = array_ops.reshape(
target, shape=[array_ops.shape(target)[0], 1])
- return losses.hinge_loss(logits, target)
+ return loss_ops.hinge_loss(logits, target)
super(_BinarySvmTargetColumn, self).__init__(
loss_fn=loss_fn,
@@ -435,8 +436,7 @@ def _run_metrics(predictions, labels, metrics, weights):
@deprecated(
- "2016-11-12",
- "This file will be removed after the deprecation date."
+ "2016-11-12", "This file will be removed after the deprecation date."
"Please switch to "
"third_party/tensorflow/contrib/learn/python/learn/estimators/head.py")
def get_default_binary_metrics_for_eval(thresholds):
@@ -459,14 +459,14 @@ def get_default_binary_metrics_for_eval(thresholds):
metrics[_MetricKeys.AUC] = _streaming_auc
for threshold in thresholds:
- metrics[_MetricKeys.ACCURACY_MEAN % threshold] = _accuracy_at_threshold(
- threshold)
+ metrics[_MetricKeys.ACCURACY_MEAN %
+ threshold] = _accuracy_at_threshold(threshold)
# Precision for positive examples.
metrics[_MetricKeys.PRECISION_MEAN % threshold] = _streaming_at_threshold(
- metrics_lib.streaming_precision_at_thresholds, threshold)
+ metric_ops.streaming_precision_at_thresholds, threshold)
# Recall for positive examples.
metrics[_MetricKeys.RECALL_MEAN % threshold] = _streaming_at_threshold(
- metrics_lib.streaming_recall_at_thresholds, threshold)
+ metric_ops.streaming_recall_at_thresholds, threshold)
return metrics
@@ -478,16 +478,16 @@ def _float_weights_or_none(weights):
def _labels_streaming_mean(unused_predictions, labels, weights=None):
- return metrics_lib.streaming_mean(labels, weights=weights)
+ return metric_ops.streaming_mean(labels, weights=weights)
def _predictions_streaming_mean(predictions, unused_labels, weights=None):
- return metrics_lib.streaming_mean(predictions, weights=weights)
+ return metric_ops.streaming_mean(predictions, weights=weights)
def _streaming_auc(predictions, labels, weights=None):
- return metrics_lib.streaming_auc(predictions, labels,
- weights=_float_weights_or_none(weights))
+ return metric_ops.streaming_auc(
+ predictions, labels, weights=_float_weights_or_none(weights))
def _accuracy_at_threshold(threshold):
@@ -495,9 +495,8 @@ def _accuracy_at_threshold(threshold):
def _accuracy_metric(predictions, labels, weights=None):
threshold_predictions = math_ops.to_float(
math_ops.greater_equal(predictions, threshold))
- return metrics_lib.streaming_accuracy(predictions=threshold_predictions,
- labels=labels,
- weights=weights)
+ return metric_ops.streaming_accuracy(
+ predictions=threshold_predictions, labels=labels, weights=weights)
return _accuracy_metric
@@ -506,7 +505,9 @@ def _streaming_at_threshold(streaming_metrics_fn, threshold):
def _streaming_metrics(predictions, labels, weights=None):
precision_tensor, update_op = streaming_metrics_fn(
- predictions, labels=labels, thresholds=[threshold],
+ predictions,
+ labels=labels,
+ thresholds=[threshold],
weights=_float_weights_or_none(weights))
return array_ops.squeeze(precision_tensor), update_op
diff --git a/tensorflow/contrib/learn/BUILD b/tensorflow/contrib/learn/BUILD
index 54177ffd2d..372b4eb565 100644
--- a/tensorflow/contrib/learn/BUILD
+++ b/tensorflow/contrib/learn/BUILD
@@ -18,12 +18,48 @@ py_library(
),
srcs_version = "PY2AND3",
deps = [
+ "//tensorflow/contrib/framework:framework_py",
+ "//tensorflow/contrib/layers:layers_py",
"//tensorflow/contrib/learn/python/learn/datasets",
+ "//tensorflow/contrib/linear_optimizer:sdca_ops_py",
+ "//tensorflow/contrib/losses:losses_py",
"//tensorflow/contrib/session_bundle:exporter",
+ "//tensorflow/contrib/session_bundle:gc",
"//tensorflow/contrib/tensor_forest:client_lib",
+ "//tensorflow/contrib/tensor_forest:data_ops_py",
+ "//tensorflow/contrib/tensor_forest:eval_metrics",
+ "//tensorflow/contrib/tensor_forest:tensor_forest_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:check_ops",
+ "//tensorflow/python:clip_ops",
+ "//tensorflow/python:control_flow_ops",
+ "//tensorflow/python:data_flow_ops",
+ "//tensorflow/python:errors",
"//tensorflow/python:framework",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:functional_ops",
+ "//tensorflow/python:gradients",
+ "//tensorflow/python:init_ops",
+ "//tensorflow/python:io_ops",
+ "//tensorflow/python:logging_ops",
+ "//tensorflow/python:math_ops",
+ "//tensorflow/python:nn",
+ "//tensorflow/python:parsing_ops",
+ "//tensorflow/python:partitioned_variables",
+ "//tensorflow/python:platform",
+ "//tensorflow/python:resources",
+ "//tensorflow/python:rnn",
+ "//tensorflow/python:session",
+ "//tensorflow/python:sparse_ops",
+ "//tensorflow/python:state_ops",
+ "//tensorflow/python:string_ops",
+ "//tensorflow/python:training",
+ "//tensorflow/python:util",
+ "//tensorflow/python:variable_scope",
+ "//tensorflow/python:variables",
"//tensorflow/python/saved_model:builder",
"//tensorflow/python/saved_model:loader",
+ "//tensorflow/python/saved_model:signature_constants",
"//tensorflow/python/saved_model:signature_def_utils",
"//tensorflow/python/saved_model:tag_constants",
],
@@ -89,6 +125,7 @@ py_test(
deps = [
":learn",
"//tensorflow:tensorflow_py",
+ "//tensorflow/python:framework_for_generated_wrappers",
"//tensorflow/python:framework_test_lib",
],
)
@@ -126,6 +163,7 @@ py_test(
],
srcs_version = "PY2AND3",
deps = [
+ ":learn",
"//tensorflow:tensorflow_py",
"//tensorflow/python:framework_test_lib",
],
@@ -200,6 +238,7 @@ py_test(
deps = [
":learn",
"//tensorflow:tensorflow_py",
+ "//tensorflow/python:framework_for_generated_wrappers",
"//tensorflow/python:framework_test_lib",
],
)
@@ -285,6 +324,7 @@ py_test(
":learn",
"//tensorflow:tensorflow_py",
"//tensorflow/python:framework_test_lib",
+ "//tensorflow/python:util",
],
)
@@ -297,8 +337,12 @@ py_test(
":learn",
"//tensorflow:tensorflow_py",
"//tensorflow/python:extra_py_tests_deps",
+ "//tensorflow/python:framework",
+ "//tensorflow/python:framework_for_generated_wrappers",
"//tensorflow/python:framework_test_lib",
+ "//tensorflow/python:resources",
"//tensorflow/python:test_ops",
+ "//tensorflow/python:variables",
],
)
@@ -323,6 +367,8 @@ py_test(
":learn",
"//tensorflow:tensorflow_py",
"//tensorflow/python:framework_test_lib",
+ "//tensorflow/python:platform",
+ "//tensorflow/python:training",
],
)
@@ -358,7 +404,11 @@ py_test(
deps = [
":learn",
"//tensorflow:tensorflow_py",
+ "//tensorflow/python:framework_for_generated_wrappers",
"//tensorflow/python:framework_test_lib",
+ "//tensorflow/python:util",
+ "//tensorflow/python/saved_model:loader",
+ "//tensorflow/python/saved_model:tag_constants",
],
)
@@ -426,6 +476,7 @@ py_test(
":learn",
"//tensorflow:tensorflow_py",
"//tensorflow/python:framework_test_lib",
+ "//tensorflow/python:math_ops",
],
)
@@ -454,6 +505,7 @@ py_test(
":learn",
"//tensorflow:tensorflow_py",
"//tensorflow/python:framework_test_lib",
+ "//tensorflow/python:platform_benchmark",
],
)
@@ -502,7 +554,10 @@ py_test(
deps = [
":learn",
"//tensorflow:tensorflow_py",
+ "//tensorflow/contrib/framework:framework_py",
+ "//tensorflow/python:framework_for_generated_wrappers",
"//tensorflow/python:framework_test_lib",
+ "//tensorflow/python:state_ops",
],
)
@@ -646,8 +701,11 @@ py_test(
deps = [
":learn",
"//tensorflow:tensorflow_py",
+ "//tensorflow/python:errors",
"//tensorflow/python:framework",
+ "//tensorflow/python:framework_for_generated_wrappers",
"//tensorflow/python:framework_test_lib",
+ "//tensorflow/python:platform",
],
)
@@ -659,6 +717,7 @@ py_test(
deps = [
":learn",
"//tensorflow:tensorflow_py",
+ "//tensorflow/python:errors",
"//tensorflow/python:framework_test_lib",
],
)
@@ -671,6 +730,7 @@ py_test(
deps = [
":learn",
"//tensorflow:tensorflow_py",
+ "//tensorflow/python:errors",
"//tensorflow/python:framework_test_lib",
],
)
@@ -701,6 +761,7 @@ py_test(
"//tensorflow:tensorflow_py",
"//tensorflow/python:framework",
"//tensorflow/python:framework_test_lib",
+ "//tensorflow/python:platform",
],
)
@@ -714,6 +775,8 @@ py_test(
"//tensorflow:tensorflow_py",
"//tensorflow/python:framework",
"//tensorflow/python:framework_test_lib",
+ "//tensorflow/python/saved_model:signature_constants",
+ "//tensorflow/python/saved_model:signature_def_utils",
],
)
diff --git a/tensorflow/contrib/learn/python/learn/datasets/BUILD b/tensorflow/contrib/learn/python/learn/datasets/BUILD
index 4597ec24b2..68760e2653 100644
--- a/tensorflow/contrib/learn/python/learn/datasets/BUILD
+++ b/tensorflow/contrib/learn/python/learn/datasets/BUILD
@@ -22,6 +22,10 @@ py_library(
],
data = [":data_csv"],
srcs_version = "PY2AND3",
+ deps = [
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:platform",
+ ],
)
py_binary(
@@ -54,6 +58,7 @@ py_test(
srcs = ["base_test.py"],
srcs_version = "PY2AND3",
deps = [
+ ":datasets",
"//tensorflow:tensorflow_py",
"//tensorflow/contrib/learn",
"//tensorflow/python:framework_test_lib",
diff --git a/tensorflow/contrib/legacy_seq2seq/BUILD b/tensorflow/contrib/legacy_seq2seq/BUILD
index 3fade19838..c54f058337 100644
--- a/tensorflow/contrib/legacy_seq2seq/BUILD
+++ b/tensorflow/contrib/legacy_seq2seq/BUILD
@@ -21,6 +21,18 @@ py_library(
),
srcs_version = "PY2AND3",
visibility = ["//visibility:public"],
+ deps = [
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:control_flow_ops",
+ "//tensorflow/python:embedding_ops",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:math_ops",
+ "//tensorflow/python:nn_ops",
+ "//tensorflow/python:rnn",
+ "//tensorflow/python:rnn_cell",
+ "//tensorflow/python:util",
+ "//tensorflow/python:variable_scope",
+ ],
)
cuda_py_tests(
diff --git a/tensorflow/contrib/linalg/BUILD b/tensorflow/contrib/linalg/BUILD
index 0a2372ac69..ee4da2aa9d 100644
--- a/tensorflow/contrib/linalg/BUILD
+++ b/tensorflow/contrib/linalg/BUILD
@@ -93,6 +93,15 @@ py_library(
name = "linalg_py",
srcs = ["__init__.py"] + glob(["python/ops/*.py"]),
srcs_version = "PY2AND3",
+ deps = [
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:check_ops",
+ "//tensorflow/python:control_flow_ops",
+ "//tensorflow/python:framework",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:linalg_ops",
+ "//tensorflow/python:math_ops",
+ ],
)
filegroup(
diff --git a/tensorflow/contrib/linear_optimizer/BUILD b/tensorflow/contrib/linear_optimizer/BUILD
index 3b68c51413..31e8635532 100644
--- a/tensorflow/contrib/linear_optimizer/BUILD
+++ b/tensorflow/contrib/linear_optimizer/BUILD
@@ -19,6 +19,15 @@ py_library(
deps = [
":sharded_mutable_dense_hashtable_py",
":sparse_feature_column_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:control_flow_ops",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:math_ops",
+ "//tensorflow/python:nn",
+ "//tensorflow/python:nn_ops",
+ "//tensorflow/python:sdca_ops_gen",
+ "//tensorflow/python:state_ops",
+ "//tensorflow/python:variables",
],
)
@@ -29,9 +38,11 @@ py_test(
srcs_version = "PY2AND3",
deps = [
":sdca_ops_py",
+ ":sparse_feature_column_py",
"//tensorflow:tensorflow_py",
"//tensorflow/python:framework_test_lib",
"//tensorflow/python:platform_test",
+ "//tensorflow/python:sdca_ops_gen",
],
)
@@ -41,6 +52,11 @@ py_library(
srcs_version = "PY2AND3",
deps = [
"//tensorflow/contrib/lookup:lookup_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:control_flow_ops",
+ "//tensorflow/python:data_flow_ops",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:math_ops",
],
)
@@ -61,6 +77,7 @@ py_library(
name = "sparse_feature_column_py",
srcs = ["python/ops/sparse_feature_column.py"],
srcs_version = "PY2AND3",
+ deps = ["//tensorflow/python:framework_for_generated_wrappers"],
)
py_test(
diff --git a/tensorflow/contrib/lookup/BUILD b/tensorflow/contrib/lookup/BUILD
index e608c2e2d2..c1d8ddfd10 100644
--- a/tensorflow/contrib/lookup/BUILD
+++ b/tensorflow/contrib/lookup/BUILD
@@ -14,6 +14,17 @@ py_library(
"lookup_ops.py",
],
srcs_version = "PY2AND3",
+ deps = [
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:control_flow_ops",
+ "//tensorflow/python:data_flow_ops_gen",
+ "//tensorflow/python:framework",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:math_ops",
+ "//tensorflow/python:string_ops",
+ "//tensorflow/python:training",
+ "//tensorflow/python:util",
+ ],
)
py_test(
diff --git a/tensorflow/contrib/losses/BUILD b/tensorflow/contrib/losses/BUILD
index 8452132c45..f76a8f62cb 100644
--- a/tensorflow/contrib/losses/BUILD
+++ b/tensorflow/contrib/losses/BUILD
@@ -15,6 +15,15 @@ py_library(
"python/losses/loss_ops.py",
],
srcs_version = "PY2AND3",
+ deps = [
+ "//tensorflow/contrib/framework:framework_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:math_ops",
+ "//tensorflow/python:nn",
+ "//tensorflow/python:nn_ops",
+ "//tensorflow/python:util",
+ ],
)
py_test(
diff --git a/tensorflow/contrib/metrics/BUILD b/tensorflow/contrib/metrics/BUILD
index bfeeee530b..0dbc2446a9 100644
--- a/tensorflow/contrib/metrics/BUILD
+++ b/tensorflow/contrib/metrics/BUILD
@@ -21,9 +21,25 @@ py_library(
],
srcs_version = "PY2AND3",
deps = [
+ "//tensorflow/contrib/framework:framework_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:check_ops",
+ "//tensorflow/python:confusion_matrix",
+ "//tensorflow/python:control_flow_ops",
"//tensorflow/python:framework",
"//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:histogram_ops",
+ "//tensorflow/python:init_ops",
+ "//tensorflow/python:math_ops",
+ "//tensorflow/python:metrics",
+ "//tensorflow/python:nn",
+ "//tensorflow/python:nn_ops",
"//tensorflow/python:sets",
+ "//tensorflow/python:sparse_ops",
+ "//tensorflow/python:state_ops",
+ "//tensorflow/python:util",
+ "//tensorflow/python:variable_scope",
+ "//tensorflow/python:variables",
],
)
diff --git a/tensorflow/contrib/ndlstm/BUILD b/tensorflow/contrib/ndlstm/BUILD
index 545912d129..e1a23d16c6 100644
--- a/tensorflow/contrib/ndlstm/BUILD
+++ b/tensorflow/contrib/ndlstm/BUILD
@@ -43,6 +43,7 @@ tf_py_test(
additional_deps = [
":ndlstm",
"//tensorflow:tensorflow_py",
+ "//tensorflow/python:framework_test_lib",
],
)
@@ -52,6 +53,7 @@ tf_py_test(
additional_deps = [
":ndlstm",
"//tensorflow:tensorflow_py",
+ "//tensorflow/python:framework_test_lib",
# "//tensorflow:tensorflow_py:tensorflow_google",
],
)
diff --git a/tensorflow/contrib/opt/BUILD b/tensorflow/contrib/opt/BUILD
index 55314613c2..f14ae879fc 100644
--- a/tensorflow/contrib/opt/BUILD
+++ b/tensorflow/contrib/opt/BUILD
@@ -16,6 +16,16 @@ py_library(
"python/training/variable_clipping_optimizer.py",
],
srcs_version = "PY2AND3",
+ deps = [
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:clip_ops",
+ "//tensorflow/python:control_flow_ops",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:gradients",
+ "//tensorflow/python:platform",
+ "//tensorflow/python:training",
+ "//tensorflow/python:variables",
+ ],
)
py_test(
diff --git a/tensorflow/contrib/quantization/BUILD b/tensorflow/contrib/quantization/BUILD
index 5347b32bdb..b1d12cc510 100644
--- a/tensorflow/contrib/quantization/BUILD
+++ b/tensorflow/contrib/quantization/BUILD
@@ -22,6 +22,7 @@ py_library(
srcs_version = "PY2AND3",
deps = [
":ops",
+ "//tensorflow/python:array_ops_gen",
],
)
@@ -35,8 +36,13 @@ py_library(
srcs_version = "PY2AND3",
deps = [
"//tensorflow/python:array_ops",
+ "//tensorflow/python:array_ops_gen",
+ "//tensorflow/python:framework",
+ "//tensorflow/python:framework_for_generated_wrappers",
"//tensorflow/python:math_ops",
+ "//tensorflow/python:math_ops_gen",
"//tensorflow/python:nn_ops",
+ "//tensorflow/python:nn_ops_gen",
],
)
diff --git a/tensorflow/contrib/rnn/BUILD b/tensorflow/contrib/rnn/BUILD
index a3d63b965c..29da261be1 100644
--- a/tensorflow/contrib/rnn/BUILD
+++ b/tensorflow/contrib/rnn/BUILD
@@ -31,6 +31,21 @@ py_library(
],
srcs_version = "PY2AND3",
visibility = ["//visibility:public"],
+ deps = [
+ "//tensorflow/contrib/layers:layers_py",
+ "//tensorflow/contrib/util:util_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:clip_ops",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:init_ops",
+ "//tensorflow/python:math_ops",
+ "//tensorflow/python:nn_ops",
+ "//tensorflow/python:platform",
+ "//tensorflow/python:rnn",
+ "//tensorflow/python:rnn_cell",
+ "//tensorflow/python:util",
+ "//tensorflow/python:variable_scope",
+ ],
)
cuda_py_tests(
@@ -51,6 +66,7 @@ cuda_py_tests(
srcs = ["python/kernel_tests/core_rnn_cell_test.py"],
additional_deps = [
"//tensorflow:tensorflow_py",
+ "//tensorflow/python:rnn_cell",
],
)
@@ -72,6 +88,7 @@ cuda_py_tests(
srcs = ["python/kernel_tests/core_rnn_test.py"],
additional_deps = [
"//tensorflow:tensorflow_py",
+ "//tensorflow/python:util",
],
shard_count = 10,
)
@@ -147,6 +164,7 @@ cuda_py_tests(
"//tensorflow:tensorflow_py",
"//tensorflow/python:framework_test_lib",
"//tensorflow/python:platform_test",
+ "//tensorflow/python:variable_scope",
],
)
diff --git a/tensorflow/contrib/rnn/python/ops/rnn.py b/tensorflow/contrib/rnn/python/ops/rnn.py
index b1a0d6c2f4..163743e59d 100644
--- a/tensorflow/contrib/rnn/python/ops/rnn.py
+++ b/tensorflow/contrib/rnn/python/ops/rnn.py
@@ -12,13 +12,13 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""RNN helpers for TensorFlow models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-import tensorflow as tf
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import rnn
from tensorflow.python.ops import variable_scope as vs
@@ -105,7 +105,7 @@ def stack_bidirectional_rnn(cells_fw,
initial_state_bw = initial_states_bw[i]
with vs.variable_scope("cell_%d" % i) as cell_scope:
- prev_layer, state_fw, state_bw = tf.nn.bidirectional_rnn(
+ prev_layer, state_fw, state_bw = rnn.bidirectional_rnn(
cell_fw,
cell_bw,
prev_layer,
@@ -203,7 +203,7 @@ def stack_bidirectional_dynamic_rnn(cells_fw,
initial_state_bw = initial_states_bw[i]
with vs.variable_scope("cell_%d" % i):
- outputs, (state_fw, state_bw) = tf.nn.bidirectional_dynamic_rnn(
+ outputs, (state_fw, state_bw) = rnn.bidirectional_dynamic_rnn(
cell_fw,
cell_bw,
prev_layer,
@@ -212,7 +212,7 @@ def stack_bidirectional_dynamic_rnn(cells_fw,
sequence_length=sequence_length,
dtype=dtype)
# Concat the outputs to create the new input.
- prev_layer = tf.concat_v2(outputs, 2)
+ prev_layer = array_ops.concat_v2(outputs, 2)
states_fw.append(state_fw)
states_bw.append(state_bw)
diff --git a/tensorflow/contrib/seq2seq/BUILD b/tensorflow/contrib/seq2seq/BUILD
index 178bd81afb..9e7f7918b8 100644
--- a/tensorflow/contrib/seq2seq/BUILD
+++ b/tensorflow/contrib/seq2seq/BUILD
@@ -13,6 +13,17 @@ py_library(
name = "seq2seq_py",
srcs = ["__init__.py"] + glob(["python/ops/*.py"]),
srcs_version = "PY2AND3",
+ deps = [
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:control_flow_ops",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:math_ops",
+ "//tensorflow/python:rnn",
+ "//tensorflow/python:rnn_cell",
+ "//tensorflow/python:tensor_array_ops",
+ "//tensorflow/python:util",
+ "//tensorflow/python:variable_scope",
+ ],
)
cuda_py_test(
diff --git a/tensorflow/contrib/session_bundle/BUILD b/tensorflow/contrib/session_bundle/BUILD
index f6f574bd38..026b6d47cb 100644
--- a/tensorflow/contrib/session_bundle/BUILD
+++ b/tensorflow/contrib/session_bundle/BUILD
@@ -51,6 +51,7 @@ py_library(
":session_bundle_py",
"//tensorflow:tensorflow_py",
"//tensorflow/core:protos_all_py",
+ "//tensorflow/python:framework",
"//tensorflow/python/saved_model:constants",
"//tensorflow/python/saved_model:loader",
"//tensorflow/python/saved_model:signature_constants",
@@ -72,6 +73,12 @@ py_test(
tags = ["manual"],
deps = [
":bundle_shim_py",
+ ":constants",
+ "//tensorflow/python:framework",
+ "//tensorflow/python:util",
+ "//tensorflow/python/saved_model:constants",
+ "//tensorflow/python/saved_model:signature_constants",
+ "//tensorflow/python/saved_model:tag_constants",
],
)
@@ -90,6 +97,10 @@ py_library(
":gc",
":manifest_proto_py",
"//tensorflow/python:framework",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:platform",
+ "//tensorflow/python:training",
+ "//tensorflow/python:util",
],
)
@@ -107,6 +118,7 @@ py_test(
":gc",
":manifest_proto_py",
"//tensorflow:tensorflow_py",
+ "//tensorflow/python:platform",
],
)
@@ -116,6 +128,7 @@ py_library(
srcs_version = "PY2AND3",
deps = [
"//tensorflow/python:framework",
+ "//tensorflow/python:platform",
],
)
@@ -129,6 +142,8 @@ py_test(
deps = [
":gc",
"//tensorflow:tensorflow_py",
+ "//tensorflow/python:framework_test_lib",
+ "//tensorflow/python:platform",
],
)
@@ -213,6 +228,7 @@ py_library(
":manifest_proto_py",
"//tensorflow:tensorflow_py",
"//tensorflow/core:protos_all_py",
+ "//tensorflow/python:lib",
],
)
@@ -234,6 +250,8 @@ py_test(
":session_bundle_py",
"//tensorflow:tensorflow_py",
"//tensorflow/core:protos_all_py",
+ "//tensorflow/python:framework",
+ "//tensorflow/python:util",
],
)
diff --git a/tensorflow/contrib/tensor_forest/BUILD b/tensorflow/contrib/tensor_forest/BUILD
index e5ed22fdd3..03030d5ad8 100644
--- a/tensorflow/contrib/tensor_forest/BUILD
+++ b/tensorflow/contrib/tensor_forest/BUILD
@@ -73,6 +73,15 @@ py_library(
"data/data_ops.py",
],
srcs_version = "PY2AND3",
+ deps = [
+ ":constants",
+ ":tensor_forest_ops_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:framework",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:math_ops",
+ "//tensorflow/python:sparse_ops",
+ ],
)
tf_gen_op_libs(
@@ -114,7 +123,15 @@ py_library(
],
srcs_version = "PY2AND3",
deps = [
+ ":constants",
":gen_tensor_forest_ops",
+ "//tensorflow/contrib/util:util_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:framework",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:math_ops",
+ "//tensorflow/python:platform",
+ "//tensorflow/python:sparse_ops",
],
)
@@ -122,6 +139,12 @@ py_library(
name = "eval_metrics",
srcs = ["client/eval_metrics.py"],
srcs_version = "PY2AND3",
+ deps = [
+ "//tensorflow/contrib/metrics:metrics_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:math_ops",
+ "//tensorflow/python:nn",
+ ],
)
py_test(
@@ -274,6 +297,18 @@ py_library(
":constants",
":data_ops_py",
":tensor_forest_ops_py",
+ "//tensorflow/contrib/losses:losses_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:control_flow_ops",
+ "//tensorflow/python:framework",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:init_ops",
+ "//tensorflow/python:math_ops",
+ "//tensorflow/python:platform",
+ "//tensorflow/python:random_ops",
+ "//tensorflow/python:state_ops",
+ "//tensorflow/python:variable_scope",
+ "//tensorflow/python:variables",
],
)
@@ -326,9 +361,11 @@ py_test(
srcs_version = "PY2AND3",
tags = ["manual"],
deps = [
+ ":tensor_forest_ops_py",
":topn_py",
"//tensorflow:tensorflow_py",
"//tensorflow/python:framework_test_lib",
"//tensorflow/python:platform_test",
+ "//tensorflow/python:session",
],
)
diff --git a/tensorflow/contrib/tensor_forest/hybrid/BUILD b/tensorflow/contrib/tensor_forest/hybrid/BUILD
index 0c99a09be3..622efb07b0 100644
--- a/tensorflow/contrib/tensor_forest/hybrid/BUILD
+++ b/tensorflow/contrib/tensor_forest/hybrid/BUILD
@@ -82,6 +82,13 @@ py_library(
"python/ops/_training_ops.so",
],
srcs_version = "PY2AND3",
+ deps = [
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:framework",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:math_ops",
+ "//tensorflow/python:platform",
+ ],
)
py_library(
@@ -100,6 +107,7 @@ py_library(
srcs_version = "PY2AND3",
deps = [
"//tensorflow:tensorflow_py",
+ "//tensorflow/contrib/tensor_forest:tensor_forest_py",
],
)
@@ -114,6 +122,9 @@ py_test(
":fully_connected_layer",
":hybrid_layer",
":hybrid_model",
+ "//tensorflow/contrib/tensor_forest:tensor_forest_py",
+ "//tensorflow/python:framework_test_lib",
+ "//tensorflow/python:platform_test",
],
)
@@ -125,6 +136,12 @@ py_library(
srcs_version = "PY2AND3",
deps = [
"//tensorflow:tensorflow_py",
+ "//tensorflow/contrib/tensor_forest:tensor_forest_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:math_ops",
+ "//tensorflow/python:nn_ops",
+ "//tensorflow/python:training",
+ "//tensorflow/python:variables",
],
)
@@ -137,6 +154,8 @@ py_library(
deps = [
":hybrid_layer",
"//tensorflow:tensorflow_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:framework_for_generated_wrappers",
],
)
@@ -161,6 +180,7 @@ py_test(
deps = [
":ops_lib",
"//tensorflow:tensorflow_py",
+ "//tensorflow/contrib/tensor_forest:tensor_forest_py",
"//tensorflow/python:framework_test_lib",
"//tensorflow/python:platform_test",
],
@@ -177,6 +197,10 @@ py_library(
":ops_lib",
"//tensorflow:tensorflow_py",
"//tensorflow/contrib/tensor_forest:tensor_forest_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:init_ops",
+ "//tensorflow/python:variable_scope",
],
)
@@ -190,6 +214,10 @@ py_test(
":decisions_to_data_layer",
"//tensorflow:tensorflow_py",
"//tensorflow/contrib/tensor_forest:tensor_forest_py",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:framework_test_lib",
+ "//tensorflow/python:platform_test",
+ "//tensorflow/python:variable_scope",
],
)
@@ -206,6 +234,7 @@ py_library(
":ops_lib",
"//tensorflow:tensorflow_py",
"//tensorflow/contrib/tensor_forest:tensor_forest_py",
+ "//tensorflow/python:training",
],
)
@@ -222,6 +251,8 @@ py_library(
":ops_lib",
"//tensorflow:tensorflow_py",
"//tensorflow/contrib/tensor_forest:tensor_forest_py",
+ "//tensorflow/python:nn_ops",
+ "//tensorflow/python:training",
],
)
@@ -234,6 +265,10 @@ py_test(
":decisions_to_data_then_nn",
"//tensorflow:tensorflow_py",
"//tensorflow/contrib/tensor_forest:tensor_forest_py",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:framework_test_lib",
+ "//tensorflow/python:platform_test",
+ "//tensorflow/python:variable_scope",
],
)
@@ -250,6 +285,7 @@ py_library(
":ops_lib",
"//tensorflow:tensorflow_py",
"//tensorflow/contrib/tensor_forest:tensor_forest_py",
+ "//tensorflow/python:training",
],
)
@@ -262,6 +298,10 @@ py_test(
":k_feature_decisions_to_data_then_nn",
"//tensorflow:tensorflow_py",
"//tensorflow/contrib/tensor_forest:tensor_forest_py",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:framework_test_lib",
+ "//tensorflow/python:platform_test",
+ "//tensorflow/python:variable_scope",
],
)
@@ -278,6 +318,7 @@ py_library(
":ops_lib",
"//tensorflow:tensorflow_py",
"//tensorflow/contrib/tensor_forest:tensor_forest_py",
+ "//tensorflow/python:training",
],
)
@@ -290,6 +331,10 @@ py_test(
":forest_to_data_then_nn",
"//tensorflow:tensorflow_py",
"//tensorflow/contrib/tensor_forest:tensor_forest_py",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:framework_test_lib",
+ "//tensorflow/python:platform_test",
+ "//tensorflow/python:variable_scope",
],
)
@@ -305,6 +350,7 @@ py_library(
":ops_lib",
"//tensorflow:tensorflow_py",
"//tensorflow/contrib/tensor_forest:tensor_forest_py",
+ "//tensorflow/python:training",
],
)
@@ -322,6 +368,7 @@ py_library(
":ops_lib",
"//tensorflow:tensorflow_py",
"//tensorflow/contrib/tensor_forest:tensor_forest_py",
+ "//tensorflow/python:training",
],
)
diff --git a/tensorflow/contrib/testing/BUILD b/tensorflow/contrib/testing/BUILD
index 847081d178..e1b3f7426c 100644
--- a/tensorflow/contrib/testing/BUILD
+++ b/tensorflow/contrib/testing/BUILD
@@ -15,6 +15,10 @@ py_library(
"python/framework/util_test.py",
],
srcs_version = "PY2AND3",
+ deps = [
+ "//tensorflow/python:summary",
+ "//tensorflow/python:training",
+ ],
)
filegroup(
diff --git a/tensorflow/contrib/tfprof/BUILD b/tensorflow/contrib/tfprof/BUILD
index e817cb86df..944d767e21 100644
--- a/tensorflow/contrib/tfprof/BUILD
+++ b/tensorflow/contrib/tfprof/BUILD
@@ -14,5 +14,6 @@ py_library(
deps = [
"//tensorflow/contrib/tfprof/python/tools/tfprof:model_analyzer",
"//tensorflow/contrib/tfprof/python/tools/tfprof:tfprof_logger",
+ "//tensorflow/python:util",
],
)
diff --git a/tensorflow/contrib/training/BUILD b/tensorflow/contrib/training/BUILD
index e021fd19c6..8478995b7b 100644
--- a/tensorflow/contrib/training/BUILD
+++ b/tensorflow/contrib/training/BUILD
@@ -24,10 +24,27 @@ py_library(
srcs_version = "PY2AND3",
visibility = ["//visibility:public"],
deps = [
+ "//tensorflow/contrib/framework:framework_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:check_ops",
+ "//tensorflow/python:clip_ops",
+ "//tensorflow/python:control_flow_ops",
+ "//tensorflow/python:data_flow_ops",
+ "//tensorflow/python:errors",
"//tensorflow/python:framework",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:logging_ops",
+ "//tensorflow/python:math_ops",
"//tensorflow/python:ops",
"//tensorflow/python:platform",
+ "//tensorflow/python:random_ops",
+ "//tensorflow/python:state_ops",
+ "//tensorflow/python:string_ops",
+ "//tensorflow/python:tensor_array_ops",
"//tensorflow/python:training",
+ "//tensorflow/python:util",
+ "//tensorflow/python:variable_scope",
+ "//tensorflow/python:variables",
],
)
@@ -113,6 +130,7 @@ py_test(
":training_py",
"//tensorflow:tensorflow_py",
"//tensorflow/python:framework_test_lib",
+ "//tensorflow/python:platform",
],
)
diff --git a/tensorflow/contrib/util/BUILD b/tensorflow/contrib/util/BUILD
index 7683cda797..5ad8e3dd35 100644
--- a/tensorflow/contrib/util/BUILD
+++ b/tensorflow/contrib/util/BUILD
@@ -66,7 +66,11 @@ py_library(
name = "util_py",
srcs = glob(["**/*.py"]),
srcs_version = "PY2AND3",
- deps = [],
+ deps = [
+ "//tensorflow/python:framework",
+ "//tensorflow/python:platform",
+ "//tensorflow/python:util",
+ ],
)
filegroup(
diff --git a/tensorflow/core/BUILD b/tensorflow/core/BUILD
index 011c17fd1a..0b05f9c0ee 100644
--- a/tensorflow/core/BUILD
+++ b/tensorflow/core/BUILD
@@ -5,7 +5,7 @@
# Public targets:
#
# ":protos_all" - exports all core TensorFlow protos
-# ":protos_all_py_pb2" - py_proto_library version (Google-internal)
+# ":protos_all_py" - py_proto_library version (Google-internal)
# ":lib" - exports the public non-test headers for:
# platform/: Platform-specific code and external dependencies
# lib/: Low-level libraries that are not TensorFlow-specific
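With the comment corrected, a downstream rule spells the py_proto_library dependency with the real target name; a minimal sketch (the `uses_core_protos` target is a hypothetical example, not part of this commit):

py_library(
    name = "uses_core_protos",
    srcs = ["uses_core_protos.py"],
    srcs_version = "PY2AND3",
    deps = [
        "//tensorflow/core:protos_all_py",  # was misdocumented as ":protos_all_py_pb2"
    ],
)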
diff --git a/tensorflow/examples/how_tos/reading_data/BUILD b/tensorflow/examples/how_tos/reading_data/BUILD
index c1e773d905..5f87ed002c 100644
--- a/tensorflow/examples/how_tos/reading_data/BUILD
+++ b/tensorflow/examples/how_tos/reading_data/BUILD
@@ -13,6 +13,7 @@ py_binary(
srcs_version = "PY2AND3",
deps = [
"//tensorflow:tensorflow_py",
+ "//tensorflow/contrib/learn/python/learn/datasets",
"//tensorflow/examples/tutorials/mnist:input_data",
],
)
diff --git a/tensorflow/examples/image_retraining/BUILD b/tensorflow/examples/image_retraining/BUILD
index 4cf6adecb9..bf1326f202 100644
--- a/tensorflow/examples/image_retraining/BUILD
+++ b/tensorflow/examples/image_retraining/BUILD
@@ -14,6 +14,10 @@ py_binary(
visibility = ["//tensorflow:__subpackages__"],
deps = [
"//tensorflow:tensorflow_py",
+ "//tensorflow/python:framework",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:platform",
+ "//tensorflow/python:util",
],
)
@@ -26,6 +30,7 @@ py_test(
],
srcs_version = "PY2AND3",
deps = [
+ ":retrain",
"//tensorflow:tensorflow_py",
"//tensorflow/python:framework_test_lib",
"//tensorflow/python:platform_test",
diff --git a/tensorflow/examples/tutorials/mnist/BUILD b/tensorflow/examples/tutorials/mnist/BUILD
index 532c868291..412895f353 100644
--- a/tensorflow/examples/tutorials/mnist/BUILD
+++ b/tensorflow/examples/tutorials/mnist/BUILD
@@ -23,7 +23,10 @@ py_library(
srcs = ["input_data.py"],
srcs_version = "PY2AND3",
visibility = ["//visibility:public"],
- deps = ["//tensorflow:tensorflow_py"],
+ deps = [
+ "//tensorflow:tensorflow_py",
+ "//tensorflow/contrib/learn/python/learn/datasets",
+ ],
)
py_library(
diff --git a/tensorflow/examples/tutorials/monitors/BUILD b/tensorflow/examples/tutorials/monitors/BUILD
index 5693eb4ee6..9f7571cd62 100644
--- a/tensorflow/examples/tutorials/monitors/BUILD
+++ b/tensorflow/examples/tutorials/monitors/BUILD
@@ -20,6 +20,7 @@ py_binary(
srcs_version = "PY2AND3",
deps = [
"//tensorflow:tensorflow_py",
+ "//tensorflow/contrib/learn",
],
)
diff --git a/tensorflow/g3doc/how_tos/adding_an_op/BUILD b/tensorflow/g3doc/how_tos/adding_an_op/BUILD
index e415662a03..7accc8e923 100644
--- a/tensorflow/g3doc/how_tos/adding_an_op/BUILD
+++ b/tensorflow/g3doc/how_tos/adding_an_op/BUILD
@@ -59,6 +59,9 @@ py_library(
deps = [
":zero_out_op_2",
"//tensorflow:tensorflow_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:sparse_ops",
],
)
diff --git a/tensorflow/python/BUILD b/tensorflow/python/BUILD
index 98666c2e62..7b05045aed 100644
--- a/tensorflow/python/BUILD
+++ b/tensorflow/python/BUILD
@@ -33,19 +33,42 @@ py_library(
srcs_version = "PY2AND3",
visibility = ["//tensorflow:__pkg__"],
deps = [
+ ":array_ops",
+ ":check_ops",
":client",
":client_testlib",
+ ":confusion_matrix",
+ ":control_flow_ops",
+ ":errors",
":framework",
+ ":framework_for_generated_wrappers",
":framework_test_lib",
+ ":functional_ops",
":gradient_checker",
+ ":histogram_ops",
+ ":image_ops",
+ ":io_ops",
+ ":lib",
+ ":math_ops",
+ ":nn",
":platform",
":platform_test",
+ ":script_ops",
+ ":sdca_ops",
+ ":session_ops",
+ ":sets",
+ ":sparse_ops",
+ ":standard_ops",
+ ":state_ops",
+ ":string_ops",
":summary",
":metrics",
":layers",
+ ":tensor_array_ops",
":training",
":ops",
":test_ops",
+ ":util",
"//tensorflow/python/ops/losses",
"//tensorflow/python/debug:debug_py",
] + if_not_windows([
@@ -66,6 +89,7 @@ py_library(
deps = [
":lib",
":pywrap_tensorflow",
+ ":util",
"//tensorflow/core:protos_all_py",
],
)
@@ -224,6 +248,7 @@ py_test(
srcs_version = "PY2AND3",
deps = [
":framework_test_lib",
+ ":util",
"//tensorflow:tensorflow_py",
],
)
@@ -233,6 +258,8 @@ py_test(
srcs = ["util/decorator_utils_test.py"],
srcs_version = "PY2AND3",
deps = [
+ ":platform",
+ ":util",
"//tensorflow:tensorflow_py",
],
)
@@ -242,6 +269,8 @@ py_test(
srcs = ["util/deprecation_test.py"],
srcs_version = "PY2AND3",
deps = [
+ ":platform",
+ ":util",
"//tensorflow:tensorflow_py",
],
)
@@ -251,6 +280,7 @@ py_test(
srcs = ["util/keyword_args_test.py"],
srcs_version = "PY2AND3",
deps = [
+ ":util",
"//tensorflow:tensorflow_py",
],
)
@@ -324,7 +354,9 @@ py_library(
":errors",
":framework_for_generated_wrappers",
":lib",
+ ":platform",
":pywrap_tensorflow",
+ ":util",
],
)
@@ -354,7 +386,10 @@ py_library(
],
srcs_version = "PY2AND3",
deps = [
+ ":errors",
":framework",
+ ":framework_for_generated_wrappers",
+ ":platform",
":platform_test",
":pywrap_tensorflow",
":session",
@@ -369,10 +404,12 @@ py_library(
],
srcs_version = "PY2AND3",
deps = [
+ ":client",
":device_lib",
":framework_test_lib",
":gradient_checker",
":platform_test",
+ ":util",
],
)
@@ -409,6 +446,7 @@ py_test(
main = "framework/subscribe_test.py",
srcs_version = "PY2AND3",
deps = [
+ ":framework",
":framework_test_lib",
":platform_test",
"//tensorflow:tensorflow_py",
@@ -423,6 +461,7 @@ py_test(
srcs_version = "PY2AND3",
deps = [
":framework_test_lib",
+ ":platform_test",
"//tensorflow:tensorflow_py",
],
)
@@ -449,8 +488,10 @@ py_library(
":array_ops",
":control_flow_ops",
":framework",
+ ":framework_for_generated_wrappers",
":functional_ops_gen",
":tensor_array_ops",
+ ":util",
":variable_scope",
],
)
@@ -460,7 +501,9 @@ cuda_py_tests(
size = "medium",
srcs = ["framework/function_test.py"],
additional_deps = [
+ ":framework_for_generated_wrappers",
":functional_ops",
+ ":logging_ops_gen",
"//tensorflow:tensorflow_py",
],
)
@@ -485,6 +528,7 @@ py_test(
main = "framework/importer_test.py",
srcs_version = "PY2AND3",
deps = [
+ ":framework_for_generated_wrappers",
":framework_test_lib",
":platform_test",
"//tensorflow:tensorflow_py",
@@ -498,7 +542,11 @@ py_test(
main = "framework/meta_graph_test.py",
srcs_version = "PY2AND3",
deps = [
+ ":control_flow_ops",
+ ":framework",
+ ":framework_for_generated_wrappers",
":framework_test_lib",
+ ":platform",
":platform_test",
"//tensorflow:tensorflow_py",
],
@@ -540,6 +588,8 @@ py_test(
main = "framework/common_shapes_test.py",
srcs_version = "PY2AND3",
deps = [
+ ":framework",
+ ":framework_for_generated_wrappers",
":framework_test_lib",
":platform_test",
"//tensorflow/core:protos_all_py",
@@ -554,6 +604,9 @@ py_test(
srcs_version = "PY2AND3",
deps = [
":control_flow_ops",
+ ":errors",
+ ":framework",
+ ":framework_for_generated_wrappers",
":framework_test_lib",
":gradients",
":math_ops",
@@ -563,6 +616,7 @@ py_test(
":sparse_ops",
":test_ops",
":test_ops_2",
+ ":util",
":variable_scope",
":variables",
"//tensorflow/core:protos_all_py",
@@ -576,6 +630,7 @@ py_test(
main = "framework/tensor_shape_test.py",
srcs_version = "PY2AND3",
deps = [
+ ":framework_for_generated_wrappers",
":framework_test_lib",
":platform_test",
"//tensorflow/core:protos_all_py",
@@ -589,6 +644,8 @@ py_test(
main = "framework/sparse_tensor_test.py",
srcs_version = "PY2AND3",
deps = [
+ ":framework",
+ ":framework_for_generated_wrappers",
":framework_test_lib",
":platform_test",
"//tensorflow/core:protos_all_py",
@@ -602,6 +659,7 @@ py_test(
main = "framework/device_test.py",
srcs_version = "PY2AND3",
deps = [
+ ":framework_for_generated_wrappers",
":framework_test_lib",
":platform_test",
"//tensorflow/core:protos_all_py",
@@ -615,6 +673,7 @@ py_test(
main = "framework/random_seed_test.py",
srcs_version = "PY2AND3",
deps = [
+ ":framework",
":framework_test_lib",
":platform_test",
"//tensorflow:tensorflow_py",
@@ -628,6 +687,7 @@ py_test(
main = "framework/tensor_shape_div_test.py",
srcs_version = "PY2AND3",
deps = [
+ ":framework_for_generated_wrappers",
":framework_test_lib",
":platform_test",
"//tensorflow/core:protos_all_py",
@@ -641,9 +701,13 @@ py_test(
main = "framework/tensor_util_test.py",
srcs_version = "PY2AND3",
deps = [
+ ":array_ops",
+ ":framework",
+ ":framework_for_generated_wrappers",
":framework_test_lib",
":platform_test",
":state_ops",
+ ":state_ops_gen",
"//tensorflow:tensorflow_py",
],
)
@@ -655,6 +719,8 @@ py_test(
main = "framework/test_util_test.py",
srcs_version = "PY2AND3",
deps = [
+ ":errors",
+ ":framework_for_generated_wrappers",
":framework_test_lib",
":logging_ops",
":platform_test",
@@ -669,6 +735,7 @@ py_test(
main = "framework/dtypes_test.py",
srcs_version = "PY2AND3",
deps = [
+ ":framework_for_generated_wrappers",
":framework_test_lib",
":platform_test",
"//tensorflow:tensorflow_py",
@@ -683,13 +750,20 @@ py_test(
main = "framework/op_def_library_test.py",
srcs_version = "PY2AND3",
deps = [
+ ":framework_for_generated_wrappers",
":framework_test_lib",
+ ":platform_test",
],
)
tf_gen_op_wrapper_private_py(
name = "array_ops_gen",
require_shape_functions = True,
+ visibility = [
+ "//tensorflow/compiler/tests:__pkg__",
+ "//tensorflow/contrib/quantization:__pkg__",
+ "//tensorflow/python/kernel_tests:__pkg__",
+ ],
)
tf_gen_op_wrapper_private_py(
@@ -714,6 +788,10 @@ tf_gen_op_wrapper_private_py(
tf_gen_op_wrapper_private_py(
name = "data_flow_ops_gen",
require_shape_functions = True,
+ visibility = [
+ "//tensorflow/contrib/lookup:__pkg__",
+ "//tensorflow/python/kernel_tests:__pkg__",
+ ],
)
tf_gen_op_wrapper_private_py(
@@ -724,6 +802,7 @@ tf_gen_op_wrapper_private_py(
tf_gen_op_wrapper_private_py(
name = "io_ops_gen",
require_shape_functions = True,
+ visibility = ["//tensorflow/python/kernel_tests:__pkg__"],
)
tf_gen_op_wrapper_private_py(
@@ -734,16 +813,28 @@ tf_gen_op_wrapper_private_py(
tf_gen_op_wrapper_private_py(
name = "logging_ops_gen",
require_shape_functions = True,
+ visibility = ["//tensorflow/python/kernel_tests:__pkg__"],
)
tf_gen_op_wrapper_private_py(
name = "math_ops_gen",
require_shape_functions = True,
+ visibility = [
+ "//tensorflow/compiler/tests:__pkg__",
+ "//tensorflow/contrib/quantization:__pkg__",
+ "//tensorflow/python/kernel_tests:__pkg__",
+ ],
)
tf_gen_op_wrapper_private_py(
name = "nn_ops_gen",
require_shape_functions = True,
+ visibility = [
+ "//tensorflow/compiler/tests:__pkg__",
+ "//tensorflow/contrib/quantization:__pkg__",
+ "//tensorflow/python/kernel_tests:__pkg__",
+ "//tensorflow/python/tools:__pkg__",
+ ],
)
tf_gen_op_wrapper_private_py(
@@ -769,6 +860,7 @@ tf_gen_op_wrapper_private_py(
tf_gen_op_wrapper_private_py(
name = "sdca_ops_gen",
require_shape_functions = True,
+ visibility = ["//tensorflow/contrib/linear_optimizer:__pkg__"],
)
tf_gen_op_wrapper_private_py(
@@ -779,6 +871,10 @@ tf_gen_op_wrapper_private_py(
tf_gen_op_wrapper_private_py(
name = "state_ops_gen",
require_shape_functions = True,
+ visibility = [
+ "//tensorflow/contrib/framework:__pkg__",
+ "//tensorflow/python/kernel_tests:__pkg__",
+ ],
)
tf_gen_op_wrapper_private_py(
@@ -809,6 +905,7 @@ py_library(
":array_ops",
":array_ops_gen",
":framework",
+ ":framework_for_generated_wrappers",
":math_ops",
":sparse_ops",
],
@@ -821,7 +918,9 @@ py_library(
deps = [
":array_ops_gen",
":framework",
+ ":framework_for_generated_wrappers",
":math_ops_gen",
+ ":util",
],
)
@@ -831,6 +930,7 @@ py_library(
srcs_version = "PY2AND3",
deps = [
":framework",
+ ":framework_for_generated_wrappers",
":set_ops_gen",
],
)
@@ -855,7 +955,9 @@ py_library(
":array_ops",
":control_flow_ops",
":framework",
+ ":framework_for_generated_wrappers",
":math_ops",
+ ":util",
],
)
@@ -866,6 +968,7 @@ py_library(
deps = [
":array_ops",
":framework",
+ ":framework_for_generated_wrappers",
":math_ops",
":nn_ops_gen",
],
@@ -879,6 +982,7 @@ py_library(
":control_flow_ops",
":control_flow_ops_gen",
":framework",
+ ":framework_for_generated_wrappers",
":math_ops",
],
)
@@ -893,9 +997,13 @@ py_library(
":control_flow_ops_gen",
":data_flow_ops_gen",
":framework",
+ ":framework_for_generated_wrappers",
":logging_ops",
+ ":logging_ops_gen",
":math_ops",
+ ":platform",
":tensor_array_ops",
+ ":util",
],
)
@@ -907,6 +1015,7 @@ py_library(
":array_ops",
":ctc_ops_gen",
":framework",
+ ":framework_for_generated_wrappers",
":nn_grad",
],
)
@@ -919,6 +1028,7 @@ py_library(
":array_ops",
":data_flow_ops",
":framework",
+ ":framework_for_generated_wrappers",
":math_ops",
],
)
@@ -932,6 +1042,7 @@ py_library(
":control_flow_ops",
":data_flow_ops_gen",
":framework",
+ ":framework_for_generated_wrappers",
":math_ops",
],
)
@@ -945,6 +1056,7 @@ py_library(
":clip_ops",
":data_flow_ops",
":framework",
+ ":framework_for_generated_wrappers",
":math_ops",
":platform",
":resource_variable_ops",
@@ -965,6 +1077,7 @@ py_library(
":control_flow_grad",
":control_flow_ops",
":framework",
+ ":framework_for_generated_wrappers",
":functional_ops",
":image_grad",
":linalg_grad",
@@ -972,6 +1085,8 @@ py_library(
":logging_ops",
":math_grad",
":math_ops",
+ ":platform",
+ ":util",
],
)
@@ -983,6 +1098,7 @@ py_library(
":array_ops",
":clip_ops",
":framework",
+ ":framework_for_generated_wrappers",
":math_ops",
],
)
@@ -994,6 +1110,7 @@ py_library(
deps = [
":array_ops",
":framework",
+ ":framework_for_generated_wrappers",
":image_ops_gen",
],
)
@@ -1011,11 +1128,15 @@ py_library(
":clip_ops",
":control_flow_ops",
":framework",
+ ":framework_for_generated_wrappers",
":image_ops_gen",
":logging_ops",
":math_ops",
":nn_ops_gen",
":random_ops",
+ ":string_ops",
+ ":util",
+ ":variables",
],
)
@@ -1026,6 +1147,7 @@ py_library(
deps = [
":array_ops",
":framework",
+ ":framework_for_generated_wrappers",
":linalg_ops",
":math_ops",
":nn_ops",
@@ -1039,7 +1161,9 @@ py_library(
srcs_version = "PY2AND3",
deps = [
":framework",
+ ":framework_for_generated_wrappers",
":io_ops_gen",
+ ":lib",
],
)
@@ -1051,6 +1175,7 @@ py_library(
":array_ops",
":control_flow_ops",
":framework",
+ ":framework_for_generated_wrappers",
":linalg_ops",
":math_ops",
],
@@ -1061,8 +1186,11 @@ py_library(
srcs = ["ops/linalg_ops.py"],
srcs_version = "PY2AND3",
deps = [
+ ":array_ops",
":framework",
+ ":framework_for_generated_wrappers",
":linalg_ops_gen",
+ ":math_ops",
],
)
@@ -1072,7 +1200,9 @@ py_library(
srcs_version = "PY2AND3",
deps = [
":framework",
+ ":framework_for_generated_wrappers",
":logging_ops_gen",
+ ":util",
],
)
@@ -1084,6 +1214,7 @@ py_library(
":array_ops",
":array_ops_gen",
":framework",
+ ":framework_for_generated_wrappers",
":math_ops",
":math_ops_gen",
],
@@ -1114,6 +1245,7 @@ py_library(
":array_ops",
":control_flow_ops",
":framework",
+ ":framework_for_generated_wrappers",
":math_ops",
],
)
@@ -1123,10 +1255,13 @@ py_library(
srcs = ["ops/resource_variable_ops.py"],
srcs_version = "PY2AND3",
deps = [
+ ":array_ops",
":framework",
+ ":framework_for_generated_wrappers",
":math_ops",
":resource_variable_ops_gen",
":resources",
+ ":util",
],
)
@@ -1142,12 +1277,15 @@ py_library(
":candidate_sampling_ops",
":ctc_ops",
":embedding_ops",
+ ":framework_for_generated_wrappers",
":math_ops",
":nn_grad",
":nn_ops",
+ ":nn_ops_gen",
":rnn",
":rnn_cell",
":sparse_ops",
+ ":util",
],
)
@@ -1158,6 +1296,7 @@ py_library(
deps = [
":array_ops",
":framework",
+ ":framework_for_generated_wrappers",
":math_ops",
":nn_ops",
":nn_ops_gen",
@@ -1172,6 +1311,7 @@ py_library(
deps = [
":array_ops",
":framework",
+ ":framework_for_generated_wrappers",
":math_ops",
":nn_ops_gen",
":random_ops",
@@ -1186,6 +1326,7 @@ py_library(
":array_ops",
":control_flow_ops",
":framework",
+ ":framework_for_generated_wrappers",
],
)
@@ -1197,8 +1338,10 @@ py_library(
":array_ops",
":control_flow_ops",
":framework",
+ ":framework_for_generated_wrappers",
":math_ops",
":parsing_ops_gen",
+ ":sparse_ops",
],
)
@@ -1208,6 +1351,8 @@ py_library(
srcs_version = "PY2AND3",
deps = [
":framework",
+ ":framework_for_generated_wrappers",
+ ":platform",
":variable_scope",
],
)
@@ -1220,6 +1365,7 @@ py_library(
":array_ops",
":control_flow_ops",
":framework",
+ ":framework_for_generated_wrappers",
":math_ops",
":random_ops_gen",
],
@@ -1233,10 +1379,12 @@ py_library(
":array_ops",
":control_flow_ops",
":framework",
+ ":framework_for_generated_wrappers",
":logging_ops",
":math_ops",
":rnn_cell",
":tensor_array_ops",
+ ":util",
":variable_scope",
],
)
@@ -1253,9 +1401,13 @@ py_library(
":clip_ops",
":embedding_ops",
":framework",
+ ":framework_for_generated_wrappers",
":init_ops",
":math_ops",
":nn_ops",
+ ":partitioned_variables",
+ ":platform",
+ ":util",
":variable_scope",
],
)
@@ -1266,6 +1418,7 @@ py_library(
srcs_version = "PY2AND3",
deps = [
":framework",
+ ":framework_for_generated_wrappers",
":script_ops_gen",
],
)
@@ -1276,6 +1429,7 @@ py_library(
srcs_version = "PY2AND3",
deps = [
":framework",
+ ":framework_for_generated_wrappers",
":sdca_ops_gen",
],
)
@@ -1288,6 +1442,8 @@ py_library(
":array_ops",
":data_flow_ops_gen",
":framework",
+ ":framework_for_generated_wrappers",
+ ":util",
],
)
@@ -1298,6 +1454,7 @@ py_library(
deps = [
":array_ops",
":framework",
+ ":framework_for_generated_wrappers",
":math_ops",
":sparse_ops",
":sparse_ops_gen",
@@ -1313,8 +1470,10 @@ py_library(
":check_ops",
":control_flow_ops",
":framework",
+ ":framework_for_generated_wrappers",
":math_ops",
":sparse_ops_gen",
+ ":util",
],
)
@@ -1326,6 +1485,7 @@ py_library(
":array_ops",
":control_flow_ops",
":framework",
+ ":framework_for_generated_wrappers",
":math_ops",
":sparse_ops",
],
@@ -1341,6 +1501,7 @@ py_library(
":confusion_matrix",
":control_flow_ops",
":framework",
+ ":framework_for_generated_wrappers",
":math_ops",
":nn",
":sets",
@@ -1360,7 +1521,9 @@ py_library(
":check_ops",
":control_flow_ops",
":framework",
+ ":framework_for_generated_wrappers",
":math_ops",
+ ":platform",
],
)
@@ -1378,6 +1541,7 @@ py_library(
":data_flow_grad",
":data_flow_ops",
":framework",
+ ":framework_for_generated_wrappers",
":functional_ops",
":gradients",
":histogram_ops",
@@ -1386,12 +1550,14 @@ py_library(
":linalg_ops",
":logging_ops",
":math_grad",
+ ":math_ops",
":numerics",
":parsing_ops",
":partitioned_variables",
":random_ops",
":script_ops",
":sdca_ops",
+ ":session_ops",
":sparse_grad",
":sparse_ops",
":special_math_ops",
@@ -1401,6 +1567,7 @@ py_library(
":template",
":tensor_array_grad",
":tensor_array_ops",
+ ":util",
":variable_scope",
":variables",
],
@@ -1412,6 +1579,7 @@ py_library(
srcs_version = "PY2AND3",
deps = [
":framework",
+ ":framework_for_generated_wrappers",
":state_ops",
],
)
@@ -1422,6 +1590,7 @@ py_library(
srcs_version = "PY2AND3",
deps = [
":framework",
+ ":framework_for_generated_wrappers",
":resource_variable_ops_gen",
":state_ops_gen",
],
@@ -1433,7 +1602,9 @@ py_library(
srcs_version = "PY2AND3",
deps = [
":framework",
+ ":framework_for_generated_wrappers",
":string_ops_gen",
+ ":util",
],
)
@@ -1443,6 +1614,7 @@ py_library(
srcs_version = "PY2AND3",
deps = [
":framework",
+ ":framework_for_generated_wrappers",
":logging_ops_gen",
],
)
@@ -1453,6 +1625,8 @@ py_library(
srcs_version = "PY2AND3",
deps = [
":framework",
+ ":framework_for_generated_wrappers",
+ ":platform",
":variable_scope",
],
)
@@ -1463,6 +1637,7 @@ py_library(
srcs_version = "PY2AND3",
deps = [
":framework",
+ ":framework_for_generated_wrappers",
":tensor_array_ops",
],
)
@@ -1475,7 +1650,9 @@ py_library(
":array_ops",
":data_flow_ops_gen",
":framework",
+ ":framework_for_generated_wrappers",
":math_ops",
+ ":util",
],
)
@@ -1486,7 +1663,9 @@ py_library(
deps = [
":array_ops",
":framework",
+ ":framework_for_generated_wrappers",
":init_ops",
+ ":platform",
":variables",
],
)
@@ -1499,8 +1678,10 @@ py_library(
":array_ops",
":control_flow_ops",
":framework",
+ ":framework_for_generated_wrappers",
":math_ops",
":state_ops",
+ ":util",
],
)
@@ -1511,7 +1692,9 @@ py_library(
deps = [
":array_ops",
":framework",
+ ":framework_for_generated_wrappers",
":gradients",
+ ":platform",
],
)
@@ -1598,10 +1781,13 @@ cuda_py_test(
additional_deps = [
":control_flow_ops",
":embedding_ops",
+ ":framework_for_generated_wrappers",
":framework_test_lib",
":lib",
+ ":platform_test",
":standard_ops",
":training",
+ ":util",
],
)
@@ -1624,8 +1810,22 @@ cuda_py_test(
size = "small",
srcs = ["ops/gradients_test.py"],
additional_deps = [
+ ":array_grad",
+ ":array_ops",
+ ":data_flow_grad",
+ ":data_flow_ops",
+ ":framework_for_generated_wrappers",
":framework_test_lib",
+ ":functional_ops",
+ ":gradients",
":lib",
+ ":math_grad",
+ ":math_ops",
+ ":nn_grad",
+ ":nn_ops",
+ ":platform_test",
+ ":state_grad",
+ ":test_ops",
"//tensorflow:tensorflow_py",
],
)
@@ -1658,11 +1858,16 @@ cuda_py_test(
srcs = ["ops/image_ops_test.py"],
additional_deps = [
":array_ops",
+ ":client_testlib",
":device_lib",
+ ":errors",
+ ":framework_for_generated_wrappers",
":framework_test_lib",
":image_ops",
":io_ops",
":lib",
+ ":math_ops",
+ ":platform_test",
"//tensorflow:tensorflow_py",
],
data = ["//tensorflow/core:image_testdata"],
@@ -1685,7 +1890,13 @@ cuda_py_test(
size = "small",
srcs = ["ops/math_ops_test.py"],
additional_deps = [
+ ":array_ops",
+ ":framework_for_generated_wrappers",
":framework_test_lib",
+ ":gradients",
+ ":math_ops",
+ ":platform_test",
+ ":variables",
],
)
@@ -1696,6 +1907,7 @@ cuda_py_test(
additional_deps = [
":framework_test_lib",
":lib",
+ ":nn_ops_gen",
"//tensorflow:tensorflow_py",
],
)
@@ -1718,6 +1930,7 @@ cuda_py_test(
additional_deps = [
":framework_test_lib",
":lib",
+ ":nn",
"//tensorflow:tensorflow_py",
],
)
@@ -1756,7 +1969,9 @@ py_library(
":client",
":control_flow_ops",
":data_flow_ops",
+ ":errors",
":framework",
+ ":framework_for_generated_wrappers",
":gradients",
":init_ops",
":io_ops",
@@ -1764,15 +1979,19 @@ py_library(
":lib",
":logging_ops",
":math_ops",
+ ":platform",
":protos_all_py",
":pywrap_tensorflow",
":random_ops",
":resource_variable_ops",
+ ":resources",
+ ":session",
":sparse_ops",
":state_ops",
":string_ops",
":summary",
":training_ops_gen",
+ ":util",
":variable_scope",
":variables",
],
@@ -1788,6 +2007,8 @@ py_library(
deps = [
":errors",
":framework",
+ ":framework_for_generated_wrappers",
+ ":platform",
":session",
":session_ops",
":training_ops_gen",
@@ -1838,6 +2059,8 @@ py_library(
srcs_version = "PY2AND3",
visibility = ["//visibility:public"],
deps = [
+ ":framework",
+ ":framework_for_generated_wrappers",
":platform",
"//tensorflow:tensorflow_py",
"//tensorflow/core:protos_all_py",
@@ -1905,9 +2128,11 @@ py_test(
],
srcs_version = "PY2AND3",
deps = [
+ ":errors",
":framework_test_lib",
":lib",
":platform_test",
+ ":util",
],
)
@@ -1960,7 +2185,10 @@ cuda_py_tests(
"client/device_lib_test.py",
],
additional_deps = [
+ ":client",
":device_lib",
+ ":framework_test_lib",
+ ":platform_test",
"//tensorflow:tensorflow_py",
],
)
@@ -2040,6 +2268,7 @@ py_library(
deps = [
":errors",
":pywrap_tensorflow",
+ ":util",
],
)
@@ -2050,9 +2279,11 @@ py_library(
deps = [
":errors",
":framework",
+ ":framework_for_generated_wrappers",
":platform",
":pywrap_tensorflow",
":session_ops",
+ ":util",
],
)
@@ -2118,11 +2349,15 @@ py_test(
":array_ops",
":control_flow_ops",
":data_flow_ops",
+ ":errors",
":framework",
+ ":framework_for_generated_wrappers",
":framework_test_lib",
":math_ops",
+ ":platform_test",
":session",
":state_ops",
+ ":util",
":variables",
],
)
@@ -2132,6 +2367,7 @@ cuda_py_test(
size = "small",
srcs = ["client/timeline_test.py"],
additional_deps = [
+ ":client",
":timeline",
"//tensorflow:tensorflow_py",
],
@@ -2144,7 +2380,10 @@ py_test(
srcs_version = "PY2AND3",
deps = [
":framework",
+ ":framework_for_generated_wrappers",
":framework_test_lib",
+ ":math_ops",
+ ":state_ops_gen",
"//tensorflow:tensorflow_py",
],
)
@@ -2155,6 +2394,7 @@ py_test(
srcs = ["lib/io/file_io_test.py"],
srcs_version = "PY2AND3",
deps = [
+ ":errors",
":lib",
"//tensorflow:tensorflow_py",
],
@@ -2187,7 +2427,21 @@ cuda_py_tests(
"training/training_ops_test.py",
],
additional_deps = [
+ ":control_flow_ops",
+ ":data_flow_ops_gen",
+ ":errors",
+ ":framework",
+ ":framework_for_generated_wrappers",
+ ":framework_test_lib",
+ ":platform",
+ ":platform_test",
+ ":resource_variable_ops",
+ ":resources",
+ ":state_ops",
+ ":state_ops_gen",
":training",
+ ":util",
+ ":variables",
"//tensorflow:tensorflow_py",
],
)
@@ -2225,6 +2479,8 @@ cuda_py_test(
size = "medium", # TODO(irving): Can this be made small?
srcs = ["training/session_manager_test.py"],
additional_deps = [
+ ":errors",
+ ":platform",
":training",
"//tensorflow:tensorflow_py",
],
@@ -2238,6 +2494,7 @@ py_test(
srcs_version = "PY2AND3",
deps = [
":extra_py_tests_deps",
+ ":framework",
":training",
"//tensorflow:tensorflow_py",
],
@@ -2249,6 +2506,8 @@ py_test(
srcs = ["training/basic_session_run_hooks_test.py"],
srcs_version = "PY2AND3",
deps = [
+ ":framework",
+ ":training",
"//tensorflow:tensorflow_py",
],
)
@@ -2259,6 +2518,8 @@ py_test(
srcs = ["training/monitored_session_test.py"],
srcs_version = "PY2AND3",
deps = [
+ ":framework_for_generated_wrappers",
+ ":training",
"//tensorflow:tensorflow_py",
],
)
@@ -2282,7 +2543,11 @@ py_library(
srcs_version = "PY2AND3",
deps = [
":client",
+ ":errors",
":framework",
+ ":framework_for_generated_wrappers",
+ ":lib",
+ ":logging_ops_gen",
":platform",
":protos_all_py",
":pywrap_tensorflow",
@@ -2305,9 +2570,13 @@ py_tests(
"summary/writer/writer_test.py",
],
additional_deps = [
+ "//tensorflow:tensorflow_py",
+ ":framework",
+ ":framework_test_lib",
+ ":platform",
+ ":platform_test",
":summary",
":training",
- "//tensorflow:tensorflow_py",
],
)
@@ -2326,14 +2595,17 @@ py_library(
srcs_version = "PY2AND3",
deps = [
":array_ops",
+ ":control_flow_ops",
":framework",
":framework_for_generated_wrappers",
":init_ops",
+ ":math_ops",
":nn",
":standard_ops",
":training",
":util",
":variable_scope",
+ ":variables",
],
)
@@ -2475,7 +2747,10 @@ py_binary(
main = "framework/gen_docs_combined.py",
srcs_version = "PY2AND3",
deps = [
+ ":client",
":docs",
+ ":framework",
+ ":framework_for_generated_wrappers",
"//tensorflow:tensorflow_py",
],
)
@@ -2525,7 +2800,12 @@ cuda_py_test(
srcs = [
"ops/accumulate_n_benchmark.py",
],
- additional_deps = ["//tensorflow:tensorflow_py"],
+ additional_deps = [
+ ":control_flow_ops_gen",
+ ":framework_for_generated_wrappers",
+ ":state_ops_gen",
+ "//tensorflow:tensorflow_py",
+ ],
main = "ops/accumulate_n_benchmark.py",
)
@@ -2536,6 +2816,7 @@ cuda_py_test(
],
additional_deps = [
":nn_ops",
+ ":nn_ops_gen",
"//tensorflow:tensorflow_py",
],
main = "ops/batch_norm_benchmark.py",
@@ -2559,6 +2840,7 @@ cuda_py_test(
"ops/split_benchmark.py",
],
additional_deps = [
+ ":platform",
":platform_benchmark",
":nn_ops",
"//tensorflow:tensorflow_py",
diff --git a/tensorflow/python/build_defs.bzl b/tensorflow/python/build_defs.bzl
index 71adab99e0..1efac5738c 100644
--- a/tensorflow/python/build_defs.bzl
+++ b/tensorflow/python/build_defs.bzl
@@ -3,24 +3,27 @@
load("//tensorflow:tensorflow.bzl", "tf_gen_op_wrapper_py")
# Intended only for use within this directory.
-# Generated python wrappers are private visibility, users should depend on the
+# Generated python wrappers have "private" visibility; users should depend on the
# full python code that incorporates the wrappers. The generated targets have
# a _gen suffix, so that the full python version can use the bare name.
# We also hard code the hidden_file here to reduce duplication.
#
# We should consider moving the "out" default pattern into here; many other
-# consumers of the tf_gen_op_wrapper_py rule would be simplified if we don't
+# consumers of the tf_gen_op_wrapper_py rule would be simplified if we didn't
# hard code the ops/ directory.
def tf_gen_op_wrapper_private_py(name, out=None, deps=[],
- require_shape_functions=False):
+ require_shape_functions=False,
+ visibility=[]):
if not name.endswith("_gen"):
fail("name must end in _gen")
+ if not visibility:
+ visibility = ["//visibility:private"]
bare_op_name = name[:-4]  # Strip off the _gen suffix
tf_gen_op_wrapper_py(name=bare_op_name,
out=out,
hidden_file="ops/hidden_ops.txt",
- visibility=["//visibility:private"],
+ visibility=visibility,
deps=deps,
require_shape_functions=require_shape_functions,
generated_target_name=name,
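The new `visibility` parameter defaults to private, preserving the old behavior, while letting individual wrappers be opened to specific packages (as the visibility blocks added to tensorflow/python/BUILD above do). A usage sketch, with hypothetical `foo_ops_gen`/`bar_ops_gen` targets and the load path as it appears in this tree:

load("//tensorflow/python:build_defs.bzl", "tf_gen_op_wrapper_private_py")

# Default: the generated wrapper keeps //visibility:private.
tf_gen_op_wrapper_private_py(
    name = "foo_ops_gen",
    require_shape_functions = True,
)

# Override: expose the generated wrapper to the kernel tests only.
tf_gen_op_wrapper_private_py(
    name = "bar_ops_gen",
    require_shape_functions = True,
    visibility = ["//tensorflow/python/kernel_tests:__pkg__"],
)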
diff --git a/tensorflow/python/debug/BUILD b/tensorflow/python/debug/BUILD
index 54e5e5072b..66a63d0197 100644
--- a/tensorflow/python/debug/BUILD
+++ b/tensorflow/python/debug/BUILD
@@ -32,6 +32,10 @@ py_library(
name = "debug_data",
srcs = ["debug_data.py"],
srcs_version = "PY2AND3",
+ deps = [
+ "//tensorflow/python:framework",
+ "//tensorflow/python:platform",
+ ],
)
py_library(
@@ -50,6 +54,8 @@ py_library(
deps = [
":debug_data",
"//tensorflow/python:data_flow_ops",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:session_ops",
],
)
@@ -60,6 +66,7 @@ py_library(
deps = [
":debug_utils",
":stepper",
+ "//tensorflow/python:errors",
"//tensorflow/python:session",
],
)
@@ -68,6 +75,7 @@ py_library(
name = "debugger_cli_common",
srcs = ["cli/debugger_cli_common.py"],
srcs_version = "PY2AND3",
+ deps = ["//tensorflow/python:platform"],
)
py_library(
@@ -92,6 +100,7 @@ py_library(
":debugger_cli_common",
":tensor_format",
"//tensorflow/python:framework",
+ "//tensorflow/python:framework_for_generated_wrappers",
"//tensorflow/python:variables",
],
)
@@ -155,9 +164,12 @@ py_library(
srcs = ["wrappers/hooks.py"],
srcs_version = "PY2AND3",
deps = [
+ ":debug_utils",
+ ":framework",
":local_cli_wrapper",
":stepper",
"//tensorflow/python:session",
+ "//tensorflow/python:training",
],
)
@@ -214,6 +226,7 @@ py_test(
":debug_data",
"//tensorflow/python:framework",
"//tensorflow/python:framework_test_lib",
+ "//tensorflow/python:platform_test",
],
)
@@ -227,8 +240,10 @@ py_test(
deps = [
":debug_utils",
"//tensorflow/python:framework",
+ "//tensorflow/python:framework_for_generated_wrappers",
"//tensorflow/python:framework_test_lib",
"//tensorflow/python:math_ops",
+ "//tensorflow/python:platform_test",
"//tensorflow/python:session",
"//tensorflow/python:variables",
],
@@ -247,6 +262,7 @@ cuda_py_test(
"//tensorflow/python:framework_for_generated_wrappers",
"//tensorflow/python:framework_test_lib",
"//tensorflow/python:math_ops",
+ "//tensorflow/python:platform_test",
"//tensorflow/python:session",
"//tensorflow/python:training",
"//tensorflow/python:variables",
@@ -262,9 +278,13 @@ py_test(
":debug_data",
":framework",
":stepper",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:errors",
"//tensorflow/python:framework",
+ "//tensorflow/python:framework_for_generated_wrappers",
"//tensorflow/python:framework_test_lib",
"//tensorflow/python:math_ops",
+ "//tensorflow/python:platform_test",
"//tensorflow/python:session",
"//tensorflow/python:variables",
],
@@ -283,6 +303,8 @@ py_test(
":tensor_format",
"//tensorflow/python:framework",
"//tensorflow/python:framework_test_lib",
+ "//tensorflow/python:platform",
+ "//tensorflow/python:platform_test",
],
)
@@ -294,9 +316,17 @@ py_library(
":debug_data",
":debug_utils",
"//tensorflow:tensorflow_py",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:control_flow_ops",
+ "//tensorflow/python:errors",
"//tensorflow/python:framework",
+ "//tensorflow/python:framework_for_generated_wrappers",
"//tensorflow/python:framework_test_lib",
+ "//tensorflow/python:math_ops",
+ "//tensorflow/python:platform_test",
"//tensorflow/python:session",
+ "//tensorflow/python:state_ops",
+ "//tensorflow/python:variables",
],
)
@@ -305,8 +335,15 @@ cuda_py_test(
size = "small",
srcs = ["session_debug_file_test.py"],
additional_deps = [
+ ":debug_data",
+ ":debug_utils",
":session_debug_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
"//tensorflow/python:framework_test_lib",
+ "//tensorflow/python:math_ops",
+ "//tensorflow/python:platform_test",
+ "//tensorflow/python:session",
+ "//tensorflow/python:variables",
],
)
@@ -320,6 +357,8 @@ py_test(
deps = [
":debugger_cli_common",
"//tensorflow/python:framework_test_lib",
+ "//tensorflow/python:platform",
+ "//tensorflow/python:platform_test",
],
)
@@ -333,6 +372,7 @@ py_test(
deps = [
":command_parser",
"//tensorflow/python:framework_test_lib",
+ "//tensorflow/python:platform_test",
],
)
@@ -346,6 +386,7 @@ py_test(
deps = [
":tensor_format",
"//tensorflow/python:framework_test_lib",
+ "//tensorflow/python:platform_test",
],
)
@@ -358,7 +399,12 @@ py_test(
srcs_version = "PY2AND3",
deps = [
":cli_shared",
+ "//tensorflow/python:errors",
+ "//tensorflow/python:framework",
+ "//tensorflow/python:framework_for_generated_wrappers",
"//tensorflow/python:framework_test_lib",
+ "//tensorflow/python:platform_test",
+ "//tensorflow/python:variables",
],
)
@@ -374,9 +420,15 @@ cuda_py_test(
":debug_utils",
":debugger_cli_common",
"//tensorflow:tensorflow_py",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:control_flow_ops",
"//tensorflow/python:framework",
+ "//tensorflow/python:framework_for_generated_wrappers",
"//tensorflow/python:framework_test_lib",
+ "//tensorflow/python:math_ops",
+ "//tensorflow/python:platform_test",
"//tensorflow/python:session",
+ "//tensorflow/python:variables",
],
)
@@ -393,6 +445,7 @@ cuda_py_test(
"//tensorflow/python:framework",
"//tensorflow/python:framework_for_generated_wrappers",
"//tensorflow/python:framework_test_lib",
+ "//tensorflow/python:platform_test",
"//tensorflow/python:math_ops",
"//tensorflow/python:session",
"//tensorflow/python:training",
@@ -414,6 +467,7 @@ py_test(
"//tensorflow/python:framework_for_generated_wrappers",
"//tensorflow/python:framework_test_lib",
"//tensorflow/python:math_ops",
+ "//tensorflow/python:platform_test",
"//tensorflow/python:session",
"//tensorflow/python:state_ops",
"//tensorflow/python:variables",
diff --git a/tensorflow/python/kernel_tests/BUILD b/tensorflow/python/kernel_tests/BUILD
index b7dfd68711..81f363be42 100644
--- a/tensorflow/python/kernel_tests/BUILD
+++ b/tensorflow/python/kernel_tests/BUILD
@@ -22,105 +22,188 @@ tf_py_test(
name = "as_string_op_test",
size = "small",
srcs = ["as_string_op_test.py"],
- additional_deps = ["//tensorflow:tensorflow_py"],
+ additional_deps = [
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:string_ops",
+ ],
)
tf_py_test(
name = "attention_ops_test",
size = "small",
srcs = ["attention_ops_test.py"],
- additional_deps = ["//tensorflow:tensorflow_py"],
+ additional_deps = [
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:image_ops",
+ ],
)
tf_py_test(
name = "barrier_ops_test",
size = "small",
srcs = ["barrier_ops_test.py"],
- additional_deps = ["//tensorflow:tensorflow_py"],
+ additional_deps = [
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:data_flow_ops",
+ "//tensorflow/python:errors",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ ],
)
tf_py_test(
name = "base64_ops_test",
size = "small",
srcs = ["base64_ops_test.py"],
- additional_deps = ["//tensorflow:tensorflow_py"],
+ additional_deps = [
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:errors",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:framework_test_lib",
+ "//tensorflow/python:string_ops",
+ ],
)
tf_py_test(
name = "bcast_ops_test",
size = "small",
srcs = ["bcast_ops_test.py"],
- additional_deps = ["//tensorflow:tensorflow_py"],
+ additional_deps = [
+ "//tensorflow/python:array_ops_gen",
+ "//tensorflow/python:client_testlib",
+ ],
)
tf_py_test(
name = "benchmark_test",
size = "small",
srcs = ["benchmark_test.py"],
- additional_deps = ["//tensorflow:tensorflow_py"],
+ additional_deps = [
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:platform",
+ "//tensorflow/python:platform_benchmark",
+ "//tensorflow/python:session",
+ ],
)
tf_py_test(
name = "candidate_sampler_ops_test",
size = "small",
srcs = ["candidate_sampler_ops_test.py"],
- additional_deps = ["//tensorflow:tensorflow_py"],
+ additional_deps = [
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:candidate_sampling_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:math_ops",
+ ],
)
tf_py_test(
name = "cholesky_op_test",
size = "small",
srcs = ["cholesky_op_test.py"],
- additional_deps = ["//tensorflow:tensorflow_py"],
+ additional_deps = [
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:linalg_ops",
+ "//tensorflow/python:math_ops",
+ "//tensorflow/python:platform",
+ ],
)
tf_py_test(
name = "clip_ops_test",
size = "small",
srcs = ["clip_ops_test.py"],
- additional_deps = ["//tensorflow:tensorflow_py"],
+ additional_deps = [
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:clip_ops",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ ],
)
tf_py_test(
name = "conditional_accumulator_test",
size = "small",
srcs = ["conditional_accumulator_test.py"],
- additional_deps = ["//tensorflow:tensorflow_py"],
+ additional_deps = [
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:data_flow_ops",
+ "//tensorflow/python:errors",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:math_ops",
+ "//tensorflow/python:state_ops",
+ "//tensorflow/python:variables",
+ ],
)
tf_py_test(
name = "ctc_decoder_ops_test",
size = "small",
srcs = ["ctc_decoder_ops_test.py"],
- additional_deps = ["//tensorflow:tensorflow_py"],
+ additional_deps = [
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:ctc_ops",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ ],
)
tf_py_test(
name = "ctc_loss_op_test",
size = "small",
srcs = ["ctc_loss_op_test.py"],
- additional_deps = ["//tensorflow:tensorflow_py"],
+ additional_deps = [
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:ctc_ops",
+ "//tensorflow/python:framework",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:gradients",
+ ],
)
tf_py_test(
name = "decode_csv_op_test",
size = "small",
srcs = ["decode_csv_op_test.py"],
- additional_deps = ["//tensorflow:tensorflow_py"],
+ additional_deps = [
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:parsing_ops",
+ ],
)
tf_py_test(
name = "decode_png_op_test",
size = "small",
srcs = ["decode_png_op_test.py"],
- additional_deps = ["//tensorflow:tensorflow_py"],
+ additional_deps = [
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:image_ops",
+ "//tensorflow/python:nn_grad",
+ ],
)
tf_py_test(
name = "decode_image_op_test",
size = "small",
srcs = ["decode_image_op_test.py"],
- additional_deps = ["//tensorflow:tensorflow_py"],
+ additional_deps = [
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:errors",
+ "//tensorflow/python:image_ops",
+ "//tensorflow/python:io_ops",
+ "//tensorflow/python:nn_grad",
+ ],
data = ["//tensorflow/core:image_testdata"],
)
@@ -128,154 +211,291 @@ tf_py_test(
name = "decode_raw_op_test",
size = "small",
srcs = ["decode_raw_op_test.py"],
- additional_deps = ["//tensorflow:tensorflow_py"],
+ additional_deps = [
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:parsing_ops",
+ ],
)
tf_py_test(
name = "determinant_op_test",
size = "small",
srcs = ["determinant_op_test.py"],
- additional_deps = ["//tensorflow:tensorflow_py"],
+ additional_deps = [
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:linalg_ops",
+ ],
)
tf_py_test(
name = "draw_bounding_box_op_test",
size = "small",
srcs = ["draw_bounding_box_op_test.py"],
- additional_deps = ["//tensorflow:tensorflow_py"],
+ additional_deps = [
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:image_ops",
+ "//tensorflow/python:math_ops",
+ ],
)
tf_py_test(
name = "edit_distance_op_test",
size = "small",
srcs = ["edit_distance_op_test.py"],
- additional_deps = ["//tensorflow:tensorflow_py"],
+ additional_deps = [
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ ],
)
tf_py_test(
name = "fifo_queue_test",
size = "small",
srcs = ["fifo_queue_test.py"],
- additional_deps = ["//tensorflow:tensorflow_py"],
+ additional_deps = [
+ "//tensorflow/core:protos_all_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:data_flow_ops",
+ "//tensorflow/python:errors",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:session",
+ "//tensorflow/python:util",
+ ],
)
tf_py_test(
name = "fractional_avg_pool_op_test",
size = "small",
srcs = ["fractional_avg_pool_op_test.py"],
- additional_deps = ["//tensorflow:tensorflow_py"],
+ additional_deps = [
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:nn_grad",
+ "//tensorflow/python:nn_ops",
+ "//tensorflow/python:nn_ops_gen",
+ ],
)
tf_py_test(
name = "fractional_max_pool_op_test",
size = "small",
srcs = ["fractional_max_pool_op_test.py"],
- additional_deps = ["//tensorflow:tensorflow_py"],
+ additional_deps = [
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:nn_grad",
+ "//tensorflow/python:nn_ops",
+ "//tensorflow/python:nn_ops_gen",
+ ],
)
tf_py_test(
name = "identity_op_py_test",
size = "small",
srcs = ["identity_op_py_test.py"],
- additional_deps = ["//tensorflow:tensorflow_py"],
+ additional_deps = [
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:array_ops_gen",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:variables",
+ ],
)
tf_py_test(
name = "in_topk_op_test",
size = "small",
srcs = ["in_topk_op_test.py"],
- additional_deps = ["//tensorflow:tensorflow_py"],
+ additional_deps = [
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:errors",
+ "//tensorflow/python:nn_ops",
+ ],
)
tf_py_test(
name = "io_ops_test",
size = "small",
srcs = ["io_ops_test.py"],
- additional_deps = ["//tensorflow:tensorflow_py"],
+ additional_deps = [
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:io_ops",
+ "//tensorflow/python:util",
+ ],
)
tf_py_test(
name = "listdiff_op_test",
size = "small",
srcs = ["listdiff_op_test.py"],
- additional_deps = ["//tensorflow:tensorflow_py"],
+ additional_deps = [
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:util",
+ ],
)
tf_py_test(
name = "logging_ops_test",
size = "small",
srcs = ["logging_ops_test.py"],
- additional_deps = ["//tensorflow:tensorflow_py"],
+ additional_deps = [
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:control_flow_ops",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:gradients",
+ "//tensorflow/python:logging_ops",
+ "//tensorflow/python:math_ops",
+ ],
)
tf_py_test(
name = "losses_test",
size = "small",
srcs = ["losses_test.py"],
- additional_deps = ["//tensorflow:tensorflow_py"],
+ additional_deps = [
+ "//tensorflow/contrib/losses:losses_py",
+ "//tensorflow/python/ops/losses",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:errors",
+ "//tensorflow/python:framework",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:init_ops",
+ "//tensorflow/python:math_ops",
+ "//tensorflow/python:training",
+ "//tensorflow/python:variable_scope",
+ "//tensorflow/python:variables",
+ ],
)
tf_py_test(
name = "matrix_inverse_op_test",
size = "small",
srcs = ["matrix_inverse_op_test.py"],
- additional_deps = ["//tensorflow:tensorflow_py"],
+ additional_deps = [
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:linalg_ops",
+ "//tensorflow/python:math_ops",
+ ],
)
tf_py_test(
name = "matrix_solve_ls_op_test",
size = "small",
srcs = ["matrix_solve_ls_op_test.py"],
- additional_deps = ["//tensorflow:tensorflow_py"],
+ additional_deps = [
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:linalg_ops",
+ ],
)
tf_py_test(
name = "matrix_solve_op_test",
size = "small",
srcs = ["matrix_solve_op_test.py"],
- additional_deps = ["//tensorflow:tensorflow_py"],
+ additional_deps = [
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:linalg_ops",
+ ],
)
cuda_py_test(
name = "matrix_triangular_solve_op_test",
size = "small",
srcs = ["matrix_triangular_solve_op_test.py"],
- additional_deps = ["//tensorflow:tensorflow_py"],
+ additional_deps = [
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:linalg_ops",
+ ],
)
cuda_py_test(
name = "parameterized_truncated_normal_op_test",
size = "small",
srcs = ["parameterized_truncated_normal_op_test.py"],
- additional_deps = ["//tensorflow:tensorflow_py"],
+ additional_deps = [
+ "//tensorflow/core:protos_all_py",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:control_flow_ops",
+ "//tensorflow/python:framework",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:platform",
+ "//tensorflow/python:random_ops",
+ "//tensorflow/python:session",
+ ],
)
tf_py_test(
name = "parsing_ops_test",
size = "small",
srcs = ["parsing_ops_test.py"],
- additional_deps = ["//tensorflow:tensorflow_py"],
+ additional_deps = [
+ "//tensorflow/core:protos_all_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:errors",
+ "//tensorflow/python:framework",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:parsing_ops",
+ "//tensorflow/python:platform",
+ ],
)
tf_py_test(
name = "partitioned_variables_test",
size = "small",
srcs = ["partitioned_variables_test.py"],
- additional_deps = ["//tensorflow:tensorflow_py"],
+ additional_deps = [
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:init_ops",
+ "//tensorflow/python:partitioned_variables",
+ "//tensorflow/python:random_ops",
+ "//tensorflow/python:variable_scope",
+ "//tensorflow/python:variables",
+ ],
)
tf_py_test(
name = "priority_queue_test",
size = "small",
srcs = ["priority_queue_test.py"],
- additional_deps = ["//tensorflow:tensorflow_py"],
+ additional_deps = [
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:data_flow_ops",
+ "//tensorflow/python:errors",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:nn_grad",
+ ],
)
tf_py_test(
name = "random_shuffle_queue_test",
size = "small",
srcs = ["random_shuffle_queue_test.py"],
- additional_deps = ["//tensorflow:tensorflow_py"],
+ additional_deps = [
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:data_flow_ops",
+ "//tensorflow/python:errors",
+ "//tensorflow/python:framework",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:platform",
+ ],
)
tf_py_test(
@@ -283,8 +503,13 @@ tf_py_test(
size = "small",
srcs = ["resource_variable_ops_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:errors",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:framework_test_lib",
"//tensorflow/python:resource_variable_ops",
+ "//tensorflow/python:variables",
],
)
@@ -292,14 +517,27 @@ tf_py_test(
name = "save_restore_ops_test",
size = "small",
srcs = ["save_restore_ops_test.py"],
- additional_deps = ["//tensorflow:tensorflow_py"],
+ additional_deps = [
+ "//tensorflow/core:protos_all_py",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:io_ops_gen",
+ "//tensorflow/python:session",
+ ],
)
tf_py_test(
name = "scatter_nd_ops_test",
size = "medium",
srcs = ["scatter_nd_ops_test.py"],
- additional_deps = ["//tensorflow:tensorflow_py"],
+ additional_deps = [
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:gradients",
+ "//tensorflow/python:session",
+ "//tensorflow/python:state_ops",
+ "//tensorflow/python:variables",
+ ],
tags = ["noasan"], # http://b/32635055
)
@@ -307,140 +545,263 @@ tf_py_test(
name = "segment_reduction_ops_test",
size = "small",
srcs = ["segment_reduction_ops_test.py"],
- additional_deps = ["//tensorflow:tensorflow_py"],
+ additional_deps = [
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:math_ops",
+ "//tensorflow/python:nn_grad",
+ ],
)
tf_py_test(
name = "sparse_add_op_test",
size = "small",
srcs = ["sparse_add_op_test.py"],
- additional_deps = ["//tensorflow:tensorflow_py"],
+ additional_deps = [
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:math_ops",
+ "//tensorflow/python:session",
+ "//tensorflow/python:sparse_grad",
+ "//tensorflow/python:sparse_ops",
+ ],
)
tf_py_test(
name = "sparse_concat_op_test",
size = "small",
srcs = ["sparse_concat_op_test.py"],
- additional_deps = ["//tensorflow:tensorflow_py"],
+ additional_deps = [
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:sparse_ops",
+ ],
)
tf_py_test(
name = "sparse_conditional_accumulator_test",
size = "small",
srcs = ["sparse_conditional_accumulator_test.py"],
- additional_deps = ["//tensorflow:tensorflow_py"],
+ additional_deps = [
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:data_flow_ops",
+ "//tensorflow/python:errors",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ ],
)
tf_py_test(
name = "sparse_reorder_op_test",
size = "small",
srcs = ["sparse_reorder_op_test.py"],
- additional_deps = ["//tensorflow:tensorflow_py"],
+ additional_deps = [
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:sparse_grad",
+ "//tensorflow/python:sparse_ops",
+ ],
)
tf_py_test(
name = "sparse_reshape_op_test",
size = "small",
srcs = ["sparse_reshape_op_test.py"],
- additional_deps = ["//tensorflow:tensorflow_py"],
+ additional_deps = [
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:sparse_ops",
+ ],
)
tf_py_test(
name = "sparse_split_op_test",
size = "small",
srcs = ["sparse_split_op_test.py"],
- additional_deps = ["//tensorflow:tensorflow_py"],
+ additional_deps = [
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework",
+ "//tensorflow/python:sparse_ops",
+ ],
)
tf_py_test(
name = "sparse_to_dense_op_py_test",
size = "small",
srcs = ["sparse_to_dense_op_py_test.py"],
- additional_deps = ["//tensorflow:tensorflow_py"],
+ additional_deps = [
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:sparse_ops",
+ ],
)
tf_py_test(
name = "sparsemask_op_test",
size = "small",
srcs = ["sparsemask_op_test.py"],
- additional_deps = ["//tensorflow:tensorflow_py"],
+ additional_deps = [
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ ],
)
tf_py_test(
name = "string_join_op_test",
size = "small",
srcs = ["string_join_op_test.py"],
- additional_deps = ["//tensorflow:tensorflow_py"],
+ additional_deps = [
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:string_ops",
+ ],
)
tf_py_test(
name = "string_split_op_test",
size = "small",
srcs = ["string_split_op_test.py"],
- additional_deps = ["//tensorflow:tensorflow_py"],
+ additional_deps = [
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:errors",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:string_ops",
+ ],
)
tf_py_test(
name = "substr_op_test",
size = "small",
srcs = ["substr_op_test.py"],
- additional_deps = ["//tensorflow:tensorflow_py"],
+ additional_deps = [
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:errors",
+ "//tensorflow/python:string_ops",
+ ],
)
tf_py_test(
name = "summary_ops_test",
size = "small",
srcs = ["summary_ops_test.py"],
- additional_deps = ["//tensorflow:tensorflow_py"],
+ additional_deps = [
+ "//tensorflow/core:protos_all_py",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:logging_ops",
+ "//tensorflow/python:summary",
+ ],
)
tf_py_test(
name = "summary_tensor_op_test",
size = "small",
srcs = ["summary_tensor_op_test.py"],
- additional_deps = ["//tensorflow:tensorflow_py"],
+ additional_deps = [
+ "//tensorflow/core:protos_all_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:summary_ops",
+ ],
)
tf_py_test(
name = "template_test",
size = "small",
srcs = ["template_test.py"],
- additional_deps = ["//tensorflow:tensorflow_py"],
+ additional_deps = [
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework",
+ "//tensorflow/python:init_ops",
+ "//tensorflow/python:math_ops",
+ "//tensorflow/python:nn_grad",
+ "//tensorflow/python:session",
+ "//tensorflow/python:template",
+ "//tensorflow/python:training",
+ "//tensorflow/python:variable_scope",
+ "//tensorflow/python:variables",
+ ],
)
tf_py_test(
name = "topk_op_test",
size = "small",
srcs = ["topk_op_test.py"],
- additional_deps = ["//tensorflow:tensorflow_py"],
+ additional_deps = [
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:gradients",
+ "//tensorflow/python:nn_grad",
+ "//tensorflow/python:nn_ops",
+ ],
)
tf_py_test(
name = "unique_op_test",
size = "small",
srcs = ["unique_op_test.py"],
- additional_deps = ["//tensorflow:tensorflow_py"],
+ additional_deps = [
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ ],
)
tf_py_test(
name = "variable_scope_test",
size = "small",
srcs = ["variable_scope_test.py"],
- additional_deps = ["//tensorflow:tensorflow_py"],
+ additional_deps = [
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:control_flow_ops",
+ "//tensorflow/python:errors",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:init_ops",
+ "//tensorflow/python:math_ops",
+ "//tensorflow/python:variable_scope",
+ "//tensorflow/python:variables",
+ ],
)
tf_py_test(
name = "variables_test",
size = "small",
srcs = ["variables_test.py"],
- additional_deps = ["//tensorflow:tensorflow_py"],
+ additional_deps = [
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:control_flow_ops",
+ "//tensorflow/python:errors",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:math_ops",
+ "//tensorflow/python:random_ops",
+ "//tensorflow/python:state_ops_gen",
+ "//tensorflow/python:training",
+ "//tensorflow/python:util",
+ "//tensorflow/python:variables",
+ ],
)
tf_py_test(
name = "where_op_test",
size = "small",
srcs = ["where_op_test.py"],
- additional_deps = ["//tensorflow:tensorflow_py"],
+ additional_deps = [
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ ],
)
cuda_py_test(
@@ -448,7 +809,12 @@ cuda_py_test(
size = "small",
srcs = ["cast_op_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:math_ops",
+ "//tensorflow/python:variables",
],
tags = ["noasan"],
)
@@ -458,7 +824,11 @@ cuda_py_test(
size = "small",
srcs = ["dense_update_ops_no_tsan_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:math_ops",
+ "//tensorflow/python:state_ops",
+ "//tensorflow/python:variables",
],
tags = ["notsan"],
)
@@ -468,7 +838,11 @@ tf_py_test(
size = "medium",
srcs = ["diag_op_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:gradients",
+ "//tensorflow/python:platform",
],
shard_count = 2,
)
@@ -478,7 +852,15 @@ tf_py_test(
size = "small",
srcs = ["reader_ops_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/core:protos_all_py",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:data_flow_ops",
+ "//tensorflow/python:errors",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:io_ops",
+ "//tensorflow/python:lib",
+ "//tensorflow/python:util",
+ "//tensorflow/python:variables",
],
)
@@ -487,7 +869,8 @@ cuda_py_test(
size = "small",
srcs = ["argmax_op_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:math_ops",
],
)
@@ -496,7 +879,16 @@ cuda_py_test(
size = "medium",
srcs = ["array_ops_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:errors",
+ "//tensorflow/python:framework",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:framework_test_lib",
+ "//tensorflow/python:gradients",
+ "//tensorflow/python:math_ops",
+ "//tensorflow/python:session",
+ "//tensorflow/python:variables",
],
)
@@ -505,7 +897,10 @@ cuda_py_test(
size = "small",
srcs = ["batch_matmul_op_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:math_ops",
],
shard_count = 20,
)
@@ -515,7 +910,10 @@ cuda_py_test(
size = "small",
srcs = ["batchtospace_op_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:array_ops_gen",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
],
)
@@ -524,7 +922,11 @@ cuda_py_test(
size = "small",
srcs = ["betainc_op_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:math_ops",
+ "//tensorflow/python:platform",
],
)
@@ -533,7 +935,12 @@ cuda_py_test(
size = "small",
srcs = ["bias_op_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:gradients",
+ "//tensorflow/python:nn_grad",
+ "//tensorflow/python:nn_ops",
],
)
@@ -542,7 +949,9 @@ cuda_py_test(
size = "small",
srcs = ["bitcast_op_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
],
)
@@ -551,7 +960,11 @@ cuda_py_test(
size = "small",
srcs = ["check_ops_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:check_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework",
+ "//tensorflow/python:framework_for_generated_wrappers",
],
)
@@ -560,7 +973,12 @@ cuda_py_test(
size = "small",
srcs = ["constant_op_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:errors",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:math_ops",
+ "//tensorflow/python:util",
],
)
@@ -569,7 +987,31 @@ cuda_py_test(
size = "small",
srcs = ["control_flow_ops_py_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/core:protos_all_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:array_ops_gen",
+ "//tensorflow/python:client",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:control_flow_ops",
+ "//tensorflow/python:data_flow_ops",
+ "//tensorflow/python:data_flow_ops_gen",
+ "//tensorflow/python:errors",
+ "//tensorflow/python:framework",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:functional_ops",
+ "//tensorflow/python:gradients",
+ "//tensorflow/python:logging_ops",
+ "//tensorflow/python:logging_ops_gen",
+ "//tensorflow/python:math_ops",
+ "//tensorflow/python:script_ops",
+ "//tensorflow/python:session",
+ "//tensorflow/python:state_ops",
+ "//tensorflow/python:state_ops_gen",
+ "//tensorflow/python:tensor_array_grad",
+ "//tensorflow/python:training",
+ "//tensorflow/python:util",
+ "//tensorflow/python:variable_scope",
+ "//tensorflow/python:variables",
],
)
@@ -578,7 +1020,10 @@ cuda_py_test(
size = "small",
srcs = ["conv1d_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:nn_ops",
],
)
@@ -587,7 +1032,11 @@ cuda_py_test(
size = "small",
srcs = ["conv2d_transpose_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:client",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:nn_grad",
+ "//tensorflow/python:nn_ops",
],
)
@@ -596,7 +1045,11 @@ cuda_py_test(
size = "small",
srcs = ["conv3d_backprop_filter_v2_grad_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:nn_grad",
+ "//tensorflow/python:nn_ops",
],
)
@@ -605,7 +1058,9 @@ cuda_py_test(
size = "small",
srcs = ["cross_grad_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:math_ops",
],
)
@@ -614,7 +1069,10 @@ cuda_py_test(
size = "small",
srcs = ["denormal_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:platform",
],
)
@@ -623,7 +1081,12 @@ cuda_py_test(
size = "small",
srcs = ["dense_update_ops_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:math_ops",
+ "//tensorflow/python:state_ops",
+ "//tensorflow/python:variables",
],
)
@@ -632,7 +1095,10 @@ cuda_py_test(
size = "small",
srcs = ["depthtospace_op_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:math_ops",
],
)
@@ -641,7 +1107,8 @@ cuda_py_test(
size = "medium",
srcs = ["division_past_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
],
)
@@ -650,7 +1117,12 @@ cuda_py_test(
size = "small",
srcs = ["dynamic_partition_op_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:data_flow_grad",
+ "//tensorflow/python:data_flow_ops",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:gradients",
],
)
@@ -659,7 +1131,11 @@ cuda_py_test(
size = "small",
srcs = ["dynamic_stitch_op_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:data_flow_grad",
+ "//tensorflow/python:data_flow_ops",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:gradients",
],
)
@@ -668,7 +1144,9 @@ cuda_py_test(
size = "small",
srcs = ["extract_image_patches_op_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
],
)
@@ -677,7 +1155,17 @@ cuda_py_test(
size = "small",
srcs = ["functional_ops_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:functional_ops",
+ "//tensorflow/python:gradients",
+ "//tensorflow/python:init_ops",
+ "//tensorflow/python:math_ops",
+ "//tensorflow/python:tensor_array_grad",
+ "//tensorflow/python:variable_scope",
+ "//tensorflow/python:variables",
],
)
@@ -686,7 +1174,12 @@ cuda_py_test(
size = "small",
srcs = ["gather_nd_op_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:gradients",
+ "//tensorflow/python:session",
+ "//tensorflow/python:variables",
],
)
@@ -695,7 +1188,10 @@ cuda_py_test(
size = "small",
srcs = ["gather_op_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:gradients",
],
)
@@ -704,7 +1200,10 @@ cuda_py_test(
size = "small",
srcs = ["gradient_correctness_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:gradients",
+ "//tensorflow/python:math_ops",
],
)
@@ -713,7 +1212,15 @@ cuda_py_test(
size = "small",
srcs = ["init_ops_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:init_ops",
+ "//tensorflow/python:math_ops",
+ "//tensorflow/python:nn_ops",
+ "//tensorflow/python:variable_scope",
+ "//tensorflow/python:variables",
],
)
@@ -722,7 +1229,11 @@ cuda_py_test(
size = "small",
srcs = ["linalg_ops_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:linalg_ops",
+ "//tensorflow/python:math_ops",
],
)
@@ -731,7 +1242,12 @@ cuda_py_test(
size = "small",
srcs = ["lrn_op_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:gradients",
+ "//tensorflow/python:nn",
+ "//tensorflow/python:nn_grad",
],
)
@@ -740,7 +1256,13 @@ cuda_py_test(
size = "small",
srcs = ["matmul_op_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:framework_test_lib",
+ "//tensorflow/python:math_ops",
+ "//tensorflow/python:random_ops",
+ "//tensorflow/python:variables",
],
)
@@ -749,7 +1271,10 @@ cuda_py_test(
size = "small",
srcs = ["morphological_ops_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:nn_grad",
+ "//tensorflow/python:nn_ops",
],
)
@@ -758,7 +1283,15 @@ cuda_py_test(
size = "small",
srcs = ["multinomial_op_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/core:protos_all_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:control_flow_ops",
+ "//tensorflow/python:framework",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:math_ops",
+ "//tensorflow/python:random_ops",
+ "//tensorflow/python:session",
],
)
@@ -767,7 +1300,12 @@ cuda_py_test(
size = "small",
srcs = ["numerics_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:control_flow_ops",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:math_ops",
+ "//tensorflow/python:numerics",
],
)
@@ -776,7 +1314,9 @@ cuda_py_test(
size = "small",
srcs = ["one_hot_op_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
],
)
@@ -785,7 +1325,11 @@ cuda_py_test(
size = "small",
srcs = ["pack_op_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:errors",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:variables",
],
)
@@ -794,7 +1338,9 @@ cuda_py_test(
size = "small",
srcs = ["pad_op_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
],
)
@@ -803,7 +1349,11 @@ cuda_py_test(
size = "small",
srcs = ["padding_fifo_queue_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:data_flow_ops",
+ "//tensorflow/python:errors",
+ "//tensorflow/python:framework_for_generated_wrappers",
],
)
@@ -812,7 +1362,12 @@ cuda_py_test(
size = "small",
srcs = ["py_func_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:errors",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:script_ops",
+ "//tensorflow/python:session",
],
)
@@ -821,7 +1376,8 @@ cuda_py_test(
size = "small",
srcs = ["random_crop_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:random_ops",
],
)
@@ -830,7 +1386,10 @@ cuda_py_test(
size = "small",
srcs = ["random_ops_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:random_ops",
],
)
@@ -839,7 +1398,10 @@ cuda_py_test(
size = "small",
srcs = ["reduce_join_op_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:string_ops",
],
)
@@ -848,7 +1410,10 @@ cuda_py_test(
size = "medium",
srcs = ["reduction_ops_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:math_ops",
],
)
@@ -857,7 +1422,14 @@ cuda_py_test(
size = "small",
srcs = ["relu_op_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:gradients",
+ "//tensorflow/python:nn_grad",
+ "//tensorflow/python:nn_ops",
+ "//tensorflow/python:random_ops",
+ "//tensorflow/python:training",
+ "//tensorflow/python:variables",
],
)
@@ -866,7 +1438,9 @@ cuda_py_test(
size = "small",
srcs = ["reshape_op_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
],
)
@@ -875,7 +1449,9 @@ cuda_py_test(
size = "small",
srcs = ["reverse_sequence_op_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
],
)
@@ -884,7 +1460,15 @@ cuda_py_test(
size = "small",
srcs = ["scalar_strict_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:io_ops_gen",
+ "//tensorflow/python:math_ops",
+ "//tensorflow/python:nn_grad",
+ "//tensorflow/python:platform",
+ "//tensorflow/python:random_ops",
+ "//tensorflow/python:sparse_ops",
],
)
@@ -893,7 +1477,10 @@ cuda_py_test(
size = "medium",
srcs = ["scan_ops_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:errors",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:math_ops",
],
)
@@ -902,7 +1489,10 @@ cuda_py_test(
size = "small",
srcs = ["session_ops_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:math_ops",
+ "//tensorflow/python:session_ops",
],
)
@@ -911,7 +1501,13 @@ cuda_py_test(
size = "small",
srcs = ["shape_ops_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/core:protos_all_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:errors",
+ "//tensorflow/python:framework",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:gradients",
],
)
@@ -920,7 +1516,11 @@ cuda_py_test(
size = "small",
srcs = ["softmax_op_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:errors",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:nn_ops",
],
)
@@ -929,7 +1529,10 @@ cuda_py_test(
size = "small",
srcs = ["softplus_op_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:nn_grad",
+ "//tensorflow/python:nn_ops",
],
)
@@ -938,7 +1541,10 @@ cuda_py_test(
size = "small",
srcs = ["softsign_op_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:nn_grad",
+ "//tensorflow/python:nn_ops",
],
)
@@ -947,7 +1553,12 @@ cuda_py_test(
size = "small",
srcs = ["spacetobatch_op_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:array_ops_gen",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:math_ops",
],
)
@@ -956,7 +1567,10 @@ cuda_py_test(
size = "small",
srcs = ["spacetodepth_op_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:math_ops",
],
)
@@ -965,7 +1579,11 @@ tf_py_test(
size = "small",
srcs = ["sparse_serialization_ops_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:sparse_ops",
],
)
@@ -974,7 +1592,13 @@ tf_py_test(
size = "small",
srcs = ["sparse_tensors_map_ops_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:session",
+ "//tensorflow/python:sparse_ops",
+ "//tensorflow/python:variables",
],
)
@@ -983,7 +1607,11 @@ cuda_py_test(
size = "small",
srcs = ["sparse_tensor_dense_matmul_grad_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:sparse_grad",
+ "//tensorflow/python:sparse_ops",
],
)
@@ -992,7 +1620,19 @@ cuda_py_test(
size = "small",
srcs = ["sparse_xent_op_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/core:protos_all_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:errors",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:gradients",
+ "//tensorflow/python:math_ops",
+ "//tensorflow/python:nn_grad",
+ "//tensorflow/python:nn_ops",
+ "//tensorflow/python:nn_ops_gen",
+ "//tensorflow/python:platform",
+ "//tensorflow/python:session",
+ "//tensorflow/python:sparse_ops",
],
)
@@ -1001,7 +1641,11 @@ cuda_py_test(
size = "small",
srcs = ["split_op_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:gradients",
+ "//tensorflow/python:math_ops",
],
)
@@ -1010,7 +1654,12 @@ cuda_py_test(
size = "small",
srcs = ["stack_ops_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:control_flow_ops",
+ "//tensorflow/python:data_flow_ops_gen",
+ "//tensorflow/python:errors",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:math_ops",
],
)
@@ -1019,7 +1668,10 @@ cuda_py_test(
size = "small",
srcs = ["string_to_hash_bucket_op_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:string_ops",
],
)
@@ -1028,7 +1680,10 @@ cuda_py_test(
size = "small",
srcs = ["string_to_number_op_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:parsing_ops",
],
)
@@ -1037,7 +1692,10 @@ cuda_py_test(
size = "small",
srcs = ["summary_audio_op_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/core:protos_all_py",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:summary",
],
)
@@ -1046,7 +1704,12 @@ cuda_py_test(
size = "small",
srcs = ["summary_image_op_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/core:protos_all_py",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:image_ops",
+ "//tensorflow/python:nn_grad",
+ "//tensorflow/python:summary",
],
)
@@ -1055,7 +1718,17 @@ cuda_py_test(
size = "small",
srcs = ["tensor_array_ops_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:control_flow_ops",
+ "//tensorflow/python:data_flow_ops_gen",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:gradients",
+ "//tensorflow/python:math_ops",
+ "//tensorflow/python:nn_grad",
+ "//tensorflow/python:tensor_array_grad",
+ "//tensorflow/python:tensor_array_ops",
+ "//tensorflow/python:variables",
],
)
@@ -1064,7 +1737,8 @@ cuda_py_test(
size = "small",
srcs = ["trace_op_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:math_ops",
],
)
@@ -1073,7 +1747,9 @@ cuda_py_test(
size = "small",
srcs = ["transpose_op_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
],
)
@@ -1082,7 +1758,9 @@ cuda_py_test(
size = "small",
srcs = ["unpack_op_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
],
)
@@ -1091,7 +1769,14 @@ cuda_py_test(
size = "small",
srcs = ["variable_ops_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:errors",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:math_ops",
+ "//tensorflow/python:state_ops",
+ "//tensorflow/python:state_ops_gen",
+ "//tensorflow/python:variables",
],
)
@@ -1100,7 +1785,11 @@ cuda_py_test(
size = "small",
srcs = ["xent_op_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:nn_grad",
+ "//tensorflow/python:nn_ops",
+ "//tensorflow/python:nn_ops_gen",
],
)
@@ -1109,7 +1798,9 @@ cuda_py_test(
size = "small",
srcs = ["zero_division_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:errors",
+ "//tensorflow/python:framework_for_generated_wrappers",
],
)
@@ -1118,7 +1809,12 @@ cuda_py_test(
size = "medium",
srcs = ["atrous_conv2d_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:nn",
+ "//tensorflow/python:nn_grad",
+ "//tensorflow/python:nn_ops",
],
)
@@ -1127,7 +1823,10 @@ cuda_py_test(
size = "medium",
srcs = ["atrous_convolution_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:nn_grad",
+ "//tensorflow/python:nn_ops",
],
)
@@ -1136,7 +1835,10 @@ cuda_py_test(
size = "medium",
srcs = ["pool_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:nn_grad",
+ "//tensorflow/python:nn_ops",
],
)
@@ -1145,7 +1847,11 @@ cuda_py_test(
size = "medium",
srcs = ["conv2d_backprop_filter_grad_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:nn_grad",
+ "//tensorflow/python:nn_ops",
],
)
@@ -1154,7 +1860,10 @@ cuda_py_test(
size = "medium",
srcs = ["conv3d_transpose_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:nn_grad",
+ "//tensorflow/python:nn_ops",
],
)
@@ -1163,7 +1872,19 @@ cuda_py_test(
size = "medium",
srcs = ["conv_ops_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/contrib/layers:layers_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:errors",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:framework_test_lib",
+ "//tensorflow/python:nn",
+ "//tensorflow/python:nn_grad",
+ "//tensorflow/python:nn_ops",
+ "//tensorflow/python:platform",
+ "//tensorflow/python:random_ops",
+ "//tensorflow/python:session",
+ "//tensorflow/python:variables",
],
)
@@ -1172,7 +1893,11 @@ cuda_py_test(
size = "medium", # http://b/30603882
srcs = ["depthwise_conv_op_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:nn",
+ "//tensorflow/python:nn_grad",
+ "//tensorflow/python:nn_ops",
],
)
@@ -1181,7 +1906,8 @@ cuda_py_test(
size = "medium",
srcs = ["division_future_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
],
)
@@ -1190,7 +1916,9 @@ cuda_py_test(
size = "medium",
srcs = ["fft_ops_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:math_ops",
],
)
@@ -1199,7 +1927,10 @@ cuda_py_test(
size = "medium", # http://b/30600785
srcs = ["pooling_ops_3d_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:nn_grad",
+ "//tensorflow/python:nn_ops",
],
)
@@ -1208,7 +1939,14 @@ cuda_py_test(
size = "medium",
srcs = ["pooling_ops_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:errors",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:framework_test_lib",
+ "//tensorflow/python:nn_grad",
+ "//tensorflow/python:nn_ops",
+ "//tensorflow/python:nn_ops_gen",
],
)
@@ -1217,7 +1955,12 @@ cuda_py_test(
size = "medium",
srcs = ["random_gamma_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:platform",
+ "//tensorflow/python:random_ops",
],
)
@@ -1226,7 +1969,27 @@ cuda_py_test(
size = "medium",
srcs = ["rnn_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/contrib/rnn:rnn_py",
+ "//tensorflow/core:protos_all_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:control_flow_ops",
+ "//tensorflow/python:data_flow_grad",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:gradients",
+ "//tensorflow/python:init_ops",
+ "//tensorflow/python:math_ops",
+ "//tensorflow/python:nn_grad",
+ "//tensorflow/python:platform",
+ "//tensorflow/python:rnn",
+ "//tensorflow/python:rnn_cell",
+ "//tensorflow/python:session",
+ "//tensorflow/python:sparse_grad",
+ "//tensorflow/python:tensor_array_grad",
+ "//tensorflow/python:tensor_array_ops",
+ "//tensorflow/python:util",
+ "//tensorflow/python:variable_scope",
+ "//tensorflow/python:variables",
],
shard_count = 10,
)
@@ -1236,7 +1999,10 @@ cuda_py_test(
size = "large", # NOTE: This is not run.
srcs = ["scatter_ops_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:state_ops",
+ "//tensorflow/python:variables",
],
)
@@ -1245,7 +2011,11 @@ cuda_py_test(
size = "medium",
srcs = ["slice_op_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:errors",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:gradients",
],
)
@@ -1254,7 +2024,9 @@ cuda_py_test(
size = "medium",
srcs = ["sparse_matmul_op_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:math_ops",
],
)
@@ -1263,7 +2035,15 @@ cuda_py_test(
size = "medium",
srcs = ["sparse_ops_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:framework_test_lib",
+ "//tensorflow/python:nn_ops",
+ "//tensorflow/python:platform_test",
+ "//tensorflow/python:sparse_grad",
+ "//tensorflow/python:sparse_ops",
],
)
@@ -1272,7 +2052,16 @@ cuda_py_test(
size = "medium",
srcs = ["sparse_tensor_dense_matmul_op_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/core:protos_all_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:control_flow_ops",
+ "//tensorflow/python:framework",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:math_ops",
+ "//tensorflow/python:platform",
+ "//tensorflow/python:session",
+ "//tensorflow/python:sparse_ops",
],
)
@@ -1283,7 +2072,10 @@ cuda_py_test(
size = "medium",
srcs = ["extract_image_patches_grad_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework",
+ "//tensorflow/python:framework_for_generated_wrappers",
],
tags = ["notap"], # http://b/31080670
)
@@ -1293,7 +2085,14 @@ cuda_py_test(
size = "medium",
srcs = ["concat_op_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:array_ops_gen",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:errors",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:gradients",
+ "//tensorflow/python:math_ops",
+ "//tensorflow/python:variables",
],
)
@@ -1302,7 +2101,9 @@ cuda_py_test(
size = "small",
srcs = ["large_concat_op_test.py"],
additional_deps = [
- "//tensorflow:tensorflow_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
],
tags = [
"nomsan",
@@ -1314,7 +2115,12 @@ cuda_py_test(
name = "conv_ops_3d_test",
size = "medium",
srcs = ["conv_ops_3d_test.py"],
- additional_deps = ["//tensorflow:tensorflow_py"],
+ additional_deps = [
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:nn_grad",
+ "//tensorflow/python:nn_ops",
+ ],
shard_count = 20,
)
@@ -1322,7 +2128,17 @@ cuda_py_test(
name = "cwise_ops_test",
size = "medium",
srcs = ["cwise_ops_test.py"],
- additional_deps = ["//tensorflow:tensorflow_py"],
+ additional_deps = [
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:gradients",
+ "//tensorflow/python:math_ops",
+ "//tensorflow/python:math_ops_gen",
+ "//tensorflow/python:platform",
+ "//tensorflow/python:variables",
+ ],
shard_count = 50,
tags = ["notap"], # b/30226163
)
@@ -1331,7 +2147,22 @@ cuda_py_test(
name = "embedding_ops_test",
size = "medium",
srcs = ["embedding_ops_test.py"],
- additional_deps = ["//tensorflow:tensorflow_py"],
+ additional_deps = [
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:data_flow_grad",
+ "//tensorflow/python:data_flow_ops",
+ "//tensorflow/python:embedding_ops",
+ "//tensorflow/python:framework",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:math_ops",
+ "//tensorflow/python:partitioned_variables",
+ "//tensorflow/python:platform",
+ "//tensorflow/python:state_ops",
+ "//tensorflow/python:util",
+ "//tensorflow/python:variable_scope",
+ "//tensorflow/python:variables",
+ ],
shard_count = 20,
)
@@ -1339,7 +2170,14 @@ cuda_py_test(
name = "linalg_grad_test",
size = "medium",
srcs = ["linalg_grad_test.py"],
- additional_deps = ["//tensorflow:tensorflow_py"],
+ additional_deps = [
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:gradients",
+ "//tensorflow/python:linalg_ops",
+ "//tensorflow/python:math_ops",
+ ],
shard_count = 20,
)
@@ -1347,7 +2185,11 @@ cuda_py_test(
name = "matrix_band_part_op_test",
size = "medium",
srcs = ["matrix_band_part_op_test.py"],
- additional_deps = ["//tensorflow:tensorflow_py"],
+ additional_deps = [
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ ],
shard_count = 20,
)
@@ -1355,7 +2197,13 @@ cuda_py_test(
name = "self_adjoint_eig_op_test",
size = "medium",
srcs = ["self_adjoint_eig_op_test.py"],
- additional_deps = ["//tensorflow:tensorflow_py"],
+ additional_deps = [
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:linalg_ops",
+ "//tensorflow/python:math_ops",
+ ],
shard_count = 20,
)
@@ -1363,7 +2211,13 @@ cuda_py_test(
name = "qr_op_test",
size = "medium",
srcs = ["qr_op_test.py"],
- additional_deps = ["//tensorflow:tensorflow_py"],
+ additional_deps = [
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:linalg_ops",
+ "//tensorflow/python:math_ops",
+ ],
shard_count = 20,
tags = ["nomsan"], # fails in msan from numpy calls
)
@@ -1372,7 +2226,13 @@ cuda_py_test(
name = "svd_op_test",
size = "medium",
srcs = ["svd_op_test.py"],
- additional_deps = ["//tensorflow:tensorflow_py"],
+ additional_deps = [
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:linalg_ops",
+ "//tensorflow/python:math_ops",
+ ],
shard_count = 20,
tags = ["nomsan"], # fails in msan from numpy calls
)
@@ -1381,21 +2241,48 @@ sycl_py_test(
name = "basic_gpu_test",
size = "small",
srcs = ["basic_gpu_test.py"],
- additional_deps = ["//tensorflow:tensorflow_py"],
+ additional_deps = [
+ "//tensorflow/python:array_ops_gen",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:math_ops",
+ "//tensorflow/python:math_ops_gen",
+ ],
)
tf_py_test(
name = "sets_test",
size = "small",
srcs = ["sets_test.py"],
- additional_deps = ["//tensorflow:tensorflow_py"],
+ additional_deps = [
+ "//tensorflow/python:errors",
+ "//tensorflow/python:framework",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:framework_test_lib",
+ "//tensorflow/python:math_ops",
+ "//tensorflow/python:platform_test",
+ "//tensorflow/python:sets",
+ "//tensorflow/python:sparse_ops",
+ ],
)
tf_py_test(
name = "metrics_test",
size = "medium",
srcs = ["metrics_test.py"],
- additional_deps = ["//tensorflow:tensorflow_py"],
+ additional_deps = [
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:data_flow_grad",
+ "//tensorflow/python:data_flow_ops",
+ "//tensorflow/python:errors",
+ "//tensorflow/python:framework",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:metrics",
+ "//tensorflow/python:nn_grad",
+ "//tensorflow/python:random_ops",
+ "//tensorflow/python:variables",
+ ],
shard_count = 3,
)
@@ -1403,7 +2290,14 @@ tf_py_test(
name = "confusion_matrix_test",
size = "small",
srcs = ["confusion_matrix_test.py"],
- additional_deps = ["//tensorflow:tensorflow_py"],
+ additional_deps = [
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:confusion_matrix",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:math_ops",
+ "//tensorflow/python:random_ops",
+ ],
)
filegroup(
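
The BUILD hunks above all apply one mechanical rewrite: the monolithic //tensorflow:tensorflow_py dependency is replaced by the fine-grained //tensorflow/python:* targets each test actually imports, so a kernel test no longer depends on the entire public API. A minimal before/after sketch of the pattern, reusing the trace_op_test rule from the hunks above:

# Before: one hourglass dependency pulls in all of TensorFlow.
cuda_py_test(
    name = "trace_op_test",
    size = "small",
    srcs = ["trace_op_test.py"],
    additional_deps = ["//tensorflow:tensorflow_py"],
)

# After: only the targets the test imports (as in the hunk above).
cuda_py_test(
    name = "trace_op_test",
    size = "small",
    srcs = ["trace_op_test.py"],
    additional_deps = [
        "//tensorflow/python:client_testlib",
        "//tensorflow/python:math_ops",
    ],
)

The per-file diffs below make the matching change on the Python side.
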
diff --git a/tensorflow/python/kernel_tests/argmax_op_test.py b/tensorflow/python/kernel_tests/argmax_op_test.py
index 1ad3dba2ae..ac9a78d0fa 100644
--- a/tensorflow/python/kernel_tests/argmax_op_test.py
+++ b/tensorflow/python/kernel_tests/argmax_op_test.py
@@ -12,19 +12,26 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for tensorflow.ops.argmax_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
-class ArgMaxTest(tf.test.TestCase):
+from tensorflow.python.ops import math_ops
+from tensorflow.python.platform import test
+
+
+class ArgMaxTest(test.TestCase):
- def _testArg(self, method, x, dimension,
- expected_values, use_gpu=False, expected_err_re=None):
+ def _testArg(self,
+ method,
+ x,
+ dimension,
+ expected_values,
+ use_gpu=False,
+ expected_err_re=None):
with self.test_session(use_gpu=use_gpu):
ans = method(x, dimension=dimension)
if expected_err_re is None:
@@ -35,28 +42,30 @@ class ArgMaxTest(tf.test.TestCase):
with self.assertRaisesOpError(expected_err_re):
ans.eval()
- def _testBothArg(self, method, x, dimension,
- expected_values, expected_err_re=None):
- self._testArg(method, x, dimension,
- expected_values, True, expected_err_re)
- self._testArg(method, x, dimension,
- expected_values, False, expected_err_re)
+ def _testBothArg(self,
+ method,
+ x,
+ dimension,
+ expected_values,
+ expected_err_re=None):
+ self._testArg(method, x, dimension, expected_values, True, expected_err_re)
+ self._testArg(method, x, dimension, expected_values, False, expected_err_re)
def _testBasic(self, dtype):
- x = np.asarray(100*np.random.randn(200), dtype=dtype)
+ x = np.asarray(100 * np.random.randn(200), dtype=dtype)
# Check that argmin and argmax match numpy along the primary
# dimension
- self._testBothArg(tf.argmax, x, 0, x.argmax())
- self._testBothArg(tf.argmin, x, 0, x.argmin())
+ self._testBothArg(math_ops.argmax, x, 0, x.argmax())
+ self._testBothArg(math_ops.argmin, x, 0, x.argmin())
def _testDim(self, dtype):
- x = np.asarray(100*np.random.randn(3, 2, 4, 5, 6), dtype=dtype)
+ x = np.asarray(100 * np.random.randn(3, 2, 4, 5, 6), dtype=dtype)
# Check that argmin and argmax match numpy along all dimensions
for dim in range(-5, 5):
- self._testBothArg(tf.argmax, x, dim, x.argmax(dim))
- self._testBothArg(tf.argmin, x, dim, x.argmin(dim))
+ self._testBothArg(math_ops.argmax, x, dim, x.argmax(dim))
+ self._testBothArg(math_ops.argmin, x, dim, x.argmin(dim))
def testFloat(self):
self._testBasic(np.float32)
@@ -76,11 +85,11 @@ class ArgMaxTest(tf.test.TestCase):
def testEmpty(self):
with self.test_session():
- for op in tf.argmin, tf.argmax:
+ for op in math_ops.argmin, math_ops.argmax:
with self.assertRaisesOpError(
r"Reduction axis 0 is empty in shape \[0\]"):
op([], 0).eval()
if __name__ == "__main__":
- tf.test.main()
+ test.main()
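
The source-side half of the change is visible in the hunk above: the hourglass import tensorflow as tf is dropped, and each symbol is resolved through the module that defines it (tf.argmax becomes math_ops.argmax, tf.test.TestCase becomes test.TestCase). A minimal sketch of a test written in the new style — the test body is hypothetical and for illustration only; the imports and the symbol mapping come from the diff:

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np

from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test


class ArgMaxSmokeTest(test.TestCase):
  # Hypothetical test illustrating the direct-module import style;
  # math_ops.argmax and test.TestCase stand in for tf.argmax and
  # tf.test.TestCase from the removed hourglass import.

  def testMatchesNumpy(self):
    x = np.array([1.0, 3.0, 2.0], dtype=np.float32)
    with self.test_session():
      self.assertEqual(x.argmax(), math_ops.argmax(x, 0).eval())


if __name__ == "__main__":
  test.main()  # replaces tf.test.main()
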
diff --git a/tensorflow/python/kernel_tests/array_ops_test.py b/tensorflow/python/kernel_tests/array_ops_test.py
index e91fa7fe3b..a40bc2ce4c 100644
--- a/tensorflow/python/kernel_tests/array_ops_test.py
+++ b/tensorflow/python/kernel_tests/array_ops_test.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for array_ops."""
from __future__ import absolute_import
from __future__ import division
@@ -21,13 +20,20 @@ from __future__ import print_function
import time
import numpy as np
-import tensorflow as tf
+from tensorflow.python.client import session
+from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
+from tensorflow.python.framework import errors_impl
+from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import gradients_impl
+from tensorflow.python.ops import math_ops
+from tensorflow.python.ops import variables
+from tensorflow.python.platform import test as test_lib
class BatchMatrixTransposeTest(test_util.TensorFlowTestCase):
@@ -36,7 +42,7 @@ class BatchMatrixTransposeTest(test_util.TensorFlowTestCase):
matrix = [[1, 2, 3], [4, 5, 6]] # Shape (2, 3)
expected_transposed = [[1, 4], [2, 5], [3, 6]] # Shape (3, 2)
with self.test_session():
- transposed = tf.matrix_transpose(matrix)
+ transposed = array_ops.matrix_transpose(matrix)
self.assertEqual((3, 2), transposed.get_shape())
self.assertAllEqual(expected_transposed, transposed.eval())
@@ -48,7 +54,7 @@ class BatchMatrixTransposeTest(test_util.TensorFlowTestCase):
batch_matrix = [matrix_0, matrix_1] # Shape (2, 2, 3)
expected_transposed = [matrix_0_t, matrix_1_t] # Shape (2, 3, 2)
with self.test_session():
- transposed = tf.matrix_transpose(batch_matrix)
+ transposed = array_ops.matrix_transpose(batch_matrix)
self.assertEqual((2, 3, 2), transposed.get_shape())
self.assertAllEqual(expected_transposed, transposed.eval())
@@ -56,11 +62,10 @@ class BatchMatrixTransposeTest(test_util.TensorFlowTestCase):
matrix = [[1, 2, 3], [4, 5, 6]] # Shape (2, 3)
expected_transposed = [[1, 4], [2, 5], [3, 6]] # Shape (3, 2)
with self.test_session():
- matrix_ph = tf.placeholder(tf.int32)
- transposed = tf.matrix_transpose(matrix_ph)
+ matrix_ph = array_ops.placeholder(dtypes.int32)
+ transposed = array_ops.matrix_transpose(matrix_ph)
self.assertAllEqual(
- expected_transposed,
- transposed.eval(feed_dict={matrix_ph: matrix}))
+ expected_transposed, transposed.eval(feed_dict={matrix_ph: matrix}))
def testBatchMatrixDynamicallyDefined(self):
matrix_0 = [[1, 2, 3], [4, 5, 6]]
@@ -70,8 +75,8 @@ class BatchMatrixTransposeTest(test_util.TensorFlowTestCase):
batch_matrix = [matrix_0, matrix_1] # Shape (2, 2, 3)
expected_transposed = [matrix_0_t, matrix_1_t] # Shape (2, 3, 2)
with self.test_session():
- batch_matrix_ph = tf.placeholder(tf.int32)
- transposed = tf.matrix_transpose(batch_matrix_ph)
+ batch_matrix_ph = array_ops.placeholder(dtypes.int32)
+ transposed = array_ops.matrix_transpose(batch_matrix_ph)
self.assertAllEqual(
expected_transposed,
transposed.eval(feed_dict={batch_matrix_ph: batch_matrix}))
@@ -80,7 +85,7 @@ class BatchMatrixTransposeTest(test_util.TensorFlowTestCase):
vector = [1, 2, 3]
with self.test_session():
with self.assertRaisesRegexp(ValueError, "should be a "):
- tf.matrix_transpose(vector)
+ array_ops.matrix_transpose(vector)
class BooleanMaskTest(test_util.TensorFlowTestCase):
@@ -93,7 +98,7 @@ class BooleanMaskTest(test_util.TensorFlowTestCase):
if make_mask is None:
make_mask = lambda shape: self.rng.randint(0, 2, size=shape).astype(bool)
arr = np.random.rand(*arr_shape)
- mask = make_mask(arr_shape[: ndims_mask])
+ mask = make_mask(arr_shape[:ndims_mask])
masked_arr = arr[mask]
with self.test_session():
masked_tensor = array_ops.boolean_mask(arr, mask)
@@ -128,7 +133,7 @@ class BooleanMaskTest(test_util.TensorFlowTestCase):
mask = np.array([True, False])
arr = np.array([[], []]).astype(np.float32)
numpy_result = arr[mask]
- tf_result = tf.boolean_mask(arr, mask)
+ tf_result = array_ops.boolean_mask(arr, mask)
self.assertAllEqual(numpy_result.shape[1:], tf_result.get_shape()[1:])
with self.test_session():
self.assertAllClose(numpy_result, tf_result.eval())
@@ -137,7 +142,7 @@ class BooleanMaskTest(test_util.TensorFlowTestCase):
mask = np.array([]).astype(bool)
arr = np.array([]).astype(np.float32)
numpy_result = arr[mask]
- tf_result = tf.boolean_mask(arr, mask)
+ tf_result = array_ops.boolean_mask(arr, mask)
self.assertAllEqual(numpy_result.shape[1:], tf_result.get_shape()[1:])
with self.test_session():
self.assertAllClose(numpy_result, tf_result.eval())
@@ -160,9 +165,9 @@ class BooleanMaskTest(test_util.TensorFlowTestCase):
arr = np.array([[1, 2], [3, 4]])
mask = np.array([False, True])
- masked_tensor = sess.run(
- array_ops.boolean_mask(ph_tensor, ph_mask),
- feed_dict={ph_tensor: arr, ph_mask: mask})
+ masked_tensor = sess.run(array_ops.boolean_mask(ph_tensor, ph_mask),
+ feed_dict={ph_tensor: arr,
+ ph_mask: mask})
np.testing.assert_allclose(masked_tensor, arr[mask])
def testMaskDimensionsSetToNoneRaises(self):
@@ -262,13 +267,13 @@ class ReverseV2Test(test_util.TensorFlowTestCase):
def testInvalid(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)
with self.test_session():
- with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
+ with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"is out of valid range"):
array_ops.reverse_v2(x_np, [-30]).eval()
- with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
+ with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"is out of valid range"):
array_ops.reverse_v2(x_np, [2]).eval()
- with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
+ with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"axis 0 specified more than once"):
array_ops.reverse_v2(x_np, [0, -2]).eval()
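For contrast with the failure cases asserted above, a valid reverse_v2 call looks like the following sketch; axis indices must lie in [-rank, rank) and may not name the same axis twice (module paths as used elsewhere in this diff):

    from tensorflow.python.client import session
    from tensorflow.python.framework import constant_op
    from tensorflow.python.framework import dtypes
    from tensorflow.python.ops import array_ops

    x = constant_op.constant([[1, 2, 3], [4, 5, 6]], dtype=dtypes.float32)
    y = array_ops.reverse_v2(x, [1])  # axis -1 would name the same axis
    with session.Session() as sess:
      print(sess.run(y))  # [[3. 2. 1.] [6. 5. 4.]]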
@@ -288,18 +293,18 @@ class ReverseV2Test(test_util.TensorFlowTestCase):
def testUnknownDims(self):
reverse_v2 = array_ops.reverse_v2
- data_t = tf.placeholder(tf.float32)
- axis_known_t = tf.placeholder(tf.int32, shape=[3])
+ data_t = array_ops.placeholder(dtypes.float32)
+ axis_known_t = array_ops.placeholder(dtypes.int32, shape=[3])
reverse_known_t = reverse_v2(data_t, axis_known_t)
# Unlike V1, we cannot know this anymore.
self.assertEqual(None, reverse_known_t.get_shape().ndims)
- axis_unknown_t = tf.placeholder(tf.int32)
+ axis_unknown_t = array_ops.placeholder(dtypes.int32)
reverse_unknown_t = reverse_v2(data_t, axis_unknown_t)
self.assertIs(None, reverse_unknown_t.get_shape().ndims)
- data_2d_t = tf.placeholder(tf.float32, shape=[None, None])
- axis_2d_t = tf.placeholder(tf.int32, shape=[3])
+ data_2d_t = array_ops.placeholder(dtypes.float32, shape=[None, None])
+ axis_2d_t = array_ops.placeholder(dtypes.int32, shape=[3])
reverse_2d_t = reverse_v2(data_2d_t, axis_2d_t)
self.assertEqual(2, reverse_2d_t.get_shape().ndims)
@@ -307,7 +312,7 @@ class ReverseV2Test(test_util.TensorFlowTestCase):
class MeshgridTest(test_util.TensorFlowTestCase):
def _compareDiff(self, x, y, use_gpu):
- for index in ('ij', 'xy'):
+ for index in ("ij", "xy"):
numpy_out = np.meshgrid(x, y, indexing=index)
tf_out = array_ops.meshgrid(x, y, indexing=index)
with self.test_session(use_gpu=use_gpu):
@@ -316,7 +321,7 @@ class MeshgridTest(test_util.TensorFlowTestCase):
def _compareDiffType(self, n, np_dtype, use_gpu):
inputs = []
- for index in ('ij', 'xy'):
+ for index in ("ij", "xy"):
for i in range(n):
x = np.linspace(-10, 10, 5).astype(np_dtype)
if np_dtype in (np.complex64, np.complex128):
@@ -330,7 +335,7 @@ class MeshgridTest(test_util.TensorFlowTestCase):
def testCompare(self):
for t in (np.float16, np.float32, np.float64, np.int32, np.int64,
- np.complex64, np.complex128):
+ np.complex64, np.complex128):
self._compareDiffType(2, t, False)
self._compareDiffType(3, t, False)
@@ -349,9 +354,11 @@ class StridedSliceChecker(object):
REF_TENSOR = np.arange(1, 19, dtype=np.float32).reshape(3, 2, 3)
REF_TENSOR_ALIGNED = np.arange(1, 97, dtype=np.float32).reshape(3, 4, 8)
- def __init__(self, test, x, tensor_type=tf.int32, check_type_infer=True):
+ def __init__(self, test, x, tensor_type=dtypes.int32, check_type_infer=True):
self.test = test
- self.x = tf.cast(tf.constant(x, dtype=tf.float32), dtype=tensor_type)
+ self.x = math_ops.cast(
+ constant_op.constant(
+ x, dtype=dtypes.float32), dtype=tensor_type)
self.x_np = np.array(x)
self.check_type_infer = check_type_infer
@@ -390,8 +397,10 @@ class StridedSliceTest(test_util.TensorFlowTestCase):
"""Test the strided slice operation with variants of slices."""
def test_basic_slice(self):
- for tensor_type in [tf.int32, tf.int64, tf.int16, tf.int8, tf.float32,
- tf.float64]:
+ for tensor_type in [
+ dtypes.int32, dtypes.int64, dtypes.int16, dtypes.int8, dtypes.float32,
+ dtypes.float64
+ ]:
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
checker = StridedSliceChecker(
@@ -413,7 +422,7 @@ class StridedSliceTest(test_util.TensorFlowTestCase):
_ = checker[-2::-1, :, ::2]
# Check rank-0 examples
- checker2 = StridedSliceChecker(self, 5, tensor_type=tf.int32)
+ checker2 = StridedSliceChecker(self, 5, tensor_type=dtypes.int32)
_ = checker2[None]
_ = checker2[...]
_ = checker2[tuple()]
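StridedSliceChecker's contract is NumPy parity for every slice expression it is fed; the same parity can be checked directly, as in this sketch (assuming the 0.12-era graph API, where __getitem__ on a Tensor lowers to the strided_slice op):

    import numpy as np

    from tensorflow.python.client import session
    from tensorflow.python.framework import constant_op

    x_np = np.arange(1, 19, dtype=np.float32).reshape(3, 2, 3)  # REF_TENSOR
    x = constant_op.constant(x_np)
    with session.Session() as sess:
      np.testing.assert_allclose(
          sess.run(x[-2::-1, :, ::2]), x_np[-2::-1, :, ::2])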
@@ -467,8 +476,8 @@ class StridedSliceTest(test_util.TensorFlowTestCase):
raw = [[[[[1, 2, 4, 5], [5, 6, 7, 8], [9, 10, 11, 12]]],
[[[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]]]]]
checker = StridedSliceChecker(self, raw, check_type_infer=False)
- bar = tf.constant(2)
- bar2 = tf.constant(3)
+ bar = constant_op.constant(2)
+ bar2 = constant_op.constant(3)
_ = checker[..., bar:bar2]
_ = checker[..., bar]
with self.assertRaisesRegexp(
@@ -499,7 +508,7 @@ class StridedSliceTest(test_util.TensorFlowTestCase):
def testExpandVariable(self):
for use_gpu in False, True:
with self.test_session(use_gpu=use_gpu):
- x = tf.Variable(7, dtype=tf.int32)
+ x = variables.Variable(7, dtype=dtypes.int32)
x.initializer.run()
y = x[None].eval()
self.assertEqual(y.shape, (1,))
@@ -537,7 +546,7 @@ class StridedSliceShapeTest(test_util.TensorFlowTestCase):
def testUnknown(self):
with self.test_session(use_gpu=False):
- uncertain_tensor = tf.placeholder(tf.float32)
+ uncertain_tensor = array_ops.placeholder(dtypes.float32)
a = StridedSliceShapeChecker(uncertain_tensor)
a_slice_shape = a[...]
self.assertAllEqual(a_slice_shape.ndims, None)
@@ -549,7 +558,8 @@ class StridedSliceShapeTest(test_util.TensorFlowTestCase):
def testTensorShapeUncertain(self):
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
- uncertain_tensor = tf.placeholder(tf.float32, shape=(5, None, 7))
+ uncertain_tensor = array_ops.placeholder(
+ dtypes.float32, shape=(5, None, 7))
a = StridedSliceShapeChecker(uncertain_tensor)
self.tensorShapeEqual(a[3:5], tensor_shape.TensorShape([2, None, 7]))
self.tensorShapeEqual(a[3:5, :, 4], tensor_shape.TensorShape([2, None]))
@@ -559,29 +569,30 @@ class StridedSliceShapeTest(test_util.TensorFlowTestCase):
tensor_shape.TensorShape([2, None, 2]))
self.tensorShapeEqual(a[3:5, :, 50:3],
tensor_shape.TensorShape([2, None, 0]))
- self.tensorShapeEqual(a[3:5, :, tf.newaxis, 50:3,],
+ self.tensorShapeEqual(a[3:5, :, array_ops.newaxis, 50:3,],
tensor_shape.TensorShape([2, None, 1, 0]))
- self.tensorShapeEqual(a[1:5:2, :, tf.newaxis, 50:3,],
+ self.tensorShapeEqual(a[1:5:2, :, array_ops.newaxis, 50:3,],
tensor_shape.TensorShape([2, None, 1, 0]))
- self.tensorShapeEqual(a[:5:3, :, tf.newaxis, 50:3,],
+ self.tensorShapeEqual(a[:5:3, :, array_ops.newaxis, 50:3,],
tensor_shape.TensorShape([2, None, 1, 0]))
- self.tensorShapeEqual(a[:2:3, :, tf.newaxis, 50:3,],
+ self.tensorShapeEqual(a[:2:3, :, array_ops.newaxis, 50:3,],
tensor_shape.TensorShape([1, None, 1, 0]))
- self.tensorShapeEqual(a[::-1, :, tf.newaxis, ::-2],
+ self.tensorShapeEqual(a[::-1, :, array_ops.newaxis, ::-2],
tensor_shape.TensorShape([5, None, 1, 4]))
def testTensorValuedIndexShape(self):
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
- defined_shape_tensor = tf.placeholder(tf.float32, shape=(5, 3, 7))
- index_value = tf.placeholder(tf.int32, shape=())
+ defined_shape_tensor = array_ops.placeholder(
+ dtypes.float32, shape=(5, 3, 7))
+ index_value = array_ops.placeholder(dtypes.int32, shape=())
a = StridedSliceShapeChecker(defined_shape_tensor)
self.tensorShapeEqual(a[index_value], tensor_shape.TensorShape([3, 7]))
self.tensorShapeEqual(a[index_value, ::-1],
tensor_shape.TensorShape([3, 7]))
self.tensorShapeEqual(a[index_value, ::-2],
tensor_shape.TensorShape([2, 7]))
- other_scalar = tf.placeholder(tf.int32, shape=())
+ other_scalar = array_ops.placeholder(dtypes.int32, shape=())
self.tensorShapeEqual(a[index_value, other_scalar:2],
tensor_shape.TensorShape([None, 7]))
@@ -603,10 +614,13 @@ class GradSliceChecker(object):
# compute analytic 2nd derivative
analytic_grad2 = 2 * slice_val
- dy = tf.Variable(tf.ones(shape=slice_var.get_shape(), dtype=tf.int32))
+ dy = variables.Variable(
+ array_ops.ones(
+ shape=slice_var.get_shape(), dtype=dtypes.int32))
assign = dy.assign(slice_var)
- slice_val_grad, = tf.gradients(slice_val, self.var, grad_ys=dy)
- slice_val_grad2, = tf.gradients(slice_val_grad, dy, grad_ys=self.var)
+ slice_val_grad, = gradients_impl.gradients(slice_val, self.var, grad_ys=dy)
+ slice_val_grad2, = gradients_impl.gradients(
+ slice_val_grad, dy, grad_ys=self.var)
self.sess.run(assign)
slice_val_grad_evaled, slice_val_grad2_evaled = (
self.sess.run([slice_val_grad, slice_val_grad2]))
@@ -627,15 +641,17 @@ class StridedSliceGradTest(test_util.TensorFlowTestCase):
def testGradient(self):
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu) as sess:
- var = tf.Variable(tf.reshape(tf.range(1, 97, 1), shape=(6, 4, 4)))
- init = tf.global_variables_initializer()
+ var = variables.Variable(
+ array_ops.reshape(
+ math_ops.range(1, 97, 1), shape=(6, 4, 4)))
+ init = variables.global_variables_initializer()
sess.run(init)
grad = GradSliceChecker(self, sess, var,
np.array(range(1, 97, 1)).reshape((6, 4, 4)))
_ = grad[2:6:2, 1:3, 1:3]
_ = grad[3:0:-2, 1:3, 1:3]
- _ = grad[3:0:-2, tf.newaxis, 1:3, 2, tf.newaxis]
+ _ = grad[3:0:-2, array_ops.newaxis, 1:3, 2, array_ops.newaxis]
_ = grad[3:0:-2, 1:3, 2]
_ = grad[:, -1, :]
_ = grad[:, -2, :]
@@ -647,11 +663,10 @@ class StridedSliceGradTest(test_util.TensorFlowTestCase):
def testGradientZero(self):
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu) as sess:
- var = tf.Variable(8)
- init = tf.global_variables_initializer()
+ var = variables.Variable(8)
+ init = variables.global_variables_initializer()
sess.run(init)
- grad = GradSliceChecker(self, sess, var,
- np.array(8))
+ grad = GradSliceChecker(self, sess, var, np.array(8))
_ = grad[tuple()]
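The gradient bookkeeping above routes through strided_slice's registered gradient, which scatters the incoming gradient back into a dense tensor of the variable's shape. A minimal sketch of that behavior, using the same modules this file now imports:

    import numpy as np

    from tensorflow.python.client import session
    from tensorflow.python.ops import gradients_impl
    from tensorflow.python.ops import math_ops
    from tensorflow.python.ops import variables

    var = variables.Variable(np.arange(6.0).reshape(2, 3))
    loss = math_ops.reduce_sum(var[0, 1:])
    grad, = gradients_impl.gradients(loss, var)
    with session.Session() as sess:
      sess.run(variables.global_variables_initializer())
      # Ones over the sliced region, zeros elsewhere.
      print(sess.run(grad))  # [[0. 1. 1.] [0. 0. 0.]]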
@@ -660,39 +675,42 @@ class StridedSliceGradTypeTest(test_util.TensorFlowTestCase):
def testHostVsDevice(self):
with self.test_session(use_gpu=True) as sess:
- var2 = tf.Variable(
- tf.reshape(
- tf.cast(tf.range(1, 5, 1), tf.float32), shape=(4, 1, 1)))
- varshape = tf.Variable([6, 4, 4], dtype=tf.int32)
- sess.run(tf.global_variables_initializer())
- begin = tf.constant([0, 0, 0])
- end = tf.constant([4, 1, 1])
- strides = tf.constant([1, 1, 1])
+ var2 = variables.Variable(
+ array_ops.reshape(
+ math_ops.cast(math_ops.range(1, 5, 1), dtypes.float32),
+ shape=(4, 1, 1)))
+ varshape = variables.Variable([6, 4, 4], dtype=dtypes.int32)
+ sess.run(variables.global_variables_initializer())
+ begin = constant_op.constant([0, 0, 0])
+ end = constant_op.constant([4, 1, 1])
+ strides = constant_op.constant([1, 1, 1])
foo = array_ops.strided_slice_grad(varshape, begin, end, strides, var2)
sess.run(foo)
def testInt64Shape(self):
with self.test_session(use_gpu=True) as sess:
- original_dy = tf.reshape(
- tf.cast(tf.range(1, 5, 1), tf.float32), shape=(4, 1, 1))
- original_shape = tf.constant([6, 4, 4], dtype=tf.int64)
- sess.run(tf.global_variables_initializer())
- begin = tf.constant([0, 0, 0], dtype=tf.int64)
- end = tf.constant([4, 1, 1], dtype=tf.int64)
- strides = tf.constant([1, 1, 1], dtype=tf.int64)
+ original_dy = array_ops.reshape(
+ math_ops.cast(math_ops.range(1, 5, 1), dtypes.float32),
+ shape=(4, 1, 1))
+ original_shape = constant_op.constant([6, 4, 4], dtype=dtypes.int64)
+ sess.run(variables.global_variables_initializer())
+ begin = constant_op.constant([0, 0, 0], dtype=dtypes.int64)
+ end = constant_op.constant([4, 1, 1], dtype=dtypes.int64)
+ strides = constant_op.constant([1, 1, 1], dtype=dtypes.int64)
dx = array_ops.strided_slice_grad(original_shape, begin, end, strides,
original_dy)
sess.run(dx)
def testMixedIndexTypes(self):
with self.test_session(use_gpu=True) as sess:
- original_dy = tf.reshape(
- tf.cast(tf.range(1, 5, 1), tf.float32), shape=(4, 1, 1))
- original_shape = tf.constant([6, 4, 4], dtype=tf.int64)
- sess.run(tf.global_variables_initializer())
- begin = tf.constant([0, 0, 0], dtype=tf.int32)
- end = tf.constant([4, 1, 1], dtype=tf.int64)
- strides = tf.constant([1, 1, 1], dtype=tf.int64)
+ original_dy = array_ops.reshape(
+ math_ops.cast(math_ops.range(1, 5, 1), dtypes.float32),
+ shape=(4, 1, 1))
+ original_shape = constant_op.constant([6, 4, 4], dtype=dtypes.int64)
+ sess.run(variables.global_variables_initializer())
+ begin = constant_op.constant([0, 0, 0], dtype=dtypes.int32)
+ end = constant_op.constant([4, 1, 1], dtype=dtypes.int64)
+ strides = constant_op.constant([1, 1, 1], dtype=dtypes.int64)
with self.assertRaisesRegexp(
TypeError, "Input 'begin' of 'StridedSliceGrad' Op has type int32"
" that does not match type int64 of argument 'shape'"):
@@ -710,11 +728,11 @@ class BenchmarkSlice(object):
return self.tensor[x]
-class StridedSliceBenchmark(tf.test.Benchmark):
+class StridedSliceBenchmark(test_lib.Benchmark):
"""Benchmark new strided slice operation on non-trivial case."""
def run_and_time(self, slice_op):
- tf.global_variables_initializer().run()
+ variables.global_variables_initializer().run()
for _ in range(10):
_ = slice_op.eval()
iters = 1000
@@ -728,28 +746,27 @@ class StridedSliceBenchmark(tf.test.Benchmark):
n = 256
shape = (n, n, n)
items = n**3
- var = tf.Variable(
- tf.reshape(
- tf.linspace(1., float(items), items), shape),
- dtype=tf.float32)
+ var = variables.Variable(
+ array_ops.reshape(math_ops.linspace(1., float(items), items), shape),
+ dtype=dtypes.float32)
return var
def benchmark_strided_slice_skip(self):
- with tf.Session():
+ with session.Session():
var = self.make_variable()
helper = BenchmarkSlice(var)
slice_op = helper[::2, ::1, ::2]
self.run_and_time(slice_op)
def benchmark_strided_slice_easy(self):
- with tf.Session():
+ with session.Session():
var = self.make_variable()
helper = BenchmarkSlice(var)
slice_op = helper[3::1, 3::1, 3::1]
self.run_and_time(slice_op)
def benchmark_slice_easy(self):
- with tf.Session():
+ with session.Session():
var = self.make_variable()
slice_op = var[3::1, 3::1, 3::1]
self.run_and_time(slice_op)
@@ -757,19 +774,21 @@ class StridedSliceBenchmark(tf.test.Benchmark):
class StridedSliceAssignChecker(object):
- def __init__(self, test, x, tensor_type=tf.float32):
+ def __init__(self, test, x, tensor_type=dtypes.float32):
self.tensor_type = tensor_type
self.test = test
- self.x = tf.cast(tf.constant(x, dtype=tf.float32), dtype=tensor_type)
+ self.x = math_ops.cast(
+ constant_op.constant(
+ x, dtype=dtypes.float32), dtype=tensor_type)
self.x_np = np.array(x)
def __setitem__(self, index, value):
for use_gpu in [False, True]:
with self.test.test_session(use_gpu=use_gpu) as sess:
- var = tf.Variable(self.x)
- sess.run(tf.initialize_variables([var]))
+ var = variables.Variable(self.x)
+ sess.run(variables.initialize_variables([var]))
val = sess.run(var[index].assign(
- tf.constant(
+ constant_op.constant(
value, dtype=self.tensor_type)))
valnp = np.copy(self.x_np)
valnp[index] = np.array(value)
@@ -780,10 +799,10 @@ class SliceAssignTest(test_util.TensorFlowTestCase):
def testInvalidSlice(self):
with self.test_session() as sess:
- foo = tf.constant([1, 2, 3])
+ foo = constant_op.constant([1, 2, 3])
with self.assertRaisesRegexp(ValueError, "Sliced assignment"
" is only supported for variables"):
- bar = foo[:2].assign(tf.constant([1, 2]))
+ bar = foo[:2].assign(constant_op.constant([1, 2]))
sess.run(bar)
def testSliceAssign(self):
@@ -816,7 +835,7 @@ class SliceAssignTest(test_util.TensorFlowTestCase):
errors.FailedPreconditionError,
"Attempting to use uninitialized value Variable"):
with self.test_session() as sess:
- v = tf.Variable([1, 2])
+ v = variables.Variable([1, 2])
sess.run(v[:].assign([1, 2]))
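As testInvalidSlice asserts above, var[slice].assign(...) is only defined for variables, and the variable must be initialized first. A minimal working form, under the same assumptions as the rest of this file:

    from tensorflow.python.client import session
    from tensorflow.python.ops import variables

    v = variables.Variable([1, 2, 3])
    with session.Session() as sess:
      sess.run(variables.global_variables_initializer())
      # The assign op evaluates to the full updated variable value.
      print(sess.run(v[:2].assign([10, 20])))  # [10 20  3]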
@@ -825,29 +844,27 @@ class ShapeSizeRankTest(test_util.TensorFlowTestCase):
def testDenseShape(self):
with self.test_session():
t_value = [[0, 42], [24, 0]]
- self.assertAllEqual((2, 2), tf.shape(t_value).eval())
- self.assertEqual(4, tf.size(t_value).eval())
- self.assertEqual(2, tf.rank(t_value).eval())
+ self.assertAllEqual((2, 2), array_ops.shape(t_value).eval())
+ self.assertEqual(4, array_ops.size(t_value).eval())
+ self.assertEqual(2, array_ops.rank(t_value).eval())
- t = tf.constant(t_value)
- self.assertAllEqual((2, 2), tf.shape(t).eval())
- self.assertEqual(4, tf.size(t).eval())
- self.assertEqual(2, tf.rank(t).eval())
+ t = constant_op.constant(t_value)
+ self.assertAllEqual((2, 2), array_ops.shape(t).eval())
+ self.assertEqual(4, array_ops.size(t).eval())
+ self.assertEqual(2, array_ops.rank(t).eval())
def testSparseShape(self):
with self.test_session():
- sp_value = tf.SparseTensorValue(
- indices=((0, 1), (1, 0)),
- values=(42, 24),
- dense_shape=(2, 2))
- self.assertAllEqual((2, 2), tf.shape(sp_value).eval())
- self.assertEqual(4, tf.size(sp_value).eval())
- self.assertEqual(2, tf.rank(sp_value).eval())
+ sp_value = sparse_tensor.SparseTensorValue(
+ indices=((0, 1), (1, 0)), values=(42, 24), dense_shape=(2, 2))
+ self.assertAllEqual((2, 2), array_ops.shape(sp_value).eval())
+ self.assertEqual(4, array_ops.size(sp_value).eval())
+ self.assertEqual(2, array_ops.rank(sp_value).eval())
- sp = tf.SparseTensor.from_value(sp_value)
- self.assertAllEqual((2, 2), tf.shape(sp).eval())
- self.assertEqual(4, tf.size(sp).eval())
- self.assertEqual(2, tf.rank(sp).eval())
+ sp = sparse_tensor.SparseTensor.from_value(sp_value)
+ self.assertAllEqual((2, 2), array_ops.shape(sp).eval())
+ self.assertEqual(4, array_ops.size(sp).eval())
+ self.assertEqual(2, array_ops.rank(sp).eval())
class SequenceMaskTest(test_util.TensorFlowTestCase):
@@ -855,40 +872,45 @@ class SequenceMaskTest(test_util.TensorFlowTestCase):
def testExceptions(self):
with self.test_session():
with self.assertRaisesRegexp(ValueError, "lengths must be 1D"):
- tf.sequence_mask([[10, 20]], [10, 20])
+ array_ops.sequence_mask([[10, 20]], [10, 20])
with self.assertRaisesRegexp(ValueError, "maxlen must be scalar"):
- tf.sequence_mask([10, 20], [10, 20])
+ array_ops.sequence_mask([10, 20], [10, 20])
def testNormal(self):
with self.test_session():
- res = tf.sequence_mask(tf.constant([1, 3, 2]), 5)
+ res = array_ops.sequence_mask(constant_op.constant([1, 3, 2]), 5)
self.assertAllEqual(res.get_shape(), [3, 5])
self.assertAllEqual(res.eval(), [[True, False, False, False, False],
[True, True, True, False, False],
[True, True, False, False, False]])
# test dtype and default maxlen:
- res = tf.sequence_mask(tf.constant([0, 1, 4]), dtype=tf.float32)
+ res = array_ops.sequence_mask(
+ constant_op.constant([0, 1, 4]), dtype=dtypes.float32)
self.assertAllEqual(res.get_shape().as_list(), [3, None])
self.assertAllEqual(res.eval(), [[0.0, 0.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 1.0]])
def testDtypes(self):
+
def check_dtypes(lengths_dtype, maxlen_dtype):
- res = tf.sequence_mask(tf.constant([1, 3, 2], dtype=lengths_dtype),
- tf.constant(5, dtype=maxlen_dtype))
+ res = array_ops.sequence_mask(
+ constant_op.constant(
+ [1, 3, 2], dtype=lengths_dtype),
+ constant_op.constant(
+ 5, dtype=maxlen_dtype))
self.assertAllEqual(res.get_shape(), [3, 5])
self.assertAllEqual(res.eval(), [[True, False, False, False, False],
[True, True, True, False, False],
[True, True, False, False, False]])
with self.test_session():
- check_dtypes(tf.int32, tf.int32)
- check_dtypes(tf.int32, tf.int64)
- check_dtypes(tf.int64, tf.int32)
- check_dtypes(tf.int64, tf.int64)
+ check_dtypes(dtypes.int32, dtypes.int32)
+ check_dtypes(dtypes.int32, dtypes.int64)
+ check_dtypes(dtypes.int64, dtypes.int32)
+ check_dtypes(dtypes.int64, dtypes.int64)
if __name__ == "__main__":
- tf.test.main()
+ test_lib.main()
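A closing note on the sequence_mask assertions in the last hunks: the op's semantics have a one-line NumPy equivalent, which makes the expected matrices above easy to verify by hand (pure NumPy, no TF assumptions):

    import numpy as np

    lengths = np.array([1, 3, 2])
    maxlen = 5
    mask = np.arange(maxlen) < lengths[:, None]
    print(mask.astype(int))
    # [[1 0 0 0 0]
    #  [1 1 1 0 0]
    #  [1 1 0 0 0]]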
diff --git a/tensorflow/python/kernel_tests/as_string_op_test.py b/tensorflow/python/kernel_tests/as_string_op_test.py
index 1f2b0ea1d5..9d54add264 100644
--- a/tensorflow/python/kernel_tests/as_string_op_test.py
+++ b/tensorflow/python/kernel_tests/as_string_op_test.py
@@ -12,69 +12,70 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for as_string_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+
+from tensorflow.python.framework import dtypes
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import string_ops
+from tensorflow.python.platform import test
-class AsStringOpTest(tf.test.TestCase):
+class AsStringOpTest(test.TestCase):
def testFloat(self):
- float_inputs_ = [0, 1, -1, 0.5, 0.25, 0.125, float("INF"), float("NAN"),
- float("-INF")]
+ float_inputs_ = [
+ 0, 1, -1, 0.5, 0.25, 0.125, float("INF"), float("NAN"), float("-INF")
+ ]
with self.test_session():
- for dtype in (tf.float32, tf.float64):
- input_ = tf.placeholder(dtype)
+ for dtype in (dtypes.float32, dtypes.float64):
+ input_ = array_ops.placeholder(dtype)
- output = tf.as_string(input_, shortest=True)
+ output = string_ops.as_string(input_, shortest=True)
result = output.eval(feed_dict={input_: float_inputs_})
s = lambda strs: [x.decode("ascii") for x in strs]
self.assertAllEqual(s(result), ["%g" % x for x in float_inputs_])
- output = tf.as_string(input_, scientific=True)
+ output = string_ops.as_string(input_, scientific=True)
result = output.eval(feed_dict={input_: float_inputs_})
self.assertAllEqual(s(result), ["%e" % x for x in float_inputs_])
- output = tf.as_string(input_)
+ output = string_ops.as_string(input_)
result = output.eval(feed_dict={input_: float_inputs_})
self.assertAllEqual(s(result), ["%f" % x for x in float_inputs_])
- output = tf.as_string(input_, width=3)
+ output = string_ops.as_string(input_, width=3)
result = output.eval(feed_dict={input_: float_inputs_})
self.assertAllEqual(s(result), ["%3f" % x for x in float_inputs_])
- output = tf.as_string(input_, width=3, fill="0")
+ output = string_ops.as_string(input_, width=3, fill="0")
result = output.eval(feed_dict={input_: float_inputs_})
self.assertAllEqual(s(result), ["%03f" % x for x in float_inputs_])
- output = tf.as_string(input_, width=3, fill="0", shortest=True)
+ output = string_ops.as_string(input_, width=3, fill="0", shortest=True)
result = output.eval(feed_dict={input_: float_inputs_})
self.assertAllEqual(s(result), ["%03g" % x for x in float_inputs_])
- output = tf.as_string(input_, precision=10, width=3)
+ output = string_ops.as_string(input_, precision=10, width=3)
result = output.eval(feed_dict={input_: float_inputs_})
self.assertAllEqual(s(result), ["%03.10f" % x for x in float_inputs_])
- output = tf.as_string(input_,
- precision=10,
- width=3,
- fill="0",
- shortest=True)
+ output = string_ops.as_string(
+ input_, precision=10, width=3, fill="0", shortest=True)
result = output.eval(feed_dict={input_: float_inputs_})
self.assertAllEqual(s(result), ["%03.10g" % x for x in float_inputs_])
with self.assertRaisesOpError("Cannot select both"):
- output = tf.as_string(input_, scientific=True, shortest=True)
+ output = string_ops.as_string(input_, scientific=True, shortest=True)
output.eval(feed_dict={input_: float_inputs_})
with self.assertRaisesOpError("Fill string must be one or fewer"):
- output = tf.as_string(input_, fill="ab")
+ output = string_ops.as_string(input_, fill="ab")
output.eval(feed_dict={input_: float_inputs_})
def testInt(self):
@@ -84,31 +85,31 @@ class AsStringOpTest(tf.test.TestCase):
s = lambda strs: [x.decode("ascii") for x in strs]
with self.test_session():
- for dtype in (tf.int32, tf.int64, tf.int8):
- input_ = tf.placeholder(dtype)
+ for dtype in (dtypes.int32, dtypes.int64, dtypes.int8):
+ input_ = array_ops.placeholder(dtype)
- output = tf.as_string(input_)
+ output = string_ops.as_string(input_)
result = output.eval(feed_dict={input_: int_inputs_})
self.assertAllEqual(s(result), ["%d" % x for x in int_inputs_])
- output = tf.as_string(input_, width=3)
+ output = string_ops.as_string(input_, width=3)
result = output.eval(feed_dict={input_: int_inputs_})
self.assertAllEqual(s(result), ["%3d" % x for x in int_inputs_])
- output = tf.as_string(input_, width=3, fill="0")
+ output = string_ops.as_string(input_, width=3, fill="0")
result = output.eval(feed_dict={input_: int_inputs_})
self.assertAllEqual(s(result), ["%03d" % x for x in int_inputs_])
with self.assertRaisesOpError("scientific and shortest"):
- output = tf.as_string(input_, scientific=True)
+ output = string_ops.as_string(input_, scientific=True)
output.eval(feed_dict={input_: int_inputs_})
with self.assertRaisesOpError("scientific and shortest"):
- output = tf.as_string(input_, shortest=True)
+ output = string_ops.as_string(input_, shortest=True)
output.eval(feed_dict={input_: int_inputs_})
with self.assertRaisesOpError("precision not supported"):
- output = tf.as_string(input_, precision=0)
+ output = string_ops.as_string(input_, precision=0)
output.eval(feed_dict={input_: int_inputs_})
def testLargeInt(self):
@@ -117,15 +118,15 @@ class AsStringOpTest(tf.test.TestCase):
s = lambda strs: [x.decode("ascii") for x in strs]
with self.test_session():
- input_ = tf.placeholder(tf.int32)
+ input_ = array_ops.placeholder(dtypes.int32)
int_inputs_ = [np.iinfo(np.int32).min, np.iinfo(np.int32).max]
- output = tf.as_string(input_)
+ output = string_ops.as_string(input_)
result = output.eval(feed_dict={input_: int_inputs_})
self.assertAllEqual(s(result), ["%d" % x for x in int_inputs_])
- input_ = tf.placeholder(tf.int64)
+ input_ = array_ops.placeholder(dtypes.int64)
int_inputs_ = [np.iinfo(np.int64).min, np.iinfo(np.int64).max]
- output = tf.as_string(input_)
+ output = string_ops.as_string(input_)
result = output.eval(feed_dict={input_: int_inputs_})
self.assertAllEqual(s(result), ["%d" % x for x in int_inputs_])
@@ -134,75 +135,74 @@ class AsStringOpTest(tf.test.TestCase):
s = lambda strs: [x.decode("ascii") for x in strs]
with self.test_session():
- for dtype in (tf.bool,):
- input_ = tf.placeholder(dtype)
+ for dtype in (dtypes.bool,):
+ input_ = array_ops.placeholder(dtype)
- output = tf.as_string(input_)
+ output = string_ops.as_string(input_)
result = output.eval(feed_dict={input_: bool_inputs_})
self.assertAllEqual(s(result), ["false", "true"])
def testComplex(self):
- float_inputs_ = [0, 1, -1, 0.5, 0.25, 0.125, complex("INF"), complex("NAN"),
- complex("-INF")]
+ float_inputs_ = [
+ 0, 1, -1, 0.5, 0.25, 0.125, complex("INF"), complex("NAN"),
+ complex("-INF")
+ ]
complex_inputs_ = [(x + (x + 1) * 1j) for x in float_inputs_]
with self.test_session():
- for dtype in (tf.complex64,):
- input_ = tf.placeholder(dtype)
+ for dtype in (dtypes.complex64,):
+ input_ = array_ops.placeholder(dtype)
def clean_nans(s_l):
return [s.decode("ascii").replace("-nan", "nan") for s in s_l]
- output = tf.as_string(input_, shortest=True)
+ output = string_ops.as_string(input_, shortest=True)
result = output.eval(feed_dict={input_: complex_inputs_})
- self.assertAllEqual(clean_nans(result),
- ["(%g,%g)" % (x.real, x.imag)
- for x in complex_inputs_])
+ self.assertAllEqual(
+ clean_nans(result),
+ ["(%g,%g)" % (x.real, x.imag) for x in complex_inputs_])
- output = tf.as_string(input_, scientific=True)
+ output = string_ops.as_string(input_, scientific=True)
result = output.eval(feed_dict={input_: complex_inputs_})
- self.assertAllEqual(clean_nans(result),
- ["(%e,%e)" % (x.real, x.imag)
- for x in complex_inputs_])
+ self.assertAllEqual(
+ clean_nans(result),
+ ["(%e,%e)" % (x.real, x.imag) for x in complex_inputs_])
- output = tf.as_string(input_)
+ output = string_ops.as_string(input_)
result = output.eval(feed_dict={input_: complex_inputs_})
- self.assertAllEqual(clean_nans(result),
- ["(%f,%f)" % (x.real, x.imag)
- for x in complex_inputs_])
+ self.assertAllEqual(
+ clean_nans(result),
+ ["(%f,%f)" % (x.real, x.imag) for x in complex_inputs_])
- output = tf.as_string(input_, width=3)
+ output = string_ops.as_string(input_, width=3)
result = output.eval(feed_dict={input_: complex_inputs_})
- self.assertAllEqual(clean_nans(result),
- ["(%03f,%03f)" % (x.real, x.imag)
- for x in complex_inputs_])
+ self.assertAllEqual(
+ clean_nans(result),
+ ["(%03f,%03f)" % (x.real, x.imag) for x in complex_inputs_])
- output = tf.as_string(input_, width=3, fill="0", shortest=True)
+ output = string_ops.as_string(input_, width=3, fill="0", shortest=True)
result = output.eval(feed_dict={input_: complex_inputs_})
- self.assertAllEqual(clean_nans(result),
- ["(%03g,%03g)" % (x.real, x.imag)
- for x in complex_inputs_])
+ self.assertAllEqual(
+ clean_nans(result),
+ ["(%03g,%03g)" % (x.real, x.imag) for x in complex_inputs_])
- output = tf.as_string(input_, precision=10, width=3)
+ output = string_ops.as_string(input_, precision=10, width=3)
result = output.eval(feed_dict={input_: complex_inputs_})
- self.assertAllEqual(clean_nans(result),
- ["(%03.10f,%03.10f)" % (x.real, x.imag)
- for x in complex_inputs_])
-
- output = tf.as_string(input_,
- precision=10,
- width=3,
- fill="0",
- shortest=True)
+ self.assertAllEqual(
+ clean_nans(result),
+ ["(%03.10f,%03.10f)" % (x.real, x.imag) for x in complex_inputs_])
+
+ output = string_ops.as_string(
+ input_, precision=10, width=3, fill="0", shortest=True)
result = output.eval(feed_dict={input_: complex_inputs_})
- self.assertAllEqual(clean_nans(result),
- ["(%03.10g,%03.10g)" % (x.real, x.imag)
- for x in complex_inputs_])
+ self.assertAllEqual(
+ clean_nans(result),
+ ["(%03.10g,%03.10g)" % (x.real, x.imag) for x in complex_inputs_])
with self.assertRaisesOpError("Cannot select both"):
- output = tf.as_string(input_, scientific=True, shortest=True)
+ output = string_ops.as_string(input_, scientific=True, shortest=True)
output.eval(feed_dict={input_: complex_inputs_})
if __name__ == "__main__":
- tf.test.main()
+ test.main()
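The formatting keywords exercised above map directly onto printf-style specifiers (width=3 with fill="0" is "%03f", and so on, as the assertions state). A standalone sketch, assuming the same 0.12-era graph API as the test:

    from tensorflow.python.client import session
    from tensorflow.python.framework import constant_op
    from tensorflow.python.ops import string_ops

    x = constant_op.constant([0.5, 0.25])
    y = string_ops.as_string(x, width=3, fill="0")  # "%03f" per value
    with session.Session() as sess:
      print([s.decode("ascii") for s in sess.run(y)])
      # ['0.500000', '0.250000']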
diff --git a/tensorflow/python/kernel_tests/atrous_conv2d_test.py b/tensorflow/python/kernel_tests/atrous_conv2d_test.py
index 5e02c6df8e..ab1d698f6e 100644
--- a/tensorflow/python/kernel_tests/atrous_conv2d_test.py
+++ b/tensorflow/python/kernel_tests/atrous_conv2d_test.py
@@ -12,14 +12,22 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for convolution related functionality in tensorflow.ops.nn."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import gradient_checker
+from tensorflow.python.ops import nn_impl
+from tensorflow.python.ops import nn_ops
+import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
+from tensorflow.python.platform import test
def _upsample_filters(filters, rate):
@@ -42,13 +50,13 @@ def _upsample_filters(filters, rate):
filters_up = np.transpose(filters, [2, 3, 0, 1])
ker = np.zeros([rate, rate], dtype=np.float32)
ker[0, 0] = 1
- filters_up = np.kron(filters_up, ker)[:, :, :-(rate-1), :-(rate-1)]
+ filters_up = np.kron(filters_up, ker)[:, :, :-(rate - 1), :-(rate - 1)]
# [in_depth, out_depth, h_up, w_up] -> [h_up, w_up, in_depth, out_depth]
filters_up = np.transpose(filters_up, [2, 3, 0, 1])
return filters_up
-class AtrousConv2DTest(tf.test.TestCase):
+class AtrousConv2DTest(test.TestCase):
def testAtrousConv2DForward(self):
with self.test_session(use_gpu=True):
@@ -68,9 +76,9 @@ class AtrousConv2DTest(tf.test.TestCase):
f_up = _upsample_filters(f, rate)
for padding in ["SAME", "VALID"]:
- y1 = tf.nn.atrous_conv2d(x, f, rate, padding=padding)
- y2 = tf.nn.conv2d(x, f_up, strides=[1, 1, 1, 1],
- padding=padding)
+ y1 = nn_ops.atrous_conv2d(x, f, rate, padding=padding)
+ y2 = nn_ops.conv2d(
+ x, f_up, strides=[1, 1, 1, 1], padding=padding)
self.assertAllClose(y1.eval(), y2.eval(), rtol=1e-3, atol=1e-3)
def testAtrousSequence(self):
@@ -111,18 +119,18 @@ class AtrousConv2DTest(tf.test.TestCase):
for rate in range(2, 4):
# y1: three atrous_conv2d in a row.
- y1 = tf.nn.atrous_conv2d(x, f, rate, padding=padding)
- y1 = tf.nn.atrous_conv2d(y1, f, rate, padding=padding)
- y1 = tf.nn.atrous_conv2d(y1, f, rate, padding=padding)
+ y1 = nn_ops.atrous_conv2d(x, f, rate, padding=padding)
+ y1 = nn_ops.atrous_conv2d(y1, f, rate, padding=padding)
+ y1 = nn_ops.atrous_conv2d(y1, f, rate, padding=padding)
# y2: space_to_batch, three conv2d in a row, batch_to_space
pad_bottom = 0 if height % rate == 0 else rate - height % rate
pad_right = 0 if width % rate == 0 else rate - width % rate
pad = [[0, pad_bottom], [0, pad_right]]
- y2 = tf.space_to_batch(x, paddings=pad, block_size=rate)
- y2 = tf.nn.conv2d(y2, f, strides=[1, 1, 1, 1], padding=padding)
- y2 = tf.nn.conv2d(y2, f, strides=[1, 1, 1, 1], padding=padding)
- y2 = tf.nn.conv2d(y2, f, strides=[1, 1, 1, 1], padding=padding)
- y2 = tf.batch_to_space(y2, crops=pad, block_size=rate)
+ y2 = array_ops.space_to_batch(x, paddings=pad, block_size=rate)
+ y2 = nn_ops.conv2d(y2, f, strides=[1, 1, 1, 1], padding=padding)
+ y2 = nn_ops.conv2d(y2, f, strides=[1, 1, 1, 1], padding=padding)
+ y2 = nn_ops.conv2d(y2, f, strides=[1, 1, 1, 1], padding=padding)
+ y2 = array_ops.batch_to_space(y2, crops=pad, block_size=rate)
self.assertAllClose(y1.eval(), y2.eval(), rtol=1e-2, atol=1e-2)
def testGradient(self):
@@ -137,19 +145,20 @@ class AtrousConv2DTest(tf.test.TestCase):
np.random.seed(1) # Make it reproducible.
x_val = np.random.random_sample(x_shape).astype(np.float32)
f_val = np.random.random_sample(f_shape).astype(np.float32)
- x = tf.constant(x_val, name="x", dtype=tf.float32)
- f = tf.constant(f_val, name="f", dtype=tf.float32)
+ x = constant_op.constant(x_val, name="x", dtype=dtypes.float32)
+ f = constant_op.constant(f_val, name="f", dtype=dtypes.float32)
for rate in range(1, 4):
- output = tf.nn.atrous_conv2d(x, f, rate=rate, padding="SAME")
- err = tf.test.compute_gradient_error(
- [x, f], [x_shape, f_shape], output, y_shape)
+ output = nn_ops.atrous_conv2d(x, f, rate=rate, padding="SAME")
+ err = gradient_checker.compute_gradient_error([x, f],
+ [x_shape, f_shape],
+ output, y_shape)
print("atrous_conv2d gradient err = %g " % err)
err_tolerance = 1e-3
self.assertLess(err, err_tolerance)
-class AtrousConv2DTransposeTest(tf.test.TestCase):
+class AtrousConv2DTransposeTest(test.TestCase):
def testAtrousConv2DTransposeForward(self):
with self.test_session(use_gpu=True):
@@ -167,26 +176,27 @@ class AtrousConv2DTransposeTest(tf.test.TestCase):
for rate in range(1, 4):
f_up = _upsample_filters(f, rate)
- kernel_height_up = (kernel_height +
- (kernel_height - 1) * (rate - 1))
+ kernel_height_up = (kernel_height + (kernel_height - 1) *
+ (rate - 1))
kernel_width_up = kernel_width + (kernel_width - 1) * (rate - 1)
for padding in ["SAME", "VALID"]:
if padding == "SAME":
y_shape = [2, height, width, 2]
else:
- y_shape = [2,
- height + kernel_height_up - 1,
- width + kernel_width_up - 1,
- 2]
-
- y1 = tf.nn.atrous_conv2d_transpose(x, f, y_shape, rate, padding)
- y2 = tf.nn.conv2d_transpose(
+ y_shape = [
+ 2, height + kernel_height_up - 1,
+ width + kernel_width_up - 1, 2
+ ]
+
+ y1 = nn_ops.atrous_conv2d_transpose(x, f, y_shape, rate,
+ padding)
+ y2 = nn_ops.conv2d_transpose(
x, f_up, y_shape, strides=[1, 1, 1, 1], padding=padding)
self.assertAllClose(y1.eval(), y2.eval(), rtol=1e-3, atol=1e-3)
-class AtrousDepthwiseConv2DTest(tf.test.TestCase):
+class AtrousDepthwiseConv2DTest(test.TestCase):
def testAtrousDepthwiseConv2DForward(self):
strides = [1, 1, 1, 1]
@@ -207,11 +217,11 @@ class AtrousDepthwiseConv2DTest(tf.test.TestCase):
f_up = _upsample_filters(f, rate)
for padding in ["SAME", "VALID"]:
- y1 = tf.nn.depthwise_conv2d(x, f, strides, padding,
- rate=[rate, rate])
- y2 = tf.nn.depthwise_conv2d(x, f_up, strides, padding)
+ y1 = nn_impl.depthwise_conv2d(
+ x, f, strides, padding, rate=[rate, rate])
+ y2 = nn_impl.depthwise_conv2d(x, f_up, strides, padding)
self.assertAllClose(y1.eval(), y2.eval(), rtol=1e-3, atol=1e-3)
if __name__ == "__main__":
- tf.test.main()
+ test.main()
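_upsample_filters above dilates each kernel by a Kronecker product with a one-hot block and then trims the trailing all-zero rows and columns; the spatial mechanics are plain NumPy and easy to inspect in isolation:

    import numpy as np

    rate = 2
    f = np.array([[1., 2.], [3., 4.]], dtype=np.float32)  # one 2x2 kernel
    ker = np.zeros([rate, rate], dtype=np.float32)
    ker[0, 0] = 1
    # kron inserts rate - 1 zeros after every tap; trimming leaves a
    # (k - 1) * rate + 1 = 3x3 dilated kernel.
    f_up = np.kron(f, ker)[:-(rate - 1), :-(rate - 1)]
    print(f_up)
    # [[1. 0. 2.]
    #  [0. 0. 0.]
    #  [3. 0. 4.]]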
diff --git a/tensorflow/python/kernel_tests/atrous_convolution_test.py b/tensorflow/python/kernel_tests/atrous_convolution_test.py
index b52b648f79..ca73cf88f8 100644
--- a/tensorflow/python/kernel_tests/atrous_convolution_test.py
+++ b/tensorflow/python/kernel_tests/atrous_convolution_test.py
@@ -13,12 +13,19 @@
# limitations under the License.
# ==============================================================================
"""Tests for atrous convolution functionality in tensorflow.ops.nn."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.ops import gradient_checker
+from tensorflow.python.ops import nn_ops
+import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
+from tensorflow.python.platform import test
def upsample_filters(filters, rate):
@@ -40,13 +47,13 @@ def upsample_filters(filters, rate):
num_spatial_dims = len(rate)
spatial_shape = np.array(filters.shape[:num_spatial_dims])
output_spatial_shape = (spatial_shape - 1) * rate + 1
- output = np.zeros(tuple(output_spatial_shape) + tuple(filters.shape[-2:]),
- filters.dtype)
+ output = np.zeros(
+ tuple(output_spatial_shape) + tuple(filters.shape[-2:]), filters.dtype)
output[tuple(np.s_[::rate[i]] for i in range(num_spatial_dims))] = filters
return output
-class AtrousConvolutionTest(tf.test.TestCase):
+class AtrousConvolutionTest(test.TestCase):
def _test_atrous_convolution(self, input_shape, filter_shape, dilation_rate,
**kwargs):
@@ -54,9 +61,9 @@ class AtrousConvolutionTest(tf.test.TestCase):
np.prod(filter_shape), dtype=np.float32).reshape(filter_shape)
filters_upsampled = upsample_filters(filters, dilation_rate)
x = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape)
- y1 = tf.nn.convolution(
+ y1 = nn_ops.convolution(
input=x, filter=filters, dilation_rate=dilation_rate, **kwargs)
- y2 = tf.nn.convolution(input=x, filter=filters_upsampled, **kwargs)
+ y2 = nn_ops.convolution(input=x, filter=filters_upsampled, **kwargs)
self.assertAllClose(y1.eval(), y2.eval(), rtol=1e-2, atol=1e-2)
def testAtrousConvolution2D(self):
@@ -78,12 +85,12 @@ class AtrousConvolutionTest(tf.test.TestCase):
for kernel_depth, kernel_height, kernel_width in [[3, 3, 3],
[3, 2, 2],
[2, 1, 3]]:
- for dilation_rate in [[1, 1, 1], [3, 3, 3], [3, 2, 3],
- [3, 1, 2]]:
+ for dilation_rate in [[1, 1, 1], [3, 3, 3], [3, 2, 3], [3, 1, 2]]:
self._test_atrous_convolution(
input_shape=[2, depth, height, width, 2],
- filter_shape=[kernel_depth, kernel_height, kernel_width,
- 2, 2],
+ filter_shape=[
+ kernel_depth, kernel_height, kernel_width, 2, 2
+ ],
padding=padding,
dilation_rate=dilation_rate)
@@ -100,7 +107,7 @@ class AtrousConvolutionTest(tf.test.TestCase):
dilation_rate=[rate])
def testAtrousConvolutionNC(self):
- if tf.test.is_gpu_available(cuda_only=True):
+ if test.is_gpu_available(cuda_only=True):
# "NCW" and "NCHW" formats are currently supported only on CUDA.
with self.test_session(use_gpu=True):
for padding in ["SAME", "VALID"]:
@@ -136,26 +143,28 @@ class AtrousConvolutionTest(tf.test.TestCase):
f2 = 1e-2 * np.random.random_sample(f_shape).astype(np.float32)
def combined_op(converted_input, num_spatial_dims, padding_arg): # pylint: disable=unused-argument
- result = tf.nn.convolution(
- input=converted_input, filter=f1, padding=padding) # pylint: disable=cell-var-from-loop
- result = tf.nn.convolution(
- input=result, filter=f2, padding=padding) # pylint: disable=cell-var-from-loop
+ result = nn_ops.convolution(
+ input=converted_input, filter=f1,
+ padding=padding) # pylint: disable=cell-var-from-loop
+ result = nn_ops.convolution(
+ input=result, filter=f2,
+ padding=padding) # pylint: disable=cell-var-from-loop
return result
for rate_height in range(2, 4):
for rate_width in range(2, 4):
dilation_rate = [rate_height, rate_width]
- y1 = tf.nn.convolution(
+ y1 = nn_ops.convolution(
input=x,
filter=f1,
padding=padding,
dilation_rate=dilation_rate)
- y1 = tf.nn.convolution(
+ y1 = nn_ops.convolution(
input=y1,
filter=f2,
padding=padding,
dilation_rate=dilation_rate)
- y2 = tf.nn.with_space_to_batch(
+ y2 = nn_ops.with_space_to_batch(
input=x,
dilation_rate=dilation_rate,
op=combined_op,
@@ -166,13 +175,13 @@ class AtrousConvolutionTest(tf.test.TestCase):
def _test_gradient(self, x_shape, f_shape, dilation_rate, padding):
x_val = np.random.random_sample(x_shape).astype(np.float32)
f_val = np.random.random_sample(f_shape).astype(np.float32)
- x = tf.constant(x_val, name="x", dtype=tf.float32)
- f = tf.constant(f_val, name="f", dtype=tf.float32)
- output = tf.nn.convolution(
+ x = constant_op.constant(x_val, name="x", dtype=dtypes.float32)
+ f = constant_op.constant(f_val, name="f", dtype=dtypes.float32)
+ output = nn_ops.convolution(
input=x, filter=f, dilation_rate=dilation_rate, padding=padding)
y_shape = output.get_shape().as_list()
- err = tf.test.compute_gradient_error([x, f], [x_shape, f_shape], output,
- y_shape)
+ err = gradient_checker.compute_gradient_error([x, f], [x_shape, f_shape],
+ output, y_shape)
err_tolerance = 1e-3
self.assertLess(err, err_tolerance)
@@ -189,4 +198,4 @@ class AtrousConvolutionTest(tf.test.TestCase):
if __name__ == "__main__":
- tf.test.main()
+ test.main()
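upsample_filters in this file performs the same dilation with strided assignment instead of a Kronecker product; on a 1-D kernel the computation reduces to:

    import numpy as np

    filters = np.array([1., 2., 3.], dtype=np.float32)
    rate = 3
    out_len = (filters.shape[0] - 1) * rate + 1  # (3 - 1) * 3 + 1 = 7
    output = np.zeros(out_len, filters.dtype)
    output[::rate] = filters
    print(output)  # [1. 0. 0. 2. 0. 0. 3.]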
diff --git a/tensorflow/python/kernel_tests/attention_ops_test.py b/tensorflow/python/kernel_tests/attention_ops_test.py
index cbb90f3e8f..f9c1727309 100644
--- a/tensorflow/python/kernel_tests/attention_ops_test.py
+++ b/tensorflow/python/kernel_tests/attention_ops_test.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for image.extract_glimpse()."""
from __future__ import absolute_import
@@ -20,14 +19,17 @@ from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+
+from tensorflow.python.framework import constant_op
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import image_ops
+from tensorflow.python.platform import test
-class ExtractGlimpseTest(tf.test.TestCase):
+class ExtractGlimpseTest(test.TestCase):
- def _VerifyValues(
- self, tensor_in_sizes, glimpse_sizes, offsets, expected_rows,
- expected_cols):
+ def _VerifyValues(self, tensor_in_sizes, glimpse_sizes, offsets,
+ expected_rows, expected_cols):
"""Verifies the output values of the glimpse extraction kernel.
Args:
@@ -49,14 +51,13 @@ class ExtractGlimpseTest(tf.test.TestCase):
# [ 3 3 3 ... ]
# [ ...
# ]
- t_rows = tf.tile(
- [[1.0 * r] for r in range(1, rows + 1)], [1, cols],
- name='tile_rows')
+ t_rows = array_ops.tile(
+ [[1.0 * r] for r in range(1, rows + 1)], [1, cols], name='tile_rows')
# Shuffle to switch to a convention of (batch_size, height, width, depth).
- t_rows_4d = tf.transpose(
- tf.expand_dims(
- tf.expand_dims(t_rows, 0), 3), [0, 2, 1, 3])
+ t_rows_4d = array_ops.transpose(
+ array_ops.expand_dims(array_ops.expand_dims(t_rows, 0), 3),
+ [0, 2, 1, 3])
# Column Tensor with entries by column.
# [[ 1 2 3 4 ... ]
@@ -64,24 +65,23 @@ class ExtractGlimpseTest(tf.test.TestCase):
# [ 1 2 3 4 ... ]
# [ ... ]
# ]
- t_cols = tf.tile(
- [[1.0 * r for r in range(1, cols + 1)]],
- [rows, 1], name='tile_cols')
+ t_cols = array_ops.tile(
+ [[1.0 * r for r in range(1, cols + 1)]], [rows, 1], name='tile_cols')
# Shuffle to switch to a convention of (batch_size, height, width, depth).
- t_cols_4d = tf.transpose(
- tf.expand_dims(
- tf.expand_dims(t_cols, 0), 3), [0, 2, 1, 3])
+ t_cols_4d = array_ops.transpose(
+ array_ops.expand_dims(array_ops.expand_dims(t_cols, 0), 3),
+ [0, 2, 1, 3])
# extract_glimpses from Row and Column Tensor, respectively.
# Switch order for glimpse_sizes and offsets to switch from (row, col)
# convention to TensorFlow's (height, width) convention.
- t1 = tf.constant([glimpse_sizes[1], glimpse_sizes[0]], shape=[2])
- t2 = tf.constant([offsets[1], offsets[0]], shape=[1, 2])
- glimpse_rows = (tf.transpose(
- tf.image.extract_glimpse(t_rows_4d, t1, t2), [0, 2, 1, 3]))
- glimpse_cols = (tf.transpose(
- tf.image.extract_glimpse(t_cols_4d, t1, t2), [0, 2, 1, 3]))
+ t1 = constant_op.constant([glimpse_sizes[1], glimpse_sizes[0]], shape=[2])
+ t2 = constant_op.constant([offsets[1], offsets[0]], shape=[1, 2])
+ glimpse_rows = (array_ops.transpose(
+ image_ops.extract_glimpse(t_rows_4d, t1, t2), [0, 2, 1, 3]))
+ glimpse_cols = (array_ops.transpose(
+ image_ops.extract_glimpse(t_cols_4d, t1, t2), [0, 2, 1, 3]))
# Evaluate the TensorFlow Graph.
with self.test_session() as sess:
@@ -108,83 +108,94 @@ class ExtractGlimpseTest(tf.test.TestCase):
self.assertEqual(value_cols[0][i][j][0], expected_cols[j])
def testCenterGlimpse(self):
- self._VerifyValues(tensor_in_sizes=[41, 61],
- glimpse_sizes=[3, 5],
- offsets=[0.0, 0.0],
- expected_rows=[20, 21, 22],
- expected_cols=[29, 30, 31, 32, 33])
+ self._VerifyValues(
+ tensor_in_sizes=[41, 61],
+ glimpse_sizes=[3, 5],
+ offsets=[0.0, 0.0],
+ expected_rows=[20, 21, 22],
+ expected_cols=[29, 30, 31, 32, 33])
def testEmptyTensor(self):
empty_image = np.zeros((0, 4, 3, 0))
offsets = np.zeros((0, 2))
with self.test_session():
- result = tf.image.extract_glimpse(empty_image, [1, 1], offsets)
- self.assertAllEqual(np.zeros((0, 1, 1, 0), dtype=np.float32),
- result.eval())
+ result = image_ops.extract_glimpse(empty_image, [1, 1], offsets)
+ self.assertAllEqual(
+ np.zeros(
+ (0, 1, 1, 0), dtype=np.float32), result.eval())
def testLargeCenterGlimpse(self):
- self._VerifyValues(tensor_in_sizes=[41, 61],
- glimpse_sizes=[41, 61],
- offsets=[0.0, 0.0],
- expected_rows=list(range(1, 42)),
- expected_cols=list(range(1, 62)))
+ self._VerifyValues(
+ tensor_in_sizes=[41, 61],
+ glimpse_sizes=[41, 61],
+ offsets=[0.0, 0.0],
+ expected_rows=list(range(1, 42)),
+ expected_cols=list(range(1, 62)))
def testTooLargeCenterGlimpse(self):
- self._VerifyValues(tensor_in_sizes=[41, 61],
- glimpse_sizes=[43, 63],
- offsets=[0.0, 0.0],
- expected_rows=[None] + list(range(1, 42)) + [None],
- expected_cols=[None] + list(range(1, 62)) + [None])
+ self._VerifyValues(
+ tensor_in_sizes=[41, 61],
+ glimpse_sizes=[43, 63],
+ offsets=[0.0, 0.0],
+ expected_rows=[None] + list(range(1, 42)) + [None],
+ expected_cols=[None] + list(range(1, 62)) + [None])
def testGlimpseFullOverlap(self):
- self._VerifyValues(tensor_in_sizes=[41, 61],
- glimpse_sizes=[3, 5],
- offsets=[0.1, 0.3],
- expected_rows=[22, 23, 24],
- expected_cols=[38, 39, 40, 41, 42])
+ self._VerifyValues(
+ tensor_in_sizes=[41, 61],
+ glimpse_sizes=[3, 5],
+ offsets=[0.1, 0.3],
+ expected_rows=[22, 23, 24],
+ expected_cols=[38, 39, 40, 41, 42])
def testGlimpseFullOverlap2(self):
- self._VerifyValues(tensor_in_sizes=[41, 61],
- glimpse_sizes=[11, 3],
- offsets=[-0.7, -0.7],
- expected_rows=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
- expected_cols=[8, 9, 10])
+ self._VerifyValues(
+ tensor_in_sizes=[41, 61],
+ glimpse_sizes=[11, 3],
+ offsets=[-0.7, -0.7],
+ expected_rows=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
+ expected_cols=[8, 9, 10])
def testGlimpseBeforeLeftMargin(self):
- self._VerifyValues(tensor_in_sizes=[41, 61],
- glimpse_sizes=[11, 5],
- offsets=[-0.7, -0.9],
- expected_rows=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
- expected_cols=[1, 2, 3, 4, 5])
+ self._VerifyValues(
+ tensor_in_sizes=[41, 61],
+ glimpse_sizes=[11, 5],
+ offsets=[-0.7, -0.9],
+ expected_rows=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
+ expected_cols=[1, 2, 3, 4, 5])
def testGlimpseLowerRightCorner(self):
- self._VerifyValues(tensor_in_sizes=[41, 61],
- glimpse_sizes=[7, 5],
- offsets=[1.0, 1.0],
- expected_rows=[38, 39, 40, 41, None, None, None],
- expected_cols=[59, 60, 61, None, None])
+ self._VerifyValues(
+ tensor_in_sizes=[41, 61],
+ glimpse_sizes=[7, 5],
+ offsets=[1.0, 1.0],
+ expected_rows=[38, 39, 40, 41, None, None, None],
+ expected_cols=[59, 60, 61, None, None])
def testGlimpseNoOverlap(self):
- self._VerifyValues(tensor_in_sizes=[20, 30],
- glimpse_sizes=[3, 3],
- offsets=[-2.0, 2.0],
- expected_rows=[None, None, None],
- expected_cols=[None, None, None])
+ self._VerifyValues(
+ tensor_in_sizes=[20, 30],
+ glimpse_sizes=[3, 3],
+ offsets=[-2.0, 2.0],
+ expected_rows=[None, None, None],
+ expected_cols=[None, None, None])
def testGlimpseOnLeftMargin(self):
- self._VerifyValues(tensor_in_sizes=[41, 61],
- glimpse_sizes=[11, 7],
- offsets=[-0.7, -1.0],
- expected_rows=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
- expected_cols=[None, None, None, 1, 2, 3, 4])
+ self._VerifyValues(
+ tensor_in_sizes=[41, 61],
+ glimpse_sizes=[11, 7],
+ offsets=[-0.7, -1.0],
+ expected_rows=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
+ expected_cols=[None, None, None, 1, 2, 3, 4])
def testGlimpseUpperMargin(self):
- self._VerifyValues(tensor_in_sizes=[41, 61],
- glimpse_sizes=[7, 5],
- offsets=[-1, 0.9],
- expected_rows=[None, None, None, 1, 2, 3, 4],
- expected_cols=[56, 57, 58, 59, 60])
+ self._VerifyValues(
+ tensor_in_sizes=[41, 61],
+ glimpse_sizes=[7, 5],
+ offsets=[-1, 0.9],
+ expected_rows=[None, None, None, 1, 2, 3, 4],
+ expected_cols=[56, 57, 58, 59, 60])
if __name__ == '__main__':
- tf.test.main()
+ test.main()
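A minimal standalone use of image_ops.extract_glimpse under the conventions these tests depend on: size is (height, width), offsets are normalized to [-1, 1] per batch element, and (0.0, 0.0) centers the glimpse. The sketch assumes the 0.12-era graph API:

    import numpy as np

    from tensorflow.python.client import session
    from tensorflow.python.framework import constant_op
    from tensorflow.python.ops import image_ops

    image = np.arange(25, dtype=np.float32).reshape(1, 5, 5, 1)
    size = constant_op.constant([3, 3], shape=[2])
    offsets = constant_op.constant([0.0, 0.0], shape=[1, 2])  # centered
    glimpse = image_ops.extract_glimpse(image, size, offsets)
    with session.Session() as sess:
      out = sess.run(glimpse)
      print(out.shape)        # (1, 3, 3, 1)
      print(out[0, 1, 1, 0])  # 12.0, the center pixel of the 5x5 input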
diff --git a/tensorflow/python/kernel_tests/barrier_ops_test.py b/tensorflow/python/kernel_tests/barrier_ops_test.py
index 03275b86e5..e90543a44b 100644
--- a/tensorflow/python/kernel_tests/barrier_ops_test.py
+++ b/tensorflow/python/kernel_tests/barrier_ops_test.py
@@ -12,8 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for barrier ops."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
@@ -21,21 +21,24 @@ from __future__ import print_function
import time
import numpy as np
-import tensorflow as tf
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import errors_impl
+from tensorflow.python.framework import ops
from tensorflow.python.ops import data_flow_ops
+from tensorflow.python.platform import test
-class BarrierTest(tf.test.TestCase):
+class BarrierTest(test.TestCase):
def testConstructorWithShapes(self):
- with tf.Graph().as_default():
+ with ops.Graph().as_default():
b = data_flow_ops.Barrier(
- (tf.float32, tf.float32),
+ (dtypes.float32, dtypes.float32),
shapes=((1, 2, 3), (8,)),
shared_name="B",
name="B")
- self.assertTrue(isinstance(b.barrier_ref, tf.Tensor))
+ self.assertTrue(isinstance(b.barrier_ref, ops.Tensor))
self.assertProtoEquals("""
name:'B' op:'Barrier'
attr {
@@ -66,9 +69,7 @@ class BarrierTest(tf.test.TestCase):
def testInsertMany(self):
with self.test_session():
b = data_flow_ops.Barrier(
- (tf.float32, tf.float32),
- shapes=((), ()),
- name="B")
+ (dtypes.float32, dtypes.float32), shapes=((), ()), name="B")
size_t = b.ready_size()
self.assertEqual([], size_t.get_shape())
keys = [b"a", b"b", b"c"]
@@ -86,15 +87,12 @@ class BarrierTest(tf.test.TestCase):
error_message = ("Empty tensors are not supported, but received shape "
r"\'\(0,\)\' at index 1")
with self.assertRaisesRegexp(ValueError, error_message):
- data_flow_ops.Barrier((tf.float32, tf.float32),
- shapes=((1,), (0,)),
- name="B")
+ data_flow_ops.Barrier(
+ (dtypes.float32, dtypes.float32), shapes=((1,), (0,)), name="B")
def testInsertManyEmptyTensorUnknown(self):
with self.test_session():
- b = data_flow_ops.Barrier(
- (tf.float32, tf.float32),
- name="B")
+ b = data_flow_ops.Barrier((dtypes.float32, dtypes.float32), name="B")
size_t = b.ready_size()
self.assertEqual([], size_t.get_shape())
keys = [b"a", b"b", b"c"]
@@ -107,9 +105,7 @@ class BarrierTest(tf.test.TestCase):
def testTakeMany(self):
with self.test_session() as sess:
b = data_flow_ops.Barrier(
- (tf.float32, tf.float32),
- shapes=((), ()),
- name="B")
+ (dtypes.float32, dtypes.float32), shapes=((), ()), name="B")
size_t = b.ready_size()
keys = [b"a", b"b", b"c"]
values_0 = [10.0, 20.0, 30.0]
@@ -122,8 +118,8 @@ class BarrierTest(tf.test.TestCase):
insert_1_op.run()
self.assertEquals(size_t.eval(), [3])
- indices_val, keys_val, values_0_val, values_1_val = sess.run([
- take_t[0], take_t[1], take_t[2][0], take_t[2][1]])
+ indices_val, keys_val, values_0_val, values_1_val = sess.run(
+ [take_t[0], take_t[1], take_t[2][0], take_t[2][1]])
self.assertAllEqual(indices_val, [-2**63] * 3)
for k, v0, v1 in zip(keys, values_0, values_1):
@@ -134,9 +130,7 @@ class BarrierTest(tf.test.TestCase):
def testTakeManySmallBatch(self):
with self.test_session() as sess:
b = data_flow_ops.Barrier(
- (tf.float32, tf.float32),
- shapes=((), ()),
- name="B")
+ (dtypes.float32, dtypes.float32), shapes=((), ()), name="B")
size_t = b.ready_size()
size_i = b.incomplete_size()
keys = [b"a", b"b", b"c", b"d"]
@@ -160,9 +154,8 @@ class BarrierTest(tf.test.TestCase):
# should return a reduced batch with 2 elements only.
self.assertEquals(size_i.eval(), [2]) # assert that incomplete size = 2
self.assertEquals(size_t.eval(), [2]) # assert that ready size = 2
- _, keys_val, values_0_val, values_1_val = sess.run([
- index_t, key_t, value_list_t[0], value_list_t[1]
- ])
+ _, keys_val, values_0_val, values_1_val = sess.run(
+ [index_t, key_t, value_list_t[0], value_list_t[1]])
# Check that correct values have been returned.
for k, v0, v1 in zip(keys[0:2], values_0[0:2], values_1[0:2]):
idx = keys_val.tolist().index(k)
@@ -174,9 +167,8 @@ class BarrierTest(tf.test.TestCase):
insert_1_2_op.run()
self.assertEquals(size_i.eval(), [1]) # assert that incomplete size = 1
self.assertEquals(size_t.eval(), [1]) # assert that ready size = 1
- _, keys_val, values_0_val, values_1_val = sess.run([
- index_t, key_t, value_list_t[0], value_list_t[1]
- ])
+ _, keys_val, values_0_val, values_1_val = sess.run(
+ [index_t, key_t, value_list_t[0], value_list_t[1]])
# Check that correct values have been returned.
for k, v0, v1 in zip(keys[2:3], values_0[2:3], values_1[2:3]):
idx = keys_val.tolist().index(k)
@@ -202,15 +194,12 @@ class BarrierTest(tf.test.TestCase):
def testUseBarrierWithShape(self):
with self.test_session() as sess:
b = data_flow_ops.Barrier(
- (tf.float32, tf.float32),
- shapes=((2, 2), (8,)),
- name="B")
+ (dtypes.float32, dtypes.float32), shapes=((2, 2), (8,)), name="B")
size_t = b.ready_size()
keys = [b"a", b"b", b"c"]
values_0 = np.array(
[[[10.0] * 2] * 2, [[20.0] * 2] * 2, [[30.0] * 2] * 2], np.float32)
- values_1 = np.array([[100.0] * 8, [200.0] * 8, [300.0] * 8],
- np.float32)
+ values_1 = np.array([[100.0] * 8, [200.0] * 8, [300.0] * 8], np.float32)
insert_0_op = b.insert_many(0, keys, values_0)
insert_1_op = b.insert_many(1, keys, values_1)
take_t = b.take_many(3)
@@ -219,8 +208,8 @@ class BarrierTest(tf.test.TestCase):
insert_1_op.run()
self.assertEquals(size_t.eval(), [3])
- indices_val, keys_val, values_0_val, values_1_val = sess.run([
- take_t[0], take_t[1], take_t[2][0], take_t[2][1]])
+ indices_val, keys_val, values_0_val, values_1_val = sess.run(
+ [take_t[0], take_t[1], take_t[2][0], take_t[2][1]])
self.assertAllEqual(indices_val, [-2**63] * 3)
self.assertShapeEqual(keys_val, take_t[1])
self.assertShapeEqual(values_0_val, take_t[2][0])
@@ -233,7 +222,7 @@ class BarrierTest(tf.test.TestCase):
def testParallelInsertMany(self):
with self.test_session() as sess:
- b = data_flow_ops.Barrier(tf.float32, shapes=())
+ b = data_flow_ops.Barrier(dtypes.float32, shapes=())
size_t = b.ready_size()
keys = [str(x).encode("ascii") for x in range(10)]
values = [float(x) for x in range(10)]
@@ -253,7 +242,7 @@ class BarrierTest(tf.test.TestCase):
def testParallelTakeMany(self):
with self.test_session() as sess:
- b = data_flow_ops.Barrier(tf.float32, shapes=())
+ b = data_flow_ops.Barrier(dtypes.float32, shapes=())
size_t = b.ready_size()
keys = [str(x).encode("ascii") for x in range(10)]
values = [float(x) for x in range(10)]
@@ -283,12 +272,11 @@ class BarrierTest(tf.test.TestCase):
self.assertAllEqual(np.hstack(index_vals), [-2**63] * 10)
self.assertItemsEqual(
- zip(keys, values),
- [(k[0], v[0]) for k, v in zip(key_vals, value_vals)])
+ zip(keys, values), [(k[0], v[0]) for k, v in zip(key_vals, value_vals)])
def testBlockingTakeMany(self):
with self.test_session() as sess:
- b = data_flow_ops.Barrier(tf.float32, shapes=())
+ b = data_flow_ops.Barrier(dtypes.float32, shapes=())
keys = [str(x).encode("ascii") for x in range(10)]
values = [float(x) for x in range(10)]
insert_ops = [b.insert_many(0, [k], [v]) for k, v in zip(keys, values)]
@@ -297,8 +285,8 @@ class BarrierTest(tf.test.TestCase):
def take():
indices_val, keys_val, values_val = sess.run(
[take_t[0], take_t[1], take_t[2][0]])
- self.assertAllEqual(
- indices_val, [int(x.decode("ascii")) - 2**63 for x in keys_val])
+ self.assertAllEqual(indices_val,
+ [int(x.decode("ascii")) - 2**63 for x in keys_val])
self.assertItemsEqual(zip(keys, values), zip(keys_val, values_val))
t = self.checkedThread(target=take)
@@ -311,28 +299,32 @@ class BarrierTest(tf.test.TestCase):
def testParallelInsertManyTakeMany(self):
with self.test_session() as sess:
b = data_flow_ops.Barrier(
- (tf.float32, tf.int64), shapes=((), (2,)))
+ (dtypes.float32, dtypes.int64), shapes=((), (2,)))
num_iterations = 100
keys = [str(x) for x in range(10)]
values_0 = np.asarray(range(10), dtype=np.float32)
- values_1 = np.asarray([[x+1, x + 2] for x in range(10)], dtype=np.int64)
+ values_1 = np.asarray([[x + 1, x + 2] for x in range(10)], dtype=np.int64)
keys_i = lambda i: [("%d:%s" % (i, k)).encode("ascii") for k in keys]
insert_0_ops = [
b.insert_many(0, keys_i(i), values_0 + i)
- for i in range(num_iterations)]
+ for i in range(num_iterations)
+ ]
insert_1_ops = [
b.insert_many(1, keys_i(i), values_1 + i)
- for i in range(num_iterations)]
+ for i in range(num_iterations)
+ ]
take_ops = [b.take_many(10) for _ in range(num_iterations)]
def take(sess, i, taken):
- indices_val, keys_val, values_0_val, values_1_val = sess.run(
- [take_ops[i][0], take_ops[i][1],
- take_ops[i][2][0], take_ops[i][2][1]])
- taken.append({"indices": indices_val,
- "keys": keys_val,
- "values_0": values_0_val,
- "values_1": values_1_val})
+ indices_val, keys_val, values_0_val, values_1_val = sess.run([
+ take_ops[i][0], take_ops[i][1], take_ops[i][2][0], take_ops[i][2][1]
+ ])
+ taken.append({
+ "indices": indices_val,
+ "keys": keys_val,
+ "values_0": values_0_val,
+ "values_1": values_1_val
+ })
def insert(sess, i):
sess.run([insert_0_ops[i], insert_1_ops[i]])
@@ -340,11 +332,13 @@ class BarrierTest(tf.test.TestCase):
taken = []
take_threads = [
- self.checkedThread(target=take, args=(sess, i, taken))
- for i in range(num_iterations)]
+ self.checkedThread(
+ target=take, args=(sess, i, taken)) for i in range(num_iterations)
+ ]
insert_threads = [
- self.checkedThread(target=insert, args=(sess, i))
- for i in range(num_iterations)]
+ self.checkedThread(
+ target=insert, args=(sess, i)) for i in range(num_iterations)
+ ]
for t in take_threads:
t.start()
@@ -361,10 +355,10 @@ class BarrierTest(tf.test.TestCase):
all_indices = sorted(flatten([t_i["indices"] for t_i in taken]))
all_keys = sorted(flatten([t_i["keys"] for t_i in taken]))
- expected_keys = sorted(flatten(
- [keys_i(i) for i in range(num_iterations)]))
- expected_indices = sorted(flatten(
- [-2**63 + j] * 10 for j in range(num_iterations)))
+ expected_keys = sorted(
+ flatten([keys_i(i) for i in range(num_iterations)]))
+ expected_indices = sorted(
+ flatten([-2**63 + j] * 10 for j in range(num_iterations)))
self.assertAllEqual(all_indices, expected_indices)
self.assertAllEqual(all_keys, expected_keys)
@@ -384,9 +378,7 @@ class BarrierTest(tf.test.TestCase):
def testClose(self):
with self.test_session() as sess:
b = data_flow_ops.Barrier(
- (tf.float32, tf.float32),
- shapes=((), ()),
- name="B")
+ (dtypes.float32, dtypes.float32), shapes=((), ()), name="B")
size_t = b.ready_size()
incomplete_t = b.incomplete_size()
keys = [b"a", b"b", b"c"]
@@ -422,9 +414,8 @@ class BarrierTest(tf.test.TestCase):
# This op should fail because we requested more elements than are
# available in incomplete + ready queue.
- with self.assertRaisesOpError(
- r"is closed and has insufficient elements "
- r"\(requested 4, total size 3\)"):
+ with self.assertRaisesOpError(r"is closed and has insufficient elements "
+ r"\(requested 4, total size 3\)"):
sess.run(take_too_many_t[0]) # Sufficient to request just the indices
# This op should succeed because there are still completed elements
@@ -445,9 +436,7 @@ class BarrierTest(tf.test.TestCase):
def testCancel(self):
with self.test_session() as sess:
b = data_flow_ops.Barrier(
- (tf.float32, tf.float32),
- shapes=((), ()),
- name="B")
+ (dtypes.float32, dtypes.float32), shapes=((), ()), name="B")
size_t = b.ready_size()
incomplete_t = b.incomplete_size()
keys = [b"a", b"b", b"c"]
@@ -478,9 +467,8 @@ class BarrierTest(tf.test.TestCase):
# This op should fail because we requested more elements than are
# available in incomplete + ready queue.
- with self.assertRaisesOpError(
- r"is closed and has insufficient elements "
- r"\(requested 3, total size 2\)"):
+ with self.assertRaisesOpError(r"is closed and has insufficient elements "
+ r"\(requested 3, total size 2\)"):
sess.run(take_too_many_t[0]) # Sufficient to request just the indices
# This op should succeed because there are still completed elements
@@ -501,7 +489,7 @@ class BarrierTest(tf.test.TestCase):
def _testClosedEmptyBarrierTakeManyAllowSmallBatchRaises(self, cancel):
with self.test_session() as sess:
b = data_flow_ops.Barrier(
- (tf.float32, tf.float32), shapes=((), ()), name="B")
+ (dtypes.float32, dtypes.float32), shapes=((), ()), name="B")
take_t = b.take_many(1, allow_small_batch=True)
sess.run(b.close(cancel))
with self.assertRaisesOpError("is closed and has insufficient elements"):
@@ -514,7 +502,7 @@ class BarrierTest(tf.test.TestCase):
def _testParallelInsertManyTakeManyCloseHalfwayThrough(self, cancel):
with self.test_session() as sess:
b = data_flow_ops.Barrier(
- (tf.float32, tf.int64), shapes=((), (2,)))
+ (dtypes.float32, dtypes.int64), shapes=((), (2,)))
num_iterations = 50
keys = [str(x) for x in range(10)]
values_0 = np.asarray(range(10), dtype=np.float32)
@@ -522,39 +510,44 @@ class BarrierTest(tf.test.TestCase):
keys_i = lambda i: [("%d:%s" % (i, k)).encode("ascii") for k in keys]
insert_0_ops = [
b.insert_many(0, keys_i(i), values_0 + i)
- for i in range(num_iterations)]
+ for i in range(num_iterations)
+ ]
insert_1_ops = [
b.insert_many(1, keys_i(i), values_1 + i)
- for i in range(num_iterations)]
+ for i in range(num_iterations)
+ ]
take_ops = [b.take_many(10) for _ in range(num_iterations)]
close_op = b.close(cancel_pending_enqueues=cancel)
def take(sess, i, taken):
try:
- indices_val, unused_keys_val, unused_val_0, unused_val_1 = sess.run(
- [take_ops[i][0], take_ops[i][1],
- take_ops[i][2][0], take_ops[i][2][1]])
+ indices_val, unused_keys_val, unused_val_0, unused_val_1 = sess.run([
+ take_ops[i][0], take_ops[i][1], take_ops[i][2][0],
+ take_ops[i][2][1]
+ ])
taken.append(len(indices_val))
- except tf.errors.OutOfRangeError:
+ except errors_impl.OutOfRangeError:
taken.append(0)
def insert(sess, i):
try:
sess.run([insert_0_ops[i], insert_1_ops[i]])
- except tf.errors.CancelledError:
+ except errors_impl.CancelledError:
pass
taken = []
take_threads = [
- self.checkedThread(target=take, args=(sess, i, taken))
- for i in range(num_iterations)]
+ self.checkedThread(
+ target=take, args=(sess, i, taken)) for i in range(num_iterations)
+ ]
insert_threads = [
- self.checkedThread(target=insert, args=(sess, i))
- for i in range(num_iterations)]
+ self.checkedThread(
+ target=insert, args=(sess, i)) for i in range(num_iterations)
+ ]
- first_half_insert_threads = insert_threads[:num_iterations//2]
- second_half_insert_threads = insert_threads[num_iterations//2:]
+ first_half_insert_threads = insert_threads[:num_iterations // 2]
+ second_half_insert_threads = insert_threads[num_iterations // 2:]
for t in take_threads:
t.start()
@@ -573,7 +566,8 @@ class BarrierTest(tf.test.TestCase):
t.join()
self.assertEqual(
- sorted(taken), [0] * (num_iterations//2) + [10] * (num_iterations//2))
+ sorted(taken),
+ [0] * (num_iterations // 2) + [10] * (num_iterations // 2))
def testParallelInsertManyTakeManyCloseHalfwayThrough(self):
self._testParallelInsertManyTakeManyCloseHalfwayThrough(cancel=False)
@@ -584,38 +578,47 @@ class BarrierTest(tf.test.TestCase):
def _testParallelPartialInsertManyTakeManyCloseHalfwayThrough(self, cancel):
with self.test_session() as sess:
b = data_flow_ops.Barrier(
- (tf.float32, tf.int64), shapes=((), (2,)))
+ (dtypes.float32, dtypes.int64), shapes=((), (2,)))
num_iterations = 100
keys = [str(x) for x in range(10)]
values_0 = np.asarray(range(10), dtype=np.float32)
values_1 = np.asarray([[x + 1, x + 2] for x in range(10)], dtype=np.int64)
keys_i = lambda i: [("%d:%s" % (i, k)).encode("ascii") for k in keys]
insert_0_ops = [
- b.insert_many(0, keys_i(i), values_0 + i, name="insert_0_%d" % i)
- for i in range(num_iterations)]
+ b.insert_many(
+ 0, keys_i(i), values_0 + i, name="insert_0_%d" % i)
+ for i in range(num_iterations)
+ ]
close_op = b.close(cancel_pending_enqueues=cancel)
- take_ops = [b.take_many(10, name="take_%d" % i)
- for i in range(num_iterations)]
+ take_ops = [
+ b.take_many(
+ 10, name="take_%d" % i) for i in range(num_iterations)
+ ]
# insert_1_ops will only run after closure
insert_1_ops = [
- b.insert_many(1, keys_i(i), values_1 + i, name="insert_1_%d" % i)
- for i in range(num_iterations)]
+ b.insert_many(
+ 1, keys_i(i), values_1 + i, name="insert_1_%d" % i)
+ for i in range(num_iterations)
+ ]
def take(sess, i, taken):
if cancel:
try:
indices_val, unused_keys_val, unused_val_0, unused_val_1 = sess.run(
- [take_ops[i][0], take_ops[i][1],
- take_ops[i][2][0], take_ops[i][2][1]])
+ [
+ take_ops[i][0], take_ops[i][1], take_ops[i][2][0],
+ take_ops[i][2][1]
+ ])
taken.append(len(indices_val))
- except tf.errors.OutOfRangeError:
+ except errors_impl.OutOfRangeError:
taken.append(0)
else:
- indices_val, unused_keys_val, unused_val_0, unused_val_1 = sess.run(
- [take_ops[i][0], take_ops[i][1],
- take_ops[i][2][0], take_ops[i][2][1]])
+ indices_val, unused_keys_val, unused_val_0, unused_val_1 = sess.run([
+ take_ops[i][0], take_ops[i][1], take_ops[i][2][0],
+ take_ops[i][2][1]
+ ])
taken.append(len(indices_val))
def insert_0(sess, i):
@@ -625,7 +628,7 @@ class BarrierTest(tf.test.TestCase):
if cancel:
try:
insert_1_ops[i].run(session=sess)
- except tf.errors.CancelledError:
+ except errors_impl.CancelledError:
pass
else:
insert_1_ops[i].run(session=sess)
@@ -633,14 +636,17 @@ class BarrierTest(tf.test.TestCase):
taken = []
take_threads = [
- self.checkedThread(target=take, args=(sess, i, taken))
- for i in range(num_iterations)]
+ self.checkedThread(
+ target=take, args=(sess, i, taken)) for i in range(num_iterations)
+ ]
insert_0_threads = [
- self.checkedThread(target=insert_0, args=(sess, i))
- for i in range(num_iterations)]
+ self.checkedThread(
+ target=insert_0, args=(sess, i)) for i in range(num_iterations)
+ ]
insert_1_threads = [
- self.checkedThread(target=insert_1, args=(sess, i))
- for i in range(num_iterations)]
+ self.checkedThread(
+ target=insert_1, args=(sess, i)) for i in range(num_iterations)
+ ]
for t in insert_0_threads:
t.start()
@@ -672,39 +678,36 @@ class BarrierTest(tf.test.TestCase):
def testIncompatibleSharedBarrierErrors(self):
with self.test_session():
# Do component types and shapes.
- b_a_1 = data_flow_ops.Barrier((tf.float32,), shapes=(()),
- shared_name="b_a")
- b_a_2 = data_flow_ops.Barrier((tf.int32,), shapes=(()),
- shared_name="b_a")
+ b_a_1 = data_flow_ops.Barrier(
+ (dtypes.float32,), shapes=(()), shared_name="b_a")
+ b_a_2 = data_flow_ops.Barrier(
+ (dtypes.int32,), shapes=(()), shared_name="b_a")
b_a_1.barrier_ref.eval()
with self.assertRaisesOpError("component types"):
b_a_2.barrier_ref.eval()
- b_b_1 = data_flow_ops.Barrier((tf.float32,), shapes=(()),
- shared_name="b_b")
+ b_b_1 = data_flow_ops.Barrier(
+ (dtypes.float32,), shapes=(()), shared_name="b_b")
b_b_2 = data_flow_ops.Barrier(
- (tf.float32, tf.int32),
- shapes=((), ()),
- shared_name="b_b")
+ (dtypes.float32, dtypes.int32), shapes=((), ()), shared_name="b_b")
b_b_1.barrier_ref.eval()
with self.assertRaisesOpError("component types"):
b_b_2.barrier_ref.eval()
b_c_1 = data_flow_ops.Barrier(
- (tf.float32, tf.float32),
+ (dtypes.float32, dtypes.float32),
shapes=((2, 2), (8,)),
shared_name="b_c")
b_c_2 = data_flow_ops.Barrier(
- (tf.float32, tf.float32), shared_name="b_c")
+ (dtypes.float32, dtypes.float32), shared_name="b_c")
b_c_1.barrier_ref.eval()
with self.assertRaisesOpError("component shapes"):
b_c_2.barrier_ref.eval()
b_d_1 = data_flow_ops.Barrier(
- (tf.float32, tf.float32), shapes=((), ()),
- shared_name="b_d")
+ (dtypes.float32, dtypes.float32), shapes=((), ()), shared_name="b_d")
b_d_2 = data_flow_ops.Barrier(
- (tf.float32, tf.float32),
+ (dtypes.float32, dtypes.float32),
shapes=((2, 2), (8,)),
shared_name="b_d")
b_d_1.barrier_ref.eval()
@@ -712,11 +715,11 @@ class BarrierTest(tf.test.TestCase):
b_d_2.barrier_ref.eval()
b_e_1 = data_flow_ops.Barrier(
- (tf.float32, tf.float32),
+ (dtypes.float32, dtypes.float32),
shapes=((2, 2), (8,)),
shared_name="b_e")
b_e_2 = data_flow_ops.Barrier(
- (tf.float32, tf.float32),
+ (dtypes.float32, dtypes.float32),
shapes=((2, 5), (8,)),
shared_name="b_e")
b_e_1.barrier_ref.eval()
@@ -725,4 +728,4 @@ class BarrierTest(tf.test.TestCase):
if __name__ == "__main__":
- tf.test.main()
+ test.main()
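The barrier tests above all drive one flow: insert each component under a key, then take keys once they are complete. A minimal sketch of that flow with the post-rewrite imports (a hypothetical standalone test, assuming a TensorFlow build of this era where data_flow_ops.Barrier and these internal modules exist):

    import numpy as np

    from tensorflow.python.framework import dtypes
    from tensorflow.python.ops import data_flow_ops
    from tensorflow.python.platform import test


    class BarrierSketch(test.TestCase):  # illustrative; not part of this commit

      def testInsertThenTake(self):
        with self.test_session() as sess:
          # Two components per key: a scalar float and a length-2 int64 vector.
          b = data_flow_ops.Barrier(
              (dtypes.float32, dtypes.int64), shapes=((), (2,)))
          insert_0 = b.insert_many(0, [b"k0"], np.array([1.0], np.float32))
          insert_1 = b.insert_many(1, [b"k0"], np.array([[2, 3]], np.int64))
          take = b.take_many(1)
          insert_0.run()
          insert_1.run()  # "k0" completes only once both components are in
          indices, _, _ = sess.run([take[0], take[1], take[2]])
          # Completion indices count up from the smallest int64, which is why
          # the assertions above compare against -2**63.
          self.assertAllEqual(indices, [-2**63])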
diff --git a/tensorflow/python/kernel_tests/basic_gpu_test.py b/tensorflow/python/kernel_tests/basic_gpu_test.py
index 979347dec8..a9bcb3fea9 100644
--- a/tensorflow/python/kernel_tests/basic_gpu_test.py
+++ b/tensorflow/python/kernel_tests/basic_gpu_test.py
@@ -13,28 +13,36 @@
# limitations under the License.
# ==============================================================================
"""Functional tests for basic component wise operations using a GPU device."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-import tensorflow as tf
-
import math
+
import numpy as np
+
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_math_ops
+from tensorflow.python.ops import gradient_checker
+from tensorflow.python.ops import math_ops
from tensorflow.python.ops.gen_array_ops import _broadcast_gradient_args
+from tensorflow.python.platform import test
+
+
-class GPUBinaryOpsTest(tf.test.TestCase):
+class GPUBinaryOpsTest(test.TestCase):
def _compareGPU(self, x, y, np_func, tf_func):
with self.test_session(use_gpu=True) as sess:
- inx = tf.convert_to_tensor(x)
- iny = tf.convert_to_tensor(y)
+ inx = ops.convert_to_tensor(x)
+ iny = ops.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_gpu = sess.run(out)
with self.test_session(use_gpu=False) as sess:
- inx = tf.convert_to_tensor(x)
- iny = tf.convert_to_tensor(y)
+ inx = ops.convert_to_tensor(x)
+ iny = ops.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_cpu = sess.run(out)
@@ -43,43 +51,44 @@ class GPUBinaryOpsTest(tf.test.TestCase):
def testFloatBasic(self):
x = np.linspace(-5, 20, 15).reshape(1, 3, 5).astype(np.float32)
y = np.linspace(20, -5, 15).reshape(1, 3, 5).astype(np.float32)
- self._compareGPU(x, y, np.add, tf.add)
- self._compareGPU(x, y, np.subtract, tf.sub)
- self._compareGPU(x, y, np.multiply, tf.mul)
- self._compareGPU(x, y + 0.1, np.true_divide, tf.truediv)
- self._compareGPU(x, y + 0.1, np.floor_divide, tf.floordiv)
- self._compareGPU(x, y, np.power, tf.pow)
+ self._compareGPU(x, y, np.add, math_ops.add)
+ self._compareGPU(x, y, np.subtract, math_ops.sub)
+ self._compareGPU(x, y, np.multiply, math_ops.mul)
+ self._compareGPU(x, y + 0.1, np.true_divide, math_ops.truediv)
+ self._compareGPU(x, y + 0.1, np.floor_divide, math_ops.floordiv)
+ self._compareGPU(x, y, np.power, math_ops.pow)
def testFloatWithBCast(self):
x = np.linspace(-5, 20, 15).reshape(3, 5).astype(np.float32)
y = np.linspace(20, -5, 30).reshape(2, 3, 5).astype(np.float32)
- self._compareGPU(x, y, np.add, tf.add)
- self._compareGPU(x, y, np.subtract, tf.sub)
- self._compareGPU(x, y, np.multiply, tf.mul)
- self._compareGPU(x, y + 0.1, np.true_divide, tf.truediv)
+ self._compareGPU(x, y, np.add, math_ops.add)
+ self._compareGPU(x, y, np.subtract, math_ops.sub)
+ self._compareGPU(x, y, np.multiply, math_ops.mul)
+ self._compareGPU(x, y + 0.1, np.true_divide, math_ops.truediv)
def testDoubleBasic(self):
x = np.linspace(-5, 20, 15).reshape(1, 3, 5).astype(np.float64)
y = np.linspace(20, -5, 15).reshape(1, 3, 5).astype(np.float64)
- self._compareGPU(x, y, np.add, tf.add)
- self._compareGPU(x, y, np.subtract, tf.sub)
- self._compareGPU(x, y, np.multiply, tf.mul)
- self._compareGPU(x, y + 0.1, np.true_divide, tf.truediv)
+ self._compareGPU(x, y, np.add, math_ops.add)
+ self._compareGPU(x, y, np.subtract, math_ops.sub)
+ self._compareGPU(x, y, np.multiply, math_ops.mul)
+ self._compareGPU(x, y + 0.1, np.true_divide, math_ops.truediv)
def testDoubleWithBCast(self):
x = np.linspace(-5, 20, 15).reshape(3, 5).astype(np.float64)
y = np.linspace(20, -5, 30).reshape(2, 3, 5).astype(np.float64)
- self._compareGPU(x, y, np.add, tf.add)
- self._compareGPU(x, y, np.subtract, tf.sub)
- self._compareGPU(x, y, np.multiply, tf.mul)
- self._compareGPU(x, y + 0.1, np.true_divide, tf.truediv)
+ self._compareGPU(x, y, np.add, math_ops.add)
+ self._compareGPU(x, y, np.subtract, math_ops.sub)
+ self._compareGPU(x, y, np.multiply, math_ops.mul)
+ self._compareGPU(x, y + 0.1, np.true_divide, math_ops.truediv)
-class MathBuiltinUnaryTest(tf.test.TestCase):
+class MathBuiltinUnaryTest(test.TestCase):
+
def _compare(self, x, np_func, tf_func, use_gpu):
np_out = np_func(x)
with self.test_session(use_gpu=use_gpu) as sess:
- inx = tf.convert_to_tensor(x)
+ inx = ops.convert_to_tensor(x)
ofunc = tf_func(inx)
tf_out = sess.run(ofunc)
self.assertAllClose(np_out, tf_out)
@@ -92,44 +101,48 @@ class MathBuiltinUnaryTest(tf.test.TestCase):
def _testDtype(self, dtype, use_gpu):
data = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(dtype)
- self._compare(data, np.abs, tf.abs, use_gpu)
- self._compare(data, np.arccos, tf.acos, use_gpu)
- self._compare(data, np.arcsin, tf.asin, use_gpu)
- self._compare(data, np.arctan, tf.atan, use_gpu)
- self._compare(data, np.ceil, tf.ceil, use_gpu)
- self._compare(data, np.cos, tf.cos, use_gpu)
- self._compare(data, np.exp, tf.exp, use_gpu)
- self._compare(data, np.floor, tf.floor, use_gpu)
- self._compare(data, np.log, tf.log, use_gpu)
- self._compare(data, np.log1p, tf.log1p, use_gpu)
- self._compare(data, np.negative, tf.neg, use_gpu)
- self._compare(data, self._rsqrt, tf.rsqrt, use_gpu)
- self._compare(data, np.sin, tf.sin, use_gpu)
- self._compare(data, np.sqrt, tf.sqrt, use_gpu)
- self._compare(data, np.square, tf.square, use_gpu)
- self._compare(data, np.tan, tf.tan, use_gpu)
- self._compare(data, np.tanh, tf.tanh, use_gpu)
+ self._compare(data, np.abs, math_ops.abs, use_gpu)
+ self._compare(data, np.arccos, math_ops.acos, use_gpu)
+ self._compare(data, np.arcsin, math_ops.asin, use_gpu)
+ self._compare(data, np.arctan, math_ops.atan, use_gpu)
+ self._compare(data, np.ceil, math_ops.ceil, use_gpu)
+ self._compare(data, np.cos, math_ops.cos, use_gpu)
+ self._compare(data, np.exp, math_ops.exp, use_gpu)
+ self._compare(data, np.floor, math_ops.floor, use_gpu)
+ self._compare(data, np.log, math_ops.log, use_gpu)
+ self._compare(data, np.log1p, math_ops.log1p, use_gpu)
+ self._compare(data, np.negative, math_ops.neg, use_gpu)
+ self._compare(data, self._rsqrt, math_ops.rsqrt, use_gpu)
+ self._compare(data, np.sin, math_ops.sin, use_gpu)
+ self._compare(data, np.sqrt, math_ops.sqrt, use_gpu)
+ self._compare(data, np.square, math_ops.square, use_gpu)
+ self._compare(data, np.tan, math_ops.tan, use_gpu)
+ self._compare(data, np.tanh, math_ops.tanh, use_gpu)
def testTypes(self):
for dtype in [np.float32]:
self._testDtype(dtype, use_gpu=True)
  def testFloorDivide(self):
- x = (1 + np.linspace(0, 5, np.prod([1, 3, 2]))).astype(np.float32).reshape([1, 3, 2])
- y = (1 + np.linspace(0, 5, np.prod([1, 3, 2]))).astype(np.float32).reshape([1, 3, 2])
+ x = (1 + np.linspace(0, 5, np.prod([1, 3, 2]))).astype(np.float32).reshape(
+ [1, 3, 2])
+ y = (1 + np.linspace(0, 5, np.prod([1, 3, 2]))).astype(np.float32).reshape(
+ [1, 3, 2])
np_out = np.floor_divide(x, y + 0.1)
with self.test_session(use_gpu=True) as sess:
- inx = tf.convert_to_tensor(x)
- iny = tf.convert_to_tensor(y + 0.1)
+ inx = ops.convert_to_tensor(x)
+ iny = ops.convert_to_tensor(y + 0.1)
ofunc = inx / iny
- out_func2 = tf.floor(ofunc)
+ out_func2 = math_ops.floor(ofunc)
tf_out = sess.run(out_func2)
self.assertAllClose(np_out, tf_out)
-class BroadcastSimpleTest(tf.test.TestCase):
+
+class BroadcastSimpleTest(test.TestCase):
+
def _GetGradientArgs(self, xs, ys):
with self.test_session(use_gpu=True) as sess:
return sess.run(_broadcast_gradient_args(xs, ys))
@@ -139,53 +152,55 @@ class BroadcastSimpleTest(tf.test.TestCase):
self.assertAllEqual(r0, [])
self.assertAllEqual(r1, [0, 1, 2])
- _GRAD_TOL = {tf.float32: 1e-3}
+ _GRAD_TOL = {dtypes.float32: 1e-3}
- def _compareGradientX(self, x, y, np_func, tf_func,
- numeric_gradient_type=None):
+ def _compareGradientX(self,
+ x,
+ y,
+ np_func,
+ tf_func,
+ numeric_gradient_type=None):
z = np_func(x, y)
zs = list(z.shape)
with self.test_session():
- inx = tf.convert_to_tensor(x)
- iny = tf.convert_to_tensor(y)
+ inx = ops.convert_to_tensor(x)
+ iny = ops.convert_to_tensor(y)
if x.dtype in (np.float32, np.float64):
out = 1.1 * tf_func(inx, iny)
else:
out = tf_func(inx, iny)
xs = list(x.shape)
- jacob_t, jacob_n = tf.test.compute_gradient(inx,
- xs,
- out,
- zs,
- x_init_value=x)
- tol = self._GRAD_TOL[tf.as_dtype(x.dtype)]
+ jacob_t, jacob_n = gradient_checker.compute_gradient(
+ inx, xs, out, zs, x_init_value=x)
+ tol = self._GRAD_TOL[dtypes.as_dtype(x.dtype)]
self.assertAllClose(jacob_t, jacob_n, rtol=tol, atol=tol)
- def _compareGradientY(self, x, y, np_func, tf_func,
+ def _compareGradientY(self,
+ x,
+ y,
+ np_func,
+ tf_func,
numeric_gradient_type=None):
z = np_func(x, y)
zs = list(z.shape)
with self.test_session():
- inx = tf.convert_to_tensor(x)
- iny = tf.convert_to_tensor(y)
+ inx = ops.convert_to_tensor(x)
+ iny = ops.convert_to_tensor(y)
if x.dtype in (np.float32, np.float64):
out = 1.1 * tf_func(inx, iny)
else:
out = tf_func(inx, iny)
ys = list(np.shape(y))
- jacob_t, jacob_n = tf.test.compute_gradient(iny,
- ys,
- out,
- zs,
- x_init_value=y)
- tol = self._GRAD_TOL[tf.as_dtype(x.dtype)]
+ jacob_t, jacob_n = gradient_checker.compute_gradient(
+ iny, ys, out, zs, x_init_value=y)
+ tol = self._GRAD_TOL[dtypes.as_dtype(x.dtype)]
self.assertAllClose(jacob_t, jacob_n, rtol=tol, atol=tol)
def _compareGpu(self, x, y, np_func, tf_func):
np_ans = np_func(x, y)
with self.test_session(use_gpu=True):
- inx = tf.convert_to_tensor(x)
- iny = tf.convert_to_tensor(y)
+ inx = ops.convert_to_tensor(x)
+ iny = ops.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_gpu = out.eval()
self.assertAllClose(np_ans, tf_gpu)
@@ -193,13 +208,16 @@ class BroadcastSimpleTest(tf.test.TestCase):
# TODO(zhifengc/ke): make gradient checker work on GPU.
def testGradient(self):
- x = (1 + np.linspace(0, 5, np.prod([1, 3, 2]))).astype(np.float32).reshape([1, 3, 2])
- y = (1 + np.linspace(0, 5, np.prod([1, 3, 2]))).astype(np.float32).reshape([1, 3, 2])
+ x = (1 + np.linspace(0, 5, np.prod([1, 3, 2]))).astype(np.float32).reshape(
+ [1, 3, 2])
+ y = (1 + np.linspace(0, 5, np.prod([1, 3, 2]))).astype(np.float32).reshape(
+ [1, 3, 2])
+
+ self._compareGradientX(x, y, np.true_divide, math_ops.truediv)
+ self._compareGradientY(x, y, np.true_divide, math_ops.truediv)
+ self._compareGpu(x, y, np.true_divide, math_ops.truediv)
+ self._compareGpu(x, y + 0.1, np.floor_divide, math_ops.floordiv)
- self._compareGradientX(x , y, np.true_divide, tf.truediv)
- self._compareGradientY(x, y, np.true_divide, tf.truediv)
- self._compareGpu(x, y, np.true_divide, tf.truediv)
- self._compareGpu(x, y +0.1 , np.floor_divide, tf.floordiv)
if __name__ == "__main__":
- tf.test.main()
+ test.main()
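Everything in this file reduces to one harness: run the same op in a GPU session and a CPU session, then check both against the numpy equivalent. A trimmed sketch under the same internal imports (class and test names are illustrative; test_session falls back to CPU when no GPU is present):

    import numpy as np

    from tensorflow.python.framework import ops
    from tensorflow.python.ops import math_ops
    from tensorflow.python.platform import test


    class CompareSketch(test.TestCase):

      def testAddMatchesNumPyOnBothDevices(self):
        x = np.linspace(-5, 20, 15).reshape(1, 3, 5).astype(np.float32)
        y = np.linspace(20, -5, 15).reshape(1, 3, 5).astype(np.float32)
        for use_gpu in (False, True):
          with self.test_session(use_gpu=use_gpu) as sess:
            out = sess.run(math_ops.add(ops.convert_to_tensor(x),
                                        ops.convert_to_tensor(y)))
          self.assertAllClose(np.add(x, y), out)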
diff --git a/tensorflow/python/kernel_tests/batch_matmul_op_test.py b/tensorflow/python/kernel_tests/batch_matmul_op_test.py
index 8e9daff319..a1aad2f4e1 100644
--- a/tensorflow/python/kernel_tests/batch_matmul_op_test.py
+++ b/tensorflow/python/kernel_tests/batch_matmul_op_test.py
@@ -13,15 +13,21 @@
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.BatchMatMul."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+
+from tensorflow.python.framework import constant_op
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import gradient_checker
+from tensorflow.python.ops import math_ops
+from tensorflow.python.platform import test
-class BatchMatmulOpTest(tf.test.TestCase):
+class BatchMatmulOpTest(test.TestCase):
# Uses numpy to compute batch_matmul(x, y, adjoint_a, adjoint_b).
def _npBatchMatmul(self, x, y, adjoint_a, adjoint_b):
@@ -79,12 +85,13 @@ class BatchMatmulOpTest(tf.test.TestCase):
tol = 100 * np.finfo(x.dtype).eps if is_floating else 0
with self.test_session(use_gpu=is_floating) as sess:
if static_shape:
- z0 = tf.matmul(x, y, adjoint_a=adjoint_a, adjoint_b=adjoint_b)
+ z0 = math_ops.matmul(x, y, adjoint_a=adjoint_a, adjoint_b=adjoint_b)
z0_val = z0.eval()
else:
- x_ph = tf.placeholder(x.dtype)
- y_ph = tf.placeholder(y.dtype)
- z0 = tf.matmul(x_ph, y_ph, adjoint_a=adjoint_a, adjoint_b=adjoint_b)
+ x_ph = array_ops.placeholder(x.dtype)
+ y_ph = array_ops.placeholder(y.dtype)
+ z0 = math_ops.matmul(
+ x_ph, y_ph, adjoint_a=adjoint_a, adjoint_b=adjoint_b)
z0_val = sess.run(z0, feed_dict={x_ph: x, y_ph: y})
z1 = self._npBatchMatmul(x, y, adjoint_a, adjoint_b)
self.assertAllClose(z0_val, z1, rtol=tol, atol=tol)
@@ -135,7 +142,7 @@ def _GetBatchMatmulOpTest(dtype, adjoint_a, adjoint_b, use_static_shape):
return Test
-class BatchMatmulGradientTest(tf.test.TestCase):
+class BatchMatmulGradientTest(test.TestCase):
# loss = sum(batch_matmul(x, y)). Verify dl/dx and dl/dy via the
# gradient checker.
@@ -147,12 +154,12 @@ class BatchMatmulGradientTest(tf.test.TestCase):
epsilon = np.finfo(x.dtype).eps
delta = epsilon**(1.0 / 3.0)
with self.test_session(use_gpu=True):
- inx = tf.constant(x)
- iny = tf.constant(y)
- z = tf.matmul(inx, iny, adjoint_a, adjoint_b)
- loss = tf.reduce_sum(z)
+ inx = constant_op.constant(x)
+ iny = constant_op.constant(y)
+ z = math_ops.matmul(inx, iny, adjoint_a, adjoint_b)
+ loss = math_ops.reduce_sum(z)
((x_jacob_t, x_jacob_n),
- (y_jacob_t, y_jacob_n)) = tf.test.compute_gradient(
+ (y_jacob_t, y_jacob_n)) = gradient_checker.compute_gradient(
[inx, iny], [x.shape, y.shape],
loss, [1],
x_init_value=[x, y],
@@ -196,4 +203,4 @@ if __name__ == "__main__":
if dtype_ is not np.int32:
setattr(BatchMatmulGradientTest, "testBatchMatmulGradient_" + name,
_GetBatchMatmulGradientTest(dtype_, adjoint_a_, adjoint_b_))
- tf.test.main()
+ test.main()
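The numpy reference these tests build amounts to conjugate-transposing the last two axes before a batched matmul. A self-contained sketch of that semantics (np_batch_matmul is an illustrative name, not the file's private helper):

    import numpy as np

    def np_batch_matmul(x, y, adjoint_a=False, adjoint_b=False):
      """Batched matmul over the last two axes, with optional adjoints."""
      if adjoint_a:
        x = np.conj(np.swapaxes(x, -1, -2))
      if adjoint_b:
        y = np.conj(np.swapaxes(y, -1, -2))
      return np.matmul(x, y)

    x = np.random.rand(2, 3, 4).astype(np.float32)
    y = np.random.rand(2, 3, 5).astype(np.float32)
    z = np_batch_matmul(x, y, adjoint_a=True)  # (2,4,3) @ (2,3,5) -> (2,4,5)
    assert z.shape == (2, 4, 5)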
diff --git a/tensorflow/python/kernel_tests/batchtospace_op_test.py b/tensorflow/python/kernel_tests/batchtospace_op_test.py
index 1b9b2da83f..8ec93119f2 100644
--- a/tensorflow/python/kernel_tests/batchtospace_op_test.py
+++ b/tensorflow/python/kernel_tests/batchtospace_op_test.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Functional tests for BatchToSpace op.
Additional tests are included in spacetobatch_op_test.py, where the BatchToSpace
@@ -24,16 +23,20 @@ from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
+from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
+from tensorflow.python.ops import gradient_checker
+from tensorflow.python.platform import test
class PythonOpImpl(object):
@staticmethod
def batch_to_space(*args, **kwargs):
- return tf.batch_to_space(*args, **kwargs)
+ return array_ops.batch_to_space(*args, **kwargs)
class CppOpImpl(object):
@@ -43,7 +46,7 @@ class CppOpImpl(object):
return gen_array_ops._batch_to_space(*args, **kwargs)
-class BatchToSpaceDepthToSpace(tf.test.TestCase, PythonOpImpl):
+class BatchToSpaceDepthToSpace(test.TestCase, PythonOpImpl):
# Verifies that: batch_to_space(x) = transpose(depth_to_space(transpose(x)))
def testDepthToSpaceTranspose(self):
@@ -51,10 +54,10 @@ class BatchToSpaceDepthToSpace(tf.test.TestCase, PythonOpImpl):
block_size = 2
crops = np.zeros((2, 2), dtype=np.int32)
y1 = self.batch_to_space(x, crops, block_size=block_size)
- y2 = tf.transpose(
- tf.depth_to_space(
- tf.transpose(x, [3, 1, 2, 0]),
- block_size=block_size), [3, 1, 2, 0])
+ y2 = array_ops.transpose(
+ array_ops.depth_to_space(
+ array_ops.transpose(x, [3, 1, 2, 0]), block_size=block_size),
+ [3, 1, 2, 0])
with self.test_session():
self.assertAllEqual(y1.eval(), y2.eval())
@@ -63,7 +66,7 @@ class BatchToSpaceDepthToSpaceCpp(BatchToSpaceDepthToSpace, CppOpImpl):
pass
-class BatchToSpaceErrorHandlingTest(tf.test.TestCase, PythonOpImpl):
+class BatchToSpaceErrorHandlingTest(test.TestCase, PythonOpImpl):
def testInputWrongDimMissingBatch(self):
# The input is missing the first dimension ("batch")
@@ -110,8 +113,8 @@ class BatchToSpaceErrorHandlingTest(tf.test.TestCase, PythonOpImpl):
def testUnknownShape(self):
t = self.batch_to_space(
- tf.placeholder(tf.float32),
- tf.placeholder(tf.int32),
+ array_ops.placeholder(dtypes.float32),
+ array_ops.placeholder(dtypes.int32),
block_size=4)
self.assertEqual(4, t.get_shape().ndims)
@@ -121,7 +124,7 @@ class BatchToSpaceErrorHandlingCppTest(BatchToSpaceErrorHandlingTest,
pass
-class BatchToSpaceNDErrorHandlingTest(tf.test.TestCase):
+class BatchToSpaceNDErrorHandlingTest(test.TestCase):
def _testStaticShape(self, input_shape, block_shape, paddings, error):
block_shape = np.array(block_shape)
@@ -129,7 +132,7 @@ class BatchToSpaceNDErrorHandlingTest(tf.test.TestCase):
# Try with sizes known at graph construction time.
with self.assertRaises(error):
- _ = tf.batch_to_space_nd(
+ _ = array_ops.batch_to_space_nd(
np.zeros(input_shape, np.float32), block_shape, paddings)
def _testDynamicShape(self, input_shape, block_shape, paddings):
@@ -137,16 +140,19 @@ class BatchToSpaceNDErrorHandlingTest(tf.test.TestCase):
paddings = np.array(paddings)
# Try with sizes unknown at graph construction time.
- input_placeholder = tf.placeholder(tf.float32)
- block_shape_placeholder = tf.placeholder(tf.int32, shape=block_shape.shape)
- paddings_placeholder = tf.placeholder(tf.int32)
- t = tf.batch_to_space_nd(input_placeholder, block_shape_placeholder,
- paddings_placeholder)
+ input_placeholder = array_ops.placeholder(dtypes.float32)
+ block_shape_placeholder = array_ops.placeholder(
+ dtypes.int32, shape=block_shape.shape)
+ paddings_placeholder = array_ops.placeholder(dtypes.int32)
+ t = array_ops.batch_to_space_nd(input_placeholder, block_shape_placeholder,
+ paddings_placeholder)
with self.assertRaises(ValueError):
- _ = t.eval({input_placeholder: np.zeros(input_shape, np.float32),
- block_shape_placeholder: block_shape,
- paddings_placeholder: paddings})
+ _ = t.eval({
+ input_placeholder: np.zeros(input_shape, np.float32),
+ block_shape_placeholder: block_shape,
+ paddings_placeholder: paddings
+ })
def _testShape(self, input_shape, block_shape, paddings, error):
self._testStaticShape(input_shape, block_shape, paddings, error)
@@ -176,54 +182,62 @@ class BatchToSpaceNDErrorHandlingTest(tf.test.TestCase):
def testUnknownShape(self):
# Verify that input shape and paddings shape can be unknown.
- _ = tf.batch_to_space_nd(
- tf.placeholder(tf.float32),
- tf.placeholder(tf.int32, shape=(2,)),
- tf.placeholder(tf.int32))
+ _ = array_ops.batch_to_space_nd(
+ array_ops.placeholder(dtypes.float32),
+ array_ops.placeholder(
+ dtypes.int32, shape=(2,)),
+ array_ops.placeholder(dtypes.int32))
# Only number of input dimensions is known.
- t = tf.batch_to_space_nd(
- tf.placeholder(tf.float32, shape=(None, None, None, None)),
- tf.placeholder(tf.int32, shape=(2,)),
- tf.placeholder(tf.int32))
+ t = array_ops.batch_to_space_nd(
+ array_ops.placeholder(
+ dtypes.float32, shape=(None, None, None, None)),
+ array_ops.placeholder(
+ dtypes.int32, shape=(2,)),
+ array_ops.placeholder(dtypes.int32))
self.assertEqual(4, t.get_shape().ndims)
# Dimensions are partially known.
- t = tf.batch_to_space_nd(
- tf.placeholder(tf.float32, shape=(None, None, None, 2)),
- tf.placeholder(tf.int32, shape=(2,)),
- tf.placeholder(tf.int32))
+ t = array_ops.batch_to_space_nd(
+ array_ops.placeholder(
+ dtypes.float32, shape=(None, None, None, 2)),
+ array_ops.placeholder(
+ dtypes.int32, shape=(2,)),
+ array_ops.placeholder(dtypes.int32))
self.assertEqual([None, None, None, 2], t.get_shape().as_list())
# Dimensions are partially known.
- t = tf.batch_to_space_nd(
- tf.placeholder(tf.float32, shape=(3 * 2 * 3, None, None, 2)), [2, 3],
- tf.placeholder(tf.int32))
+ t = array_ops.batch_to_space_nd(
+ array_ops.placeholder(
+ dtypes.float32, shape=(3 * 2 * 3, None, None, 2)), [2, 3],
+ array_ops.placeholder(dtypes.int32))
self.assertEqual([3, None, None, 2], t.get_shape().as_list())
# Dimensions are partially known.
- t = tf.batch_to_space_nd(
- tf.placeholder(tf.float32, shape=(3 * 2 * 3, None, 2, 2)), [2, 3],
+ t = array_ops.batch_to_space_nd(
+ array_ops.placeholder(
+ dtypes.float32, shape=(3 * 2 * 3, None, 2, 2)), [2, 3],
[[1, 1], [0, 1]])
self.assertEqual([3, None, 5, 2], t.get_shape().as_list())
# Dimensions are fully known.
- t = tf.batch_to_space_nd(
- tf.placeholder(tf.float32, shape=(3 * 2 * 3, 2, 1, 2)), [2, 3],
+ t = array_ops.batch_to_space_nd(
+ array_ops.placeholder(
+ dtypes.float32, shape=(3 * 2 * 3, 2, 1, 2)), [2, 3],
[[1, 1], [0, 0]])
self.assertEqual([3, 2, 3, 2], t.get_shape().as_list())
-class BatchToSpaceGradientTest(tf.test.TestCase, PythonOpImpl):
+class BatchToSpaceGradientTest(test.TestCase, PythonOpImpl):
# Check the gradients.
def _checkGrad(self, x, crops, block_size):
assert 4 == x.ndim
with self.test_session():
- tf_x = tf.convert_to_tensor(x)
+ tf_x = ops.convert_to_tensor(x)
tf_y = self.batch_to_space(tf_x, crops, block_size)
epsilon = 1e-5
- ((x_jacob_t, x_jacob_n)) = tf.test.compute_gradient(
+ ((x_jacob_t, x_jacob_n)) = gradient_checker.compute_gradient(
tf_x,
x.shape,
tf_y,
@@ -240,8 +254,8 @@ class BatchToSpaceGradientTest(tf.test.TestCase, PythonOpImpl):
x = np.random.normal(0, 1, b * h * w * d *
block_size_sq).astype(np.float32).reshape(
[b * block_size * block_size, h, w, d])
- crops = np.array([[crop_beg, crop_end], [crop_beg, crop_end]],
- dtype=np.int32)
+ crops = np.array(
+ [[crop_beg, crop_end], [crop_beg, crop_end]], dtype=np.int32)
self._checkGrad(x, crops, block_size)
@@ -270,17 +284,17 @@ class BatchToSpaceGradientCppTest(BatchToSpaceGradientTest, CppOpImpl):
pass
-class BatchToSpaceNDGradientTest(tf.test.TestCase):
+class BatchToSpaceNDGradientTest(test.TestCase):
# Check the gradients.
def _checkGrad(self, x, block_shape, crops):
block_shape = np.array(block_shape)
crops = np.array(crops).reshape((len(block_shape), 2))
with self.test_session():
- tf_x = tf.convert_to_tensor(x)
- tf_y = tf.batch_to_space_nd(tf_x, block_shape, crops)
+ tf_x = ops.convert_to_tensor(x)
+ tf_y = array_ops.batch_to_space_nd(tf_x, block_shape, crops)
epsilon = 1e-5
- ((x_jacob_t, x_jacob_n)) = tf.test.compute_gradient(
+ ((x_jacob_t, x_jacob_n)) = gradient_checker.compute_gradient(
tf_x,
x.shape,
tf_y,
@@ -310,4 +324,4 @@ class BatchToSpaceNDGradientTest(tf.test.TestCase):
if __name__ == "__main__":
- tf.test.main()
+ test.main()
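batch_to_space is documented as a reshape/permute/crop over the batch dimension; a pure-numpy sketch for the NHWC, square-block case (np_batch_to_space is an illustrative helper following the op's documented steps, not TF code):

    import numpy as np

    def np_batch_to_space(x, block_size, crops):
      b, h, w, d = x.shape
      assert b % block_size**2 == 0
      out = x.reshape(block_size, block_size, b // block_size**2, h, w, d)
      out = out.transpose(2, 3, 0, 4, 1, 5)  # interleave blocks into space
      out = out.reshape(b // block_size**2, h * block_size, w * block_size, d)
      (top, bottom), (left, right) = crops
      return out[:, top:h * block_size - bottom,
                 left:w * block_size - right, :]

    x = np.arange(16, dtype=np.float32).reshape(4, 1, 1, 4)
    assert np_batch_to_space(x, 2, [[0, 0], [0, 0]]).shape == (1, 2, 2, 4)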
diff --git a/tensorflow/python/kernel_tests/bcast_ops_test.py b/tensorflow/python/kernel_tests/bcast_ops_test.py
index a4cc821ec6..994bff8fe6 100644
--- a/tensorflow/python/kernel_tests/bcast_ops_test.py
+++ b/tensorflow/python/kernel_tests/bcast_ops_test.py
@@ -12,19 +12,17 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for tensorflow.kernels.bcast_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-import tensorflow as tf
-
from tensorflow.python.ops.gen_array_ops import _broadcast_gradient_args
+from tensorflow.python.platform import test
-class BcastOpsTest(tf.test.TestCase):
+class BcastOpsTest(test.TestCase):
def _GetGradientArgs(self, xs, ys):
with self.test_session() as sess:
@@ -90,4 +88,4 @@ class BcastOpsTest(tf.test.TestCase):
if __name__ == "__main__":
- tf.test.main()
+ test.main()
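_broadcast_gradient_args answers one question: over which axes must a broadcast gradient be summed to shrink back to each input's shape? A pure-Python sketch of the rule (illustrative helper; the real op is generated from C++):

    def broadcast_gradient_args(xs, ys):
      """Reduction axes for grad-of-x and grad-of-y after broadcasting."""
      n = max(len(xs), len(ys))
      xs = [1] * (n - len(xs)) + list(xs)  # numpy-style left-padding with 1s
      ys = [1] * (n - len(ys)) + list(ys)
      rx = [i for i in range(n) if xs[i] == 1 and ys[i] != 1]
      ry = [i for i in range(n) if ys[i] == 1 and xs[i] != 1]
      return rx, ry

    assert broadcast_gradient_args([2, 3, 5], [1]) == ([], [0, 1, 2])
    assert broadcast_gradient_args([1], [2, 3, 5]) == ([0, 1, 2], [])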
diff --git a/tensorflow/python/kernel_tests/benchmark_test.py b/tensorflow/python/kernel_tests/benchmark_test.py
index ffab743be1..bb72a06966 100644
--- a/tensorflow/python/kernel_tests/benchmark_test.py
+++ b/tensorflow/python/kernel_tests/benchmark_test.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for tensorflow.python.framework.importer."""
from __future__ import absolute_import
from __future__ import division
@@ -22,11 +21,12 @@ import json
import os
import random
-import tensorflow as tf
-
from tensorflow.core.util import test_log_pb2
+from tensorflow.python.client import session
+from tensorflow.python.framework import constant_op
from tensorflow.python.platform import benchmark
-
+from tensorflow.python.platform import gfile
+from tensorflow.python.platform import test
# Used by SomeRandomBenchmark class below.
_ran_somebenchmark_1 = [False]
@@ -34,7 +34,7 @@ _ran_somebenchmark_2 = [False]
_ran_somebenchmark_but_shouldnt = [False]
-class SomeRandomBenchmark(tf.test.Benchmark):
+class SomeRandomBenchmark(test.Benchmark):
"""This Benchmark should automatically be registered in the registry."""
def _dontRunThisBenchmark(self):
@@ -50,7 +50,7 @@ class SomeRandomBenchmark(tf.test.Benchmark):
_ran_somebenchmark_2[0] = True
-class TestReportingBenchmark(tf.test.Benchmark):
+class TestReportingBenchmark(test.Benchmark):
"""This benchmark (maybe) reports some stuff."""
def benchmarkReport1(self):
@@ -58,19 +58,20 @@ class TestReportingBenchmark(tf.test.Benchmark):
def benchmarkReport2(self):
self.report_benchmark(
- iters=2, name="custom_benchmark_name",
- extras={"number_key": 3, "other_key": "string"})
+ iters=2,
+ name="custom_benchmark_name",
+ extras={"number_key": 3,
+ "other_key": "string"})
def benchmark_times_an_op(self):
- with tf.Session() as sess:
- a = tf.constant(0.0)
+ with session.Session() as sess:
+ a = constant_op.constant(0.0)
a_plus_a = a + a
self.run_op_benchmark(
- sess, a_plus_a, min_iters=1000, store_trace=True,
- name="op_benchmark")
+ sess, a_plus_a, min_iters=1000, store_trace=True, name="op_benchmark")
-class BenchmarkTest(tf.test.TestCase):
+class BenchmarkTest(test.TestCase):
def testGlobalBenchmarkRegistry(self):
registry = list(benchmark.GLOBAL_BENCHMARK_REGISTRY)
@@ -116,30 +117,30 @@ class BenchmarkTest(tf.test.TestCase):
self.assertFalse(_ran_somebenchmark_but_shouldnt[0])
def testReportingBenchmark(self):
- tempdir = tf.test.get_temp_dir()
+ tempdir = test.get_temp_dir()
try:
- tf.gfile.MakeDirs(tempdir)
+ gfile.MakeDirs(tempdir)
except OSError as e:
# It's OK if the directory already exists.
if " exists:" not in str(e):
raise e
- prefix = os.path.join(
- tempdir, "reporting_bench_%016x_" % random.getrandbits(64))
- expected_output_file = "%s%s" % (
- prefix, "TestReportingBenchmark.benchmarkReport1")
+ prefix = os.path.join(tempdir,
+ "reporting_bench_%016x_" % random.getrandbits(64))
+ expected_output_file = "%s%s" % (prefix,
+ "TestReportingBenchmark.benchmarkReport1")
expected_output_file_2 = "%s%s" % (
prefix, "TestReportingBenchmark.custom_benchmark_name")
- expected_output_file_3 = "%s%s" % (
- prefix, "TestReportingBenchmark.op_benchmark")
+ expected_output_file_3 = "%s%s" % (prefix,
+ "TestReportingBenchmark.op_benchmark")
try:
- self.assertFalse(tf.gfile.Exists(expected_output_file))
+ self.assertFalse(gfile.Exists(expected_output_file))
# Run benchmark but without env, shouldn't write anything
if benchmark.TEST_REPORTER_TEST_ENV in os.environ:
del os.environ[benchmark.TEST_REPORTER_TEST_ENV]
reporting = TestReportingBenchmark()
reporting.benchmarkReport1() # This should run without writing anything
- self.assertFalse(tf.gfile.Exists(expected_output_file))
+ self.assertFalse(gfile.Exists(expected_output_file))
# Runbenchmark with env, should write
os.environ[benchmark.TEST_REPORTER_TEST_ENV] = prefix
@@ -150,9 +151,9 @@ class BenchmarkTest(tf.test.TestCase):
reporting.benchmark_times_an_op() # This should write
# Check the files were written
- self.assertTrue(tf.gfile.Exists(expected_output_file))
- self.assertTrue(tf.gfile.Exists(expected_output_file_2))
- self.assertTrue(tf.gfile.Exists(expected_output_file_3))
+ self.assertTrue(gfile.Exists(expected_output_file))
+ self.assertTrue(gfile.Exists(expected_output_file_2))
+ self.assertTrue(gfile.Exists(expected_output_file_3))
# Check the contents are correct
expected_1 = test_log_pb2.BenchmarkEntry()
@@ -170,7 +171,7 @@ class BenchmarkTest(tf.test.TestCase):
expected_3.iters = 1000
def read_benchmark_entry(f):
- s = tf.gfile.GFile(f, "rb").read()
+ s = gfile.GFile(f, "rb").read()
entries = test_log_pb2.BenchmarkEntries.FromString(s)
self.assertEquals(1, len(entries.entry))
return entries.entry[0]
@@ -191,8 +192,8 @@ class BenchmarkTest(tf.test.TestCase):
self.assertTrue("traceEvents" in json_trace.keys())
finally:
- tf.gfile.DeleteRecursively(tempdir)
+ gfile.DeleteRecursively(tempdir)
if __name__ == "__main__":
- tf.test.main()
+ test.main()
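The file-writing dance in testReportingBenchmark hinges on one environment variable: benchmark.TEST_REPORTER_TEST_ENV holds a path prefix, and each report_benchmark call serializes a test_log_pb2 entry to <prefix><BenchmarkClass>.<name>. A hedged sketch of that mechanism (SketchBenchmark and the /tmp prefix are hypothetical):

    import os

    from tensorflow.python.platform import benchmark
    from tensorflow.python.platform import test


    class SketchBenchmark(test.Benchmark):

      def benchmarkNothing(self):
        self.report_benchmark(iters=1, name="sketch", extras={"answer": 42})


    os.environ[benchmark.TEST_REPORTER_TEST_ENV] = "/tmp/bench_"
    SketchBenchmark().benchmarkNothing()
    # expected output file: /tmp/bench_SketchBenchmark.sketch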
diff --git a/tensorflow/python/kernel_tests/betainc_op_test.py b/tensorflow/python/kernel_tests/betainc_op_test.py
index d311f45a19..afdb436dc6 100644
--- a/tensorflow/python/kernel_tests/betainc_op_test.py
+++ b/tensorflow/python/kernel_tests/betainc_op_test.py
@@ -13,6 +13,7 @@
# limitations under the License.
# ==============================================================================
"""Functional tests for 3d convolutional operations."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
@@ -20,10 +21,16 @@ from __future__ import print_function
import itertools
import numpy as np
-import tensorflow as tf
+
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import math_ops
+from tensorflow.python.platform import test
+from tensorflow.python.platform import tf_logging
-class BetaincTest(tf.test.TestCase):
+class BetaincTest(test.TestCase):
use_gpu = False
def _testBetaInc(self, dtype):
@@ -36,24 +43,23 @@ class BetaincTest(tf.test.TestCase):
b_s = np.abs(np.random.randn(10, 10) * 30).astype(np_dt) # in (0, infty)
x_s = np.random.rand(10, 10).astype(np_dt) # in (0, 1)
with self.test_session(use_gpu=self.use_gpu):
- tf_a_s = tf.constant(a_s, dtype=dtype)
- tf_b_s = tf.constant(b_s, dtype=dtype)
- tf_x_s = tf.constant(x_s, dtype=dtype)
- tf_out = tf.betainc(tf_a_s, tf_b_s, tf_x_s).eval()
+ tf_a_s = constant_op.constant(a_s, dtype=dtype)
+ tf_b_s = constant_op.constant(b_s, dtype=dtype)
+ tf_x_s = constant_op.constant(x_s, dtype=dtype)
+ tf_out = math_ops.betainc(tf_a_s, tf_b_s, tf_x_s).eval()
scipy_out = special.betainc(a_s, b_s, x_s).astype(np_dt)
# the scipy version of betainc uses a double-only implementation.
# TODO(ebrevdo): identify reasons for (sometime) precision loss
# with doubles
- tol = 1e-4 if dtype == tf.float32 else 5e-5
+ tol = 1e-4 if dtype == dtypes.float32 else 5e-5
self.assertAllCloseAccordingToType(scipy_out, tf_out, rtol=tol, atol=tol)
# Test out-of-range values (most should return nan output)
combinations = list(itertools.product([-1, 0, 0.5, 1.0, 1.5], repeat=3))
- a_comb, b_comb, x_comb = np.asarray(
- list(zip(*combinations)), dtype=np_dt)
+ a_comb, b_comb, x_comb = np.asarray(list(zip(*combinations)), dtype=np_dt)
with self.test_session(use_gpu=self.use_gpu):
- tf_comb = tf.betainc(a_comb, b_comb, x_comb).eval()
+ tf_comb = math_ops.betainc(a_comb, b_comb, x_comb).eval()
scipy_comb = special.betainc(a_comb, b_comb, x_comb).astype(np_dt)
self.assertAllCloseAccordingToType(scipy_comb, tf_comb)
@@ -61,43 +67,56 @@ class BetaincTest(tf.test.TestCase):
with self.test_session(use_gpu=self.use_gpu):
self.assertAllCloseAccordingToType(
special.betainc(0.1, b_s, x_s).astype(np_dt),
- tf.betainc(0.1, b_s, x_s).eval(), rtol=tol, atol=tol)
+ math_ops.betainc(0.1, b_s, x_s).eval(),
+ rtol=tol,
+ atol=tol)
self.assertAllCloseAccordingToType(
special.betainc(a_s, 0.1, x_s).astype(np_dt),
- tf.betainc(a_s, 0.1, x_s).eval(), rtol=tol, atol=tol)
+ math_ops.betainc(a_s, 0.1, x_s).eval(),
+ rtol=tol,
+ atol=tol)
self.assertAllCloseAccordingToType(
special.betainc(a_s, b_s, 0.1).astype(np_dt),
- tf.betainc(a_s, b_s, 0.1).eval(), rtol=tol, atol=tol)
+ math_ops.betainc(a_s, b_s, 0.1).eval(),
+ rtol=tol,
+ atol=tol)
self.assertAllCloseAccordingToType(
special.betainc(0.1, b_s, 0.1).astype(np_dt),
- tf.betainc(0.1, b_s, 0.1).eval(), rtol=tol, atol=tol)
+ math_ops.betainc(0.1, b_s, 0.1).eval(),
+ rtol=tol,
+ atol=tol)
self.assertAllCloseAccordingToType(
special.betainc(0.1, 0.1, 0.1).astype(np_dt),
- tf.betainc(0.1, 0.1, 0.1).eval(), rtol=tol, atol=tol)
+ math_ops.betainc(0.1, 0.1, 0.1).eval(),
+ rtol=tol,
+ atol=tol)
with self.assertRaisesRegexp(ValueError, "must be equal"):
- tf.betainc(0.5, [0.5], [[0.5]])
+ math_ops.betainc(0.5, [0.5], [[0.5]])
with self.test_session(use_gpu=self.use_gpu):
with self.assertRaisesOpError("Shapes of .* are inconsistent"):
- a_p = tf.placeholder(dtype)
- b_p = tf.placeholder(dtype)
- x_p = tf.placeholder(dtype)
- tf.betainc(a_p, b_p, x_p).eval(
- feed_dict={a_p: 0.5, b_p: [0.5], x_p: [[0.5]]})
+ a_p = array_ops.placeholder(dtype)
+ b_p = array_ops.placeholder(dtype)
+ x_p = array_ops.placeholder(dtype)
+ math_ops.betainc(a_p, b_p, x_p).eval(
+ feed_dict={a_p: 0.5,
+ b_p: [0.5],
+ x_p: [[0.5]]})
except ImportError as e:
- tf.logging.warn("Cannot test special functions: %s" % str(e))
+ tf_logging.warn("Cannot test special functions: %s" % str(e))
def testBetaIncFloat(self):
- self._testBetaInc(tf.float32)
+ self._testBetaInc(dtypes.float32)
def testBetaIncDouble(self):
- self._testBetaInc(tf.float64)
+ self._testBetaInc(dtypes.float64)
class BetaincTestGPU(BetaincTest):
use_gpu = True
+
if __name__ == "__main__":
- tf.test.main()
+ test.main()
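The core check is a straight comparison against scipy with a dtype-dependent tolerance. A trimmed, standalone version (illustrative names; assumes scipy is installed, as the test itself does):

    import numpy as np
    from scipy import special

    from tensorflow.python.framework import constant_op
    from tensorflow.python.ops import math_ops
    from tensorflow.python.platform import test


    class BetaincSketch(test.TestCase):

      def testMatchesScipy(self):
        a = np.abs(np.random.randn(4)).astype(np.float32) + 0.1  # a > 0
        b = np.abs(np.random.randn(4)).astype(np.float32) + 0.1  # b > 0
        x = np.random.rand(4).astype(np.float32)                 # x in (0, 1)
        with self.test_session():
          tf_out = math_ops.betainc(
              constant_op.constant(a), constant_op.constant(b),
              constant_op.constant(x)).eval()
        self.assertAllClose(
            special.betainc(a, b, x), tf_out, rtol=1e-4, atol=1e-4)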
diff --git a/tensorflow/python/kernel_tests/bias_op_test.py b/tensorflow/python/kernel_tests/bias_op_test.py
index 4c8ff58af0..42ba665725 100644
--- a/tensorflow/python/kernel_tests/bias_op_test.py
+++ b/tensorflow/python/kernel_tests/bias_op_test.py
@@ -12,14 +12,22 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Functional tests for BiasAdd."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import gradient_checker
+from tensorflow.python.ops import gradients_impl
+from tensorflow.python.ops import nn_ops
+import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
+from tensorflow.python.platform import test
def GetTestConfigs():
@@ -29,31 +37,32 @@ def GetTestConfigs():
all the valid test configs as tuples of data_format and use_gpu.
"""
test_configs = [("NHWC", False), ("NHWC", True)]
- if tf.test.is_gpu_available(cuda_only=True):
+ if test.is_gpu_available(cuda_only=True):
# "NCHW" format is currently only supported on CUDA.
test_configs += [("NCHW", True)]
return test_configs
-class BiasAddTest(tf.test.TestCase):
+class BiasAddTest(test.TestCase):
def _npBias(self, inputs, bias):
assert len(bias.shape) == 1
print(inputs.shape)
print(bias.shape)
assert inputs.shape[-1] == bias.shape[0]
- return inputs + bias.reshape(([1] * (len(inputs.shape) - 1))
- + [bias.shape[0]])
+ return inputs + bias.reshape(([1] * (len(inputs.shape) - 1)) +
+ [bias.shape[0]])
def testNpBias(self):
- self.assertAllClose(np.array([[11, 22, 33], [41, 52, 63]]),
- self._npBias(np.array([[10, 20, 30], [40, 50, 60]]),
- np.array([1, 2, 3])))
+ self.assertAllClose(
+ np.array([[11, 22, 33], [41, 52, 63]]),
+ self._npBias(
+ np.array([[10, 20, 30], [40, 50, 60]]), np.array([1, 2, 3])))
def _testBias(self, np_inputs, np_bias, use_gpu=False):
np_val = self._npBias(np_inputs, np_bias)
with self.test_session(use_gpu=use_gpu):
- tf_val = tf.nn.bias_add(np_inputs, np_bias).eval()
+ tf_val = nn_ops.bias_add(np_inputs, np_bias).eval()
self.assertAllCloseAccordingToType(np_val, tf_val)
def _AtLeast3d(self, np_value):
@@ -81,7 +90,7 @@ class BiasAddTest(tf.test.TestCase):
np_val = self._npBias(np_inputs, np_bias)
np_inputs = self._NHWCToNCHW(np_inputs)
with self.test_session(use_gpu=use_gpu):
- tf_val = tf.nn.bias_add(np_inputs, np_bias, data_format="NCHW").eval()
+ tf_val = nn_ops.bias_add(np_inputs, np_bias, data_format="NCHW").eval()
tf_val = self._NCHWToNHWC(tf_val)
self.assertAllCloseAccordingToType(self._AtLeast3d(np_val), tf_val)
@@ -89,75 +98,90 @@ class BiasAddTest(tf.test.TestCase):
self._testBias(np_inputs, np_bias, use_gpu=False)
if np_inputs.dtype in [np.float16, np.float32, np.float64]:
self._testBias(np_inputs, np_bias, use_gpu=True)
- if tf.test.is_gpu_available(cuda_only=True):
+ if test.is_gpu_available(cuda_only=True):
self._testBiasNCHW(np_inputs, np_bias, use_gpu=True)
def testInputDims(self):
with self.assertRaises(ValueError):
- tf.nn.bias_add([1, 2], [1])
+ nn_ops.bias_add([1, 2], [1])
def testBiasVec(self):
with self.assertRaises(ValueError):
- tf.nn.bias_add(tf.reshape([1, 2], shape=[1, 2]),
- tf.reshape([1, 2], shape=[1, 2]))
+ nn_ops.bias_add(
+ array_ops.reshape(
+ [1, 2], shape=[1, 2]),
+ array_ops.reshape(
+ [1, 2], shape=[1, 2]))
def testBiasInputsMatch(self):
with self.assertRaises(ValueError):
- tf.nn.bias_add(tf.reshape([1, 2], shape=[1, 2]),
- tf.reshape([1], shape=[1]))
+ nn_ops.bias_add(
+ array_ops.reshape(
+ [1, 2], shape=[1, 2]),
+ array_ops.reshape(
+ [1], shape=[1]))
def testIntTypes(self):
for t in [np.int8, np.int16, np.int32, np.int64]:
- self._testAll(np.array([[10, 20, 30], [40, 50, 60]]).astype(t),
- np.array([1, 2, 3]).astype(t))
+ self._testAll(
+ np.array([[10, 20, 30], [40, 50, 60]]).astype(t),
+ np.array([1, 2, 3]).astype(t))
def testFloatTypes(self):
for t in [np.float16, np.float32, np.float64]:
- self._testAll(np.random.rand(4, 3, 3).astype(t),
- np.random.rand(3).astype(t))
+ self._testAll(
+ np.random.rand(4, 3, 3).astype(t), np.random.rand(3).astype(t))
def _testGradient(self, np_input, bias, dtype, data_format, use_gpu):
with self.test_session(use_gpu=use_gpu):
if data_format == "NCHW":
np_input = self._NHWCToNCHW(np_input)
- input_tensor = tf.constant(np_input, shape=np_input.shape, dtype=dtype)
- bias_tensor = tf.constant(bias, shape=bias.shape, dtype=dtype)
- output_tensor = tf.nn.bias_add(input_tensor, bias_tensor,
- data_format=data_format)
- tensor_jacob_t, tensor_jacob_n = tf.test.compute_gradient(
+ input_tensor = constant_op.constant(
+ np_input, shape=np_input.shape, dtype=dtype)
+ bias_tensor = constant_op.constant(bias, shape=bias.shape, dtype=dtype)
+ output_tensor = nn_ops.bias_add(
+ input_tensor, bias_tensor, data_format=data_format)
+ tensor_jacob_t, tensor_jacob_n = gradient_checker.compute_gradient(
input_tensor, np_input.shape, output_tensor, np_input.shape)
- bias_jacob_t, bias_jacob_n = tf.test.compute_gradient(
+ bias_jacob_t, bias_jacob_n = gradient_checker.compute_gradient(
bias_tensor, bias.shape, output_tensor, np_input.shape)
-
+
# Test gradient of BiasAddGrad
- bias_add_grad = tf.gradients(tf.nn.l2_loss(output_tensor),
- bias_tensor)[0]
- grad_jacob_t, grad_jacob_n = tf.test.compute_gradient(
+ bias_add_grad = gradients_impl.gradients(
+ nn_ops.l2_loss(output_tensor), bias_tensor)[0]
+ grad_jacob_t, grad_jacob_n = gradient_checker.compute_gradient(
output_tensor, np_input.shape, bias_add_grad, bias.shape)
-
+
if dtype == np.float16:
# Compare fp16 theoretical gradients to fp32 numerical gradients,
# since fp16 numerical gradients are too imprecise unless great
# care is taken with choosing the inputs and the delta. This is
# a weaker check (in particular, it does not test the op itself,
# only its gradient), but it's much better than nothing.
- input_tensor = tf.constant(np_input, shape=np_input.shape,
- dtype=np.float32)
- bias_tensor = tf.constant(bias, shape=bias.shape, dtype=np.float32)
- output_tensor = tf.nn.bias_add(input_tensor, bias_tensor,
- data_format=data_format)
- _, tensor_jacob_n = tf.test.compute_gradient(
- input_tensor, np_input.shape, output_tensor, np_input.shape)
- _, bias_jacob_n = tf.test.compute_gradient(
- bias_tensor, bias.shape, output_tensor, np_input.shape)
-
- bias_add_grad = tf.gradients(tf.nn.l2_loss(output_tensor),
- bias_tensor)[0]
- _, grad_jacob_n = tf.test.compute_gradient(
- output_tensor, np_input.shape, bias_add_grad, bias.shape)
-
+ input_tensor = constant_op.constant(
+ np_input, shape=np_input.shape, dtype=np.float32)
+ bias_tensor = constant_op.constant(
+ bias, shape=bias.shape, dtype=np.float32)
+ output_tensor = nn_ops.bias_add(
+ input_tensor, bias_tensor, data_format=data_format)
+ _, tensor_jacob_n = gradient_checker.compute_gradient(input_tensor,
+ np_input.shape,
+ output_tensor,
+ np_input.shape)
+ _, bias_jacob_n = gradient_checker.compute_gradient(bias_tensor,
+ bias.shape,
+ output_tensor,
+ np_input.shape)
+
+ bias_add_grad = gradients_impl.gradients(
+ nn_ops.l2_loss(output_tensor), bias_tensor)[0]
+ _, grad_jacob_n = gradient_checker.compute_gradient(output_tensor,
+ np_input.shape,
+ bias_add_grad,
+ bias.shape)
+
threshold = 2e-3
- if dtype == tf.float64:
+ if dtype == dtypes.float64:
threshold = 1e-10
self.assertAllClose(tensor_jacob_t, tensor_jacob_n, threshold, threshold)
self.assertAllClose(bias_jacob_t, bias_jacob_n, threshold, threshold)
@@ -165,17 +189,19 @@ class BiasAddTest(tf.test.TestCase):
def testGradientTensor(self):
for (data_format, use_gpu) in GetTestConfigs():
- for dtype in (tf.float16, tf.float32, tf.float64):
- np_input = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0],
- dtype=dtype.as_numpy_dtype).reshape(3, 2)
+ for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):
+ np_input = np.array(
+ [1.0, 2.0, 3.0, 4.0, 5.0, 6.0],
+ dtype=dtype.as_numpy_dtype).reshape(3, 2)
bias = np.array([1.3, 2.4], dtype=dtype.as_numpy_dtype)
self._testGradient(np_input, bias, dtype, data_format, use_gpu)
def testGradientTensor4D(self):
for (data_format, use_gpu) in GetTestConfigs():
- for dtype in (tf.float16, tf.float32, tf.float64):
- np_input = np.arange(1.0, 49.0, dtype=dtype.as_numpy_dtype).reshape(
- [2, 3, 4, 2]).astype(np.float32)
+ for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):
+ np_input = np.arange(
+ 1.0, 49.0, dtype=dtype.as_numpy_dtype).reshape(
+ [2, 3, 4, 2]).astype(np.float32)
bias = np.array([1.3, 2.4], dtype=dtype.as_numpy_dtype)
self._testGradient(np_input, bias, dtype, data_format, use_gpu)
@@ -187,9 +213,10 @@ class BiasAddTest(tf.test.TestCase):
def testEmptyGradient(self):
for data_format, use_gpu in GetTestConfigs():
for shape in (0, 0), (2, 0), (0, 2), (4, 3, 0), (4, 0, 3), (0, 4, 3):
- self._testGradient(np.random.randn(*shape), np.random.randn(shape[-1]),
- tf.float64, data_format, use_gpu)
+ self._testGradient(
+ np.random.randn(*shape),
+ np.random.randn(shape[-1]), dtypes.float64, data_format, use_gpu)
if __name__ == "__main__":
- tf.test.main()
+ test.main()
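The numpy reference in _npBias is one line of broadcasting: reshape the bias to [1, ..., 1, n] so it adds across every leading dimension. A standalone sketch (np_bias_add is illustrative; plain inputs + bias would broadcast identically in numpy, the reshape just mirrors bias_add's contract on the last axis):

    import numpy as np

    def np_bias_add(inputs, bias):
      assert inputs.shape[-1] == bias.shape[0]
      return inputs + bias.reshape([1] * (inputs.ndim - 1) + [bias.shape[0]])

    out = np_bias_add(np.array([[10, 20, 30], [40, 50, 60]]),
                      np.array([1, 2, 3]))
    # out == [[11, 22, 33], [41, 52, 63]]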
diff --git a/tensorflow/python/kernel_tests/bitcast_op_test.py b/tensorflow/python/kernel_tests/bitcast_op_test.py
index 47fd68a879..329658f9c2 100644
--- a/tensorflow/python/kernel_tests/bitcast_op_test.py
+++ b/tensorflow/python/kernel_tests/bitcast_op_test.py
@@ -13,20 +13,23 @@
# limitations under the License.
# ==============================================================================
"""Tests for tf.bitcast."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-
import numpy as np
-import tensorflow as tf
+
+from tensorflow.python.framework import dtypes
+from tensorflow.python.ops import array_ops
+from tensorflow.python.platform import test
-class BitcastTest(tf.test.TestCase):
+class BitcastTest(test.TestCase):
def _testBitcast(self, x, datatype, shape):
with self.test_session():
- tf_ans = tf.bitcast(x, datatype)
+ tf_ans = array_ops.bitcast(x, datatype)
out = tf_ans.eval()
buff_after = memoryview(out).tobytes()
buff_before = memoryview(x).tobytes()
@@ -36,13 +39,13 @@ class BitcastTest(tf.test.TestCase):
def testSmaller(self):
x = np.random.rand(3, 2)
- datatype = tf.int8
+ datatype = dtypes.int8
shape = [3, 2, 8]
self._testBitcast(x, datatype, shape)
def testLarger(self):
x = np.arange(16, dtype=np.int8).reshape([4, 4])
- datatype = tf.int32
+ datatype = dtypes.int32
shape = [4]
self._testBitcast(x, datatype, shape)
@@ -54,25 +57,25 @@ class BitcastTest(tf.test.TestCase):
def testSameSize(self):
x = np.random.rand(3, 4)
shape = [3, 4]
- self._testBitcast(x, tf.int64, shape)
+ self._testBitcast(x, dtypes.int64, shape)
def testErrors(self):
x = np.zeros([1, 1], np.int8)
- datatype = tf.int32
+ datatype = dtypes.int32
with self.assertRaisesRegexp(ValueError, "Cannot bitcast due to shape"):
- tf.bitcast(x, datatype, None)
+ array_ops.bitcast(x, datatype, None)
def testEmpty(self):
x = np.ones([], np.int32)
- datatype = tf.int8
+ datatype = dtypes.int8
shape = [4]
self._testBitcast(x, datatype, shape)
def testUnknown(self):
- x = tf.placeholder(tf.float32)
- datatype = tf.int8
- tf.bitcast(x, datatype, None)
+ x = array_ops.placeholder(dtypes.float32)
+ datatype = dtypes.int8
+ array_ops.bitcast(x, datatype, None)
if __name__ == "__main__":
- tf.test.main()
+ test.main()
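
bitcast reinterprets a tensor's bytes in place, so converting to a narrower type appends a dimension: each 8-byte float64 element becomes eight int8 values, which is what testSmaller above expects. A minimal standalone sketch of that behavior (not part of the commit; the class name is illustrative):

import numpy as np

from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test


class BitcastShapeSketch(test.TestCase):

  def testFloat64ToInt8AppendsAxis(self):
    with self.test_session():
      x = np.random.rand(3, 2)  # float64: 8 bytes per element.
      out = array_ops.bitcast(x, dtypes.int8)
      # Each element is reinterpreted as 8 bytes, so a size-8 axis appears.
      self.assertEqual([3, 2, 8], out.get_shape().as_list())
      # The raw bytes are unchanged.
      self.assertEqual(memoryview(x).tobytes(),
                       memoryview(out.eval()).tobytes())


if __name__ == "__main__":
  test.main()
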
diff --git a/tensorflow/python/kernel_tests/candidate_sampler_ops_test.py b/tensorflow/python/kernel_tests/candidate_sampler_ops_test.py
index e03a276442..88b3f20469 100644
--- a/tensorflow/python/kernel_tests/candidate_sampler_ops_test.py
+++ b/tensorflow/python/kernel_tests/candidate_sampler_ops_test.py
@@ -12,17 +12,23 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for CandidateSamplerOp."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import candidate_sampling_ops
+from tensorflow.python.ops import math_ops
+from tensorflow.python.platform import test
-class RangeSamplerOpsTest(tf.test.TestCase):
+
+class RangeSamplerOpsTest(test.TestCase):
BATCH_SIZE = 3
NUM_TRUE = 2
@@ -33,9 +39,9 @@ class RangeSamplerOpsTest(tf.test.TestCase):
def testTrueCandidates(self):
with self.test_session() as sess:
- indices = tf.constant([0, 0, 1, 1, 2, 2])
- true_candidates_vec = tf.constant([1, 2, 0, 4, 3, 3])
- true_candidates_matrix = tf.reshape(
+ indices = constant_op.constant([0, 0, 1, 1, 2, 2])
+ true_candidates_vec = constant_op.constant([1, 2, 0, 4, 3, 3])
+ true_candidates_matrix = array_ops.reshape(
true_candidates_vec, [self.BATCH_SIZE, self.NUM_TRUE])
indices_val, true_candidates_val = sess.run(
[indices, true_candidates_matrix])
@@ -45,9 +51,9 @@ class RangeSamplerOpsTest(tf.test.TestCase):
def testSampledCandidates(self):
with self.test_session():
- true_classes = tf.constant([[1, 2], [0, 4], [3, 3]],
- dtype=tf.int64)
- sampled_candidates, _, _ = tf.nn.all_candidate_sampler(
+ true_classes = constant_op.constant(
+ [[1, 2], [0, 4], [3, 3]], dtype=dtypes.int64)
+ sampled_candidates, _, _ = candidate_sampling_ops.all_candidate_sampler(
true_classes, self.NUM_TRUE, self.NUM_SAMPLED, True)
result = sampled_candidates.eval()
@@ -57,26 +63,26 @@ class RangeSamplerOpsTest(tf.test.TestCase):
def testTrueLogExpectedCount(self):
with self.test_session():
- true_classes = tf.constant([[1, 2], [0, 4], [3, 3]],
- dtype=tf.int64)
- _, true_expected_count, _ = tf.nn.all_candidate_sampler(
+ true_classes = constant_op.constant(
+ [[1, 2], [0, 4], [3, 3]], dtype=dtypes.int64)
+ _, true_expected_count, _ = candidate_sampling_ops.all_candidate_sampler(
true_classes, self.NUM_TRUE, self.NUM_SAMPLED, True)
- true_log_expected_count = tf.log(true_expected_count)
+ true_log_expected_count = math_ops.log(true_expected_count)
result = true_log_expected_count.eval()
self.assertAllEqual(result, [[0.0] * self.NUM_TRUE] * self.BATCH_SIZE)
- self.assertEqual(true_expected_count.get_shape(), [self.BATCH_SIZE,
- self.NUM_TRUE])
- self.assertEqual(true_log_expected_count.get_shape(), [self.BATCH_SIZE,
- self.NUM_TRUE])
+ self.assertEqual(true_expected_count.get_shape(),
+ [self.BATCH_SIZE, self.NUM_TRUE])
+ self.assertEqual(true_log_expected_count.get_shape(),
+ [self.BATCH_SIZE, self.NUM_TRUE])
def testSampledLogExpectedCount(self):
with self.test_session():
- true_classes = tf.constant([[1, 2], [0, 4], [3, 3]],
- dtype=tf.int64)
- _, _, sampled_expected_count = tf.nn.all_candidate_sampler(
+ true_classes = constant_op.constant(
+ [[1, 2], [0, 4], [3, 3]], dtype=dtypes.int64)
+ _, _, sampled_expected_count = candidate_sampling_ops.all_candidate_sampler(
true_classes, self.NUM_TRUE, self.NUM_SAMPLED, True)
- sampled_log_expected_count = tf.log(sampled_expected_count)
+ sampled_log_expected_count = math_ops.log(sampled_expected_count)
result = sampled_log_expected_count.eval()
self.assertAllEqual(result, [0.0] * self.NUM_SAMPLED)
@@ -85,11 +91,11 @@ class RangeSamplerOpsTest(tf.test.TestCase):
def testAccidentalHits(self):
with self.test_session() as sess:
- true_classes = tf.constant([[1, 2], [0, 4], [3, 3]],
- dtype=tf.int64)
- sampled_candidates, _, _ = tf.nn.all_candidate_sampler(
+ true_classes = constant_op.constant(
+ [[1, 2], [0, 4], [3, 3]], dtype=dtypes.int64)
+ sampled_candidates, _, _ = candidate_sampling_ops.all_candidate_sampler(
true_classes, self.NUM_TRUE, self.NUM_SAMPLED, True)
- accidental_hits = tf.nn.compute_accidental_hits(
+ accidental_hits = candidate_sampling_ops.compute_accidental_hits(
true_classes, sampled_candidates, self.NUM_TRUE)
indices, ids, weights = sess.run(accidental_hits)
@@ -104,16 +110,12 @@ class RangeSamplerOpsTest(tf.test.TestCase):
def draw(seed):
with self.test_session():
- true_classes = tf.constant([[1, 2], [0, 4], [3, 3]],
- dtype=tf.int64)
- sampled, _, _ = tf.nn.log_uniform_candidate_sampler(
- true_classes,
- self.NUM_TRUE,
- self.NUM_SAMPLED,
- True,
- 5,
- seed=seed)
+ true_classes = constant_op.constant(
+ [[1, 2], [0, 4], [3, 3]], dtype=dtypes.int64)
+ sampled, _, _ = candidate_sampling_ops.log_uniform_candidate_sampler(
+ true_classes, self.NUM_TRUE, self.NUM_SAMPLED, True, 5, seed=seed)
return sampled.eval()
+
# Non-zero seed. Repeatable.
for seed in [1, 12, 123, 1234]:
self.assertAllEqual(draw(seed), draw(seed))
@@ -128,4 +130,4 @@ class RangeSamplerOpsTest(tf.test.TestCase):
if __name__ == "__main__":
- tf.test.main()
+ test.main()
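
all_candidate_sampler deterministically returns every class in [0, num_sampled), so each expected count is exactly 1 and its log is 0, which is the property the tests above assert. A minimal sketch of that property, assuming num_sampled covers the full class range (not part of the commit; class name illustrative):

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import candidate_sampling_ops
from tensorflow.python.platform import test


class AllCandidateSamplerSketch(test.TestCase):

  def testExpectedCountsAreOne(self):
    with self.test_session():
      true_classes = constant_op.constant(
          [[1, 2], [0, 4], [3, 3]], dtype=dtypes.int64)
      sampled, true_expected, sampled_expected = (
          candidate_sampling_ops.all_candidate_sampler(
              true_classes, 2, 5, True))
      # Every class in [0, 5) is sampled exactly once.
      self.assertAllEqual([0, 1, 2, 3, 4], sorted(sampled.eval()))
      # Hence all expected counts are exactly 1.
      self.assertAllEqual([[1.0, 1.0]] * 3, true_expected.eval())
      self.assertAllEqual([1.0] * 5, sampled_expected.eval())


if __name__ == "__main__":
  test.main()
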
diff --git a/tensorflow/python/kernel_tests/cast_op_test.py b/tensorflow/python/kernel_tests/cast_op_test.py
index ba5d6fc963..30416a8bc6 100644
--- a/tensorflow/python/kernel_tests/cast_op_test.py
+++ b/tensorflow/python/kernel_tests/cast_op_test.py
@@ -12,41 +12,49 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for tensorflow.ops.tf.cast."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import sparse_tensor
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import gradient_checker
+from tensorflow.python.ops import math_ops
+from tensorflow.python.ops import variables
+from tensorflow.python.platform import test
-class CastOpTest(tf.test.TestCase):
+class CastOpTest(test.TestCase):
def _toDataType(self, dtype):
"""Returns TensorFlow data type for numpy type."""
if dtype == np.float32:
- return tf.float32
+ return dtypes.float32
elif dtype == np.float64:
- return tf.float64
+ return dtypes.float64
elif dtype == np.int32:
- return tf.int32
+ return dtypes.int32
elif dtype == np.int64:
- return tf.int64
+ return dtypes.int64
elif dtype == np.bool:
- return tf.bool
+ return dtypes.bool
elif dtype == np.complex64:
- return tf.complex64
+ return dtypes.complex64
elif dtype == np.complex128:
- return tf.complex128
+ return dtypes.complex128
else:
return None
def _cast(self, x, dtype, use_gpu=False):
with self.test_session(use_gpu=use_gpu):
- val = tf.constant(x, self._toDataType(np.array([x]).dtype))
- return tf.cast(val, self._toDataType(dtype), name="cast").eval()
+ val = constant_op.constant(x, self._toDataType(np.array([x]).dtype))
+ return math_ops.cast(val, self._toDataType(dtype), name="cast").eval()
def _test(self, x, dtype, use_gpu=False):
"""Tests cast(x) to dtype behaves the same as numpy.astype."""
@@ -57,11 +65,14 @@ class CastOpTest(tf.test.TestCase):
def _testTypes(self, x, use_gpu=False):
"""Tests cast(x) to different tf."""
if use_gpu:
- type_list = [np.float32, np.float64, np.int64,
- np.complex64, np.complex128]
+ type_list = [
+ np.float32, np.float64, np.int64, np.complex64, np.complex128
+ ]
else:
- type_list = [np.float32, np.float64, np.int32,
- np.int64, np.complex64, np.complex128]
+ type_list = [
+ np.float32, np.float64, np.int32, np.int64, np.complex64,
+ np.complex128
+ ]
for from_type in type_list:
for to_type in type_list:
self._test(x.astype(from_type), to_type, use_gpu)
@@ -84,17 +95,20 @@ class CastOpTest(tf.test.TestCase):
def testSmallValues(self):
f4 = np.finfo(np.float32)
f8 = np.finfo(np.float64)
- self._testAll(np.array([0, -1, 1, -f4.resolution, f4.resolution,
- f8.resolution, -f8.resolution]))
+ self._testAll(
+ np.array([
+ 0, -1, 1, -f4.resolution, f4.resolution, f8.resolution,
+ -f8.resolution
+ ]))
def testBfloat16(self):
a = np.random.uniform(-100, 100, 100).astype(np.float32)
with self.test_session(use_gpu=False):
- b = tf.cast(tf.cast(a, tf.bfloat16), tf.float32)
- self.assertAllClose(a, b.eval(), rtol=1/128.)
+ b = math_ops.cast(math_ops.cast(a, dtypes.bfloat16), dtypes.float32)
+ self.assertAllClose(a, b.eval(), rtol=1 / 128.)
with self.test_session(use_gpu=True):
- b = tf.cast(tf.cast(a, tf.bfloat16), tf.float32)
- self.assertAllClose(a, b.eval(), rtol=1/128.)
+ b = math_ops.cast(math_ops.cast(a, dtypes.bfloat16), dtypes.float32)
+ self.assertAllClose(a, b.eval(), rtol=1 / 128.)
def testRandom(self):
self._testAll(np.random.normal(0, 10, 210).reshape([2, 3, 5, 7]))
@@ -104,8 +118,9 @@ class CastOpTest(tf.test.TestCase):
# integer values in somewhat unexpected ways. And they behave
# differently on CPU and GPU.
def _compare(self, x, dst_dtype, expected, use_gpu=False):
- np.testing.assert_equal(self._cast(x, dst_dtype, use_gpu=use_gpu),
- dst_dtype(expected))
+ np.testing.assert_equal(
+ self._cast(
+ x, dst_dtype, use_gpu=use_gpu), dst_dtype(expected))
def testIntToFloatBoundary(self):
i4 = np.iinfo(np.int32)
@@ -148,40 +163,39 @@ class CastOpTest(tf.test.TestCase):
def _OpError(self, x, dtype, err):
with self.test_session():
with self.assertRaisesOpError(err):
- tf.cast(x, dtype).eval()
+ math_ops.cast(x, dtype).eval()
def testNotImplemented(self):
- self._OpError(np.arange(0, 10), tf.string,
- "Cast.*int64.*string.*")
+ self._OpError(np.arange(0, 10), dtypes.string, "Cast.*int64.*string.*")
def testCastToTypeOfVariable(self):
with self.test_session() as sess:
- x = tf.Variable(5, dtype=tf.float32)
- y = tf.Variable(True, dtype=tf.bool)
- cast = tf.cast(y, x.dtype)
- tf.global_variables_initializer().run()
+ x = variables.Variable(5, dtype=dtypes.float32)
+ y = variables.Variable(True, dtype=dtypes.bool)
+ cast = math_ops.cast(y, x.dtype)
+ variables.global_variables_initializer().run()
self.assertEqual(1.0, sess.run(cast))
def testGradients(self):
- t = [tf.float32, tf.float64, tf.complex64, tf.complex128]
+ t = [dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128]
for src_t in t:
for dst_t in t:
with self.test_session():
- x = tf.constant(1.0, src_t)
- z = tf.identity(x)
- y = tf.cast(z, dst_t)
- err = tf.test.compute_gradient_error(x, [], y, [])
+ x = constant_op.constant(1.0, src_t)
+ z = array_ops.identity(x)
+ y = math_ops.cast(z, dst_t)
+ err = gradient_checker.compute_gradient_error(x, [], y, [])
self.assertLess(err, 1e-3)
-class SparseTensorCastTest(tf.test.TestCase):
+class SparseTensorCastTest(test.TestCase):
def testCast(self):
- indices = tf.constant([[0], [1], [2]], tf.int64)
- values = tf.constant(np.array([1, 2, 3], np.int64))
- shape = tf.constant([3], tf.int64)
- st = tf.SparseTensor(indices, values, shape)
- st_cast = tf.cast(st, tf.float32)
+ indices = constant_op.constant([[0], [1], [2]], dtypes.int64)
+ values = constant_op.constant(np.array([1, 2, 3], np.int64))
+ shape = constant_op.constant([3], dtypes.int64)
+ st = sparse_tensor.SparseTensor(indices, values, shape)
+ st_cast = math_ops.cast(st, dtypes.float32)
with self.test_session():
self.assertAllEqual(st_cast.indices.eval(), [[0], [1], [2]])
self.assertAllEqual(st_cast.values.eval(),
@@ -189,18 +203,18 @@ class SparseTensorCastTest(tf.test.TestCase):
self.assertAllEqual(st_cast.dense_shape.eval(), [3])
-class SaturateCastTest(tf.test.TestCase):
+class SaturateCastTest(test.TestCase):
def testSaturate(self):
- in_types = tf.float32,
- out_types = tf.int8, tf.uint8, tf.int16, tf.float32
+ in_types = dtypes.float32,
+ out_types = dtypes.int8, dtypes.uint8, dtypes.int16, dtypes.float32
with self.test_session() as sess:
for in_type in in_types:
for out_type in out_types:
lo, hi = in_type.min, in_type.max
- x = tf.constant([lo, lo + 1, lo // 2, hi // 2, hi - 1, hi],
- dtype=in_type)
- y = tf.saturate_cast(x, dtype=out_type)
+ x = constant_op.constant(
+ [lo, lo + 1, lo // 2, hi // 2, hi - 1, hi], dtype=in_type)
+ y = math_ops.saturate_cast(x, dtype=out_type)
self.assertEqual(y.dtype, out_type)
x, y = sess.run([x, y])
correct = np.maximum(out_type.min, np.minimum(out_type.max, x))
@@ -208,4 +222,4 @@ class SaturateCastTest(tf.test.TestCase):
if __name__ == "__main__":
- tf.test.main()
+ test.main()
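
saturate_cast differs from a plain cast by clamping values to the target type's representable range before converting, which is what SaturateCastTest verifies against np.maximum/np.minimum above. A minimal standalone sketch (not part of the commit; class name illustrative):

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test


class SaturateCastSketch(test.TestCase):

  def testClampsToInt8Range(self):
    with self.test_session():
      x = constant_op.constant([-1000.0, 0.0, 1000.0], dtype=dtypes.float32)
      # Values are clamped to [-128, 127] before the cast, so no wraparound.
      y = math_ops.saturate_cast(x, dtype=dtypes.int8)
      self.assertEqual(y.dtype, dtypes.int8)
      self.assertAllEqual([-128, 0, 127], y.eval())


if __name__ == "__main__":
  test.main()
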
diff --git a/tensorflow/python/kernel_tests/check_ops_test.py b/tensorflow/python/kernel_tests/check_ops_test.py
index 0cb6bbdedd..cdfbbbdaf2 100644
--- a/tensorflow/python/kernel_tests/check_ops_test.py
+++ b/tensorflow/python/kernel_tests/check_ops_test.py
@@ -13,341 +13,367 @@
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.check_ops."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
+from tensorflow.python.framework import sparse_tensor
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import check_ops
+from tensorflow.python.platform import test
-class AssertProperIterableTest(tf.test.TestCase):
+class AssertProperIterableTest(test.TestCase):
def test_single_tensor_raises(self):
- tensor = tf.constant(1)
+ tensor = constant_op.constant(1)
with self.assertRaisesRegexp(TypeError, "proper"):
- tf.assert_proper_iterable(tensor)
+ check_ops.assert_proper_iterable(tensor)
def test_single_sparse_tensor_raises(self):
- ten = tf.SparseTensor(
+ ten = sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
with self.assertRaisesRegexp(TypeError, "proper"):
- tf.assert_proper_iterable(ten)
+ check_ops.assert_proper_iterable(ten)
def test_single_ndarray_raises(self):
array = np.array([1, 2, 3])
with self.assertRaisesRegexp(TypeError, "proper"):
- tf.assert_proper_iterable(array)
+ check_ops.assert_proper_iterable(array)
def test_single_string_raises(self):
mystr = "hello"
with self.assertRaisesRegexp(TypeError, "proper"):
- tf.assert_proper_iterable(mystr)
+ check_ops.assert_proper_iterable(mystr)
def test_non_iterable_object_raises(self):
non_iterable = 1234
with self.assertRaisesRegexp(TypeError, "to be iterable"):
- tf.assert_proper_iterable(non_iterable)
+ check_ops.assert_proper_iterable(non_iterable)
def test_list_does_not_raise(self):
- list_of_stuff = [tf.constant([11, 22]), tf.constant([1, 2])]
- tf.assert_proper_iterable(list_of_stuff)
+ list_of_stuff = [
+ constant_op.constant([11, 22]), constant_op.constant([1, 2])
+ ]
+ check_ops.assert_proper_iterable(list_of_stuff)
def test_generator_does_not_raise(self):
- generator_of_stuff = (tf.constant([11, 22]), tf.constant([1, 2]))
- tf.assert_proper_iterable(generator_of_stuff)
+ generator_of_stuff = (constant_op.constant([11, 22]), constant_op.constant(
+ [1, 2]))
+ check_ops.assert_proper_iterable(generator_of_stuff)
-class AssertEqualTest(tf.test.TestCase):
+class AssertEqualTest(test.TestCase):
def test_doesnt_raise_when_equal(self):
with self.test_session():
- small = tf.constant([1, 2], name="small")
- with tf.control_dependencies([tf.assert_equal(small, small)]):
- out = tf.identity(small)
+ small = constant_op.constant([1, 2], name="small")
+ with ops.control_dependencies([check_ops.assert_equal(small, small)]):
+ out = array_ops.identity(small)
out.eval()
def test_raises_when_greater(self):
with self.test_session():
- small = tf.constant([1, 2], name="small")
- big = tf.constant([3, 4], name="big")
- with tf.control_dependencies(
- [tf.assert_equal(big, small, message="fail")]):
- out = tf.identity(small)
+ small = constant_op.constant([1, 2], name="small")
+ big = constant_op.constant([3, 4], name="big")
+ with ops.control_dependencies(
+ [check_ops.assert_equal(
+ big, small, message="fail")]):
+ out = array_ops.identity(small)
with self.assertRaisesOpError("fail.*big.*small"):
out.eval()
def test_raises_when_less(self):
with self.test_session():
- small = tf.constant([3, 1], name="small")
- big = tf.constant([4, 2], name="big")
- with tf.control_dependencies([tf.assert_equal(small, big)]):
- out = tf.identity(small)
+ small = constant_op.constant([3, 1], name="small")
+ big = constant_op.constant([4, 2], name="big")
+ with ops.control_dependencies([check_ops.assert_equal(small, big)]):
+ out = array_ops.identity(small)
with self.assertRaisesOpError("small.*big"):
out.eval()
def test_doesnt_raise_when_equal_and_broadcastable_shapes(self):
with self.test_session():
- small = tf.constant([1, 2], name="small")
- small_2 = tf.constant([1, 2], name="small_2")
- with tf.control_dependencies([tf.assert_equal(small, small_2)]):
- out = tf.identity(small)
+ small = constant_op.constant([1, 2], name="small")
+ small_2 = constant_op.constant([1, 2], name="small_2")
+ with ops.control_dependencies([check_ops.assert_equal(small, small_2)]):
+ out = array_ops.identity(small)
out.eval()
def test_raises_when_equal_but_non_broadcastable_shapes(self):
with self.test_session():
- small = tf.constant([1, 1, 1], name="small")
- small_2 = tf.constant([1, 1], name="small_2")
+ small = constant_op.constant([1, 1, 1], name="small")
+ small_2 = constant_op.constant([1, 1], name="small_2")
with self.assertRaisesRegexp(ValueError, "must be"):
- with tf.control_dependencies([tf.assert_equal(small, small_2)]):
- out = tf.identity(small)
+ with ops.control_dependencies([check_ops.assert_equal(small, small_2)]):
+ out = array_ops.identity(small)
out.eval()
def test_doesnt_raise_when_both_empty(self):
with self.test_session():
- larry = tf.constant([])
- curly = tf.constant([])
- with tf.control_dependencies([tf.assert_equal(larry, curly)]):
- out = tf.identity(larry)
+ larry = constant_op.constant([])
+ curly = constant_op.constant([])
+ with ops.control_dependencies([check_ops.assert_equal(larry, curly)]):
+ out = array_ops.identity(larry)
out.eval()
-class AssertLessTest(tf.test.TestCase):
+class AssertLessTest(test.TestCase):
def test_raises_when_equal(self):
with self.test_session():
- small = tf.constant([1, 2], name="small")
- with tf.control_dependencies(
- [tf.assert_less(small, small, message="fail")]):
- out = tf.identity(small)
+ small = constant_op.constant([1, 2], name="small")
+ with ops.control_dependencies(
+ [check_ops.assert_less(
+ small, small, message="fail")]):
+ out = array_ops.identity(small)
with self.assertRaisesOpError("fail.*small.*small"):
out.eval()
def test_raises_when_greater(self):
with self.test_session():
- small = tf.constant([1, 2], name="small")
- big = tf.constant([3, 4], name="big")
- with tf.control_dependencies([tf.assert_less(big, small)]):
- out = tf.identity(small)
+ small = constant_op.constant([1, 2], name="small")
+ big = constant_op.constant([3, 4], name="big")
+ with ops.control_dependencies([check_ops.assert_less(big, small)]):
+ out = array_ops.identity(small)
with self.assertRaisesOpError("big.*small"):
out.eval()
def test_doesnt_raise_when_less(self):
with self.test_session():
- small = tf.constant([3, 1], name="small")
- big = tf.constant([4, 2], name="big")
- with tf.control_dependencies([tf.assert_less(small, big)]):
- out = tf.identity(small)
+ small = constant_op.constant([3, 1], name="small")
+ big = constant_op.constant([4, 2], name="big")
+ with ops.control_dependencies([check_ops.assert_less(small, big)]):
+ out = array_ops.identity(small)
out.eval()
def test_doesnt_raise_when_less_and_broadcastable_shapes(self):
with self.test_session():
- small = tf.constant([1], name="small")
- big = tf.constant([3, 2], name="big")
- with tf.control_dependencies([tf.assert_less(small, big)]):
- out = tf.identity(small)
+ small = constant_op.constant([1], name="small")
+ big = constant_op.constant([3, 2], name="big")
+ with ops.control_dependencies([check_ops.assert_less(small, big)]):
+ out = array_ops.identity(small)
out.eval()
def test_raises_when_less_but_non_broadcastable_shapes(self):
with self.test_session():
- small = tf.constant([1, 1, 1], name="small")
- big = tf.constant([3, 2], name="big")
+ small = constant_op.constant([1, 1, 1], name="small")
+ big = constant_op.constant([3, 2], name="big")
with self.assertRaisesRegexp(ValueError, "must be"):
- with tf.control_dependencies([tf.assert_less(small, big)]):
- out = tf.identity(small)
+ with ops.control_dependencies([check_ops.assert_less(small, big)]):
+ out = array_ops.identity(small)
out.eval()
def test_doesnt_raise_when_both_empty(self):
with self.test_session():
- larry = tf.constant([])
- curly = tf.constant([])
- with tf.control_dependencies([tf.assert_less(larry, curly)]):
- out = tf.identity(larry)
+ larry = constant_op.constant([])
+ curly = constant_op.constant([])
+ with ops.control_dependencies([check_ops.assert_less(larry, curly)]):
+ out = array_ops.identity(larry)
out.eval()
-class AssertLessEqualTest(tf.test.TestCase):
+class AssertLessEqualTest(test.TestCase):
def test_doesnt_raise_when_equal(self):
with self.test_session():
- small = tf.constant([1, 2], name="small")
- with tf.control_dependencies([tf.assert_less_equal(small, small)]):
- out = tf.identity(small)
+ small = constant_op.constant([1, 2], name="small")
+ with ops.control_dependencies(
+ [check_ops.assert_less_equal(small, small)]):
+ out = array_ops.identity(small)
out.eval()
def test_raises_when_greater(self):
with self.test_session():
- small = tf.constant([1, 2], name="small")
- big = tf.constant([3, 4], name="big")
- with tf.control_dependencies(
- [tf.assert_less_equal(big, small, message="fail")]):
- out = tf.identity(small)
+ small = constant_op.constant([1, 2], name="small")
+ big = constant_op.constant([3, 4], name="big")
+ with ops.control_dependencies(
+ [check_ops.assert_less_equal(
+ big, small, message="fail")]):
+ out = array_ops.identity(small)
with self.assertRaisesOpError("fail.*big.*small"):
out.eval()
def test_doesnt_raise_when_less_equal(self):
with self.test_session():
- small = tf.constant([1, 2], name="small")
- big = tf.constant([3, 2], name="big")
- with tf.control_dependencies([tf.assert_less_equal(small, big)]):
- out = tf.identity(small)
+ small = constant_op.constant([1, 2], name="small")
+ big = constant_op.constant([3, 2], name="big")
+ with ops.control_dependencies([check_ops.assert_less_equal(small, big)]):
+ out = array_ops.identity(small)
out.eval()
def test_doesnt_raise_when_less_equal_and_broadcastable_shapes(self):
with self.test_session():
- small = tf.constant([1], name="small")
- big = tf.constant([3, 1], name="big")
- with tf.control_dependencies([tf.assert_less_equal(small, big)]):
- out = tf.identity(small)
+ small = constant_op.constant([1], name="small")
+ big = constant_op.constant([3, 1], name="big")
+ with ops.control_dependencies([check_ops.assert_less_equal(small, big)]):
+ out = array_ops.identity(small)
out.eval()
def test_raises_when_less_equal_but_non_broadcastable_shapes(self):
with self.test_session():
- small = tf.constant([1, 1, 1], name="small")
- big = tf.constant([3, 1], name="big")
+ small = constant_op.constant([1, 1, 1], name="small")
+ big = constant_op.constant([3, 1], name="big")
with self.assertRaisesRegexp(ValueError, "must be"):
- with tf.control_dependencies([tf.assert_less_equal(small, big)]):
- out = tf.identity(small)
+ with ops.control_dependencies(
+ [check_ops.assert_less_equal(small, big)]):
+ out = array_ops.identity(small)
out.eval()
def test_doesnt_raise_when_both_empty(self):
with self.test_session():
- larry = tf.constant([])
- curly = tf.constant([])
- with tf.control_dependencies([tf.assert_less_equal(larry, curly)]):
- out = tf.identity(larry)
+ larry = constant_op.constant([])
+ curly = constant_op.constant([])
+ with ops.control_dependencies(
+ [check_ops.assert_less_equal(larry, curly)]):
+ out = array_ops.identity(larry)
out.eval()
-class AssertGreaterTest(tf.test.TestCase):
+class AssertGreaterTest(test.TestCase):
def test_raises_when_equal(self):
with self.test_session():
- small = tf.constant([1, 2], name="small")
- with tf.control_dependencies(
- [tf.assert_greater(small, small, message="fail")]):
- out = tf.identity(small)
+ small = constant_op.constant([1, 2], name="small")
+ with ops.control_dependencies(
+ [check_ops.assert_greater(
+ small, small, message="fail")]):
+ out = array_ops.identity(small)
with self.assertRaisesOpError("fail.*small.*small"):
out.eval()
def test_raises_when_less(self):
with self.test_session():
- small = tf.constant([1, 2], name="small")
- big = tf.constant([3, 4], name="big")
- with tf.control_dependencies([tf.assert_greater(small, big)]):
- out = tf.identity(big)
+ small = constant_op.constant([1, 2], name="small")
+ big = constant_op.constant([3, 4], name="big")
+ with ops.control_dependencies([check_ops.assert_greater(small, big)]):
+ out = array_ops.identity(big)
with self.assertRaisesOpError("small.*big"):
out.eval()
def test_doesnt_raise_when_greater(self):
with self.test_session():
- small = tf.constant([3, 1], name="small")
- big = tf.constant([4, 2], name="big")
- with tf.control_dependencies([tf.assert_greater(big, small)]):
- out = tf.identity(small)
+ small = constant_op.constant([3, 1], name="small")
+ big = constant_op.constant([4, 2], name="big")
+ with ops.control_dependencies([check_ops.assert_greater(big, small)]):
+ out = array_ops.identity(small)
out.eval()
def test_doesnt_raise_when_greater_and_broadcastable_shapes(self):
with self.test_session():
- small = tf.constant([1], name="small")
- big = tf.constant([3, 2], name="big")
- with tf.control_dependencies([tf.assert_greater(big, small)]):
- out = tf.identity(small)
+ small = constant_op.constant([1], name="small")
+ big = constant_op.constant([3, 2], name="big")
+ with ops.control_dependencies([check_ops.assert_greater(big, small)]):
+ out = array_ops.identity(small)
out.eval()
def test_raises_when_greater_but_non_broadcastable_shapes(self):
with self.test_session():
- small = tf.constant([1, 1, 1], name="small")
- big = tf.constant([3, 2], name="big")
+ small = constant_op.constant([1, 1, 1], name="small")
+ big = constant_op.constant([3, 2], name="big")
with self.assertRaisesRegexp(ValueError, "must be"):
- with tf.control_dependencies([tf.assert_greater(big, small)]):
- out = tf.identity(small)
+ with ops.control_dependencies([check_ops.assert_greater(big, small)]):
+ out = array_ops.identity(small)
out.eval()
def test_doesnt_raise_when_both_empty(self):
with self.test_session():
- larry = tf.constant([])
- curly = tf.constant([])
- with tf.control_dependencies([tf.assert_greater(larry, curly)]):
- out = tf.identity(larry)
+ larry = constant_op.constant([])
+ curly = constant_op.constant([])
+ with ops.control_dependencies([check_ops.assert_greater(larry, curly)]):
+ out = array_ops.identity(larry)
out.eval()
-class AssertGreaterEqualTest(tf.test.TestCase):
+class AssertGreaterEqualTest(test.TestCase):
def test_doesnt_raise_when_equal(self):
with self.test_session():
- small = tf.constant([1, 2], name="small")
- with tf.control_dependencies([tf.assert_greater_equal(small, small)]):
- out = tf.identity(small)
+ small = constant_op.constant([1, 2], name="small")
+ with ops.control_dependencies(
+ [check_ops.assert_greater_equal(small, small)]):
+ out = array_ops.identity(small)
out.eval()
def test_raises_when_less(self):
with self.test_session():
- small = tf.constant([1, 2], name="small")
- big = tf.constant([3, 4], name="big")
- with tf.control_dependencies(
- [tf.assert_greater_equal(small, big, message="fail")]):
- out = tf.identity(small)
+ small = constant_op.constant([1, 2], name="small")
+ big = constant_op.constant([3, 4], name="big")
+ with ops.control_dependencies(
+ [check_ops.assert_greater_equal(
+ small, big, message="fail")]):
+ out = array_ops.identity(small)
with self.assertRaisesOpError("fail.*small.*big"):
out.eval()
def test_doesnt_raise_when_greater_equal(self):
with self.test_session():
- small = tf.constant([1, 2], name="small")
- big = tf.constant([3, 2], name="big")
- with tf.control_dependencies([tf.assert_greater_equal(big, small)]):
- out = tf.identity(small)
+ small = constant_op.constant([1, 2], name="small")
+ big = constant_op.constant([3, 2], name="big")
+ with ops.control_dependencies(
+ [check_ops.assert_greater_equal(big, small)]):
+ out = array_ops.identity(small)
out.eval()
def test_doesnt_raise_when_greater_equal_and_broadcastable_shapes(self):
with self.test_session():
- small = tf.constant([1], name="small")
- big = tf.constant([3, 1], name="big")
- with tf.control_dependencies([tf.assert_greater_equal(big, small)]):
- out = tf.identity(small)
+ small = constant_op.constant([1], name="small")
+ big = constant_op.constant([3, 1], name="big")
+ with ops.control_dependencies(
+ [check_ops.assert_greater_equal(big, small)]):
+ out = array_ops.identity(small)
out.eval()
def test_raises_when_greater_equal_but_non_broadcastable_shapes(self):
with self.test_session():
- small = tf.constant([1, 1, 1], name="big")
- big = tf.constant([3, 1], name="small")
+ small = constant_op.constant([1, 1, 1], name="small")
+ big = constant_op.constant([3, 1], name="big")
with self.assertRaisesRegexp(ValueError, "Dimensions must be equal"):
- with tf.control_dependencies([tf.assert_greater_equal(big, small)]):
- out = tf.identity(small)
+ with ops.control_dependencies(
+ [check_ops.assert_greater_equal(big, small)]):
+ out = array_ops.identity(small)
out.eval()
def test_doesnt_raise_when_both_empty(self):
with self.test_session():
- larry = tf.constant([])
- curly = tf.constant([])
- with tf.control_dependencies([tf.assert_greater_equal(larry, curly)]):
- out = tf.identity(larry)
+ larry = constant_op.constant([])
+ curly = constant_op.constant([])
+ with ops.control_dependencies(
+ [check_ops.assert_greater_equal(larry, curly)]):
+ out = array_ops.identity(larry)
out.eval()
-class AssertNegativeTest(tf.test.TestCase):
+class AssertNegativeTest(test.TestCase):
def test_doesnt_raise_when_negative(self):
with self.test_session():
- frank = tf.constant([-1, -2], name="frank")
- with tf.control_dependencies([tf.assert_negative(frank)]):
- out = tf.identity(frank)
+ frank = constant_op.constant([-1, -2], name="frank")
+ with ops.control_dependencies([check_ops.assert_negative(frank)]):
+ out = array_ops.identity(frank)
out.eval()
def test_raises_when_positive(self):
with self.test_session():
- doug = tf.constant([1, 2], name="doug")
- with tf.control_dependencies([tf.assert_negative(doug, message="fail")]):
- out = tf.identity(doug)
+ doug = constant_op.constant([1, 2], name="doug")
+ with ops.control_dependencies(
+ [check_ops.assert_negative(
+ doug, message="fail")]):
+ out = array_ops.identity(doug)
with self.assertRaisesOpError("fail.*doug"):
out.eval()
def test_raises_when_zero(self):
with self.test_session():
- claire = tf.constant([0], name="claire")
- with tf.control_dependencies([tf.assert_negative(claire)]):
- out = tf.identity(claire)
+ claire = constant_op.constant([0], name="claire")
+ with ops.control_dependencies([check_ops.assert_negative(claire)]):
+ out = array_ops.identity(claire)
with self.assertRaisesOpError("claire"):
out.eval()
@@ -357,35 +383,36 @@ class AssertNegativeTest(tf.test.TestCase):
# and an empty tensor has no elements, so this is trivially satisfied.
# This is standard set theory.
with self.test_session():
- empty = tf.constant([], name="empty")
- with tf.control_dependencies([tf.assert_negative(empty)]):
- out = tf.identity(empty)
+ empty = constant_op.constant([], name="empty")
+ with ops.control_dependencies([check_ops.assert_negative(empty)]):
+ out = array_ops.identity(empty)
out.eval()
-class AssertPositiveTest(tf.test.TestCase):
+class AssertPositiveTest(test.TestCase):
def test_raises_when_negative(self):
with self.test_session():
- freddie = tf.constant([-1, -2], name="freddie")
- with tf.control_dependencies(
- [tf.assert_positive(freddie, message="fail")]):
- out = tf.identity(freddie)
+ freddie = constant_op.constant([-1, -2], name="freddie")
+ with ops.control_dependencies(
+ [check_ops.assert_positive(
+ freddie, message="fail")]):
+ out = array_ops.identity(freddie)
with self.assertRaisesOpError("fail.*freddie"):
out.eval()
def test_doesnt_raise_when_positive(self):
with self.test_session():
- remmy = tf.constant([1, 2], name="remmy")
- with tf.control_dependencies([tf.assert_positive(remmy)]):
- out = tf.identity(remmy)
+ remmy = constant_op.constant([1, 2], name="remmy")
+ with ops.control_dependencies([check_ops.assert_positive(remmy)]):
+ out = array_ops.identity(remmy)
out.eval()
def test_raises_when_zero(self):
with self.test_session():
- meechum = tf.constant([0], name="meechum")
- with tf.control_dependencies([tf.assert_positive(meechum)]):
- out = tf.identity(meechum)
+ meechum = constant_op.constant([0], name="meechum")
+ with ops.control_dependencies([check_ops.assert_positive(meechum)]):
+ out = array_ops.identity(meechum)
with self.assertRaisesOpError("meechum"):
out.eval()
@@ -395,226 +422,240 @@ class AssertPositiveTest(tf.test.TestCase):
# and an empty tensor has no elements, so this is trivially satisfied.
# This is standard set theory.
with self.test_session():
- empty = tf.constant([], name="empty")
- with tf.control_dependencies([tf.assert_positive(empty)]):
- out = tf.identity(empty)
+ empty = constant_op.constant([], name="empty")
+ with ops.control_dependencies([check_ops.assert_positive(empty)]):
+ out = array_ops.identity(empty)
out.eval()
-class AssertRankTest(tf.test.TestCase):
+class AssertRankTest(test.TestCase):
def test_rank_zero_tensor_raises_if_rank_too_small_static_rank(self):
with self.test_session():
- tensor = tf.constant(1, name="my_tensor")
+ tensor = constant_op.constant(1, name="my_tensor")
desired_rank = 1
- with self.assertRaisesRegexp(
- ValueError, "fail.*my_tensor.*must have rank 1"):
- with tf.control_dependencies(
- [tf.assert_rank(tensor, desired_rank, message="fail")]):
- tf.identity(tensor).eval()
+ with self.assertRaisesRegexp(ValueError,
+ "fail.*my_tensor.*must have rank 1"):
+ with ops.control_dependencies(
+ [check_ops.assert_rank(
+ tensor, desired_rank, message="fail")]):
+ array_ops.identity(tensor).eval()
def test_rank_zero_tensor_raises_if_rank_too_small_dynamic_rank(self):
with self.test_session():
- tensor = tf.placeholder(tf.float32, name="my_tensor")
+ tensor = array_ops.placeholder(dtypes.float32, name="my_tensor")
desired_rank = 1
- with tf.control_dependencies(
- [tf.assert_rank(tensor, desired_rank, message="fail")]):
+ with ops.control_dependencies(
+ [check_ops.assert_rank(
+ tensor, desired_rank, message="fail")]):
with self.assertRaisesOpError("fail.*my_tensor.*rank"):
- tf.identity(tensor).eval(feed_dict={tensor: 0})
+ array_ops.identity(tensor).eval(feed_dict={tensor: 0})
def test_rank_zero_tensor_doesnt_raise_if_rank_just_right_static_rank(self):
with self.test_session():
- tensor = tf.constant(1, name="my_tensor")
+ tensor = constant_op.constant(1, name="my_tensor")
desired_rank = 0
- with tf.control_dependencies([tf.assert_rank(tensor, desired_rank)]):
- tf.identity(tensor).eval()
+ with ops.control_dependencies(
+ [check_ops.assert_rank(tensor, desired_rank)]):
+ array_ops.identity(tensor).eval()
def test_rank_zero_tensor_doesnt_raise_if_rank_just_right_dynamic_rank(self):
with self.test_session():
- tensor = tf.placeholder(tf.float32, name="my_tensor")
+ tensor = array_ops.placeholder(dtypes.float32, name="my_tensor")
desired_rank = 0
- with tf.control_dependencies([tf.assert_rank(tensor, desired_rank)]):
- tf.identity(tensor).eval(feed_dict={tensor: 0})
+ with ops.control_dependencies(
+ [check_ops.assert_rank(tensor, desired_rank)]):
+ array_ops.identity(tensor).eval(feed_dict={tensor: 0})
def test_rank_one_tensor_raises_if_rank_too_large_static_rank(self):
with self.test_session():
- tensor = tf.constant([1, 2], name="my_tensor")
+ tensor = constant_op.constant([1, 2], name="my_tensor")
desired_rank = 0
with self.assertRaisesRegexp(ValueError, "my_tensor.*rank"):
- with tf.control_dependencies([tf.assert_rank(tensor, desired_rank)]):
- tf.identity(tensor).eval()
+ with ops.control_dependencies(
+ [check_ops.assert_rank(tensor, desired_rank)]):
+ array_ops.identity(tensor).eval()
def test_rank_one_tensor_raises_if_rank_too_large_dynamic_rank(self):
with self.test_session():
- tensor = tf.placeholder(tf.float32, name="my_tensor")
+ tensor = array_ops.placeholder(dtypes.float32, name="my_tensor")
desired_rank = 0
- with tf.control_dependencies([tf.assert_rank(tensor, desired_rank)]):
+ with ops.control_dependencies(
+ [check_ops.assert_rank(tensor, desired_rank)]):
with self.assertRaisesOpError("my_tensor.*rank"):
- tf.identity(tensor).eval(feed_dict={tensor: [1, 2]})
+ array_ops.identity(tensor).eval(feed_dict={tensor: [1, 2]})
def test_rank_one_tensor_doesnt_raise_if_rank_just_right_static_rank(self):
with self.test_session():
- tensor = tf.constant([1, 2], name="my_tensor")
+ tensor = constant_op.constant([1, 2], name="my_tensor")
desired_rank = 1
- with tf.control_dependencies([tf.assert_rank(tensor, desired_rank)]):
- tf.identity(tensor).eval()
+ with ops.control_dependencies(
+ [check_ops.assert_rank(tensor, desired_rank)]):
+ array_ops.identity(tensor).eval()
def test_rank_one_tensor_doesnt_raise_if_rank_just_right_dynamic_rank(self):
with self.test_session():
- tensor = tf.placeholder(tf.float32, name="my_tensor")
+ tensor = array_ops.placeholder(dtypes.float32, name="my_tensor")
desired_rank = 1
- with tf.control_dependencies([tf.assert_rank(tensor, desired_rank)]):
- tf.identity(tensor).eval(feed_dict={tensor: [1, 2]})
+ with ops.control_dependencies(
+ [check_ops.assert_rank(tensor, desired_rank)]):
+ array_ops.identity(tensor).eval(feed_dict={tensor: [1, 2]})
def test_rank_one_tensor_raises_if_rank_too_small_static_rank(self):
with self.test_session():
- tensor = tf.constant([1, 2], name="my_tensor")
+ tensor = constant_op.constant([1, 2], name="my_tensor")
desired_rank = 2
with self.assertRaisesRegexp(ValueError, "my_tensor.*rank"):
- with tf.control_dependencies([tf.assert_rank(tensor, desired_rank)]):
- tf.identity(tensor).eval()
+ with ops.control_dependencies(
+ [check_ops.assert_rank(tensor, desired_rank)]):
+ array_ops.identity(tensor).eval()
def test_rank_one_tensor_raises_if_rank_too_small_dynamic_rank(self):
with self.test_session():
- tensor = tf.placeholder(tf.float32, name="my_tensor")
+ tensor = array_ops.placeholder(dtypes.float32, name="my_tensor")
desired_rank = 2
- with tf.control_dependencies([tf.assert_rank(tensor, desired_rank)]):
+ with ops.control_dependencies(
+ [check_ops.assert_rank(tensor, desired_rank)]):
with self.assertRaisesOpError("my_tensor.*rank"):
- tf.identity(tensor).eval(feed_dict={tensor: [1, 2]})
+ array_ops.identity(tensor).eval(feed_dict={tensor: [1, 2]})
def test_raises_if_rank_is_not_scalar_static(self):
with self.test_session():
- tensor = tf.constant([1, 2], name="my_tensor")
+ tensor = constant_op.constant([1, 2], name="my_tensor")
with self.assertRaisesRegexp(ValueError, "Rank must be a scalar"):
- tf.assert_rank(tensor, np.array([], dtype=np.int32))
+ check_ops.assert_rank(tensor, np.array([], dtype=np.int32))
def test_raises_if_rank_is_not_scalar_dynamic(self):
with self.test_session():
- tensor = tf.constant([1, 2], dtype=tf.float32, name="my_tensor")
- rank_tensor = tf.placeholder(tf.int32, name="rank_tensor")
+ tensor = constant_op.constant(
+ [1, 2], dtype=dtypes.float32, name="my_tensor")
+ rank_tensor = array_ops.placeholder(dtypes.int32, name="rank_tensor")
with self.assertRaisesOpError("Rank must be a scalar"):
- with tf.control_dependencies([tf.assert_rank(tensor, rank_tensor)]):
- tf.identity(tensor).eval(feed_dict={rank_tensor: [1, 2]})
+ with ops.control_dependencies(
+ [check_ops.assert_rank(tensor, rank_tensor)]):
+ array_ops.identity(tensor).eval(feed_dict={rank_tensor: [1, 2]})
def test_raises_if_rank_is_not_integer_static(self):
with self.test_session():
- tensor = tf.constant([1, 2], name="my_tensor")
+ tensor = constant_op.constant([1, 2], name="my_tensor")
with self.assertRaisesRegexp(TypeError,
"must be of type <dtype: 'int32'>"):
- tf.assert_rank(tensor, .5)
+ check_ops.assert_rank(tensor, .5)
def test_raises_if_rank_is_not_integer_dynamic(self):
with self.test_session():
- tensor = tf.constant([1, 2], dtype=tf.float32, name="my_tensor")
- rank_tensor = tf.placeholder(tf.float32, name="rank_tensor")
+ tensor = constant_op.constant(
+ [1, 2], dtype=dtypes.float32, name="my_tensor")
+ rank_tensor = array_ops.placeholder(dtypes.float32, name="rank_tensor")
with self.assertRaisesRegexp(TypeError,
"must be of type <dtype: 'int32'>"):
- with tf.control_dependencies([tf.assert_rank(tensor, rank_tensor)]):
- tf.identity(tensor).eval(feed_dict={rank_tensor: .5})
+ with ops.control_dependencies(
+ [check_ops.assert_rank(tensor, rank_tensor)]):
+ array_ops.identity(tensor).eval(feed_dict={rank_tensor: .5})
-class AssertRankAtLeastTest(tf.test.TestCase):
+class AssertRankAtLeastTest(test.TestCase):
def test_rank_zero_tensor_raises_if_rank_too_small_static_rank(self):
with self.test_session():
- tensor = tf.constant(1, name="my_tensor")
+ tensor = constant_op.constant(1, name="my_tensor")
desired_rank = 1
with self.assertRaisesRegexp(ValueError, "my_tensor.*rank at least 1"):
- with tf.control_dependencies([tf.assert_rank_at_least(tensor,
- desired_rank)]):
- tf.identity(tensor).eval()
+ with ops.control_dependencies(
+ [check_ops.assert_rank_at_least(tensor, desired_rank)]):
+ array_ops.identity(tensor).eval()
def test_rank_zero_tensor_raises_if_rank_too_small_dynamic_rank(self):
with self.test_session():
- tensor = tf.placeholder(tf.float32, name="my_tensor")
+ tensor = array_ops.placeholder(dtypes.float32, name="my_tensor")
desired_rank = 1
- with tf.control_dependencies([tf.assert_rank_at_least(tensor,
- desired_rank)]):
+ with ops.control_dependencies(
+ [check_ops.assert_rank_at_least(tensor, desired_rank)]):
with self.assertRaisesOpError("my_tensor.*rank"):
- tf.identity(tensor).eval(feed_dict={tensor: 0})
+ array_ops.identity(tensor).eval(feed_dict={tensor: 0})
def test_rank_zero_tensor_doesnt_raise_if_rank_just_right_static_rank(self):
with self.test_session():
- tensor = tf.constant(1, name="my_tensor")
+ tensor = constant_op.constant(1, name="my_tensor")
desired_rank = 0
- with tf.control_dependencies([tf.assert_rank_at_least(tensor,
- desired_rank)]):
- tf.identity(tensor).eval()
+ with ops.control_dependencies(
+ [check_ops.assert_rank_at_least(tensor, desired_rank)]):
+ array_ops.identity(tensor).eval()
def test_rank_zero_tensor_doesnt_raise_if_rank_just_right_dynamic_rank(self):
with self.test_session():
- tensor = tf.placeholder(tf.float32, name="my_tensor")
+ tensor = array_ops.placeholder(dtypes.float32, name="my_tensor")
desired_rank = 0
- with tf.control_dependencies([tf.assert_rank_at_least(tensor,
- desired_rank)]):
- tf.identity(tensor).eval(feed_dict={tensor: 0})
+ with ops.control_dependencies(
+ [check_ops.assert_rank_at_least(tensor, desired_rank)]):
+ array_ops.identity(tensor).eval(feed_dict={tensor: 0})
def test_rank_one_tensor_doesnt_raise_if_rank_too_large_static_rank(self):
with self.test_session():
- tensor = tf.constant([1, 2], name="my_tensor")
+ tensor = constant_op.constant([1, 2], name="my_tensor")
desired_rank = 0
- with tf.control_dependencies([tf.assert_rank_at_least(tensor,
- desired_rank)]):
- tf.identity(tensor).eval()
+ with ops.control_dependencies(
+ [check_ops.assert_rank_at_least(tensor, desired_rank)]):
+ array_ops.identity(tensor).eval()
def test_rank_one_tensor_doesnt_raise_if_rank_too_large_dynamic_rank(self):
with self.test_session():
- tensor = tf.placeholder(tf.float32, name="my_tensor")
+ tensor = array_ops.placeholder(dtypes.float32, name="my_tensor")
desired_rank = 0
- with tf.control_dependencies([tf.assert_rank_at_least(tensor,
- desired_rank)]):
- tf.identity(tensor).eval(feed_dict={tensor: [1, 2]})
+ with ops.control_dependencies(
+ [check_ops.assert_rank_at_least(tensor, desired_rank)]):
+ array_ops.identity(tensor).eval(feed_dict={tensor: [1, 2]})
def test_rank_one_tensor_doesnt_raise_if_rank_just_right_static_rank(self):
with self.test_session():
- tensor = tf.constant([1, 2], name="my_tensor")
+ tensor = constant_op.constant([1, 2], name="my_tensor")
desired_rank = 1
- with tf.control_dependencies([tf.assert_rank_at_least(tensor,
- desired_rank)]):
- tf.identity(tensor).eval()
+ with ops.control_dependencies(
+ [check_ops.assert_rank_at_least(tensor, desired_rank)]):
+ array_ops.identity(tensor).eval()
def test_rank_one_tensor_doesnt_raise_if_rank_just_right_dynamic_rank(self):
with self.test_session():
- tensor = tf.placeholder(tf.float32, name="my_tensor")
+ tensor = array_ops.placeholder(dtypes.float32, name="my_tensor")
desired_rank = 1
- with tf.control_dependencies([tf.assert_rank_at_least(tensor,
- desired_rank)]):
- tf.identity(tensor).eval(feed_dict={tensor: [1, 2]})
+ with ops.control_dependencies(
+ [check_ops.assert_rank_at_least(tensor, desired_rank)]):
+ array_ops.identity(tensor).eval(feed_dict={tensor: [1, 2]})
def test_rank_one_tensor_raises_if_rank_too_small_static_rank(self):
with self.test_session():
- tensor = tf.constant([1, 2], name="my_tensor")
+ tensor = constant_op.constant([1, 2], name="my_tensor")
desired_rank = 2
with self.assertRaisesRegexp(ValueError, "my_tensor.*rank"):
- with tf.control_dependencies([tf.assert_rank_at_least(tensor,
- desired_rank)]):
- tf.identity(tensor).eval()
+ with ops.control_dependencies(
+ [check_ops.assert_rank_at_least(tensor, desired_rank)]):
+ array_ops.identity(tensor).eval()
def test_rank_one_tensor_raises_if_rank_too_small_dynamic_rank(self):
with self.test_session():
- tensor = tf.placeholder(tf.float32, name="my_tensor")
+ tensor = array_ops.placeholder(dtypes.float32, name="my_tensor")
desired_rank = 2
- with tf.control_dependencies([tf.assert_rank_at_least(tensor,
- desired_rank)]):
+ with ops.control_dependencies(
+ [check_ops.assert_rank_at_least(tensor, desired_rank)]):
with self.assertRaisesOpError("my_tensor.*rank"):
- tf.identity(tensor).eval(feed_dict={tensor: [1, 2]})
+ array_ops.identity(tensor).eval(feed_dict={tensor: [1, 2]})
-class AssertNonNegativeTest(tf.test.TestCase):
+class AssertNonNegativeTest(test.TestCase):
def test_raises_when_negative(self):
with self.test_session():
- zoe = tf.constant([-1, -2], name="zoe")
- with tf.control_dependencies([tf.assert_non_negative(zoe)]):
- out = tf.identity(zoe)
+ zoe = constant_op.constant([-1, -2], name="zoe")
+ with ops.control_dependencies([check_ops.assert_non_negative(zoe)]):
+ out = array_ops.identity(zoe)
with self.assertRaisesOpError("zoe"):
out.eval()
def test_doesnt_raise_when_zero_and_positive(self):
with self.test_session():
- lucas = tf.constant([0, 2], name="lucas")
- with tf.control_dependencies([tf.assert_non_negative(lucas)]):
- out = tf.identity(lucas)
+ lucas = constant_op.constant([0, 2], name="lucas")
+ with ops.control_dependencies([check_ops.assert_non_negative(lucas)]):
+ out = array_ops.identity(lucas)
out.eval()
def test_empty_tensor_doesnt_raise(self):
@@ -623,26 +664,26 @@ class AssertNonNegativeTest(tf.test.TestCase):
# and an empty tensor has no elements, so this is trivially satisfied.
# This is standard set theory.
with self.test_session():
- empty = tf.constant([], name="empty")
- with tf.control_dependencies([tf.assert_non_negative(empty)]):
- out = tf.identity(empty)
+ empty = constant_op.constant([], name="empty")
+ with ops.control_dependencies([check_ops.assert_non_negative(empty)]):
+ out = array_ops.identity(empty)
out.eval()
-class AssertNonPositiveTest(tf.test.TestCase):
+class AssertNonPositiveTest(test.TestCase):
def test_doesnt_raise_when_zero_and_negative(self):
with self.test_session():
- tom = tf.constant([0, -2], name="tom")
- with tf.control_dependencies([tf.assert_non_positive(tom)]):
- out = tf.identity(tom)
+ tom = constant_op.constant([0, -2], name="tom")
+ with ops.control_dependencies([check_ops.assert_non_positive(tom)]):
+ out = array_ops.identity(tom)
out.eval()
def test_raises_when_positive(self):
with self.test_session():
- rachel = tf.constant([0, 2], name="rachel")
- with tf.control_dependencies([tf.assert_non_positive(rachel)]):
- out = tf.identity(rachel)
+ rachel = constant_op.constant([0, 2], name="rachel")
+ with ops.control_dependencies([check_ops.assert_non_positive(rachel)]):
+ out = array_ops.identity(rachel)
with self.assertRaisesOpError("rachel"):
out.eval()
@@ -652,89 +693,91 @@ class AssertNonPositiveTest(tf.test.TestCase):
# and an empty tensor has no elements, so this is trivially satisfied.
# This is standard set theory.
with self.test_session():
- empty = tf.constant([], name="empty")
- with tf.control_dependencies([tf.assert_non_positive(empty)]):
- out = tf.identity(empty)
+ empty = constant_op.constant([], name="empty")
+ with ops.control_dependencies([check_ops.assert_non_positive(empty)]):
+ out = array_ops.identity(empty)
out.eval()
-class AssertIntegerTest(tf.test.TestCase):
+class AssertIntegerTest(test.TestCase):
def test_doesnt_raise_when_integer(self):
with self.test_session():
- integers = tf.constant([1, 2], name="integers")
- with tf.control_dependencies([tf.assert_integer(integers)]):
- out = tf.identity(integers)
+ integers = constant_op.constant([1, 2], name="integers")
+ with ops.control_dependencies([check_ops.assert_integer(integers)]):
+ out = array_ops.identity(integers)
out.eval()
def test_raises_when_float(self):
with self.test_session():
- floats = tf.constant([1.0, 2.0], name="floats")
+ floats = constant_op.constant([1.0, 2.0], name="floats")
with self.assertRaisesRegexp(TypeError, "Expected.*integer"):
- tf.assert_integer(floats)
+ check_ops.assert_integer(floats)
-class IsStrictlyIncreasingTest(tf.test.TestCase):
+class IsStrictlyIncreasingTest(test.TestCase):
def test_constant_tensor_is_not_strictly_increasing(self):
with self.test_session():
- self.assertFalse(tf.is_strictly_increasing([1, 1, 1]).eval())
+ self.assertFalse(check_ops.is_strictly_increasing([1, 1, 1]).eval())
def test_decreasing_tensor_is_not_strictly_increasing(self):
with self.test_session():
- self.assertFalse(tf.is_strictly_increasing([1, 0, -1]).eval())
+ self.assertFalse(check_ops.is_strictly_increasing([1, 0, -1]).eval())
def test_2d_decreasing_tensor_is_not_strictly_increasing(self):
with self.test_session():
- self.assertFalse(tf.is_strictly_increasing([[1, 3], [2, 4]]).eval())
+ self.assertFalse(
+ check_ops.is_strictly_increasing([[1, 3], [2, 4]]).eval())
def test_increasing_tensor_is_increasing(self):
with self.test_session():
- self.assertTrue(tf.is_strictly_increasing([1, 2, 3]).eval())
+ self.assertTrue(check_ops.is_strictly_increasing([1, 2, 3]).eval())
def test_increasing_rank_two_tensor(self):
with self.test_session():
- self.assertTrue(tf.is_strictly_increasing([[-1, 2], [3, 4]]).eval())
+ self.assertTrue(
+ check_ops.is_strictly_increasing([[-1, 2], [3, 4]]).eval())
def test_tensor_with_one_element_is_strictly_increasing(self):
with self.test_session():
- self.assertTrue(tf.is_strictly_increasing([1]).eval())
+ self.assertTrue(check_ops.is_strictly_increasing([1]).eval())
def test_empty_tensor_is_strictly_increasing(self):
with self.test_session():
- self.assertTrue(tf.is_strictly_increasing([]).eval())
+ self.assertTrue(check_ops.is_strictly_increasing([]).eval())
-class IsNonDecreasingTest(tf.test.TestCase):
+class IsNonDecreasingTest(test.TestCase):
def test_constant_tensor_is_non_decreasing(self):
with self.test_session():
- self.assertTrue(tf.is_non_decreasing([1, 1, 1]).eval())
+ self.assertTrue(check_ops.is_non_decreasing([1, 1, 1]).eval())
def test_decreasing_tensor_is_not_non_decreasing(self):
with self.test_session():
- self.assertFalse(tf.is_non_decreasing([3, 2, 1]).eval())
+ self.assertFalse(check_ops.is_non_decreasing([3, 2, 1]).eval())
def test_2d_decreasing_tensor_is_not_non_decreasing(self):
with self.test_session():
- self.assertFalse(tf.is_non_decreasing([[1, 3], [2, 4]]).eval())
+ self.assertFalse(check_ops.is_non_decreasing([[1, 3], [2, 4]]).eval())
def test_increasing_rank_one_tensor_is_non_decreasing(self):
with self.test_session():
- self.assertTrue(tf.is_non_decreasing([1, 2, 3]).eval())
+ self.assertTrue(check_ops.is_non_decreasing([1, 2, 3]).eval())
def test_increasing_rank_two_tensor(self):
with self.test_session():
- self.assertTrue(tf.is_non_decreasing([[-1, 2], [3, 3]]).eval())
+ self.assertTrue(check_ops.is_non_decreasing([[-1, 2], [3, 3]]).eval())
def test_tensor_with_one_element_is_non_decreasing(self):
with self.test_session():
- self.assertTrue(tf.is_non_decreasing([1]).eval())
+ self.assertTrue(check_ops.is_non_decreasing([1]).eval())
def test_empty_tensor_is_non_decreasing(self):
with self.test_session():
- self.assertTrue(tf.is_non_decreasing([]).eval())
+ self.assertTrue(check_ops.is_non_decreasing([]).eval())
if __name__ == "__main__":
- tf.test.main()
+ test.main()
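
Every check_ops test above follows the same graph-mode idiom: the assert op only executes when something that depends on it runs, so it is attached to an identity via control_dependencies and triggered by eval(). A minimal standalone sketch of that idiom (not part of the commit; names illustrative):

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.platform import test


class AssertPositiveSketch(test.TestCase):

  def testAssertFiresOnEval(self):
    with self.test_session():
      x = constant_op.constant([1, -2], name="x")
      # The assert op runs only because identity depends on it.
      with ops.control_dependencies([check_ops.assert_positive(x)]):
        out = array_ops.identity(x)
      with self.assertRaisesOpError("x"):
        out.eval()


if __name__ == "__main__":
  test.main()
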
diff --git a/tensorflow/python/kernel_tests/cholesky_op_test.py b/tensorflow/python/kernel_tests/cholesky_op_test.py
index f989e41383..6da5627863 100644
--- a/tensorflow/python/kernel_tests/cholesky_op_test.py
+++ b/tensorflow/python/kernel_tests/cholesky_op_test.py
@@ -13,16 +13,25 @@
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.Cholesky."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
-import tensorflow as tf
+
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes as dtypes_lib
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import gradient_checker
+from tensorflow.python.ops import linalg_ops
+from tensorflow.python.ops import math_ops
+from tensorflow.python.platform import test
+from tensorflow.python.platform import tf_logging
-class CholeskyOpTest(tf.test.TestCase):
+class CholeskyOpTest(test.TestCase):
def _verifyCholeskyBase(self, sess, x, chol, verification):
chol_np, verification_np = sess.run([chol, verification])
@@ -40,8 +49,8 @@ class CholeskyOpTest(tf.test.TestCase):
def _verifyCholesky(self, x):
# Verify that LL^T == x.
with self.test_session() as sess:
- chol = tf.cholesky(x)
- verification = tf.matmul(chol, chol, adjoint_b=True)
+ chol = linalg_ops.cholesky(x)
+ verification = math_ops.matmul(chol, chol, adjoint_b=True)
self._verifyCholeskyBase(sess, x, chol, verification)
def testBasic(self):
@@ -62,18 +71,18 @@ class CholeskyOpTest(tf.test.TestCase):
def testNonSquareMatrix(self):
with self.assertRaises(ValueError):
- tf.cholesky(np.array([[1., 2., 3.], [3., 4., 5.]]))
+ linalg_ops.cholesky(np.array([[1., 2., 3.], [3., 4., 5.]]))
with self.assertRaises(ValueError):
- tf.cholesky(
+ linalg_ops.cholesky(
np.array([[[1., 2., 3.], [3., 4., 5.]], [[1., 2., 3.], [3., 4., 5.]]
]))
def testWrongDimensions(self):
- tensor3 = tf.constant([1., 2.])
+ tensor3 = constant_op.constant([1., 2.])
with self.assertRaises(ValueError):
- tf.cholesky(tensor3)
+ linalg_ops.cholesky(tensor3)
with self.assertRaises(ValueError):
- tf.cholesky(tensor3)
+ linalg_ops.cholesky(tensor3)
def testNotInvertible(self):
# The input should be invertible.
@@ -89,7 +98,7 @@ class CholeskyOpTest(tf.test.TestCase):
self._verifyCholesky(np.empty([2, 0, 0]))
-class CholeskyGradTest(tf.test.TestCase):
+class CholeskyGradTest(test.TestCase):
_backprop_block_size = 32
def getShapes(self, shapeList):
@@ -104,50 +113,59 @@ class CholeskyGradTest(tf.test.TestCase):
np.random.seed(0)
shapes = self.getShapes([self._backprop_block_size + 1])
self.runFiniteDifferences(
- shapes, dtypes=(tf.float32, tf.float64), scalarTest=True)
+ shapes,
+ dtypes=(dtypes_lib.float32, dtypes_lib.float64),
+ scalarTest=True)
def testTwoBlockMatrixFloat(self):
np.random.seed(0)
shapes = self.getShapes([2 * self._backprop_block_size + 1])
- self.runFiniteDifferences(shapes, dtypes=(tf.float32,), scalarTest=True)
+ self.runFiniteDifferences(
+ shapes, dtypes=(dtypes_lib.float32,), scalarTest=True)
def testTwoBlockMatrixDouble(self):
np.random.seed(0)
shapes = self.getShapes([2 * self._backprop_block_size + 1])
- self.runFiniteDifferences(shapes, dtypes=(tf.float64,), scalarTest=True)
+ self.runFiniteDifferences(
+ shapes, dtypes=(dtypes_lib.float64,), scalarTest=True)
def runFiniteDifferences(self,
shapes,
- dtypes=(tf.float32, tf.float64),
+ dtypes=(dtypes_lib.float32, dtypes_lib.float64),
scalarTest=False):
with self.test_session(use_gpu=False):
for shape in shapes:
for batch in False, True:
for dtype in dtypes:
if not scalarTest:
- x = tf.constant(np.random.randn(shape[0], shape[1]), dtype)
- tensor = tf.matmul(x, tf.transpose(x)) / shape[0]
+ x = constant_op.constant(
+ np.random.randn(shape[0], shape[1]), dtype)
+ tensor = math_ops.matmul(x, array_ops.transpose(x)) / shape[0]
else:
# This is designed to be a faster test for larger matrices.
- x = tf.constant(np.random.randn(), dtype)
- R = tf.constant(np.random.randn(shape[0], shape[1]), dtype)
- e = tf.mul(R, x)
- tensor = tf.matmul(e, tf.transpose(e)) / shape[0]
+ x = constant_op.constant(np.random.randn(), dtype)
+ R = constant_op.constant(
+ np.random.randn(shape[0], shape[1]), dtype)
+ e = math_ops.mul(R, x)
+ tensor = math_ops.matmul(e, array_ops.transpose(e)) / shape[0]
# Inner-most matrices in tensor are positive definite.
if batch:
- tensor = tf.tile(tf.expand_dims(tensor, 0), [4, 1, 1])
- y = tf.cholesky(tensor)
+ tensor = array_ops.tile(
+ array_ops.expand_dims(tensor, 0), [4, 1, 1])
+ y = linalg_ops.cholesky(tensor)
if scalarTest:
- y = tf.reduce_mean(y)
- error = tf.test.compute_gradient_error(x, x._shape_as_list(), y,
- y._shape_as_list())
- tf.logging.info("error = %f", error)
- if dtype == tf.float64:
+ y = math_ops.reduce_mean(y)
+ error = gradient_checker.compute_gradient_error(x,
+ x._shape_as_list(),
+ y,
+ y._shape_as_list())
+ tf_logging.info("error = %f", error)
+ if dtype == dtypes_lib.float64:
self.assertLess(error, 1e-5)
else:
self.assertLess(error, 3e-3)
if __name__ == "__main__":
- tf.test.main()
+ test.main()
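The property _verifyCholesky asserts can be reproduced outside the graph; a minimal NumPy sketch under the same construction runFiniteDifferences uses (values illustrative):

import numpy as np

# Build a symmetric positive-definite matrix the way the test does:
# x = A A^T / n is (almost surely) positive definite for random A.
rng = np.random.RandomState(0)
a = rng.randn(4, 4)
x = a.dot(a.T) / 4.0

# Cholesky returns lower-triangular L with L L^T == x; the test's
# math_ops.matmul(chol, chol, adjoint_b=True) computes the same product.
chol = np.linalg.cholesky(x)
assert np.allclose(chol.dot(chol.T), x)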
diff --git a/tensorflow/python/kernel_tests/clip_ops_test.py b/tensorflow/python/kernel_tests/clip_ops_test.py
index b2f6725b82..bbd1ab46ae 100644
--- a/tensorflow/python/kernel_tests/clip_ops_test.py
+++ b/tensorflow/python/kernel_tests/clip_ops_test.py
@@ -12,36 +12,37 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for tensorflow.ops.clip_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-import tensorflow as tf
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import ops
+from tensorflow.python.ops import clip_ops
+from tensorflow.python.platform import test
-class ClipTest(tf.test.TestCase):
+class ClipTest(test.TestCase):
# ClipByValue test
def testClipByValue(self):
with self.test_session():
- x = tf.constant([-5.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3])
- np_ans = [[-4.4, 2.0, 3.0],
- [4.0, 4.4, 4.4]]
+ x = constant_op.constant([-5.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3])
+ np_ans = [[-4.4, 2.0, 3.0], [4.0, 4.4, 4.4]]
clip_value = 4.4
- ans = tf.clip_by_value(x, -clip_value, clip_value)
+ ans = clip_ops.clip_by_value(x, -clip_value, clip_value)
tf_ans = ans.eval()
self.assertAllClose(np_ans, tf_ans)
def testClipByValueNonFinite(self):
with self.test_session():
- x = tf.constant([float('NaN'), float('Inf'), -float('Inf')])
+ x = constant_op.constant([float('NaN'), float('Inf'), -float('Inf')])
np_ans = [float('NaN'), 4.0, -4.0]
clip_value = 4.0
- ans = tf.clip_by_value(x, -clip_value, clip_value)
+ ans = clip_ops.clip_by_value(x, -clip_value, clip_value)
tf_ans = ans.eval()
self.assertAllClose(np_ans, tf_ans)
@@ -50,16 +51,15 @@ class ClipTest(tf.test.TestCase):
def testClipByNormClipped(self):
# Norm clipping when clip_norm < 5
with self.test_session():
- x = tf.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
+ x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
# Norm of x = sqrt(3^2 + 4^2) = 5
- np_ans = [[-2.4, 0.0, 0.0],
- [3.2, 0.0, 0.0]]
+ np_ans = [[-2.4, 0.0, 0.0], [3.2, 0.0, 0.0]]
clip_norm = 4.0
- ans = tf.clip_by_norm(x, clip_norm)
+ ans = clip_ops.clip_by_norm(x, clip_norm)
tf_ans = ans.eval()
- clip_tensor = tf.constant(4.0)
- ans = tf.clip_by_norm(x, clip_norm)
+ clip_tensor = constant_op.constant(4.0)
+ ans = clip_ops.clip_by_norm(x, clip_norm)
tf_ans_tensor = ans.eval()
self.assertAllClose(np_ans, tf_ans)
@@ -68,12 +68,11 @@ class ClipTest(tf.test.TestCase):
def testClipByNormNotClipped(self):
# No norm clipping when clip_norm >= 5
with self.test_session():
- x = tf.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
+ x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
# Norm of x = sqrt(3^2 + 4^2) = 5
- np_ans = [[-3.0, 0.0, 0.0],
- [4.0, 0.0, 0.0]]
+ np_ans = [[-3.0, 0.0, 0.0], [4.0, 0.0, 0.0]]
clip_norm = 6.0
- ans = tf.clip_by_norm(x, clip_norm)
+ ans = clip_ops.clip_by_norm(x, clip_norm)
tf_ans = ans.eval()
self.assertAllClose(np_ans, tf_ans)
@@ -81,12 +80,11 @@ class ClipTest(tf.test.TestCase):
def testClipByNormZero(self):
# No norm clipping when norm = 0
with self.test_session():
- x = tf.constant([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], shape=[2, 3])
+ x = constant_op.constant([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], shape=[2, 3])
# Norm = 0, no changes
- np_ans = [[0.0, 0.0, 0.0],
- [0.0, 0.0, 0.0]]
+ np_ans = [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
clip_norm = 6.0
- ans = tf.clip_by_norm(x, clip_norm)
+ ans = clip_ops.clip_by_norm(x, clip_norm)
tf_ans = ans.eval()
self.assertAllClose(np_ans, tf_ans)
@@ -94,12 +92,11 @@ class ClipTest(tf.test.TestCase):
def testClipByNormClippedWithDim0(self):
# Norm clipping when clip_norm < 5
with self.test_session():
- x = tf.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 3.0], shape=[2, 3])
+ x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 3.0], shape=[2, 3])
# Norm of x[:, 0] = sqrt(3^2 + 4^2) = 5, x[:, 2] = 3
- np_ans = [[-2.4, 0.0, 0.0],
- [3.2, 0.0, 3.0]]
+ np_ans = [[-2.4, 0.0, 0.0], [3.2, 0.0, 3.0]]
clip_norm = 4.0
- ans = tf.clip_by_norm(x, clip_norm, [0])
+ ans = clip_ops.clip_by_norm(x, clip_norm, [0])
tf_ans = ans.eval()
self.assertAllClose(np_ans, tf_ans)
@@ -107,12 +104,11 @@ class ClipTest(tf.test.TestCase):
def testClipByNormClippedWithDim1(self):
# Norm clipping when clip_norm < 5
with self.test_session():
- x = tf.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 3.0], shape=[2, 3])
+ x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 3.0], shape=[2, 3])
# Norm of x[0, :] = 3, x[1, :] = sqrt(3^2 + 4^2) = 5
- np_ans = [[-3.0, 0.0, 0.0],
- [3.2, 0.0, 2.4]]
+ np_ans = [[-3.0, 0.0, 0.0], [3.2, 0.0, 2.4]]
clip_norm = 4.0
- ans = tf.clip_by_norm(x, clip_norm, [1])
+ ans = clip_ops.clip_by_norm(x, clip_norm, [1])
tf_ans = ans.eval()
self.assertAllClose(np_ans, tf_ans)
@@ -120,12 +116,11 @@ class ClipTest(tf.test.TestCase):
def testClipByNormNotClippedWithAxes(self):
# No norm clipping when clip_norm >= 5
with self.test_session():
- x = tf.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 3.0], shape=[2, 3])
+ x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 3.0], shape=[2, 3])
# Norm of x[0, :] = 3, x[1, :] = sqrt(3^2 + 4^2) = 5
- np_ans = [[-3.0, 0.0, 0.0],
- [4.0, 0.0, 3.0]]
+ np_ans = [[-3.0, 0.0, 0.0], [4.0, 0.0, 3.0]]
clip_norm = 6.0
- ans = tf.clip_by_norm(x, clip_norm, [1])
+ ans = clip_ops.clip_by_norm(x, clip_norm, [1])
tf_ans = ans.eval()
self.assertAllClose(np_ans, tf_ans)
@@ -134,17 +129,16 @@ class ClipTest(tf.test.TestCase):
def testClipByGlobalNormClipped(self):
# Norm clipping when clip_norm < 5
with self.test_session():
- x0 = tf.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
- x1 = tf.constant([1.0, -2.0])
+ x0 = constant_op.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
+ x1 = constant_op.constant([1.0, -2.0])
# Global norm of x0 and x1 = sqrt(1 + 4^2 + 2^2 + 2^2) = 5
clip_norm = 4.0
# Answers are the original tensors scaled by 4.0/5.0
- np_ans_0 = [[-1.6, 0.0, 0.0],
- [3.2, 0.0, 0.0]]
+ np_ans_0 = [[-1.6, 0.0, 0.0], [3.2, 0.0, 0.0]]
np_ans_1 = [0.8, -1.6]
- ans, norm = tf.clip_by_global_norm((x0, x1), clip_norm)
+ ans, norm = clip_ops.clip_by_global_norm((x0, x1), clip_norm)
tf_ans_1 = ans[0].eval()
tf_ans_2 = ans[1].eval()
tf_norm = norm.eval()
@@ -156,17 +150,16 @@ class ClipTest(tf.test.TestCase):
def testClipByGlobalNormClippedTensor(self):
# Norm clipping when clip_norm < 5
with self.test_session():
- x0 = tf.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
- x1 = tf.constant([1.0, -2.0])
+ x0 = constant_op.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
+ x1 = constant_op.constant([1.0, -2.0])
# Global norm of x0 and x1 = sqrt(1 + 4^2 + 2^2 + 2^2) = 5
- clip_norm = tf.constant(4.0)
+ clip_norm = constant_op.constant(4.0)
# Answers are the original tensors scaled by 4.0/5.0
- np_ans_0 = [[-1.6, 0.0, 0.0],
- [3.2, 0.0, 0.0]]
+ np_ans_0 = [[-1.6, 0.0, 0.0], [3.2, 0.0, 0.0]]
np_ans_1 = [0.8, -1.6]
- ans, norm = tf.clip_by_global_norm((x0, x1), clip_norm)
+ ans, norm = clip_ops.clip_by_global_norm((x0, x1), clip_norm)
tf_ans_1 = ans[0].eval()
tf_ans_2 = ans[1].eval()
tf_norm = norm.eval()
@@ -178,17 +171,16 @@ class ClipTest(tf.test.TestCase):
def testClipByGlobalNormSupportsNone(self):
# Norm clipping when clip_norm < 5
with self.test_session():
- x0 = tf.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
- x1 = tf.constant([1.0, -2.0])
+ x0 = constant_op.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
+ x1 = constant_op.constant([1.0, -2.0])
# Global norm of x0 and x1 = sqrt(1 + 4^2 + 2^2 + 2^2) = 5
clip_norm = 4.0
# Answers are the original tensors scaled by 4.0/5.0
- np_ans_0 = [[-1.6, 0.0, 0.0],
- [3.2, 0.0, 0.0]]
+ np_ans_0 = [[-1.6, 0.0, 0.0], [3.2, 0.0, 0.0]]
np_ans_1 = [0.8, -1.6]
- ans, norm = tf.clip_by_global_norm((x0, None, x1, None), clip_norm)
+ ans, norm = clip_ops.clip_by_global_norm((x0, None, x1, None), clip_norm)
self.assertTrue(ans[1] is None)
self.assertTrue(ans[3] is None)
tf_ans_1 = ans[0].eval()
@@ -202,18 +194,17 @@ class ClipTest(tf.test.TestCase):
def testClipByGlobalNormWithIndexedSlicesClipped(self):
# Norm clipping when clip_norm < 5
with self.test_session():
- x0 = tf.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
- x1 = tf.IndexedSlices(tf.constant([1.0, -2.0]),
- tf.constant([3, 4]))
+ x0 = constant_op.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
+ x1 = ops.IndexedSlices(
+ constant_op.constant([1.0, -2.0]), constant_op.constant([3, 4]))
# Global norm of x0 and x1 = sqrt(1 + 4^2 + 2^2 + 2^2) = 5
clip_norm = 4.0
# Answers are the original tensors scaled by 4.0/5.0
- np_ans_0 = [[-1.6, 0.0, 0.0],
- [3.2, 0.0, 0.0]]
+ np_ans_0 = [[-1.6, 0.0, 0.0], [3.2, 0.0, 0.0]]
np_ans_1 = [0.8, -1.6]
- ans, norm = tf.clip_by_global_norm([x0, x1], clip_norm)
+ ans, norm = clip_ops.clip_by_global_norm([x0, x1], clip_norm)
tf_ans_1 = ans[0].eval()
tf_ans_2 = ans[1].values.eval()
tf_norm = norm.eval()
@@ -224,11 +215,11 @@ class ClipTest(tf.test.TestCase):
def testClipByGlobalNormPreservesDenseShape(self):
dense_shape = (1,)
- slices = tf.IndexedSlices(
- tf.constant([1.0]),
- tf.constant([0]),
+ slices = ops.IndexedSlices(
+ constant_op.constant([1.0]),
+ constant_op.constant([0]),
dense_shape=dense_shape)
- ans, _ = tf.clip_by_global_norm([slices], 1.0)
+ ans, _ = clip_ops.clip_by_global_norm([slices], 1.0)
modified_slices = ans[0]
self.assertEqual(dense_shape, slices.dense_shape)
self.assertEqual(dense_shape, modified_slices.dense_shape)
@@ -236,15 +227,14 @@ class ClipTest(tf.test.TestCase):
def testClipByGlobalNormNotClipped(self):
# No norm clipping when clip_norm >= 5
with self.test_session():
- x0 = tf.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
- x1 = tf.constant([1.0, -2.0])
+ x0 = constant_op.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
+ x1 = constant_op.constant([1.0, -2.0])
# Global norm of x0 and x1 = sqrt(1 + 4^2 + 2^2 + 2^2) = 5
- np_ans_0 = [[-2.0, 0.0, 0.0],
- [4.0, 0.0, 0.0]]
+ np_ans_0 = [[-2.0, 0.0, 0.0], [4.0, 0.0, 0.0]]
np_ans_1 = [1.0, -2.0]
clip_norm = 6.0
- ans, norm = tf.clip_by_global_norm([x0, x1], clip_norm)
+ ans, norm = clip_ops.clip_by_global_norm([x0, x1], clip_norm)
tf_ans_1 = ans[0].eval()
tf_ans_2 = ans[1].eval()
tf_norm = norm.eval()
@@ -256,15 +246,14 @@ class ClipTest(tf.test.TestCase):
def testClipByGlobalNormZero(self):
# No norm clipping when norm = 0
with self.test_session():
- x0 = tf.constant([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], shape=[2, 3])
- x1 = tf.constant([0.0, 0.0])
+ x0 = constant_op.constant([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], shape=[2, 3])
+ x1 = constant_op.constant([0.0, 0.0])
# Norm = 0, no changes
- np_ans_0 = [[0.0, 0.0, 0.0],
- [0.0, 0.0, 0.0]]
+ np_ans_0 = [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
np_ans_1 = [0.0, 0.0]
clip_norm = 6.0
- ans, norm = tf.clip_by_global_norm([x0, x1], clip_norm)
+ ans, norm = clip_ops.clip_by_global_norm([x0, x1], clip_norm)
tf_ans_1 = ans[0].eval()
tf_ans_2 = ans[1].eval()
tf_norm = norm.eval()
@@ -276,12 +265,11 @@ class ClipTest(tf.test.TestCase):
def testClipByAverageNormClipped(self):
# Norm clipping when clip_norm < average norm (0.83333333)
with self.test_session():
- x = tf.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
+ x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
# Average norm of x = sqrt(3^2 + 4^2) / 6 = 0.83333333
- np_ans = [[-2.88, 0.0, 0.0],
- [3.84, 0.0, 0.0]]
+ np_ans = [[-2.88, 0.0, 0.0], [3.84, 0.0, 0.0]]
clip_norm = 0.8
- ans = tf.clip_by_average_norm(x, clip_norm)
+ ans = clip_ops.clip_by_average_norm(x, clip_norm)
tf_ans = ans.eval()
self.assertAllClose(np_ans, tf_ans)
@@ -289,12 +277,11 @@ class ClipTest(tf.test.TestCase):
def testClipByAverageNormClippedTensor(self):
# Norm clipping when clip_norm < average norm (0.83333333)
with self.test_session():
- x = tf.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
+ x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
# Average norm of x = sqrt(3^2 + 4^2) / 6 = 0.83333333
- np_ans = [[-2.88, 0.0, 0.0],
- [3.84, 0.0, 0.0]]
- clip_norm = tf.constant(0.8)
- ans = tf.clip_by_average_norm(x, clip_norm)
+ np_ans = [[-2.88, 0.0, 0.0], [3.84, 0.0, 0.0]]
+ clip_norm = constant_op.constant(0.8)
+ ans = clip_ops.clip_by_average_norm(x, clip_norm)
tf_ans = ans.eval()
self.assertAllClose(np_ans, tf_ans)
@@ -302,12 +289,11 @@ class ClipTest(tf.test.TestCase):
def testClipByAverageNormNotClipped(self):
# No norm clipping when clip_norm >= average norm (0.83333333)
with self.test_session():
- x = tf.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
+ x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
# Average norm of x = sqrt(3^2 + 4^2) / 6 = 0.83333333
- np_ans = [[-3.0, 0.0, 0.0],
- [4.0, 0.0, 0.0]]
+ np_ans = [[-3.0, 0.0, 0.0], [4.0, 0.0, 0.0]]
clip_norm = 0.9
- ans = tf.clip_by_average_norm(x, clip_norm)
+ ans = clip_ops.clip_by_average_norm(x, clip_norm)
tf_ans = ans.eval()
self.assertAllClose(np_ans, tf_ans)
@@ -315,15 +301,15 @@ class ClipTest(tf.test.TestCase):
def testClipByAverageNormZero(self):
# No norm clipping when the average norm is 0
with self.test_session():
- x = tf.constant([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], shape=[2, 3])
+ x = constant_op.constant([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], shape=[2, 3])
# Average norm = 0, no changes
- np_ans = [[0.0, 0.0, 0.0],
- [0.0, 0.0, 0.0]]
+ np_ans = [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
clip_norm = 0.9
- ans = tf.clip_by_average_norm(x, clip_norm)
+ ans = clip_ops.clip_by_average_norm(x, clip_norm)
tf_ans = ans.eval()
self.assertAllClose(np_ans, tf_ans)
-if __name__ == "__main__":
- tf.test.main()
+
+if __name__ == '__main__':
+ test.main()
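Every hard-coded np_ans in the clip-by-norm tests follows one rule; a NumPy sketch of it using the testClipByNormClipped inputs (illustrative, not the op's implementation):

import numpy as np

x = np.array([[-3.0, 0.0, 0.0], [4.0, 0.0, 0.0]])
clip_norm = 4.0

# clip_by_norm rescales the tensor by clip_norm / ||x||_2 when the L2
# norm exceeds clip_norm, and leaves it unchanged otherwise.
norm = np.sqrt(np.sum(x * x))                  # sqrt(3^2 + 4^2) = 5
clipped = x * clip_norm / norm if norm > clip_norm else x
print(clipped)  # [[-2.4 0. 0.] [3.2 0. 0.]] == np_ans in the test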
diff --git a/tensorflow/python/kernel_tests/concat_op_test.py b/tensorflow/python/kernel_tests/concat_op_test.py
index e779dc7c69..14211f0ccc 100644
--- a/tensorflow/python/kernel_tests/concat_op_test.py
+++ b/tensorflow/python/kernel_tests/concat_op_test.py
@@ -12,36 +12,44 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Functional tests for Concat Op."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import errors_impl
+from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
+from tensorflow.python.ops import gradient_checker
+from tensorflow.python.ops import gradients_impl
+from tensorflow.python.ops import math_ops
+from tensorflow.python.ops import variables
+from tensorflow.python.platform import test
def _call_concat(values, axis, use_concat_v2):
if use_concat_v2:
return gen_array_ops._concat_v2(values, axis)
else:
- return tf.concat(axis, values)
+ return array_ops.concat(axis, values)
-class ConcatOpTest(tf.test.TestCase):
+class ConcatOpTest(test.TestCase):
def testHStack(self):
with self.test_session():
- p1 = tf.placeholder(tf.float32, shape=[4, 4])
- p2 = tf.placeholder(tf.float32, shape=[4, 4])
- c = tf.concat(0, [p1, p2])
+ p1 = array_ops.placeholder(dtypes.float32, shape=[4, 4])
+ p2 = array_ops.placeholder(dtypes.float32, shape=[4, 4])
+ c = array_ops.concat(0, [p1, p2])
params = {
p1: np.random.rand(4, 4).astype("f"),
p2: np.random.rand(4, 4).astype("f")
- }
+ }
result = c.eval(feed_dict=params)
self.assertEqual(result.shape, c.get_shape())
@@ -50,13 +58,13 @@ class ConcatOpTest(tf.test.TestCase):
def testVStack(self):
with self.test_session():
- p1 = tf.placeholder(tf.float32, shape=[4, 4])
- p2 = tf.placeholder(tf.float32, shape=[4, 4])
- c = tf.concat(1, [p1, p2])
+ p1 = array_ops.placeholder(dtypes.float32, shape=[4, 4])
+ p2 = array_ops.placeholder(dtypes.float32, shape=[4, 4])
+ c = array_ops.concat(1, [p1, p2])
params = {
p1: np.random.rand(4, 4).astype("f"),
p2: np.random.rand(4, 4).astype("f")
- }
+ }
result = c.eval(feed_dict=params)
self.assertEqual(result.shape, c.get_shape())
@@ -67,9 +75,9 @@ class ConcatOpTest(tf.test.TestCase):
with self.test_session(use_gpu=True):
p1 = np.random.rand(2, 3).astype("i")
p2 = np.random.rand(2, 3).astype("i")
- x1 = tf.constant(p1)
- x2 = tf.constant(p2)
- c = tf.concat(0, [x1, x2])
+ x1 = constant_op.constant(p1)
+ x2 = constant_op.constant(p2)
+ c = array_ops.concat(0, [x1, x2])
result = c.eval()
self.assertAllEqual(result[:2, :], p1)
self.assertAllEqual(result[2:, :], p2)
@@ -78,10 +86,10 @@ class ConcatOpTest(tf.test.TestCase):
with self.test_session():
p1 = np.random.rand(4, 4).astype("f")
p2 = np.random.rand(4, 4).astype("f")
- v1 = tf.Variable(p1)
- v2 = tf.Variable(p2)
- c = tf.concat(0, [v1, v2])
- tf.global_variables_initializer().run()
+ v1 = variables.Variable(p1)
+ v2 = variables.Variable(p2)
+ c = array_ops.concat(0, [v1, v2])
+ variables.global_variables_initializer().run()
result = c.eval()
self.assertEqual(result.shape, c.get_shape())
@@ -96,8 +104,8 @@ class ConcatOpTest(tf.test.TestCase):
# Random dim to concat on
concat_dim = np.random.randint(5)
params = {}
- if dtype == tf.bfloat16:
- dtype_feed = tf.float32
+ if dtype == dtypes.bfloat16:
+ dtype_feed = dtypes.float32
else:
dtype_feed = dtype
with self.test_session(use_gpu=use_gpu):
@@ -105,19 +113,19 @@ class ConcatOpTest(tf.test.TestCase):
for i in np.arange(num_tensors):
input_shape = shape
input_shape[concat_dim] = np.random.randint(1, 5)
- placeholder = tf.placeholder(dtype_feed, shape=input_shape)
+ placeholder = array_ops.placeholder(dtype_feed, shape=input_shape)
p.append(placeholder)
t = dtype_feed.as_numpy_dtype
params[placeholder] = np.random.rand(*input_shape).astype(t)
if dtype != dtype_feed:
- concat_inputs = [tf.cast(p_i, dtype) for p_i in p]
+ concat_inputs = [math_ops.cast(p_i, dtype) for p_i in p]
else:
concat_inputs = p
- c = tf.concat(concat_dim, concat_inputs)
+ c = array_ops.concat(concat_dim, concat_inputs)
if dtype != dtype_feed:
- c = tf.cast(c, dtype_feed)
+ c = math_ops.cast(c, dtype_feed)
result = c.eval(feed_dict=params)
self.assertEqual(result.shape, c.get_shape())
@@ -137,33 +145,33 @@ class ConcatOpTest(tf.test.TestCase):
self.assertAllClose(result[ind], params[p[i]], 0.01)
def testRandom(self):
- self._testRandom(tf.float32)
- self._testRandom(tf.float32, use_gpu=True)
- self._testRandom(tf.int16)
- self._testRandom(tf.int32, use_gpu=True)
- self._testRandom(tf.bfloat16)
- self._testRandom(tf.bfloat16, use_gpu=True)
+ self._testRandom(dtypes.float32)
+ self._testRandom(dtypes.float32, use_gpu=True)
+ self._testRandom(dtypes.int16)
+ self._testRandom(dtypes.int32, use_gpu=True)
+ self._testRandom(dtypes.bfloat16)
+ self._testRandom(dtypes.bfloat16, use_gpu=True)
def testInvalidConcatDimTypeAndShape(self):
- a = tf.Variable(tf.constant(1.0, shape=[1]))
- b = tf.Variable(tf.constant(2.0, shape=[1]))
+ a = variables.Variable(constant_op.constant(1.0, shape=[1]))
+ b = variables.Variable(constant_op.constant(2.0, shape=[1]))
with self.assertRaises(ValueError):
- tf.concat(a, b)
+ array_ops.concat(a, b)
with self.assertRaises(TypeError):
- tf.concat(4.2, 1)
+ array_ops.concat(4.2, 1)
with self.assertRaises(ValueError):
- tf.concat(a, 1)
+ array_ops.concat(a, 1)
with self.assertRaises(TypeError):
- tf.concat(a, [a, b])
+ array_ops.concat(a, [a, b])
with self.assertRaises(ValueError):
- tf.concat([3], [a, b])
+ array_ops.concat([3], [a, b])
with self.assertRaises(ValueError):
- tf.concat(0, [])
+ array_ops.concat(0, [])
# An integer tensor for shape dim should throw no error.
- tf.concat(tf.constant(0, shape=[]), 1)
+ array_ops.concat(constant_op.constant(0, shape=[]), 1)
# A non-scalar tensor for shape should throw ValueError.
with self.assertRaises(ValueError):
- tf.concat(tf.constant(0, shape=[1]), 1)
+ array_ops.concat(constant_op.constant(0, shape=[1]), 1)
def _testGradientsSimple(self, use_gpu, use_concat_v2):
with self.test_session(use_gpu=use_gpu):
@@ -174,14 +182,16 @@ class ConcatOpTest(tf.test.TestCase):
t = np.random.rand(*shape).astype("f")
inp.append(t)
inp_tensors.append(
- tf.constant([float(y) for y in t.flatten()],
- shape=shape, dtype=tf.float32))
+ constant_op.constant(
+ [float(y) for y in t.flatten()],
+ shape=shape,
+ dtype=dtypes.float32))
c = _call_concat(inp_tensors, 1, use_concat_v2)
output_shape = [10, 9, 2]
grad_inp = np.random.rand(*output_shape).astype("f")
- grad_tensor = tf.constant([float(x) for x in grad_inp.flatten()],
- shape=output_shape)
- grad = tf.gradients([c], inp_tensors, [grad_tensor])
+ grad_tensor = constant_op.constant(
+ [float(x) for x in grad_inp.flatten()], shape=output_shape)
+ grad = gradients_impl.gradients([c], inp_tensors, [grad_tensor])
concated_grad = _call_concat(grad, 1, use_concat_v2)
result = concated_grad.eval()
self.assertAllEqual(result, grad_inp)
@@ -201,14 +211,16 @@ class ConcatOpTest(tf.test.TestCase):
t = np.random.rand(*shape).astype("f")
inp.append(t)
inp_tensors.append(
- tf.constant([float(y) for y in t.flatten()],
- shape=shape, dtype=tf.float32))
+ constant_op.constant(
+ [float(y) for y in t.flatten()],
+ shape=shape,
+ dtype=dtypes.float32))
c = _call_concat(inp_tensors, 0, use_concat_v2)
output_shape = [9, 10, 2]
grad_inp = np.random.rand(*output_shape).astype("f")
- grad_tensor = tf.constant([float(x) for x in grad_inp.flatten()],
- shape=output_shape)
- grad = tf.gradients([c], inp_tensors, [grad_tensor])
+ grad_tensor = constant_op.constant(
+ [float(x) for x in grad_inp.flatten()], shape=output_shape)
+ grad = gradients_impl.gradients([c], inp_tensors, [grad_tensor])
concated_grad = _call_concat(grad, 0, use_concat_v2)
result = concated_grad.eval()
@@ -229,14 +241,16 @@ class ConcatOpTest(tf.test.TestCase):
t = np.random.rand(*shape).astype("f")
inp.append(t)
inp_tensors.append(
- tf.constant([float(y) for y in t.flatten()],
- shape=shape, dtype=tf.float32))
+ constant_op.constant(
+ [float(y) for y in t.flatten()],
+ shape=shape,
+ dtype=dtypes.float32))
c = _call_concat(inp_tensors, 2, use_concat_v2)
output_shape = [10, 2, 9]
grad_inp = np.random.rand(*output_shape).astype("f")
- grad_tensor = tf.constant([float(x) for x in grad_inp.flatten()],
- shape=output_shape)
- grad = tf.gradients([c], inp_tensors, [grad_tensor])
+ grad_tensor = constant_op.constant(
+ [float(x) for x in grad_inp.flatten()], shape=output_shape)
+ grad = gradients_impl.gradients([c], inp_tensors, [grad_tensor])
concated_grad = _call_concat(grad, 2, use_concat_v2)
result = concated_grad.eval()
@@ -265,15 +279,17 @@ class ConcatOpTest(tf.test.TestCase):
t = np.random.rand(*shape).astype("f")
inp.append(t)
inp_tensors.append(
- tf.constant([float(y) for y in t.flatten()],
- shape=shape, dtype=tf.float32))
+ constant_op.constant(
+ [float(y) for y in t.flatten()],
+ shape=shape,
+ dtype=dtypes.float32))
c = _call_concat(inp_tensors, concat_dim, use_concat_v2)
output_shape = input_shape
output_shape[concat_dim] = concat_dim_sizes.sum()
grad_inp = np.random.rand(*output_shape).astype("f")
- grad_tensor = tf.constant([float(x) for x in grad_inp.flatten()],
- shape=output_shape)
- grad = tf.gradients([c], inp_tensors, [grad_tensor])
+ grad_tensor = constant_op.constant(
+ [float(x) for x in grad_inp.flatten()], shape=output_shape)
+ grad = gradients_impl.gradients([c], inp_tensors, [grad_tensor])
concated_grad = _call_concat(grad, concat_dim, use_concat_v2)
result = concated_grad.eval()
@@ -289,41 +305,57 @@ class ConcatOpTest(tf.test.TestCase):
def testShapeError(self):
# Rank doesn't match.
with self.assertRaises(ValueError):
- tf.concat(1, [tf.constant(10.0, shape=[4, 4, 4, 4]),
- tf.constant(20.0, shape=[4, 4, 4])])
+ array_ops.concat(
+ 1, [
+ constant_op.constant(
+ 10.0, shape=[4, 4, 4, 4]), constant_op.constant(
+ 20.0, shape=[4, 4, 4])
+ ])
# Dimensions don't match in a non-concat dim.
with self.assertRaises(ValueError):
- tf.concat(1, [tf.constant(10.0, shape=[1, 2, 1]),
- tf.constant(20.0, shape=[3, 2, 1])])
+ array_ops.concat(
+ 1, [
+ constant_op.constant(
+ 10.0, shape=[1, 2, 1]), constant_op.constant(
+ 20.0, shape=[3, 2, 1])
+ ])
# concat_dim out of range.
with self.assertRaises(ValueError):
- tf.concat(3, [tf.constant(10.0, shape=[4, 4, 4]),
- tf.constant(20.0, shape=[4, 4, 4])])
+ array_ops.concat(
+ 3, [
+ constant_op.constant(
+ 10.0, shape=[4, 4, 4]), constant_op.constant(
+ 20.0, shape=[4, 4, 4])
+ ])
# concat_dim out of range
with self.assertRaises(ValueError):
- tf.concat(-4, [tf.constant(10.0, shape=[4, 4, 4]),
- tf.constant(20.0, shape=[4, 4, 4])])
+ array_ops.concat(
+ -4, [
+ constant_op.constant(
+ 10.0, shape=[4, 4, 4]), constant_op.constant(
+ 20.0, shape=[4, 4, 4])
+ ])
def testShapeWithUnknownConcatDim(self):
- p1 = tf.placeholder(tf.float32)
- c1 = tf.constant(10.0, shape=[4, 4, 4, 4])
- p2 = tf.placeholder(tf.float32)
- c2 = tf.constant(20.0, shape=[4, 4, 4, 4])
- dim = tf.placeholder(tf.int32)
- concat = tf.concat(dim, [p1, c1, p2, c2])
+ p1 = array_ops.placeholder(dtypes.float32)
+ c1 = constant_op.constant(10.0, shape=[4, 4, 4, 4])
+ p2 = array_ops.placeholder(dtypes.float32)
+ c2 = constant_op.constant(20.0, shape=[4, 4, 4, 4])
+ dim = array_ops.placeholder(dtypes.int32)
+ concat = array_ops.concat(dim, [p1, c1, p2, c2])
self.assertEqual(4, concat.get_shape().ndims)
# All dimensions unknown.
- concat2 = tf.concat(dim, [p1, p2])
+ concat2 = array_ops.concat(dim, [p1, p2])
self.assertEqual(None, concat2.get_shape())
# Rank doesn't match.
- c3 = tf.constant(30.0, shape=[4, 4, 4])
+ c3 = constant_op.constant(30.0, shape=[4, 4, 4])
with self.assertRaises(ValueError):
- tf.concat(dim, [p1, c1, p2, c3])
+ array_ops.concat(dim, [p1, c1, p2, c3])
def testZeroSize(self):
# Verify that concat doesn't crash and burn for zero size inputs
@@ -339,87 +371,102 @@ class ConcatOpTest(tf.test.TestCase):
x1 = np.random.randn(*(shape0 + (n1,) + shape1))
correct = np.concatenate([x0, x1], axis=axis)
# TODO(irving): Make tf.concat handle map, then drop list().
- xs = list(map(tf.constant, [x0, x1]))
- c = tf.concat(axis, xs)
+ xs = list(map(constant_op.constant, [x0, x1]))
+ c = array_ops.concat(axis, xs)
self.assertAllEqual(c.eval(), correct)
# Check gradients
dc = np.random.randn(*c.get_shape().as_list())
- dxs = sess.run(tf.gradients(c, xs, dc))
+ dxs = sess.run(gradients_impl.gradients(c, xs, dc))
self.assertAllEqual(dc, np.concatenate(dxs, axis=axis))
def testTensorConcatDim0Grad(self):
x_shapes = [[20, 7, 3], [10, 7, 3], [14, 7, 3]]
output_shape = [44, 7, 3]
- x_vals = [np.random.random_sample(x_shape).astype(
- np.float64) for x_shape in x_shapes]
+ x_vals = [
+ np.random.random_sample(x_shape).astype(np.float64)
+ for x_shape in x_shapes
+ ]
with self.test_session():
- xs = [tf.constant(x_val) for x_val in x_vals]
- output = tf.concat(0, xs)
- err = tf.test.compute_gradient_error(xs, x_shapes, output, output_shape)
+ xs = [constant_op.constant(x_val) for x_val in x_vals]
+ output = array_ops.concat(0, xs)
+ err = gradient_checker.compute_gradient_error(xs, x_shapes, output,
+ output_shape)
self.assertLess(err, 1e-11)
def testTensorConcatDim1Grad(self):
x_shapes = [[20, 7, 3], [20, 3, 3], [20, 1, 3]]
output_shape = [20, 11, 3]
- x_vals = [np.random.random_sample(x_shape).astype(
- np.float64) for x_shape in x_shapes]
+ x_vals = [
+ np.random.random_sample(x_shape).astype(np.float64)
+ for x_shape in x_shapes
+ ]
with self.test_session():
- xs = [tf.constant(x_val) for x_val in x_vals]
- output = tf.concat(1, xs)
- err = tf.test.compute_gradient_error(xs, x_shapes, output, output_shape)
+ xs = [constant_op.constant(x_val) for x_val in x_vals]
+ output = array_ops.concat(1, xs)
+ err = gradient_checker.compute_gradient_error(xs, x_shapes, output,
+ output_shape)
self.assertLess(err, 1e-11)
def testIndexedSlicesConcatDim0Grad(self):
x_shapes = [[20, 7, 3], [10, 7, 3], [14, 7, 3]]
output_shape = [4, 7, 3]
- x_vals = [np.random.random_sample(x_shape).astype(
- np.float64) for x_shape in x_shapes]
+ x_vals = [
+ np.random.random_sample(x_shape).astype(np.float64)
+ for x_shape in x_shapes
+ ]
with self.test_session():
- xs = [tf.constant(x_val) for x_val in x_vals]
- x_concat = tf.concat(0, xs)
- output = tf.gather(x_concat, [1, 2, 0, 5])
- err = tf.test.compute_gradient_error(xs, x_shapes, output, output_shape)
+ xs = [constant_op.constant(x_val) for x_val in x_vals]
+ x_concat = array_ops.concat(0, xs)
+ output = array_ops.gather(x_concat, [1, 2, 0, 5])
+ err = gradient_checker.compute_gradient_error(xs, x_shapes, output,
+ output_shape)
self.assertLess(err, 1e-11)
def testIndexedSlicesConcatDim1Grad(self):
x_shapes = [[20, 7, 3], [20, 3, 3], [20, 1, 3]]
output_shape = [4, 11, 3]
- x_vals = [np.random.random_sample(x_shape).astype(
- np.float64) for x_shape in x_shapes]
+ x_vals = [
+ np.random.random_sample(x_shape).astype(np.float64)
+ for x_shape in x_shapes
+ ]
with self.test_session():
- xs = [tf.constant(x_val) for x_val in x_vals]
- x_concat = tf.concat(1, xs)
- output = tf.gather(x_concat, [1, 2, 0, 5])
- err = tf.test.compute_gradient_error(xs, x_shapes, output, output_shape)
+ xs = [constant_op.constant(x_val) for x_val in x_vals]
+ x_concat = array_ops.concat(1, xs)
+ output = array_ops.gather(x_concat, [1, 2, 0, 5])
+ err = gradient_checker.compute_gradient_error(xs, x_shapes, output,
+ output_shape)
self.assertLess(err, 1e-11)
def testIndexedSlicesConcatDim2Grad(self):
x_shapes = [[20, 7, 3], [20, 7, 1], [20, 7, 2]]
output_shape = [4, 7, 6]
- x_vals = [np.random.random_sample(x_shape).astype(
- np.float64) for x_shape in x_shapes]
+ x_vals = [
+ np.random.random_sample(x_shape).astype(np.float64)
+ for x_shape in x_shapes
+ ]
with self.test_session():
- xs = [tf.constant(x_val) for x_val in x_vals]
- x_concat = tf.concat(2, xs)
- output = tf.gather(x_concat, [1, 2, 0, 5])
- err = tf.test.compute_gradient_error(xs, x_shapes, output, output_shape)
+ xs = [constant_op.constant(x_val) for x_val in x_vals]
+ x_concat = array_ops.concat(2, xs)
+ output = array_ops.gather(x_concat, [1, 2, 0, 5])
+ err = gradient_checker.compute_gradient_error(xs, x_shapes, output,
+ output_shape)
self.assertLess(err, 1e-11)
def testConcatTuple(self):
c1 = np.random.rand(4, 4)
c2 = np.random.rand(4, 4)
with self.test_session():
- concat_list_t = tf.concat(0, [c1, c2])
- concat_tuple_t = tf.concat(0, (c1, c2))
+ concat_list_t = array_ops.concat(0, [c1, c2])
+ concat_tuple_t = array_ops.concat(0, (c1, c2))
self.assertAllEqual(concat_list_t.eval(), concat_tuple_t.eval())
def testConcatNoScalars(self):
with self.test_session():
- scalar = tf.constant(7)
- dim = tf.placeholder(tf.int32)
+ scalar = constant_op.constant(7)
+ dim = array_ops.placeholder(dtypes.int32)
with self.assertRaisesRegexp(
ValueError, r"Can't concatenate scalars \(use tf\.pack instead\)"):
- tf.concat(dim, [scalar, scalar, scalar])
+ array_ops.concat(dim, [scalar, scalar, scalar])
# important as the gpu implementation could fail if
# shared memory is not large enough for all the inputs
@@ -429,19 +476,19 @@ class ConcatOpTest(tf.test.TestCase):
params = {}
p = []
shape = np.array([7, 13])
- if tf.test.is_gpu_available():
+ if test.is_gpu_available():
num_tensors = 10000
else:
num_tensors = 1000
for i in np.arange(num_tensors):
input_shape = shape
- placeholder = tf.placeholder(tf.float32, shape=input_shape)
+ placeholder = array_ops.placeholder(dtypes.float32, shape=input_shape)
p.append(placeholder)
params[placeholder] = np.random.rand(*input_shape).astype(np.float32)
concat_inputs = p
- c = tf.concat(concat_dim, concat_inputs)
+ c = array_ops.concat(concat_dim, concat_inputs)
result = c.eval(feed_dict=params)
self.assertEqual(result.shape, c.get_shape())
@@ -462,19 +509,16 @@ class ConcatOpTest(tf.test.TestCase):
t1 = [[1, 2, 3], [4, 5, 6]]
t2 = [[7, 8, 9], [10, 11, 12]]
- c = tf.concat(-2, [t1, t2])
+ c = array_ops.concat(-2, [t1, t2])
output = c.eval()
self.assertEqual([4, 3], c.get_shape().as_list())
- self.assertAllEqual(
- [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]],
- output)
+ self.assertAllEqual([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]],
+ output)
- c = tf.concat(-1, [t1, t2])
+ c = array_ops.concat(-1, [t1, t2])
self.assertEqual([2, 6], c.get_shape().as_list())
output = c.eval()
- self.assertAllEqual(
- [[1, 2, 3, 7, 8, 9], [4, 5, 6, 10, 11, 12]],
- output)
+ self.assertAllEqual([[1, 2, 3, 7, 8, 9], [4, 5, 6, 10, 11, 12]], output)
def testConcatV2Empty(self):
with self.test_session(use_gpu=True):
@@ -498,72 +542,70 @@ class ConcatOpTest(tf.test.TestCase):
c = gen_array_ops._concat_v2([t1, t2], -2)
self.assertEqual([4, 3], c.get_shape().as_list())
output = c.eval()
- self.assertAllEqual(
- [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]],
- output)
+ self.assertAllEqual([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]],
+ output)
c = gen_array_ops._concat_v2([t1, t2], -1)
self.assertEqual([2, 6], c.get_shape().as_list())
output = c.eval()
- self.assertAllEqual(
- [[1, 2, 3, 7, 8, 9], [4, 5, 6, 10, 11, 12]],
- output)
+ self.assertAllEqual([[1, 2, 3, 7, 8, 9], [4, 5, 6, 10, 11, 12]], output)
-class ConcatOffsetTest(tf.test.TestCase):
+class ConcatOffsetTest(test.TestCase):
def testBasic(self):
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu) as sess:
- cdim = tf.constant(1, tf.int32)
- s0 = tf.constant([2, 3, 5], tf.int32)
- s1 = tf.constant([2, 7, 5], tf.int32)
- s2 = tf.constant([2, 20, 5], tf.int32)
+ cdim = constant_op.constant(1, dtypes.int32)
+ s0 = constant_op.constant([2, 3, 5], dtypes.int32)
+ s1 = constant_op.constant([2, 7, 5], dtypes.int32)
+ s2 = constant_op.constant([2, 20, 5], dtypes.int32)
off = gen_array_ops._concat_offset(cdim, [s0, s1, s2])
ans = sess.run(off)
self.assertAllEqual(ans, [[0, 0, 0], [0, 3, 0], [0, 10, 0]])
def testNotVector(self):
with self.test_session() as sess:
- cdim = tf.constant(1, tf.int32)
- s0 = tf.constant([[2, 3, 5]], tf.int32)
- s1 = tf.constant([[2, 7, 5]], tf.int32)
+ cdim = constant_op.constant(1, dtypes.int32)
+ s0 = constant_op.constant([[2, 3, 5]], dtypes.int32)
+ s1 = constant_op.constant([[2, 7, 5]], dtypes.int32)
off = gen_array_ops._concat_offset(cdim, [s0, s1])
- with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
+ with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
r"should be a vector"):
sess.run(off)
def testConcatDimOutOfRange(self):
with self.test_session() as sess:
- cdim = tf.constant(4, tf.int32)
- s0 = tf.constant([2, 3, 5], tf.int32)
- s1 = tf.constant([2, 7, 5], tf.int32)
+ cdim = constant_op.constant(4, dtypes.int32)
+ s0 = constant_op.constant([2, 3, 5], dtypes.int32)
+ s1 = constant_op.constant([2, 7, 5], dtypes.int32)
off = gen_array_ops._concat_offset(cdim, [s0, s1])
- with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
+ with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
r"Concat dim is out of range: 4 vs. 3"):
sess.run(off)
def testDimMismatch(self):
with self.test_session() as sess:
- cdim = tf.constant(1, tf.int32)
- s0 = tf.constant([2, 3, 5], tf.int32)
- s1 = tf.constant([2, 7, 5, 10], tf.int32)
+ cdim = constant_op.constant(1, dtypes.int32)
+ s0 = constant_op.constant([2, 3, 5], dtypes.int32)
+ s1 = constant_op.constant([2, 7, 5, 10], dtypes.int32)
off = gen_array_ops._concat_offset(cdim, [s0, s1])
- with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
+ with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
r"should contain 3 elem"):
sess.run(off)
def testSizeMismatch(self):
with self.test_session() as sess:
- cdim = tf.constant(1, tf.int32)
- s0 = tf.constant([2, 3, 5], tf.int32)
- s1 = tf.constant([2, 7, 10], tf.int32)
+ cdim = constant_op.constant(1, dtypes.int32)
+ s0 = constant_op.constant([2, 3, 5], dtypes.int32)
+ s1 = constant_op.constant([2, 7, 10], dtypes.int32)
off = gen_array_ops._concat_offset(cdim, [s0, s1])
with self.assertRaisesRegexp(
- tf.errors.InvalidArgumentError,
+ errors_impl.InvalidArgumentError,
r"All dimensions except 1 must match. Input 1 has shape \[2 7 10\] "
r"and doesn't match input 0 with shape \[2 3 5\]."):
sess.run(off)
+
if __name__ == "__main__":
- tf.test.main()
+ test.main()
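ConcatOffsetTest.testBasic expects offsets that are running sums of the input sizes along the concat dimension; a plain-Python sketch of that rule (illustrative, not the registered kernel):

def concat_offset(cdim, shapes):
  # Input i starts where the previous inputs end along cdim;
  # every other dimension has offset 0.
  offsets, running = [], 0
  for s in shapes:
    off = [0] * len(s)
    off[cdim] = running
    offsets.append(off)
    running += s[cdim]
  return offsets

print(concat_offset(1, [[2, 3, 5], [2, 7, 5], [2, 20, 5]]))
# [[0, 0, 0], [0, 3, 0], [0, 10, 0]] -- the ans asserted in testBasic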
diff --git a/tensorflow/python/kernel_tests/conditional_accumulator_test.py b/tensorflow/python/kernel_tests/conditional_accumulator_test.py
index 5985906e3f..7570523495 100644
--- a/tensorflow/python/kernel_tests/conditional_accumulator_test.py
+++ b/tensorflow/python/kernel_tests/conditional_accumulator_test.py
@@ -20,17 +20,28 @@ from __future__ import print_function
import time
import numpy as np
-import tensorflow as tf
+
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes as dtypes_lib
+from tensorflow.python.framework import errors_impl
+from tensorflow.python.framework import ops
+from tensorflow.python.framework import tensor_shape
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import data_flow_ops
+from tensorflow.python.ops import math_ops
+from tensorflow.python.ops import state_ops
+from tensorflow.python.ops import variables
+from tensorflow.python.platform import test
# from functools import reduce
-class ConditionalAccumulatorTest(tf.test.TestCase):
+class ConditionalAccumulatorTest(test.TestCase):
def testConstructor(self):
- with tf.Graph().as_default():
- q = tf.ConditionalAccumulator(tf.float32, name="Q")
- self.assertTrue(isinstance(q.accumulator_ref, tf.Tensor))
+ with ops.Graph().as_default():
+ q = data_flow_ops.ConditionalAccumulator(dtypes_lib.float32, name="Q")
+ self.assertTrue(isinstance(q.accumulator_ref, ops.Tensor))
self.assertProtoEquals("""
name:'Q' op:'ConditionalAccumulator'
attr { key: 'dtype' value { type: DT_FLOAT } }
@@ -40,10 +51,12 @@ class ConditionalAccumulatorTest(tf.test.TestCase):
""", q.accumulator_ref.op.node_def)
def testConstructorWithShape(self):
- with tf.Graph().as_default():
- q = tf.ConditionalAccumulator(
- tf.float32, name="Q", shape=tf.TensorShape([1, 5, 2, 8]))
- self.assertTrue(isinstance(q.accumulator_ref, tf.Tensor))
+ with ops.Graph().as_default():
+ q = data_flow_ops.ConditionalAccumulator(
+ dtypes_lib.float32,
+ name="Q",
+ shape=tensor_shape.TensorShape([1, 5, 2, 8]))
+ self.assertTrue(isinstance(q.accumulator_ref, ops.Tensor))
self.assertProtoEquals("""
name:'Q' op:'ConditionalAccumulator'
attr { key: 'dtype' value { type: DT_FLOAT } }
@@ -58,30 +71,31 @@ class ConditionalAccumulatorTest(tf.test.TestCase):
def testAccumulatorSizeEmpty(self):
with self.test_session():
- q = tf.ConditionalAccumulator(tf.float32, name="Q")
+ q = data_flow_ops.ConditionalAccumulator(dtypes_lib.float32, name="Q")
self.assertEqual(q.num_accumulated().eval(), 0)
def testAccumulatorSetGlobalStep(self):
with self.test_session():
- q = tf.ConditionalAccumulator(
- tf.float32, name="Q", shape=tf.TensorShape([1]))
+ q = data_flow_ops.ConditionalAccumulator(
+ dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
set_global_step_op = q.set_global_step(1)
set_global_step_op.run()
def testAccumulatorApplyGradFloat32(self):
with self.test_session():
- q = tf.ConditionalAccumulator(
- tf.float32, name="Q", shape=tf.TensorShape([1]))
+ q = data_flow_ops.ConditionalAccumulator(
+ dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
accum_op = q.apply_grad((10.0,))
accum_op.run()
def testDtypes(self):
with self.test_session() as sess:
- dtypes = [tf.float16, tf.float32, tf.float64]
+ dtypes = [dtypes_lib.float16, dtypes_lib.float32, dtypes_lib.float64]
for i in range(len(dtypes)):
dtype = dtypes[i]
- q = tf.ConditionalAccumulator(dtype, shape=tf.TensorShape([1]))
+ q = data_flow_ops.ConditionalAccumulator(
+ dtype, shape=tensor_shape.TensorShape([1]))
elems = np.arange(10).astype(dtype.as_numpy_dtype)
for e in elems:
@@ -93,14 +107,14 @@ class ConditionalAccumulatorTest(tf.test.TestCase):
def testAccumulatorMultipleAccumulators(self):
with self.test_session():
- q_f32_0 = tf.ConditionalAccumulator(
- tf.float32, name="Q", shape=tf.TensorShape([1]))
- q_f32_1 = tf.ConditionalAccumulator(
- tf.float32, name="Q", shape=tf.TensorShape([1]))
- q_f16_0 = tf.ConditionalAccumulator(
- tf.float16, name="Q", shape=tf.TensorShape([1]))
- q_f16_1 = tf.ConditionalAccumulator(
- tf.float16, name="Q", shape=tf.TensorShape([1]))
+ q_f32_0 = data_flow_ops.ConditionalAccumulator(
+ dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
+ q_f32_1 = data_flow_ops.ConditionalAccumulator(
+ dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
+ q_f16_0 = data_flow_ops.ConditionalAccumulator(
+ dtypes_lib.float16, name="Q", shape=tensor_shape.TensorShape([1]))
+ q_f16_1 = data_flow_ops.ConditionalAccumulator(
+ dtypes_lib.float16, name="Q", shape=tensor_shape.TensorShape([1]))
accums = [q_f16_0, q_f16_1, q_f32_0, q_f32_1]
for i in range(len(accums)):
@@ -112,7 +126,8 @@ class ConditionalAccumulatorTest(tf.test.TestCase):
def testAccumulatorApplyAndTakeGradWithShape(self):
with self.test_session():
- q = tf.ConditionalAccumulator(tf.float32, name="Q", shape=(3, 2))
+ q = data_flow_ops.ConditionalAccumulator(
+ dtypes_lib.float32, name="Q", shape=(3, 2))
elems = [[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],
[[10.0, 20.0], [30.0, 40.0], [50.0, 60.0]]]
elems_ave = [[(a + b) / len(elems) for a, b in zip(x, y)]
@@ -131,7 +146,8 @@ class ConditionalAccumulatorTest(tf.test.TestCase):
self.assertTrue(is_all_equal)
def testAccumulatorApplyGradWithWrongShape(self):
- q = tf.ConditionalAccumulator(tf.float32, name="Q", shape=(3, 2))
+ q = data_flow_ops.ConditionalAccumulator(
+ dtypes_lib.float32, name="Q", shape=(3, 2))
with self.assertRaises(ValueError):
q.apply_grad([[1.0, 2.0], [3.0, 4.0]])
@@ -141,9 +157,10 @@ class ConditionalAccumulatorTest(tf.test.TestCase):
def testAccumulatorDynamicShape(self):
with self.test_session() as sess:
- q = tf.ConditionalAccumulator(tf.float32, name="Q", shape=None)
+ q = data_flow_ops.ConditionalAccumulator(
+ dtypes_lib.float32, name="Q", shape=None)
- x = tf.placeholder(tf.float32)
+ x = array_ops.placeholder(dtypes_lib.float32)
accum_op = q.apply_grad(x)
@@ -165,25 +182,26 @@ class ConditionalAccumulatorTest(tf.test.TestCase):
def testAccumulatorWrongDynamicShape(self):
with self.test_session() as sess:
- q = tf.ConditionalAccumulator(tf.float32, name="Q", shape=None)
+ q = data_flow_ops.ConditionalAccumulator(
+ dtypes_lib.float32, name="Q", shape=None)
- x = tf.placeholder(tf.float32)
+ x = array_ops.placeholder(dtypes_lib.float32)
accum_op = q.apply_grad(x)
# First successful apply_grad determines shape
sess.run(accum_op, feed_dict={x: [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]})
- with self.assertRaises(tf.errors.InvalidArgumentError):
+ with self.assertRaises(errors_impl.InvalidArgumentError):
sess.run(accum_op, feed_dict={x: [[1.0, 2.0], [3.0, 4.0]]})
- with self.assertRaises(tf.errors.InvalidArgumentError):
+ with self.assertRaises(errors_impl.InvalidArgumentError):
sess.run(accum_op, feed_dict={x: [[1.0], [2.0], [3.0]]})
def testAccumulatorSizeAfterApplyGrad(self):
with self.test_session():
- q = tf.ConditionalAccumulator(
- tf.float32, name="Q", shape=tf.TensorShape([1]))
+ q = data_flow_ops.ConditionalAccumulator(
+ dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
accum_op = q.apply_grad((10.0,))
self.assertEqual(q.num_accumulated().eval(), 0)
accum_op.run()
@@ -193,8 +211,8 @@ class ConditionalAccumulatorTest(tf.test.TestCase):
def testAccumulatorSizeAfterApplyGradAndTakeGrad(self):
with self.test_session():
- q = tf.ConditionalAccumulator(
- tf.float32, name="Q", shape=tf.TensorShape([1]))
+ q = data_flow_ops.ConditionalAccumulator(
+ dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
accum_op = q.apply_grad((10.0,))
extract_t = q.take_grad(2)
@@ -221,8 +239,8 @@ class ConditionalAccumulatorTest(tf.test.TestCase):
def testAccumulatorTakeGrad(self):
with self.test_session():
- q = tf.ConditionalAccumulator(
- tf.float32, name="Q", shape=tf.TensorShape([1]))
+ q = data_flow_ops.ConditionalAccumulator(
+ dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
elems = [10.0, 20.0]
elems_ave = sum(elems) / len(elems)
@@ -236,7 +254,7 @@ class ConditionalAccumulatorTest(tf.test.TestCase):
self.assertEqual(elems_ave, val)
accum_ops = [q.apply_grad((x,), local_step=1) for x in elems]
- takeg_t = q.take_grad(tf.constant(1))
+ takeg_t = q.take_grad(constant_op.constant(1))
for accum_op in accum_ops:
accum_op.run()
@@ -246,8 +264,8 @@ class ConditionalAccumulatorTest(tf.test.TestCase):
def testAccumulatorInvalidTakeGrad(self):
with self.test_session():
- q = tf.ConditionalAccumulator(
- tf.float32, name="Q", shape=tf.TensorShape([1]))
+ q = data_flow_ops.ConditionalAccumulator(
+ dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
elems = [10.0, 20.0]
accum_ops = [q.apply_grad((x,)) for x in elems]
@@ -256,13 +274,13 @@ class ConditionalAccumulatorTest(tf.test.TestCase):
for accum_op in accum_ops:
accum_op.run()
- with self.assertRaises(tf.errors.InvalidArgumentError):
+ with self.assertRaises(errors_impl.InvalidArgumentError):
takeg_t.eval()
def testAccumulatorRepeatedTakeGrad(self):
with self.test_session():
- q = tf.ConditionalAccumulator(
- tf.float32, name="Q", shape=tf.TensorShape([1]))
+ q = data_flow_ops.ConditionalAccumulator(
+ dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
elems = [10.0, 20.0]
elems_ave = sum(elems) / len(elems)
@@ -288,24 +306,24 @@ class ConditionalAccumulatorTest(tf.test.TestCase):
def testAccumulatorIncrementGlobalStep(self):
with self.test_session():
- q = tf.ConditionalAccumulator(
- tf.float32, name="Q", shape=tf.TensorShape([1]))
+ q = data_flow_ops.ConditionalAccumulator(
+ dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
- global_step = tf.Variable(0, name="global_step")
- new_global_step = tf.add(global_step, 1)
- inc_global_step = tf.assign(global_step, new_global_step)
+ global_step = variables.Variable(0, name="global_step")
+ new_global_step = math_ops.add(global_step, 1)
+ inc_global_step = state_ops.assign(global_step, new_global_step)
set_global_step_op = q.set_global_step(new_global_step)
- tf.global_variables_initializer().run()
+ variables.global_variables_initializer().run()
for _ in range(3):
set_global_step_op.run()
inc_global_step.eval()
def testAccumulatorSetGlobalStepPreventsAccumulation(self):
with self.test_session():
- q = tf.ConditionalAccumulator(
- tf.float32, name="Q", shape=tf.TensorShape([1]))
+ q = data_flow_ops.ConditionalAccumulator(
+ dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
local_steps = range(1000, 1005)
accum_ops = [q.apply_grad((0.0 + x,), local_step=x) for x in local_steps]
@@ -325,8 +343,8 @@ class ConditionalAccumulatorTest(tf.test.TestCase):
def testParallelApplyGrad(self):
with self.test_session() as sess:
- q = tf.ConditionalAccumulator(
- tf.float32, name="Q", shape=tf.TensorShape([1]))
+ q = data_flow_ops.ConditionalAccumulator(
+ dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
accum_ops = [q.apply_grad((x,), local_step=0) for x in elems]
takeg_t = q.take_grad(1)
@@ -334,8 +352,10 @@ class ConditionalAccumulatorTest(tf.test.TestCase):
def apply_grad(accum_op):
sess.run(accum_op)
- threads = [self.checkedThread(
- target=apply_grad, args=(o,)) for o in accum_ops]
+ threads = [
+ self.checkedThread(
+ target=apply_grad, args=(o,)) for o in accum_ops
+ ]
for thread in threads:
thread.start()
@@ -348,8 +368,8 @@ class ConditionalAccumulatorTest(tf.test.TestCase):
def testParallelTakeGrad(self):
with self.test_session() as sess:
- q = tf.ConditionalAccumulator(
- tf.float32, name="Q", shape=tf.TensorShape([1]))
+ q = data_flow_ops.ConditionalAccumulator(
+ dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
elems = [e for e in range(10)]
accum_ops = [q.apply_grad((np.float32(e),), local_step=e) for e in elems]
takeg_t = q.take_grad(1)
@@ -380,8 +400,8 @@ class ConditionalAccumulatorTest(tf.test.TestCase):
def testAccumulatorApplyAndBlockingTake(self):
with self.test_session() as sess:
- q = tf.ConditionalAccumulator(
- tf.float32, name="Q", shape=tf.TensorShape([1]))
+ q = data_flow_ops.ConditionalAccumulator(
+ dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
elems = [10.0, 20.0, 30.0]
elems_ave = sum(elems) / len(elems)
@@ -413,8 +433,8 @@ class ConditionalAccumulatorTest(tf.test.TestCase):
def testAccumulatorCancel(self):
with self.test_session() as sess:
- q = tf.ConditionalAccumulator(
- tf.float32, name="Q", shape=tf.TensorShape([1]))
+ q = data_flow_ops.ConditionalAccumulator(
+ dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
takeg_t = q.take_grad(1)
takeg_thread = self.checkedThread(
@@ -428,5 +448,6 @@ class ConditionalAccumulatorTest(tf.test.TestCase):
takeg_thread.join()
+
if __name__ == "__main__":
- tf.test.main()
+ test.main()
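The averaging these accumulator tests rely on fits in a few lines; a toy plain-Python sketch of the semantics (illustrative; it ignores threading and the global-step advancement the real op performs):

class ToyAccumulator(object):
  """Accumulates gradients and hands back their mean, as the tests expect."""

  def __init__(self):
    self.grads = []
    self.global_step = 0

  def apply_grad(self, grad, local_step=0):
    # Gradients tagged with a stale local_step are silently dropped, which
    # is what testAccumulatorSetGlobalStepPreventsAccumulation checks.
    if local_step >= self.global_step:
      self.grads.append(grad)

  def take_grad(self, num_required):
    # The real op blocks until num_required gradients have arrived; here
    # we just assert, then return the mean of everything and reset.
    assert len(self.grads) >= num_required
    avg = sum(self.grads) / len(self.grads)
    self.grads = []
    return avg

acc = ToyAccumulator()
for x in (10.0, 20.0):
  acc.apply_grad(x)
print(acc.take_grad(1))  # 15.0 == elems_ave in testAccumulatorTakeGrad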
diff --git a/tensorflow/python/kernel_tests/confusion_matrix_test.py b/tensorflow/python/kernel_tests/confusion_matrix_test.py
index e03a07a32d..c7e6d82d9b 100644
--- a/tensorflow/python/kernel_tests/confusion_matrix_test.py
+++ b/tensorflow/python/kernel_tests/confusion_matrix_test.py
@@ -13,22 +13,28 @@
# limitations under the License.
# ==============================================================================
"""Tests for confusion_matrix_ops."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import confusion_matrix
+from tensorflow.python.ops import math_ops
+from tensorflow.python.ops import random_ops
+from tensorflow.python.platform import test
-class ConfusionMatrixTest(tf.test.TestCase):
+class ConfusionMatrixTest(test.TestCase):
def _testConfMatrix(self, predictions, labels, truth, weights=None):
with self.test_session():
dtype = predictions.dtype
- ans = tf.confusion_matrix(
+ ans = confusion_matrix.confusion_matrix(
labels, predictions, dtype=dtype, weights=weights)
tf_ans = ans.eval()
self.assertAllClose(tf_ans, truth, atol=1e-10)
@@ -56,27 +62,30 @@ class ConfusionMatrixTest(tf.test.TestCase):
def _testConfMatrixOnTensors(self, tf_dtype, np_dtype):
with self.test_session() as sess:
- m_neg = tf.placeholder(dtype=tf.float32)
- m_pos = tf.placeholder(dtype=tf.float32)
- s = tf.placeholder(dtype=tf.float32)
-
- neg = tf.random_normal([20], mean=m_neg, stddev=s, dtype=tf.float32)
- pos = tf.random_normal([20], mean=m_pos, stddev=s, dtype=tf.float32)
-
- data = tf.concat_v2([neg, pos], 0)
- data = tf.cast(tf.round(data), tf_dtype)
- data = tf.minimum(tf.maximum(data, 0), 1)
- lab = tf.concat_v2(
- [tf.zeros(
- [20], dtype=tf_dtype), tf.ones(
- [20], dtype=tf_dtype)], 0)
-
- cm = tf.confusion_matrix(
+ m_neg = array_ops.placeholder(dtype=dtypes.float32)
+ m_pos = array_ops.placeholder(dtype=dtypes.float32)
+ s = array_ops.placeholder(dtype=dtypes.float32)
+
+ neg = random_ops.random_normal(
+ [20], mean=m_neg, stddev=s, dtype=dtypes.float32)
+ pos = random_ops.random_normal(
+ [20], mean=m_pos, stddev=s, dtype=dtypes.float32)
+
+ data = array_ops.concat_v2([neg, pos], 0)
+ data = math_ops.cast(math_ops.round(data), tf_dtype)
+ data = math_ops.minimum(math_ops.maximum(data, 0), 1)
+ lab = array_ops.concat_v2(
+ [
+ array_ops.zeros(
+ [20], dtype=tf_dtype), array_ops.ones(
+ [20], dtype=tf_dtype)
+ ],
+ 0)
+
+ cm = confusion_matrix.confusion_matrix(
lab, data, dtype=tf_dtype, num_classes=2)
- d, l, cm_out = sess.run([data, lab, cm], {m_neg: 0.0,
- m_pos: 1.0,
- s: 1.0})
+ d, l, cm_out = sess.run([data, lab, cm], {m_neg: 0.0, m_pos: 1.0, s: 1.0})
truth = np.zeros([2, 2], dtype=np_dtype)
try:
@@ -90,10 +99,10 @@ class ConfusionMatrixTest(tf.test.TestCase):
self.assertAllClose(cm_out, truth, atol=1e-10)
def _testOnTensors_int32(self):
- self._testConfMatrixOnTensors(tf.int32, np.int32)
+ self._testConfMatrixOnTensors(dtypes.int32, np.int32)
def testOnTensors_int64(self):
- self._testConfMatrixOnTensors(tf.int64, np.int64)
+ self._testConfMatrixOnTensors(dtypes.int64, np.int64)
def _testDifferentLabelsInPredictionAndTarget(self, dtype):
predictions = np.asarray([1, 2, 3], dtype=dtype)
@@ -142,7 +151,7 @@ class ConfusionMatrixTest(tf.test.TestCase):
def testWeighted(self):
predictions = np.arange(5, dtype=np.int32)
labels = np.arange(5, dtype=np.int32)
- weights = tf.constant(np.arange(5, dtype=np.int32))
+ weights = constant_op.constant(np.arange(5, dtype=np.int32))
truth = np.asarray(
[[0, 0, 0, 0, 0],
@@ -159,27 +168,27 @@ class ConfusionMatrixTest(tf.test.TestCase):
predictions = np.asarray([[1, 2, 3]])
labels = np.asarray([1, 2, 3])
self.assertRaisesRegexp(ValueError, "an not squeeze dim",
- tf.confusion_matrix,
- predictions, labels)
+ confusion_matrix.confusion_matrix, predictions,
+ labels)
predictions = np.asarray([1, 2, 3])
labels = np.asarray([[1, 2, 3]])
self.assertRaisesRegexp(ValueError, "an not squeeze dim",
- tf.confusion_matrix,
- predictions, labels)
+ confusion_matrix.confusion_matrix, predictions,
+ labels)
def testInputDifferentSize(self):
predictions = np.asarray([1, 2, 3])
labels = np.asarray([1, 2])
self.assertRaisesRegexp(ValueError, "must be equal",
- tf.confusion_matrix,
- predictions, labels)
+ confusion_matrix.confusion_matrix, predictions,
+ labels)
def testOutputIsInt32(self):
predictions = np.arange(2)
labels = np.arange(2)
with self.test_session():
- cm = tf.confusion_matrix(
+ cm = confusion_matrix.confusion_matrix(
labels, predictions, dtype=dtypes.int32)
tf_cm = cm.eval()
self.assertEqual(tf_cm.dtype, np.int32)
@@ -188,11 +197,11 @@ class ConfusionMatrixTest(tf.test.TestCase):
predictions = np.arange(2)
labels = np.arange(2)
with self.test_session():
- cm = tf.confusion_matrix(
+ cm = confusion_matrix.confusion_matrix(
labels, predictions, dtype=dtypes.int64)
tf_cm = cm.eval()
self.assertEqual(tf_cm.dtype, np.int64)
if __name__ == "__main__":
- tf.test.main()
+ test.main()
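
The change applies one pattern throughout: each test file drops the top-level
"hourglass" alias (import tensorflow as tf) and imports the implementing
modules directly. A representative sketch of the convention, using mappings
visible in this diff (not an exhaustive list):

    # Before: symbols reached through the top-level alias.
    #   import tensorflow as tf
    #   c = tf.constant(1.0, dtype=tf.float32)
    #   z = tf.zeros([2, 3])

    # After: the same symbols via their defining modules.
    from tensorflow.python.framework import constant_op
    from tensorflow.python.framework import dtypes
    from tensorflow.python.ops import array_ops
    from tensorflow.python.platform import test

    c = constant_op.constant(1.0, dtype=dtypes.float32)  # was tf.constant
    z = array_ops.zeros([2, 3])                          # was tf.zeros

    class SomeTest(test.TestCase):                       # was tf.test.TestCase
      pass

    if __name__ == "__main__":
      test.main()                                        # was tf.test.main()
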
diff --git a/tensorflow/python/kernel_tests/constant_op_test.py b/tensorflow/python/kernel_tests/constant_op_test.py
index 13658f82c2..b40b4b429a 100644
--- a/tensorflow/python/kernel_tests/constant_op_test.py
+++ b/tensorflow/python/kernel_tests/constant_op_test.py
@@ -12,24 +12,32 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for ConstantOp."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes as dtypes_lib
+from tensorflow.python.framework import errors_impl
+from tensorflow.python.framework import ops
+from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import gradient_checker
+from tensorflow.python.ops import math_ops
+from tensorflow.python.platform import test
+from tensorflow.python.util import compat
-class ConstantTest(tf.test.TestCase):
+class ConstantTest(test.TestCase):
def _testCpu(self, x):
np_ans = np.array(x)
with self.test_session(use_gpu=False):
- tf_ans = tf.convert_to_tensor(x).eval()
+ tf_ans = ops.convert_to_tensor(x).eval()
if np_ans.dtype in [np.float32, np.float64, np.complex64, np.complex128]:
self.assertAllClose(np_ans, tf_ans)
else:
@@ -38,7 +46,7 @@ class ConstantTest(tf.test.TestCase):
def _testGpu(self, x):
np_ans = np.array(x)
with self.test_session(use_gpu=True):
- tf_ans = tf.convert_to_tensor(x).eval()
+ tf_ans = ops.convert_to_tensor(x).eval()
if np_ans.dtype in [np.float32, np.float64, np.complex64, np.complex128]:
self.assertAllClose(np_ans, tf_ans)
else:
@@ -62,231 +70,238 @@ class ConstantTest(tf.test.TestCase):
def testInt32(self):
self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.int32))
- self._testAll(
- (100 * np.random.normal(size=30)).reshape([2, 3, 5]).astype(np.int32))
+ self._testAll((100 * np.random.normal(size=30)).reshape([2, 3, 5]).astype(
+ np.int32))
self._testAll(np.empty((2, 0, 5)).astype(np.int32))
def testInt64(self):
self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.int64))
- self._testAll(
- (100 * np.random.normal(size=30)).reshape([2, 3, 5]).astype(np.int64))
+ self._testAll((100 * np.random.normal(size=30)).reshape([2, 3, 5]).astype(
+ np.int64))
self._testAll(np.empty((2, 0, 5)).astype(np.int64))
def testComplex64(self):
self._testAll(
- np.complex(1, 2) * np.arange(-15, 15).reshape([2, 3, 5]).astype(
- np.complex64))
- self._testAll(np.complex(
- 1, 2) * np.random.normal(size=30).reshape([2, 3, 5]).astype(
- np.complex64))
+ np.complex(1, 2) *
+ np.arange(-15, 15).reshape([2, 3, 5]).astype(np.complex64))
+ self._testAll(
+ np.complex(1, 2) *
+ np.random.normal(size=30).reshape([2, 3, 5]).astype(np.complex64))
self._testAll(np.empty((2, 0, 5)).astype(np.complex64))
def testComplex128(self):
self._testAll(
- np.complex(1, 2) * np.arange(-15, 15).reshape([2, 3, 5]).astype(
- np.complex128))
- self._testAll(np.complex(
- 1, 2) * np.random.normal(size=30).reshape([2, 3, 5]).astype(
- np.complex128))
+ np.complex(1, 2) *
+ np.arange(-15, 15).reshape([2, 3, 5]).astype(np.complex128))
+ self._testAll(
+ np.complex(1, 2) *
+ np.random.normal(size=30).reshape([2, 3, 5]).astype(np.complex128))
self._testAll(np.empty((2, 0, 5)).astype(np.complex128))
def testString(self):
- self._testCpu(np.array([tf.compat.as_bytes(str(x))
- for x in np.arange(-15, 15)]).reshape([2, 3, 5]))
+ self._testCpu(
+ np.array([compat.as_bytes(str(x)) for x in np.arange(-15, 15)]).reshape(
+ [2, 3, 5]))
self._testCpu(np.empty((2, 0, 5)).astype(np.str_))
def testStringWithNulls(self):
with self.test_session():
- val = tf.convert_to_tensor(b"\0\0\0\0").eval()
+ val = ops.convert_to_tensor(b"\0\0\0\0").eval()
self.assertEqual(len(val), 4)
self.assertEqual(val, b"\0\0\0\0")
with self.test_session():
- val = tf.convert_to_tensor(b"xx\0xx").eval()
+ val = ops.convert_to_tensor(b"xx\0xx").eval()
self.assertEqual(len(val), 5)
self.assertAllEqual(val, b"xx\0xx")
nested = [[b"\0\0\0\0", b"xx\0xx"], [b"\0_\0_\0_\0", b"\0"]]
with self.test_session():
- val = tf.convert_to_tensor(nested).eval()
+ val = ops.convert_to_tensor(nested).eval()
# NOTE(mrry): Do not use assertAllEqual, because it converts nested to a
# numpy array, which loses the null terminators.
self.assertEqual(val.tolist(), nested)
def testExplicitShapeNumPy(self):
- with tf.Graph().as_default():
- c = tf.constant(
+ with ops.Graph().as_default():
+ c = constant_op.constant(
np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float32),
shape=[2, 3, 5])
self.assertEqual(c.get_shape(), [2, 3, 5])
def testImplicitShapeNumPy(self):
- with tf.Graph().as_default():
- c = tf.constant(
+ with ops.Graph().as_default():
+ c = constant_op.constant(
np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float32))
self.assertEqual(c.get_shape(), [2, 3, 5])
def testExplicitShapeList(self):
- with tf.Graph().as_default():
- c = tf.constant([1, 2, 3, 4, 5, 6, 7], shape=[7])
+ with ops.Graph().as_default():
+ c = constant_op.constant([1, 2, 3, 4, 5, 6, 7], shape=[7])
self.assertEqual(c.get_shape(), [7])
def testImplicitShapeList(self):
- with tf.Graph().as_default():
- c = tf.constant([1, 2, 3, 4, 5, 6, 7])
+ with ops.Graph().as_default():
+ c = constant_op.constant([1, 2, 3, 4, 5, 6, 7])
self.assertEqual(c.get_shape(), [7])
def testExplicitShapeNumber(self):
- with tf.Graph().as_default():
- c = tf.constant(1, shape=[1])
+ with ops.Graph().as_default():
+ c = constant_op.constant(1, shape=[1])
self.assertEqual(c.get_shape(), [1])
def testImplicitShapeNumber(self):
- with tf.Graph().as_default():
- c = tf.constant(1)
+ with ops.Graph().as_default():
+ c = constant_op.constant(1)
self.assertEqual(c.get_shape(), [])
def testShapeInconsistent(self):
- with tf.Graph().as_default():
- c = tf.constant([1, 2, 3, 4, 5, 6, 7], shape=[10])
+ with ops.Graph().as_default():
+ c = constant_op.constant([1, 2, 3, 4, 5, 6, 7], shape=[10])
self.assertEqual(c.get_shape(), [10])
# pylint: disable=g-long-lambda
def testShapeWrong(self):
- with tf.Graph().as_default():
+ with ops.Graph().as_default():
with self.assertRaisesWithPredicateMatch(
ValueError,
lambda e: ("Too many elements provided. Needed at most 5, "
"but received 7" == str(e))):
- tf.constant([1, 2, 3, 4, 5, 6, 7], shape=[5])
+ constant_op.constant([1, 2, 3, 4, 5, 6, 7], shape=[5])
+
# pylint: enable=g-long-lambda
def testTooLargeConstant(self):
- with tf.Graph().as_default():
+ with ops.Graph().as_default():
large_array = np.zeros((512, 1024, 1024), dtype=np.float32)
with self.assertRaisesRegexp(
ValueError,
"Cannot create a tensor proto whose content is larger than 2GB."):
- c = tf.constant(large_array)
+ c = constant_op.constant(large_array)
def testTooLargeGraph(self):
- with tf.Graph().as_default() as g:
+ with ops.Graph().as_default() as g:
large_array = np.zeros((256, 1024, 1024), dtype=np.float32)
- c = tf.constant(large_array)
- d = tf.constant(large_array)
- with self.assertRaisesRegexp(
- ValueError, "GraphDef cannot be larger than 2GB."):
+ c = constant_op.constant(large_array)
+ d = constant_op.constant(large_array)
+ with self.assertRaisesRegexp(ValueError,
+ "GraphDef cannot be larger than 2GB."):
g.as_graph_def()
def testSparseValuesRaiseErrors(self):
with self.assertRaisesRegexp(ValueError,
"setting an array element with a sequence"):
- c = tf.constant([[1, 2], [3]], dtype=tf.int32)
+ c = constant_op.constant([[1, 2], [3]], dtype=dtypes_lib.int32)
with self.assertRaisesRegexp(ValueError, "must be a dense"):
- c = tf.constant([[1, 2], [3]])
+ c = constant_op.constant([[1, 2], [3]])
with self.assertRaisesRegexp(ValueError, "must be a dense"):
- c = tf.constant([[1, 2], [3], [4, 5]])
+ c = constant_op.constant([[1, 2], [3], [4, 5]])
-class AsTensorTest(tf.test.TestCase):
+class AsTensorTest(test.TestCase):
def testAsTensorForTensorInput(self):
- with tf.Graph().as_default():
- t = tf.constant(10.0)
- x = tf.convert_to_tensor(t)
+ with ops.Graph().as_default():
+ t = constant_op.constant(10.0)
+ x = ops.convert_to_tensor(t)
self.assertIs(t, x)
def testAsTensorForNonTensorInput(self):
- with tf.Graph().as_default():
- x = tf.convert_to_tensor(10.0)
- self.assertTrue(isinstance(x, tf.Tensor))
+ with ops.Graph().as_default():
+ x = ops.convert_to_tensor(10.0)
+ self.assertTrue(isinstance(x, ops.Tensor))
def testAsTensorForShapeInput(self):
with self.test_session():
- x = tf.convert_to_tensor(tf.TensorShape([]))
- self.assertEqual(tf.int32, x.dtype)
+ x = ops.convert_to_tensor(tensor_shape.TensorShape([]))
+ self.assertEqual(dtypes_lib.int32, x.dtype)
self.assertAllEqual([], x.eval())
- x = tf.convert_to_tensor(tf.TensorShape([1, 2, 3]))
- self.assertEqual(tf.int32, x.dtype)
+ x = ops.convert_to_tensor(tensor_shape.TensorShape([1, 2, 3]))
+ self.assertEqual(dtypes_lib.int32, x.dtype)
self.assertAllEqual([1, 2, 3], x.eval())
- x = tf.convert_to_tensor(tf.TensorShape([1, 2, 3]), dtype=tf.int64)
- self.assertEqual(tf.int64, x.dtype)
+ x = ops.convert_to_tensor(
+ tensor_shape.TensorShape([1, 2, 3]), dtype=dtypes_lib.int64)
+ self.assertEqual(dtypes_lib.int64, x.dtype)
self.assertAllEqual([1, 2, 3], x.eval())
- x = tf.reshape(tf.zeros([6]), tf.TensorShape([2, 3]))
+ x = array_ops.reshape(
+ array_ops.zeros([6]), tensor_shape.TensorShape([2, 3]))
self.assertAllEqual([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], x.eval())
with self.assertRaisesRegexp(ValueError, "partially known"):
- tf.convert_to_tensor(tf.TensorShape(None))
+ ops.convert_to_tensor(tensor_shape.TensorShape(None))
with self.assertRaisesRegexp(ValueError, "partially known"):
- tf.convert_to_tensor(tf.TensorShape([1, None, 64]))
+ ops.convert_to_tensor(tensor_shape.TensorShape([1, None, 64]))
with self.assertRaises(TypeError):
- tf.convert_to_tensor(tf.TensorShape([1, 2, 3]), dtype=tf.float32)
+ ops.convert_to_tensor(
+ tensor_shape.TensorShape([1, 2, 3]), dtype=dtypes_lib.float32)
def testAsTensorForDimensionInput(self):
with self.test_session():
- x = tf.convert_to_tensor(tf.TensorShape([1, 2, 3])[1])
- self.assertEqual(tf.int32, x.dtype)
+ x = ops.convert_to_tensor(tensor_shape.TensorShape([1, 2, 3])[1])
+ self.assertEqual(dtypes_lib.int32, x.dtype)
self.assertAllEqual(2, x.eval())
- x = tf.convert_to_tensor(tf.TensorShape([1, 2, 3])[1], dtype=tf.int64)
- self.assertEqual(tf.int64, x.dtype)
+ x = ops.convert_to_tensor(
+ tensor_shape.TensorShape([1, 2, 3])[1], dtype=dtypes_lib.int64)
+ self.assertEqual(dtypes_lib.int64, x.dtype)
self.assertAllEqual(2, x.eval())
with self.assertRaisesRegexp(ValueError, "unknown Dimension"):
- tf.convert_to_tensor(tf.TensorShape(None)[1])
+ ops.convert_to_tensor(tensor_shape.TensorShape(None)[1])
with self.assertRaisesRegexp(ValueError, "unknown Dimension"):
- tf.convert_to_tensor(tf.TensorShape([1, None, 64])[1])
+ ops.convert_to_tensor(tensor_shape.TensorShape([1, None, 64])[1])
with self.assertRaises(TypeError):
- tf.convert_to_tensor(tf.TensorShape([1, 2, 3])[1], dtype=tf.float32)
+ ops.convert_to_tensor(
+ tensor_shape.TensorShape([1, 2, 3])[1], dtype=dtypes_lib.float32)
-class IdentityOpTest(tf.test.TestCase):
+class IdentityOpTest(test.TestCase):
def testIdTensor(self):
- with tf.Graph().as_default():
- x = tf.constant(2.0, shape=[6], name="input")
- id_op = tf.identity(x, name="id")
- self.assertTrue(isinstance(id_op.op.inputs[0], tf.Tensor))
- self.assertProtoEquals(
- "name: 'id' op: 'Identity' input: 'input' "
- "attr { key: 'T' value { type: DT_FLOAT } }", id_op.op.node_def)
+ with ops.Graph().as_default():
+ x = constant_op.constant(2.0, shape=[6], name="input")
+ id_op = array_ops.identity(x, name="id")
+ self.assertTrue(isinstance(id_op.op.inputs[0], ops.Tensor))
+ self.assertProtoEquals("name: 'id' op: 'Identity' input: 'input' "
+ "attr { key: 'T' value { type: DT_FLOAT } }",
+ id_op.op.node_def)
-class ZerosTest(tf.test.TestCase):
+class ZerosTest(test.TestCase):
def _Zeros(self, shape):
with self.test_session():
- ret = tf.zeros(shape)
+ ret = array_ops.zeros(shape)
self.assertEqual(shape, ret.get_shape())
return ret.eval()
def testConst(self):
- self.assertTrue(np.array_equal(self._Zeros([2, 3]), np.array([[0] * 3] *
- 2)))
+ self.assertTrue(
+ np.array_equal(self._Zeros([2, 3]), np.array([[0] * 3] * 2)))
def testScalar(self):
self.assertEqual(0, self._Zeros([]))
self.assertEqual(0, self._Zeros(()))
with self.test_session():
- scalar = tf.zeros(tf.constant([], dtype=tf.int32))
+ scalar = array_ops.zeros(constant_op.constant([], dtype=dtypes_lib.int32))
self.assertEqual(0, scalar.eval())
def testDynamicSizes(self):
np_ans = np.array([[0] * 3] * 2)
with self.test_session():
# Creates a tensor of 2 x 3.
- d = tf.fill([2, 3], 12., name="fill")
+ d = array_ops.fill([2, 3], 12., name="fill")
# Constructs a tensor of zeros of the same dimensions as "d".
- z = tf.zeros(tf.shape(d))
+ z = array_ops.zeros(array_ops.shape(d))
out = z.eval()
self.assertAllEqual(np_ans, out)
self.assertShapeEqual(np_ans, d)
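
A numpy analogue of the dynamic-shape case above, for comparison (illustrative
only; np.full and np.shape stand in for the fill and shape ops):

    import numpy as np

    d = np.full([2, 3], 12.)   # counterpart of array_ops.fill([2, 3], 12.)
    z = np.zeros(np.shape(d))  # counterpart of array_ops.zeros(array_ops.shape(d))
    assert np.array_equal(z, np.array([[0] * 3] * 2))
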
@@ -294,40 +309,43 @@ class ZerosTest(tf.test.TestCase):
def testDtype(self):
with self.test_session():
- d = tf.fill([2, 3], 12., name="fill")
+ d = array_ops.fill([2, 3], 12., name="fill")
self.assertEqual(d.get_shape(), [2, 3])
# Test default type for both constant size and dynamic size
- z = tf.zeros([2, 3])
- self.assertEqual(z.dtype, tf.float32)
+ z = array_ops.zeros([2, 3])
+ self.assertEqual(z.dtype, dtypes_lib.float32)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z.eval(), np.zeros([2, 3]))
- z = tf.zeros(tf.shape(d))
- self.assertEqual(z.dtype, tf.float32)
+ z = array_ops.zeros(array_ops.shape(d))
+ self.assertEqual(z.dtype, dtypes_lib.float32)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z.eval(), np.zeros([2, 3]))
# Test explicit type control
- for dtype in [tf.float32, tf.float64, tf.int32,
- tf.uint8, tf.int16, tf.int8,
- tf.complex64, tf.complex128, tf.int64, tf.bool]:
- z = tf.zeros([2, 3], dtype=dtype)
+ for dtype in [
+ dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
+ dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8,
+ dtypes_lib.complex64, dtypes_lib.complex128, dtypes_lib.int64,
+ dtypes_lib.bool
+ ]:
+ z = array_ops.zeros([2, 3], dtype=dtype)
self.assertEqual(z.dtype, dtype)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z.eval(), np.zeros([2, 3]))
- z = tf.zeros(tf.shape(d), dtype=dtype)
+ z = array_ops.zeros(array_ops.shape(d), dtype=dtype)
self.assertEqual(z.dtype, dtype)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z.eval(), np.zeros([2, 3]))
-class ZerosLikeTest(tf.test.TestCase):
+class ZerosLikeTest(test.TestCase):
def _compareZeros(self, dtype, use_gpu):
with self.test_session(use_gpu=use_gpu):
# Creates a tensor of non-zero values with shape 2 x 3.
numpy_dtype = dtype.as_numpy_dtype
- d = tf.constant(np.ones((2, 3), dtype=numpy_dtype), dtype=dtype)
+ d = constant_op.constant(np.ones((2, 3), dtype=numpy_dtype), dtype=dtype)
# Constructs a tensor of zeros of the same dimensions and type as "d".
- z_var = tf.zeros_like(d)
+ z_var = array_ops.zeros_like(d)
# Test that the type is correct
self.assertEqual(z_var.dtype, dtype)
z_value = z_var.eval()
@@ -337,17 +355,23 @@ class ZerosLikeTest(tf.test.TestCase):
self.assertEqual([2, 3], z_var.get_shape())
def testZerosLikeCPU(self):
- for dtype in [tf.float32, tf.float64, tf.int32, tf.uint8, tf.int16, tf.int8,
- tf.complex64, tf.complex128, tf.int64]:
+ for dtype in [
+ dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
+ dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8,
+ dtypes_lib.complex64, dtypes_lib.complex128, dtypes_lib.int64
+ ]:
self._compareZeros(dtype, False)
def testZerosLikeGPU(self):
- for dtype in [tf.float32, tf.float64, tf.int32, tf.bool, tf.int64]:
+ for dtype in [
+ dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
+ dtypes_lib.bool, dtypes_lib.int64
+ ]:
self._compareZeros(dtype, True)
def testZerosLikePartialShape(self):
- d = tf.placeholder(tf.float32, shape=[None, 4, None])
- z = tf.zeros_like(d)
+ d = array_ops.placeholder(dtypes_lib.float32, shape=[None, 4, None])
+ z = array_ops.zeros_like(d)
self.assertEqual(d.get_shape().as_list(), z.get_shape().as_list())
def testZerosLikeDtype(self):
@@ -358,17 +382,17 @@ class ZerosLikeTest(tf.test.TestCase):
for in_type in dtypes:
x = np.arange(15).astype(in_type).reshape(*shape)
for out_type in dtypes:
- y = tf.zeros_like(x, dtype=out_type).eval()
+ y = array_ops.zeros_like(x, dtype=out_type).eval()
self.assertEqual(y.dtype, out_type)
self.assertEqual(y.shape, shape)
self.assertAllEqual(y, np.zeros(shape, dtype=out_type))
-class OnesTest(tf.test.TestCase):
+class OnesTest(test.TestCase):
def _Ones(self, shape):
with self.test_session():
- ret = tf.ones(shape)
+ ret = array_ops.ones(shape)
self.assertEqual(shape, ret.get_shape())
return ret.eval()
@@ -379,16 +403,16 @@ class OnesTest(tf.test.TestCase):
self.assertEqual(1, self._Ones([]))
self.assertEqual(1, self._Ones(()))
with self.test_session():
- scalar = tf.ones(tf.constant([], dtype=tf.int32))
+ scalar = array_ops.ones(constant_op.constant([], dtype=dtypes_lib.int32))
self.assertEqual(1, scalar.eval())
def testDynamicSizes(self):
np_ans = np.array([[1] * 3] * 2)
with self.test_session():
# Creates a tensor of 2 x 3.
- d = tf.fill([2, 3], 12., name="fill")
+ d = array_ops.fill([2, 3], 12., name="fill")
# Constructs a tensor of ones of the same dimensions as "d".
- z = tf.ones(tf.shape(d))
+ z = array_ops.ones(array_ops.shape(d))
out = z.eval()
self.assertAllEqual(np_ans, out)
self.assertShapeEqual(np_ans, d)
@@ -396,51 +420,56 @@ class OnesTest(tf.test.TestCase):
def testAutoPack(self):
with self.test_session():
- h = tf.placeholder(tf.int32, shape=[])
- w = tf.placeholder(tf.int32, shape=[])
- z = tf.ones([h, w])
+ h = array_ops.placeholder(dtypes_lib.int32, shape=[])
+ w = array_ops.placeholder(dtypes_lib.int32, shape=[])
+ z = array_ops.ones([h, w])
out = z.eval(feed_dict={h: 4, w: 16})
self.assertAllEqual(out, np.array([[1] * 16] * 4))
def testDtype(self):
with self.test_session():
- d = tf.fill([2, 3], 12., name="fill")
+ d = array_ops.fill([2, 3], 12., name="fill")
self.assertEqual(d.get_shape(), [2, 3])
# Test default type for both constant size and dynamic size
- z = tf.ones([2, 3])
- self.assertEqual(z.dtype, tf.float32)
+ z = array_ops.ones([2, 3])
+ self.assertEqual(z.dtype, dtypes_lib.float32)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z.eval(), np.ones([2, 3]))
- z = tf.ones(tf.shape(d))
- self.assertEqual(z.dtype, tf.float32)
+ z = array_ops.ones(array_ops.shape(d))
+ self.assertEqual(z.dtype, dtypes_lib.float32)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z.eval(), np.ones([2, 3]))
# Test explicit type control
- for dtype in (tf.float32, tf.float64, tf.int32,
- tf.uint8, tf.int16, tf.int8,
- tf.complex64, tf.complex128, tf.int64, tf.bool):
- z = tf.ones([2, 3], dtype=dtype)
+ for dtype in (dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
+ dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8,
+ dtypes_lib.complex64, dtypes_lib.complex128,
+ dtypes_lib.int64, dtypes_lib.bool):
+ z = array_ops.ones([2, 3], dtype=dtype)
self.assertEqual(z.dtype, dtype)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z.eval(), np.ones([2, 3]))
- z = tf.ones(tf.shape(d), dtype=dtype)
+ z = array_ops.ones(array_ops.shape(d), dtype=dtype)
self.assertEqual(z.dtype, dtype)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z.eval(), np.ones([2, 3]))
-class OnesLikeTest(tf.test.TestCase):
+class OnesLikeTest(test.TestCase):
def testOnesLike(self):
- for dtype in [tf.float32, tf.float64, tf.int32,
- tf.uint8, tf.int16, tf.int8,
- tf.complex64, tf.complex128, tf.int64]:
+ for dtype in [
+ dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
+ dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8,
+ dtypes_lib.complex64, dtypes_lib.complex128, dtypes_lib.int64
+ ]:
numpy_dtype = dtype.as_numpy_dtype
with self.test_session():
# Creates a tensor of non-zero values with shape 2 x 3.
- d = tf.constant(np.ones((2, 3), dtype=numpy_dtype), dtype=dtype)
+ d = constant_op.constant(
+ np.ones(
+ (2, 3), dtype=numpy_dtype), dtype=dtype)
      # Constructs a tensor of ones of the same dimensions and type as "d".
- z_var = tf.ones_like(d)
+ z_var = array_ops.ones_like(d)
# Test that the type is correct
self.assertEqual(z_var.dtype, dtype)
z_value = z_var.eval()
@@ -450,16 +479,16 @@ class OnesLikeTest(tf.test.TestCase):
self.assertEqual([2, 3], z_var.get_shape())
def testOnesLikePartialShape(self):
- d = tf.placeholder(tf.float32, shape=[None, 4, None])
- z = tf.ones_like(d)
+ d = array_ops.placeholder(dtypes_lib.float32, shape=[None, 4, None])
+ z = array_ops.ones_like(d)
self.assertEqual(d.get_shape().as_list(), z.get_shape().as_list())
-class FillTest(tf.test.TestCase):
+class FillTest(test.TestCase):
def _compare(self, dims, val, np_ans, use_gpu):
with self.test_session(use_gpu=use_gpu):
- tf_ans = tf.fill(dims, val, name="fill")
+ tf_ans = array_ops.fill(dims, val, name="fill")
out = tf_ans.eval()
self.assertAllClose(np_ans, out)
# Fill does not set the shape.
@@ -496,58 +525,59 @@ class FillTest(tf.test.TestCase):
def testFillString(self):
np_ans = np.array([[b"yolo"] * 3] * 2)
with self.test_session(use_gpu=False):
- tf_ans = tf.fill([2, 3], np_ans[0][0], name="fill").eval()
+ tf_ans = array_ops.fill([2, 3], np_ans[0][0], name="fill").eval()
self.assertAllEqual(np_ans, tf_ans)
def testFillNegative(self):
with self.test_session():
      for shape in (-1,), (2, -1), (-1, 2), (-2,), (-3,):
with self.assertRaises(ValueError):
- tf.fill(shape, 7)
+ array_ops.fill(shape, 7)
# Using a placeholder so this won't be caught in static analysis.
- dims = tf.placeholder(tf.int32)
- fill_t = tf.fill(dims, 3.0)
+ dims = array_ops.placeholder(dtypes_lib.int32)
+ fill_t = array_ops.fill(dims, 3.0)
      for shape in (-1,), (2, -1), (-1, 2), (-2,), (-3,):
- with self.assertRaises(tf.errors.InvalidArgumentError):
+ with self.assertRaises(errors_impl.InvalidArgumentError):
fill_t.eval({dims: shape})
def testShapeFunctionEdgeCases(self):
# Non-vector dimensions.
with self.assertRaises(ValueError):
- tf.fill([[0, 1], [2, 3]], 1.0)
+ array_ops.fill([[0, 1], [2, 3]], 1.0)
# Non-scalar value.
with self.assertRaises(ValueError):
- tf.fill([3, 2], [1.0, 2.0])
+ array_ops.fill([3, 2], [1.0, 2.0])
# Partial dimension information.
- f = tf.fill(
- tf.placeholder(tf.int32, shape=(4,)), 3.0)
+ f = array_ops.fill(array_ops.placeholder(dtypes_lib.int32, shape=(4,)), 3.0)
self.assertEqual([None, None, None, None], f.get_shape().as_list())
- f = tf.fill([tf.placeholder(tf.int32, shape=()), 17], 1.0)
+ f = array_ops.fill(
+ [array_ops.placeholder(
+ dtypes_lib.int32, shape=()), 17], 1.0)
self.assertEqual([None, 17], f.get_shape().as_list())
def testGradient(self):
with self.test_session():
- in_v = tf.constant(5.0)
+ in_v = constant_op.constant(5.0)
out_shape = [3, 2]
- out_filled = tf.fill(out_shape, in_v)
- err = tf.test.compute_gradient_error(in_v, [],
- out_filled, out_shape)
+ out_filled = array_ops.fill(out_shape, in_v)
+ err = gradient_checker.compute_gradient_error(in_v, [], out_filled,
+ out_shape)
self.assertLess(err, 1e-3)
-class PlaceholderTest(tf.test.TestCase):
+class PlaceholderTest(test.TestCase):
def testDtype(self):
with self.test_session():
- p = tf.placeholder(tf.float32, name="p")
- p_identity = tf.identity(p)
+ p = array_ops.placeholder(dtypes_lib.float32, name="p")
+ p_identity = array_ops.identity(p)
feed_array = np.random.rand(10, 10)
- self.assertAllClose(p_identity.eval(feed_dict={p: feed_array}),
- feed_array)
+ self.assertAllClose(
+ p_identity.eval(feed_dict={p: feed_array}), feed_array)
with self.assertRaisesOpError(
"must feed a value for placeholder tensor 'p' with dtype float"):
@@ -555,11 +585,11 @@ class PlaceholderTest(tf.test.TestCase):
def testShape(self):
with self.test_session():
- p = tf.placeholder(tf.float32, shape=(10, 10), name="p")
- p_identity = tf.identity(p)
+ p = array_ops.placeholder(dtypes_lib.float32, shape=(10, 10), name="p")
+ p_identity = array_ops.identity(p)
feed_array = np.random.rand(10, 10)
- self.assertAllClose(p_identity.eval(feed_dict={p: feed_array}),
- feed_array)
+ self.assertAllClose(
+ p_identity.eval(feed_dict={p: feed_array}), feed_array)
with self.assertRaisesOpError(
"must feed a value for placeholder tensor 'p' with dtype float and "
@@ -572,11 +602,11 @@ class PlaceholderTest(tf.test.TestCase):
def testPartialShape(self):
with self.test_session():
- p = tf.placeholder(tf.float32, shape=[None, 3], name="p")
- p_identity = tf.identity(p)
+ p = array_ops.placeholder(dtypes_lib.float32, shape=[None, 3], name="p")
+ p_identity = array_ops.identity(p)
feed_array = np.random.rand(10, 3)
- self.assertAllClose(p_identity.eval(feed_dict={p: feed_array}),
- feed_array)
+ self.assertAllClose(
+ p_identity.eval(feed_dict={p: feed_array}), feed_array)
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Cannot feed value of shape" in str(e)):
@@ -584,42 +614,36 @@ class PlaceholderTest(tf.test.TestCase):
def testControlDependency(self):
with self.test_session():
- p = tf.placeholder(tf.int32, shape=[], name="p")
- with tf.control_dependencies([p]):
- c = tf.constant(5, tf.int32)
- d = tf.mul(p, c)
+ p = array_ops.placeholder(dtypes_lib.int32, shape=[], name="p")
+ with ops.control_dependencies([p]):
+ c = constant_op.constant(5, dtypes_lib.int32)
+ d = math_ops.mul(p, c)
self.assertEqual(10, d.eval(feed_dict={p: 2}))
def testBadShape(self):
with self.assertRaises(ValueError):
- tf.placeholder(tf.float32, shape=(-1, 10))
+ array_ops.placeholder(dtypes_lib.float32, shape=(-1, 10))
def testTensorStr(self):
- a = tf.placeholder(tf.float32, name="a")
+ a = array_ops.placeholder(dtypes_lib.float32, name="a")
self.assertEqual("<tf.Tensor 'a:0' shape=<unknown> dtype=float32>", repr(a))
- b = tf.placeholder(tf.int32, shape=(32, 40), name="b")
- self.assertEqual(
- "<tf.Tensor 'b:0' shape=(32, 40) dtype=int32>",
- repr(b))
+ b = array_ops.placeholder(dtypes_lib.int32, shape=(32, 40), name="b")
+ self.assertEqual("<tf.Tensor 'b:0' shape=(32, 40) dtype=int32>", repr(b))
- c = tf.placeholder(tf.qint32, shape=(32, None, 2), name="c")
- self.assertEqual(
- "<tf.Tensor 'c:0' shape=(32, ?, 2) dtype=qint32>",
- repr(c))
+ c = array_ops.placeholder(dtypes_lib.qint32, shape=(32, None, 2), name="c")
+ self.assertEqual("<tf.Tensor 'c:0' shape=(32, ?, 2) dtype=qint32>", repr(c))
-class PlaceholderV2Test(tf.test.TestCase):
+class PlaceholderV2Test(test.TestCase):
def testDtype(self):
with self.test_session():
- p = array_ops.placeholder_v2(tf.float32, shape=None, name="p")
- p_identity = tf.identity(p)
+ p = array_ops.placeholder_v2(dtypes_lib.float32, shape=None, name="p")
+ p_identity = array_ops.identity(p)
feed_array = np.random.rand(10, 10)
self.assertAllClose(
- p_identity.eval(feed_dict={
- p: feed_array
- }), feed_array)
+ p_identity.eval(feed_dict={p: feed_array}), feed_array)
with self.assertRaisesOpError(
"must feed a value for placeholder tensor 'p' with dtype float"):
@@ -627,13 +651,11 @@ class PlaceholderV2Test(tf.test.TestCase):
def testShape(self):
with self.test_session():
- p = array_ops.placeholder_v2(tf.float32, shape=(10, 10), name="p")
- p_identity = tf.identity(p)
+ p = array_ops.placeholder_v2(dtypes_lib.float32, shape=(10, 10), name="p")
+ p_identity = array_ops.identity(p)
feed_array = np.random.rand(10, 10)
self.assertAllClose(
- p_identity.eval(feed_dict={
- p: feed_array
- }), feed_array)
+ p_identity.eval(feed_dict={p: feed_array}), feed_array)
with self.assertRaisesOpError(
"must feed a value for placeholder tensor 'p' with dtype float and "
@@ -646,35 +668,30 @@ class PlaceholderV2Test(tf.test.TestCase):
def testUnknownShape(self):
with self.test_session():
- p = array_ops.placeholder_v2(tf.float32, shape=None, name="p")
- p_identity = tf.identity(p)
+ p = array_ops.placeholder_v2(dtypes_lib.float32, shape=None, name="p")
+ p_identity = array_ops.identity(p)
# can feed anything
feed_array = np.random.rand(10, 3)
self.assertAllClose(
- p_identity.eval(feed_dict={
- p: feed_array
- }), feed_array)
+ p_identity.eval(feed_dict={p: feed_array}), feed_array)
feed_array = np.random.rand(4, 2, 5)
self.assertAllClose(
- p_identity.eval(feed_dict={
- p: feed_array
- }), feed_array)
+ p_identity.eval(feed_dict={p: feed_array}), feed_array)
def testScalarShape(self):
with self.test_session():
- p = array_ops.placeholder_v2(tf.float32, shape=[], name="p")
- p_identity = tf.identity(p)
+ p = array_ops.placeholder_v2(dtypes_lib.float32, shape=[], name="p")
+ p_identity = array_ops.identity(p)
self.assertAllClose(p_identity.eval(feed_dict={p: 5}), 5)
def testPartialShape(self):
with self.test_session():
- p = array_ops.placeholder_v2(tf.float32, shape=[None, 3], name="p")
- p_identity = tf.identity(p)
+ p = array_ops.placeholder_v2(
+ dtypes_lib.float32, shape=[None, 3], name="p")
+ p_identity = array_ops.identity(p)
feed_array = np.random.rand(10, 3)
self.assertAllClose(
- p_identity.eval(feed_dict={
- p: feed_array
- }), feed_array)
+ p_identity.eval(feed_dict={p: feed_array}), feed_array)
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Cannot feed value of shape" in str(e)):
@@ -682,45 +699,46 @@ class PlaceholderV2Test(tf.test.TestCase):
def testControlDependency(self):
with self.test_session():
- p = array_ops.placeholder_v2(tf.int32, shape=[], name="p")
- with tf.control_dependencies([p]):
- c = tf.constant(5, tf.int32)
- d = tf.mul(p, c)
+ p = array_ops.placeholder_v2(dtypes_lib.int32, shape=[], name="p")
+ with ops.control_dependencies([p]):
+ c = constant_op.constant(5, dtypes_lib.int32)
+ d = math_ops.mul(p, c)
val = np.array(2).astype(np.int)
self.assertEqual(10, d.eval(feed_dict={p: val}))
def testBadShape(self):
with self.assertRaises(ValueError):
- array_ops.placeholder_v2(tf.float32, shape=(-1, 10))
+ array_ops.placeholder_v2(dtypes_lib.float32, shape=(-1, 10))
def testTensorStr(self):
- a = array_ops.placeholder_v2(tf.float32, shape=None, name="a")
+ a = array_ops.placeholder_v2(dtypes_lib.float32, shape=None, name="a")
self.assertEqual("<tf.Tensor 'a:0' shape=<unknown> dtype=float32>", repr(a))
- b = array_ops.placeholder_v2(tf.int32, shape=(32, 40), name="b")
+ b = array_ops.placeholder_v2(dtypes_lib.int32, shape=(32, 40), name="b")
self.assertEqual("<tf.Tensor 'b:0' shape=(32, 40) dtype=int32>", repr(b))
- c = array_ops.placeholder_v2(tf.qint32, shape=(32, None, 2), name="c")
+ c = array_ops.placeholder_v2(
+ dtypes_lib.qint32, shape=(32, None, 2), name="c")
self.assertEqual("<tf.Tensor 'c:0' shape=(32, ?, 2) dtype=qint32>", repr(c))
-class PlaceholderWithDefaultTest(tf.test.TestCase):
+class PlaceholderWithDefaultTest(test.TestCase):
def testFullShape(self):
with self.test_session():
- p = tf.placeholder_with_default([[2, 2], [2, 2]], shape=[2, 2])
- a = tf.identity(p)
+ p = array_ops.placeholder_with_default([[2, 2], [2, 2]], shape=[2, 2])
+ a = array_ops.identity(p)
self.assertAllEqual([[2, 2], [2, 2]], a.eval())
- self.assertAllEqual([[3, 3], [3, 3]],
- a.eval(feed_dict={p: [[3, 3], [3, 3]]}))
+ self.assertAllEqual(
+ [[3, 3], [3, 3]], a.eval(feed_dict={p: [[3, 3], [3, 3]]}))
with self.assertRaises(ValueError):
a.eval(feed_dict={p: [[6, 6, 6], [6, 6, 6]]})
def testPartialShape(self):
with self.test_session():
- p = tf.placeholder_with_default([1, 2, 3], shape=[None])
- a = tf.identity(p)
+ p = array_ops.placeholder_with_default([1, 2, 3], shape=[None])
+ a = array_ops.identity(p)
self.assertAllEqual([1, 2, 3], a.eval())
self.assertAllEqual([3, 37], a.eval(feed_dict={p: [3, 37]}))
@@ -729,13 +747,13 @@ class PlaceholderWithDefaultTest(tf.test.TestCase):
def testNoShape(self):
with self.test_session():
- p = tf.placeholder_with_default([17], shape=None)
- a = tf.identity(p)
+ p = array_ops.placeholder_with_default([17], shape=None)
+ a = array_ops.identity(p)
self.assertAllEqual([17], a.eval())
self.assertAllEqual([3, 37], a.eval(feed_dict={p: [3, 37]}))
- self.assertAllEqual([[3, 3], [3, 3]],
- a.eval(feed_dict={p: [[3, 3], [3, 3]]}))
+ self.assertAllEqual(
+ [[3, 3], [3, 3]], a.eval(feed_dict={p: [[3, 3], [3, 3]]}))
if __name__ == "__main__":
- tf.test.main()
+ test.main()
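
The control-flow test below gets the same import treatment, plus one more
wrinkle: protobuf configuration types previously reached as tf.ConfigProto,
tf.GraphOptions, and tf.OptimizerOptions are now imported from the generated
proto module, as in the opt_cfg helper. A standalone sketch of that style:

    from tensorflow.core.protobuf import config_pb2

    config = config_pb2.ConfigProto(
        allow_soft_placement=True,
        graph_options=config_pb2.GraphOptions(
            optimizer_options=config_pb2.OptimizerOptions(
                opt_level=config_pb2.OptimizerOptions.L1,
                do_function_inlining=True,
                do_constant_folding=True)))
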
diff --git a/tensorflow/python/kernel_tests/control_flow_ops_py_test.py b/tensorflow/python/kernel_tests/control_flow_ops_py_test.py
index 474261f78e..bc10cdeb5e 100644
--- a/tensorflow/python/kernel_tests/control_flow_ops_py_test.py
+++ b/tensorflow/python/kernel_tests/control_flow_ops_py_test.py
@@ -15,6 +15,7 @@
# pylint: disable=g-long-lambda
"""Tests for tensorflow.ops.control_flow_ops."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
@@ -24,20 +25,39 @@ import math
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
-import tensorflow as tf
+from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import device_lib
+from tensorflow.python.client import session
+from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import function
+from tensorflow.python.framework import ops
+from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
+from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
+from tensorflow.python.ops import data_flow_ops
+from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import gen_logging_ops
from tensorflow.python.ops import gen_state_ops
+from tensorflow.python.ops import gradients_impl
+from tensorflow.python.ops import logging_ops
+from tensorflow.python.ops import math_ops
+from tensorflow.python.ops import script_ops
from tensorflow.python.ops import state_ops
+from tensorflow.python.ops import variable_scope
+from tensorflow.python.ops import variables
+import tensorflow.python.ops.tensor_array_grad # pylint: disable=unused-import
+from tensorflow.python.platform import test
+from tensorflow.python.training import adam
+from tensorflow.python.training import gradient_descent
from tensorflow.python.util import nest
+
def check_op_order(graph):
"""Sanity check on the ordering of op id."""
@@ -63,68 +83,68 @@ def check_consumers(graph):
def opt_cfg():
- return tf.ConfigProto(
+ return config_pb2.ConfigProto(
allow_soft_placement=True,
- graph_options=tf.GraphOptions(
- optimizer_options=tf.OptimizerOptions(
- opt_level=tf.OptimizerOptions.L1,
+ graph_options=config_pb2.GraphOptions(
+ optimizer_options=config_pb2.OptimizerOptions(
+ opt_level=config_pb2.OptimizerOptions.L1,
do_function_inlining=True,
do_constant_folding=True)))
def isum(s):
- i = tf.constant(0, name="i")
- c = lambda i, s: tf.less(i, 10)
- b = lambda i, s: [tf.add(i, 1), tf.add(i, s)]
- _, r_s = tf.while_loop(c, b, [i, s])
+ i = constant_op.constant(0, name="i")
+ c = lambda i, s: math_ops.less(i, 10)
+ b = lambda i, s: [math_ops.add(i, 1), math_ops.add(i, s)]
+ _, r_s = control_flow_ops.while_loop(c, b, [i, s])
return r_s
-class ControlFlowTest(tf.test.TestCase):
+class ControlFlowTest(test.TestCase):
def testRefIdentity(self):
with self.test_session():
- v = tf.Variable(7)
+ v = variables.Variable(7)
v = control_flow_ops._Identity(v)
- op = tf.assign(v, 9)
+ op = state_ops.assign(v, 9)
v2 = control_flow_ops.with_dependencies([op], v)
self.assertTrue(check_op_order(v.graph))
- self.assertTrue(isinstance(v2, tf.Tensor))
- tf.global_variables_initializer().run()
+ self.assertTrue(isinstance(v2, ops.Tensor))
+ variables.global_variables_initializer().run()
self.assertEqual(9, v2.eval())
def testRefEnter(self):
with self.test_session():
- v = tf.Variable(7)
+ v = variables.Variable(7)
enter_v = control_flow_ops._Enter(v, "foo_1", is_constant=True)
- nine = tf.constant(9)
+ nine = constant_op.constant(9)
enter_nine = control_flow_ops.enter(nine, "foo_1")
- op = tf.assign(enter_v, enter_nine)
+ op = state_ops.assign(enter_v, enter_nine)
v2 = control_flow_ops.with_dependencies([op], enter_v)
v3 = control_flow_ops.exit(v2)
- tf.global_variables_initializer().run()
+ variables.global_variables_initializer().run()
self.assertEqual(9, v3.eval())
def testRefSwitch(self):
with self.test_session():
- v = tf.Variable(7)
+ v = variables.Variable(7)
- p = tf.constant(True)
+ p = constant_op.constant(True)
v1 = control_flow_ops._SwitchRefOrTensor(v._ref(), p) # pylint: disable=protected-access
- v2 = tf.assign(v1[1], 9)
- tf.global_variables_initializer().run()
+ v2 = state_ops.assign(v1[1], 9)
+ variables.global_variables_initializer().run()
self.assertEqual(9, v2.eval())
def testEnterMulExit(self):
with self.test_session():
- data = tf.constant([1, 2, 3, 4, 5, 6], name="data")
+ data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
enter_data = control_flow_ops.enter(data, "foo_1", False)
- five = tf.constant(5)
+ five = constant_op.constant(5)
enter_five = control_flow_ops.enter(five, "foo_1", False)
- mul_op = tf.mul(enter_data, enter_five)
+ mul_op = math_ops.mul(enter_data, enter_five)
exit_op = control_flow_ops.exit(mul_op)
result = exit_op.eval()
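
The isum helper defined above has a direct plain-Python model (illustrative;
note the loop variables update simultaneously, matching the
[add(i, 1), add(i, s)] body):

    def isum_py(s):
        i = 0
        while i < 10:
            i, s = i + 1, i + s  # new s uses the old i
        return s

    assert isum_py(0) == 45  # the value testWhile_2 checks below
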
@@ -132,10 +152,10 @@ class ControlFlowTest(tf.test.TestCase):
def testSwitchMergeIndexedSlices(self):
with self.test_session():
- values = tf.constant([1, 2, 3, 4, 5, 6])
- indices = tf.constant([0, 2, 4, 6, 8, 10])
- data = tf.IndexedSlices(values, indices)
- pred = tf.convert_to_tensor(True)
+ values = constant_op.constant([1, 2, 3, 4, 5, 6])
+ indices = constant_op.constant([0, 2, 4, 6, 8, 10])
+ data = ops.IndexedSlices(values, indices)
+ pred = ops.convert_to_tensor(True)
switch_op = control_flow_ops.switch(data, pred)
merge_op = control_flow_ops.merge(switch_op)[0]
@@ -146,22 +166,22 @@ class ControlFlowTest(tf.test.TestCase):
def testSwitchDeadBranch(self):
with self.test_session():
- data = tf.constant([1, 2, 3, 4, 5, 6], name="data")
- ports = tf.convert_to_tensor(True, name="ports")
+ data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
+ ports = ops.convert_to_tensor(True, name="ports")
switch_op = control_flow_ops.switch(data, ports)
- dead_branch = tf.identity(switch_op[0])
+ dead_branch = array_ops.identity(switch_op[0])
with self.assertRaisesWithPredicateMatch(
- tf.errors.InvalidArgumentError,
+ errors_impl.InvalidArgumentError,
lambda e: "The tensor returned for" in str(e)):
dead_branch.eval()
def testSwitchMergeLess(self):
with self.test_session():
- data = tf.constant([1, 2, 3, 4, 5, 6], name="data")
- zero = tf.convert_to_tensor(0)
- one = tf.convert_to_tensor(1)
- less_op = tf.less(zero, one)
+ data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
+ zero = ops.convert_to_tensor(0)
+ one = ops.convert_to_tensor(1)
+ less_op = math_ops.less(zero, one)
switch_op = control_flow_ops.switch(data, less_op)
merge_op = control_flow_ops.merge(switch_op)[0]
@@ -170,12 +190,12 @@ class ControlFlowTest(tf.test.TestCase):
def testSwitchMergeAddIdentity(self):
with self.test_session():
- data = tf.constant([1, 2, 3, 4, 5, 6], name="data")
- ports = tf.convert_to_tensor(False, name="ports")
+ data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
+ ports = ops.convert_to_tensor(False, name="ports")
switch_op = control_flow_ops.switch(data, ports)
- one = tf.constant(1)
- add_op = tf.add(switch_op[0], one)
- id_op = tf.identity(switch_op[1])
+ one = constant_op.constant(1)
+ add_op = math_ops.add(switch_op[0], one)
+ id_op = array_ops.identity(switch_op[1])
merge_op = control_flow_ops.merge([add_op, id_op])[0]
result = merge_op.eval()
@@ -183,13 +203,13 @@ class ControlFlowTest(tf.test.TestCase):
def testSwitchMergeAddMul(self):
with self.test_session():
- data = tf.constant([1, 2, 3, 4, 5, 6], name="data")
- ports = tf.convert_to_tensor(True, name="ports")
+ data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
+ ports = ops.convert_to_tensor(True, name="ports")
switch_op = control_flow_ops.switch(data, ports)
- one = tf.constant(1)
- add_op = tf.add(switch_op[0], one)
- five = tf.constant(5)
- mul_op = tf.mul(switch_op[1], five)
+ one = constant_op.constant(1)
+ add_op = math_ops.add(switch_op[0], one)
+ five = constant_op.constant(5)
+ mul_op = math_ops.mul(switch_op[1], five)
merge_op = control_flow_ops.merge([add_op, mul_op])[0]
result = merge_op.eval()
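
A toy model of the switch/merge dataflow these tests exercise (a sketch, not
TensorFlow's implementation): switch routes its input to exactly one of two
outputs depending on the predicate, and merge forwards whichever input is
alive.

    _DEAD = object()  # stand-in for the untaken branch

    def switch(data, pred):
        # Returns (false_output, true_output); only one side carries data.
        return (_DEAD, data) if pred else (data, _DEAD)

    def merge(inputs):
        # Forwards the first live input.
        return next(x for x in inputs if x is not _DEAD)

    def add1(x): return _DEAD if x is _DEAD else [v + 1 for v in x]
    def mul5(x): return _DEAD if x is _DEAD else [v * 5 for v in x]

    sw = switch([1, 2, 3, 4, 5, 6], pred=True)
    assert merge([add1(sw[0]), mul5(sw[1])]) == [5, 10, 15, 20, 25, 30]
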
@@ -197,8 +217,8 @@ class ControlFlowTest(tf.test.TestCase):
def testLoop_false(self):
with self.test_session():
- false = tf.convert_to_tensor(False)
- n = tf.constant(10)
+ false = ops.convert_to_tensor(False)
+ n = constant_op.constant(10)
enter_false = control_flow_ops.enter(false, "foo_1", False)
enter_n = control_flow_ops.enter(n, "foo_1", False)
@@ -214,22 +234,22 @@ class ControlFlowTest(tf.test.TestCase):
def testLoop_1(self):
with self.test_session():
- zero = tf.constant(0)
- one = tf.constant(1)
- n = tf.constant(10)
+ zero = constant_op.constant(0)
+ one = constant_op.constant(1)
+ n = constant_op.constant(10)
enter_i = control_flow_ops.enter(zero, "foo", False)
enter_one = control_flow_ops.enter(one, "foo", True)
enter_n = control_flow_ops.enter(n, "foo", True)
- with tf.device("/gpu:0"):
+ with ops.device("/gpu:0"):
merge_i = control_flow_ops.merge([enter_i, enter_i])[0]
- less_op = tf.less(merge_i, enter_n)
+ less_op = math_ops.less(merge_i, enter_n)
cond_op = control_flow_ops.loop_cond(less_op)
switch_i = control_flow_ops.switch(merge_i, cond_op)
- add_i = tf.add(switch_i[1], enter_one)
+ add_i = math_ops.add(switch_i[1], enter_one)
next_i = control_flow_ops.next_iteration(add_i)
merge_i.op._update_input(1, next_i)
@@ -240,9 +260,9 @@ class ControlFlowTest(tf.test.TestCase):
def testLoop_2(self):
with self.test_session():
- zero = tf.constant(0)
- one = tf.constant(1)
- n = tf.constant(10)
+ zero = constant_op.constant(0)
+ one = constant_op.constant(1)
+ n = constant_op.constant(10)
enter_i = control_flow_ops.enter(zero, "foo", False)
enter_one = control_flow_ops.enter(one, "foo", True)
@@ -250,13 +270,13 @@ class ControlFlowTest(tf.test.TestCase):
merge_i = control_flow_ops.merge([enter_i, enter_i])[0]
- less_op = tf.less(merge_i, enter_n)
+ less_op = math_ops.less(merge_i, enter_n)
cond_op = control_flow_ops.loop_cond(less_op)
switch_i = control_flow_ops.switch(merge_i, cond_op)
- add_i = tf.add(switch_i[1], enter_one)
+ add_i = math_ops.add(switch_i[1], enter_one)
- with tf.device("/gpu:0"):
+ with ops.device("/gpu:0"):
next_i = control_flow_ops.next_iteration(add_i)
merge_i.op._update_input(1, next_i)
@@ -266,29 +286,29 @@ class ControlFlowTest(tf.test.TestCase):
def testDifferentFrame(self):
with self.test_session():
- data = tf.placeholder(tf.float32, shape=[])
+ data = array_ops.placeholder(dtypes.float32, shape=[])
enter_1 = control_flow_ops.enter(data, "foo_1", False)
enter_2 = control_flow_ops.enter(data, "foo_2", False)
- res = tf.add(enter_1, enter_2)
+ res = math_ops.add(enter_1, enter_2)
with self.assertRaisesOpError("has inputs from different frames"):
res.eval(feed_dict={data: 1.0})
def testCondBool(self):
- values = tf.constant(10)
- fn1 = lambda: tf.add(values, 1)
- fn2 = lambda: tf.sub(values, 1)
+ values = constant_op.constant(10)
+ fn1 = lambda: math_ops.add(values, 1)
+ fn2 = lambda: math_ops.sub(values, 1)
with self.assertRaisesRegexp(TypeError, "must not be a Python bool"):
- _ = tf.cond(False, fn1, fn2)
+ _ = control_flow_ops.cond(False, fn1, fn2)
def testCondIndexedSlices(self):
with self.test_session():
- values = tf.constant(10)
- indices = tf.constant(0)
- x = tf.IndexedSlices(values, indices)
- pred = tf.less(1, 2)
- fn1 = lambda: tf.IndexedSlices(tf.add(x.values, 1), indices)
- fn2 = lambda: tf.IndexedSlices(tf.sub(x.values, 1), indices)
- r = tf.cond(pred, fn1, fn2)
+ values = constant_op.constant(10)
+ indices = constant_op.constant(0)
+ x = ops.IndexedSlices(values, indices)
+ pred = math_ops.less(1, 2)
+ fn1 = lambda: ops.IndexedSlices(math_ops.add(x.values, 1), indices)
+ fn2 = lambda: ops.IndexedSlices(math_ops.sub(x.values, 1), indices)
+ r = control_flow_ops.cond(pred, fn1, fn2)
val = r.values.eval()
ind = r.indices.eval()
@@ -298,30 +318,31 @@ class ControlFlowTest(tf.test.TestCase):
def testCondSparseTensor(self):
with self.test_session():
- values = tf.constant([2.0, 4.0], name="values")
- indices = tf.constant([[0], [3]], dtype=tf.int64, name="indices")
- shape = tf.constant([10], dtype=tf.int64, name="dense_shape")
- x = tf.SparseTensor(indices, values, dense_shape=shape)
- pred = tf.less(1, 2)
- fn1 = lambda: tf.SparseTensor(
+ values = constant_op.constant([2.0, 4.0], name="values")
+ indices = constant_op.constant(
+ [[0], [3]], dtype=dtypes.int64, name="indices")
+ shape = constant_op.constant([10], dtype=dtypes.int64, name="dense_shape")
+ x = sparse_tensor.SparseTensor(indices, values, dense_shape=shape)
+ pred = math_ops.less(1, 2)
+ fn1 = lambda: sparse_tensor.SparseTensor(
indices + 1, x.values + 1, dense_shape=shape)
- fn2 = lambda: tf.SparseTensor(
+ fn2 = lambda: sparse_tensor.SparseTensor(
indices, x.values - 1, dense_shape=shape)
- r = tf.cond(pred, fn1, fn2)
+ r = control_flow_ops.cond(pred, fn1, fn2)
self.assertAllEqual([3.0, 5.0], r.values.eval())
self.assertAllEqual([[1], [4]], r.indices.eval())
self.assertAllEqual(r.values.get_shape(), (2,))
def testCondIndexedSlicesDifferentTypes(self):
with self.test_session():
- values = tf.constant(10)
- i_32 = tf.convert_to_tensor(0, name="one", dtype=tf.int32)
- i_64 = tf.convert_to_tensor(0, name="one", dtype=tf.int64)
- x = tf.IndexedSlices(values, i_32)
- pred = tf.less(1, 2)
- fn1 = lambda: tf.IndexedSlices(tf.add(x.values, 1), i_32)
- fn2 = lambda: tf.IndexedSlices(tf.sub(x.values, 1), i_64)
- r = tf.cond(pred, fn1, fn2)
+ values = constant_op.constant(10)
+ i_32 = ops.convert_to_tensor(0, name="one", dtype=dtypes.int32)
+ i_64 = ops.convert_to_tensor(0, name="one", dtype=dtypes.int64)
+ x = ops.IndexedSlices(values, i_32)
+ pred = math_ops.less(1, 2)
+ fn1 = lambda: ops.IndexedSlices(math_ops.add(x.values, 1), i_32)
+ fn2 = lambda: ops.IndexedSlices(math_ops.sub(x.values, 1), i_64)
+ r = control_flow_ops.cond(pred, fn1, fn2)
val = r.values.eval()
ind = r.indices.eval()
@@ -332,14 +353,14 @@ class ControlFlowTest(tf.test.TestCase):
def testCondColocation(self):
with self.test_session(use_gpu=True):
- with tf.device("/cpu:0"):
- v = tf.Variable(7.0)
+ with ops.device("/cpu:0"):
+ v = variables.Variable(7.0)
- x = tf.constant(10.0)
- pred = tf.less(1.0, 2.0)
- fn1 = lambda: tf.add(v, 1.0)
- fn2 = lambda: tf.sub(x, 1.0)
- r = tf.cond(pred, fn1, fn2)
+ x = constant_op.constant(10.0)
+ pred = math_ops.less(1.0, 2.0)
+ fn1 = lambda: math_ops.add(v, 1.0)
+ fn2 = lambda: math_ops.sub(x, 1.0)
+ r = control_flow_ops.cond(pred, fn1, fn2)
for op in x.graph.get_operations():
if op.name == "cond/Add/Switch":
@@ -347,11 +368,11 @@ class ControlFlowTest(tf.test.TestCase):
def _testCond_1(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
- x = tf.constant(10)
- pred = tf.less(1, 2)
- fn1 = lambda: tf.add(x, 1)
- fn2 = lambda: tf.sub(x, 1)
- r = tf.cond(pred, fn1, fn2)
+ x = constant_op.constant(10)
+ pred = math_ops.less(1, 2)
+ fn1 = lambda: math_ops.add(x, 1)
+ fn2 = lambda: math_ops.sub(x, 1)
+ r = control_flow_ops.cond(pred, fn1, fn2)
result = r.eval()
self.assertTrue(check_op_order(x.graph))
@@ -363,20 +384,22 @@ class ControlFlowTest(tf.test.TestCase):
def testCond_2(self):
with self.test_session():
- x = tf.constant(10)
- r = tf.cond(tf.less(1, 0), lambda: tf.add(x, 1), lambda: tf.sub(x, 1))
+ x = constant_op.constant(10)
+ r = control_flow_ops.cond(
+ math_ops.less(1, 0), lambda: math_ops.add(x, 1),
+ lambda: math_ops.sub(x, 1))
result = r.eval()
self.assertTrue(check_op_order(x.graph))
self.assertAllEqual(9, result)
def testCond_3(self):
with self.test_session():
- x = tf.constant(10)
- pred = tf.less(1, 2)
- fn1 = lambda: tf.add(x, 1)
- fn2 = lambda: tf.sub(x, 1)
- fn3 = lambda: tf.add(tf.cond(pred, fn1, fn2), 1)
- r = tf.cond(pred, fn3, fn2)
+ x = constant_op.constant(10)
+ pred = math_ops.less(1, 2)
+ fn1 = lambda: math_ops.add(x, 1)
+ fn2 = lambda: math_ops.sub(x, 1)
+ fn3 = lambda: math_ops.add(control_flow_ops.cond(pred, fn1, fn2), 1)
+ r = control_flow_ops.cond(pred, fn3, fn2)
result = r.eval()
self.assertTrue(check_op_order(x.graph))
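
Stripped of graph construction, these cond tests pin down predicated
selection; a toy model (real cond builds both branches into the graph and
runs one of them via switch/merge):

    def cond(pred, fn1, fn2):
        return fn1() if pred else fn2()

    x = 10
    assert cond(1 < 2, lambda: x + 1, lambda: x - 1) == 11  # as in _testCond_1
    assert cond(1 < 0, lambda: x + 1, lambda: x - 1) == 9   # as in testCond_2
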
@@ -384,18 +407,18 @@ class ControlFlowTest(tf.test.TestCase):
def testCond_4(self):
with self.test_session():
- v1 = tf.Variable(7)
- v2 = tf.Variable(7)
- v3 = tf.Variable(7)
+ v1 = variables.Variable(7)
+ v2 = variables.Variable(7)
+ v3 = variables.Variable(7)
- age = tf.constant(3)
- max_age = tf.constant(2)
- pred = tf.greater(age, max_age)
- fn1 = lambda: [tf.assign(v1, 1).op, tf.assign(v2, 2).op]
- fn2 = lambda: [tf.assign(v3, 3).op, tf.constant(10).op]
- r = tf.cond(pred, fn1, fn2)
+ age = constant_op.constant(3)
+ max_age = constant_op.constant(2)
+ pred = math_ops.greater(age, max_age)
+ fn1 = lambda: [state_ops.assign(v1, 1).op, state_ops.assign(v2, 2).op]
+ fn2 = lambda: [state_ops.assign(v3, 3).op, constant_op.constant(10).op]
+ r = control_flow_ops.cond(pred, fn1, fn2)
- tf.global_variables_initializer().run()
+ variables.global_variables_initializer().run()
self.assertEqual(len(r), 2)
result = r[1].eval()
self.assertTrue(check_op_order(age.graph))
@@ -406,12 +429,12 @@ class ControlFlowTest(tf.test.TestCase):
def testCond_5(self):
with self.test_session():
- alive = tf.constant(True, name="alive")
- count = tf.constant(0, name="count")
+ alive = constant_op.constant(True, name="alive")
+ count = constant_op.constant(0, name="count")
def body(i):
- return tf.cond(
- alive, lambda: [tf.less(i, 3), tf.add(count, 1)],
+ return control_flow_ops.cond(
+ alive, lambda: [math_ops.less(i, 3), math_ops.add(count, 1)],
lambda: [alive, count])
for i in range(10):
@@ -420,41 +443,49 @@ class ControlFlowTest(tf.test.TestCase):
def testCond_6(self):
with self.test_session():
- v1 = tf.Variable([7])
+ v1 = variables.Variable([7])
- age = tf.constant(3)
- pred = tf.greater(age, 4)
+ age = constant_op.constant(3)
+ pred = math_ops.greater(age, 4)
fn1 = lambda: age
fn2 = lambda: v1
- r = tf.cond(pred, fn1, fn2)
+ r = control_flow_ops.cond(pred, fn1, fn2)
- tf.global_variables_initializer().run()
+ variables.global_variables_initializer().run()
result = r.eval()
self.assertAllEqual(np.array([7]), result)
def testCond_7(self):
with self.test_session() as sess:
- x = tf.constant(10)
- y = tf.constant(200)
- pred = tf.less(1, 2)
- fn1 = lambda: [tf.add(x, 1), tf.add(x, 2)]
+ x = constant_op.constant(10)
+ y = constant_op.constant(200)
+ pred = math_ops.less(1, 2)
+ fn1 = lambda: [math_ops.add(x, 1), math_ops.add(x, 2)]
fn2 = lambda: [y, y]
- r = tf.cond(pred, fn1, fn2)
+ r = control_flow_ops.cond(pred, fn1, fn2)
self.assertAllEqual([11, 12], sess.run(r))
def testCondRef(self):
with self.test_session():
- x = gen_state_ops._variable(shape=[1], dtype=tf.float32,
- name="x", container="", shared_name="")
+ x = gen_state_ops._variable(
+ shape=[1],
+ dtype=dtypes.float32,
+ name="x",
+ container="",
+ shared_name="")
true_fn = lambda: x
- false_fn = lambda: tf.constant([2.0])
- r = tf.cond(tf.constant(False), true_fn, false_fn)
+ false_fn = lambda: constant_op.constant([2.0])
+ r = control_flow_ops.cond(constant_op.constant(False), true_fn, false_fn)
self.assertAllEqual([2.0], r.eval())
def testUninitializedRefIdentity(self):
with self.test_session() as sess:
- v = gen_state_ops._variable(shape=[1], dtype=tf.float32,
- name="v", container="", shared_name="")
+ v = gen_state_ops._variable(
+ shape=[1],
+ dtype=dtypes.float32,
+ name="v",
+ container="",
+ shared_name="")
inited = state_ops.is_variable_initialized(v)
v_f, v_t = control_flow_ops.ref_switch(v, inited)
# Both v_f and v_t are uninitialized references. However, an actual use
@@ -464,124 +495,132 @@ class ControlFlowTest(tf.test.TestCase):
# so that this construction is allowed.
v_f_op = gen_array_ops._ref_identity(v_f)
v_t_op = gen_array_ops._ref_identity(v_t)
- with tf.control_dependencies([v_f_op]):
- assign_v = tf.assign(v, [1.0])
- with tf.control_dependencies([v_t_op]):
- orig_v = tf.identity(v)
+ with ops.control_dependencies([v_f_op]):
+ assign_v = state_ops.assign(v, [1.0])
+ with ops.control_dependencies([v_t_op]):
+ orig_v = array_ops.identity(v)
merged_op = control_flow_ops.merge([assign_v, orig_v])
self.assertAllEqual([1.0], sess.run(merged_op.output))
def testCondSwitchIdentity(self):
    # Make sure the switch identity is not removed by optimization.
- with tf.Session(config=opt_cfg()) as sess:
- pred = tf.constant(True)
+ with session.Session(config=opt_cfg()) as sess:
+ pred = constant_op.constant(True)
+
def fn1():
- return tf.no_op()
+ return control_flow_ops.no_op()
+
def fn2():
- return tf.Assert(False, ["Wrong branch!!!"])
- r = tf.cond(pred, fn1, fn2)
+ return control_flow_ops.Assert(False, ["Wrong branch!!!"])
+
+ r = control_flow_ops.cond(pred, fn1, fn2)
sess.run(r)
def testCondRecvIdentity(self):
    # Make sure the recv identity is not removed by optimization.
- with tf.Session(config=opt_cfg()) as sess:
- with tf.device("/gpu:0"):
- pred = tf.constant(True)
+ with session.Session(config=opt_cfg()) as sess:
+ with ops.device("/gpu:0"):
+ pred = constant_op.constant(True)
+
def fn1():
- return tf.no_op()
+ return control_flow_ops.no_op()
+
def fn2():
- with tf.device("/cpu:0"):
- return tf.Assert(False, ["Wrong branch!!!"])
- r = tf.cond(pred, fn1, fn2)
+ with ops.device("/cpu:0"):
+ return control_flow_ops.Assert(False, ["Wrong branch!!!"])
+
+ r = control_flow_ops.cond(pred, fn1, fn2)
sess.run(r)
def testCondGrad_1(self):
with self.test_session():
- x = tf.constant(10.0, name="x")
- pred = tf.less(1, 2)
- fn1 = lambda: tf.identity(x)
- fn2 = lambda: tf.identity(x)
- r = tf.cond(pred, fn1, fn2)
+ x = constant_op.constant(10.0, name="x")
+ pred = math_ops.less(1, 2)
+ fn1 = lambda: array_ops.identity(x)
+ fn2 = lambda: array_ops.identity(x)
+ r = control_flow_ops.cond(pred, fn1, fn2)
- grad = tf.gradients(r, [x])[0]
+ grad = gradients_impl.gradients(r, [x])[0]
result = grad.eval()
self.assertAllEqual(1.0, result)
def testCondGrad_2(self):
with self.test_session():
- c = tf.placeholder(tf.int32, shape=[])
- x = tf.constant(10.0)
- pred = tf.less(c, 2)
- fn1 = lambda: tf.mul(x, 42.0)
- fn2 = lambda: tf.mul(x, 3.0)
- r = tf.cond(pred, fn1, fn2)
+ c = array_ops.placeholder(dtypes.int32, shape=[])
+ x = constant_op.constant(10.0)
+ pred = math_ops.less(c, 2)
+ fn1 = lambda: math_ops.mul(x, 42.0)
+ fn2 = lambda: math_ops.mul(x, 3.0)
+ r = control_flow_ops.cond(pred, fn1, fn2)
- grad = tf.gradients(r, [x])[0]
+ grad = gradients_impl.gradients(r, [x])[0]
self.assertAllEqual(42.0, grad.eval(feed_dict={c: 1}))
self.assertAllEqual(3.0, grad.eval(feed_dict={c: 3}))
def testNestedCond_Simple(self):
with self.test_session():
- x = tf.constant(0., name="X")
- y = tf.cond(tf.constant(True),
- lambda: x,
- lambda: tf.cond(x < 1., lambda: x, lambda: x))
- result = tf.gradients(y, x)[0]
+ x = constant_op.constant(0., name="X")
+ y = control_flow_ops.cond(
+ constant_op.constant(True), lambda: x,
+ lambda: control_flow_ops.cond(x < 1., lambda: x, lambda: x))
+ result = gradients_impl.gradients(y, x)[0]
self.assertEqual(1.0, result.eval())
- z = tf.cond(tf.constant(False),
- lambda: x,
- lambda: tf.cond(x < 1., lambda: x, lambda: x))
- result = tf.gradients(z, x)[0]
+ z = control_flow_ops.cond(
+ constant_op.constant(False), lambda: x,
+ lambda: control_flow_ops.cond(x < 1., lambda: x, lambda: x))
+ result = gradients_impl.gradients(z, x)[0]
self.assertEqual(1.0, result.eval())
def testCondGrad_Gather(self):
with self.test_session() as sess:
- v1 = tf.Variable([1.0, 42.0])
- c = tf.placeholder(tf.int32, shape=[])
- pred = tf.less(c, 2)
- fn1 = lambda: tf.identity(v1)
- fn2 = lambda: tf.gather(v1, [1, 1])
- r = tf.cond(pred, fn1, fn2)
- grad = tf.gradients(r, [v1])[0]
- tf.global_variables_initializer().run()
+ v1 = variables.Variable([1.0, 42.0])
+ c = array_ops.placeholder(dtypes.int32, shape=[])
+ pred = math_ops.less(c, 2)
+ fn1 = lambda: array_ops.identity(v1)
+ fn2 = lambda: array_ops.gather(v1, [1, 1])
+ r = control_flow_ops.cond(pred, fn1, fn2)
+ grad = gradients_impl.gradients(r, [v1])[0]
+ variables.global_variables_initializer().run()
# Should just be [1, 1], but possibly a sparse representation
gv, gi = sess.run([grad.values, grad.indices], feed_dict={c: 1})
- dense_gv = [sum([y for (x, y) in zip(gi, gv) if x == i]) for i in range(2)
- ]
+ dense_gv = [
+ sum([y for (x, y) in zip(gi, gv) if x == i]) for i in range(2)
+ ]
self.assertAllEqual(dense_gv, [1.0, 1.0])
# Should be [0, 2], as the else forwards v1[1] twice
gv, gi = sess.run([grad.values, grad.indices], feed_dict={c: 3})
- dense_gv = [sum([y for (x, y) in zip(gi, gv) if x == i]) for i in range(2)
- ]
+ dense_gv = [
+ sum([y for (x, y) in zip(gi, gv) if x == i]) for i in range(2)
+ ]
self.assertAllEqual(dense_gv, [0.0, 2.0])
# Microbenchmark: 256,000 iterations/s.
def testWhile_1(self):
with self.test_session():
- n = tf.constant(0)
- c = lambda x: tf.less(x, 10000)
- b = lambda x: tf.add(x, 1)
- r = tf.while_loop(c, b, [n], parallel_iterations=20)
+ n = constant_op.constant(0)
+ c = lambda x: math_ops.less(x, 10000)
+ b = lambda x: math_ops.add(x, 1)
+ r = control_flow_ops.while_loop(c, b, [n], parallel_iterations=20)
self.assertEqual(10000, r.eval())
def testWhileWithRefs_1(self):
with self.test_session() as sess:
- x = tf.Variable(0)._ref() # pylint: disable=protected-access
- i = tf.constant(0)
- c = lambda i, x: tf.less(i, 100)
+ x = variables.Variable(0)._ref() # pylint: disable=protected-access
+ i = constant_op.constant(0)
+ c = lambda i, x: math_ops.less(i, 100)
self.assertEqual(x.dtype, dtypes.int32_ref)
def b(i, x):
self.assertEqual(x.dtype, dtypes.int32_ref)
- return (i+1, gen_array_ops._ref_identity(x))
+ return (i + 1, gen_array_ops._ref_identity(x))
- r = tf.while_loop(c, b, [i, x], parallel_iterations=5)
+ r = control_flow_ops.while_loop(c, b, [i, x], parallel_iterations=5)
- tf.global_variables_initializer().run()
+ variables.global_variables_initializer().run()
- self.assertEqual(r[0].dtype, tf.int32)
+ self.assertEqual(r[0].dtype, dtypes.int32)
self.assertEqual(r[1].dtype, dtypes.int32_ref)
value_i, value_x = sess.run(r)
@@ -591,7 +630,7 @@ class ControlFlowTest(tf.test.TestCase):
def testWhile_2(self):
with self.test_session():
- s = tf.constant(0)
+ s = constant_op.constant(0)
r = isum(s)
self.assertAllEqual(45, r.eval())
@@ -601,19 +640,19 @@ class ControlFlowTest(tf.test.TestCase):
with self.test_session():
def compute(i, m, c, o):
- m, c = [tf.add(m, 1), tf.add(c, 1)]
- o = tf.add(o, m)
- o = tf.add(o, c)
- i = tf.add(i, 1)
+ m, c = [math_ops.add(m, 1), math_ops.add(c, 1)]
+ o = math_ops.add(o, m)
+ o = math_ops.add(o, c)
+ i = math_ops.add(i, 1)
return [i, m, c, o]
- i = tf.convert_to_tensor(0)
- m = tf.convert_to_tensor(0)
- c = tf.convert_to_tensor(0)
- o = tf.convert_to_tensor(0)
- d = tf.convert_to_tensor(100)
- r = tf.while_loop(
- lambda i, m, c, o: tf.less(i, d), compute, [i, m, c, o])
+ i = ops.convert_to_tensor(0)
+ m = ops.convert_to_tensor(0)
+ c = ops.convert_to_tensor(0)
+ o = ops.convert_to_tensor(0)
+ d = ops.convert_to_tensor(100)
+ r = control_flow_ops.while_loop(lambda i, m, c, o: math_ops.less(i, d),
+ compute, [i, m, c, o])
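+ # On pass k the body sets m = c = k and adds both to o, so after the
+ # 100 passes o = 2 * (1 + 2 + ... + 100) = 10100.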
result = r[3].eval()
self.assertTrue(check_op_order(i.graph))
self.assertAllEqual(10100, result)
@@ -622,20 +661,20 @@ class ControlFlowTest(tf.test.TestCase):
with self.test_session():
def compute(i, m, c, o):
- m, c = [tf.gather(x, i), tf.gather(x, i)]
- o = tf.add(o, m)
- o = tf.add(o, c)
- i = tf.add(i, 1)
+ m, c = [array_ops.gather(x, i), array_ops.gather(x, i)]
+ o = math_ops.add(o, m)
+ o = math_ops.add(o, c)
+ i = math_ops.add(i, 1)
return [i, m, c, o]
- i = tf.convert_to_tensor(0)
- m = tf.convert_to_tensor(0)
- c = tf.convert_to_tensor(0)
- o = tf.convert_to_tensor(0)
- x = tf.convert_to_tensor([1, 2, 3, 4, 5, 6])
- s = tf.size(x)
- r = tf.while_loop(
- lambda i, m, c, o: tf.less(i, s), compute, [i, m, c, o])
+ i = ops.convert_to_tensor(0)
+ m = ops.convert_to_tensor(0)
+ c = ops.convert_to_tensor(0)
+ o = ops.convert_to_tensor(0)
+ x = ops.convert_to_tensor([1, 2, 3, 4, 5, 6])
+ s = array_ops.size(x)
+ r = control_flow_ops.while_loop(lambda i, m, c, o: math_ops.less(i, s),
+ compute, [i, m, c, o])
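+ # Each pass adds x[i] to o twice, so o = 2 * (1 + 2 + ... + 6) = 42.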
result = r[3].eval()
self.assertTrue(check_op_order(i.graph))
self.assertAllEqual(42, result)
@@ -644,31 +683,34 @@ class ControlFlowTest(tf.test.TestCase):
with self.test_session():
def compute(i, c, o):
- c = tf.strided_slice(x, tf.expand_dims(i, 0),
- [1] + tf.expand_dims(i, 0))
- o = tf.concat_v2([o, c], 0)
- i = tf.add(i, 1)
+ c = array_ops.strided_slice(x,
+ array_ops.expand_dims(i, 0),
+ [1] + array_ops.expand_dims(i, 0))
+ o = array_ops.concat_v2([o, c], 0)
+ i = math_ops.add(i, 1)
return [i, c, o]
- i = tf.convert_to_tensor(0)
- c = tf.convert_to_tensor([0])
- o = tf.convert_to_tensor([0])
- x = tf.convert_to_tensor([1, 2, 3, 4, 5, 6])
- s = tf.size(x)
- r = tf.while_loop(lambda i, c, o: tf.less(i, s), compute, [i, c, o], [
- i.get_shape(), tensor_shape.unknown_shape(),
- tensor_shape.unknown_shape()
- ])
+ i = ops.convert_to_tensor(0)
+ c = ops.convert_to_tensor([0])
+ o = ops.convert_to_tensor([0])
+ x = ops.convert_to_tensor([1, 2, 3, 4, 5, 6])
+ s = array_ops.size(x)
+ r = control_flow_ops.while_loop(lambda i, c, o: math_ops.less(i, s),
+ compute, [i, c, o], [
+ i.get_shape(),
+ tensor_shape.unknown_shape(),
+ tensor_shape.unknown_shape()
+ ])
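+ # Each pass appends the slice x[i:i + 1], growing o from [0] to
+ # [0, 1, 2, 3, 4, 5, 6].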
result = r[2].eval()
self.assertTrue(check_op_order(i.graph))
self.assertAllEqual(np.array([0, 1, 2, 3, 4, 5, 6]), result)
def _testWhile_Gpu_1(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
- n = tf.constant(1.0)
- c = lambda x: tf.less(x, 10.0)
- b = lambda x: tf.add(x, 1.0)
- r = tf.while_loop(c, b, [n])
+ n = constant_op.constant(1.0)
+ c = lambda x: math_ops.less(x, 10.0)
+ b = lambda x: math_ops.add(x, 1.0)
+ r = control_flow_ops.while_loop(c, b, [n])
self.assertAllClose(10.0, r.eval())
def testWhile_Gpu_1(self):
@@ -677,12 +719,14 @@ class ControlFlowTest(tf.test.TestCase):
def _testWhile_Gpu_2(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
- n = tf.constant(1.0)
- c = lambda x: tf.less(x, 10.0)
+ n = constant_op.constant(1.0)
+ c = lambda x: math_ops.less(x, 10.0)
+
def b(x):
- with tf.device("/cpu:0"):
- return tf.add(x, 1.0)
- r = tf.while_loop(c, b, [n])
+ with ops.device("/cpu:0"):
+ return math_ops.add(x, 1.0)
+
+ r = control_flow_ops.while_loop(c, b, [n])
self.assertAllClose(10.0, r.eval())
def testWhile_Gpu_2(self):
@@ -691,16 +735,18 @@ class ControlFlowTest(tf.test.TestCase):
def testWhileShape(self):
with self.test_session():
- i = tf.constant(0)
- m = tf.ones([2, 2])
- c = lambda i, j: tf.less(i, 2)
+ i = constant_op.constant(0)
+ m = array_ops.ones([2, 2])
+ c = lambda i, j: math_ops.less(i, 2)
+
def _b(i, j):
- new_i = tf.add(i, 1)
- new_j = tf.tile(j, [2, 2])
+ new_i = math_ops.add(i, 1)
+ new_j = array_ops.tile(j, [2, 2])
return [new_i, new_j]
- r = tf.while_loop(c, _b, [i, m],
- [i.get_shape(), tensor_shape.unknown_shape()])
- r = r[1] * tf.ones([8, 8])
+
+ r = control_flow_ops.while_loop(
+ c, _b, [i, m], [i.get_shape(), tensor_shape.unknown_shape()])
+ r = r[1] * array_ops.ones([8, 8])
self.assertAllEqual(np.ones((8, 8)), r.eval())
def testWhileWithNonTensorInput_Scalar(self):
@@ -708,99 +754,114 @@ class ControlFlowTest(tf.test.TestCase):
n = 0
c = lambda x: x < 10000
b = lambda x: x + 1
- r = tf.while_loop(c, b, [n], parallel_iterations=20)
+ r = control_flow_ops.while_loop(c, b, [n], parallel_iterations=20)
self.assertEqual(10000, r.eval())
def testWhileWithNonTensorInput_Vector(self):
with self.test_session():
n = np.array([0]) # Note, [0] would not work here; that is a list
c = lambda x: x[0] < 10000
- b = lambda x: tf.stack([x[0] + 1])
- r = tf.while_loop(c, b, [n], parallel_iterations=20)
+ b = lambda x: array_ops.stack([x[0] + 1])
+ r = control_flow_ops.while_loop(c, b, [n], parallel_iterations=20)
self.assertEqual([10000], r.eval())
def testWhileShapeInference(self):
with self.test_session():
- i = tf.constant(0)
- m = tf.ones([2, 2])
- c = lambda i, j: tf.less(i, 2)
+ i = constant_op.constant(0)
+ m = array_ops.ones([2, 2])
+ c = lambda i, j: math_ops.less(i, 2)
+
def b(i, j):
- new_i = tf.add(i, 1)
- new_j = tf.concat_v2([j, j], 0)
+ new_i = math_ops.add(i, 1)
+ new_j = array_ops.concat_v2([j, j], 0)
return [new_i, new_j]
- r = tf.while_loop(c, b, [i, m],
- [i.get_shape(), tensor_shape.TensorShape([None, 2])])
+
+ r = control_flow_ops.while_loop(
+ c, b, [i, m], [i.get_shape(), tensor_shape.TensorShape([None, 2])])
self.assertTrue(r[1].get_shape()[0].value is None)
- self.assertEqual(r[1].get_shape()[1], tf.Dimension(2))
+ self.assertEqual(r[1].get_shape()[1], tensor_shape.Dimension(2))
with self.assertRaisesRegexp(ValueError, "not an invariant for"):
- r = tf.while_loop(c, b, [i, m])
+ r = control_flow_ops.while_loop(c, b, [i, m])
def testWhileShapeInferenceSparseTensor(self):
with self.test_session():
- values = tf.constant([2.0, 4.0], name="values")
- indices = tf.constant([[0], [3]], dtype=tf.int64, name="indices")
- shape = tf.constant([10], dtype=tf.int64, name="dense_shape")
- i = tf.constant(0)
- x = tf.SparseTensor(indices, values, dense_shape=shape)
+ values = constant_op.constant([2.0, 4.0], name="values")
+ indices = constant_op.constant(
+ [[0], [3]], dtype=dtypes.int64, name="indices")
+ shape = constant_op.constant([10], dtype=dtypes.int64, name="dense_shape")
+ i = constant_op.constant(0)
+ x = sparse_tensor.SparseTensor(indices, values, dense_shape=shape)
+
def c(i, _):
return i < 10
+
def b(i, x):
- return [i + 1, tf.SparseTensor(x.indices, x.values * 2.0,
- x.dense_shape)]
- _, r = tf.while_loop(c, b, [i, x])
+ return [
+ i + 1, sparse_tensor.SparseTensor(x.indices, x.values * 2.0,
+ x.dense_shape)
+ ]
+
+ _, r = control_flow_ops.while_loop(c, b, [i, x])
self.assertEqual(r.dense_shape.get_shape()[0].value, 1)
- _, r = tf.while_loop(c, b, [i, x],
- [i.get_shape(), tensor_shape.TensorShape([None])])
+ _, r = control_flow_ops.while_loop(
+ c, b, [i, x], [i.get_shape(), tensor_shape.TensorShape([None])])
self.assertTrue(r.dense_shape.get_shape()[0].value is None)
with self.assertRaisesRegexp(ValueError, "is not compatible with"):
- _, r = tf.while_loop(c, b, [i, x],
- [i.get_shape(), tensor_shape.TensorShape([5])])
+ _, r = control_flow_ops.while_loop(
+ c, b, [i, x], [i.get_shape(), tensor_shape.TensorShape([5])])
def testWhileShapeInferenceIndexedSlices(self):
with self.test_session():
- values = tf.constant([[2.0, 4.0], [3.0, 5.0]], name="values")
- indices = tf.constant([0, 3], name="indices")
- shape = tf.constant([10, 2], name="dense_shape")
- i = tf.constant(0)
- x = tf.IndexedSlices(values, indices, dense_shape=shape)
+ values = constant_op.constant([[2.0, 4.0], [3.0, 5.0]], name="values")
+ indices = constant_op.constant([0, 3], name="indices")
+ shape = constant_op.constant([10, 2], name="dense_shape")
+ i = constant_op.constant(0)
+ x = ops.IndexedSlices(values, indices, dense_shape=shape)
+
def c(i, _):
return i < 10
+
def b(i, x):
- return [i + 1, tf.IndexedSlices(x.values * 2.0, x.indices,
- x.dense_shape)]
- _, r = tf.while_loop(c, b, [i, x])
+ return [
+ i + 1, ops.IndexedSlices(x.values * 2.0, x.indices, x.dense_shape)
+ ]
+
+ _, r = control_flow_ops.while_loop(c, b, [i, x])
self.assertEqual(r.dense_shape.get_shape()[0].value, 2)
self.assertEqual(r.values.get_shape(), tensor_shape.TensorShape([2, 2]))
- _, r = tf.while_loop(c, b, [i, x],
- [i.get_shape(), tensor_shape.TensorShape([None, 2])])
+ _, r = control_flow_ops.while_loop(
+ c, b, [i, x], [i.get_shape(), tensor_shape.TensorShape([None, 2])])
self.assertEqual(r.dense_shape.get_shape()[0].value, 2)
self.assertTrue(r.values.get_shape()[0].value is None)
self.assertEqual(r.values.get_shape()[1].value, 2)
with self.assertRaisesRegexp(ValueError, "is not compatible with"):
- _, r = tf.while_loop(c, b, [i, x],
- [i.get_shape(),
- tensor_shape.TensorShape([None, 5])])
+ _, r = control_flow_ops.while_loop(
+ c, b, [i, x], [i.get_shape(), tensor_shape.TensorShape([None, 5])])
def _testNestedWhile_1(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
- n = tf.constant(0)
+ n = constant_op.constant(0)
+
def cpu_sum(s):
- c = lambda i, s: tf.less(i, 10)
+ c = lambda i, s: math_ops.less(i, 10)
+
def b(i, s):
- i1 = tf.add(i, 1)
- with tf.device("/cpu:0"):
- s1 = tf.add(i, s)
+ i1 = math_ops.add(i, 1)
+ with ops.device("/cpu:0"):
+ s1 = math_ops.add(i, s)
return i1, s1
- _, r_s = tf.while_loop(c, b, [n, s])
+
+ _, r_s = control_flow_ops.while_loop(c, b, [n, s])
return r_s
- c = lambda x: tf.less(x, 200)
- b = lambda x: tf.add(x, cpu_sum(n))
- r = tf.while_loop(c, b, [n])
+
+ c = lambda x: math_ops.less(x, 200)
+ b = lambda x: math_ops.add(x, cpu_sum(n))
+ r = control_flow_ops.while_loop(c, b, [n])
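+ # cpu_sum(0) evaluates to 0 + 1 + ... + 9 = 45, so the outer loop
+ # advances 0 -> 45 -> 90 -> 135 -> 180 -> 225 before x < 200 fails.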
self.assertEqual(225, r.eval())
def testNestedWhile_1(self):
@@ -810,23 +871,29 @@ class ControlFlowTest(tf.test.TestCase):
def _testNestedWhile_2(self, use_gpu):
# Test the cases where A -> Enter and Exit -> A are partitioned.
with self.test_session(use_gpu=use_gpu):
- s0 = tf.constant(2.0)
+ s0 = constant_op.constant(2.0)
+
def inner_loop(s):
- c = lambda s: tf.less(s, 20.0)
+ c = lambda s: math_ops.less(s, 20.0)
+
def b(s):
- s1 = tf.add(s, s)
+ s1 = math_ops.add(s, s)
return s1
- r_s = tf.while_loop(c, b, [s], parallel_iterations=1)
+
+ r_s = control_flow_ops.while_loop(c, b, [s], parallel_iterations=1)
return r_s
- outer_c = lambda x: tf.less(x, 3000.0)
+ outer_c = lambda x: math_ops.less(x, 3000.0)
+
def outer_b(x):
- x = tf.Print(x, [x]) # Edge "Print -> Enter" is partitioned
+ x = logging_ops.Print(x, [x]) # Edge "Print -> Enter" is partitioned
x = inner_loop(x)
- with tf.device("/cpu:0"):
- x = tf.square(x) # Edge "Exit -> Square" is partitioned
+ with ops.device("/cpu:0"):
+ x = math_ops.square(x) # Edge "Exit -> Square" is partitioned
return x
- r = tf.while_loop(outer_c, outer_b, [s0], parallel_iterations=1)
+
+ r = control_flow_ops.while_loop(
+ outer_c, outer_b, [s0], parallel_iterations=1)
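+ # inner_loop doubles 2.0 up to 32.0, which squares to 1024.0; the next
+ # pass leaves 1024.0 unchanged (already >= 20) and squares it to
+ # 1048576.0, ending the loop.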
self.assertEqual(1048576.0, r.eval())
def testNestedWhile_2(self):
@@ -835,127 +902,140 @@ class ControlFlowTest(tf.test.TestCase):
def testWhileWithControl_1(self):
with self.test_session():
- n = tf.constant(0)
- r = tf.constant(0)
- condition = lambda n_, r_: tf.less(n_, 10)
+ n = constant_op.constant(0)
+ r = constant_op.constant(0)
+ condition = lambda n_, r_: math_ops.less(n_, 10)
def body(n_, r_):
- n_ = tf.add(n_, 1)
+ n_ = math_ops.add(n_, 1)
with r_.graph.control_dependencies([r_]):
- r_ = tf.constant(12)
+ r_ = constant_op.constant(12)
return [n_, r_]
- res = tf.while_loop(condition, body, [n, r], parallel_iterations=1)
+ res = control_flow_ops.while_loop(
+ condition, body, [n, r], parallel_iterations=1)
self.assertAllEqual(12, res[1].eval())
def testWhileWithControl_2(self):
with self.test_session():
- r = tf.constant(0)
- condition = lambda r_: tf.less(r_, 10)
+ r = constant_op.constant(0)
+ condition = lambda r_: math_ops.less(r_, 10)
def body(r_):
with r_.graph.control_dependencies([r_]):
- r_ = tf.constant(12)
+ r_ = constant_op.constant(12)
return [r_]
- res = tf.while_loop(condition, body, [r], parallel_iterations=1)
+ res = control_flow_ops.while_loop(
+ condition, body, [r], parallel_iterations=1)
self.assertAllEqual(12, res.eval())
def testWhileWithControl_3(self):
with self.test_session() as sess:
- b = tf.placeholder(tf.bool)
- c = tf.constant(1)
- x0 = tf.constant(0)
- with tf.control_dependencies([b]):
- r = tf.while_loop(lambda x: x < 10, lambda x: x + c, [x0])
+ b = array_ops.placeholder(dtypes.bool)
+ c = constant_op.constant(1)
+ x0 = constant_op.constant(0)
+ with ops.control_dependencies([b]):
+ r = control_flow_ops.while_loop(lambda x: x < 10, lambda x: x + c, [x0])
self.assertEqual(10, sess.run(r, {b: True}))
def testWhileWithControl_4(self):
with self.test_session() as sess:
- b = tf.placeholder(tf.bool)
- c = tf.constant(1)
- x0 = tf.constant(0)
- with tf.control_dependencies([b]):
- r = tf.while_loop(lambda x: x < 10, lambda x: x + tf.identity(c), [x0])
+ b = array_ops.placeholder(dtypes.bool)
+ c = constant_op.constant(1)
+ x0 = constant_op.constant(0)
+ with ops.control_dependencies([b]):
+ r = control_flow_ops.while_loop(
+ lambda x: x < 10, lambda x: x + array_ops.identity(c), [x0])
self.assertEqual(10, sess.run(r, {b: True}))
def testWhileWithControl_5(self):
with self.test_session() as sess:
- b = tf.placeholder(tf.bool)
- c = tf.constant(1)
- x0 = tf.constant(0)
+ b = array_ops.placeholder(dtypes.bool)
+ c = constant_op.constant(1)
+ x0 = constant_op.constant(0)
def body(x):
- with tf.control_dependencies([b]):
+ with ops.control_dependencies([b]):
return x + c
- r = tf.while_loop(lambda x: x < 10, body, [x0])
+
+ r = control_flow_ops.while_loop(lambda x: x < 10, body, [x0])
self.assertEqual(10, sess.run(r, {b: True}))
def testWhileCondWithControl(self):
# Ensure that no control edges from an outer control dependency context are
# added to nodes inside cond/while contexts.
with self.test_session() as sess:
- const_true = lambda: tf.constant(True)
- const_false = lambda: tf.constant(False)
- cond = lambda i: tf.cond(i > 0, const_true, const_false)
- body = lambda i: tf.cond(i > 0, lambda: i - 1, lambda: i)
-
- with tf.control_dependencies([tf.no_op()]):
- loop = tf.while_loop(cond, body, (tf.constant(5),))
+ const_true = lambda: constant_op.constant(True)
+ const_false = lambda: constant_op.constant(False)
+ cond = lambda i: control_flow_ops.cond(i > 0, const_true, const_false)
+ body = lambda i: control_flow_ops.cond(i > 0, lambda: i - 1, lambda: i)
+
+ with ops.control_dependencies([control_flow_ops.no_op()]):
+ loop = control_flow_ops.while_loop(cond, body,
+ (constant_op.constant(5),))
self.assertEqual(0, sess.run(loop))
def testWhileCondExitControl(self):
with self.test_session():
- v = tf.Variable(1)
+ v = variables.Variable(1)
+
def false_branch():
cond = lambda i: i < 100
+
def body(i):
- x = tf.assign(v, i)
+ x = state_ops.assign(v, i)
return x + 1
- loop = tf.while_loop(cond, body, [0])
+
+ loop = control_flow_ops.while_loop(cond, body, [0])
# Make sure the control edge from Exit to a node is handled correctly.
- with tf.control_dependencies([loop]):
- return tf.constant(6.0)
- r = tf.cond(tf.constant(False),
- lambda: tf.constant(1.0),
- false_branch)
- tf.global_variables_initializer().run()
+ with ops.control_dependencies([loop]):
+ return constant_op.constant(6.0)
+
+ r = control_flow_ops.cond(
+ constant_op.constant(False), lambda: constant_op.constant(1.0),
+ false_branch)
+ variables.global_variables_initializer().run()
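+ # The predicate is False, so false_branch runs: its loop assigns
+ # 0 through 99 to v, then the Exit control edge gates the constant 6.0.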
self.assertEqual(6.0, r.eval())
self.assertEqual(99, v.eval())
def testCondWhile_1(self):
with self.test_session():
- n = tf.convert_to_tensor(0, name="n")
- c = lambda x: tf.less(x, 10)
- b = lambda x: tf.add(x, 1)
- r = tf.cond(tf.less(0, 1),
- lambda: tf.while_loop(c, b, [n]),
- lambda: n)
+ n = ops.convert_to_tensor(0, name="n")
+ c = lambda x: math_ops.less(x, 10)
+ b = lambda x: math_ops.add(x, 1)
+ r = control_flow_ops.cond(
+ math_ops.less(0, 1), lambda: control_flow_ops.while_loop(c, b, [n]),
+ lambda: n)
self.assertAllEqual(10, r.eval())
def testCondWhile_2(self):
with self.test_session():
- n = tf.convert_to_tensor(0)
- c = lambda x: tf.less(x, 10)
- b = lambda x: tf.add(x, 1)
- r = tf.cond(tf.less(1, 0), lambda: tf.add(n, 1),
- lambda: tf.while_loop(c, b, [n]))
+ n = ops.convert_to_tensor(0)
+ c = lambda x: math_ops.less(x, 10)
+ b = lambda x: math_ops.add(x, 1)
+ r = control_flow_ops.cond(
+ math_ops.less(1, 0), lambda: math_ops.add(n, 1),
+ lambda: control_flow_ops.while_loop(c, b, [n]))
self.assertAllEqual(10, r.eval())
def _testCondWhile_3(self, use_gpu):
with self.test_session(use_gpu=use_gpu) as sess:
- p = tf.placeholder(tf.bool)
- n = tf.constant(0.0)
+ p = array_ops.placeholder(dtypes.bool)
+ n = constant_op.constant(0.0)
+
def c(x):
- return tf.less(x, 10.0)
+ return math_ops.less(x, 10.0)
+
def b(x):
- with tf.device("/cpu:0"):
- x1 = tf.add(x, 1.0)
+ with ops.device("/cpu:0"):
+ x1 = math_ops.add(x, 1.0)
return x1
- r = tf.cond(p,
- lambda: tf.while_loop(c, b, [n]),
- lambda: tf.mul(n, 2.0))
- r1 = tf.gradients(r, [n])
+
+ r = control_flow_ops.cond(p,
+ lambda: control_flow_ops.while_loop(c, b, [n]),
+ lambda: math_ops.mul(n, 2.0))
+ r1 = gradients_impl.gradients(r, [n])
self.assertEqual(10, sess.run(r, {p: True}))
self.assertEqual([1.0], sess.run(r1, {p: True}))
self.assertEqual(0.0, sess.run(r, {p: False}))
@@ -967,83 +1047,83 @@ class ControlFlowTest(tf.test.TestCase):
def testWhileCond_1(self):
with self.test_session():
- i = tf.convert_to_tensor(0, name="i")
- n = tf.convert_to_tensor(10, name="n")
- one = tf.convert_to_tensor(1, name="one")
- c = lambda x: tf.less(x, n)
+ i = ops.convert_to_tensor(0, name="i")
+ n = ops.convert_to_tensor(10, name="n")
+ one = ops.convert_to_tensor(1, name="one")
+ c = lambda x: math_ops.less(x, n)
# pylint: disable=undefined-variable
# for OSS build
- b = lambda x: tf.cond(
- tf.constant(True), lambda: tf.add(x, one), lambda: tf.sub(x, one))
+ b = lambda x: control_flow_ops.cond(
+ constant_op.constant(True), lambda: math_ops.add(x, one), lambda: math_ops.sub(x, one))
# pylint: enable=undefined-variable
- r = tf.while_loop(c, b, [i])
+ r = control_flow_ops.while_loop(c, b, [i])
self.assertAllEqual(10, r.eval())
def testWhileCond_2(self):
with self.test_session():
- n = tf.convert_to_tensor(0, name="n")
- c = lambda x: tf.less(x, 10)
- b = lambda x: tf.cond(tf.constant(True), lambda: tf.add(x, 1), lambda: n)
- r = tf.while_loop(c, b, [n])
+ n = ops.convert_to_tensor(0, name="n")
+ c = lambda x: math_ops.less(x, 10)
+ b = lambda x: control_flow_ops.cond(constant_op.constant(True), lambda: math_ops.add(x, 1), lambda: n)
+ r = control_flow_ops.while_loop(c, b, [n])
self.assertAllEqual(10, r.eval())
def testWhileCond_3(self):
with self.test_session():
- n = tf.convert_to_tensor(0)
- c = lambda x: tf.less(x, 10)
+ n = ops.convert_to_tensor(0)
+ c = lambda x: math_ops.less(x, 10)
# pylint: disable=undefined-variable
# for OSS build
- b = lambda x: tf.cond(tf.less(0, 1), lambda: tf.add(x, 1),
- lambda: tf.sub(x, 1))
+ b = lambda x: control_flow_ops.cond(math_ops.less(0, 1), lambda: math_ops.add(x, 1),
+ lambda: math_ops.sub(x, 1))
# pylint: enable=undefined-variable
- r = tf.while_loop(c, b, [n])
+ r = control_flow_ops.while_loop(c, b, [n])
self.assertAllEqual(10, r.eval())
# NOTE: It is ok to have parallel_iterations > 1
def testWhileUpdateVariable_1(self):
with self.test_session():
- select = tf.Variable([3.0, 4.0, 5.0])
- n = tf.constant(0)
+ select = variables.Variable([3.0, 4.0, 5.0])
+ n = constant_op.constant(0)
def loop_iterator(j):
- return tf.less(j, 3)
+ return math_ops.less(j, 3)
def loop_body(j):
- ns = tf.scatter_update(select, j, 10.0)
- nj = tf.add(j, 1)
+ ns = state_ops.scatter_update(select, j, 10.0)
+ nj = math_ops.add(j, 1)
op = control_flow_ops.group(ns)
nj = control_flow_ops.with_dependencies([op], nj)
return [nj]
- r = tf.while_loop(loop_iterator, loop_body, [n],
- parallel_iterations=1)
+ r = control_flow_ops.while_loop(
+ loop_iterator, loop_body, [n], parallel_iterations=1)
self.assertTrue(check_op_order(n.graph))
- tf.global_variables_initializer().run()
+ variables.global_variables_initializer().run()
self.assertEqual(3, r.eval())
result = select.eval()
self.assertAllClose(np.array([10.0, 10.0, 10.0]), result)
def testWhileUpdateVariable_2(self):
with self.test_session():
- select1 = tf.Variable([3.0, 4.0, 5.0])
- select2 = tf.Variable([3.0, 4.0, 5.0])
- n = tf.constant(0)
+ select1 = variables.Variable([3.0, 4.0, 5.0])
+ select2 = variables.Variable([3.0, 4.0, 5.0])
+ n = constant_op.constant(0)
def loop_iterator(j):
- return tf.less(j, 3)
+ return math_ops.less(j, 3)
def loop_body(j):
- ns1 = tf.scatter_update(select1, j, 10.0)
- ns2 = tf.scatter_update(select2, j, 10.0)
- nj = tf.add(j, 1)
+ ns1 = state_ops.scatter_update(select1, j, 10.0)
+ ns2 = state_ops.scatter_update(select2, j, 10.0)
+ nj = math_ops.add(j, 1)
op = control_flow_ops.group(ns1, ns2)
nj = control_flow_ops.with_dependencies([op], nj)
return [nj]
- r = tf.while_loop(loop_iterator, loop_body, [n],
- parallel_iterations=1)
+ r = control_flow_ops.while_loop(
+ loop_iterator, loop_body, [n], parallel_iterations=1)
self.assertTrue(check_op_order(n.graph))
- tf.global_variables_initializer().run()
+ variables.global_variables_initializer().run()
self.assertEqual(3, r.eval())
result1 = select1.eval()
self.assertAllClose(np.array([10.0, 10.0, 10.0]), result1)
@@ -1052,21 +1132,22 @@ class ControlFlowTest(tf.test.TestCase):
def testWhileUpdateVariable_3(self):
with self.test_session():
- select = tf.Variable([3.0, 4.0, 5.0])
- n = tf.constant(0)
+ select = variables.Variable([3.0, 4.0, 5.0])
+ n = constant_op.constant(0)
def loop_iterator(j, _):
- return tf.less(j, 3)
+ return math_ops.less(j, 3)
def loop_body(j, _):
- ns = tf.scatter_update(select, j, 10.0)
- nj = tf.add(j, 1)
+ ns = state_ops.scatter_update(select, j, 10.0)
+ nj = math_ops.add(j, 1)
return [nj, ns]
- r = tf.while_loop(loop_iterator, loop_body,
- [n, tf.identity(select)],
- parallel_iterations=1)
- tf.global_variables_initializer().run()
+ r = control_flow_ops.while_loop(
+ loop_iterator,
+ loop_body, [n, array_ops.identity(select)],
+ parallel_iterations=1)
+ variables.global_variables_initializer().run()
result = r[1].eval()
self.assertTrue(check_op_order(n.graph))
self.assertAllClose(np.array([10.0, 10.0, 10.0]), result)
@@ -1074,24 +1155,26 @@ class ControlFlowTest(tf.test.TestCase):
# b/24814703
def testWhileUpdateVariable_4(self):
with self.test_session():
- var_a = tf.Variable(0, name="a")
- var_b = tf.Variable(0, name="b")
- tf.global_variables_initializer().run()
- c = tf.constant(0, name="c")
- asn1 = tf.assign_add(var_a, 1, name="a_add")
+ var_a = variables.Variable(0, name="a")
+ var_b = variables.Variable(0, name="b")
+ variables.global_variables_initializer().run()
+
+ c = constant_op.constant(0, name="c")
+ asn1 = state_ops.assign_add(var_a, 1, name="a_add")
# Loop condition
def pred(i):
- return tf.less(i, 10)
+ return math_ops.less(i, 10)
+
# Loop body
def loop_body(i):
- asn2 = tf.assign_add(var_b, asn1, name="b_add")
- with tf.control_dependencies([asn2]):
- ni = tf.add(i, 1, name="i_add")
+ asn2 = state_ops.assign_add(var_b, asn1, name="b_add")
+ with ops.control_dependencies([asn2]):
+ ni = math_ops.add(i, 1, name="i_add")
return ni
- lpa = tf.while_loop(pred, loop_body, [c],
- parallel_iterations=1)
+ lpa = control_flow_ops.while_loop(
+ pred, loop_body, [c], parallel_iterations=1)
self.assertEqual(0, var_b.eval())
lpa.eval() # Run the loop
@@ -1101,24 +1184,26 @@ class ControlFlowTest(tf.test.TestCase):
def testWhileUpdateVariable_5(self):
with self.test_session():
# Create some variables.
- var_a = tf.Variable(0, name="a")
- var_b = tf.Variable(0, name="b")
- tf.global_variables_initializer().run()
+ var_a = variables.Variable(0, name="a")
+ var_b = variables.Variable(0, name="b")
+ variables.global_variables_initializer().run()
# Change condition to check var_b
def pred(_):
- return tf.less(var_b, 10)
+ return math_ops.less(var_b, 10)
# Change body to increment var_b
def loop_body(i):
- asn1 = tf.assign_add(var_a, tf.constant(1), name="a_add")
- asn2 = tf.assign_add(var_b, tf.constant(1), name="b_add")
- with tf.control_dependencies([asn1, asn2]):
- inc_b = tf.identity(var_b)
+ asn1 = state_ops.assign_add(
+ var_a, constant_op.constant(1), name="a_add")
+ asn2 = state_ops.assign_add(
+ var_b, constant_op.constant(1), name="b_add")
+ with ops.control_dependencies([asn1, asn2]):
+ inc_b = array_ops.identity(var_b)
return inc_b
- lpa = tf.while_loop(pred, loop_body, [var_b], parallel_iterations=1,
- name="loop")
+ lpa = control_flow_ops.while_loop(
+ pred, loop_body, [var_b], parallel_iterations=1, name="loop")
self.assertEqual(0, var_b.eval())
lpa.eval() # Run the loop
@@ -1129,26 +1214,26 @@ class ControlFlowTest(tf.test.TestCase):
def testWhileUpdateVariable_6(self):
with self.test_session():
# Create some variables.
- var_a = tf.Variable(0, name="a")
- var_b = tf.Variable(0, name="b")
- c = tf.constant(0)
- tf.global_variables_initializer().run()
+ var_a = variables.Variable(0, name="a")
+ var_b = variables.Variable(0, name="b")
+ c = constant_op.constant(0)
+ variables.global_variables_initializer().run()
# Loop condition
def pred(i):
- return tf.less(i, 10)
+ return math_ops.less(i, 10)
# Loop body
def loop_body(i):
- asn1 = tf.assign_add(var_a, 1, name="a_add")
- with tf.control_dependencies([asn1]):
- asn2 = tf.assign_add(var_b, var_a, name="b_add")
- with tf.control_dependencies([asn2]):
- ni = tf.add(i, 1, name="i_add")
+ asn1 = state_ops.assign_add(var_a, 1, name="a_add")
+ with ops.control_dependencies([asn1]):
+ asn2 = state_ops.assign_add(var_b, var_a, name="b_add")
+ with ops.control_dependencies([asn2]):
+ ni = math_ops.add(i, 1, name="i_add")
return ni
- lpa = tf.while_loop(pred, loop_body, [c], parallel_iterations=1,
- name="loop")
+ lpa = control_flow_ops.while_loop(
+ pred, loop_body, [c], parallel_iterations=1, name="loop")
self.assertEqual(0, var_b.eval())
lpa.eval() # Run the loop
@@ -1157,57 +1242,66 @@ class ControlFlowTest(tf.test.TestCase):
def testWhileQueue_1(self):
with self.test_session():
- q = tf.FIFOQueue(-1, tf.int32)
- i = tf.constant(0)
+ q = data_flow_ops.FIFOQueue(-1, dtypes.int32)
+ i = constant_op.constant(0)
def c(i):
- return tf.less(i, 10)
+ return math_ops.less(i, 10)
def b(i):
- ni = tf.add(i, 1)
+ ni = math_ops.add(i, 1)
ni = control_flow_ops.with_dependencies([q.enqueue((i,))], ni)
return ni
- r = tf.while_loop(c, b, [i], parallel_iterations=1)
+ r = control_flow_ops.while_loop(c, b, [i], parallel_iterations=1)
self.assertEqual([10], r.eval())
for i in xrange(10):
self.assertEqual([i], q.dequeue().eval())
def testWhileStack_1(self):
with self.test_session():
- s = gen_data_flow_ops._stack(tf.int32, stack_name="foo")
- i = tf.constant(0)
+ s = gen_data_flow_ops._stack(dtypes.int32, stack_name="foo")
+ i = constant_op.constant(0)
def c(i):
- return tf.less(i, 10)
+ return math_ops.less(i, 10)
+
def b(i):
- ni = tf.add(i, 1)
+ ni = math_ops.add(i, 1)
ni = control_flow_ops.with_dependencies(
[gen_data_flow_ops._stack_push(s, i)], ni)
return ni
- r = tf.while_loop(c, b, [i], parallel_iterations=1)
- x = tf.constant(0)
+ r = control_flow_ops.while_loop(c, b, [i], parallel_iterations=1)
+
+ x = constant_op.constant(0)
+
def c1(i, _):
- return tf.greater(i, 0)
+ return math_ops.greater(i, 0)
+
def b1(i, x):
- ni = tf.sub(i, 1)
- nx = x + gen_data_flow_ops._stack_pop(s, tf.int32)
+ ni = math_ops.sub(i, 1)
+ nx = x + gen_data_flow_ops._stack_pop(s, dtypes.int32)
return [ni, nx]
- _, rx = tf.while_loop(c1, b1, [r, x],
- [r.get_shape(), tensor_shape.unknown_shape()],
- parallel_iterations=1)
+
+ _, rx = control_flow_ops.while_loop(
+ c1,
+ b1, [r, x], [r.get_shape(), tensor_shape.unknown_shape()],
+ parallel_iterations=1)
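+ # The first loop pushed 0..9; popping and summing gives 45.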
self.assertEqual(45, rx.eval())
def _testWhileGrad_ColocateGradients(self, colocate):
- with self.test_session(graph=tf.Graph()) as sess:
- v = tf.constant(2.0, name="v")
- c = lambda v: tf.less(v, 100.0)
+ with self.test_session(graph=ops.Graph()) as sess:
+ v = constant_op.constant(2.0, name="v")
+ c = lambda v: math_ops.less(v, 100.0)
+
def b(x):
- with tf.device("/gpu:0"):
- return tf.square(x)
- loop = tf.while_loop(c, b, [v], parallel_iterations=1)
- r = tf.gradients(loop, v, colocate_gradients_with_ops=colocate)[0]
+ with ops.device("/gpu:0"):
+ return math_ops.square(x)
+
+ loop = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)
+ r = gradients_impl.gradients(
+ loop, v, colocate_gradients_with_ops=colocate)[0]
r_ops = r.graph.get_operations()
r_devices = [(op.name, op.device.lower()) for op in r_ops]
@@ -1230,73 +1324,74 @@ class ControlFlowTest(tf.test.TestCase):
def testWhileGrad_Square(self):
with self.test_session():
- v = tf.constant(2.0, name="v")
- c = lambda v: tf.less(v, 100.0)
- b = tf.square
- r = tf.while_loop(c, b, [v], parallel_iterations=1)
- r = tf.cond(tf.less(1, 2), lambda: r, lambda: v)
+ v = constant_op.constant(2.0, name="v")
+ c = lambda v: math_ops.less(v, 100.0)
+ b = math_ops.square
+ r = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)
+ r = control_flow_ops.cond(math_ops.less(1, 2), lambda: r, lambda: v)
- r = tf.gradients(r, v)[0]
+ r = gradients_impl.gradients(r, v)[0]
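+ # The loop maps v -> v**8 (2.0 -> 256.0), so dr/dv = 8 * v**7 = 1024.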
self.assertAllClose(1024.0, r.eval())
def testWhileGrad_Shape(self):
with self.test_session():
- x = tf.placeholder(tf.float32, shape=[None])
- v = tf.constant([2.0], name="v")
- n = tf.constant(0, name="n")
- c = lambda i, v: tf.less(i, 5)
- b = lambda i, v: [i + 1, tf.mul(x, v)]
- r = tf.while_loop(c, b, [n, v],
- [n.get_shape(), tensor_shape.unknown_shape()],
- parallel_iterations=1)
-
- r = tf.gradients(r[1], x)[0]
+ x = array_ops.placeholder(dtypes.float32, shape=[None])
+ v = constant_op.constant([2.0], name="v")
+ n = constant_op.constant(0, name="n")
+ c = lambda i, v: math_ops.less(i, 5)
+ b = lambda i, v: [i + 1, math_ops.mul(x, v)]
+ r = control_flow_ops.while_loop(
+ c,
+ b, [n, v], [n.get_shape(), tensor_shape.unknown_shape()],
+ parallel_iterations=1)
+
+ r = gradients_impl.gradients(r[1], x)[0]
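+ # After five iterations r[1] = 2 * x**5 elementwise, so the summed
+ # gradient is 10 * x**4: 810 at x = 3 and 2560 at x = 4.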
self.assertEqual([None], r.get_shape().as_list())
self.assertAllClose([810.0, 2560.0], r.eval(feed_dict={x: [3.0, 4.0]}))
def testWhileGrad_BaseShape(self):
with self.test_session() as sess:
- x = tf.placeholder(tf.float32, [None])
- v0 = tf.constant([2.0, 2.0], name="v")
- c = lambda v: tf.constant(False)
- b = lambda v: tf.mul(v, x)
- r = tf.while_loop(c, b, [v0])
- y = tf.square(x)
-
- r = tf.gradients([r, y], x)[0]
+ x = array_ops.placeholder(dtypes.float32, [None])
+ v0 = constant_op.constant([2.0, 2.0], name="v")
+ c = lambda v: constant_op.constant(False)
+ b = lambda v: math_ops.mul(v, x)
+ r = control_flow_ops.while_loop(c, b, [v0])
+ y = math_ops.square(x)
+
+ r = gradients_impl.gradients([r, y], x)[0]
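+ # The condition is always False, so r = v0 does not depend on x and
+ # only y = x**2 contributes: dy/dx = 2 * x.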
self.assertAllClose([2.0, 4.0], sess.run(r, feed_dict={x: [1.0, 2.0]}))
def testWhileGrad_MultipleUses(self):
with self.test_session():
- v = tf.constant(2.0, name="v")
- c = lambda v: tf.less(v, 100.0)
- b = tf.square
- r = tf.while_loop(c, b, [v], parallel_iterations=1)
- r = tf.mul(r, r)
+ v = constant_op.constant(2.0, name="v")
+ c = lambda v: math_ops.less(v, 100.0)
+ b = math_ops.square
+ r = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)
+ r = math_ops.mul(r, r)
- r = tf.gradients(r, v)[0]
+ r = gradients_impl.gradients(r, v)[0]
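+ # r = v**8 after the loop and r * r = v**16, so the gradient is
+ # 16 * v**15 = 524288 at v = 2.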
self.assertEqual(524288.0, r.eval())
def testWhileGrad_LoopAdd(self):
with self.test_session():
- v = tf.constant(2.0, name="v")
- c = lambda v: tf.less(v, 100.0)
- b = tf.square
- r = tf.while_loop(c, b, [v], parallel_iterations=1)
- r = tf.add(r, r)
+ v = constant_op.constant(2.0, name="v")
+ c = lambda v: math_ops.less(v, 100.0)
+ b = math_ops.square
+ r = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)
+ r = math_ops.add(r, r)
- r = tf.gradients(r, v)[0]
+ r = gradients_impl.gradients(r, v)[0]
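+ # r + r = 2 * v**8, so the gradient is 16 * v**7 = 2048 at v = 2.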
self.assertAllClose(2048.0, r.eval())
def _testWhileGrad_Mul(self, use_gpu, p_iters):
with self.test_session(use_gpu=use_gpu) as sess:
- a = tf.constant(3.0, name="a")
- v = tf.constant(2.0, name="v")
- c = lambda v: tf.less(v, 100.0)
- b = lambda v: tf.mul(v, a)
- r = tf.while_loop(c, b, [v], parallel_iterations=p_iters)
+ a = constant_op.constant(3.0, name="a")
+ v = constant_op.constant(2.0, name="v")
+ c = lambda v: math_ops.less(v, 100.0)
+ b = lambda v: math_ops.mul(v, a)
+ r = control_flow_ops.while_loop(c, b, [v], parallel_iterations=p_iters)
- grad_a, grad_v = tf.gradients(r, [a, v])
+ grad_a, grad_v = gradients_impl.gradients(r, [a, v])
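+ # Four multiplies give r = v * a**4 = 162, so dr/da = 4 * v * a**3 = 216
+ # and dr/dv = a**4 = 81.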
grad_a_val, grad_v_val = sess.run([grad_a, grad_v])
self.assertAllClose(216.0, grad_a_val)
self.assertAllClose(81.0, grad_v_val)
@@ -1309,222 +1404,254 @@ class ControlFlowTest(tf.test.TestCase):
def testWhileGrad_Variable(self):
with self.test_session():
- a = tf.Variable(3.0)
- v = tf.constant(2.0, name="v")
- c = lambda v: tf.less(v, 100.0)
- b = lambda v: tf.mul(v, a)
- r = tf.while_loop(c, b, [v], parallel_iterations=1)
+ a = variables.Variable(3.0)
+ v = constant_op.constant(2.0, name="v")
+ c = lambda v: math_ops.less(v, 100.0)
+ b = lambda v: math_ops.mul(v, a)
+ r = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)
- r = tf.gradients(r, a)
- tf.global_variables_initializer().run()
+ r = gradients_impl.gradients(r, a)
+ variables.global_variables_initializer().run()
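+ # As in _testWhileGrad_Mul, r = v * a**4, so dr/da = 4 * v * a**3 = 216.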
self.assertAllClose(216.0, r[0].eval())
def testWhileGradInCond(self):
with self.test_session():
- n = tf.convert_to_tensor(1.0, name="n")
- x = tf.placeholder(tf.float32, shape=None)
- c = lambda n: tf.less(n, 10.0)
- b = lambda n: tf.add(n, x)
+ n = ops.convert_to_tensor(1.0, name="n")
+ x = array_ops.placeholder(dtypes.float32, shape=None)
+ c = lambda n: math_ops.less(n, 10.0)
+ b = lambda n: math_ops.add(n, x)
+
def fn1():
- r = tf.while_loop(c, b, [n], [tensor_shape.unknown_shape()])
- return tf.gradients(r, x)
- r = tf.cond(tf.less(1, 2), fn1, lambda: x)
+ r = control_flow_ops.while_loop(c, b, [n],
+ [tensor_shape.unknown_shape()])
+ return gradients_impl.gradients(r, x)
+
+ r = control_flow_ops.cond(math_ops.less(1, 2), fn1, lambda: x)
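+ # The loop adds x to n nine times (1.0 -> 10.0), so dr/dx = 9.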
self.assertAllClose(9.0, r.eval(feed_dict={x: 1.0}))
def testWhileGradInWhile(self):
with self.test_session():
- n = tf.convert_to_tensor(1.0, name="n")
- x = tf.placeholder(tf.float32, shape=None)
- c = lambda n: tf.less(n, 10.0)
- b = lambda n: tf.add(n, x)
+ n = ops.convert_to_tensor(1.0, name="n")
+ x = array_ops.placeholder(dtypes.float32, shape=None)
+ c = lambda n: math_ops.less(n, 10.0)
+ b = lambda n: math_ops.add(n, x)
+
def b1(n):
- r = tf.while_loop(c, b, [n], [tensor_shape.unknown_shape()])
- return tf.gradients(r, x)
- r = tf.while_loop(lambda n: n < 6.0, b1, [n],
- [tensor_shape.unknown_shape()])
+ r = control_flow_ops.while_loop(c, b, [n],
+ [tensor_shape.unknown_shape()])
+ return gradients_impl.gradients(r, x)
+
+ r = control_flow_ops.while_loop(lambda n: n < 6.0, b1, [n],
+ [tensor_shape.unknown_shape()])
self.assertAllClose(9.0, r.eval(feed_dict={x: 1.0}))
def testWhile_NestedInput(self):
with self.test_session() as sess:
named = collections.namedtuple("named", ("a", "b"))
- loop_vars = [named(a=tf.constant(0.0), b=tf.constant(1.0)),
- (tf.constant(2.0), tf.constant(3.0)),
- tf.constant(4.0)]
+ loop_vars = [
+ named(
+ a=constant_op.constant(0.0), b=constant_op.constant(1.0)),
+ (constant_op.constant(2.0), constant_op.constant(3.0)),
+ constant_op.constant(4.0)
+ ]
c = lambda lv0, _1, _2: lv0.a < 100.0
+
def b(lv0, lv1, lv2):
lv0 = named(a=lv0.a + 1, b=lv0.b)
lv1 = (lv1[0] + 1, lv1[1])
lv2 += 2
return [lv0, lv1, lv2]
- r = tf.while_loop(c, b, loop_vars)
+
+ r = control_flow_ops.while_loop(c, b, loop_vars)
self.assertTrue(isinstance(r, list))
self.assertTrue(isinstance(r[0], named))
self.assertTrue(isinstance(r[1], tuple))
- self.assertTrue(isinstance(r[2], tf.Tensor))
+ self.assertTrue(isinstance(r[2], ops.Tensor))
r_flattened = nest.flatten(r)
- self.assertEqual(
- [100.0, 1.0, 102.0, 3.0, 4.0 + 100*2.0],
- sess.run(r_flattened))
+ self.assertEqual([100.0, 1.0, 102.0, 3.0, 4.0 + 100 * 2.0],
+ sess.run(r_flattened))
def testWhile_NestedBadArityFails(self):
with self.test_session():
named = collections.namedtuple("named", ("a", "b"))
- loop_vars = [named(a=tf.constant(0.0), b=tf.constant(1.0)),
- (tf.constant(2.0), tf.constant(3.0)),
- tf.constant(4.0)]
+ loop_vars = [
+ named(
+ a=constant_op.constant(0.0), b=constant_op.constant(1.0)),
+ (constant_op.constant(2.0), constant_op.constant(3.0)),
+ constant_op.constant(4.0)
+ ]
c = lambda lv0, _1, _2: lv0.a < 100.0
+
def b(lv0, lv1, _):
return [lv0, lv1]
with self.assertRaisesRegexp(ValueError, "the same number of elements"):
- tf.while_loop(c, b, loop_vars)
+ control_flow_ops.while_loop(c, b, loop_vars)
def testWhileGrad_ys_xs(self):
with self.test_session():
- x = tf.constant(3.0, name="x")
- y = tf.constant(2.0, name="y")
- c = lambda x, y: tf.less(x, 100.0)
+ x = constant_op.constant(3.0, name="x")
+ y = constant_op.constant(2.0, name="y")
+
+ c = lambda x, y: math_ops.less(x, 100.0)
def b(x, y):
- y1 = tf.add(x, y)
- x1 = tf.mul(x, y1)
+ y1 = math_ops.add(x, y)
+ x1 = math_ops.mul(x, y1)
return x1, y1
- rx, ry = tf.while_loop(c, b, [x, y], parallel_iterations=1)
- r = tf.gradients([rx, ry], x)
+ rx, ry = control_flow_ops.while_loop(c, b, [x, y], parallel_iterations=1)
+
+ r = gradients_impl.gradients([rx, ry], x)
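+ # Two iterations give rx = (x**2 + x*y) * (x**2 + x*y + x + y) and
+ # ry = x**2 + x*y + x + y; at (x, y) = (3, 2) the partials are
+ # drx/dx = 295, drx/dy = 120, dry/dx = 9 and dry/dy = 4.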
self.assertAllClose(304.0, r[0].eval())
- r = tf.gradients([rx, ry], y)
+ r = gradients_impl.gradients([rx, ry], y)
self.assertAllClose(124.0, r[0].eval())
- r = tf.gradients([rx], x)
+ r = gradients_impl.gradients([rx], x)
self.assertAllClose(295.0, r[0].eval())
- r = tf.gradients([rx], y)
+ r = gradients_impl.gradients([rx], y)
self.assertAllClose(120.0, r[0].eval())
def testWhileGrad_Dependency(self):
with self.test_session():
- i = tf.constant(0, name="i")
- x = tf.constant(2.0, name="x")
- c = lambda i, x: tf.less(i, 10)
+ i = constant_op.constant(0, name="i")
+ x = constant_op.constant(2.0, name="x")
+
+ c = lambda i, x: math_ops.less(i, 10)
def b(i, x):
- x = tf.mul(x, 2.0)
- i = tf.add(i, 1)
+ x = math_ops.mul(x, 2.0)
+ i = math_ops.add(i, 1)
return i, x
- ri, rx = tf.while_loop(c, b, [i, x], parallel_iterations=1)
- r = tf.gradients([ri, rx], x)
+ ri, rx = control_flow_ops.while_loop(c, b, [i, x], parallel_iterations=1)
+
+ r = gradients_impl.gradients([ri, rx], x)
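+ # Ten doublings give rx = x * 2**10, so drx/dx = 1024; ri does not
+ # depend on x.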
self.assertAllClose(1024.0, r[0].eval())
- r = tf.gradients([rx], x)
+ r = gradients_impl.gradients([rx], x)
self.assertAllClose(1024.0, r[0].eval())
def testWhileGrad_NoGradient(self):
with self.test_session():
- v = tf.constant(2.0, name="v")
- c = lambda v: tf.less(v, 100.0)
- b = tf.square
- r = tf.while_loop(c, b, [v], back_prop=False)
- r = tf.add(r, v)
- r = tf.gradients(r, v)
+ v = constant_op.constant(2.0, name="v")
+ c = lambda v: math_ops.less(v, 100.0)
+ b = math_ops.square
+ r = control_flow_ops.while_loop(c, b, [v], back_prop=False)
+ r = math_ops.add(r, v)
+ r = gradients_impl.gradients(r, v)
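+ # back_prop=False severs the gradient through the loop, leaving only
+ # d(r + v)/dv = 1.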
self.assertAllClose(1.0, r[0].eval())
def testWhileGrad_NoDependency(self):
with self.test_session() as sess:
- variable = tf.Variable(tf.ones([2, 3]))
- time = tf.zeros([], dtype=tf.int32)
+ variable = variables.Variable(array_ops.ones([2, 3]))
+ time = array_ops.zeros([], dtype=dtypes.int32)
+
def cond(time, tensor, _):
return time < 10
+
def body(time, tensor, _):
- return (time+1, tensor, tensor)
+ return (time + 1, tensor, tensor)
+
loop_vars = [time, variable, variable]
- tensors = tf.while_loop(cond=cond, body=body, loop_vars=loop_vars)
- cost = tf.reduce_sum(tensors[2])
- grad = tf.gradients(cost, [variable])
- tf.global_variables_initializer().run()
+ tensors = control_flow_ops.while_loop(
+ cond=cond, body=body, loop_vars=loop_vars)
+ cost = math_ops.reduce_sum(tensors[2])
+ grad = gradients_impl.gradients(cost, [variable])
+ variables.global_variables_initializer().run()
self.assertAllClose(np.ones([2, 3]), sess.run(grad[0]))
def testWhileGrad_Const(self):
with self.test_session() as sess:
- c0 = tf.constant(0.0, name="c0")
- c1 = tf.constant(1.0, name="c1")
- time = tf.constant(0, name="t")
+ c0 = constant_op.constant(0.0, name="c0")
+ c1 = constant_op.constant(1.0, name="c1")
+ time = constant_op.constant(0, name="t")
+
def cond(time, _):
return time < 1
+
def body(time, tensor):
- return time+1, c1
+ return time + 1, c1
+
loop_vars = [time, c0]
- tensors = tf.while_loop(cond=cond, body=body, loop_vars=loop_vars)
- cost = tf.reduce_sum(tensors[1])
- grad = tf.gradients(cost, [c0])
+ tensors = control_flow_ops.while_loop(
+ cond=cond, body=body, loop_vars=loop_vars)
+ cost = math_ops.reduce_sum(tensors[1])
+ grad = gradients_impl.gradients(cost, [c0])
self.assertAllClose(0.0, sess.run(grad[0]))
def testWhileGrad_SerialTwoLoops(self):
with self.test_session():
- i = tf.constant(0, name="i")
- x = tf.constant(2.0, name="x")
- c = lambda i, x: tf.less(i, 5)
+ i = constant_op.constant(0, name="i")
+ x = constant_op.constant(2.0, name="x")
+
+ c = lambda i, x: math_ops.less(i, 5)
def b(i, x):
- x = tf.mul(x, 2.0)
- i = tf.add(i, 1)
+ x = math_ops.mul(x, 2.0)
+ i = math_ops.add(i, 1)
return i, x
- _, rx = tf.while_loop(c, b, [i, x], parallel_iterations=1)
- _, rx = tf.while_loop(c, b, [i, rx], parallel_iterations=1)
- r = tf.gradients([rx], x)
+ _, rx = control_flow_ops.while_loop(c, b, [i, x], parallel_iterations=1)
+ _, rx = control_flow_ops.while_loop(c, b, [i, rx], parallel_iterations=1)
+
+ r = gradients_impl.gradients([rx], x)
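+ # Each loop doubles x five times, so rx = x * 2**10 and drx/dx = 1024.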
self.assertAllClose(1024.0, r[0].eval())
def testWhileGrad_ParallelTwoLoops(self):
with self.test_session():
- i = tf.constant(0, name="i")
- x = tf.constant(2.0, name="x")
- c = lambda i, x: tf.less(i, 5)
+ i = constant_op.constant(0, name="i")
+ x = constant_op.constant(2.0, name="x")
+
+ c = lambda i, x: math_ops.less(i, 5)
def b(i, x):
- x = tf.mul(x, 2.0)
- i = tf.add(i, 1)
+ x = math_ops.mul(x, 2.0)
+ i = math_ops.add(i, 1)
return i, x
- _, r1 = tf.while_loop(c, b, [i, x], parallel_iterations=1)
- _, r2 = tf.while_loop(c, b, [i, x], parallel_iterations=1)
- rx = tf.add(r1, r2)
- r = tf.gradients([rx], x)
+ _, r1 = control_flow_ops.while_loop(c, b, [i, x], parallel_iterations=1)
+ _, r2 = control_flow_ops.while_loop(c, b, [i, x], parallel_iterations=1)
+ rx = math_ops.add(r1, r2)
+
+ r = gradients_impl.gradients([rx], x)
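+ # r1 = r2 = x * 2**5, so rx = 64 * x and drx/dx = 64.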
self.assertAllClose(64.0, r[0].eval())
def testWhileGrad_OneOutputWithControlDependencyOnSecond(self):
with self.test_session():
- i = tf.constant(0, name="i")
- x = tf.constant(1.0, name="x")
- y = tf.constant(1.0, name="y")
- c = lambda i, *_: tf.less(i, 1, name="cond_less")
+ i = constant_op.constant(0, name="i")
+ x = constant_op.constant(1.0, name="x")
+ y = constant_op.constant(1.0, name="y")
+ c = lambda i, *_: math_ops.less(i, 1, name="cond_less")
+
def b(i, xi, yi):
# return (i + 1, xi, xi + yi)
- return (tf.add(i, 1, name="inc"),
- tf.identity(xi, name="xi"),
- tf.add(xi, yi, name="xi_plus_yi"))
+ return (math_ops.add(i, 1, name="inc"), array_ops.identity(
+ xi, name="xi"), math_ops.add(xi, yi, name="xi_plus_yi"))
- _, x_f, y_f = tf.while_loop(c, b, [i, x, y])
- with tf.control_dependencies([x_f]):
- y_f_d = tf.identity(y_f, name="y_f_d")
+ _, x_f, y_f = control_flow_ops.while_loop(c, b, [i, x, y])
+ with ops.control_dependencies([x_f]):
+ y_f_d = array_ops.identity(y_f, name="y_f_d")
self.assertAllClose(2.0, y_f_d.eval()) # y_f_d = 1.0 + 1.0
- g = tf.gradients([y_f_d], [x])[0]
+ g = gradients_impl.gradients([y_f_d], [x])[0]
self.assertTrue(g is not None)
self.assertAllClose(1.0, g.eval()) # y_f_d = x + 1.0, dy_f_d/dx = 1.0
def _testNestedWhileGrad_Simple(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
- v = tf.constant(1.0)
+ v = constant_op.constant(1.0)
+
def inner_loop(s):
- c = lambda x: tf.less(x, 4.0)
- b = lambda x: tf.mul(x, 2.0)
- return tf.while_loop(c, b, [s])
- c = lambda x: tf.less(x, 2.0)
- b = lambda x: tf.mul(inner_loop(x), 2.0)
- r = tf.while_loop(c, b, [v])
-
- r = tf.gradients(r, v)[0]
+ c = lambda x: math_ops.less(x, 4.0)
+ b = lambda x: math_ops.mul(x, 2.0)
+ return control_flow_ops.while_loop(c, b, [s])
+
+ c = lambda x: math_ops.less(x, 2.0)
+ b = lambda x: math_ops.mul(inner_loop(x), 2.0)
+ r = control_flow_ops.while_loop(c, b, [v])
+
+ r = gradients_impl.gradients(r, v)[0]
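+ # For v = 1, inner_loop doubles twice (1 -> 4) and the body doubles once
+ # more, so r = 8 * v and dr/dv = 8.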
self.assertAllClose(8.0, r.eval())
def testNestedWhileGrad_Simple(self):
@@ -1533,78 +1660,86 @@ class ControlFlowTest(tf.test.TestCase):
def testNestedWhileGrad_SerialInner(self):
with self.test_session():
- v = tf.constant(1.0)
+ v = constant_op.constant(1.0)
+
def inner_loop1(s):
- z = tf.constant(0)
- c = lambda i, x: tf.less(i, 4)
- b = lambda i, x: [tf.add(i, 1), tf.mul(x, 2.0)]
- return tf.while_loop(c, b, [z, s])
+ z = constant_op.constant(0)
+ c = lambda i, x: math_ops.less(i, 4)
+ b = lambda i, x: [math_ops.add(i, 1), math_ops.mul(x, 2.0)]
+ return control_flow_ops.while_loop(c, b, [z, s])
+
def inner_loop2(s):
- z = tf.constant(0)
- c = lambda i, x: tf.less(i, 4)
- b = lambda i, x: [tf.add(i, 1), tf.mul(x, 2.0)]
- return tf.while_loop(c, b, [z, s])
- c = lambda x: tf.less(x, 128.0)
+ z = constant_op.constant(0)
+ c = lambda i, x: math_ops.less(i, 4)
+ b = lambda i, x: [math_ops.add(i, 1), math_ops.mul(x, 2.0)]
+ return control_flow_ops.while_loop(c, b, [z, s])
+
+ c = lambda x: math_ops.less(x, 128.0)
b = lambda x: inner_loop2(inner_loop1(x)[1])[1]
- r = tf.while_loop(c, b, [v])
+ r = control_flow_ops.while_loop(c, b, [v])
- r = tf.gradients(r, v)[0]
+ r = gradients_impl.gradients(r, v)[0]
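+ # Each inner loop runs four times and multiplies by 2**4, so one outer
+ # pass maps v to 256 * v and dr/dv = 256.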
self.assertAllClose(256.0, r.eval())
def testNestedWhileGrad_ParallelInner(self):
with self.test_session():
- v = tf.constant(1.0)
+ v = constant_op.constant(1.0)
+
def inner_loop1(s):
- z = tf.constant(0)
- c = lambda i, x: tf.less(i, 4)
- b = lambda i, x: [tf.add(i, 1), tf.mul(x, 2.0)]
- return tf.while_loop(c, b, [z, s])
+ z = constant_op.constant(0)
+ c = lambda i, x: math_ops.less(i, 4)
+ b = lambda i, x: [math_ops.add(i, 1), math_ops.mul(x, 2.0)]
+ return control_flow_ops.while_loop(c, b, [z, s])
+
def inner_loop2(s):
- z = tf.constant(0)
- c = lambda i, x: tf.less(i, 4)
- b = lambda i, x: [tf.add(i, 1), tf.mul(x, 2.0)]
- return tf.while_loop(c, b, [z, s])
- c = lambda x: tf.less(x, 128.0)
- b = lambda x: tf.mul(inner_loop1(x)[1], inner_loop2(x)[1])
- r = tf.while_loop(c, b, [v])
-
- r = tf.gradients(r, v)[0]
+ z = constant_op.constant(0)
+ c = lambda i, x: math_ops.less(i, 4)
+ b = lambda i, x: [math_ops.add(i, 1), math_ops.mul(x, 2.0)]
+ return control_flow_ops.while_loop(c, b, [z, s])
+
+ c = lambda x: math_ops.less(x, 128.0)
+ b = lambda x: math_ops.mul(inner_loop1(x)[1], inner_loop2(x)[1])
+ r = control_flow_ops.while_loop(c, b, [v])
+
+ r = gradients_impl.gradients(r, v)[0]
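+ # b(x) = (16 * x) * (16 * x) = 256 * x**2, so dr/dv = 512 * v = 512
+ # at v = 1.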
self.assertAllClose(512.0, r.eval())
def testNestedWhileGrad_ParallelIterations(self):
# Make sure the stack pushes and pops of an inner loop are executed in
# the sequential order of the iterations of its outer loop.
with self.test_session() as sess:
+
def inner_loop(t):
- fn = lambda n: n + tf.square(var)
- return tf.map_fn(fn=fn, elems=t, parallel_iterations=10)
+ fn = lambda n: n + math_ops.square(var)
+ return functional_ops.map_fn(fn=fn, elems=t, parallel_iterations=10)
def outer_loop(inp):
- return tf.map_fn(fn=inner_loop, elems=inp, parallel_iterations=10)
+ return functional_ops.map_fn(
+ fn=inner_loop, elems=inp, parallel_iterations=10)
- var = tf.Variable(tf.constant(3.0))
- inp = tf.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
+ var = variables.Variable(constant_op.constant(3.0))
+ inp = constant_op.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
res = outer_loop(inp)
- optimizer = tf.train.AdamOptimizer(learning_rate=0.001)
- train_op = optimizer.minimize(tf.reduce_mean(tf.square(res)))
- sess.run(tf.global_variables_initializer())
+ optimizer = adam.AdamOptimizer(learning_rate=0.001)
+ train_op = optimizer.minimize(math_ops.reduce_mean(math_ops.square(res)))
+ sess.run(variables.global_variables_initializer())
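+ # A single Adam step with learning_rate=0.001 moves var from 3.0 by
+ # roughly the learning rate, to about 2.999.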
sess.run(train_op)
self.assertAllClose(2.999, var.eval())
def _testWhileCondGrad_Simple(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
- v = tf.convert_to_tensor(2.0, name="v")
- n = tf.convert_to_tensor(100.0, name="n")
- one = tf.convert_to_tensor(1.0, name="one")
- c = lambda x: tf.less(x, n)
+ v = ops.convert_to_tensor(2.0, name="v")
+ n = ops.convert_to_tensor(100.0, name="n")
+ one = ops.convert_to_tensor(1.0, name="one")
+ c = lambda x: math_ops.less(x, n)
# pylint: disable=undefined-variable
# for OSS build
- b = lambda x: tf.cond(tf.constant(True),
- lambda: tf.square(x),
- lambda: tf.sub(x, one))
+ b = lambda x: control_flow_ops.cond(constant_op.constant(True),
+ lambda: math_ops.square(x),
+ lambda: math_ops.sub(x, one))
# pylint: enable=undefined-variable
- r = tf.while_loop(c, b, [v])
- r = tf.gradients(r, v)[0]
+ r = control_flow_ops.while_loop(c, b, [v])
+ r = gradients_impl.gradients(r, v)[0]
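+ # The true branch always squares, so the loop maps v -> v**8
+ # (2.0 -> 256.0) and dr/dv = 8 * v**7 = 1024.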
self.assertAllClose(1024.0, r.eval())
def testWhileCondGrad_Simple(self):
@@ -1613,66 +1748,67 @@ class ControlFlowTest(tf.test.TestCase):
def testWhileCondGrad_UnknownShape(self):
with self.test_session() as sess:
- v = tf.placeholder(tf.float32)
- n = tf.convert_to_tensor(100.0, name="n")
- one = tf.convert_to_tensor(1.0, name="one")
- c = lambda x: tf.less(x, n)
+ v = array_ops.placeholder(dtypes.float32)
+ n = ops.convert_to_tensor(100.0, name="n")
+ one = ops.convert_to_tensor(1.0, name="one")
+ c = lambda x: math_ops.less(x, n)
# pylint: disable=undefined-variable
# for OSS build
- b = lambda x: tf.cond(tf.constant(True),
- lambda: tf.square(x),
- lambda: tf.sub(x, one))
+ b = lambda x: control_flow_ops.cond(constant_op.constant(True),
+ lambda: math_ops.square(x),
+ lambda: math_ops.sub(x, one))
# pylint: enable=undefined-variable
- r = tf.while_loop(c, b, [v])
- r = tf.gradients(r, v)[0]
+ r = control_flow_ops.while_loop(c, b, [v])
+ r = gradients_impl.gradients(r, v)[0]
r = sess.run(r, feed_dict={v: 2.0})
self.assertAllClose(1024.0, r)
def testWhileGrad_Concat(self):
with self.test_session() as sess:
- x = tf.get_variable("x", initializer=[[1., 2.]])
- i0 = tf.constant(0)
- h0 = tf.zeros([0, 2])
+ x = variable_scope.get_variable("x", initializer=[[1., 2.]])
+ i0 = constant_op.constant(0)
+ h0 = array_ops.zeros([0, 2])
def condition(i, _):
return i < 2
def body(i, h):
- return i + 1, tf.concat_v2([h, x], 0)
+ return i + 1, array_ops.concat_v2([h, x], 0)
- _, h = tf.while_loop(
+ _, h = control_flow_ops.while_loop(
condition, body, [i0, h0],
[i0.get_shape(), tensor_shape.TensorShape([None, 2])])
- s = tf.reduce_sum(h)
+ s = math_ops.reduce_sum(h)
- sess.run(tf.global_variables_initializer())
- optimizer = tf.train.GradientDescentOptimizer(0.01)
+ sess.run(variables.global_variables_initializer())
+ optimizer = gradient_descent.GradientDescentOptimizer(0.01)
op = optimizer.minimize(s)
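# h stacks x twice, so ds/dx = 2 per element; one step with learning
# rate 0.01 moves x from [[1., 2.]] to [[0.98, 1.98]].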
sess.run(op)
self.assertAllClose([[0.98000002, 1.98000002]], sess.run(x))
def testWhileWithRefsWithGradients_1(self):
with self.test_session() as sess:
- x = tf.Variable(0)._ref() # pylint: disable=protected-access
- i = tf.constant(0)
- c = lambda i, x: tf.less(i, 10)
+ x = variables.Variable(0)._ref() # pylint: disable=protected-access
+ i = constant_op.constant(0)
+ c = lambda i, x: math_ops.less(i, 10)
self.assertEqual(x.dtype, dtypes.int32_ref)
# pylint: disable=protected-access
def body(i, x):
self.assertEqual(x.dtype, dtypes.int32_ref)
- return [i+1, gen_array_ops._ref_identity(x)]
+ return [i + 1, gen_array_ops._ref_identity(x)]
+
# pylint: enable=protected-access
- r = tf.while_loop(c, body, [i, x], parallel_iterations=5)
+ r = control_flow_ops.while_loop(c, body, [i, x], parallel_iterations=5)
- grad_ys = [tf.Variable(73)._ref()] # pylint: disable=protected-access
- grad = tf.gradients([r[1]], [x], grad_ys=grad_ys)
+ grad_ys = [variables.Variable(73)._ref()] # pylint: disable=protected-access
+ grad = gradients_impl.gradients([r[1]], [x], grad_ys=grad_ys)
- tf.global_variables_initializer().run()
+ variables.global_variables_initializer().run()
- self.assertEqual(r[0].dtype, tf.int32)
+ self.assertEqual(r[0].dtype, dtypes.int32)
self.assertEqual(r[1].dtype, dtypes.int32_ref)
value_i, value_x, value_x_grad = sess.run(r + grad)
@@ -1683,159 +1819,189 @@ class ControlFlowTest(tf.test.TestCase):
def testWhileGrad_IndexedSlices(self):
with self.test_session():
- values = tf.constant([2.0, 4.0], name="values")
- indices = tf.constant([0, 3], name="indices")
- shape = tf.constant([10], name="dense_shape")
- i = tf.constant(0)
- x = tf.IndexedSlices(values, indices, dense_shape=shape)
+ values = constant_op.constant([2.0, 4.0], name="values")
+ indices = constant_op.constant([0, 3], name="indices")
+ shape = constant_op.constant([10], name="dense_shape")
+ i = constant_op.constant(0)
+ x = ops.IndexedSlices(values, indices, dense_shape=shape)
+
def c(i, _):
return i < 10
+
def b(i, x):
- return [i + 1, tf.IndexedSlices(x.values * 2.0, x.indices,
- x.dense_shape)]
- _, r = tf.while_loop(c, b, [i, x])
- r = tf.gradients(r.values, values)[0]
+ return [
+ i + 1, ops.IndexedSlices(x.values * 2.0, x.indices, x.dense_shape)
+ ]
+
+ _, r = control_flow_ops.while_loop(c, b, [i, x])
+ r = gradients_impl.gradients(r.values, values)[0]
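+ # Ten doublings scale the values by 2**10, so the gradient is 1024 for
+ # each element.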
self.assertAllClose(np.array([1024.0, 1024.0]), r.eval())
def testWhileGrad_SparseTensor(self):
with self.test_session():
- values = tf.constant([2.0, 4.0], name="values")
- indices = tf.constant([[0], [3]], dtype=tf.int64, name="indices")
- shape = tf.constant([10], dtype=tf.int64, name="dense_shape")
- i = tf.constant(0)
- x = tf.SparseTensor(indices, values, dense_shape=shape)
+ values = constant_op.constant([2.0, 4.0], name="values")
+ indices = constant_op.constant(
+ [[0], [3]], dtype=dtypes.int64, name="indices")
+ shape = constant_op.constant([10], dtype=dtypes.int64, name="dense_shape")
+ i = constant_op.constant(0)
+ x = sparse_tensor.SparseTensor(indices, values, dense_shape=shape)
+
def c(i, _):
return i < 10
+
def b(i, x):
- return [i + 1, tf.SparseTensor(x.indices, x.values * 2.0,
- x.dense_shape)]
- _, r = tf.while_loop(c, b, [i, x])
- r = tf.gradients(r.values, values)[0]
+ return [
+ i + 1, sparse_tensor.SparseTensor(x.indices, x.values * 2.0,
+ x.dense_shape)
+ ]
+
+ _, r = control_flow_ops.while_loop(c, b, [i, x])
+ r = gradients_impl.gradients(r.values, values)[0]
self.assertAllClose(np.array([1024.0, 1024.0]), r.eval())
def testCallGradInLoop(self):
with self.test_session() as sess:
- i0 = tf.constant(0)
- params = tf.constant(5.0)
- params_1 = tf.square(params)
+ i0 = constant_op.constant(0)
+ params = constant_op.constant(5.0)
+ params_1 = math_ops.square(params)
+
def c(i, _):
return i < 10
+
def b(i, x):
- data = tf.constant([1.0, 2.0, 3.0])
- data = tf.mul(data, params_1)
- x1 = x + tf.gradients(data, params)[0]
+ data = constant_op.constant([1.0, 2.0, 3.0])
+ data = math_ops.mul(data, params_1)
+ x1 = x + gradients_impl.gradients(data, params)[0]
return i + 1, x1
- output_grad = tf.while_loop(c, b, [i0, tf.constant(0.0)])
+
+ output_grad = control_flow_ops.while_loop(c, b,
+ [i0, constant_op.constant(0.0)])
self.assertAllClose(600.0, sess.run(output_grad)[1])
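# Worked check: each of the 10 iterations adds
# d(sum(data * params**2))/d(params) = (1 + 2 + 3) * 2 * params = 60,
# giving 600.0 in total.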
def testWhileAndTensorArray(self):
with self.test_session() as sess:
- param = tf.constant(2.0)
- n0 = tf.constant(0)
- y0 = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name="elems")
+ param = constant_op.constant(2.0)
+ n0 = constant_op.constant(0)
+ y0 = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name="elems")
def c(i, _):
return i < 10
+
def b(i, y):
- return [i + 1, tf.map_fn(lambda x: tf.mul(x, param), y)]
+ return [
+ i + 1, functional_ops.map_fn(lambda x: math_ops.mul(x, param), y)
+ ]
- r = tf.while_loop(c, b, [n0, y0], parallel_iterations=1)
- r = tf.gradients(r, param)[0]
+ r = control_flow_ops.while_loop(c, b, [n0, y0], parallel_iterations=1)
+ r = gradients_impl.gradients(r, param)[0]
self.assertAllClose(107520.0, sess.run(r))
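# Worked check: after 10 iterations y = y0 * param**10, so
# d(sum(y))/d(param) = sum(y0) * 10 * param**9 = 21 * 10 * 512 = 107520.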
def testWhileGrad_StopGrad(self):
with self.test_session():
- x = tf.constant(3.0, name="x")
- y = tf.constant(2.0, name="y")
+ x = constant_op.constant(3.0, name="x")
+ y = constant_op.constant(2.0, name="y")
+
+ c = lambda x, y: math_ops.less(x, 100.0)
- c = lambda x, y: tf.less(x, 100.0)
def b(x, y):
- y1 = tf.square(y)
- x1 = tf.add(tf.square(x), y1)
+ y1 = math_ops.square(y)
+ x1 = math_ops.add(math_ops.square(x), y1)
return x1, y1
- rx, ry = tf.while_loop(c, b, [x, y])
- r = tf.gradients(rx, y)[0]
+ rx, ry = control_flow_ops.while_loop(c, b, [x, y])
+
+ r = gradients_impl.gradients(rx, y)[0]
self.assertEqual(136.0, r.eval())
- r = tf.gradients(ry, y)[0]
+ r = gradients_impl.gradients(ry, y)[0]
self.assertEqual(32.0, r.eval())
- r = tf.gradients(tf.stop_gradient(rx), y)[0]
+ r = gradients_impl.gradients(array_ops.stop_gradient(rx), y)[0]
self.assertEqual(r, None)
- r = tf.gradients(tf.stop_gradient(ry), y)[0]
+ r = gradients_impl.gradients(array_ops.stop_gradient(ry), y)[0]
self.assertEqual(r, None)
- r = tf.gradients(tf.stop_gradient(tf.square(rx)), y)[0]
+ r = gradients_impl.gradients(
+ array_ops.stop_gradient(math_ops.square(rx)), y)[0]
self.assertEqual(r, None)
- r = tf.gradients(tf.stop_gradient(tf.add(rx, ry)), x)[0]
+ r = gradients_impl.gradients(
+ array_ops.stop_gradient(math_ops.add(rx, ry)), x)[0]
self.assertEqual(r, None)
- r = tf.gradients(tf.stop_gradient(tf.add(rx, ry)), y)[0]
+ r = gradients_impl.gradients(
+ array_ops.stop_gradient(math_ops.add(rx, ry)), y)[0]
self.assertEqual(r, None)
- r = tf.gradients(tf.add(rx, ry), y)[0]
+ r = gradients_impl.gradients(math_ops.add(rx, ry), y)[0]
self.assertEqual(168.0, r.eval())
- r = tf.gradients(tf.add(rx, tf.stop_gradient(ry)), y)[0]
+ r = gradients_impl.gradients(
+ math_ops.add(rx, array_ops.stop_gradient(ry)), y)[0]
self.assertEqual(136.0, r.eval())
- r = tf.gradients(tf.add(tf.stop_gradient(rx), ry), y)[0]
+ r = gradients_impl.gradients(
+ math_ops.add(array_ops.stop_gradient(rx), ry), y)[0]
self.assertEqual(32.0, r.eval())
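# Worked check: the loop runs twice from (x, y) = (3, 2), so
# ry = y**4 = 16 and rx = (x**2 + y**2)**2 + y**4 = 185. Hence
# d(ry)/dy = 4 * y**3 = 32, d(rx)/dy = 2 * 13 * 2 * y + 32 = 136,
# and the sum rule gives 136 + 32 = 168 for d(rx + ry)/dy.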
def testWhileGrad_StopGradInside(self):
with self.test_session():
- x = tf.constant(3.0, name="x")
- y = tf.constant(2.0, name="y")
+ x = constant_op.constant(3.0, name="x")
+ y = constant_op.constant(2.0, name="y")
+
+ c = lambda x, y: math_ops.less(x, 100.0)
- c = lambda x, y: tf.less(x, 100.0)
def b(x, y):
- y1 = tf.stop_gradient(tf.square(y))
- x1 = tf.add(tf.square(x), y1)
+ y1 = array_ops.stop_gradient(math_ops.square(y))
+ x1 = math_ops.add(math_ops.square(x), y1)
return x1, y1
- rx, _ = tf.while_loop(c, b, [x, y])
- r = tf.gradients(rx, y)[0]
+ rx, _ = control_flow_ops.while_loop(c, b, [x, y])
+
+ r = gradients_impl.gradients(rx, y)[0]
self.assertAllClose(0.0, r.eval())
- r = tf.gradients(rx, x)[0]
+ r = gradients_impl.gradients(rx, x)[0]
self.assertAllClose(156.0, r.eval())
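# Worked check: every y contribution flows through the stopped y1, so
# d(rx)/dy = 0, while the x path alone gives
# d(rx)/dx = 2 * (x**2 + y**2) * (2 * x) = 2 * 13 * 6 = 156.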
def testWhileGrad_StopGradInsideNoShape(self):
with self.test_session() as sess:
- x = tf.placeholder(tf.float32)
- y = tf.placeholder(tf.float32)
+ x = array_ops.placeholder(dtypes.float32)
+ y = array_ops.placeholder(dtypes.float32)
+
+ c = lambda x, y: math_ops.less(math_ops.reduce_sum(x), 100.0)
- c = lambda x, y: tf.less(tf.reduce_sum(x), 100.0)
def b(x, y):
- y1 = tf.stop_gradient(tf.square(y, name="stopped"))
- x1 = tf.add(tf.square(x), y1)
+ y1 = array_ops.stop_gradient(math_ops.square(y, name="stopped"))
+ x1 = math_ops.add(math_ops.square(x), y1)
return x1, y1
- rx, _ = tf.while_loop(c, b, [x, y])
- r = tf.gradients(rx, y)[0]
+ rx, _ = control_flow_ops.while_loop(c, b, [x, y])
+
+ r = gradients_impl.gradients(rx, y)[0]
feed_dict = {x: [3.0, 4.0], y: [2.0, 3.0]}
self.assertAllClose([0.0, 0.0], sess.run(r, feed_dict=feed_dict))
- r = tf.gradients(rx, x)[0]
+ r = gradients_impl.gradients(rx, x)[0]
self.assertAllClose([156.0, 400.0], sess.run(r, feed_dict=feed_dict))
name = "gradients/while/stopped_grad"
all_ops = x.graph.get_operations()
self.assertFalse(any([name in op.name for op in all_ops]))
def testWhileGradGradFail(self):
- theta = tf.Variable(initial_value=1.)
+ theta = variables.Variable(initial_value=1.)
+
def fn(prev, x):
return prev + x * theta
- result = tf.scan(fn, np.array([1., 2., 3.], dtype=np.float32))
- grad_theta = tf.gradients(result, theta)
+
+ result = functional_ops.scan(fn, np.array([1., 2., 3.], dtype=np.float32))
+ grad_theta = gradients_impl.gradients(result, theta)
with self.assertRaisesRegexp(TypeError, "Second-order gradient"):
- tf.gradients(grad_theta, theta)
- grad_theta_stopped = tf.stop_gradient(grad_theta)
- tf.gradients(grad_theta_stopped, theta)
+ gradients_impl.gradients(grad_theta, theta)
+ grad_theta_stopped = array_ops.stop_gradient(grad_theta)
+ gradients_impl.gradients(grad_theta_stopped, theta)
def testOneValueCond(self):
with self.test_session():
- c = tf.placeholder(tf.int32, shape=[])
- one = tf.convert_to_tensor(1, name="one")
- two = tf.convert_to_tensor(2, name="two")
- p = tf.greater_equal(c, 1)
- i = tf.cond(p, lambda: one, lambda: two)
- self.assertTrue(isinstance(i, tf.Tensor))
+ c = array_ops.placeholder(dtypes.int32, shape=[])
+ one = ops.convert_to_tensor(1, name="one")
+ two = ops.convert_to_tensor(2, name="two")
+ p = math_ops.greater_equal(c, 1)
+ i = control_flow_ops.cond(p, lambda: one, lambda: two)
+ self.assertTrue(isinstance(i, ops.Tensor))
# True case: c = 2 is >= 1
self.assertEqual([1], i.eval(feed_dict={c: 2}))
@@ -1845,113 +2011,124 @@ class ControlFlowTest(tf.test.TestCase):
def testExampleCond(self):
with self.test_session():
- x = tf.convert_to_tensor([-2.0, 2.0], name="x")
- d = tf.placeholder(tf.int32, shape=[])
+ x = ops.convert_to_tensor([-2.0, 2.0], name="x")
+ d = array_ops.placeholder(dtypes.int32, shape=[])
def l2():
- return tf.sqrt(tf.reduce_sum(tf.square(x)))
+ return math_ops.sqrt(math_ops.reduce_sum(math_ops.square(x)))
def l1():
- return tf.reduce_sum(tf.abs(x))
+ return math_ops.reduce_sum(math_ops.abs(x))
- i = tf.cond(tf.equal(d, 2), l2, l1)
+ i = control_flow_ops.cond(math_ops.equal(d, 2), l2, l1)
self.assertAllClose(4.0, i.eval(feed_dict={d: 1}))
self.assertAllClose(2.0 * math.sqrt(2), i.eval(feed_dict={d: 2}))
def testCase(self):
with self.test_session():
- x = tf.constant(1)
- y = tf.constant(2)
- z = tf.constant(3)
- f1 = lambda: tf.constant(17)
- f2 = lambda: tf.constant(23)
- f3 = lambda: tf.constant(-1)
-
- r1 = tf.case({x < y: f1, x > z: f2}, default=f3, exclusive=True)
+ x = constant_op.constant(1)
+ y = constant_op.constant(2)
+ z = constant_op.constant(3)
+ f1 = lambda: constant_op.constant(17)
+ f2 = lambda: constant_op.constant(23)
+ f3 = lambda: constant_op.constant(-1)
+
+ r1 = control_flow_ops.case(
+ {
+ x < y: f1,
+ x > z: f2
+ }, default=f3, exclusive=True)
self.assertAllEqual(r1.eval(), 17)
- r2 = tf.case([(y > z, f1), (y > x, f2)], default=f3)
+ r2 = control_flow_ops.case([(y > z, f1), (y > x, f2)], default=f3)
self.assertAllEqual(r2.eval(), 23)
# Duplicate events can happen; the first one is selected.
- r3 = tf.case([(x < y, f1), (x < y, f2)], default=f3)
+ r3 = control_flow_ops.case([(x < y, f1), (x < y, f2)], default=f3)
self.assertAllEqual(r3.eval(), 17)
# Duplicate events cause an error if exclusive = True
- r4 = tf.case([(x < y, f1), (x < y, f2)], default=f3, exclusive=True)
+ r4 = control_flow_ops.case(
+ [(x < y, f1), (x < y, f2)], default=f3, exclusive=True)
with self.assertRaisesOpError(
"More than one condition evaluated as True but exclusive=True."):
r4.eval()
# Check that the default is called if none of the others are
- r5 = tf.case({x > y: f1}, default=f3)
+ r5 = control_flow_ops.case({x > y: f1}, default=f3)
self.assertAllEqual(r5.eval(), -1)
ran_once = [False, False, False]
def break_run_twice(ix):
+
def _break():
ran_once[ix] = True
- return tf.constant(ix)
+ return constant_op.constant(ix)
+
return _break
# Should not fail: each conditional gets called exactly once,
# except the default, which gets called twice: once to create an
# empty output and once for the actual cond switch.
- r6 = tf.case([(x < y, break_run_twice(0)), (x > y, break_run_twice(1))],
- default=lambda: tf.constant(2))
+ r6 = control_flow_ops.case(
+ [(x < y, break_run_twice(0)), (x > y, break_run_twice(1))],
+ default=lambda: constant_op.constant(2))
self.assertAllEqual(r6.eval(), 0)
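# In brief, the dispatch rule exercised above: every branch callable is
# invoked while the graph is built (the default twice, as noted), and at
# run time case() yields the value of the first pair whose predicate is
# true, falling back to the default when none is.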
def testCaseSideEffects(self):
with self.test_session() as sess:
- v0 = tf.Variable(-1)
- v1 = tf.Variable(-1)
- v2 = tf.Variable(-1)
+ v0 = variables.Variable(-1)
+ v1 = variables.Variable(-1)
+ v2 = variables.Variable(-1)
- a = lambda: control_flow_ops.with_dependencies([tf.assign(v0, 0)], 0)
- b = lambda: control_flow_ops.with_dependencies([tf.assign(v1, 1)], 1)
- c = lambda: control_flow_ops.with_dependencies([tf.assign(v2, 2)], 2)
+ a = lambda: control_flow_ops.with_dependencies([state_ops.assign(v0, 0)], 0)
+ b = lambda: control_flow_ops.with_dependencies([state_ops.assign(v1, 1)], 1)
+ c = lambda: control_flow_ops.with_dependencies([state_ops.assign(v2, 2)], 2)
- x = tf.constant(1)
- y = tf.constant(2)
+ x = constant_op.constant(1)
+ y = constant_op.constant(2)
- r0 = tf.case(((x < y, a), (x > y, b)), default=c, exclusive=True)
- r1 = tf.case(((x > y, a), (x < y, b)), default=c, exclusive=True)
- r2 = tf.case(((x > y, a), (x > y, b)), default=c, exclusive=True)
+ r0 = control_flow_ops.case(
+ ((x < y, a), (x > y, b)), default=c, exclusive=True)
+ r1 = control_flow_ops.case(
+ ((x > y, a), (x < y, b)), default=c, exclusive=True)
+ r2 = control_flow_ops.case(
+ ((x > y, a), (x > y, b)), default=c, exclusive=True)
- tf.global_variables_initializer().run()
+ variables.global_variables_initializer().run()
self.assertAllEqual(sess.run([v0, v1, v2]), [-1] * 3)
self.assertEqual(2, r2.eval())
self.assertAllEqual(sess.run([v0, v1, v2]), [-1, -1, 2])
- tf.global_variables_initializer().run()
+ variables.global_variables_initializer().run()
self.assertAllEqual(sess.run([v0, v1, v2]), [-1] * 3)
self.assertEqual(1, r1.eval())
self.assertAllEqual(sess.run([v0, v1, v2]), [-1, 1, -1])
- tf.global_variables_initializer().run()
+ variables.global_variables_initializer().run()
self.assertAllEqual(sess.run([v0, v1, v2]), [-1] * 3)
self.assertEqual(0, r0.eval())
self.assertAllEqual(sess.run([v0, v1, v2]), [0, -1, -1])
def testOneOpCond(self):
with self.test_session():
- v = tf.Variable(0)
- c = tf.convert_to_tensor(0)
- one = tf.convert_to_tensor(1)
- two = tf.convert_to_tensor(2)
- p = tf.greater_equal(c, 1)
+ v = variables.Variable(0)
+ c = ops.convert_to_tensor(0)
+ one = ops.convert_to_tensor(1)
+ two = ops.convert_to_tensor(2)
+ p = math_ops.greater_equal(c, 1)
def a():
- return tf.assign(v, one)
+ return state_ops.assign(v, one)
def b():
- return tf.assign(v, two)
+ return state_ops.assign(v, two)
- i = tf.cond(p, a, b)
- self.assertTrue(isinstance(i, tf.Tensor))
- tf.global_variables_initializer().run()
+ i = control_flow_ops.cond(p, a, b)
+ self.assertTrue(isinstance(i, ops.Tensor))
+ variables.global_variables_initializer().run()
self.assertEqual(0, v.eval())
@@ -1965,8 +2142,8 @@ class ControlFlowTest(tf.test.TestCase):
def testWithOpsDependencies(self):
with self.test_session() as sess:
- v = tf.Variable(0.0)
- c = tf.constant(10)
+ v = variables.Variable(0.0)
+ c = constant_op.constant(10)
# Fetching v directly will result in an uninitialized error
with self.assertRaisesOpError("Attempting to use uninitialized value"):
@@ -1988,15 +2165,13 @@ class ControlFlowTest(tf.test.TestCase):
def testWithTensorDependencies(self):
with self.test_session():
- v = tf.Variable(0.0)
- c1 = tf.constant(10)
- c2 = tf.constant(20)
+ v = variables.Variable(0.0)
+ c1 = constant_op.constant(10)
+ c2 = constant_op.constant(20)
# c1_with_init_v depends on the init op for v
c1_with_init_v = control_flow_ops.with_dependencies(
- name="c1_with_init_v",
- output_tensor=c1,
- dependencies=[v.initializer])
+ name="c1_with_init_v", output_tensor=c1, dependencies=[v.initializer])
# c2_with_c1_dep depends on the value of c1_with_init_v
c2_with_c1_dep = control_flow_ops.with_dependencies(
name="c2_with_c1_dep",
@@ -2016,14 +2191,14 @@ class ControlFlowTest(tf.test.TestCase):
def testWithIndexedSlicesDependencies(self):
with self.test_session():
- v = tf.Variable(
+ v = variables.Variable(
np.array([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]]).astype(np.float32))
- v_at_1 = tf.IndexedSlices(v, tf.constant([1]))
- gather_v_at_1 = tf.gather(v_at_1.values, v_at_1.indices)
+ v_at_1 = ops.IndexedSlices(v, constant_op.constant([1]))
+ gather_v_at_1 = array_ops.gather(v_at_1.values, v_at_1.indices)
v_at_1_after_init = control_flow_ops.with_dependencies([v.initializer],
v_at_1)
- gather_v_at_1_after_init = tf.gather(
- v_at_1_after_init.values, v_at_1_after_init.indices)
+ gather_v_at_1_after_init = array_ops.gather(v_at_1_after_init.values,
+ v_at_1_after_init.indices)
# Fetching gather_v_at_1 will result in an uninitialized error
with self.assertRaisesOpError("Attempting to use uninitialized value"):
@@ -2036,33 +2211,32 @@ class ControlFlowTest(tf.test.TestCase):
self.assertAllClose([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]], v.eval())
def testDependenciesDevice(self):
- with tf.Graph().as_default():
+ with ops.Graph().as_default():
# device set on tensor => same device on dep.
- with tf.device("/job:ps"):
- vd = tf.Variable([0.0])
+ with ops.device("/job:ps"):
+ vd = variables.Variable([0.0])
with_vd_dep = control_flow_ops.with_dependencies([vd.initializer], vd)
self.assertTrue("/job:ps" in with_vd_dep.device)
# No device set on tensor => no device on dep.
- vnod = tf.Variable([0.0])
+ vnod = variables.Variable([0.0])
with_vnod_dep = control_flow_ops.with_dependencies([vnod.initializer],
vnod)
self.assertDeviceEqual(None, with_vnod_dep.device)
# device set on tensor, default device on graph => default device on dep.
- vdef = tf.Variable([0.0], name="vdef")
- with tf.device("/job:worker/gpu:1"):
+ vdef = variables.Variable([0.0], name="vdef")
+ with ops.device("/job:worker/gpu:1"):
with_vdef_dep = control_flow_ops.with_dependencies([vdef.initializer],
vdef)
# The device is empty, but the colocation constraint is set.
self.assertDeviceEqual("", with_vdef_dep.device)
- self.assertEqual([b"loc:@vdef"],
- with_vdef_dep.op.colocation_groups())
+ self.assertEqual([b"loc:@vdef"], with_vdef_dep.op.colocation_groups())
def testGroup(self):
with self.test_session() as sess:
- v1 = tf.Variable([0.0])
- v2 = tf.Variable([1.0])
+ v1 = variables.Variable([0.0])
+ v2 = variables.Variable([1.0])
# Group init1 and init2 and run.
init = control_flow_ops.group(v1.initializer, v2.initializer)
@@ -2079,106 +2253,110 @@ class ControlFlowTest(tf.test.TestCase):
self.assertAllClose([1.0], v2_val)
def testGroupEmpty(self):
- op = tf.group()
+ op = control_flow_ops.group()
self.assertEqual(op.type, "NoOp")
self.assertEqual(op.control_inputs, [])
def testMergeShapes(self):
# All inputs unknown.
- p1 = tf.placeholder(tf.float32)
- p2 = tf.placeholder(tf.float32)
- p3 = tf.placeholder(tf.float32)
+ p1 = array_ops.placeholder(dtypes.float32)
+ p2 = array_ops.placeholder(dtypes.float32)
+ p3 = array_ops.placeholder(dtypes.float32)
m, index = control_flow_ops.merge([p1, p2, p3])
self.assertIs(None, m.get_shape().ndims)
self.assertEqual([], index.get_shape())
# All inputs known with different ranks.
- p1 = tf.placeholder(tf.float32, shape=[1, 2])
- p2 = tf.placeholder(tf.float32, shape=[1, 2, 3])
+ p1 = array_ops.placeholder(dtypes.float32, shape=[1, 2])
+ p2 = array_ops.placeholder(dtypes.float32, shape=[1, 2, 3])
m, index = control_flow_ops.merge([p1, p2])
self.assertIs(None, m.get_shape().ndims)
self.assertEqual([], index.get_shape())
# All inputs known with some dimensions different.
- p1 = tf.placeholder(tf.float32, shape=[1, 2])
- p2 = tf.placeholder(tf.float32, shape=[2, 1])
+ p1 = array_ops.placeholder(dtypes.float32, shape=[1, 2])
+ p2 = array_ops.placeholder(dtypes.float32, shape=[2, 1])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([None, None], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
- p1 = tf.placeholder(tf.float32, shape=[1, 2])
- p2 = tf.placeholder(tf.float32, shape=[None, 2])
+ p1 = array_ops.placeholder(dtypes.float32, shape=[1, 2])
+ p2 = array_ops.placeholder(dtypes.float32, shape=[None, 2])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([None, 2], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
- p1 = tf.placeholder(tf.float32, shape=[1, 2])
- p2 = tf.placeholder(tf.float32, shape=[2, 2])
+ p1 = array_ops.placeholder(dtypes.float32, shape=[1, 2])
+ p2 = array_ops.placeholder(dtypes.float32, shape=[2, 2])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([None, 2], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
# All inputs known with same dimensions.
- p1 = tf.placeholder(tf.float32, shape=[1, 2])
- p2 = tf.placeholder(tf.float32, shape=[1, 2])
+ p1 = array_ops.placeholder(dtypes.float32, shape=[1, 2])
+ p2 = array_ops.placeholder(dtypes.float32, shape=[1, 2])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([1, 2], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
- p1 = tf.placeholder(tf.float32, shape=[None, 2])
- p2 = tf.placeholder(tf.float32, shape=[None, 2])
+ p1 = array_ops.placeholder(dtypes.float32, shape=[None, 2])
+ p2 = array_ops.placeholder(dtypes.float32, shape=[None, 2])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([None, 2], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
- p1 = tf.placeholder(tf.float32, shape=[None, None])
- p2 = tf.placeholder(tf.float32, shape=[None, None])
+ p1 = array_ops.placeholder(dtypes.float32, shape=[None, None])
+ p2 = array_ops.placeholder(dtypes.float32, shape=[None, None])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([None, None], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
def testRefSelect(self):
- index = tf.placeholder(tf.int32)
+ index = array_ops.placeholder(dtypes.int32)
# All inputs unknown.
- p1 = tf.placeholder(tf.float32)
- p2 = tf.placeholder(tf.float32)
- p3 = tf.placeholder(tf.float32)
- v1 = tf.Variable(p1, validate_shape=False)
- v2 = tf.Variable(p2, validate_shape=False)
- v3 = tf.Variable(p3, validate_shape=False)
+ p1 = array_ops.placeholder(dtypes.float32)
+ p2 = array_ops.placeholder(dtypes.float32)
+ p3 = array_ops.placeholder(dtypes.float32)
+ v1 = variables.Variable(p1, validate_shape=False)
+ v2 = variables.Variable(p2, validate_shape=False)
+ v3 = variables.Variable(p3, validate_shape=False)
self.assertIs(None, v1.get_shape().ndims)
s = control_flow_ops.ref_select(index, [v1, v2, v3])
self.assertIs(None, s.get_shape().ndims)
# All inputs known but different.
- v1 = tf.Variable([[1, 2]])
- v2 = tf.Variable([[2], [1]])
+ v1 = variables.Variable([[1, 2]])
+ v2 = variables.Variable([[2], [1]])
s = control_flow_ops.ref_select(index, [v1, v2])
self.assertIs(None, s.get_shape().ndims)
# All inputs known and same.
- v1 = tf.Variable([[1, 2]])
- v2 = tf.Variable([[1, 2]])
+ v1 = variables.Variable([[1, 2]])
+ v2 = variables.Variable([[1, 2]])
s = control_flow_ops.ref_select(index, [v1, v2])
self.assertEqual([1, 2], s.get_shape())
# Possibly the same but not guaranteed.
- v1 = tf.Variable([[1., 2.]])
- p2 = tf.placeholder(tf.float32, shape=[None, 2])
- v2 = tf.Variable(p2, validate_shape=False)
+ v1 = variables.Variable([[1., 2.]])
+ p2 = array_ops.placeholder(dtypes.float32, shape=[None, 2])
+ v2 = variables.Variable(p2, validate_shape=False)
s = control_flow_ops.ref_select(index, [v1, v2])
self.assertEqual(None, s.get_shape())
def testRunLoopTensor(self):
with self.test_session() as sess:
tensor_list = []
+
def condition(t):
- return t < tf.constant(5)
+ return t < constant_op.constant(5)
+
def body(_):
- tensor_list.append(tf.constant(5))
- return tf.constant(10)
- result = tf.while_loop(condition, body, [tf.constant(4)])
+ tensor_list.append(constant_op.constant(5))
+ return constant_op.constant(10)
+
+ result = control_flow_ops.while_loop(condition, body,
+ [constant_op.constant(4)])
self.assertEqual(10, sess.run(result))
# Ensure that we cannot run a tensor that escapes the loop body
@@ -2187,50 +2365,54 @@ class ControlFlowTest(tf.test.TestCase):
sess.run(tensor_list[0])
def testWhilePyFuncBasic(self):
+
def func(x):
return np.square(x)
with self.test_session():
- r = tf.while_loop(
+ r = control_flow_ops.while_loop(
lambda i, v: i < 4,
- lambda i, v: [i + 1, tf.py_func(func, [v], [tf.float32])[0]],
- [tf.constant(0), tf.constant(2.0, tf.float32)],
+ lambda i, v: [i + 1, script_ops.py_func(func, [v], [dtypes.float32])[0]],
+ [constant_op.constant(0), constant_op.constant(2.0, dtypes.float32)],
[tensor_shape.unknown_shape(), tensor_shape.unknown_shape()])
self.assertEqual(r[1].eval(), 65536.0)
def testWhileFuncBasic(self):
- @function.Defun(tf.float32)
+
+ @function.Defun(dtypes.float32)
def func(x):
- return tf.square(tf.square(x))
+ return math_ops.square(math_ops.square(x))
with self.test_session():
- x = tf.constant(2.0, tf.float32)
- r = tf.while_loop(
- lambda i, v: i < 2,
- lambda i, v: [i + 1, func(v)],
- [tf.constant(0), x],
+ x = constant_op.constant(2.0, dtypes.float32)
+ r = control_flow_ops.while_loop(
+ lambda i, v: i < 2, lambda i, v: [i + 1, func(v)],
+ [constant_op.constant(0), x],
[tensor_shape.unknown_shape(), tensor_shape.unknown_shape()])
self.assertEqual(r[1].eval(), 65536.0)
- r = tf.gradients(r, x)[0]
+ r = gradients_impl.gradients(r, x)[0]
self.assertEqual(r.eval(), 524288.0)
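# Worked check: func(v) = v**4 applied twice maps 2.0 to 2**16 = 65536,
# and d(v**16)/dv = 16 * v**15 = 16 * 2**15 = 524288 at v = 2.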
- self.assertEqual(len([op for op in x.graph.get_operations()
- if op.type == "Stack"]),
- 1)
+ self.assertEqual(
+ len([op for op in x.graph.get_operations() if op.type == "Stack"]), 1)
-class TupleTest(tf.test.TestCase):
+class TupleTest(test.TestCase):
def testTensors(self):
for v1_first in [True, False]:
with self.test_session():
- v1 = tf.Variable([1.0])
- add1 = tf.add(
- control_flow_ops.with_dependencies([v1.initializer], v1._ref()), # pylint: disable=protected-access
+ v1 = variables.Variable([1.0])
+ add1 = math_ops.add(
+ control_flow_ops.with_dependencies(
+ [v1.initializer],
+ v1._ref()), # pylint: disable=protected-access
2.0)
- v2 = tf.Variable([10.0])
- add2 = tf.add(
- control_flow_ops.with_dependencies([v2.initializer], v2._ref()), # pylint: disable=protected-access
+ v2 = variables.Variable([10.0])
+ add2 = math_ops.add(
+ control_flow_ops.with_dependencies(
+ [v2.initializer],
+ v2._ref()), # pylint: disable=protected-access
20.0)
t1, _, t2 = control_flow_ops.tuple([add1, None, add2])
@@ -2254,23 +2436,27 @@ class TupleTest(tf.test.TestCase):
def testIndexedSlices(self):
for v1_first in [True, False]:
with self.test_session():
- v1 = tf.Variable(
+ v1 = variables.Variable(
np.array([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]]).astype(
np.float32))
- v1_at_1 = tf.IndexedSlices(
- control_flow_ops.with_dependencies([v1.initializer], v1._ref()), # pylint: disable=protected-access
- tf.constant([1]))
+ v1_at_1 = ops.IndexedSlices(
+ control_flow_ops.with_dependencies(
+ [v1.initializer],
+ v1._ref()), # pylint: disable=protected-access
+ constant_op.constant([1]))
- v2 = tf.Variable(
+ v2 = variables.Variable(
np.array([[0.1, 1.1], [10.1, 11.1], [20.1, 21.1]]).astype(
np.float32))
- v2_at_1 = tf.IndexedSlices(
- control_flow_ops.with_dependencies([v2.initializer], v2._ref()), # pylint: disable=protected-access
- tf.constant([1]))
+ v2_at_1 = ops.IndexedSlices(
+ control_flow_ops.with_dependencies(
+ [v2.initializer],
+ v2._ref()), # pylint: disable=protected-access
+ constant_op.constant([1]))
st1, st2 = control_flow_ops.tuple([v1_at_1, v2_at_1])
- g1 = tf.gather(st1.values, st1.indices)
- g2 = tf.gather(st2.values, st2.indices)
+ g1 = array_ops.gather(st1.values, st1.indices)
+ g2 = array_ops.gather(st2.values, st2.indices)
# v1 is not initialized.
with self.assertRaisesOpError("Attempting to use uninitialized value"):
@@ -2293,9 +2479,10 @@ class TupleTest(tf.test.TestCase):
def testAcceptTensorsAsControlInputs(self):
with self.test_session():
- var = tf.Variable(0)
- assign = tf.assign(var, 1)
- t, = tf.tuple([tf.constant(0)], control_inputs=[assign])
+ var = variables.Variable(0)
+ assign = state_ops.assign(var, 1)
+ t, = control_flow_ops.tuple(
+ [constant_op.constant(0)], control_inputs=[assign])
# Should trigger the assign.
t.eval()
@@ -2303,37 +2490,42 @@ class TupleTest(tf.test.TestCase):
self.assertEquals(1, var.eval())
-class AssertTest(tf.test.TestCase):
+class AssertTest(test.TestCase):
def testGuardedAssertDoesNotCopyWhenTrue(self):
with self.test_session(use_gpu=True) as sess:
- with tf.device("/gpu:0"):
- value = tf.constant(1.0)
- with tf.device("/cpu:0"):
- true = tf.constant(True)
- guarded_assert = tf.Assert(true, [value], name="guarded")
+ with ops.device("/gpu:0"):
+ value = constant_op.constant(1.0)
+ with ops.device("/cpu:0"):
+ true = constant_op.constant(True)
+ guarded_assert = control_flow_ops.Assert(true, [value], name="guarded")
unguarded_assert = gen_logging_ops._assert(
true, [value], name="unguarded")
- opts = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
- guarded_metadata = tf.RunMetadata()
+ opts = config_pb2.RunOptions(trace_level=config_pb2.RunOptions.FULL_TRACE)
+ guarded_metadata = config_pb2.RunMetadata()
sess.run(guarded_assert, options=opts, run_metadata=guarded_metadata)
- unguarded_metadata = tf.RunMetadata()
+ unguarded_metadata = config_pb2.RunMetadata()
sess.run(unguarded_assert, options=opts, run_metadata=unguarded_metadata)
guarded_nodestat_names = [
- n.node_name for d in guarded_metadata.step_stats.dev_stats
- for n in d.node_stats]
+ n.node_name
+ for d in guarded_metadata.step_stats.dev_stats for n in d.node_stats
+ ]
unguarded_nodestat_names = [
- n.node_name for d in unguarded_metadata.step_stats.dev_stats
- for n in d.node_stats]
+ n.node_name
+ for d in unguarded_metadata.step_stats.dev_stats for n in d.node_stats
+ ]
guarded_memcpy_nodestat_names = [
- n for n in guarded_nodestat_names if "MEMCPYDtoH" in n]
+ n for n in guarded_nodestat_names if "MEMCPYDtoH" in n
+ ]
unguarded_memcpy_nodestat_names = [
- n for n in unguarded_nodestat_names if "MEMCPYDtoH" in n]
+ n for n in unguarded_nodestat_names if "MEMCPYDtoH" in n
+ ]
if "GPU" in [d.device_type for d in device_lib.list_local_devices()]:
# A copy was performed for the unguarded assert
self.assertLess(0, len(unguarded_memcpy_nodestat_names))
# No copy was performed for the guarded assert
self.assertEqual([], guarded_memcpy_nodestat_names)
+
if __name__ == "__main__":
- tf.test.main()
+ test.main()
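Every file below repeats the substitution shown above: the hourglass import
`import tensorflow as tf` is removed and each symbol is imported from the
internal module that defines it. A condensed sketch of the mapping (paths
follow the import blocks visible in this diff; illustrative, not exhaustive):
-import tensorflow as tf
+from tensorflow.python.framework import constant_op  # tf.constant
+from tensorflow.python.framework import dtypes       # tf.float32, tf.int64
+from tensorflow.python.framework import ops          # tf.Graph, tf.IndexedSlices
+from tensorflow.python.ops import array_ops          # tf.placeholder, tf.stop_gradient
+from tensorflow.python.ops import control_flow_ops   # tf.while_loop, tf.cond, tf.case
+from tensorflow.python.ops import gradients_impl     # tf.gradients
+from tensorflow.python.ops import math_ops           # tf.add, tf.square, tf.less
+from tensorflow.python.ops import state_ops          # tf.assign
+from tensorflow.python.ops import variables          # tf.Variable
+from tensorflow.python.platform import test          # tf.test.TestCase, tf.test.main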
diff --git a/tensorflow/python/kernel_tests/conv1d_test.py b/tensorflow/python/kernel_tests/conv1d_test.py
index fb02540ddc..662c94eea7 100644
--- a/tensorflow/python/kernel_tests/conv1d_test.py
+++ b/tensorflow/python/kernel_tests/conv1d_test.py
@@ -12,41 +12,43 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for convolution related functionality in tensorflow.ops.nn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-import tensorflow as tf
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import nn_ops
+from tensorflow.python.platform import test
-class Conv1DTest(tf.test.TestCase):
+class Conv1DTest(test.TestCase):
def testBasic(self):
"""Test that argument passing to conv2d is handled properly."""
- x = tf.constant([1, 2, 3, 4], dtype=tf.float32)
- x = tf.expand_dims(x, 0) # Add batch dimension
- x = tf.expand_dims(x, 2) # And depth dimension
- filters = tf.constant([2, 1], dtype=tf.float32)
- filters = tf.expand_dims(filters, 1) # in_channels
- filters = tf.expand_dims(filters, 2) # out_channels
+ x = constant_op.constant([1, 2, 3, 4], dtype=dtypes.float32)
+ x = array_ops.expand_dims(x, 0) # Add batch dimension
+ x = array_ops.expand_dims(x, 2) # And depth dimension
+ filters = constant_op.constant([2, 1], dtype=dtypes.float32)
+ filters = array_ops.expand_dims(filters, 1) # in_channels
+ filters = array_ops.expand_dims(filters, 2) # out_channels
# Filters tensor is 2x1x1
for stride in [1, 2]:
with self.test_session():
- c = tf.nn.conv1d(x, filters, stride, padding="VALID")
- reduced = tf.squeeze(c)
+ c = nn_ops.conv1d(x, filters, stride, padding="VALID")
+ reduced = array_ops.squeeze(c)
output = reduced.eval()
if stride == 1:
self.assertEqual(len(output), 3)
self.assertAllClose(output,
- [2*1+1*2, 2*2+1*3, 2*3+1*4])
+ [2 * 1 + 1 * 2, 2 * 2 + 1 * 3, 2 * 3 + 1 * 4])
else:
self.assertEqual(len(output), 2)
- self.assertAllClose(output,
- [2*1+1*2, 2*3+1*4])
+ self.assertAllClose(output, [2 * 1 + 1 * 2, 2 * 3 + 1 * 4])
if __name__ == "__main__":
- tf.test.main()
+ test.main()
diff --git a/tensorflow/python/kernel_tests/conv2d_backprop_filter_grad_test.py b/tensorflow/python/kernel_tests/conv2d_backprop_filter_grad_test.py
index 8220253697..1679857bd5 100644
--- a/tensorflow/python/kernel_tests/conv2d_backprop_filter_grad_test.py
+++ b/tensorflow/python/kernel_tests/conv2d_backprop_filter_grad_test.py
@@ -12,17 +12,24 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for convolution related functionality in tensorflow.ops.nn."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import gradient_checker
+from tensorflow.python.ops import nn_ops
+import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
+from tensorflow.python.platform import test
-class Conv2DBackpropFilterGradTest(tf.test.TestCase):
+class Conv2DBackpropFilterGradTest(test.TestCase):
def testGradient(self):
with self.test_session():
@@ -30,28 +37,29 @@ class Conv2DBackpropFilterGradTest(tf.test.TestCase):
for stride in [1, 2]:
np.random.seed(1)
in_shape = [5, 8, 6, 4]
- in_val = tf.constant(
- 2 * np.random.random_sample(in_shape) - 1,
- dtype=tf.float32)
+ in_val = constant_op.constant(
+ 2 * np.random.random_sample(in_shape) - 1, dtype=dtypes.float32)
filter_shape = [3, 3, 4, 6]
# Make a convolution op with the current settings, just to easily get
# the shape of the output.
- conv_out = tf.nn.conv2d(in_val, tf.zeros(filter_shape),
- [1, stride, stride, 1], padding)
+ conv_out = nn_ops.conv2d(in_val,
+ array_ops.zeros(filter_shape),
+ [1, stride, stride, 1], padding)
out_backprop_shape = conv_out.get_shape().as_list()
- out_backprop_val = tf.constant(
+ out_backprop_val = constant_op.constant(
2 * np.random.random_sample(out_backprop_shape) - 1,
- dtype=tf.float32)
- output = tf.nn.conv2d_backprop_filter(in_val, filter_shape,
- out_backprop_val,
- [1, stride, stride, 1], padding)
- err = tf.test.compute_gradient_error([in_val, out_backprop_val],
- [in_shape, out_backprop_shape],
- output, filter_shape)
+ dtype=dtypes.float32)
+ output = nn_ops.conv2d_backprop_filter(in_val, filter_shape,
+ out_backprop_val,
+ [1, stride, stride, 1],
+ padding)
+ err = gradient_checker.compute_gradient_error(
+ [in_val, out_backprop_val], [in_shape, out_backprop_shape],
+ output, filter_shape)
print("conv2d_backprop_filter gradient err = %g " % err)
err_tolerance = 2e-3
self.assertLess(err, err_tolerance)
if __name__ == "__main__":
- tf.test.main()
+ test.main()
diff --git a/tensorflow/python/kernel_tests/conv2d_transpose_test.py b/tensorflow/python/kernel_tests/conv2d_transpose_test.py
index d260660e10..18184a0ee0 100644
--- a/tensorflow/python/kernel_tests/conv2d_transpose_test.py
+++ b/tensorflow/python/kernel_tests/conv2d_transpose_test.py
@@ -12,19 +12,25 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for convolution related functionality in tensorflow.ops.nn."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
-import tensorflow as tf
+
from tensorflow.python.client import device_lib
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.ops import gradient_checker
+from tensorflow.python.ops import nn_ops
+import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
+from tensorflow.python.platform import test
-class Conv2DTransposeTest(tf.test.TestCase):
+class Conv2DTransposeTest(test.TestCase):
def testConv2DTransposeSingleStride(self):
with self.test_session():
@@ -37,10 +43,12 @@ class Conv2DTransposeTest(tf.test.TestCase):
# Filter: [kernel_height, kernel_width, output_depth, input_depth]
f_shape = [3, 3, 2, 3]
- x = tf.constant(1.0, shape=x_shape, name="x", dtype=tf.float32)
- f = tf.constant(1.0, shape=f_shape, name="filter", dtype=tf.float32)
- output = tf.nn.conv2d_transpose(x, f, y_shape, strides=strides,
- padding="SAME")
+ x = constant_op.constant(
+ 1.0, shape=x_shape, name="x", dtype=dtypes.float32)
+ f = constant_op.constant(
+ 1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
+ output = nn_ops.conv2d_transpose(
+ x, f, y_shape, strides=strides, padding="SAME")
value = output.eval()
# We count the number of cells being added at the locations in the output.
@@ -73,10 +81,12 @@ class Conv2DTransposeTest(tf.test.TestCase):
# Filter: [kernel_height, kernel_width, output_depth, input_depth]
f_shape = [3, 3, 2, 3]
- x = tf.constant(1.0, shape=x_shape, name="x", dtype=tf.float32)
- f = tf.constant(1.0, shape=f_shape, name="filter", dtype=tf.float32)
- output = tf.nn.conv2d_transpose(x, f, y_shape, strides=strides,
- padding="SAME")
+ x = constant_op.constant(
+ 1.0, shape=x_shape, name="x", dtype=dtypes.float32)
+ f = constant_op.constant(
+ 1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
+ output = nn_ops.conv2d_transpose(
+ x, f, y_shape, strides=strides, padding="SAME")
value = output.eval()
for n in xrange(x_shape[0]):
@@ -104,10 +114,12 @@ class Conv2DTransposeTest(tf.test.TestCase):
# Filter: [kernel_height, kernel_width, output_depth, input_depth]
f_shape = [3, 3, 2, 3]
- x = tf.constant(1.0, shape=x_shape, name="x", dtype=tf.float32)
- f = tf.constant(1.0, shape=f_shape, name="filter", dtype=tf.float32)
- output = tf.nn.conv2d_transpose(x, f, y_shape, strides=strides,
- padding="VALID")
+ x = constant_op.constant(
+ 1.0, shape=x_shape, name="x", dtype=dtypes.float32)
+ f = constant_op.constant(
+ 1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
+ output = nn_ops.conv2d_transpose(
+ x, f, y_shape, strides=strides, padding="VALID")
value = output.eval()
cache_values = np.zeros(y_shape, dtype=np.float32)
@@ -121,10 +133,10 @@ class Conv2DTransposeTest(tf.test.TestCase):
for h in xrange(pad, y_shape[1] - pad):
target = 3.0
# We add a case for locations divisible by the stride.
- h_in = h % strides[
- 1] == 0 and h > pad and h < y_shape[1] - 1 - pad
- w_in = w % strides[
- 2] == 0 and w > pad and w < y_shape[2] - 1 - pad
+              h_in = (h % strides[1] == 0 and
+                      h > pad and h < y_shape[1] - 1 - pad)
+              w_in = (w % strides[2] == 0 and
+                      w > pad and w < y_shape[2] - 1 - pad)
if h_in and w_in:
target += 9.0
elif h_in or w_in:
@@ -148,19 +160,19 @@ class Conv2DTransposeTest(tf.test.TestCase):
x_val = np.random.random_sample(x_shape).astype(np.float64)
f_val = np.random.random_sample(f_shape).astype(np.float64)
with self.test_session():
- x = tf.constant(x_val, name="x", dtype=tf.float32)
- f = tf.constant(f_val, name="f", dtype=tf.float32)
- output = tf.nn.conv2d_transpose(x, f, y_shape, strides=strides,
- padding="SAME")
- err = tf.test.compute_gradient_error(
- [x, f], [x_shape, f_shape], output, y_shape)
+ x = constant_op.constant(x_val, name="x", dtype=dtypes.float32)
+ f = constant_op.constant(f_val, name="f", dtype=dtypes.float32)
+ output = nn_ops.conv2d_transpose(
+ x, f, y_shape, strides=strides, padding="SAME")
+ err = gradient_checker.compute_gradient_error([x, f], [x_shape, f_shape],
+ output, y_shape)
print("conv2d_transpose gradient err = %g " % err)
err_tolerance = 0.0005
self.assertLess(err, err_tolerance)
def testConv2DTransposeSingleStrideNCHW(self):
# `NCHW` data format is only supported on CUDA devices.
- if tf.test.is_gpu_available(cuda_only=True):
+ if test.is_gpu_available(cuda_only=True):
with self.test_session(use_gpu=True):
strides = [1, 1, 1, 1]
@@ -171,11 +183,13 @@ class Conv2DTransposeTest(tf.test.TestCase):
# Filter: [kernel_height, kernel_width, output_depth, input_depth]
f_shape = [3, 3, 2, 3]
- x = tf.constant(1.0, shape=x_shape, name="x", dtype=tf.float32)
- f = tf.constant(1.0, shape=f_shape, name="filter", dtype=tf.float32)
+ x = constant_op.constant(
+ 1.0, shape=x_shape, name="x", dtype=dtypes.float32)
+ f = constant_op.constant(
+ 1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
- output = tf.nn.conv2d_transpose(x, f, y_shape, strides=strides,
- padding="SAME", data_format='NCHW')
+ output = nn_ops.conv2d_transpose(
+ x, f, y_shape, strides=strides, padding="SAME", data_format="NCHW")
value = output.eval()
for n in xrange(x_shape[0]):
@@ -193,7 +207,7 @@ class Conv2DTransposeTest(tf.test.TestCase):
def testConv2DTransposeSameNCHW(self):
# `NCHW` data format is only supported on CUDA devices.
- if tf.test.is_gpu_available(cuda_only=True):
+ if test.is_gpu_available(cuda_only=True):
with self.test_session(use_gpu=True):
strides = [1, 1, 2, 2]
@@ -204,11 +218,13 @@ class Conv2DTransposeTest(tf.test.TestCase):
# Filter: [kernel_height, kernel_width, output_depth, input_depth]
f_shape = [3, 3, 2, 3]
- x = tf.constant(1.0, shape=x_shape, name="x", dtype=tf.float32)
- f = tf.constant(1.0, shape=f_shape, name="filter", dtype=tf.float32)
+ x = constant_op.constant(
+ 1.0, shape=x_shape, name="x", dtype=dtypes.float32)
+ f = constant_op.constant(
+ 1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
- output = tf.nn.conv2d_transpose(x, f, y_shape, strides=strides,
- padding="SAME", data_format='NCHW')
+ output = nn_ops.conv2d_transpose(
+ x, f, y_shape, strides=strides, padding="SAME", data_format="NCHW")
value = output.eval()
for n in xrange(x_shape[0]):
@@ -227,7 +243,7 @@ class Conv2DTransposeTest(tf.test.TestCase):
def testConv2DTransposeValidNCHW(self):
# `NCHW` data format is only supported on CUDA devices.
- if tf.test.is_gpu_available(cuda_only=True):
+ if test.is_gpu_available(cuda_only=True):
with self.test_session(use_gpu=True):
strides = [1, 1, 2, 2]
@@ -238,10 +254,12 @@ class Conv2DTransposeTest(tf.test.TestCase):
# Filter: [kernel_height, kernel_width, output_depth, input_depth]
f_shape = [3, 3, 2, 3]
- x = tf.constant(1.0, shape=x_shape, name="x", dtype=tf.float32)
- f = tf.constant(1.0, shape=f_shape, name="filter", dtype=tf.float32)
- output = tf.nn.conv2d_transpose(x, f, y_shape, strides=strides,
- padding="VALID", data_format='NCHW')
+ x = constant_op.constant(
+ 1.0, shape=x_shape, name="x", dtype=dtypes.float32)
+ f = constant_op.constant(
+ 1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
+ output = nn_ops.conv2d_transpose(
+ x, f, y_shape, strides=strides, padding="VALID", data_format="NCHW")
value = output.eval()
cache_values = np.zeros(y_shape, dtype=np.float32)
@@ -253,10 +271,10 @@ class Conv2DTransposeTest(tf.test.TestCase):
for h in xrange(pad, y_shape[2] - pad):
target = 3.0
# We add a case for locations divisible by the stride.
- h_in = h % strides[
- 2] == 0 and h > pad and h < y_shape[2] - 1 - pad
- w_in = w % strides[
- 3] == 0 and w > pad and w < y_shape[3] - 1 - pad
+              h_in = (h % strides[2] == 0 and
+                      h > pad and h < y_shape[2] - 1 - pad)
+              w_in = (w % strides[3] == 0 and
+                      w > pad and w < y_shape[3] - 1 - pad)
if h_in and w_in:
target += 9.0
elif h_in or w_in:
@@ -273,4 +291,4 @@ class Conv2DTransposeTest(tf.test.TestCase):
if __name__ == "__main__":
- tf.test.main()
+ test.main()
diff --git a/tensorflow/python/kernel_tests/conv3d_backprop_filter_v2_grad_test.py b/tensorflow/python/kernel_tests/conv3d_backprop_filter_v2_grad_test.py
index 572f8b0d48..85264ef876 100644
--- a/tensorflow/python/kernel_tests/conv3d_backprop_filter_v2_grad_test.py
+++ b/tensorflow/python/kernel_tests/conv3d_backprop_filter_v2_grad_test.py
@@ -12,17 +12,24 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for convolution related functionality in tensorflow.ops.nn."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import gradient_checker
+from tensorflow.python.ops import nn_ops
+import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
+from tensorflow.python.platform import test
-class Conv3DBackpropFilterV2GradTest(tf.test.TestCase):
+class Conv3DBackpropFilterV2GradTest(test.TestCase):
def testGradient(self):
with self.test_session():
@@ -30,29 +37,29 @@ class Conv3DBackpropFilterV2GradTest(tf.test.TestCase):
for stride in [1, 2]:
np.random.seed(1)
in_shape = [2, 4, 3, 3, 2]
- in_val = tf.constant(
- 2 * np.random.random_sample(in_shape) - 1,
- dtype=tf.float32)
+ in_val = constant_op.constant(
+ 2 * np.random.random_sample(in_shape) - 1, dtype=dtypes.float32)
filter_shape = [3, 3, 3, 2, 3]
strides = [1, stride, stride, stride, 1]
# Make a convolution op with the current settings, just to easily get
# the shape of the output.
- conv_out = tf.nn.conv3d(in_val, tf.zeros(filter_shape), strides,
- padding)
+ conv_out = nn_ops.conv3d(in_val,
+ array_ops.zeros(filter_shape), strides,
+ padding)
out_backprop_shape = conv_out.get_shape().as_list()
- out_backprop_val = tf.constant(
+ out_backprop_val = constant_op.constant(
2 * np.random.random_sample(out_backprop_shape) - 1,
- dtype=tf.float32)
- output = tf.nn.conv3d_backprop_filter_v2(in_val, filter_shape,
- out_backprop_val,
- strides, padding)
- err = tf.test.compute_gradient_error([in_val, out_backprop_val],
- [in_shape, out_backprop_shape],
- output, filter_shape)
+ dtype=dtypes.float32)
+ output = nn_ops.conv3d_backprop_filter_v2(in_val, filter_shape,
+ out_backprop_val, strides,
+ padding)
+ err = gradient_checker.compute_gradient_error(
+ [in_val, out_backprop_val], [in_shape, out_backprop_shape],
+ output, filter_shape)
print("conv3d_backprop_filter gradient err = %g " % err)
err_tolerance = 1e-3
self.assertLess(err, err_tolerance)
if __name__ == "__main__":
- tf.test.main()
+ test.main()
diff --git a/tensorflow/python/kernel_tests/conv3d_transpose_test.py b/tensorflow/python/kernel_tests/conv3d_transpose_test.py
index 56fc34c59e..a8b3af5096 100644
--- a/tensorflow/python/kernel_tests/conv3d_transpose_test.py
+++ b/tensorflow/python/kernel_tests/conv3d_transpose_test.py
@@ -12,18 +12,24 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for convolution related functionality in tensorflow.ops.nn."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
-import tensorflow as tf
+
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.ops import gradient_checker
+from tensorflow.python.ops import nn_ops
+import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
+from tensorflow.python.platform import test
-class Conv3DTransposeTest(tf.test.TestCase):
+class Conv3DTransposeTest(test.TestCase):
def testConv3DTransposeSingleStride(self):
with self.test_session():
@@ -36,10 +42,12 @@ class Conv3DTransposeTest(tf.test.TestCase):
# Filter: [kernel_depth, kernel_height, kernel_width, out_depth, in_depth]
f_shape = [3, 3, 3, 2, 3]
- x = tf.constant(1.0, shape=x_shape, name="x", dtype=tf.float32)
- f = tf.constant(1.0, shape=f_shape, name="filter", dtype=tf.float32)
- output = tf.nn.conv3d_transpose(x, f, y_shape, strides=strides,
- padding="SAME")
+ x = constant_op.constant(
+ 1.0, shape=x_shape, name="x", dtype=dtypes.float32)
+ f = constant_op.constant(
+ 1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
+ output = nn_ops.conv3d_transpose(
+ x, f, y_shape, strides=strides, padding="SAME")
value = output.eval()
# We count the number of cells being added at the locations in the output.
@@ -84,10 +92,12 @@ class Conv3DTransposeTest(tf.test.TestCase):
# Filter: [kernel_depth, kernel_height, kernel_width, out_depth, in_depth]
f_shape = [3, 3, 3, 2, 3]
- x = tf.constant(1.0, shape=x_shape, name="x", dtype=tf.float32)
- f = tf.constant(1.0, shape=f_shape, name="filter", dtype=tf.float32)
- output = tf.nn.conv3d_transpose(x, f, y_shape, strides=strides,
- padding="SAME")
+ x = constant_op.constant(
+ 1.0, shape=x_shape, name="x", dtype=dtypes.float32)
+ f = constant_op.constant(
+ 1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
+ output = nn_ops.conv3d_transpose(
+ x, f, y_shape, strides=strides, padding="SAME")
value = output.eval()
for n in xrange(x_shape[0]):
@@ -120,10 +130,12 @@ class Conv3DTransposeTest(tf.test.TestCase):
# Filter: [kernel_depth, kernel_height, kernel_width, out_depth, in_depth]
f_shape = [3, 3, 3, 2, 3]
- x = tf.constant(1.0, shape=x_shape, name="x", dtype=tf.float32)
- f = tf.constant(1.0, shape=f_shape, name="filter", dtype=tf.float32)
- output = tf.nn.conv3d_transpose(x, f, y_shape, strides=strides,
- padding="VALID")
+ x = constant_op.constant(
+ 1.0, shape=x_shape, name="x", dtype=dtypes.float32)
+ f = constant_op.constant(
+ 1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
+ output = nn_ops.conv3d_transpose(
+ x, f, y_shape, strides=strides, padding="VALID")
value = output.eval()
cache_values = np.zeros(y_shape, dtype=np.float32)
@@ -169,16 +181,16 @@ class Conv3DTransposeTest(tf.test.TestCase):
x_val = np.random.random_sample(x_shape).astype(np.float64)
f_val = np.random.random_sample(f_shape).astype(np.float64)
with self.test_session():
- x = tf.constant(x_val, name="x", dtype=tf.float32)
- f = tf.constant(f_val, name="f", dtype=tf.float32)
- output = tf.nn.conv3d_transpose(x, f, y_shape, strides=strides,
- padding="SAME")
- err = tf.test.compute_gradient_error(
- [x, f], [x_shape, f_shape], output, y_shape)
+ x = constant_op.constant(x_val, name="x", dtype=dtypes.float32)
+ f = constant_op.constant(f_val, name="f", dtype=dtypes.float32)
+ output = nn_ops.conv3d_transpose(
+ x, f, y_shape, strides=strides, padding="SAME")
+ err = gradient_checker.compute_gradient_error([x, f], [x_shape, f_shape],
+ output, y_shape)
print("conv3d_transpose gradient err = %g " % err)
err_tolerance = 0.0005
self.assertLess(err, err_tolerance)
if __name__ == "__main__":
- tf.test.main()
+ test.main()
diff --git a/tensorflow/python/kernel_tests/conv_ops_3d_test.py b/tensorflow/python/kernel_tests/conv_ops_3d_test.py
index bd6d34f6a1..2523f01687 100644
--- a/tensorflow/python/kernel_tests/conv_ops_3d_test.py
+++ b/tensorflow/python/kernel_tests/conv_ops_3d_test.py
@@ -13,19 +13,26 @@
# limitations under the License.
# ==============================================================================
"""Functional tests for 3d convolutional operations."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
-import tensorflow as tf
+
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.ops import gradient_checker
+from tensorflow.python.ops import nn_ops
+import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
+from tensorflow.python.platform import test
-class Conv3DTest(tf.test.TestCase):
+class Conv3DTest(test.TestCase):
- def _VerifyValues(
- self, tensor_in_sizes, filter_in_sizes, stride, padding, expected):
+ def _VerifyValues(self, tensor_in_sizes, filter_in_sizes, stride, padding,
+ expected):
total_size_1 = 1
total_size_2 = 1
for s in tensor_in_sizes:
@@ -43,55 +50,119 @@ class Conv3DTest(tf.test.TestCase):
x1 = [f * 1.0 for f in range(1, total_size_1 + 1)]
x2 = [f * 1.0 for f in range(1, total_size_2 + 1)]
with self.test_session(use_gpu=True) as sess:
- t1 = tf.constant(x1, shape=tensor_in_sizes)
- t2 = tf.constant(x2, shape=filter_in_sizes)
- conv = tf.nn.conv3d(t1, t2, strides, padding=padding)
+ t1 = constant_op.constant(x1, shape=tensor_in_sizes)
+ t2 = constant_op.constant(x2, shape=filter_in_sizes)
+ conv = nn_ops.conv3d(t1, t2, strides, padding=padding)
value = sess.run(conv)
print("expected = ", expected)
print("actual = ", value)
self.assertArrayNear(expected, value.flatten(), 1e-5)
def testConv3D1x1x1Filter(self):
- expected_output = [30.0, 36.0, 42.0, 66.0, 81.0, 96.0, 102.0, 126.0, 150.0,
- 138.0, 171.0, 204.0, 174.0, 216.0, 258.0, 210.0, 261.0,
- 312.0]
+ expected_output = [
+ 30.0, 36.0, 42.0, 66.0, 81.0, 96.0, 102.0, 126.0, 150.0, 138.0, 171.0,
+ 204.0, 174.0, 216.0, 258.0, 210.0, 261.0, 312.0
+ ]
# These are equivalent to the Conv2D1x1 case.
- self._VerifyValues(tensor_in_sizes=[1, 2, 3, 1, 3],
- filter_in_sizes=[1, 1, 1, 3, 3],
- stride=1,
- padding="VALID",
- expected=expected_output)
- self._VerifyValues(tensor_in_sizes=[1, 2, 1, 3, 3],
- filter_in_sizes=[1, 1, 1, 3, 3],
- stride=1,
- padding="VALID",
- expected=expected_output)
- self._VerifyValues(tensor_in_sizes=[1, 1, 2, 3, 3],
- filter_in_sizes=[1, 1, 1, 3, 3],
- stride=1,
- padding="VALID",
- expected=expected_output)
+ self._VerifyValues(
+ tensor_in_sizes=[1, 2, 3, 1, 3],
+ filter_in_sizes=[1, 1, 1, 3, 3],
+ stride=1,
+ padding="VALID",
+ expected=expected_output)
+ self._VerifyValues(
+ tensor_in_sizes=[1, 2, 1, 3, 3],
+ filter_in_sizes=[1, 1, 1, 3, 3],
+ stride=1,
+ padding="VALID",
+ expected=expected_output)
+ self._VerifyValues(
+ tensor_in_sizes=[1, 1, 2, 3, 3],
+ filter_in_sizes=[1, 1, 1, 3, 3],
+ stride=1,
+ padding="VALID",
+ expected=expected_output)
# Expected values computed using scipy's correlate function.
def testConv3D2x2x2Filter(self):
- expected_output = [19554., 19962., 20370., 22110., 22590., 23070., 34890.,
- 35730., 36570., 37446., 38358., 39270., 50226., 51498.,
- 52770., 52782., 54126., 55470.]
+ expected_output = [
+ 19554., 19962., 20370., 22110., 22590., 23070., 34890., 35730., 36570.,
+ 37446., 38358., 39270., 50226., 51498., 52770., 52782., 54126., 55470.
+ ]
# expected_shape = [1, 3, 1, 2, 5]
- self._VerifyValues(tensor_in_sizes=[1, 4, 2, 3, 3], # b, z, y, x, fin
- filter_in_sizes=[2, 2, 2, 3, 3], # z, y, x, fin, fout
- stride=1, padding="VALID",
- expected=expected_output)
+ self._VerifyValues(
+ tensor_in_sizes=[1, 4, 2, 3, 3], # b, z, y, x, fin
+ filter_in_sizes=[2, 2, 2, 3, 3], # z, y, x, fin, fout
+ stride=1,
+ padding="VALID",
+ expected=expected_output)
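# A rough sketch of how such values can be regenerated with scipy (the
# exact original script is not part of this change; names here are
# illustrative):
#
#   import numpy as np
#   from scipy.signal import correlate
#   x = np.arange(1., 73.).reshape(1, 4, 2, 3, 3)   # tensor_in_sizes
#   w = np.arange(1., 73.).reshape(2, 2, 2, 3, 3)   # filter_in_sizes
#   out_co = sum(correlate(x[0, ..., ci], w[..., ci, co], mode="valid")
#                for ci in range(3))                # one output channel co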
def testConv3DStrides(self):
expected_output = [
- 102., 151., 172., 193., 214., 235., 142., 438., 592., 613., 634., 655.,
- 676., 394., 774., 1033., 1054., 1075., 1096., 1117., 646., 1894., 2503.,
- 2524., 2545., 2566., 2587., 1486., 2230., 2944., 2965., 2986., 3007.,
- 3028., 1738., 2566., 3385., 3406., 3427., 3448., 3469., 1990., 3686.,
- 4855., 4876., 4897., 4918., 4939., 2830., 4022., 5296., 5317., 5338.,
- 5359., 5380., 3082., 4358., 5737., 5758., 5779., 5800., 5821., 3334.,
+        102., 151., 172., 193., 214., 235., 142.,
+        438., 592., 613., 634., 655., 676., 394.,
+        774., 1033., 1054., 1075., 1096., 1117., 646.,
+        1894., 2503., 2524., 2545., 2566., 2587., 1486.,
+        2230., 2944., 2965., 2986., 3007., 3028., 1738.,
+        2566., 3385., 3406., 3427., 3448., 3469., 1990.,
+        3686., 4855., 4876., 4897., 4918., 4939., 2830.,
+        4022., 5296., 5317., 5338., 5359., 5380., 3082.,
+        4358., 5737., 5758., 5779., 5800., 5821., 3334.,
]
self._VerifyValues(
tensor_in_sizes=[1, 5, 8, 7, 1],
@@ -102,11 +173,12 @@ class Conv3DTest(tf.test.TestCase):
def testConv3D2x2x2FilterStride2(self):
expected_output = [19554., 19962., 20370., 50226., 51498., 52770.]
- self._VerifyValues(tensor_in_sizes=[1, 4, 2, 3, 3],
- filter_in_sizes=[2, 2, 2, 3, 3],
- stride=2,
- padding="VALID",
- expected=expected_output)
+ self._VerifyValues(
+ tensor_in_sizes=[1, 4, 2, 3, 3],
+ filter_in_sizes=[2, 2, 2, 3, 3],
+ stride=2,
+ padding="VALID",
+ expected=expected_output)
def testConv3DStride3(self):
expected_output = [
@@ -116,71 +188,68 @@ class Conv3DTest(tf.test.TestCase):
120912., 117204., 123270., 129336., 118464., 124602., 130740., 119724.,
125934., 132144.
]
- self._VerifyValues(tensor_in_sizes=[1, 6, 7, 8, 2],
- filter_in_sizes=[3, 2, 1, 2, 3],
- stride=3,
- padding="VALID",
- expected=expected_output)
+ self._VerifyValues(
+ tensor_in_sizes=[1, 6, 7, 8, 2],
+ filter_in_sizes=[3, 2, 1, 2, 3],
+ stride=3,
+ padding="VALID",
+ expected=expected_output)
def testConv3D2x2x2FilterStride2Same(self):
expected_output = [
19554., 19962., 20370., 10452., 10710., 10968., 50226., 51498., 52770.,
23844., 24534., 25224.
]
- self._VerifyValues(tensor_in_sizes=[1, 4, 2, 3, 3],
- filter_in_sizes=[2, 2, 2, 3, 3],
- stride=2,
- padding="SAME",
- expected=expected_output)
+ self._VerifyValues(
+ tensor_in_sizes=[1, 4, 2, 3, 3],
+ filter_in_sizes=[2, 2, 2, 3, 3],
+ stride=2,
+ padding="SAME",
+ expected=expected_output)
def testKernelSmallerThanStride(self):
expected_output = [1., 3., 7., 9., 19., 21., 25., 27.]
- self._VerifyValues(tensor_in_sizes=[1, 3, 3, 3, 1],
- filter_in_sizes=[1, 1, 1, 1, 1],
- stride=2,
- padding="SAME",
- expected=expected_output)
- self._VerifyValues(tensor_in_sizes=[1, 3, 3, 3, 1],
- filter_in_sizes=[1, 1, 1, 1, 1],
- stride=2,
- padding="VALID",
- expected=expected_output)
-
- expected_output = [1484., 1592., 770.,
- 2240., 2348., 1106.,
- 1149., 1191., 539.,
-
- 6776., 6884., 3122.,
- 7532., 7640., 3458.,
- 3207., 3249., 1421.,
-
- 3005., 3035., 1225.,
- 3215., 3245., 1309.,
- 1013., 1022., 343.]
- self._VerifyValues(tensor_in_sizes=[1, 7, 7, 7, 1],
- filter_in_sizes=[2, 2, 2, 1, 1],
- stride=3,
- padding="SAME",
- expected=expected_output)
-
- expected_output = [1484., 1592.,
- 2240., 2348.,
-
- 6776., 6884.,
- 7532., 7640.]
- self._VerifyValues(tensor_in_sizes=[1, 7, 7, 7, 1],
- filter_in_sizes=[2, 2, 2, 1, 1],
- stride=3,
- padding="VALID",
- expected=expected_output)
+ self._VerifyValues(
+ tensor_in_sizes=[1, 3, 3, 3, 1],
+ filter_in_sizes=[1, 1, 1, 1, 1],
+ stride=2,
+ padding="SAME",
+ expected=expected_output)
+ self._VerifyValues(
+ tensor_in_sizes=[1, 3, 3, 3, 1],
+ filter_in_sizes=[1, 1, 1, 1, 1],
+ stride=2,
+ padding="VALID",
+ expected=expected_output)
+
+ expected_output = [
+ 1484., 1592., 770., 2240., 2348., 1106., 1149., 1191., 539., 6776.,
+ 6884., 3122., 7532., 7640., 3458., 3207., 3249., 1421., 3005., 3035.,
+ 1225., 3215., 3245., 1309., 1013., 1022., 343.
+ ]
+ self._VerifyValues(
+ tensor_in_sizes=[1, 7, 7, 7, 1],
+ filter_in_sizes=[2, 2, 2, 1, 1],
+ stride=3,
+ padding="SAME",
+ expected=expected_output)
+
+ expected_output = [1484., 1592., 2240., 2348., 6776., 6884., 7532., 7640.]
+ self._VerifyValues(
+ tensor_in_sizes=[1, 7, 7, 7, 1],
+ filter_in_sizes=[2, 2, 2, 1, 1],
+ stride=3,
+ padding="VALID",
+ expected=expected_output)
def ConstructAndTestGradient(self, batch, input_planes, input_rows,
input_cols, filter_planes, filter_rows,
filter_cols, in_depth, out_depth, stride,
padding, test_input):
input_shape = [batch, input_planes, input_rows, input_cols, in_depth]
- filter_shape = [filter_planes, filter_rows, filter_cols, in_depth,
- out_depth]
+ filter_shape = [
+ filter_planes, filter_rows, filter_cols, in_depth, out_depth
+ ]
if isinstance(stride, collections.Iterable):
strides = [1] + list(stride) + [1]
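The stride argument accepted here may be a scalar or a per-dimension
iterable; either way it is framed with unit batch and channel strides into
the five-element NDHWC strides vector. A minimal sketch in plain Python:

    stride = [2, 3, 1]                       # planes, rows, cols
    print([1] + list(stride) + [1])          # [1, 2, 3, 1, 1]
    stride = 2                               # scalar form
    print([1, stride, stride, stride, 1])    # [1, 2, 2, 2, 2]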
@@ -188,12 +257,12 @@ class Conv3DTest(tf.test.TestCase):
strides = [1, stride, stride, stride, 1]
if padding == "VALID":
- output_planes = int(math.ceil((input_planes - filter_planes + 1.0) /
- strides[1]))
- output_rows = int(math.ceil((input_rows - filter_rows + 1.0) /
- strides[2]))
- output_cols = int(math.ceil((input_cols - filter_cols + 1.0) /
- strides[3]))
+ output_planes = int(
+ math.ceil((input_planes - filter_planes + 1.0) / strides[1]))
+ output_rows = int(
+ math.ceil((input_rows - filter_rows + 1.0) / strides[2]))
+ output_cols = int(
+ math.ceil((input_cols - filter_cols + 1.0) / strides[3]))
else:
output_planes = int(math.ceil(float(input_planes) / strides[1]))
output_rows = int(math.ceil(float(input_rows) / strides[2]))
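The two branches above are the usual VALID/SAME output-size formulas. A
self-contained sketch, checked against testKernelSmallerThanStride above
(7-wide input, 2-wide filter, stride 3):

    import math

    def out_size(in_size, filt, stride, padding):
        if padding == "VALID":
            # Count only windows that fit entirely inside the input.
            return int(math.ceil((in_size - filt + 1.0) / stride))
        # SAME pads so that every stride position yields an output.
        return int(math.ceil(float(in_size) / stride))

    assert out_size(7, 2, 3, "VALID") == 2   # 2*2*2 = 8 expected values
    assert out_size(7, 2, 3, "SAME") == 3    # 3*3*3 = 27 expected values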
@@ -207,9 +276,9 @@ class Conv3DTest(tf.test.TestCase):
filter_size *= x
input_data = [x * 1.0 / input_size for x in range(0, input_size)]
filter_data = [x * 1.0 / filter_size for x in range(0, filter_size)]
- if tf.test.is_gpu_available():
- data_type = tf.float32
- if tf.test.is_gpu_available():
+ if test.is_gpu_available():
+ data_type = dtypes.float32
+ if test.is_gpu_available():
tolerance = 4e-3
else:
# As of Aug 2016, higher tolerance is needed for some CPU architectures.
@@ -217,228 +286,236 @@ class Conv3DTest(tf.test.TestCase):
# because of multithreading.
tolerance = 8e-3
else:
- data_type = tf.float64
+ data_type = dtypes.float64
tolerance = 1e-8
with self.test_session(use_gpu=True):
- input_tensor = tf.constant(input_data,
- shape=input_shape,
- dtype=data_type,
- name="input")
- filter_tensor = tf.constant(filter_data,
- shape=filter_shape,
- dtype=data_type,
- name="filter")
- conv = tf.nn.conv3d(input_tensor,
- filter_tensor,
- strides,
- padding,
- name="conv")
+ input_tensor = constant_op.constant(
+ input_data, shape=input_shape, dtype=data_type, name="input")
+ filter_tensor = constant_op.constant(
+ filter_data, shape=filter_shape, dtype=data_type, name="filter")
+ conv = nn_ops.conv3d(
+ input_tensor, filter_tensor, strides, padding, name="conv")
if test_input:
- err = tf.test.compute_gradient_error(input_tensor, input_shape, conv,
- output_shape)
+ err = gradient_checker.compute_gradient_error(input_tensor, input_shape,
+ conv, output_shape)
else:
- err = tf.test.compute_gradient_error(filter_tensor, filter_shape, conv,
- output_shape)
+ err = gradient_checker.compute_gradient_error(filter_tensor,
+ filter_shape, conv,
+ output_shape)
print("conv3d gradient error = ", err)
self.assertLess(err, tolerance)
def testInputGradientValidPaddingStrideOne(self):
- self.ConstructAndTestGradient(batch=2,
- input_planes=3,
- input_rows=5,
- input_cols=4,
- filter_planes=3,
- filter_rows=3,
- filter_cols=3,
- in_depth=2,
- out_depth=3,
- stride=1,
- padding="VALID",
- test_input=True)
+ self.ConstructAndTestGradient(
+ batch=2,
+ input_planes=3,
+ input_rows=5,
+ input_cols=4,
+ filter_planes=3,
+ filter_rows=3,
+ filter_cols=3,
+ in_depth=2,
+ out_depth=3,
+ stride=1,
+ padding="VALID",
+ test_input=True)
def testFilterGradientValidPaddingStrideOne(self):
- self.ConstructAndTestGradient(batch=4,
- input_planes=4,
- input_rows=6,
- input_cols=5,
- filter_planes=2,
- filter_rows=2,
- filter_cols=2,
- in_depth=2,
- out_depth=3,
- stride=1,
- padding="VALID",
- test_input=False)
+ self.ConstructAndTestGradient(
+ batch=4,
+ input_planes=4,
+ input_rows=6,
+ input_cols=5,
+ filter_planes=2,
+ filter_rows=2,
+ filter_cols=2,
+ in_depth=2,
+ out_depth=3,
+ stride=1,
+ padding="VALID",
+ test_input=False)
def testInputGradientValidPaddingStrideTwo(self):
- self.ConstructAndTestGradient(batch=2,
- input_planes=6,
- input_rows=3,
- input_cols=5,
- filter_planes=3,
- filter_rows=3,
- filter_cols=3,
- in_depth=2,
- out_depth=3,
- stride=2,
- padding="VALID",
- test_input=True)
+ self.ConstructAndTestGradient(
+ batch=2,
+ input_planes=6,
+ input_rows=3,
+ input_cols=5,
+ filter_planes=3,
+ filter_rows=3,
+ filter_cols=3,
+ in_depth=2,
+ out_depth=3,
+ stride=2,
+ padding="VALID",
+ test_input=True)
def testFilterGradientValidPaddingStrideTwo(self):
- self.ConstructAndTestGradient(batch=2,
- input_planes=7,
- input_rows=6,
- input_cols=5,
- filter_planes=2,
- filter_rows=2,
- filter_cols=2,
- in_depth=2,
- out_depth=3,
- stride=2,
- padding="VALID",
- test_input=False)
+ self.ConstructAndTestGradient(
+ batch=2,
+ input_planes=7,
+ input_rows=6,
+ input_cols=5,
+ filter_planes=2,
+ filter_rows=2,
+ filter_cols=2,
+ in_depth=2,
+ out_depth=3,
+ stride=2,
+ padding="VALID",
+ test_input=False)
def testInputGradientValidPaddingStrideThree(self):
- self.ConstructAndTestGradient(batch=2,
- input_planes=3,
- input_rows=7,
- input_cols=6,
- filter_planes=3,
- filter_rows=3,
- filter_cols=3,
- in_depth=2,
- out_depth=3,
- stride=3,
- padding="VALID",
- test_input=True)
+ self.ConstructAndTestGradient(
+ batch=2,
+ input_planes=3,
+ input_rows=7,
+ input_cols=6,
+ filter_planes=3,
+ filter_rows=3,
+ filter_cols=3,
+ in_depth=2,
+ out_depth=3,
+ stride=3,
+ padding="VALID",
+ test_input=True)
def testFilterGradientValidPaddingStrideThree(self):
- self.ConstructAndTestGradient(batch=2,
- input_planes=4,
- input_rows=4,
- input_cols=7,
- filter_planes=4,
- filter_rows=4,
- filter_cols=4,
- in_depth=2,
- out_depth=3,
- stride=3,
- padding="VALID",
- test_input=False)
+ self.ConstructAndTestGradient(
+ batch=2,
+ input_planes=4,
+ input_rows=4,
+ input_cols=7,
+ filter_planes=4,
+ filter_rows=4,
+ filter_cols=4,
+ in_depth=2,
+ out_depth=3,
+ stride=3,
+ padding="VALID",
+ test_input=False)
def testInputGradientSamePaddingStrideOne(self):
- self.ConstructAndTestGradient(batch=2,
- input_planes=3,
- input_rows=2,
- input_cols=2,
- filter_planes=3,
- filter_rows=2,
- filter_cols=1,
- in_depth=2,
- out_depth=1,
- stride=1,
- padding="SAME",
- test_input=True)
+ self.ConstructAndTestGradient(
+ batch=2,
+ input_planes=3,
+ input_rows=2,
+ input_cols=2,
+ filter_planes=3,
+ filter_rows=2,
+ filter_cols=1,
+ in_depth=2,
+ out_depth=1,
+ stride=1,
+ padding="SAME",
+ test_input=True)
def testFilterGradientSamePaddingStrideOne(self):
- self.ConstructAndTestGradient(batch=2,
- input_planes=3,
- input_rows=6,
- input_cols=5,
- filter_planes=2,
- filter_rows=2,
- filter_cols=2,
- in_depth=2,
- out_depth=3,
- stride=1,
- padding="SAME",
- test_input=False)
+ self.ConstructAndTestGradient(
+ batch=2,
+ input_planes=3,
+ input_rows=6,
+ input_cols=5,
+ filter_planes=2,
+ filter_rows=2,
+ filter_cols=2,
+ in_depth=2,
+ out_depth=3,
+ stride=1,
+ padding="SAME",
+ test_input=False)
def testInputGradientSamePaddingStrideTwo(self):
- self.ConstructAndTestGradient(batch=2,
- input_planes=6,
- input_rows=3,
- input_cols=4,
- filter_planes=3,
- filter_rows=3,
- filter_cols=3,
- in_depth=2,
- out_depth=3,
- stride=2,
- padding="SAME",
- test_input=True)
+ self.ConstructAndTestGradient(
+ batch=2,
+ input_planes=6,
+ input_rows=3,
+ input_cols=4,
+ filter_planes=3,
+ filter_rows=3,
+ filter_cols=3,
+ in_depth=2,
+ out_depth=3,
+ stride=2,
+ padding="SAME",
+ test_input=True)
def testFilterGradientSamePaddingStrideTwo(self):
- self.ConstructAndTestGradient(batch=4,
- input_planes=7,
- input_rows=3,
- input_cols=5,
- filter_planes=2,
- filter_rows=2,
- filter_cols=2,
- in_depth=2,
- out_depth=3,
- stride=2,
- padding="SAME",
- test_input=False)
+ self.ConstructAndTestGradient(
+ batch=4,
+ input_planes=7,
+ input_rows=3,
+ input_cols=5,
+ filter_planes=2,
+ filter_rows=2,
+ filter_cols=2,
+ in_depth=2,
+ out_depth=3,
+ stride=2,
+ padding="SAME",
+ test_input=False)
def testInputGradientSamePaddingStrideThree(self):
- self.ConstructAndTestGradient(batch=2,
- input_planes=9,
- input_rows=3,
- input_cols=6,
- filter_planes=3,
- filter_rows=3,
- filter_cols=3,
- in_depth=2,
- out_depth=3,
- stride=3,
- padding="SAME",
- test_input=True)
+ self.ConstructAndTestGradient(
+ batch=2,
+ input_planes=9,
+ input_rows=3,
+ input_cols=6,
+ filter_planes=3,
+ filter_rows=3,
+ filter_cols=3,
+ in_depth=2,
+ out_depth=3,
+ stride=3,
+ padding="SAME",
+ test_input=True)
def testFilterGradientSamePaddingStrideThree(self):
- self.ConstructAndTestGradient(batch=2,
- input_planes=9,
- input_rows=4,
- input_cols=7,
- filter_planes=4,
- filter_rows=4,
- filter_cols=4,
- in_depth=2,
- out_depth=3,
- stride=3,
- padding="SAME",
- test_input=False)
+ self.ConstructAndTestGradient(
+ batch=2,
+ input_planes=9,
+ input_rows=4,
+ input_cols=7,
+ filter_planes=4,
+ filter_rows=4,
+ filter_cols=4,
+ in_depth=2,
+ out_depth=3,
+ stride=3,
+ padding="SAME",
+ test_input=False)
def testInputGradientSamePaddingDifferentStrides(self):
- self.ConstructAndTestGradient(batch=1,
- input_planes=5,
- input_rows=8,
- input_cols=7,
- filter_planes=1,
- filter_rows=2,
- filter_cols=3,
- in_depth=2,
- out_depth=3,
- stride=[2, 3, 1],
- padding="SAME",
- test_input=True)
+ self.ConstructAndTestGradient(
+ batch=1,
+ input_planes=5,
+ input_rows=8,
+ input_cols=7,
+ filter_planes=1,
+ filter_rows=2,
+ filter_cols=3,
+ in_depth=2,
+ out_depth=3,
+ stride=[2, 3, 1],
+ padding="SAME",
+ test_input=True)
def disabledtestFilterGradientSamePaddingDifferentStrides(self):
- self.ConstructAndTestGradient(batch=1,
- input_planes=5,
- input_rows=8,
- input_cols=7,
- filter_planes=1,
- filter_rows=2,
- filter_cols=3,
- in_depth=2,
- out_depth=3,
- stride=[2, 3, 1],
- padding="SAME",
- test_input=False)
+ self.ConstructAndTestGradient(
+ batch=1,
+ input_planes=5,
+ input_rows=8,
+ input_cols=7,
+ filter_planes=1,
+ filter_rows=2,
+ filter_cols=3,
+ in_depth=2,
+ out_depth=3,
+ stride=[2, 3, 1],
+ padding="SAME",
+ test_input=False)
if __name__ == "__main__":
- tf.test.main()
+ test.main()
diff --git a/tensorflow/python/kernel_tests/conv_ops_test.py b/tensorflow/python/kernel_tests/conv_ops_test.py
index c21336645f..35e0a39840 100644
--- a/tensorflow/python/kernel_tests/conv_ops_test.py
+++ b/tensorflow/python/kernel_tests/conv_ops_test.py
@@ -12,19 +12,40 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Functional tests for convolutional operations."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
+import sys
import time
import numpy as np
-import tensorflow as tf
+# TODO(mrry): Remove this hack which makes dlopen() in
+# sparse_feature_cross_op.py not crash in the open source world.
+if hasattr(sys, "getdlopenflags") and hasattr(sys, "setdlopenflags"):
+ import ctypes
+ sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
+
+from tensorflow.contrib import layers
+from tensorflow.python.client import session as session_lib
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import errors_impl
+from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import gradient_checker
+from tensorflow.python.ops import nn_impl
+from tensorflow.python.ops import nn_ops
+from tensorflow.python.ops import random_ops
+from tensorflow.python.ops import variables
+import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
+from tensorflow.python.platform import test
+from tensorflow.python.platform import tf_logging
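The setdlopenflags() hack above widens symbol visibility so that shared
objects loaded later (e.g. by contrib kernels) can resolve symbols the
TensorFlow framework library already exports. The general pattern, as a
hedged sketch (the save/restore framing is an illustration, not part of
this file):

    import sys

    if hasattr(sys, "getdlopenflags") and hasattr(sys, "setdlopenflags"):
        import ctypes
        saved = sys.getdlopenflags()
        # Make symbols from subsequently dlopen()ed libraries globally visible.
        sys.setdlopenflags(saved | ctypes.RTLD_GLOBAL)
        # ... import the extension module that dlopen()s a .so here ...
        sys.setdlopenflags(saved)   # restore the original flags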
def GetShrunkInceptionShapes(shrink=10):
@@ -96,9 +117,11 @@ def GetShrunkInceptionShapes(shrink=10):
[4, 35, 35, 96], [4, 35, 35, 32], [4, 35, 35, 64],
[4, 35, 35, 48], [4, 71, 71, 192], [4, 73, 73, 64],
[4, 147, 147, 64]]
- strides = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 2, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
+ strides = [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1
+ ]
# Shrink sizes to make the test faster
for i in input_sizes:
i[3] //= shrink
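Note that shrink divides only the channel dimension, so spatial extents
(and with them the stride and padding choices) stay valid while the
arithmetic cost drops roughly tenfold. For example:

    shrink = 10
    input_sizes = [[4, 147, 147, 64], [4, 35, 35, 96]]
    for i in input_sizes:
        i[3] //= shrink
    print(input_sizes)   # [[4, 147, 147, 6], [4, 35, 35, 9]]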
@@ -111,13 +134,13 @@ def GetShrunkInceptionShapes(shrink=10):
VALID = "VALID"
SAME = "SAME"
# pylint: enable=invalid-name
- paddings = [SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME,
- SAME, SAME, SAME, SAME, VALID, SAME, SAME, VALID,
- SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME,
- SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME,
- SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME,
- SAME, VALID, VALID, SAME, SAME, SAME, SAME, SAME,
- SAME, SAME, SAME, SAME, VALID, VALID, VALID]
+ paddings = [
+ SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME,
+ VALID, SAME, SAME, VALID, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME,
+ SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME,
+ SAME, SAME, SAME, SAME, SAME, VALID, VALID, SAME, SAME, SAME, SAME, SAME,
+ SAME, SAME, SAME, SAME, VALID, VALID, VALID
+ ]
for i, f, o, s, p in zip(input_sizes, filter_sizes, out_sizes, strides,
paddings):
yield i, f, o, s, p
@@ -131,8 +154,8 @@ def NHWCToNCHW(input_tensor):
Returns:
the converted tensor or a shape array
"""
- if isinstance(input_tensor, tf.Tensor):
- return tf.transpose(input_tensor, [0, 3, 1, 2])
+ if isinstance(input_tensor, ops.Tensor):
+ return array_ops.transpose(input_tensor, [0, 3, 1, 2])
else:
return [input_tensor[0], input_tensor[3], input_tensor[1], input_tensor[2]]
@@ -145,8 +168,8 @@ def NCHWToNHWC(input_tensor):
Returns:
the converted tensor or a shape array
"""
- if isinstance(input_tensor, tf.Tensor):
- return tf.transpose(input_tensor, [0, 2, 3, 1])
+ if isinstance(input_tensor, ops.Tensor):
+ return array_ops.transpose(input_tensor, [0, 2, 3, 1])
else:
return [input_tensor[0], input_tensor[2], input_tensor[3], input_tensor[1]]
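Both helpers apply the same permutation whether handed a tensor or a bare
shape list; a numpy sketch of the NHWC-to-NCHW case:

    import numpy as np

    x = np.zeros([1, 5, 8, 3])                       # NHWC
    print(np.transpose(x, [0, 3, 1, 2]).shape)       # (1, 3, 5, 8), i.e. NCHW
    shape = [1, 5, 8, 3]                             # shape-list form
    print([shape[0], shape[3], shape[1], shape[2]])  # [1, 3, 5, 8]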
@@ -158,21 +181,21 @@ def GetTestConfigs():
all the valid test configs as tuples of data_format and use_gpu.
"""
test_configs = [("NHWC", False), ("NHWC", True)]
- if tf.test.is_gpu_available():
+ if test.is_gpu_available():
# "NCHW" format is not currently supported on CPU.
test_configs += [("NCHW", True)]
return test_configs
-class Conv2DTest(tf.test.TestCase):
+class Conv2DTest(test.TestCase):
def _DtypesToTest(self, use_gpu):
if use_gpu and not test_util.CudaSupportsHalfMatMulAndConv():
- return [tf.float32]
+ return [dtypes.float32]
else:
# It is important that float32 comes before float16 here,
# as we will be using its gradients as reference for fp16 gradients.
- return [tf.float32, tf.float16]
+ return [dtypes.float32, dtypes.float16]
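Ordering float32 before float16 matters because the gradient checks
further down reuse the float32 Jacobian as the reference for float16,
where numeric differencing is too coarse. A numpy-only sketch of that
comparison (stand-in values, variable names hypothetical):

    import numpy as np

    jacob_t32 = np.array([1.0, 2.0, 3.0])          # fp32 theoretical Jacobian
    jacob_n32 = jacob_t32 + 1e-6                   # fp32 numeric Jacobian
    err32 = np.fabs(jacob_t32 - jacob_n32).max()   # fp32: theoretical vs numeric
    jacob_t16 = jacob_t32.astype(np.float16).astype(np.float64)
    err16 = np.fabs(jacob_t16 - jacob_t32).max()   # fp16: vs the fp32 reference
    print(err32, err16)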
def _SetupValuesForDevice(self, tensor_in_sizes, filter_in_sizes, strides,
padding, data_format, dtype, use_gpu):
@@ -202,24 +225,21 @@ class Conv2DTest(tf.test.TestCase):
x1 = [f * 1.0 for f in range(1, total_size_1 + 1)]
x2 = [f * 1.0 for f in range(1, total_size_2 + 1)]
with self.test_session(use_gpu=use_gpu) as sess:
- t1 = tf.constant(x1, shape=tensor_in_sizes, dtype=dtype)
- t2 = tf.constant(x2, shape=filter_in_sizes, dtype=dtype)
+ t1 = constant_op.constant(x1, shape=tensor_in_sizes, dtype=dtype)
+ t2 = constant_op.constant(x2, shape=filter_in_sizes, dtype=dtype)
strides = [1] + strides + [1]
if data_format == "NCHW":
t1 = NHWCToNCHW(t1)
strides = NHWCToNCHW(strides)
- conv = tf.nn.conv2d(t1,
- t2,
- strides=strides,
- padding=padding,
- data_format=data_format)
+ conv = nn_ops.conv2d(
+ t1, t2, strides=strides, padding=padding, data_format=data_format)
if data_format == "NCHW":
conv = NCHWToNHWC(conv)
return conv
- def _CompareFwdValues(self, tensor_in_sizes, filter_in_sizes,
- conv_strides, padding):
+ def _CompareFwdValues(self, tensor_in_sizes, filter_in_sizes, conv_strides,
+ padding):
"""Verifies that CPU and GPU produce the same values.
Args:
@@ -232,22 +252,21 @@ class Conv2DTest(tf.test.TestCase):
"""
x1 = np.random.rand(*tensor_in_sizes).astype(np.float32)
x2 = np.random.rand(*filter_in_sizes).astype(np.float32)
+
def _SetupVal(data_format, use_gpu):
with self.test_session(use_gpu=use_gpu):
- t1 = tf.constant(x1, shape=tensor_in_sizes)
- t2 = tf.constant(x2, shape=filter_in_sizes)
+ t1 = constant_op.constant(x1, shape=tensor_in_sizes)
+ t2 = constant_op.constant(x2, shape=filter_in_sizes)
strides = [1] + conv_strides + [1]
if data_format == "NCHW":
t1 = NHWCToNCHW(t1)
strides = NHWCToNCHW(strides)
- conv = tf.nn.conv2d(t1,
- t2,
- strides=strides,
- padding=padding,
- data_format=data_format)
+ conv = nn_ops.conv2d(
+ t1, t2, strides=strides, padding=padding, data_format=data_format)
if data_format == "NCHW":
conv = NCHWToNHWC(conv)
return conv
+
tensors = []
for (data_format, use_gpu) in GetTestConfigs():
tensors.append(_SetupVal(data_format, use_gpu))
@@ -256,18 +275,19 @@ class Conv2DTest(tf.test.TestCase):
for i in range(1, len(values)):
self.assertAllClose(values[0], values[i], rtol=1e-5, atol=1e-5)
- def _VerifyValues(self, tensor_in_sizes, filter_in_sizes, strides,
- padding, expected):
+ def _VerifyValues(self, tensor_in_sizes, filter_in_sizes, strides, padding,
+ expected):
tensors = []
for (data_format, use_gpu) in GetTestConfigs():
for dtype in self._DtypesToTest(use_gpu):
- result = self._SetupValuesForDevice(tensor_in_sizes,
- filter_in_sizes,
- strides,
- padding,
- data_format,
- dtype,
- use_gpu=use_gpu)
+ result = self._SetupValuesForDevice(
+ tensor_in_sizes,
+ filter_in_sizes,
+ strides,
+ padding,
+ data_format,
+ dtype,
+ use_gpu=use_gpu)
tensors.append(result)
with self.test_session() as sess:
values = sess.run(tensors)
@@ -283,81 +303,106 @@ class Conv2DTest(tf.test.TestCase):
self.assertShapeEqual(value, conv)
def testConv2D1x1Filter(self):
- expected_output = [30.0, 36.0, 42.0, 66.0, 81.0, 96.0, 102.0, 126.0, 150.0,
- 138.0, 171.0, 204.0, 174.0, 216.0, 258.0, 210.0, 261.0,
- 312.0]
- self._VerifyValues(tensor_in_sizes=[1, 2, 3, 3],
- filter_in_sizes=[1, 1, 3, 3],
- strides=[1, 1], padding="VALID",
- expected=expected_output)
+ expected_output = [
+ 30.0, 36.0, 42.0, 66.0, 81.0, 96.0, 102.0, 126.0, 150.0, 138.0, 171.0,
+ 204.0, 174.0, 216.0, 258.0, 210.0, 261.0, 312.0
+ ]
+ self._VerifyValues(
+ tensor_in_sizes=[1, 2, 3, 3],
+ filter_in_sizes=[1, 1, 3, 3],
+ strides=[1, 1],
+ padding="VALID",
+ expected=expected_output)
def testConv2DEmpty(self):
expected_output = []
- self._VerifyValues(tensor_in_sizes=[0, 2, 3, 3],
- filter_in_sizes=[1, 1, 3, 3],
- strides=[1, 1], padding="VALID",
- expected=expected_output)
+ self._VerifyValues(
+ tensor_in_sizes=[0, 2, 3, 3],
+ filter_in_sizes=[1, 1, 3, 3],
+ strides=[1, 1],
+ padding="VALID",
+ expected=expected_output)
def testConv2D2x2Filter(self):
# The outputs are computed using third_party/py/IPython/notebook.
expected_output = [2271.0, 2367.0, 2463.0, 2901.0, 3033.0, 3165.0]
- self._VerifyValues(tensor_in_sizes=[1, 2, 3, 3],
- filter_in_sizes=[2, 2, 3, 3],
- strides=[1, 1], padding="VALID",
- expected=expected_output)
+ self._VerifyValues(
+ tensor_in_sizes=[1, 2, 3, 3],
+ filter_in_sizes=[2, 2, 3, 3],
+ strides=[1, 1],
+ padding="VALID",
+ expected=expected_output)
def testConv2D1x2Filter(self):
# The outputs are computed using third_party/py/IPython/notebook.
- expected_output = [231.0, 252.0, 273.0, 384.0, 423.0, 462.0, 690.0,
- 765.0, 840.0, 843.0, 936.0, 1029.0]
- self._VerifyValues(tensor_in_sizes=[1, 2, 3, 3],
- filter_in_sizes=[1, 2, 3, 3],
- strides=[1, 1], padding="VALID",
- expected=expected_output)
+ expected_output = [
+ 231.0, 252.0, 273.0, 384.0, 423.0, 462.0, 690.0, 765.0, 840.0, 843.0,
+ 936.0, 1029.0
+ ]
+ self._VerifyValues(
+ tensor_in_sizes=[1, 2, 3, 3],
+ filter_in_sizes=[1, 2, 3, 3],
+ strides=[1, 1],
+ padding="VALID",
+ expected=expected_output)
def testConv2D2x2FilterStride2(self):
expected_output = [2271.0, 2367.0, 2463.0]
- self._VerifyValues(tensor_in_sizes=[1, 2, 3, 3],
- filter_in_sizes=[2, 2, 3, 3],
- strides=[2, 2], padding="VALID",
- expected=expected_output)
+ self._VerifyValues(
+ tensor_in_sizes=[1, 2, 3, 3],
+ filter_in_sizes=[2, 2, 3, 3],
+ strides=[2, 2],
+ padding="VALID",
+ expected=expected_output)
def testConv2D2x2FilterStride2Same(self):
expected_output = [2271.0, 2367.0, 2463.0, 1230.0, 1305.0, 1380.0]
- self._VerifyValues(tensor_in_sizes=[1, 2, 3, 3],
- filter_in_sizes=[2, 2, 3, 3],
- strides=[2, 2], padding="SAME",
- expected=expected_output)
+ self._VerifyValues(
+ tensor_in_sizes=[1, 2, 3, 3],
+ filter_in_sizes=[2, 2, 3, 3],
+ strides=[2, 2],
+ padding="SAME",
+ expected=expected_output)
def testConv2D2x2FilterStride1x2(self):
expected_output = [58.0, 78.0, 98.0, 118.0, 138.0, 158.0]
- self._VerifyValues(tensor_in_sizes=[1, 3, 6, 1],
- filter_in_sizes=[2, 2, 1, 1],
- strides=[1, 2], padding="VALID",
- expected=expected_output)
+ self._VerifyValues(
+ tensor_in_sizes=[1, 3, 6, 1],
+ filter_in_sizes=[2, 2, 1, 1],
+ strides=[1, 2],
+ padding="VALID",
+ expected=expected_output)
def testConv2DKernelSmallerThanStrideValid(self):
expected_output = [65, 95, 275, 305]
- self._VerifyValues(tensor_in_sizes=[1, 7, 7, 1],
- filter_in_sizes=[2, 2, 1, 1],
- strides=[3, 3], padding="VALID",
- expected=expected_output)
+ self._VerifyValues(
+ tensor_in_sizes=[1, 7, 7, 1],
+ filter_in_sizes=[2, 2, 1, 1],
+ strides=[3, 3],
+ padding="VALID",
+ expected=expected_output)
def testConv2DKernelSmallerThanStrideSame(self):
- self._VerifyValues(tensor_in_sizes=[1, 3, 3, 1],
- filter_in_sizes=[1, 1, 1, 1],
- strides=[2, 2], padding="SAME",
- expected=[1, 3, 7, 9])
-
- self._VerifyValues(tensor_in_sizes=[1, 4, 4, 1],
- filter_in_sizes=[1, 1, 1, 1],
- strides=[2, 2], padding="SAME",
- expected=[1, 3, 9, 11])
-
- self._VerifyValues(tensor_in_sizes=[1, 4, 4, 1],
- filter_in_sizes=[2, 2, 1, 1],
- strides=[3, 3], padding="SAME",
- expected=[44, 28, 41, 16])
+ self._VerifyValues(
+ tensor_in_sizes=[1, 3, 3, 1],
+ filter_in_sizes=[1, 1, 1, 1],
+ strides=[2, 2],
+ padding="SAME",
+ expected=[1, 3, 7, 9])
+
+ self._VerifyValues(
+ tensor_in_sizes=[1, 4, 4, 1],
+ filter_in_sizes=[1, 1, 1, 1],
+ strides=[2, 2],
+ padding="SAME",
+ expected=[1, 3, 9, 11])
+
+ self._VerifyValues(
+ tensor_in_sizes=[1, 4, 4, 1],
+ filter_in_sizes=[2, 2, 1, 1],
+ strides=[3, 3],
+ padding="SAME",
+ expected=[44, 28, 41, 16])
    # TODO: this currently fails.
#self._VerifyValues(tensor_in_sizes=[1, 8, 8, 1],
@@ -382,19 +427,15 @@ class Conv2DTest(tf.test.TestCase):
with self.test_session(use_gpu=use_gpu) as sess:
if data_format == "NCHW":
input_sizes = NHWCToNCHW(input_sizes)
- t0 = tf.constant(input_sizes, shape=[len(input_sizes)])
- t1 = tf.constant(x1, shape=filter_sizes)
- t2 = tf.constant(x2, shape=output_sizes)
+ t0 = constant_op.constant(input_sizes, shape=[len(input_sizes)])
+ t1 = constant_op.constant(x1, shape=filter_sizes)
+ t2 = constant_op.constant(x2, shape=output_sizes)
strides = [1] + strides + [1]
if data_format == "NCHW":
t2 = NHWCToNCHW(t2)
strides = NHWCToNCHW(strides)
- conv = tf.nn.conv2d_backprop_input(t0,
- t1,
- t2,
- strides=strides,
- padding=padding,
- data_format=data_format)
+ conv = nn_ops.conv2d_backprop_input(
+ t0, t1, t2, strides=strides, padding=padding, data_format=data_format)
if data_format == "NCHW":
conv = NCHWToNHWC(conv)
# "values" consists of two tensors for two backprops
@@ -408,30 +449,33 @@ class Conv2DTest(tf.test.TestCase):
conv_strides, padding):
x1 = np.random.rand(*filter_sizes).astype(np.float32)
x2 = np.random.rand(*output_sizes).astype(np.float32)
+
def _GetVal(data_format, use_gpu):
with self.test_session(use_gpu=use_gpu) as sess:
if data_format == "NCHW":
new_input_sizes = NHWCToNCHW(input_sizes)
else:
new_input_sizes = input_sizes
- t0 = tf.constant(new_input_sizes, shape=[len(new_input_sizes)])
- t1 = tf.constant(x1, shape=filter_sizes)
- t2 = tf.constant(x2, shape=output_sizes)
+ t0 = constant_op.constant(new_input_sizes, shape=[len(new_input_sizes)])
+ t1 = constant_op.constant(x1, shape=filter_sizes)
+ t2 = constant_op.constant(x2, shape=output_sizes)
strides = [1] + conv_strides + [1]
if data_format == "NCHW":
t2 = NHWCToNCHW(t2)
strides = NHWCToNCHW(strides)
- conv = tf.nn.conv2d_backprop_input(t0,
- t1,
- t2,
- strides=strides,
- padding=padding,
- data_format=data_format)
+ conv = nn_ops.conv2d_backprop_input(
+ t0,
+ t1,
+ t2,
+ strides=strides,
+ padding=padding,
+ data_format=data_format)
if data_format == "NCHW":
conv = NCHWToNHWC(conv)
ret = conv.eval()
self.assertShapeEqual(ret, conv)
return ret
+
values = []
for (data_format, use_gpu) in GetTestConfigs():
values.append(_GetVal(data_format, use_gpu))
@@ -442,66 +486,69 @@ class Conv2DTest(tf.test.TestCase):
def testConv2D2x2Depth1ValidBackpropInput(self):
expected_output = [1.0, 4.0, 4.0, 3.0, 10.0, 8.0]
for (data_format, use_gpu) in GetTestConfigs():
- self._RunAndVerifyBackpropInput(input_sizes=[1, 2, 3, 1],
- filter_sizes=[2, 2, 1, 1],
- output_sizes=[1, 1, 2, 1],
- strides=[1, 1],
- padding="VALID",
- expected=expected_output,
- data_format=data_format,
- use_gpu=use_gpu,
- err=1e-5)
+ self._RunAndVerifyBackpropInput(
+ input_sizes=[1, 2, 3, 1],
+ filter_sizes=[2, 2, 1, 1],
+ output_sizes=[1, 1, 2, 1],
+ strides=[1, 1],
+ padding="VALID",
+ expected=expected_output,
+ data_format=data_format,
+ use_gpu=use_gpu,
+ err=1e-5)
def testConv2D2x2Depth3ValidBackpropInput(self):
- expected_output = [14.0, 32.0, 50.0,
- 100.0, 163.0, 226.0,
- 167.0, 212.0, 257.0,
- 122.0, 140.0, 158.0,
- 478.0, 541.0, 604.0,
- 437.0, 482.0, 527.0]
+ expected_output = [
+ 14.0, 32.0, 50.0, 100.0, 163.0, 226.0, 167.0, 212.0, 257.0, 122.0,
+ 140.0, 158.0, 478.0, 541.0, 604.0, 437.0, 482.0, 527.0
+ ]
for (data_format, use_gpu) in GetTestConfigs():
      # The GPU version of this test is not very stable, so the error
      # threshold is raised to 1e-4.
- self._RunAndVerifyBackpropInput(input_sizes=[1, 2, 3, 3],
- filter_sizes=[2, 2, 3, 3],
- output_sizes=[1, 1, 2, 3],
- strides=[1, 1],
- padding="VALID",
- expected=expected_output,
- data_format=data_format,
- use_gpu=use_gpu,
- err=1e-4)
+ self._RunAndVerifyBackpropInput(
+ input_sizes=[1, 2, 3, 3],
+ filter_sizes=[2, 2, 3, 3],
+ output_sizes=[1, 1, 2, 3],
+ strides=[1, 1],
+ padding="VALID",
+ expected=expected_output,
+ data_format=data_format,
+ use_gpu=use_gpu,
+ err=1e-4)
def testConv2D2x2Depth3ValidBackpropInputStride1x2(self):
- expected_output = [1.0, 2.0, 2.0, 4.0, 3.0, 6.0,
- 7.0, 12.0, 11.0, 18.0, 15.0, 24.0,
- 12.0, 16.0, 15.0, 20.0, 18.0, 24.0]
+ expected_output = [
+ 1.0, 2.0, 2.0, 4.0, 3.0, 6.0, 7.0, 12.0, 11.0, 18.0, 15.0, 24.0, 12.0,
+ 16.0, 15.0, 20.0, 18.0, 24.0
+ ]
for (data_format, use_gpu) in GetTestConfigs():
- self._RunAndVerifyBackpropInput(input_sizes=[1, 3, 6, 1],
- filter_sizes=[2, 2, 1, 1],
- output_sizes=[1, 2, 3, 1],
- strides=[1, 2],
- padding="VALID",
- expected=expected_output,
- data_format=data_format,
- use_gpu=use_gpu,
- err=1e-5)
+ self._RunAndVerifyBackpropInput(
+ input_sizes=[1, 3, 6, 1],
+ filter_sizes=[2, 2, 1, 1],
+ output_sizes=[1, 2, 3, 1],
+ strides=[1, 2],
+ padding="VALID",
+ expected=expected_output,
+ data_format=data_format,
+ use_gpu=use_gpu,
+ err=1e-5)
def testConv2DStrideTwoFilterOneSameBackpropInput(self):
- expected_output = [1.0, 0.0, 2.0, 0.0,
- 0.0, 0.0, 0.0, 0.0,
- 3.0, 0.0, 4.0, 0.0,
- 0.0, 0.0, 0.0, 0.0]
+ expected_output = [
+ 1.0, 0.0, 2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 3.0, 0.0, 4.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0
+ ]
for (data_format, use_gpu) in GetTestConfigs():
- self._RunAndVerifyBackpropInput(input_sizes=[1, 4, 4, 1],
- filter_sizes=[1, 1, 1, 1],
- output_sizes=[1, 2, 2, 1],
- strides=[2, 2],
- padding="SAME",
- expected=expected_output,
- data_format=data_format,
- use_gpu=use_gpu,
- err=1e-5)
+ self._RunAndVerifyBackpropInput(
+ input_sizes=[1, 4, 4, 1],
+ filter_sizes=[1, 1, 1, 1],
+ output_sizes=[1, 2, 2, 1],
+ strides=[2, 2],
+ padding="SAME",
+ expected=expected_output,
+ data_format=data_format,
+ use_gpu=use_gpu,
+ err=1e-5)
# Testing for backprops
def _RunAndVerifyBackpropFilter(self, input_sizes, filter_sizes, output_sizes,
@@ -519,20 +566,21 @@ class Conv2DTest(tf.test.TestCase):
x2 = [f * 1.0 for f in range(1, total_output_size + 1)]
for dtype in self._DtypesToTest(use_gpu=use_gpu):
with self.test_session(use_gpu=use_gpu) as sess:
- t0 = tf.constant(x0, shape=input_sizes, dtype=dtype)
- t1 = tf.constant(filter_sizes, shape=[len(filter_sizes)])
- t2 = tf.constant(x2, shape=output_sizes, dtype=dtype)
+ t0 = constant_op.constant(x0, shape=input_sizes, dtype=dtype)
+ t1 = constant_op.constant(filter_sizes, shape=[len(filter_sizes)])
+ t2 = constant_op.constant(x2, shape=output_sizes, dtype=dtype)
explicit_strides = [1] + strides + [1]
if data_format == "NCHW":
t0 = NHWCToNCHW(t0)
t2 = NHWCToNCHW(t2)
explicit_strides = NHWCToNCHW(explicit_strides)
- conv = tf.nn.conv2d_backprop_filter(t0,
- t1,
- t2,
- strides=explicit_strides,
- padding=padding,
- data_format=data_format)
+ conv = nn_ops.conv2d_backprop_filter(
+ t0,
+ t1,
+ t2,
+ strides=explicit_strides,
+ padding=padding,
+ data_format=data_format)
value = sess.run(conv)
self.assertShapeEqual(value, conv)
print("expected = ", expected)
@@ -543,25 +591,28 @@ class Conv2DTest(tf.test.TestCase):
conv_strides, padding):
x0 = np.random.rand(*input_sizes).astype(np.float32)
x2 = np.random.rand(*output_sizes).astype(np.float32)
+
def _GetVal(data_format, use_gpu):
with self.test_session(use_gpu=use_gpu) as sess:
- t0 = tf.constant(x0, shape=input_sizes)
- t1 = tf.constant(filter_sizes, shape=[len(filter_sizes)])
- t2 = tf.constant(x2, shape=output_sizes)
+ t0 = constant_op.constant(x0, shape=input_sizes)
+ t1 = constant_op.constant(filter_sizes, shape=[len(filter_sizes)])
+ t2 = constant_op.constant(x2, shape=output_sizes)
strides = [1] + conv_strides + [1]
if data_format == "NCHW":
t0 = NHWCToNCHW(t0)
t2 = NHWCToNCHW(t2)
strides = NHWCToNCHW(strides)
- conv = tf.nn.conv2d_backprop_filter(t0,
- t1,
- t2,
- strides=strides,
- padding=padding,
- data_format=data_format)
+ conv = nn_ops.conv2d_backprop_filter(
+ t0,
+ t1,
+ t2,
+ strides=strides,
+ padding=padding,
+ data_format=data_format)
ret = conv.eval()
self.assertShapeEqual(ret, conv)
return ret
+
values = []
for (data_format, use_gpu) in GetTestConfigs():
values.append(_GetVal(data_format, use_gpu))
@@ -571,53 +622,59 @@ class Conv2DTest(tf.test.TestCase):
def testConv2D2x2Depth1ValidBackpropFilter(self):
expected = [5.0, 8.0, 14.0, 17.0]
for (data_format, use_gpu) in GetTestConfigs():
- self._RunAndVerifyBackpropFilter(input_sizes=[1, 2, 3, 1],
- filter_sizes=[2, 2, 1, 1],
- output_sizes=[1, 1, 2, 1],
- strides=[1, 1],
- padding="VALID",
- expected=expected,
- data_format=data_format,
- use_gpu=use_gpu)
+ self._RunAndVerifyBackpropFilter(
+ input_sizes=[1, 2, 3, 1],
+ filter_sizes=[2, 2, 1, 1],
+ output_sizes=[1, 1, 2, 1],
+ strides=[1, 1],
+ padding="VALID",
+ expected=expected,
+ data_format=data_format,
+ use_gpu=use_gpu)
def testConv2D2x2Depth3ValidBackpropFilter(self):
- expected = [17.0, 22.0, 27.0, 22.0, 29.0, 36.0, 27.0, 36.0, 45.0,
- 32.0, 43.0, 54.0, 37.0, 50.0, 63.0, 42.0, 57.0, 72.0,
- 62.0, 85.0, 108.0, 67.0, 92.0, 117.0, 72.0, 99.0, 126.0,
- 77.0, 106.0, 135.0, 82.0, 113.0, 144.0, 87.0, 120.0, 153.0]
+ expected = [
+ 17.0, 22.0, 27.0, 22.0, 29.0, 36.0, 27.0, 36.0, 45.0, 32.0, 43.0, 54.0,
+ 37.0, 50.0, 63.0, 42.0, 57.0, 72.0, 62.0, 85.0, 108.0, 67.0, 92.0,
+ 117.0, 72.0, 99.0, 126.0, 77.0, 106.0, 135.0, 82.0, 113.0, 144.0, 87.0,
+ 120.0, 153.0
+ ]
for (data_format, use_gpu) in GetTestConfigs():
- self._RunAndVerifyBackpropFilter(input_sizes=[1, 2, 3, 3],
- filter_sizes=[2, 2, 3, 3],
- output_sizes=[1, 1, 2, 3],
- strides=[1, 1],
- padding="VALID",
- expected=expected,
- data_format=data_format,
- use_gpu=use_gpu)
+ self._RunAndVerifyBackpropFilter(
+ input_sizes=[1, 2, 3, 3],
+ filter_sizes=[2, 2, 3, 3],
+ output_sizes=[1, 1, 2, 3],
+ strides=[1, 1],
+ padding="VALID",
+ expected=expected,
+ data_format=data_format,
+ use_gpu=use_gpu)
def testConv2D2x2Depth3ValidBackpropFilterStride1x2(self):
expected = [161.0, 182.0, 287.0, 308.0]
for (data_format, use_gpu) in GetTestConfigs():
- self._RunAndVerifyBackpropFilter(input_sizes=[1, 3, 6, 1],
- filter_sizes=[2, 2, 1, 1],
- output_sizes=[1, 2, 3, 1],
- strides=[1, 2],
- padding="VALID",
- expected=expected,
- data_format=data_format,
- use_gpu=use_gpu)
+ self._RunAndVerifyBackpropFilter(
+ input_sizes=[1, 3, 6, 1],
+ filter_sizes=[2, 2, 1, 1],
+ output_sizes=[1, 2, 3, 1],
+ strides=[1, 2],
+ padding="VALID",
+ expected=expected,
+ data_format=data_format,
+ use_gpu=use_gpu)
def testConv2DStrideTwoFilterOneSameBackpropFilter(self):
expected_output = [78.]
for (data_format, use_gpu) in GetTestConfigs():
- self._RunAndVerifyBackpropFilter(input_sizes=[1, 4, 4, 1],
- filter_sizes=[1, 1, 1, 1],
- output_sizes=[1, 2, 2, 1],
- strides=[2, 2],
- padding="SAME",
- expected=expected_output,
- data_format=data_format,
- use_gpu=use_gpu)
+ self._RunAndVerifyBackpropFilter(
+ input_sizes=[1, 4, 4, 1],
+ filter_sizes=[1, 1, 1, 1],
+ output_sizes=[1, 2, 2, 1],
+ strides=[2, 2],
+ padding="SAME",
+ expected=expected_output,
+ data_format=data_format,
+ use_gpu=use_gpu)
# Gradient checkers
def ConstructAndTestGradient(self, batch, input_rows, input_cols, filter_rows,
@@ -648,32 +705,37 @@ class Conv2DTest(tf.test.TestCase):
# when double support returns for CPU and/or GPU.
for dtype in self._DtypesToTest(use_gpu=use_gpu):
with self.test_session(use_gpu=use_gpu):
- input_tensor = tf.constant(input_data, shape=input_shape,
- dtype=dtype, name="input")
- filter_tensor = tf.constant(filter_data, shape=filter_shape,
- dtype=dtype, name="filter")
+ input_tensor = constant_op.constant(
+ input_data, shape=input_shape, dtype=dtype, name="input")
+ filter_tensor = constant_op.constant(
+ filter_data, shape=filter_shape, dtype=dtype, name="filter")
strides = [1, stride_rows, stride_cols, 1]
if data_format == "NCHW":
new_input_tensor = NHWCToNCHW(input_tensor)
strides = NHWCToNCHW(strides)
else:
new_input_tensor = input_tensor
- conv = tf.nn.conv2d(new_input_tensor,
- filter_tensor,
- strides,
- padding,
- data_format=data_format,
- name="conv")
+ conv = nn_ops.conv2d(
+ new_input_tensor,
+ filter_tensor,
+ strides,
+ padding,
+ data_format=data_format,
+ name="conv")
if data_format == "NCHW":
conv = NCHWToNHWC(conv)
self.assertEqual(output_shape, conv.get_shape())
if test_input:
- jacob_t, jacob_n = tf.test.compute_gradient(input_tensor, input_shape,
- conv, output_shape)
+ jacob_t, jacob_n = gradient_checker.compute_gradient(input_tensor,
+ input_shape,
+ conv,
+ output_shape)
else:
- jacob_t, jacob_n = tf.test.compute_gradient(
- filter_tensor, filter_shape, conv, output_shape)
- if dtype == tf.float32:
+ jacob_t, jacob_n = gradient_checker.compute_gradient(filter_tensor,
+ filter_shape,
+ conv,
+ output_shape)
+ if dtype == dtypes.float32:
reference_jacob_t = jacob_t
err = np.fabs(jacob_t - jacob_n).max()
else:
@@ -686,276 +748,299 @@ class Conv2DTest(tf.test.TestCase):
def testInputGradientValidPaddingStrideOne(self):
for (data_format, use_gpu) in GetTestConfigs():
- self.ConstructAndTestGradient(batch=2,
- input_rows=5,
- input_cols=4,
- filter_rows=3,
- filter_cols=3,
- in_depth=2,
- out_depth=3,
- stride_rows=1,
- stride_cols=1,
- padding="VALID",
- test_input=True,
- data_format=data_format,
- use_gpu=use_gpu)
+ self.ConstructAndTestGradient(
+ batch=2,
+ input_rows=5,
+ input_cols=4,
+ filter_rows=3,
+ filter_cols=3,
+ in_depth=2,
+ out_depth=3,
+ stride_rows=1,
+ stride_cols=1,
+ padding="VALID",
+ test_input=True,
+ data_format=data_format,
+ use_gpu=use_gpu)
def testFilterGradientValidPaddingStrideOne(self):
for (data_format, use_gpu) in GetTestConfigs():
- self.ConstructAndTestGradient(batch=4,
- input_rows=6,
- input_cols=5,
- filter_rows=2,
- filter_cols=2,
- in_depth=2,
- out_depth=3,
- stride_rows=1,
- stride_cols=1,
- padding="VALID",
- test_input=False,
- data_format=data_format,
- use_gpu=use_gpu)
+ self.ConstructAndTestGradient(
+ batch=4,
+ input_rows=6,
+ input_cols=5,
+ filter_rows=2,
+ filter_cols=2,
+ in_depth=2,
+ out_depth=3,
+ stride_rows=1,
+ stride_cols=1,
+ padding="VALID",
+ test_input=False,
+ data_format=data_format,
+ use_gpu=use_gpu)
def testInputGradientValidPaddingStrideTwo(self):
for (data_format, use_gpu) in GetTestConfigs():
- self.ConstructAndTestGradient(batch=2,
- input_rows=4,
- input_cols=5,
- filter_rows=3,
- filter_cols=3,
- in_depth=2,
- out_depth=3,
- stride_rows=2,
- stride_cols=2,
- padding="VALID",
- test_input=True,
- data_format=data_format,
- use_gpu=use_gpu)
+ self.ConstructAndTestGradient(
+ batch=2,
+ input_rows=4,
+ input_cols=5,
+ filter_rows=3,
+ filter_cols=3,
+ in_depth=2,
+ out_depth=3,
+ stride_rows=2,
+ stride_cols=2,
+ padding="VALID",
+ test_input=True,
+ data_format=data_format,
+ use_gpu=use_gpu)
def testFilterGradientValidPaddingStrideTwo(self):
for (data_format, use_gpu) in GetTestConfigs():
- self.ConstructAndTestGradient(batch=4,
- input_rows=6,
- input_cols=5,
- filter_rows=2,
- filter_cols=2,
- in_depth=2,
- out_depth=3,
- stride_rows=2,
- stride_cols=2,
- padding="VALID",
- test_input=False,
- data_format=data_format,
- use_gpu=use_gpu)
+ self.ConstructAndTestGradient(
+ batch=4,
+ input_rows=6,
+ input_cols=5,
+ filter_rows=2,
+ filter_cols=2,
+ in_depth=2,
+ out_depth=3,
+ stride_rows=2,
+ stride_cols=2,
+ padding="VALID",
+ test_input=False,
+ data_format=data_format,
+ use_gpu=use_gpu)
def testInputGradientValidPaddingStrideThree(self):
for (data_format, use_gpu) in GetTestConfigs():
- self.ConstructAndTestGradient(batch=2,
- input_rows=7,
- input_cols=6,
- filter_rows=3,
- filter_cols=3,
- in_depth=4,
- out_depth=5,
- stride_rows=3,
- stride_cols=3,
- padding="VALID",
- test_input=True,
- data_format=data_format,
- use_gpu=use_gpu)
+ self.ConstructAndTestGradient(
+ batch=2,
+ input_rows=7,
+ input_cols=6,
+ filter_rows=3,
+ filter_cols=3,
+ in_depth=4,
+ out_depth=5,
+ stride_rows=3,
+ stride_cols=3,
+ padding="VALID",
+ test_input=True,
+ data_format=data_format,
+ use_gpu=use_gpu)
def testFilterGradientValidPaddingStrideThree(self):
for (data_format, use_gpu) in GetTestConfigs():
- self.ConstructAndTestGradient(batch=2,
- input_rows=8,
- input_cols=7,
- filter_rows=4,
- filter_cols=4,
- in_depth=2,
- out_depth=3,
- stride_rows=3,
- stride_cols=3,
- padding="VALID",
- test_input=False,
- data_format=data_format,
- use_gpu=use_gpu)
+ self.ConstructAndTestGradient(
+ batch=2,
+ input_rows=8,
+ input_cols=7,
+ filter_rows=4,
+ filter_cols=4,
+ in_depth=2,
+ out_depth=3,
+ stride_rows=3,
+ stride_cols=3,
+ padding="VALID",
+ test_input=False,
+ data_format=data_format,
+ use_gpu=use_gpu)
def testInputGradientSamePaddingStrideOne(self):
for (data_format, use_gpu) in GetTestConfigs():
- self.ConstructAndTestGradient(batch=2,
- input_rows=7,
- input_cols=6,
- filter_rows=3,
- filter_cols=3,
- in_depth=2,
- out_depth=3,
- stride_rows=1,
- stride_cols=1,
- padding="SAME",
- test_input=True,
- data_format=data_format,
- use_gpu=use_gpu)
+ self.ConstructAndTestGradient(
+ batch=2,
+ input_rows=7,
+ input_cols=6,
+ filter_rows=3,
+ filter_cols=3,
+ in_depth=2,
+ out_depth=3,
+ stride_rows=1,
+ stride_cols=1,
+ padding="SAME",
+ test_input=True,
+ data_format=data_format,
+ use_gpu=use_gpu)
def testFilterGradientSamePaddingStrideOne(self):
for (data_format, use_gpu) in GetTestConfigs():
- self.ConstructAndTestGradient(batch=4,
- input_rows=6,
- input_cols=5,
- filter_rows=2,
- filter_cols=2,
- in_depth=2,
- out_depth=3,
- stride_rows=1,
- stride_cols=1,
- padding="SAME",
- test_input=False,
- data_format=data_format,
- use_gpu=use_gpu)
+ self.ConstructAndTestGradient(
+ batch=4,
+ input_rows=6,
+ input_cols=5,
+ filter_rows=2,
+ filter_cols=2,
+ in_depth=2,
+ out_depth=3,
+ stride_rows=1,
+ stride_cols=1,
+ padding="SAME",
+ test_input=False,
+ data_format=data_format,
+ use_gpu=use_gpu)
def testInputGradientSamePaddingStrideTwo(self):
for (data_format, use_gpu) in GetTestConfigs():
- self.ConstructAndTestGradient(batch=2,
- input_rows=5,
- input_cols=4,
- filter_rows=3,
- filter_cols=3,
- in_depth=3,
- out_depth=3,
- stride_rows=2,
- stride_cols=2,
- padding="SAME",
- test_input=True,
- data_format=data_format,
- use_gpu=use_gpu)
+ self.ConstructAndTestGradient(
+ batch=2,
+ input_rows=5,
+ input_cols=4,
+ filter_rows=3,
+ filter_cols=3,
+ in_depth=3,
+ out_depth=3,
+ stride_rows=2,
+ stride_cols=2,
+ padding="SAME",
+ test_input=True,
+ data_format=data_format,
+ use_gpu=use_gpu)
def testFilterGradientSamePaddingStrideTwo(self):
for (data_format, use_gpu) in GetTestConfigs():
- self.ConstructAndTestGradient(batch=4,
- input_rows=6,
- input_cols=5,
- filter_rows=2,
- filter_cols=2,
- in_depth=2,
- out_depth=3,
- stride_rows=2,
- stride_cols=2,
- padding="SAME",
- test_input=False,
- data_format=data_format,
- use_gpu=use_gpu)
+ self.ConstructAndTestGradient(
+ batch=4,
+ input_rows=6,
+ input_cols=5,
+ filter_rows=2,
+ filter_cols=2,
+ in_depth=2,
+ out_depth=3,
+ stride_rows=2,
+ stride_cols=2,
+ padding="SAME",
+ test_input=False,
+ data_format=data_format,
+ use_gpu=use_gpu)
def testInputGradientSamePaddingStrideThree(self):
for (data_format, use_gpu) in GetTestConfigs():
- self.ConstructAndTestGradient(batch=2,
- input_rows=7,
- input_cols=6,
- filter_rows=3,
- filter_cols=3,
- in_depth=4,
- out_depth=5,
- stride_rows=3,
- stride_cols=3,
- padding="SAME",
- test_input=True,
- data_format=data_format,
- use_gpu=use_gpu)
+ self.ConstructAndTestGradient(
+ batch=2,
+ input_rows=7,
+ input_cols=6,
+ filter_rows=3,
+ filter_cols=3,
+ in_depth=4,
+ out_depth=5,
+ stride_rows=3,
+ stride_cols=3,
+ padding="SAME",
+ test_input=True,
+ data_format=data_format,
+ use_gpu=use_gpu)
def testFilterGradientSamePaddingStrideThree(self):
for (data_format, use_gpu) in GetTestConfigs():
- self.ConstructAndTestGradient(batch=2,
- input_rows=8,
- input_cols=7,
- filter_rows=4,
- filter_cols=4,
- in_depth=2,
- out_depth=3,
- stride_rows=3,
- stride_cols=3,
- padding="SAME",
- test_input=False,
- data_format=data_format,
- use_gpu=use_gpu)
+ self.ConstructAndTestGradient(
+ batch=2,
+ input_rows=8,
+ input_cols=7,
+ filter_rows=4,
+ filter_cols=4,
+ in_depth=2,
+ out_depth=3,
+ stride_rows=3,
+ stride_cols=3,
+ padding="SAME",
+ test_input=False,
+ data_format=data_format,
+ use_gpu=use_gpu)
def testFilterGradientSamePaddingStride2x1(self):
for (data_format, use_gpu) in GetTestConfigs():
- self.ConstructAndTestGradient(batch=2,
- input_rows=8,
- input_cols=7,
- filter_rows=4,
- filter_cols=4,
- in_depth=2,
- out_depth=3,
- stride_rows=2,
- stride_cols=1,
- padding="SAME",
- test_input=False,
- data_format=data_format,
- use_gpu=use_gpu)
+ self.ConstructAndTestGradient(
+ batch=2,
+ input_rows=8,
+ input_cols=7,
+ filter_rows=4,
+ filter_cols=4,
+ in_depth=2,
+ out_depth=3,
+ stride_rows=2,
+ stride_cols=1,
+ padding="SAME",
+ test_input=False,
+ data_format=data_format,
+ use_gpu=use_gpu)
def testShapeFunctionEdgeCases(self):
# All shapes unknown.
- c1 = tf.nn.conv2d(tf.placeholder(tf.float32),
- tf.placeholder(tf.float32),
- strides=[1, 1, 1, 1], padding="SAME")
+ c1 = nn_ops.conv2d(
+ array_ops.placeholder(dtypes.float32),
+ array_ops.placeholder(dtypes.float32),
+ strides=[1, 1, 1, 1],
+ padding="SAME")
self.assertEqual([None, None, None, None], c1.get_shape().as_list())
# Incorrect input shape.
with self.assertRaises(ValueError):
- tf.nn.conv2d(tf.placeholder(tf.float32, shape=[1, 3]),
- tf.placeholder(tf.float32),
- strides=[1, 1, 1, 1], padding="SAME")
+ nn_ops.conv2d(
+ array_ops.placeholder(
+ dtypes.float32, shape=[1, 3]),
+ array_ops.placeholder(dtypes.float32),
+ strides=[1, 1, 1, 1],
+ padding="SAME")
# Incorrect filter shape.
with self.assertRaises(ValueError):
- tf.nn.conv2d(tf.placeholder(tf.float32),
- tf.placeholder(tf.float32, shape=[1, 3]),
- strides=[1, 1, 1, 1], padding="SAME")
+ nn_ops.conv2d(
+ array_ops.placeholder(dtypes.float32),
+ array_ops.placeholder(
+ dtypes.float32, shape=[1, 3]),
+ strides=[1, 1, 1, 1],
+ padding="SAME")
# Depth mismatch.
with self.assertRaises(ValueError):
- tf.nn.conv2d(tf.placeholder(tf.float32,
- shape=[32, 20, 20, 3]),
- tf.placeholder(tf.float32,
- shape=[4, 4, 2, 2]),
- strides=[1, 1, 1, 1], padding="SAME")
+ nn_ops.conv2d(
+ array_ops.placeholder(
+ dtypes.float32, shape=[32, 20, 20, 3]),
+ array_ops.placeholder(
+ dtypes.float32, shape=[4, 4, 2, 2]),
+ strides=[1, 1, 1, 1],
+ padding="SAME")
def testOpEdgeCases(self):
with self.test_session() as sess:
# Illegal strides.
- with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
+ with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"strides in the batch and depth"):
sess.run(
- tf.nn.conv2d(
- tf.placeholder(tf.float32),
- tf.placeholder(tf.float32),
+ nn_ops.conv2d(
+ array_ops.placeholder(dtypes.float32),
+ array_ops.placeholder(dtypes.float32),
strides=[2, 1, 1, 1],
padding="SAME"))
- with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
+ with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"strides in the batch and depth"):
sess.run(
- tf.nn.conv2d(
- tf.placeholder(tf.float32),
- tf.placeholder(tf.float32),
+ nn_ops.conv2d(
+ array_ops.placeholder(dtypes.float32),
+ array_ops.placeholder(dtypes.float32),
strides=[1, 1, 1, 2],
padding="SAME"))
# Filter larger than input.
with self.assertRaisesRegexp(ValueError, "Negative dimension size"):
sess.run(
- tf.nn.conv2d(
- tf.placeholder(
- tf.float32, shape=[32, 20, 20, 3]),
- tf.placeholder(
- tf.float32, shape=[20, 21, 3, 2]),
+ nn_ops.conv2d(
+ array_ops.placeholder(
+ dtypes.float32, shape=[32, 20, 20, 3]),
+ array_ops.placeholder(
+ dtypes.float32, shape=[20, 21, 3, 2]),
strides=[1, 1, 1, 1],
padding="VALID"))
with self.assertRaisesRegexp(ValueError, "Negative dimension size"):
sess.run(
- tf.nn.conv2d(
- tf.placeholder(
- tf.float32, shape=[32, 20, 20, 3]),
- tf.placeholder(
- tf.float32, shape=[21, 20, 3, 2]),
+ nn_ops.conv2d(
+ array_ops.placeholder(
+ dtypes.float32, shape=[32, 20, 20, 3]),
+ array_ops.placeholder(
+ dtypes.float32, shape=[21, 20, 3, 2]),
strides=[1, 1, 1, 1],
padding="VALID"))
@@ -964,10 +1049,10 @@ class Conv2DTest(tf.test.TestCase):
# //learning/dist_belief/experimental/brain_compatibility/conv_nn_test.py
# where we compare the numeric results of the depthwise conv op with the
# depthwise weighted sum transformer in dist_belief.
-class DepthwiseConv2DTest(tf.test.TestCase):
+class DepthwiseConv2DTest(test.TestCase):
- def _VerifyValues(self, tensor_in_sizes, filter_in_sizes, stride,
- padding, expected):
+ def _VerifyValues(self, tensor_in_sizes, filter_in_sizes, stride, padding,
+ expected):
"""Verifies the output values of the convolution function.
Args:
@@ -990,11 +1075,11 @@ class DepthwiseConv2DTest(tf.test.TestCase):
x1 = [f * 1.0 for f in range(1, total_size_1 + 1)]
x2 = [f * 1.0 for f in range(1, total_size_2 + 1)]
with self.test_session() as sess:
- t1 = tf.constant(x1, shape=tensor_in_sizes)
+ t1 = constant_op.constant(x1, shape=tensor_in_sizes)
t1.set_shape(tensor_in_sizes)
- t2 = tf.constant(x2, shape=filter_in_sizes)
- conv = tf.nn.depthwise_conv2d(t1, t2, strides=[1, stride, stride, 1],
- padding=padding)
+ t2 = constant_op.constant(x2, shape=filter_in_sizes)
+ conv = nn_impl.depthwise_conv2d(
+ t1, t2, strides=[1, stride, stride, 1], padding=padding)
value = sess.run(conv)
print("value = ", value)
self.assertArrayNear(expected, np.ravel(value), 1e-5)
@@ -1050,13 +1135,15 @@ class DepthwiseConv2DTest(tf.test.TestCase):
# (position 1, 0: in_depth 1, output_depth 3 -- using filter #1)
# 4.0 * 4.0 + 10.0 * 12.0 + 6.0 * 8.0 + 12.0 * 16.0 = 376
expected_output = [196, 216, 272, 296, 252, 280, 344, 376]
- self._VerifyValues(tensor_in_sizes=[1, 2, 3, 2],
- filter_in_sizes=[2, 2, 2, 2],
- stride=1, padding="VALID",
- expected=expected_output)
+ self._VerifyValues(
+ tensor_in_sizes=[1, 2, 3, 2],
+ filter_in_sizes=[2, 2, 2, 2],
+ stride=1,
+ padding="VALID",
+ expected=expected_output)
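A one-line check of the arithmetic spelled out in the comment above:

    print(4.0 * 4.0 + 10.0 * 12.0 + 6.0 * 8.0 + 12.0 * 16.0)   # 376.0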
-class SeparableConv2DTest(tf.test.TestCase):
+class SeparableConv2DTest(test.TestCase):
def _InitValues(self, sizes):
"""Initializes values for input tensors.
@@ -1071,7 +1158,7 @@ class SeparableConv2DTest(tf.test.TestCase):
for s in sizes:
total_size *= s
x = [f * 0.5 for f in range(1, total_size + 1)]
- return tf.constant(x, shape=sizes)
+ return constant_op.constant(x, shape=sizes)
def _VerifyValues(self, tensor_in_sizes, depthwise_filter_in_sizes,
pointwise_filter_in_sizes, stride, padding, expected):
@@ -1090,8 +1177,8 @@ class SeparableConv2DTest(tf.test.TestCase):
f1 = self._InitValues(depthwise_filter_in_sizes)
f1.set_shape(depthwise_filter_in_sizes)
f2 = self._InitValues(pointwise_filter_in_sizes)
- conv = tf.nn.separable_conv2d(t1, f1, f2, strides=[1, stride, stride, 1],
- padding=padding)
+ conv = nn_impl.separable_conv2d(
+ t1, f1, f2, strides=[1, stride, stride, 1], padding=padding)
value = sess.run(conv)
print("value = ", value)
self.assertArrayNear(expected, np.ravel(value), 1e-5)
@@ -1116,13 +1203,16 @@ class SeparableConv2DTest(tf.test.TestCase):
10569.5, 10998.5, 11427.5, 5746.75, 6010.75, 6274.75, 6538.75, 6802.75,
7066.75, 7330.75, 6168.75, 6452.25, 6735.75, 7019.25, 7302.75, 7586.25,
7869.75, 6590.75, 6893.75, 7196.75, 7499.75, 7802.75, 8105.75, 8408.75,
- 2036.25, 2119.5, 2202.75, 2286.0, 2369.25, 2452.5, 2535.75]
+ 2036.25, 2119.5, 2202.75, 2286.0, 2369.25, 2452.5, 2535.75
+ ]
- self._VerifyValues(tensor_in_sizes=[1, 4, 4, 2],
- depthwise_filter_in_sizes=[2, 2, 2, 3],
- pointwise_filter_in_sizes=[1, 1, 6, 7],
- stride=1, padding="SAME",
- expected=expected_output)
+ self._VerifyValues(
+ tensor_in_sizes=[1, 4, 4, 2],
+ depthwise_filter_in_sizes=[2, 2, 2, 3],
+ pointwise_filter_in_sizes=[1, 1, 6, 7],
+ stride=1,
+ padding="SAME",
+ expected=expected_output)
def testSeparableConv2DEqualInputOutputDepth(self):
# The output is the result of two convolutions:
@@ -1130,45 +1220,46 @@ class SeparableConv2DTest(tf.test.TestCase):
# Second with intermediate_out[1, 4, 4, 6] * filter2[1, 1, 6, 6].
# Complexity is O(2*3*2*2 + 6*6*1*1) as opposed to O(2*6*2*2).
expected_output = [
- 5742.0, 6069.0, 6396.0, 6723.0, 7050.0, 7377.0,
- 7047.0, 7449.0, 7851.0, 8253.0, 8655.0, 9057.0,
- 8352.0, 8829.0, 9306.0, 9783.0, 10260.0, 10737.0,
- 3582.0, 3783.0, 3984.0, 4185.0, 4386.0, 4587.0,
- 10962.0, 11589.0, 12216.0, 12843.0, 13470.0, 14097.0,
- 12267.0, 12969.0, 13671.0, 14373.0, 15075.0, 15777.0,
- 13572.0, 14349.0, 15126.0, 15903.0, 16680.0, 17457.0,
- 5616.0, 5931.0, 6246.0, 6561.0, 6876.0, 7191.0,
- 16182.0, 17109.0, 18036.0, 18963.0, 19890.0, 20817.0,
- 17487.0, 18489.0, 19491.0, 20493.0, 21495.0, 22497.0,
- 18792.0, 19869.0, 20946.0, 22023.0, 23100.0, 24177.0,
- 7650.0, 8079.0, 8508.0, 8937.0, 9366.0, 9795.0,
- 4963.5, 5227.5, 5491.5, 5755.5, 6019.5, 6283.5,
- 5328.0, 5611.5, 5895.0, 6178.5, 6462.0, 6745.5,
- 5692.5, 5995.5, 6298.5, 6601.5, 6904.5, 7207.5,
- 1757.25, 1840.5, 1923.75, 2007.0, 2090.25, 2173.5]
-
- self._VerifyValues(tensor_in_sizes=[1, 4, 4, 2],
- depthwise_filter_in_sizes=[2, 2, 2, 3],
- pointwise_filter_in_sizes=[1, 1, 6, 6],
- stride=1, padding="SAME",
- expected=expected_output)
+ 5742.0, 6069.0, 6396.0, 6723.0, 7050.0, 7377.0, 7047.0, 7449.0, 7851.0,
+ 8253.0, 8655.0, 9057.0, 8352.0, 8829.0, 9306.0, 9783.0, 10260.0,
+ 10737.0, 3582.0, 3783.0, 3984.0, 4185.0, 4386.0, 4587.0, 10962.0,
+ 11589.0, 12216.0, 12843.0, 13470.0, 14097.0, 12267.0, 12969.0, 13671.0,
+ 14373.0, 15075.0, 15777.0, 13572.0, 14349.0, 15126.0, 15903.0, 16680.0,
+ 17457.0, 5616.0, 5931.0, 6246.0, 6561.0, 6876.0, 7191.0, 16182.0,
+ 17109.0, 18036.0, 18963.0, 19890.0, 20817.0, 17487.0, 18489.0, 19491.0,
+ 20493.0, 21495.0, 22497.0, 18792.0, 19869.0, 20946.0, 22023.0, 23100.0,
+ 24177.0, 7650.0, 8079.0, 8508.0, 8937.0, 9366.0, 9795.0, 4963.5, 5227.5,
+ 5491.5, 5755.5, 6019.5, 6283.5, 5328.0, 5611.5, 5895.0, 6178.5, 6462.0,
+ 6745.5, 5692.5, 5995.5, 6298.5, 6601.5, 6904.5, 7207.5, 1757.25, 1840.5,
+ 1923.75, 2007.0, 2090.25, 2173.5
+ ]
+
+ self._VerifyValues(
+ tensor_in_sizes=[1, 4, 4, 2],
+ depthwise_filter_in_sizes=[2, 2, 2, 3],
+ pointwise_filter_in_sizes=[1, 1, 6, 6],
+ stride=1,
+ padding="SAME",
+ expected=expected_output)
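The complexity comment above can be spot-checked directly. At these tiny
sizes the separable pair is not yet cheaper; the savings appear only as
kernel area and depths grow:

    depthwise = 2 * 3 * 2 * 2   # in_depth * channel_multiplier * kh * kw
    pointwise = 6 * 6 * 1 * 1   # intermediate_depth * out_depth
    dense = 2 * 6 * 2 * 2       # in_depth * out_depth * kh * kw
    print(depthwise + pointwise, dense)   # 60 48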
def testSeparableConv2DIllegalCases(self):
    # Output depth less than input depth.
with self.assertRaisesRegexp(
ValueError,
"Refusing to perform an overparameterized separable convolution"):
- self._VerifyValues(tensor_in_sizes=[1, 4, 4, 2],
- depthwise_filter_in_sizes=[2, 2, 2, 3],
- pointwise_filter_in_sizes=[1, 1, 6, 5],
- stride=1, padding="SAME",
- expected=None)
+ self._VerifyValues(
+ tensor_in_sizes=[1, 4, 4, 2],
+ depthwise_filter_in_sizes=[2, 2, 2, 3],
+ pointwise_filter_in_sizes=[1, 1, 6, 5],
+ stride=1,
+ padding="SAME",
+ expected=None)
-class DeepConv2DTest(tf.test.TestCase):
+class DeepConv2DTest(test.TestCase):
- def _CompareFwdConv2D(self, tensor_in_sizes, filter_in_sizes,
- conv_strides, padding):
+ def _CompareFwdConv2D(self, tensor_in_sizes, filter_in_sizes, conv_strides,
+ padding):
"""Verifies that DeepConv2D and Conv2D produce the same values.
Args:
@@ -1183,11 +1274,11 @@ class DeepConv2DTest(tf.test.TestCase):
x2 = np.random.rand(*filter_in_sizes).astype(np.float32)
with self.test_session(use_gpu=False) as sess:
- t1 = tf.constant(x1, shape=tensor_in_sizes)
- t2 = tf.constant(x2, shape=filter_in_sizes)
+ t1 = constant_op.constant(x1, shape=tensor_in_sizes)
+ t2 = constant_op.constant(x2, shape=filter_in_sizes)
strides = [1] + conv_strides + [1]
- conv = tf.nn.conv2d(t1, t2, strides=strides, padding=padding)
+ conv = nn_ops.conv2d(t1, t2, strides=strides, padding=padding)
os.environ["TF_USE_DEEP_CONV2D"] = "0"
values_expect = sess.run([conv])
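The hunk ends before the second half of the comparison; presumably the
test flips the switch and re-runs the same graph, along these lines (a
hedged reconstruction, not shown in this diff):

    os.environ["TF_USE_DEEP_CONV2D"] = "1"   # assumed continuation
    values_test = sess.run([conv])
    self.assertAllClose(values_expect, values_test, rtol=1e-5, atol=1e-5)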
@@ -1212,29 +1303,29 @@ class DeepConv2DTest(tf.test.TestCase):
self._RunTestCases([1, 1], "SAME")
-class Conv2DBenchmark(tf.test.Benchmark):
+class Conv2DBenchmark(test.Benchmark):
def benchmarkGPUConvStackFirst(self):
# Benchmark the first iteration of a conv-net with many identical conv
# operations.
- if not tf.test.is_gpu_available():
+ if not test.is_gpu_available():
return
- with tf.Graph().as_default(), tf.Session() as session:
+ with ops.Graph().as_default(), session_lib.Session() as session:
batch_size = 1
timesteps = 600
features = 1
- inputs = tf.random_uniform(
+ inputs = random_ops.random_uniform(
[batch_size, 1, timesteps, features], seed=1234)
num_outputs_list = [512] * 40 + [1]
kernel_w = 3
x = inputs
for num_outputs in num_outputs_list:
- x = tf.contrib.layers.convolution2d(x, num_outputs, [1, kernel_w])
+ x = layers.convolution2d(x, num_outputs, [1, kernel_w])
outputs = x
- tf.global_variables_initializer().run()
+ variables.global_variables_initializer().run()
num_iterations = 4
for iter_index in xrange(num_iterations):
start = time.time()
@@ -1246,30 +1337,36 @@ class Conv2DBenchmark(tf.test.Benchmark):
def GetInceptionFwdTest(input_size, filter_size, stride, padding):
+
def Test(self):
- tf.logging.info("Testing InceptionFwd %s", (input_size, filter_size,
- stride, padding))
+ tf_logging.info("Testing InceptionFwd %s", (input_size, filter_size, stride,
+ padding))
self._CompareFwdValues(input_size, filter_size, [stride, stride], padding)
+
return Test
-def GetInceptionBackInputTest(input_size, filter_size, output_size,
- stride, padding):
+def GetInceptionBackInputTest(input_size, filter_size, output_size, stride,
+ padding):
+
def Test(self):
- tf.logging.info("Testing InceptionBackInput %s",
+ tf_logging.info("Testing InceptionBackInput %s",
(input_size, filter_size, output_size, stride, padding))
self._CompareBackpropInput(input_size, filter_size, output_size,
[stride, stride], padding)
+
return Test
-def GetInceptionBackFilterTest(input_size, filter_size, output_size,
- strides, padding):
+def GetInceptionBackFilterTest(input_size, filter_size, output_size, strides,
+ padding):
+
def Test(self):
- tf.logging.info("Testing InceptionBackFilter %s",
+ tf_logging.info("Testing InceptionBackFilter %s",
(input_size, filter_size, output_size, strides, padding))
- self._CompareBackFilter(input_size, filter_size, output_size,
- strides, padding)
+ self._CompareBackFilter(input_size, filter_size, output_size, strides,
+ padding)
+
return Test
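These factories follow the generated-test pattern: each returned Test
closure gets attached to Conv2DTest under a unique name, one method per
Inception shape. A sketch of the wiring, consistent with the __main__
fragment below (loop framing assumed):

    for index_, (input_size_, filter_size_, output_size_, stride_,
                 padding_) in enumerate(GetShrunkInceptionShapes()):
        setattr(Conv2DTest, "testInceptionFwd_" + str(index_),
                GetInceptionFwdTest(input_size_, filter_size_, stride_, padding_))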
@@ -1285,4 +1382,4 @@ if __name__ == "__main__":
GetInceptionBackFilterTest(input_size_, filter_size_, output_size_,
[stride_, stride_], padding_))
- tf.test.main()
+ test.main()
diff --git a/tensorflow/python/kernel_tests/cross_grad_test.py b/tensorflow/python/kernel_tests/cross_grad_test.py
index 66480928f0..f040ac6055 100644
--- a/tensorflow/python/kernel_tests/cross_grad_test.py
+++ b/tensorflow/python/kernel_tests/cross_grad_test.py
@@ -18,22 +18,28 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-import tensorflow as tf
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import gradient_checker
+from tensorflow.python.ops import math_ops
+from tensorflow.python.platform import test
-class CrossOpTest(tf.test.TestCase):
+class CrossOpTest(test.TestCase):
def testGradientRandomValues(self):
with self.test_session():
us = [2, 3]
- u = tf.reshape([0.854, -0.616, 0.767, 0.725, -0.927, 0.159], shape=us)
- v = tf.reshape([-0.522, 0.755, 0.407, -0.652, 0.241, 0.247], shape=us)
- s = tf.cross(u, v)
- jacob_u, jacob_v = tf.test.compute_gradient([u, v], [us, us], s, us)
+ u = array_ops.reshape(
+ [0.854, -0.616, 0.767, 0.725, -0.927, 0.159], shape=us)
+ v = array_ops.reshape(
+ [-0.522, 0.755, 0.407, -0.652, 0.241, 0.247], shape=us)
+ s = math_ops.cross(u, v)
+ jacob_u, jacob_v = gradient_checker.compute_gradient([u, v], [us, us], s,
+ us)
self.assertAllClose(jacob_u[0], jacob_u[1], rtol=1e-3, atol=1e-3)
self.assertAllClose(jacob_v[0], jacob_v[1], rtol=1e-3, atol=1e-3)
if __name__ == "__main__":
- tf.test.main()
+ test.main()
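
cross_grad_test above relies on gradient_checker.compute_gradient, which
returns a (theoretical, numerical) Jacobian pair that should agree whenever an
op's registered gradient is correct. A self-contained sketch of the same kind
of check, assuming the post-rewrite import paths used throughout this diff:

    import numpy as np

    from tensorflow.python.framework import constant_op
    from tensorflow.python.ops import gradient_checker
    from tensorflow.python.ops import math_ops
    from tensorflow.python.platform import test


    class SquareGradTest(test.TestCase):

      def testGradient(self):
        with self.test_session():
          shape = [2, 3]
          x = constant_op.constant(np.random.rand(*shape).astype(np.float64))
          y = math_ops.square(x)
          # Analytic vs. finite-difference Jacobians of y with respect to x.
          jacob_t, jacob_n = gradient_checker.compute_gradient(
              x, shape, y, shape)
          self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)


    if __name__ == "__main__":
      test.main()
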
diff --git a/tensorflow/python/kernel_tests/ctc_decoder_ops_test.py b/tensorflow/python/kernel_tests/ctc_decoder_ops_test.py
index e01edc88c1..431587e11c 100644
--- a/tensorflow/python/kernel_tests/ctc_decoder_ops_test.py
+++ b/tensorflow/python/kernel_tests/ctc_decoder_ops_test.py
@@ -12,8 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for tensorflow.ctc_ops.ctc_loss_op."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
@@ -23,7 +23,10 @@ import itertools
import numpy as np
from six.moves import zip_longest
-import tensorflow as tf
+from tensorflow.python.framework import ops
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import ctc_ops
+from tensorflow.python.platform import test
def grouper(iterable, n, fillvalue=None):
@@ -38,25 +41,30 @@ def flatten(list_of_lists):
return itertools.chain.from_iterable(list_of_lists)
-class CTCGreedyDecoderTest(tf.test.TestCase):
+class CTCGreedyDecoderTest(test.TestCase):
- def _testCTCDecoder(self, decoder, inputs, seq_lens, log_prob_truth,
- decode_truth, expected_err_re=None, **decoder_args):
- inputs_t = [tf.convert_to_tensor(x) for x in inputs]
+ def _testCTCDecoder(self,
+ decoder,
+ inputs,
+ seq_lens,
+ log_prob_truth,
+ decode_truth,
+ expected_err_re=None,
+ **decoder_args):
+ inputs_t = [ops.convert_to_tensor(x) for x in inputs]
# convert inputs_t into a [max_time x batch_size x depth] tensor
# from a len time python list of [batch_size x depth] tensors
- inputs_t = tf.stack(inputs_t)
+ inputs_t = array_ops.stack(inputs_t)
with self.test_session(use_gpu=False) as sess:
decoded_list, log_probability = decoder(
- inputs_t,
- sequence_length=seq_lens, **decoder_args)
- decoded_unwrapped = list(flatten([
- (st.indices, st.values, st.dense_shape) for st in decoded_list]))
+ inputs_t, sequence_length=seq_lens, **decoder_args)
+ decoded_unwrapped = list(
+ flatten([(st.indices, st.values, st.dense_shape) for st in
+ decoded_list]))
if expected_err_re is None:
- outputs = sess.run(
- decoded_unwrapped + [log_probability])
+ outputs = sess.run(decoded_unwrapped + [log_probability])
# Group outputs into (ix, vals, shape) tuples
output_sparse_tensors = list(grouper(outputs[:-1], 3))
@@ -67,8 +75,8 @@ class CTCGreedyDecoderTest(tf.test.TestCase):
self.assertEqual(len(output_sparse_tensors), len(decode_truth))
# For each SparseTensor tuple, compare (ix, vals, shape)
- for out_st, truth_st, tf_st in zip(
- output_sparse_tensors, decode_truth, decoded_list):
+ for out_st, truth_st, tf_st in zip(output_sparse_tensors, decode_truth,
+ decoded_list):
self.assertAllEqual(out_st[0], truth_st[0]) # ix
self.assertAllEqual(out_st[1], truth_st[1]) # vals
self.assertAllEqual(out_st[2], truth_st[2]) # shape
@@ -105,19 +113,23 @@ class CTCGreedyDecoderTest(tf.test.TestCase):
# dimensions are time x depth
input_prob_matrix_1 = np.asarray(
- [[0.1, 0.9, 0.0, 0.0], # t=0
- [0.0, 0.9, 0.1, 0.0], # t=1
- [0.0, 0.0, 0.1, 0.9], # t=2
- [0.0, 0.9, 0.1, 0.1], # t=3
- [0.9, 0.1, 0.0, 0.0], # t=4
- [0.0, 0.0, 0.0, 0.0]], # t=5 (ignored)
+ [
+ [0.1, 0.9, 0.0, 0.0], # t=0
+ [0.0, 0.9, 0.1, 0.0], # t=1
+ [0.0, 0.0, 0.1, 0.9], # t=2
+ [0.0, 0.9, 0.1, 0.1], # t=3
+ [0.9, 0.1, 0.0, 0.0], # t=4
+ [0.0, 0.0, 0.0, 0.0]
+ ], # t=5 (ignored)
dtype=np.float32)
input_log_prob_matrix_1 = np.log(input_prob_matrix_1)
# len max_time_steps array of batch_size x depth matrices
- inputs = [np.vstack([input_log_prob_matrix_0[t, :],
- input_log_prob_matrix_1[t, :]])
- for t in range(max_time_steps)]
+ inputs = [
+ np.vstack(
+ [input_log_prob_matrix_0[t, :], input_log_prob_matrix_1[t, :]])
+ for t in range(max_time_steps)
+ ]
# batch_size length vector of sequence_lengths
seq_lens = np.array([seq_len_0, seq_len_1], dtype=np.int32)
@@ -130,21 +142,32 @@ class CTCGreedyDecoderTest(tf.test.TestCase):
# decode_truth: one SparseTensor (ix, vals, shape)
decode_truth = [
- (np.array([[0, 0], # batch 0, 2 outputs
- [0, 1],
- [1, 0], # batch 1, 3 outputs
- [1, 1],
- [1, 2]], dtype=np.int64),
- np.array([0, 1, # batch 0
- 1, 1, 0], # batch 1
- dtype=np.int64),
- # shape is batch x max_decoded_length
- np.array([2, 3], dtype=np.int64)),
+ (
+ np.array(
+ [
+ [0, 0], # batch 0, 2 outputs
+ [0, 1],
+ [1, 0], # batch 1, 3 outputs
+ [1, 1],
+ [1, 2]
+ ],
+ dtype=np.int64),
+ np.array(
+ [
+ 0,
+ 1, # batch 0
+ 1,
+ 1,
+ 0
+ ], # batch 1
+ dtype=np.int64),
+ # shape is batch x max_decoded_length
+ np.array(
+ [2, 3], dtype=np.int64)),
]
- self._testCTCDecoder(
- tf.nn.ctc_greedy_decoder,
- inputs, seq_lens, log_prob_truth, decode_truth)
+ self._testCTCDecoder(ctc_ops.ctc_greedy_decoder, inputs, seq_lens,
+ log_prob_truth, decode_truth)
def testCTCDecoderBeamSearch(self):
"""Test one batch, two beams - hibernating beam search."""
@@ -153,50 +176,60 @@ class CTCGreedyDecoderTest(tf.test.TestCase):
seq_len_0 = 5
input_prob_matrix_0 = np.asarray(
- [[0.30999, 0.309938, 0.0679938, 0.0673362, 0.0708352, 0.173908],
- [0.215136, 0.439699, 0.0370931, 0.0393967, 0.0381581, 0.230517],
- [0.199959, 0.489485, 0.0233221, 0.0251417, 0.0233289, 0.238763],
- [0.279611, 0.452966, 0.0204795, 0.0209126, 0.0194803, 0.20655],
- [0.51286, 0.288951, 0.0243026, 0.0220788, 0.0219297, 0.129878],
- # Random entry added in at time=5
- [0.155251, 0.164444, 0.173517, 0.176138, 0.169979, 0.160671]],
+ [
+ [0.30999, 0.309938, 0.0679938, 0.0673362, 0.0708352, 0.173908],
+ [0.215136, 0.439699, 0.0370931, 0.0393967, 0.0381581, 0.230517],
+ [0.199959, 0.489485, 0.0233221, 0.0251417, 0.0233289, 0.238763],
+ [0.279611, 0.452966, 0.0204795, 0.0209126, 0.0194803, 0.20655],
+ [0.51286, 0.288951, 0.0243026, 0.0220788, 0.0219297, 0.129878],
+ # Random entry added in at time=5
+ [0.155251, 0.164444, 0.173517, 0.176138, 0.169979, 0.160671]
+ ],
dtype=np.float32)
# Add arbitrary offset - this is fine
input_log_prob_matrix_0 = np.log(input_prob_matrix_0) + 2.0
# len max_time_steps array of batch_size x depth matrices
- inputs = ([input_log_prob_matrix_0[t, :][np.newaxis, :]
- for t in range(seq_len_0)] # Pad to max_time_steps = 8
- + 2 * [np.zeros((1, depth), dtype=np.float32)])
+ inputs = ([
+ input_log_prob_matrix_0[t, :][np.newaxis, :] for t in range(seq_len_0)
+ ] # Pad to max_time_steps = 8
+ + 2 * [np.zeros(
+ (1, depth), dtype=np.float32)])
# batch_size length vector of sequence_lengths
seq_lens = np.array([seq_len_0], dtype=np.int32)
# batch_size length vector of negative log probabilities
- log_prob_truth = np.array([
- 0.584855, # output beam 0
- 0.389139 # output beam 1
- ], np.float32)[np.newaxis, :]
+ log_prob_truth = np.array(
+ [
+ 0.584855, # output beam 0
+ 0.389139 # output beam 1
+ ],
+ np.float32)[np.newaxis, :]
# decode_truth: two SparseTensors, (ix, values, shape)
decode_truth = [
# beam 0, batch 0, two outputs decoded
- (np.array([[0, 0], [0, 1]], dtype=np.int64),
- np.array([1, 0], dtype=np.int64),
- np.array([1, 2], dtype=np.int64)),
+ (np.array(
+ [[0, 0], [0, 1]], dtype=np.int64), np.array(
+ [1, 0], dtype=np.int64), np.array(
+ [1, 2], dtype=np.int64)),
# beam 1, batch 0, three outputs decoded
- (np.array([[0, 0], [0, 1], [0, 2]], dtype=np.int64),
- np.array([0, 1, 0], dtype=np.int64),
- np.array([1, 3], dtype=np.int64)),
+ (np.array(
+ [[0, 0], [0, 1], [0, 2]], dtype=np.int64), np.array(
+ [0, 1, 0], dtype=np.int64), np.array(
+ [1, 3], dtype=np.int64)),
]
self._testCTCDecoder(
- tf.nn.ctc_beam_search_decoder,
- inputs, seq_lens, log_prob_truth,
+ ctc_ops.ctc_beam_search_decoder,
+ inputs,
+ seq_lens,
+ log_prob_truth,
decode_truth,
beam_width=2,
top_paths=2)
if __name__ == "__main__":
- tf.test.main()
+ test.main()
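
The decoder tests above all feed a [max_time x batch_size x depth] tensor and
unpack the resulting SparseTensors. A toy end-to-end run of the greedy
decoder, condensed from the harness above (the logit values are illustrative):

    import numpy as np

    from tensorflow.python.client import session as session_lib
    from tensorflow.python.framework import ops
    from tensorflow.python.ops import ctc_ops

    # max_time=3, batch_size=1, depth=3 (two labels plus the trailing blank).
    logits = np.log(np.asarray(
        [[[0.1, 0.8, 0.1]],   # t=0: class 1 most likely
         [[0.7, 0.2, 0.1]],   # t=1: class 0 most likely
         [[0.1, 0.1, 0.8]]],  # t=2: blank most likely
        dtype=np.float32))

    with ops.Graph().as_default(), session_lib.Session() as sess:
      decoded, log_prob = ctc_ops.ctc_greedy_decoder(
          ops.convert_to_tensor(logits),
          sequence_length=np.array([3], dtype=np.int32))
      # decoded is a list of SparseTensors; the greedy path here is [1, 0]
      # once the trailing blank is dropped.
      ix, vals, shape = sess.run([decoded[0].indices, decoded[0].values,
                                  decoded[0].dense_shape])
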
diff --git a/tensorflow/python/kernel_tests/ctc_loss_op_test.py b/tensorflow/python/kernel_tests/ctc_loss_op_test.py
index 929f1d78b1..28afa8399a 100644
--- a/tensorflow/python/kernel_tests/ctc_loss_op_test.py
+++ b/tensorflow/python/kernel_tests/ctc_loss_op_test.py
@@ -12,14 +12,20 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for tensorflow.ctc_ops.ctc_decoder_ops."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import sparse_tensor
+from tensorflow.python.ops import ctc_ops
+from tensorflow.python.ops import gradients_impl
+from tensorflow.python.platform import test
def SimpleSparseTensorFrom(x):
@@ -37,27 +43,31 @@ def SimpleSparseTensorFrom(x):
for time, val in enumerate(batch):
x_ix.append([batch_i, time])
x_val.append(val)
- x_shape = [len(x), np.asarray(x_ix).max(0)[1]+1]
- x_ix = tf.constant(x_ix, tf.int64)
- x_val = tf.constant(x_val, tf.int32)
- x_shape = tf.constant(x_shape, tf.int64)
+ x_shape = [len(x), np.asarray(x_ix).max(0)[1] + 1]
+ x_ix = constant_op.constant(x_ix, dtypes.int64)
+ x_val = constant_op.constant(x_val, dtypes.int32)
+ x_shape = constant_op.constant(x_shape, dtypes.int64)
- return tf.SparseTensor(x_ix, x_val, x_shape)
+ return sparse_tensor.SparseTensor(x_ix, x_val, x_shape)
-class CTCLossTest(tf.test.TestCase):
+class CTCLossTest(test.TestCase):
- def _testCTCLoss(self, inputs, seq_lens, labels,
- loss_truth, grad_truth, expected_err_re=None):
+ def _testCTCLoss(self,
+ inputs,
+ seq_lens,
+ labels,
+ loss_truth,
+ grad_truth,
+ expected_err_re=None):
self.assertEquals(len(inputs), len(grad_truth))
- inputs_t = tf.constant(inputs)
+ inputs_t = constant_op.constant(inputs)
with self.test_session(use_gpu=False) as sess:
- loss = tf.nn.ctc_loss(inputs=inputs_t,
- labels=labels,
- sequence_length=seq_lens)
- grad = tf.gradients(loss, [inputs_t])[0]
+ loss = ctc_ops.ctc_loss(
+ inputs=inputs_t, labels=labels, sequence_length=seq_lens)
+ grad = gradients_impl.gradients(loss, [inputs_t])[0]
self.assertShapeEqual(loss_truth, loss)
self.assertShapeEqual(grad_truth, grad)
@@ -176,9 +186,11 @@ class CTCLossTest(tf.test.TestCase):
dtype=np.float32)
# len max_time_steps array of 2 x depth matrices
- inputs = [np.vstack([input_log_prob_matrix_0[t, :],
- input_log_prob_matrix_1[t, :]])
- for t in range(5)] + 2 * [np.nan*np.ones((2, depth), np.float32)]
+ inputs = [
+ np.vstack(
+ [input_log_prob_matrix_0[t, :], input_log_prob_matrix_1[t, :]])
+ for t in range(5)
+ ] + 2 * [np.nan * np.ones((2, depth), np.float32)]
# convert inputs into [max_time x batch_size x depth tensor] Tensor
inputs = np.asarray(inputs, dtype=np.float32)
@@ -193,44 +205,45 @@ class CTCLossTest(tf.test.TestCase):
loss_truth = np.array([-loss_log_prob_0, -loss_log_prob_1], np.float32)
# output: len max_time_steps array of 2 x depth matrices
- grad_truth = [np.vstack([gradient_log_prob_0[t, :],
- gradient_log_prob_1[t, :]])
- for t in range(5)] + 2 * [np.zeros((2, depth), np.float32)]
+ grad_truth = [
+ np.vstack([gradient_log_prob_0[t, :], gradient_log_prob_1[t, :]])
+ for t in range(5)
+ ] + 2 * [np.zeros((2, depth), np.float32)]
# convert grad_truth into [max_time x batch_size x depth] Tensor
grad_truth = np.asarray(grad_truth, dtype=np.float32)
self._testCTCLoss(inputs, seq_lens, labels, loss_truth, grad_truth)
-
def test_time_major(self):
"""Testing time_major param.
-
- testing if transposing and setting time_major=False will result in the same loss
+
+
+    Tests that transposing the inputs and setting time_major=False yields
+    the same loss.
"""
# [max_time x batch_size x depth tensor]
inputs = np.random.randn(2, 2, 3).astype(np.float32)
labels = SimpleSparseTensorFrom([[0, 1], [1, 0]])
seq_lens = np.array([2, 2], dtype=np.int32)
-
- inputs_t = tf.constant(inputs)
+ inputs_t = constant_op.constant(inputs)
# Transposing tensor to [batch_size x max_time x depth tensor]
- inputs_t_transposed = tf.constant(inputs.transpose(1, 0, 2))
-
+ inputs_t_transposed = constant_op.constant(inputs.transpose(1, 0, 2))
with self.test_session(use_gpu=False) as sess:
- loss = tf.nn.ctc_loss(inputs=inputs_t,
- labels=labels,
- sequence_length=seq_lens)
- loss_transposed = tf.nn.ctc_loss(inputs=inputs_t_transposed,
- labels=labels,
- sequence_length=seq_lens, time_major=False)
+ loss = ctc_ops.ctc_loss(
+ inputs=inputs_t, labels=labels, sequence_length=seq_lens)
+ loss_transposed = ctc_ops.ctc_loss(
+ inputs=inputs_t_transposed,
+ labels=labels,
+ sequence_length=seq_lens,
+ time_major=False)
(tf_loss, tf_loss_transposed) = sess.run([loss, loss_transposed])
self.assertAllEqual(tf_loss, tf_loss_transposed)
if __name__ == "__main__":
- tf.test.main()
+ test.main()
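
The time_major test above boils down to one property: a batch-major input plus
time_major=False must produce the same loss as the default time-major layout.
A condensed, self-contained version of that check (shapes as in the test):

    import numpy as np

    from tensorflow.python.client import session as session_lib
    from tensorflow.python.framework import constant_op
    from tensorflow.python.framework import dtypes
    from tensorflow.python.framework import ops
    from tensorflow.python.framework import sparse_tensor
    from tensorflow.python.ops import ctc_ops

    with ops.Graph().as_default(), session_lib.Session() as sess:
      # [max_time=2, batch_size=2, depth=3]; depth includes the blank class.
      inputs = np.random.randn(2, 2, 3).astype(np.float32)
      # Labels [[0, 1], [1, 0]] as the SparseTensor ctc_loss expects.
      labels = sparse_tensor.SparseTensor(
          constant_op.constant([[0, 0], [0, 1], [1, 0], [1, 1]], dtypes.int64),
          constant_op.constant([0, 1, 1, 0], dtypes.int32),
          constant_op.constant([2, 2], dtypes.int64))
      seq_lens = np.array([2, 2], dtype=np.int32)

      loss = ctc_ops.ctc_loss(
          inputs=constant_op.constant(inputs),
          labels=labels, sequence_length=seq_lens)
      loss_transposed = ctc_ops.ctc_loss(
          inputs=constant_op.constant(inputs.transpose(1, 0, 2)),
          labels=labels, sequence_length=seq_lens, time_major=False)
      a, b = sess.run([loss, loss_transposed])
      assert (a == b).all()  # the internal transpose rebuilds the same graph
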
diff --git a/tensorflow/python/kernel_tests/cwise_ops_test.py b/tensorflow/python/kernel_tests/cwise_ops_test.py
index c097177849..5e73893db4 100644
--- a/tensorflow/python/kernel_tests/cwise_ops_test.py
+++ b/tensorflow/python/kernel_tests/cwise_ops_test.py
@@ -12,9 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
+"""Functional tests for coefficient-wise operations."""
-"""Functional tests for coefficient-wise operations.
-"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
@@ -22,15 +21,24 @@ from __future__ import print_function
import math
import numpy as np
-import tensorflow as tf
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes as dtypes_lib
+from tensorflow.python.framework import ops
+from tensorflow.python.framework import sparse_tensor
+from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_math_ops
-
+from tensorflow.python.ops import gradient_checker
+from tensorflow.python.ops import gradients_impl
+from tensorflow.python.ops import math_ops
+from tensorflow.python.ops import variables
+from tensorflow.python.platform import test
+from tensorflow.python.platform import tf_logging
_ADD = lambda x, y: x + y
_SUB = lambda x, y: x - y
_MUL = lambda x, y: x * y
-_POW = lambda x, y: x ** y
+_POW = lambda x, y: x**y
_TRUEDIV = lambda x, y: x / y
_FLOORDIV = lambda x, y: x // y
_MOD = lambda x, y: x % y
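
The module-level lambdas (_ADD, _SUB, _POW, ...) deliberately go through
Python's operator overloads rather than the named ops, so each binary kernel
is exercised both ways. A small sketch of why the two paths must agree:

    from tensorflow.python.client import session as session_lib
    from tensorflow.python.framework import constant_op
    from tensorflow.python.framework import ops
    from tensorflow.python.ops import math_ops

    with ops.Graph().as_default(), session_lib.Session() as sess:
      x = constant_op.constant([6.0, 7.0])
      y = constant_op.constant([2.0, 4.0])
      # Tensor.__add__, __floordiv__, etc. dispatch to the registered ops,
      # so the lambda-based cases hit the same kernels as the explicit calls.
      print(sess.run([x + y, math_ops.add(x, y)]))
      print(sess.run([x // y, math_ops.floordiv(x, y)]))
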
@@ -58,15 +66,16 @@ def _sparsify(x, thresh=0.5, index_dtype=np.int64):
x_values = x[non_zero]
x_shape = x.shape
- return tf.SparseTensor(
+ return sparse_tensor.SparseTensor(
indices=x_indices, values=x_values, dense_shape=x_shape), x_values
-class UnaryOpTest(tf.test.TestCase):
+
+class UnaryOpTest(test.TestCase):
def _compareCpu(self, x, np_func, tf_func):
np_ans = np_func(x)
with self.test_session(use_gpu=False):
- inx = tf.convert_to_tensor(x)
+ inx = ops.convert_to_tensor(x)
if x.dtype in (np.float32, np.float64):
y = 1.1 * tf_func(inx)
np_ans *= 1.1
@@ -79,54 +88,42 @@ class UnaryOpTest(tf.test.TestCase):
else:
self.assertAllClose(np_ans, tf_cpu)
- if x.dtype in (np.complex64, np.complex128) and tf_func == tf.sign:
+ if x.dtype in (np.complex64, np.complex128) and tf_func == math_ops.sign:
return # Return early
if x.dtype == np.float16:
s = list(np.shape(x))
- jacob_t, _ = tf.test.compute_gradient(inx,
- s,
- y,
- s,
- x_init_value=x)
+ jacob_t, _ = gradient_checker.compute_gradient(
+ inx, s, y, s, x_init_value=x)
xf = x.astype(np.float)
- inxf = tf.convert_to_tensor(xf)
+ inxf = ops.convert_to_tensor(xf)
yf = tf_func(inxf)
- _, jacob_n = tf.test.compute_gradient(inxf,
- s,
- yf,
- s,
- x_init_value=xf)
+ _, jacob_n = gradient_checker.compute_gradient(
+ inxf, s, yf, s, x_init_value=xf)
jacob_n = jacob_n.astype(np.float16)
self.assertAllClose(jacob_t, jacob_n, rtol=5e-3, atol=5e-3)
elif x.dtype in (np.float32, np.complex64):
s = list(np.shape(x))
- jacob_t, jacob_n = tf.test.compute_gradient(inx,
- s,
- y,
- s,
- x_init_value=x)
+ jacob_t, jacob_n = gradient_checker.compute_gradient(
+ inx, s, y, s, x_init_value=x)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype in (np.float64, np.complex128):
s = list(np.shape(x))
- jacob_t, jacob_n = tf.test.compute_gradient(inx,
- s,
- y,
- s,
- x_init_value=x)
+ jacob_t, jacob_n = gradient_checker.compute_gradient(
+ inx, s, y, s, x_init_value=x)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
def _check(self, result_tensor, result_np, input_sp_t, tol):
- self.assertTrue(isinstance(result_tensor, tf.SparseTensor))
- self.assertTrue(isinstance(input_sp_t, tf.SparseTensor))
+ self.assertTrue(isinstance(result_tensor, sparse_tensor.SparseTensor))
+ self.assertTrue(isinstance(input_sp_t, sparse_tensor.SparseTensor))
self.assertAllEqual(input_sp_t.indices.eval(), result_tensor.indices.eval())
- self.assertAllEqual(
- input_sp_t.dense_shape.eval(), result_tensor.dense_shape.eval())
+ self.assertAllEqual(input_sp_t.dense_shape.eval(),
+ result_tensor.dense_shape.eval())
if tol is None:
self.assertAllClose(result_np, result_tensor.values.eval())
else:
- self.assertAllClose(result_np, result_tensor.values.eval(), rtol=tol,
- atol=tol)
+ self.assertAllClose(
+ result_np, result_tensor.values.eval(), rtol=tol, atol=tol)
def _compareSparseCpu(self, x, np_func, tf_func, tol):
x_sp, x_sp_vals = _sparsify(x)
@@ -137,7 +134,7 @@ class UnaryOpTest(tf.test.TestCase):
def _compareGpu(self, x, np_func, tf_func):
np_ans = np_func(x)
with self.test_session(use_gpu=True):
- result = tf_func(tf.convert_to_tensor(x))
+ result = tf_func(ops.convert_to_tensor(x))
tf_gpu = result.eval()
if x.dtype == np.float16:
self.assertAllClose(np_ans, tf_gpu, rtol=1e-3, atol=1e-3)
@@ -169,6 +166,7 @@ class UnaryOpTest(tf.test.TestCase):
return 1.0 / (1.0 + np.exp(-x))
def _replace_domain_error_with_inf(self, fn):
+
def func(x):
try:
return fn(x)
@@ -177,268 +175,273 @@ class UnaryOpTest(tf.test.TestCase):
return np.inf * np.ones_like(x)
else:
raise e
+
return func
def testFloatBasic(self):
x = np.arange(-3, 3).reshape(1, 3, 2).astype(np.float32)
- y = (x + .5).astype(np.float32) # no zero
- z = (x + 15.5).astype(np.float32) # all positive
- k = np.arange(-0.90, 0.90, 0.25).astype(np.float32) # between -1 and 1
+ y = (x + .5).astype(np.float32) # no zero
+ z = (x + 15.5).astype(np.float32) # all positive
+ k = np.arange(-0.90, 0.90, 0.25).astype(np.float32) # between -1 and 1
- self._compareBoth(x, np.abs, tf.abs)
+ self._compareBoth(x, np.abs, math_ops.abs)
self._compareBoth(x, np.abs, _ABS)
- self._compareBoth(x, np.negative, tf.neg)
+ self._compareBoth(x, np.negative, math_ops.neg)
self._compareBoth(x, np.negative, _NEG)
- self._compareBoth(y, self._inv, tf.reciprocal)
- self._compareBoth(x, np.square, tf.square)
- self._compareBoth(z, np.sqrt, tf.sqrt)
- self._compareBoth(z, self._rsqrt, tf.rsqrt)
- self._compareBoth(x, np.exp, tf.exp)
- self._compareBoth(z, np.log, tf.log)
- self._compareBoth(z, np.log1p, tf.log1p)
- self._compareBoth(x, np.tanh, tf.tanh)
- self._compareBoth(x, self._sigmoid, tf.sigmoid)
- self._compareBoth(y, np.sign, tf.sign)
- self._compareBoth(x, np.sin, tf.sin)
- self._compareBoth(x, np.cos, tf.cos)
- self._compareBoth(k, np.arcsin, tf.asin)
- self._compareBoth(k, np.arccos, tf.acos)
- self._compareBoth(x, np.arctan, tf.atan)
- self._compareBoth(x, np.tan, tf.tan)
+ self._compareBoth(y, self._inv, math_ops.reciprocal)
+ self._compareBoth(x, np.square, math_ops.square)
+ self._compareBoth(z, np.sqrt, math_ops.sqrt)
+ self._compareBoth(z, self._rsqrt, math_ops.rsqrt)
+ self._compareBoth(x, np.exp, math_ops.exp)
+ self._compareBoth(z, np.log, math_ops.log)
+ self._compareBoth(z, np.log1p, math_ops.log1p)
+ self._compareBoth(x, np.tanh, math_ops.tanh)
+ self._compareBoth(x, self._sigmoid, math_ops.sigmoid)
+ self._compareBoth(y, np.sign, math_ops.sign)
+ self._compareBoth(x, np.sin, math_ops.sin)
+ self._compareBoth(x, np.cos, math_ops.cos)
+ self._compareBoth(k, np.arcsin, math_ops.asin)
+ self._compareBoth(k, np.arccos, math_ops.acos)
+ self._compareBoth(x, np.arctan, math_ops.atan)
+ self._compareBoth(x, np.tan, math_ops.tan)
self._compareBoth(
y,
np.vectorize(self._replace_domain_error_with_inf(math.lgamma)),
- tf.lgamma)
- self._compareBoth(x, np.vectorize(math.erf), tf.erf)
- self._compareBoth(x, np.vectorize(math.erfc), tf.erfc)
-
- self._compareBothSparse(x, np.abs, tf.abs)
- self._compareBothSparse(x, np.negative, tf.neg)
- self._compareBothSparse(x, np.square, tf.square)
- self._compareBothSparse(z, np.sqrt, tf.sqrt, tol=1e-3)
- self._compareBothSparse(x, np.tanh, tf.tanh)
- self._compareBothSparse(y, np.sign, tf.sign)
- self._compareBothSparse(x, np.vectorize(math.erf), tf.erf)
+ math_ops.lgamma)
+ self._compareBoth(x, np.vectorize(math.erf), math_ops.erf)
+ self._compareBoth(x, np.vectorize(math.erfc), math_ops.erfc)
+
+ self._compareBothSparse(x, np.abs, math_ops.abs)
+ self._compareBothSparse(x, np.negative, math_ops.neg)
+ self._compareBothSparse(x, np.square, math_ops.square)
+ self._compareBothSparse(z, np.sqrt, math_ops.sqrt, tol=1e-3)
+ self._compareBothSparse(x, np.tanh, math_ops.tanh)
+ self._compareBothSparse(y, np.sign, math_ops.sign)
+ self._compareBothSparse(x, np.vectorize(math.erf), math_ops.erf)
def testFloatTanhEdge(self):
x = np.arange(40, 40 + 6).reshape(6).astype(np.float32)
- self._compareBoth(x, np.tanh, tf.tanh)
+ self._compareBoth(x, np.tanh, math_ops.tanh)
x = np.arange(-40, -40 + 6).reshape(6).astype(np.float32)
- self._compareBoth(x, np.tanh, tf.tanh)
+ self._compareBoth(x, np.tanh, math_ops.tanh)
def testFloatEmpty(self):
x = np.empty((2, 0, 5), dtype=np.float32)
- self._compareBoth(x, np.abs, tf.abs)
+ self._compareBoth(x, np.abs, math_ops.abs)
self._compareBoth(x, np.abs, _ABS)
- self._compareBoth(x, np.negative, tf.neg)
+ self._compareBoth(x, np.negative, math_ops.neg)
self._compareBoth(x, np.negative, _NEG)
- self._compareBoth(x, self._inv, tf.reciprocal)
- self._compareBoth(x, np.square, tf.square)
- self._compareBoth(x, np.sqrt, tf.sqrt)
- self._compareBoth(x, self._rsqrt, tf.rsqrt)
- self._compareBoth(x, np.exp, tf.exp)
- self._compareBoth(x, np.log, tf.log)
- self._compareBoth(x, np.log1p, tf.log1p)
- self._compareBoth(x, np.tanh, tf.tanh)
- self._compareBoth(x, self._sigmoid, tf.sigmoid)
- self._compareBoth(x, np.sign, tf.sign)
- self._compareBoth(x, np.sin, tf.sin)
- self._compareBoth(x, np.cos, tf.cos)
+ self._compareBoth(x, self._inv, math_ops.reciprocal)
+ self._compareBoth(x, np.square, math_ops.square)
+ self._compareBoth(x, np.sqrt, math_ops.sqrt)
+ self._compareBoth(x, self._rsqrt, math_ops.rsqrt)
+ self._compareBoth(x, np.exp, math_ops.exp)
+ self._compareBoth(x, np.log, math_ops.log)
+ self._compareBoth(x, np.log1p, math_ops.log1p)
+ self._compareBoth(x, np.tanh, math_ops.tanh)
+ self._compareBoth(x, self._sigmoid, math_ops.sigmoid)
+ self._compareBoth(x, np.sign, math_ops.sign)
+ self._compareBoth(x, np.sin, math_ops.sin)
+ self._compareBoth(x, np.cos, math_ops.cos)
# Can't use vectorize below, so just use some arbitrary function
- self._compareBoth(x, np.sign, tf.lgamma)
- self._compareBoth(x, np.sign, tf.erf)
- self._compareBoth(x, np.sign, tf.erfc)
- self._compareBoth(x, np.tan, tf.tan)
- self._compareBoth(x, np.arcsin, tf.asin)
- self._compareBoth(x, np.arccos, tf.acos)
- self._compareBoth(x, np.arctan, tf.atan)
-
- self._compareBothSparse(x, np.abs, tf.abs)
- self._compareBothSparse(x, np.negative, tf.neg)
- self._compareBothSparse(x, np.square, tf.square)
- self._compareBothSparse(x, np.sqrt, tf.sqrt, tol=1e-3)
- self._compareBothSparse(x, np.tanh, tf.tanh)
- self._compareBothSparse(x, np.sign, tf.sign)
- self._compareBothSparse(x, np.sign, tf.erf)
+ self._compareBoth(x, np.sign, math_ops.lgamma)
+ self._compareBoth(x, np.sign, math_ops.erf)
+ self._compareBoth(x, np.sign, math_ops.erfc)
+ self._compareBoth(x, np.tan, math_ops.tan)
+ self._compareBoth(x, np.arcsin, math_ops.asin)
+ self._compareBoth(x, np.arccos, math_ops.acos)
+ self._compareBoth(x, np.arctan, math_ops.atan)
+
+ self._compareBothSparse(x, np.abs, math_ops.abs)
+ self._compareBothSparse(x, np.negative, math_ops.neg)
+ self._compareBothSparse(x, np.square, math_ops.square)
+ self._compareBothSparse(x, np.sqrt, math_ops.sqrt, tol=1e-3)
+ self._compareBothSparse(x, np.tanh, math_ops.tanh)
+ self._compareBothSparse(x, np.sign, math_ops.sign)
+ self._compareBothSparse(x, np.sign, math_ops.erf)
def testDoubleBasic(self):
x = np.arange(-3, 3).reshape(1, 3, 2).astype(np.float64)
- y = (x + .5).astype(np.float64) # no zero
+ y = (x + .5).astype(np.float64) # no zero
z = (x + 15.5).astype(np.float64) # all positive
- k = np.arange(-0.90, 0.90, 0.35).reshape(1, 3, 2).astype(np.float64) # between -1 and 1
- self._compareBoth(x, np.abs, tf.abs)
+ k = np.arange(-0.90, 0.90, 0.35).reshape(1, 3, 2).astype(
+ np.float64) # between -1 and 1
+ self._compareBoth(x, np.abs, math_ops.abs)
self._compareBoth(x, np.abs, _ABS)
- self._compareBoth(x, np.negative, tf.neg)
+ self._compareBoth(x, np.negative, math_ops.neg)
self._compareBoth(x, np.negative, _NEG)
- self._compareBoth(y, self._inv, tf.reciprocal)
- self._compareBoth(x, np.square, tf.square)
- self._compareBoth(z, np.sqrt, tf.sqrt)
- self._compareBoth(z, self._rsqrt, tf.rsqrt)
- self._compareBoth(x, np.exp, tf.exp)
- self._compareBoth(z, np.log, tf.log)
- self._compareBoth(z, np.log1p, tf.log1p)
- self._compareBoth(x, np.tanh, tf.tanh)
- self._compareBoth(x, self._sigmoid, tf.sigmoid)
- self._compareBoth(y, np.sign, tf.sign)
- self._compareBoth(x, np.sin, tf.sin)
- self._compareBoth(x, np.cos, tf.cos)
+ self._compareBoth(y, self._inv, math_ops.reciprocal)
+ self._compareBoth(x, np.square, math_ops.square)
+ self._compareBoth(z, np.sqrt, math_ops.sqrt)
+ self._compareBoth(z, self._rsqrt, math_ops.rsqrt)
+ self._compareBoth(x, np.exp, math_ops.exp)
+ self._compareBoth(z, np.log, math_ops.log)
+ self._compareBoth(z, np.log1p, math_ops.log1p)
+ self._compareBoth(x, np.tanh, math_ops.tanh)
+ self._compareBoth(x, self._sigmoid, math_ops.sigmoid)
+ self._compareBoth(y, np.sign, math_ops.sign)
+ self._compareBoth(x, np.sin, math_ops.sin)
+ self._compareBoth(x, np.cos, math_ops.cos)
self._compareBoth(
y,
np.vectorize(self._replace_domain_error_with_inf(math.lgamma)),
- tf.lgamma)
- self._compareBoth(x, np.vectorize(math.erf), tf.erf)
- self._compareBoth(x, np.vectorize(math.erfc), tf.erfc)
- self._compareBoth(x, np.arctan, tf.atan)
- self._compareBoth(k, np.arcsin, tf.asin)
- self._compareBoth(k, np.arccos, tf.acos)
- self._compareBoth(k, np.tan, tf.tan)
-
- self._compareBothSparse(x, np.abs, tf.abs)
- self._compareBothSparse(x, np.negative, tf.neg)
- self._compareBothSparse(x, np.square, tf.square)
- self._compareBothSparse(z, np.sqrt, tf.sqrt, tol=1e-3)
- self._compareBothSparse(x, np.tanh, tf.tanh)
- self._compareBothSparse(y, np.sign, tf.sign)
- self._compareBothSparse(x, np.vectorize(math.erf), tf.erf)
+ math_ops.lgamma)
+ self._compareBoth(x, np.vectorize(math.erf), math_ops.erf)
+ self._compareBoth(x, np.vectorize(math.erfc), math_ops.erfc)
+ self._compareBoth(x, np.arctan, math_ops.atan)
+ self._compareBoth(k, np.arcsin, math_ops.asin)
+ self._compareBoth(k, np.arccos, math_ops.acos)
+ self._compareBoth(k, np.tan, math_ops.tan)
+
+ self._compareBothSparse(x, np.abs, math_ops.abs)
+ self._compareBothSparse(x, np.negative, math_ops.neg)
+ self._compareBothSparse(x, np.square, math_ops.square)
+ self._compareBothSparse(z, np.sqrt, math_ops.sqrt, tol=1e-3)
+ self._compareBothSparse(x, np.tanh, math_ops.tanh)
+ self._compareBothSparse(y, np.sign, math_ops.sign)
+ self._compareBothSparse(x, np.vectorize(math.erf), math_ops.erf)
def testHalfBasic(self):
x = np.arange(-3, 3).reshape(1, 3, 2).astype(np.float16)
- y = (x + .5).astype(np.float16) # no zero
+ y = (x + .5).astype(np.float16) # no zero
z = (x + 15.5).astype(np.float16) # all positive
- self._compareBoth(x, np.abs, tf.abs)
+ self._compareBoth(x, np.abs, math_ops.abs)
self._compareBoth(x, np.abs, _ABS)
- self._compareBoth(x, np.negative, tf.neg)
+ self._compareBoth(x, np.negative, math_ops.neg)
self._compareBoth(x, np.negative, _NEG)
- self._compareBoth(y, self._inv, tf.reciprocal)
- self._compareBoth(x, np.square, tf.square)
- self._compareBoth(z, np.sqrt, tf.sqrt)
- self._compareBoth(z, self._rsqrt, tf.rsqrt)
- self._compareBoth(x, np.exp, tf.exp)
- self._compareBoth(z, np.log, tf.log)
- self._compareBoth(z, np.log1p, tf.log1p)
- self._compareBoth(x, np.tanh, tf.tanh)
- self._compareBoth(x, self._sigmoid, tf.sigmoid)
- self._compareBoth(y, np.sign, tf.sign)
- self._compareBoth(x, np.sin, tf.sin)
- self._compareBoth(x, np.cos, tf.cos)
+ self._compareBoth(y, self._inv, math_ops.reciprocal)
+ self._compareBoth(x, np.square, math_ops.square)
+ self._compareBoth(z, np.sqrt, math_ops.sqrt)
+ self._compareBoth(z, self._rsqrt, math_ops.rsqrt)
+ self._compareBoth(x, np.exp, math_ops.exp)
+ self._compareBoth(z, np.log, math_ops.log)
+ self._compareBoth(z, np.log1p, math_ops.log1p)
+ self._compareBoth(x, np.tanh, math_ops.tanh)
+ self._compareBoth(x, self._sigmoid, math_ops.sigmoid)
+ self._compareBoth(y, np.sign, math_ops.sign)
+ self._compareBoth(x, np.sin, math_ops.sin)
+ self._compareBoth(x, np.cos, math_ops.cos)
self._compareBoth(
y,
np.vectorize(self._replace_domain_error_with_inf(math.lgamma)),
- tf.lgamma)
- self._compareBoth(x, np.vectorize(math.erf), tf.erf)
- self._compareBoth(x, np.vectorize(math.erfc), tf.erfc)
-
- self._compareBothSparse(x, np.abs, tf.abs)
- self._compareBothSparse(x, np.negative, tf.neg)
- self._compareBothSparse(x, np.square, tf.square)
- self._compareBothSparse(z, np.sqrt, tf.sqrt, tol=1e-3)
- self._compareBothSparse(x, np.tanh, tf.tanh)
- self._compareBothSparse(y, np.sign, tf.sign)
- self._compareBothSparse(x, np.vectorize(math.erf), tf.erf, tol=1e-3)
+ math_ops.lgamma)
+ self._compareBoth(x, np.vectorize(math.erf), math_ops.erf)
+ self._compareBoth(x, np.vectorize(math.erfc), math_ops.erfc)
+
+ self._compareBothSparse(x, np.abs, math_ops.abs)
+ self._compareBothSparse(x, np.negative, math_ops.neg)
+ self._compareBothSparse(x, np.square, math_ops.square)
+ self._compareBothSparse(z, np.sqrt, math_ops.sqrt, tol=1e-3)
+ self._compareBothSparse(x, np.tanh, math_ops.tanh)
+ self._compareBothSparse(y, np.sign, math_ops.sign)
+ self._compareBothSparse(x, np.vectorize(math.erf), math_ops.erf, tol=1e-3)
def testInt32Basic(self):
x = np.arange(-6, 6, 2).reshape(1, 3, 2).astype(np.int32)
- self._compareCpu(x, np.abs, tf.abs)
+ self._compareCpu(x, np.abs, math_ops.abs)
self._compareCpu(x, np.abs, _ABS)
- self._compareBoth(x, np.negative, tf.neg)
+ self._compareBoth(x, np.negative, math_ops.neg)
self._compareBoth(x, np.negative, _NEG)
- self._compareBoth(x, np.square, tf.square)
- self._compareCpu(x, np.sign, tf.sign)
+ self._compareBoth(x, np.square, math_ops.square)
+ self._compareCpu(x, np.sign, math_ops.sign)
- self._compareBothSparse(x, np.abs, tf.abs)
- self._compareBothSparse(x, np.negative, tf.neg)
- self._compareBothSparse(x, np.square, tf.square)
- self._compareBothSparse(x, np.sign, tf.sign)
+ self._compareBothSparse(x, np.abs, math_ops.abs)
+ self._compareBothSparse(x, np.negative, math_ops.neg)
+ self._compareBothSparse(x, np.square, math_ops.square)
+ self._compareBothSparse(x, np.sign, math_ops.sign)
def testInt64Basic(self):
- x = np.arange(
- -6 << 40, 6 << 40, 2 << 40).reshape(1, 3, 2).astype(np.int64)
- self._compareCpu(x, np.abs, tf.abs)
+ x = np.arange(-6 << 40, 6 << 40, 2 << 40).reshape(1, 3, 2).astype(np.int64)
+ self._compareCpu(x, np.abs, math_ops.abs)
self._compareCpu(x, np.abs, _ABS)
- self._compareCpu(x, np.negative, tf.neg)
+ self._compareCpu(x, np.negative, math_ops.neg)
self._compareCpu(x, np.negative, _NEG)
- self._compareCpu(x, np.square, tf.square)
- self._compareCpu(x, np.sign, tf.sign)
+ self._compareCpu(x, np.square, math_ops.square)
+ self._compareCpu(x, np.sign, math_ops.sign)
- self._compareBothSparse(x, np.abs, tf.abs)
- self._compareBothSparse(x, np.negative, tf.neg)
- self._compareBothSparse(x, np.square, tf.square)
- self._compareBothSparse(x, np.sign, tf.sign)
+ self._compareBothSparse(x, np.abs, math_ops.abs)
+ self._compareBothSparse(x, np.negative, math_ops.neg)
+ self._compareBothSparse(x, np.square, math_ops.square)
+ self._compareBothSparse(x, np.sign, math_ops.sign)
def testComplex64Basic(self):
- x = np.complex(1, 1) * np.arange(-3, 3).reshape(1, 3, 2).astype(
- np.complex64)
+ x = np.complex(1, 1) * np.arange(-3, 3).reshape(1, 3,
+ 2).astype(np.complex64)
y = x + 0.5 # no zeros
- self._compareCpu(x, np.abs, tf.complex_abs)
+ self._compareCpu(x, np.abs, math_ops.complex_abs)
self._compareCpu(x, np.abs, _ABS)
- self._compareCpu(x, np.negative, tf.neg)
+ self._compareCpu(x, np.negative, math_ops.neg)
self._compareCpu(x, np.negative, _NEG)
- self._compareCpu(y, self._inv, tf.reciprocal)
- self._compareCpu(x, np.square, tf.square)
- self._compareCpu(y, np.sqrt, tf.sqrt)
- self._compareCpu(y, self._rsqrt, tf.rsqrt)
- self._compareCpu(x, np.exp, tf.exp)
- self._compareCpu(y, np.log, tf.log)
- self._compareCpu(y, np.log1p, tf.log1p)
- self._compareCpu(x, np.tanh, tf.tanh)
- self._compareCpu(x, self._sigmoid, tf.sigmoid)
- self._compareCpu(x, np.sin, tf.sin)
- self._compareCpu(x, np.cos, tf.cos)
-
- self._compareBothSparse(x, np.abs, tf.abs)
- self._compareBothSparse(x, np.negative, tf.neg)
- self._compareBothSparse(x, np.square, tf.square)
- self._compareBothSparse(x, np.sqrt, tf.sqrt, 1e-3)
- self._compareBothSparse(x, np.tanh, tf.tanh)
+ self._compareCpu(y, self._inv, math_ops.reciprocal)
+ self._compareCpu(x, np.square, math_ops.square)
+ self._compareCpu(y, np.sqrt, math_ops.sqrt)
+ self._compareCpu(y, self._rsqrt, math_ops.rsqrt)
+ self._compareCpu(x, np.exp, math_ops.exp)
+ self._compareCpu(y, np.log, math_ops.log)
+ self._compareCpu(y, np.log1p, math_ops.log1p)
+ self._compareCpu(x, np.tanh, math_ops.tanh)
+ self._compareCpu(x, self._sigmoid, math_ops.sigmoid)
+ self._compareCpu(x, np.sin, math_ops.sin)
+ self._compareCpu(x, np.cos, math_ops.cos)
+
+ self._compareBothSparse(x, np.abs, math_ops.abs)
+ self._compareBothSparse(x, np.negative, math_ops.neg)
+ self._compareBothSparse(x, np.square, math_ops.square)
+ self._compareBothSparse(x, np.sqrt, math_ops.sqrt, 1e-3)
+ self._compareBothSparse(x, np.tanh, math_ops.tanh)
# Numpy uses an incorrect definition of sign; use the right one instead.
def complex_sign(x):
return x / np.abs(x)
- self._compareCpu(y, complex_sign, tf.sign)
- self._compareBothSparse(y, complex_sign, tf.sign)
+
+ self._compareCpu(y, complex_sign, math_ops.sign)
+ self._compareBothSparse(y, complex_sign, math_ops.sign)
def testComplex128Basic(self):
- x = np.complex(1, 1) * np.arange(-3, 3).reshape(1, 3, 2).astype(
- np.complex128)
+ x = np.complex(1, 1) * np.arange(-3, 3).reshape(1, 3,
+ 2).astype(np.complex128)
y = x + 0.5 # no zeros
- self._compareCpu(x, np.abs, tf.abs)
+ self._compareCpu(x, np.abs, math_ops.abs)
self._compareCpu(x, np.abs, _ABS)
- self._compareCpu(x, np.negative, tf.neg)
+ self._compareCpu(x, np.negative, math_ops.neg)
self._compareCpu(x, np.negative, _NEG)
- self._compareCpu(y, self._inv, tf.reciprocal)
- self._compareCpu(x, np.square, tf.square)
- self._compareCpu(y, np.sqrt, tf.sqrt)
- self._compareCpu(y, self._rsqrt, tf.rsqrt)
- self._compareCpu(x, np.exp, tf.exp)
- self._compareCpu(y, np.log, tf.log)
- self._compareCpu(y, np.log1p, tf.log1p)
- self._compareCpu(x, np.tanh, tf.tanh)
- self._compareCpu(x, self._sigmoid, tf.sigmoid)
- self._compareCpu(x, np.sin, tf.sin)
- self._compareCpu(x, np.cos, tf.cos)
-
- self._compareBothSparse(x, np.abs, tf.abs)
- self._compareBothSparse(x, np.negative, tf.neg)
- self._compareBothSparse(x, np.square, tf.square)
- self._compareBothSparse(x, np.sqrt, tf.sqrt, 1e-3)
- self._compareBothSparse(x, np.tanh, tf.tanh)
+ self._compareCpu(y, self._inv, math_ops.reciprocal)
+ self._compareCpu(x, np.square, math_ops.square)
+ self._compareCpu(y, np.sqrt, math_ops.sqrt)
+ self._compareCpu(y, self._rsqrt, math_ops.rsqrt)
+ self._compareCpu(x, np.exp, math_ops.exp)
+ self._compareCpu(y, np.log, math_ops.log)
+ self._compareCpu(y, np.log1p, math_ops.log1p)
+ self._compareCpu(x, np.tanh, math_ops.tanh)
+ self._compareCpu(x, self._sigmoid, math_ops.sigmoid)
+ self._compareCpu(x, np.sin, math_ops.sin)
+ self._compareCpu(x, np.cos, math_ops.cos)
+
+ self._compareBothSparse(x, np.abs, math_ops.abs)
+ self._compareBothSparse(x, np.negative, math_ops.neg)
+ self._compareBothSparse(x, np.square, math_ops.square)
+ self._compareBothSparse(x, np.sqrt, math_ops.sqrt, 1e-3)
+ self._compareBothSparse(x, np.tanh, math_ops.tanh)
# Numpy uses an incorrect definition of sign; use the right one instead.
def complex_sign(x):
return x / np.abs(x)
- self._compareCpu(y, complex_sign, tf.sign)
- self._compareBothSparse(y, complex_sign, tf.sign)
+
+ self._compareCpu(y, complex_sign, math_ops.sign)
+ self._compareBothSparse(y, complex_sign, math_ops.sign)
def testGradGrad(self):
np.random.seed(7)
shape = (5,)
dtype_tols = [(np.float32, 5e-4), (np.float64, 1e-6), (np.complex64, 5e-4),
(np.complex128, 1e-6)]
- op_range = [(gen_math_ops._reciprocal_grad, [-2, 2]),
- (gen_math_ops._rsqrt_grad, [0.1, 3]),
- (gen_math_ops._sigmoid_grad, [-2, 2]),
- (gen_math_ops._sqrt_grad, [0.1, 3]),
- (gen_math_ops._tanh_grad, [-2, 2]),]
+ op_range = [
+ (gen_math_ops._reciprocal_grad, [-2, 2]),
+ (gen_math_ops._rsqrt_grad, [0.1, 3]),
+ (gen_math_ops._sigmoid_grad, [-2, 2]),
+ (gen_math_ops._sqrt_grad, [0.1, 3]),
+ (gen_math_ops._tanh_grad, [-2, 2]),
+ ]
def rand(dtype):
x = np.random.uniform(
@@ -450,10 +453,10 @@ class UnaryOpTest(tf.test.TestCase):
for op, real_range in op_range:
with self.test_session():
for dtype, tol in dtype_tols:
- x = tf.constant(rand(dtype))
- y = tf.constant(rand(dtype))
+ x = constant_op.constant(rand(dtype))
+ y = constant_op.constant(rand(dtype))
z = op(x, y)
- grads = tf.test.compute_gradient(
+ grads = gradient_checker.compute_gradient(
[x, y], [shape, shape],
z,
shape,
@@ -464,13 +467,13 @@ class UnaryOpTest(tf.test.TestCase):
self.assertAllClose(analytical, numerical, rtol=tol, atol=tol)
-class BinaryOpTest(tf.test.TestCase):
+class BinaryOpTest(test.TestCase):
def _compareCpu(self, x, y, np_func, tf_func, also_compare_variables=False):
np_ans = np_func(x, y)
with self.test_session(use_gpu=False):
- inx = tf.convert_to_tensor(x)
- iny = tf.convert_to_tensor(y)
+ inx = ops.convert_to_tensor(x)
+ iny = ops.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_cpu = out.eval()
# Test that the op takes precedence over numpy operators.
@@ -478,9 +481,9 @@ class BinaryOpTest(tf.test.TestCase):
np_right = tf_func(inx, y).eval()
if also_compare_variables:
- var_x = tf.Variable(x)
- var_y = tf.Variable(y)
- tf.global_variables_initializer().run()
+ var_x = variables.Variable(x)
+ var_y = variables.Variable(y)
+ variables.global_variables_initializer().run()
print(type(x), type(y), type(var_x), type(var_y))
print(type(tf_func(x, var_y)), type(tf_func(var_x, y)))
np_var_left = tf_func(x, var_y).eval()
@@ -495,82 +498,79 @@ class BinaryOpTest(tf.test.TestCase):
self.assertAllClose(np_ans, np_var_right)
self.assertShapeEqual(np_ans, out)
- _GRAD_TOL = {tf.float16: 1e-3,
- tf.float32: 1e-3,
- tf.complex64: 1e-2,
- tf.float64: 1e-5,
- tf.complex128: 1e-4}
-
- def _compareGradientX(self, x, y, np_func, tf_func,
+ _GRAD_TOL = {
+ dtypes_lib.float16: 1e-3,
+ dtypes_lib.float32: 1e-3,
+ dtypes_lib.complex64: 1e-2,
+ dtypes_lib.float64: 1e-5,
+ dtypes_lib.complex128: 1e-4
+ }
+
+ def _compareGradientX(self,
+ x,
+ y,
+ np_func,
+ tf_func,
numeric_gradient_type=None):
z = np_func(x, y)
zs = list(z.shape)
with self.test_session():
- inx = tf.convert_to_tensor(x)
- iny = tf.convert_to_tensor(y)
+ inx = ops.convert_to_tensor(x)
+ iny = ops.convert_to_tensor(y)
if x.dtype in (np.float32, np.float64):
out = 1.1 * tf_func(inx, iny)
else:
out = tf_func(inx, iny)
xs = list(x.shape)
- jacob_t, jacob_n = tf.test.compute_gradient(inx,
- xs,
- out,
- zs,
- x_init_value=x)
+ jacob_t, jacob_n = gradient_checker.compute_gradient(
+ inx, xs, out, zs, x_init_value=x)
if numeric_gradient_type is not None:
xf = x.astype(numeric_gradient_type)
yf = y.astype(numeric_gradient_type)
- inxf = tf.convert_to_tensor(xf)
- inyf = tf.convert_to_tensor(yf)
+ inxf = ops.convert_to_tensor(xf)
+ inyf = ops.convert_to_tensor(yf)
outf = tf_func(inxf, inyf)
- _, jacob_n = tf.test.compute_gradient(inxf,
- xs,
- outf,
- zs,
- x_init_value=xf,
- delta=1e-3)
+ _, jacob_n = gradient_checker.compute_gradient(
+ inxf, xs, outf, zs, x_init_value=xf, delta=1e-3)
jacob_n = jacob_n.astype(x.dtype)
- tol = self._GRAD_TOL[tf.as_dtype(x.dtype)]
+ tol = self._GRAD_TOL[dtypes_lib.as_dtype(x.dtype)]
self.assertAllClose(jacob_t, jacob_n, rtol=tol, atol=tol)
- def _compareGradientY(self, x, y, np_func, tf_func,
+ def _compareGradientY(self,
+ x,
+ y,
+ np_func,
+ tf_func,
numeric_gradient_type=None):
z = np_func(x, y)
zs = list(z.shape)
with self.test_session():
- inx = tf.convert_to_tensor(x)
- iny = tf.convert_to_tensor(y)
+ inx = ops.convert_to_tensor(x)
+ iny = ops.convert_to_tensor(y)
if x.dtype in (np.float32, np.float64):
out = 1.1 * tf_func(inx, iny)
else:
out = tf_func(inx, iny)
ys = list(np.shape(y))
- jacob_t, jacob_n = tf.test.compute_gradient(iny,
- ys,
- out,
- zs,
- x_init_value=y)
+ jacob_t, jacob_n = gradient_checker.compute_gradient(
+ iny, ys, out, zs, x_init_value=y)
if numeric_gradient_type is not None:
xf = x.astype(numeric_gradient_type)
yf = y.astype(numeric_gradient_type)
- inxf = tf.convert_to_tensor(xf)
- inyf = tf.convert_to_tensor(yf)
+ inxf = ops.convert_to_tensor(xf)
+ inyf = ops.convert_to_tensor(yf)
outf = tf_func(inxf, inyf)
- _, jacob_n = tf.test.compute_gradient(inyf,
- ys,
- outf,
- zs,
- x_init_value=yf)
+ _, jacob_n = gradient_checker.compute_gradient(
+ inyf, ys, outf, zs, x_init_value=yf)
jacob_n = jacob_n.astype(x.dtype)
- tol = self._GRAD_TOL[tf.as_dtype(x.dtype)]
+ tol = self._GRAD_TOL[dtypes_lib.as_dtype(x.dtype)]
self.assertAllClose(jacob_t, jacob_n, rtol=tol, atol=tol)
def _compareGpu(self, x, y, np_func, tf_func):
np_ans = np_func(x, y)
with self.test_session(use_gpu=True):
- inx = tf.convert_to_tensor(x)
- iny = tf.convert_to_tensor(y)
+ inx = ops.convert_to_tensor(x)
+ iny = ops.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_gpu = out.eval()
self.assertAllClose(np_ans, tf_gpu)
@@ -580,10 +580,12 @@ class BinaryOpTest(tf.test.TestCase):
def _compareBoth(self, x, y, np_func, tf_func, also_compare_variables=False):
self._compareCpu(x, y, np_func, tf_func, also_compare_variables)
if x.dtype in (np.float16, np.float32, np.float64):
- if tf_func not in (_FLOORDIV, tf.floordiv, tf.igamma, tf.igammac, tf.zeta, tf.polygamma):
+ if tf_func not in (_FLOORDIV, math_ops.floordiv, math_ops.igamma,
+ math_ops.igammac, math_ops.zeta, math_ops.polygamma):
self._compareGradientX(x, y, np_func, tf_func)
self._compareGradientY(x, y, np_func, tf_func)
- if tf_func in (tf.igamma, tf.igammac, tf.zeta, tf.polygamma):
+ if tf_func in (math_ops.igamma, math_ops.igammac, math_ops.zeta,
+ math_ops.polygamma):
# These methods only support gradients in the second parameter
self._compareGradientY(x, y, np_func, tf_func)
self._compareGpu(x, y, np_func, tf_func)
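
Note the asymmetry _compareBoth encodes: igamma, igammac, zeta and polygamma
register a gradient only for their second argument, so only _compareGradientY
runs for them. A sketch of checking just that gradient, assuming it executes
under a default session as the tests above do:

    from tensorflow.python.client import session as session_lib
    from tensorflow.python.framework import constant_op
    from tensorflow.python.framework import ops
    from tensorflow.python.ops import gradient_checker
    from tensorflow.python.ops import math_ops

    with ops.Graph().as_default(), session_lib.Session():
      a = constant_op.constant([0.5, 1.5])  # no gradient registered for a
      x = constant_op.constant([1.0, 2.0])
      z = math_ops.igamma(a, x)
      # Only the Jacobian with respect to x is defined; requesting the
      # gradient with respect to a would fail.
      jacob_t, jacob_n = gradient_checker.compute_gradient(x, [2], z, [2])
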
@@ -591,11 +593,11 @@ class BinaryOpTest(tf.test.TestCase):
def testFloatBasic(self):
x = np.linspace(-5, 20, 15).reshape(1, 3, 5).astype(np.float32)
y = np.linspace(20, -5, 15).reshape(1, 3, 5).astype(np.float32)
- self._compareBoth(x, y, np.add, tf.add, also_compare_variables=True)
- self._compareBoth(x, y, np.subtract, tf.sub)
- self._compareBoth(x, y, np.multiply, tf.mul)
- self._compareBoth(x, y + 0.1, np.true_divide, tf.truediv)
- self._compareBoth(x, y + 0.1, np.floor_divide, tf.floordiv)
+ self._compareBoth(x, y, np.add, math_ops.add, also_compare_variables=True)
+ self._compareBoth(x, y, np.subtract, math_ops.sub)
+ self._compareBoth(x, y, np.multiply, math_ops.mul)
+ self._compareBoth(x, y + 0.1, np.true_divide, math_ops.truediv)
+ self._compareBoth(x, y + 0.1, np.floor_divide, math_ops.floordiv)
self._compareBoth(x, y, np.add, _ADD)
self._compareBoth(x, y, np.subtract, _SUB)
self._compareBoth(x, y, np.multiply, _MUL)
@@ -605,35 +607,38 @@ class BinaryOpTest(tf.test.TestCase):
from scipy import special # pylint: disable=g-import-not-at-top
a_pos_small = np.linspace(0.1, 2, 15).reshape(1, 3, 5).astype(np.float32)
x_pos_small = np.linspace(0.1, 10, 15).reshape(1, 3, 5).astype(np.float32)
- self._compareBoth(a_pos_small, x_pos_small, special.gammainc, tf.igamma)
- self._compareBoth(a_pos_small, x_pos_small, special.gammaincc, tf.igammac)
+ self._compareBoth(a_pos_small, x_pos_small, special.gammainc,
+ math_ops.igamma)
+ self._compareBoth(a_pos_small, x_pos_small, special.gammaincc,
+ math_ops.igammac)
# Need x > 1
- self._compareBoth(x_pos_small + 1, a_pos_small, special.zeta, tf.zeta)
+ self._compareBoth(x_pos_small + 1, a_pos_small, special.zeta,
+ math_ops.zeta)
n_small = np.arange(0, 15).reshape(1, 3, 5).astype(np.float32)
- self._compareBoth(n_small, x_pos_small, special.polygamma, tf.polygamma)
+ self._compareBoth(n_small, x_pos_small, special.polygamma,
+ math_ops.polygamma)
except ImportError as e:
- tf.logging.warn("Cannot test special functions: %s" % str(e))
+ tf_logging.warn("Cannot test special functions: %s" % str(e))
def testFloatDifferentShapes(self):
x = np.array([1, 2, 3, 4]).reshape(2, 2).astype(np.float32)
y = np.array([1, 2]).reshape(2, 1).astype(np.float32)
with self.test_session() as sess:
- inx = tf.convert_to_tensor(x)
- iny = tf.convert_to_tensor(y)
- s = tf.reduce_sum(inx * iny)
- gx, gy = sess.run(tf.gradients(s, [inx, iny]))
+ inx = ops.convert_to_tensor(x)
+ iny = ops.convert_to_tensor(y)
+ s = math_ops.reduce_sum(inx * iny)
+ gx, gy = sess.run(gradients_impl.gradients(s, [inx, iny]))
# gx is simply the broadcasted y
- self.assertAllEqual(gx, np.array([1, 1, 2, 2])
- .reshape(2, 2).astype(np.float32))
+ self.assertAllEqual(gx,
+ np.array([1, 1, 2, 2]).reshape(2, 2).astype(np.float32))
# gy is x's column summed up
- self.assertAllEqual(gy, np.array([3, 7]).
- reshape(2, 1).astype(np.float32))
+ self.assertAllEqual(gy, np.array([3, 7]).reshape(2, 1).astype(np.float32))
def testFloatVariableOverload(self):
x = np.array([1, 2, 3, 4]).reshape(2, 2).astype(np.int32)
y = np.array([1, 2]).reshape(2, 1).astype(np.int32)
- var_x = tf.Variable(x)
- var_y = tf.Variable(y)
+ var_x = variables.Variable(x)
+ var_y = variables.Variable(y)
with self.test_session() as sess:
sess.run([var_x.initializer, var_y.initializer])
left_result = (var_x * y).eval()
@@ -645,11 +650,11 @@ class BinaryOpTest(tf.test.TestCase):
def testDoubleBasic(self):
x = np.linspace(-5, 20, 15).reshape(1, 3, 5).astype(np.float64)
y = np.linspace(20, -5, 15).reshape(1, 3, 5).astype(np.float64)
- self._compareBoth(x, y, np.add, tf.add)
- self._compareBoth(x, y, np.subtract, tf.sub)
- self._compareBoth(x, y, np.multiply, tf.mul)
- self._compareBoth(x, y + 0.1, np.true_divide, tf.truediv)
- self._compareBoth(x, y + 0.1, np.floor_divide, tf.floordiv)
+ self._compareBoth(x, y, np.add, math_ops.add)
+ self._compareBoth(x, y, np.subtract, math_ops.sub)
+ self._compareBoth(x, y, np.multiply, math_ops.mul)
+ self._compareBoth(x, y + 0.1, np.true_divide, math_ops.truediv)
+ self._compareBoth(x, y + 0.1, np.floor_divide, math_ops.floordiv)
self._compareBoth(x, y, np.add, _ADD)
self._compareBoth(x, y, np.subtract, _SUB)
self._compareBoth(x, y, np.multiply, _MUL)
@@ -659,42 +664,44 @@ class BinaryOpTest(tf.test.TestCase):
from scipy import special # pylint: disable=g-import-not-at-top
a_pos_small = np.linspace(0.1, 2, 15).reshape(1, 3, 5).astype(np.float32)
x_pos_small = np.linspace(0.1, 10, 15).reshape(1, 3, 5).astype(np.float32)
- self._compareBoth(a_pos_small, x_pos_small, special.gammainc, tf.igamma)
- self._compareBoth(a_pos_small, x_pos_small, special.gammaincc, tf.igammac)
+ self._compareBoth(a_pos_small, x_pos_small, special.gammainc,
+ math_ops.igamma)
+ self._compareBoth(a_pos_small, x_pos_small, special.gammaincc,
+ math_ops.igammac)
except ImportError as e:
- tf.logging.warn("Cannot test special functions: %s" % str(e))
+ tf_logging.warn("Cannot test special functions: %s" % str(e))
def testInt8Basic(self):
x = np.arange(1, 13, 2).reshape(1, 3, 2).astype(np.int8)
y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.int8)
- self._compareBoth(x, y, np.multiply, tf.mul)
+ self._compareBoth(x, y, np.multiply, math_ops.mul)
self._compareBoth(x, y, np.multiply, _MUL)
def testInt16Basic(self):
x = np.arange(1, 13, 2).reshape(1, 3, 2).astype(np.int16)
y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.int16)
- self._compareBoth(x, y, np.multiply, tf.mul)
+ self._compareBoth(x, y, np.multiply, math_ops.mul)
self._compareBoth(x, y, np.multiply, _MUL)
def testUint16Basic(self):
x = np.arange(1, 13, 2).reshape(1, 3, 2).astype(np.uint16)
y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.uint16)
- self._compareBoth(x, y, np.multiply, tf.mul)
+ self._compareBoth(x, y, np.multiply, math_ops.mul)
self._compareBoth(x, y, np.multiply, _MUL)
- self._compareBoth(x, y, np.true_divide, tf.truediv)
- self._compareBoth(x, y, np.floor_divide, tf.floordiv)
+ self._compareBoth(x, y, np.true_divide, math_ops.truediv)
+ self._compareBoth(x, y, np.floor_divide, math_ops.floordiv)
self._compareBoth(x, y, np.true_divide, _TRUEDIV)
self._compareBoth(x, y, np.floor_divide, _FLOORDIV)
def testInt32Basic(self):
x = np.arange(1, 13, 2).reshape(1, 3, 2).astype(np.int32)
y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.int32)
- self._compareBoth(x, y, np.add, tf.add)
- self._compareBoth(x, y, np.subtract, tf.sub)
- self._compareBoth(x, y, np.multiply, tf.mul)
- self._compareBoth(x, y, np.true_divide, tf.truediv)
- self._compareBoth(x, y, np.floor_divide, tf.floordiv)
- self._compareBoth(x, y, np.mod, tf.mod)
+ self._compareBoth(x, y, np.add, math_ops.add)
+ self._compareBoth(x, y, np.subtract, math_ops.sub)
+ self._compareBoth(x, y, np.multiply, math_ops.mul)
+ self._compareBoth(x, y, np.true_divide, math_ops.truediv)
+ self._compareBoth(x, y, np.floor_divide, math_ops.floordiv)
+ self._compareBoth(x, y, np.mod, math_ops.mod)
self._compareBoth(x, y, np.add, _ADD)
self._compareBoth(x, y, np.subtract, _SUB)
self._compareBoth(x, y, np.multiply, _MUL)
@@ -708,11 +715,11 @@ class BinaryOpTest(tf.test.TestCase):
def testInt64Basic(self):
x = np.arange(1 << 40, 13 << 40, 2 << 40).reshape(1, 3, 2).astype(np.int64)
y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.int64)
- self._compareBoth(x, y, np.subtract, tf.sub)
- self._compareBoth(x, y, np.multiply, tf.mul)
- self._compareBoth(x, y, np.true_divide, tf.truediv)
- self._compareBoth(x, y, np.floor_divide, tf.floordiv)
- self._compareBoth(x, y, np.mod, tf.mod)
+ self._compareBoth(x, y, np.subtract, math_ops.sub)
+ self._compareBoth(x, y, np.multiply, math_ops.mul)
+ self._compareBoth(x, y, np.true_divide, math_ops.truediv)
+ self._compareBoth(x, y, np.floor_divide, math_ops.floordiv)
+ self._compareBoth(x, y, np.mod, math_ops.mod)
self._compareBoth(x, y, np.subtract, _SUB)
self._compareBoth(x, y, np.multiply, _MUL)
self._compareBoth(x, y, np.true_divide, _TRUEDIV)
@@ -720,28 +727,28 @@ class BinaryOpTest(tf.test.TestCase):
self._compareBoth(x, y, np.mod, _MOD)
def testComplex64Basic(self):
- x = np.complex(1, 1) * np.linspace(-10, 10, 6).reshape(1, 3, 2).astype(
- np.complex64)
- y = np.complex(1, 1) * np.linspace(20, -20, 6).reshape(1, 3, 2).astype(
- np.complex64)
- self._compareBoth(x, y, np.add, tf.add)
- self._compareBoth(x, y, np.subtract, tf.sub)
- self._compareBoth(x, y, np.multiply, tf.mul)
- self._compareBoth(x, y + 0.1, np.true_divide, tf.truediv)
+ x = np.complex(1, 1) * np.linspace(-10, 10, 6).reshape(
+ 1, 3, 2).astype(np.complex64)
+ y = np.complex(1, 1) * np.linspace(20, -20, 6).reshape(
+ 1, 3, 2).astype(np.complex64)
+ self._compareBoth(x, y, np.add, math_ops.add)
+ self._compareBoth(x, y, np.subtract, math_ops.sub)
+ self._compareBoth(x, y, np.multiply, math_ops.mul)
+ self._compareBoth(x, y + 0.1, np.true_divide, math_ops.truediv)
self._compareBoth(x, y, np.add, _ADD)
self._compareBoth(x, y, np.subtract, _SUB)
self._compareBoth(x, y, np.multiply, _MUL)
self._compareBoth(x, y + 0.1, np.true_divide, _TRUEDIV)
def testComplex128Basic(self):
- x = np.complex(1, 1) * np.linspace(-10, 10, 6).reshape(1, 3, 2).astype(
- np.complex128)
- y = np.complex(1, 1) * np.linspace(20, -20, 6).reshape(1, 3, 2).astype(
- np.complex128)
- self._compareBoth(x, y, np.add, tf.add)
- self._compareBoth(x, y, np.subtract, tf.sub)
- self._compareBoth(x, y, np.multiply, tf.mul)
- self._compareBoth(x, y + 0.1, np.true_divide, tf.truediv)
+ x = np.complex(1, 1) * np.linspace(-10, 10, 6).reshape(
+ 1, 3, 2).astype(np.complex128)
+ y = np.complex(1, 1) * np.linspace(20, -20, 6).reshape(
+ 1, 3, 2).astype(np.complex128)
+ self._compareBoth(x, y, np.add, math_ops.add)
+ self._compareBoth(x, y, np.subtract, math_ops.sub)
+ self._compareBoth(x, y, np.multiply, math_ops.mul)
+ self._compareBoth(x, y + 0.1, np.true_divide, math_ops.truediv)
self._compareBoth(x, y, np.add, _ADD)
self._compareBoth(x, y, np.subtract, _SUB)
self._compareBoth(x, y, np.multiply, _MUL)
@@ -751,19 +758,21 @@ class BinaryOpTest(tf.test.TestCase):
x = np.array([["abc", "bh"], ["c", ""]])
y = np.array([["abc", "bh"], ["def", "hi"]])
with self.test_session(use_gpu=False) as sess:
- cmp_eq = tf.equal(x, y)
- cmp_not_eq = tf.not_equal(x, y)
+ cmp_eq = math_ops.equal(x, y)
+ cmp_not_eq = math_ops.not_equal(x, y)
values = sess.run([cmp_eq, cmp_not_eq])
self.assertAllEqual([[True, True], [False, False]], values[0])
self.assertAllEqual([[False, False], [True, True]], values[1])
def testString(self):
- x = np.array([["x_0_0", "x_0_1", "x_0_2"],
- ["x_1_0", "x_1_1", "x_1_2"],
- ["x_2_0", "x_2_1", "x_2_2"]], dtype=np.object)
- y = np.array([["y_0_0", "y_0_1", "y_0_2"],
- ["y_1_0", "y_1_1", "y_1_2"],
- ["y_2_0", "y_2_1", "y_2_2"]], dtype=np.object)
+ x = np.array(
+ [["x_0_0", "x_0_1", "x_0_2"], ["x_1_0", "x_1_1", "x_1_2"],
+ ["x_2_0", "x_2_1", "x_2_2"]],
+ dtype=np.object)
+ y = np.array(
+ [["y_0_0", "y_0_1", "y_0_2"], ["y_1_0", "y_1_1", "y_1_2"],
+ ["y_2_0", "y_2_1", "y_2_2"]],
+ dtype=np.object)
z = np.array([["z_0", "z_1", "z_2"]], dtype=np.object)
w = np.array("w", dtype=np.object)
self._compareCpu(x, y, _ADD, _ADD)
@@ -782,7 +791,7 @@ class BinaryOpTest(tf.test.TestCase):
if x.dtype in (np.float16, np.float32, np.float64):
# TODO(aselle): Make the test work for dtypes:
# (np.complex64, np.complex128).
- if tf_func not in (_FLOORDIV, tf.floordiv):
+ if tf_func not in (_FLOORDIV, math_ops.floordiv):
if x.dtype == np.float16:
# Compare fp16 theoretical gradients to fp32 numerical gradients,
# since fp16 numerical gradients are too imprecise unless great
@@ -810,37 +819,37 @@ class BinaryOpTest(tf.test.TestCase):
for dtype in dtypes:
for (np_func, tf_func) in funcs:
if (dtype in (np.complex64, np.complex128) and
- tf_func in (_FLOORDIV, tf.floordiv)):
+ tf_func in (_FLOORDIV, math_ops.floordiv)):
continue # floordiv makes no sense for complex numbers
self._compareBCast(xs, ys, dtype, np_func, tf_func)
self._compareBCast(ys, xs, dtype, np_func, tf_func)
def _testBCastA(self, xs, ys):
funcs = [
- (np.add, tf.add),
+ (np.add, math_ops.add),
(np.add, _ADD),
]
self._testBCastByFunc(funcs, xs, ys)
def _testBCastB(self, xs, ys):
funcs = [
- (np.subtract, tf.sub),
+ (np.subtract, math_ops.sub),
(np.subtract, _SUB),
- (np.power, tf.pow),
+ (np.power, math_ops.pow),
]
self._testBCastByFunc(funcs, xs, ys)
def _testBCastC(self, xs, ys):
funcs = [
- (np.multiply, tf.mul),
+ (np.multiply, math_ops.mul),
(np.multiply, _MUL),
]
self._testBCastByFunc(funcs, xs, ys)
def _testBCastD(self, xs, ys):
funcs = [
- (np.true_divide, tf.truediv),
- (np.floor_divide, tf.floordiv),
+ (np.true_divide, math_ops.truediv),
+ (np.floor_divide, math_ops.floordiv),
(np.true_divide, _TRUEDIV),
(np.floor_divide, _FLOORDIV),
]
@@ -1039,40 +1048,44 @@ class BinaryOpTest(tf.test.TestCase):
self._testBCastD([10, 3, 1, 2], [3, 1, 2])
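As an aside, these shape pairs follow ordinary numpy broadcasting, which is exactly what _compareBCast checks the TensorFlow ops against: dimensions are aligned from the right, and missing or size-1 axes are stretched to match. A numpy-only sketch:

    import numpy as np

    # [10, 3, 1, 2] vs [3, 1, 2]: the missing leading axis behaves like size 1.
    assert np.broadcast(np.empty([10, 3, 1, 2]), np.empty([3, 1, 2])).shape == (10, 3, 1, 2)
    # Missing leading axes are added, so [2] broadcasts against [1, 3, 2].
    assert np.broadcast(np.empty([1, 3, 2]), np.empty([2])).shape == (1, 3, 2)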
def testMismatchedDimensions(self):
- for func in [tf.add, tf.sub, tf.mul, tf.div, _ADD, _SUB, _MUL, _TRUEDIV,
- _FLOORDIV]:
+ for func in [
+ math_ops.add, math_ops.sub, math_ops.mul, math_ops.div, _ADD, _SUB,
+ _MUL, _TRUEDIV, _FLOORDIV
+ ]:
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Dimensions must" in str(e)):
- func(tf.convert_to_tensor([10.0, 20.0, 30.0]),
- tf.convert_to_tensor([[40.0, 50.0], [60.0, 70.0]]))
+ func(
+ ops.convert_to_tensor([10.0, 20.0, 30.0]),
+ ops.convert_to_tensor([[40.0, 50.0], [60.0, 70.0]]))
def testZeroPowGrad(self):
with self.test_session():
for dtype in (np.float16, np.float32, np.float64, np.complex64,
np.complex128):
- x = tf.constant(0.0, dtype=dtype)
- y = tf.constant(2.0, dtype=dtype)
- z = tf.pow(x, y)
- error = tf.test.compute_gradient_error(y, [], z, [])
+ x = constant_op.constant(0.0, dtype=dtype)
+ y = constant_op.constant(2.0, dtype=dtype)
+ z = math_ops.pow(x, y)
+ error = gradient_checker.compute_gradient_error(y, [], z, [])
self.assertEqual(error, 0)
def testComplexPowGrad(self):
with self.test_session():
for dtype in np.complex64, np.complex128:
for base in 2.0, -2.0:
- x = tf.constant(base, dtype=dtype)
- y = tf.constant(2.0, dtype=dtype)
- z = tf.pow(x, y)
- error = tf.test.compute_gradient_error(y, [], z, [])
+ x = constant_op.constant(base, dtype=dtype)
+ y = constant_op.constant(2.0, dtype=dtype)
+ z = math_ops.pow(x, y)
+ error = gradient_checker.compute_gradient_error(y, [], z, [])
self.assertLess(error, 2e-4)
-class ComparisonOpTest(tf.test.TestCase):
+class ComparisonOpTest(test.TestCase):
def _compare(self, func, x, y, dtype):
with self.test_session(use_gpu=False):
- out = func(tf.convert_to_tensor(np.array([x]).astype(dtype)),
- tf.convert_to_tensor(np.array([y]).astype(dtype)))
+ out = func(
+ ops.convert_to_tensor(np.array([x]).astype(dtype)),
+ ops.convert_to_tensor(np.array([y]).astype(dtype)))
ret = out.eval()
return ret[0]
@@ -1082,30 +1095,31 @@ class ComparisonOpTest(tf.test.TestCase):
for t in dtypes:
for x in data:
for y in data:
- self.assertEqual(self._compare(tf.less, x, y, t), x < y)
- self.assertEqual(self._compare(tf.less_equal, x, y, t), x <= y)
- self.assertEqual(self._compare(tf.greater, x, y, t), x > y)
- self.assertEqual(self._compare(tf.greater_equal, x, y, t), x >= y)
- self.assertEqual(self._compare(tf.equal, x, y, t), x == y)
- self.assertEqual(self._compare(tf.not_equal, x, y, t), x != y)
+ self.assertEqual(self._compare(math_ops.less, x, y, t), x < y)
+ self.assertEqual(self._compare(math_ops.less_equal, x, y, t), x <= y)
+ self.assertEqual(self._compare(math_ops.greater, x, y, t), x > y)
+ self.assertEqual(
+ self._compare(math_ops.greater_equal, x, y, t), x >= y)
+ self.assertEqual(self._compare(math_ops.equal, x, y, t), x == y)
+ self.assertEqual(self._compare(math_ops.not_equal, x, y, t), x != y)
data = [-1, 0, 1, -1j, 1j, 1 + 1j, 1 - 1j]
for t in [np.complex64, np.complex128]:
for x in data:
for y in data:
- self.assertEqual(self._compare(tf.equal, x, y, t), x == y)
- self.assertEqual(self._compare(tf.not_equal, x, y, t), x != y)
+ self.assertEqual(self._compare(math_ops.equal, x, y, t), x == y)
+ self.assertEqual(self._compare(math_ops.not_equal, x, y, t), x != y)
def _compareCpu(self, x, y, np_func, tf_func):
np_ans = np_func(x, y)
with self.test_session(use_gpu=False):
- out = tf_func(tf.convert_to_tensor(x), tf.convert_to_tensor(y))
+ out = tf_func(ops.convert_to_tensor(x), ops.convert_to_tensor(y))
tf_cpu = out.eval()
self.assertAllEqual(np_ans, tf_cpu)
def _compareGpu(self, x, y, np_func, tf_func):
np_ans = np_func(x, y)
with self.test_session(use_gpu=True):
- out = tf_func(tf.convert_to_tensor(x), tf.convert_to_tensor(y))
+ out = tf_func(ops.convert_to_tensor(x), ops.convert_to_tensor(y))
tf_gpu = out.eval()
self.assertAllEqual(np_ans, tf_gpu)
@@ -1120,16 +1134,17 @@ class ComparisonOpTest(tf.test.TestCase):
for t in [np.float16, np.float32, np.float64, np.int32, np.int64]:
xt = x.astype(t)
yt = y.astype(t)
- self._compareBoth(xt, yt, np.less, tf.less)
- self._compareBoth(xt, yt, np.less_equal, tf.less_equal)
- self._compareBoth(xt, yt, np.greater, tf.greater)
- self._compareBoth(xt, yt, np.greater_equal, tf.greater_equal)
- self._compareBoth(xt, yt, np.equal, tf.equal)
- self._compareBoth(xt, yt, np.not_equal, tf.not_equal)
+ self._compareBoth(xt, yt, np.less, math_ops.less)
+ self._compareBoth(xt, yt, np.less_equal, math_ops.less_equal)
+ self._compareBoth(xt, yt, np.greater, math_ops.greater)
+ self._compareBoth(xt, yt, np.greater_equal, math_ops.greater_equal)
+ self._compareBoth(xt, yt, np.equal, math_ops.equal)
+ self._compareBoth(xt, yt, np.not_equal, math_ops.not_equal)
# TODO(zhifengc): complex64 doesn't work on GPU yet.
for t in [np.complex64, np.complex128]:
- self._compareCpu(x.astype(t), y.astype(t), np.equal, tf.equal)
- self._compareCpu(x.astype(t), y.astype(t), np.not_equal, tf.not_equal)
+ self._compareCpu(x.astype(t), y.astype(t), np.equal, math_ops.equal)
+ self._compareCpu(
+ x.astype(t), y.astype(t), np.not_equal, math_ops.not_equal)
def _compareBCast(self, xs, ys, dtype, np_func, tf_func):
x = np.linspace(-15, 15, np.prod(xs)).astype(dtype).reshape(xs)
@@ -1166,27 +1181,29 @@ class ComparisonOpTest(tf.test.TestCase):
self._compareBCast(xs, ys, dtype, np_func, tf_func)
def testBCastLess(self):
- self._testBCastByFunc(np.less, tf.less)
+ self._testBCastByFunc(np.less, math_ops.less)
def testBCastLessEqual(self):
- self._testBCastByFunc(np.less_equal, tf.less_equal)
+ self._testBCastByFunc(np.less_equal, math_ops.less_equal)
def testBCastGreater(self):
- self._testBCastByFunc(np.greater, tf.greater)
+ self._testBCastByFunc(np.greater, math_ops.greater)
def testBCastGreaterEqual(self):
- self._testBCastByFunc(np.greater_equal, tf.greater_equal)
+ self._testBCastByFunc(np.greater_equal, math_ops.greater_equal)
def testBCastEqual(self):
- self._testBCastByFunc(np.equal, tf.equal)
+ self._testBCastByFunc(np.equal, math_ops.equal)
def testBCastNotEqual(self):
- self._testBCastByFunc(np.not_equal, tf.not_equal)
+ self._testBCastByFunc(np.not_equal, math_ops.not_equal)
def testShapeMismatch(self):
dtypes = [np.float16, np.float32, np.float64, np.int32, np.int64]
- funcs = [tf.less, tf.less_equal, tf.greater,
- tf.greater_equal, tf.equal, tf.not_equal]
+ funcs = [
+ math_ops.less, math_ops.less_equal, math_ops.greater,
+ math_ops.greater_equal, math_ops.equal, math_ops.not_equal
+ ]
x = np.arange(0, 10).reshape([2, 5])
y = np.arange(0, 10).reshape([5, 2])
for t in dtypes:
@@ -1196,25 +1213,25 @@ class ComparisonOpTest(tf.test.TestCase):
f(x.astype(t), y.astype(t))
-class LogicalOpTest(tf.test.TestCase):
+class LogicalOpTest(test.TestCase):
def _compareBinary(self, x, y, np_func, tf_func, use_gpu=False):
np_ans = np_func(x, y)
with self.test_session(use_gpu=use_gpu):
- inx = tf.convert_to_tensor(x)
- iny = tf.convert_to_tensor(y)
+ inx = ops.convert_to_tensor(x)
+ iny = ops.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_val = out.eval()
- self.assertEqual(out.dtype, tf.bool)
+ self.assertEqual(out.dtype, dtypes_lib.bool)
self.assertAllEqual(np_ans, tf_val)
self.assertShapeEqual(np_ans, out)
def _not(self, x, use_gpu=False):
np_ans = np.logical_not(x)
with self.test_session(use_gpu=use_gpu):
- out = tf.logical_not(tf.convert_to_tensor(x))
+ out = math_ops.logical_not(ops.convert_to_tensor(x))
tf_val = out.eval()
- self.assertEqual(out.dtype, tf.bool)
+ self.assertEqual(out.dtype, dtypes_lib.bool)
self.assertAllEqual(np_ans, tf_val)
self.assertShapeEqual(np_ans, out)
@@ -1225,21 +1242,20 @@ class LogicalOpTest(tf.test.TestCase):
self._not(x, use_gpu)
for x in data:
for y in data:
- self._compareBinary(
- x, y, np.logical_and, tf.logical_and, use_gpu)
- self._compareBinary(
- x, y, np.logical_or, tf.logical_or, use_gpu)
- self._compareBinary(
- x, y, np.logical_xor, tf.logical_xor, use_gpu)
+ self._compareBinary(x, y, np.logical_and, math_ops.logical_and,
+ use_gpu)
+ self._compareBinary(x, y, np.logical_or, math_ops.logical_or, use_gpu)
+ self._compareBinary(x, y, np.logical_xor, math_ops.logical_xor,
+ use_gpu)
def testTensor(self):
x = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
y = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
for use_gpu in [True, False]:
self._not(x, use_gpu)
- self._compareBinary(x, y, np.logical_and, tf.logical_and, use_gpu)
- self._compareBinary(x, y, np.logical_or, tf.logical_or, use_gpu)
- self._compareBinary(x, y, np.logical_xor, tf.logical_xor, use_gpu)
+ self._compareBinary(x, y, np.logical_and, math_ops.logical_and, use_gpu)
+ self._compareBinary(x, y, np.logical_or, math_ops.logical_or, use_gpu)
+ self._compareBinary(x, y, np.logical_xor, math_ops.logical_xor, use_gpu)
def testBCast(self):
shapes = [
@@ -1259,14 +1275,14 @@ class LogicalOpTest(tf.test.TestCase):
x = np.random.randint(0, 2, np.prod(xs)).astype(np.bool).reshape(xs)
y = np.random.randint(0, 2, np.prod(ys)).astype(np.bool).reshape(ys)
for use_gpu in [True, False]:
- self._compareBinary(x, y, np.logical_and, tf.logical_and, use_gpu)
- self._compareBinary(x, y, np.logical_or, tf.logical_or, use_gpu)
- self._compareBinary(x, y, np.logical_xor, tf.logical_xor, use_gpu)
+ self._compareBinary(x, y, np.logical_and, math_ops.logical_and, use_gpu)
+ self._compareBinary(x, y, np.logical_or, math_ops.logical_or, use_gpu)
+ self._compareBinary(x, y, np.logical_xor, math_ops.logical_xor, use_gpu)
def testShapeMismatch(self):
x = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
y = np.random.randint(0, 2, 6).astype(np.bool).reshape(3, 2, 1)
- for f in [tf.logical_and, tf.logical_or, tf.logical_xor]:
+ for f in [math_ops.logical_and, math_ops.logical_or, math_ops.logical_xor]:
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Dimensions must" in str(e)):
f(x, y)
@@ -1274,18 +1290,18 @@ class LogicalOpTest(tf.test.TestCase):
def testUsingAsPythonValueFails(self):
# Ensure that we raise an error when the user attempts to treat a
# `Tensor` as a Python `bool`.
- b = tf.constant(False)
+ b = constant_op.constant(False)
with self.assertRaises(TypeError):
if b:
pass
- x = tf.constant(3)
- y = tf.constant(4)
+ x = constant_op.constant(3)
+ y = constant_op.constant(4)
with self.assertRaises(TypeError):
if x > y:
pass
- z = tf.constant(7)
+ z = constant_op.constant(7)
# The chained comparison should fail because Python computes `x <
# y` and short-circuits the comparison with `z` if it is `False`.
@@ -1293,38 +1309,32 @@ class LogicalOpTest(tf.test.TestCase):
_ = x < y < z
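A minimal sketch of why that chained comparison fails (a hypothetical standalone snippet, reusing the constant_op import this file already has): Python evaluates x < y < z as (x < y) and (y < z), and the `and` must coerce the intermediate Tensor to a Python bool, which raises the TypeError asserted above.

    x = constant_op.constant(3)
    y = constant_op.constant(4)
    z = constant_op.constant(7)
    lt = x < y                  # a bool Tensor, not a Python bool
    _ = bool(lt) and (y < z)    # what `x < y < z` does implicitly; raises TypeError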
-class SelectOpTest(tf.test.TestCase):
+class SelectOpTest(test.TestCase):
def _compare(self, c, x, y, use_gpu):
np_ans = np.where(c, x, y)
with self.test_session(use_gpu=use_gpu):
- out = tf.where(c, x, y)
+ out = array_ops.where(c, x, y)
tf_ans = out.eval()
self.assertAllEqual(np_ans, tf_ans)
self.assertShapeEqual(np_ans, out)
def _compareGradientX(self, c, x, y, numeric_gradient_type=None):
with self.test_session():
- inx = tf.convert_to_tensor(x)
- iny = tf.convert_to_tensor(y)
- out = tf.where(c, inx, iny)
+ inx = ops.convert_to_tensor(x)
+ iny = ops.convert_to_tensor(y)
+ out = array_ops.where(c, inx, iny)
s = list(np.shape(c))
- jacob_t, jacob_n = tf.test.compute_gradient(inx,
- s,
- out,
- s,
- x_init_value=x)
+ jacob_t, jacob_n = gradient_checker.compute_gradient(
+ inx, s, out, s, x_init_value=x)
if numeric_gradient_type is not None:
xf = x.astype(numeric_gradient_type)
yf = y.astype(numeric_gradient_type)
- inxf = tf.convert_to_tensor(xf)
- inyf = tf.convert_to_tensor(yf)
- outf = tf.where(c, inxf, inyf)
- _, jacob_n = tf.test.compute_gradient(inxf,
- s,
- outf,
- s,
- x_init_value=xf)
+ inxf = ops.convert_to_tensor(xf)
+ inyf = ops.convert_to_tensor(yf)
+ outf = array_ops.where(c, inxf, inyf)
+ _, jacob_n = gradient_checker.compute_gradient(
+ inxf, s, outf, s, x_init_value=xf)
jacob_n = jacob_n.astype(x.dtype)
if x.dtype == np.float16:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
@@ -1335,27 +1345,20 @@ class SelectOpTest(tf.test.TestCase):
def _compareGradientY(self, c, x, y, numeric_gradient_type=None):
with self.test_session():
- inx = tf.convert_to_tensor(x)
- iny = tf.convert_to_tensor(y)
- out = tf.where(c, inx, iny)
+ inx = ops.convert_to_tensor(x)
+ iny = ops.convert_to_tensor(y)
+ out = array_ops.where(c, inx, iny)
s = list(np.shape(c))
- jacob_t, jacob_n = tf.test.compute_gradient(iny,
- s,
- out,
- s,
- x_init_value=y,
- delta=1.0)
+ jacob_t, jacob_n = gradient_checker.compute_gradient(
+ iny, s, out, s, x_init_value=y, delta=1.0)
if numeric_gradient_type is not None:
xf = x.astype(numeric_gradient_type)
yf = y.astype(numeric_gradient_type)
- inxf = tf.convert_to_tensor(xf)
- inyf = tf.convert_to_tensor(yf)
- outf = tf.where(c, inxf, inyf)
- _, jacob_n = tf.test.compute_gradient(inyf,
- s,
- outf,
- s,
- x_init_value=yf)
+ inxf = ops.convert_to_tensor(xf)
+ inyf = ops.convert_to_tensor(yf)
+ outf = array_ops.where(c, inxf, inyf)
+ _, jacob_n = gradient_checker.compute_gradient(
+ inyf, s, outf, s, x_init_value=yf)
jacob_n = jacob_n.astype(x.dtype)
if x.dtype == np.float16:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
@@ -1368,8 +1371,10 @@ class SelectOpTest(tf.test.TestCase):
c = True
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 3, 2) * 100
- for t in [np.float16, np.float32, np.float64, np.int32, np.int64,
- np.complex64, np.complex128]:
+ for t in [
+ np.float16, np.float32, np.float64, np.int32, np.int64, np.complex64,
+ np.complex128
+ ]:
xt = x.astype(t)
yt = y.astype(t)
self._compare(c, xt, yt, use_gpu=False)
@@ -1380,8 +1385,10 @@ class SelectOpTest(tf.test.TestCase):
c = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 3, 2) * 100
- for t in [np.float16, np.float32, np.float64, np.int32, np.int64,
- np.complex64, np.complex128]:
+ for t in [
+ np.float16, np.float32, np.float64, np.int32, np.int64, np.complex64,
+ np.complex128
+ ]:
xt = x.astype(t)
yt = y.astype(t)
self._compare(c, xt, yt, use_gpu=False)
@@ -1411,12 +1418,14 @@ class SelectOpTest(tf.test.TestCase):
c = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(2, 5, 3) * 100
- for t in [np.float16, np.float32, np.float64, np.int32, np.int64,
- np.complex64, np.complex128]:
+ for t in [
+ np.float16, np.float32, np.float64, np.int32, np.int64, np.complex64,
+ np.complex128
+ ]:
xt = x.astype(t)
yt = y.astype(t)
with self.assertRaises(ValueError):
- tf.where(c, xt, yt)
+ array_ops.where(c, xt, yt)
def testEmptyTensor(self):
c = np.random.randint(0, 3, 0).astype(np.bool).reshape(1, 3, 0)
@@ -1426,7 +1435,7 @@ class SelectOpTest(tf.test.TestCase):
with self.test_session():
xt = x.astype(np.float32)
yt = y.astype(np.float32)
- z = tf.where(c, xt, yt).eval()
+ z = array_ops.where(c, xt, yt).eval()
self.assertAllEqual(z_expected, z)
def testNan(self):
@@ -1435,12 +1444,12 @@ class SelectOpTest(tf.test.TestCase):
for c in False, True:
for a in 7.0, np.nan:
for b in 5.0, np.nan:
- x = tf.where(c, a, b).eval()
+ x = array_ops.where(c, a, b).eval()
y = a if c else b
self.assertEqual(np.isnan(x), np.isnan(y))
-class BatchSelectOpTest(tf.test.TestCase):
+class BatchSelectOpTest(test.TestCase):
"""Test broadcasting of Select when 'c' is a vec and 't' &'e' are rank2+."""
def _compare(self, c, x, y, use_gpu):
@@ -1448,33 +1457,27 @@ class BatchSelectOpTest(tf.test.TestCase):
[x_i if c_i else y_i for c_i, x_i, y_i in zip(c, x, y)]).transpose(
[2, 0, 1])
with self.test_session(use_gpu=use_gpu):
- out = tf.where(c, x, y)
+ out = array_ops.where(c, x, y)
tf_ans = out.eval()
self.assertAllEqual(np_ans, tf_ans)
self.assertShapeEqual(np_ans, out)
def _compareGradientX(self, c, x, y, numeric_gradient_type=None):
with self.test_session():
- inx = tf.convert_to_tensor(x)
- iny = tf.convert_to_tensor(y)
- out = tf.where(c, inx, iny)
+ inx = ops.convert_to_tensor(x)
+ iny = ops.convert_to_tensor(y)
+ out = array_ops.where(c, inx, iny)
s = list(np.shape(x))
- jacob_t, jacob_n = tf.test.compute_gradient(inx,
- s,
- out,
- s,
- x_init_value=x)
+ jacob_t, jacob_n = gradient_checker.compute_gradient(
+ inx, s, out, s, x_init_value=x)
if numeric_gradient_type is not None:
xf = x.astype(numeric_gradient_type)
yf = y.astype(numeric_gradient_type)
- inxf = tf.convert_to_tensor(xf)
- inyf = tf.convert_to_tensor(yf)
- outf = tf.where(c, inxf, inyf)
- _, jacob_n = tf.test.compute_gradient(inxf,
- s,
- outf,
- s,
- x_init_value=xf)
+ inxf = ops.convert_to_tensor(xf)
+ inyf = ops.convert_to_tensor(yf)
+ outf = array_ops.where(c, inxf, inyf)
+ _, jacob_n = gradient_checker.compute_gradient(
+ inxf, s, outf, s, x_init_value=xf)
jacob_n = jacob_n.astype(x.dtype)
if x.dtype == np.float16:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
@@ -1485,26 +1488,20 @@ class BatchSelectOpTest(tf.test.TestCase):
def _compareGradientY(self, c, x, y, numeric_gradient_type=None):
with self.test_session():
- inx = tf.convert_to_tensor(x)
- iny = tf.convert_to_tensor(y)
- out = tf.where(c, inx, iny)
+ inx = ops.convert_to_tensor(x)
+ iny = ops.convert_to_tensor(y)
+ out = array_ops.where(c, inx, iny)
s = list(np.shape(x))
- jacob_t, jacob_n = tf.test.compute_gradient(iny,
- s,
- out,
- s,
- x_init_value=y)
+ jacob_t, jacob_n = gradient_checker.compute_gradient(
+ iny, s, out, s, x_init_value=y)
if numeric_gradient_type is not None:
xf = x.astype(numeric_gradient_type)
yf = y.astype(numeric_gradient_type)
- inxf = tf.convert_to_tensor(xf)
- inyf = tf.convert_to_tensor(yf)
- outf = tf.where(c, inxf, inyf)
- _, jacob_n = tf.test.compute_gradient(inyf,
- s,
- outf,
- s,
- x_init_value=yf)
+ inxf = ops.convert_to_tensor(xf)
+ inyf = ops.convert_to_tensor(yf)
+ outf = array_ops.where(c, inxf, inyf)
+ _, jacob_n = gradient_checker.compute_gradient(
+ inyf, s, outf, s, x_init_value=yf)
jacob_n = jacob_n.astype(x.dtype)
if x.dtype == np.float16:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
@@ -1517,8 +1514,10 @@ class BatchSelectOpTest(tf.test.TestCase):
c = np.random.randint(0, 2, 16).astype(np.bool)
x = np.random.rand(16, 2, 8) * 100
y = np.random.rand(16, 2, 8) * 100
- for t in [np.float16, np.float32, np.float64, np.int32, np.int64,
- np.complex64, np.complex128]:
+ for t in [
+ np.float16, np.float32, np.float64, np.int32, np.int64, np.complex64,
+ np.complex128
+ ]:
xt = x.astype(t)
yt = y.astype(t)
self._compare(c, xt, yt, use_gpu=False)
@@ -1548,22 +1547,24 @@ class BatchSelectOpTest(tf.test.TestCase):
c = np.random.randint(0, 2, 8).astype(np.bool)
x = np.random.rand(16, 3, 2) * 100
y = np.random.rand(16, 3, 2) * 100
- for t in [np.float16, np.float32, np.float64, np.int32, np.int64,
- np.complex64, np.complex128]:
+ for t in [
+ np.float16, np.float32, np.float64, np.int32, np.int64, np.complex64,
+ np.complex128
+ ]:
xt = x.astype(t)
yt = y.astype(t)
with self.assertRaises(ValueError):
- tf.where(c, xt, yt)
+ array_ops.where(c, xt, yt)
-class MinMaxOpTest(tf.test.TestCase):
+class MinMaxOpTest(test.TestCase):
def _compare(self, x, y, use_gpu):
np_min, np_max = np.minimum(x, y), np.maximum(x, y)
with self.test_session(use_gpu=use_gpu) as sess:
- inx = tf.convert_to_tensor(x)
- iny = tf.convert_to_tensor(y)
- omin, omax = tf.minimum(inx, iny), tf.maximum(inx, iny)
+ inx = ops.convert_to_tensor(x)
+ iny = ops.convert_to_tensor(y)
+ omin, omax = math_ops.minimum(inx, iny), math_ops.maximum(inx, iny)
tf_min, tf_max = sess.run([omin, omax])
self.assertAllEqual(np_min, tf_min)
self.assertAllEqual(np_max, tf_max)
@@ -1592,15 +1593,12 @@ class MinMaxOpTest(tf.test.TestCase):
def _compareGradientX(self, func, x, y):
with self.test_session():
- inx = tf.convert_to_tensor(x)
- iny = tf.convert_to_tensor(y)
+ inx = ops.convert_to_tensor(x)
+ iny = ops.convert_to_tensor(y)
out = func(inx, iny)
s = list(np.shape(x))
- jacob_t, jacob_n = tf.test.compute_gradient(inx,
- s,
- out,
- s,
- x_init_value=x)
+ jacob_t, jacob_n = gradient_checker.compute_gradient(
+ inx, s, out, s, x_init_value=x)
if x.dtype == np.float16:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float32:
@@ -1610,15 +1608,12 @@ class MinMaxOpTest(tf.test.TestCase):
def _compareGradientY(self, func, x, y):
with self.test_session():
- inx = tf.convert_to_tensor(x)
- iny = tf.convert_to_tensor(y)
+ inx = ops.convert_to_tensor(x)
+ iny = ops.convert_to_tensor(y)
out = func(inx, iny)
s = list(np.shape(x))
- jacob_t, jacob_n = tf.test.compute_gradient(iny,
- s,
- out,
- s,
- x_init_value=y)
+ jacob_t, jacob_n = gradient_checker.compute_gradient(
+ iny, s, out, s, x_init_value=y)
if x.dtype == np.float16:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float32:
@@ -1630,47 +1625,49 @@ class MinMaxOpTest(tf.test.TestCase):
x = np.random.rand(1, 3, 2) * 100.
# ensure x != y
y = x + (np.random.randint(2, size=x.shape) - .5) * 2 # -1 or +1
- self._compareGradientX(tf.maximum, x, y)
- self._compareGradientY(tf.maximum, x, y)
- self._compareGradientX(tf.minimum, x, y)
- self._compareGradientY(tf.minimum, x, y)
+ self._compareGradientX(math_ops.maximum, x, y)
+ self._compareGradientY(math_ops.maximum, x, y)
+ self._compareGradientX(math_ops.minimum, x, y)
+ self._compareGradientY(math_ops.minimum, x, y)
-class MathOpsOverloadTest(tf.test.TestCase):
+class MathOpsOverloadTest(test.TestCase):
def _computeTensorAndLiteral(self, x, y, dtype, func):
with self.test_session(use_gpu=False):
- inx = tf.convert_to_tensor(x, dtype=dtype)
+ inx = ops.convert_to_tensor(x, dtype=dtype)
z = func(inx, y) # Should use __add__, __sub__, etc.
return z.eval()
def _computeLiteralAndTensor(self, x, y, dtype, func):
with self.test_session(use_gpu=False):
- iny = tf.convert_to_tensor(y, dtype=dtype)
+ iny = ops.convert_to_tensor(y, dtype=dtype)
z = func(x, iny) # Should use __radd__, __rsub__, etc.
return z.eval()
def _compareBinary(self, x, y, dtype, np_func, tf_func):
np_ans = np_func(x, y).astype(dtype.as_numpy_dtype)
- self.assertAllClose(np_ans, self._computeTensorAndLiteral(
- x, y, dtype, tf_func))
- self.assertAllClose(np_ans, self._computeLiteralAndTensor(
- x, y, dtype, tf_func))
+ self.assertAllClose(np_ans,
+ self._computeTensorAndLiteral(x, y, dtype, tf_func))
+ self.assertAllClose(np_ans,
+ self._computeLiteralAndTensor(x, y, dtype, tf_func))
def _compareUnary(self, x, dtype, np_func, tf_func):
np_ans = np_func(x).astype(dtype.as_numpy_dtype)
with self.test_session(use_gpu=False):
- self.assertAllClose(np_ans, tf_func(tf.convert_to_tensor(x, dtype=dtype)).eval())
+ self.assertAllClose(
+ np_ans, tf_func(ops.convert_to_tensor(
+ x, dtype=dtype)).eval())
def testOverload(self):
dtypes = [
- tf.float16,
- tf.float32,
- tf.float64,
- tf.int32,
- tf.int64,
- tf.complex64,
- tf.complex128,
+ dtypes_lib.float16,
+ dtypes_lib.float32,
+ dtypes_lib.float64,
+ dtypes_lib.int32,
+ dtypes_lib.int64,
+ dtypes_lib.complex64,
+ dtypes_lib.complex128,
]
funcs = [
(np.add, _ADD),
@@ -1682,20 +1679,21 @@ class MathOpsOverloadTest(tf.test.TestCase):
]
for dtype in dtypes:
for np_func, tf_func in funcs:
- if dtype in (tf.complex64, tf.complex128) and tf_func == _FLOORDIV:
+ if dtype in (dtypes_lib.complex64, dtypes_lib.complex128
+ ) and tf_func == _FLOORDIV:
continue # floordiv makes no sense for complex
self._compareBinary(10, 5, dtype, np_func, tf_func)
# Mod only works for int32 and int64.
- for dtype in [tf.int32, tf.int64]:
+ for dtype in [dtypes_lib.int32, dtypes_lib.int64]:
self._compareBinary(10, 3, dtype, np.mod, _MOD)
def testOverloadComparisons(self):
dtypes = [
- tf.float16,
- tf.float32,
- tf.float64,
- tf.int32,
- tf.int64,
+ dtypes_lib.float16,
+ dtypes_lib.float32,
+ dtypes_lib.float64,
+ dtypes_lib.int32,
+ dtypes_lib.int64,
]
funcs = [
(np.less, _LT),
@@ -1706,34 +1704,30 @@ class MathOpsOverloadTest(tf.test.TestCase):
for dtype in dtypes:
for np_func, tf_func in funcs:
self._compareBinary(10, 5, dtype, np_func, tf_func)
- logical_funcs = [
- (np.logical_and, _AND),
- (np.logical_or, _OR),
- (np.logical_xor, _XOR),
- (np.equal, tf.equal),
- (np.not_equal, tf.not_equal)
- ]
+ logical_funcs = [(np.logical_and, _AND), (np.logical_or, _OR),
+ (np.logical_xor, _XOR), (np.equal, math_ops.equal),
+ (np.not_equal, math_ops.not_equal)]
for np_func, tf_func in logical_funcs:
- self._compareBinary(True, False, tf.bool, np_func, tf_func)
- self._compareBinary(True, True, tf.bool, np_func, tf_func)
- self._compareBinary(False, False, tf.bool, np_func, tf_func)
- self._compareBinary(False, True, tf.bool, np_func, tf_func)
+ self._compareBinary(True, False, dtypes_lib.bool, np_func, tf_func)
+ self._compareBinary(True, True, dtypes_lib.bool, np_func, tf_func)
+ self._compareBinary(False, False, dtypes_lib.bool, np_func, tf_func)
+ self._compareBinary(False, True, dtypes_lib.bool, np_func, tf_func)
self._compareBinary([True, True, False, False],
- [True, False, True, False],
- tf.bool, np_func, tf_func)
- self._compareUnary(True, tf.bool, np.logical_not, _INV)
- self._compareUnary(False, tf.bool, np.logical_not, _INV)
- self._compareUnary([True, False], tf.bool, np.logical_not, _INV)
+ [True, False, True, False], dtypes_lib.bool, np_func,
+ tf_func)
+ self._compareUnary(True, dtypes_lib.bool, np.logical_not, _INV)
+ self._compareUnary(False, dtypes_lib.bool, np.logical_not, _INV)
+ self._compareUnary([True, False], dtypes_lib.bool, np.logical_not, _INV)
-class IsFiniteInfNanTest(tf.test.TestCase):
+class IsFiniteInfNanTest(test.TestCase):
def _compare(self, x, use_gpu):
np_finite, np_inf, np_nan = np.isfinite(x), np.isinf(x), np.isnan(x)
with self.test_session(use_gpu=use_gpu) as sess:
- inx = tf.convert_to_tensor(x)
- ofinite, oinf, onan = tf.is_finite(inx), tf.is_inf(
- inx), tf.is_nan(inx)
+ inx = ops.convert_to_tensor(x)
+ ofinite, oinf, onan = math_ops.is_finite(inx), math_ops.is_inf(
+ inx), math_ops.is_nan(inx)
tf_finite, tf_inf, tf_nan = sess.run([ofinite, oinf, onan])
self.assertAllEqual(np_inf, tf_inf)
self.assertAllEqual(np_nan, tf_nan)
@@ -1744,8 +1738,10 @@ class IsFiniteInfNanTest(tf.test.TestCase):
def _testDtype(self, dtype):
fi = np.finfo(dtype)
- data = np.array([0, -1, 1, fi.resolution, -fi.resolution, fi.min, fi.max,
- -np.inf, np.inf, np.nan]).astype(dtype)
+ data = np.array([
+ 0, -1, 1, fi.resolution, -fi.resolution, fi.min, fi.max, -np.inf,
+ np.inf, np.nan
+ ]).astype(dtype)
self._compare(data, use_gpu=False)
self._compare(data, use_gpu=True)
@@ -1765,25 +1761,25 @@ class IsFiniteInfNanTest(tf.test.TestCase):
# For float32 Eigen uses Carmack's fast vectorized sqrt algorithm.
# It is not accurate for very large arguments, so we test for
# fi.max/100 instead of fi.max here.
- for value in [fi.min, -2, -1, 0, fi.tiny, 1, 2, 1000, fi.max/100]:
+ for value in [fi.min, -2, -1, 0, fi.tiny, 1, 2, 1000, fi.max / 100]:
x = np.full((size,), value, dtype=dtype)
np_y = np.sqrt(x)
np_nan = np.isnan(np_y)
with self.test_session(use_gpu=True):
- tf_y = tf.sqrt(x)
- tf_nan = tf.is_nan(tf_y)
+ tf_y = math_ops.sqrt(x)
+ tf_nan = math_ops.is_nan(tf_y)
if value < 0:
self.assertAllEqual(np_nan, tf_nan.eval())
else:
self.assertAllCloseAccordingToType(np_y, tf_y.eval())
-class RoundingTest(tf.test.TestCase):
+class RoundingTest(test.TestCase):
def _compare_values(self, x, y=None):
y = np.rint(x) if y is None else np.asarray(y)
with self.test_session() as sess:
- tf_rint = tf.rint(x)
+ tf_rint = math_ops.rint(x)
np_rint = sess.run(tf_rint)
self.assertAllEqual(y, np_rint)
self.assertShapeEqual(y, tf_rint)
@@ -1791,8 +1787,8 @@ class RoundingTest(tf.test.TestCase):
def _compare(self, x):
np_floor, np_ceil = np.floor(x), np.ceil(x)
with self.test_session() as sess:
- inx = tf.convert_to_tensor(x)
- ofloor, oceil = tf.floor(inx), tf.ceil(inx)
+ inx = ops.convert_to_tensor(x)
+ ofloor, oceil = math_ops.floor(inx), math_ops.ceil(inx)
tf_floor, tf_ceil = sess.run([ofloor, oceil])
self.assertAllEqual(np_floor, tf_floor)
self.assertAllEqual(np_ceil, tf_ceil)
@@ -1812,7 +1808,7 @@ class RoundingTest(tf.test.TestCase):
# numpy example
x = [-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]
- y = [-2., -2., -0., 0., 2., 2., 2.]
+ y = [-2., -2., -0., 0., 2., 2., 2.]
self._compare_values(x, y=y)
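The expected values above are consistent with round-half-to-even ("banker's rounding"), which np.rint is documented to use and which these tests assert math_ops.rint matches; a quick numpy-only check:

    import numpy as np

    # Halfway cases round to the nearest even integer.
    assert list(np.rint([0.5, 1.5, 2.5, -1.5])) == [0.0, 2.0, 2.0, -2.0]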
def testTypes(self):
@@ -1820,14 +1816,14 @@ class RoundingTest(tf.test.TestCase):
self._testDtype(dtype)
-class ComplexMakeRealImagTest(tf.test.TestCase):
+class ComplexMakeRealImagTest(test.TestCase):
def _compareMake(self, real, imag, use_gpu):
np_ans = real + (1j) * imag
with self.test_session(use_gpu=use_gpu):
- real = tf.convert_to_tensor(real)
- imag = tf.convert_to_tensor(imag)
- tf_ans = tf.complex(real, imag)
+ real = ops.convert_to_tensor(real)
+ imag = ops.convert_to_tensor(imag)
+ tf_ans = math_ops.complex(real, imag)
out = tf_ans.eval()
self.assertAllEqual(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
@@ -1843,9 +1839,9 @@ class ComplexMakeRealImagTest(tf.test.TestCase):
def _compareRealImag(self, cplx, use_gpu):
np_real, np_imag = np.real(cplx), np.imag(cplx)
with self.test_session(use_gpu=use_gpu) as sess:
- inx = tf.convert_to_tensor(cplx)
- tf_real = tf.real(inx)
- tf_imag = tf.imag(inx)
+ inx = ops.convert_to_tensor(cplx)
+ tf_real = math_ops.real(inx)
+ tf_imag = math_ops.imag(inx)
tf_real_val, tf_imag_val = sess.run([tf_real, tf_imag])
self.assertAllEqual(np_real, tf_real_val)
self.assertAllEqual(np_imag, tf_imag_val)
@@ -1867,16 +1863,16 @@ class ComplexMakeRealImagTest(tf.test.TestCase):
self._compareRealImag(cplx, use_gpu=True)
def testRealReal(self):
- for dtype in tf.int32, tf.int64, tf.float32, tf.float64:
- x = tf.placeholder(dtype)
- y = tf.real(x)
+ for dtype in dtypes_lib.int32, dtypes_lib.int64, dtypes_lib.float32, dtypes_lib.float64:
+ x = array_ops.placeholder(dtype)
+ y = math_ops.real(x)
self.assertEqual(x, y)
def _compareConj(self, cplx, use_gpu):
np_ans = np.conj(cplx)
with self.test_session(use_gpu=use_gpu):
- inx = tf.convert_to_tensor(cplx)
- tf_conj = tf.conj(inx)
+ inx = ops.convert_to_tensor(cplx)
+ tf_conj = math_ops.conj(inx)
tf_ans = tf_conj.eval()
self.assertAllEqual(np_ans, tf_ans)
self.assertShapeEqual(np_ans, tf_conj)
@@ -1896,15 +1892,15 @@ class ComplexMakeRealImagTest(tf.test.TestCase):
self._compareConj(cplx, use_gpu=True)
def testConjReal(self):
- for dtype in tf.int32, tf.int64, tf.float16, tf.float32, tf.float64:
- x = tf.placeholder(dtype)
- y = tf.conj(x)
+ for dtype in dtypes_lib.int32, dtypes_lib.int64, dtypes_lib.float16, dtypes_lib.float32, dtypes_lib.float64:
+ x = array_ops.placeholder(dtype)
+ y = math_ops.conj(x)
self.assertEqual(x, y)
def testConjString(self):
- x = tf.placeholder(tf.string)
+ x = array_ops.placeholder(dtypes_lib.string)
with self.assertRaisesRegexp(TypeError, r"Expected numeric tensor"):
- tf.conj(x)
+ math_ops.conj(x)
def _compareGradient(self, x):
# x[:, 0] is real, x[:, 1] is imag. We combine real and imag into
@@ -1913,35 +1909,26 @@ class ComplexMakeRealImagTest(tf.test.TestCase):
# * real) + sum(imag * imag). We just want to make sure the
# gradient function is checked.
with self.test_session():
- inx = tf.convert_to_tensor(x)
- real, imag = tf.split(value=inx, num_or_size_splits=2, axis=1)
- real, imag = tf.reshape(real, [-1]), tf.reshape(imag, [-1])
- cplx = tf.complex(real, imag)
- cplx = tf.conj(cplx)
- loss = tf.reduce_sum(
- tf.square(tf.real(cplx))) + tf.reduce_sum(
- tf.square(tf.imag(cplx)))
+ inx = ops.convert_to_tensor(x)
+ real, imag = array_ops.split(value=inx, num_or_size_splits=2, axis=1)
+ real, imag = array_ops.reshape(real, [-1]), array_ops.reshape(imag, [-1])
+ cplx = math_ops.complex(real, imag)
+ cplx = math_ops.conj(cplx)
+ loss = math_ops.reduce_sum(math_ops.square(math_ops.real(
+ cplx))) + math_ops.reduce_sum(math_ops.square(math_ops.imag(cplx)))
epsilon = 1e-3
- jacob_t, jacob_n = tf.test.compute_gradient(inx,
- list(x.shape),
- loss,
- [1],
- x_init_value=x,
- delta=epsilon)
+ jacob_t, jacob_n = gradient_checker.compute_gradient(
+ inx, list(x.shape), loss, [1], x_init_value=x, delta=epsilon)
self.assertAllClose(jacob_t, jacob_n, rtol=epsilon, atol=epsilon)
def _compareBroadcastGradient(self, x):
- x_ = tf.convert_to_tensor(x)
+ x_ = ops.convert_to_tensor(x)
epsilon = 1e-3
with self.test_session():
for args in [(x_, 0.), (0., x_)]:
- z = tf.reduce_sum(tf.complex_abs(tf.complex(*args)))
- jacob_t, jacob_n = tf.test.compute_gradient(x_,
- list(x.shape),
- z,
- [1],
- x_init_value=x,
- delta=epsilon)
+ z = math_ops.reduce_sum(math_ops.complex_abs(math_ops.complex(*args)))
+ jacob_t, jacob_n = gradient_checker.compute_gradient(
+ x_, list(x.shape), z, [1], x_init_value=x, delta=epsilon)
self.assertAllClose(jacob_t, jacob_n, rtol=epsilon, atol=epsilon)
def testGradient(self):
@@ -1958,27 +1945,25 @@ class ComplexMakeRealImagTest(tf.test.TestCase):
# data[:, 2], data[:, 3] are real parts of x, imaginary parts of
# x, real parts of y and imaginary parts of y.
with self.test_session():
- inp = tf.convert_to_tensor(data)
- xr, xi, yr, yi = tf.split(value=inp, num_or_size_splits=4, axis=1)
+ inp = ops.convert_to_tensor(data)
+ xr, xi, yr, yi = array_ops.split(value=inp, num_or_size_splits=4, axis=1)
def vec(x): # Reshape to a vector
- return tf.reshape(x, [-1])
+ return array_ops.reshape(x, [-1])
+
xr, xi, yr, yi = vec(xr), vec(xi), vec(yr), vec(yi)
def cplx(r, i): # Combine to a complex vector
- return tf.complex(r, i)
+ return math_ops.complex(r, i)
+
x, y = cplx(xr, xi), cplx(yr, yi)
# z is x times y in complex plane.
z = x * y
# Defines the loss function as the sum of all coefficients of z.
- loss = tf.reduce_sum(tf.real(z) + tf.imag(z))
+ loss = math_ops.reduce_sum(math_ops.real(z) + math_ops.imag(z))
epsilon = 0.005
- jacob_t, jacob_n = tf.test.compute_gradient(inp,
- list(data.shape),
- loss,
- [1],
- x_init_value=data,
- delta=epsilon)
+ jacob_t, jacob_n = gradient_checker.compute_gradient(
+ inp, list(data.shape), loss, [1], x_init_value=data, delta=epsilon)
self.assertAllClose(jacob_t, jacob_n, rtol=epsilon, atol=epsilon)
def testMulGradient(self):
@@ -1986,15 +1971,18 @@ class ComplexMakeRealImagTest(tf.test.TestCase):
self._compareMulGradient(data)
-class AccumulateTest(tf.test.TestCase):
+class AccumulateTest(test.TestCase):
def testSimple(self):
with self.test_session():
- random_arrays = [np.random.rand(16, 16, 16, 16).astype(np.float32)
- for _ in range(20)]
- random_tensors = [tf.convert_to_tensor(x, dtype=tf.float32)
- for x in random_arrays]
- tf_val = tf.accumulate_n(random_tensors)
+ random_arrays = [
+ np.random.rand(16, 16, 16, 16).astype(np.float32) for _ in range(20)
+ ]
+ random_tensors = [
+ ops.convert_to_tensor(
+ x, dtype=dtypes_lib.float32) for x in random_arrays
+ ]
+ tf_val = math_ops.accumulate_n(random_tensors)
np_val = random_arrays[0]
for random_array in random_arrays[1:]:
np_val += random_array
@@ -2003,8 +1991,9 @@ class AccumulateTest(tf.test.TestCase):
def testZeroArgs(self):
with self.test_session():
with self.assertRaises(ValueError):
- tf_val = tf.accumulate_n([])
+ tf_val = math_ops.accumulate_n([])
tf_val.eval()
+
if __name__ == "__main__":
- tf.test.main()
+ test.main()
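The mechanical pattern this commit applies throughout, sketched as a toy snippet (illustrative only, using the same modules the hunks above import): drop the top-level hourglass import and call the specific framework/ops/platform modules directly.

    from tensorflow.python.framework import ops
    from tensorflow.python.ops import math_ops

    x, y = [1.0, 2.0], [3.0, 4.0]
    # Before: out = tf.add(tf.convert_to_tensor(x), tf.convert_to_tensor(y))
    out = math_ops.add(ops.convert_to_tensor(x), ops.convert_to_tensor(y))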
diff --git a/tensorflow/python/kernel_tests/decode_csv_op_test.py b/tensorflow/python/kernel_tests/decode_csv_op_test.py
index 2133b2e171..17051c3480 100644
--- a/tensorflow/python/kernel_tests/decode_csv_op_test.py
+++ b/tensorflow/python/kernel_tests/decode_csv_op_test.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for DecodeCSV op from parsing_ops."""
from __future__ import absolute_import
@@ -20,14 +19,16 @@ from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+
+from tensorflow.python.ops import parsing_ops
+from tensorflow.python.platform import test
-class DecodeCSVOpTest(tf.test.TestCase):
+class DecodeCSVOpTest(test.TestCase):
def _test(self, args, expected_out=None, expected_err_re=None):
with self.test_session() as sess:
- decode = tf.decode_csv(**args)
+ decode = parsing_ops.decode_csv(**args)
if expected_err_re is None:
out = sess.run(decode)
@@ -43,7 +44,10 @@ class DecodeCSVOpTest(tf.test.TestCase):
sess.run(decode)
def testSimple(self):
- args = {"records": ["1", "2", '"3"'], "record_defaults": [[1]],}
+ args = {
+ "records": ["1", "2", '"3"'],
+ "record_defaults": [[1]],
+ }
expected_out = [[1, 2, 3]]
@@ -65,8 +69,8 @@ class DecodeCSVOpTest(tf.test.TestCase):
def testInt64(self):
args = {
"records": ["1", "2", '"2147483648"'],
- "record_defaults": [np.array([],
- dtype=np.int64)],
+ "record_defaults": [np.array(
+ [], dtype=np.int64)],
}
expected_out = [[1, 2, 2147483648]]
@@ -117,22 +121,22 @@ class DecodeCSVOpTest(tf.test.TestCase):
def testWithoutDefaultsError(self):
args = {
"records": [",1", "0.2,3", "3.0,"],
- "record_defaults": [[1.0], np.array([],
- dtype=np.int32)]
+ "record_defaults": [[1.0], np.array(
+ [], dtype=np.int32)]
}
- self._test(args,
- expected_err_re="Field 1 is required but missing in record 2!")
+ self._test(
+ args, expected_err_re="Field 1 is required but missing in record 2!")
def testWrongFieldIntError(self):
args = {
"records": [",1", "0.2,234a", "3.0,2"],
- "record_defaults": [[1.0], np.array([],
- dtype=np.int32)]
+ "record_defaults": [[1.0], np.array(
+ [], dtype=np.int32)]
}
- self._test(args,
- expected_err_re="Field 1 in record 1 is not a valid int32: 234a")
+ self._test(
+ args, expected_err_re="Field 1 in record 1 is not a valid int32: 234a")
def testOutOfRangeError(self):
args = {
@@ -140,41 +144,39 @@ class DecodeCSVOpTest(tf.test.TestCase):
"record_defaults": [[1]]
}
- self._test(args,
- expected_err_re="Field 0 in record 1 is not a valid int32: ")
+ self._test(
+ args, expected_err_re="Field 0 in record 1 is not a valid int32: ")
def testWrongFieldFloatError(self):
args = {
"records": [",1", "0.2,2", "3.0adf,3"],
- "record_defaults": [[1.0], np.array([],
- dtype=np.int32)]
+ "record_defaults": [[1.0], np.array(
+ [], dtype=np.int32)]
}
- self._test(args,
- expected_err_re="Field 0 in record 2 is not a valid float: ")
+ self._test(
+ args, expected_err_re="Field 0 in record 2 is not a valid float: ")
def testWrongFieldStringError(self):
args = {"records": ['"1,a,"', "0.22", 'a"bc'], "record_defaults": [["a"]]}
self._test(
- args,
- expected_err_re="Unquoted fields cannot have quotes/CRLFs inside")
+ args, expected_err_re="Unquoted fields cannot have quotes/CRLFs inside")
def testWrongDefaults(self):
- args = {
- "records": [",1", "0.2,2", "3.0adf,3"],
- "record_defaults": [[1.0]]
- }
+ args = {"records": [",1", "0.2,2", "3.0adf,3"], "record_defaults": [[1.0]]}
- self._test(args,
- expected_err_re="Expect 1 fields but have 2 in record 0")
+ self._test(args, expected_err_re="Expect 1 fields but have 2 in record 0")
def testShortQuotedString(self):
- args = {"records": ["\""], "record_defaults": [["default"]],}
+ args = {
+ "records": ["\""],
+ "record_defaults": [["default"]],
+ }
- self._test(args,
- expected_err_re="Quoted field has to end with quote followed.*")
+ self._test(
+ args, expected_err_re="Quoted field has to end with quote followed.*")
if __name__ == "__main__":
- tf.test.main()
+ test.main()
diff --git a/tensorflow/python/kernel_tests/decode_image_op_test.py b/tensorflow/python/kernel_tests/decode_image_op_test.py
index 51cdedcf0f..d658b80601 100644
--- a/tensorflow/python/kernel_tests/decode_image_op_test.py
+++ b/tensorflow/python/kernel_tests/decode_image_op_test.py
@@ -19,13 +19,20 @@ from __future__ import division
from __future__ import print_function
import os.path
+
import numpy as np
-import tensorflow as tf
+
+from tensorflow.python.framework import errors_impl
+from tensorflow.python.ops import image_ops
+from tensorflow.python.ops import io_ops
+import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
+from tensorflow.python.platform import test
# Double-quote usage here is intentional to make internal path rewriting easier.
-prefix_path = os.path.join("tensorflow", "core", "lib")
+prefix_path = os.path.join('third_party', 'tensorflow', 'core', 'lib')
-class DecodeImageOpTest(tf.test.TestCase):
+
+class DecodeImageOpTest(test.TestCase):
def testGif(self):
# Read some real GIFs
@@ -36,9 +43,9 @@ class DecodeImageOpTest(tf.test.TestCase):
shape = (12, HEIGHT, WIDTH, 3)
with self.test_session(use_gpu=True) as sess:
- gif0 = tf.read_file(path)
- image0 = tf.image.decode_image(gif0)
- image1 = tf.image.decode_gif(gif0)
+ gif0 = io_ops.read_file(path)
+ image0 = image_ops.decode_image(gif0)
+ image1 = image_ops.decode_gif(gif0)
gif0, image0, image1 = sess.run([gif0, image0, image1])
self.assertEqual(image0.shape, shape)
@@ -57,18 +64,17 @@ class DecodeImageOpTest(tf.test.TestCase):
self.assertAllClose(frame, gt)
- bad_channels = tf.image.decode_image(gif0, channels=1)
- with self.assertRaises(tf.errors.InvalidArgumentError):
+ bad_channels = image_ops.decode_image(gif0, channels=1)
+ with self.assertRaises(errors_impl.InvalidArgumentError):
bad_channels.eval()
-
def testJpeg(self):
# Read a real jpeg and verify shape
path = os.path.join(prefix_path, 'jpeg', 'testdata', 'jpeg_merge_test1.jpg')
with self.test_session(use_gpu=True) as sess:
- jpeg0 = tf.read_file(path)
- image0 = tf.image.decode_image(jpeg0)
- image1 = tf.image.decode_jpeg(jpeg0)
+ jpeg0 = io_ops.read_file(path)
+ image0 = image_ops.decode_image(jpeg0)
+ image1 = image_ops.decode_jpeg(jpeg0)
jpeg0, image0, image1 = sess.run([jpeg0, image0, image1])
self.assertEqual(len(jpeg0), 3771)
self.assertEqual(image0.shape, (256, 128, 3))
@@ -81,25 +87,25 @@ class DecodeImageOpTest(tf.test.TestCase):
for channels in 0, 1, 3:
with self.test_session(use_gpu=True) as sess:
path = os.path.join(prefix_path, 'png', 'testdata', filename)
- png0 = tf.read_file(path)
- image0 = tf.image.decode_image(png0, channels=channels)
- image1 = tf.image.decode_png(png0, channels=channels)
+ png0 = io_ops.read_file(path)
+ image0 = image_ops.decode_image(png0, channels=channels)
+ image1 = image_ops.decode_png(png0, channels=channels)
png0, image0, image1 = sess.run([png0, image0, image1])
self.assertEqual(image0.shape, (26, 51, channels or channels_in))
self.assertAllEqual(image0, image1)
def testInvalidBytes(self):
image_bytes = b'ThisIsNotAnImage!'
- decode = tf.image.decode_image(image_bytes)
+ decode = image_ops.decode_image(image_bytes)
with self.test_session():
- with self.assertRaises(tf.errors.InvalidArgumentError):
+ with self.assertRaises(errors_impl.InvalidArgumentError):
decode.eval()
def testInvalidChannels(self):
image_bytes = b'unused'
with self.assertRaises(ValueError):
- decode = tf.image.decode_image(image_bytes, channels=4)
+ decode = image_ops.decode_image(image_bytes, channels=4)
-if __name__ == "__main__":
- tf.test.main()
+if __name__ == '__main__':
+ test.main()
diff --git a/tensorflow/python/kernel_tests/decode_png_op_test.py b/tensorflow/python/kernel_tests/decode_png_op_test.py
index 2987fb459a..d2e03938ee 100644
--- a/tensorflow/python/kernel_tests/decode_png_op_test.py
+++ b/tensorflow/python/kernel_tests/decode_png_op_test.py
@@ -18,25 +18,33 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-import tensorflow as tf
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import image_ops
+import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
+from tensorflow.python.platform import test
-class DecodePngOpTest(tf.test.TestCase):
+class DecodePngOpTest(test.TestCase):
def test16bit(self):
img_bytes = [[0, 255], [1024, 1024 + 255]]
# Encoded PNG bytes resulting from encoding the above img_bytes
# using go's image/png encoder.
- encoded_bytes = [137, 80, 78, 71, 13, 10, 26, 10, 0, 0, 0, 13, 73, 72, 68,
- 82, 0, 0, 0, 2, 0, 0, 0, 2, 16, 0, 0, 0, 0, 7, 77, 142,
- 187, 0, 0, 0, 21, 73, 68, 65, 84, 120, 156, 98, 98, 96, 96,
- 248, 207, 194, 2, 36, 1, 1, 0, 0, 255, 255, 6, 60, 1, 10,
- 68, 160, 26, 131, 0, 0, 0, 0, 73, 69, 78, 68, 174, 66, 96,
- 130]
+ encoded_bytes = [
+ 137, 80, 78, 71, 13, 10, 26, 10, 0, 0, 0, 13, 73, 72, 68, 82, 0, 0, 0,
+ 2, 0, 0, 0, 2, 16, 0, 0, 0, 0, 7, 77, 142, 187, 0, 0, 0, 21, 73, 68, 65,
+ 84, 120, 156, 98, 98, 96, 96, 248, 207, 194, 2, 36, 1, 1, 0, 0, 255,
+ 255, 6, 60, 1, 10, 68, 160, 26, 131, 0, 0, 0, 0, 73, 69, 78, 68, 174,
+ 66, 96, 130
+ ]
byte_string = bytes(bytearray(encoded_bytes))
- img_in = tf.constant(byte_string, dtype=tf.string)
- decode = tf.squeeze(tf.image.decode_png(img_in, dtype=tf.uint16))
+ img_in = constant_op.constant(byte_string, dtype=dtypes.string)
+ decode = array_ops.squeeze(
+ image_ops.decode_png(
+ img_in, dtype=dtypes.uint16))
with self.test_session():
decoded = decode.eval()
@@ -44,4 +52,4 @@ class DecodePngOpTest(tf.test.TestCase):
if __name__ == "__main__":
- tf.test.main()
+ test.main()
diff --git a/tensorflow/python/kernel_tests/decode_raw_op_test.py b/tensorflow/python/kernel_tests/decode_raw_op_test.py
index bb707b32f7..472808c8f9 100644
--- a/tensorflow/python/kernel_tests/decode_raw_op_test.py
+++ b/tensorflow/python/kernel_tests/decode_raw_op_test.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for DecodeRaw op from parsing_ops."""
from __future__ import absolute_import
@@ -20,15 +19,19 @@ from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+
+from tensorflow.python.framework import dtypes
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import parsing_ops
+from tensorflow.python.platform import test
-class DecodeRawOpTest(tf.test.TestCase):
+class DecodeRawOpTest(test.TestCase):
def testToUint8(self):
with self.test_session():
- in_bytes = tf.placeholder(tf.string, shape=[2])
- decode = tf.decode_raw(in_bytes, out_type=tf.uint8)
+ in_bytes = array_ops.placeholder(dtypes.string, shape=[2])
+ decode = parsing_ops.decode_raw(in_bytes, out_type=dtypes.uint8)
self.assertEqual([2, None], decode.get_shape().as_list())
result = decode.eval(feed_dict={in_bytes: ["A", "a"]})
@@ -45,13 +48,13 @@ class DecodeRawOpTest(tf.test.TestCase):
def testToInt16(self):
with self.test_session():
- in_bytes = tf.placeholder(tf.string, shape=[None])
- decode = tf.decode_raw(in_bytes, out_type=tf.int16)
+ in_bytes = array_ops.placeholder(dtypes.string, shape=[None])
+ decode = parsing_ops.decode_raw(in_bytes, out_type=dtypes.int16)
self.assertEqual([None, None], decode.get_shape().as_list())
result = decode.eval(feed_dict={in_bytes: ["AaBC"]})
- self.assertAllEqual([[ord("A") + ord("a") * 256,
- ord("B") + ord("C") * 256]], result)
+ self.assertAllEqual(
+ [[ord("A") + ord("a") * 256, ord("B") + ord("C") * 256]], result)
with self.assertRaisesOpError(
"Input to DecodeRaw has length 3 that is not a multiple of 2, the "
@@ -60,8 +63,8 @@ class DecodeRawOpTest(tf.test.TestCase):
def testToFloat16(self):
with self.test_session():
- in_bytes = tf.placeholder(tf.string, shape=[None])
- decode = tf.decode_raw(in_bytes, out_type=tf.float16)
+ in_bytes = array_ops.placeholder(dtypes.string, shape=[None])
+ decode = parsing_ops.decode_raw(in_bytes, out_type=dtypes.float16)
self.assertEqual([None, None], decode.get_shape().as_list())
expected_result = np.matrix([[1, -2, -3, 4]], dtype=np.float16)
@@ -69,5 +72,6 @@ class DecodeRawOpTest(tf.test.TestCase):
self.assertAllEqual(expected_result, result)
+
if __name__ == "__main__":
- tf.test.main()
+ test.main()
diff --git a/tensorflow/python/kernel_tests/denormal_test.py b/tensorflow/python/kernel_tests/denormal_test.py
index b5449d59c7..7047c25555 100644
--- a/tensorflow/python/kernel_tests/denormal_test.py
+++ b/tensorflow/python/kernel_tests/denormal_test.py
@@ -12,19 +12,21 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for denormal handling."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+from tensorflow.python.framework import constant_op
+from tensorflow.python.ops import array_ops
from tensorflow.python.platform import control_imports
+from tensorflow.python.platform import test
-class DenormalTest(tf.test.TestCase):
+class DenormalTest(test.TestCase):
def testPythonHasDenormals(self):
"""Non-tf numpy code should treat denormals correctly."""
@@ -37,12 +39,12 @@ class DenormalTest(tf.test.TestCase):
# TODO(irving): Fix denormal flushing for open source.
return
with self.test_session(use_gpu=use_gpu):
- tf.identity(7).eval()
+ array_ops.identity(7).eval()
for dtype in dtypes:
tiny = np.finfo(dtype).tiny
# Small shape to test main thread, large shape to test thread pool
- for shape in (), (1<<20,):
- flush = 0.1 * tf.constant(tiny, shape=shape)
+ for shape in (), (1 << 20,):
+ flush = 0.1 * constant_op.constant(tiny, shape=shape)
self.assertAllEqual(flush.eval(), np.zeros(shape))
# Make sure the flags don't leak out
self.testPythonHasDenormals()
@@ -57,4 +59,4 @@ class DenormalTest(tf.test.TestCase):
if __name__ == "__main__":
- tf.test.main()
+ test.main()
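For reference, a numpy-only sketch of the denormal arithmetic the test above exercises: scaling the smallest normal float32 by 0.1 lands in the subnormal range, which plain numpy preserves but which the flushing checked here turns into exactly zero on the TensorFlow side.

    import numpy as np

    tiny = np.finfo(np.float32).tiny   # smallest *normal* float32
    sub = np.float32(0.1) * tiny       # subnormal: nonzero in plain numpy
    assert 0 < sub < tiny
    # The test above asserts the equivalent graph computation yields 0.0.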
diff --git a/tensorflow/python/kernel_tests/dense_update_ops_no_tsan_test.py b/tensorflow/python/kernel_tests/dense_update_ops_no_tsan_test.py
index 70e184e203..eab1563057 100644
--- a/tensorflow/python/kernel_tests/dense_update_ops_no_tsan_test.py
+++ b/tensorflow/python/kernel_tests/dense_update_ops_no_tsan_test.py
@@ -12,33 +12,43 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for state updating ops that may have benign race conditions."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import math_ops
+from tensorflow.python.ops import state_ops
+from tensorflow.python.ops import variables
+from tensorflow.python.platform import test
-class AssignOpTest(tf.test.TestCase):
+class AssignOpTest(test.TestCase):
# NOTE(mrry): We exclude these tests from the TSAN TAP target, because they
# contain benign and deliberate data races when multiple threads update
# the same parameters without a lock.
def testParallelUpdateWithoutLocking(self):
with self.test_session() as sess:
- ones_t = tf.fill([1024, 1024], 1.0)
- p = tf.Variable(tf.zeros([1024, 1024]))
- adds = [tf.assign_add(p, ones_t, use_locking=False)
- for _ in range(20)]
- tf.global_variables_initializer().run()
+ ones_t = array_ops.fill([1024, 1024], 1.0)
+ p = variables.Variable(array_ops.zeros([1024, 1024]))
+ adds = [
+ state_ops.assign_add(
+ p, ones_t, use_locking=False) for _ in range(20)
+ ]
+ variables.global_variables_initializer().run()
def run_add(add_op):
sess.run(add_op)
- threads = [self.checkedThread(target=run_add, args=(add_op,))
- for add_op in adds]
+
+ threads = [
+ self.checkedThread(
+ target=run_add, args=(add_op,)) for add_op in adds
+ ]
for t in threads:
t.start()
for t in threads:
@@ -51,16 +61,21 @@ class AssignOpTest(tf.test.TestCase):
def testParallelAssignWithoutLocking(self):
with self.test_session() as sess:
- ones_t = tf.fill([1024, 1024], float(1))
- p = tf.Variable(tf.zeros([1024, 1024]))
- assigns = [tf.assign(p, tf.mul(ones_t, float(i)), False)
- for i in range(1, 21)]
- tf.global_variables_initializer().run()
+ ones_t = array_ops.fill([1024, 1024], float(1))
+ p = variables.Variable(array_ops.zeros([1024, 1024]))
+ assigns = [
+ state_ops.assign(p, math_ops.mul(ones_t, float(i)), False)
+ for i in range(1, 21)
+ ]
+ variables.global_variables_initializer().run()
def run_assign(assign_op):
sess.run(assign_op)
- threads = [self.checkedThread(target=run_assign, args=(assign_op,))
- for assign_op in assigns]
+
+ threads = [
+ self.checkedThread(
+ target=run_assign, args=(assign_op,)) for assign_op in assigns
+ ]
for t in threads:
t.start()
for t in threads:
@@ -74,4 +89,4 @@ class AssignOpTest(tf.test.TestCase):
if __name__ == "__main__":
- tf.test.main()
+ test.main()
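A minimal sketch of the locking distinction that splits these two assign-op test files (names reuse the imports introduced above; the asserted bounds are inferred from the tests): with use_locking=True each assign_add is atomic, so twenty parallel increments of a zeroed variable end at exactly 20.0; with use_locking=False increments may be lost to benign races, which is why those tests run outside TSAN.

    p = variables.Variable(array_ops.zeros([1024, 1024]))
    ones_t = array_ops.fill([1024, 1024], 1.0)
    # Atomic: after all threads join, every element equals 20.0.
    locked = [state_ops.assign_add(p, ones_t, use_locking=True) for _ in range(20)]
    # Benign race: some adds may be lost; elements end up between 1.0 and 20.0.
    racy = [state_ops.assign_add(p, ones_t, use_locking=False) for _ in range(20)]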
diff --git a/tensorflow/python/kernel_tests/dense_update_ops_test.py b/tensorflow/python/kernel_tests/dense_update_ops_test.py
index 0f71694673..eda5823901 100644
--- a/tensorflow/python/kernel_tests/dense_update_ops_test.py
+++ b/tensorflow/python/kernel_tests/dense_update_ops_test.py
@@ -12,24 +12,30 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for tensorflow.ops.tf.Assign*."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+
+from tensorflow.python.framework import dtypes
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import math_ops
+from tensorflow.python.ops import state_ops
+from tensorflow.python.ops import variables
+from tensorflow.python.platform import test
-class AssignOpTest(tf.test.TestCase):
+class AssignOpTest(test.TestCase):
def _initAssignFetch(self, x, y, use_gpu=False):
"""Initialize a param to init and update it with y."""
super(AssignOpTest, self).setUp()
with self.test_session(use_gpu=use_gpu):
- p = tf.Variable(x)
- assign = tf.assign(p, y)
+ p = variables.Variable(x)
+ assign = state_ops.assign(p, y)
p.initializer.run()
new_value = assign.eval()
return p.eval(), new_value
@@ -37,8 +43,8 @@ class AssignOpTest(tf.test.TestCase):
def _initAssignAddFetch(self, x, y, use_gpu=False):
"""Initialize a param to init, and compute param += y."""
with self.test_session(use_gpu=use_gpu):
- p = tf.Variable(x)
- add = tf.assign_add(p, y)
+ p = variables.Variable(x)
+ add = state_ops.assign_add(p, y)
p.initializer.run()
new_value = add.eval()
return p.eval(), new_value
@@ -46,8 +52,8 @@ class AssignOpTest(tf.test.TestCase):
def _initAssignSubFetch(self, x, y, use_gpu=False):
"""Initialize a param to init, and compute param -= y."""
with self.test_session(use_gpu=use_gpu):
- p = tf.Variable(x)
- sub = tf.assign_sub(p, y)
+ p = variables.Variable(x)
+ sub = state_ops.assign_sub(p, y)
p.initializer.run()
new_value = sub.eval()
return p.eval(), new_value
@@ -65,7 +71,7 @@ class AssignOpTest(tf.test.TestCase):
var_value, op_value = self._initAssignSubFetch(x, y, use_gpu=False)
self.assertAllEqual(x - y, var_value)
self.assertAllEqual(x - y, op_value)
- if tf.test.is_built_with_cuda() and dtype in [np.float32, np.float64]:
+ if test.is_built_with_cuda() and dtype in [np.float32, np.float64]:
var_value, op_value = self._initAssignFetch(x, y, use_gpu=True)
self.assertAllEqual(y, var_value)
self.assertAllEqual(y, op_value)
@@ -81,31 +87,29 @@ class AssignOpTest(tf.test.TestCase):
def testAssignNonStrictShapeChecking(self):
with self.test_session():
- data = tf.fill([1024, 1024], 0)
- p = tf.Variable([1])
- a = tf.assign(p, data, validate_shape=False)
+ data = array_ops.fill([1024, 1024], 0)
+ p = variables.Variable([1])
+ a = state_ops.assign(p, data, validate_shape=False)
a.op.run()
self.assertAllEqual(p.eval(), data.eval())
# Assign to yet another shape
- data2 = tf.fill([10, 10], 1)
- a2 = tf.assign(p, data2, validate_shape=False)
+ data2 = array_ops.fill([10, 10], 1)
+ a2 = state_ops.assign(p, data2, validate_shape=False)
a2.op.run()
self.assertAllEqual(p.eval(), data2.eval())
def testInitRequiredAssignAdd(self):
with self.test_session():
- p = tf.Variable(tf.fill([1024, 1024], 1),
- tf.int32)
- a = tf.assign_add(p, tf.fill([1024, 1024], 0))
+ p = variables.Variable(array_ops.fill([1024, 1024], 1), dtypes.int32)
+ a = state_ops.assign_add(p, array_ops.fill([1024, 1024], 0))
with self.assertRaisesOpError("use uninitialized"):
a.op.run()
def testInitRequiredAssignSub(self):
with self.test_session():
- p = tf.Variable(tf.fill([1024, 1024], 1),
- tf.int32)
- a = tf.assign_sub(p, tf.fill([1024, 1024], 0))
+ p = variables.Variable(array_ops.fill([1024, 1024], 1), dtypes.int32)
+ a = state_ops.assign_sub(p, array_ops.fill([1024, 1024], 0))
with self.assertRaisesOpError("use uninitialized"):
a.op.run()
@@ -114,17 +118,22 @@ class AssignOpTest(tf.test.TestCase):
# data race and must run without TSAN.
def testParallelUpdateWithLocking(self):
with self.test_session() as sess:
- zeros_t = tf.fill([1024, 1024], 0.0)
- ones_t = tf.fill([1024, 1024], 1.0)
- p = tf.Variable(zeros_t)
- adds = [tf.assign_add(p, ones_t, use_locking=True)
- for _ in range(20)]
+ zeros_t = array_ops.fill([1024, 1024], 0.0)
+ ones_t = array_ops.fill([1024, 1024], 1.0)
+ p = variables.Variable(zeros_t)
+ adds = [
+ state_ops.assign_add(
+ p, ones_t, use_locking=True) for _ in range(20)
+ ]
p.initializer.run()
def run_add(add_op):
sess.run(add_op)
+
threads = [
- self.checkedThread(target=run_add, args=(add_op,)) for add_op in adds]
+ self.checkedThread(
+ target=run_add, args=(add_op,)) for add_op in adds
+ ]
for t in threads:
t.start()
for t in threads:
@@ -139,18 +148,23 @@ class AssignOpTest(tf.test.TestCase):
# which contains a benign data race and must run without TSAN.
def testParallelAssignWithLocking(self):
with self.test_session() as sess:
- zeros_t = tf.fill([1024, 1024], 0.0)
- ones_t = tf.fill([1024, 1024], 1.0)
- p = tf.Variable(zeros_t)
- assigns = [tf.assign(p, tf.mul(ones_t, float(i)),
- use_locking=True)
- for i in range(1, 21)]
+ zeros_t = array_ops.fill([1024, 1024], 0.0)
+ ones_t = array_ops.fill([1024, 1024], 1.0)
+ p = variables.Variable(zeros_t)
+ assigns = [
+ state_ops.assign(
+ p, math_ops.mul(ones_t, float(i)), use_locking=True)
+ for i in range(1, 21)
+ ]
p.initializer.run()
def run_assign(assign_op):
sess.run(assign_op)
- threads = [self.checkedThread(target=run_assign, args=(assign_op,))
- for assign_op in assigns]
+
+ threads = [
+ self.checkedThread(
+ target=run_assign, args=(assign_op,)) for assign_op in assigns
+ ]
for t in threads:
t.start()
for t in threads:
@@ -165,4 +179,4 @@ class AssignOpTest(tf.test.TestCase):
if __name__ == "__main__":
- tf.test.main()
+ test.main()
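The mechanical pattern above repeats for every file in this change: the single
"import tensorflow as tf" hourglass import is replaced by imports of the
modules that actually define each symbol (tf.assign -> state_ops.assign,
tf.fill -> array_ops.fill, tf.test -> test, and so on). A minimal standalone
sketch of the before/after, illustrative only and not part of the patch; it
assumes graph mode and a Session from tensorflow.python.client, which the
tests themselves get via self.test_session():

    from tensorflow.python.client import session
    from tensorflow.python.ops import array_ops
    from tensorflow.python.ops import state_ops
    from tensorflow.python.ops import variables

    # Before: p = tf.Variable(tf.fill([2, 2], 0)); a = tf.assign(p, ...)
    p = variables.Variable(array_ops.fill([2, 2], 0))
    assign = state_ops.assign(p, array_ops.fill([2, 2], 7))

    with session.Session():
      p.initializer.run()   # variables must be initialized before use
      print(assign.eval())  # [[7 7], [7 7]]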
diff --git a/tensorflow/python/kernel_tests/depthtospace_op_test.py b/tensorflow/python/kernel_tests/depthtospace_op_test.py
index 9e45b5ced3..95a7e1f971 100644
--- a/tensorflow/python/kernel_tests/depthtospace_op_test.py
+++ b/tensorflow/python/kernel_tests/depthtospace_op_test.py
@@ -20,14 +20,20 @@ from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import gradient_checker
+from tensorflow.python.ops import math_ops
+from tensorflow.python.platform import test
-class DepthToSpaceTest(tf.test.TestCase):
+
+class DepthToSpaceTest(test.TestCase):
def _testOne(self, inputs, block_size, outputs):
with self.test_session(use_gpu=True):
- x_tf = tf.depth_to_space(tf.to_float(inputs), block_size)
+ x_tf = array_ops.depth_to_space(math_ops.to_float(inputs), block_size)
self.assertAllEqual(x_tf.eval(), outputs)
def testBasic(self):
@@ -137,7 +143,7 @@ class DepthToSpaceTest(tf.test.TestCase):
      # Raise an exception, since the depth is only 4 and needs to be
      # divisible by 16.
with self.assertRaises(ValueError):
- out_tf = tf.depth_to_space(x_np, block_size)
+ out_tf = array_ops.depth_to_space(x_np, block_size)
out_tf.eval()
# Test when the block size is 0.
@@ -146,7 +152,7 @@ class DepthToSpaceTest(tf.test.TestCase):
[[3], [4]]]]
block_size = 0
with self.assertRaises(ValueError):
- out_tf = tf.depth_to_space(x_np, block_size)
+ out_tf = array_ops.depth_to_space(x_np, block_size)
out_tf.eval()
# Test when the block size is 1. The block size should be > 1.
@@ -157,7 +163,7 @@ class DepthToSpaceTest(tf.test.TestCase):
[4, 4, 4, 4]]]]
block_size = 1
with self.assertRaises(ValueError):
- out_tf = tf.depth_to_space(x_np, block_size)
+ out_tf = array_ops.depth_to_space(x_np, block_size)
out_tf.eval()
def testBlockSizeLargerThanInput(self):
@@ -166,7 +172,7 @@ class DepthToSpaceTest(tf.test.TestCase):
[[3], [4]]]]
block_size = 10
with self.assertRaises(ValueError):
- out_tf = tf.space_to_depth(x_np, block_size)
+ out_tf = array_ops.space_to_depth(x_np, block_size)
out_tf.eval()
def testBlockSizeNotDivisibleDepth(self):
@@ -177,23 +183,23 @@ class DepthToSpaceTest(tf.test.TestCase):
[4, 4, 4, 4]]]]
block_size = 3
with self.assertRaises(ValueError):
- _ = tf.space_to_depth(x_np, block_size)
+ _ = array_ops.space_to_depth(x_np, block_size)
def testUnknownShape(self):
- t = tf.depth_to_space(tf.placeholder(tf.float32), block_size=4)
+ t = array_ops.depth_to_space(array_ops.placeholder(dtypes.float32), block_size=4)
self.assertEqual(4, t.get_shape().ndims)
-class DepthToSpaceGradientTest(tf.test.TestCase):
+class DepthToSpaceGradientTest(test.TestCase):
# Check the gradients.
def _checkGrad(self, x, block_size):
assert 4 == x.ndim
with self.test_session(use_gpu=True):
- tf_x = tf.convert_to_tensor(x)
- tf_y = tf.depth_to_space(tf_x, block_size)
+ tf_x = ops.convert_to_tensor(x)
+ tf_y = array_ops.depth_to_space(tf_x, block_size)
epsilon = 1e-2
- ((x_jacob_t, x_jacob_n)) = tf.test.compute_gradient(
+ ((x_jacob_t, x_jacob_n)) = gradient_checker.compute_gradient(
tf_x,
x.shape,
tf_y,
@@ -225,4 +231,4 @@ class DepthToSpaceGradientTest(tf.test.TestCase):
if __name__ == "__main__":
- tf.test.main()
+ test.main()
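For reference, depth_to_space moves values from the channel dimension into
spatial blocks: an NHWC input of shape [1, 1, 1, 4] with block_size=2 becomes
[1, 2, 2, 1], and the channel count must be divisible by block_size**2, which
is exactly what the ValueError cases above exercise. A small sketch using the
values from the test's basic case (same Session assumption as before):

    import numpy as np
    from tensorflow.python.client import session
    from tensorflow.python.ops import array_ops
    from tensorflow.python.ops import math_ops

    x = np.array([[[[1, 2, 3, 4]]]])  # NHWC, shape [1, 1, 1, 4]
    y = array_ops.depth_to_space(math_ops.to_float(x), 2)

    with session.Session():
      print(y.eval())  # [[[[1.], [2.]], [[3.], [4.]]]], shape [1, 2, 2, 1]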
diff --git a/tensorflow/python/kernel_tests/depthwise_conv_op_test.py b/tensorflow/python/kernel_tests/depthwise_conv_op_test.py
index 7a1545daca..427db39221 100644
--- a/tensorflow/python/kernel_tests/depthwise_conv_op_test.py
+++ b/tensorflow/python/kernel_tests/depthwise_conv_op_test.py
@@ -13,12 +13,20 @@
# limitations under the License.
# ==============================================================================
"""Functional tests for depthwise convolutional operations."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.ops import gradient_checker
+from tensorflow.python.ops import nn_impl
+from tensorflow.python.ops import nn_ops
+import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
+from tensorflow.python.platform import test
def ConfigsToTest():
@@ -72,7 +80,7 @@ def CheckGradConfigsToTest():
yield i, f, o, s, p
-class DepthwiseConv2DTest(tf.test.TestCase):
+class DepthwiseConv2DTest(test.TestCase):
# This is testing against the output of the implementation using the
# combination of conv_2d and slicing ops.
@@ -100,19 +108,14 @@ class DepthwiseConv2DTest(tf.test.TestCase):
x1 = [f * 1.0 for f in range(1, total_size_1 + 1)]
x2 = [f * 1.0 for f in range(1, total_size_2 + 1)]
with self.test_session(use_gpu=use_gpu) as sess:
- t1 = tf.constant(x1, shape=tensor_in_sizes)
+ t1 = constant_op.constant(x1, shape=tensor_in_sizes)
t1.set_shape(tensor_in_sizes)
- t2 = tf.constant(x2, shape=filter_in_sizes)
- conv_native = tf.nn.depthwise_conv2d_native(
- t1,
- t2,
- strides=[1, stride, stride, 1],
- padding=padding)
-
- conv_gold = tf.nn.depthwise_conv2d(t1,
- t2,
- strides=[1, stride, stride, 1],
- padding=padding)
+ t2 = constant_op.constant(x2, shape=filter_in_sizes)
+ conv_native = nn_ops.depthwise_conv2d_native(
+ t1, t2, strides=[1, stride, stride, 1], padding=padding)
+
+ conv_gold = nn_impl.depthwise_conv2d(
+ t1, t2, strides=[1, stride, stride, 1], padding=padding)
native_result = sess.run(conv_native)
gold_result = sess.run(conv_gold)
@@ -127,18 +130,13 @@ class DepthwiseConv2DTest(tf.test.TestCase):
padding) in enumerate(ConfigsToTest()):
print("Processing ", index, "th config.")
if index == 2:
- self._VerifyValues(input_size,
- filter_size,
- stride,
- padding,
- use_gpu=True)
- self._VerifyValues(input_size,
- filter_size,
- stride,
- padding,
- use_gpu=False)
+ self._VerifyValues(
+ input_size, filter_size, stride, padding, use_gpu=True)
+ self._VerifyValues(
+ input_size, filter_size, stride, padding, use_gpu=False)
# This is testing against hand calculated results.
+
def _VerifyHandValues(self, tensor_in_sizes, filter_in_sizes, stride, padding,
expected, use_gpu):
"""Verifies the output values of the depthwise convolution function.
@@ -164,13 +162,11 @@ class DepthwiseConv2DTest(tf.test.TestCase):
x1 = [f * 1.0 for f in range(1, total_size_1 + 1)]
x2 = [f * 1.0 for f in range(1, total_size_2 + 1)]
with self.test_session(use_gpu=use_gpu) as sess:
- t1 = tf.constant(x1, shape=tensor_in_sizes)
+ t1 = constant_op.constant(x1, shape=tensor_in_sizes)
t1.set_shape(tensor_in_sizes)
- t2 = tf.constant(x2, shape=filter_in_sizes)
- conv = tf.nn.depthwise_conv2d_native(t1,
- t2,
- strides=[1, stride, stride, 1],
- padding=padding)
+ t2 = constant_op.constant(x2, shape=filter_in_sizes)
+ conv = nn_ops.depthwise_conv2d_native(
+ t1, t2, strides=[1, stride, stride, 1], padding=padding)
value = sess.run(conv)
print("value = ", value)
self.assertArrayNear(expected, np.ravel(value), 1e-5)
@@ -226,19 +222,21 @@ class DepthwiseConv2DTest(tf.test.TestCase):
# (position 1, 0: in_depth 1, output_depth 3 -- using filter #1)
# 4.0 * 4.0 + 10.0 * 12.0 + 6.0 * 8.0 + 12.0 * 16.0 = 376
expected_output = [196, 216, 272, 296, 252, 280, 344, 376]
- self._VerifyHandValues(tensor_in_sizes=[1, 2, 3, 2],
- filter_in_sizes=[2, 2, 2, 2],
- stride=1,
- padding="VALID",
- expected=expected_output,
- use_gpu=False)
-
- self._VerifyHandValues(tensor_in_sizes=[1, 2, 3, 2],
- filter_in_sizes=[2, 2, 2, 2],
- stride=1,
- padding="VALID",
- expected=expected_output,
- use_gpu=True)
+ self._VerifyHandValues(
+ tensor_in_sizes=[1, 2, 3, 2],
+ filter_in_sizes=[2, 2, 2, 2],
+ stride=1,
+ padding="VALID",
+ expected=expected_output,
+ use_gpu=False)
+
+ self._VerifyHandValues(
+ tensor_in_sizes=[1, 2, 3, 2],
+ filter_in_sizes=[2, 2, 2, 2],
+ stride=1,
+ padding="VALID",
+ expected=expected_output,
+ use_gpu=True)
  # Gradient checkers. This tests depthwise gradient computations for both
# BackpropFilter and BackpropInput by comparing gradients computed by the
@@ -257,32 +255,31 @@ class DepthwiseConv2DTest(tf.test.TestCase):
input_data = [x * 1.0 / input_size for x in range(0, input_size)]
filter_data = [x * 1.0 / filter_size for x in range(0, filter_size)]
with self.test_session(use_gpu=use_gpu):
- if data_type == tf.float32:
+ if data_type == dtypes.float32:
tolerance = 0.002
else:
- self.assertEqual(data_type, tf.float64)
+ self.assertEqual(data_type, dtypes.float64)
tolerance = 1e-8
- input_tensor = tf.constant(input_data,
- shape=input_shape,
- dtype=data_type,
- name="input")
- filter_tensor = tf.constant(filter_data,
- shape=filter_shape,
- dtype=data_type,
- name="filter")
- depthwise_conv2d = tf.nn.depthwise_conv2d_native(input_tensor,
- filter_tensor,
- [1, stride, stride, 1],
- padding,
- name="depthwise_conv2d")
+ input_tensor = constant_op.constant(
+ input_data, shape=input_shape, dtype=data_type, name="input")
+ filter_tensor = constant_op.constant(
+ filter_data, shape=filter_shape, dtype=data_type, name="filter")
+ depthwise_conv2d = nn_ops.depthwise_conv2d_native(
+ input_tensor,
+ filter_tensor, [1, stride, stride, 1],
+ padding,
+ name="depthwise_conv2d")
self.assertEqual(output_shape, depthwise_conv2d.get_shape())
if test_input:
- err = tf.test.compute_gradient_error(input_tensor, input_shape,
- depthwise_conv2d, output_shape)
+ err = gradient_checker.compute_gradient_error(input_tensor, input_shape,
+ depthwise_conv2d,
+ output_shape)
else:
- err = tf.test.compute_gradient_error(filter_tensor, filter_shape,
- depthwise_conv2d, output_shape)
+ err = gradient_checker.compute_gradient_error(filter_tensor,
+ filter_shape,
+ depthwise_conv2d,
+ output_shape)
print("depthwise conv_2d gradient error = ", err)
self.assertLess(err, tolerance)
@@ -291,28 +288,30 @@ class DepthwiseConv2DTest(tf.test.TestCase):
padding) in enumerate(CheckGradConfigsToTest()):
print("Processing ", index, "th config.")
for use_gpu in [True, False]:
- self._ConstructAndTestGradient(input_size,
- filter_size,
- output_size,
- stride,
- padding,
- tf.float32,
- test_input=True,
- use_gpu=use_gpu)
+ self._ConstructAndTestGradient(
+ input_size,
+ filter_size,
+ output_size,
+ stride,
+ padding,
+ dtypes.float32,
+ test_input=True,
+ use_gpu=use_gpu)
def testDepthwiseConv2DFilterGrad(self):
for index, (input_size, filter_size, output_size, stride,
padding) in enumerate(CheckGradConfigsToTest()):
print("Processing ", index, "th config.")
for use_gpu in [True, False]:
- self._ConstructAndTestGradient(input_size,
- filter_size,
- output_size,
- stride,
- padding,
- tf.float32,
- test_input=False,
- use_gpu=use_gpu)
+ self._ConstructAndTestGradient(
+ input_size,
+ filter_size,
+ output_size,
+ stride,
+ padding,
+ dtypes.float32,
+ test_input=False,
+ use_gpu=use_gpu)
def _CompareBackpropInputFloat(self, input_sizes, filter_sizes, output_sizes,
stride, padding):
@@ -321,15 +320,11 @@ class DepthwiseConv2DTest(tf.test.TestCase):
def _GetVal(use_gpu):
with self.test_session(use_gpu=use_gpu):
- t0 = tf.constant(input_sizes, shape=[len(input_sizes)])
- t1 = tf.constant(x1, shape=filter_sizes)
- t2 = tf.constant(x2, shape=output_sizes)
- backprop = tf.nn.depthwise_conv2d_native_backprop_input(
- t0,
- t1,
- t2,
- strides=[1, stride, stride, 1],
- padding=padding)
+ t0 = constant_op.constant(input_sizes, shape=[len(input_sizes)])
+ t1 = constant_op.constant(x1, shape=filter_sizes)
+ t2 = constant_op.constant(x2, shape=output_sizes)
+ backprop = nn_ops.depthwise_conv2d_native_backprop_input(
+ t0, t1, t2, strides=[1, stride, stride, 1], padding=padding)
ret = backprop.eval()
self.assertShapeEqual(ret, backprop)
return ret
@@ -345,15 +340,11 @@ class DepthwiseConv2DTest(tf.test.TestCase):
def _GetVal(use_gpu):
with self.test_session(use_gpu=use_gpu):
- t0 = tf.constant(input_sizes, shape=[len(input_sizes)])
- t1 = tf.constant(x1, shape=filter_sizes)
- t2 = tf.constant(x2, shape=output_sizes)
- backprop = tf.nn.depthwise_conv2d_native_backprop_input(
- t0,
- t1,
- t2,
- strides=[1, stride, stride, 1],
- padding=padding)
+ t0 = constant_op.constant(input_sizes, shape=[len(input_sizes)])
+ t1 = constant_op.constant(x1, shape=filter_sizes)
+ t2 = constant_op.constant(x2, shape=output_sizes)
+ backprop = nn_ops.depthwise_conv2d_native_backprop_input(
+ t0, t1, t2, strides=[1, stride, stride, 1], padding=padding)
ret = backprop.eval()
self.assertShapeEqual(ret, backprop)
return ret
@@ -378,15 +369,11 @@ class DepthwiseConv2DTest(tf.test.TestCase):
def _GetVal(use_gpu):
with self.test_session(use_gpu=use_gpu):
- t0 = tf.constant(x0, shape=input_sizes)
- t1 = tf.constant(filter_sizes, shape=[len(filter_sizes)])
- t2 = tf.constant(x2, shape=output_sizes)
- backprop = tf.nn.depthwise_conv2d_native_backprop_filter(
- t0,
- t1,
- t2,
- strides=[1, stride, stride, 1],
- padding=padding)
+ t0 = constant_op.constant(x0, shape=input_sizes)
+ t1 = constant_op.constant(filter_sizes, shape=[len(filter_sizes)])
+ t2 = constant_op.constant(x2, shape=output_sizes)
+ backprop = nn_ops.depthwise_conv2d_native_backprop_filter(
+ t0, t1, t2, strides=[1, stride, stride, 1], padding=padding)
ret = backprop.eval()
self.assertShapeEqual(ret, backprop)
return ret
@@ -402,15 +389,11 @@ class DepthwiseConv2DTest(tf.test.TestCase):
def _GetVal(use_gpu):
with self.test_session(use_gpu=use_gpu):
- t0 = tf.constant(x0, shape=input_sizes)
- t1 = tf.constant(filter_sizes, shape=[len(filter_sizes)])
- t2 = tf.constant(x2, shape=output_sizes)
- backprop = tf.nn.depthwise_conv2d_native_backprop_filter(
- t0,
- t1,
- t2,
- strides=[1, stride, stride, 1],
- padding=padding)
+ t0 = constant_op.constant(x0, shape=input_sizes)
+ t1 = constant_op.constant(filter_sizes, shape=[len(filter_sizes)])
+ t2 = constant_op.constant(x2, shape=output_sizes)
+ backprop = nn_ops.depthwise_conv2d_native_backprop_filter(
+ t0, t1, t2, strides=[1, stride, stride, 1], padding=padding)
ret = backprop.eval()
self.assertShapeEqual(ret, backprop)
return ret
@@ -428,6 +411,5 @@ class DepthwiseConv2DTest(tf.test.TestCase):
self._CompareBackpropFilterDouble(input_size, filter_size, output_size,
stride, padding)
-
if __name__ == "__main__":
- tf.test.main()
+ test.main()
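The tests above lean on the defining property of depthwise convolution: each
input channel is convolved with its own channel_multiplier filters, so the
output depth is in_channels * channel_multiplier for a filter of shape
[h, w, in_channels, channel_multiplier]. A shape-only sketch, illustrative
and with the usual Session assumption:

    import numpy as np
    from tensorflow.python.client import session
    from tensorflow.python.framework import constant_op
    from tensorflow.python.framework import dtypes
    from tensorflow.python.ops import nn_ops

    t1 = constant_op.constant(np.ones([1, 4, 4, 2]), dtype=dtypes.float32)
    t2 = constant_op.constant(np.ones([2, 2, 2, 3]), dtype=dtypes.float32)
    conv = nn_ops.depthwise_conv2d_native(
        t1, t2, strides=[1, 1, 1, 1], padding="VALID")

    with session.Session():
      print(conv.eval().shape)  # (1, 3, 3, 6): depth = 2 channels * 3 filters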
diff --git a/tensorflow/python/kernel_tests/determinant_op_test.py b/tensorflow/python/kernel_tests/determinant_op_test.py
index 36d6a64054..2d05ab6139 100644
--- a/tensorflow/python/kernel_tests/determinant_op_test.py
+++ b/tensorflow/python/kernel_tests/determinant_op_test.py
@@ -12,17 +12,20 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for tensorflow.ops.tf.MatrixDeterminant."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+
+from tensorflow.python.framework import constant_op
+from tensorflow.python.ops import linalg_ops
+from tensorflow.python.platform import test
-class DeterminantOpTest(tf.test.TestCase):
+class DeterminantOpTest(test.TestCase):
def _compareDeterminantBase(self, matrix_x, tf_ans):
out = tf_ans.eval()
@@ -36,16 +39,18 @@ class DeterminantOpTest(tf.test.TestCase):
def _compareDeterminant(self, matrix_x):
with self.test_session():
- self._compareDeterminantBase(matrix_x, tf.matrix_determinant(matrix_x))
+ self._compareDeterminantBase(matrix_x,
+ linalg_ops.matrix_determinant(matrix_x))
def testBasic(self):
# 2x2 matrices
self._compareDeterminant(np.array([[2., 3.], [3., 4.]]).astype(np.float32))
self._compareDeterminant(np.array([[0., 0.], [0., 0.]]).astype(np.float32))
# 5x5 matrices (Eigen forces LU decomposition)
- self._compareDeterminant(np.array(
- [[2., 3., 4., 5., 6.], [3., 4., 9., 2., 0.], [2., 5., 8., 3., 8.],
- [1., 6., 7., 4., 7.], [2., 3., 4., 5., 6.]]).astype(np.float32))
+ self._compareDeterminant(
+ np.array([[2., 3., 4., 5., 6.], [3., 4., 9., 2., 0.], [
+ 2., 5., 8., 3., 8.
+ ], [1., 6., 7., 4., 7.], [2., 3., 4., 5., 6.]]).astype(np.float32))
# A multidimensional batch of 2x2 matrices
self._compareDeterminant(np.random.rand(3, 4, 5, 2, 2).astype(np.float32))
@@ -54,9 +59,10 @@ class DeterminantOpTest(tf.test.TestCase):
self._compareDeterminant(np.array([[2., 3.], [3., 4.]]).astype(np.float64))
self._compareDeterminant(np.array([[0., 0.], [0., 0.]]).astype(np.float64))
# 5x5 matrices (Eigen forces LU decomposition)
- self._compareDeterminant(np.array(
- [[2., 3., 4., 5., 6.], [3., 4., 9., 2., 0.], [2., 5., 8., 3., 8.],
- [1., 6., 7., 4., 7.], [2., 3., 4., 5., 6.]]).astype(np.float64))
+ self._compareDeterminant(
+ np.array([[2., 3., 4., 5., 6.], [3., 4., 9., 2., 0.], [
+ 2., 5., 8., 3., 8.
+ ], [1., 6., 7., 4., 7.], [2., 3., 4., 5., 6.]]).astype(np.float64))
# A multidimensional batch of 2x2 matrices
self._compareDeterminant(np.random.rand(3, 4, 5, 2, 2).astype(np.float64))
@@ -70,14 +76,14 @@ class DeterminantOpTest(tf.test.TestCase):
    # Attempting the determinant of a non-square matrix should raise a
    # ValueError.
with self.assertRaises(ValueError):
- tf.matrix_determinant(
+ linalg_ops.matrix_determinant(
np.array([[1., 2., 3.], [3., 5., 4.]]).astype(np.float32))
def testWrongDimensions(self):
# The input to the determinant should be a 2-dimensional tensor.
- tensor1 = tf.constant([1., 2.])
+ tensor1 = constant_op.constant([1., 2.])
with self.assertRaises(ValueError):
- tf.matrix_determinant(tensor1)
+ linalg_ops.matrix_determinant(tensor1)
def testEmpty(self):
self._compareDeterminant(np.empty([0, 2, 2]))
@@ -85,4 +91,4 @@ class DeterminantOpTest(tf.test.TestCase):
if __name__ == "__main__":
- tf.test.main()
+ test.main()
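The 2x2 cases above are easy to verify by hand: det([[2, 3], [3, 4]]) =
2*4 - 3*3 = -1, and _compareDeterminantBase asserts the kernel against
np.linalg.det in the same way. A short sketch (same Session assumption):

    import numpy as np
    from tensorflow.python.client import session
    from tensorflow.python.ops import linalg_ops

    m = np.array([[2., 3.], [3., 4.]]).astype(np.float32)
    det = linalg_ops.matrix_determinant(m)

    with session.Session():
      print(det.eval())        # -1.0
      print(np.linalg.det(m))  # matches the kernel, up to float tolerance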
diff --git a/tensorflow/python/kernel_tests/diag_op_test.py b/tensorflow/python/kernel_tests/diag_op_test.py
index 339198bb55..09d6436f43 100644
--- a/tensorflow/python/kernel_tests/diag_op_test.py
+++ b/tensorflow/python/kernel_tests/diag_op_test.py
@@ -18,17 +18,25 @@ from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes as dtypes_lib
+from tensorflow.python.framework import ops
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import gradient_checker
+from tensorflow.python.ops import gradients_impl
+from tensorflow.python.platform import test
+from tensorflow.python.platform import tf_logging
-class MatrixDiagTest(tf.test.TestCase):
+
+class MatrixDiagTest(test.TestCase):
_use_gpu = False
def testVector(self):
with self.test_session(use_gpu=self._use_gpu):
v = np.array([1.0, 2.0, 3.0])
mat = np.diag(v)
- v_diag = tf.matrix_diag(v)
+ v_diag = array_ops.matrix_diag(v)
self.assertEqual((3, 3), v_diag.get_shape())
self.assertAllEqual(v_diag.eval(), mat)
@@ -43,28 +51,30 @@ class MatrixDiagTest(tf.test.TestCase):
[[4.0, 0.0, 0.0],
[0.0, 5.0, 0.0],
[0.0, 0.0, 6.0]]])
- v_batch_diag = tf.matrix_diag(v_batch)
+ v_batch_diag = array_ops.matrix_diag(v_batch)
self.assertEqual((2, 3, 3), v_batch_diag.get_shape())
self.assertAllEqual(v_batch_diag.eval(), mat_batch)
def testInvalidShape(self):
with self.assertRaisesRegexp(ValueError, "must be at least rank 1"):
- tf.matrix_diag(0)
+ array_ops.matrix_diag(0)
def testInvalidShapeAtEval(self):
with self.test_session(use_gpu=self._use_gpu):
- v = tf.placeholder(dtype=tf.float32)
+ v = array_ops.placeholder(dtype=dtypes_lib.float32)
with self.assertRaisesOpError("input must be at least 1-dim"):
- tf.matrix_diag(v).eval(feed_dict={v: 0.0})
+ array_ops.matrix_diag(v).eval(feed_dict={v: 0.0})
def testGrad(self):
shapes = ((3,), (7, 4))
with self.test_session(use_gpu=self._use_gpu):
for shape in shapes:
- x = tf.constant(np.random.rand(*shape), np.float32)
- y = tf.matrix_diag(x)
- error = tf.test.compute_gradient_error(x, x.get_shape().as_list(),
- y, y.get_shape().as_list())
+ x = constant_op.constant(np.random.rand(*shape), np.float32)
+ y = array_ops.matrix_diag(x)
+ error = gradient_checker.compute_gradient_error(x,
+ x.get_shape().as_list(),
+ y,
+ y.get_shape().as_list())
self.assertLess(error, 1e-4)
@@ -72,7 +82,7 @@ class MatrixDiagGpuTest(MatrixDiagTest):
_use_gpu = True
-class MatrixSetDiagTest(tf.test.TestCase):
+class MatrixSetDiagTest(test.TestCase):
_use_gpu = False
def testSquare(self):
@@ -84,7 +94,7 @@ class MatrixSetDiagTest(tf.test.TestCase):
mat_set_diag = np.array([[1.0, 1.0, 0.0],
[1.0, 2.0, 1.0],
[1.0, 1.0, 3.0]])
- output = tf.matrix_set_diag(mat, v)
+ output = array_ops.matrix_set_diag(mat, v)
self.assertEqual((3, 3), output.get_shape())
self.assertAllEqual(mat_set_diag, output.eval())
@@ -93,14 +103,14 @@ class MatrixSetDiagTest(tf.test.TestCase):
v = np.array([3.0, 4.0])
mat = np.array([[0.0, 1.0, 0.0], [1.0, 0.0, 1.0]])
expected = np.array([[3.0, 1.0, 0.0], [1.0, 4.0, 1.0]])
- output = tf.matrix_set_diag(mat, v)
+ output = array_ops.matrix_set_diag(mat, v)
self.assertEqual((2, 3), output.get_shape())
self.assertAllEqual(expected, output.eval())
v = np.array([3.0, 4.0])
mat = np.array([[0.0, 1.0], [1.0, 0.0], [1.0, 1.0]])
expected = np.array([[3.0, 1.0], [1.0, 4.0], [1.0, 1.0]])
- output = tf.matrix_set_diag(mat, v)
+ output = array_ops.matrix_set_diag(mat, v)
self.assertEqual((3, 2), output.get_shape())
self.assertAllEqual(expected, output.eval())
@@ -123,7 +133,7 @@ class MatrixSetDiagTest(tf.test.TestCase):
[[-4.0, 0.0, 4.0],
[0.0, -5.0, 0.0],
[2.0, 0.0, -6.0]]])
- output = tf.matrix_set_diag(mat_batch, v_batch)
+ output = array_ops.matrix_set_diag(mat_batch, v_batch)
self.assertEqual((2, 3, 3), output.get_shape())
self.assertAllEqual(mat_set_diag_batch, output.eval())
@@ -142,52 +152,56 @@ class MatrixSetDiagTest(tf.test.TestCase):
[0.0, -2.0, 0.0]],
[[-4.0, 0.0, 4.0],
[0.0, -5.0, 0.0]]])
- output = tf.matrix_set_diag(mat_batch, v_batch)
+ output = array_ops.matrix_set_diag(mat_batch, v_batch)
self.assertEqual((2, 2, 3), output.get_shape())
self.assertAllEqual(mat_set_diag_batch, output.eval())
def testInvalidShape(self):
with self.assertRaisesRegexp(ValueError, "must be at least rank 2"):
- tf.matrix_set_diag(0, [0])
+ array_ops.matrix_set_diag(0, [0])
with self.assertRaisesRegexp(ValueError, "must be at least rank 1"):
- tf.matrix_set_diag([[0]], 0)
+ array_ops.matrix_set_diag([[0]], 0)
def testInvalidShapeAtEval(self):
with self.test_session(use_gpu=self._use_gpu):
- v = tf.placeholder(dtype=tf.float32)
+ v = array_ops.placeholder(dtype=dtypes_lib.float32)
with self.assertRaisesOpError("input must be at least 2-dim"):
- tf.matrix_set_diag(v, [v]).eval(feed_dict={v: 0.0})
+ array_ops.matrix_set_diag(v, [v]).eval(feed_dict={v: 0.0})
with self.assertRaisesOpError(
r"but received input shape: \[1,1\] and diagonal shape: \[\]"):
- tf.matrix_set_diag([[v]], v).eval(feed_dict={v: 0.0})
+ array_ops.matrix_set_diag([[v]], v).eval(feed_dict={v: 0.0})
def testGrad(self):
shapes = ((3, 4, 4), (3, 3, 4), (3, 4, 3), (7, 4, 8, 8))
with self.test_session(use_gpu=self._use_gpu):
for shape in shapes:
- x = tf.constant(np.random.rand(*shape), dtype=tf.float32)
+ x = constant_op.constant(
+ np.random.rand(*shape), dtype=dtypes_lib.float32)
diag_shape = shape[:-2] + (min(shape[-2:]),)
- x_diag = tf.constant(np.random.rand(*diag_shape), dtype=tf.float32)
- y = tf.matrix_set_diag(x, x_diag)
- error_x = tf.test.compute_gradient_error(x, x.get_shape().as_list(),
- y, y.get_shape().as_list())
+ x_diag = constant_op.constant(
+ np.random.rand(*diag_shape), dtype=dtypes_lib.float32)
+ y = array_ops.matrix_set_diag(x, x_diag)
+ error_x = gradient_checker.compute_gradient_error(
+ x, x.get_shape().as_list(), y, y.get_shape().as_list())
self.assertLess(error_x, 1e-4)
- error_x_diag = tf.test.compute_gradient_error(
- x_diag, x_diag.get_shape().as_list(),
- y, y.get_shape().as_list())
+ error_x_diag = gradient_checker.compute_gradient_error(
+ x_diag, x_diag.get_shape().as_list(), y, y.get_shape().as_list())
self.assertLess(error_x_diag, 1e-4)
def testGradWithNoShapeInformation(self):
with self.test_session(use_gpu=self._use_gpu) as sess:
- v = tf.placeholder(dtype=tf.float32)
- mat = tf.placeholder(dtype=tf.float32)
- grad_input = tf.placeholder(dtype=tf.float32)
- output = tf.matrix_set_diag(mat, v)
- grads = tf.gradients(output, [mat, v], grad_ys=grad_input)
+ v = array_ops.placeholder(dtype=dtypes_lib.float32)
+ mat = array_ops.placeholder(dtype=dtypes_lib.float32)
+ grad_input = array_ops.placeholder(dtype=dtypes_lib.float32)
+ output = array_ops.matrix_set_diag(mat, v)
+ grads = gradients_impl.gradients(output, [mat, v], grad_ys=grad_input)
grad_input_val = np.random.rand(3, 3).astype(np.float32)
- grad_vals = sess.run(
- grads, feed_dict={v: 2 * np.ones(3), mat: np.ones((3, 3)),
- grad_input: grad_input_val})
+ grad_vals = sess.run(grads,
+ feed_dict={
+ v: 2 * np.ones(3),
+ mat: np.ones((3, 3)),
+ grad_input: grad_input_val
+ })
self.assertAllEqual(np.diag(grad_input_val), grad_vals[1])
self.assertAllEqual(grad_input_val - np.diag(np.diag(grad_input_val)),
grad_vals[0])
@@ -197,24 +211,24 @@ class MatrixSetDiagGpuTest(MatrixSetDiagTest):
_use_gpu = True
-class MatrixDiagPartTest(tf.test.TestCase):
+class MatrixDiagPartTest(test.TestCase):
_use_gpu = False
def testSquare(self):
with self.test_session(use_gpu=self._use_gpu):
v = np.array([1.0, 2.0, 3.0])
mat = np.diag(v)
- mat_diag = tf.matrix_diag_part(mat)
+ mat_diag = array_ops.matrix_diag_part(mat)
self.assertEqual((3,), mat_diag.get_shape())
self.assertAllEqual(mat_diag.eval(), v)
def testRectangular(self):
with self.test_session(use_gpu=self._use_gpu):
mat = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
- mat_diag = tf.matrix_diag_part(mat)
+ mat_diag = array_ops.matrix_diag_part(mat)
self.assertAllEqual(mat_diag.eval(), np.array([1.0, 5.0]))
mat = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
- mat_diag = tf.matrix_diag_part(mat)
+ mat_diag = array_ops.matrix_diag_part(mat)
self.assertAllEqual(mat_diag.eval(), np.array([1.0, 4.0]))
def testSquareBatch(self):
@@ -229,7 +243,7 @@ class MatrixDiagPartTest(tf.test.TestCase):
[0.0, 5.0, 0.0],
[0.0, 0.0, 6.0]]])
self.assertEqual(mat_batch.shape, (2, 3, 3))
- mat_batch_diag = tf.matrix_diag_part(mat_batch)
+ mat_batch_diag = array_ops.matrix_diag_part(mat_batch)
self.assertEqual((2, 3), mat_batch_diag.get_shape())
self.assertAllEqual(mat_batch_diag.eval(), v_batch)
@@ -243,28 +257,30 @@ class MatrixDiagPartTest(tf.test.TestCase):
[[4.0, 0.0, 0.0],
[0.0, 5.0, 0.0]]])
self.assertEqual(mat_batch.shape, (2, 2, 3))
- mat_batch_diag = tf.matrix_diag_part(mat_batch)
+ mat_batch_diag = array_ops.matrix_diag_part(mat_batch)
self.assertEqual((2, 2), mat_batch_diag.get_shape())
self.assertAllEqual(mat_batch_diag.eval(), v_batch)
def testInvalidShape(self):
with self.assertRaisesRegexp(ValueError, "must be at least rank 2"):
- tf.matrix_diag_part(0)
+ array_ops.matrix_diag_part(0)
def testInvalidShapeAtEval(self):
with self.test_session(use_gpu=self._use_gpu):
- v = tf.placeholder(dtype=tf.float32)
+ v = array_ops.placeholder(dtype=dtypes_lib.float32)
with self.assertRaisesOpError("input must be at least 2-dim"):
- tf.matrix_diag_part(v).eval(feed_dict={v: 0.0})
+ array_ops.matrix_diag_part(v).eval(feed_dict={v: 0.0})
def testGrad(self):
shapes = ((3, 3), (2, 3), (3, 2), (5, 3, 3))
with self.test_session(use_gpu=self._use_gpu):
for shape in shapes:
- x = tf.constant(np.random.rand(*shape), dtype=np.float32)
- y = tf.matrix_diag_part(x)
- error = tf.test.compute_gradient_error(x, x.get_shape().as_list(),
- y, y.get_shape().as_list())
+ x = constant_op.constant(np.random.rand(*shape), dtype=np.float32)
+ y = array_ops.matrix_diag_part(x)
+ error = gradient_checker.compute_gradient_error(x,
+ x.get_shape().as_list(),
+ y,
+ y.get_shape().as_list())
self.assertLess(error, 1e-4)
@@ -272,13 +288,13 @@ class MatrixDiagPartGpuTest(MatrixDiagPartTest):
_use_gpu = True
-class DiagTest(tf.test.TestCase):
+class DiagTest(test.TestCase):
def diagOp(self, diag, dtype, expected_ans, use_gpu=False):
with self.test_session(use_gpu=use_gpu):
- tf_ans = tf.diag(tf.convert_to_tensor(diag.astype(dtype)))
+ tf_ans = array_ops.diag(ops.convert_to_tensor(diag.astype(dtype)))
out = tf_ans.eval()
- tf_ans_inv = tf.diag_part(expected_ans)
+ tf_ans_inv = array_ops.diag_part(expected_ans)
inv_out = tf_ans_inv.eval()
self.assertAllClose(out, expected_ans)
self.assertAllClose(inv_out, diag)
@@ -392,19 +408,19 @@ class DiagTest(tf.test.TestCase):
[[0 + 0j, 0 + 0j], [7.7 + 7.7j, 0 + 0j]]],
[[[0 + 0j, 0 + 0j], [0 + 0j, 0 + 0j]],
[[0 + 0j, 0 + 0j], [0 + 0j, 8.8 + 8.8j]]]]]],
- dtype=dtype)
+ dtype=dtype)
self.diagOp(x, dtype, expected_ans)
-class DiagPartOpTest(tf.test.TestCase):
+class DiagPartOpTest(test.TestCase):
def setUp(self):
np.random.seed(0)
def diagPartOp(self, tensor, dtype, expected_ans, use_gpu=False):
with self.test_session(use_gpu=use_gpu):
- tensor = tf.convert_to_tensor(tensor.astype(dtype))
- tf_ans_inv = tf.diag_part(tensor)
+ tensor = ops.convert_to_tensor(tensor.astype(dtype))
+ tf_ans_inv = array_ops.diag_part(tensor)
inv_out = tf_ans_inv.eval()
self.assertAllClose(inv_out, expected_ans)
self.assertShapeEqual(expected_ans, tf_ans_inv)
@@ -422,9 +438,9 @@ class DiagPartOpTest(tf.test.TestCase):
expected_ans = x[i, i]
for shape in None, (None, 3), (3, None):
with self.test_session(use_gpu=False):
- t = tf.convert_to_tensor(x.astype(np.float32))
+ t = ops.convert_to_tensor(x.astype(np.float32))
t.set_shape(shape)
- tf_ans = tf.diag_part(t)
+ tf_ans = array_ops.diag_part(t)
out = tf_ans.eval()
self.assertAllClose(out, expected_ans)
self.assertShapeEqual(expected_ans, tf_ans)
@@ -459,40 +475,41 @@ class DiagPartOpTest(tf.test.TestCase):
self.assertRaises(ValueError, self.diagPartOp, x, np.float32, 0)
-class DiagGradOpTest(tf.test.TestCase):
+class DiagGradOpTest(test.TestCase):
def testDiagGrad(self):
np.random.seed(0)
- shapes = ((3,), (3,3), (3,3,3))
- dtypes = (tf.float32, tf.float64)
+ shapes = ((3,), (3, 3), (3, 3, 3))
+ dtypes = (dtypes_lib.float32, dtypes_lib.float64)
with self.test_session(use_gpu=False):
errors = []
for shape in shapes:
for dtype in dtypes:
- x1 = tf.constant(np.random.rand(*shape), dtype=dtype)
- y = tf.diag(x1)
- error = tf.test.compute_gradient_error(x1, x1.get_shape().as_list(),
- y, y.get_shape().as_list())
- tf.logging.info("error = %f", error)
+ x1 = constant_op.constant(np.random.rand(*shape), dtype=dtype)
+ y = array_ops.diag(x1)
+ error = gradient_checker.compute_gradient_error(
+ x1, x1.get_shape().as_list(), y, y.get_shape().as_list())
+ tf_logging.info("error = %f", error)
self.assertLess(error, 1e-4)
-class DiagGradPartOpTest(tf.test.TestCase):
+class DiagGradPartOpTest(test.TestCase):
def testDiagPartGrad(self):
np.random.seed(0)
- shapes = ((3,3), (3,3,3,3))
- dtypes = (tf.float32, tf.float64)
+ shapes = ((3, 3), (3, 3, 3, 3))
+ dtypes = (dtypes_lib.float32, dtypes_lib.float64)
with self.test_session(use_gpu=False):
errors = []
for shape in shapes:
for dtype in dtypes:
- x1 = tf.constant(np.random.rand(*shape), dtype=dtype)
- y = tf.diag_part(x1)
- error = tf.test.compute_gradient_error(x1, x1.get_shape().as_list(),
- y, y.get_shape().as_list())
- tf.logging.info("error = %f", error)
+ x1 = constant_op.constant(np.random.rand(*shape), dtype=dtype)
+ y = array_ops.diag_part(x1)
+ error = gradient_checker.compute_gradient_error(
+ x1, x1.get_shape().as_list(), y, y.get_shape().as_list())
+ tf_logging.info("error = %f", error)
self.assertLess(error, 1e-4)
+
if __name__ == "__main__":
- tf.test.main()
+ test.main()
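The invariant these tests rely on is that matrix_diag and matrix_diag_part
are inverses along the diagonal (diag and diag_part behave analogously for
the higher-rank form). A round-trip sketch, illustrative only:

    import numpy as np
    from tensorflow.python.client import session
    from tensorflow.python.ops import array_ops

    v = np.array([1.0, 2.0, 3.0])
    mat = array_ops.matrix_diag(v)          # 3x3 matrix with v on the diagonal
    back = array_ops.matrix_diag_part(mat)  # recovers v

    with session.Session():
      print(mat.eval())   # same as np.diag(v)
      print(back.eval())  # [1. 2. 3.]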
diff --git a/tensorflow/python/kernel_tests/division_future_test.py b/tensorflow/python/kernel_tests/division_future_test.py
index e860cb1e23..4d943decf2 100644
--- a/tensorflow/python/kernel_tests/division_future_test.py
+++ b/tensorflow/python/kernel_tests/division_future_test.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for division with division imported from __future__.
This file should be exactly the same as division_past_test.py except
@@ -24,25 +23,29 @@ from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import ops
+from tensorflow.python.platform import test
-class DivisionTestCase(tf.test.TestCase):
+class DivisionTestCase(test.TestCase):
def testDivision(self):
"""Test all the different ways to divide."""
values = [1, 2, 7, 11]
- functions = (lambda x: x), tf.constant
+ functions = (lambda x: x), constant_op.constant
# TODO(irving): Test int8, int16 once we support casts for those.
dtypes = np.int32, np.int64, np.float32, np.float64
def check(x, y):
- if isinstance(x, tf.Tensor):
+ if isinstance(x, ops.Tensor):
x = x.eval()
- if isinstance(y, tf.Tensor):
+ if isinstance(y, ops.Tensor):
y = y.eval()
self.assertEqual(x.dtype, y.dtype)
self.assertEqual(x, y)
+
with self.test_session():
for dtype in dtypes:
for x in map(dtype, values):
@@ -60,4 +63,4 @@ class DivisionTestCase(tf.test.TestCase):
if __name__ == "__main__":
- tf.test.main()
+ test.main()
diff --git a/tensorflow/python/kernel_tests/division_past_test.py b/tensorflow/python/kernel_tests/division_past_test.py
index 50ac150cfc..8f446d01bb 100644
--- a/tensorflow/python/kernel_tests/division_past_test.py
+++ b/tensorflow/python/kernel_tests/division_past_test.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for division with division imported from __future__.
This file should be exactly the same as division_past_test.py except
@@ -24,25 +23,29 @@ from __future__ import absolute_import
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import ops
+from tensorflow.python.platform import test
-class DivisionTestCase(tf.test.TestCase):
+class DivisionTestCase(test.TestCase):
def testDivision(self):
"""Test all the different ways to divide."""
values = [1, 2, 7, 11]
- functions = (lambda x: x), tf.constant
+ functions = (lambda x: x), constant_op.constant
# TODO(irving): Test int8, int16 once we support casts for those.
dtypes = np.int32, np.int64, np.float32, np.float64
def check(x, y):
- if isinstance(x, tf.Tensor):
+ if isinstance(x, ops.Tensor):
x = x.eval()
- if isinstance(y, tf.Tensor):
+ if isinstance(y, ops.Tensor):
y = y.eval()
self.assertEqual(x.dtype, y.dtype)
self.assertEqual(x, y)
+
with self.test_session():
for dtype in dtypes:
for x in map(dtype, values):
@@ -60,4 +63,4 @@ class DivisionTestCase(tf.test.TestCase):
if __name__ == "__main__":
- tf.test.main()
+ test.main()
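The two division files differ only in whether "from __future__ import
division" is in effect; together they pin down that the overloaded "/" on
tensors tracks Python's own semantics. A sketch of the behavior check()
compares, under the __future__ variant (Session assumed; under Python 2
without the import, 7 / 2 would instead floor to 3 on both sides):

    from __future__ import division

    import numpy as np
    from tensorflow.python.client import session
    from tensorflow.python.framework import constant_op

    with session.Session():
      x = constant_op.constant(np.int32(7))
      print((x / 2).eval())  # 3.5: true division casts the ints to float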
diff --git a/tensorflow/python/kernel_tests/draw_bounding_box_op_test.py b/tensorflow/python/kernel_tests/draw_bounding_box_op_test.py
index aaccd95514..4f5b854e6f 100644
--- a/tensorflow/python/kernel_tests/draw_bounding_box_op_test.py
+++ b/tensorflow/python/kernel_tests/draw_bounding_box_op_test.py
@@ -13,15 +13,23 @@
# limitations under the License.
# ==============================================================================
"""Tests for draw_bounding_box_op."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import image_ops
+from tensorflow.python.ops import image_ops_impl
+from tensorflow.python.ops import math_ops
+from tensorflow.python.platform import test
-class DrawBoundingBoxOpTest(tf.test.TestCase):
+class DrawBoundingBoxOpTest(test.TestCase):
def _fillBorder(self, image, color):
"""Fill the border of the image.
@@ -54,9 +62,8 @@ class DrawBoundingBoxOpTest(tf.test.TestCase):
"""
# THIS TABLE MUST MATCH draw_bounding_box_op.cc
color_table = np.asarray([[1, 1, 0, 1], [0, 0, 1, 1], [1, 0, 0, 1],
- [0, 1, 0, 1], [0.5, 0, 0.5, 1],
- [0.5, 0.5, 0, 1], [0.5, 0, 0, 1],
- [0, 0, 0.5, 1], [0, 1, 1, 1],
+ [0, 1, 0, 1], [0.5, 0, 0.5, 1], [0.5, 0.5, 0, 1],
+ [0.5, 0, 0, 1], [0, 0, 0.5, 1], [0, 1, 1, 1],
[1, 0, 1, 1]])
assert len(img.shape) == 3
depth = img.shape[2]
@@ -73,12 +80,12 @@ class DrawBoundingBoxOpTest(tf.test.TestCase):
test_drawn_image = self._fillBorder(image, color)
bboxes = np.asarray([0, 0, 1, 1])
bboxes = np.vstack([bboxes for _ in range(num_boxes)])
- bboxes = tf.to_float(bboxes)
- bboxes = tf.expand_dims(bboxes, 0)
- image = tf.convert_to_tensor(image)
- image = tf.image.convert_image_dtype(image, tf.float32)
- image = tf.expand_dims(image, 0)
- image = tf.image.draw_bounding_boxes(image, bboxes)
+ bboxes = math_ops.to_float(bboxes)
+ bboxes = array_ops.expand_dims(bboxes, 0)
+ image = ops.convert_to_tensor(image)
+ image = image_ops_impl.convert_image_dtype(image, dtypes.float32)
+ image = array_ops.expand_dims(image, 0)
+ image = image_ops.draw_bounding_boxes(image, bboxes)
with self.test_session(use_gpu=False) as sess:
op_drawn_image = np.squeeze(sess.run(image), 0)
self.assertAllEqual(test_drawn_image, op_drawn_image)
@@ -98,5 +105,6 @@ class DrawBoundingBoxOpTest(tf.test.TestCase):
image = np.zeros([4, 4, 1], "float32")
self._testDrawBoundingBoxColorCycling(image)
+
if __name__ == "__main__":
- tf.test.main()
+ test.main()
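The pipeline built in _testDrawBoundingBoxColorCycling mirrors the public
tf.image.draw_bounding_boxes contract: images are a batched float NHWC tensor
and boxes are normalized [y_min, x_min, y_max, x_max] per box, so [0, 0, 1, 1]
traces the full image border (hence the comparison against _fillBorder). A
condensed sketch with the usual Session assumption:

    import numpy as np
    from tensorflow.python.client import session
    from tensorflow.python.framework import ops
    from tensorflow.python.ops import array_ops
    from tensorflow.python.ops import image_ops
    from tensorflow.python.ops import math_ops

    image = ops.convert_to_tensor(np.zeros([4, 4, 3], np.float32))
    image = array_ops.expand_dims(image, 0)              # [1, 4, 4, 3]
    boxes = math_ops.to_float([[[0.0, 0.0, 1.0, 1.0]]])  # [batch, boxes, 4]
    drawn = image_ops.draw_bounding_boxes(image, boxes)

    with session.Session():
      print(drawn.eval().shape)  # (1, 4, 4, 3), border pixels colored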
diff --git a/tensorflow/python/kernel_tests/dynamic_partition_op_test.py b/tensorflow/python/kernel_tests/dynamic_partition_op_test.py
index 72b639bf3c..4883095707 100644
--- a/tensorflow/python/kernel_tests/dynamic_partition_op_test.py
+++ b/tensorflow/python/kernel_tests/dynamic_partition_op_test.py
@@ -12,24 +12,32 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for the DynamicPartition op."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
-import tensorflow as tf
+
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import data_flow_ops
+from tensorflow.python.ops import gradients_impl
+import tensorflow.python.ops.data_flow_grad # pylint: disable=unused-import
+from tensorflow.python.platform import test
-class DynamicPartitionTest(tf.test.TestCase):
+class DynamicPartitionTest(test.TestCase):
def testSimpleOneDimensional(self):
with self.test_session() as sess:
- data = tf.constant([0, 13, 2, 39, 4, 17])
- indices = tf.constant([0, 0, 2, 3, 2, 1])
- partitions = tf.dynamic_partition(data, indices, num_partitions=4)
+ data = constant_op.constant([0, 13, 2, 39, 4, 17])
+ indices = constant_op.constant([0, 0, 2, 3, 2, 1])
+ partitions = data_flow_ops.dynamic_partition(
+ data, indices, num_partitions=4)
partition_vals = sess.run(partitions)
self.assertAllEqual([0, 13], partition_vals[0])
@@ -45,10 +53,11 @@ class DynamicPartitionTest(tf.test.TestCase):
def testSimpleTwoDimensional(self):
with self.test_session() as sess:
- data = tf.constant([[0, 1, 2], [3, 4, 5], [6, 7, 8],
- [9, 10, 11], [12, 13, 14], [15, 16, 17]])
- indices = tf.constant([0, 0, 2, 3, 2, 1])
- partitions = tf.dynamic_partition(data, indices, num_partitions=4)
+ data = constant_op.constant([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11],
+ [12, 13, 14], [15, 16, 17]])
+ indices = constant_op.constant([0, 0, 2, 3, 2, 1])
+ partitions = data_flow_ops.dynamic_partition(
+ data, indices, num_partitions=4)
partition_vals = sess.run(partitions)
self.assertAllEqual([[0, 1, 2], [3, 4, 5]], partition_vals[0])
@@ -70,9 +79,9 @@ class DynamicPartitionTest(tf.test.TestCase):
partitions = np.random.randint(n, size=np.prod(shape)).reshape(shape)
for extra_shape in (), (6,), (6, 7):
data = np.random.randn(*(shape + extra_shape))
- partitions_t = tf.constant(partitions, dtype=tf.int32)
- data_t = tf.constant(data)
- outputs = tf.dynamic_partition(
+ partitions_t = constant_op.constant(partitions, dtype=dtypes.int32)
+ data_t = constant_op.constant(data)
+ outputs = data_flow_ops.dynamic_partition(
data_t, partitions_t, num_partitions=n)
self.assertEqual(n, len(outputs))
outputs_val = sess.run(outputs)
@@ -81,16 +90,18 @@ class DynamicPartitionTest(tf.test.TestCase):
# Test gradients
outputs_grad = [7 * output for output in outputs_val]
- grads = tf.gradients(outputs, [data_t, partitions_t], outputs_grad)
+ grads = gradients_impl.gradients(outputs, [data_t, partitions_t],
+ outputs_grad)
self.assertEqual(grads[1], None) # Partitions has no gradients
self.assertAllEqual(7 * data, sess.run(grads[0]))
def testErrorIndexOutOfRange(self):
with self.test_session() as sess:
- data = tf.constant([[0, 1, 2], [3, 4, 5], [6, 7, 8],
- [9, 10, 11], [12, 13, 14]])
- indices = tf.constant([0, 2, 99, 2, 2])
- partitions = tf.dynamic_partition(data, indices, num_partitions=4)
+ data = constant_op.constant([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11],
+ [12, 13, 14]])
+ indices = constant_op.constant([0, 2, 99, 2, 2])
+ partitions = data_flow_ops.dynamic_partition(
+ data, indices, num_partitions=4)
with self.assertRaisesOpError(r"partitions\[2\] = 99 is not in \[0, 4\)"):
sess.run(partitions)
@@ -98,16 +109,17 @@ class DynamicPartitionTest(tf.test.TestCase):
with self.test_session() as sess:
bad = 17
data = np.zeros(5)
- partitions = tf.dynamic_partition(data, bad, num_partitions=7)
+ partitions = data_flow_ops.dynamic_partition(data, bad, num_partitions=7)
with self.assertRaisesOpError(r"partitions = 17 is not in \[0, 7\)"):
sess.run(partitions)
def testHigherRankIndexOutOfRange(self):
with self.test_session() as sess:
shape = (2, 3)
- indices = tf.placeholder(shape=shape, dtype=np.int32)
+ indices = array_ops.placeholder(shape=shape, dtype=np.int32)
data = np.zeros(shape + (5,))
- partitions = tf.dynamic_partition(data, indices, num_partitions=7)
+ partitions = data_flow_ops.dynamic_partition(
+ data, indices, num_partitions=7)
for i in xrange(2):
for j in xrange(3):
bad = np.zeros(shape, dtype=np.int32)
@@ -117,11 +129,11 @@ class DynamicPartitionTest(tf.test.TestCase):
sess.run(partitions, feed_dict={indices: bad})
def testErrorWrongDimsIndices(self):
- data = tf.constant([[0], [1], [2]])
- indices = tf.constant([[0], [0]])
+ data = constant_op.constant([[0], [1], [2]])
+ indices = constant_op.constant([[0], [0]])
with self.assertRaises(ValueError):
- tf.dynamic_partition(data, indices, num_partitions=4)
+ data_flow_ops.dynamic_partition(data, indices, num_partitions=4)
if __name__ == "__main__":
- tf.test.main()
+ test.main()
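dynamic_partition routes data[i] to output number indices[i], which is where
the expectations in testSimpleOneDimensional come from: data
[0, 13, 2, 39, 4, 17] with indices [0, 0, 2, 3, 2, 1] splits into [0, 13],
[17], [2, 4] and [39]. A sketch with the same values (Session assumed):

    from tensorflow.python.client import session
    from tensorflow.python.framework import constant_op
    from tensorflow.python.ops import data_flow_ops

    data = constant_op.constant([0, 13, 2, 39, 4, 17])
    indices = constant_op.constant([0, 0, 2, 3, 2, 1])
    parts = data_flow_ops.dynamic_partition(data, indices, num_partitions=4)

    with session.Session() as sess:
      print(sess.run(parts))  # [array([ 0, 13]), array([17]),
                              #  array([2, 4]), array([39])]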
diff --git a/tensorflow/python/kernel_tests/dynamic_stitch_op_test.py b/tensorflow/python/kernel_tests/dynamic_stitch_op_test.py
index c34cd2ef66..847fc66a1e 100644
--- a/tensorflow/python/kernel_tests/dynamic_stitch_op_test.py
+++ b/tensorflow/python/kernel_tests/dynamic_stitch_op_test.py
@@ -12,24 +12,29 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for tensorflow.ops.data_flow_ops.dynamic_stitch."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+
+from tensorflow.python.framework import constant_op
+from tensorflow.python.ops import data_flow_ops
+from tensorflow.python.ops import gradients_impl
+import tensorflow.python.ops.data_flow_grad # pylint: disable=unused-import
+from tensorflow.python.platform import test
-class DynamicStitchTest(tf.test.TestCase):
+class DynamicStitchTest(test.TestCase):
def testScalar(self):
with self.test_session():
- indices = [tf.constant(0), tf.constant(1)]
- data = [tf.constant(40), tf.constant(60)]
+ indices = [constant_op.constant(0), constant_op.constant(1)]
+ data = [constant_op.constant(40), constant_op.constant(60)]
for step in -1, 1:
- stitched_t = tf.dynamic_stitch(indices[::step], data)
+ stitched_t = data_flow_ops.dynamic_stitch(indices[::step], data)
stitched_val = stitched_t.eval()
self.assertAllEqual([40, 60][::step], stitched_val)
# Dimension 0 is determined by the max index in indices, so we
@@ -39,11 +44,14 @@ class DynamicStitchTest(tf.test.TestCase):
def testSimpleOneDimensional(self):
with self.test_session():
- indices = [tf.constant([0, 4, 7]),
- tf.constant([1, 6, 2, 3, 5])]
- data = [tf.constant([0, 40, 70]),
- tf.constant([10, 60, 20, 30, 50])]
- stitched_t = tf.dynamic_stitch(indices, data)
+ indices = [
+ constant_op.constant([0, 4, 7]), constant_op.constant([1, 6, 2, 3, 5])
+ ]
+ data = [
+ constant_op.constant([0, 40, 70]),
+ constant_op.constant([10, 60, 20, 30, 50])
+ ]
+ stitched_t = data_flow_ops.dynamic_stitch(indices, data)
stitched_val = stitched_t.eval()
self.assertAllEqual([0, 10, 20, 30, 40, 50, 60, 70], stitched_val)
# Dimension 0 is determined by the max index in indices, so we
@@ -53,9 +61,9 @@ class DynamicStitchTest(tf.test.TestCase):
def testOneListOneDimensional(self):
with self.test_session():
- indices = [tf.constant([1, 6, 2, 3, 5, 0, 4, 7])]
- data = [tf.constant([10, 60, 20, 30, 50, 0, 40, 70])]
- stitched_t = tf.dynamic_stitch(indices, data)
+ indices = [constant_op.constant([1, 6, 2, 3, 5, 0, 4, 7])]
+ data = [constant_op.constant([10, 60, 20, 30, 50, 0, 40, 70])]
+ stitched_t = data_flow_ops.dynamic_stitch(indices, data)
stitched_val = stitched_t.eval()
self.assertAllEqual([0, 10, 20, 30, 40, 50, 60, 70], stitched_val)
# Dimension 0 is determined by the max index in indices, so we
@@ -65,17 +73,19 @@ class DynamicStitchTest(tf.test.TestCase):
def testSimpleTwoDimensional(self):
with self.test_session():
- indices = [tf.constant([0, 4, 7]),
- tf.constant([1, 6]),
- tf.constant([2, 3, 5])]
- data = [tf.constant([[0, 1], [40, 41], [70, 71]]),
- tf.constant([[10, 11], [60, 61]]),
- tf.constant([[20, 21], [30, 31], [50, 51]])]
- stitched_t = tf.dynamic_stitch(indices, data)
+ indices = [
+ constant_op.constant([0, 4, 7]), constant_op.constant([1, 6]),
+ constant_op.constant([2, 3, 5])
+ ]
+ data = [
+ constant_op.constant([[0, 1], [40, 41], [70, 71]]),
+ constant_op.constant([[10, 11], [60, 61]]),
+ constant_op.constant([[20, 21], [30, 31], [50, 51]])
+ ]
+ stitched_t = data_flow_ops.dynamic_stitch(indices, data)
stitched_val = stitched_t.eval()
- self.assertAllEqual(
- [[0, 1], [10, 11], [20, 21], [30, 31],
- [40, 41], [50, 51], [60, 61], [70, 71]], stitched_val)
+ self.assertAllEqual([[0, 1], [10, 11], [20, 21], [30, 31], [40, 41],
+ [50, 51], [60, 61], [70, 71]], stitched_val)
# Dimension 0 is determined by the max index in indices, so we
# can only infer that the output is a matrix with 2 columns and
# some unknown number of rows.
@@ -83,54 +93,72 @@ class DynamicStitchTest(tf.test.TestCase):
def testHigherRank(self):
with self.test_session() as sess:
- indices = [tf.constant(6), tf.constant([4, 1]),
- tf.constant([[5, 2], [0, 3]])]
- data = [tf.constant([61, 62]), tf.constant([[41, 42], [11, 12]]),
- tf.constant([[[51, 52], [21, 22]], [[1, 2], [31, 32]]])]
- stitched_t = tf.dynamic_stitch(indices, data)
+ indices = [
+ constant_op.constant(6), constant_op.constant([4, 1]),
+ constant_op.constant([[5, 2], [0, 3]])
+ ]
+ data = [
+ constant_op.constant([61, 62]),
+ constant_op.constant([[41, 42], [11, 12]]),
+ constant_op.constant([[[51, 52], [21, 22]], [[1, 2], [31, 32]]])
+ ]
+ stitched_t = data_flow_ops.dynamic_stitch(indices, data)
stitched_val = stitched_t.eval()
correct = 10 * np.arange(7)[:, None] + [1, 2]
self.assertAllEqual(correct, stitched_val)
self.assertEqual([None, 2], stitched_t.get_shape().as_list())
# Test gradients
stitched_grad = 7 * stitched_val
- grads = tf.gradients(stitched_t, indices + data, stitched_grad)
+ grads = gradients_impl.gradients(stitched_t, indices + data,
+ stitched_grad)
self.assertEqual(grads[:3], [None] * 3) # Indices have no gradients
for datum, grad in zip(data, sess.run(grads[3:])):
self.assertAllEqual(7 * datum.eval(), grad)
def testErrorIndicesMultiDimensional(self):
- indices = [tf.constant([0, 4, 7]),
- tf.constant([[1, 6, 2, 3, 5]])]
- data = [tf.constant([[0, 40, 70]]),
- tf.constant([10, 60, 20, 30, 50])]
+ indices = [
+ constant_op.constant([0, 4, 7]), constant_op.constant([[1, 6, 2, 3, 5]])
+ ]
+ data = [
+ constant_op.constant([[0, 40, 70]]),
+ constant_op.constant([10, 60, 20, 30, 50])
+ ]
with self.assertRaises(ValueError):
- tf.dynamic_stitch(indices, data)
+ data_flow_ops.dynamic_stitch(indices, data)
def testErrorDataNumDimsMismatch(self):
- indices = [tf.constant([0, 4, 7]),
- tf.constant([1, 6, 2, 3, 5])]
- data = [tf.constant([0, 40, 70]),
- tf.constant([[10, 60, 20, 30, 50]])]
+ indices = [
+ constant_op.constant([0, 4, 7]), constant_op.constant([1, 6, 2, 3, 5])
+ ]
+ data = [
+ constant_op.constant([0, 40, 70]),
+ constant_op.constant([[10, 60, 20, 30, 50]])
+ ]
with self.assertRaises(ValueError):
- tf.dynamic_stitch(indices, data)
+ data_flow_ops.dynamic_stitch(indices, data)
def testErrorDataDimSizeMismatch(self):
- indices = [tf.constant([0, 4, 5]),
- tf.constant([1, 6, 2, 3])]
- data = [tf.constant([[0], [40], [70]]),
- tf.constant([[10, 11], [60, 61], [20, 21], [30, 31]])]
+ indices = [
+ constant_op.constant([0, 4, 5]), constant_op.constant([1, 6, 2, 3])
+ ]
+ data = [
+ constant_op.constant([[0], [40], [70]]),
+ constant_op.constant([[10, 11], [60, 61], [20, 21], [30, 31]])
+ ]
with self.assertRaises(ValueError):
- tf.dynamic_stitch(indices, data)
+ data_flow_ops.dynamic_stitch(indices, data)
def testErrorDataAndIndicesSizeMismatch(self):
- indices = [tf.constant([0, 4, 7]),
- tf.constant([1, 6, 2, 3, 5])]
- data = [tf.constant([0, 40, 70]),
- tf.constant([10, 60, 20, 30])]
+ indices = [
+ constant_op.constant([0, 4, 7]), constant_op.constant([1, 6, 2, 3, 5])
+ ]
+ data = [
+ constant_op.constant([0, 40, 70]),
+ constant_op.constant([10, 60, 20, 30])
+ ]
with self.assertRaises(ValueError):
- tf.dynamic_stitch(indices, data)
+ data_flow_ops.dynamic_stitch(indices, data)
if __name__ == "__main__":
- tf.test.main()
+ test.main()
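dynamic_stitch is the inverse operation: each data[i][j] is scattered to
position indices[i][j] of a single merged tensor. Reusing the values from
testSimpleOneDimensional (Session assumed):

    from tensorflow.python.client import session
    from tensorflow.python.framework import constant_op
    from tensorflow.python.ops import data_flow_ops

    indices = [constant_op.constant([0, 4, 7]),
               constant_op.constant([1, 6, 2, 3, 5])]
    data = [constant_op.constant([0, 40, 70]),
            constant_op.constant([10, 60, 20, 30, 50])]
    stitched = data_flow_ops.dynamic_stitch(indices, data)

    with session.Session():
      print(stitched.eval())  # [ 0 10 20 30 40 50 60 70]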
diff --git a/tensorflow/python/kernel_tests/edit_distance_op_test.py b/tensorflow/python/kernel_tests/edit_distance_op_test.py
index 4662b956cf..12f85af7a5 100644
--- a/tensorflow/python/kernel_tests/edit_distance_op_test.py
+++ b/tensorflow/python/kernel_tests/edit_distance_op_test.py
@@ -12,29 +12,39 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for tensorflow.kernels.edit_distance_op."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import ops
+from tensorflow.python.framework import sparse_tensor
+from tensorflow.python.ops import array_ops
+from tensorflow.python.platform import test
def ConstantOf(x):
x = np.asarray(x)
# Convert to int64 if it's not a string or unicode
- if x.dtype.char not in "SU": x = np.asarray(x, dtype=np.int64)
- return tf.constant(x)
+ if x.dtype.char not in "SU":
+ x = np.asarray(x, dtype=np.int64)
+ return constant_op.constant(x)
-class EditDistanceTest(tf.test.TestCase):
+class EditDistanceTest(test.TestCase):
- def _testEditDistanceST(
- self, hypothesis_st, truth_st, normalize, expected_output,
- expected_shape, expected_err_re=None):
- edit_distance = tf.edit_distance(
+ def _testEditDistanceST(self,
+ hypothesis_st,
+ truth_st,
+ normalize,
+ expected_output,
+ expected_shape,
+ expected_err_re=None):
+ edit_distance = array_ops.edit_distance(
hypothesis=hypothesis_st, truth=truth_st, normalize=normalize)
if expected_err_re is None:
@@ -45,46 +55,49 @@ class EditDistanceTest(tf.test.TestCase):
with self.assertRaisesOpError(expected_err_re):
edit_distance.eval()
- def _testEditDistance(self, hypothesis, truth, normalize,
- expected_output, expected_err_re=None):
+ def _testEditDistance(self,
+ hypothesis,
+ truth,
+ normalize,
+ expected_output,
+ expected_err_re=None):
# Shape inference figures out the shape from the shape variables
# Explicit tuple() needed since zip returns an iterator in Python 3.
expected_shape = [
- max(h, t) for h, t in tuple(zip(hypothesis[2], truth[2]))[:-1]]
+ max(h, t) for h, t in tuple(zip(hypothesis[2], truth[2]))[:-1]
+ ]
# SparseTensorValue inputs.
- with tf.Graph().as_default() as g, self.test_session(g):
+ with ops.Graph().as_default() as g, self.test_session(g):
# hypothesis and truth are (index, value, shape) tuples
self._testEditDistanceST(
- hypothesis_st=tf.SparseTensorValue(
+ hypothesis_st=sparse_tensor.SparseTensorValue(
*[ConstantOf(x) for x in hypothesis]),
- truth_st=tf.SparseTensorValue(*[ConstantOf(x) for x in truth]),
+ truth_st=sparse_tensor.SparseTensorValue(
+ *[ConstantOf(x) for x in truth]),
normalize=normalize,
expected_output=expected_output,
expected_shape=expected_shape,
expected_err_re=expected_err_re)
# SparseTensor inputs.
- with tf.Graph().as_default() as g, self.test_session(g):
+ with ops.Graph().as_default() as g, self.test_session(g):
# hypothesis and truth are (index, value, shape) tuples
self._testEditDistanceST(
- hypothesis_st=tf.SparseTensor(*[ConstantOf(x) for x in hypothesis]),
- truth_st=tf.SparseTensor(*[ConstantOf(x) for x in truth]),
+ hypothesis_st=sparse_tensor.SparseTensor(
+ *[ConstantOf(x) for x in hypothesis]),
+ truth_st=sparse_tensor.SparseTensor(*[ConstantOf(x) for x in truth]),
normalize=normalize,
expected_output=expected_output,
expected_shape=expected_shape,
expected_err_re=expected_err_re)
def testEditDistanceNormalized(self):
- hypothesis_indices = [[0, 0], [0, 1],
- [1, 0], [1, 1]]
- hypothesis_values = [0, 1,
- 1, -1]
+ hypothesis_indices = [[0, 0], [0, 1], [1, 0], [1, 1]]
+ hypothesis_values = [0, 1, 1, -1]
hypothesis_shape = [2, 2]
- truth_indices = [[0, 0],
- [1, 0], [1, 1]]
- truth_values = [0,
- 1, 1]
+ truth_indices = [[0, 0], [1, 0], [1, 1]]
+ truth_values = [0, 1, 1]
truth_shape = [2, 2]
expected_output = [1.0, 0.5]
@@ -95,15 +108,11 @@ class EditDistanceTest(tf.test.TestCase):
expected_output=expected_output)
def testEditDistanceUnnormalized(self):
- hypothesis_indices = [[0, 0],
- [1, 0], [1, 1]]
- hypothesis_values = [10,
- 10, 11]
+ hypothesis_indices = [[0, 0], [1, 0], [1, 1]]
+ hypothesis_values = [10, 10, 11]
hypothesis_shape = [2, 2]
- truth_indices = [[0, 0], [0, 1],
- [1, 0], [1, 1]]
- truth_values = [1, 2,
- 1, -1]
+ truth_indices = [[0, 0], [0, 1], [1, 0], [1, 1]]
+ truth_values = [1, 2, 1, -1]
truth_shape = [2, 3]
expected_output = [2.0, 2.0]
@@ -125,8 +134,7 @@ class EditDistanceTest(tf.test.TestCase):
truth_values = [x for x in "altruistic"] + [x for x in "algorithm"]
truth_shape = [2, 11]
expected_unnormalized = [6.0, 6.0]
- expected_normalized = [6.0/len("altruistic"),
- 6.0/len("algorithm")]
+ expected_normalized = [6.0 / len("altruistic"), 6.0 / len("algorithm")]
self._testEditDistance(
hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
@@ -141,17 +149,16 @@ class EditDistanceTest(tf.test.TestCase):
expected_output=expected_normalized)
def testEditDistance3D(self):
- hypothesis_indices = [[0, 0, 0],
- [1, 0, 0]]
+ hypothesis_indices = [[0, 0, 0], [1, 0, 0]]
hypothesis_values = [0, 1]
hypothesis_shape = [2, 1, 1]
- truth_indices = [[0, 1, 0],
- [1, 0, 0],
- [1, 1, 0]]
+ truth_indices = [[0, 1, 0], [1, 0, 0], [1, 1, 0]]
truth_values = [0, 1, 1]
truth_shape = [2, 2, 1]
- expected_output = [[np.inf, 1.0], # (0,0): no truth, (0,1): no hypothesis
- [0.0, 1.0]] # (1,0): match, (1,1): no hypothesis
+ expected_output = [
+ [np.inf, 1.0], # (0,0): no truth, (0,1): no hypothesis
+ [0.0, 1.0]
+ ] # (1,0): match, (1,1): no hypothesis
self._testEditDistance(
hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
@@ -206,4 +213,4 @@ class EditDistanceTest(tf.test.TestCase):
if __name__ == "__main__":
- tf.test.main()
+ test.main()
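# A minimal sketch (not TensorFlow API) of the normalization these tests
# expect: edit_distance divides the raw Levenshtein distance by the length
# of the corresponding truth sequence, so testEditDistanceNormalized's
# batch yields [1.0, 0.5]. The levenshtein() helper below is hypothetical.
def levenshtein(a, b):
  prev = list(range(len(b) + 1))
  for i, ca in enumerate(a, 1):
    cur = [i]
    for j, cb in enumerate(b, 1):
      cur.append(min(prev[j] + 1, cur[j - 1] + 1, prev[j - 1] + (ca != cb)))
    prev = cur
  return prev[-1]

# Entry 0: hypothesis [0, 1] vs truth [0]; entry 1: [1, -1] vs [1, 1].
assert levenshtein([0, 1], [0]) / len([0]) == 1.0
assert levenshtein([1, -1], [1, 1]) / len([1, 1]) == 0.5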
diff --git a/tensorflow/python/kernel_tests/embedding_ops_test.py b/tensorflow/python/kernel_tests/embedding_ops_test.py
index 2ae7f57303..701a97adbb 100644
--- a/tensorflow/python/kernel_tests/embedding_ops_test.py
+++ b/tensorflow/python/kernel_tests/embedding_ops_test.py
@@ -12,8 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Functional tests for ops used with embeddings."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
@@ -22,7 +22,24 @@ import itertools
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
-import tensorflow as tf
+
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
+from tensorflow.python.framework import sparse_tensor
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import data_flow_ops
+from tensorflow.python.ops import embedding_ops
+from tensorflow.python.ops import gradient_checker
+from tensorflow.python.ops import math_ops
+from tensorflow.python.ops import partitioned_variables
+from tensorflow.python.ops import state_ops
+from tensorflow.python.ops import variable_scope
+from tensorflow.python.ops import variables
+import tensorflow.python.ops.data_flow_grad # pylint: disable=unused-import
+from tensorflow.python.platform import test
+from tensorflow.python.platform import tf_logging
+from tensorflow.python.util import compat
def _AsLong(array):
@@ -30,9 +47,9 @@ def _AsLong(array):
return [int(x) for x in array]
-class ScatterAddSubTest(tf.test.TestCase):
+class ScatterAddSubTest(test.TestCase):
- def _TestCase(self, shape, indices, scatter_op=tf.scatter_add):
+ def _TestCase(self, shape, indices, scatter_op=state_ops.scatter_add):
"""Run a random test case with the given shape and indices.
Args:
@@ -50,17 +67,17 @@ class ScatterAddSubTest(tf.test.TestCase):
vals_shape = [len(indices)] + shape[1:]
vals_init = np.random.rand(*vals_shape).astype("f")
v_i = [float(x) for x in vals_init.ravel()]
- p = tf.Variable(p_init)
- vals = tf.constant(v_i, shape=vals_shape, name="vals")
- ind = tf.constant(indices, dtype=tf.int32)
+ p = variables.Variable(p_init)
+ vals = constant_op.constant(v_i, shape=vals_shape, name="vals")
+ ind = constant_op.constant(indices, dtype=dtypes.int32)
p2 = scatter_op(p, ind, vals, name="updated_p")
# p = init
- tf.global_variables_initializer().run()
+ variables.global_variables_initializer().run()
# p += vals
result = p2.eval()
# Compute the expected 'p' using numpy operations.
for i, ind in enumerate(indices):
- if scatter_op == tf.scatter_add:
+ if scatter_op == state_ops.scatter_add:
p_init.reshape(shape[0], -1)[ind, :] += (
vals_init.reshape(vals_shape[0], -1)[i, :])
else:
@@ -90,35 +107,38 @@ class ScatterAddSubTest(tf.test.TestCase):
for _ in range(5):
shape = np.random.randint(1, 20, size=4)
indices = np.random.randint(shape[0], size=2 * shape[0])
- self._TestCase(_AsLong(list(shape)), list(indices),
- tf.scatter_sub)
+ self._TestCase(_AsLong(list(shape)), list(indices), state_ops.scatter_sub)
def testWrongShape(self):
# Indices and values mismatch.
- var = tf.Variable(tf.zeros(shape=[1024, 64, 64], dtype=tf.float32))
- indices = tf.placeholder(tf.int32, shape=[32])
- values = tf.placeholder(tf.float32, shape=[33, 64, 64])
+ var = variables.Variable(
+ array_ops.zeros(
+ shape=[1024, 64, 64], dtype=dtypes.float32))
+ indices = array_ops.placeholder(dtypes.int32, shape=[32])
+ values = array_ops.placeholder(dtypes.float32, shape=[33, 64, 64])
with self.assertRaises(ValueError):
- tf.scatter_add(var, indices, values)
+ state_ops.scatter_add(var, indices, values)
# Var and values mismatch.
- values = tf.placeholder(tf.float32, shape=[32, 64, 63])
+ values = array_ops.placeholder(dtypes.float32, shape=[32, 64, 63])
with self.assertRaises(ValueError):
- tf.scatter_add(var, indices, values)
+ state_ops.scatter_add(var, indices, values)
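# A NumPy sketch of the semantics the expected-value loop in _TestCase
# above mirrors: scatter_add applies p[ind] += val row by row, so
# duplicate indices accumulate (and scatter_sub subtracts instead).
import numpy as np

p = np.zeros((4, 2), dtype=np.float32)
indices = [1, 3, 1]                         # index 1 appears twice
vals = np.ones((3, 2), dtype=np.float32)
for i, ind in enumerate(indices):
  p[ind, :] += vals[i, :]
assert (p[1] == 2.0).all() and (p[3] == 1.0).all() and (p[0] == 0.0).all()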
def _PName(param_id):
return "p" + str(param_id)
-def _EmbeddingParams(num_shards, vocab_size,
- dtype=tf.float32,
+def _EmbeddingParams(num_shards,
+ vocab_size,
+ dtype=dtypes.float32,
shape=None,
use_shapeless_placeholder=False):
p = []
params = {}
feed_dict = {}
- if not shape: shape = [10]
+ if not shape:
+ shape = [10]
for i in range(num_shards):
shard_shape = [vocab_size // num_shards] + shape
if i < vocab_size % num_shards: # Excess goes evenly on the first shards
@@ -127,32 +147,38 @@ def _EmbeddingParams(num_shards, vocab_size,
param_name = _PName(i)
if use_shapeless_placeholder:
- param = tf.placeholder(dtype, shape=None, name=param_name)
+ param = array_ops.placeholder(dtype, shape=None, name=param_name)
else:
- param = tf.constant(1.0, shape=shard_shape, dtype=dtype, name=param_name)
+ param = constant_op.constant(
+ 1.0, shape=shard_shape, dtype=dtype, name=param_name)
p.append(param)
- np_type = "f" if dtype == tf.float32 else "d"
+ np_type = "f" if dtype == dtypes.float32 else "d"
val = (np.random.rand(*shard_shape).astype(np_type)) + 1
params[param_name + ":0"] = val
feed_dict[param.name] = val
return p, params, feed_dict
-def _EmbeddingParamsAsPartitionedVariable(num_shards, vocab_size,
- dtype=tf.float32, shape=None):
+def _EmbeddingParamsAsPartitionedVariable(num_shards,
+ vocab_size,
+ dtype=dtypes.float32,
+ shape=None):
p, params, feed_dict = _EmbeddingParams(
num_shards, vocab_size, dtype=dtype, shape=shape)
shape = shape or [10]
- partitioned_variable = tf.get_variable(
+ partitioned_variable = variable_scope.get_variable(
"p",
shape=[vocab_size] + shape,
- initializer=tf.concat_v2([params[p_i.name] for p_i in p], 0),
- partitioner=tf.min_max_variable_partitioner(
+ initializer=array_ops.concat_v2([params[p_i.name] for p_i in p], 0),
+ partitioner=partitioned_variables.min_max_variable_partitioner(
max_partitions=num_shards, min_slice_size=1))
return p, partitioned_variable, params, feed_dict
-def _EmbeddingResult(params, id_vals, num_shards, vocab_size,
+def _EmbeddingResult(params,
+ id_vals,
+ num_shards,
+ vocab_size,
partition_strategy="mod",
weight_vals=None):
if weight_vals is None:
@@ -165,7 +191,7 @@ def _EmbeddingResult(params, id_vals, num_shards, vocab_size,
value_aggregation = None
weight_aggregation = None
squared_weight_aggregation = None
- if isinstance(ids, tf.compat.integral_types):
+ if isinstance(ids, compat.integral_types):
ids = [ids]
wts = [wts]
for i, weight_value in zip(ids, wts):
@@ -181,8 +207,8 @@ def _EmbeddingResult(params, id_vals, num_shards, vocab_size,
else:
partition = extras + (i - threshold) // ids_per_partition
offset = (i - threshold) % ids_per_partition
- val = np.copy(
- params[_PName(partition) + ":0"][offset, :]) * weight_value
+ val = np.copy(params[_PName(partition) + ":0"][
+ offset, :]) * weight_value
else:
assert False
if value_aggregation is None:
@@ -206,7 +232,7 @@ def _EmbeddingResult(params, id_vals, num_shards, vocab_size,
return values, weights, weights_squared
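# A sketch of the id -> (shard, offset) mapping _EmbeddingResult reproduces.
# With partition_strategy="mod", id i lands in shard i % num_shards; with
# "div", ids form contiguous runs and the first vocab_size % num_shards
# shards each take one extra id. The _Locate helper here is illustrative,
# not part of this change.
def _Locate(i, vocab_size, num_shards, strategy="mod"):
  ids_per_partition, extras = divmod(vocab_size, num_shards)
  if strategy == "mod":
    return i % num_shards, i // num_shards
  threshold = extras * (ids_per_partition + 1)
  if i < threshold:
    return i // (ids_per_partition + 1), i % (ids_per_partition + 1)
  return (extras + (i - threshold) // ids_per_partition,
          (i - threshold) % ids_per_partition)

# vocab_size=10, num_shards=3: "div" shards hold ids [0..3], [4..6], [7..9].
assert _Locate(5, 10, 3, "mod") == (2, 1)
assert _Locate(5, 10, 3, "div") == (1, 1)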
-class EmbeddingLookupTest(tf.test.TestCase):
+class EmbeddingLookupTest(test.TestCase):
# This test looks up [0, 0] in a parameter matrix sharded 2 ways. Since
# both the ids are in the first shard, one of the resulting lookup
@@ -219,9 +245,9 @@ class EmbeddingLookupTest(tf.test.TestCase):
p, params, feed_dict = _EmbeddingParams(num_shards, vocab_size)
id_vals = np.array([0, 0])
- ids = tf.constant(list(id_vals), dtype=tf.int32)
+ ids = constant_op.constant(list(id_vals), dtype=dtypes.int32)
print("Construct ids", ids.get_shape())
- embedding = tf.nn.embedding_lookup(p, ids)
+ embedding = embedding_ops.embedding_lookup(p, ids)
tf_result = embedding.eval(feed_dict=feed_dict)
np_result, _, _ = _EmbeddingResult(params, id_vals, num_shards, vocab_size)
@@ -230,22 +256,26 @@ class EmbeddingLookupTest(tf.test.TestCase):
def testMaxNorm(self):
with self.test_session():
- embeddings = tf.constant([[2.0]])
+ embeddings = constant_op.constant([[2.0]])
- ids = tf.constant([0], dtype=tf.int32)
- embedding = tf.nn.embedding_lookup([embeddings], ids, max_norm=1.0)
+ ids = constant_op.constant([0], dtype=dtypes.int32)
+ embedding = embedding_ops.embedding_lookup(
+ [embeddings], ids, max_norm=1.0)
self.assertAllEqual(embedding.eval(), [[1.0]])
def testMaxNormNontrivial(self):
with self.test_session():
- embeddings = tf.constant([[2.0, 4.0], [3.0, 1.0]])
+ embeddings = constant_op.constant([[2.0, 4.0], [3.0, 1.0]])
- ids = tf.constant([0, 1], dtype=tf.int32)
- embedding = tf.nn.embedding_lookup([embeddings], ids, max_norm=2.0)
+ ids = constant_op.constant([0, 1], dtype=dtypes.int32)
+ embedding = embedding_ops.embedding_lookup(
+ [embeddings], ids, max_norm=2.0)
- norms = tf.sqrt(tf.reduce_sum(embeddings * embeddings, axis=1))
- normalized = embeddings/tf.stack([norms, norms], axis=1)
+ norms = math_ops.sqrt(
+ math_ops.reduce_sum(
+ embeddings * embeddings, axis=1))
+ normalized = embeddings / array_ops.stack([norms, norms], axis=1)
self.assertAllEqual(embedding.eval(), 2 * normalized.eval())
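# A NumPy sketch of the max_norm behavior checked above: any looked-up row
# whose L2 norm exceeds max_norm is rescaled to have norm exactly max_norm,
# which is why the assertion compares against 2 * normalized.
import numpy as np

embeddings = np.array([[2.0, 4.0], [3.0, 1.0]])
max_norm = 2.0
norms = np.sqrt((embeddings ** 2).sum(axis=1, keepdims=True))
clipped = np.where(norms > max_norm, embeddings * (max_norm / norms),
                   embeddings)
assert np.allclose(np.sqrt((clipped ** 2).sum(axis=1)), [2.0, 2.0])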
def testSimpleShardedPartitionedVariable(self):
@@ -256,10 +286,10 @@ class EmbeddingLookupTest(tf.test.TestCase):
num_shards, vocab_size)
id_vals = np.array([0, 0])
- ids = tf.constant(list(id_vals), dtype=tf.int32)
+ ids = constant_op.constant(list(id_vals), dtype=dtypes.int32)
print("Construct ids", ids.get_shape())
- embedding = tf.nn.embedding_lookup(p_variable, ids)
- tf.global_variables_initializer().run()
+ embedding = embedding_ops.embedding_lookup(p_variable, ids)
+ variables.global_variables_initializer().run()
params_values = [params[p_i.name] for p_i in p]
# Test that the PartitionedVariable components equal the list in p
p_var_val = sess.run(list(p_variable))
@@ -284,9 +314,9 @@ class EmbeddingLookupTest(tf.test.TestCase):
# num_vals > vocab_size, this ought to have repetitions, so
# will test that aspect.
id_vals = np.random.randint(vocab_size, size=num_vals)
- ids = tf.constant(list(id_vals), dtype=tf.int32)
+ ids = constant_op.constant(list(id_vals), dtype=dtypes.int32)
- embedding = tf.nn.embedding_lookup(p, ids)
+ embedding = embedding_ops.embedding_lookup(p, ids)
tf_result = embedding.eval(feed_dict=feed_dict)
np_result, _, _ = _EmbeddingResult(params, id_vals, num_shards, vocab_size)
self.assertAllEqual(np_result, tf_result)
@@ -306,9 +336,9 @@ class EmbeddingLookupTest(tf.test.TestCase):
# num_vals > vocab_size, this ought to have repetitions, so
# will test that aspect.
id_vals = np.random.randint(vocab_size, size=num_vals)
- ids = tf.constant(list(id_vals), dtype=tf.int64)
+ ids = constant_op.constant(list(id_vals), dtype=dtypes.int64)
- embedding = tf.nn.embedding_lookup(p, ids)
+ embedding = embedding_ops.embedding_lookup(p, ids)
tf_result = embedding.eval(feed_dict=feed_dict)
np_result, _, _ = _EmbeddingResult(params, id_vals, num_shards, vocab_size)
self.assertAllEqual(np_result, tf_result)
@@ -328,9 +358,10 @@ class EmbeddingLookupTest(tf.test.TestCase):
# num_vals > vocab_size, this ought to have repetitions, so
# will test that aspect.
id_vals = np.random.randint(vocab_size, size=num_vals)
- ids = tf.constant(list(id_vals), dtype=tf.int32)
+ ids = constant_op.constant(list(id_vals), dtype=dtypes.int32)
- embedding = tf.nn.embedding_lookup(p, ids, partition_strategy="div")
+ embedding = embedding_ops.embedding_lookup(
+ p, ids, partition_strategy="div")
tf_result = embedding.eval(feed_dict=feed_dict)
np_result, _, _ = _EmbeddingResult(
params, id_vals, num_shards, vocab_size, partition_strategy="div")
@@ -352,9 +383,9 @@ class EmbeddingLookupTest(tf.test.TestCase):
# num_vals > vocab_size, this ought to have repetitions, so
# will test that aspect.
id_vals = np.random.randint(vocab_size, size=num_vals)
- ids = tf.constant(list(id_vals), dtype=tf.int32)
- tf.global_variables_initializer().run()
- embedding = tf.nn.embedding_lookup(
+ ids = constant_op.constant(list(id_vals), dtype=dtypes.int32)
+ variables.global_variables_initializer().run()
+ embedding = embedding_ops.embedding_lookup(
p_variable, ids, partition_strategy="div")
tf_result = embedding.eval(feed_dict=feed_dict)
np_result, _, _ = _EmbeddingResult(
@@ -376,9 +407,10 @@ class EmbeddingLookupTest(tf.test.TestCase):
# num_vals > vocab_size, this ought to have repetitions, so
# will test that aspect.
id_vals = np.random.randint(vocab_size, size=num_vals)
- ids = tf.constant(list(id_vals), dtype=tf.int64)
+ ids = constant_op.constant(list(id_vals), dtype=dtypes.int64)
- embedding = tf.nn.embedding_lookup(p, ids, partition_strategy="div")
+ embedding = embedding_ops.embedding_lookup(
+ p, ids, partition_strategy="div")
tf_result = embedding.eval(feed_dict=feed_dict)
np_result, _, _ = _EmbeddingResult(
params, id_vals, num_shards, vocab_size, partition_strategy="div")
@@ -402,9 +434,10 @@ class EmbeddingLookupTest(tf.test.TestCase):
# num_vals > vocab_size, this ought to have repetitions, so
# will test that aspect.
id_vals = np.random.randint(vocab_size, size=num_vals)
- ids = tf.constant(list(id_vals), dtype=tf.int64)
+ ids = constant_op.constant(list(id_vals), dtype=dtypes.int64)
- embedding = tf.nn.embedding_lookup(p, ids, partition_strategy="div")
+ embedding = embedding_ops.embedding_lookup(
+ p, ids, partition_strategy="div")
tf_result = embedding.eval(feed_dict=feed_dict)
np_result, _, _ = _EmbeddingResult(
params, id_vals, num_shards, vocab_size, partition_strategy="div")
@@ -414,62 +447,61 @@ class EmbeddingLookupTest(tf.test.TestCase):
vocab_size = 9
num_ids = 10
id_vals = list(np.random.randint(vocab_size, size=num_ids))
- tf.logging.vlog(1, id_vals)
+ tf_logging.vlog(1, id_vals)
for ids_shape in [(10,), (2, 5)]:
for num_shards in [1, 3]:
with self.test_session():
- ids = tf.constant(id_vals, shape=ids_shape, dtype=tf.int32)
- x, params, _ = _EmbeddingParams(
- num_shards, vocab_size, shape=[2])
- y = tf.nn.embedding_lookup(x, ids)
+ ids = constant_op.constant(
+ id_vals, shape=ids_shape, dtype=dtypes.int32)
+ x, params, _ = _EmbeddingParams(num_shards, vocab_size, shape=[2])
+ y = embedding_ops.embedding_lookup(x, ids)
y_shape = [num_ids] + list(params[_PName(0) + ":0"].shape[1:])
x_name = [_PName(i) for i in range(num_shards)]
x_init_value = [params[x_n + ":0"] for x_n in x_name]
x_shape = [i.shape for i in x_init_value]
- err = tf.test.compute_gradient_error(x,
- x_shape,
- y,
- y_shape,
- x_init_value=x_init_value)
+ err = gradient_checker.compute_gradient_error(
+ x, x_shape, y, y_shape, x_init_value=x_init_value)
self.assertLess(err, 1e-4)
def testGradientsEmbeddingLookupWithComputedParams(self):
vocab_size = 9
num_ids = 5
id_vals = list(np.random.randint(vocab_size, size=num_ids))
- tf.logging.vlog(1, id_vals)
+ tf_logging.vlog(1, id_vals)
for num_shards in [1, 3]:
with self.test_session():
- ids = tf.constant(id_vals, dtype=tf.int32)
- x, params, _ = _EmbeddingParams(
- num_shards, vocab_size, shape=[2])
+ ids = constant_op.constant(id_vals, dtype=dtypes.int32)
+ x, params, _ = _EmbeddingParams(num_shards, vocab_size, shape=[2])
# This will force a conversion from IndexedSlices to Tensor.
- x_squared = [tf.square(elem) for elem in x]
- y = tf.nn.embedding_lookup(x_squared, ids)
+ x_squared = [math_ops.square(elem) for elem in x]
+ y = embedding_ops.embedding_lookup(x_squared, ids)
y_shape = [num_ids] + list(params[_PName(0) + ":0"].shape[1:])
x_name = [_PName(i) for i in range(num_shards)]
x_init_value = [params[x_n + ":0"] for x_n in x_name]
x_shape = [i.shape for i in x_init_value]
- err = tf.test.compute_gradient_error(x,
- x_shape,
- y,
- y_shape,
- x_init_value=x_init_value)
+ err = gradient_checker.compute_gradient_error(
+ x, x_shape, y, y_shape, x_init_value=x_init_value)
self.assertLess(err, 1e-3)
def testConstructionNonSharded(self):
- with tf.Graph().as_default():
- p = tf.Variable(tf.zeros(shape=[100, 100], dtype=tf.float32))
- ids = tf.constant([0, 1, 1, 7], dtype=tf.int32)
- tf.nn.embedding_lookup([p], ids)
+ with ops.Graph().as_default():
+ p = variables.Variable(
+ array_ops.zeros(
+ shape=[100, 100], dtype=dtypes.float32))
+ ids = constant_op.constant([0, 1, 1, 7], dtype=dtypes.int32)
+ embedding_ops.embedding_lookup([p], ids)
def testConstructionSharded(self):
- with tf.Graph().as_default():
+ with ops.Graph().as_default():
p = []
for _ in range(2):
- p += [tf.Variable(tf.zeros(shape=[100, 100], dtype=tf.float32))]
- ids = tf.constant([0, 1, 1, 17], dtype=tf.int32)
- tf.nn.embedding_lookup(p, ids)
+ p += [
+ variables.Variable(
+ array_ops.zeros(
+ shape=[100, 100], dtype=dtypes.float32))
+ ]
+ ids = constant_op.constant([0, 1, 1, 17], dtype=dtypes.int32)
+ embedding_ops.embedding_lookup(p, ids)
def testHigherRank(self):
np.random.seed(8)
@@ -477,21 +509,22 @@ class EmbeddingLookupTest(tf.test.TestCase):
for params_shape in (12,), (6, 3):
params = np.random.randn(*params_shape)
for ids_shape in (3, 2), (4, 3):
- ids = np.random.randint(params.shape[0],
- size=np.prod(ids_shape)).reshape(ids_shape)
+ ids = np.random.randint(
+ params.shape[0], size=np.prod(ids_shape)).reshape(ids_shape)
# Compare nonsharded to gather
- simple = tf.nn.embedding_lookup(params, ids).eval()
- self.assertAllEqual(simple, tf.gather(params, ids).eval())
+ simple = embedding_ops.embedding_lookup(params, ids).eval()
+ self.assertAllEqual(simple, array_ops.gather(params, ids).eval())
# Run a few random sharded versions
for procs in 1, 2, 3:
- stride = procs * tf.range(params.shape[0] // procs)
- split_params = [tf.gather(params, stride + p)
- for p in xrange(procs)]
- sharded = tf.nn.embedding_lookup(split_params, ids).eval()
+ stride = procs * math_ops.range(params.shape[0] // procs)
+ split_params = [
+ array_ops.gather(params, stride + p) for p in xrange(procs)
+ ]
+ sharded = embedding_ops.embedding_lookup(split_params, ids).eval()
self.assertAllEqual(simple, sharded)
-class EmbeddingLookupSparseTest(tf.test.TestCase):
+class EmbeddingLookupSparseTest(test.TestCase):
def _RandomIdsAndWeights(self, batch_size, vocab_size):
max_val_per_entry = 6
@@ -509,14 +542,14 @@ class EmbeddingLookupSparseTest(tf.test.TestCase):
shape = [batch_size, max_val_per_entry]
- sp_ids = tf.SparseTensor(
- tf.constant(indices, tf.int64),
- tf.constant(ids, tf.int32),
- tf.constant(shape, tf.int64))
- sp_weights = tf.SparseTensor(
- tf.constant(indices, tf.int64),
- tf.constant(weights, tf.float32),
- tf.constant(shape, tf.int64))
+ sp_ids = sparse_tensor.SparseTensor(
+ constant_op.constant(indices, dtypes.int64),
+ constant_op.constant(ids, dtypes.int32),
+ constant_op.constant(shape, dtypes.int64))
+ sp_weights = sparse_tensor.SparseTensor(
+ constant_op.constant(indices, dtypes.int64),
+ constant_op.constant(weights, dtypes.float32),
+ constant_op.constant(shape, dtypes.int64))
return sp_ids, sp_weights, ids, weights, vals_per_batch_entry
@@ -524,7 +557,7 @@ class EmbeddingLookupSparseTest(tf.test.TestCase):
grouped_vals = []
index = 0
for num_val in vals_per_batch_entry:
- grouped_vals.append(list(vals[index: (index + num_val)]))
+ grouped_vals.append(list(vals[index:(index + num_val)]))
index += num_val
return grouped_vals
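# A sketch of the three combiners these sparse tests sweep. For one batch
# entry with embedding rows e_i and weights w_i:
#   "sum"   -> sum_i w_i * e_i
#   "mean"  -> sum_i w_i * e_i / sum_i w_i
#   "sqrtn" -> sum_i w_i * e_i / sqrt(sum_i w_i ** 2)
import numpy as np

rows = np.array([[1.0, 2.0], [3.0, 4.0]])
weights = np.array([2.0, 1.0])
weighted_sum = (weights[:, None] * rows).sum(axis=0)     # "sum"
mean = weighted_sum / weights.sum()                      # "mean"
sqrtn = weighted_sum / np.sqrt((weights ** 2).sum())     # "sqrtn"
assert np.allclose(weighted_sum, [5.0, 8.0])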
@@ -543,17 +576,16 @@ class EmbeddingLookupSparseTest(tf.test.TestCase):
np.ones(np.sum(vals_per_batch_entry)), vals_per_batch_entry)
for num_shards, combiner, dtype, ignore_weights in itertools.product(
- [1, 5],
- ["sum", "mean", "sqrtn"],
- [tf.float32, tf.float64],
+ [1, 5], ["sum", "mean", "sqrtn"], [dtypes.float32, dtypes.float64],
[True, False]):
with self.test_session():
- p, params, feed_dict = _EmbeddingParams(num_shards, vocab_size,
- shape=param_shape,
- dtype=dtype)
- embedding_sum = tf.nn.embedding_lookup_sparse(
- p, sp_ids, None if ignore_weights else sp_weights,
+ p, params, feed_dict = _EmbeddingParams(
+ num_shards, vocab_size, shape=param_shape, dtype=dtype)
+ embedding_sum = embedding_ops.embedding_lookup_sparse(
+ p,
+ sp_ids,
+ None if ignore_weights else sp_weights,
combiner=combiner)
self.assertEqual(embedding_sum.get_shape().as_list(),
@@ -562,9 +594,12 @@ class EmbeddingLookupSparseTest(tf.test.TestCase):
tf_embedding_sum = embedding_sum.eval(feed_dict=feed_dict)
np_embedding_sum, np_weight_sum, np_weight_sq_sum = _EmbeddingResult(
- params, grouped_ids, num_shards, vocab_size,
- weight_vals=grouped_ignored_weights
- if ignore_weights else grouped_weights)
+ params,
+ grouped_ids,
+ num_shards,
+ vocab_size,
+ weight_vals=grouped_ignored_weights if ignore_weights else
+ grouped_weights)
if combiner == "mean":
np_embedding_sum /= np.reshape(np_weight_sum, (batch_size, 1, 1))
if combiner == "sqrtn":
@@ -580,82 +615,98 @@ class EmbeddingLookupSparseTest(tf.test.TestCase):
self._RandomIdsAndWeights(batch_size, vocab_size))
for num_shards, combiner, dtype, ignore_weights in itertools.product(
- [1, 3],
- ["sum", "mean", "sqrtn"],
- [tf.float32, tf.float64],
+ [1, 3], ["sum", "mean", "sqrtn"], [dtypes.float32, dtypes.float64],
[True, False]):
with self.test_session():
- x, params, _ = _EmbeddingParams(num_shards, vocab_size,
- shape=param_shape,
- dtype=dtype)
+ x, params, _ = _EmbeddingParams(
+ num_shards, vocab_size, shape=param_shape, dtype=dtype)
- y = tf.nn.embedding_lookup_sparse(
- x, sp_ids, None if ignore_weights else sp_weights,
+ y = embedding_ops.embedding_lookup_sparse(
+ x,
+ sp_ids,
+ None if ignore_weights else sp_weights,
combiner=combiner)
x_name = [_PName(i) for i in range(num_shards)]
x_init_value = [params[x_n + ":0"] for x_n in x_name]
x_shape = [i.shape for i in x_init_value]
y_shape = [batch_size] + list(params[_PName(0) + ":0"].shape[1:])
- err = tf.test.compute_gradient_error(x,
- x_shape,
- y,
- y_shape,
- x_init_value=x_init_value)
- self.assertLess(err, 1e-5 if dtype == tf.float64 else 2e-3)
+ err = gradient_checker.compute_gradient_error(
+ x, x_shape, y, y_shape, x_init_value=x_init_value)
+ self.assertLess(err, 1e-5 if dtype == dtypes.float64 else 2e-3)
def testIncompatibleShapes(self):
with self.test_session():
- x, _, _ = _EmbeddingParams(1, 10, dtype=tf.float32)
- sp_ids = tf.SparseTensor(
- tf.constant([[0, 0], [0, 1], [1, 0]], tf.int64),
- tf.constant([0, 1, 2], tf.int32),
- tf.constant([2, 2], tf.int64))
- sp_weights = tf.SparseTensor(
- tf.constant([[0, 0], [0, 1]], tf.int64),
- tf.constant([12.0, 5.0], tf.float32),
- tf.constant([1, 2], tf.int64))
+ x, _, _ = _EmbeddingParams(1, 10, dtype=dtypes.float32)
+ sp_ids = sparse_tensor.SparseTensor(
+ constant_op.constant([[0, 0], [0, 1], [1, 0]], dtypes.int64),
+ constant_op.constant([0, 1, 2], dtypes.int32),
+ constant_op.constant([2, 2], dtypes.int64))
+ sp_weights = sparse_tensor.SparseTensor(
+ constant_op.constant([[0, 0], [0, 1]], dtypes.int64),
+ constant_op.constant([12.0, 5.0], dtypes.float32),
+ constant_op.constant([1, 2], dtypes.int64))
with self.assertRaises(ValueError):
- tf.nn.embedding_lookup_sparse(x, sp_ids, sp_weights, combiner="mean")
+ embedding_ops.embedding_lookup_sparse(
+ x, sp_ids, sp_weights, combiner="mean")
-class DynamicStitchOpTest(tf.test.TestCase):
+class DynamicStitchOpTest(test.TestCase):
def testCint32Cpu(self):
with self.test_session(use_gpu=False):
- indices = [tf.convert_to_tensor([0, 1, 2]), tf.convert_to_tensor([2, 3])]
- values = [tf.convert_to_tensor([12, 23, 34]), tf.convert_to_tensor([1, 2])]
+ indices = [
+ ops.convert_to_tensor([0, 1, 2]), ops.convert_to_tensor([2, 3])
+ ]
+ values = [
+ ops.convert_to_tensor([12, 23, 34]), ops.convert_to_tensor([1, 2])
+ ]
self.assertAllEqual(
- tf.dynamic_stitch(indices, values).eval(), [12, 23, 1, 2])
+ data_flow_ops.dynamic_stitch(indices, values).eval(), [12, 23, 1, 2])
def testCint32Gpu(self):
with self.test_session(use_gpu=True):
- indices = [tf.convert_to_tensor([0, 1, 2]), tf.convert_to_tensor([2, 3])]
- values = [tf.convert_to_tensor([12, 23, 34]), tf.convert_to_tensor([1, 2])]
+ indices = [
+ ops.convert_to_tensor([0, 1, 2]), ops.convert_to_tensor([2, 3])
+ ]
+ values = [
+ ops.convert_to_tensor([12, 23, 34]), ops.convert_to_tensor([1, 2])
+ ]
self.assertAllEqual(
- tf.dynamic_stitch(indices, values).eval(), [12, 23, 1, 2])
+ data_flow_ops.dynamic_stitch(indices, values).eval(), [12, 23, 1, 2])
def testInt32Cpu(self):
with self.test_session(use_gpu=False):
- indices = [tf.convert_to_tensor([0, 1, 2]), tf.convert_to_tensor([2, 3])]
- values = [tf.convert_to_tensor([12, 23, 34]), tf.convert_to_tensor([1, 2])]
+ indices = [
+ ops.convert_to_tensor([0, 1, 2]), ops.convert_to_tensor([2, 3])
+ ]
+ values = [
+ ops.convert_to_tensor([12, 23, 34]), ops.convert_to_tensor([1, 2])
+ ]
self.assertAllEqual(
- tf.dynamic_stitch(indices, values).eval(), [12, 23, 1, 2])
+ data_flow_ops.dynamic_stitch(indices, values).eval(), [12, 23, 1, 2])
def testInt32Gpu(self):
with self.test_session(use_gpu=True):
- indices = [tf.convert_to_tensor([0, 1, 2]), tf.convert_to_tensor([2, 3])]
- values = [tf.convert_to_tensor([12, 23, 34]), tf.convert_to_tensor([1, 2])]
+ indices = [
+ ops.convert_to_tensor([0, 1, 2]), ops.convert_to_tensor([2, 3])
+ ]
+ values = [
+ ops.convert_to_tensor([12, 23, 34]), ops.convert_to_tensor([1, 2])
+ ]
self.assertAllEqual(
- tf.dynamic_stitch(indices, values).eval(), [12, 23, 1, 2])
+ data_flow_ops.dynamic_stitch(indices, values).eval(), [12, 23, 1, 2])
def testSumGradArgs(self):
with self.test_session(use_gpu=False):
- indices = [tf.convert_to_tensor([0, 1, 2, 3]),
- tf.convert_to_tensor([2, 3])]
- values = [tf.convert_to_tensor([2, 3, 5, 7]), tf.convert_to_tensor([1, 1])]
+ indices = [
+ ops.convert_to_tensor([0, 1, 2, 3]), ops.convert_to_tensor([2, 3])
+ ]
+ values = [
+ ops.convert_to_tensor([2, 3, 5, 7]), ops.convert_to_tensor([1, 1])
+ ]
self.assertAllEqual(
- tf.dynamic_stitch(indices, values).eval(), [2, 3, 1, 1])
+ data_flow_ops.dynamic_stitch(indices, values).eval(), [2, 3, 1, 1])
# We expect that the values are merged in order.
def testStitchOrder(self):
@@ -664,12 +715,12 @@ class DynamicStitchOpTest(tf.test.TestCase):
np_values = []
values = []
for _ in range(10):
- indices.extend([tf.convert_to_tensor(np.arange(100).astype(np.int32))])
+ indices.extend([ops.convert_to_tensor(np.arange(100).astype(np.int32))])
np_values.extend([np.random.uniform(size=100)])
- values.extend([tf.convert_to_tensor(np_values[-1])])
- stitched = tf.dynamic_stitch(indices, values).eval()
+ values.extend([ops.convert_to_tensor(np_values[-1])])
+ stitched = data_flow_ops.dynamic_stitch(indices, values).eval()
self.assertAllEqual(np_values[-1], stitched)
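# A NumPy sketch of the merge these tests check: dynamic_stitch writes each
# values[i][j] into out[indices[i][j]], and at duplicate indices the later
# tensor wins, which is exactly what testStitchOrder relies on.
import numpy as np

indices = [np.array([0, 1, 2]), np.array([2, 3])]
values = [np.array([12, 23, 34]), np.array([1, 2])]
out = np.zeros(4, dtype=np.int64)
for idx, val in zip(indices, values):
  out[idx] = val                      # the second write to slot 2 wins
assert (out == [12, 23, 1, 2]).all()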
if __name__ == "__main__":
- tf.test.main()
+ test.main()
diff --git a/tensorflow/python/kernel_tests/extract_image_patches_grad_test.py b/tensorflow/python/kernel_tests/extract_image_patches_grad_test.py
index e9445009ad..60090a1510 100644
--- a/tensorflow/python/kernel_tests/extract_image_patches_grad_test.py
+++ b/tensorflow/python/kernel_tests/extract_image_patches_grad_test.py
@@ -12,17 +12,23 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-""" Tests for ExtractImagePatches gradient. """
+"""Tests for ExtractImagePatches gradient."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import random_seed as random_seed_lib
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import gradient_checker
+from tensorflow.python.platform import test
-class ExtractImagePatchesGradTest(tf.test.TestCase):
+
+class ExtractImagePatchesGradTest(test.TestCase):
"""Gradient-checking for ExtractImagePatches op."""
_TEST_CASES = [
@@ -73,29 +79,27 @@ class ExtractImagePatchesGradTest(tf.test.TestCase):
def testGradient(self):
# Set graph seed for determinism.
random_seed = 42
- tf.set_random_seed(random_seed)
+ random_seed_lib.set_random_seed(random_seed)
with self.test_session():
for test_case in self._TEST_CASES:
np.random.seed(random_seed)
in_shape = test_case['in_shape']
- in_val = tf.constant(np.random.random(in_shape),
- dtype=tf.float32)
+ in_val = constant_op.constant(
+ np.random.random(in_shape), dtype=dtypes.float32)
for padding in ['VALID', 'SAME']:
- out_val = tf.extract_image_patches(in_val,
- test_case['ksizes'],
- test_case['strides'],
- test_case['rates'],
- padding)
+ out_val = array_ops.extract_image_patches(in_val, test_case['ksizes'],
+ test_case['strides'],
+ test_case['rates'], padding)
out_shape = out_val.get_shape().as_list()
- err = tf.test.compute_gradient_error(
- in_val, in_shape, out_val, out_shape
- )
+ err = gradient_checker.compute_gradient_error(in_val, in_shape,
+ out_val, out_shape)
print('extract_image_patches gradient err: %.4e' % err)
self.assertLess(err, 1e-4)
+
if __name__ == '__main__':
- tf.test.main()
+ test.main()
diff --git a/tensorflow/python/kernel_tests/extract_image_patches_op_test.py b/tensorflow/python/kernel_tests/extract_image_patches_op_test.py
index 303176cb09..5c7624f1f6 100644
--- a/tensorflow/python/kernel_tests/extract_image_patches_op_test.py
+++ b/tensorflow/python/kernel_tests/extract_image_patches_op_test.py
@@ -19,10 +19,13 @@ from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+from tensorflow.python.framework import constant_op
+from tensorflow.python.ops import array_ops
+from tensorflow.python.platform import test
-class ExtractImagePatches(tf.test.TestCase):
+
+class ExtractImagePatches(test.TestCase):
"""Functional tests for ExtractImagePatches op."""
def _VerifyValues(self, image, ksizes, strides, rates, padding, patches):
@@ -41,8 +44,8 @@ class ExtractImagePatches(tf.test.TestCase):
rates = [1] + rates + [1]
with self.test_session(use_gpu=True):
- out_tensor = tf.extract_image_patches(
- tf.constant(image),
+ out_tensor = array_ops.extract_image_patches(
+ constant_op.constant(image),
ksizes=ksizes,
strides=strides,
rates=rates,
@@ -57,12 +60,13 @@ class ExtractImagePatches(tf.test.TestCase):
# [2, 3, 4, 5]
patches = np.reshape(range(120), [2, 3, 4, 5])
for padding in ["VALID", "SAME"]:
- self._VerifyValues(image,
- ksizes=[1, 1],
- strides=[1, 1],
- rates=[1, 1],
- padding=padding,
- patches=patches)
+ self._VerifyValues(
+ image,
+ ksizes=[1, 1],
+ strides=[1, 1],
+ rates=[1, 1],
+ padding=padding,
+ patches=patches)
def testKsize1x1Stride2x3Rate1x1(self):
"""Test for 1x1 kernel and strides."""
@@ -71,12 +75,13 @@ class ExtractImagePatches(tf.test.TestCase):
# [2, 2, 2, 3]
patches = image[:, ::2, ::3, :]
for padding in ["VALID", "SAME"]:
- self._VerifyValues(image,
- ksizes=[1, 1],
- strides=[2, 3],
- rates=[1, 1],
- padding=padding,
- patches=patches)
+ self._VerifyValues(
+ image,
+ ksizes=[1, 1],
+ strides=[2, 3],
+ rates=[1, 1],
+ padding=padding,
+ patches=patches)
def testKsize2x2Stride1x1Rate1x1Valid(self):
"""Test for 1x1 kernel ."""
@@ -84,12 +89,13 @@ class ExtractImagePatches(tf.test.TestCase):
image = [[[[1], [2]], [[3], [4]]]]
# [1, 1, 1, 4]
patches = [[[[1, 2, 3, 4]]]]
- self._VerifyValues(image,
- ksizes=[2, 2],
- strides=[1, 1],
- rates=[1, 1],
- padding="VALID",
- patches=patches)
+ self._VerifyValues(
+ image,
+ ksizes=[2, 2],
+ strides=[1, 1],
+ rates=[1, 1],
+ padding="VALID",
+ patches=patches)
def testKsize2x2Stride1x1Rate1x1Same(self):
"""Test for 1x1 kernel ."""
@@ -97,13 +103,14 @@ class ExtractImagePatches(tf.test.TestCase):
image = [[[[1], [2]], [[3], [4]]]]
# [1, 2, 2, 4]
patches = [[[[1, 2, 3, 4], [2, 0, 4, 0]], [[3, 4, 0, 0], [4, 0, 0, 0]]]]
- self._VerifyValues(image,
- ksizes=[2, 2],
- strides=[1, 1],
- rates=[1, 1],
- padding="SAME",
- patches=patches)
+ self._VerifyValues(
+ image,
+ ksizes=[2, 2],
+ strides=[1, 1],
+ rates=[1, 1],
+ padding="SAME",
+ patches=patches)
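# A NumPy sketch of the SAME-padding case above, assuming the usual
# zero-padding on the bottom/right: each output location flattens the 2x2
# window anchored there, which reproduces the expected patches exactly.
import numpy as np

image = np.array([[1, 2], [3, 4]])
padded = np.pad(image, ((0, 1), (0, 1)), mode="constant")
patches = [[padded[r:r + 2, c:c + 2].ravel().tolist()
            for c in range(2)] for r in range(2)]
assert patches == [[[1, 2, 3, 4], [2, 0, 4, 0]],
                   [[3, 4, 0, 0], [4, 0, 0, 0]]]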
if __name__ == "__main__":
- tf.test.main()
+ test.main()
diff --git a/tensorflow/python/kernel_tests/fft_ops_test.py b/tensorflow/python/kernel_tests/fft_ops_test.py
index 6eb25d486e..030bbafafe 100644
--- a/tensorflow/python/kernel_tests/fft_ops_test.py
+++ b/tensorflow/python/kernel_tests/fft_ops_test.py
@@ -12,51 +12,54 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-"""Tests for fft operations.
-"""
+"""Tests for fft operations."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
from six.moves import xrange # pylint: disable=redefined-builtin
+from tensorflow.python.framework import ops
+from tensorflow.python.ops import gradient_checker
+from tensorflow.python.ops import math_ops
+from tensorflow.python.platform import test
VALID_FFT_RANKS = (1, 2, 3)
-class FFTOpsTest(tf.test.TestCase):
+class FFTOpsTest(test.TestCase):
def _Compare(self, x, rank):
- if tf.test.is_gpu_available():
+ if test.is_gpu_available():
# GPU/Forward
self.assertAllClose(
self._npFFT(x, rank),
- self._tfFFT(x, rank, use_gpu=True),
+ self._tfFFT(
+ x, rank, use_gpu=True),
rtol=1e-4,
atol=1e-4)
# GPU/Backward
self.assertAllClose(
self._npIFFT(x, rank),
- self._tfIFFT(x, rank, use_gpu=True),
+ self._tfIFFT(
+ x, rank, use_gpu=True),
rtol=1e-4,
atol=1e-4)
def _checkGrad(self, func, x, y, use_gpu=False):
with self.test_session(use_gpu=use_gpu):
- inx = tf.convert_to_tensor(x)
- iny = tf.convert_to_tensor(y)
+ inx = ops.convert_to_tensor(x)
+ iny = ops.convert_to_tensor(y)
# func is a forward or inverse FFT function (batched or unbatched)
- z = func(tf.complex(inx, iny))
+ z = func(math_ops.complex(inx, iny))
# loss = sum(|z|^2)
- loss = tf.reduce_sum(tf.real(z * tf.conj(z)))
+ loss = math_ops.reduce_sum(math_ops.real(z * math_ops.conj(z)))
((x_jacob_t, x_jacob_n),
- (y_jacob_t, y_jacob_n)) = tf.test.compute_gradient(
- [inx, iny],
- [list(x.shape), list(y.shape)],
- loss,
- [1],
+ (y_jacob_t, y_jacob_n)) = gradient_checker.compute_gradient(
+ [inx, iny], [list(x.shape), list(y.shape)],
+ loss, [1],
x_init_value=[x, y],
delta=1e-2)
self.assertAllClose(x_jacob_t, x_jacob_n, rtol=1e-2, atol=1e-2)
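# Why _checkGrad uses loss = sum(real(z * conj(z))): z * conj(z) equals
# |z| ** 2, so the loss is a real scalar function of the real tensors inx
# and iny, which is the form the numeric gradient checker needs.
import numpy as np

z = np.fft.fft(np.array([1.0, 2.0]) + 1j * np.array([0.5, -0.5]))
assert np.allclose((z * np.conj(z)).real, np.abs(z) ** 2)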
@@ -92,26 +95,26 @@ class FFTOpsTest(tf.test.TestCase):
def _tfFFTForRank(self, rank):
if rank == 1:
- return tf.fft
+ return math_ops.fft
elif rank == 2:
- return tf.fft2d
+ return math_ops.fft2d
elif rank == 3:
- return tf.fft3d
+ return math_ops.fft3d
else:
raise ValueError("invalid rank")
def _tfIFFTForRank(self, rank):
if rank == 1:
- return tf.ifft
+ return math_ops.ifft
elif rank == 2:
- return tf.ifft2d
+ return math_ops.ifft2d
elif rank == 3:
- return tf.ifft3d
+ return math_ops.ifft3d
else:
raise ValueError("invalid rank")
def testEmpty(self):
- if tf.test.is_gpu_available():
+ if test.is_gpu_available():
for rank in VALID_FFT_RANKS:
for dims in xrange(rank, rank + 3):
x = np.zeros((0,) * dims).astype(np.complex64)
@@ -122,8 +125,7 @@ class FFTOpsTest(tf.test.TestCase):
for rank in VALID_FFT_RANKS:
for dims in xrange(rank, rank + 3):
self._Compare(
- np.mod(
- np.arange(np.power(4, dims)), 10).reshape((4,) * dims), rank)
+ np.mod(np.arange(np.power(4, dims)), 10).reshape((4,) * dims), rank)
def testRandom(self):
np.random.seed(12345)
@@ -139,21 +141,19 @@ class FFTOpsTest(tf.test.TestCase):
self._Compare(gen((4,) * dims), rank)
def testError(self):
- if tf.test.is_gpu_available():
+ if test.is_gpu_available():
for rank in VALID_FFT_RANKS:
for dims in xrange(0, rank):
x = np.zeros((1,) * dims).astype(np.complex64)
with self.assertRaisesWithPredicateMatch(
- ValueError,
- "Shape must be .*rank {}.*".format(rank)):
+ ValueError, "Shape must be .*rank {}.*".format(rank)):
self._tfFFT(x, rank)
with self.assertRaisesWithPredicateMatch(
- ValueError,
- "Shape must be .*rank {}.*".format(rank)):
+ ValueError, "Shape must be .*rank {}.*".format(rank)):
self._tfIFFT(x, rank)
def testGrad_Simple(self):
- if tf.test.is_gpu_available():
+ if test.is_gpu_available():
for rank in VALID_FFT_RANKS:
for dims in xrange(rank, rank + 2):
re = np.ones(shape=(4,) * dims, dtype=np.float32) / 10.0
@@ -162,7 +162,7 @@ class FFTOpsTest(tf.test.TestCase):
self._checkGrad(self._tfIFFTForRank(rank), re, im, use_gpu=True)
def testGrad_Random(self):
- if tf.test.is_gpu_available():
+ if test.is_gpu_available():
np.random.seed(54321)
for rank in VALID_FFT_RANKS:
for dims in xrange(rank, rank + 2):
@@ -173,4 +173,4 @@ class FFTOpsTest(tf.test.TestCase):
if __name__ == "__main__":
- tf.test.main()
+ test.main()
diff --git a/tensorflow/python/kernel_tests/fifo_queue_test.py b/tensorflow/python/kernel_tests/fifo_queue_test.py
index bf2ed45269..2b790b4a92 100644
--- a/tensorflow/python/kernel_tests/fifo_queue_test.py
+++ b/tensorflow/python/kernel_tests/fifo_queue_test.py
@@ -12,8 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for tensorflow.ops.data_flow_ops.FIFOQueue."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
@@ -21,17 +21,29 @@ from __future__ import print_function
import random
import re
import time
+
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
-import tensorflow as tf
+from tensorflow.core.protobuf import config_pb2
+from tensorflow.python.client import session as session_lib
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes as dtypes_lib
+from tensorflow.python.framework import errors_impl
+from tensorflow.python.framework import ops
+from tensorflow.python.framework import tensor_shape
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import data_flow_ops
+from tensorflow.python.platform import test
+from tensorflow.python.util import compat
-class FIFOQueueTest(tf.test.TestCase):
+
+class FIFOQueueTest(test.TestCase):
def testConstructor(self):
- with tf.Graph().as_default():
- q = tf.FIFOQueue(10, tf.float32, name="Q")
- self.assertTrue(isinstance(q.queue_ref, tf.Tensor))
+ with ops.Graph().as_default():
+ q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, name="Q")
+ self.assertTrue(isinstance(q.queue_ref, ops.Tensor))
self.assertProtoEquals("""
name:'Q' op:'FIFOQueue'
attr { key: 'component_types' value { list { type: DT_FLOAT } } }
@@ -42,9 +54,12 @@ class FIFOQueueTest(tf.test.TestCase):
""", q.queue_ref.op.node_def)
def testMultiQueueConstructor(self):
- with tf.Graph().as_default():
- q = tf.FIFOQueue(5, (tf.int32, tf.float32), shared_name="foo", name="Q")
- self.assertTrue(isinstance(q.queue_ref, tf.Tensor))
+ with ops.Graph().as_default():
+ q = data_flow_ops.FIFOQueue(
+ 5, (dtypes_lib.int32, dtypes_lib.float32),
+ shared_name="foo",
+ name="Q")
+ self.assertTrue(isinstance(q.queue_ref, ops.Tensor))
self.assertProtoEquals("""
name:'Q' op:'FIFOQueue'
attr { key: 'component_types' value { list {
@@ -57,11 +72,13 @@ class FIFOQueueTest(tf.test.TestCase):
""", q.queue_ref.op.node_def)
def testConstructorWithShapes(self):
- with tf.Graph().as_default():
- q = tf.FIFOQueue(5, (tf.int32, tf.float32),
- shapes=(tf.TensorShape([1, 1, 2, 3]),
- tf.TensorShape([5, 8])), name="Q")
- self.assertTrue(isinstance(q.queue_ref, tf.Tensor))
+ with ops.Graph().as_default():
+ q = data_flow_ops.FIFOQueue(
+ 5, (dtypes_lib.int32, dtypes_lib.float32),
+ shapes=(tensor_shape.TensorShape([1, 1, 2, 3]),
+ tensor_shape.TensorShape([5, 8])),
+ name="Q")
+ self.assertTrue(isinstance(q.queue_ref, ops.Tensor))
self.assertProtoEquals("""
name:'Q' op:'FIFOQueue'
attr { key: 'component_types' value { list {
@@ -82,19 +99,19 @@ class FIFOQueueTest(tf.test.TestCase):
def testEnqueue(self):
with self.test_session():
- q = tf.FIFOQueue(10, tf.float32)
+ q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
enqueue_op = q.enqueue((10.0,))
enqueue_op.run()
def testEnqueueHalf(self):
with self.test_session():
- q = tf.FIFOQueue(10, tf.float16)
+ q = data_flow_ops.FIFOQueue(10, dtypes_lib.float16)
enqueue_op = q.enqueue((10.0,))
enqueue_op.run()
def testEnqueueWithShape(self):
with self.test_session():
- q = tf.FIFOQueue(10, tf.float32, shapes=(3, 2))
+ q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, shapes=(3, 2))
enqueue_correct_op = q.enqueue(([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],))
enqueue_correct_op.run()
with self.assertRaises(ValueError):
@@ -103,14 +120,14 @@ class FIFOQueueTest(tf.test.TestCase):
def testEnqueueManyWithShape(self):
with self.test_session():
- q = tf.FIFOQueue(10, [tf.int32, tf.int32],
- shapes=[(), (2,)])
+ q = data_flow_ops.FIFOQueue(
+ 10, [dtypes_lib.int32, dtypes_lib.int32], shapes=[(), (2,)])
q.enqueue_many([[1, 2, 3, 4], [[1, 1], [2, 2], [3, 3], [4, 4]]]).run()
self.assertEqual(4, q.size().eval())
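# A minimal sketch of the enqueue_many contract exercised above: the
# leading dimension of every component is the batch of elements, and the
# trailing dimensions must match the queue's per-element shapes.
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.ops import data_flow_ops

q = data_flow_ops.FIFOQueue(10, [dtypes_lib.int32, dtypes_lib.int32],
                            shapes=[(), (2,)])
enq = q.enqueue_many([[1, 2], [[1, 1], [2, 2]]])   # two elements at once
with session_lib.Session() as sess:
  sess.run(enq)
  assert sess.run(q.size()) == 2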
def testEnqueueDictWithoutNames(self):
with self.test_session():
- q = tf.FIFOQueue(10, tf.float32)
+ q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
with self.assertRaisesRegexp(ValueError, "must have names"):
q.enqueue({"a": 12.0})
with self.assertRaisesRegexp(ValueError, "must have names"):
@@ -118,7 +135,7 @@ class FIFOQueueTest(tf.test.TestCase):
def testParallelEnqueue(self):
with self.test_session() as sess:
- q = tf.FIFOQueue(10, tf.float32)
+ q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
@@ -126,8 +143,11 @@ class FIFOQueueTest(tf.test.TestCase):
# Run one producer thread for each element in elems.
def enqueue(enqueue_op):
sess.run(enqueue_op)
- threads = [self.checkedThread(target=enqueue, args=(e,))
- for e in enqueue_ops]
+
+ threads = [
+ self.checkedThread(
+ target=enqueue, args=(e,)) for e in enqueue_ops
+ ]
for thread in threads:
thread.start()
for thread in threads:
@@ -141,7 +161,7 @@ class FIFOQueueTest(tf.test.TestCase):
def testParallelDequeue(self):
with self.test_session() as sess:
- q = tf.FIFOQueue(10, tf.float32)
+ q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
@@ -155,6 +175,7 @@ class FIFOQueueTest(tf.test.TestCase):
def dequeue():
results.append(sess.run(dequeued_t))
+
threads = [self.checkedThread(target=dequeue) for _ in enqueue_ops]
for thread in threads:
thread.start()
@@ -164,7 +185,7 @@ class FIFOQueueTest(tf.test.TestCase):
def testDequeue(self):
with self.test_session():
- q = tf.FIFOQueue(10, tf.float32)
+ q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
@@ -178,7 +199,7 @@ class FIFOQueueTest(tf.test.TestCase):
def testDequeueHalf(self):
with self.test_session():
- q = tf.FIFOQueue(10, tf.float16)
+ q = data_flow_ops.FIFOQueue(10, dtypes_lib.float16)
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
@@ -192,7 +213,7 @@ class FIFOQueueTest(tf.test.TestCase):
def testEnqueueAndBlockingDequeue(self):
with self.test_session() as sess:
- q = tf.FIFOQueue(3, tf.float32)
+ q = data_flow_ops.FIFOQueue(3, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
@@ -222,7 +243,7 @@ class FIFOQueueTest(tf.test.TestCase):
def testMultiEnqueueAndDequeue(self):
with self.test_session() as sess:
- q = tf.FIFOQueue(10, (tf.int32, tf.float32))
+ q = data_flow_ops.FIFOQueue(10, (dtypes_lib.int32, dtypes_lib.float32))
elems = [(5, 10.0), (10, 20.0), (15, 30.0)]
enqueue_ops = [q.enqueue((x, y)) for x, y in elems]
dequeued_t = q.dequeue()
@@ -238,12 +259,12 @@ class FIFOQueueTest(tf.test.TestCase):
def testQueueSizeEmpty(self):
with self.test_session():
- q = tf.FIFOQueue(10, tf.float32)
+ q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
self.assertEqual([0], q.size().eval())
def testQueueSizeAfterEnqueueAndDequeue(self):
with self.test_session():
- q = tf.FIFOQueue(10, tf.float32)
+ q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue()
size = q.size()
@@ -256,7 +277,7 @@ class FIFOQueueTest(tf.test.TestCase):
def testEnqueueMany(self):
with self.test_session():
- q = tf.FIFOQueue(10, tf.float32)
+ q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue()
@@ -269,9 +290,9 @@ class FIFOQueueTest(tf.test.TestCase):
def testEmptyEnqueueMany(self):
with self.test_session():
- q = tf.FIFOQueue(10, tf.float32)
- empty_t = tf.constant([], dtype=tf.float32,
- shape=[0, 2, 3])
+ q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
+ empty_t = constant_op.constant(
+ [], dtype=dtypes_lib.float32, shape=[0, 2, 3])
enqueue_op = q.enqueue_many((empty_t,))
size_t = q.size()
@@ -281,7 +302,7 @@ class FIFOQueueTest(tf.test.TestCase):
def testEmptyDequeueMany(self):
with self.test_session():
- q = tf.FIFOQueue(10, tf.float32, shapes=())
+ q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, shapes=())
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue_many(0)
@@ -291,7 +312,7 @@ class FIFOQueueTest(tf.test.TestCase):
def testEmptyDequeueUpTo(self):
with self.test_session():
- q = tf.FIFOQueue(10, tf.float32, shapes=())
+ q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, shapes=())
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue_up_to(0)
@@ -301,14 +322,14 @@ class FIFOQueueTest(tf.test.TestCase):
def testEmptyDequeueManyWithNoShape(self):
with self.test_session():
- q = tf.FIFOQueue(10, tf.float32)
+ q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
# Expect the operation to fail due to the shape not being constrained.
with self.assertRaisesOpError("specified shapes"):
q.dequeue_many(0).eval()
def testMultiEnqueueMany(self):
with self.test_session() as sess:
- q = tf.FIFOQueue(10, (tf.float32, tf.int32))
+ q = data_flow_ops.FIFOQueue(10, (dtypes_lib.float32, dtypes_lib.int32))
float_elems = [10.0, 20.0, 30.0, 40.0]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
@@ -324,7 +345,7 @@ class FIFOQueueTest(tf.test.TestCase):
def testDequeueMany(self):
with self.test_session():
- q = tf.FIFOQueue(10, tf.float32, ())
+ q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, ())
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(4)
@@ -336,7 +357,7 @@ class FIFOQueueTest(tf.test.TestCase):
def testDequeueUpToNoBlocking(self):
with self.test_session():
- q = tf.FIFOQueue(10, tf.float32, ())
+ q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, ())
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_up_to(4)
@@ -348,12 +369,13 @@ class FIFOQueueTest(tf.test.TestCase):
def testMultiDequeueMany(self):
with self.test_session() as sess:
- q = tf.FIFOQueue(10, (tf.float32, tf.int32),
- shapes=((), (2,)))
+ q = data_flow_ops.FIFOQueue(
+ 10, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (2,)))
float_elems = [
- 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
- int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10],
- [11, 12], [13, 14], [15, 16], [17, 18], [19, 20]]
+ 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0
+ ]
+ int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14],
+ [15, 16], [17, 18], [19, 20]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue_many(4)
dequeued_single_t = q.dequeue()
@@ -378,12 +400,13 @@ class FIFOQueueTest(tf.test.TestCase):
def testMultiDequeueUpToNoBlocking(self):
with self.test_session() as sess:
- q = tf.FIFOQueue(10, (tf.float32, tf.int32),
- shapes=((), (2,)))
+ q = data_flow_ops.FIFOQueue(
+ 10, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (2,)))
float_elems = [
- 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
- int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10],
- [11, 12], [13, 14], [15, 16], [17, 18], [19, 20]]
+ 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0
+ ]
+ int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14],
+ [15, 16], [17, 18], [19, 20]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue_up_to(4)
@@ -401,7 +424,7 @@ class FIFOQueueTest(tf.test.TestCase):
def testHighDimension(self):
with self.test_session():
- q = tf.FIFOQueue(10, tf.int32, (4, 4, 4, 4))
+ q = data_flow_ops.FIFOQueue(10, dtypes_lib.int32, (4, 4, 4, 4))
elems = np.array([[[[[x] * 4] * 4] * 4] * 4 for x in range(10)], np.int32)
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(10)
@@ -410,7 +433,8 @@ class FIFOQueueTest(tf.test.TestCase):
self.assertAllEqual(dequeued_t.eval(), elems)
def testEnqueueWrongShape(self):
- q = tf.FIFOQueue(10, (tf.int32, tf.int32), ((), (2)))
+ q = data_flow_ops.FIFOQueue(10, (dtypes_lib.int32, dtypes_lib.int32), ((),
+ (2)))
with self.assertRaises(ValueError):
q.enqueue(([1, 2], [2, 2]))
@@ -419,61 +443,69 @@ class FIFOQueueTest(tf.test.TestCase):
q.enqueue_many((7, [[1, 2], [3, 4], [5, 6]]))
def testBatchSizeMismatch(self):
- q = tf.FIFOQueue(10, (tf.int32, tf.int32, tf.int32), ((), (), ()))
+ q = data_flow_ops.FIFOQueue(10, (dtypes_lib.int32, dtypes_lib.int32,
+ dtypes_lib.int32), ((), (), ()))
with self.assertRaises(ValueError):
q.enqueue_many(([1, 2, 3], [1, 2], [1, 2, 3]))
with self.assertRaises(ValueError):
- q.enqueue_many(([1, 2, 3], [1, 2], tf.placeholder(tf.int32)))
+ q.enqueue_many(
+ ([1, 2, 3], [1, 2], array_ops.placeholder(dtypes_lib.int32)))
with self.assertRaises(ValueError):
- q.enqueue_many((tf.placeholder(tf.int32), [1, 2], [1, 2, 3]))
+ q.enqueue_many(
+ (array_ops.placeholder(dtypes_lib.int32), [1, 2], [1, 2, 3]))
def testEnqueueManyEmptyTypeConversion(self):
- q = tf.FIFOQueue(10, (tf.int32, tf.float32), ((), ()))
+ q = data_flow_ops.FIFOQueue(10, (dtypes_lib.int32, dtypes_lib.float32), (
+ (), ()))
enq = q.enqueue_many(([], []))
- self.assertEqual(tf.int32, enq.inputs[1].dtype)
- self.assertEqual(tf.float32, enq.inputs[2].dtype)
+ self.assertEqual(dtypes_lib.int32, enq.inputs[1].dtype)
+ self.assertEqual(dtypes_lib.float32, enq.inputs[2].dtype)
def testEnqueueWrongType(self):
- q = tf.FIFOQueue(10, (tf.int32, tf.float32), ((), ()))
+ q = data_flow_ops.FIFOQueue(10, (dtypes_lib.int32, dtypes_lib.float32), (
+ (), ()))
with self.assertRaises(ValueError):
- q.enqueue((tf.placeholder(tf.int32), tf.placeholder(tf.int32)))
+ q.enqueue((array_ops.placeholder(dtypes_lib.int32),
+ array_ops.placeholder(dtypes_lib.int32)))
with self.assertRaises(ValueError):
- q.enqueue_many((tf.placeholder(tf.int32), tf.placeholder(tf.int32)))
+ q.enqueue_many((array_ops.placeholder(dtypes_lib.int32),
+ array_ops.placeholder(dtypes_lib.int32)))
def testEnqueueWrongShapeAtRuntime(self):
with self.test_session() as sess:
- q = tf.FIFOQueue(10, (tf.int32, tf.int32), ((2, 2), (3, 3)))
+ q = data_flow_ops.FIFOQueue(10, (dtypes_lib.int32, dtypes_lib.int32), (
+ (2, 2), (3, 3)))
elems_ok = np.array([1] * 4).reshape((2, 2)).astype(np.int32)
- elems_bad = tf.placeholder(tf.int32)
+ elems_bad = array_ops.placeholder(dtypes_lib.int32)
enqueue_op = q.enqueue((elems_ok, elems_bad))
- with self.assertRaisesRegexp(
- tf.errors.InvalidArgumentError, r"Expected \[3,3\], got \[3,4\]"):
+ with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
+ r"Expected \[3,3\], got \[3,4\]"):
sess.run([enqueue_op],
feed_dict={elems_bad: np.array([1] * 12).reshape((3, 4))})
def testEnqueueDequeueManyWrongShape(self):
with self.test_session() as sess:
- q = tf.FIFOQueue(10, (tf.int32, tf.int32), ((2, 2), (3, 3)))
+ q = data_flow_ops.FIFOQueue(10, (dtypes_lib.int32, dtypes_lib.int32), (
+ (2, 2), (3, 3)))
elems_ok = np.array([1] * 8).reshape((2, 2, 2)).astype(np.int32)
- elems_bad = tf.placeholder(tf.int32)
+ elems_bad = array_ops.placeholder(dtypes_lib.int32)
enqueue_op = q.enqueue_many((elems_ok, elems_bad))
dequeued_t = q.dequeue_many(2)
- with self.assertRaisesRegexp(
- tf.errors.InvalidArgumentError,
- "Shape mismatch in tuple component 1. "
- r"Expected \[2,3,3\], got \[2,3,4\]"):
+ with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
+ "Shape mismatch in tuple component 1. "
+ r"Expected \[2,3,3\], got \[2,3,4\]"):
sess.run([enqueue_op],
feed_dict={elems_bad: np.array([1] * 24).reshape((2, 3, 4))})
dequeued_t.eval()
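# The two failure modes above, as a sketch: a statically-known shape
# mismatch raises ValueError while the graph is built, whereas a shape
# hidden behind an unknown-shape placeholder only fails at run time with
# errors_impl.InvalidArgumentError.
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.ops import data_flow_ops

q = data_flow_ops.FIFOQueue(10, (dtypes_lib.int32, dtypes_lib.int32),
                            ((2, 2), (3, 3)))
ok = [[1, 2], [3, 4]]
try:
  q.enqueue((ok, ok))                 # second component is (2, 2), not (3, 3)
except ValueError:
  pass                                # caught while building the graph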
def testParallelEnqueueMany(self):
with self.test_session() as sess:
- q = tf.FIFOQueue(1000, tf.float32, shapes=())
+ q = data_flow_ops.FIFOQueue(1000, dtypes_lib.float32, shapes=())
elems = [10.0 * x for x in range(100)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(1000)
@@ -481,6 +513,7 @@ class FIFOQueueTest(tf.test.TestCase):
# Enqueue 100 items in parallel on 10 threads.
def enqueue():
sess.run(enqueue_op)
+
threads = [self.checkedThread(target=enqueue) for _ in range(10)]
for thread in threads:
thread.start()
@@ -491,7 +524,7 @@ class FIFOQueueTest(tf.test.TestCase):
def testParallelDequeueMany(self):
with self.test_session() as sess:
- q = tf.FIFOQueue(1000, tf.float32, shapes=())
+ q = data_flow_ops.FIFOQueue(1000, dtypes_lib.float32, shapes=())
elems = [10.0 * x for x in range(1000)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(100)
@@ -503,6 +536,7 @@ class FIFOQueueTest(tf.test.TestCase):
def dequeue():
dequeued_elems.extend(sess.run(dequeued_t))
+
threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for thread in threads:
thread.start()
@@ -512,7 +546,7 @@ class FIFOQueueTest(tf.test.TestCase):
def testParallelDequeueUpTo(self):
with self.test_session() as sess:
- q = tf.FIFOQueue(1000, tf.float32, shapes=())
+ q = data_flow_ops.FIFOQueue(1000, dtypes_lib.float32, shapes=())
elems = [10.0 * x for x in range(1000)]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
@@ -526,6 +560,7 @@ class FIFOQueueTest(tf.test.TestCase):
def dequeue():
dequeued_elems.extend(sess.run(dequeued_t))
+
threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for thread in threads:
thread.start()
@@ -535,7 +570,7 @@ class FIFOQueueTest(tf.test.TestCase):
def testParallelEnqueueAndDequeue(self):
with self.test_session() as sess:
- q = tf.FIFOQueue(50, tf.float32, shapes=())
+ q = data_flow_ops.FIFOQueue(50, dtypes_lib.float32, shapes=())
initial_elements = [10.0] * 49
q.enqueue_many((initial_elements,)).run()
@@ -545,6 +580,7 @@ class FIFOQueueTest(tf.test.TestCase):
def enqueue():
for _ in xrange(100):
sess.run(enqueue_op)
+
def dequeue():
for _ in xrange(100):
self.assertTrue(sess.run(dequeued_t) in (10.0, 20.0))
@@ -567,11 +603,11 @@ class FIFOQueueTest(tf.test.TestCase):
def testMixtureOfEnqueueAndEnqueueMany(self):
with self.test_session() as sess:
- q = tf.FIFOQueue(10, tf.int32, shapes=())
- enqueue_placeholder = tf.placeholder(tf.int32, shape=())
+ q = data_flow_ops.FIFOQueue(10, dtypes_lib.int32, shapes=())
+ enqueue_placeholder = array_ops.placeholder(dtypes_lib.int32, shape=())
enqueue_op = q.enqueue((enqueue_placeholder,))
- enqueuemany_placeholder = tf.placeholder(
- tf.int32, shape=(None,))
+ enqueuemany_placeholder = array_ops.placeholder(
+ dtypes_lib.int32, shape=(None,))
enqueuemany_op = q.enqueue_many((enqueuemany_placeholder,))
dequeued_t = q.dequeue()
@@ -580,6 +616,7 @@ class FIFOQueueTest(tf.test.TestCase):
def dequeue():
for i in xrange(250):
self.assertEqual(i, sess.run(dequeued_t))
+
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
@@ -591,9 +628,8 @@ class FIFOQueueTest(tf.test.TestCase):
elements_enqueued += 1
else:
count = random.randint(0, min(20, 250 - elements_enqueued))
- range_to_enqueue = np.arange(elements_enqueued,
- elements_enqueued + count,
- dtype=np.int32)
+ range_to_enqueue = np.arange(
+ elements_enqueued, elements_enqueued + count, dtype=np.int32)
enqueuemany_op.run({enqueuemany_placeholder: range_to_enqueue})
elements_enqueued += count
@@ -603,14 +639,15 @@ class FIFOQueueTest(tf.test.TestCase):
def testMixtureOfDequeueAndDequeueMany(self):
with self.test_session() as sess:
- q = tf.FIFOQueue(10, tf.int32, shapes=())
+ q = data_flow_ops.FIFOQueue(10, dtypes_lib.int32, shapes=())
enqueue_op = q.enqueue_many((np.arange(250, dtype=np.int32),))
dequeued_t = q.dequeue()
- count_placeholder = tf.placeholder(tf.int32, shape=())
+ count_placeholder = array_ops.placeholder(dtypes_lib.int32, shape=())
dequeuemany_t = q.dequeue_many(count_placeholder)
def enqueue():
sess.run(enqueue_op)
+
enqueue_thread = self.checkedThread(target=enqueue)
enqueue_thread.start()
@@ -622,11 +659,12 @@ class FIFOQueueTest(tf.test.TestCase):
elements_dequeued += 1
else:
count = random.randint(0, min(20, 250 - elements_dequeued))
- expected_range = np.arange(elements_dequeued,
- elements_dequeued + count,
- dtype=np.int32)
- self.assertAllEqual(
- expected_range, dequeuemany_t.eval({count_placeholder: count}))
+ expected_range = np.arange(
+ elements_dequeued, elements_dequeued + count, dtype=np.int32)
+ self.assertAllEqual(expected_range,
+ dequeuemany_t.eval({
+ count_placeholder: count
+ }))
elements_dequeued += count
q.close().run()
@@ -635,7 +673,7 @@ class FIFOQueueTest(tf.test.TestCase):
def testBlockingDequeueMany(self):
with self.test_session() as sess:
- q = tf.FIFOQueue(10, tf.float32, ())
+ q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, ())
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(4)
@@ -662,7 +700,7 @@ class FIFOQueueTest(tf.test.TestCase):
def testBlockingDequeueUpTo(self):
with self.test_session() as sess:
- q = tf.FIFOQueue(10, tf.float32, ())
+ q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, ())
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_up_to(4)
@@ -691,13 +729,13 @@ class FIFOQueueTest(tf.test.TestCase):
with self.test_session():
# Define a first queue that contains integer counts.
dequeue_counts = [random.randint(1, 10) for _ in range(100)]
- count_q = tf.FIFOQueue(100, tf.int32, ())
+ count_q = data_flow_ops.FIFOQueue(100, dtypes_lib.int32, ())
enqueue_counts_op = count_q.enqueue_many((dequeue_counts,))
total_count = sum(dequeue_counts)
# Define a second queue that contains total_count elements.
elems = [random.randint(0, 100) for _ in range(total_count)]
- q = tf.FIFOQueue(total_count, tf.int32, ())
+ q = data_flow_ops.FIFOQueue(total_count, dtypes_lib.int32, ())
enqueue_elems_op = q.enqueue_many((elems,))
# Define a subgraph that first dequeues a count, then DequeuesMany
@@ -714,7 +752,7 @@ class FIFOQueueTest(tf.test.TestCase):
def testDequeueFromClosedQueue(self):
with self.test_session():
- q = tf.FIFOQueue(10, tf.float32)
+ q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
@@ -726,13 +764,13 @@ class FIFOQueueTest(tf.test.TestCase):
self.assertEqual([elem], dequeued_t.eval())
# Expect the operation to fail due to the queue being closed.
- with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
+ with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
dequeued_t.eval()
def testBlockingDequeueFromClosedQueue(self):
with self.test_session() as sess:
- q = tf.FIFOQueue(10, tf.float32)
+ q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
@@ -744,7 +782,7 @@ class FIFOQueueTest(tf.test.TestCase):
for elem in elems:
self.assertEqual([elem], sess.run(dequeued_t))
# Expect the operation to fail due to the queue being closed.
- with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
+ with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
@@ -758,13 +796,13 @@ class FIFOQueueTest(tf.test.TestCase):
def testBlockingDequeueFromClosedEmptyQueue(self):
with self.test_session() as sess:
- q = tf.FIFOQueue(10, tf.float32)
+ q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
close_op = q.close()
dequeued_t = q.dequeue()
def dequeue():
# Expect the operation to fail due to the queue being closed.
- with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
+ with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
@@ -778,7 +816,7 @@ class FIFOQueueTest(tf.test.TestCase):
def testBlockingDequeueManyFromClosedQueue(self):
with self.test_session() as sess:
- q = tf.FIFOQueue(10, tf.float32, ())
+ q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, ())
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
@@ -789,7 +827,7 @@ class FIFOQueueTest(tf.test.TestCase):
def dequeue():
self.assertAllEqual(elems, sess.run(dequeued_t))
# Expect the operation to fail due to the queue being closed.
- with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
+ with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
@@ -803,7 +841,7 @@ class FIFOQueueTest(tf.test.TestCase):
def testBlockingDequeueManyButNotAllFromClosedQueue(self):
with self.test_session() as sess:
- q = tf.FIFOQueue(10, tf.float32, ())
+ q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, ())
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
@@ -814,7 +852,7 @@ class FIFOQueueTest(tf.test.TestCase):
def dequeue():
self.assertAllEqual(elems[:3], sess.run(dequeued_t))
# Expect the operation to fail due to the queue being closed.
- with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
+ with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
@@ -828,7 +866,7 @@ class FIFOQueueTest(tf.test.TestCase):
def testDequeueUpToFromClosedQueueReturnsRemainder(self):
with self.test_session() as sess:
- q = tf.FIFOQueue(10, tf.float32, ())
+ q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, ())
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
@@ -850,7 +888,7 @@ class FIFOQueueTest(tf.test.TestCase):
def testEnqueueManyLargerThanCapacityWithConcurrentDequeueMany(self):
with self.test_session() as sess:
- q = tf.FIFOQueue(4, tf.float32, ())
+ q = data_flow_ops.FIFOQueue(4, dtypes_lib.float32, ())
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
@@ -862,7 +900,7 @@ class FIFOQueueTest(tf.test.TestCase):
def dequeue():
self.assertAllEqual(elems[0:3], sess.run(dequeued_t))
- with self.assertRaises(tf.errors.OutOfRangeError):
+ with self.assertRaises(errors_impl.OutOfRangeError):
sess.run(dequeued_t)
self.assertEqual(elems[3], sess.run(cleanup_dequeue_t))
@@ -887,7 +925,8 @@ class FIFOQueueTest(tf.test.TestCase):
def testClosedBlockingDequeueManyRestoresPartialBatch(self):
with self.test_session() as sess:
- q = tf.FIFOQueue(4, (tf.float32, tf.float32), ((), ()))
+ q = data_flow_ops.FIFOQueue(4, (dtypes_lib.float32, dtypes_lib.float32), (
+ (), ()))
elems_a = [1.0, 2.0, 3.0]
elems_b = [10.0, 20.0, 30.0]
enqueue_op = q.enqueue_many((elems_a, elems_b))
@@ -898,7 +937,7 @@ class FIFOQueueTest(tf.test.TestCase):
enqueue_op.run()
def dequeue():
- with self.assertRaises(tf.errors.OutOfRangeError):
+ with self.assertRaises(errors_impl.OutOfRangeError):
sess.run([dequeued_a_t, dequeued_b_t])
dequeue_thread = self.checkedThread(target=dequeue)
@@ -919,13 +958,13 @@ class FIFOQueueTest(tf.test.TestCase):
def testBlockingDequeueManyFromClosedEmptyQueue(self):
with self.test_session() as sess:
- q = tf.FIFOQueue(10, tf.float32, ())
+ q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, ())
close_op = q.close()
dequeued_t = q.dequeue_many(4)
def dequeue():
# Expect the operation to fail due to the queue being closed.
- with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
+ with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
@@ -939,13 +978,13 @@ class FIFOQueueTest(tf.test.TestCase):
def testBlockingDequeueUpToFromClosedEmptyQueue(self):
with self.test_session() as sess:
- q = tf.FIFOQueue(10, tf.float32, ())
+ q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, ())
close_op = q.close()
dequeued_t = q.dequeue_up_to(4)
def dequeue():
# Expect the operation to fail due to the queue being closed.
- with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
+ with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
@@ -959,7 +998,7 @@ class FIFOQueueTest(tf.test.TestCase):
def testEnqueueToClosedQueue(self):
with self.test_session():
- q = tf.FIFOQueue(10, tf.float32)
+ q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
enqueue_op = q.enqueue((10.0,))
close_op = q.close()
@@ -967,12 +1006,12 @@ class FIFOQueueTest(tf.test.TestCase):
close_op.run()
# Expect the operation to fail due to the queue being closed.
- with self.assertRaisesRegexp(tf.errors.CancelledError, "is closed"):
+ with self.assertRaisesRegexp(errors_impl.CancelledError, "is closed"):
enqueue_op.run()
def testEnqueueManyToClosedQueue(self):
with self.test_session():
- q = tf.FIFOQueue(10, tf.float32)
+ q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
@@ -981,12 +1020,12 @@ class FIFOQueueTest(tf.test.TestCase):
close_op.run()
# Expect the operation to fail due to the queue being closed.
- with self.assertRaisesRegexp(tf.errors.CancelledError, "is closed"):
+ with self.assertRaisesRegexp(errors_impl.CancelledError, "is closed"):
enqueue_op.run()
def testBlockingEnqueueToFullQueue(self):
with self.test_session() as sess:
- q = tf.FIFOQueue(4, tf.float32)
+ q = data_flow_ops.FIFOQueue(4, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue((50.0,))
@@ -996,6 +1035,7 @@ class FIFOQueueTest(tf.test.TestCase):
def blocking_enqueue():
sess.run(blocking_enqueue_op)
+
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The dequeue ops should run after the blocking_enqueue_op has blocked.
@@ -1008,7 +1048,7 @@ class FIFOQueueTest(tf.test.TestCase):
def testBlockingEnqueueManyToFullQueue(self):
with self.test_session() as sess:
- q = tf.FIFOQueue(4, tf.float32)
+ q = data_flow_ops.FIFOQueue(4, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
@@ -1018,6 +1058,7 @@ class FIFOQueueTest(tf.test.TestCase):
def blocking_enqueue():
sess.run(blocking_enqueue_op)
+
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The dequeue ops should run after the blocking_enqueue_op has blocked.
@@ -1031,7 +1072,7 @@ class FIFOQueueTest(tf.test.TestCase):
def testBlockingEnqueueBeforeClose(self):
with self.test_session() as sess:
- q = tf.FIFOQueue(4, tf.float32)
+ q = data_flow_ops.FIFOQueue(4, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue((50.0,))
@@ -1043,6 +1084,7 @@ class FIFOQueueTest(tf.test.TestCase):
def blocking_enqueue():
# Expect the operation to succeed once the dequeue op runs.
sess.run(blocking_enqueue_op)
+
enqueue_thread = self.checkedThread(target=blocking_enqueue)
enqueue_thread.start()
@@ -1052,6 +1094,7 @@ class FIFOQueueTest(tf.test.TestCase):
def close():
sess.run(close_op)
+
close_thread = self.checkedThread(target=close)
close_thread.start()
@@ -1066,7 +1109,7 @@ class FIFOQueueTest(tf.test.TestCase):
def testBlockingEnqueueManyBeforeClose(self):
with self.test_session() as sess:
- q = tf.FIFOQueue(4, tf.float32)
+ q = data_flow_ops.FIFOQueue(4, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
@@ -1076,6 +1119,7 @@ class FIFOQueueTest(tf.test.TestCase):
def blocking_enqueue():
sess.run(blocking_enqueue_op)
+
enqueue_thread = self.checkedThread(target=blocking_enqueue)
enqueue_thread.start()
@@ -1085,6 +1129,7 @@ class FIFOQueueTest(tf.test.TestCase):
def close():
sess.run(close_op)
+
close_thread = self.checkedThread(target=close)
close_thread.start()
@@ -1097,7 +1142,7 @@ class FIFOQueueTest(tf.test.TestCase):
def testDoesNotLoseValue(self):
with self.test_session():
- q = tf.FIFOQueue(1, tf.float32)
+ q = data_flow_ops.FIFOQueue(1, dtypes_lib.float32)
enqueue_op = q.enqueue((10.0,))
size_t = q.size()
@@ -1107,12 +1152,12 @@ class FIFOQueueTest(tf.test.TestCase):
def testSharedQueueSameSession(self):
with self.test_session():
- q1 = tf.FIFOQueue(
- 1, tf.float32, shared_name="shared_queue")
+ q1 = data_flow_ops.FIFOQueue(
+ 1, dtypes_lib.float32, shared_name="shared_queue")
q1.enqueue((10.0,)).run()
- q2 = tf.FIFOQueue(
- 1, tf.float32, shared_name="shared_queue")
+ q2 = data_flow_ops.FIFOQueue(
+ 1, dtypes_lib.float32, shared_name="shared_queue")
q1_size_t = q1.size()
q2_size_t = q2.size()
@@ -1137,43 +1182,43 @@ class FIFOQueueTest(tf.test.TestCase):
def testIncompatibleSharedQueueErrors(self):
with self.test_session():
- q_a_1 = tf.FIFOQueue(10, tf.float32, shared_name="q_a")
- q_a_2 = tf.FIFOQueue(15, tf.float32, shared_name="q_a")
+ q_a_1 = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, shared_name="q_a")
+ q_a_2 = data_flow_ops.FIFOQueue(15, dtypes_lib.float32, shared_name="q_a")
q_a_1.queue_ref.eval()
with self.assertRaisesOpError("capacity"):
q_a_2.queue_ref.eval()
- q_b_1 = tf.FIFOQueue(10, tf.float32, shared_name="q_b")
- q_b_2 = tf.FIFOQueue(10, tf.int32, shared_name="q_b")
+ q_b_1 = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, shared_name="q_b")
+ q_b_2 = data_flow_ops.FIFOQueue(10, dtypes_lib.int32, shared_name="q_b")
q_b_1.queue_ref.eval()
with self.assertRaisesOpError("component types"):
q_b_2.queue_ref.eval()
- q_c_1 = tf.FIFOQueue(10, tf.float32, shared_name="q_c")
- q_c_2 = tf.FIFOQueue(
- 10, tf.float32, shapes=[(1, 1, 2, 3)], shared_name="q_c")
+ q_c_1 = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, shared_name="q_c")
+ q_c_2 = data_flow_ops.FIFOQueue(
+ 10, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_c")
q_c_1.queue_ref.eval()
with self.assertRaisesOpError("component shapes"):
q_c_2.queue_ref.eval()
- q_d_1 = tf.FIFOQueue(
- 10, tf.float32, shapes=[(1, 1, 2, 3)], shared_name="q_d")
- q_d_2 = tf.FIFOQueue(10, tf.float32, shared_name="q_d")
+ q_d_1 = data_flow_ops.FIFOQueue(
+ 10, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_d")
+ q_d_2 = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, shared_name="q_d")
q_d_1.queue_ref.eval()
with self.assertRaisesOpError("component shapes"):
q_d_2.queue_ref.eval()
- q_e_1 = tf.FIFOQueue(
- 10, tf.float32, shapes=[(1, 1, 2, 3)], shared_name="q_e")
- q_e_2 = tf.FIFOQueue(
- 10, tf.float32, shapes=[(1, 1, 2, 4)], shared_name="q_e")
+ q_e_1 = data_flow_ops.FIFOQueue(
+ 10, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_e")
+ q_e_2 = data_flow_ops.FIFOQueue(
+ 10, dtypes_lib.float32, shapes=[(1, 1, 2, 4)], shared_name="q_e")
q_e_1.queue_ref.eval()
with self.assertRaisesOpError("component shapes"):
q_e_2.queue_ref.eval()
- q_f_1 = tf.FIFOQueue(10, tf.float32, shared_name="q_f")
- q_f_2 = tf.FIFOQueue(
- 10, (tf.float32, tf.int32), shared_name="q_f")
+ q_f_1 = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, shared_name="q_f")
+ q_f_2 = data_flow_ops.FIFOQueue(
+ 10, (dtypes_lib.float32, dtypes_lib.int32), shared_name="q_f")
q_f_1.queue_ref.eval()
with self.assertRaisesOpError("component types"):
q_f_2.queue_ref.eval()
@@ -1183,19 +1228,19 @@ class FIFOQueueTest(tf.test.TestCase):
num_queues = 10
qlist = list()
for _ in xrange(num_queues):
- qlist.append(tf.FIFOQueue(10, tf.float32))
+ qlist.append(data_flow_ops.FIFOQueue(10, dtypes_lib.float32))
# Enqueue/Dequeue into a dynamically selected queue
for _ in xrange(20):
index = np.random.randint(num_queues)
- q = tf.FIFOQueue.from_list(index, qlist)
+ q = data_flow_ops.FIFOQueue.from_list(index, qlist)
q.enqueue((10.,)).run()
self.assertEqual(q.dequeue().eval(), 10.0)
def testSelectQueueOutOfRange(self):
with self.test_session():
- q1 = tf.FIFOQueue(10, tf.float32)
- q2 = tf.FIFOQueue(15, tf.float32)
- enq_q = tf.FIFOQueue.from_list(3, [q1, q2])
+ q1 = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
+ q2 = data_flow_ops.FIFOQueue(15, dtypes_lib.float32)
+ enq_q = data_flow_ops.FIFOQueue.from_list(3, [q1, q2])
with self.assertRaisesOpError("Index must be in the range"):
enq_q.dequeue().eval()
@@ -1217,22 +1262,25 @@ class FIFOQueueTest(tf.test.TestCase):
def testResetOfBlockingOperation(self):
with self.test_session() as sess:
- q_empty = tf.FIFOQueue(5, tf.float32, ())
+ q_empty = data_flow_ops.FIFOQueue(5, dtypes_lib.float32, ())
dequeue_op = q_empty.dequeue()
dequeue_many_op = q_empty.dequeue_many(1)
- q_full = tf.FIFOQueue(5, tf.float32)
+ q_full = data_flow_ops.FIFOQueue(5, dtypes_lib.float32)
sess.run(q_full.enqueue_many(([1.0, 2.0, 3.0, 4.0, 5.0],)))
enqueue_op = q_full.enqueue((6.0,))
enqueue_many_op = q_full.enqueue_many(([6.0],))
threads = [
- self.checkedThread(self._blockingDequeue, args=(sess, dequeue_op)),
- self.checkedThread(self._blockingDequeueMany, args=(sess,
- dequeue_many_op)),
- self.checkedThread(self._blockingEnqueue, args=(sess, enqueue_op)),
- self.checkedThread(self._blockingEnqueueMany, args=(sess,
- enqueue_many_op))]
+ self.checkedThread(
+ self._blockingDequeue, args=(sess, dequeue_op)),
+ self.checkedThread(
+ self._blockingDequeueMany, args=(sess, dequeue_many_op)),
+ self.checkedThread(
+ self._blockingEnqueue, args=(sess, enqueue_op)),
+ self.checkedThread(
+ self._blockingEnqueueMany, args=(sess, enqueue_many_op))
+ ]
for t in threads:
t.start()
time.sleep(0.1)
@@ -1242,18 +1290,20 @@ class FIFOQueueTest(tf.test.TestCase):
def testBigEnqueueMany(self):
with self.test_session() as sess:
- q = tf.FIFOQueue(5, tf.int32, ((),))
+ q = data_flow_ops.FIFOQueue(5, dtypes_lib.int32, ((),))
elem = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
enq = q.enqueue_many((elem,))
deq = q.dequeue()
size_op = q.size()
enq_done = []
+
def blocking_enqueue():
enq_done.append(False)
# This will fill the queue and then block until enough dequeues happen.
sess.run(enq)
enq_done.append(True)
+
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
@@ -1285,15 +1335,17 @@ class FIFOQueueTest(tf.test.TestCase):
def testBigDequeueMany(self):
with self.test_session() as sess:
- q = tf.FIFOQueue(2, tf.int32, ((),))
+ q = data_flow_ops.FIFOQueue(2, dtypes_lib.int32, ((),))
elem = np.arange(4, dtype=np.int32)
enq_list = [q.enqueue((e,)) for e in elem]
deq = q.dequeue_many(4)
results = []
+
def blocking_dequeue():
# Will only complete after 4 enqueues complete.
results.extend(sess.run(deq))
+
thread = self.checkedThread(target=blocking_dequeue)
thread.start()
# The dequeue should start and then block.
@@ -1309,18 +1361,21 @@ class FIFOQueueTest(tf.test.TestCase):
def testDtypes(self):
with self.test_session() as sess:
- dtypes = [tf.float32, tf.float64, tf.int32, tf.uint8, tf.int16, tf.int8,
- tf.int64, tf.bool, tf.complex64, tf.complex128]
+ dtypes = [
+ dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
+ dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8, dtypes_lib.int64,
+ dtypes_lib.bool, dtypes_lib.complex64, dtypes_lib.complex128
+ ]
shape = (32, 4, 128)
- q = tf.FIFOQueue(32, dtypes, [shape[1:]] * len(dtypes))
+ q = data_flow_ops.FIFOQueue(32, dtypes, [shape[1:]] * len(dtypes))
input_tuple = []
for dtype in dtypes:
np_dtype = dtype.as_numpy_dtype
np_array = np.random.randint(-10, 10, shape)
- if dtype == tf.bool:
+ if dtype == dtypes_lib.bool:
np_array = np_array > 0
- elif dtype in (tf.complex64, tf.complex128):
+ elif dtype in (dtypes_lib.complex64, dtypes_lib.complex128):
np_array = np.sqrt(np_array.astype(np_dtype))
else:
np_array = np_array.astype(np_dtype)
@@ -1335,23 +1390,26 @@ class FIFOQueueTest(tf.test.TestCase):
self.assertAllEqual(input_elem, output_elem)
def testDeviceColocation(self):
- with tf.device("/job:ps"):
- q = tf.FIFOQueue(32, [tf.int32], name="q")
+ with ops.device("/job:ps"):
+ q = data_flow_ops.FIFOQueue(32, [dtypes_lib.int32], name="q")
- with tf.device("/job:worker/task:7"):
+ with ops.device("/job:worker/task:7"):
dequeued_t = q.dequeue()
self.assertDeviceEqual("/job:ps", dequeued_t.device)
self.assertEqual([b"loc:@q"], dequeued_t.op.colocation_groups())
-class FIFOQueueDictTest(tf.test.TestCase):
+class FIFOQueueDictTest(test.TestCase):
def testConstructor(self):
- with tf.Graph().as_default():
- q = tf.FIFOQueue(5, (tf.int32, tf.float32), names=("i", "j"),
- shared_name="foo", name="Q")
- self.assertTrue(isinstance(q.queue_ref, tf.Tensor))
+ with ops.Graph().as_default():
+ q = data_flow_ops.FIFOQueue(
+ 5, (dtypes_lib.int32, dtypes_lib.float32),
+ names=("i", "j"),
+ shared_name="foo",
+ name="Q")
+ self.assertTrue(isinstance(q.queue_ref, ops.Tensor))
self.assertProtoEquals("""
name:'Q' op:'FIFOQueue'
attr { key: 'component_types' value { list {
@@ -1365,11 +1423,14 @@ class FIFOQueueDictTest(tf.test.TestCase):
self.assertEqual(["i", "j"], q.names)
def testConstructorWithShapes(self):
- with tf.Graph().as_default():
- q = tf.FIFOQueue(5, (tf.int32, tf.float32), names=("i", "f"),
- shapes=(tf.TensorShape([1, 1, 2, 3]),
- tf.TensorShape([5, 8])), name="Q")
- self.assertTrue(isinstance(q.queue_ref, tf.Tensor))
+ with ops.Graph().as_default():
+ q = data_flow_ops.FIFOQueue(
+ 5, (dtypes_lib.int32, dtypes_lib.float32),
+ names=("i", "f"),
+ shapes=(tensor_shape.TensorShape([1, 1, 2, 3]),
+ tensor_shape.TensorShape([5, 8])),
+ name="Q")
+ self.assertTrue(isinstance(q.queue_ref, ops.Tensor))
self.assertProtoEquals("""
name:'Q' op:'FIFOQueue'
attr { key: 'component_types' value { list {
@@ -1391,7 +1452,8 @@ class FIFOQueueDictTest(tf.test.TestCase):
def testEnqueueDequeueOneComponent(self):
with self.test_session() as sess:
- q = tf.FIFOQueue(10, tf.float32, shapes=((),), names="f")
+ q = data_flow_ops.FIFOQueue(
+ 10, dtypes_lib.float32, shapes=((),), names="f")
# Verify that enqueue() checks that when using names we must enqueue a
# dictionary.
with self.assertRaisesRegexp(ValueError, "enqueue a dictionary"):
@@ -1435,8 +1497,10 @@ class FIFOQueueDictTest(tf.test.TestCase):
def testEnqueueDequeueMultipleComponent(self):
with self.test_session() as sess:
- q = tf.FIFOQueue(10, (tf.float32, tf.int32, tf.string),
- shapes=((), (), ()), names=("f", "i", "s"))
+ q = data_flow_ops.FIFOQueue(
+ 10, (dtypes_lib.float32, dtypes_lib.int32, dtypes_lib.string),
+ shapes=((), (), ()),
+ names=("f", "i", "s"))
# Verify that enqueue() checks that when using names we must enqueue a
# dictionary.
with self.assertRaisesRegexp(ValueError, "enqueue a dictionary"):
@@ -1465,10 +1529,17 @@ class FIFOQueueDictTest(tf.test.TestCase):
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op4 = q.enqueue_many({"i": [12, 12], "s": ["aa", "bb"]})
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
- enqueue_op4 = q.enqueue_many({"f": [40.0, 50.0], "i": [126, 127],
- "s": ["dd", "ee"], "x": [1, 2]})
- enqueue_op4 = q.enqueue_many({"f": [40.0, 50.0], "i": [126, 127],
- "s": ["dd", "ee"]})
+ enqueue_op4 = q.enqueue_many({
+ "f": [40.0, 50.0],
+ "i": [126, 127],
+ "s": ["dd", "ee"],
+ "x": [1, 2]
+ })
+ enqueue_op4 = q.enqueue_many({
+ "f": [40.0, 50.0],
+ "i": [126, 127],
+ "s": ["dd", "ee"]
+ })
dequeue = q.dequeue()
dequeue_2 = q.dequeue_many(2)
sess.run(enqueue_op)
@@ -1478,64 +1549,62 @@ class FIFOQueueDictTest(tf.test.TestCase):
i, f, s = sess.run([dequeue["i"], dequeue["f"], dequeue["s"]])
self.assertEqual(123, i)
self.assertEqual(10.0, f)
- self.assertEqual(tf.compat.as_bytes("aa"), s)
+ self.assertEqual(compat.as_bytes("aa"), s)
i, f, s = sess.run([dequeue_2["i"], dequeue_2["f"], dequeue_2["s"]])
self.assertEqual([124, 125], list(i))
self.assertTrue([20.0, 30.0], list(f))
- self.assertTrue([tf.compat.as_bytes("bb"), tf.compat.as_bytes("cc")],
- list(s))
+ self.assertTrue([compat.as_bytes("bb"), compat.as_bytes("cc")], list(s))
i, f, s = sess.run([dequeue_2["i"], dequeue_2["f"], dequeue_2["s"]])
self.assertEqual([126, 127], list(i))
self.assertTrue([40.0, 50.0], list(f))
- self.assertTrue([tf.compat.as_bytes("dd"), tf.compat.as_bytes("ee")],
- list(s))
+ self.assertTrue([compat.as_bytes("dd"), compat.as_bytes("ee")], list(s))
-class FIFOQueueWithTimeoutTest(tf.test.TestCase):
+class FIFOQueueWithTimeoutTest(test.TestCase):
def testDequeueWithTimeout(self):
with self.test_session(
- config=tf.ConfigProto(operation_timeout_in_ms=20)) as sess:
- q = tf.FIFOQueue(10, tf.float32)
- self.assertEqual(tf.compat.as_bytes(""),
- q.queue_ref.op.get_attr("container"))
+ config=config_pb2.ConfigProto(operation_timeout_in_ms=20)) as sess:
+ q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
+ self.assertEqual(
+ compat.as_bytes(""), q.queue_ref.op.get_attr("container"))
dequeued_t = q.dequeue()
# Intentionally do not run any enqueue_ops so that dequeue will block
# until operation_timeout_in_ms.
- with self.assertRaisesRegexp(tf.errors.DeadlineExceededError,
+ with self.assertRaisesRegexp(errors_impl.DeadlineExceededError,
"Timed out waiting for notification"):
sess.run(dequeued_t)
def testReusableAfterTimeout(self):
with self.test_session() as sess:
- q = tf.FIFOQueue(10, tf.float32)
+ q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
dequeued_t = q.dequeue()
enqueue_op = q.enqueue(37)
- with self.assertRaisesRegexp(tf.errors.DeadlineExceededError,
+ with self.assertRaisesRegexp(errors_impl.DeadlineExceededError,
"Timed out waiting for notification"):
- sess.run(dequeued_t, options=tf.RunOptions(timeout_in_ms=10))
+ sess.run(dequeued_t, options=config_pb2.RunOptions(timeout_in_ms=10))
- with self.assertRaisesRegexp(tf.errors.DeadlineExceededError,
+ with self.assertRaisesRegexp(errors_impl.DeadlineExceededError,
"Timed out waiting for notification"):
- sess.run(dequeued_t, options=tf.RunOptions(timeout_in_ms=10))
+ sess.run(dequeued_t, options=config_pb2.RunOptions(timeout_in_ms=10))
sess.run(enqueue_op)
self.assertEqual(37, sess.run(dequeued_t))
-class QueueContainerTest(tf.test.TestCase):
+class QueueContainerTest(test.TestCase):
def testContainer(self):
- with tf.Graph().as_default():
- with tf.container("test"):
- q = tf.FIFOQueue(10, tf.float32)
- self.assertEqual(tf.compat.as_bytes("test"),
- q.queue_ref.op.get_attr("container"))
+ with ops.Graph().as_default():
+ with ops.container("test"):
+ q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
+ self.assertEqual(
+ compat.as_bytes("test"), q.queue_ref.op.get_attr("container"))
-class FIFOQueueBenchmark(tf.test.Benchmark):
+class FIFOQueueBenchmark(test.Benchmark):
"""Benchmark FIFOQueue operations."""
def _build_graph(self):
@@ -1544,7 +1613,7 @@ class FIFOQueueBenchmark(tf.test.Benchmark):
Returns:
A tuple with the graph init tensor and graph output tensor.
"""
- q = tf.FIFOQueue(1, "float")
+ q = data_flow_ops.FIFOQueue(1, "float")
init = q.enqueue(1.0)
x = q.dequeue()
q_inc = q.enqueue(x + 1)
@@ -1563,10 +1632,10 @@ class FIFOQueueBenchmark(tf.test.Benchmark):
Returns:
The duration of the run in seconds.
"""
- graph = tf.Graph()
+ graph = ops.Graph()
with graph.as_default():
init, output = self._build_graph()
- with tf.Session(graph=graph) as session:
+ with session_lib.Session(graph=graph) as session:
init.run()
_ = session.run(output) # warm up.
start_time = time.time()
@@ -1582,4 +1651,4 @@ class FIFOQueueBenchmark(tf.test.Benchmark):
if __name__ == "__main__":
- tf.test.main()
+ test.main()
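
The fifo_queue_test.py hunks above all apply one mechanical rewrite: the hourglass "import tensorflow as tf" is dropped and every tf.* reference is rebound to the module that actually defines it (tf.FIFOQueue -> data_flow_ops.FIFOQueue, tf.float32 -> dtypes_lib.float32, tf.errors.* -> errors_impl.*, tf.test.TestCase -> test.TestCase). A minimal sketch of the resulting shape, assuming import lines that match the aliases used in the hunks (the import block itself falls outside this excerpt):

# Direct imports in place of the tf hourglass. These exact import lines
# are an assumption inferred from the aliases (data_flow_ops, dtypes_lib,
# errors_impl, test) used throughout the rewritten test.
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.platform import test


class FIFOQueueSketchTest(test.TestCase):

  def testEnqueueThenDequeue(self):
    with self.test_session():
      # Same queue API as before the rewrite, reached via data_flow_ops.
      q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
      q.enqueue((10.0,)).run()
      self.assertEqual(10.0, q.dequeue().eval())

  def testDequeueFromClosedQueueRaises(self):
    with self.test_session():
      q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
      q.close().run()
      # errors_impl replaces tf.errors for the exception classes.
      with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
                                   "is closed and has insufficient"):
        q.dequeue().eval()


if __name__ == "__main__":
  test.main()
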
diff --git a/tensorflow/python/kernel_tests/fractional_avg_pool_op_test.py b/tensorflow/python/kernel_tests/fractional_avg_pool_op_test.py
index dafecf2728..48a51c8072 100644
--- a/tensorflow/python/kernel_tests/fractional_avg_pool_op_test.py
+++ b/tensorflow/python/kernel_tests/fractional_avg_pool_op_test.py
@@ -12,8 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for fractional average pool operation."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
@@ -21,12 +21,16 @@ from __future__ import print_function
import math
import numpy as np
-import tensorflow as tf
+from tensorflow.python.framework import constant_op
from tensorflow.python.ops import gen_nn_ops
+from tensorflow.python.ops import gradient_checker
+from tensorflow.python.ops import nn_ops
+import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
+from tensorflow.python.platform import test
-class FractionalAvgTest(tf.test.TestCase):
+class FractionalAvgTest(test.TestCase):
  # Random number generator with seed.
_PRNG = np.random.RandomState(341261000)
@@ -52,9 +56,8 @@ class FractionalAvgTest(tf.test.TestCase):
row_start = row_seq[i]
row_end = row_seq[i + 1] + 1 if overlapping else row_seq[i + 1]
row_end = min(row_end, row_max)
- output_image = np.vstack((output_image,
- np.mean(input_matrix[row_start:row_end, :],
- axis=0))) # axis 0 is along row
+ output_image = np.vstack((output_image, np.mean(
+ input_matrix[row_start:row_end, :], axis=0))) # axis 0 is along row
# remove the sentinel row
return output_image[1:, :]
@@ -123,13 +126,14 @@ class FractionalAvgTest(tf.test.TestCase):
None
"""
with self.test_session() as sess:
- p, r, c = tf.nn.fractional_avg_pool(input_tensor,
- pooling_ratio,
- pseudo_random,
- overlapping,
- deterministic=True,
- seed=self._SEED,
- seed2=self._SEED2)
+ p, r, c = nn_ops.fractional_avg_pool(
+ input_tensor,
+ pooling_ratio,
+ pseudo_random,
+ overlapping,
+ deterministic=True,
+ seed=self._SEED,
+ seed2=self._SEED2)
actual, row_seq, col_seq = sess.run([p, r, c])
expected = self._GetExpectedFractionalAvgPoolResult(input_tensor, row_seq,
col_seq, overlapping)
@@ -155,7 +159,7 @@ class FractionalAvgTest(tf.test.TestCase):
rand_mat = self._PRNG.randint(10, size=tensor_shape)
pooling_ratio = [1, math.sqrt(2), math.sqrt(2), 1]
with self.test_session() as sess:
- p, r, c = tf.nn.fractional_avg_pool(
+ p, r, c = nn_ops.fractional_avg_pool(
rand_mat.astype(np.float32),
pooling_ratio,
pseudo_random,
@@ -241,7 +245,7 @@ class FractionalAvgTest(tf.test.TestCase):
[5, 4, 7, 5]
]).reshape((1, 3, 4, 1))
# pyformat: enable
- p, unused_r, unused_c = tf.nn.fractional_avg_pool(
+ p, unused_r, unused_c = nn_ops.fractional_avg_pool(
mat.reshape(tensor_shape), [1, math.sqrt(3), math.sqrt(2), 1],
pseudo_random,
overlapping,
@@ -307,7 +311,7 @@ class FractionalAvgTest(tf.test.TestCase):
overlapping)
-class FractionalAvgPoolGradTest(tf.test.TestCase):
+class FractionalAvgPoolGradTest(test.TestCase):
"""Tests for FractionalAvgPoolGrad.
Two types of tests for FractionalAvgPoolGrad.
@@ -355,13 +359,14 @@ class FractionalAvgPoolGradTest(tf.test.TestCase):
for num_channels in [1, 2]:
input_shape = (num_batches, num_rows, num_cols, num_channels)
with self.test_session() as _:
- input_tensor = tf.constant(self._GenerateRandomInputTensor(
- input_shape).astype(np.float32))
+ input_tensor = constant_op.constant(
+ self._GenerateRandomInputTensor(input_shape).astype(
+ np.float32))
window_size = [1, row_window_size, col_window_size, 1]
stride_size = [1, row_window_size, col_window_size, 1]
padding = "VALID"
- output_tensor = tf.nn.avg_pool(input_tensor, window_size,
- stride_size, padding)
+ output_tensor = nn_ops.avg_pool(input_tensor, window_size,
+ stride_size, padding)
output_data = output_tensor.eval()
num_elements = 1
for dim_size in output_data.shape:
@@ -393,13 +398,14 @@ class FractionalAvgPoolGradTest(tf.test.TestCase):
for num_channels in [1, 2]:
input_shape = (num_batches, num_rows, num_cols, num_channels)
with self.test_session() as _:
- input_tensor = tf.constant(self._GenerateRandomInputTensor(
- input_shape).astype(np.float32))
+ input_tensor = constant_op.constant(
+ self._GenerateRandomInputTensor(input_shape).astype(
+ np.float32))
window_size = [1, row_window_size, col_window_size, 1]
stride_size = [1, row_window_size - 1, col_window_size - 1, 1]
padding = "VALID"
- output_tensor = tf.nn.avg_pool(input_tensor, window_size,
- stride_size, padding)
+ output_tensor = nn_ops.avg_pool(input_tensor, window_size,
+ stride_size, padding)
output_data = output_tensor.eval()
num_elements = 1
for dim_size in output_data.shape:
@@ -432,8 +438,8 @@ class FractionalAvgPoolGradTest(tf.test.TestCase):
for pseudo_random in True, False:
for overlapping in True, False:
with self.test_session() as _:
- input_tensor = tf.constant(input_data, shape=input_shape)
- output_tensor, unused_a, unused_b = tf.nn.fractional_avg_pool(
+ input_tensor = constant_op.constant(input_data, shape=input_shape)
+ output_tensor, unused_a, unused_b = nn_ops.fractional_avg_pool(
input_tensor,
pooling_ratio,
pseudo_random=pseudo_random,
@@ -445,7 +451,7 @@ class FractionalAvgPoolGradTest(tf.test.TestCase):
output_shape = output_data.shape
# error_margin and delta setting is similar to avg_pool_grad.
error_margin = 1e-4
- gradient_error = tf.test.compute_gradient_error(
+ gradient_error = gradient_checker.compute_gradient_error(
input_tensor,
input_shape,
output_tensor,
@@ -465,8 +471,8 @@ class FractionalAvgPoolGradTest(tf.test.TestCase):
input_shape = (num_batches, num_rows, num_cols, num_channels)
input_data = self._GenerateRandomInputTensor(input_shape)
with self.test_session() as _:
- input_tensor = tf.constant(input_data, shape=input_shape)
- output_tensor, unused_a, unused_b = tf.nn.fractional_avg_pool(
+ input_tensor = constant_op.constant(input_data, shape=input_shape)
+ output_tensor, unused_a, unused_b = nn_ops.fractional_avg_pool(
input_tensor,
pooling_ratio,
pseudo_random=pseudo_random,
@@ -478,7 +484,7 @@ class FractionalAvgPoolGradTest(tf.test.TestCase):
output_shape = output_data.shape
# error_margin and delta setting is similar to avg_pool_grad.
error_margin = 1e-4
- gradient_error = tf.test.compute_gradient_error(
+ gradient_error = gradient_checker.compute_gradient_error(
input_tensor,
input_shape,
output_tensor,
@@ -496,8 +502,8 @@ class FractionalAvgPoolGradTest(tf.test.TestCase):
pseudo_random = False
with self.test_session() as _:
- input_tensor = tf.constant(input_data, shape=input_shape)
- output_tensor, unused_a, unused_b = tf.nn.fractional_avg_pool(
+ input_tensor = constant_op.constant(input_data, shape=input_shape)
+ output_tensor, unused_a, unused_b = nn_ops.fractional_avg_pool(
input_tensor,
pooling_ratio,
pseudo_random=pseudo_random,
@@ -507,7 +513,7 @@ class FractionalAvgPoolGradTest(tf.test.TestCase):
seed2=self._SEED2)
# error_margin and delta setting is similar to avg_pool_grad.
error_margin = 1e-4
- gradient_error = tf.test.compute_gradient_error(
+ gradient_error = gradient_checker.compute_gradient_error(
input_tensor,
input_shape,
output_tensor,
@@ -518,4 +524,4 @@ class FractionalAvgPoolGradTest(tf.test.TestCase):
if __name__ == "__main__":
- tf.test.main()
+ test.main()
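
For reference, the reshuffled np.vstack/np.mean block near the top of fractional_avg_pool_op_test.py is the test's ground truth: each output row is the mean of the input-row band [row_seq[i], row_seq[i+1]), extended by one row when overlapping is set. A standalone sketch of that reduction (the function name and example data are illustrative, not from the diff):

import numpy as np


def pool_rows_avg(input_matrix, row_seq, overlapping=False):
  # Mirror of the test helper's row reduction: average each band of
  # rows [row_seq[i], row_seq[i + 1]), taking one extra row when the
  # pooling regions overlap, and clamping at the last row.
  row_max = input_matrix.shape[0]
  bands = []
  for i in range(len(row_seq) - 1):
    row_start = row_seq[i]
    row_end = row_seq[i + 1] + 1 if overlapping else row_seq[i + 1]
    row_end = min(row_end, row_max)
    bands.append(np.mean(input_matrix[row_start:row_end, :], axis=0))
  return np.vstack(bands)


mat = np.arange(12, dtype=np.float32).reshape(4, 3)
print(pool_rows_avg(mat, [0, 2, 4]))        # 2x3: means of row pairs
print(pool_rows_avg(mat, [0, 2, 4], True))  # overlapping bands share row 2
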
diff --git a/tensorflow/python/kernel_tests/fractional_max_pool_op_test.py b/tensorflow/python/kernel_tests/fractional_max_pool_op_test.py
index 424f4d588a..d380c31de3 100644
--- a/tensorflow/python/kernel_tests/fractional_max_pool_op_test.py
+++ b/tensorflow/python/kernel_tests/fractional_max_pool_op_test.py
@@ -12,8 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for fractional max pool operation."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
@@ -21,12 +21,16 @@ from __future__ import print_function
import math
import numpy as np
-import tensorflow as tf
+from tensorflow.python.framework import constant_op
from tensorflow.python.ops import gen_nn_ops
+from tensorflow.python.ops import gradient_checker
+from tensorflow.python.ops import nn_ops
+import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
+from tensorflow.python.platform import test
-class FractionalMaxPoolTest(tf.test.TestCase):
+class FractionalMaxPoolTest(test.TestCase):
  # Random number generator with seed.
_PRNG = np.random.RandomState(341261)
@@ -52,9 +56,8 @@ class FractionalMaxPoolTest(tf.test.TestCase):
row_start = row_seq[i]
row_end = row_seq[i + 1] + 1 if overlapping else row_seq[i + 1]
row_end = min(row_end, row_max)
- output_image = np.vstack((output_image,
- np.amax(input_matrix[row_start:row_end, :],
- axis=0))) # axis 0 is along row
+ output_image = np.vstack((output_image, np.amax(
+ input_matrix[row_start:row_end, :], axis=0))) # axis 0 is along row
# remove the sentinel row
return output_image[1:, :]
@@ -123,13 +126,14 @@ class FractionalMaxPoolTest(tf.test.TestCase):
None
"""
with self.test_session() as sess:
- p, r, c = tf.nn.fractional_max_pool(input_tensor,
- pooling_ratio,
- pseudo_random,
- overlapping,
- deterministic=True,
- seed=self._SEED,
- seed2=self._SEED2)
+ p, r, c = nn_ops.fractional_max_pool(
+ input_tensor,
+ pooling_ratio,
+ pseudo_random,
+ overlapping,
+ deterministic=True,
+ seed=self._SEED,
+ seed2=self._SEED2)
actual, row_seq, col_seq = sess.run([p, r, c])
expected = self._GetExpectedFractionalMaxPoolResult(input_tensor, row_seq,
col_seq, overlapping)
@@ -155,13 +159,14 @@ class FractionalMaxPoolTest(tf.test.TestCase):
rand_mat = self._PRNG.randint(10, size=tensor_shape)
pooling_ratio = [1, math.sqrt(2), math.sqrt(2), 1]
with self.test_session() as sess:
- p, r, c = tf.nn.fractional_max_pool(rand_mat,
- pooling_ratio,
- pseudo_random,
- overlapping,
- deterministic=True,
- seed=self._SEED,
- seed2=self._SEED2)
+ p, r, c = nn_ops.fractional_max_pool(
+ rand_mat,
+ pooling_ratio,
+ pseudo_random,
+ overlapping,
+ deterministic=True,
+ seed=self._SEED,
+ seed2=self._SEED2)
tensor_output, row_seq, col_seq = sess.run([p, r, c])
expected_result = self._GetExpectedFractionalMaxPoolResult(rand_mat,
row_seq,
@@ -277,7 +282,7 @@ class FractionalMaxPoolTest(tf.test.TestCase):
overlapping)
-class FractionalMaxPoolGradTest(tf.test.TestCase):
+class FractionalMaxPoolGradTest(test.TestCase):
"""Tests for FractionalMaxPoolGrad.
Two types of tests for FractionalMaxPoolGrad.
@@ -339,13 +344,13 @@ class FractionalMaxPoolGradTest(tf.test.TestCase):
for num_channels in [1, 2]:
input_shape = (num_batches, num_rows, num_cols, num_channels)
with self.test_session() as _:
- input_tensor = tf.constant(self._GenerateUniqueRandomInputTensor(
- input_shape))
+ input_tensor = constant_op.constant(
+ self._GenerateUniqueRandomInputTensor(input_shape))
window_size = [1, row_window_size, col_window_size, 1]
stride_size = [1, row_window_size, col_window_size, 1]
padding = "VALID"
- output_tensor = tf.nn.max_pool(input_tensor, window_size,
- stride_size, padding)
+ output_tensor = nn_ops.max_pool(input_tensor, window_size,
+ stride_size, padding)
output_data = output_tensor.eval()
output_backprop = self._PRNG.randint(100, size=output_data.shape)
input_backprop_tensor = gen_nn_ops._max_pool_grad(input_tensor,
@@ -377,13 +382,13 @@ class FractionalMaxPoolGradTest(tf.test.TestCase):
for num_channels in [1, 2]:
input_shape = (num_batches, num_rows, num_cols, num_channels)
with self.test_session() as _:
- input_tensor = tf.constant(self._GenerateUniqueRandomInputTensor(
- input_shape))
+ input_tensor = constant_op.constant(
+ self._GenerateUniqueRandomInputTensor(input_shape))
window_size = [1, row_window_size, col_window_size, 1]
stride_size = [1, row_window_size - 1, col_window_size - 1, 1]
padding = "VALID"
- output_tensor = tf.nn.max_pool(input_tensor, window_size,
- stride_size, padding)
+ output_tensor = nn_ops.max_pool(input_tensor, window_size,
+ stride_size, padding)
output_data = output_tensor.eval()
output_backprop = self._PRNG.randint(100, size=output_data.shape)
input_backprop_tensor = gen_nn_ops._max_pool_grad(input_tensor,
@@ -418,8 +423,8 @@ class FractionalMaxPoolGradTest(tf.test.TestCase):
for pseudo_random in True, False:
for overlapping in True, False:
with self.test_session() as _:
- input_tensor = tf.constant(input_data, shape=input_shape)
- output_tensor, unused_a, unused_b = tf.nn.fractional_max_pool(
+ input_tensor = constant_op.constant(input_data, shape=input_shape)
+ output_tensor, unused_a, unused_b = nn_ops.fractional_max_pool(
input_tensor,
pooling_ratio,
pseudo_random=pseudo_random,
@@ -431,7 +436,7 @@ class FractionalMaxPoolGradTest(tf.test.TestCase):
output_shape = output_data.shape
# error_margin and delta setting is similar to max_pool_grad.
error_margin = 1e-3
- gradient_error = tf.test.compute_gradient_error(
+ gradient_error = gradient_checker.compute_gradient_error(
input_tensor,
input_shape,
output_tensor,
@@ -453,8 +458,8 @@ class FractionalMaxPoolGradTest(tf.test.TestCase):
# Add some randomness to make input_data not so 'integer'
input_data += self._PRNG.random_sample(input_shape)
with self.test_session() as _:
- input_tensor = tf.constant(input_data, shape=input_shape)
- output_tensor, unused_a, unused_b = tf.nn.fractional_max_pool(
+ input_tensor = constant_op.constant(input_data, shape=input_shape)
+ output_tensor, unused_a, unused_b = nn_ops.fractional_max_pool(
input_tensor,
pooling_ratio,
pseudo_random=pseudo_random,
@@ -466,7 +471,7 @@ class FractionalMaxPoolGradTest(tf.test.TestCase):
output_shape = output_data.shape
# error_margin and delta setting is similar to max_pool_grad.
error_margin = 1e-3
- gradient_error = tf.test.compute_gradient_error(
+ gradient_error = gradient_checker.compute_gradient_error(
input_tensor,
input_shape,
output_tensor,
@@ -486,8 +491,8 @@ class FractionalMaxPoolGradTest(tf.test.TestCase):
pseudo_random = False
with self.test_session() as _:
- input_tensor = tf.constant(input_data, shape=input_shape)
- output_tensor, unused_a, unused_b = tf.nn.fractional_max_pool(
+ input_tensor = constant_op.constant(input_data, shape=input_shape)
+ output_tensor, unused_a, unused_b = nn_ops.fractional_max_pool(
input_tensor,
pooling_ratio,
pseudo_random=pseudo_random,
@@ -497,7 +502,7 @@ class FractionalMaxPoolGradTest(tf.test.TestCase):
seed2=self._SEED2)
# error_margin and delta setting is similar to max_pool_grad.
error_margin = 1e-3
- gradient_error = tf.test.compute_gradient_error(
+ gradient_error = gradient_checker.compute_gradient_error(
input_tensor,
input_shape,
output_tensor,
@@ -551,10 +556,10 @@ class FractionalMaxPoolGradTest(tf.test.TestCase):
input_size) # pyformat: disable
with self.test_session() as _:
# Test when overlapping is False
- input_tensor = tf.constant(input_data, shape=input_size)
- output_tensor = tf.constant(output_data_not_overlapping,
- shape=output_size)
- grad = tf.constant(output_backprop, shape=output_size)
+ input_tensor = constant_op.constant(input_data, shape=input_size)
+ output_tensor = constant_op.constant(
+ output_data_not_overlapping, shape=output_size)
+ grad = constant_op.constant(output_backprop, shape=output_size)
r = gen_nn_ops._fractional_max_pool_grad(
input_tensor,
output_tensor,
@@ -568,7 +573,8 @@ class FractionalMaxPoolGradTest(tf.test.TestCase):
self.assertAllClose(expected_input_backprop_not_overlapping,
input_backprop_not_overlapping)
# Test when overlapping is True
- output_tensor = tf.constant(output_data_overlapping, shape=output_size)
+ output_tensor = constant_op.constant(
+ output_data_overlapping, shape=output_size)
r = gen_nn_ops._fractional_max_pool_grad(
input_tensor, output_tensor, grad, row_seq, col_seq, overlapping=True)
input_backprop_overlapping = r.eval()
@@ -579,4 +585,4 @@ class FractionalMaxPoolGradTest(tf.test.TestCase):
if __name__ == "__main__":
- tf.test.main()
+ test.main()
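
The functional_ops_test.py diff that follows asserts concrete values for foldl and scan; the semantics are small enough to check by hand. A pure-Python sketch (helper names are illustrative) that reproduces the numbers asserted in testFoldl_Simple and testScan_Simple below:

def foldl(fn, elems, initializer=None):
  # tf.foldl semantics: left fold; without an initializer, the first
  # element seeds the accumulator.
  it = iter(elems)
  acc = next(it) if initializer is None else initializer
  for x in it:
    acc = fn(acc, x)
  return acc


def scan(fn, elems, initializer=None):
  # tf.scan semantics: emit the accumulator after every step; without
  # an initializer, the first element is emitted as-is.
  it = iter(elems)
  if initializer is None:
    acc = next(it)
    out = [acc]
  else:
    acc = initializer
    out = []
  for x in it:
    acc = fn(acc, x)
    out.append(acc)
  return out


# The exact values the tests below assert.
assert foldl(lambda a, x: (a + x) * 2, [1, 2, 3, 4, 5, 6]) == 208
assert foldl(lambda a, x: (a + x) * 2, [1, 2, 3, 4, 5, 6],
             initializer=10) == 880
assert scan(lambda a, x: a * x, [1, 2, 3, 4, 5, 6]) == [1, 2, 6, 24, 120, 720]
assert scan(lambda a, x: a * x, [1, 2, 3, 4, 5, 6],
            initializer=2) == [2, 4, 12, 48, 240, 1440]
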
diff --git a/tensorflow/python/kernel_tests/functional_ops_test.py b/tensorflow/python/kernel_tests/functional_ops_test.py
index 0ab4778a20..0bceb2fa3a 100644
--- a/tensorflow/python/kernel_tests/functional_ops_test.py
+++ b/tensorflow/python/kernel_tests/functional_ops_test.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for tensorflow.kernels.bcast_ops."""
from __future__ import absolute_import
@@ -20,156 +19,190 @@ from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
+from tensorflow.python.framework import sparse_tensor
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import functional_ops
+from tensorflow.python.ops import gradients_impl
+from tensorflow.python.ops import init_ops
+from tensorflow.python.ops import math_ops
+from tensorflow.python.ops import variable_scope
+from tensorflow.python.ops import variables
+import tensorflow.python.ops.tensor_array_grad # pylint: disable=unused-import
+from tensorflow.python.platform import test
def simple_scoped_fn(a, x):
"""Simple function: (a, x) -> 2(x+a), but with "2" as a variable in scope."""
- with tf.variable_scope("body"):
+ with variable_scope.variable_scope("body"):
# Dummy variable, just to check that scoping works as intended.
- two = tf.get_variable("two", [], dtype=tf.int32,
- initializer=tf.constant_initializer(2))
- return tf.mul(tf.add(a, x), two)
+ two = variable_scope.get_variable(
+ "two", [],
+ dtype=dtypes.int32,
+ initializer=init_ops.constant_initializer(2))
+ return math_ops.mul(math_ops.add(a, x), two)
-class FunctionalOpsTest(tf.test.TestCase):
+class FunctionalOpsTest(test.TestCase):
def testFoldl_Simple(self):
with self.test_session():
- elems = tf.constant([1, 2, 3, 4, 5, 6], name="data")
+ elems = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
- r = tf.foldl(lambda a, x: tf.mul(tf.add(a, x), 2), elems)
+ r = functional_ops.foldl(lambda a, x: math_ops.mul(math_ops.add(a, x), 2),
+ elems)
self.assertAllEqual(208, r.eval())
- r = tf.foldl(
- lambda a, x: tf.mul(tf.add(a, x), 2), elems, initializer=10)
+ r = functional_ops.foldl(
+ lambda a, x: math_ops.mul(math_ops.add(a, x), 2),
+ elems,
+ initializer=10)
self.assertAllEqual(880, r.eval())
def testFoldl_Scoped(self):
with self.test_session() as sess:
- with tf.variable_scope("root") as varscope:
- elems = tf.constant([1, 2, 3, 4, 5, 6], name="data")
+ with variable_scope.variable_scope("root") as varscope:
+ elems = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
- r = tf.foldl(simple_scoped_fn, elems)
+ r = functional_ops.foldl(simple_scoped_fn, elems)
# Check that we have the one variable we asked for here.
- self.assertEqual(len(tf.trainable_variables()), 1)
- self.assertEqual(tf.trainable_variables()[0].name, "root/body/two:0")
- sess.run([tf.global_variables_initializer()])
+ self.assertEqual(len(variables.trainable_variables()), 1)
+ self.assertEqual(variables.trainable_variables()[0].name,
+ "root/body/two:0")
+ sess.run([variables.global_variables_initializer()])
self.assertAllEqual(208, r.eval())
# Now let's reuse our single variable.
varscope.reuse_variables()
- r = tf.foldl(simple_scoped_fn, elems, initializer=10)
- self.assertEqual(len(tf.trainable_variables()), 1)
+ r = functional_ops.foldl(simple_scoped_fn, elems, initializer=10)
+ self.assertEqual(len(variables.trainable_variables()), 1)
self.assertAllEqual(880, r.eval())
def testFoldr_Simple(self):
with self.test_session():
- elems = tf.constant([1, 2, 3, 4, 5, 6], name="data")
+ elems = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
- r = tf.foldr(lambda a, x: tf.mul(tf.add(a, x), 2), elems)
+ r = functional_ops.foldr(lambda a, x: math_ops.mul(math_ops.add(a, x), 2),
+ elems)
self.assertAllEqual(450, r.eval())
- r = tf.foldr(
- lambda a, x: tf.mul(tf.add(a, x), 2), elems, initializer=10)
+ r = functional_ops.foldr(
+ lambda a, x: math_ops.mul(math_ops.add(a, x), 2),
+ elems,
+ initializer=10)
self.assertAllEqual(1282, r.eval())
def testFoldr_Scoped(self):
with self.test_session() as sess:
- with tf.variable_scope("root") as varscope:
- elems = tf.constant([1, 2, 3, 4, 5, 6], name="data")
+ with variable_scope.variable_scope("root") as varscope:
+ elems = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
- r = tf.foldr(simple_scoped_fn, elems)
+ r = functional_ops.foldr(simple_scoped_fn, elems)
# Check that we have the one variable we asked for here.
- self.assertEqual(len(tf.trainable_variables()), 1)
- self.assertEqual(tf.trainable_variables()[0].name, "root/body/two:0")
- sess.run([tf.global_variables_initializer()])
+ self.assertEqual(len(variables.trainable_variables()), 1)
+ self.assertEqual(variables.trainable_variables()[0].name,
+ "root/body/two:0")
+ sess.run([variables.global_variables_initializer()])
self.assertAllEqual(450, r.eval())
# Now let's reuse our single variable.
varscope.reuse_variables()
- r = tf.foldr(simple_scoped_fn, elems, initializer=10)
- self.assertEqual(len(tf.trainable_variables()), 1)
+ r = functional_ops.foldr(simple_scoped_fn, elems, initializer=10)
+ self.assertEqual(len(variables.trainable_variables()), 1)
self.assertAllEqual(1282, r.eval())
def testFold_Grad(self):
with self.test_session():
- elems = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name="data")
- v = tf.constant(2.0, name="v")
+ elems = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name="data")
+ v = constant_op.constant(2.0, name="v")
- r = tf.foldl(
- lambda a, x: tf.mul(a, x), elems, initializer=v)
- r = tf.gradients(r, v)[0]
+ r = functional_ops.foldl(
+ lambda a, x: math_ops.mul(a, x), elems, initializer=v)
+ r = gradients_impl.gradients(r, v)[0]
self.assertAllEqual(720.0, r.eval())
- r = tf.foldr(
- lambda a, x: tf.mul(a, x), elems, initializer=v)
- r = tf.gradients(r, v)[0]
+ r = functional_ops.foldr(
+ lambda a, x: math_ops.mul(a, x), elems, initializer=v)
+ r = gradients_impl.gradients(r, v)[0]
self.assertAllEqual(720.0, r.eval())
def testMap_Simple(self):
with self.test_session():
nums = [1, 2, 3, 4, 5, 6]
- elems = tf.constant(nums, name="data")
- r = tf.map_fn(lambda x: tf.mul(tf.add(x, 3), 2), elems)
+ elems = constant_op.constant(nums, name="data")
+ r = functional_ops.map_fn(lambda x: math_ops.mul(math_ops.add(x, 3), 2),
+ elems)
self.assertAllEqual(np.array([(x + 3) * 2 for x in nums]), r.eval())
def testMapSparseTensor(self):
with self.test_session():
with self.assertRaises(TypeError):
- tf.map_fn(lambda x: x, tf.SparseTensor(indices=[[0, 0], [0, 1], [1, 0]],
- values=tf.constant([0, 1, 2]),
- dense_shape=[2, 2]))
+ functional_ops.map_fn(
+ lambda x: x,
+ sparse_tensor.SparseTensor(
+ indices=[[0, 0], [0, 1], [1, 0]],
+ values=constant_op.constant([0, 1, 2]),
+ dense_shape=[2, 2]))
def testMap_Scoped(self):
with self.test_session() as sess:
def double_scoped(x):
"""2x with a dummy 2 that is scoped."""
- with tf.variable_scope("body"):
+ with variable_scope.variable_scope("body"):
# Dummy variable, just to check that scoping works as intended.
- two = tf.get_variable("two", [], dtype=tf.int32,
- initializer=tf.constant_initializer(2))
- return tf.mul(x, two)
+ two = variable_scope.get_variable(
+ "two", [],
+ dtype=dtypes.int32,
+ initializer=init_ops.constant_initializer(2))
+ return math_ops.mul(x, two)
- with tf.variable_scope("root") as varscope:
- elems = tf.constant([1, 2, 3, 4, 5, 6], name="data")
- doubles = np.array([2*x for x in [1, 2, 3, 4, 5, 6]])
+ with variable_scope.variable_scope("root") as varscope:
+ elems = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
+ doubles = np.array([2 * x for x in [1, 2, 3, 4, 5, 6]])
- r = tf.map_fn(double_scoped, elems)
+ r = functional_ops.map_fn(double_scoped, elems)
# Check that we have the one variable we asked for here.
- self.assertEqual(len(tf.trainable_variables()), 1)
- self.assertEqual(tf.trainable_variables()[0].name, "root/body/two:0")
- sess.run([tf.global_variables_initializer()])
+ self.assertEqual(len(variables.trainable_variables()), 1)
+ self.assertEqual(variables.trainable_variables()[0].name,
+ "root/body/two:0")
+ sess.run([variables.global_variables_initializer()])
self.assertAllEqual(doubles, r.eval())
# Now let's reuse our single variable.
varscope.reuse_variables()
- r = tf.map_fn(double_scoped, elems)
- self.assertEqual(len(tf.trainable_variables()), 1)
+ r = functional_ops.map_fn(double_scoped, elems)
+ self.assertEqual(len(variables.trainable_variables()), 1)
self.assertAllEqual(doubles, r.eval())
def testMap_Grad(self):
with self.test_session():
- param = tf.constant(2.0)
- elems = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name="elems")
- y = tf.map_fn(lambda x: tf.mul(tf.square(x), param), elems)
- r = tf.gradients(y, param)[0]
+ param = constant_op.constant(2.0)
+ elems = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name="elems")
+ y = functional_ops.map_fn(
+ lambda x: math_ops.mul(math_ops.square(x), param), elems)
+ r = gradients_impl.gradients(y, param)[0]
self.assertAllEqual(91.0, r.eval())
- r = tf.gradients(y, elems)[0]
+ r = gradients_impl.gradients(y, elems)[0]
self.assertAllEqual([4.0, 8.0, 12.0, 16.0, 20.0, 24.0], r.eval())
def testMap_SimpleNotTensor(self):
with self.test_session():
nums = np.array([1, 2, 3, 4, 5, 6])
- r = tf.map_fn(lambda x: tf.mul(tf.add(x, 3), 2), nums)
+ r = functional_ops.map_fn(lambda x: math_ops.mul(math_ops.add(x, 3), 2),
+ nums)
self.assertAllEqual(np.array([(x + 3) * 2 for x in nums]), r.eval())
def testMap_SingleInputMultiOutput(self):
with self.test_session() as sess:
nums = np.array([1, 2, 3, 4, 5, 6])
- r = tf.map_fn(lambda x: ((x + 3) * 2, -(x + 3) * 2), nums,
- dtype=(tf.int64, tf.int64))
+ r = functional_ops.map_fn(
+ lambda x: ((x + 3) * 2, -(x + 3) * 2),
+ nums,
+ dtype=(dtypes.int64, dtypes.int64))
self.assertEqual(2, len(r))
self.assertEqual((6,), r[0].get_shape())
self.assertEqual((6,), r[1].get_shape())
@@ -183,23 +216,26 @@ class FunctionalOpsTest(tf.test.TestCase):
with self.assertRaisesRegexp(
TypeError, r"two structures don't have the same sequence type."):
# lambda emits tuple, but dtype is a list
- tf.map_fn(lambda x: ((x + 3) * 2, -(x + 3) * 2), nums,
- dtype=[tf.int64, tf.int64])
+ functional_ops.map_fn(
+ lambda x: ((x + 3) * 2, -(x + 3) * 2),
+ nums,
+ dtype=[dtypes.int64, dtypes.int64])
def testMap_MultiInputSingleOutput(self):
with self.test_session():
nums = np.array([1, 2, 3, 4, 5, 6])
- r = tf.map_fn(lambda x: x[0]*x[1][0] + x[1][1], (nums, (nums, -nums)),
- dtype=tf.int64)
+ r = functional_ops.map_fn(
+ lambda x: x[0] * x[1][0] + x[1][1], (nums, (nums, -nums)),
+ dtype=dtypes.int64)
self.assertEqual((6,), r.get_shape())
received = r.eval()
- self.assertAllEqual(nums*nums + (-nums), received)
+ self.assertAllEqual(nums * nums + (-nums), received)
def testMap_MultiInputSameStructureOutput(self):
with self.test_session() as sess:
nums = np.array([1, 2, 3, 4, 5, 6])
- r = tf.map_fn(
- lambda x: (x[1][0], (x[1][1], x[0])), (nums, (2*nums, -nums)))
+ r = functional_ops.map_fn(lambda x: (x[1][0], (x[1][1], x[0])),
+ (nums, (2 * nums, -nums)))
r = [r[0], r[1][0], r[1][1]]
self.assertEqual((6,), r[0].get_shape())
self.assertEqual((6,), r[1].get_shape())
@@ -211,21 +247,22 @@ class FunctionalOpsTest(tf.test.TestCase):
def testScan_Simple(self):
with self.test_session():
- elems = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name="data")
- v = tf.constant(2.0, name="v")
+ elems = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name="data")
+ v = constant_op.constant(2.0, name="v")
- r = tf.scan(lambda a, x: tf.mul(a, x), elems)
+ r = functional_ops.scan(lambda a, x: math_ops.mul(a, x), elems)
self.assertAllEqual([1., 2., 6., 24., 120., 720.], r.eval())
- r = tf.scan(
- lambda a, x: tf.mul(a, x), elems, initializer=v)
+ r = functional_ops.scan(
+ lambda a, x: math_ops.mul(a, x), elems, initializer=v)
self.assertAllEqual([2., 4., 12., 48., 240., 1440.], r.eval())
def testScan_SingleInputMultiOutput(self):
with self.test_session() as sess:
elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
initializer = (np.array(1.0), np.array(-1.0))
- r = tf.scan(lambda a, x: (a[0] * x, -a[1] * x), elems, initializer)
+ r = functional_ops.scan(lambda a, x: (a[0] * x, -a[1] * x), elems,
+ initializer)
r_value = sess.run(r)
self.assertAllEqual([1.0, 2.0, 6.0, 24.0, 120.0, 720.0], r_value[0])
@@ -236,15 +273,15 @@ class FunctionalOpsTest(tf.test.TestCase):
elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
initializer = np.array(1.0)
# Multiply a * 1 each time
- r = tf.scan(
- lambda a, x: a * (x[0] + x[1]), (elems + 1, -elems), initializer)
+ r = functional_ops.scan(lambda a, x: a * (x[0] + x[1]),
+ (elems + 1, -elems), initializer)
self.assertAllEqual([1.0, 1.0, 1.0, 1.0, 1.0, 1.0], r.eval())
def testScan_MultiInputSameTypeOutput(self):
with self.test_session() as sess:
elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
- r = tf.scan(
- lambda a, x: (a[0] + x[0], a[1] + x[1]), (elems, -elems))
+ r = functional_ops.scan(lambda a, x: (a[0] + x[0], a[1] + x[1]),
+ (elems, -elems))
r_value = sess.run(r)
self.assertAllEqual(np.cumsum(elems), r_value[0])
self.assertAllEqual(np.cumsum(-elems), r_value[1])
@@ -256,37 +293,39 @@ class FunctionalOpsTest(tf.test.TestCase):
# fn emits a pair (a, -a), but the initializer is a single value.
with self.assertRaisesRegexp(
ValueError, "two structures don't have the same number of elements"):
- tf.scan(lambda a, x: (a, -a), elems, initializer)
+ functional_ops.scan(lambda a, x: (a, -a), elems, initializer)
def testScan_Scoped(self):
with self.test_session() as sess:
- with tf.variable_scope("root") as varscope:
- elems = tf.constant([1, 2, 3, 4, 5, 6], name="data")
+ with variable_scope.variable_scope("root") as varscope:
+ elems = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
- r = tf.scan(simple_scoped_fn, elems)
+ r = functional_ops.scan(simple_scoped_fn, elems)
# Check that we have the one variable we asked for here.
- self.assertEqual(len(tf.trainable_variables()), 1)
- self.assertEqual(tf.trainable_variables()[0].name, "root/body/two:0")
- sess.run([tf.global_variables_initializer()])
+ self.assertEqual(len(variables.trainable_variables()), 1)
+ self.assertEqual(variables.trainable_variables()[0].name,
+ "root/body/two:0")
+ sess.run([variables.global_variables_initializer()])
results = np.array([1, 6, 18, 44, 98, 208])
self.assertAllEqual(results, r.eval())
# Now let's reuse our single variable.
varscope.reuse_variables()
- r = tf.scan(simple_scoped_fn, elems, initializer=2)
- self.assertEqual(len(tf.trainable_variables()), 1)
+ r = functional_ops.scan(simple_scoped_fn, elems, initializer=2)
+ self.assertEqual(len(variables.trainable_variables()), 1)
results = np.array([6, 16, 38, 84, 178, 368])
self.assertAllEqual(results, r.eval())
def testScanFoldl_Nested(self):
with self.test_session():
- elems = tf.constant([1.0, 2.0, 3.0, 4.0], name="data")
- inner_elems = tf.constant([0.5, 0.5], name="data")
+ elems = constant_op.constant([1.0, 2.0, 3.0, 4.0], name="data")
+ inner_elems = constant_op.constant([0.5, 0.5], name="data")
def r_inner(a, x):
- return tf.foldl(lambda b, y: b * y * x, inner_elems, initializer=a)
+ return functional_ops.foldl(
+ lambda b, y: b * y * x, inner_elems, initializer=a)
- r = tf.scan(r_inner, elems)
+ r = functional_ops.scan(r_inner, elems)
# t == 0 (returns 1)
# t == 1, a == 1, x == 2 (returns 1)
@@ -302,89 +341,99 @@ class FunctionalOpsTest(tf.test.TestCase):
def testScan_Control(self):
with self.test_session() as sess:
- s = tf.placeholder(tf.float32, shape=[None])
- b = tf.placeholder(tf.bool)
+ s = array_ops.placeholder(dtypes.float32, shape=[None])
+ b = array_ops.placeholder(dtypes.bool)
- with tf.control_dependencies([b]):
- c = tf.scan(lambda a, x: x * a, s)
- self.assertAllClose(np.array([1.0, 3.0, 9.0]),
- sess.run(c, {s: [1, 3, 3], b: True}))
+ with ops.control_dependencies([b]):
+ c = functional_ops.scan(lambda a, x: x * a, s)
+ self.assertAllClose(
+ np.array([1.0, 3.0, 9.0]), sess.run(c, {s: [1, 3, 3],
+ b: True}))
def testScan_Grad(self):
with self.test_session():
- elems = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name="data")
- v = tf.constant(2.0, name="v")
+ elems = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name="data")
+ v = constant_op.constant(2.0, name="v")
- r = tf.scan(lambda a, x: tf.mul(a, x), elems, initializer=v)
- r = tf.gradients(r, v)[0]
+ r = functional_ops.scan(
+ lambda a, x: math_ops.mul(a, x), elems, initializer=v)
+ r = gradients_impl.gradients(r, v)[0]
self.assertAllEqual(873.0, r.eval())
def testFoldShape(self):
with self.test_session():
- x = tf.constant([[1, 2, 3], [4, 5, 6]])
+ x = constant_op.constant([[1, 2, 3], [4, 5, 6]])
+
def fn(_, current_input):
return current_input
- initializer = tf.constant([0, 0, 0])
- y = tf.foldl(fn, x, initializer=initializer)
+
+ initializer = constant_op.constant([0, 0, 0])
+ y = functional_ops.foldl(fn, x, initializer=initializer)
self.assertAllEqual(y.get_shape(), y.eval().shape)
def testMapShape(self):
with self.test_session():
- x = tf.constant([[1, 2, 3], [4, 5, 6]])
- y = tf.map_fn(lambda e: e, x)
+ x = constant_op.constant([[1, 2, 3], [4, 5, 6]])
+ y = functional_ops.map_fn(lambda e: e, x)
self.assertAllEqual(y.get_shape(), y.eval().shape)
def testMapUnknownShape(self):
- x = tf.placeholder(tf.float32)
- y = tf.map_fn(lambda e: e, x)
+ x = array_ops.placeholder(dtypes.float32)
+ y = functional_ops.map_fn(lambda e: e, x)
self.assertIs(None, y.get_shape().dims)
def testMapEmptyScalar(self):
with self.test_session():
- map_return = tf.map_fn(lambda x: 1, tf.constant([]))
+ map_return = functional_ops.map_fn(lambda x: 1, constant_op.constant([]))
self.assertAllEqual([0], map_return.get_shape().dims)
self.assertAllEqual([0], map_return.eval().shape)
def testMapEmptyTensor(self):
with self.test_session():
- map_return = tf.map_fn(lambda x: tf.zeros([3, 2]), tf.constant([]))
+ map_return = functional_ops.map_fn(lambda x: array_ops.zeros([3, 2]),
+ constant_op.constant([]))
self.assertAllEqual([0, 3, 2], map_return.get_shape().dims)
self.assertAllEqual([0, 3, 2], map_return.eval().shape)
def testScanShape(self):
with self.test_session():
- x = tf.constant([[1, 2, 3], [4, 5, 6]])
+ x = constant_op.constant([[1, 2, 3], [4, 5, 6]])
+
def fn(_, current_input):
return current_input
- initializer = tf.constant([0, 0, 0])
- y = tf.scan(fn, x, initializer=initializer)
+
+ initializer = constant_op.constant([0, 0, 0])
+ y = functional_ops.scan(fn, x, initializer=initializer)
self.assertAllEqual(y.get_shape(), y.eval().shape)
def testScanEmptyTensor(self):
with self.test_session():
- x = tf.scan(lambda x, _: x, tf.range(0), initializer=tf.ones([2, 4]))
+ x = functional_ops.scan(
+ lambda x, _: x, math_ops.range(0), initializer=array_ops.ones([2, 4]))
self.assertAllEqual([0, 2, 4], x.get_shape())
self.assertAllEqual(x.get_shape(), x.eval().shape)
def testScanUnknownShape(self):
- x = tf.placeholder(tf.float32)
- initializer = tf.placeholder(tf.float32)
+ x = array_ops.placeholder(dtypes.float32)
+ initializer = array_ops.placeholder(dtypes.float32)
+
def fn(_, current_input):
return current_input
- y = tf.scan(fn, x, initializer=initializer)
+
+ y = functional_ops.scan(fn, x, initializer=initializer)
self.assertIs(None, y.get_shape().dims)
def testScanVaryingShape(self):
with self.test_session() as sess:
- x = tf.placeholder(dtype=tf.float32, shape=[None, 2])
- x_t = tf.transpose(x)
+ x = array_ops.placeholder(dtype=dtypes.float32, shape=[None, 2])
+ x_t = array_ops.transpose(x)
# scan over dimension 0 (with shape None)
- result = tf.scan(lambda a, x: a + x, x)
+ result = functional_ops.scan(lambda a, x: a + x, x)
# scanned over transposed dimension 0 (with shape 2)
- result_t = tf.scan(lambda a, x: a + x, x_t, infer_shape=False)
+ result_t = functional_ops.scan(lambda a, x: a + x, x_t, infer_shape=False)
# ensure gradients can be calculated
- result_grad = tf.gradients(result, [x])[0]
- result_t_grad = tf.gradients(result_t, [x_t])[0]
+ result_grad = gradients_impl.gradients(result, [x])[0]
+ result_t_grad = gradients_impl.gradients(result_t, [x_t])[0]
# smoke test to ensure they all evaluate
sess.run([result, result_t, result_grad, result_t_grad],
@@ -392,4 +441,4 @@ class FunctionalOpsTest(tf.test.TestCase):
if __name__ == "__main__":
- tf.test.main()
+ test.main()
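
[Editor's aside, not part of the patch: a minimal runnable sketch of the import style these hunks adopt, assuming the TensorFlow tree at this commit (pre-1.0 graph API). Every module path and op below appears in this commit's hunks; nothing is invented.]

    from tensorflow.python.client import session
    from tensorflow.python.framework import constant_op
    from tensorflow.python.ops import functional_ops
    from tensorflow.python.ops import math_ops

    with session.Session():
      elems = constant_op.constant([1.0, 2.0, 3.0, 4.0], name="data")
      # map_fn applies fn to each element; scan threads an accumulator.
      doubled = functional_ops.map_fn(lambda x: math_ops.mul(x, 2.0), elems)
      cumprod = functional_ops.scan(lambda a, x: math_ops.mul(a, x), elems)
      print(doubled.eval())  # [2. 4. 6. 8.]
      print(cumprod.eval())  # [1. 2. 6. 24.]
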
diff --git a/tensorflow/python/kernel_tests/gather_nd_op_test.py b/tensorflow/python/kernel_tests/gather_nd_op_test.py
index 90b5660f69..c0278128e6 100644
--- a/tensorflow/python/kernel_tests/gather_nd_op_test.py
+++ b/tensorflow/python/kernel_tests/gather_nd_op_test.py
@@ -12,8 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for tensorflow.ops.tf.gather_nd."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
@@ -21,17 +21,24 @@ from __future__ import print_function
import time
import numpy as np
-import tensorflow as tf
+from tensorflow.python.client import session
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import gradients_impl
+from tensorflow.python.ops import variables
+from tensorflow.python.platform import test
-class GatherNdTest(tf.test.TestCase):
+
+class GatherNdTest(test.TestCase):
use_gpu = False
def _testSimpleDtype(self, dtype):
with self.test_session(use_gpu=self.use_gpu):
- params = tf.constant(np.array([8, 1, 2, 3, 7, 5], dtype=dtype))
- indices = tf.constant([[4], [4], [0]])
- gather_nd_t = tf.gather_nd(params, indices)
+ params = constant_op.constant(np.array([8, 1, 2, 3, 7, 5], dtype=dtype))
+ indices = constant_op.constant([[4], [4], [0]])
+ gather_nd_t = array_ops.gather_nd(params, indices)
gather_nd_val = gather_nd_t.eval()
self.assertAllEqual(np.array([7, 7, 8], dtype=dtype), gather_nd_val)
@@ -50,27 +57,27 @@ class GatherNdTest(tf.test.TestCase):
params = np.ones((3, 3), dtype=np.float32)
indices_empty = np.empty((0, 2), dtype=np.int32)
- gather_nd_ok_t = tf.gather_nd(params, indices_empty)
+ gather_nd_ok_t = array_ops.gather_nd(params, indices_empty)
gather_nd_ok_val = gather_nd_ok_t.eval()
self.assertEqual([0], gather_nd_ok_t.get_shape())
self.assertAllClose(np.empty((0,), dtype=np.float32), gather_nd_ok_val)
indices_empty = np.empty((0, 1), dtype=np.int32)
- gather_nd_ok_t = tf.gather_nd(params, indices_empty)
+ gather_nd_ok_t = array_ops.gather_nd(params, indices_empty)
gather_nd_ok_val = gather_nd_ok_t.eval()
self.assertEqual([0, 3], gather_nd_ok_t.get_shape())
self.assertAllClose(np.empty((0, 3), dtype=np.float32), gather_nd_ok_val)
params_empty = np.empty((0, 3), dtype=np.float32)
indices_empty = np.empty((0, 2), dtype=np.int32)
- gather_nd_ok_t = tf.gather_nd(params_empty, indices_empty)
+ gather_nd_ok_t = array_ops.gather_nd(params_empty, indices_empty)
gather_nd_ok_val = gather_nd_ok_t.eval()
self.assertEqual([0], gather_nd_ok_t.get_shape())
self.assertAllClose(np.empty((0,), dtype=np.float32), gather_nd_ok_val)
params_empty = np.empty((0, 3), dtype=np.float32)
indices_nonempty = np.zeros((1, 2), dtype=np.int32)
- gather_nd_break_t = tf.gather_nd(params_empty, indices_nonempty)
+ gather_nd_break_t = array_ops.gather_nd(params_empty, indices_nonempty)
with self.assertRaisesOpError(
r"Requested more than 0 entries, but params is empty."):
gather_nd_break_t.eval()
@@ -78,30 +85,30 @@ class GatherNdTest(tf.test.TestCase):
def testIndexScalar(self):
with self.test_session(use_gpu=self.use_gpu):
- params = np.array([[-8, -1, -2, -3, -7, -5], [8, 1, 2, 3, 7, 5]],
- dtype=np.float32).T
- indices = tf.constant([4, 1])
- gather_nd_t = tf.gather_nd(params, indices)
+ params = np.array(
+ [[-8, -1, -2, -3, -7, -5], [8, 1, 2, 3, 7, 5]], dtype=np.float32).T
+ indices = constant_op.constant([4, 1])
+ gather_nd_t = array_ops.gather_nd(params, indices)
gather_nd_val = gather_nd_t.eval()
self.assertEqual([], gather_nd_t.get_shape())
self.assertAllEqual(np.array(7), gather_nd_val)
def testParamsRankLargerThanIndexIndexScalarSlices(self):
with self.test_session(use_gpu=self.use_gpu):
- params = np.array([[-8, -1, -2, -3, -7, -5], [8, 1, 2, 3, 7, 5]],
- dtype=np.float32).T
- indices = tf.constant([4])
- gather_nd_t = tf.gather_nd(params, indices)
+ params = np.array(
+ [[-8, -1, -2, -3, -7, -5], [8, 1, 2, 3, 7, 5]], dtype=np.float32).T
+ indices = constant_op.constant([4])
+ gather_nd_t = array_ops.gather_nd(params, indices)
gather_nd_val = gather_nd_t.eval()
self.assertEqual([2], gather_nd_t.get_shape())
self.assertAllEqual(np.array([-7, 7]), gather_nd_val)
def testParamsRankLargerThanIndexSlices(self):
with self.test_session(use_gpu=self.use_gpu):
- params = np.array([[-8, -1, -2, -3, -7, -5], [8, 1, 2, 3, 7, 5]],
- dtype=np.float32).T
- indices = tf.constant([[4], [4], [0]])
- gather_nd_t = tf.gather_nd(params, indices)
+ params = np.array(
+ [[-8, -1, -2, -3, -7, -5], [8, 1, 2, 3, 7, 5]], dtype=np.float32).T
+ indices = constant_op.constant([[4], [4], [0]])
+ gather_nd_t = array_ops.gather_nd(params, indices)
gather_nd_val = gather_nd_t.eval()
self.assertEqual([3, 2], gather_nd_t.get_shape())
@@ -113,9 +120,9 @@ class GatherNdTest(tf.test.TestCase):
[[[-8, -1, -2, -3, -7, -5], [8, 1, 2, 3, 7, 5]],
[[-80, -10, -20, -30, -70, -50], [80, 10, 20, 30, 70, 50]]],
dtype=np.float32).T
- params_t = tf.constant(params)
- indices = tf.constant([[4], [4], [0]])
- gather_nd_t = tf.gather_nd(params_t, indices)
+ params_t = constant_op.constant(params)
+ indices = constant_op.constant([[4], [4], [0]])
+ gather_nd_t = array_ops.gather_nd(params_t, indices)
gather_nd_val = gather_nd_t.eval()
self.assertEqual([3, 2, 2], gather_nd_t.get_shape())
@@ -127,9 +134,10 @@ class GatherNdTest(tf.test.TestCase):
[[[-8, -1, -2, -3, -7, -5], [8, 1, 2, 3, 7, 5]],
[[-80, -10, -20, -30, -70, -50], [80, 10, 20, 30, 70, 50]]],
dtype=np.float32).T
- params_t = tf.constant(params)
- indices = tf.constant([[], []], dtype=tf.int32) # Size (2, 0)
- gather_nd_t = tf.gather_nd(params_t, indices)
+ params_t = constant_op.constant(params)
+ indices = constant_op.constant(
+ [[], []], dtype=dtypes.int32) # Size (2, 0)
+ gather_nd_t = array_ops.gather_nd(params_t, indices)
gather_nd_val = gather_nd_t.eval()
self.assertEqual([2, 6, 2, 2], gather_nd_t.get_shape())
@@ -143,9 +151,9 @@ class GatherNdTest(tf.test.TestCase):
[[[-8, -1, -2, -3, -7, -5], [8, 1, 2, 3, 7, 5]],
[[-80, -10, -20, -30, -70, -50], [80, 10, 20, 30, 70, 50]]],
dtype=np.float32).T
- params_t = tf.constant(params)
- indices = tf.constant([[[3], [2], [1]], [[4], [4], [0]]])
- gather_nd_t = tf.gather_nd(params_t, indices)
+ params_t = constant_op.constant(params)
+ indices = constant_op.constant([[[3], [2], [1]], [[4], [4], [0]]])
+ gather_nd_t = array_ops.gather_nd(params_t, indices)
gather_nd_val = gather_nd_t.eval()
self.assertEqual([2, 3, 2, 2], gather_nd_t.get_shape())
@@ -156,9 +164,8 @@ class GatherNdTest(tf.test.TestCase):
with self.test_session(use_gpu=self.use_gpu):
shape = (10, 20, 5, 1, 17)
params = np.random.rand(*shape)
- indices = np.vstack([
- np.random.randint(0, s, size=2000) for s in shape]).T
- gather_nd_t = tf.gather_nd(params, indices)
+ indices = np.vstack([np.random.randint(0, s, size=2000) for s in shape]).T
+ gather_nd_t = array_ops.gather_nd(params, indices)
gather_nd_val = gather_nd_t.eval()
expected = params[tuple(indices.T)]
@@ -169,10 +176,9 @@ class GatherNdTest(tf.test.TestCase):
with self.test_session(use_gpu=self.use_gpu):
shape = (10, 20, 5, 1, 17)
params = np.random.rand(*shape)
- indices = np.vstack([
- np.random.randint(0, s, size=2000) for s in shape]).T
+ indices = np.vstack([np.random.randint(0, s, size=2000) for s in shape]).T
indices_reshaped = indices.reshape([10, 10, 20, 5])
- gather_nd_t = tf.gather_nd(params, indices_reshaped)
+ gather_nd_t = array_ops.gather_nd(params, indices_reshaped)
gather_nd_val = gather_nd_t.eval()
expected = params[tuple(indices.T)]
@@ -180,9 +186,9 @@ class GatherNdTest(tf.test.TestCase):
self.assertEqual([10, 10, 20], gather_nd_t.get_shape())
def testUnknownIndices(self):
- params = tf.constant([[0, 1, 2]])
- indices = tf.placeholder(tf.int32)
- gather_nd_t = tf.gather_nd(params, indices)
+ params = constant_op.constant([[0, 1, 2]])
+ indices = array_ops.placeholder(dtypes.int32)
+ gather_nd_t = array_ops.gather_nd(params, indices)
shape = gather_nd_t.get_shape()
self.assertEqual(None, shape.ndims)
self.assertEqual(None, shape[0].value)
@@ -191,7 +197,7 @@ class GatherNdTest(tf.test.TestCase):
with self.test_session():
params = [0, 1, 2]
indices = [[[0], [7]]] # Make this one higher rank
- gather_nd = tf.gather_nd(params, indices)
+ gather_nd = array_ops.gather_nd(params, indices)
with self.assertRaisesOpError(
r"flat indices\[1, :\] = \[7\] does not index into param "
r"\(shape: \[3\]\)"):
@@ -201,60 +207,62 @@ class GatherNdTest(tf.test.TestCase):
with self.test_session():
params = [[0, 1, 2]]
indices = [[[0], [0], [1]]] # Make this one higher rank
- gather_nd = tf.gather_nd(params, indices)
+ gather_nd = array_ops.gather_nd(params, indices)
with self.assertRaisesOpError(
r"flat indices\[2, :\] = \[1\] does not index into param "
r"\(shape: \[1,3\]\)"):
gather_nd.eval()
def testGradientsRank2Elements(self):
- indices = tf.constant([[0, 0], [1, 1]], dtype=tf.int32)
- inputs = tf.constant([[1, 2], [3, 4]], dtype=tf.float64)
- outputs = tf.gather_nd(inputs, indices)
+ indices = constant_op.constant([[0, 0], [1, 1]], dtype=dtypes.int32)
+ inputs = constant_op.constant([[1, 2], [3, 4]], dtype=dtypes.float64)
+ outputs = array_ops.gather_nd(inputs, indices)
- grad_vals = tf.constant([1, 2], dtype=tf.float64)
- grads = tf.gradients([outputs], [inputs], [grad_vals])[0]
+ grad_vals = constant_op.constant([1, 2], dtype=dtypes.float64)
+ grads = gradients_impl.gradients([outputs], [inputs], [grad_vals])[0]
expected_grads = np.array([[1, 0], [0, 2]], dtype=np.float64)
with self.test_session():
assert np.array_equal(expected_grads, grads.eval())
def testGradientsRank2Slices(self):
- indices = tf.constant([[1], [0]], dtype=tf.int32)
- inputs = tf.constant([[1, 2], [3, 4]], dtype=tf.float64)
- outputs = tf.gather_nd(inputs, indices)
+ indices = constant_op.constant([[1], [0]], dtype=dtypes.int32)
+ inputs = constant_op.constant([[1, 2], [3, 4]], dtype=dtypes.float64)
+ outputs = array_ops.gather_nd(inputs, indices)
- grad_vals = tf.constant([[1, 2], [3, 4]], dtype=tf.float64)
- grads = tf.gradients([outputs], [inputs], [grad_vals])[0]
+ grad_vals = constant_op.constant([[1, 2], [3, 4]], dtype=dtypes.float64)
+ grads = gradients_impl.gradients([outputs], [inputs], [grad_vals])[0]
expected_grads = np.array([[3, 4], [1, 2]], dtype=np.float64)
with self.test_session():
self.assertAllEqual(expected_grads, grads.eval())
def testGradientsRank3Elements(self):
- indices = tf.constant([[[0, 1], [1, 0]], [[0, 0], [1, 1]]], dtype=tf.int32)
- inputs = tf.constant([[[1, 3], [5, 7]], [[2, 4], [6, 8]]], dtype=tf.float64)
- outputs = tf.gather_nd(inputs, indices)
-
- grad_vals = tf.constant(
- [[[1, 2], [3, 4]], [[5, 6], [7, 8]]], dtype=tf.float64)
- grads = tf.gradients([outputs], [inputs], [grad_vals])[0]
+ indices = constant_op.constant(
+ [[[0, 1], [1, 0]], [[0, 0], [1, 1]]], dtype=dtypes.int32)
+ inputs = constant_op.constant(
+ [[[1, 3], [5, 7]], [[2, 4], [6, 8]]], dtype=dtypes.float64)
+ outputs = array_ops.gather_nd(inputs, indices)
+
+ grad_vals = constant_op.constant(
+ [[[1, 2], [3, 4]], [[5, 6], [7, 8]]], dtype=dtypes.float64)
+ grads = gradients_impl.gradients([outputs], [inputs], [grad_vals])[0]
expected_grads = np.array(
[[[5, 6], [1, 2]], [[3, 4], [7, 8]]], dtype=np.float64)
with self.test_session():
self.assertAllEqual(expected_grads, grads.eval())
def testGradientsRank2SlicesWithEmptySpace(self):
- indices = tf.constant([[2], [0], [5]], dtype=tf.int32)
- inputs = tf.constant(
+ indices = constant_op.constant([[2], [0], [5]], dtype=dtypes.int32)
+ inputs = constant_op.constant(
[[1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9],
[1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9],
[1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9]],
- dtype=tf.float64)
- outputs = tf.gather_nd(inputs, indices)
- grad_vals = tf.constant(
+ dtype=dtypes.float64)
+ outputs = array_ops.gather_nd(inputs, indices)
+ grad_vals = constant_op.constant(
[[1, 1, 1, 1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2, 2, 2, 2, 2],
[3, 3, 3, 3, 3, 3, 3, 3, 3]],
- dtype=tf.float64)
- grads = tf.gradients([outputs], [inputs], [grad_vals])[0]
+ dtype=dtypes.float64)
+ grads = gradients_impl.gradients([outputs], [inputs], [grad_vals])[0]
expected_grads = np.array(
[[2, 2, 2, 2, 2, 2, 2, 2, 2], [0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0],
@@ -268,28 +276,27 @@ class GatherNdGpuTest(GatherNdTest):
use_gpu = True
-class GatherNdOpBenchmark(tf.test.Benchmark):
+class GatherNdOpBenchmark(test.Benchmark):
def benchmark_gather_nd_op(self):
shape = (100, 47, 18, 170, 13)
np.random.seed(127)
params = np.random.rand(*shape)
- indices = np.vstack([
- np.random.randint(0, s, size=10000) for s in shape]).T
-
- with tf.Session():
- t_params = tf.Variable(params)
- t_indices = tf.Variable(indices)
- gather_op = tf.gather_nd(t_params, t_indices)
- tf.global_variables_initializer().run()
+ indices = np.vstack([np.random.randint(0, s, size=10000) for s in shape]).T
+
+ with session.Session():
+ t_params = variables.Variable(params)
+ t_indices = variables.Variable(indices)
+ gather_op = array_ops.gather_nd(t_params, t_indices)
+ variables.global_variables_initializer().run()
for _ in range(10):
gather_op.eval()
t1 = time.time()
for _ in range(1000):
gather_op.eval()
t2 = time.time()
- self.report_benchmark(iters=1000, wall_time=(t2-t1)/1000.0)
+ self.report_benchmark(iters=1000, wall_time=(t2 - t1) / 1000.0)
if __name__ == "__main__":
- tf.test.main()
+ test.main()
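
[Editor's aside, not part of the patch: the gather_nd behavior exercised above, written as a standalone sketch with the direct imports this file now uses; assumes this commit's source tree.]

    from tensorflow.python.client import session
    from tensorflow.python.framework import constant_op
    from tensorflow.python.ops import array_ops

    with session.Session():
      params = constant_op.constant([[0, 1], [2, 3], [4, 5]])
      indices = constant_op.constant([[2, 1], [0, 0]])  # full index tuples
      print(array_ops.gather_nd(params, indices).eval())  # [5 0]
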
diff --git a/tensorflow/python/kernel_tests/gather_op_test.py b/tensorflow/python/kernel_tests/gather_op_test.py
index 272f28d437..dac8d58b35 100644
--- a/tensorflow/python/kernel_tests/gather_op_test.py
+++ b/tensorflow/python/kernel_tests/gather_op_test.py
@@ -12,44 +12,50 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for tensorflow.ops.tf.gather."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import gradients_impl
+from tensorflow.python.platform import test
-class GatherTest(tf.test.TestCase):
+class GatherTest(test.TestCase):
use_gpu = False
def testScalar1D(self):
with self.test_session(use_gpu=self.use_gpu):
- params = tf.constant([0, 1, 2, 3, 7, 5])
- indices = tf.constant(4)
- gather_t = tf.gather(params, indices)
+ params = constant_op.constant([0, 1, 2, 3, 7, 5])
+ indices = constant_op.constant(4)
+ gather_t = array_ops.gather(params, indices)
gather_val = gather_t.eval()
self.assertAllEqual(7, gather_val)
self.assertEqual([], gather_t.get_shape())
def testScalar2D(self):
with self.test_session(use_gpu=self.use_gpu):
- params = tf.constant([[0, 1, 2], [3, 4, 5], [6, 7, 8],
+ params = constant_op.constant([[0, 1, 2], [3, 4, 5], [6, 7, 8],
[9, 10, 11], [12, 13, 14]])
- indices = tf.constant(2)
- gather_t = tf.gather(params, indices)
+ indices = constant_op.constant(2)
+ gather_t = array_ops.gather(params, indices)
gather_val = gather_t.eval()
self.assertAllEqual([6, 7, 8], gather_val)
self.assertEqual([3], gather_t.get_shape())
def testSimpleTwoD32(self):
with self.test_session(use_gpu=self.use_gpu):
- params = tf.constant([[0, 1, 2], [3, 4, 5], [6, 7, 8],
+ params = constant_op.constant([[0, 1, 2], [3, 4, 5], [6, 7, 8],
[9, 10, 11], [12, 13, 14]])
- indices = tf.constant([0, 4, 0, 2])
- gather_t = tf.gather(params, indices)
+ indices = constant_op.constant([0, 4, 0, 2])
+ gather_t = array_ops.gather(params, indices)
gather_val = gather_t.eval()
self.assertAllEqual([[0, 1, 2], [12, 13, 14], [0, 1, 2], [6, 7, 8]],
gather_val)
@@ -63,18 +69,18 @@ class GatherTest(tf.test.TestCase):
params = np.random.randn(*shape)
indices = np.random.randint(shape[0], size=indices_shape)
with self.test_session(use_gpu=self.use_gpu):
- tf_params = tf.constant(params)
- tf_indices = tf.constant(indices)
- gather = tf.gather(tf_params, tf_indices)
+ tf_params = constant_op.constant(params)
+ tf_indices = constant_op.constant(indices)
+ gather = array_ops.gather(tf_params, tf_indices)
self.assertAllEqual(params[indices], gather.eval())
self.assertEqual(indices.shape + params.shape[1:], gather.get_shape())
# Test gradients
gather_grad = np.random.randn(*gather.get_shape().as_list())
- params_grad, indices_grad = tf.gradients(
+ params_grad, indices_grad = gradients_impl.gradients(
gather, [tf_params, tf_indices], gather_grad)
self.assertEqual(indices_grad, None)
- self.assertEqual(type(params_grad), tf.IndexedSlices)
- params_grad = tf.convert_to_tensor(params_grad)
+ self.assertEqual(type(params_grad), ops.IndexedSlices)
+ params_grad = ops.convert_to_tensor(params_grad)
correct_params_grad = np.zeros(shape)
for i, g in zip(indices.flat,
gather_grad.reshape((indices.size,) + shape[1:])):
@@ -82,16 +88,16 @@ class GatherTest(tf.test.TestCase):
self.assertAllClose(correct_params_grad, params_grad.eval())
def testUnknownIndices(self):
- params = tf.constant([[0, 1, 2]])
- indices = tf.placeholder(tf.int32)
- gather_t = tf.gather(params, indices)
+ params = constant_op.constant([[0, 1, 2]])
+ indices = array_ops.placeholder(dtypes.int32)
+ gather_t = array_ops.gather(params, indices)
self.assertEqual(None, gather_t.get_shape())
def testBadIndices(self):
with self.test_session(use_gpu=False):
params = [0, 1, 2]
indices = [[7]]
- gather = tf.gather(params, indices)
+ gather = array_ops.gather(params, indices)
with self.assertRaisesOpError(r"indices\[0,0\] = 7 is not in \[0, 3\)"):
gather.eval()
@@ -101,7 +107,7 @@ class GatherTest(tf.test.TestCase):
for itype in np.int32, np.int64:
params = np.zeros((7, 0), dtype=dtype)
indices = np.array([3, 4], dtype=itype)
- gather = tf.gather(params, indices)
+ gather = array_ops.gather(params, indices)
self.assertAllEqual(gather.eval(), np.zeros((2, 0)))
@@ -110,4 +116,4 @@ class GatherGpuTest(GatherTest):
if __name__ == "__main__":
- tf.test.main()
+ test.main()
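
[Editor's aside, not part of the patch: array_ops.gather in the same direct-import style, as a sketch against this commit's tree. Indices select rows of params and may repeat.]

    from tensorflow.python.client import session
    from tensorflow.python.framework import constant_op
    from tensorflow.python.ops import array_ops

    with session.Session():
      params = constant_op.constant([10, 11, 12, 13])
      indices = constant_op.constant([3, 0, 3])
      print(array_ops.gather(params, indices).eval())  # [13 10 13]
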
diff --git a/tensorflow/python/kernel_tests/gradient_correctness_test.py b/tensorflow/python/kernel_tests/gradient_correctness_test.py
index fe5137b731..10fe4f5090 100644
--- a/tensorflow/python/kernel_tests/gradient_correctness_test.py
+++ b/tensorflow/python/kernel_tests/gradient_correctness_test.py
@@ -12,28 +12,34 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for tensorflow.ops.argmax_op."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.ops import gradients_impl
+from tensorflow.python.ops import math_ops
+from tensorflow.python.platform import test
-class GradientCorrectnessTest(tf.test.TestCase):
+
+class GradientCorrectnessTest(test.TestCase):
def testMultipleOutputChainedGradients(self):
with self.test_session() as sess:
- x = tf.constant(1.0, dtype=tf.float32)
- yexp = tf.exp(x)
- yexplog = tf.log(yexp)
- grads = tf.gradients([yexp, yexplog], [x])
+ x = constant_op.constant(1.0, dtype=dtypes.float32)
+ yexp = math_ops.exp(x)
+ yexplog = math_ops.log(yexp)
+ grads = gradients_impl.gradients([yexp, yexplog], [x])
grad_vals = sess.run(grads)
exp1_plus_one = (1.0 + np.exp(1.0)).astype(np.float32)
# [dexp(x)/dx + d(log(exp(x)))/dx] @ x=1 == exp(1) + 1
self.assertAllClose(grad_vals[0], exp1_plus_one)
+
if __name__ == '__main__':
- tf.test.main()
+ test.main()
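
[Editor's aside, not part of the patch: the gradients_impl entry point the test above migrates to, in a minimal sketch assuming this commit's tree.]

    from tensorflow.python.client import session
    from tensorflow.python.framework import constant_op
    from tensorflow.python.ops import gradients_impl
    from tensorflow.python.ops import math_ops

    with session.Session() as sess:
      x = constant_op.constant(1.0)
      y = math_ops.exp(x)
      # d(exp(x))/dx at x = 1 is e.
      print(sess.run(gradients_impl.gradients([y], [x])[0]))  # ~2.7182817
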
diff --git a/tensorflow/python/kernel_tests/identity_op_py_test.py b/tensorflow/python/kernel_tests/identity_op_py_test.py
index 4f02a9881b..2cfe420bd4 100644
--- a/tensorflow/python/kernel_tests/identity_op_py_test.py
+++ b/tensorflow/python/kernel_tests/identity_op_py_test.py
@@ -12,54 +12,61 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for IdentityOp."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
+from tensorflow.python.ops import variables
+from tensorflow.python.platform import test
-class IdentityOpTest(tf.test.TestCase):
+class IdentityOpTest(test.TestCase):
def testInt32_6(self):
with self.test_session():
- value = tf.identity([1, 2, 3, 4, 5, 6]).eval()
+ value = array_ops.identity([1, 2, 3, 4, 5, 6]).eval()
self.assertAllEqual(np.array([1, 2, 3, 4, 5, 6]), value)
def testInt32_2_3(self):
with self.test_session():
- inp = tf.constant([10, 20, 30, 40, 50, 60], shape=[2, 3])
- value = tf.identity(inp).eval()
+ inp = constant_op.constant([10, 20, 30, 40, 50, 60], shape=[2, 3])
+ value = array_ops.identity(inp).eval()
self.assertAllEqual(np.array([[10, 20, 30], [40, 50, 60]]), value)
def testString(self):
source = [b"A", b"b", b"C", b"d", b"E", b"f"]
with self.test_session():
- value = tf.identity(source).eval()
+ value = array_ops.identity(source).eval()
self.assertAllEqual(source, value)
def testIdentityShape(self):
with self.test_session():
shape = [2, 3]
array_2x3 = [[1, 2, 3], [6, 5, 4]]
- tensor = tf.constant(array_2x3)
+ tensor = constant_op.constant(array_2x3)
self.assertEquals(shape, tensor.get_shape())
- self.assertEquals(shape, tf.identity(tensor).get_shape())
- self.assertEquals(shape, tf.identity(array_2x3).get_shape())
- self.assertEquals(shape, tf.identity(np.array(array_2x3)).get_shape())
+ self.assertEquals(shape, array_ops.identity(tensor).get_shape())
+ self.assertEquals(shape, array_ops.identity(array_2x3).get_shape())
+ self.assertEquals(shape,
+ array_ops.identity(np.array(array_2x3)).get_shape())
def testRefIdentityShape(self):
with self.test_session():
shape = [2, 3]
- tensor = tf.Variable(tf.constant([[1, 2, 3], [6, 5, 4]], dtype=tf.int32))
+ tensor = variables.Variable(
+ constant_op.constant(
+ [[1, 2, 3], [6, 5, 4]], dtype=dtypes.int32))
self.assertEquals(shape, tensor.get_shape())
self.assertEquals(shape, gen_array_ops._ref_identity(tensor).get_shape())
if __name__ == "__main__":
- tf.test.main()
+ test.main()
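
[Editor's aside, not part of the patch: array_ops.identity with the new imports; like the tests above, it accepts plain Python lists. A sketch assuming this commit's tree.]

    from tensorflow.python.client import session
    from tensorflow.python.ops import array_ops

    with session.Session():
      x = array_ops.identity([1, 2, 3])
      print(x.get_shape())  # (3,)
      print(x.eval())       # [1 2 3]
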
diff --git a/tensorflow/python/kernel_tests/in_topk_op_test.py b/tensorflow/python/kernel_tests/in_topk_op_test.py
index 5cba3acdd1..4a4686d1b9 100644
--- a/tensorflow/python/kernel_tests/in_topk_op_test.py
+++ b/tensorflow/python/kernel_tests/in_topk_op_test.py
@@ -12,22 +12,25 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for PrecisionOp."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+
+from tensorflow.python.framework import errors_impl
+from tensorflow.python.ops import nn_ops
+from tensorflow.python.platform import test
-class InTopKTest(tf.test.TestCase):
+class InTopKTest(test.TestCase):
def _validateInTopK(self, predictions, target, k, expected):
np_ans = np.array(expected)
with self.test_session():
- precision = tf.nn.in_top_k(predictions, target, k)
+ precision = nn_ops.in_top_k(predictions, target, k)
out = precision.eval()
self.assertAllClose(np_ans, out)
self.assertShapeEqual(np_ans, precision)
@@ -62,10 +65,10 @@ class InTopKTest(tf.test.TestCase):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
target = [0, 80000]
with self.test_session():
- with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
+ with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"target.*out of range"):
- tf.nn.in_top_k(predictions, target, 2).eval()
+ nn_ops.in_top_k(predictions, target, 2).eval()
if __name__ == "__main__":
- tf.test.main()
+ test.main()
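
[Editor's aside, not part of the patch: nn_ops.in_top_k with plain-list inputs, as the tests above pass them; a sketch assuming this commit's tree.]

    from tensorflow.python.client import session
    from tensorflow.python.ops import nn_ops

    with session.Session():
      predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
      targets = [3, 0]
      # Row 0's top-1 class is 3 (hit); row 1's top-1 is also 3 (miss).
      print(nn_ops.in_top_k(predictions, targets, 1).eval())  # [ True False]
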
diff --git a/tensorflow/python/kernel_tests/init_ops_test.py b/tensorflow/python/kernel_tests/init_ops_test.py
index 4581263f17..e9e5fe0135 100644
--- a/tensorflow/python/kernel_tests/init_ops_test.py
+++ b/tensorflow/python/kernel_tests/init_ops_test.py
@@ -13,16 +13,24 @@
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.ops."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
-import tensorflow as tf
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
+from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
+from tensorflow.python.ops import math_ops
+from tensorflow.python.ops import nn_ops
+from tensorflow.python.ops import variable_scope
+from tensorflow.python.ops import variables
+from tensorflow.python.platform import test
# Returns true iff the two initializers produce the same tensor to
@@ -41,9 +49,9 @@ def identicaltest(tc, init1, init2, shape=None):
"""
if shape is None:
shape = [100]
- with tc.test_session(graph=tf.Graph()):
+ with tc.test_session(graph=ops.Graph()):
t1 = init1(shape).eval()
- with tc.test_session(graph=tf.Graph()):
+ with tc.test_session(graph=ops.Graph()):
t2 = init2(shape).eval()
return np.allclose(t1, t2, rtol=1e-15, atol=1e-15)
@@ -67,7 +75,7 @@ def duplicated_initializer(tc, init, graph_seed, shape=None):
"""
if shape is None:
shape = [100]
- with tc.test_session(graph=tf.Graph()):
+ with tc.test_session(graph=ops.Graph()):
random_seed.set_random_seed(graph_seed)
t1 = init(shape).eval()
t2 = init(shape).eval()
@@ -92,57 +100,59 @@ def _init_sampler(tc, init, num):
return func
-class ConstantInitializersTest(tf.test.TestCase):
+class ConstantInitializersTest(test.TestCase):
def testZerosInitializer(self):
with self.test_session(use_gpu=True):
shape = [2, 3]
- x = tf.get_variable("x", shape=shape, initializer=tf.zeros_initializer())
+ x = variable_scope.get_variable(
+ "x", shape=shape, initializer=init_ops.zeros_initializer())
x.initializer.run()
self.assertAllEqual(x.eval(), np.zeros(shape))
def testOnesInitializer(self):
with self.test_session(use_gpu=True):
shape = [2, 3]
- x = tf.get_variable("x", shape=shape, initializer=tf.ones_initializer())
+ x = variable_scope.get_variable(
+ "x", shape=shape, initializer=init_ops.ones_initializer())
x.initializer.run()
self.assertAllEqual(x.eval(), np.ones(shape))
def testConstantZeroInitializer(self):
with self.test_session(use_gpu=True):
shape = [2, 3]
- x = tf.get_variable(
- "x", shape=shape, initializer=tf.constant_initializer(0.0))
+ x = variable_scope.get_variable(
+ "x", shape=shape, initializer=init_ops.constant_initializer(0.0))
x.initializer.run()
self.assertAllEqual(x.eval(), np.zeros(shape))
def testConstantOneInitializer(self):
with self.test_session(use_gpu=True):
shape = [2, 3]
- x = tf.get_variable(
- "x", shape=shape, initializer=tf.constant_initializer(1.0))
+ x = variable_scope.get_variable(
+ "x", shape=shape, initializer=init_ops.constant_initializer(1.0))
x.initializer.run()
self.assertAllEqual(x.eval(), np.ones(shape))
def testConstantIntInitializer(self):
with self.test_session(use_gpu=True):
shape = [2, 3]
- x = tf.get_variable(
+ x = variable_scope.get_variable(
"x",
shape=shape,
- dtype=tf.int32,
- initializer=tf.constant_initializer(7))
+ dtype=dtypes.int32,
+ initializer=init_ops.constant_initializer(7))
x.initializer.run()
- self.assertEqual(x.dtype.base_dtype, tf.int32)
+ self.assertEqual(x.dtype.base_dtype, dtypes.int32)
self.assertAllEqual(x.eval(), 7 * np.ones(shape, dtype=np.int32))
def _testNDimConstantInitializer(self, name, value, shape, expected):
with self.test_session(use_gpu=True):
- init = tf.constant_initializer(value, dtype=tf.int32)
- x = tf.get_variable(name, shape=shape, initializer=init)
+ init = init_ops.constant_initializer(value, dtype=dtypes.int32)
+ x = variable_scope.get_variable(name, shape=shape, initializer=init)
x.initializer.run()
- actual = tf.reshape(x, [-1]).eval()
+ actual = array_ops.reshape(x, [-1]).eval()
self.assertEqual(len(actual), len(expected))
for a, e in zip(actual, expected):
self.assertEqual(a, e)
@@ -162,11 +172,11 @@ class ConstantInitializersTest(tf.test.TestCase):
def _testNDimConstantInitializerLessValues(self, name, value, shape,
expected):
with self.test_session(use_gpu=True):
- init = tf.constant_initializer(value, dtype=tf.int32)
- x = tf.get_variable(name, shape=shape, initializer=init)
+ init = init_ops.constant_initializer(value, dtype=dtypes.int32)
+ x = variable_scope.get_variable(name, shape=shape, initializer=init)
x.initializer.run()
- actual = tf.reshape(x, [-1]).eval()
+ actual = array_ops.reshape(x, [-1]).eval()
self.assertGreater(len(actual), len(expected))
for i in xrange(len(actual)):
a = actual[i]
@@ -186,11 +196,15 @@ class ConstantInitializersTest(tf.test.TestCase):
"2D-ndarray", np.asarray(value).reshape(tuple([2, 3])), shape, expected)
def _testNDimConstantInitializerMoreValues(self, value, shape):
- tf.reset_default_graph()
+ ops.reset_default_graph()
with self.test_session(use_gpu=True):
- init = tf.constant_initializer(value, dtype=tf.int32)
+ init = init_ops.constant_initializer(value, dtype=dtypes.int32)
self.assertRaises(
- ValueError, tf.get_variable, "x", shape=shape, initializer=init)
+ ValueError,
+ variable_scope.get_variable,
+ "x",
+ shape=shape,
+ initializer=init)
def testNDimConstantInitializerMoreValues(self):
value = [0, 1, 2, 3, 4, 5, 6, 7]
@@ -201,87 +215,102 @@ class ConstantInitializersTest(tf.test.TestCase):
np.asarray(value).reshape(tuple([2, 4])), shape)
-class RandomNormalInitializationTest(tf.test.TestCase):
+class RandomNormalInitializationTest(test.TestCase):
def testInitializerIdentical(self):
- for dtype in [tf.float32, tf.float64]:
- init1 = tf.random_normal_initializer(0.0, 1.0, seed=1, dtype=dtype)
- init2 = tf.random_normal_initializer(0.0, 1.0, seed=1, dtype=dtype)
+ for dtype in [dtypes.float32, dtypes.float64]:
+ init1 = init_ops.random_normal_initializer(0.0, 1.0, seed=1, dtype=dtype)
+ init2 = init_ops.random_normal_initializer(0.0, 1.0, seed=1, dtype=dtype)
self.assertTrue(identicaltest(self, init1, init2))
def testInitializerDifferent(self):
- for dtype in [tf.float32, tf.float64]:
- init1 = tf.random_normal_initializer(0.0, 1.0, seed=1, dtype=dtype)
- init2 = tf.random_normal_initializer(0.0, 1.0, seed=2, dtype=dtype)
+ for dtype in [dtypes.float32, dtypes.float64]:
+ init1 = init_ops.random_normal_initializer(0.0, 1.0, seed=1, dtype=dtype)
+ init2 = init_ops.random_normal_initializer(0.0, 1.0, seed=2, dtype=dtype)
self.assertFalse(identicaltest(self, init1, init2))
def testDuplicatedInitializer(self):
- init = tf.random_normal_initializer(0.0, 1.0)
+ init = init_ops.random_normal_initializer(0.0, 1.0)
self.assertFalse(duplicated_initializer(self, init, 1))
def testInvalidDataType(self):
self.assertRaises(
- ValueError, tf.random_normal_initializer, 0.0, 1.0, dtype=tf.string)
+ ValueError,
+ init_ops.random_normal_initializer,
+ 0.0,
+ 1.0,
+ dtype=dtypes.string)
-class TruncatedNormalInitializationTest(tf.test.TestCase):
+class TruncatedNormalInitializationTest(test.TestCase):
def testInitializerIdentical(self):
- for dtype in [tf.float32, tf.float64]:
- init1 = tf.truncated_normal_initializer(0.0, 1.0, seed=1, dtype=dtype)
- init2 = tf.truncated_normal_initializer(0.0, 1.0, seed=1, dtype=dtype)
+ for dtype in [dtypes.float32, dtypes.float64]:
+ init1 = init_ops.truncated_normal_initializer(
+ 0.0, 1.0, seed=1, dtype=dtype)
+ init2 = init_ops.truncated_normal_initializer(
+ 0.0, 1.0, seed=1, dtype=dtype)
self.assertTrue(identicaltest(self, init1, init2))
def testInitializerDifferent(self):
- for dtype in [tf.float32, tf.float64]:
- init1 = tf.truncated_normal_initializer(0.0, 1.0, seed=1, dtype=dtype)
- init2 = tf.truncated_normal_initializer(0.0, 1.0, seed=2, dtype=dtype)
+ for dtype in [dtypes.float32, dtypes.float64]:
+ init1 = init_ops.truncated_normal_initializer(
+ 0.0, 1.0, seed=1, dtype=dtype)
+ init2 = init_ops.truncated_normal_initializer(
+ 0.0, 1.0, seed=2, dtype=dtype)
self.assertFalse(identicaltest(self, init1, init2))
def testDuplicatedInitializer(self):
- init = tf.truncated_normal_initializer(0.0, 1.0)
+ init = init_ops.truncated_normal_initializer(0.0, 1.0)
self.assertFalse(duplicated_initializer(self, init, 1))
def testInvalidDataType(self):
self.assertRaises(
- ValueError, tf.truncated_normal_initializer, 0.0, 1.0, dtype=tf.string)
+ ValueError,
+ init_ops.truncated_normal_initializer,
+ 0.0,
+ 1.0,
+ dtype=dtypes.string)
-class RandomUniformInitializationTest(tf.test.TestCase):
+class RandomUniformInitializationTest(test.TestCase):
def testInitializerIdentical(self):
- for dtype in [tf.float32, tf.float64, tf.int64]:
- init1 = tf.random_uniform_initializer(0, 7, seed=1, dtype=dtype)
- init2 = tf.random_uniform_initializer(0, 7, seed=1, dtype=dtype)
+ for dtype in [dtypes.float32, dtypes.float64, dtypes.int64]:
+ init1 = init_ops.random_uniform_initializer(0, 7, seed=1, dtype=dtype)
+ init2 = init_ops.random_uniform_initializer(0, 7, seed=1, dtype=dtype)
self.assertTrue(identicaltest(self, init1, init2))
def testInitializerDifferent(self):
- for dtype in [tf.float32, tf.float64, tf.int32, tf.int64]:
- init1 = tf.random_uniform_initializer(0, 7, seed=1, dtype=dtype)
- init2 = tf.random_uniform_initializer(0, 7, seed=2, dtype=dtype)
+ for dtype in [dtypes.float32, dtypes.float64, dtypes.int32, dtypes.int64]:
+ init1 = init_ops.random_uniform_initializer(0, 7, seed=1, dtype=dtype)
+ init2 = init_ops.random_uniform_initializer(0, 7, seed=2, dtype=dtype)
self.assertFalse(identicaltest(self, init1, init2))
def testDuplicatedInitializer(self):
- init = tf.random_uniform_initializer(0.0, 1.0)
+ init = init_ops.random_uniform_initializer(0.0, 1.0)
self.assertFalse(duplicated_initializer(self, init, 1))
-class UniformUnitScalingInitializationTest(tf.test.TestCase):
+class UniformUnitScalingInitializationTest(test.TestCase):
def testInitializerIdentical(self):
- for dtype in [tf.float32, tf.float64]:
- init1 = tf.uniform_unit_scaling_initializer(seed=1, dtype=dtype)
- init2 = tf.uniform_unit_scaling_initializer(seed=1, dtype=dtype)
+ for dtype in [dtypes.float32, dtypes.float64]:
+ init1 = init_ops.uniform_unit_scaling_initializer(seed=1, dtype=dtype)
+ init2 = init_ops.uniform_unit_scaling_initializer(seed=1, dtype=dtype)
self.assertTrue(identicaltest(self, init1, init2))
- init3 = tf.uniform_unit_scaling_initializer(1.5, seed=1, dtype=dtype)
- init4 = tf.uniform_unit_scaling_initializer(1.5, seed=1, dtype=dtype)
+ init3 = init_ops.uniform_unit_scaling_initializer(
+ 1.5, seed=1, dtype=dtype)
+ init4 = init_ops.uniform_unit_scaling_initializer(
+ 1.5, seed=1, dtype=dtype)
self.assertTrue(identicaltest(self, init3, init4))
def testInitializerDifferent(self):
- for dtype in [tf.float32, tf.float64]:
- init1 = tf.uniform_unit_scaling_initializer(seed=1, dtype=dtype)
- init2 = tf.uniform_unit_scaling_initializer(seed=2, dtype=dtype)
- init3 = tf.uniform_unit_scaling_initializer(1.5, seed=1, dtype=dtype)
+ for dtype in [dtypes.float32, dtypes.float64]:
+ init1 = init_ops.uniform_unit_scaling_initializer(seed=1, dtype=dtype)
+ init2 = init_ops.uniform_unit_scaling_initializer(seed=2, dtype=dtype)
+ init3 = init_ops.uniform_unit_scaling_initializer(
+ 1.5, seed=1, dtype=dtype)
self.assertFalse(identicaltest(self, init1, init2))
self.assertFalse(identicaltest(self, init1, init3))
self.assertFalse(identicaltest(self, init2, init3))
@@ -289,33 +318,37 @@ class UniformUnitScalingInitializationTest(tf.test.TestCase):
def testZeroSize(self):
shape = [0, 2]
with self.test_session():
- x = tf.get_variable(
- "x", shape=shape, initializer=tf.uniform_unit_scaling_initializer())
+ x = variable_scope.get_variable(
+ "x",
+ shape=shape,
+ initializer=init_ops.uniform_unit_scaling_initializer())
self.assertAllEqual(shape, x.eval().shape)
def testDuplicatedInitializer(self):
- init = tf.uniform_unit_scaling_initializer()
+ init = init_ops.uniform_unit_scaling_initializer()
self.assertFalse(duplicated_initializer(self, init, 1))
def testInvalidDataType(self):
self.assertRaises(
- ValueError, tf.uniform_unit_scaling_initializer, dtype=tf.string)
+ ValueError,
+ init_ops.uniform_unit_scaling_initializer,
+ dtype=dtypes.string)
-class RandomWalkShapeTest(tf.test.TestCase):
+class RandomWalkShapeTest(test.TestCase):
def testRandomWalk(self):
# Fully known shape.
- rnd1 = init_ops._random_walk([1, 2], tf.nn.relu)
+ rnd1 = init_ops._random_walk([1, 2], nn_ops.relu)
self.assertEqual([1, 2], rnd1.get_shape())
# TODO(vrv): move to sequence_ops_test?
-class RangeTest(tf.test.TestCase):
+class RangeTest(test.TestCase):
def _Range(self, start, limit, delta):
with self.test_session(use_gpu=True):
- tf_ans = tf.range(start, limit, delta, name="range")
+ tf_ans = math_ops.range(start, limit, delta, name="range")
self.assertEqual([len(np.arange(start, limit, delta))],
tf_ans.get_shape())
return tf_ans.eval()
@@ -330,11 +363,11 @@ class RangeTest(tf.test.TestCase):
self.assertTrue(
np.array_equal(
self._Range(100, 500, 100), np.array([100, 200, 300, 400])))
- self.assertEqual(tf.range(0, 5, 1).dtype, tf.int32)
+ self.assertEqual(math_ops.range(0, 5, 1).dtype, dtypes.int32)
def testLimitOnly(self):
with self.test_session(use_gpu=True):
- self.assertAllEqual(np.arange(5), tf.range(5).eval())
+ self.assertAllEqual(np.arange(5), math_ops.range(5).eval())
def testEmpty(self):
for start in 0, 5:
@@ -349,53 +382,64 @@ class RangeTest(tf.test.TestCase):
self.assertTrue(
np.allclose(
self._Range(100., 500., 100.), np.array([100, 200, 300, 400])))
- self.assertEqual(tf.range(0., 5., 1.).dtype, tf.float32)
+ self.assertEqual(math_ops.range(0., 5., 1.).dtype, dtypes.float32)
def testNegativeDelta(self):
self.assertTrue(
np.array_equal(self._Range(5, -1, -1), np.array([5, 4, 3, 2, 1, 0])))
self.assertTrue(
- np.allclose(
- self._Range(2.5, 0, -0.5), np.array([2.5, 2, 1.5, 1, 0.5])))
+ np.allclose(self._Range(2.5, 0, -0.5), np.array([2.5, 2, 1.5, 1, 0.5])))
self.assertTrue(
np.array_equal(self._Range(-5, -10, -3), np.array([-5, -8])))
def testDType(self):
- zero_int32 = tf.cast(0, tf.int32)
- zero_int64 = tf.cast(0, tf.int64)
- zero_float32 = tf.cast(0, tf.float32)
- zero_float64 = tf.cast(0, tf.float64)
-
- self.assertEqual(tf.range(zero_int32, 0, 1).dtype, tf.int32)
- self.assertEqual(tf.range(zero_int64, 0, 1).dtype, tf.int64)
- self.assertEqual(tf.range(zero_float32, 0, 1).dtype, tf.float32)
- self.assertEqual(tf.range(zero_float64, 0, 1).dtype, tf.float64)
-
- self.assertEqual(tf.range(zero_int32, zero_int64, 1).dtype, tf.int64)
- self.assertEqual(tf.range(zero_int64, zero_float32, 1).dtype, tf.float32)
- self.assertEqual(tf.range(zero_float32, zero_float64, 1).dtype, tf.float64)
- self.assertEqual(tf.range(zero_float64, zero_int32, 1).dtype, tf.float64)
-
- self.assertEqual(tf.range(0, 0, 1, dtype=tf.int32).dtype, tf.int32)
- self.assertEqual(tf.range(0, 0, 1, dtype=tf.int64).dtype, tf.int64)
- self.assertEqual(tf.range(0, 0, 1, dtype=tf.float32).dtype, tf.float32)
- self.assertEqual(tf.range(0, 0, 1, dtype=tf.float64).dtype, tf.float64)
+ zero_int32 = math_ops.cast(0, dtypes.int32)
+ zero_int64 = math_ops.cast(0, dtypes.int64)
+ zero_float32 = math_ops.cast(0, dtypes.float32)
+ zero_float64 = math_ops.cast(0, dtypes.float64)
+
+ self.assertEqual(math_ops.range(zero_int32, 0, 1).dtype, dtypes.int32)
+ self.assertEqual(math_ops.range(zero_int64, 0, 1).dtype, dtypes.int64)
+ self.assertEqual(math_ops.range(zero_float32, 0, 1).dtype, dtypes.float32)
+ self.assertEqual(math_ops.range(zero_float64, 0, 1).dtype, dtypes.float64)
+
+ self.assertEqual(
+ math_ops.range(zero_int32, zero_int64, 1).dtype, dtypes.int64)
+ self.assertEqual(
+ math_ops.range(zero_int64, zero_float32, 1).dtype, dtypes.float32)
+ self.assertEqual(
+ math_ops.range(zero_float32, zero_float64, 1).dtype, dtypes.float64)
+ self.assertEqual(
+ math_ops.range(zero_float64, zero_int32, 1).dtype, dtypes.float64)
+
+ self.assertEqual(
+ math_ops.range(
+ 0, 0, 1, dtype=dtypes.int32).dtype, dtypes.int32)
+ self.assertEqual(
+ math_ops.range(
+ 0, 0, 1, dtype=dtypes.int64).dtype, dtypes.int64)
+ self.assertEqual(
+ math_ops.range(
+ 0, 0, 1, dtype=dtypes.float32).dtype, dtypes.float32)
+ self.assertEqual(
+ math_ops.range(
+ 0, 0, 1, dtype=dtypes.float64).dtype, dtypes.float64)
# TODO(vrv): move to sequence_ops_test?
-class LinSpaceTest(tf.test.TestCase):
+class LinSpaceTest(test.TestCase):
def _gpu_modes(self):
- if tf.test.is_gpu_available():
+ if test.is_gpu_available():
return [False, True]
else:
return [False]
def _LinSpace(self, start, stop, num):
# NOTE(touts): Needs to pass a graph to get a new session each time.
- with tf.Graph().as_default() as graph:
+ with ops.Graph().as_default() as graph:
with self.test_session(graph=graph, force_gpu=self.force_gpu):
- tf_ans = tf.linspace(start, stop, num, name="linspace")
+ tf_ans = math_ops.linspace(start, stop, num, name="linspace")
self.assertEqual([num], tf_ans.get_shape())
return tf_ans.eval()
@@ -406,8 +450,8 @@ class LinSpaceTest(tf.test.TestCase):
self.assertArrayNear(
self._LinSpace(1., 5., 3), np.array([1., 3., 5.]), 1e-5)
self.assertArrayNear(
- self._LinSpace(1., 5., 4),
- np.array([1., 7. / 3., 11. / 3., 5.]), 1e-5)
+ self._LinSpace(1., 5., 4), np.array([1., 7. / 3., 11. / 3., 5.]),
+ 1e-5)
def testNegative(self):
for self.force_gpu in self._gpu_modes():
@@ -438,65 +482,66 @@ class LinSpaceTest(tf.test.TestCase):
self.assertArrayNear(self._LinSpace(5., 5., 4), np.array([5.] * 4), 1e-5)
-class DeviceTest(tf.test.TestCase):
+class DeviceTest(test.TestCase):
def testNoDevice(self):
- with tf.Graph().as_default():
- var = tf.Variable([[1.0, 1.0]])
+ with ops.Graph().as_default():
+ var = variables.Variable([[1.0, 1.0]])
self.assertDeviceEqual(None, var.device)
self.assertDeviceEqual(None, var.initializer.device)
def testDevice(self):
- with tf.Graph().as_default():
- with tf.device("/job:ps"):
- var = tf.Variable([[1.0, 1.0]])
+ with ops.Graph().as_default():
+ with ops.device("/job:ps"):
+ var = variables.Variable([[1.0, 1.0]])
self.assertDeviceEqual("/job:ps", var.device)
self.assertDeviceEqual("/job:ps", var.initializer.device)
-class OrthogonalInitializerTest(tf.test.TestCase):
+class OrthogonalInitializerTest(test.TestCase):
def testInitializerIdentical(self):
- for dtype in [tf.float32, tf.float64]:
- init1 = tf.orthogonal_initializer(seed=1, dtype=dtype)
- init2 = tf.orthogonal_initializer(seed=1, dtype=dtype)
+ for dtype in [dtypes.float32, dtypes.float64]:
+ init1 = init_ops.orthogonal_initializer(seed=1, dtype=dtype)
+ init2 = init_ops.orthogonal_initializer(seed=1, dtype=dtype)
self.assertTrue(identicaltest(self, init1, init2, (10, 10)))
def testInitializerDifferent(self):
- for dtype in [tf.float32, tf.float64]:
- init1 = tf.orthogonal_initializer(seed=1, dtype=dtype)
- init2 = tf.orthogonal_initializer(seed=2, dtype=dtype)
+ for dtype in [dtypes.float32, dtypes.float64]:
+ init1 = init_ops.orthogonal_initializer(seed=1, dtype=dtype)
+ init2 = init_ops.orthogonal_initializer(seed=2, dtype=dtype)
self.assertFalse(identicaltest(self, init1, init2, (10, 10)))
def testDuplicatedInitializer(self):
- init = tf.orthogonal_initializer()
+ init = init_ops.orthogonal_initializer()
self.assertFalse(duplicated_initializer(self, init, 1, (10, 10)))
def testInvalidDataType(self):
- self.assertRaises(ValueError, tf.orthogonal_initializer, dtype=tf.string)
+ self.assertRaises(
+ ValueError, init_ops.orthogonal_initializer, dtype=dtypes.string)
def testInvalidShape(self):
- init1 = tf.orthogonal_initializer()
- with self.test_session(graph=tf.Graph(), use_gpu=True):
+ init1 = init_ops.orthogonal_initializer()
+ with self.test_session(graph=ops.Graph(), use_gpu=True):
self.assertRaises(ValueError, init1, shape=[5])
def testGain(self):
shape = (10, 10)
- for dtype in [tf.float32, tf.float64]:
- init1 = tf.orthogonal_initializer(seed=1, dtype=dtype)
- init2 = tf.orthogonal_initializer(gain=3.14, seed=1, dtype=dtype)
- with self.test_session(graph=tf.Graph(), use_gpu=True):
+ for dtype in [dtypes.float32, dtypes.float64]:
+ init1 = init_ops.orthogonal_initializer(seed=1, dtype=dtype)
+ init2 = init_ops.orthogonal_initializer(gain=3.14, seed=1, dtype=dtype)
+ with self.test_session(graph=ops.Graph(), use_gpu=True):
t1 = init1(shape).eval()
- with self.test_session(graph=tf.Graph(), use_gpu=True):
+ with self.test_session(graph=ops.Graph(), use_gpu=True):
t2 = init2(shape).eval()
self.assertTrue(np.allclose(t1, t2 / 3.14, rtol=1e-15, atol=1e-15))
def testShapesValues(self):
- for dtype in [tf.float32, tf.float64]:
+ for dtype in [dtypes.float32, dtypes.float64]:
for shape in [(10, 10), (10, 9, 8), (100, 5, 5), (50, 40), (40, 50)]:
- init = tf.orthogonal_initializer(dtype=dtype)
- tol = 1e-5 if dtype == tf.float32 else 1e-12
- with self.test_session(graph=tf.Graph(), use_gpu=True):
+ init = init_ops.orthogonal_initializer(dtype=dtype)
+ tol = 1e-5 if dtype == dtypes.float32 else 1e-12
+ with self.test_session(graph=ops.Graph(), use_gpu=True):
# Check the shape
t = init(shape).eval()
self.assertAllEqual(shape, t.shape)
@@ -511,4 +556,4 @@ class OrthogonalInitializerTest(tf.test.TestCase):
if __name__ == "__main__":
- tf.test.main()
+ test.main()
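
[Editor's aside, not part of the patch: the variable_scope/init_ops/variables trio these hunks migrate to, in one minimal sketch assuming this commit's tree.]

    from tensorflow.python.client import session
    from tensorflow.python.ops import init_ops
    from tensorflow.python.ops import variable_scope
    from tensorflow.python.ops import variables

    with session.Session() as sess:
      x = variable_scope.get_variable(
          "x", shape=[2, 3], initializer=init_ops.constant_initializer(7.0))
      sess.run(variables.global_variables_initializer())
      print(sess.run(x))  # a 2x3 array of 7.0
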
diff --git a/tensorflow/python/kernel_tests/io_ops_test.py b/tensorflow/python/kernel_tests/io_ops_test.py
index b0c46ea07d..0e5ca21c48 100644
--- a/tensorflow/python/kernel_tests/io_ops_test.py
+++ b/tensorflow/python/kernel_tests/io_ops_test.py
@@ -13,7 +13,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for tensorflow.python.ops.io_ops."""
from __future__ import absolute_import
@@ -23,21 +22,22 @@ from __future__ import print_function
import os
import tempfile
-import tensorflow as tf
+from tensorflow.python.ops import io_ops
+from tensorflow.python.platform import test
+from tensorflow.python.util import compat
-class IoOpsTest(tf.test.TestCase):
+class IoOpsTest(test.TestCase):
def testReadFile(self):
cases = ['', 'Some contents', 'Неки садржаји на српском']
for contents in cases:
- contents = tf.compat.as_bytes(contents)
- with tempfile.NamedTemporaryFile(prefix='ReadFileTest',
- dir=self.get_temp_dir(),
- delete=False) as temp:
+ contents = compat.as_bytes(contents)
+ with tempfile.NamedTemporaryFile(
+ prefix='ReadFileTest', dir=self.get_temp_dir(), delete=False) as temp:
temp.write(contents)
with self.test_session():
- read = tf.read_file(temp.name)
+ read = io_ops.read_file(temp.name)
self.assertEqual([], read.get_shape())
self.assertEqual(read.eval(), contents)
os.remove(temp.name)
@@ -45,13 +45,13 @@ class IoOpsTest(tf.test.TestCase):
def testWriteFile(self):
cases = ['', 'Some contents']
for contents in cases:
- contents = tf.compat.as_bytes(contents)
- with tempfile.NamedTemporaryFile(prefix='WriteFileTest',
- dir=self.get_temp_dir(),
- delete=False) as temp:
+ contents = compat.as_bytes(contents)
+ with tempfile.NamedTemporaryFile(
+ prefix='WriteFileTest', dir=self.get_temp_dir(),
+ delete=False) as temp:
pass
with self.test_session() as sess:
- w = tf.write_file(temp.name, contents)
+ w = io_ops.write_file(temp.name, contents)
sess.run(w)
with open(temp.name, 'rb') as f:
file_contents = f.read()
@@ -59,42 +59,52 @@ class IoOpsTest(tf.test.TestCase):
os.remove(temp.name)
def _subset(self, files, indices):
- return set(tf.compat.as_bytes(files[i].name)
- for i in range(len(files)) if i in indices)
+ return set(
+ compat.as_bytes(files[i].name) for i in range(len(files))
+ if i in indices)
def testMatchingFiles(self):
- cases = ['ABcDEF.GH', 'ABzDEF.GH', 'ABasdfjklDEF.GH', 'AB3DEF.GH',
- 'AB4DEF.GH', 'ABDEF.GH', 'XYZ']
- files = [tempfile.NamedTemporaryFile(
- prefix=c, dir=self.get_temp_dir(), delete=True) for c in cases]
+ cases = [
+ 'ABcDEF.GH', 'ABzDEF.GH', 'ABasdfjklDEF.GH', 'AB3DEF.GH', 'AB4DEF.GH',
+ 'ABDEF.GH', 'XYZ'
+ ]
+ files = [
+ tempfile.NamedTemporaryFile(
+ prefix=c, dir=self.get_temp_dir(), delete=True) for c in cases
+ ]
with self.test_session():
# Test exact match without wildcards.
for f in files:
- self.assertEqual(tf.matching_files(f.name).eval(),
- tf.compat.as_bytes(f.name))
+ self.assertEqual(
+ io_ops.matching_files(f.name).eval(), compat.as_bytes(f.name))
# We will look for files matching "ABxDEF.GH*" where "x" is some wildcard.
pos = files[0].name.find(cases[0])
pattern = files[0].name[:pos] + 'AB%sDEF.GH*'
- self.assertEqual(set(tf.matching_files(pattern % 'z').eval()),
- self._subset(files, [1]))
- self.assertEqual(set(tf.matching_files(pattern % '?').eval()),
- self._subset(files, [0, 1, 3, 4]))
- self.assertEqual(set(tf.matching_files(pattern % '*').eval()),
- self._subset(files, [0, 1, 2, 3, 4, 5]))
+ self.assertEqual(
+ set(io_ops.matching_files(pattern % 'z').eval()),
+ self._subset(files, [1]))
+ self.assertEqual(
+ set(io_ops.matching_files(pattern % '?').eval()),
+ self._subset(files, [0, 1, 3, 4]))
+ self.assertEqual(
+ set(io_ops.matching_files(pattern % '*').eval()),
+ self._subset(files, [0, 1, 2, 3, 4, 5]))
# NOTE(mrry): Windows uses PathMatchSpec to match file patterns, which
# does not support the following expressions.
if os.name != 'nt':
- self.assertEqual(set(tf.matching_files(pattern % '[cxz]').eval()),
- self._subset(files, [0, 1]))
- self.assertEqual(set(tf.matching_files(pattern % '[0-9]').eval()),
- self._subset(files, [3, 4]))
+ self.assertEqual(
+ set(io_ops.matching_files(pattern % '[cxz]').eval()),
+ self._subset(files, [0, 1]))
+ self.assertEqual(
+ set(io_ops.matching_files(pattern % '[0-9]').eval()),
+ self._subset(files, [3, 4]))
for f in files:
f.close()
if __name__ == '__main__':
- tf.test.main()
+ test.main()
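For io_ops_test.py the renames are mechanical: tf.read_file, tf.write_file and tf.matching_files become io_ops.read_file, io_ops.write_file and io_ops.matching_files, and tf.compat.as_bytes becomes compat.as_bytes. A self-contained sketch of the write-then-read round trip the test exercises (the class name and file name here are illustrative):

    import os

    from tensorflow.python.ops import io_ops
    from tensorflow.python.platform import test
    from tensorflow.python.util import compat


    class RoundTripSketch(test.TestCase):

      def testWriteThenRead(self):
        path = os.path.join(self.get_temp_dir(), 'roundtrip.txt')
        contents = compat.as_bytes('Some contents')
        with self.test_session() as sess:
          sess.run(io_ops.write_file(path, contents))  # was tf.write_file
          # read_file returns a scalar string tensor holding the file bytes.
          self.assertEqual(io_ops.read_file(path).eval(), contents)


    if __name__ == '__main__':
      test.main()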
diff --git a/tensorflow/python/kernel_tests/large_concat_op_test.py b/tensorflow/python/kernel_tests/large_concat_op_test.py
index 856c99a4e6..6619a2d94b 100644
--- a/tensorflow/python/kernel_tests/large_concat_op_test.py
+++ b/tensorflow/python/kernel_tests/large_concat_op_test.py
@@ -17,18 +17,21 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-import tensorflow as tf
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
+from tensorflow.python.ops import array_ops
+from tensorflow.python.platform import test
-class LargeConcatOpTest(tf.test.TestCase):
+class LargeConcatOpTest(test.TestCase):
"""Tests that belong in concat_op_test.py, but run over large tensors."""
def testConcatLargeTensors(self):
# CPU-only test, because it fails on GPUs with <= 4GB memory.
- with tf.device("/cpu:0"):
- a = tf.ones([2**31 + 6], dtype=tf.int8)
- b = tf.zeros([1024], dtype=tf.int8)
- onezeros = tf.concat_v2([a, b], 0)
+ with ops.device("/cpu:0"):
+ a = array_ops.ones([2**31 + 6], dtype=dtypes.int8)
+ b = array_ops.zeros([1024], dtype=dtypes.int8)
+ onezeros = array_ops.concat_v2([a, b], 0)
with self.test_session(use_gpu=False):
# TODO(dga): Add more depth to this test to validate correctness,
# not just non-crashingness, once other large tensor fixes have gone in.
@@ -36,4 +39,4 @@ class LargeConcatOpTest(tf.test.TestCase):
if __name__ == "__main__":
- tf.test.main()
+ test.main()
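Here tf.device, tf.ones, tf.zeros and tf.concat_v2 resolve to ops.device, array_ops.ones, array_ops.zeros and array_ops.concat_v2 (concat_v2 being the values-first, axis-last signature of this era). The 2**31 + 6 element int8 tensor is presumably sized to push the result just past the range of a signed 32-bit index, so leftover int32 indexing in the kernel would surface as a crash. The same graph, built as a plain script under the new imports:

    from tensorflow.python.framework import dtypes
    from tensorflow.python.framework import ops
    from tensorflow.python.ops import array_ops

    with ops.device('/cpu:0'):  # was tf.device
      a = array_ops.ones([2**31 + 6], dtype=dtypes.int8)
      b = array_ops.zeros([1024], dtype=dtypes.int8)
      onezeros = array_ops.concat_v2([a, b], 0)  # was tf.concat_v2

    print(onezeros.get_shape())  # (2147484678,) -- just past int32 range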
diff --git a/tensorflow/python/kernel_tests/linalg_grad_test.py b/tensorflow/python/kernel_tests/linalg_grad_test.py
index 314d28c8f3..9dd382a1fb 100644
--- a/tensorflow/python/kernel_tests/linalg_grad_test.py
+++ b/tensorflow/python/kernel_tests/linalg_grad_test.py
@@ -13,12 +13,20 @@
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.linalg_grad."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+
+from tensorflow.python.framework import constant_op
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import gradient_checker
+from tensorflow.python.ops import gradients_impl
+from tensorflow.python.ops import linalg_ops
+from tensorflow.python.ops import math_ops
+from tensorflow.python.platform import test as test_lib
def _AddTest(test, op_name, testcase_name, fn):
@@ -28,22 +36,23 @@ def _AddTest(test, op_name, testcase_name, fn):
setattr(test, test_name, fn)
-class ShapeTest(tf.test.TestCase):
+class ShapeTest(test_lib.TestCase):
def testBatchGradientUnknownSize(self):
with self.test_session():
- batch_size = tf.constant(3)
- matrix_size = tf.constant(4)
- batch_identity = tf.tile(
- tf.expand_dims(tf.diag(tf.ones([matrix_size])), 0),
+ batch_size = constant_op.constant(3)
+ matrix_size = constant_op.constant(4)
+ batch_identity = array_ops.tile(
+ array_ops.expand_dims(
+ array_ops.diag(array_ops.ones([matrix_size])), 0),
[batch_size, 1, 1])
- determinants = tf.matrix_determinant(batch_identity)
- reduced = tf.reduce_sum(determinants)
- sum_grad = tf.gradients(reduced, batch_identity)[0]
+ determinants = linalg_ops.matrix_determinant(batch_identity)
+ reduced = math_ops.reduce_sum(determinants)
+ sum_grad = gradients_impl.gradients(reduced, batch_identity)[0]
self.assertAllClose(batch_identity.eval(), sum_grad.eval())
-class MatrixUnaryFunctorGradientTest(tf.test.TestCase):
+class MatrixUnaryFunctorGradientTest(test_lib.TestCase):
pass # Filled in below
@@ -55,7 +64,7 @@ def _GetMatrixUnaryFunctorGradientTest(functor_, dtype_, shape_, **kwargs_):
a_np = np.random.uniform(
low=-1.0, high=1.0,
size=np.prod(shape_)).reshape(shape_).astype(dtype_)
- a = tf.constant(a_np)
+ a = constant_op.constant(a_np)
b = functor_(a, **kwargs_)
# Optimal stepsize for central difference is O(epsilon^{1/3}).
@@ -65,7 +74,7 @@ def _GetMatrixUnaryFunctorGradientTest(functor_, dtype_, shape_, **kwargs_):
# np.linalg.norm(theoretical-numerical, np.inf) on -mavx build
tol = 1e-6 if dtype_ == np.float64 else 0.05
- theoretical, numerical = tf.test.compute_gradient(
+ theoretical, numerical = gradient_checker.compute_gradient(
a,
a.get_shape().as_list(),
b,
@@ -77,7 +86,7 @@ def _GetMatrixUnaryFunctorGradientTest(functor_, dtype_, shape_, **kwargs_):
return Test
-class MatrixBinaryFunctorGradientTest(tf.test.TestCase):
+class MatrixBinaryFunctorGradientTest(test_lib.TestCase):
pass # Filled in below
@@ -93,12 +102,12 @@ def _GetMatrixBinaryFunctorGradientTest(functor_,
a_np = np.random.uniform(
low=-1.0, high=1.0,
size=np.prod(shape_)).reshape(shape_).astype(dtype_)
- a = tf.constant(a_np)
+ a = constant_op.constant(a_np)
b_np = np.random.uniform(
low=-1.0, high=1.0,
size=np.prod(shape_)).reshape(shape_).astype(dtype_)
- b = tf.constant(b_np)
+ b = constant_op.constant(b_np)
c = functor_(a, b, **kwargs_)
# Optimal stepsize for central difference is O(epsilon^{1/3}).
@@ -110,7 +119,7 @@ def _GetMatrixBinaryFunctorGradientTest(functor_,
# The gradients for a and b may be of very different magnitudes,
# so to not get spurious failures we test them separately.
for factor, factor_init in [a, a_np], [b, b_np]:
- theoretical, numerical = tf.test.compute_gradient(
+ theoretical, numerical = gradient_checker.compute_gradient(
factor,
factor.get_shape().as_list(),
c,
@@ -138,7 +147,7 @@ if __name__ == '__main__':
'MatrixSolveGradient',
name,
_GetMatrixBinaryFunctorGradientTest(
- tf.matrix_solve, dtype, shape, adjoint=adjoint))
+ linalg_ops.matrix_solve, dtype, shape, adjoint=adjoint))
for lower in True, False:
name = '%s_low_%s' % (name, lower)
@@ -147,7 +156,7 @@ if __name__ == '__main__':
'MatrixTriangularSolveGradient',
name,
_GetMatrixBinaryFunctorGradientTest(
- tf.matrix_triangular_solve,
+ linalg_ops.matrix_triangular_solve,
dtype,
shape,
float32_tol_fudge=4.0,
@@ -163,12 +172,12 @@ if __name__ == '__main__':
shape = extra + (size, size)
name = '%s_%s' % (dtype.__name__, '_'.join(map(str, shape)))
_AddTest(MatrixUnaryFunctorGradientTest, 'MatrixInverseGradient', name,
- _GetMatrixUnaryFunctorGradientTest(tf.matrix_inverse, dtype,
- shape))
- _AddTest(MatrixUnaryFunctorGradientTest, 'MatrixDeterminantGradient',
- name,
- _GetMatrixUnaryFunctorGradientTest(tf.matrix_determinant,
+ _GetMatrixUnaryFunctorGradientTest(linalg_ops.matrix_inverse,
dtype, shape))
+ _AddTest(
+ MatrixUnaryFunctorGradientTest, 'MatrixDeterminantGradient', name,
+ _GetMatrixUnaryFunctorGradientTest(linalg_ops.matrix_determinant,
+ dtype, shape))
# Tests for gradients of matrix_solve_ls
for dtype in np.float32, np.float64:
@@ -183,9 +192,9 @@ if __name__ == '__main__':
'MatrixSolveLsGradient',
name,
_GetMatrixBinaryFunctorGradientTest(
- lambda a, b, l=l2_regularization: tf.matrix_solve_ls(a, b, l),
+ lambda a, b, l=l2_regularization: linalg_ops.matrix_solve_ls(a, b, l),
dtype,
shape,
float32_tol_fudge=4.0))
- tf.test.main()
+ test_lib.main()
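The one rename here that crosses a public/private boundary is tf.test.compute_gradient -> gradient_checker.compute_gradient: the test now calls the implementation module that sits behind the tf.test wrapper. The contract is unchanged, returning a (theoretical, numerical) pair of Jacobians to compare. A minimal standalone use, with an illustrative well-conditioned input in place of this file's random matrices:

    import numpy as np

    from tensorflow.python.framework import constant_op
    from tensorflow.python.ops import gradient_checker
    from tensorflow.python.ops import linalg_ops
    from tensorflow.python.platform import test as test_lib


    class InverseGradSketch(test_lib.TestCase):

      def testMatrixInverseGradient(self):
        with self.test_session():
          a_np = (np.eye(3) + 0.1 * np.ones((3, 3))).astype(np.float64)
          a = constant_op.constant(a_np)
          b = linalg_ops.matrix_inverse(a)
          # was tf.test.compute_gradient
          theoretical, numerical = gradient_checker.compute_gradient(
              a, [3, 3], b, [3, 3], x_init_value=a_np, delta=1e-3)
          self.assertAllClose(theoretical, numerical, atol=1e-5)


    if __name__ == '__main__':
      test_lib.main()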
diff --git a/tensorflow/python/kernel_tests/linalg_ops_test.py b/tensorflow/python/kernel_tests/linalg_ops_test.py
index 4a41ebc315..ff299e6511 100644
--- a/tensorflow/python/kernel_tests/linalg_ops_test.py
+++ b/tensorflow/python/kernel_tests/linalg_ops_test.py
@@ -13,12 +13,18 @@
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.ops.special_math_ops."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+
+from tensorflow.python.framework import dtypes
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import linalg_ops
+from tensorflow.python.ops import math_ops
+from tensorflow.python.platform import test
def _random_pd_matrix(n, rng):
@@ -27,7 +33,7 @@ def _random_pd_matrix(n, rng):
return temp.dot(temp.T)
-class CholeskySolveTest(tf.test.TestCase):
+class CholeskySolveTest(test.TestCase):
_use_gpu = False
def setUp(self):
@@ -39,27 +45,28 @@ class CholeskySolveTest(tf.test.TestCase):
for np_type, atol in [(np.float32, 0.05), (np.float64, 1e-5)]:
# Create 2 x n x n matrix
array = np.array(
- [_random_pd_matrix(n, self.rng), _random_pd_matrix(n, self.rng)]
- ).astype(np_type)
- chol = tf.cholesky(array)
+ [_random_pd_matrix(n, self.rng), _random_pd_matrix(n, self.rng)
+ ]).astype(np_type)
+ chol = linalg_ops.cholesky(array)
for k in range(1, 3):
rhs = self.rng.randn(2, n, k).astype(np_type)
- x = tf.cholesky_solve(chol, rhs)
- self.assertAllClose(rhs, tf.matmul(array, x).eval(), atol=atol)
+ x = linalg_ops.cholesky_solve(chol, rhs)
+ self.assertAllClose(
+ rhs, math_ops.matmul(array, x).eval(), atol=atol)
class CholeskySolveGpuTest(CholeskySolveTest):
_use_gpu = True
-class EyeTest(tf.test.TestCase):
+class EyeTest(test.TestCase):
def test_non_batch_2x2(self):
num_rows = 2
dtype = np.float32
np_eye = np.eye(num_rows).astype(dtype)
with self.test_session():
- eye = tf.eye(num_rows, dtype=dtype)
+ eye = linalg_ops.eye(num_rows, dtype=dtype)
self.assertAllEqual((num_rows, num_rows), eye.get_shape())
self.assertAllEqual(np_eye, eye.eval())
@@ -69,7 +76,7 @@ class EyeTest(tf.test.TestCase):
dtype = np.float32
np_eye = np.eye(num_rows, num_columns).astype(dtype)
with self.test_session():
- eye = tf.eye(num_rows, num_columns=num_columns, dtype=dtype)
+ eye = linalg_ops.eye(num_rows, num_columns=num_columns, dtype=dtype)
self.assertAllEqual((num_rows, num_columns), eye.get_shape())
self.assertAllEqual(np_eye, eye.eval())
@@ -79,7 +86,7 @@ class EyeTest(tf.test.TestCase):
dtype = np.float32
np_eye = np.eye(num_rows).astype(dtype)
with self.test_session():
- eye = tf.eye(num_rows, batch_shape=batch_shape, dtype=dtype)
+ eye = linalg_ops.eye(num_rows, batch_shape=batch_shape, dtype=dtype)
self.assertAllEqual(batch_shape + [num_rows, num_rows], eye.get_shape())
eye_v = eye.eval()
for i in range(batch_shape[0]):
@@ -92,16 +99,12 @@ class EyeTest(tf.test.TestCase):
dtype = np.float32
np_eye = np.eye(num_rows).astype(dtype)
with self.test_session():
- num_rows_ph = tf.placeholder(tf.int32)
- batch_shape_ph = tf.placeholder(tf.int32)
- eye = tf.eye(
- num_rows_ph,
- batch_shape=batch_shape_ph,
- dtype=dtype)
+ num_rows_ph = array_ops.placeholder(dtypes.int32)
+ batch_shape_ph = array_ops.placeholder(dtypes.int32)
+ eye = linalg_ops.eye(num_rows_ph, batch_shape=batch_shape_ph, dtype=dtype)
eye_v = eye.eval(
- feed_dict={
- num_rows_ph: num_rows,
- batch_shape_ph: batch_shape})
+ feed_dict={num_rows_ph: num_rows,
+ batch_shape_ph: batch_shape})
for i in range(batch_shape[0]):
for j in range(batch_shape[1]):
self.assertAllEqual(np_eye, eye_v[i, j, :, :])
@@ -113,14 +116,12 @@ class EyeTest(tf.test.TestCase):
dtype = np.float32
np_eye = np.eye(num_rows, num_columns).astype(dtype)
with self.test_session():
- eye = tf.eye(
- num_rows,
- num_columns=num_columns,
- batch_shape=batch_shape,
- dtype=dtype)
- self.assertAllEqual(
- batch_shape + [num_rows, num_columns],
- eye.get_shape())
+ eye = linalg_ops.eye(num_rows,
+ num_columns=num_columns,
+ batch_shape=batch_shape,
+ dtype=dtype)
+ self.assertAllEqual(batch_shape + [num_rows, num_columns],
+ eye.get_shape())
eye_v = eye.eval()
for i in range(batch_shape[0]):
for j in range(batch_shape[1]):
@@ -133,19 +134,18 @@ class EyeTest(tf.test.TestCase):
dtype = np.float32
np_eye = np.eye(num_rows, num_columns).astype(dtype)
with self.test_session():
- num_rows_ph = tf.placeholder(tf.int32)
- num_columns_ph = tf.placeholder(tf.int32)
- batch_shape_ph = tf.placeholder(tf.int32)
- eye = tf.eye(
- num_rows_ph,
- num_columns=num_columns_ph,
- batch_shape=batch_shape_ph,
- dtype=dtype)
- eye_v = eye.eval(
- feed_dict={
- num_rows_ph: num_rows,
- num_columns_ph: num_columns,
- batch_shape_ph: batch_shape})
+ num_rows_ph = array_ops.placeholder(dtypes.int32)
+ num_columns_ph = array_ops.placeholder(dtypes.int32)
+ batch_shape_ph = array_ops.placeholder(dtypes.int32)
+ eye = linalg_ops.eye(num_rows_ph,
+ num_columns=num_columns_ph,
+ batch_shape=batch_shape_ph,
+ dtype=dtype)
+ eye_v = eye.eval(feed_dict={
+ num_rows_ph: num_rows,
+ num_columns_ph: num_columns,
+ batch_shape_ph: batch_shape
+ })
for i in range(batch_shape[0]):
for j in range(batch_shape[1]):
self.assertAllEqual(np_eye, eye_v[i, j, :, :])
@@ -155,7 +155,7 @@ class EyeTest(tf.test.TestCase):
dtype = np.int64
np_eye = np.eye(num_rows).astype(dtype)
with self.test_session():
- eye = tf.eye(num_rows, dtype=dtype)
+ eye = linalg_ops.eye(num_rows, dtype=dtype)
self.assertAllEqual((num_rows, num_rows), eye.get_shape())
self.assertAllEqual(np_eye, eye.eval())
@@ -165,7 +165,7 @@ class EyeTest(tf.test.TestCase):
dtype = np.int64
np_eye = np.eye(num_rows, num_columns).astype(dtype)
with self.test_session():
- eye = tf.eye(num_rows, num_columns=num_columns, dtype=dtype)
+ eye = linalg_ops.eye(num_rows, num_columns=num_columns, dtype=dtype)
self.assertAllEqual((num_rows, num_columns), eye.get_shape())
self.assertAllEqual(np_eye, eye.eval())
@@ -175,7 +175,7 @@ class EyeTest(tf.test.TestCase):
dtype = np.int64
np_eye = np.eye(num_rows, num_columns).astype(dtype)
with self.test_session():
- eye = tf.eye(num_rows, num_columns=num_columns, dtype=dtype)
+ eye = linalg_ops.eye(num_rows, num_columns=num_columns, dtype=dtype)
self.assertAllEqual((num_rows, num_columns), eye.get_shape())
self.assertAllEqual(np_eye, eye.eval())
@@ -185,7 +185,7 @@ class EyeTest(tf.test.TestCase):
dtype = np.float32
np_eye = np.eye(num_rows).astype(dtype)
with self.test_session():
- eye = tf.eye(num_rows, batch_shape=batch_shape, dtype=dtype)
+ eye = linalg_ops.eye(num_rows, batch_shape=batch_shape, dtype=dtype)
self.assertAllEqual((1, 3, 0, 0), eye.get_shape())
eye_v = eye.eval()
for i in range(batch_shape[0]):
@@ -199,14 +199,12 @@ class EyeTest(tf.test.TestCase):
dtype = np.float32
np_eye = np.eye(num_rows, num_columns).astype(dtype)
with self.test_session():
- eye = tf.eye(
- num_rows,
- num_columns=num_columns,
- batch_shape=batch_shape,
- dtype=dtype)
- self.assertAllEqual(
- batch_shape + [num_rows, num_columns],
- eye.get_shape())
+ eye = linalg_ops.eye(num_rows,
+ num_columns=num_columns,
+ batch_shape=batch_shape,
+ dtype=dtype)
+ self.assertAllEqual(batch_shape + [num_rows, num_columns],
+ eye.get_shape())
eye_v = eye.eval()
for i in range(batch_shape[0]):
for j in range(batch_shape[1]):
@@ -219,14 +217,12 @@ class EyeTest(tf.test.TestCase):
dtype = np.float32
np_eye = np.eye(num_rows, num_columns).astype(dtype)
with self.test_session():
- eye = tf.eye(
- num_rows,
- num_columns=num_columns,
- batch_shape=batch_shape,
- dtype=dtype)
- self.assertAllEqual(
- batch_shape + [num_rows, num_columns],
- eye.get_shape())
+ eye = linalg_ops.eye(num_rows,
+ num_columns=num_columns,
+ batch_shape=batch_shape,
+ dtype=dtype)
+ self.assertAllEqual(batch_shape + [num_rows, num_columns],
+ eye.get_shape())
eye_v = eye.eval()
for i in range(batch_shape[0]):
for j in range(batch_shape[1]):
@@ -234,4 +230,4 @@ class EyeTest(tf.test.TestCase):
if __name__ == '__main__':
- tf.test.main()
+ test.main()
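tf.eye moves to linalg_ops.eye with its keyword interface (num_columns, batch_shape, dtype) intact, and the placeholders feeding it move from tf.placeholder(tf.int32) to array_ops.placeholder(dtypes.int32). A condensed sketch of the dynamically-shaped batched case these tests cover (class and method names are illustrative):

    import numpy as np

    from tensorflow.python.framework import dtypes
    from tensorflow.python.ops import array_ops
    from tensorflow.python.ops import linalg_ops
    from tensorflow.python.platform import test


    class BatchEyeSketch(test.TestCase):

      def testDynamicBatchEye(self):
        with self.test_session():
          num_rows_ph = array_ops.placeholder(dtypes.int32)
          batch_shape_ph = array_ops.placeholder(dtypes.int32)
          eye = linalg_ops.eye(
              num_rows_ph, batch_shape=batch_shape_ph, dtype=np.float32)
          eye_v = eye.eval(feed_dict={num_rows_ph: 2, batch_shape_ph: [3]})
          for i in range(3):  # each batch entry is a 2x2 identity
            self.assertAllEqual(np.eye(2, dtype=np.float32), eye_v[i])


    if __name__ == '__main__':
      test.main()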
diff --git a/tensorflow/python/kernel_tests/listdiff_op_test.py b/tensorflow/python/kernel_tests/listdiff_op_test.py
index 4ce0163da9..4f053d2a21 100644
--- a/tensorflow/python/kernel_tests/listdiff_op_test.py
+++ b/tensorflow/python/kernel_tests/listdiff_op_test.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for tensorflow.kernels.listdiff_op."""
from __future__ import absolute_import
@@ -21,23 +20,30 @@ from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
-import tensorflow as tf
-_TYPES = [tf.int32, tf.int64, tf.float32, tf.float64, tf.string]
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
+from tensorflow.python.ops import array_ops
+from tensorflow.python.platform import test
+from tensorflow.python.util import compat
+
+_TYPES = [
+ dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64, dtypes.string
+]
-class ListDiffTest(tf.test.TestCase):
+class ListDiffTest(test.TestCase):
def _testListDiff(self, x, y, out, idx):
for dtype in _TYPES:
- if dtype == tf.string:
- x = [tf.compat.as_bytes(str(a)) for a in x]
- y = [tf.compat.as_bytes(str(a)) for a in y]
- out = [tf.compat.as_bytes(str(a)) for a in out]
- for diff_func in [tf.setdiff1d]:
+ if dtype == dtypes.string:
+ x = [compat.as_bytes(str(a)) for a in x]
+ y = [compat.as_bytes(str(a)) for a in y]
+ out = [compat.as_bytes(str(a)) for a in out]
+ for diff_func in [array_ops.setdiff1d]:
with self.test_session() as sess:
- x_tensor = tf.convert_to_tensor(x, dtype=dtype)
- y_tensor = tf.convert_to_tensor(y, dtype=dtype)
+ x_tensor = ops.convert_to_tensor(x, dtype=dtype)
+ y_tensor = ops.convert_to_tensor(y, dtype=dtype)
out_tensor, idx_tensor = diff_func(x_tensor, y_tensor)
tf_out, tf_idx = sess.run([out_tensor, idx_tensor])
self.assertAllEqual(tf_out, out)
@@ -126,5 +132,6 @@ class ListDiffTest(tf.test.TestCase):
idx = []
self._testListDiff(x, y, out, idx)
+
if __name__ == "__main__":
- tf.test.main()
+ test.main()
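tf.setdiff1d becomes array_ops.setdiff1d, and tf.convert_to_tensor becomes ops.convert_to_tensor, since tensor conversion lives in the framework package rather than an ops module. The semantics the test depends on: setdiff1d(x, y) returns the elements of x that do not occur in y, in order, together with their indices in x. A worked call under the new imports:

    from tensorflow.python.framework import dtypes
    from tensorflow.python.framework import ops
    from tensorflow.python.ops import array_ops
    from tensorflow.python.platform import test


    class SetDiffSketch(test.TestCase):

      def testSmallDiff(self):
        with self.test_session() as sess:
          x = ops.convert_to_tensor([1, 2, 3, 4], dtype=dtypes.int32)
          y = ops.convert_to_tensor([2, 4], dtype=dtypes.int32)
          out, idx = array_ops.setdiff1d(x, y)  # was tf.setdiff1d
          out_v, idx_v = sess.run([out, idx])
          self.assertAllEqual([1, 3], out_v)  # values of x absent from y
          self.assertAllEqual([0, 2], idx_v)  # their positions within x


    if __name__ == '__main__':
      test.main()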
diff --git a/tensorflow/python/kernel_tests/logging_ops_test.py b/tensorflow/python/kernel_tests/logging_ops_test.py
index c343b23ba8..7fe65c57cc 100644
--- a/tensorflow/python/kernel_tests/logging_ops_test.py
+++ b/tensorflow/python/kernel_tests/logging_ops_test.py
@@ -12,61 +12,69 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for tensorflow.kernels.logging_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-import tensorflow as tf
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import ops
+from tensorflow.python.ops import control_flow_ops
+from tensorflow.python.ops import gradients_impl
+from tensorflow.python.ops import logging_ops
+from tensorflow.python.ops import math_ops
+from tensorflow.python.platform import test
-class LoggingOpsTest(tf.test.TestCase):
+class LoggingOpsTest(test.TestCase):
def testAssertDivideByZero(self):
with self.test_session() as sess:
- epsilon = tf.convert_to_tensor(1e-20)
- x = tf.convert_to_tensor(0.0)
- y = tf.convert_to_tensor(1.0)
- z = tf.convert_to_tensor(2.0)
+ epsilon = ops.convert_to_tensor(1e-20)
+ x = ops.convert_to_tensor(0.0)
+ y = ops.convert_to_tensor(1.0)
+ z = ops.convert_to_tensor(2.0)
# assert(epsilon < y)
# z / y
- with sess.graph.control_dependencies(
- [tf.Assert(tf.less(epsilon, y), ["Divide-by-zero"])]):
- out = tf.div(z, y)
+ with sess.graph.control_dependencies([
+ control_flow_ops.Assert(
+ math_ops.less(epsilon, y), ["Divide-by-zero"])
+ ]):
+ out = math_ops.div(z, y)
self.assertAllEqual(2.0, out.eval())
# assert(epsilon < x)
# z / x
#
# This tests printing out multiple tensors
- with sess.graph.control_dependencies(
- [tf.Assert(tf.less(epsilon, x),
- ["Divide-by-zero", "less than x"])]):
- out = tf.div(z, x)
+ with sess.graph.control_dependencies([
+ control_flow_ops.Assert(
+ math_ops.less(epsilon, x), ["Divide-by-zero", "less than x"])
+ ]):
+ out = math_ops.div(z, x)
with self.assertRaisesOpError("less than x"):
out.eval()
-class PrintGradientTest(tf.test.TestCase):
+class PrintGradientTest(test.TestCase):
def testPrintShape(self):
- inp = tf.constant(2.0, shape=[100, 32])
- inp_printed = tf.Print(inp, [inp])
+ inp = constant_op.constant(2.0, shape=[100, 32])
+ inp_printed = logging_ops.Print(inp, [inp])
self.assertEqual(inp.get_shape(), inp_printed.get_shape())
def testPrintGradient(self):
with self.test_session():
- inp = tf.constant(2.0, shape=[100, 32], name="in")
- w = tf.constant(4.0, shape=[10, 100], name="w")
- wx = tf.matmul(w, inp, name="wx")
- wx_print = tf.Print(wx, [w, w, w])
- wx_grad = tf.gradients(wx, w)[0]
- wx_print_grad = tf.gradients(wx_print, w)[0]
+ inp = constant_op.constant(2.0, shape=[100, 32], name="in")
+ w = constant_op.constant(4.0, shape=[10, 100], name="w")
+ wx = math_ops.matmul(w, inp, name="wx")
+ wx_print = logging_ops.Print(wx, [w, w, w])
+ wx_grad = gradients_impl.gradients(wx, w)[0]
+ wx_print_grad = gradients_impl.gradients(wx_print, w)[0]
wxg = wx_grad.eval()
wxpg = wx_print_grad.eval()
self.assertAllEqual(wxg, wxpg)
if __name__ == "__main__":
- tf.test.main()
+ test.main()
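tf.Assert and tf.Print keep their capitalized op names but move to control_flow_ops.Assert and logging_ops.Print; tf.less and tf.div go to math_ops, and tf.gradients resolves to gradients_impl.gradients. What PrintGradientTest establishes is that Print is an identity for differentiation: the gradient taken through Print(wx) equals the gradient taken through wx itself. A condensed sketch of that property (the class name is illustrative):

    from tensorflow.python.framework import constant_op
    from tensorflow.python.ops import gradients_impl
    from tensorflow.python.ops import logging_ops
    from tensorflow.python.ops import math_ops
    from tensorflow.python.platform import test


    class PrintIsIdentitySketch(test.TestCase):

      def testGradientPassesThroughPrint(self):
        with self.test_session():
          inp = constant_op.constant(2.0, shape=[100, 32])
          w = constant_op.constant(4.0, shape=[10, 100])
          wx = math_ops.matmul(w, inp)
          wx_print = logging_ops.Print(wx, [w])  # identity plus a logging side effect
          g = gradients_impl.gradients(wx, w)[0]
          g_print = gradients_impl.gradients(wx_print, w)[0]
          self.assertAllEqual(g.eval(), g_print.eval())


    if __name__ == '__main__':
      test.main()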
diff --git a/tensorflow/python/kernel_tests/losses_test.py b/tensorflow/python/kernel_tests/losses_test.py
index 8d718b8d24..3eff721e4d 100644
--- a/tensorflow/python/kernel_tests/losses_test.py
+++ b/tensorflow/python/kernel_tests/losses_test.py
@@ -12,206 +12,182 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for losses."""
-# pylint: disable=unused-import,g-bad-import-order
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-# pylint: enable=unused-import
import numpy as np
-import tensorflow as tf
-
-class AbsoluteDifferenceLossTest(tf.test.TestCase):
+from tensorflow.contrib.losses.python.losses import loss_ops
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import errors_impl
+from tensorflow.python.framework import ops
+from tensorflow.python.framework import random_seed
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import init_ops
+from tensorflow.python.ops import math_ops
+from tensorflow.python.ops import variable_scope
+from tensorflow.python.ops import variables
+from tensorflow.python.ops.losses import losses
+from tensorflow.python.ops.losses import util
+from tensorflow.python.platform import test
+from tensorflow.python.training import momentum as momentum_lib
+
+
+class AbsoluteDifferenceLossTest(test.TestCase):
def setUp(self):
- self._predictions = tf.constant([4, 8, 12, 8, 1, 3], shape=(2, 3))
- self._labels = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
+ self._predictions = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3))
+ self._labels = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
def testValueErrorThrownWhenWeightIsNone(self):
with self.test_session():
with self.assertRaises(ValueError):
- tf.losses.absolute_difference(
+ losses.absolute_difference(
self._predictions, self._predictions, weights=None)
def testAllCorrectNoLossWeight(self):
- loss = tf.losses.absolute_difference(
- self._predictions, self._predictions)
+ loss = losses.absolute_difference(self._predictions, self._predictions)
with self.test_session():
self.assertAlmostEqual(0.0, loss.eval(), 3)
def testNonZeroLoss(self):
- loss = tf.losses.absolute_difference(
- self._labels, self._predictions)
+ loss = losses.absolute_difference(self._labels, self._predictions)
with self.test_session():
self.assertAlmostEqual(5.5, loss.eval(), 3)
def testNonZeroLossWithPythonScalarWeight(self):
weights = 2.3
- loss = tf.losses.absolute_difference(
- self._labels, self._predictions, weights)
+ loss = losses.absolute_difference(self._labels, self._predictions, weights)
with self.test_session():
self.assertAlmostEqual(5.5 * weights, loss.eval(), 3)
def testNonZeroLossWithScalarTensorWeight(self):
weights = 2.3
- loss = tf.losses.absolute_difference(
- self._labels, self._predictions, tf.constant(weights))
+ loss = losses.absolute_difference(self._labels, self._predictions,
+ constant_op.constant(weights))
with self.test_session():
self.assertAlmostEqual(5.5 * weights, loss.eval(), 3)
def testNonZeroLossWithOneDimBatchSpecificWeights(self):
- weights = tf.constant([1.2, 0.0], shape=[2,])
- loss = tf.losses.absolute_difference(
- self._labels, self._predictions, weights)
+ weights = constant_op.constant([1.2, 0.0], shape=[2,])
+ loss = losses.absolute_difference(self._labels, self._predictions, weights)
with self.test_session():
self.assertAlmostEqual(5.6, loss.eval(), 3)
def testNonZeroLossWithTwoDimBatchSpecificWeights(self):
- weights = tf.constant([1.2, 0.0], shape=[2, 1])
- loss = tf.losses.absolute_difference(
- self._labels, self._predictions, weights)
+ weights = constant_op.constant([1.2, 0.0], shape=[2, 1])
+ loss = losses.absolute_difference(self._labels, self._predictions, weights)
with self.test_session():
self.assertAlmostEqual(5.6, loss.eval(), 3)
def testNonZeroLossWithSampleSpecificWeights(self):
- weights = tf.constant([3, 6, 5, 0, 4, 2], shape=[2, 3])
- loss = tf.losses.absolute_difference(
- self._labels, self._predictions, weights)
+ weights = constant_op.constant([3, 6, 5, 0, 4, 2], shape=[2, 3])
+ loss = losses.absolute_difference(self._labels, self._predictions, weights)
with self.test_session():
self.assertAlmostEqual(16.6, loss.eval(), 3)
def testNonZeroLossWithSampleSpecificWeightsMostZero(self):
- weights = tf.constant([0, 0, 0, 0, 0, 2], shape=[2, 3])
- loss = tf.losses.absolute_difference(
- self._labels, self._predictions, weights)
+ weights = constant_op.constant([0, 0, 0, 0, 0, 2], shape=[2, 3])
+ loss = losses.absolute_difference(self._labels, self._predictions, weights)
with self.test_session():
self.assertAlmostEqual(6.0, loss.eval(), 3)
def testLossWithSampleSpecificWeightsAllZero(self):
- weights = tf.zeros((2, 3))
- loss = tf.losses.absolute_difference(
- self._labels, self._predictions, weights)
+ weights = array_ops.zeros((2, 3))
+ loss = losses.absolute_difference(self._labels, self._predictions, weights)
with self.test_session():
self.assertAlmostEqual(0.0, loss.eval(), 3)
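The constants asserted by AbsoluteDifferenceLossTest follow directly from its fixtures. With predictions [4, 8, 12, 8, 1, 3] and labels [1, 9, 2, -5, -2, 6], the elementwise absolute errors are [3, 1, 10, 13, 3, 3], whose mean is 33/6 = 5.5. With per-row weights [1.2, 0.0] the loss divides the weighted sum by the count of elements carrying nonzero weight: 1.2 * (3 + 1 + 10) / 3 = 5.6, and the 16.6 and 6.0 cases above check out the same way. The arithmetic in plain numpy, as a quick sanity check:

    import numpy as np

    pred = np.array([4, 8, 12, 8, 1, 3], dtype=np.float64).reshape(2, 3)
    labels = np.array([1, 9, 2, -5, -2, 6], dtype=np.float64).reshape(2, 3)
    err = np.abs(pred - labels)        # [[3, 1, 10], [13, 3, 3]]

    print(err.mean())                  # 5.5, the unweighted loss
    w = np.array([[1.2], [0.0]])       # second row zeroed out
    print((w * err).sum() / 3.0)       # 5.6, normalized by 3 nonzero-weight elements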
-class SoftmaxCrossEntropyLossTest(tf.test.TestCase):
+class SoftmaxCrossEntropyLossTest(test.TestCase):
def testNoneWeightRaisesValueError(self):
- logits = tf.constant([[10.0, 0.0, 0.0],
- [0.0, 10.0, 0.0],
- [0.0, 0.0, 10.0]])
- labels = tf.constant([[1, 0, 0],
- [0, 1, 0],
- [0, 0, 1]])
+ logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
+ [0.0, 0.0, 10.0]])
+ labels = constant_op.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
with self.test_session():
with self.assertRaises(ValueError):
- tf.losses.softmax_cross_entropy(labels, logits, weights=None)
+ losses.softmax_cross_entropy(labels, logits, weights=None)
def testAllCorrect(self):
with self.test_session():
- logits = tf.constant([[10.0, 0.0, 0.0],
- [0.0, 10.0, 0.0],
- [0.0, 0.0, 10.0]])
- labels = tf.constant([[1, 0, 0],
- [0, 1, 0],
- [0, 0, 1]])
- loss = tf.losses.softmax_cross_entropy(labels, logits)
+ logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
+ [0.0, 0.0, 10.0]])
+ labels = constant_op.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
+ loss = losses.softmax_cross_entropy(labels, logits)
self.assertEquals('softmax_cross_entropy_loss/value', loss.op.name)
self.assertAlmostEqual(loss.eval(), 0.0, 3)
def testAllWrong(self):
- logits = tf.constant([[10.0, 0.0, 0.0],
- [0.0, 10.0, 0.0],
- [0.0, 0.0, 10.0]])
- labels = tf.constant([[0, 0, 1],
- [1, 0, 0],
- [0, 1, 0]])
+ logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
+ [0.0, 0.0, 10.0]])
+ labels = constant_op.constant([[0, 0, 1], [1, 0, 0], [0, 1, 0]])
with self.test_session():
- loss = tf.losses.softmax_cross_entropy(labels, logits)
+ loss = losses.softmax_cross_entropy(labels, logits)
self.assertEquals(loss.op.name, 'softmax_cross_entropy_loss/value')
self.assertAlmostEqual(loss.eval(), 10.0, 3)
def testNonZeroLossWithPythonScalarWeight(self):
- logits = tf.constant([[10.0, 0.0, 0.0],
- [0.0, 10.0, 0.0],
- [0.0, 0.0, 10.0]])
- labels = tf.constant([[0, 0, 1],
- [1, 0, 0],
- [0, 1, 0]])
+ logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
+ [0.0, 0.0, 10.0]])
+ labels = constant_op.constant([[0, 0, 1], [1, 0, 0], [0, 1, 0]])
weights = 2.3
with self.test_session():
- loss = tf.losses.softmax_cross_entropy(labels, logits, weights)
+ loss = losses.softmax_cross_entropy(labels, logits, weights)
self.assertAlmostEqual(weights * 10.0, loss.eval(), 3)
def testNonZeroLossWithScalarTensorWeight(self):
- logits = tf.constant([[10.0, 0.0, 0.0],
- [0.0, 10.0, 0.0],
- [0.0, 0.0, 10.0]])
- labels = tf.constant([[0, 0, 1],
- [1, 0, 0],
- [0, 1, 0]])
+ logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
+ [0.0, 0.0, 10.0]])
+ labels = constant_op.constant([[0, 0, 1], [1, 0, 0], [0, 1, 0]])
weights = 2.3
with self.test_session():
- loss = tf.losses.softmax_cross_entropy(
- labels, logits, tf.constant(weights))
+ loss = losses.softmax_cross_entropy(labels, logits,
+ constant_op.constant(weights))
self.assertAlmostEqual(weights * 10.0, loss.eval(), 3)
def testNonZeroLossWithOneDimBatchSpecificWeights(self):
- logits = tf.constant([[10.0, 0.0, 0.0],
- [0.0, 10.0, 0.0],
- [0.0, 0.0, 10.0]])
- labels = tf.constant([[0, 0, 1],
- [1, 0, 0],
- [0, 1, 0]])
- weights = tf.constant([1.2, 3.4, 5.6], shape=[3])
- with self.test_session():
- loss = tf.losses.softmax_cross_entropy(labels, logits, weights)
+ logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
+ [0.0, 0.0, 10.0]])
+ labels = constant_op.constant([[0, 0, 1], [1, 0, 0], [0, 1, 0]])
+ weights = constant_op.constant([1.2, 3.4, 5.6], shape=[3])
+ with self.test_session():
+ loss = losses.softmax_cross_entropy(labels, logits, weights)
self.assertAlmostEqual((1.2 + 3.4 + 5.6) * 10.0 / 3.0, loss.eval(), 3)
def testAllWrongAllWeightsMissing(self):
- logits = tf.constant([[10.0, 0.0, 0.0],
- [0.0, 10.0, 0.0],
- [0.0, 0.0, 10.0]])
- labels = tf.constant([[0, 0, 1],
- [1, 0, 0],
- [0, 1, 0]])
- weights = tf.constant([0, 0, 0], shape=[3])
- with self.test_session():
- loss = tf.losses.softmax_cross_entropy(labels, logits, weights)
+ logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
+ [0.0, 0.0, 10.0]])
+ labels = constant_op.constant([[0, 0, 1], [1, 0, 0], [0, 1, 0]])
+ weights = constant_op.constant([0, 0, 0], shape=[3])
+ with self.test_session():
+ loss = losses.softmax_cross_entropy(labels, logits, weights)
self.assertAlmostEqual(0.0, loss.eval(), 3)
def testSomeWeightsMissing(self):
- logits = tf.constant([[10.0, 0.0, 0.0],
- [0.0, 10.0, 0.0],
- [0.0, 0.0, 10.0]])
- labels = tf.constant([[0, 0, 1],
- [1, 0, 0],
- [0, 1, 0]])
- weights = tf.constant([1.2, 0, 0], shape=[3])
- with self.test_session():
- loss = tf.losses.softmax_cross_entropy(labels, logits, weights)
+ logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
+ [0.0, 0.0, 10.0]])
+ labels = constant_op.constant([[0, 0, 1], [1, 0, 0], [0, 1, 0]])
+ weights = constant_op.constant([1.2, 0, 0], shape=[3])
+ with self.test_session():
+ loss = losses.softmax_cross_entropy(labels, logits, weights)
self.assertAlmostEqual(12.0, loss.eval(), 3)
def testSoftmaxWithMeasurementSpecificWeightsRaisesException(self):
with self.test_session():
- logits = tf.constant([[100.0, -100.0, -100.0],
- [-100.0, 100.0, -100.0],
- [-100.0, -100.0, 100.0]])
- labels = tf.constant([[1, 0, 0],
- [0, 1, 0],
- [0, 0, 1]])
- weights = tf.constant([[3, 4, 5],
- [2, 6, 0],
- [8, 0, 1]])
+ logits = constant_op.constant([[100.0, -100.0, -100.0],
+ [-100.0, 100.0, -100.0],
+ [-100.0, -100.0, 100.0]])
+ labels = constant_op.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
+ weights = constant_op.constant([[3, 4, 5], [2, 6, 0], [8, 0, 1]])
with self.assertRaises(ValueError):
- tf.losses.softmax_cross_entropy(
- labels, logits, weights=weights).eval()
+ losses.softmax_cross_entropy(labels, logits, weights=weights).eval()
def testSoftmaxLabelSmoothing(self):
with self.test_session():
@@ -225,304 +201,274 @@ class SoftmaxCrossEntropyLossTest(tf.test.TestCase):
# so our log softmaxes become: [0, -200, -200]
# so our cross entropy loss is:
# -(1 - L + L/n) * 0 + 400 * L/n = 400 L/n
- logits = tf.constant([[100.0, -100.0, -100.0]])
- labels = tf.constant([[1, 0, 0]])
+ logits = constant_op.constant([[100.0, -100.0, -100.0]])
+ labels = constant_op.constant([[1, 0, 0]])
label_smoothing = 0.1
- loss = tf.losses.softmax_cross_entropy(
+ loss = losses.softmax_cross_entropy(
labels, logits, label_smoothing=label_smoothing)
self.assertEquals(loss.op.name, 'softmax_cross_entropy_loss/value')
expected_value = 400.0 * label_smoothing / 3.0
self.assertAlmostEqual(loss.eval(), expected_value, 3)
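The comment above compresses a short derivation worth spelling out once. With smoothing L and n classes, the one-hot target y is replaced by (1 - L) y + L/n, and for logits [100, -100, -100] the log-softmax is approximately [0, -200, -200], so only the two wrong classes contribute:

    \mathrm{CE} = -\Bigl(1 - L + \tfrac{L}{n}\Bigr)\cdot 0 - \tfrac{L}{n}(-200) - \tfrac{L}{n}(-200) = \frac{400\,L}{n}

For L = 0.1 and n = 3 this is 400 * 0.1 / 3 ≈ 13.333, the expected_value asserted above.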
-class SparseSoftmaxCrossEntropyLossTest(tf.test.TestCase):
+class SparseSoftmaxCrossEntropyLossTest(test.TestCase):
def testNoneWeightRaisesValueError(self):
- logits = tf.constant([[10.0, 0.0, 0.0],
- [0.0, 10.0, 0.0],
- [0.0, 0.0, 10.0]])
- labels = tf.constant([[0], [1], [2]])
+ logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
+ [0.0, 0.0, 10.0]])
+ labels = constant_op.constant([[0], [1], [2]])
with self.test_session():
with self.assertRaises(ValueError):
- tf.losses.sparse_softmax_cross_entropy(
- labels, logits, weights=None)
+ losses.sparse_softmax_cross_entropy(labels, logits, weights=None)
def testAllCorrectInt32Labels(self):
with self.test_session():
- logits = tf.constant([[10.0, 0.0, 0.0],
- [0.0, 10.0, 0.0],
- [0.0, 0.0, 10.0]])
- labels = tf.constant([[0], [1], [2]], dtype=tf.int32)
- loss = tf.losses.sparse_softmax_cross_entropy(labels, logits)
+ logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
+ [0.0, 0.0, 10.0]])
+ labels = constant_op.constant([[0], [1], [2]], dtype=dtypes.int32)
+ loss = losses.sparse_softmax_cross_entropy(labels, logits)
self.assertEquals(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
self.assertAlmostEqual(loss.eval(), 0.0, 3)
def testAllCorrectInt64Labels(self):
with self.test_session():
- logits = tf.constant([[10.0, 0.0, 0.0],
- [0.0, 10.0, 0.0],
- [0.0, 0.0, 10.0]])
- labels = tf.constant([[0], [1], [2]], dtype=tf.int64)
- loss = tf.losses.sparse_softmax_cross_entropy(labels, logits)
+ logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
+ [0.0, 0.0, 10.0]])
+ labels = constant_op.constant([[0], [1], [2]], dtype=dtypes.int64)
+ loss = losses.sparse_softmax_cross_entropy(labels, logits)
self.assertEquals(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
self.assertAlmostEqual(loss.eval(), 0.0, 3)
def testAllCorrectNonColumnLabels(self):
with self.test_session():
- logits = tf.constant([[10.0, 0.0, 0.0],
- [0.0, 10.0, 0.0],
- [0.0, 0.0, 10.0]])
- labels = tf.constant([0, 1, 2])
- loss = tf.losses.sparse_softmax_cross_entropy(labels, logits)
+ logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
+ [0.0, 0.0, 10.0]])
+ labels = constant_op.constant([0, 1, 2])
+ loss = losses.sparse_softmax_cross_entropy(labels, logits)
self.assertEquals(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
self.assertAlmostEqual(loss.eval(), 0.0, 3)
def testAllWrongInt32Labels(self):
- logits = tf.constant([[10.0, 0.0, 0.0],
- [0.0, 10.0, 0.0],
- [0.0, 0.0, 10.0]])
- labels = tf.constant([[2], [0], [1]], dtype=tf.int32)
+ logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
+ [0.0, 0.0, 10.0]])
+ labels = constant_op.constant([[2], [0], [1]], dtype=dtypes.int32)
with self.test_session():
- loss = tf.losses.sparse_softmax_cross_entropy(labels, logits)
+ loss = losses.sparse_softmax_cross_entropy(labels, logits)
self.assertEquals(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
self.assertAlmostEqual(loss.eval(), 10.0, 3)
def testAllWrongInt64Labels(self):
- logits = tf.constant([[10.0, 0.0, 0.0],
- [0.0, 10.0, 0.0],
- [0.0, 0.0, 10.0]])
- labels = tf.constant([[2], [0], [1]], dtype=tf.int64)
+ logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
+ [0.0, 0.0, 10.0]])
+ labels = constant_op.constant([[2], [0], [1]], dtype=dtypes.int64)
with self.test_session():
- loss = tf.losses.sparse_softmax_cross_entropy(labels, logits)
+ loss = losses.sparse_softmax_cross_entropy(labels, logits)
self.assertEquals(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
self.assertAlmostEqual(loss.eval(), 10.0, 3)
def testAllWrongNonColumnLabels(self):
- logits = tf.constant([[10.0, 0.0, 0.0],
- [0.0, 10.0, 0.0],
- [0.0, 0.0, 10.0]])
- labels = tf.constant([2, 0, 1])
+ logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
+ [0.0, 0.0, 10.0]])
+ labels = constant_op.constant([2, 0, 1])
with self.test_session():
- loss = tf.losses.sparse_softmax_cross_entropy(labels, logits)
+ loss = losses.sparse_softmax_cross_entropy(labels, logits)
self.assertEquals(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
self.assertAlmostEqual(loss.eval(), 10.0, 3)
def testNonZeroLossWithPythonScalarWeight(self):
- logits = tf.constant([[10.0, 0.0, 0.0],
- [0.0, 10.0, 0.0],
- [0.0, 0.0, 10.0]])
- labels = tf.constant([[2], [0], [1]])
+ logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
+ [0.0, 0.0, 10.0]])
+ labels = constant_op.constant([[2], [0], [1]])
weights = 2.3
with self.test_session():
- loss = tf.losses.sparse_softmax_cross_entropy(
- labels, logits, weights)
+ loss = losses.sparse_softmax_cross_entropy(labels, logits, weights)
self.assertAlmostEqual(weights * 10.0, loss.eval(), 3)
def testNonZeroLossWithScalarTensorWeight(self):
- logits = tf.constant([[10.0, 0.0, 0.0],
- [0.0, 10.0, 0.0],
- [0.0, 0.0, 10.0]])
- labels = tf.constant([[2], [0], [1]])
+ logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
+ [0.0, 0.0, 10.0]])
+ labels = constant_op.constant([[2], [0], [1]])
weights = 2.3
with self.test_session():
- loss = tf.losses.sparse_softmax_cross_entropy(
- labels, logits, tf.constant(weights))
+ loss = losses.sparse_softmax_cross_entropy(labels, logits,
+ constant_op.constant(weights))
self.assertAlmostEqual(weights * 10.0, loss.eval(), 3)
def testNonZeroLossWithOneDimBatchSpecificWeights(self):
- logits = tf.constant([[10.0, 0.0, 0.0],
- [0.0, 10.0, 0.0],
- [0.0, 0.0, 10.0]])
- labels = tf.constant([[2], [0], [1]])
- weights = tf.constant([1.2, 3.4, 5.6], shape=[3])
- with self.test_session():
- loss = tf.losses.sparse_softmax_cross_entropy(
- labels, logits, weights)
+ logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
+ [0.0, 0.0, 10.0]])
+ labels = constant_op.constant([[2], [0], [1]])
+ weights = constant_op.constant([1.2, 3.4, 5.6], shape=[3])
+ with self.test_session():
+ loss = losses.sparse_softmax_cross_entropy(labels, logits, weights)
self.assertAlmostEqual((1.2 + 3.4 + 5.6) * 10.0 / 3.0, loss.eval(), 3)
def testNonZeroLossWithColumnWeights(self):
- logits = tf.constant([[10.0, 0.0, 0.0],
- [0.0, 10.0, 0.0],
- [0.0, 0.0, 10.0]])
- labels = tf.constant([[2], [0], [1]])
- weights = tf.constant([[1.2], [3.4], [5.6]])
- with self.test_session():
- loss = tf.losses.sparse_softmax_cross_entropy(
- labels, logits, weights)
+ logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
+ [0.0, 0.0, 10.0]])
+ labels = constant_op.constant([[2], [0], [1]])
+ weights = constant_op.constant([[1.2], [3.4], [5.6]])
+ with self.test_session():
+ loss = losses.sparse_softmax_cross_entropy(labels, logits, weights)
self.assertAlmostEqual((1.2 + 3.4 + 5.6) * 10.0 / 3.0, loss.eval(), 3)
def testAllWrongAllWeightsMissing(self):
- logits = tf.constant([[10.0, 0.0, 0.0],
- [0.0, 10.0, 0.0],
- [0.0, 0.0, 10.0]])
- labels = tf.constant([[2], [0], [1]])
- weights = tf.constant([0, 0, 0], shape=[3])
- with self.test_session():
- loss = tf.losses.sparse_softmax_cross_entropy(
- labels, logits, weights)
+ logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
+ [0.0, 0.0, 10.0]])
+ labels = constant_op.constant([[2], [0], [1]])
+ weights = constant_op.constant([0, 0, 0], shape=[3])
+ with self.test_session():
+ loss = losses.sparse_softmax_cross_entropy(labels, logits, weights)
self.assertAlmostEqual(0.0, loss.eval(), 3)
def testSomeWeightsMissing(self):
- logits = tf.constant([[10.0, 0.0, 0.0],
- [0.0, 10.0, 0.0],
- [0.0, 0.0, 10.0]])
- labels = tf.constant([[2], [0], [1]])
- weights = tf.constant([1.2, 0, 0], shape=[3])
- with self.test_session():
- loss = tf.losses.sparse_softmax_cross_entropy(
- labels, logits, weights)
+ logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
+ [0.0, 0.0, 10.0]])
+ labels = constant_op.constant([[2], [0], [1]])
+ weights = constant_op.constant([1.2, 0, 0], shape=[3])
+ with self.test_session():
+ loss = losses.sparse_softmax_cross_entropy(labels, logits, weights)
self.assertAlmostEqual(12.0, loss.eval(), 3)
def testMeasurementSpecificWeightsRaisesException(self):
with self.test_session():
- logits = tf.constant([[100.0, -100.0, -100.0],
- [-100.0, 100.0, -100.0],
- [-100.0, -100.0, 100.0]])
- labels = tf.constant([[0], [1], [2]])
- weights = tf.constant([[3, 4, 5],
- [2, 6, 0],
- [8, 0, 1]])
+ logits = constant_op.constant([[100.0, -100.0, -100.0],
+ [-100.0, 100.0, -100.0],
+ [-100.0, -100.0, 100.0]])
+ labels = constant_op.constant([[0], [1], [2]])
+ weights = constant_op.constant([[3, 4, 5], [2, 6, 0], [8, 0, 1]])
with self.assertRaises(ValueError):
- tf.losses.sparse_softmax_cross_entropy(
+ losses.sparse_softmax_cross_entropy(
labels, logits, weights=weights).eval()
def testInconsistentWeightSizeRaisesException(self):
"""The weight tensor has incorrect number of elements."""
with self.test_session():
- logits = tf.constant([[100.0, -100.0, -100.0],
- [-100.0, 100.0, -100.0],
- [-100.0, -100.0, 100.0]])
- labels = tf.constant([[0], [1], [2]])
- weights = tf.constant([1.2, 3.4, 5.6, 7.8])
+ logits = constant_op.constant([[100.0, -100.0, -100.0],
+ [-100.0, 100.0, -100.0],
+ [-100.0, -100.0, 100.0]])
+ labels = constant_op.constant([[0], [1], [2]])
+ weights = constant_op.constant([1.2, 3.4, 5.6, 7.8])
with self.assertRaises(ValueError):
- tf.losses.sparse_softmax_cross_entropy(
+ losses.sparse_softmax_cross_entropy(
labels, logits, weights=weights).eval()
def testInconsistentLabelSizeRaisesException(self):
"""The label tensor has incorrect number of elements."""
with self.test_session():
- logits = tf.constant([[100.0, -100.0, -100.0],
- [-100.0, 100.0, -100.0],
- [-100.0, -100.0, 100.0]])
- labels = tf.constant([[0], [1], [2], [3]])
- weights = tf.constant([1.2, 3.4, 5.6])
+ logits = constant_op.constant([[100.0, -100.0, -100.0],
+ [-100.0, 100.0, -100.0],
+ [-100.0, -100.0, 100.0]])
+ labels = constant_op.constant([[0], [1], [2], [3]])
+ weights = constant_op.constant([1.2, 3.4, 5.6])
with self.assertRaises(ValueError):
- tf.losses.sparse_softmax_cross_entropy(
+ losses.sparse_softmax_cross_entropy(
labels, logits, weights=weights).eval()
def testInconsistentWeightShapeRaisesException(self):
"""The weight tensor has incorrect shape."""
with self.test_session():
- logits = tf.constant([[100.0, -100.0, -100.0, -100.0],
- [-100.0, 100.0, -100.0, -100.0],
- [-100.0, -100.0, 100.0, -100.0],
- [-100.0, -100.0, -100.0, 100.0]])
- labels = tf.constant([[0], [1], [2], [3]])
- weights = tf.constant([[1.2, 3.4], [5.6, 7.8]])
+ logits = constant_op.constant([[100.0, -100.0, -100.0, -100.0],
+ [-100.0, 100.0, -100.0, -100.0],
+ [-100.0, -100.0, 100.0, -100.0],
+ [-100.0, -100.0, -100.0, 100.0]])
+ labels = constant_op.constant([[0], [1], [2], [3]])
+ weights = constant_op.constant([[1.2, 3.4], [5.6, 7.8]])
with self.assertRaises(ValueError):
- tf.losses.sparse_softmax_cross_entropy(
+ losses.sparse_softmax_cross_entropy(
labels, logits, weights=weights).eval()
def testInconsistentLabelShapeRaisesException(self):
"""The label tensor has incorrect shape."""
with self.test_session():
- logits = tf.constant([[100.0, -100.0, -100.0, -100.0],
- [-100.0, 100.0, -100.0, -100.0],
- [-100.0, -100.0, 100.0, -100.0],
- [-100.0, -100.0, -100.0, 100.0]])
- labels = tf.constant([[0, 1], [2, 3]])
- weights = tf.constant([1.2, 3.4, 5.6, 7.8])
+ logits = constant_op.constant([[100.0, -100.0, -100.0, -100.0],
+ [-100.0, 100.0, -100.0, -100.0],
+ [-100.0, -100.0, 100.0, -100.0],
+ [-100.0, -100.0, -100.0, 100.0]])
+ labels = constant_op.constant([[0, 1], [2, 3]])
+ weights = constant_op.constant([1.2, 3.4, 5.6, 7.8])
- with self.assertRaises(tf.errors.InvalidArgumentError):
- tf.losses.sparse_softmax_cross_entropy(
+ with self.assertRaises(errors_impl.InvalidArgumentError):
+ losses.sparse_softmax_cross_entropy(
labels, logits, weights=weights).eval()
-class SigmoidCrossEntropyLossTest(tf.test.TestCase):
+class SigmoidCrossEntropyLossTest(test.TestCase):
def testAllCorrectSigmoid(self):
with self.test_session():
- logits = tf.constant([[100.0, -100.0, -100.0],
- [-100.0, 100.0, -100.0],
- [-100.0, -100.0, 100.0]])
- labels = tf.constant([[1, 0, 0],
- [0, 1, 0],
- [0, 0, 1]])
- loss = tf.losses.sigmoid_cross_entropy(labels, logits)
+ logits = constant_op.constant([[100.0, -100.0, -100.0],
+ [-100.0, 100.0, -100.0],
+ [-100.0, -100.0, 100.0]])
+ labels = constant_op.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
+ loss = losses.sigmoid_cross_entropy(labels, logits)
self.assertEquals(loss.op.name, 'sigmoid_cross_entropy_loss/value')
self.assertAlmostEqual(0.0, loss.eval(), 3)
def testLossWithSingleDimPlaceholderForLogitsAndWeights1(self):
- logits = tf.placeholder(tf.float32, shape=(None, 1))
- labels = tf.placeholder(tf.float32, shape=(None, 1))
- weights = tf.ones_like(logits, dtype=tf.float32)
+ logits = array_ops.placeholder(dtypes.float32, shape=(None, 1))
+ labels = array_ops.placeholder(dtypes.float32, shape=(None, 1))
+ weights = array_ops.ones_like(logits, dtype=dtypes.float32)
- loss = tf.losses.sigmoid_cross_entropy(labels, logits, weights)
+ loss = losses.sigmoid_cross_entropy(labels, logits, weights)
with self.test_session() as sess:
- loss = sess.run(loss, feed_dict={
- logits: np.ones((32, 1)),
- labels: np.ones((32, 1)),
- })
+ loss = sess.run(loss,
+ feed_dict={
+ logits: np.ones((32, 1)),
+ labels: np.ones((32, 1)),
+ })
self.assertAlmostEqual(0.313, loss, 3)
def testLossWithSingleDimPlaceholderForLogitsAndWeights2(self):
- logits = tf.placeholder(tf.float32, shape=(None, 2))
- labels = tf.placeholder(tf.float32, shape=(None, 2))
- weights = tf.ones_like(logits, dtype=tf.float32)
+ logits = array_ops.placeholder(dtypes.float32, shape=(None, 2))
+ labels = array_ops.placeholder(dtypes.float32, shape=(None, 2))
+ weights = array_ops.ones_like(logits, dtype=dtypes.float32)
- loss = tf.losses.sigmoid_cross_entropy(labels, logits, weights)
+ loss = losses.sigmoid_cross_entropy(labels, logits, weights)
with self.test_session() as sess:
- loss = sess.run(loss, feed_dict={
- logits: np.ones((32, 2)),
- labels: np.ones((32, 2)),
- })
+ loss = sess.run(loss,
+ feed_dict={
+ logits: np.ones((32, 2)),
+ labels: np.ones((32, 2)),
+ })
self.assertAlmostEqual(0.313, loss, 3)
def testAllWrongSigmoid(self):
with self.test_session():
- logits = tf.constant([[100.0, -100.0, -100.0],
- [-100.0, 100.0, -100.0],
- [-100.0, -100.0, 100.0]])
- labels = tf.constant([[0, 0, 1],
- [1, 0, 0],
- [0, 1, 0]])
- loss = tf.losses.sigmoid_cross_entropy(labels, logits)
+ logits = constant_op.constant([[100.0, -100.0, -100.0],
+ [-100.0, 100.0, -100.0],
+ [-100.0, -100.0, 100.0]])
+ labels = constant_op.constant([[0, 0, 1], [1, 0, 0], [0, 1, 0]])
+ loss = losses.sigmoid_cross_entropy(labels, logits)
self.assertEquals(loss.op.name, 'sigmoid_cross_entropy_loss/value')
self.assertAlmostEqual(loss.eval(), 600.0 / 9.0, 3)
def testAllWrongSigmoidWithMeasurementSpecificWeights(self):
with self.test_session():
- logits = tf.constant([[100.0, -100.0, -100.0],
- [-100.0, 100.0, -100.0],
- [-100.0, -100.0, 100.0]])
- labels = tf.constant([[0, 0, 1],
- [1, 0, 0],
- [0, 1, 0]])
- weights = tf.constant([[3, 4, 5],
- [2, 6, 0],
- [8, 0, 1]])
- loss = tf.losses.sigmoid_cross_entropy(
- labels, logits, weights)
+ logits = constant_op.constant([[100.0, -100.0, -100.0],
+ [-100.0, 100.0, -100.0],
+ [-100.0, -100.0, 100.0]])
+ labels = constant_op.constant([[0, 0, 1], [1, 0, 0], [0, 1, 0]])
+ weights = constant_op.constant([[3, 4, 5], [2, 6, 0], [8, 0, 1]])
+ loss = losses.sigmoid_cross_entropy(labels, logits, weights)
self.assertEquals(loss.op.name, 'sigmoid_cross_entropy_loss/value')
self.assertAlmostEqual(1700.0 / 7.0, loss.eval(), 3)
def testMultiCorrectSigmoid(self):
- logits = tf.constant([[100.0, -100.0, 100.0],
- [100.0, 100.0, -100.0],
- [-100.0, 100.0, 100.0]])
- labels = tf.constant([[1, 0, 1],
- [1, 1, 0],
- [0, 1, 1]])
- loss = tf.losses.sigmoid_cross_entropy(labels, logits)
+ logits = constant_op.constant([[100.0, -100.0, 100.0],
+ [100.0, 100.0, -100.0],
+ [-100.0, 100.0, 100.0]])
+ labels = constant_op.constant([[1, 0, 1], [1, 1, 0], [0, 1, 1]])
+ loss = losses.sigmoid_cross_entropy(labels, logits)
self.assertEquals(loss.op.name, 'sigmoid_cross_entropy_loss/value')
with self.test_session():
@@ -530,8 +476,8 @@ class SigmoidCrossEntropyLossTest(tf.test.TestCase):
def testSigmoidLabelSmoothingCorrect(self):
with self.test_session():
- logits = tf.constant([[100.0, -100.0, -100.0]])
- labels = tf.constant([[1, 0, 1]])
+ logits = constant_op.constant([[100.0, -100.0, -100.0]])
+ labels = constant_op.constant([[1, 0, 1]])
# Sigmoid cross entropy loss is:
# max(x,0) - x*z + log(1 + exp(-abs(x)))
# The new labels are:
@@ -544,7 +490,7 @@ class SigmoidCrossEntropyLossTest(tf.test.TestCase):
# + 0 + 100 * (1 - 0.5 L) + 0)
# = 1/3 * (100 + 50 L)
label_smoothing = 0.1
- loss = tf.losses.sigmoid_cross_entropy(
+ loss = losses.sigmoid_cross_entropy(
labels, logits, label_smoothing=label_smoothing)
self.assertEquals(loss.op.name, 'sigmoid_cross_entropy_loss/value')
expected_value = (100.0 + 50.0 * label_smoothing) / 3.0
@@ -553,19 +499,20 @@ class SigmoidCrossEntropyLossTest(tf.test.TestCase):
def testSigmoidLabelSmoothingEqualsSoftmaxTwoLabel(self):
with self.test_session():
label_smoothing = 0.1
- sigmoid_logits = tf.constant([[100.0, -100.0, -100.0]])
- sigmoid_labels = tf.constant([[1, 0, 1]])
- sigmoid_loss = tf.losses.sigmoid_cross_entropy(
+ sigmoid_logits = constant_op.constant([[100.0, -100.0, -100.0]])
+ sigmoid_labels = constant_op.constant([[1, 0, 1]])
+ sigmoid_loss = losses.sigmoid_cross_entropy(
sigmoid_labels, sigmoid_logits, label_smoothing=label_smoothing)
- softmax_logits = tf.constant([[0.0, 100.0], [100.0, 0.0], [100.0, 0.0]])
- softmax_labels = tf.constant([[0, 1], [1, 0], [0, 1]])
- softmax_loss = tf.losses.softmax_cross_entropy(
+ softmax_logits = constant_op.constant(
+ [[0.0, 100.0], [100.0, 0.0], [100.0, 0.0]])
+ softmax_labels = constant_op.constant([[0, 1], [1, 0], [0, 1]])
+ softmax_loss = losses.softmax_cross_entropy(
softmax_labels, softmax_logits, label_smoothing=label_smoothing)
self.assertAlmostEqual(sigmoid_loss.eval(), softmax_loss.eval(), 3)
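The two-label equivalence rests on the identity that a sigmoid over logit x is a softmax over the pair [0, x]: sigmoid(x) = e^x / (e^0 + e^x), and softmax is invariant to shifting both logits, so [100.0, 0.0] encodes the same distribution as [0.0, -100.0]. That is exactly how the test rewrites each sigmoid output and its label as a two-class softmax row. The identity checked numerically, outside the test harness:

    import numpy as np

    def sigmoid(x):
      return 1.0 / (1.0 + np.exp(-x))

    def softmax(v):
      e = np.exp(v - v.max())  # shift for numerical stability
      return e / e.sum()

    x = -100.0
    print(sigmoid(x))                          # P(label = 1) under the sigmoid
    print(softmax(np.array([0.0, x]))[1])      # identical as a two-class softmax
    print(softmax(np.array([100.0, 0.0]))[1])  # same after shifting both logits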
-class LogLossTest(tf.test.TestCase):
+class LogLossTest(test.TestCase):
def setUp(self):
predictions = np.asarray([.9, .2, .2, .8, .4, .6]).reshape((2, 3))
@@ -579,116 +526,111 @@ class LogLossTest(tf.test.TestCase):
labels, np.log(predictions + epsilon)) + np.multiply(
1 - labels, np.log(1 - predictions + epsilon))
- self._predictions = tf.constant(predictions)
- self._labels = tf.constant(labels)
+ self._predictions = constant_op.constant(predictions)
+ self._labels = constant_op.constant(labels)
def testValueErrorThrownWhenWeightIsNone(self):
with self.test_session():
with self.assertRaises(ValueError):
- tf.losses.log_loss(self._labels, self._labels, weights=None)
+ losses.log_loss(self._labels, self._labels, weights=None)
def testAllCorrectNoLossWeight(self):
- loss = tf.losses.log_loss(self._labels, self._labels)
+ loss = losses.log_loss(self._labels, self._labels)
with self.test_session():
self.assertAlmostEqual(0.0, loss.eval(), 3)
def testAllCorrectNoLossWeightWithPlaceholder(self):
- tf_predictions = tf.placeholder(tf.float32, shape=self._np_labels.shape)
- loss = tf.losses.log_loss(self._labels, tf_predictions)
+ tf_predictions = array_ops.placeholder(
+ dtypes.float32, shape=self._np_labels.shape)
+ loss = losses.log_loss(self._labels, tf_predictions)
with self.test_session():
- self.assertAlmostEqual(0.0, loss.eval(feed_dict={
- tf_predictions: self._np_labels}), 3)
+ self.assertAlmostEqual(
+ 0.0, loss.eval(feed_dict={tf_predictions: self._np_labels}), 3)
def testNonZeroLoss(self):
- loss = tf.losses.log_loss(self._labels, self._predictions)
+ loss = losses.log_loss(self._labels, self._predictions)
with self.test_session():
self.assertAlmostEqual(-np.sum(self._expected_losses) / 6.0,
loss.eval(), 3)
def testNonZeroLossWithPythonScalarWeight(self):
weights = 2.3
- loss = tf.losses.log_loss(
- self._labels, self._predictions, weights)
+ loss = losses.log_loss(self._labels, self._predictions, weights)
with self.test_session():
self.assertAlmostEqual(weights * -np.sum(self._expected_losses) / 6.0,
loss.eval(), 3)
def testNonZeroLossWithScalarTensorWeight(self):
weights = 2.3
- loss = tf.losses.log_loss(
- self._labels, self._predictions, tf.constant(weights))
+ loss = losses.log_loss(self._labels, self._predictions,
+ constant_op.constant(weights))
with self.test_session():
self.assertAlmostEqual(weights * -np.sum(self._expected_losses) / 6.0,
loss.eval(), 3)
def testNonZeroLossWithScalarTensorWeightAndPlaceholder(self):
- tf_predictions = tf.placeholder(tf.float32,
- shape=self._np_predictions.shape)
+ tf_predictions = array_ops.placeholder(
+ dtypes.float32, shape=self._np_predictions.shape)
weights = 2.3
- loss = tf.losses.log_loss(
- self._labels, tf_predictions, tf.constant(weights))
+ loss = losses.log_loss(self._labels, tf_predictions,
+ constant_op.constant(weights))
with self.test_session() as sess:
loss = sess.run(loss, feed_dict={tf_predictions: self._np_predictions})
self.assertAlmostEqual(weights * -np.sum(self._expected_losses) / 6.0,
loss, 3)
def testNonZeroLossWithScalarTensorWeightAndPlaceholderWithRankOnly(self):
- tf_predictions = tf.placeholder(tf.float32, shape=[None, None])
+ tf_predictions = array_ops.placeholder(dtypes.float32, shape=[None, None])
weights = 2.3
- loss = tf.losses.log_loss(
- self._labels, tf_predictions, tf.constant(weights))
+ loss = losses.log_loss(self._labels, tf_predictions,
+ constant_op.constant(weights))
with self.test_session() as sess:
loss = sess.run(loss, feed_dict={tf_predictions: self._np_predictions})
self.assertAlmostEqual(weights * -np.sum(self._expected_losses) / 6.0,
loss, 3)
def testNonZeroLossWithOneDimBatchSpecificWeights(self):
- weights = tf.constant([1.2, 3.4], shape=[2])
+ weights = constant_op.constant([1.2, 3.4], shape=[2])
expected_losses = np.multiply(
self._expected_losses,
np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape((2, 3)))
- loss = tf.losses.log_loss(
- self._labels, self._predictions, weights)
+ loss = losses.log_loss(self._labels, self._predictions, weights)
with self.test_session():
- self.assertAlmostEqual(-np.sum(expected_losses) / 6.0,
- loss.eval(), 3)
+ self.assertAlmostEqual(-np.sum(expected_losses) / 6.0, loss.eval(), 3)
def testNonZeroLossWithOneDimBatchSpecificWeightsSomeZero(self):
- weights = tf.constant([1.2, 0], shape=[2])
- expected_losses = np.multiply(
- self._expected_losses,
- np.asarray([1.2, 1.2, 1.2, 0, 0, 0]).reshape((2, 3)))
- loss = tf.losses.log_loss(
- self._labels, self._predictions, weights)
+ weights = constant_op.constant([1.2, 0], shape=[2])
+ expected_losses = np.multiply(self._expected_losses,
+ np.asarray([1.2, 1.2, 1.2, 0, 0, 0]).reshape(
+ (2, 3)))
+ loss = losses.log_loss(self._labels, self._predictions, weights)
with self.test_session():
- self.assertAlmostEqual(-np.sum(expected_losses) / 3.0,
- loss.eval(), 3)
+ self.assertAlmostEqual(-np.sum(expected_losses) / 3.0, loss.eval(), 3)
def testNonZeroLossWithTwoDimBatchSpecificWeightsSomeZero(self):
- weights = tf.constant([1.2, 0], shape=[2, 1])
- expected_losses = np.multiply(
- self._expected_losses,
- np.asarray([1.2, 1.2, 1.2, 0, 0, 0]).reshape((2, 3)))
- loss = tf.losses.log_loss(
- self._labels, self._predictions, weights)
+ weights = constant_op.constant([1.2, 0], shape=[2, 1])
+ expected_losses = np.multiply(self._expected_losses,
+ np.asarray([1.2, 1.2, 1.2, 0, 0, 0]).reshape(
+ (2, 3)))
+ loss = losses.log_loss(self._labels, self._predictions, weights)
with self.test_session():
- self.assertAlmostEqual(-np.sum(expected_losses) / 3.0,
- loss.eval(), 3)
+ self.assertAlmostEqual(-np.sum(expected_losses) / 3.0, loss.eval(), 3)
def testWeightsWithSameNumDimsButWrongShapeThrowsException(self):
- weights = tf.constant(np.random.normal(size=(2, 4)), shape=[2, 4])
+ weights = constant_op.constant(np.random.normal(size=(2, 4)), shape=[2, 4])
with self.test_session():
with self.assertRaises(ValueError):
- tf.losses.log_loss(self._labels, self._predictions, weights)
+ losses.log_loss(self._labels, self._predictions, weights)
def testNonZeroLossWithMeasurementSpecificWeights(self):
weights = np.array([3, 6, 5, 0, 4, 2]).reshape((2, 3))
expected_losses = np.multiply(self._expected_losses, weights)
- loss = tf.losses.log_loss(
+ loss = losses.log_loss(
self._labels,
self._predictions,
- tf.constant(weights, shape=(2, 3)))
+ constant_op.constant(
+ weights, shape=(2, 3)))
with self.test_session():
self.assertAlmostEqual(-np.sum(expected_losses) / 5.0, loss.eval(), 3)
@@ -696,11 +638,12 @@ class LogLossTest(tf.test.TestCase):
weights = np.array([3, 6, 5, 0, 4, 2]).reshape((2, 3))
expected_losses = np.multiply(self._expected_losses, weights)
- tf_predictions = tf.placeholder(tf.float32, shape=[2, 3])
- loss = tf.losses.log_loss(
+ tf_predictions = array_ops.placeholder(dtypes.float32, shape=[2, 3])
+ loss = losses.log_loss(
self._labels,
tf_predictions,
- tf.constant(weights, shape=(2, 3)))
+ constant_op.constant(
+ weights, shape=(2, 3)))
with self.test_session() as sess:
loss = sess.run(loss, feed_dict={tf_predictions: self._np_predictions})
@@ -710,10 +653,11 @@ class LogLossTest(tf.test.TestCase):
weights = np.array([0, 0, 0, 0, 0, 2]).reshape((2, 3))
expected_losses = np.multiply(self._expected_losses, weights)
- loss = tf.losses.log_loss(
+ loss = losses.log_loss(
self._labels,
self._predictions,
- tf.constant(weights, shape=(2, 3)))
+ constant_op.constant(
+ weights, shape=(2, 3)))
with self.test_session():
self.assertAlmostEqual(-np.sum(expected_losses), loss.eval(), 3)
@@ -721,138 +665,127 @@ class LogLossTest(tf.test.TestCase):
weights = np.array([0, 0, 0, 0, 0, 2]).reshape((2, 3))
expected_losses = np.multiply(self._expected_losses, weights)
- tf_predictions = tf.placeholder(tf.float32, shape=[2, 3])
- tf_weights = tf.constant(weights, shape=(2, 3))
- loss = tf.losses.log_loss(self._labels, tf_predictions, tf_weights)
+ tf_predictions = array_ops.placeholder(dtypes.float32, shape=[2, 3])
+ tf_weights = constant_op.constant(weights, shape=(2, 3))
+ loss = losses.log_loss(self._labels, tf_predictions, tf_weights)
with self.test_session() as sess:
loss = sess.run(loss, feed_dict={tf_predictions: self._np_predictions})
self.assertAlmostEqual(-np.sum(expected_losses), loss, 3)
def testLossWithSampleSpecificWeightsAllZero(self):
- tf_weights = tf.zeros(shape=(2, 3))
- loss = tf.losses.log_loss(
- self._labels, self._predictions, tf_weights)
+ tf_weights = array_ops.zeros(shape=(2, 3))
+ loss = losses.log_loss(self._labels, self._predictions, tf_weights)
with self.test_session():
self.assertAlmostEqual(0.0, loss.eval(), 3)
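
Every assertion in LogLossTest is driven by one oracle: _expected_losses holds the elementwise log-likelihood labels * log(p) + (1 - labels) * log(1 - p), and each test negates its weighted sum and divides by the number of measurements with nonzero weight. A minimal sketch of that oracle (illustrative only, not a hunk from this commit):

import numpy as np

def log_likelihood(labels, predictions):
  # Elementwise labels * log(p) + (1 - labels) * log(1 - p); the tests
  # assert on -sum(weights * result) / num_nonzero_weight_measurements.
  labels = np.asarray(labels, dtype=np.float64)
  predictions = np.asarray(predictions, dtype=np.float64)
  return labels * np.log(predictions) + (1 - labels) * np.log(1 - predictions)
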
-class HingeLossTest(tf.test.TestCase):
+class HingeLossTest(test.TestCase):
def testIncompatibleShapes(self):
with self.test_session():
- logits = tf.constant([[-1.0], [2.1]])
- labels = tf.constant([0.0, 1.0])
+ logits = constant_op.constant([[-1.0], [2.1]])
+ labels = constant_op.constant([0.0, 1.0])
with self.assertRaises(ValueError):
- _ = tf.losses.hinge_loss(labels, logits).eval()
+ _ = losses.hinge_loss(labels, logits).eval()
def testAllOutsideMargin(self):
with self.test_session():
- logits = tf.constant([1.2, -1.4, -1.0, 2.1])
- labels = tf.constant([1.0, 0.0, 0.0, 1.0])
- loss = tf.losses.hinge_loss(labels, logits)
+ logits = constant_op.constant([1.2, -1.4, -1.0, 2.1])
+ labels = constant_op.constant([1.0, 0.0, 0.0, 1.0])
+ loss = losses.hinge_loss(labels, logits)
self.assertAllClose(loss.eval(), 0.0, atol=1e-3)
def testSomeInsideMargin(self):
with self.test_session():
- logits = tf.constant([[-0.7], [-1.4], [1.4], [0.6]])
- labels = tf.constant([[0.0], [0.0], [1.0], [1.0]])
- loss = tf.losses.hinge_loss(labels, logits)
+ logits = constant_op.constant([[-0.7], [-1.4], [1.4], [0.6]])
+ labels = constant_op.constant([[0.0], [0.0], [1.0], [1.0]])
+ loss = losses.hinge_loss(labels, logits)
# Examples 1 and 4 are on the correct side of the hyperplane but within
# the margin so they incur some (small) loss.
self.assertAllClose(loss.eval(), 0.175, atol=1e-3)
def testSomeMisclassified(self):
with self.test_session():
- logits = tf.constant([[[1.2], [0.4], [-1.0], [-1.1]]])
- labels = tf.constant([[[1.0], [0.0], [0.0], [1.0]]])
- loss = tf.losses.hinge_loss(labels, logits)
+ logits = constant_op.constant([[[1.2], [0.4], [-1.0], [-1.1]]])
+ labels = constant_op.constant([[[1.0], [0.0], [0.0], [1.0]]])
+ loss = losses.hinge_loss(labels, logits)
# Examples 2 and 4 are on the wrong side of the hyperplane so they incur
# some (fairly large) loss.
self.assertAllClose(loss.eval(), 0.875, atol=1e-3)
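
The 0.175 above can be reproduced by hand: hinge loss maps {0, 1} labels to {-1, +1} and averages max(0, 1 - y * logit) over the batch. A minimal NumPy check (an illustrative sketch, not part of the commit):

import numpy as np

def hinge_loss(labels, logits):
  # Map {0, 1} labels to {-1, +1}, then average max(0, 1 - y * logit).
  y = 2.0 * np.asarray(labels, dtype=np.float64) - 1.0
  return np.mean(np.maximum(0.0, 1.0 - y * np.asarray(logits, dtype=np.float64)))

# testSomeInsideMargin: (0.3 + 0.0 + 0.0 + 0.4) / 4 = 0.175
print(hinge_loss([0.0, 0.0, 1.0, 1.0], [-0.7, -1.4, 1.4, 0.6]))
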
-class MeanSquaredErrorTest(tf.test.TestCase):
+class MeanSquaredErrorTest(test.TestCase):
def setUp(self):
- self._predictions = tf.constant([4, 8, 12, 8, 1, 3], shape=(2, 3))
- self._labels = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
+ self._predictions = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3))
+ self._labels = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
def testValueErrorThrownWhenWeightIsNone(self):
with self.test_session():
with self.assertRaises(ValueError):
- tf.losses.mean_squared_error(
+ losses.mean_squared_error(
self._predictions, self._predictions, weights=None)
def testAllCorrectNoLossWeight(self):
- loss = tf.losses.mean_squared_error(
- self._predictions, self._predictions)
+ loss = losses.mean_squared_error(self._predictions, self._predictions)
with self.test_session():
self.assertAlmostEqual(0.0, loss.eval(), 3)
def testNonZeroLoss(self):
- loss = tf.losses.mean_squared_error(
- self._labels, self._predictions)
+ loss = losses.mean_squared_error(self._labels, self._predictions)
with self.test_session():
self.assertAlmostEqual(49.5, loss.eval(), 3)
def testNonZeroLossWithPythonScalarWeight(self):
weights = 2.3
- loss = tf.losses.mean_squared_error(
- self._labels, self._predictions, weights)
+ loss = losses.mean_squared_error(self._labels, self._predictions, weights)
with self.test_session():
self.assertAlmostEqual(49.5 * weights, loss.eval(), 3)
def testNonZeroLossWithScalarTensorWeight(self):
weights = 2.3
- loss = tf.losses.mean_squared_error(
- self._labels, self._predictions, tf.constant(weights))
+ loss = losses.mean_squared_error(self._labels, self._predictions,
+ constant_op.constant(weights))
with self.test_session():
self.assertAlmostEqual(49.5 * weights, loss.eval(), 3)
def testNonZeroLossWithOneDimBatchSpecificWeights(self):
- weights = tf.constant([1.2, 3.4], shape=[2,])
- loss = tf.losses.mean_squared_error(
- self._labels, self._predictions, weights)
+ weights = constant_op.constant([1.2, 3.4], shape=[2,])
+ loss = losses.mean_squared_error(self._labels, self._predictions, weights)
with self.test_session():
self.assertAlmostEqual(767.8 / 6.0, loss.eval(), 3)
def testNonZeroLossWithTwoDimBatchSpecificWeights(self):
- weights = tf.constant([1.2, 3.4], shape=[2, 1])
- loss = tf.losses.mean_squared_error(
- self._labels, self._predictions, weights)
+ weights = constant_op.constant([1.2, 3.4], shape=[2, 1])
+ loss = losses.mean_squared_error(self._labels, self._predictions, weights)
with self.test_session():
self.assertAlmostEqual(767.8 / 6.0, loss.eval(), 3)
def testNonZeroLossWithSampleSpecificWeights(self):
- weights = tf.constant([3, 6, 5, 0, 4, 2], shape=[2, 3])
- loss = tf.losses.mean_squared_error(
- self._labels, self._predictions, weights)
+ weights = constant_op.constant([3, 6, 5, 0, 4, 2], shape=[2, 3])
+ loss = losses.mean_squared_error(self._labels, self._predictions, weights)
with self.test_session():
self.assertAlmostEqual(587 / 5.0, loss.eval(), 3)
def testNonZeroLossWithSampleSpecificWeightsMostZero(self):
- weights = tf.constant([0, 0, 0, 0, 0, 2], shape=[2, 3])
- loss = tf.losses.mean_squared_error(
- self._labels, self._predictions, weights)
+ weights = constant_op.constant([0, 0, 0, 0, 0, 2], shape=[2, 3])
+ loss = losses.mean_squared_error(self._labels, self._predictions, weights)
with self.test_session():
self.assertAlmostEqual(18.0, loss.eval(), 3)
def testLossWithSampleSpecificWeightsAllZero(self):
- weights = tf.zeros((2, 3))
- loss = tf.losses.mean_squared_error(
- self._labels, self._predictions, weights)
+ weights = array_ops.zeros((2, 3))
+ loss = losses.mean_squared_error(self._labels, self._predictions, weights)
with self.test_session():
self.assertAlmostEqual(0.0, loss.eval(), 3)
-class MeanPairwiseSquaresErrorTest(tf.test.TestCase):
+class MeanPairwiseSquaresErrorTest(test.TestCase):
def setUp(self):
- self._predictions = np.array([[4, 8, 12],
- [8, 1, 3]])
- self._labels = np.array([[1, 9, 2],
- [-5, -5, 7]])
+ self._predictions = np.array([[4, 8, 12], [8, 1, 3]])
+ self._labels = np.array([[1, 9, 2], [-5, -5, 7]])
batch_size, dims = self._labels.shape
@@ -863,7 +796,7 @@ class MeanPairwiseSquaresErrorTest(tf.test.TestCase):
for j in range(dims):
x = self._predictions[b, i].item() - self._predictions[b, j].item()
y = self._labels[b, i].item() - self._labels[b, j].item()
- tmp = (x-y) * (x-y)
+ tmp = (x - y) * (x - y)
total[b] += tmp
self._expected_losses = np.divide(total, 9.0)
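
The nested loops above compute, per batch element, the average of ((p_i - p_j) - (l_i - l_j))^2 over all ordered index pairs, hence the division by dims^2 = 9. A vectorized NumPy equivalent (illustrative sketch, assuming float inputs):

import numpy as np

def expected_pairwise_sq_error(predictions, labels):
  # Pairwise deltas along the last axis, squared difference, averaged over
  # all ordered (i, j) pairs -- matching the loops in setUp above.
  dp = predictions[:, :, None] - predictions[:, None, :]
  dl = labels[:, :, None] - labels[:, None, :]
  return np.sum((dp - dl) ** 2, axis=(1, 2)) / float(predictions.shape[1] ** 2)
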
@@ -871,44 +804,43 @@ class MeanPairwiseSquaresErrorTest(tf.test.TestCase):
def testValueErrorThrownWhenWeightIsNone(self):
with self.test_session():
with self.assertRaises(ValueError):
- tf.losses.mean_pairwise_squared_error(
- predictions=tf.constant(self._labels),
- labels=tf.constant(self._labels),
+ losses.mean_pairwise_squared_error(
+ predictions=constant_op.constant(self._labels),
+ labels=constant_op.constant(self._labels),
weights=None)
def testAllCorrectNoLossWeight(self):
- loss = tf.losses.mean_pairwise_squared_error(
- predictions=tf.constant(self._labels),
- labels=tf.constant(self._labels))
+ loss = losses.mean_pairwise_squared_error(
+ predictions=constant_op.constant(self._labels),
+ labels=constant_op.constant(self._labels))
with self.test_session():
self.assertAlmostEqual(0.0, loss.eval(), 3)
def testNonZeroLoss(self):
- loss = tf.losses.mean_pairwise_squared_error(
- predictions=tf.constant(self._predictions),
- labels=tf.constant(self._labels))
+ loss = losses.mean_pairwise_squared_error(
+ predictions=constant_op.constant(self._predictions),
+ labels=constant_op.constant(self._labels))
with self.test_session():
self.assertAlmostEqual(np.sum(self._expected_losses), loss.eval(), 3)
def testGradientWithZeroWeight(self):
- with tf.Graph().as_default():
- tf.set_random_seed(0)
+ with ops.Graph().as_default():
+ random_seed.set_random_seed(0)
- inputs = tf.ones((2, 3))
- weights = tf.get_variable('weights',
- shape=[3, 4],
- initializer=tf.truncated_normal_initializer())
- predictions = tf.matmul(inputs, weights)
+ inputs = array_ops.ones((2, 3))
+ weights = variable_scope.get_variable(
+ 'weights',
+ shape=[3, 4],
+ initializer=init_ops.truncated_normal_initializer())
+ predictions = math_ops.matmul(inputs, weights)
- optimizer = tf.train.MomentumOptimizer(learning_rate=0.001, momentum=0.9)
- loss = tf.losses.mean_pairwise_squared_error(
- predictions,
- predictions,
- 0)
+ optimizer = momentum_lib.MomentumOptimizer(
+ learning_rate=0.001, momentum=0.9)
+ loss = losses.mean_pairwise_squared_error(predictions, predictions, 0)
gradients_to_variables = optimizer.compute_gradients(loss)
- init_op = tf.global_variables_initializer()
+ init_op = variables.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
@@ -918,9 +850,9 @@ class MeanPairwiseSquaresErrorTest(tf.test.TestCase):
def testNonZeroLossWithPythonScalarWeight(self):
weights = 2.3
- loss = tf.losses.mean_pairwise_squared_error(
- predictions=tf.constant(self._predictions),
- labels=tf.constant(self._labels),
+ loss = losses.mean_pairwise_squared_error(
+ predictions=constant_op.constant(self._predictions),
+ labels=constant_op.constant(self._labels),
weights=weights)
with self.test_session():
self.assertAlmostEqual(weights * np.sum(self._expected_losses),
@@ -928,55 +860,59 @@ class MeanPairwiseSquaresErrorTest(tf.test.TestCase):
def testNonZeroLossWithScalarTensorWeight(self):
weights = 2.3
- loss = tf.losses.mean_pairwise_squared_error(
- predictions=tf.constant(self._predictions),
- labels=tf.constant(self._labels),
- weights=tf.constant(weights))
+ loss = losses.mean_pairwise_squared_error(
+ predictions=constant_op.constant(self._predictions),
+ labels=constant_op.constant(self._labels),
+ weights=constant_op.constant(weights))
with self.test_session():
self.assertAlmostEqual(weights * np.sum(self._expected_losses),
loss.eval(), 3)
def testNonZeroLossWithScalarZeroWeight(self):
weights = 0
- loss = tf.losses.mean_pairwise_squared_error(
- predictions=tf.constant(self._predictions),
- labels=tf.constant(self._labels),
- weights=tf.constant(weights))
+ loss = losses.mean_pairwise_squared_error(
+ predictions=constant_op.constant(self._predictions),
+ labels=constant_op.constant(self._labels),
+ weights=constant_op.constant(weights))
with self.test_session():
self.assertAlmostEqual(0, loss.eval(), 3)
def testNonZeroLossWithScalarTensorWeightWithPlaceholder(self):
weights = 2.3
- tf_predictions = tf.placeholder(tf.float32, shape=self._predictions.shape)
- tf_labels = tf.placeholder(tf.float32, shape=self._labels.shape)
- loss = tf.losses.mean_pairwise_squared_error(
+ tf_predictions = array_ops.placeholder(
+ dtypes.float32, shape=self._predictions.shape)
+ tf_labels = array_ops.placeholder(dtypes.float32, shape=self._labels.shape)
+ loss = losses.mean_pairwise_squared_error(
predictions=tf_predictions,
labels=tf_labels,
- weights=tf.constant(weights))
+ weights=constant_op.constant(weights))
with self.test_session() as sess:
- loss = sess.run(loss, feed_dict={
- tf_predictions: self._predictions,
- tf_labels: self._labels,
- })
+ loss = sess.run(loss,
+ feed_dict={
+ tf_predictions: self._predictions,
+ tf_labels: self._labels,
+ })
self.assertAlmostEqual(weights * np.sum(self._expected_losses), loss, 3)
def testNonZeroLossWithOneDimBatchSpecificWeights(self):
weights = np.asarray([2.0, 1.0]).reshape((2, 1))
expected_losses = np.multiply(weights, self._expected_losses)
- loss = tf.losses.mean_pairwise_squared_error(
- predictions=tf.constant(self._predictions),
- labels=tf.constant(self._labels),
- weights=tf.constant(weights, shape=[2]))
+ loss = losses.mean_pairwise_squared_error(
+ predictions=constant_op.constant(self._predictions),
+ labels=constant_op.constant(self._labels),
+ weights=constant_op.constant(
+ weights, shape=[2]))
with self.test_session():
self.assertAlmostEqual(np.sum(expected_losses), loss.eval(), 3)
def testZeroLossWithOneDimBatchZeroWeights(self):
weights = np.asarray([0.0, 0.0]).reshape((2, 1))
- loss = tf.losses.mean_pairwise_squared_error(
- predictions=tf.constant(self._predictions),
- labels=tf.constant(self._labels),
- weights=tf.constant(weights, shape=[2]))
+ loss = losses.mean_pairwise_squared_error(
+ predictions=constant_op.constant(self._predictions),
+ labels=constant_op.constant(self._labels),
+ weights=constant_op.constant(
+ weights, shape=[2]))
with self.test_session():
self.assertAlmostEqual(0, loss.eval(), 3)
@@ -984,162 +920,169 @@ class MeanPairwiseSquaresErrorTest(tf.test.TestCase):
weights = np.asarray([1.2, 3.4]).reshape((2, 1))
expected_losses = np.multiply(weights, self._expected_losses)
- tf_predictions = tf.placeholder(tf.float32, shape=self._predictions.shape)
- tf_labels = tf.placeholder(tf.int32, shape=self._labels.shape)
- loss = tf.losses.mean_pairwise_squared_error(
+ tf_predictions = array_ops.placeholder(
+ dtypes.float32, shape=self._predictions.shape)
+ tf_labels = array_ops.placeholder(dtypes.int32, shape=self._labels.shape)
+ loss = losses.mean_pairwise_squared_error(
predictions=tf_predictions,
labels=tf_labels,
- weights=tf.constant(weights, shape=[2]))
+ weights=constant_op.constant(
+ weights, shape=[2]))
with self.test_session() as sess:
- loss = sess.run(loss, feed_dict={
- tf_predictions: self._predictions,
- tf_labels: self._labels,
- })
+ loss = sess.run(loss,
+ feed_dict={
+ tf_predictions: self._predictions,
+ tf_labels: self._labels,
+ })
self.assertAlmostEqual(np.sum(expected_losses), loss, 3)
def testLossWithAllZeroBatchSpecificWeights(self):
weights = np.zeros((2, 1))
- loss = tf.losses.mean_pairwise_squared_error(
- predictions=tf.constant(self._predictions),
- labels=tf.constant(self._labels),
- weights=tf.constant(weights, shape=[2]))
+ loss = losses.mean_pairwise_squared_error(
+ predictions=constant_op.constant(self._predictions),
+ labels=constant_op.constant(self._labels),
+ weights=constant_op.constant(
+ weights, shape=[2]))
with self.test_session():
self.assertAlmostEqual(0.0, loss.eval(), 3)
-class CosineDistanceLossTest(tf.test.TestCase):
+class CosineDistanceLossTest(test.TestCase):
def setUp(self):
- self._predictions = np.asarray([[1, 0, 0], # Batch 1
- [0, 0, -1],
- [1, 0, 0], # Batch 2
- [1, 0, 0],
- [0, 0, -1], # Batch 3
- [1, 0, 0]]).reshape((3, 2, 3))
-
- self._labels = np.asarray([[1, 0, 0],
- [0, 0, 1],
- [0, 1, 0],
- [1, 0, 0],
- [0, 0, 1],
- [0, 1, 0]]).reshape((3, 2, 3))
+ self._predictions = np.asarray([
+ [1, 0, 0], # Batch 1
+ [0, 0, -1],
+ [1, 0, 0], # Batch 2
+ [1, 0, 0],
+ [0, 0, -1], # Batch 3
+ [1, 0, 0]
+ ]).reshape((3, 2, 3))
+
+ self._labels = np.asarray([[1, 0, 0], [0, 0, 1], [0, 1, 0], [1, 0, 0],
+ [0, 0, 1], [0, 1, 0]]).reshape((3, 2, 3))
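
For these integer fixtures the cosine distance per measurement is 1 - <prediction, label> (all rows are unit vectors), averaged over the 3 * 2 rows. A quick check of the value asserted in testPartiallyCorrectWithIntegerValues (illustrative sketch):

import numpy as np

preds = np.asarray([[1, 0, 0], [0, 0, -1], [1, 0, 0],
                    [1, 0, 0], [0, 0, -1], [1, 0, 0]], dtype=np.float64)
labels = np.asarray([[1, 0, 0], [0, 0, 1], [0, 1, 0],
                     [1, 0, 0], [0, 0, 1], [0, 1, 0]], dtype=np.float64)
# Per-row distances are (0, 2, 1, 0, 2, 1); their mean is 1.
print(np.mean(1.0 - np.sum(preds * labels, axis=1)))
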
def testValueErrorThrownWhenWeightIsNone(self):
with self.test_session():
with self.assertRaises(ValueError):
- tf.losses.cosine_distance(
- predictions=tf.constant(self._labels),
- labels=tf.constant(self._labels),
+ losses.cosine_distance(
+ predictions=constant_op.constant(self._labels),
+ labels=constant_op.constant(self._labels),
dim=2,
weights=None)
def testAllCorrectNoWeights(self):
- loss = tf.losses.cosine_distance(
- predictions=tf.constant(self._labels),
- labels=tf.constant(self._labels),
+ loss = losses.cosine_distance(
+ predictions=constant_op.constant(self._labels),
+ labels=constant_op.constant(self._labels),
dim=2)
with self.test_session():
self.assertAlmostEqual(0, loss.eval(), 5)
def testPartiallyCorrectWithIntegerValues(self):
- loss = tf.losses.cosine_distance(
- predictions=tf.constant(self._predictions),
- labels=tf.constant(self._labels),
+ loss = losses.cosine_distance(
+ predictions=constant_op.constant(self._predictions),
+ labels=constant_op.constant(self._labels),
dim=2)
with self.test_session():
self.assertAlmostEqual(1, loss.eval(), 5)
def testPartiallyCorrectFloatingPointValues(self):
- predictions = np.matrix((
- '0.819031913261206 0.567041924552012 0.087465312324590;'
- '-0.665139432070255 -0.739487441769973 -0.103671883216994;'
- '0.707106781186548 -0.707106781186548 0'))
- labels = np.matrix((
- '0.819031913261206 0.567041924552012 0.087465312324590;'
- '0.665139432070255 0.739487441769973 0.103671883216994;'
- '0.707106781186548 0.707106781186548 0'))
+ predictions = np.matrix(
+ ('0.819031913261206 0.567041924552012 0.087465312324590;'
+ '-0.665139432070255 -0.739487441769973 -0.103671883216994;'
+ '0.707106781186548 -0.707106781186548 0'))
+ labels = np.matrix(('0.819031913261206 0.567041924552012 0.087465312324590;'
+ '0.665139432070255 0.739487441769973 0.103671883216994;'
+ '0.707106781186548 0.707106781186548 0'))
- tf_preds = tf.constant(predictions, shape=(3, 1, 3), dtype=tf.float32)
- tf_labels = tf.constant(labels, shape=(3, 1, 3), dtype=tf.float32)
- loss = tf.losses.cosine_distance(tf_labels, tf_preds, dim=2)
+ tf_preds = constant_op.constant(
+ predictions, shape=(3, 1, 3), dtype=dtypes.float32)
+ tf_labels = constant_op.constant(
+ labels, shape=(3, 1, 3), dtype=dtypes.float32)
+ loss = losses.cosine_distance(tf_labels, tf_preds, dim=2)
with self.test_session():
self.assertAlmostEqual(1.0, loss.eval(), 5)
def testSampleSpecificWeights(self):
- loss = tf.losses.cosine_distance(
- predictions=tf.constant(self._predictions),
- labels=tf.constant(self._labels),
+ loss = losses.cosine_distance(
+ predictions=constant_op.constant(self._predictions),
+ labels=constant_op.constant(self._labels),
dim=2,
- weights=tf.constant([1, 0, 0]))
+ weights=constant_op.constant([1, 0, 0]))
with self.test_session():
self.assertEqual(1.0, loss.eval())
def testMeasurementSpecificWeights(self):
- loss = tf.losses.cosine_distance(
- predictions=tf.constant(self._predictions),
- labels=tf.constant(self._labels),
+ loss = losses.cosine_distance(
+ predictions=constant_op.constant(self._predictions),
+ labels=constant_op.constant(self._labels),
dim=2,
- weights=tf.constant([1, 0, 0, 1, 1, 1], shape=(3, 2)))
+ weights=constant_op.constant(
+ [1, 0, 0, 1, 1, 1], shape=(3, 2)))
with self.test_session():
self.assertEqual(3.0 / 4.0, loss.eval())
def testValueErrorThrownWithShapelessPlaceholder(self):
- tf_predictions = tf.placeholder(tf.float32)
+ tf_predictions = array_ops.placeholder(dtypes.float32)
with self.test_session():
with self.assertRaises(ValueError):
- tf.losses.cosine_distance(
+ losses.cosine_distance(
predictions=tf_predictions,
- labels=tf.constant(self._labels),
+ labels=constant_op.constant(self._labels),
dim=2,
- weights=tf.constant([1, 0, 0, 1, 1, 1], shape=(3, 2)))
+ weights=constant_op.constant(
+ [1, 0, 0, 1, 1, 1], shape=(3, 2)))
def testMeasurementSpecificWeightsWithPlaceholderWithShape(self):
- tf_predictions = tf.placeholder(tf.float32, shape=self._labels.shape)
- loss = tf.losses.cosine_distance(
+ tf_predictions = array_ops.placeholder(
+ dtypes.float32, shape=self._labels.shape)
+ loss = losses.cosine_distance(
predictions=tf_predictions,
- labels=tf.constant(self._labels),
+ labels=constant_op.constant(self._labels),
dim=2,
- weights=tf.constant([1, 0, 0, 1, 1, 1], shape=(3, 2)))
+ weights=constant_op.constant(
+ [1, 0, 0, 1, 1, 1], shape=(3, 2)))
with self.test_session() as sess:
loss = sess.run(loss, feed_dict={tf_predictions: self._predictions})
self.assertEqual(3.0 / 4.0, loss)
def testZeroLossWhenAllSampleSpecificWeightsAreZero(self):
- loss = tf.losses.cosine_distance(
- predictions=tf.constant(self._predictions),
- labels=tf.constant(self._labels),
+ loss = losses.cosine_distance(
+ predictions=constant_op.constant(self._predictions),
+ labels=constant_op.constant(self._labels),
dim=2,
- weights=tf.zeros((3,)))
+ weights=array_ops.zeros((3,)))
with self.test_session():
self.assertEqual(0, loss.eval())
def testZeroLossWhenAllMeasurementSpecificWeightsAreZero(self):
- loss = tf.losses.cosine_distance(
- predictions=tf.constant(self._predictions),
- labels=tf.constant(self._labels),
+ loss = losses.cosine_distance(
+ predictions=constant_op.constant(self._predictions),
+ labels=constant_op.constant(self._labels),
dim=2,
- weights=tf.zeros((3, 2)))
+ weights=array_ops.zeros((3, 2)))
with self.test_session():
self.assertEqual(0, loss.eval())
-class AddLossTest(tf.test.TestCase):
+class AddLossTest(test.TestCase):
def testNoCollectLossesBatch2(self):
- logits = tf.constant([[1.2, 0.4, -1.0, -1.1]] * 2)
- labels = tf.constant([[1.0, 0.0, 0.0, 1.0]] * 2)
- self.assertFalse(tf.losses.get_losses())
- tf.losses.absolute_difference(logits, labels, loss_collection=None)
- tf.losses.log_loss(logits, labels, loss_collection=None)
- tf.losses.mean_squared_error(logits, labels, loss_collection=None)
- tf.losses.sigmoid_cross_entropy(logits, labels, loss_collection=None)
- tf.losses.softmax_cross_entropy(logits, labels, loss_collection=None)
- self.assertFalse(tf.losses.get_losses())
+ logits = constant_op.constant([[1.2, 0.4, -1.0, -1.1]] * 2)
+ labels = constant_op.constant([[1.0, 0.0, 0.0, 1.0]] * 2)
+ self.assertFalse(util.get_losses())
+ losses.absolute_difference(logits, labels, loss_collection=None)
+ losses.log_loss(logits, labels, loss_collection=None)
+ losses.mean_squared_error(logits, labels, loss_collection=None)
+ losses.sigmoid_cross_entropy(logits, labels, loss_collection=None)
+ losses.softmax_cross_entropy(logits, labels, loss_collection=None)
+ self.assertFalse(util.get_losses())
-class ComputeWeightedLossTest(tf.test.TestCase):
+class ComputeWeightedLossTest(test.TestCase):
def setUp(self):
self._shape = (3, 2, 4)
@@ -1155,132 +1098,131 @@ class ComputeWeightedLossTest(tf.test.TestCase):
self._unweighted_loss = np.mean(self._raw_losses)
def testUnweighted(self):
- with tf.Graph().as_default():
- self.assertEqual(0, len(tf.contrib.losses.get_losses()))
+ with ops.Graph().as_default():
+ self.assertEqual(0, len(loss_ops.get_losses()))
raw_losses = self._raw_losses
shape = self._shape
- unweighted_losses = (
- tf.losses.compute_weighted_loss(raw_losses),
- tf.losses.compute_weighted_loss(raw_losses, weights=1.0),
- tf.losses.compute_weighted_loss(
- raw_losses, weights=np.ones(shape=shape[0:1])),
- tf.losses.compute_weighted_loss(
- raw_losses, weights=np.ones(shape=shape[0:2])),
- tf.losses.compute_weighted_loss(
- raw_losses, weights=np.ones(shape=shape))
- )
- self.assertEqual(5, len(tf.contrib.losses.get_losses()))
+ unweighted_losses = (losses.compute_weighted_loss(raw_losses),
+ losses.compute_weighted_loss(
+ raw_losses, weights=1.0),
+ losses.compute_weighted_loss(
+ raw_losses, weights=np.ones(shape=shape[0:1])),
+ losses.compute_weighted_loss(
+ raw_losses, weights=np.ones(shape=shape[0:2])),
+ losses.compute_weighted_loss(
+ raw_losses, weights=np.ones(shape=shape)))
+ self.assertEqual(5, len(loss_ops.get_losses()))
with self.test_session():
for unweighted_loss in unweighted_losses:
self.assertAllClose(self._unweighted_loss, unweighted_loss.eval())
def testScalarWeight(self):
- with tf.Graph().as_default():
- self.assertEqual(0, len(tf.contrib.losses.get_losses()))
+ with ops.Graph().as_default():
+ self.assertEqual(0, len(loss_ops.get_losses()))
weight = 17.0
- weighted_loss = tf.losses.compute_weighted_loss(
+ weighted_loss = losses.compute_weighted_loss(
self._raw_losses, weights=weight)
- self.assertEqual(1, len(tf.contrib.losses.get_losses()))
+ self.assertEqual(1, len(loss_ops.get_losses()))
with self.test_session():
self.assertAllClose(
- np.mean(weight * self._raw_losses),
- weighted_loss.eval())
+ np.mean(weight * self._raw_losses), weighted_loss.eval())
# TODO(b/33556118): Bug: `loss1` should be the same as `testUnweighted`, and
# `loss17` should be the same as `testScalarWeight`.
def testScalar1DWeight(self):
- with tf.Graph().as_default():
- self.assertEqual(0, len(tf.contrib.losses.get_losses()))
- loss1 = tf.losses.compute_weighted_loss(self._raw_losses, weights=(1.0,))
- self.assertEqual(1, len(tf.contrib.losses.get_losses()))
+ with ops.Graph().as_default():
+ self.assertEqual(0, len(loss_ops.get_losses()))
+ loss1 = losses.compute_weighted_loss(self._raw_losses, weights=(1.0,))
+ self.assertEqual(1, len(loss_ops.get_losses()))
weight = 17.0
- loss17 = tf.losses.compute_weighted_loss(
- self._raw_losses, weights=(weight,))
- self.assertEqual(2, len(tf.contrib.losses.get_losses()))
+ loss17 = losses.compute_weighted_loss(self._raw_losses, weights=(weight,))
+ self.assertEqual(2, len(loss_ops.get_losses()))
with self.test_session():
+ self.assertAllClose(self._unweighted_loss * self._shape[0],
+ loss1.eval())
self.assertAllClose(
- self._unweighted_loss * self._shape[0],
- loss1.eval())
- self.assertAllClose(
- np.mean(weight * self._raw_losses) * self._shape[0],
- loss17.eval())
+ np.mean(weight * self._raw_losses) * self._shape[0], loss17.eval())
def testInvalid1DWeight(self):
- with tf.Graph().as_default():
+ with ops.Graph().as_default():
with self.assertRaisesRegexp(ValueError, 'Dimensions must be equal'):
- tf.losses.compute_weighted_loss(self._raw_losses, weights=(17.0, 31.0))
+ losses.compute_weighted_loss(self._raw_losses, weights=(17.0, 31.0))
def testInvalid4DWeight(self):
- with tf.Graph().as_default():
+ with ops.Graph().as_default():
with self.assertRaisesRegexp(ValueError, 'Invalid weights shape'):
- tf.losses.compute_weighted_loss(
+ losses.compute_weighted_loss(
self._raw_losses, weights=np.zeros(shape=(2, 2, 2, 2)))
def test3Weight(self):
- with tf.Graph().as_default():
- self.assertEqual(0, len(tf.contrib.losses.get_losses()))
+ with ops.Graph().as_default():
+ self.assertEqual(0, len(loss_ops.get_losses()))
weights3 = (17.0, 5.0, 2.0)
- weighted_loss = tf.losses.compute_weighted_loss(
+ weighted_loss = losses.compute_weighted_loss(
self._raw_losses, weights=weights3)
- self.assertEqual(1, len(tf.contrib.losses.get_losses()))
+ self.assertEqual(1, len(loss_ops.get_losses()))
with self.test_session():
weights3x1x1 = np.reshape(weights3, (3, 1, 1))
self.assertAllClose(
- np.mean(weights3x1x1 * self._raw_losses),
- weighted_loss.eval())
+ np.mean(weights3x1x1 * self._raw_losses), weighted_loss.eval())
def test3x1Weight(self):
- with tf.Graph().as_default():
- self.assertEqual(0, len(tf.contrib.losses.get_losses()))
- weights3x1 = ((17.0,), (5.0,), (2.0,),)
- weighted_loss = tf.losses.compute_weighted_loss(
+ with ops.Graph().as_default():
+ self.assertEqual(0, len(loss_ops.get_losses()))
+ weights3x1 = (
+ (17.0,),
+ (5.0,),
+ (2.0,),)
+ weighted_loss = losses.compute_weighted_loss(
self._raw_losses, weights=weights3x1)
- self.assertEqual(1, len(tf.contrib.losses.get_losses()))
+ self.assertEqual(1, len(loss_ops.get_losses()))
with self.test_session():
weights3x1x1 = np.reshape(weights3x1, (3, 1, 1))
self.assertAllClose(
- np.mean(weights3x1x1 * self._raw_losses),
- weighted_loss.eval())
+ np.mean(weights3x1x1 * self._raw_losses), weighted_loss.eval())
# TODO(ptucker): Bug: this should be the same as `test3x1Weight`.
def test3x1x1Weight(self):
- with tf.Graph().as_default():
- self.assertEqual(0, len(tf.contrib.losses.get_losses()))
- weights3x1x1 = (((17.0,),), ((5.0,),), ((2.0,),),)
- weighted_loss = tf.losses.compute_weighted_loss(
+ with ops.Graph().as_default():
+ self.assertEqual(0, len(loss_ops.get_losses()))
+ weights3x1x1 = (
+ ((17.0,),),
+ ((5.0,),),
+ ((2.0,),),)
+ weighted_loss = losses.compute_weighted_loss(
self._raw_losses, weights=weights3x1x1)
- self.assertEqual(1, len(tf.contrib.losses.get_losses()))
+ self.assertEqual(1, len(loss_ops.get_losses()))
with self.test_session():
self.assertAllClose(
np.mean(weights3x1x1 * self._raw_losses) * self._shape[1],
weighted_loss.eval())
def test3x2Weight(self):
- with tf.Graph().as_default():
- self.assertEqual(0, len(tf.contrib.losses.get_losses()))
+ with ops.Graph().as_default():
+ self.assertEqual(0, len(loss_ops.get_losses()))
weights3x2 = (
(17.0, 3.0),
(5.0, 31.0),
- (2.0, 7.0),
- )
- weighted_loss = tf.losses.compute_weighted_loss(
+ (2.0, 7.0),)
+ weighted_loss = losses.compute_weighted_loss(
self._raw_losses, weights=weights3x2)
- self.assertEqual(1, len(tf.contrib.losses.get_losses()))
+ self.assertEqual(1, len(loss_ops.get_losses()))
with self.test_session():
weights3x2x1 = np.reshape(weights3x2, (3, 2, 1))
self.assertAllClose(
- np.mean(weights3x2x1 * self._raw_losses),
- weighted_loss.eval())
+ np.mean(weights3x2x1 * self._raw_losses), weighted_loss.eval())
# TODO(b/33556118): Bug: this should be averaged across all dimensions, not
# summed across dim 0.
def test1x2Weight(self):
- with tf.Graph().as_default():
- self.assertEqual(0, len(tf.contrib.losses.get_losses()))
- weights1x2 = ((17.0, 3.0,),)
- weighted_loss = tf.losses.compute_weighted_loss(
+ with ops.Graph().as_default():
+ self.assertEqual(0, len(loss_ops.get_losses()))
+ weights1x2 = ((
+ 17.0,
+ 3.0,),)
+ weighted_loss = losses.compute_weighted_loss(
self._raw_losses, weights=weights1x2)
- self.assertEqual(1, len(tf.contrib.losses.get_losses()))
+ self.assertEqual(1, len(loss_ops.get_losses()))
with self.test_session():
weights1x2x1 = np.reshape(weights1x2, (1, 2, 1))
self.assertAllClose(
@@ -1290,12 +1232,14 @@ class ComputeWeightedLossTest(tf.test.TestCase):
# TODO(b/33556118): Bug: this should be averaged across all dimensions, not
# summed across dim 0.
def test1x2x1Weight(self):
- with tf.Graph().as_default():
- self.assertEqual(0, len(tf.contrib.losses.get_losses()))
- weights1x2x1 = (((17.0,), (3.0,),),)
- weighted_loss = tf.losses.compute_weighted_loss(
+ with ops.Graph().as_default():
+ self.assertEqual(0, len(loss_ops.get_losses()))
+ weights1x2x1 = ((
+ (17.0,),
+ (3.0,),),)
+ weighted_loss = losses.compute_weighted_loss(
self._raw_losses, weights=weights1x2x1)
- self.assertEqual(1, len(tf.contrib.losses.get_losses()))
+ self.assertEqual(1, len(loss_ops.get_losses()))
with self.test_session():
self.assertAllClose(
np.mean(weights1x2x1 * self._raw_losses) * self._shape[0],
@@ -1304,12 +1248,12 @@ class ComputeWeightedLossTest(tf.test.TestCase):
# TODO(b/33556118): Bug: this should be averaged across all dimensions, not
# summed across dims 0 & 1.
def test1x1x4Weight(self):
- with tf.Graph().as_default():
- self.assertEqual(0, len(tf.contrib.losses.get_losses()))
+ with ops.Graph().as_default():
+ self.assertEqual(0, len(loss_ops.get_losses()))
weights1x1x4 = (((17.0, 13.0, 2.0, 5.0),),)
- weighted_loss = tf.losses.compute_weighted_loss(
+ weighted_loss = losses.compute_weighted_loss(
self._raw_losses, weights=weights1x1x4)
- self.assertEqual(1, len(tf.contrib.losses.get_losses()))
+ self.assertEqual(1, len(loss_ops.get_losses()))
shape = self._shape
with self.test_session():
self.assertAllClose(
@@ -1319,47 +1263,39 @@ class ComputeWeightedLossTest(tf.test.TestCase):
# TODO(b/33556118): Bug: this should be averaged across all dimensions, not
# summed across dim 0.
def test1x2x4Weight(self):
- with tf.Graph().as_default():
- self.assertEqual(0, len(tf.contrib.losses.get_losses()))
- weights1x2x4 = (
- (
- (17.0, 13.0, 2.0, 5.0),
- (3.0, 13.0, 11.0, 2.0),
- ),
- )
- weighted_loss = tf.losses.compute_weighted_loss(
+ with ops.Graph().as_default():
+ self.assertEqual(0, len(loss_ops.get_losses()))
+ weights1x2x4 = ((
+ (17.0, 13.0, 2.0, 5.0),
+ (3.0, 13.0, 11.0, 2.0),),)
+ weighted_loss = losses.compute_weighted_loss(
self._raw_losses, weights=weights1x2x4)
- self.assertEqual(1, len(tf.contrib.losses.get_losses()))
+ self.assertEqual(1, len(loss_ops.get_losses()))
with self.test_session():
self.assertAllClose(
np.mean(weights1x2x4 * self._raw_losses) * self._shape[0],
weighted_loss.eval())
def test3x2x4Weight(self):
- with tf.Graph().as_default():
- self.assertEqual(0, len(tf.contrib.losses.get_losses()))
+ with ops.Graph().as_default():
+ self.assertEqual(0, len(loss_ops.get_losses()))
weights3x2x4 = (
(
(17.0, 13.0, 2.0, 5.0),
- (3.0, 13.0, 11.0, 2.0),
- ),
+ (3.0, 13.0, 11.0, 2.0),),
(
(5.0, 31.0, 17.0, 5.0),
- (13.0, 3.0, 1.0, 11.0),
- ),
+ (13.0, 3.0, 1.0, 11.0),),
(
(7.0, 3.0, 11.0, 5.0),
- (13.0, 11.0, 1.0, 7.0),
- ),
- )
- weighted_loss = tf.losses.compute_weighted_loss(
+ (13.0, 11.0, 1.0, 7.0),),)
+ weighted_loss = losses.compute_weighted_loss(
self._raw_losses, weights=weights3x2x4)
- self.assertEqual(1, len(tf.contrib.losses.get_losses()))
+ self.assertEqual(1, len(loss_ops.get_losses()))
with self.test_session():
self.assertAllClose(
- np.mean(weights3x2x4 * self._raw_losses),
- weighted_loss.eval())
+ np.mean(weights3x2x4 * self._raw_losses), weighted_loss.eval())
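
All of these broadcasting cases reduce to mean(weights * raw_losses) after the weights are reshaped onto the (3, 2, 4) loss array, times the extra dim-0/dim-1 factor that the TODO(b/33556118) comments flag as a bug. For the 3x2 case (illustrative sketch, not part of the commit):

import numpy as np

raw_losses = np.random.rand(3, 2, 4)
weights3x2 = np.asarray([[17.0, 3.0], [5.0, 31.0], [2.0, 7.0]])
# Reshape to (3, 2, 1) so the weights broadcast across the last dim, then
# average -- the value test3x2Weight asserts against.
expected = np.mean(weights3x2.reshape(3, 2, 1) * raw_losses)
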
if __name__ == '__main__':
- tf.test.main()
+ test.main()
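
The same mechanical rewrite repeats in every file below: the hourglass `import tensorflow as tf` is dropped and each test imports the defining modules directly. Schematically (a sketch of the pattern, not itself a hunk; the losses import path is inferred, since this file's import hunk is not shown above):

# Before: all symbols reached through the top-level package.
import tensorflow as tf
loss = tf.losses.log_loss(labels, tf.constant(predictions))

# After: each symbol imported from the module that defines it.
from tensorflow.python.framework import constant_op
from tensorflow.python.ops.losses import losses
loss = losses.log_loss(labels, constant_op.constant(predictions))
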
diff --git a/tensorflow/python/kernel_tests/lrn_op_test.py b/tensorflow/python/kernel_tests/lrn_op_test.py
index 3957d083dc..9eba059549 100644
--- a/tensorflow/python/kernel_tests/lrn_op_test.py
+++ b/tensorflow/python/kernel_tests/lrn_op_test.py
@@ -12,8 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for local response normalization."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
@@ -21,13 +21,21 @@ from __future__ import print_function
import copy
import numpy as np
-import tensorflow as tf
+
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import gradient_checker
+from tensorflow.python.ops import gradients_impl
+from tensorflow.python.ops import nn
+import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
+from tensorflow.python.platform import test
-class LRNOpTest(tf.test.TestCase):
+class LRNOpTest(test.TestCase):
- def _LRN(self, input_image, lrn_depth_radius=5, bias=1.0,
- alpha=1.0, beta=0.5):
+ def _LRN(self, input_image, lrn_depth_radius=5, bias=1.0, alpha=1.0,
+ beta=0.5):
"""Compute expected result."""
output = copy.deepcopy(input_image)
batch_size = input_image.shape[0]
@@ -51,7 +59,7 @@ class LRNOpTest(tf.test.TestCase):
shape = np.random.randint(1, 16, size=4)
# Make depth at least 2 to make it meaningful
shape[3] += 1
- p = tf.placeholder(dtype, shape=shape)
+ p = array_ops.placeholder(dtype, shape=shape)
# random depth_radius, bias, alpha, beta. cuDNN requires depth_radius to
# be in [1, 7].
lrn_depth_radius = np.random.randint(1, min(8, shape[3]))
@@ -60,17 +68,25 @@ class LRNOpTest(tf.test.TestCase):
alpha = 2.0 * np.random.rand()
# cuDNN requires beta >= 0.01.
beta = 0.01 + 2.0 * np.random.rand()
- lrn_t = tf.nn.local_response_normalization(
- p, name="lrn", depth_radius=lrn_depth_radius, bias=bias,
- alpha=alpha, beta=beta)
+ lrn_t = nn.local_response_normalization(
+ p,
+ name="lrn",
+ depth_radius=lrn_depth_radius,
+ bias=bias,
+ alpha=alpha,
+ beta=beta)
params = {p: np.random.rand(*shape).astype("f")}
result = lrn_t.eval(feed_dict=params)
expected = self._LRN(
- params[p], lrn_depth_radius=lrn_depth_radius, bias=bias, alpha=alpha,
+ params[p],
+ lrn_depth_radius=lrn_depth_radius,
+ bias=bias,
+ alpha=alpha,
beta=beta)
err = np.amax(np.abs(result - expected))
- print("LRN error for bias ", bias, "alpha ", alpha, " beta ", beta, " is ", err)
- if dtype == tf.float32:
+ print("LRN error for bias ", bias, "alpha ", alpha, " beta ", beta, " is ",
+ err)
+ if dtype == dtypes.float32:
self.assertTrue(err < 1e-4)
else:
self.assertTrue(err < 1e-2)
@@ -78,19 +94,18 @@ class LRNOpTest(tf.test.TestCase):
def testCompute(self):
for _ in range(2):
- self._RunAndVerify(tf.float32)
+ self._RunAndVerify(dtypes.float32)
# Enable when LRN supports tf.float16 on GPU.
- if not tf.test.is_gpu_available():
- self._RunAndVerify(tf.float16)
+ if not test.is_gpu_available():
+ self._RunAndVerify(dtypes.float16)
def testGradientsZeroInput(self):
with self.test_session(use_gpu=True):
shape = [4, 4, 4, 4]
- p = tf.placeholder(tf.float32, shape=shape)
+ p = array_ops.placeholder(dtypes.float32, shape=shape)
inp_array = np.zeros(shape).astype("f")
- lrn_op = tf.nn.local_response_normalization(p, 2, 1.0, 0.0,
- 1.0, name="lrn")
- grad = tf.gradients([lrn_op], [p])[0]
+ lrn_op = nn.local_response_normalization(p, 2, 1.0, 0.0, 1.0, name="lrn")
+ grad = gradients_impl.gradients([lrn_op], [p])[0]
params = {p: inp_array}
r = grad.eval(feed_dict=params)
expected = np.ones(shape).astype("f")
@@ -110,33 +125,35 @@ class LRNOpTest(tf.test.TestCase):
alpha = 1.0 * np.random.rand()
# cuDNN requires beta >= 0.01.
beta = 0.01 + 1.0 * np.random.rand()
- if dtype == tf.float32:
+ if dtype == dtypes.float32:
inp_array = np.random.rand(*shape).astype(np.float32)
else:
inp_array = np.random.rand(*shape).astype(np.float16)
- inp = tf.constant(
- list(inp_array.ravel(order="C")),
- shape=shape,
- dtype=dtype)
- lrn_op = tf.nn.local_response_normalization(
- inp, name="lrn", depth_radius=lrn_depth_radius, bias=bias,
- alpha=alpha, beta=beta)
- err = tf.test.compute_gradient_error(inp, shape, lrn_op, shape)
+ inp = constant_op.constant(
+ list(inp_array.ravel(order="C")), shape=shape, dtype=dtype)
+ lrn_op = nn.local_response_normalization(
+ inp,
+ name="lrn",
+ depth_radius=lrn_depth_radius,
+ bias=bias,
+ alpha=alpha,
+ beta=beta)
+ err = gradient_checker.compute_gradient_error(inp, shape, lrn_op, shape)
print("LRN Gradient error for bias ", bias, "alpha ", alpha, " beta ", beta,
" is ", err)
- if dtype == tf.float32:
+ if dtype == dtypes.float32:
self.assertLess(err, 1e-4)
else:
self.assertLess(err, 1.0)
def testGradients(self):
for _ in range(2):
- self._RunAndVerifyGradients(tf.float32)
+ self._RunAndVerifyGradients(dtypes.float32)
# Enable when LRN supports tf.float16 on GPU.
- if not tf.test.is_gpu_available():
- self._RunAndVerifyGradients(tf.float16)
+ if not test.is_gpu_available():
+ self._RunAndVerifyGradients(dtypes.float16)
if __name__ == "__main__":
- tf.test.main()
+ test.main()
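
_LRN above is the pure-Python oracle: local response normalization divides each activation by (bias + alpha * sum of squares over a depth window) ** beta. A compact NumPy rendering of that formula (a sketch using the oracle's default parameters, not the commit's code):

import numpy as np

def lrn_reference(x, depth_radius=5, bias=1.0, alpha=1.0, beta=0.5):
  # x is NHWC; normalize each depth slice by the windowed sum of squares.
  out = np.empty_like(x)
  depth = x.shape[3]
  for d in range(depth):
    lo, hi = max(0, d - depth_radius), min(depth, d + depth_radius + 1)
    norm = bias + alpha * np.sum(x[:, :, :, lo:hi] ** 2, axis=3)
    out[:, :, :, d] = x[:, :, :, d] / norm ** beta
  return out
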
diff --git a/tensorflow/python/kernel_tests/matmul_op_test.py b/tensorflow/python/kernel_tests/matmul_op_test.py
index 69242e3b73..f11f86d5d5 100644
--- a/tensorflow/python/kernel_tests/matmul_op_test.py
+++ b/tensorflow/python/kernel_tests/matmul_op_test.py
@@ -12,20 +12,28 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for tensorflow.ops.math_ops.matmul."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
+from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import gradient_checker
+from tensorflow.python.ops import math_ops
+from tensorflow.python.ops import random_ops
+from tensorflow.python.ops import variables
+from tensorflow.python.platform import test
-class MatMulTest(tf.test.TestCase):
+class MatMulTest(test.TestCase):
def assertAllCloseAccordingToType(self, a, b, rtol=1e-6, atol=1e-6):
"""Like test_util.assertAllCloseToType, but with looser fp16 limits.
@@ -54,7 +62,7 @@ class MatMulTest(tf.test.TestCase):
y_mat = np.matrix(y).T if transpose_y else np.matrix(y)
np_ans = x_mat * y_mat
with self.test_session(use_gpu=False):
- tf_ans = tf.matmul(x, y, transpose_x, transpose_y).eval()
+ tf_ans = math_ops.matmul(x, y, transpose_x, transpose_y).eval()
self.assertAllCloseAccordingToType(np_ans, tf_ans)
self.assertAllEqual(np_ans.shape, tf_ans.shape)
@@ -63,7 +71,7 @@ class MatMulTest(tf.test.TestCase):
y_mat = np.matrix(y).T if transpose_y else np.matrix(y)
np_ans = x_mat * y_mat
with self.test_session(use_gpu=True):
- tf_ans = tf.matmul(x, y, transpose_x, transpose_y).eval()
+ tf_ans = math_ops.matmul(x, y, transpose_x, transpose_y).eval()
self.assertAllCloseAccordingToType(np_ans, tf_ans)
self.assertAllEqual(np_ans.shape, tf_ans.shape)
@@ -77,8 +85,9 @@ class MatMulTest(tf.test.TestCase):
imag = self._randMatrix(rows, cols, float_dtype)
return real + 1j * imag
else:
- return np.random.uniform(low=1.0, high=100.0, size=rows * cols).reshape(
- [rows, cols]).astype(dtype)
+ return np.random.uniform(
+ low=1.0, high=100.0, size=rows * cols).reshape(
+ [rows, cols]).astype(dtype)
# Basic test:
# [ [1],
@@ -279,62 +288,74 @@ class MatMulTest(tf.test.TestCase):
self._testGpuMatmul(x, y)
def testShapeErrors(self):
- a = tf.placeholder(tf.float32, [32, 37])
- b = tf.placeholder(tf.float32, [36, 2])
- c = tf.placeholder(tf.float32, [37])
- with self.assertRaisesRegexp(
- ValueError, "Dimensions must be equal, but are 37 and 36"):
- tf.matmul(a, b)
+ a = array_ops.placeholder(dtypes.float32, [32, 37])
+ b = array_ops.placeholder(dtypes.float32, [36, 2])
+ c = array_ops.placeholder(dtypes.float32, [37])
+ with self.assertRaisesRegexp(ValueError,
+ "Dimensions must be equal, but are 37 and 36"):
+ math_ops.matmul(a, b)
with self.assertRaisesRegexp(ValueError, "must be rank 2"):
- tf.matmul(a, c)
+ math_ops.matmul(a, c)
def testShapeInference(self):
"""Tests common_shapes.call_cpp_shape_fn."""
- a = tf.constant([2] * 6, shape=[3, 2])
- b = tf.constant([2] * 2, shape=[2, 1])
- mm = tf.matmul(a, b)
+ a = constant_op.constant([2] * 6, shape=[3, 2])
+ b = constant_op.constant([2] * 2, shape=[2, 1])
+ mm = math_ops.matmul(a, b)
self.assertEqual([3, 1], mm.get_shape())
# Transpose arguments are respected.
- a = tf.constant([2] * 6, shape=[2, 3])
- b = tf.constant([2] * 2, shape=[1, 2])
- mm = tf.matmul(a, b, transpose_a=True, transpose_b=True)
+ a = constant_op.constant([2] * 6, shape=[2, 3])
+ b = constant_op.constant([2] * 2, shape=[1, 2])
+ mm = math_ops.matmul(a, b, transpose_a=True, transpose_b=True)
self.assertEqual([3, 1], mm.get_shape())
# Unknown dims come through in output.
- a = tf.placeholder(np.float32)
- b = tf.placeholder(np.float32)
- mm = tf.matmul(a, b)
- self.assertEqual(tf.TensorShape(None), mm.get_shape())
+ a = array_ops.placeholder(np.float32)
+ b = array_ops.placeholder(np.float32)
+ mm = math_ops.matmul(a, b)
+ self.assertEqual(tensor_shape.TensorShape(None), mm.get_shape())
- a = tf.constant([1] * 6, shape=[2, 3])
- b = tf.constant([2] * 2, shape=[1, 2])
+ a = constant_op.constant([1] * 6, shape=[2, 3])
+ b = constant_op.constant([2] * 2, shape=[1, 2])
with self.assertRaisesRegexp(ValueError, ".*must be equal.*"):
- tf.matmul(a, b, transpose_a=False, transpose_b=True)
+ math_ops.matmul(a, b, transpose_a=False, transpose_b=True)
# TODO(zhifengc): Figures out how to test matmul gradients on GPU.
-class MatMulGradientTest(tf.test.TestCase):
+class MatMulGradientTest(test.TestCase):
def testGradientInput0(self):
with self.test_session(use_gpu=False):
- x = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2],
- dtype=tf.float64, name="x")
- y = tf.constant([1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7],
- shape=[2, 4], dtype=tf.float64, name="y")
- m = tf.matmul(x, y, name="matmul")
- err = tf.test.compute_gradient_error(x, [3, 2], m, [3, 4])
+ x = constant_op.constant(
+ [1.0, 2.0, 3.0, 4.0, 5.0, 6.0],
+ shape=[3, 2],
+ dtype=dtypes.float64,
+ name="x")
+ y = constant_op.constant(
+ [1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7],
+ shape=[2, 4],
+ dtype=dtypes.float64,
+ name="y")
+ m = math_ops.matmul(x, y, name="matmul")
+ err = gradient_checker.compute_gradient_error(x, [3, 2], m, [3, 4])
print("matmul input0 gradient err = ", err)
self.assertLess(err, 1e-10)
def testGradientInput1(self):
with self.test_session(use_gpu=False):
- x = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2],
- dtype=tf.float64, name="x")
- y = tf.constant([1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7],
- shape=[2, 4], dtype=tf.float64, name="y")
- m = tf.matmul(x, y, name="matmul")
- err = tf.test.compute_gradient_error(y, [2, 4], m, [3, 4])
+ x = constant_op.constant(
+ [1.0, 2.0, 3.0, 4.0, 5.0, 6.0],
+ shape=[3, 2],
+ dtype=dtypes.float64,
+ name="x")
+ y = constant_op.constant(
+ [1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7],
+ shape=[2, 4],
+ dtype=dtypes.float64,
+ name="y")
+ m = math_ops.matmul(x, y, name="matmul")
+ err = gradient_checker.compute_gradient_error(y, [2, 4], m, [3, 4])
print("matmul input1 gradient err = ", err)
self.assertLess(err, 1e-10)
@@ -346,12 +367,18 @@ class MatMulGradientTest(tf.test.TestCase):
if transpose_b:
shape_y = list(reversed(shape_y))
with self.test_session(use_gpu=False):
- x = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=shape_x,
- dtype=tf.float64, name="x")
- y = tf.constant([1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7],
- shape=shape_y, dtype=tf.float64, name="y")
- m = tf.matmul(x, y, transpose_a, transpose_b, name="matmul")
- err = tf.test.compute_gradient_error(x, shape_x, m, [3, 4])
+ x = constant_op.constant(
+ [1.0, 2.0, 3.0, 4.0, 5.0, 6.0],
+ shape=shape_x,
+ dtype=dtypes.float64,
+ name="x")
+ y = constant_op.constant(
+ [1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7],
+ shape=shape_y,
+ dtype=dtypes.float64,
+ name="y")
+ m = math_ops.matmul(x, y, transpose_a, transpose_b, name="matmul")
+ err = gradient_checker.compute_gradient_error(x, shape_x, m, [3, 4])
print("matmul input0 gradient err = ", err)
self.assertLess(err, 1e-10)
@@ -368,12 +395,18 @@ class MatMulGradientTest(tf.test.TestCase):
if transpose_b:
shape_y = list(reversed(shape_y))
with self.test_session(use_gpu=False):
- x = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=shape_x,
- dtype=tf.float64, name="x")
- y = tf.constant([1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7],
- shape=shape_y, dtype=tf.float64, name="y")
- m = tf.matmul(x, y, transpose_a, transpose_b, name="matmul")
- err = tf.test.compute_gradient_error(y, shape_y, m, [3, 4])
+ x = constant_op.constant(
+ [1.0, 2.0, 3.0, 4.0, 5.0, 6.0],
+ shape=shape_x,
+ dtype=dtypes.float64,
+ name="x")
+ y = constant_op.constant(
+ [1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7],
+ shape=shape_y,
+ dtype=dtypes.float64,
+ name="y")
+ m = math_ops.matmul(x, y, transpose_a, transpose_b, name="matmul")
+ err = gradient_checker.compute_gradient_error(y, shape_y, m, [3, 4])
print("matmul input1 gradient err = ", err)
self.assertLess(err, 1e-10)
@@ -383,25 +416,25 @@ class MatMulGradientTest(tf.test.TestCase):
self._VerifyInput1(transpose_a=True, transpose_b=True)
-class MatMulStatsTest(tf.test.TestCase):
+class MatMulStatsTest(test.TestCase):
def testSimpleStatistics(self):
- g = tf.Graph()
+ g = ops.Graph()
with g.as_default():
- a = tf.Variable(tf.random_normal([25, 16]))
- b = tf.Variable(tf.random_normal([16, 9]))
- tf.matmul(a, b)
+ a = variables.Variable(random_ops.random_normal([25, 16]))
+ b = variables.Variable(random_ops.random_normal([16, 9]))
+ math_ops.matmul(a, b)
for op in g.get_operations():
flops = ops.get_stats_for_node_def(g, op.node_def, "flops").value
if op.name == "MatMul":
self.assertEqual(7200, flops)
def testTransposedStatistics(self):
- g = tf.Graph()
+ g = ops.Graph()
with g.as_default():
- a = tf.Variable(tf.random_normal([16, 25]))
- b = tf.Variable(tf.random_normal([16, 9]))
- tf.matmul(a, b, transpose_a=True)
+ a = variables.Variable(random_ops.random_normal([16, 25]))
+ b = variables.Variable(random_ops.random_normal([16, 9]))
+ math_ops.matmul(a, b, transpose_a=True)
for op in g.get_operations():
flops = ops.get_stats_for_node_def(g, op.node_def, "flops").value
if op.name == "MatMul":
@@ -409,4 +442,4 @@ class MatMulStatsTest(tf.test.TestCase):
if __name__ == "__main__":
- tf.test.main()
+ test.main()
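
Each gradient test here follows one recipe: build a tiny graph, then let gradient_checker compare the symbolic gradient against a finite-difference estimate. A self-contained version of the pattern (a sketch against the TF 1.x modules imported above, not part of the commit):

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test


class TinyMatMulGradientTest(test.TestCase):

  def testGradient(self):
    with self.test_session(use_gpu=False):
      x = constant_op.constant(
          [1.0, 2.0, 3.0, 4.0, 5.0, 6.0],
          shape=[3, 2],
          dtype=dtypes.float64,
          name="x")
      m = math_ops.matmul(x, x, transpose_a=True, name="gram")  # [2, 2]
      # Max elementwise difference between analytic and numeric Jacobians.
      err = gradient_checker.compute_gradient_error(x, [3, 2], m, [2, 2])
      self.assertLess(err, 1e-10)


if __name__ == "__main__":
  test.main()
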
diff --git a/tensorflow/python/kernel_tests/matrix_band_part_op_test.py b/tensorflow/python/kernel_tests/matrix_band_part_op_test.py
index 5f5efaeaa4..ee7db77dd0 100644
--- a/tensorflow/python/kernel_tests/matrix_band_part_op_test.py
+++ b/tensorflow/python/kernel_tests/matrix_band_part_op_test.py
@@ -18,10 +18,14 @@ from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+from tensorflow.python.framework import constant_op
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import gradient_checker
+from tensorflow.python.platform import test
-class MatrixBandPartTest(tf.test.TestCase):
+
+class MatrixBandPartTest(test.TestCase):
pass # Filled in below
@@ -40,13 +44,13 @@ def _GetMatrixBandPartTest(dtype_, batch_shape_, shape_):
band_np = np.tril(band_np, upper)
if batch_shape is not ():
band_np = np.tile(band_np, batch_shape + (1, 1))
- band = tf.matrix_band_part(batch_mat, lower, upper)
+ band = array_ops.matrix_band_part(batch_mat, lower, upper)
self.assertAllEqual(band_np, band.eval())
return Test
-class MatrixBandPartGradTest(tf.test.TestCase):
+class MatrixBandPartGradTest(test.TestCase):
pass # Filled in below
@@ -54,13 +58,13 @@ def _GetMatrixBandPartGradTest(dtype_, batch_shape_, shape_):
def Test(self):
shape = batch_shape_ + shape_
- x = tf.constant(np.random.rand(*shape), dtype=dtype_)
+ x = constant_op.constant(np.random.rand(*shape), dtype=dtype_)
with self.test_session(use_gpu=True):
for lower in -1, 0, 1, shape_[-2] - 1:
for upper in -1, 0, 1, shape_[-1] - 1:
- y = tf.matrix_band_part(x, lower, upper)
- error = tf.test.compute_gradient_error(x, x.get_shape().as_list(), y,
- y.get_shape().as_list())
+ y = array_ops.matrix_band_part(x, lower, upper)
+ error = gradient_checker.compute_gradient_error(
+ x, x.get_shape().as_list(), y, y.get_shape().as_list())
self.assertLess(error, 1e-4)
return Test
@@ -79,4 +83,4 @@ if __name__ == '__main__':
setattr(MatrixBandPartGradTest, 'testMatrixBandPartGrad_' + name,
_GetMatrixBandPartGradTest(dtype, batch_shape, shape))
- tf.test.main()
+ test.main()
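
matrix_band_part(x, num_lower, num_upper) keeps num_lower subdiagonals and num_upper superdiagonals, with a negative count keeping that whole triangle; the NumPy oracle in the test above amounts to (illustrative sketch):

import numpy as np

def band_part(x, num_lower, num_upper):
  # A negative count keeps the entire triangle, mirroring the test's
  # handling of lower/upper == -1.
  band = x if num_lower < 0 else np.triu(x, -num_lower)
  return band if num_upper < 0 else np.tril(band, num_upper)

print(band_part(np.arange(16.0).reshape(4, 4), 1, 0))  # diag + 1 subdiagonal
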
diff --git a/tensorflow/python/kernel_tests/matrix_inverse_op_test.py b/tensorflow/python/kernel_tests/matrix_inverse_op_test.py
index 972cb9ba7b..263f90c4f1 100644
--- a/tensorflow/python/kernel_tests/matrix_inverse_op_test.py
+++ b/tensorflow/python/kernel_tests/matrix_inverse_op_test.py
@@ -12,17 +12,21 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for tensorflow.ops.math_ops.matrix_inverse."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+
+from tensorflow.python.framework import constant_op
+from tensorflow.python.ops import linalg_ops
+from tensorflow.python.ops import math_ops
+from tensorflow.python.platform import test
-class InverseOpTest(tf.test.TestCase):
+class InverseOpTest(test.TestCase):
def _verifyInverse(self, x):
for np_type in [np.float32, np.float64]:
@@ -30,8 +34,8 @@ class InverseOpTest(tf.test.TestCase):
y = x.astype(np_type)
with self.test_session():
# Verify that x^{-1} * x == Identity matrix.
- inv = tf.matrix_inverse(y, adjoint=adjoint)
- tf_ans = tf.matmul(inv, y, adjoint_b=adjoint)
+ inv = linalg_ops.matrix_inverse(y, adjoint=adjoint)
+ tf_ans = math_ops.matmul(inv, y, adjoint_b=adjoint)
np_ans = np.identity(y.shape[-1])
if x.ndim > 2:
tiling = list(y.shape)
@@ -48,8 +52,8 @@ class InverseOpTest(tf.test.TestCase):
self._verifyInverse(matrix1)
self._verifyInverse(matrix2)
# A multidimensional batch of 2x2 matrices
- matrix_batch = np.concatenate([np.expand_dims(matrix1, 0),
- np.expand_dims(matrix2, 0)])
+ matrix_batch = np.concatenate(
+ [np.expand_dims(matrix1, 0), np.expand_dims(matrix2, 0)])
matrix_batch = np.tile(matrix_batch, [2, 3, 1, 1])
self._verifyInverse(matrix_batch)
@@ -60,8 +64,8 @@ class InverseOpTest(tf.test.TestCase):
self._verifyInverse(matrix1)
self._verifyInverse(matrix2)
# A multidimensional batch of 2x2 matrices
- matrix_batch = np.concatenate([np.expand_dims(matrix1, 0), np.expand_dims(
- matrix2, 0)])
+ matrix_batch = np.concatenate(
+ [np.expand_dims(matrix1, 0), np.expand_dims(matrix2, 0)])
matrix_batch = np.tile(matrix_batch, [2, 3, 1, 1])
self._verifyInverse(matrix_batch)
@@ -69,21 +73,22 @@ class InverseOpTest(tf.test.TestCase):
# When the inverse of a non-square matrix is attempted, we should return
# an error.
with self.assertRaises(ValueError):
- tf.matrix_inverse(np.array([[1., 2., 3.], [3., 4., 5.]]))
+ linalg_ops.matrix_inverse(np.array([[1., 2., 3.], [3., 4., 5.]]))
def testWrongDimensions(self):
# The input to the inverse should be at least a 2-dimensional tensor.
- tensor3 = tf.constant([1., 2.])
+ tensor3 = constant_op.constant([1., 2.])
with self.assertRaises(ValueError):
- tf.matrix_inverse(tensor3)
+ linalg_ops.matrix_inverse(tensor3)
def testNotInvertible(self):
# The input should be invertible.
with self.test_session():
with self.assertRaisesOpError("Input is not invertible."):
# All rows of the matrix below add to zero.
- tensor3 = tf.constant([[1., 0., -1.], [-1., 1., 0.], [0., -1., 1.]])
- tf.matrix_inverse(tensor3).eval()
+ tensor3 = constant_op.constant(
+ [[1., 0., -1.], [-1., 1., 0.], [0., -1., 1.]])
+ linalg_ops.matrix_inverse(tensor3).eval()
def testEmpty(self):
self._verifyInverse(np.empty([0, 2, 2]))
@@ -91,4 +96,4 @@ class InverseOpTest(tf.test.TestCase):
if __name__ == "__main__":
- tf.test.main()
+ test.main()
diff --git a/tensorflow/python/kernel_tests/matrix_solve_ls_op_test.py b/tensorflow/python/kernel_tests/matrix_solve_ls_op_test.py
index 5d60dc6152..9a7645ff61 100644
--- a/tensorflow/python/kernel_tests/matrix_solve_ls_op_test.py
+++ b/tensorflow/python/kernel_tests/matrix_solve_ls_op_test.py
@@ -13,12 +13,16 @@
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.math_ops.matrix_solve."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+
+from tensorflow.python.framework import constant_op
+from tensorflow.python.ops import linalg_ops
+from tensorflow.python.platform import test
def BatchMatMul(a, b):
@@ -58,7 +62,7 @@ def BatchRegularizedLeastSquares(matrices, rhss, l2_regularization=0.0):
return BatchMatMul(right_pseudo_inverse, rhss)
-class MatrixSolveLsOpTest(tf.test.TestCase):
+class MatrixSolveLsOpTest(test.TestCase):
def _verifySolve(self, x, y):
for np_type in [np.float32, np.float64]:
@@ -67,7 +71,7 @@ class MatrixSolveLsOpTest(tf.test.TestCase):
np_ans, _, _, _ = np.linalg.lstsq(a, b)
for fast in [True, False]:
with self.test_session():
- tf_ans = tf.matrix_solve_ls(a, b, fast=fast)
+ tf_ans = linalg_ops.matrix_solve_ls(a, b, fast=fast)
ans = tf_ans.eval()
self.assertEqual(np_ans.shape, tf_ans.get_shape())
self.assertEqual(np_ans.shape, ans.shape)
@@ -96,7 +100,7 @@ class MatrixSolveLsOpTest(tf.test.TestCase):
a[dim1, dim2, :, :], b[dim1, dim2, :, :])
for fast in [True, False]:
with self.test_session():
- tf_ans = tf.matrix_solve_ls(a, b, fast=fast).eval()
+ tf_ans = linalg_ops.matrix_solve_ls(a, b, fast=fast).eval()
self.assertEqual(np_ans.shape, tf_ans.shape)
# Check residual norm.
tf_r = b - BatchMatMul(a, tf_ans)
@@ -120,7 +124,7 @@ class MatrixSolveLsOpTest(tf.test.TestCase):
np_ans = BatchRegularizedLeastSquares(a, b, l2_regularizer)
with self.test_session():
# Test matrix_solve_ls on regular matrices
- tf_ans = tf.matrix_solve_ls(
+ tf_ans = linalg_ops.matrix_solve_ls(
a, b, l2_regularizer=l2_regularizer, fast=True).eval()
self.assertAllClose(np_ans, tf_ans, atol=1e-5, rtol=1e-5)
@@ -129,7 +133,7 @@ class MatrixSolveLsOpTest(tf.test.TestCase):
b = np.tile(y.astype(np_type), [2, 3, 1, 1])
np_ans = BatchRegularizedLeastSquares(a, b, l2_regularizer)
with self.test_session():
- tf_ans = tf.matrix_solve_ls(
+ tf_ans = linalg_ops.matrix_solve_ls(
a, b, l2_regularizer=l2_regularizer, fast=True).eval()
self.assertAllClose(np_ans, tf_ans, atol=1e-5, rtol=1e-5)
@@ -161,10 +165,10 @@ class MatrixSolveLsOpTest(tf.test.TestCase):
def testWrongDimensions(self):
# The matrix and right-hand sides should have the same number of rows.
with self.test_session():
- matrix = tf.constant([[1., 0.], [0., 1.]])
- rhs = tf.constant([[1., 0.]])
+ matrix = constant_op.constant([[1., 0.], [0., 1.]])
+ rhs = constant_op.constant([[1., 0.]])
with self.assertRaises(ValueError):
- tf.matrix_solve_ls(matrix, rhs)
+ linalg_ops.matrix_solve_ls(matrix, rhs)
def testEmpty(self):
full = np.array([[1., 2.], [3., 4.], [5., 6.]])
@@ -172,24 +176,24 @@ class MatrixSolveLsOpTest(tf.test.TestCase):
empty1 = np.empty([0, 2])
for fast in [True, False]:
with self.test_session():
- tf_ans = tf.matrix_solve_ls(empty0, empty0, fast=fast).eval()
+ tf_ans = linalg_ops.matrix_solve_ls(empty0, empty0, fast=fast).eval()
self.assertEqual(tf_ans.shape, (0, 0))
- tf_ans = tf.matrix_solve_ls(empty0, full, fast=fast).eval()
+ tf_ans = linalg_ops.matrix_solve_ls(empty0, full, fast=fast).eval()
self.assertEqual(tf_ans.shape, (0, 2))
- tf_ans = tf.matrix_solve_ls(full, empty0, fast=fast).eval()
+ tf_ans = linalg_ops.matrix_solve_ls(full, empty0, fast=fast).eval()
self.assertEqual(tf_ans.shape, (2, 0))
- tf_ans = tf.matrix_solve_ls(empty1, empty1, fast=fast).eval()
+ tf_ans = linalg_ops.matrix_solve_ls(empty1, empty1, fast=fast).eval()
self.assertEqual(tf_ans.shape, (2, 2))
def testBatchResultSize(self):
# 3x3x3 matrices, 3x3x1 right-hand sides.
matrix = np.array([1., 2., 3., 4., 5., 6., 7., 8., 9.] * 3).reshape(3, 3, 3)
rhs = np.array([1., 2., 3.] * 3).reshape(3, 3, 1)
- answer = tf.matrix_solve(matrix, rhs)
- ls_answer = tf.matrix_solve_ls(matrix, rhs)
+ answer = linalg_ops.matrix_solve(matrix, rhs)
+ ls_answer = linalg_ops.matrix_solve_ls(matrix, rhs)
self.assertEqual(ls_answer.get_shape(), [3, 3, 1])
self.assertEqual(answer.get_shape(), [3, 3, 1])
if __name__ == "__main__":
- tf.test.main()
+ test.main()
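For the regularized fast-path cases these hunks touch, the BatchRegularizedLeastSquares reference amounts to the normal-equations solve x = (A^T A + lambda I)^{-1} A^T b. A rough single-matrix NumPy sketch, assuming A has full column rank (regularized_lstsq is an illustrative name):

import numpy as np

def regularized_lstsq(a, b, l2_regularizer=0.0):
    # Solve (a^T a + lambda * I) x = a^T b, the regularized normal equations.
    n = a.shape[1]
    gramian = a.T.dot(a) + l2_regularizer * np.eye(n)
    return np.linalg.solve(gramian, a.T.dot(b))

a = np.array([[1., 2.], [3., 4.], [5., 6.]])
b = np.array([[1.], [1.], [1.]])
print(regularized_lstsq(a, b, l2_regularizer=0.1))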
diff --git a/tensorflow/python/kernel_tests/matrix_solve_op_test.py b/tensorflow/python/kernel_tests/matrix_solve_op_test.py
index 04f558654a..07ff53cfe6 100644
--- a/tensorflow/python/kernel_tests/matrix_solve_op_test.py
+++ b/tensorflow/python/kernel_tests/matrix_solve_op_test.py
@@ -12,17 +12,20 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for tensorflow.ops.math_ops.matrix_solve."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+
+from tensorflow.python.framework import constant_op
+from tensorflow.python.ops import linalg_ops
+from tensorflow.python.platform import test
-class MatrixSolveOpTest(tf.test.TestCase):
+class MatrixSolveOpTest(test.TestCase):
def _verifySolve(self, x, y, batch_dims=None):
for adjoint in False, True:
@@ -44,24 +47,25 @@ class MatrixSolveOpTest(tf.test.TestCase):
np_ans = np.linalg.solve(a_np, b)
with self.test_session():
- tf_ans = tf.matrix_solve(a, b, adjoint=adjoint)
+ tf_ans = linalg_ops.matrix_solve(a, b, adjoint=adjoint)
out = tf_ans.eval()
self.assertEqual(tf_ans.get_shape(), out.shape)
self.assertEqual(np_ans.shape, out.shape)
self.assertAllClose(np_ans, out)
def testSolve(self):
- matrix = np.array([[1.+5.j, 2.+6.j], [3.+7j, 4.+8.j]])
+    matrix = np.array([[1. + 5.j, 2. + 6.j], [3. + 7.j, 4. + 8.j]])
# 2x1 right-hand side.
- rhs1 = np.array([[1.+0.j], [1.+0.j]])
+ rhs1 = np.array([[1. + 0.j], [1. + 0.j]])
self._verifySolve(matrix, rhs1)
# 2x3 right-hand sides.
- rhs3 = np.array([[1.+0.j, 0.+0.j, 1.+0.j], [0.+0.j, 1.+0.j, 1.+0.j]])
+ rhs3 = np.array(
+ [[1. + 0.j, 0. + 0.j, 1. + 0.j], [0. + 0.j, 1. + 0.j, 1. + 0.j]])
self._verifySolve(matrix, rhs3)
def testSolveBatch(self):
- matrix = np.array([[1.+5.j, 2.+6.j], [3.+7j, 4.+8.j]])
- rhs = np.array([[1.+0.j], [1.+0.j]])
+    matrix = np.array([[1. + 5.j, 2. + 6.j], [3. + 7.j, 4. + 8.j]])
+ rhs = np.array([[1. + 0.j], [1. + 0.j]])
# Batch of 2x3x2x2 matrices, 2x3x2x3 right-hand sides.
self._verifySolve(matrix, rhs, batch_dims=[2, 3])
# Batch of 3x2x2x2 matrices, 3x2x2x3 right-hand sides.
@@ -72,24 +76,25 @@ class MatrixSolveOpTest(tf.test.TestCase):
# an error
with self.test_session():
with self.assertRaises(ValueError):
- matrix = tf.constant([[1., 2., 3.], [3., 4., 5.]])
- tf.matrix_solve(matrix, matrix)
+ matrix = constant_op.constant([[1., 2., 3.], [3., 4., 5.]])
+ linalg_ops.matrix_solve(matrix, matrix)
def testWrongDimensions(self):
# The matrix and right-hand sides should have the same number of rows.
with self.test_session():
- matrix = tf.constant([[1., 0.], [0., 1.]])
- rhs = tf.constant([[1., 0.]])
+ matrix = constant_op.constant([[1., 0.], [0., 1.]])
+ rhs = constant_op.constant([[1., 0.]])
with self.assertRaises(ValueError):
- tf.matrix_solve(matrix, rhs)
+ linalg_ops.matrix_solve(matrix, rhs)
def testNotInvertible(self):
# The input should be invertible.
with self.test_session():
with self.assertRaisesOpError("Input matrix is not invertible."):
        # All rows of the matrix below add to zero.
- matrix = tf.constant([[1., 0., -1.], [-1., 1., 0.], [0., -1., 1.]])
- tf.matrix_solve(matrix, matrix).eval()
+ matrix = constant_op.constant(
+ [[1., 0., -1.], [-1., 1., 0.], [0., -1., 1.]])
+ linalg_ops.matrix_solve(matrix, matrix).eval()
def testEmpty(self):
with self.test_session():
@@ -98,4 +103,4 @@ class MatrixSolveOpTest(tf.test.TestCase):
if __name__ == "__main__":
- tf.test.main()
+ test.main()
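With adjoint=True, matrix_solve solves conj(A)^T x = b rather than A x = b, which is what the a_np comparison against np.linalg.solve above encodes. A NumPy rendering of that contract (solve_maybe_adjoint is an illustrative name):

import numpy as np

def solve_maybe_adjoint(a, b, adjoint=False):
    # adjoint=True means: solve against the conjugate transpose of a.
    a_eff = np.conj(a.T) if adjoint else a
    return np.linalg.solve(a_eff, b)

matrix = np.array([[1. + 5.j, 2. + 6.j], [3. + 7.j, 4. + 8.j]])
rhs = np.array([[1. + 0.j], [1. + 0.j]])
print(solve_maybe_adjoint(matrix, rhs, adjoint=True))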
diff --git a/tensorflow/python/kernel_tests/matrix_triangular_solve_op_test.py b/tensorflow/python/kernel_tests/matrix_triangular_solve_op_test.py
index c415482fc2..cdf828f3ca 100644
--- a/tensorflow/python/kernel_tests/matrix_triangular_solve_op_test.py
+++ b/tensorflow/python/kernel_tests/matrix_triangular_solve_op_test.py
@@ -13,28 +13,38 @@
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.math_ops.matrix_triangular_solve."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+
+from tensorflow.python.ops import linalg_ops
+from tensorflow.python.platform import test
-class MatrixTriangularSolveOpTest(tf.test.TestCase):
+class MatrixTriangularSolveOpTest(test.TestCase):
def _verifySolveAllWays(self, x, y, batch_dims=None):
for use_gpu in True, False:
for lower in True, False:
for adjoint in True, False:
- self._verifySolve(x,
- y,
- lower=lower,
- adjoint=adjoint,
- batch_dims=batch_dims,
- use_gpu=use_gpu)
-
- def _verifySolve(self, x, y, lower=True, adjoint=False, batch_dims=None, use_gpu=False):
+ self._verifySolve(
+ x,
+ y,
+ lower=lower,
+ adjoint=adjoint,
+ batch_dims=batch_dims,
+ use_gpu=use_gpu)
+
+ def _verifySolve(self,
+ x,
+ y,
+ lower=True,
+ adjoint=False,
+ batch_dims=None,
+ use_gpu=False):
for np_type in [np.float32, np.float64]:
a = x.astype(np_type)
b = y.astype(np_type)
@@ -55,7 +65,8 @@ class MatrixTriangularSolveOpTest(tf.test.TestCase):
b = np.tile(b, batch_dims + [1, 1])
with self.test_session(use_gpu=use_gpu):
- tf_ans = tf.matrix_triangular_solve(a, b, lower=lower, adjoint=adjoint)
+ tf_ans = linalg_ops.matrix_triangular_solve(
+ a, b, lower=lower, adjoint=adjoint)
out = tf_ans.eval()
np_ans = np.linalg.solve(a_np, b)
self.assertEqual(np_ans.shape, tf_ans.get_shape())
@@ -114,11 +125,8 @@ class MatrixTriangularSolveOpTest(tf.test.TestCase):
self._verifySolve(np.empty([2, 0, 0]), np.empty([2, 0, 0]), lower=True)
self._verifySolve(np.empty([2, 0, 0]), np.empty([2, 0, 0]), lower=False)
self._verifySolve(
- np.empty([2, 0, 0]),
- np.empty([2, 0, 0]),
- lower=True,
- batch_dims=[3, 2])
+ np.empty([2, 0, 0]), np.empty([2, 0, 0]), lower=True, batch_dims=[3, 2])
if __name__ == "__main__":
- tf.test.main()
+ test.main()
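matrix_triangular_solve reads only one triangle of its input, selected by lower, and adjoint again means solving against the conjugate transpose. A NumPy sketch of that contract (triangular_solve is an illustrative stand-in, not the op itself):

import numpy as np

def triangular_solve(a, b, lower=True, adjoint=False):
    # Zero out the ignored triangle, then solve (optionally adjointed).
    a_tri = np.tril(a) if lower else np.triu(a)
    if adjoint:
        a_tri = np.conj(a_tri.T)
    return np.linalg.solve(a_tri, b)

a = np.array([[4., 9.], [1., 3.]])
b = np.array([[1.], [2.]])
print(triangular_solve(a, b, lower=True))  # solves with [[4, 0], [1, 3]]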
diff --git a/tensorflow/python/kernel_tests/metrics_test.py b/tensorflow/python/kernel_tests/metrics_test.py
index 87b8a5d3e7..a9052708ea 100644
--- a/tensorflow/python/kernel_tests/metrics_test.py
+++ b/tensorflow/python/kernel_tests/metrics_test.py
@@ -23,8 +23,20 @@ import math
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
-import tensorflow as tf
+
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes as dtypes_lib
+from tensorflow.python.framework import errors_impl
+from tensorflow.python.framework import ops
+from tensorflow.python.framework import sparse_tensor
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import metrics
+from tensorflow.python.ops import random_ops
+from tensorflow.python.ops import variables
+import tensorflow.python.ops.data_flow_grad # pylint: disable=unused-import
+import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
+from tensorflow.python.platform import test
NAN = float('nan')
@@ -33,7 +45,9 @@ def _enqueue_vector(sess, queue, values, shape=None):
if not shape:
shape = (1, len(values))
dtype = queue.dtypes[0]
- sess.run(queue.enqueue(tf.constant(values, dtype=dtype, shape=shape)))
+ sess.run(
+ queue.enqueue(constant_op.constant(
+ values, dtype=dtype, shape=shape)))
def _binary_2d_label_to_2d_sparse_value(labels):
@@ -65,10 +79,9 @@ def _binary_2d_label_to_2d_sparse_value(labels):
label += 1
batch += 1
shape = [len(labels), len(labels[0])]
- return tf.SparseTensorValue(
+ return sparse_tensor.SparseTensorValue(
np.array(indices, np.int64),
- np.array(values, np.int64),
- np.array(shape, np.int64))
+ np.array(values, np.int64), np.array(shape, np.int64))
def _binary_2d_label_to_1d_sparse_value(labels):
@@ -105,10 +118,9 @@ def _binary_2d_label_to_1d_sparse_value(labels):
if indices != [[i] for i in range(len(labels))]:
raise ValueError('Expected 1 label/example, got %s.' % indices)
shape = [len(labels)]
- return tf.SparseTensorValue(
+ return sparse_tensor.SparseTensorValue(
np.array(indices, np.int64),
- np.array(values, np.int64),
- np.array(shape, np.int64))
+ np.array(values, np.int64), np.array(shape, np.int64))
def _binary_3d_label_to_sparse_value(labels):
@@ -136,10 +148,9 @@ def _binary_3d_label_to_sparse_value(labels):
else:
assert label == 0
shape = [len(labels), len(labels[0]), len(labels[0][0])]
- return tf.SparseTensorValue(
+ return sparse_tensor.SparseTensorValue(
np.array(indices, np.int64),
- np.array(values, np.int64),
- np.array(shape, np.int64))
+ np.array(values, np.int64), np.array(shape, np.int64))
def _assert_nan(test_case, actual):
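Concretely, under the packing the sparse-conversion helpers above implement (the ids of the positive classes packed to the left of each row), labels = [[0, 1, 1], [1, 0, 0]] would yield the following SparseTensorValue components. This is a hypothetical worked example, assuming the elided loop bodies match their visible fragments:

import numpy as np

labels = [[0, 1, 1], [1, 0, 0]]
indices = np.array([[0, 0], [0, 1], [1, 0]], np.int64)  # [batch, slot]
values = np.array([1, 2, 0], np.int64)                  # positive class ids
shape = np.array([2, 3], np.int64)                      # [batch, num_classes]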
@@ -148,35 +159,34 @@ def _assert_nan(test_case, actual):
def _assert_local_variables(test_case, expected):
test_case.assertEquals(
- set(expected), set(v.name for v in tf.local_variables()))
+ set(expected), set(v.name for v in variables.local_variables()))
-class MeanTest(tf.test.TestCase):
+class MeanTest(test.TestCase):
def setUp(self):
- tf.reset_default_graph()
+ ops.reset_default_graph()
def testVars(self):
- metrics.mean(tf.ones([4, 3]))
+ metrics.mean(array_ops.ones([4, 3]))
_assert_local_variables(self, ('mean/count:0', 'mean/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.mean(
- tf.ones([4, 3]),
- metrics_collections=[my_collection_name])
- self.assertListEqual(tf.get_collection(my_collection_name), [mean])
+ array_ops.ones([4, 3]), metrics_collections=[my_collection_name])
+ self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.mean(
- tf.ones([4, 3]),
- updates_collections=[my_collection_name])
- self.assertListEqual(tf.get_collection(my_collection_name), [update_op])
+ array_ops.ones([4, 3]), updates_collections=[my_collection_name])
+ self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testBasic(self):
with self.test_session() as sess:
- values_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
+ values_queue = data_flow_ops.FIFOQueue(
+ 4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
@@ -185,14 +195,15 @@ class MeanTest(tf.test.TestCase):
mean, update_op = metrics.mean(values)
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
self.assertAlmostEqual(1.65, sess.run(mean), 5)
def testUpdateOpsReturnsCurrentValue(self):
with self.test_session() as sess:
- values_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
+ values_queue = data_flow_ops.FIFOQueue(
+ 4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
@@ -201,11 +212,11 @@ class MeanTest(tf.test.TestCase):
mean, update_op = metrics.mean(values)
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, sess.run(update_op), 5)
self.assertAlmostEqual(1.475, sess.run(update_op), 5)
- self.assertAlmostEqual(12.4/6.0, sess.run(update_op), 5)
+ self.assertAlmostEqual(12.4 / 6.0, sess.run(update_op), 5)
self.assertAlmostEqual(1.65, sess.run(update_op), 5)
self.assertAlmostEqual(1.65, sess.run(mean), 5)
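The expected running values in this hunk follow from folding each dequeued batch into (total, count) accumulators; a quick NumPy check of the sequence 0.5, 1.475, 12.4/6, 1.65:

import numpy as np

batches = [[0, 1], [-4.2, 9.1], [6.5, 0], [-3.2, 4.0]]
total, count = 0.0, 0
for batch in batches:
    # Each update_op folds one batch in and returns the mean so far.
    total += np.sum(batch)
    count += len(batch)
    print(total / count)  # 0.5, 1.475, 2.0666..., 1.65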
@@ -213,7 +224,8 @@ class MeanTest(tf.test.TestCase):
def test1dWeightedValues(self):
with self.test_session() as sess:
# Create the queue that populates the values.
- values_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
+ values_queue = data_flow_ops.FIFOQueue(
+ 4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
@@ -221,7 +233,8 @@ class MeanTest(tf.test.TestCase):
values = values_queue.dequeue()
      # Create the queue that populates the weights.
- weights_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 1))
+ weights_queue = data_flow_ops.FIFOQueue(
+ 4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [1])
_enqueue_vector(sess, weights_queue, [0])
_enqueue_vector(sess, weights_queue, [0])
@@ -230,7 +243,7 @@ class MeanTest(tf.test.TestCase):
mean, update_op = metrics.mean(values, weights)
- tf.local_variables_initializer().run()
+ variables.local_variables_initializer().run()
for _ in range(4):
update_op.eval()
self.assertAlmostEqual((0 + 1 - 3.2 + 4.0) / 4.0, mean.eval(), 5)
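The expected value (0 + 1 - 3.2 + 4.0) / 4.0 comes from broadcasting the per-row weights across both columns before averaging; in NumPy terms:

import numpy as np

values = np.array([[0., 1.], [-4.2, 9.1], [6.5, 0.], [-3.2, 4.0]])
weights = np.array([[1.], [0.], [0.], [1.]])  # one weight per row

# Broadcast the weights to the value shape, then take the weighted average.
broadcast = weights * np.ones_like(values)
print(np.sum(broadcast * values) / np.sum(broadcast))  # 0.45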
@@ -238,16 +251,12 @@ class MeanTest(tf.test.TestCase):
def test1dWeightedValues_placeholders(self):
with self.test_session() as sess:
# Create the queue that populates the values.
- feed_values = (
- (0, 1),
- (-4.2, 9.1),
- (6.5, 0),
- (-3.2, 4.0)
- )
- values = tf.placeholder(dtype=tf.float32)
+ feed_values = ((0, 1), (-4.2, 9.1), (6.5, 0), (-3.2, 4.0))
+ values = array_ops.placeholder(dtype=dtypes_lib.float32)
      # Create the queue that populates the weights.
- weights_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 1))
+ weights_queue = data_flow_ops.FIFOQueue(
+ 4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [1])
_enqueue_vector(sess, weights_queue, [0])
_enqueue_vector(sess, weights_queue, [0])
@@ -256,7 +265,7 @@ class MeanTest(tf.test.TestCase):
mean, update_op = metrics.mean(values, weights)
- tf.local_variables_initializer().run()
+ variables.local_variables_initializer().run()
for i in range(4):
update_op.eval(feed_dict={values: feed_values[i]})
self.assertAlmostEqual((0 + 1 - 3.2 + 4.0) / 4.0, mean.eval(), 5)
@@ -264,7 +273,8 @@ class MeanTest(tf.test.TestCase):
def test2dWeightedValues(self):
with self.test_session() as sess:
# Create the queue that populates the values.
- values_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
+ values_queue = data_flow_ops.FIFOQueue(
+ 4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
@@ -272,7 +282,8 @@ class MeanTest(tf.test.TestCase):
values = values_queue.dequeue()
      # Create the queue that populates the weights.
- weights_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
+ weights_queue = data_flow_ops.FIFOQueue(
+ 4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, weights_queue, [1, 1])
_enqueue_vector(sess, weights_queue, [1, 0])
_enqueue_vector(sess, weights_queue, [0, 1])
@@ -281,7 +292,7 @@ class MeanTest(tf.test.TestCase):
mean, update_op = metrics.mean(values, weights)
- tf.local_variables_initializer().run()
+ variables.local_variables_initializer().run()
for _ in range(4):
update_op.eval()
self.assertAlmostEqual((0 + 1 - 4.2 + 0) / 4.0, mean.eval(), 5)
@@ -289,16 +300,12 @@ class MeanTest(tf.test.TestCase):
def test2dWeightedValues_placeholders(self):
with self.test_session() as sess:
# Create the queue that populates the values.
- feed_values = (
- (0, 1),
- (-4.2, 9.1),
- (6.5, 0),
- (-3.2, 4.0)
- )
- values = tf.placeholder(dtype=tf.float32)
+ feed_values = ((0, 1), (-4.2, 9.1), (6.5, 0), (-3.2, 4.0))
+ values = array_ops.placeholder(dtype=dtypes_lib.float32)
      # Create the queue that populates the weights.
- weights_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
+ weights_queue = data_flow_ops.FIFOQueue(
+ 4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, weights_queue, [1, 1])
_enqueue_vector(sess, weights_queue, [1, 0])
_enqueue_vector(sess, weights_queue, [0, 1])
@@ -307,39 +314,38 @@ class MeanTest(tf.test.TestCase):
mean, update_op = metrics.mean(values, weights)
- tf.local_variables_initializer().run()
+ variables.local_variables_initializer().run()
for i in range(4):
update_op.eval(feed_dict={values: feed_values[i]})
self.assertAlmostEqual((0 + 1 - 4.2 + 0) / 4.0, mean.eval(), 5)
-class StreamingMeanTensorTest(tf.test.TestCase):
+class StreamingMeanTensorTest(test.TestCase):
def setUp(self):
- tf.reset_default_graph()
+ ops.reset_default_graph()
def testVars(self):
- metrics.mean_tensor(tf.ones([4, 3]))
- _assert_local_variables(self, (
- 'mean/total_tensor:0', 'mean/count_tensor:0'))
+ metrics.mean_tensor(array_ops.ones([4, 3]))
+ _assert_local_variables(self, ('mean/total_tensor:0',
+ 'mean/count_tensor:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.mean_tensor(
- tf.ones([4, 3]),
- metrics_collections=[my_collection_name])
- self.assertListEqual(tf.get_collection(my_collection_name), [mean])
+ array_ops.ones([4, 3]), metrics_collections=[my_collection_name])
+ self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.mean_tensor(
- tf.ones([4, 3]),
- updates_collections=[my_collection_name])
- self.assertListEqual(tf.get_collection(my_collection_name), [update_op])
+ array_ops.ones([4, 3]), updates_collections=[my_collection_name])
+ self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testBasic(self):
with self.test_session() as sess:
- values_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
+ values_queue = data_flow_ops.FIFOQueue(
+ 4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
@@ -348,35 +354,36 @@ class StreamingMeanTensorTest(tf.test.TestCase):
mean, update_op = metrics.mean_tensor(values)
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
- self.assertAllClose([[-0.9/4., 3.525]], sess.run(mean))
+ self.assertAllClose([[-0.9 / 4., 3.525]], sess.run(mean))
def testMultiDimensional(self):
with self.test_session() as sess:
- values_queue = tf.FIFOQueue(2, dtypes=tf.float32, shapes=(2, 2, 2))
- _enqueue_vector(sess,
- values_queue,
- [[[1, 2], [1, 2]], [[1, 2], [1, 2]]],
- shape=(2, 2, 2))
- _enqueue_vector(sess,
- values_queue,
- [[[1, 2], [1, 2]], [[3, 4], [9, 10]]],
- shape=(2, 2, 2))
+ values_queue = data_flow_ops.FIFOQueue(
+ 2, dtypes=dtypes_lib.float32, shapes=(2, 2, 2))
+ _enqueue_vector(
+ sess,
+ values_queue, [[[1, 2], [1, 2]], [[1, 2], [1, 2]]],
+ shape=(2, 2, 2))
+ _enqueue_vector(
+ sess,
+ values_queue, [[[1, 2], [1, 2]], [[3, 4], [9, 10]]],
+ shape=(2, 2, 2))
values = values_queue.dequeue()
mean, update_op = metrics.mean_tensor(values)
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
for _ in range(2):
sess.run(update_op)
- self.assertAllClose([[[1, 2], [1, 2]], [[2, 3], [5, 6]]],
- sess.run(mean))
+ self.assertAllClose([[[1, 2], [1, 2]], [[2, 3], [5, 6]]], sess.run(mean))
def testUpdateOpsReturnsCurrentValue(self):
with self.test_session() as sess:
- values_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
+ values_queue = data_flow_ops.FIFOQueue(
+ 4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
@@ -385,19 +392,20 @@ class StreamingMeanTensorTest(tf.test.TestCase):
mean, update_op = metrics.mean_tensor(values)
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
self.assertAllClose([[0, 1]], sess.run(update_op), 5)
self.assertAllClose([[-2.1, 5.05]], sess.run(update_op), 5)
- self.assertAllClose([[2.3/3., 10.1/3.]], sess.run(update_op), 5)
- self.assertAllClose([[-0.9/4., 3.525]], sess.run(update_op), 5)
+ self.assertAllClose([[2.3 / 3., 10.1 / 3.]], sess.run(update_op), 5)
+ self.assertAllClose([[-0.9 / 4., 3.525]], sess.run(update_op), 5)
- self.assertAllClose([[-0.9/4., 3.525]], sess.run(mean), 5)
+ self.assertAllClose([[-0.9 / 4., 3.525]], sess.run(mean), 5)
def testWeighted1d(self):
with self.test_session() as sess:
# Create the queue that populates the values.
- values_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
+ values_queue = data_flow_ops.FIFOQueue(
+ 4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
@@ -405,7 +413,8 @@ class StreamingMeanTensorTest(tf.test.TestCase):
values = values_queue.dequeue()
# Create the queue that populates the weights.
- weights_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 1))
+ weights_queue = data_flow_ops.FIFOQueue(
+ 4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [[1]])
_enqueue_vector(sess, weights_queue, [[0]])
_enqueue_vector(sess, weights_queue, [[1]])
@@ -414,7 +423,7 @@ class StreamingMeanTensorTest(tf.test.TestCase):
mean, update_op = metrics.mean_tensor(values, weights)
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
self.assertAllClose([[3.25, 0.5]], sess.run(mean), 5)
@@ -422,7 +431,8 @@ class StreamingMeanTensorTest(tf.test.TestCase):
def testWeighted2d_1(self):
with self.test_session() as sess:
# Create the queue that populates the values.
- values_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
+ values_queue = data_flow_ops.FIFOQueue(
+ 4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
@@ -430,7 +440,8 @@ class StreamingMeanTensorTest(tf.test.TestCase):
values = values_queue.dequeue()
# Create the queue that populates the weights.
- weights_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
+ weights_queue = data_flow_ops.FIFOQueue(
+ 4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, weights_queue, [1, 1])
_enqueue_vector(sess, weights_queue, [1, 0])
_enqueue_vector(sess, weights_queue, [0, 1])
@@ -439,7 +450,7 @@ class StreamingMeanTensorTest(tf.test.TestCase):
mean, update_op = metrics.mean_tensor(values, weights)
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
self.assertAllClose([[-2.1, 0.5]], sess.run(mean), 5)
@@ -447,7 +458,8 @@ class StreamingMeanTensorTest(tf.test.TestCase):
def testWeighted2d_2(self):
with self.test_session() as sess:
# Create the queue that populates the values.
- values_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
+ values_queue = data_flow_ops.FIFOQueue(
+ 4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
@@ -455,7 +467,8 @@ class StreamingMeanTensorTest(tf.test.TestCase):
values = values_queue.dequeue()
# Create the queue that populates the weights.
- weights_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
+ weights_queue = data_flow_ops.FIFOQueue(
+ 4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, weights_queue, [0, 1])
_enqueue_vector(sess, weights_queue, [0, 0])
_enqueue_vector(sess, weights_queue, [0, 1])
@@ -464,61 +477,63 @@ class StreamingMeanTensorTest(tf.test.TestCase):
mean, update_op = metrics.mean_tensor(values, weights)
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
self.assertAllClose([[0, 0.5]], sess.run(mean), 5)
-class AccuracyTest(tf.test.TestCase):
+class AccuracyTest(test.TestCase):
def setUp(self):
- tf.reset_default_graph()
+ ops.reset_default_graph()
def testVars(self):
metrics.accuracy(
- predictions=tf.ones((10, 1)), labels=tf.ones((10, 1)),
+ predictions=array_ops.ones((10, 1)),
+ labels=array_ops.ones((10, 1)),
name='my_accuracy')
- _assert_local_variables(self, (
- 'my_accuracy/count:0', 'my_accuracy/total:0'))
+ _assert_local_variables(self, ('my_accuracy/count:0',
+ 'my_accuracy/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.accuracy(
- predictions=tf.ones((10, 1)),
- labels=tf.ones((10, 1)),
+ predictions=array_ops.ones((10, 1)),
+ labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
- self.assertListEqual(tf.get_collection(my_collection_name), [mean])
+ self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.accuracy(
- predictions=tf.ones((10, 1)),
- labels=tf.ones((10, 1)),
+ predictions=array_ops.ones((10, 1)),
+ labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
- self.assertListEqual(tf.get_collection(my_collection_name), [update_op])
+ self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testPredictionsAndLabelsOfDifferentSizeRaisesValueError(self):
- predictions = tf.ones((10, 3))
- labels = tf.ones((10, 4))
+ predictions = array_ops.ones((10, 3))
+ labels = array_ops.ones((10, 4))
with self.assertRaises(ValueError):
metrics.accuracy(labels, predictions)
def testPredictionsAndWeightsOfDifferentSizeRaisesValueError(self):
- predictions = tf.ones((10, 3))
- labels = tf.ones((10, 3))
- weights = tf.ones((9, 3))
+ predictions = array_ops.ones((10, 3))
+ labels = array_ops.ones((10, 3))
+ weights = array_ops.ones((9, 3))
with self.assertRaises(ValueError):
metrics.accuracy(labels, predictions, weights)
def testValueTensorIsIdempotent(self):
- predictions = tf.random_uniform((10, 3), maxval=3, dtype=tf.int64, seed=1)
- labels = tf.random_uniform((10, 3), maxval=3, dtype=tf.int64, seed=1)
- accuracy, update_op = metrics.accuracy(
- labels, predictions)
+ predictions = random_ops.random_uniform(
+ (10, 3), maxval=3, dtype=dtypes_lib.int64, seed=1)
+ labels = random_ops.random_uniform(
+ (10, 3), maxval=3, dtype=dtypes_lib.int64, seed=1)
+ accuracy, update_op = metrics.accuracy(labels, predictions)
with self.test_session() as sess:
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
@@ -532,7 +547,8 @@ class AccuracyTest(tf.test.TestCase):
def testMultipleUpdates(self):
with self.test_session() as sess:
# Create the queue that populates the predictions.
- preds_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 1))
+ preds_queue = data_flow_ops.FIFOQueue(
+ 4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [2])
@@ -540,43 +556,43 @@ class AccuracyTest(tf.test.TestCase):
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
- labels_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 1))
+ labels_queue = data_flow_ops.FIFOQueue(
+ 4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [2])
labels = labels_queue.dequeue()
- accuracy, update_op = metrics.accuracy(
- labels, predictions)
+ accuracy, update_op = metrics.accuracy(labels, predictions)
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
for _ in xrange(3):
sess.run(update_op)
self.assertEqual(0.5, sess.run(update_op))
self.assertEqual(0.5, accuracy.eval())
def testEffectivelyEquivalentSizes(self):
- predictions = tf.ones((40, 1))
- labels = tf.ones((40,))
+ predictions = array_ops.ones((40, 1))
+ labels = array_ops.ones((40,))
with self.test_session() as sess:
- accuracy, update_op = metrics.accuracy(
- labels, predictions)
+ accuracy, update_op = metrics.accuracy(labels, predictions)
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
self.assertEqual(1.0, update_op.eval())
self.assertEqual(1.0, accuracy.eval())
  def testEffectivelyEquivalentSizesWithStaticShapedWeight(self):
- predictions = tf.convert_to_tensor([1, 1, 1]) # shape 3,
- labels = tf.expand_dims(tf.convert_to_tensor([1, 0, 0]), 1) # shape 3, 1
- weights = tf.expand_dims(tf.convert_to_tensor([100, 1, 1]), 1) # shape 3, 1
+ predictions = ops.convert_to_tensor([1, 1, 1]) # shape 3,
+ labels = array_ops.expand_dims(ops.convert_to_tensor([1, 0, 0]),
+ 1) # shape 3, 1
+ weights = array_ops.expand_dims(ops.convert_to_tensor([100, 1, 1]),
+ 1) # shape 3, 1
with self.test_session() as sess:
- accuracy, update_op = metrics.accuracy(
- labels, predictions, weights)
+ accuracy, update_op = metrics.accuracy(labels, predictions, weights)
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
      # If streaming_accuracy does not flatten the weights, accuracy would be
      # 0.33333334 due to an intended broadcast of the weights. Due to
      # flattening, it will be higher than .95.
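The shape arithmetic behind that comment, spelled out in NumPy (the .95 bound itself is the test's, not derived here):

import numpy as np

predictions = np.array([1, 1, 1])       # shape (3,)
labels = np.array([[1], [0], [0]])      # shape (3, 1)
weights = np.array([[100], [1], [1]])   # shape (3, 1)

# Unflattened, (3,) == (3, 1) broadcasts to a (3, 3) comparison matrix:
print(np.mean(predictions == labels))   # ~0.33333334

# Flattened to shape (3,), the heavy weight on the one correct example wins:
correct = (predictions.ravel() == labels.ravel()).astype(float)
print(np.sum(correct * weights.ravel()) / np.sum(weights))  # 100/102 > .95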
@@ -584,18 +600,20 @@ class AccuracyTest(tf.test.TestCase):
self.assertGreater(accuracy.eval(), .95)
def testEffectivelyEquivalentSizesWithDynamicallyShapedWeight(self):
- predictions = tf.convert_to_tensor([1, 1, 1]) # shape 3,
- labels = tf.expand_dims(tf.convert_to_tensor([1, 0, 0]), 1) # shape 3, 1
+ predictions = ops.convert_to_tensor([1, 1, 1]) # shape 3,
+ labels = array_ops.expand_dims(ops.convert_to_tensor([1, 0, 0]),
+ 1) # shape 3, 1
weights = [[100], [1], [1]] # shape 3, 1
- weights_placeholder = tf.placeholder(dtype=tf.int32, name='weights')
+ weights_placeholder = array_ops.placeholder(
+ dtype=dtypes_lib.int32, name='weights')
feed_dict = {weights_placeholder: weights}
with self.test_session() as sess:
- accuracy, update_op = metrics.accuracy(
- labels, predictions, weights_placeholder)
+ accuracy, update_op = metrics.accuracy(labels, predictions,
+ weights_placeholder)
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
      # If streaming_accuracy does not flatten the weights, accuracy would be
      # 0.33333334 due to an intended broadcast of the weights. Due to
      # flattening, it will be higher than .95.
@@ -605,7 +623,8 @@ class AccuracyTest(tf.test.TestCase):
def testMultipleUpdatesWithWeightedValues(self):
with self.test_session() as sess:
# Create the queue that populates the predictions.
- preds_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 1))
+ preds_queue = data_flow_ops.FIFOQueue(
+ 4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [2])
@@ -613,7 +632,8 @@ class AccuracyTest(tf.test.TestCase):
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
- labels_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 1))
+ labels_queue = data_flow_ops.FIFOQueue(
+ 4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
@@ -621,61 +641,60 @@ class AccuracyTest(tf.test.TestCase):
labels = labels_queue.dequeue()
# Create the queue that populates the weights.
- weights_queue = tf.FIFOQueue(4, dtypes=tf.int64, shapes=(1, 1))
+ weights_queue = data_flow_ops.FIFOQueue(
+ 4, dtypes=dtypes_lib.int64, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [1])
_enqueue_vector(sess, weights_queue, [1])
_enqueue_vector(sess, weights_queue, [0])
_enqueue_vector(sess, weights_queue, [0])
weights = weights_queue.dequeue()
- accuracy, update_op = metrics.accuracy(
- labels, predictions, weights)
+ accuracy, update_op = metrics.accuracy(labels, predictions, weights)
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
for _ in xrange(3):
sess.run(update_op)
self.assertEqual(1.0, sess.run(update_op))
self.assertEqual(1.0, accuracy.eval())
-class PrecisionTest(tf.test.TestCase):
+class PrecisionTest(test.TestCase):
def setUp(self):
np.random.seed(1)
- tf.reset_default_graph()
+ ops.reset_default_graph()
def testVars(self):
metrics.precision(
- predictions=tf.ones((10, 1)), labels=tf.ones((10, 1)))
- _assert_local_variables(self, (
- 'precision/false_positives/count:0',
- 'precision/true_positives/count:0'
- ))
+ predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
+ _assert_local_variables(self, ('precision/false_positives/count:0',
+ 'precision/true_positives/count:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.precision(
- predictions=tf.ones((10, 1)),
- labels=tf.ones((10, 1)),
+ predictions=array_ops.ones((10, 1)),
+ labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
- self.assertListEqual(tf.get_collection(my_collection_name), [mean])
+ self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.precision(
- predictions=tf.ones((10, 1)),
- labels=tf.ones((10, 1)),
+ predictions=array_ops.ones((10, 1)),
+ labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
- self.assertListEqual(tf.get_collection(my_collection_name), [update_op])
+ self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
- predictions = tf.random_uniform((10, 3), maxval=1, dtype=tf.int64, seed=1)
- labels = tf.random_uniform((10, 3), maxval=1, dtype=tf.int64, seed=1)
- precision, update_op = metrics.precision(
- labels, predictions)
+ predictions = random_ops.random_uniform(
+ (10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
+ labels = random_ops.random_uniform(
+ (10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
+ precision, update_op = metrics.precision(labels, predictions)
with self.test_session() as sess:
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
@@ -689,35 +708,33 @@ class PrecisionTest(tf.test.TestCase):
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
- predictions = tf.constant(inputs)
- labels = tf.constant(inputs)
- precision, update_op = metrics.precision(
- labels, predictions)
+ predictions = constant_op.constant(inputs)
+ labels = constant_op.constant(inputs)
+ precision, update_op = metrics.precision(labels, predictions)
with self.test_session() as sess:
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1, sess.run(update_op))
self.assertAlmostEqual(1, precision.eval())
def testSomeCorrect(self):
- predictions = tf.constant([1, 0, 1, 0], shape=(1, 4))
- labels = tf.constant([0, 1, 1, 0], shape=(1, 4))
- precision, update_op = metrics.precision(
- labels, predictions)
+ predictions = constant_op.constant([1, 0, 1, 0], shape=(1, 4))
+ labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
+ precision, update_op = metrics.precision(labels, predictions)
with self.test_session() as sess:
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, update_op.eval())
self.assertAlmostEqual(0.5, precision.eval())
def testWeighted1d(self):
- predictions = tf.constant([[1, 0, 1, 0], [1, 0, 1, 0]])
- labels = tf.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
+ predictions = constant_op.constant([[1, 0, 1, 0], [1, 0, 1, 0]])
+ labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
precision, update_op = metrics.precision(
- labels, predictions, weights=tf.constant([[2], [5]]))
+ labels, predictions, weights=constant_op.constant([[2], [5]]))
with self.test_session():
- tf.local_variables_initializer().run()
+ variables.local_variables_initializer().run()
weighted_tp = 2.0 + 5.0
weighted_positives = (2.0 + 2.0) + (5.0 + 5.0)
expected_precision = weighted_tp / weighted_positives
@@ -725,17 +742,17 @@ class PrecisionTest(tf.test.TestCase):
self.assertAlmostEqual(expected_precision, precision.eval())
def testWeighted1d_placeholders(self):
- predictions = tf.placeholder(dtype=tf.float32)
- labels = tf.placeholder(dtype=tf.float32)
+ predictions = array_ops.placeholder(dtype=dtypes_lib.float32)
+ labels = array_ops.placeholder(dtype=dtypes_lib.float32)
feed_dict = {
predictions: ((1, 0, 1, 0), (1, 0, 1, 0)),
labels: ((0, 1, 1, 0), (1, 0, 0, 1))
}
precision, update_op = metrics.precision(
- labels, predictions, weights=tf.constant([[2], [5]]))
+ labels, predictions, weights=constant_op.constant([[2], [5]]))
with self.test_session():
- tf.local_variables_initializer().run()
+ variables.local_variables_initializer().run()
weighted_tp = 2.0 + 5.0
weighted_positives = (2.0 + 2.0) + (5.0 + 5.0)
expected_precision = weighted_tp / weighted_positives
@@ -745,13 +762,15 @@ class PrecisionTest(tf.test.TestCase):
expected_precision, precision.eval(feed_dict=feed_dict))
def testWeighted2d(self):
- predictions = tf.constant([[1, 0, 1, 0], [1, 0, 1, 0]])
- labels = tf.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
+ predictions = constant_op.constant([[1, 0, 1, 0], [1, 0, 1, 0]])
+ labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
precision, update_op = metrics.precision(
- labels, predictions, weights=tf.constant([[1, 2, 3, 4], [4, 3, 2, 1]]))
+ labels,
+ predictions,
+ weights=constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]]))
with self.test_session():
- tf.local_variables_initializer().run()
+ variables.local_variables_initializer().run()
weighted_tp = 3.0 + 4.0
weighted_positives = (1.0 + 3.0) + (4.0 + 2.0)
expected_precision = weighted_tp / weighted_positives
@@ -759,17 +778,19 @@ class PrecisionTest(tf.test.TestCase):
self.assertAlmostEqual(expected_precision, precision.eval())
def testWeighted2d_placeholders(self):
- predictions = tf.placeholder(dtype=tf.float32)
- labels = tf.placeholder(dtype=tf.float32)
+ predictions = array_ops.placeholder(dtype=dtypes_lib.float32)
+ labels = array_ops.placeholder(dtype=dtypes_lib.float32)
feed_dict = {
predictions: ((1, 0, 1, 0), (1, 0, 1, 0)),
labels: ((0, 1, 1, 0), (1, 0, 0, 1))
}
precision, update_op = metrics.precision(
- labels, predictions, weights=tf.constant([[1, 2, 3, 4], [4, 3, 2, 1]]))
+ labels,
+ predictions,
+ weights=constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]]))
with self.test_session():
- tf.local_variables_initializer().run()
+ variables.local_variables_initializer().run()
weighted_tp = 3.0 + 4.0
weighted_positives = (1.0 + 3.0) + (4.0 + 2.0)
expected_precision = weighted_tp / weighted_positives
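The weighted_tp / weighted_positives expectations in these precision hunks reduce to elementwise sums; as a NumPy check of the 2d case:

import numpy as np

predictions = np.array([[1, 0, 1, 0], [1, 0, 1, 0]])
labels = np.array([[0, 1, 1, 0], [1, 0, 0, 1]])
weights = np.array([[1, 2, 3, 4], [4, 3, 2, 1]])

weighted_tp = np.sum(weights * (predictions & labels))  # 3 + 4
weighted_positives = np.sum(weights * predictions)      # (1 + 3) + (4 + 2)
print(weighted_tp / float(weighted_positives))          # 0.7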
@@ -781,66 +802,63 @@ class PrecisionTest(tf.test.TestCase):
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
- predictions = tf.constant(inputs)
- labels = tf.constant(1 - inputs)
- precision, update_op = metrics.precision(
- labels, predictions)
+ predictions = constant_op.constant(inputs)
+ labels = constant_op.constant(1 - inputs)
+ precision, update_op = metrics.precision(labels, predictions)
with self.test_session() as sess:
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAlmostEqual(0, precision.eval())
def testZeroTrueAndFalsePositivesGivesZeroPrecision(self):
- predictions = tf.constant([0, 0, 0, 0])
- labels = tf.constant([0, 0, 0, 0])
- precision, update_op = metrics.precision(
- labels, predictions)
+ predictions = constant_op.constant([0, 0, 0, 0])
+ labels = constant_op.constant([0, 0, 0, 0])
+ precision, update_op = metrics.precision(labels, predictions)
with self.test_session() as sess:
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0.0, precision.eval())
-class StreamingRecallTest(tf.test.TestCase):
+class StreamingRecallTest(test.TestCase):
def setUp(self):
np.random.seed(1)
- tf.reset_default_graph()
+ ops.reset_default_graph()
def testVars(self):
metrics.recall(
- predictions=tf.ones((10, 1)), labels=tf.ones((10, 1)))
- _assert_local_variables(self, (
- 'recall/false_negatives/count:0',
- 'recall/true_positives/count:0'
- ))
+ predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
+ _assert_local_variables(self, ('recall/false_negatives/count:0',
+ 'recall/true_positives/count:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.recall(
- predictions=tf.ones((10, 1)),
- labels=tf.ones((10, 1)),
+ predictions=array_ops.ones((10, 1)),
+ labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
- self.assertListEqual(tf.get_collection(my_collection_name), [mean])
+ self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.recall(
- predictions=tf.ones((10, 1)),
- labels=tf.ones((10, 1)),
+ predictions=array_ops.ones((10, 1)),
+ labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
- self.assertListEqual(tf.get_collection(my_collection_name), [update_op])
+ self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
- predictions = tf.random_uniform((10, 3), maxval=1, dtype=tf.int64, seed=1)
- labels = tf.random_uniform((10, 3), maxval=1, dtype=tf.int64, seed=1)
- recall, update_op = metrics.recall(
- labels, predictions)
+ predictions = random_ops.random_uniform(
+ (10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
+ labels = random_ops.random_uniform(
+ (10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
+ recall, update_op = metrics.recall(labels, predictions)
with self.test_session() as sess:
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
@@ -854,34 +872,33 @@ class StreamingRecallTest(tf.test.TestCase):
def testAllCorrect(self):
np_inputs = np.random.randint(0, 2, size=(100, 1))
- predictions = tf.constant(np_inputs)
- labels = tf.constant(np_inputs)
+ predictions = constant_op.constant(np_inputs)
+ labels = constant_op.constant(np_inputs)
recall, update_op = metrics.recall(labels, predictions)
with self.test_session() as sess:
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(1, recall.eval())
def testSomeCorrect(self):
- predictions = tf.constant([1, 0, 1, 0], shape=(1, 4))
- labels = tf.constant([0, 1, 1, 0], shape=(1, 4))
+ predictions = constant_op.constant([1, 0, 1, 0], shape=(1, 4))
+ labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
recall, update_op = metrics.recall(labels, predictions)
with self.test_session() as sess:
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, update_op.eval())
self.assertAlmostEqual(0.5, recall.eval())
def testWeighted1d(self):
- predictions = tf.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
- labels = tf.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
- weights = tf.constant([[2], [5]])
- recall, update_op = metrics.recall(
- labels, predictions, weights=weights)
+ predictions = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
+ labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
+ weights = constant_op.constant([[2], [5]])
+ recall, update_op = metrics.recall(labels, predictions, weights=weights)
with self.test_session() as sess:
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
weighted_tp = 2.0 + 5.0
weighted_t = (2.0 + 2.0) + (5.0 + 5.0)
expected_precision = weighted_tp / weighted_t
@@ -889,14 +906,13 @@ class StreamingRecallTest(tf.test.TestCase):
self.assertAlmostEqual(expected_precision, recall.eval())
def testWeighted2d(self):
- predictions = tf.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
- labels = tf.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
- weights = tf.constant([[1, 2, 3, 4], [4, 3, 2, 1]])
- recall, update_op = metrics.recall(
- labels, predictions, weights=weights)
+ predictions = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
+ labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
+ weights = constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]])
+ recall, update_op = metrics.recall(labels, predictions, weights=weights)
with self.test_session() as sess:
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
weighted_tp = 3.0 + 1.0
weighted_t = (2.0 + 3.0) + (4.0 + 1.0)
expected_precision = weighted_tp / weighted_t
@@ -906,66 +922,62 @@ class StreamingRecallTest(tf.test.TestCase):
def testAllIncorrect(self):
np_inputs = np.random.randint(0, 2, size=(100, 1))
- predictions = tf.constant(np_inputs)
- labels = tf.constant(1 - np_inputs)
+ predictions = constant_op.constant(np_inputs)
+ labels = constant_op.constant(1 - np_inputs)
recall, update_op = metrics.recall(labels, predictions)
with self.test_session() as sess:
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, recall.eval())
def testZeroTruePositivesAndFalseNegativesGivesZeroRecall(self):
- predictions = tf.zeros((1, 4))
- labels = tf.zeros((1, 4))
+ predictions = array_ops.zeros((1, 4))
+ labels = array_ops.zeros((1, 4))
recall, update_op = metrics.recall(labels, predictions)
with self.test_session() as sess:
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, recall.eval())
-class StreamingAUCTest(tf.test.TestCase):
+class StreamingAUCTest(test.TestCase):
def setUp(self):
np.random.seed(1)
- tf.reset_default_graph()
+ ops.reset_default_graph()
def testVars(self):
- metrics.auc(
- predictions=tf.ones((10, 1)), labels=tf.ones((10, 1)))
- _assert_local_variables(self, (
- 'auc/true_positives:0',
- 'auc/false_negatives:0',
- 'auc/false_positives:0',
- 'auc/true_negatives:0'
- ))
+ metrics.auc(predictions=array_ops.ones((10, 1)),
+ labels=array_ops.ones((10, 1)))
+ _assert_local_variables(self,
+ ('auc/true_positives:0', 'auc/false_negatives:0',
+ 'auc/false_positives:0', 'auc/true_negatives:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
- mean, _ = metrics.auc(
- predictions=tf.ones((10, 1)),
- labels=tf.ones((10, 1)),
- metrics_collections=[my_collection_name])
- self.assertListEqual(tf.get_collection(my_collection_name), [mean])
+ mean, _ = metrics.auc(predictions=array_ops.ones((10, 1)),
+ labels=array_ops.ones((10, 1)),
+ metrics_collections=[my_collection_name])
+ self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
- _, update_op = metrics.auc(
- predictions=tf.ones((10, 1)),
- labels=tf.ones((10, 1)),
- updates_collections=[my_collection_name])
- self.assertListEqual(tf.get_collection(my_collection_name), [update_op])
+ _, update_op = metrics.auc(predictions=array_ops.ones((10, 1)),
+ labels=array_ops.ones((10, 1)),
+ updates_collections=[my_collection_name])
+ self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
- predictions = tf.random_uniform((10, 3), maxval=1, dtype=tf.float32, seed=1)
- labels = tf.random_uniform((10, 3), maxval=1, dtype=tf.int64, seed=1)
- auc, update_op = metrics.auc(
- labels, predictions)
+ predictions = random_ops.random_uniform(
+ (10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
+ labels = random_ops.random_uniform(
+ (10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
+ auc, update_op = metrics.auc(labels, predictions)
with self.test_session() as sess:
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
@@ -983,84 +995,89 @@ class StreamingAUCTest(tf.test.TestCase):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.test_session() as sess:
- predictions = tf.constant(inputs, dtype=tf.float32)
- labels = tf.constant(inputs)
+ predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
+ labels = constant_op.constant(inputs)
auc, update_op = metrics.auc(labels, predictions, curve=curve)
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
self.assertEqual(1, sess.run(update_op))
self.assertEqual(1, auc.eval())
def testSomeCorrect(self):
with self.test_session() as sess:
- predictions = tf.constant([1, 0, 1, 0], shape=(1, 4), dtype=tf.float32)
- labels = tf.constant([0, 1, 1, 0], shape=(1, 4))
+ predictions = constant_op.constant(
+ [1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
+ labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
auc, update_op = metrics.auc(labels, predictions)
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, sess.run(update_op))
self.assertAlmostEqual(0.5, auc.eval())
def testWeighted1d(self):
with self.test_session() as sess:
- predictions = tf.constant([1, 0, 1, 0], shape=(1, 4), dtype=tf.float32)
- labels = tf.constant([0, 1, 1, 0], shape=(1, 4))
- weights = tf.constant([2], shape=(1, 1))
- auc, update_op = metrics.auc(labels,
- predictions, weights=weights)
+ predictions = constant_op.constant(
+ [1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
+ labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
+ weights = constant_op.constant([2], shape=(1, 1))
+ auc, update_op = metrics.auc(labels, predictions, weights=weights)
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, sess.run(update_op), 5)
self.assertAlmostEqual(0.5, auc.eval(), 5)
def testWeighted2d(self):
with self.test_session() as sess:
- predictions = tf.constant([1, 0, 1, 0], shape=(1, 4), dtype=tf.float32)
- labels = tf.constant([0, 1, 1, 0], shape=(1, 4))
- weights = tf.constant([1, 2, 3, 4], shape=(1, 4))
- auc, update_op = metrics.auc(labels,
- predictions, weights=weights)
+ predictions = constant_op.constant(
+ [1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
+ labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
+ weights = constant_op.constant([1, 2, 3, 4], shape=(1, 4))
+ auc, update_op = metrics.auc(labels, predictions, weights=weights)
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.7, sess.run(update_op), 5)
self.assertAlmostEqual(0.7, auc.eval(), 5)
def testAUCPRSpecialCase(self):
with self.test_session() as sess:
- predictions = tf.constant([0.1, 0.4, 0.35, 0.8],
- shape=(1, 4), dtype=tf.float32)
- labels = tf.constant([0, 0, 1, 1], shape=(1, 4))
+ predictions = constant_op.constant(
+ [0.1, 0.4, 0.35, 0.8], shape=(1, 4), dtype=dtypes_lib.float32)
+ labels = constant_op.constant([0, 0, 1, 1], shape=(1, 4))
auc, update_op = metrics.auc(labels, predictions, curve='PR')
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.79166, sess.run(update_op), delta=1e-3)
self.assertAlmostEqual(0.79166, auc.eval(), delta=1e-3)
def testAnotherAUCPRSpecialCase(self):
with self.test_session() as sess:
- predictions = tf.constant([0.1, 0.4, 0.35, 0.8, 0.1, 0.135, 0.81],
- shape=(1, 7), dtype=tf.float32)
- labels = tf.constant([0, 0, 1, 0, 1, 0, 1], shape=(1, 7))
+ predictions = constant_op.constant(
+ [0.1, 0.4, 0.35, 0.8, 0.1, 0.135, 0.81],
+ shape=(1, 7),
+ dtype=dtypes_lib.float32)
+ labels = constant_op.constant([0, 0, 1, 0, 1, 0, 1], shape=(1, 7))
auc, update_op = metrics.auc(labels, predictions, curve='PR')
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.610317, sess.run(update_op), delta=1e-3)
self.assertAlmostEqual(0.610317, auc.eval(), delta=1e-3)
def testThirdAUCPRSpecialCase(self):
with self.test_session() as sess:
- predictions = tf.constant([0.0, 0.1, 0.2, 0.33, 0.3, 0.4, 0.5],
- shape=(1, 7), dtype=tf.float32)
- labels = tf.constant([0, 0, 0, 0, 1, 1, 1], shape=(1, 7))
+ predictions = constant_op.constant(
+ [0.0, 0.1, 0.2, 0.33, 0.3, 0.4, 0.5],
+ shape=(1, 7),
+ dtype=dtypes_lib.float32)
+ labels = constant_op.constant([0, 0, 0, 0, 1, 1, 1], shape=(1, 7))
auc, update_op = metrics.auc(labels, predictions, curve='PR')
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.90277, sess.run(update_op), delta=1e-3)
self.assertAlmostEqual(0.90277, auc.eval(), delta=1e-3)
@@ -1069,35 +1086,33 @@ class StreamingAUCTest(tf.test.TestCase):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.test_session() as sess:
- predictions = tf.constant(inputs, dtype=tf.float32)
- labels = tf.constant(1 - inputs, dtype=tf.float32)
+ predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
+ labels = constant_op.constant(1 - inputs, dtype=dtypes_lib.float32)
auc, update_op = metrics.auc(labels, predictions)
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0, sess.run(update_op))
self.assertAlmostEqual(0, auc.eval())
def testZeroTruePositivesAndFalseNegativesGivesOneAUC(self):
with self.test_session() as sess:
- predictions = tf.zeros([4], dtype=tf.float32)
- labels = tf.zeros([4])
+ predictions = array_ops.zeros([4], dtype=dtypes_lib.float32)
+ labels = array_ops.zeros([4])
auc, update_op = metrics.auc(labels, predictions)
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1, sess.run(update_op), 6)
self.assertAlmostEqual(1, auc.eval(), 6)
def testRecallOneAndPrecisionOneGivesOnePRAUC(self):
with self.test_session() as sess:
- predictions = tf.ones([4], dtype=tf.float32)
- labels = tf.ones([4])
- auc, update_op = metrics.auc(labels,
- predictions,
- curve='PR')
+ predictions = array_ops.ones([4], dtype=dtypes_lib.float32)
+ labels = array_ops.ones([4])
+ auc, update_op = metrics.auc(labels, predictions, curve='PR')
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1, sess.run(update_op), 6)
self.assertAlmostEqual(1, auc.eval(), 6)
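# Annotation, not part of this change: with every prediction and label equal
# to 1, any threshold below 1 gives precision = recall = 1 with no false
# positives or negatives, so the PR curve collapses to area 1.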
@@ -1143,32 +1158,33 @@ class StreamingAUCTest(tf.test.TestCase):
def _enqueue_as_batches(x, enqueue_ops):
x_batches = x.astype(np.float32).reshape((num_batches, batch_size))
- x_queue = tf.FIFOQueue(num_batches, dtypes=tf.float32,
- shapes=(batch_size,))
+ x_queue = data_flow_ops.FIFOQueue(
+ num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
for i in range(num_batches):
enqueue_ops[i].append(x_queue.enqueue(x_batches[i, :]))
return x_queue.dequeue()
- for weights in (None,
- np.ones(num_samples),
- np.random.exponential(scale=1.0, size=num_samples)):
+ for weights in (None, np.ones(num_samples), np.random.exponential(
+ scale=1.0, size=num_samples)):
expected_auc = self.np_auc(predictions, labels, weights)
with self.test_session() as sess:
enqueue_ops = [[] for i in range(num_batches)]
tf_predictions = _enqueue_as_batches(predictions, enqueue_ops)
tf_labels = _enqueue_as_batches(labels, enqueue_ops)
- tf_weights = (_enqueue_as_batches(weights, enqueue_ops)
- if weights is not None else None)
+ tf_weights = (_enqueue_as_batches(weights, enqueue_ops) if
+ weights is not None else None)
for i in range(num_batches):
sess.run(enqueue_ops[i])
- auc, update_op = metrics.auc(
- tf_labels, tf_predictions, curve='ROC', num_thresholds=500,
- weights=tf_weights)
+ auc, update_op = metrics.auc(tf_labels,
+ tf_predictions,
+ curve='ROC',
+ num_thresholds=500,
+ weights=tf_weights)
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
for i in range(num_batches):
sess.run(update_op)
@@ -1178,48 +1194,51 @@ class StreamingAUCTest(tf.test.TestCase):
self.assertAlmostEqual(expected_auc, auc.eval(), 2)
-class SpecificityAtSensitivityTest(tf.test.TestCase):
+class SpecificityAtSensitivityTest(test.TestCase):
def setUp(self):
np.random.seed(1)
- tf.reset_default_graph()
+ ops.reset_default_graph()
def testVars(self):
metrics.specificity_at_sensitivity(
- predictions=tf.ones((10, 1)), labels=tf.ones((10, 1)), sensitivity=0.7)
- _assert_local_variables(self, (
- 'specificity_at_sensitivity/true_positives:0',
- 'specificity_at_sensitivity/false_negatives:0',
- 'specificity_at_sensitivity/false_positives:0',
- 'specificity_at_sensitivity/true_negatives:0'
- ))
+ predictions=array_ops.ones((10, 1)),
+ labels=array_ops.ones((10, 1)),
+ sensitivity=0.7)
+ _assert_local_variables(self,
+ ('specificity_at_sensitivity/true_positives:0',
+ 'specificity_at_sensitivity/false_negatives:0',
+ 'specificity_at_sensitivity/false_positives:0',
+ 'specificity_at_sensitivity/true_negatives:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.specificity_at_sensitivity(
- predictions=tf.ones((10, 1)),
- labels=tf.ones((10, 1)),
+ predictions=array_ops.ones((10, 1)),
+ labels=array_ops.ones((10, 1)),
sensitivity=0.7,
metrics_collections=[my_collection_name])
- self.assertListEqual(tf.get_collection(my_collection_name), [mean])
+ self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.specificity_at_sensitivity(
- predictions=tf.ones((10, 1)),
- labels=tf.ones((10, 1)),
+ predictions=array_ops.ones((10, 1)),
+ labels=array_ops.ones((10, 1)),
sensitivity=0.7,
updates_collections=[my_collection_name])
- self.assertListEqual(tf.get_collection(my_collection_name), [update_op])
+ self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
- predictions = tf.random_uniform((10, 3), maxval=1, dtype=tf.float32, seed=1)
- labels = tf.random_uniform((10, 3), maxval=2, dtype=tf.int64, seed=1)
+ predictions = random_ops.random_uniform(
+ (10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
+ labels = random_ops.random_uniform(
+ (10, 3), maxval=2, dtype=dtypes_lib.int64, seed=1)
specificity, update_op = metrics.specificity_at_sensitivity(
labels, predictions, sensitivity=0.7)
with self.test_session() as sess:
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
@@ -1233,126 +1252,129 @@ class SpecificityAtSensitivityTest(tf.test.TestCase):
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
- predictions = tf.constant(inputs, dtype=tf.float32)
- labels = tf.constant(inputs)
+ predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
+ labels = constant_op.constant(inputs)
specificity, update_op = metrics.specificity_at_sensitivity(
labels, predictions, sensitivity=0.7)
with self.test_session() as sess:
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
self.assertEqual(1, sess.run(update_op))
self.assertEqual(1, specificity.eval())
def testSomeCorrectHighSensitivity(self):
- predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0,
- 0.1, 0.45, 0.5, 0.8, 0.9]
+ predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.45, 0.5, 0.8, 0.9]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
- predictions = tf.constant(predictions_values, dtype=tf.float32)
- labels = tf.constant(labels_values)
+ predictions = constant_op.constant(
+ predictions_values, dtype=dtypes_lib.float32)
+ labels = constant_op.constant(labels_values)
specificity, update_op = metrics.specificity_at_sensitivity(
labels, predictions, sensitivity=0.8)
with self.test_session() as sess:
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1.0, sess.run(update_op))
self.assertAlmostEqual(1.0, specificity.eval())
def testSomeCorrectLowSensitivity(self):
- predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0,
- 0.1, 0.2, 0.2, 0.26, 0.26]
+ predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.2, 0.2, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
- predictions = tf.constant(predictions_values, dtype=tf.float32)
- labels = tf.constant(labels_values)
+ predictions = constant_op.constant(
+ predictions_values, dtype=dtypes_lib.float32)
+ labels = constant_op.constant(labels_values)
specificity, update_op = metrics.specificity_at_sensitivity(
labels, predictions, sensitivity=0.4)
with self.test_session() as sess:
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.6, sess.run(update_op))
self.assertAlmostEqual(0.6, specificity.eval())
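# Annotation, not part of this change: near threshold 0.2 only the two
# positives scored 0.26 stay above it (sensitivity 2/5 = 0.4, exactly the
# target), while the negatives scored 0.0, 0.1 and 0.2 fall at or below it,
# so specificity is 3/5 = 0.6.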
def testWeighted1d(self):
- predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0,
- 0.1, 0.2, 0.2, 0.26, 0.26]
+ predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.2, 0.2, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weights_values = [3]
- predictions = tf.constant(predictions_values, dtype=tf.float32)
- labels = tf.constant(labels_values)
- weights = tf.constant(weights_values)
+ predictions = constant_op.constant(
+ predictions_values, dtype=dtypes_lib.float32)
+ labels = constant_op.constant(labels_values)
+ weights = constant_op.constant(weights_values)
specificity, update_op = metrics.specificity_at_sensitivity(
labels, predictions, weights=weights, sensitivity=0.4)
with self.test_session() as sess:
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.6, sess.run(update_op))
self.assertAlmostEqual(0.6, specificity.eval())
def testWeighted2d(self):
- predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0,
- 0.1, 0.2, 0.2, 0.26, 0.26]
+ predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.2, 0.2, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weights_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
- predictions = tf.constant(predictions_values, dtype=tf.float32)
- labels = tf.constant(labels_values)
- weights = tf.constant(weights_values)
+ predictions = constant_op.constant(
+ predictions_values, dtype=dtypes_lib.float32)
+ labels = constant_op.constant(labels_values)
+ weights = constant_op.constant(weights_values)
specificity, update_op = metrics.specificity_at_sensitivity(
labels, predictions, weights=weights, sensitivity=0.4)
with self.test_session() as sess:
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(8.0 / 15.0, sess.run(update_op))
self.assertAlmostEqual(8.0 / 15.0, specificity.eval())
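# Annotation, not part of this change: with weights 1..10 the positive mass is
# 6+7+8+9+10 = 40 and the negative mass is 1+2+3+4+5 = 15. Near threshold 0.2
# the positives at 0.26 (weights 9, 10) stay above it, so sensitivity is
# 19/40 >= 0.4, while the negatives at 0.1, 0.2 and 0.0 (weights 1, 2, 5)
# fall at or below it, giving specificity (1+2+5)/15 = 8/15.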
-class StreamingSensitivityAtSpecificityTest(tf.test.TestCase):
+class StreamingSensitivityAtSpecificityTest(test.TestCase):
def setUp(self):
np.random.seed(1)
- tf.reset_default_graph()
+ ops.reset_default_graph()
def testVars(self):
metrics.sensitivity_at_specificity(
- predictions=tf.ones((10, 1)), labels=tf.ones((10, 1)), specificity=0.7)
- _assert_local_variables(self, (
- 'sensitivity_at_specificity/true_positives:0',
- 'sensitivity_at_specificity/false_negatives:0',
- 'sensitivity_at_specificity/false_positives:0',
- 'sensitivity_at_specificity/true_negatives:0'
- ))
+ predictions=array_ops.ones((10, 1)),
+ labels=array_ops.ones((10, 1)),
+ specificity=0.7)
+ _assert_local_variables(self,
+ ('sensitivity_at_specificity/true_positives:0',
+ 'sensitivity_at_specificity/false_negatives:0',
+ 'sensitivity_at_specificity/false_positives:0',
+ 'sensitivity_at_specificity/true_negatives:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.sensitivity_at_specificity(
- predictions=tf.ones((10, 1)),
- labels=tf.ones((10, 1)),
+ predictions=array_ops.ones((10, 1)),
+ labels=array_ops.ones((10, 1)),
specificity=0.7,
metrics_collections=[my_collection_name])
- self.assertListEqual(tf.get_collection(my_collection_name), [mean])
+ self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.sensitivity_at_specificity(
- predictions=tf.ones((10, 1)),
- labels=tf.ones((10, 1)),
+ predictions=array_ops.ones((10, 1)),
+ labels=array_ops.ones((10, 1)),
specificity=0.7,
updates_collections=[my_collection_name])
- self.assertListEqual(tf.get_collection(my_collection_name), [update_op])
+ self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
- predictions = tf.random_uniform((10, 3), maxval=1, dtype=tf.float32, seed=1)
- labels = tf.random_uniform((10, 3), maxval=2, dtype=tf.int64, seed=1)
+ predictions = random_ops.random_uniform(
+ (10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
+ labels = random_ops.random_uniform(
+ (10, 3), maxval=2, dtype=dtypes_lib.int64, seed=1)
sensitivity, update_op = metrics.sensitivity_at_specificity(
labels, predictions, specificity=0.7)
with self.test_session() as sess:
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
@@ -1366,120 +1388,121 @@ class StreamingSensitivityAtSpecificityTest(tf.test.TestCase):
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
- predictions = tf.constant(inputs, dtype=tf.float32)
- labels = tf.constant(inputs)
+ predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
+ labels = constant_op.constant(inputs)
specificity, update_op = metrics.sensitivity_at_specificity(
labels, predictions, specificity=0.7)
with self.test_session() as sess:
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
self.assertEqual(1, sess.run(update_op))
self.assertEqual(1, specificity.eval())
def testSomeCorrectHighSpecificity(self):
- predictions_values = [0.0, 0.1, 0.2, 0.3, 0.4,
- 0.1, 0.45, 0.5, 0.8, 0.9]
+ predictions_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.1, 0.45, 0.5, 0.8, 0.9]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
- predictions = tf.constant(predictions_values, dtype=tf.float32)
- labels = tf.constant(labels_values)
+ predictions = constant_op.constant(
+ predictions_values, dtype=dtypes_lib.float32)
+ labels = constant_op.constant(labels_values)
specificity, update_op = metrics.sensitivity_at_specificity(
labels, predictions, specificity=0.8)
with self.test_session() as sess:
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.8, sess.run(update_op))
self.assertAlmostEqual(0.8, specificity.eval())
def testSomeCorrectLowSpecificity(self):
- predictions_values = [0.0, 0.1, 0.2, 0.3, 0.4,
- 0.01, 0.02, 0.25, 0.26, 0.26]
+ predictions_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
- predictions = tf.constant(predictions_values, dtype=tf.float32)
- labels = tf.constant(labels_values)
+ predictions = constant_op.constant(
+ predictions_values, dtype=dtypes_lib.float32)
+ labels = constant_op.constant(labels_values)
specificity, update_op = metrics.sensitivity_at_specificity(
labels, predictions, specificity=0.4)
with self.test_session() as sess:
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.6, sess.run(update_op))
self.assertAlmostEqual(0.6, specificity.eval())
def testWeighted(self):
- predictions_values = [0.0, 0.1, 0.2, 0.3, 0.4,
- 0.01, 0.02, 0.25, 0.26, 0.26]
+ predictions_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weights_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
- predictions = tf.constant(predictions_values, dtype=tf.float32)
- labels = tf.constant(labels_values)
- weights = tf.constant(weights_values)
+ predictions = constant_op.constant(
+ predictions_values, dtype=dtypes_lib.float32)
+ labels = constant_op.constant(labels_values)
+ weights = constant_op.constant(weights_values)
specificity, update_op = metrics.sensitivity_at_specificity(
labels, predictions, weights=weights, specificity=0.4)
with self.test_session() as sess:
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.675, sess.run(update_op))
self.assertAlmostEqual(0.675, specificity.eval())
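# Annotation, not part of this change: at threshold 0.2 the negatives at 0.0,
# 0.1 and 0.2 (weights 1, 2, 3) are true negatives, so specificity is
# 6/15 = 0.4 exactly, and the positives above the threshold carry weights
# 8+9+10 = 27 out of 40, giving sensitivity 27/40 = 0.675.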
# TODO(nsilberman): Break this up into two sets of tests.
-class StreamingPrecisionRecallThresholdsTest(tf.test.TestCase):
+class StreamingPrecisionRecallThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
- tf.reset_default_graph()
+ ops.reset_default_graph()
def testVars(self):
metrics.precision_at_thresholds(
- predictions=tf.ones((10, 1)), labels=tf.ones((10, 1)),
+ predictions=array_ops.ones((10, 1)),
+ labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0])
_assert_local_variables(self, (
'precision_at_thresholds/true_positives:0',
- 'precision_at_thresholds/false_positives:0',
- ))
+ 'precision_at_thresholds/false_positives:0',))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
prec, _ = metrics.precision_at_thresholds(
- predictions=tf.ones((10, 1)),
- labels=tf.ones((10, 1)),
+ predictions=array_ops.ones((10, 1)),
+ labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
metrics_collections=[my_collection_name])
rec, _ = metrics.recall_at_thresholds(
- predictions=tf.ones((10, 1)),
- labels=tf.ones((10, 1)),
+ predictions=array_ops.ones((10, 1)),
+ labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
metrics_collections=[my_collection_name])
- self.assertListEqual(tf.get_collection(my_collection_name), [prec, rec])
+ self.assertListEqual(ops.get_collection(my_collection_name), [prec, rec])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, precision_op = metrics.precision_at_thresholds(
- predictions=tf.ones((10, 1)),
- labels=tf.ones((10, 1)),
+ predictions=array_ops.ones((10, 1)),
+ labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
updates_collections=[my_collection_name])
_, recall_op = metrics.recall_at_thresholds(
- predictions=tf.ones((10, 1)),
- labels=tf.ones((10, 1)),
+ predictions=array_ops.ones((10, 1)),
+ labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
updates_collections=[my_collection_name])
- self.assertListEqual(tf.get_collection(my_collection_name),
- [precision_op, recall_op])
+ self.assertListEqual(
+ ops.get_collection(my_collection_name), [precision_op, recall_op])
def testValueTensorIsIdempotent(self):
- predictions = tf.random_uniform((10, 3), maxval=1, dtype=tf.float32, seed=1)
- labels = tf.random_uniform((10, 3), maxval=1, dtype=tf.int64, seed=1)
+ predictions = random_ops.random_uniform(
+ (10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
+ labels = random_ops.random_uniform(
+ (10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
thresholds = [0, 0.5, 1.0]
- prec, prec_op = metrics.precision_at_thresholds(
- labels, predictions, thresholds)
- rec, rec_op = metrics.recall_at_thresholds(
- labels, predictions, thresholds)
+ prec, prec_op = metrics.precision_at_thresholds(labels, predictions,
+ thresholds)
+ rec, rec_op = metrics.recall_at_thresholds(labels, predictions, thresholds)
with self.test_session() as sess:
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
# Run several updates, then verify idempotency.
sess.run([prec_op, rec_op])
@@ -1495,15 +1518,15 @@ class StreamingPrecisionRecallThresholdsTest(tf.test.TestCase):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.test_session() as sess:
- predictions = tf.constant(inputs, dtype=tf.float32)
- labels = tf.constant(inputs)
+ predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
+ labels = constant_op.constant(inputs)
thresholds = [0.5]
- prec, prec_op = metrics.precision_at_thresholds(
- labels, predictions, thresholds)
- rec, rec_op = metrics.recall_at_thresholds(
- labels, predictions, thresholds)
+ prec, prec_op = metrics.precision_at_thresholds(labels, predictions,
+ thresholds)
+ rec, rec_op = metrics.recall_at_thresholds(labels, predictions,
+ thresholds)
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertEqual(1, prec.eval())
@@ -1511,15 +1534,16 @@ class StreamingPrecisionRecallThresholdsTest(tf.test.TestCase):
def testSomeCorrect(self):
with self.test_session() as sess:
- predictions = tf.constant([1, 0, 1, 0], shape=(1, 4), dtype=tf.float32)
- labels = tf.constant([0, 1, 1, 0], shape=(1, 4))
+ predictions = constant_op.constant(
+ [1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
+ labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
thresholds = [0.5]
- prec, prec_op = metrics.precision_at_thresholds(
- labels, predictions, thresholds)
- rec, rec_op = metrics.recall_at_thresholds(
- labels, predictions, thresholds)
+ prec, prec_op = metrics.precision_at_thresholds(labels, predictions,
+ thresholds)
+ rec, rec_op = metrics.recall_at_thresholds(labels, predictions,
+ thresholds)
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(0.5, prec.eval())
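# Annotation, not part of this change: at threshold 0.5 the predicted
# positives are indices 0 and 2, of which only index 2 is labeled positive,
# so precision is 1/2; one of the two labeled positives (indices 1 and 2) is
# recovered, so recall is 1/2 as well.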
@@ -1529,15 +1553,15 @@ class StreamingPrecisionRecallThresholdsTest(tf.test.TestCase):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.test_session() as sess:
- predictions = tf.constant(inputs, dtype=tf.float32)
- labels = tf.constant(1 - inputs, dtype=tf.float32)
+ predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
+ labels = constant_op.constant(1 - inputs, dtype=dtypes_lib.float32)
thresholds = [0.5]
- prec, prec_op = metrics.precision_at_thresholds(
- labels, predictions, thresholds)
- rec, rec_op = metrics.recall_at_thresholds(
- labels, predictions, thresholds)
+ prec, prec_op = metrics.precision_at_thresholds(labels, predictions,
+ thresholds)
+ rec, rec_op = metrics.recall_at_thresholds(labels, predictions,
+ thresholds)
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(0, prec.eval())
@@ -1545,24 +1569,27 @@ class StreamingPrecisionRecallThresholdsTest(tf.test.TestCase):
def testWeights1d(self):
with self.test_session() as sess:
- predictions = tf.constant([[1, 0], [1, 0]], shape=(2, 2),
- dtype=tf.float32)
- labels = tf.constant([[0, 1], [1, 0]], shape=(2, 2))
- weights = tf.constant([[0], [1]], shape=(2, 1), dtype=tf.float32)
+ predictions = constant_op.constant(
+ [[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)
+ labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
+ weights = constant_op.constant(
+ [[0], [1]], shape=(2, 1), dtype=dtypes_lib.float32)
thresholds = [0.5, 1.1]
prec, prec_op = metrics.precision_at_thresholds(
labels, predictions, thresholds, weights=weights)
rec, rec_op = metrics.recall_at_thresholds(
labels, predictions, thresholds, weights=weights)
- [prec_low, prec_high] = tf.split(value=prec, num_or_size_splits=2, axis=0)
- prec_low = tf.reshape(prec_low, shape=())
- prec_high = tf.reshape(prec_high, shape=())
- [rec_low, rec_high] = tf.split(value=rec, num_or_size_splits=2, axis=0)
- rec_low = tf.reshape(rec_low, shape=())
- rec_high = tf.reshape(rec_high, shape=())
+ [prec_low, prec_high] = array_ops.split(
+ value=prec, num_or_size_splits=2, axis=0)
+ prec_low = array_ops.reshape(prec_low, shape=())
+ prec_high = array_ops.reshape(prec_high, shape=())
+ [rec_low, rec_high] = array_ops.split(
+ value=rec, num_or_size_splits=2, axis=0)
+ rec_low = array_ops.reshape(rec_low, shape=())
+ rec_high = array_ops.reshape(rec_high, shape=())
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(1.0, prec_low.eval(), places=5)
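# Annotation, not part of this change: the (2, 1)-shaped weights zero out the
# first example row, leaving only (prediction 1, label 1) and (prediction 0,
# label 0) in play at threshold 0.5, i.e. one weighted true positive and no
# false positives, hence the exact precision of 1.0 here.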
@@ -1572,24 +1599,27 @@ class StreamingPrecisionRecallThresholdsTest(tf.test.TestCase):
def testWeights2d(self):
with self.test_session() as sess:
- predictions = tf.constant([[1, 0], [1, 0]], shape=(2, 2),
- dtype=tf.float32)
- labels = tf.constant([[0, 1], [1, 0]], shape=(2, 2))
- weights = tf.constant([[0, 0], [1, 1]], shape=(2, 2), dtype=tf.float32)
+ predictions = constant_op.constant(
+ [[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)
+ labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
+ weights = constant_op.constant(
+ [[0, 0], [1, 1]], shape=(2, 2), dtype=dtypes_lib.float32)
thresholds = [0.5, 1.1]
prec, prec_op = metrics.precision_at_thresholds(
labels, predictions, thresholds, weights=weights)
rec, rec_op = metrics.recall_at_thresholds(
labels, predictions, thresholds, weights=weights)
- [prec_low, prec_high] = tf.split(value=prec, num_or_size_splits=2, axis=0)
- prec_low = tf.reshape(prec_low, shape=())
- prec_high = tf.reshape(prec_high, shape=())
- [rec_low, rec_high] = tf.split(value=rec, num_or_size_splits=2, axis=0)
- rec_low = tf.reshape(rec_low, shape=())
- rec_high = tf.reshape(rec_high, shape=())
+ [prec_low, prec_high] = array_ops.split(
+ value=prec, num_or_size_splits=2, axis=0)
+ prec_low = array_ops.reshape(prec_low, shape=())
+ prec_high = array_ops.reshape(prec_high, shape=())
+ [rec_low, rec_high] = array_ops.split(
+ value=rec, num_or_size_splits=2, axis=0)
+ rec_low = array_ops.reshape(rec_low, shape=())
+ rec_high = array_ops.reshape(rec_high, shape=())
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(1.0, prec_low.eval(), places=5)
@@ -1599,18 +1629,21 @@ class StreamingPrecisionRecallThresholdsTest(tf.test.TestCase):
def testExtremeThresholds(self):
with self.test_session() as sess:
- predictions = tf.constant([1, 0, 1, 0], shape=(1, 4), dtype=tf.float32)
- labels = tf.constant([0, 1, 1, 1], shape=(1, 4))
+ predictions = constant_op.constant(
+ [1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
+ labels = constant_op.constant([0, 1, 1, 1], shape=(1, 4))
thresholds = [-1.0, 2.0] # lower/higher than any values
- prec, prec_op = metrics.precision_at_thresholds(
- labels, predictions, thresholds)
- rec, rec_op = metrics.recall_at_thresholds(
- labels, predictions, thresholds)
+ prec, prec_op = metrics.precision_at_thresholds(labels, predictions,
+ thresholds)
+ rec, rec_op = metrics.recall_at_thresholds(labels, predictions,
+ thresholds)
- [prec_low, prec_high] = tf.split(value=prec, num_or_size_splits=2, axis=0)
- [rec_low, rec_high] = tf.split(value=rec, num_or_size_splits=2, axis=0)
+ [prec_low, prec_high] = array_ops.split(
+ value=prec, num_or_size_splits=2, axis=0)
+ [rec_low, rec_high] = array_ops.split(
+ value=rec, num_or_size_splits=2, axis=0)
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(0.75, prec_low.eval())
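# Annotation, not part of this change: at threshold -1.0 all four entries are
# predicted positive, so TP=3 and FP=1 give precision 3/4 = 0.75 (and recall
# 3/3); at threshold 2.0 nothing is predicted positive at all.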
@@ -1620,15 +1653,15 @@ class StreamingPrecisionRecallThresholdsTest(tf.test.TestCase):
def testZeroLabelsPredictions(self):
with self.test_session() as sess:
- predictions = tf.zeros([4], dtype=tf.float32)
- labels = tf.zeros([4])
+ predictions = array_ops.zeros([4], dtype=dtypes_lib.float32)
+ labels = array_ops.zeros([4])
thresholds = [0.5]
- prec, prec_op = metrics.precision_at_thresholds(
- labels, predictions, thresholds)
- rec, rec_op = metrics.recall_at_thresholds(
- labels, predictions, thresholds)
+ prec, prec_op = metrics.precision_at_thresholds(labels, predictions,
+ thresholds)
+ rec, rec_op = metrics.recall_at_thresholds(labels, predictions,
+ thresholds)
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(0, prec.eval(), 6)
@@ -1675,26 +1708,28 @@ class StreamingPrecisionRecallThresholdsTest(tf.test.TestCase):
labels_batches = labels.reshape((batch_size, num_batches))
# Enqueue the data:
- predictions_queue = tf.FIFOQueue(num_batches, dtypes=tf.float32,
- shapes=(batch_size,))
- labels_queue = tf.FIFOQueue(num_batches, dtypes=tf.float32,
- shapes=(batch_size,))
+ predictions_queue = data_flow_ops.FIFOQueue(
+ num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
+ labels_queue = data_flow_ops.FIFOQueue(
+ num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
for i in range(int(num_batches)):
- tf_prediction = tf.constant(predictions_batches[:, i])
- tf_label = tf.constant(labels_batches[:, i])
- sess.run([predictions_queue.enqueue(tf_prediction),
- labels_queue.enqueue(tf_label)])
+ tf_prediction = constant_op.constant(predictions_batches[:, i])
+ tf_label = constant_op.constant(labels_batches[:, i])
+ sess.run([
+ predictions_queue.enqueue(tf_prediction),
+ labels_queue.enqueue(tf_label)
+ ])
tf_predictions = predictions_queue.dequeue()
tf_labels = labels_queue.dequeue()
- prec, prec_op = metrics.precision_at_thresholds(
- tf_labels, tf_predictions, thresholds)
- rec, rec_op = metrics.recall_at_thresholds(
- tf_labels, tf_predictions, thresholds)
+ prec, prec_op = metrics.precision_at_thresholds(tf_labels, tf_predictions,
+ thresholds)
+ rec, rec_op = metrics.recall_at_thresholds(tf_labels, tf_predictions,
+ thresholds)
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
for _ in range(int(num_samples / batch_size)):
sess.run([prec_op, rec_op])
# Since this is only approximate, we can't expect a 6 digits match.
@@ -1704,20 +1739,27 @@ class StreamingPrecisionRecallThresholdsTest(tf.test.TestCase):
self.assertAlmostEqual(expected_rec, rec.eval(), 2)
-def _test_sparse_precision_at_k(
- predictions, labels, k, expected, class_id=None, weights=None,
- test_case=None):
- with tf.Graph().as_default() as g, test_case.test_session(g):
+def _test_sparse_precision_at_k(predictions,
+ labels,
+ k,
+ expected,
+ class_id=None,
+ weights=None,
+ test_case=None):
+ with ops.Graph().as_default() as g, test_case.test_session(g):
if weights is not None:
- weights = tf.constant(weights, tf.float32)
+ weights = constant_op.constant(weights, dtypes_lib.float32)
metric, update = metrics.sparse_precision_at_k(
- predictions=tf.constant(predictions, tf.float32), labels=labels,
- k=k, class_id=class_id, weights=weights)
+ predictions=constant_op.constant(predictions, dtypes_lib.float32),
+ labels=labels,
+ k=k,
+ class_id=class_id,
+ weights=weights)
# Fails without initialized vars.
- test_case.assertRaises(tf.OpError, metric.eval)
- test_case.assertRaises(tf.OpError, update.eval)
- tf.variables_initializer(tf.local_variables()).run()
+ test_case.assertRaises(errors_impl.OpError, metric.eval)
+ test_case.assertRaises(errors_impl.OpError, update.eval)
+ variables.variables_initializer(variables.local_variables()).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
@@ -1728,19 +1770,23 @@ def _test_sparse_precision_at_k(
test_case.assertEqual(expected, metric.eval())
-def _test_sparse_average_precision_at_k(
- predictions, labels, k, expected, weights=None, test_case=None):
- with tf.Graph().as_default() as g, test_case.test_session(g):
+def _test_sparse_average_precision_at_k(predictions,
+ labels,
+ k,
+ expected,
+ weights=None,
+ test_case=None):
+ with ops.Graph().as_default() as g, test_case.test_session(g):
if weights is not None:
- weights = tf.constant(weights, tf.float32)
- predictions = tf.constant(predictions, tf.float32)
+ weights = constant_op.constant(weights, dtypes_lib.float32)
+ predictions = constant_op.constant(predictions, dtypes_lib.float32)
metric, update = metrics.sparse_average_precision_at_k(
labels, predictions, k, weights=weights)
# Fails without initialized vars.
- test_case.assertRaises(tf.OpError, metric.eval)
- test_case.assertRaises(tf.OpError, update.eval)
- tf.variables_initializer(tf.local_variables()).run()
+ test_case.assertRaises(errors_impl.OpError, metric.eval)
+ test_case.assertRaises(errors_impl.OpError, update.eval)
+ variables.variables_initializer(variables.local_variables()).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
@@ -1751,7 +1797,7 @@ def _test_sparse_average_precision_at_k(
test_case.assertAlmostEqual(expected, metric.eval())
-class SingleLabelSparsePrecisionTest(tf.test.TestCase):
+class SingleLabelSparsePrecisionTest(test.TestCase):
def setUp(self):
self._predictions = ((0.1, 0.3, 0.2, 0.4), (0.1, 0.2, 0.3, 0.4))
@@ -1760,9 +1806,9 @@ class SingleLabelSparsePrecisionTest(tf.test.TestCase):
# Sparse vs dense, and 1d vs 2d labels should all be handled the same.
self._labels = (
_binary_2d_label_to_1d_sparse_value(indicator_labels),
- _binary_2d_label_to_2d_sparse_value(indicator_labels),
- np.array(class_labels, dtype=np.int64),
- np.array([[class_id] for class_id in class_labels], dtype=np.int64))
+ _binary_2d_label_to_2d_sparse_value(indicator_labels), np.array(
+ class_labels, dtype=np.int64), np.array(
+ [[class_id] for class_id in class_labels], dtype=np.int64))
self._test_sparse_precision_at_k = functools.partial(
_test_sparse_precision_at_k, test_case=self)
self._test_sparse_average_precision_at_k = functools.partial(
@@ -1786,7 +1832,7 @@ class SingleLabelSparsePrecisionTest(tf.test.TestCase):
self._predictions, labels, k=1, expected=1.0 / 2)
-class MultiLabelSparsePrecisionTest(tf.test.TestCase):
+class MultiLabelSparsePrecisionTest(test.TestCase):
def setUp(self):
self._test_sparse_precision_at_k = functools.partial(
@@ -1802,18 +1848,9 @@ class MultiLabelSparsePrecisionTest(tf.test.TestCase):
labels = np.array([labels_ex1], dtype=np.int64)
predictions_ex1 = (0.2, 0.1, 0.0, 0.4, 0.0, 0.5, 0.3)
predictions = (predictions_ex1,)
- precision_ex1 = (
- 0.0 / 1,
- 1.0 / 2,
- 1.0 / 3,
- 2.0 / 4
- )
- avg_precision_ex1 = (
- 0.0 / 1,
- precision_ex1[1] / 2,
- precision_ex1[1] / 3,
- (precision_ex1[1] + precision_ex1[3]) / 4
- )
+ precision_ex1 = (0.0 / 1, 1.0 / 2, 1.0 / 3, 2.0 / 4)
+ avg_precision_ex1 = (0.0 / 1, precision_ex1[1] / 2, precision_ex1[1] / 3,
+ (precision_ex1[1] + precision_ex1[3]) / 4)
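# Annotation, not part of this change: the tuples above follow the usual
# average-precision recurrence, i.e. sum precision@i over the ranks i <= k
# that hit a relevant label, then normalize; the divisors 1..4 coincide with
# min(k, num_labels) when the example has at least k relevant labels.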
for i in xrange(4):
k = i + 1
self._test_sparse_precision_at_k(
@@ -1826,18 +1863,9 @@ class MultiLabelSparsePrecisionTest(tf.test.TestCase):
labels = np.array([labels_ex2], dtype=np.int64)
predictions_ex2 = (0.3, 0.5, 0.0, 0.4, 0.0, 0.1, 0.2)
predictions = (predictions_ex2,)
- precision_ex2 = (
- 0.0 / 1,
- 0.0 / 2,
- 1.0 / 3,
- 2.0 / 4
- )
- avg_precision_ex2 = (
- 0.0 / 1,
- 0.0 / 2,
- precision_ex2[2] / 3,
- (precision_ex2[2] + precision_ex2[3]) / 4
- )
+ precision_ex2 = (0.0 / 1, 0.0 / 2, 1.0 / 3, 2.0 / 4)
+ avg_precision_ex2 = (0.0 / 1, 0.0 / 2, precision_ex2[2] / 3,
+ (precision_ex2[2] + precision_ex2[3]) / 4)
for i in xrange(4):
k = i + 1
self._test_sparse_precision_at_k(
@@ -1849,12 +1877,12 @@ class MultiLabelSparsePrecisionTest(tf.test.TestCase):
# average of the 2 examples.
labels = np.array([labels_ex1, labels_ex2], dtype=np.int64)
predictions = (predictions_ex1, predictions_ex2)
- streaming_precision = [
- (ex1 + ex2) / 2
- for ex1, ex2 in zip(precision_ex1, precision_ex2)]
+ streaming_precision = [(ex1 + ex2) / 2
+ for ex1, ex2 in zip(precision_ex1, precision_ex2)]
streaming_average_precision = [
(ex1 + ex2) / 2
- for ex1, ex2 in zip(avg_precision_ex1, avg_precision_ex2)]
+ for ex1, ex2 in zip(avg_precision_ex1, avg_precision_ex2)
+ ]
for i in xrange(4):
k = i + 1
self._test_sparse_precision_at_k(
@@ -1867,11 +1895,15 @@ class MultiLabelSparsePrecisionTest(tf.test.TestCase):
weights = (0.3, 0.6)
streaming_average_precision = [
(weights[0] * ex1 + weights[1] * ex2) / (weights[0] + weights[1])
- for ex1, ex2 in zip(avg_precision_ex1, avg_precision_ex2)]
+ for ex1, ex2 in zip(avg_precision_ex1, avg_precision_ex2)
+ ]
for i in xrange(4):
k = i + 1
self._test_sparse_average_precision_at_k(
- predictions, labels, k, expected=streaming_average_precision[i],
+ predictions,
+ labels,
+ k,
+ expected=streaming_average_precision[i],
weights=weights)
def test_average_precision_some_labels_out_of_range(self):
@@ -1880,18 +1912,9 @@ class MultiLabelSparsePrecisionTest(tf.test.TestCase):
labels = np.array([labels_ex1], dtype=np.int64)
predictions_ex1 = (0.2, 0.1, 0.0, 0.4, 0.0, 0.5, 0.3)
predictions = (predictions_ex1,)
- precision_ex1 = (
- 0.0 / 1,
- 1.0 / 2,
- 1.0 / 3,
- 2.0 / 4
- )
- avg_precision_ex1 = (
- 0.0 / 1,
- precision_ex1[1] / 2,
- precision_ex1[1] / 3,
- (precision_ex1[1] + precision_ex1[3]) / 4
- )
+ precision_ex1 = (0.0 / 1, 1.0 / 2, 1.0 / 3, 2.0 / 4)
+ avg_precision_ex1 = (0.0 / 1, precision_ex1[1] / 2, precision_ex1[1] / 3,
+ (precision_ex1[1] + precision_ex1[3]) / 4)
for i in xrange(4):
k = i + 1
self._test_sparse_precision_at_k(
@@ -1900,14 +1923,10 @@ class MultiLabelSparsePrecisionTest(tf.test.TestCase):
predictions, labels, k, expected=avg_precision_ex1[i])
def test_three_labels_at_k5_no_predictions(self):
- predictions = [
- [0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
- [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]
- ]
- sparse_labels = _binary_2d_label_to_2d_sparse_value([
- [0, 0, 1, 0, 0, 0, 0, 1, 1, 0],
- [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]
- ])
+ predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
+ [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
+ sparse_labels = _binary_2d_label_to_2d_sparse_value(
+ [[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
@@ -1917,14 +1936,10 @@ class MultiLabelSparsePrecisionTest(tf.test.TestCase):
predictions, labels, k=5, expected=NAN, class_id=class_id)
def test_three_labels_at_k5_no_labels(self):
- predictions = [
- [0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
- [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]
- ]
- sparse_labels = _binary_2d_label_to_2d_sparse_value([
- [0, 0, 1, 0, 0, 0, 0, 1, 1, 0],
- [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]
- ])
+ predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
+ [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
+ sparse_labels = _binary_2d_label_to_2d_sparse_value(
+ [[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
@@ -1934,21 +1949,16 @@ class MultiLabelSparsePrecisionTest(tf.test.TestCase):
predictions, labels, k=5, expected=0.0, class_id=class_id)
def test_three_labels_at_k5(self):
- predictions = [
- [0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
- [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]
- ]
- sparse_labels = _binary_2d_label_to_2d_sparse_value([
- [0, 0, 1, 0, 0, 0, 0, 1, 1, 0],
- [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]
- ])
+ predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
+ [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
+ sparse_labels = _binary_2d_label_to_2d_sparse_value(
+ [[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 2: 2 labels, 2 correct predictions.
self._test_sparse_precision_at_k(
- predictions, labels, k=5, expected=2.0 / 2,
- class_id=2)
+ predictions, labels, k=5, expected=2.0 / 2, class_id=2)
# Class 5: 1 label, 1 correct prediction.
self._test_sparse_precision_at_k(
@@ -1964,16 +1974,13 @@ class MultiLabelSparsePrecisionTest(tf.test.TestCase):
def test_three_labels_at_k5_some_out_of_range(self):
"""Tests that labels outside the [0, n_classes) range are ignored."""
- predictions = [
- [0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
- [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]
- ]
- sp_labels = tf.SparseTensorValue(
- indices=[[0, 0], [0, 1], [0, 2], [0, 3],
- [1, 0], [1, 1], [1, 2], [1, 3]],
+ predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
+ [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
+ sp_labels = sparse_tensor.SparseTensorValue(
+ indices=[[0, 0], [0, 1], [0, 2], [0, 3], [1, 0], [1, 1], [1, 2],
+ [1, 3]],
# values -1 and 10 are outside the [0, n_classes) range and are ignored.
- values=np.array([2, 7, -1, 8,
- 1, 2, 5, 10], np.int64),
+ values=np.array([2, 7, -1, 8, 1, 2, 5, 10], np.int64),
dense_shape=[2, 4])
# Class 2: 2 labels, 2 correct predictions.
@@ -1993,20 +2000,13 @@ class MultiLabelSparsePrecisionTest(tf.test.TestCase):
predictions, sp_labels, k=5, expected=3.0 / 10)
def test_3d_nan(self):
- predictions = [[
- [0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
- [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]
- ], [
- [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
- [0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]
- ]]
- labels = _binary_3d_label_to_sparse_value([[
- [0, 0, 1, 0, 0, 0, 0, 1, 1, 0],
- [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]
- ], [
- [0, 1, 1, 0, 0, 1, 0, 1, 0, 0],
- [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]
- ]])
+ predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
+ [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
+ [[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
+ [0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
+ labels = _binary_3d_label_to_sparse_value(
+ [[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
+ [[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Classes 1,3,8 have 0 predictions, classes -1 and 10 are out of range.
for class_id in (-1, 1, 3, 8, 10):
@@ -2014,20 +2014,13 @@ class MultiLabelSparsePrecisionTest(tf.test.TestCase):
predictions, labels, k=5, expected=NAN, class_id=class_id)
def test_3d_no_labels(self):
- predictions = [[
- [0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
- [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]
- ], [
- [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
- [0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]
- ]]
- labels = _binary_3d_label_to_sparse_value([[
- [0, 0, 1, 0, 0, 0, 0, 1, 1, 0],
- [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]
- ], [
- [0, 1, 1, 0, 0, 1, 0, 1, 0, 0],
- [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]
- ]])
+ predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
+ [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
+ [[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
+ [0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
+ labels = _binary_3d_label_to_sparse_value(
+ [[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
+ [[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Classes 0,4,6,9: 0 labels, >=1 prediction.
for class_id in (0, 4, 6, 9):
@@ -2035,20 +2028,13 @@ class MultiLabelSparsePrecisionTest(tf.test.TestCase):
predictions, labels, k=5, expected=0.0, class_id=class_id)
def test_3d(self):
- predictions = [[
- [0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
- [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]
- ], [
- [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
- [0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]
- ]]
- labels = _binary_3d_label_to_sparse_value([[
- [0, 0, 1, 0, 0, 0, 0, 1, 1, 0],
- [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]
- ], [
- [0, 1, 1, 0, 0, 1, 0, 1, 0, 0],
- [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]
- ]])
+ predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
+ [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
+ [[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
+ [0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
+ labels = _binary_3d_label_to_sparse_value(
+ [[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
+ [[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Class 2: 4 predictions, all correct.
self._test_sparse_precision_at_k(
@@ -2067,66 +2053,90 @@ class MultiLabelSparsePrecisionTest(tf.test.TestCase):
predictions, labels, k=5, expected=7.0 / 20)
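# Annotation, not part of this change: 7/20 is checkable by hand; the four
# example rows contribute 1, 2, 3 and 1 correct classes among their top-5
# predictions respectively, out of 4 * 5 = 20 predictions in total.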
def test_3d_ignore_some(self):
- predictions = [[
- [0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
- [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]
- ], [
- [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
- [0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]
- ]]
- labels = _binary_3d_label_to_sparse_value([[
- [0, 0, 1, 0, 0, 0, 0, 1, 1, 0],
- [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]
- ], [
- [0, 1, 1, 0, 0, 1, 0, 1, 0, 0],
- [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]
- ]])
+ predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
+ [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
+ [[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
+ [0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
+ labels = _binary_3d_label_to_sparse_value(
+ [[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
+ [[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Class 2: 2 predictions, both correct.
self._test_sparse_precision_at_k(
- predictions, labels, k=5, expected=2.0 / 2.0, class_id=2,
+ predictions,
+ labels,
+ k=5,
+ expected=2.0 / 2.0,
+ class_id=2,
weights=[[1], [0]])
# Class 2: 2 predictions, both correct.
self._test_sparse_precision_at_k(
- predictions, labels, k=5, expected=2.0 / 2.0, class_id=2,
+ predictions,
+ labels,
+ k=5,
+ expected=2.0 / 2.0,
+ class_id=2,
weights=[[0], [1]])
# Class 7: 1 incorrect prediction.
self._test_sparse_precision_at_k(
- predictions, labels, k=5, expected=0.0 / 1.0, class_id=7,
+ predictions,
+ labels,
+ k=5,
+ expected=0.0 / 1.0,
+ class_id=7,
weights=[[1], [0]])
# Class 7: 1 correct prediction.
self._test_sparse_precision_at_k(
- predictions, labels, k=5, expected=1.0 / 1.0, class_id=7,
+ predictions,
+ labels,
+ k=5,
+ expected=1.0 / 1.0,
+ class_id=7,
weights=[[0], [1]])
# Class 7: no predictions.
self._test_sparse_precision_at_k(
- predictions, labels, k=5, expected=NAN, class_id=7,
+ predictions,
+ labels,
+ k=5,
+ expected=NAN,
+ class_id=7,
weights=[[1, 0], [0, 1]])
# Class 7: 2 predictions, 1 correct.
self._test_sparse_precision_at_k(
- predictions, labels, k=5, expected=1.0 / 2.0, class_id=7,
+ predictions,
+ labels,
+ k=5,
+ expected=1.0 / 2.0,
+ class_id=7,
weights=[[0, 1], [1, 0]])
-def _test_recall_at_k(
- predictions, labels, k, expected, class_id=None, weights=None,
- test_case=None):
- with tf.Graph().as_default() as g, test_case.test_session(g):
+def _test_recall_at_k(predictions,
+ labels,
+ k,
+ expected,
+ class_id=None,
+ weights=None,
+ test_case=None):
+ with ops.Graph().as_default() as g, test_case.test_session(g):
if weights is not None:
- weights = tf.constant(weights, tf.float32)
+ weights = constant_op.constant(weights, dtypes_lib.float32)
metric, update = metrics.recall_at_k(
- predictions=tf.constant(predictions, tf.float32),
- labels=labels, k=k, class_id=class_id, weights=weights)
+ predictions=constant_op.constant(predictions, dtypes_lib.float32),
+ labels=labels,
+ k=k,
+ class_id=class_id,
+ weights=weights)
# Fails without initialized vars.
- test_case.assertRaises(tf.OpError, metric.eval)
- test_case.assertRaises(tf.OpError, update.eval)
- tf.variables_initializer(tf.local_variables()).run()
+ test_case.assertRaises(errors_impl.OpError, metric.eval)
+ test_case.assertRaises(errors_impl.OpError, update.eval)
+ variables.variables_initializer(variables.local_variables()).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
@@ -2137,7 +2147,7 @@ def _test_recall_at_k(
test_case.assertEqual(expected, metric.eval())
-class SingleLabelRecallAtKTest(tf.test.TestCase):
+class SingleLabelRecallAtKTest(test.TestCase):
def setUp(self):
self._predictions = ((0.1, 0.3, 0.2, 0.4), (0.1, 0.2, 0.3, 0.4))
@@ -2146,9 +2156,9 @@ class SingleLabelRecallAtKTest(tf.test.TestCase):
# Sparse vs dense, and 1d vs 2d labels should all be handled the same.
self._labels = (
_binary_2d_label_to_1d_sparse_value(indicator_labels),
- _binary_2d_label_to_2d_sparse_value(indicator_labels),
- np.array(class_labels, dtype=np.int64),
- np.array([[class_id] for class_id in class_labels], dtype=np.int64))
+ _binary_2d_label_to_2d_sparse_value(indicator_labels), np.array(
+ class_labels, dtype=np.int64), np.array(
+ [[class_id] for class_id in class_labels], dtype=np.int64))
self._test_recall_at_k = functools.partial(
_test_recall_at_k, test_case=self)
@@ -2173,8 +2183,7 @@ class SingleLabelRecallAtKTest(tf.test.TestCase):
self._predictions, labels, k=1, expected=1.0 / 1, class_id=3)
# All classes: 2 labels, 2 predictions, 1 correct.
- self._test_recall_at_k(
- self._predictions, labels, k=1, expected=1.0 / 2)
+ self._test_recall_at_k(self._predictions, labels, k=1, expected=1.0 / 2)
def test_one_label_at_k1_weighted(self):
predictions = self._predictions
@@ -2183,34 +2192,74 @@ class SingleLabelRecallAtKTest(tf.test.TestCase):
self._test_recall_at_k(
predictions, labels, k=1, expected=NAN, class_id=3, weights=(0.0,))
self._test_recall_at_k(
- predictions, labels, k=1, expected=1.0 / 1, class_id=3,
+ predictions,
+ labels,
+ k=1,
+ expected=1.0 / 1,
+ class_id=3,
weights=(1.0,))
self._test_recall_at_k(
- predictions, labels, k=1, expected=1.0 / 1, class_id=3,
+ predictions,
+ labels,
+ k=1,
+ expected=1.0 / 1,
+ class_id=3,
weights=(2.0,))
self._test_recall_at_k(
- predictions, labels, k=1, expected=NAN, class_id=3,
+ predictions,
+ labels,
+ k=1,
+ expected=NAN,
+ class_id=3,
weights=(0.0, 0.0))
self._test_recall_at_k(
- predictions, labels, k=1, expected=NAN, class_id=3,
+ predictions,
+ labels,
+ k=1,
+ expected=NAN,
+ class_id=3,
weights=(0.0, 1.0))
self._test_recall_at_k(
- predictions, labels, k=1, expected=1.0 / 1, class_id=3,
+ predictions,
+ labels,
+ k=1,
+ expected=1.0 / 1,
+ class_id=3,
weights=(1.0, 0.0))
self._test_recall_at_k(
- predictions, labels, k=1, expected=1.0 / 1, class_id=3,
+ predictions,
+ labels,
+ k=1,
+ expected=1.0 / 1,
+ class_id=3,
weights=(1.0, 1.0))
self._test_recall_at_k(
- predictions, labels, k=1, expected=2.0 / 2, class_id=3,
+ predictions,
+ labels,
+ k=1,
+ expected=2.0 / 2,
+ class_id=3,
weights=(2.0, 3.0))
self._test_recall_at_k(
- predictions, labels, k=1, expected=3.0 / 3, class_id=3,
+ predictions,
+ labels,
+ k=1,
+ expected=3.0 / 3,
+ class_id=3,
weights=(3.0, 2.0))
self._test_recall_at_k(
- predictions, labels, k=1, expected=0.3 / 0.3, class_id=3,
+ predictions,
+ labels,
+ k=1,
+ expected=0.3 / 0.3,
+ class_id=3,
weights=(0.3, 0.6))
self._test_recall_at_k(
- predictions, labels, k=1, expected=0.6 / 0.6, class_id=3,
+ predictions,
+ labels,
+ k=1,
+ expected=0.6 / 0.6,
+ class_id=3,
weights=(0.6, 0.3))
# All classes: 2 labels, 2 predictions, 1 correct.
@@ -2236,20 +2285,18 @@ class SingleLabelRecallAtKTest(tf.test.TestCase):
predictions, labels, k=1, expected=0.6 / 0.9, weights=(0.6, 0.3))
-class MultiLabel2dRecallAtKTest(tf.test.TestCase):
+class MultiLabel2dRecallAtKTest(test.TestCase):
def setUp(self):
- self._predictions = (
- (0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9),
- (0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6))
- indicator_labels = (
- (0, 0, 1, 0, 0, 0, 0, 1, 1, 0),
- (0, 1, 1, 0, 0, 1, 0, 0, 0, 0))
+ self._predictions = ((0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9),
+ (0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6))
+ indicator_labels = ((0, 0, 1, 0, 0, 0, 0, 1, 1, 0),
+ (0, 1, 1, 0, 0, 1, 0, 0, 0, 0))
class_labels = ((2, 7, 8), (1, 2, 5))
# Sparse vs dense labels should be handled the same.
- self._labels = (
- _binary_2d_label_to_2d_sparse_value(indicator_labels),
- np.array(class_labels, dtype=np.int64))
+ self._labels = (_binary_2d_label_to_2d_sparse_value(indicator_labels),
+ np.array(
+ class_labels, dtype=np.int64))
self._test_recall_at_k = functools.partial(
_test_recall_at_k, test_case=self)
@@ -2285,12 +2332,11 @@ class MultiLabel2dRecallAtKTest(tf.test.TestCase):
def test_at_k5_some_out_of_range(self):
"""Tests that labels outside the [0, n_classes) count in denominator."""
- labels = tf.SparseTensorValue(
- indices=[[0, 0], [0, 1], [0, 2], [0, 3],
- [1, 0], [1, 1], [1, 2], [1, 3]],
+ labels = sparse_tensor.SparseTensorValue(
+ indices=[[0, 0], [0, 1], [0, 2], [0, 3], [1, 0], [1, 1], [1, 2],
+ [1, 3]],
# values -1 and 10 are outside the [0, n_classes) range.
- values=np.array([2, 7, -1, 8,
- 1, 2, 5, 10], np.int64),
+ values=np.array([2, 7, -1, 8, 1, 2, 5, 10], np.int64),
dense_shape=[2, 4])
# Class 2: 2 labels, both correct.
@@ -2309,25 +2355,18 @@ class MultiLabel2dRecallAtKTest(tf.test.TestCase):
self._test_recall_at_k(self._predictions, labels, k=5, expected=3.0 / 8)
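# Annotation, not part of this change: the denominator 8 counts every label
# entry, including -1 and 10, which can never appear in a top-k prediction;
# only 3 of the in-range labels land in the top 5, hence 3/8.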
-class MultiLabel3dRecallAtKTest(tf.test.TestCase):
+class MultiLabel3dRecallAtKTest(test.TestCase):
def setUp(self):
- self._predictions = ((
- (0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9),
- (0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6)
- ), (
- (0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6),
- (0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9)
- ))
+ self._predictions = (((0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9),
+ (0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6)),
+ ((0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6),
+ (0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9)))
# Note: We don't test dense labels here, since examples have different
# numbers of labels.
self._labels = _binary_3d_label_to_sparse_value(((
- (0, 0, 1, 0, 0, 0, 0, 1, 1, 0),
- (0, 1, 1, 0, 0, 1, 0, 0, 0, 0)
- ), (
- (0, 1, 1, 0, 0, 1, 0, 1, 0, 0),
- (0, 0, 1, 0, 0, 0, 0, 0, 1, 0)
- )))
+ (0, 0, 1, 0, 0, 0, 0, 1, 1, 0), (0, 1, 1, 0, 0, 1, 0, 0, 0, 0)), (
+ (0, 1, 1, 0, 0, 1, 0, 1, 0, 0), (0, 0, 1, 0, 0, 0, 0, 0, 1, 0))))
self._test_recall_at_k = functools.partial(
_test_recall_at_k, test_case=self)
@@ -2363,86 +2402,118 @@ class MultiLabel3dRecallAtKTest(tf.test.TestCase):
def test_3d_ignore_all(self):
for class_id in xrange(10):
self._test_recall_at_k(
- self._predictions, self._labels, k=5, expected=NAN, class_id=class_id,
+ self._predictions,
+ self._labels,
+ k=5,
+ expected=NAN,
+ class_id=class_id,
weights=[[0], [0]])
self._test_recall_at_k(
- self._predictions, self._labels, k=5, expected=NAN, class_id=class_id,
+ self._predictions,
+ self._labels,
+ k=5,
+ expected=NAN,
+ class_id=class_id,
weights=[[0, 0], [0, 0]])
self._test_recall_at_k(
self._predictions, self._labels, k=5, expected=NAN, weights=[[0], [0]])
self._test_recall_at_k(
- self._predictions, self._labels, k=5, expected=NAN,
+ self._predictions,
+ self._labels,
+ k=5,
+ expected=NAN,
weights=[[0, 0], [0, 0]])
def test_3d_ignore_some(self):
# Class 2: 2 labels, both correct.
self._test_recall_at_k(
- self._predictions, self._labels, k=5, expected=2.0 / 2.0, class_id=2,
+ self._predictions,
+ self._labels,
+ k=5,
+ expected=2.0 / 2.0,
+ class_id=2,
weights=[[1], [0]])
# Class 2: 2 labels, both correct.
self._test_recall_at_k(
- self._predictions, self._labels, k=5, expected=2.0 / 2.0, class_id=2,
+ self._predictions,
+ self._labels,
+ k=5,
+ expected=2.0 / 2.0,
+ class_id=2,
weights=[[0], [1]])
# Class 7: 1 label, correct.
self._test_recall_at_k(
- self._predictions, self._labels, k=5, expected=1.0 / 1.0, class_id=7,
+ self._predictions,
+ self._labels,
+ k=5,
+ expected=1.0 / 1.0,
+ class_id=7,
weights=[[0], [1]])
# Class 7: 1 label, incorrect.
self._test_recall_at_k(
- self._predictions, self._labels, k=5, expected=0.0 / 1.0, class_id=7,
+ self._predictions,
+ self._labels,
+ k=5,
+ expected=0.0 / 1.0,
+ class_id=7,
weights=[[1], [0]])
# Class 7: 2 labels, 1 correct.
self._test_recall_at_k(
- self._predictions, self._labels, k=5, expected=1.0 / 2.0, class_id=7,
+ self._predictions,
+ self._labels,
+ k=5,
+ expected=1.0 / 2.0,
+ class_id=7,
weights=[[1, 0], [1, 0]])
# Class 7: No labels.
self._test_recall_at_k(
- self._predictions, self._labels, k=5, expected=NAN, class_id=7,
+ self._predictions,
+ self._labels,
+ k=5,
+ expected=NAN,
+ class_id=7,
weights=[[0, 1], [0, 1]])
-class MeanAbsoluteErrorTest(tf.test.TestCase):
+class MeanAbsoluteErrorTest(test.TestCase):
def setUp(self):
- tf.reset_default_graph()
+ ops.reset_default_graph()
def testVars(self):
metrics.mean_absolute_error(
- predictions=tf.ones((10, 1)), labels=tf.ones((10, 1)))
- _assert_local_variables(self, (
- 'mean_absolute_error/count:0',
- 'mean_absolute_error/total:0'
- ))
+ predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
+ _assert_local_variables(self, ('mean_absolute_error/count:0',
+ 'mean_absolute_error/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.mean_absolute_error(
- predictions=tf.ones((10, 1)),
- labels=tf.ones((10, 1)),
+ predictions=array_ops.ones((10, 1)),
+ labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
- self.assertListEqual(tf.get_collection(my_collection_name), [mean])
+ self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.mean_absolute_error(
- predictions=tf.ones((10, 1)),
- labels=tf.ones((10, 1)),
+ predictions=array_ops.ones((10, 1)),
+ labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
- self.assertListEqual(tf.get_collection(my_collection_name), [update_op])
+ self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
- predictions = tf.random_normal((10, 3), seed=1)
- labels = tf.random_normal((10, 3), seed=2)
- error, update_op = metrics.mean_absolute_error(
- labels, predictions)
+ predictions = random_ops.random_normal((10, 3), seed=1)
+ labels = random_ops.random_normal((10, 3), seed=2)
+ error, update_op = metrics.mean_absolute_error(labels, predictions)
with self.test_session() as sess:
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
@@ -2454,61 +2525,60 @@ class MeanAbsoluteErrorTest(tf.test.TestCase):
self.assertEqual(initial_error, error.eval())
def testSingleUpdateWithErrorAndWeights(self):
- predictions = tf.constant([2, 4, 6, 8], shape=(1, 4), dtype=tf.float32)
- labels = tf.constant([1, 3, 2, 3], shape=(1, 4), dtype=tf.float32)
- weights = tf.constant([0, 1, 0, 1], shape=(1, 4))
+ predictions = constant_op.constant(
+ [2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
+ labels = constant_op.constant(
+ [1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)
+ weights = constant_op.constant([0, 1, 0, 1], shape=(1, 4))
- error, update_op = metrics.mean_absolute_error(
- labels, predictions, weights)
+ error, update_op = metrics.mean_absolute_error(labels, predictions, weights)
with self.test_session() as sess:
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
self.assertEqual(3, sess.run(update_op))
self.assertEqual(3, error.eval())
-class MeanRelativeErrorTest(tf.test.TestCase):
+class MeanRelativeErrorTest(test.TestCase):
def setUp(self):
- tf.reset_default_graph()
+ ops.reset_default_graph()
def testVars(self):
metrics.mean_relative_error(
- predictions=tf.ones((10, 1)), labels=tf.ones((10, 1)),
- normalizer=tf.ones((10, 1)))
- _assert_local_variables(self, (
- 'mean_relative_error/count:0',
- 'mean_relative_error/total:0'
- ))
+ predictions=array_ops.ones((10, 1)),
+ labels=array_ops.ones((10, 1)),
+ normalizer=array_ops.ones((10, 1)))
+ _assert_local_variables(self, ('mean_relative_error/count:0',
+ 'mean_relative_error/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.mean_relative_error(
- predictions=tf.ones((10, 1)),
- labels=tf.ones((10, 1)),
- normalizer=tf.ones((10, 1)),
+ predictions=array_ops.ones((10, 1)),
+ labels=array_ops.ones((10, 1)),
+ normalizer=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
- self.assertListEqual(
- tf.get_collection(my_collection_name), [mean])
+ self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.mean_relative_error(
- predictions=tf.ones((10, 1)),
- labels=tf.ones((10, 1)),
- normalizer=tf.ones((10, 1)),
+ predictions=array_ops.ones((10, 1)),
+ labels=array_ops.ones((10, 1)),
+ normalizer=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
- self.assertListEqual(tf.get_collection(my_collection_name), [update_op])
+ self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
- predictions = tf.random_normal((10, 3), seed=1)
- labels = tf.random_normal((10, 3), seed=2)
- normalizer = tf.random_normal((10, 3), seed=3)
- error, update_op = metrics.mean_relative_error(
- labels, predictions, normalizer)
+ predictions = random_ops.random_normal((10, 3), seed=1)
+ labels = random_ops.random_normal((10, 3), seed=2)
+ normalizer = random_ops.random_normal((10, 3), seed=3)
+ error, update_op = metrics.mean_relative_error(labels, predictions,
+ normalizer)
with self.test_session() as sess:
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
@@ -2523,72 +2593,71 @@ class MeanRelativeErrorTest(tf.test.TestCase):
np_predictions = np.asarray([2, 4, 6, 8], dtype=np.float32)
np_labels = np.asarray([1, 3, 2, 3], dtype=np.float32)
expected_error = np.mean(
- np.divide(np.absolute(np_predictions - np_labels),
- np_labels))
+ np.divide(np.absolute(np_predictions - np_labels), np_labels))
- predictions = tf.constant(np_predictions, shape=(1, 4), dtype=tf.float32)
- labels = tf.constant(np_labels, shape=(1, 4))
+ predictions = constant_op.constant(
+ np_predictions, shape=(1, 4), dtype=dtypes_lib.float32)
+ labels = constant_op.constant(np_labels, shape=(1, 4))
error, update_op = metrics.mean_relative_error(
labels, predictions, normalizer=labels)
with self.test_session() as sess:
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
self.assertEqual(expected_error, sess.run(update_op))
self.assertEqual(expected_error, error.eval())
def testSingleUpdateNormalizedByZeros(self):
np_predictions = np.asarray([2, 4, 6, 8], dtype=np.float32)
- predictions = tf.constant(np_predictions, shape=(1, 4), dtype=tf.float32)
- labels = tf.constant([1, 3, 2, 3], shape=(1, 4), dtype=tf.float32)
+ predictions = constant_op.constant(
+ np_predictions, shape=(1, 4), dtype=dtypes_lib.float32)
+ labels = constant_op.constant(
+ [1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)
error, update_op = metrics.mean_relative_error(
- labels, predictions, normalizer=tf.zeros_like(labels))
+ labels, predictions, normalizer=array_ops.zeros_like(labels))
with self.test_session() as sess:
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
self.assertEqual(0.0, sess.run(update_op))
self.assertEqual(0.0, error.eval())
-class MeanSquaredErrorTest(tf.test.TestCase):
+class MeanSquaredErrorTest(test.TestCase):
def setUp(self):
- tf.reset_default_graph()
+ ops.reset_default_graph()
def testVars(self):
metrics.mean_squared_error(
- predictions=tf.ones((10, 1)), labels=tf.ones((10, 1)))
- _assert_local_variables(self, (
- 'mean_squared_error/count:0',
- 'mean_squared_error/total:0'
- ))
+ predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
+ _assert_local_variables(self, ('mean_squared_error/count:0',
+ 'mean_squared_error/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.mean_squared_error(
- predictions=tf.ones((10, 1)),
- labels=tf.ones((10, 1)),
+ predictions=array_ops.ones((10, 1)),
+ labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
- self.assertListEqual(tf.get_collection(my_collection_name), [mean])
+ self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.mean_squared_error(
- predictions=tf.ones((10, 1)),
- labels=tf.ones((10, 1)),
+ predictions=array_ops.ones((10, 1)),
+ labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
- self.assertListEqual(tf.get_collection(my_collection_name), [update_op])
+ self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
- predictions = tf.random_normal((10, 3), seed=1)
- labels = tf.random_normal((10, 3), seed=2)
- error, update_op = metrics.mean_squared_error(
- labels, predictions)
+ predictions = random_ops.random_normal((10, 3), seed=1)
+ labels = random_ops.random_normal((10, 3), seed=2)
+ error, update_op = metrics.mean_squared_error(labels, predictions)
with self.test_session() as sess:
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
@@ -2600,60 +2669,62 @@ class MeanSquaredErrorTest(tf.test.TestCase):
self.assertEqual(initial_error, error.eval())
def testSingleUpdateZeroError(self):
- predictions = tf.zeros((1, 3), dtype=tf.float32)
- labels = tf.zeros((1, 3), dtype=tf.float32)
+ predictions = array_ops.zeros((1, 3), dtype=dtypes_lib.float32)
+ labels = array_ops.zeros((1, 3), dtype=dtypes_lib.float32)
- error, update_op = metrics.mean_squared_error(
- labels, predictions)
+ error, update_op = metrics.mean_squared_error(labels, predictions)
with self.test_session() as sess:
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
self.assertEqual(0, sess.run(update_op))
self.assertEqual(0, error.eval())
def testSingleUpdateWithError(self):
- predictions = tf.constant([2, 4, 6], shape=(1, 3), dtype=tf.float32)
- labels = tf.constant([1, 3, 2], shape=(1, 3), dtype=tf.float32)
+ predictions = constant_op.constant(
+ [2, 4, 6], shape=(1, 3), dtype=dtypes_lib.float32)
+ labels = constant_op.constant(
+ [1, 3, 2], shape=(1, 3), dtype=dtypes_lib.float32)
- error, update_op = metrics.mean_squared_error(
- labels, predictions)
+ error, update_op = metrics.mean_squared_error(labels, predictions)
with self.test_session() as sess:
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
self.assertEqual(6, sess.run(update_op))
self.assertEqual(6, error.eval())
def testSingleUpdateWithErrorAndWeights(self):
- predictions = tf.constant([2, 4, 6, 8], shape=(1, 4), dtype=tf.float32)
- labels = tf.constant([1, 3, 2, 3], shape=(1, 4), dtype=tf.float32)
- weights = tf.constant([0, 1, 0, 1], shape=(1, 4))
+ predictions = constant_op.constant(
+ [2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
+ labels = constant_op.constant(
+ [1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)
+ weights = constant_op.constant([0, 1, 0, 1], shape=(1, 4))
- error, update_op = metrics.mean_squared_error(
- labels, predictions, weights)
+ error, update_op = metrics.mean_squared_error(labels, predictions, weights)
with self.test_session() as sess:
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
self.assertEqual(13, sess.run(update_op))
self.assertEqual(13, error.eval())
def testMultipleBatchesOfSizeOne(self):
with self.test_session() as sess:
# Create the queue that populates the predictions.
- preds_queue = tf.FIFOQueue(2, dtypes=tf.float32, shapes=(1, 3))
+ preds_queue = data_flow_ops.FIFOQueue(
+ 2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, preds_queue, [10, 8, 6])
_enqueue_vector(sess, preds_queue, [-4, 3, -1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
- labels_queue = tf.FIFOQueue(2, dtypes=tf.float32, shapes=(1, 3))
+ labels_queue = data_flow_ops.FIFOQueue(
+ 2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, labels_queue, [1, 3, 2])
_enqueue_vector(sess, labels_queue, [2, 4, 6])
labels = labels_queue.dequeue()
- error, update_op = metrics.mean_squared_error(
- labels, predictions)
+ error, update_op = metrics.mean_squared_error(labels, predictions)
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAlmostEqual(208.0 / 6, sess.run(update_op), 5)
@@ -2662,25 +2733,29 @@ class MeanSquaredErrorTest(tf.test.TestCase):
def testMetricsComputedConcurrently(self):
with self.test_session() as sess:
# Create the queue that populates one set of predictions.
- preds_queue0 = tf.FIFOQueue(2, dtypes=tf.float32, shapes=(1, 3))
+ preds_queue0 = data_flow_ops.FIFOQueue(
+ 2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, preds_queue0, [10, 8, 6])
_enqueue_vector(sess, preds_queue0, [-4, 3, -1])
predictions0 = preds_queue0.dequeue()
      # Create the queue that populates another set of predictions.
- preds_queue1 = tf.FIFOQueue(2, dtypes=tf.float32, shapes=(1, 3))
+ preds_queue1 = data_flow_ops.FIFOQueue(
+ 2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, preds_queue1, [0, 1, 1])
_enqueue_vector(sess, preds_queue1, [1, 1, 0])
predictions1 = preds_queue1.dequeue()
# Create the queue that populates one set of labels.
- labels_queue0 = tf.FIFOQueue(2, dtypes=tf.float32, shapes=(1, 3))
+ labels_queue0 = data_flow_ops.FIFOQueue(
+ 2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, labels_queue0, [1, 3, 2])
_enqueue_vector(sess, labels_queue0, [2, 4, 6])
labels0 = labels_queue0.dequeue()
# Create the queue that populates another set of labels.
- labels_queue1 = tf.FIFOQueue(2, dtypes=tf.float32, shapes=(1, 3))
+ labels_queue1 = data_flow_ops.FIFOQueue(
+ 2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, labels_queue1, [-5, -3, -1])
_enqueue_vector(sess, labels_queue1, [5, 4, 3])
labels1 = labels_queue1.dequeue()
@@ -2690,7 +2765,7 @@ class MeanSquaredErrorTest(tf.test.TestCase):
mse1, update_op1 = metrics.mean_squared_error(
labels1, predictions1, name='msd1')
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
sess.run([update_op0, update_op1])
sess.run([update_op0, update_op1])
@@ -2701,23 +2776,23 @@ class MeanSquaredErrorTest(tf.test.TestCase):
def testMultipleMetricsOnMultipleBatchesOfSizeOne(self):
with self.test_session() as sess:
# Create the queue that populates the predictions.
- preds_queue = tf.FIFOQueue(2, dtypes=tf.float32, shapes=(1, 3))
+ preds_queue = data_flow_ops.FIFOQueue(
+ 2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, preds_queue, [10, 8, 6])
_enqueue_vector(sess, preds_queue, [-4, 3, -1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
- labels_queue = tf.FIFOQueue(2, dtypes=tf.float32, shapes=(1, 3))
+ labels_queue = data_flow_ops.FIFOQueue(
+ 2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, labels_queue, [1, 3, 2])
_enqueue_vector(sess, labels_queue, [2, 4, 6])
labels = labels_queue.dequeue()
- mae, ma_update_op = metrics.mean_absolute_error(
- labels, predictions)
- mse, ms_update_op = metrics.mean_squared_error(
- labels, predictions)
+ mae, ma_update_op = metrics.mean_absolute_error(labels, predictions)
+ mse, ms_update_op = metrics.mean_squared_error(labels, predictions)
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
sess.run([ma_update_op, ms_update_op])
sess.run([ma_update_op, ms_update_op])
@@ -2725,43 +2800,40 @@ class MeanSquaredErrorTest(tf.test.TestCase):
self.assertAlmostEqual(208.0 / 6, mse.eval(), 5)
-class RootMeanSquaredErrorTest(tf.test.TestCase):
+class RootMeanSquaredErrorTest(test.TestCase):
def setUp(self):
- tf.reset_default_graph()
+ ops.reset_default_graph()
def testVars(self):
metrics.root_mean_squared_error(
- predictions=tf.ones((10, 1)), labels=tf.ones((10, 1)))
- _assert_local_variables(self, (
- 'root_mean_squared_error/count:0',
- 'root_mean_squared_error/total:0'
- ))
+ predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
+ _assert_local_variables(self, ('root_mean_squared_error/count:0',
+ 'root_mean_squared_error/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.root_mean_squared_error(
- predictions=tf.ones((10, 1)),
- labels=tf.ones((10, 1)),
+ predictions=array_ops.ones((10, 1)),
+ labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
- self.assertListEqual(tf.get_collection(my_collection_name), [mean])
+ self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.root_mean_squared_error(
- predictions=tf.ones((10, 1)),
- labels=tf.ones((10, 1)),
+ predictions=array_ops.ones((10, 1)),
+ labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
- self.assertListEqual(tf.get_collection(my_collection_name), [update_op])
+ self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
- predictions = tf.random_normal((10, 3), seed=1)
- labels = tf.random_normal((10, 3), seed=2)
- error, update_op = metrics.root_mean_squared_error(
- labels, predictions)
+ predictions = random_ops.random_normal((10, 3), seed=1)
+ labels = random_ops.random_normal((10, 3), seed=2)
+ error, update_op = metrics.root_mean_squared_error(labels, predictions)
with self.test_session() as sess:
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
@@ -2774,39 +2846,42 @@ class RootMeanSquaredErrorTest(tf.test.TestCase):
def testSingleUpdateZeroError(self):
with self.test_session() as sess:
- predictions = tf.constant(0.0, shape=(1, 3), dtype=tf.float32)
- labels = tf.constant(0.0, shape=(1, 3), dtype=tf.float32)
+ predictions = constant_op.constant(
+ 0.0, shape=(1, 3), dtype=dtypes_lib.float32)
+ labels = constant_op.constant(0.0, shape=(1, 3), dtype=dtypes_lib.float32)
- rmse, update_op = metrics.root_mean_squared_error(
- labels, predictions)
+ rmse, update_op = metrics.root_mean_squared_error(labels, predictions)
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
self.assertEqual(0, sess.run(update_op))
self.assertEqual(0, rmse.eval())
def testSingleUpdateWithError(self):
with self.test_session() as sess:
- predictions = tf.constant([2, 4, 6], shape=(1, 3), dtype=tf.float32)
- labels = tf.constant([1, 3, 2], shape=(1, 3), dtype=tf.float32)
+ predictions = constant_op.constant(
+ [2, 4, 6], shape=(1, 3), dtype=dtypes_lib.float32)
+ labels = constant_op.constant(
+ [1, 3, 2], shape=(1, 3), dtype=dtypes_lib.float32)
- rmse, update_op = metrics.root_mean_squared_error(
- labels, predictions)
+ rmse, update_op = metrics.root_mean_squared_error(labels, predictions)
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(math.sqrt(6), update_op.eval(), 5)
self.assertAlmostEqual(math.sqrt(6), rmse.eval(), 5)
def testSingleUpdateWithErrorAndWeights(self):
with self.test_session() as sess:
- predictions = tf.constant([2, 4, 6, 8], shape=(1, 4), dtype=tf.float32)
- labels = tf.constant([1, 3, 2, 3], shape=(1, 4), dtype=tf.float32)
- weights = tf.constant([0, 1, 0, 1], shape=(1, 4))
+ predictions = constant_op.constant(
+ [2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
+ labels = constant_op.constant(
+ [1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)
+ weights = constant_op.constant([0, 1, 0, 1], shape=(1, 4))
- rmse, update_op = metrics.root_mean_squared_error(
- labels, predictions, weights)
+ rmse, update_op = metrics.root_mean_squared_error(labels, predictions,
+ weights)
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(math.sqrt(13), sess.run(update_op))
self.assertAlmostEqual(math.sqrt(13), rmse.eval(), 5)
@@ -2817,45 +2892,45 @@ def _reweight(predictions, labels, weights):
np.concatenate([[l] * int(w) for l, w in zip(labels, weights)]))
-class MeanCosineDistanceTest(tf.test.TestCase):
+class MeanCosineDistanceTest(test.TestCase):
def setUp(self):
- tf.reset_default_graph()
+ ops.reset_default_graph()
def testVars(self):
metrics.mean_cosine_distance(
- predictions=tf.ones((10, 3)), labels=tf.ones((10, 3)), dim=1)
+ predictions=array_ops.ones((10, 3)),
+ labels=array_ops.ones((10, 3)),
+ dim=1)
_assert_local_variables(self, (
'mean_cosine_distance/count:0',
- 'mean_cosine_distance/total:0',
- ))
+ 'mean_cosine_distance/total:0',))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.mean_cosine_distance(
- predictions=tf.ones((10, 3)),
- labels=tf.ones((10, 3)),
+ predictions=array_ops.ones((10, 3)),
+ labels=array_ops.ones((10, 3)),
dim=1,
metrics_collections=[my_collection_name])
- self.assertListEqual(tf.get_collection(my_collection_name), [mean])
+ self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.mean_cosine_distance(
- predictions=tf.ones((10, 3)),
- labels=tf.ones((10, 3)),
+ predictions=array_ops.ones((10, 3)),
+ labels=array_ops.ones((10, 3)),
dim=1,
updates_collections=[my_collection_name])
- self.assertListEqual(tf.get_collection(my_collection_name), [update_op])
+ self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
- predictions = tf.random_normal((10, 3), seed=1)
- labels = tf.random_normal((10, 3), seed=2)
- error, update_op = metrics.mean_cosine_distance(
- labels, predictions, dim=1)
+ predictions = random_ops.random_normal((10, 3), seed=1)
+ labels = random_ops.random_normal((10, 3), seed=2)
+ error, update_op = metrics.mean_cosine_distance(labels, predictions, dim=1)
with self.test_session() as sess:
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
@@ -2867,141 +2942,133 @@ class MeanCosineDistanceTest(tf.test.TestCase):
self.assertEqual(initial_error, error.eval())
def testSingleUpdateZeroError(self):
- np_labels = np.matrix(('1 0 0;'
- '0 0 1;'
- '0 1 0'))
+ np_labels = np.matrix(('1 0 0;' '0 0 1;' '0 1 0'))
- predictions = tf.constant(np_labels, shape=(1, 3, 3), dtype=tf.float32)
- labels = tf.constant(np_labels, shape=(1, 3, 3), dtype=tf.float32)
+ predictions = constant_op.constant(
+ np_labels, shape=(1, 3, 3), dtype=dtypes_lib.float32)
+ labels = constant_op.constant(
+ np_labels, shape=(1, 3, 3), dtype=dtypes_lib.float32)
- error, update_op = metrics.mean_cosine_distance(
- labels, predictions, dim=2)
+ error, update_op = metrics.mean_cosine_distance(labels, predictions, dim=2)
with self.test_session() as sess:
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
self.assertEqual(0, sess.run(update_op))
self.assertEqual(0, error.eval())
def testSingleUpdateWithError1(self):
- np_labels = np.matrix(('1 0 0;'
- '0 0 1;'
- '0 1 0'))
- np_predictions = np.matrix(('1 0 0;'
- '0 0 -1;'
- '1 0 0'))
+ np_labels = np.matrix(('1 0 0;' '0 0 1;' '0 1 0'))
+ np_predictions = np.matrix(('1 0 0;' '0 0 -1;' '1 0 0'))
- predictions = tf.constant(np_predictions, shape=(3, 1, 3), dtype=tf.float32)
- labels = tf.constant(np_labels, shape=(3, 1, 3), dtype=tf.float32)
+ predictions = constant_op.constant(
+ np_predictions, shape=(3, 1, 3), dtype=dtypes_lib.float32)
+ labels = constant_op.constant(
+ np_labels, shape=(3, 1, 3), dtype=dtypes_lib.float32)
- error, update_op = metrics.mean_cosine_distance(
- labels, predictions, dim=2)
+ error, update_op = metrics.mean_cosine_distance(labels, predictions, dim=2)
with self.test_session() as sess:
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1, sess.run(update_op), 5)
self.assertAlmostEqual(1, error.eval(), 5)
def testSingleUpdateWithError2(self):
- np_predictions = np.matrix((
- '0.819031913261206 0.567041924552012 0.087465312324590;'
- '-0.665139432070255 -0.739487441769973 -0.103671883216994;'
- '0.707106781186548 -0.707106781186548 0'))
- np_labels = np.matrix((
- '0.819031913261206 0.567041924552012 0.087465312324590;'
- '0.665139432070255 0.739487441769973 0.103671883216994;'
- '0.707106781186548 0.707106781186548 0'))
-
- predictions = tf.constant(np_predictions, shape=(3, 1, 3), dtype=tf.float32)
- labels = tf.constant(np_labels, shape=(3, 1, 3), dtype=tf.float32)
- error, update_op = metrics.mean_cosine_distance(
- labels, predictions, dim=2)
-
- with self.test_session() as sess:
- sess.run(tf.local_variables_initializer())
+ np_predictions = np.matrix(
+ ('0.819031913261206 0.567041924552012 0.087465312324590;'
+ '-0.665139432070255 -0.739487441769973 -0.103671883216994;'
+ '0.707106781186548 -0.707106781186548 0'))
+ np_labels = np.matrix(
+ ('0.819031913261206 0.567041924552012 0.087465312324590;'
+ '0.665139432070255 0.739487441769973 0.103671883216994;'
+ '0.707106781186548 0.707106781186548 0'))
+
+ predictions = constant_op.constant(
+ np_predictions, shape=(3, 1, 3), dtype=dtypes_lib.float32)
+ labels = constant_op.constant(
+ np_labels, shape=(3, 1, 3), dtype=dtypes_lib.float32)
+ error, update_op = metrics.mean_cosine_distance(labels, predictions, dim=2)
+
+ with self.test_session() as sess:
+ sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1.0, sess.run(update_op), 5)
self.assertAlmostEqual(1.0, error.eval(), 5)
def testSingleUpdateWithErrorAndWeights1(self):
- np_predictions = np.matrix(('1 0 0;'
- '0 0 -1;'
- '1 0 0'))
- np_labels = np.matrix(('1 0 0;'
- '0 0 1;'
- '0 1 0'))
+ np_predictions = np.matrix(('1 0 0;' '0 0 -1;' '1 0 0'))
+ np_labels = np.matrix(('1 0 0;' '0 0 1;' '0 1 0'))
- predictions = tf.constant(np_predictions, shape=(3, 1, 3), dtype=tf.float32)
- labels = tf.constant(np_labels, shape=(3, 1, 3), dtype=tf.float32)
- weights = tf.constant([1, 0, 0], shape=(3, 1, 1), dtype=tf.float32)
+ predictions = constant_op.constant(
+ np_predictions, shape=(3, 1, 3), dtype=dtypes_lib.float32)
+ labels = constant_op.constant(
+ np_labels, shape=(3, 1, 3), dtype=dtypes_lib.float32)
+ weights = constant_op.constant(
+ [1, 0, 0], shape=(3, 1, 1), dtype=dtypes_lib.float32)
error, update_op = metrics.mean_cosine_distance(
labels, predictions, dim=2, weights=weights)
with self.test_session() as sess:
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
self.assertEqual(0, sess.run(update_op))
self.assertEqual(0, error.eval())
def testSingleUpdateWithErrorAndWeights2(self):
- np_predictions = np.matrix(('1 0 0;'
- '0 0 -1;'
- '1 0 0'))
- np_labels = np.matrix(('1 0 0;'
- '0 0 1;'
- '0 1 0'))
+ np_predictions = np.matrix(('1 0 0;' '0 0 -1;' '1 0 0'))
+ np_labels = np.matrix(('1 0 0;' '0 0 1;' '0 1 0'))
- predictions = tf.constant(np_predictions, shape=(3, 1, 3), dtype=tf.float32)
- labels = tf.constant(np_labels, shape=(3, 1, 3), dtype=tf.float32)
- weights = tf.constant([0, 1, 1], shape=(3, 1, 1), dtype=tf.float32)
+ predictions = constant_op.constant(
+ np_predictions, shape=(3, 1, 3), dtype=dtypes_lib.float32)
+ labels = constant_op.constant(
+ np_labels, shape=(3, 1, 3), dtype=dtypes_lib.float32)
+ weights = constant_op.constant(
+ [0, 1, 1], shape=(3, 1, 1), dtype=dtypes_lib.float32)
error, update_op = metrics.mean_cosine_distance(
labels, predictions, dim=2, weights=weights)
with self.test_session() as sess:
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
self.assertEqual(1.5, update_op.eval())
self.assertEqual(1.5, error.eval())
-class PcntBelowThreshTest(tf.test.TestCase):
+class PcntBelowThreshTest(test.TestCase):
def setUp(self):
- tf.reset_default_graph()
+ ops.reset_default_graph()
def testVars(self):
- metrics.percentage_below(values=tf.ones((10,)), threshold=2)
+ metrics.percentage_below(values=array_ops.ones((10,)), threshold=2)
_assert_local_variables(self, (
'percentage_below_threshold/count:0',
- 'percentage_below_threshold/total:0',
- ))
+ 'percentage_below_threshold/total:0',))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.percentage_below(
- values=tf.ones((10,)),
+ values=array_ops.ones((10,)),
threshold=2,
metrics_collections=[my_collection_name])
- self.assertListEqual(tf.get_collection(my_collection_name), [mean])
+ self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.percentage_below(
- values=tf.ones((10,)),
+ values=array_ops.ones((10,)),
threshold=2,
updates_collections=[my_collection_name])
- self.assertListEqual(tf.get_collection(my_collection_name), [update_op])
+ self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testOneUpdate(self):
with self.test_session() as sess:
- values = tf.constant([2, 4, 6, 8], shape=(1, 4), dtype=tf.float32)
+ values = constant_op.constant(
+ [2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
- pcnt0, update_op0 = metrics.percentage_below(
- values, 100, name='high')
- pcnt1, update_op1 = metrics.percentage_below(
- values, 7, name='medium')
- pcnt2, update_op2 = metrics.percentage_below(
- values, 1, name='low')
+ pcnt0, update_op0 = metrics.percentage_below(values, 100, name='high')
+ pcnt1, update_op1 = metrics.percentage_below(values, 7, name='medium')
+ pcnt2, update_op2 = metrics.percentage_below(values, 1, name='low')
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
sess.run([update_op0, update_op1, update_op2])
pcnt0, pcnt1, pcnt2 = sess.run([pcnt0, pcnt1, pcnt2])
@@ -3011,8 +3078,10 @@ class PcntBelowThreshTest(tf.test.TestCase):
def testSomePresentOneUpdate(self):
with self.test_session() as sess:
- values = tf.constant([2, 4, 6, 8], shape=(1, 4), dtype=tf.float32)
- weights = tf.constant([1, 0, 0, 1], shape=(1, 4), dtype=tf.float32)
+ values = constant_op.constant(
+ [2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
+ weights = constant_op.constant(
+ [1, 0, 0, 1], shape=(1, 4), dtype=dtypes_lib.float32)
pcnt0, update_op0 = metrics.percentage_below(
values, 100, weights=weights, name='high')
@@ -3021,7 +3090,7 @@ class PcntBelowThreshTest(tf.test.TestCase):
pcnt2, update_op2 = metrics.percentage_below(
values, 1, weights=weights, name='low')
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
self.assertListEqual([1.0, 0.5, 0.0],
sess.run([update_op0, update_op1, update_op2]))
@@ -3031,61 +3100,61 @@ class PcntBelowThreshTest(tf.test.TestCase):
self.assertAlmostEqual(0.0, pcnt2, 5)
-class MeanIOUTest(tf.test.TestCase):
+class MeanIOUTest(test.TestCase):
def setUp(self):
np.random.seed(1)
- tf.reset_default_graph()
+ ops.reset_default_graph()
def testVars(self):
metrics.mean_iou(
- predictions=tf.ones([10, 1]), labels=tf.ones([10, 1]), num_classes=2)
+ predictions=array_ops.ones([10, 1]),
+ labels=array_ops.ones([10, 1]),
+ num_classes=2)
_assert_local_variables(self, ('mean_iou/total_confusion_matrix:0',))
def testMetricsCollections(self):
my_collection_name = '__metrics__'
mean_iou, _ = metrics.mean_iou(
- predictions=tf.ones([10, 1]),
- labels=tf.ones([10, 1]),
+ predictions=array_ops.ones([10, 1]),
+ labels=array_ops.ones([10, 1]),
num_classes=2,
metrics_collections=[my_collection_name])
- self.assertListEqual(tf.get_collection(my_collection_name), [mean_iou])
+ self.assertListEqual(ops.get_collection(my_collection_name), [mean_iou])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.mean_iou(
- predictions=tf.ones([10, 1]),
- labels=tf.ones([10, 1]),
+ predictions=array_ops.ones([10, 1]),
+ labels=array_ops.ones([10, 1]),
num_classes=2,
updates_collections=[my_collection_name])
- self.assertListEqual(tf.get_collection(my_collection_name), [update_op])
+ self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testPredictionsAndLabelsOfDifferentSizeRaisesValueError(self):
- predictions = tf.ones([10, 3])
- labels = tf.ones([10, 4])
+ predictions = array_ops.ones([10, 3])
+ labels = array_ops.ones([10, 4])
with self.assertRaises(ValueError):
- metrics.mean_iou(
- labels, predictions, num_classes=2)
+ metrics.mean_iou(labels, predictions, num_classes=2)
def testLabelsAndWeightsOfDifferentSizeRaisesValueError(self):
- predictions = tf.ones([10])
- labels = tf.ones([10])
- weights = tf.zeros([9])
+ predictions = array_ops.ones([10])
+ labels = array_ops.ones([10])
+ weights = array_ops.zeros([9])
with self.assertRaises(ValueError):
- metrics.mean_iou(
- labels, predictions, num_classes=2, weights=weights)
+ metrics.mean_iou(labels, predictions, num_classes=2, weights=weights)
def testValueTensorIsIdempotent(self):
num_classes = 3
- predictions = tf.random_uniform([10], maxval=num_classes,
- dtype=tf.int64, seed=1)
- labels = tf.random_uniform([10], maxval=num_classes,
- dtype=tf.int64, seed=1)
+ predictions = random_ops.random_uniform(
+ [10], maxval=num_classes, dtype=dtypes_lib.int64, seed=1)
+ labels = random_ops.random_uniform(
+ [10], maxval=num_classes, dtype=dtypes_lib.int64, seed=1)
miou, update_op = metrics.mean_iou(
labels, predictions, num_classes=num_classes)
with self.test_session() as sess:
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
@@ -3100,7 +3169,8 @@ class MeanIOUTest(tf.test.TestCase):
num_classes = 3
with self.test_session() as sess:
# Create the queue that populates the predictions.
- preds_queue = tf.FIFOQueue(5, dtypes=tf.int32, shapes=(1, 1))
+ preds_queue = data_flow_ops.FIFOQueue(
+ 5, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [2])
@@ -3109,7 +3179,8 @@ class MeanIOUTest(tf.test.TestCase):
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
- labels_queue = tf.FIFOQueue(5, dtypes=tf.int32, shapes=(1, 1))
+ labels_queue = data_flow_ops.FIFOQueue(
+ 5, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
@@ -3117,20 +3188,20 @@ class MeanIOUTest(tf.test.TestCase):
_enqueue_vector(sess, labels_queue, [1])
labels = labels_queue.dequeue()
- miou, update_op = metrics.mean_iou(
- labels, predictions, num_classes)
+ miou, update_op = metrics.mean_iou(labels, predictions, num_classes)
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
for _ in range(5):
sess.run(update_op)
- desired_output = np.mean([1.0/2.0, 1.0/4.0, 0.])
+ desired_output = np.mean([1.0 / 2.0, 1.0 / 4.0, 0.])
self.assertEqual(desired_output, miou.eval())
def testMultipleUpdatesWithWeights(self):
num_classes = 2
with self.test_session() as sess:
# Create the queue that populates the predictions.
- preds_queue = tf.FIFOQueue(6, dtypes=tf.int32, shapes=(1, 1))
+ preds_queue = data_flow_ops.FIFOQueue(
+ 6, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [0])
@@ -3140,7 +3211,8 @@ class MeanIOUTest(tf.test.TestCase):
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
- labels_queue = tf.FIFOQueue(6, dtypes=tf.int32, shapes=(1, 1))
+ labels_queue = data_flow_ops.FIFOQueue(
+ 6, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
@@ -3150,7 +3222,8 @@ class MeanIOUTest(tf.test.TestCase):
labels = labels_queue.dequeue()
# Create the queue that populates the weights.
- weights_queue = tf.FIFOQueue(6, dtypes=tf.float32, shapes=(1, 1))
+ weights_queue = data_flow_ops.FIFOQueue(
+ 6, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [1.0])
_enqueue_vector(sess, weights_queue, [1.0])
_enqueue_vector(sess, weights_queue, [1.0])
@@ -3162,10 +3235,10 @@ class MeanIOUTest(tf.test.TestCase):
miou, update_op = metrics.mean_iou(
labels, predictions, num_classes, weights=weights)
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
for _ in range(6):
sess.run(update_op)
- desired_output = np.mean([2.0/3.0, 1.0/2.0])
+ desired_output = np.mean([2.0 / 3.0, 1.0 / 2.0])
self.assertAlmostEqual(desired_output, miou.eval())
def testMultipleUpdatesWithMissingClass(self):
@@ -3176,7 +3249,8 @@ class MeanIOUTest(tf.test.TestCase):
with self.test_session() as sess:
# Create the queue that populates the predictions.
# There is no prediction for class 2.
- preds_queue = tf.FIFOQueue(5, dtypes=tf.int32, shapes=(1, 1))
+ preds_queue = data_flow_ops.FIFOQueue(
+ 5, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [1])
@@ -3186,7 +3260,8 @@ class MeanIOUTest(tf.test.TestCase):
# Create the queue that populates the labels.
      # There is a label for class 2.
- labels_queue = tf.FIFOQueue(5, dtypes=tf.int32, shapes=(1, 1))
+ labels_queue = data_flow_ops.FIFOQueue(
+ 5, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
@@ -3194,82 +3269,90 @@ class MeanIOUTest(tf.test.TestCase):
_enqueue_vector(sess, labels_queue, [1])
labels = labels_queue.dequeue()
- miou, update_op = metrics.mean_iou(
- labels, predictions, num_classes)
+ miou, update_op = metrics.mean_iou(labels, predictions, num_classes)
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
for _ in range(5):
sess.run(update_op)
- desired_output = np.mean([1.0/3.0, 2.0/4.0, 0.])
+ desired_output = np.mean([1.0 / 3.0, 2.0 / 4.0, 0.])
self.assertAlmostEqual(desired_output, miou.eval())
def testUpdateOpEvalIsAccumulatedConfusionMatrix(self):
- predictions = tf.concat_v2(
- [tf.constant(
- 0, shape=[5]), tf.constant(
- 1, shape=[5])], 0)
- labels = tf.concat_v2(
- [tf.constant(
- 0, shape=[3]), tf.constant(
- 1, shape=[7])], 0)
+ predictions = array_ops.concat_v2(
+ [
+ constant_op.constant(
+ 0, shape=[5]), constant_op.constant(
+ 1, shape=[5])
+ ],
+ 0)
+ labels = array_ops.concat_v2(
+ [
+ constant_op.constant(
+ 0, shape=[3]), constant_op.constant(
+ 1, shape=[7])
+ ],
+ 0)
num_classes = 2
with self.test_session() as sess:
- miou, update_op = metrics.mean_iou(
- labels, predictions, num_classes)
- sess.run(tf.local_variables_initializer())
+ miou, update_op = metrics.mean_iou(labels, predictions, num_classes)
+ sess.run(variables.local_variables_initializer())
confusion_matrix = update_op.eval()
self.assertAllEqual([[3, 2], [0, 5]], confusion_matrix)
- desired_miou = np.mean([3./5., 5./7.])
+ desired_miou = np.mean([3. / 5., 5. / 7.])
self.assertAlmostEqual(desired_miou, miou.eval())
def testAllCorrect(self):
- predictions = tf.zeros([40])
- labels = tf.zeros([40])
+ predictions = array_ops.zeros([40])
+ labels = array_ops.zeros([40])
num_classes = 1
with self.test_session() as sess:
- miou, update_op = metrics.mean_iou(
- labels, predictions, num_classes)
- sess.run(tf.local_variables_initializer())
+ miou, update_op = metrics.mean_iou(labels, predictions, num_classes)
+ sess.run(variables.local_variables_initializer())
self.assertEqual(40, update_op.eval()[0])
self.assertEqual(1.0, miou.eval())
def testAllWrong(self):
- predictions = tf.zeros([40])
- labels = tf.ones([40])
+ predictions = array_ops.zeros([40])
+ labels = array_ops.ones([40])
num_classes = 2
with self.test_session() as sess:
- miou, update_op = metrics.mean_iou(
- labels, predictions, num_classes)
- sess.run(tf.local_variables_initializer())
+ miou, update_op = metrics.mean_iou(labels, predictions, num_classes)
+ sess.run(variables.local_variables_initializer())
self.assertAllEqual([[0, 40], [0, 0]], update_op.eval())
self.assertEqual(0., miou.eval())
def testResultsWithSomeMissing(self):
- predictions = tf.concat_v2(
- [tf.constant(
- 0, shape=[5]), tf.constant(
- 1, shape=[5])], 0)
- labels = tf.concat_v2(
- [tf.constant(
- 0, shape=[3]), tf.constant(
- 1, shape=[7])], 0)
+ predictions = array_ops.concat_v2(
+ [
+ constant_op.constant(
+ 0, shape=[5]), constant_op.constant(
+ 1, shape=[5])
+ ],
+ 0)
+ labels = array_ops.concat_v2(
+ [
+ constant_op.constant(
+ 0, shape=[3]), constant_op.constant(
+ 1, shape=[7])
+ ],
+ 0)
num_classes = 2
- weights = tf.concat_v2(
+ weights = array_ops.concat_v2(
[
- tf.constant(
- 0, shape=[1]), tf.constant(
- 1, shape=[8]), tf.constant(
+ constant_op.constant(
+ 0, shape=[1]), constant_op.constant(
+ 1, shape=[8]), constant_op.constant(
0, shape=[1])
],
0)
with self.test_session() as sess:
miou, update_op = metrics.mean_iou(
labels, predictions, num_classes, weights=weights)
- sess.run(tf.local_variables_initializer())
+ sess.run(variables.local_variables_initializer())
self.assertAllEqual([[2, 2], [0, 4]], update_op.eval())
- desired_miou = np.mean([2./4., 4./6.])
+ desired_miou = np.mean([2. / 4., 4. / 6.])
self.assertAlmostEqual(desired_miou, miou.eval())
if __name__ == '__main__':
- tf.test.main()
+ test.main()
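Every hunk in the metric-test file above follows the same recipe: drop the top-level "import tensorflow as tf" alias and import only the framework modules the test actually touches. Below is a minimal sketch of the resulting test-file layout, using module paths taken verbatim from the hunks above; the class and test names are hypothetical, for illustration only:

    from __future__ import absolute_import
    from __future__ import division
    from __future__ import print_function

    from tensorflow.python.framework import ops
    from tensorflow.python.ops import array_ops
    from tensorflow.python.ops import variables
    from tensorflow.python.platform import test


    class ExampleMetricTest(test.TestCase):  # hypothetical test class

      def setUp(self):
        # Same reset idiom as the metric tests above, minus the tf. alias.
        ops.reset_default_graph()

      def testOnes(self):
        with self.test_session() as sess:
          x = array_ops.ones((10, 1))
          sess.run(variables.local_variables_initializer())
          self.assertAllEqual([[1.0]] * 10, x.eval())


    if __name__ == '__main__':
      test.main()

The runtime behavior of the tests is unchanged; only the import graph narrows, which is the point of the change.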
diff --git a/tensorflow/python/kernel_tests/morphological_ops_test.py b/tensorflow/python/kernel_tests/morphological_ops_test.py
index 9856242914..ce4d8acfbd 100644
--- a/tensorflow/python/kernel_tests/morphological_ops_test.py
+++ b/tensorflow/python/kernel_tests/morphological_ops_test.py
@@ -13,15 +13,21 @@
# limitations under the License.
# ==============================================================================
"""Functional tests for morphological filtering operations."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+
+from tensorflow.python.framework import constant_op
+from tensorflow.python.ops import gradient_checker
+from tensorflow.python.ops import nn_ops
+import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
+from tensorflow.python.platform import test
-class DilationTest(tf.test.TestCase):
+class DilationTest(test.TestCase):
def _VerifyValues(self, image, kernel, strides, rates, padding, out, use_gpu):
"""Verifies the output values of the dilation function.
@@ -39,9 +45,9 @@ class DilationTest(tf.test.TestCase):
rates = [1] + rates + [1]
with self.test_session(use_gpu=use_gpu):
- out_tensor = tf.nn.dilation2d(
- tf.constant(image),
- tf.constant(kernel),
+ out_tensor = nn_ops.dilation2d(
+ constant_op.constant(image),
+ constant_op.constant(kernel),
strides=strides,
rates=rates,
padding=padding,
@@ -55,13 +61,14 @@ class DilationTest(tf.test.TestCase):
kernel = [[[.4], [.3]], [[.1], [.0]]]
# [1, 1, 1, 1]
out = [[[[.5]]]]
- self._VerifyValues(image,
- kernel,
- strides=[1, 1],
- rates=[1, 1],
- padding="VALID",
- out=out,
- use_gpu=use_gpu)
+ self._VerifyValues(
+ image,
+ kernel,
+ strides=[1, 1],
+ rates=[1, 1],
+ padding="VALID",
+ out=out,
+ use_gpu=use_gpu)
def _testDilationSamePadding(self, use_gpu):
# [1, 2, 2, 1]
@@ -70,13 +77,14 @@ class DilationTest(tf.test.TestCase):
kernel = [[[.4], [.3]], [[.1], [.0]]]
# [1, 2, 2, 1]
out = [[[[.5], [.6]], [[.7], [.8]]]]
- self._VerifyValues(image,
- kernel,
- strides=[1, 1],
- rates=[1, 1],
- padding="SAME",
- out=out,
- use_gpu=use_gpu)
+ self._VerifyValues(
+ image,
+ kernel,
+ strides=[1, 1],
+ rates=[1, 1],
+ padding="SAME",
+ out=out,
+ use_gpu=use_gpu)
def _testDilationSamePaddingDepth(self, use_gpu):
# [1, 2, 2, 3]
@@ -85,13 +93,14 @@ class DilationTest(tf.test.TestCase):
kernel = [[[.4, .5, .3], [.3, .4, .2]], [[.1, .2, .0], [.0, .1, -.1]]]
# [1, 2, 2, 3]
out = [[[[.5, .7, .3], [.6, .8, .4]], [[.7, .9, .5], [.8, 1., .6]]]]
- self._VerifyValues(image,
- kernel,
- strides=[1, 1],
- rates=[1, 1],
- padding="SAME",
- out=out,
- use_gpu=use_gpu)
+ self._VerifyValues(
+ image,
+ kernel,
+ strides=[1, 1],
+ rates=[1, 1],
+ padding="SAME",
+ out=out,
+ use_gpu=use_gpu)
def _testDilationSamePaddingBatch(self, use_gpu):
# [2, 2, 2, 1]
@@ -100,13 +109,14 @@ class DilationTest(tf.test.TestCase):
kernel = [[[.4], [.3]], [[.1], [.0]]]
# [2, 2, 2, 1]
out = [[[[.5], [.6]], [[.7], [.8]]], [[[.6], [.7]], [[.8], [.9]]]]
- self._VerifyValues(image,
- kernel,
- strides=[1, 1],
- rates=[1, 1],
- padding="SAME",
- out=out,
- use_gpu=use_gpu)
+ self._VerifyValues(
+ image,
+ kernel,
+ strides=[1, 1],
+ rates=[1, 1],
+ padding="SAME",
+ out=out,
+ use_gpu=use_gpu)
def _testDilationValidPaddingNonSquareWindow(self, use_gpu):
# [1, 2, 2, 1]
@@ -115,13 +125,14 @@ class DilationTest(tf.test.TestCase):
kernel = [[[.4], [.3]]]
# [1, 2, 1, 1]
out = [[[[.5]], [[.7]]]]
- self._VerifyValues(image,
- kernel,
- strides=[1, 1],
- rates=[1, 1],
- padding="VALID",
- out=out,
- use_gpu=use_gpu)
+ self._VerifyValues(
+ image,
+ kernel,
+ strides=[1, 1],
+ rates=[1, 1],
+ padding="VALID",
+ out=out,
+ use_gpu=use_gpu)
def _testDilationSamePaddingRate(self, use_gpu):
# [1, 3, 3, 1]
@@ -134,13 +145,14 @@ class DilationTest(tf.test.TestCase):
# [[.1], [.0], [.2]]]
# [1, 3, 3, 1]
out = [[[[.7], [.8], [.6]], [[1.0], [1.1], [.9]], [[.8], [.9], [.9]]]]
- self._VerifyValues(image,
- kernel,
- strides=[1, 1],
- rates=[2, 2],
- padding="SAME",
- out=out,
- use_gpu=use_gpu)
+ self._VerifyValues(
+ image,
+ kernel,
+ strides=[1, 1],
+ rates=[2, 2],
+ padding="SAME",
+ out=out,
+ use_gpu=use_gpu)
def _testDilationValidPaddingUnevenStride(self, use_gpu):
# [1, 3, 3, 1]
@@ -150,13 +162,14 @@ class DilationTest(tf.test.TestCase):
kernel = [[[.4], [.3]], [[.1], [.2]]]
# [1, 2, 2, 1]
out = [[[[.8], [1.0]], [[1.2], [1.4]]]]
- self._VerifyValues(image,
- kernel,
- strides=[1, 2],
- rates=[1, 1],
- padding="VALID",
- out=out,
- use_gpu=use_gpu)
+ self._VerifyValues(
+ image,
+ kernel,
+ strides=[1, 2],
+ rates=[1, 1],
+ padding="VALID",
+ out=out,
+ use_gpu=use_gpu)
def testDilation(self):
for use_gpu in True, False:
@@ -192,81 +205,91 @@ class DilationTest(tf.test.TestCase):
rates = [1] + rates + [1]
with self.test_session(use_gpu=use_gpu):
- image_tensor = tf.constant(image, shape=image_shape, name="input")
- kernel_tensor = tf.constant(kernel, shape=kernel_shape, name="filter")
- out_tensor = tf.nn.dilation2d(image_tensor,
- kernel_tensor,
- strides=strides,
- rates=rates,
- padding=padding,
- name="dilation2d")
+ image_tensor = constant_op.constant(
+ image, shape=image_shape, name="input")
+ kernel_tensor = constant_op.constant(
+ kernel, shape=kernel_shape, name="filter")
+ out_tensor = nn_ops.dilation2d(
+ image_tensor,
+ kernel_tensor,
+ strides=strides,
+ rates=rates,
+ padding=padding,
+ name="dilation2d")
out_shape = out_tensor.eval().shape
# Small delta is necessary for argmax to remain the same.
- err = tf.test.compute_gradient_error([image_tensor, kernel_tensor],
- [image_shape, kernel_shape],
- out_tensor,
- out_shape, [image_init, kernel_init],
- delta=1e-3)
+ err = gradient_checker.compute_gradient_error(
+ [image_tensor, kernel_tensor], [image_shape, kernel_shape],
+ out_tensor,
+ out_shape, [image_init, kernel_init],
+ delta=1e-3)
print("Dilation gradient error = %f" % err)
self.assertLess(err, 1e-4)
def _testDilationGradValidPadding_1x1x1(self, use_gpu):
- self._ConstructAndTestGradient(image_shape=[1, 3, 3, 1],
- kernel_shape=[1, 1, 1],
- strides=[1, 1],
- rates=[1, 1],
- padding="VALID",
- use_gpu=use_gpu)
+ self._ConstructAndTestGradient(
+ image_shape=[1, 3, 3, 1],
+ kernel_shape=[1, 1, 1],
+ strides=[1, 1],
+ rates=[1, 1],
+ padding="VALID",
+ use_gpu=use_gpu)
def _testDilationGradSamePadding_1x1x1(self, use_gpu):
- self._ConstructAndTestGradient(image_shape=[1, 3, 3, 1],
- kernel_shape=[1, 1, 1],
- strides=[1, 1],
- rates=[1, 1],
- padding="SAME",
- use_gpu=use_gpu)
+ self._ConstructAndTestGradient(
+ image_shape=[1, 3, 3, 1],
+ kernel_shape=[1, 1, 1],
+ strides=[1, 1],
+ rates=[1, 1],
+ padding="SAME",
+ use_gpu=use_gpu)
def _testDilationGradSamePadding_1x1x2(self, use_gpu):
- self._ConstructAndTestGradient(image_shape=[1, 3, 3, 2],
- kernel_shape=[1, 1, 2],
- strides=[1, 1],
- rates=[1, 1],
- padding="SAME",
- use_gpu=use_gpu)
+ self._ConstructAndTestGradient(
+ image_shape=[1, 3, 3, 2],
+ kernel_shape=[1, 1, 2],
+ strides=[1, 1],
+ rates=[1, 1],
+ padding="SAME",
+ use_gpu=use_gpu)
def _testDilationGradValidPadding_2x2x1(self, use_gpu):
- self._ConstructAndTestGradient(image_shape=[1, 3, 3, 1],
- kernel_shape=[2, 2, 1],
- strides=[1, 1],
- rates=[1, 1],
- padding="VALID",
- use_gpu=use_gpu)
+ self._ConstructAndTestGradient(
+ image_shape=[1, 3, 3, 1],
+ kernel_shape=[2, 2, 1],
+ strides=[1, 1],
+ rates=[1, 1],
+ padding="VALID",
+ use_gpu=use_gpu)
def _testDilationGradSamePadding_2x2x1(self, use_gpu):
- self._ConstructAndTestGradient(image_shape=[1, 3, 3, 1],
- kernel_shape=[2, 2, 1],
- strides=[1, 1],
- rates=[1, 1],
- padding="SAME",
- use_gpu=use_gpu)
+ self._ConstructAndTestGradient(
+ image_shape=[1, 3, 3, 1],
+ kernel_shape=[2, 2, 1],
+ strides=[1, 1],
+ rates=[1, 1],
+ padding="SAME",
+ use_gpu=use_gpu)
def _testDilationGradSamePaddingBatch_2x2x1(self, use_gpu):
- self._ConstructAndTestGradient(image_shape=[4, 3, 3, 1],
- kernel_shape=[2, 2, 1],
- strides=[1, 1],
- rates=[1, 1],
- padding="SAME",
- use_gpu=use_gpu)
+ self._ConstructAndTestGradient(
+ image_shape=[4, 3, 3, 1],
+ kernel_shape=[2, 2, 1],
+ strides=[1, 1],
+ rates=[1, 1],
+ padding="SAME",
+ use_gpu=use_gpu)
def _testDilationGradSamePadding_2x2x4(self, use_gpu):
- self._ConstructAndTestGradient(image_shape=[1, 3, 3, 4],
- kernel_shape=[2, 2, 4],
- strides=[1, 1],
- rates=[1, 1],
- padding="SAME",
- use_gpu=use_gpu)
+ self._ConstructAndTestGradient(
+ image_shape=[1, 3, 3, 4],
+ kernel_shape=[2, 2, 4],
+ strides=[1, 1],
+ rates=[1, 1],
+ padding="SAME",
+ use_gpu=use_gpu)
def testDilationGrad(self):
for use_gpu in True, False:
@@ -279,7 +302,7 @@ class DilationTest(tf.test.TestCase):
self._testDilationGradSamePadding_2x2x4(use_gpu)
-class ErosionTest(tf.test.TestCase):
+class ErosionTest(test.TestCase):
def _VerifyValues(self, image, kernel, strides, rates, padding, out, use_gpu):
"""Verifies the output values of the erosion function.
@@ -297,9 +320,9 @@ class ErosionTest(tf.test.TestCase):
rates = [1] + rates + [1]
with self.test_session(use_gpu=use_gpu):
- out_tensor = tf.nn.erosion2d(
- tf.constant(image),
- tf.constant(kernel),
+ out_tensor = nn_ops.erosion2d(
+ constant_op.constant(image),
+ constant_op.constant(kernel),
strides=strides,
rates=rates,
padding=padding,
@@ -313,13 +336,14 @@ class ErosionTest(tf.test.TestCase):
kernel = [[[.4], [.3]], [[.1], [.0]]]
# [1, 1, 1, 1]
out = [[[[.0]]]]
- self._VerifyValues(image,
- kernel,
- strides=[1, 1],
- rates=[1, 1],
- padding="VALID",
- out=out,
- use_gpu=use_gpu)
+ self._VerifyValues(
+ image,
+ kernel,
+ strides=[1, 1],
+ rates=[1, 1],
+ padding="VALID",
+ out=out,
+ use_gpu=use_gpu)
def _testErosionSamePadding(self, use_gpu):
# [1, 2, 2, 1]
@@ -328,13 +352,14 @@ class ErosionTest(tf.test.TestCase):
kernel = [[[.4], [.3]], [[.1], [.0]]]
# [1, 2, 2, 1]
out = [[[[.0], [.1]], [[.3], [.4]]]]
- self._VerifyValues(image,
- kernel,
- strides=[1, 1],
- rates=[1, 1],
- padding="SAME",
- out=out,
- use_gpu=use_gpu)
+ self._VerifyValues(
+ image,
+ kernel,
+ strides=[1, 1],
+ rates=[1, 1],
+ padding="SAME",
+ out=out,
+ use_gpu=use_gpu)
def _testErosionSamePaddingDepth(self, use_gpu):
# [1, 2, 2, 3]
@@ -343,13 +368,14 @@ class ErosionTest(tf.test.TestCase):
kernel = [[[.4, .5, .3], [.3, .4, .2]], [[.1, .2, .0], [.0, .1, -.1]]]
# [1, 2, 2, 3]
out = [[[[.0, .0, .0], [.1, .1, .1]], [[.3, .3, .3], [.4, .4, .4]]]]
- self._VerifyValues(image,
- kernel,
- strides=[1, 1],
- rates=[1, 1],
- padding="SAME",
- out=out,
- use_gpu=use_gpu)
+ self._VerifyValues(
+ image,
+ kernel,
+ strides=[1, 1],
+ rates=[1, 1],
+ padding="SAME",
+ out=out,
+ use_gpu=use_gpu)
def _testErosionSamePaddingBatch(self, use_gpu):
# [2, 2, 2, 1]
@@ -358,13 +384,14 @@ class ErosionTest(tf.test.TestCase):
kernel = [[[.4], [.3]], [[.1], [.0]]]
# [2, 2, 2, 1]
out = [[[[.0], [.1]], [[.3], [.4]]], [[[.1], [.2]], [[.4], [.5]]]]
- self._VerifyValues(image,
- kernel,
- strides=[1, 1],
- rates=[1, 1],
- padding="SAME",
- out=out,
- use_gpu=use_gpu)
+ self._VerifyValues(
+ image,
+ kernel,
+ strides=[1, 1],
+ rates=[1, 1],
+ padding="SAME",
+ out=out,
+ use_gpu=use_gpu)
def _testErosionValidPaddingNonSquareWindow(self, use_gpu):
# [1, 2, 2, 1]
@@ -373,13 +400,14 @@ class ErosionTest(tf.test.TestCase):
kernel = [[[.4], [.3]]]
# [1, 2, 1, 1]
out = [[[[-.2]], [[.0]]]]
- self._VerifyValues(image,
- kernel,
- strides=[1, 1],
- rates=[1, 1],
- padding="VALID",
- out=out,
- use_gpu=use_gpu)
+ self._VerifyValues(
+ image,
+ kernel,
+ strides=[1, 1],
+ rates=[1, 1],
+ padding="VALID",
+ out=out,
+ use_gpu=use_gpu)
def _testErosionSamePaddingRate(self, use_gpu):
# [1, 3, 3, 1]
@@ -392,13 +420,14 @@ class ErosionTest(tf.test.TestCase):
# [[.1], [.0], [.2]]]
# [1, 3, 3, 1]
out = [[[[.1], [.1], [.2]], [[0.1], [-.1], [.0]], [[.4], [.2], [.3]]]]
- self._VerifyValues(image,
- kernel,
- strides=[1, 1],
- rates=[2, 2],
- padding="SAME",
- out=out,
- use_gpu=use_gpu)
+ self._VerifyValues(
+ image,
+ kernel,
+ strides=[1, 1],
+ rates=[2, 2],
+ padding="SAME",
+ out=out,
+ use_gpu=use_gpu)
def _testErosionValidPaddingUnevenStride(self, use_gpu):
# [1, 3, 3, 1]
@@ -408,13 +437,14 @@ class ErosionTest(tf.test.TestCase):
kernel = [[[.4], [.3]], [[.1], [.2]]]
# [1, 2, 2, 1]
out = [[[[-.1], [.1]], [[.3], [.5]]]]
- self._VerifyValues(image,
- kernel,
- strides=[1, 2],
- rates=[1, 1],
- padding="VALID",
- out=out,
- use_gpu=use_gpu)
+ self._VerifyValues(
+ image,
+ kernel,
+ strides=[1, 2],
+ rates=[1, 1],
+ padding="VALID",
+ out=out,
+ use_gpu=use_gpu)
def testErosion(self):
for use_gpu in True, False:
@@ -450,81 +480,91 @@ class ErosionTest(tf.test.TestCase):
rates = [1] + rates + [1]
with self.test_session(use_gpu=use_gpu):
- image_tensor = tf.constant(image, shape=image_shape, name="input")
- kernel_tensor = tf.constant(kernel, shape=kernel_shape, name="filter")
- out_tensor = tf.nn.erosion2d(image_tensor,
- kernel_tensor,
- strides=strides,
- rates=rates,
- padding=padding,
- name="erosion2d")
+ image_tensor = constant_op.constant(
+ image, shape=image_shape, name="input")
+ kernel_tensor = constant_op.constant(
+ kernel, shape=kernel_shape, name="filter")
+ out_tensor = nn_ops.erosion2d(
+ image_tensor,
+ kernel_tensor,
+ strides=strides,
+ rates=rates,
+ padding=padding,
+ name="erosion2d")
out_shape = out_tensor.eval().shape
# Small delta is necessary for argmax to remain the same.
- err = tf.test.compute_gradient_error([image_tensor, kernel_tensor],
- [image_shape, kernel_shape],
- out_tensor,
- out_shape, [image_init, kernel_init],
- delta=1e-3)
+ err = gradient_checker.compute_gradient_error(
+ [image_tensor, kernel_tensor], [image_shape, kernel_shape],
+ out_tensor,
+ out_shape, [image_init, kernel_init],
+ delta=1e-3)
print("Erosion gradient error = %f" % err)
self.assertLess(err, 1e-4)
def _testErosionGradValidPadding_1x1x1(self, use_gpu):
- self._ConstructAndTestGradient(image_shape=[1, 3, 3, 1],
- kernel_shape=[1, 1, 1],
- strides=[1, 1],
- rates=[1, 1],
- padding="VALID",
- use_gpu=use_gpu)
+ self._ConstructAndTestGradient(
+ image_shape=[1, 3, 3, 1],
+ kernel_shape=[1, 1, 1],
+ strides=[1, 1],
+ rates=[1, 1],
+ padding="VALID",
+ use_gpu=use_gpu)
def _testErosionGradSamePadding_1x1x1(self, use_gpu):
- self._ConstructAndTestGradient(image_shape=[1, 3, 3, 1],
- kernel_shape=[1, 1, 1],
- strides=[1, 1],
- rates=[1, 1],
- padding="SAME",
- use_gpu=use_gpu)
+ self._ConstructAndTestGradient(
+ image_shape=[1, 3, 3, 1],
+ kernel_shape=[1, 1, 1],
+ strides=[1, 1],
+ rates=[1, 1],
+ padding="SAME",
+ use_gpu=use_gpu)
def _testErosionGradSamePadding_1x1x2(self, use_gpu):
- self._ConstructAndTestGradient(image_shape=[1, 3, 3, 2],
- kernel_shape=[1, 1, 2],
- strides=[1, 1],
- rates=[1, 1],
- padding="SAME",
- use_gpu=use_gpu)
+ self._ConstructAndTestGradient(
+ image_shape=[1, 3, 3, 2],
+ kernel_shape=[1, 1, 2],
+ strides=[1, 1],
+ rates=[1, 1],
+ padding="SAME",
+ use_gpu=use_gpu)
def _testErosionGradValidPadding_2x2x1(self, use_gpu):
- self._ConstructAndTestGradient(image_shape=[1, 3, 3, 1],
- kernel_shape=[2, 2, 1],
- strides=[1, 1],
- rates=[1, 1],
- padding="VALID",
- use_gpu=use_gpu)
+ self._ConstructAndTestGradient(
+ image_shape=[1, 3, 3, 1],
+ kernel_shape=[2, 2, 1],
+ strides=[1, 1],
+ rates=[1, 1],
+ padding="VALID",
+ use_gpu=use_gpu)
def _testErosionGradSamePadding_2x2x1(self, use_gpu):
- self._ConstructAndTestGradient(image_shape=[1, 3, 3, 1],
- kernel_shape=[2, 2, 1],
- strides=[1, 1],
- rates=[1, 1],
- padding="SAME",
- use_gpu=use_gpu)
+ self._ConstructAndTestGradient(
+ image_shape=[1, 3, 3, 1],
+ kernel_shape=[2, 2, 1],
+ strides=[1, 1],
+ rates=[1, 1],
+ padding="SAME",
+ use_gpu=use_gpu)
def _testErosionGradSamePaddingBatch_2x2x1(self, use_gpu):
- self._ConstructAndTestGradient(image_shape=[4, 3, 3, 1],
- kernel_shape=[2, 2, 1],
- strides=[1, 1],
- rates=[1, 1],
- padding="SAME",
- use_gpu=use_gpu)
+ self._ConstructAndTestGradient(
+ image_shape=[4, 3, 3, 1],
+ kernel_shape=[2, 2, 1],
+ strides=[1, 1],
+ rates=[1, 1],
+ padding="SAME",
+ use_gpu=use_gpu)
def _testErosionGradSamePadding_2x2x4(self, use_gpu):
- self._ConstructAndTestGradient(image_shape=[1, 3, 3, 4],
- kernel_shape=[2, 2, 4],
- strides=[1, 1],
- rates=[1, 1],
- padding="SAME",
- use_gpu=use_gpu)
+ self._ConstructAndTestGradient(
+ image_shape=[1, 3, 3, 4],
+ kernel_shape=[2, 2, 4],
+ strides=[1, 1],
+ rates=[1, 1],
+ padding="SAME",
+ use_gpu=use_gpu)
def testErosionGrad(self):
for use_gpu in True, False:
@@ -538,4 +578,4 @@ class ErosionTest(tf.test.TestCase):
if __name__ == "__main__":
- tf.test.main()
+ test.main()
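The dilation and erosion hunks above also reroute the gradient checks from tf.test.compute_gradient_error to the underlying gradient_checker module. A minimal sketch of that call pattern on a stand-in op (math_ops.square here is a placeholder for illustration, not part of the morphological tests):

    from tensorflow.python.framework import constant_op
    from tensorflow.python.ops import gradient_checker
    from tensorflow.python.ops import math_ops
    from tensorflow.python.platform import test


    class GradientCheckSketch(test.TestCase):  # hypothetical test class

      def testSquareGradient(self):
        with self.test_session():
          x = constant_op.constant([[1.0, 2.0], [3.0, 4.0]], name="x")
          y = math_ops.square(x)
          # Compares the analytic Jacobian against a numeric estimate and
          # returns the maximum absolute difference between the two.
          err = gradient_checker.compute_gradient_error(x, [2, 2], y, [2, 2])
          self.assertLess(err, 1e-4)


    if __name__ == "__main__":
      test.main()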
diff --git a/tensorflow/python/kernel_tests/multinomial_op_test.py b/tensorflow/python/kernel_tests/multinomial_op_test.py
index 1ad5345ad1..ab082a1efd 100644
--- a/tensorflow/python/kernel_tests/multinomial_op_test.py
+++ b/tensorflow/python/kernel_tests/multinomial_op_test.py
@@ -12,8 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for Multinomial."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
@@ -22,37 +22,44 @@ import collections
import timeit
import numpy as np
-import tensorflow as tf
+from tensorflow.core.protobuf import config_pb2
+from tensorflow.python.client import session
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import control_flow_ops
+from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
+from tensorflow.python.platform import test
def composed_sampler(logits, num_samples):
# [batch size, num classes, num samples]
- unif = tf.random_uniform(
- logits.get_shape().concatenate(tensor_shape.TensorShape([num_samples])))
- noise = -tf.log(-tf.log(unif))
+ unif = random_ops.random_uniform(logits.get_shape().concatenate(
+ tensor_shape.TensorShape([num_samples])))
+ noise = -math_ops.log(-math_ops.log(unif))
# [batch size, num classes, 1]
- logits = tf.expand_dims(logits, -1)
+ logits = array_ops.expand_dims(logits, -1)
# [batch size, num samples]
- return tf.argmax(logits + noise, dimension=1)
+ return math_ops.argmax(logits + noise, dimension=1)
native_sampler = random_ops.multinomial
-class MultinomialTest(tf.test.TestCase):
+class MultinomialTest(test.TestCase):
use_gpu = False
def testSmallEntropy(self):
- tf.set_random_seed(1618)
+ random_seed.set_random_seed(1618)
with self.test_session(use_gpu=self.use_gpu):
# A logit value of -10 corresponds to a probability of ~5e-5.
- logits = tf.constant([[-10., 10., -10.], [-10., -10., 10.]])
+ logits = constant_op.constant([[-10., 10., -10.], [-10., -10., 10.]])
num_samples = 1000
- samples = tf.multinomial(logits, num_samples).eval()
+ samples = random_ops.multinomial(logits, num_samples).eval()
self.assertAllEqual([[1] * num_samples, [2] * num_samples], samples)
def testOneOpMultipleStepsIndependent(self):
@@ -83,7 +90,7 @@ class MultinomialTest(tf.test.TestCase):
logits = np.array([[1000.] * 5])
if neg:
logits *= -1
- samples = tf.multinomial(logits, 10).eval()
+ samples = random_ops.multinomial(logits, 10).eval()
# Sampled classes should be in-range.
self.assertTrue((samples >= 0).all())
self.assertTrue((samples < 5).all())
@@ -117,8 +124,8 @@ class MultinomialTest(tf.test.TestCase):
check(composed_native_chi2)
def _make_ops(self, num_samples, seed=None):
- prob_dist = tf.constant([[0.15, 0.5, 0.3, 0.05]])
- logits = tf.log(prob_dist)
+ prob_dist = constant_op.constant([[0.15, 0.5, 0.3, 0.05]])
+ logits = math_ops.log(prob_dist)
# Two independent sets of samples from the same distribution
sample_op1 = random_ops.multinomial(logits, num_samples, seed)
sample_op2 = random_ops.multinomial(logits, num_samples, seed)
@@ -141,8 +148,8 @@ class MultinomialTest(tf.test.TestCase):
Frequencies from sampled classes; shape [batch_size, num_classes].
"""
with self.test_session(use_gpu=self.use_gpu) as sess:
- tf.set_random_seed(1618)
- op = sampler(tf.constant(logits), num_samples)
+ random_seed.set_random_seed(1618)
+ op = sampler(constant_op.constant(logits), num_samples)
d = sess.run(op)
batch_size, num_classes = logits.shape
@@ -172,21 +179,22 @@ class MultinomialTest(tf.test.TestCase):
with self.test_session(use_gpu=self.use_gpu):
for batch in 0, 3:
for samples in 0, 7:
- x = tf.multinomial(tf.zeros([batch, classes]), samples).eval()
+ x = random_ops.multinomial(
+ array_ops.zeros([batch, classes]), samples).eval()
self.assertEqual(x.shape, (batch, samples))
def testEmptyClasses(self):
with self.test_session(use_gpu=self.use_gpu):
- x = tf.multinomial(tf.zeros([5, 0]), 7)
+ x = random_ops.multinomial(array_ops.zeros([5, 0]), 7)
with self.assertRaisesOpError("num_classes should be positive"):
x.eval()
def testNegativeMinLogits(self):
- tf.set_random_seed(78844)
+ random_seed.set_random_seed(78844)
with self.test_session(use_gpu=self.use_gpu):
- logits = tf.constant([[np.finfo(np.float32).min] * 1023 + [0]])
+ logits = constant_op.constant([[np.finfo(np.float32).min] * 1023 + [0]])
num_samples = 1000
- samples = tf.multinomial(logits, num_samples).eval()
+ samples = random_ops.multinomial(logits, num_samples).eval()
self.assertAllEqual([[1023] * num_samples], samples)
@@ -201,21 +209,22 @@ def native_op_vs_composed_ops(batch_size, num_classes, num_samples, num_iters):
logits_np = np.random.randn(*shape).astype(np.float32)
  # No CSE/CF: graph optimizations are disabled so the timings compare the
  # raw ops rather than an optimized graph.
- optimizer_options = tf.OptimizerOptions(opt_level=tf.OptimizerOptions.L0)
- config = tf.ConfigProto(
- graph_options=tf.GraphOptions(optimizer_options=optimizer_options))
+ optimizer_options = config_pb2.OptimizerOptions(
+ opt_level=config_pb2.OptimizerOptions.L0)
+ config = config_pb2.ConfigProto(graph_options=config_pb2.GraphOptions(
+ optimizer_options=optimizer_options))
- with tf.Session(config=config) as sess:
- logits = tf.constant(logits_np, shape=shape)
- native_op = tf.group(native_sampler(logits, num_samples))
- composed_op = tf.group(composed_sampler(logits, num_samples))
+ with session.Session(config=config) as sess:
+ logits = constant_op.constant(logits_np, shape=shape)
+ native_op = control_flow_ops.group(native_sampler(logits, num_samples))
+ composed_op = control_flow_ops.group(composed_sampler(logits, num_samples))
native_dt = timeit.timeit(lambda: sess.run(native_op), number=num_iters)
composed_dt = timeit.timeit(lambda: sess.run(composed_op), number=num_iters)
return native_dt, composed_dt
-class MultinomialBenchmark(tf.test.Benchmark):
+class MultinomialBenchmark(test.Benchmark):
def benchmarkNativeOpVsComposedOps(self):
num_iters = 50
@@ -230,16 +239,20 @@ class MultinomialBenchmark(tf.test.Benchmark):
n_dt, c_dt = native_op_vs_composed_ops(batch_size, num_classes,
num_samples, num_iters)
print("%d\t%d\t%d\t%.3f\t%.3f\t%.2f" % (batch_size, num_classes,
- num_samples, c_dt, n_dt, c_dt
- / n_dt))
+ num_samples, c_dt, n_dt,
+ c_dt / n_dt))
- self.report_benchmark(name="native_batch%d_classes%d_s%d" %
- (batch_size, num_classes, num_samples),
- iters=num_iters, wall_time=n_dt)
- self.report_benchmark(name="composed_batch%d_classes%d_s%d" %
- (batch_size, num_classes, num_samples),
- iters=num_iters, wall_time=c_dt)
+ self.report_benchmark(
+ name="native_batch%d_classes%d_s%d" %
+ (batch_size, num_classes, num_samples),
+ iters=num_iters,
+ wall_time=n_dt)
+ self.report_benchmark(
+ name="composed_batch%d_classes%d_s%d" %
+ (batch_size, num_classes, num_samples),
+ iters=num_iters,
+ wall_time=c_dt)
if __name__ == "__main__":
- tf.test.main()
+ test.main()
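
Each tf.<symbol> call in the hunks above is rerouted through the module that defines it: tf.multinomial becomes random_ops.multinomial, tf.log becomes math_ops.log, tf.constant becomes constant_op.constant, and the session-config protos move to config_pb2. A short sketch of a test written in the rewritten style, under the same pre-1.0 layout (the test name is hypothetical):

    from tensorflow.python.framework import constant_op
    from tensorflow.python.ops import math_ops
    from tensorflow.python.ops import random_ops
    from tensorflow.python.platform import test


    class MultinomialSketchTest(test.TestCase):  # hypothetical name

      def testSampleShape(self):
        with self.test_session():
          # One batch row of log-probabilities over four classes.
          logits = math_ops.log(constant_op.constant([[0.15, 0.5, 0.3, 0.05]]))
          # Draw seven samples; the result shape is [batch_size, num_samples].
          samples = random_ops.multinomial(logits, 7).eval()
          self.assertEqual(samples.shape, (1, 7))


    if __name__ == "__main__":
      test.main()
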
diff --git a/tensorflow/python/kernel_tests/numerics_test.py b/tensorflow/python/kernel_tests/numerics_test.py
index 6e0799363b..2bbb5595f4 100644
--- a/tensorflow/python/kernel_tests/numerics_test.py
+++ b/tensorflow/python/kernel_tests/numerics_test.py
@@ -12,26 +12,33 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for tensorflow.ops.numerics."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
+from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
+from tensorflow.python.ops import math_ops
+from tensorflow.python.ops import numerics
+from tensorflow.python.platform import test
-class VerifyTensorAllFiniteTest(tf.test.TestCase):
+class VerifyTensorAllFiniteTest(test.TestCase):
def testVerifyTensorAllFiniteSucceeds(self):
x_shape = [5, 4]
x = np.random.random_sample(x_shape).astype(np.float32)
with self.test_session(use_gpu=True):
- t = tf.constant(x, shape=x_shape, dtype=tf.float32)
- t_verified = tf.verify_tensor_all_finite(t, "Input is not a number.")
+ t = constant_op.constant(x, shape=x_shape, dtype=dtypes.float32)
+ t_verified = numerics.verify_tensor_all_finite(t,
+ "Input is not a number.")
self.assertAllClose(x, t_verified.eval())
def testVerifyTensorAllFiniteFails(self):
@@ -43,59 +50,59 @@ class VerifyTensorAllFiniteTest(tf.test.TestCase):
x[0] = np.nan
with self.test_session(use_gpu=True):
with self.assertRaisesOpError(my_msg):
- t = tf.constant(x, shape=x_shape, dtype=tf.float32)
- t_verified = tf.verify_tensor_all_finite(t, my_msg)
+ t = constant_op.constant(x, shape=x_shape, dtype=dtypes.float32)
+ t_verified = numerics.verify_tensor_all_finite(t, my_msg)
t_verified.eval()
# Test Inf.
x[0] = np.inf
with self.test_session(use_gpu=True):
with self.assertRaisesOpError(my_msg):
- t = tf.constant(x, shape=x_shape, dtype=tf.float32)
- t_verified = tf.verify_tensor_all_finite(t, my_msg)
+ t = constant_op.constant(x, shape=x_shape, dtype=dtypes.float32)
+ t_verified = numerics.verify_tensor_all_finite(t, my_msg)
t_verified.eval()
-class NumericsTest(tf.test.TestCase):
+class NumericsTest(test.TestCase):
def testInf(self):
- with self.test_session(graph=tf.Graph()):
- t1 = tf.constant(1.0)
- t2 = tf.constant(0.0)
- a = tf.div(t1, t2)
- check = tf.add_check_numerics_ops()
+ with self.test_session(graph=ops.Graph()):
+ t1 = constant_op.constant(1.0)
+ t2 = constant_op.constant(0.0)
+ a = math_ops.div(t1, t2)
+ check = numerics.add_check_numerics_ops()
a = control_flow_ops.with_dependencies([check], a)
with self.assertRaisesOpError("Inf"):
a.eval()
def testNaN(self):
- with self.test_session(graph=tf.Graph()):
- t1 = tf.constant(0.0)
- t2 = tf.constant(0.0)
- a = tf.div(t1, t2)
- check = tf.add_check_numerics_ops()
+ with self.test_session(graph=ops.Graph()):
+ t1 = constant_op.constant(0.0)
+ t2 = constant_op.constant(0.0)
+ a = math_ops.div(t1, t2)
+ check = numerics.add_check_numerics_ops()
a = control_flow_ops.with_dependencies([check], a)
with self.assertRaisesOpError("NaN"):
a.eval()
def testBoth(self):
- with self.test_session(graph=tf.Graph()):
- t1 = tf.constant([1.0, 0.0])
- t2 = tf.constant([0.0, 0.0])
- a = tf.div(t1, t2)
- check = tf.add_check_numerics_ops()
+ with self.test_session(graph=ops.Graph()):
+ t1 = constant_op.constant([1.0, 0.0])
+ t2 = constant_op.constant([0.0, 0.0])
+ a = math_ops.div(t1, t2)
+ check = numerics.add_check_numerics_ops()
a = control_flow_ops.with_dependencies([check], a)
with self.assertRaisesOpError("Inf and NaN"):
a.eval()
def testPassThrough(self):
- with self.test_session(graph=tf.Graph()):
- t1 = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3])
- checked = tf.check_numerics(t1, message="pass through test")
+ with self.test_session(graph=ops.Graph()):
+ t1 = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3])
+ checked = array_ops.check_numerics(t1, message="pass through test")
value = checked.eval()
self.assertAllEqual(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]), value)
self.assertEqual([2, 3], checked.get_shape())
if __name__ == "__main__":
- tf.test.main()
+ test.main()
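
The numerics hunks follow the same recipe, with one piece of wiring worth spelling out: numerics.add_check_numerics_ops() returns a single op that checks every float tensor in the graph, and control_flow_ops.with_dependencies attaches it to the tensor under test so that evaluating the tensor runs the checks first. A minimal sketch of that wiring, assuming the same layout (test name hypothetical):

    from tensorflow.python.framework import constant_op
    from tensorflow.python.ops import control_flow_ops
    from tensorflow.python.ops import math_ops
    from tensorflow.python.ops import numerics
    from tensorflow.python.platform import test


    class CheckNumericsSketchTest(test.TestCase):  # hypothetical name

      def testDivByZeroReportsInf(self):
        with self.test_session():
          a = math_ops.div(constant_op.constant(1.0), constant_op.constant(0.0))
          # Insert CheckNumerics ops for the graph, then make evaluating `a`
          # depend on them so the Inf is caught at eval time.
          check = numerics.add_check_numerics_ops()
          a = control_flow_ops.with_dependencies([check], a)
          with self.assertRaisesOpError("Inf"):
            a.eval()


    if __name__ == "__main__":
      test.main()
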
diff --git a/tensorflow/python/kernel_tests/one_hot_op_test.py b/tensorflow/python/kernel_tests/one_hot_op_test.py
index 9a9dbfe8c9..b449a195a7 100644
--- a/tensorflow/python/kernel_tests/one_hot_op_test.py
+++ b/tensorflow/python/kernel_tests/one_hot_op_test.py
@@ -12,26 +12,34 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for tensorflow.ops.one_hot_op."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.ops import array_ops
+from tensorflow.python.platform import test
-class OneHotTest(tf.test.TestCase):
+class OneHotTest(test.TestCase):
- def _testOneHot(self, truth, use_gpu=False, expected_err_re=None,
- raises=None, **inputs):
+ def _testOneHot(self,
+ truth,
+ use_gpu=False,
+ expected_err_re=None,
+ raises=None,
+ **inputs):
with self.test_session(use_gpu=use_gpu):
if raises is not None:
with self.assertRaises(raises):
- tf.one_hot(**inputs)
+ array_ops.one_hot(**inputs)
else:
- ans = tf.one_hot(**inputs)
+ ans = array_ops.one_hot(**inputs)
if expected_err_re is None:
tf_ans = ans.eval()
self.assertAllEqual(tf_ans, truth)
@@ -51,9 +59,7 @@ class OneHotTest(tf.test.TestCase):
off_value = np.asarray(-1.0, dtype=dtype)
truth = np.asarray(
- [[1.0, -1.0, -1.0],
- [-1.0, -1.0, 1.0],
- [-1.0, -1.0, -1.0],
+ [[1.0, -1.0, -1.0], [-1.0, -1.0, 1.0], [-1.0, -1.0, -1.0],
[-1.0, 1.0, -1.0]],
dtype=dtype)
@@ -81,23 +87,15 @@ class OneHotTest(tf.test.TestCase):
depth = 3
truth = np.asarray(
- [[1.0, 0.0, 0.0],
- [0.0, 0.0, 1.0],
- [0.0, 0.0, 0.0],
- [0.0, 1.0, 0.0]],
- dtype=dtype)
+ [[1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, 0.0, 0.0], [0.0, 1.0, 0.0]],
+ dtype=dtype)
# axis == -1
- self._testBothOneHot(
- indices=indices,
- depth=depth,
- truth=truth)
+ self._testBothOneHot(indices=indices, depth=depth, truth=truth)
# axis == 0
self._testBothOneHot(
- indices=indices,
- depth=depth,
- axis=0,
+ indices=indices, depth=depth, axis=0,
truth=truth.T) # Output is transpose version in this case
def testFloatBasic(self):
@@ -125,22 +123,15 @@ class OneHotTest(tf.test.TestCase):
self._testDefaultBasic(np.complex128)
def _testBatch(self, dtype):
- indices = np.asarray([[0, 2, -1, 1],
- [1, 0, 1, -1]],
- dtype=np.int64)
+ indices = np.asarray([[0, 2, -1, 1], [1, 0, 1, -1]], dtype=np.int64)
depth = 3
on_value = np.asarray(1.0, dtype=dtype)
off_value = np.asarray(-1.0, dtype=dtype)
truth = np.asarray(
- [[[1.0, -1.0, -1.0],
- [-1.0, -1.0, 1.0],
- [-1.0, -1.0, -1.0],
- [-1.0, 1.0, -1.0]],
- [[-1.0, 1.0, -1.0],
- [1.0, -1.0, -1.0],
- [-1.0, 1.0, -1.0],
- [-1.0, -1.0, -1.0]]],
+ [[[1.0, -1.0, -1.0], [-1.0, -1.0, 1.0], [-1.0, -1.0, -1.0],
+ [-1.0, 1.0, -1.0]], [[-1.0, 1.0, -1.0], [1.0, -1.0, -1.0],
+ [-1.0, 1.0, -1.0], [-1.0, -1.0, -1.0]]],
dtype=dtype)
# axis == -1
@@ -163,75 +154,56 @@ class OneHotTest(tf.test.TestCase):
truth=[truth[0].T, truth[1].T]) # Do not transpose the batch
def _testDefaultValuesBatch(self, dtype):
- indices = np.asarray([[0, 2, -1, 1],
- [1, 0, 1, -1]],
- dtype=np.int64)
+ indices = np.asarray([[0, 2, -1, 1], [1, 0, 1, -1]], dtype=np.int64)
depth = 3
truth = np.asarray(
- [[[1.0, 0.0, 0.0],
- [0.0, 0.0, 1.0],
- [0.0, 0.0, 0.0],
- [0.0, 1.0, 0.0]],
- [[0.0, 1.0, 0.0],
- [1.0, 0.0, 0.0],
- [0.0, 1.0, 0.0],
- [0.0, 0.0, 0.0]]],
- dtype=dtype)
+ [[[1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, 0.0, 0.0], [0.0, 1.0, 0.0]],
+ [[0.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]]],
+ dtype=dtype)
# axis == -1
- self._testBothOneHot(
- indices=indices,
- depth=depth,
- dtype=dtype,
- truth=truth)
+ self._testBothOneHot(indices=indices, depth=depth, dtype=dtype, truth=truth)
# axis == 1
self._testBothOneHot(
- indices=indices,
- depth=depth,
- axis=1,
- dtype=dtype,
- truth=[truth[0].T, truth[1].T]) # Do not transpose the batch
+ indices=indices,
+ depth=depth,
+ axis=1,
+ dtype=dtype,
+ truth=[truth[0].T, truth[1].T]) # Do not transpose the batch
def _testValueTypeBatch(self, dtype):
- indices = np.asarray([[0, 2, -1, 1],
- [1, 0, 1, -1]],
- dtype=np.int64)
+ indices = np.asarray([[0, 2, -1, 1], [1, 0, 1, -1]], dtype=np.int64)
depth = 3
on_value = np.asarray(1.0, dtype=dtype)
off_value = np.asarray(-1.0, dtype=dtype)
truth = np.asarray(
- [[[1.0, -1.0, -1.0],
- [-1.0, -1.0, 1.0],
- [-1.0, -1.0, -1.0],
- [-1.0, 1.0, -1.0]],
- [[-1.0, 1.0, -1.0],
- [1.0, -1.0, -1.0],
- [-1.0, 1.0, -1.0],
- [-1.0, -1.0, -1.0]]],
+ [[[1.0, -1.0, -1.0], [-1.0, -1.0, 1.0], [-1.0, -1.0, -1.0],
+ [-1.0, 1.0, -1.0]], [[-1.0, 1.0, -1.0], [1.0, -1.0, -1.0],
+ [-1.0, 1.0, -1.0], [-1.0, -1.0, -1.0]]],
dtype=dtype)
# axis == -1
self._testBothOneHot(
- indices=indices,
- on_value=on_value,
- off_value=off_value,
- depth=depth,
- dtype=dtype,
- truth=truth)
+ indices=indices,
+ on_value=on_value,
+ off_value=off_value,
+ depth=depth,
+ dtype=dtype,
+ truth=truth)
# axis == 1
self._testBothOneHot(
- indices=indices,
- on_value=on_value,
- off_value=off_value,
- depth=depth,
- axis=1,
- dtype=dtype,
- truth=[truth[0].T, truth[1].T]) # Do not transpose the batch
+ indices=indices,
+ on_value=on_value,
+ off_value=off_value,
+ depth=depth,
+ axis=1,
+ dtype=dtype,
+ truth=[truth[0].T, truth[1].T]) # Do not transpose the batch
def _testEmpty(self, dtype):
indices = np.zeros((0, 16), dtype=np.int64)
@@ -286,133 +258,119 @@ class OneHotTest(tf.test.TestCase):
self._testValueTypeBatch(np.complex64)
def testSimpleCases(self):
- indices = [0,1,2]
+ indices = [0, 1, 2]
depth = 3
truth = np.asarray(
- [[1.0, 0.0, 0.0],
- [0.0, 1.0, 0.0],
- [0.0, 0.0, 1.0]],
- dtype=np.float32)
+ [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]], dtype=np.float32)
self._testBothOneHot(indices=indices, depth=depth, truth=truth)
- indices = [0,1,2]
+ indices = [0, 1, 2]
depth = 3
- truth = np.asarray(
- [[1, 0, 0],
- [0, 1, 0],
- [0, 0, 1]],
- dtype=np.int32)
- self._testBothOneHot(indices=indices, depth=depth, dtype=np.int32,
- truth=truth)
-
- indices = [0,1,2]
+ truth = np.asarray([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=np.int32)
+ self._testBothOneHot(
+ indices=indices, depth=depth, dtype=np.int32, truth=truth)
+
+ indices = [0, 1, 2]
depth = 3
- truth = np.asarray(
- [[1, -1, -1],
- [-1, 1, -1],
- [-1, -1, 1]],
- dtype=np.int32)
- self._testBothOneHot(indices=indices, depth=depth, on_value=1,
- off_value=-1, truth=truth)
+ truth = np.asarray([[1, -1, -1], [-1, 1, -1], [-1, -1, 1]], dtype=np.int32)
+ self._testBothOneHot(
+ indices=indices, depth=depth, on_value=1, off_value=-1, truth=truth)
def testSingleValueGiven(self):
# Only on_value provided
- indices = [0,1,2]
+ indices = [0, 1, 2]
depth = 3
- truth = np.asarray(
- [[1, 0, 0],
- [0, 1, 0],
- [0, 0, 1]],
- dtype=np.int32)
+ truth = np.asarray([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=np.int32)
self._testBothOneHot(indices=indices, depth=depth, on_value=1, truth=truth)
# Only off_value provided
- indices = [0,1,2]
+ indices = [0, 1, 2]
depth = 3
- truth = np.asarray(
- [[1, 0, 0],
- [0, 1, 0],
- [0, 0, 1]],
- dtype=np.float32)
- self._testBothOneHot(indices=indices, depth=depth,
- off_value=0.0, truth=truth)
+ truth = np.asarray([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=np.float32)
+ self._testBothOneHot(
+ indices=indices, depth=depth, off_value=0.0, truth=truth)
def testString(self):
- indices = [0,1,2]
+ indices = [0, 1, 2]
depth = 3
- truth = np.asarray(
- [[b"1.0", b"0.0", b"0.0"],
- [b"0.0", b"1.0", b"0.0"],
- [b"0.0", b"0.0", b"1.0"]])
+ truth = np.asarray([[b"1.0", b"0.0", b"0.0"], [b"0.0", b"1.0", b"0.0"],
+ [b"0.0", b"0.0", b"1.0"]])
on_value = np.asarray(b"1.0")
off_value = np.asarray(b"0.0")
- self._testBothOneHot(indices=indices, depth=depth, on_value=on_value,
- off_value=off_value, dtype=tf.string, truth=truth)
+ self._testBothOneHot(
+ indices=indices,
+ depth=depth,
+ on_value=on_value,
+ off_value=off_value,
+ dtype=dtypes.string,
+ truth=truth)
- on_value = tf.constant(b"1.0")
- off_value = tf.constant(b"0.0")
- self._testBothOneHot(indices=indices, depth=depth, on_value=on_value,
- off_value=off_value, dtype=tf.string, truth=truth)
+ on_value = constant_op.constant(b"1.0")
+ off_value = constant_op.constant(b"0.0")
+ self._testBothOneHot(
+ indices=indices,
+ depth=depth,
+ on_value=on_value,
+ off_value=off_value,
+ dtype=dtypes.string,
+ truth=truth)
on_value = b"1.0"
off_value = b"0.0"
- self._testBothOneHot(indices=indices, depth=depth, on_value=on_value,
- off_value=off_value, dtype=tf.string, truth=truth)
+ self._testBothOneHot(
+ indices=indices,
+ depth=depth,
+ on_value=on_value,
+ off_value=off_value,
+ dtype=dtypes.string,
+ truth=truth)
def testIndicesTypes(self):
- tf_types = [tf.uint8, tf.int32, tf.int64]
+ tf_types = [dtypes.uint8, dtypes.int32, dtypes.int64]
np_types = [np.int32, np.int64]
for itype in tf_types + np_types:
      # Note: to keep the tests simple, in the uint8 case the index -1 below
      # wraps around to 255, which is out of the depth range [0, depth), just
      # as -1 itself is for the signed index types.
if itype in tf_types:
- indices = tf.constant([[0, 2, -1, 1],
- [1, 0, 1, -1]],
- dtype=itype)
+ indices = constant_op.constant(
+ [[0, 2, -1, 1], [1, 0, 1, -1]], dtype=itype)
elif itype in np_types:
- indices = np.asarray([[0, 2, -1, 1],
- [1, 0, 1, -1]],
- dtype=itype)
+ indices = np.asarray([[0, 2, -1, 1], [1, 0, 1, -1]], dtype=itype)
depth = 3
on_value = np.asarray(1.0, dtype=np.float32)
off_value = np.asarray(-1.0, dtype=np.float32)
truth = np.asarray(
- [[[1.0, -1.0, -1.0],
- [-1.0, -1.0, 1.0],
- [-1.0, -1.0, -1.0],
- [-1.0, 1.0, -1.0]],
- [[-1.0, 1.0, -1.0],
- [1.0, -1.0, -1.0],
- [-1.0, 1.0, -1.0],
- [-1.0, -1.0, -1.0]]],
+ [[[1.0, -1.0, -1.0], [-1.0, -1.0, 1.0], [-1.0, -1.0, -1.0],
+ [-1.0, 1.0, -1.0]], [[-1.0, 1.0, -1.0], [1.0, -1.0, -1.0],
+ [-1.0, 1.0, -1.0], [-1.0, -1.0, -1.0]]],
dtype=np.float32)
# axis == -1
self._testBothOneHot(
- indices=indices,
- on_value=on_value,
- off_value=off_value,
- depth=depth,
- truth=truth)
+ indices=indices,
+ on_value=on_value,
+ off_value=off_value,
+ depth=depth,
+ truth=truth)
# axis == 1
self._testBothOneHot(
- indices=indices,
- on_value=on_value,
- off_value=off_value,
- depth=depth,
- axis=1,
- truth=[truth[0].T, truth[1].T]) # Do not transpose the batch
+ indices=indices,
+ on_value=on_value,
+ off_value=off_value,
+ depth=depth,
+ axis=1,
+ truth=[truth[0].T, truth[1].T]) # Do not transpose the batch
def testPrefixDimOverflow(self):
- for itype in [tf.int32, tf.int64, tf.uint8]:
+ for itype in [dtypes.int32, dtypes.int64, dtypes.uint8]:
prefix_dim_size = 65536
depth = 2
x = [i % depth for i in range(prefix_dim_size)]
- indices = tf.constant(x, dtype=itype)
+ indices = constant_op.constant(x, dtype=itype)
truth = np.zeros((prefix_dim_size, depth), np.float32)
for i in range(prefix_dim_size):
@@ -432,12 +390,12 @@ class OneHotTest(tf.test.TestCase):
off_value = np.asarray(0.0, np.float32)
self._testBothOneHot(
- indices=indices,
- depth=depth,
- on_value=on_value,
- off_value=off_value,
- truth=None,
- raises=TypeError)
+ indices=indices,
+ depth=depth,
+ on_value=on_value,
+ off_value=off_value,
+ truth=None,
+ raises=TypeError)
def testDtypeMismatchTypeError(self):
indices = [0, 1, 2]
@@ -447,21 +405,21 @@ class OneHotTest(tf.test.TestCase):
dtype = np.int32
self._testBothOneHot(
- indices=indices,
- depth=depth,
- on_value=on_value,
- dtype=dtype,
- truth=None,
- raises=TypeError)
+ indices=indices,
+ depth=depth,
+ on_value=on_value,
+ dtype=dtype,
+ truth=None,
+ raises=TypeError)
self._testBothOneHot(
- indices=indices,
- depth=depth,
- on_value=off_value,
- dtype=dtype,
- truth=None,
- raises=TypeError)
+ indices=indices,
+ depth=depth,
+ on_value=off_value,
+ dtype=dtype,
+ truth=None,
+ raises=TypeError)
if __name__ == "__main__":
- tf.test.main()
+ test.main()
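
Besides the import rewrite, the one-hot hunks carry the accompanying yapf reflow: stacked keyword arguments collapse onto fewer lines with four-space continuation indents, and tf.string becomes dtypes.string. The call semantics are unchanged; a minimal sketch in the new style (test name hypothetical):

    import numpy as np

    from tensorflow.python.ops import array_ops
    from tensorflow.python.platform import test


    class OneHotSketchTest(test.TestCase):  # hypothetical name

      def testIdentity(self):
        with self.test_session():
          # With the default on/off values (1.0 / 0.0), one_hot([0, 1, 2], 3)
          # yields the 3x3 identity matrix.
          ans = array_ops.one_hot(indices=[0, 1, 2], depth=3).eval()
          self.assertAllEqual(ans, np.eye(3, dtype=np.float32))


    if __name__ == "__main__":
      test.main()
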
diff --git a/tensorflow/python/kernel_tests/pack_op_test.py b/tensorflow/python/kernel_tests/pack_op_test.py
index 3f1b9daf4c..6458a23741 100644
--- a/tensorflow/python/kernel_tests/pack_op_test.py
+++ b/tensorflow/python/kernel_tests/pack_op_test.py
@@ -12,25 +12,34 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Functional tests for Pack Op."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import errors_impl
+from tensorflow.python.framework import ops
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import gradient_checker
+from tensorflow.python.ops import variables
+from tensorflow.python.platform import test
def np_split_squeeze(array, axis):
axis_len = array.shape[axis]
return [
- np.squeeze(arr, axis=(axis,))
- for arr in np.split(array, axis_len, axis=axis)
+ np.squeeze(
+ arr, axis=(axis,)) for arr in np.split(
+ array, axis_len, axis=axis)
]
-class PackOpTest(tf.test.TestCase):
+class PackOpTest(test.TestCase):
def testSimple(self):
np.random.seed(7)
@@ -39,12 +48,12 @@ class PackOpTest(tf.test.TestCase):
data = np.random.randn(*shape)
      # Convert [data[0], data[1], ...] separately to TensorFlow tensors.
# TODO(irving): Remove list() once we handle maps correctly
- xs = list(map(tf.constant, data))
+ xs = list(map(constant_op.constant, data))
      # Pack back into a single TensorFlow tensor.
- c = tf.pack(xs)
+ c = array_ops.pack(xs)
self.assertAllEqual(c.eval(), data)
- c = tf.stack(xs)
+ c = array_ops.stack(xs)
self.assertAllEqual(c.eval(), data)
def testConst(self):
@@ -53,7 +62,7 @@ class PackOpTest(tf.test.TestCase):
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
data = np.random.randn(*shape).astype(np.float32)
      # Pack back into a single TensorFlow tensor directly from the np array.
- c = tf.pack(data)
+ c = array_ops.pack(data)
# This is implemented via a Const:
self.assertEqual(c.op.type, "Const")
self.assertAllEqual(c.eval(), data)
@@ -61,20 +70,20 @@ class PackOpTest(tf.test.TestCase):
# Python lists also work for 1-D case:
if len(shape) == 1:
data_list = list(data)
- cl = tf.pack(data_list)
+ cl = array_ops.pack(data_list)
self.assertEqual(cl.op.type, "Const")
self.assertAllEqual(cl.eval(), data)
- cl = tf.stack(data_list)
+ cl = array_ops.stack(data_list)
self.assertEqual(cl.op.type, "Const")
self.assertAllEqual(cl.eval(), data)
# Verify that shape induction works with shapes produced via const pack
- a = tf.constant([1, 2, 3, 4, 5, 6])
- b = tf.reshape(a, tf.pack([2, 3]))
+ a = constant_op.constant([1, 2, 3, 4, 5, 6])
+ b = array_ops.reshape(a, array_ops.pack([2, 3]))
self.assertAllEqual(b.get_shape(), [2, 3])
- b = tf.reshape(a, tf.stack([2, 3]))
+ b = array_ops.reshape(a, array_ops.stack([2, 3]))
self.assertAllEqual(b.get_shape(), [2, 3])
def testGradientsAxis0(self):
@@ -84,13 +93,13 @@ class PackOpTest(tf.test.TestCase):
shapes = [shape[1:]] * shape[0]
with self.test_session(use_gpu=True):
# TODO(irving): Remove list() once we handle maps correctly
- xs = list(map(tf.constant, data))
- c = tf.pack(xs)
- err = tf.test.compute_gradient_error(xs, shapes, c, shape)
+ xs = list(map(constant_op.constant, data))
+ c = array_ops.pack(xs)
+ err = gradient_checker.compute_gradient_error(xs, shapes, c, shape)
self.assertLess(err, 1e-6)
- c = tf.stack(xs)
- err = tf.test.compute_gradient_error(xs, shapes, c, shape)
+ c = array_ops.stack(xs)
+ err = gradient_checker.compute_gradient_error(xs, shapes, c, shape)
self.assertLess(err, 1e-6)
def testGradientsAxis1(self):
@@ -102,32 +111,32 @@ class PackOpTest(tf.test.TestCase):
out_shape.insert(1, shape[0])
with self.test_session(use_gpu=True):
# TODO(irving): Remove list() once we handle maps correctly
- xs = list(map(tf.constant, data))
- c = tf.pack(xs, axis=1)
- err = tf.test.compute_gradient_error(xs, shapes, c, out_shape)
+ xs = list(map(constant_op.constant, data))
+ c = array_ops.pack(xs, axis=1)
+ err = gradient_checker.compute_gradient_error(xs, shapes, c, out_shape)
self.assertLess(err, 1e-6)
- c = tf.stack(xs, axis=1)
- err = tf.test.compute_gradient_error(xs, shapes, c, out_shape)
+ c = array_ops.stack(xs, axis=1)
+ err = gradient_checker.compute_gradient_error(xs, shapes, c, out_shape)
self.assertLess(err, 1e-6)
def testZeroSize(self):
    # Verify that pack doesn't crash for zero-size inputs.
with self.test_session(use_gpu=True):
- for shape in (0,), (3,0), (0, 3):
+ for shape in (0,), (3, 0), (0, 3):
x = np.zeros((2,) + shape)
- p = tf.pack(list(x)).eval()
+ p = array_ops.pack(list(x)).eval()
self.assertAllEqual(p, x)
- p = tf.stack(list(x)).eval()
+ p = array_ops.stack(list(x)).eval()
self.assertAllEqual(p, x)
def testAxis0Default(self):
with self.test_session(use_gpu=True):
- t = [tf.constant([1, 2, 3]), tf.constant([4, 5, 6])]
+ t = [constant_op.constant([1, 2, 3]), constant_op.constant([4, 5, 6])]
- packed = tf.pack(t).eval()
- stacked = tf.stack(t).eval()
+ packed = array_ops.pack(t).eval()
+ stacked = array_ops.stack(t).eval()
self.assertAllEqual(packed, np.array([[1, 2, 3], [4, 5, 6]]))
self.assertAllEqual(stacked, np.array([[1, 2, 3], [4, 5, 6]]))
@@ -142,11 +151,11 @@ class PackOpTest(tf.test.TestCase):
test_arrays = np_split_squeeze(expected, j)
with self.test_session(use_gpu=True):
- actual_pack = tf.pack(test_arrays, axis=j)
+ actual_pack = array_ops.pack(test_arrays, axis=j)
self.assertEqual(expected.shape, actual_pack.get_shape())
actual_pack = actual_pack.eval()
- actual_stack = tf.pack(test_arrays, axis=j)
+      actual_stack = array_ops.stack(test_arrays, axis=j)
self.assertEqual(expected.shape, actual_stack.get_shape())
actual_stack = actual_stack.eval()
@@ -154,57 +163,56 @@ class PackOpTest(tf.test.TestCase):
self.assertNDArrayNear(expected, actual_stack, 1e-6)
def testDimOutOfRange(self):
- t = [tf.constant([1, 2, 3]), tf.constant([4, 5, 6])]
+ t = [constant_op.constant([1, 2, 3]), constant_op.constant([4, 5, 6])]
with self.assertRaisesRegexp(ValueError, r"axis = 2 not in \[-2, 2\)"):
- tf.pack(t, axis=2)
+ array_ops.pack(t, axis=2)
with self.assertRaisesRegexp(ValueError, r"axis = 2 not in \[-2, 2\)"):
- tf.stack(t, axis=2)
+ array_ops.stack(t, axis=2)
def testDimOutOfNegativeRange(self):
- t = [tf.constant([1, 2, 3]), tf.constant([4, 5, 6])]
+ t = [constant_op.constant([1, 2, 3]), constant_op.constant([4, 5, 6])]
with self.assertRaisesRegexp(ValueError, r"axis = -3 not in \[-2, 2\)"):
- tf.pack(t, axis=-3)
+ array_ops.pack(t, axis=-3)
with self.assertRaisesRegexp(ValueError, r"axis = -3 not in \[-2, 2\)"):
- tf.stack(t, axis=-3)
+ array_ops.stack(t, axis=-3)
-class AutomaticPackingTest(tf.test.TestCase):
+class AutomaticPackingTest(test.TestCase):
def testSimple(self):
with self.test_session(use_gpu=True):
- self.assertAllEqual([1, 0, 2],
- tf.convert_to_tensor([1, tf.constant(0), 2]).eval())
- self.assertAllEqual(
- [[0, 0, 0], [0, 1, 0], [0, 0, 0]],
- tf.convert_to_tensor([[0, 0, 0],
- [0, tf.constant(1), 0],
- [0, 0, 0]]).eval())
self.assertAllEqual(
- [[0, 0, 0], [0, 1, 0], [0, 0, 0]],
- tf.convert_to_tensor([[0, 0, 0],
- tf.constant([0, 1, 0]),
- [0, 0, 0]]).eval())
- self.assertAllEqual(
- [[0, 0, 0], [0, 1, 0], [0, 0, 0]],
- tf.convert_to_tensor([tf.constant([0, 0, 0]),
- tf.constant([0, 1, 0]),
- tf.constant([0, 0, 0])]).eval())
+ [1, 0, 2],
+ ops.convert_to_tensor([1, constant_op.constant(0), 2]).eval())
+ self.assertAllEqual([[0, 0, 0], [0, 1, 0], [0, 0, 0]],
+ ops.convert_to_tensor(
+ [[0, 0, 0], [0, constant_op.constant(1), 0],
+ [0, 0, 0]]).eval())
+ self.assertAllEqual([[0, 0, 0], [0, 1, 0], [0, 0, 0]],
+ ops.convert_to_tensor(
+ [[0, 0, 0], constant_op.constant([0, 1, 0]),
+ [0, 0, 0]]).eval())
+ self.assertAllEqual([[0, 0, 0], [0, 1, 0], [0, 0, 0]],
+ ops.convert_to_tensor([
+ constant_op.constant([0, 0, 0]),
+ constant_op.constant([0, 1, 0]),
+ constant_op.constant([0, 0, 0])
+ ]).eval())
def testWithNDArray(self):
with self.test_session(use_gpu=True):
- result = tf.convert_to_tensor([[[0., 0.],
- tf.constant([1., 1.])],
- np.array([[2., 2.], [3., 3.]],
- dtype=np.float32)])
- self.assertAllEqual(
- [[[0., 0.], [1., 1.]], [[2., 2.], [3., 3.]]], result.eval())
+ result = ops.convert_to_tensor([[[0., 0.],
+ constant_op.constant([1., 1.])],
+ np.array(
+ [[2., 2.], [3., 3.]],
+ dtype=np.float32)])
+ self.assertAllEqual([[[0., 0.], [1., 1.]], [[2., 2.], [3., 3.]]],
+ result.eval())
def testVariable(self):
with self.test_session(use_gpu=True):
- v = tf.Variable(17)
- result = tf.convert_to_tensor([[0, 0, 0],
- [0, v, 0],
- [0, 0, 0]])
+ v = variables.Variable(17)
+ result = ops.convert_to_tensor([[0, 0, 0], [0, v, 0], [0, 0, 0]])
v.initializer.run()
self.assertAllEqual([[0, 0, 0], [0, 17, 0], [0, 0, 0]], result.eval())
@@ -212,74 +220,67 @@ class AutomaticPackingTest(tf.test.TestCase):
self.assertAllEqual([[0, 0, 0], [0, 38, 0], [0, 0, 0]], result.eval())
def testDtype(self):
- t_0 = tf.convert_to_tensor([[0., 0., 0.],
- [0., 0., 0.],
- [0., 0., 0.]])
- self.assertEqual(tf.float32, t_0.dtype)
+ t_0 = ops.convert_to_tensor([[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]])
+ self.assertEqual(dtypes.float32, t_0.dtype)
- t_1 = tf.convert_to_tensor([[0., 0., 0.],
- tf.constant([0., 0., 0.], dtype=tf.float64),
- [0., 0., 0.]])
- self.assertEqual(tf.float64, t_1.dtype)
+ t_1 = ops.convert_to_tensor([[0., 0., 0.], constant_op.constant(
+ [0., 0., 0.], dtype=dtypes.float64), [0., 0., 0.]])
+ self.assertEqual(dtypes.float64, t_1.dtype)
- t_2 = tf.convert_to_tensor([[0., 0., 0.],
- [0., 0., 0.],
- [0., 0., 0.]], dtype=tf.float64)
- self.assertEqual(tf.float64, t_2.dtype)
+ t_2 = ops.convert_to_tensor(
+ [[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]], dtype=dtypes.float64)
+ self.assertEqual(dtypes.float64, t_2.dtype)
with self.assertRaises(TypeError):
- tf.convert_to_tensor([tf.constant([0., 0., 0.], dtype=tf.float32),
- tf.constant([0., 0., 0.], dtype=tf.float64),
- [0., 0., 0.]])
+ ops.convert_to_tensor([
+ constant_op.constant(
+ [0., 0., 0.], dtype=dtypes.float32), constant_op.constant(
+ [0., 0., 0.], dtype=dtypes.float64), [0., 0., 0.]
+ ])
with self.assertRaises(TypeError):
- tf.convert_to_tensor([[0., 0., 0.],
- tf.constant([0., 0., 0.], dtype=tf.float64),
- [0., 0., 0.]], dtype=tf.float32)
+ ops.convert_to_tensor(
+ [[0., 0., 0.], constant_op.constant(
+ [0., 0., 0.], dtype=dtypes.float64), [0., 0., 0.]],
+ dtype=dtypes.float32)
with self.assertRaises(TypeError):
- tf.convert_to_tensor([tf.constant([0., 0., 0.], dtype=tf.float64)],
- dtype=tf.float32)
+ ops.convert_to_tensor(
+ [constant_op.constant(
+ [0., 0., 0.], dtype=dtypes.float64)],
+ dtype=dtypes.float32)
def testPlaceholder(self):
with self.test_session(use_gpu=True):
# Test using placeholder with a defined shape.
- ph_0 = tf.placeholder(tf.int32, shape=[])
- result_0 = tf.convert_to_tensor([[0, 0, 0],
- [0, ph_0, 0],
- [0, 0, 0]])
- self.assertAllEqual([[0, 0, 0],
- [0, 1, 0],
- [0, 0, 0]],
- result_0.eval(feed_dict={ph_0: 1}))
- self.assertAllEqual([[0, 0, 0],
- [0, 2, 0],
- [0, 0, 0]],
- result_0.eval(feed_dict={ph_0: 2}))
+ ph_0 = array_ops.placeholder(dtypes.int32, shape=[])
+ result_0 = ops.convert_to_tensor([[0, 0, 0], [0, ph_0, 0], [0, 0, 0]])
+ self.assertAllEqual(
+ [[0, 0, 0], [0, 1, 0], [0, 0, 0]], result_0.eval(feed_dict={ph_0: 1}))
+ self.assertAllEqual(
+ [[0, 0, 0], [0, 2, 0], [0, 0, 0]], result_0.eval(feed_dict={ph_0: 2}))
# Test using placeholder with an undefined shape.
- ph_1 = tf.placeholder(tf.int32)
- result_1 = tf.convert_to_tensor([[0, 0, 0],
- [0, ph_1, 0],
- [0, 0, 0]])
- self.assertAllEqual([[0, 0, 0], [0, 1, 0], [0, 0, 0]],
- result_1.eval(feed_dict={ph_1: 1}))
- self.assertAllEqual([[0, 0, 0], [0, 2, 0], [0, 0, 0]],
- result_1.eval(feed_dict={ph_1: 2}))
+ ph_1 = array_ops.placeholder(dtypes.int32)
+ result_1 = ops.convert_to_tensor([[0, 0, 0], [0, ph_1, 0], [0, 0, 0]])
+ self.assertAllEqual(
+ [[0, 0, 0], [0, 1, 0], [0, 0, 0]], result_1.eval(feed_dict={ph_1: 1}))
+ self.assertAllEqual(
+ [[0, 0, 0], [0, 2, 0], [0, 0, 0]], result_1.eval(feed_dict={ph_1: 2}))
def testShapeErrors(self):
# Static shape error.
- ph_0 = tf.placeholder(tf.int32, shape=[1])
+ ph_0 = array_ops.placeholder(dtypes.int32, shape=[1])
with self.assertRaises(ValueError):
- tf.convert_to_tensor([[0, 0, 0], [0, ph_0, 0], [0, 0, 0]])
+ ops.convert_to_tensor([[0, 0, 0], [0, ph_0, 0], [0, 0, 0]])
# Dynamic shape error.
- ph_1 = tf.placeholder(tf.int32)
- result_1 = tf.convert_to_tensor([[0, 0, 0], [0, ph_1, 0], [0, 0, 0]])
+ ph_1 = array_ops.placeholder(dtypes.int32)
+ result_1 = ops.convert_to_tensor([[0, 0, 0], [0, ph_1, 0], [0, 0, 0]])
with self.test_session(use_gpu=True):
- with self.assertRaises(tf.errors.InvalidArgumentError):
+ with self.assertRaises(errors_impl.InvalidArgumentError):
result_1.eval(feed_dict={ph_1: [1]})
if __name__ == "__main__":
- tf.test.main()
+ test.main()
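
In the pack hunks, tf.pack and its newer alias tf.stack both resolve to array_ops, and the gradient checks move from tf.test.compute_gradient_error to gradient_checker.compute_gradient_error. A short sketch of such a gradient check, assuming the same pre-1.0 tree in which pack still exists alongside stack (test name hypothetical):

    import numpy as np

    from tensorflow.python.framework import constant_op
    from tensorflow.python.ops import array_ops
    from tensorflow.python.ops import gradient_checker
    from tensorflow.python.platform import test


    class StackGradSketchTest(test.TestCase):  # hypothetical name

      def testStackGradient(self):
        data = np.random.randn(2, 3)
        with self.test_session(use_gpu=True):
          xs = [constant_op.constant(row) for row in data]
          c = array_ops.stack(xs)
          # Maximum elementwise error between the analytic and numeric
          # Jacobians of stacking two length-3 rows into a 2x3 tensor.
          err = gradient_checker.compute_gradient_error(xs, [(3,), (3,)], c,
                                                        (2, 3))
          self.assertLess(err, 1e-6)


    if __name__ == "__main__":
      test.main()
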
diff --git a/tensorflow/python/kernel_tests/pad_op_test.py b/tensorflow/python/kernel_tests/pad_op_test.py
index 986571e5c6..c709be0b5b 100644
--- a/tensorflow/python/kernel_tests/pad_op_test.py
+++ b/tensorflow/python/kernel_tests/pad_op_test.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for tensorflow.ops.nn_ops.Pad."""
from __future__ import absolute_import
@@ -20,10 +19,16 @@ from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import gradient_checker
+from tensorflow.python.platform import test
-class PadOpTest(tf.test.TestCase):
+class PadOpTest(test.TestCase):
def _npPad(self, inp, paddings, mode):
return np.pad(inp, paddings, mode=mode.lower())
@@ -64,24 +69,21 @@ class PadOpTest(tf.test.TestCase):
def _testPad(self, np_inputs, paddings, mode):
np_val = self._npPad(np_inputs, paddings, mode=mode)
with self.test_session(use_gpu=True):
- tf_val = tf.pad(np_inputs, paddings, mode=mode)
+ tf_val = array_ops.pad(np_inputs, paddings, mode=mode)
out = tf_val.eval()
self.assertAllEqual(np_val, out)
self.assertShapeEqual(np_val, tf_val)
def _testGradient(self, x, a, mode):
with self.test_session(use_gpu=True):
- inx = tf.convert_to_tensor(x)
+ inx = ops.convert_to_tensor(x)
xs = list(x.shape)
- ina = tf.convert_to_tensor(a)
- y = tf.pad(inx, ina, mode=mode)
+ ina = ops.convert_to_tensor(a)
+ y = array_ops.pad(inx, ina, mode=mode)
      # Expected shape of y: the input shape plus the total padding per axis.
ys = list(np.array(x.shape) + np.sum(np.array(a), axis=1))
- jacob_t, jacob_n = tf.test.compute_gradient(inx,
- xs,
- y,
- ys,
- x_init_value=x)
+ jacob_t, jacob_n = gradient_checker.compute_gradient(
+ inx, xs, y, ys, x_init_value=x)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
def _testAll(self, np_inputs, paddings):
@@ -97,88 +99,96 @@ class PadOpTest(tf.test.TestCase):
def testInputDims(self):
with self.test_session(use_gpu=True):
with self.assertRaises(ValueError):
- tf.pad(
- tf.reshape([1, 2], shape=[1, 2, 1, 1, 1, 1]),
- tf.reshape([1, 2], shape=[1, 2]))
+ array_ops.pad(array_ops.reshape(
+ [1, 2], shape=[1, 2, 1, 1, 1, 1]),
+ array_ops.reshape(
+ [1, 2], shape=[1, 2]))
def testPaddingsDim(self):
with self.test_session(use_gpu=True):
with self.assertRaises(ValueError):
- tf.pad(
- tf.reshape([1, 2], shape=[1, 2]),
- tf.reshape([1, 2], shape=[2]))
+ array_ops.pad(array_ops.reshape(
+ [1, 2], shape=[1, 2]),
+ array_ops.reshape(
+ [1, 2], shape=[2]))
def testPaddingsDim2(self):
with self.test_session(use_gpu=True):
with self.assertRaises(ValueError):
- tf.pad(
- tf.reshape([1, 2], shape=[1, 2]),
- tf.reshape([1, 2], shape=[2, 1]))
+ array_ops.pad(array_ops.reshape(
+ [1, 2], shape=[1, 2]),
+ array_ops.reshape(
+ [1, 2], shape=[2, 1]))
def testPaddingsDim3(self):
with self.test_session(use_gpu=True):
with self.assertRaises(ValueError):
- tf.pad(
- tf.reshape([1, 2], shape=[1, 2]),
- tf.reshape([1, 2], shape=[1, 2]))
+ array_ops.pad(array_ops.reshape(
+ [1, 2], shape=[1, 2]),
+ array_ops.reshape(
+ [1, 2], shape=[1, 2]))
def testPaddingsDim4(self):
with self.test_session(use_gpu=True):
with self.assertRaises(ValueError):
- tf.pad(
- tf.reshape([1, 2], shape=[1, 2]),
- tf.reshape([1, 2, 3, 4, 5, 6], shape=[3, 2]))
+ array_ops.pad(array_ops.reshape(
+ [1, 2], shape=[1, 2]),
+ array_ops.reshape(
+ [1, 2, 3, 4, 5, 6], shape=[3, 2]))
def testPaddingsNonNegative(self):
with self.test_session(use_gpu=True):
with self.assertRaisesRegexp(ValueError, "must be non-negative"):
- tf.pad(
- tf.constant([1], shape=[1]),
- tf.constant([-1, 0], shape=[1, 2]))
+ array_ops.pad(constant_op.constant(
+ [1], shape=[1]),
+ constant_op.constant(
+ [-1, 0], shape=[1, 2]))
def testPaddingsNonNegative2(self):
with self.test_session(use_gpu=True):
with self.assertRaisesRegexp(ValueError, "must be non-negative"):
- tf.pad(
- tf.constant([1], shape=[1]),
- tf.constant([-1, 0], shape=[1, 2]))
+ array_ops.pad(constant_op.constant(
+ [1], shape=[1]),
+ constant_op.constant(
+ [-1, 0], shape=[1, 2]))
def testPaddingsMaximum(self):
with self.test_session(use_gpu=True):
with self.assertRaises(Exception):
- tf.pad(
- tf.constant([1], shape=[2]),
- tf.constant([2, 0], shape=[1, 2]),
- mode="REFLECT").eval()
+ array_ops.pad(constant_op.constant(
+ [1], shape=[2]),
+ constant_op.constant(
+ [2, 0], shape=[1, 2]),
+ mode="REFLECT").eval()
with self.assertRaises(Exception):
- tf.pad(
- tf.constant([1], shape=[2]),
- tf.constant([0, 3], shape=[1, 2]),
- mode="SYMMETRIC").eval()
+ array_ops.pad(constant_op.constant(
+ [1], shape=[2]),
+ constant_op.constant(
+ [0, 3], shape=[1, 2]),
+ mode="SYMMETRIC").eval()
def testInvalid(self):
with self.test_session():
x = [[1, 2, 3], [4, 5, 6]]
with self.assertRaisesRegexp(ValueError, "Unknown padding mode"):
- tf.pad(x, [[1, 0], [2, 1]], mode="weird").eval()
+ array_ops.pad(x, [[1, 0], [2, 1]], mode="weird").eval()
def testIntTypes(self):
# TODO(touts): Figure out why the padding tests do not work on GPU
# for int types and rank > 2.
for t in [np.int32, np.int64]:
- self._testAll(np.random.randint(-100, 100, (4, 4, 3)).astype(t),
- [[1, 0], [2, 3], [0, 2]])
- self._testAll(np.random.randint(-100, 100, (4, 2, 1, 3)).astype(t),
- [[0, 0], [0, 0], [0, 0], [0, 0]])
+ self._testAll(
+ np.random.randint(-100, 100, (4, 4, 3)).astype(t),
+ [[1, 0], [2, 3], [0, 2]])
+ self._testAll(
+ np.random.randint(-100, 100, (4, 2, 1, 3)).astype(t),
+ [[0, 0], [0, 0], [0, 0], [0, 0]])
def testFloatTypes(self):
for t in [np.float32, np.float64]:
- self._testAll(np.random.rand(2, 5).astype(t),
- [[1, 0], [2, 0]])
- self._testAll(np.random.rand(2, 3, 4).astype(t),
- [[0, 0], [0, 0], [0, 0]])
- self._testAll(np.random.rand(0, 3, 4).astype(t),
- [[0, 0], [2, 1], [2, 3]])
+ self._testAll(np.random.rand(2, 5).astype(t), [[1, 0], [2, 0]])
+ self._testAll(np.random.rand(2, 3, 4).astype(t), [[0, 0], [0, 0], [0, 0]])
+ self._testAll(np.random.rand(0, 3, 4).astype(t), [[0, 0], [2, 1], [2, 3]])
def testComplexTypes(self):
for t in [np.complex64, np.complex128]:
@@ -189,29 +199,29 @@ class PadOpTest(tf.test.TestCase):
def testShapeFunctionEdgeCases(self):
# Unknown paddings shape.
- inp = tf.constant(0.0, shape=[4, 4, 4, 4])
- padded = tf.pad(inp, tf.placeholder(tf.int32))
+ inp = constant_op.constant(0.0, shape=[4, 4, 4, 4])
+ padded = array_ops.pad(inp, array_ops.placeholder(dtypes.int32))
self.assertEqual([None, None, None, None], padded.get_shape().as_list())
# Unknown input shape.
- inp = tf.placeholder(tf.float32)
- padded = tf.pad(inp, [[2, 2], [2, 2]])
+ inp = array_ops.placeholder(dtypes.float32)
+ padded = array_ops.pad(inp, [[2, 2], [2, 2]])
self.assertEqual([None, None], padded.get_shape().as_list())
# Unknown input and paddings shape.
- inp = tf.placeholder(tf.float32)
- padded = tf.pad(inp, tf.placeholder(tf.int32))
+ inp = array_ops.placeholder(dtypes.float32)
+ padded = array_ops.pad(inp, array_ops.placeholder(dtypes.int32))
self.assertAllEqual(None, padded.get_shape().ndims)
def testScalars(self):
paddings = np.zeros((0, 2), dtype=np.int32)
inp = np.asarray(7)
with self.test_session(use_gpu=True):
- tf_val = tf.pad(inp, paddings)
+ tf_val = array_ops.pad(inp, paddings)
out = tf_val.eval()
self.assertAllEqual(inp, out)
self.assertShapeEqual(inp, tf_val)
if __name__ == "__main__":
- tf.test.main()
+ test.main()
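
In the pad hunks, tf.pad maps to array_ops.pad and the Jacobian checks again route through gradient_checker. A minimal sketch of the padding call in the new style, checked against np.pad just as the tests above do (test name hypothetical):

    import numpy as np

    from tensorflow.python.ops import array_ops
    from tensorflow.python.platform import test


    class PadSketchTest(test.TestCase):  # hypothetical name

      def testConstantPad(self):
        x = np.arange(6, dtype=np.float32).reshape(2, 3)
        paddings = [[1, 0], [0, 2]]  # one row before, two columns after
        with self.test_session(use_gpu=True):
          out = array_ops.pad(x, paddings).eval()
        # The default mode is "CONSTANT", i.e. zero padding, matching np.pad.
        self.assertAllEqual(out, np.pad(x, paddings, mode="constant"))


    if __name__ == "__main__":
      test.main()
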
diff --git a/tensorflow/python/kernel_tests/padding_fifo_queue_test.py b/tensorflow/python/kernel_tests/padding_fifo_queue_test.py
index 651a84cc64..5cd7571590 100644
--- a/tensorflow/python/kernel_tests/padding_fifo_queue_test.py
+++ b/tensorflow/python/kernel_tests/padding_fifo_queue_test.py
@@ -12,8 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for tensorflow.ops.data_flow_ops.PaddingFIFOQueue."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
@@ -23,15 +23,24 @@ import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
-import tensorflow as tf
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes as dtypes_lib
+from tensorflow.python.framework import errors_impl
+from tensorflow.python.framework import ops
+from tensorflow.python.framework import tensor_shape
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import data_flow_ops
+from tensorflow.python.platform import test
-class PaddingFIFOQueueTest(tf.test.TestCase):
+
+class PaddingFIFOQueueTest(test.TestCase):
def testConstructor(self):
- with tf.Graph().as_default():
- q = tf.PaddingFIFOQueue(10, tf.float32, ((None,),), name="Q")
- self.assertTrue(isinstance(q.queue_ref, tf.Tensor))
+ with ops.Graph().as_default():
+ q = data_flow_ops.PaddingFIFOQueue(
+ 10, dtypes_lib.float32, ((None,),), name="Q")
+ self.assertTrue(isinstance(q.queue_ref, ops.Tensor))
self.assertProtoEquals("""
name:'Q' op:'PaddingFIFOQueue'
attr { key: 'component_types' value { list { type: DT_FLOAT } } }
@@ -42,11 +51,12 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
""", q.queue_ref.op.node_def)
def testMultiQueueConstructor(self):
- with tf.Graph().as_default():
- q = tf.PaddingFIFOQueue(5, (tf.int32, tf.float32),
- ((), ()),
- shared_name="foo", name="Q")
- self.assertTrue(isinstance(q.queue_ref, tf.Tensor))
+ with ops.Graph().as_default():
+ q = data_flow_ops.PaddingFIFOQueue(
+ 5, (dtypes_lib.int32, dtypes_lib.float32), ((), ()),
+ shared_name="foo",
+ name="Q")
+ self.assertTrue(isinstance(q.queue_ref, ops.Tensor))
self.assertProtoEquals("""
name:'Q' op:'PaddingFIFOQueue'
attr { key: 'component_types' value { list {
@@ -59,11 +69,13 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
""", q.queue_ref.op.node_def)
def testConstructorWithShapes(self):
- with tf.Graph().as_default():
- q = tf.PaddingFIFOQueue(5, (tf.int32, tf.float32),
- shapes=(tf.TensorShape([1, 1, 2, 3]),
- tf.TensorShape([5, 8])), name="Q")
- self.assertTrue(isinstance(q.queue_ref, tf.Tensor))
+ with ops.Graph().as_default():
+ q = data_flow_ops.PaddingFIFOQueue(
+ 5, (dtypes_lib.int32, dtypes_lib.float32),
+ shapes=(tensor_shape.TensorShape([1, 1, 2, 3]),
+ tensor_shape.TensorShape([5, 8])),
+ name="Q")
+ self.assertTrue(isinstance(q.queue_ref, ops.Tensor))
self.assertProtoEquals("""
name:'Q' op:'PaddingFIFOQueue'
attr { key: 'component_types' value { list {
@@ -84,13 +96,14 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
def testEnqueue(self):
with self.test_session():
- q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
+ q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
enqueue_op = q.enqueue((10.0,))
enqueue_op.run()
def testEnqueueWithShape(self):
with self.test_session():
- q = tf.PaddingFIFOQueue(10, tf.float32, shapes=((3, 2),))
+ q = data_flow_ops.PaddingFIFOQueue(
+ 10, dtypes_lib.float32, shapes=((3, 2),))
enqueue_correct_op = q.enqueue(([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],))
enqueue_correct_op.run()
with self.assertRaises(ValueError):
@@ -99,14 +112,14 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
def testEnqueueManyWithShape(self):
with self.test_session():
- q = tf.PaddingFIFOQueue(10, [tf.int32, tf.int32],
- shapes=[(), (2,)])
+ q = data_flow_ops.PaddingFIFOQueue(
+ 10, [dtypes_lib.int32, dtypes_lib.int32], shapes=[(), (2,)])
q.enqueue_many([[1, 2, 3, 4], [[1, 1], [2, 2], [3, 3], [4, 4]]]).run()
self.assertEqual(4, q.size().eval())
def testParallelEnqueue(self):
with self.test_session() as sess:
- q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
+ q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
@@ -114,8 +127,11 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
# Run one producer thread for each element in elems.
def enqueue(enqueue_op):
sess.run(enqueue_op)
- threads = [self.checkedThread(target=enqueue, args=(e,))
- for e in enqueue_ops]
+
+ threads = [
+ self.checkedThread(
+ target=enqueue, args=(e,)) for e in enqueue_ops
+ ]
for thread in threads:
thread.start()
for thread in threads:
@@ -129,7 +145,7 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
def testParallelDequeue(self):
with self.test_session() as sess:
- q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
+ q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
@@ -143,6 +159,7 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
def dequeue():
results.append(sess.run(dequeued_t))
+
threads = [self.checkedThread(target=dequeue) for _ in enqueue_ops]
for thread in threads:
thread.start()
@@ -152,7 +169,7 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
def testDequeue(self):
with self.test_session():
- q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
+ q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
@@ -166,7 +183,7 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
def testEnqueueAndBlockingDequeue(self):
with self.test_session() as sess:
- q = tf.PaddingFIFOQueue(3, tf.float32, ((),))
+ q = data_flow_ops.PaddingFIFOQueue(3, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
@@ -196,7 +213,9 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
def testMultiEnqueueAndDequeue(self):
with self.test_session() as sess:
- q = tf.PaddingFIFOQueue(10, (tf.int32, tf.float32), ((), ()))
+ q = data_flow_ops.PaddingFIFOQueue(10,
+ (dtypes_lib.int32, dtypes_lib.float32),
+ ((), ()))
elems = [(5, 10.0), (10, 20.0), (15, 30.0)]
enqueue_ops = [q.enqueue((x, y)) for x, y in elems]
dequeued_t = q.dequeue()
@@ -212,12 +231,12 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
def testQueueSizeEmpty(self):
with self.test_session():
- q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
+ q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
self.assertEqual([0], q.size().eval())
def testQueueSizeAfterEnqueueAndDequeue(self):
with self.test_session():
- q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
+ q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue()
size = q.size()
@@ -230,7 +249,7 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
def testEnqueueMany(self):
with self.test_session():
- q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
+ q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue()
@@ -243,9 +262,10 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
def testEmptyEnqueueMany(self):
with self.test_session():
- q = tf.PaddingFIFOQueue(10, tf.float32, ((None, None),))
- empty_t = tf.constant([], dtype=tf.float32,
- shape=[0, 2, 3])
+ q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, (
+ (None, None),))
+ empty_t = constant_op.constant(
+ [], dtype=dtypes_lib.float32, shape=[0, 2, 3])
enqueue_op = q.enqueue_many((empty_t,))
size_t = q.size()
@@ -255,7 +275,7 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
def testEmptyDequeueMany(self):
with self.test_session():
- q = tf.PaddingFIFOQueue(10, tf.float32, shapes=((),))
+ q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, shapes=((),))
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue_many(0)
@@ -265,7 +285,8 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
def testEmptyDequeueManyWithDynamicShape(self):
with self.test_session():
- q = tf.PaddingFIFOQueue(10, tf.float32, shapes=((None,),))
+ q = data_flow_ops.PaddingFIFOQueue(
+ 10, dtypes_lib.float32, shapes=((None,),))
enqueue_op = q.enqueue(([10.0],))
dequeued_t = q.dequeue_many(0)
@@ -275,7 +296,8 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
def testEmptyDequeueUpToWithDynamicShape(self):
with self.test_session():
- q = tf.PaddingFIFOQueue(10, tf.float32, shapes=((None,),))
+ q = data_flow_ops.PaddingFIFOQueue(
+ 10, dtypes_lib.float32, shapes=((None,),))
enqueue_op = q.enqueue(([10.0],))
dequeued_t = q.dequeue_up_to(0)
@@ -288,11 +310,14 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
with self.assertRaisesRegexp(
ValueError,
r"When providing partial shapes, a list of shapes must be provided."):
- tf.PaddingFIFOQueue(10, tf.float32, None).queue_ref.eval()
+ data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32,
+ None).queue_ref.eval()
def testMultiEnqueueMany(self):
with self.test_session() as sess:
- q = tf.PaddingFIFOQueue(10, (tf.float32, tf.int32), ((), (2,)))
+ q = data_flow_ops.PaddingFIFOQueue(10,
+ (dtypes_lib.float32, dtypes_lib.int32),
+ ((), (2,)))
float_elems = [10.0, 20.0, 30.0, 40.0]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
@@ -308,8 +333,8 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
def testMultiEnqueueManyWithPartiallyKnownShapes(self):
with self.test_session() as sess:
- q = tf.PaddingFIFOQueue(
- 10, (tf.float32, tf.int32), shapes=((), (None,)))
+ q = data_flow_ops.PaddingFIFOQueue(
+ 10, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (None,)))
float_elems = [10.0, 20.0, 30.0, 40.0]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
@@ -325,7 +350,7 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
def testDequeueMany(self):
with self.test_session():
- q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
+ q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(4)
@@ -337,7 +362,7 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
def testDequeueUpToNoBlocking(self):
with self.test_session():
- q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
+ q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_up_to(4)
@@ -349,12 +374,13 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
def testMultiDequeueMany(self):
with self.test_session() as sess:
- q = tf.PaddingFIFOQueue(10, (tf.float32, tf.int32),
- shapes=((), (2,)))
+ q = data_flow_ops.PaddingFIFOQueue(
+ 10, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (2,)))
float_elems = [
- 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
- int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10],
- [11, 12], [13, 14], [15, 16], [17, 18], [19, 20]]
+ 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0
+ ]
+ int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14],
+ [15, 16], [17, 18], [19, 20]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue_many(4)
dequeued_single_t = q.dequeue()
@@ -379,11 +405,13 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
def testMultiDequeueManyWithPartiallyKnownShapes(self):
with self.test_session() as sess:
- q = tf.PaddingFIFOQueue(10, (tf.float32, tf.int32), shapes=((), (None,)))
+ q = data_flow_ops.PaddingFIFOQueue(
+ 10, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (None,)))
float_elems = [
- 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
- int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10],
- [11, 12], [13, 14], [15, 16], [17, 18], [19, 20]]
+ 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0
+ ]
+ int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14],
+ [15, 16], [17, 18], [19, 20]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue_many(4)
dequeued_single_t = q.dequeue()
@@ -394,11 +422,11 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
self.assertAllEqual(float_elems[0:4], float_val)
self.assertAllEqual(int_elems[0:4], int_val)
self.assertTrue(
- tf.TensorShape(float_val.shape).is_compatible_with(
+ tensor_shape.TensorShape(float_val.shape).is_compatible_with(
dequeued_t[0].get_shape()))
self.assertTrue(
- tf.TensorShape(int_val.shape).is_compatible_with(
- dequeued_t[1].get_shape()))
+ tensor_shape.TensorShape(int_val.shape).is_compatible_with(dequeued_t[
+ 1].get_shape()))
float_val, int_val = sess.run(dequeued_t)
self.assertAllEqual(float_elems[4:8], float_val)
@@ -408,31 +436,21 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
self.assertAllEqual(float_elems[8], float_val)
self.assertAllEqual(int_elems[8], int_val)
self.assertTrue(
- tf.TensorShape(float_val.shape).is_compatible_with(
+ tensor_shape.TensorShape(float_val.shape).is_compatible_with(
dequeued_single_t[0].get_shape()))
self.assertTrue(
- tf.TensorShape(int_val.shape).is_compatible_with(
+ tensor_shape.TensorShape(int_val.shape).is_compatible_with(
dequeued_single_t[1].get_shape()))
def testMultiDequeueManyWithPartiallyKnownShapesAndVariableSizeInput(self):
with self.test_session() as sess:
- q = tf.PaddingFIFOQueue(10, (tf.string, tf.int32),
- shapes=((None,), (1, None)))
- str_elems = [
- ["a"],
- ["ab"],
- ["abc"],
- ["abc", "d"],
- ["abc", "d", "e"],
- ["abc", "d", "e", "f"]]
-
- int_elems = [
- [[1]],
- [[2]],
- [[3]],
- [[1, 2]],
- [[1, 2, 3]],
- [[1, 2, 3, 4]]]
+ q = data_flow_ops.PaddingFIFOQueue(
+ 10, (dtypes_lib.string, dtypes_lib.int32),
+ shapes=((None,), (1, None)))
+ str_elems = [["a"], ["ab"], ["abc"], ["abc", "d"], ["abc", "d", "e"],
+ ["abc", "d", "e", "f"]]
+
+ int_elems = [[[1]], [[2]], [[3]], [[1, 2]], [[1, 2, 3]], [[1, 2, 3, 4]]]
enqueue_ops = [q.enqueue((str_elems[i], int_elems[i])) for i in range(6)]
@@ -443,52 +461,37 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
enqueue_op.run()
string_val, int_val = sess.run(dequeued_t)
- self.assertAllEqual(
- [[b"a", b"", b""], [b"ab", b"", b""], [b"abc", b"", b""],
- [b"abc", b"d", b""], [b"abc", b"d", b"e"]], string_val)
- self.assertAllEqual(
- [[[1, 0, 0]],
- [[2, 0, 0]],
- [[3, 0, 0]],
- [[1, 2, 0]],
- [[1, 2, 3]]],
- int_val)
+ self.assertAllEqual([[b"a", b"", b""], [b"ab", b"", b""],
+ [b"abc", b"", b""], [b"abc", b"d", b""],
+ [b"abc", b"d", b"e"]], string_val)
+ self.assertAllEqual([[[1, 0, 0]], [[2, 0, 0]], [[3, 0, 0]], [[1, 2, 0]],
+ [[1, 2, 3]]], int_val)
self.assertTrue(
- tf.TensorShape(string_val.shape).is_compatible_with(
+ tensor_shape.TensorShape(string_val.shape).is_compatible_with(
dequeued_t[0].get_shape()))
self.assertTrue(
- tf.TensorShape(int_val.shape).is_compatible_with(
- dequeued_t[1].get_shape()))
+ tensor_shape.TensorShape(int_val.shape).is_compatible_with(dequeued_t[
+ 1].get_shape()))
string_val, int_val = sess.run(dequeued_single_t)
self.assertAllEqual([b"abc", b"d", b"e", b"f"], string_val)
self.assertAllEqual([[1, 2, 3, 4]], int_val)
self.assertTrue(
- tf.TensorShape(string_val.shape).is_compatible_with(
+ tensor_shape.TensorShape(string_val.shape).is_compatible_with(
dequeued_single_t[0].get_shape()))
self.assertTrue(
- tf.TensorShape(int_val.shape).is_compatible_with(
+ tensor_shape.TensorShape(int_val.shape).is_compatible_with(
dequeued_single_t[1].get_shape()))
def testMultiDequeueUpToPartiallyKnownShapesAndVariableInputNoBlocking(self):
with self.test_session() as sess:
- q = tf.PaddingFIFOQueue(10, (tf.string, tf.int32),
- shapes=((None,), (1, None)))
- str_elems = [
- ["a"],
- ["ab"],
- ["abc"],
- ["abc", "d"],
- ["abc", "d", "e"],
- ["abc", "d", "e", "f"]]
-
- int_elems = [
- [[1]],
- [[2]],
- [[3]],
- [[1, 2]],
- [[1, 2, 3]],
- [[1, 2, 3, 4]]]
+ q = data_flow_ops.PaddingFIFOQueue(
+ 10, (dtypes_lib.string, dtypes_lib.int32),
+ shapes=((None,), (1, None)))
+ str_elems = [["a"], ["ab"], ["abc"], ["abc", "d"], ["abc", "d", "e"],
+ ["abc", "d", "e", "f"]]
+
+ int_elems = [[[1]], [[2]], [[3]], [[1, 2]], [[1, 2, 3]], [[1, 2, 3, 4]]]
enqueue_ops = [q.enqueue((str_elems[i], int_elems[i])) for i in range(6)]
@@ -499,36 +502,31 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
enqueue_op.run()
string_val, int_val = sess.run(dequeued_t)
- self.assertAllEqual(
- [[b"a", b"", b""], [b"ab", b"", b""], [b"abc", b"", b""],
- [b"abc", b"d", b""], [b"abc", b"d", b"e"]], string_val)
- self.assertAllEqual(
- [[[1, 0, 0]],
- [[2, 0, 0]],
- [[3, 0, 0]],
- [[1, 2, 0]],
- [[1, 2, 3]]],
- int_val)
+ self.assertAllEqual([[b"a", b"", b""], [b"ab", b"", b""],
+ [b"abc", b"", b""], [b"abc", b"d", b""],
+ [b"abc", b"d", b"e"]], string_val)
+ self.assertAllEqual([[[1, 0, 0]], [[2, 0, 0]], [[3, 0, 0]], [[1, 2, 0]],
+ [[1, 2, 3]]], int_val)
self.assertTrue(
- tf.TensorShape(string_val.shape).is_compatible_with(
+ tensor_shape.TensorShape(string_val.shape).is_compatible_with(
dequeued_t[0].get_shape()))
self.assertTrue(
- tf.TensorShape(int_val.shape).is_compatible_with(
- dequeued_t[1].get_shape()))
+ tensor_shape.TensorShape(int_val.shape).is_compatible_with(dequeued_t[
+ 1].get_shape()))
string_val, int_val = sess.run(dequeued_single_t)
self.assertAllEqual([b"abc", b"d", b"e", b"f"], string_val)
self.assertAllEqual([[1, 2, 3, 4]], int_val)
self.assertTrue(
- tf.TensorShape(string_val.shape).is_compatible_with(
+ tensor_shape.TensorShape(string_val.shape).is_compatible_with(
dequeued_single_t[0].get_shape()))
self.assertTrue(
- tf.TensorShape(int_val.shape).is_compatible_with(
+ tensor_shape.TensorShape(int_val.shape).is_compatible_with(
dequeued_single_t[1].get_shape()))
def testHighDimension(self):
with self.test_session():
- q = tf.PaddingFIFOQueue(10, tf.int32, ((4, 4, 4, 4),))
+ q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.int32, ((4, 4, 4, 4),))
elems = np.array([[[[[x] * 4] * 4] * 4] * 4 for x in range(10)], np.int32)
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(10)
@@ -538,7 +536,8 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
def testPartiallyKnownHighDimension(self):
with self.test_session():
- q = tf.PaddingFIFOQueue(10, tf.int32, ((4, None, 4, None),))
+ q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.int32, (
+ (4, None, 4, None),))
elems = np.array([[[[[x] * 4] * 4] * 4] * 4 for x in range(10)], np.int32)
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(10)
@@ -547,7 +546,8 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
self.assertAllEqual(dequeued_t.eval(), elems)
def testEnqueueWrongShape(self):
- q = tf.PaddingFIFOQueue(10, (tf.int32, tf.int32), ((), (2,)))
+ q = data_flow_ops.PaddingFIFOQueue(10, (dtypes_lib.int32, dtypes_lib.int32),
+ ((), (2,)))
with self.assertRaises(ValueError):
q.enqueue(([1, 2], [2, 2]))
@@ -556,42 +556,53 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
q.enqueue_many((7, [[1, 2], [3, 4], [5, 6]]))
def testBatchSizeMismatch(self):
- q = tf.PaddingFIFOQueue(10, (tf.int32, tf.int32, tf.int32), ((), (), ()))
+ q = data_flow_ops.PaddingFIFOQueue(10, (dtypes_lib.int32, dtypes_lib.int32,
+ dtypes_lib.int32), ((), (), ()))
with self.assertRaises(ValueError):
q.enqueue_many(([1, 2, 3], [1, 2], [1, 2, 3]))
with self.assertRaises(ValueError):
- q.enqueue_many(([1, 2, 3], [1, 2], tf.placeholder(tf.int32)))
+ q.enqueue_many(
+ ([1, 2, 3], [1, 2], array_ops.placeholder(dtypes_lib.int32)))
with self.assertRaises(ValueError):
- q.enqueue_many((tf.placeholder(tf.int32), [1, 2], [1, 2, 3]))
+ q.enqueue_many(
+ (array_ops.placeholder(dtypes_lib.int32), [1, 2], [1, 2, 3]))
def testEnqueueManyEmptyTypeConversion(self):
- q = tf.PaddingFIFOQueue(10, (tf.int32, tf.float32), ((), ()))
+ q = data_flow_ops.PaddingFIFOQueue(10,
+ (dtypes_lib.int32, dtypes_lib.float32), (
+ (), ()))
enq = q.enqueue_many(([], []))
- self.assertEqual(tf.int32, enq.inputs[1].dtype)
- self.assertEqual(tf.float32, enq.inputs[2].dtype)
+ self.assertEqual(dtypes_lib.int32, enq.inputs[1].dtype)
+ self.assertEqual(dtypes_lib.float32, enq.inputs[2].dtype)
def testEnqueueWrongType(self):
- q = tf.PaddingFIFOQueue(10, (tf.int32, tf.float32), ((), ()))
+ q = data_flow_ops.PaddingFIFOQueue(10,
+ (dtypes_lib.int32, dtypes_lib.float32), (
+ (), ()))
with self.assertRaises(ValueError):
- q.enqueue((tf.placeholder(tf.int32), tf.placeholder(tf.int32)))
+ q.enqueue((array_ops.placeholder(dtypes_lib.int32),
+ array_ops.placeholder(dtypes_lib.int32)))
with self.assertRaises(ValueError):
- q.enqueue_many((tf.placeholder(tf.int32), tf.placeholder(tf.int32)))
+ q.enqueue_many((array_ops.placeholder(dtypes_lib.int32),
+ array_ops.placeholder(dtypes_lib.int32)))
def testEnqueueWrongPartiallyKnownShapeAtRuntime(self):
with self.test_session() as sess:
# First dimension of second component is unknown, second
# dimension must be 3.
- q = tf.PaddingFIFOQueue(10, (tf.int32, tf.int32), ((2, 2), (None, 3)))
+ q = data_flow_ops.PaddingFIFOQueue(10,
+ (dtypes_lib.int32, dtypes_lib.int32), (
+ (2, 2), (None, 3)))
elems_ok = np.array([1] * 4).reshape((2, 2)).astype(np.int32)
- elems_bad = tf.placeholder(tf.int32)
+ elems_bad = array_ops.placeholder(dtypes_lib.int32)
enqueue_op = q.enqueue((elems_ok, elems_bad))
- with self.assertRaisesRegexp(
- tf.errors.InvalidArgumentError, r"Expected \[\?,3\], got \[3,4\]"):
+ with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
+ r"Expected \[\?,3\], got \[3,4\]"):
sess.run([enqueue_op],
feed_dict={elems_bad: np.array([1] * 12).reshape((3, 4))})
@@ -599,22 +610,23 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
with self.test_session() as sess:
# First dimension of second component is unknown, second
# dimension must be 3.
- q = tf.PaddingFIFOQueue(10, (tf.int32, tf.int32), ((2, 2), (None, 3)))
+ q = data_flow_ops.PaddingFIFOQueue(10,
+ (dtypes_lib.int32, dtypes_lib.int32), (
+ (2, 2), (None, 3)))
elems_ok = np.array([1] * 8).reshape((2, 2, 2)).astype(np.int32)
- elems_bad = tf.placeholder(tf.int32)
+ elems_bad = array_ops.placeholder(dtypes_lib.int32)
enqueue_op = q.enqueue_many((elems_ok, elems_bad))
dequeued_t = q.dequeue_many(2)
- with self.assertRaisesRegexp(
- tf.errors.InvalidArgumentError,
- "Shape mismatch in tuple component 1. "
- r"Expected \[2,\?,3\], got \[2,3,4\]"):
+ with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
+ "Shape mismatch in tuple component 1. "
+ r"Expected \[2,\?,3\], got \[2,3,4\]"):
sess.run([enqueue_op],
feed_dict={elems_bad: np.array([1] * 24).reshape((2, 3, 4))})
dequeued_t.eval()
def testParallelEnqueueMany(self):
with self.test_session() as sess:
- q = tf.PaddingFIFOQueue(1000, tf.float32, shapes=((),))
+ q = data_flow_ops.PaddingFIFOQueue(1000, dtypes_lib.float32, shapes=((),))
elems = [10.0 * x for x in range(100)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(1000)
@@ -622,6 +634,7 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
# Enqueue 100 items in parallel on 10 threads.
def enqueue():
sess.run(enqueue_op)
+
threads = [self.checkedThread(target=enqueue) for _ in range(10)]
for thread in threads:
thread.start()
@@ -632,7 +645,7 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
def testParallelDequeueMany(self):
with self.test_session() as sess:
- q = tf.PaddingFIFOQueue(1000, tf.float32, shapes=((),))
+ q = data_flow_ops.PaddingFIFOQueue(1000, dtypes_lib.float32, shapes=((),))
elems = [10.0 * x for x in range(1000)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(100)
@@ -644,6 +657,7 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
def dequeue():
dequeued_elems.extend(sess.run(dequeued_t))
+
threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for thread in threads:
thread.start()
@@ -653,7 +667,7 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
def testParallelDequeueUpTo(self):
with self.test_session() as sess:
- q = tf.PaddingFIFOQueue(1000, tf.float32, shapes=((),))
+ q = data_flow_ops.PaddingFIFOQueue(1000, dtypes_lib.float32, shapes=((),))
elems = [10.0 * x for x in range(1000)]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
@@ -667,6 +681,7 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
def dequeue():
dequeued_elems.extend(sess.run(dequeued_t))
+
threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for thread in threads:
thread.start()
@@ -676,7 +691,7 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
def testParallelEnqueueAndDequeue(self):
with self.test_session() as sess:
- q = tf.PaddingFIFOQueue(50, tf.float32, shapes=((),))
+ q = data_flow_ops.PaddingFIFOQueue(50, dtypes_lib.float32, shapes=((),))
initial_elements = [10.0] * 49
q.enqueue_many((initial_elements,)).run()
@@ -686,6 +701,7 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
def enqueue():
for _ in xrange(100):
sess.run(enqueue_op)
+
def dequeue():
for _ in xrange(100):
self.assertTrue(sess.run(dequeued_t) in (10.0, 20.0))
@@ -708,11 +724,11 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
def testMixtureOfEnqueueAndEnqueueMany(self):
with self.test_session() as sess:
- q = tf.PaddingFIFOQueue(10, tf.int32, shapes=((),))
- enqueue_placeholder = tf.placeholder(tf.int32, shape=())
+ q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.int32, shapes=((),))
+ enqueue_placeholder = array_ops.placeholder(dtypes_lib.int32, shape=())
enqueue_op = q.enqueue((enqueue_placeholder,))
- enqueuemany_placeholder = tf.placeholder(
- tf.int32, shape=(None,))
+ enqueuemany_placeholder = array_ops.placeholder(
+ dtypes_lib.int32, shape=(None,))
enqueuemany_op = q.enqueue_many((enqueuemany_placeholder,))
dequeued_t = q.dequeue()
@@ -721,6 +737,7 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
def dequeue():
for i in xrange(250):
self.assertEqual(i, sess.run(dequeued_t))
+
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
@@ -732,9 +749,8 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
elements_enqueued += 1
else:
count = random.randint(0, min(20, 250 - elements_enqueued))
- range_to_enqueue = np.arange(elements_enqueued,
- elements_enqueued + count,
- dtype=np.int32)
+ range_to_enqueue = np.arange(
+ elements_enqueued, elements_enqueued + count, dtype=np.int32)
enqueuemany_op.run({enqueuemany_placeholder: range_to_enqueue})
elements_enqueued += count
@@ -744,14 +760,15 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
def testMixtureOfDequeueAndDequeueMany(self):
with self.test_session() as sess:
- q = tf.PaddingFIFOQueue(10, tf.int32, shapes=((),))
+ q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.int32, shapes=((),))
enqueue_op = q.enqueue_many((np.arange(250, dtype=np.int32),))
dequeued_t = q.dequeue()
- count_placeholder = tf.placeholder(tf.int32, shape=())
+ count_placeholder = array_ops.placeholder(dtypes_lib.int32, shape=())
dequeuemany_t = q.dequeue_many(count_placeholder)
def enqueue():
sess.run(enqueue_op)
+
enqueue_thread = self.checkedThread(target=enqueue)
enqueue_thread.start()
@@ -763,11 +780,12 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
elements_dequeued += 1
else:
count = random.randint(0, min(20, 250 - elements_dequeued))
- expected_range = np.arange(elements_dequeued,
- elements_dequeued + count,
- dtype=np.int32)
- self.assertAllEqual(
- expected_range, dequeuemany_t.eval({count_placeholder: count}))
+ expected_range = np.arange(
+ elements_dequeued, elements_dequeued + count, dtype=np.int32)
+ self.assertAllEqual(expected_range,
+ dequeuemany_t.eval({
+ count_placeholder: count
+ }))
elements_dequeued += count
q.close().run()
@@ -776,7 +794,7 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
def testBlockingDequeueMany(self):
with self.test_session() as sess:
- q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
+ q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(4)
@@ -803,7 +821,7 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
def testBlockingDequeueUpTo(self):
with self.test_session() as sess:
- q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
+ q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_up_to(4)
@@ -832,13 +850,13 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
with self.test_session():
# Define a first queue that contains integer counts.
dequeue_counts = [random.randint(1, 10) for _ in range(100)]
- count_q = tf.PaddingFIFOQueue(100, tf.int32, ((),))
+ count_q = data_flow_ops.PaddingFIFOQueue(100, dtypes_lib.int32, ((),))
enqueue_counts_op = count_q.enqueue_many((dequeue_counts,))
total_count = sum(dequeue_counts)
# Define a second queue that contains total_count elements.
elems = [random.randint(0, 100) for _ in range(total_count)]
- q = tf.PaddingFIFOQueue(total_count, tf.int32, ((),))
+ q = data_flow_ops.PaddingFIFOQueue(total_count, dtypes_lib.int32, ((),))
enqueue_elems_op = q.enqueue_many((elems,))
# Define a subgraph that first dequeues a count, then DequeuesMany
@@ -855,7 +873,7 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
def testDequeueFromClosedQueue(self):
with self.test_session():
- q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
+ q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
@@ -867,13 +885,13 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
self.assertEqual([elem], dequeued_t.eval())
# Expect the operation to fail due to the queue being closed.
- with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
+ with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
dequeued_t.eval()
def testBlockingDequeueFromClosedQueue(self):
with self.test_session() as sess:
- q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
+ q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
@@ -885,7 +903,7 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
for elem in elems:
self.assertEqual([elem], sess.run(dequeued_t))
# Expect the operation to fail due to the queue being closed.
- with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
+ with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
@@ -899,7 +917,7 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
def testDequeueUpToFromClosedQueueReturnsRemainder(self):
with self.test_session() as sess:
- q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
+ q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
@@ -921,13 +939,13 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
def testBlockingDequeueFromClosedEmptyQueue(self):
with self.test_session() as sess:
- q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
+ q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
close_op = q.close()
dequeued_t = q.dequeue()
def dequeue():
# Expect the operation to fail due to the queue being closed.
- with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
+ with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
@@ -941,7 +959,7 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
def testBlockingDequeueManyFromClosedQueue(self):
with self.test_session() as sess:
- q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
+ q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
@@ -952,7 +970,7 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
def dequeue():
self.assertAllEqual(elems, sess.run(dequeued_t))
# Expect the operation to fail due to the queue being closed.
- with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
+ with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
@@ -966,7 +984,7 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
def testBlockingDequeueManyButNotAllFromClosedQueue(self):
with self.test_session() as sess:
- q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
+ q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
@@ -977,7 +995,7 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
def dequeue():
self.assertAllEqual(elems[:3], sess.run(dequeued_t))
# Expect the operation to fail due to the queue being closed.
- with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
+ with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
@@ -991,7 +1009,7 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
def testEnqueueManyLargerThanCapacityWithConcurrentDequeueMany(self):
with self.test_session() as sess:
- q = tf.PaddingFIFOQueue(4, tf.float32, ((),))
+ q = data_flow_ops.PaddingFIFOQueue(4, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
@@ -1003,7 +1021,7 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
def dequeue():
self.assertAllEqual(elems[0:3], sess.run(dequeued_t))
- with self.assertRaises(tf.errors.OutOfRangeError):
+ with self.assertRaises(errors_impl.OutOfRangeError):
sess.run(dequeued_t)
self.assertEqual(elems[3], sess.run(cleanup_dequeue_t))
@@ -1028,7 +1046,8 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
def testClosedBlockingDequeueManyRestoresPartialBatch(self):
with self.test_session() as sess:
- q = tf.PaddingFIFOQueue(4, (tf.float32, tf.float32), ((), ()))
+ q = data_flow_ops.PaddingFIFOQueue(4, (dtypes_lib.float32,
+ dtypes_lib.float32), ((), ()))
elems_a = [1.0, 2.0, 3.0]
elems_b = [10.0, 20.0, 30.0]
enqueue_op = q.enqueue_many((elems_a, elems_b))
@@ -1039,7 +1058,7 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
enqueue_op.run()
def dequeue():
- with self.assertRaises(tf.errors.OutOfRangeError):
+ with self.assertRaises(errors_impl.OutOfRangeError):
sess.run([dequeued_a_t, dequeued_b_t])
dequeue_thread = self.checkedThread(target=dequeue)
@@ -1060,13 +1079,13 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
def testBlockingDequeueManyFromClosedEmptyQueue(self):
with self.test_session() as sess:
- q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
+ q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
close_op = q.close()
dequeued_t = q.dequeue_many(4)
def dequeue():
# Expect the operation to fail due to the queue being closed.
- with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
+ with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
@@ -1080,13 +1099,13 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
def testBlockingDequeueUpToFromClosedEmptyQueue(self):
with self.test_session() as sess:
- q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
+ q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
close_op = q.close()
dequeued_t = q.dequeue_up_to(4)
def dequeue():
# Expect the operation to fail due to the queue being closed.
- with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
+ with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
@@ -1100,7 +1119,7 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
def testEnqueueToClosedQueue(self):
with self.test_session():
- q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
+ q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
enqueue_op = q.enqueue((10.0,))
close_op = q.close()
@@ -1108,12 +1127,12 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
close_op.run()
# Expect the operation to fail due to the queue being closed.
- with self.assertRaisesRegexp(tf.errors.CancelledError, "is closed"):
+ with self.assertRaisesRegexp(errors_impl.CancelledError, "is closed"):
enqueue_op.run()
def testEnqueueManyToClosedQueue(self):
with self.test_session():
- q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
+ q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
@@ -1122,12 +1141,12 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
close_op.run()
# Expect the operation to fail due to the queue being closed.
- with self.assertRaisesRegexp(tf.errors.CancelledError, "is closed"):
+ with self.assertRaisesRegexp(errors_impl.CancelledError, "is closed"):
enqueue_op.run()
def testBlockingEnqueueToFullQueue(self):
with self.test_session() as sess:
- q = tf.PaddingFIFOQueue(4, tf.float32, ((),))
+ q = data_flow_ops.PaddingFIFOQueue(4, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue((50.0,))
@@ -1137,6 +1156,7 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
def blocking_enqueue():
sess.run(blocking_enqueue_op)
+
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The dequeue ops should run after the blocking_enqueue_op has blocked.
@@ -1149,7 +1169,7 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
def testBlockingEnqueueManyToFullQueue(self):
with self.test_session() as sess:
- q = tf.PaddingFIFOQueue(4, tf.float32, ((),))
+ q = data_flow_ops.PaddingFIFOQueue(4, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
@@ -1159,6 +1179,7 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
def blocking_enqueue():
sess.run(blocking_enqueue_op)
+
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The dequeue ops should run after the blocking_enqueue_op has blocked.
@@ -1172,7 +1193,7 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
def testBlockingEnqueueBeforeClose(self):
with self.test_session() as sess:
- q = tf.PaddingFIFOQueue(4, tf.float32, ((),))
+ q = data_flow_ops.PaddingFIFOQueue(4, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue((50.0,))
@@ -1184,6 +1205,7 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
def blocking_enqueue():
# Expect the operation to succeed once the dequeue op runs.
sess.run(blocking_enqueue_op)
+
enqueue_thread = self.checkedThread(target=blocking_enqueue)
enqueue_thread.start()
@@ -1193,6 +1215,7 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
def close():
sess.run(close_op)
+
close_thread = self.checkedThread(target=close)
close_thread.start()
@@ -1207,7 +1230,7 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
def testBlockingEnqueueManyBeforeClose(self):
with self.test_session() as sess:
- q = tf.PaddingFIFOQueue(4, tf.float32, ((),))
+ q = data_flow_ops.PaddingFIFOQueue(4, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
@@ -1217,6 +1240,7 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
def blocking_enqueue():
sess.run(blocking_enqueue_op)
+
enqueue_thread = self.checkedThread(target=blocking_enqueue)
enqueue_thread.start()
@@ -1226,6 +1250,7 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
def close():
sess.run(close_op)
+
close_thread = self.checkedThread(target=close)
close_thread.start()
@@ -1238,7 +1263,7 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
def testDoesNotLoseValue(self):
with self.test_session():
- q = tf.PaddingFIFOQueue(1, tf.float32, ((),))
+ q = data_flow_ops.PaddingFIFOQueue(1, dtypes_lib.float32, ((),))
enqueue_op = q.enqueue((10.0,))
size_t = q.size()
@@ -1248,12 +1273,12 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
def testSharedQueueSameSession(self):
with self.test_session():
- q1 = tf.PaddingFIFOQueue(
- 1, tf.float32, ((),), shared_name="shared_queue")
+ q1 = data_flow_ops.PaddingFIFOQueue(
+ 1, dtypes_lib.float32, ((),), shared_name="shared_queue")
q1.enqueue((10.0,)).run()
- q2 = tf.PaddingFIFOQueue(
- 1, tf.float32, ((),), shared_name="shared_queue")
+ q2 = data_flow_ops.PaddingFIFOQueue(
+ 1, dtypes_lib.float32, ((),), shared_name="shared_queue")
q1_size_t = q1.size()
q2_size_t = q2.size()
@@ -1278,43 +1303,51 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
def testIncompatibleSharedQueueErrors(self):
with self.test_session():
- q_a_1 = tf.PaddingFIFOQueue(10, tf.float32, ((),), shared_name="q_a")
- q_a_2 = tf.PaddingFIFOQueue(15, tf.float32, ((),), shared_name="q_a")
+ q_a_1 = data_flow_ops.PaddingFIFOQueue(
+ 10, dtypes_lib.float32, ((),), shared_name="q_a")
+ q_a_2 = data_flow_ops.PaddingFIFOQueue(
+ 15, dtypes_lib.float32, ((),), shared_name="q_a")
q_a_1.queue_ref.eval()
with self.assertRaisesOpError("capacity"):
q_a_2.queue_ref.eval()
- q_b_1 = tf.PaddingFIFOQueue(10, tf.float32, ((),), shared_name="q_b")
- q_b_2 = tf.PaddingFIFOQueue(10, tf.int32, ((),), shared_name="q_b")
+ q_b_1 = data_flow_ops.PaddingFIFOQueue(
+ 10, dtypes_lib.float32, ((),), shared_name="q_b")
+ q_b_2 = data_flow_ops.PaddingFIFOQueue(
+ 10, dtypes_lib.int32, ((),), shared_name="q_b")
q_b_1.queue_ref.eval()
with self.assertRaisesOpError("component types"):
q_b_2.queue_ref.eval()
- q_c_1 = tf.PaddingFIFOQueue(10, tf.float32, ((),), shared_name="q_c")
- q_c_2 = tf.PaddingFIFOQueue(
- 10, tf.float32, shapes=[(1, 1, 2, 3)], shared_name="q_c")
+ q_c_1 = data_flow_ops.PaddingFIFOQueue(
+ 10, dtypes_lib.float32, ((),), shared_name="q_c")
+ q_c_2 = data_flow_ops.PaddingFIFOQueue(
+ 10, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_c")
q_c_1.queue_ref.eval()
with self.assertRaisesOpError("component shapes"):
q_c_2.queue_ref.eval()
- q_d_1 = tf.PaddingFIFOQueue(
- 10, tf.float32, shapes=[(1, 1, 2, 3)], shared_name="q_d")
- q_d_2 = tf.PaddingFIFOQueue(10, tf.float32, ((),), shared_name="q_d")
+ q_d_1 = data_flow_ops.PaddingFIFOQueue(
+ 10, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_d")
+ q_d_2 = data_flow_ops.PaddingFIFOQueue(
+ 10, dtypes_lib.float32, ((),), shared_name="q_d")
q_d_1.queue_ref.eval()
with self.assertRaisesOpError("component shapes"):
q_d_2.queue_ref.eval()
- q_e_1 = tf.PaddingFIFOQueue(
- 10, tf.float32, shapes=[(1, 1, 2, 3)], shared_name="q_e")
- q_e_2 = tf.PaddingFIFOQueue(
- 10, tf.float32, shapes=[(1, 1, 2, 4)], shared_name="q_e")
+ q_e_1 = data_flow_ops.PaddingFIFOQueue(
+ 10, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_e")
+ q_e_2 = data_flow_ops.PaddingFIFOQueue(
+ 10, dtypes_lib.float32, shapes=[(1, 1, 2, 4)], shared_name="q_e")
q_e_1.queue_ref.eval()
with self.assertRaisesOpError("component shapes"):
q_e_2.queue_ref.eval()
- q_f_1 = tf.PaddingFIFOQueue(10, tf.float32, ((),), shared_name="q_f")
- q_f_2 = tf.PaddingFIFOQueue(
- 10, (tf.float32, tf.int32), ((), ()), shared_name="q_f")
+ q_f_1 = data_flow_ops.PaddingFIFOQueue(
+ 10, dtypes_lib.float32, ((),), shared_name="q_f")
+ q_f_2 = data_flow_ops.PaddingFIFOQueue(
+ 10, (dtypes_lib.float32, dtypes_lib.int32), ((), ()),
+ shared_name="q_f")
q_f_1.queue_ref.eval()
with self.assertRaisesOpError("component types"):
q_f_2.queue_ref.eval()
@@ -1324,19 +1357,20 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
num_queues = 10
qlist = list()
for _ in xrange(num_queues):
- qlist.append(tf.PaddingFIFOQueue(10, tf.float32, ((),)))
+ qlist.append(
+ data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),)))
# Enqueue/Dequeue into a dynamically selected queue
for _ in xrange(20):
index = np.random.randint(num_queues)
- q = tf.PaddingFIFOQueue.from_list(index, qlist)
+ q = data_flow_ops.PaddingFIFOQueue.from_list(index, qlist)
q.enqueue((10.,)).run()
self.assertEqual(q.dequeue().eval(), 10.0)
def testSelectQueueOutOfRange(self):
with self.test_session():
- q1 = tf.PaddingFIFOQueue(10, tf.float32, ((),))
- q2 = tf.PaddingFIFOQueue(15, tf.float32, ((),))
- enq_q = tf.PaddingFIFOQueue.from_list(3, [q1, q2])
+ q1 = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
+ q2 = data_flow_ops.PaddingFIFOQueue(15, dtypes_lib.float32, ((),))
+ enq_q = data_flow_ops.PaddingFIFOQueue.from_list(3, [q1, q2])
with self.assertRaisesOpError("Index must be in the range"):
enq_q.dequeue().eval()
@@ -1358,22 +1392,25 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
def testResetOfBlockingOperation(self):
with self.test_session() as sess:
- q_empty = tf.PaddingFIFOQueue(5, tf.float32, ((),))
+ q_empty = data_flow_ops.PaddingFIFOQueue(5, dtypes_lib.float32, ((),))
dequeue_op = q_empty.dequeue()
dequeue_many_op = q_empty.dequeue_many(1)
- q_full = tf.PaddingFIFOQueue(5, tf.float32, ((),))
+ q_full = data_flow_ops.PaddingFIFOQueue(5, dtypes_lib.float32, ((),))
sess.run(q_full.enqueue_many(([1.0, 2.0, 3.0, 4.0, 5.0],)))
enqueue_op = q_full.enqueue((6.0,))
enqueue_many_op = q_full.enqueue_many(([6.0],))
threads = [
- self.checkedThread(self._blockingDequeue, args=(sess, dequeue_op)),
- self.checkedThread(self._blockingDequeueMany, args=(sess,
- dequeue_many_op)),
- self.checkedThread(self._blockingEnqueue, args=(sess, enqueue_op)),
- self.checkedThread(self._blockingEnqueueMany, args=(sess,
- enqueue_many_op))]
+ self.checkedThread(
+ self._blockingDequeue, args=(sess, dequeue_op)),
+ self.checkedThread(
+ self._blockingDequeueMany, args=(sess, dequeue_many_op)),
+ self.checkedThread(
+ self._blockingEnqueue, args=(sess, enqueue_op)),
+ self.checkedThread(
+ self._blockingEnqueueMany, args=(sess, enqueue_many_op))
+ ]
for t in threads:
t.start()
time.sleep(0.1)
@@ -1383,18 +1420,20 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
def testBigEnqueueMany(self):
with self.test_session() as sess:
- q = tf.PaddingFIFOQueue(5, tf.int32, ((),))
+ q = data_flow_ops.PaddingFIFOQueue(5, dtypes_lib.int32, ((),))
elem = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
enq = q.enqueue_many((elem,))
deq = q.dequeue()
size_op = q.size()
enq_done = []
+
def blocking_enqueue():
enq_done.append(False)
# This will fill the queue and then block until enough dequeues happen.
sess.run(enq)
enq_done.append(True)
+
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
@@ -1426,15 +1465,17 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
def testBigDequeueMany(self):
with self.test_session() as sess:
- q = tf.PaddingFIFOQueue(2, tf.int32, ((),))
+ q = data_flow_ops.PaddingFIFOQueue(2, dtypes_lib.int32, ((),))
elem = np.arange(4, dtype=np.int32)
enq_list = [q.enqueue((e,)) for e in elem]
deq = q.dequeue_many(4)
results = []
+
def blocking_dequeue():
# Will only complete after 4 enqueues complete.
results.extend(sess.run(deq))
+
thread = self.checkedThread(target=blocking_dequeue)
thread.start()
# The dequeue should start and then block.
@@ -1450,18 +1491,21 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
def testDtypes(self):
with self.test_session() as sess:
- dtypes = [tf.float32, tf.float64, tf.int32, tf.uint8, tf.int16, tf.int8,
- tf.int64, tf.bool, tf.complex64, tf.complex128]
+ dtypes = [
+ dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
+ dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8, dtypes_lib.int64,
+ dtypes_lib.bool, dtypes_lib.complex64, dtypes_lib.complex128
+ ]
shape = (32, 4, 128)
- q = tf.PaddingFIFOQueue(32, dtypes, [shape[1:]] * len(dtypes))
+ q = data_flow_ops.PaddingFIFOQueue(32, dtypes, [shape[1:]] * len(dtypes))
input_tuple = []
for dtype in dtypes:
np_dtype = dtype.as_numpy_dtype
np_array = np.random.randint(-10, 10, shape)
- if dtype == tf.bool:
+ if dtype == dtypes_lib.bool:
np_array = np_array > 0
- elif dtype in (tf.complex64, tf.complex128):
+ elif dtype in (dtypes_lib.complex64, dtypes_lib.complex128):
np_array = np.sqrt(np_array.astype(np_dtype))
else:
np_array = np_array.astype(np_dtype)
@@ -1477,19 +1521,23 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
def testUnknownRank(self):
with self.assertRaisesRegexp(ValueError, "must have a defined rank"):
- tf.PaddingFIFOQueue(32, [tf.float32], [tf.TensorShape(None)])
+ data_flow_ops.PaddingFIFOQueue(32, [dtypes_lib.float32],
+ [tensor_shape.TensorShape(None)])
-class QueueFromListTest(tf.test.TestCase):
+class QueueFromListTest(test.TestCase):
def testQueueFromListShapes(self):
- which = tf.constant(1)
+ which = constant_op.constant(1)
+
def _cmp(expected, *shapes):
qs = [
- tf.PaddingFIFOQueue(10, [tf.float32], [tf.TensorShape(s)])
- for s in shapes]
- s_expected = tf.TensorShape(expected)
- s = tf.QueueBase.from_list(which, qs).shapes[0]
+ data_flow_ops.PaddingFIFOQueue(10, [dtypes_lib.float32],
+ [tensor_shape.TensorShape(s)])
+ for s in shapes
+ ]
+ s_expected = tensor_shape.TensorShape(expected)
+ s = data_flow_ops.QueueBase.from_list(which, qs).shapes[0]
if s_expected.ndims is None:
self.assertEqual(s_expected.ndims, s.ndims)
else:
@@ -1505,30 +1553,31 @@ class QueueFromListTest(tf.test.TestCase):
_cmp(None, [1, None], [1], [1])
def testQueueFromListShapesMultipleComponents(self):
- q_u_u = tf.PaddingFIFOQueue(
- 10,
- [tf.float32, tf.int32],
- [tf.TensorShape([None]), tf.TensorShape([None])])
- q_u_f = tf.PaddingFIFOQueue(
- 10, [tf.float32, tf.int32],
- [tf.TensorShape([None]), tf.TensorShape([1, 2])])
- q_f_f = tf.PaddingFIFOQueue(
- 10, [tf.float32, tf.int32],
- [tf.TensorShape([3, 4]), tf.TensorShape([1, 2])])
- which = tf.constant(1)
-
- s_cmp_1 = tf.QueueBase.from_list(which, [q_u_u, q_u_u, q_u_u]).shapes
+ q_u_u = data_flow_ops.PaddingFIFOQueue(
+ 10, [dtypes_lib.float32, dtypes_lib.int32],
+ [tensor_shape.TensorShape([None]), tensor_shape.TensorShape([None])])
+ q_u_f = data_flow_ops.PaddingFIFOQueue(
+ 10, [dtypes_lib.float32, dtypes_lib.int32],
+ [tensor_shape.TensorShape([None]), tensor_shape.TensorShape([1, 2])])
+ q_f_f = data_flow_ops.PaddingFIFOQueue(
+ 10, [dtypes_lib.float32, dtypes_lib.int32],
+ [tensor_shape.TensorShape([3, 4]), tensor_shape.TensorShape([1, 2])])
+ which = constant_op.constant(1)
+
+ s_cmp_1 = data_flow_ops.QueueBase.from_list(which,
+ [q_u_u, q_u_u, q_u_u]).shapes
self.assertEqual([1, 1], [x.ndims for x in s_cmp_1])
self.assertEqual([None, None], [x.as_list()[0] for x in s_cmp_1])
- s_cmp_2 = tf.QueueBase.from_list(which, [q_u_u, q_u_u, q_u_f]).shapes
+ s_cmp_2 = data_flow_ops.QueueBase.from_list(which,
+ [q_u_u, q_u_u, q_u_f]).shapes
self.assertEqual([1, None], [x.ndims for x in s_cmp_2])
self.assertEqual([None], s_cmp_2[0].as_list())
- s_cmp_3 = tf.QueueBase.from_list(which, [q_f_f, q_f_f]).shapes
+ s_cmp_3 = data_flow_ops.QueueBase.from_list(which, [q_f_f, q_f_f]).shapes
self.assertEqual([2, 2], [x.ndims for x in s_cmp_3])
self.assertEqual([[3, 4], [1, 2]], [x.as_list() for x in s_cmp_3])
if __name__ == "__main__":
- tf.test.main()
+ test.main()
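
A sketch, not part of the patch: every hunk in this file applies the same mechanical rewrite, replacing the top-level `tf.` alias with direct imports of the modules that define each symbol. A minimal self-contained test in the resulting style, assuming the TF-0.12-era module paths used throughout the hunks above:

from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.platform import test


class PaddingFIFOQueueSketch(test.TestCase):

  def testEnqueueManyThenDequeueMany(self):
    with self.test_session():
      # One scalar float32 component per element, hence shapes=((),).
      q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
      q.enqueue_many(([10.0, 20.0, 30.0],)).run()
      # FIFO order: the first two enqueued elements come back first.
      self.assertAllEqual([10.0, 20.0], q.dequeue_many(2).eval())


if __name__ == "__main__":
  test.main()
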
diff --git a/tensorflow/python/kernel_tests/parameterized_truncated_normal_op_test.py b/tensorflow/python/kernel_tests/parameterized_truncated_normal_op_test.py
index 1c09949598..a43f169df0 100644
--- a/tensorflow/python/kernel_tests/parameterized_truncated_normal_op_test.py
+++ b/tensorflow/python/kernel_tests/parameterized_truncated_normal_op_test.py
@@ -13,6 +13,7 @@
# limitations under the License.
# ==============================================================================
"""Tests for ParameterizedTruncatedNormalOp."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
@@ -23,9 +24,15 @@ import timeit
import numpy as np
from six.moves import range # pylint: disable=redefined-builtin
-import tensorflow as tf
+from tensorflow.core.protobuf import config_pb2
+from tensorflow.python.client import session
+from tensorflow.python.framework import ops
+from tensorflow.python.framework import random_seed
+from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import random_ops
+from tensorflow.python.platform import test
+from tensorflow.python.platform import tf_logging
class TruncatedNormalMoments(object):
@@ -96,7 +103,7 @@ def z_test(real, expected, i, num_samples):
return abs((real[i] - moment_mean) / math.sqrt(total_variance))
-class ParameterizedTruncatedNormalTest(tf.test.TestCase):
+class ParameterizedTruncatedNormalTest(test.TestCase):
_use_gpu = False
z_limit = 6.0
@@ -108,7 +115,7 @@ class ParameterizedTruncatedNormalTest(tf.test.TestCase):
# TruncatedNormalMoments requires scipy.stats.
# Give up early if we are unable to import it.
import scipy.stats # pylint: disable=g-import-not-at-top,unused-variable
- tf.set_random_seed(seed)
+ random_seed.set_random_seed(seed)
with self.test_session(use_gpu=self._use_gpu):
samples = random_ops.parameterized_truncated_normal(shape, mean, stddev,
minval,
@@ -121,7 +128,7 @@ class ParameterizedTruncatedNormalTest(tf.test.TestCase):
self.assertLess(
z_test(moments, expected_moments, i, num_samples), self.z_limit)
except ImportError as e:
- tf.logging.warn("Cannot test truncated normal op: %s" % str(e))
+ tf_logging.warn("Cannot test truncated normal op: %s" % str(e))
def validateKolmogorovSmirnov(self,
shape,
@@ -132,7 +139,7 @@ class ParameterizedTruncatedNormalTest(tf.test.TestCase):
seed=1618):
try:
import scipy.stats # pylint: disable=g-import-not-at-top
- tf.set_random_seed(seed)
+ random_seed.set_random_seed(seed)
with self.test_session(use_gpu=self._use_gpu):
samples = random_ops.parameterized_truncated_normal(shape, mean, stddev,
minval,
@@ -150,7 +157,7 @@ class ParameterizedTruncatedNormalTest(tf.test.TestCase):
pvalue = scipy.stats.kstest(samples, truncated_cdf)[1]
self.assertGreater(pvalue, 1e-10)
except ImportError as e:
- tf.logging.warn("Cannot test truncated normal op: %s" % str(e))
+ tf_logging.warn("Cannot test truncated normal op: %s" % str(e))
def testDefaults(self):
self.validateMoments([10**5], 0.0, 1.0, -2.0, 2.0)
@@ -186,14 +193,16 @@ def parameterized_vs_naive(shape, num_iters, use_gpu=False):
np.random.seed(1618) # Make it reproducible.
# No CSE/CF.
- optimizer_options = tf.OptimizerOptions(opt_level=tf.OptimizerOptions.L0)
- config = tf.ConfigProto(
- graph_options=tf.GraphOptions(optimizer_options=optimizer_options))
+ optimizer_options = config_pb2.OptimizerOptions(
+ opt_level=config_pb2.OptimizerOptions.L0)
+ config = config_pb2.ConfigProto(graph_options=config_pb2.GraphOptions(
+ optimizer_options=optimizer_options))
- with tf.Session(config=config) as sess:
- with tf.device("/cpu:0" if not use_gpu else None):
- param_op = tf.group(random_ops.parameterized_truncated_normal(shape))
- naive_op = tf.group(random_ops.truncated_normal(shape))
+ with session.Session(config=config) as sess:
+ with ops.device("/cpu:0" if not use_gpu else None):
+ param_op = control_flow_ops.group(
+ random_ops.parameterized_truncated_normal(shape))
+ naive_op = control_flow_ops.group(random_ops.truncated_normal(shape))
# Burn-in to avoid session setup costs in the timing.
sess.run(param_op)
@@ -205,7 +214,7 @@ def parameterized_vs_naive(shape, num_iters, use_gpu=False):
return param_dt, naive_dt
-class TruncatedNormalBenchmark(tf.test.Benchmark):
+class TruncatedNormalBenchmark(test.Benchmark):
def benchmarkParameterizedOpVsNaiveOpCpu(self):
self._benchmarkParameterizedOpVsNaiveOp(False)
@@ -234,4 +243,4 @@ class TruncatedNormalBenchmark(tf.test.Benchmark):
if __name__ == "__main__":
- tf.test.main()
+ test.main()
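
A sketch, not part of the patch: the benchmark hunk above builds its session configuration from the protobuf module directly instead of the `tf.ConfigProto`/`tf.OptimizerOptions` aliases. The configuration pattern in isolation, assuming the same modules:

from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.ops import random_ops

# Opt level L0 disables common-subexpression elimination and constant
# folding, so the benchmarked op is re-executed on every sess.run() call.
optimizer_options = config_pb2.OptimizerOptions(
    opt_level=config_pb2.OptimizerOptions.L0)
config = config_pb2.ConfigProto(
    graph_options=config_pb2.GraphOptions(optimizer_options=optimizer_options))

with session.Session(config=config) as sess:
  sess.run(random_ops.parameterized_truncated_normal([1000]))
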
diff --git a/tensorflow/python/kernel_tests/parsing_ops_test.py b/tensorflow/python/kernel_tests/parsing_ops_test.py
index 56bef40301..7ccd81898e 100644
--- a/tensorflow/python/kernel_tests/parsing_ops_test.py
+++ b/tensorflow/python/kernel_tests/parsing_ops_test.py
@@ -21,22 +21,33 @@ from __future__ import print_function
import itertools
import numpy as np
-import tensorflow as tf
from google.protobuf import json_format
+
+from tensorflow.core.example import example_pb2
+from tensorflow.core.example import feature_pb2
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import errors_impl
+from tensorflow.python.framework import ops
+from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import parsing_ops
+from tensorflow.python.platform import test
+from tensorflow.python.platform import tf_logging
# Helpers for creating Example objects
-example = tf.train.Example
-feature = tf.train.Feature
-features = lambda d: tf.train.Features(feature=d)
-bytes_feature = lambda v: feature(bytes_list=tf.train.BytesList(value=v))
-int64_feature = lambda v: feature(int64_list=tf.train.Int64List(value=v))
-float_feature = lambda v: feature(float_list=tf.train.FloatList(value=v))
+example = example_pb2.Example
+feature = feature_pb2.Feature
+features = lambda d: feature_pb2.Features(feature=d)
+bytes_feature = lambda v: feature(bytes_list=feature_pb2.BytesList(value=v))
+int64_feature = lambda v: feature(int64_list=feature_pb2.Int64List(value=v))
+float_feature = lambda v: feature(float_list=feature_pb2.FloatList(value=v))
# Helpers for creating SequenceExample objects
-feature_list = lambda l: tf.train.FeatureList(feature=l)
-feature_lists = lambda d: tf.train.FeatureLists(feature_list=d)
-sequence_example = tf.train.SequenceExample
+feature_list = lambda l: feature_pb2.FeatureList(feature=l)
+feature_lists = lambda d: feature_pb2.FeatureLists(feature_list=d)
+sequence_example = example_pb2.SequenceExample
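
A sketch, not part of the patch: with `example_pb2` and `feature_pb2` imported directly, the helper lambdas above assemble test protos without the `tf.train` aliases. A standalone equivalent, with illustrative feature names and values:

from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2

# A single Example holding one float feature and one int64 feature.
ex = example_pb2.Example(features=feature_pb2.Features(feature={
    "val": feature_pb2.Feature(
        float_list=feature_pb2.FloatList(value=[3.0, 4.0])),
    "idx": feature_pb2.Feature(
        int64_list=feature_pb2.Int64List(value=[5, 10])),
}))
serialized = ex.SerializeToString()  # the form parse_example consumes
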
def flatten(list_of_lists):
@@ -47,9 +58,8 @@ def flatten(list_of_lists):
def flatten_values_tensors_or_sparse(tensors_list):
"""Flatten each SparseTensor object into 3 Tensors for session.run()."""
return list(
- flatten([[v.indices, v.values, v.dense_shape]
- if isinstance(v, tf.SparseTensor)
- else [v] for v in tensors_list]))
+ flatten([[v.indices, v.values, v.dense_shape] if isinstance(
+ v, sparse_tensor.SparseTensor) else [v] for v in tensors_list]))
def _compare_output_to_expected(tester, dict_tensors, expected_tensors,
@@ -59,8 +69,8 @@ def _compare_output_to_expected(tester, dict_tensors, expected_tensors,
i = 0 # Index into the flattened output of session.run()
for k, v in dict_tensors.items():
expected_v = expected_tensors[k]
- tf.logging.info("Comparing key: %s", k)
- if isinstance(v, tf.SparseTensor):
+ tf_logging.info("Comparing key: %s", k)
+ if isinstance(v, sparse_tensor.SparseTensor):
# Three outputs for SparseTensor : indices, values, shape.
tester.assertEqual([k, len(expected_v)], [k, 3])
tester.assertAllEqual(expected_v[0], flat_output[i])
@@ -73,18 +83,18 @@ def _compare_output_to_expected(tester, dict_tensors, expected_tensors,
i += 1
-class ParseExampleTest(tf.test.TestCase):
+class ParseExampleTest(test.TestCase):
def _test(self, kwargs, expected_values=None, expected_err=None):
with self.test_session() as sess:
if expected_err:
- with self.assertRaisesWithPredicateMatch(
- expected_err[0], expected_err[1]):
- out = tf.parse_example(**kwargs)
+ with self.assertRaisesWithPredicateMatch(expected_err[0],
+ expected_err[1]):
+ out = parsing_ops.parse_example(**kwargs)
sess.run(flatten_values_tensors_or_sparse(out.values()))
else:
# Returns dict w/ Tensors and SparseTensors.
- out = tf.parse_example(**kwargs)
+ out = parsing_ops.parse_example(**kwargs)
result = flatten_values_tensors_or_sparse(out.values())
# Check values.
tf_result = sess.run(result)
@@ -93,13 +103,13 @@ class ParseExampleTest(tf.test.TestCase):
# Check shapes; if serialized is a Tensor we need its size to
# properly check.
serialized = kwargs["serialized"]
- batch_size = (serialized.eval().size if isinstance(serialized, tf.Tensor)
+ batch_size = (serialized.eval().size if isinstance(serialized, ops.Tensor)
else np.asarray(serialized).size)
for k, f in kwargs["features"].items():
- if isinstance(f, tf.FixedLenFeature) and f.shape is not None:
+ if isinstance(f, parsing_ops.FixedLenFeature) and f.shape is not None:
self.assertEqual(
tuple(out[k].get_shape().as_list()), (batch_size,) + f.shape)
- elif isinstance(f, tf.VarLenFeature):
+ elif isinstance(f, parsing_ops.VarLenFeature):
self.assertEqual(
tuple(out[k].indices.get_shape().as_list()), (None, 2))
self.assertEqual(tuple(out[k].values.get_shape().as_list()), (None,))
@@ -132,39 +142,47 @@ class ParseExampleTest(tf.test.TestCase):
self._test(
{
- "example_names": np.empty(
- (0,), dtype=bytes),
- "serialized": tf.convert_to_tensor(["", ""]),
+ "example_names":
+ np.empty(
+ (0,), dtype=bytes),
+ "serialized":
+ ops.convert_to_tensor(["", ""]),
"features": {
- sparse_name: tf.VarLenFeature(tf.int64),
- a_name: tf.FixedLenFeature(
- (1, 3), tf.int64, default_value=a_default),
- b_name: tf.FixedLenFeature(
- (3, 3), tf.string, default_value=b_default),
- c_name: tf.FixedLenFeature(
- (2,), tf.float32, default_value=c_default),
+ sparse_name:
+ parsing_ops.VarLenFeature(dtypes.int64),
+ a_name:
+ parsing_ops.FixedLenFeature(
+ (1, 3), dtypes.int64, default_value=a_default),
+ b_name:
+ parsing_ops.FixedLenFeature(
+ (3, 3), dtypes.string, default_value=b_default),
+ c_name:
+ parsing_ops.FixedLenFeature(
+ (2,), dtypes.float32, default_value=c_default),
}
},
expected_output)
def testEmptySerializedWithoutDefaultsShouldFail(self):
input_features = {
- "st_a": tf.VarLenFeature(tf.int64),
- "a": tf.FixedLenFeature(
- (1, 3), tf.int64, default_value=[0, 42, 0]),
- "b": tf.FixedLenFeature(
- (3, 3),
- tf.string,
- default_value=np.random.rand(3, 3).astype(bytes)),
+ "st_a":
+ parsing_ops.VarLenFeature(dtypes.int64),
+ "a":
+ parsing_ops.FixedLenFeature(
+ (1, 3), dtypes.int64, default_value=[0, 42, 0]),
+ "b":
+ parsing_ops.FixedLenFeature(
+ (3, 3),
+ dtypes.string,
+ default_value=np.random.rand(3, 3).astype(bytes)),
# Feature "c" is missing a default, this gap will cause failure.
- "c": tf.FixedLenFeature(
- (2,), dtype=tf.float32),
+ "c":
+ parsing_ops.FixedLenFeature(
+ (2,), dtype=dtypes.float32),
}
# Edge case where the key is there but the feature value is empty
- original = example(features=features({
- "c": feature()
- }))
+ original = example(features=features({"c": feature()}))
self._test(
{
"example_names": ["in1"],
@@ -172,7 +190,7 @@ class ParseExampleTest(tf.test.TestCase):
"features": input_features,
},
expected_err=(
- tf.OpError,
+ errors_impl.OpError,
"Name: in1, Feature: c \\(data type: float\\) is required"))
# Standard case of missing key and value.
@@ -183,7 +201,7 @@ class ParseExampleTest(tf.test.TestCase):
"features": input_features,
},
expected_err=(
- tf.OpError,
+ errors_impl.OpError,
"Name: in1, Feature: c \\(data type: float\\) is required"))
def testDenseNotMatchingShapeShouldFail(self):
@@ -201,10 +219,12 @@ class ParseExampleTest(tf.test.TestCase):
self._test(
{
"example_names": names,
- "serialized": tf.convert_to_tensor(serialized),
- "features": {"a": tf.FixedLenFeature((1, 3), tf.float32)}
+ "serialized": ops.convert_to_tensor(serialized),
+ "features": {
+ "a": parsing_ops.FixedLenFeature((1, 3), dtypes.float32)
+ }
},
- expected_err=(tf.OpError,
+ expected_err=(errors_impl.OpError,
"Name: failing, Key: a, Index: 1. Number of float val"))
def testDenseDefaultNoShapeShouldFail(self):
@@ -215,8 +235,10 @@ class ParseExampleTest(tf.test.TestCase):
self._test(
{
"example_names": ["failing"],
- "serialized": tf.convert_to_tensor(serialized),
- "features": {"a": tf.FixedLenFeature(None, tf.float32)}
+ "serialized": ops.convert_to_tensor(serialized),
+ "features": {
+ "a": parsing_ops.FixedLenFeature(None, dtypes.float32)
+ }
},
expected_err=(ValueError, "Missing shape for feature a"))
@@ -257,10 +279,10 @@ class ParseExampleTest(tf.test.TestCase):
}
self._test({
- "serialized": tf.convert_to_tensor(serialized),
+ "serialized": ops.convert_to_tensor(serialized),
"features": {
- "st_c": tf.VarLenFeature(tf.float32),
- "st_d": tf.VarLenFeature(tf.string)
+ "st_c": parsing_ops.VarLenFeature(dtypes.float32),
+ "st_d": parsing_ops.VarLenFeature(dtypes.string)
}
}, expected_output)
@@ -276,29 +298,30 @@ class ParseExampleTest(tf.test.TestCase):
})),
example(features=features({
"val": feature(), # feature with nothing in it
- # missing idx feature
+ # missing idx feature
})),
example(features=features({
"val": float_feature([1, 2, -1]),
- "idx": int64_feature([0, 9, 3]) # unsorted
+ "idx":
+ int64_feature([0, 9, 3]) # unsorted
}))
]
serialized = [m.SerializeToString() for m in original]
expected_sp = ( # indices, values, shape
- np.array([[0, 5], [0, 10], [3, 0], [3, 3], [3, 9]], dtype=np.int64),
- np.array([3.0, 4.0, 1.0, -1.0, 2.0], dtype=np.float32),
- np.array([4, 13], dtype=np.int64)) # batch == 4, max_elems = 13
+ np.array(
+ [[0, 5], [0, 10], [3, 0], [3, 3], [3, 9]], dtype=np.int64),
+ np.array(
+ [3.0, 4.0, 1.0, -1.0, 2.0], dtype=np.float32), np.array(
+ [4, 13], dtype=np.int64)) # batch == 4, max_elems = 13
- expected_output = {
- "sp": expected_sp,
- }
+ expected_output = {"sp": expected_sp,}
self._test({
- "serialized": tf.convert_to_tensor(serialized),
+ "serialized": ops.convert_to_tensor(serialized),
"features": {
- "sp": tf.SparseFeature("idx", "val", tf.float32, 13)
+ "sp": parsing_ops.SparseFeature("idx", "val", dtypes.float32, 13)
}
}, expected_output)
@@ -318,14 +341,16 @@ class ParseExampleTest(tf.test.TestCase):
serialized = [m.SerializeToString() for m in original]
expected_sp1 = ( # indices, values, shape
- np.array([[0, 5], [0, 10]], dtype=np.int64),
- np.array([3.0, 4.0], dtype=np.float32),
- np.array([2, 13], dtype=np.int64)) # batch == 2, max_elems = 13
+ np.array(
+ [[0, 5], [0, 10]], dtype=np.int64), np.array(
+ [3.0, 4.0], dtype=np.float32), np.array(
+ [2, 13], dtype=np.int64)) # batch == 2, max_elems = 13
expected_sp2 = ( # indices, values, shape
- np.array([[0, 5], [0, 10]], dtype=np.int64),
- np.array([5.0, 6.0], dtype=np.float32),
-        np.array([2, 7], dtype=np.int64))  # batch == 2, max_elems = 7
+ np.array(
+ [[0, 5], [0, 10]], dtype=np.int64), np.array(
+ [5.0, 6.0], dtype=np.float32), np.array(
+            [2, 7], dtype=np.int64))  # batch == 2, max_elems = 7
expected_output = {
"sp1": expected_sp1,
@@ -333,10 +358,12 @@ class ParseExampleTest(tf.test.TestCase):
}
self._test({
- "serialized": tf.convert_to_tensor(serialized),
+ "serialized": ops.convert_to_tensor(serialized),
"features": {
- "sp1": tf.SparseFeature("idx", "val1", tf.float32, 13),
- "sp2": tf.SparseFeature("idx", "val2", tf.float32, 7)
+ "sp1":
+ parsing_ops.SparseFeature("idx", "val1", dtypes.float32, 13),
+ "sp2":
+ parsing_ops.SparseFeature("idx", "val2", dtypes.float32, 7)
}
}, expected_output)
@@ -356,21 +383,26 @@ class ParseExampleTest(tf.test.TestCase):
serialized = [m.SerializeToString() for m in original]
expected_output = {
- aname: np.array(
- [[1, 1], [-1, -1]], dtype=np.float32).reshape(2, 1, 2, 1),
- bname: np.array(
- ["b0_str", "b1"], dtype=bytes).reshape(2, 1, 1, 1, 1),
+ aname:
+ np.array(
+ [[1, 1], [-1, -1]], dtype=np.float32).reshape(2, 1, 2, 1),
+ bname:
+ np.array(
+ ["b0_str", "b1"], dtype=bytes).reshape(2, 1, 1, 1, 1),
}
# No defaults, values required
self._test(
{
- "serialized": tf.convert_to_tensor(serialized),
+ "serialized":
+ ops.convert_to_tensor(serialized),
"features": {
- aname: tf.FixedLenFeature(
- (1, 2, 1), dtype=tf.float32),
- bname: tf.FixedLenFeature(
- (1, 1, 1, 1), dtype=tf.string),
+ aname:
+ parsing_ops.FixedLenFeature(
+ (1, 2, 1), dtype=dtypes.float32),
+ bname:
+ parsing_ops.FixedLenFeature(
+ (1, 1, 1, 1), dtype=dtypes.string),
}
},
expected_output)
@@ -382,15 +414,12 @@ class ParseExampleTest(tf.test.TestCase):
bname = "b*has+a:tricky_name"
# TODO(lew): Feature appearing twice should be an error in future.
original = [
- (
- example(features=features({
- aname: float_feature([10, 10]),
- })),
- example(features=features({
- aname: float_feature([1, 1]),
- bname: bytes_feature([b"b0_str"]),
- }))
- ),
+ (example(features=features({
+ aname: float_feature([10, 10]),
+ })), example(features=features({
+ aname: float_feature([1, 1]),
+ bname: bytes_feature([b"b0_str"]),
+ }))),
(
example(features=features({
bname: bytes_feature([b"b100"]),
@@ -398,29 +427,34 @@ class ParseExampleTest(tf.test.TestCase):
example(features=features({
aname: float_feature([-1, -1]),
bname: bytes_feature([b"b1"]),
- })),
- ),
+ })),),
]
serialized = [
- m.SerializeToString() + n.SerializeToString() for (m, n) in original]
+ m.SerializeToString() + n.SerializeToString() for (m, n) in original
+ ]
expected_output = {
- aname: np.array(
- [[1, 1], [-1, -1]], dtype=np.float32).reshape(2, 1, 2, 1),
- bname: np.array(
- ["b0_str", "b1"], dtype=bytes).reshape(2, 1, 1, 1, 1),
+ aname:
+ np.array(
+ [[1, 1], [-1, -1]], dtype=np.float32).reshape(2, 1, 2, 1),
+ bname:
+ np.array(
+ ["b0_str", "b1"], dtype=bytes).reshape(2, 1, 1, 1, 1),
}
# No defaults, values required
self._test(
{
- "serialized": tf.convert_to_tensor(serialized),
+ "serialized":
+ ops.convert_to_tensor(serialized),
"features": {
- aname: tf.FixedLenFeature(
- (1, 2, 1), dtype=tf.float32),
- bname: tf.FixedLenFeature(
- (1, 1, 1, 1), dtype=tf.string),
+ aname:
+ parsing_ops.FixedLenFeature(
+ (1, 2, 1), dtype=dtypes.float32),
+ bname:
+ parsing_ops.FixedLenFeature(
+ (1, 1, 1, 1), dtype=dtypes.string),
}
},
expected_output)
@@ -435,15 +469,19 @@ class ParseExampleTest(tf.test.TestCase):
serialized = [m.SerializeToString() for m in original]
expected_output = {
- "a": np.array([[1], [-1]], dtype=np.float32) # 2x1 (column vector)
+ "a":
+ np.array(
+ [[1], [-1]], dtype=np.float32) # 2x1 (column vector)
}
self._test(
{
- "serialized": tf.convert_to_tensor(serialized),
+ "serialized":
+ ops.convert_to_tensor(serialized),
"features": {
- "a": tf.FixedLenFeature(
- (1,), dtype=tf.float32, default_value=-1),
+ "a":
+ parsing_ops.FixedLenFeature(
+ (1,), dtype=dtypes.float32, default_value=-1),
}
},
expected_output)
@@ -464,20 +502,31 @@ class ParseExampleTest(tf.test.TestCase):
serialized = [m.SerializeToString() for m in original]
expected_output = {
- "a": np.array(
- [[1, 1], [3, -3], [3, -3]], dtype=np.float32).reshape(3, 1, 2, 1),
- "b": np.array(
- ["tmp_str", "b1", "tmp_str"], dtype=bytes).reshape(3, 1, 1, 1, 1),
+ "a":
+ np.array(
+ [[1, 1], [3, -3], [3, -3]], dtype=np.float32).reshape(3, 1, 2,
+ 1),
+ "b":
+ np.array(
+ ["tmp_str", "b1", "tmp_str"], dtype=bytes).reshape(3, 1, 1, 1,
+ 1),
}
self._test(
{
- "serialized": tf.convert_to_tensor(serialized),
+ "serialized":
+ ops.convert_to_tensor(serialized),
"features": {
- "a": tf.FixedLenFeature(
- (1, 2, 1), dtype=tf.float32, default_value=[3.0, -3.0]),
- "b": tf.FixedLenFeature(
- (1, 1, 1, 1), dtype=tf.string, default_value="tmp_str"),
+ "a":
+ parsing_ops.FixedLenFeature(
+ (1, 2, 1),
+ dtype=dtypes.float32,
+ default_value=[3.0, -3.0]),
+ "b":
+ parsing_ops.FixedLenFeature(
+ (1, 1, 1, 1),
+ dtype=dtypes.string,
+ default_value="tmp_str"),
}
},
expected_output)
@@ -491,9 +540,10 @@ class ParseExampleTest(tf.test.TestCase):
np.array(
[2, 0], dtype=np.int64)) # batch == 2, max_elems = 0
expected_sp = ( # indices, values, shape
- np.array([[0, 0], [0, 3], [1, 7]], dtype=np.int64),
- np.array(["a", "b", "c"], dtype="|S"),
-        np.array([2, 13], dtype=np.int64))  # batch == 2, max_elems = 13
+ np.array(
+ [[0, 0], [0, 3], [1, 7]], dtype=np.int64), np.array(
+ ["a", "b", "c"], dtype="|S"), np.array(
+                  [2, 13], dtype=np.int64))  # batch == 2, max_elems = 13
original = [
example(features=features({
@@ -523,31 +573,40 @@ class ParseExampleTest(tf.test.TestCase):
self._test(
{
- "example_names": names,
- "serialized": tf.convert_to_tensor(serialized),
+ "example_names":
+ names,
+ "serialized":
+ ops.convert_to_tensor(serialized),
"features": {
- "st_a": tf.VarLenFeature(tf.int64),
- "sp": tf.SparseFeature("idx", "val", tf.string, 13),
- "a": tf.FixedLenFeature(
- (1, 3), tf.int64, default_value=a_default),
- "b": tf.FixedLenFeature(
- (3, 3), tf.string, default_value=b_default),
+ "st_a":
+ parsing_ops.VarLenFeature(dtypes.int64),
+ "sp":
+ parsing_ops.SparseFeature("idx", "val", dtypes.string, 13),
+ "a":
+ parsing_ops.FixedLenFeature(
+ (1, 3), dtypes.int64, default_value=a_default),
+ "b":
+ parsing_ops.FixedLenFeature(
+ (3, 3), dtypes.string, default_value=b_default),
# Feature "c" must be provided, since it has no default_value.
- "c": tf.FixedLenFeature((2,), tf.float32),
+ "c":
+ parsing_ops.FixedLenFeature((2,), dtypes.float32),
}
},
expected_output)
def testSerializedContainingSparseAndSparseFeatureWithReuse(self):
expected_idx = ( # indices, values, shape
- np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=np.int64),
- np.array([0, 3, 7, 1]),
- np.array([2, 2], dtype=np.int64)) # batch == 4, max_elems = 2
+ np.array(
+ [[0, 0], [0, 1], [1, 0], [1, 1]], dtype=np.int64),
+ np.array([0, 3, 7, 1]), np.array(
+ [2, 2], dtype=np.int64)) # batch == 2, max_elems = 2
expected_sp = ( # indices, values, shape
- np.array([[0, 0], [0, 3], [1, 1], [1, 7]], dtype=np.int64),
- np.array(["a", "b", "d", "c"], dtype="|S"),
- np.array([2, 13], dtype=np.int64)) # batch == 4, max_elems = 13
+ np.array(
+ [[0, 0], [0, 3], [1, 1], [1, 7]], dtype=np.int64), np.array(
+ ["a", "b", "d", "c"], dtype="|S"), np.array(
+ [2, 13], dtype=np.int64)) # batch == 2, max_elems = 13
original = [
example(features=features({
@@ -567,39 +626,37 @@ class ParseExampleTest(tf.test.TestCase):
"sp": expected_sp,
}
- self._test(
- {
- "example_names": names,
- "serialized": tf.convert_to_tensor(serialized),
- "features": {
- "idx": tf.VarLenFeature(tf.int64),
- "sp": tf.SparseFeature("idx", "val", tf.string, 13),
- }
- },
- expected_output)
+ self._test({
+ "example_names": names,
+ "serialized": ops.convert_to_tensor(serialized),
+ "features": {
+ "idx": parsing_ops.VarLenFeature(dtypes.int64),
+ "sp": parsing_ops.SparseFeature("idx", "val", dtypes.string, 13),
+ }
+ }, expected_output)
-class ParseSingleExampleTest(tf.test.TestCase):
+class ParseSingleExampleTest(test.TestCase):
def _test(self, kwargs, expected_values=None, expected_err=None):
with self.test_session() as sess:
if expected_err:
- with self.assertRaisesWithPredicateMatch(
- expected_err[0], expected_err[1]):
- out = tf.parse_single_example(**kwargs)
+ with self.assertRaisesWithPredicateMatch(expected_err[0],
+ expected_err[1]):
+ out = parsing_ops.parse_single_example(**kwargs)
sess.run(flatten_values_tensors_or_sparse(out.values()))
else:
# Returns dict w/ Tensors and SparseTensors.
- out = tf.parse_single_example(**kwargs)
+ out = parsing_ops.parse_single_example(**kwargs)
# Check values.
tf_result = sess.run(flatten_values_tensors_or_sparse(out.values()))
_compare_output_to_expected(self, out, expected_values, tf_result)
# Check shapes.
for k, f in kwargs["features"].items():
- if isinstance(f, tf.FixedLenFeature) and f.shape is not None:
+ if isinstance(f, parsing_ops.FixedLenFeature) and f.shape is not None:
self.assertEqual(tuple(out[k].get_shape()), f.shape)
- elif isinstance(f, tf.VarLenFeature):
+ elif isinstance(f, parsing_ops.VarLenFeature):
self.assertEqual(
tuple(out[k].indices.get_shape().as_list()), (None, 1))
self.assertEqual(tuple(out[k].values.get_shape().as_list()), (None,))
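The assertions above pin down the static-shape contract parse_single_example gives a VarLenFeature: a SparseTensor with rank-1 indices. Concretely, for one Example holding two values under such a key (an illustrative sketch, not test code):

    # For a single Example with int64 values [0, 3] under a VarLenFeature key:
    st = out["st_a"]  # hypothetical key, as in the tests around this hunk
    # st.indices     -> [[0], [1]]   static shape (None, 1) before evaluation
    # st.values      -> [0, 3]       static shape (None,)
    # st.dense_shape -> [2]          static shape (1,): number of values present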
@@ -607,24 +664,28 @@ class ParseSingleExampleTest(tf.test.TestCase):
tuple(out[k].dense_shape.get_shape().as_list()), (1,))
def testSingleExampleWithSparseAndSparseFeatureAndDense(self):
- original = example(features=features({"c": float_feature([3, 4]),
- "val": bytes_feature([b"a", b"b"]),
- "idx": int64_feature([0, 3]),
- "st_a": float_feature([3.0, 4.0])}))
+ original = example(features=features({
+ "c": float_feature([3, 4]),
+ "val": bytes_feature([b"a", b"b"]),
+ "idx": int64_feature([0, 3]),
+ "st_a": float_feature([3.0, 4.0])
+ }))
serialized = original.SerializeToString()
- expected_st_a = (np.array(
- [[0], [1]], dtype=np.int64), # indices
- np.array(
- [3.0, 4.0], dtype=np.float32), # values
- np.array(
- [2], dtype=np.int64)) # shape: max_values = 2
+ expected_st_a = (
+ np.array(
+ [[0], [1]], dtype=np.int64), # indices
+ np.array(
+ [3.0, 4.0], dtype=np.float32), # values
+ np.array(
+ [2], dtype=np.int64)) # shape: max_values = 2
expected_sp = ( # indices, values, shape
- np.array([[0], [3]], dtype=np.int64),
- np.array(["a", "b"], dtype="|S"),
- np.array([13], dtype=np.int64)) # max_values = 13
+ np.array(
+ [[0], [3]], dtype=np.int64), np.array(
+ ["a", "b"], dtype="|S"), np.array(
+ [13], dtype=np.int64)) # max_values = 13
a_default = [1, 2, 3]
b_default = np.random.rand(3, 3).astype(bytes)
@@ -639,23 +700,30 @@ class ParseSingleExampleTest(tf.test.TestCase):
self._test(
{
- "example_names": tf.convert_to_tensor("in1"),
- "serialized": tf.convert_to_tensor(serialized),
+ "example_names":
+ ops.convert_to_tensor("in1"),
+ "serialized":
+ ops.convert_to_tensor(serialized),
"features": {
- "st_a": tf.VarLenFeature(tf.float32),
- "sp": tf.SparseFeature("idx", "val", tf.string, 13),
- "a": tf.FixedLenFeature(
- (1, 3), tf.int64, default_value=a_default),
- "b": tf.FixedLenFeature(
- (3, 3), tf.string, default_value=b_default),
+ "st_a":
+ parsing_ops.VarLenFeature(dtypes.float32),
+ "sp":
+ parsing_ops.SparseFeature("idx", "val", dtypes.string, 13),
+ "a":
+ parsing_ops.FixedLenFeature(
+ (1, 3), dtypes.int64, default_value=a_default),
+ "b":
+ parsing_ops.FixedLenFeature(
+ (3, 3), dtypes.string, default_value=b_default),
# Feature "c" must be provided, since it has no default_value.
- "c": tf.FixedLenFeature((2,), tf.float32),
+ "c":
+ parsing_ops.FixedLenFeature((2,), dtypes.float32),
}
},
expected_output)
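This rounds out the single-example path. Where parse_example returns batch-major tensors, parse_single_example drops the leading batch dimension; a hedged sketch reusing the patch's imports (serialized here is assumed to be one Example's SerializeToString()):

    parsed = parsing_ops.parse_single_example(
        ops.convert_to_tensor(serialized),  # scalar string: one Example
        features={
            "a": parsing_ops.FixedLenFeature(
                (1, 3), dtypes.int64, default_value=[1, 2, 3]),
        })
    # parsed["a"] has shape (1, 3) -- no leading [batch] axis, unlike
    # parse_example; the default fills in when "a" is absent.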
-class ParseSequenceExampleTest(tf.test.TestCase):
+class ParseSequenceExampleTest(test.TestCase):
def testCreateSequenceExample(self):
value = sequence_example(
@@ -663,14 +731,17 @@ class ParseSequenceExampleTest(tf.test.TestCase):
"global_feature": float_feature([1, 2, 3]),
}),
feature_lists=feature_lists({
- "repeated_feature_2_frames": feature_list([
- bytes_feature([b"a", b"b", b"c"]),
- bytes_feature([b"a", b"d", b"e"])
- ]),
- "repeated_feature_3_frames": feature_list([
- int64_feature([3, 4, 5, 6, 7]), int64_feature([-1, 0, 0, 0, 0]),
- int64_feature([1, 2, 3, 4, 5])
- ])
+ "repeated_feature_2_frames":
+ feature_list([
+ bytes_feature([b"a", b"b", b"c"]),
+ bytes_feature([b"a", b"d", b"e"])
+ ]),
+ "repeated_feature_3_frames":
+ feature_list([
+ int64_feature([3, 4, 5, 6, 7]),
+ int64_feature([-1, 0, 0, 0, 0]),
+ int64_feature([1, 2, 3, 4, 5])
+ ])
}))
value.SerializeToString() # Smoke test
@@ -684,16 +755,17 @@ class ParseSequenceExampleTest(tf.test.TestCase):
with self.test_session() as sess:
if expected_err:
- with self.assertRaisesWithPredicateMatch(
- expected_err[0], expected_err[1]):
- c_out, fl_out = tf.parse_single_sequence_example(**kwargs)
+ with self.assertRaisesWithPredicateMatch(expected_err[0],
+ expected_err[1]):
+ c_out, fl_out = parsing_ops.parse_single_sequence_example(**kwargs)
if c_out:
sess.run(flatten_values_tensors_or_sparse(c_out.values()))
if fl_out:
sess.run(flatten_values_tensors_or_sparse(fl_out.values()))
else:
# Returns dicts w/ Tensors and SparseTensors.
- context_out, feat_list_out = tf.parse_single_sequence_example(**kwargs)
+ context_out, feat_list_out = parsing_ops.parse_single_sequence_example(
+ **kwargs)
context_result = sess.run(
flatten_values_tensors_or_sparse(context_out.values(
))) if context_out else []
@@ -710,10 +782,10 @@ class ParseSequenceExampleTest(tf.test.TestCase):
# properly check.
if "context_features" in kwargs:
for k, f in kwargs["context_features"].items():
- if isinstance(f, tf.FixedLenFeature) and f.shape is not None:
+ if isinstance(f, parsing_ops.FixedLenFeature) and f.shape is not None:
self.assertEqual(
tuple(context_out[k].get_shape().as_list()), f.shape)
- elif isinstance(f, tf.VarLenFeature):
+ elif isinstance(f, parsing_ops.VarLenFeature):
self.assertEqual(
tuple(context_out[k].indices.get_shape().as_list()), (None, 1))
self.assertEqual(
@@ -722,18 +794,20 @@ class ParseSequenceExampleTest(tf.test.TestCase):
tuple(context_out[k].dense_shape.get_shape().as_list()), (1,))
def testSequenceExampleWithSparseAndDenseContext(self):
- original = sequence_example(context=features({"c": float_feature([3, 4]),
- "st_a": float_feature(
- [3.0, 4.0])}))
+ original = sequence_example(context=features({
+ "c": float_feature([3, 4]),
+ "st_a": float_feature([3.0, 4.0])
+ }))
serialized = original.SerializeToString()
- expected_st_a = (np.array(
- [[0], [1]], dtype=np.int64), # indices
- np.array(
- [3.0, 4.0], dtype=np.float32), # values
- np.array(
- [2], dtype=np.int64)) # shape: num_features = 2
+ expected_st_a = (
+ np.array(
+ [[0], [1]], dtype=np.int64), # indices
+ np.array(
+ [3.0, 4.0], dtype=np.float32), # values
+ np.array(
+ [2], dtype=np.int64)) # shape: num_features = 2
a_default = [1, 2, 3]
b_default = np.random.rand(3, 3).astype(bytes)
@@ -747,34 +821,39 @@ class ParseSequenceExampleTest(tf.test.TestCase):
self._test(
{
- "example_name": "in1",
- "serialized": tf.convert_to_tensor(serialized),
+ "example_name":
+ "in1",
+ "serialized":
+ ops.convert_to_tensor(serialized),
"context_features": {
- "st_a": tf.VarLenFeature(tf.float32),
- "a": tf.FixedLenFeature(
- (1, 3), tf.int64, default_value=a_default),
- "b": tf.FixedLenFeature(
- (3, 3), tf.string, default_value=b_default),
+ "st_a":
+ parsing_ops.VarLenFeature(dtypes.float32),
+ "a":
+ parsing_ops.FixedLenFeature(
+ (1, 3), dtypes.int64, default_value=a_default),
+ "b":
+ parsing_ops.FixedLenFeature(
+ (3, 3), dtypes.string, default_value=b_default),
# Feature "c" must be provided, since it has no default_value.
- "c": tf.FixedLenFeature((2,), tf.float32),
+ "c":
+ parsing_ops.FixedLenFeature((2,), dtypes.float32),
}
},
expected_context_values=expected_context_output)
def testSequenceExampleWithMultipleSizeFeatureLists(self):
original = sequence_example(feature_lists=feature_lists({
- "a": feature_list([
- int64_feature([-1, 0, 1]),
- int64_feature([2, 3, 4]),
- int64_feature([5, 6, 7]),
- int64_feature([8, 9, 10]),
- ]),
- "b": feature_list([
- bytes_feature([b"r00", b"r01", b"r10", b"r11"])
- ]),
- "c": feature_list([
- float_feature([3, 4]), float_feature([-1, 2])
- ]),
+ "a":
+ feature_list([
+ int64_feature([-1, 0, 1]),
+ int64_feature([2, 3, 4]),
+ int64_feature([5, 6, 7]),
+ int64_feature([8, 9, 10]),
+ ]),
+ "b":
+ feature_list([bytes_feature([b"r00", b"r01", b"r10", b"r11"])]),
+ "c":
+ feature_list([float_feature([3, 4]), float_feature([-1, 2])]),
}))
serialized = original.SerializeToString()
@@ -804,30 +883,38 @@ class ParseSequenceExampleTest(tf.test.TestCase):
self._test(
{
- "example_name": "in1",
- "serialized": tf.convert_to_tensor(serialized),
+ "example_name":
+ "in1",
+ "serialized":
+ ops.convert_to_tensor(serialized),
"sequence_features": {
- "a": tf.FixedLenSequenceFeature((1, 3), tf.int64),
- "b": tf.FixedLenSequenceFeature((2, 2), tf.string),
- "c": tf.FixedLenSequenceFeature((2,), tf.float32),
- "d": tf.FixedLenSequenceFeature(
- (5,), tf.float32, allow_missing=True),
+ "a":
+ parsing_ops.FixedLenSequenceFeature((1, 3), dtypes.int64),
+ "b":
+ parsing_ops.FixedLenSequenceFeature((2, 2), dtypes.string),
+ "c":
+ parsing_ops.FixedLenSequenceFeature((2,), dtypes.float32),
+ "d":
+ parsing_ops.FixedLenSequenceFeature(
+ (5,), dtypes.float32, allow_missing=True),
}
},
expected_feat_list_values=expected_feature_list_output)
def testSequenceExampleWithoutDebugName(self):
original = sequence_example(feature_lists=feature_lists({
- "a": feature_list([
- int64_feature([3, 4]), int64_feature([1, 0])
- ]),
- "st_a": feature_list([
- float_feature([3.0, 4.0]), float_feature([5.0]), float_feature([])
- ]),
- "st_b": feature_list([
- bytes_feature([b"a"]), bytes_feature([]), bytes_feature([]),
- bytes_feature([b"b", b"c"])
- ])
+ "a":
+ feature_list([int64_feature([3, 4]), int64_feature([1, 0])]),
+ "st_a":
+ feature_list([
+ float_feature([3.0, 4.0]), float_feature([5.0]),
+ float_feature([])
+ ]),
+ "st_b":
+ feature_list([
+ bytes_feature([b"a"]), bytes_feature([]), bytes_feature([]),
+ bytes_feature([b"b", b"c"])
+ ])
}))
serialized = original.SerializeToString()
@@ -866,28 +953,30 @@ class ParseSequenceExampleTest(tf.test.TestCase):
self._test(
{
- "serialized": tf.convert_to_tensor(serialized),
+ "serialized": ops.convert_to_tensor(serialized),
"sequence_features": {
- "st_a": tf.VarLenFeature(tf.float32),
- "st_b": tf.VarLenFeature(tf.string),
- "st_c": tf.VarLenFeature(tf.int64),
- "a": tf.FixedLenSequenceFeature((2,), tf.int64),
+ "st_a": parsing_ops.VarLenFeature(dtypes.float32),
+ "st_b": parsing_ops.VarLenFeature(dtypes.string),
+ "st_c": parsing_ops.VarLenFeature(dtypes.int64),
+ "a": parsing_ops.FixedLenSequenceFeature((2,), dtypes.int64),
}
},
expected_feat_list_values=expected_feature_list_output)
def testSequenceExampleWithSparseAndDenseFeatureLists(self):
original = sequence_example(feature_lists=feature_lists({
- "a": feature_list([
- int64_feature([3, 4]), int64_feature([1, 0])
- ]),
- "st_a": feature_list([
- float_feature([3.0, 4.0]), float_feature([5.0]), float_feature([])
- ]),
- "st_b": feature_list([
- bytes_feature([b"a"]), bytes_feature([]), bytes_feature([]),
- bytes_feature([b"b", b"c"])
- ])
+ "a":
+ feature_list([int64_feature([3, 4]), int64_feature([1, 0])]),
+ "st_a":
+ feature_list([
+ float_feature([3.0, 4.0]), float_feature([5.0]),
+ float_feature([])
+ ]),
+ "st_b":
+ feature_list([
+ bytes_feature([b"a"]), bytes_feature([]), bytes_feature([]),
+ bytes_feature([b"b", b"c"])
+ ])
}))
serialized = original.SerializeToString()
@@ -927,21 +1016,19 @@ class ParseSequenceExampleTest(tf.test.TestCase):
self._test(
{
"example_name": "in1",
- "serialized": tf.convert_to_tensor(serialized),
+ "serialized": ops.convert_to_tensor(serialized),
"sequence_features": {
- "st_a": tf.VarLenFeature(tf.float32),
- "st_b": tf.VarLenFeature(tf.string),
- "st_c": tf.VarLenFeature(tf.int64),
- "a": tf.FixedLenSequenceFeature((2,), tf.int64),
+ "st_a": parsing_ops.VarLenFeature(dtypes.float32),
+ "st_b": parsing_ops.VarLenFeature(dtypes.string),
+ "st_c": parsing_ops.VarLenFeature(dtypes.int64),
+ "a": parsing_ops.FixedLenSequenceFeature((2,), dtypes.int64),
}
},
expected_feat_list_values=expected_feature_list_output)
def testSequenceExampleListWithInconsistentDataFails(self):
original = sequence_example(feature_lists=feature_lists({
- "a": feature_list([
- int64_feature([-1, 0]), float_feature([2, 3])
- ])
+ "a": feature_list([int64_feature([-1, 0]), float_feature([2, 3])])
}))
serialized = original.SerializeToString()
@@ -949,18 +1036,17 @@ class ParseSequenceExampleTest(tf.test.TestCase):
self._test(
{
"example_name": "in1",
- "serialized": tf.convert_to_tensor(serialized),
- "sequence_features": {"a": tf.FixedLenSequenceFeature(
- (2,), tf.int64)}
+ "serialized": ops.convert_to_tensor(serialized),
+ "sequence_features": {
+ "a": parsing_ops.FixedLenSequenceFeature((2,), dtypes.int64)
+ }
},
- expected_err=(tf.OpError, "Feature list: a, Index: 1."
+ expected_err=(errors_impl.OpError, "Feature list: a, Index: 1."
" Data types don't match. Expected type: int64"))
def testSequenceExampleListWithWrongDataTypeFails(self):
original = sequence_example(feature_lists=feature_lists({
- "a": feature_list([
- float_feature([2, 3])
- ])
+ "a": feature_list([float_feature([2, 3])])
}))
serialized = original.SerializeToString()
@@ -968,20 +1054,22 @@ class ParseSequenceExampleTest(tf.test.TestCase):
self._test(
{
"example_name": "in1",
- "serialized": tf.convert_to_tensor(serialized),
- "sequence_features": {"a": tf.FixedLenSequenceFeature(
- (2,), tf.int64)}
+ "serialized": ops.convert_to_tensor(serialized),
+ "sequence_features": {
+ "a": parsing_ops.FixedLenSequenceFeature((2,), dtypes.int64)
+ }
},
- expected_err=(tf.OpError,
+ expected_err=(errors_impl.OpError,
"Feature list: a, Index: 0. Data types don't match."
" Expected type: int64"))
def testSequenceExampleListWithWrongSparseDataTypeFails(self):
original = sequence_example(feature_lists=feature_lists({
- "a": feature_list([
- int64_feature([3, 4]), int64_feature([1, 2]),
- float_feature([2.0, 3.0])
- ])
+ "a":
+ feature_list([
+ int64_feature([3, 4]), int64_feature([1, 2]),
+ float_feature([2.0, 3.0])
+ ])
}))
serialized = original.SerializeToString()
@@ -989,19 +1077,19 @@ class ParseSequenceExampleTest(tf.test.TestCase):
self._test(
{
"example_name": "in1",
- "serialized": tf.convert_to_tensor(serialized),
- "sequence_features": {"a": tf.FixedLenSequenceFeature(
- (2,), tf.int64)}
+ "serialized": ops.convert_to_tensor(serialized),
+ "sequence_features": {
+ "a": parsing_ops.FixedLenSequenceFeature((2,), dtypes.int64)
+ }
},
- expected_err=(tf.OpError, "Name: in1, Feature list: a, Index: 2."
+ expected_err=(errors_impl.OpError,
+ "Name: in1, Feature list: a, Index: 2."
" Data types don't match. Expected type: int64"
" Feature is: float_list"))
def testSequenceExampleListWithWrongShapeFails(self):
original = sequence_example(feature_lists=feature_lists({
- "a": feature_list([
- int64_feature([2, 3]), int64_feature([2, 3, 4])
- ]),
+ "a": feature_list([int64_feature([2, 3]), int64_feature([2, 3, 4])]),
}))
serialized = original.SerializeToString()
@@ -1009,11 +1097,12 @@ class ParseSequenceExampleTest(tf.test.TestCase):
self._test(
{
"example_name": "in1",
- "serialized": tf.convert_to_tensor(serialized),
- "sequence_features": {"a": tf.FixedLenSequenceFeature(
- (2,), tf.int64)}
+ "serialized": ops.convert_to_tensor(serialized),
+ "sequence_features": {
+ "a": parsing_ops.FixedLenSequenceFeature((2,), dtypes.int64)
+ }
},
- expected_err=(tf.OpError, r"Name: in1, Key: a, Index: 1."
+ expected_err=(errors_impl.OpError, r"Name: in1, Key: a, Index: 1."
r" Number of int64 values != expected."
r" values size: 3 but output shape: \[2\]"))
@@ -1025,40 +1114,41 @@ class ParseSequenceExampleTest(tf.test.TestCase):
self._test(
{
"example_name": "in1",
- "serialized": tf.convert_to_tensor(original.SerializeToString()),
- "sequence_features": {"a": tf.FixedLenSequenceFeature(
- (2,), tf.int64)}
+ "serialized": ops.convert_to_tensor(original.SerializeToString()),
+ "sequence_features": {
+ "a": parsing_ops.FixedLenSequenceFeature((2,), dtypes.int64)
+ }
},
expected_err=(
- tf.OpError,
+ errors_impl.OpError,
"Name: in1, Feature list 'a' is required but could not be found."
" Did you mean to include it in"
" feature_list_dense_missing_assumed_empty or"
" feature_list_dense_defaults?"))
-class DecodeJSONExampleTest(tf.test.TestCase):
+class DecodeJSONExampleTest(test.TestCase):
def _testRoundTrip(self, examples):
with self.test_session() as sess:
examples = np.array(examples, dtype=np.object)
- json_tensor = tf.constant(
+ json_tensor = constant_op.constant(
[json_format.MessageToJson(m) for m in examples.flatten()],
shape=examples.shape,
- dtype=tf.string)
- binary_tensor = tf.decode_json_example(json_tensor)
+ dtype=dtypes.string)
+ binary_tensor = parsing_ops.decode_json_example(json_tensor)
binary_val = sess.run(binary_tensor)
if examples.shape:
self.assertShapeEqual(binary_val, json_tensor)
for input_example, output_binary in zip(
np.array(examples).flatten(), binary_val.flatten()):
- output_example = tf.train.Example()
+ output_example = example_pb2.Example()
output_example.ParseFromString(output_binary)
self.assertProtoEquals(input_example, output_example)
else:
- output_example = tf.train.Example()
+ output_example = example_pb2.Example()
output_example.ParseFromString(binary_val)
self.assertProtoEquals(examples.item(), output_example)
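The round-trip helper above is the whole decode_json_example contract: JSON produced by protobuf's json_format maps back to binary Example protos, byte for byte. A condensed sketch of the same flow (illustrative; protos is assumed to be a list of example_pb2.Example messages and sess an active session, as in the test):

    json_tensor = constant_op.constant(
        [json_format.MessageToJson(m) for m in protos], dtype=dtypes.string)
    binary_tensor = parsing_ops.decode_json_example(json_tensor)
    for raw in sess.run(binary_tensor):
        msg = example_pb2.Example()
        msg.ParseFromString(raw)  # recovers each original proto exactly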
@@ -1071,56 +1161,78 @@ class DecodeJSONExampleTest(tf.test.TestCase):
def testDenseFeaturesScalar(self):
self._testRoundTrip(
- example(features=features({"a": float_feature([1, 1, 3])})))
+ example(features=features({
+ "a": float_feature([1, 1, 3])
+ })))
def testDenseFeaturesVector(self):
self._testRoundTrip([
- example(features=features({"a": float_feature([1, 1, 3])})),
- example(features=features({"a": float_feature([-1, -1, 2])})),
+ example(features=features({
+ "a": float_feature([1, 1, 3])
+ })),
+ example(features=features({
+ "a": float_feature([-1, -1, 2])
+ })),
])
def testDenseFeaturesMatrix(self):
self._testRoundTrip([
- [example(features=features({"a": float_feature([1, 1, 3])}))],
- [example(features=features({"a": float_feature([-1, -1, 2])}))],
+ [example(features=features({
+ "a": float_feature([1, 1, 3])
+ }))],
+ [example(features=features({
+ "a": float_feature([-1, -1, 2])
+ }))],
])
def testSparseFeatures(self):
self._testRoundTrip([
- example(features=features({"st_c": float_feature([3, 4])})),
- example(features=features({"st_c": float_feature([])})),
- example(features=features({"st_d": feature()})),
- example(features=features({"st_c": float_feature([1, 2, -1]),
- "st_d": bytes_feature([b"hi"])})),
+ example(features=features({
+ "st_c": float_feature([3, 4])
+ })),
+ example(features=features({
+ "st_c": float_feature([])
+ })),
+ example(features=features({
+ "st_d": feature()
+ })),
+ example(features=features({
+ "st_c": float_feature([1, 2, -1]),
+ "st_d": bytes_feature([b"hi"])
+ })),
])
def testSerializedContainingBytes(self):
aname = "a"
bname = "b*has+a:tricky_name"
self._testRoundTrip([
- example(features=features({aname: float_feature([1, 1]),
- bname: bytes_feature([b"b0_str"])})),
- example(features=features({aname: float_feature([-1, -1]),
- bname: bytes_feature([b"b1"])})),
+ example(features=features({
+ aname: float_feature([1, 1]),
+ bname: bytes_feature([b"b0_str"])
+ })),
+ example(features=features({
+ aname: float_feature([-1, -1]),
+ bname: bytes_feature([b"b1"])
+ })),
])
def testInvalidSyntax(self):
with self.test_session() as sess:
- json_tensor = tf.constant(["{]"])
- binary_tensor = tf.decode_json_example(json_tensor)
+ json_tensor = constant_op.constant(["{]"])
+ binary_tensor = parsing_ops.decode_json_example(json_tensor)
with self.assertRaisesOpError("Error while parsing JSON"):
sess.run(binary_tensor)
-class ParseTensorOpTest(tf.test.TestCase):
+class ParseTensorOpTest(test.TestCase):
def testToFloat32(self):
with self.test_session():
expected = np.random.rand(3, 4, 5).astype(np.float32)
tensor_proto = tensor_util.make_tensor_proto(expected)
- serialized = tf.placeholder(tf.string)
- tensor = tf.parse_tensor(serialized, tf.float32)
+ serialized = array_ops.placeholder(dtypes.string)
+ tensor = parsing_ops.parse_tensor(serialized, dtypes.float32)
result = tensor.eval(
feed_dict={serialized: tensor_proto.SerializeToString()})
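parse_tensor is the inverse of tensor_util.make_tensor_proto followed by SerializeToString: it turns a serialized TensorProto string back into a Tensor, checking the requested out_type against the proto's recorded dtype at run time, which is what the mismatch tests below exercise. Distilled from this hunk (reusing the file's imports):

    expected = np.random.rand(3, 4, 5).astype(np.float32)
    proto_bytes = tensor_util.make_tensor_proto(expected).SerializeToString()
    serialized = array_ops.placeholder(dtypes.string)
    tensor = parsing_ops.parse_tensor(serialized, dtypes.float32)
    result = tensor.eval(feed_dict={serialized: proto_bytes})  # == expected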
@@ -1132,8 +1244,8 @@ class ParseTensorOpTest(tf.test.TestCase):
expected = np.random.rand(3, 4, 5).astype(np.uint8)
tensor_proto = tensor_util.make_tensor_proto(expected)
- serialized = tf.placeholder(tf.string)
- tensor = tf.parse_tensor(serialized, tf.uint8)
+ serialized = array_ops.placeholder(dtypes.string)
+ tensor = parsing_ops.parse_tensor(serialized, dtypes.uint8)
result = tensor.eval(
feed_dict={serialized: tensor_proto.SerializeToString()})
@@ -1145,8 +1257,8 @@ class ParseTensorOpTest(tf.test.TestCase):
expected = np.random.rand(3, 4, 5).astype(np.uint8)
tensor_proto = tensor_util.make_tensor_proto(expected)
- serialized = tf.placeholder(tf.string)
- tensor = tf.parse_tensor(serialized, tf.uint16)
+ serialized = array_ops.placeholder(dtypes.string)
+ tensor = parsing_ops.parse_tensor(serialized, dtypes.uint16)
with self.assertRaisesOpError(
r"Type mismatch between parsed tensor \(uint8\) and dtype "
@@ -1155,8 +1267,8 @@ class ParseTensorOpTest(tf.test.TestCase):
def testInvalidInput(self):
with self.test_session():
- serialized = tf.placeholder(tf.string)
- tensor = tf.parse_tensor(serialized, tf.uint16)
+ serialized = array_ops.placeholder(dtypes.string)
+ tensor = parsing_ops.parse_tensor(serialized, dtypes.uint16)
with self.assertRaisesOpError(
"Could not parse `serialized` as TensorProto: 'bogus'"):
@@ -1168,4 +1280,4 @@ class ParseTensorOpTest(tf.test.TestCase):
if __name__ == "__main__":
- tf.test.main()
+ test.main()
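That closes out parsing_ops_test.py. The pattern repeated through every hunk above is the commit's point: replace the "hourglass" import, through which the entire API funnels, with direct imports of the few modules each test actually touches, which lets the accompanying BUILD dependencies stay narrow. Schematically:

    # Before: one hourglass import; every symbol routes through tf.*
    import tensorflow as tf
    tensor = tf.parse_tensor(serialized, tf.float32)

    # After: the test names its real dependencies explicitly.
    from tensorflow.python.framework import dtypes
    from tensorflow.python.ops import parsing_ops
    tensor = parsing_ops.parse_tensor(serialized, dtypes.float32)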
diff --git a/tensorflow/python/kernel_tests/partitioned_variables_test.py b/tensorflow/python/kernel_tests/partitioned_variables_test.py
index 225d593bb3..0d5a9339bc 100644
--- a/tensorflow/python/kernel_tests/partitioned_variables_test.py
+++ b/tensorflow/python/kernel_tests/partitioned_variables_test.py
@@ -12,38 +12,53 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for partitioned_variables.py."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
-import tensorflow as tf
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import init_ops
+from tensorflow.python.ops import partitioned_variables
+from tensorflow.python.ops import random_ops
+from tensorflow.python.ops import variable_scope
+from tensorflow.python.ops import variables
+from tensorflow.python.platform import test
-class PartitionerCreatorsTest(tf.test.TestCase):
+
+class PartitionerCreatorsTest(test.TestCase):
def testFixedSizePartitioner(self):
with self.test_session():
- partitioner = tf.fixed_size_partitioner(5, axis=0)
- with tf.variable_scope("root", partitioner=partitioner):
- v0 = tf.get_variable("v0", dtype=tf.float32, shape=(10, 10))
+ partitioner = partitioned_variables.fixed_size_partitioner(5, axis=0)
+ with variable_scope.variable_scope("root", partitioner=partitioner):
+ v0 = variable_scope.get_variable(
+ "v0", dtype=dtypes.float32, shape=(10, 10))
v0_list = v0._get_variable_list()
v0_part = v0._get_partitions()
self.assertEqual(len(v0_list), 5)
self.assertAllEqual(v0_part, (5, 1))
- def _testVariableAxisSizePartitioner(self, name, axis, max_shard_bytes,
+ def _testVariableAxisSizePartitioner(self,
+ name,
+ axis,
+ max_shard_bytes,
expected_axis_shards,
expected_partitions,
max_shards=None):
- partitioner = tf.variable_axis_size_partitioner(
+ partitioner = partitioned_variables.variable_axis_size_partitioner(
axis=axis, max_shard_bytes=max_shard_bytes, max_shards=max_shards)
- with tf.variable_scope("root", partitioner=partitioner):
- v0 = tf.get_variable(name, dtype=tf.float32, shape=(4, 8, 16, 32))
+ with variable_scope.variable_scope("root", partitioner=partitioner):
+ v0 = variable_scope.get_variable(
+ name, dtype=dtypes.float32, shape=(4, 8, 16, 32))
v0_list = v0._get_variable_list()
v0_part = v0._get_partitions()
self.assertEqual(len(v0_list), expected_axis_shards)
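The next hunks reformat calls whose comments already carry the sizing arithmetic. As a worked check of those numbers (an illustration mirroring the test comments, not the partitioner's source): a float32 variable of shape (4, 8, 16, 32) split on axis=2 with max_shard_bytes=32768 comes out as two shards.

    import numpy as np

    shape, axis, max_shard_bytes = (4, 8, 16, 32), 2, 32768
    bytes_per_slice = 4 * np.prod(shape) // shape[axis]            # 4096
    slices_per_shard = max(1, max_shard_bytes // bytes_per_slice)  # 8
    axis_shards = int(np.ceil(shape[axis] / slices_per_shard))     # 16 / 8 = 2
    # Matches expected_axis_shards=2, expected_partitions=(1, 1, 2, 1) for "v2".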
@@ -62,35 +77,43 @@ class PartitionerCreatorsTest(tf.test.TestCase):
# Now partition it in different ways...
# No need to slice: bytes_per_slice * dim0 = 65536 < max_shard_bytes
- self._testVariableAxisSizePartitioner("v0", axis=0,
- max_shard_bytes=131072,
- expected_axis_shards=1,
- expected_partitions=(1, 1, 1, 1))
+ self._testVariableAxisSizePartitioner(
+ "v0",
+ axis=0,
+ max_shard_bytes=131072,
+ expected_axis_shards=1,
+ expected_partitions=(1, 1, 1, 1))
# Slice exactly once: bytes_per_slice * dim1 = 65536 = max_shard_bytes
- self._testVariableAxisSizePartitioner("v1", axis=1,
- max_shard_bytes=65536,
- expected_axis_shards=1,
- expected_partitions=(1, 1, 1, 1))
+ self._testVariableAxisSizePartitioner(
+ "v1",
+ axis=1,
+ max_shard_bytes=65536,
+ expected_axis_shards=1,
+ expected_partitions=(1, 1, 1, 1))
# Slice into 2 parts:
# bytes_per_slice = 4096
# slices_per_shard = 32768 / 4096 = 8
# axis_shards = 16 / 8 = 2
- self._testVariableAxisSizePartitioner("v2", axis=2,
- max_shard_bytes=32768,
- expected_axis_shards=2,
- expected_partitions=(1, 1, 2, 1))
+ self._testVariableAxisSizePartitioner(
+ "v2",
+ axis=2,
+ max_shard_bytes=32768,
+ expected_axis_shards=2,
+ expected_partitions=(1, 1, 2, 1))
# This partitioner makes sure we maximize the number of shards along
# axis 3. Slice it into 32 parts:
# bytes_per_slice = 2048
# slices_per_shard = 2048 / 2048 = 1
# axis_shards = 32 / 1 = 32
- self._testVariableAxisSizePartitioner("v3a", axis=3,
- max_shard_bytes=2048,
- expected_axis_shards=32,
- expected_partitions=(1, 1, 1, 32))
+ self._testVariableAxisSizePartitioner(
+ "v3a",
+ axis=3,
+ max_shard_bytes=2048,
+ expected_axis_shards=32,
+ expected_partitions=(1, 1, 1, 32))
# This partitioner makes sure we do not go past the bound of allowable
# number of shards along axis 3.
@@ -99,34 +122,41 @@ class PartitionerCreatorsTest(tf.test.TestCase):
# slices_per_shard = max(1, 1024 / 2048) = 1
# axis_shards = 32 / 1 = 32
# Slice into max of 32 parts because: max_shard_bytes < bytes_per_slice
- self._testVariableAxisSizePartitioner("v3b", axis=3,
- max_shard_bytes=1024,
- expected_axis_shards=32,
- expected_partitions=(1, 1, 1, 32))
+ self._testVariableAxisSizePartitioner(
+ "v3b",
+ axis=3,
+ max_shard_bytes=1024,
+ expected_axis_shards=32,
+ expected_partitions=(1, 1, 1, 32))
# Specify max_shards so that it won't affect sharding.
- self._testVariableAxisSizePartitioner("v3c", axis=3,
- max_shard_bytes=1024,
- expected_axis_shards=32,
- expected_partitions=(1, 1, 1, 32),
- max_shards=33)
+ self._testVariableAxisSizePartitioner(
+ "v3c",
+ axis=3,
+ max_shard_bytes=1024,
+ expected_axis_shards=32,
+ expected_partitions=(1, 1, 1, 32),
+ max_shards=33)
# Specify max_shards so that it will affect sharding.
- self._testVariableAxisSizePartitioner("v3d", axis=3,
- max_shard_bytes=1024,
- expected_axis_shards=2,
- expected_partitions=(1, 1, 1, 2),
- max_shards=2)
+ self._testVariableAxisSizePartitioner(
+ "v3d",
+ axis=3,
+ max_shard_bytes=1024,
+ expected_axis_shards=2,
+ expected_partitions=(1, 1, 1, 2),
+ max_shards=2)
# Use the partitioner with strings
- partitioner_axis3_str = tf.variable_axis_size_partitioner(
+ partitioner_axis3_str = partitioned_variables.variable_axis_size_partitioner(
axis=3, max_shard_bytes=32768, bytes_per_string_element=8)
- with tf.variable_scope("root", partitioner=partitioner_axis3_str):
- v3str = tf.get_variable(
+ with variable_scope.variable_scope(
+ "root", partitioner=partitioner_axis3_str):
+ v3str = variable_scope.get_variable(
"v3str",
initializer=np.array([""] * 4 * 8 * 16 * 32).reshape(4, 8, 16, 32),
- dtype=tf.string,
+ dtype=dtypes.string,
shape=(4, 8, 16, 32))
v3str_list = v3str._get_variable_list()
v3str_part = v3str._get_partitions()
@@ -142,13 +172,13 @@ class PartitionerCreatorsTest(tf.test.TestCase):
self.assertAllEqual(v3str_part, (1, 1, 1, 4))
def _testMinMaxVariablePartitioner(self, max_partitions, axis, min_slice_size,
- var_name, var_shape,
- expected_axis_shards, expected_partitions):
- partitioner = tf.min_max_variable_partitioner(max_partitions=max_partitions,
- axis=axis,
- min_slice_size=min_slice_size)
- with tf.variable_scope("root", partitioner=partitioner):
- v0 = tf.get_variable(var_name, dtype=tf.float32, shape=var_shape)
+ var_name, var_shape, expected_axis_shards,
+ expected_partitions):
+ partitioner = partitioned_variables.min_max_variable_partitioner(
+ max_partitions=max_partitions, axis=axis, min_slice_size=min_slice_size)
+ with variable_scope.variable_scope("root", partitioner=partitioner):
+ v0 = variable_scope.get_variable(
+ var_name, dtype=dtypes.float32, shape=var_shape)
v0_list = v0._get_variable_list()
v0_part = v0._get_partitions()
self.assertEqual(len(v0_list), expected_axis_shards)
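For the min/max partitioner the sizing runs the other way: min_slice_size sets a floor on bytes per slice and max_partitions caps the count. A back-of-the-envelope sketch of the first case below (shape=[2048] float32, 2KB slices), illustrative only:

    total_bytes = 2048 * 4                    # float32 variable, shape [2048]
    min_slice_size = 2 << 10                  # 2KB floor per slice
    partitions = min(100, max(1, total_bytes // min_slice_size))
    # 8192 // 2048 = 4 slices, under max_partitions=100 -> expected_axis_shards=4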
@@ -157,88 +187,110 @@ class PartitionerCreatorsTest(tf.test.TestCase):
def testMinMaxVariablePartitioner(self):
with self.test_session():
# Partitioning a variable of shape=[2048] with a minimum of 2K per slice.
- self._testMinMaxVariablePartitioner(max_partitions=100, axis=0,
- min_slice_size=2 << 10,
- var_name="v0_0", var_shape=[2048],
- expected_axis_shards=4,
- expected_partitions=[4])
+ self._testMinMaxVariablePartitioner(
+ max_partitions=100,
+ axis=0,
+ min_slice_size=2 << 10,
+ var_name="v0_0",
+ var_shape=[2048],
+ expected_axis_shards=4,
+ expected_partitions=[4])
# Partitioning a variable of shape=[2048, 1024] with a minimum of 256K per
# slice.
- self._testMinMaxVariablePartitioner(max_partitions=100, axis=0,
- min_slice_size=256 << 10,
- var_name="v0", var_shape=[2048, 1024],
- expected_axis_shards=32,
- expected_partitions=[32, 1])
+ self._testMinMaxVariablePartitioner(
+ max_partitions=100,
+ axis=0,
+ min_slice_size=256 << 10,
+ var_name="v0",
+ var_shape=[2048, 1024],
+ expected_axis_shards=32,
+ expected_partitions=[32, 1])
# max_partitions restricts partitioning of the variable.
- self._testMinMaxVariablePartitioner(max_partitions=16, axis=0,
- min_slice_size=256 << 10,
- var_name="v1_max",
- var_shape=[2048, 1024],
- expected_axis_shards=16,
- expected_partitions=[16, 1])
- self._testMinMaxVariablePartitioner(max_partitions=1, axis=0,
- min_slice_size=256 << 10,
- var_name="v2_max",
- var_shape=[2048, 1024],
- expected_axis_shards=1,
- expected_partitions=[1, 1])
+ self._testMinMaxVariablePartitioner(
+ max_partitions=16,
+ axis=0,
+ min_slice_size=256 << 10,
+ var_name="v1_max",
+ var_shape=[2048, 1024],
+ expected_axis_shards=16,
+ expected_partitions=[16, 1])
+ self._testMinMaxVariablePartitioner(
+ max_partitions=1,
+ axis=0,
+ min_slice_size=256 << 10,
+ var_name="v2_max",
+ var_shape=[2048, 1024],
+ expected_axis_shards=1,
+ expected_partitions=[1, 1])
# Reducing/Increasing min_slice_size proportionately increases/reduces the
# number of partitions.
- self._testMinMaxVariablePartitioner(max_partitions=100, axis=0,
- min_slice_size=128 << 10,
- var_name="v3_slice",
- var_shape=[2048, 1024],
- expected_axis_shards=64,
- expected_partitions=[64, 1])
- self._testMinMaxVariablePartitioner(max_partitions=100, axis=0,
- min_slice_size=512 << 10,
- var_name="v4_slice",
- var_shape=[2048, 1024],
- expected_axis_shards=16,
- expected_partitions=[16, 1])
+ self._testMinMaxVariablePartitioner(
+ max_partitions=100,
+ axis=0,
+ min_slice_size=128 << 10,
+ var_name="v3_slice",
+ var_shape=[2048, 1024],
+ expected_axis_shards=64,
+ expected_partitions=[64, 1])
+ self._testMinMaxVariablePartitioner(
+ max_partitions=100,
+ axis=0,
+ min_slice_size=512 << 10,
+ var_name="v4_slice",
+ var_shape=[2048, 1024],
+ expected_axis_shards=16,
+ expected_partitions=[16, 1])
# Partitioning the variable along a different axis.
- self._testMinMaxVariablePartitioner(max_partitions=100, axis=1,
- min_slice_size=256 << 10,
- var_name="v5_axis",
- var_shape=[64, 1024, 1, 3],
- expected_axis_shards=3,
- expected_partitions=[1, 3, 1, 1])
- self._testMinMaxVariablePartitioner(max_partitions=100, axis=3,
- min_slice_size=256 << 10,
- var_name="v6_axis",
- var_shape=[64, 1024, 1, 3],
- expected_axis_shards=3,
- expected_partitions=[1, 1, 1, 3])
+ self._testMinMaxVariablePartitioner(
+ max_partitions=100,
+ axis=1,
+ min_slice_size=256 << 10,
+ var_name="v5_axis",
+ var_shape=[64, 1024, 1, 3],
+ expected_axis_shards=3,
+ expected_partitions=[1, 3, 1, 1])
+ self._testMinMaxVariablePartitioner(
+ max_partitions=100,
+ axis=3,
+ min_slice_size=256 << 10,
+ var_name="v6_axis",
+ var_shape=[64, 1024, 1, 3],
+ expected_axis_shards=3,
+ expected_partitions=[1, 1, 1, 3])
# Cannot partition the variable more than its shape allows.
- self._testMinMaxVariablePartitioner(max_partitions=100, axis=0,
- min_slice_size=256 << 10,
- var_name="v7_shape",
- var_shape=[16, 128, 1024],
- expected_axis_shards=16,
- expected_partitions=[16, 1, 1])
- self._testMinMaxVariablePartitioner(max_partitions=100, axis=0,
- min_slice_size=256 << 10,
- var_name="v8_shape",
- var_shape=[4, 512, 1024],
- expected_axis_shards=4,
- expected_partitions=[4, 1, 1])
-
-
-def _IotaInitializer(shape, dtype=tf.float32, partition_info=None):
- assert dtype == tf.float32
+ self._testMinMaxVariablePartitioner(
+ max_partitions=100,
+ axis=0,
+ min_slice_size=256 << 10,
+ var_name="v7_shape",
+ var_shape=[16, 128, 1024],
+ expected_axis_shards=16,
+ expected_partitions=[16, 1, 1])
+ self._testMinMaxVariablePartitioner(
+ max_partitions=100,
+ axis=0,
+ min_slice_size=256 << 10,
+ var_name="v8_shape",
+ var_shape=[4, 512, 1024],
+ expected_axis_shards=4,
+ expected_partitions=[4, 1, 1])
+
+
+def _IotaInitializer(shape, dtype=dtypes.float32, partition_info=None):
+ assert dtype == dtypes.float32
if len(shape) == 1:
return range(shape[0])
else:
val = _IotaInitializer(shape[1:], dtype)
- return [[(10 ** i) * v for v in val] for i in range(shape[0])]
+ return [[(10**i) * v for v in val] for i in range(shape[0])]
-class PartitionedVariablesTestCase(tf.test.TestCase):
+class PartitionedVariablesTestCase(test.TestCase):
def _TestSaveSpec(self, slices, expected_specs):
self.assertEqual(len(expected_specs), len(slices))
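_TestSaveSpec compares _save_slice_info.spec strings of the form "2 4 0,2:0,2": the variable's full shape, then an offset,length pair per dimension, ":"-separated. A hypothetical decoder to make the fixtures below readable (not TensorFlow API):

    def parse_save_spec(spec):
        """Split e.g. '2 4 0,2:2,2' into ([2, 4], [(0, 2), (2, 2)])."""
        shape_part, slice_part = spec.rsplit(" ", 1)
        full_shape = [int(d) for d in shape_part.split()]
        slices = [tuple(int(x) for x in dim.split(","))
                  for dim in slice_part.split(":")]
        return full_shape, slices  # per-dim (offset, length) of this partition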
@@ -247,33 +299,36 @@ class PartitionedVariablesTestCase(tf.test.TestCase):
def testVecConstantInit(self):
with self.test_session():
- rnd_par = tf.constant([1, 2, 3, 4])
- vs = tf.create_partitioned_variables([4], [4], rnd_par)
- tf.global_variables_initializer().run()
- val = tf.concat_v2(vs, 0).eval()
+ rnd_par = constant_op.constant([1, 2, 3, 4])
+ vs = partitioned_variables.create_partitioned_variables([4], [4], rnd_par)
+ variables.global_variables_initializer().run()
+ val = array_ops.concat_v2(vs, 0).eval()
rnd = rnd_par.eval()
self.assertAllClose(rnd, val)
- self.assertEqual([tf.int32] * 4, [v.dtype.base_dtype for v in vs])
+ self.assertEqual([dtypes.int32] * 4, [v.dtype.base_dtype for v in vs])
self._TestSaveSpec(vs, ["4 0,1", "4 1,1", "4 2,1", "4 3,1"])
def testConstantInit(self):
with self.test_session():
- rnd_par = tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]])
- vs = tf.create_partitioned_variables([2, 4], [1, 2], rnd_par)
- tf.global_variables_initializer().run()
- val = tf.concat_v2(vs, 1).eval()
+ rnd_par = constant_op.constant([[1, 2, 3, 4], [5, 6, 7, 8]])
+ vs = partitioned_variables.create_partitioned_variables([2, 4], [1, 2],
+ rnd_par)
+ variables.global_variables_initializer().run()
+ val = array_ops.concat_v2(vs, 1).eval()
rnd = rnd_par.eval()
self.assertAllClose(rnd, val)
- self.assertEqual([tf.int32] * 2, [v.dtype.base_dtype for v in vs])
+ self.assertEqual([dtypes.int32] * 2, [v.dtype.base_dtype for v in vs])
self._TestSaveSpec(vs, ["2 4 0,2:0,2", "2 4 0,2:2,2"])
def testName(self):
with self.test_session():
- rnd_par = tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]])
- with tf.variable_scope("hi"):
- vs1 = tf.create_partitioned_variables([2, 4], [1, 2], rnd_par)
- vs2 = tf.create_partitioned_variables([2, 4], [1, 2], rnd_par)
- tf.global_variables_initializer().run()
+ rnd_par = constant_op.constant([[1, 2, 3, 4], [5, 6, 7, 8]])
+ with variable_scope.variable_scope("hi"):
+ vs1 = partitioned_variables.create_partitioned_variables([2, 4], [1, 2],
+ rnd_par)
+ vs2 = partitioned_variables.create_partitioned_variables([2, 4], [1, 2],
+ rnd_par)
+ variables.global_variables_initializer().run()
var1_name = vs1[0]._save_slice_info.full_name
var2_name = vs2[0]._save_slice_info.full_name
self.assertEqual("hi/PartitionedVariable", var1_name)
@@ -284,14 +339,14 @@ class PartitionedVariablesTestCase(tf.test.TestCase):
self.assertEqual(var2_name + "/part_1:0", vs2[1].name)
# Test same variable.
with self.test_session():
- rnd_par = tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]])
- with tf.variable_scope("hola") as vs:
- vs1 = tf.create_partitioned_variables(
- [2, 4], [1, 2], rnd_par, dtype=tf.int32)
- with tf.variable_scope(vs, reuse=True):
- vs2 = tf.create_partitioned_variables(
- [2, 4], [1, 2], rnd_par, dtype=tf.int32)
- tf.global_variables_initializer().run()
+ rnd_par = constant_op.constant([[1, 2, 3, 4], [5, 6, 7, 8]])
+ with variable_scope.variable_scope("hola") as vs:
+ vs1 = partitioned_variables.create_partitioned_variables(
+ [2, 4], [1, 2], rnd_par, dtype=dtypes.int32)
+ with variable_scope.variable_scope(vs, reuse=True):
+ vs2 = partitioned_variables.create_partitioned_variables(
+ [2, 4], [1, 2], rnd_par, dtype=dtypes.int32)
+ variables.global_variables_initializer().run()
var1_name = vs1[0]._save_slice_info.full_name
var2_name = vs2[0]._save_slice_info.full_name
self.assertEqual("hola/PartitionedVariable", var1_name)
@@ -302,11 +357,13 @@ class PartitionedVariablesTestCase(tf.test.TestCase):
self.assertEqual(var2_name + "/part_1:0", vs2[1].name)
# Test name_scope
with self.test_session():
- rnd_par = tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]])
- with tf.name_scope("ola"):
- vs1 = tf.create_partitioned_variables([2, 4], [1, 2], rnd_par)
- vs2 = tf.create_partitioned_variables([2, 4], [1, 2], rnd_par)
- tf.global_variables_initializer().run()
+ rnd_par = constant_op.constant([[1, 2, 3, 4], [5, 6, 7, 8]])
+ with ops.name_scope("ola"):
+ vs1 = partitioned_variables.create_partitioned_variables([2, 4], [1, 2],
+ rnd_par)
+ vs2 = partitioned_variables.create_partitioned_variables([2, 4], [1, 2],
+ rnd_par)
+ variables.global_variables_initializer().run()
var1_name = vs1[0]._save_slice_info.full_name
var2_name = vs2[0]._save_slice_info.full_name
# Currently, the name scope 'ola' has no effect.
@@ -319,170 +376,163 @@ class PartitionedVariablesTestCase(tf.test.TestCase):
def testRandomInitValue(self):
with self.test_session():
- rnd = tf.Variable(tf.random_uniform([200, 40]))
- vs = tf.create_partitioned_variables(
+ rnd = variables.Variable(random_ops.random_uniform([200, 40]))
+ vs = partitioned_variables.create_partitioned_variables(
rnd.get_shape(), [1, 10], rnd.initialized_value())
- tf.global_variables_initializer().run()
- val = tf.concat_v2(vs, 1).eval()
+ variables.global_variables_initializer().run()
+ val = array_ops.concat_v2(vs, 1).eval()
rnd = rnd.eval()
self.assertAllClose(rnd, val)
- self.assertEqual([tf.float32] * 10, [v.dtype.base_dtype for v in vs])
- self._TestSaveSpec(vs, ["200 40 0,200:0,4",
- "200 40 0,200:4,4",
- "200 40 0,200:8,4",
- "200 40 0,200:12,4",
- "200 40 0,200:16,4",
- "200 40 0,200:20,4",
- "200 40 0,200:24,4",
- "200 40 0,200:28,4",
- "200 40 0,200:32,4",
- "200 40 0,200:36,4"])
+ self.assertEqual([dtypes.float32] * 10, [v.dtype.base_dtype for v in vs])
+ self._TestSaveSpec(vs, [
+ "200 40 0,200:0,4", "200 40 0,200:4,4", "200 40 0,200:8,4",
+ "200 40 0,200:12,4", "200 40 0,200:16,4", "200 40 0,200:20,4",
+ "200 40 0,200:24,4", "200 40 0,200:28,4", "200 40 0,200:32,4",
+ "200 40 0,200:36,4"
+ ])
def testRandomInitUnevenPartitions(self):
with self.test_session():
- rnd = tf.Variable(
- tf.random_uniform([20, 43], dtype=tf.float64))
+ rnd = variables.Variable(
+ random_ops.random_uniform(
+ [20, 43], dtype=dtypes.float64))
var_lists = [
- tf.create_partitioned_variables(
- rnd.get_shape(), [1, i],
- rnd.initialized_value())
- for i in xrange(1, 10)]
- tf.global_variables_initializer().run()
+ partitioned_variables.create_partitioned_variables(
+ rnd.get_shape(), [1, i], rnd.initialized_value())
+ for i in xrange(1, 10)
+ ]
+ variables.global_variables_initializer().run()
rnd_val = rnd.eval()
# Only check the slice save specs for the first 5 variable lists.
save_specs = [
# One slice
["20 43 0,20:0,43"],
# Two slices
- ["20 43 0,20:0,22",
- "20 43 0,20:22,21"],
+ ["20 43 0,20:0,22", "20 43 0,20:22,21"],
# Three slices
- ["20 43 0,20:0,15",
- "20 43 0,20:15,14",
- "20 43 0,20:29,14"],
+ ["20 43 0,20:0,15", "20 43 0,20:15,14", "20 43 0,20:29,14"],
# Four slices
- ["20 43 0,20:0,11",
- "20 43 0,20:11,11",
- "20 43 0,20:22,11",
- "20 43 0,20:33,10"],
+ [
+ "20 43 0,20:0,11", "20 43 0,20:11,11", "20 43 0,20:22,11",
+ "20 43 0,20:33,10"
+ ],
# Five slices
- ["20 43 0,20:0,9",
- "20 43 0,20:9,9",
- "20 43 0,20:18,9",
- "20 43 0,20:27,8",
- "20 43 0,20:35,8"]]
+ [
+ "20 43 0,20:0,9", "20 43 0,20:9,9", "20 43 0,20:18,9",
+ "20 43 0,20:27,8", "20 43 0,20:35,8"
+ ]
+ ]
for i, vs in enumerate(var_lists):
- var_val = tf.concat_v2(vs, 1).eval()
+ var_val = array_ops.concat_v2(vs, 1).eval()
self.assertAllClose(rnd_val, var_val)
- self.assertEqual(
- [tf.float64] * len(vs), [v.dtype.base_dtype for v in vs])
+ self.assertEqual([dtypes.float64] * len(vs),
+ [v.dtype.base_dtype for v in vs])
if i < len(save_specs):
self._TestSaveSpec(vs, save_specs[i])
def testDegenerate(self):
with self.test_session():
- rnd = tf.Variable(tf.random_uniform([10, 43]))
- vs = tf.create_partitioned_variables(
+ rnd = variables.Variable(random_ops.random_uniform([10, 43]))
+ vs = partitioned_variables.create_partitioned_variables(
rnd.get_shape(), [1, 1], rnd.initialized_value())
- tf.global_variables_initializer().run()
- val = tf.concat_v2(vs, 0).eval()
+ variables.global_variables_initializer().run()
+ val = array_ops.concat_v2(vs, 0).eval()
rnd = rnd.eval()
self.assertAllClose(rnd, val)
self._TestSaveSpec(vs, ["10 43 0,10:0,43"])
def testSliceSizeOne(self):
with self.test_session():
- rnd = tf.Variable(tf.random_uniform([10, 43]))
- vs = tf.create_partitioned_variables(
+ rnd = variables.Variable(random_ops.random_uniform([10, 43]))
+ vs = partitioned_variables.create_partitioned_variables(
rnd.get_shape(), [10, 1], rnd.initialized_value())
- tf.global_variables_initializer().run()
- val = tf.concat_v2(vs, 0).eval()
+ variables.global_variables_initializer().run()
+ val = array_ops.concat_v2(vs, 0).eval()
rnd = rnd.eval()
self.assertAllClose(rnd, val)
- self._TestSaveSpec(vs, ["10 43 0,1:0,43",
- "10 43 1,1:0,43",
- "10 43 2,1:0,43",
- "10 43 3,1:0,43",
- "10 43 4,1:0,43",
- "10 43 5,1:0,43",
- "10 43 6,1:0,43",
- "10 43 7,1:0,43",
- "10 43 8,1:0,43",
- "10 43 9,1:0,43"])
+ self._TestSaveSpec(vs, [
+ "10 43 0,1:0,43", "10 43 1,1:0,43", "10 43 2,1:0,43",
+ "10 43 3,1:0,43", "10 43 4,1:0,43", "10 43 5,1:0,43",
+ "10 43 6,1:0,43", "10 43 7,1:0,43", "10 43 8,1:0,43", "10 43 9,1:0,43"
+ ])
def testIotaInitializer(self):
self.assertAllClose([0., 1., 2., 3.], _IotaInitializer([4]))
self.assertAllClose([[0., 1.], [0., 10.], [0., 100.], [0., 1000.]],
_IotaInitializer([4, 2]))
with self.test_session():
- vs = tf.create_partitioned_variables([13, 5], [3, 1], _IotaInitializer)
- tf.global_variables_initializer().run()
+ vs = partitioned_variables.create_partitioned_variables([13, 5], [3, 1],
+ _IotaInitializer)
+ variables.global_variables_initializer().run()
slice0 = _IotaInitializer([5, 5])
slice1 = _IotaInitializer([4, 5])
slice2 = _IotaInitializer([4, 5])
- val = tf.concat_v2(vs, 0).eval()
+ val = array_ops.concat_v2(vs, 0).eval()
self.assertAllClose(slice0 + slice1 + slice2, val)
- self._TestSaveSpec(vs, ["13 5 0,5:0,5",
- "13 5 5,4:0,5",
- "13 5 9,4:0,5"])
+ self._TestSaveSpec(vs, ["13 5 0,5:0,5", "13 5 5,4:0,5", "13 5 9,4:0,5"])
def testRandomInitializer(self):
# Sanity check that the slices use a different seed when using a random
# initializer function.
with self.test_session():
- var0, var1 = tf.create_partitioned_variables(
- [20, 12], [1, 2], tf.random_uniform_initializer())
- tf.global_variables_initializer().run()
+ var0, var1 = partitioned_variables.create_partitioned_variables(
+ [20, 12], [1, 2], init_ops.random_uniform_initializer())
+ variables.global_variables_initializer().run()
val0, val1 = var0.eval().flatten(), var1.eval().flatten()
self.assertTrue(np.linalg.norm(val0 - val1) > 1e-6)
# Negative test that proves that slices have the same values if
# the random initializer uses a seed.
with self.test_session():
- var0, var1 = tf.create_partitioned_variables(
- [20, 12], [1, 2], tf.random_uniform_initializer(seed=201))
- tf.global_variables_initializer().run()
+ var0, var1 = partitioned_variables.create_partitioned_variables(
+ [20, 12], [1, 2], init_ops.random_uniform_initializer(seed=201))
+ variables.global_variables_initializer().run()
val0, val1 = var0.eval().flatten(), var1.eval().flatten()
self.assertAllClose(val0, val1)
def testSomeErrors(self):
with self.test_session():
- rnd = tf.Variable(tf.random_uniform([10, 43]))
+ rnd = variables.Variable(random_ops.random_uniform([10, 43]))
with self.assertRaises(ValueError):
- tf.create_partitioned_variables([10], [1, 1], rnd.initialized_value())
+ partitioned_variables.create_partitioned_variables(
+ [10], [1, 1], rnd.initialized_value())
with self.assertRaises(ValueError):
- tf.create_partitioned_variables([10, 20], [1], rnd.initialized_value())
+ partitioned_variables.create_partitioned_variables(
+ [10, 20], [1], rnd.initialized_value())
with self.assertRaises(ValueError):
- tf.create_partitioned_variables([10, 43], [1], rnd.initialized_value())
+ partitioned_variables.create_partitioned_variables(
+ [10, 43], [1], rnd.initialized_value())
with self.assertRaises(ValueError):
- tf.create_partitioned_variables(
+ partitioned_variables.create_partitioned_variables(
[10, 43], [1, 2, 3], rnd.initialized_value())
with self.assertRaises(ValueError):
- tf.create_partitioned_variables(
+ partitioned_variables.create_partitioned_variables(
[10, 43], [11, 1], rnd.initialized_value())
with self.assertRaises(ValueError):
- tf.create_partitioned_variables(
+ partitioned_variables.create_partitioned_variables(
[10, 43], [20, 1], rnd.initialized_value())
with self.assertRaises(ValueError):
- tf.create_partitioned_variables(
+ partitioned_variables.create_partitioned_variables(
[10, 43], [1, 50], rnd.initialized_value())
def testControlDepsNone(self):
with self.test_session() as session:
- c = tf.constant(1.0)
- with tf.control_dependencies([c]):
+ c = constant_op.constant(1.0)
+ with ops.control_dependencies([c]):
# d gets the control dependency.
- d = tf.constant(2.0)
+ d = constant_op.constant(2.0)
# Partitioned variables do not.
- var_x = tf.get_variable(
+ var_x = variable_scope.get_variable(
"x",
shape=[2],
- initializer=tf.ones_initializer(),
- partitioner=tf.variable_axis_size_partitioner(4))
+ initializer=init_ops.ones_initializer(),
+ partitioner=partitioned_variables.variable_axis_size_partitioner(4))
ops_before_read = session.graph.get_operations()
var_x.as_tensor() # Caches the ops for subsequent reads.
- reading_ops = [op for op in session.graph.get_operations()
- if op not in ops_before_read]
+ reading_ops = [
+ op for op in session.graph.get_operations()
+ if op not in ops_before_read
+ ]
self.assertEqual([c.op], d.op.control_inputs)
# Tests that no control dependencies are added to reading a partitioned
@@ -492,26 +542,29 @@ class PartitionedVariablesTestCase(tf.test.TestCase):
def testConcat(self):
with self.test_session() as session:
- var_x = tf.get_variable(
+ var_x = variable_scope.get_variable(
"x",
- initializer=tf.constant([1., 2.]),
- partitioner=tf.variable_axis_size_partitioner(4))
+ initializer=constant_op.constant([1., 2.]),
+ partitioner=partitioned_variables.variable_axis_size_partitioner(4))
- c = tf.constant(1.0)
- with tf.control_dependencies([c]):
+ c = constant_op.constant(1.0)
+ with ops.control_dependencies([c]):
ops_before_concat = session.graph.get_operations()
value = var_x._concat() # pylint: disable=protected-access
- concat_ops = [op for op in session.graph.get_operations()
- if op not in ops_before_concat]
-
- concat_control_inputs = [ci for op in concat_ops
- for ci in op.control_inputs]
+ concat_ops = [
+ op for op in session.graph.get_operations()
+ if op not in ops_before_concat
+ ]
+
+ concat_control_inputs = [
+ ci for op in concat_ops for ci in op.control_inputs
+ ]
self.assertTrue(
c.op in concat_control_inputs,
"var_x._concat() should get control dependencies from its scope.")
- tf.global_variables_initializer().run()
+ variables.global_variables_initializer().run()
self.assertAllClose(value.eval(), var_x.as_tensor().eval())
if __name__ == "__main__":
- tf.test.main()
+ test.main()
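End of partitioned_variables_test.py. Distilled from the tests above, the basic create-and-reassemble flow looks like this (a sketch reusing the file's own imports):

    rnd_par = constant_op.constant([[1, 2, 3, 4], [5, 6, 7, 8]])
    vs = partitioned_variables.create_partitioned_variables(
        [2, 4], [1, 2], rnd_par)  # full shape, slices per axis, init value
    variables.global_variables_initializer().run()
    val = array_ops.concat_v2(vs, 1).eval()  # stitch the column shards back up
    # concat_v2(values, axis) is this era's spelling of what became tf.concat.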
diff --git a/tensorflow/python/kernel_tests/pool_test.py b/tensorflow/python/kernel_tests/pool_test.py
index 79450c3f70..a3351d8478 100644
--- a/tensorflow/python/kernel_tests/pool_test.py
+++ b/tensorflow/python/kernel_tests/pool_test.py
@@ -21,16 +21,23 @@ from __future__ import print_function
import math
import numpy as np
-import tensorflow as tf
-
-def pool_direct_single_axis(input, # pylint: disable=redefined-builtin
- axis,
- window_size,
- pooling_type,
- padding,
- dilation_rate,
- stride):
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.ops import gradient_checker
+from tensorflow.python.ops import nn_ops
+import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
+from tensorflow.python.platform import test
+
+
+def pool_direct_single_axis(
+ input, # pylint: disable=redefined-builtin
+ axis,
+ window_size,
+ pooling_type,
+ padding,
+ dilation_rate,
+ stride):
"""Numpy implementation of pooling along a single axis.
This is intended for testing only, and therefore isn't particularly efficient.
@@ -88,8 +95,14 @@ def pool_direct_single_axis(input, # pylint: disable=redefined-builtin
return output
-def pool_direct(input, window_shape, pooling_type, padding, # pylint: disable=redefined-builtin
- dilation_rate, strides, data_format=None):
+def pool_direct(
+ input,
+ window_shape,
+ pooling_type,
+ padding, # pylint: disable=redefined-builtin
+ dilation_rate,
+ strides,
+ data_format=None):
"""Numpy implementation of pooling.
This is intended for testing only, and therefore isn't particularly efficient.
@@ -129,7 +142,7 @@ def pool_direct(input, window_shape, pooling_type, padding, # pylint: disable=r
return output
-class PoolingTest(tf.test.TestCase):
+class PoolingTest(test.TestCase):
def _test(self, input_shape, **kwargs):
# Use negative numbers to make sure there isn't any zero padding getting
@@ -137,7 +150,7 @@ class PoolingTest(tf.test.TestCase):
x = -np.arange(
np.prod(input_shape), dtype=np.float32).reshape(input_shape) - 1
y1 = pool_direct(input=x, **kwargs)
- y2 = tf.nn.pool(input=x, **kwargs)
+ y2 = nn_ops.pool(input=x, **kwargs)
self.assertAllClose(y1, y2.eval(), rtol=1e-2, atol=1e-2)
def testPoolSimple(self):
@@ -232,48 +245,51 @@ class PoolingTest(tf.test.TestCase):
strides=strides)
def testPoolNC(self):
- if tf.test.is_gpu_available(cuda_only=True):
+ if test.is_gpu_available(cuda_only=True):
# "NC*" format is currently only supported on CUDA.
with self.test_session(use_gpu=True):
for padding in ["SAME", "VALID"]:
- self._test(input_shape=[2, 2, 9],
- window_shape=[2],
- padding=padding,
- pooling_type="MAX",
- strides=[1],
- dilation_rate=[1],
- data_format="NCW")
- self._test(input_shape=[2, 2, 9],
- window_shape=[2],
- padding=padding,
- pooling_type="MAX",
- strides=[2],
- dilation_rate=[1],
- data_format="NCW")
- self._test(input_shape=[2, 2, 7, 9],
- window_shape=[2, 2],
- padding=padding,
- pooling_type="MAX",
- strides=[1, 2],
- dilation_rate=[1, 1],
- data_format="NCHW")
- self._test(input_shape=[2, 2, 7, 9],
- window_shape=[2, 2],
- padding="VALID",
- pooling_type="MAX",
- strides=[1, 1],
- dilation_rate=[2, 2],
- data_format="NCHW")
+ self._test(
+ input_shape=[2, 2, 9],
+ window_shape=[2],
+ padding=padding,
+ pooling_type="MAX",
+ strides=[1],
+ dilation_rate=[1],
+ data_format="NCW")
+ self._test(
+ input_shape=[2, 2, 9],
+ window_shape=[2],
+ padding=padding,
+ pooling_type="MAX",
+ strides=[2],
+ dilation_rate=[1],
+ data_format="NCW")
+ self._test(
+ input_shape=[2, 2, 7, 9],
+ window_shape=[2, 2],
+ padding=padding,
+ pooling_type="MAX",
+ strides=[1, 2],
+ dilation_rate=[1, 1],
+ data_format="NCHW")
+ self._test(
+ input_shape=[2, 2, 7, 9],
+ window_shape=[2, 2],
+ padding="VALID",
+ pooling_type="MAX",
+ strides=[1, 1],
+ dilation_rate=[2, 2],
+ data_format="NCHW")
def _test_gradient(self, input_shape, **kwargs):
x_val = -np.arange(
np.prod(input_shape), dtype=np.float32).reshape(input_shape) - 1
- x = tf.constant(x_val, name="x", dtype=tf.float32)
- output = tf.nn.pool(input=x, **kwargs)
+ x = constant_op.constant(x_val, name="x", dtype=dtypes.float32)
+ output = nn_ops.pool(input=x, **kwargs)
y_shape = output.get_shape().as_list()
- err = tf.test.compute_gradient_error(
- [x], [input_shape], output, y_shape, x_init_value=[x_val]
- )
+ err = gradient_checker.compute_gradient_error(
+ [x], [input_shape], output, y_shape, x_init_value=[x_val])
err_tolerance = 1e-2
self.assertLess(err, err_tolerance)
@@ -357,4 +373,4 @@ class PoolingTest(tf.test.TestCase):
if __name__ == "__main__":
- tf.test.main()
+ test.main()
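End of pool_test.py. pool_direct above is the reference numpy implementation that nn_ops.pool is checked against; along one axis it is just a sliding reduction. A tiny worked instance (illustrative only): max pooling with window 2, stride 1, VALID padding on a 1-D signal:

    import numpy as np

    x = np.array([1., 3., 2., 5.])
    out = np.array([x[i:i + 2].max() for i in range(len(x) - 1)])
    # out == [3., 3., 5.]; nn_ops.pool(input=..., window_shape=[2],
    # pooling_type="MAX", padding="VALID", strides=[1], dilation_rate=[1])
    # produces the same numbers on a [batch, width, channels] tensor.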
diff --git a/tensorflow/python/kernel_tests/pooling_ops_3d_test.py b/tensorflow/python/kernel_tests/pooling_ops_3d_test.py
index 381505df85..c44e623635 100644
--- a/tensorflow/python/kernel_tests/pooling_ops_3d_test.py
+++ b/tensorflow/python/kernel_tests/pooling_ops_3d_test.py
@@ -13,15 +13,21 @@
# limitations under the License.
# ==============================================================================
"""Functional tests for 3d pooling operations."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+
+from tensorflow.python.framework import constant_op
+from tensorflow.python.ops import gradient_checker
+from tensorflow.python.ops import nn_ops
+import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
+from tensorflow.python.platform import test
-class PoolingTest(tf.test.TestCase):
+class PoolingTest(test.TestCase):
def _VerifyValues(self, pool_func, input_sizes, window, strides, padding,
expected):
@@ -42,11 +48,12 @@ class PoolingTest(tf.test.TestCase):
# numbers from 1.
x = [f * 1.0 for f in range(1, total_size + 1)]
with self.test_session(use_gpu=True) as sess:
- t = tf.constant(x, shape=input_sizes)
- t = pool_func(t,
- ksize=[1, window[0], window[1], window[2], 1],
- strides=[1, strides[0], strides[1], strides[2], 1],
- padding=padding)
+ t = constant_op.constant(x, shape=input_sizes)
+ t = pool_func(
+ t,
+ ksize=[1, window[0], window[1], window[2], 1],
+ strides=[1, strides[0], strides[1], strides[2], 1],
+ padding=padding)
vals = sess.run(t)
# Verifies values.
actual = vals.flatten()
@@ -54,57 +61,63 @@ class PoolingTest(tf.test.TestCase):
def testAvgPool3dValidPadding(self):
expected_output = [20.5, 21.5, 22.5]
- self._VerifyValues(tf.nn.avg_pool3d,
- input_sizes=[1, 3, 3, 3, 3],
- window=(2, 2, 2),
- strides=(2, 2, 2),
- padding="VALID",
- expected=expected_output)
+ self._VerifyValues(
+ nn_ops.avg_pool3d,
+ input_sizes=[1, 3, 3, 3, 3],
+ window=(2, 2, 2),
+ strides=(2, 2, 2),
+ padding="VALID",
+ expected=expected_output)
def testAvgPool3dSamePadding(self):
expected_output = [20.5, 21.5, 22.5, 26.5, 27.5, 28.5]
- self._VerifyValues(tf.nn.avg_pool3d,
- input_sizes=[1, 2, 2, 4, 3],
- window=(2, 2, 2),
- strides=(2, 2, 2),
- padding="SAME",
- expected=expected_output)
+ self._VerifyValues(
+ nn_ops.avg_pool3d,
+ input_sizes=[1, 2, 2, 4, 3],
+ window=(2, 2, 2),
+ strides=(2, 2, 2),
+ padding="SAME",
+ expected=expected_output)
def testAvgPool3dSamePaddingDifferentStrides(self):
expected_output = [1.5, 4.5, 7.5, 17.5, 20.5, 23.5, 33.5, 36.5, 39.5]
- self._VerifyValues(tf.nn.avg_pool3d,
- input_sizes=[1, 5, 8, 1, 1],
- window=(1, 2, 3),
- strides=(2, 3, 1),
- padding="SAME",
- expected=expected_output)
+ self._VerifyValues(
+ nn_ops.avg_pool3d,
+ input_sizes=[1, 5, 8, 1, 1],
+ window=(1, 2, 3),
+ strides=(2, 3, 1),
+ padding="SAME",
+ expected=expected_output)
def testMaxPool3dValidPadding(self):
expected_output = [40.0, 41.0, 42.0]
- self._VerifyValues(tf.nn.max_pool3d,
- input_sizes=[1, 3, 3, 3, 3],
- window=(2, 2, 2),
- strides=(2, 2, 2),
- padding="VALID",
- expected=expected_output)
+ self._VerifyValues(
+ nn_ops.max_pool3d,
+ input_sizes=[1, 3, 3, 3, 3],
+ window=(2, 2, 2),
+ strides=(2, 2, 2),
+ padding="VALID",
+ expected=expected_output)
def testMaxPool3dSamePadding(self):
expected_output = [31., 32., 33., 34., 35., 36.]
- self._VerifyValues(tf.nn.max_pool3d,
- input_sizes=[1, 2, 2, 3, 3],
- window=(2, 2, 2),
- strides=(2, 2, 2),
- padding="SAME",
- expected=expected_output)
+ self._VerifyValues(
+ nn_ops.max_pool3d,
+ input_sizes=[1, 2, 2, 3, 3],
+ window=(2, 2, 2),
+ strides=(2, 2, 2),
+ padding="SAME",
+ expected=expected_output)
def testMaxPool3dSamePaddingDifferentStrides(self):
expected_output = [2., 5., 8., 18., 21., 24., 34., 37., 40.]
- self._VerifyValues(tf.nn.max_pool3d,
- input_sizes=[1, 5, 8, 1, 1],
- window=(1, 2, 3),
- strides=(2, 3, 1),
- padding="SAME",
- expected=expected_output)
+ self._VerifyValues(
+ nn_ops.max_pool3d,
+ input_sizes=[1, 5, 8, 1, 1],
+ window=(1, 2, 3),
+ strides=(2, 3, 1),
+ padding="SAME",
+ expected=expected_output)
# Test pooling on a larger input, with different stride and kernel
# size for the 'z' dimension.
@@ -118,34 +131,46 @@ class PoolingTest(tf.test.TestCase):
expected_output[:, :, -1, :] = input_data[:, 1::2, -2, :]
expected_output[:, -1, -1, :] = input_data[:, -2, -2, :]
- self._VerifyValues(tf.nn.max_pool3d,
- input_sizes=[1, 5, 27, 27, 64],
- window=(1, 2, 2),
- strides=(1, 2, 2),
- padding="SAME",
- expected=expected_output.flatten())
+ self._VerifyValues(
+ nn_ops.max_pool3d,
+ input_sizes=[1, 5, 27, 27, 64],
+ window=(1, 2, 2),
+ strides=(1, 2, 2),
+ padding="SAME",
+ expected=expected_output.flatten())
def testKernelSmallerThanStride(self):
- self._VerifyValues(tf.nn.max_pool3d, input_sizes=[1, 3, 3, 3, 1],
- window=[1, 1, 1], strides=[2, 2, 2],
- padding="SAME",
- expected=[1, 3, 7, 9, 19, 21, 25, 27])
-
- self._VerifyValues(tf.nn.max_pool3d, input_sizes=[1, 7, 7, 7, 1],
- window=[2, 2, 2], strides=[3, 3, 3],
- padding="VALID",
- expected=[58, 61, 79, 82, 205, 208, 226, 229])
-
- self._VerifyValues(tf.nn.avg_pool3d, input_sizes=[1, 3, 3, 3, 1],
- window=[1, 1, 1], strides=[2, 2, 2],
- padding="SAME",
- expected=[1, 3, 7, 9, 19, 21, 25, 27])
-
- self._VerifyValues(tf.nn.avg_pool3d, input_sizes=[1, 7, 7, 7, 1],
- window=[2, 2, 2], strides=[3, 3, 3],
- padding="VALID",
- expected=[29.5, 32.5, 50.5, 53.5,
- 176.5, 179.5, 197.5, 200.5])
+ self._VerifyValues(
+ nn_ops.max_pool3d,
+ input_sizes=[1, 3, 3, 3, 1],
+ window=[1, 1, 1],
+ strides=[2, 2, 2],
+ padding="SAME",
+ expected=[1, 3, 7, 9, 19, 21, 25, 27])
+
+ self._VerifyValues(
+ nn_ops.max_pool3d,
+ input_sizes=[1, 7, 7, 7, 1],
+ window=[2, 2, 2],
+ strides=[3, 3, 3],
+ padding="VALID",
+ expected=[58, 61, 79, 82, 205, 208, 226, 229])
+
+ self._VerifyValues(
+ nn_ops.avg_pool3d,
+ input_sizes=[1, 3, 3, 3, 1],
+ window=[1, 1, 1],
+ strides=[2, 2, 2],
+ padding="SAME",
+ expected=[1, 3, 7, 9, 19, 21, 25, 27])
+
+ self._VerifyValues(
+ nn_ops.avg_pool3d,
+ input_sizes=[1, 7, 7, 7, 1],
+ window=[2, 2, 2],
+ strides=[3, 3, 3],
+ padding="VALID",
+ expected=[29.5, 32.5, 50.5, 53.5, 176.5, 179.5, 197.5, 200.5])
def _ConstructAndTestGradient(self,
pool_func,
@@ -174,9 +199,9 @@ class PoolingTest(tf.test.TestCase):
# numbers from 1.
x = [f * 1.0 for f in range(1, total_size + 1)]
with self.test_session(use_gpu=True):
- input_tensor = tf.constant(x, shape=input_sizes, name="input")
+ input_tensor = constant_op.constant(x, shape=input_sizes, name="input")
err_margin = 1e-3
- if pool_func == tf.nn.avg_pool3d:
+ if pool_func == nn_ops.avg_pool3d:
func_name = "avg_pool3d"
else:
if x_init_value is None:
@@ -185,141 +210,158 @@ class PoolingTest(tf.test.TestCase):
dtype=np.float32).reshape(input_sizes)
func_name = "max_pool3d"
- t = pool_func(input_tensor,
- ksize=[1, window[0], window[1], window[2], 1],
- strides=[1, strides[0], strides[1], strides[2], 1],
- padding=padding,
- name=func_name)
-
- err = tf.test.compute_gradient_error(input_tensor,
- input_sizes,
- t,
- output_sizes,
- x_init_value=x_init_value,
- delta=1e-2)
+ t = pool_func(
+ input_tensor,
+ ksize=[1, window[0], window[1], window[2], 1],
+ strides=[1, strides[0], strides[1], strides[2], 1],
+ padding=padding,
+ name=func_name)
+
+ err = gradient_checker.compute_gradient_error(
+ input_tensor,
+ input_sizes,
+ t,
+ output_sizes,
+ x_init_value=x_init_value,
+ delta=1e-2)
print("%s gradient error = " % func_name, err)
self.assertLess(err, err_margin)
def testMaxPoolGradValidPadding1_1_3d(self):
- self._ConstructAndTestGradient(tf.nn.max_pool3d,
- input_sizes=[1, 3, 3, 3, 1],
- output_sizes=[1, 3, 3, 3, 1],
- window=(1, 1, 1),
- strides=(1, 1, 1),
- padding="VALID")
+ self._ConstructAndTestGradient(
+ nn_ops.max_pool3d,
+ input_sizes=[1, 3, 3, 3, 1],
+ output_sizes=[1, 3, 3, 3, 1],
+ window=(1, 1, 1),
+ strides=(1, 1, 1),
+ padding="VALID")
def testMaxPoolGradValidPadding2_1_6_3d(self):
- self._ConstructAndTestGradient(tf.nn.max_pool3d,
- input_sizes=[2, 3, 3, 6, 3],
- output_sizes=[2, 2, 2, 5, 3],
- window=(2, 2, 2),
- strides=(1, 1, 1),
- padding="VALID")
+ self._ConstructAndTestGradient(
+ nn_ops.max_pool3d,
+ input_sizes=[2, 3, 3, 6, 3],
+ output_sizes=[2, 2, 2, 5, 3],
+ window=(2, 2, 2),
+ strides=(1, 1, 1),
+ padding="VALID")
def testMaxPoolGradValidPadding2_1_7_3d(self):
- self._ConstructAndTestGradient(tf.nn.max_pool3d,
- input_sizes=[2, 3, 5, 7, 3],
- output_sizes=[2, 2, 4, 6, 3],
- window=(2, 2, 2),
- strides=(1, 1, 1),
- padding="VALID")
+ self._ConstructAndTestGradient(
+ nn_ops.max_pool3d,
+ input_sizes=[2, 3, 5, 7, 3],
+ output_sizes=[2, 2, 4, 6, 3],
+ window=(2, 2, 2),
+ strides=(1, 1, 1),
+ padding="VALID")
def testMaxPoolGradValidPadding2_2_3d(self):
- self._ConstructAndTestGradient(tf.nn.max_pool3d,
- input_sizes=[2, 2, 2, 2, 3],
- output_sizes=[2, 1, 1, 1, 3],
- window=(2, 2, 2),
- strides=(2, 2, 2),
- padding="VALID")
+ self._ConstructAndTestGradient(
+ nn_ops.max_pool3d,
+ input_sizes=[2, 2, 2, 2, 3],
+ output_sizes=[2, 1, 1, 1, 3],
+ window=(2, 2, 2),
+ strides=(2, 2, 2),
+ padding="VALID")
def testMaxPoolGradSamePadding1_1_3d(self):
- self._ConstructAndTestGradient(tf.nn.max_pool3d,
- input_sizes=[2, 3, 2, 4, 1],
- output_sizes=[2, 3, 2, 4, 1],
- window=(1, 1, 1),
- strides=(1, 1, 1),
- padding="SAME")
+ self._ConstructAndTestGradient(
+ nn_ops.max_pool3d,
+ input_sizes=[2, 3, 2, 4, 1],
+ output_sizes=[2, 3, 2, 4, 1],
+ window=(1, 1, 1),
+ strides=(1, 1, 1),
+ padding="SAME")
def testMaxPoolGradSamePadding2_1_3d(self):
- self._ConstructAndTestGradient(tf.nn.max_pool3d,
- input_sizes=[2, 3, 2, 4, 1],
- output_sizes=[2, 3, 2, 4, 1],
- window=(2, 2, 2),
- strides=(1, 1, 1),
- padding="SAME")
+ self._ConstructAndTestGradient(
+ nn_ops.max_pool3d,
+ input_sizes=[2, 3, 2, 4, 1],
+ output_sizes=[2, 3, 2, 4, 1],
+ window=(2, 2, 2),
+ strides=(1, 1, 1),
+ padding="SAME")
def testMaxPoolGradSamePadding2_2_3d(self):
- self._ConstructAndTestGradient(tf.nn.max_pool3d,
- input_sizes=[2, 5, 2, 4, 3],
- output_sizes=[2, 3, 1, 2, 3],
- window=(2, 2, 2),
- strides=(2, 2, 2),
- padding="SAME")
+ self._ConstructAndTestGradient(
+ nn_ops.max_pool3d,
+ input_sizes=[2, 5, 2, 4, 3],
+ output_sizes=[2, 3, 1, 2, 3],
+ window=(2, 2, 2),
+ strides=(2, 2, 2),
+ padding="SAME")
def testMaxPoolGradSamePadding3_1_3d(self):
- self._ConstructAndTestGradient(tf.nn.max_pool3d,
- input_sizes=[1, 3, 3, 7, 1],
- output_sizes=[1, 3, 3, 7, 1],
- window=(3, 3, 3),
- strides=(1, 1, 1),
- padding="SAME")
+ self._ConstructAndTestGradient(
+ nn_ops.max_pool3d,
+ input_sizes=[1, 3, 3, 7, 1],
+ output_sizes=[1, 3, 3, 7, 1],
+ window=(3, 3, 3),
+ strides=(1, 1, 1),
+ padding="SAME")
def testAvgPoolGradValidPadding1_1_3d(self):
- self._ConstructAndTestGradient(tf.nn.avg_pool3d,
- input_sizes=[2, 3, 3, 3, 3],
- output_sizes=[2, 3, 3, 3, 3],
- window=(1, 1, 1),
- strides=(1, 1, 1),
- padding="VALID")
+ self._ConstructAndTestGradient(
+ nn_ops.avg_pool3d,
+ input_sizes=[2, 3, 3, 3, 3],
+ output_sizes=[2, 3, 3, 3, 3],
+ window=(1, 1, 1),
+ strides=(1, 1, 1),
+ padding="VALID")
def testAvgPoolGradValidPadding2_1_3d(self):
- self._ConstructAndTestGradient(tf.nn.avg_pool3d,
- input_sizes=[2, 3, 3, 3, 3],
- output_sizes=[2, 2, 2, 2, 3],
- window=(2, 2, 2),
- strides=(1, 1, 1),
- padding="VALID")
+ self._ConstructAndTestGradient(
+ nn_ops.avg_pool3d,
+ input_sizes=[2, 3, 3, 3, 3],
+ output_sizes=[2, 2, 2, 2, 3],
+ window=(2, 2, 2),
+ strides=(1, 1, 1),
+ padding="VALID")
def testAvgPoolGradValidPadding2_2_3d(self):
- self._ConstructAndTestGradient(tf.nn.avg_pool3d,
- input_sizes=[2, 2, 2, 2, 3],
- output_sizes=[2, 1, 1, 1, 3],
- window=(2, 2, 2),
- strides=(2, 2, 2),
- padding="VALID")
+ self._ConstructAndTestGradient(
+ nn_ops.avg_pool3d,
+ input_sizes=[2, 2, 2, 2, 3],
+ output_sizes=[2, 1, 1, 1, 3],
+ window=(2, 2, 2),
+ strides=(2, 2, 2),
+ padding="VALID")
def testAvgPoolGradSamePadding1_1_3d(self):
- self._ConstructAndTestGradient(tf.nn.avg_pool3d,
- input_sizes=[2, 3, 2, 4, 3],
- output_sizes=[2, 3, 2, 4, 3],
- window=(1, 1, 1),
- strides=(1, 1, 1),
- padding="SAME")
+ self._ConstructAndTestGradient(
+ nn_ops.avg_pool3d,
+ input_sizes=[2, 3, 2, 4, 3],
+ output_sizes=[2, 3, 2, 4, 3],
+ window=(1, 1, 1),
+ strides=(1, 1, 1),
+ padding="SAME")
def testAvgPoolGradSamePadding2_1_3d(self):
- self._ConstructAndTestGradient(tf.nn.avg_pool3d,
- input_sizes=[1, 2, 2, 2, 1],
- output_sizes=[1, 2, 2, 2, 1],
- window=(2, 2, 2),
- strides=(1, 1, 1),
- padding="SAME")
+ self._ConstructAndTestGradient(
+ nn_ops.avg_pool3d,
+ input_sizes=[1, 2, 2, 2, 1],
+ output_sizes=[1, 2, 2, 2, 1],
+ window=(2, 2, 2),
+ strides=(1, 1, 1),
+ padding="SAME")
def testAvgPoolGradSamePadding2_2_3d(self):
- self._ConstructAndTestGradient(tf.nn.avg_pool3d,
- input_sizes=[2, 5, 2, 4, 3],
- output_sizes=[2, 3, 1, 2, 3],
- window=(2, 2, 2),
- strides=(2, 2, 2),
- padding="SAME")
+ self._ConstructAndTestGradient(
+ nn_ops.avg_pool3d,
+ input_sizes=[2, 5, 2, 4, 3],
+ output_sizes=[2, 3, 1, 2, 3],
+ window=(2, 2, 2),
+ strides=(2, 2, 2),
+ padding="SAME")
def testAvgPoolGradSamePadding3_1_3d(self):
- self._ConstructAndTestGradient(tf.nn.avg_pool3d,
- input_sizes=[1, 3, 6, 7, 1],
- output_sizes=[1, 3, 6, 7, 1],
- window=(3, 3, 3),
- strides=(1, 1, 1),
- padding="SAME")
+ self._ConstructAndTestGradient(
+ nn_ops.avg_pool3d,
+ input_sizes=[1, 3, 6, 7, 1],
+ output_sizes=[1, 3, 6, 7, 1],
+ window=(3, 3, 3),
+ strides=(1, 1, 1),
+ padding="SAME")
if __name__ == "__main__":
- tf.test.main()
+ test.main()
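The expected values in these tests can be reproduced by hand, since _VerifyValues fills the input with 1.0, 2.0, 3.0, ... For testAvgPool3dValidPadding, the 1x3x3x3x3 NDHWC input holds 1..81, and VALID padding with a 2x2x2 window at stride 2 leaves exactly one window per channel. A small numpy check (a standalone sketch, independent of the test harness above):

    import numpy as np

    x = np.arange(1.0, 82.0).reshape(1, 3, 3, 3, 3)  # NDHWC, values 1..81
    window = x[0, :2, :2, :2, :]                     # the single VALID 2x2x2 window
    print(window.reshape(-1, 3).mean(axis=0))        # -> [20.5 21.5 22.5]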
diff --git a/tensorflow/python/kernel_tests/pooling_ops_test.py b/tensorflow/python/kernel_tests/pooling_ops_test.py
index 6fe112b6be..eb51d8023e 100644
--- a/tensorflow/python/kernel_tests/pooling_ops_test.py
+++ b/tensorflow/python/kernel_tests/pooling_ops_test.py
@@ -12,17 +12,25 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Functional tests for pooling operations."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import errors_impl
+from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
+from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
+from tensorflow.python.ops import gradient_checker
+from tensorflow.python.ops import nn_ops
+import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
+from tensorflow.python.platform import test
def NHWCToNCHW(input_tensor):
@@ -34,8 +42,8 @@ def NHWCToNCHW(input_tensor):
Returns:
the converted tensor or a shape array
"""
- if isinstance(input_tensor, tf.Tensor):
- return tf.transpose(input_tensor, [0, 3, 1, 2])
+ if isinstance(input_tensor, ops.Tensor):
+ return array_ops.transpose(input_tensor, [0, 3, 1, 2])
else:
return [input_tensor[0], input_tensor[3], input_tensor[1], input_tensor[2]]
@@ -49,8 +57,8 @@ def NCHWToNHWC(input_tensor):
Returns:
the converted tensor or a shape array
"""
- if isinstance(input_tensor, tf.Tensor):
- return tf.transpose(input_tensor, [0, 2, 3, 1])
+ if isinstance(input_tensor, ops.Tensor):
+ return array_ops.transpose(input_tensor, [0, 2, 3, 1])
else:
return [input_tensor[0], input_tensor[2], input_tensor[3], input_tensor[1]]
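Both helpers accept either a tensor or a plain shape list: tensors are permuted with array_ops.transpose, shape lists are reordered directly. A worked example on shapes alone (plain Python, no TensorFlow required):

    nhwc = [32, 20, 20, 3]                       # [batch, height, width, channels]
    nchw = [nhwc[0], nhwc[3], nhwc[1], nhwc[2]]  # channels move ahead of the spatial dims
    assert nchw == [32, 3, 20, 20]
    # For tensors the same permutation is transpose(t, [0, 3, 1, 2]);
    # transpose(t, [0, 2, 3, 1]) inverts it.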
@@ -62,7 +70,7 @@ def GetTestConfigs():
all the valid test configs as tuples of data_format and use_gpu.
"""
test_configs = [("NHWC", False), ("NHWC", True)]
- if tf.test.is_gpu_available(cuda_only=True):
+ if test.is_gpu_available(cuda_only=True):
# "NCHW" format is currently supported exclusively on CUDA GPUs.
test_configs += [("NCHW", True)]
return test_configs
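So a CPU-only run exercises NHWC twice (with and without use_gpu), and a machine with a CUDA GPU adds the NCHW layout; the test methods below simply loop over the returned pairs. A sketch of how they are consumed (assumes GetTestConfigs from this file; run_one_config is a hypothetical per-config runner):

    for data_format, use_gpu in GetTestConfigs():
        # NCHW only appears when a CUDA GPU is available, since the CPU
        # pooling kernels here are NHWC-only.
        run_one_config(data_format, use_gpu)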
@@ -78,14 +86,12 @@ def GetShrunkInceptionMaxPoolShapes(shrink=30):
Tuple (name, input_size, filter_size, out_size, strides, padding)
"""
names = ["maxpool2", "maxpool3", "maxpool4", "maxpool5"]
- input_sizes = [[32, 71, 71, 192],
- [32, 35, 35, 288], [32, 17, 17, 1248], [32, 8, 8, 2048]]
- filter_sizes = [[1, 3, 3, 1], [1, 3, 3, 1],
- [1, 3, 3, 1], [1, 3, 3, 1]]
- output_sizes = [[32, 35, 35, 192], [32, 17, 17, 288],
- [32, 8, 8, 1248], [32, 8, 8, 2048]]
- strides = [[1, 2, 2, 1], [1, 2, 2, 1], [1, 2, 2, 1],
- [1, 1, 1, 1]]
+ input_sizes = [[32, 71, 71, 192], [32, 35, 35, 288], [32, 17, 17, 1248],
+ [32, 8, 8, 2048]]
+ filter_sizes = [[1, 3, 3, 1], [1, 3, 3, 1], [1, 3, 3, 1], [1, 3, 3, 1]]
+ output_sizes = [[32, 35, 35, 192], [32, 17, 17, 288], [32, 8, 8, 1248],
+ [32, 8, 8, 2048]]
+ strides = [[1, 2, 2, 1], [1, 2, 2, 1], [1, 2, 2, 1], [1, 1, 1, 1]]
# Shrink each depth value
for i in input_sizes:
i[3] //= shrink
@@ -97,7 +103,7 @@ def GetShrunkInceptionMaxPoolShapes(shrink=30):
yield n, i, f, o, s, p
-class PoolingTest(tf.test.TestCase):
+class PoolingTest(test.TestCase):
def _VerifyOneType(self, pool_func, input_sizes, ksize, strides, padding,
data_format, data_type, expected, use_gpu):
@@ -122,13 +128,17 @@ class PoolingTest(tf.test.TestCase):
# numbers from 1.
x = [f * 1.0 for f in range(1, total_size + 1)]
with self.test_session(use_gpu=use_gpu) as sess:
- t = tf.constant(x, shape=input_sizes, dtype=data_type)
+ t = constant_op.constant(x, shape=input_sizes, dtype=data_type)
if data_format == "NCHW":
t = NHWCToNCHW(t)
ksize = NHWCToNCHW(ksize)
strides = NHWCToNCHW(strides)
- t = pool_func(t, ksize=ksize, strides=strides, padding=padding,
- data_format=data_format)
+ t = pool_func(
+ t,
+ ksize=ksize,
+ strides=strides,
+ padding=padding,
+ data_format=data_format)
if data_format == "NCHW":
t = NCHWToNHWC(t)
actual = t.eval()
@@ -151,11 +161,11 @@ class PoolingTest(tf.test.TestCase):
use_gpu: Whether we are running on GPU.
"""
self._VerifyOneType(pool_func, input_sizes, ksize, strides, padding,
- data_format, tf.float32, expected, use_gpu)
+ data_format, dtypes.float32, expected, use_gpu)
if not use_gpu or test_util.CudaSupportsHalfMatMulAndConv():
self._VerifyOneType(pool_func, input_sizes, ksize, strides, padding,
- data_format, tf.float16, expected, use_gpu)
+ data_format, dtypes.float16, expected, use_gpu)
def _VerifyValues(self, pool_func, input_sizes, ksize, strides, padding,
expected, use_gpu):
@@ -178,17 +188,25 @@ class PoolingTest(tf.test.TestCase):
def _testAvgPoolValidPadding(self, use_gpu):
expected_output = [7.0, 8.0, 9.0]
- self._VerifyValues(tf.nn.avg_pool, input_sizes=[1, 3, 3, 3],
- ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
- padding="VALID",
- expected=expected_output, use_gpu=use_gpu)
+ self._VerifyValues(
+ nn_ops.avg_pool,
+ input_sizes=[1, 3, 3, 3],
+ ksize=[1, 2, 2, 1],
+ strides=[1, 2, 2, 1],
+ padding="VALID",
+ expected=expected_output,
+ use_gpu=use_gpu)
def _testAvgPoolSamePadding(self, use_gpu):
expected_output = [8.5, 9.5, 10.5, 14.5, 15.5, 16.5]
- self._VerifyValues(tf.nn.avg_pool, input_sizes=[1, 2, 4, 3],
- ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
- padding="SAME",
- expected=expected_output, use_gpu=use_gpu)
+ self._VerifyValues(
+ nn_ops.avg_pool,
+ input_sizes=[1, 2, 4, 3],
+ ksize=[1, 2, 2, 1],
+ strides=[1, 2, 2, 1],
+ padding="SAME",
+ expected=expected_output,
+ use_gpu=use_gpu)
def _testAvgPoolSamePaddingNonSquareWindow(self, use_gpu):
# input is:
@@ -198,87 +216,122 @@ class PoolingTest(tf.test.TestCase):
# Window of [x, x] should do:
# [avg(1.0, 2.0), avg(2.0, padded0),
# avg(3.0, 4.0), avg(4.0, padded0)]
- self._VerifyValues(tf.nn.avg_pool, input_sizes=[1, 2, 2, 1],
- ksize=[1, 1, 2, 1], strides=[1, 1, 1, 1],
- padding="SAME",
- expected=[1.5, 2.0, 3.5, 4.0], use_gpu=use_gpu)
+ self._VerifyValues(
+ nn_ops.avg_pool,
+ input_sizes=[1, 2, 2, 1],
+ ksize=[1, 1, 2, 1],
+ strides=[1, 1, 1, 1],
+ padding="SAME",
+ expected=[1.5, 2.0, 3.5, 4.0],
+ use_gpu=use_gpu)
# Window of [x,
# x] should do:
# [avg(1.0, 3.0), avg(2.0, 4.0)
# avg(3.0, padded0), avg(4.0, padded0)]
- self._VerifyValues(tf.nn.avg_pool, input_sizes=[1, 2, 2, 1],
- ksize=[1, 2, 1, 1], strides=[1, 1, 1, 1],
- padding="SAME",
- expected=[2.0, 3.0, 3.0, 4.0], use_gpu=use_gpu)
+ self._VerifyValues(
+ nn_ops.avg_pool,
+ input_sizes=[1, 2, 2, 1],
+ ksize=[1, 2, 1, 1],
+ strides=[1, 1, 1, 1],
+ padding="SAME",
+ expected=[2.0, 3.0, 3.0, 4.0],
+ use_gpu=use_gpu)
def _testAvgPoolSamePaddingNonSquareWindowMultiBatch(self, use_gpu):
- self._VerifyValues(tf.nn.avg_pool, input_sizes=[2, 2, 2, 2],
- ksize=[1, 1, 2, 1], strides=[1, 1, 1, 1],
- padding="SAME",
- expected=[2.0, 3.0, 3.0, 4.0,
- 6.0, 7.0, 7.0, 8.0,
- 10.0, 11.0, 11.0, 12.0,
- 14.0, 15.0, 15.0, 16.0],
- use_gpu=use_gpu)
- self._VerifyValues(tf.nn.avg_pool, input_sizes=[2, 2, 2, 2],
- ksize=[1, 2, 1, 1], strides=[1, 1, 1, 1],
- padding="SAME",
- expected=[3.0, 4.0, 5.0, 6.0,
- 5.0, 6.0, 7.0, 8.0,
- 11.0, 12.0, 13.0, 14.0,
- 13.0, 14.0, 15.0, 16.0],
- use_gpu=use_gpu)
+ self._VerifyValues(
+ nn_ops.avg_pool,
+ input_sizes=[2, 2, 2, 2],
+ ksize=[1, 1, 2, 1],
+ strides=[1, 1, 1, 1],
+ padding="SAME",
+ expected=[
+ 2.0, 3.0, 3.0, 4.0, 6.0, 7.0, 7.0, 8.0, 10.0, 11.0, 11.0, 12.0,
+ 14.0, 15.0, 15.0, 16.0
+ ],
+ use_gpu=use_gpu)
+ self._VerifyValues(
+ nn_ops.avg_pool,
+ input_sizes=[2, 2, 2, 2],
+ ksize=[1, 2, 1, 1],
+ strides=[1, 1, 1, 1],
+ padding="SAME",
+ expected=[
+ 3.0, 4.0, 5.0, 6.0, 5.0, 6.0, 7.0, 8.0, 11.0, 12.0, 13.0, 14.0,
+ 13.0, 14.0, 15.0, 16.0
+ ],
+ use_gpu=use_gpu)
def _testAvgPoolValidPaddingUnevenStride(self, use_gpu):
- self._VerifyValues(tf.nn.avg_pool, input_sizes=[1, 3, 3, 3],
- ksize=[1, 2, 2, 1], strides=[1, 1, 2, 1],
- padding="VALID",
- expected=[7.0, 8.0, 9.0, 16.0, 17.0, 18.0],
- use_gpu=use_gpu)
- self._VerifyValues(tf.nn.avg_pool, input_sizes=[1, 3, 3, 3],
- ksize=[1, 2, 2, 1], strides=[1, 2, 1, 1],
- padding="VALID",
- expected=[7.0, 8.0, 9.0, 10.0, 11.0, 12.0],
- use_gpu=use_gpu)
+ self._VerifyValues(
+ nn_ops.avg_pool,
+ input_sizes=[1, 3, 3, 3],
+ ksize=[1, 2, 2, 1],
+ strides=[1, 1, 2, 1],
+ padding="VALID",
+ expected=[7.0, 8.0, 9.0, 16.0, 17.0, 18.0],
+ use_gpu=use_gpu)
+ self._VerifyValues(
+ nn_ops.avg_pool,
+ input_sizes=[1, 3, 3, 3],
+ ksize=[1, 2, 2, 1],
+ strides=[1, 2, 1, 1],
+ padding="VALID",
+ expected=[7.0, 8.0, 9.0, 10.0, 11.0, 12.0],
+ use_gpu=use_gpu)
def _testAvgPoolSamePadding4(self, use_gpu):
- expected_output = [11.0, 12.0, 13.0, 14.0, 19.0, 20.0, 21.0, 22.0, 43.0,
- 44.0, 45.0, 46.0, 51.0, 52.0, 53.0, 54.0]
- self._VerifyValues(tf.nn.avg_pool, input_sizes=[1, 4, 4, 4],
- ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
- padding="SAME",
- expected=expected_output, use_gpu=use_gpu)
+ expected_output = [
+ 11.0, 12.0, 13.0, 14.0, 19.0, 20.0, 21.0, 22.0, 43.0, 44.0, 45.0, 46.0,
+ 51.0, 52.0, 53.0, 54.0
+ ]
+ self._VerifyValues(
+ nn_ops.avg_pool,
+ input_sizes=[1, 4, 4, 4],
+ ksize=[1, 2, 2, 1],
+ strides=[1, 2, 2, 1],
+ padding="SAME",
+ expected=expected_output,
+ use_gpu=use_gpu)
def _testAvgPoolSamePaddingPacket4(self, use_gpu):
- expected_output = [21.0, 22.0, 23.0, 24.0, 27.0, 28.0, 29.0, 30.0,
- 45.0, 46.0, 47.0, 48.0, 51.0, 52.0, 53.0, 54.0]
- self._VerifyValues(tf.nn.avg_pool, input_sizes=[1, 4, 4, 4],
- ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
- padding="SAME",
- expected=expected_output, use_gpu=use_gpu)
+ expected_output = [
+ 21.0, 22.0, 23.0, 24.0, 27.0, 28.0, 29.0, 30.0, 45.0, 46.0, 47.0, 48.0,
+ 51.0, 52.0, 53.0, 54.0
+ ]
+ self._VerifyValues(
+ nn_ops.avg_pool,
+ input_sizes=[1, 4, 4, 4],
+ ksize=[1, 3, 3, 1],
+ strides=[1, 2, 2, 1],
+ padding="SAME",
+ expected=expected_output,
+ use_gpu=use_gpu)
def _testAvgPoolSamePaddingPacket8(self, use_gpu):
- expected_output = [73.0, 74.0, 75.0, 76.0, 77.0, 78.0, 79.0, 80.0, 89.0,
- 90.0, 91.0, 92.0, 93.0, 94.0, 95.0, 96.0, 105.0, 106.0,
- 107.0, 108.0, 109.0, 110.0, 111.0, 112.0, 117.0, 118.0,
- 119.0, 120.0, 121.0, 122.0, 123.0, 124.0, 201.0, 202.0,
- 203.0, 204.0, 205.0, 206.0, 207.0, 208.0, 217.0, 218.0,
- 219.0, 220.0, 221.0, 222.0, 223.0, 224.0, 233.0, 234.0,
- 235.0, 236.0, 237.0, 238.0, 239.0, 240.0, 245.0, 246.0,
- 247.0, 248.0, 249.0, 250.0, 251.0, 252.0, 329.0, 330.0,
- 331.0, 332.0, 333.0, 334.0, 335.0, 336.0, 345.0, 346.0,
- 347.0, 348.0, 349.0, 350.0, 351.0, 352.0, 361.0, 362.0,
- 363.0, 364.0, 365.0, 366.0, 367.0, 368.0, 373.0, 374.0,
- 375.0, 376.0, 377.0, 378.0, 379.0, 380.0, 425.0, 426.0,
- 427.0, 428.0, 429.0, 430.0, 431.0, 432.0, 441.0, 442.0,
- 443.0, 444.0, 445.0, 446.0, 447.0, 448.0, 457.0, 458.0,
- 459.0, 460.0, 461.0, 462.0, 463.0, 464.0, 469.0, 470.0,
- 471.0, 472.0, 473.0, 474.0, 475.0, 476.0]
- self._VerifyValues(tf.nn.avg_pool, input_sizes=[1, 8, 8, 8],
- ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
- padding="SAME",
- expected=expected_output, use_gpu=use_gpu)
+ expected_output = [
+ 73.0, 74.0, 75.0, 76.0, 77.0, 78.0, 79.0, 80.0, 89.0, 90.0, 91.0, 92.0,
+ 93.0, 94.0, 95.0, 96.0, 105.0, 106.0, 107.0, 108.0, 109.0, 110.0, 111.0,
+ 112.0, 117.0, 118.0, 119.0, 120.0, 121.0, 122.0, 123.0, 124.0, 201.0,
+ 202.0, 203.0, 204.0, 205.0, 206.0, 207.0, 208.0, 217.0, 218.0, 219.0,
+ 220.0, 221.0, 222.0, 223.0, 224.0, 233.0, 234.0, 235.0, 236.0, 237.0,
+ 238.0, 239.0, 240.0, 245.0, 246.0, 247.0, 248.0, 249.0, 250.0, 251.0,
+ 252.0, 329.0, 330.0, 331.0, 332.0, 333.0, 334.0, 335.0, 336.0, 345.0,
+ 346.0, 347.0, 348.0, 349.0, 350.0, 351.0, 352.0, 361.0, 362.0, 363.0,
+ 364.0, 365.0, 366.0, 367.0, 368.0, 373.0, 374.0, 375.0, 376.0, 377.0,
+ 378.0, 379.0, 380.0, 425.0, 426.0, 427.0, 428.0, 429.0, 430.0, 431.0,
+ 432.0, 441.0, 442.0, 443.0, 444.0, 445.0, 446.0, 447.0, 448.0, 457.0,
+ 458.0, 459.0, 460.0, 461.0, 462.0, 463.0, 464.0, 469.0, 470.0, 471.0,
+ 472.0, 473.0, 474.0, 475.0, 476.0
+ ]
+ self._VerifyValues(
+ nn_ops.avg_pool,
+ input_sizes=[1, 8, 8, 8],
+ ksize=[1, 3, 3, 1],
+ strides=[1, 2, 2, 1],
+ padding="SAME",
+ expected=expected_output,
+ use_gpu=use_gpu)
def testAvgPooling(self):
for use_gpu in True, False:
@@ -293,17 +346,25 @@ class PoolingTest(tf.test.TestCase):
def _testMaxPoolValidPadding(self, use_gpu):
expected_output = [13.0, 14.0, 15.0]
- self._VerifyValues(tf.nn.max_pool, input_sizes=[1, 3, 3, 3],
- ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
- padding="VALID",
- expected=expected_output, use_gpu=use_gpu)
+ self._VerifyValues(
+ nn_ops.max_pool,
+ input_sizes=[1, 3, 3, 3],
+ ksize=[1, 2, 2, 1],
+ strides=[1, 2, 2, 1],
+ padding="VALID",
+ expected=expected_output,
+ use_gpu=use_gpu)
def _testMaxPoolSamePadding(self, use_gpu):
expected_output = [13.0, 14.0, 15.0, 16.0, 17.0, 18.0]
- self._VerifyValues(tf.nn.max_pool, input_sizes=[1, 2, 3, 3],
- ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
- padding="SAME",
- expected=expected_output, use_gpu=use_gpu)
+ self._VerifyValues(
+ nn_ops.max_pool,
+ input_sizes=[1, 2, 3, 3],
+ ksize=[1, 2, 2, 1],
+ strides=[1, 2, 2, 1],
+ padding="SAME",
+ expected=expected_output,
+ use_gpu=use_gpu)
def _testMaxPoolSamePaddingNonSquareWindow(self, use_gpu):
# input is:
@@ -314,52 +375,71 @@ class PoolingTest(tf.test.TestCase):
#
# [max(1.0, 2.0), max(2.0, padded0),
# max(3.0, 4.0), max(4.0, padded0)]
- self._VerifyValues(tf.nn.max_pool, input_sizes=[1, 2, 2, 1],
- ksize=[1, 1, 2, 1], strides=[1, 1, 1, 1],
- padding="SAME",
- expected=[2.0, 2.0, 4.0, 4.0], use_gpu=use_gpu)
+ self._VerifyValues(
+ nn_ops.max_pool,
+ input_sizes=[1, 2, 2, 1],
+ ksize=[1, 1, 2, 1],
+ strides=[1, 1, 1, 1],
+ padding="SAME",
+ expected=[2.0, 2.0, 4.0, 4.0],
+ use_gpu=use_gpu)
def _testMaxPoolValidPaddingUnevenStride(self, use_gpu):
- self._VerifyValues(tf.nn.max_pool, input_sizes=[1, 4, 4, 1],
- ksize=[1, 2, 2, 1], strides=[1, 1, 2, 1],
- padding="VALID",
- expected=[6.0, 8.0, 10.0, 12.0, 14.0, 16.0],
- use_gpu=use_gpu)
- self._VerifyValues(tf.nn.max_pool, input_sizes=[1, 4, 4, 1],
- ksize=[1, 2, 2, 1], strides=[1, 2, 1, 1],
- padding="VALID",
- expected=[6.0, 7.0, 8.0, 14.0, 15.0, 16.0],
- use_gpu=use_gpu)
+ self._VerifyValues(
+ nn_ops.max_pool,
+ input_sizes=[1, 4, 4, 1],
+ ksize=[1, 2, 2, 1],
+ strides=[1, 1, 2, 1],
+ padding="VALID",
+ expected=[6.0, 8.0, 10.0, 12.0, 14.0, 16.0],
+ use_gpu=use_gpu)
+ self._VerifyValues(
+ nn_ops.max_pool,
+ input_sizes=[1, 4, 4, 1],
+ ksize=[1, 2, 2, 1],
+ strides=[1, 2, 1, 1],
+ padding="VALID",
+ expected=[6.0, 7.0, 8.0, 14.0, 15.0, 16.0],
+ use_gpu=use_gpu)
def _testMaxPoolSamePaddingPacket4(self, use_gpu):
- expected_output = [21.0, 22.0, 23.0, 24.0, 29.0, 30.0, 31.0, 32.0, 53.0,
- 54.0, 55.0, 56.0, 61.0, 62.0, 63.0, 64.0]
- self._VerifyValues(tf.nn.max_pool, input_sizes=[1, 4, 4, 4],
- ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
- padding="SAME",
- expected=expected_output, use_gpu=use_gpu)
+ expected_output = [
+ 21.0, 22.0, 23.0, 24.0, 29.0, 30.0, 31.0, 32.0, 53.0, 54.0, 55.0, 56.0,
+ 61.0, 62.0, 63.0, 64.0
+ ]
+ self._VerifyValues(
+ nn_ops.max_pool,
+ input_sizes=[1, 4, 4, 4],
+ ksize=[1, 2, 2, 1],
+ strides=[1, 2, 2, 1],
+ padding="SAME",
+ expected=expected_output,
+ use_gpu=use_gpu)
def _testMaxPoolSamePaddingPacket8(self, use_gpu):
- expected_output = [145.0, 146.0, 147.0, 148.0, 149.0, 150.0, 151.0, 152.0,
- 161.0, 162.0, 163.0, 164.0, 165.0, 166.0, 167.0, 168.0,
- 177.0, 178.0, 179.0, 180.0, 181.0, 182.0, 183.0, 184.0,
- 185.0, 186.0, 187.0, 188.0, 189.0, 190.0, 191.0, 192.0,
- 273.0, 274.0, 275.0, 276.0, 277.0, 278.0, 279.0, 280.0,
- 289.0, 290.0, 291.0, 292.0, 293.0, 294.0, 295.0, 296.0,
- 305.0, 306.0, 307.0, 308.0, 309.0, 310.0, 311.0, 312.0,
- 313.0, 314.0, 315.0, 316.0, 317.0, 318.0, 319.0, 320.0,
- 401.0, 402.0, 403.0, 404.0, 405.0, 406.0, 407.0, 408.0,
- 417.0, 418.0, 419.0, 420.0, 421.0, 422.0, 423.0, 424.0,
- 433.0, 434.0, 435.0, 436.0, 437.0, 438.0, 439.0, 440.0,
- 441.0, 442.0, 443.0, 444.0, 445.0, 446.0, 447.0, 448.0,
- 465.0, 466.0, 467.0, 468.0, 469.0, 470.0, 471.0, 472.0,
- 481.0, 482.0, 483.0, 484.0, 485.0, 486.0, 487.0, 488.0,
- 497.0, 498.0, 499.0, 500.0, 501.0, 502.0, 503.0, 504.0,
- 505.0, 506.0, 507.0, 508.0, 509.0, 510.0, 511.0, 512.0]
- self._VerifyValues(tf.nn.max_pool, input_sizes=[1, 8, 8, 8],
- ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
- padding="SAME",
- expected=expected_output, use_gpu=use_gpu)
+ expected_output = [
+ 145.0, 146.0, 147.0, 148.0, 149.0, 150.0, 151.0, 152.0, 161.0, 162.0,
+ 163.0, 164.0, 165.0, 166.0, 167.0, 168.0, 177.0, 178.0, 179.0, 180.0,
+ 181.0, 182.0, 183.0, 184.0, 185.0, 186.0, 187.0, 188.0, 189.0, 190.0,
+ 191.0, 192.0, 273.0, 274.0, 275.0, 276.0, 277.0, 278.0, 279.0, 280.0,
+ 289.0, 290.0, 291.0, 292.0, 293.0, 294.0, 295.0, 296.0, 305.0, 306.0,
+ 307.0, 308.0, 309.0, 310.0, 311.0, 312.0, 313.0, 314.0, 315.0, 316.0,
+ 317.0, 318.0, 319.0, 320.0, 401.0, 402.0, 403.0, 404.0, 405.0, 406.0,
+ 407.0, 408.0, 417.0, 418.0, 419.0, 420.0, 421.0, 422.0, 423.0, 424.0,
+ 433.0, 434.0, 435.0, 436.0, 437.0, 438.0, 439.0, 440.0, 441.0, 442.0,
+ 443.0, 444.0, 445.0, 446.0, 447.0, 448.0, 465.0, 466.0, 467.0, 468.0,
+ 469.0, 470.0, 471.0, 472.0, 481.0, 482.0, 483.0, 484.0, 485.0, 486.0,
+ 487.0, 488.0, 497.0, 498.0, 499.0, 500.0, 501.0, 502.0, 503.0, 504.0,
+ 505.0, 506.0, 507.0, 508.0, 509.0, 510.0, 511.0, 512.0
+ ]
+ self._VerifyValues(
+ nn_ops.max_pool,
+ input_sizes=[1, 8, 8, 8],
+ ksize=[1, 3, 3, 1],
+ strides=[1, 2, 2, 1],
+ padding="SAME",
+ expected=expected_output,
+ use_gpu=use_gpu)
def testMaxPooling(self):
for use_gpu in True, False:
@@ -376,10 +456,14 @@ class PoolingTest(tf.test.TestCase):
# [1.0, ..., 10.0] along depth,
#
# We maxpool by depth in patches of 2.
- self._VerifyValues(tf.nn.max_pool, input_sizes=[1, 1, 1, 10],
- ksize=[1, 1, 1, 2], strides=[1, 1, 1, 2],
- padding="SAME",
- expected=[2.0, 4.0, 6.0, 8.0, 10.0], use_gpu=False)
+ self._VerifyValues(
+ nn_ops.max_pool,
+ input_sizes=[1, 1, 1, 10],
+ ksize=[1, 1, 1, 2],
+ strides=[1, 1, 1, 2],
+ padding="SAME",
+ expected=[2.0, 4.0, 6.0, 8.0, 10.0],
+ use_gpu=False)
def testDepthwiseMaxPool2x2DepthWindow3(self):
# input is:
@@ -387,73 +471,84 @@ class PoolingTest(tf.test.TestCase):
# a 2x2x6 cube, and we depthwise max across 3 to produce a 2x2x2
# output. Each node has contiguous values, so the depthwise max
# should be multiples of 3.0.
- self._VerifyValues(tf.nn.max_pool, input_sizes=[1, 2, 2, 6],
- ksize=[1, 1, 1, 3], strides=[1, 1, 1, 3],
- padding="SAME",
- expected=[3.0, 6.0, 9.0, 12.0, 15.0, 18.0, 21.0, 24.0],
- use_gpu=False)
+ self._VerifyValues(
+ nn_ops.max_pool,
+ input_sizes=[1, 2, 2, 6],
+ ksize=[1, 1, 1, 3],
+ strides=[1, 1, 1, 3],
+ padding="SAME",
+ expected=[3.0, 6.0, 9.0, 12.0, 15.0, 18.0, 21.0, 24.0],
+ use_gpu=False)
def testKernelSmallerThanStrideValid(self):
for use_gpu in [True, False]:
- self._VerifyValues(tf.nn.max_pool,
- input_sizes=[1, 7, 7, 1],
- ksize=[1, 2, 2, 1],
- strides=[1, 3, 3, 1],
- padding="VALID",
- expected=[9, 12, 30, 33],
- use_gpu=use_gpu)
-
- self._VerifyValues(tf.nn.avg_pool,
- input_sizes=[1, 7, 7, 1],
- ksize=[1, 2, 2, 1],
- strides=[1, 3, 3, 1],
- padding="VALID",
- expected=[5, 8, 26, 29],
- use_gpu=use_gpu)
+ self._VerifyValues(
+ nn_ops.max_pool,
+ input_sizes=[1, 7, 7, 1],
+ ksize=[1, 2, 2, 1],
+ strides=[1, 3, 3, 1],
+ padding="VALID",
+ expected=[9, 12, 30, 33],
+ use_gpu=use_gpu)
+
+ self._VerifyValues(
+ nn_ops.avg_pool,
+ input_sizes=[1, 7, 7, 1],
+ ksize=[1, 2, 2, 1],
+ strides=[1, 3, 3, 1],
+ padding="VALID",
+ expected=[5, 8, 26, 29],
+ use_gpu=use_gpu)
def testKernelSmallerThanStrideSame(self):
for use_gpu in [True, False]:
- for pool_func in [tf.nn.max_pool, tf.nn.avg_pool]:
- self._VerifyValues(pool_func,
- input_sizes=[1, 3, 3, 1],
- ksize=[1, 1, 1, 1],
- strides=[1, 2, 2, 1],
- padding="SAME",
- expected=[1, 3, 7, 9],
- use_gpu=use_gpu)
-
- self._VerifyValues(pool_func,
- input_sizes=[1, 4, 4, 1],
- ksize=[1, 1, 1, 1],
- strides=[1, 2, 2, 1],
- padding="SAME",
- expected=[1, 3, 9, 11],
- use_gpu=use_gpu)
-
- def _testDepthwiseMaxPoolInvalidConfig(self, in_size, ksize, strides,
- error_msg, use_gpu=False):
+ for pool_func in [nn_ops.max_pool, nn_ops.avg_pool]:
+ self._VerifyValues(
+ pool_func,
+ input_sizes=[1, 3, 3, 1],
+ ksize=[1, 1, 1, 1],
+ strides=[1, 2, 2, 1],
+ padding="SAME",
+ expected=[1, 3, 7, 9],
+ use_gpu=use_gpu)
+
+ self._VerifyValues(
+ pool_func,
+ input_sizes=[1, 4, 4, 1],
+ ksize=[1, 1, 1, 1],
+ strides=[1, 2, 2, 1],
+ padding="SAME",
+ expected=[1, 3, 9, 11],
+ use_gpu=use_gpu)
+
+ def _testDepthwiseMaxPoolInvalidConfig(self,
+ in_size,
+ ksize,
+ strides,
+ error_msg,
+ use_gpu=False):
with self.test_session(use_gpu=use_gpu) as sess:
- t = tf.constant(1.0, shape=in_size)
- with self.assertRaisesRegexp(tf.errors.UnimplementedError, error_msg):
- t = tf.nn.max_pool(
+ t = constant_op.constant(1.0, shape=in_size)
+ with self.assertRaisesRegexp(errors_impl.UnimplementedError, error_msg):
+ t = nn_ops.max_pool(
t, ksize=ksize, strides=strides, padding="SAME").eval()
def testDepthwiseMaxPoolInvalidConfigs(self):
self._testDepthwiseMaxPoolInvalidConfig(
- [1, 2, 2, 4], [1, 2, 2, 2],
- [1, 1, 1, 2], "exactly one of pooling across depth")
+ [1, 2, 2, 4], [1, 2, 2, 2], [1, 1, 1, 2],
+ "exactly one of pooling across depth")
self._testDepthwiseMaxPoolInvalidConfig(
- [1, 2, 2, 4], [1, 1, 1, 2],
- [1, 1, 1, 1], "depth window to equal the depth stride")
- self._testDepthwiseMaxPoolInvalidConfig(
- [1, 2, 2, 4], [1, 1, 1, 3],
- [1, 1, 1, 3], "evenly divide")
- if tf.test.is_gpu_available():
+ [1, 2, 2, 4], [1, 1, 1, 2], [1, 1, 1, 1],
+ "depth window to equal the depth stride")
+ self._testDepthwiseMaxPoolInvalidConfig([1, 2, 2, 4], [1, 1, 1, 3],
+ [1, 1, 1, 3], "evenly divide")
+ if test.is_gpu_available():
with self.test_session(use_gpu=True):
- t = tf.constant(1.0, shape=[1, 2, 2, 4])
+ t = constant_op.constant(1.0, shape=[1, 2, 2, 4])
with self.assertRaisesOpError("for CPU devices"):
- tf.nn.max_pool(t, ksize=[1, 1, 1, 2], strides=[1, 1, 1, 2],
- padding="SAME").eval()
+ nn_ops.max_pool(
+ t, ksize=[1, 1, 1, 2], strides=[1, 1, 1, 2],
+ padding="SAME").eval()
# The following are tests that verify that the CPU and GPU implementations
# produce the same results.
@@ -461,12 +556,12 @@ class PoolingTest(tf.test.TestCase):
for dtype in np.float32, np.float16:
tensor_input = np.random.rand(*input_shape).astype(dtype)
with self.test_session(use_gpu=True):
- t = tf.constant(tensor_input, shape=input_shape)
- out_op, _ = tf.nn.max_pool_with_argmax(t, ksize, strides, padding)
+ t = constant_op.constant(tensor_input, shape=input_shape)
+ out_op, _ = nn_ops.max_pool_with_argmax(t, ksize, strides, padding)
gpu_val = out_op.eval()
with self.test_session(use_gpu=False):
- t = tf.constant(tensor_input, shape=input_shape)
- out_op = tf.nn.max_pool(t, ksize, strides, padding)
+ t = constant_op.constant(tensor_input, shape=input_shape)
+ out_op = nn_ops.max_pool(t, ksize, strides, padding)
cpu_val = out_op.eval()
self.assertAllCloseAccordingToType(cpu_val, gpu_val)
@@ -478,19 +573,19 @@ class PoolingTest(tf.test.TestCase):
tensor_input = np.random.random_integers(0, 3, input_shape).astype(dtype)
tensor_output = np.random.rand(*output_shape).astype(dtype)
with self.test_session(use_gpu=True):
- t = tf.constant(tensor_input, shape=input_shape)
- _, argmax_op = tf.nn.max_pool_with_argmax(t, ksize, strides, padding)
+ t = constant_op.constant(tensor_input, shape=input_shape)
+ _, argmax_op = nn_ops.max_pool_with_argmax(t, ksize, strides, padding)
argmax = argmax_op.eval()
- grad_in = tf.constant(tensor_output, shape=output_shape)
+ grad_in = constant_op.constant(tensor_output, shape=output_shape)
out_op = gen_nn_ops._max_pool_grad_with_argmax(t, grad_in, argmax,
ksize, strides, padding)
gpu_val = out_op.eval()
self.assertShapeEqual(gpu_val, out_op)
with self.test_session(use_gpu=False):
- t = tf.constant(tensor_input, shape=input_shape)
- out_op = tf.nn.max_pool(t, ksize, strides, padding)
+ t = constant_op.constant(tensor_input, shape=input_shape)
+ out_op = nn_ops.max_pool(t, ksize, strides, padding)
orig_out = out_op.eval()
- grad_in = tf.constant(tensor_output, shape=output_shape)
+ grad_in = constant_op.constant(tensor_output, shape=output_shape)
out_op = gen_nn_ops._max_pool_grad(t, orig_out, grad_in, ksize, strides,
padding)
cpu_val = out_op.eval()
@@ -504,16 +599,17 @@ class PoolingTest(tf.test.TestCase):
def testMaxPoolingWithArgmax(self):
# MaxPoolWithArgMax is implemented only on GPU.
- if not tf.test.is_gpu_available():
+ if not test.is_gpu_available():
return
tensor_input = [1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0]
with self.test_session(use_gpu=True) as sess:
- t = tf.constant(tensor_input, shape=[1, 3, 3, 1])
- out_op, argmax_op = tf.nn.max_pool_with_argmax(t,
- ksize=[1, 2, 2, 1],
- strides=[1, 1, 1, 1],
- Targmax=tf.int64,
- padding="VALID")
+ t = constant_op.constant(tensor_input, shape=[1, 3, 3, 1])
+ out_op, argmax_op = nn_ops.max_pool_with_argmax(
+ t,
+ ksize=[1, 2, 2, 1],
+ strides=[1, 1, 1, 1],
+ Targmax=dtypes.int64,
+ padding="VALID")
out, argmax = sess.run([out_op, argmax_op])
self.assertShapeEqual(out, out_op)
self.assertShapeEqual(argmax, argmax_op)
@@ -522,27 +618,38 @@ class PoolingTest(tf.test.TestCase):
def testMaxPoolingGradWithArgmax(self):
# MaxPoolWithArgMax is implemented only on GPU.
- if not tf.test.is_gpu_available():
+ if not test.is_gpu_available():
return
orig_input = [1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0]
tensor_input = [11.0, 12.0, 13.0, 14.0]
tensor_argmax = list(np.array([0, 1, 3, 5], dtype=np.int64))
with self.test_session(use_gpu=True) as sess:
- orig_in = tf.constant(orig_input, shape=[1, 3, 3, 1])
- t = tf.constant(tensor_input, shape=[1, 2, 2, 1])
- argmax = tf.constant(tensor_argmax, shape=[1, 2, 2, 1],
- dtype=tf.int64)
- out_op = gen_nn_ops._max_pool_grad_with_argmax(orig_in, t, argmax,
- ksize=[1, 2, 2, 1],
- strides=[1, 1, 1, 1],
- padding="VALID")
+ orig_in = constant_op.constant(orig_input, shape=[1, 3, 3, 1])
+ t = constant_op.constant(tensor_input, shape=[1, 2, 2, 1])
+ argmax = constant_op.constant(
+ tensor_argmax, shape=[1, 2, 2, 1], dtype=dtypes.int64)
+ out_op = gen_nn_ops._max_pool_grad_with_argmax(
+ orig_in,
+ t,
+ argmax,
+ ksize=[1, 2, 2, 1],
+ strides=[1, 1, 1, 1],
+ padding="VALID")
out = out_op.eval().flatten()
- self.assertAllClose(out, [11.0, 12.0, 0.0, 13.0, 0.0,
- 14.0, 0.0, 0.0, 0.0])
-
- def _ConstructAndTestGradient(self, pool_func, input_sizes, output_sizes,
- window_rows, window_cols, row_stride,
- col_stride, padding, data_format, use_gpu,
+ self.assertAllClose(out,
+ [11.0, 12.0, 0.0, 13.0, 0.0, 14.0, 0.0, 0.0, 0.0])
+
+ def _ConstructAndTestGradient(self,
+ pool_func,
+ input_sizes,
+ output_sizes,
+ window_rows,
+ window_cols,
+ row_stride,
+ col_stride,
+ padding,
+ data_format,
+ use_gpu,
x_init_value=None):
"""Verifies the gradients of the avg pooling function.
@@ -569,8 +676,8 @@ class PoolingTest(tf.test.TestCase):
# numbers from 1.
x = [f * 1.0 for f in range(1, total_size + 1)]
with self.test_session(use_gpu=use_gpu):
- input_tensor = tf.constant(x, shape=input_sizes, name="input")
- if pool_func == tf.nn.avg_pool:
+ input_tensor = constant_op.constant(x, shape=input_sizes, name="input")
+ if pool_func == nn_ops.avg_pool:
func_name = "avg_pool"
err_margin = 1e-4
else:
@@ -588,67 +695,129 @@ class PoolingTest(tf.test.TestCase):
ksize = [1, window_rows, window_rows, 1]
strides = [1, row_stride, col_stride, 1]
t = input_tensor
- t = pool_func(t, ksize=ksize, strides=strides, padding=padding,
- data_format=data_format, name=func_name)
+ t = pool_func(
+ t,
+ ksize=ksize,
+ strides=strides,
+ padding=padding,
+ data_format=data_format,
+ name=func_name)
if data_format == "NCHW":
t = NCHWToNHWC(t)
- err = tf.test.compute_gradient_error(input_tensor,
- input_sizes,
- t,
- output_sizes,
- x_init_value=x_init_value,
- delta=1e-2)
+ err = gradient_checker.compute_gradient_error(
+ input_tensor,
+ input_sizes,
+ t,
+ output_sizes,
+ x_init_value=x_init_value,
+ delta=1e-2)
print("%s gradient error = " % func_name, err)
self.assertLess(err, err_margin)
def _testMaxPoolGradValidPadding1_1(self, data_format, use_gpu):
self._ConstructAndTestGradient(
- tf.nn.max_pool, input_sizes=[1, 3, 3, 1],
- output_sizes=[1, 3, 3, 1], window_rows=1, window_cols=1, row_stride=1,
- col_stride=1, padding="VALID", data_format=data_format, use_gpu=use_gpu)
+ nn_ops.max_pool,
+ input_sizes=[1, 3, 3, 1],
+ output_sizes=[1, 3, 3, 1],
+ window_rows=1,
+ window_cols=1,
+ row_stride=1,
+ col_stride=1,
+ padding="VALID",
+ data_format=data_format,
+ use_gpu=use_gpu)
def _testMaxPoolGradValidPadding2_1_6(self, data_format, use_gpu):
self._ConstructAndTestGradient(
- tf.nn.max_pool, input_sizes=[2, 6, 6, 3],
- output_sizes=[2, 5, 5, 3], window_rows=2, window_cols=2, row_stride=1,
- col_stride=1, padding="VALID", data_format=data_format, use_gpu=use_gpu)
+ nn_ops.max_pool,
+ input_sizes=[2, 6, 6, 3],
+ output_sizes=[2, 5, 5, 3],
+ window_rows=2,
+ window_cols=2,
+ row_stride=1,
+ col_stride=1,
+ padding="VALID",
+ data_format=data_format,
+ use_gpu=use_gpu)
def _testMaxPoolGradValidPadding2_1_7(self, data_format, use_gpu):
self._ConstructAndTestGradient(
- tf.nn.max_pool, input_sizes=[2, 7, 7, 3],
- output_sizes=[2, 6, 6, 3], window_rows=2, window_cols=2, row_stride=1,
- col_stride=1, padding="VALID", data_format=data_format, use_gpu=use_gpu)
+ nn_ops.max_pool,
+ input_sizes=[2, 7, 7, 3],
+ output_sizes=[2, 6, 6, 3],
+ window_rows=2,
+ window_cols=2,
+ row_stride=1,
+ col_stride=1,
+ padding="VALID",
+ data_format=data_format,
+ use_gpu=use_gpu)
def _testMaxPoolGradValidPadding2_2(self, data_format, use_gpu):
self._ConstructAndTestGradient(
- tf.nn.max_pool, input_sizes=[2, 2, 2, 3],
- output_sizes=[2, 1, 1, 3], window_rows=2, window_cols=2, row_stride=2,
- col_stride=2, padding="VALID", data_format=data_format, use_gpu=use_gpu)
+ nn_ops.max_pool,
+ input_sizes=[2, 2, 2, 3],
+ output_sizes=[2, 1, 1, 3],
+ window_rows=2,
+ window_cols=2,
+ row_stride=2,
+ col_stride=2,
+ padding="VALID",
+ data_format=data_format,
+ use_gpu=use_gpu)
def _testMaxPoolGradSamePadding1_1(self, data_format, use_gpu):
self._ConstructAndTestGradient(
- tf.nn.max_pool, input_sizes=[2, 2, 4, 3],
- output_sizes=[2, 2, 4, 3], window_rows=1, window_cols=1, row_stride=1,
- col_stride=1, padding="SAME", data_format=data_format, use_gpu=use_gpu)
+ nn_ops.max_pool,
+ input_sizes=[2, 2, 4, 3],
+ output_sizes=[2, 2, 4, 3],
+ window_rows=1,
+ window_cols=1,
+ row_stride=1,
+ col_stride=1,
+ padding="SAME",
+ data_format=data_format,
+ use_gpu=use_gpu)
def _testMaxPoolGradSamePadding2_1(self, data_format, use_gpu):
self._ConstructAndTestGradient(
- tf.nn.max_pool, input_sizes=[2, 2, 4, 3],
- output_sizes=[2, 2, 4, 3], window_rows=2, window_cols=2, row_stride=1,
- col_stride=1, padding="SAME", data_format=data_format, use_gpu=use_gpu)
+ nn_ops.max_pool,
+ input_sizes=[2, 2, 4, 3],
+ output_sizes=[2, 2, 4, 3],
+ window_rows=2,
+ window_cols=2,
+ row_stride=1,
+ col_stride=1,
+ padding="SAME",
+ data_format=data_format,
+ use_gpu=use_gpu)
def _testMaxPoolGradSamePadding2_2(self, data_format, use_gpu):
self._ConstructAndTestGradient(
- tf.nn.max_pool, input_sizes=[2, 2, 4, 3],
- output_sizes=[2, 1, 2, 3], window_rows=2, window_cols=2, row_stride=2,
- col_stride=2, padding="SAME", data_format=data_format, use_gpu=use_gpu)
+ nn_ops.max_pool,
+ input_sizes=[2, 2, 4, 3],
+ output_sizes=[2, 1, 2, 3],
+ window_rows=2,
+ window_cols=2,
+ row_stride=2,
+ col_stride=2,
+ padding="SAME",
+ data_format=data_format,
+ use_gpu=use_gpu)
def _testMaxPoolGradSamePadding3_1(self, data_format, use_gpu):
self._ConstructAndTestGradient(
- tf.nn.max_pool, input_sizes=[1, 7, 7, 1],
- output_sizes=[1, 7, 7, 1], window_rows=3, window_cols=3, row_stride=1,
- col_stride=1, padding="SAME", data_format=data_format, use_gpu=use_gpu)
+ nn_ops.max_pool,
+ input_sizes=[1, 7, 7, 1],
+ output_sizes=[1, 7, 7, 1],
+ window_rows=3,
+ window_cols=3,
+ row_stride=1,
+ col_stride=1,
+ padding="SAME",
+ data_format=data_format,
+ use_gpu=use_gpu)
def testMaxPoolGrad(self):
for (data_format, use_gpu) in GetTestConfigs():
@@ -679,26 +848,26 @@ class PoolingTest(tf.test.TestCase):
Returns:
A Tensor.
"""
- return gen_nn_ops._max_pool_grad(
- orig_input, orig_output, grad,
- [1, window_rows, window_cols, 1], [1, row_stride, col_stride, 1],
- padding)
+ return gen_nn_ops._max_pool_grad(orig_input, orig_output, grad,
+ [1, window_rows, window_cols, 1],
+ [1, row_stride, col_stride, 1], padding)
def _testMaxPoolGradDirect(self, input_data, output_backprop,
expected_input_backprop, input_sizes, output_sizes,
window_rows, window_cols, row_stride, col_stride,
padding, use_gpu):
with self.test_session(use_gpu=use_gpu) as sess:
- input_tensor = tf.constant(input_data, shape=input_sizes)
- output_tensor = tf.nn.max_pool(
- input_tensor, [1, window_rows, window_cols, 1],
- [1, row_stride, col_stride, 1], padding)
- output_backprop_tensor = tf.constant(output_backprop,
- shape=output_sizes)
-
- input_backprop_tensor = self._MaxPoolGrad(
- input_tensor, output_tensor, output_backprop_tensor,
- window_rows, window_cols, row_stride, col_stride, padding)
+ input_tensor = constant_op.constant(input_data, shape=input_sizes)
+ output_tensor = nn_ops.max_pool(input_tensor,
+ [1, window_rows, window_cols, 1],
+ [1, row_stride, col_stride, 1], padding)
+ output_backprop_tensor = constant_op.constant(
+ output_backprop, shape=output_sizes)
+
+ input_backprop_tensor = self._MaxPoolGrad(input_tensor, output_tensor,
+ output_backprop_tensor,
+ window_rows, window_cols,
+ row_stride, col_stride, padding)
actual_input_backprop = input_backprop_tensor.eval()
self.assertShapeEqual(actual_input_backprop, input_backprop_tensor)
@@ -708,146 +877,203 @@ class PoolingTest(tf.test.TestCase):
actual_output = output_tensor.eval().flatten()
actual_output = self._GetNdArray(actual_output)
- self.assertAllClose(expected_input_backprop, actual_input_backprop,
- rtol=1e-6, atol=1e-6)
+ self.assertAllClose(
+ expected_input_backprop, actual_input_backprop, rtol=1e-6, atol=1e-6)
def _testMaxPoolGradDirect1_1(self):
input_data = [
- 1.0, 1.0, 1.0, 1.0,
- 1.0, 1.0, 1.0, 1.0,
- 1.0, 1.0, 1.0, 1.0,
- 1.0, 1.0, 1.0, 1.0]
- output_backprop = [
- 11.0, 12.0, 13.0,
- 15.0, 16.0, 17.0,
- 19.0, 20.0, 21.0]
+ 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
+ 1.0, 1.0
+ ]
+ output_backprop = [11.0, 12.0, 13.0, 15.0, 16.0, 17.0, 19.0, 20.0, 21.0]
expected_input_backprop = [
- 11.0, 12.0, 13.0, 0.0,
- 15.0, 16.0, 17.0, 0.0,
- 19.0, 20.0, 21.0, 0.0,
- 0.0, 0.0, 0.0, 0.0]
+ 11.0, 12.0, 13.0, 0.0, 15.0, 16.0, 17.0, 0.0, 19.0, 20.0, 21.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0
+ ]
for use_gpu in True, False:
self._testMaxPoolGradDirect(
- input_data, output_backprop, expected_input_backprop,
- input_sizes=[1, 4, 4, 1], output_sizes=[1, 3, 3, 1],
- window_rows=2, window_cols=2, row_stride=1, col_stride=1,
- padding="VALID", use_gpu=use_gpu)
+ input_data,
+ output_backprop,
+ expected_input_backprop,
+ input_sizes=[1, 4, 4, 1],
+ output_sizes=[1, 3, 3, 1],
+ window_rows=2,
+ window_cols=2,
+ row_stride=1,
+ col_stride=1,
+ padding="VALID",
+ use_gpu=use_gpu)
def _testMaxPoolGradDirect1_2(self):
input_data = [
- 1.0, 0.0, 1.0, 0.0,
- 0.0, 1.0, 0.0, 1.0,
- 1.0, 0.0, 1.0, 0.0,
- 0.0, 1.0, 0.0, 1.0]
- output_backprop = [
- 11.0, 12.0, 13.0,
- 15.0, 16.0, 17.0,
- 19.0, 20.0, 21.0]
+ 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0,
+ 0.0, 1.0
+ ]
+ output_backprop = [11.0, 12.0, 13.0, 15.0, 16.0, 17.0, 19.0, 20.0, 21.0]
expected_input_backprop = [
- 11.0, 0.0, 25.0, 0.0,
- 0.0, 31.0, 0.0, 17.0,
- 19.0, 0.0, 41.0, 0.0,
- 0.0, 0.0, 0.0, 0.0]
+ 11.0, 0.0, 25.0, 0.0, 0.0, 31.0, 0.0, 17.0, 19.0, 0.0, 41.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0
+ ]
for use_gpu in True, False:
self._testMaxPoolGradDirect(
- input_data, output_backprop, expected_input_backprop,
- input_sizes=[1, 4, 4, 1], output_sizes=[1, 3, 3, 1],
- window_rows=2, window_cols=2, row_stride=1, col_stride=1,
- padding="VALID", use_gpu=use_gpu)
+ input_data,
+ output_backprop,
+ expected_input_backprop,
+ input_sizes=[1, 4, 4, 1],
+ output_sizes=[1, 3, 3, 1],
+ window_rows=2,
+ window_cols=2,
+ row_stride=1,
+ col_stride=1,
+ padding="VALID",
+ use_gpu=use_gpu)
def _testMaxPoolGradDirect1_3(self):
input_data = [
- 1.0, 0.0, 1.0, 0.0,
- 0.0, 1.0, 0.0, 1.0,
- 1.0, 0.0, 1.0, 0.0,
- 0.0, 1.0, 0.0, 1.0,]
+ 1.0,
+ 0.0,
+ 1.0,
+ 0.0,
+ 0.0,
+ 1.0,
+ 0.0,
+ 1.0,
+ 1.0,
+ 0.0,
+ 1.0,
+ 0.0,
+ 0.0,
+ 1.0,
+ 0.0,
+ 1.0,
+ ]
output_backprop = [
- 11.0, 12.0, 13.0, 14.0,
- 15.0, 16.0, 17.0, 18.0,
- 19.0, 20.0, 21.0, 22.0,
- 23.0, 24.0, 25.0, 26.0]
+ 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0,
+ 23.0, 24.0, 25.0, 26.0
+ ]
expected_input_backprop = [
- 54, 0.0, 62, 0.0,
- 0.0, 60, 0.0, 22.0,
- 47, 0.0, 51, 0.0,
- 0.0, 0.0, 0.0, 0.0,]
+ 54,
+ 0.0,
+ 62,
+ 0.0,
+ 0.0,
+ 60,
+ 0.0,
+ 22.0,
+ 47,
+ 0.0,
+ 51,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ ]
for use_gpu in True, False:
self._testMaxPoolGradDirect(
- input_data, output_backprop, expected_input_backprop,
- input_sizes=[1, 4, 4, 1], output_sizes=[1, 4, 4, 1],
- window_rows=3, window_cols=3, row_stride=1, col_stride=1,
- padding="SAME", use_gpu=use_gpu)
+ input_data,
+ output_backprop,
+ expected_input_backprop,
+ input_sizes=[1, 4, 4, 1],
+ output_sizes=[1, 4, 4, 1],
+ window_rows=3,
+ window_cols=3,
+ row_stride=1,
+ col_stride=1,
+ padding="SAME",
+ use_gpu=use_gpu)
def _testMaxPoolGradDirectWithNans2_1(self):
input_data = [float("nan")] * 16
- output_backprop = [
- 11.0, 12.0, 13.0,
- 15.0, 16.0, 17.0,
- 19.0, 20.0, 21.0]
+ output_backprop = [11.0, 12.0, 13.0, 15.0, 16.0, 17.0, 19.0, 20.0, 21.0]
# Test the CPU implementation, which propagates diffs in case of NaN
expected_input_backprop_tf_cpu = [
- 11.0, 12.0, 13.0, 0.0,
- 15.0, 16.0, 17.0, 0.0,
- 19.0, 20.0, 21.0, 0.0,
- 0.0, 0.0, 0.0, 0.0]
+ 11.0, 12.0, 13.0, 0.0, 15.0, 16.0, 17.0, 0.0, 19.0, 20.0, 21.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0
+ ]
self._testMaxPoolGradDirect(
- input_data, output_backprop, expected_input_backprop_tf_cpu,
- input_sizes=[1, 4, 4, 1], output_sizes=[1, 3, 3, 1],
- window_rows=2, window_cols=2, row_stride=1, col_stride=1,
- padding="VALID", use_gpu=False)
-
- if not tf.test.is_gpu_available():
+ input_data,
+ output_backprop,
+ expected_input_backprop_tf_cpu,
+ input_sizes=[1, 4, 4, 1],
+ output_sizes=[1, 3, 3, 1],
+ window_rows=2,
+ window_cols=2,
+ row_stride=1,
+ col_stride=1,
+ padding="VALID",
+ use_gpu=False)
+
+ if not test.is_gpu_available():
return
# Test the GPU implementation that uses cudnn for now.
# It does not propagate the diff in cases of NaNs
expected_input_backprop_cudnn = [
- 0.0, 0.0, 0.0, 0.0,
- 0.0, 0.0, 0.0, 0.0,
- 0.0, 0.0, 0.0, 0.0,
- 0.0, 0.0, 0.0, 0.0]
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0
+ ]
self._testMaxPoolGradDirect(
- input_data, output_backprop, expected_input_backprop_cudnn,
- input_sizes=[1, 4, 4, 1], output_sizes=[1, 3, 3, 1],
- window_rows=2, window_cols=2, row_stride=1, col_stride=1,
- padding="VALID", use_gpu=True)
+ input_data,
+ output_backprop,
+ expected_input_backprop_cudnn,
+ input_sizes=[1, 4, 4, 1],
+ output_sizes=[1, 3, 3, 1],
+ window_rows=2,
+ window_cols=2,
+ row_stride=1,
+ col_stride=1,
+ padding="VALID",
+ use_gpu=True)
def _testMaxPoolGradDirectWithNans2_2(self):
input_data = [float("nan")] * 16
output_backprop = [
- float("nan"), 12.0, 13.0,
- 15.0, float("nan"), 17.0,
- 19.0, 20.0, float("nan")]
+ float("nan"), 12.0, 13.0, 15.0, float("nan"), 17.0, 19.0, 20.0,
+ float("nan")
+ ]
# Test the CPU implementation, which propagates diffs in case of NaN
expected_input_backprop_tf_cpu = [
- float("nan"), 12.0, 13.0, 0.0,
- 15.0, float("nan"), 17.0, 0.0,
- 19.0, 20.0, float("nan"), 0.0,
- 0.0, 0.0, 0.0, 0.0]
+ float("nan"), 12.0, 13.0, 0.0, 15.0, float("nan"), 17.0, 0.0, 19.0,
+ 20.0, float("nan"), 0.0, 0.0, 0.0, 0.0, 0.0
+ ]
self._testMaxPoolGradDirect(
- input_data, output_backprop, expected_input_backprop_tf_cpu,
- input_sizes=[1, 4, 4, 1], output_sizes=[1, 3, 3, 1],
- window_rows=2, window_cols=2, row_stride=1, col_stride=1,
- padding="VALID", use_gpu=False)
-
- if not tf.test.is_gpu_available():
+ input_data,
+ output_backprop,
+ expected_input_backprop_tf_cpu,
+ input_sizes=[1, 4, 4, 1],
+ output_sizes=[1, 3, 3, 1],
+ window_rows=2,
+ window_cols=2,
+ row_stride=1,
+ col_stride=1,
+ padding="VALID",
+ use_gpu=False)
+
+ if not test.is_gpu_available():
return
# Test the GPU implementation that uses cudnn for now.
# It does not propagate the diff in cases of NaNs
expected_input_backprop_cudnn = [
- 0.0, 0.0, 0.0, 0.0,
- 0.0, 0.0, 0.0, 0.0,
- 0.0, 0.0, 0.0, 0.0,
- 0.0, 0.0, 0.0, 0.0]
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0
+ ]
self._testMaxPoolGradDirect(
- input_data, output_backprop, expected_input_backprop_cudnn,
- input_sizes=[1, 4, 4, 1], output_sizes=[1, 3, 3, 1],
- window_rows=2, window_cols=2, row_stride=1, col_stride=1,
- padding="VALID", use_gpu=True)
+ input_data,
+ output_backprop,
+ expected_input_backprop_cudnn,
+ input_sizes=[1, 4, 4, 1],
+ output_sizes=[1, 3, 3, 1],
+ window_rows=2,
+ window_cols=2,
+ row_stride=1,
+ col_stride=1,
+ padding="VALID",
+ use_gpu=True)
def testMaxPoolGradDirect(self):
self._testMaxPoolGradDirect1_1()
@@ -868,80 +1094,137 @@ class PoolingTest(tf.test.TestCase):
def _testAvgPoolGradValidPadding1_1(self, data_format, use_gpu):
self._ConstructAndTestGradient(
- tf.nn.avg_pool, input_sizes=[2, 3, 3, 3],
- output_sizes=[2, 3, 3, 3], window_rows=1, window_cols=1, row_stride=1,
- col_stride=1, padding="VALID", data_format=data_format, use_gpu=use_gpu)
+ nn_ops.avg_pool,
+ input_sizes=[2, 3, 3, 3],
+ output_sizes=[2, 3, 3, 3],
+ window_rows=1,
+ window_cols=1,
+ row_stride=1,
+ col_stride=1,
+ padding="VALID",
+ data_format=data_format,
+ use_gpu=use_gpu)
def _testAvgPoolGradValidPadding2_1(self, data_format, use_gpu):
self._ConstructAndTestGradient(
- tf.nn.avg_pool, input_sizes=[2, 3, 3, 3],
- output_sizes=[2, 2, 2, 3], window_rows=2, window_cols=2, row_stride=1,
- col_stride=1, padding="VALID", data_format=data_format, use_gpu=use_gpu)
+ nn_ops.avg_pool,
+ input_sizes=[2, 3, 3, 3],
+ output_sizes=[2, 2, 2, 3],
+ window_rows=2,
+ window_cols=2,
+ row_stride=1,
+ col_stride=1,
+ padding="VALID",
+ data_format=data_format,
+ use_gpu=use_gpu)
def _testAvgPoolGradValidPadding2_2(self, data_format, use_gpu):
self._ConstructAndTestGradient(
- tf.nn.avg_pool, input_sizes=[2, 2, 2, 3],
- output_sizes=[2, 1, 1, 3], window_rows=2, window_cols=2, row_stride=2,
- col_stride=2, padding="VALID", data_format=data_format, use_gpu=use_gpu)
+ nn_ops.avg_pool,
+ input_sizes=[2, 2, 2, 3],
+ output_sizes=[2, 1, 1, 3],
+ window_rows=2,
+ window_cols=2,
+ row_stride=2,
+ col_stride=2,
+ padding="VALID",
+ data_format=data_format,
+ use_gpu=use_gpu)
def _testAvgPoolGradSamePadding1_1(self, data_format, use_gpu):
self._ConstructAndTestGradient(
- tf.nn.avg_pool, input_sizes=[2, 2, 4, 3],
- output_sizes=[2, 2, 4, 3], window_rows=1, window_cols=1, row_stride=1,
- col_stride=1, padding="SAME", data_format=data_format, use_gpu=use_gpu)
+ nn_ops.avg_pool,
+ input_sizes=[2, 2, 4, 3],
+ output_sizes=[2, 2, 4, 3],
+ window_rows=1,
+ window_cols=1,
+ row_stride=1,
+ col_stride=1,
+ padding="SAME",
+ data_format=data_format,
+ use_gpu=use_gpu)
def _testAvgPoolGradSamePadding2_1(self, data_format, use_gpu):
self._ConstructAndTestGradient(
- tf.nn.avg_pool, input_sizes=[2, 2, 4, 3],
- output_sizes=[2, 2, 4, 3], window_rows=2, window_cols=2, row_stride=1,
- col_stride=1, padding="SAME", data_format=data_format, use_gpu=use_gpu)
+ nn_ops.avg_pool,
+ input_sizes=[2, 2, 4, 3],
+ output_sizes=[2, 2, 4, 3],
+ window_rows=2,
+ window_cols=2,
+ row_stride=1,
+ col_stride=1,
+ padding="SAME",
+ data_format=data_format,
+ use_gpu=use_gpu)
def _testAvgPoolGradSamePadding2_2(self, data_format, use_gpu):
self._ConstructAndTestGradient(
- tf.nn.avg_pool, input_sizes=[2, 2, 4, 3],
- output_sizes=[2, 1, 2, 3], window_rows=2, window_cols=2, row_stride=2,
- col_stride=2, padding="SAME", data_format=data_format, use_gpu=use_gpu)
+ nn_ops.avg_pool,
+ input_sizes=[2, 2, 4, 3],
+ output_sizes=[2, 1, 2, 3],
+ window_rows=2,
+ window_cols=2,
+ row_stride=2,
+ col_stride=2,
+ padding="SAME",
+ data_format=data_format,
+ use_gpu=use_gpu)
def _testAvgPoolGradSamePadding3_1(self, data_format, use_gpu):
self._ConstructAndTestGradient(
- tf.nn.avg_pool, input_sizes=[1, 7, 7, 1],
- output_sizes=[1, 7, 7, 1], window_rows=3, window_cols=3, row_stride=1,
- col_stride=1, padding="SAME", data_format=data_format, use_gpu=use_gpu)
+ nn_ops.avg_pool,
+ input_sizes=[1, 7, 7, 1],
+ output_sizes=[1, 7, 7, 1],
+ window_rows=3,
+ window_cols=3,
+ row_stride=1,
+ col_stride=1,
+ padding="SAME",
+ data_format=data_format,
+ use_gpu=use_gpu)
def testShapeFunctionEdgeCases(self):
# All shapes unknown.
- for pool_func in [tf.nn.max_pool, tf.nn.avg_pool]:
- p = pool_func(tf.placeholder(tf.float32),
- ksize=[1, 1, 1, 1], strides=[1, 1, 1, 1],
- padding="SAME")
+ for pool_func in [nn_ops.max_pool, nn_ops.avg_pool]:
+ p = pool_func(
+ array_ops.placeholder(dtypes.float32),
+ ksize=[1, 1, 1, 1],
+ strides=[1, 1, 1, 1],
+ padding="SAME")
self.assertEqual([None, None, None, None], p.get_shape().as_list())
- p, am = tf.nn.max_pool_with_argmax(
- tf.placeholder(tf.float32),
- ksize=[1, 1, 1, 1], strides=[1, 1, 1, 1],
+ p, am = nn_ops.max_pool_with_argmax(
+ array_ops.placeholder(dtypes.float32),
+ ksize=[1, 1, 1, 1],
+ strides=[1, 1, 1, 1],
padding="SAME")
self.assertEqual([None, None, None, None], p.get_shape().as_list())
self.assertEqual([None, None, None, None], am.get_shape().as_list())
# Incorrect input shape.
- for pool_func in [tf.nn.max_pool, tf.nn.avg_pool,
- tf.nn.max_pool_with_argmax]:
+ for pool_func in [
+ nn_ops.max_pool, nn_ops.avg_pool, nn_ops.max_pool_with_argmax
+ ]:
with self.assertRaises(ValueError):
- pool_func(tf.placeholder(tf.float32, shape=[1, 3]),
- ksize=[1, 1, 1, 1], strides=[1, 1, 1, 1], padding="SAME")
+ pool_func(
+ array_ops.placeholder(
+ dtypes.float32, shape=[1, 3]),
+ ksize=[1, 1, 1, 1],
+ strides=[1, 1, 1, 1],
+ padding="SAME")
def testOpEdgeCases(self):
with self.test_session() as sess:
- pool_funcs = [tf.nn.max_pool, tf.nn.avg_pool]
- if tf.test.is_gpu_available():
- pool_funcs.append(tf.nn.max_pool_with_argmax)
+ pool_funcs = [nn_ops.max_pool, nn_ops.avg_pool]
+ if test.is_gpu_available():
+ pool_funcs.append(nn_ops.max_pool_with_argmax)
for pool_func in pool_funcs:
# Illegal strides.
with self.assertRaisesRegexp(
- tf.errors.UnimplementedError,
+ errors_impl.UnimplementedError,
"Pooling is not yet supported on the batch"):
sess.run(
pool_func(
- tf.placeholder(tf.float32),
+ array_ops.placeholder(dtypes.float32),
ksize=[1, 1, 1, 1],
strides=[2, 1, 1, 1],
padding="SAME"))
@@ -950,36 +1233,40 @@ class PoolingTest(tf.test.TestCase):
with self.assertRaisesRegexp(ValueError, "Negative dimension size"):
sess.run(
pool_func(
- tf.placeholder(
- tf.float32, shape=[32, 20, 20, 3]),
+ array_ops.placeholder(
+ dtypes.float32, shape=[32, 20, 20, 3]),
ksize=[1, 20, 21, 1],
strides=[1, 1, 1, 1],
padding="VALID"))
with self.assertRaisesRegexp(ValueError, "Negative dimension size"):
pool_func(
- tf.placeholder(
- tf.float32, shape=[32, 20, 20, 3]),
+ array_ops.placeholder(
+ dtypes.float32, shape=[32, 20, 20, 3]),
ksize=[1, 21, 20, 1],
strides=[1, 1, 1, 1],
padding="VALID")
def GetMaxPoolFwdTest(input_size, filter_size, strides, padding):
+
def Test(self):
# MaxPoolWithArgMax is implemented only on GPU.
- if not tf.test.is_gpu_available():
+ if not test.is_gpu_available():
return
self._CompareMaxPoolingFwd(input_size, filter_size, strides, padding)
+
return Test
def GetMaxPoolGradTest(input_size, filter_size, output_size, strides, padding):
+
def Test(self):
# MaxPoolWithArgMax is implemented only on GPU.
- if not tf.test.is_gpu_available():
+ if not test.is_gpu_available():
return
- self._CompareMaxPoolingBk(input_size, output_size,
- filter_size, strides, padding)
+ self._CompareMaxPoolingBk(input_size, output_size, filter_size, strides,
+ padding)
+
return Test
@@ -989,6 +1276,6 @@ if __name__ == "__main__":
setattr(PoolingTest, "testMaxPoolFwd_" + name_,
GetMaxPoolFwdTest(input_size_, filter_size_, stride_, padding_))
setattr(PoolingTest, "testMaxPoolGrad_" + name_,
- GetMaxPoolGradTest(input_size_, filter_size_, output_size_,
- stride_, padding_))
- tf.test.main()
+ GetMaxPoolGradTest(input_size_, filter_size_, output_size_, stride_,
+ padding_))
+ test.main()
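
The file closes with the factory-plus-setattr idiom: GetMaxPoolFwdTest and GetMaxPoolGradTest each return a test closure, and the __main__ loop attaches one named method per parameter tuple. A minimal stdlib-only sketch of the same idiom follows; MakeSquareTest, SquareTest, and the parameter list are hypothetical stand-ins, not part of this commit.

    import unittest


    def MakeSquareTest(x, expected):

      def Test(self):
        self.assertEqual(x * x, expected)

      return Test


    class SquareTest(unittest.TestCase):
      pass


    if __name__ == "__main__":
      # One named test method per parameter tuple, mirroring the loop above.
      for x_, expected_ in [(2, 4), (3, 9), (4, 16)]:
        setattr(SquareTest, "testSquare_%d" % x_,
                MakeSquareTest(x_, expected_))
      unittest.main()
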
diff --git a/tensorflow/python/kernel_tests/priority_queue_test.py b/tensorflow/python/kernel_tests/priority_queue_test.py
index b7982be80a..3fb9c9c468 100644
--- a/tensorflow/python/kernel_tests/priority_queue_test.py
+++ b/tensorflow/python/kernel_tests/priority_queue_test.py
@@ -12,8 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for tensorflow.ops.data_flow_ops.PriorityQueue."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
@@ -23,21 +23,29 @@ import random
import threading
import numpy as np
-import tensorflow as tf
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import errors_impl
+from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
+import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
+from tensorflow.python.platform import test
-class PriorityQueueTest(tf.test.TestCase):
+class PriorityQueueTest(test.TestCase):
def testRoundTripInsertReadOnceSorts(self):
with self.test_session() as sess:
- q = data_flow_ops.PriorityQueue(2000, (tf.string, tf.string), ((), ()))
+ q = data_flow_ops.PriorityQueue(2000, (dtypes.string, dtypes.string), (
+ (), ()))
elem = np.random.randint(-5, 5, size=100).astype(np.int64)
side_value_0 = np.random.rand(100).astype(bytes)
side_value_1 = np.random.rand(100).astype(bytes)
- enq_list = [q.enqueue((e, tf.constant(v0), tf.constant(v1)))
- for e, v0, v1 in zip(elem, side_value_0, side_value_1)]
+ enq_list = [
+ q.enqueue((e, constant_op.constant(v0), constant_op.constant(v1)))
+ for e, v0, v1 in zip(elem, side_value_0, side_value_1)
+ ]
for enq in enq_list:
enq.run()
@@ -60,13 +68,17 @@ class PriorityQueueTest(tf.test.TestCase):
def testRoundTripInsertMultiThreadedReadOnceSorts(self):
with self.test_session() as sess:
- q = data_flow_ops.PriorityQueue(2000, (tf.string, tf.string), ((), ()))
+ q = data_flow_ops.PriorityQueue(2000, (dtypes.string, dtypes.string), (
+ (), ()))
elem = np.random.randint(-5, 5, size=100).astype(np.int64)
side_value_0 = np.random.rand(100).astype(bytes)
side_value_1 = np.random.rand(100).astype(bytes)
- enqueue_ops = [q.enqueue((e, tf.constant(v0), tf.constant(v1)))
- for e, v0, v1 in zip(elem, side_value_0, side_value_1)]
+ enqueue_ops = [
+ q.enqueue((e, constant_op.constant(v0), constant_op.constant(v1)))
+ for e, v0, v1 in zip(elem, side_value_0, side_value_1)
+ ]
+
# Run one producer thread for each element in elems.
def enqueue(enqueue_op):
sess.run(enqueue_op)
@@ -74,7 +86,9 @@ class PriorityQueueTest(tf.test.TestCase):
dequeue_op = q.dequeue_many(100)
enqueue_threads = [
- self.checkedThread(target=enqueue, args=(op,)) for op in enqueue_ops]
+ self.checkedThread(
+ target=enqueue, args=(op,)) for op in enqueue_ops
+ ]
for t in enqueue_threads:
t.start()
@@ -100,18 +114,20 @@ class PriorityQueueTest(tf.test.TestCase):
def testRoundTripFillsCapacityMultiThreadedEnqueueAndDequeue(self):
with self.test_session() as sess:
- q = data_flow_ops.PriorityQueue(10, (tf.int64), (()))
+ q = data_flow_ops.PriorityQueue(10, (dtypes.int64), (()))
num_threads = 40
enqueue_counts = np.random.randint(10, size=num_threads)
enqueue_values = [
- np.random.randint(5, size=count) for count in enqueue_counts]
+ np.random.randint(
+ 5, size=count) for count in enqueue_counts
+ ]
enqueue_ops = [
- q.enqueue_many((values, values)) for values in enqueue_values]
+ q.enqueue_many((values, values)) for values in enqueue_values
+ ]
shuffled_counts = copy.deepcopy(enqueue_counts)
random.shuffle(shuffled_counts)
- dequeue_ops = [
- q.dequeue_many(count) for count in shuffled_counts]
+ dequeue_ops = [q.dequeue_many(count) for count in shuffled_counts]
all_enqueued_values = np.hstack(enqueue_values)
# Run one producer thread for each element in elems.
@@ -126,9 +142,13 @@ class PriorityQueueTest(tf.test.TestCase):
dequeued.extend(dequeue_indices)
enqueue_threads = [
- self.checkedThread(target=enqueue, args=(op,)) for op in enqueue_ops]
+ self.checkedThread(
+ target=enqueue, args=(op,)) for op in enqueue_ops
+ ]
dequeue_threads = [
- self.checkedThread(target=dequeue, args=(op,)) for op in dequeue_ops]
+ self.checkedThread(
+ target=dequeue, args=(op,)) for op in dequeue_ops
+ ]
# Dequeue and check
for t in dequeue_threads:
@@ -144,18 +164,20 @@ class PriorityQueueTest(tf.test.TestCase):
def testRoundTripInsertManyMultiThreadedReadManyMultithreadedSorts(self):
with self.test_session() as sess:
- q = data_flow_ops.PriorityQueue(2000, (tf.int64), (()))
+ q = data_flow_ops.PriorityQueue(2000, (dtypes.int64), (()))
num_threads = 40
enqueue_counts = np.random.randint(10, size=num_threads)
enqueue_values = [
- np.random.randint(5, size=count) for count in enqueue_counts]
+ np.random.randint(
+ 5, size=count) for count in enqueue_counts
+ ]
enqueue_ops = [
- q.enqueue_many((values, values)) for values in enqueue_values]
+ q.enqueue_many((values, values)) for values in enqueue_values
+ ]
shuffled_counts = copy.deepcopy(enqueue_counts)
random.shuffle(shuffled_counts)
- dequeue_ops = [
- q.dequeue_many(count) for count in shuffled_counts]
+ dequeue_ops = [q.dequeue_many(count) for count in shuffled_counts]
all_enqueued_values = np.hstack(enqueue_values)
dequeue_wait = threading.Condition()
@@ -173,10 +195,13 @@ class PriorityQueueTest(tf.test.TestCase):
dequeued = []
enqueue_threads = [
- self.checkedThread(target=enqueue, args=(op,)) for op in enqueue_ops]
+ self.checkedThread(
+ target=enqueue, args=(op,)) for op in enqueue_ops
+ ]
dequeue_threads = [
- self.checkedThread(target=dequeue, args=(op, dequeued))
- for op in dequeue_ops]
+ self.checkedThread(
+ target=dequeue, args=(op, dequeued)) for op in dequeue_ops
+ ]
for t in enqueue_threads:
t.start()
@@ -195,16 +220,20 @@ class PriorityQueueTest(tf.test.TestCase):
def testRoundTripInsertManyMultiThreadedReadOnceSorts(self):
with self.test_session() as sess:
- q = data_flow_ops.PriorityQueue(2000, (tf.string, tf.string), ((), ()))
+ q = data_flow_ops.PriorityQueue(2000, (dtypes.string, dtypes.string), (
+ (), ()))
elem = np.random.randint(-5, 5, size=100).astype(np.int64)
side_value_0 = np.random.rand(100).astype(bytes)
side_value_1 = np.random.rand(100).astype(bytes)
batch = 5
- enqueue_ops = [q.enqueue_many((elem[i*batch:(i+1)*batch],
- side_value_0[i*batch:(i+1)*batch],
- side_value_1[i*batch:(i+1)*batch]))
- for i in range(20)]
+ enqueue_ops = [
+ q.enqueue_many((elem[i * batch:(i + 1) * batch],
+ side_value_0[i * batch:(i + 1) * batch],
+ side_value_1[i * batch:(i + 1) * batch]))
+ for i in range(20)
+ ]
+
# Run one producer thread for each element in elems.
def enqueue(enqueue_op):
sess.run(enqueue_op)
@@ -212,7 +241,9 @@ class PriorityQueueTest(tf.test.TestCase):
dequeue_op = q.dequeue_many(100)
enqueue_threads = [
- self.checkedThread(target=enqueue, args=(op,)) for op in enqueue_ops]
+ self.checkedThread(
+ target=enqueue, args=(op,)) for op in enqueue_ops
+ ]
for t in enqueue_threads:
t.start()
@@ -238,7 +269,8 @@ class PriorityQueueTest(tf.test.TestCase):
def testRoundTripInsertOnceReadOnceSorts(self):
with self.test_session() as sess:
- q = data_flow_ops.PriorityQueue(2000, (tf.string, tf.string), ((), ()))
+ q = data_flow_ops.PriorityQueue(2000, (dtypes.string, dtypes.string), (
+ (), ()))
elem = np.random.randint(-100, 100, size=1000).astype(np.int64)
side_value_0 = np.random.rand(1000).astype(bytes)
side_value_1 = np.random.rand(1000).astype(bytes)
@@ -258,16 +290,15 @@ class PriorityQueueTest(tf.test.TestCase):
def testRoundTripInsertOnceReadManySorts(self):
with self.test_session():
- q = data_flow_ops.PriorityQueue(2000, (tf.int64), (()))
+ q = data_flow_ops.PriorityQueue(2000, (dtypes.int64), (()))
elem = np.random.randint(-100, 100, size=1000).astype(np.int64)
q.enqueue_many((elem, elem)).run()
- deq_values = np.hstack(
- (q.dequeue_many(100)[0].eval() for _ in range(10)))
+ deq_values = np.hstack((q.dequeue_many(100)[0].eval() for _ in range(10)))
self.assertAllEqual(deq_values, sorted(elem))
def testRoundTripInsertOnceReadOnceLotsSorts(self):
with self.test_session():
- q = data_flow_ops.PriorityQueue(2000, (tf.int64), (()))
+ q = data_flow_ops.PriorityQueue(2000, (dtypes.int64), (()))
elem = np.random.randint(-100, 100, size=1000).astype(np.int64)
q.enqueue_many((elem, elem)).run()
dequeue_op = q.dequeue()
@@ -276,31 +307,37 @@ class PriorityQueueTest(tf.test.TestCase):
def testInsertingNonInt64Fails(self):
with self.test_session():
- q = data_flow_ops.PriorityQueue(2000, (tf.string), (()))
+ q = data_flow_ops.PriorityQueue(2000, (dtypes.string), (()))
with self.assertRaises(TypeError):
q.enqueue_many((["a", "b", "c"], ["a", "b", "c"])).run()
def testInsertingNonScalarFails(self):
with self.test_session() as sess:
- input_priority = tf.placeholder(tf.int64)
- input_other = tf.placeholder(tf.string)
- q = data_flow_ops.PriorityQueue(2000, (tf.string,), (()))
+ input_priority = array_ops.placeholder(dtypes.int64)
+ input_other = array_ops.placeholder(dtypes.string)
+ q = data_flow_ops.PriorityQueue(2000, (dtypes.string,), (()))
with self.assertRaisesRegexp(
- tf.errors.InvalidArgumentError,
+ errors_impl.InvalidArgumentError,
r"Shape mismatch in tuple component 0. Expected \[\], got \[2\]"):
sess.run([q.enqueue((input_priority, input_other))],
- feed_dict={input_priority: np.array([0, 2], dtype=np.int64),
- input_other: np.random.rand(3, 5).astype(bytes)})
+ feed_dict={
+ input_priority: np.array(
+ [0, 2], dtype=np.int64),
+ input_other: np.random.rand(3, 5).astype(bytes)
+ })
with self.assertRaisesRegexp(
- tf.errors.InvalidArgumentError,
+ errors_impl.InvalidArgumentError,
r"Shape mismatch in tuple component 0. Expected \[2\], got \[2,2\]"):
- sess.run([q.enqueue_many((input_priority, input_other))],
- feed_dict={input_priority: np.array([[0, 2], [3, 4]],
- dtype=np.int64),
- input_other: np.random.rand(2, 3).astype(bytes)})
+ sess.run(
+ [q.enqueue_many((input_priority, input_other))],
+ feed_dict={
+ input_priority: np.array(
+ [[0, 2], [3, 4]], dtype=np.int64),
+ input_other: np.random.rand(2, 3).astype(bytes)
+ })
if __name__ == "__main__":
- tf.test.main()
+ test.main()
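
The tests above all revolve around one property: whatever order elements go in, dequeue_many returns them sorted by their int64 priority component. A minimal sketch of that round trip, using the same internal modules the rewritten test imports and assuming a TF 1.x graph/session runtime:

    import numpy as np

    from tensorflow.python.client import session as session_lib
    from tensorflow.python.framework import dtypes
    from tensorflow.python.ops import data_flow_ops

    # One int64 priority plus one declared string component per element.
    q = data_flow_ops.PriorityQueue(100, (dtypes.string,), ((),))
    priorities = np.array([3, 1, 2], dtype=np.int64)
    values = [b"c", b"a", b"b"]
    enqueue = q.enqueue_many((priorities, values))
    dequeue = q.dequeue_many(3)

    with session_lib.Session() as sess:
      sess.run(enqueue)
      deq_priorities, deq_values = sess.run(dequeue)
      print(deq_priorities)  # [1 2 3] -- sorted by priority
      print(deq_values)      # [b'a' b'b' b'c']
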
diff --git a/tensorflow/python/kernel_tests/py_func_test.py b/tensorflow/python/kernel_tests/py_func_test.py
index 025dbb71df..938e451cbc 100644
--- a/tensorflow/python/kernel_tests/py_func_test.py
+++ b/tensorflow/python/kernel_tests/py_func_test.py
@@ -13,6 +13,7 @@
# limitations under the License.
# ==============================================================================
"""Tests for py_func op."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
@@ -20,14 +21,18 @@ from __future__ import print_function
import numpy as np
from six.moves import queue
from six.moves import xrange # pylint: disable=redefined-builtin
-import tensorflow as tf
+from tensorflow.python.client import session as session_lib
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
+from tensorflow.python.ops import array_ops
from tensorflow.python.ops import script_ops
+from tensorflow.python.platform import test
-class PyOpTest(tf.test.TestCase):
+class PyOpTest(test.TestCase):
def testBasic(self):
@@ -36,72 +41,80 @@ class PyOpTest(tf.test.TestCase):
# single type
with self.test_session():
- x = tf.constant(1.0, tf.float32)
- y = tf.constant(2.0, tf.float32)
- z = tf.py_func(my_func, [x, y], tf.float32)
+ x = constant_op.constant(1.0, dtypes.float32)
+ y = constant_op.constant(2.0, dtypes.float32)
+ z = script_ops.py_func(my_func, [x, y], dtypes.float32)
self.assertEqual(z.eval(), my_func(1.0, 2.0).astype(np.float32))
# scalar
with self.test_session():
- x = tf.constant(1.0, tf.float32)
- y = tf.constant(2.0, tf.float32)
- z = tf.py_func(my_func, [x, y], [tf.float32])
+ x = constant_op.constant(1.0, dtypes.float32)
+ y = constant_op.constant(2.0, dtypes.float32)
+ z = script_ops.py_func(my_func, [x, y], [dtypes.float32])
self.assertEqual(z[0].eval(), my_func(1.0, 2.0).astype(np.float32))
# array
with self.test_session():
- x = tf.constant([1.0, 2.0], tf.float64)
- y = tf.constant([2.0, 3.0], tf.float64)
- z = tf.py_func(my_func, [x, y], [tf.float64])
- self.assertAllEqual(
- z[0].eval(),
- my_func([1.0, 2.0], [2.0, 3.0]).astype(np.float64))
+ x = constant_op.constant([1.0, 2.0], dtypes.float64)
+ y = constant_op.constant([2.0, 3.0], dtypes.float64)
+ z = script_ops.py_func(my_func, [x, y], [dtypes.float64])
+ self.assertAllEqual(z[0].eval(),
+ my_func([1.0, 2.0], [2.0, 3.0]).astype(np.float64))
# a bit exotic type (complex64)
with self.test_session():
- x = tf.constant(1+2j, tf.complex64)
- y = tf.constant(3+4j, tf.complex64)
- z, = tf.py_func(my_func, [x, y], [tf.complex64])
- self.assertAllClose(z.eval(), my_func(1+2j, 3+4j))
+ x = constant_op.constant(1 + 2j, dtypes.complex64)
+ y = constant_op.constant(3 + 4j, dtypes.complex64)
+ z, = script_ops.py_func(my_func, [x, y], [dtypes.complex64])
+ self.assertAllClose(z.eval(), my_func(1 + 2j, 3 + 4j))
# a bit exotic function (rfft)
with self.test_session():
- x = tf.constant([1., 2., 3., 4.], tf.float32)
+ x = constant_op.constant([1., 2., 3., 4.], dtypes.float32)
+
def rfft(x):
return np.fft.rfft(x).astype(np.complex64)
- y, = tf.py_func(rfft, [x], [tf.complex64])
+
+ y, = script_ops.py_func(rfft, [x], [dtypes.complex64])
self.assertAllClose(y.eval(), np.fft.rfft([1., 2., 3., 4.]))
# returns a python literal.
with self.test_session():
+
def literal(x):
return 1.0 if x == 0.0 else 0.0
- x = tf.constant(0.0, tf.float64)
- y, = tf.py_func(literal, [x], [tf.float64])
+
+ x = constant_op.constant(0.0, dtypes.float64)
+ y, = script_ops.py_func(literal, [x], [dtypes.float64])
self.assertAllClose(y.eval(), 1.0)
# returns a list
with self.test_session():
+
def list_func(x):
return [x, x + 1]
- x = tf.constant(0.0, tf.float64)
- y, z = tf.py_func(list_func, [x], [tf.float64] * 2)
+
+ x = constant_op.constant(0.0, dtypes.float64)
+ y, z = script_ops.py_func(list_func, [x], [dtypes.float64] * 2)
self.assertAllClose(y.eval(), 0.0)
self.assertAllClose(z.eval(), 1.0)
# returns a tuple
with self.test_session():
+
def tuple_func(x):
return x, x + 1
- x = tf.constant(0.0, tf.float64)
- y, z = tf.py_func(tuple_func, [x], [tf.float64] * 2)
+
+ x = constant_op.constant(0.0, dtypes.float64)
+ y, z = script_ops.py_func(tuple_func, [x], [dtypes.float64] * 2)
self.assertAllClose(y.eval(), 0.0)
self.assertAllClose(z.eval(), 1.0)
# returns a tuple; Tout and inp are given as tuples
with self.test_session():
- x = tf.constant(0.0, tf.float64)
- y, z = tf.py_func(tuple_func, (x,), (tf.float64, tf.float64))
+ x = constant_op.constant(0.0, dtypes.float64)
+ y, z = script_ops.py_func(tuple_func, (x,),
+ (dtypes.float64, dtypes.float64))
self.assertAllClose(y.eval(), 0.0)
self.assertAllClose(z.eval(), 1.0)
@@ -114,36 +127,37 @@ class PyOpTest(tf.test.TestCase):
return x + y
with self.test_session():
- x = tf.constant([b"hello", b"hi"], tf.string)
- y, = tf.py_func(read_fixed_length_numpy_strings, [], [tf.string])
- z, = tf.py_func(read_and_return_strings, [x, y], [tf.string])
+ x = constant_op.constant([b"hello", b"hi"], dtypes.string)
+ y, = script_ops.py_func(read_fixed_length_numpy_strings, [],
+ [dtypes.string])
+ z, = script_ops.py_func(read_and_return_strings, [x, y], [dtypes.string])
self.assertListEqual(list(z.eval()), [b"hello there", b"hi there"])
def testStringPadding(self):
correct = [b"this", b"is", b"a", b"test"]
with self.test_session():
- s, = tf.py_func(lambda: [correct], [], [tf.string])
+ s, = script_ops.py_func(lambda: [correct], [], [dtypes.string])
self.assertAllEqual(s.eval(), correct)
def testLarge(self):
with self.test_session() as sess:
- x = tf.zeros([1000000], dtype=np.float32)
- y = tf.py_func(lambda x: x + 1, [x], [tf.float32])
- z = tf.py_func(lambda x: x * 2, [x], [tf.float32])
+ x = array_ops.zeros([1000000], dtype=np.float32)
+ y = script_ops.py_func(lambda x: x + 1, [x], [dtypes.float32])
+ z = script_ops.py_func(lambda x: x * 2, [x], [dtypes.float32])
for _ in xrange(100):
sess.run([y[0].op, z[0].op])
def testNoInput(self):
with self.test_session():
- x, = tf.py_func(lambda: 42.0, [], [tf.float64])
+ x, = script_ops.py_func(lambda: 42.0, [], [dtypes.float64])
self.assertAllClose(x.eval(), 42.0)
def testCleanup(self):
for _ in xrange(1000):
- g = tf.Graph()
+ g = ops.Graph()
with g.as_default():
- c = tf.constant([1.], tf.float32)
- _ = tf.py_func(lambda x: x + 1, [c], [tf.float32])
+ c = constant_op.constant([1.], dtypes.float32)
+ _ = script_ops.py_func(lambda x: x + 1, [c], [dtypes.float32])
self.assertTrue(script_ops._py_funcs.size() < 100)
def testBadNumpyReturnType(self):
@@ -153,7 +167,7 @@ class PyOpTest(tf.test.TestCase):
# Structured numpy arrays aren't supported.
return np.array([], dtype=[("foo", np.float32)])
- y, = tf.py_func(bad, [], [tf.float32])
+ y, = script_ops.py_func(bad, [], [dtypes.float32])
with self.assertRaisesRegexp(errors.UnimplementedError,
"Unsupported numpy type"):
@@ -164,9 +178,9 @@ class PyOpTest(tf.test.TestCase):
def bad():
# Non-string python objects aren't supported.
- return tf.float32
+ return dtypes.float32
- z, = tf.py_func(bad, [], [tf.float64])
+ z, = script_ops.py_func(bad, [], [dtypes.float64])
with self.assertRaisesRegexp(errors.UnimplementedError,
"Unsupported object type"):
@@ -174,18 +188,19 @@ class PyOpTest(tf.test.TestCase):
def testStateful(self):
# Not using self.test_session(), which disables optimization.
- with tf.Session() as sess:
+ with session_lib.Session() as sess:
producer = iter(range(3))
- x, = tf.py_func(lambda: next(producer), [], [tf.int64])
+ x, = script_ops.py_func(lambda: next(producer), [], [dtypes.int64])
self.assertEqual(sess.run(x), 0)
self.assertEqual(sess.run(x), 1)
self.assertEqual(sess.run(x), 2)
def testStateless(self):
# Not using self.test_session(), which disables optimization.
- with tf.Session() as sess:
+ with session_lib.Session() as sess:
producer = iter(range(3))
- x, = tf.py_func(lambda: next(producer), [], [tf.int64], stateful=False)
+ x, = script_ops.py_func(
+ lambda: next(producer), [], [dtypes.int64], stateful=False)
self.assertEqual(sess.run(x), 0)
self.assertEqual(sess.run(x), 0)
self.assertEqual(sess.run(x), 0)
@@ -193,16 +208,16 @@ class PyOpTest(tf.test.TestCase):
def testGradientFunction(self):
# Input to tf.py_func is necessary; otherwise get_gradient_function()
# returns None by default.
- a = tf.constant(0)
- x, = tf.py_func(lambda a: 0, [a], [tf.int64])
- y, = tf.py_func(lambda a: 0, [a], [tf.int64], stateful=False)
+ a = constant_op.constant(0)
+ x, = script_ops.py_func(lambda a: 0, [a], [dtypes.int64])
+ y, = script_ops.py_func(lambda a: 0, [a], [dtypes.int64], stateful=False)
self.assertEqual(None, ops.get_gradient_function(x.op))
self.assertEqual(None, ops.get_gradient_function(y.op))
def testCOrder(self):
with self.test_session():
val = [[1, 2], [3, 4]]
- x, = tf.py_func(lambda: np.array(val, order="F"), [], [tf.int64])
+ x, = script_ops.py_func(lambda: np.array(val, order="F"), [], [dtypes.int64])
self.assertAllEqual(val, x.eval())
def testParallel(self):
@@ -220,8 +235,8 @@ class PyOpTest(tf.test.TestCase):
q.task_done()
return v
- x, = tf.py_func(blocking_put, [], [tf.int64])
- y, = tf.py_func(blocking_get, [], [tf.int64])
+ x, = script_ops.py_func(blocking_put, [], [dtypes.int64])
+ y, = script_ops.py_func(blocking_get, [], [dtypes.int64])
# This will result in a deadlock if the py_funcs don't run in parallel.
session.run([x, y])
@@ -237,7 +252,7 @@ class PyOpTest(tf.test.TestCase):
self._value += diff
def increment(self, diff):
- return tf.py_func(self._increment, [diff], [], stateful=True)
+ return script_ops.py_func(self._increment, [diff], [], stateful=True)
@property
def value(self):
@@ -245,7 +260,7 @@ class PyOpTest(tf.test.TestCase):
with self.test_session() as sess:
s = State()
- op = s.increment(tf.constant(2, tf.int64))
+ op = s.increment(constant_op.constant(2, dtypes.int64))
ret = sess.run(op)
self.assertIsNone(ret)
self.assertAllEqual([3], s.value)
@@ -255,10 +270,11 @@ class PyOpTest(tf.test.TestCase):
def do_nothing(unused_x):
pass
- f = tf.py_func(do_nothing, [tf.constant(3, tf.int64)], [], stateful=False)
+ f = script_ops.py_func(
+ do_nothing, [constant_op.constant(3, dtypes.int64)], [], stateful=False)
with self.test_session() as sess:
self.assertEqual(sess.run(f), [])
if __name__ == "__main__":
- tf.test.main()
+ test.main()
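
For reference, a minimal sketch of the pattern this file exercises: script_ops.py_func wraps an ordinary Python/numpy function as a graph op, with the declared Tout dtype fixing the output type. The my_func body here is an arbitrary numpy computation chosen for illustration, and a TF 1.x session runtime is assumed:

    import numpy as np

    from tensorflow.python.client import session as session_lib
    from tensorflow.python.framework import constant_op
    from tensorflow.python.framework import dtypes
    from tensorflow.python.ops import script_ops


    def my_func(x, y):
      # Ordinary Python running on numpy arrays fed from the graph.
      return np.sinh(x) + np.cosh(y)


    x = constant_op.constant(1.0, dtypes.float32)
    y = constant_op.constant(2.0, dtypes.float32)
    z = script_ops.py_func(my_func, [x, y], dtypes.float32)

    with session_lib.Session() as sess:
      print(sess.run(z))  # same value as my_func(1.0, 2.0)
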
diff --git a/tensorflow/python/kernel_tests/qr_op_test.py b/tensorflow/python/kernel_tests/qr_op_test.py
index e6b2972e85..7867e0e42d 100644
--- a/tensorflow/python/kernel_tests/qr_op_test.py
+++ b/tensorflow/python/kernel_tests/qr_op_test.py
@@ -13,26 +13,32 @@
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.math_ops.matrix_inverse."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+
+from tensorflow.python.framework import constant_op
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import linalg_ops
+from tensorflow.python.ops import math_ops
+from tensorflow.python.platform import test
-class QrOpTest(tf.test.TestCase):
+class QrOpTest(test.TestCase):
def testWrongDimensions(self):
# The input to qr should be a tensor of at least rank 2.
- scalar = tf.constant(1.)
+ scalar = constant_op.constant(1.)
with self.assertRaisesRegexp(ValueError,
"Shape must be at least rank 2 but is rank 0"):
- tf.qr(scalar)
- vector = tf.constant([1., 2.])
+ linalg_ops.qr(scalar)
+ vector = constant_op.constant([1., 2.])
with self.assertRaisesRegexp(ValueError,
"Shape must be at least rank 2 but is rank 1"):
- tf.qr(vector)
+ linalg_ops.qr(vector)
def _GetQrOpTest(dtype_, shape_, use_static_shape_):
@@ -67,13 +73,13 @@ def _GetQrOpTest(dtype_, shape_, use_static_shape_):
else:
tol = 1e-14
# Tests that a ~= q*r.
- a_recon = tf.matmul(q, r)
+ a_recon = math_ops.matmul(q, r)
self.assertAllClose(a_recon.eval(), a, rtol=tol, atol=tol)
def CheckUnitary(self, x):
# Tests that x[...,:,:]^H * x[...,:,:] is close to the identity.
- xx = tf.matmul(tf.conj(x), x, transpose_a=True)
- identity = tf.matrix_band_part(tf.ones_like(xx), 0, 0)
+ xx = math_ops.matmul(math_ops.conj(x), x, transpose_a=True)
+ identity = array_ops.matrix_band_part(array_ops.ones_like(xx), 0, 0)
if is_single:
tol = 1e-5
else:
@@ -92,10 +98,10 @@ def _GetQrOpTest(dtype_, shape_, use_static_shape_):
for full_matrices in False, True:
with self.test_session() as sess:
if use_static_shape_:
- x_tf = tf.constant(x_np)
+ x_tf = constant_op.constant(x_np)
else:
- x_tf = tf.placeholder(dtype_)
- q_tf, r_tf = tf.qr(x_tf, full_matrices=full_matrices)
+ x_tf = array_ops.placeholder(dtype_)
+ q_tf, r_tf = linalg_ops.qr(x_tf, full_matrices=full_matrices)
if use_static_shape_:
q_tf_val, r_tf_val = sess.run([q_tf, r_tf])
@@ -134,4 +140,4 @@ if __name__ == "__main__":
use_static_shape)
setattr(QrOpTest, "testQr_" + name,
_GetQrOpTest(dtype, shape, use_static_shape))
- tf.test.main()
+ test.main()
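
The CheckApproximation helper above asserts a ~= q*r within a dtype-dependent tolerance. A standalone sketch of that check with the directly imported linalg_ops and math_ops, assuming a TF 1.x session runtime (the 4x3 float64 input is arbitrary):

    import numpy as np

    from tensorflow.python.client import session as session_lib
    from tensorflow.python.framework import constant_op
    from tensorflow.python.ops import linalg_ops
    from tensorflow.python.ops import math_ops

    a = np.random.uniform(size=(4, 3)).astype(np.float64)
    q, r = linalg_ops.qr(constant_op.constant(a), full_matrices=False)
    a_recon = math_ops.matmul(q, r)

    with session_lib.Session() as sess:
      # For float64 the test above uses tol = 1e-14; reuse it here.
      np.testing.assert_allclose(
          sess.run(a_recon), a, rtol=1e-14, atol=1e-14)
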
diff --git a/tensorflow/python/kernel_tests/random_crop_test.py b/tensorflow/python/kernel_tests/random_crop_test.py
index bd213d6304..6028be1228 100644
--- a/tensorflow/python/kernel_tests/random_crop_test.py
+++ b/tensorflow/python/kernel_tests/random_crop_test.py
@@ -12,24 +12,26 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for random_crop."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+
+from tensorflow.python.ops import random_ops
+from tensorflow.python.platform import test
-class RandomCropTest(tf.test.TestCase):
+class RandomCropTest(test.TestCase):
def testNoOp(self):
# No random cropping is performed since the size is value.shape.
for shape in (2, 1, 1), (2, 1, 3), (4, 5, 3):
value = np.arange(0, np.prod(shape), dtype=np.int32).reshape(shape)
with self.test_session():
- crop = tf.random_crop(value, shape).eval()
+ crop = random_ops.random_crop(value, shape).eval()
self.assertAllEqual(crop, value)
def testContains(self):
@@ -37,9 +39,10 @@ class RandomCropTest(tf.test.TestCase):
shape = (3, 5, 7)
target = (2, 3, 4)
value = np.random.randint(1000000, size=shape)
- value_set = set(tuple(value[i:i + 2, j:j + 3, k:k + 4].ravel())
- for i in range(2) for j in range(3) for k in range(4))
- crop = tf.random_crop(value, size=target)
+ value_set = set(
+ tuple(value[i:i + 2, j:j + 3, k:k + 4].ravel())
+ for i in range(2) for j in range(3) for k in range(4))
+ crop = random_ops.random_crop(value, size=target)
for _ in range(20):
y = crop.eval()
self.assertAllEqual(y.shape, target)
@@ -55,7 +58,7 @@ class RandomCropTest(tf.test.TestCase):
value = np.arange(size).reshape(shape)
with self.test_session():
- crop = tf.random_crop(value, single, seed=7)
+ crop = random_ops.random_crop(value, single, seed=7)
counts = np.zeros(size, dtype=np.int32)
for _ in range(num_samples):
y = crop.eval()
@@ -72,4 +75,4 @@ class RandomCropTest(tf.test.TestCase):
if __name__ == '__main__':
- tf.test.main()
+ test.main()
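
testContains above enumerates every contiguous sub-block of the input and asserts each crop is one of them. A smaller sketch of the same property with the shapes the test uses, assuming a TF 1.x session runtime and simplifying the assertion to the shape check:

    import numpy as np

    from tensorflow.python.client import session as session_lib
    from tensorflow.python.ops import random_ops

    shape = (3, 5, 7)
    target = (2, 3, 4)
    value = np.arange(np.prod(shape), dtype=np.int32).reshape(shape)
    crop = random_ops.random_crop(value, size=target, seed=7)

    with session_lib.Session() as sess:
      y = sess.run(crop)
      assert y.shape == target
      # The corner element identifies which contiguous sub-block was taken.
      print(y[0, 0, 0])
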
diff --git a/tensorflow/python/kernel_tests/random_gamma_test.py b/tensorflow/python/kernel_tests/random_gamma_test.py
index 9231f0cc69..64595ce9cd 100644
--- a/tensorflow/python/kernel_tests/random_gamma_test.py
+++ b/tensorflow/python/kernel_tests/random_gamma_test.py
@@ -12,30 +12,39 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for tensorflow.ops.random_ops.random_gamma."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
+
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
-import tensorflow as tf
+
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
+from tensorflow.python.framework import random_seed
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import random_ops
+from tensorflow.python.platform import test
+from tensorflow.python.platform import tf_logging
-class RandomGammaTest(tf.test.TestCase):
+class RandomGammaTest(test.TestCase):
"""This is a medium test due to the moments computation taking some time."""
def setUp(self):
np.random.seed(137)
- tf.set_random_seed(137)
+ random_seed.set_random_seed(137)
def _Sampler(self, num, alpha, beta, dtype, use_gpu, seed=None):
def func():
- with self.test_session(use_gpu=use_gpu, graph=tf.Graph()) as sess:
- rng = tf.random_gamma([num], alpha, beta=beta, dtype=dtype, seed=seed)
+ with self.test_session(use_gpu=use_gpu, graph=ops.Graph()) as sess:
+ rng = random_ops.random_gamma(
+ [num], alpha, beta=beta, dtype=dtype, seed=seed)
ret = np.empty([10, num])
for i in xrange(10):
ret[i, :] = sess.run(rng)
@@ -44,16 +53,16 @@ class RandomGammaTest(tf.test.TestCase):
return func
def testMomentsFloat32(self):
- self._testMoments(tf.float32)
+ self._testMoments(dtypes.float32)
def testMomentsFloat64(self):
- self._testMoments(tf.float64)
+ self._testMoments(dtypes.float64)
def _testMoments(self, dt):
try:
from scipy import stats # pylint: disable=g-import-not-at-top
except ImportError as e:
- tf.logging.warn("Cannot test moments: %s" % e)
+ tf_logging.warn("Cannot test moments: %s" % e)
return
# Check the given array of samples matches the given theoretical moment
@@ -73,7 +82,7 @@ class RandomGammaTest(tf.test.TestCase):
for stride in 0, 1, 4, 17:
alphas = [0.2, 1.0, 3.0]
- if dt == tf.float64:
+ if dt == dtypes.float64:
alphas = [0.01] + alphas
for alpha in alphas:
for scale in 9, 17:
@@ -107,13 +116,12 @@ class RandomGammaTest(tf.test.TestCase):
# This is just
# (moments_i_squared - moments_i_mean**2) / moments_sample_count[i]
normalized_moments_i_var = (
- moments_i_mean / moments_sample_count[i] * (
- moments_i_squared/moments_i_mean - moments_i_mean))
+ moments_i_mean / moments_sample_count[i] *
+ (moments_i_squared / moments_i_mean - moments_i_mean))
# Assume every operation has a small numerical error.
# It takes i multiplications to calculate one i-th moment.
error_per_moment = i * np.finfo(dt.as_numpy_dtype).eps
- total_variance = (
- normalized_moments_i_var + error_per_moment)
+ total_variance = (normalized_moments_i_var + error_per_moment)
tiny = np.finfo(dt.as_numpy_dtype).tiny
self.assertGreaterEqual(total_variance, 0)
if total_variance < tiny:
@@ -135,16 +143,16 @@ class RandomGammaTest(tf.test.TestCase):
try:
from scipy import stats # pylint: disable=g-import-not-at-top
except ImportError as e:
- tf.logging.warn("Cannot test zero density proportions: %s" % e)
+ tf_logging.warn("Cannot test zero density proportions: %s" % e)
return
allowable_zeros = {
- tf.float16: stats.gamma(alpha).cdf(np.finfo(np.float16).tiny),
- tf.float32: stats.gamma(alpha).cdf(np.finfo(np.float32).tiny),
- tf.float64: stats.gamma(alpha).cdf(np.finfo(np.float64).tiny)
+ dtypes.float16: stats.gamma(alpha).cdf(np.finfo(np.float16).tiny),
+ dtypes.float32: stats.gamma(alpha).cdf(np.finfo(np.float32).tiny),
+ dtypes.float64: stats.gamma(alpha).cdf(np.finfo(np.float64).tiny)
}
failures = []
for use_gpu in [False, True]:
- for dt in tf.float16, tf.float32, tf.float64:
+ for dt in dtypes.float16, dtypes.float32, dtypes.float64:
sampler = self._Sampler(
10000, alpha, 1.0, dt, use_gpu=use_gpu, seed=12345)
x = sampler()
@@ -165,13 +173,13 @@ class RandomGammaTest(tf.test.TestCase):
# implementations which use the same random number seed.
def testDistinct(self):
for use_gpu in [False, True]:
- for dt in tf.float16, tf.float32, tf.float64:
+ for dt in dtypes.float16, dtypes.float32, dtypes.float64:
sampler = self._Sampler(1000, 2.0, 1.0, dt, use_gpu=use_gpu)
x = sampler()
y = sampler()
# Number of different samples.
count = (x == y).sum()
- count_limit = 20 if dt == tf.float16 else 10
+ count_limit = 20 if dt == dtypes.float16 else 10
if count >= count_limit:
print(use_gpu, dt)
print("x = ", x)
@@ -182,19 +190,19 @@ class RandomGammaTest(tf.test.TestCase):
# Checks that the CPU and GPU implementations return the same results,
# given the same random seed.
def testCPUGPUMatch(self):
- for dt in tf.float16, tf.float32, tf.float64:
+ for dt in dtypes.float16, dtypes.float32, dtypes.float64:
results = {}
for use_gpu in [False, True]:
sampler = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=use_gpu, seed=12345)
results[use_gpu] = sampler()
- if dt == tf.float16:
+ if dt == dtypes.float16:
self.assertAllClose(results[False], results[True], rtol=1e-3, atol=1e-3)
else:
self.assertAllClose(results[False], results[True], rtol=1e-6, atol=1e-6)
def testSeed(self):
for use_gpu in [False, True]:
- for dt in tf.float16, tf.float32, tf.float64:
+ for dt in dtypes.float16, dtypes.float32, dtypes.float64:
sx = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=use_gpu, seed=345)
sy = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=use_gpu, seed=345)
self.assertAllEqual(sx(), sy())
@@ -205,37 +213,45 @@ class RandomGammaTest(tf.test.TestCase):
SetIsStateful() should prevent two identical random ops from getting
merged.
"""
- for dtype in tf.float16, tf.float32, tf.float64:
+ for dtype in dtypes.float16, dtypes.float32, dtypes.float64:
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
- rnd1 = tf.random_gamma([24], 2.0, dtype=dtype)
- rnd2 = tf.random_gamma([24], 2.0, dtype=dtype)
+ rnd1 = random_ops.random_gamma([24], 2.0, dtype=dtype)
+ rnd2 = random_ops.random_gamma([24], 2.0, dtype=dtype)
diff = rnd2 - rnd1
self.assertGreater(np.linalg.norm(diff.eval()), 0.1)
def testShape(self):
# Fully known shape.
- rnd = tf.random_gamma([150], 2.0)
+ rnd = random_ops.random_gamma([150], 2.0)
self.assertEqual([150], rnd.get_shape().as_list())
- rnd = tf.random_gamma([150], 2.0, beta=[3.0, 4.0])
+ rnd = random_ops.random_gamma([150], 2.0, beta=[3.0, 4.0])
self.assertEqual([150, 2], rnd.get_shape().as_list())
- rnd = tf.random_gamma([150], tf.ones([1, 2, 3]))
+ rnd = random_ops.random_gamma([150], array_ops.ones([1, 2, 3]))
self.assertEqual([150, 1, 2, 3], rnd.get_shape().as_list())
- rnd = tf.random_gamma([20, 30], tf.ones([1, 2, 3]))
+ rnd = random_ops.random_gamma([20, 30], array_ops.ones([1, 2, 3]))
self.assertEqual([20, 30, 1, 2, 3], rnd.get_shape().as_list())
- rnd = tf.random_gamma([123], tf.placeholder(tf.float32, shape=(2,)))
+ rnd = random_ops.random_gamma(
+ [123], array_ops.placeholder(
+ dtypes.float32, shape=(2,)))
self.assertEqual([123, 2], rnd.get_shape().as_list())
# Partially known shape.
- rnd = tf.random_gamma(tf.placeholder(tf.int32, shape=(1,)), tf.ones([7, 3]))
+ rnd = random_ops.random_gamma(
+ array_ops.placeholder(
+ dtypes.int32, shape=(1,)), array_ops.ones([7, 3]))
self.assertEqual([None, 7, 3], rnd.get_shape().as_list())
- rnd = tf.random_gamma(tf.placeholder(tf.int32, shape=(3,)), tf.ones([9, 6]))
+ rnd = random_ops.random_gamma(
+ array_ops.placeholder(
+ dtypes.int32, shape=(3,)), array_ops.ones([9, 6]))
self.assertEqual([None, None, None, 9, 6], rnd.get_shape().as_list())
# Unknown shape.
- rnd = tf.random_gamma(tf.placeholder(tf.int32), tf.placeholder(tf.float32))
+ rnd = random_ops.random_gamma(
+ array_ops.placeholder(dtypes.int32),
+ array_ops.placeholder(dtypes.float32))
self.assertIs(None, rnd.get_shape().ndims)
- rnd = tf.random_gamma([50], tf.placeholder(tf.float32))
+ rnd = random_ops.random_gamma([50], array_ops.placeholder(dtypes.float32))
self.assertIs(None, rnd.get_shape().ndims)
if __name__ == "__main__":
- tf.test.main()
+ test.main()
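
testShape above encodes the shape-inference contract of random_gamma: the statically known shape is the requested sample shape with the alpha (or beta) tensor's shape appended, degrading gracefully as less is known. A condensed sketch; no session is needed since only static shapes are inspected:

    from tensorflow.python.framework import dtypes
    from tensorflow.python.ops import array_ops
    from tensorflow.python.ops import random_ops

    # Fully known: sample shape [150] plus beta's shape [2].
    rnd = random_ops.random_gamma([150], 2.0, beta=[3.0, 4.0])
    print(rnd.get_shape().as_list())  # [150, 2]

    # Alpha's static shape [1, 2, 3] is appended to [20, 30].
    rnd = random_ops.random_gamma([20, 30], array_ops.ones([1, 2, 3]))
    print(rnd.get_shape().as_list())  # [20, 30, 1, 2, 3]

    # With a placeholder alpha of shape (2,), only [123, 2] is known.
    rnd = random_ops.random_gamma(
        [123], array_ops.placeholder(dtypes.float32, shape=(2,)))
    print(rnd.get_shape().as_list())  # [123, 2]
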
diff --git a/tensorflow/python/kernel_tests/random_ops_test.py b/tensorflow/python/kernel_tests/random_ops_test.py
index 9fffe92781..fa323b2496 100644
--- a/tensorflow/python/kernel_tests/random_ops_test.py
+++ b/tensorflow/python/kernel_tests/random_ops_test.py
@@ -12,35 +12,42 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for tensorflow.ops.random_ops."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
-import tensorflow as tf
+
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import random_ops
+from tensorflow.python.platform import test
-class RandomNormalTest(tf.test.TestCase):
+class RandomNormalTest(test.TestCase):
def _Sampler(self, num, mu, sigma, dtype, use_gpu, seed=None):
+
def func():
- with self.test_session(use_gpu=use_gpu, graph=tf.Graph()) as sess:
- rng = tf.random_normal(
+ with self.test_session(use_gpu=use_gpu, graph=ops.Graph()) as sess:
+ rng = random_ops.random_normal(
[num], mean=mu, stddev=sigma, dtype=dtype, seed=seed)
ret = np.empty([10, num])
for i in xrange(10):
ret[i, :] = sess.run(rng)
return ret
+
return func
# Asserts that different trials (1000 samples per trial) are unlikely
# to see the same sequence of values. Will catch buggy
# implementations which use the same random number seed.
def testDistinct(self):
- for dt in tf.float16, tf.float32, tf.float64:
+ for dt in dtypes.float16, dtypes.float32, dtypes.float64:
sampler = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=True)
x = sampler()
y = sampler()
@@ -55,18 +62,18 @@ class RandomNormalTest(tf.test.TestCase):
# Checks that the CPU and GPU implementations return the same results,
# given the same random seed.
def testCPUGPUMatch(self):
- for dt in tf.float16, tf.float32, tf.float64:
+ for dt in dtypes.float16, dtypes.float32, dtypes.float64:
results = {}
for use_gpu in [False, True]:
sampler = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=use_gpu, seed=12345)
results[use_gpu] = sampler()
- if dt == tf.float16:
+ if dt == dtypes.float16:
self.assertAllClose(results[False], results[True], rtol=1e-3, atol=1e-3)
else:
self.assertAllClose(results[False], results[True], rtol=1e-6, atol=1e-6)
def testSeed(self):
- for dt in tf.float16, tf.float32, tf.float64:
+ for dt in dtypes.float16, dtypes.float32, dtypes.float64:
sx = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=True, seed=345)
sy = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=True, seed=345)
self.assertAllEqual(sx(), sy())
@@ -75,23 +82,25 @@ class RandomNormalTest(tf.test.TestCase):
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
shape = [2, 3, 4]
- rnd1 = tf.random_normal(shape, 0.0, 1.0, tf.float32)
- rnd2 = tf.random_normal(shape, 0.0, 1.0, tf.float32)
+ rnd1 = random_ops.random_normal(shape, 0.0, 1.0, dtypes.float32)
+ rnd2 = random_ops.random_normal(shape, 0.0, 1.0, dtypes.float32)
diff = rnd2 - rnd1
self.assertTrue(np.linalg.norm(diff.eval()) > 0.1)
-class TruncatedNormalTest(tf.test.TestCase):
+class TruncatedNormalTest(test.TestCase):
def _Sampler(self, num, mu, sigma, dtype, use_gpu, seed=None):
+
def func():
- with self.test_session(use_gpu=use_gpu, graph=tf.Graph()) as sess:
- rng = tf.truncated_normal(
+ with self.test_session(use_gpu=use_gpu, graph=ops.Graph()) as sess:
+ rng = random_ops.truncated_normal(
[num], mean=mu, stddev=sigma, dtype=dtype, seed=seed)
ret = np.empty([10, num])
for i in xrange(10):
ret[i, :] = sess.run(rng)
return ret
+
return func
# Asserts that different trials (1000 samples per trial) are unlikely
@@ -99,8 +108,8 @@ class TruncatedNormalTest(tf.test.TestCase):
# implementations which use the same random number seed.
def testDistinct(self):
# NOTE: TruncatedNormal on GPU is not supported.
- if not tf.test.is_gpu_available():
- for dt in tf.float16, tf.float32, tf.float64:
+ if not test.is_gpu_available():
+ for dt in dtypes.float16, dtypes.float32, dtypes.float64:
sampler = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=False)
x = sampler()
y = sampler()
@@ -116,24 +125,24 @@ class TruncatedNormalTest(tf.test.TestCase):
# given the same random seed.
def testCPUGPUMatch(self):
# Skip the test if there is no GPU.
- if not tf.test.is_gpu_available():
+ if not test.is_gpu_available():
return
- for dt in tf.float16, tf.float32, tf.float64:
+ for dt in dtypes.float16, dtypes.float32, dtypes.float64:
results = {}
for use_gpu in [False, True]:
# We need a particularly large number of samples to test multiple rounds
# on GPU.
- sampler = self._Sampler(200000, 0.0, 1.0, dt, use_gpu=use_gpu,
- seed=12345)
+ sampler = self._Sampler(
+ 200000, 0.0, 1.0, dt, use_gpu=use_gpu, seed=12345)
results[use_gpu] = sampler()
- if dt == tf.float16:
+ if dt == dtypes.float16:
self.assertAllClose(results[False], results[True], rtol=1e-3, atol=1e-3)
else:
self.assertAllClose(results[False], results[True], rtol=1e-6, atol=1e-6)
def testSeed(self):
- for dt in tf.float16, tf.float32, tf.float64:
+ for dt in dtypes.float16, dtypes.float32, dtypes.float64:
sx = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=True, seed=345)
sy = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=True, seed=345)
self.assertAllEqual(sx(), sy())
@@ -141,7 +150,7 @@ class TruncatedNormalTest(tf.test.TestCase):
# The effective standard deviation of truncated normal is 85% of the
# requested one.
def testStdDev(self):
- for dt in tf.float16, tf.float32, tf.float64:
+ for dt in dtypes.float16, dtypes.float32, dtypes.float64:
stddev = 3.0
sampler = self._Sampler(100000, 0.0, stddev, dt, use_gpu=True)
x = sampler()
@@ -151,29 +160,30 @@ class TruncatedNormalTest(tf.test.TestCase):
def testNoCSE(self):
with self.test_session(use_gpu=True):
shape = [2, 3, 4]
- rnd1 = tf.truncated_normal(shape, 0.0, 1.0, tf.float32)
- rnd2 = tf.truncated_normal(shape, 0.0, 1.0, tf.float32)
+ rnd1 = random_ops.truncated_normal(shape, 0.0, 1.0, dtypes.float32)
+ rnd2 = random_ops.truncated_normal(shape, 0.0, 1.0, dtypes.float32)
diff = rnd2 - rnd1
self.assertTrue(np.linalg.norm(diff.eval()) > 0.1)
-class RandomUniformTest(tf.test.TestCase):
+class RandomUniformTest(test.TestCase):
def _Sampler(self, num, minv, maxv, dtype, use_gpu, seed=None):
+
def func():
- with self.test_session(use_gpu=use_gpu, graph=tf.Graph()) as sess:
- rng = tf.random_uniform(
+ with self.test_session(use_gpu=use_gpu, graph=ops.Graph()) as sess:
+ rng = random_ops.random_uniform(
[num], minval=minv, maxval=maxv, dtype=dtype, seed=seed)
ret = np.empty([10, num])
for i in xrange(10):
ret[i, :] = sess.run(rng)
return ret
+
return func
def testRange(self):
- for dt in tf.float16, tf.float32, tf.float64, tf.int32, tf.int64:
- sampler = self._Sampler(1000, minv=-2, maxv=8, dtype=dt,
- use_gpu=True)
+ for dt in dtypes.float16, dtypes.float32, dtypes.float64, dtypes.int32, dtypes.int64:
+ sampler = self._Sampler(1000, minv=-2, maxv=8, dtype=dt, use_gpu=True)
x = sampler()
self.assertTrue(-2 <= np.min(x))
self.assertTrue(np.max(x) < 8)
@@ -182,14 +192,13 @@ class RandomUniformTest(tf.test.TestCase):
# to see the same sequence of values. Will catch buggy
# implementations which use the same random number seed.
def testDistinct(self):
- for dt in tf.float16, tf.float32, tf.float64, tf.int32, tf.int64:
+ for dt in dtypes.float16, dtypes.float32, dtypes.float64, dtypes.int32, dtypes.int64:
maxv = 1.0 if dt.is_floating else 1 << 30
- sampler = self._Sampler(1000, minv=0, maxv=maxv, dtype=dt,
- use_gpu=True)
+ sampler = self._Sampler(1000, minv=0, maxv=maxv, dtype=dt, use_gpu=True)
x = sampler()
y = sampler()
count = (x == y).sum()
- count_limit = 50 if dt == tf.float16 else 10
+ count_limit = 50 if dt == dtypes.float16 else 10
if count >= count_limit:
print("x = ", x)
print("y = ", y)
@@ -205,11 +214,11 @@ class RandomUniformTest(tf.test.TestCase):
# The counts should follow an (n, p) binomial distribution.
mean = p * n
std = np.sqrt(n * p * (1 - p))
- for dt in tf.int32, tf.int64:
+ for dt in dtypes.int32, dtypes.int64:
# Use a fixed seed here to make the test deterministic.
# Without the fixed seed, the 5 * std bound will (very rarely) fail.
- sampler = self._Sampler(n // 10, minv=minv, maxv=maxv, dtype=dt,
- use_gpu=True, seed=17)
+ sampler = self._Sampler(
+ n // 10, minv=minv, maxv=maxv, dtype=dt, use_gpu=True, seed=17)
x = sampler().ravel()
self.assertEqual(x.shape, (n,))
counts, _ = np.histogram(x, bins=maxv - minv)
@@ -221,17 +230,17 @@ class RandomUniformTest(tf.test.TestCase):
# Checks that the CPU and GPU implementations return the same results,
# given the same random seed.
def testCPUGPUMatch(self):
- for dt in tf.float16, tf.float32, tf.float64, tf.int32, tf.int64:
+ for dt in dtypes.float16, dtypes.float32, dtypes.float64, dtypes.int32, dtypes.int64:
maxv = 1.0 if dt.is_floating else 17
results = {}
for use_gpu in False, True:
- sampler = self._Sampler(1000, minv=0, maxv=maxv, dtype=dt,
- use_gpu=use_gpu, seed=12345)
+ sampler = self._Sampler(
+ 1000, minv=0, maxv=maxv, dtype=dt, use_gpu=use_gpu, seed=12345)
results[use_gpu] = sampler()
self.assertAllEqual(results[False], results[True])
def testSeed(self):
- for dt in tf.float16, tf.float32, tf.float64, tf.int32, tf.int64:
+ for dt in dtypes.float16, dtypes.float32, dtypes.float64, dtypes.int32, dtypes.int64:
for seed in [345, 2**100, -2**100]:
sx = self._Sampler(1000, 0, 17, dtype=dt, use_gpu=True, seed=seed)
sy = self._Sampler(1000, 0, 17, dtype=dt, use_gpu=True, seed=seed)
@@ -239,50 +248,55 @@ class RandomUniformTest(tf.test.TestCase):
def testNoCSE(self):
shape = [2, 3, 4]
- for dtype in tf.float16, tf.float32, tf.int32:
+ for dtype in dtypes.float16, dtypes.float32, dtypes.int32:
with self.test_session(use_gpu=True):
- rnd1 = tf.random_uniform(shape, 0, 17, dtype=dtype)
- rnd2 = tf.random_uniform(shape, 0, 17, dtype=dtype)
+ rnd1 = random_ops.random_uniform(shape, 0, 17, dtype=dtype)
+ rnd2 = random_ops.random_uniform(shape, 0, 17, dtype=dtype)
diff = (rnd2 - rnd1).eval()
self.assertTrue(np.linalg.norm(diff) > 0.1)
-class RandomShapeTest(tf.test.TestCase):
+class RandomShapeTest(test.TestCase):
def testTruncatedNormal(self):
# Fully known shape.
- rnd1 = tf.truncated_normal([1, 2, 3])
+ rnd1 = random_ops.truncated_normal([1, 2, 3])
self.assertEqual([1, 2, 3], rnd1.get_shape())
# Partially known shape.
- rnd2 = tf.truncated_normal(tf.placeholder(tf.int32, shape=(3,)))
+ rnd2 = random_ops.truncated_normal(
+ array_ops.placeholder(
+ dtypes.int32, shape=(3,)))
self.assertEqual([None, None, None], rnd2.get_shape().as_list())
# Unknown shape.
- rnd3 = tf.truncated_normal(tf.placeholder(tf.int32))
+ rnd3 = random_ops.truncated_normal(array_ops.placeholder(dtypes.int32))
self.assertIs(None, rnd3.get_shape().ndims)
def testRandomNormal(self):
# Fully known shape.
- rnd1 = tf.random_normal([1, 2, 3])
+ rnd1 = random_ops.random_normal([1, 2, 3])
self.assertEqual([1, 2, 3], rnd1.get_shape())
# Partially known shape.
- rnd2 = tf.random_normal(tf.placeholder(tf.int32, shape=(3,)))
+ rnd2 = random_ops.random_normal(
+ array_ops.placeholder(
+ dtypes.int32, shape=(3,)))
self.assertEqual([None, None, None], rnd2.get_shape().as_list())
# Unknown shape.
- rnd3 = tf.random_normal(tf.placeholder(tf.int32))
+ rnd3 = random_ops.random_normal(array_ops.placeholder(dtypes.int32))
self.assertIs(None, rnd3.get_shape().ndims)
def testRandomUniform(self):
# Fully known shape.
- rnd1 = tf.random_uniform([1, 2, 3])
+ rnd1 = random_ops.random_uniform([1, 2, 3])
self.assertEqual([1, 2, 3], rnd1.get_shape())
# Partially known shape.
- rnd2 = tf.random_uniform(
- tf.placeholder(tf.int32, shape=(3,)))
+ rnd2 = random_ops.random_uniform(
+ array_ops.placeholder(
+ dtypes.int32, shape=(3,)))
self.assertEqual([None, None, None], rnd2.get_shape().as_list())
# Unknown shape.
- rnd3 = tf.random_uniform(tf.placeholder(tf.int32))
+ rnd3 = random_ops.random_uniform(array_ops.placeholder(dtypes.int32))
self.assertIs(None, rnd3.get_shape().ndims)
if __name__ == "__main__":
- tf.test.main()
+ test.main()
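
The testSeed methods above all reduce to one invariant: two samplers built in separate graphs with the same op-level seed produce identical sequences. A minimal sketch of that invariant for random_normal, assuming a TF 1.x session runtime; the sample helper is a hypothetical name:

    import numpy as np

    from tensorflow.python.client import session as session_lib
    from tensorflow.python.framework import ops
    from tensorflow.python.ops import random_ops


    def sample(seed):
      # Each call builds a fresh graph so no state is shared between runs.
      with ops.Graph().as_default():
        rng = random_ops.random_normal(
            [1000], mean=0.0, stddev=1.0, seed=seed)
        with session_lib.Session() as sess:
          return sess.run(rng)


    np.testing.assert_array_equal(sample(345), sample(345))
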
diff --git a/tensorflow/python/kernel_tests/random_shuffle_queue_test.py b/tensorflow/python/kernel_tests/random_shuffle_queue_test.py
index 9fd960ec94..8a92a4d0f0 100644
--- a/tensorflow/python/kernel_tests/random_shuffle_queue_test.py
+++ b/tensorflow/python/kernel_tests/random_shuffle_queue_test.py
@@ -12,8 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for tensorflow.ops.data_flow_ops.Queue."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
@@ -24,23 +24,31 @@ import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
-import tensorflow as tf
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes as dtypes_lib
+from tensorflow.python.framework import errors_impl
+from tensorflow.python.framework import random_seed
+from tensorflow.python.framework import tensor_shape
+from tensorflow.python.ops import data_flow_ops
+from tensorflow.python.platform import test
+from tensorflow.python.platform import tf_logging
-class RandomShuffleQueueTest(tf.test.TestCase):
+
+class RandomShuffleQueueTest(test.TestCase):
def setUp(self):
# Useful for debugging when a test times out.
super(RandomShuffleQueueTest, self).setUp()
- tf.logging.error("Starting: %s", self._testMethodName)
+ tf_logging.error("Starting: %s", self._testMethodName)
def tearDown(self):
super(RandomShuffleQueueTest, self).tearDown()
- tf.logging.error("Finished: %s", self._testMethodName)
+ tf_logging.error("Finished: %s", self._testMethodName)
def testEnqueue(self):
with self.test_session():
- q = tf.RandomShuffleQueue(10, 5, tf.float32)
+ q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32)
enqueue_op = q.enqueue((10.0,))
self.assertAllEqual(0, q.size().eval())
enqueue_op.run()
@@ -48,8 +56,8 @@ class RandomShuffleQueueTest(tf.test.TestCase):
def testEnqueueWithShape(self):
with self.test_session():
- q = tf.RandomShuffleQueue(
- 10, 5, tf.float32, shapes=tf.TensorShape([3, 2]))
+ q = data_flow_ops.RandomShuffleQueue(
+ 10, 5, dtypes_lib.float32, shapes=tensor_shape.TensorShape([3, 2]))
enqueue_correct_op = q.enqueue(([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],))
enqueue_correct_op.run()
self.assertAllEqual(1, q.size().eval())
@@ -58,21 +66,20 @@ class RandomShuffleQueueTest(tf.test.TestCase):
def testEnqueueManyWithShape(self):
with self.test_session():
- q = tf.RandomShuffleQueue(
- 10, 5, [tf.int32, tf.int32],
- shapes=[(), (2,)])
+ q = data_flow_ops.RandomShuffleQueue(
+ 10, 5, [dtypes_lib.int32, dtypes_lib.int32], shapes=[(), (2,)])
q.enqueue_many([[1, 2, 3, 4], [[1, 1], [2, 2], [3, 3], [4, 4]]]).run()
self.assertAllEqual(4, q.size().eval())
- q2 = tf.RandomShuffleQueue(10, 5, tf.int32, shapes=tf.TensorShape([3]))
+ q2 = data_flow_ops.RandomShuffleQueue(
+ 10, 5, dtypes_lib.int32, shapes=tensor_shape.TensorShape([3]))
q2.enqueue(([1, 2, 3],))
q2.enqueue_many(([[1, 2, 3]],))
def testScalarShapes(self):
with self.test_session() as sess:
- q = tf.RandomShuffleQueue(
- 10, 0, [tf.int32, tf.int32],
- shapes=[(), (1,)])
+ q = data_flow_ops.RandomShuffleQueue(
+ 10, 0, [dtypes_lib.int32, dtypes_lib.int32], shapes=[(), (1,)])
q.enqueue_many([[1, 2, 3, 4], [[5], [6], [7], [8]]]).run()
q.enqueue([9, [10]]).run()
dequeue_t = q.dequeue()
@@ -88,7 +95,7 @@ class RandomShuffleQueueTest(tf.test.TestCase):
def testParallelEnqueue(self):
with self.test_session() as sess:
- q = tf.RandomShuffleQueue(10, 0, tf.float32)
+ q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
@@ -96,8 +103,11 @@ class RandomShuffleQueueTest(tf.test.TestCase):
# Run one producer thread for each element in elems.
def enqueue(enqueue_op):
sess.run(enqueue_op)
- threads = [self.checkedThread(target=enqueue, args=(e,))
- for e in enqueue_ops]
+
+ threads = [
+ self.checkedThread(
+ target=enqueue, args=(e,)) for e in enqueue_ops
+ ]
for thread in threads:
thread.start()
for thread in threads:
@@ -111,7 +121,7 @@ class RandomShuffleQueueTest(tf.test.TestCase):
def testParallelDequeue(self):
with self.test_session() as sess:
- q = tf.RandomShuffleQueue(10, 0, tf.float32)
+ q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
@@ -125,6 +135,7 @@ class RandomShuffleQueueTest(tf.test.TestCase):
def dequeue():
results.append(sess.run(dequeued_t))
+
threads = [self.checkedThread(target=dequeue) for _ in enqueue_ops]
for thread in threads:
thread.start()
@@ -134,7 +145,7 @@ class RandomShuffleQueueTest(tf.test.TestCase):
def testDequeue(self):
with self.test_session():
- q = tf.RandomShuffleQueue(10, 0, tf.float32)
+ q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
@@ -147,7 +158,7 @@ class RandomShuffleQueueTest(tf.test.TestCase):
def testEnqueueAndBlockingDequeue(self):
with self.test_session() as sess:
- q = tf.RandomShuffleQueue(3, 0, tf.float32)
+ q = data_flow_ops.RandomShuffleQueue(3, 0, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
@@ -176,8 +187,8 @@ class RandomShuffleQueueTest(tf.test.TestCase):
def testMultiEnqueueAndDequeue(self):
with self.test_session() as sess:
- q = tf.RandomShuffleQueue(
- 10, 0, (tf.int32, tf.float32))
+ q = data_flow_ops.RandomShuffleQueue(
+ 10, 0, (dtypes_lib.int32, dtypes_lib.float32))
elems = [(5, 10.0), (10, 20.0), (15, 30.0)]
enqueue_ops = [q.enqueue((x, y)) for x, y in elems]
dequeued_t = q.dequeue()
@@ -193,12 +204,12 @@ class RandomShuffleQueueTest(tf.test.TestCase):
def testQueueSizeEmpty(self):
with self.test_session():
- q = tf.RandomShuffleQueue(10, 5, tf.float32)
+ q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32)
self.assertEqual(0, q.size().eval())
def testQueueSizeAfterEnqueueAndDequeue(self):
with self.test_session():
- q = tf.RandomShuffleQueue(10, 0, tf.float32)
+ q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue()
size = q.size()
@@ -211,7 +222,7 @@ class RandomShuffleQueueTest(tf.test.TestCase):
def testEnqueueMany(self):
with self.test_session():
- q = tf.RandomShuffleQueue(10, 0, tf.float32)
+ q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue()
@@ -225,9 +236,9 @@ class RandomShuffleQueueTest(tf.test.TestCase):
def testEmptyEnqueueMany(self):
with self.test_session():
- q = tf.RandomShuffleQueue(10, 5, tf.float32)
- empty_t = tf.constant([], dtype=tf.float32,
- shape=[0, 2, 3])
+ q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32)
+ empty_t = constant_op.constant(
+ [], dtype=dtypes_lib.float32, shape=[0, 2, 3])
enqueue_op = q.enqueue_many((empty_t,))
size_t = q.size()
@@ -237,7 +248,7 @@ class RandomShuffleQueueTest(tf.test.TestCase):
def testEmptyDequeueMany(self):
with self.test_session():
- q = tf.RandomShuffleQueue(10, 0, tf.float32, shapes=())
+ q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, shapes=())
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue_many(0)
@@ -247,7 +258,7 @@ class RandomShuffleQueueTest(tf.test.TestCase):
def testEmptyDequeueUpTo(self):
with self.test_session():
- q = tf.RandomShuffleQueue(10, 0, tf.float32, shapes=())
+ q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, shapes=())
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue_up_to(0)
@@ -257,9 +268,9 @@ class RandomShuffleQueueTest(tf.test.TestCase):
def testEmptyDequeueManyWithNoShape(self):
with self.test_session():
- q = tf.RandomShuffleQueue(10, 0, tf.float32)
- enqueue_op = q.enqueue(
- (tf.constant([10.0, 20.0], shape=(1, 2)),))
+ q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
+ enqueue_op = q.enqueue(
+ (constant_op.constant([10.0, 20.0], shape=(1, 2)),))
dequeued_t = q.dequeue_many(0)
# Expect the operation to fail due to the shape not being constrained.
@@ -278,9 +289,9 @@ class RandomShuffleQueueTest(tf.test.TestCase):
def testEmptyDequeueUpToWithNoShape(self):
with self.test_session():
- q = tf.RandomShuffleQueue(10, 0, tf.float32)
- enqueue_op = q.enqueue(
- (tf.constant([10.0, 20.0], shape=(1, 2)),))
+ q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
+ enqueue_op = q.enqueue(
+ (constant_op.constant([10.0, 20.0], shape=(1, 2)),))
dequeued_t = q.dequeue_up_to(0)
# Expect the operation to fail due to the shape not being constrained.
@@ -299,8 +310,8 @@ class RandomShuffleQueueTest(tf.test.TestCase):
def testMultiEnqueueMany(self):
with self.test_session() as sess:
- q = tf.RandomShuffleQueue(
- 10, 0, (tf.float32, tf.int32))
+ q = data_flow_ops.RandomShuffleQueue(
+ 10, 0, (dtypes_lib.float32, dtypes_lib.int32))
float_elems = [10.0, 20.0, 30.0, 40.0]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
@@ -318,7 +329,7 @@ class RandomShuffleQueueTest(tf.test.TestCase):
def testDequeueMany(self):
with self.test_session():
- q = tf.RandomShuffleQueue(10, 0, tf.float32, ((),))
+ q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(5)
@@ -331,7 +342,7 @@ class RandomShuffleQueueTest(tf.test.TestCase):
def testDequeueUpToNoBlocking(self):
with self.test_session():
- q = tf.RandomShuffleQueue(10, 0, tf.float32, ((),))
+ q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_up_to(5)
@@ -344,13 +355,13 @@ class RandomShuffleQueueTest(tf.test.TestCase):
def testMultiDequeueMany(self):
with self.test_session() as sess:
- q = tf.RandomShuffleQueue(
- 10, 0, (tf.float32, tf.int32),
- shapes=((), (2,)))
+ q = data_flow_ops.RandomShuffleQueue(
+ 10, 0, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (2,)))
float_elems = [
- 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
- int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10],
- [11, 12], [13, 14], [15, 16], [17, 18], [19, 20]]
+ 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0
+ ]
+ int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14],
+ [15, 16], [17, 18], [19, 20]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue_many(4)
dequeued_single_t = q.dequeue()
@@ -378,13 +389,13 @@ class RandomShuffleQueueTest(tf.test.TestCase):
def testMultiDequeueUpToNoBlocking(self):
with self.test_session() as sess:
- q = tf.RandomShuffleQueue(
- 10, 0, (tf.float32, tf.int32),
- shapes=((), (2,)))
+ q = data_flow_ops.RandomShuffleQueue(
+ 10, 0, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (2,)))
float_elems = [
- 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
- int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10],
- [11, 12], [13, 14], [15, 16], [17, 18], [19, 20]]
+ 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0
+ ]
+ int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14],
+ [15, 16], [17, 18], [19, 20]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue_up_to(4)
dequeued_single_t = q.dequeue()
@@ -413,8 +424,8 @@ class RandomShuffleQueueTest(tf.test.TestCase):
def testHighDimension(self):
with self.test_session():
- q = tf.RandomShuffleQueue(
- 10, 0, tf.int32, ((4, 4, 4, 4)))
+ q = data_flow_ops.RandomShuffleQueue(
+ 10, 0, dtypes_lib.int32, ((4, 4, 4, 4)))
elems = np.array([[[[[x] * 4] * 4] * 4] * 4 for x in range(10)], np.int32)
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(10)
@@ -424,7 +435,8 @@ class RandomShuffleQueueTest(tf.test.TestCase):
def testParallelEnqueueMany(self):
with self.test_session() as sess:
- q = tf.RandomShuffleQueue(1000, 0, tf.float32, shapes=())
+ q = data_flow_ops.RandomShuffleQueue(
+ 1000, 0, dtypes_lib.float32, shapes=())
elems = [10.0 * x for x in range(100)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(1000)
@@ -432,6 +444,7 @@ class RandomShuffleQueueTest(tf.test.TestCase):
# Enqueue 100 items in parallel on 10 threads.
def enqueue():
sess.run(enqueue_op)
+
threads = [self.checkedThread(target=enqueue) for _ in range(10)]
for thread in threads:
thread.start()
@@ -442,7 +455,8 @@ class RandomShuffleQueueTest(tf.test.TestCase):
def testParallelDequeueMany(self):
with self.test_session() as sess:
- q = tf.RandomShuffleQueue(1000, 0, tf.float32, shapes=())
+ q = data_flow_ops.RandomShuffleQueue(
+ 1000, 0, dtypes_lib.float32, shapes=())
elems = [10.0 * x for x in range(1000)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(100)
@@ -454,6 +468,7 @@ class RandomShuffleQueueTest(tf.test.TestCase):
def dequeue():
dequeued_elems.extend(sess.run(dequeued_t))
+
threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for thread in threads:
thread.start()
@@ -463,7 +478,8 @@ class RandomShuffleQueueTest(tf.test.TestCase):
def testParallelDequeueUpTo(self):
with self.test_session() as sess:
- q = tf.RandomShuffleQueue(1000, 0, tf.float32, shapes=())
+ q = data_flow_ops.RandomShuffleQueue(
+ 1000, 0, dtypes_lib.float32, shapes=())
elems = [10.0 * x for x in range(1000)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_up_to(100)
@@ -475,6 +491,7 @@ class RandomShuffleQueueTest(tf.test.TestCase):
def dequeue():
dequeued_elems.extend(sess.run(dequeued_t))
+
threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for thread in threads:
thread.start()
@@ -486,7 +503,8 @@ class RandomShuffleQueueTest(tf.test.TestCase):
with self.test_session() as sess:
dequeue_sizes = [random.randint(50, 150) for _ in xrange(10)]
total_elements = sum(dequeue_sizes)
- q = tf.RandomShuffleQueue(total_elements, 0, tf.float32, shapes=())
+ q = data_flow_ops.RandomShuffleQueue(
+ total_elements, 0, dtypes_lib.float32, shapes=())
elems = [10.0 * x for x in xrange(total_elements)]
enqueue_op = q.enqueue_many((elems,))
@@ -499,6 +517,7 @@ class RandomShuffleQueueTest(tf.test.TestCase):
def dequeue(dequeue_op):
dequeued_elems.extend(sess.run(dequeue_op))
+
threads = []
for dequeue_op in dequeue_ops:
threads.append(self.checkedThread(target=dequeue, args=(dequeue_op,)))
@@ -510,7 +529,7 @@ class RandomShuffleQueueTest(tf.test.TestCase):
def testBlockingDequeueMany(self):
with self.test_session() as sess:
- q = tf.RandomShuffleQueue(10, 0, tf.float32, ((),))
+ q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(4)
@@ -537,7 +556,7 @@ class RandomShuffleQueueTest(tf.test.TestCase):
def testBlockingDequeueUpTo(self):
with self.test_session() as sess:
- q = tf.RandomShuffleQueue(10, 0, tf.float32, ((),))
+ q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_up_to(4)
@@ -566,14 +585,14 @@ class RandomShuffleQueueTest(tf.test.TestCase):
with self.test_session():
# Define a first queue that contains integer counts.
dequeue_counts = [random.randint(1, 10) for _ in range(100)]
- count_q = tf.RandomShuffleQueue(100, 0, tf.int32)
+ count_q = data_flow_ops.RandomShuffleQueue(100, 0, dtypes_lib.int32)
enqueue_counts_op = count_q.enqueue_many((dequeue_counts,))
total_count = sum(dequeue_counts)
# Define a second queue that contains total_count elements.
elems = [random.randint(0, 100) for _ in range(total_count)]
- q = tf.RandomShuffleQueue(
- total_count, 0, tf.int32, ((),))
+ q = data_flow_ops.RandomShuffleQueue(
+ total_count, 0, dtypes_lib.int32, ((),))
enqueue_elems_op = q.enqueue_many((elems,))
# Define a subgraph that first dequeues a count, then DequeuesMany
@@ -592,14 +611,14 @@ class RandomShuffleQueueTest(tf.test.TestCase):
with self.test_session():
# Define a first queue that contains integer counts.
dequeue_counts = [random.randint(1, 10) for _ in range(100)]
- count_q = tf.RandomShuffleQueue(100, 0, tf.int32)
+ count_q = data_flow_ops.RandomShuffleQueue(100, 0, dtypes_lib.int32)
enqueue_counts_op = count_q.enqueue_many((dequeue_counts,))
total_count = sum(dequeue_counts)
# Define a second queue that contains total_count elements.
elems = [random.randint(0, 100) for _ in range(total_count)]
- q = tf.RandomShuffleQueue(
- total_count, 0, tf.int32, ((),))
+ q = data_flow_ops.RandomShuffleQueue(
+ total_count, 0, dtypes_lib.int32, ((),))
enqueue_elems_op = q.enqueue_many((elems,))
# Define a subgraph that first dequeues a count, then DequeuesUpTo
@@ -616,7 +635,7 @@ class RandomShuffleQueueTest(tf.test.TestCase):
def testDequeueFromClosedQueue(self):
with self.test_session():
- q = tf.RandomShuffleQueue(10, 2, tf.float32)
+ q = data_flow_ops.RandomShuffleQueue(10, 2, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
@@ -629,13 +648,13 @@ class RandomShuffleQueueTest(tf.test.TestCase):
self.assertItemsEqual(expected, results)
# Expect the operation to fail due to the queue being closed.
- with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
+ with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
dequeued_t.eval()
def testBlockingDequeueFromClosedQueue(self):
with self.test_session() as sess:
- q = tf.RandomShuffleQueue(10, 2, tf.float32)
+ q = data_flow_ops.RandomShuffleQueue(10, 2, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
@@ -644,12 +663,13 @@ class RandomShuffleQueueTest(tf.test.TestCase):
enqueue_op.run()
results = []
+
def dequeue():
for _ in elems:
results.append(sess.run(dequeued_t))
self.assertItemsEqual(elems, results)
# Expect the operation to fail due to the queue being closed.
- with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
+ with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
@@ -667,14 +687,15 @@ class RandomShuffleQueueTest(tf.test.TestCase):
def testBlockingDequeueFromClosedEmptyQueue(self):
with self.test_session() as sess:
- q = tf.RandomShuffleQueue(10, 0, tf.float32)
+ q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
close_op = q.close()
dequeued_t = q.dequeue()
finished = [] # Needs to be a mutable type
+
def dequeue():
# Expect the operation to fail due to the queue being closed.
- with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
+ with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
finished.append(True)
@@ -691,7 +712,7 @@ class RandomShuffleQueueTest(tf.test.TestCase):
def testBlockingDequeueManyFromClosedQueue(self):
with self.test_session() as sess:
- q = tf.RandomShuffleQueue(10, 0, tf.float32, ((),))
+ q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
@@ -700,11 +721,12 @@ class RandomShuffleQueueTest(tf.test.TestCase):
enqueue_op.run()
progress = [] # Must be mutable
+
def dequeue():
self.assertItemsEqual(elems, sess.run(dequeued_t))
progress.append(1)
# Expect the operation to fail due to the queue being closed.
- with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
+ with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
progress.append(2)
@@ -716,7 +738,8 @@ class RandomShuffleQueueTest(tf.test.TestCase):
# TODO(mrry): Figure out how to do this without sleeping.
for _ in range(100):
time.sleep(0.01)
- if len(progress) == 1: break
+ if len(progress) == 1:
+ break
self.assertEqual(len(progress), 1)
time.sleep(0.01)
close_op.run()
@@ -725,7 +748,7 @@ class RandomShuffleQueueTest(tf.test.TestCase):
def testBlockingDequeueUpToFromClosedQueueReturnsRemainder(self):
with self.test_session() as sess:
- q = tf.RandomShuffleQueue(10, 0, tf.float32, ((),))
+ q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
@@ -734,6 +757,7 @@ class RandomShuffleQueueTest(tf.test.TestCase):
enqueue_op.run()
results = []
+
def dequeue():
results.extend(sess.run(dequeued_t))
self.assertEquals(3, len(results))
@@ -751,8 +775,11 @@ class RandomShuffleQueueTest(tf.test.TestCase):
def testBlockingDequeueUpToSmallerThanMinAfterDequeue(self):
with self.test_session() as sess:
- q = tf.RandomShuffleQueue(capacity=10, min_after_dequeue=2,
- dtypes=tf.float32, shapes=((),))
+ q = data_flow_ops.RandomShuffleQueue(
+ capacity=10,
+ min_after_dequeue=2,
+ dtypes=dtypes_lib.float32,
+ shapes=((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
@@ -761,6 +788,7 @@ class RandomShuffleQueueTest(tf.test.TestCase):
enqueue_op.run()
results = []
+
def dequeue():
results.extend(sess.run(dequeued_t))
self.assertEquals(3, len(results))
@@ -780,7 +808,7 @@ class RandomShuffleQueueTest(tf.test.TestCase):
def testBlockingDequeueManyFromClosedQueueWithElementsRemaining(self):
with self.test_session() as sess:
- q = tf.RandomShuffleQueue(10, 0, tf.float32, ((),))
+ q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
@@ -790,11 +818,12 @@ class RandomShuffleQueueTest(tf.test.TestCase):
enqueue_op.run()
results = []
+
def dequeue():
results.extend(sess.run(dequeued_t))
self.assertEqual(len(results), 3)
# Expect the operation to fail due to the queue being closed.
- with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
+ with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
# While the last dequeue failed, we want to ensure that it returns
@@ -813,13 +842,13 @@ class RandomShuffleQueueTest(tf.test.TestCase):
def testBlockingDequeueManyFromClosedEmptyQueue(self):
with self.test_session() as sess:
- q = tf.RandomShuffleQueue(10, 5, tf.float32, ((),))
+ q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32, ((),))
close_op = q.close()
dequeued_t = q.dequeue_many(4)
def dequeue():
# Expect the operation to fail due to the queue being closed.
- with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
+ with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
@@ -833,13 +862,13 @@ class RandomShuffleQueueTest(tf.test.TestCase):
def testBlockingDequeueUpToFromClosedEmptyQueue(self):
with self.test_session() as sess:
- q = tf.RandomShuffleQueue(10, 5, tf.float32, ((),))
+ q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32, ((),))
close_op = q.close()
dequeued_t = q.dequeue_up_to(4)
def dequeue():
# Expect the operation to fail due to the queue being closed.
- with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
+ with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
@@ -853,7 +882,7 @@ class RandomShuffleQueueTest(tf.test.TestCase):
def testEnqueueToClosedQueue(self):
with self.test_session():
- q = tf.RandomShuffleQueue(10, 4, tf.float32)
+ q = data_flow_ops.RandomShuffleQueue(10, 4, dtypes_lib.float32)
enqueue_op = q.enqueue((10.0,))
close_op = q.close()
@@ -861,12 +890,12 @@ class RandomShuffleQueueTest(tf.test.TestCase):
close_op.run()
# Expect the operation to fail due to the queue being closed.
- with self.assertRaisesRegexp(tf.errors.CancelledError, "is closed"):
+ with self.assertRaisesRegexp(errors_impl.CancelledError, "is closed"):
enqueue_op.run()
def testEnqueueManyToClosedQueue(self):
with self.test_session():
- q = tf.RandomShuffleQueue(10, 5, tf.float32, ((),))
+ q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
@@ -875,12 +904,12 @@ class RandomShuffleQueueTest(tf.test.TestCase):
close_op.run()
# Expect the operation to fail due to the queue being closed.
- with self.assertRaisesRegexp(tf.errors.CancelledError, "is closed"):
+ with self.assertRaisesRegexp(errors_impl.CancelledError, "is closed"):
enqueue_op.run()
def testBlockingEnqueueToFullQueue(self):
with self.test_session() as sess:
- q = tf.RandomShuffleQueue(4, 0, tf.float32, ((),))
+ q = data_flow_ops.RandomShuffleQueue(4, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue((50.0,))
@@ -890,6 +919,7 @@ class RandomShuffleQueueTest(tf.test.TestCase):
def blocking_enqueue():
sess.run(blocking_enqueue_op)
+
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The dequeue ops should run after the blocking_enqueue_op has blocked.
@@ -907,7 +937,7 @@ class RandomShuffleQueueTest(tf.test.TestCase):
def testBlockingEnqueueManyToFullQueue(self):
with self.test_session() as sess:
- q = tf.RandomShuffleQueue(4, 0, tf.float32, ((),))
+ q = data_flow_ops.RandomShuffleQueue(4, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
@@ -917,6 +947,7 @@ class RandomShuffleQueueTest(tf.test.TestCase):
def blocking_enqueue():
sess.run(blocking_enqueue_op)
+
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The dequeue ops should run after the blocking_enqueue_op has blocked.
@@ -939,7 +970,7 @@ class RandomShuffleQueueTest(tf.test.TestCase):
def testBlockingEnqueueToClosedQueue(self):
with self.test_session() as sess:
- q = tf.RandomShuffleQueue(4, 0, tf.float32, ((),))
+ q = data_flow_ops.RandomShuffleQueue(4, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue((50.0,))
@@ -954,8 +985,9 @@ class RandomShuffleQueueTest(tf.test.TestCase):
sess.run(blocking_enqueue_op)
# Expect the operation to fail due to the queue being closed.
- with self.assertRaisesRegexp(tf.errors.CancelledError, "closed"):
+ with self.assertRaisesRegexp(errors_impl.CancelledError, "closed"):
sess.run(blocking_enqueue_op)
+
thread1 = self.checkedThread(target=blocking_enqueue)
thread1.start()
@@ -965,6 +997,7 @@ class RandomShuffleQueueTest(tf.test.TestCase):
def blocking_close():
sess.run(close_op)
+
thread2 = self.checkedThread(target=blocking_close)
thread2.start()
@@ -982,7 +1015,7 @@ class RandomShuffleQueueTest(tf.test.TestCase):
def testBlockingEnqueueManyToClosedQueue(self):
with self.test_session() as sess:
- q = tf.RandomShuffleQueue(4, 0, tf.float32, ((),))
+ q = data_flow_ops.RandomShuffleQueue(4, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
@@ -997,8 +1030,9 @@ class RandomShuffleQueueTest(tf.test.TestCase):
sess.run(blocking_enqueue_op)
# At this point the close operation will become unblocked, so the
# next enqueue will fail.
- with self.assertRaisesRegexp(tf.errors.CancelledError, "closed"):
+ with self.assertRaisesRegexp(errors_impl.CancelledError, "closed"):
sess.run(blocking_enqueue_op)
+
thread1 = self.checkedThread(target=blocking_enqueue)
thread1.start()
# The close_op should run after the blocking_enqueue_op has blocked.
@@ -1010,6 +1044,7 @@ class RandomShuffleQueueTest(tf.test.TestCase):
def blocking_close():
sess.run(close_op)
+
thread2 = self.checkedThread(target=blocking_close)
thread2.start()
@@ -1026,15 +1061,19 @@ class RandomShuffleQueueTest(tf.test.TestCase):
def testSharedQueueSameSession(self):
with self.test_session():
- q1 = tf.RandomShuffleQueue(
- 1, 0, tf.float32, ((),), shared_name="shared_queue")
+ q1 = data_flow_ops.RandomShuffleQueue(
+ 1, 0, dtypes_lib.float32, ((),), shared_name="shared_queue")
q1.enqueue((10.0,)).run()
# TensorFlow TestCase adds a default graph seed (=87654321). We check if
# the seed computed from the default graph seed is reproduced.
seed = 887634792
- q2 = tf.RandomShuffleQueue(
- 1, 0, tf.float32, ((),), shared_name="shared_queue", seed=seed)
+ q2 = data_flow_ops.RandomShuffleQueue(
+ 1,
+ 0,
+ dtypes_lib.float32, ((),),
+ shared_name="shared_queue",
+ seed=seed)
q1_size_t = q1.size()
q2_size_t = q2.size()
@@ -1059,16 +1098,20 @@ class RandomShuffleQueueTest(tf.test.TestCase):
def testSharedQueueSameSessionGraphSeedNone(self):
with self.test_session():
- q1 = tf.RandomShuffleQueue(
- 1, 0, tf.float32, ((),), shared_name="shared_queue", seed=98765432)
+ q1 = data_flow_ops.RandomShuffleQueue(
+ 1,
+ 0,
+ dtypes_lib.float32, ((),),
+ shared_name="shared_queue",
+ seed=98765432)
q1.enqueue((10.0,)).run()
# If neither the graph seed nor the op seed is provided, the default value
# must be used, and when a shared queue has already been created, the second
# queue op must accept any previous seed value.
- tf.set_random_seed(None)
- q2 = tf.RandomShuffleQueue(
- 1, 0, tf.float32, ((),), shared_name="shared_queue")
+ random_seed.set_random_seed(None)
+ q2 = data_flow_ops.RandomShuffleQueue(
+ 1, 0, dtypes_lib.float32, ((),), shared_name="shared_queue")
q1_size_t = q1.size()
q2_size_t = q2.size()
@@ -1078,66 +1121,66 @@ class RandomShuffleQueueTest(tf.test.TestCase):
def testIncompatibleSharedQueueErrors(self):
with self.test_session():
- q_a_1 = tf.RandomShuffleQueue(
- 10, 5, tf.float32, shared_name="q_a")
- q_a_2 = tf.RandomShuffleQueue(
- 15, 5, tf.float32, shared_name="q_a")
+ q_a_1 = data_flow_ops.RandomShuffleQueue(
+ 10, 5, dtypes_lib.float32, shared_name="q_a")
+ q_a_2 = data_flow_ops.RandomShuffleQueue(
+ 15, 5, dtypes_lib.float32, shared_name="q_a")
q_a_1.queue_ref.eval()
with self.assertRaisesOpError("capacity"):
q_a_2.queue_ref.eval()
- q_b_1 = tf.RandomShuffleQueue(
- 10, 0, tf.float32, shared_name="q_b")
- q_b_2 = tf.RandomShuffleQueue(
- 10, 5, tf.float32, shared_name="q_b")
+ q_b_1 = data_flow_ops.RandomShuffleQueue(
+ 10, 0, dtypes_lib.float32, shared_name="q_b")
+ q_b_2 = data_flow_ops.RandomShuffleQueue(
+ 10, 5, dtypes_lib.float32, shared_name="q_b")
q_b_1.queue_ref.eval()
with self.assertRaisesOpError("min_after_dequeue"):
q_b_2.queue_ref.eval()
- q_c_1 = tf.RandomShuffleQueue(
- 10, 5, tf.float32, shared_name="q_c")
- q_c_2 = tf.RandomShuffleQueue(
- 10, 5, tf.int32, shared_name="q_c")
+ q_c_1 = data_flow_ops.RandomShuffleQueue(
+ 10, 5, dtypes_lib.float32, shared_name="q_c")
+ q_c_2 = data_flow_ops.RandomShuffleQueue(
+ 10, 5, dtypes_lib.int32, shared_name="q_c")
q_c_1.queue_ref.eval()
with self.assertRaisesOpError("component types"):
q_c_2.queue_ref.eval()
- q_d_1 = tf.RandomShuffleQueue(
- 10, 5, tf.float32, shared_name="q_d")
- q_d_2 = tf.RandomShuffleQueue(
- 10, 5, tf.float32, shapes=[(1, 1, 2, 3)], shared_name="q_d")
+ q_d_1 = data_flow_ops.RandomShuffleQueue(
+ 10, 5, dtypes_lib.float32, shared_name="q_d")
+ q_d_2 = data_flow_ops.RandomShuffleQueue(
+ 10, 5, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_d")
q_d_1.queue_ref.eval()
with self.assertRaisesOpError("component shapes"):
q_d_2.queue_ref.eval()
- q_e_1 = tf.RandomShuffleQueue(
- 10, 5, tf.float32, shapes=[(1, 1, 2, 3)], shared_name="q_e")
- q_e_2 = tf.RandomShuffleQueue(
- 10, 5, tf.float32, shared_name="q_e")
+ q_e_1 = data_flow_ops.RandomShuffleQueue(
+ 10, 5, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_e")
+ q_e_2 = data_flow_ops.RandomShuffleQueue(
+ 10, 5, dtypes_lib.float32, shared_name="q_e")
q_e_1.queue_ref.eval()
with self.assertRaisesOpError("component shapes"):
q_e_2.queue_ref.eval()
- q_f_1 = tf.RandomShuffleQueue(
- 10, 5, tf.float32, shapes=[(1, 1, 2, 3)], shared_name="q_f")
- q_f_2 = tf.RandomShuffleQueue(
- 10, 5, tf.float32, shapes=[(1, 1, 2, 4)], shared_name="q_f")
+ q_f_1 = data_flow_ops.RandomShuffleQueue(
+ 10, 5, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_f")
+ q_f_2 = data_flow_ops.RandomShuffleQueue(
+ 10, 5, dtypes_lib.float32, shapes=[(1, 1, 2, 4)], shared_name="q_f")
q_f_1.queue_ref.eval()
with self.assertRaisesOpError("component shapes"):
q_f_2.queue_ref.eval()
- q_g_1 = tf.RandomShuffleQueue(
- 10, 5, tf.float32, shared_name="q_g")
- q_g_2 = tf.RandomShuffleQueue(
- 10, 5, (tf.float32, tf.int32), shared_name="q_g")
+ q_g_1 = data_flow_ops.RandomShuffleQueue(
+ 10, 5, dtypes_lib.float32, shared_name="q_g")
+ q_g_2 = data_flow_ops.RandomShuffleQueue(
+ 10, 5, (dtypes_lib.float32, dtypes_lib.int32), shared_name="q_g")
q_g_1.queue_ref.eval()
with self.assertRaisesOpError("component types"):
q_g_2.queue_ref.eval()
- q_h_1 = tf.RandomShuffleQueue(
- 10, 5, tf.float32, seed=12, shared_name="q_h")
- q_h_2 = tf.RandomShuffleQueue(
- 10, 5, tf.float32, seed=21, shared_name="q_h")
+ q_h_1 = data_flow_ops.RandomShuffleQueue(
+ 10, 5, dtypes_lib.float32, seed=12, shared_name="q_h")
+ q_h_2 = data_flow_ops.RandomShuffleQueue(
+ 10, 5, dtypes_lib.float32, seed=21, shared_name="q_h")
q_h_1.queue_ref.eval()
with self.assertRaisesOpError("random seeds"):
q_h_2.queue_ref.eval()
@@ -1148,19 +1191,19 @@ class RandomShuffleQueueTest(tf.test.TestCase):
qlist = list()
for _ in xrange(num_queues):
qlist.append(
- tf.RandomShuffleQueue(10, 0, tf.float32))
+ data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32))
# Enqueue/Dequeue into a dynamically selected queue
for _ in xrange(20):
index = np.random.randint(num_queues)
- q = tf.RandomShuffleQueue.from_list(index, qlist)
+ q = data_flow_ops.RandomShuffleQueue.from_list(index, qlist)
q.enqueue((10.,)).run()
self.assertEqual(q.dequeue().eval(), 10.0)
def testSelectQueueOutOfRange(self):
with self.test_session():
- q1 = tf.RandomShuffleQueue(10, 0, tf.float32)
- q2 = tf.RandomShuffleQueue(15, 0, tf.float32)
- enq_q = tf.RandomShuffleQueue.from_list(3, [q1, q2])
+ q1 = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
+ q2 = data_flow_ops.RandomShuffleQueue(15, 0, dtypes_lib.float32)
+ enq_q = data_flow_ops.RandomShuffleQueue.from_list(3, [q1, q2])
with self.assertRaisesOpError("Index must be in the range"):
enq_q.dequeue().eval()
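(A hedged sketch of the dynamic queue selection exercised above: from_list builds an op that routes enqueues and dequeues to one of several queues by index; the index values below are illustrative.)

from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.ops import data_flow_ops

q1 = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
q2 = data_flow_ops.RandomShuffleQueue(15, 0, dtypes_lib.float32)

# Index 1 selects q2 at run time; an out-of-range index such as 3 fails
# with "Index must be in the range", as the test above checks.
q = data_flow_ops.RandomShuffleQueue.from_list(1, [q1, q2])
enqueue_op = q.enqueue((10.0,))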
@@ -1186,26 +1229,29 @@ class RandomShuffleQueueTest(tf.test.TestCase):
def testResetOfBlockingOperation(self):
with self.test_session() as sess:
- q_empty = tf.RandomShuffleQueue(
- 5, 0, tf.float32, ((),))
+ q_empty = data_flow_ops.RandomShuffleQueue(
+ 5, 0, dtypes_lib.float32, ((),))
dequeue_op = q_empty.dequeue()
dequeue_many_op = q_empty.dequeue_many(1)
dequeue_up_to_op = q_empty.dequeue_up_to(1)
- q_full = tf.RandomShuffleQueue(5, 0, tf.float32, ((),))
+ q_full = data_flow_ops.RandomShuffleQueue(5, 0, dtypes_lib.float32, ((),))
sess.run(q_full.enqueue_many(([1.0, 2.0, 3.0, 4.0, 5.0],)))
enqueue_op = q_full.enqueue((6.0,))
enqueue_many_op = q_full.enqueue_many(([6.0],))
threads = [
- self.checkedThread(self._blockingDequeue, args=(sess, dequeue_op)),
- self.checkedThread(self._blockingDequeueMany, args=(sess,
- dequeue_many_op)),
- self.checkedThread(self._blockingDequeueUpTo,
- args=(sess, dequeue_up_to_op)),
- self.checkedThread(self._blockingEnqueue, args=(sess, enqueue_op)),
- self.checkedThread(self._blockingEnqueueMany, args=(sess,
- enqueue_many_op))]
+ self.checkedThread(
+ self._blockingDequeue, args=(sess, dequeue_op)),
+ self.checkedThread(
+ self._blockingDequeueMany, args=(sess, dequeue_many_op)),
+ self.checkedThread(
+ self._blockingDequeueUpTo, args=(sess, dequeue_up_to_op)),
+ self.checkedThread(
+ self._blockingEnqueue, args=(sess, enqueue_op)),
+ self.checkedThread(
+ self._blockingEnqueueMany, args=(sess, enqueue_many_op))
+ ]
for t in threads:
t.start()
time.sleep(0.1)
@@ -1217,10 +1263,10 @@ class RandomShuffleQueueTest(tf.test.TestCase):
with self.test_session():
# Specify seeds to make the test deterministic
# (https://en.wikipedia.org/wiki/Taxicab_number).
- q1 = tf.RandomShuffleQueue(10, 5, tf.int32,
- ((),), seed=1729)
- q2 = tf.RandomShuffleQueue(10, 5, tf.int32,
- ((),), seed=87539319)
+ q1 = data_flow_ops.RandomShuffleQueue(
+ 10, 5, dtypes_lib.int32, ((),), seed=1729)
+ q2 = data_flow_ops.RandomShuffleQueue(
+ 10, 5, dtypes_lib.int32, ((),), seed=87539319)
enq1 = q1.enqueue_many(([1, 2, 3, 4, 5],))
enq2 = q2.enqueue_many(([1, 2, 3, 4, 5],))
deq1 = q1.dequeue_many(5)
@@ -1251,8 +1297,10 @@ class RandomShuffleQueueTest(tf.test.TestCase):
with self.test_session():
# Specify seeds to make the test deterministic
# (https://en.wikipedia.org/wiki/Taxicab_number).
- q1 = tf.RandomShuffleQueue(10, 5, tf.int32, ((),), seed=1729)
- q2 = tf.RandomShuffleQueue(10, 5, tf.int32, ((),), seed=87539319)
+ q1 = data_flow_ops.RandomShuffleQueue(
+ 10, 5, dtypes_lib.int32, ((),), seed=1729)
+ q2 = data_flow_ops.RandomShuffleQueue(
+ 10, 5, dtypes_lib.int32, ((),), seed=87539319)
enq1 = q1.enqueue_many(([1, 2, 3, 4, 5],))
enq2 = q2.enqueue_many(([1, 2, 3, 4, 5],))
deq1 = q1.dequeue_up_to(5)
@@ -1283,10 +1331,10 @@ class RandomShuffleQueueTest(tf.test.TestCase):
with self.test_session():
# Specify seeds to make the test deterministic
# (https://en.wikipedia.org/wiki/Taxicab_number).
- q1 = tf.RandomShuffleQueue(10, 5, tf.int32,
- ((),), seed=1729)
- q2 = tf.RandomShuffleQueue(10, 5, tf.int32,
- ((),), seed=87539319)
+ q1 = data_flow_ops.RandomShuffleQueue(
+ 10, 5, dtypes_lib.int32, ((),), seed=1729)
+ q2 = data_flow_ops.RandomShuffleQueue(
+ 10, 5, dtypes_lib.int32, ((),), seed=87539319)
enq1 = q1.enqueue_many(([1, 2, 3, 4, 5],))
enq2 = q2.enqueue_many(([1, 2, 3, 4, 5],))
deq1 = q1.dequeue()
@@ -1317,19 +1365,20 @@ class RandomShuffleQueueTest(tf.test.TestCase):
def testBigEnqueueMany(self):
with self.test_session() as sess:
- q = tf.RandomShuffleQueue(
- 5, 0, tf.int32, ((),))
+ q = data_flow_ops.RandomShuffleQueue(5, 0, dtypes_lib.int32, ((),))
elem = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
enq = q.enqueue_many((elem,))
deq = q.dequeue()
size_op = q.size()
enq_done = []
+
def blocking_enqueue():
enq_done.append(False)
# This will fill the queue and then block until enough dequeues happen.
sess.run(enq)
enq_done.append(True)
+
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
@@ -1361,15 +1410,17 @@ class RandomShuffleQueueTest(tf.test.TestCase):
def testBigDequeueMany(self):
with self.test_session() as sess:
- q = tf.RandomShuffleQueue(2, 0, tf.int32, ((),))
+ q = data_flow_ops.RandomShuffleQueue(2, 0, dtypes_lib.int32, ((),))
elem = np.arange(4, dtype=np.int32)
enq_list = [q.enqueue((e,)) for e in elem]
deq = q.dequeue_many(4)
results = []
+
def blocking_dequeue():
# Will only complete after 4 enqueues complete.
results.extend(sess.run(deq))
+
thread = self.checkedThread(target=blocking_dequeue)
thread.start()
# The dequeue should start and then block.
@@ -1385,4 +1436,4 @@ class RandomShuffleQueueTest(tf.test.TestCase):
if __name__ == "__main__":
- tf.test.main()
+ test.main()
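(A minimal sketch of the import rewrite applied throughout this file: rather than reaching every symbol through the top-level package, as in tf.RandomShuffleQueue, tf.float32, and tf.test.main, the test now imports the defining modules directly, under the same aliases used in the hunks above.)

from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.platform import test

# Formerly: q = tf.RandomShuffleQueue(10, 5, tf.float32)
q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32)

if __name__ == "__main__":
  test.main()  # formerly tf.test.main()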
diff --git a/tensorflow/python/kernel_tests/reader_ops_test.py b/tensorflow/python/kernel_tests/reader_ops_test.py
index 4af5c3c8a2..a2092388f1 100644
--- a/tensorflow/python/kernel_tests/reader_ops_test.py
+++ b/tensorflow/python/kernel_tests/reader_ops_test.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for Reader ops from io_ops."""
from __future__ import absolute_import
@@ -26,13 +25,21 @@ import threading
import zlib
import six
-import tensorflow as tf
+
+from tensorflow.core.protobuf import config_pb2
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import errors_impl
+from tensorflow.python.lib.io import tf_record
+from tensorflow.python.ops import data_flow_ops
+from tensorflow.python.ops import io_ops
+from tensorflow.python.ops import variables
+from tensorflow.python.platform import test
+from tensorflow.python.util import compat
# pylint: disable=invalid-name
-TFRecordCompressionType = tf.python_io.TFRecordCompressionType
+TFRecordCompressionType = tf_record.TFRecordCompressionType
# pylint: enable=invalid-name
-
# Edgar Allan Poe's 'Eldorado'
_TEXT = b"""Gaily bedight,
A gallant knight,
@@ -64,7 +71,7 @@ _TEXT = b"""Gaily bedight,
"""
-class IdentityReaderTest(tf.test.TestCase):
+class IdentityReaderTest(test.TestCase):
def _ExpectRead(self, sess, key, value, expected):
k, v = sess.run([key, value])
@@ -73,10 +80,10 @@ class IdentityReaderTest(tf.test.TestCase):
def testOneEpoch(self):
with self.test_session() as sess:
- reader = tf.IdentityReader("test_reader")
+ reader = io_ops.IdentityReader("test_reader")
work_completed = reader.num_work_units_completed()
produced = reader.num_records_produced()
- queue = tf.FIFOQueue(99, [tf.string], shapes=())
+ queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
queued_length = queue.size()
key, value = reader.read(queue)
@@ -107,8 +114,8 @@ class IdentityReaderTest(tf.test.TestCase):
def testMultipleEpochs(self):
with self.test_session() as sess:
- reader = tf.IdentityReader("test_reader")
- queue = tf.FIFOQueue(99, [tf.string], shapes=())
+ reader = io_ops.IdentityReader("test_reader")
+ queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
enqueue = queue.enqueue_many([["DD", "EE"]])
key, value = reader.read(queue)
@@ -128,9 +135,9 @@ class IdentityReaderTest(tf.test.TestCase):
def testSerializeRestore(self):
with self.test_session() as sess:
- reader = tf.IdentityReader("test_reader")
+ reader = io_ops.IdentityReader("test_reader")
produced = reader.num_records_produced()
- queue = tf.FIFOQueue(99, [tf.string], shapes=())
+ queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
queue.enqueue_many([["X", "Y", "Z"]]).run()
key, value = reader.read(queue)
@@ -183,10 +190,10 @@ class IdentityReaderTest(tf.test.TestCase):
def testReset(self):
with self.test_session() as sess:
- reader = tf.IdentityReader("test_reader")
+ reader = io_ops.IdentityReader("test_reader")
work_completed = reader.num_work_units_completed()
produced = reader.num_records_produced()
- queue = tf.FIFOQueue(99, [tf.string], shapes=())
+ queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
queued_length = queue.size()
key, value = reader.read(queue)
@@ -209,13 +216,14 @@ class IdentityReaderTest(tf.test.TestCase):
self._ExpectRead(sess, key, value, b"K")
-class WholeFileReaderTest(tf.test.TestCase):
+class WholeFileReaderTest(test.TestCase):
def setUp(self):
super(WholeFileReaderTest, self).setUp()
- self._filenames = [os.path.join(self.get_temp_dir(),
- "whole_file.%d.txt" % i)
- for i in range(3)]
+ self._filenames = [
+ os.path.join(self.get_temp_dir(), "whole_file.%d.txt" % i)
+ for i in range(3)
+ ]
self._content = [b"One\na\nb\n", b"Two\nC\nD", b"Three x, y, z"]
for fn, c in zip(self._filenames, self._content):
with open(fn, "wb") as h:
@@ -228,13 +236,13 @@ class WholeFileReaderTest(tf.test.TestCase):
def _ExpectRead(self, sess, key, value, index):
k, v = sess.run([key, value])
- self.assertAllEqual(tf.compat.as_bytes(self._filenames[index]), k)
+ self.assertAllEqual(compat.as_bytes(self._filenames[index]), k)
self.assertAllEqual(self._content[index], v)
def testOneEpoch(self):
with self.test_session() as sess:
- reader = tf.WholeFileReader("test_reader")
- queue = tf.FIFOQueue(99, [tf.string], shapes=())
+ reader = io_ops.WholeFileReader("test_reader")
+ queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
queue.enqueue_many([self._filenames]).run()
queue.close().run()
key, value = reader.read(queue)
@@ -249,8 +257,8 @@ class WholeFileReaderTest(tf.test.TestCase):
def testInfiniteEpochs(self):
with self.test_session() as sess:
- reader = tf.WholeFileReader("test_reader")
- queue = tf.FIFOQueue(99, [tf.string], shapes=())
+ reader = io_ops.WholeFileReader("test_reader")
+ queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
enqueue = queue.enqueue_many([self._filenames])
key, value = reader.read(queue)
@@ -266,7 +274,7 @@ class WholeFileReaderTest(tf.test.TestCase):
self._ExpectRead(sess, key, value, 0)
-class TextLineReaderTest(tf.test.TestCase):
+class TextLineReaderTest(test.TestCase):
def setUp(self):
super(TextLineReaderTest, self).setUp()
@@ -274,7 +282,7 @@ class TextLineReaderTest(tf.test.TestCase):
self._num_lines = 5
def _LineText(self, f, l):
- return tf.compat.as_bytes("%d: %d" % (f, l))
+ return compat.as_bytes("%d: %d" % (f, l))
def _CreateFiles(self, crlf=False):
filenames = []
@@ -292,8 +300,8 @@ class TextLineReaderTest(tf.test.TestCase):
def _testOneEpoch(self, files):
with self.test_session() as sess:
- reader = tf.TextLineReader(name="test_reader")
- queue = tf.FIFOQueue(99, [tf.string], shapes=())
+ reader = io_ops.TextLineReader(name="test_reader")
+ queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
key, value = reader.read(queue)
queue.enqueue_many([files]).run()
@@ -301,7 +309,7 @@ class TextLineReaderTest(tf.test.TestCase):
for i in range(self._num_files):
for j in range(self._num_lines):
k, v = sess.run([key, value])
- self.assertAllEqual("%s:%d" % (files[i], j + 1), tf.compat.as_text(k))
+ self.assertAllEqual("%s:%d" % (files[i], j + 1), compat.as_text(k))
self.assertAllEqual(self._LineText(i, j), v)
with self.assertRaisesOpError("is closed and has insufficient elements "
@@ -317,8 +325,8 @@ class TextLineReaderTest(tf.test.TestCase):
def testSkipHeaderLines(self):
files = self._CreateFiles()
with self.test_session() as sess:
- reader = tf.TextLineReader(skip_header_lines=1, name="test_reader")
- queue = tf.FIFOQueue(99, [tf.string], shapes=())
+ reader = io_ops.TextLineReader(skip_header_lines=1, name="test_reader")
+ queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
key, value = reader.read(queue)
queue.enqueue_many([files]).run()
@@ -326,7 +334,7 @@ class TextLineReaderTest(tf.test.TestCase):
for i in range(self._num_files):
for j in range(self._num_lines - 1):
k, v = sess.run([key, value])
- self.assertAllEqual("%s:%d" % (files[i], j + 2), tf.compat.as_text(k))
+ self.assertAllEqual("%s:%d" % (files[i], j + 2), compat.as_text(k))
self.assertAllEqual(self._LineText(i, j + 1), v)
with self.assertRaisesOpError("is closed and has insufficient elements "
@@ -334,7 +342,7 @@ class TextLineReaderTest(tf.test.TestCase):
k, v = sess.run([key, value])
-class FixedLengthRecordReaderTest(tf.test.TestCase):
+class FixedLengthRecordReaderTest(test.TestCase):
def setUp(self):
super(FixedLengthRecordReaderTest, self).setUp()
@@ -345,7 +353,7 @@ class FixedLengthRecordReaderTest(tf.test.TestCase):
self._footer_bytes = 2
def _Record(self, f, r):
- return tf.compat.as_bytes(str(f * 2 + r) * self._record_bytes)
+ return compat.as_bytes(str(f * 2 + r) * self._record_bytes)
def _CreateFiles(self):
filenames = []
@@ -362,12 +370,12 @@ class FixedLengthRecordReaderTest(tf.test.TestCase):
def testOneEpoch(self):
files = self._CreateFiles()
with self.test_session() as sess:
- reader = tf.FixedLengthRecordReader(
+ reader = io_ops.FixedLengthRecordReader(
header_bytes=self._header_bytes,
record_bytes=self._record_bytes,
footer_bytes=self._footer_bytes,
name="test_reader")
- queue = tf.FIFOQueue(99, [tf.string], shapes=())
+ queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
key, value = reader.read(queue)
queue.enqueue_many([files]).run()
@@ -375,7 +383,7 @@ class FixedLengthRecordReaderTest(tf.test.TestCase):
for i in range(self._num_files):
for j in range(self._num_records):
k, v = sess.run([key, value])
- self.assertAllEqual("%s:%d" % (files[i], j), tf.compat.as_text(k))
+ self.assertAllEqual("%s:%d" % (files[i], j), compat.as_text(k))
self.assertAllEqual(self._Record(i, j), v)
with self.assertRaisesOpError("is closed and has insufficient elements "
@@ -383,7 +391,7 @@ class FixedLengthRecordReaderTest(tf.test.TestCase):
k, v = sess.run([key, value])
-class TFRecordReaderTest(tf.test.TestCase):
+class TFRecordReaderTest(test.TestCase):
def setUp(self):
super(TFRecordReaderTest, self).setUp()
@@ -391,14 +399,14 @@ class TFRecordReaderTest(tf.test.TestCase):
self._num_records = 7
def _Record(self, f, r):
- return tf.compat.as_bytes("Record %d of file %d" % (r, f))
+ return compat.as_bytes("Record %d of file %d" % (r, f))
def _CreateFiles(self):
filenames = []
for i in range(self._num_files):
fn = os.path.join(self.get_temp_dir(), "tf_record.%d.txt" % i)
filenames.append(fn)
- writer = tf.python_io.TFRecordWriter(fn)
+ writer = tf_record.TFRecordWriter(fn)
for j in range(self._num_records):
writer.write(self._Record(i, j))
return filenames
@@ -406,8 +414,8 @@ class TFRecordReaderTest(tf.test.TestCase):
def testOneEpoch(self):
files = self._CreateFiles()
with self.test_session() as sess:
- reader = tf.TFRecordReader(name="test_reader")
- queue = tf.FIFOQueue(99, [tf.string], shapes=())
+ reader = io_ops.TFRecordReader(name="test_reader")
+ queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
key, value = reader.read(queue)
queue.enqueue_many([files]).run()
@@ -415,7 +423,7 @@ class TFRecordReaderTest(tf.test.TestCase):
for i in range(self._num_files):
for j in range(self._num_records):
k, v = sess.run([key, value])
- self.assertTrue(tf.compat.as_text(k).startswith("%s:" % files[i]))
+ self.assertTrue(compat.as_text(k).startswith("%s:" % files[i]))
self.assertAllEqual(self._Record(i, j), v)
with self.assertRaisesOpError("is closed and has insufficient elements "
@@ -425,8 +433,8 @@ class TFRecordReaderTest(tf.test.TestCase):
def testReadUpTo(self):
files = self._CreateFiles()
with self.test_session() as sess:
- reader = tf.TFRecordReader(name="test_reader")
- queue = tf.FIFOQueue(99, [tf.string], shapes=())
+ reader = io_ops.TFRecordReader(name="test_reader")
+ queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
batch_size = 3
key, value = reader.read_up_to(queue, batch_size)
@@ -443,7 +451,7 @@ class TFRecordReaderTest(tf.test.TestCase):
self.assertLessEqual(len(v), batch_size)
num_k += len(k)
num_v += len(v)
- except tf.errors.OutOfRangeError:
+ except errors_impl.OutOfRangeError:
break
# Test that we have read everything
@@ -463,9 +471,9 @@ class TFRecordReaderTest(tf.test.TestCase):
zlib_files.append(zfn)
with self.test_session() as sess:
- options = tf.python_io.TFRecordOptions(TFRecordCompressionType.ZLIB)
- reader = tf.TFRecordReader(name="test_reader", options=options)
- queue = tf.FIFOQueue(99, [tf.string], shapes=())
+ options = tf_record.TFRecordOptions(TFRecordCompressionType.ZLIB)
+ reader = io_ops.TFRecordReader(name="test_reader", options=options)
+ queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
key, value = reader.read(queue)
queue.enqueue_many([zlib_files]).run()
@@ -473,8 +481,7 @@ class TFRecordReaderTest(tf.test.TestCase):
for i in range(self._num_files):
for j in range(self._num_records):
k, v = sess.run([key, value])
- self.assertTrue(
- tf.compat.as_text(k).startswith("%s:" % zlib_files[i]))
+ self.assertTrue(compat.as_text(k).startswith("%s:" % zlib_files[i]))
self.assertAllEqual(self._Record(i, j), v)
def testReadGzipFiles(self):
@@ -490,9 +497,9 @@ class TFRecordReaderTest(tf.test.TestCase):
gzip_files.append(zfn)
with self.test_session() as sess:
- options = tf.python_io.TFRecordOptions(TFRecordCompressionType.GZIP)
- reader = tf.TFRecordReader(name="test_reader", options=options)
- queue = tf.FIFOQueue(99, [tf.string], shapes=())
+ options = tf_record.TFRecordOptions(TFRecordCompressionType.GZIP)
+ reader = io_ops.TFRecordReader(name="test_reader", options=options)
+ queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
key, value = reader.read(queue)
queue.enqueue_many([gzip_files]).run()
@@ -500,12 +507,11 @@ class TFRecordReaderTest(tf.test.TestCase):
for i in range(self._num_files):
for j in range(self._num_records):
k, v = sess.run([key, value])
- self.assertTrue(
- tf.compat.as_text(k).startswith("%s:" % gzip_files[i]))
+ self.assertTrue(compat.as_text(k).startswith("%s:" % gzip_files[i]))
self.assertAllEqual(self._Record(i, j), v)
-class TFRecordWriterZlibTest(tf.test.TestCase):
+class TFRecordWriterZlibTest(test.TestCase):
def setUp(self):
super(TFRecordWriterZlibTest, self).setUp()
@@ -513,16 +519,16 @@ class TFRecordWriterZlibTest(tf.test.TestCase):
self._num_records = 7
def _Record(self, f, r):
- return tf.compat.as_bytes("Record %d of file %d" % (r, f))
+ return compat.as_bytes("Record %d of file %d" % (r, f))
def _CreateFiles(self):
filenames = []
for i in range(self._num_files):
fn = os.path.join(self.get_temp_dir(), "tf_record.%d.txt" % i)
filenames.append(fn)
- options = tf.python_io.TFRecordOptions(
+ options = tf_record.TFRecordOptions(
compression_type=TFRecordCompressionType.ZLIB)
- writer = tf.python_io.TFRecordWriter(fn, options=options)
+ writer = tf_record.TFRecordWriter(fn, options=options)
for j in range(self._num_records):
writer.write(self._Record(i, j))
writer.close()
@@ -532,7 +538,7 @@ class TFRecordWriterZlibTest(tf.test.TestCase):
def _WriteRecordsToFile(self, records, name="tf_record"):
fn = os.path.join(self.get_temp_dir(), name)
- writer = tf.python_io.TFRecordWriter(fn, options=None)
+ writer = tf_record.TFRecordWriter(fn, options=None)
for r in records:
writer.write(r)
writer.close()
@@ -552,10 +558,10 @@ class TFRecordWriterZlibTest(tf.test.TestCase):
def testOneEpoch(self):
files = self._CreateFiles()
with self.test_session() as sess:
- options = tf.python_io.TFRecordOptions(
+ options = tf_record.TFRecordOptions(
compression_type=TFRecordCompressionType.ZLIB)
- reader = tf.TFRecordReader(name="test_reader", options=options)
- queue = tf.FIFOQueue(99, [tf.string], shapes=())
+ reader = io_ops.TFRecordReader(name="test_reader", options=options)
+ queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
key, value = reader.read(queue)
queue.enqueue_many([files]).run()
@@ -563,7 +569,7 @@ class TFRecordWriterZlibTest(tf.test.TestCase):
for i in range(self._num_files):
for j in range(self._num_records):
k, v = sess.run([key, value])
- self.assertTrue(tf.compat.as_text(k).startswith("%s:" % files[i]))
+ self.assertTrue(compat.as_text(k).startswith("%s:" % files[i]))
self.assertAllEqual(self._Record(i, j), v)
with self.assertRaisesOpError("is closed and has insufficient elements "
@@ -594,15 +600,15 @@ class TFRecordWriterZlibTest(tf.test.TestCase):
h.write(output)
with self.test_session() as sess:
- options = tf.python_io.TFRecordOptions(
+ options = tf_record.TFRecordOptions(
compression_type=TFRecordCompressionType.ZLIB)
- reader = tf.TFRecordReader(name="test_reader", options=options)
- queue = tf.FIFOQueue(1, [tf.string], shapes=())
+ reader = io_ops.TFRecordReader(name="test_reader", options=options)
+ queue = data_flow_ops.FIFOQueue(1, [dtypes.string], shapes=())
key, value = reader.read(queue)
queue.enqueue(fn).run()
queue.close().run()
k, v = sess.run([key, value])
- self.assertTrue(tf.compat.as_text(k).startswith("%s:" % fn))
+ self.assertTrue(compat.as_text(k).startswith("%s:" % fn))
self.assertAllEqual(b"small record", v)
def testZlibReadWrite(self):
@@ -613,9 +619,10 @@ class TFRecordWriterZlibTest(tf.test.TestCase):
# read the compressed contents and verify.
actual = []
- for r in tf.python_io.tf_record_iterator(
- zfn, options=tf.python_io.TFRecordOptions(
- tf.python_io.TFRecordCompressionType.ZLIB)):
+ for r in tf_record.tf_record_iterator(
+ zfn,
+ options=tf_record.TFRecordOptions(
+ tf_record.TFRecordCompressionType.ZLIB)):
actual.append(r)
self.assertEqual(actual, original)
@@ -629,13 +636,13 @@ class TFRecordWriterZlibTest(tf.test.TestCase):
# read the compressed contents and verify.
actual = []
- for r in tf.python_io.tf_record_iterator(
- zfn, options=tf.python_io.TFRecordOptions(
- tf.python_io.TFRecordCompressionType.ZLIB)):
+ for r in tf_record.tf_record_iterator(
+ zfn,
+ options=tf_record.TFRecordOptions(
+ tf_record.TFRecordCompressionType.ZLIB)):
actual.append(r)
self.assertEqual(actual, original)
-
def testGzipReadWrite(self):
"""Verify that files produced are gzip compatible."""
original = [b"foo", b"bar"]
@@ -649,30 +656,29 @@ class TFRecordWriterZlibTest(tf.test.TestCase):
f.write(cdata)
actual = []
- for r in tf.python_io.tf_record_iterator(
- gzfn,
- options=tf.python_io.TFRecordOptions(TFRecordCompressionType.GZIP)):
+ for r in tf_record.tf_record_iterator(
+ gzfn, options=tf_record.TFRecordOptions(TFRecordCompressionType.GZIP)):
actual.append(r)
self.assertEqual(actual, original)
-class TFRecordIteratorTest(tf.test.TestCase):
+class TFRecordIteratorTest(test.TestCase):
def setUp(self):
super(TFRecordIteratorTest, self).setUp()
self._num_records = 7
def _Record(self, r):
- return tf.compat.as_bytes("Record %d" % r)
+ return compat.as_bytes("Record %d" % r)
def _WriteCompressedRecordsToFile(
self,
records,
name="tfrecord.z",
- compression_type=tf.python_io.TFRecordCompressionType.ZLIB):
+ compression_type=tf_record.TFRecordCompressionType.ZLIB):
fn = os.path.join(self.get_temp_dir(), name)
- options = tf.python_io.TFRecordOptions(compression_type=compression_type)
- writer = tf.python_io.TFRecordWriter(fn, options=options)
+ options = tf_record.TFRecordOptions(compression_type=compression_type)
+ writer = tf_record.TFRecordWriter(fn, options=options)
for r in records:
writer.write(r)
writer.close()
@@ -691,9 +697,9 @@ class TFRecordIteratorTest(tf.test.TestCase):
fn = self._WriteCompressedRecordsToFile(
[self._Record(i) for i in range(self._num_records)],
"compressed_records")
- options = tf.python_io.TFRecordOptions(
+ options = tf_record.TFRecordOptions(
compression_type=TFRecordCompressionType.ZLIB)
- reader = tf.python_io.tf_record_iterator(fn, options)
+ reader = tf_record.tf_record_iterator(fn, options)
for i in range(self._num_records):
record = next(reader)
self.assertAllEqual(self._Record(i), record)
@@ -703,11 +709,11 @@ class TFRecordIteratorTest(tf.test.TestCase):
def testWriteZlibRead(self):
"""Verify compression with TFRecordWriter is zlib library compatible."""
original = [b"foo", b"bar"]
- fn = self._WriteCompressedRecordsToFile(
- original, "write_zlib_read.tfrecord.z")
+ fn = self._WriteCompressedRecordsToFile(original,
+ "write_zlib_read.tfrecord.z")
zfn = self._ZlibDecompressFile(fn, "write_zlib_read.tfrecord")
actual = []
- for r in tf.python_io.tf_record_iterator(zfn):
+ for r in tf_record.tf_record_iterator(zfn):
actual.append(r)
self.assertEqual(actual, original)
@@ -715,11 +721,11 @@ class TFRecordIteratorTest(tf.test.TestCase):
"""Verify compression for large records is zlib library compatible."""
# Make it large (about 5MB)
original = [_TEXT * 10240]
- fn = self._WriteCompressedRecordsToFile(
- original, "write_zlib_read_large.tfrecord.z")
+ fn = self._WriteCompressedRecordsToFile(original,
+ "write_zlib_read_large.tfrecord.z")
zfn = self._ZlibDecompressFile(fn, "write_zlib_read_large.tf_record")
actual = []
- for r in tf.python_io.tf_record_iterator(zfn):
+ for r in tf_record.tf_record_iterator(zfn):
actual.append(r)
self.assertEqual(actual, original)
@@ -737,14 +743,14 @@ class TFRecordIteratorTest(tf.test.TestCase):
f.write(cdata)
actual = []
- for r in tf.python_io.tf_record_iterator(zfn):
+ for r in tf_record.tf_record_iterator(zfn):
actual.append(r)
self.assertEqual(actual, original)
def testBadFile(self):
"""Verify that tf_record_iterator throws an exception on bad TFRecords."""
fn = os.path.join(self.get_temp_dir(), "bad_file")
- with tf.python_io.TFRecordWriter(fn) as writer:
+ with tf_record.TFRecordWriter(fn) as writer:
writer.write(b"123")
fn_truncated = os.path.join(self.get_temp_dir(), "bad_file_truncated")
with open(fn, "rb") as f:
@@ -752,17 +758,17 @@ class TFRecordIteratorTest(tf.test.TestCase):
# DataLossError requires that we've written the header, so this must
# be at least 12 bytes.
f2.write(f.read(14))
- with self.assertRaises(tf.errors.DataLossError):
- for _ in tf.python_io.tf_record_iterator(fn_truncated):
+ with self.assertRaises(errors_impl.DataLossError):
+ for _ in tf_record.tf_record_iterator(fn_truncated):
pass
-class AsyncReaderTest(tf.test.TestCase):
+class AsyncReaderTest(test.TestCase):
def testNoDeadlockFromQueue(self):
"""Tests that reading does not block main execution threads."""
- config = tf.ConfigProto(inter_op_parallelism_threads=1,
- intra_op_parallelism_threads=1)
+ config = config_pb2.ConfigProto(
+ inter_op_parallelism_threads=1, intra_op_parallelism_threads=1)
with self.test_session(config=config) as sess:
thread_data_t = collections.namedtuple("thread_data_t",
["thread", "queue", "output"])
@@ -770,16 +776,17 @@ class AsyncReaderTest(tf.test.TestCase):
# Create different readers, each with its own queue.
for i in range(3):
- queue = tf.FIFOQueue(99, [tf.string], shapes=())
- reader = tf.TextLineReader()
+ queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
+ reader = io_ops.TextLineReader()
_, line = reader.read(queue)
output = []
- t = threading.Thread(target=AsyncReaderTest._RunSessionAndSave,
- args=(sess, [line], output))
+ t = threading.Thread(
+ target=AsyncReaderTest._RunSessionAndSave,
+ args=(sess, [line], output))
thread_data.append(thread_data_t(t, queue, output))
# Start all readers. They are all blocked waiting for queue entries.
- sess.run(tf.global_variables_initializer())
+ sess.run(variables.global_variables_initializer())
for d in thread_data:
d.thread.start()
@@ -798,4 +805,4 @@ class AsyncReaderTest(tf.test.TestCase):
if __name__ == "__main__":
- tf.test.main()
+ test.main()
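(A self-contained sketch of the module-level TFRecord API these hunks switch to, with tf_record replacing tf.python_io; the temp path is hypothetical.)

import os
import tempfile

from tensorflow.python.lib.io import tf_record

path = os.path.join(tempfile.mkdtemp(), "roundtrip.tfrecord.z")
options = tf_record.TFRecordOptions(
    compression_type=tf_record.TFRecordCompressionType.ZLIB)

# Write two zlib-compressed records, then read them back with the iterator.
writer = tf_record.TFRecordWriter(path, options=options)
for record in [b"foo", b"bar"]:
  writer.write(record)
writer.close()

assert list(tf_record.tf_record_iterator(path, options)) == [b"foo", b"bar"]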
diff --git a/tensorflow/python/kernel_tests/reduce_join_op_test.py b/tensorflow/python/kernel_tests/reduce_join_op_test.py
index 695d442382..fc6bfd2cc7 100644
--- a/tensorflow/python/kernel_tests/reduce_join_op_test.py
+++ b/tensorflow/python/kernel_tests/reduce_join_op_test.py
@@ -12,17 +12,21 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for ReduceJoin op from string_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
+
import itertools
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
-import tensorflow as tf
+
+from tensorflow.python.framework import dtypes
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import string_ops
+from tensorflow.python.platform import test
def _input_array(num_dims):
@@ -35,7 +39,7 @@ def _input_array(num_dims):
An ndarray of shape [2] * num_dims.
"""
formatter = "{:0%db}" % num_dims
- strings = [formatter.format(i) for i in xrange(2 ** num_dims)]
+ strings = [formatter.format(i) for i in xrange(2**num_dims)]
return np.array(strings, dtype="S%d" % num_dims).reshape([2] * num_dims)
@@ -52,19 +56,19 @@ def _joined_array(num_dims, reduce_dim):
formatter = "{:0%db}" % (num_dims - 1)
result = np.zeros(shape=[2] * (num_dims - 1), dtype="S%d" % (2 * num_dims))
flat = result.ravel()
- for i in xrange(2 ** (num_dims - 1)):
+ for i in xrange(2**(num_dims - 1)):
dims = formatter.format(i)
flat[i] = "".join([(dims[:reduce_dim] + "%d" + dims[reduce_dim:]) % j
for j in xrange(2)])
return result
-class UnicodeTestCase(tf.test.TestCase):
+class UnicodeTestCase(test.TestCase):
"""Test case with Python3-compatible string comparator."""
def assertAllEqualUnicode(self, truth, actual):
- self.assertAllEqual(np.array(truth).astype("U"),
- np.array(actual).astype("U"))
+ self.assertAllEqual(
+ np.array(truth).astype("U"), np.array(actual).astype("U"))
class ReduceJoinTestHelperTest(UnicodeTestCase):
@@ -72,7 +76,7 @@ class ReduceJoinTestHelperTest(UnicodeTestCase):
def testInputArray(self):
num_dims = 3
- truth = ["{:03b}".format(i) for i in xrange(2 ** num_dims)]
+ truth = ["{:03b}".format(i) for i in xrange(2**num_dims)]
output_array = _input_array(num_dims).reshape([-1])
self.assertAllEqualUnicode(truth, output_array)
@@ -91,8 +95,13 @@ class ReduceJoinTestHelperTest(UnicodeTestCase):
class ReduceJoinTest(UnicodeTestCase):
- def _testReduceJoin(self, input_array, truth, truth_shape,
- reduction_indices, keep_dims=False, separator=""):
+ def _testReduceJoin(self,
+ input_array,
+ truth,
+ truth_shape,
+ reduction_indices,
+ keep_dims=False,
+ separator=""):
"""Compares the output of reduce_join to an expected result.
Args:
@@ -104,16 +113,19 @@ class ReduceJoinTest(UnicodeTestCase):
separator: The separator to use for joining.
"""
with self.test_session():
- output = tf.reduce_join(inputs=input_array,
- reduction_indices=reduction_indices,
- keep_dims=keep_dims,
- separator=separator)
+ output = string_ops.reduce_join(
+ inputs=input_array,
+ reduction_indices=reduction_indices,
+ keep_dims=keep_dims,
+ separator=separator)
output_array = output.eval()
self.assertAllEqualUnicode(truth, output_array)
self.assertAllEqual(truth_shape, output.get_shape())
- def _testMultipleReduceJoin(self, input_array, reduction_indices,
+ def _testMultipleReduceJoin(self,
+ input_array,
+ reduction_indices,
separator=" "):
"""Tests reduce_join for one input and multiple reduction_indices.
@@ -129,19 +141,25 @@ class ReduceJoinTest(UnicodeTestCase):
num_dims = len(input_array.shape)
truth_red_indices = reduction_indices or list(reversed(xrange(num_dims)))
with self.test_session():
- output = tf.reduce_join(
- inputs=input_array, reduction_indices=reduction_indices,
- keep_dims=False, separator=separator)
- output_keep_dims = tf.reduce_join(
- inputs=input_array, reduction_indices=reduction_indices,
- keep_dims=True, separator=separator)
+ output = string_ops.reduce_join(
+ inputs=input_array,
+ reduction_indices=reduction_indices,
+ keep_dims=False,
+ separator=separator)
+ output_keep_dims = string_ops.reduce_join(
+ inputs=input_array,
+ reduction_indices=reduction_indices,
+ keep_dims=True,
+ separator=separator)
truth = input_array
for index in truth_red_indices:
- truth = tf.reduce_join(
- inputs=truth, reduction_indices=index, keep_dims=True,
+ truth = string_ops.reduce_join(
+ inputs=truth,
+ reduction_indices=index,
+ keep_dims=True,
separator=separator)
- truth_squeezed = tf.squeeze(truth, squeeze_dims=truth_red_indices)
+ truth_squeezed = array_ops.squeeze(truth, squeeze_dims=truth_red_indices)
output_array = output.eval()
output_keep_dims_array = output_keep_dims.eval()
truth_array = truth.eval()
@@ -164,36 +182,37 @@ class ReduceJoinTest(UnicodeTestCase):
truth_shape_dim_zero = [4]
truth_dim_one = ["thisisatest", "pleasedonotpanic"]
truth_shape_dim_one = [2]
- self._testReduceJoin(input_array, truth_dim_zero, truth_shape_dim_zero,
- reduction_indices=0)
- self._testReduceJoin(input_array, truth_dim_one, truth_shape_dim_one,
- reduction_indices=1)
+ self._testReduceJoin(
+ input_array, truth_dim_zero, truth_shape_dim_zero, reduction_indices=0)
+ self._testReduceJoin(
+ input_array, truth_dim_one, truth_shape_dim_one, reduction_indices=1)
def testRankFive(self):
input_array = _input_array(num_dims=5)
truths = [_joined_array(num_dims=5, reduce_dim=i) for i in xrange(5)]
truth_shape = [2] * 4
for i in xrange(5):
- self._testReduceJoin(input_array, truths[i], truth_shape,
- reduction_indices=i)
+ self._testReduceJoin(
+ input_array, truths[i], truth_shape, reduction_indices=i)
def testNegative(self):
input_array = _input_array(num_dims=5)
truths = [_joined_array(num_dims=5, reduce_dim=i) for i in xrange(5)]
truth_shape = [2] * 4
for i in xrange(5):
- self._testReduceJoin(input_array, truths[i], truth_shape,
- reduction_indices=i - 5)
+ self._testReduceJoin(
+ input_array, truths[i], truth_shape, reduction_indices=i - 5)
def testSingletonDimension(self):
- input_arrays = [_input_array(num_dims=5)
- .reshape([2] * i + [1] + [2] * (5 - i))
- for i in xrange(6)]
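+    # Insert a singleton dimension at each possible axis position in turn.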
+ input_arrays = [
+ _input_array(num_dims=5).reshape([2] * i + [1] + [2] * (5 - i))
+ for i in xrange(6)
+ ]
truth = _input_array(num_dims=5)
truth_shape = [2] * 5
for i in xrange(6):
- self._testReduceJoin(input_arrays[i], truth, truth_shape,
- reduction_indices=i)
+ self._testReduceJoin(
+ input_arrays[i], truth, truth_shape, reduction_indices=i)
def testSeparator(self):
input_array = [["this", "is", "a", "test"],
@@ -203,18 +222,26 @@ class ReduceJoinTest(UnicodeTestCase):
truth_dim_one = ["this is a test", "please do not panic"]
truth_shape_dim_one = [2]
- self._testReduceJoin(input_array, truth_dim_zero, truth_shape_dim_zero,
- reduction_indices=0, separator=" ")
- self._testReduceJoin(input_array, truth_dim_one, truth_shape_dim_one,
- reduction_indices=1, separator=" ")
+ self._testReduceJoin(
+ input_array,
+ truth_dim_zero,
+ truth_shape_dim_zero,
+ reduction_indices=0,
+ separator=" ")
+ self._testReduceJoin(
+ input_array,
+ truth_dim_one,
+ truth_shape_dim_one,
+ reduction_indices=1,
+ separator=" ")
def testUnknownShape(self):
input_array = [["a"], ["b"]]
truth = ["ab"]
truth_shape = None
with self.test_session():
- placeholder = tf.placeholder(tf.string, name="placeholder")
- reduced = tf.reduce_join(placeholder, reduction_indices=0)
+ placeholder = array_ops.placeholder(dtypes.string, name="placeholder")
+ reduced = string_ops.reduce_join(placeholder, reduction_indices=0)
output_array = reduced.eval(feed_dict={placeholder.name: input_array})
self.assertAllEqualUnicode(truth, output_array)
self.assertAllEqual(truth_shape, reduced.get_shape())
@@ -226,8 +253,9 @@ class ReduceJoinTest(UnicodeTestCase):
truth_dim_one = ["thisisatest", "pleasedonotpanic"]
truth_shape = None
with self.test_session():
- placeholder = tf.placeholder(tf.int32, name="placeholder")
- reduced = tf.reduce_join(input_array, reduction_indices=placeholder)
+ placeholder = array_ops.placeholder(dtypes.int32, name="placeholder")
+ reduced = string_ops.reduce_join(
+ input_array, reduction_indices=placeholder)
output_array_dim_zero = reduced.eval(feed_dict={placeholder.name: [0]})
output_array_dim_one = reduced.eval(feed_dict={placeholder.name: [1]})
self.assertAllEqualUnicode(truth_dim_zero, output_array_dim_zero)
@@ -242,10 +270,18 @@ class ReduceJoinTest(UnicodeTestCase):
truth_dim_one = [["thisisatest"], ["pleasedonotpanic"]]
truth_shape_dim_one = [2, 1]
- self._testReduceJoin(input_array, truth_dim_zero, truth_shape_dim_zero,
- reduction_indices=0, keep_dims=True)
- self._testReduceJoin(input_array, truth_dim_one, truth_shape_dim_one,
- reduction_indices=1, keep_dims=True)
+ self._testReduceJoin(
+ input_array,
+ truth_dim_zero,
+ truth_shape_dim_zero,
+ reduction_indices=0,
+ keep_dims=True)
+ self._testReduceJoin(
+ input_array,
+ truth_dim_one,
+ truth_shape_dim_one,
+ reduction_indices=1,
+ keep_dims=True)
def testMultiIndex(self):
num_dims = 3
@@ -253,41 +289,41 @@ class ReduceJoinTest(UnicodeTestCase):
# Also tests [].
for i in xrange(num_dims + 1):
for permutation in itertools.permutations(xrange(num_dims), i):
- self._testMultipleReduceJoin(input_array,
- reduction_indices=permutation)
+ self._testMultipleReduceJoin(input_array, reduction_indices=permutation)
def testInvalidReductionIndices(self):
with self.test_session():
with self.assertRaisesRegexp(ValueError, "Invalid reduction dim"):
- tf.reduce_join(inputs="", reduction_indices=0)
+ string_ops.reduce_join(inputs="", reduction_indices=0)
with self.assertRaisesRegexp(ValueError,
"Invalid reduction dimension -3"):
- tf.reduce_join(inputs=[[""]], reduction_indices=-3)
+ string_ops.reduce_join(inputs=[[""]], reduction_indices=-3)
with self.assertRaisesRegexp(ValueError, "Invalid reduction dimension 2"):
- tf.reduce_join(inputs=[[""]], reduction_indices=2)
+ string_ops.reduce_join(inputs=[[""]], reduction_indices=2)
with self.assertRaisesRegexp(ValueError,
"Invalid reduction dimension -3"):
- tf.reduce_join(inputs=[[""]], reduction_indices=[0, -3])
+ string_ops.reduce_join(inputs=[[""]], reduction_indices=[0, -3])
with self.assertRaisesRegexp(ValueError, "Invalid reduction dimension 2"):
- tf.reduce_join(inputs=[[""]], reduction_indices=[0, 2])
+ string_ops.reduce_join(inputs=[[""]], reduction_indices=[0, 2])
with self.assertRaisesRegexp(ValueError, "Duplicate reduction index 0"):
- tf.reduce_join(inputs=[[""]], reduction_indices=[0, 0])
+ string_ops.reduce_join(inputs=[[""]], reduction_indices=[0, 0])
def testZeroDims(self):
valid_truth_shape = [0]
with self.test_session():
inputs = np.zeros([0, 1], dtype=str)
with self.assertRaisesRegexp(ValueError, "dimension 0 with size 0"):
- tf.reduce_join(inputs=inputs, reduction_indices=0)
- valid = tf.reduce_join(inputs=inputs, reduction_indices=1)
+ string_ops.reduce_join(inputs=inputs, reduction_indices=0)
+ valid = string_ops.reduce_join(inputs=inputs, reduction_indices=1)
valid_array_shape = valid.eval().shape
self.assertAllEqualUnicode(valid_truth_shape, valid_array_shape)
def testInvalidArgsUnknownShape(self):
with self.test_session():
- placeholder = tf.placeholder(tf.string, name="placeholder")
- index_too_high = tf.reduce_join(placeholder, reduction_indices=1)
- duplicate_index = tf.reduce_join(placeholder, reduction_indices=[-1, 1])
+ placeholder = array_ops.placeholder(dtypes.string, name="placeholder")
+ index_too_high = string_ops.reduce_join(placeholder, reduction_indices=1)
+ duplicate_index = string_ops.reduce_join(
+ placeholder, reduction_indices=[-1, 1])
with self.assertRaisesOpError("Invalid reduction dimension 1"):
index_too_high.eval(feed_dict={placeholder.name: [""]})
with self.assertRaisesOpError("Duplicate reduction dimension 1"):
@@ -295,14 +331,15 @@ class ReduceJoinTest(UnicodeTestCase):
def testInvalidArgsUnknownIndices(self):
with self.test_session():
- placeholder = tf.placeholder(tf.int32, name="placeholder")
- reduced = tf.reduce_join(["test", "test2"],
- reduction_indices=placeholder)
+ placeholder = array_ops.placeholder(dtypes.int32, name="placeholder")
+ reduced = string_ops.reduce_join(
+ ["test", "test2"], reduction_indices=placeholder)
with self.assertRaisesOpError("reduction dimension -2"):
reduced.eval(feed_dict={placeholder.name: -2})
with self.assertRaisesOpError("reduction dimension 2"):
reduced.eval(feed_dict={placeholder.name: 2})
+
if __name__ == "__main__":
- tf.test.main()
+ test.main()
diff --git a/tensorflow/python/kernel_tests/reduction_ops_test.py b/tensorflow/python/kernel_tests/reduction_ops_test.py
index e01aa810a2..0da5a2ecc5 100644
--- a/tensorflow/python/kernel_tests/reduction_ops_test.py
+++ b/tensorflow/python/kernel_tests/reduction_ops_test.py
@@ -13,18 +13,24 @@
# limitations under the License.
# ==============================================================================
"""Functional tests for reduction ops."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
+from tensorflow.python.platform import test
-class ReducedShapeTest(tf.test.TestCase):
+class ReducedShapeTest(test.TestCase):
def _check(self, shape, axes, result):
output = math_ops.reduced_shape(shape, axes=axes)
@@ -62,7 +68,7 @@ class ReducedShapeTest(tf.test.TestCase):
self._check([10, 10, 10], [-3], [1, 10, 10])
-class SumReductionTest(tf.test.TestCase):
+class SumReductionTest(test.TestCase):
def _compare(self,
x,
@@ -78,7 +84,7 @@ class SumReductionTest(tf.test.TestCase):
for ra in reduction_axes.ravel()[::-1]:
np_ans = np.sum(np_ans, axis=ra, keepdims=keep_dims)
with self.test_session(use_gpu=use_gpu) as sess:
- tf_ans = tf.reduce_sum(x, reduction_axes, keep_dims)
+ tf_ans = math_ops.reduce_sum(x, reduction_axes, keep_dims)
out = sess.run(tf_ans, feed_dict)
self.assertAllClose(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
@@ -191,46 +197,47 @@ class SumReductionTest(tf.test.TestCase):
def testInvalidIndex(self):
np_arr = np.arange(0, 10).reshape([2, 5]).astype(np.float32)
- input_tensor = tf.convert_to_tensor(np_arr)
+ input_tensor = ops.convert_to_tensor(np_arr)
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Invalid reduction dimension" in str(e)):
- tf.reduce_sum(input_tensor, [-3])
+ math_ops.reduce_sum(input_tensor, [-3])
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Invalid reduction dimension" in str(e)):
- tf.reduce_sum(input_tensor, [2])
+ math_ops.reduce_sum(input_tensor, [2])
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Invalid reduction dimension" in str(e)):
- tf.reduce_sum(input_tensor, [0, 2])
+ math_ops.reduce_sum(input_tensor, [0, 2])
def testPartialShapes(self):
np.random.seed(1618)
# Input shape is unknown.
reduction_axes = [1, 2]
- c_unknown = tf.placeholder(tf.float32)
- s_unknown = tf.reduce_sum(c_unknown, reduction_axes)
+ c_unknown = array_ops.placeholder(dtypes.float32)
+ s_unknown = math_ops.reduce_sum(c_unknown, reduction_axes)
self.assertEqual(tensor_shape.unknown_shape(), s_unknown.get_shape())
np_input = np.random.randn(3, 3, 3)
self._compareAll(np_input, reduction_axes, {c_unknown: np_input})
# Input shape only has known rank.
- c_known_rank = tf.placeholder(tf.float32)
+ c_known_rank = array_ops.placeholder(dtypes.float32)
c_known_rank.set_shape(tensor_shape.unknown_shape(ndims=3))
- s_known_rank = tf.reduce_sum(c_known_rank, reduction_axes, keep_dims=True)
+ s_known_rank = math_ops.reduce_sum(
+ c_known_rank, reduction_axes, keep_dims=True)
self.assertEqual(3, s_known_rank.get_shape().ndims)
np_input = np.random.randn(3, 3, 3)
self._compareAll(np_input, reduction_axes, {c_known_rank: np_input})
# Reduction indices are unknown.
- unknown_indices = tf.placeholder(tf.int32)
- c_unknown_indices = tf.constant([[10.0], [20.0]])
- s_unknown_indices = tf.reduce_sum(
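+    # When the indices are only fed at run time, the static result shape is
+    # unknown unless keep_dims=True pins the rank to the input's.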
+ unknown_indices = array_ops.placeholder(dtypes.int32)
+ c_unknown_indices = constant_op.constant([[10.0], [20.0]])
+ s_unknown_indices = math_ops.reduce_sum(
c_unknown_indices, unknown_indices, keep_dims=False)
self.assertEqual(tensor_shape.unknown_shape(),
s_unknown_indices.get_shape())
- s_unknown_indices_keep = tf.reduce_sum(
+ s_unknown_indices_keep = math_ops.reduce_sum(
c_unknown_indices, unknown_indices, keep_dims=True)
self.assertEqual(2, s_unknown_indices_keep.get_shape().ndims)
@@ -242,9 +249,9 @@ class SumReductionTest(tf.test.TestCase):
self._compareGradient(shape, sum_shape, reduction_axes[0])
x = np.arange(1.0, 49.0).reshape(shape).astype(np.float64)
with self.test_session():
- t = tf.convert_to_tensor(x)
- su = tf.reduce_sum(t, reduction_axes)
- jacob_t, jacob_n = tf.test.compute_gradient(
+ t = ops.convert_to_tensor(x)
+ su = math_ops.reduce_sum(t, reduction_axes)
+ jacob_t, jacob_n = gradient_checker.compute_gradient(
t, shape, su, sum_shape, x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
@@ -287,23 +294,23 @@ class SumReductionTest(tf.test.TestCase):
def testEmptyGradients(self):
with self.test_session():
- x = tf.zeros([0, 3])
- y = tf.reduce_sum(x, [1])
- error = tf.test.compute_gradient_error(x, [0, 3], y, [0])
+ x = array_ops.zeros([0, 3])
+ y = math_ops.reduce_sum(x, [1])
+ error = gradient_checker.compute_gradient_error(x, [0, 3], y, [0])
self.assertEqual(error, 0)
def testDegenerate(self):
for use_gpu in False, True:
with self.test_session(use_gpu=use_gpu):
- for dtype in (tf.float16, tf.float32, tf.float64, tf.complex64,
- tf.complex128):
+ for dtype in (dtypes.float16, dtypes.float32, dtypes.float64,
+ dtypes.complex64, dtypes.complex128):
# A large number is needed to get Eigen to die
- x = tf.zeros((0, 9938), dtype=dtype)
- y = tf.reduce_sum(x, [0])
+ x = array_ops.zeros((0, 9938), dtype=dtype)
+ y = math_ops.reduce_sum(x, [0])
self.assertAllEqual(y.eval(), np.zeros(9938))
-class MeanReductionTest(tf.test.TestCase):
+class MeanReductionTest(test.TestCase):
def _compare(self, x, reduction_axes, keep_dims, use_gpu=False):
np_ans = x
@@ -317,7 +324,7 @@ class MeanReductionTest(tf.test.TestCase):
count *= x.shape[ra]
np_ans /= count
with self.test_session(use_gpu=use_gpu):
- tf_ans = tf.reduce_mean(x, reduction_axes, keep_dims)
+ tf_ans = math_ops.reduce_mean(x, reduction_axes, keep_dims)
out = tf_ans.eval()
self.assertAllClose(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
@@ -367,50 +374,46 @@ class MeanReductionTest(tf.test.TestCase):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float32)
with self.test_session():
- t = tf.convert_to_tensor(x)
- su = tf.reduce_mean(t, [1, 2])
- jacob_t, jacob_n = tf.test.compute_gradient(
+ t = ops.convert_to_tensor(x)
+ su = math_ops.reduce_mean(t, [1, 2])
+ jacob_t, jacob_n = gradient_checker.compute_gradient(
t, s, su, [2, 2], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
- su = tf.reduce_mean(t, [0, 1, 2, 3])
- jacob_t, jacob_n = tf.test.compute_gradient(
+ su = math_ops.reduce_mean(t, [0, 1, 2, 3])
+ jacob_t, jacob_n = gradient_checker.compute_gradient(
t, s, su, [1], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
- su = tf.reduce_mean(t, [])
- jacob_t, jacob_n = tf.test.compute_gradient(
+ su = math_ops.reduce_mean(t, [])
+ jacob_t, jacob_n = gradient_checker.compute_gradient(
t, s, su, [2, 3, 4, 2], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
- su = tf.reduce_mean(t, 0)
- jacob_t, jacob_n = tf.test.compute_gradient(t,
- s,
- su,
- [3, 4, 2],
- x_init_value=x,
- delta=1)
+ su = math_ops.reduce_mean(t, 0)
+ jacob_t, jacob_n = gradient_checker.compute_gradient(
+ t, s, su, [3, 4, 2], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
def testEmptyGradients(self):
with self.test_session():
- x = tf.zeros([0, 3])
- y = tf.reduce_mean(x, [1])
- error = tf.test.compute_gradient_error(x, [0, 3], y, [0])
+ x = array_ops.zeros([0, 3])
+ y = math_ops.reduce_mean(x, [1])
+ error = gradient_checker.compute_gradient_error(x, [0, 3], y, [0])
self.assertEqual(error, 0)
def testDegenerate(self):
for use_gpu in False, True:
with self.test_session(use_gpu=use_gpu):
- for dtype in (tf.float16, tf.float32, tf.float64):
+ for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):
# A large number is needed to get Eigen to die
- x = tf.zeros((0, 9938), dtype=dtype)
- y = tf.reduce_mean(x, [0]).eval()
+ x = array_ops.zeros((0, 9938), dtype=dtype)
+ y = math_ops.reduce_mean(x, [0]).eval()
self.assertEqual(y.shape, (9938,))
self.assertTrue(np.all(np.isnan(y)))
-class ProdReductionTest(tf.test.TestCase):
+class ProdReductionTest(test.TestCase):
def _compare(self, x, reduction_axes, keep_dims):
np_ans = x
@@ -422,7 +425,7 @@ class ProdReductionTest(tf.test.TestCase):
with self.test_session():
if reduction_axes is not None:
reduction_axes = np.array(reduction_axes).astype(np.int32)
- tf_ans = tf.reduce_prod(x, reduction_axes, keep_dims)
+ tf_ans = math_ops.reduce_prod(x, reduction_axes, keep_dims)
out = tf_ans.eval()
self.assertAllClose(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
@@ -454,30 +457,26 @@ class ProdReductionTest(tf.test.TestCase):
def _compareGradient(self, x):
with self.test_session():
- t = tf.convert_to_tensor(x)
+ t = ops.convert_to_tensor(x)
- su = tf.reduce_prod(t, [])
- jacob_t, jacob_n = tf.test.compute_gradient(
+ su = math_ops.reduce_prod(t, [])
+ jacob_t, jacob_n = gradient_checker.compute_gradient(
t, x.shape, su, [2, 3, 4, 2], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
- su = tf.reduce_prod(t, [1, 2])
- jacob_t, jacob_n = tf.test.compute_gradient(
+ su = math_ops.reduce_prod(t, [1, 2])
+ jacob_t, jacob_n = gradient_checker.compute_gradient(
t, x.shape, su, [2, 2], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
- su = tf.reduce_prod(t, [0, 1, 2, 3])
- jacob_t, jacob_n = tf.test.compute_gradient(
+ su = math_ops.reduce_prod(t, [0, 1, 2, 3])
+ jacob_t, jacob_n = gradient_checker.compute_gradient(
t, x.shape, su, [1], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
- su = tf.reduce_prod(t, 0)
- jacob_t, jacob_n = tf.test.compute_gradient(t,
- x.shape,
- su,
- [3, 4, 2],
- x_init_value=x,
- delta=1)
+ su = math_ops.reduce_prod(t, 0)
+ jacob_t, jacob_n = gradient_checker.compute_gradient(
+ t, x.shape, su, [3, 4, 2], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
def testGradientWithZeros(self):
@@ -504,22 +503,22 @@ class ProdReductionTest(tf.test.TestCase):
def testEmptyGradients(self):
with self.test_session():
- x = tf.zeros([0, 3])
- y = tf.reduce_prod(x, [1])
- error = tf.test.compute_gradient_error(x, [0, 3], y, [0])
+ x = array_ops.zeros([0, 3])
+ y = math_ops.reduce_prod(x, [1])
+ error = gradient_checker.compute_gradient_error(x, [0, 3], y, [0])
self.assertEqual(error, 0)
def testDegenerate(self):
for use_gpu in False, True:
with self.test_session(use_gpu=use_gpu):
- for dtype in (tf.float16, tf.float32, tf.float64):
+ for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):
# A large number is needed to get Eigen to die
- x = tf.zeros((0, 9938), dtype=dtype)
- y = tf.reduce_prod(x, [0])
+ x = array_ops.zeros((0, 9938), dtype=dtype)
+ y = math_ops.reduce_prod(x, [0])
self.assertAllEqual(y.eval(), np.ones(9938))
-class MinReductionTest(tf.test.TestCase):
+class MinReductionTest(test.TestCase):
def _compare(self, x, reduction_axes, keep_dims, use_gpu=False):
np_ans = x
@@ -531,7 +530,7 @@ class MinReductionTest(tf.test.TestCase):
with self.test_session(use_gpu=use_gpu):
if reduction_axes is not None:
reduction_axes = np.array(reduction_axes).astype(np.int32)
- tf_ans = tf.reduce_min(x, reduction_axes, keep_dims)
+ tf_ans = math_ops.reduce_min(x, reduction_axes, keep_dims)
out = tf_ans.eval()
self.assertAllClose(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
@@ -581,9 +580,9 @@ class MinReductionTest(tf.test.TestCase):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
with self.test_session():
- t = tf.convert_to_tensor(x)
- su = tf.reduce_min(t, [1, 2])
- jacob_t, jacob_n = tf.test.compute_gradient(
+ t = ops.convert_to_tensor(x)
+ su = math_ops.reduce_min(t, [1, 2])
+ jacob_t, jacob_n = gradient_checker.compute_gradient(
t, s, su, [2, 2], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
@@ -591,9 +590,9 @@ class MinReductionTest(tf.test.TestCase):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
with self.test_session():
- t = tf.convert_to_tensor(x)
- su = tf.reduce_min(t, [1])
- jacob_t, jacob_n = tf.test.compute_gradient(
+ t = ops.convert_to_tensor(x)
+ su = math_ops.reduce_min(t, [1])
+ jacob_t, jacob_n = gradient_checker.compute_gradient(
t, s, su, [2, 4, 2], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
@@ -601,9 +600,9 @@ class MinReductionTest(tf.test.TestCase):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
with self.test_session():
- t = tf.convert_to_tensor(x)
- su = tf.reduce_min(t, [2])
- jacob_t, jacob_n = tf.test.compute_gradient(
+ t = ops.convert_to_tensor(x)
+ su = math_ops.reduce_min(t, [2])
+ jacob_t, jacob_n = gradient_checker.compute_gradient(
t, s, su, [2, 3, 2], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
@@ -611,21 +610,21 @@ class MinReductionTest(tf.test.TestCase):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
with self.test_session():
- t = tf.convert_to_tensor(x)
- su = tf.reduce_min(t)
- jacob_t, jacob_n = tf.test.compute_gradient(
+ t = ops.convert_to_tensor(x)
+ su = math_ops.reduce_min(t)
+ jacob_t, jacob_n = gradient_checker.compute_gradient(
t, s, su, [1], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
def testEmptyGradients(self):
with self.test_session():
- x = tf.zeros([0, 3])
- y = tf.reduce_min(x, [1])
- error = tf.test.compute_gradient_error(x, [0, 3], y, [0])
+ x = array_ops.zeros([0, 3])
+ y = math_ops.reduce_min(x, [1])
+ error = gradient_checker.compute_gradient_error(x, [0, 3], y, [0])
self.assertEqual(error, 0)
-class MaxReductionTest(tf.test.TestCase):
+class MaxReductionTest(test.TestCase):
def _compare(self, x, reduction_axes, keep_dims, use_gpu=False):
np_ans = x
@@ -637,7 +636,7 @@ class MaxReductionTest(tf.test.TestCase):
with self.test_session(use_gpu=use_gpu):
if reduction_axes is not None:
reduction_axes = np.array(reduction_axes).astype(np.int32)
- tf_ans = tf.reduce_max(x, reduction_axes, keep_dims)
+ tf_ans = math_ops.reduce_max(x, reduction_axes, keep_dims)
out = tf_ans.eval()
self.assertAllClose(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
@@ -687,9 +686,9 @@ class MaxReductionTest(tf.test.TestCase):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
with self.test_session():
- t = tf.convert_to_tensor(x)
- su = tf.reduce_max(t, [1, 2])
- jacob_t, jacob_n = tf.test.compute_gradient(
+ t = ops.convert_to_tensor(x)
+ su = math_ops.reduce_max(t, [1, 2])
+ jacob_t, jacob_n = gradient_checker.compute_gradient(
t, s, su, [2, 2], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
@@ -697,9 +696,9 @@ class MaxReductionTest(tf.test.TestCase):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
with self.test_session():
- t = tf.convert_to_tensor(x)
- su = tf.reduce_max(t, [1])
- jacob_t, jacob_n = tf.test.compute_gradient(
+ t = ops.convert_to_tensor(x)
+ su = math_ops.reduce_max(t, [1])
+ jacob_t, jacob_n = gradient_checker.compute_gradient(
t, s, su, [2, 4, 2], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
@@ -707,9 +706,9 @@ class MaxReductionTest(tf.test.TestCase):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
with self.test_session():
- t = tf.convert_to_tensor(x)
- su = tf.reduce_max(t, [2])
- jacob_t, jacob_n = tf.test.compute_gradient(
+ t = ops.convert_to_tensor(x)
+ su = math_ops.reduce_max(t, [2])
+ jacob_t, jacob_n = gradient_checker.compute_gradient(
t, s, su, [2, 3, 2], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
@@ -717,21 +716,21 @@ class MaxReductionTest(tf.test.TestCase):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
with self.test_session():
- t = tf.convert_to_tensor(x)
- su = tf.reduce_max(t)
- jacob_t, jacob_n = tf.test.compute_gradient(
+ t = ops.convert_to_tensor(x)
+ su = math_ops.reduce_max(t)
+ jacob_t, jacob_n = gradient_checker.compute_gradient(
t, s, su, [1], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
def testEmptyGradients(self):
with self.test_session():
- x = tf.zeros([0, 3])
- y = tf.reduce_max(x, [1])
- error = tf.test.compute_gradient_error(x, [0, 3], y, [0])
+ x = array_ops.zeros([0, 3])
+ y = math_ops.reduce_max(x, [1])
+ error = gradient_checker.compute_gradient_error(x, [0, 3], y, [0])
self.assertEqual(error, 0)
-class AllReductionTest(tf.test.TestCase):
+class AllReductionTest(test.TestCase):
def _compare(self, x, reduction_axes, keep_dims, use_gpu=False):
np_ans = x
@@ -743,7 +742,7 @@ class AllReductionTest(tf.test.TestCase):
with self.test_session(use_gpu=use_gpu):
if reduction_axes is not None:
reduction_axes = np.array(reduction_axes).astype(np.int32)
- tf_ans = tf.reduce_all(x, reduction_axes, keep_dims)
+ tf_ans = math_ops.reduce_all(x, reduction_axes, keep_dims)
out = tf_ans.eval()
self.assertAllEqual(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
@@ -772,7 +771,7 @@ class AllReductionTest(tf.test.TestCase):
self._compareAll([], [0])
-class AnyReductionTest(tf.test.TestCase):
+class AnyReductionTest(test.TestCase):
def _compare(self, x, reduction_axes, keep_dims, use_gpu=False):
np_ans = x
@@ -784,7 +783,7 @@ class AnyReductionTest(tf.test.TestCase):
with self.test_session(use_gpu=use_gpu):
if reduction_axes is not None:
reduction_axes = np.array(reduction_axes).astype(np.int32)
- tf_ans = tf.reduce_any(x, reduction_axes, keep_dims)
+ tf_ans = math_ops.reduce_any(x, reduction_axes, keep_dims)
out = tf_ans.eval()
self.assertAllEqual(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
@@ -813,7 +812,7 @@ class AnyReductionTest(tf.test.TestCase):
self._compareAll([], [0])
-class CountNonzeroReductionTest(tf.test.TestCase):
+class CountNonzeroReductionTest(test.TestCase):
def _compare(self,
x,
@@ -829,7 +828,7 @@ class CountNonzeroReductionTest(tf.test.TestCase):
for ra in reduction_axes.ravel()[::-1]:
np_ans = np.sum(np_ans, axis=ra, keepdims=keep_dims)
with self.test_session(use_gpu=use_gpu) as sess:
- tf_ans = tf.count_nonzero(x, reduction_axes, keep_dims)
+ tf_ans = math_ops.count_nonzero(x, reduction_axes, keep_dims)
out = sess.run(tf_ans, feed_dict)
self.assertAllClose(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
@@ -881,12 +880,12 @@ class CountNonzeroReductionTest(tf.test.TestCase):
def testDegenerate(self):
for use_gpu in False, True:
with self.test_session(use_gpu=use_gpu):
- for dtype in (tf.bool,):
+ for dtype in (dtypes.bool,):
# A large number is needed to get Eigen to die
- x = tf.zeros((0, 9938), dtype=dtype)
- y = tf.count_nonzero(x, [0])
+ x = array_ops.zeros((0, 9938), dtype=dtype)
+ y = math_ops.count_nonzero(x, [0])
self.assertAllEqual(y.eval(), np.zeros(9938))
if __name__ == "__main__":
- tf.test.main()
+ test.main()
diff --git a/tensorflow/python/kernel_tests/relu_op_test.py b/tensorflow/python/kernel_tests/relu_op_test.py
index 776d9b6665..229f27e9ca 100644
--- a/tensorflow/python/kernel_tests/relu_op_test.py
+++ b/tensorflow/python/kernel_tests/relu_op_test.py
@@ -12,32 +12,42 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for Relu and ReluGrad."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.ops import gradient_checker
+from tensorflow.python.ops import gradients_impl
+from tensorflow.python.ops import nn_ops
+from tensorflow.python.ops import random_ops
+from tensorflow.python.ops import variables
+import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
+from tensorflow.python.platform import test
+from tensorflow.python.training import gradient_descent
-class ReluTest(tf.test.TestCase):
+
+class ReluTest(test.TestCase):
def _npRelu(self, np_features):
return np.maximum(np_features, np.zeros(np_features.shape))
def testNpRelu(self):
self.assertAllClose(
- np.array([[0.0, 0.7, 0.0, 0.3, 0.0],
- [0.1, 0.0, 0.5, 0.0, 0.9]]),
- self._npRelu(np.array([[-0.9, 0.7, -0.5, 0.3, -0.1],
- [0.1, -0.3, 0.5, -0.7, 0.9]])))
+ np.array([[0.0, 0.7, 0.0, 0.3, 0.0], [0.1, 0.0, 0.5, 0.0, 0.9]]),
+ self._npRelu(
+ np.array([[-0.9, 0.7, -0.5, 0.3, -0.1], [0.1, -0.3, 0.5, -0.7, 0.9]
+ ])))
def _testRelu(self, np_features, use_gpu=False):
np_relu = self._npRelu(np_features)
with self.test_session(use_gpu=use_gpu):
- relu = tf.nn.relu(np_features)
+ relu = nn_ops.relu(np_features)
tf_relu = relu.eval()
self.assertAllClose(np_relu, tf_relu)
self.assertShapeEqual(np_relu, relu)
@@ -56,105 +66,103 @@ class ReluTest(tf.test.TestCase):
# defined at around zero and we want to avoid that in terms of input values.
def testGradientFloat32(self):
with self.test_session():
- x = tf.constant(
+ x = constant_op.constant(
[-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
- shape=[2, 5], name="x")
- y = tf.nn.relu(x, name="relu")
+ shape=[2, 5],
+ name="x")
+ y = nn_ops.relu(x, name="relu")
x_init = np.asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
- dtype=np.float32, order="F")
- err = tf.test.compute_gradient_error(x,
- [2, 5],
- y,
- [2, 5],
- x_init_value=x_init)
+ dtype=np.float32,
+ order="F")
+ err = gradient_checker.compute_gradient_error(
+ x, [2, 5], y, [2, 5], x_init_value=x_init)
print("relu (float32) gradient err = ", err)
self.assertLess(err, 1e-4)
def testGradientFloat64(self):
with self.test_session():
- x = tf.constant(
+ x = constant_op.constant(
[-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
- shape=[2, 5], dtype=tf.float64, name="x")
- y = tf.nn.relu(x, name="relu")
+ shape=[2, 5],
+ dtype=dtypes.float64,
+ name="x")
+ y = nn_ops.relu(x, name="relu")
x_init = np.asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
- dtype=np.float64, order="F")
- err = tf.test.compute_gradient_error(x,
- [2, 5],
- y,
- [2, 5],
- x_init_value=x_init)
+ dtype=np.float64,
+ order="F")
+ err = gradient_checker.compute_gradient_error(
+ x, [2, 5], y, [2, 5], x_init_value=x_init)
print("relu (float64) gradient err = ", err)
self.assertLess(err, 1e-10)
def testGradGradFloat32(self):
with self.test_session():
- x = tf.constant(
+ x = constant_op.constant(
[-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
- shape=[2, 5], name="x")
- y = tf.nn.relu(x, name="relu")
- z = tf.gradients(y, x)
+ shape=[2, 5],
+ name="x")
+ y = nn_ops.relu(x, name="relu")
+ z = gradients_impl.gradients(y, x)
x_init = np.asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
- dtype=np.float32, order="F")
- err = tf.test.compute_gradient_error(x,
- [2, 5],
- z[0],
- [2, 5],
- x_init_value=x_init)
+ dtype=np.float32,
+ order="F")
+ err = gradient_checker.compute_gradient_error(
+ x, [2, 5], z[0], [2, 5], x_init_value=x_init)
print("relu (float32) gradient of gradient err = ", err)
self.assertLess(err, 1e-4)
def testGradGradFloat64(self):
with self.test_session():
- x = tf.constant(
+ x = constant_op.constant(
[-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
- shape=[2, 5], dtype=tf.float64, name="x")
- y = tf.nn.relu(x, name="relu")
- z = tf.gradients(y, x)
+ shape=[2, 5],
+ dtype=dtypes.float64,
+ name="x")
+ y = nn_ops.relu(x, name="relu")
+ z = gradients_impl.gradients(y, x)
x_init = np.asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
- dtype=np.float64, order="F")
- err = tf.test.compute_gradient_error(x,
- [2, 5],
- z[0],
- [2, 5],
- x_init_value=x_init)
+ dtype=np.float64,
+ order="F")
+ err = gradient_checker.compute_gradient_error(
+ x, [2, 5], z[0], [2, 5], x_init_value=x_init)
print("relu (float64) gradient of gradient err = ", err)
self.assertLess(err, 1e-10)
def testGradientScalar(self):
with self.test_session() as sess:
- x = tf.Variable(100.)
- y = tf.nn.relu(x)
+ x = variables.Variable(100.)
+ y = nn_ops.relu(x)
loss = y**2
- optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.25)
+ optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=0.25)
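+      # With x = 100 and loss = relu(x)**2, dloss/dx = 200, so one step at
+      # learning rate 0.25 moves x to 100 - 0.25 * 200 = 50.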
train_op = optimizer.minimize(loss)
- sess.run(tf.global_variables_initializer())
+ sess.run(variables.global_variables_initializer())
sess.run(train_op)
self.assertAllClose(x.eval(), 50.0)
-class Relu6Test(tf.test.TestCase):
+class Relu6Test(test.TestCase):
def _npRelu6(self, np_features):
sixes = np.copy(np_features)
sixes.fill(6.0)
- return np.minimum(np.maximum(np_features, np.zeros(np_features.shape)),
- sixes)
+ return np.minimum(
+ np.maximum(np_features, np.zeros(np_features.shape)), sixes)
def testNpRelu6(self):
self.assertAllClose(
- np.array([[0.0, 0.7, 0.0, 0.3, 6.0],
- [0.1, 0.0, 6.0, 0.0, 0.9]]),
- self._npRelu6(np.array([[-0.9, 0.7, -0.5, 0.3, 6.0],
- [0.1, -0.3, 6.5, -0.7, 0.9]])))
+ np.array([[0.0, 0.7, 0.0, 0.3, 6.0], [0.1, 0.0, 6.0, 0.0, 0.9]]),
+ self._npRelu6(
+ np.array([[-0.9, 0.7, -0.5, 0.3, 6.0], [0.1, -0.3, 6.5, -0.7, 0.9]
+ ])))
def _testRelu6(self, np_features, use_gpu=False):
np_relu6 = self._npRelu6(np_features)
with self.test_session(use_gpu=use_gpu):
- relu6 = tf.nn.relu6(np_features)
+ relu6 = nn_ops.relu6(np_features)
tf_relu6 = relu6.eval()
self.assertAllClose(np_relu6, tf_relu6)
self.assertShapeEqual(np_relu6, relu6)
@@ -174,40 +182,39 @@ class Relu6Test(tf.test.TestCase):
# in terms of input values.
def testGradientFloat32(self):
with self.test_session():
- x = tf.constant(
+ x = constant_op.constant(
[-0.9, -0.7, -0.5, -0.3, -0.1, 6.1, 6.3, 6.5, 6.7, 6.9],
- shape=[2, 5], name="x")
- y = tf.nn.relu6(x, name="relu6")
+ shape=[2, 5],
+ name="x")
+ y = nn_ops.relu6(x, name="relu6")
x_init = np.asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [6.1, 6.3, 6.5, 6.7, 6.9]],
- dtype=np.float32, order="F")
- err = tf.test.compute_gradient_error(x,
- [2, 5],
- y,
- [2, 5],
- x_init_value=x_init)
+ dtype=np.float32,
+ order="F")
+ err = gradient_checker.compute_gradient_error(
+ x, [2, 5], y, [2, 5], x_init_value=x_init)
print("relu6 (float32) gradient err = ", err)
self.assertLess(err, 1e-4)
def testGradientFloat64(self):
with self.test_session():
- x = tf.constant(
+ x = constant_op.constant(
[-0.9, -0.7, -0.5, -0.3, -0.1, 6.1, 6.3, 6.5, 6.7, 6.9],
- shape=[2, 5], dtype=tf.float64, name="x")
- y = tf.nn.relu6(x, name="relu6")
+ shape=[2, 5],
+ dtype=dtypes.float64,
+ name="x")
+ y = nn_ops.relu6(x, name="relu6")
x_init = np.asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [6.1, 6.3, 6.5, 6.7, 6.9]],
- dtype=np.float64, order="F")
- err = tf.test.compute_gradient_error(x,
- [2, 5],
- y,
- [2, 5],
- x_init_value=x_init)
+ dtype=np.float64,
+ order="F")
+ err = gradient_checker.compute_gradient_error(
+ x, [2, 5], y, [2, 5], x_init_value=x_init)
print("relu6 (float64) gradient err = ", err)
self.assertLess(err, 1e-10)
-class EluTest(tf.test.TestCase):
+class EluTest(test.TestCase):
def _npElu(self, np_features):
return np.where(np_features < 0, np.exp(np_features) - 1, np_features)
@@ -216,13 +223,14 @@ class EluTest(tf.test.TestCase):
self.assertAllClose(
np.array([[-0.59343034025, 0.7, -0.39346934028, 0.3, -0.09516258196],
[0.1, -0.25918177931, 0.5, -0.5034146962, 0.9]]),
- self._npElu(np.array([[-0.9, 0.7, -0.5, 0.3, -0.1], [0.1, -0.3, 0.5, -
- 0.7, 0.9]])))
+ self._npElu(
+ np.array([[-0.9, 0.7, -0.5, 0.3, -0.1], [0.1, -0.3, 0.5, -0.7, 0.9]
+ ])))
def _testElu(self, np_features, use_gpu=False):
np_elu = self._npElu(np_features)
with self.test_session(use_gpu=use_gpu):
- elu = tf.nn.elu(np_features)
+ elu = nn_ops.elu(np_features)
tf_elu = elu.eval()
self.assertAllClose(np_elu, tf_elu)
self.assertShapeEqual(np_elu, elu)
@@ -239,83 +247,76 @@ class EluTest(tf.test.TestCase):
def testGradientFloat32(self):
with self.test_session():
x_val = [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]]
- x = tf.constant(x_val, name="x")
- y = tf.nn.elu(x, name="elu")
+ x = constant_op.constant(x_val, name="x")
+ y = nn_ops.elu(x, name="elu")
x_init = np.asarray(x_val, dtype=np.float32, order="F")
- err = tf.test.compute_gradient_error(x,
- [2, 5],
- y,
- [2, 5],
- x_init_value=x_init)
+ err = gradient_checker.compute_gradient_error(
+ x, [2, 5], y, [2, 5], x_init_value=x_init)
print("elu (float32) gradient err = ", err)
self.assertLess(err, 1e-4)
def testGradientFloat64(self):
with self.test_session():
x_val = [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]]
- x = tf.constant(x_val, dtype=tf.float64, name="x")
- y = tf.nn.elu(x, name="elu")
+ x = constant_op.constant(x_val, dtype=dtypes.float64, name="x")
+ y = nn_ops.elu(x, name="elu")
x_init = np.asarray(x_val, dtype=np.float64, order="F")
- err = tf.test.compute_gradient_error(x,
- [2, 5],
- y,
- [2, 5],
- x_init_value=x_init)
+ err = gradient_checker.compute_gradient_error(
+ x, [2, 5], y, [2, 5], x_init_value=x_init)
print("elu (float64) gradient err = ", err)
self.assertLess(err, 1e-6)
def testGradGradFloat32(self):
with self.test_session():
- x = tf.constant(
+ x = constant_op.constant(
[-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
- shape=[2, 5], name="x")
- y = tf.nn.elu(x, name="elu")
- z = tf.gradients(y, x)
+ shape=[2, 5],
+ name="x")
+ y = nn_ops.elu(x, name="elu")
+ z = gradients_impl.gradients(y, x)
x_init = np.asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
- dtype=np.float32, order="F")
- err = tf.test.compute_gradient_error(x,
- [2, 5],
- z[0],
- [2, 5],
- x_init_value=x_init)
+ dtype=np.float32,
+ order="F")
+ err = gradient_checker.compute_gradient_error(
+ x, [2, 5], z[0], [2, 5], x_init_value=x_init)
print("elu (float32) gradient of gradient err = ", err)
self.assertLess(err, 1e-4)
def testGradGradFloat64(self):
with self.test_session():
- x = tf.constant(
+ x = constant_op.constant(
[-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
- shape=[2, 5], dtype=tf.float64, name="x")
- y = tf.nn.elu(x, name="elu")
- z = tf.gradients(y, x)
+ shape=[2, 5],
+ dtype=dtypes.float64,
+ name="x")
+ y = nn_ops.elu(x, name="elu")
+ z = gradients_impl.gradients(y, x)
x_init = np.asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
- dtype=np.float64, order="F")
- err = tf.test.compute_gradient_error(x,
- [2, 5],
- z[0],
- [2, 5],
- x_init_value=x_init)
+ dtype=np.float64,
+ order="F")
+ err = gradient_checker.compute_gradient_error(
+ x, [2, 5], z[0], [2, 5], x_init_value=x_init)
print("elu (float64) gradient of gradient err = ", err)
self.assertLess(err, 1e-6)
-class CreluTest(tf.test.TestCase):
+class CreluTest(test.TestCase):
def testCreluShape(self):
- f = tf.random_normal([50, 5, 7, 10])
- t = tf.nn.crelu(f)
+ f = random_ops.random_normal([50, 5, 7, 10])
+ t = nn_ops.crelu(f)
self.assertEqual([50, 5, 7, 20], t.get_shape())
def _testCrelu(self, np_features, use_gpu=False):
np_relu = np.maximum(np_features, np.zeros_like(np_features))
np_neg_relu = np.maximum(-np_features, np.zeros_like(np_features))
- np_crelu = np.concatenate(
- (np_relu, np_neg_relu), len(np_features.shape) - 1)
+ np_crelu = np.concatenate((np_relu, np_neg_relu),
+ len(np_features.shape) - 1)
with self.test_session(use_gpu=use_gpu):
- crelu = tf.nn.crelu(np_features)
+ crelu = nn_ops.crelu(np_features)
tf_relu = crelu.eval()
self.assertAllClose(np_crelu, tf_relu)
@@ -331,5 +332,6 @@ class CreluTest(tf.test.TestCase):
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
use_gpu=True)
+
if __name__ == "__main__":
- tf.test.main()
+ test.main()
diff --git a/tensorflow/python/kernel_tests/reshape_op_test.py b/tensorflow/python/kernel_tests/reshape_op_test.py
index fd09111753..ad852d76c2 100644
--- a/tensorflow/python/kernel_tests/reshape_op_test.py
+++ b/tensorflow/python/kernel_tests/reshape_op_test.py
@@ -12,22 +12,27 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for tensorflow.ops.reshape_op."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import gradient_checker
+from tensorflow.python.platform import test
-class ReshapeTest(tf.test.TestCase):
+class ReshapeTest(test.TestCase):
def _testReshape(self, x, y, use_gpu=False):
with self.test_session(use_gpu=use_gpu):
np_ans = x.reshape(y)
- tf_ans = tf.reshape(x, y)
+ tf_ans = array_ops.reshape(x, y)
out = tf_ans.eval()
self.assertEqual(tf_ans.get_shape(), out.shape)
self.assertShapeEqual(np_ans, tf_ans)
@@ -79,13 +84,10 @@ class ReshapeTest(tf.test.TestCase):
x = np.arange(1., 25.).reshape([2, 3, 4]).astype(np.float32)
s = list(np.shape(x))
with self.test_session():
- input_tensor = tf.constant(x)
- reshape_out = tf.reshape(input_tensor, [1, 8, 3])
- err = tf.test.compute_gradient_error(input_tensor,
- s,
- reshape_out,
- s,
- x_init_value=x)
+ input_tensor = constant_op.constant(x)
+ reshape_out = array_ops.reshape(input_tensor, [1, 8, 3])
+ err = gradient_checker.compute_gradient_error(
+ input_tensor, s, reshape_out, s, x_init_value=x)
print("Reshape gradient error = " % err)
self.assertLess(err, 1e-3)
@@ -99,46 +101,50 @@ class ReshapeTest(tf.test.TestCase):
self._testBothReshape(x, [1, -1, 5])
def testErrors(self):
- y = tf.constant(0.0, shape=[23, 29, 31])
+ y = constant_op.constant(0.0, shape=[23, 29, 31])
with self.assertRaisesRegexp(ValueError, "must be evenly divisible by 17"):
- tf.reshape(y, [17, -1])
+ array_ops.reshape(y, [17, -1])
- z = tf.constant(0.0, shape=[32, 128])
+ z = constant_op.constant(0.0, shape=[32, 128])
with self.assertRaisesRegexp(ValueError,
"Cannot reshape a tensor with 4096 elements"):
- tf.reshape(z, [4095])
+ array_ops.reshape(z, [4095])
def testPartialShapes(self):
- x = tf.placeholder(tf.float32)
+ x = array_ops.placeholder(dtypes.float32)
# Unknown input shape, partial new shape.
- y = tf.reshape(x, [1, 1, -1, 1])
+ y = array_ops.reshape(x, [1, 1, -1, 1])
self.assertEqual([1, 1, None, 1], y.get_shape().as_list())
# Unknown input shape, unknown new shape.
- y = tf.reshape(x, tf.placeholder(tf.int32))
+ y = array_ops.reshape(x, array_ops.placeholder(dtypes.int32))
self.assertEqual(None, y.get_shape().ndims)
# Unknown input shape, known rank for new shape.
- y = tf.reshape(x, tf.placeholder(tf.int32, shape=(3,)))
+ y = array_ops.reshape(x, array_ops.placeholder(dtypes.int32, shape=(3,)))
self.assertEqual([None, None, None], y.get_shape().as_list())
# Unknown input shape, partial new shape using `tf.stack()`.
- y = tf.reshape(x, [tf.placeholder(tf.int32), 37])
+ y = array_ops.reshape(x, [array_ops.placeholder(dtypes.int32), 37])
self.assertEqual([None, 37], y.get_shape().as_list())
# Unknown input shape, partial new shape using `tf.concat_v2()`.
- y = tf.reshape(
- x, tf.concat_v2(
- [tf.placeholder(
- tf.int32, shape=(2,)), [37, 42]], 0))
+ y = array_ops.reshape(
+ x,
+ array_ops.concat_v2(
+ [array_ops.placeholder(
+ dtypes.int32, shape=(2,)), [37, 42]], 0))
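+    # Shape inference still recovers the trailing constants [37, 42] even
+    # though the leading entries come from a placeholder.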
self.assertEqual([None, None, 37, 42], y.get_shape().as_list())
# Unknown input shape, partial new shape using `tf.shape()`.
- y = tf.reshape(x, tf.shape(tf.placeholder(tf.float32,
- shape=[None, 37, None])))
+ y = array_ops.reshape(
+ x,
+ array_ops.shape(
+ array_ops.placeholder(
+ dtypes.float32, shape=[None, 37, None])))
self.assertEqual([None, 37, None], y.get_shape().as_list())
if __name__ == "__main__":
- tf.test.main()
+ test.main()
diff --git a/tensorflow/python/kernel_tests/reverse_sequence_op_test.py b/tensorflow/python/kernel_tests/reverse_sequence_op_test.py
index ca6b198fa8..9beb615b2c 100644
--- a/tensorflow/python/kernel_tests/reverse_sequence_op_test.py
+++ b/tensorflow/python/kernel_tests/reverse_sequence_op_test.py
@@ -12,18 +12,23 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for tensorflow.ops.reverse_sequence_op."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
-import tensorflow as tf
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import gradient_checker
+from tensorflow.python.platform import test
-class ReverseSequenceTest(tf.test.TestCase):
+
+class ReverseSequenceTest(test.TestCase):
def _testReverseSequence(self,
x,
@@ -34,7 +39,7 @@ class ReverseSequenceTest(tf.test.TestCase):
use_gpu=False,
expected_err_re=None):
with self.test_session(use_gpu=use_gpu):
- ans = tf.reverse_sequence(
+ ans = array_ops.reverse_sequence(
x, batch_axis=batch_axis, seq_axis=seq_axis, seq_lengths=seq_lengths)
if expected_err_re is None:
tf_ans = ans.eval()
@@ -57,10 +62,10 @@ class ReverseSequenceTest(tf.test.TestCase):
False, expected_err_re)
def _testBasic(self, dtype, len_dtype=np.int64):
- x = np.asarray([
- [[1, 2, 3, 4], [5, 6, 7, 8]],
- [[9, 10, 11, 12], [13, 14, 15, 16]],
- [[17, 18, 19, 20], [21, 22, 23, 24]]], dtype=dtype)
+ x = np.asarray(
+ [[[1, 2, 3, 4], [5, 6, 7, 8]], [[9, 10, 11, 12], [13, 14, 15, 16]],
+ [[17, 18, 19, 20], [21, 22, 23, 24]]],
+ dtype=dtype)
x = x.reshape(3, 2, 4, 1, 1)
x = x.transpose([2, 1, 0, 3, 4]) # permute axes 0 <=> 2
@@ -68,9 +73,11 @@ class ReverseSequenceTest(tf.test.TestCase):
seq_lengths = np.asarray([3, 0, 4], dtype=len_dtype)
truth_orig = np.asarray(
- [[[3, 2, 1, 4], [7, 6, 5, 8]], # reverse 0:3
- [[9, 10, 11, 12], [13, 14, 15, 16]], # reverse none
- [[20, 19, 18, 17], [24, 23, 22, 21]]], # reverse 0:4 (all)
+ [
+ [[3, 2, 1, 4], [7, 6, 5, 8]], # reverse 0:3
+ [[9, 10, 11, 12], [13, 14, 15, 16]], # reverse none
+        [[20, 19, 18, 17], [24, 23, 22, 21]]  # reverse 0:4 (all)
+      ],
dtype=dtype)
truth_orig = truth_orig.reshape(3, 2, 4, 1, 1)
truth = truth_orig.transpose([2, 1, 0, 3, 4]) # permute axes 0 <=> 2
@@ -101,10 +108,10 @@ class ReverseSequenceTest(tf.test.TestCase):
self._testBasic(np.complex128)
def testFloatReverseSequenceGrad(self):
- x = np.asarray([
- [[1, 2, 3, 4], [5, 6, 7, 8]],
- [[9, 10, 11, 12], [13, 14, 15, 16]],
- [[17, 18, 19, 20], [21, 22, 23, 24]]], dtype=np.float)
+ x = np.asarray(
+ [[[1, 2, 3, 4], [5, 6, 7, 8]], [[9, 10, 11, 12], [13, 14, 15, 16]],
+ [[17, 18, 19, 20], [21, 22, 23, 24]]],
+ dtype=np.float)
x = x.reshape(3, 2, 4, 1, 1)
x = x.transpose([2, 1, 0, 3, 4]) # transpose axes 0 <=> 2
@@ -114,70 +121,68 @@ class ReverseSequenceTest(tf.test.TestCase):
seq_lengths = np.asarray([3, 0, 4], dtype=np.int64)
with self.test_session():
- input_t = tf.constant(x, shape=x.shape)
- seq_lengths_t = tf.constant(seq_lengths, shape=seq_lengths.shape)
- reverse_sequence_out = tf.reverse_sequence(
+ input_t = constant_op.constant(x, shape=x.shape)
+ seq_lengths_t = constant_op.constant(seq_lengths, shape=seq_lengths.shape)
+ reverse_sequence_out = array_ops.reverse_sequence(
input_t,
batch_axis=batch_axis,
seq_axis=seq_axis,
seq_lengths=seq_lengths_t)
- err = tf.test.compute_gradient_error(input_t,
- x.shape,
- reverse_sequence_out,
- x.shape,
- x_init_value=x)
+ err = gradient_checker.compute_gradient_error(
+ input_t, x.shape, reverse_sequence_out, x.shape, x_init_value=x)
print("ReverseSequence gradient error = %g" % err)
self.assertLess(err, 1e-8)
def testShapeFunctionEdgeCases(self):
- t = tf.reverse_sequence(
- tf.placeholder(
- tf.float32, shape=None),
- seq_lengths=tf.placeholder(
- tf.int64, shape=(32,)),
+ t = array_ops.reverse_sequence(
+ array_ops.placeholder(
+ dtypes.float32, shape=None),
+ seq_lengths=array_ops.placeholder(
+ dtypes.int64, shape=(32,)),
batch_axis=0,
seq_axis=1)
self.assertIs(t.get_shape().ndims, None)
# Batch size mismatched between input and seq_lengths.
with self.assertRaises(ValueError):
- tf.reverse_sequence(
- tf.placeholder(
- tf.float32, shape=(32, 2, 3)),
- seq_lengths=tf.placeholder(
- tf.int64, shape=(33,)),
+ array_ops.reverse_sequence(
+ array_ops.placeholder(
+ dtypes.float32, shape=(32, 2, 3)),
+ seq_lengths=array_ops.placeholder(
+ dtypes.int64, shape=(33,)),
seq_axis=3)
# seq_axis out of bounds.
with self.assertRaisesRegexp(ValueError, "seq_dim must be < input rank"):
- tf.reverse_sequence(
- tf.placeholder(
- tf.float32, shape=(32, 2, 3)),
- seq_lengths=tf.placeholder(
- tf.int64, shape=(32,)),
+ array_ops.reverse_sequence(
+ array_ops.placeholder(
+ dtypes.float32, shape=(32, 2, 3)),
+ seq_lengths=array_ops.placeholder(
+ dtypes.int64, shape=(32,)),
seq_axis=3)
# batch_axis out of bounds.
- with self.assertRaisesRegexp(
- ValueError, "batch_dim must be < input rank"):
- tf.reverse_sequence(
- tf.placeholder(
- tf.float32, shape=(32, 2, 3)),
- seq_lengths=tf.placeholder(
- tf.int64, shape=(32,)),
+ with self.assertRaisesRegexp(ValueError, "batch_dim must be < input rank"):
+ array_ops.reverse_sequence(
+ array_ops.placeholder(
+ dtypes.float32, shape=(32, 2, 3)),
+ seq_lengths=array_ops.placeholder(
+ dtypes.int64, shape=(32,)),
seq_axis=0,
batch_axis=3)
with self.test_session():
- inputs = tf.placeholder(tf.float32, shape=(32, 2, 3))
- seq_lengths = tf.placeholder(tf.int64, shape=(32,))
- output = tf.reverse_sequence(
+ inputs = array_ops.placeholder(dtypes.float32, shape=(32, 2, 3))
+ seq_lengths = array_ops.placeholder(dtypes.int64, shape=(32,))
+ output = array_ops.reverse_sequence(
inputs, seq_lengths=seq_lengths,
seq_axis=0) # batch_axis default is 0
with self.assertRaisesOpError("batch_dim == seq_dim"):
- output.eval(feed_dict={inputs: np.random.rand(32, 2, 3),
- seq_lengths: xrange(32)})
+ output.eval(feed_dict={
+ inputs: np.random.rand(32, 2, 3),
+ seq_lengths: xrange(32)
+ })
if __name__ == "__main__":
- tf.test.main()
+ test.main()
diff --git a/tensorflow/python/kernel_tests/rnn_test.py b/tensorflow/python/kernel_tests/rnn_test.py
index 2ba85ea04e..70b0d77497 100644
--- a/tensorflow/python/kernel_tests/rnn_test.py
+++ b/tensorflow/python/kernel_tests/rnn_test.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for rnn module."""
from __future__ import absolute_import
@@ -20,17 +19,46 @@ from __future__ import division
from __future__ import print_function
import itertools
+import sys
import time
import timeit
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
-import tensorflow as tf
+# TODO(mrry): Remove this hack which makes dlopen() in
+# sparse_feature_cross_op.py not crash in the open source world.
+if hasattr(sys, "getdlopenflags") and hasattr(sys, "setdlopenflags"):
+ import ctypes
+ sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
+
+from tensorflow.contrib import rnn as rnn_lib
+from tensorflow.core.protobuf import config_pb2
+from tensorflow.python.client import session
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops as ops_lib
+from tensorflow.python.framework import tensor_shape
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import control_flow_ops
+from tensorflow.python.ops import gradients_impl
+from tensorflow.python.ops import init_ops
+from tensorflow.python.ops import math_ops
+from tensorflow.python.ops import rnn
+from tensorflow.python.ops import rnn_cell_impl
+from tensorflow.python.ops import tensor_array_ops
+from tensorflow.python.ops import variable_scope
+from tensorflow.python.ops import variables as variables_lib
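+# The *_grad modules below register gradient functions as a side effect of
+# being imported; they are otherwise unused in this file.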
+import tensorflow.python.ops.data_flow_grad # pylint: disable=unused-import
+import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
+import tensorflow.python.ops.sparse_grad # pylint: disable=unused-import
+import tensorflow.python.ops.tensor_array_grad # pylint: disable=unused-import
+from tensorflow.python.platform import test
+from tensorflow.python.platform import tf_logging
from tensorflow.python.util import nest
-class Plus1RNNCell(tf.contrib.rnn.RNNCell):
+class Plus1RNNCell(rnn_cell_impl.RNNCell):
"""RNN Cell generating (output, new_state) = (input + 1, state + 1)."""
@property
@@ -45,7 +73,7 @@ class Plus1RNNCell(tf.contrib.rnn.RNNCell):
return (input_ + 1, state + 1)
-class DummyMultiDimensionalLSTM(tf.contrib.rnn.RNNCell):
+class DummyMultiDimensionalLSTM(rnn_cell_impl.RNNCell):
"""LSTM Cell generating (output, new_state) = (input + 1, state + 1).
The input to this cell may have an arbitrary number of dimensions that follow
@@ -63,8 +91,9 @@ class DummyMultiDimensionalLSTM(tf.contrib.rnn.RNNCell):
raise TypeError("The dimensions passed to DummyMultiDimensionalLSTM"
"should be a tuple of ints.")
self._dims = dims
- self._output_size = tf.TensorShape(self._dims)
- self._state_size = (tf.TensorShape(self._dims), tf.TensorShape(self._dims))
+ self._output_size = tensor_shape.TensorShape(self._dims)
+ self._state_size = (tensor_shape.TensorShape(self._dims),
+ tensor_shape.TensorShape(self._dims))
@property
def output_size(self):
@@ -79,7 +108,7 @@ class DummyMultiDimensionalLSTM(tf.contrib.rnn.RNNCell):
return (input_ + 1, (h + 1, c + 1))
-class NestedRNNCell(tf.contrib.rnn.RNNCell):
+class NestedRNNCell(rnn_cell_impl.RNNCell):
"""RNN Cell generating (output, new_state) = (input + 1, state + 1).
The input, output, and state of this cell are each a tuple of two tensors.
@@ -119,14 +148,14 @@ class TestStateSaver(object):
else:
raise TypeError("state_size should either be an int or a tuple")
- return tf.zeros((self._batch_size,) + state_size)
+ return array_ops.zeros((self._batch_size,) + state_size)
def save_state(self, name, state):
self.saved_state[name] = state
- return tf.identity(state)
+ return array_ops.identity(state)
-class RNNTest(tf.test.TestCase):
+class RNNTest(test.TestCase):
def setUp(self):
self._seed = 23489
@@ -134,13 +163,16 @@ class RNNTest(tf.test.TestCase):
def testInvalidSequenceLengthShape(self):
cell = Plus1RNNCell()
- inputs = [tf.placeholder(tf.float32, shape=(3, 4))]
+ inputs = [array_ops.placeholder(dtypes.float32, shape=(3, 4))]
with self.assertRaisesRegexp(ValueError, "must be a vector"):
- tf.nn.dynamic_rnn(
- cell, tf.stack(inputs), dtype=tf.float32, sequence_length=[[4]])
+ rnn.dynamic_rnn(
+ cell,
+ array_ops.stack(inputs),
+ dtype=dtypes.float32,
+ sequence_length=[[4]])
-class GRUTest(tf.test.TestCase):
+class GRUTest(test.TestCase):
def setUp(self):
self._seed = 23489
@@ -156,21 +188,24 @@ class GRUTest(tf.test.TestCase):
sequence_length = np.random.randint(0, time_steps, size=batch_size)
- with self.test_session(use_gpu=use_gpu, graph=tf.Graph()) as sess:
- concat_inputs = tf.placeholder(
- tf.float32, shape=(time_steps, batch_size, input_size))
+ with self.test_session(use_gpu=use_gpu, graph=ops_lib.Graph()) as sess:
+ concat_inputs = array_ops.placeholder(
+ dtypes.float32, shape=(time_steps, batch_size, input_size))
- cell = tf.contrib.rnn.GRUCell(num_units=num_units)
+ cell = rnn_cell_impl.GRUCell(num_units=num_units)
- with tf.variable_scope("dynamic_scope"):
- outputs_dynamic, state_dynamic = tf.nn.dynamic_rnn(
- cell, inputs=concat_inputs, sequence_length=sequence_length,
- time_major=True, dtype=tf.float32)
+ with variable_scope.variable_scope("dynamic_scope"):
+ outputs_dynamic, state_dynamic = rnn.dynamic_rnn(
+ cell,
+ inputs=concat_inputs,
+ sequence_length=sequence_length,
+ time_major=True,
+ dtype=dtypes.float32)
feeds = {concat_inputs: input_values}
# Initialize
- tf.global_variables_initializer().run(feed_dict=feeds)
+ variables_lib.global_variables_initializer().run(feed_dict=feeds)
sess.run([outputs_dynamic, state_dynamic], feed_dict=feeds)
@@ -179,23 +214,23 @@ class GRUTest(tf.test.TestCase):
self._testDynamic(use_gpu=True)
def _testScope(self, factory, prefix="prefix", use_outer_scope=True):
- with self.test_session(use_gpu=True, graph=tf.Graph()):
+ with self.test_session(use_gpu=True, graph=ops_lib.Graph()):
if use_outer_scope:
- with tf.variable_scope(prefix) as scope:
+ with variable_scope.variable_scope(prefix) as scope:
factory(scope)
else:
factory(prefix)
- tf.global_variables_initializer()
+ variables_lib.global_variables_initializer()
# check that all the variable names start
# with the proper scope.
- all_vars = tf.global_variables()
+ all_vars = variables_lib.global_variables()
prefix = prefix or "rnn"
scope_vars = [v for v in all_vars if v.name.startswith(prefix + "/")]
- tf.logging.info("RNN with scope: %s (%s)"
- % (prefix, "scope" if use_outer_scope else "str"))
+ tf_logging.info("RNN with scope: %s (%s)" %
+ (prefix, "scope" if use_outer_scope else "str"))
for v in scope_vars:
- tf.logging.info(v.name)
+ tf_logging.info(v.name)
self.assertEqual(len(scope_vars), len(all_vars))
def testDynamicScope(self):
@@ -206,30 +241,33 @@ class GRUTest(tf.test.TestCase):
sequence_length = np.random.randint(0, time_steps, size=batch_size)
def factory(scope):
- concat_inputs = tf.placeholder(
- tf.float32, shape=(time_steps, batch_size, input_size))
- cell = tf.contrib.rnn.GRUCell(num_units=num_units)
- return tf.nn.dynamic_rnn(cell, inputs=concat_inputs,
- sequence_length=sequence_length,
- time_major=True, dtype=tf.float32,
- scope=scope)
+ concat_inputs = array_ops.placeholder(
+ dtypes.float32, shape=(time_steps, batch_size, input_size))
+ cell = rnn_cell_impl.GRUCell(num_units=num_units)
+ return rnn.dynamic_rnn(
+ cell,
+ inputs=concat_inputs,
+ sequence_length=sequence_length,
+ time_major=True,
+ dtype=dtypes.float32,
+ scope=scope)
self._testScope(factory, use_outer_scope=True)
self._testScope(factory, use_outer_scope=False)
self._testScope(factory, prefix=None, use_outer_scope=False)
-class LSTMTest(tf.test.TestCase):
+class LSTMTest(test.TestCase):
def setUp(self):
self._seed = 23489
np.random.seed(self._seed)
def testDynamicRNNAllowsUnknownTimeDimension(self):
- inputs = tf.placeholder(tf.float32, shape=[1, None, 20])
- cell = tf.contrib.rnn.GRUCell(30)
+ inputs = array_ops.placeholder(dtypes.float32, shape=[1, None, 20])
+ cell = rnn_cell_impl.GRUCell(30)
# Smoke test; this should not raise an error.
- tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
+ rnn.dynamic_rnn(cell, inputs, dtype=dtypes.float32)
def testDynamicRNNWithTupleStates(self):
num_units = 3
@@ -238,44 +276,56 @@ class LSTMTest(tf.test.TestCase):
num_proj = 4
max_length = 8
sequence_length = [4, 6]
- with self.test_session(graph=tf.Graph()) as sess:
- initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=self._seed)
+ with self.test_session(graph=ops_lib.Graph()) as sess:
+ initializer = init_ops.random_uniform_initializer(
+ -0.01, 0.01, seed=self._seed)
inputs = max_length * [
- tf.placeholder(tf.float32, shape=(None, input_size))]
- inputs_c = tf.stack(inputs)
- cell = tf.contrib.rnn.LSTMCell(
- num_units, use_peepholes=True,
- num_proj=num_proj, initializer=initializer, state_is_tuple=True)
- with tf.variable_scope("root") as scope:
- outputs_static, state_static = tf.contrib.rnn.static_rnn(
- cell, inputs, dtype=tf.float32,
- sequence_length=sequence_length, scope=scope)
+ array_ops.placeholder(
+ dtypes.float32, shape=(None, input_size))
+ ]
+ inputs_c = array_ops.stack(inputs)
+ cell = rnn_cell_impl.LSTMCell(
+ num_units,
+ use_peepholes=True,
+ num_proj=num_proj,
+ initializer=initializer,
+ state_is_tuple=True)
+ with variable_scope.variable_scope("root") as scope:
+ outputs_static, state_static = rnn_lib.static_rnn(
+ cell,
+ inputs,
+ dtype=dtypes.float32,
+ sequence_length=sequence_length,
+ scope=scope)
scope.reuse_variables()
- outputs_dynamic, state_dynamic = tf.nn.dynamic_rnn(
- cell, inputs_c, dtype=tf.float32, time_major=True,
- sequence_length=sequence_length, scope=scope)
- self.assertTrue(isinstance(state_static, tf.contrib.rnn.LSTMStateTuple))
- self.assertTrue(isinstance(state_dynamic, tf.contrib.rnn.LSTMStateTuple))
+ outputs_dynamic, state_dynamic = rnn.dynamic_rnn(
+ cell,
+ inputs_c,
+ dtype=dtypes.float32,
+ time_major=True,
+ sequence_length=sequence_length,
+ scope=scope)
+ self.assertTrue(isinstance(state_static, rnn_cell_impl.LSTMStateTuple))
+ self.assertTrue(isinstance(state_dynamic, rnn_cell_impl.LSTMStateTuple))
self.assertEqual(state_static[0], state_static.c)
self.assertEqual(state_static[1], state_static.h)
self.assertEqual(state_dynamic[0], state_dynamic.c)
self.assertEqual(state_dynamic[1], state_dynamic.h)
- tf.global_variables_initializer().run()
+ variables_lib.global_variables_initializer().run()
input_value = np.random.randn(batch_size, input_size)
- outputs_static_v = sess.run(
- outputs_static, feed_dict={inputs[0]: input_value})
- outputs_dynamic_v = sess.run(
- outputs_dynamic, feed_dict={inputs[0]: input_value})
+ outputs_static_v = sess.run(outputs_static,
+ feed_dict={inputs[0]: input_value})
+ outputs_dynamic_v = sess.run(outputs_dynamic,
+ feed_dict={inputs[0]: input_value})
self.assertAllEqual(outputs_static_v, outputs_dynamic_v)
- state_static_v = sess.run(
- state_static, feed_dict={inputs[0]: input_value})
- state_dynamic_v = sess.run(
- state_dynamic, feed_dict={inputs[0]: input_value})
- self.assertAllEqual(
- np.hstack(state_static_v), np.hstack(state_dynamic_v))
+ state_static_v = sess.run(state_static,
+ feed_dict={inputs[0]: input_value})
+ state_dynamic_v = sess.run(state_dynamic,
+ feed_dict={inputs[0]: input_value})
+ self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_dynamic_v))
def testDynamicRNNWithNestedTupleStates(self):
num_units = 3
@@ -284,54 +334,67 @@ class LSTMTest(tf.test.TestCase):
num_proj = 4
max_length = 8
sequence_length = [4, 6]
- with self.test_session(graph=tf.Graph()) as sess:
- initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=self._seed)
+ with self.test_session(graph=ops_lib.Graph()) as sess:
+ initializer = init_ops.random_uniform_initializer(
+ -0.01, 0.01, seed=self._seed)
inputs = max_length * [
- tf.placeholder(tf.float32, shape=(None, input_size))]
- inputs_c = tf.stack(inputs)
+ array_ops.placeholder(
+ dtypes.float32, shape=(None, input_size))
+ ]
+ inputs_c = array_ops.stack(inputs)
+
def _cell(i):
- return tf.contrib.rnn.LSTMCell(
- num_units + i, use_peepholes=True,
- num_proj=num_proj + i, initializer=initializer, state_is_tuple=True)
+ return rnn_cell_impl.LSTMCell(
+ num_units + i,
+ use_peepholes=True,
+ num_proj=num_proj + i,
+ initializer=initializer,
+ state_is_tuple=True)
# This creates a state tuple which has 4 sub-tuples of length 2 each.
- cell = tf.contrib.rnn.MultiRNNCell(
+ cell = rnn_cell_impl.MultiRNNCell(
[_cell(i) for i in range(4)], state_is_tuple=True)
self.assertEqual(len(cell.state_size), 4)
for i in range(4):
self.assertEqual(len(cell.state_size[i]), 2)
- test_zero = cell.zero_state(1, tf.float32)
+ test_zero = cell.zero_state(1, dtypes.float32)
self.assertEqual(len(test_zero), 4)
for i in range(4):
self.assertEqual(test_zero[i][0].get_shape()[1], cell.state_size[i][0])
self.assertEqual(test_zero[i][1].get_shape()[1], cell.state_size[i][1])
- with tf.variable_scope("root") as scope:
- outputs_static, state_static = tf.contrib.rnn.static_rnn(
- cell, inputs, dtype=tf.float32,
- sequence_length=sequence_length, scope=scope)
+ with variable_scope.variable_scope("root") as scope:
+ outputs_static, state_static = rnn_lib.static_rnn(
+ cell,
+ inputs,
+ dtype=dtypes.float32,
+ sequence_length=sequence_length,
+ scope=scope)
scope.reuse_variables()
- outputs_dynamic, state_dynamic = tf.nn.dynamic_rnn(
- cell, inputs_c, dtype=tf.float32, time_major=True,
- sequence_length=sequence_length, scope=scope)
+ outputs_dynamic, state_dynamic = rnn.dynamic_rnn(
+ cell,
+ inputs_c,
+ dtype=dtypes.float32,
+ time_major=True,
+ sequence_length=sequence_length,
+ scope=scope)
- tf.global_variables_initializer().run()
+ variables_lib.global_variables_initializer().run()
input_value = np.random.randn(batch_size, input_size)
- outputs_static_v = sess.run(
- outputs_static, feed_dict={inputs[0]: input_value})
- outputs_dynamic_v = sess.run(
- outputs_dynamic, feed_dict={inputs[0]: input_value})
+ outputs_static_v = sess.run(outputs_static,
+ feed_dict={inputs[0]: input_value})
+ outputs_dynamic_v = sess.run(outputs_dynamic,
+ feed_dict={inputs[0]: input_value})
self.assertAllEqual(outputs_static_v, outputs_dynamic_v)
- state_static_v = sess.run(
- nest.flatten(state_static), feed_dict={inputs[0]: input_value})
- state_dynamic_v = sess.run(
- nest.flatten(state_dynamic), feed_dict={inputs[0]: input_value})
- self.assertAllEqual(
- np.hstack(state_static_v), np.hstack(state_dynamic_v))
+ state_static_v = sess.run(nest.flatten(state_static),
+ feed_dict={inputs[0]: input_value})
+ state_dynamic_v = sess.run(nest.flatten(state_dynamic),
+ feed_dict={inputs[0]: input_value})
+ self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_dynamic_v))
def _testDynamicEquivalentToStaticRNN(self, use_gpu, use_sequence_length):
time_steps = 8
@@ -348,46 +411,49 @@ class LSTMTest(tf.test.TestCase):
sequence_length = None
########## Step 1: Run static graph and generate readouts
- with self.test_session(use_gpu=use_gpu, graph=tf.Graph()) as sess:
- concat_inputs = tf.placeholder(tf.float32,
- shape=(time_steps, batch_size, input_size))
- inputs = tf.unstack(concat_inputs)
- initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=self._seed)
-
- cell = tf.contrib.rnn.LSTMCell(
- num_units, use_peepholes=True,
- initializer=initializer, num_proj=num_proj, state_is_tuple=False)
-
- with tf.variable_scope("dynamic_scope"):
- outputs_static, state_static = tf.contrib.rnn.static_rnn(
- cell, inputs, sequence_length=sequence_length, dtype=tf.float32)
+ with self.test_session(use_gpu=use_gpu, graph=ops_lib.Graph()) as sess:
+ concat_inputs = array_ops.placeholder(
+ dtypes.float32, shape=(time_steps, batch_size, input_size))
+ inputs = array_ops.unstack(concat_inputs)
+ initializer = init_ops.random_uniform_initializer(
+ -0.01, 0.01, seed=self._seed)
+
+ cell = rnn_cell_impl.LSTMCell(
+ num_units,
+ use_peepholes=True,
+ initializer=initializer,
+ num_proj=num_proj,
+ state_is_tuple=False)
+
+ with variable_scope.variable_scope("dynamic_scope"):
+ outputs_static, state_static = rnn_lib.static_rnn(
+ cell, inputs, sequence_length=sequence_length, dtype=dtypes.float32)
feeds = {concat_inputs: input_values}
# Initialize
- tf.global_variables_initializer().run(feed_dict=feeds)
+ variables_lib.global_variables_initializer().run(feed_dict=feeds)
# Generate gradients of sum of outputs w.r.t. inputs
- static_gradients = tf.gradients(
+ static_gradients = gradients_impl.gradients(
outputs_static + [state_static], [concat_inputs])
# Generate gradients of individual outputs w.r.t. inputs
static_individual_gradients = nest.flatten([
- tf.gradients(y, [concat_inputs])
- for y in [outputs_static[0],
- outputs_static[-1],
- state_static]])
+ gradients_impl.gradients(y, [concat_inputs])
+ for y in [outputs_static[0], outputs_static[-1], state_static]
+ ])
# Generate gradients of individual variables w.r.t. inputs
- trainable_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
- assert len(trainable_variables) > 1, (
- "Count of trainable variables: %d" % len(trainable_variables))
+ trainable_variables = ops_lib.get_collection(
+ ops_lib.GraphKeys.TRAINABLE_VARIABLES)
+ assert len(trainable_variables) > 1, ("Count of trainable variables: %d" %
+ len(trainable_variables))
# pylint: disable=bad-builtin
static_individual_variable_gradients = nest.flatten([
- tf.gradients(y, trainable_variables)
- for y in [outputs_static[0],
- outputs_static[-1],
- state_static]])
+ gradients_impl.gradients(y, trainable_variables)
+ for y in [outputs_static[0], outputs_static[-1], state_static]
+ ])
# Test forward pass
values_static = sess.run(outputs_static, feed_dict=feeds)
@@ -396,65 +462,72 @@ class LSTMTest(tf.test.TestCase):
# Test gradients to inputs and variables w.r.t. outputs & final state
static_grad_values = sess.run(static_gradients, feed_dict=feeds)
- static_individual_grad_values = sess.run(
- static_individual_gradients, feed_dict=feeds)
+ static_individual_grad_values = sess.run(static_individual_gradients,
+ feed_dict=feeds)
static_individual_var_grad_values = sess.run(
static_individual_variable_gradients, feed_dict=feeds)
########## Step 2: Run dynamic graph and generate readouts
- with self.test_session(use_gpu=use_gpu, graph=tf.Graph()) as sess:
- concat_inputs = tf.placeholder(tf.float32,
- shape=(time_steps, batch_size, input_size))
- inputs = tf.unstack(concat_inputs)
- initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=self._seed)
-
- cell = tf.contrib.rnn.LSTMCell(
- num_units, use_peepholes=True,
- initializer=initializer, num_proj=num_proj, state_is_tuple=False)
-
- with tf.variable_scope("dynamic_scope"):
- outputs_dynamic, state_dynamic = tf.nn.dynamic_rnn(
- cell, inputs=concat_inputs, sequence_length=sequence_length,
- time_major=True, dtype=tf.float32)
- split_outputs_dynamic = tf.unstack(outputs_dynamic, time_steps)
+ with self.test_session(use_gpu=use_gpu, graph=ops_lib.Graph()) as sess:
+ concat_inputs = array_ops.placeholder(
+ dtypes.float32, shape=(time_steps, batch_size, input_size))
+ inputs = array_ops.unstack(concat_inputs)
+ initializer = init_ops.random_uniform_initializer(
+ -0.01, 0.01, seed=self._seed)
+
+ cell = rnn_cell_impl.LSTMCell(
+ num_units,
+ use_peepholes=True,
+ initializer=initializer,
+ num_proj=num_proj,
+ state_is_tuple=False)
+
+ with variable_scope.variable_scope("dynamic_scope"):
+ outputs_dynamic, state_dynamic = rnn.dynamic_rnn(
+ cell,
+ inputs=concat_inputs,
+ sequence_length=sequence_length,
+ time_major=True,
+ dtype=dtypes.float32)
+ split_outputs_dynamic = array_ops.unstack(outputs_dynamic, time_steps)
feeds = {concat_inputs: input_values}
# Initialize
- tf.global_variables_initializer().run(feed_dict=feeds)
+ variables_lib.global_variables_initializer().run(feed_dict=feeds)
# Generate gradients of sum of outputs w.r.t. inputs
- dynamic_gradients = tf.gradients(
+ dynamic_gradients = gradients_impl.gradients(
split_outputs_dynamic + [state_dynamic], [concat_inputs])
# Generate gradients of several individual outputs w.r.t. inputs
dynamic_individual_gradients = nest.flatten([
- tf.gradients(y, [concat_inputs])
- for y in [split_outputs_dynamic[0],
- split_outputs_dynamic[-1],
- state_dynamic]])
+ gradients_impl.gradients(y, [concat_inputs])
+ for y in
+ [split_outputs_dynamic[0], split_outputs_dynamic[-1], state_dynamic]
+ ])
# Generate gradients of individual variables w.r.t. inputs
- trainable_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
- assert len(trainable_variables) > 1, (
- "Count of trainable variables: %d" % len(trainable_variables))
+ trainable_variables = ops_lib.get_collection(
+ ops_lib.GraphKeys.TRAINABLE_VARIABLES)
+ assert len(trainable_variables) > 1, ("Count of trainable variables: %d" %
+ len(trainable_variables))
dynamic_individual_variable_gradients = nest.flatten([
- tf.gradients(y, trainable_variables)
- for y in [split_outputs_dynamic[0],
- split_outputs_dynamic[-1],
- state_dynamic]])
+ gradients_impl.gradients(y, trainable_variables)
+ for y in
+ [split_outputs_dynamic[0], split_outputs_dynamic[-1], state_dynamic]
+ ])
# Test forward pass
values_dynamic = sess.run(split_outputs_dynamic, feed_dict=feeds)
- (state_value_dynamic,) = sess.run(
- (state_dynamic,), feed_dict=feeds)
+ (state_value_dynamic,) = sess.run((state_dynamic,), feed_dict=feeds)
# Test gradients to inputs and variables w.r.t. outputs & final state
dynamic_grad_values = sess.run(dynamic_gradients, feed_dict=feeds)
- dynamic_individual_grad_values = sess.run(
- dynamic_individual_gradients, feed_dict=feeds)
+ dynamic_individual_grad_values = sess.run(dynamic_individual_gradients,
+ feed_dict=feeds)
dynamic_individual_var_grad_values = sess.run(
dynamic_individual_variable_gradients, feed_dict=feeds)
@@ -467,20 +540,22 @@ class LSTMTest(tf.test.TestCase):
self.assertAllEqual(static_grad_values, dynamic_grad_values)
- self.assertEqual(len(static_individual_grad_values),
- len(dynamic_individual_grad_values))
- self.assertEqual(len(static_individual_var_grad_values),
- len(dynamic_individual_var_grad_values))
+ self.assertEqual(
+ len(static_individual_grad_values), len(dynamic_individual_grad_values))
+ self.assertEqual(
+ len(static_individual_var_grad_values),
+ len(dynamic_individual_var_grad_values))
- for i, (a, b) in enumerate(zip(static_individual_grad_values,
- dynamic_individual_grad_values)):
- tf.logging.info("Comparing individual gradients iteration %d" % i)
+ for i, (a, b) in enumerate(
+ zip(static_individual_grad_values, dynamic_individual_grad_values)):
+ tf_logging.info("Comparing individual gradients iteration %d" % i)
self.assertAllEqual(a, b)
- for i, (a, b) in enumerate(zip(static_individual_var_grad_values,
- dynamic_individual_var_grad_values)):
- tf.logging.info(
- "Comparing individual variable gradients iteration %d" % i)
+ for i, (a, b) in enumerate(
+ zip(static_individual_var_grad_values,
+ dynamic_individual_var_grad_values)):
+ tf_logging.info("Comparing individual variable gradients iteration %d" %
+ i)
self.assertAllEqual(a, b)
def testDynamicEquivalentToStaticRNN(self):
@@ -494,64 +569,66 @@ class LSTMTest(tf.test.TestCase):
use_gpu=True, use_sequence_length=True)
-class BidirectionalRNNTest(tf.test.TestCase):
+class BidirectionalRNNTest(test.TestCase):
def setUp(self):
self._seed = 23489
np.random.seed(self._seed)
- def _createBidirectionalDynamicRNN(self, use_gpu, use_shape,
- use_state_tuple, use_time_major,
+ def _createBidirectionalDynamicRNN(self,
+ use_gpu,
+ use_shape,
+ use_state_tuple,
+ use_time_major,
scope=None):
num_units = 3
input_size = 5
batch_size = 2
max_length = 8
- initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=self._seed)
- sequence_length = tf.placeholder(tf.int64)
- cell_fw = tf.contrib.rnn.LSTMCell(num_units,
- initializer=initializer,
- state_is_tuple=use_state_tuple)
- cell_bw = tf.contrib.rnn.LSTMCell(num_units,
- initializer=initializer,
- state_is_tuple=use_state_tuple)
+ initializer = init_ops.random_uniform_initializer(
+ -0.01, 0.01, seed=self._seed)
+ sequence_length = array_ops.placeholder(dtypes.int64)
+ cell_fw = rnn_cell_impl.LSTMCell(
+ num_units, initializer=initializer, state_is_tuple=use_state_tuple)
+ cell_bw = rnn_cell_impl.LSTMCell(
+ num_units, initializer=initializer, state_is_tuple=use_state_tuple)
inputs = max_length * [
- tf.placeholder(tf.float32,
- shape=(batch_size if use_shape else None, input_size))]
- inputs_c = tf.stack(inputs)
+ array_ops.placeholder(
+ dtypes.float32,
+ shape=(batch_size if use_shape else None, input_size))
+ ]
+ inputs_c = array_ops.stack(inputs)
if not use_time_major:
- inputs_c = tf.transpose(inputs_c, [1, 0, 2])
- outputs, states = tf.nn.bidirectional_dynamic_rnn(
+ inputs_c = array_ops.transpose(inputs_c, [1, 0, 2])
+ outputs, states = rnn.bidirectional_dynamic_rnn(
cell_fw,
cell_bw,
inputs_c,
sequence_length,
- dtype=tf.float32,
+ dtype=dtypes.float32,
time_major=use_time_major,
scope=scope)
- outputs = tf.concat_v2(outputs, 2)
+ outputs = array_ops.concat_v2(outputs, 2)
state_fw, state_bw = states
outputs_shape = [None, max_length, 2 * num_units]
if use_shape:
outputs_shape[0] = batch_size
if use_time_major:
outputs_shape[0], outputs_shape[1] = outputs_shape[1], outputs_shape[0]
- self.assertEqual(
- outputs.get_shape().as_list(),
- outputs_shape)
+ self.assertEqual(outputs.get_shape().as_list(), outputs_shape)
input_value = np.random.randn(batch_size, input_size)
return input_value, inputs, outputs, state_fw, state_bw, sequence_length
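The outputs_shape bookkeeping above follows from how the two directions are packed: each LSTM direction emits num_units features, array_ops.concat_v2(outputs, 2) joins them on the feature axis, and the two leading axes swap when use_time_major is set. A plain-Python sketch of that arithmetic, reusing the test's constants (editorial illustration, not part of the change):

    num_units, batch_size, max_length = 3, 2, 8
    use_shape, use_time_major = True, True

    outputs_shape = [None, max_length, 2 * num_units]  # fw + bw concatenated
    if use_shape:
      outputs_shape[0] = batch_size                    # [2, 8, 6]
    if use_time_major:
      outputs_shape[0], outputs_shape[1] = outputs_shape[1], outputs_shape[0]
    print(outputs_shape)                               # [8, 2, 6]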
- def _testBidirectionalDynamicRNN(self, use_gpu, use_shape,
- use_state_tuple, use_time_major):
- with self.test_session(use_gpu=use_gpu, graph=tf.Graph()) as sess:
+ def _testBidirectionalDynamicRNN(self, use_gpu, use_shape, use_state_tuple,
+ use_time_major):
+ with self.test_session(use_gpu=use_gpu, graph=ops_lib.Graph()) as sess:
input_value, inputs, outputs, state_fw, state_bw, sequence_length = (
- self._createBidirectionalDynamicRNN(
- use_gpu, use_shape, use_state_tuple, use_time_major))
- tf.global_variables_initializer().run()
+ self._createBidirectionalDynamicRNN(use_gpu, use_shape,
+ use_state_tuple, use_time_major))
+ variables_lib.global_variables_initializer().run()
# Run with pre-specified sequence length of 2, 3
if use_state_tuple:
out, c_fw, m_fw, c_bw, m_bw = sess.run(
@@ -561,9 +638,10 @@ class BidirectionalRNNTest(tf.test.TestCase):
s_fw = (c_fw, m_fw)
s_bw = (c_bw, m_bw)
else:
- out, s_fw, s_bw = sess.run([outputs, state_fw, state_bw],
- feed_dict={inputs[0]: input_value,
- sequence_length: [2, 3]})
+ out, s_fw, s_bw = sess.run(
+ [outputs, state_fw, state_bw],
+ feed_dict={inputs[0]: input_value,
+ sequence_length: [2, 3]})
# Since the forward and backward LSTM cells were initialized with the
# same parameters, the forward and backward outputs have to be the same,
@@ -606,39 +684,47 @@ class BidirectionalRNNTest(tf.test.TestCase):
# from [True, True, True, True] to [False, False, False, False]
options = itertools.product([True, False], repeat=4)
for option in options:
- self._testBidirectionalDynamicRNN(use_gpu=option[0], use_shape=option[1],
- use_state_tuple=option[2],
- use_time_major=option[3])
+ self._testBidirectionalDynamicRNN(
+ use_gpu=option[0],
+ use_shape=option[1],
+ use_state_tuple=option[2],
+ use_time_major=option[3])
def _testScope(self, factory, prefix="prefix", use_outer_scope=True):
# REMARKS: factory(scope) is a function accepting a scope
# as an argument; the scope can be None, a string,
# or a VariableScope instance.
- with self.test_session(use_gpu=True, graph=tf.Graph()):
+ with self.test_session(use_gpu=True, graph=ops_lib.Graph()):
if use_outer_scope:
- with tf.variable_scope(prefix) as scope:
+ with variable_scope.variable_scope(prefix) as scope:
factory(scope)
else:
factory(prefix)
# check that all the variable names start
# with the proper scope.
- tf.global_variables_initializer()
- all_vars = tf.global_variables()
+ variables_lib.global_variables_initializer()
+ all_vars = variables_lib.global_variables()
prefix = prefix or "bidirectional_rnn"
scope_vars = [v for v in all_vars if v.name.startswith(prefix + "/")]
- tf.logging.info("BiRNN with scope: %s (%s)"
- % (prefix, "scope" if use_outer_scope else "str"))
+ tf_logging.info("BiRNN with scope: %s (%s)" %
+ (prefix, "scope" if use_outer_scope else "str"))
for v in scope_vars:
- tf.logging.info(v.name)
+ tf_logging.info(v.name)
self.assertEqual(len(scope_vars), len(all_vars))
def testBidirectionalDynamicRNNScope(self):
+
def get_factory(use_time_major):
+
def factory(scope):
return self._createBidirectionalDynamicRNN(
- use_gpu=True, use_shape=True, use_state_tuple=True,
- use_time_major=use_time_major, scope=scope)
+ use_gpu=True,
+ use_shape=True,
+ use_state_tuple=True,
+ use_time_major=use_time_major,
+ scope=scope)
+
return factory
self._testScope(get_factory(True), use_outer_scope=True)
@@ -649,7 +735,7 @@ class BidirectionalRNNTest(tf.test.TestCase):
self._testScope(get_factory(False), prefix=None, use_outer_scope=False)
-class MultiDimensionalLSTMTest(tf.test.TestCase):
+class MultiDimensionalLSTMTest(test.TestCase):
def setUp(self):
self._seed = 23489
@@ -661,44 +747,49 @@ class MultiDimensionalLSTMTest(tf.test.TestCase):
batch_size = 2
max_length = 8
sequence_length = [4, 6]
- with self.test_session(graph=tf.Graph()) as sess:
+ with self.test_session(graph=ops_lib.Graph()) as sess:
inputs = max_length * [
- tf.placeholder(tf.float32, shape=(None,) + input_size)]
+ array_ops.placeholder(
+ dtypes.float32, shape=(None,) + input_size)
+ ]
inputs_using_dim = max_length * [
- tf.placeholder(tf.float32, shape=(batch_size,) + input_size)]
- inputs_c = tf.stack(inputs)
+ array_ops.placeholder(
+ dtypes.float32, shape=(batch_size,) + input_size)
+ ]
+ inputs_c = array_ops.stack(inputs)
# Create a cell for the whole test. This is fine because the cell has no
# variables.
cell = DummyMultiDimensionalLSTM(feature_dims)
state_saver = TestStateSaver(batch_size, input_size)
- outputs_static, state_static = tf.contrib.rnn.static_rnn(
- cell, inputs, dtype=tf.float32,
- sequence_length=sequence_length)
- outputs_dynamic, state_dynamic = tf.nn.dynamic_rnn(
- cell, inputs_c, dtype=tf.float32, time_major=True,
+ outputs_static, state_static = rnn_lib.static_rnn(
+ cell, inputs, dtype=dtypes.float32, sequence_length=sequence_length)
+ outputs_dynamic, state_dynamic = rnn.dynamic_rnn(
+ cell,
+ inputs_c,
+ dtype=dtypes.float32,
+ time_major=True,
sequence_length=sequence_length)
self.assertEqual(outputs_dynamic.get_shape().as_list(),
inputs_c.get_shape().as_list())
- tf.global_variables_initializer().run()
+ variables_lib.global_variables_initializer().run()
input_total_size = (batch_size,) + input_size
input_value = np.random.randn(*input_total_size)
- outputs_static_v = sess.run(
- outputs_static, feed_dict={inputs[0]: input_value})
- outputs_dynamic_v = sess.run(
- outputs_dynamic, feed_dict={inputs[0]: input_value})
+ outputs_static_v = sess.run(outputs_static,
+ feed_dict={inputs[0]: input_value})
+ outputs_dynamic_v = sess.run(outputs_dynamic,
+ feed_dict={inputs[0]: input_value})
self.assertAllEqual(outputs_static_v, outputs_dynamic_v)
- state_static_v = sess.run(
- state_static, feed_dict={inputs[0]: input_value})
- state_dynamic_v = sess.run(
- state_dynamic, feed_dict={inputs[0]: input_value})
- self.assertAllEqual(
- np.hstack(state_static_v), np.hstack(state_dynamic_v))
+ state_static_v = sess.run(state_static,
+ feed_dict={inputs[0]: input_value})
+ state_dynamic_v = sess.run(state_dynamic,
+ feed_dict={inputs[0]: input_value})
+ self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_dynamic_v))
-class NestedLSTMTest(tf.test.TestCase):
+class NestedLSTMTest(test.TestCase):
def setUp(self):
self._seed = 23489
@@ -710,27 +801,32 @@ class NestedLSTMTest(tf.test.TestCase):
state_size = 6
max_length = 8
sequence_length = [4, 6]
- with self.test_session(graph=tf.Graph()) as sess:
+ with self.test_session(graph=ops_lib.Graph()) as sess:
state_saver = TestStateSaver(batch_size, state_size)
- single_input = (tf.placeholder(tf.float32, shape=(None, input_size)),
- tf.placeholder(tf.float32, shape=(None, input_size)))
+ single_input = (array_ops.placeholder(
+ dtypes.float32, shape=(None, input_size)), array_ops.placeholder(
+ dtypes.float32, shape=(None, input_size)))
inputs = max_length * [single_input]
- inputs_c = (tf.stack([input_[0] for input_ in inputs]),
- tf.stack([input_[1] for input_ in inputs]))
+ inputs_c = (array_ops.stack([input_[0] for input_ in inputs]),
+ array_ops.stack([input_[1] for input_ in inputs]))
single_input_using_dim = (
- tf.placeholder(tf.float32, shape=(batch_size, input_size)),
- tf.placeholder(tf.float32, shape=(batch_size, input_size)))
+ array_ops.placeholder(
+ dtypes.float32, shape=(batch_size, input_size)),
+ array_ops.placeholder(
+ dtypes.float32, shape=(batch_size, input_size)))
inputs_using_dim = max_length * [single_input_using_dim]
# Create a cell for the whole test. This is fine because the cell has no
# variables.
cell = NestedRNNCell()
- outputs_dynamic, state_dynamic = tf.nn.dynamic_rnn(
- cell, inputs_c, dtype=tf.float32, time_major=True,
- sequence_length=sequence_length)
- outputs_static, state_static = tf.contrib.rnn.static_rnn(
- cell, inputs, dtype=tf.float32,
+ outputs_dynamic, state_dynamic = rnn.dynamic_rnn(
+ cell,
+ inputs_c,
+ dtype=dtypes.float32,
+ time_major=True,
sequence_length=sequence_length)
+ outputs_static, state_static = rnn_lib.static_rnn(
+ cell, inputs, dtype=dtypes.float32, sequence_length=sequence_length)
def _assert_same_shape(input1, input2, double=False):
flat_input1 = nest.flatten(input1)
@@ -744,79 +840,84 @@ class NestedLSTMTest(tf.test.TestCase):
_assert_same_shape(inputs_c, outputs_dynamic)
_assert_same_shape(inputs, outputs_static)
- tf.global_variables_initializer().run()
+ variables_lib.global_variables_initializer().run()
input_total_size = (batch_size, input_size)
input_value = (np.random.randn(*input_total_size),
np.random.randn(*input_total_size))
- outputs_dynamic_v = sess.run(
- outputs_dynamic, feed_dict={single_input: input_value})
- outputs_static_v = sess.run(
- outputs_static, feed_dict={single_input: input_value})
+ outputs_dynamic_v = sess.run(outputs_dynamic,
+ feed_dict={single_input: input_value})
+ outputs_static_v = sess.run(outputs_static,
+ feed_dict={single_input: input_value})
self.assertAllEqual(outputs_static_v,
np.transpose(outputs_dynamic_v, (1, 0, 2, 3)))
- state_dynamic_v = sess.run(
- state_dynamic, feed_dict={single_input: input_value})
- state_static_v = sess.run(
- state_static, feed_dict={single_input: input_value})
- self.assertAllEqual(
- np.hstack(state_static_v), np.hstack(state_dynamic_v))
+ state_dynamic_v = sess.run(state_dynamic,
+ feed_dict={single_input: input_value})
+ state_static_v = sess.run(state_static,
+ feed_dict={single_input: input_value})
+ self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_dynamic_v))
-class RawRNNTest(tf.test.TestCase):
+class RawRNNTest(test.TestCase):
def setUp(self):
self._seed = 23489
np.random.seed(self._seed)
def _testRawRNN(self, max_time):
- with self.test_session(graph=tf.Graph()) as sess:
+ with self.test_session(graph=ops_lib.Graph()) as sess:
batch_size = 16
input_depth = 4
num_units = 3
- inputs = tf.placeholder(shape=(max_time, batch_size, input_depth),
- dtype=tf.float32)
- sequence_length = tf.placeholder(shape=(batch_size,), dtype=tf.int32)
- inputs_ta = tf.TensorArray(dtype=tf.float32, size=tf.shape(inputs)[0])
+ inputs = array_ops.placeholder(
+ shape=(max_time, batch_size, input_depth), dtype=dtypes.float32)
+ sequence_length = array_ops.placeholder(
+ shape=(batch_size,), dtype=dtypes.int32)
+ inputs_ta = tensor_array_ops.TensorArray(
+ dtype=dtypes.float32, size=array_ops.shape(inputs)[0])
inputs_ta = inputs_ta.unstack(inputs)
- cell = tf.contrib.rnn.LSTMCell(num_units, state_is_tuple=True)
+ cell = rnn_cell_impl.LSTMCell(num_units, state_is_tuple=True)
def loop_fn(time_, cell_output, cell_state, unused_loop_state):
emit_output = cell_output # == None for time == 0
if cell_output is None: # time == 0
- next_state = cell.zero_state(batch_size, tf.float32)
+ next_state = cell.zero_state(batch_size, dtypes.float32)
else:
next_state = cell_state # copy state through
elements_finished = (time_ >= sequence_length)
- finished = tf.reduce_all(elements_finished)
+ finished = math_ops.reduce_all(elements_finished)
# For the very final iteration, we must emit a dummy input
- next_input = tf.cond(
+ next_input = control_flow_ops.cond(
finished,
- lambda: tf.zeros([batch_size, input_depth], dtype=tf.float32),
+ lambda: array_ops.zeros([batch_size, input_depth], dtype=dtypes.float32),
lambda: inputs_ta.read(time_))
return (elements_finished, next_input, next_state, emit_output, None)
- reuse_scope = tf.get_variable_scope()
+ reuse_scope = variable_scope.get_variable_scope()
- outputs_ta, final_state, _ = tf.nn.raw_rnn(
- cell, loop_fn, scope=reuse_scope)
+ outputs_ta, final_state, _ = rnn.raw_rnn(cell, loop_fn, scope=reuse_scope)
outputs = outputs_ta.stack()
reuse_scope.reuse_variables()
- outputs_dynamic_rnn, final_state_dynamic_rnn = tf.nn.dynamic_rnn(
- cell, inputs, time_major=True, dtype=tf.float32,
- sequence_length=sequence_length, scope=reuse_scope)
-
- variables = tf.trainable_variables()
- gradients = tf.gradients([outputs, final_state], [inputs] + variables)
- gradients_dynamic_rnn = tf.gradients(
+ outputs_dynamic_rnn, final_state_dynamic_rnn = rnn.dynamic_rnn(
+ cell,
+ inputs,
+ time_major=True,
+ dtype=dtypes.float32,
+ sequence_length=sequence_length,
+ scope=reuse_scope)
+
+ variables = variables_lib.trainable_variables()
+ gradients = gradients_impl.gradients([outputs, final_state],
+ [inputs] + variables)
+ gradients_dynamic_rnn = gradients_impl.gradients(
[outputs_dynamic_rnn, final_state_dynamic_rnn], [inputs] + variables)
- tf.global_variables_initializer().run()
+ variables_lib.global_variables_initializer().run()
rand_input = np.random.randn(max_time, batch_size, input_depth)
if max_time == 0:
@@ -827,10 +928,11 @@ class RawRNNTest(tf.test.TestCase):
# To ensure same output lengths for dynamic_rnn and raw_rnn
rand_seq_len[0] = max_time
- (outputs_val, outputs_dynamic_rnn_val,
- final_state_val, final_state_dynamic_rnn_val) = sess.run(
+ (outputs_val, outputs_dynamic_rnn_val, final_state_val,
+ final_state_dynamic_rnn_val) = sess.run(
[outputs, outputs_dynamic_rnn, final_state, final_state_dynamic_rnn],
- feed_dict={inputs: rand_input, sequence_length: rand_seq_len})
+ feed_dict={inputs: rand_input,
+ sequence_length: rand_seq_len})
self.assertAllClose(outputs_dynamic_rnn_val, outputs_val)
self.assertAllClose(final_state_dynamic_rnn_val, final_state_val)
@@ -843,15 +945,17 @@ class RawRNNTest(tf.test.TestCase):
self.assertEqual(len(gradients), len(gradients_dynamic_rnn))
gradients_val = sess.run(
gradients,
- feed_dict={inputs: rand_input, sequence_length: rand_seq_len})
+ feed_dict={inputs: rand_input,
+ sequence_length: rand_seq_len})
gradients_dynamic_rnn_val = sess.run(
gradients_dynamic_rnn,
- feed_dict={inputs: rand_input, sequence_length: rand_seq_len})
+ feed_dict={inputs: rand_input,
+ sequence_length: rand_seq_len})
self.assertEqual(len(gradients_val), len(gradients_dynamic_rnn_val))
input_gradients_val = gradients_val[0]
input_gradients_dynamic_rnn_val = gradients_dynamic_rnn_val[0]
- self.assertAllClose(
- input_gradients_val, input_gradients_dynamic_rnn_val)
+ self.assertAllClose(input_gradients_val,
+ input_gradients_dynamic_rnn_val)
for i in range(1, len(gradients_val)):
self.assertAllClose(gradients_dynamic_rnn_val[i], gradients_val[i])
@@ -866,112 +970,123 @@ class RawRNNTest(tf.test.TestCase):
self._testRawRNN(max_time=10)
def testLoopState(self):
- with self.test_session(graph=tf.Graph()):
+ with self.test_session(graph=ops_lib.Graph()):
max_time = 10
batch_size = 16
input_depth = 4
num_units = 3
inputs = np.random.randn(max_time, batch_size, input_depth)
- inputs_ta = tf.TensorArray(dtype=tf.float32, size=tf.shape(inputs)[0])
+ inputs_ta = tensor_array_ops.TensorArray(
+ dtype=dtypes.float32, size=array_ops.shape(inputs)[0])
inputs_ta = inputs_ta.unstack(inputs)
- cell = tf.contrib.rnn.LSTMCell(num_units, state_is_tuple=True)
+ cell = rnn_cell_impl.LSTMCell(num_units, state_is_tuple=True)
def loop_fn(time_, cell_output, cell_state, loop_state):
if cell_output is None:
- loop_state = tf.constant([0])
- next_state = cell.zero_state(batch_size, tf.float32)
+ loop_state = constant_op.constant([0])
+ next_state = cell.zero_state(batch_size, dtypes.float32)
else:
- loop_state = tf.stack([tf.squeeze(loop_state) + 1])
+ loop_state = array_ops.stack([array_ops.squeeze(loop_state) + 1])
next_state = cell_state
emit_output = cell_output # == None for time == 0
- elements_finished = tf.tile([time_ >= max_time], [batch_size])
- finished = tf.reduce_all(elements_finished)
+ elements_finished = array_ops.tile([time_ >= max_time], [batch_size])
+ finished = math_ops.reduce_all(elements_finished)
# For the very final iteration, we must emit a dummy input
- next_input = tf.cond(
+ next_input = control_flow_ops.cond(
finished,
- lambda: tf.zeros([batch_size, input_depth], dtype=tf.float32),
+ lambda: array_ops.zeros([batch_size, input_depth], dtype=dtypes.float32),
lambda: inputs_ta.read(time_))
- return (elements_finished, next_input,
- next_state, emit_output, loop_state)
+ return (elements_finished, next_input, next_state, emit_output,
+ loop_state)
- r = tf.nn.raw_rnn(cell, loop_fn)
+ r = rnn.raw_rnn(cell, loop_fn)
loop_state = r[-1]
self.assertEqual([10], loop_state.eval())
def testLoopStateWithTensorArray(self):
- with self.test_session(graph=tf.Graph()):
+ with self.test_session(graph=ops_lib.Graph()):
max_time = 4
batch_size = 16
input_depth = 4
num_units = 3
inputs = np.random.randn(max_time, batch_size, input_depth)
- inputs_ta = tf.TensorArray(dtype=tf.float32, size=tf.shape(inputs)[0])
+ inputs_ta = tensor_array_ops.TensorArray(
+ dtype=dtypes.float32, size=array_ops.shape(inputs)[0])
inputs_ta = inputs_ta.unstack(inputs)
- cell = tf.contrib.rnn.LSTMCell(num_units, state_is_tuple=True)
+ cell = rnn_cell_impl.LSTMCell(num_units, state_is_tuple=True)
+
def loop_fn(time_, cell_output, cell_state, loop_state):
if cell_output is None:
- loop_state = tf.TensorArray(
- dynamic_size=True, size=0, dtype=tf.int32, clear_after_read=False)
+ loop_state = tensor_array_ops.TensorArray(
+ dynamic_size=True,
+ size=0,
+ dtype=dtypes.int32,
+ clear_after_read=False)
loop_state = loop_state.write(0, 1)
- next_state = cell.zero_state(batch_size, tf.float32)
+ next_state = cell.zero_state(batch_size, dtypes.float32)
else:
- loop_state = loop_state.write(
- time_, loop_state.read(time_ - 1) + time_)
+ loop_state = loop_state.write(time_,
+ loop_state.read(time_ - 1) + time_)
next_state = cell_state
emit_output = cell_output # == None for time == 0
- elements_finished = tf.tile([time_ >= max_time], [batch_size])
- finished = tf.reduce_all(elements_finished)
+ elements_finished = array_ops.tile([time_ >= max_time], [batch_size])
+ finished = math_ops.reduce_all(elements_finished)
# For the very final iteration, we must emit a dummy input
- next_input = tf.cond(
+ next_input = control_flow_ops.cond(
finished,
- lambda: tf.zeros([batch_size, input_depth], dtype=tf.float32),
+ lambda: array_ops.zeros([batch_size, input_depth], dtype=dtypes.float32),
lambda: inputs_ta.read(time_))
- return (elements_finished, next_input,
- next_state, emit_output, loop_state)
+ return (elements_finished, next_input, next_state, emit_output,
+ loop_state)
- r = tf.nn.raw_rnn(cell, loop_fn)
+ r = rnn.raw_rnn(cell, loop_fn)
loop_state = r[-1]
loop_state = loop_state.stack()
self.assertAllEqual([1, 2, 2 + 2, 4 + 3, 7 + 4], loop_state.eval())
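The expected values in that assertion come straight from the write rule in loop_fn: index 0 is seeded with 1, and every later step t stores read(t - 1) + t, so with max_time = 4 indices 0 through 4 get written. A plain-Python sketch of the recurrence (editorial illustration, not part of the change):

    state = [1]                   # loop_state.write(0, 1)
    for t in range(1, 5):         # times 1..4 each write read(t - 1) + t
      state.append(state[-1] + t)
    print(state)                  # [1, 2, 4, 7, 11] == [1, 2, 2+2, 4+3, 7+4]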
def testEmitDifferentStructureThanCellOutput(self):
- with self.test_session(graph=tf.Graph()) as sess:
+ with self.test_session(graph=ops_lib.Graph()) as sess:
max_time = 10
batch_size = 16
input_depth = 4
num_units = 3
inputs = np.random.randn(max_time, batch_size, input_depth)
- inputs_ta = tf.TensorArray(dtype=tf.float32, size=tf.shape(inputs)[0])
+ inputs_ta = tensor_array_ops.TensorArray(
+ dtype=dtypes.float32, size=array_ops.shape(inputs)[0])
inputs_ta = inputs_ta.unstack(inputs)
- cell = tf.contrib.rnn.LSTMCell(num_units, state_is_tuple=True)
+ cell = rnn_cell_impl.LSTMCell(num_units, state_is_tuple=True)
+
def loop_fn(time_, cell_output, cell_state, _):
if cell_output is None:
- emit_output = (tf.zeros([2, 3], dtype=tf.int32),
- tf.zeros([1], dtype=tf.int64))
- next_state = cell.zero_state(batch_size, tf.float32)
+ emit_output = (array_ops.zeros(
+ [2, 3], dtype=dtypes.int32), array_ops.zeros(
+ [1], dtype=dtypes.int64))
+ next_state = cell.zero_state(batch_size, dtypes.float32)
else:
- emit_output = (tf.ones([batch_size, 2, 3], dtype=tf.int32),
- tf.ones([batch_size, 1], dtype=tf.int64))
+ emit_output = (array_ops.ones(
+ [batch_size, 2, 3], dtype=dtypes.int32), array_ops.ones(
+ [batch_size, 1], dtype=dtypes.int64))
next_state = cell_state
- elements_finished = tf.tile([time_ >= max_time], [batch_size])
- finished = tf.reduce_all(elements_finished)
+ elements_finished = array_ops.tile([time_ >= max_time], [batch_size])
+ finished = math_ops.reduce_all(elements_finished)
# For the very final iteration, we must emit a dummy input
- next_input = tf.cond(
+ next_input = control_flow_ops.cond(
finished,
- lambda: tf.zeros([batch_size, input_depth], dtype=tf.float32),
+ lambda: array_ops.zeros([batch_size, input_depth], dtype=dtypes.float32),
lambda: inputs_ta.read(time_))
return (elements_finished, next_input, next_state, emit_output, None)
- r = tf.nn.raw_rnn(cell, loop_fn)
+ r = rnn.raw_rnn(cell, loop_fn)
output_ta = r[0]
self.assertEqual(2, len(output_ta))
- self.assertEqual([tf.int32, tf.int64], [ta.dtype for ta in output_ta])
+ self.assertEqual([dtypes.int32, dtypes.int64],
+ [ta.dtype for ta in output_ta])
output = [ta.stack() for ta in output_ta]
output_vals = sess.run(output)
self.assertAllEqual(
@@ -980,23 +1095,23 @@ class RawRNNTest(tf.test.TestCase):
np.ones((max_time, batch_size, 1), np.int64), output_vals[1])
def _testScope(self, factory, prefix="prefix", use_outer_scope=True):
- with self.test_session(use_gpu=True, graph=tf.Graph()):
+ with self.test_session(use_gpu=True, graph=ops_lib.Graph()):
if use_outer_scope:
- with tf.variable_scope(prefix) as scope:
+ with variable_scope.variable_scope(prefix) as scope:
factory(scope)
else:
factory(prefix)
- tf.global_variables_initializer()
+ variables_lib.global_variables_initializer()
# check that all the variable names start
# with the proper scope.
- all_vars = tf.global_variables()
+ all_vars = variables_lib.global_variables()
prefix = prefix or "rnn"
scope_vars = [v for v in all_vars if v.name.startswith(prefix + "/")]
- tf.logging.info("RNN with scope: %s (%s)"
- % (prefix, "scope" if use_outer_scope else "str"))
+ tf_logging.info("RNN with scope: %s (%s)" %
+ (prefix, "scope" if use_outer_scope else "str"))
for v in scope_vars:
- tf.logging.info(v.name)
+ tf_logging.info(v.name)
self.assertEqual(len(scope_vars), len(all_vars))
def testRawRNNScope(self):
@@ -1006,37 +1121,40 @@ class RawRNNTest(tf.test.TestCase):
num_units = 3
def factory(scope):
- inputs = tf.placeholder(shape=(max_time, batch_size, input_depth),
- dtype=tf.float32)
- sequence_length = tf.placeholder(shape=(batch_size,), dtype=tf.int32)
- inputs_ta = tf.TensorArray(dtype=tf.float32, size=tf.shape(inputs)[0])
+ inputs = array_ops.placeholder(
+ shape=(max_time, batch_size, input_depth), dtype=dtypes.float32)
+ sequence_length = array_ops.placeholder(
+ shape=(batch_size,), dtype=dtypes.int32)
+ inputs_ta = tensor_array_ops.TensorArray(
+ dtype=dtypes.float32, size=array_ops.shape(inputs)[0])
inputs_ta = inputs_ta.unstack(inputs)
- cell = tf.contrib.rnn.LSTMCell(num_units, state_is_tuple=True)
+ cell = rnn_cell_impl.LSTMCell(num_units, state_is_tuple=True)
+
def loop_fn(time_, cell_output, cell_state, unused_loop_state):
emit_output = cell_output # == None for time == 0
if cell_output is None: # time == 0
- next_state = cell.zero_state(batch_size, tf.float32)
+ next_state = cell.zero_state(batch_size, dtypes.float32)
else:
next_state = cell_state
elements_finished = (time_ >= sequence_length)
- finished = tf.reduce_all(elements_finished)
+ finished = math_ops.reduce_all(elements_finished)
# For the very final iteration, we must emit a dummy input
- next_input = tf.cond(
+ next_input = control_flow_ops.cond(
finished,
- lambda: tf.zeros([batch_size, input_depth], dtype=tf.float32),
+ lambda: array_ops.zeros([batch_size, input_depth], dtype=dtypes.float32),
lambda: inputs_ta.read(time_))
return (elements_finished, next_input, next_state, emit_output, None)
- return tf.nn.raw_rnn(cell, loop_fn, scope=scope)
+ return rnn.raw_rnn(cell, loop_fn, scope=scope)
self._testScope(factory, use_outer_scope=True)
self._testScope(factory, use_outer_scope=False)
self._testScope(factory, prefix=None, use_outer_scope=False)
-class DeviceWrapperCell(tf.contrib.rnn.RNNCell):
+class DeviceWrapperCell(rnn_cell_impl.RNNCell):
"""Class to ensure cell calculation happens on a specific device."""
def __init__(self, cell, device):
@@ -1053,49 +1171,57 @@ class DeviceWrapperCell(tf.contrib.rnn.RNNCell):
def __call__(self, input_, state, scope=None):
if self._device is not None:
- with tf.device(self._device):
+ with ops_lib.device(self._device):
return self._cell(input_, state, scope)
else:
return self._cell(input_, state, scope)
-class TensorArrayOnCorrectDeviceTest(tf.test.TestCase):
+class TensorArrayOnCorrectDeviceTest(test.TestCase):
- def _execute_rnn_on(
- self, rnn_device=None, cell_device=None, input_device=None):
+ def _execute_rnn_on(self,
+ rnn_device=None,
+ cell_device=None,
+ input_device=None):
batch_size = 3
time_steps = 7
input_size = 5
num_units = 10
- cell = tf.contrib.rnn.LSTMCell(num_units, use_peepholes=True)
+ cell = rnn_cell_impl.LSTMCell(num_units, use_peepholes=True)
gpu_cell = DeviceWrapperCell(cell, cell_device)
- inputs = np.random.randn(batch_size, time_steps, input_size).astype(
- np.float32)
+ inputs = np.random.randn(batch_size, time_steps,
+ input_size).astype(np.float32)
sequence_length = np.random.randint(0, time_steps, size=batch_size)
if input_device is not None:
- with tf.device(input_device):
- inputs = tf.constant(inputs)
+ with ops_lib.device(input_device):
+ inputs = constant_op.constant(inputs)
if rnn_device is not None:
- with tf.device(rnn_device):
- outputs, _ = tf.nn.dynamic_rnn(
- gpu_cell, inputs, sequence_length=sequence_length, dtype=tf.float32)
+ with ops_lib.device(rnn_device):
+ outputs, _ = rnn.dynamic_rnn(
+ gpu_cell,
+ inputs,
+ sequence_length=sequence_length,
+ dtype=dtypes.float32)
else:
- outputs, _ = tf.nn.dynamic_rnn(
- gpu_cell, inputs, sequence_length=sequence_length, dtype=tf.float32)
+ outputs, _ = rnn.dynamic_rnn(
+ gpu_cell,
+ inputs,
+ sequence_length=sequence_length,
+ dtype=dtypes.float32)
with self.test_session(use_gpu=True) as sess:
- opts = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
- run_metadata = tf.RunMetadata()
- tf.global_variables_initializer().run()
+ opts = config_pb2.RunOptions(trace_level=config_pb2.RunOptions.FULL_TRACE)
+ run_metadata = config_pb2.RunMetadata()
+ variables_lib.global_variables_initializer().run()
sess.run(outputs, options=opts, run_metadata=run_metadata)
return run_metadata
def testRNNOnCPUCellOnGPU(self):
- if not tf.test.is_gpu_available():
+ if not test.is_gpu_available():
return # Test requires access to a GPU
run_metadata = self._execute_rnn_on(
@@ -1104,6 +1230,7 @@ class TensorArrayOnCorrectDeviceTest(tf.test.TestCase):
ix = 0 if "gpu" in step_stats.dev_stats[0].device else 1
gpu_stats = step_stats.dev_stats[ix].node_stats
cpu_stats = step_stats.dev_stats[1 - ix].node_stats
+
def _assert_in(op_str, in_stats, out_stats):
self.assertTrue(any(op_str in s.node_name for s in in_stats))
self.assertFalse(any(op_str in s.node_name for s in out_stats))
@@ -1118,7 +1245,7 @@ class TensorArrayOnCorrectDeviceTest(tf.test.TestCase):
_assert_in("TensorArrayScatter", cpu_stats, gpu_stats)
def testRNNOnCPUCellOnCPU(self):
- if not tf.test.is_gpu_available():
+ if not test.is_gpu_available():
return # Test requires access to a GPU
run_metadata = self._execute_rnn_on(
@@ -1127,6 +1254,7 @@ class TensorArrayOnCorrectDeviceTest(tf.test.TestCase):
ix = 0 if "gpu" in step_stats.dev_stats[0].device else 1
gpu_stats = step_stats.dev_stats[ix].node_stats
cpu_stats = step_stats.dev_stats[1 - ix].node_stats
+
def _assert_in(op_str, in_stats, out_stats):
self.assertTrue(any(op_str in s.node_name for s in in_stats))
self.assertFalse(any(op_str in s.node_name for s in out_stats))
@@ -1135,7 +1263,7 @@ class TensorArrayOnCorrectDeviceTest(tf.test.TestCase):
_assert_in("TensorArray", cpu_stats, gpu_stats)
def testInputOnGPUCellNotDeclared(self):
- if not tf.test.is_gpu_available():
+ if not test.is_gpu_available():
return # Test requires access to a GPU
run_metadata = self._execute_rnn_on(input_device="/gpu:0")
@@ -1143,6 +1271,7 @@ class TensorArrayOnCorrectDeviceTest(tf.test.TestCase):
ix = 0 if "gpu" in step_stats.dev_stats[0].device else 1
gpu_stats = step_stats.dev_stats[ix].node_stats
cpu_stats = step_stats.dev_stats[1 - ix].node_stats
+
def _assert_in(op_str, in_stats, out_stats):
self.assertTrue(any(op_str in s.node_name for s in in_stats))
self.assertFalse(any(op_str in s.node_name for s in out_stats))
@@ -1156,36 +1285,47 @@ class TensorArrayOnCorrectDeviceTest(tf.test.TestCase):
def _static_vs_dynamic_rnn_benchmark_static(inputs_list_t, sequence_length):
(_, input_size) = inputs_list_t[0].get_shape().as_list()
- initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=127)
- cell = tf.contrib.rnn.LSTMCell(
- num_units=input_size, use_peepholes=True, initializer=initializer,
+ initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=127)
+ cell = rnn_cell_impl.LSTMCell(
+ num_units=input_size,
+ use_peepholes=True,
+ initializer=initializer,
state_is_tuple=False)
- outputs, final_state = tf.contrib.rnn.static_rnn(
- cell, inputs_list_t, sequence_length=sequence_length, dtype=tf.float32)
+ outputs, final_state = rnn_lib.static_rnn(
+ cell,
+ inputs_list_t,
+ sequence_length=sequence_length,
+ dtype=dtypes.float32)
- trainable_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
- gradients = tf.gradients(outputs + [final_state], trainable_variables)
+ trainable_variables = ops_lib.get_collection(
+ ops_lib.GraphKeys.TRAINABLE_VARIABLES)
+ gradients = gradients_impl.gradients(outputs + [final_state],
+ trainable_variables)
- return tf.group(final_state, *(gradients + outputs))
+ return control_flow_ops.group(final_state, *(gradients + outputs))
def _static_vs_dynamic_rnn_benchmark_dynamic(inputs_t, sequence_length):
(unused_0, unused_1, input_size) = inputs_t.get_shape().as_list()
- initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=127)
- cell = tf.contrib.rnn.LSTMCell(
- num_units=input_size, use_peepholes=True, initializer=initializer,
+ initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=127)
+ cell = rnn_cell_impl.LSTMCell(
+ num_units=input_size,
+ use_peepholes=True,
+ initializer=initializer,
state_is_tuple=False)
- outputs, final_state = tf.nn.dynamic_rnn(
- cell, inputs_t, sequence_length=sequence_length, dtype=tf.float32)
+ outputs, final_state = rnn.dynamic_rnn(
+ cell, inputs_t, sequence_length=sequence_length, dtype=dtypes.float32)
- trainable_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
- gradients = tf.gradients([outputs, final_state], trainable_variables)
+ trainable_variables = ops_lib.get_collection(
+ ops_lib.GraphKeys.TRAINABLE_VARIABLES)
+ gradients = gradients_impl.gradients([outputs, final_state],
+ trainable_variables)
- return tf.group(final_state, outputs, *gradients)
+ return control_flow_ops.group(final_state, outputs, *gradients)
def graph_creation_static_vs_dynamic_rnn_benchmark(max_time):
- config = tf.ConfigProto()
+ config = config_pb2.ConfigProto()
config.allow_soft_placement = True
# These parameters don't matter
@@ -1197,27 +1337,29 @@ def graph_creation_static_vs_dynamic_rnn_benchmark(max_time):
sequence_length = np.random.randint(0, max_time, size=batch_size)
inputs_list = [
np.random.randn(batch_size, num_units).astype(np.float32)
- for _ in range(max_time)]
+ for _ in range(max_time)
+ ]
inputs = np.dstack(inputs_list).transpose([0, 2, 1]) # batch x time x depth
def _create_static_rnn():
- with tf.Session(config=config, graph=tf.Graph()) as sess:
+ with session.Session(config=config, graph=ops_lib.Graph()) as sess:
inputs_list_t = [
- tf.Variable(x, trainable=False).value() for x in inputs_list]
- ops = _static_vs_dynamic_rnn_benchmark_static(
- inputs_list_t, sequence_length)
+ variables_lib.Variable(
+ x, trainable=False).value() for x in inputs_list
+ ]
+ ops = _static_vs_dynamic_rnn_benchmark_static(inputs_list_t,
+ sequence_length)
def _create_dynamic_rnn():
- with tf.Session(config=config, graph=tf.Graph()) as sess:
- inputs_t = tf.Variable(inputs, trainable=False).value()
- ops = _static_vs_dynamic_rnn_benchmark_dynamic(
- inputs_t, sequence_length)
+ with session.Session(config=config, graph=ops_lib.Graph()) as sess:
+ inputs_t = variables_lib.Variable(inputs, trainable=False).value()
+ ops = _static_vs_dynamic_rnn_benchmark_dynamic(inputs_t, sequence_length)
delta_static = timeit.timeit(_create_static_rnn, number=5)
delta_dynamic = timeit.timeit(_create_dynamic_rnn, number=5)
print("%d \t %f \t %f \t %f" %
- (max_time, delta_static, delta_dynamic, delta_dynamic/delta_static))
+ (max_time, delta_static, delta_dynamic, delta_dynamic / delta_static))
return delta_static, delta_dynamic
@@ -1232,11 +1374,11 @@ def _timer(sess, ops):
for _ in range(runs):
sess.run(ops)
end = time.time()
- return (end - start)/float(runs)
+ return (end - start) / float(runs)
def static_vs_dynamic_rnn_benchmark(batch_size, max_time, num_units, use_gpu):
- config = tf.ConfigProto()
+ config = config_pb2.ConfigProto()
config.allow_soft_placement = True
# Set up sequence lengths
@@ -1244,53 +1386,62 @@ def static_vs_dynamic_rnn_benchmark(batch_size, max_time, num_units, use_gpu):
sequence_length = np.random.randint(0, max_time, size=batch_size)
inputs_list = [
np.random.randn(batch_size, num_units).astype(np.float32)
- for _ in range(max_time)]
+ for _ in range(max_time)
+ ]
inputs = np.dstack(inputs_list).transpose([0, 2, 1]) # batch x time x depth
# Using rnn()
- with tf.Session(config=config, graph=tf.Graph()) as sess:
- with tf.device("/cpu:0" if not use_gpu else None):
+ with session.Session(config=config, graph=ops_lib.Graph()) as sess:
+ with ops_lib.device("/cpu:0" if not use_gpu else None):
inputs_list_t = [
- tf.Variable(x, trainable=False).value() for x in inputs_list]
- ops = _static_vs_dynamic_rnn_benchmark_static(
- inputs_list_t, sequence_length)
- tf.global_variables_initializer().run()
+ variables_lib.Variable(
+ x, trainable=False).value() for x in inputs_list
+ ]
+ ops = _static_vs_dynamic_rnn_benchmark_static(inputs_list_t,
+ sequence_length)
+ variables_lib.global_variables_initializer().run()
delta_static = _timer(sess, ops)
# Using dynamic_rnn()
- with tf.Session(config=config, graph=tf.Graph()) as sess:
- with tf.device("/cpu:0" if not use_gpu else None):
- inputs_t = tf.Variable(inputs, trainable=False).value()
- ops = _static_vs_dynamic_rnn_benchmark_dynamic(
- inputs_t, sequence_length)
- tf.global_variables_initializer().run()
+ with session.Session(config=config, graph=ops_lib.Graph()) as sess:
+ with ops_lib.device("/cpu:0" if not use_gpu else None):
+ inputs_t = variables_lib.Variable(inputs, trainable=False).value()
+ ops = _static_vs_dynamic_rnn_benchmark_dynamic(inputs_t, sequence_length)
+ variables_lib.global_variables_initializer().run()
delta_dynamic = _timer(sess, ops)
print("%d \t %d \t %d \t %s \t %f \t %f \t %f" %
- (batch_size, max_time, num_units, use_gpu, delta_static,
- delta_dynamic, delta_dynamic/delta_static))
+ (batch_size, max_time, num_units, use_gpu, delta_static, delta_dynamic,
+ delta_dynamic / delta_static))
return delta_static, delta_dynamic
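
A note on what the two code paths above consume, since the reshaping is easy to miss: static_rnn takes a Python list of per-timestep [batch, depth] tensors, while dynamic_rnn takes a single [batch, time, depth] tensor; the np.dstack/transpose line converts between the two. A pure-numpy sketch of that shape contract (sizes are hypothetical):

    import numpy as np

    batch_size, max_time, num_units = 32, 50, 4   # hypothetical sizes
    inputs_list = [np.random.randn(batch_size, num_units).astype(np.float32)
                   for _ in range(max_time)]          # static_rnn: length-max_time list of [batch, depth]
    inputs = np.dstack(inputs_list).transpose([0, 2, 1])  # dynamic_rnn: one [batch, time, depth] array
    assert inputs.shape == (batch_size, max_time, num_units)
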
def _half_seq_len_vs_unroll_half_rnn_benchmark(inputs_list_t, sequence_length):
(_, input_size) = inputs_list_t[0].get_shape().as_list()
- initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=127)
- cell = tf.contrib.rnn.LSTMCell(
- num_units=input_size, use_peepholes=True, initializer=initializer,
+ initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=127)
+ cell = rnn_cell_impl.LSTMCell(
+ num_units=input_size,
+ use_peepholes=True,
+ initializer=initializer,
state_is_tuple=False)
- outputs, final_state = tf.contrib.rnn.static_rnn(
- cell, inputs_list_t, sequence_length=sequence_length, dtype=tf.float32)
+ outputs, final_state = rnn_lib.static_rnn(
+ cell,
+ inputs_list_t,
+ sequence_length=sequence_length,
+ dtype=dtypes.float32)
- trainable_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
- gradients = tf.gradients(outputs + [final_state], trainable_variables)
+ trainable_variables = ops_lib.get_collection(
+ ops_lib.GraphKeys.TRAINABLE_VARIABLES)
+ gradients = gradients_impl.gradients(outputs + [final_state],
+ trainable_variables)
- return tf.group(final_state, *(gradients + outputs))
+ return control_flow_ops.group(final_state, *(gradients + outputs))
-def half_seq_len_vs_unroll_half_rnn_benchmark(
- batch_size, max_time, num_units, use_gpu):
- config = tf.ConfigProto()
+def half_seq_len_vs_unroll_half_rnn_benchmark(batch_size, max_time, num_units,
+ use_gpu):
+ config = config_pb2.ConfigProto()
config.allow_soft_placement = True
# Set up sequence lengths
@@ -1298,55 +1449,67 @@ def half_seq_len_vs_unroll_half_rnn_benchmark(
sequence_length = max_time * np.ones((batch_size,))
inputs_list = [
np.random.randn(batch_size, num_units).astype(np.float32)
- for _ in range(max_time)]
+ for _ in range(max_time)
+ ]
# Halve the sequence length, full static unroll
- with tf.Session(config=config, graph=tf.Graph()) as sess:
- with tf.device("/cpu:0" if not use_gpu else None):
+ with session.Session(config=config, graph=ops_lib.Graph()) as sess:
+ with ops_lib.device("/cpu:0" if not use_gpu else None):
inputs_list_t = [
- tf.Variable(x, trainable=False).value() for x in inputs_list]
- ops = _half_seq_len_vs_unroll_half_rnn_benchmark(
- inputs_list_t, sequence_length / 2)
- tf.global_variables_initializer().run()
+ variables_lib.Variable(
+ x, trainable=False).value() for x in inputs_list
+ ]
+ ops = _half_seq_len_vs_unroll_half_rnn_benchmark(inputs_list_t,
+ sequence_length / 2)
+ variables_lib.global_variables_initializer().run()
delta_half_seq_len = _timer(sess, ops)
# Halve the unroll size, don't use sequence length
- with tf.Session(config=config, graph=tf.Graph()) as sess:
- with tf.device("/cpu:0" if not use_gpu else None):
+ with session.Session(config=config, graph=ops_lib.Graph()) as sess:
+ with ops_lib.device("/cpu:0" if not use_gpu else None):
inputs_list_t = [
- tf.Variable(x, trainable=False).value() for x in inputs_list]
+ variables_lib.Variable(
+ x, trainable=False).value() for x in inputs_list
+ ]
ops = _half_seq_len_vs_unroll_half_rnn_benchmark(
inputs_list_t[:(max_time // 2)], sequence_length / 2)
- tf.global_variables_initializer().run()
+ variables_lib.global_variables_initializer().run()
delta_unroll_half = _timer(sess, ops)
print("%d \t %d \t\t %d \t %s \t %f \t\t %f \t\t %f" %
(batch_size, max_time, num_units, use_gpu, delta_half_seq_len,
- delta_unroll_half, delta_half_seq_len/delta_unroll_half))
+ delta_unroll_half, delta_half_seq_len / delta_unroll_half))
return delta_half_seq_len, delta_unroll_half
-def _concat_state_vs_tuple_state_rnn_benchmark(
- inputs_list_t, sequence_length, state_is_tuple):
+def _concat_state_vs_tuple_state_rnn_benchmark(inputs_list_t, sequence_length,
+ state_is_tuple):
(_, input_size) = inputs_list_t[0].get_shape().as_list()
- initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=127)
- cell = tf.contrib.rnn.LSTMCell(
- num_units=input_size, use_peepholes=True,
- initializer=initializer, state_is_tuple=state_is_tuple)
- outputs, final_state = tf.contrib.rnn.static_rnn(
- cell, inputs_list_t, sequence_length=sequence_length, dtype=tf.float32)
+ initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=127)
+ cell = rnn_cell_impl.LSTMCell(
+ num_units=input_size,
+ use_peepholes=True,
+ initializer=initializer,
+ state_is_tuple=state_is_tuple)
+ outputs, final_state = rnn_lib.static_rnn(
+ cell,
+ inputs_list_t,
+ sequence_length=sequence_length,
+ dtype=dtypes.float32)
final_state = list(final_state) if state_is_tuple else [final_state]
- trainable_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
- gradients = tf.gradients(outputs + final_state, trainable_variables)
+ trainable_variables = ops_lib.get_collection(
+ ops_lib.GraphKeys.TRAINABLE_VARIABLES)
+ gradients = gradients_impl.gradients(outputs + final_state,
+ trainable_variables)
- return tf.group(*(final_state + gradients + outputs))
+ return control_flow_ops.group(*(final_state + gradients + outputs))
-def concat_state_vs_tuple_state_rnn_benchmark(
- batch_size, max_time, num_units, use_gpu):
- config = tf.ConfigProto()
+def concat_state_vs_tuple_state_rnn_benchmark(batch_size, max_time, num_units,
+ use_gpu):
+ config = config_pb2.ConfigProto()
config.allow_soft_placement = True
# Set up sequence lengths
@@ -1354,53 +1517,64 @@ def concat_state_vs_tuple_state_rnn_benchmark(
sequence_length = max_time * np.ones((batch_size,))
inputs_list = [
np.random.randn(batch_size, num_units).astype(np.float32)
- for _ in range(max_time)]
+ for _ in range(max_time)
+ ]
# Run with concatenated states (default)
- with tf.Session(config=config, graph=tf.Graph()) as sess:
- with tf.device("/cpu:0" if not use_gpu else None):
+ with session.Session(config=config, graph=ops_lib.Graph()) as sess:
+ with ops_lib.device("/cpu:0" if not use_gpu else None):
inputs_list_t = [
- tf.Variable(x, trainable=False).value() for x in inputs_list]
+ variables_lib.Variable(
+ x, trainable=False).value() for x in inputs_list
+ ]
ops = _concat_state_vs_tuple_state_rnn_benchmark(
inputs_list_t, sequence_length, state_is_tuple=False)
- tf.global_variables_initializer().run()
+ variables_lib.global_variables_initializer().run()
delta_concat_state = _timer(sess, ops)
# Run with tuple states (new)
- with tf.Session(config=config, graph=tf.Graph()) as sess:
- with tf.device("/cpu:0" if not use_gpu else None):
+ with session.Session(config=config, graph=ops_lib.Graph()) as sess:
+ with ops_lib.device("/cpu:0" if not use_gpu else None):
inputs_list_t = [
- tf.Variable(x, trainable=False).value() for x in inputs_list]
+ variables_lib.Variable(
+ x, trainable=False).value() for x in inputs_list
+ ]
ops = _concat_state_vs_tuple_state_rnn_benchmark(
inputs_list_t, sequence_length, state_is_tuple=True)
- tf.global_variables_initializer().run()
+ variables_lib.global_variables_initializer().run()
delta_tuple_state = _timer(sess, ops)
print("%d \t %d \t %d \t %s \t %f \t\t %f \t\t %f" %
(batch_size, max_time, num_units, use_gpu, delta_concat_state,
- delta_tuple_state, delta_concat_state/delta_tuple_state))
+ delta_tuple_state, delta_concat_state / delta_tuple_state))
return delta_concat_state, delta_tuple_state
-def _dynamic_rnn_swap_memory_benchmark(inputs_t, sequence_length,
- swap_memory):
+def _dynamic_rnn_swap_memory_benchmark(inputs_t, sequence_length, swap_memory):
(unused_0, unused_1, input_size) = inputs_t.get_shape().as_list()
- initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=127)
- cell = tf.contrib.rnn.LSTMCell(
- num_units=input_size, use_peepholes=True, initializer=initializer,
+ initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=127)
+ cell = rnn_cell_impl.LSTMCell(
+ num_units=input_size,
+ use_peepholes=True,
+ initializer=initializer,
state_is_tuple=False)
- outputs, final_state = tf.nn.dynamic_rnn(
- cell, inputs_t, sequence_length=sequence_length,
- swap_memory=swap_memory, dtype=tf.float32)
+ outputs, final_state = rnn.dynamic_rnn(
+ cell,
+ inputs_t,
+ sequence_length=sequence_length,
+ swap_memory=swap_memory,
+ dtype=dtypes.float32)
- trainable_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
- gradients = tf.gradients([outputs, final_state], trainable_variables)
+ trainable_variables = ops_lib.get_collection(
+ ops_lib.GraphKeys.TRAINABLE_VARIABLES)
+ gradients = gradients_impl.gradients([outputs, final_state],
+ trainable_variables)
- return tf.group(final_state, outputs, *gradients)
+ return control_flow_ops.group(final_state, outputs, *gradients)
def dynamic_rnn_swap_memory_benchmark(batch_size, max_time, num_units):
- config = tf.ConfigProto()
+ config = config_pb2.ConfigProto()
config.allow_soft_placement = True
# Set up sequence lengths
@@ -1408,33 +1582,34 @@ def dynamic_rnn_swap_memory_benchmark(batch_size, max_time, num_units):
sequence_length = np.random.randint(0, max_time, size=batch_size)
inputs_list = [
np.random.randn(batch_size, num_units).astype(np.float32)
- for _ in range(max_time)]
+ for _ in range(max_time)
+ ]
inputs = np.dstack(inputs_list).transpose([0, 2, 1]) # batch x time x depth
# No memory swap
- with tf.Session(config=config, graph=tf.Graph()) as sess:
- inputs_t = tf.Variable(inputs, trainable=False).value()
+ with session.Session(config=config, graph=ops_lib.Graph()) as sess:
+ inputs_t = variables_lib.Variable(inputs, trainable=False).value()
ops = _dynamic_rnn_swap_memory_benchmark(
inputs_t, sequence_length, swap_memory=False)
- tf.global_variables_initializer().run()
+ variables_lib.global_variables_initializer().run()
no_swap = _timer(sess, ops)
# Memory swap
- with tf.Session(config=config, graph=tf.Graph()) as sess:
- inputs_t = tf.Variable(inputs, trainable=False).value()
+ with session.Session(config=config, graph=ops_lib.Graph()) as sess:
+ inputs_t = variables_lib.Variable(inputs, trainable=False).value()
ops = _dynamic_rnn_swap_memory_benchmark(
inputs_t, sequence_length, swap_memory=True)
- tf.global_variables_initializer().run()
+ variables_lib.global_variables_initializer().run()
swap = _timer(sess, ops)
print("%d \t %d \t %d \t %f \t %f \t %f" %
- (batch_size, max_time, num_units, no_swap, swap, swap/no_swap))
+ (batch_size, max_time, num_units, no_swap, swap, swap / no_swap))
return no_swap, swap
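
For context on what these two timings compare: with swap_memory=True, the while_loop underneath dynamic_rnn may move forward-pass tensors that are only needed again during backprop out to host memory, trading transfer time for GPU memory headroom on long sequences. The toggle is the only difference between the two measured graphs:

    # The single knob under test (all other arguments identical):
    #   rnn.dynamic_rnn(cell, inputs_t, sequence_length=sequence_length,
    #                   swap_memory=False, dtype=dtypes.float32)  # keep activations on device
    #   rnn.dynamic_rnn(cell, inputs_t, sequence_length=sequence_length,
    #                   swap_memory=True, dtype=dtypes.float32)   # allow device->host swapping
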
-def rnn_long_sequence_benchmark(batch_size, seqlen, num_units,
- dynamic, swap_memory):
- config = tf.ConfigProto()
+def rnn_long_sequence_benchmark(batch_size, seqlen, num_units, dynamic,
+ swap_memory):
+ config = config_pb2.ConfigProto()
config.allow_soft_placement = True
# Set up sequence lengths
@@ -1442,42 +1617,49 @@ def rnn_long_sequence_benchmark(batch_size, seqlen, num_units,
sequence_length = [seqlen for _ in range(batch_size)]
inputs_list = [
np.random.randn(batch_size, num_units).astype(np.float32)
- for _ in range(seqlen)]
+ for _ in range(seqlen)
+ ]
inputs = np.dstack(inputs_list).transpose([0, 2, 1]) # batch x time x depth
for _ in range(5):
if dynamic:
- with tf.Session(config=config, graph=tf.Graph()) as sess:
- inputs_t = tf.Variable(inputs, trainable=False).value()
+ with session.Session(config=config, graph=ops_lib.Graph()) as sess:
+ inputs_t = variables_lib.Variable(inputs, trainable=False).value()
ops = _dynamic_rnn_swap_memory_benchmark(
inputs_t, sequence_length, swap_memory=swap_memory)
- tf.global_variables_initializer().run()
+ variables_lib.global_variables_initializer().run()
elapsed = _timer(sess, ops)
else:
- with tf.Session(config=config, graph=tf.Graph()) as sess:
+ with session.Session(config=config, graph=ops_lib.Graph()) as sess:
inputs_list_t = [
- tf.Variable(x, trainable=False).value() for x in inputs_list]
- ops = _static_vs_dynamic_rnn_benchmark_static(
- inputs_list_t, sequence_length)
- tf.global_variables_initializer().run()
+ variables_lib.Variable(
+ x, trainable=False).value() for x in inputs_list
+ ]
+ ops = _static_vs_dynamic_rnn_benchmark_static(inputs_list_t,
+ sequence_length)
+ variables_lib.global_variables_initializer().run()
elapsed = _timer(sess, ops)
- print("%d \t %d \t %d \t %s \t %f \t %f" %
- (batch_size, seqlen, num_units, dynamic, elapsed,
- elapsed/seqlen))
+ print("%d \t %d \t %d \t %s \t %f \t %f" % (batch_size, seqlen, num_units,
+ dynamic, elapsed,
+ elapsed / seqlen))
-class BenchmarkRNN(tf.test.Benchmark):
+class BenchmarkRNN(test.Benchmark):
def benchmarkGraphCreationStaticVsDynamicLSTM(self):
print("Graph Creation: Static Unroll vs. Dynamic Unroll LSTM")
print("max_t \t dt(static) \t dt(dynamic) \t dt(dynamic)/dt(static)")
for max_time in (1, 25, 50):
s_dt, d_dt = graph_creation_static_vs_dynamic_rnn_benchmark(max_time)
- self.report_benchmark(name="graph_creation_time_static_T%02d" % max_time,
- iters=5, wall_time=s_dt)
- self.report_benchmark(name="graph_creation_time_dynamic_T%02d" % max_time,
- iters=5, wall_time=d_dt)
+ self.report_benchmark(
+ name="graph_creation_time_static_T%02d" % max_time,
+ iters=5,
+ wall_time=s_dt)
+ self.report_benchmark(
+ name="graph_creation_time_dynamic_T%02d" % max_time,
+ iters=5,
+ wall_time=d_dt)
def benchmarkStaticUnrollVsDynamicFlowLSTM(self):
print("Calculation: Static Unroll with Dynamic Flow LSTM "
@@ -1488,16 +1670,18 @@ class BenchmarkRNN(tf.test.Benchmark):
for max_time in (50,):
for num_units in (512, 256, 128):
for use_gpu in (False, True):
- s_dt, d_dt = static_vs_dynamic_rnn_benchmark(
- batch_size, max_time, num_units, use_gpu)
+ s_dt, d_dt = static_vs_dynamic_rnn_benchmark(batch_size, max_time,
+ num_units, use_gpu)
self.report_benchmark(
- name="static_unroll_time_T%02d_B%03d_N%03d_gpu_%s"
- % (max_time, batch_size, num_units, use_gpu),
- iters=20, wall_time=s_dt)
+ name="static_unroll_time_T%02d_B%03d_N%03d_gpu_%s" %
+ (max_time, batch_size, num_units, use_gpu),
+ iters=20,
+ wall_time=s_dt)
self.report_benchmark(
- name="dynamic_unroll_time_T%02d_B%03d_N%03d_gpu_%s"
- % (max_time, batch_size, num_units, use_gpu),
- iters=20, wall_time=d_dt)
+ name="dynamic_unroll_time_T%02d_B%03d_N%03d_gpu_%s" %
+ (max_time, batch_size, num_units, use_gpu),
+ iters=20,
+ wall_time=d_dt)
def benchmarkDynamicLSTMNoMemorySwapVsMemorySwap(self):
print("Calculation: Dynamic LSTM No Memory Swap vs. Memory Swap")
@@ -1505,16 +1689,18 @@ class BenchmarkRNN(tf.test.Benchmark):
for batch_size in (256, 512):
for max_time in (100,):
for num_units in (512, 256, 128):
- no_swap, swap = dynamic_rnn_swap_memory_benchmark(
- batch_size, max_time, num_units)
+ no_swap, swap = dynamic_rnn_swap_memory_benchmark(batch_size,
+ max_time, num_units)
self.report_benchmark(
- name="dynamic_lstm_no_memory_swap_T%02d_B%03d_N%03d"
- % (max_time, batch_size, num_units),
- iters=20, wall_time=no_swap)
+ name="dynamic_lstm_no_memory_swap_T%02d_B%03d_N%03d" %
+ (max_time, batch_size, num_units),
+ iters=20,
+ wall_time=no_swap)
self.report_benchmark(
- name="dynamic_lstm_with_memory_swap_T%02d_B%03d_N%03d"
- % (max_time, batch_size, num_units),
- iters=20, wall_time=swap)
+ name="dynamic_lstm_with_memory_swap_T%02d_B%03d_N%03d" %
+ (max_time, batch_size, num_units),
+ iters=20,
+ wall_time=swap)
def benchmarkStaticUnrollHalfSequenceLengthVsHalfUnroll(self):
print("Calculation: Static Unroll with Halved Sequence Length "
@@ -1525,37 +1711,49 @@ class BenchmarkRNN(tf.test.Benchmark):
for max_time in (50,):
for num_units in (256,):
for use_gpu in (False, True):
- s_dt, d_dt = half_seq_len_vs_unroll_half_rnn_benchmark(
- batch_size, max_time, num_units, use_gpu)
+ s_dt, d_dt = half_seq_len_vs_unroll_half_rnn_benchmark(batch_size,
+ max_time,
+ num_units,
+ use_gpu)
self.report_benchmark(
- name="half_seq_len_time_T%02d_B%03d_N%03d_gpu_%s"
- % (max_time, batch_size, num_units, use_gpu),
- iters=20, wall_time=s_dt)
+ name="half_seq_len_time_T%02d_B%03d_N%03d_gpu_%s" %
+ (max_time, batch_size, num_units, use_gpu),
+ iters=20,
+ wall_time=s_dt)
self.report_benchmark(
- name="unroll_half_time_T%02d_B%03d_N%03d_gpu_%s"
- % (max_time, batch_size, num_units, use_gpu),
- iters=20, wall_time=d_dt)
+ name="unroll_half_time_T%02d_B%03d_N%03d_gpu_%s" %
+ (max_time, batch_size, num_units, use_gpu),
+ iters=20,
+ wall_time=d_dt)
def benchmarkStaticUnrollStateConcatVsStateTuple(self):
print("Calculation: Static Unroll with Concatenated State "
"vs. Tuple State")
print("batch \t time \t units \t gpu \t dt(concat_state) "
"\t dt(tuple_state) \t dt(concat_state)/dt(tuple_state)")
- for batch_size in (16, 128,):
+ for batch_size in (
+ 16,
+ 128,):
for max_time in (50,):
- for num_units in (16, 128,):
+ for num_units in (
+ 16,
+ 128,):
for use_gpu in (False, True):
- c_dt, t_dt = concat_state_vs_tuple_state_rnn_benchmark(
- batch_size, max_time, num_units, use_gpu)
+ c_dt, t_dt = concat_state_vs_tuple_state_rnn_benchmark(batch_size,
+ max_time,
+ num_units,
+ use_gpu)
self.report_benchmark(
- name="concat_state_time_T%02d_B%03d_N%03d_gpu_%s"
- % (max_time, batch_size, num_units, use_gpu),
- iters=20, wall_time=c_dt)
+ name="concat_state_time_T%02d_B%03d_N%03d_gpu_%s" %
+ (max_time, batch_size, num_units, use_gpu),
+ iters=20,
+ wall_time=c_dt)
self.report_benchmark(
- name="tuple_state_time_T%02d_B%03d_N%03d_gpu_%s"
- % (max_time, batch_size, num_units, use_gpu),
- iters=20, wall_time=t_dt)
+ name="tuple_state_time_T%02d_B%03d_N%03d_gpu_%s" %
+ (max_time, batch_size, num_units, use_gpu),
+ iters=20,
+ wall_time=t_dt)
if __name__ == "__main__":
- tf.test.main()
+ test.main()
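
All of the rnn_test.py edits above are instances of one mechanical rewrite: each `tf.foo` call is repointed at the submodule that defines it, so the test no longer depends on the `tensorflow` hourglass package. A minimal runnable sketch of the pattern, using import paths that appear elsewhere in this change (the aliases mirror this file's usage; the committed import list is longer):

    import numpy as np

    from tensorflow.core.protobuf import config_pb2               # replaces tf.ConfigProto
    from tensorflow.python.client import session                  # replaces tf.Session
    from tensorflow.python.framework import ops as ops_lib        # replaces tf.Graph
    from tensorflow.python.ops import variables as variables_lib  # replaces tf.Variable

    config = config_pb2.ConfigProto()
    config.allow_soft_placement = True
    with session.Session(config=config, graph=ops_lib.Graph()) as sess:
      v = variables_lib.Variable(np.ones([2, 2], np.float32), trainable=False)
      variables_lib.global_variables_initializer().run()
      print(sess.run(v.value()))
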
diff --git a/tensorflow/python/kernel_tests/save_restore_ops_test.py b/tensorflow/python/kernel_tests/save_restore_ops_test.py
index d478efbf95..ddc93eb339 100644
--- a/tensorflow/python/kernel_tests/save_restore_ops_test.py
+++ b/tensorflow/python/kernel_tests/save_restore_ops_test.py
@@ -12,27 +12,29 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for tensorflow.ops.io_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-import tensorflow as tf
+from tensorflow.core.protobuf import config_pb2
+from tensorflow.python.client import session
from tensorflow.python.ops import gen_io_ops
+from tensorflow.python.platform import test
-class ShardedFileOpsTest(tf.test.TestCase):
+class ShardedFileOpsTest(test.TestCase):
def testShardedFileName(self):
- with tf.Session(
- target="",
- config=tf.ConfigProto(device_count={"CPU": 2})):
- self.assertEqual(gen_io_ops._sharded_filename("foo", 4, 100).eval(),
- b"foo-00004-of-00100")
- self.assertEqual(gen_io_ops._sharded_filespec("foo", 100).eval(),
- b"foo-?????-of-00100")
+ with session.Session(
+ target="", config=config_pb2.ConfigProto(device_count={"CPU": 2})):
+ self.assertEqual(
+ gen_io_ops._sharded_filename("foo", 4, 100).eval(),
+ b"foo-00004-of-00100")
+ self.assertEqual(
+ gen_io_ops._sharded_filespec("foo", 100).eval(),
+ b"foo-?????-of-00100")
if __name__ == "__main__":
- tf.test.main()
+ test.main()
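
The only substantive change in this file is the import style; the asserted byte strings document the sharding scheme itself. A pure-Python rendering of that naming scheme, matching the expected values above (the helper names here are made up for illustration):

    def sharded_filename(basename, shard, num_shards):
      # Mirrors gen_io_ops._sharded_filename: zero-padded, five-digit fields.
      return ("%s-%05d-of-%05d" % (basename, shard, num_shards)).encode()

    def sharded_filespec(basename, num_shards):
      # Mirrors gen_io_ops._sharded_filespec: '?' wildcards for the shard index.
      return ("%s-?????-of-%05d" % (basename, num_shards)).encode()

    assert sharded_filename("foo", 4, 100) == b"foo-00004-of-00100"
    assert sharded_filespec("foo", 100) == b"foo-?????-of-00100"
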
diff --git a/tensorflow/python/kernel_tests/scalar_strict_test.py b/tensorflow/python/kernel_tests/scalar_strict_test.py
index eec6ec3f94..6c49644859 100644
--- a/tensorflow/python/kernel_tests/scalar_strict_test.py
+++ b/tensorflow/python/kernel_tests/scalar_strict_test.py
@@ -12,20 +12,26 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for scalar strictness and scalar leniency."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+from tensorflow.python.framework import ops
+from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_io_ops
+from tensorflow.python.ops import math_ops
+from tensorflow.python.ops import random_ops
+from tensorflow.python.ops import sparse_ops
+import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import control_imports
+from tensorflow.python.platform import test
-class ScalarStrictTest(tf.test.TestCase):
+class ScalarStrictTest(test.TestCase):
def check(self, op, args, error, correct=None):
# Within Google, the switch to scalar strict occurred at version 6.
@@ -42,14 +48,14 @@ class ScalarStrictTest(tf.test.TestCase):
if isinstance(args, tuple):
return [placeholders(x, feed) for x in args]
else:
- x = tf.convert_to_tensor(args).eval()
- fake = tf.placeholder(np.asarray(x).dtype)
+ x = ops.convert_to_tensor(args).eval()
+ fake = array_ops.placeholder(np.asarray(x).dtype)
feed[fake] = x
return fake
# Test various GraphDef versions
for version in strict + lenient:
- with tf.Graph().as_default() as g:
+ with ops.Graph().as_default() as g:
g.graph_def_versions.producer = version
with self.test_session(graph=g) as sess:
feed = {}
@@ -64,61 +70,61 @@ class ScalarStrictTest(tf.test.TestCase):
self.assertAllEqual(r, correct)
def testConcat(self):
- self.check(tf.concat, ([0], ([2], [3], [7])),
+ self.check(array_ops.concat, ([0], ([2], [3], [7])),
'axis tensor should be a scalar integer', [2, 3, 7])
for data in (2, 3, 7), (2, [3], 7), (2, 3, [7]):
- self.check(tf.concat, (0, data),
+ self.check(array_ops.concat, (0, data),
r'Expected \w+ dimensions in the range \[0, 0\)', [2, 3, 7])
for data in ([2], 3, 7), ([2], [3], 7):
- self.check(tf.concat, (0, data),
+ self.check(array_ops.concat, (0, data),
r'Ranks of all input tensors should match', [2, 3, 7])
def testFill(self):
- self.check(tf.fill, (2, 3), 'dims must be a vector', [3, 3])
- self.check(tf.fill, ([2], [3]), 'value must be a scalar', [3, 3])
+ self.check(array_ops.fill, (2, 3), 'dims must be a vector', [3, 3])
+ self.check(array_ops.fill, ([2], [3]), 'value must be a scalar', [3, 3])
def testPad(self):
- self.check(tf.pad, (7, [[1, 2]]),
+ self.check(array_ops.pad, (7, [[1, 2]]),
'The first dimension of paddings must be the rank of inputs',
[0, 7, 0, 0])
def testRandom(self):
- self.check(tf.random_uniform, (3,), 'shape must be a vector')
+ self.check(random_ops.random_uniform, (3,), 'shape must be a vector')
def testReshape(self):
- self.check(tf.reshape, (7, 1), 'sizes input must be 1-D', [7])
+ self.check(array_ops.reshape, (7, 1), 'sizes input must be 1-D', [7])
def testShardedFilename(self):
self.check(gen_io_ops._sharded_filename, ('foo', 4, [100]),
'must be a scalar', b'foo-00004-of-00100')
def testShardedFilespec(self):
- self.check(gen_io_ops._sharded_filespec, ('foo', [100]),
- 'must be a scalar', b'foo-?????-of-00100')
+ self.check(gen_io_ops._sharded_filespec, ('foo', [100]), 'must be a scalar',
+ b'foo-?????-of-00100')
def testUnsortedSegmentSum(self):
- self.check(tf.unsorted_segment_sum, (7, 1, [4]),
+ self.check(math_ops.unsorted_segment_sum, (7, 1, [4]),
'num_segments should be a scalar', [0, 7, 0, 0])
def testRange(self):
- self.check(tf.range, ([0], 3, 2), 'start must be a scalar', [0, 2])
- self.check(tf.range, (0, [3], 2), 'limit must be a scalar', [0, 2])
- self.check(tf.range, (0, 3, [2]), 'delta must be a scalar', [0, 2])
+ self.check(math_ops.range, ([0], 3, 2), 'start must be a scalar', [0, 2])
+ self.check(math_ops.range, (0, [3], 2), 'limit must be a scalar', [0, 2])
+ self.check(math_ops.range, (0, 3, [2]), 'delta must be a scalar', [0, 2])
def testSlice(self):
data = np.arange(10)
error = 'Expected begin and size arguments to be 1-D tensors'
- self.check(tf.slice, (data, 2, 3), error, [2, 3, 4])
- self.check(tf.slice, (data, [2], 3), error, [2, 3, 4])
- self.check(tf.slice, (data, 2, [3]), error, [2, 3, 4])
+ self.check(array_ops.slice, (data, 2, 3), error, [2, 3, 4])
+ self.check(array_ops.slice, (data, [2], 3), error, [2, 3, 4])
+ self.check(array_ops.slice, (data, 2, [3]), error, [2, 3, 4])
def testSparseToDense(self):
- self.check(tf.sparse_to_dense, (1, 4, 7),
+ self.check(sparse_ops.sparse_to_dense, (1, 4, 7),
'output_shape should be a vector', [0, 7, 0, 0])
def testTile(self):
- self.check(tf.tile, ([7], 2), 'Expected multiples to be 1-D', [7, 7])
+ self.check(array_ops.tile, ([7], 2), 'Expected multiples to be 1-D', [7, 7])
if __name__ == '__main__':
- tf.test.main()
+ test.main()
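
Context for the check() calls above: per the comment in the helper, GraphDef producer version 6 is where scalar-shape strictness begins, so each case is evaluated under both older (lenient) and newer (strict) versions and must either produce `correct` or raise `error`. A sketch of that gating pattern (version numbers per the helper's comment; the op-building step is elided):

    from tensorflow.python.framework import ops

    for producer_version in (5, 6):          # strictness begins at producer version 6
      with ops.Graph().as_default() as g:
        g.graph_def_versions.producer = producer_version
        # Build the op under test here: lenient versions must yield `correct`,
        # strict versions must raise the documented `error`.
        pass
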
diff --git a/tensorflow/python/kernel_tests/scan_ops_test.py b/tensorflow/python/kernel_tests/scan_ops_test.py
index 058bb53342..6b2b589a06 100644
--- a/tensorflow/python/kernel_tests/scan_ops_test.py
+++ b/tensorflow/python/kernel_tests/scan_ops_test.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Functional tests for scan ops."""
from __future__ import absolute_import
@@ -20,7 +19,12 @@ from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+
+from tensorflow.python.framework import errors_impl
+from tensorflow.python.framework import ops
+from tensorflow.python.ops import gradient_checker
+from tensorflow.python.ops import math_ops
+from tensorflow.python.platform import test
def numpy_reverse(x, axis):
@@ -28,8 +32,9 @@ def numpy_reverse(x, axis):
if axis < 0:
axis = length + axis
- ix = [slice(None, None, -1)
- if i == axis else slice(None) for i in range(length)]
+ ix = [
+ slice(None, None, -1) if i == axis else slice(None) for i in range(length)
+ ]
return x[ix]
@@ -43,10 +48,10 @@ def handle_options(func, x, axis, exclusive, reverse):
x = numpy_reverse(x, axis)
if exclusive:
- ix_head = [slice(0, 1) if i == axis else slice(None)
- for i in range(length)]
- ix_init = [slice(0, -1) if i == axis else slice(None)
- for i in range(length)]
+ ix_head = [slice(0, 1) if i == axis else slice(None) for i in range(length)]
+ ix_init = [
+ slice(0, -1) if i == axis else slice(None) for i in range(length)
+ ]
if func == np.cumsum:
init = np.zeros_like(x[ix_head])
elif func == np.cumprod:
@@ -62,15 +67,17 @@ def handle_options(func, x, axis, exclusive, reverse):
return x
-class CumsumTest(tf.test.TestCase):
+class CumsumTest(test.TestCase):
- valid_dtypes = [np.int32, np.int64, np.float16, np.float32,
- np.float64, np.complex64, np.complex128]
+ valid_dtypes = [
+ np.int32, np.int64, np.float16, np.float32, np.float64, np.complex64,
+ np.complex128
+ ]
def _compare(self, x, axis, exclusive, reverse):
np_out = handle_options(np.cumsum, x, axis, exclusive, reverse)
with self.test_session(use_gpu=True):
- tf_out = tf.cumsum(x, axis, exclusive, reverse).eval()
+ tf_out = math_ops.cumsum(x, axis, exclusive, reverse).eval()
self.assertAllClose(np_out, tf_out)
@@ -111,32 +118,28 @@ class CumsumTest(tf.test.TestCase):
def testInvalidAxis(self):
x = np.arange(0, 10).reshape([2, 5]).astype(np.float32)
- input_tensor = tf.convert_to_tensor(x)
+ input_tensor = ops.convert_to_tensor(x)
with self.test_session(use_gpu=True):
with self.assertRaisesWithPredicateMatch(
- tf.errors.InvalidArgumentError,
+ errors_impl.InvalidArgumentError,
lambda e: "Expected scan axis in the range [-2, 2)" in str(e)):
- tf.cumsum(input_tensor, -3).eval()
+ math_ops.cumsum(input_tensor, -3).eval()
with self.assertRaisesWithPredicateMatch(
- tf.errors.InvalidArgumentError,
+ errors_impl.InvalidArgumentError,
lambda e: "Expected scan axis in the range [-2, 2)" in str(e)):
- tf.cumsum(input_tensor, 2).eval()
+ math_ops.cumsum(input_tensor, 2).eval()
with self.assertRaisesWithPredicateMatch(
- tf.errors.InvalidArgumentError,
+ errors_impl.InvalidArgumentError,
lambda e: "axis must be a scalar" in str(e)):
- tf.cumsum(input_tensor, [0]).eval()
+ math_ops.cumsum(input_tensor, [0]).eval()
def _compareGradient(self, shape, axis, exclusive, reverse):
x = np.arange(0, 50).reshape(shape).astype(np.float64)
with self.test_session(use_gpu=True):
- t = tf.convert_to_tensor(x)
- result = tf.cumsum(t, axis, exclusive, reverse)
- jacob_t, jacob_n = tf.test.compute_gradient(t,
- shape,
- result,
- shape,
- x_init_value=x,
- delta=1)
+ t = ops.convert_to_tensor(x)
+ result = math_ops.cumsum(t, axis, exclusive, reverse)
+ jacob_t, jacob_n = gradient_checker.compute_gradient(
+ t, shape, result, shape, x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
def testGradient(self):
@@ -162,15 +165,17 @@ class CumsumTest(tf.test.TestCase):
self._compareGradient([5, 10], axis, exclusive, reverse)
-class CumprodTest(tf.test.TestCase):
+class CumprodTest(test.TestCase):
- valid_dtypes = [np.int32, np.int64, np.float16, np.float32,
- np.float64, np.complex64, np.complex128]
+ valid_dtypes = [
+ np.int32, np.int64, np.float16, np.float32, np.float64, np.complex64,
+ np.complex128
+ ]
def _compare(self, x, axis, exclusive, reverse):
np_out = handle_options(np.cumprod, x, axis, exclusive, reverse)
with self.test_session(use_gpu=True):
- tf_out = tf.cumprod(x, axis, exclusive, reverse).eval()
+ tf_out = math_ops.cumprod(x, axis, exclusive, reverse).eval()
self.assertAllClose(np_out, tf_out)
@@ -211,32 +216,28 @@ class CumprodTest(tf.test.TestCase):
def testInvalidAxis(self):
x = np.arange(0, 10).reshape([2, 5]).astype(np.float32)
- input_tensor = tf.convert_to_tensor(x)
+ input_tensor = ops.convert_to_tensor(x)
with self.test_session(use_gpu=True):
with self.assertRaisesWithPredicateMatch(
- tf.errors.InvalidArgumentError,
+ errors_impl.InvalidArgumentError,
lambda e: "Expected scan axis in the range [-2, 2)" in str(e)):
- tf.cumprod(input_tensor, -3).eval()
+ math_ops.cumprod(input_tensor, -3).eval()
with self.assertRaisesWithPredicateMatch(
- tf.errors.InvalidArgumentError,
+ errors_impl.InvalidArgumentError,
lambda e: "Expected scan axis in the range [-2, 2)" in str(e)):
- tf.cumprod(input_tensor, 2).eval()
+ math_ops.cumprod(input_tensor, 2).eval()
with self.assertRaisesWithPredicateMatch(
- tf.errors.InvalidArgumentError,
+ errors_impl.InvalidArgumentError,
lambda e: "axis must be a scalar" in str(e)):
- tf.cumprod(input_tensor, [0]).eval()
+ math_ops.cumprod(input_tensor, [0]).eval()
def _compareGradient(self, shape, axis, exclusive, reverse):
x = np.arange(1, 9).reshape(shape).astype(np.float64)
with self.test_session(use_gpu=True):
- t = tf.convert_to_tensor(x)
- result = tf.cumprod(t, axis, exclusive, reverse)
- jacob_t, jacob_n = tf.test.compute_gradient(t,
- shape,
- result,
- shape,
- x_init_value=x,
- delta=1)
+ t = ops.convert_to_tensor(x)
+ result = math_ops.cumprod(t, axis, exclusive, reverse)
+ jacob_t, jacob_n = gradient_checker.compute_gradient(
+ t, shape, result, shape, x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
def testGradient(self):
@@ -263,4 +264,4 @@ class CumprodTest(tf.test.TestCase):
if __name__ == "__main__":
- tf.test.main()
+ test.main()
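
For readers tracing handle_options: the exclusive and reverse flags compose with the plain scan exactly as in numpy, and math_ops.cumsum is asserted to match the numpy result above. A small worked example of the three variants:

    import numpy as np

    x = np.array([1, 2, 3, 4])
    print(np.cumsum(x))                              # inclusive: [ 1  3  6 10]
    print(np.concatenate([[0], np.cumsum(x)[:-1]]))  # exclusive: [0 1 3 6]
    print(np.cumsum(x[::-1])[::-1])                  # reverse:   [10  9  7  4]
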
diff --git a/tensorflow/python/kernel_tests/scatter_nd_ops_test.py b/tensorflow/python/kernel_tests/scatter_nd_ops_test.py
index 1b1810e175..bad6a2fc78 100644
--- a/tensorflow/python/kernel_tests/scatter_nd_ops_test.py
+++ b/tensorflow/python/kernel_tests/scatter_nd_ops_test.py
@@ -13,6 +13,7 @@
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.scatter_nd."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
@@ -20,7 +21,15 @@ from __future__ import print_function
import functools
import numpy as np
-import tensorflow as tf
+
+from tensorflow.python.client import session
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import gradients_impl
+from tensorflow.python.ops import state_ops
+from tensorflow.python.ops import variables
+from tensorflow.python.platform import test
def _AsType(v, vtype):
@@ -29,16 +38,16 @@ def _AsType(v, vtype):
def _FlatInnerDims(tensor, ndims=2):
shape = list(tensor.shape)
- return tensor.reshape([functools.reduce(lambda x, y: x * y,
- shape[:-ndims + 1], 1)] +
- shape[-ndims + 1:])
+ return tensor.reshape([
+ functools.reduce(lambda x, y: x * y, shape[:-ndims + 1], 1)
+ ] + shape[-ndims + 1:])
def _FlatOuterDims(tensor, ndims=2):
shape = list(tensor.shape)
- return tensor.reshape(shape[:ndims - 1] +
- [functools.reduce(lambda x, y: x * y,
- shape[ndims - 1:], 1)])
+ return tensor.reshape(shape[:ndims - 1] + [
+ functools.reduce(lambda x, y: x * y, shape[ndims - 1:], 1)
+ ])
def _NumpyScatterNd(ref, indices, updates, op):
@@ -78,7 +87,7 @@ def _NumpyDiv(ref, indices, updates):
return _NumpyScatterNd(ref, indices, updates, lambda p, u: p / u)
-class ScatterNdTest(tf.test.TestCase):
+class ScatterNdTest(test.TestCase):
def _VariableRankTest(self,
np_scatter,
@@ -125,7 +134,7 @@ class ScatterNdTest(tf.test.TestCase):
new = ref.copy()
np_scatter(new, indices, updates)
# Scatter via tensorflow
- ref_var = tf.Variable(ref)
+ ref_var = variables.Variable(ref)
ref_var.initializer.run()
tf_scatter(ref_var, indices, updates).eval()
@@ -139,13 +148,13 @@ class ScatterNdTest(tf.test.TestCase):
self._VariableRankTest(np_scatter, tf_scatter, vtype, itype, use_gpu)
def testVariableRankUpdate(self):
- self._VariableRankTests(_NumpyUpdate, tf.scatter_nd_update)
+ self._VariableRankTests(_NumpyUpdate, state_ops.scatter_nd_update)
def testVariableRankAdd(self):
- self._VariableRankTests(_NumpyAdd, tf.scatter_nd_add)
+ self._VariableRankTests(_NumpyAdd, state_ops.scatter_nd_add)
def testVariableRankSub(self):
- self._VariableRankTests(_NumpySub, tf.scatter_nd_sub)
+ self._VariableRankTests(_NumpySub, state_ops.scatter_nd_sub)
# TODO(simister): Re-enable once binary size increase due to
# scatter_nd ops is under control.
@@ -169,8 +178,8 @@ class ScatterNdTest(tf.test.TestCase):
def testScatterRepeatIndices(self):
"""This tests scatter_add using indices that repeat."""
- self._ScatterRepeatIndicesTest(_NumpyAdd, tf.scatter_nd_add)
- self._ScatterRepeatIndicesTest(_NumpySub, tf.scatter_nd_sub)
+ self._ScatterRepeatIndicesTest(_NumpyAdd, state_ops.scatter_nd_add)
+ self._ScatterRepeatIndicesTest(_NumpySub, state_ops.scatter_nd_sub)
# TODO(simister): Re-enable once binary size increase due to
# extra templating is back under control.
# self._ScatterRepeatIndicesTest(_NumpyMul, tf.scatter_nd_mul)
@@ -193,11 +202,12 @@ class ScatterNdTest(tf.test.TestCase):
# TODO(simister): Re-enable once binary size increase due to
# scatter_nd ops is under control.
# tf.scatter_nd_mul, tf.scatter_nd_div,
- for op in (tf.scatter_nd_add, tf.scatter_nd_sub, tf.scatter_nd_update):
+ for op in (state_ops.scatter_nd_add, state_ops.scatter_nd_sub,
+ state_ops.scatter_nd_update):
params = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32)
updates = np.array([-3, -4, -5]).astype(np.float32)
with self.test_session(use_gpu=False):
- ref = tf.Variable(params)
+ ref = variables.Variable(params)
ref.initializer.run()
# Indices all in range, no problem.
@@ -216,29 +226,30 @@ class ScatterNdTest(tf.test.TestCase):
op(ref, indices, updates).eval()
def testRank3ValidShape(self):
- indices = tf.zeros([2, 2, 2], tf.int32)
- updates = tf.zeros([2, 2, 2], tf.int32)
+ indices = array_ops.zeros([2, 2, 2], dtypes.int32)
+ updates = array_ops.zeros([2, 2, 2], dtypes.int32)
shape = np.array([2, 2, 2])
self.assertAllEqual(
- tf.scatter_nd(indices, updates, shape).get_shape().as_list(), shape)
+ array_ops.scatter_nd(indices, updates, shape).get_shape().as_list(),
+ shape)
- ref = tf.Variable(tf.zeros(shape, tf.int32))
+ ref = variables.Variable(array_ops.zeros(shape, dtypes.int32))
self.assertAllEqual(
- tf.scatter_nd_update(ref, indices, updates).get_shape().as_list(),
- shape)
+ state_ops.scatter_nd_update(ref, indices,
+ updates).get_shape().as_list(), shape)
def testExtraIndicesDimensions(self):
- indices = tf.zeros([1, 1, 2], tf.int32)
- updates = tf.zeros([1, 1], tf.int32)
+ indices = array_ops.zeros([1, 1, 2], dtypes.int32)
+ updates = array_ops.zeros([1, 1], dtypes.int32)
shape = np.array([2, 2])
- scatter = tf.scatter_nd(indices, updates, shape)
+ scatter = array_ops.scatter_nd(indices, updates, shape)
self.assertAllEqual(scatter.get_shape().as_list(), shape)
expected_result = np.zeros([2, 2], dtype=np.int32)
with self.test_session():
self.assertAllEqual(expected_result, scatter.eval())
- ref = tf.Variable(tf.zeros(shape, tf.int32))
- scatter_update = tf.scatter_nd_update(ref, indices, updates)
+ ref = variables.Variable(array_ops.zeros(shape, dtypes.int32))
+ scatter_update = state_ops.scatter_nd_update(ref, indices, updates)
self.assertAllEqual(scatter_update.get_shape().as_list(), shape)
with self.test_session():
@@ -246,39 +257,39 @@ class ScatterNdTest(tf.test.TestCase):
self.assertAllEqual(expected_result, scatter_update.eval())
def testUndefinedIndicesShape(self):
- indices = tf.placeholder(tf.int32, shape=None)
- updates = tf.placeholder(tf.int32, shape=[2, 2, 2])
- shape = tf.constant([2, 2, 2], tf.int32)
- tf.scatter_nd(indices, updates, shape)
+ indices = array_ops.placeholder(dtypes.int32, shape=None)
+ updates = array_ops.placeholder(dtypes.int32, shape=[2, 2, 2])
+ shape = constant_op.constant([2, 2, 2], dtypes.int32)
+ array_ops.scatter_nd(indices, updates, shape)
def testUndefinedUpdatesShape(self):
- indices = tf.placeholder(tf.int32, shape=[2, 2, 2])
- updates = tf.placeholder(tf.int32, shape=None)
- shape = tf.constant([2, 2, 2], tf.int32)
- tf.scatter_nd(indices, updates, shape)
+ indices = array_ops.placeholder(dtypes.int32, shape=[2, 2, 2])
+ updates = array_ops.placeholder(dtypes.int32, shape=None)
+ shape = constant_op.constant([2, 2, 2], dtypes.int32)
+ array_ops.scatter_nd(indices, updates, shape)
def testUndefinedOutputShape(self):
- indices = tf.placeholder(tf.int32, shape=[2, 2, 2])
- updates = tf.placeholder(tf.int32, shape=[2, 2, 2])
- shape = tf.placeholder(tf.int32, shape=[None])
- tf.scatter_nd(indices, updates, shape)
+ indices = array_ops.placeholder(dtypes.int32, shape=[2, 2, 2])
+ updates = array_ops.placeholder(dtypes.int32, shape=[2, 2, 2])
+ shape = array_ops.placeholder(dtypes.int32, shape=[None])
+ array_ops.scatter_nd(indices, updates, shape)
def testEmptyOutputShape1(self):
- indices = tf.zeros([2, 2, 2], tf.int32)
- updates = tf.zeros([2, 2, 2], tf.int32)
- shape = tf.constant([0, 3, 2], tf.int32)
+ indices = array_ops.zeros([2, 2, 2], dtypes.int32)
+ updates = array_ops.zeros([2, 2, 2], dtypes.int32)
+ shape = constant_op.constant([0, 3, 2], dtypes.int32)
with self.assertRaisesWithPredicateMatch(
ValueError, "Indices and updates specified for empty output shape"):
- tf.scatter_nd(indices, updates, shape)
+ array_ops.scatter_nd(indices, updates, shape)
def testEmptyOutputShape2(self):
- indices = tf.placeholder(tf.int32, shape=None)
- updates = tf.placeholder(tf.int32, shape=None)
- shape = tf.constant([0, 3, 2], tf.int32)
+ indices = array_ops.placeholder(dtypes.int32, shape=None)
+ updates = array_ops.placeholder(dtypes.int32, shape=None)
+ shape = constant_op.constant([0, 3, 2], dtypes.int32)
with self.test_session():
- tf.scatter_nd(indices, updates, shape).eval(feed_dict={
+ array_ops.scatter_nd(indices, updates, shape).eval(feed_dict={
indices: np.zeros(
[2, 2, 2], dtype=np.int32),
updates: np.zeros(
@@ -286,74 +297,75 @@ class ScatterNdTest(tf.test.TestCase):
})
def testEmptyOutputShape3(self):
- indices = tf.zeros([0], tf.int32)
- updates = tf.zeros([0], tf.int32)
- shape = tf.constant([0], tf.int32)
- scatter = tf.scatter_nd(indices, updates, shape)
+ indices = array_ops.zeros([0], dtypes.int32)
+ updates = array_ops.zeros([0], dtypes.int32)
+ shape = constant_op.constant([0], dtypes.int32)
+ scatter = array_ops.scatter_nd(indices, updates, shape)
with self.test_session():
self.assertEqual(scatter.eval().size, 0)
def testRank3InvalidShape1(self):
- indices = tf.zeros([3, 2, 2], tf.int32)
- updates = tf.zeros([2, 2, 2], tf.int32)
+ indices = array_ops.zeros([3, 2, 2], dtypes.int32)
+ updates = array_ops.zeros([2, 2, 2], dtypes.int32)
shape = np.array([2, 2, 2])
with self.assertRaisesWithPredicateMatch(
ValueError, "The outer \\d+ dimensions of indices\\.shape="):
- tf.scatter_nd(indices, updates, shape)
+ array_ops.scatter_nd(indices, updates, shape)
- ref = tf.Variable(tf.zeros(shape, tf.int32))
+ ref = variables.Variable(array_ops.zeros(shape, dtypes.int32))
with self.assertRaisesWithPredicateMatch(
ValueError, "The outer \\d+ dimensions of indices\\.shape="):
- tf.scatter_nd_update(ref, indices, updates)
+ state_ops.scatter_nd_update(ref, indices, updates)
def testRank3InvalidShape2(self):
- indices = tf.zeros([2, 2, 1], tf.int32)
- updates = tf.zeros([2, 2], tf.int32)
+ indices = array_ops.zeros([2, 2, 1], dtypes.int32)
+ updates = array_ops.zeros([2, 2], dtypes.int32)
shape = np.array([2, 2, 2])
with self.assertRaisesWithPredicateMatch(
ValueError, "The inner \\d+ dimensions of output\\.shape="):
- tf.scatter_nd(indices, updates, shape)
+ array_ops.scatter_nd(indices, updates, shape)
- ref = tf.Variable(tf.zeros(shape, tf.int32))
+ ref = variables.Variable(array_ops.zeros(shape, dtypes.int32))
with self.assertRaisesWithPredicateMatch(
ValueError, "The inner \\d+ dimensions of ref\\.shape="):
- tf.scatter_nd_update(ref, indices, updates)
+ state_ops.scatter_nd_update(ref, indices, updates)
def testGradientsRank2ElementUpdate(self):
- indices = tf.constant([[0, 0], [1, 1]], dtype=tf.int32)
- updates = tf.constant([1, 4], dtype=tf.float64)
- shape = tf.constant([2, 2], dtype=tf.int32)
- outputs = tf.scatter_nd(indices, updates, shape)
+ indices = constant_op.constant([[0, 0], [1, 1]], dtype=dtypes.int32)
+ updates = constant_op.constant([1, 4], dtype=dtypes.float64)
+ shape = constant_op.constant([2, 2], dtype=dtypes.int32)
+ outputs = array_ops.scatter_nd(indices, updates, shape)
- grad_vals = tf.constant([[1, 2], [3, 4]], dtype=tf.float64)
- grads = tf.gradients([outputs], [updates], [grad_vals])[0]
+ grad_vals = constant_op.constant([[1, 2], [3, 4]], dtype=dtypes.float64)
+ grads = gradients_impl.gradients([outputs], [updates], [grad_vals])[0]
expected_grads = np.array([1, 4], dtype=np.float64)
with self.test_session():
self.assertAllEqual(expected_grads, grads.eval())
def testGradientsRank2SliceUpdate(self):
- indices = tf.constant([[1], [0]], dtype=tf.int32)
- updates = tf.constant([[3, 4], [1, 2]], dtype=tf.float64)
- shape = tf.constant([2, 2], dtype=tf.int32)
- outputs = tf.scatter_nd(indices, updates, shape)
+ indices = constant_op.constant([[1], [0]], dtype=dtypes.int32)
+ updates = constant_op.constant([[3, 4], [1, 2]], dtype=dtypes.float64)
+ shape = constant_op.constant([2, 2], dtype=dtypes.int32)
+ outputs = array_ops.scatter_nd(indices, updates, shape)
- grad_vals = tf.constant([[3, 4], [1, 2]], dtype=tf.float64)
- grads = tf.gradients([outputs], [updates], [grad_vals])[0]
+ grad_vals = constant_op.constant([[3, 4], [1, 2]], dtype=dtypes.float64)
+ grads = gradients_impl.gradients([outputs], [updates], [grad_vals])[0]
expected_grads = np.array([[1, 2], [3, 4]], dtype=np.float64)
with self.test_session():
self.assertAllEqual(expected_grads, grads.eval())
def testGradientsRank3SliceUpdate(self):
- indices = tf.constant([[[0, 1], [1, 0]], [[0, 0], [1, 1]]], dtype=tf.int32)
- updates = tf.constant(
- [[[5, 7], [2, 4]], [[1, 3], [6, 8]]], dtype=tf.float64)
- shape = tf.constant([2, 2, 2], dtype=tf.int32)
- outputs = tf.scatter_nd(indices, updates, shape)
-
- grad_vals = tf.constant(
- [[[1, 2], [3, 4]], [[5, 6], [7, 8]]], dtype=tf.float64)
- grads = tf.gradients([outputs], [updates], [grad_vals])[0]
+ indices = constant_op.constant(
+ [[[0, 1], [1, 0]], [[0, 0], [1, 1]]], dtype=dtypes.int32)
+ updates = constant_op.constant(
+ [[[5, 7], [2, 4]], [[1, 3], [6, 8]]], dtype=dtypes.float64)
+ shape = constant_op.constant([2, 2, 2], dtype=dtypes.int32)
+ outputs = array_ops.scatter_nd(indices, updates, shape)
+
+ grad_vals = constant_op.constant(
+ [[[1, 2], [3, 4]], [[5, 6], [7, 8]]], dtype=dtypes.float64)
+ grads = gradients_impl.gradients([outputs], [updates], [grad_vals])[0]
expected_grads = np.array(
[[[3, 4], [5, 6]], [[1, 2], [7, 8]]], dtype=np.float64)
with self.test_session():
@@ -362,35 +374,36 @@ class ScatterNdTest(tf.test.TestCase):
def testConcurrentUpdates(self):
num_updates = 10000
update_values = np.random.rand(num_updates)
- ref = tf.Variable(np.zeros([2, 2]), dtype=tf.float64)
- indices = tf.constant([[0, 1]] * num_updates, dtype=tf.int32)
- updates = tf.constant(update_values, dtype=tf.float64)
+ ref = variables.Variable(np.zeros([2, 2]), dtype=dtypes.float64)
+ indices = constant_op.constant([[0, 1]] * num_updates, dtype=dtypes.int32)
+ updates = constant_op.constant(update_values, dtype=dtypes.float64)
exepected_result = np.zeros([2, 2], dtype=np.float64)
exepected_result[0, 1] = np.sum(update_values)
- scatter = tf.scatter_nd_add(ref, indices, updates)
- init = tf.global_variables_initializer()
+ scatter = state_ops.scatter_nd_add(ref, indices, updates)
+ init = variables.global_variables_initializer()
- with tf.Session() as sess:
+ with session.Session() as sess:
sess.run(init)
result = sess.run(scatter)
assert np.allclose(result, exepected_result)
# TODO(fpmc): Re-enable this test when gpu_pip test actually runs on a GPU.
def _disabledTestScatterOutOfRangeGpu(self):
- if not tf.test.IsBuiltWithCuda():
+ if not test.IsBuiltWithCuda():
return
# TODO(simister): Re-enable once binary size increase due to
# scatter_nd ops is under control.
# tf.scatter_nd_mul, tf.scatter_nd_div,
- for op in (tf.scatter_nd_add, tf.scatter_nd_sub, tf.scatter_nd_update):
+ for op in (state_ops.scatter_nd_add, state_ops.scatter_nd_sub,
+ state_ops.scatter_nd_update):
params = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32)
updates = np.array([-3, -4, -5]).astype(np.float32)
# With GPU, the code ignores indices that are out of range.
# We don't test the implementation; just test there's no failures.
with self.test_session(force_gpu=True):
- ref = tf.Variable(params)
+ ref = variables.Variable(params)
ref.initializer.run()
# Indices all in range, no problem.
@@ -404,13 +417,13 @@ class ScatterNdTest(tf.test.TestCase):
op(ref, indices, updates).eval()
def testScatterNdRepatedIndicesAdd(self):
- indices = tf.zeros([100000, 1], tf.int32)
+ indices = array_ops.zeros([100000, 1], dtypes.int32)
values = np.random.randn(100000)
shape = [1]
with self.test_session():
- val = tf.scatter_nd(indices, values, shape).eval()
+ val = array_ops.scatter_nd(indices, values, shape).eval()
self.assertAllClose([np.sum(values)], val)
if __name__ == "__main__":
- tf.test.main()
+ test.main()
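
One behavioral point the tests above pin down (testConcurrentUpdates and the repeated-indices case): when indices repeat, scatter_nd accumulates the corresponding updates instead of letting one overwrite another. A numpy model of that semantics:

    import numpy as np

    indices = np.array([[0, 1]] * 4)   # every update targets element [0, 1]
    updates = np.array([1., 2., 3., 4.])
    out = np.zeros([2, 2])
    for idx, u in zip(indices, updates):
      out[tuple(idx)] += u             # accumulate rather than assign
    print(out)                         # [[ 0. 10.] [ 0.  0.]]
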
diff --git a/tensorflow/python/kernel_tests/scatter_ops_test.py b/tensorflow/python/kernel_tests/scatter_ops_test.py
index dba14e6daf..7cdf11d884 100644
--- a/tensorflow/python/kernel_tests/scatter_ops_test.py
+++ b/tensorflow/python/kernel_tests/scatter_ops_test.py
@@ -13,12 +13,18 @@
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.scatter."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.ops import state_ops
+from tensorflow.python.ops import variables
+from tensorflow.python.platform import test
def _AsType(v, vtype):
@@ -53,21 +59,17 @@ def _NumpyUpdate(ref, indices, updates):
_TF_OPS_TO_NUMPY = {
- tf.scatter_update: _NumpyUpdate,
- tf.scatter_add: _NumpyAdd,
- tf.scatter_sub: _NumpySub,
- tf.scatter_mul: _NumpyMul,
- tf.scatter_div: _NumpyDiv,
+ state_ops.scatter_update: _NumpyUpdate,
+ state_ops.scatter_add: _NumpyAdd,
+ state_ops.scatter_sub: _NumpySub,
+ state_ops.scatter_mul: _NumpyMul,
+ state_ops.scatter_div: _NumpyDiv,
}
-class ScatterTest(tf.test.TestCase):
+class ScatterTest(test.TestCase):
- def _VariableRankTest(self,
- tf_scatter,
- vtype,
- itype,
- repeat_indices=False):
+ def _VariableRankTest(self, tf_scatter, vtype, itype, repeat_indices=False):
np.random.seed(8)
with self.test_session(use_gpu=True):
for indices_shape in (), (2,), (3, 7), (3, 4, 7):
@@ -89,9 +91,11 @@ class ScatterTest(tf.test.TestCase):
indices = indices.reshape(indices_shape)
updates = _AsType(
np.random.randn(*(indices_shape + extra_shape)), vtype)
+
# Clips small values to avoid division by zero.
def clip_small_values(x):
return 1e-4 * np.sign(x) if np.abs(x) < 1e-4 else x
+
updates = np.vectorize(clip_small_values)(updates)
old = _AsType(np.random.randn(*((first_dim,) + extra_shape)), vtype)
@@ -100,7 +104,7 @@ class ScatterTest(tf.test.TestCase):
np_scatter = _TF_OPS_TO_NUMPY[tf_scatter]
np_scatter(new, indices, updates)
# Scatter via tensorflow
- ref = tf.Variable(old)
+ ref = variables.Variable(old)
ref.initializer.run()
tf_scatter(ref, indices, updates).eval()
self.assertAllClose(ref.eval(), new)
@@ -111,38 +115,40 @@ class ScatterTest(tf.test.TestCase):
self._VariableRankTest(tf_scatter, vtype, itype, repeat_indices)
def testVariableRankUpdate(self):
- self._VariableRankTests(tf.scatter_update)
+ self._VariableRankTests(state_ops.scatter_update)
def testVariableRankAdd(self):
- self._VariableRankTests(tf.scatter_add)
+ self._VariableRankTests(state_ops.scatter_add)
def testVariableRankSub(self):
- self._VariableRankTests(tf.scatter_sub)
+ self._VariableRankTests(state_ops.scatter_sub)
def testVariableRankMul(self):
- self._VariableRankTests(tf.scatter_mul)
+ self._VariableRankTests(state_ops.scatter_mul)
def testVariableRankDiv(self):
- self._VariableRankTests(tf.scatter_div)
+ self._VariableRankTests(state_ops.scatter_div)
def testRepeatIndicesAdd(self):
- self._VariableRankTests(tf.scatter_add, True)
+ self._VariableRankTests(state_ops.scatter_add, True)
def testRepeatIndicesSub(self):
- self._VariableRankTests(tf.scatter_sub, True)
+ self._VariableRankTests(state_ops.scatter_sub, True)
def testRepeatIndicesMul(self):
- self._VariableRankTests(tf.scatter_mul, True)
+ self._VariableRankTests(state_ops.scatter_mul, True)
def testRepeatIndicesDiv(self):
- self._VariableRankTests(tf.scatter_div, True)
+ self._VariableRankTests(state_ops.scatter_div, True)
def testBooleanScatterUpdate(self):
- if not tf.test.is_gpu_available():
+ if not test.is_gpu_available():
with self.test_session(use_gpu=False) as session:
- var = tf.Variable([True, False])
- update0 = tf.scatter_update(var, 1, True)
- update1 = tf.scatter_update(var, tf.constant(0, dtype=tf.int64), False)
+ var = variables.Variable([True, False])
+ update0 = state_ops.scatter_update(var, 1, True)
+ update1 = state_ops.scatter_update(
+ var, constant_op.constant(
+ 0, dtype=dtypes.int64), False)
var.initializer.run()
session.run([update0, update1])
@@ -153,9 +159,9 @@ class ScatterTest(tf.test.TestCase):
for op, _ in _TF_OPS_TO_NUMPY.items():
params = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32)
updates = np.array([-3, -4, -5]).astype(np.float32)
- if not tf.test.is_gpu_available():
+ if not test.is_gpu_available():
with self.test_session(use_gpu=False):
- ref = tf.Variable(params)
+ ref = variables.Variable(params)
ref.initializer.run()
# Indices all in range, no problem.
@@ -174,7 +180,7 @@ class ScatterTest(tf.test.TestCase):
# TODO(fpmc): Re-enable this test when gpu_pip test actually runs on a GPU.
def _disabledTestScatterOutOfRangeGpu(self):
- if tf.test.is_gpu_available():
+ if test.is_gpu_available():
return
for op, _ in _TF_OPS_TO_NUMPY.items():
params = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32)
@@ -182,7 +188,7 @@ class ScatterTest(tf.test.TestCase):
# With GPU, the code ignores indices that are out of range.
# We don't test the implementation; just test there's no failures.
with self.test_session(force_gpu=True):
- ref = tf.Variable(params)
+ ref = variables.Variable(params)
ref.initializer.run()
# Indices all in range, no problem.
@@ -197,4 +203,4 @@ class ScatterTest(tf.test.TestCase):
if __name__ == '__main__':
- tf.test.main()
+ test.main()
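
As a reference for the _TF_OPS_TO_NUMPY table driving these tests: each scatter op indexes the first dimension of `ref` and applies its update elementwise, with scatter_update assigning rather than combining. A numpy sketch in the same style as the _Numpy* helpers:

    import numpy as np

    def numpy_scatter_update(ref, indices, updates):
      for i, ix in enumerate(indices):
        ref[ix] = updates[i]           # assignment; _NumpyAdd/_NumpySub combine instead
      return ref

    ref = np.array([1., 2., 3., 4., 5., 6.])
    print(numpy_scatter_update(ref, [2, 0, 5], np.array([-3., -4., -5.])))
    # -> [-4.  2. -3.  4.  5. -5.]
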
diff --git a/tensorflow/python/kernel_tests/segment_reduction_ops_test.py b/tensorflow/python/kernel_tests/segment_reduction_ops_test.py
index e53ac706d8..d7e3b3e79b 100644
--- a/tensorflow/python/kernel_tests/segment_reduction_ops_test.py
+++ b/tensorflow/python/kernel_tests/segment_reduction_ops_test.py
@@ -12,31 +12,36 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Functional tests for segment reduction ops."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
-from tensorflow.python.util.all_util import reveal_undocumented
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes as dtypes_lib
+from tensorflow.python.ops import gradient_checker
+from tensorflow.python.ops import math_ops
+import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
+from tensorflow.python.platform import test
-class SegmentReductionHelper(tf.test.TestCase):
+class SegmentReductionHelper(test.TestCase):
- def _input(self, input_shape, dtype=tf.int32):
+ def _input(self, input_shape, dtype=dtypes_lib.int32):
num_elem = 1
for x in input_shape:
num_elem *= x
values = np.arange(1, num_elem + 1)
np_values = values.reshape(input_shape).astype(dtype.as_numpy_dtype)
- return tf.constant(values, shape=input_shape,
- dtype=dtype), np_values
+ return constant_op.constant(
+ values, shape=input_shape, dtype=dtype), np_values
def _segmentReduce(self, indices, x, op1, op2=None, num_out_rows=None):
- if not x.size: return np.array([])
+ if not x.size:
+ return np.array([])
indices = np.asarray(indices)
if num_out_rows is None:
num_out_rows = indices[-1] + 1
@@ -63,36 +68,34 @@ class SegmentReductionHelper(tf.test.TestCase):
return (x[0] + y, x[1] + 1) if isinstance(x, tuple) else (x + y, 2)
def _mean_reduce_op(self, x):
- return x[0] / x[1] if isinstance(x, tuple) else x
+ return x[0] / x[1] if isinstance(x, tuple) else x
class SegmentReductionOpTest(SegmentReductionHelper):
def testValues(self):
- dtypes = [tf.float32,
- tf.float64,
- tf.int64,
- tf.int32,
- tf.complex64,
- tf.complex128]
+ dtypes = [
+ dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int64,
+ dtypes_lib.int32, dtypes_lib.complex64, dtypes_lib.complex128
+ ]
# Each item is np_op1, np_op2, tf_op
- ops_list = [(np.add, None, tf.segment_sum),
- (self._mean_cum_op, self._mean_reduce_op,
- tf.segment_mean),
- (np.ndarray.__mul__, None, tf.segment_prod),
- (np.minimum, None, tf.segment_min),
- (np.maximum, None, tf.segment_max)]
+ ops_list = [(np.add, None, math_ops.segment_sum), (self._mean_cum_op,
+ self._mean_reduce_op,
+ math_ops.segment_mean),
+ (np.ndarray.__mul__, None, math_ops.segment_prod),
+ (np.minimum, None, math_ops.segment_min),
+ (np.maximum, None, math_ops.segment_max)]
# A subset of ops has been enabled for complex numbers
- complex_ops_list = [(np.add, None, tf.segment_sum),
- (np.ndarray.__mul__, None, tf.segment_prod)]
+ complex_ops_list = [(np.add, None, math_ops.segment_sum),
+ (np.ndarray.__mul__, None, math_ops.segment_prod)]
n = 10
shape = [n, 2]
indices = [i // 3 for i in range(n)]
for dtype in dtypes:
- if dtype in (tf.complex64, tf.complex128):
+ if dtype in (dtypes_lib.complex64, dtypes_lib.complex128):
curr_ops_list = complex_ops_list
else:
curr_ops_list = ops_list
@@ -113,16 +116,16 @@ class SegmentReductionOpTest(SegmentReductionHelper):
def testSegmentIdsShape(self):
shape = [4, 4]
tf_x, _ = self._input(shape)
- indices = tf.constant([0, 1, 2, 2], shape=[2, 2])
+ indices = constant_op.constant([0, 1, 2, 2], shape=[2, 2])
with self.assertRaises(ValueError):
- tf.segment_sum(data=tf_x, segment_ids=indices)
+ math_ops.segment_sum(data=tf_x, segment_ids=indices)
def testSegmentIdsSize(self):
shape = [4, 4]
with self.test_session():
tf_x, _ = self._input(shape)
indices = [0, 1]
- s = tf.segment_sum(data=tf_x, segment_ids=indices)
+ s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
with self.assertRaisesOpError("segment_ids should be the same size"):
s.eval()
@@ -132,7 +135,7 @@ class SegmentReductionOpTest(SegmentReductionHelper):
with self.test_session():
tf_x, _ = self._input(shape)
indices = [0, 0, 0, 1]
- result = tf.segment_sum(data=tf_x, segment_ids=indices).eval()
+ result = math_ops.segment_sum(data=tf_x, segment_ids=indices).eval()
self.assertAllEqual([[15, 18, 21, 24], [13, 14, 15, 16]], result)
def testSegmentIdsInvalid1(self):
@@ -140,7 +143,7 @@ class SegmentReductionOpTest(SegmentReductionHelper):
with self.test_session():
tf_x, _ = self._input(shape)
indices = [-1, -1, 0, 0]
- s = tf.segment_sum(data=tf_x, segment_ids=indices)
+ s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
with self.assertRaisesOpError("segment ids do not start at 0"):
s.eval()
@@ -149,7 +152,7 @@ class SegmentReductionOpTest(SegmentReductionHelper):
with self.test_session():
tf_x, _ = self._input(shape)
indices = [1, 1, 2, 2]
- s = tf.segment_sum(data=tf_x, segment_ids=indices)
+ s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
with self.assertRaisesOpError("segment ids do not start at 0"):
s.eval()
@@ -158,7 +161,7 @@ class SegmentReductionOpTest(SegmentReductionHelper):
with self.test_session():
tf_x, _ = self._input(shape)
indices = [0, 0, 2, 2]
- s = tf.segment_sum(data=tf_x, segment_ids=indices)
+ s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
with self.assertRaisesOpError("segment ids are not increasing by 1"):
s.eval()
@@ -167,7 +170,7 @@ class SegmentReductionOpTest(SegmentReductionHelper):
with self.test_session():
tf_x, _ = self._input(shape)
indices = [0, 1, 0, 1]
- s = tf.segment_sum(data=tf_x, segment_ids=indices)
+ s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
with self.assertRaisesOpError("segment ids are not increasing by 1"):
s.eval()
@@ -176,7 +179,7 @@ class SegmentReductionOpTest(SegmentReductionHelper):
with self.test_session():
tf_x, _ = self._input(shape)
indices = [0, 1, 2, 0]
- s = tf.segment_sum(data=tf_x, segment_ids=indices)
+ s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
with self.assertRaisesOpError(
r"Segment id 1 out of range \[0, 1\), probably "
"because 'segment_ids' input is not sorted."):
@@ -187,7 +190,7 @@ class SegmentReductionOpTest(SegmentReductionHelper):
with self.test_session():
tf_x, _ = self._input(shape)
indices = [0, 0, 0, -1]
- s = tf.segment_sum(data=tf_x, segment_ids=indices)
+ s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
with self.assertRaisesOpError("segment ids must be >= 0"):
s.eval()
@@ -196,25 +199,24 @@ class SegmentReductionOpTest(SegmentReductionHelper):
with self.test_session():
tf_x, _ = self._input(shape)
indices = [0, 0, 0, -2]
- s = tf.segment_sum(data=tf_x, segment_ids=indices)
+ s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
with self.assertRaisesOpError("segment ids must be >= 0"):
s.eval()
def testGradient(self):
shape = [4, 4]
indices = [0, 1, 2, 2]
- for tf_op in [tf.segment_sum,
- tf.segment_mean,
- tf.segment_min,
- tf.segment_max]:
+ for tf_op in [
+ math_ops.segment_sum, math_ops.segment_mean, math_ops.segment_min,
+ math_ops.segment_max
+ ]:
with self.test_session():
- tf_x, np_x = self._input(shape, dtype=tf.float64)
+ tf_x, np_x = self._input(shape, dtype=dtypes_lib.float64)
s = tf_op(data=tf_x, segment_ids=indices)
- jacob_t, jacob_n = tf.test.compute_gradient(
+ jacob_t, jacob_n = gradient_checker.compute_gradient(
tf_x,
shape,
- s,
- [3, 4],
+ s, [3, 4],
x_init_value=np_x.astype(np.double),
delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
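Each gradient test here follows the same pattern: build the reduction, then ask the checker for a theoretical (graph-derived) and a numerical (finite-difference) Jacobian and compare them. A standalone sketch of that pattern, with shapes and values assumed for illustration:

# Illustration only; mirrors the compute_gradient call pattern above.
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import gradient_checker, math_ops

with session.Session():
  np_x = np.arange(1.0, 9.0).reshape([4, 2])   # float64 toy input
  x = constant_op.constant(np_x)
  y = math_ops.segment_sum(data=x, segment_ids=[0, 1, 2, 2])  # shape [3, 2]
  jacob_t, jacob_n = gradient_checker.compute_gradient(
      x, [4, 2], y, [3, 2], x_init_value=np_x, delta=1)
  # The two Jacobians should agree to ~1e-3, as the assertion above checks.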
@@ -224,12 +226,10 @@ class UnsortedSegmentSumTest(SegmentReductionHelper):
use_gpu = False
def testValues(self):
- dtypes = [tf.float32,
- tf.float64,
- tf.int64,
- tf.int32,
- tf.complex64,
- tf.complex128]
+ dtypes = [
+ dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int64,
+ dtypes_lib.int32, dtypes_lib.complex64, dtypes_lib.complex128
+ ]
indices_flat = np.array([0, 4, 0, 8, 3, 8, 4, 7, 7, 3])
num_segments = 12
for indices in indices_flat, indices_flat.reshape(5, 2):
@@ -237,14 +237,10 @@ class UnsortedSegmentSumTest(SegmentReductionHelper):
for dtype in dtypes:
with self.test_session(use_gpu=self.use_gpu):
tf_x, np_x = self._input(shape, dtype=dtype)
- np_ans = self._segmentReduce(indices,
- np_x,
- np.add,
- op2=None,
- num_out_rows=num_segments)
- s = tf.unsorted_segment_sum(data=tf_x,
- segment_ids=indices,
- num_segments=num_segments)
+ np_ans = self._segmentReduce(
+ indices, np_x, np.add, op2=None, num_out_rows=num_segments)
+ s = math_ops.unsorted_segment_sum(
+ data=tf_x, segment_ids=indices, num_segments=num_segments)
tf_ans = s.eval()
self._assertAllClose(indices, np_ans, tf_ans)
self.assertShapeEqual(np_ans, s)
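unsorted_segment_sum, exercised in this class, drops the sortedness requirement: ids may arrive in any order, num_segments fixes the output height, and segments that receive no rows come back as zeros. A toy sketch (values assumed, not from the test):

# Illustration only; same post-change imports as the test above.
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import math_ops

with session.Session():
  data = constant_op.constant([[1.0], [2.0], [3.0]])
  s = math_ops.unsorted_segment_sum(
      data=data, segment_ids=[2, 0, 2], num_segments=4)
  print(s.eval())  # [[2.], [0.], [4.], [0.]] -- segments 1 and 3 stay empty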
@@ -256,15 +252,13 @@ class UnsortedSegmentSumTest(SegmentReductionHelper):
for indices in indices_flat, indices_flat.reshape(5, 2):
shape = indices.shape + (num_cols,)
with self.test_session(use_gpu=self.use_gpu):
- tf_x, np_x = self._input(shape, dtype=tf.float64)
- s = tf.unsorted_segment_sum(data=tf_x,
- segment_ids=indices,
- num_segments=num_segments)
- jacob_t, jacob_n = tf.test.compute_gradient(
+ tf_x, np_x = self._input(shape, dtype=dtypes_lib.float64)
+ s = math_ops.unsorted_segment_sum(
+ data=tf_x, segment_ids=indices, num_segments=num_segments)
+ jacob_t, jacob_n = gradient_checker.compute_gradient(
tf_x,
shape,
- s,
- [num_segments, num_cols],
+ s, [num_segments, num_cols],
x_init_value=np_x.astype(np.double),
delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
@@ -281,25 +275,22 @@ class UnsortedSegmentSumTest(SegmentReductionHelper):
shape = [n, num_cols]
num_segments = max(indices) + 1
with self.test_session(use_gpu=self.use_gpu):
- tf_x, np_x = self._input(shape, dtype=tf.float64)
+ tf_x, np_x = self._input(shape, dtype=dtypes_lib.float64)
# Results from UnsortedSegmentSum
- unsorted_s = tf.unsorted_segment_sum(data=tf_x,
- segment_ids=indices,
- num_segments=num_segments)
- (unsorted_jacob_t, unsorted_jacob_n) = tf.test.compute_gradient(
+ unsorted_s = math_ops.unsorted_segment_sum(
+ data=tf_x, segment_ids=indices, num_segments=num_segments)
+ (unsorted_jacob_t, unsorted_jacob_n) = gradient_checker.compute_gradient(
tf_x,
shape,
- unsorted_s,
- [num_segments, num_cols],
+ unsorted_s, [num_segments, num_cols],
x_init_value=np_x.astype(np.double),
delta=1)
# Results from SegmentSum
- sorted_s = tf.segment_sum(data=tf_x, segment_ids=indices)
- sorted_jacob_t, sorted_jacob_n = tf.test.compute_gradient(
+ sorted_s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
+ sorted_jacob_t, sorted_jacob_n = gradient_checker.compute_gradient(
tf_x,
shape,
- sorted_s,
- [num_segments, num_cols],
+ sorted_s, [num_segments, num_cols],
x_init_value=np_x.astype(np.double),
delta=1)
self.assertAllClose(unsorted_jacob_t, sorted_jacob_t, rtol=1e-3, atol=1e-3)
@@ -310,24 +301,21 @@ class UnsortedSegmentSumTest(SegmentReductionHelper):
# test, so this test is marked as cpu-only.
with self.test_session(use_gpu=False):
for bad in [[-1]], [[7]]:
- unsorted = tf.unsorted_segment_sum([[17]], bad, num_segments=2)
+ unsorted = math_ops.unsorted_segment_sum([[17]], bad, num_segments=2)
with self.assertRaisesOpError(
r"segment_ids\[0,0\] = %d is out of range \[0, 2\)" % bad[0][0]):
unsorted.eval()
def testEmptySecondDimension(self):
- dtypes = [np.float32,
- np.float64,
- np.int64,
- np.int32,
- np.complex64,
- np.complex128]
+ dtypes = [
+ np.float32, np.float64, np.int64, np.int32, np.complex64, np.complex128
+ ]
with self.test_session(use_gpu=self.use_gpu):
for dtype in dtypes:
for itype in (np.int32, np.int64):
data = np.zeros((2, 0), dtype=dtype)
segment_ids = np.array([0, 1], dtype=itype)
- unsorted = tf.unsorted_segment_sum(data, segment_ids, 2)
+ unsorted = math_ops.unsorted_segment_sum(data, segment_ids, 2)
self.assertAllEqual(unsorted.eval(), np.zeros((2, 0), dtype=dtype))
@@ -337,13 +325,11 @@ class UnsortedSegmentSumGpuTest(UnsortedSegmentSumTest):
class SparseSegmentReductionHelper(SegmentReductionHelper):
- def _sparse_input(self, input_shape, num_indices,
- dtype=tf.int32):
- a, b = super(SparseSegmentReductionHelper, self)._input(input_shape,
- dtype)
+ def _sparse_input(self, input_shape, num_indices, dtype=dtypes_lib.int32):
+ a, b = super(SparseSegmentReductionHelper, self)._input(input_shape, dtype)
indices = np.random.randint(0, input_shape[0], num_indices).astype(np.int32)
- return (tf.constant(indices, dtype=tf.int32),
- indices, a, b)
+ return (constant_op.constant(
+ indices, dtype=dtypes_lib.int32), indices, a, b)
def _sparseSegmentReduce(self, x, indices, segment_indices, op1, op2=None):
return self._segmentReduce(segment_indices, x[indices], op1, op2)
@@ -351,25 +337,18 @@ class SparseSegmentReductionHelper(SegmentReductionHelper):
class SparseSegmentReductionOpTest(SparseSegmentReductionHelper):
- def setUp(self):
- reveal_undocumented("tensorflow.python."
- "sparse_segment_mean_grad", tf)
- reveal_undocumented("tensorflow.python."
- "sparse_segment_sqrt_n_grad", tf)
-
def testValues(self):
- dtypes = [tf.float32,
- tf.float64,
- tf.int64,
- tf.int32]
+ dtypes = [
+ dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int64,
+ dtypes_lib.int32
+ ]
- mean_dtypes = [tf.float32,
- tf.float64]
+ mean_dtypes = [dtypes_lib.float32, dtypes_lib.float64]
# Each item is np_op1, np_op2, tf_op
- ops_list = [(np.add, None, tf.sparse_segment_sum),
+ ops_list = [(np.add, None, math_ops.sparse_segment_sum),
(self._mean_cum_op, self._mean_reduce_op,
- tf.sparse_segment_mean)]
+ math_ops.sparse_segment_mean)]
n = 400
shape = [n, 2]
@@ -380,11 +359,10 @@ class SparseSegmentReductionOpTest(SparseSegmentReductionHelper):
num_indices = len(segment_indices)
for dtype in dtypes:
with self.test_session(use_gpu=False):
- tf_indices, np_indices, tf_x, np_x = self._sparse_input(shape,
- num_indices,
- dtype=dtype)
+ tf_indices, np_indices, tf_x, np_x = self._sparse_input(
+ shape, num_indices, dtype=dtype)
for np_op1, np_op2, tf_op in ops_list:
- if tf_op == tf.sparse_segment_mean and dtype not in mean_dtypes:
+ if tf_op == math_ops.sparse_segment_mean and dtype not in mean_dtypes:
continue
np_ans = self._sparseSegmentReduce(np_x, np_indices, segment_indices,
np_op1, np_op2)
@@ -399,8 +377,8 @@ class SparseSegmentReductionOpTest(SparseSegmentReductionHelper):
def testValid(self):
# Baseline for the test*Invalid* methods below.
- tf_x, _ = self._input([10, 4], dtype=tf.float32)
- ops_list = [tf.sparse_segment_sum, tf.sparse_segment_mean]
+ tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
+ ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]
segment_indices = [0, 1, 2, 2]
tf_indices = [8, 3, 0, 9]
with self.test_session(use_gpu=False):
@@ -409,8 +387,8 @@ class SparseSegmentReductionOpTest(SparseSegmentReductionHelper):
s.eval()
def testIndicesInvalid1(self):
- tf_x, _ = self._input([10, 4], dtype=tf.float32)
- ops_list = [tf.sparse_segment_sum, tf.sparse_segment_mean]
+ tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
+ ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]
segment_indices = [0, 1, 2, 2]
tf_indices = [8, -1, 0, 9]
with self.test_session(use_gpu=False):
@@ -421,8 +399,8 @@ class SparseSegmentReductionOpTest(SparseSegmentReductionHelper):
s.eval()
def testIndicesInvalid2(self):
- tf_x, _ = self._input([10, 4], dtype=tf.float32)
- ops_list = [tf.sparse_segment_sum, tf.sparse_segment_mean]
+ tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
+ ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]
segment_indices = [0, 1, 2, 2]
tf_indices = [8, 3, 0, 10]
with self.test_session(use_gpu=False):
@@ -433,8 +411,8 @@ class SparseSegmentReductionOpTest(SparseSegmentReductionHelper):
s.eval()
def testSegmentsInvalid1(self):
- tf_x, _ = self._input([10, 4], dtype=tf.float32)
- ops_list = [tf.sparse_segment_sum, tf.sparse_segment_mean]
+ tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
+ ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]
segment_indices = [0, 2, 2, 2]
tf_indices = [8, 3, 0, 9]
with self.test_session(use_gpu=False):
@@ -444,8 +422,8 @@ class SparseSegmentReductionOpTest(SparseSegmentReductionHelper):
s.eval()
def testSegmentsInvalid2(self):
- tf_x, _ = self._input([10, 4], dtype=tf.float32)
- ops_list = [tf.sparse_segment_sum, tf.sparse_segment_mean]
+ tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
+ ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]
segment_indices = [0, 1, 0, 1]
tf_indices = [8, 3, 0, 9]
with self.test_session(use_gpu=False):
@@ -455,8 +433,8 @@ class SparseSegmentReductionOpTest(SparseSegmentReductionHelper):
s.eval()
def testSegmentsInvalid3(self):
- tf_x, _ = self._input([10, 4], dtype=tf.float32)
- ops_list = [tf.sparse_segment_sum, tf.sparse_segment_mean]
+ tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
+ ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]
segment_indices = [0, 1, 2, 0]
tf_indices = [8, 3, 0, 9]
with self.test_session(use_gpu=False):
@@ -468,8 +446,8 @@ class SparseSegmentReductionOpTest(SparseSegmentReductionHelper):
s.eval()
def testSegmentsInvalid4(self):
- tf_x, _ = self._input([10, 4], dtype=tf.float32)
- ops_list = [tf.sparse_segment_sum, tf.sparse_segment_mean]
+ tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
+ ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]
segment_indices = [-1, 0, 1, 1]
tf_indices = [8, 3, 0, 9]
with self.test_session(use_gpu=False):
@@ -479,8 +457,8 @@ class SparseSegmentReductionOpTest(SparseSegmentReductionHelper):
s.eval()
def testSegmentsInvalid5(self):
- tf_x, _ = self._input([10, 4], dtype=tf.float32)
- ops_list = [tf.sparse_segment_sum, tf.sparse_segment_mean]
+ tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
+ ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]
segment_indices = [1, 2, 2, 2]
tf_indices = [8, 3, 0, 9]
with self.test_session(use_gpu=False):
@@ -490,8 +468,8 @@ class SparseSegmentReductionOpTest(SparseSegmentReductionHelper):
s.eval()
def testSegmentsInvalid6(self):
- tf_x, _ = self._input([10, 4], dtype=tf.float32)
- ops_list = [tf.sparse_segment_sum, tf.sparse_segment_mean]
+ tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
+ ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]
segment_indices = [0, 0, 0, -1]
tf_indices = [8, 3, 0, 9]
with self.test_session(use_gpu=False):
@@ -501,8 +479,8 @@ class SparseSegmentReductionOpTest(SparseSegmentReductionHelper):
s.eval()
def testSegmentsInvalid7(self):
- tf_x, _ = self._input([10, 4], dtype=tf.float32)
- ops_list = [tf.sparse_segment_sum, tf.sparse_segment_mean]
+ tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
+ ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]
segment_indices = [0, 0, 0, -2]
tf_indices = [8, 3, 0, 9]
with self.test_session(use_gpu=False):
@@ -516,24 +494,25 @@ class SparseSegmentReductionOpTest(SparseSegmentReductionHelper):
segment_indices = [0, 1, 2, 2]
num_indices = len(segment_indices)
- for tf_op in [tf.sparse_segment_sum, tf.sparse_segment_mean]:
+ for tf_op in [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]:
with self.test_session():
tf_indices, _, tf_x, np_x = self._sparse_input(
- shape, num_indices, dtype=tf.float64)
+ shape, num_indices, dtype=dtypes_lib.float64)
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
- jacob_t, jacob_n = tf.test.compute_gradient(
+ jacob_t, jacob_n = gradient_checker.compute_gradient(
tf_x,
shape,
- s,
- [3, 4],
+ s, [3, 4],
x_init_value=np_x.astype(np.double),
delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
def testGradientValid(self):
# Baseline for the testGradient*Invalid* methods below.
- tf_x, _ = self._input([3, 4], dtype=tf.float32)
- ops_list = [tf.sparse_segment_mean_grad, tf.sparse_segment_sqrt_n_grad]
+ tf_x, _ = self._input([3, 4], dtype=dtypes_lib.float32)
+ ops_list = [
+ math_ops.sparse_segment_mean_grad, math_ops.sparse_segment_sqrt_n_grad
+ ]
segment_indices = [0, 1, 2, 2]
tf_indices = [8, 3, 0, 9]
with self.test_session(use_gpu=False):
@@ -542,8 +521,10 @@ class SparseSegmentReductionOpTest(SparseSegmentReductionHelper):
s.eval()
def testGradientIndicesInvalid1(self):
- tf_x, _ = self._input([3, 4], dtype=tf.float32)
- ops_list = [tf.sparse_segment_mean_grad, tf.sparse_segment_sqrt_n_grad]
+ tf_x, _ = self._input([3, 4], dtype=dtypes_lib.float32)
+ ops_list = [
+ math_ops.sparse_segment_mean_grad, math_ops.sparse_segment_sqrt_n_grad
+ ]
segment_indices = [0, 1, 2, 2]
tf_indices = [8, 3, 0, 10]
with self.test_session(use_gpu=False):
@@ -553,8 +534,10 @@ class SparseSegmentReductionOpTest(SparseSegmentReductionHelper):
s.eval()
def testGradientIndicesInvalid2(self):
- tf_x, _ = self._input([3, 4], dtype=tf.float32)
- ops_list = [tf.sparse_segment_mean_grad, tf.sparse_segment_sqrt_n_grad]
+ tf_x, _ = self._input([3, 4], dtype=dtypes_lib.float32)
+ ops_list = [
+ math_ops.sparse_segment_mean_grad, math_ops.sparse_segment_sqrt_n_grad
+ ]
segment_indices = [0, 1, 2, 2]
tf_indices = [8, 3, -1, 9]
with self.test_session(use_gpu=False):
@@ -564,8 +547,11 @@ class SparseSegmentReductionOpTest(SparseSegmentReductionHelper):
s.eval()
def testGradientSegmentsInvalid1(self):
- tf_x, _ = self._input([3, 4], dtype=tf.float32) # expecting 3 segments
- ops_list = [tf.sparse_segment_mean_grad, tf.sparse_segment_sqrt_n_grad]
+ tf_x, _ = self._input(
+ [3, 4], dtype=dtypes_lib.float32) # expecting 3 segments
+ ops_list = [
+ math_ops.sparse_segment_mean_grad, math_ops.sparse_segment_sqrt_n_grad
+ ]
segment_indices = [0, 1, 1, 1] # 2 segments
tf_indices = [8, 3, 0, 9]
with self.test_session(use_gpu=False):
@@ -575,8 +561,10 @@ class SparseSegmentReductionOpTest(SparseSegmentReductionHelper):
s.eval()
def testGradientSegmentsInvalid2(self):
- tf_x, _ = self._input([1, 4], dtype=tf.float32)
- ops_list = [tf.sparse_segment_mean_grad, tf.sparse_segment_sqrt_n_grad]
+ tf_x, _ = self._input([1, 4], dtype=dtypes_lib.float32)
+ ops_list = [
+ math_ops.sparse_segment_mean_grad, math_ops.sparse_segment_sqrt_n_grad
+ ]
segment_indices = [0, 1, 2, 0]
tf_indices = [8, 3, 0, 9]
with self.test_session(use_gpu=False):
@@ -586,8 +574,10 @@ class SparseSegmentReductionOpTest(SparseSegmentReductionHelper):
s.eval()
def testGradientSegmentsInvalid3(self):
- tf_x, _ = self._input([2, 4], dtype=tf.float32)
- ops_list = [tf.sparse_segment_mean_grad, tf.sparse_segment_sqrt_n_grad]
+ tf_x, _ = self._input([2, 4], dtype=dtypes_lib.float32)
+ ops_list = [
+ math_ops.sparse_segment_mean_grad, math_ops.sparse_segment_sqrt_n_grad
+ ]
segment_indices = [-1, 0, 1, 1]
tf_indices = [8, 3, 0, 9]
with self.test_session(use_gpu=False):
@@ -597,8 +587,10 @@ class SparseSegmentReductionOpTest(SparseSegmentReductionHelper):
s.eval()
def testGradientSegmentsInvalid4(self):
- tf_x, _ = self._input([0, 4], dtype=tf.float32)
- ops_list = [tf.sparse_segment_mean_grad, tf.sparse_segment_sqrt_n_grad]
+ tf_x, _ = self._input([0, 4], dtype=dtypes_lib.float32)
+ ops_list = [
+ math_ops.sparse_segment_mean_grad, math_ops.sparse_segment_sqrt_n_grad
+ ]
segment_indices = [0, 1, 2, -1]
tf_indices = [8, 3, 0, 9]
with self.test_session(use_gpu=False):
@@ -609,4 +601,4 @@ class SparseSegmentReductionOpTest(SparseSegmentReductionHelper):
if __name__ == "__main__":
- tf.test.main()
+ test.main()
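That closes out this file: no behavioral change, only the import surface. The mechanical rewrite applied here, and in each file below, replaces the top-level tf.* alias with a direct import of the defining module, presumably so each test can depend on narrow build targets instead of the whole tensorflow package. A sketch of the pattern (identifiers illustrative, not taken verbatim from the commit):

# Before: the "hourglass" import -- everything funnels through the tf package.
import tensorflow as tf
from tensorflow.python.framework import constant_op
x = constant_op.constant([[1, 2], [3, 4]])
s = tf.segment_sum(data=x, segment_ids=[0, 1])

# After: import the defining module directly; no tf.* re-export in between.
from tensorflow.python.ops import math_ops
s = math_ops.segment_sum(data=x, segment_ids=[0, 1])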
diff --git a/tensorflow/python/kernel_tests/self_adjoint_eig_op_test.py b/tensorflow/python/kernel_tests/self_adjoint_eig_op_test.py
index 5be7d51f90..36b3ed33d8 100644
--- a/tensorflow/python/kernel_tests/self_adjoint_eig_op_test.py
+++ b/tensorflow/python/kernel_tests/self_adjoint_eig_op_test.py
@@ -13,25 +13,32 @@
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.math_ops.matrix_inverse."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+
+from tensorflow.python.framework import constant_op
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import gradient_checker
+from tensorflow.python.ops import linalg_ops
+from tensorflow.python.ops import math_ops
+from tensorflow.python.platform import test
-class SelfAdjointEigTest(tf.test.TestCase):
+class SelfAdjointEigTest(test.TestCase):
def testWrongDimensions(self):
# The input to self_adjoint_eig should be a tensor of
# at least rank 2.
- scalar = tf.constant(1.)
+ scalar = constant_op.constant(1.)
with self.assertRaises(ValueError):
- tf.self_adjoint_eig(scalar)
- vector = tf.constant([1., 2.])
+ linalg_ops.self_adjoint_eig(scalar)
+ vector = constant_op.constant([1., 2.])
with self.assertRaises(ValueError):
- tf.self_adjoint_eig(vector)
+ linalg_ops.self_adjoint_eig(vector)
def SortEigenDecomposition(e, v):
@@ -79,25 +86,27 @@ def _GetSelfAdjointEigTest(dtype_, shape_):
np_e, np_v = np.linalg.eig(a)
with self.test_session():
if compute_v:
- tf_e, tf_v = tf.self_adjoint_eig(tf.constant(a))
+ tf_e, tf_v = linalg_ops.self_adjoint_eig(constant_op.constant(a))
# Check that V*diag(E)*V^T is close to A.
- a_ev = tf.matmul(
- tf.matmul(tf_v, tf.matrix_diag(tf_e)), tf_v, adjoint_b=True)
+ a_ev = math_ops.matmul(
+ math_ops.matmul(tf_v, array_ops.matrix_diag(tf_e)),
+ tf_v,
+ adjoint_b=True)
self.assertAllClose(a_ev.eval(), a, atol=atol)
# Compare to numpy.linalg.eig.
- CompareEigenDecompositions(self, np_e, np_v, tf_e.eval(), tf_v.eval(),
- atol)
+ CompareEigenDecompositions(self, np_e, np_v,
+ tf_e.eval(), tf_v.eval(), atol)
else:
- tf_e = tf.self_adjoint_eigvals(tf.constant(a))
+ tf_e = linalg_ops.self_adjoint_eigvals(constant_op.constant(a))
self.assertAllClose(
np.sort(np_e, -1), np.sort(tf_e.eval(), -1), atol=atol)
return Test
-class SelfAdjointEigGradTest(tf.test.TestCase):
+class SelfAdjointEigGradTest(test.TestCase):
pass # Filled in below
@@ -121,14 +130,14 @@ def _GetSelfAdjointEigGradTest(dtype_, shape_):
else:
tol = 1e-7
with self.test_session():
- tf_a = tf.constant(a)
- tf_e, tf_v = tf.self_adjoint_eig(tf_a)
+ tf_a = constant_op.constant(a)
+ tf_e, tf_v = linalg_ops.self_adjoint_eig(tf_a)
for b in tf_e, tf_v:
x_init = np.random.uniform(
low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(dtype_)
x_init += x_init.T
x_init = np.tile(x_init, batch_shape + (1, 1))
- theoretical, numerical = tf.test.compute_gradient(
+ theoretical, numerical = gradient_checker.compute_gradient(
tf_a,
tf_a.get_shape().as_list(),
b,
@@ -150,4 +159,4 @@ if __name__ == '__main__':
_GetSelfAdjointEigTest(dtype, shape))
setattr(SelfAdjointEigGradTest, 'testSelfAdjointEigGrad_' + name,
_GetSelfAdjointEigGradTest(dtype, shape))
- tf.test.main()
+ test.main()
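The eig tests keep their assertions and only re-route imports. For orientation, the identity checked when compute_v is set, in miniature (values assumed):

# Illustration only, using the modules this file imports after the change.
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops, linalg_ops, math_ops

with session.Session():
  a = np.array([[2.0, 1.0], [1.0, 2.0]])  # symmetric, so the eigensystem is real
  e, v = linalg_ops.self_adjoint_eig(constant_op.constant(a))
  # V * diag(E) * V^T should reconstruct `a` up to numerical tolerance.
  a_ev = math_ops.matmul(
      math_ops.matmul(v, array_ops.matrix_diag(e)), v, adjoint_b=True)
  print(np.allclose(a_ev.eval(), a))  # True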
diff --git a/tensorflow/python/kernel_tests/session_ops_test.py b/tensorflow/python/kernel_tests/session_ops_test.py
index 0dfee75fd9..616946c622 100644
--- a/tensorflow/python/kernel_tests/session_ops_test.py
+++ b/tensorflow/python/kernel_tests/session_ops_test.py
@@ -17,32 +17,37 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-import tensorflow as tf
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
+from tensorflow.python.ops import math_ops
+from tensorflow.python.ops import session_ops
+from tensorflow.python.platform import test
-class SessionOpsTest(tf.test.TestCase):
+class SessionOpsTest(test.TestCase):
def testHandleBasic(self):
with self.test_session() as sess:
# Return a handle.
- a = tf.constant(10)
- b = tf.constant(5)
- c = tf.mul(a, b)
- h = tf.get_session_handle(c)
+ a = constant_op.constant(10)
+ b = constant_op.constant(5)
+ c = math_ops.mul(a, b)
+ h = session_ops.get_session_handle(c)
h = sess.run(h)
# Feed a tensor handle.
- f, x = tf.get_session_tensor(h.handle, tf.int32)
- y = tf.mul(x, 10)
+ f, x = session_ops.get_session_tensor(h.handle, dtypes.int32)
+ y = math_ops.mul(x, 10)
self.assertEqual(500, sess.run(y, feed_dict={f: h.handle}))
def testHandleEval(self):
with self.test_session() as sess:
# Return a handle.
- a = tf.constant(10)
- b = tf.constant(5)
- c = tf.mul(a, b)
- h = tf.get_session_handle(c)
+ a = constant_op.constant(10)
+ b = constant_op.constant(5)
+ c = math_ops.mul(a, b)
+ h = session_ops.get_session_handle(c)
h = sess.run(h)
# Get the tensor from its handle.
@@ -51,11 +56,11 @@ class SessionOpsTest(tf.test.TestCase):
def testHandleAndValue(self):
with self.test_session() as sess:
# Return a handle and a value.
- a = tf.constant(10)
- b = tf.constant(5)
- c = tf.mul(a, b)
- h = tf.get_session_handle(c)
- v = tf.mul(a, c)
+ a = constant_op.constant(10)
+ b = constant_op.constant(5)
+ c = math_ops.mul(a, b)
+ h = session_ops.get_session_handle(c)
+ v = math_ops.mul(a, c)
h, v = sess.run([h, v])
self.assertEqual(50, h.eval())
@@ -64,19 +69,19 @@ class SessionOpsTest(tf.test.TestCase):
def testHandleCond(self):
with self.test_session() as sess:
# Return a handle and a value
- a = tf.constant(10)
- b = tf.constant(5)
- p = tf.less(a, b)
- c = tf.mul(a, b)
- h = tf.get_session_handle(c)
+ a = constant_op.constant(10)
+ b = constant_op.constant(5)
+ p = math_ops.less(a, b)
+ c = math_ops.mul(a, b)
+ h = session_ops.get_session_handle(c)
p, h = sess.run([p, h])
# Run by feeding a tensor handle.
- f, x = tf.get_session_tensor(h.handle, tf.int32)
+ f, x = session_ops.get_session_tensor(h.handle, dtypes.int32)
if p:
- y = tf.mul(x, 10)
+ y = math_ops.mul(x, 10)
else:
- y = tf.mul(x, 100)
+ y = math_ops.mul(x, 100)
result = sess.run(y, feed_dict={f: h.handle})
self.assertEqual(5000, result)
@@ -84,14 +89,14 @@ class SessionOpsTest(tf.test.TestCase):
def testHandleForLoop(self):
with self.test_session() as sess:
# Initialize a handle.
- a = tf.constant(0)
- h = tf.get_session_handle(a)
+ a = constant_op.constant(0)
+ h = session_ops.get_session_handle(a)
h = sess.run(h)
# Do some computation.
- f, x = tf.get_session_tensor(h.handle, tf.int32)
+ f, x = session_ops.get_session_tensor(h.handle, dtypes.int32)
# Must define the loop body outside the loop.
- h_x = tf.get_session_handle(tf.add(x, 1))
+ h_x = session_ops.get_session_handle(math_ops.add(x, 1))
for _ in range(100):
# This exercises garbage collection.
h = sess.run(h_x, feed_dict={f: h.handle})
@@ -101,16 +106,16 @@ class SessionOpsTest(tf.test.TestCase):
def testHandleWhileLoop(self):
with self.test_session() as sess:
# Initialize a handle.
- a = tf.constant(0)
- h = tf.get_session_handle(a)
+ a = constant_op.constant(0)
+ h = session_ops.get_session_handle(a)
h = sess.run(h)
# Do some computation.
- f, x = tf.get_session_tensor(h.handle, tf.int32)
- b = tf.constant(100)
- p = tf.less(x, b)
+ f, x = session_ops.get_session_tensor(h.handle, dtypes.int32)
+ b = constant_op.constant(100)
+ p = math_ops.less(x, b)
# Must define the loop body outside the loop.
- h_x = tf.get_session_handle(tf.add(x, 1))
+ h_x = session_ops.get_session_handle(math_ops.add(x, 1))
while True:
rp, h = sess.run([p, h_x], feed_dict={f: h.handle})
if not rp:
@@ -121,61 +126,61 @@ class SessionOpsTest(tf.test.TestCase):
def testHandleMover(self):
with self.test_session() as sess:
# Return a handle.
- a = tf.constant(10)
- b = tf.constant(5)
- c = tf.mul(a, b)
- h = tf.get_session_handle(c)
+ a = constant_op.constant(10)
+ b = constant_op.constant(5)
+ c = math_ops.mul(a, b)
+ h = session_ops.get_session_handle(c)
h = sess.run(h)
# Feed a tensor handle.
- f, x = tf.get_session_tensor(h.handle, tf.int32)
- y = tf.mul(x, 10)
+ f, x = session_ops.get_session_tensor(h.handle, dtypes.int32)
+ y = math_ops.mul(x, 10)
self.assertEqual(500, sess.run(y, feed_dict={f: h.handle}))
# Feed another tensor handle.
- with tf.device("/gpu:0"):
- a = tf.constant(10)
- h = tf.get_session_handle(a)
+ with ops.device("/gpu:0"):
+ a = constant_op.constant(10)
+ h = session_ops.get_session_handle(a)
h = sess.run(h)
self.assertEqual(100, sess.run(y, feed_dict={f: h.handle}))
def testHandleDelete(self):
with self.test_session() as sess:
# Return a handle.
- a = tf.constant(10)
- b = tf.constant(5)
- c = tf.mul(a, b)
- h = tf.get_session_handle(c)
+ a = constant_op.constant(10)
+ b = constant_op.constant(5)
+ c = math_ops.mul(a, b)
+ h = session_ops.get_session_handle(c)
sess.run(h).delete()
def testHandleDeleteRaw(self):
with self.test_session() as sess:
# Return a handle.
- a = tf.constant(10)
- b = tf.constant(5)
- c = tf.mul(a, b)
- h = tf.get_session_handle(c)
+ a = constant_op.constant(10)
+ b = constant_op.constant(5)
+ c = math_ops.mul(a, b)
+ h = session_ops.get_session_handle(c)
h = sess.run(h)
# Delete using a raw tensor handle.
raw_h = h.get_raw_handle()
- f, x = tf.delete_session_tensor(raw_h)
+ f, x = session_ops.delete_session_tensor(raw_h)
sess.run(x, feed_dict={f: raw_h})
def testMultiDevices(self):
with self.test_session() as sess:
- with tf.device("/gpu:0"):
- a = tf.constant(1.0)
- a_handle = sess.run(tf.get_session_handle(a))
- with tf.device("/cpu:0"):
- b = tf.constant(2.0)
- b_handle = sess.run(tf.get_session_handle(b))
-
- a_p, a_t = tf.get_session_tensor(a_handle.handle, tf.float32)
- b_p, b_t = tf.get_session_tensor(b_handle.handle, tf.float32)
- c = tf.add(a_t, b_t)
+ with ops.device("/gpu:0"):
+ a = constant_op.constant(1.0)
+ a_handle = sess.run(session_ops.get_session_handle(a))
+ with ops.device("/cpu:0"):
+ b = constant_op.constant(2.0)
+ b_handle = sess.run(session_ops.get_session_handle(b))
+
+ a_p, a_t = session_ops.get_session_tensor(a_handle.handle, dtypes.float32)
+ b_p, b_t = session_ops.get_session_tensor(b_handle.handle, dtypes.float32)
+ c = math_ops.add(a_t, b_t)
c_handle = sess.run(
- tf.get_session_handle(c),
+ session_ops.get_session_handle(c),
feed_dict={a_p: a_handle.handle,
b_p: b_handle.handle})
self.assertEqual(3.0, c_handle.eval())
@@ -183,43 +188,47 @@ class SessionOpsTest(tf.test.TestCase):
def testHandleGC(self):
with self.test_session() as sess:
# initial values live on CPU
- with tf.device("/cpu:0"):
- one = tf.constant(1, dtype=tf.float32)
- one_handle = sess.run(tf.get_session_handle(one))
- x_handle = sess.run(tf.get_session_handle(one))
+ with ops.device("/cpu:0"):
+ one = constant_op.constant(1, dtype=dtypes.float32)
+ one_handle = sess.run(session_ops.get_session_handle(one))
+ x_handle = sess.run(session_ops.get_session_handle(one))
# addition lives on GPU
- with tf.device("/gpu:0"):
- add_h1, add_t1 = tf.get_session_tensor(one_handle.handle, tf.float32)
- add_h2, add_t2 = tf.get_session_tensor(x_handle.handle, tf.float32)
- add_op = tf.add(add_t1, add_t2)
- add_output = tf.get_session_handle(add_op)
+ with ops.device("/gpu:0"):
+ add_h1, add_t1 = session_ops.get_session_tensor(one_handle.handle,
+ dtypes.float32)
+ add_h2, add_t2 = session_ops.get_session_tensor(x_handle.handle,
+ dtypes.float32)
+ add_op = math_ops.add(add_t1, add_t2)
+ add_output = session_ops.get_session_handle(add_op)
# add 1 to tensor 20 times
for _ in range(20):
- x_handle = sess.run(add_output,
- feed_dict={add_h1: one_handle.handle,
- add_h2: x_handle.handle})
+ x_handle = sess.run(
+ add_output,
+ feed_dict={add_h1: one_handle.handle,
+ add_h2: x_handle.handle})
def testHandlePlacement(self):
with self.test_session() as sess:
- a = tf.constant(1.0)
- a_handle_op = tf.get_session_handle(a)
- b = tf.constant(2.0)
- b_handle_op = tf.get_session_handle(b)
+ a = constant_op.constant(1.0)
+ a_handle_op = session_ops.get_session_handle(a)
+ b = constant_op.constant(2.0)
+ b_handle_op = session_ops.get_session_handle(b)
a_handle = sess.run(a_handle_op)
b_handle = sess.run(b_handle_op)
- a_p, a_t = tf.get_session_tensor(a_handle.handle, tf.float32)
- b_p, b_t = tf.get_session_tensor(b_handle.handle, tf.float32)
+ a_p, a_t = session_ops.get_session_tensor(a_handle.handle, dtypes.float32)
+ b_p, b_t = session_ops.get_session_tensor(b_handle.handle, dtypes.float32)
- c = tf.add(a_t, b_t)
+ c = math_ops.add(a_t, b_t)
c_handle = sess.run(
- tf.get_session_handle(c),
+ session_ops.get_session_handle(c),
feed_dict={a_p: a_handle.handle,
b_p: b_handle.handle})
self.assertEqual(3.0, c_handle.eval())
+
if __name__ == "__main__":
- tf.test.main()
+ test.main()
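Same treatment for the session-ops tests. The handle round-trip they all revolve around, as a standalone sketch (assumes this era's runtime; mirrors testHandleBasic above):

# Illustration only (not part of the commit).
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op, dtypes
from tensorflow.python.ops import math_ops, session_ops

with session.Session() as sess:
  # Persist 10 * 5 inside the session and get a handle to the result.
  c = math_ops.mul(constant_op.constant(10), constant_op.constant(5))
  h = sess.run(session_ops.get_session_handle(c))
  # Feed the handle back in as the input of a second computation.
  f, x = session_ops.get_session_tensor(h.handle, dtypes.int32)
  print(sess.run(math_ops.mul(x, 10), feed_dict={f: h.handle}))  # 500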
diff --git a/tensorflow/python/kernel_tests/sets_test.py b/tensorflow/python/kernel_tests/sets_test.py
index 44ec440d6c..cd1bb4e753 100644
--- a/tensorflow/python/kernel_tests/sets_test.py
+++ b/tensorflow/python/kernel_tests/sets_test.py
@@ -13,29 +13,37 @@
# limitations under the License.
# ==============================================================================
"""Tests for set_ops."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import errors_impl
+from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib
from tensorflow.python.framework import test_util
+from tensorflow.python.ops import math_ops
+from tensorflow.python.ops import sets
+from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import googletest
-
_DTYPES = set([
- tf.int8, tf.int16, tf.int32, tf.int64, tf.uint8, tf.uint16, tf.string])
+ dtypes.int8, dtypes.int16, dtypes.int32, dtypes.int64, dtypes.uint8,
+ dtypes.uint16, dtypes.string
+])
def _values(values, dtype):
return np.array(
values,
- dtype=(np.unicode if (dtype == tf.string) else dtype.as_numpy_dtype))
+ dtype=(np.unicode if (dtype == dtypes.string) else dtype.as_numpy_dtype))
def _constant(values, dtype):
- return tf.constant(_values(values, dtype), dtype=dtype)
+ return constant_op.constant(_values(values, dtype), dtype=dtype)
def _dense_to_sparse(dense, dtype):
@@ -50,13 +58,13 @@ def _dense_to_sparse(dense, dtype):
col_ix = 0
for cell in row:
indices.append([row_ix, col_ix])
- values.append(str(cell) if dtype == tf.string else cell)
+ values.append(str(cell) if dtype == dtypes.string else cell)
col_ix += 1
row_ix += 1
- return tf.SparseTensor(
- tf.constant(indices, tf.int64),
- tf.constant(values, dtype),
- tf.constant(shape, tf.int64))
+ return sparse_tensor_lib.SparseTensor(
+ constant_op.constant(indices, dtypes.int64),
+ constant_op.constant(values, dtype),
+ constant_op.constant(shape, dtypes.int64))
class SetOpsTest(test_util.TensorFlowTestCase):
@@ -66,10 +74,9 @@ class SetOpsTest(test_util.TensorFlowTestCase):
self._test_set_size_2d(dtype)
def _test_set_size_2d(self, dtype):
- self.assertAllEqual(
- [1], self._set_size(_dense_to_sparse([[1]], dtype)))
- self.assertAllEqual(
- [2, 1], self._set_size(_dense_to_sparse([[1, 9], [1]], dtype)))
+ self.assertAllEqual([1], self._set_size(_dense_to_sparse([[1]], dtype)))
+ self.assertAllEqual([2, 1],
+ self._set_size(_dense_to_sparse([[1, 9], [1]], dtype)))
self.assertAllEqual(
[3, 0], self._set_size(_dense_to_sparse([[1, 9, 2], []], dtype)))
self.assertAllEqual(
@@ -82,15 +89,11 @@ class SetOpsTest(test_util.TensorFlowTestCase):
def _test_set_size_duplicates_2d(self, dtype):
self.assertAllEqual(
[1], self._set_size(_dense_to_sparse([[1, 1, 1, 1, 1, 1]], dtype)))
- self.assertAllEqual(
- [2, 7, 3, 0, 1],
- self._set_size(_dense_to_sparse([
- [1, 9],
- [6, 7, 8, 8, 6, 7, 5, 3, 3, 0, 6, 6, 9, 0, 0, 0],
- [999, 1, -1000],
- [],
- [-1]
- ], dtype)))
+ self.assertAllEqual([2, 7, 3, 0, 1],
+ self._set_size(
+ _dense_to_sparse([[1, 9], [
+ 6, 7, 8, 8, 6, 7, 5, 3, 3, 0, 6, 6, 9, 0, 0, 0
+ ], [999, 1, -1000], [], [-1]], dtype)))
def test_set_size_3d(self):
for dtype in _DTYPES:
@@ -102,25 +105,25 @@ class SetOpsTest(test_util.TensorFlowTestCase):
def _test_set_size_3d(self, dtype, invalid_indices=False):
if invalid_indices:
- indices = tf.constant([
+ indices = constant_op.constant([
[0, 1, 0], [0, 1, 1], # 0,1
[1, 0, 0], # 1,0
[1, 1, 0], [1, 1, 1], [1, 1, 2], # 1,1
[0, 0, 0], [0, 0, 2], # 0,0
# 2,0
[2, 1, 1] # 2,1
- ], tf.int64)
+ ], dtypes.int64)
else:
- indices = tf.constant([
+ indices = constant_op.constant([
[0, 0, 0], [0, 0, 2], # 0,0
[0, 1, 0], [0, 1, 1], # 0,1
[1, 0, 0], # 1,0
[1, 1, 0], [1, 1, 1], [1, 1, 2], # 1,1
# 2,0
[2, 1, 1] # 2,1
- ], tf.int64)
+ ], dtypes.int64)
- sp = tf.SparseTensor(
+ sp = sparse_tensor_lib.SparseTensor(
indices,
_constant([
1, 9, # 0,0
@@ -130,10 +133,10 @@ class SetOpsTest(test_util.TensorFlowTestCase):
# 2,0
5 # 2,1
], dtype),
- tf.constant([3, 2, 3], tf.int64))
+ constant_op.constant([3, 2, 3], dtypes.int64))
if invalid_indices:
- with self.assertRaisesRegexp(tf.OpError, "out of order"):
+ with self.assertRaisesRegexp(errors_impl.OpError, "out of order"):
self._set_size(sp)
else:
self.assertAllEqual([
@@ -148,12 +151,12 @@ class SetOpsTest(test_util.TensorFlowTestCase):
def _set_size(self, sparse_data):
# Validate that we get the same results with or without `validate_indices`.
ops = [
- tf.contrib.metrics.set_size(sparse_data, validate_indices=True),
- tf.contrib.metrics.set_size(sparse_data, validate_indices=False)
+ sets.set_size(sparse_data, validate_indices=True),
+ sets.set_size(sparse_data, validate_indices=False)
]
for op in ops:
self.assertEqual(None, op.get_shape().dims)
- self.assertEqual(tf.int32, op.dtype)
+ self.assertEqual(dtypes.int32, op.dtype)
with self.test_session() as sess:
results = sess.run(ops)
self.assertAllEqual(results[0], results[1])
@@ -175,15 +178,23 @@ class SetOpsTest(test_util.TensorFlowTestCase):
a = _constant(a_values, dtype=dtype)
sp_b = _dense_to_sparse(b_values, dtype=dtype)
intersection = self._set_intersection(a, sp_b)
- self._assert_set_operation(expected_indices, expected_values,
- expected_shape, intersection, dtype=dtype)
+ self._assert_set_operation(
+ expected_indices,
+ expected_values,
+ expected_shape,
+ intersection,
+ dtype=dtype)
self.assertAllEqual(expected_counts, self._set_intersection_count(a, sp_b))
# Sparse to sparse.
sp_a = _dense_to_sparse(a_values, dtype=dtype)
intersection = self._set_intersection(sp_a, sp_b)
- self._assert_set_operation(expected_indices, expected_values,
- expected_shape, intersection, dtype=dtype)
+ self._assert_set_operation(
+ expected_indices,
+ expected_values,
+ expected_shape,
+ intersection,
+ dtype=dtype)
self.assertAllEqual(expected_counts,
self._set_intersection_count(sp_a, sp_b))
@@ -203,8 +214,12 @@ class SetOpsTest(test_util.TensorFlowTestCase):
a = _constant(a_values, dtype)
b = _constant(b_values, dtype)
intersection = self._set_intersection(a, b)
- self._assert_set_operation(expected_indices, expected_values,
- expected_shape, intersection, dtype=dtype)
+ self._assert_set_operation(
+ expected_indices,
+ expected_values,
+ expected_shape,
+ intersection,
+ dtype=dtype)
self.assertAllEqual(expected_counts, self._set_intersection_count(a, b))
def test_set_intersection_duplicates_2d(self):
@@ -223,22 +238,34 @@ class SetOpsTest(test_util.TensorFlowTestCase):
a = _constant(a_values, dtype=dtype)
b = _constant(b_values, dtype=dtype)
intersection = self._set_intersection(a, b)
- self._assert_set_operation(expected_indices, expected_values,
- expected_shape, intersection, dtype=dtype)
+ self._assert_set_operation(
+ expected_indices,
+ expected_values,
+ expected_shape,
+ intersection,
+ dtype=dtype)
self.assertAllEqual(expected_counts, self._set_intersection_count(a, b))
# Dense to sparse.
sp_b = _dense_to_sparse(b_values, dtype=dtype)
intersection = self._set_intersection(a, sp_b)
- self._assert_set_operation(expected_indices, expected_values,
- expected_shape, intersection, dtype=dtype)
+ self._assert_set_operation(
+ expected_indices,
+ expected_values,
+ expected_shape,
+ intersection,
+ dtype=dtype)
self.assertAllEqual(expected_counts, self._set_intersection_count(a, sp_b))
# Sparse to sparse.
sp_a = _dense_to_sparse(a_values, dtype=dtype)
intersection = self._set_intersection(sp_a, sp_b)
- self._assert_set_operation(expected_indices, expected_values,
- expected_shape, intersection, dtype=dtype)
+ self._assert_set_operation(
+ expected_indices,
+ expected_values,
+ expected_shape,
+ intersection,
+ dtype=dtype)
self.assertAllEqual(expected_counts,
self._set_intersection_count(sp_a, sp_b))
@@ -252,138 +279,187 @@ class SetOpsTest(test_util.TensorFlowTestCase):
def _test_set_intersection_3d(self, dtype, invalid_indices=False):
if invalid_indices:
- indices = tf.constant([
- [0, 1, 0], [0, 1, 1], # 0,1
- [1, 0, 0], # 1,0
- [1, 1, 0], [1, 1, 1], [1, 1, 2], # 1,1
- [0, 0, 0], [0, 0, 2], # 0,0
- # 2,0
- [2, 1, 1] # 2,1
- # 3,*
- ], tf.int64)
+ indices = constant_op.constant(
+ [
+ [0, 1, 0],
+ [0, 1, 1], # 0,1
+ [1, 0, 0], # 1,0
+ [1, 1, 0],
+ [1, 1, 1],
+ [1, 1, 2], # 1,1
+ [0, 0, 0],
+ [0, 0, 2], # 0,0
+ # 2,0
+ [2, 1, 1] # 2,1
+ # 3,*
+ ],
+ dtypes.int64)
else:
- indices = tf.constant([
- [0, 0, 0], [0, 0, 2], # 0,0
- [0, 1, 0], [0, 1, 1], # 0,1
- [1, 0, 0], # 1,0
- [1, 1, 0], [1, 1, 1], [1, 1, 2], # 1,1
- # 2,0
- [2, 1, 1] # 2,1
- # 3,*
- ], tf.int64)
- sp_a = tf.SparseTensor(
+ indices = constant_op.constant(
+ [
+ [0, 0, 0],
+ [0, 0, 2], # 0,0
+ [0, 1, 0],
+ [0, 1, 1], # 0,1
+ [1, 0, 0], # 1,0
+ [1, 1, 0],
+ [1, 1, 1],
+ [1, 1, 2], # 1,1
+ # 2,0
+ [2, 1, 1] # 2,1
+ # 3,*
+ ],
+ dtypes.int64)
+ sp_a = sparse_tensor_lib.SparseTensor(
indices,
- _constant([
- 1, 9, # 0,0
- 3, 3, # 0,1
- 1, # 1,0
- 9, 7, 8, # 1,1
- # 2,0
- 5 # 2,1
- # 3,*
- ], dtype),
- tf.constant([4, 2, 3], tf.int64))
- sp_b = tf.SparseTensor(
- tf.constant([
- [0, 0, 0], [0, 0, 3], # 0,0
- # 0,1
- [1, 0, 0], # 1,0
- [1, 1, 0], [1, 1, 1], # 1,1
- [2, 0, 1], # 2,0
- [2, 1, 1], # 2,1
- [3, 0, 0], # 3,0
- [3, 1, 0] # 3,1
- ], tf.int64),
- _constant([
- 1, 3, # 0,0
- # 0,1
- 3, # 1,0
- 7, 8, # 1,1
- 2, # 2,0
- 5, # 2,1
- 4, # 3,0
- 4 # 3,1
- ], dtype),
- tf.constant([4, 2, 4], tf.int64))
+ _constant(
+ [
+ 1,
+ 9, # 0,0
+ 3,
+ 3, # 0,1
+ 1, # 1,0
+ 9,
+ 7,
+ 8, # 1,1
+ # 2,0
+ 5 # 2,1
+ # 3,*
+ ],
+ dtype),
+ constant_op.constant([4, 2, 3], dtypes.int64))
+ sp_b = sparse_tensor_lib.SparseTensor(
+ constant_op.constant(
+ [
+ [0, 0, 0],
+ [0, 0, 3], # 0,0
+ # 0,1
+ [1, 0, 0], # 1,0
+ [1, 1, 0],
+ [1, 1, 1], # 1,1
+ [2, 0, 1], # 2,0
+ [2, 1, 1], # 2,1
+ [3, 0, 0], # 3,0
+ [3, 1, 0] # 3,1
+ ],
+ dtypes.int64),
+ _constant(
+ [
+ 1,
+ 3, # 0,0
+ # 0,1
+ 3, # 1,0
+ 7,
+ 8, # 1,1
+ 2, # 2,0
+ 5, # 2,1
+ 4, # 3,0
+ 4 # 3,1
+ ],
+ dtype),
+ constant_op.constant([4, 2, 4], dtypes.int64))
if invalid_indices:
- with self.assertRaisesRegexp(tf.OpError, "out of order"):
+ with self.assertRaisesRegexp(errors_impl.OpError, "out of order"):
self._set_intersection(sp_a, sp_b)
else:
expected_indices = [
- [0, 0, 0], # 0,0
- # 0,1
- # 1,0
- [1, 1, 0], [1, 1, 1], # 1,1
- # 2,0
- [2, 1, 0], # 2,1
- # 3,*
+ [0, 0, 0], # 0,0
+ # 0,1
+ # 1,0
+ [1, 1, 0],
+ [1, 1, 1], # 1,1
+ # 2,0
+ [2, 1, 0], # 2,1
+ # 3,*
]
- expected_values = _values([
- 1, # 0,0
- # 0,1
- # 1,0
- 7, 8, # 1,1
- # 2,0
- 5, # 2,1
- # 3,*
- ], dtype)
+ expected_values = _values(
+ [
+ 1, # 0,0
+ # 0,1
+ # 1,0
+ 7,
+ 8, # 1,1
+ # 2,0
+ 5, # 2,1
+ # 3,*
+ ],
+ dtype)
expected_shape = [4, 2, 2]
- expected_counts = [[
- 1, # 0,0
- 0 # 0,1
- ], [
- 0, # 1,0
- 2 # 1,1
- ], [
- 0, # 2,0
- 1 # 2,1
- ], [
- 0, # 3,0
- 0 # 3,1
- ]]
+ expected_counts = [
+ [
+ 1, # 0,0
+ 0 # 0,1
+ ],
+ [
+ 0, # 1,0
+ 2 # 1,1
+ ],
+ [
+ 0, # 2,0
+ 1 # 2,1
+ ],
+ [
+ 0, # 3,0
+ 0 # 3,1
+ ]
+ ]
# Sparse to sparse.
intersection = self._set_intersection(sp_a, sp_b)
- self._assert_set_operation(expected_indices, expected_values,
- expected_shape, intersection, dtype=dtype)
+ self._assert_set_operation(
+ expected_indices,
+ expected_values,
+ expected_shape,
+ intersection,
+ dtype=dtype)
self.assertAllEqual(expected_counts,
self._set_intersection_count(sp_a, sp_b))
# NOTE: sparse_to_dense doesn't support uint8 and uint16.
- if dtype not in [tf.uint8, tf.uint16]:
+ if dtype not in [dtypes.uint8, dtypes.uint16]:
# Dense to sparse.
- a = tf.cast(
- tf.sparse_to_dense(
+ a = math_ops.cast(
+ sparse_ops.sparse_to_dense(
sp_a.indices,
sp_a.dense_shape,
sp_a.values,
- default_value="-1" if dtype == tf.string else -1),
+ default_value="-1" if dtype == dtypes.string else -1),
dtype=dtype)
intersection = self._set_intersection(a, sp_b)
- self._assert_set_operation(expected_indices, expected_values,
- expected_shape, intersection, dtype=dtype)
- self.assertAllEqual(
- expected_counts, self._set_intersection_count(a, sp_b))
+ self._assert_set_operation(
+ expected_indices,
+ expected_values,
+ expected_shape,
+ intersection,
+ dtype=dtype)
+ self.assertAllEqual(expected_counts,
+ self._set_intersection_count(a, sp_b))
# Dense to dense.
- b = tf.cast(
- tf.sparse_to_dense(
+ b = math_ops.cast(
+ sparse_ops.sparse_to_dense(
sp_b.indices,
sp_b.dense_shape,
sp_b.values,
- default_value="-2" if dtype == tf.string else -2),
+ default_value="-2" if dtype == dtypes.string else -2),
dtype=dtype)
intersection = self._set_intersection(a, b)
- self._assert_set_operation(expected_indices, expected_values,
- expected_shape, intersection, dtype=dtype)
+ self._assert_set_operation(
+ expected_indices,
+ expected_values,
+ expected_shape,
+ intersection,
+ dtype=dtype)
self.assertAllEqual(expected_counts, self._set_intersection_count(a, b))
def _assert_shapes(self, input_tensor, result_sparse_tensor):
- expected_rows = (None if isinstance(input_tensor, tf.SparseTensor) else
- input_tensor.get_shape().as_list()[0])
- expected_rank = (None if isinstance(input_tensor, tf.SparseTensor) else
- input_tensor.get_shape().ndims)
+ expected_rows = (None if
+ isinstance(input_tensor, sparse_tensor_lib.SparseTensor)
+ else input_tensor.get_shape().as_list()[0])
+ expected_rank = (None if
+ isinstance(input_tensor, sparse_tensor_lib.SparseTensor)
+ else input_tensor.get_shape().ndims)
self.assertAllEqual((expected_rows, expected_rank),
result_sparse_tensor.indices.get_shape().as_list())
self.assertAllEqual((expected_rows,),
@@ -395,11 +471,14 @@ class SetOpsTest(test_util.TensorFlowTestCase):
# Validate that we get the same results with or without `validate_indices`,
# and with a & b swapped.
ops = (
- tf.contrib.metrics.set_intersection(a, b, validate_indices=True),
- tf.contrib.metrics.set_intersection(a, b, validate_indices=False),
- tf.contrib.metrics.set_intersection(b, a, validate_indices=True),
- tf.contrib.metrics.set_intersection(b, a, validate_indices=False),
- )
+ sets.set_intersection(
+ a, b, validate_indices=True),
+ sets.set_intersection(
+ a, b, validate_indices=False),
+ sets.set_intersection(
+ b, a, validate_indices=True),
+ sets.set_intersection(
+ b, a, validate_indices=False),)
for op in ops:
self._assert_shapes(a, op)
with self.test_session() as sess:
@@ -411,7 +490,7 @@ class SetOpsTest(test_util.TensorFlowTestCase):
return results[0]
def _set_intersection_count(self, a, b):
- op = tf.contrib.metrics.set_size(tf.contrib.metrics.set_intersection(a, b))
+ op = sets.set_size(sets.set_intersection(a, b))
with self.test_session() as sess:
return sess.run(op)
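The two helpers above compose the ops: set_size of a set_intersection yields per-row overlap counts. A toy sketch with dense inputs (values assumed, not from the test data):

# Illustration only; post-change imports.
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import sets

with session.Session() as sess:
  a = constant_op.constant([[1, 2, 3], [4, 5, 6]])
  b = constant_op.constant([[2, 3, 9], [7, 8, 9]])
  # Row 0 shares {2, 3}; row 1 shares nothing.
  print(sess.run(sets.set_size(sets.set_intersection(a, b))))  # [2 0]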
@@ -424,9 +503,8 @@ class SetOpsTest(test_util.TensorFlowTestCase):
b_values = [[], [1, 2], [1, 2, 2], []]
# a - b.
- expected_indices = [
- [0, 0], [1, 0], [1, 1], [2, 0], [2, 1], [2, 2], [3, 0], [3, 1]
- ]
+ expected_indices = [[0, 0], [1, 0], [1, 1], [2, 0], [2, 1], [2, 2], [3, 0],
+ [3, 1]]
expected_values = _values([1, 5, 9, 3, 4, 5, 1, 5], dtype)
expected_shape = [4, 3]
expected_counts = [1, 2, 3, 2]
@@ -435,16 +513,24 @@ class SetOpsTest(test_util.TensorFlowTestCase):
a = _constant(a_values, dtype=dtype)
sp_b = _dense_to_sparse(b_values, dtype=dtype)
difference = self._set_difference(a, sp_b, True)
- self._assert_set_operation(expected_indices, expected_values,
- expected_shape, difference, dtype=dtype)
+ self._assert_set_operation(
+ expected_indices,
+ expected_values,
+ expected_shape,
+ difference,
+ dtype=dtype)
self.assertAllEqual(expected_counts,
self._set_difference_count(a, sp_b, True))
# Sparse to sparse.
sp_a = _dense_to_sparse(a_values, dtype=dtype)
difference = self._set_difference(sp_a, sp_b, True)
- self._assert_set_operation(expected_indices, expected_values,
- expected_shape, difference, dtype=dtype)
+ self._assert_set_operation(
+ expected_indices,
+ expected_values,
+ expected_shape,
+ difference,
+ dtype=dtype)
self.assertAllEqual(expected_counts,
self._set_difference_count(sp_a, sp_b, True))
@@ -456,15 +542,23 @@ class SetOpsTest(test_util.TensorFlowTestCase):
# Dense to sparse.
difference = self._set_difference(a, sp_b, False)
- self._assert_set_operation(expected_indices, expected_values,
- expected_shape, difference, dtype=dtype)
+ self._assert_set_operation(
+ expected_indices,
+ expected_values,
+ expected_shape,
+ difference,
+ dtype=dtype)
self.assertAllEqual(expected_counts,
self._set_difference_count(a, sp_b, False))
# Sparse to sparse.
difference = self._set_difference(sp_a, sp_b, False)
- self._assert_set_operation(expected_indices, expected_values,
- expected_shape, difference, dtype=dtype)
+ self._assert_set_operation(
+ expected_indices,
+ expected_values,
+ expected_shape,
+ difference,
+ dtype=dtype)
self.assertAllEqual(expected_counts,
self._set_difference_count(sp_a, sp_b, False))
@@ -486,8 +580,12 @@ class SetOpsTest(test_util.TensorFlowTestCase):
a = _constant(a_values, dtype=dtype)
b = _constant(b_values, dtype=dtype)
difference = self._set_difference(a, b, True)
- self._assert_set_operation(expected_indices, expected_values,
- expected_shape, difference, dtype=dtype)
+ self._assert_set_operation(
+ expected_indices,
+ expected_values,
+ expected_shape,
+ difference,
+ dtype=dtype)
self.assertAllEqual(expected_counts, self._set_difference_count(a, b, True))
# b - a.
@@ -498,8 +596,12 @@ class SetOpsTest(test_util.TensorFlowTestCase):
# Dense to dense.
difference = self._set_difference(a, b, False)
- self._assert_set_operation(expected_indices, expected_values,
- expected_shape, difference, dtype=dtype)
+ self._assert_set_operation(
+ expected_indices,
+ expected_values,
+ expected_shape,
+ difference,
+ dtype=dtype)
self.assertAllEqual(expected_counts,
self._set_difference_count(a, b, False))
@@ -519,8 +621,12 @@ class SetOpsTest(test_util.TensorFlowTestCase):
expected_counts = [0, 2, 3, 2]
difference = self._set_difference(sp_a, sp_b, True)
- self._assert_set_operation(expected_indices, expected_values,
- expected_shape, difference, dtype=dtype)
+ self._assert_set_operation(
+ expected_indices,
+ expected_values,
+ expected_shape,
+ difference,
+ dtype=dtype)
self.assertAllEqual(expected_counts,
self._set_difference_count(sp_a, sp_b, True))
@@ -531,8 +637,12 @@ class SetOpsTest(test_util.TensorFlowTestCase):
expected_counts = [0, 1, 2, 0]
difference = self._set_difference(sp_a, sp_b, False)
- self._assert_set_operation(expected_indices, expected_values,
- expected_shape, difference, dtype=dtype)
+ self._assert_set_operation(
+ expected_indices,
+ expected_values,
+ expected_shape,
+ difference,
+ dtype=dtype)
self.assertAllEqual(expected_counts,
self._set_difference_count(sp_a, sp_b, False))
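set_difference adds the aminusb switch, and the same size-of-result composition gives per-row counts. A toy sketch (dense inputs, values assumed):

# Illustration only; post-change imports.
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import sets

with session.Session() as sess:
  a = constant_op.constant([[1, 2, 3]])
  b = constant_op.constant([[2, 3, 4]])
  print(sess.run(sets.set_size(sets.set_difference(a, b, aminusb=True))))   # [1], i.e. {1}
  print(sess.run(sets.set_size(sets.set_difference(a, b, aminusb=False))))  # [1], i.e. {4}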
@@ -555,19 +665,25 @@ class SetOpsTest(test_util.TensorFlowTestCase):
sp_b = _dense_to_sparse(b_values, dtype=dtype)
difference = self._set_difference(a, sp_b, True)
self._assert_set_operation(
- expected_indices, expected_values, expected_shape, difference,
+ expected_indices,
+ expected_values,
+ expected_shape,
+ difference,
dtype=dtype)
- self.assertAllEqual(
- expected_counts, self._set_difference_count(a, sp_b, True))
+ self.assertAllEqual(expected_counts,
+ self._set_difference_count(a, sp_b, True))
# Sparse to sparse.
sp_a = _dense_to_sparse(a_values, dtype=dtype)
difference = self._set_difference(sp_a, sp_b, True)
self._assert_set_operation(
- expected_indices, expected_values, expected_shape, difference,
+ expected_indices,
+ expected_values,
+ expected_shape,
+ difference,
dtype=dtype)
- self.assertAllEqual(
- expected_counts, self._set_difference_count(a, sp_b, True))
+ self.assertAllEqual(expected_counts,
+ self._set_difference_count(a, sp_b, True))
# b - a.
expected_indices = [[0, 0]]
@@ -578,18 +694,24 @@ class SetOpsTest(test_util.TensorFlowTestCase):
# Dense to sparse.
difference = self._set_difference(a, sp_b, False)
self._assert_set_operation(
- expected_indices, expected_values, expected_shape, difference,
+ expected_indices,
+ expected_values,
+ expected_shape,
+ difference,
dtype=dtype)
- self.assertAllEqual(
- expected_counts, self._set_difference_count(a, sp_b, False))
+ self.assertAllEqual(expected_counts,
+ self._set_difference_count(a, sp_b, False))
# Sparse to sparse.
difference = self._set_difference(sp_a, sp_b, False)
self._assert_set_operation(
- expected_indices, expected_values, expected_shape, difference,
+ expected_indices,
+ expected_values,
+ expected_shape,
+ difference,
dtype=dtype)
- self.assertAllEqual(
- expected_counts, self._set_difference_count(a, sp_b, False))
+ self.assertAllEqual(expected_counts,
+ self._set_difference_count(a, sp_b, False))
def test_sparse_set_difference_3d(self):
for dtype in _DTYPES:
@@ -601,64 +723,90 @@ class SetOpsTest(test_util.TensorFlowTestCase):
def _test_sparse_set_difference_3d(self, dtype, invalid_indices=False):
if invalid_indices:
- indices = tf.constant([
- [0, 1, 0], [0, 1, 1], # 0,1
- [1, 0, 0], # 1,0
- [1, 1, 0], [1, 1, 1], [1, 1, 2], # 1,1
- [0, 0, 0], [0, 0, 2], # 0,0
- # 2,0
- [2, 1, 1] # 2,1
- # 3,*
- ], tf.int64)
+ indices = constant_op.constant(
+ [
+ [0, 1, 0],
+ [0, 1, 1], # 0,1
+ [1, 0, 0], # 1,0
+ [1, 1, 0],
+ [1, 1, 1],
+ [1, 1, 2], # 1,1
+ [0, 0, 0],
+ [0, 0, 2], # 0,0
+ # 2,0
+ [2, 1, 1] # 2,1
+ # 3,*
+ ],
+ dtypes.int64)
else:
- indices = tf.constant([
- [0, 0, 0], [0, 0, 2], # 0,0
- [0, 1, 0], [0, 1, 1], # 0,1
- [1, 0, 0], # 1,0
- [1, 1, 0], [1, 1, 1], [1, 1, 2], # 1,1
- # 2,0
- [2, 1, 1] # 2,1
- # 3,*
- ], tf.int64)
- sp_a = tf.SparseTensor(
+ indices = constant_op.constant(
+ [
+ [0, 0, 0],
+ [0, 0, 2], # 0,0
+ [0, 1, 0],
+ [0, 1, 1], # 0,1
+ [1, 0, 0], # 1,0
+ [1, 1, 0],
+ [1, 1, 1],
+ [1, 1, 2], # 1,1
+ # 2,0
+ [2, 1, 1] # 2,1
+ # 3,*
+ ],
+ dtypes.int64)
+ sp_a = sparse_tensor_lib.SparseTensor(
indices,
- _constant([
- 1, 9, # 0,0
- 3, 3, # 0,1
- 1, # 1,0
- 9, 7, 8, # 1,1
- # 2,0
- 5 # 2,1
- # 3,*
- ], dtype),
- tf.constant([4, 2, 3], tf.int64))
- sp_b = tf.SparseTensor(
- tf.constant([
- [0, 0, 0], [0, 0, 3], # 0,0
- # 0,1
- [1, 0, 0], # 1,0
- [1, 1, 0], [1, 1, 1], # 1,1
- [2, 0, 1], # 2,0
- [2, 1, 1], # 2,1
- [3, 0, 0], # 3,0
- [3, 1, 0] # 3,1
- ], tf.int64),
- _constant([
- 1, 3, # 0,0
- # 0,1
- 3, # 1,0
- 7, 8, # 1,1
- 2, # 2,0
- 5, # 2,1
- 4, # 3,0
- 4 # 3,1
- ], dtype),
- tf.constant([4, 2, 4], tf.int64))
+ _constant(
+ [
+ 1,
+ 9, # 0,0
+ 3,
+ 3, # 0,1
+ 1, # 1,0
+ 9,
+ 7,
+ 8, # 1,1
+ # 2,0
+ 5 # 2,1
+ # 3,*
+ ],
+ dtype),
+ constant_op.constant([4, 2, 3], dtypes.int64))
+ sp_b = sparse_tensor_lib.SparseTensor(
+ constant_op.constant(
+ [
+ [0, 0, 0],
+ [0, 0, 3], # 0,0
+ # 0,1
+ [1, 0, 0], # 1,0
+ [1, 1, 0],
+ [1, 1, 1], # 1,1
+ [2, 0, 1], # 2,0
+ [2, 1, 1], # 2,1
+ [3, 0, 0], # 3,0
+ [3, 1, 0] # 3,1
+ ],
+ dtypes.int64),
+ _constant(
+ [
+ 1,
+ 3, # 0,0
+ # 0,1
+ 3, # 1,0
+ 7,
+ 8, # 1,1
+ 2, # 2,0
+ 5, # 2,1
+ 4, # 3,0
+ 4 # 3,1
+ ],
+ dtype),
+ constant_op.constant([4, 2, 4], dtypes.int64))
if invalid_indices:
- with self.assertRaisesRegexp(tf.OpError, "out of order"):
+ with self.assertRaisesRegexp(errors_impl.OpError, "out of order"):
self._set_difference(sp_a, sp_b, False)
- with self.assertRaisesRegexp(tf.OpError, "out of order"):
+ with self.assertRaisesRegexp(errors_impl.OpError, "out of order"):
self._set_difference(sp_a, sp_b, True)
else:
# a-b
@@ -667,77 +815,99 @@ class SetOpsTest(test_util.TensorFlowTestCase):
[0, 1, 0], # 0,1
[1, 0, 0], # 1,0
[1, 1, 0], # 1,1
- # 2,*
- # 3,*
+ # 2,*
+ # 3,*
]
- expected_values = _values([
- 9, # 0,0
- 3, # 0,1
- 1, # 1,0
- 9, # 1,1
+ expected_values = _values(
+ [
+ 9, # 0,0
+ 3, # 0,1
+ 1, # 1,0
+ 9, # 1,1
# 2,*
# 3,*
- ], dtype)
+ ],
+ dtype)
expected_shape = [4, 2, 1]
- expected_counts = [[
- 1, # 0,0
- 1 # 0,1
- ], [
- 1, # 1,0
- 1 # 1,1
- ], [
- 0, # 2,0
- 0 # 2,1
- ], [
- 0, # 3,0
- 0 # 3,1
- ]]
+ expected_counts = [
+ [
+ 1, # 0,0
+ 1 # 0,1
+ ],
+ [
+ 1, # 1,0
+ 1 # 1,1
+ ],
+ [
+ 0, # 2,0
+ 0 # 2,1
+ ],
+ [
+ 0, # 3,0
+ 0 # 3,1
+ ]
+ ]
difference = self._set_difference(sp_a, sp_b, True)
- self._assert_set_operation(expected_indices, expected_values,
- expected_shape, difference, dtype=dtype)
- self.assertAllEqual(
- expected_counts, self._set_difference_count(sp_a, sp_b))
+ self._assert_set_operation(
+ expected_indices,
+ expected_values,
+ expected_shape,
+ difference,
+ dtype=dtype)
+ self.assertAllEqual(expected_counts,
+ self._set_difference_count(sp_a, sp_b))
# b-a
expected_indices = [
[0, 0, 0], # 0,0
- # 0,1
+ # 0,1
[1, 0, 0], # 1,0
- # 1,1
+ # 1,1
[2, 0, 0], # 2,0
- # 2,1
+ # 2,1
[3, 0, 0], # 3,0
- [3, 1, 0] # 3,1
+ [3, 1, 0] # 3,1
]
- expected_values = _values([
- 3, # 0,0
+ expected_values = _values(
+ [
+ 3, # 0,0
# 0,1
- 3, # 1,0
+ 3, # 1,0
# 1,1
- 2, # 2,0
+ 2, # 2,0
# 2,1
- 4, # 3,0
- 4, # 3,1
- ], dtype)
+ 4, # 3,0
+ 4, # 3,1
+ ],
+ dtype)
expected_shape = [4, 2, 1]
- expected_counts = [[
- 1, # 0,0
- 0 # 0,1
- ], [
- 1, # 1,0
- 0 # 1,1
- ], [
- 1, # 2,0
- 0 # 2,1
- ], [
- 1, # 3,0
- 1 # 3,1
- ]]
+ expected_counts = [
+ [
+ 1, # 0,0
+ 0 # 0,1
+ ],
+ [
+ 1, # 1,0
+ 0 # 1,1
+ ],
+ [
+ 1, # 2,0
+ 0 # 2,1
+ ],
+ [
+ 1, # 3,0
+ 1 # 3,1
+ ]
+ ]
difference = self._set_difference(sp_a, sp_b, False)
- self._assert_set_operation(expected_indices, expected_values,
- expected_shape, difference, dtype=dtype)
+ self._assert_set_operation(
+ expected_indices,
+ expected_values,
+ expected_shape,
+ difference,
+ dtype=dtype)
self.assertAllEqual(expected_counts,
self._set_difference_count(sp_a, sp_b, False))
@@ -745,15 +915,14 @@ class SetOpsTest(test_util.TensorFlowTestCase):
# Validate that we get the same results with or without `validate_indices`,
# and with a & b swapped.
ops = (
- tf.contrib.metrics.set_difference(
+ sets.set_difference(
a, b, aminusb=aminusb, validate_indices=True),
- tf.contrib.metrics.set_difference(
+ sets.set_difference(
a, b, aminusb=aminusb, validate_indices=False),
- tf.contrib.metrics.set_difference(
+ sets.set_difference(
b, a, aminusb=not aminusb, validate_indices=True),
- tf.contrib.metrics.set_difference(
- b, a, aminusb=not aminusb, validate_indices=False),
- )
+ sets.set_difference(
+ b, a, aminusb=not aminusb, validate_indices=False),)
for op in ops:
self._assert_shapes(a, op)
with self.test_session() as sess:
@@ -765,8 +934,7 @@ class SetOpsTest(test_util.TensorFlowTestCase):
return results[0]
def _set_difference_count(self, a, b, aminusb=True):
- op = tf.contrib.metrics.set_size(
- tf.contrib.metrics.set_difference(a, b, aminusb))
+ op = sets.set_size(sets.set_difference(a, b, aminusb))
with self.test_session() as sess:
return sess.run(op)
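
The `_set_difference` and `_set_difference_count` helpers above pin down row-wise set semantics: the last dimension of each operand is treated as a set per `[i, j]` position. A tiny NumPy model of that behavior (an illustration only, not TensorFlow code):

    import numpy as np

    def dense_set_difference(a, b, aminusb=True):
      # a, b: lists of rows; the last axis holds the per-position set.
      out = []
      for row_a, row_b in zip(a, b):
        sa, sb = set(row_a), set(row_b)
        out.append(sorted(sa - sb if aminusb else sb - sa))
      return out

    print(dense_set_difference([[1, 9], [3, 3]], [[1, 3], [3, 7]]))
    # [[9], []] -- per-position results whose lengths match the asserted counts
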
@@ -786,15 +954,15 @@ class SetOpsTest(test_util.TensorFlowTestCase):
a = _constant(a_values, dtype=dtype)
sp_b = _dense_to_sparse(b_values, dtype=dtype)
union = self._set_union(a, sp_b)
- self._assert_set_operation(expected_indices, expected_values,
- expected_shape, union, dtype=dtype)
+ self._assert_set_operation(
+ expected_indices, expected_values, expected_shape, union, dtype=dtype)
self.assertAllEqual(expected_counts, self._set_union_count(a, sp_b))
# Sparse to sparse.
sp_a = _dense_to_sparse(a_values, dtype=dtype)
union = self._set_union(sp_a, sp_b)
- self._assert_set_operation(expected_indices, expected_values,
- expected_shape, union, dtype=dtype)
+ self._assert_set_operation(
+ expected_indices, expected_values, expected_shape, union, dtype=dtype)
self.assertAllEqual(expected_counts, self._set_union_count(sp_a, sp_b))
def test_dense_set_union_multirow_2d(self):
@@ -813,8 +981,8 @@ class SetOpsTest(test_util.TensorFlowTestCase):
a = _constant(a_values, dtype=dtype)
b = _constant(b_values, dtype=dtype)
union = self._set_union(a, b)
- self._assert_set_operation(expected_indices, expected_values,
- expected_shape, union, dtype=dtype)
+ self._assert_set_operation(
+ expected_indices, expected_values, expected_shape, union, dtype=dtype)
self.assertAllEqual(expected_counts, self._set_union_count(a, b))
def test_set_union_duplicates_2d(self):
@@ -853,113 +1021,163 @@ class SetOpsTest(test_util.TensorFlowTestCase):
def _test_sparse_set_union_3d(self, dtype, invalid_indices=False):
if invalid_indices:
- indices = tf.constant([
- [0, 1, 0], [0, 1, 1], # 0,1
- [1, 0, 0], # 1,0
- [0, 0, 0], [0, 0, 2], # 0,0
- [1, 1, 0], [1, 1, 1], [1, 1, 2], # 1,1
- # 2,0
- [2, 1, 1] # 2,1
- # 3,*
- ], tf.int64)
+ indices = constant_op.constant(
+ [
+ [0, 1, 0],
+ [0, 1, 1], # 0,1
+ [1, 0, 0], # 1,0
+ [0, 0, 0],
+ [0, 0, 2], # 0,0
+ [1, 1, 0],
+ [1, 1, 1],
+ [1, 1, 2], # 1,1
+ # 2,0
+ [2, 1, 1] # 2,1
+ # 3,*
+ ],
+ dtypes.int64)
else:
- indices = tf.constant([
- [0, 0, 0], [0, 0, 2], # 0,0
- [0, 1, 0], [0, 1, 1], # 0,1
- [1, 0, 0], # 1,0
- [1, 1, 0], [1, 1, 1], [1, 1, 2], # 1,1
- # 2,0
- [2, 1, 1] # 2,1
- # 3,*
- ], tf.int64)
- sp_a = tf.SparseTensor(
+ indices = constant_op.constant(
+ [
+ [0, 0, 0],
+ [0, 0, 2], # 0,0
+ [0, 1, 0],
+ [0, 1, 1], # 0,1
+ [1, 0, 0], # 1,0
+ [1, 1, 0],
+ [1, 1, 1],
+ [1, 1, 2], # 1,1
+ # 2,0
+ [2, 1, 1] # 2,1
+ # 3,*
+ ],
+ dtypes.int64)
+ sp_a = sparse_tensor_lib.SparseTensor(
indices,
- _constant([
- 1, 9, # 0,0
- 3, 3, # 0,1
- 1, # 1,0
- 9, 7, 8, # 1,1
- # 2,0
- 5 # 2,1
- # 3,*
- ], dtype),
- tf.constant([4, 2, 3], tf.int64))
- sp_b = tf.SparseTensor(
- tf.constant([
- [0, 0, 0], [0, 0, 3], # 0,0
- # 0,1
- [1, 0, 0], # 1,0
- [1, 1, 0], [1, 1, 1], # 1,1
- [2, 0, 1], # 2,0
- [2, 1, 1], # 2,1
- [3, 0, 0], # 3,0
- [3, 1, 0] # 3,1
- ], tf.int64),
- _constant([
- 1, 3, # 0,0
- # 0,1
- 3, # 1,0
- 7, 8, # 1,1
- 2, # 2,0
- 5, # 2,1
- 4, # 3,0
- 4 # 3,1
- ], dtype),
- tf.constant([4, 2, 4], tf.int64))
+ _constant(
+ [
+ 1,
+ 9, # 0,0
+ 3,
+ 3, # 0,1
+ 1, # 1,0
+ 9,
+ 7,
+ 8, # 1,1
+ # 2,0
+ 5 # 2,1
+ # 3,*
+ ],
+ dtype),
+ constant_op.constant([4, 2, 3], dtypes.int64))
+ sp_b = sparse_tensor_lib.SparseTensor(
+ constant_op.constant(
+ [
+ [0, 0, 0],
+ [0, 0, 3], # 0,0
+ # 0,1
+ [1, 0, 0], # 1,0
+ [1, 1, 0],
+ [1, 1, 1], # 1,1
+ [2, 0, 1], # 2,0
+ [2, 1, 1], # 2,1
+ [3, 0, 0], # 3,0
+ [3, 1, 0] # 3,1
+ ],
+ dtypes.int64),
+ _constant(
+ [
+ 1,
+ 3, # 0,0
+ # 0,1
+ 3, # 1,0
+ 7,
+ 8, # 1,1
+ 2, # 2,0
+ 5, # 2,1
+ 4, # 3,0
+ 4 # 3,1
+ ],
+ dtype),
+ constant_op.constant([4, 2, 4], dtypes.int64))
if invalid_indices:
- with self.assertRaisesRegexp(tf.OpError, "out of order"):
+ with self.assertRaisesRegexp(errors_impl.OpError, "out of order"):
self._set_union(sp_a, sp_b)
else:
expected_indices = [
- [0, 0, 0], [0, 0, 1], [0, 0, 2], # 0,0
- [0, 1, 0], # 0,1
- [1, 0, 0], [1, 0, 1], # 1,0
- [1, 1, 0], [1, 1, 1], [1, 1, 2], # 1,1
- [2, 0, 0], # 2,0
- [2, 1, 0], # 2,1
- [3, 0, 0], # 3,0
- [3, 1, 0], # 3,1
+ [0, 0, 0],
+ [0, 0, 1],
+ [0, 0, 2], # 0,0
+ [0, 1, 0], # 0,1
+ [1, 0, 0],
+ [1, 0, 1], # 1,0
+ [1, 1, 0],
+ [1, 1, 1],
+ [1, 1, 2], # 1,1
+ [2, 0, 0], # 2,0
+ [2, 1, 0], # 2,1
+ [3, 0, 0], # 3,0
+ [3, 1, 0], # 3,1
]
- expected_values = _values([
- 1, 3, 9, # 0,0
- 3, # 0,1
- 1, 3, # 1,0
- 7, 8, 9, # 1,1
- 2, # 2,0
- 5, # 2,1
- 4, # 3,0
- 4, # 3,1
- ], dtype)
+ expected_values = _values(
+ [
+ 1,
+ 3,
+ 9, # 0,0
+ 3, # 0,1
+ 1,
+ 3, # 1,0
+ 7,
+ 8,
+ 9, # 1,1
+ 2, # 2,0
+ 5, # 2,1
+ 4, # 3,0
+ 4, # 3,1
+ ],
+ dtype)
expected_shape = [4, 2, 3]
- expected_counts = [[
- 3, # 0,0
- 1 # 0,1
- ], [
- 2, # 1,0
- 3 # 1,1
- ], [
- 1, # 2,0
- 1 # 2,1
- ], [
- 1, # 3,0
- 1 # 3,1
- ]]
+ expected_counts = [
+ [
+ 3, # 0,0
+ 1 # 0,1
+ ],
+ [
+ 2, # 1,0
+ 3 # 1,1
+ ],
+ [
+ 1, # 2,0
+ 1 # 2,1
+ ],
+ [
+ 1, # 3,0
+ 1 # 3,1
+ ]
+ ]
intersection = self._set_union(sp_a, sp_b)
- self._assert_set_operation(expected_indices, expected_values,
- expected_shape, intersection, dtype=dtype)
+ self._assert_set_operation(
+ expected_indices,
+ expected_values,
+ expected_shape,
+ intersection,
+ dtype=dtype)
self.assertAllEqual(expected_counts, self._set_union_count(sp_a, sp_b))
def _set_union(self, a, b):
# Validate that we get the same results with or without `validate_indices`,
# and with a & b swapped.
ops = (
- tf.contrib.metrics.set_union(a, b, validate_indices=True),
- tf.contrib.metrics.set_union(a, b, validate_indices=False),
- tf.contrib.metrics.set_union(b, a, validate_indices=True),
- tf.contrib.metrics.set_union(b, a, validate_indices=False),
- )
+ sets.set_union(
+ a, b, validate_indices=True),
+ sets.set_union(
+ a, b, validate_indices=False),
+ sets.set_union(
+ b, a, validate_indices=True),
+ sets.set_union(
+ b, a, validate_indices=False),)
for op in ops:
self._assert_shapes(a, op)
with self.test_session() as sess:
@@ -971,7 +1189,7 @@ class SetOpsTest(test_util.TensorFlowTestCase):
return results[0]
def _set_union_count(self, a, b):
- op = tf.contrib.metrics.set_size(tf.contrib.metrics.set_union(a, b))
+ op = sets.set_size(sets.set_union(a, b))
with self.test_session() as sess:
return sess.run(op)
@@ -983,22 +1201,23 @@ class SetOpsTest(test_util.TensorFlowTestCase):
expected_set = set()
actual_set = set()
last_indices = None
- for indices, expected_value, actual_value in zip(
- expected_indices, expected_values, sparse_tensor.values):
- if dtype == tf.string:
+ for indices, expected_value, actual_value in zip(expected_indices,
+ expected_values,
+ sparse_tensor.values):
+ if dtype == dtypes.string:
actual_value = actual_value.decode("utf-8")
if last_indices and (last_indices[:-1] != indices[:-1]):
- self.assertEqual(
- expected_set, actual_set, "Expected %s, got %s, at %s." % (
- expected_set, actual_set, indices))
+ self.assertEqual(expected_set, actual_set,
+ "Expected %s, got %s, at %s." % (expected_set,
+ actual_set, indices))
expected_set.clear()
actual_set.clear()
expected_set.add(expected_value)
actual_set.add(actual_value)
last_indices = indices
- self.assertEqual(
- expected_set, actual_set, "Expected %s, got %s, at %s." % (
- expected_set, actual_set, last_indices))
+ self.assertEqual(expected_set, actual_set,
+ "Expected %s, got %s, at %s." % (expected_set, actual_set,
+ last_indices))
self.assertAllEqual(expected_shape, sparse_tensor.dense_shape)
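
Every file in this change follows the same recipe, visible in the hunks above: drop the top-level `import tensorflow as tf` (the "hourglass" import) and reach each symbol through the module that defines it. A minimal sketch of the before/after, reusing the aliases the diff introduces (assumes a TensorFlow checkout from around this commit):

    # Before: one wide import, everything funneled through the `tf` namespace.
    #   import tensorflow as tf
    #   sp = tf.SparseTensor(indices, values, tf.constant([4, 2, 3], tf.int64))

    # After: narrow imports from the defining modules.
    from tensorflow.python.framework import constant_op
    from tensorflow.python.framework import dtypes
    from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib

    indices = constant_op.constant([[0, 0, 0], [0, 0, 2]], dtypes.int64)
    values = constant_op.constant([1, 9], dtypes.int64)
    sp = sparse_tensor_lib.SparseTensor(
        indices, values, constant_op.constant([4, 2, 3], dtypes.int64))
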
diff --git a/tensorflow/python/kernel_tests/shape_ops_test.py b/tensorflow/python/kernel_tests/shape_ops_test.py
index 03110b0e9b..875ac3a427 100644
--- a/tensorflow/python/kernel_tests/shape_ops_test.py
+++ b/tensorflow/python/kernel_tests/shape_ops_test.py
@@ -12,18 +12,24 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for various tensorflow.ops.tf."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
-
+from tensorflow.core.framework import node_def_pb2
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import importer
+from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import gradient_checker
+from tensorflow.python.ops import gradients_impl
+from tensorflow.python.platform import test
# TODO(zongheng): it'd be great to factor out this function and various random
@@ -36,16 +42,17 @@ def _sparsify(x, thresh=0.5, index_dtype=np.int64):
x_values = x[non_zero]
x_shape = x.shape
- return tf.SparseTensor(
+ return sparse_tensor.SparseTensor(
indices=x_indices, values=x_values, dense_shape=x_shape), len(x_values)
-class ShapeOpsTest(tf.test.TestCase):
+
+class ShapeOpsTest(test.TestCase):
def _compareShape(self, x, use_gpu=False):
np_ans = np.array(np.shape(x))
with self.test_session(use_gpu=use_gpu):
- tf_ans = tf.shape(x)
- tf_ans_64 = tf.shape(x, out_type=tf.int64)
+ tf_ans = array_ops.shape(x)
+ tf_ans_64 = array_ops.shape(x, out_type=dtypes.int64)
result = tf_ans.eval()
result_64 = tf_ans_64.eval()
self.assertAllEqual(np_ans, result)
@@ -56,7 +63,7 @@ class ShapeOpsTest(tf.test.TestCase):
np_ans = np.array(np.shape(x_np))
x_tf, unused_nnz = _sparsify(x_np)
with self.test_session(use_gpu=use_gpu):
- tf_ans = tf.shape(x_tf)
+ tf_ans = array_ops.shape(x_tf)
result = tf_ans.eval()
self.assertAllEqual(np_ans, result)
self.assertShapeEqual(np_ans, tf_ans)
@@ -64,8 +71,8 @@ class ShapeOpsTest(tf.test.TestCase):
def _compareShapeN(self, x, use_gpu=False):
np_ans = np.array(np.shape(x))
with self.test_session(use_gpu=use_gpu) as sess:
- tf_ans = tf.shape_n([x, x, x])
- tf_ans_64 = tf.shape_n([x, x, x], out_type=tf.int64)
+ tf_ans = array_ops.shape_n([x, x, x])
+ tf_ans_64 = array_ops.shape_n([x, x, x], out_type=dtypes.int64)
result = sess.run(tf_ans)
result_64 = sess.run(tf_ans_64)
for i in range(3):
@@ -76,7 +83,7 @@ class ShapeOpsTest(tf.test.TestCase):
def _compareRank(self, x, use_gpu=False):
np_ans = np.asarray(np.ndim(x))
with self.test_session(use_gpu=use_gpu):
- tf_ans = tf.rank(x)
+ tf_ans = array_ops.rank(x)
result = tf_ans.eval()
self.assertAllEqual(np_ans, result)
self.assertShapeEqual(np_ans, tf_ans)
@@ -85,7 +92,7 @@ class ShapeOpsTest(tf.test.TestCase):
np_ans = np.asarray(np.ndim(x_np))
x_tf, unused_nnz = _sparsify(x_np)
with self.test_session(use_gpu=use_gpu):
- tf_ans = tf.rank(x_tf)
+ tf_ans = array_ops.rank(x_tf)
result = tf_ans.eval()
self.assertAllEqual(np_ans, result)
self.assertShapeEqual(np_ans, tf_ans)
@@ -93,9 +100,9 @@ class ShapeOpsTest(tf.test.TestCase):
def _compareSize(self, x, use_gpu=False):
np_ans = np.asarray(np.size(x))
with self.test_session(use_gpu=use_gpu):
- tf_ans = tf.size(x)
+ tf_ans = array_ops.size(x)
result = tf_ans.eval()
- tf_ans_64 = tf.size(x, out_type=tf.int64)
+ tf_ans_64 = array_ops.size(x, out_type=dtypes.int64)
result_64 = tf_ans_64.eval()
self.assertAllEqual(np_ans, result)
self.assertAllEqual(np_ans, result_64)
@@ -105,7 +112,7 @@ class ShapeOpsTest(tf.test.TestCase):
np_ans = np.asarray(np.size(x_np))
x_tf, unused_nnz = _sparsify(x_np)
with self.test_session(use_gpu=use_gpu):
- tf_ans = tf.size(x_tf)
+ tf_ans = array_ops.size(x_tf)
result = tf_ans.eval()
self.assertAllEqual(np_ans, result)
self.assertShapeEqual(np_ans, tf_ans)
@@ -144,23 +151,23 @@ class ShapeOpsTest(tf.test.TestCase):
# as passing at time of writing.
def _test64BitOutput(self):
with self.test_session():
- inp = tf.zeros([2**31])
+ inp = array_ops.zeros([2**31])
num_elements = array_ops.size_internal(
- inp, optimize=False, out_type=tf.int64)
+ inp, optimize=False, out_type=dtypes.int64)
self.assertEqual(2**31, num_elements.eval())
# Too large for tf.int32 output.
- with self.assertRaises(tf.errors.InvalidArgumentError):
+ with self.assertRaises(errors_impl.InvalidArgumentError):
with self.test_session():
- inp = tf.zeros([2**31])
+ inp = array_ops.zeros([2**31])
num_elements = array_ops.size_internal(
- inp, optimize=False, out_type=tf.int32)
+ inp, optimize=False, out_type=dtypes.int32)
self.assertEqual(2**31, num_elements.eval())
def _compareExpandDims(self, x, dim, use_gpu):
np_ans = np.expand_dims(x, axis=dim)
with self.test_session(use_gpu=use_gpu):
- tensor = tf.expand_dims(x, dim)
+ tensor = array_ops.expand_dims(x, dim)
tf_ans = tensor.eval()
self.assertShapeEqual(np_ans, tensor)
self.assertAllEqual(np_ans, tf_ans)
@@ -192,33 +199,36 @@ class ShapeOpsTest(tf.test.TestCase):
def testExpandDimsErrors(self):
with self.test_session():
- self.assertRaises(ValueError, tf.expand_dims, np.zeros([2, 3, 5]), -5)
- self.assertRaises(ValueError, tf.expand_dims, np.zeros([2, 3, 5]), 4)
+ self.assertRaises(ValueError, array_ops.expand_dims,
+ np.zeros([2, 3, 5]), -5)
+ self.assertRaises(ValueError, array_ops.expand_dims,
+ np.zeros([2, 3, 5]), 4)
def testExpandDimsGradient(self):
with self.test_session():
- inp = tf.constant(np.random.rand(4, 2).astype("f"),
- dtype=tf.float32)
- squeezed = tf.expand_dims(inp, 1)
+ inp = constant_op.constant(
+ np.random.rand(4, 2).astype("f"), dtype=dtypes.float32)
+ squeezed = array_ops.expand_dims(inp, 1)
- err = tf.test.compute_gradient_error(inp, [4, 2], squeezed, [4, 1, 2])
+ err = gradient_checker.compute_gradient_error(inp, [4, 2], squeezed,
+ [4, 1, 2])
self.assertLess(err, 1e-3)
def testExpandDimsScalar(self):
with self.test_session():
- inp = tf.constant(7)
- self.assertAllEqual([7], tf.expand_dims(inp, 0).eval())
- self.assertAllEqual([7], tf.expand_dims(inp, -1).eval())
+ inp = constant_op.constant(7)
+ self.assertAllEqual([7], array_ops.expand_dims(inp, 0).eval())
+ self.assertAllEqual([7], array_ops.expand_dims(inp, -1).eval())
def _compareSqueeze(self, x, squeeze_dims, use_gpu):
with self.test_session(use_gpu=use_gpu):
if squeeze_dims:
np_ans = np.squeeze(x, axis=tuple(squeeze_dims))
- tensor = tf.squeeze(x, squeeze_dims)
+ tensor = array_ops.squeeze(x, squeeze_dims)
tf_ans = tensor.eval()
else:
np_ans = np.squeeze(x)
- tensor = tf.squeeze(x)
+ tensor = array_ops.squeeze(x)
tf_ans = tensor.eval()
self.assertShapeEqual(np_ans, tensor)
self.assertAllEqual(np_ans, tf_ans)
@@ -256,7 +266,7 @@ class ShapeOpsTest(tf.test.TestCase):
# Verify that we do the same.
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
- tensor = tf.squeeze(np.zeros([1, 1, 1]), [])
+ tensor = array_ops.squeeze(np.zeros([1, 1, 1]), [])
self.assertEqual(np.shape(1), tensor.get_shape())
tf_ans = tensor.eval()
self.assertEqual(np.shape(1), tf_ans.shape)
@@ -268,55 +278,61 @@ class ShapeOpsTest(tf.test.TestCase):
self._compareSqueezeAll(input_1x1x3)
self._compareSqueezeAll(input_1x1x3, [0])
self._compareSqueezeAll(input_1x1x3, [1])
- self.assertRaises(ValueError, tf.squeeze, input_1x1x3, [2])
+ self.assertRaises(ValueError, array_ops.squeeze, input_1x1x3, [2])
def testSqueezeErrors(self):
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
- self.assertRaises(ValueError, tf.squeeze, np.zeros([1, 2, 1]), [-4])
- self.assertRaises(ValueError, tf.squeeze, np.zeros([1, 2, 1]), [0, -4])
- self.assertRaises(ValueError, tf.squeeze, np.zeros([1, 2, 1]), [3])
- self.assertRaises(ValueError, tf.squeeze, np.zeros([1, 2, 1]), [2, 3])
+ self.assertRaises(ValueError, array_ops.squeeze,
+ np.zeros([1, 2, 1]), [-4])
+ self.assertRaises(ValueError, array_ops.squeeze,
+ np.zeros([1, 2, 1]), [0, -4])
+ self.assertRaises(ValueError, array_ops.squeeze,
+ np.zeros([1, 2, 1]), [3])
+ self.assertRaises(ValueError, array_ops.squeeze,
+ np.zeros([1, 2, 1]), [2, 3])
def testSqueezeGradient(self):
with self.test_session():
inp = np.random.rand(4, 2).astype("f")
- a = tf.reshape(inp, [4, 1, 2])
- squeezed = tf.squeeze(a, [])
+ a = array_ops.reshape(inp, [4, 1, 2])
+ squeezed = array_ops.squeeze(a, [])
- err = tf.test.compute_gradient_error(a, [4, 1, 2], squeezed, [4, 2])
+ err = gradient_checker.compute_gradient_error(a, [4, 1, 2], squeezed,
+ [4, 2])
self.assertLess(err, 1e-3)
def testSqueezeGradientWithSqueezeDims(self):
with self.test_session():
inp = np.random.rand(4, 2).astype("f")
- a = tf.reshape(inp, [4, 1, 2, 1])
- squeezed = tf.squeeze(a, [1])
+ a = array_ops.reshape(inp, [4, 1, 2, 1])
+ squeezed = array_ops.squeeze(a, [1])
- err = tf.test.compute_gradient_error(a, [4, 1, 2, 1], squeezed, [4, 2, 1])
+ err = gradient_checker.compute_gradient_error(a, [4, 1, 2, 1], squeezed,
+ [4, 2, 1])
self.assertLess(err, 1e-3)
def testSqueezeWithUnknownShape(self):
with self.test_session():
- a = tf.placeholder(tf.float32, shape=[2, None])
+ a = array_ops.placeholder(dtypes.float32, shape=[2, None])
- squeezed = tf.squeeze(a, [1])
+ squeezed = array_ops.squeeze(a, [1])
self.assertEqual([2], squeezed.get_shape().as_list())
- squeezed = tf.squeeze(a)
+ squeezed = array_ops.squeeze(a)
self.assertEqual(None, squeezed.get_shape())
- self.assertRaises(ValueError, tf.squeeze, a, [0])
- self.assertRaises(ValueError, tf.squeeze, a, [100])
+ self.assertRaises(ValueError, array_ops.squeeze, a, [0])
+ self.assertRaises(ValueError, array_ops.squeeze, a, [100])
-class TileTest(tf.test.TestCase):
+class TileTest(test.TestCase):
def testScalar(self):
for use_gpu in False, True:
with self.test_session(use_gpu=use_gpu):
- a = tf.constant(7, shape=[], dtype=tf.float32)
- tiled = tf.tile(a, [])
+ a = constant_op.constant(7, shape=[], dtype=dtypes.float32)
+ tiled = array_ops.tile(a, [])
result = tiled.eval()
self.assertEqual(result.shape, ())
self.assertEqual([], tiled.get_shape())
@@ -325,8 +341,8 @@ class TileTest(tf.test.TestCase):
def testSimple(self):
with self.test_session():
inp = np.random.rand(4, 1).astype(np.float32)
- a = tf.constant(inp)
- tiled = tf.tile(a, [1, 4])
+ a = constant_op.constant(inp)
+ tiled = array_ops.tile(a, [1, 4])
result = tiled.eval()
self.assertEqual(result.shape, (4, 4))
self.assertEqual([4, 4], tiled.get_shape())
@@ -335,8 +351,8 @@ class TileTest(tf.test.TestCase):
def testEmpty(self):
with self.test_session():
inp = np.random.rand(2, 3).astype(np.float32)
- a = tf.constant(inp)
- tiled = tf.tile(a, [5, 0])
+ a = constant_op.constant(inp)
+ tiled = array_ops.tile(a, [5, 0])
result = tiled.eval()
self.assertEqual(result.shape, (10, 0))
self.assertEqual([10, 0], tiled.get_shape())
@@ -344,9 +360,9 @@ class TileTest(tf.test.TestCase):
def testUnknownInputShape(self):
"""Importing can call _TileShape without shape of <multiples> known."""
with self.test_session():
- inp = tf.placeholder(tf.float32) # unknown shape
- multiples = tf.constant([1, 2, 3, 4], dtype=np.int32)
- tiled = tf.tile(inp, multiples)
+ inp = array_ops.placeholder(dtypes.float32) # unknown shape
+ multiples = constant_op.constant([1, 2, 3, 4], dtype=np.int32)
+ tiled = array_ops.tile(inp, multiples)
gdef = tiled.graph.as_graph_def()
# Move the tile op to the start of the graph so that shapes of its inputs
@@ -356,35 +372,37 @@ class TileTest(tf.test.TestCase):
if n.op == "Tile":
# Swap tile op to be first in gdef.node
assert i != 0
- new_node = tf.NodeDef()
+ new_node = node_def_pb2.NodeDef()
new_node.CopyFrom(gdef.node[i])
gdef.node[i].CopyFrom(gdef.node[0])
gdef.node[0].CopyFrom(new_node)
swapped = True
assert swapped
- tiled_imported, = importer.import_graph_def(gdef,
- return_elements=[tiled.name])
+ tiled_imported, = importer.import_graph_def(
+ gdef, return_elements=[tiled.name])
self.assertEqual(4, tiled_imported.get_shape().ndims)
def testTypes(self):
types_to_test = {
- "bool": (tf.bool, bool),
- "float32": (tf.float32, float),
- "float64": (tf.float64, float),
- "complex64": (tf.complex64, complex),
- "complex128": (tf.complex128, complex),
- "uint8": (tf.uint8, int),
- "int32": (tf.int32, int),
- "int64": (tf.int64, int),
- bytes: (tf.string, bytes)
+ "bool": (dtypes.bool, bool),
+ "float32": (dtypes.float32, float),
+ "float64": (dtypes.float64, float),
+ "complex64": (dtypes.complex64, complex),
+ "complex128": (dtypes.complex128, complex),
+ "uint8": (dtypes.uint8, int),
+ "int32": (dtypes.int32, int),
+ "int64": (dtypes.int64, int),
+ bytes: (dtypes.string, bytes)
}
for dtype_np, (dtype_tf, cast) in types_to_test.items():
with self.test_session(use_gpu=True):
inp = np.random.rand(4, 1).astype(dtype_np)
- a = tf.constant([cast(x) for x in inp.ravel(order="C")], shape=[4, 1],
- dtype=dtype_tf)
- tiled = tf.tile(a, [1, 4])
+ a = constant_op.constant(
+ [cast(x) for x in inp.ravel(order="C")],
+ shape=[4, 1],
+ dtype=dtype_tf)
+ tiled = array_ops.tile(a, [1, 4])
result = tiled.eval()
self.assertEqual(result.shape, (4, 4))
self.assertEqual([4, 4], tiled.get_shape())
@@ -393,27 +411,31 @@ class TileTest(tf.test.TestCase):
def testInvalidDim(self):
with self.test_session():
inp = np.random.rand(4, 1).astype("f")
- a = tf.constant([float(x) for x in inp.ravel(order="C")],
- shape=[4, 1], dtype=tf.float32)
+ a = constant_op.constant(
+ [float(x) for x in inp.ravel(order="C")],
+ shape=[4, 1],
+ dtype=dtypes.float32)
# Wrong length of multiples.
with self.assertRaises(ValueError):
- tf.tile(a, [1, 4, 2])
+ array_ops.tile(a, [1, 4, 2])
# Wrong rank for multiples.
with self.assertRaises(ValueError):
- tf.tile(a, [[2, 3], [3, 4]]).eval()
+ array_ops.tile(a, [[2, 3], [3, 4]]).eval()
def _RunAndVerifyResult(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
# Random dims of rank 5
input_shape = np.random.randint(1, 4, size=5)
inp = np.random.rand(*input_shape).astype("f")
- a = tf.constant([float(x) for x in inp.ravel(order="C")],
- shape=input_shape, dtype=tf.float32)
+ a = constant_op.constant(
+ [float(x) for x in inp.ravel(order="C")],
+ shape=input_shape,
+ dtype=dtypes.float32)
multiples = np.random.randint(1, 4, size=5).astype(np.int32)
- tiled = tf.tile(a, multiples)
+ tiled = array_ops.tile(a, multiples)
result = tiled.eval()
- self.assertTrue((np.array(multiples) * np.array(inp.shape) ==
- np.array(result.shape)).all())
+ self.assertTrue((np.array(multiples) * np.array(inp.shape) == np.array(
+ result.shape)).all())
self.assertAllEqual(result, np.tile(inp, tuple(multiples)))
self.assertShapeEqual(result, tiled)
@@ -426,14 +448,14 @@ class TileTest(tf.test.TestCase):
def testGradientSimpleReduction(self):
with self.test_session():
inp = np.random.rand(4, 1).astype("f")
- a = tf.constant([float(x) for x in inp.flatten()],
- shape=[4, 1], dtype=tf.float32)
- tiled = tf.tile(a, [1, 4])
+ a = constant_op.constant(
+ [float(x) for x in inp.flatten()], shape=[4, 1], dtype=dtypes.float32)
+ tiled = array_ops.tile(a, [1, 4])
grad_shape = [4, 4]
grad_inp = np.random.rand(*grad_shape).astype("f")
- grad_tensor = tf.constant([float(x) for x in grad_inp.flatten()],
- shape=grad_shape)
- grad = tf.gradients([tiled], [a], [grad_tensor])[0]
+ grad_tensor = constant_op.constant(
+ [float(x) for x in grad_inp.flatten()], shape=grad_shape)
+ grad = gradients_impl.gradients([tiled], [a], [grad_tensor])[0]
self.assertShapeEqual(inp, grad)
result = grad.eval()
self.assertAllClose(np.sum(grad_inp, axis=1).reshape(4, 1), result, 1e-3)
@@ -441,14 +463,14 @@ class TileTest(tf.test.TestCase):
def testGradientStridedReduction(self):
with self.test_session():
inp = np.random.rand(4, 2).astype("f")
- a = tf.constant([float(x) for x in inp.flatten()],
- shape=[4, 2], dtype=tf.float32)
- tiled = tf.tile(a, [1, 2])
+ a = constant_op.constant(
+ [float(x) for x in inp.flatten()], shape=[4, 2], dtype=dtypes.float32)
+ tiled = array_ops.tile(a, [1, 2])
grad_shape = [4, 4]
grad_inp = np.random.rand(*grad_shape).astype("f")
- grad_tensor = tf.constant([float(x) for x in grad_inp.flatten()],
- shape=grad_shape)
- grad = tf.gradients([tiled], [a], [grad_tensor])[0]
+ grad_tensor = constant_op.constant(
+ [float(x) for x in grad_inp.flatten()], shape=grad_shape)
+ grad = gradients_impl.gradients([tiled], [a], [grad_tensor])[0]
self.assertShapeEqual(inp, grad)
result = grad.eval()
expected_shape = [4, 2]
@@ -460,28 +482,28 @@ class TileTest(tf.test.TestCase):
def testGradientSimpleReductionOnGPU(self):
with self.test_session(use_gpu=True):
inp = np.random.rand(4, 1).astype("f")
- a = tf.constant([float(x) for x in inp.flatten()],
- shape=[4, 1], dtype=tf.float32)
- tiled = tf.tile(a, [1, 4])
+ a = constant_op.constant(
+ [float(x) for x in inp.flatten()], shape=[4, 1], dtype=dtypes.float32)
+ tiled = array_ops.tile(a, [1, 4])
grad_shape = [4, 4]
grad_inp = np.random.rand(*grad_shape).astype("f")
- grad_tensor = tf.constant([float(x) for x in grad_inp.flatten()],
- shape=grad_shape)
- grad = tf.gradients([tiled], [a], [grad_tensor])[0]
+ grad_tensor = constant_op.constant(
+ [float(x) for x in grad_inp.flatten()], shape=grad_shape)
+ grad = gradients_impl.gradients([tiled], [a], [grad_tensor])[0]
result = grad.eval()
self.assertAllClose(np.sum(grad_inp, axis=1).reshape(4, 1), result, 1e-3)
def testGradientStridedReductionOnGPU(self):
with self.test_session(use_gpu=True):
inp = np.random.rand(4, 2).astype("f")
- a = tf.constant([float(x) for x in inp.flatten()],
- shape=[4, 2], dtype=tf.float32)
- tiled = tf.tile(a, [1, 2])
+ a = constant_op.constant(
+ [float(x) for x in inp.flatten()], shape=[4, 2], dtype=dtypes.float32)
+ tiled = array_ops.tile(a, [1, 2])
grad_shape = [4, 4]
grad_inp = np.random.rand(*grad_shape).astype("f")
- grad_tensor = tf.constant([float(x) for x in grad_inp.flatten()],
- shape=grad_shape)
- grad = tf.gradients([tiled], [a], [grad_tensor])[0]
+ grad_tensor = constant_op.constant(
+ [float(x) for x in grad_inp.flatten()], shape=grad_shape)
+ grad = gradients_impl.gradients([tiled], [a], [grad_tensor])[0]
result = grad.eval()
expected_shape = [4, 2]
expected = np.zeros(expected_shape)
@@ -494,14 +516,11 @@ class TileTest(tf.test.TestCase):
with self.test_session(use_gpu=use_gpu):
# Random values
inp = np.asarray(np.random.rand(*input_shape))
- a = tf.constant(inp, dtype=tf.float64)
- tiled = tf.tile(a, multiples)
+ a = constant_op.constant(inp, dtype=dtypes.float64)
+ tiled = array_ops.tile(a, multiples)
grad_shape = list(np.array(multiples) * np.array(inp.shape))
- err = tf.test.compute_gradient_error(a,
- list(input_shape),
- tiled,
- grad_shape,
- x_init_value=inp)
+ err = gradient_checker.compute_gradient_error(
+ a, list(input_shape), tiled, grad_shape, x_init_value=inp)
print("tile(float) error = ", err)
self.assertLess(err, 1e-3)
@@ -516,38 +535,40 @@ class TileTest(tf.test.TestCase):
def testGradientStridedReductionGC(self):
with self.test_session():
inp = np.random.rand(4, 2).astype("f")
- a = tf.constant([float(x) for x in inp.flatten()],
- shape=[4, 2], dtype=tf.float32)
- tiled = tf.tile(a, [1, 2])
- err = tf.test.compute_gradient_error(a, [4, 2], tiled, [4, 4])
+ a = constant_op.constant(
+ [float(x) for x in inp.flatten()], shape=[4, 2], dtype=dtypes.float32)
+ tiled = array_ops.tile(a, [1, 2])
+ err = gradient_checker.compute_gradient_error(a, [4, 2], tiled, [4, 4])
self.assertLess(err, 1e-3)
def testShapeFunctionEdgeCases(self):
# Unknown multiples shape.
- inp = tf.constant(0.0, shape=[4, 4, 4, 4])
- tiled = tf.tile(inp, tf.placeholder(tf.int32))
+ inp = constant_op.constant(0.0, shape=[4, 4, 4, 4])
+ tiled = array_ops.tile(inp, array_ops.placeholder(dtypes.int32))
self.assertEqual([None, None, None, None], tiled.get_shape().as_list())
# Unknown input shape.
- inp = tf.placeholder(tf.float32)
- tiled = tf.tile(inp, [2, 2, 2, 2])
+ inp = array_ops.placeholder(dtypes.float32)
+ tiled = array_ops.tile(inp, [2, 2, 2, 2])
self.assertEqual([None, None, None, None], tiled.get_shape().as_list())
# Unknown input and multiples shape.
- inp = tf.placeholder(tf.float32)
- tiled = tf.tile(inp, tf.placeholder(tf.int32))
+ inp = array_ops.placeholder(dtypes.float32)
+ tiled = array_ops.tile(inp, array_ops.placeholder(dtypes.int32))
self.assertIs(None, tiled.get_shape().ndims)
# Known input and partially known multiples.
- inp = tf.constant(0.0, shape=[1, 1])
- tiled = tf.tile(inp, [tf.placeholder(tf.int32), 7])
+ inp = constant_op.constant(0.0, shape=[1, 1])
+ tiled = array_ops.tile(inp, [array_ops.placeholder(dtypes.int32), 7])
self.assertEqual([None, 7], tiled.get_shape().as_list())
# Mismatched input rank and multiples length.
- inp = tf.placeholder(tf.float32, shape=[None, None])
+ inp = array_ops.placeholder(dtypes.float32, shape=[None, None])
with self.assertRaises(ValueError):
- tiled = tf.tile(inp, tf.placeholder(tf.int32, shape=[3]))
+ tiled = array_ops.tile(
+ inp, array_ops.placeholder(
+ dtypes.int32, shape=[3]))
if __name__ == "__main__":
- tf.test.main()
+ test.main()
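
The tile-gradient hunks above all assert the same identity: the gradient of `tile(a, [1, n])` with respect to `a` folds the upstream gradient back by summing over the n copies. A NumPy check of the reduction `testGradientSimpleReduction` expects:

    import numpy as np

    grad_inp = np.random.rand(4, 4).astype("f")  # upstream gradient for tile(a, [1, 4])
    folded = sum(grad_inp[:, k:k + 1] for k in range(4))  # add the four copies back
    expected = np.sum(grad_inp, axis=1).reshape(4, 1)     # the test's reference
    assert np.allclose(folded, expected)
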
diff --git a/tensorflow/python/kernel_tests/slice_op_test.py b/tensorflow/python/kernel_tests/slice_op_test.py
index fd5c66c99d..29f76a2182 100644
--- a/tensorflow/python/kernel_tests/slice_op_test.py
+++ b/tensorflow/python/kernel_tests/slice_op_test.py
@@ -12,24 +12,30 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Functional tests for slice op."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
-import tensorflow as tf
+
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import errors_impl
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import gradients_impl
+from tensorflow.python.platform import test
-class SliceTest(tf.test.TestCase):
+class SliceTest(test.TestCase):
def testEmpty(self):
inp = np.random.rand(4, 4).astype("f")
for k in xrange(4):
with self.test_session(use_gpu=True):
- a = tf.constant(inp, shape=[4, 4], dtype=tf.float32)
+ a = constant_op.constant(inp, shape=[4, 4], dtype=dtypes.float32)
slice_t = a[2, k:k]
slice_val = slice_t.eval()
self.assertAllEqual(slice_val, inp[2, k:k])
@@ -38,7 +44,7 @@ class SliceTest(tf.test.TestCase):
inp = np.random.rand(4, 4).astype("i")
for k in xrange(4):
with self.test_session(use_gpu=True):
- a = tf.constant(inp, shape=[4, 4], dtype=tf.int32)
+ a = constant_op.constant(inp, shape=[4, 4], dtype=dtypes.int32)
slice_t = a[2, k:k]
slice_val = slice_t.eval()
self.assertAllEqual(slice_val, inp[2, k:k])
@@ -47,10 +53,9 @@ class SliceTest(tf.test.TestCase):
for _ in range(10):
with self.test_session(use_gpu=True):
inp = np.random.rand(4, 4, 4, 4).astype("f")
- a = tf.constant(inp, shape=[4, 4, 4, 4],
- dtype=tf.float32)
+ a = constant_op.constant(inp, shape=[4, 4, 4, 4], dtype=dtypes.float32)
- slice_explicit_t = tf.slice(a, [0, 0, 0, 0], [-1, -1, -1, -1])
+ slice_explicit_t = array_ops.slice(a, [0, 0, 0, 0], [-1, -1, -1, -1])
slice_implicit_t = a[:, :, :, :]
self.assertAllEqual(inp, slice_explicit_t.eval())
@@ -62,7 +67,7 @@ class SliceTest(tf.test.TestCase):
for _ in range(10):
with self.test_session(use_gpu=True):
inp = np.random.rand(10).astype("f")
- a = tf.constant(inp, shape=[10], dtype=tf.float32)
+ a = constant_op.constant(inp, shape=[10], dtype=dtypes.float32)
hi = np.random.randint(0, 9)
scalar_t = a[hi]
@@ -82,12 +87,12 @@ class SliceTest(tf.test.TestCase):
with self.test_session() as sess:
# Test with constant input; shape inference fails.
with self.assertRaisesWithPredicateMatch(ValueError, "out of range"):
- tf.constant(input_val)[:].get_shape()
+ constant_op.constant(input_val)[:].get_shape()
# Test evaluating with non-constant input; kernel execution fails.
- input_t = tf.placeholder(tf.int32)
+ input_t = array_ops.placeholder(dtypes.int32)
slice_t = input_t[:]
- with self.assertRaisesWithPredicateMatch(tf.errors.InvalidArgumentError,
+ with self.assertRaisesWithPredicateMatch(errors_impl.InvalidArgumentError,
"out of range"):
sess.run([slice_t], feed_dict={input_t: input_val})
@@ -96,26 +101,26 @@ class SliceTest(tf.test.TestCase):
with self.test_session() as sess:
# Test with constant input; shape inference fails.
with self.assertRaisesWithPredicateMatch(ValueError, "out of range"):
- tf.constant(input_val)[1:, 1:].get_shape()
+ constant_op.constant(input_val)[1:, 1:].get_shape()
# Test evaluating with non-constant input; kernel execution fails.
- input_t = tf.placeholder(tf.int32)
+ input_t = array_ops.placeholder(dtypes.int32)
slice_t = input_t[1:, 1:]
- with self.assertRaisesWithPredicateMatch(tf.errors.InvalidArgumentError,
+ with self.assertRaisesWithPredicateMatch(errors_impl.InvalidArgumentError,
"out of range"):
sess.run([slice_t], feed_dict={input_t: input_val})
def _testSliceMatrixDim0(self, x, begin, size):
with self.test_session(use_gpu=True):
- tf_ans = tf.slice(x, [begin, 0], [size, x.shape[1]]).eval()
- np_ans = x[begin:begin+size, :]
+ tf_ans = array_ops.slice(x, [begin, 0], [size, x.shape[1]]).eval()
+ np_ans = x[begin:begin + size, :]
self.assertAllEqual(tf_ans, np_ans)
def testSliceMatrixDim0(self):
x = np.random.rand(8, 4).astype("f")
self._testSliceMatrixDim0(x, 1, 2)
self._testSliceMatrixDim0(x, 3, 3)
- y = np.random.rand(8, 7).astype("f") # 7 * sizeof(float) is not aligned
+ y = np.random.rand(8, 7).astype("f") # 7 * sizeof(float) is not aligned
self._testSliceMatrixDim0(y, 1, 2)
self._testSliceMatrixDim0(y, 3, 3)
@@ -123,7 +128,7 @@ class SliceTest(tf.test.TestCase):
for _ in range(10):
with self.test_session(use_gpu=True):
inp = np.random.rand(4, 4).astype("f")
- a = tf.constant(inp, shape=[4, 4], dtype=tf.float32)
+ a = constant_op.constant(inp, shape=[4, 4], dtype=dtypes.float32)
x, y = np.random.randint(0, 3, size=2).tolist()
slice_t = a[x, 0:y]
@@ -133,9 +138,11 @@ class SliceTest(tf.test.TestCase):
def testSimple(self):
with self.test_session(use_gpu=True) as sess:
inp = np.random.rand(4, 4).astype("f")
- a = tf.constant([float(x) for x in inp.ravel(order="C")],
- shape=[4, 4], dtype=tf.float32)
- slice_t = tf.slice(a, [0, 0], [2, 2])
+ a = constant_op.constant(
+ [float(x) for x in inp.ravel(order="C")],
+ shape=[4, 4],
+ dtype=dtypes.float32)
+ slice_t = array_ops.slice(a, [0, 0], [2, 2])
slice2_t = a[:2, :2]
slice_val, slice2_val = sess.run([slice_t, slice2_t])
self.assertAllEqual(slice_val, inp[:2, :2])
@@ -146,7 +153,7 @@ class SliceTest(tf.test.TestCase):
def testComplex(self):
with self.test_session(use_gpu=True):
inp = np.random.rand(4, 10, 10, 4).astype("f")
- a = tf.constant(inp, dtype=tf.float32)
+ a = constant_op.constant(inp, dtype=dtypes.float32)
x = np.random.randint(0, 9)
z = np.random.randint(0, 9)
@@ -162,27 +169,27 @@ class SliceTest(tf.test.TestCase):
input_shape = np.random.randint(0, 20, size=6)
inp = np.random.rand(*input_shape).astype("f")
with self.test_session(use_gpu=True) as sess:
- a = tf.constant([float(x) for x in inp.ravel(order="C")],
- shape=input_shape, dtype=tf.float32)
+ a = constant_op.constant(
+ [float(x) for x in inp.ravel(order="C")],
+ shape=input_shape,
+ dtype=dtypes.float32)
indices = [0 if x == 0 else np.random.randint(x) for x in input_shape]
- sizes = [np.random.randint(0, input_shape[i] - indices[i] + 1)
- for i in range(6)]
- slice_t = tf.slice(a, indices, sizes)
- slice2_t = a[indices[0]:indices[0]+sizes[0],
- indices[1]:indices[1]+sizes[1],
- indices[2]:indices[2]+sizes[2],
- indices[3]:indices[3]+sizes[3],
- indices[4]:indices[4]+sizes[4],
- indices[5]:indices[5]+sizes[5]]
+ sizes = [
+ np.random.randint(0, input_shape[i] - indices[i] + 1)
+ for i in range(6)
+ ]
+ slice_t = array_ops.slice(a, indices, sizes)
+ slice2_t = a[indices[0]:indices[0] + sizes[0], indices[1]:indices[
+ 1] + sizes[1], indices[2]:indices[2] + sizes[2], indices[3]:indices[3]
+ + sizes[3], indices[4]:indices[4] + sizes[4], indices[5]:
+ indices[5] + sizes[5]]
slice_val, slice2_val = sess.run([slice_t, slice2_t])
- expected_val = inp[indices[0]:indices[0]+sizes[0],
- indices[1]:indices[1]+sizes[1],
- indices[2]:indices[2]+sizes[2],
- indices[3]:indices[3]+sizes[3],
- indices[4]:indices[4]+sizes[4],
- indices[5]:indices[5]+sizes[5]]
+ expected_val = inp[indices[0]:indices[0] + sizes[0], indices[1]:indices[
+ 1] + sizes[1], indices[2]:indices[2] + sizes[2], indices[3]:indices[
+ 3] + sizes[3], indices[4]:indices[4] + sizes[4], indices[5]:indices[
+ 5] + sizes[5]]
self.assertAllEqual(slice_val, expected_val)
self.assertAllEqual(slice2_val, expected_val)
self.assertEqual(expected_val.shape, slice_t.get_shape())
@@ -193,12 +200,14 @@ class SliceTest(tf.test.TestCase):
num_inputs = np.prod(input_shape)
num_grads = np.prod(slice_size)
inp = np.random.rand(num_inputs).astype("f").reshape(input_shape)
- a = tf.constant([float(x) for x in inp.ravel(order="C")],
- shape=input_shape, dtype=tf.float32)
- slice_t = tf.slice(a, slice_begin, slice_size)
+ a = constant_op.constant(
+ [float(x) for x in inp.ravel(order="C")],
+ shape=input_shape,
+ dtype=dtypes.float32)
+ slice_t = array_ops.slice(a, slice_begin, slice_size)
grads = np.random.rand(num_grads).astype("f").reshape(slice_size)
- grad_tensor = tf.constant(grads)
- grad = tf.gradients(slice_t, [a], grad_tensor)[0]
+ grad_tensor = constant_op.constant(grads)
+ grad = gradients_impl.gradients(slice_t, [a], grad_tensor)[0]
result = grad.eval()
      # Create a zero tensor of the input shape and place
@@ -213,9 +222,9 @@ class SliceTest(tf.test.TestCase):
def _testGradientVariableSize(self):
with self.test_session(use_gpu=True):
- inp = tf.constant([1.0, 2.0, 3.0], name="in")
- out = tf.slice(inp, [1], [-1])
- grad_actual = tf.gradients(out, inp)[0].eval()
+ inp = constant_op.constant([1.0, 2.0, 3.0], name="in")
+ out = array_ops.slice(inp, [1], [-1])
+ grad_actual = gradients_impl.gradients(out, inp)[0].eval()
self.assertAllClose([0., 1., 1.], grad_actual)
def testGradientsAll(self):
@@ -239,10 +248,9 @@ class SliceTest(tf.test.TestCase):
# operator, Python will valiantly attempt to iterate over the
# Tensor from 0 to infinity. This test ensures that this
# unintended behavior is prevented.
- c = tf.constant(5.0)
+ c = constant_op.constant(5.0)
with self.assertRaisesWithPredicateMatch(
- TypeError,
- lambda e: "'Tensor' object is not iterable" in str(e)):
+ TypeError, lambda e: "'Tensor' object is not iterable" in str(e)):
for _ in c:
pass
@@ -251,16 +259,16 @@ class SliceTest(tf.test.TestCase):
# because `tf.slice()` uses -1 to specify a wildcard size, and
# this can't be handled using the
# `tensor_util.constant_value_as_shape()` trick.
- a = tf.constant([[1, 2, 3], [4, 5, 6]])
- begin = tf.constant(0)
- size = tf.constant(1)
- b = tf.slice(a, [begin, 0], [size, 2])
+ a = constant_op.constant([[1, 2, 3], [4, 5, 6]])
+ begin = constant_op.constant(0)
+ size = constant_op.constant(1)
+ b = array_ops.slice(a, [begin, 0], [size, 2])
self.assertEqual([1, 2], b.get_shape())
- begin = tf.placeholder(tf.int32, shape=())
- c = tf.slice(a, [begin, 0], [-1, 2])
+ begin = array_ops.placeholder(dtypes.int32, shape=())
+ c = array_ops.slice(a, [begin, 0], [-1, 2])
self.assertEqual([None, 2], c.get_shape().as_list())
if __name__ == "__main__":
- tf.test.main()
+ test.main()
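
`testRandom` above spells out all six slice ranges by hand, and the reflowed `+` lines split subscripts like `indices[1]` across physical lines, which is valid Python but hard to read. An equivalent construction using one `slice` object per axis, offered only as a readability sketch rather than what the commit does:

    import numpy as np

    input_shape = (4, 3, 2, 5, 6, 3)
    inp = np.random.rand(*input_shape).astype("f")
    indices = [0 if n == 0 else np.random.randint(n) for n in input_shape]
    sizes = [np.random.randint(0, n - i + 1) for n, i in zip(input_shape, indices)]

    # One slice object per axis replaces the six hand-written ranges.
    key = tuple(slice(i, i + s) for i, s in zip(indices, sizes))
    expected_val = inp[key]
    assert expected_val.shape == tuple(sizes)
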
diff --git a/tensorflow/python/kernel_tests/softmax_op_test.py b/tensorflow/python/kernel_tests/softmax_op_test.py
index 7c591707e9..6a31aac5cd 100644
--- a/tensorflow/python/kernel_tests/softmax_op_test.py
+++ b/tensorflow/python/kernel_tests/softmax_op_test.py
@@ -12,8 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for SoftmaxOp and LogSoftmaxOp."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
@@ -21,10 +21,16 @@ from __future__ import print_function
import sys
import numpy as np
-import tensorflow as tf
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import errors_impl
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import nn_ops
+from tensorflow.python.platform import test
-class SoftmaxTest(tf.test.TestCase):
+
+class SoftmaxTest(test.TestCase):
def _npSoftmax(self, features, dim=-1, log=False):
if dim is -1:
@@ -48,9 +54,9 @@ class SoftmaxTest(tf.test.TestCase):
np_softmax = self._npSoftmax(np_features, dim=dim, log=log)
with self.test_session(use_gpu=use_gpu):
if log:
- tf_softmax = tf.nn.log_softmax(np_features, dim=dim, name=name)
+ tf_softmax = nn_ops.log_softmax(np_features, dim=dim, name=name)
else:
- tf_softmax = tf.nn.softmax(np_features, dim=dim, name=name)
+ tf_softmax = nn_ops.softmax(np_features, dim=dim, name=name)
out = tf_softmax.eval()
self.assertAllCloseAccordingToType(np_softmax, out)
self.assertShapeEqual(np_softmax, tf_softmax)
@@ -67,7 +73,6 @@ class SoftmaxTest(tf.test.TestCase):
self._testSoftmax(features, log=True, use_gpu=True)
self._testOverflow(use_gpu=True)
-
def testNpSoftmax(self):
features = [[1., 1., 1., 1.], [1., 2., 3., 4.]]
# Batch 0: All exps are 1. The expected result is
@@ -84,13 +89,15 @@ class SoftmaxTest(tf.test.TestCase):
np.array([[0.25, 0.25, 0.25, 0.25],
[0.0320586, 0.08714432, 0.23688282, 0.64391426]]),
np_sm,
- rtol=1.e-5, atol=1.e-5)
+ rtol=1.e-5,
+ atol=1.e-5)
np_lsm = self._npSoftmax(np.array(features), log=True)
self.assertAllClose(
np.array([[-1.386294, -1.386294, -1.386294, -1.386294],
[-3.4401897, -2.4401897, -1.4401897, -0.4401897]]),
np_lsm,
- rtol=1.e-5, atol=1.e-5)
+ rtol=1.e-5,
+ atol=1.e-5)
def _testOverflow(self, use_gpu=False):
if use_gpu:
@@ -98,17 +105,16 @@ class SoftmaxTest(tf.test.TestCase):
else:
type = np.float64
max = np.finfo(type).max
- features = np.array(
- [[1., 1., 1., 1.],
- [max, 1., 2., 3.]]).astype(type)
+ features = np.array([[1., 1., 1., 1.], [max, 1., 2., 3.]]).astype(type)
with self.test_session(use_gpu=use_gpu):
- tf_log_softmax = tf.nn.log_softmax(features)
+ tf_log_softmax = nn_ops.log_softmax(features)
out = tf_log_softmax.eval()
self.assertAllClose(
np.array([[-1.386294, -1.386294, -1.386294, -1.386294],
[0, -max, -max, -max]]),
out,
- rtol=1.e-5, atol=1.e-5)
+ rtol=1.e-5,
+ atol=1.e-5)
def testFloat(self):
self._testAll(
@@ -155,23 +161,23 @@ class SoftmaxTest(tf.test.TestCase):
self._testOverflow(use_gpu=False)
def testShapeInference(self):
- op = tf.nn.softmax([[[1., 1., 1., 1.], [1., 2., 3., 4.]],
- [[2., 3., 4., 5.], [6., 7., 8., 9.]],
- [[5., 4., 3., 2.], [1., 2., 3., 4.]]])
+ op = nn_ops.softmax([[[1., 1., 1., 1.], [1., 2., 3., 4.]],
+ [[2., 3., 4., 5.], [6., 7., 8., 9.]],
+ [[5., 4., 3., 2.], [1., 2., 3., 4.]]])
self.assertEqual([3, 2, 4], op.get_shape())
def testEmptyInput(self):
with self.test_session():
- x = tf.constant([[]], shape=[0, 3])
- self.assertEqual(0, tf.size(x).eval())
+ x = constant_op.constant([[]], shape=[0, 3])
+ self.assertEqual(0, array_ops.size(x).eval())
# reshape would raise if logits is empty
- with self.assertRaises(tf.errors.InvalidArgumentError):
- tf.nn.softmax(x, dim=0).eval()
+ with self.assertRaises(errors_impl.InvalidArgumentError):
+ nn_ops.softmax(x, dim=0).eval()
def testDimTooLarge(self):
with self.test_session():
- with self.assertRaises(tf.errors.InvalidArgumentError):
- tf.nn.softmax([1., 2., 3., 4.], dim=100).eval()
+ with self.assertRaises(errors_impl.InvalidArgumentError):
+ nn_ops.softmax([1., 2., 3., 4.], dim=100).eval()
def testLargeDims(self):
# Make sure that we properly handle large inputs. See
@@ -182,11 +188,11 @@ class SoftmaxTest(tf.test.TestCase):
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu) as sess:
- x = tf.placeholder(tf.float32)
- y = tf.nn.softmax(x)
+ x = array_ops.placeholder(dtypes.float32)
+ y = nn_ops.softmax(x)
tf_softmax = sess.run(y, feed_dict={x: ones})
self.assertAllClose(tf_softmax, np_softmax)
if __name__ == "__main__":
- tf.test.main()
+ test.main()
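
`_testOverflow` above feeds `np.finfo(type).max` into `log_softmax` and expects `[0, -max, -max, -max]`; that only works because the computation shifts by the row maximum before exponentiating. A NumPy sketch of the identity (an illustration, not the kernel's actual implementation):

    import numpy as np

    def log_softmax(x):
      shifted = x - np.max(x, axis=-1, keepdims=True)  # keeps exp() finite
      return shifted - np.log(np.sum(np.exp(shifted), axis=-1, keepdims=True))

    big = np.finfo(np.float64).max
    print(log_softmax(np.array([[big, 1., 2., 3.]])))
    # ~[[0., -big, -big, -big]], the row the test checks
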
diff --git a/tensorflow/python/kernel_tests/softplus_op_test.py b/tensorflow/python/kernel_tests/softplus_op_test.py
index 6223cacf61..f70f60c0f5 100644
--- a/tensorflow/python/kernel_tests/softplus_op_test.py
+++ b/tensorflow/python/kernel_tests/softplus_op_test.py
@@ -12,17 +12,22 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for Softplus and SoftplusGrad."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+
+from tensorflow.python.framework import constant_op
+from tensorflow.python.ops import gradient_checker
+from tensorflow.python.ops import nn_ops
+import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
+from tensorflow.python.platform import test
-class SoftplusTest(tf.test.TestCase):
+class SoftplusTest(test.TestCase):
def _npSoftplus(self, np_features):
np_features = np.asarray(np_features)
@@ -32,7 +37,7 @@ class SoftplusTest(tf.test.TestCase):
def _testSoftplus(self, np_features, use_gpu=False):
np_softplus = self._npSoftplus(np_features)
with self.test_session(use_gpu=use_gpu):
- softplus = tf.nn.softplus(np_features)
+ softplus = nn_ops.softplus(np_features)
tf_softplus = softplus.eval()
self.assertAllCloseAccordingToType(np_softplus, tf_softplus)
self.assertTrue(np.all(tf_softplus > 0))
@@ -50,35 +55,36 @@ class SoftplusTest(tf.test.TestCase):
one = t(1)
ten = t(10)
self._testSoftplus(
- [log_eps, log_eps - one, log_eps + one,
- log_eps - ten, log_eps + ten,
- -log_eps, -log_eps - one, -log_eps + one,
- -log_eps - ten, -log_eps + ten],
+ [
+ log_eps, log_eps - one, log_eps + one, log_eps - ten,
+ log_eps + ten, -log_eps, -log_eps - one, -log_eps + one,
+ -log_eps - ten, -log_eps + ten
+ ],
use_gpu=False)
self._testSoftplus(
- [log_eps, log_eps - one, log_eps + one,
- log_eps - ten, log_eps + ten
- -log_eps, -log_eps - one, -log_eps + one,
- -log_eps - ten, -log_eps + ten],
+ [
+ log_eps, log_eps - one, log_eps + one, log_eps - ten,
+ log_eps + ten - log_eps, -log_eps - one, -log_eps + one,
+ -log_eps - ten, -log_eps + ten
+ ],
use_gpu=True)
def testGradient(self):
with self.test_session():
- x = tf.constant(
+ x = constant_op.constant(
[-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
- shape=[2, 5], name="x")
- y = tf.nn.softplus(x, name="softplus")
+ shape=[2, 5],
+ name="x")
+ y = nn_ops.softplus(x, name="softplus")
x_init = np.asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
- dtype=np.float32, order="F")
- err = tf.test.compute_gradient_error(x,
- [2, 5],
- y,
- [2, 5],
- x_init_value=x_init)
+ dtype=np.float32,
+ order="F")
+ err = gradient_checker.compute_gradient_error(
+ x, [2, 5], y, [2, 5], x_init_value=x_init)
print("softplus (float) gradient err = ", err)
self.assertLess(err, 1e-4)
if __name__ == "__main__":
- tf.test.main()
+ test.main()
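
Two notes on the softplus hunk above. First, the reflowed `use_gpu=True` input list contains `log_eps + ten - log_eps` as a single element; that appears to carry over a missing comma already present in the old lines (`log_eps + ten` followed by `-log_eps` with no separator) rather than introduce one. Second, the inputs clustered around `log_eps` probe numerical stability near the float-epsilon boundary; a common stable formulation, sketched here as an assumption since the hunk truncates `_npSoftplus`:

    import numpy as np

    def softplus(x):
      # log(1 + exp(x)) without overflow: max(x, 0) + log1p(exp(-|x|))
      x = np.asarray(x, dtype=np.float64)
      return np.maximum(x, 0.0) + np.log1p(np.exp(-np.abs(x)))

    print(softplus([-1000.0, 0.0, 1000.0]))  # [0., 0.6931..., 1000.]
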
diff --git a/tensorflow/python/kernel_tests/softsign_op_test.py b/tensorflow/python/kernel_tests/softsign_op_test.py
index 8b9467e783..5fd5253c09 100644
--- a/tensorflow/python/kernel_tests/softsign_op_test.py
+++ b/tensorflow/python/kernel_tests/softsign_op_test.py
@@ -12,17 +12,22 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for Softsign and SoftsignGrad."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+
+from tensorflow.python.framework import constant_op
+from tensorflow.python.ops import gradient_checker
+from tensorflow.python.ops import nn_ops
+import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
+from tensorflow.python.platform import test
-class SoftsignTest(tf.test.TestCase):
+class SoftsignTest(test.TestCase):
def _npSoftsign(self, np_features):
return np_features / (1 + np.abs(np_features))
@@ -30,7 +35,7 @@ class SoftsignTest(tf.test.TestCase):
def _testSoftsign(self, np_features, use_gpu=False):
np_softsign = self._npSoftsign(np_features)
with self.test_session(use_gpu=use_gpu):
- softsign = tf.nn.softsign(np_features)
+ softsign = nn_ops.softsign(np_features)
tf_softsign = softsign.eval()
self.assertAllClose(np_softsign, tf_softsign)
self.assertShapeEqual(np_softsign, softsign)
@@ -46,21 +51,20 @@ class SoftsignTest(tf.test.TestCase):
def testGradient(self):
with self.test_session():
- x = tf.constant(
+ x = constant_op.constant(
[-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
- shape=[2, 5], name="x")
- y = tf.nn.softsign(x, name="softsign")
+ shape=[2, 5],
+ name="x")
+ y = nn_ops.softsign(x, name="softsign")
x_init = np.asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
- dtype=np.float32, order="F")
- err = tf.test.compute_gradient_error(x,
- [2, 5],
- y,
- [2, 5],
- x_init_value=x_init)
+ dtype=np.float32,
+ order="F")
+ err = gradient_checker.compute_gradient_error(
+ x, [2, 5], y, [2, 5], x_init_value=x_init)
print("softsign (float) gradient err = ", err)
self.assertLess(err, 1e-4)
if __name__ == "__main__":
- tf.test.main()
+ test.main()
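
`testGradient` above checks softsign numerically via `compute_gradient_error`; the analytic form being validated is `softsign(x) = x / (1 + |x|)` with derivative `1 / (1 + |x|)**2`. A quick NumPy cross-check against a central difference:

    import numpy as np

    def softsign(x):
      return x / (1 + np.abs(x))

    def softsign_grad(x):
      return 1.0 / (1 + np.abs(x)) ** 2

    x = np.asarray([-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9])
    eps = 1e-6
    numeric = (softsign(x + eps) - softsign(x - eps)) / (2 * eps)
    assert np.allclose(numeric, softsign_grad(x), atol=1e-6)
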
diff --git a/tensorflow/python/kernel_tests/spacetobatch_op_test.py b/tensorflow/python/kernel_tests/spacetobatch_op_test.py
index 4e14d7c5c5..b943dfa4e5 100644
--- a/tensorflow/python/kernel_tests/spacetobatch_op_test.py
+++ b/tensorflow/python/kernel_tests/spacetobatch_op_test.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Functional tests for SpaceToBatch and BatchToSpace ops."""
from __future__ import absolute_import
@@ -20,10 +19,15 @@ from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
+from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
+from tensorflow.python.ops import gradient_checker
+from tensorflow.python.ops import math_ops
+from tensorflow.python.platform import test
def space_to_batch_direct(input_array, block_shape, paddings):
@@ -61,8 +65,9 @@ def space_to_batch_direct(input_array, block_shape, paddings):
reshaped_padded = padded.reshape(reshaped_padded_shape)
permuted_reshaped_padded = np.transpose(reshaped_padded, (
list(np.arange(num_block_dims) * 2 + 2) + [0] +
- list(np.arange(num_block_dims) * 2 + 1) + list(np.arange(
- input_array.ndim - num_block_dims - 1) + 1 + num_block_dims * 2)))
+ list(np.arange(num_block_dims) * 2 + 1) + list(
+ np.arange(input_array.ndim - num_block_dims - 1) + 1 + num_block_dims
+ * 2)))
return permuted_reshaped_padded.reshape(output_shape)
@@ -70,11 +75,11 @@ class PythonOpImpl(object):
@staticmethod
def space_to_batch(*args, **kwargs):
- return tf.space_to_batch(*args, **kwargs)
+ return array_ops.space_to_batch(*args, **kwargs)
@staticmethod
def batch_to_space(*args, **kwargs):
- return tf.batch_to_space(*args, **kwargs)
+ return array_ops.batch_to_space(*args, **kwargs)
class CppOpImpl(object):
@@ -88,7 +93,7 @@ class CppOpImpl(object):
return gen_array_ops._batch_to_space(*args, **kwargs)
-class SpaceToBatchTest(tf.test.TestCase, PythonOpImpl):
+class SpaceToBatchTest(test.TestCase, PythonOpImpl):
"""Tests input-output pairs for the SpaceToBatch and BatchToSpace ops.
This uses the Python compatibility wrapper that forwards to space_to_batch_nd.
@@ -98,13 +103,11 @@ class SpaceToBatchTest(tf.test.TestCase, PythonOpImpl):
with self.test_session(use_gpu=True):
# outputs = space_to_batch(inputs)
x_tf = self.space_to_batch(
- tf.to_float(inputs),
- paddings, block_size=block_size)
+ math_ops.to_float(inputs), paddings, block_size=block_size)
self.assertAllEqual(x_tf.eval(), outputs)
# inputs = batch_to_space(outputs)
x_tf = self.batch_to_space(
- tf.to_float(outputs),
- paddings, block_size=block_size)
+ math_ops.to_float(outputs), paddings, block_size=block_size)
self.assertAllEqual(x_tf.eval(), inputs)
def _testOne(self, inputs, block_size, outputs):
@@ -123,9 +126,8 @@ class SpaceToBatchTest(tf.test.TestCase, PythonOpImpl):
x_np = [[[[1], [2]], [[3], [4]]]]
paddings = np.array([[1, 0], [1, 0]], dtype=np.int32)
block_size = 3
- x_out = [[[[0]]], [[[0]]], [[[0]]],
- [[[0]]], [[[1]]], [[[2]]],
- [[[0]]], [[[3]]], [[[4]]]]
+ x_out = [[[[0]]], [[[0]]], [[[0]]], [[[0]]], [[[1]]], [[[2]]], [[[0]]],
+ [[[3]]], [[[4]]]]
self._testPad(x_np, paddings, block_size, x_out)
# Test with depth larger than 1.
@@ -139,24 +141,18 @@ class SpaceToBatchTest(tf.test.TestCase, PythonOpImpl):
# Test for larger input dimensions.
# [1, 4, 4, 1] <-> [4, 2, 2, 1]
def testLargerInput2x2(self):
- x_np = [[[[1], [2], [3], [4]],
- [[5], [6], [7], [8]],
- [[9], [10], [11], [12]],
- [[13], [14], [15], [16]]]]
+ x_np = [[[[1], [2], [3], [4]], [[5], [6], [7], [8]],
+ [[9], [10], [11], [12]], [[13], [14], [15], [16]]]]
block_size = 2
- x_out = [[[[1], [3]], [[9], [11]]],
- [[[2], [4]], [[10], [12]]],
- [[[5], [7]], [[13], [15]]],
- [[[6], [8]], [[14], [16]]]]
+ x_out = [[[[1], [3]], [[9], [11]]], [[[2], [4]], [[10], [12]]],
+ [[[5], [7]], [[13], [15]]], [[[6], [8]], [[14], [16]]]]
self._testOne(x_np, block_size, x_out)
# Test with batch larger than 1.
# [2, 2, 4, 1] <-> [8, 1, 2, 1]
def testBatchInput2x2(self):
- x_np = [[[[1], [2], [3], [4]],
- [[5], [6], [7], [8]]],
- [[[9], [10], [11], [12]],
- [[13], [14], [15], [16]]]]
+ x_np = [[[[1], [2], [3], [4]], [[5], [6], [7], [8]]],
+ [[[9], [10], [11], [12]], [[13], [14], [15], [16]]]]
block_size = 2
x_out = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],
[[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]
@@ -167,22 +163,14 @@ class SpaceToBatchTest(tf.test.TestCase, PythonOpImpl):
# along the batch dimension.
# [2, 4, 4, 1] <-> [8, 2, 2, 1]
def testLargerInputBatch2x2(self):
- x_np = [[[[1], [2], [3], [4]],
- [[5], [6], [7], [8]],
- [[9], [10], [11], [12]],
- [[13], [14], [15], [16]]],
- [[[17], [18], [19], [20]],
- [[21], [22], [23], [24]],
- [[25], [26], [27], [28]],
- [[29], [30], [31], [32]]]]
- x_out = [[[[1], [3]], [[9], [11]]],
- [[[17], [19]], [[25], [27]]],
- [[[2], [4]], [[10], [12]]],
- [[[18], [20]], [[26], [28]]],
- [[[5], [7]], [[13], [15]]],
- [[[21], [23]], [[29], [31]]],
- [[[6], [8]], [[14], [16]]],
- [[[22], [24]], [[30], [32]]]]
+ x_np = [[[[1], [2], [3], [4]], [[5], [6], [7], [8]],
+ [[9], [10], [11], [12]], [[13], [14], [15], [16]]],
+ [[[17], [18], [19], [20]], [[21], [22], [23], [24]],
+ [[25], [26], [27], [28]], [[29], [30], [31], [32]]]]
+ x_out = [[[[1], [3]], [[9], [11]]], [[[17], [19]], [[25], [27]]],
+ [[[2], [4]], [[10], [12]]], [[[18], [20]], [[26], [28]]],
+ [[[5], [7]], [[13], [15]]], [[[21], [23]], [[29], [31]]],
+ [[[6], [8]], [[14], [16]]], [[[22], [24]], [[30], [32]]]]
block_size = 2
self._testOne(x_np, block_size, x_out)
@@ -195,7 +183,7 @@ class SpaceToBatchCppTest(SpaceToBatchTest, CppOpImpl):
pass
-class SpaceToBatchNDTest(tf.test.TestCase):
+class SpaceToBatchNDTest(test.TestCase):
"""Tests input-output pairs for the SpaceToBatchND and BatchToSpaceND ops."""
def _testPad(self, inputs, block_shape, paddings, outputs):
@@ -204,10 +192,12 @@ class SpaceToBatchNDTest(tf.test.TestCase):
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
# outputs = space_to_batch(inputs)
- x_tf = tf.space_to_batch_nd(tf.to_float(inputs), block_shape, paddings)
+ x_tf = array_ops.space_to_batch_nd(
+ math_ops.to_float(inputs), block_shape, paddings)
self.assertAllEqual(x_tf.eval(), outputs)
# inputs = batch_to_space(outputs)
- x_tf = tf.batch_to_space_nd(tf.to_float(outputs), block_shape, paddings)
+ x_tf = array_ops.batch_to_space_nd(
+ math_ops.to_float(outputs), block_shape, paddings)
self.assertAllEqual(x_tf.eval(), inputs)
def _testDirect(self, input_shape, block_shape, paddings):
@@ -217,46 +207,53 @@ class SpaceToBatchNDTest(tf.test.TestCase):
space_to_batch_direct(inputs, block_shape, paddings))
def testZeroBlockDimsZeroRemainingDims(self):
- self._testPad(inputs=[1, 2],
- block_shape=[],
- paddings=[],
- outputs=[1, 2],)
+ self._testPad(
+ inputs=[1, 2],
+ block_shape=[],
+ paddings=[],
+ outputs=[1, 2],)
def testZeroBlockDimsOneRemainingDim(self):
- self._testPad(inputs=[[1, 2], [3, 4]],
- block_shape=[],
- paddings=[],
- outputs=[[1, 2], [3, 4]])
+ self._testPad(
+ inputs=[[1, 2], [3, 4]],
+ block_shape=[],
+ paddings=[],
+ outputs=[[1, 2], [3, 4]])
# Same thing, but with a no-op block dim.
- self._testPad(inputs=[[1, 2], [3, 4]],
- block_shape=[1],
- paddings=[[0, 0]],
- outputs=[[1, 2], [3, 4]])
+ self._testPad(
+ inputs=[[1, 2], [3, 4]],
+ block_shape=[1],
+ paddings=[[0, 0]],
+ outputs=[[1, 2], [3, 4]])
def testZeroBlockDimsTwoRemainingDims(self):
- self._testPad(inputs=[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
- block_shape=[],
- paddings=[],
- outputs=[[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
+ self._testPad(
+ inputs=[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
+ block_shape=[],
+ paddings=[],
+ outputs=[[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
# Same thing, but with a no-op block dim.
- self._testPad(inputs=[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
- block_shape=[1],
- paddings=[[0, 0]],
- outputs=[[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
+ self._testPad(
+ inputs=[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
+ block_shape=[1],
+ paddings=[[0, 0]],
+ outputs=[[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
# Same thing, but with two no-op block dims.
- self._testPad(inputs=[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
- block_shape=[1, 1],
- paddings=[[0, 0], [0, 0]],
- outputs=[[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
+ self._testPad(
+ inputs=[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
+ block_shape=[1, 1],
+ paddings=[[0, 0], [0, 0]],
+ outputs=[[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
def testOneBlockDimZeroRemainingDims(self):
- self._testPad(inputs=[[1, 2, 3], [4, 5, 6]],
- block_shape=[2],
- paddings=[1, 0],
- outputs=[[0, 2], [0, 5], [1, 3], [4, 6]])
+ self._testPad(
+ inputs=[[1, 2, 3], [4, 5, 6]],
+ block_shape=[2],
+ paddings=[1, 0],
+ outputs=[[0, 2], [0, 5], [1, 3], [4, 6]])
def testOneBlockDimOneRemainingDim(self):
self._testPad(
@@ -268,31 +265,31 @@ class SpaceToBatchNDTest(tf.test.TestCase):
def testDirect(self):
# Test with zero-size remaining dimension.
- self._testDirect(input_shape=[3, 1, 2, 0],
- block_shape=[3],
- paddings=[[0, 2]])
+ self._testDirect(
+ input_shape=[3, 1, 2, 0], block_shape=[3], paddings=[[0, 2]])
# Test with zero-size blocked dimension.
- self._testDirect(input_shape=[3, 0, 2, 5],
- block_shape=[3],
- paddings=[[0, 0]])
+ self._testDirect(
+ input_shape=[3, 0, 2, 5], block_shape=[3], paddings=[[0, 0]])
# Test with padding up from zero size.
- self._testDirect(input_shape=[3, 0, 2, 5],
- block_shape=[3],
- paddings=[[1, 2]])
+ self._testDirect(
+ input_shape=[3, 0, 2, 5], block_shape=[3], paddings=[[1, 2]])
- self._testDirect(input_shape=[3, 3, 4, 5, 2],
- block_shape=[3, 4, 2],
- paddings=[[1, 2], [0, 0], [3, 0]])
+ self._testDirect(
+ input_shape=[3, 3, 4, 5, 2],
+ block_shape=[3, 4, 2],
+ paddings=[[1, 2], [0, 0], [3, 0]])
- self._testDirect(input_shape=[3, 3, 4, 5, 2],
- block_shape=[3, 4, 2, 2],
- paddings=[[1, 2], [0, 0], [3, 0], [0, 0]])
+ self._testDirect(
+ input_shape=[3, 3, 4, 5, 2],
+ block_shape=[3, 4, 2, 2],
+ paddings=[[1, 2], [0, 0], [3, 0], [0, 0]])
- self._testDirect(input_shape=[3, 2, 2, 3, 4, 5, 2, 5],
- block_shape=[1, 1, 3, 4, 2, 2],
- paddings=[[0, 0], [0, 0], [1, 2], [0, 0], [3, 0], [0, 0]])
+ self._testDirect(
+ input_shape=[3, 2, 2, 3, 4, 5, 2, 5],
+ block_shape=[1, 1, 3, 4, 2, 2],
+ paddings=[[0, 0], [0, 0], [1, 2], [0, 0], [3, 0], [0, 0]])
self._testDirect(
input_shape=[3, 2, 2, 3, 4, 5, 2, 5],
@@ -300,7 +297,7 @@ class SpaceToBatchNDTest(tf.test.TestCase):
paddings=[[0, 0], [0, 0], [1, 2], [0, 0], [3, 0], [0, 0], [0, 0]])
-class SpaceToBatchSpaceToDepth(tf.test.TestCase, PythonOpImpl):
+class SpaceToBatchSpaceToDepth(test.TestCase, PythonOpImpl):
# Verifies that: space_to_batch(x) = transpose(space_to_depth(transpose(x)))
def testSpaceToDepthTranspose(self):
@@ -308,10 +305,10 @@ class SpaceToBatchSpaceToDepth(tf.test.TestCase, PythonOpImpl):
block_size = 2
paddings = np.zeros((2, 2), dtype=np.int32)
y1 = self.space_to_batch(x, paddings, block_size=block_size)
- y2 = tf.transpose(
- tf.space_to_depth(
- tf.transpose(x, [3, 1, 2, 0]),
- block_size=block_size), [3, 1, 2, 0])
+ y2 = array_ops.transpose(
+ array_ops.space_to_depth(
+ array_ops.transpose(x, [3, 1, 2, 0]), block_size=block_size),
+ [3, 1, 2, 0])
with self.test_session(use_gpu=True):
self.assertAllEqual(y1.eval(), y2.eval())
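
The identity exercised above -- space_to_batch(x) equals
transpose(space_to_depth(transpose(x, [3, 1, 2, 0])), [3, 1, 2, 0]) -- can be
checked outside TensorFlow. A minimal NumPy sketch, assuming zero paddings;
the two reference implementations below are illustrative rewrites for this
note, not the TF kernels:

    import numpy as np

    def space_to_batch_ref(x, bs):
        # NHWC: each (i, j) offset inside a bs x bs block becomes its own
        # batch entry, ordered offset-major, then by original batch index.
        n, h, w, d = x.shape
        y = x.reshape(n, h // bs, bs, w // bs, bs, d)
        y = y.transpose(2, 4, 0, 1, 3, 5)
        return y.reshape(bs * bs * n, h // bs, w // bs, d)

    def space_to_depth_ref(x, bs):
        # NHWC: each bs x bs block is flattened into the depth axis.
        n, h, w, d = x.shape
        y = x.reshape(n, h // bs, bs, w // bs, bs, d)
        y = y.transpose(0, 1, 3, 2, 4, 5)
        return y.reshape(n, h // bs, w // bs, bs * bs * d)

    x = np.arange(32.0).reshape(1, 4, 4, 2)
    lhs = space_to_batch_ref(x, 2)
    rhs = space_to_depth_ref(x.transpose(3, 1, 2, 0), 2).transpose(3, 1, 2, 0)
    assert (lhs == rhs).all()
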
@@ -320,7 +317,7 @@ class SpaceToBatchSpaceToDepthCpp(SpaceToBatchSpaceToDepth, CppOpImpl):
pass
-class SpaceToBatchErrorHandlingTest(tf.test.TestCase, PythonOpImpl):
+class SpaceToBatchErrorHandlingTest(test.TestCase, PythonOpImpl):
def testInputWrongDimMissingBatch(self):
# The input is missing the first dimension ("batch")
@@ -383,8 +380,8 @@ class SpaceToBatchErrorHandlingTest(tf.test.TestCase, PythonOpImpl):
def testUnknownShape(self):
t = self.space_to_batch(
- tf.placeholder(tf.float32),
- tf.placeholder(tf.int32),
+ array_ops.placeholder(dtypes.float32),
+ array_ops.placeholder(dtypes.int32),
block_size=4)
self.assertEqual(4, t.get_shape().ndims)
@@ -394,7 +391,7 @@ class SpaceToBatchErrorHandlingCppTest(SpaceToBatchErrorHandlingTest,
pass
-class SpaceToBatchNDErrorHandlingTest(tf.test.TestCase):
+class SpaceToBatchNDErrorHandlingTest(test.TestCase):
def _testStaticShape(self, input_shape, block_shape, paddings, error):
block_shape = np.array(block_shape)
@@ -402,23 +399,26 @@ class SpaceToBatchNDErrorHandlingTest(tf.test.TestCase):
# Try with sizes known at graph construction time.
with self.assertRaises(error):
- _ = tf.space_to_batch_nd(
+ _ = array_ops.space_to_batch_nd(
np.zeros(input_shape, np.float32), block_shape, paddings)
def _testDynamicShape(self, input_shape, block_shape, paddings):
block_shape = np.array(block_shape)
paddings = np.array(paddings)
# Try with sizes unknown at graph construction time.
- input_placeholder = tf.placeholder(tf.float32)
- block_shape_placeholder = tf.placeholder(tf.int32, shape=block_shape.shape)
- paddings_placeholder = tf.placeholder(tf.int32)
- t = tf.space_to_batch_nd(input_placeholder, block_shape_placeholder,
- paddings_placeholder)
+ input_placeholder = array_ops.placeholder(dtypes.float32)
+ block_shape_placeholder = array_ops.placeholder(
+ dtypes.int32, shape=block_shape.shape)
+ paddings_placeholder = array_ops.placeholder(dtypes.int32)
+ t = array_ops.space_to_batch_nd(input_placeholder, block_shape_placeholder,
+ paddings_placeholder)
with self.assertRaises(ValueError):
- _ = t.eval({input_placeholder: np.zeros(input_shape, np.float32),
- block_shape_placeholder: block_shape,
- paddings_placeholder: paddings})
+ _ = t.eval({
+ input_placeholder: np.zeros(input_shape, np.float32),
+ block_shape_placeholder: block_shape,
+ paddings_placeholder: paddings
+ })
def _testShape(self, input_shape, block_shape, paddings, error):
self._testStaticShape(input_shape, block_shape, paddings, error)
@@ -445,54 +445,60 @@ class SpaceToBatchNDErrorHandlingTest(tf.test.TestCase):
def testUnknown(self):
# Verify that input shape and paddings shape can be unknown.
- _ = tf.space_to_batch_nd(
- tf.placeholder(tf.float32),
- tf.placeholder(tf.int32, shape=(2,)),
- tf.placeholder(tf.int32))
+ _ = array_ops.space_to_batch_nd(
+ array_ops.placeholder(dtypes.float32),
+ array_ops.placeholder(
+ dtypes.int32, shape=(2,)),
+ array_ops.placeholder(dtypes.int32))
# Only number of input dimensions is known.
- t = tf.space_to_batch_nd(
- tf.placeholder(tf.float32, shape=(None, None, None, None)),
- tf.placeholder(tf.int32, shape=(2,)),
- tf.placeholder(tf.int32))
+ t = array_ops.space_to_batch_nd(
+ array_ops.placeholder(
+ dtypes.float32, shape=(None, None, None, None)),
+ array_ops.placeholder(
+ dtypes.int32, shape=(2,)),
+ array_ops.placeholder(dtypes.int32))
self.assertEqual(4, t.get_shape().ndims)
# Dimensions are partially known.
- t = tf.space_to_batch_nd(
- tf.placeholder(tf.float32, shape=(None, None, None, 2)),
- tf.placeholder(tf.int32, shape=(2,)),
- tf.placeholder(tf.int32))
+ t = array_ops.space_to_batch_nd(
+ array_ops.placeholder(
+ dtypes.float32, shape=(None, None, None, 2)),
+ array_ops.placeholder(
+ dtypes.int32, shape=(2,)),
+ array_ops.placeholder(dtypes.int32))
self.assertEqual([None, None, None, 2], t.get_shape().as_list())
# Dimensions are partially known.
- t = tf.space_to_batch_nd(
- tf.placeholder(tf.float32, shape=(3, None, None, 2)), [2, 3],
- tf.placeholder(tf.int32))
+ t = array_ops.space_to_batch_nd(
+ array_ops.placeholder(
+ dtypes.float32, shape=(3, None, None, 2)), [2, 3],
+ array_ops.placeholder(dtypes.int32))
self.assertEqual([3 * 2 * 3, None, None, 2], t.get_shape().as_list())
# Dimensions are partially known.
- t = tf.space_to_batch_nd(
- tf.placeholder(tf.float32, shape=(3, None, 2, 2)), [2, 3],
- [[1, 1], [0, 1]])
+ t = array_ops.space_to_batch_nd(
+ array_ops.placeholder(
+ dtypes.float32, shape=(3, None, 2, 2)), [2, 3], [[1, 1], [0, 1]])
self.assertEqual([3 * 2 * 3, None, 1, 2], t.get_shape().as_list())
# Dimensions are fully known.
- t = tf.space_to_batch_nd(
- tf.placeholder(tf.float32, shape=(3, 2, 3, 2)), [2, 3],
- [[1, 1], [0, 0]])
+ t = array_ops.space_to_batch_nd(
+ array_ops.placeholder(
+ dtypes.float32, shape=(3, 2, 3, 2)), [2, 3], [[1, 1], [0, 0]])
self.assertEqual([3 * 2 * 3, 2, 1, 2], t.get_shape().as_list())
-class SpaceToBatchGradientTest(tf.test.TestCase, PythonOpImpl):
+class SpaceToBatchGradientTest(test.TestCase, PythonOpImpl):
# Check the gradients.
def _checkGrad(self, x, paddings, block_size):
assert 4 == x.ndim
with self.test_session(use_gpu=True):
- tf_x = tf.convert_to_tensor(x)
+ tf_x = ops.convert_to_tensor(x)
tf_y = self.space_to_batch(tf_x, paddings, block_size)
epsilon = 1e-5
- ((x_jacob_t, x_jacob_n)) = tf.test.compute_gradient(
+ ((x_jacob_t, x_jacob_n)) = gradient_checker.compute_gradient(
tf_x,
x.shape,
tf_y,
@@ -509,8 +515,8 @@ class SpaceToBatchGradientTest(tf.test.TestCase, PythonOpImpl):
x = np.random.normal(0, 1, b * h * w * d *
block_size_sq).astype(np.float32).reshape(
[b, h * block_size, w * block_size, d])
- paddings = np.array([[pad_beg, pad_end], [pad_beg, pad_end]],
- dtype=np.int32)
+ paddings = np.array(
+ [[pad_beg, pad_end], [pad_beg, pad_end]], dtype=np.int32)
self._checkGrad(x, paddings, block_size)
@@ -539,17 +545,17 @@ class SpaceToBatchGradientCppTest(SpaceToBatchGradientTest, CppOpImpl):
pass
-class SpaceToBatchNDGradientTest(tf.test.TestCase):
+class SpaceToBatchNDGradientTest(test.TestCase):
# Check the gradients.
def _checkGrad(self, x, block_shape, paddings):
block_shape = np.array(block_shape)
paddings = np.array(paddings).reshape((len(block_shape), 2))
with self.test_session():
- tf_x = tf.convert_to_tensor(x)
- tf_y = tf.space_to_batch_nd(tf_x, block_shape, paddings)
+ tf_x = ops.convert_to_tensor(x)
+ tf_y = array_ops.space_to_batch_nd(tf_x, block_shape, paddings)
epsilon = 1e-5
- ((x_jacob_t, x_jacob_n)) = tf.test.compute_gradient(
+ ((x_jacob_t, x_jacob_n)) = gradient_checker.compute_gradient(
tf_x,
x.shape,
tf_y,
@@ -579,7 +585,7 @@ class SpaceToBatchNDGradientTest(tf.test.TestCase):
self._compare([2, 2, 4, 3, 2], [2, 2, 2], [[1, 1], [1, 1], [1, 0]])
-class RequiredSpaceToBatchPaddingsTest(tf.test.TestCase):
+class RequiredSpaceToBatchPaddingsTest(test.TestCase):
def _checkProperties(self, input_shape, block_shape, base_paddings, paddings,
crops):
@@ -595,8 +601,9 @@ class RequiredSpaceToBatchPaddingsTest(tf.test.TestCase):
self.assertEqual(paddings[i, 0], base_paddings[i, 0])
self.assertLessEqual(0, paddings[i, 1] - base_paddings[i, 1])
self.assertLess(paddings[i, 1] - base_paddings[i, 1], block_shape[i])
- self.assertEqual((input_shape[i] + paddings[i, 0] + paddings[i, 1]) %
- block_shape[i], 0)
+ self.assertEqual(
+ (input_shape[i] + paddings[i, 0] + paddings[i, 1]) % block_shape[i],
+ 0)
self.assertEqual(crops[i, 0], 0)
self.assertEqual(crops[i, 1], paddings[i, 1] - base_paddings[i, 1])
@@ -606,8 +613,9 @@ class RequiredSpaceToBatchPaddingsTest(tf.test.TestCase):
if base_paddings is not None:
base_paddings = np.array(base_paddings)
# Check with constants.
- paddings, crops = tf.required_space_to_batch_paddings(
- input_shape, block_shape, base_paddings)
+ paddings, crops = array_ops.required_space_to_batch_paddings(input_shape,
+ block_shape,
+ base_paddings)
paddings_const = tensor_util.constant_value(paddings)
crops_const = tensor_util.constant_value(crops)
self.assertIsNotNone(paddings_const)
@@ -616,17 +624,18 @@ class RequiredSpaceToBatchPaddingsTest(tf.test.TestCase):
paddings_const, crops_const)
# Check with non-constants.
assignments = {}
- input_shape_placeholder = tf.placeholder(tf.int32)
+ input_shape_placeholder = array_ops.placeholder(dtypes.int32)
assignments[input_shape_placeholder] = input_shape
- block_shape_placeholder = tf.placeholder(tf.int32, [len(block_shape)])
+ block_shape_placeholder = array_ops.placeholder(dtypes.int32,
+ [len(block_shape)])
assignments[block_shape_placeholder] = block_shape
if base_paddings is not None:
- base_paddings_placeholder = tf.placeholder(tf.int32,
- [len(block_shape), 2])
+ base_paddings_placeholder = array_ops.placeholder(dtypes.int32,
+ [len(block_shape), 2])
assignments[base_paddings_placeholder] = base_paddings
else:
base_paddings_placeholder = None
- t_paddings, t_crops = tf.required_space_to_batch_paddings(
+ t_paddings, t_crops = array_ops.required_space_to_batch_paddings(
input_shape_placeholder, block_shape_placeholder,
base_paddings_placeholder)
with self.test_session():
@@ -636,21 +645,22 @@ class RequiredSpaceToBatchPaddingsTest(tf.test.TestCase):
self.assertAllEqual(crops_result, crops_const)
def testSimple(self):
- self._test(input_shape=np.zeros((0,), np.int32),
- block_shape=np.zeros((0,), np.int32),
- base_paddings=None)
- self._test(input_shape=np.zeros((0,), np.int32),
- block_shape=np.zeros((0,), np.int32),
- base_paddings=np.zeros((0, 2), np.int32))
+ self._test(
+ input_shape=np.zeros((0,), np.int32),
+ block_shape=np.zeros((0,), np.int32),
+ base_paddings=None)
+ self._test(
+ input_shape=np.zeros((0,), np.int32),
+ block_shape=np.zeros((0,), np.int32),
+ base_paddings=np.zeros((0, 2), np.int32))
self._test(input_shape=[1], block_shape=[2], base_paddings=None)
self._test(input_shape=[1], block_shape=[2], base_paddings=[[1, 0]])
self._test(input_shape=[3], block_shape=[1], base_paddings=[[1, 2]])
self._test(input_shape=[1], block_shape=[2], base_paddings=[[2, 3]])
self._test(input_shape=[4, 5], block_shape=[3, 2], base_paddings=None)
- self._test(input_shape=[4, 5],
- block_shape=[3, 2],
- base_paddings=[[0, 0], [0, 1]])
+ self._test(
+ input_shape=[4, 5], block_shape=[3, 2], base_paddings=[[0, 0], [0, 1]])
if __name__ == "__main__":
- tf.test.main()
+ test.main()
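
Every hunk in this file follows the same mechanical recipe: drop the
hourglass "import tensorflow as tf", import the submodule that defines each
symbol, and qualify the call sites. A minimal sketch of the resulting style
for the ops exercised above (graph-mode TF of this era assumed; the sample
input and explicit session are chosen here for illustration):

    import numpy as np

    from tensorflow.python.client import session
    from tensorflow.python.framework import ops
    from tensorflow.python.ops import array_ops
    from tensorflow.python.ops import math_ops

    # tf.space_to_batch_nd / tf.to_float become array_ops.space_to_batch_nd /
    # math_ops.to_float once the top-level package is no longer imported.
    with ops.Graph().as_default(), session.Session() as sess:
        x = np.array([[[[1], [2]], [[3], [4]]]])  # shape [1, 2, 2, 1]
        y = array_ops.space_to_batch_nd(
            math_ops.to_float(x), block_shape=[2, 2], paddings=[[0, 0], [0, 0]])
        print(sess.run(y))  # four [1, 1, 1] batch entries: 1, 2, 3, 4
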
diff --git a/tensorflow/python/kernel_tests/spacetodepth_op_test.py b/tensorflow/python/kernel_tests/spacetodepth_op_test.py
index 2f181c004b..3d4abbb8dd 100644
--- a/tensorflow/python/kernel_tests/spacetodepth_op_test.py
+++ b/tensorflow/python/kernel_tests/spacetodepth_op_test.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Functional tests for SpacetoDepth op."""
from __future__ import absolute_import
@@ -20,19 +19,24 @@ from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import gradient_checker
+from tensorflow.python.ops import math_ops
+from tensorflow.python.platform import test
-class SpaceToDepthTest(tf.test.TestCase):
+class SpaceToDepthTest(test.TestCase):
def _testOne(self, inputs, block_size, outputs):
with self.test_session(use_gpu=True):
- x_tf = tf.space_to_depth(tf.to_float(inputs), block_size)
+ x_tf = array_ops.space_to_depth(math_ops.to_float(inputs), block_size)
self.assertAllEqual(x_tf.eval(), outputs)
def testBasic(self):
- x_np = [[[[1], [2]],
- [[3], [4]]]]
+ x_np = [[[[1], [2]], [[3], [4]]]]
block_size = 2
x_out = [[[[1, 2, 3, 4]]]]
self._testOne(x_np, block_size, x_out)
@@ -40,24 +44,18 @@ class SpaceToDepthTest(tf.test.TestCase):
  # Tests for larger input dimensions, to make sure elements are
  # correctly ordered spatially.
def testLargerInput2x2(self):
- x_np = [[[[1], [2], [5], [6]],
- [[3], [4], [7], [8]],
- [[9], [10], [13], [14]],
- [[11], [12], [15], [16]]]]
+ x_np = [[[[1], [2], [5], [6]], [[3], [4], [7], [8]],
+ [[9], [10], [13], [14]], [[11], [12], [15], [16]]]]
block_size = 2
- x_out = [[[[1, 2, 3, 4],
- [5, 6, 7, 8]],
- [[9, 10, 11, 12],
- [13, 14, 15, 16]]]]
+ x_out = [[[[1, 2, 3, 4], [5, 6, 7, 8]], [[9, 10, 11, 12],
+ [13, 14, 15, 16]]]]
self._testOne(x_np, block_size, x_out)
  # Tests for larger input dimensions, to make sure elements are
  # correctly ordered in depth. Here, a larger block size is used.
def testLargerInput4x4(self):
- x_np = [[[[1], [2], [5], [6]],
- [[3], [4], [7], [8]],
- [[9], [10], [13], [14]],
- [[11], [12], [15], [16]]]]
+ x_np = [[[[1], [2], [5], [6]], [[3], [4], [7], [8]],
+ [[9], [10], [13], [14]], [[11], [12], [15], [16]]]]
block_size = 4
x_out = [[[[1, 2, 5, 6, 3, 4, 7, 8, 9, 10, 13, 14, 11, 12, 15, 16]]]]
self._testOne(x_np, block_size, x_out)
@@ -65,8 +63,7 @@ class SpaceToDepthTest(tf.test.TestCase):
  # Tests for larger input depths,
  # to make sure elements are properly interleaved in depth.
def testDepthInterleaved(self):
- x_np = [[[[1, 10], [2, 20]],
- [[3, 30], [4, 40]]]]
+ x_np = [[[[1, 10], [2, 20]], [[3, 30], [4, 40]]]]
block_size = 2
x_out = [[[[1, 10, 2, 20, 3, 30, 4, 40]]]]
self._testOne(x_np, block_size, x_out)
@@ -74,8 +71,7 @@ class SpaceToDepthTest(tf.test.TestCase):
  # Tests for larger input depths, here with an odd depth,
  # to make sure elements are properly interleaved in depth.
def testDepthInterleavedDepth3(self):
- x_np = [[[[1, 2, 3], [4, 5, 6]],
- [[7, 8, 9], [10, 11, 12]]]]
+ x_np = [[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]]
block_size = 2
x_out = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
self._testOne(x_np, block_size, x_out)
@@ -89,24 +85,25 @@ class SpaceToDepthTest(tf.test.TestCase):
[[9, 90], [10, 100], [13, 130], [14, 140]],
[[11, 110], [12, 120], [15, 150], [16, 160]]]]
block_size = 2
- x_out = [[[[1, 10, 2, 20, 3, 30, 4, 40],
- [5, 50, 6, 60, 7, 70, 8, 80]],
+ x_out = [[[[1, 10, 2, 20, 3, 30, 4, 40], [5, 50, 6, 60, 7, 70, 8, 80]],
[[9, 90, 10, 100, 11, 110, 12, 120],
[13, 130, 14, 140, 15, 150, 16, 160]]]]
self._testOne(x_np, block_size, x_out)
def testBlockSize2Batch10(self):
block_size = 2
+
def batch_input_elt(i):
return [[[1 * i], [2 * i], [5 * i], [6 * i]],
[[3 * i], [4 * i], [7 * i], [8 * i]],
[[9 * i], [10 * i], [13 * i], [14 * i]],
[[11 * i], [12 * i], [15 * i], [16 * i]]]
+
def batch_output_elt(i):
- return [[[1 * i, 2 * i, 3 * i, 4 * i],
- [5 * i, 6 * i, 7 * i, 8 * i]],
+ return [[[1 * i, 2 * i, 3 * i, 4 * i], [5 * i, 6 * i, 7 * i, 8 * i]],
[[9 * i, 10 * i, 11 * i, 12 * i],
[13 * i, 14 * i, 15 * i, 16 * i]]]
+
batch_size = 10
x_np = [batch_input_elt(i) for i in range(batch_size)]
x_out = [batch_output_elt(i) for i in range(batch_size)]
@@ -114,15 +111,10 @@ class SpaceToDepthTest(tf.test.TestCase):
# Tests for different width and height.
def testNonSquare(self):
- x_np = [[[[1, 10], [2, 20]],
- [[3, 30], [4, 40]],
- [[5, 50], [6, 60]],
- [[7, 70], [8, 80]],
- [[9, 90], [10, 100]],
- [[11, 110], [12, 120]]]]
+ x_np = [[[[1, 10], [2, 20]], [[3, 30], [4, 40]], [[5, 50], [6, 60]],
+ [[7, 70], [8, 80]], [[9, 90], [10, 100]], [[11, 110], [12, 120]]]]
block_size = 2
- x_out = [[[[1, 10, 2, 20, 3, 30, 4, 40]],
- [[5, 50, 6, 60, 7, 70, 8, 80]],
+ x_out = [[[[1, 10, 2, 20, 3, 30, 4, 40]], [[5, 50, 6, 60, 7, 70, 8, 80]],
[[9, 90, 10, 100, 11, 110, 12, 120]]]]
self._testOne(x_np, block_size, x_out)
@@ -130,88 +122,80 @@ class SpaceToDepthTest(tf.test.TestCase):
def testInputWrongDimMissingDepth(self):
# The input is missing the last dimension ("depth")
- x_np = [[[1, 2],
- [3, 4]]]
+ x_np = [[[1, 2], [3, 4]]]
block_size = 2
with self.assertRaises(ValueError):
- out_tf = tf.space_to_depth(x_np, block_size)
+ out_tf = array_ops.space_to_depth(x_np, block_size)
out_tf.eval()
def testInputWrongDimMissingBatch(self):
# The input is missing the first dimension ("batch")
- x_np = [[[1], [2]],
- [[3], [4]]]
+ x_np = [[[1], [2]], [[3], [4]]]
block_size = 2
with self.assertRaises(ValueError):
- _ = tf.space_to_depth(x_np, block_size)
+ _ = array_ops.space_to_depth(x_np, block_size)
def testBlockSize0(self):
# The block size is 0.
- x_np = [[[[1], [2]],
- [[3], [4]]]]
+ x_np = [[[[1], [2]], [[3], [4]]]]
block_size = 0
with self.assertRaises(ValueError):
- out_tf = tf.space_to_depth(x_np, block_size)
+ out_tf = array_ops.space_to_depth(x_np, block_size)
out_tf.eval()
def testBlockSizeOne(self):
    # The block size is 1, but it needs to be greater than 1.
- x_np = [[[[1], [2]],
- [[3], [4]]]]
+ x_np = [[[[1], [2]], [[3], [4]]]]
block_size = 1
with self.assertRaises(ValueError):
- out_tf = tf.space_to_depth(x_np, block_size)
+ out_tf = array_ops.space_to_depth(x_np, block_size)
out_tf.eval()
def testBlockSizeLarger(self):
# The block size is too large for this input.
- x_np = [[[[1], [2]],
- [[3], [4]]]]
+ x_np = [[[[1], [2]], [[3], [4]]]]
block_size = 10
with self.assertRaises(ValueError):
- out_tf = tf.space_to_depth(x_np, block_size)
+ out_tf = array_ops.space_to_depth(x_np, block_size)
out_tf.eval()
def testBlockSizeNotDivisibleWidth(self):
# The block size divides width but not height.
- x_np = [[[[1], [2], [3]],
- [[3], [4], [7]]]]
+ x_np = [[[[1], [2], [3]], [[3], [4], [7]]]]
block_size = 3
with self.assertRaises(ValueError):
- _ = tf.space_to_depth(x_np, block_size)
+ _ = array_ops.space_to_depth(x_np, block_size)
def testBlockSizeNotDivisibleHeight(self):
# The block size divides height but not width.
- x_np = [[[[1], [2]],
- [[3], [4]],
- [[5], [6]]]]
+ x_np = [[[[1], [2]], [[3], [4]], [[5], [6]]]]
block_size = 3
with self.assertRaises(ValueError):
- _ = tf.space_to_depth(x_np, block_size)
+ _ = array_ops.space_to_depth(x_np, block_size)
def testBlockSizeNotDivisibleBoth(self):
    # The block size divides neither width nor height.
- x_np = [[[[1], [2]],
- [[3], [4]]]]
+ x_np = [[[[1], [2]], [[3], [4]]]]
block_size = 3
with self.assertRaises(ValueError):
- _ = tf.space_to_depth(x_np, block_size)
+ _ = array_ops.space_to_depth(x_np, block_size)
def testUnknownShape(self):
- t = tf.space_to_depth(tf.placeholder(tf.float32), block_size=4)
+ t = array_ops.space_to_depth(
+ array_ops.placeholder(dtypes.float32), block_size=4)
self.assertEqual(4, t.get_shape().ndims)
-class SpaceToDepthGradientTest(tf.test.TestCase):
+class SpaceToDepthGradientTest(test.TestCase):
# Check the gradients.
def _checkGrad(self, x, block_size):
assert 4 == x.ndim
with self.test_session(use_gpu=True):
- tf_x = tf.convert_to_tensor(x)
- tf_y = tf.space_to_depth(tf_x, block_size)
+ tf_x = ops.convert_to_tensor(x)
+ tf_y = array_ops.space_to_depth(tf_x, block_size)
epsilon = 1e-2
- ((x_jacob_t, x_jacob_n)) = tf.test.compute_gradient(
+ ((x_jacob_t, x_jacob_n)) = gradient_checker.compute_gradient(
tf_x,
x.shape,
tf_y,
@@ -225,9 +209,9 @@ class SpaceToDepthGradientTest(tf.test.TestCase):
# tensor of shape [b, h * block_size, w * block_size, d].
def _compare(self, b, h, w, d, block_size):
block_size_sq = block_size * block_size
- x = np.random.normal(
- 0, 1, b * h * w * d * block_size_sq).astype(np.float32).reshape(
- [b, h * block_size, w * block_size, d])
+ x = np.random.normal(0, 1, b * h * w * d *
+ block_size_sq).astype(np.float32).reshape(
+ [b, h * block_size, w * block_size, d])
self._checkGrad(x, block_size)
@@ -243,4 +227,4 @@ class SpaceToDepthGradientTest(tf.test.TestCase):
if __name__ == "__main__":
- tf.test.main()
+ test.main()
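
The same recipe applies here. For reference, the depth interleaving that
testDepthInterleaved pins down, as a minimal graph-mode sketch (the sample
values are illustrative, not taken from the test file):

    import numpy as np

    from tensorflow.python.client import session
    from tensorflow.python.framework import ops
    from tensorflow.python.ops import array_ops

    with ops.Graph().as_default(), session.Session() as sess:
        x = np.float32([[[[1, 10], [2, 20]], [[3, 30], [4, 40]]]])  # [1, 2, 2, 2]
        y = array_ops.space_to_depth(x, block_size=2)
        # Pixels are scanned row-major within the block, and each pixel's
        # depth vector stays contiguous: [1 10 2 20 3 30 4 40].
        print(sess.run(y))
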
diff --git a/tensorflow/python/kernel_tests/sparse_add_op_test.py b/tensorflow/python/kernel_tests/sparse_add_op_test.py
index 6184c4edc4..874dcbabf1 100644
--- a/tensorflow/python/kernel_tests/sparse_add_op_test.py
+++ b/tensorflow/python/kernel_tests/sparse_add_op_test.py
@@ -12,8 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for SparseAdd."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
@@ -21,7 +21,17 @@ from __future__ import print_function
import timeit
import numpy as np
-import tensorflow as tf
+
+from tensorflow.python.client import session
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
+from tensorflow.python.framework import sparse_tensor
+from tensorflow.python.ops import gradient_checker
+from tensorflow.python.ops import math_ops
+from tensorflow.python.ops import sparse_ops
+import tensorflow.python.ops.sparse_grad # pylint: disable=unused-import
+from tensorflow.python.platform import test
def _sparsify(x, thresh=0.5, index_dtype=np.int64):
@@ -32,11 +42,11 @@ def _sparsify(x, thresh=0.5, index_dtype=np.int64):
x_values = x[non_zero]
x_shape = x.shape
- return tf.SparseTensor(
+ return sparse_tensor.SparseTensor(
indices=x_indices, values=x_values, dense_shape=x_shape), len(x_values)
-class SparseAddTest(tf.test.TestCase):
+class SparseAddTest(test.TestCase):
def _randomTensor(self, size, np_dtype, sparse=True):
n, m = size
@@ -53,13 +63,13 @@ class SparseAddTest(tf.test.TestCase):
if negate:
val = -np.array([1, 2, 3, 4])
shape = np.array([3, 3])
- return tf.SparseTensorValue(
+ return sparse_tensor.SparseTensorValue(
np.array(ind, np.int64),
- np.array(val, np.float32),
- np.array(shape, np.int64))
+ np.array(val, np.float32), np.array(shape, np.int64))
def _SparseTensor_3x3(self, negate=False):
- return tf.SparseTensor.from_value(self._SparseTensorValue_3x3(negate))
+ return sparse_tensor.SparseTensor.from_value(
+ self._SparseTensorValue_3x3(negate))
def _SparseTensor_3x3_v2(self):
# [ 1]
@@ -68,22 +78,21 @@ class SparseAddTest(tf.test.TestCase):
ind = np.array([[0, 1], [1, 0], [2, 0], [2, 1]])
val = np.array([1, -1.9, 3, -4.2])
shape = np.array([3, 3])
- return tf.SparseTensor(
- tf.constant(ind, tf.int64),
- tf.constant(val, tf.float32),
- tf.constant(shape, tf.int64))
+ return sparse_tensor.SparseTensor(
+ constant_op.constant(ind, dtypes.int64),
+ constant_op.constant(val, dtypes.float32),
+ constant_op.constant(shape, dtypes.int64))
def testAddSelf(self):
with self.test_session(use_gpu=False) as sess:
for sp_a in (self._SparseTensorValue_3x3(), self._SparseTensor_3x3()):
for sp_b in (self._SparseTensorValue_3x3(), self._SparseTensor_3x3()):
- sp_sum = tf.sparse_add(sp_a, sp_b)
+ sp_sum = sparse_ops.sparse_add(sp_a, sp_b)
sum_out = sess.run(sp_sum)
self.assertEqual(sp_sum.dense_shape.get_shape(), [2])
- self.assertAllEqual(
- sum_out.indices, [[0, 1], [1, 0], [2, 0], [2, 1]])
+ self.assertAllEqual(sum_out.indices, [[0, 1], [1, 0], [2, 0], [2, 1]])
self.assertAllEqual(sum_out.values, [2, 4, 6, 8])
self.assertAllEqual(sum_out.dense_shape, [3, 3])
@@ -92,7 +101,7 @@ class SparseAddTest(tf.test.TestCase):
sp_a = self._SparseTensor_3x3()
sp_b = self._SparseTensor_3x3(negate=True)
- sp_sum = tf.sparse_add(sp_a, sp_b, 0.1)
+ sp_sum = sparse_ops.sparse_add(sp_a, sp_b, 0.1)
sum_out = sess.run(sp_sum)
self.assertEqual(sp_sum.dense_shape.get_shape(), [2])
@@ -111,7 +120,7 @@ class SparseAddTest(tf.test.TestCase):
# [ 6 -.2]
# two values should vanish: |.1| < .21, and |-.2| < .21
- sp_sum = tf.sparse_add(sp_a, sp_b, thresh=0.21)
+ sp_sum = sparse_ops.sparse_add(sp_a, sp_b, thresh=0.21)
sum_out = sess.run(sp_sum)
self.assertEqual(sp_sum.dense_shape.get_shape(), [2])
@@ -120,7 +129,7 @@ class SparseAddTest(tf.test.TestCase):
self.assertAllEqual(sum_out.dense_shape, [3, 3])
# only .1 vanishes
- sp_sum = tf.sparse_add(sp_a, sp_b, thresh=0.11)
+ sp_sum = sparse_ops.sparse_add(sp_a, sp_b, thresh=0.11)
sum_out = sess.run(sp_sum)
self.assertEqual(sp_sum.dense_shape.get_shape(), [2])
@@ -135,12 +144,12 @@ class SparseAddTest(tf.test.TestCase):
for m in [4, 17]:
sp_a, nnz_a = self._randomTensor([n, m], np.float32)
sp_b, nnz_b = self._randomTensor([n, m], np.float32)
- sp_sum = tf.sparse_add(sp_a, sp_b)
+ sp_sum = sparse_ops.sparse_add(sp_a, sp_b)
nnz_sum = len(sp_sum.values.eval())
- err = tf.test.compute_gradient_error([sp_a.values, sp_b.values],
- [(nnz_a,), (nnz_b,)],
- sp_sum.values, (nnz_sum,))
+ err = gradient_checker.compute_gradient_error(
+ [sp_a.values, sp_b.values], [(nnz_a,), (nnz_b,)], sp_sum.values,
+ (nnz_sum,))
self.assertLess(err, 1e-3)
def testAddSparseDense(self):
@@ -153,12 +162,14 @@ class SparseAddTest(tf.test.TestCase):
with self.test_session(use_gpu=False):
sparse, unused_nnz = _sparsify(rand_vals_np, index_dtype=index_dtype)
- s = tf.sparse_add(sparse, tf.constant(dense_np)).eval()
+ s = sparse_ops.sparse_add(sparse,
+ constant_op.constant(dense_np)).eval()
self.assertAllEqual(dense_np + rand_vals_np, s)
self.assertTrue(s.dtype == dtype)
# check commutativity
- s = tf.sparse_add(tf.constant(dense_np), sparse).eval()
+ s = sparse_ops.sparse_add(constant_op.constant(dense_np),
+ sparse).eval()
self.assertAllEqual(dense_np + rand_vals_np, s)
self.assertTrue(s.dtype == dtype)
@@ -170,11 +181,11 @@ class SparseAddTest(tf.test.TestCase):
with self.test_session(use_gpu=False):
sparse, nnz = _sparsify(rand_vals_np)
- dense = tf.constant(dense_np, dtype=tf.float32)
- s = tf.sparse_add(sparse, dense)
+ dense = constant_op.constant(dense_np, dtype=dtypes.float32)
+ s = sparse_ops.sparse_add(sparse, dense)
- err = tf.test.compute_gradient_error(
- [sparse.values, dense], [(nnz,), (n, m)], s, (n, m))
+ err = gradient_checker.compute_gradient_error([sparse.values, dense],
+ [(nnz,), (n, m)], s, (n, m))
self.assertLess(err, 1e-3)
@@ -184,13 +195,14 @@ class SparseAddTest(tf.test.TestCase):
def _s2d_add_vs_sparse_add(sparsity, n, m, num_iters=50):
np.random.seed(1618)
- with tf.Session(graph=tf.Graph()) as sess:
+ with session.Session(graph=ops.Graph()) as sess:
sp_vals = np.random.rand(n, m).astype(np.float32)
sp_t, unused_nnz = _sparsify(sp_vals, thresh=sparsity, index_dtype=np.int32)
vals = np.random.rand(n, m).astype(np.float32)
- s2d = tf.add(tf.sparse_tensor_to_dense(sp_t), tf.constant(vals))
- sa = tf.sparse_add(sp_t, tf.constant(vals))
+ s2d = math_ops.add(
+ sparse_ops.sparse_tensor_to_dense(sp_t), constant_op.constant(vals))
+ sa = sparse_ops.sparse_add(sp_t, constant_op.constant(vals))
timeit.timeit(lambda: sess.run(s2d), number=3)
timeit.timeit(lambda: sess.run(sa), number=3)
@@ -202,7 +214,7 @@ def _s2d_add_vs_sparse_add(sparsity, n, m, num_iters=50):
return s2d_total * 1e3 / num_iters, sa_total * 1e3 / num_iters
-class SparseAddBenchmark(tf.test.Benchmark):
+class SparseAddBenchmark(test.Benchmark):
def benchmarkSparseAddDense(self):
@@ -217,5 +229,6 @@ class SparseAddBenchmark(tf.test.Benchmark):
s2d_dt, sa_dt,
s2d_dt / sa_dt))
+
if __name__ == "__main__":
- tf.test.main()
+ test.main()
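
For the sparse tests the call sites move from tf.sparse_add to
sparse_ops.sparse_add, with SparseTensor now coming from
tensorflow.python.framework.sparse_tensor. A minimal sketch of the
sparse-plus-dense case exercised above (sample values are illustrative):

    import numpy as np

    from tensorflow.python.client import session
    from tensorflow.python.framework import constant_op
    from tensorflow.python.framework import ops
    from tensorflow.python.framework import sparse_tensor
    from tensorflow.python.ops import sparse_ops

    with ops.Graph().as_default(), session.Session() as sess:
        sp = sparse_tensor.SparseTensor(
            indices=[[0, 1], [1, 0]], values=[1.0, 2.0], dense_shape=[2, 2])
        dense = constant_op.constant(np.ones((2, 2), np.float32))
        # Adding a SparseTensor to a dense Tensor yields a dense Tensor.
        print(sess.run(sparse_ops.sparse_add(sp, dense)))  # [[1. 2.], [3. 1.]]
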
diff --git a/tensorflow/python/kernel_tests/sparse_concat_op_test.py b/tensorflow/python/kernel_tests/sparse_concat_op_test.py
index 2b4bd24ff5..d3c7983128 100644
--- a/tensorflow/python/kernel_tests/sparse_concat_op_test.py
+++ b/tensorflow/python/kernel_tests/sparse_concat_op_test.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for SparseConcat."""
from __future__ import absolute_import
@@ -20,17 +19,28 @@ from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import sparse_tensor
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import sparse_ops
+from tensorflow.python.platform import test
-class SparseConcatTest(tf.test.TestCase):
+class SparseConcatTest(test.TestCase):
- def _SparseTensor_UnknownShape(self, ind_shape=None, val_shape=None,
+ def _SparseTensor_UnknownShape(self,
+ ind_shape=None,
+ val_shape=None,
shape_shape=None):
- return tf.SparseTensor(
- tf.placeholder(tf.int64, shape=ind_shape),
- tf.placeholder(tf.float32, shape=val_shape),
- tf.placeholder(tf.int64, shape=shape_shape))
+ return sparse_tensor.SparseTensor(
+ array_ops.placeholder(
+ dtypes.int64, shape=ind_shape),
+ array_ops.placeholder(
+ dtypes.float32, shape=val_shape),
+ array_ops.placeholder(
+ dtypes.int64, shape=shape_shape))
def _SparseTensorValue_3x3(self):
# [ 1]
@@ -39,13 +49,12 @@ class SparseConcatTest(tf.test.TestCase):
ind = np.array([[0, 2], [1, 0], [2, 0], [2, 2]])
val = np.array([1, 2, 3, 4])
shape = np.array([3, 3])
- return tf.SparseTensorValue(
+ return sparse_tensor.SparseTensorValue(
np.array(ind, np.int64),
- np.array(val, np.float32),
- np.array(shape, np.int64))
+ np.array(val, np.float32), np.array(shape, np.int64))
def _SparseTensor_3x3(self):
- return tf.SparseTensor.from_value(self._SparseTensorValue_3x3())
+ return sparse_tensor.SparseTensor.from_value(self._SparseTensorValue_3x3())
def _SparseTensorValue_3x5(self):
# [ ]
@@ -54,13 +63,12 @@ class SparseConcatTest(tf.test.TestCase):
ind = np.array([[1, 1], [2, 0], [2, 3], [2, 4]])
val = np.array([1, 2, 1, 0])
shape = np.array([3, 5])
- return tf.SparseTensorValue(
+ return sparse_tensor.SparseTensorValue(
np.array(ind, np.int64),
- np.array(val, np.float32),
- np.array(shape, np.int64))
+ np.array(val, np.float32), np.array(shape, np.int64))
def _SparseTensor_3x5(self):
- return tf.SparseTensor.from_value(self._SparseTensorValue_3x5())
+ return sparse_tensor.SparseTensor.from_value(self._SparseTensorValue_3x5())
def _SparseTensor_3x2(self):
# [ ]
@@ -69,10 +77,10 @@ class SparseConcatTest(tf.test.TestCase):
ind = np.array([[1, 0], [2, 0]])
val = np.array([1, 2])
shape = np.array([3, 2])
- return tf.SparseTensor(
- tf.constant(ind, tf.int64),
- tf.constant(val, tf.float32),
- tf.constant(shape, tf.int64))
+ return sparse_tensor.SparseTensor(
+ constant_op.constant(ind, dtypes.int64),
+ constant_op.constant(val, dtypes.float32),
+ constant_op.constant(shape, dtypes.int64))
def _SparseTensor_2x3(self):
# [ 1 ]
@@ -80,10 +88,10 @@ class SparseConcatTest(tf.test.TestCase):
ind = np.array([[0, 1], [1, 0], [1, 2]])
val = np.array([1, 1, 2])
shape = np.array([2, 3])
- return tf.SparseTensor(
- tf.constant(ind, tf.int64),
- tf.constant(val, tf.float32),
- tf.constant(shape, tf.int64))
+ return sparse_tensor.SparseTensor(
+ constant_op.constant(ind, dtypes.int64),
+ constant_op.constant(val, dtypes.float32),
+ constant_op.constant(shape, dtypes.int64))
def _SparseTensor_2x3x4(self):
ind = np.array([
@@ -94,10 +102,10 @@ class SparseConcatTest(tf.test.TestCase):
[1, 2, 2]])
val = np.array([1, 10, 12, 103, 111, 113, 122])
shape = np.array([2, 3, 4])
- return tf.SparseTensor(
- tf.constant(ind, tf.int64),
- tf.constant(val, tf.float32),
- tf.constant(shape, tf.int64))
+ return sparse_tensor.SparseTensor(
+ constant_op.constant(ind, dtypes.int64),
+ constant_op.constant(val, dtypes.float32),
+ constant_op.constant(shape, dtypes.int64))
def _SparseTensor_String3x3(self):
# [ a]
@@ -106,10 +114,10 @@ class SparseConcatTest(tf.test.TestCase):
ind = np.array([[0, 2], [1, 0], [2, 0], [2, 2]])
val = np.array(["a", "b", "c", "d"])
shape = np.array([3, 3])
- return tf.SparseTensor(
- tf.constant(ind, tf.int64),
- tf.constant(val, tf.string),
- tf.constant(shape, tf.int64))
+ return sparse_tensor.SparseTensor(
+ constant_op.constant(ind, dtypes.int64),
+ constant_op.constant(val, dtypes.string),
+ constant_op.constant(shape, dtypes.int64))
def _SparseTensor_String3x5(self):
# [ ]
@@ -118,10 +126,10 @@ class SparseConcatTest(tf.test.TestCase):
ind = np.array([[1, 1], [2, 0], [2, 3], [2, 4]])
val = np.array(["e", "f", "g", "h"])
shape = np.array([3, 5])
- return tf.SparseTensor(
- tf.constant(ind, tf.int64),
- tf.constant(val, tf.string),
- tf.constant(shape, tf.int64))
+ return sparse_tensor.SparseTensor(
+ constant_op.constant(ind, dtypes.int64),
+ constant_op.constant(val, dtypes.string),
+ constant_op.constant(shape, dtypes.int64))
def testConcat1(self):
with self.test_session(use_gpu=False) as sess:
@@ -133,7 +141,7 @@ class SparseConcatTest(tf.test.TestCase):
# Note that we ignore concat_dim in this case since we short-circuit the
# single-input case in python.
for concat_dim in (-2000, 1, 2000):
- sp_concat = tf.sparse_concat(concat_dim, [sp_a])
+ sp_concat = sparse_ops.sparse_concat(concat_dim, [sp_a])
self.assertEqual(sp_concat.indices.get_shape(), [4, 2])
self.assertEqual(sp_concat.values.get_shape(), [4])
@@ -155,7 +163,7 @@ class SparseConcatTest(tf.test.TestCase):
for sp_a in (self._SparseTensorValue_3x3(), self._SparseTensor_3x3()):
for sp_b in (self._SparseTensorValue_3x5(), self._SparseTensor_3x5()):
for concat_dim in (-1, 1):
- sp_concat = tf.sparse_concat(concat_dim, [sp_a, sp_b])
+ sp_concat = sparse_ops.sparse_concat(concat_dim, [sp_a, sp_b])
self.assertEqual(sp_concat.indices.get_shape(), [8, 2])
self.assertEqual(sp_concat.values.get_shape(), [8])
@@ -181,7 +189,7 @@ class SparseConcatTest(tf.test.TestCase):
sp_d = self._SparseTensor_2x3()
for concat_dim in (-2, 0):
- sp_concat = tf.sparse_concat(concat_dim, [sp_a, sp_d])
+ sp_concat = sparse_ops.sparse_concat(concat_dim, [sp_a, sp_d])
self.assertEqual(sp_concat.indices.get_shape(), [7, 2])
self.assertEqual(sp_concat.values.get_shape(), [7])
@@ -206,7 +214,7 @@ class SparseConcatTest(tf.test.TestCase):
sp_c = self._SparseTensor_3x2()
for concat_dim in (-1, 1):
- sp_concat = tf.sparse_concat(concat_dim, [sp_a, sp_b, sp_c])
+ sp_concat = sparse_ops.sparse_concat(concat_dim, [sp_a, sp_b, sp_c])
self.assertEqual(sp_concat.indices.get_shape(), [10, 2])
self.assertEqual(sp_concat.values.get_shape(), [10])
@@ -230,7 +238,7 @@ class SparseConcatTest(tf.test.TestCase):
sp_b = self._SparseTensor_String3x5()
for concat_dim in (-1, 1):
- sp_concat = tf.sparse_concat(concat_dim, [sp_a, sp_b])
+ sp_concat = sparse_ops.sparse_concat(concat_dim, [sp_a, sp_b])
self.assertEqual(sp_concat.indices.get_shape(), [8, 2])
self.assertEqual(sp_concat.values.get_shape(), [8])
@@ -253,7 +261,7 @@ class SparseConcatTest(tf.test.TestCase):
# Rank mismatches can be caught at shape-inference time
for concat_dim in (-1, 1):
with self.assertRaises(ValueError):
- tf.sparse_concat(concat_dim, [sp_a, sp_e])
+ sparse_ops.sparse_concat(concat_dim, [sp_a, sp_e])
def testMismatchedRankExpandNonconcatDim(self):
with self.test_session(use_gpu=False):
@@ -264,7 +272,8 @@ class SparseConcatTest(tf.test.TestCase):
# expand_nonconcat_dim=True.
for concat_dim in (-1, 1):
with self.assertRaises(ValueError):
- tf.sparse_concat(concat_dim, [sp_a, sp_e], expand_nonconcat_dim=True)
+ sparse_ops.sparse_concat(
+ concat_dim, [sp_a, sp_e], expand_nonconcat_dim=True)
def testMismatchedShapes(self):
with self.test_session(use_gpu=False) as sess:
@@ -273,7 +282,8 @@ class SparseConcatTest(tf.test.TestCase):
sp_c = self._SparseTensor_3x2()
sp_d = self._SparseTensor_2x3()
for concat_dim in (-1, 1):
- sp_concat = tf.sparse_concat(concat_dim, [sp_a, sp_b, sp_c, sp_d])
+ sp_concat = sparse_ops.sparse_concat(concat_dim,
+ [sp_a, sp_b, sp_c, sp_d])
# Shape mismatches can only be caught when the op is run
with self.assertRaisesOpError("Input shapes must match"):
@@ -287,9 +297,9 @@ class SparseConcatTest(tf.test.TestCase):
sp_d = self._SparseTensor_2x3()
for concat_dim0 in (-2, 0):
for concat_dim1 in (-1, 1):
- sp_concat_dim0 = tf.sparse_concat(
+ sp_concat_dim0 = sparse_ops.sparse_concat(
concat_dim0, [sp_a, sp_b, sp_c, sp_d], expand_nonconcat_dim=True)
- sp_concat_dim1 = tf.sparse_concat(
+ sp_concat_dim1 = sparse_ops.sparse_concat(
concat_dim1, [sp_a, sp_b, sp_c, sp_d], expand_nonconcat_dim=True)
sp_concat_dim0_out = sess.run(sp_concat_dim0)
@@ -317,10 +327,11 @@ class SparseConcatTest(tf.test.TestCase):
self._SparseTensor_UnknownShape(),
self._SparseTensor_UnknownShape(val_shape=[3]),
self._SparseTensor_UnknownShape(ind_shape=[1, 3]),
- self._SparseTensor_UnknownShape(shape_shape=[3])]
+ self._SparseTensor_UnknownShape(shape_shape=[3])
+ ]
for concat_dim in (-2, 0):
- sp_concat = tf.sparse_concat(concat_dim, sp_inputs)
+ sp_concat = sparse_ops.sparse_concat(concat_dim, sp_inputs)
self.assertEqual(sp_concat.indices.get_shape().as_list(), [None, 3])
self.assertEqual(sp_concat.values.get_shape().as_list(), [None])
@@ -328,4 +339,4 @@ class SparseConcatTest(tf.test.TestCase):
if __name__ == "__main__":
- tf.test.main()
+ test.main()
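
A minimal sketch of the concatenation behaviour covered above, in the new
import style (the two tiny tensors are this note's own; concatenating along
dim 1 offsets the second input's column indices by the first input's width):

    from tensorflow.python.client import session
    from tensorflow.python.framework import ops
    from tensorflow.python.framework import sparse_tensor
    from tensorflow.python.ops import sparse_ops

    with ops.Graph().as_default(), session.Session() as sess:
        a = sparse_tensor.SparseTensor([[0, 0]], [1.0], [2, 2])
        b = sparse_tensor.SparseTensor([[1, 0]], [2.0], [2, 1])
        out = sess.run(sparse_ops.sparse_concat(1, [a, b]))
        print(out.indices)      # [[0 0] [1 2]] -- b's [1, 0] shifts to [1, 2]
        print(out.dense_shape)  # [2 3]
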
diff --git a/tensorflow/python/kernel_tests/sparse_conditional_accumulator_test.py b/tensorflow/python/kernel_tests/sparse_conditional_accumulator_test.py
index c538532251..d749843410 100644
--- a/tensorflow/python/kernel_tests/sparse_conditional_accumulator_test.py
+++ b/tensorflow/python/kernel_tests/sparse_conditional_accumulator_test.py
@@ -20,7 +20,15 @@ from __future__ import print_function
import time
import numpy as np
-import tensorflow as tf
+
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes as dtypes_lib
+from tensorflow.python.framework import errors_impl
+from tensorflow.python.framework import ops
+from tensorflow.python.framework import tensor_shape
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import data_flow_ops
+from tensorflow.python.platform import test
def _indexedslice(x, noshape=False):
@@ -31,11 +39,11 @@ def _indexedslice(x, noshape=False):
values = x[indices]
if noshape:
dense_shape = None
- return tf.IndexedSlices(
+ return ops.IndexedSlices(
indices=indices.tolist(), values=values, dense_shape=dense_shape)
-class IndexedSlicesConditionalAccumulatorTest(tf.test.TestCase):
+class IndexedSlicesConditionalAccumulatorTest(test.TestCase):
def _assertEqual_indexedslices(self, expected_tensor, result):
self.assertAllEqual(expected_tensor.indices, result.indices)
@@ -49,9 +57,10 @@ class IndexedSlicesConditionalAccumulatorTest(tf.test.TestCase):
self._assertEqual_indexedslices(expected_tensor, result)
def testConstructor(self):
- with tf.Graph().as_default():
- q = tf.SparseConditionalAccumulator(tf.float32, name="Q")
- self.assertTrue(isinstance(q.accumulator_ref, tf.Tensor))
+ with ops.Graph().as_default():
+ q = data_flow_ops.SparseConditionalAccumulator(
+ dtypes_lib.float32, name="Q")
+ self.assertTrue(isinstance(q.accumulator_ref, ops.Tensor))
self.assertProtoEquals("""
name:'Q' op:'SparseConditionalAccumulator'
attr { key: 'dtype' value { type: DT_FLOAT } }
@@ -61,10 +70,12 @@ class IndexedSlicesConditionalAccumulatorTest(tf.test.TestCase):
""", q.accumulator_ref.op.node_def)
def testConstructorWithShape(self):
- with tf.Graph().as_default():
- q = tf.SparseConditionalAccumulator(
- tf.float32, name="Q", shape=tf.TensorShape([1, 5, 2, 8]))
- self.assertTrue(isinstance(q.accumulator_ref, tf.Tensor))
+ with ops.Graph().as_default():
+ q = data_flow_ops.SparseConditionalAccumulator(
+ dtypes_lib.float32,
+ name="Q",
+ shape=tensor_shape.TensorShape([1, 5, 2, 8]))
+ self.assertTrue(isinstance(q.accumulator_ref, ops.Tensor))
self.assertProtoEquals("""
name:'Q' op:'SparseConditionalAccumulator'
attr { key: 'dtype' value { type: DT_FLOAT } }
@@ -79,22 +90,23 @@ class IndexedSlicesConditionalAccumulatorTest(tf.test.TestCase):
def testAccumulatorSizeEmpty(self):
with self.test_session():
- q = tf.SparseConditionalAccumulator(tf.float32, name="Q")
+ q = data_flow_ops.SparseConditionalAccumulator(
+ dtypes_lib.float32, name="Q")
self.assertEqual(q.num_accumulated().eval(), 0)
def testAccumulatorSetGlobalStep(self):
with self.test_session():
- q = tf.SparseConditionalAccumulator(
- tf.float32, name="Q", shape=tf.TensorShape([1]))
+ q = data_flow_ops.SparseConditionalAccumulator(
+ dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
set_global_step_op = q.set_global_step(1)
set_global_step_op.run()
def testAccumulatorApplyGradFloat32(self):
with self.test_session():
- q = tf.SparseConditionalAccumulator(
- tf.float32, name="Q", shape=tf.TensorShape([3, 3]))
+ q = data_flow_ops.SparseConditionalAccumulator(
+ dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([3, 3]))
accum_op = q.apply_indexed_slices_grad(
- tf.IndexedSlices(
+ ops.IndexedSlices(
indices=[0, 2],
values=np.array([[0, 0, 1], [3, 0, 4]]).astype(np.float32)))
accum_op.run()
@@ -102,12 +114,12 @@ class IndexedSlicesConditionalAccumulatorTest(tf.test.TestCase):
def testDtypes(self):
with self.test_session() as sess:
- dtypes = [tf.float16, tf.float32, tf.float64]
+ dtypes = [dtypes_lib.float16, dtypes_lib.float32, dtypes_lib.float64]
for i in range(len(dtypes)):
dtype = dtypes[i]
- q = tf.SparseConditionalAccumulator(
- dtype, shape=tf.TensorShape([3, 3, 3]))
+ q = data_flow_ops.SparseConditionalAccumulator(
+ dtype, shape=tensor_shape.TensorShape([3, 3, 3]))
elems = np.arange(2)
sum_elems = np.zeros([3, 3, 3]).astype(dtype.as_numpy_dtype)
@@ -124,14 +136,14 @@ class IndexedSlicesConditionalAccumulatorTest(tf.test.TestCase):
def testAccumulatorMultipleAccumulators(self):
with self.test_session() as sess:
- q_f32_0 = tf.SparseConditionalAccumulator(
- tf.float32, name="Q", shape=tf.TensorShape([2, 2]))
- q_f32_1 = tf.SparseConditionalAccumulator(
- tf.float32, name="Q", shape=tf.TensorShape([2, 2]))
- q_f16_0 = tf.SparseConditionalAccumulator(
- tf.float16, name="Q", shape=tf.TensorShape([2, 2]))
- q_f16_1 = tf.SparseConditionalAccumulator(
- tf.float16, name="Q", shape=tf.TensorShape([2, 2]))
+ q_f32_0 = data_flow_ops.SparseConditionalAccumulator(
+ dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([2, 2]))
+ q_f32_1 = data_flow_ops.SparseConditionalAccumulator(
+ dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([2, 2]))
+ q_f16_0 = data_flow_ops.SparseConditionalAccumulator(
+ dtypes_lib.float16, name="Q", shape=tensor_shape.TensorShape([2, 2]))
+ q_f16_1 = data_flow_ops.SparseConditionalAccumulator(
+ dtypes_lib.float16, name="Q", shape=tensor_shape.TensorShape([2, 2]))
accums = [q_f16_0, q_f16_1, q_f32_0, q_f32_1]
@@ -141,8 +153,8 @@ class IndexedSlicesConditionalAccumulatorTest(tf.test.TestCase):
expected_tensors = []
for i in range(len(accums)):
- tensor_to_add = np.array(elems[i]).astype(accums[
- i].dtype.as_numpy_dtype)
+ tensor_to_add = np.array(elems[i]).astype(accums[i]
+ .dtype.as_numpy_dtype)
expected_tensor = _indexedslice(tensor_to_add)
expected_tensors.append(expected_tensor)
st = _indexedslice(tensor_to_add)
@@ -154,9 +166,10 @@ class IndexedSlicesConditionalAccumulatorTest(tf.test.TestCase):
def testAccumulatorTakeGrad(self):
with self.test_session() as sess:
- q = tf.SparseConditionalAccumulator(tf.float32, name="Q", shape=())
+ q = data_flow_ops.SparseConditionalAccumulator(
+ dtypes_lib.float32, name="Q", shape=())
- grad_indexed_slices = tf.IndexedSlices(
+ grad_indexed_slices = ops.IndexedSlices(
indices=[0, 1], values=np.array([[1, 0], [0, 2]]).astype(np.float32))
accum_op = q.apply_indexed_slices_grad(grad_indexed_slices)
accum_op.run()
@@ -173,9 +186,10 @@ class IndexedSlicesConditionalAccumulatorTest(tf.test.TestCase):
def testAccumulatorRepeatedTakeGrad(self):
with self.test_session() as sess:
- q = tf.SparseConditionalAccumulator(tf.float32, name="Q", shape=())
+ q = data_flow_ops.SparseConditionalAccumulator(
+ dtypes_lib.float32, name="Q", shape=())
- grad_indexed_slices = tf.IndexedSlices(
+ grad_indexed_slices = ops.IndexedSlices(
indices=[0, 1], values=np.array([[1, 0], [0, 2]]).astype(np.float32))
accum_op = q.apply_indexed_slices_grad(grad_indexed_slices, local_step=0)
accum_op.run()
@@ -191,7 +205,7 @@ class IndexedSlicesConditionalAccumulatorTest(tf.test.TestCase):
self.assertAllEqual(val.values, [[0.5, 0.5], [0, 2], [3, 0]])
self.assertAllEqual(val.dense_shape, [-1, 2])
- grad_indexed_slices = tf.IndexedSlices(
+ grad_indexed_slices = ops.IndexedSlices(
indices=[0, 1],
values=np.array([[10, 0], [0, 20]]).astype(np.float32))
accum_op = q.apply_indexed_slices_grad(grad_indexed_slices, local_step=1)
@@ -210,8 +224,8 @@ class IndexedSlicesConditionalAccumulatorTest(tf.test.TestCase):
def testParallelApplyGrad(self):
with self.test_session() as sess:
- q = tf.SparseConditionalAccumulator(
- tf.float32, name="Q", shape=tf.TensorShape([2, 2]))
+ q = data_flow_ops.SparseConditionalAccumulator(
+ dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([2, 2]))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
accum_ops = []
for x in elems:
@@ -222,8 +236,10 @@ class IndexedSlicesConditionalAccumulatorTest(tf.test.TestCase):
def apply_indexed_slices_grad(accum_op):
sess.run(accum_op)
- threads = [self.checkedThread(
- target=apply_indexed_slices_grad, args=(o,)) for o in accum_ops]
+ threads = [
+ self.checkedThread(
+ target=apply_indexed_slices_grad, args=(o,)) for o in accum_ops
+ ]
for thread in threads:
thread.start()
@@ -239,8 +255,8 @@ class IndexedSlicesConditionalAccumulatorTest(tf.test.TestCase):
def testParallelTakeGrad(self):
with self.test_session() as sess:
- q = tf.SparseConditionalAccumulator(
- tf.float32, name="Q", shape=tf.TensorShape([2, 2]))
+ q = data_flow_ops.SparseConditionalAccumulator(
+ dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([2, 2]))
elems = [e + 1 for e in range(10)]
accum_ops = []
for e in elems:
@@ -278,8 +294,8 @@ class IndexedSlicesConditionalAccumulatorTest(tf.test.TestCase):
def testAccumulatorApplyAndBlockingTake(self):
with self.test_session() as sess:
- q = tf.SparseConditionalAccumulator(
- tf.float32, name="Q", shape=tf.TensorShape([2, 2]))
+ q = data_flow_ops.SparseConditionalAccumulator(
+ dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([2, 2]))
elems = [10.0, 20.0, 30.0]
elems_ave = sum(elems) / len(elems)
@@ -313,8 +329,10 @@ class IndexedSlicesConditionalAccumulatorTest(tf.test.TestCase):
def testAccumulatorCancel(self):
with self.test_session() as sess:
- q = tf.SparseConditionalAccumulator(
- tf.float32, name="Q", shape=tf.TensorShape([1, 2, 3]))
+ q = data_flow_ops.SparseConditionalAccumulator(
+ dtypes_lib.float32,
+ name="Q",
+ shape=tensor_shape.TensorShape([1, 2, 3]))
takeg_t = q.take_indexed_slices_grad(1)
takeg_thread = self.checkedThread(
@@ -330,11 +348,11 @@ class IndexedSlicesConditionalAccumulatorTest(tf.test.TestCase):
def testNonVectorIndices(self):
with self.test_session():
- q = tf.SparseConditionalAccumulator(
- tf.float32, name="Q", shape=tf.TensorShape([3, 3]))
+ q = data_flow_ops.SparseConditionalAccumulator(
+ dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([3, 3]))
with self.assertRaisesRegexp(
- tf.errors.InvalidArgumentError,
+ errors_impl.InvalidArgumentError,
"Input indices should be vector but received shape:"):
q.apply_grad(
grad_indices=[[0, 1], [1, 0]],
@@ -342,20 +360,20 @@ class IndexedSlicesConditionalAccumulatorTest(tf.test.TestCase):
def testZeroDimensionValues(self):
with self.test_session():
- q = tf.SparseConditionalAccumulator(
- tf.float32, name="Q", shape=tf.TensorShape([3, 3]))
+ q = data_flow_ops.SparseConditionalAccumulator(
+ dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([3, 3]))
- with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
+ with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"Values cannot be 0-dimensional."):
q.apply_grad(
grad_indices=[0], grad_values=np.array(1).astype(np.float32)).run()
def testWrongNonEmptyInputValues(self):
with self.test_session():
- q = tf.SparseConditionalAccumulator(
- tf.float32, name="Q", shape=tf.TensorShape([3, 3]))
+ q = data_flow_ops.SparseConditionalAccumulator(
+ dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([3, 3]))
- with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
+ with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
" non-empty input values, got "):
q.apply_grad(
grad_indices=[0, 1],
@@ -363,55 +381,59 @@ class IndexedSlicesConditionalAccumulatorTest(tf.test.TestCase):
def testDynamicNonVectorIndices(self):
with self.test_session() as sess:
- q = tf.SparseConditionalAccumulator(
- tf.float32, name="Q", shape=tf.TensorShape([3, 3]))
+ q = data_flow_ops.SparseConditionalAccumulator(
+ dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([3, 3]))
- x_indices = tf.placeholder(tf.int64)
- x_values = tf.placeholder(tf.float32)
+ x_indices = array_ops.placeholder(dtypes_lib.int64)
+ x_values = array_ops.placeholder(dtypes_lib.float32)
accum_op = q.apply_grad(grad_indices=x_indices, grad_values=x_values)
with self.assertRaisesRegexp(
- tf.errors.InvalidArgumentError,
+ errors_impl.InvalidArgumentError,
"Input indices should be vector but received shape:"):
sess.run(accum_op,
- feed_dict={x_indices: [[0, 1], [1, 0]],
- x_values: np.array([1, 2]).astype(np.float32)})
+ feed_dict={
+ x_indices: [[0, 1], [1, 0]],
+ x_values: np.array([1, 2]).astype(np.float32)
+ })
def testDynamicWrongNonEmptyInputValues(self):
with self.test_session() as sess:
- q = tf.SparseConditionalAccumulator(
- tf.float32, name="Q", shape=tf.TensorShape([3, 3]))
+ q = data_flow_ops.SparseConditionalAccumulator(
+ dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([3, 3]))
- x_indices = tf.placeholder(tf.int64)
- x_values = tf.placeholder(tf.float32)
+ x_indices = array_ops.placeholder(dtypes_lib.int64)
+ x_values = array_ops.placeholder(dtypes_lib.float32)
accum_op = q.apply_grad(grad_indices=x_indices, grad_values=x_values)
- with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
+ with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
" non-empty input values, got "):
sess.run(accum_op,
- feed_dict={x_indices: [0, 1],
- x_values: np.array([[0, 1, 1]]).astype(np.float32)})
+ feed_dict={
+ x_indices: [0, 1],
+ x_values: np.array([[0, 1, 1]]).astype(np.float32)
+ })
def testEmptyShapeApply(self):
with self.test_session():
- q = tf.SparseConditionalAccumulator(
- tf.float32, name="Q", shape=tf.TensorShape([]))
+ q = data_flow_ops.SparseConditionalAccumulator(
+ dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([]))
- with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
+ with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"Input indices should be vector"):
q.apply_grad(grad_indices=0, grad_values=[1.0], grad_shape=[]).run()
- with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
+ with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"Input indices should be vector"):
q.apply_grad(grad_indices=0, grad_values=[1.0]).run()
- with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
+ with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"Values cannot be 0-dimensional."):
q.apply_grad(grad_indices=[0], grad_values=1.0, grad_shape=[]).run()
- with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
+ with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"Values cannot be 0-dimensional."):
q.apply_grad(grad_indices=[0], grad_values=1.0).run()
@@ -421,12 +443,12 @@ class IndexedSlicesConditionalAccumulatorTest(tf.test.TestCase):
def testValidateShape(self):
with self.test_session() as sess:
- q = tf.SparseConditionalAccumulator(
- tf.float32, name="Q", shape=[2, 2, None])
+ q = data_flow_ops.SparseConditionalAccumulator(
+ dtypes_lib.float32, name="Q", shape=[2, 2, None])
# Provided shape has wrong rank
with self.assertRaisesRegexp(
- tf.errors.InvalidArgumentError,
+ errors_impl.InvalidArgumentError,
"Shape mismatch: expected shape rank at least 3, got 2"):
q.apply_grad(
grad_indices=[0],
@@ -435,7 +457,7 @@ class IndexedSlicesConditionalAccumulatorTest(tf.test.TestCase):
# Provided shape has wrong dim
with self.assertRaisesRegexp(
- tf.errors.InvalidArgumentError,
+ errors_impl.InvalidArgumentError,
"Shape mismatch: expected shape dim 1 to be 2, got 3"):
q.apply_grad(
grad_indices=[0],
@@ -444,7 +466,7 @@ class IndexedSlicesConditionalAccumulatorTest(tf.test.TestCase):
# Indices exceeded accumulator's shape's limits
with self.assertRaisesRegexp(
- tf.errors.InvalidArgumentError,
+ errors_impl.InvalidArgumentError,
"Shape mismatch: index of slice 0 exceeded limits of shape;"
" index is 3 exceeded 2"):
q.apply_grad(
@@ -453,7 +475,7 @@ class IndexedSlicesConditionalAccumulatorTest(tf.test.TestCase):
# Values' rank does not match shape
with self.assertRaisesRegexp(
- tf.errors.InvalidArgumentError,
+ errors_impl.InvalidArgumentError,
"Shape mismatch: expected values rank at least 3, got 2"):
q.apply_grad(
grad_indices=[0, 1],
@@ -461,7 +483,7 @@ class IndexedSlicesConditionalAccumulatorTest(tf.test.TestCase):
# Values' dim does not match shape
with self.assertRaisesRegexp(
- tf.errors.InvalidArgumentError,
+ errors_impl.InvalidArgumentError,
"Shape mismatch: expected values dim 1 to be 2, got 3"):
q.apply_grad(
grad_indices=[0],
@@ -477,7 +499,7 @@ class IndexedSlicesConditionalAccumulatorTest(tf.test.TestCase):
# Values' rank does not match accumulated gradient
with self.assertRaisesRegexp(
- tf.errors.InvalidArgumentError,
+ errors_impl.InvalidArgumentError,
"Shape mismatch: expected values rank 4, got 3"):
q.apply_grad(
grad_indices=[0],
@@ -485,7 +507,7 @@ class IndexedSlicesConditionalAccumulatorTest(tf.test.TestCase):
# Values' dim does not match accumulated gradient
with self.assertRaisesRegexp(
- tf.errors.InvalidArgumentError,
+ errors_impl.InvalidArgumentError,
"Shape mismatch: expected values dim 3 to be 2, got 3"):
q.apply_grad(
grad_indices=[0],
@@ -506,7 +528,7 @@ class IndexedSlicesConditionalAccumulatorTest(tf.test.TestCase):
local_step=1).run()
with self.assertRaisesRegexp(
- tf.errors.InvalidArgumentError,
+ errors_impl.InvalidArgumentError,
"Shape mismatch: expected values dim 3 to be 3, got 2"):
q.apply_grad(
grad_indices=[0],
@@ -516,7 +538,8 @@ class IndexedSlicesConditionalAccumulatorTest(tf.test.TestCase):
def testReturnShape(self):
with self.test_session() as sess:
- q = tf.SparseConditionalAccumulator(tf.float32, name="Q", shape=[2, None])
+ q = data_flow_ops.SparseConditionalAccumulator(
+ dtypes_lib.float32, name="Q", shape=[2, None])
q.apply_grad(
grad_indices=[0],
@@ -526,7 +549,8 @@ class IndexedSlicesConditionalAccumulatorTest(tf.test.TestCase):
val = sess.run(q.take_indexed_slices_grad(1))
self.assertAllEqual(val.dense_shape, [2, 2, 2, 2])
- q = tf.SparseConditionalAccumulator(tf.float32, name="Q", shape=[None, 2])
+ q = data_flow_ops.SparseConditionalAccumulator(
+ dtypes_lib.float32, name="Q", shape=[None, 2])
q.apply_grad(
grad_indices=[0],
@@ -539,24 +563,24 @@ class IndexedSlicesConditionalAccumulatorTest(tf.test.TestCase):
def testApplyGradtInt32IndicesAndShape(self):
with self.test_session() as sess:
- q = tf.SparseConditionalAccumulator(
- tf.float32, name="Q", shape=tf.TensorShape([3, 3]))
+ q = data_flow_ops.SparseConditionalAccumulator(
+ dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([3, 3]))
accum_op = q.apply_grad(
- grad_indices=tf.constant(
- [0, 2], dtype=tf.int32),
- grad_values=tf.constant(
- [[0, 0, 1], [3, 0, 4]], dtype=tf.float32),
- grad_shape=tf.constant(
- [3, 3], dtype=tf.int32))
+ grad_indices=constant_op.constant(
+ [0, 2], dtype=dtypes_lib.int32),
+ grad_values=constant_op.constant(
+ [[0, 0, 1], [3, 0, 4]], dtype=dtypes_lib.float32),
+ grad_shape=constant_op.constant(
+ [3, 3], dtype=dtypes_lib.int32))
accum_op.run()
accum_op = q.apply_indexed_slices_grad(
- tf.IndexedSlices(
- indices=tf.constant(
- [0, 2], dtype=tf.int32),
- values=tf.constant(
- [[0, 0, 1], [3, 0, 4]], dtype=tf.float32),
- dense_shape=tf.constant(
- [3, 3], dtype=tf.int32)))
+ ops.IndexedSlices(
+ indices=constant_op.constant(
+ [0, 2], dtype=dtypes_lib.int32),
+ values=constant_op.constant(
+ [[0, 0, 1], [3, 0, 4]], dtype=dtypes_lib.float32),
+ dense_shape=constant_op.constant(
+ [3, 3], dtype=dtypes_lib.int32)))
accum_op.run()
self.assertEqual(q.num_accumulated().eval(), 2)
@@ -567,4 +591,4 @@ class IndexedSlicesConditionalAccumulatorTest(tf.test.TestCase):
if __name__ == "__main__":
- tf.test.main()
+ test.main()
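
The hunks above convert this accumulator test from top-level tf.* references to direct module imports (data_flow_ops, dtypes, tensor_shape, constant_op, ops, errors_impl, test). As a minimal sketch of the accumulator API the tests exercise, in the post-change import style (graph construction only; shapes and values are illustrative):

    from tensorflow.python.framework import dtypes as dtypes_lib
    from tensorflow.python.framework import tensor_shape
    from tensorflow.python.ops import data_flow_ops

    # Accumulator that aggregates sparse gradients into a [3, 3] sum.
    q = data_flow_ops.SparseConditionalAccumulator(
        dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([3, 3]))

    # Stage one sparse gradient touching rows 0 and 2. As the error-path
    # tests above check, indices must be a vector and values at least 1-D.
    accum_op = q.apply_grad(
        grad_indices=[0, 2],
        grad_values=[[0.0, 0.0, 1.0], [3.0, 0.0, 4.0]],
        grad_shape=[3, 3])
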
diff --git a/tensorflow/python/kernel_tests/sparse_matmul_op_test.py b/tensorflow/python/kernel_tests/sparse_matmul_op_test.py
index 9f789798b0..6ca4479671 100644
--- a/tensorflow/python/kernel_tests/sparse_matmul_op_test.py
+++ b/tensorflow/python/kernel_tests/sparse_matmul_op_test.py
@@ -12,41 +12,55 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for tensorflow.ops.tf.matmul."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.ops import gradient_checker
+from tensorflow.python.ops import math_ops
+from tensorflow.python.platform import test
def RandMatrix(rows, cols, tr, round_bfloat=False):
if tr:
rows, cols = cols, rows
rand_func = np.random.randint if round_bfloat else np.random.uniform
- return (np.clip(rand_func(low=-256.0, high=256.0, size=rows * cols),
- -64, 64) / 128.0).reshape([rows, cols]).astype(np.float32)
-
-
-class SparseMatMulTest(tf.test.TestCase):
-
- def _testCpuMatmul(self, x, y,
- tr_a=False, tr_b=False,
- sp_a=True, sp_b=False,
- x_dtype=tf.float32,
- y_dtype=tf.float32):
+ return (np.clip(
+ rand_func(
+ low=-256.0, high=256.0, size=rows * cols), -64,
+ 64) / 128.0).reshape([rows, cols]).astype(np.float32)
+
+
+class SparseMatMulTest(test.TestCase):
+
+ def _testCpuMatmul(self,
+ x,
+ y,
+ tr_a=False,
+ tr_b=False,
+ sp_a=True,
+ sp_b=False,
+ x_dtype=dtypes.float32,
+ y_dtype=dtypes.float32):
with self.test_session(use_gpu=False):
- tf_x = tf.cast(x, x_dtype)
- tf_y = tf.cast(y, y_dtype)
- tf_ans = tf.matmul(tf_x, tf_y,
- transpose_a=tr_a, transpose_b=tr_b,
- a_is_sparse=sp_a,
- b_is_sparse=sp_b)
+ tf_x = math_ops.cast(x, x_dtype)
+ tf_y = math_ops.cast(y, y_dtype)
+ tf_ans = math_ops.matmul(
+ tf_x,
+ tf_y,
+ transpose_a=tr_a,
+ transpose_b=tr_b,
+ a_is_sparse=sp_a,
+ b_is_sparse=sp_b)
out = tf_ans.eval()
- np_x = tf.cast(tf_x, tf.float32).eval()
- np_y = tf.cast(tf_y, tf.float32).eval()
+ np_x = math_ops.cast(tf_x, dtypes.float32).eval()
+ np_y = math_ops.cast(tf_y, dtypes.float32).eval()
if tr_a:
np_x = np.transpose(np_x)
@@ -60,22 +74,22 @@ class SparseMatMulTest(tf.test.TestCase):
def testBasic(self):
x = np.arange(0., 4.).reshape([4, 1]).astype(np.float32)
y = np.arange(-1., 1.).reshape([1, 2]).astype(np.float32)
- for x_dtype in (tf.float32, tf.bfloat16):
- for y_dtype in (tf.float32, tf.bfloat16):
+ for x_dtype in (dtypes.float32, dtypes.bfloat16):
+ for y_dtype in (dtypes.float32, dtypes.bfloat16):
self._testCpuMatmul(x, y, x_dtype=x_dtype, y_dtype=y_dtype)
def testZeroDim(self):
x = np.ones((4, 0)).astype(np.float32)
y = np.ones((0, 3)).astype(np.float32)
- for x_dtype in (tf.float32, tf.bfloat16):
- for y_dtype in (tf.float32, tf.bfloat16):
+ for x_dtype in (dtypes.float32, dtypes.bfloat16):
+ for y_dtype in (dtypes.float32, dtypes.bfloat16):
self._testCpuMatmul(x, y, x_dtype=x_dtype, y_dtype=y_dtype)
def testEmpty(self):
x = np.ones((0, 0)).astype(np.float32)
y = np.ones((0, 0)).astype(np.float32)
- for x_dtype in (tf.float32, tf.bfloat16):
- for y_dtype in (tf.float32, tf.bfloat16):
+ for x_dtype in (dtypes.float32, dtypes.bfloat16):
+ for y_dtype in (dtypes.float32, dtypes.bfloat16):
self._testCpuMatmul(x, y, x_dtype=x_dtype, y_dtype=y_dtype)
# Tests setting one dimension to be a high value.
@@ -83,11 +97,9 @@ class SparseMatMulTest(tf.test.TestCase):
r1 = np.random.randint(6000, 20000)
r2 = np.random.randint(1, 10)
r3 = np.random.randint(1, 10)
- for m, k, n in [(r1, r2, r3),
- (r2, r1, r3),
- (r2, r3, r1)]:
- for x_dtype in (tf.float32, tf.bfloat16):
- for y_dtype in (tf.float32, tf.bfloat16):
+ for m, k, n in [(r1, r2, r3), (r2, r1, r3), (r2, r3, r1)]:
+ for x_dtype in (dtypes.float32, dtypes.bfloat16):
+ for y_dtype in (dtypes.float32, dtypes.bfloat16):
x = RandMatrix(m, k, False)
y = RandMatrix(k, n, False)
self._testCpuMatmul(x, y, x_dtype=x_dtype, y_dtype=y_dtype)
@@ -98,52 +110,65 @@ class SparseMatMulTest(tf.test.TestCase):
for tr_b in [True, False]:
for sp_a in [True, False]:
for sp_b in [True, False]:
- for x_dtype in (tf.float32, tf.bfloat16):
- for y_dtype in (tf.float32, tf.bfloat16):
+ for x_dtype in (dtypes.float32, dtypes.bfloat16):
+ for y_dtype in (dtypes.float32, dtypes.bfloat16):
n, k, m = np.random.randint(1, 100, size=3)
x = RandMatrix(n, k, tr_a)
y = RandMatrix(k, m, tr_b)
- self._testCpuMatmul(x, y, tr_a, tr_b, sp_a, sp_b,
- x_dtype=x_dtype, y_dtype=y_dtype)
+ self._testCpuMatmul(
+ x,
+ y,
+ tr_a,
+ tr_b,
+ sp_a,
+ sp_b,
+ x_dtype=x_dtype,
+ y_dtype=y_dtype)
-class MatMulGradientTest(tf.test.TestCase):
+class MatMulGradientTest(test.TestCase):
def _testGradients(self, tr_a, tr_b, sp_a, sp_b, a_dtype, b_dtype, name):
with self.test_session():
- a = tf.constant(RandMatrix(3, 2, tr_a, round_bfloat=True),
- dtype=tf.float32)
- b = tf.constant(RandMatrix(2, 4, tr_b, round_bfloat=True),
- dtype=tf.float32)
- tf_a = tf.cast(a, a_dtype) if a_dtype != tf.float32 else a
- tf_b = tf.cast(b, b_dtype) if b_dtype != tf.float32 else b
-
- m = tf.matmul(tf_a, tf_b,
- name=name,
- transpose_a=tr_a,
- transpose_b=tr_b,
- a_is_sparse=sp_a,
- b_is_sparse=sp_b)
- err = (tf.test.compute_gradient_error(a, [2, 3]
- if tr_a else [3, 2], m, [3, 4],
- x_init_value=a.eval(),
- delta=1/64.) +
- tf.test.compute_gradient_error(b, [4, 2]
- if tr_b else [2, 4], m, [3, 4],
- x_init_value=b.eval(),
- delta=1/64.))
- self.assertLess(err, 1/128.)
+ a = constant_op.constant(
+ RandMatrix(
+ 3, 2, tr_a, round_bfloat=True), dtype=dtypes.float32)
+ b = constant_op.constant(
+ RandMatrix(
+ 2, 4, tr_b, round_bfloat=True), dtype=dtypes.float32)
+ tf_a = math_ops.cast(a, a_dtype) if a_dtype != dtypes.float32 else a
+ tf_b = math_ops.cast(b, b_dtype) if b_dtype != dtypes.float32 else b
+
+ m = math_ops.matmul(
+ tf_a,
+ tf_b,
+ name=name,
+ transpose_a=tr_a,
+ transpose_b=tr_b,
+ a_is_sparse=sp_a,
+ b_is_sparse=sp_b)
+ err = (gradient_checker.compute_gradient_error(
+ a, [2, 3] if tr_a else [3, 2],
+ m, [3, 4],
+ x_init_value=a.eval(),
+ delta=1 / 64.) + gradient_checker.compute_gradient_error(
+ b, [4, 2] if tr_b else [2, 4],
+ m, [3, 4],
+ x_init_value=b.eval(),
+ delta=1 / 64.))
+ self.assertLess(err, 1 / 128.)
def testGradientInput(self):
for tr_a in [True, False]:
for tr_b in [True, False]:
for sp_a in [True, False]:
for sp_b in [True, False]:
- for a_dtype in (tf.float32, tf.bfloat16):
- for b_dtype in (tf.float32, tf.bfloat16):
+ for a_dtype in (dtypes.float32, dtypes.bfloat16):
+ for b_dtype in (dtypes.float32, dtypes.bfloat16):
name = "sparse_matmul_%s_%s_%s_%s" % (tr_a, tr_b, sp_a, sp_b)
- self._testGradients(tr_a, tr_b, sp_a, sp_b,
- a_dtype, b_dtype, name)
+ self._testGradients(tr_a, tr_b, sp_a, sp_b, a_dtype, b_dtype,
+ name)
+
if __name__ == "__main__":
- tf.test.main()
+ test.main()
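
sparse_matmul_op_test.py follows the same recipe: tf.matmul and tf.cast become math_ops.matmul and math_ops.cast, tf.constant becomes constant_op.constant, and tf.test.compute_gradient_error becomes gradient_checker.compute_gradient_error. A sketch of the op under test with the new imports (operand shapes are illustrative):

    import numpy as np

    from tensorflow.python.framework import constant_op
    from tensorflow.python.ops import math_ops

    x = np.random.rand(4, 3).astype(np.float32)
    y = np.random.rand(3, 2).astype(np.float32)

    # a_is_sparse/b_is_sparse are execution hints: the operands stay dense
    # tensors, but the kernel is specialized for mostly-zero inputs.
    prod = math_ops.matmul(
        constant_op.constant(x),
        constant_op.constant(y),
        a_is_sparse=True,
        b_is_sparse=False)
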
diff --git a/tensorflow/python/kernel_tests/sparse_ops_test.py b/tensorflow/python/kernel_tests/sparse_ops_test.py
index 41a8a486d1..608bb0f635 100644
--- a/tensorflow/python/kernel_tests/sparse_ops_test.py
+++ b/tensorflow/python/kernel_tests/sparse_ops_test.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for Python ops defined in sparse_ops."""
from __future__ import absolute_import
@@ -20,14 +19,18 @@ from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import gradient_checker
+from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import sparse_ops
+import tensorflow.python.ops.sparse_grad # pylint: disable=unused-import
from tensorflow.python.platform import googletest
+from tensorflow.python.platform import test
# TODO(zongheng): it'd be great to factor out this function and various random
@@ -40,38 +43,28 @@ def _sparsify(x, thresh=0.5, index_dtype=np.int64):
x_values = x[non_zero]
x_shape = x.shape
- return tf.SparseTensor(
+ return sparse_tensor.SparseTensor(
indices=x_indices, values=x_values, dense_shape=x_shape), len(x_values)
class SparseToIndicatorTest(test_util.TensorFlowTestCase):
def _SparseTensor_5x6(self, dtype):
- ind = np.array([
- [0, 0],
- [1, 0], [1, 3], [1, 4],
- [3, 2], [3, 3]])
+ ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2], [3, 3]])
val = np.array([0, 10, 13, 14, 32, 33])
shape = np.array([5, 6])
- return tf.SparseTensor(
+ return sparse_tensor.SparseTensor(
constant_op.constant(ind, dtypes.int64),
constant_op.constant(val, dtype),
constant_op.constant(shape, dtypes.int64))
def _SparseTensor_2x3x4(self, dtype):
# Includes two entries with the form [1, 1, x] : 150.
- ind = np.array([
- [0, 0, 1],
- [0, 1, 0],
- [0, 1, 2],
- [1, 0, 3],
- [1, 1, 0],
- [1, 1, 1],
- [1, 1, 2],
- [1, 2, 2]])
+ ind = np.array([[0, 0, 1], [0, 1, 0], [0, 1, 2], [1, 0, 3], [1, 1, 0],
+ [1, 1, 1], [1, 1, 2], [1, 2, 2]])
val = np.array([1, 10, 12, 103, 150, 149, 150, 122])
shape = np.array([2, 3, 4])
- return tf.SparseTensor(
+ return sparse_tensor.SparseTensor(
constant_op.constant(ind, dtypes.int64),
constant_op.constant(val, dtype),
constant_op.constant(shape, dtypes.int64))
@@ -106,9 +99,8 @@ class SparseToIndicatorTest(test_util.TensorFlowTestCase):
output = sparse_ops.sparse_to_indicator(sp_input, 200).eval()
expected_output = np.zeros((2, 3, 200), dtype=np.bool)
- expected_trues = [(0, 0, 1), (0, 1, 10), (0, 1, 12),
- (1, 0, 103), (1, 1, 149), (1, 1, 150),
- (1, 2, 122)]
+ expected_trues = [(0, 0, 1), (0, 1, 10), (0, 1, 12), (1, 0, 103),
+ (1, 1, 149), (1, 1, 150), (1, 2, 122)]
for expected_true in expected_trues:
expected_output[expected_true] = True
@@ -120,59 +112,44 @@ class SparseMergeTest(test_util.TensorFlowTestCase):
def _SparseTensorValue_3x50(self, indices_dtype, values_dtype):
# NOTE: This input is intentionally not sorted to validate the
# already_sorted flag below.
- ind = np.array([
- [0, 0],
- [1, 0], [1, 2],
- [2, 0], [2, 1],
- [1, 1]])
+ ind = np.array([[0, 0], [1, 0], [1, 2], [2, 0], [2, 1], [1, 1]])
# NB: these are not sorted
indices = np.array([0, 13, 10, 33, 32, 14])
values = np.array([-3, 4, 1, 9, 5, 1])
shape = np.array([3, 3])
- indices = tf.SparseTensorValue(
+ indices = sparse_tensor.SparseTensorValue(
np.array(ind, np.int64),
- np.array(indices, indices_dtype),
- np.array(shape, np.int64))
- values = tf.SparseTensorValue(
+ np.array(indices, indices_dtype), np.array(shape, np.int64))
+ values = sparse_tensor.SparseTensorValue(
np.array(ind, np.int64),
- np.array(values, values_dtype),
- np.array(shape, np.int64))
+ np.array(values, values_dtype), np.array(shape, np.int64))
return indices, values
def _SparseTensor_3x50(self, indices_dtype, values_dtype):
indices, values = self._SparseTensorValue_3x50(indices_dtype, values_dtype)
- return (
- tf.SparseTensor.from_value(indices),
- tf.SparseTensor.from_value(values))
+ return (sparse_tensor.SparseTensor.from_value(indices),
+ sparse_tensor.SparseTensor.from_value(values))
def _AssertResultsSorted(self, output, vocab_size):
- self.assertAllEqual(
- output.indices,
- [[0, 0], [1, 10], [1, 13], [1, 14], [2, 32], [2, 33]])
- self.assertAllEqual(
- output.values,
- [-3, 1, 4, 1, 5, 9])
- self.assertAllEqual(
- output.dense_shape,
- [3, vocab_size])
+ self.assertAllEqual(output.indices,
+ [[0, 0], [1, 10], [1, 13], [1, 14], [2, 32], [2, 33]])
+ self.assertAllEqual(output.values, [-3, 1, 4, 1, 5, 9])
+ self.assertAllEqual(output.dense_shape, [3, vocab_size])
def _AssertResultsNotSorted(self, output, vocab_size):
- self.assertAllEqual(
- output.indices,
- [[0, 0], [1, 13], [1, 10], [2, 33], [2, 32], [1, 14]])
- self.assertAllEqual(
- output.values,
- [-3, 4, 1, 9, 5, 1])
- self.assertAllEqual(
- output.dense_shape,
- [3, vocab_size])
+ self.assertAllEqual(output.indices,
+ [[0, 0], [1, 13], [1, 10], [2, 33], [2, 32], [1, 14]])
+ self.assertAllEqual(output.values, [-3, 4, 1, 9, 5, 1])
+ self.assertAllEqual(output.dense_shape, [3, vocab_size])
def testInt32AndFloat32(self):
vocab_size = 50
indices_v, values_v = self._SparseTensorValue_3x50(np.int32, np.float32)
with self.test_session(use_gpu=False) as sess:
- for indices in (indices_v, tf.SparseTensor.from_value(indices_v)):
- for values in (values_v, tf.SparseTensor.from_value(values_v)):
+ for indices in (indices_v,
+ sparse_tensor.SparseTensor.from_value(indices_v)):
+ for values in (values_v,
+ sparse_tensor.SparseTensor.from_value(values_v)):
sp_output = sparse_ops.sparse_merge(indices, values, vocab_size)
output = sess.run(sp_output)
@@ -230,19 +207,15 @@ class SparseMergeTest(test_util.TensorFlowTestCase):
class SparseRetainTest(test_util.TensorFlowTestCase):
def _SparseTensorValue_5x6(self):
- ind = np.array([
- [0, 0],
- [1, 0], [1, 3], [1, 4],
- [3, 2], [3, 3]])
+ ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2], [3, 3]])
val = np.array([0, 10, 13, 14, 32, 33])
shape = np.array([5, 6])
- return tf.SparseTensorValue(
+ return sparse_tensor.SparseTensorValue(
np.array(ind, np.int64),
- np.array(val, np.int32),
- np.array(shape, np.int64))
+ np.array(val, np.int32), np.array(shape, np.int64))
def _SparseTensor_5x6(self):
- return tf.SparseTensor.from_value(self._SparseTensorValue_5x6())
+ return sparse_tensor.SparseTensor.from_value(self._SparseTensorValue_5x6())
def testBasic(self):
with self.test_session(use_gpu=False) as sess:
@@ -278,20 +251,21 @@ class SparseRetainTest(test_util.TensorFlowTestCase):
class SparseResetShapeTest(test_util.TensorFlowTestCase):
- _IND_2_5_6 = np.array([[0, 0, 0], [0, 1, 0], [0, 1, 3], [1, 1, 4],
- [1, 3, 2], [1, 3, 3]], dtype=np.int64)
+ _IND_2_5_6 = np.array(
+ [[0, 0, 0], [0, 1, 0], [0, 1, 3], [1, 1, 4], [1, 3, 2], [1, 3, 3]],
+ dtype=np.int64)
_VAL_2_5_6 = np.array([0, 10, 13, 14, 32, 33], dtype=np.int32)
_SHP_2_5_6 = np.array([2, 5, 6], dtype=np.int64)
def _SparseTensor_2x5x6(self):
- return tf.SparseTensor(
+ return sparse_tensor.SparseTensor(
constant_op.constant(self._IND_2_5_6, dtypes.int64),
constant_op.constant(self._VAL_2_5_6, dtypes.int32),
constant_op.constant(self._SHP_2_5_6, dtypes.int64))
def _SparseTensorValue_2x5x6(self):
- return tf.SparseTensorValue(self._IND_2_5_6, self._VAL_2_5_6,
- self._SHP_2_5_6)
+ return sparse_tensor.SparseTensorValue(self._IND_2_5_6, self._VAL_2_5_6,
+ self._SHP_2_5_6)
def testBasic(self):
with self.test_session(use_gpu=False) as sess:
@@ -301,9 +275,8 @@ class SparseResetShapeTest(test_util.TensorFlowTestCase):
output = sess.run(sp_output)
- self.assertAllEqual(output.indices, [[0, 0, 0], [0, 1, 0],
- [0, 1, 3], [1, 1, 4],
- [1, 3, 2], [1, 3, 3]])
+ self.assertAllEqual(output.indices, [[0, 0, 0], [0, 1, 0], [0, 1, 3],
+ [1, 1, 4], [1, 3, 2], [1, 3, 3]])
self.assertAllEqual(output.values, [0, 10, 13, 14, 32, 33])
self.assertAllEqual(output.dense_shape, [3, 6, 7])
@@ -315,9 +288,8 @@ class SparseResetShapeTest(test_util.TensorFlowTestCase):
output = sess.run(sp_output)
- self.assertAllEqual(output.indices, [[0, 0, 0], [0, 1, 0],
- [0, 1, 3], [1, 1, 4],
- [1, 3, 2], [1, 3, 3]])
+ self.assertAllEqual(output.indices, [[0, 0, 0], [0, 1, 0], [0, 1, 3],
+ [1, 1, 4], [1, 3, 2], [1, 3, 3]])
self.assertAllEqual(output.values, [0, 10, 13, 14, 32, 33])
self.assertAllEqual(output.dense_shape, [3, 6, 7])
@@ -330,9 +302,8 @@ class SparseResetShapeTest(test_util.TensorFlowTestCase):
output = sess.run(sp_output,
feed_dict={sp_input: self._SparseTensorValue_2x5x6()})
- self.assertAllEqual(output.indices, [[0, 0, 0], [0, 1, 0],
- [0, 1, 3], [1, 1, 4],
- [1, 3, 2], [1, 3, 3]])
+ self.assertAllEqual(output.indices, [[0, 0, 0], [0, 1, 0], [0, 1, 3],
+ [1, 1, 4], [1, 3, 2], [1, 3, 3]])
self.assertAllEqual(output.values, [0, 10, 13, 14, 32, 33])
self.assertAllEqual(output.dense_shape, [3, 6, 7])
@@ -343,9 +314,8 @@ class SparseResetShapeTest(test_util.TensorFlowTestCase):
output = sess.run(sp_output)
- self.assertAllEqual(output.indices, [[0, 0, 0], [0, 1, 0],
- [0, 1, 3], [1, 1, 4],
- [1, 3, 2], [1, 3, 3]])
+ self.assertAllEqual(output.indices, [[0, 0, 0], [0, 1, 0], [0, 1, 3],
+ [1, 1, 4], [1, 3, 2], [1, 3, 3]])
self.assertAllEqual(output.values, [0, 10, 13, 14, 32, 33])
self.assertAllEqual(output.dense_shape, [2, 4, 5])
@@ -388,28 +358,21 @@ class SparseResetShapeTest(test_util.TensorFlowTestCase):
class SparseFillEmptyRowsTest(test_util.TensorFlowTestCase):
def _SparseTensorValue_5x6(self):
- ind = np.array([
- [0, 0],
- [1, 0], [1, 3], [1, 4],
- [3, 2], [3, 3]])
+ ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2], [3, 3]])
val = np.array([0, 10, 13, 14, 32, 33])
shape = np.array([5, 6])
- return tf.SparseTensorValue(
+ return sparse_tensor.SparseTensorValue(
np.array(ind, np.int64),
- np.array(val, np.int32),
- np.array(shape, np.int64))
+ np.array(val, np.int32), np.array(shape, np.int64))
def _SparseTensor_5x6(self):
- return tf.SparseTensor.from_value(self._SparseTensorValue_5x6())
+ return sparse_tensor.SparseTensor.from_value(self._SparseTensorValue_5x6())
def _SparseTensor_String5x6(self):
- ind = np.array([
- [0, 0],
- [1, 0], [1, 3], [1, 4],
- [3, 2], [3, 3]])
+ ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2], [3, 3]])
val = np.array(["a", "b", "c", "d", "e", "f"])
shape = np.array([5, 6])
- return tf.SparseTensor(
+ return sparse_tensor.SparseTensor(
constant_op.constant(ind, dtypes.int64),
constant_op.constant(val, dtypes.string),
constant_op.constant(shape, dtypes.int64))
@@ -418,7 +381,7 @@ class SparseFillEmptyRowsTest(test_util.TensorFlowTestCase):
ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4]])
val = np.array([0, 10, 13, 14])
shape = np.array([2, 6])
- return tf.SparseTensor(
+ return sparse_tensor.SparseTensor(
constant_op.constant(ind, dtypes.int64),
constant_op.constant(val, dtypes.int32),
constant_op.constant(shape, dtypes.int64))
@@ -517,7 +480,7 @@ class SparseReduceSumTest(test_util.TensorFlowTestCase):
self._compare(sp_t, reduction_axes, ndims, True)
def testSimpleAndRandomInputs(self):
- sp_t = tf.SparseTensor(self.ind, self.vals, self.dense_shape)
+ sp_t = sparse_tensor.SparseTensor(self.ind, self.vals, self.dense_shape)
with self.test_session(use_gpu=False):
self._compare_all(sp_t, None, ndims=2)
@@ -541,7 +504,7 @@ class SparseReduceSumTest(test_util.TensorFlowTestCase):
self._compare_all(sp_t, axes, ndims=len(dims))
def testInvalidAxes(self):
- sp_t = tf.SparseTensor(self.ind, self.vals, self.dense_shape)
+ sp_t = sparse_tensor.SparseTensor(self.ind, self.vals, self.dense_shape)
with self.test_session(use_gpu=False):
with self.assertRaisesOpError("Invalid reduction dimension -3"):
sparse_ops.sparse_reduce_sum(sp_t, -3).eval()
@@ -559,25 +522,27 @@ class SparseReduceSumTest(test_util.TensorFlowTestCase):
axes = np.random.choice(len(dims), size=d, replace=False).tolist()
reduced = sparse_ops.sparse_reduce_sum(sp_t, axes)
- err = tf.test.compute_gradient_error(sp_t.values, (nnz,), reduced,
- reduced.eval().shape)
+ err = gradient_checker.compute_gradient_error(sp_t.values, (nnz,),
+ reduced,
+ reduced.eval().shape)
self.assertLess(err, 1e-3)
# Tests for negative axes.
reduced = sparse_ops.sparse_reduce_sum(sp_t, -1)
- err = tf.test.compute_gradient_error(sp_t.values, (nnz,), reduced,
- reduced.eval().shape)
+ err = gradient_checker.compute_gradient_error(sp_t.values, (nnz,),
+ reduced,
+ reduced.eval().shape)
self.assertLess(err, 1e-3)
class SparseMathOpsTest(test_util.TensorFlowTestCase):
def _check(self, result_tensor, result_np, input_sp_t):
- self.assertTrue(isinstance(result_tensor, tf.SparseTensor))
- self.assertTrue(isinstance(input_sp_t, tf.SparseTensor))
+ self.assertTrue(isinstance(result_tensor, sparse_tensor.SparseTensor))
+ self.assertTrue(isinstance(input_sp_t, sparse_tensor.SparseTensor))
self.assertAllEqual(input_sp_t.indices.eval(), result_tensor.indices.eval())
- self.assertAllEqual(
- input_sp_t.dense_shape.eval(), result_tensor.dense_shape.eval())
+ self.assertAllEqual(input_sp_t.dense_shape.eval(),
+ result_tensor.dense_shape.eval())
res_densified = sparse_ops.sparse_to_dense(result_tensor.indices,
result_tensor.dense_shape,
@@ -596,7 +561,7 @@ class SparseMathOpsTest(test_util.TensorFlowTestCase):
dense_vals_np = np.random.rand(*dense_shape).astype(dtype) + 1
sp_t, unused_nnz = _sparsify(sp_vals_np, thresh=1.5)
sp_t_densified = sparse_ops.sparse_tensor_to_dense(sp_t).eval()
- dense_t = tf.constant(dense_vals_np)
+ dense_t = constant_op.constant(dense_vals_np)
self._check(sp_t / dense_t, sp_t_densified / dense_vals_np, sp_t)
# Check commutative.
@@ -614,15 +579,17 @@ class SparseMathOpsTest(test_util.TensorFlowTestCase):
vals = [1, 1]
shape = (2, 2)
- sp_t = tf.SparseTensor(indices, vals, shape)
- dense_t = tf.ones(shape, dtype=dtypes.int32)
- self._check(sparse_ops.sparse_dense_cwise_add(sp_t, dense_t),
- np.identity(2) * 2, sp_t)
+ sp_t = sparse_tensor.SparseTensor(indices, vals, shape)
+ dense_t = array_ops.ones(shape, dtype=dtypes.int32)
+ self._check(
+ sparse_ops.sparse_dense_cwise_add(sp_t, dense_t),
+ np.identity(2) * 2, sp_t)
# Variant of above, but broadcasts the dense side.
- dense_t = tf.ones([1], dtype=dtypes.int32)
- self._check(sparse_ops.sparse_dense_cwise_add(sp_t, dense_t),
- np.identity(2) * 2, sp_t)
+ dense_t = array_ops.ones([1], dtype=dtypes.int32)
+ self._check(
+ sparse_ops.sparse_dense_cwise_add(sp_t, dense_t),
+ np.identity(2) * 2, sp_t)
def testGradients(self):
np.random.seed(1618)
@@ -635,21 +602,23 @@ class SparseMathOpsTest(test_util.TensorFlowTestCase):
sp_vals_np = np.random.rand(*sp_shape).astype(dtype) + 1
dense_vals_np = np.random.rand(*dense_shape).astype(dtype) + 1
sp_t, nnz = _sparsify(sp_vals_np, thresh=1.5)
- dense_t = tf.constant(dense_vals_np)
+ dense_t = constant_op.constant(dense_vals_np)
cmul = sp_t * dense_t
- err = tf.test.compute_gradient_error([sp_t.values, dense_t],
- [(nnz,), dense_shape],
- cmul.values, (nnz,))
+ err = gradient_checker.compute_gradient_error([sp_t.values, dense_t],
+ [(nnz,), dense_shape],
+ cmul.values, (nnz,))
self.assertLess(err, 1e-4)
cdiv = sp_t / dense_t
- err = tf.test.compute_gradient_error(sp_t.values, (nnz,),
- cdiv.values, (nnz,))
+ err = gradient_checker.compute_gradient_error(sp_t.values, (nnz,),
+ cdiv.values, (nnz,))
self.assertLess(err, 1e-4)
- err = tf.test.compute_gradient_error(dense_t, dense_shape,
- cdiv.values, (nnz,),
- x_init_value=dense_vals_np)
+ err = gradient_checker.compute_gradient_error(
+ dense_t,
+ dense_shape,
+ cdiv.values, (nnz,),
+ x_init_value=dense_vals_np)
self.assertLess(err, 2e-4)
@@ -666,11 +635,11 @@ class SparseSoftmaxTest(test_util.TensorFlowTestCase):
sp_vals_np.reshape((1, n, m)), thresh=0.) # No masking.
with self.test_session(use_gpu=False):
- densified = tf.constant(sp_vals_np)
+ densified = constant_op.constant(sp_vals_np)
- sp_result = sparse_ops.sparse_softmax(
- batched_sp_t).eval().values.reshape((n, m))
- dense_result = tf.nn.softmax(densified)
+ sp_result = sparse_ops.sparse_softmax(batched_sp_t).eval(
+ ).values.reshape((n, m))
+ dense_result = nn_ops.softmax(densified)
self.assertAllClose(dense_result.eval(), sp_result)
@@ -709,9 +678,9 @@ class SparseSoftmaxTest(test_util.TensorFlowTestCase):
for dtype in [np.float32, np.float64]:
x_np = np.random.randn(*x_shape).astype(dtype)
x_tf, nnz = _sparsify(x_np)
- y_tf = tf.sparse_softmax(x_tf)
- err = tf.test.compute_gradient_error(x_tf.values, (nnz,), y_tf.values,
- (nnz,))
+ y_tf = sparse_ops.sparse_softmax(x_tf)
+ err = gradient_checker.compute_gradient_error(x_tf.values, (nnz,),
+ y_tf.values, (nnz,))
self.assertLess(err, 1e-4)
@@ -725,19 +694,19 @@ class SparseMinimumMaximumTest(test_util.TensorFlowTestCase):
def testBasic(self):
with self.test_session(use_gpu=False):
# 1-D, values at index 0.
- sp_zero = tf.SparseTensor([[0]], [0], [7])
- sp_one = tf.SparseTensor([[0]], [1], [7])
- max_tf = tf.sparse_maximum(sp_zero, sp_one).eval()
- min_tf = tf.sparse_minimum(sp_zero, sp_one).eval()
+ sp_zero = sparse_tensor.SparseTensor([[0]], [0], [7])
+ sp_one = sparse_tensor.SparseTensor([[0]], [1], [7])
+ max_tf = sparse_ops.sparse_maximum(sp_zero, sp_one).eval()
+ min_tf = sparse_ops.sparse_minimum(sp_zero, sp_one).eval()
self._assertSparseTensorValueEqual(sp_one.eval(), max_tf)
self._assertSparseTensorValueEqual(sp_zero.eval(), min_tf)
# Values at different indices.
- sp_zero = tf.SparseTensor([[0]], [0], [7])
- sp_zero_2 = tf.SparseTensor([[1]], [0], [7])
- expected = tf.SparseTensor([[0], [1]], [0, 0], [7])
- max_tf = tf.sparse_maximum(sp_zero, sp_zero_2).eval()
- min_tf = tf.sparse_minimum(sp_zero, sp_zero_2).eval()
+ sp_zero = sparse_tensor.SparseTensor([[0]], [0], [7])
+ sp_zero_2 = sparse_tensor.SparseTensor([[1]], [0], [7])
+ expected = sparse_tensor.SparseTensor([[0], [1]], [0, 0], [7])
+ max_tf = sparse_ops.sparse_maximum(sp_zero, sp_zero_2).eval()
+ min_tf = sparse_ops.sparse_minimum(sp_zero, sp_zero_2).eval()
self._assertSparseTensorValueEqual(expected.eval(), max_tf)
self._assertSparseTensorValueEqual(expected.eval(), min_tf)
@@ -752,13 +721,15 @@ class SparseMinimumMaximumTest(test_util.TensorFlowTestCase):
sp_b, unused_b_nnz = _sparsify(b_np, thresh=-.5)
with self.test_session(use_gpu=False):
- maximum_tf = tf.sparse_maximum(sp_a, sp_b)
- maximum_tf_densified = tf.sparse_tensor_to_dense(maximum_tf).eval()
- minimum_tf = tf.sparse_minimum(sp_a, sp_b)
- minimum_tf_densified = tf.sparse_tensor_to_dense(minimum_tf).eval()
+ maximum_tf = sparse_ops.sparse_maximum(sp_a, sp_b)
+ maximum_tf_densified = sparse_ops.sparse_tensor_to_dense(
+ maximum_tf).eval()
+ minimum_tf = sparse_ops.sparse_minimum(sp_a, sp_b)
+ minimum_tf_densified = sparse_ops.sparse_tensor_to_dense(
+ minimum_tf).eval()
- a_densified = tf.sparse_tensor_to_dense(sp_a).eval()
- b_densified = tf.sparse_tensor_to_dense(sp_b).eval()
+ a_densified = sparse_ops.sparse_tensor_to_dense(sp_a).eval()
+ b_densified = sparse_ops.sparse_tensor_to_dense(sp_b).eval()
self.assertAllEqual(
np.maximum(a_densified, b_densified), maximum_tf_densified)
@@ -767,18 +738,18 @@ class SparseMinimumMaximumTest(test_util.TensorFlowTestCase):
def testMismatchedShapes(self):
with self.test_session(use_gpu=False):
- sp_zero = tf.SparseTensor([[0, 0]], [0], [1, 1])
- sp_one = tf.SparseTensor([[0]], [1], [2])
+ sp_zero = sparse_tensor.SparseTensor([[0, 0]], [0], [1, 1])
+ sp_one = sparse_tensor.SparseTensor([[0]], [1], [2])
with self.assertRaisesOpError("Operands do not have the same ranks"):
- tf.sparse_maximum(sp_zero, sp_one).eval()
+ sparse_ops.sparse_maximum(sp_zero, sp_one).eval()
- sp_zero = tf.SparseTensor([[0]], [0], [1])
- sp_one = tf.SparseTensor([[0]], [1], [2])
+ sp_zero = sparse_tensor.SparseTensor([[0]], [0], [1])
+ sp_one = sparse_tensor.SparseTensor([[0]], [1], [2])
with self.assertRaisesOpError("Operands' shapes do not match"):
- tf.sparse_maximum(sp_zero, sp_one).eval()
+ sparse_ops.sparse_maximum(sp_zero, sp_one).eval()
-class SparseTransposeTest(tf.test.TestCase):
+class SparseTransposeTest(test.TestCase):
def testTranspose(self):
with self.test_session(use_gpu=False):
@@ -787,13 +758,14 @@ class SparseTransposeTest(tf.test.TestCase):
for shape in shapes:
for dtype in [np.int32, np.int64, np.float32, np.float64]:
dn_input = np.random.randn(*shape).astype(dtype)
- rank = tf.rank(dn_input).eval()
+ rank = array_ops.rank(dn_input).eval()
perm = np.random.choice(rank, rank, False)
sp_input, unused_a_nnz = _sparsify(dn_input)
- sp_trans = tf.sparse_transpose(sp_input, perm=perm)
- dn_trans = tf.sparse_tensor_to_dense(sp_trans).eval()
- expected_trans = tf.transpose(dn_input, perm=perm).eval()
+ sp_trans = sparse_ops.sparse_transpose(sp_input, perm=perm)
+ dn_trans = sparse_ops.sparse_tensor_to_dense(sp_trans).eval()
+ expected_trans = array_ops.transpose(dn_input, perm=perm).eval()
self.assertAllEqual(dn_trans, expected_trans)
+
if __name__ == "__main__":
googletest.main()
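
sparse_ops_test.py is the largest conversion in this stretch: tf.SparseTensor and tf.SparseTensorValue move to the sparse_tensor module, the tf.sparse_* wrappers move to sparse_ops, and sparse_grad is imported solely for its gradient-registration side effect (hence the unused-import pylint suppression). The converted style, sketched with illustrative values:

    from tensorflow.python.framework import sparse_tensor
    from tensorflow.python.ops import sparse_ops

    # A 2x2 sparse operand with two nonzero entries.
    sp_t = sparse_tensor.SparseTensor(
        indices=[[0, 0], [1, 1]], values=[1.0, 2.0], dense_shape=[2, 2])

    # Reduce over the column axis, then densify, as the tests above do.
    row_sums = sparse_ops.sparse_reduce_sum(sp_t, 1)
    dense = sparse_ops.sparse_tensor_to_dense(sp_t)
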
diff --git a/tensorflow/python/kernel_tests/sparse_reorder_op_test.py b/tensorflow/python/kernel_tests/sparse_reorder_op_test.py
index 5c8c3fb433..5136cdadea 100644
--- a/tensorflow/python/kernel_tests/sparse_reorder_op_test.py
+++ b/tensorflow/python/kernel_tests/sparse_reorder_op_test.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for SparseReorder."""
from __future__ import absolute_import
@@ -20,34 +19,39 @@ from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import sparse_tensor
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import gradient_checker
+from tensorflow.python.ops import sparse_ops
+import tensorflow.python.ops.sparse_grad # pylint: disable=unused-import
+from tensorflow.python.platform import test
-class SparseReorderTest(tf.test.TestCase):
+class SparseReorderTest(test.TestCase):
def _SparseTensorPlaceholder(self):
- return tf.SparseTensor(
- tf.placeholder(tf.int64),
- tf.placeholder(tf.float64),
- tf.placeholder(tf.int64))
+ return sparse_tensor.SparseTensor(
+ array_ops.placeholder(dtypes.int64),
+ array_ops.placeholder(dtypes.float64),
+ array_ops.placeholder(dtypes.int64))
def _SparseTensorValue_5x6(self, permutation):
- ind = np.array([
- [0, 0],
- [1, 0], [1, 3], [1, 4],
- [3, 2], [3, 3]]).astype(np.int64)
+ ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2],
+ [3, 3]]).astype(np.int64)
val = np.array([0, 10, 13, 14, 32, 33]).astype(np.float64)
ind = ind[permutation]
val = val[permutation]
shape = np.array([5, 6]).astype(np.int64)
- return tf.SparseTensorValue(ind, val, shape)
+ return sparse_tensor.SparseTensorValue(ind, val, shape)
def testAlreadyInOrder(self):
with self.test_session(use_gpu=False) as sess:
input_val = self._SparseTensorValue_5x6(np.arange(6))
- sp_output = tf.sparse_reorder(input_val)
+ sp_output = sparse_ops.sparse_reorder(input_val)
output_val = sess.run(sp_output)
self.assertAllEqual(output_val.indices, input_val.indices)
@@ -58,7 +62,7 @@ class SparseReorderTest(tf.test.TestCase):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input_val = self._SparseTensorValue_5x6(np.arange(6))
- sp_output = tf.sparse_reorder(sp_input)
+ sp_output = sparse_ops.sparse_reorder(sp_input)
output_val = sess.run(sp_output, {sp_input: input_val})
self.assertAllEqual(output_val.indices, input_val.indices)
@@ -70,13 +74,13 @@ class SparseReorderTest(tf.test.TestCase):
with self.test_session(use_gpu=False) as sess:
for _ in range(5): # To test various random permutations
input_val = self._SparseTensorValue_5x6(np.random.permutation(6))
- sp_output = tf.sparse_reorder(input_val)
+ sp_output = sparse_ops.sparse_reorder(input_val)
output_val = sess.run(sp_output)
self.assertAllEqual(output_val.indices, expected_output_val.indices)
self.assertAllEqual(output_val.values, expected_output_val.values)
- self.assertAllEqual(
- output_val.dense_shape, expected_output_val.dense_shape)
+ self.assertAllEqual(output_val.dense_shape,
+ expected_output_val.dense_shape)
def testFeedOutOfOrder(self):
expected_output_val = self._SparseTensorValue_5x6(np.arange(6))
@@ -84,23 +88,24 @@ class SparseReorderTest(tf.test.TestCase):
for _ in range(5): # To test various random permutations
sp_input = self._SparseTensorPlaceholder()
input_val = self._SparseTensorValue_5x6(np.random.permutation(6))
- sp_output = tf.sparse_reorder(sp_input)
+ sp_output = sparse_ops.sparse_reorder(sp_input)
output_val = sess.run(sp_output, {sp_input: input_val})
self.assertAllEqual(output_val.indices, expected_output_val.indices)
self.assertAllEqual(output_val.values, expected_output_val.values)
- self.assertAllEqual(
- output_val.dense_shape, expected_output_val.dense_shape)
+ self.assertAllEqual(output_val.dense_shape,
+ expected_output_val.dense_shape)
def testGradients(self):
with self.test_session(use_gpu=False):
for _ in range(5): # To test various random permutations
input_val = self._SparseTensorValue_5x6(np.random.permutation(6))
- sp_input = tf.SparseTensor(
- input_val.indices, input_val.values, input_val.dense_shape)
- sp_output = tf.sparse_reorder(sp_input)
+ sp_input = sparse_tensor.SparseTensor(input_val.indices,
+ input_val.values,
+ input_val.dense_shape)
+ sp_output = sparse_ops.sparse_reorder(sp_input)
- err = tf.test.compute_gradient_error(
+ err = gradient_checker.compute_gradient_error(
sp_input.values,
input_val.values.shape,
sp_output.values,
@@ -110,4 +115,4 @@ class SparseReorderTest(tf.test.TestCase):
if __name__ == "__main__":
- tf.test.main()
+ test.main()
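
sparse_reorder_op_test.py gets the same mechanical rewrite. The op it covers sorts SparseTensor entries into canonical row-major order; a sketch in the direct-import style (fixture values are illustrative):

    import numpy as np

    from tensorflow.python.framework import sparse_tensor
    from tensorflow.python.ops import sparse_ops

    # Entries deliberately out of row-major order.
    ind = np.array([[3, 2], [0, 0], [1, 4]]).astype(np.int64)
    val = np.array([32.0, 0.0, 14.0]).astype(np.float64)
    shape = np.array([5, 6]).astype(np.int64)

    # sparse_reorder accepts a SparseTensorValue directly, as the
    # testAlreadyInOrder case above does, and emits sorted entries.
    sp_output = sparse_ops.sparse_reorder(
        sparse_tensor.SparseTensorValue(ind, val, shape))
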
diff --git a/tensorflow/python/kernel_tests/sparse_reshape_op_test.py b/tensorflow/python/kernel_tests/sparse_reshape_op_test.py
index 052a41dda3..1bb05aa3b2 100644
--- a/tensorflow/python/kernel_tests/sparse_reshape_op_test.py
+++ b/tensorflow/python/kernel_tests/sparse_reshape_op_test.py
@@ -19,38 +19,41 @@ from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import sparse_tensor
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import sparse_ops
+from tensorflow.python.platform import test
-class SparseReshapeTest(tf.test.TestCase):
+
+class SparseReshapeTest(test.TestCase):
def _SparseTensorPlaceholder(self):
- return tf.SparseTensor(
- tf.placeholder(tf.int64), tf.placeholder(tf.float64),
- tf.placeholder(tf.int64))
+ return sparse_tensor.SparseTensor(
+ array_ops.placeholder(dtypes.int64),
+ array_ops.placeholder(dtypes.float64),
+ array_ops.placeholder(dtypes.int64))
def _SparseTensorValue_5x6(self):
- ind = np.array([
- [0, 0], [1, 0], [1, 3], [1, 4], [3, 2], [3, 3]
- ]).astype(np.int64)
+ ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2],
+ [3, 3]]).astype(np.int64)
val = np.array([0, 10, 13, 14, 32, 33]).astype(np.float64)
shape = np.array([5, 6]).astype(np.int64)
- return tf.SparseTensorValue(ind, val, shape)
+ return sparse_tensor.SparseTensorValue(ind, val, shape)
def _SparseTensorValue_2x3x4(self):
- ind = np.array([
- [0, 0, 1], [0, 1, 0], [0, 1, 2], [1, 0, 3], [1, 1, 1], [1, 1, 3],
- [1, 2, 2]
- ])
+ ind = np.array([[0, 0, 1], [0, 1, 0], [0, 1, 2], [1, 0, 3], [1, 1, 1],
+ [1, 1, 3], [1, 2, 2]])
val = np.array([1, 10, 12, 103, 111, 113, 122])
shape = np.array([2, 3, 4])
- return tf.SparseTensorValue(ind, val, shape)
+ return sparse_tensor.SparseTensorValue(ind, val, shape)
def testSameShape(self):
with self.test_session(use_gpu=False) as sess:
input_val = self._SparseTensorValue_5x6()
- sp_output = tf.sparse_reshape(input_val, [5, 6])
+ sp_output = sparse_ops.sparse_reshape(input_val, [5, 6])
output_val = sess.run(sp_output)
self.assertAllEqual(output_val.indices, input_val.indices)
@@ -61,7 +64,7 @@ class SparseReshapeTest(tf.test.TestCase):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input_val = self._SparseTensorValue_5x6()
- sp_output = tf.sparse_reshape(sp_input, [5, 6])
+ sp_output = sparse_ops.sparse_reshape(sp_input, [5, 6])
output_val = sess.run(sp_output, {sp_input: input_val})
self.assertAllEqual(output_val.indices, input_val.indices)
@@ -72,7 +75,7 @@ class SparseReshapeTest(tf.test.TestCase):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input_val = self._SparseTensorValue_5x6()
- sp_output = tf.sparse_reshape(sp_input, [-1, 6])
+ sp_output = sparse_ops.sparse_reshape(sp_input, [-1, 6])
output_val = sess.run(sp_output, {sp_input: input_val})
self.assertAllEqual(output_val.indices, input_val.indices)
@@ -83,12 +86,12 @@ class SparseReshapeTest(tf.test.TestCase):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input_val = self._SparseTensorValue_5x6()
- sp_output = tf.sparse_reshape(sp_input, [3, 10])
+ sp_output = sparse_ops.sparse_reshape(sp_input, [3, 10])
output_val = sess.run(sp_output, {sp_input: input_val})
- self.assertAllEqual(output_val.indices, np.array([
- [0, 0], [0, 6], [0, 9], [1, 0], [2, 0], [2, 1]
- ]))
+ self.assertAllEqual(output_val.indices,
+ np.array([[0, 0], [0, 6], [0, 9], [1, 0], [2, 0],
+ [2, 1]]))
self.assertAllEqual(output_val.values, input_val.values)
self.assertAllEqual(output_val.dense_shape, [3, 10])
@@ -96,24 +99,24 @@ class SparseReshapeTest(tf.test.TestCase):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input_val = self._SparseTensorValue_5x6()
- sp_output = tf.sparse_reshape(sp_input, [3, -1])
+ sp_output = sparse_ops.sparse_reshape(sp_input, [3, -1])
output_val = sess.run(sp_output, {sp_input: input_val})
- self.assertAllEqual(output_val.indices, np.array([
- [0, 0], [0, 6], [0, 9], [1, 0], [2, 0], [2, 1]
- ]))
+ self.assertAllEqual(output_val.indices,
+ np.array([[0, 0], [0, 6], [0, 9], [1, 0], [2, 0],
+ [2, 1]]))
self.assertAllEqual(output_val.values, input_val.values)
self.assertAllEqual(output_val.dense_shape, [3, 10])
def testUpRank(self):
with self.test_session(use_gpu=False) as sess:
input_val = self._SparseTensorValue_5x6()
- sp_output = tf.sparse_reshape(input_val, [2, 3, 5])
+ sp_output = sparse_ops.sparse_reshape(input_val, [2, 3, 5])
output_val = sess.run(sp_output)
- self.assertAllEqual(output_val.indices, np.array([
- [0, 0, 0], [0, 1, 1], [0, 1, 4], [0, 2, 0], [1, 1, 0], [1, 1, 1]
- ]))
+ self.assertAllEqual(output_val.indices,
+ np.array([[0, 0, 0], [0, 1, 1], [0, 1, 4], [0, 2, 0],
+ [1, 1, 0], [1, 1, 1]]))
self.assertAllEqual(output_val.values, input_val.values)
self.assertAllEqual(output_val.dense_shape, [2, 3, 5])
@@ -121,12 +124,12 @@ class SparseReshapeTest(tf.test.TestCase):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input_val = self._SparseTensorValue_5x6()
- sp_output = tf.sparse_reshape(sp_input, [2, 3, 5])
+ sp_output = sparse_ops.sparse_reshape(sp_input, [2, 3, 5])
output_val = sess.run(sp_output, {sp_input: input_val})
- self.assertAllEqual(output_val.indices, np.array([
- [0, 0, 0], [0, 1, 1], [0, 1, 4], [0, 2, 0], [1, 1, 0], [1, 1, 1]
- ]))
+ self.assertAllEqual(output_val.indices,
+ np.array([[0, 0, 0], [0, 1, 1], [0, 1, 4], [0, 2, 0],
+ [1, 1, 0], [1, 1, 1]]))
self.assertAllEqual(output_val.values, input_val.values)
self.assertAllEqual(output_val.dense_shape, [2, 3, 5])
@@ -134,12 +137,12 @@ class SparseReshapeTest(tf.test.TestCase):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input_val = self._SparseTensorValue_5x6()
- sp_output = tf.sparse_reshape(sp_input, [2, -1, 5])
+ sp_output = sparse_ops.sparse_reshape(sp_input, [2, -1, 5])
output_val = sess.run(sp_output, {sp_input: input_val})
- self.assertAllEqual(output_val.indices, np.array([
- [0, 0, 0], [0, 1, 1], [0, 1, 4], [0, 2, 0], [1, 1, 0], [1, 1, 1]
- ]))
+ self.assertAllEqual(output_val.indices,
+ np.array([[0, 0, 0], [0, 1, 1], [0, 1, 4], [0, 2, 0],
+ [1, 1, 0], [1, 1, 1]]))
self.assertAllEqual(output_val.values, input_val.values)
self.assertAllEqual(output_val.dense_shape, [2, 3, 5])
@@ -147,12 +150,12 @@ class SparseReshapeTest(tf.test.TestCase):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input_val = self._SparseTensorValue_2x3x4()
- sp_output = tf.sparse_reshape(sp_input, [6, 4])
+ sp_output = sparse_ops.sparse_reshape(sp_input, [6, 4])
output_val = sess.run(sp_output, {sp_input: input_val})
- self.assertAllEqual(output_val.indices, np.array([
- [0, 1], [1, 0], [1, 2], [3, 3], [4, 1], [4, 3], [5, 2]
- ]))
+ self.assertAllEqual(output_val.indices,
+ np.array([[0, 1], [1, 0], [1, 2], [3, 3], [4, 1],
+ [4, 3], [5, 2]]))
self.assertAllEqual(output_val.values, input_val.values)
self.assertAllEqual(output_val.dense_shape, [6, 4])
@@ -160,12 +163,12 @@ class SparseReshapeTest(tf.test.TestCase):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input_val = self._SparseTensorValue_2x3x4()
- sp_output = tf.sparse_reshape(sp_input, [6, -1])
+ sp_output = sparse_ops.sparse_reshape(sp_input, [6, -1])
output_val = sess.run(sp_output, {sp_input: input_val})
- self.assertAllEqual(output_val.indices, np.array([
- [0, 1], [1, 0], [1, 2], [3, 3], [4, 1], [4, 3], [5, 2]
- ]))
+ self.assertAllEqual(output_val.indices,
+ np.array([[0, 1], [1, 0], [1, 2], [3, 3], [4, 1],
+ [4, 3], [5, 2]]))
self.assertAllEqual(output_val.values, input_val.values)
self.assertAllEqual(output_val.dense_shape, [6, 4])
@@ -173,7 +176,7 @@ class SparseReshapeTest(tf.test.TestCase):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input_val = self._SparseTensorValue_5x6()
- sp_output = tf.sparse_reshape(sp_input, [4, -1, -1])
+ sp_output = sparse_ops.sparse_reshape(sp_input, [4, -1, -1])
with self.assertRaisesOpError("only one output shape size may be -1"):
sess.run(sp_output, {sp_input: input_val})
@@ -181,7 +184,7 @@ class SparseReshapeTest(tf.test.TestCase):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input_val = self._SparseTensorValue_5x6()
- sp_output = tf.sparse_reshape(sp_input, [4, 7])
+ sp_output = sparse_ops.sparse_reshape(sp_input, [4, 7])
with self.assertRaisesOpError(
"Input to reshape is a tensor with 30 dense values"):
sess.run(sp_output, {sp_input: input_val})
@@ -190,7 +193,7 @@ class SparseReshapeTest(tf.test.TestCase):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input_val = self._SparseTensorValue_5x6()
- sp_output = tf.sparse_reshape(sp_input, [4, -1])
+ sp_output = sparse_ops.sparse_reshape(sp_input, [4, -1])
with self.assertRaisesOpError("requested shape requires a multiple"):
sess.run(sp_output, {sp_input: input_val})
@@ -198,7 +201,7 @@ class SparseReshapeTest(tf.test.TestCase):
with self.test_session(use_gpu=False):
# Incorporate new rank into shape information if known
sp_input = self._SparseTensorPlaceholder()
- sp_output = tf.sparse_reshape(sp_input, [2, 3, 5])
+ sp_output = sparse_ops.sparse_reshape(sp_input, [2, 3, 5])
self.assertListEqual(sp_output.indices.get_shape().as_list(), [None, 3])
self.assertListEqual(sp_output.dense_shape.get_shape().as_list(), [3])
@@ -206,7 +209,7 @@ class SparseReshapeTest(tf.test.TestCase):
# indices
sp_input = self._SparseTensorPlaceholder()
sp_input.indices.set_shape([5, None])
- sp_output = tf.sparse_reshape(sp_input, [2, 3, 5])
+ sp_output = sparse_ops.sparse_reshape(sp_input, [2, 3, 5])
self.assertListEqual(sp_output.indices.get_shape().as_list(), [5, 3])
self.assertListEqual(sp_output.dense_shape.get_shape().as_list(), [3])
@@ -214,8 +217,8 @@ class SparseReshapeTest(tf.test.TestCase):
# output indices and shape
sp_input = self._SparseTensorPlaceholder()
sp_input.indices.set_shape([5, None])
- new_shape = tf.placeholder(tf.int64)
- sp_output = tf.sparse_reshape(sp_input, new_shape)
+ new_shape = array_ops.placeholder(dtypes.int64)
+ sp_output = sparse_ops.sparse_reshape(sp_input, new_shape)
self.assertListEqual(sp_output.indices.get_shape().as_list(), [5, None])
self.assertListEqual(sp_output.dense_shape.get_shape().as_list(), [None])
@@ -241,8 +244,9 @@ class SparseReshapeTest(tf.test.TestCase):
new_values = new_dense[new_dense < 0.5]
sp_input = self._SparseTensorPlaceholder()
- input_val = tf.SparseTensorValue(orig_indices, orig_values, orig_shape)
- sp_output = tf.sparse_reshape(sp_input, new_shape)
+ input_val = sparse_tensor.SparseTensorValue(orig_indices, orig_values,
+ orig_shape)
+ sp_output = sparse_ops.sparse_reshape(sp_input, new_shape)
output_val = sess.run(sp_output, {sp_input: input_val})
self.assertAllEqual(output_val.indices, new_indices)
@@ -251,4 +255,4 @@ class SparseReshapeTest(tf.test.TestCase):
if __name__ == "__main__":
- tf.test.main()
+ test.main()
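
sparse_reshape_op_test.py converts the same way. The op mirrors dense reshape semantics, including a single inferred dimension; a sketch under the new imports (values are illustrative):

    from tensorflow.python.framework import sparse_tensor
    from tensorflow.python.ops import sparse_ops

    sp_input = sparse_tensor.SparseTensor(
        indices=[[0, 0], [3, 5]], values=[10.0, 33.0], dense_shape=[5, 6])

    # At most one target dimension may be -1; it is inferred from the
    # total element count, so the [3, -1] case above yields [3, 10].
    sp_output = sparse_ops.sparse_reshape(sp_input, [3, -1])
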
diff --git a/tensorflow/python/kernel_tests/sparse_serialization_ops_test.py b/tensorflow/python/kernel_tests/sparse_serialization_ops_test.py
index 159c5d9d81..af395b31bf 100644
--- a/tensorflow/python/kernel_tests/sparse_serialization_ops_test.py
+++ b/tensorflow/python/kernel_tests/sparse_serialization_ops_test.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for SerializeSparse."""
from __future__ import absolute_import
@@ -20,60 +19,61 @@ from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import sparse_ops
+from tensorflow.python.platform import test
-class SerializeSparseTest(tf.test.TestCase):
+class SerializeSparseTest(test.TestCase):
def _SparseTensorPlaceholder(self, dtype=None):
- if dtype is None: dtype = tf.int32
- return tf.SparseTensor(
- tf.placeholder(tf.int64),
- tf.placeholder(dtype),
- tf.placeholder(tf.int64))
+ if dtype is None:
+ dtype = dtypes.int32
+ return sparse_tensor_lib.SparseTensor(
+ array_ops.placeholder(dtypes.int64),
+ array_ops.placeholder(dtype), array_ops.placeholder(dtypes.int64))
def _SparseTensorValue_5x6(self, permutation):
- ind = np.array([
- [0, 0],
- [1, 0], [1, 3], [1, 4],
- [3, 2], [3, 3]]).astype(np.int64)
+ ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2],
+ [3, 3]]).astype(np.int64)
val = np.array([0, 10, 13, 14, 32, 33]).astype(np.int32)
ind = ind[permutation]
val = val[permutation]
shape = np.array([5, 6]).astype(np.int64)
- return tf.SparseTensorValue(ind, val, shape)
+ return sparse_tensor_lib.SparseTensorValue(ind, val, shape)
def _SparseTensorValue_3x4(self, permutation):
- ind = np.array([
- [0, 0],
- [1, 0], [1, 2], [1, 3],
- [2, 2], [2, 3]]).astype(np.int64)
+ ind = np.array([[0, 0], [1, 0], [1, 2], [1, 3], [2, 2],
+ [2, 3]]).astype(np.int64)
val = np.array([0, 10, 13, 14, 32, 33]).astype(np.int32)
ind = ind[permutation]
val = val[permutation]
shape = np.array([3, 4]).astype(np.int64)
- return tf.SparseTensorValue(ind, val, shape)
+ return sparse_tensor_lib.SparseTensorValue(ind, val, shape)
def _SparseTensorValue_1x1x1(self):
ind = np.array([[0, 0, 0]]).astype(np.int64)
val = np.array([0]).astype(np.int32)
shape = np.array([3, 4, 5]).astype(np.int64)
- return tf.SparseTensorValue(ind, val, shape)
+ return sparse_tensor_lib.SparseTensorValue(ind, val, shape)
def testSerializeDeserializeMany(self):
with self.test_session(use_gpu=False) as sess:
sp_input0 = self._SparseTensorValue_5x6(np.arange(6))
sp_input1 = self._SparseTensorValue_3x4(np.arange(6))
- serialized0 = tf.serialize_sparse(sp_input0)
- serialized1 = tf.serialize_sparse(sp_input1)
- serialized_concat = tf.stack([serialized0, serialized1])
+ serialized0 = sparse_ops.serialize_sparse(sp_input0)
+ serialized1 = sparse_ops.serialize_sparse(sp_input1)
+ serialized_concat = array_ops.stack([serialized0, serialized1])
- sp_deserialized = tf.deserialize_many_sparse(
- serialized_concat, dtype=tf.int32)
+ sp_deserialized = sparse_ops.deserialize_many_sparse(
+ serialized_concat, dtype=dtypes.int32)
combined_indices, combined_values, combined_shape = sess.run(
sp_deserialized)
@@ -92,15 +92,16 @@ class SerializeSparseTest(tf.test.TestCase):
sp_input1 = self._SparseTensorPlaceholder()
input0_val = self._SparseTensorValue_5x6(np.arange(6))
input1_val = self._SparseTensorValue_3x4(np.arange(6))
- serialized0 = tf.serialize_sparse(sp_input0)
- serialized1 = tf.serialize_sparse(sp_input1)
- serialized_concat = tf.stack([serialized0, serialized1])
+ serialized0 = sparse_ops.serialize_sparse(sp_input0)
+ serialized1 = sparse_ops.serialize_sparse(sp_input1)
+ serialized_concat = array_ops.stack([serialized0, serialized1])
- sp_deserialized = tf.deserialize_many_sparse(
- serialized_concat, dtype=tf.int32)
+ sp_deserialized = sparse_ops.deserialize_many_sparse(
+ serialized_concat, dtype=dtypes.int32)
combined_indices, combined_values, combined_shape = sess.run(
- sp_deserialized, {sp_input0: input0_val, sp_input1: input1_val})
+ sp_deserialized, {sp_input0: input0_val,
+ sp_input1: input1_val})
self.assertAllEqual(combined_indices[:6, 0], [0] * 6) # minibatch 0
self.assertAllEqual(combined_indices[:6, 1:], input0_val[0])
@@ -116,14 +117,17 @@ class SerializeSparseTest(tf.test.TestCase):
indices_value = np.array([[0, 0], [0, 1], [2, 0]], dtype=np.int64)
values_value = np.array([b"a", b"b", b"c"])
shape_value = np.array([4, 5], dtype=np.int64)
- sparse_tensor = self._SparseTensorPlaceholder(dtype=tf.string)
- serialized = tf.serialize_many_sparse(sparse_tensor)
- deserialized = tf.deserialize_many_sparse(serialized, dtype=tf.string)
+ sparse_tensor = self._SparseTensorPlaceholder(dtype=dtypes.string)
+ serialized = sparse_ops.serialize_many_sparse(sparse_tensor)
+ deserialized = sparse_ops.deserialize_many_sparse(
+ serialized, dtype=dtypes.string)
serialized_value, deserialized_value = sess.run(
[serialized, deserialized],
- feed_dict={sparse_tensor.indices: indices_value,
- sparse_tensor.values: values_value,
- sparse_tensor.dense_shape: shape_value})
+ feed_dict={
+ sparse_tensor.indices: indices_value,
+ sparse_tensor.values: values_value,
+ sparse_tensor.dense_shape: shape_value
+ })
self.assertEqual(serialized_value.shape, (4, 3))
self.assertAllEqual(deserialized_value.indices, indices_value)
self.assertAllEqual(deserialized_value.values, values_value)
@@ -135,18 +139,19 @@ class SerializeSparseTest(tf.test.TestCase):
sp_input1 = self._SparseTensorPlaceholder()
input0_val = self._SparseTensorValue_5x6(np.arange(6))
input1_val = self._SparseTensorValue_3x4(np.arange(6))
- serialized0 = tf.serialize_sparse(sp_input0)
- serialized1 = tf.serialize_sparse(sp_input1)
- serialized_concat = tf.stack([serialized0, serialized1])
+ serialized0 = sparse_ops.serialize_sparse(sp_input0)
+ serialized1 = sparse_ops.serialize_sparse(sp_input1)
+ serialized_concat = array_ops.stack([serialized0, serialized1])
- sp_deserialized = tf.deserialize_many_sparse(
- serialized_concat, dtype=tf.int64)
+ sp_deserialized = sparse_ops.deserialize_many_sparse(
+ serialized_concat, dtype=dtypes.int64)
with self.assertRaisesOpError(
r"Requested SparseTensor of type int64 but "
r"SparseTensor\[0\].values.dtype\(\) == int32"):
- sess.run(
- sp_deserialized, {sp_input0: input0_val, sp_input1: input1_val})
+ sess.run(sp_deserialized,
+ {sp_input0: input0_val,
+ sp_input1: input1_val})
def testDeserializeFailsInconsistentRank(self):
with self.test_session(use_gpu=False) as sess:
@@ -154,29 +159,30 @@ class SerializeSparseTest(tf.test.TestCase):
sp_input1 = self._SparseTensorPlaceholder()
input0_val = self._SparseTensorValue_5x6(np.arange(6))
input1_val = self._SparseTensorValue_1x1x1()
- serialized0 = tf.serialize_sparse(sp_input0)
- serialized1 = tf.serialize_sparse(sp_input1)
- serialized_concat = tf.stack([serialized0, serialized1])
+ serialized0 = sparse_ops.serialize_sparse(sp_input0)
+ serialized1 = sparse_ops.serialize_sparse(sp_input1)
+ serialized_concat = array_ops.stack([serialized0, serialized1])
- sp_deserialized = tf.deserialize_many_sparse(
- serialized_concat, dtype=tf.int32)
+ sp_deserialized = sparse_ops.deserialize_many_sparse(
+ serialized_concat, dtype=dtypes.int32)
with self.assertRaisesOpError(
r"Inconsistent rank across SparseTensors: rank prior to "
r"SparseTensor\[1\] was: 3 but rank of SparseTensor\[1\] is: 4"):
- sess.run(
- sp_deserialized, {sp_input0: input0_val, sp_input1: input1_val})
+ sess.run(sp_deserialized,
+ {sp_input0: input0_val,
+ sp_input1: input1_val})
def testDeserializeFailsInvalidProto(self):
with self.test_session(use_gpu=False) as sess:
sp_input0 = self._SparseTensorPlaceholder()
input0_val = self._SparseTensorValue_5x6(np.arange(6))
- serialized0 = tf.serialize_sparse(sp_input0)
+ serialized0 = sparse_ops.serialize_sparse(sp_input0)
serialized1 = ["a", "b", "c"]
- serialized_concat = tf.stack([serialized0, serialized1])
+ serialized_concat = array_ops.stack([serialized0, serialized1])
- sp_deserialized = tf.deserialize_many_sparse(
- serialized_concat, dtype=tf.int32)
+ sp_deserialized = sparse_ops.deserialize_many_sparse(
+ serialized_concat, dtype=dtypes.int32)
with self.assertRaisesOpError(
r"Could not parse serialized_sparse\[1, 0\]"):
@@ -184,4 +190,4 @@ class SerializeSparseTest(tf.test.TestCase):
if __name__ == "__main__":
- tf.test.main()
+ test.main()
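
A minimal standalone sketch of the serialize/deserialize round trip these tests exercise, written in the direct-import style the commit introduces; the tensor contents, the two-copy minibatch, and the printed shape are illustrative assumptions, not taken from the test file:

    import numpy as np

    from tensorflow.python.client import session
    from tensorflow.python.framework import sparse_tensor
    from tensorflow.python.ops import array_ops
    from tensorflow.python.ops import sparse_ops

    # A 3x4 SparseTensor with two nonzero entries (illustrative values).
    st = sparse_tensor.SparseTensor(
        indices=np.array([[0, 0], [1, 2]], dtype=np.int64),
        values=np.array([1, 2], dtype=np.int32),
        dense_shape=np.array([3, 4], dtype=np.int64))

    serialized = sparse_ops.serialize_sparse(st)       # 3-vector of strings
    batch = array_ops.stack([serialized, serialized])  # minibatch of two
    roundtrip = sparse_ops.deserialize_many_sparse(batch, dtype=st.values.dtype)

    with session.Session() as sess:
      # deserialize_many_sparse concatenates along a new leading batch dim.
      print(sess.run(roundtrip).dense_shape)  # [2, 3, 4]
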
diff --git a/tensorflow/python/kernel_tests/sparse_split_op_test.py b/tensorflow/python/kernel_tests/sparse_split_op_test.py
index dedaa1b1c4..23c6c390b2 100644
--- a/tensorflow/python/kernel_tests/sparse_split_op_test.py
+++ b/tensorflow/python/kernel_tests/sparse_split_op_test.py
@@ -19,23 +19,26 @@ from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+from tensorflow.python.framework import sparse_tensor
+from tensorflow.python.ops import sparse_ops
+from tensorflow.python.platform import test
-class SparseSplitOpTest(tf.test.TestCase):
+
+class SparseSplitOpTest(test.TestCase):
def _SparseTensor_4x6(self):
# [0 | |2 | |4 |5 ]
# [ |11| |13|14| ]
# [20| | |23| |25]
# [30| |32|33| |35]
- ind = np.array(
- [[0, 0], [0, 2], [0, 4], [0, 5], [1, 1], [1, 3], [1, 4], [2, 0], [2, 3],
- [2, 5], [3, 0], [3, 2], [3, 3], [3, 5]]).astype(np.int64)
- val = np.array([0, 2, 4, 5, 11, 13, 14, 20, 23, 25, 30, 32, 33, 35]).astype(
- np.int64)
+ ind = np.array([[0, 0], [0, 2], [0, 4], [0, 5], [1, 1], [1, 3], [1, 4],
+ [2, 0], [2, 3], [2, 5], [3, 0], [3, 2], [3, 3],
+ [3, 5]]).astype(np.int64)
+ val = np.array(
+ [0, 2, 4, 5, 11, 13, 14, 20, 23, 25, 30, 32, 33, 35]).astype(np.int64)
shape = np.array([4, 6]).astype(np.int64)
- return tf.SparseTensor(ind, val, shape)
+ return sparse_tensor.SparseTensor(ind, val, shape)
def _SparseTensor_5x7(self):
# [0 | |2 | |4 |5 | ]
@@ -43,14 +46,14 @@ class SparseSplitOpTest(tf.test.TestCase):
# [20| | |23| |25| ]
# [30| |32|33| |35| ]
# [ |41| | |44| |46]
- ind = np.array([
- [0, 0], [0, 2], [0, 4], [0, 5], [1, 1], [1, 3], [1, 4], [1, 6], [2, 0],
- [2, 3], [2, 5], [3, 0], [3, 2], [3, 3], [3, 5], [4, 1], [4, 4], [4, 6]
- ]).astype(np.int64)
- val = np.array([0, 2, 4, 5, 11, 13, 14, 16, 20, 23, 25, 30, 32, 33, 35, 41,
- 44, 46]).astype(np.int64)
+ ind = np.array([[0, 0], [0, 2], [0, 4], [0, 5], [1, 1], [1, 3], [1, 4],
+ [1, 6], [2, 0], [2, 3], [2, 5], [3, 0], [3, 2], [3, 3],
+ [3, 5], [4, 1], [4, 4], [4, 6]]).astype(np.int64)
+ val = np.array(
+ [0, 2, 4, 5, 11, 13, 14, 16, 20, 23, 25, 30, 32, 33, 35, 41, 44,
+ 46]).astype(np.int64)
shape = np.array([5, 7]).astype(np.int64)
- return tf.SparseTensor(ind, val, shape)
+ return sparse_tensor.SparseTensor(ind, val, shape)
def _SparseTensorValue_3x4x2(self):
# slice(:,:, 0)
@@ -61,56 +64,55 @@ class SparseSplitOpTest(tf.test.TestCase):
# ['a1'| |'b1'| ]
# [ |'c1'| |'d1']
# [ | |'e1'| ]
- ind = np.array([[0, 0, 0], [0, 0, 1], [0, 2, 0], [0, 2, 1],
- [1, 1, 0], [1, 1, 1], [1, 3, 0], [1, 3, 1],
- [2, 2, 0], [2, 2, 1]]).astype(np.int64)
+ ind = np.array([[0, 0, 0], [0, 0, 1], [0, 2, 0], [0, 2, 1], [1, 1, 0],
+ [1, 1, 1], [1, 3, 0], [1, 3, 1], [2, 2, 0],
+ [2, 2, 1]]).astype(np.int64)
val = np.array(['a0', 'a1', 'b0', 'b1', 'c0', 'c1', 'd0', 'd1', 'e0', 'e1'])
shape = np.array([3, 4, 2]).astype(np.int64)
- return tf.SparseTensorValue(ind, val, shape)
+ return sparse_tensor.SparseTensorValue(ind, val, shape)
def _SparseTensor_3x4x2(self):
- return tf.SparseTensor.from_value(self._SparseTensorValue_3x4x2())
+ return sparse_tensor.SparseTensor.from_value(
+ self._SparseTensorValue_3x4x2())
def testSplitMatrixRows(self):
with self.test_session(use_gpu=False):
- sp_tensors = tf.sparse_split(
+ sp_tensors = sparse_ops.sparse_split(
sp_input=self._SparseTensor_4x6(), num_split=2, axis=0)
self.assertAllEqual(len(sp_tensors), 2)
- self.assertAllEqual(sp_tensors[0].indices.eval(),
- [[0, 0], [0, 2], [0, 4], [0, 5], [1, 1], [1, 3], [1,
- 4]])
+ self.assertAllEqual(sp_tensors[0].indices.eval(), [[0, 0], [0, 2], [0, 4],
+ [0, 5], [1, 1], [1, 3],
+ [1, 4]])
self.assertAllEqual(sp_tensors[0].values.eval(), [0, 2, 4, 5, 11, 13, 14])
self.assertAllEqual(sp_tensors[0].dense_shape.eval(), [2, 6])
- self.assertAllEqual(sp_tensors[1].indices.eval(),
- [[0, 0], [0, 3], [0, 5], [1, 0], [1, 2], [1, 3], [1,
- 5]])
- self.assertAllEqual(sp_tensors[1].values.eval(), [20, 23, 25, 30, 32, 33,
- 35])
+ self.assertAllEqual(sp_tensors[1].indices.eval(), [[0, 0], [0, 3], [0, 5],
+ [1, 0], [1, 2], [1, 3],
+ [1, 5]])
+ self.assertAllEqual(sp_tensors[1].values.eval(),
+ [20, 23, 25, 30, 32, 33, 35])
self.assertAllEqual(sp_tensors[1].dense_shape.eval(), [2, 6])
def testSplitMatrixUnevenCols(self):
with self.test_session(use_gpu=False):
- sp_tensors_3 = tf.sparse_split(
+ sp_tensors_3 = sparse_ops.sparse_split(
sp_input=self._SparseTensor_5x7(), num_split=3, axis=1)
self.assertAllEqual(len(sp_tensors_3), 3)
self.assertAllEqual(sp_tensors_3[0].indices.eval(),
[[0, 0], [0, 2], [1, 1], [2, 0], [3, 0], [3, 2],
[4, 1]])
- self.assertAllEqual(sp_tensors_3[0].values.eval(), [0, 2, 11, 20, 30, 32,
- 41])
+ self.assertAllEqual(sp_tensors_3[0].values.eval(),
+ [0, 2, 11, 20, 30, 32, 41])
self.assertAllEqual(sp_tensors_3[0].dense_shape.eval(), [5, 3])
- self.assertAllEqual(sp_tensors_3[1].indices.eval(), [[0, 1], [1, 0],
- [1, 1], [2, 0],
- [3, 0], [4, 1]])
- self.assertAllEqual(sp_tensors_3[1].values.eval(), [4, 13, 14, 23, 33,
- 44])
+ self.assertAllEqual(sp_tensors_3[1].indices.eval(),
+ [[0, 1], [1, 0], [1, 1], [2, 0], [3, 0], [4, 1]])
+ self.assertAllEqual(sp_tensors_3[1].values.eval(),
+ [4, 13, 14, 23, 33, 44])
self.assertAllEqual(sp_tensors_3[1].dense_shape.eval(), [5, 2])
- self.assertAllEqual(sp_tensors_3[2].indices.eval(), [[0, 0], [1, 1],
- [2, 0], [3, 0],
- [4, 1]])
+ self.assertAllEqual(sp_tensors_3[2].indices.eval(),
+ [[0, 0], [1, 1], [2, 0], [3, 0], [4, 1]])
self.assertAllEqual(sp_tensors_3[2].values.eval(), [5, 16, 25, 35, 46])
self.assertAllEqual(sp_tensors_3[2].dense_shape.eval(), [5, 2])
- sp_tensors_4 = tf.sparse_split(
+ sp_tensors_4 = sparse_ops.sparse_split(
sp_input=self._SparseTensor_5x7(), num_split=4, axis=1)
self.assertAllEqual(len(sp_tensors_4), 4)
self.assertAllEqual(sp_tensors_4[0].indices.eval(),
@@ -131,33 +133,33 @@ class SparseSplitOpTest(tf.test.TestCase):
def testSplitMatrixUnevenRows(self):
with self.test_session(use_gpu=False):
- sp_tensors_2 = tf.sparse_split(
+ sp_tensors_2 = sparse_ops.sparse_split(
sp_input=self._SparseTensor_5x7(), num_split=2, axis=0)
self.assertAllEqual(sp_tensors_2[0].indices.eval(),
[[0, 0], [0, 2], [0, 4], [0, 5], [1, 1], [1, 3],
[1, 4], [1, 6], [2, 0], [2, 3], [2, 5]])
- self.assertAllEqual(sp_tensors_2[0].values.eval(), [0, 2, 4, 5, 11, 13,
- 14, 16, 20, 23, 25])
+ self.assertAllEqual(sp_tensors_2[0].values.eval(),
+ [0, 2, 4, 5, 11, 13, 14, 16, 20, 23, 25])
self.assertAllEqual(sp_tensors_2[0].dense_shape.eval(), [3, 7])
self.assertAllEqual(sp_tensors_2[1].indices.eval(),
[[0, 0], [0, 2], [0, 3], [0, 5], [1, 1], [1, 4],
[1, 6]])
- self.assertAllEqual(sp_tensors_2[1].values.eval(), [30, 32, 33, 35, 41,
- 44, 46])
+ self.assertAllEqual(sp_tensors_2[1].values.eval(),
+ [30, 32, 33, 35, 41, 44, 46])
self.assertAllEqual(sp_tensors_2[1].dense_shape.eval(), [2, 7])
self.assertAllEqual(len(sp_tensors_2), 2)
- sp_tensors_3 = tf.sparse_split(
+ sp_tensors_3 = sparse_ops.sparse_split(
sp_input=self._SparseTensor_5x7(), num_split=3, axis=0)
self.assertAllEqual(len(sp_tensors_3), 3)
self.assertAllEqual(sp_tensors_3[0].indices.eval(),
[[0, 0], [0, 2], [0, 4], [0, 5], [1, 1], [1, 3],
[1, 4], [1, 6]])
- self.assertAllEqual(sp_tensors_3[0].values.eval(), [0, 2, 4, 5, 11, 13,
- 14, 16])
+ self.assertAllEqual(sp_tensors_3[0].values.eval(),
+ [0, 2, 4, 5, 11, 13, 14, 16])
self.assertAllEqual(sp_tensors_3[0].dense_shape.eval(), [2, 7])
- self.assertAllEqual(sp_tensors_3[1].values.eval(), [20, 23, 25, 30, 32,
- 33, 35])
+ self.assertAllEqual(sp_tensors_3[1].values.eval(),
+ [20, 23, 25, 30, 32, 33, 35])
self.assertAllEqual(sp_tensors_3[1].dense_shape.eval(), [2, 7])
self.assertAllEqual(sp_tensors_3[2].indices.eval(), [[0, 1], [0, 4],
[0, 6]])
@@ -167,7 +169,7 @@ class SparseSplitOpTest(tf.test.TestCase):
def testSplitAllRows(self):
with self.test_session(use_gpu=False):
- sp_tensors = tf.sparse_split(
+ sp_tensors = sparse_ops.sparse_split(
sp_input=self._SparseTensor_4x6(), num_split=4, axis=0)
self.assertAllEqual(len(sp_tensors), 4)
self.assertAllEqual(sp_tensors[0].indices.eval(), [[0, 0], [0, 2], [0, 4],
@@ -189,7 +191,7 @@ class SparseSplitOpTest(tf.test.TestCase):
def testSplitColumns(self):
with self.test_session(use_gpu=False):
- sparse_tensors = tf.sparse_split(
+ sparse_tensors = sparse_ops.sparse_split(
sp_input=self._SparseTensor_4x6(), num_split=3, axis=1)
self.assertAllEqual(len(sparse_tensors), 3)
self.assertAllEqual(sparse_tensors[0].indices.eval(), [[0, 0], [1, 1],
@@ -207,7 +209,7 @@ class SparseSplitOpTest(tf.test.TestCase):
def testSplitAllColumns(self):
with self.test_session(use_gpu=False):
- sparse_tensors = tf.sparse_split(
+ sparse_tensors = sparse_ops.sparse_split(
sp_input=self._SparseTensor_4x6(), num_split=6, axis=1)
self.assertAllEqual(len(sparse_tensors), 6)
self.assertAllEqual(sparse_tensors[0].indices.eval(), [[0, 0], [2, 0],
@@ -233,26 +235,26 @@ class SparseSplitOpTest(tf.test.TestCase):
self.assertAllEqual(sparse_tensors[5].dense_shape.eval(), [4, 1])
def testSliceConcat(self):
- for sp_input in (
- self._SparseTensorValue_3x4x2(), self._SparseTensor_3x4x2()):
+ for sp_input in (self._SparseTensorValue_3x4x2(),
+ self._SparseTensor_3x4x2()):
with self.test_session(use_gpu=False):
- sparse_tensors = tf.sparse_split(
+ sparse_tensors = sparse_ops.sparse_split(
sp_input=sp_input, num_split=2, axis=1)
- concat_tensor = tf.sparse_concat(1, sparse_tensors)
+ concat_tensor = sparse_ops.sparse_concat(1, sparse_tensors)
expected_output = self._SparseTensor_3x4x2()
self.assertAllEqual(concat_tensor.indices.eval(),
expected_output.indices.eval())
def testArgumentErrors(self):
with self.assertRaisesRegexp(ValueError, 'Keyword arguments are required'):
- tf.sparse_split(3, 2, 1)
+ sparse_ops.sparse_split(3, 2, 1)
with self.assertRaisesRegexp(ValueError, 'sp_input is required'):
- tf.sparse_split()
+ sparse_ops.sparse_split()
with self.assertRaisesRegexp(ValueError, 'num_split is required'):
- tf.sparse_split(sp_input=1)
+ sparse_ops.sparse_split(sp_input=1)
with self.assertRaisesRegexp(ValueError, 'axis is required'):
- tf.sparse_split(num_split=2, sp_input=1)
+ sparse_ops.sparse_split(num_split=2, sp_input=1)
if __name__ == '__main__':
- tf.test.main()
+ test.main()
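
As a pointer for readers, a small sketch of the keyword-only calling convention that testArgumentErrors pins down, using illustrative data:

    import numpy as np

    from tensorflow.python.client import session
    from tensorflow.python.framework import sparse_tensor
    from tensorflow.python.ops import sparse_ops

    st = sparse_tensor.SparseTensor(
        indices=np.array([[0, 1], [1, 0]], dtype=np.int64),
        values=np.array([10, 20], dtype=np.int64),
        dense_shape=np.array([2, 2], dtype=np.int64))

    # Keywords are mandatory: sparse_ops.sparse_split(st, 2, 0) raises the
    # ValueError asserted in testArgumentErrors above.
    halves = sparse_ops.sparse_split(sp_input=st, num_split=2, axis=0)

    with session.Session() as sess:
      for piece in sess.run(halves):  # two SparseTensorValues, one per row
        print(piece.indices, piece.values, piece.dense_shape)
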
diff --git a/tensorflow/python/kernel_tests/sparse_tensor_dense_matmul_grad_test.py b/tensorflow/python/kernel_tests/sparse_tensor_dense_matmul_grad_test.py
index b443358fda..df5462dd2d 100644
--- a/tensorflow/python/kernel_tests/sparse_tensor_dense_matmul_grad_test.py
+++ b/tensorflow/python/kernel_tests/sparse_tensor_dense_matmul_grad_test.py
@@ -12,17 +12,23 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for the gradient of `tf.sparse_tensor_dense_matmul()`."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import sparse_tensor
+from tensorflow.python.ops import gradient_checker
+from tensorflow.python.ops import sparse_ops
+import tensorflow.python.ops.sparse_grad # pylint: disable=unused-import
+from tensorflow.python.platform import test
-class SparseTensorDenseMatMulGradientTest(tf.test.TestCase):
+class SparseTensorDenseMatMulGradientTest(test.TestCase):
def _sparsify(self, x):
x[x < 0.5] = 0
@@ -32,7 +38,7 @@ class SparseTensorDenseMatMulGradientTest(tf.test.TestCase):
x_values = x[non_zero]
x_shape = x.shape
- return tf.SparseTensor(
+ return sparse_tensor.SparseTensor(
indices=x_indices, values=x_values, dense_shape=x_shape), len(x_values)
def _randomTensor(self, size, np_dtype, adjoint=False, sparse=False):
@@ -45,7 +51,7 @@ class SparseTensorDenseMatMulGradientTest(tf.test.TestCase):
if sparse:
return self._sparsify(x)
else:
- return tf.constant(x, dtype=np_dtype)
+ return constant_op.constant(x, dtype=np_dtype)
def _testGradients(self, adjoint_a, adjoint_b, name, np_dtype):
n, k, m = np.random.randint(1, 10, size=3)
@@ -53,15 +59,15 @@ class SparseTensorDenseMatMulGradientTest(tf.test.TestCase):
[n, k], np_dtype, adjoint=adjoint_a, sparse=True)
dense_t = self._randomTensor([k, m], np_dtype, adjoint=adjoint_b)
- matmul = tf.sparse_tensor_dense_matmul(
+ matmul = sparse_ops.sparse_tensor_dense_matmul(
sp_t, dense_t, adjoint_a=adjoint_a, adjoint_b=adjoint_b, name=name)
with self.test_session(use_gpu=True):
dense_t_shape = [m, k] if adjoint_b else [k, m]
sp_t_val_shape = [nnz]
- err = tf.test.compute_gradient_error([dense_t, sp_t.values],
- [dense_t_shape, sp_t_val_shape],
- matmul, [n, m])
+ err = gradient_checker.compute_gradient_error(
+ [dense_t, sp_t.values], [dense_t_shape, sp_t_val_shape], matmul,
+ [n, m])
print("%s gradient err = %s" % (name, err))
self.assertLess(err, 1e-3)
@@ -79,4 +85,4 @@ class SparseTensorDenseMatMulGradientTest(tf.test.TestCase):
if __name__ == "__main__":
- tf.test.main()
+ test.main()
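
The gradient check above follows a generic pattern; here is a compact sketch of the same gradient_checker call applied to a plain dense matmul (the shapes and the printed error are illustrative assumptions):

    import numpy as np

    from tensorflow.python.client import session
    from tensorflow.python.framework import constant_op
    from tensorflow.python.framework import dtypes
    from tensorflow.python.ops import gradient_checker
    from tensorflow.python.ops import math_ops

    with session.Session():
      a = constant_op.constant(np.random.rand(3, 2), dtype=dtypes.float64)
      b = constant_op.constant(np.random.rand(2, 4), dtype=dtypes.float64)
      prod = math_ops.matmul(a, b)
      # Compares the analytic Jacobian against finite differences and
      # returns the max elementwise error; matmul is linear, so it is tiny.
      err = gradient_checker.compute_gradient_error(a, [3, 2], prod, [3, 4])
      print(err)
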
diff --git a/tensorflow/python/kernel_tests/sparse_tensor_dense_matmul_op_test.py b/tensorflow/python/kernel_tests/sparse_tensor_dense_matmul_op_test.py
index 5685668f68..25da6691e6 100644
--- a/tensorflow/python/kernel_tests/sparse_tensor_dense_matmul_op_test.py
+++ b/tensorflow/python/kernel_tests/sparse_tensor_dense_matmul_op_test.py
@@ -12,8 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for sparse_ops.sparse_tensor_dense_matmul."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
@@ -22,19 +22,28 @@ import sys
import time
import numpy as np
-import tensorflow as tf
+from tensorflow.core.protobuf import config_pb2
+from tensorflow.python.client import session
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
+from tensorflow.python.framework import sparse_tensor
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import control_flow_ops
+from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
-# pylint: enable=g-bad-import-order,unused-import
+from tensorflow.python.platform import app
+from tensorflow.python.platform import test
def _maybe_complex(x):
if x.dtype.kind == "c": # complex
- return (x + 1j*x) / 2
+ return (x + 1j * x) / 2
return x
-class SparseTensorDenseMatMulTest(tf.test.TestCase):
+class SparseTensorDenseMatMulTest(test.TestCase):
def _testMatmul(self, x, y, adjoint_a=False, adjoint_b=False):
x_mat = np.matrix(x)
@@ -51,12 +60,14 @@ class SparseTensorDenseMatMulTest(tf.test.TestCase):
x_shape = x.shape
with self.test_session(use_gpu=True):
- sp_x_value = tf.SparseTensorValue(
+ sp_x_value = sparse_tensor.SparseTensorValue(
indices=x_indices, values=x_values, dense_shape=x_shape)
tf_value_ans = sparse_ops.sparse_tensor_dense_matmul(
sp_x_value, y, adjoint_a=adjoint_a, adjoint_b=adjoint_b)
tf_tensor_ans = sparse_ops.sparse_tensor_dense_matmul(
- tf.SparseTensor.from_value(sp_x_value), y, adjoint_a=adjoint_a,
+ sparse_tensor.SparseTensor.from_value(sp_x_value),
+ y,
+ adjoint_a=adjoint_a,
adjoint_b=adjoint_b)
# Ensure that at least the RHS shape is known.
@@ -94,22 +105,23 @@ class SparseTensorDenseMatMulTest(tf.test.TestCase):
x_indices = np.vstack(np.where(x)).astype(np.int64).T
x_values = x[np.where(x)]
x_shape = x.shape
- x_st = tf.SparseTensor(x_indices, x_values, x_shape)
- result = tf.sparse_tensor_dense_matmul(x_st, y)
+ x_st = sparse_tensor.SparseTensor(x_indices, x_values, x_shape)
+ result = sparse_ops.sparse_tensor_dense_matmul(x_st, y)
self.assertEqual(result.get_shape(), (10, 20))
- x_shape_unknown = tf.placeholder(dtype=tf.int64, shape=None)
- x_st_shape_unknown = tf.SparseTensor(x_indices, x_values, x_shape_unknown)
- result_left_shape_unknown = tf.sparse_tensor_dense_matmul(
+ x_shape_unknown = array_ops.placeholder(dtype=dtypes.int64, shape=None)
+ x_st_shape_unknown = sparse_tensor.SparseTensor(x_indices, x_values,
+ x_shape_unknown)
+ result_left_shape_unknown = sparse_ops.sparse_tensor_dense_matmul(
x_st_shape_unknown, y)
- self.assertEqual(
- result_left_shape_unknown.get_shape().as_list(), [None, 20])
+ self.assertEqual(result_left_shape_unknown.get_shape().as_list(),
+ [None, 20])
x_shape_inconsistent = [10, 15]
- x_st_shape_inconsistent = tf.SparseTensor(
- x_indices, x_values, x_shape_inconsistent)
+ x_st_shape_inconsistent = sparse_tensor.SparseTensor(x_indices, x_values,
+ x_shape_inconsistent)
with self.assertRaisesRegexp(ValueError, "Dimensions must be equal"):
- tf.sparse_tensor_dense_matmul(x_st_shape_inconsistent, y)
+ sparse_ops.sparse_tensor_dense_matmul(x_st_shape_inconsistent, y)
# Tests setting one dimension to be a high value.
def _testLarge(self, np_dtype):
@@ -150,46 +162,67 @@ class SparseTensorDenseMatMulTest(tf.test.TestCase):
self._testMatmul(x, y, adjoint_a, adjoint_b)
-def _sparse_tensor_dense_vs_dense_matmul_benchmark_dense(
- x, y, adjoint_a, adjoint_b):
+def _sparse_tensor_dense_vs_dense_matmul_benchmark_dense(x, y, adjoint_a,
+ adjoint_b):
+
def body(t, prev):
- with tf.control_dependencies([prev]):
- return (t + 1, tf.matmul(
- x, y, transpose_a=adjoint_a, transpose_b=adjoint_b,
- a_is_sparse=True, b_is_sparse=False))
- t0 = tf.constant(0)
- v0 = tf.constant(0.0)
+ with ops.control_dependencies([prev]):
+ return (t + 1, math_ops.matmul(
+ x,
+ y,
+ transpose_a=adjoint_a,
+ transpose_b=adjoint_b,
+ a_is_sparse=True,
+ b_is_sparse=False))
+
+ t0 = constant_op.constant(0)
+ v0 = constant_op.constant(0.0)
+
def _timeit(iterations, _):
- (_, final) = tf.while_loop(
- lambda t, _: t < iterations, body, (t0, v0),
- parallel_iterations=1, back_prop=False)
+ (_, final) = control_flow_ops.while_loop(
+ lambda t, _: t < iterations,
+ body, (t0, v0),
+ parallel_iterations=1,
+ back_prop=False)
return [final]
+
return _timeit
-def _sparse_tensor_dense_vs_dense_matmul_benchmark_sparse(
- x_ind, x_val, x_shape, y, adjoint_a, adjoint_b):
- sp_x = tf.SparseTensor(indices=x_ind, values=x_val, dense_shape=x_shape)
+def _sparse_tensor_dense_vs_dense_matmul_benchmark_sparse(x_ind, x_val, x_shape,
+ y, adjoint_a,
+ adjoint_b):
+ sp_x = sparse_tensor.SparseTensor(
+ indices=x_ind, values=x_val, dense_shape=x_shape)
def body(t, prev):
- with tf.control_dependencies([prev]):
- return (t + 1,
- sparse_ops.sparse_tensor_dense_matmul(
- sp_x, y, adjoint_a=adjoint_a, adjoint_b=adjoint_b))
+ with ops.control_dependencies([prev]):
+ return (t + 1, sparse_ops.sparse_tensor_dense_matmul(
+ sp_x, y, adjoint_a=adjoint_a, adjoint_b=adjoint_b))
+
+ t0 = constant_op.constant(0)
+ v0 = constant_op.constant(0.0)
- t0 = tf.constant(0)
- v0 = tf.constant(0.0)
def _timeit(iterations, _):
- (_, final) = tf.while_loop(
- lambda t, _: t < iterations, body, (t0, v0),
- parallel_iterations=1, back_prop=False)
+ (_, final) = control_flow_ops.while_loop(
+ lambda t, _: t < iterations,
+ body, (t0, v0),
+ parallel_iterations=1,
+ back_prop=False)
return [final]
+
return _timeit
-def sparse_tensor_dense_vs_dense_matmul_benchmark(
- thresh, m, k, n, adjoint_a, adjoint_b, use_gpu, skip_dense=False):
- config = tf.ConfigProto()
+def sparse_tensor_dense_vs_dense_matmul_benchmark(thresh,
+ m,
+ k,
+ n,
+ adjoint_a,
+ adjoint_b,
+ use_gpu,
+ skip_dense=False):
+ config = config_pb2.ConfigProto()
config.allow_soft_placement = True
# Configurable for benchmarking:
@@ -200,8 +233,10 @@ def sparse_tensor_dense_vs_dense_matmul_benchmark(
x = np.random.rand(m, k).astype(np.float32)
x[x < thresh] = 0
y = np.random.randn(k, n).astype(np.float32)
- if adjoint_a: x = x.T
- if adjoint_b: y = y.T
+ if adjoint_a:
+ x = x.T
+ if adjoint_b:
+ y = y.T
def _timer(sess, ops_fn, iterations):
# Warm up before timing
@@ -212,49 +247,49 @@ def sparse_tensor_dense_vs_dense_matmul_benchmark(
sess.run(ops_fn(iterations, sess))
end = time.time()
- return (end - start)/(1.0 * iterations) # Average runtime per iteration
+ return (end - start) / (1.0 * iterations) # Average runtime per iteration
# Using regular matmul, marking one of the matrices as dense.
if skip_dense:
delta_dense = float("nan")
else:
- with tf.Session("", config=config, graph=tf.Graph()) as sess:
+ with session.Session("", config=config, graph=ops.Graph()) as sess:
if not use_gpu:
- with tf.device("/cpu:0"):
- x_t = tf.constant(x)
- y_t = tf.constant(y)
+ with ops.device("/cpu:0"):
+ x_t = constant_op.constant(x)
+ y_t = constant_op.constant(y)
ops_fn = _sparse_tensor_dense_vs_dense_matmul_benchmark_dense(
x_t, y_t, adjoint_a, adjoint_b)
else:
- x_t = tf.constant(x)
- y_t = tf.constant(y)
- ops_fn = _sparse_tensor_dense_vs_dense_matmul_benchmark_dense(
- x_t, y_t, adjoint_a, adjoint_b)
+ x_t = constant_op.constant(x)
+ y_t = constant_op.constant(y)
+ ops_fn = _sparse_tensor_dense_vs_dense_matmul_benchmark_dense(
+ x_t, y_t, adjoint_a, adjoint_b)
delta_dense = _timer(sess, ops_fn, 1000)
# Using sparse_tensor_dense_matmul.
- with tf.Session("", config=config, graph=tf.Graph()) as sess:
+ with session.Session("", config=config, graph=ops.Graph()) as sess:
if not use_gpu:
- with tf.device("/cpu:0"):
- x_ind = tf.constant(np.vstack(np.where(x)).astype(np.int64).T)
- x_val = tf.constant(x[np.where(x)])
- x_shape = tf.constant(np.array(x.shape).astype(np.int64))
- y_t = tf.constant(y)
+ with ops.device("/cpu:0"):
+ x_ind = constant_op.constant(np.vstack(np.where(x)).astype(np.int64).T)
+ x_val = constant_op.constant(x[np.where(x)])
+ x_shape = constant_op.constant(np.array(x.shape).astype(np.int64))
+ y_t = constant_op.constant(y)
ops_fn = _sparse_tensor_dense_vs_dense_matmul_benchmark_sparse(
x_ind, x_val, x_shape, y_t, adjoint_a, adjoint_b)
else:
- x_ind = tf.constant(np.vstack(np.where(x)).astype(np.int64).T)
- x_val = tf.constant(x[np.where(x)])
- x_shape = tf.constant(np.array(x.shape).astype(np.int64))
- y_t = tf.constant(y)
+ x_ind = constant_op.constant(np.vstack(np.where(x)).astype(np.int64).T)
+ x_val = constant_op.constant(x[np.where(x)])
+ x_shape = constant_op.constant(np.array(x.shape).astype(np.int64))
+ y_t = constant_op.constant(y)
ops_fn = _sparse_tensor_dense_vs_dense_matmul_benchmark_sparse(
x_ind, x_val, x_shape, y_t, adjoint_a, adjoint_b)
delta_sparse = _timer(sess, ops_fn, 1000)
- print(
- "%g \t %d \t %s \t %d \t %d \t %g \t %g \t %g"
- % (1 - thresh, n, use_gpu, m, k, delta_dense, delta_sparse,
- delta_sparse/delta_dense))
+ print("%g \t %d \t %s \t %d \t %d \t %g \t %g \t %g" %
+ (1 - thresh, n, use_gpu, m, k, delta_dense, delta_sparse,
+ delta_sparse / delta_dense))
def main(_):
@@ -285,6 +320,6 @@ def main(_):
if __name__ == "__main__":
if "--benchmarks" in sys.argv:
sys.argv.remove("--benchmarks")
- tf.app.run()
+ app.run()
else:
- tf.test.main()
+ test.main()
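
A compact sketch of the equivalence the tests above verify, comparing sparse_tensor_dense_matmul against a plain numpy product; the sizes, sparsity threshold, and tolerances are illustrative assumptions:

    import numpy as np

    from tensorflow.python.client import session
    from tensorflow.python.framework import sparse_tensor
    from tensorflow.python.ops import sparse_ops

    x = np.random.rand(5, 3).astype(np.float32)
    x[x < 0.5] = 0  # sparsify, as the tests do
    y = np.random.rand(3, 4).astype(np.float32)

    st = sparse_tensor.SparseTensor(
        indices=np.vstack(np.where(x)).astype(np.int64).T,
        values=x[np.where(x)],
        dense_shape=np.array(x.shape, dtype=np.int64))

    product = sparse_ops.sparse_tensor_dense_matmul(st, y)

    with session.Session() as sess:
      np.testing.assert_allclose(
          sess.run(product), np.dot(x, y), rtol=1e-5, atol=1e-6)
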
diff --git a/tensorflow/python/kernel_tests/sparse_tensors_map_ops_test.py b/tensorflow/python/kernel_tests/sparse_tensors_map_ops_test.py
index 08c41bc4b6..96793d5af3 100644
--- a/tensorflow/python/kernel_tests/sparse_tensors_map_ops_test.py
+++ b/tensorflow/python/kernel_tests/sparse_tensors_map_ops_test.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for SparseTensorsMap."""
from __future__ import absolute_import
@@ -20,67 +19,70 @@ from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+from tensorflow.python.client import session
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
+from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib
+from tensorflow.python.ops import array_ops
from tensorflow.python.ops import sparse_ops
+from tensorflow.python.ops import variables
+from tensorflow.python.platform import test
# pylint: disable=protected-access
add_sparse_to_tensors_map = sparse_ops._add_sparse_to_tensors_map
add_many_sparse_to_tensors_map = sparse_ops._add_many_sparse_to_tensors_map
take_many_sparse_from_tensors_map = (
sparse_ops._take_many_sparse_from_tensors_map)
+
# pylint: enable=protected-access
-class SparseTensorsMapTest(tf.test.TestCase):
+class SparseTensorsMapTest(test.TestCase):
def _SparseTensorPlaceholder(self, dtype=None):
- if dtype is None: dtype = tf.int32
- return tf.SparseTensor(
- tf.placeholder(tf.int64),
- tf.placeholder(dtype),
- tf.placeholder(tf.int64))
+ if dtype is None:
+ dtype = dtypes.int32
+ return sparse_tensor_lib.SparseTensor(
+ array_ops.placeholder(dtypes.int64),
+ array_ops.placeholder(dtype), array_ops.placeholder(dtypes.int64))
def _SparseTensorValue_5x6(self, permutation):
- ind = np.array([
- [0, 0],
- [1, 0], [1, 3], [1, 4],
- [3, 2], [3, 3]]).astype(np.int64)
+ ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2],
+ [3, 3]]).astype(np.int64)
val = np.array([0, 10, 13, 14, 32, 33]).astype(np.int32)
ind = ind[permutation]
val = val[permutation]
shape = np.array([5, 6]).astype(np.int64)
- return tf.SparseTensorValue(ind, val, shape)
+ return sparse_tensor_lib.SparseTensorValue(ind, val, shape)
def _SparseTensorValue_3x4(self, permutation):
- ind = np.array([
- [0, 0],
- [1, 0], [1, 2], [1, 3],
- [2, 2], [2, 3]]).astype(np.int64)
+ ind = np.array([[0, 0], [1, 0], [1, 2], [1, 3], [2, 2],
+ [2, 3]]).astype(np.int64)
val = np.array([0, 10, 13, 14, 32, 33]).astype(np.int32)
ind = ind[permutation]
val = val[permutation]
shape = np.array([3, 4]).astype(np.int64)
- return tf.SparseTensorValue(ind, val, shape)
+ return sparse_tensor_lib.SparseTensorValue(ind, val, shape)
def _SparseTensorValue_1x1x1(self):
ind = np.array([[0, 0, 0]]).astype(np.int64)
val = np.array([0]).astype(np.int32)
shape = np.array([3, 4, 5]).astype(np.int64)
- return tf.SparseTensorValue(ind, val, shape)
+ return sparse_tensor_lib.SparseTensorValue(ind, val, shape)
def testAddTakeMany(self):
- with self.test_session(graph=tf.Graph(), use_gpu=False) as sess:
+ with self.test_session(graph=ops.Graph(), use_gpu=False) as sess:
sp_input0 = self._SparseTensorValue_5x6(np.arange(6))
sp_input1 = self._SparseTensorValue_3x4(np.arange(6))
handle0 = add_sparse_to_tensors_map(sp_input0, shared_name="a")
handle1 = add_sparse_to_tensors_map(sp_input1, shared_name="a")
self.assertEqual(handle0.get_shape(), ())
- handles_concat = tf.stack([handle0, handle1])
+ handles_concat = array_ops.stack([handle0, handle1])
sp_out = take_many_sparse_from_tensors_map(
sparse_map_op=handle0.op, sparse_handles=handles_concat)
@@ -102,19 +104,16 @@ class SparseTensorsMapTest(tf.test.TestCase):
input1_val = self._SparseTensorValue_3x4(np.arange(6))
handle = add_sparse_to_tensors_map(sp_input)
- handle0_value = sess.run(
- handle, feed_dict={sp_input: input0_val})
- handle1_value = sess.run(
- handle, feed_dict={sp_input: input1_val})
+ handle0_value = sess.run(handle, feed_dict={sp_input: input0_val})
+ handle1_value = sess.run(handle, feed_dict={sp_input: input1_val})
- sparse_handles = tf.convert_to_tensor(
- [handle0_value, handle1_value], dtype=tf.int64)
+ sparse_handles = ops.convert_to_tensor(
+ [handle0_value, handle1_value], dtype=dtypes.int64)
sp_roundtrip = take_many_sparse_from_tensors_map(
sparse_map_op=handle.op, sparse_handles=sparse_handles)
- combined_indices, combined_values, combined_shape = sess.run(
- sp_roundtrip)
+ combined_indices, combined_values, combined_shape = sess.run(sp_roundtrip)
self.assertAllEqual(combined_indices[:6, 0], [0] * 6) # minibatch 0
self.assertAllEqual(combined_indices[:6, 1:], input0_val[0])
@@ -130,15 +129,17 @@ class SparseTensorsMapTest(tf.test.TestCase):
indices_value = np.array([[0, 0], [0, 1], [2, 0]], dtype=np.int64)
values_value = np.array([b"a", b"b", b"c"])
shape_value = np.array([4, 5], dtype=np.int64)
- sparse_tensor = self._SparseTensorPlaceholder(dtype=tf.string)
+ sparse_tensor = self._SparseTensorPlaceholder(dtype=dtypes.string)
handles = add_many_sparse_to_tensors_map(sparse_tensor)
roundtrip = take_many_sparse_from_tensors_map(
sparse_map_op=handles.op, sparse_handles=handles)
handles_value, roundtrip_value = sess.run(
[handles, roundtrip],
- feed_dict={sparse_tensor.indices: indices_value,
- sparse_tensor.values: values_value,
- sparse_tensor.dense_shape: shape_value})
+ feed_dict={
+ sparse_tensor.indices: indices_value,
+ sparse_tensor.values: values_value,
+ sparse_tensor.dense_shape: shape_value
+ })
self.assertEqual(handles_value.shape, (4,))
self.assertAllEqual(roundtrip_value.indices, indices_value)
self.assertAllEqual(roundtrip_value.values, values_value)
@@ -151,13 +152,11 @@ class SparseTensorsMapTest(tf.test.TestCase):
input1_val = self._SparseTensorValue_1x1x1()
handle = add_sparse_to_tensors_map(sp_input)
- handle0_value = sess.run(
- handle, feed_dict={sp_input: input0_val})
- handle1_value = sess.run(
- handle, feed_dict={sp_input: input1_val})
+ handle0_value = sess.run(handle, feed_dict={sp_input: input0_val})
+ handle1_value = sess.run(handle, feed_dict={sp_input: input1_val})
- handle_concat = tf.convert_to_tensor(
- [handle0_value, handle1_value], dtype=tf.int64)
+ handle_concat = ops.convert_to_tensor(
+ [handle0_value, handle1_value], dtype=dtypes.int64)
sp_roundtrip = take_many_sparse_from_tensors_map(
sparse_map_op=handle.op, sparse_handles=handle_concat)
@@ -174,14 +173,13 @@ class SparseTensorsMapTest(tf.test.TestCase):
handle_value = sess.run(handle)
bad_handle = handle_value + 10
sp_roundtrip = take_many_sparse_from_tensors_map(
- sparse_map_op=handle.op,
- sparse_handles=[handle_value, bad_handle])
+ sparse_map_op=handle.op, sparse_handles=[handle_value, bad_handle])
with self.assertRaisesOpError(r"Unable to find SparseTensor: 10"):
sess.run(sp_roundtrip)
-class BenchmarkSparseTensorsMapVsSerialization(tf.test.Benchmark):
+class BenchmarkSparseTensorsMapVsSerialization(test.Benchmark):
def benchmarkVeryLarge2DFloatSparseTensor(self):
np.random.seed(127)
@@ -194,41 +192,45 @@ class BenchmarkSparseTensorsMapVsSerialization(tf.test.Benchmark):
sorted(zip(indices_batch, indices_value)), dtype=np.int64)
values = ["feature_value_for_embedding_lookup"] * num_elements
shape = np.asarray([batch_size, num_elements], dtype=np.int64)
- with tf.Session() as sess:
- with tf.device("/cpu:0"):
- indices = tf.Variable(indices)
- values = tf.Variable(values)
- shape = tf.Variable(shape)
- st = tf.SparseTensor(indices, values, shape)
+ with session.Session() as sess:
+ with ops.device("/cpu:0"):
+ indices = variables.Variable(indices)
+ values = variables.Variable(values)
+ shape = variables.Variable(shape)
+ st = sparse_tensor_lib.SparseTensor(indices, values, shape)
st_handles = add_many_sparse_to_tensors_map(st)
st_roundtrip = take_many_sparse_from_tensors_map(
sparse_map_op=st_handles.op, sparse_handles=st_handles)
st_roundtrip_op = st_roundtrip.values.op
- st_serialized = tf.serialize_many_sparse(st)
- st_deserialized = tf.deserialize_many_sparse(
+ st_serialized = sparse_ops.serialize_many_sparse(st)
+ st_deserialized = sparse_ops.deserialize_many_sparse(
st_serialized, dtype=values.dtype)
st_deserialized_op = st_deserialized.values.op
- tf.global_variables_initializer().run()
+ variables.global_variables_initializer().run()
st_roundtrip_values = sess.run(st_roundtrip)
st_deserialized_values = sess.run(st_deserialized)
- np.testing.assert_equal(
- st_roundtrip_values.values, st_deserialized_values.values)
- np.testing.assert_equal(
- st_roundtrip_values.indices, st_deserialized_values.indices)
- np.testing.assert_equal(
- st_roundtrip_values.dense_shape, st_deserialized_values.dense_shape)
+ np.testing.assert_equal(st_roundtrip_values.values,
+ st_deserialized_values.values)
+ np.testing.assert_equal(st_roundtrip_values.indices,
+ st_deserialized_values.indices)
+ np.testing.assert_equal(st_roundtrip_values.dense_shape,
+ st_deserialized_values.dense_shape)
self.run_op_benchmark(
- sess, st_roundtrip_op, min_iters=2000,
+ sess,
+ st_roundtrip_op,
+ min_iters=2000,
name="benchmark_very_large_2d_float_st_tensor_maps")
self.run_op_benchmark(
- sess, st_deserialized_op, min_iters=2000,
+ sess,
+ st_deserialized_op,
+ min_iters=2000,
name="benchmark_very_large_2d_float_st_serialization")
if __name__ == "__main__":
- tf.test.main()
+ test.main()
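
For orientation, a minimal add/take round trip through the tensors map, using the same private aliases the test declares; the toy tensor and the shared_name are illustrative assumptions:

    import numpy as np

    from tensorflow.python.client import session
    from tensorflow.python.framework import sparse_tensor
    from tensorflow.python.ops import array_ops
    from tensorflow.python.ops import sparse_ops

    # pylint: disable=protected-access
    add_sparse_to_tensors_map = sparse_ops._add_sparse_to_tensors_map
    take_many_sparse_from_tensors_map = (
        sparse_ops._take_many_sparse_from_tensors_map)
    # pylint: enable=protected-access

    st = sparse_tensor.SparseTensor(
        indices=np.array([[0, 0]], dtype=np.int64),
        values=np.array([7], dtype=np.int32),
        dense_shape=np.array([2, 2], dtype=np.int64))

    handle = add_sparse_to_tensors_map(st, shared_name="demo")
    roundtrip = take_many_sparse_from_tensors_map(
        sparse_map_op=handle.op,
        sparse_handles=array_ops.stack([handle, handle]))

    with session.Session() as sess:
      print(sess.run(roundtrip).dense_shape)  # [2, 2, 2]: minibatch of two
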
diff --git a/tensorflow/python/kernel_tests/sparse_to_dense_op_py_test.py b/tensorflow/python/kernel_tests/sparse_to_dense_op_py_test.py
index 092e510340..87a4eb9c7b 100644
--- a/tensorflow/python/kernel_tests/sparse_to_dense_op_py_test.py
+++ b/tensorflow/python/kernel_tests/sparse_to_dense_op_py_test.py
@@ -12,25 +12,34 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for tensorflow.kernels.sparse_op."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+
+from tensorflow.python.framework import dtypes
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import sparse_ops
+from tensorflow.python.platform import test
-def _SparseToDense(sparse_indices, output_size, sparse_values,
- default_value, validate_indices=True):
- return tf.sparse_to_dense(sparse_indices, output_size,
- sparse_values,
- default_value=default_value,
- validate_indices=validate_indices)
+def _SparseToDense(sparse_indices,
+ output_size,
+ sparse_values,
+ default_value,
+ validate_indices=True):
+ return sparse_ops.sparse_to_dense(
+ sparse_indices,
+ output_size,
+ sparse_values,
+ default_value=default_value,
+ validate_indices=validate_indices)
-class SparseToDenseTest(tf.test.TestCase):
+class SparseToDenseTest(test.TestCase):
def testInt(self):
with self.test_session(use_gpu=False):
@@ -73,7 +82,7 @@ class SparseToDenseTest(tf.test.TestCase):
def testZeroDefault(self):
with self.test_session():
- x = tf.sparse_to_dense(2, [4], 7).eval()
+ x = sparse_ops.sparse_to_dense(2, [4], 7).eval()
self.assertAllEqual(x, [0, 0, 7, 0])
def test3d(self):
@@ -113,64 +122,78 @@ class SparseToDenseTest(tf.test.TestCase):
def testOutOfBoundsIndicesWithWithoutValidation(self):
with self.test_session():
dense = _SparseToDense(
- sparse_indices=[[1], [10]], output_size=[5],
- sparse_values=[-1.0, 1.0], default_value=0.0)
+ sparse_indices=[[1], [10]],
+ output_size=[5],
+ sparse_values=[-1.0, 1.0],
+ default_value=0.0)
with self.assertRaisesOpError(
r"indices\[1\] = \[10\] is out of bounds: need 0 <= index < \[5\]"):
dense.eval()
# Disable checks, the allocation should still fail.
with self.assertRaisesOpError("out of bounds"):
dense_without_validation = _SparseToDense(
- sparse_indices=[[1], [10]], output_size=[5],
- sparse_values=[-1.0, 1.0], default_value=0.0,
+ sparse_indices=[[1], [10]],
+ output_size=[5],
+ sparse_values=[-1.0, 1.0],
+ default_value=0.0,
validate_indices=False)
dense_without_validation.eval()
def testRepeatingIndicesWithWithoutValidation(self):
with self.test_session():
dense = _SparseToDense(
- sparse_indices=[[1], [1]], output_size=[5],
- sparse_values=[-1.0, 1.0], default_value=0.0)
+ sparse_indices=[[1], [1]],
+ output_size=[5],
+ sparse_values=[-1.0, 1.0],
+ default_value=0.0)
with self.assertRaisesOpError(r"indices\[1\] = \[1\] is repeated"):
dense.eval()
# Disable checks
dense_without_validation = _SparseToDense(
- sparse_indices=[[1], [1]], output_size=[5],
- sparse_values=[-1.0, 1.0], default_value=0.0, validate_indices=False)
+ sparse_indices=[[1], [1]],
+ output_size=[5],
+ sparse_values=[-1.0, 1.0],
+ default_value=0.0,
+ validate_indices=False)
dense_without_validation.eval()
def testUnsortedIndicesWithWithoutValidation(self):
with self.test_session():
dense = _SparseToDense(
- sparse_indices=[[2], [1]], output_size=[5],
- sparse_values=[-1.0, 1.0], default_value=0.0)
+ sparse_indices=[[2], [1]],
+ output_size=[5],
+ sparse_values=[-1.0, 1.0],
+ default_value=0.0)
with self.assertRaisesOpError(r"indices\[1\] = \[1\] is out of order"):
dense.eval()
# Disable checks
dense_without_validation = _SparseToDense(
- sparse_indices=[[2], [1]], output_size=[5],
- sparse_values=[-1.0, 1.0], default_value=0.0, validate_indices=False)
+ sparse_indices=[[2], [1]],
+ output_size=[5],
+ sparse_values=[-1.0, 1.0],
+ default_value=0.0,
+ validate_indices=False)
dense_without_validation.eval()
def testShapeInferenceKnownShape(self):
with self.test_session(use_gpu=False):
- indices = tf.placeholder(tf.int64)
+ indices = array_ops.placeholder(dtypes.int64)
shape = [4, 5, 6]
- output = tf.sparse_to_dense(indices, shape, 1, 0)
+ output = sparse_ops.sparse_to_dense(indices, shape, 1, 0)
self.assertEqual(output.get_shape(), [4, 5, 6])
- shape = tf.placeholder(tf.int64, shape=(3,))
- output = tf.sparse_to_dense(indices, shape, 1, 0)
+ shape = array_ops.placeholder(dtypes.int64, shape=(3,))
+ output = sparse_ops.sparse_to_dense(indices, shape, 1, 0)
self.assertEqual(output.get_shape().as_list(), [None, None, None])
def testShapeInferenceUnknownShape(self):
with self.test_session(use_gpu=False):
- indices = tf.placeholder(tf.int64)
- shape = tf.placeholder(tf.int64)
- output = tf.sparse_to_dense(indices, shape, 1, 0)
+ indices = array_ops.placeholder(dtypes.int64)
+ shape = array_ops.placeholder(dtypes.int64)
+ output = sparse_ops.sparse_to_dense(indices, shape, 1, 0)
self.assertEqual(output.get_shape().ndims, None)
if __name__ == "__main__":
- tf.test.main()
+ test.main()
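
A one-screen sketch of the basic scatter that sparse_to_dense performs, with illustrative indices and values:

    from tensorflow.python.client import session
    from tensorflow.python.ops import sparse_ops

    # 1-D indices scatter sparse_values into a dense vector of shape [4];
    # every other position takes default_value.
    dense = sparse_ops.sparse_to_dense(
        sparse_indices=[0, 2], output_shape=[4], sparse_values=[5, 7],
        default_value=0)

    with session.Session() as sess:
      print(sess.run(dense))  # [5 0 7 0]
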
diff --git a/tensorflow/python/kernel_tests/sparse_xent_op_test.py b/tensorflow/python/kernel_tests/sparse_xent_op_test.py
index 6794b09ac4..ef94af54fe 100644
--- a/tensorflow/python/kernel_tests/sparse_xent_op_test.py
+++ b/tensorflow/python/kernel_tests/sparse_xent_op_test.py
@@ -12,8 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for SparseSoftmaxCrossEntropyWithLogits op."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
@@ -22,14 +22,26 @@ import sys
import time
import numpy as np
-import tensorflow as tf
+from tensorflow.core.protobuf import config_pb2
+from tensorflow.python.client import session
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import errors_impl
+from tensorflow.python.framework import ops as ops_lib
+from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
+from tensorflow.python.ops import gradient_checker
+from tensorflow.python.ops import gradients_impl
+from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import sparse_ops
+import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
+from tensorflow.python.platform import app
+from tensorflow.python.platform import test
-class SparseXentTest(tf.test.TestCase):
+class SparseXentTest(test.TestCase):
def _npXent(self, features, labels):
features = np.reshape(features, [-1, features.shape[-1]])
@@ -37,8 +49,9 @@ class SparseXentTest(tf.test.TestCase):
batch_dim = 0
class_dim = 1
batch_size = features.shape[batch_dim]
- e = np.exp(features -
- np.reshape(np.amax(features, axis=class_dim), [batch_size, 1]))
+ e = np.exp(features - np.reshape(
+ np.amax(features, axis=class_dim), [batch_size, 1]))
probs = e / np.reshape(np.sum(e, axis=class_dim), [batch_size, 1])
labels_mat = np.zeros_like(probs).astype(probs.dtype)
labels_mat[np.arange(batch_size), labels] = 1.0
@@ -66,32 +79,27 @@ class SparseXentTest(tf.test.TestCase):
self.assertAllClose([[0.0], [0.0], [0.0]], tf_backprop)
def testInvalidLabel(self):
- features = [
- [1., 1., 1., 1.],
- [1., 1., 1., 1.],
- [1., 2., 3., 4.],
- [1., 2., 3., 4.]]
+ features = [[1., 1., 1., 1.], [1., 1., 1., 1.], [1., 2., 3., 4.],
+ [1., 2., 3., 4.]]
labels = [4, 3, 0, -1]
- if tf.test.is_built_with_cuda() and tf.test.is_gpu_available():
+ if test.is_built_with_cuda() and test.is_gpu_available():
with self.test_session(use_gpu=True) as sess:
- loss, backprop = (
- gen_nn_ops._sparse_softmax_cross_entropy_with_logits(
- features, labels))
+ loss, backprop = (gen_nn_ops._sparse_softmax_cross_entropy_with_logits(
+ features, labels))
tf_loss, tf_backprop = sess.run([loss, backprop])
self.assertAllClose(
- [[np.nan] * 4,
- [0.25, 0.25, 0.25, -0.75],
- [-0.968, 0.087, 0.237, 0.6439],
- [np.nan] * 4],
- tf_backprop, rtol=1e-3, atol=1e-3)
+ [[np.nan] * 4, [0.25, 0.25, 0.25, -0.75],
+ [-0.968, 0.087, 0.237, 0.6439], [np.nan] * 4],
+ tf_backprop,
+ rtol=1e-3,
+ atol=1e-3)
self.assertAllClose(
[np.nan, 1.3862, 3.4420, np.nan], tf_loss, rtol=1e-3, atol=1e-3)
with self.test_session(use_gpu=False) as sess:
- loss, backprop = (
- gen_nn_ops._sparse_softmax_cross_entropy_with_logits(
- features, labels))
+ loss, backprop = (gen_nn_ops._sparse_softmax_cross_entropy_with_logits(
+ features, labels))
with self.assertRaisesOpError("Received a label value of"):
sess.run([loss, backprop])
@@ -121,36 +129,37 @@ class SparseXentTest(tf.test.TestCase):
# The loss for this batch is [1.0 * -log(0.25), 1.0 * -log(0.032)]
# = [1.3862, 3.4420]
np_loss, np_backprop = self._npXent(np.array(features), np.array(labels))
- self.assertAllClose(np.array([[0.25, 0.25, 0.25, -0.75],
- [-0.968, 0.087, 0.237, 0.6439]]),
- np_backprop,
- rtol=1.e-3, atol=1.e-3)
- self.assertAllClose(np.array([1.3862, 3.4420]), np_loss,
- rtol=1.e-3, atol=1.e-3)
+ self.assertAllClose(
+ np.array([[0.25, 0.25, 0.25, -0.75], [-0.968, 0.087, 0.237, 0.6439]]),
+ np_backprop,
+ rtol=1.e-3,
+ atol=1.e-3)
+ self.assertAllClose(
+ np.array([1.3862, 3.4420]), np_loss, rtol=1.e-3, atol=1.e-3)
def testShapeMismatch(self):
with self.test_session(use_gpu=True):
with self.assertRaisesRegexp(ValueError, ".*Rank mismatch:*"):
- tf.nn.sparse_softmax_cross_entropy_with_logits(
+ nn_ops.sparse_softmax_cross_entropy_with_logits(
[[0., 1.], [2., 3.], [2., 3.]], [[0, 2]])
def testScalar(self):
with self.test_session(use_gpu=True):
with self.assertRaisesRegexp(ValueError, ".*Logits cannot be scalars*"):
- tf.nn.sparse_softmax_cross_entropy_with_logits(
- tf.constant(1.0), tf.constant(0))
+ nn_ops.sparse_softmax_cross_entropy_with_logits(
+ constant_op.constant(1.0), constant_op.constant(0))
def testLabelsPlaceholderScalar(self):
with self.test_session(use_gpu=True):
- labels = tf.placeholder(np.int32)
- y = tf.nn.sparse_softmax_cross_entropy_with_logits([[7.]], labels)
+ labels = array_ops.placeholder(np.int32)
+ y = nn_ops.sparse_softmax_cross_entropy_with_logits([[7.]], labels)
with self.assertRaisesOpError("labels must be 1-D"):
y.eval(feed_dict={labels: 0})
def testVector(self):
with self.test_session(use_gpu=True):
- loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
- tf.constant([1.0]), tf.constant(0))
+ loss = nn_ops.sparse_softmax_cross_entropy_with_logits(
+ constant_op.constant([1.0]), constant_op.constant(0))
self.assertAllClose(0.0, loss.eval())
def testFloat(self):
@@ -176,13 +185,14 @@ class SparseXentTest(tf.test.TestCase):
def testGradient(self):
with self.test_session(use_gpu=True):
- l = tf.constant([3, 0, 1], name="l")
- f = tf.constant([0.1, 0.2, 0.3, 0.4,
- 0.1, 0.4, 0.9, 1.6,
- 0.1, 0.8, 2.7, 6.4], shape=[3, 4],
- dtype=tf.float64, name="f")
- x = tf.nn.sparse_softmax_cross_entropy_with_logits(f, l, name="xent")
- err = tf.test.compute_gradient_error(f, [3, 4], x, [3])
+ l = constant_op.constant([3, 0, 1], name="l")
+ f = constant_op.constant(
+ [0.1, 0.2, 0.3, 0.4, 0.1, 0.4, 0.9, 1.6, 0.1, 0.8, 2.7, 6.4],
+ shape=[3, 4],
+ dtype=dtypes.float64,
+ name="f")
+ x = nn_ops.sparse_softmax_cross_entropy_with_logits(f, l, name="xent")
+ err = gradient_checker.compute_gradient_error(f, [3, 4], x, [3])
print("cross entropy gradient err = ", err)
self.assertLess(err, 5e-8)
@@ -191,8 +201,7 @@ class SparseXentTest(tf.test.TestCase):
# manually reshape loss
np_loss = np.reshape(np_loss, np.array(labels).shape)
with self.test_session(use_gpu=True) as sess:
- loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
- features, labels)
+ loss = nn_ops.sparse_softmax_cross_entropy_with_logits(features, labels)
backprop = loss.op.inputs[0].op.outputs[1]
tf_loss, tf_backprop = sess.run([loss, backprop])
self.assertAllCloseAccordingToType(np_loss, tf_loss)
@@ -211,33 +220,32 @@ class SparseXentTest(tf.test.TestCase):
def testScalarHandling(self):
with self.test_session(use_gpu=False) as sess:
- with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
+ with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
".*labels must be 1-D.*"):
- labels = tf.placeholder(tf.int32, shape=[None, 1])
- logits = tf.placeholder(tf.float32, shape=[None, 3])
- ce = tf.nn.sparse_softmax_cross_entropy_with_logits(
- logits,
- tf.squeeze(labels))
+ labels = array_ops.placeholder(dtypes.int32, shape=[None, 1])
+ logits = array_ops.placeholder(dtypes.float32, shape=[None, 3])
+ ce = nn_ops.sparse_softmax_cross_entropy_with_logits(
+ logits, array_ops.squeeze(labels))
labels_v2 = np.zeros((1, 1), dtype=np.int32)
logits_v2 = np.random.randn(1, 3)
- sess.run([ce], feed_dict={labels: labels_v2,
- logits: logits_v2})
+ sess.run([ce], feed_dict={labels: labels_v2, logits: logits_v2})
def _sparse_vs_dense_xent_benchmark_dense(labels, logits):
- labels = tf.identity(labels)
- logits = tf.identity(logits)
- with tf.device("/cpu:0"): # Sparse-to-dense must be on CPU
- batch_size = tf.shape(logits)[0]
- num_entries = tf.shape(logits)[1]
+ labels = array_ops.identity(labels)
+ logits = array_ops.identity(logits)
+ with ops_lib.device("/cpu:0"): # Sparse-to-dense must be on CPU
+ batch_size = array_ops.shape(logits)[0]
+ num_entries = array_ops.shape(logits)[1]
length = batch_size * num_entries
- labels += num_entries * tf.range(batch_size)
- target = sparse_ops.sparse_to_dense(labels, tf.stack([length]), 1.0, 0.0)
- target = tf.reshape(target, tf.stack([-1, num_entries]))
- crossent = tf.nn.softmax_cross_entropy_with_logits(
+ labels += num_entries * math_ops.range(batch_size)
+ target = sparse_ops.sparse_to_dense(labels,
+ array_ops.stack([length]), 1.0, 0.0)
+ target = array_ops.reshape(target, array_ops.stack([-1, num_entries]))
+ crossent = nn_ops.softmax_cross_entropy_with_logits(
logits, target, name="SequenceLoss/CrossEntropy")
- crossent_sum = tf.reduce_sum(crossent)
- grads = tf.gradients([crossent_sum], [logits])[0]
+ crossent_sum = math_ops.reduce_sum(crossent)
+ grads = gradients_impl.gradients([crossent_sum], [logits])[0]
return (crossent_sum, grads)
@@ -245,18 +253,18 @@ def _sparse_vs_dense_xent_benchmark_dense(labels, logits):
def _sparse_vs_dense_xent_benchmark_sparse(labels, logits):
# Using sparse_softmax_cross_entropy_with_logits
labels = labels.astype(np.int64)
- labels = tf.identity(labels)
- logits = tf.identity(logits)
- crossent = tf.nn.sparse_softmax_cross_entropy_with_logits(
+ labels = array_ops.identity(labels)
+ logits = array_ops.identity(logits)
+ crossent = nn_ops.sparse_softmax_cross_entropy_with_logits(
logits, labels, name="SequenceLoss/CrossEntropy")
- crossent_sum = tf.reduce_sum(crossent)
- grads = tf.gradients([crossent_sum], [logits])[0]
+ crossent_sum = math_ops.reduce_sum(crossent)
+ grads = gradients_impl.gradients([crossent_sum], [logits])[0]
return (crossent_sum, grads)
def sparse_vs_dense_xent_benchmark(batch_size, num_entries, use_gpu):
- config = tf.ConfigProto()
+ config = config_pb2.ConfigProto()
config.allow_soft_placement = True
config.gpu_options.per_process_gpu_memory_fraction = 0.3
labels = np.random.randint(num_entries, size=batch_size).astype(np.int32)
@@ -273,30 +281,29 @@ def sparse_vs_dense_xent_benchmark(batch_size, num_entries, use_gpu):
sess.run(ops)
end = time.time()
- return (end - start)/20.0 # Average runtime per iteration
+ return (end - start) / 20.0 # Average runtime per iteration
# Using sparse_to_dense and softmax_cross_entropy_with_logits
- with tf.Session(config=config) as sess:
+ with session.Session(config=config) as sess:
if not use_gpu:
- with tf.device("/cpu:0"):
+ with ops_lib.device("/cpu:0"):
ops = _sparse_vs_dense_xent_benchmark_dense(labels, logits)
else:
ops = _sparse_vs_dense_xent_benchmark_dense(labels, logits)
delta_dense = _timer(sess, ops)
# Using sparse_softmax_cross_entropy_with_logits
- with tf.Session(config=config) as sess:
+ with session.Session(config=config) as sess:
if not use_gpu:
- with tf.device("/cpu:0"):
+ with ops_lib.device("/cpu:0"):
ops = _sparse_vs_dense_xent_benchmark_sparse(labels, logits)
else:
ops = _sparse_vs_dense_xent_benchmark_sparse(labels, logits)
delta_sparse = _timer(sess, ops)
- print(
- "%d \t %d \t %s \t %f \t %f \t %f"
- % (batch_size, num_entries, use_gpu, delta_dense, delta_sparse,
- delta_sparse/delta_dense))
+ print("%d \t %d \t %s \t %f \t %f \t %f" % (batch_size, num_entries, use_gpu,
+ delta_dense, delta_sparse,
+ delta_sparse / delta_dense))
def main(_):
@@ -306,17 +313,14 @@ def main(_):
for use_gpu in (False, True):
for batch_size in (32, 64, 128):
for num_entries in (100, 1000, 10000):
- sparse_vs_dense_xent_benchmark(
- batch_size, num_entries, use_gpu)
- sparse_vs_dense_xent_benchmark(
- 32, 100000, use_gpu)
- sparse_vs_dense_xent_benchmark(
- 8, 1000000, use_gpu)
+ sparse_vs_dense_xent_benchmark(batch_size, num_entries, use_gpu)
+ sparse_vs_dense_xent_benchmark(32, 100000, use_gpu)
+ sparse_vs_dense_xent_benchmark(8, 1000000, use_gpu)
if __name__ == "__main__":
if "--benchmarks" in sys.argv:
sys.argv.remove("--benchmarks")
- tf.app.run()
+ app.run()
else:
- tf.test.main()
+ test.main()
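
For intuition, a numpy-only sketch of the reference computation _npXent performs for a single row; with these logits and label 0 it reproduces the constants asserted in testInvalidLabel (loss ~3.44, backprop ~[-0.968, 0.087, 0.237, 0.6439]):

    import numpy as np

    logits = np.array([1., 2., 3., 4.])
    label = 0

    # Numerically stable softmax, then the negative log-likelihood of label.
    e = np.exp(logits - np.amax(logits))
    probs = e / np.sum(e)          # [0.0321, 0.0871, 0.2369, 0.6439]
    loss = -np.log(probs[label])   # ~3.44
    backprop = probs.copy()
    backprop[label] -= 1.0         # d(loss)/d(logits)
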
diff --git a/tensorflow/python/kernel_tests/sparsemask_op_test.py b/tensorflow/python/kernel_tests/sparsemask_op_test.py
index 59f1aec8d0..cf6c9494ae 100644
--- a/tensorflow/python/kernel_tests/sparsemask_op_test.py
+++ b/tensorflow/python/kernel_tests/sparsemask_op_test.py
@@ -18,10 +18,13 @@ from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+from tensorflow.python.framework import ops
+from tensorflow.python.ops import array_ops
+from tensorflow.python.platform import test
-class SparseMaskTest(tf.test.TestCase):
+
+class SparseMaskTest(test.TestCase):
def testBasic(self):
values = np.random.rand(4, 4).astype(np.single)
@@ -32,18 +35,19 @@ class SparseMaskTest(tf.test.TestCase):
out_indices = np.array([2, 3, 4], dtype=np.int32)
with self.test_session() as sess:
- values_tensor = tf.convert_to_tensor(values)
- indices_tensor = tf.convert_to_tensor(indices)
- mask_indices_tensor = tf.convert_to_tensor(mask_indices)
+ values_tensor = ops.convert_to_tensor(values)
+ indices_tensor = ops.convert_to_tensor(indices)
+ mask_indices_tensor = ops.convert_to_tensor(mask_indices)
- t = tf.IndexedSlices(values_tensor, indices_tensor)
- masked_t = tf.sparse_mask(t, mask_indices_tensor)
+ t = ops.IndexedSlices(values_tensor, indices_tensor)
+ masked_t = array_ops.sparse_mask(t, mask_indices_tensor)
- tf_out_values, tf_out_indices = sess.run([masked_t.values,
- masked_t.indices])
+ tf_out_values, tf_out_indices = sess.run(
+ [masked_t.values, masked_t.indices])
self.assertAllEqual(tf_out_values, out_values)
self.assertAllEqual(tf_out_indices, out_indices)
+
if __name__ == "__main__":
- tf.test.main()
+ test.main()
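
A small sketch of what sparse_mask does to an IndexedSlices value, mirroring testBasic with tiny illustrative data:

    import numpy as np

    from tensorflow.python.client import session
    from tensorflow.python.framework import ops
    from tensorflow.python.ops import array_ops

    values = ops.convert_to_tensor(
        np.arange(8.0).reshape(4, 2).astype(np.float32))
    indices = ops.convert_to_tensor(np.array([0, 2, 3, 4], dtype=np.int32))

    t = ops.IndexedSlices(values, indices)
    # Drops the slices whose indices appear in mask_indices (here 0 and 4).
    masked = array_ops.sparse_mask(t, [0, 4])

    with session.Session() as sess:
      idx, vals = sess.run([masked.indices, masked.values])
      print(idx)   # [2 3]
      print(vals)  # rows 1 and 2 of the original values
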
diff --git a/tensorflow/python/kernel_tests/split_op_test.py b/tensorflow/python/kernel_tests/split_op_test.py
index a76acea4cd..5f8a3f3ab2 100644
--- a/tensorflow/python/kernel_tests/split_op_test.py
+++ b/tensorflow/python/kernel_tests/split_op_test.py
@@ -12,44 +12,51 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Functional tests for Split Op."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import gradients_impl
+from tensorflow.python.ops import math_ops
+from tensorflow.python.platform import test
-class SplitOpTest(tf.test.TestCase):
+class SplitOpTest(test.TestCase):
def testExplicitNum(self):
- size_splits = tf.placeholder(dtype=tf.int32, shape=[None])
+ size_splits = array_ops.placeholder(dtype=dtypes.int32, shape=[None])
value = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
with self.test_session(use_gpu=False) as sess:
with self.assertRaises(ValueError) as context:
- sess.run(tf.split(value, size_splits), {size_splits: [2, 2, 6]})
+ sess.run(array_ops.split(value, size_splits), {size_splits: [2, 2, 6]})
self.assertTrue("Cannot infer num from shape" in str(context.exception))
- result = sess.run(tf.split(value, size_splits, num=3),
- {size_splits: [2, 2, 6]})
+ result = sess.run(array_ops.split(
+ value, size_splits, num=3), {size_splits: [2, 2, 6]})
self.assertAllEqual(result[0], value[0:2])
self.assertAllEqual(result[1], value[2:4])
self.assertAllEqual(result[2], value[4:])
def testListOfScalarTensors(self):
- a = tf.to_int32(5)
- b = tf.to_int32(6)
+ a = math_ops.to_int32(5)
+ b = math_ops.to_int32(6)
value = np.random.rand(11, 11)
with self.test_session(use_gpu=False) as sess:
- result = sess.run(tf.split(value, [a, b]))
+ result = sess.run(array_ops.split(value, [a, b]))
self.assertAllEqual(result[0], value[0:5, :])
self.assertAllEqual(result[1], value[5:, :])
@@ -66,7 +73,7 @@ class SplitOpTest(tf.test.TestCase):
shape[split_dim] = np.sum(size_splits)
inp = np.random.rand(*shape).astype("f")
with self.test_session(use_gpu=use_gpu) as sess:
- result = sess.run(tf.split(inp, size_splits, split_dim))
+ result = sess.run(array_ops.split(inp, size_splits, split_dim))
slices = [slice(0, x) for x in shape]
offset = 0
for i in range(num_split):
@@ -78,10 +85,10 @@ class SplitOpTest(tf.test.TestCase):
inp = np.random.rand(4, 4).astype("f")
with self.test_session(use_gpu=use_gpu) as sess:
- result = sess.run(tf.split(inp, [4], 0))
+ result = sess.run(array_ops.split(inp, [4], 0))
self.assertAllEqual(result[0], inp)
- result = sess.run(tf.split(inp, [-1, 3], 0))
+ result = sess.run(array_ops.split(inp, [-1, 3], 0))
self.assertAllEqual(result[0], inp[0:1, :])
self.assertAllEqual(result[1], inp[1:4, :])
@@ -92,7 +99,7 @@ class SplitOpTest(tf.test.TestCase):
split_dim = 1
inp = np.random.rand(*shape).astype("f")
with self.test_session(use_gpu=use_gpu) as sess:
- result = sess.run(tf.split(inp, size_splits, split_dim))
+ result = sess.run(array_ops.split(inp, size_splits, split_dim))
slices = [slice(0, x) for x in shape]
offset = 0
for i in range(num_split):
@@ -109,13 +116,13 @@ class SplitOpTest(tf.test.TestCase):
def _testGradientsSimpleVariable(self, use_gpu):
inp = np.random.rand(4, 4).astype("f")
with self.test_session(use_gpu=use_gpu):
- inp_tensor = tf.convert_to_tensor(inp)
- s = tf.split(inp_tensor, [1, 4], 1)
+ inp_tensor = ops.convert_to_tensor(inp)
+ s = array_ops.split(inp_tensor, [1, 4], 1)
inp_grads = [
np.random.rand(4, 1).astype("f"), np.random.rand(4, 3).astype("f")
]
- grad_tensors = [tf.constant(x) for x in inp_grads]
- grad = tf.gradients(s, [inp_tensor], grad_tensors)[-1]
+ grad_tensors = [constant_op.constant(x) for x in inp_grads]
+ grad = gradients_impl.gradients(s, [inp_tensor], grad_tensors)[-1]
result = grad.eval()
self.assertAllEqual(result[:, 0:1], inp_grads[0])
@@ -124,7 +131,7 @@ class SplitOpTest(tf.test.TestCase):
def _compare(self, x, dim, num, use_gpu):
np_ans = np.split(x, num, dim)
with self.test_session(use_gpu=use_gpu) as sess:
- tf_ans = tf.split(value=x, num_or_size_splits=num, axis=dim)
+ tf_ans = array_ops.split(value=x, num_or_size_splits=num, axis=dim)
out = sess.run(tf_ans)
self.assertEqual(num, len(np_ans))
@@ -151,7 +158,7 @@ class SplitOpTest(tf.test.TestCase):
def _testEmpty(self, x, dim, num, expected_shape):
with self.test_session() as sess:
- tf_ans = tf.split(value=x, num_or_size_splits=num, axis=dim)
+ tf_ans = array_ops.split(value=x, num_or_size_splits=num, axis=dim)
out = sess.run(tf_ans)
self.assertEqual(x.size, 0)
self.assertEqual(len(out), num)
@@ -194,7 +201,7 @@ class SplitOpTest(tf.test.TestCase):
inp = np.random.rand(*shape).astype("f")
with self.test_session(use_gpu=use_gpu) as sess:
result = sess.run(
- tf.split(
+ array_ops.split(
value=inp, num_or_size_splits=num_split, axis=split_dim))
slices = [slice(0, x) for x in shape]
offset = 0
@@ -216,14 +223,14 @@ class SplitOpTest(tf.test.TestCase):
def _testGradientsSimple(self, use_gpu):
inp = np.random.rand(4, 4).astype("f")
with self.test_session(use_gpu=use_gpu):
- inp_tensor = tf.convert_to_tensor(inp)
- s = tf.split(value=inp_tensor, num_or_size_splits=4, axis=1)
+ inp_tensor = ops.convert_to_tensor(inp)
+ s = array_ops.split(value=inp_tensor, num_or_size_splits=4, axis=1)
inp_grads = [np.random.rand(4, 1).astype("f") for _ in range(4)]
- grad_tensors = [tf.constant(x) for x in inp_grads]
- grad = tf.gradients(s, [inp_tensor], grad_tensors)[0]
+ grad_tensors = [constant_op.constant(x) for x in inp_grads]
+ grad = gradients_impl.gradients(s, [inp_tensor], grad_tensors)[0]
result = grad.eval()
for i in range(4):
- self.assertAllEqual(result[:, i:i+1], inp_grads[i])
+ self.assertAllEqual(result[:, i:i + 1], inp_grads[i])
def testGradientsAll(self):
self._testGradientsSimple(use_gpu=False)
@@ -234,28 +241,28 @@ class SplitOpTest(tf.test.TestCase):
def testShapeFunctionEdgeCases(self):
# split_dim greater than rank of input.
with self.assertRaises(ValueError):
- tf.split(value=[[0, 1], [2, 3]], num_or_size_splits=4, axis=2)
+ array_ops.split(value=[[0, 1], [2, 3]], num_or_size_splits=4, axis=2)
# num_split does not evenly divide the size in split_dim.
with self.assertRaisesRegexp(ValueError, "should evenly divide"):
- tf.split(value=[0, 1, 2, 3], num_or_size_splits=3, axis=0)
+ array_ops.split(value=[0, 1, 2, 3], num_or_size_splits=3, axis=0)
# Unknown split_dim.
- splits = tf.split(
+ splits = array_ops.split(
value=[[0, 1, 2, 3]],
num_or_size_splits=4,
- axis=tf.placeholder(tf.int32))
+ axis=array_ops.placeholder(dtypes.int32))
for s in splits:
self.assertEqual([None, None], s.get_shape().as_list())
# Unknown split_dim and input shape.
- splits = tf.split(
- value=tf.placeholder(tf.float32),
+ splits = array_ops.split(
+ value=array_ops.placeholder(dtypes.float32),
num_or_size_splits=4,
- axis=tf.placeholder(tf.int32))
+ axis=array_ops.placeholder(dtypes.int32))
for s in splits:
self.assertEqual(None, s.get_shape().ndims)
if __name__ == "__main__":
- tf.test.main()
+ test.main()
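
Editorial aside, illustrative only and not part of the commit: every hunk above applies the same mechanical rewrite, replacing uses of the monolithic `tf.` alias with the specific modules a test actually needs. A minimal self-contained sketch of the converted style, also exercising the `-1` split size shown above (the class name `SplitSketch` is hypothetical):

import numpy as np

from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test


class SplitSketch(test.TestCase):

  def testNegativeSizeInfersRemainder(self):
    inp = np.random.rand(4, 4).astype("f")
    with self.test_session():
      # A size of -1 means "whatever is left along the axis": 4 - 3 = 1 row.
      first, rest = array_ops.split(inp, [-1, 3], 0)
      self.assertAllEqual(first.eval(), inp[0:1, :])
      self.assertAllEqual(rest.eval(), inp[1:4, :])


if __name__ == "__main__":
  test.main()
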
diff --git a/tensorflow/python/kernel_tests/stack_ops_test.py b/tensorflow/python/kernel_tests/stack_ops_test.py
index c0ed8a9b0e..5003fb6dea 100644
--- a/tensorflow/python/kernel_tests/stack_ops_test.py
+++ b/tensorflow/python/kernel_tests/stack_ops_test.py
@@ -12,28 +12,33 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for tensorflow.ops.stack_ops."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
+from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
+from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_data_flow_ops
+from tensorflow.python.ops import math_ops
+from tensorflow.python.platform import test
-class StackOpTest(tf.test.TestCase):
+class StackOpTest(test.TestCase):
def _testStackPushPop(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
- h = gen_data_flow_ops._stack(tf.float32, stack_name="foo")
+ h = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo")
c = gen_data_flow_ops._stack_push(h, [[4.0, 5.0]])
- with tf.control_dependencies([c]):
- c1 = gen_data_flow_ops._stack_pop(h, tf.float32)
+ with ops.control_dependencies([c]):
+ c1 = gen_data_flow_ops._stack_pop(h, dtypes.float32)
self.assertAllClose([[4.0, 5.0]], c1.eval())
def testStackPushPop(self):
@@ -43,11 +48,11 @@ class StackOpTest(tf.test.TestCase):
def _testStackPushPopSwap(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
a = np.arange(2000)
- x = tf.constant(a, dtype=tf.float32)
- h = gen_data_flow_ops._stack(tf.float32, stack_name="foo")
+ x = constant_op.constant(a, dtype=dtypes.float32)
+ h = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo")
c = gen_data_flow_ops._stack_push(h, x, swap_memory=True)
- with tf.control_dependencies([c]):
- c1 = gen_data_flow_ops._stack_pop(h, tf.float32)
+ with ops.control_dependencies([c]):
+ c1 = gen_data_flow_ops._stack_pop(h, dtypes.float32)
self.assertAllClose(a, c1.eval())
def testStackPushPopSwap(self):
@@ -56,28 +61,33 @@ class StackOpTest(tf.test.TestCase):
def _testStackWhileSwap(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
- n = tf.constant(0)
- h = gen_data_flow_ops._stack(tf.float32, stack_name="foo")
+ n = constant_op.constant(0)
+ h = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo")
def c(x):
- return tf.less(x, 10)
+ return math_ops.less(x, 10)
+
def b(x):
- with tf.control_dependencies([x]):
- a = tf.constant(np.ones(2000), dtype=tf.float32)
+ with ops.control_dependencies([x]):
+ a = constant_op.constant(np.ones(2000), dtype=dtypes.float32)
v = gen_data_flow_ops._stack_push(h, a, swap_memory=True)
- with tf.control_dependencies([v]):
- return tf.add(x, 1)
- r = tf.while_loop(c, b, [n])
- v = tf.constant(np.zeros(2000), dtype=tf.float32)
+ with ops.control_dependencies([v]):
+ return math_ops.add(x, 1)
+
+ r = control_flow_ops.while_loop(c, b, [n])
+
+ v = constant_op.constant(np.zeros(2000), dtype=dtypes.float32)
def c1(x, y):
- return tf.greater(x, 0)
+ return math_ops.greater(x, 0)
+
def b1(x, y):
- nx = tf.sub(x, 1)
- ny = y + gen_data_flow_ops._stack_pop(h, tf.float32)
+ nx = math_ops.sub(x, 1)
+ ny = y + gen_data_flow_ops._stack_pop(h, dtypes.float32)
return [nx, ny]
- rx, ry = tf.while_loop(c1, b1, [r, v],
- [r.get_shape(), tensor_shape.unknown_shape()])
+
+ rx, ry = control_flow_ops.while_loop(
+ c1, b1, [r, v], [r.get_shape(), tensor_shape.unknown_shape()])
self.assertAllClose(np.ones(2000) * 10.0, ry.eval())
def testStackWhileSwap(self):
@@ -86,14 +96,14 @@ class StackOpTest(tf.test.TestCase):
def _testMultiStack(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
- h1 = gen_data_flow_ops._stack(tf.float32, stack_name="foo")
+ h1 = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo")
c1 = gen_data_flow_ops._stack_push(h1, 4.0)
- with tf.control_dependencies([c1]):
- c1 = gen_data_flow_ops._stack_pop(h1, tf.float32)
- h2 = gen_data_flow_ops._stack(tf.float32, stack_name="bar")
+ with ops.control_dependencies([c1]):
+ c1 = gen_data_flow_ops._stack_pop(h1, dtypes.float32)
+ h2 = gen_data_flow_ops._stack(dtypes.float32, stack_name="bar")
c2 = gen_data_flow_ops._stack_push(h2, 5.0)
- with tf.control_dependencies([c2]):
- c2 = gen_data_flow_ops._stack_pop(h2, tf.float32)
+ with ops.control_dependencies([c2]):
+ c2 = gen_data_flow_ops._stack_pop(h2, dtypes.float32)
r = c1 + c2
self.assertAllClose(9.0, r.eval())
@@ -103,9 +113,9 @@ class StackOpTest(tf.test.TestCase):
def _testSameNameStacks(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
- h1 = gen_data_flow_ops._stack(tf.float32, stack_name="foo")
+ h1 = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo")
c1 = gen_data_flow_ops._stack_push(h1, 4.0)
- h2 = gen_data_flow_ops._stack(tf.float32, stack_name="foo")
+ h2 = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo")
c2 = gen_data_flow_ops._stack_push(h2, 5.0)
r = c1 + c2
self.assertNotEqual(h1.eval()[1], h2.eval()[1])
@@ -116,7 +126,7 @@ class StackOpTest(tf.test.TestCase):
def _testCloseStack(self, use_gpu):
with self.test_session(use_gpu=use_gpu) as sess:
- h = gen_data_flow_ops._stack(tf.float32, stack_name="foo")
+ h = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo")
c1 = gen_data_flow_ops._stack_close(h)
sess.run(c1)
@@ -126,9 +136,9 @@ class StackOpTest(tf.test.TestCase):
def _testPushCloseStack(self, use_gpu):
with self.test_session(use_gpu=use_gpu) as sess:
- h = gen_data_flow_ops._stack(tf.float32, stack_name="foo")
+ h = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo")
c = gen_data_flow_ops._stack_push(h, [[4.0, 5.0]])
- with tf.control_dependencies([c]):
+ with ops.control_dependencies([c]):
c1 = gen_data_flow_ops._stack_close(h)
sess.run(c1)
@@ -136,5 +146,6 @@ class StackOpTest(tf.test.TestCase):
self._testPushCloseStack(use_gpu=False)
self._testPushCloseStack(use_gpu=True)
+
if __name__ == "__main__":
- tf.test.main()
+ test.main()
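
Illustrative sketch, not part of the commit: the stack tests above drive the private `gen_data_flow_ops` endpoints directly, so ordering has to be enforced with explicit control dependencies rather than data edges. The class name `StackSketch` is hypothetical:

from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.platform import test


class StackSketch(test.TestCase):

  def testPushThenPop(self):
    with self.test_session():
      h = gen_data_flow_ops._stack(dtypes.float32, stack_name="sketch")
      c = gen_data_flow_ops._stack_push(h, 42.0)
      # Without this control dependency the pop has no edge to the push
      # and could be scheduled first.
      with ops.control_dependencies([c]):
        popped = gen_data_flow_ops._stack_pop(h, dtypes.float32)
      self.assertAllClose(42.0, popped.eval())


if __name__ == "__main__":
  test.main()
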
diff --git a/tensorflow/python/kernel_tests/string_join_op_test.py b/tensorflow/python/kernel_tests/string_join_op_test.py
index 575d6011b9..ce19333654 100644
--- a/tensorflow/python/kernel_tests/string_join_op_test.py
+++ b/tensorflow/python/kernel_tests/string_join_op_test.py
@@ -12,16 +12,16 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for string_join_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-import tensorflow as tf
+from tensorflow.python.ops import string_ops
+from tensorflow.python.platform import test
-class StringJoinOpTest(tf.test.TestCase):
+class StringJoinOpTest(test.TestCase):
def testStringJoin(self):
input0 = ["a", "b"]
@@ -29,24 +29,24 @@ class StringJoinOpTest(tf.test.TestCase):
input2 = [["b"], ["c"]]
with self.test_session():
- output = tf.string_join([input0, input1])
+ output = string_ops.string_join([input0, input1])
self.assertAllEqual(output.eval(), [b"aa", b"ba"])
- output = tf.string_join([input0, input1], separator="--")
+ output = string_ops.string_join([input0, input1], separator="--")
self.assertAllEqual(output.eval(), [b"a--a", b"b--a"])
- output = tf.string_join([input0, input1, input0], separator="--")
+ output = string_ops.string_join([input0, input1, input0], separator="--")
self.assertAllEqual(output.eval(), [b"a--a--a", b"b--a--b"])
- output = tf.string_join([input1] * 4, separator="!")
+ output = string_ops.string_join([input1] * 4, separator="!")
self.assertEqual(output.eval(), b"a!a!a!a")
- output = tf.string_join([input2] * 2, separator="")
+ output = string_ops.string_join([input2] * 2, separator="")
self.assertAllEqual(output.eval(), [[b"bb"], [b"cc"]])
with self.assertRaises(ValueError): # Inconsistent shapes
- tf.string_join([input0, input2]).eval()
+ string_ops.string_join([input0, input2]).eval()
if __name__ == "__main__":
- tf.test.main()
+ test.main()
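
Illustrative sketch (hypothetical `StringJoinSketch`, not in the commit): `string_join` joins elementwise across its input lists, with an optional separator between the pieces.

from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test


class StringJoinSketch(test.TestCase):

  def testElementwiseJoin(self):
    with self.test_session():
      # Joining is elementwise across the inputs, not a concatenation of
      # the lists themselves.
      output = string_ops.string_join([["x", "y"], ["1", "2"]], separator="-")
      self.assertAllEqual(output.eval(), [b"x-1", b"y-2"])


if __name__ == "__main__":
  test.main()
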
diff --git a/tensorflow/python/kernel_tests/string_split_op_test.py b/tensorflow/python/kernel_tests/string_split_op_test.py
index 5aa1390a9a..60ba16c1ac 100644
--- a/tensorflow/python/kernel_tests/string_split_op_test.py
+++ b/tensorflow/python/kernel_tests/string_split_op_test.py
@@ -13,21 +13,27 @@
# limitations under the License.
# ==============================================================================
"""Tests for string_split_op."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import errors_impl
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import string_ops
+from tensorflow.python.platform import test
-class StringSplitOpTest(tf.test.TestCase):
+class StringSplitOpTest(test.TestCase):
def testStringSplit(self):
strings = ["pigs on the wing", "animals"]
with self.test_session() as sess:
- tokens = tf.string_split(strings)
+ tokens = string_ops.string_split(strings)
indices, values, shape = sess.run(tokens)
self.assertAllEqual(indices, [[0, 0], [0, 1], [0, 2], [0, 3], [1, 0]])
self.assertAllEqual(values, [b"pigs", b"on", b"the", b"wing", b"animals"])
@@ -37,14 +43,17 @@ class StringSplitOpTest(tf.test.TestCase):
strings = ["hello", "hola", b"\xF0\x9F\x98\x8E"] # Last string is U+1F60E
with self.test_session() as sess:
- tokens = tf.string_split(strings, delimiter="")
+ tokens = string_ops.string_split(strings, delimiter="")
indices, values, shape = sess.run(tokens)
self.assertAllEqual(indices, [[0, 0], [0, 1], [0, 2], [0, 3], [0, 4],
- [1, 0], [1, 1], [1, 2], [1, 3],
- [2, 0], [2, 1], [2, 2], [2, 3]])
+ [1, 0], [1, 1], [1, 2], [1, 3], [2, 0],
+ [2, 1], [2, 2], [2, 3]])
expected = np.array(
- ['h', 'e', 'l', 'l', 'o', 'h', 'o', 'l',
- 'a', b'\xf0', b'\x9f', b'\x98', b'\x8e'], dtype='|S1')
+ [
+ "h", "e", "l", "l", "o", "h", "o", "l", "a", b"\xf0", b"\x9f",
+ b"\x98", b"\x8e"
+ ],
+ dtype="|S1")
self.assertAllEqual(values.tolist(), expected)
self.assertAllEqual(shape, [3, 5])
@@ -52,7 +61,7 @@ class StringSplitOpTest(tf.test.TestCase):
strings = [" hello ", "", "world "]
with self.test_session() as sess:
- tokens = tf.string_split(strings)
+ tokens = string_ops.string_split(strings)
indices, values, shape = sess.run(tokens)
self.assertAllEqual(indices, [[0, 0], [2, 0]])
self.assertAllEqual(values, [b"hello", b"world"])
@@ -63,17 +72,18 @@ class StringSplitOpTest(tf.test.TestCase):
with self.test_session() as sess:
self.assertRaises(
- ValueError, tf.string_split, strings, delimiter=["|", ""])
+ ValueError, string_ops.string_split, strings, delimiter=["|", ""])
- self.assertRaises(ValueError, tf.string_split, strings, delimiter=["a"])
+ self.assertRaises(
+ ValueError, string_ops.string_split, strings, delimiter=["a"])
- tokens = tf.string_split(strings, delimiter="|")
+ tokens = string_ops.string_split(strings, delimiter="|")
indices, values, shape = sess.run(tokens)
self.assertAllEqual(indices, [[0, 0], [0, 1], [1, 0]])
self.assertAllEqual(values, [b"hello", b"world", b"hello world"])
self.assertAllEqual(shape, [2, 2])
- tokens = tf.string_split(strings, delimiter="| ")
+ tokens = string_ops.string_split(strings, delimiter="| ")
indices, values, shape = sess.run(tokens)
self.assertAllEqual(indices, [[0, 0], [0, 1], [1, 0], [1, 1]])
self.assertAllEqual(values, [b"hello", b"world", b"hello", b"world"])
@@ -83,13 +93,13 @@ class StringSplitOpTest(tf.test.TestCase):
strings = ["hello|world", "hello world"]
with self.test_session() as sess:
- delimiter = tf.placeholder(tf.string)
+ delimiter = array_ops.placeholder(dtypes.string)
- tokens = tf.string_split(strings, delimiter=delimiter)
+ tokens = string_ops.string_split(strings, delimiter=delimiter)
- with self.assertRaises(tf.errors.InvalidArgumentError):
+ with self.assertRaises(errors_impl.InvalidArgumentError):
sess.run(tokens, feed_dict={delimiter: ["a", "b"]})
- with self.assertRaises(tf.errors.InvalidArgumentError):
+ with self.assertRaises(errors_impl.InvalidArgumentError):
sess.run(tokens, feed_dict={delimiter: ["a"]})
indices, values, shape = sess.run(tokens, feed_dict={delimiter: "|"})
@@ -101,21 +111,21 @@ class StringSplitOpTest(tf.test.TestCase):
strings = ["hello.cruel,world", "hello cruel world"]
with self.test_session() as sess:
- delimiter = tf.placeholder(tf.string)
+ delimiter = array_ops.placeholder(dtypes.string)
- tokens = tf.string_split(strings, delimiter=delimiter)
+ tokens = string_ops.string_split(strings, delimiter=delimiter)
- with self.assertRaises(tf.errors.InvalidArgumentError):
+ with self.assertRaises(errors_impl.InvalidArgumentError):
sess.run(tokens, feed_dict={delimiter: ["a", "b"]})
- with self.assertRaises(tf.errors.InvalidArgumentError):
+ with self.assertRaises(errors_impl.InvalidArgumentError):
sess.run(tokens, feed_dict={delimiter: ["a"]})
indices, values, shape = sess.run(tokens, feed_dict={delimiter: ".,"})
self.assertAllEqual(indices, [[0, 0], [0, 1], [0, 2], [1, 0]])
- self.assertAllEqual(values, [b"hello", b"cruel", b"world",
- b"hello cruel world"])
+ self.assertAllEqual(values,
+ [b"hello", b"cruel", b"world", b"hello cruel world"])
self.assertAllEqual(shape, [2, 3])
if __name__ == "__main__":
- tf.test.main()
+ test.main()
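
Illustrative sketch (hypothetical `StringSplitSketch`, not in the commit): `string_split` yields a `SparseTensor`, which is why the tests above unpack `indices`, `values`, and `shape` from a single `sess.run`.

from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test


class StringSplitSketch(test.TestCase):

  def testSparseResult(self):
    with self.test_session() as sess:
      tokens = string_ops.string_split(["a b", "c"])
      # Row i of `indices` locates values[i] as (example, token) inside
      # the dense shape [num_examples, max_tokens].
      indices, values, shape = sess.run(tokens)
      self.assertAllEqual(indices, [[0, 0], [0, 1], [1, 0]])
      self.assertAllEqual(values, [b"a", b"b", b"c"])
      self.assertAllEqual(shape, [2, 2])


if __name__ == "__main__":
  test.main()
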
diff --git a/tensorflow/python/kernel_tests/string_to_hash_bucket_op_test.py b/tensorflow/python/kernel_tests/string_to_hash_bucket_op_test.py
index da167e8025..2c6064e64b 100644
--- a/tensorflow/python/kernel_tests/string_to_hash_bucket_op_test.py
+++ b/tensorflow/python/kernel_tests/string_to_hash_bucket_op_test.py
@@ -12,29 +12,32 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for StringToHashBucket op from string_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-import tensorflow as tf
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import string_ops
+from tensorflow.python.platform import test
-class StringToHashBucketOpTest(tf.test.TestCase):
+class StringToHashBucketOpTest(test.TestCase):
def testStringToOneHashBucketFast(self):
with self.test_session():
- input_string = tf.placeholder(tf.string)
- output = tf.string_to_hash_bucket_fast(input_string, 1)
+ input_string = array_ops.placeholder(dtypes.string)
+ output = string_ops.string_to_hash_bucket_fast(input_string, 1)
result = output.eval(feed_dict={input_string: ['a', 'b', 'c']})
self.assertAllEqual([0, 0, 0], result)
def testStringToHashBucketsFast(self):
with self.test_session():
- input_string = tf.placeholder(tf.string)
- output = tf.string_to_hash_bucket_fast(input_string, 10)
+ input_string = array_ops.placeholder(dtypes.string)
+ output = string_ops.string_to_hash_bucket_fast(input_string, 10)
result = output.eval(feed_dict={input_string: ['a', 'b', 'c', 'd']})
# Fingerprint64('a') -> 12917804110809363939 -> mod 10 -> 9
@@ -45,21 +48,17 @@ class StringToHashBucketOpTest(tf.test.TestCase):
def testStringToOneHashBucketLegacyHash(self):
with self.test_session():
- input_string = tf.placeholder(tf.string)
- output = tf.string_to_hash_bucket(input_string, 1)
- result = output.eval(feed_dict={
- input_string: ['a', 'b', 'c']
- })
+ input_string = array_ops.placeholder(dtypes.string)
+ output = string_ops.string_to_hash_bucket(input_string, 1)
+ result = output.eval(feed_dict={input_string: ['a', 'b', 'c']})
self.assertAllEqual([0, 0, 0], result)
def testStringToHashBucketsLegacyHash(self):
with self.test_session():
- input_string = tf.placeholder(tf.string)
- output = tf.string_to_hash_bucket(input_string, 10)
- result = output.eval(feed_dict={
- input_string: ['a', 'b', 'c']
- })
+ input_string = array_ops.placeholder(dtypes.string)
+ output = string_ops.string_to_hash_bucket(input_string, 10)
+ result = output.eval(feed_dict={input_string: ['a', 'b', 'c']})
# Hash64('a') -> 2996632905371535868 -> mod 10 -> 8
# Hash64('b') -> 5795986006276551370 -> mod 10 -> 0
@@ -68,16 +67,16 @@ class StringToHashBucketOpTest(tf.test.TestCase):
def testStringToOneHashBucketStrongOneHashBucket(self):
with self.test_session():
- input_string = tf.constant(['a', 'b', 'c'])
- output = tf.string_to_hash_bucket_strong(input_string, 1, key=[123, 345])
+ input_string = constant_op.constant(['a', 'b', 'c'])
+ output = string_ops.string_to_hash_bucket_strong(
+ input_string, 1, key=[123, 345])
self.assertAllEqual([0, 0, 0], output.eval())
def testStringToHashBucketsStrong(self):
with self.test_session():
- input_string = tf.constant(['a', 'b', 'c'])
- output = tf.string_to_hash_bucket_strong(input_string,
- 10,
- key=[98765, 132])
+ input_string = constant_op.constant(['a', 'b', 'c'])
+ output = string_ops.string_to_hash_bucket_strong(
+ input_string, 10, key=[98765, 132])
# key = [98765, 132]
# StrongKeyedHash(key, 'a') -> 7157389809176466784 -> mod 10 -> 4
# StrongKeyedHash(key, 'b') -> 15805638358933211562 -> mod 10 -> 2
@@ -86,10 +85,11 @@ class StringToHashBucketOpTest(tf.test.TestCase):
def testStringToHashBucketsStrongInvalidKey(self):
with self.test_session():
- input_string = tf.constant(['a', 'b', 'c'])
+ input_string = constant_op.constant(['a', 'b', 'c'])
with self.assertRaisesOpError('Key must have 2 elements'):
- tf.string_to_hash_bucket_strong(input_string, 10, key=[98765]).eval()
+ string_ops.string_to_hash_bucket_strong(
+ input_string, 10, key=[98765]).eval()
if __name__ == '__main__':
- tf.test.main()
+ test.main()
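
Illustrative sketch (hypothetical `HashBucketSketch`, not in the commit), spelling out the arithmetic the inline comments above rely on: the bucket is the 64-bit fingerprint reduced modulo the bucket count.

from tensorflow.python.framework import constant_op
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test


class HashBucketSketch(test.TestCase):

  def testBucketIsFingerprintModBuckets(self):
    with self.test_session():
      # Fingerprint64('a') = 12917804110809363939, and
      # 12917804110809363939 % 10 = 9, matching the comment in the test.
      output = string_ops.string_to_hash_bucket_fast(
          constant_op.constant(["a"]), 10)
      self.assertAllEqual([9], output.eval())


if __name__ == "__main__":
  test.main()
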
diff --git a/tensorflow/python/kernel_tests/string_to_number_op_test.py b/tensorflow/python/kernel_tests/string_to_number_op_test.py
index 849088ef82..8a7a7285a6 100644
--- a/tensorflow/python/kernel_tests/string_to_number_op_test.py
+++ b/tensorflow/python/kernel_tests/string_to_number_op_test.py
@@ -12,60 +12,62 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for StringToNumber op from parsing_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-import tensorflow as tf
-
+from tensorflow.python.framework import dtypes
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import parsing_ops
+from tensorflow.python.platform import test
_ERROR_MESSAGE = "StringToNumberOp could not correctly convert string: "
-class StringToNumberOpTest(tf.test.TestCase):
+class StringToNumberOpTest(test.TestCase):
def testToFloat(self):
with self.test_session():
- input_string = tf.placeholder(tf.string)
- output = tf.string_to_number(
- input_string,
- out_type=tf.float32)
+ input_string = array_ops.placeholder(dtypes.string)
+ output = parsing_ops.string_to_number(
+ input_string, out_type=dtypes.float32)
result = output.eval(feed_dict={
- input_string: ["0",
- "3",
- "-1",
- "1.12",
- "0xF",
- " -10.5",
- "3.40282e+38",
- # The next two exceed maximum value for float, so we
- # expect +/-INF to be returned instead.
- "3.40283e+38",
- "-3.40283e+38",
- "NAN",
- "INF"]
+ input_string: [
+ "0",
+ "3",
+ "-1",
+ "1.12",
+ "0xF",
+ " -10.5",
+ "3.40282e+38",
+ # The next two exceed maximum value for float, so we
+ # expect +/-INF to be returned instead.
+ "3.40283e+38",
+ "-3.40283e+38",
+ "NAN",
+ "INF"
+ ]
})
- self.assertAllClose([0, 3, -1, 1.12, 0xF, -10.5, 3.40282e+38,
- float("INF"), float("-INF"), float("NAN"),
- float("INF")], result)
+ self.assertAllClose([
+ 0, 3, -1, 1.12, 0xF, -10.5, 3.40282e+38, float("INF"), float("-INF"),
+ float("NAN"), float("INF")
+ ], result)
with self.assertRaisesOpError(_ERROR_MESSAGE + "10foobar"):
output.eval(feed_dict={input_string: ["10foobar"]})
def testToInt32(self):
with self.test_session():
- input_string = tf.placeholder(tf.string)
- output = tf.string_to_number(
- input_string,
- out_type=tf.int32)
+ input_string = array_ops.placeholder(dtypes.string)
+ output = parsing_ops.string_to_number(input_string, out_type=dtypes.int32)
result = output.eval(feed_dict={
- input_string: ["0", "3", "-1", " -10", "-2147483648", "2147483647"]
+ input_string:
+ ["0", "3", "-1", " -10", "-2147483648", "2147483647"]
})
self.assertAllEqual([0, 3, -1, -10, -2147483648, 2147483647], result)
@@ -80,4 +82,4 @@ class StringToNumberOpTest(tf.test.TestCase):
if __name__ == "__main__":
- tf.test.main()
+ test.main()
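
Illustrative sketch (hypothetical `StringToNumberSketch`, not in the commit): as the comments above note, values just past the float32 maximum parse to +/-INF rather than raising.

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import test


class StringToNumberSketch(test.TestCase):

  def testOverflowSaturatesToInf(self):
    with self.test_session():
      # 3.40283e+38 exceeds the float32 maximum (~3.40282e+38).
      output = parsing_ops.string_to_number(
          constant_op.constant(["3.40283e+38", "-3.40283e+38"]),
          out_type=dtypes.float32)
      self.assertAllEqual([float("inf"), float("-inf")], output.eval())


if __name__ == "__main__":
  test.main()
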
diff --git a/tensorflow/python/kernel_tests/substr_op_test.py b/tensorflow/python/kernel_tests/substr_op_test.py
index 7c78ab14ba..0c0710fed4 100644
--- a/tensorflow/python/kernel_tests/substr_op_test.py
+++ b/tensorflow/python/kernel_tests/substr_op_test.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for Substr op from string_ops."""
from __future__ import absolute_import
@@ -20,10 +19,13 @@ from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+
+from tensorflow.python.framework import errors_impl
+from tensorflow.python.ops import string_ops
+from tensorflow.python.platform import test
-class SubstrOpTest(tf.test.TestCase):
+class SubstrOpTest(test.TestCase):
def _testScalarString(self, dtype):
test_string = b"Hello"
@@ -31,7 +33,7 @@ class SubstrOpTest(tf.test.TestCase):
length = np.array(3, dtype)
expected_value = b"ell"
- substr_op = tf.substr(test_string, position, length)
+ substr_op = string_ops.substr(test_string, position, length)
with self.test_session():
substr = substr_op.eval()
self.assertAllEqual(substr, expected_value)
@@ -42,7 +44,7 @@ class SubstrOpTest(tf.test.TestCase):
length = np.array(3, dtype)
expected_value = [b"ell", b"orl"]
- substr_op = tf.substr(test_string, position, length)
+ substr_op = string_ops.substr(test_string, position, length)
with self.test_session():
substr = substr_op.eval()
self.assertAllEqual(substr, expected_value)
@@ -53,11 +55,10 @@ class SubstrOpTest(tf.test.TestCase):
[b"sixteen", b"seventeen", b"eighteen"]]
position = np.array(1, dtype)
length = np.array(4, dtype)
- expected_value = [[b"en", b"leve", b"welv"],
- [b"hirt", b"ourt", b"ifte"],
+ expected_value = [[b"en", b"leve", b"welv"], [b"hirt", b"ourt", b"ifte"],
[b"ixte", b"even", b"ight"]]
- substr_op = tf.substr(test_string, position, length)
+ substr_op = string_ops.substr(test_string, position, length)
with self.test_session():
substr = substr_op.eval()
self.assertAllEqual(substr, expected_value)
@@ -66,17 +67,12 @@ class SubstrOpTest(tf.test.TestCase):
test_string = [[b"ten", b"eleven", b"twelve"],
[b"thirteen", b"fourteen", b"fifteen"],
[b"sixteen", b"seventeen", b"eighteen"]]
- position = np.array([[1, 2, 3],
- [1, 2, 3],
- [1, 2, 3]], dtype)
- length = np.array([[2, 3, 4],
- [4, 3, 2],
- [5, 5, 5]], dtype)
- expected_value = [[b"en", b"eve", b"lve"],
- [b"hirt", b"urt", b"te"],
+ position = np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]], dtype)
+ length = np.array([[2, 3, 4], [4, 3, 2], [5, 5, 5]], dtype)
+ expected_value = [[b"en", b"eve", b"lve"], [b"hirt", b"urt", b"te"],
[b"ixtee", b"vente", b"hteen"]]
- substr_op = tf.substr(test_string, position, length)
+ substr_op = string_ops.substr(test_string, position, length)
with self.test_session():
substr = substr_op.eval()
self.assertAllEqual(substr, expected_value)
@@ -89,27 +85,20 @@ class SubstrOpTest(tf.test.TestCase):
[b"nineteen", b"twenty", b"twentyone"]]
position = np.array([1, 2, 3], dtype)
length = np.array([1, 2, 3], dtype)
- expected_value = [[b"e", b"ev", b"lve"],
- [b"h", b"ur", b"tee"],
- [b"i", b"ve", b"hte"],
- [b"i", b"en", b"nty"]]
- substr_op = tf.substr(test_string, position, length)
+ expected_value = [[b"e", b"ev", b"lve"], [b"h", b"ur", b"tee"],
+ [b"i", b"ve", b"hte"], [b"i", b"en", b"nty"]]
+ substr_op = string_ops.substr(test_string, position, length)
with self.test_session():
substr = substr_op.eval()
self.assertAllEqual(substr, expected_value)
# Broadcast input string onto pos/len
test_string = [b"thirteen", b"fourteen", b"fifteen"]
- position = np.array([[1, 2, 3],
- [3, 2, 1],
- [5, 5, 5]], dtype)
- length = np.array([[3, 2, 1],
- [1, 2, 3],
- [2, 2, 2]], dtype)
- expected_value = [[b"hir", b"ur", b"t"],
- [b"r", b"ur", b"ift"],
+ position = np.array([[1, 2, 3], [3, 2, 1], [5, 5, 5]], dtype)
+ length = np.array([[3, 2, 1], [1, 2, 3], [2, 2, 2]], dtype)
+ expected_value = [[b"hir", b"ur", b"t"], [b"r", b"ur", b"ift"],
[b"ee", b"ee", b"en"]]
- substr_op = tf.substr(test_string, position, length)
+ substr_op = string_ops.substr(test_string, position, length)
with self.test_session():
substr = substr_op.eval()
self.assertAllEqual(substr, expected_value)
@@ -119,7 +108,7 @@ class SubstrOpTest(tf.test.TestCase):
position = np.array([1, 5, 7], dtype)
length = np.array([3, 2, 1], dtype)
expected_value = [b"hir", b"ee", b"n"]
- substr_op = tf.substr(test_string, position, length)
+ substr_op = string_ops.substr(test_string, position, length)
with self.test_session():
substr = substr_op.eval()
self.assertAllEqual(substr, expected_value)
@@ -130,63 +119,56 @@ class SubstrOpTest(tf.test.TestCase):
[b"sixteen", b"seventeen", b"eighteen"]]
position = np.array([1, 2, 3, 4], dtype)
length = np.array([1, 2, 3, 4], dtype)
- expected_value = [[b"e", b"ev", b"lve"],
- [b"h", b"ur", b"tee"],
+ expected_value = [[b"e", b"ev", b"lve"], [b"h", b"ur", b"tee"],
[b"i", b"ve", b"hte"]]
with self.assertRaises(ValueError):
- substr_op = tf.substr(test_string, position, length)
+ substr_op = string_ops.substr(test_string, position, length)
def _testOutOfRangeError(self, dtype):
# Scalar/Scalar
test_string = b"Hello"
position = np.array(7, dtype)
length = np.array(3, dtype)
- substr_op = tf.substr(test_string, position, length)
+ substr_op = string_ops.substr(test_string, position, length)
with self.test_session():
- with self.assertRaises(tf.errors.InvalidArgumentError):
+ with self.assertRaises(errors_impl.InvalidArgumentError):
substr = substr_op.eval()
# Vector/Scalar
test_string = [b"good", b"good", b"bad", b"good"]
position = np.array(3, dtype)
length = np.array(1, dtype)
- substr_op = tf.substr(test_string, position, length)
+ substr_op = string_ops.substr(test_string, position, length)
with self.test_session():
- with self.assertRaises(tf.errors.InvalidArgumentError):
+ with self.assertRaises(errors_impl.InvalidArgumentError):
substr = substr_op.eval()
# Negative pos
test_string = b"Hello"
position = np.array(-1, dtype)
length = np.array(3, dtype)
- substr_op = tf.substr(test_string, position, length)
+ substr_op = string_ops.substr(test_string, position, length)
with self.test_session():
- with self.assertRaises(tf.errors.InvalidArgumentError):
+ with self.assertRaises(errors_impl.InvalidArgumentError):
substr = substr_op.eval()
# Matrix/Matrix
- test_string = [[b"good", b"good", b"good"],
- [b"good", b"good", b"bad"],
+ test_string = [[b"good", b"good", b"good"], [b"good", b"good", b"bad"],
[b"good", b"good", b"good"]]
- position = np.array([[1, 2, 3],
- [1, 2, 3],
- [1, 2, 3]], dtype)
- length = np.array([[3, 2, 1],
- [1, 2, 3],
- [2, 2, 2]], dtype)
- substr_op = tf.substr(test_string, position, length)
+ position = np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]], dtype)
+ length = np.array([[3, 2, 1], [1, 2, 3], [2, 2, 2]], dtype)
+ substr_op = string_ops.substr(test_string, position, length)
with self.test_session():
- with self.assertRaises(tf.errors.InvalidArgumentError):
+ with self.assertRaises(errors_impl.InvalidArgumentError):
substr = substr_op.eval()
# Broadcast
- test_string = [[b"good", b"good", b"good"],
- [b"good", b"good", b"bad"]]
+ test_string = [[b"good", b"good", b"good"], [b"good", b"good", b"bad"]]
position = np.array([1, 2, 3], dtype)
length = np.array([1, 2, 3], dtype)
- substr_op = tf.substr(test_string, position, length)
+ substr_op = string_ops.substr(test_string, position, length)
with self.test_session():
- with self.assertRaises(tf.errors.InvalidArgumentError):
+ with self.assertRaises(errors_impl.InvalidArgumentError):
substr = substr_op.eval()
def _testMismatchPosLenShapes(self, dtype):
@@ -197,15 +179,13 @@ class SubstrOpTest(tf.test.TestCase):
length = np.array([2, 3, 4], dtype)
# Should fail: position/length have different rank
with self.assertRaises(ValueError):
- substr_op = tf.substr(test_string, position, length)
+ substr_op = string_ops.substr(test_string, position, length)
- position = np.array([[1, 2, 3],
- [1, 2, 3],
- [1, 2, 3]], dtype)
+ position = np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]], dtype)
length = np.array([[2, 3, 4]], dtype)
# Should fail: position/length have different dimensionality
with self.assertRaises(ValueError):
- substr_op = tf.substr(test_string, position, length)
+ substr_op = string_ops.substr(test_string, position, length)
def _testAll(self, dtype):
self._testScalarString(dtype)
@@ -219,17 +199,17 @@ class SubstrOpTest(tf.test.TestCase):
def testInt32(self):
self._testAll(np.int32)
-
+
def testInt64(self):
self._testAll(np.int64)
def testWrongDtype(self):
with self.test_session():
with self.assertRaises(TypeError):
- tf.substr(b"test", 3.0, 1)
+ string_ops.substr(b"test", 3.0, 1)
with self.assertRaises(TypeError):
- tf.substr(b"test", 3, 1.0)
+ string_ops.substr(b"test", 3, 1.0)
-
+
if __name__ == "__main__":
- tf.test.main()
+ test.main()
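
Illustrative sketch (hypothetical `SubstrSketch`, not in the commit): the broadcast cases above follow numpy-style rules, so a vector of positions and lengths is applied to every row of a string matrix.

import numpy as np

from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test


class SubstrSketch(test.TestCase):

  def testBroadcastPosLenOverRows(self):
    with self.test_session():
      matrix = [[b"ten", b"eleven"], [b"sixteen", b"seventeen"]]
      position = np.array([1, 2], np.int32)  # per-column start
      length = np.array([2, 3], np.int32)  # per-column length
      substr = string_ops.substr(matrix, position, length).eval()
      self.assertAllEqual(substr, [[b"en", b"eve"], [b"ix", b"ven"]])


if __name__ == "__main__":
  test.main()
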
diff --git a/tensorflow/python/kernel_tests/summary_audio_op_test.py b/tensorflow/python/kernel_tests/summary_audio_op_test.py
index 4ca8bf5dfb..eaae671192 100644
--- a/tensorflow/python/kernel_tests/summary_audio_op_test.py
+++ b/tensorflow/python/kernel_tests/summary_audio_op_test.py
@@ -13,19 +13,24 @@
# limitations under the License.
# ==============================================================================
"""Tests for summary sound op."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
-import tensorflow as tf
+
+from tensorflow.core.framework import summary_pb2
+from tensorflow.python.framework import ops
+from tensorflow.python.platform import test
+from tensorflow.python.summary import summary
-class SummaryAudioOpTest(tf.test.TestCase):
+class SummaryAudioOpTest(test.TestCase):
def _AsSummary(self, s):
- summ = tf.Summary()
+ summ = summary_pb2.Summary()
summ.ParseFromString(s)
return summ
@@ -45,7 +50,7 @@ class SummaryAudioOpTest(tf.test.TestCase):
def testAudioSummary(self):
np.random.seed(7)
for channels in (1, 2, 5, 8):
- with self.test_session(graph=tf.Graph()) as sess:
+ with self.test_session(graph=ops.Graph()) as sess:
num_frames = 7
shape = (4, num_frames, channels)
# Generate random audio in the range [-1.0, 1.0).
@@ -53,7 +58,7 @@ class SummaryAudioOpTest(tf.test.TestCase):
# Summarize
sample_rate = 8000
- summ = tf.summary.audio(
+ summ = summary.audio(
"snd", const, max_outputs=3, sample_rate=sample_rate)
value = sess.run(summ)
self.assertEqual([], summ.get_shape())
@@ -64,4 +69,4 @@ class SummaryAudioOpTest(tf.test.TestCase):
if __name__ == "__main__":
- tf.test.main()
+ test.main()
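
Illustrative sketch (hypothetical `AudioSummarySketch`, not in the commit), assuming `max_outputs` caps the number of emitted clips the same way it caps images in the image-summary test below:

import numpy as np

from tensorflow.core.framework import summary_pb2
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
from tensorflow.python.summary import summary


class AudioSummarySketch(test.TestCase):

  def testMaxOutputsCapsEmittedClips(self):
    with self.test_session(graph=ops.Graph()) as sess:
      # A batch of 4 clips, values in [-1.0, 1.0), 7 frames, 2 channels.
      audio = np.random.rand(4, 7, 2).astype(np.float32) * 2.0 - 1.0
      summ = summary.audio("snd", audio, max_outputs=3, sample_rate=8000)
      parsed = summary_pb2.Summary()
      parsed.ParseFromString(sess.run(summ))
      self.assertEqual(3, len(parsed.value))  # only the first 3 clips


if __name__ == "__main__":
  test.main()
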
diff --git a/tensorflow/python/kernel_tests/summary_image_op_test.py b/tensorflow/python/kernel_tests/summary_image_op_test.py
index a0370cd978..aa45416c3d 100644
--- a/tensorflow/python/kernel_tests/summary_image_op_test.py
+++ b/tensorflow/python/kernel_tests/summary_image_op_test.py
@@ -12,23 +12,28 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for summary image op."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
-import tensorflow as tf
+from tensorflow.core.framework import summary_pb2
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
from tensorflow.python.ops import image_ops
+import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
+from tensorflow.python.platform import test
+from tensorflow.python.summary import summary
-class SummaryImageOpTest(tf.test.TestCase):
+class SummaryImageOpTest(test.TestCase):
def _AsSummary(self, s):
- summ = tf.Summary()
+ summ = summary_pb2.Summary()
summ.ParseFromString(s)
return summ
@@ -37,7 +42,7 @@ class SummaryImageOpTest(tf.test.TestCase):
# Only the first 3 images are returned.
for v in image_summ.value:
v.image.ClearField("encoded_image_string")
- expected = '\n'.join("""
+ expected = "\n".join("""
value {
tag: "img/image/%d"
image { height: %d width: %d colorspace: %d }
@@ -48,7 +53,7 @@ class SummaryImageOpTest(tf.test.TestCase):
np.random.seed(7)
for depth in (1, 3, 4):
for positive in False, True:
- with self.test_session(graph=tf.Graph()) as sess:
+ with self.test_session(graph=ops.Graph()) as sess:
shape = (4, 5, 7) + (depth,)
bad_color = [255, 0, 0, 255][:depth]
# Build a mostly random image with one nan
@@ -65,14 +70,14 @@ class SummaryImageOpTest(tf.test.TestCase):
const[0, 1, 2, depth // 2] = np.nan
# Summarize
- summ = tf.summary.image("img", const)
+ summ = summary.image("img", const)
value = sess.run(summ)
self.assertEqual([], summ.get_shape())
image_summ = self._AsSummary(value)
# Decode the first image and check consistency
- image = image_ops.decode_png(
- image_summ.value[0].image.encoded_image_string).eval()
+ image = image_ops.decode_png(image_summ.value[0]
+ .image.encoded_image_string).eval()
self.assertAllEqual(image[1, 2], bad_color)
image[1, 2] = adjusted[0, 1, 2]
self.assertAllClose(image, adjusted[0])
@@ -83,24 +88,24 @@ class SummaryImageOpTest(tf.test.TestCase):
def testImageSummaryUint8(self):
np.random.seed(7)
for depth in (1, 3, 4):
- with self.test_session(graph=tf.Graph()) as sess:
+ with self.test_session(graph=ops.Graph()) as sess:
shape = (4, 5, 7) + (depth,)
# Build a random uint8 image
images = np.random.randint(256, size=shape).astype(np.uint8)
- tf_images = tf.convert_to_tensor(images)
- self.assertEqual(tf_images.dtype, tf.uint8)
+ tf_images = ops.convert_to_tensor(images)
+ self.assertEqual(tf_images.dtype, dtypes.uint8)
# Summarize
- summ = tf.summary.image("img", tf_images)
+ summ = summary.image("img", tf_images)
value = sess.run(summ)
self.assertEqual([], summ.get_shape())
image_summ = self._AsSummary(value)
# Decode the first image and check consistency.
# Since we're uint8, everything should be exact.
- image = image_ops.decode_png(
- image_summ.value[0].image.encoded_image_string).eval()
+ image = image_ops.decode_png(image_summ.value[0]
+ .image.encoded_image_string).eval()
self.assertAllEqual(image, images[0])
# Check the rest of the proto
@@ -108,4 +113,4 @@ class SummaryImageOpTest(tf.test.TestCase):
if __name__ == "__main__":
- tf.test.main()
+ test.main()
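
Illustrative sketch (hypothetical `ImageDtypeSketch`, not in the commit): the uint8 path is asserted bit-exact above because `convert_to_tensor` preserves the dtype and PNG encoding of uint8 pixels is lossless.

import numpy as np

from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.platform import test


class ImageDtypeSketch(test.TestCase):

  def testUint8PassesThroughUnscaled(self):
    images = np.random.randint(256, size=(4, 5, 7, 3)).astype(np.uint8)
    tf_images = ops.convert_to_tensor(images)
    # No float normalization happens, so the summary encodes raw pixels.
    self.assertEqual(tf_images.dtype, dtypes.uint8)


if __name__ == "__main__":
  test.main()
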
diff --git a/tensorflow/python/kernel_tests/summary_ops_test.py b/tensorflow/python/kernel_tests/summary_ops_test.py
index 1870dd17a9..2da7107f61 100644
--- a/tensorflow/python/kernel_tests/summary_ops_test.py
+++ b/tensorflow/python/kernel_tests/summary_ops_test.py
@@ -12,26 +12,31 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for summary ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-import tensorflow as tf
+from tensorflow.core.framework import summary_pb2
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
+from tensorflow.python.ops import logging_ops
+from tensorflow.python.platform import test
+from tensorflow.python.summary import summary
+
-class SummaryOpsTest(tf.test.TestCase):
+class SummaryOpsTest(test.TestCase):
def _AsSummary(self, s):
- summ = tf.Summary()
+ summ = summary_pb2.Summary()
summ.ParseFromString(s)
return summ
def testScalarSummary(self):
with self.test_session() as sess:
- const = tf.constant([10.0, 20.0])
- summ = tf.contrib.deprecated.scalar_summary(
- ["c1", "c2"], const, name="mysumm")
+ const = constant_op.constant([10.0, 20.0])
+ summ = logging_ops.scalar_summary(["c1", "c2"], const, name="mysumm")
value = sess.run(summ)
self.assertEqual([], summ.get_shape())
self.assertProtoEquals("""
@@ -41,8 +46,8 @@ class SummaryOpsTest(tf.test.TestCase):
def testScalarSummaryDefaultName(self):
with self.test_session() as sess:
- const = tf.constant([10.0, 20.0])
- summ = tf.contrib.deprecated.scalar_summary(["c1", "c2"], const)
+ const = constant_op.constant([10.0, 20.0])
+ summ = logging_ops.scalar_summary(["c1", "c2"], const)
value = sess.run(summ)
self.assertEqual([], summ.get_shape())
self.assertProtoEquals("""
@@ -52,10 +57,10 @@ class SummaryOpsTest(tf.test.TestCase):
def testMergeSummary(self):
with self.test_session() as sess:
- const = tf.constant(10.0)
- summ1 = tf.summary.histogram("h", const)
- summ2 = tf.contrib.deprecated.scalar_summary("c", const)
- merge = tf.summary.merge([summ1, summ2])
+ const = constant_op.constant(10.0)
+ summ1 = summary.histogram("h", const)
+ summ2 = logging_ops.scalar_summary("c", const)
+ merge = summary.merge([summ1, summ2])
value = sess.run(merge)
self.assertEqual([], merge.get_shape())
self.assertProtoEquals("""
@@ -79,30 +84,29 @@ class SummaryOpsTest(tf.test.TestCase):
""", self._AsSummary(value))
def testMergeAllSummaries(self):
- with tf.Graph().as_default():
- const = tf.constant(10.0)
- summ1 = tf.summary.histogram("h", const)
- summ2 = tf.summary.scalar("o", const, collections=["foo_key"])
- summ3 = tf.summary.scalar("c", const)
- merge = tf.summary.merge_all()
+ with ops.Graph().as_default():
+ const = constant_op.constant(10.0)
+ summ1 = summary.histogram("h", const)
+ summ2 = summary.scalar("o", const, collections=["foo_key"])
+ summ3 = summary.scalar("c", const)
+ merge = summary.merge_all()
self.assertEqual("MergeSummary", merge.op.type)
self.assertEqual(2, len(merge.op.inputs))
self.assertEqual(summ1, merge.op.inputs[0])
self.assertEqual(summ3, merge.op.inputs[1])
- merge = tf.summary.merge_all("foo_key")
+ merge = summary.merge_all("foo_key")
self.assertEqual("MergeSummary", merge.op.type)
self.assertEqual(1, len(merge.op.inputs))
self.assertEqual(summ2, merge.op.inputs[0])
- self.assertTrue(
- tf.summary.merge_all("bar_key") is None)
+ self.assertTrue(summary.merge_all("bar_key") is None)
def testHistogramSummaryTypes(self):
- with tf.Graph().as_default():
- for dtype in (tf.int8, tf.uint8, tf.int16, tf.int32,
- tf.float32, tf.float64):
- const = tf.constant(10, dtype=dtype)
- tf.summary.histogram("h", const)
+ with ops.Graph().as_default():
+ for dtype in (dtypes.int8, dtypes.uint8, dtypes.int16, dtypes.int32,
+ dtypes.float32, dtypes.float64):
+ const = constant_op.constant(10, dtype=dtype)
+ summary.histogram("h", const)
if __name__ == "__main__":
- tf.test.main()
+ test.main()
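
Illustrative sketch (hypothetical `MergeAllSketch`, not in the commit): `merge_all` only collects summaries registered under the requested collection key, which is what the `foo_key`/`bar_key` assertions above exercise.

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
from tensorflow.python.summary import summary


class MergeAllSketch(test.TestCase):

  def testCollectionKeyScopesMerge(self):
    with ops.Graph().as_default():
      const = constant_op.constant(1.0)
      summary.scalar("default_key", const)
      summary.scalar("custom_key", const, collections=["my_key"])
      merged = summary.merge_all("my_key")
      # Only the summary registered under "my_key" is merged.
      self.assertEqual(1, len(merged.op.inputs))


if __name__ == "__main__":
  test.main()
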
diff --git a/tensorflow/python/kernel_tests/summary_tensor_op_test.py b/tensorflow/python/kernel_tests/summary_tensor_op_test.py
index aab49b0f06..ee3f5aa250 100644
--- a/tensorflow/python/kernel_tests/summary_tensor_op_test.py
+++ b/tensorflow/python/kernel_tests/summary_tensor_op_test.py
@@ -13,21 +13,27 @@
# limitations under the License.
# ==============================================================================
"""Tests for summary ops."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six
-import tensorflow as tf
+from tensorflow.core.framework import summary_pb2
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import summary_ops
+from tensorflow.python.platform import test
-class SummaryOpsTest(tf.test.TestCase):
+class SummaryOpsTest(test.TestCase):
def _SummarySingleValue(self, s):
- summ = tf.Summary()
+ summ = summary_pb2.Summary()
summ.ParseFromString(s)
self.assertEqual(len(summ.value), 1)
return summ.value[0]
@@ -37,13 +43,13 @@ class SummaryOpsTest(tf.test.TestCase):
def testNodeNames(self):
with self.test_session() as sess:
- c = tf.constant(1)
- s1 = tf.summary.tensor_summary("s1", c)
- with tf.name_scope("foo"):
- s2 = tf.summary.tensor_summary("s2", c)
- with tf.name_scope("zod"):
- s3 = tf.summary.tensor_summary("s3", c)
- s4 = tf.summary.tensor_summary("TensorSummary", c)
+ c = constant_op.constant(1)
+ s1 = summary_ops.tensor_summary("s1", c)
+ with ops.name_scope("foo"):
+ s2 = summary_ops.tensor_summary("s2", c)
+ with ops.name_scope("zod"):
+ s3 = summary_ops.tensor_summary("s3", c)
+ s4 = summary_ops.tensor_summary("TensorSummary", c)
summ1, summ2, summ3, summ4 = sess.run([s1, s2, s3, s4])
v1 = self._SummarySingleValue(summ1)
@@ -60,8 +66,8 @@ class SummaryOpsTest(tf.test.TestCase):
def testScalarSummary(self):
with self.test_session() as sess:
- const = tf.constant(10.0)
- summ = tf.summary.tensor_summary("foo", const)
+ const = constant_op.constant(10.0)
+ summ = summary_ops.tensor_summary("foo", const)
result = sess.run(summ)
value = self._SummarySingleValue(result)
@@ -71,8 +77,8 @@ class SummaryOpsTest(tf.test.TestCase):
def testStringSummary(self):
s = six.b("foobar")
with self.test_session() as sess:
- const = tf.constant(s)
- summ = tf.summary.tensor_summary("foo", const)
+ const = constant_op.constant(s)
+ summ = summary_ops.tensor_summary("foo", const)
result = sess.run(summ)
value = self._SummarySingleValue(result)
@@ -81,8 +87,8 @@ class SummaryOpsTest(tf.test.TestCase):
def testManyScalarSummary(self):
with self.test_session() as sess:
- const = tf.ones([5, 5, 5])
- summ = tf.summary.tensor_summary("foo", const)
+ const = array_ops.ones([5, 5, 5])
+ summ = summary_ops.tensor_summary("foo", const)
result = sess.run(summ)
value = self._SummarySingleValue(result)
n = tensor_util.MakeNdarray(value.tensor)
@@ -91,8 +97,8 @@ class SummaryOpsTest(tf.test.TestCase):
def testManyStringSummary(self):
strings = [[six.b("foo bar"), six.b("baz")], [six.b("zoink"), six.b("zod")]]
with self.test_session() as sess:
- const = tf.constant(strings)
- summ = tf.summary.tensor_summary("foo", const)
+ const = constant_op.constant(strings)
+ summ = summary_ops.tensor_summary("foo", const)
result = sess.run(summ)
value = self._SummarySingleValue(result)
n = tensor_util.MakeNdarray(value.tensor)
@@ -101,8 +107,8 @@ class SummaryOpsTest(tf.test.TestCase):
def testManyBools(self):
bools = [True, True, True, False, False, False]
with self.test_session() as sess:
- const = tf.constant(bools)
- summ = tf.summary.tensor_summary("foo", const)
+ const = constant_op.constant(bools)
+ summ = summary_ops.tensor_summary("foo", const)
result = sess.run(summ)
value = self._SummarySingleValue(result)
@@ -111,4 +117,4 @@ class SummaryOpsTest(tf.test.TestCase):
if __name__ == "__main__":
- tf.test.main()
+ test.main()
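
Illustrative sketch (hypothetical `TensorSummarySketch`, not in the commit): a tensor summary serializes the value into a `Summary` proto, and `tensor_util.MakeNdarray` recovers it exactly, which is the round trip the assertions above depend on.

from tensorflow.core.framework import summary_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import summary_ops
from tensorflow.python.platform import test


class TensorSummarySketch(test.TestCase):

  def testRoundTrip(self):
    with self.test_session() as sess:
      const = constant_op.constant([1.0, 2.0])
      serialized = sess.run(summary_ops.tensor_summary("foo", const))
      summ = summary_pb2.Summary()
      summ.ParseFromString(serialized)
      # The TensorProto inside the single value round-trips losslessly.
      recovered = tensor_util.MakeNdarray(summ.value[0].tensor)
      self.assertAllClose([1.0, 2.0], recovered)


if __name__ == "__main__":
  test.main()
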
diff --git a/tensorflow/python/kernel_tests/svd_op_test.py b/tensorflow/python/kernel_tests/svd_op_test.py
index 2934e90ea2..3f6b6958fc 100644
--- a/tensorflow/python/kernel_tests/svd_op_test.py
+++ b/tensorflow/python/kernel_tests/svd_op_test.py
@@ -13,26 +13,32 @@
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.math_ops.matrix_inverse."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+
+from tensorflow.python.framework import constant_op
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import linalg_ops
+from tensorflow.python.ops import math_ops
+from tensorflow.python.platform import test
-class SvdOpTest(tf.test.TestCase):
+class SvdOpTest(test.TestCase):
def testWrongDimensions(self):
# The input to svd should be a tensor of at least rank 2.
- scalar = tf.constant(1.)
+ scalar = constant_op.constant(1.)
with self.assertRaisesRegexp(ValueError,
"Shape must be at least rank 2 but is rank 0"):
- tf.svd(scalar)
- vector = tf.constant([1., 2.])
+ linalg_ops.svd(scalar)
+ vector = constant_op.constant([1., 2.])
with self.assertRaisesRegexp(ValueError,
"Shape must be at least rank 2 but is rank 1"):
- tf.svd(vector)
+ linalg_ops.svd(vector)
def _GetSvdOpTest(dtype_, shape_, use_static_shape_):
@@ -77,22 +83,22 @@ def _GetSvdOpTest(dtype_, shape_, use_static_shape_):
batch_shape = a.shape[:-2]
m = a.shape[-2]
n = a.shape[-1]
- diag_s = tf.cast(tf.matrix_diag(s), dtype=dtype_)
+ diag_s = math_ops.cast(array_ops.matrix_diag(s), dtype=dtype_)
if full_matrices:
if m > n:
- zeros = tf.zeros(batch_shape + (m - n, n), dtype=dtype_)
- diag_s = tf.concat_v2([diag_s, zeros], a.ndim - 2)
+ zeros = array_ops.zeros(batch_shape + (m - n, n), dtype=dtype_)
+ diag_s = array_ops.concat_v2([diag_s, zeros], a.ndim - 2)
elif n > m:
- zeros = tf.zeros(batch_shape + (m, n - m), dtype=dtype_)
- diag_s = tf.concat_v2([diag_s, zeros], a.ndim - 1)
- a_recon = tf.matmul(u, diag_s)
- a_recon = tf.matmul(a_recon, v, adjoint_b=True)
+ zeros = array_ops.zeros(batch_shape + (m, n - m), dtype=dtype_)
+ diag_s = array_ops.concat_v2([diag_s, zeros], a.ndim - 1)
+ a_recon = math_ops.matmul(u, diag_s)
+ a_recon = math_ops.matmul(a_recon, v, adjoint_b=True)
self.assertAllClose(a_recon.eval(), a, rtol=tol, atol=tol)
def CheckUnitary(self, x):
# Tests that x[...,:,:]^H * x[...,:,:] is close to the identity.
- xx = tf.matmul(x, x, adjoint_a=True)
- identity = tf.matrix_band_part(tf.ones_like(xx), 0, 0)
+ xx = math_ops.matmul(x, x, adjoint_a=True)
+ identity = array_ops.matrix_band_part(array_ops.ones_like(xx), 0, 0)
if is_single:
tol = 1e-5
else:
@@ -112,23 +118,23 @@ def _GetSvdOpTest(dtype_, shape_, use_static_shape_):
for full_matrices in False, True:
with self.test_session() as sess:
if use_static_shape_:
- x_tf = tf.constant(x_np)
+ x_tf = constant_op.constant(x_np)
else:
- x_tf = tf.placeholder(dtype_)
+ x_tf = array_ops.placeholder(dtype_)
if compute_uv:
- s_tf, u_tf, v_tf = tf.svd(x_tf,
- compute_uv=compute_uv,
- full_matrices=full_matrices)
+ s_tf, u_tf, v_tf = linalg_ops.svd(x_tf,
+ compute_uv=compute_uv,
+ full_matrices=full_matrices)
if use_static_shape_:
s_tf_val, u_tf_val, v_tf_val = sess.run([s_tf, u_tf, v_tf])
else:
s_tf_val, u_tf_val, v_tf_val = sess.run([s_tf, u_tf, v_tf],
feed_dict={x_tf: x_np})
else:
- s_tf = tf.svd(x_tf,
- compute_uv=compute_uv,
- full_matrices=full_matrices)
+ s_tf = linalg_ops.svd(x_tf,
+ compute_uv=compute_uv,
+ full_matrices=full_matrices)
if use_static_shape_:
s_tf_val = sess.run(s_tf)
else:
@@ -171,4 +177,4 @@ if __name__ == "__main__":
use_static_shape)
setattr(SvdOpTest, "testSvd_" + name,
_GetSvdOpTest(dtype, shape, use_static_shape))
- tf.test.main()
+ test.main()
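
Illustrative numpy sketch, not part of the commit, of the identity `CheckApproximation` verifies: a is reconstructed as u . diag(s) . v^H. Note that numpy returns v^H directly as `vh`, while the TF op returns v, hence the `adjoint_b=True` in the hunks above.

import numpy as np

a = np.random.rand(4, 3).astype(np.float32)
u, s, vh = np.linalg.svd(a, full_matrices=False)
# u * s scales column j of u by s[j], i.e. u @ diag(s).
a_recon = np.dot(u * s, vh)
assert np.allclose(a, a_recon, atol=1e-5)
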
diff --git a/tensorflow/python/kernel_tests/template_test.py b/tensorflow/python/kernel_tests/template_test.py
index 12d71dd1ca..56b5305391 100644
--- a/tensorflow/python/kernel_tests/template_test.py
+++ b/tensorflow/python/kernel_tests/template_test.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for make_template."""
from __future__ import absolute_import
from __future__ import division
@@ -20,28 +19,37 @@ from __future__ import print_function
import traceback
-import tensorflow as tf
-
+from tensorflow.python.client import session
+from tensorflow.python.framework import random_seed
+from tensorflow.python.ops import init_ops
+from tensorflow.python.ops import math_ops
from tensorflow.python.ops import template
+from tensorflow.python.ops import variable_scope
+from tensorflow.python.ops import variables
+import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
+from tensorflow.python.platform import test
+from tensorflow.python.training import gradient_descent
def var_scoped_function():
- return tf.get_variable("dummy", shape=[1], initializer=tf.zeros_initializer())
+ return variable_scope.get_variable(
+ "dummy", shape=[1], initializer=init_ops.zeros_initializer())
def internally_var_scoped_function(scope_name):
- with tf.variable_scope(scope_name):
- return tf.get_variable(
- "dummy", shape=[1], initializer=tf.zeros_initializer())
+ with variable_scope.variable_scope(scope_name):
+ return variable_scope.get_variable(
+ "dummy", shape=[1], initializer=init_ops.zeros_initializer())
def function_with_create(trainable):
"""Creates a variable as a side effect using tf.Variable."""
- tf.Variable(0, trainable=trainable)
- return tf.get_variable("dummy", shape=[1], initializer=tf.zeros_initializer())
+ variables.Variable(0, trainable=trainable)
+ return variable_scope.get_variable(
+ "dummy", shape=[1], initializer=init_ops.zeros_initializer())
-class TemplateTest(tf.test.TestCase):
+class TemplateTest(test.TestCase):
def test_end_to_end(self):
"""This test shows a very simple line model with test_loss.
@@ -52,13 +60,13 @@ class TemplateTest(tf.test.TestCase):
training_input, training_output = ([1., 2., 3., 4.], [2.8, 5.1, 7.2, 8.7])
test_input, test_output = ([5., 6., 7., 8.], [11, 13, 15, 17])
- tf.set_random_seed(1234)
+ random_seed.set_random_seed(1234)
def test_line(x):
- m = tf.get_variable("w", shape=[],
- initializer=tf.truncated_normal_initializer())
- b = tf.get_variable("b", shape=[],
- initializer=tf.truncated_normal_initializer())
+ m = variable_scope.get_variable(
+ "w", shape=[], initializer=init_ops.truncated_normal_initializer())
+ b = variable_scope.get_variable(
+ "b", shape=[], initializer=init_ops.truncated_normal_initializer())
return x * m + b
line_template = template.make_template("line", test_line)
@@ -66,14 +74,16 @@ class TemplateTest(tf.test.TestCase):
train_prediction = line_template(training_input)
test_prediction = line_template(test_input)
- train_loss = tf.reduce_mean(tf.square(train_prediction - training_output))
- test_loss = tf.reduce_mean(tf.square(test_prediction - test_output))
+ train_loss = math_ops.reduce_mean(
+ math_ops.square(train_prediction - training_output))
+ test_loss = math_ops.reduce_mean(
+ math_ops.square(test_prediction - test_output))
- optimizer = tf.train.GradientDescentOptimizer(0.1)
+ optimizer = gradient_descent.GradientDescentOptimizer(0.1)
train_op = optimizer.minimize(train_loss)
- with tf.Session() as sess:
- sess.run(tf.global_variables_initializer())
+ with session.Session() as sess:
+ sess.run(variables.global_variables_initializer())
initial_test_loss = sess.run(test_loss)
sess.run(train_op)
final_test_loss = sess.run(test_loss)
@@ -112,7 +122,7 @@ class TemplateTest(tf.test.TestCase):
v1 = tmpl1()
v2 = tmpl1()
- tf.get_variable_scope().reuse_variables()
+ variable_scope.get_variable_scope().reuse_variables()
tmpl2 = template.make_template("_", var_scoped_function, unique_name_="s1")
v3 = tmpl2()
@@ -124,12 +134,12 @@ class TemplateTest(tf.test.TestCase):
tmpl1 = template.make_template("s1", var_scoped_function)
tmpl2 = template.make_template("s1", var_scoped_function)
- with tf.variable_scope("scope"):
+ with variable_scope.variable_scope("scope"):
v1 = tmpl1()
v3 = tmpl2()
# The template contract requires the following to ignore scope2.
- with tf.variable_scope("scope2"):
+ with variable_scope.variable_scope("scope2"):
v2 = tmpl1()
self.assertEqual(v1, v2)
self.assertNotEqual(v1, v3)
@@ -182,11 +192,13 @@ class TemplateTest(tf.test.TestCase):
self.assertEqual(tmpl(), tmpl())
def test_internal_variable_reuse(self):
+
def nested():
- with tf.variable_scope("nested") as vs:
- v1 = tf.get_variable("x", initializer=tf.zeros_initializer(), shape=[])
- with tf.variable_scope(vs, reuse=True):
- v2 = tf.get_variable("x")
+ with variable_scope.variable_scope("nested") as vs:
+ v1 = variable_scope.get_variable(
+ "x", initializer=init_ops.zeros_initializer(), shape=[])
+ with variable_scope.variable_scope(vs, reuse=True):
+ v2 = variable_scope.get_variable("x")
self.assertEqual(v1, v2)
return v1
@@ -202,6 +214,7 @@ class TemplateTest(tf.test.TestCase):
self.assertEqual("s1_1/nested/x:0", v3.name)
def test_nested_templates(self):
+
def nested_template():
nested1 = template.make_template("nested", var_scoped_function)
nested2 = template.make_template("nested", var_scoped_function)
@@ -225,12 +238,12 @@ class TemplateTest(tf.test.TestCase):
# Create templates in scope a then call in scope b. make_template should
# capture the scope the first time it is called, and make_immediate_template
# should capture the scope at construction time.
- with tf.variable_scope("ctor_scope"):
- tmpl_immed = template.make_template(
- "a", var_scoped_function, True) # create scope here
+ with variable_scope.variable_scope("ctor_scope"):
+ tmpl_immed = template.make_template("a", var_scoped_function,
+ True) # create scope here
tmpl_defer = template.make_template(
"b", var_scoped_function, False) # default: create scope at __call__
- with tf.variable_scope("call_scope"):
+ with variable_scope.variable_scope("call_scope"):
inner_imm_var = tmpl_immed()
inner_defer_var = tmpl_defer()
outer_imm_var = tmpl_immed()
@@ -247,7 +260,7 @@ class TemplateTest(tf.test.TestCase):
# Ensure that we can access the scope inside the template, because the name
# of that scope may be different from the name we pass to make_template, due
# to having been made unique by variable_scope.
- with tf.variable_scope("foo"):
+ with variable_scope.variable_scope("foo"):
# Create two templates with the same name, ensure scopes are made unique.
ta = template.make_template("bar", var_scoped_function, True)
tb = template.make_template("bar", var_scoped_function, True)
@@ -256,7 +269,7 @@ class TemplateTest(tf.test.TestCase):
self.assertEqual(ta.var_scope.name, "foo/bar")
self.assertEqual(tb.var_scope.name, "foo/bar_1")
- with tf.variable_scope("foo_2"):
+ with variable_scope.variable_scope("foo_2"):
# Create a template which defers scope creation.
tc = template.make_template("blah", var_scoped_function, False)
@@ -270,14 +283,15 @@ class TemplateTest(tf.test.TestCase):
def test_custom_getter(self):
# Custom getter that maintains call count and forwards to true getter
custom_getter_count = [0]
+
def custom_getter(getter, name, *args, **kwargs):
custom_getter_count[0] += 1
return getter(name, *args, **kwargs)
# Test that custom getter is called both when variables are created and
# subsequently accessed
- tmpl1 = template.make_template("s1", var_scoped_function,
- custom_getter_=custom_getter)
+ tmpl1 = template.make_template(
+ "s1", var_scoped_function, custom_getter_=custom_getter)
self.assertEqual(custom_getter_count[0], 0)
tmpl1()
self.assertEqual(custom_getter_count[0], 1)
@@ -287,14 +301,17 @@ class TemplateTest(tf.test.TestCase):
# Test that custom getter is called when the variable scope is created
# during construction
custom_getter_count[0] = 0
- tmpl2 = template.make_template("s2", var_scoped_function,
- custom_getter_=custom_getter,
- create_scope_now_=True)
+ tmpl2 = template.make_template(
+ "s2",
+ var_scoped_function,
+ custom_getter_=custom_getter,
+ create_scope_now_=True)
self.assertEqual(custom_getter_count[0], 0)
tmpl2()
self.assertEqual(custom_getter_count[0], 1)
tmpl2()
self.assertEqual(custom_getter_count[0], 2)
+
if __name__ == "__main__":
- tf.test.main()
+ test.main()
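Note on the template hunks above: they apply the commit's single mechanical rule — each tf.foo reference is replaced by an import of the module that actually defines the symbol (variable_scope, template, init_ops, test), so the test no longer pulls in the whole tf hourglass. A minimal self-contained sketch of the resulting style; var_fn and ImportStyleTest are illustrative names for this note, not part of the commit:

# Illustrative test file, not part of the commit: shows the direct-import style.
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import template
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import test


def var_fn():
  # One scoped variable, in the spirit of the var_scoped_function above.
  return variable_scope.get_variable("x", shape=[], dtype=dtypes.float32)


class ImportStyleTest(test.TestCase):

  def test_template_reuses_variable(self):
    tmpl = template.make_template("demo", var_fn)
    self.assertEqual(tmpl(), tmpl())  # the template returns the same variable


if __name__ == "__main__":
  test.main()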
diff --git a/tensorflow/python/kernel_tests/tensor_array_ops_test.py b/tensorflow/python/kernel_tests/tensor_array_ops_test.py
index b7a0b1e5b8..7779048396 100644
--- a/tensorflow/python/kernel_tests/tensor_array_ops_test.py
+++ b/tensorflow/python/kernel_tests/tensor_array_ops_test.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for tensorflow.ops.tensor_array_ops."""
from __future__ import absolute_import
@@ -20,19 +19,32 @@ from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_data_flow_ops
+from tensorflow.python.ops import gradients_impl
+from tensorflow.python.ops import math_ops
from tensorflow.python.ops import tensor_array_grad
+from tensorflow.python.ops import tensor_array_ops
+from tensorflow.python.ops import variables
+import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
+from tensorflow.python.platform import test
-class TensorArrayTest(tf.test.TestCase):
+class TensorArrayTest(test.TestCase):
def testTensorArrayWriteRead(self):
with self.test_session(use_gpu=True) as session:
- ta = tf.TensorArray(
- dtype=tf.float32, tensor_array_name="foo", size=3, infer_shape=False)
+ ta = tensor_array_ops.TensorArray(
+ dtype=dtypes.float32,
+ tensor_array_name="foo",
+ size=3,
+ infer_shape=False)
w0 = ta.write(0, [[4.0, 5.0]])
w1 = w0.write(1, [[1.0]])
@@ -50,10 +62,10 @@ class TensorArrayTest(tf.test.TestCase):
def _testTensorArrayWritePack(self, tf_dtype):
dtype = tf_dtype.as_numpy_dtype()
with self.test_session(use_gpu=True):
- ta = tf.TensorArray(
+ ta = tensor_array_ops.TensorArray(
dtype=tf_dtype, tensor_array_name="foo", size=3)
- if tf_dtype == tf.string:
+ if tf_dtype == dtypes.string:
# In Python3, np.str is unicode, while we always want bytes
convert = lambda x: np.asarray(x).astype("|S")
else:
@@ -69,13 +81,13 @@ class TensorArrayTest(tf.test.TestCase):
convert([[[4.0, 5.0]], [[6.0, 7.0]], [[8.0, 9.0]]]), c0.eval())
def _testTensorArrayWritePackMaybeLegacy(self):
- self._testTensorArrayWritePack(tf.float32)
- self._testTensorArrayWritePack(tf.float64)
- self._testTensorArrayWritePack(tf.int32)
- self._testTensorArrayWritePack(tf.int64)
- self._testTensorArrayWritePack(tf.complex64)
- self._testTensorArrayWritePack(tf.complex128)
- self._testTensorArrayWritePack(tf.string)
+ self._testTensorArrayWritePack(dtypes.float32)
+ self._testTensorArrayWritePack(dtypes.float64)
+ self._testTensorArrayWritePack(dtypes.int32)
+ self._testTensorArrayWritePack(dtypes.int64)
+ self._testTensorArrayWritePack(dtypes.complex64)
+ self._testTensorArrayWritePack(dtypes.complex128)
+ self._testTensorArrayWritePack(dtypes.string)
def testTensorArrayWritePack(self):
self._testTensorArrayWritePackMaybeLegacy()
@@ -83,10 +95,10 @@ class TensorArrayTest(tf.test.TestCase):
def _testTensorArrayWriteConcat(self, tf_dtype):
dtype = tf_dtype.as_numpy_dtype()
with self.test_session(use_gpu=True):
- ta = tf.TensorArray(
+ ta = tensor_array_ops.TensorArray(
dtype=tf_dtype, tensor_array_name="foo", size=3, infer_shape=False)
- if tf_dtype == tf.string:
+ if tf_dtype == dtypes.string:
# In Python3, np.str is unicode, while we always want bytes
convert = lambda x: np.asarray(x).astype("|S")
else:
@@ -99,30 +111,25 @@ class TensorArrayTest(tf.test.TestCase):
c0 = w2.concat()
self.assertAllEqual(
- convert([[4.0, 5.0],
- [104.0, 105.0],
- [204.0, 205.0],
- [6.0, 7.0],
- [106.0, 107.0],
- [8.0, 9.0]]), c0.eval())
+ convert([[4.0, 5.0], [104.0, 105.0], [204.0, 205.0], [6.0, 7.0],
+ [106.0, 107.0], [8.0, 9.0]]), c0.eval())
def testTensorArrayWriteConcat(self):
- self._testTensorArrayWriteConcat(tf.float32)
- self._testTensorArrayWriteConcat(tf.float64)
- self._testTensorArrayWriteConcat(tf.int32)
- self._testTensorArrayWriteConcat(tf.int64)
- self._testTensorArrayWriteConcat(tf.complex64)
- self._testTensorArrayWriteConcat(tf.complex128)
- self._testTensorArrayWriteConcat(tf.string)
+ self._testTensorArrayWriteConcat(dtypes.float32)
+ self._testTensorArrayWriteConcat(dtypes.float64)
+ self._testTensorArrayWriteConcat(dtypes.int32)
+ self._testTensorArrayWriteConcat(dtypes.int64)
+ self._testTensorArrayWriteConcat(dtypes.complex64)
+ self._testTensorArrayWriteConcat(dtypes.complex128)
+ self._testTensorArrayWriteConcat(dtypes.string)
def _testTensorArrayPackNotAllValuesAvailableFails(self):
with self.test_session():
- ta = tf.TensorArray(
- dtype=tf.float32, tensor_array_name="foo", size=3)
+ ta = tensor_array_ops.TensorArray(
+ dtype=dtypes.float32, tensor_array_name="foo", size=3)
- with self.assertRaisesOpError(
- "Could not read from TensorArray index 1 "
- "because it has not yet been written to."):
+ with self.assertRaisesOpError("Could not read from TensorArray index 1 "
+ "because it has not yet been written to."):
ta.write(0, [[4.0, 5.0]]).stack().eval()
def testTensorArrayPackNotAllValuesAvailableFails(self):
@@ -131,10 +138,10 @@ class TensorArrayTest(tf.test.TestCase):
def _testTensorArrayUnpackRead(self, tf_dtype):
dtype = tf_dtype.as_numpy_dtype()
with self.test_session(use_gpu=True) as session:
- ta = tf.TensorArray(
+ ta = tensor_array_ops.TensorArray(
dtype=tf_dtype, tensor_array_name="foo", size=3)
- if tf_dtype is tf.string:
+ if tf_dtype is dtypes.string:
# In Python3, np.str is unicode, while we always want bytes
convert = lambda x: np.asarray(x).astype("|S")
else:
@@ -151,7 +158,7 @@ class TensorArrayTest(tf.test.TestCase):
self.assertAllEqual(convert(2.0), d1)
self.assertAllEqual(convert(3.0), d2)
- ta = tf.TensorArray(
+ ta = tensor_array_ops.TensorArray(
dtype=tf_dtype, tensor_array_name="foo", size=3)
# Unpack a matrix into vectors
@@ -167,7 +174,7 @@ class TensorArrayTest(tf.test.TestCase):
# Reset ta because we're going to change the shape, else shape
# inference will throw an error.
- ta = tf.TensorArray(
+ ta = tensor_array_ops.TensorArray(
dtype=tf_dtype, tensor_array_name="foo", size=3)
# Try unpacking an empty matrix, which should not cause an error.
@@ -182,13 +189,13 @@ class TensorArrayTest(tf.test.TestCase):
self.assertAllEqual(convert([]), d2)
def _testTensorArrayUnpackReadMaybeLegacy(self):
- self._testTensorArrayUnpackRead(tf.float32)
- self._testTensorArrayUnpackRead(tf.float64)
- self._testTensorArrayUnpackRead(tf.int32)
- self._testTensorArrayUnpackRead(tf.int64)
- self._testTensorArrayUnpackRead(tf.complex64)
- self._testTensorArrayUnpackRead(tf.complex128)
- self._testTensorArrayUnpackRead(tf.string)
+ self._testTensorArrayUnpackRead(dtypes.float32)
+ self._testTensorArrayUnpackRead(dtypes.float64)
+ self._testTensorArrayUnpackRead(dtypes.int32)
+ self._testTensorArrayUnpackRead(dtypes.int64)
+ self._testTensorArrayUnpackRead(dtypes.complex64)
+ self._testTensorArrayUnpackRead(dtypes.complex128)
+ self._testTensorArrayUnpackRead(dtypes.string)
def testTensorArrayUnpackRead(self):
self._testTensorArrayUnpackReadMaybeLegacy()
@@ -196,17 +203,17 @@ class TensorArrayTest(tf.test.TestCase):
def _testTensorArraySplitRead(self, tf_dtype):
dtype = tf_dtype.as_numpy_dtype()
with self.test_session(use_gpu=True) as session:
- ta = tf.TensorArray(
+ ta = tensor_array_ops.TensorArray(
dtype=tf_dtype, tensor_array_name="foo", size=3, infer_shape=False)
- if tf_dtype == tf.string:
+ if tf_dtype == dtypes.string:
# In Python3, np.str is unicode, while we always want bytes
convert = lambda x: np.asarray(x).astype("|S")
else:
convert = lambda x: np.asarray(x).astype(dtype)
# Split an empty vector
- lengths = tf.constant([0, 0, 0])
+ lengths = constant_op.constant([0, 0, 0])
w0 = ta.split(convert([]), lengths=lengths)
r0 = w0.read(0)
r1 = w0.read(1)
@@ -218,9 +225,8 @@ class TensorArrayTest(tf.test.TestCase):
self.assertAllEqual(convert([]), d2)
# Split a vector
- lengths = tf.constant([2, 0, 1])
- w0 = ta.split(
- convert([1.0, 2.0, 3.0]), lengths=lengths)
+ lengths = constant_op.constant([2, 0, 1])
+ w0 = ta.split(convert([1.0, 2.0, 3.0]), lengths=lengths)
r0 = w0.read(0)
r1 = w0.read(1)
r2 = w0.read(2)
@@ -231,7 +237,7 @@ class TensorArrayTest(tf.test.TestCase):
self.assertAllEqual(convert([3.0]), d2)
# Split a matrix
- lengths = tf.constant([2, 0, 1])
+ lengths = constant_op.constant([2, 0, 1])
w0 = ta.split(
convert([[1.0, 101.0], [2.0, 201.0], [3.0, 301.0]]), lengths=lengths)
r0 = w0.read(0)
@@ -244,18 +250,21 @@ class TensorArrayTest(tf.test.TestCase):
self.assertAllEqual(convert([[3.0, 301.0]]), d2)
def testTensorArraySplitRead(self):
- self._testTensorArraySplitRead(tf.float32)
- self._testTensorArraySplitRead(tf.float64)
- self._testTensorArraySplitRead(tf.int32)
- self._testTensorArraySplitRead(tf.int64)
- self._testTensorArraySplitRead(tf.complex64)
- self._testTensorArraySplitRead(tf.complex128)
- self._testTensorArraySplitRead(tf.string)
+ self._testTensorArraySplitRead(dtypes.float32)
+ self._testTensorArraySplitRead(dtypes.float64)
+ self._testTensorArraySplitRead(dtypes.int32)
+ self._testTensorArraySplitRead(dtypes.int64)
+ self._testTensorArraySplitRead(dtypes.complex64)
+ self._testTensorArraySplitRead(dtypes.complex128)
+ self._testTensorArraySplitRead(dtypes.string)
def testTensorGradArrayWriteRead(self):
with self.test_session(use_gpu=True) as session:
- ta = tf.TensorArray(
- dtype=tf.float32, tensor_array_name="foo", size=3, infer_shape=False)
+ ta = tensor_array_ops.TensorArray(
+ dtype=dtypes.float32,
+ tensor_array_name="foo",
+ size=3,
+ infer_shape=False)
g_ta = ta.grad("grad")
w0 = ta.write(0, [[4.0, 5.0]])
@@ -284,8 +293,11 @@ class TensorArrayTest(tf.test.TestCase):
def testTensorGradArrayDynamicWriteRead(self):
with self.test_session(use_gpu=True) as session:
- ta = tf.TensorArray(
- dtype=tf.float32, tensor_array_name="foo", size=0, dynamic_size=True,
+ ta = tensor_array_ops.TensorArray(
+ dtype=dtypes.float32,
+ tensor_array_name="foo",
+ size=0,
+ dynamic_size=True,
infer_shape=False)
w0 = ta.write(0, [[4.0, 5.0]])
@@ -309,8 +321,8 @@ class TensorArrayTest(tf.test.TestCase):
g_r1 = g_w2.read(1)
g_r2 = g_w2.read(2)
- d0, d1, d2, g_d0, g_d1, g_d2, vs, g_vs = session.run([
- r0, r1, r2, g_r0, g_r1, g_r2, s, g_s])
+ d0, d1, d2, g_d0, g_d1, g_d2, vs, g_vs = session.run(
+ [r0, r1, r2, g_r0, g_r1, g_r2, s, g_s])
self.assertAllEqual([[4.0, 5.0]], d0)
self.assertAllEqual([[1.0]], d1)
self.assertAllEqual(-3.0, d2)
@@ -322,12 +334,12 @@ class TensorArrayTest(tf.test.TestCase):
def testTensorGradAccessTwiceReceiveSameObject(self):
with self.test_session(use_gpu=True) as session:
- ta = tf.TensorArray(
- dtype=tf.float32, tensor_array_name="foo", size=3)
+ ta = tensor_array_ops.TensorArray(
+ dtype=dtypes.float32, tensor_array_name="foo", size=3)
g_ta_0 = ta.grad("grad")
g_ta_1 = ta.grad("grad")
- with tf.control_dependencies([g_ta_0.write(0, [[4.0, 5.0]]).flow]):
+ with ops.control_dependencies([g_ta_0.write(0, [[4.0, 5.0]]).flow]):
# Write with one gradient handle, read with another copy of it
r1_0 = g_ta_1.read(0)
@@ -338,8 +350,8 @@ class TensorArrayTest(tf.test.TestCase):
def testTensorArrayWriteWrongIndexOrDataTypeFails(self):
with self.test_session(use_gpu=True):
- ta = tf.TensorArray(
- dtype=tf.float32, tensor_array_name="foo", size=3)
+ ta = tensor_array_ops.TensorArray(
+ dtype=dtypes.float32, tensor_array_name="foo", size=3)
# Test writing the wrong datatype
with self.assertRaisesOpError(
@@ -360,14 +372,14 @@ class TensorArrayTest(tf.test.TestCase):
def testTensorArrayReadWrongIndexOrDataTypeFails(self):
with self.test_session(use_gpu=True):
- ta = tf.TensorArray(
- dtype=tf.float32, tensor_array_name="foo", size=3)
+ ta = tensor_array_ops.TensorArray(
+ dtype=dtypes.float32, tensor_array_name="foo", size=3)
w0 = ta.write(0, [[4.0, 5.0]])
# Test reading wrong datatype
r0_bad = gen_data_flow_ops._tensor_array_read_v2(
- handle=w0.handle, index=0, dtype=tf.float64, flow_in=w0.flow)
+ handle=w0.handle, index=0, dtype=dtypes.float64, flow_in=w0.flow)
with self.assertRaisesOpError(
"TensorArray dtype is float but Op requested dtype double."):
r0_bad.eval()
@@ -391,8 +403,8 @@ class TensorArrayTest(tf.test.TestCase):
def testTensorArrayWriteMultipleFails(self):
with self.test_session(use_gpu=True):
- ta = tf.TensorArray(
- dtype=tf.float32, tensor_array_name="foo", size=3)
+ ta = tensor_array_ops.TensorArray(
+ dtype=dtypes.float32, tensor_array_name="foo", size=3)
with self.assertRaisesOpError(
"Could not write to TensorArray index 2 because "
@@ -401,8 +413,11 @@ class TensorArrayTest(tf.test.TestCase):
def testTensorArrayConcatIncompatibleShapesFails(self):
with self.test_session(use_gpu=True):
- ta = tf.TensorArray(
- dtype=tf.float32, tensor_array_name="foo", size=3, infer_shape=False)
+ ta = tensor_array_ops.TensorArray(
+ dtype=dtypes.float32,
+ tensor_array_name="foo",
+ size=3,
+ infer_shape=False)
w1 = ta.write(0, 3.0)
w2 = w1.write(1, 4.0)
@@ -412,8 +427,11 @@ class TensorArrayTest(tf.test.TestCase):
"Concat saw a scalar shape at index 0 but requires at least vectors"):
w3.concat().eval()
- ta = tf.TensorArray(
- dtype=tf.float32, tensor_array_name="foo", size=3, infer_shape=False)
+ ta = tensor_array_ops.TensorArray(
+ dtype=dtypes.float32,
+ tensor_array_name="foo",
+ size=3,
+ infer_shape=False)
w1 = ta.write(0, [3.0])
w2 = w1.write(1, [4.0])
@@ -427,12 +445,15 @@ class TensorArrayTest(tf.test.TestCase):
def testTensorArraySplitIncompatibleShapesFails(self):
with self.test_session(use_gpu=True):
- ta = tf.TensorArray(
- dtype=tf.float32, tensor_array_name="foo", size=3, infer_shape=False)
+ ta = tensor_array_ops.TensorArray(
+ dtype=dtypes.float32,
+ tensor_array_name="foo",
+ size=3,
+ infer_shape=False)
with self.assertRaisesOpError(
r"Expected lengths to be a vector, received shape: \[\]"):
- lengths = tf.placeholder(tf.int64)
+ lengths = array_ops.placeholder(dtypes.int64)
ta.split([1.0, 2.0, 3.0], lengths).flow.eval(feed_dict={lengths: 1})
with self.assertRaisesOpError(
@@ -444,8 +465,11 @@ class TensorArrayTest(tf.test.TestCase):
r"Expected value to be at least a vector, but received shape: \[\]"):
ta.split(1.0, [1]).flow.eval()
- ta = tf.TensorArray(
- dtype=tf.float32, tensor_array_name="foo", size=2, infer_shape=False)
+ ta = tensor_array_ops.TensorArray(
+ dtype=dtypes.float32,
+ tensor_array_name="foo",
+ size=2,
+ infer_shape=False)
with self.assertRaisesOpError(
r"TensorArray's size is not equal to the size of lengths "
@@ -455,7 +479,7 @@ class TensorArrayTest(tf.test.TestCase):
def _testTensorArrayWriteGradientAddMultipleAdds(self, dtype):
with self.test_session(use_gpu=True):
- ta = tf.TensorArray(
+ ta = tensor_array_ops.TensorArray(
dtype=dtype, tensor_array_name="foo", size=3, infer_shape=False)
ta_grad = ta.grad("grad")
@@ -488,19 +512,19 @@ class TensorArrayTest(tf.test.TestCase):
wb1_grad.flow.eval()
def testTensorArrayWriteGradientAddMultipleAdds(self):
- for dtype in (tf.int32, tf.int64, tf.float32,
- tf.float64, tf.complex64, tf.complex128):
+ for dtype in (dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64,
+ dtypes.complex64, dtypes.complex128):
self._testTensorArrayWriteGradientAddMultipleAdds(dtype)
def testMultiTensorArray(self):
with self.test_session(use_gpu=True):
- h1 = tf.TensorArray(
- size=1, dtype=tf.float32, tensor_array_name="foo")
+ h1 = tensor_array_ops.TensorArray(
+ size=1, dtype=dtypes.float32, tensor_array_name="foo")
w1 = h1.write(0, 4.0)
r1 = w1.read(0)
- h2 = tf.TensorArray(
- size=1, dtype=tf.float32, tensor_array_name="bar")
+ h2 = tensor_array_ops.TensorArray(
+ size=1, dtype=dtypes.float32, tensor_array_name="bar")
w2 = h2.write(0, 5.0)
r2 = w2.read(0)
@@ -509,11 +533,11 @@ class TensorArrayTest(tf.test.TestCase):
def testDuplicateTensorArrayHasDifferentName(self):
with self.test_session(use_gpu=True) as session:
- h1 = tf.TensorArray(
- size=1, dtype=tf.float32, tensor_array_name="foo")
+ h1 = tensor_array_ops.TensorArray(
+ size=1, dtype=dtypes.float32, tensor_array_name="foo")
c1 = h1.write(0, 4.0)
- h2 = tf.TensorArray(
- size=1, dtype=tf.float32, tensor_array_name="foo")
+ h2 = tensor_array_ops.TensorArray(
+ size=1, dtype=dtypes.float32, tensor_array_name="foo")
c2 = h2.write(0, 5.0)
_, _, c1h, c2h = session.run([c1.flow, c2.flow, c1.handle, c2.handle])
c1h = [x.decode("ascii") for x in c1h]
@@ -526,14 +550,16 @@ class TensorArrayTest(tf.test.TestCase):
def _testTensorArrayGradientWriteReadType(self, dtype):
with self.test_session(use_gpu=True) as session:
- ta = tf.TensorArray(
- dtype=tf.as_dtype(dtype), tensor_array_name="foo", size=3,
+ ta = tensor_array_ops.TensorArray(
+ dtype=dtypes.as_dtype(dtype),
+ tensor_array_name="foo",
+ size=3,
infer_shape=False)
c = lambda x: np.array(x, dtype=dtype)
- value_0 = tf.constant(c([[4.0, 5.0]]))
- value_1 = tf.constant(c(3.0))
+ value_0 = constant_op.constant(c([[4.0, 5.0]]))
+ value_1 = constant_op.constant(c(3.0))
w0 = ta.write(0, value_0)
w1 = w0.write(1, value_1)
@@ -542,25 +568,27 @@ class TensorArrayTest(tf.test.TestCase):
r0_2 = w1.read(0)
# Test individual components' gradients
- grad_just_r0 = tf.gradients(
+ grad_just_r0 = gradients_impl.gradients(
ys=[r0], xs=[value_0], grad_ys=[c([[2.0, 3.0]])])
grad_just_r0_vals = session.run(grad_just_r0)
self.assertAllEqual(c([[2.0, 3.0]]), grad_just_r0_vals[0])
- grad_r0_r0_2 = tf.gradients(
- ys=[r0, r0_2], xs=[value_0],
+ grad_r0_r0_2 = gradients_impl.gradients(
+ ys=[r0, r0_2],
+ xs=[value_0],
grad_ys=[c([[2.0, 3.0]]), c([[1.0, -1.0]])])
grad_r0_r0_2_vals = session.run(grad_r0_r0_2)
self.assertAllEqual(c([[3.0, 2.0]]), grad_r0_r0_2_vals[0])
- grad_just_r1 = tf.gradients(
+ grad_just_r1 = gradients_impl.gradients(
ys=[r1], xs=[value_1], grad_ys=[c(-2.0)])
grad_just_r1_vals = session.run(grad_just_r1)
self.assertAllEqual(c(-2.0), grad_just_r1_vals[0])
# Test combined gradients
- grad = tf.gradients(
- ys=[r0, r0_2, r1], xs=[value_0, value_1],
+ grad = gradients_impl.gradients(
+ ys=[r0, r0_2, r1],
+ xs=[value_0, value_1],
grad_ys=[c([[2.0, 3.0]]), c([[1.0, -1.0]]), c(-2.0)])
grad_vals = session.run(grad)
self.assertEqual(len(grad_vals), 2)
@@ -568,18 +596,20 @@ class TensorArrayTest(tf.test.TestCase):
self.assertAllEqual(c(-2.0), grad_vals[1])
def testTensorArrayGradientWriteRead(self):
- for dtype in (np.float32, np.float64, np.int32,
- np.int64, np.complex64, np.complex128):
+ for dtype in (np.float32, np.float64, np.int32, np.int64, np.complex64,
+ np.complex128):
self._testTensorArrayGradientWriteReadType(dtype)
def _testTensorArrayGradientWritePackConcatAndRead(self):
with self.test_session(use_gpu=True) as sess:
- ta = tf.TensorArray(
- dtype=tf.float32, tensor_array_name="foo", size=2,
+ ta = tensor_array_ops.TensorArray(
+ dtype=dtypes.float32,
+ tensor_array_name="foo",
+ size=2,
clear_after_read=False)
- value_0 = tf.constant([-1.0, 1.0])
- value_1 = tf.constant([-10.0, 10.0])
+ value_0 = constant_op.constant([-1.0, 1.0])
+ value_1 = constant_op.constant([-10.0, 10.0])
w0 = ta.write(0, value_0)
w1 = w0.write(1, value_1)
@@ -588,13 +618,15 @@ class TensorArrayTest(tf.test.TestCase):
s0 = w1.concat()
# Test gradient accumulation between read(0), pack(), and concat()
- with tf.control_dependencies([p0, r0, s0]):
- grad_r = tf.gradients(
- ys=[p0, r0, s0], xs=[value_0, value_1],
+ with ops.control_dependencies([p0, r0, s0]):
+ grad_r = gradients_impl.gradients(
+ ys=[p0, r0, s0],
+ xs=[value_0, value_1],
grad_ys=[
[[2.0, 3.0], [4.0, 5.0]], # pack gradient
[-0.5, 1.5], # read(0) gradient
- [20.0, 30.0, 40.0, 50.0]]) # concat gradient
+ [20.0, 30.0, 40.0, 50.0]
+ ]) # concat gradient
grad_vals = sess.run(grad_r) # 2 + 2 entries
self.assertAllClose([2.0 - 0.5 + 20.0, 3.0 + 1.5 + 30.0], grad_vals[0])
@@ -605,14 +637,14 @@ class TensorArrayTest(tf.test.TestCase):
def testTensorArrayReadTwice(self):
with self.test_session(use_gpu=True):
- value = tf.constant([[1.0, -1.0], [10.0, -10.0]])
+ value = constant_op.constant([[1.0, -1.0], [10.0, -10.0]])
- ta_readonce = tf.TensorArray(
- dtype=tf.float32, tensor_array_name="foo", size=2)
+ ta_readonce = tensor_array_ops.TensorArray(
+ dtype=dtypes.float32, tensor_array_name="foo", size=2)
w_readonce = ta_readonce.unstack(value)
r0_readonce = w_readonce.read(0)
- with tf.control_dependencies([r0_readonce]):
+ with ops.control_dependencies([r0_readonce]):
r1_readonce = w_readonce.read(0)
with self.assertRaisesOpError(
@@ -620,23 +652,27 @@ class TensorArrayTest(tf.test.TestCase):
r"previous read \(perhaps try setting clear_after_read = false\?\)"):
r1_readonce.eval()
- ta_readtwice = tf.TensorArray(
- dtype=tf.float32, tensor_array_name="foo", size=2,
+ ta_readtwice = tensor_array_ops.TensorArray(
+ dtype=dtypes.float32,
+ tensor_array_name="foo",
+ size=2,
clear_after_read=False)
w_readtwice = ta_readtwice.unstack(value)
r0_readtwice = w_readtwice.read(0)
- with tf.control_dependencies([r0_readtwice]):
+ with ops.control_dependencies([r0_readtwice]):
r1_readtwice = w_readtwice.read(0)
self.assertAllEqual([1.0, -1.0], r1_readtwice.eval())
def _testTensorArrayGradientUnpackRead(self):
with self.test_session(use_gpu=True) as session:
- ta = tf.TensorArray(
- dtype=tf.float32, tensor_array_name="foo", size=2,
+ ta = tensor_array_ops.TensorArray(
+ dtype=dtypes.float32,
+ tensor_array_name="foo",
+ size=2,
clear_after_read=False)
- value = tf.constant([[1.0, -1.0], [10.0, -10.0]])
+ value = constant_op.constant([[1.0, -1.0], [10.0, -10.0]])
w = ta.unstack(value)
r0 = w.read(0)
@@ -644,8 +680,9 @@ class TensorArrayTest(tf.test.TestCase):
r1 = w.read(1)
# Test combined gradients + aggregation of read(0)
- grad = tf.gradients(
- ys=[r0, r0_1, r1], xs=[value],
+ grad = gradients_impl.gradients(
+ ys=[r0, r0_1, r1],
+ xs=[value],
grad_ys=[[2.0, 3.0], [-1.5, 1.5], [4.0, 5.0]])
grad_vals = session.run(grad)
@@ -657,37 +694,42 @@ class TensorArrayTest(tf.test.TestCase):
def testTensorArrayGradientSplitConcat(self):
with self.test_session(use_gpu=True) as session:
- ta = tf.TensorArray(
- dtype=tf.float32, tensor_array_name="foo", size=2)
+ ta = tensor_array_ops.TensorArray(
+ dtype=dtypes.float32, tensor_array_name="foo", size=2)
- value = tf.constant([[1.0, -1.0], [10.0, -10.0], [100.0, -100.0]])
+ value = constant_op.constant(
+ [[1.0, -1.0], [10.0, -10.0], [100.0, -100.0]])
w = ta.split(value, [2, 1])
r = w.concat()
# Test combined gradients
- grad = tf.gradients(
- ys=[r], xs=[value],
+ grad = gradients_impl.gradients(
+ ys=[r],
+ xs=[value],
grad_ys=[[[2.0, -2.0], [20.0, -20.0], [200.0, -200.0]]])
grad_vals = session.run(grad)
self.assertEqual(len(grad_vals), 1)
- self.assertAllEqual(
- [[2.0, -2.0], [20.0, -20.0], [200.0, -200.0]], grad_vals[0])
+ self.assertAllEqual([[2.0, -2.0], [20.0, -20.0], [200.0, -200.0]],
+ grad_vals[0])
def _testTensorArrayGradientDynamicUnpackRead(self):
with self.test_session(use_gpu=True) as session:
- ta = tf.TensorArray(
- dtype=tf.float32, tensor_array_name="foo", size=0, dynamic_size=True)
+ ta = tensor_array_ops.TensorArray(
+ dtype=dtypes.float32,
+ tensor_array_name="foo",
+ size=0,
+ dynamic_size=True)
- value = tf.constant([[1.0, -1.0], [10.0, -10.0]])
+ value = constant_op.constant([[1.0, -1.0], [10.0, -10.0]])
w = ta.unstack(value)
r0 = w.read(0)
r1 = w.read(1)
# Test combined gradients + aggregation of read(0)
- grad = tf.gradients(
+ grad = gradients_impl.gradients(
ys=[r0, r1], xs=[value], grad_ys=[[2.0, 3.0], [4.0, 5.0]])
grad_vals = session.run(grad)
@@ -699,22 +741,25 @@ class TensorArrayTest(tf.test.TestCase):
def testCloseTensorArray(self):
with self.test_session(use_gpu=True) as session:
- ta = tf.TensorArray(
- dtype=tf.float32, tensor_array_name="foo", size=3)
+ ta = tensor_array_ops.TensorArray(
+ dtype=dtypes.float32, tensor_array_name="foo", size=3)
c1 = ta.close()
session.run(c1)
def testSizeTensorArray(self):
with self.test_session(use_gpu=True):
- ta = tf.TensorArray(
- dtype=tf.float32, tensor_array_name="foo", size=3)
+ ta = tensor_array_ops.TensorArray(
+ dtype=dtypes.float32, tensor_array_name="foo", size=3)
s = ta.size()
self.assertAllEqual(3, s.eval())
def testWriteCloseTensorArray(self):
with self.test_session(use_gpu=True):
- ta = tf.TensorArray(
- dtype=tf.float32, tensor_array_name="foo", size=3, infer_shape=False)
+ ta = tensor_array_ops.TensorArray(
+ dtype=dtypes.float32,
+ tensor_array_name="foo",
+ size=3,
+ infer_shape=False)
w0 = ta.write(0, [[4.0, 5.0]])
w1 = w0.write(1, [3.0])
w1.close().run() # Expected to run without problems
@@ -722,38 +767,40 @@ class TensorArrayTest(tf.test.TestCase):
def _testWhileLoopWritePackGradients(self, dynamic_size, dtype):
np_dtype = dtype.as_numpy_dtype
with self.test_session(use_gpu=True) as session:
- v0 = tf.identity(np.arange(3*5, dtype=np_dtype).reshape(3, 5))
- var = tf.Variable(np.arange(100, 105, dtype=np_dtype))
- state0 = tf.identity(np.array([1] * 5, dtype=np_dtype))
- ta = tf.TensorArray(
- dtype=dtype, tensor_array_name="foo",
- size=0 if dynamic_size else 3, dynamic_size=dynamic_size)
- time_0 = tf.identity(0)
+ v0 = array_ops.identity(np.arange(3 * 5, dtype=np_dtype).reshape(3, 5))
+ var = variables.Variable(np.arange(100, 105, dtype=np_dtype))
+ state0 = array_ops.identity(np.array([1] * 5, dtype=np_dtype))
+ ta = tensor_array_ops.TensorArray(
+ dtype=dtype,
+ tensor_array_name="foo",
+ size=0 if dynamic_size else 3,
+ dynamic_size=dynamic_size)
+ time_0 = array_ops.identity(0)
def body(time, ta_t, state):
- sliced = tf.slice(v0, begin=tf.stack([time, 0]), size=[1, -1])
- sliced = tf.squeeze(sliced)
+ sliced = array_ops.slice(
+ v0, begin=array_ops.stack([time, 0]), size=[1, -1])
+ sliced = array_ops.squeeze(sliced)
out = sliced + var + state
state += sliced
ta_t = ta_t.write(time, out)
- return (time+1, ta_t, state)
+ return (time + 1, ta_t, state)
- (unused_0, h_final, unused_2) = tf.while_loop(
+ (unused_0, h_final, unused_2) = control_flow_ops.while_loop(
cond=lambda time, unused_1, unused_2: time < 3,
body=body,
loop_vars=(time_0, ta, state0),
- shape_invariants=(time_0.get_shape(),
- tensor_shape.unknown_shape(),
+ shape_invariants=(time_0.get_shape(), tensor_shape.unknown_shape(),
tensor_shape.unknown_shape()),
parallel_iterations=3)
vout = h_final.stack()
- grad_val = -np.arange(3*5, dtype=np_dtype).reshape(3, 5)
- v0_grad = tf.gradients([vout], [v0], [grad_val])[0]
- state0_grad = tf.gradients([vout], [state0], [grad_val])[0]
- var_grad = tf.gradients([vout], [var], [grad_val])[0]
+ grad_val = -np.arange(3 * 5, dtype=np_dtype).reshape(3, 5)
+ v0_grad = gradients_impl.gradients([vout], [v0], [grad_val])[0]
+ state0_grad = gradients_impl.gradients([vout], [state0], [grad_val])[0]
+ var_grad = gradients_impl.gradients([vout], [var], [grad_val])[0]
- tf.global_variables_initializer().run()
+ variables.global_variables_initializer().run()
state0_t, var_t, v0_t, vout_t, v0_grad_t, var_grad_t, state0_grad_t = (
session.run([state0, var, v0, vout, v0_grad, var_grad, state0_grad]))
just_v0_grad_t, = session.run([v0_grad])
@@ -772,10 +819,8 @@ class TensorArrayTest(tf.test.TestCase):
# d(vout)/d(var) = [1 | 1 | 1]
# d(vout)/d(state0) = [ 1 | 1 | 1 ]
- state_per_time = np.array([
- state0_t,
- state0_t + v0_t[0, :],
- state0_t + v0_t[0, :] + v0_t[1, :]])
+ state_per_time = np.array(
+ [state0_t, state0_t + v0_t[0, :], state0_t + v0_t[0, :] + v0_t[1, :]])
# Compare forward prop
self.assertAllClose(v0_t + var_t + state_per_time, vout_t)
@@ -783,8 +828,8 @@ class TensorArrayTest(tf.test.TestCase):
# Compare backward prop
expected_v0_grad_t = np.array([
grad_val[0, :] + grad_val[1, :] + grad_val[2, :],
- grad_val[1, :] + grad_val[2, :],
- grad_val[2, :]])
+ grad_val[1, :] + grad_val[2, :], grad_val[2, :]
+ ])
self.assertAllEqual(expected_v0_grad_t, v0_grad_t)
self.assertAllEqual(expected_v0_grad_t, just_v0_grad_t)
@@ -793,53 +838,67 @@ class TensorArrayTest(tf.test.TestCase):
def testWhileLoopWritePackGradients(self):
self._testWhileLoopWritePackGradients(
- dynamic_size=False, dtype=tf.float32)
+ dynamic_size=False, dtype=dtypes.float32)
# TODO(ebrevdo): re-enable when While supports non-float32 gradients.
# self._testWhileLoopWritePackGradients(
# dynamic_size=False, dtype=tf.int64)
def testWhileLoopDynamicWritePackGradients(self):
self._testWhileLoopWritePackGradients(
- dynamic_size=True, dtype=tf.float32)
+ dynamic_size=True, dtype=dtypes.float32)
def testGradSerialTwoLoops(self):
with self.test_session():
num_steps = 100
- acc = tf.TensorArray(dtype=tf.float32, size=num_steps,
- clear_after_read=False,
- element_shape=tensor_shape.scalar())
- i = tf.constant(0, name="i")
- x = tf.constant(2.0, name="x")
+ acc = tensor_array_ops.TensorArray(
+ dtype=dtypes.float32,
+ size=num_steps,
+ clear_after_read=False,
+ element_shape=tensor_shape.scalar())
+ i = constant_op.constant(0, name="i")
+ x = constant_op.constant(2.0, name="x")
c = lambda i, acc: i < 5
+
def b(i, acc):
- x1 = tf.cond(tf.equal(i, 0),
- lambda: x,
- lambda: tf.mul(acc.read(i - 1), 2.0))
+ x1 = control_flow_ops.cond(
+ math_ops.equal(i, 0), lambda: x,
+ lambda: math_ops.mul(acc.read(i - 1), 2.0))
return i + 1, acc.write(i, x1)
- i1, acc1 = tf.while_loop(c, b, [i, acc])
- z = tf.constant(0.0)
+ i1, acc1 = control_flow_ops.while_loop(c, b, [i, acc])
+
+ z = constant_op.constant(0.0)
+
def fn(i, acc):
return i + 1, acc.write(i, z)
- _, acc2 = tf.while_loop(lambda i, acc: i < num_steps, fn, [i1, acc1])
+
+ _, acc2 = control_flow_ops.while_loop(lambda i, acc: i < num_steps, fn,
+ [i1, acc1])
r = acc2.stack()
- grad = tf.gradients(r, [x])[0]
+ grad = gradients_impl.gradients(r, [x])[0]
self.assertAllClose(31.0, grad.eval())
def testSumOfTwoReadVariablesWithoutRepeatGrad(self):
with self.test_session(use_gpu=True) as session:
- a = tf.identity(np.arange(3*5, dtype=np.float32).reshape(3, 5) + 1)
- b = tf.identity(np.arange(3*5, dtype=np.float32).reshape(3, 5) + 1 + 3*5)
- ta = tf.TensorArray(dtype=tf.float32, size=2)
+ a = array_ops.identity(
+ np.arange(
+ 3 * 5, dtype=np.float32).reshape(3, 5) + 1)
+ b = array_ops.identity(
+ np.arange(
+ 3 * 5, dtype=np.float32).reshape(3, 5) + 1 + 3 * 5)
+ ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2)
ta = ta.write(0, a, name="write_a")
ta = ta.write(1, b, name="write_b")
- c = (ta.read(0, name="read_a_0") + # a + b
- ta.read(1, name="read_b_0"))
- g0 = -(np.arange(3*5, dtype=np.float32).reshape(3, 5) + 1)
- grad_a = tf.gradients([c], [a], [g0])[0] # d(a+b)/da = 1
- grad_b = tf.gradients([c], [b], [g0])[0] # d(a+b)/db = 1
+ c = (
+ ta.read(
+ 0, name="read_a_0") + # a + b
+ ta.read(
+ 1, name="read_b_0"))
+ g0 = -(np.arange(3 * 5, dtype=np.float32).reshape(3, 5) + 1)
+ grad_a = gradients_impl.gradients([c], [a], [g0])[0] # d(a+b)/da = 1
+ grad_b = gradients_impl.gradients([c], [b], [g0])[0] # d(a+b)/db = 1
# Test gradients calculated individually
grad_a_t, = session.run([grad_a])
@@ -854,7 +913,7 @@ class TensorArrayTest(tf.test.TestCase):
self.assertAllEqual(joint_grad_b_t, g0)
def _grad_source_for_name(self, name):
- return tensor_array_grad._GetGradSource(tf.constant(0, name=name))
+ return tensor_array_grad._GetGradSource(constant_op.constant(0, name=name))
def testGetGradSource_Invalid(self):
with self.assertRaises(ValueError):
@@ -868,28 +927,26 @@ class TensorArrayTest(tf.test.TestCase):
self.assertEqual("gradients:0", self._grad_source_for_name("gradients"))
self.assertEqual("gradients_0:0", self._grad_source_for_name("gradients_0"))
self.assertEqual("gradients", self._grad_source_for_name("gradients/foo"))
- self.assertEqual(
- "gradients_0", self._grad_source_for_name("gradients_0/foo"))
- self.assertEqual(
- "gradients", self._grad_source_for_name("gradients/foo/bar"))
- self.assertEqual(
- "gradients_0", self._grad_source_for_name("gradients_0/foo/bar"))
+ self.assertEqual("gradients_0",
+ self._grad_source_for_name("gradients_0/foo"))
+ self.assertEqual("gradients",
+ self._grad_source_for_name("gradients/foo/bar"))
+ self.assertEqual("gradients_0",
+ self._grad_source_for_name("gradients_0/foo/bar"))
def testGetGradSource_EnclosingScope(self):
- self.assertEqual(
- "foo/gradients:0", self._grad_source_for_name("foo/gradients"))
- self.assertEqual(
- "foo/gradients_0:0", self._grad_source_for_name("foo/gradients_0"))
- self.assertEqual(
- "foo/gradients", self._grad_source_for_name("foo/gradients/bar"))
- self.assertEqual(
- "foo/gradients_0", self._grad_source_for_name("foo/gradients_0/bar"))
- self.assertEqual(
- "foo/bar/gradients",
- self._grad_source_for_name("foo/bar/gradients/baz"))
- self.assertEqual(
- "foo/bar/gradients_0",
- self._grad_source_for_name("foo/bar/gradients_0/baz"))
+ self.assertEqual("foo/gradients:0",
+ self._grad_source_for_name("foo/gradients"))
+ self.assertEqual("foo/gradients_0:0",
+ self._grad_source_for_name("foo/gradients_0"))
+ self.assertEqual("foo/gradients",
+ self._grad_source_for_name("foo/gradients/bar"))
+ self.assertEqual("foo/gradients_0",
+ self._grad_source_for_name("foo/gradients_0/bar"))
+ self.assertEqual("foo/bar/gradients",
+ self._grad_source_for_name("foo/bar/gradients/baz"))
+ self.assertEqual("foo/bar/gradients_0",
+ self._grad_source_for_name("foo/bar/gradients_0/baz"))
def testGetGradSource_NestedUsesInnermost(self):
self.assertEqual(
@@ -898,44 +955,48 @@ class TensorArrayTest(tf.test.TestCase):
def testWriteShape(self):
with self.test_session():
- ta = tf.TensorArray(
- dtype=tf.float32, tensor_array_name="foo", size=3)
- c0 = tf.constant([4.0, 5.0])
+ ta = tensor_array_ops.TensorArray(
+ dtype=dtypes.float32, tensor_array_name="foo", size=3)
+ c0 = constant_op.constant([4.0, 5.0])
w0 = ta.write(0, c0)
r0 = w0.read(0)
self.assertAllEqual(c0.get_shape(), r0.get_shape())
- ta = tf.TensorArray(
- dtype=tf.float32, tensor_array_name="foo", size=3)
- c1 = tf.constant([6.0, 7.0])
+ ta = tensor_array_ops.TensorArray(
+ dtype=dtypes.float32, tensor_array_name="foo", size=3)
+ c1 = constant_op.constant([6.0, 7.0])
w1 = w0.write(1, c1)
r0 = w1.read(0)
r1 = w1.read(1)
self.assertAllEqual(c0.get_shape(), r0.get_shape())
self.assertAllEqual(c1.get_shape(), r1.get_shape())
- ta = tf.TensorArray(
- dtype=tf.float32, tensor_array_name="foo", size=3)
- c2 = tf.constant([4.0, 5.0, 6.0])
+ ta = tensor_array_ops.TensorArray(
+ dtype=dtypes.float32, tensor_array_name="foo", size=3)
+ c2 = constant_op.constant([4.0, 5.0, 6.0])
with self.assertRaises(ValueError):
w0.write(0, c2)
def _testUnpackShape(self):
with self.test_session():
- ta = tf.TensorArray(
- dtype=tf.float32, tensor_array_name="foo",
- size=0, dynamic_size=True, infer_shape=True)
- value = tf.constant([[1.0, -1.0], [10.0, -10.0], [100.0, -100.0]])
+ ta = tensor_array_ops.TensorArray(
+ dtype=dtypes.float32,
+ tensor_array_name="foo",
+ size=0,
+ dynamic_size=True,
+ infer_shape=True)
+ value = constant_op.constant(
+ [[1.0, -1.0], [10.0, -10.0], [100.0, -100.0]])
w0 = ta.unstack(value)
r0 = w0.read(0)
self.assertAllEqual((2,), r0.get_shape())
- c1 = tf.constant([4.0, 5.0])
+ c1 = constant_op.constant([4.0, 5.0])
w1 = w0.write(3, c1)
r1 = w1.read(0)
self.assertAllEqual(c1.get_shape(), r1.get_shape())
- c2 = tf.constant([4.0, 5.0, 6.0])
+ c2 = constant_op.constant([4.0, 5.0, 6.0])
with self.assertRaises(ValueError):
w1.write(4, c2)
@@ -944,38 +1005,47 @@ class TensorArrayTest(tf.test.TestCase):
def testSplitShape(self):
with self.test_session():
- ta = tf.TensorArray(
- dtype=tf.float32, tensor_array_name="foo",
- size=0, dynamic_size=True, infer_shape=True)
- value = tf.constant([[1.0, -1.0], [2.0, -2.0], [3.0, -3.0]])
+ ta = tensor_array_ops.TensorArray(
+ dtype=dtypes.float32,
+ tensor_array_name="foo",
+ size=0,
+ dynamic_size=True,
+ infer_shape=True)
+ value = constant_op.constant([[1.0, -1.0], [2.0, -2.0], [3.0, -3.0]])
w0 = ta.split(value, [1, 1, 1])
r0 = w0.read(0)
self.assertAllEqual((1, 2), r0.get_shape())
- ta1 = tf.TensorArray(
- dtype=tf.float32, tensor_array_name="foo1",
- size=0, dynamic_size=True, infer_shape=True)
+ ta1 = tensor_array_ops.TensorArray(
+ dtype=dtypes.float32,
+ tensor_array_name="foo1",
+ size=0,
+ dynamic_size=True,
+ infer_shape=True)
w0 = ta1.split(value, [1, 2])
r0 = w0.read(0)
self.assertAllEqual(r0.get_shape(), tensor_shape.unknown_shape())
def testWriteUnknownShape(self):
with self.test_session():
- ta = tf.TensorArray(
- dtype=tf.float32, tensor_array_name="foo", size=3, infer_shape=True)
- c0 = tf.placeholder(tf.float32)
+ ta = tensor_array_ops.TensorArray(
+ dtype=dtypes.float32,
+ tensor_array_name="foo",
+ size=3,
+ infer_shape=True)
+ c0 = array_ops.placeholder(dtypes.float32)
w0 = ta.write(0, c0)
r0 = w0.read(0)
self.assertAllEqual(r0.get_shape(), tensor_shape.unknown_shape())
def _testGradientWhenNotAllComponentsRead(self):
with self.test_session(use_gpu=True) as session:
- ta = tf.TensorArray(dtype=tf.float32, size=2)
- x = tf.constant([2.0, 3.0])
+ ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2)
+ x = constant_op.constant([2.0, 3.0])
w = ta.unstack(x)
r0 = w.read(0)
# calculate (dr0/dx0, dr0/dx1). since r0 = x0, gradients are (1, 0).
- grad_r0 = tf.gradients(ys=[r0], xs=[x], grad_ys=[1.0])
+ grad_r0 = gradients_impl.gradients(ys=[r0], xs=[x], grad_ys=[1.0])
grad_r0_vals = session.run(grad_r0)[0]
self.assertAllEqual(grad_r0_vals, [1.0, 0.0])
@@ -984,39 +1054,35 @@ class TensorArrayTest(tf.test.TestCase):
def _testTensorArrayUnpackDynamic(self):
with self.test_session(use_gpu=True) as sess:
- ta = tf.TensorArray(dtype=tf.float32, size=3,
- dynamic_size=True)
- x = tf.constant([1.0, 2.0, 3.0])
+ ta = tensor_array_ops.TensorArray(
+ dtype=dtypes.float32, size=3, dynamic_size=True)
+ x = constant_op.constant([1.0, 2.0, 3.0])
w0 = ta.unstack(x)
w1 = w0.write(3, 4.0)
r = w1.stack()
self.assertAllEqual(np.array([1.0, 2.0, 3.0, 4.0]), r.eval())
- grad = tf.gradients(ys=[r], xs=[x])
- self.assertAllEqual(np.array([1.0, 1.0, 1.0]),
- sess.run(grad)[0])
+ grad = gradients_impl.gradients(ys=[r], xs=[x])
+ self.assertAllEqual(np.array([1.0, 1.0, 1.0]), sess.run(grad)[0])
def testTensorArrayUnpackDynamic(self):
self._testTensorArrayUnpackDynamic()
def testTensorArraySplitDynamic(self):
with self.test_session(use_gpu=True) as sess:
- ta = tf.TensorArray(dtype=tf.float32, size=3,
- dynamic_size=True)
- x = tf.constant([1.0, 2.0, 3.0])
+ ta = tensor_array_ops.TensorArray(
+ dtype=dtypes.float32, size=3, dynamic_size=True)
+ x = constant_op.constant([1.0, 2.0, 3.0])
w0 = ta.split(x, [1, 1, 1])
w1 = w0.write(3, [4.0])
r = w1.concat()
self.assertAllEqual(np.array([1.0, 2.0, 3.0, 4.0]), r.eval())
- grad = tf.gradients(ys=[r], xs=[x])
- self.assertAllEqual(np.array([1.0, 1.0, 1.0]),
- sess.run(grad)[0])
+ grad = gradients_impl.gradients(ys=[r], xs=[x])
+ self.assertAllEqual(np.array([1.0, 1.0, 1.0]), sess.run(grad)[0])
def _testTensorArrayEvalEmpty(self):
with self.test_session(use_gpu=True):
- ta = tf.TensorArray(dtype=tf.float32,
- size=0,
- dynamic_size=False,
- infer_shape=False)
+ ta = tensor_array_ops.TensorArray(
+ dtype=dtypes.float32, size=0, dynamic_size=False, infer_shape=False)
with self.assertRaisesOpError(
"TensorArray has size zero, but element shape <unknown> is not fully "
"defined. Currently only static shapes are supported when packing "
@@ -1028,13 +1094,11 @@ class TensorArrayTest(tf.test.TestCase):
def _testTensorArrayEvalEmptyWithDefault(self):
with self.test_session(use_gpu=True):
- ta = tf.TensorArray(dtype=tf.float32,
- size=0,
- dynamic_size=False,
- infer_shape=True)
+ ta = tensor_array_ops.TensorArray(
+ dtype=dtypes.float32, size=0, dynamic_size=False, infer_shape=True)
self.assertEqual(0, ta.size().eval())
# Don't actually perform the pack. This stores the static shape.
- ta.unstack(tf.zeros([0, 3, 5]))
+ ta.unstack(array_ops.zeros([0, 3, 5]))
packed = ta.stack()
self.assertAllEqual([0, 3, 5], packed.eval().shape)
# Concatenating zero tensors along their first dimension gives a
@@ -1046,18 +1110,21 @@ class TensorArrayTest(tf.test.TestCase):
def testTensorArrayScatterReadAndGradients(self):
with self.test_session(use_gpu=True) as session:
- ta = tf.TensorArray(
- dtype=tf.float32, tensor_array_name="foo", size=0, dynamic_size=True)
+ ta = tensor_array_ops.TensorArray(
+ dtype=dtypes.float32,
+ tensor_array_name="foo",
+ size=0,
+ dynamic_size=True)
- indices = tf.constant([1, 8])
- value = tf.constant([[1.0, -1.0], [10.0, -10.0]])
+ indices = constant_op.constant([1, 8])
+ value = constant_op.constant([[1.0, -1.0], [10.0, -10.0]])
w = ta.scatter(indices, value)
r0 = w.read(1)
r1 = w.read(8)
# Test combined gradients + aggregation of read(0)
- grad = tf.gradients(
+ grad = gradients_impl.gradients(
ys=[r0, r1], xs=[value], grad_ys=[[2.0, 3.0], [4.0, 5.0]])
read_vals, grad_vals = session.run([[r0, r1], grad])
@@ -1069,17 +1136,20 @@ class TensorArrayTest(tf.test.TestCase):
def testTensorArrayWriteGatherAndGradients(self):
with self.test_session(use_gpu=True) as session:
- ta = tf.TensorArray(
- dtype=tf.float32, tensor_array_name="foo", size=0, dynamic_size=True)
+ ta = tensor_array_ops.TensorArray(
+ dtype=dtypes.float32,
+ tensor_array_name="foo",
+ size=0,
+ dynamic_size=True)
- values = tf.constant([[1.0*x, -1.0*x] for x in range(10)])
- indices = tf.constant([1, 8])
+ values = constant_op.constant([[1.0 * x, -1.0 * x] for x in range(10)])
+ indices = constant_op.constant([1, 8])
w = ta.unstack(values)
g = w.gather(indices)
# Test combined gradients + aggregation of read(0)
- grad = tf.gradients(
+ grad = gradients_impl.gradients(
ys=[g], xs=[values], grad_ys=[[[2.0, 3.0], [4.0, 5.0]]])
g_vals, grad_vals = session.run([[g], grad])
@@ -1094,17 +1164,17 @@ class TensorArrayTest(tf.test.TestCase):
self.assertAllEqual(expected_grad, grad_vals[0])
def testTensorArrayGetsDeviceFromFirstWrite(self):
- with tf.device("/gpu:1"):
- ta = tf.TensorArray(dtype=tf.float32, size=2)
+ with ops.device("/gpu:1"):
+ ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2)
# parent device was ignored when creating the TensorArray
self.assertEqual(ta.handle.device, "")
self.assertEqual(ta.flow.device, "")
- with tf.device("/gpu:0"):
+ with ops.device("/gpu:0"):
# the first write sets the op's device
ta = ta.write(0, 1.0)
self.assertTrue("gpu:0" in ta.handle.device.lower())
self.assertTrue("gpu:0" in ta.flow.device.lower())
- with tf.device("/gpu:1"):
+ with ops.device("/gpu:1"):
# subsequent writes do not modify the op's device
ta = ta.write(1, 1.0)
self.assertTrue("gpu:0" in ta.handle.device.lower())
@@ -1115,40 +1185,41 @@ class TensorArrayTest(tf.test.TestCase):
self.assertTrue("gpu:0" in ta_grad.flow.device.lower())
# Similar tests for unpack and split
- ta = tf.TensorArray(dtype=tf.float32, size=2)
+ ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2)
self.assertEqual(ta.handle.device, "")
self.assertEqual(ta.flow.device, "")
- with tf.device("/gpu:0"):
+ with ops.device("/gpu:0"):
ta = ta.unstack([1.0, 2.0])
self.assertTrue("gpu:0" in ta.handle.device.lower())
self.assertTrue("gpu:0" in ta.flow.device.lower())
- with tf.device("/gpu:1"):
+ with ops.device("/gpu:1"):
ta = ta.unstack([1.0, 2.0])
self.assertTrue("gpu:0" in ta.handle.device.lower())
self.assertTrue("gpu:0" in ta.flow.device.lower())
- ta = tf.TensorArray(dtype=tf.float32, size=2)
+ ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2)
self.assertEqual(ta.handle.device, "")
self.assertEqual(ta.flow.device, "")
- with tf.device("/gpu:0"):
+ with ops.device("/gpu:0"):
ta = ta.split([1.0, 2.0], [1, 1])
self.assertTrue("gpu:0" in ta.handle.device.lower())
self.assertTrue("gpu:0" in ta.flow.device.lower())
- with tf.device("/gpu:1"):
+ with ops.device("/gpu:1"):
ta = ta.split([1.0, 2.0], [1, 1])
self.assertTrue("gpu:0" in ta.handle.device.lower())
self.assertTrue("gpu:0" in ta.flow.device.lower())
def testTensorArrayGetsDeviceFromFirstWriteInWhileLoop(self):
- ta = tf.TensorArray(dtype=tf.float32, size=2)
+ ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2)
+
def _body(i, ta_i):
- with tf.device("/gpu:0"):
+ with ops.device("/gpu:0"):
return i + 1, ta_i.write(i, 0.0)
self.assertEqual(ta.handle.device, "")
self.assertEqual(ta.flow.device, "")
- _, ta_out = tf.while_loop(
+ _, ta_out = control_flow_ops.while_loop(
lambda i, ta: i < 2, _body, loop_vars=[0, ta])
self.assertTrue("gpu:0" in ta_out.handle.device.lower())
@@ -1156,12 +1227,12 @@ class TensorArrayTest(tf.test.TestCase):
def testTensorArrayLazyDeviceSettingDoesNotConfuseInitialAccess(self):
with self.test_session(use_gpu=True) as session:
- ta = tf.TensorArray(dtype=tf.float32, size=2)
+ ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2)
self.assertEqual(ta.handle.device, "")
- with tf.device("/cpu:0"):
+ with ops.device("/cpu:0"):
size = ta.size()
- with tf.device("/gpu:0"):
+ with ops.device("/gpu:0"):
ta = ta.write(0, 0.0)
self.assertTrue("gpu:0" in ta.handle.device.lower())
@@ -1172,4 +1243,4 @@ class TensorArrayTest(tf.test.TestCase):
if __name__ == "__main__":
- tf.test.main()
+ test.main()
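The TensorArray hunks above repeatedly pair unstack/write with read and push gradients through gradients_impl.gradients. A compact sketch of that core pattern; the tensorflow.python.client.session import is this note's assumption for running the graph, and the literal values mirror _testTensorArrayGradientDynamicUnpackRead above:

from tensorflow.python.client import session  # assumed here to run the graph
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import gradients_impl
# Importing tensor_array_grad registers the TensorArray gradient functions.
from tensorflow.python.ops import tensor_array_grad  # pylint: disable=unused-import
from tensorflow.python.ops import tensor_array_ops

value = constant_op.constant([[1.0, -1.0], [10.0, -10.0]])
ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2)
w = ta.unstack(value)  # row i of value becomes element i
r0 = w.read(0)
r1 = w.read(1)
# Each read routes its incoming gradient back to the matching row of value.
grad = gradients_impl.gradients(
    ys=[r0, r1], xs=[value], grad_ys=[[2.0, 3.0], [4.0, 5.0]])[0]

with session.Session() as sess:
  print(sess.run(grad))  # [[2., 3.], [4., 5.]]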
diff --git a/tensorflow/python/kernel_tests/topk_op_test.py b/tensorflow/python/kernel_tests/topk_op_test.py
index 1f9632ef03..9d89e250f5 100644
--- a/tensorflow/python/kernel_tests/topk_op_test.py
+++ b/tensorflow/python/kernel_tests/topk_op_test.py
@@ -12,24 +12,35 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for TopK op."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import gradients_impl
+from tensorflow.python.ops import nn_ops
+import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
+from tensorflow.python.platform import test
-class TopKTest(tf.test.TestCase):
+class TopKTest(test.TestCase):
- def _validateTopK(
- self, inputs, k, expected_values, expected_indices, sorted=True):
+ def _validateTopK(self,
+ inputs,
+ k,
+ expected_values,
+ expected_indices,
+ sorted=True):
np_values = np.array(expected_values)
np_indices = np.array(expected_indices)
with self.test_session():
- values_op, indices_op = tf.nn.top_k(inputs, k, sorted=sorted)
+ values_op, indices_op = nn_ops.top_k(inputs, k, sorted=sorted)
values = values_op.eval()
indices = indices_op.eval()
self.assertAllClose(np_values, values)
@@ -39,27 +50,23 @@ class TopKTest(tf.test.TestCase):
def testTop1(self):
inputs = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.3, 0.3, 0.2]]
- self._validateTopK(inputs, 1,
- [[0.4], [0.3]],
- [[3], [1]])
+ self._validateTopK(inputs, 1, [[0.4], [0.3]], [[3], [1]])
def testTop2(self):
inputs = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.3, 0.3, 0.2]]
- self._validateTopK(inputs, 2,
- [[0.4, 0.3], [0.3, 0.3]],
- [[3, 1], [1, 2]])
+ self._validateTopK(inputs, 2, [[0.4, 0.3], [0.3, 0.3]], [[3, 1], [1, 2]])
def testTopAll(self):
inputs = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.3, 0.3, 0.2]]
- self._validateTopK(inputs, 4,
- [[0.4, 0.3, 0.2, 0.1], [0.3, 0.3, 0.2, 0.1]],
+ self._validateTopK(inputs, 4, [[0.4, 0.3, 0.2, 0.1], [0.3, 0.3, 0.2, 0.1]],
[[3, 1, 2, 0], [1, 2, 3, 0]])
def testTop3Unsorted(self):
inputs = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.3, 0.3, 0.2]]
- self._validateTopK(inputs, 3,
- [[0.2, 0.3, 0.4], [0.2, 0.3, 0.3]],
- [[2, 1, 3], [3, 1, 2]], sorted=False)
+ self._validateTopK(
+ inputs,
+ 3, [[0.2, 0.3, 0.4], [0.2, 0.3, 0.3]], [[2, 1, 3], [3, 1, 2]],
+ sorted=False)
def testTop3Vector(self):
inputs = [3, 6, 15, 18, 6, 12, 1, 17, 3, 0, 4, 19, 1, 6]
@@ -67,34 +74,33 @@ class TopKTest(tf.test.TestCase):
def testTensorK(self):
inputs = [3, 6, 15, 18, 6, 12, 1, 17, 3, 0, 4, 19, 1, 6]
- k = tf.constant(3)
+ k = constant_op.constant(3)
self._validateTopK(inputs, k, [19, 18, 17], [11, 3, 7])
def testKNegative(self):
inputs = [[0.1, 0.2], [0.3, 0.4]]
with self.test_session():
- k = tf.placeholder(tf.int32)
- values, _ = tf.nn.top_k(inputs, k)
+ k = array_ops.placeholder(dtypes.int32)
+ values, _ = nn_ops.top_k(inputs, k)
with self.assertRaisesOpError("Need k >= 0, got -7"):
values.eval(feed_dict={k: -7})
def testKTooLarge(self):
inputs = [[0.1, 0.2], [0.3, 0.4]]
- with self.assertRaisesRegexp(
- ValueError, r"must have last dimension >= k = 4"):
- tf.nn.top_k(inputs, 4)
+ with self.assertRaisesRegexp(ValueError,
+ r"must have last dimension >= k = 4"):
+ nn_ops.top_k(inputs, 4)
def testTopKGradients(self):
with self.test_session() as sess:
- inputs = tf.placeholder(tf.int32, shape=[2, 5])
- values, _ = tf.nn.top_k(inputs, 3)
+ inputs = array_ops.placeholder(dtypes.int32, shape=[2, 5])
+ values, _ = nn_ops.top_k(inputs, 3)
grad = sess.run(
- tf.gradients(values,
- inputs,
- grad_ys=[[[1, 2, 3], [4, 5, 6]]]),
+ gradients_impl.gradients(
+ values, inputs, grad_ys=[[[1, 2, 3], [4, 5, 6]]]),
feed_dict={inputs: [[2, -1, 1000, 3, 4], [1, 5, 2, 4, 3]]})[0]
self.assertEqual(grad.tolist(), [[0, 0, 1, 3, 2], [0, 4, 0, 5, 6]])
if __name__ == "__main__":
- tf.test.main()
+ test.main()
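The same rewrite applies here: tf.nn.top_k becomes nn_ops.top_k, returning a (values, indices) pair. A small usage sketch under the same session-import assumption as the TensorArray note above:

from tensorflow.python.client import session  # assumed here to run the graph
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import nn_ops

inputs = constant_op.constant([[0.1, 0.3, 0.2, 0.4]])
values, indices = nn_ops.top_k(inputs, k=2, sorted=True)

with session.Session() as sess:
  v, i = sess.run([values, indices])
  # v -> [[0.4, 0.3]] and i -> [[3, 1]]: the two largest entries per row
  # and their original positions, matching testTop2 above.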
diff --git a/tensorflow/python/kernel_tests/trace_op_test.py b/tensorflow/python/kernel_tests/trace_op_test.py
index 759d6a708b..a5d5bcc149 100644
--- a/tensorflow/python/kernel_tests/trace_op_test.py
+++ b/tensorflow/python/kernel_tests/trace_op_test.py
@@ -18,10 +18,12 @@ from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+from tensorflow.python.ops import math_ops
+from tensorflow.python.platform import test
-class TraceTest(tf.test.TestCase):
+
+class TraceTest(test.TestCase):
def setUp(self):
x = np.random.seed(0)
@@ -29,7 +31,7 @@ class TraceTest(tf.test.TestCase):
def compare(self, x):
np_ans = np.trace(x, axis1=-2, axis2=-1)
with self.test_session(use_gpu=True):
- tf_ans = tf.trace(x).eval()
+ tf_ans = math_ops.trace(x).eval()
self.assertAllClose(tf_ans, np_ans)
def testTrace(self):
@@ -40,4 +42,4 @@ class TraceTest(tf.test.TestCase):
if __name__ == "__main__":
- tf.test.main()
+ test.main()
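trace_op_test.py now calls math_ops.trace directly; as the test's compare() shows, it matches np.trace over the last two axes. A short sketch, again assuming the session import used in the notes above:

import numpy as np

from tensorflow.python.client import session  # assumed here to run the graph
from tensorflow.python.ops import math_ops

x = np.arange(1.0, 10.0).reshape(3, 3)  # [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
tr = math_ops.trace(x)  # diagonal sum: 1 + 5 + 9

with session.Session() as sess:
  print(sess.run(tr))  # 15.0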
diff --git a/tensorflow/python/kernel_tests/transpose_op_test.py b/tensorflow/python/kernel_tests/transpose_op_test.py
index 5bc3f5358a..968996be4b 100644
--- a/tensorflow/python/kernel_tests/transpose_op_test.py
+++ b/tensorflow/python/kernel_tests/transpose_op_test.py
@@ -12,18 +12,26 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Functional tests for Transpose op."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
+
import numpy as np
-import tensorflow as tf
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
+from tensorflow.python.framework import tensor_shape
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import gradient_checker
+from tensorflow.python.platform import test
-class TransposeTest(tf.test.TestCase):
+
+class TransposeTest(test.TestCase):
def _np_transpose(self, x, perm):
ret = np.copy(x)
@@ -33,8 +41,8 @@ class TransposeTest(tf.test.TestCase):
def _compareCpu(self, x, p):
np_ans = self._np_transpose(x, p)
with self.test_session(use_gpu=False):
- inx = tf.convert_to_tensor(x)
- y = tf.transpose(inx, p)
+ inx = ops.convert_to_tensor(x)
+ y = array_ops.transpose(inx, p)
tf_ans = y.eval()
self.assertAllEqual(np_ans, tf_ans)
self.assertShapeEqual(np_ans, y)
@@ -44,10 +52,12 @@ class TransposeTest(tf.test.TestCase):
xs = list(np.shape(x))
ys = list(np.shape(tf_ans))
if x.dtype == np.float32:
- jacob_t, jacob_n = tf.test.compute_gradient(inx, xs, y, ys, x, 1e-2)
+ jacob_t, jacob_n = gradient_checker.compute_gradient(inx, xs, y, ys, x,
+ 1e-2)
self.assertAllClose(jacob_t, jacob_n, 1e-3, 1e-3)
elif x.dtype == np.float64:
- jacob_t, jacob_n = tf.test.compute_gradient(inx, xs, y, ys, x, 1e-2)
+ jacob_t, jacob_n = gradient_checker.compute_gradient(inx, xs, y, ys, x,
+ 1e-2)
self.assertAllClose(jacob_t, jacob_n, 1e-6, 1e-6)
return tf_ans, jacob_t
@@ -55,8 +65,8 @@ class TransposeTest(tf.test.TestCase):
def _compareGpu(self, x, p):
np_ans = self._np_transpose(x, p)
with self.test_session(use_gpu=True):
- inx = tf.convert_to_tensor(x)
- y = tf.transpose(inx, p)
+ inx = ops.convert_to_tensor(x)
+ y = array_ops.transpose(inx, p)
tf_ans = y.eval()
self.assertAllEqual(np_ans, tf_ans)
@@ -67,10 +77,12 @@ class TransposeTest(tf.test.TestCase):
xs = list(np.shape(x))
ys = list(np.shape(tf_ans))
if x.dtype == np.float32:
- jacob_t, jacob_n = tf.test.compute_gradient(inx, xs, y, ys, x, 1e-2)
+ jacob_t, jacob_n = gradient_checker.compute_gradient(inx, xs, y, ys, x,
+ 1e-2)
self.assertAllClose(jacob_t, jacob_n, 1e-3, 1e-3)
elif x.dtype == np.float64:
- jacob_t, jacob_n = tf.test.compute_gradient(inx, xs, y, ys, x, 1e-2)
+ jacob_t, jacob_n = gradient_checker.compute_gradient(inx, xs, y, ys, x,
+ 1e-2)
self.assertAllClose(jacob_t, jacob_n, 1e-6, 1e-6)
return tf_ans, jacob_t
@@ -119,13 +131,13 @@ class TransposeTest(tf.test.TestCase):
self._compareCpu(np.arange(0, 6).reshape([3, 2]).astype(np.float32), [0, 1])
def testSimple(self):
- self._compareCpu(np.arange(0, 8).reshape([2, 4]).astype(np.float32),
- np.array([1, 0]).astype(np.int32))
+ self._compareCpu(
+ np.arange(0, 8).reshape([2, 4]).astype(np.float32),
+ np.array([1, 0]).astype(np.int32))
def testHalf(self):
self._compare(np.arange(0, 21).reshape([3, 7]).astype(np.float16))
- self._compare(
- np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.float16))
+ self._compare(np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.float16))
self._compare(
np.arange(0, 16).reshape([1, 2, 1, 2, 1, 2, 1, 2]).astype(np.float16))
@@ -144,19 +156,23 @@ class TransposeTest(tf.test.TestCase):
np.arange(0, 16).reshape([1, 2, 1, 2, 1, 2, 1, 2]).astype(np.float64))
def testComplex64(self):
- self._testBoth(np.complex(1, 2) *
- np.arange(0, 21).reshape([3, 7]).astype(np.complex64))
- self._testBoth(np.complex(1, 2) *
- np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.complex64))
+ self._testBoth(
+ np.complex(1, 2) *
+ np.arange(0, 21).reshape([3, 7]).astype(np.complex64))
+ self._testBoth(
+ np.complex(1, 2) *
+ np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.complex64))
self._testBoth(
np.complex(1, 2) *
np.arange(0, 1260).reshape([2, 3, 5, 7, 2, 3]).astype(np.complex64))
def testComplex128(self):
- self._testBoth(np.complex(1, 2) *
- np.arange(0, 21).reshape([3, 7]).astype(np.complex128))
- self._testBoth(np.complex(1, 2) *
- np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.complex128))
+ self._testBoth(
+ np.complex(1, 2) *
+ np.arange(0, 21).reshape([3, 7]).astype(np.complex128))
+ self._testBoth(
+ np.complex(1, 2) *
+ np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.complex128))
self._testBoth(
np.complex(1, 2) *
np.arange(0, 1260).reshape([2, 3, 5, 7, 2, 3]).astype(np.complex128))
@@ -164,73 +180,90 @@ class TransposeTest(tf.test.TestCase):
def testInt8(self):
self._testBoth(np.arange(0, 21).reshape([3, 7]).astype(np.int8))
self._testBoth(np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.int8))
- self._testBoth(np.arange(0, 1260).reshape([2, 3, 5, 7, 2, 3]).astype(
- np.int8))
+ self._testBoth(
+ np.arange(0, 1260).reshape([2, 3, 5, 7, 2, 3]).astype(np.int8))
def testInt16(self):
self._testBoth(np.arange(0, 21).reshape([3, 7]).astype(np.int16))
self._testBoth(np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.int16))
- self._testBoth(np.arange(0, 1260).reshape([2, 3, 5, 7, 2, 3]).astype(
- np.int16))
+ self._testBoth(
+ np.arange(0, 1260).reshape([2, 3, 5, 7, 2, 3]).astype(np.int16))
def testInt32(self):
self._testBoth(np.arange(0, 21).reshape([3, 7]).astype(np.int32))
self._testBoth(np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.int32))
- self._testBoth(np.arange(0, 1260).reshape([2, 3, 5, 7, 2, 3]).astype(
- np.int32))
+ self._testBoth(
+ np.arange(0, 1260).reshape([2, 3, 5, 7, 2, 3]).astype(np.int32))
def testInt64(self):
self._testBoth(np.arange(0, 21).reshape([3, 7]).astype(np.int64))
self._testBoth(np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.int64))
- self._testBoth(np.arange(0, 1260).reshape([2, 3, 5, 7, 2, 3]).astype(
- np.int64))
+ self._testBoth(
+ np.arange(0, 1260).reshape([2, 3, 5, 7, 2, 3]).astype(np.int64))
def testTranspose2DAuto(self):
x_np = [[1, 2, 3], [4, 5, 6]]
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
- x_tf = tf.transpose(x_np).eval()
+ x_tf = array_ops.transpose(x_np).eval()
self.assertAllEqual(x_tf, [[1, 4], [2, 5], [3, 6]])
def testTransposeShapes(self):
- self.assertEqual([], tf.transpose(
- tf.placeholder(tf.int32, shape=[])).get_shape().dims)
- self.assertEqual([100], tf.transpose(
- tf.placeholder(tf.int32, shape=[100])).get_shape().dims)
- self.assertEqual([37, 100], tf.transpose(
- tf.placeholder(tf.int32, shape=[100, 37])).get_shape().dims)
- self.assertEqual([100, 37], tf.transpose(
- tf.placeholder(tf.int32, shape=[100, 37]), [0, 1]).get_shape().dims)
- self.assertEqual([15, 37, 100], tf.transpose(
- tf.placeholder(tf.int32, shape=[100, 37, 15])).get_shape().dims)
- self.assertEqual([15, 100, 37], tf.transpose(
- tf.placeholder(tf.int32,
- shape=[100, 37, 15]), [2, 0, 1]).get_shape().dims)
- self.assertEqual(tf.TensorShape(None), tf.transpose(
- tf.placeholder(tf.int32)).get_shape())
+ self.assertEqual(
+ [],
+ array_ops.transpose(array_ops.placeholder(
+ dtypes.int32, shape=[])).get_shape().dims)
+ self.assertEqual(
+ [100],
+ array_ops.transpose(array_ops.placeholder(
+ dtypes.int32, shape=[100])).get_shape().dims)
+ self.assertEqual(
+ [37, 100],
+ array_ops.transpose(
+ array_ops.placeholder(
+ dtypes.int32, shape=[100, 37])).get_shape().dims)
+ self.assertEqual(
+ [100, 37],
+ array_ops.transpose(
+ array_ops.placeholder(
+ dtypes.int32, shape=[100, 37]), [0, 1]).get_shape().dims)
+ self.assertEqual(
+ [15, 37, 100],
+ array_ops.transpose(
+ array_ops.placeholder(
+ dtypes.int32, shape=[100, 37, 15])).get_shape().dims)
+ self.assertEqual(
+ [15, 100, 37],
+ array_ops.transpose(
+ array_ops.placeholder(
+ dtypes.int32, shape=[100, 37, 15]), [2, 0, 1]).get_shape().dims)
+ self.assertEqual(
+ tensor_shape.TensorShape(None),
+ array_ops.transpose(array_ops.placeholder(dtypes.int32)).get_shape())
def testNullTensor(self):
with self.test_session():
- x = tf.constant([], dtype=tf.float32, shape=[1, 4, 0])
- xt = tf.transpose(x, [0, 2, 1]).eval()
+ x = constant_op.constant([], dtype=dtypes.float32, shape=[1, 4, 0])
+ xt = array_ops.transpose(x, [0, 2, 1]).eval()
self.assertAllEqual(xt.shape, (1, 0, 4))
def _testError(self, x, p, err):
with self.test_session():
with self.assertRaisesOpError(err):
- tf.transpose(x, p).eval()
+ array_ops.transpose(x, p).eval()
def testError(self):
with self.assertRaises(ValueError):
- tf.transpose(np.arange(0., 30).reshape([2, 3, 5]), [[0, 1], [2, 3]])
- self._testError(np.arange(0., 2 ** 11).reshape([2] * 11),
- np.arange(11),
- "not implemented")
+ array_ops.transpose(
+ np.arange(0., 30).reshape([2, 3, 5]), [[0, 1], [2, 3]])
+ self._testError(
+ np.arange(0., 2**11).reshape([2] * 11), np.arange(11),
+ "not implemented")
with self.assertRaises(ValueError):
- tf.transpose(np.arange(0., 30).reshape([2, 3, 5]), [0, 1, 3])
- self._testError(np.arange(0., 30).reshape([2, 3, 5]),
- [0, 1, 1],
- "2 is missing")
+ array_ops.transpose(np.arange(0., 30).reshape([2, 3, 5]), [0, 1, 3])
+ self._testError(
+ np.arange(0., 30).reshape([2, 3, 5]), [0, 1, 1], "2 is missing")
+
if __name__ == "__main__":
- tf.test.main()
+ test.main()
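
The transpose rewrite above is representative of the whole change: each top-level tf.* lookup becomes a call on the module that actually defines the op. A minimal standalone sketch of the pattern, assuming the 2016-era module layout shown in this diff (a plain Session stands in for the test's self.test_session(); the float32/float64 gradient tolerances of 1e-3 and 1e-6 are unchanged):

import numpy as np

from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops

# Before (tf namespace):  y = tf.transpose(tf.convert_to_tensor(x), perm)
# After (direct module imports, as in this diff):
x = np.arange(6, dtype=np.float32).reshape([2, 3])
inx = ops.convert_to_tensor(x)        # replaces tf.convert_to_tensor
y = array_ops.transpose(inx, [1, 0])  # replaces tf.transpose
with session.Session() as sess:
    print(sess.run(y))  # [[0. 3.] [1. 4.] [2. 5.]]
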
diff --git a/tensorflow/python/kernel_tests/unique_op_test.py b/tensorflow/python/kernel_tests/unique_op_test.py
index b92c1c1a5e..a1903887c7 100644
--- a/tensorflow/python/kernel_tests/unique_op_test.py
+++ b/tensorflow/python/kernel_tests/unique_op_test.py
@@ -12,22 +12,24 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for tensorflow.kernels.unique_op."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+
+from tensorflow.python.ops import array_ops
+from tensorflow.python.platform import test
-class UniqueTest(tf.test.TestCase):
+class UniqueTest(test.TestCase):
def testInt32(self):
x = np.random.randint(2, high=10, size=7000)
with self.test_session() as sess:
- y, idx = tf.unique(x)
+ y, idx = array_ops.unique(x)
tf_y, tf_idx = sess.run([y, idx])
self.assertEqual(len(x), len(tf_idx))
@@ -39,7 +41,7 @@ class UniqueTest(tf.test.TestCase):
indx = np.random.randint(65, high=122, size=7000)
x = [chr(i) for i in indx]
with self.test_session() as sess:
- y, idx = tf.unique(x)
+ y, idx = array_ops.unique(x)
tf_y, tf_idx = sess.run([y, idx])
self.assertEqual(len(x), len(tf_idx))
@@ -48,12 +50,12 @@ class UniqueTest(tf.test.TestCase):
self.assertEqual(x[i], tf_y[tf_idx[i]].decode('ascii'))
-class UniqueWithCountsTest(tf.test.TestCase):
+class UniqueWithCountsTest(test.TestCase):
def testInt32(self):
x = np.random.randint(2, high=10, size=7000)
with self.test_session() as sess:
- y, idx, count = tf.unique_with_counts(x)
+ y, idx, count = array_ops.unique_with_counts(x)
tf_y, tf_idx, tf_count = sess.run([y, idx, count])
self.assertEqual(len(x), len(tf_idx))
@@ -68,7 +70,7 @@ class UniqueWithCountsTest(tf.test.TestCase):
x = [chr(i) for i in indx]
with self.test_session() as sess:
- y, idx, count = tf.unique_with_counts(x)
+ y, idx, count = array_ops.unique_with_counts(x)
tf_y, tf_idx, tf_count = sess.run([y, idx, count])
self.assertEqual(len(x), len(tf_idx))
@@ -81,4 +83,4 @@ class UniqueWithCountsTest(tf.test.TestCase):
if __name__ == '__main__':
- tf.test.main()
+ test.main()
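
The unique tests get the same import treatment. array_ops.unique returns the distinct values plus an index mapping each input element back into that list; array_ops.unique_with_counts additionally returns per-value counts. A minimal sketch consistent with what these tests assert, assuming the module paths used in the diff (a plain Session replaces self.test_session()):

import numpy as np

from tensorflow.python.client import session
from tensorflow.python.ops import array_ops

x = np.array([1, 1, 2, 4, 4, 4, 7, 8, 8], dtype=np.int32)
y, idx, count = array_ops.unique_with_counts(x)  # was tf.unique_with_counts
with session.Session() as sess:
    tf_y, tf_idx, tf_count = sess.run([y, idx, count])
# The index vector reconstructs the input, and the counts sum to its
# length: exactly the invariants UniqueWithCountsTest checks.
assert np.array_equal(tf_y[tf_idx], x)
assert tf_count.sum() == len(x)
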
diff --git a/tensorflow/python/kernel_tests/unpack_op_test.py b/tensorflow/python/kernel_tests/unpack_op_test.py
index 9b5e8ce328..9ba7f1fe5f 100644
--- a/tensorflow/python/kernel_tests/unpack_op_test.py
+++ b/tensorflow/python/kernel_tests/unpack_op_test.py
@@ -12,26 +12,31 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Functional tests for Unpack Op."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
-import tensorflow as tf
+
+from tensorflow.python.framework import constant_op
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import gradient_checker
+from tensorflow.python.platform import test
def np_split_squeeze(array, axis):
axis_len = array.shape[axis]
return [
- np.squeeze(arr, axis=(axis,))
- for arr in np.split(array, axis_len, axis=axis)
+ np.squeeze(
+ arr, axis=(axis,)) for arr in np.split(
+ array, axis_len, axis=axis)
]
-class UnpackOpTest(tf.test.TestCase):
+class UnpackOpTest(test.TestCase):
def testSimple(self):
np.random.seed(7)
@@ -39,10 +44,10 @@ class UnpackOpTest(tf.test.TestCase):
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
data = np.random.randn(*shape)
# Convert data to a single tensorflow tensor
- x = tf.constant(data)
+ x = constant_op.constant(data)
# Unpack into a list of tensors
- cs_unpacked = tf.unpack(x, num=shape[0])
- cs_unstacked = tf.unpack(x, num=shape[0])
+ cs_unpacked = array_ops.unpack(x, num=shape[0])
+ cs_unstacked = array_ops.unpack(x, num=shape[0])
for cs in (cs_unpacked, cs_unstacked):
self.assertEqual(type(cs), list)
self.assertEqual(len(cs), shape[0])
@@ -55,13 +60,15 @@ class UnpackOpTest(tf.test.TestCase):
shapes = [shape[1:]] * shape[0]
for i in xrange(shape[0]):
with self.test_session(use_gpu=True):
- x = tf.constant(data)
- cs = tf.unpack(x, num=shape[0])
- err = tf.test.compute_gradient_error(x, shape, cs[i], shapes[i])
+ x = constant_op.constant(data)
+ cs = array_ops.unpack(x, num=shape[0])
+ err = gradient_checker.compute_gradient_error(x, shape, cs[i],
+ shapes[i])
self.assertLess(err, 1e-6)
- cs = tf.unstack(x, num=shape[0])
- err = tf.test.compute_gradient_error(x, shape, cs[i], shapes[i])
+ cs = array_ops.unstack(x, num=shape[0])
+ err = gradient_checker.compute_gradient_error(x, shape, cs[i],
+ shapes[i])
self.assertLess(err, 1e-6)
def testGradientsAxis1(self):
@@ -71,49 +78,51 @@ class UnpackOpTest(tf.test.TestCase):
del out_shape[1]
for i in xrange(shape[1]):
with self.test_session(use_gpu=True):
- x = tf.constant(data)
- cs = tf.unpack(x, num=shape[1], axis=1)
- err = tf.test.compute_gradient_error(x, shape, cs[i], out_shape)
+ x = constant_op.constant(data)
+ cs = array_ops.unpack(x, num=shape[1], axis=1)
+ err = gradient_checker.compute_gradient_error(x, shape, cs[i],
+ out_shape)
self.assertLess(err, 1e-6)
- cs = tf.unstack(x, num=shape[1], axis=1)
- err = tf.test.compute_gradient_error(x, shape, cs[i], out_shape)
+ cs = array_ops.unstack(x, num=shape[1], axis=1)
+ err = gradient_checker.compute_gradient_error(x, shape, cs[i],
+ out_shape)
self.assertLess(err, 1e-6)
def testInferNum(self):
with self.test_session():
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
- x = tf.placeholder(np.float32, shape=shape)
- cs = tf.unpack(x)
+ x = array_ops.placeholder(np.float32, shape=shape)
+ cs = array_ops.unpack(x)
self.assertEqual(type(cs), list)
self.assertEqual(len(cs), shape[0])
- cs = tf.unstack(x)
+ cs = array_ops.unstack(x)
self.assertEqual(type(cs), list)
self.assertEqual(len(cs), shape[0])
def testCannotInferNumFromUnknownShape(self):
- x = tf.placeholder(np.float32)
- with self.assertRaisesRegexp(
- ValueError, r'Cannot infer num from shape <unknown>'):
- tf.unpack(x)
- with self.assertRaisesRegexp(
- ValueError, r'Cannot infer num from shape <unknown>'):
- tf.unstack(x)
+ x = array_ops.placeholder(np.float32)
+ with self.assertRaisesRegexp(ValueError,
+ r'Cannot infer num from shape <unknown>'):
+ array_ops.unpack(x)
+ with self.assertRaisesRegexp(ValueError,
+ r'Cannot infer num from shape <unknown>'):
+ array_ops.unstack(x)
def testUnknownShapeOkWithNum(self):
- x = tf.placeholder(np.float32)
- tf.unpack(x, num=2)
- tf.unstack(x, num=2)
+ x = array_ops.placeholder(np.float32)
+ array_ops.unpack(x, num=2)
+ array_ops.unstack(x, num=2)
def testCannotInferNumFromNoneShape(self):
- x = tf.placeholder(np.float32, shape=(None,))
+ x = array_ops.placeholder(np.float32, shape=(None,))
with self.assertRaisesRegexp(ValueError,
r'Cannot infer num from shape \(\?,\)'):
- tf.unpack(x)
+ array_ops.unpack(x)
with self.assertRaisesRegexp(ValueError,
r'Cannot infer num from shape \(\?,\)'):
- tf.unstack(x)
+ array_ops.unstack(x)
def testAgainstNumpy(self):
# For 1 to 5 dimensions.
@@ -125,18 +134,18 @@ class UnpackOpTest(tf.test.TestCase):
expected = np_split_squeeze(a, j)
with self.test_session() as sess:
- actual_unpack = sess.run(tf.unpack(a, axis=j))
- actual_unstack = sess.run(tf.unstack(a, axis=j))
+ actual_unpack = sess.run(array_ops.unpack(a, axis=j))
+ actual_unstack = sess.run(array_ops.unstack(a, axis=j))
self.assertAllEqual(expected, actual_unpack)
self.assertAllEqual(expected, actual_unstack)
def testAxis0Default(self):
with self.test_session() as sess:
- a = tf.constant([[1, 2, 3], [4, 5, 6]], name='a')
+ a = constant_op.constant([[1, 2, 3], [4, 5, 6]], name='a')
- unpacked = sess.run(tf.unpack(a))
- unstacked = sess.run(tf.unstack(a))
+ unpacked = sess.run(array_ops.unpack(a))
+ unstacked = sess.run(array_ops.unstack(a))
self.assertEqual(len(unpacked), 2)
self.assertAllEqual(unpacked[0], [1, 2, 3])
@@ -146,28 +155,28 @@ class UnpackOpTest(tf.test.TestCase):
self.assertAllEqual(unstacked[1], [4, 5, 6])
def testAxisOutOfRange(self):
- a = tf.constant([[1, 2, 3], [4, 5, 6]], name='a')
+ a = constant_op.constant([[1, 2, 3], [4, 5, 6]], name='a')
with self.assertRaisesRegexp(ValueError, r'axis = 2 not in \[-2, 2\)'):
- tf.unpack(a, axis=2)
+ array_ops.unpack(a, axis=2)
with self.assertRaisesRegexp(ValueError, r'axis = 2 not in \[-2, 2\)'):
- tf.unstack(a, axis=2)
+ array_ops.unstack(a, axis=2)
def testAxisOutOfNegativeRange(self):
- a = tf.constant([[1, 2, 3], [4, 5, 6]], name='a')
+ a = constant_op.constant([[1, 2, 3], [4, 5, 6]], name='a')
with self.assertRaisesRegexp(ValueError, r'axis = -3 not in \[-2, 2\)'):
- tf.unpack(a, axis=-3)
+ array_ops.unpack(a, axis=-3)
with self.assertRaisesRegexp(ValueError, r'axis = -3 not in \[-2, 2\)'):
- tf.unstack(a, axis=-3)
+ array_ops.unstack(a, axis=-3)
def testZeroLengthDim(self):
with self.test_session():
- x = tf.zeros(shape=(0, 1, 2))
- y = tf.unpack(x, axis=1)[0].eval()
+ x = array_ops.zeros(shape=(0, 1, 2))
+ y = array_ops.unpack(x, axis=1)[0].eval()
self.assertEqual(y.shape, (0, 2))
- y = tf.unstack(x, axis=1)[0].eval()
+ y = array_ops.unstack(x, axis=1)[0].eval()
self.assertEqual(y.shape, (0, 2))
if __name__ == '__main__':
- tf.test.main()
+ test.main()
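
np_split_squeeze above is the NumPy reference for what unstack does: split the array into shape[axis] pieces along axis, then drop the resulting size-1 dimension from each piece. A minimal sketch of that equivalence, mirroring testAgainstNumpy under the diff's module paths (unpack and unstack behave identically in these tests):

import numpy as np

from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops

a = np.random.randn(4, 3, 2)
axis = 1
# NumPy reference: same logic as np_split_squeeze in the test file.
expected = [np.squeeze(arr, axis=(axis,))
            for arr in np.split(a, a.shape[axis], axis=axis)]
cs = array_ops.unstack(constant_op.constant(a), axis=axis)  # was tf.unstack
with session.Session() as sess:
    actual = sess.run(cs)
for want, got in zip(expected, actual):
    assert np.allclose(want, got)
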
diff --git a/tensorflow/python/kernel_tests/variable_ops_test.py b/tensorflow/python/kernel_tests/variable_ops_test.py
index 456e50beec..964c23433a 100644
--- a/tensorflow/python/kernel_tests/variable_ops_test.py
+++ b/tensorflow/python/kernel_tests/variable_ops_test.py
@@ -12,35 +12,39 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for tensorflow.ops.tf.variable_op."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
+from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
+from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_state_ops
+from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
-
+from tensorflow.python.ops import variables
+from tensorflow.python.platform import test
_NP_TO_TF = {
- np.float32: tf.float32,
- np.float64: tf.float64,
- np.int32: tf.int32,
- np.int64: tf.int64,
+ np.float32: dtypes.float32,
+ np.float64: dtypes.float64,
+ np.int32: dtypes.int32,
+ np.int64: dtypes.int64,
}
-class VariableOpTest(tf.test.TestCase):
+class VariableOpTest(test.TestCase):
def _initFetch(self, x, tftype, use_gpu=None):
with self.test_session(use_gpu=use_gpu):
p = state_ops.variable_op(x.shape, tftype)
- op = tf.assign(p, x)
+ op = state_ops.assign(p, x)
op.op.run()
return p.eval()
@@ -59,118 +63,118 @@ class VariableOpTest(tf.test.TestCase):
self._testTypes(np.arange(0, 20).reshape([4, 5]))
def testset_shape(self):
- p = state_ops.variable_op([1, 2], tf.float32)
+ p = state_ops.variable_op([1, 2], dtypes.float32)
self.assertEqual([1, 2], p.get_shape())
- p = state_ops.variable_op([1, 2], tf.float32, set_shape=False)
+ p = state_ops.variable_op([1, 2], dtypes.float32, set_shape=False)
self.assertEqual(tensor_shape.unknown_shape(), p.get_shape())
def testAssign(self):
value = np.array([[42.0, 43.0]])
- var = state_ops.variable_op(value.shape, tf.float32)
+ var = state_ops.variable_op(value.shape, dtypes.float32)
self.assertShapeEqual(value, var)
- assigned = tf.assign(var, value)
+ assigned = state_ops.assign(var, value)
self.assertShapeEqual(value, assigned)
def testAssignNoValidateShape(self):
value = np.array([[42.0, 43.0]])
- var = state_ops.variable_op(value.shape, tf.float32)
+ var = state_ops.variable_op(value.shape, dtypes.float32)
self.assertShapeEqual(value, var)
- assigned = tf.assign(var, value, validate_shape=False)
+ assigned = state_ops.assign(var, value, validate_shape=False)
self.assertShapeEqual(value, assigned)
def testAssignNoVarShape(self):
value = np.array([[42.0, 43.0]])
- var = state_ops.variable_op(value.shape, tf.float32, set_shape=False)
+ var = state_ops.variable_op(value.shape, dtypes.float32, set_shape=False)
self.assertEqual(tensor_shape.unknown_shape(), var.get_shape())
- assigned = tf.assign(var, value)
+ assigned = state_ops.assign(var, value)
self.assertShapeEqual(value, assigned)
def testAssignNoVarShapeNoValidateShape(self):
value = np.array([[42.0, 43.0]])
- var = state_ops.variable_op(value.shape, tf.float32, set_shape=False)
+ var = state_ops.variable_op(value.shape, dtypes.float32, set_shape=False)
self.assertEqual(tensor_shape.unknown_shape(), var.get_shape())
- assigned = tf.assign(var, value, validate_shape=False)
+ assigned = state_ops.assign(var, value, validate_shape=False)
self.assertShapeEqual(value, assigned)
def _NewShapelessTensor(self):
- tensor = tf.placeholder(tf.float32)
+ tensor = array_ops.placeholder(dtypes.float32)
self.assertEqual(tensor_shape.unknown_shape(), tensor.get_shape())
return tensor
def testAssignNoValueShape(self):
value = self._NewShapelessTensor()
shape = [1, 2]
- var = state_ops.variable_op(shape, tf.float32)
- assigned = tf.assign(var, value)
+ var = state_ops.variable_op(shape, dtypes.float32)
+ assigned = state_ops.assign(var, value)
self.assertEqual(shape, var.get_shape())
self.assertEqual(shape, assigned.get_shape())
def testAssignNoValueShapeNoValidateShape(self):
value = self._NewShapelessTensor()
shape = [1, 2]
- var = state_ops.variable_op(shape, tf.float32)
+ var = state_ops.variable_op(shape, dtypes.float32)
self.assertEqual(shape, var.get_shape())
- assigned = tf.assign(var, value, validate_shape=False)
+ assigned = state_ops.assign(var, value, validate_shape=False)
self.assertEqual(tensor_shape.unknown_shape(), assigned.get_shape())
def testAssignNoShape(self):
with self.test_session():
value = self._NewShapelessTensor()
- var = state_ops.variable_op([1, 2], tf.float32, set_shape=False)
+ var = state_ops.variable_op([1, 2], dtypes.float32, set_shape=False)
self.assertEqual(tensor_shape.unknown_shape(), var.get_shape())
self.assertEqual(tensor_shape.unknown_shape(),
- tf.assign(var, value).get_shape())
+ state_ops.assign(var, value).get_shape())
def testAssignNoShapeNoValidateShape(self):
with self.test_session():
value = self._NewShapelessTensor()
- var = state_ops.variable_op([1, 2], tf.float32, set_shape=False)
+ var = state_ops.variable_op([1, 2], dtypes.float32, set_shape=False)
self.assertEqual(tensor_shape.unknown_shape(), var.get_shape())
- self.assertEqual(tensor_shape.unknown_shape(),
- tf.assign(var, value, validate_shape=False).get_shape())
+ self.assertEqual(
+ tensor_shape.unknown_shape(),
+ state_ops.assign(
+ var, value, validate_shape=False).get_shape())
def testAssignUpdate(self):
- var = state_ops.variable_op([1, 2], tf.float32)
- added = tf.assign_add(var, [[2.0, 3.0]])
+ var = state_ops.variable_op([1, 2], dtypes.float32)
+ added = state_ops.assign_add(var, [[2.0, 3.0]])
self.assertEqual([1, 2], added.get_shape())
- subbed = tf.assign_sub(var, [[12.0, 13.0]])
+ subbed = state_ops.assign_sub(var, [[12.0, 13.0]])
self.assertEqual([1, 2], subbed.get_shape())
def testAssignUpdateNoVarShape(self):
- var = state_ops.variable_op([1, 2], tf.float32, set_shape=False)
- added = tf.assign_add(var, [[2.0, 3.0]])
+ var = state_ops.variable_op([1, 2], dtypes.float32, set_shape=False)
+ added = state_ops.assign_add(var, [[2.0, 3.0]])
self.assertEqual([1, 2], added.get_shape())
- subbed = tf.assign_sub(var, [[12.0, 13.0]])
+ subbed = state_ops.assign_sub(var, [[12.0, 13.0]])
self.assertEqual([1, 2], subbed.get_shape())
def testAssignUpdateNoValueShape(self):
- var = state_ops.variable_op([1, 2], tf.float32)
- added = tf.assign_add(var, self._NewShapelessTensor())
+ var = state_ops.variable_op([1, 2], dtypes.float32)
+ added = state_ops.assign_add(var, self._NewShapelessTensor())
self.assertEqual([1, 2], added.get_shape())
- subbed = tf.assign_sub(var, self._NewShapelessTensor())
+ subbed = state_ops.assign_sub(var, self._NewShapelessTensor())
self.assertEqual([1, 2], subbed.get_shape())
def testAssignUpdateNoShape(self):
- var = state_ops.variable_op([1, 2], tf.float32, set_shape=False)
- added = tf.assign_add(var, self._NewShapelessTensor())
+ var = state_ops.variable_op([1, 2], dtypes.float32, set_shape=False)
+ added = state_ops.assign_add(var, self._NewShapelessTensor())
self.assertEqual(tensor_shape.unknown_shape(), added.get_shape())
- subbed = tf.assign_sub(var, self._NewShapelessTensor())
+ subbed = state_ops.assign_sub(var, self._NewShapelessTensor())
self.assertEqual(tensor_shape.unknown_shape(), subbed.get_shape())
def testTemporaryVariable(self):
with self.test_session(use_gpu=True):
var = gen_state_ops._temporary_variable(
- [1, 2],
- tf.float32,
- var_name="foo")
- var = tf.assign(var, [[4.0, 5.0]])
- var = tf.assign_add(var, [[6.0, 7.0]])
+ [1, 2], dtypes.float32, var_name="foo")
+ var = state_ops.assign(var, [[4.0, 5.0]])
+ var = state_ops.assign_add(var, [[6.0, 7.0]])
final = gen_state_ops._destroy_temporary_variable(var, var_name="foo")
self.assertAllClose([[10.0, 12.0]], final.eval())
def testDestroyNonexistentTemporaryVariable(self):
with self.test_session(use_gpu=True):
- var = gen_state_ops._temporary_variable([1, 2], tf.float32)
+ var = gen_state_ops._temporary_variable([1, 2], dtypes.float32)
final = gen_state_ops._destroy_temporary_variable(var, var_name="bad")
with self.assertRaises(errors.NotFoundError):
final.eval()
@@ -178,22 +182,18 @@ class VariableOpTest(tf.test.TestCase):
def testDuplicateTemporaryVariable(self):
with self.test_session(use_gpu=True):
var1 = gen_state_ops._temporary_variable(
- [1, 2],
- tf.float32,
- var_name="dup")
- var1 = tf.assign(var1, [[1.0, 2.0]])
+ [1, 2], dtypes.float32, var_name="dup")
+ var1 = state_ops.assign(var1, [[1.0, 2.0]])
var2 = gen_state_ops._temporary_variable(
- [1, 2],
- tf.float32,
- var_name="dup")
- var2 = tf.assign(var2, [[3.0, 4.0]])
+ [1, 2], dtypes.float32, var_name="dup")
+ var2 = state_ops.assign(var2, [[3.0, 4.0]])
final = var1 + var2
with self.assertRaises(errors.AlreadyExistsError):
final.eval()
def testDestroyTemporaryVariableTwice(self):
with self.test_session(use_gpu=True):
- var = gen_state_ops._temporary_variable([1, 2], tf.float32)
+ var = gen_state_ops._temporary_variable([1, 2], dtypes.float32)
val1 = gen_state_ops._destroy_temporary_variable(var, var_name="dup")
val2 = gen_state_ops._destroy_temporary_variable(var, var_name="dup")
final = val1 + val2
@@ -203,48 +203,42 @@ class VariableOpTest(tf.test.TestCase):
def testTemporaryVariableNoLeak(self):
with self.test_session(use_gpu=True):
var = gen_state_ops._temporary_variable(
- [1, 2],
- tf.float32,
- var_name="bar")
- final = tf.identity(var)
+ [1, 2], dtypes.float32, var_name="bar")
+ final = array_ops.identity(var)
final.eval()
def testTwoTemporaryVariablesNoLeaks(self):
with self.test_session(use_gpu=True):
var1 = gen_state_ops._temporary_variable(
- [1, 2],
- tf.float32,
- var_name="var1")
+ [1, 2], dtypes.float32, var_name="var1")
var2 = gen_state_ops._temporary_variable(
- [1, 2],
- tf.float32,
- var_name="var2")
+ [1, 2], dtypes.float32, var_name="var2")
final = var1 + var2
final.eval()
def testAssignDependencyAcrossDevices(self):
with self.test_session(use_gpu=True):
# The variable and an op to increment it are on the GPU.
- var = state_ops.variable_op([1], tf.float32)
- tf.assign(var, [1.0]).eval()
- increment = tf.assign_add(var, [1.0])
- with tf.control_dependencies([increment]):
- with tf.device("/cpu:0"):
+ var = state_ops.variable_op([1], dtypes.float32)
+ state_ops.assign(var, [1.0]).eval()
+ increment = state_ops.assign_add(var, [1.0])
+ with ops.control_dependencies([increment]):
+ with ops.device("/cpu:0"):
# This mul op is pinned to the CPU, but reads the variable from the
# GPU. The test ensures that the dependency on 'increment' is still
# honored, i.e., the Send and Recv from GPU to CPU should take place
# only after the increment.
- result = tf.mul(var, var)
+ result = math_ops.mul(var, var)
self.assertAllClose([4.0], result.eval())
def testIsVariableInitialized(self):
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu):
- v0 = state_ops.variable_op([1, 2], tf.float32)
- self.assertEqual(False, tf.is_variable_initialized(v0).eval())
- tf.assign(v0, [[2.0, 3.0]]).eval()
- self.assertEqual(True, tf.is_variable_initialized(v0).eval())
+ v0 = state_ops.variable_op([1, 2], dtypes.float32)
+ self.assertEqual(False, variables.is_variable_initialized(v0).eval())
+ state_ops.assign(v0, [[2.0, 3.0]]).eval()
+ self.assertEqual(True, variables.is_variable_initialized(v0).eval())
if __name__ == "__main__":
- tf.test.main()
+ test.main()
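
The assign tests above pin down the static-shape rules: state_ops.assign keeps the variable's known shape even when the value's shape is unknown, while validate_shape=False deliberately discards the static shape. A minimal graph-mode sketch of those two cases, assuming only modules this diff already imports:

from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import state_ops

var = state_ops.variable_op([1, 2], dtypes.float32)
value = array_ops.placeholder(dtypes.float32)  # value with unknown shape
assigned = state_ops.assign(var, value)        # was tf.assign
assert assigned.get_shape() == [1, 2]          # variable's shape is kept
loose = state_ops.assign(var, value, validate_shape=False)
assert loose.get_shape() == tensor_shape.unknown_shape()
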
diff --git a/tensorflow/python/kernel_tests/variable_scope_test.py b/tensorflow/python/kernel_tests/variable_scope_test.py
index 58772d9a23..7c1842a9cd 100644
--- a/tensorflow/python/kernel_tests/variable_scope_test.py
+++ b/tensorflow/python/kernel_tests/variable_scope_test.py
@@ -12,22 +12,27 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for variable store."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy
-import tensorflow as tf
+from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
-from tensorflow.python.ops import init_ops
+from tensorflow.python.framework import errors_impl
+from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
+from tensorflow.python.ops import init_ops
+from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
+from tensorflow.python.ops import variables as variables_lib
+from tensorflow.python.platform import test
-class VariableScopeTest(tf.test.TestCase):
+class VariableScopeTest(test.TestCase):
def testGetVar(self):
vs = variable_scope._get_default_variable_store()
@@ -55,173 +60,188 @@ class VariableScopeTest(tf.test.TestCase):
vs.get_variable("v1", [2])
vs.get_variable("v2", [2])
expected_names = ["%s:0" % name for name in ["v1", "v2"]]
- self.assertEqual(set(expected_names),
- set([v.name for v in vs._vars.values()]))
+ self.assertEqual(
+ set(expected_names), set([v.name for v in vs._vars.values()]))
def testVarScopeInitializer(self):
with self.test_session() as sess:
- init = tf.constant_initializer(0.3)
- with tf.variable_scope("tower") as tower:
- with tf.variable_scope("foo", initializer=init):
- v = tf.get_variable("v", [])
- sess.run(tf.initialize_variables([v]))
+ init = init_ops.constant_initializer(0.3)
+ with variable_scope.variable_scope("tower") as tower:
+ with variable_scope.variable_scope("foo", initializer=init):
+ v = variable_scope.get_variable("v", [])
+ sess.run(variables_lib.initialize_variables([v]))
self.assertAllClose(v.eval(), 0.3)
- with tf.variable_scope(tower, initializer=init):
- w = tf.get_variable("w", [])
- sess.run(tf.initialize_variables([w]))
+ with variable_scope.variable_scope(tower, initializer=init):
+ w = variable_scope.get_variable("w", [])
+ sess.run(variables_lib.initialize_variables([w]))
self.assertAllClose(w.eval(), 0.3)
def testVarScopeDType(self):
with self.test_session():
- with tf.variable_scope("tower") as tower:
- with tf.variable_scope("foo", dtype=tf.float16):
- v = tf.get_variable("v", [])
+ with variable_scope.variable_scope("tower") as tower:
+ with variable_scope.variable_scope("foo", dtype=dtypes.float16):
+ v = variable_scope.get_variable("v", [])
self.assertEqual(v.dtype, dtypes.float16_ref)
- with tf.variable_scope(tower, dtype=tf.float16):
- w = tf.get_variable("w", [])
+ with variable_scope.variable_scope(tower, dtype=dtypes.float16):
+ w = variable_scope.get_variable("w", [])
self.assertEqual(w.dtype, dtypes.float16_ref)
def testInitFromNonTensorValue(self):
with self.test_session() as sess:
- v = tf.get_variable("v", initializer=4, dtype=tf.int32)
- sess.run(tf.initialize_variables([v]))
+ v = variable_scope.get_variable("v", initializer=4, dtype=dtypes.int32)
+ sess.run(variables_lib.initialize_variables([v]))
self.assertAllClose(v.eval(), 4)
- w = tf.get_variable("w",
- initializer=numpy.array([1, 2, 3]),
- dtype=tf.int64)
- sess.run(tf.initialize_variables([w]))
+ w = variable_scope.get_variable(
+ "w", initializer=numpy.array([1, 2, 3]), dtype=dtypes.int64)
+ sess.run(variables_lib.initialize_variables([w]))
self.assertAllClose(w.eval(), [1, 2, 3])
with self.assertRaises(TypeError):
- tf.get_variable("x", initializer={})
+ variable_scope.get_variable("x", initializer={})
def testInitFromNonInitializer(self):
with self.test_session() as sess:
# Test various dtypes with zeros initializer as follows:
- types = [tf.int8, tf.uint8, tf.int16, tf.uint16, tf.int32, tf.int64,
- tf.bool]
+ types = [
+ dtypes.int8, dtypes.uint8, dtypes.int16, dtypes.uint16, dtypes.int32,
+ dtypes.int64, dtypes.bool
+ ]
# Use different variable_name to distinguish various dtypes
for (i, dtype) in enumerate(types):
- x = tf.get_variable(name='x%d' % i, shape=(3, 4), dtype=dtype)
- y = tf.get_variable(name='y%d' % i, shape=(3, 4), dtype=dtype,
- initializer=init_ops.zeros_initializer(dtype=dtype))
+ x = variable_scope.get_variable(
+ name="x%d" % i, shape=(3, 4), dtype=dtype)
+ y = variable_scope.get_variable(
+ name="y%d" % i,
+ shape=(3, 4),
+ dtype=dtype,
+ initializer=init_ops.zeros_initializer(dtype=dtype))
- tf.global_variables_initializer().run()
+ variables_lib.global_variables_initializer().run()
self.assertAllEqual(x.eval(), y.eval())
def testVarScopeCachingDevice(self):
with self.test_session():
caching_device = "/job:moo"
- with tf.variable_scope("tower"):
- with tf.variable_scope("caching", caching_device=caching_device):
- v = tf.get_variable("v", [])
+ with variable_scope.variable_scope("tower"):
+ with variable_scope.variable_scope(
+ "caching", caching_device=caching_device):
+ v = variable_scope.get_variable("v", [])
self.assertTrue(v.value().device.startswith(caching_device))
- with tf.variable_scope("child"):
- v2 = tf.get_variable("v", [])
+ with variable_scope.variable_scope("child"):
+ v2 = variable_scope.get_variable("v", [])
self.assertTrue(v2.value().device.startswith(caching_device))
- with tf.variable_scope("not_cached", caching_device=""):
- v2_not_cached = tf.get_variable("v", [])
- self.assertFalse(
- v2_not_cached.value().device.startswith(caching_device))
+ with variable_scope.variable_scope("not_cached", caching_device=""):
+ v2_not_cached = variable_scope.get_variable("v", [])
+ self.assertFalse(v2_not_cached.value().device.startswith(
+ caching_device))
- with tf.variable_scope(
+ with variable_scope.variable_scope(
"not_cached_identity_device",
caching_device=lambda op: op.device):
- v2_identity_device = tf.get_variable("v", [])
- self.assertFalse(
- v2_identity_device.value().device.startswith(caching_device))
+ v2_identity_device = variable_scope.get_variable("v", [])
+ self.assertFalse(v2_identity_device.value().device.startswith(
+ caching_device))
- with tf.variable_scope("we_will_do_it_live") as vs_live:
+ with variable_scope.variable_scope("we_will_do_it_live") as vs_live:
vs_live.set_caching_device("/job:live")
- v_live = tf.get_variable("v", [])
+ v_live = variable_scope.get_variable("v", [])
self.assertTrue(v_live.value().device.startswith("/job:live"))
- v_tower = tf.get_variable("v", [])
+ v_tower = variable_scope.get_variable("v", [])
self.assertFalse(v_tower.value().device.startswith(caching_device))
def testVarScopeRegularizer(self):
with self.test_session() as sess:
- init = tf.constant_initializer(0.3)
+ init = init_ops.constant_initializer(0.3)
+
def regularizer1(v):
- return tf.reduce_mean(v) + 0.1
+ return math_ops.reduce_mean(v) + 0.1
+
def regularizer2(v):
- return tf.reduce_mean(v) + 0.2
- with tf.variable_scope("tower", regularizer=regularizer1) as tower:
- with tf.variable_scope("foo", initializer=init):
- v = tf.get_variable("v", [])
- sess.run(tf.initialize_variables([v]))
- losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
+ return math_ops.reduce_mean(v) + 0.2
+
+ with variable_scope.variable_scope(
+ "tower", regularizer=regularizer1) as tower:
+ with variable_scope.variable_scope("foo", initializer=init):
+ v = variable_scope.get_variable("v", [])
+ sess.run(variables_lib.initialize_variables([v]))
+ losses = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(1, len(losses))
self.assertAllClose(losses[0].eval(), 0.4)
- with tf.variable_scope(tower, initializer=init) as vs:
- u = tf.get_variable("u", [])
+ with variable_scope.variable_scope(tower, initializer=init) as vs:
+ u = variable_scope.get_variable("u", [])
vs.set_regularizer(regularizer2)
- w = tf.get_variable("w", [])
+ w = variable_scope.get_variable("w", [])
# Next 3 variables not regularized to test disabling regularization.
- x = tf.get_variable("x", [], regularizer=tf.no_regularizer)
- with tf.variable_scope("baz", regularizer=tf.no_regularizer):
- y = tf.get_variable("y", [])
- vs.set_regularizer(tf.no_regularizer)
- z = tf.get_variable("z", [])
+ x = variable_scope.get_variable(
+ "x", [], regularizer=variable_scope.no_regularizer)
+ with variable_scope.variable_scope(
+ "baz", regularizer=variable_scope.no_regularizer):
+ y = variable_scope.get_variable("y", [])
+ vs.set_regularizer(variable_scope.no_regularizer)
+ z = variable_scope.get_variable("z", [])
# Check results.
- losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
+ losses = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(3, len(losses))
- sess.run(tf.initialize_variables([u, w, x, y, z]))
+ sess.run(variables_lib.initialize_variables([u, w, x, y, z]))
self.assertAllClose(losses[0].eval(), 0.4)
self.assertAllClose(losses[1].eval(), 0.4)
self.assertAllClose(losses[2].eval(), 0.5)
- with tf.variable_scope("foo", reuse=True):
- v = tf.get_variable("v", []) # "v" is alredy there, reused
- losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
+ with variable_scope.variable_scope("foo", reuse=True):
+ v = variable_scope.get_variable("v",
+ []) # "v" is alredy there, reused
+ losses = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(3, len(losses)) # No new loss added.
def testInitializeFromValue(self):
with self.test_session() as sess:
- init = tf.constant(0.1)
- w = tf.get_variable("v", initializer=init)
- sess.run(tf.initialize_variables([w]))
+ init = constant_op.constant(0.1)
+ w = variable_scope.get_variable("v", initializer=init)
+ sess.run(variables_lib.initialize_variables([w]))
self.assertAllClose(w.eval(), 0.1)
with self.assertRaisesRegexp(ValueError, "shape"):
# We disallow explicit shape specification when initializer is constant.
- tf.get_variable("u", [1], initializer=init)
+ variable_scope.get_variable("u", [1], initializer=init)
- with tf.variable_scope("foo", initializer=init):
+ with variable_scope.variable_scope("foo", initializer=init):
# Constant initializer can be passed through scopes if needed.
- v = tf.get_variable("v")
- sess.run(tf.initialize_variables([v]))
+ v = variable_scope.get_variable("v")
+ sess.run(variables_lib.initialize_variables([v]))
self.assertAllClose(v.eval(), 0.1)
# Check that non-float32 initializer creates a non-float32 variable.
- init = tf.constant(1, dtype=tf.int32)
- t = tf.get_variable("t", initializer=init)
- self.assertEqual(t.dtype.base_dtype, tf.int32)
+ init = constant_op.constant(1, dtype=dtypes.int32)
+ t = variable_scope.get_variable("t", initializer=init)
+ self.assertEqual(t.dtype.base_dtype, dtypes.int32)
# Raise error if `initializer` dtype and `dtype` are not identical.
with self.assertRaisesRegexp(ValueError, "don't match"):
- tf.get_variable("s", initializer=init, dtype=tf.float64)
+ variable_scope.get_variable("s", initializer=init, dtype=dtypes.float64)
def testControlDeps(self):
with self.test_session() as sess:
- v0 = tf.get_variable("v0", [1], initializer=tf.constant_initializer(0))
- with tf.control_dependencies([v0.value()]):
- v1 = tf.get_variable("v1", [1], initializer=tf.constant_initializer(1))
+ v0 = variable_scope.get_variable(
+ "v0", [1], initializer=init_ops.constant_initializer(0))
+ with ops.control_dependencies([v0.value()]):
+ v1 = variable_scope.get_variable(
+ "v1", [1], initializer=init_ops.constant_initializer(1))
add = v1 + v0
# v0 should be uninitialized.
- with self.assertRaisesRegexp(tf.OpError, "uninitialized"):
+ with self.assertRaisesRegexp(errors_impl.OpError, "uninitialized"):
sess.run(v0)
# We should be able to initialize and run v1 without initializing
# v0, even if the variable was created with a control dep on v0.
sess.run(v1.initializer)
self.assertEqual(1, sess.run(v1))
# v0 should still be uninitialized.
- with self.assertRaisesRegexp(tf.OpError, "uninitialized"):
+ with self.assertRaisesRegexp(errors_impl.OpError, "uninitialized"):
sess.run(v0)
- with self.assertRaisesRegexp(tf.OpError, "uninitialized"):
+ with self.assertRaisesRegexp(errors_impl.OpError, "uninitialized"):
sess.run(add)
# If we initialize v0 we should be able to run 'add'.
sess.run(v0.initializer)
@@ -229,20 +249,25 @@ class VariableScopeTest(tf.test.TestCase):
def testControlFlow(self):
with self.test_session() as sess:
- v0 = tf.get_variable("v0", [], initializer=tf.constant_initializer(0))
+ v0 = variable_scope.get_variable(
+ "v0", [], initializer=init_ops.constant_initializer(0))
var_dict = {}
+
# Call get_variable in each of the cond clauses.
def var_in_then_clause():
- v1 = tf.get_variable("v1", [1], initializer=tf.constant_initializer(1))
+ v1 = variable_scope.get_variable(
+ "v1", [1], initializer=init_ops.constant_initializer(1))
var_dict["v1"] = v1
return v1 + v0
+
def var_in_else_clause():
- v2 = tf.get_variable("v2", [1], initializer=tf.constant_initializer(2))
+ v2 = variable_scope.get_variable(
+ "v2", [1], initializer=init_ops.constant_initializer(2))
var_dict["v2"] = v2
return v2 + v0
- add = control_flow_ops.cond(tf.less(v0, 10),
- var_in_then_clause,
- var_in_else_clause)
+
+ add = control_flow_ops.cond(
+ math_ops.less(v0, 10), var_in_then_clause, var_in_else_clause)
v1 = var_dict["v1"]
v2 = var_dict["v2"]
# We should be able to initialize and run v1 and v2 without initializing
@@ -252,10 +277,10 @@ class VariableScopeTest(tf.test.TestCase):
sess.run(v2.initializer)
self.assertEqual([2], sess.run(v2))
# v0 should still be uninitialized.
- with self.assertRaisesRegexp(tf.OpError, "uninitialized"):
+ with self.assertRaisesRegexp(errors_impl.OpError, "uninitialized"):
sess.run(v0)
# We should not be able to run 'add' yet.
- with self.assertRaisesRegexp(tf.OpError, "uninitialized"):
+ with self.assertRaisesRegexp(errors_impl.OpError, "uninitialized"):
sess.run(add)
# If we initialize v0 we should be able to run 'add'.
sess.run(v0.initializer)
@@ -264,396 +289,398 @@ class VariableScopeTest(tf.test.TestCase):
def testGetVariableScope(self):
# Test the get_variable_scope() function and setting properties of result.
with self.test_session() as sess:
- init = tf.constant_initializer(0.3)
- with tf.variable_scope("foo"):
- new_init1 = tf.get_variable_scope().initializer
+ init = init_ops.constant_initializer(0.3)
+ with variable_scope.variable_scope("foo"):
+ new_init1 = variable_scope.get_variable_scope().initializer
self.assertEqual(new_init1, None)
# Check that we can set initializer like this.
- tf.get_variable_scope().set_initializer(init)
- v = tf.get_variable("v", [])
- sess.run(tf.initialize_variables([v]))
+ variable_scope.get_variable_scope().set_initializer(init)
+ v = variable_scope.get_variable("v", [])
+ sess.run(variables_lib.initialize_variables([v]))
self.assertAllClose(v.eval(), 0.3)
# Check that we can set reuse.
- tf.get_variable_scope().reuse_variables()
+ variable_scope.get_variable_scope().reuse_variables()
with self.assertRaises(ValueError): # Fail, w does not exist yet.
- tf.get_variable("w", [1])
+ variable_scope.get_variable("w", [1])
# Check that the set initializer goes away.
- new_init = tf.get_variable_scope().initializer
+ new_init = variable_scope.get_variable_scope().initializer
self.assertEqual(new_init, None)
def testVarScope(self):
with self.test_session():
- with tf.variable_scope("tower") as tower:
+ with variable_scope.variable_scope("tower") as tower:
self.assertEqual(tower.name, "tower")
- with tf.name_scope("scope") as sc:
+ with ops.name_scope("scope") as sc:
self.assertEqual(sc, "tower/scope/")
- with tf.variable_scope("foo"):
- with tf.variable_scope("bar") as bar:
+ with variable_scope.variable_scope("foo"):
+ with variable_scope.variable_scope("bar") as bar:
self.assertEqual(bar.name, "foo/bar")
- with tf.name_scope("scope") as sc:
+ with ops.name_scope("scope") as sc:
self.assertEqual(sc, "foo/bar/scope/")
- with tf.variable_scope("foo"):
- with tf.variable_scope(tower, reuse=True) as tower_shared:
+ with variable_scope.variable_scope("foo"):
+ with variable_scope.variable_scope(tower, reuse=True) as tower_shared:
self.assertEqual(tower_shared.name, "tower")
- with tf.name_scope("scope") as sc:
+ with ops.name_scope("scope") as sc:
self.assertEqual(sc, "foo_1/tower/scope/")
def testVarScopeNameScope(self):
with self.test_session():
- with tf.name_scope("scope1"):
- with tf.variable_scope("tower") as tower:
- with tf.name_scope("scope2") as sc2:
+ with ops.name_scope("scope1"):
+ with variable_scope.variable_scope("tower") as tower:
+ with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "scope1/tower/scope2/")
- with tf.variable_scope(tower): # Re-entering acts like another "tower".
- with tf.name_scope("scope2") as sc2:
+ with variable_scope.variable_scope(
+ tower): # Re-entering acts like another "tower".
+ with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "scope1/tower_1/scope2/")
- with tf.variable_scope("tower"): # Re-entering by string acts the same.
- with tf.name_scope("scope2") as sc2:
+ with variable_scope.variable_scope(
+ "tower"): # Re-entering by string acts the same.
+ with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "scope1/tower_2/scope2/")
- with tf.name_scope("scope3"):
- with tf.variable_scope("tower"):
- with tf.name_scope("scope2") as sc2:
+ with ops.name_scope("scope3"):
+ with variable_scope.variable_scope("tower"):
+ with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "scope3/tower/scope2/")
- with tf.variable_scope(tower):
- with tf.name_scope("scope2") as sc2:
+ with variable_scope.variable_scope(tower):
+ with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "scope3/tower_1/scope2/")
- root_var_scope = tf.get_variable_scope()
- with tf.name_scope("scope4"):
- with tf.variable_scope(root_var_scope):
- with tf.name_scope("scope2") as sc2:
+ root_var_scope = variable_scope.get_variable_scope()
+ with ops.name_scope("scope4"):
+ with variable_scope.variable_scope(root_var_scope):
+ with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "scope4/scope2/")
def testVarScopeOriginalNameScope(self):
with self.test_session():
- with tf.name_scope("scope1"):
- with tf.variable_scope("tower") as tower:
+ with ops.name_scope("scope1"):
+ with variable_scope.variable_scope("tower") as tower:
self.assertEqual(tower.original_name_scope, "scope1/tower/")
- with tf.name_scope("scope2") as sc2:
+ with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "scope1/tower/scope2/")
- with tf.name_scope("scope2"):
- with tf.variable_scope(tower) as tower1:
+ with ops.name_scope("scope2"):
+ with variable_scope.variable_scope(tower) as tower1:
# Re-entering preserves original name scope.
self.assertEqual(tower1.original_name_scope, "scope1/tower/")
- with tf.name_scope("foo") as sc2:
+ with ops.name_scope("foo") as sc2:
self.assertEqual(sc2, "scope2/tower/foo/")
# Test re-entering original name scope.
- with tf.name_scope(tower.original_name_scope):
- with tf.name_scope("bar") as sc3:
+ with ops.name_scope(tower.original_name_scope):
+ with ops.name_scope("bar") as sc3:
self.assertEqual(sc3, "scope1/tower/bar/")
- with tf.name_scope("scope2"):
- with tf.variable_scope(tower):
- with tf.name_scope(tower.original_name_scope):
- with tf.name_scope("bar") as sc3:
+ with ops.name_scope("scope2"):
+ with variable_scope.variable_scope(tower):
+ with ops.name_scope(tower.original_name_scope):
+ with ops.name_scope("bar") as sc3:
self.assertEqual(sc3, "scope1/tower/bar_1/")
def testVarScopeObjectReuse(self):
with self.test_session():
vs = None
- with tf.variable_scope("jump", reuse=True) as scope:
+ with variable_scope.variable_scope("jump", reuse=True) as scope:
vs = scope
- with tf.variable_scope(vs) as jump:
+ with variable_scope.variable_scope(vs) as jump:
self.assertTrue(jump.reuse)
- with tf.variable_scope(vs, reuse=True) as jump_reuse:
+ with variable_scope.variable_scope(vs, reuse=True) as jump_reuse:
self.assertTrue(jump_reuse.reuse)
- with tf.variable_scope(vs, reuse=False) as jump_no_reuse:
+ with variable_scope.variable_scope(vs, reuse=False) as jump_no_reuse:
self.assertFalse(jump_no_reuse.reuse)
- with tf.variable_scope("jump", reuse=False) as scope:
+ with variable_scope.variable_scope("jump", reuse=False) as scope:
vs = scope
- with tf.variable_scope(vs) as jump:
+ with variable_scope.variable_scope(vs) as jump:
self.assertFalse(jump.reuse)
- with tf.variable_scope(vs, reuse=True) as jump_reuse:
+ with variable_scope.variable_scope(vs, reuse=True) as jump_reuse:
self.assertTrue(jump_reuse.reuse)
- with tf.variable_scope(vs, reuse=False) as jump_no_reuse:
+ with variable_scope.variable_scope(vs, reuse=False) as jump_no_reuse:
self.assertFalse(jump_no_reuse.reuse)
def testVarOpScope(self):
with self.test_session():
- with tf.name_scope("scope1"):
- with tf.variable_scope("tower", "default", []):
- self.assertEqual(tf.get_variable("w", []).name,
- "tower/w:0")
- with tf.name_scope("scope2") as sc2:
+ with ops.name_scope("scope1"):
+ with variable_scope.variable_scope("tower", "default", []):
+ self.assertEqual(
+ variable_scope.get_variable("w", []).name, "tower/w:0")
+ with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "scope1/tower/scope2/")
- with tf.variable_scope("tower", "default", []):
+ with variable_scope.variable_scope("tower", "default", []):
with self.assertRaises(ValueError):
- tf.get_variable("w", [])
- with tf.name_scope("scope2") as sc2:
+ variable_scope.get_variable("w", [])
+ with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "scope1/tower_1/scope2/")
- with tf.name_scope("scope2"):
- with tf.variable_scope(None, "default", []):
- self.assertEqual(tf.get_variable("w", []).name,
- "default/w:0")
- with tf.name_scope("scope2") as sc2:
+ with ops.name_scope("scope2"):
+ with variable_scope.variable_scope(None, "default", []):
+ self.assertEqual(
+ variable_scope.get_variable("w", []).name, "default/w:0")
+ with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "scope2/default/scope2/")
- with tf.variable_scope(None, "default", []):
- self.assertEqual(tf.get_variable("w", []).name,
- "default_1/w:0")
- with tf.name_scope("scope2") as sc2:
+ with variable_scope.variable_scope(None, "default", []):
+ self.assertEqual(
+ variable_scope.get_variable("w", []).name, "default_1/w:0")
+ with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "scope2/default_1/scope2/")
def testVarOpScopeUniqueNamesInterleavedSubstringScopes(self):
with self.test_session():
- with tf.variable_scope(None, "defaultScope1"):
- with tf.variable_scope(None, "layer"):
- self.assertEqual(tf.get_variable("w", []).name,
- "defaultScope1/layer/w:0")
- with tf.variable_scope(None, "defaultScope1"):
- with tf.variable_scope(None, "layer"):
- self.assertEqual(tf.get_variable("w", []).name,
- "defaultScope1_1/layer/w:0")
- with tf.variable_scope(None, "defaultScope"):
- with tf.variable_scope(None, "layer"):
- self.assertEqual(tf.get_variable("w", []).name,
- "defaultScope/layer/w:0")
- with tf.variable_scope(None, "defaultScope1"):
- with tf.variable_scope(None, "layer"):
- self.assertEqual(tf.get_variable("w", []).name,
- "defaultScope1_2/layer/w:0")
+ with variable_scope.variable_scope(None, "defaultScope1"):
+ with variable_scope.variable_scope(None, "layer"):
+ self.assertEqual(
+ variable_scope.get_variable("w", []).name,
+ "defaultScope1/layer/w:0")
+ with variable_scope.variable_scope(None, "defaultScope1"):
+ with variable_scope.variable_scope(None, "layer"):
+ self.assertEqual(
+ variable_scope.get_variable("w", []).name,
+ "defaultScope1_1/layer/w:0")
+ with variable_scope.variable_scope(None, "defaultScope"):
+ with variable_scope.variable_scope(None, "layer"):
+ self.assertEqual(
+ variable_scope.get_variable("w", []).name,
+ "defaultScope/layer/w:0")
+ with variable_scope.variable_scope(None, "defaultScope1"):
+ with variable_scope.variable_scope(None, "layer"):
+ self.assertEqual(
+ variable_scope.get_variable("w", []).name,
+ "defaultScope1_2/layer/w:0")
def testVarOpScopeReuse(self):
with self.test_session():
- with tf.variable_scope("outer") as outer:
- with tf.variable_scope("tower", "default", []):
- self.assertEqual(tf.get_variable("w", []).name,
- "outer/tower/w:0")
- with tf.name_scope("scope2") as sc2:
+ with variable_scope.variable_scope("outer") as outer:
+ with variable_scope.variable_scope("tower", "default", []):
+ self.assertEqual(
+ variable_scope.get_variable("w", []).name, "outer/tower/w:0")
+ with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/tower/scope2/")
- with tf.variable_scope(None, "default", []):
- self.assertEqual(tf.get_variable("w", []).name,
- "outer/default/w:0")
- with tf.name_scope("scope2") as sc2:
+ with variable_scope.variable_scope(None, "default", []):
+ self.assertEqual(
+ variable_scope.get_variable("w", []).name, "outer/default/w:0")
+ with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/default/scope2/")
- with tf.variable_scope(outer, reuse=True) as outer:
- with tf.variable_scope("tower", "default", []):
- self.assertEqual(tf.get_variable("w", []).name,
- "outer/tower/w:0")
- with tf.name_scope("scope2") as sc2:
+ with variable_scope.variable_scope(outer, reuse=True) as outer:
+ with variable_scope.variable_scope("tower", "default", []):
+ self.assertEqual(
+ variable_scope.get_variable("w", []).name, "outer/tower/w:0")
+ with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/tower/scope2/")
- with tf.variable_scope(None, "default", []):
- self.assertEqual(tf.get_variable("w", []).name,
- "outer/default/w:0")
- with tf.name_scope("scope2") as sc2:
+ with variable_scope.variable_scope(None, "default", []):
+ self.assertEqual(
+ variable_scope.get_variable("w", []).name, "outer/default/w:0")
+ with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/default/scope2/")
def testVarScopeGetVar(self):
with self.test_session():
- with tf.variable_scope("root"):
- with tf.variable_scope("towerA") as tower_a:
- va = tf.get_variable("v", [1])
+ with variable_scope.variable_scope("root"):
+ with variable_scope.variable_scope("towerA") as tower_a:
+ va = variable_scope.get_variable("v", [1])
self.assertEqual(va.name, "root/towerA/v:0")
- with tf.variable_scope(tower_a, reuse=True):
- va2 = tf.get_variable("v", [1])
+ with variable_scope.variable_scope(tower_a, reuse=True):
+ va2 = variable_scope.get_variable("v", [1])
self.assertEqual(va2, va)
- with tf.variable_scope("towerB"):
- vb = tf.get_variable("v", [1])
+ with variable_scope.variable_scope("towerB"):
+ vb = variable_scope.get_variable("v", [1])
self.assertEqual(vb.name, "root/towerB/v:0")
with self.assertRaises(ValueError):
- with tf.variable_scope("towerA"):
- va2 = tf.get_variable("v", [1])
+ with variable_scope.variable_scope("towerA"):
+ va2 = variable_scope.get_variable("v", [1])
- with tf.variable_scope("towerA", reuse=True):
- va2 = tf.get_variable("v", [1])
+ with variable_scope.variable_scope("towerA", reuse=True):
+ va2 = variable_scope.get_variable("v", [1])
self.assertEqual(va2, va)
- with tf.variable_scope("foo"):
- with tf.variable_scope("bar"):
- v = tf.get_variable("v", [1])
+ with variable_scope.variable_scope("foo"):
+ with variable_scope.variable_scope("bar"):
+ v = variable_scope.get_variable("v", [1])
self.assertEqual(v.name, "root/foo/bar/v:0")
- with tf.variable_scope(tower_a, reuse=True):
- va3 = tf.get_variable("v", [1])
+ with variable_scope.variable_scope(tower_a, reuse=True):
+ va3 = variable_scope.get_variable("v", [1])
self.assertEqual(va, va3)
with self.assertRaises(ValueError):
- with tf.variable_scope(tower_a, reuse=True):
- with tf.variable_scope("baz"):
- tf.get_variable("v", [1])
+ with variable_scope.variable_scope(tower_a, reuse=True):
+ with variable_scope.variable_scope("baz"):
+ variable_scope.get_variable("v", [1])
with self.assertRaises(ValueError) as exc:
- with tf.variable_scope(tower_a, reuse=True):
- tf.get_variable("v", [2]) # Different shape.
+ with variable_scope.variable_scope(tower_a, reuse=True):
+ variable_scope.get_variable("v", [2]) # Different shape.
self.assertEqual("shape" in str(exc.exception), True)
with self.assertRaises(ValueError) as exc:
- with tf.variable_scope(tower_a, reuse=True):
- tf.get_variable("v", [1], dtype=tf.int32)
+ with variable_scope.variable_scope(tower_a, reuse=True):
+ variable_scope.get_variable("v", [1], dtype=dtypes.int32)
self.assertEqual("dtype" in str(exc.exception), True)
def testVarScopeOuterScope(self):
with self.test_session():
- with tf.variable_scope("outer") as outer:
+ with variable_scope.variable_scope("outer") as outer:
pass
- with tf.variable_scope(outer):
- self.assertEqual(tf.get_variable("w", []).name,
- "outer/w:0")
- with tf.name_scope("scope2") as sc2:
+ with variable_scope.variable_scope(outer):
+ self.assertEqual(variable_scope.get_variable("w", []).name, "outer/w:0")
+ with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/scope2/")
- with tf.variable_scope("default"):
- self.assertEqual(tf.get_variable("w", []).name,
- "outer/default/w:0")
- with tf.name_scope("scope2") as sc2:
+ with variable_scope.variable_scope("default"):
+ self.assertEqual(
+ variable_scope.get_variable("w", []).name, "outer/default/w:0")
+ with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/default/scope2/")
- with tf.variable_scope(outer, reuse=True):
- self.assertEqual(tf.get_variable("w", []).name,
- "outer/w:0")
- with tf.name_scope("scope2") as sc2:
+ with variable_scope.variable_scope(outer, reuse=True):
+ self.assertEqual(variable_scope.get_variable("w", []).name, "outer/w:0")
+ with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_2/scope2/")
- with tf.variable_scope("default", reuse=True):
- self.assertEqual(tf.get_variable("w", []).name,
- "outer/default/w:0")
- with tf.name_scope("scope2") as sc2:
+ with variable_scope.variable_scope("default", reuse=True):
+ self.assertEqual(
+ variable_scope.get_variable("w", []).name, "outer/default/w:0")
+ with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_2/default/scope2/")
def testVarScopeNestedOuterScope(self):
with self.test_session():
- with tf.variable_scope("outer") as outer:
- with tf.variable_scope(outer):
- self.assertEqual(tf.get_variable("w", []).name,
- "outer/w:0")
- with tf.name_scope("scope2") as sc2:
+ with variable_scope.variable_scope("outer") as outer:
+ with variable_scope.variable_scope(outer):
+ self.assertEqual(
+ variable_scope.get_variable("w", []).name, "outer/w:0")
+ with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/outer/scope2/")
- with tf.variable_scope("default"):
- self.assertEqual(tf.get_variable("w", []).name,
- "outer/default/w:0")
- with tf.name_scope("scope2") as sc2:
+ with variable_scope.variable_scope("default"):
+ self.assertEqual(
+ variable_scope.get_variable("w", []).name, "outer/default/w:0")
+ with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/default/scope2/")
- with tf.variable_scope(outer, reuse=True):
- self.assertEqual(tf.get_variable("w", []).name,
- "outer/w:0")
- with tf.name_scope("scope2") as sc2:
+ with variable_scope.variable_scope(outer, reuse=True):
+ self.assertEqual(
+ variable_scope.get_variable("w", []).name, "outer/w:0")
+ with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/outer_1/scope2/")
- with tf.variable_scope("default", reuse=True):
- self.assertEqual(tf.get_variable("w", []).name,
- "outer/default/w:0")
- with tf.name_scope("scope2") as sc2:
+ with variable_scope.variable_scope("default", reuse=True):
+ self.assertEqual(
+ variable_scope.get_variable("w", []).name, "outer/default/w:0")
+ with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/default_1/scope2/")
def testVarOpScopeReuseParam(self):
with self.test_session():
- with tf.variable_scope("outer") as outer:
- with tf.variable_scope("tower", "default", []):
- self.assertEqual(tf.get_variable("w", []).name,
- "outer/tower/w:0")
- with tf.name_scope("scope2") as sc2:
+ with variable_scope.variable_scope("outer") as outer:
+ with variable_scope.variable_scope("tower", "default", []):
+ self.assertEqual(
+ variable_scope.get_variable("w", []).name, "outer/tower/w:0")
+ with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/tower/scope2/")
- with tf.variable_scope(None, "default", []):
- self.assertEqual(tf.get_variable("w", []).name,
- "outer/default/w:0")
- with tf.name_scope("scope2") as sc2:
+ with variable_scope.variable_scope(None, "default", []):
+ self.assertEqual(
+ variable_scope.get_variable("w", []).name, "outer/default/w:0")
+ with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/default/scope2/")
- with tf.variable_scope(outer) as outer:
- with tf.variable_scope("tower", "default", reuse=True):
- self.assertEqual(tf.get_variable("w", []).name,
- "outer/tower/w:0")
- with tf.name_scope("scope2") as sc2:
+ with variable_scope.variable_scope(outer) as outer:
+ with variable_scope.variable_scope("tower", "default", reuse=True):
+ self.assertEqual(
+ variable_scope.get_variable("w", []).name, "outer/tower/w:0")
+ with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/tower/scope2/")
outer.reuse_variables()
- with tf.variable_scope(None, "default", []):
- self.assertEqual(tf.get_variable("w", []).name,
- "outer/default/w:0")
- with tf.name_scope("scope2") as sc2:
+ with variable_scope.variable_scope(None, "default", []):
+ self.assertEqual(
+ variable_scope.get_variable("w", []).name, "outer/default/w:0")
+ with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/default/scope2/")
def testVarOpScopeReuseError(self):
with self.test_session():
with self.assertRaises(ValueError):
- with tf.variable_scope(None, "default", reuse=True):
- self.assertEqual(tf.get_variable("w", []).name,
- "outer/tower/w:0")
+ with variable_scope.variable_scope(None, "default", reuse=True):
+ self.assertEqual(
+ variable_scope.get_variable("w", []).name, "outer/tower/w:0")
def testVarOpScopeOuterScope(self):
with self.test_session():
- with tf.variable_scope("outer") as outer:
+ with variable_scope.variable_scope("outer") as outer:
pass
- with tf.variable_scope(outer, "default", []):
- self.assertEqual(tf.get_variable("w", []).name,
- "outer/w:0")
- with tf.name_scope("scope2") as sc2:
+ with variable_scope.variable_scope(outer, "default", []):
+ self.assertEqual(variable_scope.get_variable("w", []).name, "outer/w:0")
+ with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/scope2/")
- with tf.variable_scope(None, "default", []):
- self.assertEqual(tf.get_variable("w", []).name,
- "outer/default/w:0")
- with tf.name_scope("scope2") as sc2:
+ with variable_scope.variable_scope(None, "default", []):
+ self.assertEqual(
+ variable_scope.get_variable("w", []).name, "outer/default/w:0")
+ with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/default/scope2/")
- with tf.variable_scope(outer, "default", reuse=True):
- self.assertEqual(tf.get_variable("w", []).name,
- "outer/w:0")
- with tf.name_scope("scope2") as sc2:
+ with variable_scope.variable_scope(outer, "default", reuse=True):
+ self.assertEqual(variable_scope.get_variable("w", []).name, "outer/w:0")
+ with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_2/scope2/")
outer.reuse_variables()
- with tf.variable_scope(None, "default", []):
- self.assertEqual(tf.get_variable("w", []).name,
- "outer/default/w:0")
- with tf.name_scope("scope2") as sc2:
+ with variable_scope.variable_scope(None, "default", []):
+ self.assertEqual(
+ variable_scope.get_variable("w", []).name, "outer/default/w:0")
+ with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_2/default/scope2/")
def testVarOpScopeNestedOuterScope(self):
with self.test_session():
- with tf.variable_scope("outer") as outer:
- with tf.variable_scope(outer, "default", []):
- self.assertEqual(tf.get_variable("w", []).name,
- "outer/w:0")
- with tf.name_scope("scope2") as sc2:
+ with variable_scope.variable_scope("outer") as outer:
+ with variable_scope.variable_scope(outer, "default", []):
+ self.assertEqual(
+ variable_scope.get_variable("w", []).name, "outer/w:0")
+ with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/outer/scope2/")
- with tf.variable_scope(None, "default", []):
- self.assertEqual(tf.get_variable("w", []).name,
- "outer/default/w:0")
- with tf.name_scope("scope2") as sc2:
+ with variable_scope.variable_scope(None, "default", []):
+ self.assertEqual(
+ variable_scope.get_variable("w", []).name, "outer/default/w:0")
+ with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/default/scope2/")
- with tf.variable_scope(outer, "default", reuse=True):
- self.assertEqual(tf.get_variable("w", []).name,
- "outer/w:0")
- with tf.name_scope("scope2") as sc2:
+ with variable_scope.variable_scope(outer, "default", reuse=True):
+ self.assertEqual(variable_scope.get_variable("w", []).name, "outer/w:0")
+ with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/scope2/")
- with tf.variable_scope(None, "default", []):
- self.assertEqual(tf.get_variable("w", []).name,
- "outer/default/w:0")
- with tf.name_scope("scope2") as sc2:
+ with variable_scope.variable_scope(None, "default", []):
+ self.assertEqual(
+ variable_scope.get_variable("w", []).name, "outer/default/w:0")
+ with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/default/scope2/")
def testGetLocalVar(self):
with self.test_session():
# Check that a local variable respects naming.
- with tf.variable_scope("outer") as outer:
- with tf.variable_scope(outer, "default", []):
+ with variable_scope.variable_scope("outer") as outer:
+ with variable_scope.variable_scope(outer, "default", []):
local_var = variable_scope.get_local_variable(
"w", [], collections=["foo"])
self.assertEqual(local_var.name, "outer/w:0")
# Since the variable is local, it should be in the local variable collection
# but not in the trainable collection.
- self.assertIn(local_var, tf.get_collection(tf.GraphKeys.LOCAL_VARIABLES))
- self.assertIn(local_var, tf.get_collection("foo"))
- self.assertNotIn(
- local_var, tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES))
+ self.assertIn(local_var,
+ ops.get_collection(ops.GraphKeys.LOCAL_VARIABLES))
+ self.assertIn(local_var, ops.get_collection("foo"))
+ self.assertNotIn(local_var,
+ ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES))
# Check that a local variable respects `reuse`.
- with tf.variable_scope(outer, "default", reuse=True):
- self.assertEqual(variable_scope.get_local_variable("w", []).name,
- "outer/w:0")
+ with variable_scope.variable_scope(outer, "default", reuse=True):
+ self.assertEqual(
+ variable_scope.get_local_variable("w", []).name, "outer/w:0")
def testGetVarWithDevice(self):
- g = tf.Graph()
+ g = ops.Graph()
varname_type = []
def device_func(op):
@@ -662,11 +689,12 @@ class VariableScopeTest(tf.test.TestCase):
return "/gpu:0"
with g.as_default():
- with tf.device(device_func):
- _ = tf.get_variable("x", (100, 200))
- _ = tf.get_variable("y", dtype=tf.int64, initializer=numpy.arange(73))
- self.assertEqual(varname_type[0], ("x", tf.float32))
- self.assertEqual(varname_type[1], ("y", tf.int64))
+ with ops.device(device_func):
+ _ = variable_scope.get_variable("x", (100, 200))
+ _ = variable_scope.get_variable(
+ "y", dtype=dtypes.int64, initializer=numpy.arange(73))
+ self.assertEqual(varname_type[0], ("x", dtypes.float32))
+ self.assertEqual(varname_type[1], ("y", dtypes.int64))
def axis0_into1_partitioner(shape=None, **unused_kwargs):
@@ -686,23 +714,31 @@ def axis0_into3_partitioner(shape=None, **unused_kwargs):
return part
-class VariableScopeWithPartitioningTest(tf.test.TestCase):
+class VariableScopeWithPartitioningTest(test.TestCase):
def testInitFromNonInitializer(self):
with self.test_session() as sess:
# Test various dtypes with a zeros initializer as follows:
- types = [tf.int8, tf.uint8, tf.int16, tf.uint16, tf.int32, tf.int64,
- tf.bool]
+ types = [
+ dtypes.int8, dtypes.uint8, dtypes.int16, dtypes.uint16, dtypes.int32,
+ dtypes.int64, dtypes.bool
+ ]
# Use a different variable name to distinguish the various dtypes.
for (i, dtype) in enumerate(types):
- x = tf.get_variable(name='x%d' % i, shape=(3, 4), dtype=dtype,
+ x = variable_scope.get_variable(
+ name="x%d" % i,
+ shape=(3, 4),
+ dtype=dtype,
partitioner=axis0_into2_partitioner)
- y = tf.get_variable(name='y%d' % i, shape=(6, 4), dtype=dtype,
+ y = variable_scope.get_variable(
+ name="y%d" % i,
+ shape=(6, 4),
+ dtype=dtype,
partitioner=axis0_into2_partitioner,
initializer=init_ops.zeros_initializer(dtype=dtype))
- tf.global_variables_initializer().run()
+ variables_lib.global_variables_initializer().run()
# x and y become lists of variables after partitioning.
val_x = sess.run(list(x))
val_y = sess.run(list(y))
@@ -710,140 +746,152 @@ class VariableScopeWithPartitioningTest(tf.test.TestCase):
self.assertAllEqual(val_x, val_y)
def testResultNameMatchesRequested(self):
- with tf.variable_scope("scope0", partitioner=axis0_into2_partitioner):
- v = tf.get_variable("name0", shape=(3, 1, 1))
+ with variable_scope.variable_scope(
+ "scope0", partitioner=axis0_into2_partitioner):
+ v = variable_scope.get_variable("name0", shape=(3, 1, 1))
self.assertEqual(v.name, "scope0/name0")
v_concat = v.as_tensor()
self.assertEqual(v_concat.name, "scope0/name0:0")
- variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
+ variables = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertIn("scope0/name0/part_0:0", [x.name for x in variables])
self.assertIn("scope0/name0/part_1:0", [x.name for x in variables])
self.assertNotIn("scope0/name0/part_2:0", [x.name for x in variables])
def testBreaksIfPartitioningChanges(self):
- with tf.variable_scope("scope0", partitioner=axis0_into2_partitioner):
- tf.get_variable("name0", shape=(3, 1, 1))
+ with variable_scope.variable_scope(
+ "scope0", partitioner=axis0_into2_partitioner):
+ variable_scope.get_variable("name0", shape=(3, 1, 1))
- with tf.variable_scope("scope0",
- partitioner=axis0_into3_partitioner,
- reuse=True):
+ with variable_scope.variable_scope(
+ "scope0", partitioner=axis0_into3_partitioner, reuse=True):
with self.assertRaisesRegexp(
ValueError,
"Trying to reuse partitioned variable .* but specified partitions .* "
"and found partitions .*"):
- tf.get_variable("name0", shape=(3, 1, 1))
+ variable_scope.get_variable("name0", shape=(3, 1, 1))
- with tf.variable_scope("scope0",
- partitioner=axis0_into1_partitioner,
- reuse=True):
+ with variable_scope.variable_scope(
+ "scope0", partitioner=axis0_into1_partitioner, reuse=True):
with self.assertRaisesRegexp(
ValueError,
"Trying to reuse partitioned variable .* but specified partitions .* "
"and found partitions .*"):
- tf.get_variable("name0", shape=(3, 1, 1))
+ variable_scope.get_variable("name0", shape=(3, 1, 1))
def testReturnsExistingConcatenatedValueIfReuse(self):
- with tf.variable_scope("scope0", partitioner=axis0_into2_partitioner):
- v_concat = tf.get_variable("name0", shape=(3, 1, 1))
- tf.get_variable_scope().reuse_variables()
- v_concat_2 = tf.get_variable("name0", shape=(3, 1, 1))
+ with variable_scope.variable_scope(
+ "scope0", partitioner=axis0_into2_partitioner):
+ v_concat = variable_scope.get_variable("name0", shape=(3, 1, 1))
+ variable_scope.get_variable_scope().reuse_variables()
+ v_concat_2 = variable_scope.get_variable("name0", shape=(3, 1, 1))
self.assertEqual(v_concat, v_concat_2)
def testAllowsReuseWithoutPartitioner(self):
- with tf.variable_scope("scope0", partitioner=axis0_into2_partitioner):
- v = tf.get_variable("name0", shape=(3, 1, 1))
- with tf.variable_scope("scope0", reuse=True):
- v_reused = tf.get_variable("name0")
+ with variable_scope.variable_scope(
+ "scope0", partitioner=axis0_into2_partitioner):
+ v = variable_scope.get_variable("name0", shape=(3, 1, 1))
+ with variable_scope.variable_scope("scope0", reuse=True):
+ v_reused = variable_scope.get_variable("name0")
self.assertEqual(v, v_reused)
def testPropagatePartitionerOnReopening(self):
- with tf.variable_scope("scope0", partitioner=axis0_into2_partitioner) as vs:
+ with variable_scope.variable_scope(
+ "scope0", partitioner=axis0_into2_partitioner) as vs:
self.assertEqual(axis0_into2_partitioner, vs.partitioner)
- with tf.variable_scope(vs) as vs1:
+ with variable_scope.variable_scope(vs) as vs1:
self.assertEqual(axis0_into2_partitioner, vs1.partitioner)
def testScalarIgnoresPartitioner(self):
- with tf.variable_scope("scope0", partitioner=axis0_into2_partitioner):
- v = tf.get_variable("name0", shape=())
+ with variable_scope.variable_scope(
+ "scope0", partitioner=axis0_into2_partitioner):
+ v = variable_scope.get_variable("name0", shape=())
self.assertEqual(v.name, "scope0/name0:0")
- variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
+ variables = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertIn("scope0/name0:0", [x.name for x in variables])
def testPartitionConcatenatesAlongCorrectAxis(self):
+
def _part_axis_0(**unused_kwargs):
return (2, 1, 1)
def _part_axis_1(**unused_kwargs):
return (1, 2, 1)
- with tf.variable_scope("root"):
- v0 = tf.get_variable("n0", shape=(2, 2, 2), partitioner=_part_axis_0)
- v1 = tf.get_variable("n1", shape=(2, 2, 2), partitioner=_part_axis_1)
+ with variable_scope.variable_scope("root"):
+ v0 = variable_scope.get_variable(
+ "n0", shape=(2, 2, 2), partitioner=_part_axis_0)
+ v1 = variable_scope.get_variable(
+ "n1", shape=(2, 2, 2), partitioner=_part_axis_1)
self.assertEqual(v0.get_shape(), (2, 2, 2))
self.assertEqual(v1.get_shape(), (2, 2, 2))
- n0_0 = tf.get_default_graph().get_tensor_by_name("root/n0/part_0:0")
- n0_1 = tf.get_default_graph().get_tensor_by_name("root/n0/part_1:0")
+ n0_0 = ops.get_default_graph().get_tensor_by_name("root/n0/part_0:0")
+ n0_1 = ops.get_default_graph().get_tensor_by_name("root/n0/part_1:0")
self.assertEqual(n0_0.get_shape(), (1, 2, 2))
self.assertEqual(n0_1.get_shape(), (1, 2, 2))
- n1_0 = tf.get_default_graph().get_tensor_by_name("root/n1/part_0:0")
- n1_1 = tf.get_default_graph().get_tensor_by_name("root/n1/part_1:0")
+ n1_0 = ops.get_default_graph().get_tensor_by_name("root/n1/part_0:0")
+ n1_1 = ops.get_default_graph().get_tensor_by_name("root/n1/part_1:0")
self.assertEqual(n1_0.get_shape(), (2, 1, 2))
self.assertEqual(n1_1.get_shape(), (2, 1, 2))
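# Sketch (illustrative, not from this patch): the partitioner contract the
# tests above exercise. A partitioner maps a variable's shape to per-axis
# partition counts; get_variable creates the part variables and presents
# them as one logical tensor.
from tensorflow.python.ops import variable_scope


def _axis0_into2(shape=None, **unused_kwargs):
  part = [1] * len(shape)  # leave every axis whole...
  part[0] = 2              # ...except axis 0, split into two slices
  return part


with variable_scope.variable_scope("sketch", partitioner=_axis0_into2):
  v = variable_scope.get_variable("w", shape=(4, 3))
  # v acts as a (4, 3) tensor backed by "sketch/w/part_0:0" and
  # "sketch/w/part_1:0", each of shape (2, 3).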
-class VariableScopeWithCustomGetterTest(tf.test.TestCase):
+class VariableScopeWithCustomGetterTest(test.TestCase):
def testNonCallableGetterFails(self):
with self.assertRaisesRegexp(ValueError, r"custom_getter .* not callable:"):
- with tf.variable_scope("scope0", custom_getter=3):
- tf.get_variable("name0")
+ with variable_scope.variable_scope("scope0", custom_getter=3):
+ variable_scope.get_variable("name0")
with self.assertRaisesRegexp(ValueError, r"custom_getter .* not callable:"):
- tf.get_variable("name0", custom_getter=3)
+ variable_scope.get_variable("name0", custom_getter=3)
def testNoSideEffectsWithIdentityCustomGetter(self):
called = [0]
+
def custom_getter(getter, *args, **kwargs):
called[0] += 1
return getter(*args, **kwargs)
- with tf.variable_scope("scope", custom_getter=custom_getter) as scope:
- v = tf.get_variable("v", [1])
- with tf.variable_scope(scope, reuse=True):
- v2 = tf.get_variable("v", [1])
- with tf.variable_scope("new_scope") as new_scope:
- v3 = tf.get_variable("v3", [1])
- with tf.variable_scope(new_scope, reuse=True, custom_getter=custom_getter):
- v4 = tf.get_variable("v3", [1])
+
+ with variable_scope.variable_scope(
+ "scope", custom_getter=custom_getter) as scope:
+ v = variable_scope.get_variable("v", [1])
+ with variable_scope.variable_scope(scope, reuse=True):
+ v2 = variable_scope.get_variable("v", [1])
+ with variable_scope.variable_scope("new_scope") as new_scope:
+ v3 = variable_scope.get_variable("v3", [1])
+ with variable_scope.variable_scope(
+ new_scope, reuse=True, custom_getter=custom_getter):
+ v4 = variable_scope.get_variable("v3", [1])
self.assertEqual(v, v2)
self.assertEqual(v3, v4)
self.assertEqual(3, called[0])  # v3 skipped: "new_scope" had no custom getter
def testGetterThatCreatesTwoVariablesAndSumsThem(self):
+
def custom_getter(getter, name, *args, **kwargs):
g_0 = getter("%s/0" % name, *args, **kwargs)
g_1 = getter("%s/1" % name, *args, **kwargs)
- with tf.name_scope("custom_getter"):
+ with ops.name_scope("custom_getter"):
return g_0 + g_1
- with tf.variable_scope("scope", custom_getter=custom_getter):
- v = tf.get_variable("v", [1, 2, 3])
+ with variable_scope.variable_scope("scope", custom_getter=custom_getter):
+ v = variable_scope.get_variable("v", [1, 2, 3])
self.assertEqual([1, 2, 3], v.get_shape())
- true_vars = tf.trainable_variables()
+ true_vars = variables_lib.trainable_variables()
self.assertEqual(2, len(true_vars))
self.assertEqual("scope/v/0:0", true_vars[0].name)
self.assertEqual("scope/v/1:0", true_vars[1].name)
self.assertEqual("custom_getter/add:0", v.name)
with self.test_session() as sess:
- tf.global_variables_initializer().run()
+ variables_lib.global_variables_initializer().run()
np_vars, np_v = sess.run([true_vars, v])
self.assertAllClose(np_v, sum(np_vars))
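# Sketch (illustrative, not from this patch): why the identity-getter test
# above counts 3 calls rather than 4. A VariableScope object remembers its
# custom_getter, so reopening that object re-applies it, while entering a
# fresh scope without a getter skips it.
from tensorflow.python.ops import variable_scope

calls = [0]


def counting_getter(getter, *args, **kwargs):
  calls[0] += 1
  return getter(*args, **kwargs)


with variable_scope.variable_scope("s", custom_getter=counting_getter) as s:
  variable_scope.get_variable("v", [1])  # counted
with variable_scope.variable_scope(s, reuse=True):
  variable_scope.get_variable("v", [1])  # counted: reopened scope keeps getter
with variable_scope.variable_scope("t"):
  variable_scope.get_variable("w", [1])  # not counted: no getter in "t"
assert calls[0] == 2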
-class PartitionInfoTest(tf.test.TestCase):
+class PartitionInfoTest(test.TestCase):
def testConstructorChecks(self):
# Invalid arg types.
@@ -901,4 +949,4 @@ class PartitionInfoTest(tf.test.TestCase):
if __name__ == "__main__":
- tf.test.main()
+ test.main()
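The rewrite in variable_scope_test.py above is mechanical: each `tf.*` symbol is re-imported from the module that defines it, and `tf.test.TestCase`/`tf.test.main` become `test.TestCase`/`test.main`. A minimal standalone sketch of the pattern, using a hypothetical test (names here are illustrative, not from the patch):

from tensorflow.python.framework import dtypes
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import test


class ImportStyleSketchTest(test.TestCase):

  # Before this patch the body would have read:
  #   v = tf.get_variable("v", [1], dtype=tf.int32)
  def testGetVariable(self):
    with self.test_session():
      v = variable_scope.get_variable("v", [1], dtype=dtypes.int32)
      self.assertEqual("v:0", v.name)


if __name__ == "__main__":
  test.main()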
diff --git a/tensorflow/python/kernel_tests/variables_test.py b/tensorflow/python/kernel_tests/variables_test.py
index 38c84cdef1..7f15e13aae 100644
--- a/tensorflow/python/kernel_tests/variables_test.py
+++ b/tensorflow/python/kernel_tests/variables_test.py
@@ -12,8 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for tf.py."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
@@ -21,24 +21,32 @@ from __future__ import print_function
import operator
import numpy as np
-import tensorflow as tf
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import errors_impl
+from tensorflow.python.framework import ops
+from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_state_ops
+from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
+from tensorflow.python.platform import test
+from tensorflow.python.training import gradient_descent
+from tensorflow.python.util import compat
-class VariablesTestCase(tf.test.TestCase):
+class VariablesTestCase(test.TestCase):
def testInitialization(self):
with self.test_session():
- var0 = tf.Variable(0.0)
+ var0 = variables.Variable(0.0)
self.assertEqual("Variable:0", var0.name)
self.assertEqual([], var0.get_shape())
self.assertEqual([], var0.get_shape())
- var1 = tf.Variable(1.1)
+ var1 = variables.Variable(1.1)
self.assertEqual("Variable_1:0", var1.name)
self.assertEqual([], var1.get_shape())
self.assertEqual([], var1.get_shape())
@@ -49,19 +57,19 @@ class VariablesTestCase(tf.test.TestCase):
with self.assertRaisesOpError("Attempting to use uninitialized value"):
var1.eval()
- tf.global_variables_initializer().run()
+ variables.global_variables_initializer().run()
self.assertAllClose(0.0, var0.eval())
self.assertAllClose(1.1, var1.eval())
def testInitializationOrder(self):
with self.test_session():
- rnd = tf.Variable(random_ops.random_uniform([3, 6]), name="rnd")
+ rnd = variables.Variable(random_ops.random_uniform([3, 6]), name="rnd")
self.assertEqual("rnd:0", rnd.name)
self.assertEqual([3, 6], rnd.get_shape())
self.assertEqual([3, 6], rnd.get_shape())
- dep = tf.Variable(rnd.initialized_value(), name="dep")
+ dep = variables.Variable(rnd.initialized_value(), name="dep")
self.assertEqual("dep:0", dep.name)
self.assertEqual([3, 6], dep.get_shape())
self.assertEqual([3, 6], dep.get_shape())
@@ -70,32 +78,31 @@ class VariablesTestCase(tf.test.TestCase):
added_val = rnd.initialized_value() + dep.initialized_value() + 2.0
added_val.set_shape(rnd.get_shape())
- depdep = tf.Variable(added_val, name="depdep")
+ depdep = variables.Variable(added_val, name="depdep")
self.assertEqual("depdep:0", depdep.name)
self.assertEqual([3, 6], depdep.get_shape())
self.assertEqual([3, 6], depdep.get_shape())
- tf.global_variables_initializer().run()
+ variables.global_variables_initializer().run()
self.assertAllClose(rnd.eval(), dep.eval())
- self.assertAllClose(rnd.eval() + dep.eval() + 2.0,
- depdep.eval())
+ self.assertAllClose(rnd.eval() + dep.eval() + 2.0, depdep.eval())
def testIterable(self):
with self.assertRaisesRegexp(TypeError, "not iterable"):
- for _ in tf.Variable(0.0):
+ for _ in variables.Variable(0.0):
pass
with self.assertRaisesRegexp(TypeError, "not iterable"):
- for _ in tf.Variable([0.0, 1.0]):
+ for _ in variables.Variable([0.0, 1.0]):
pass
def testAssignments(self):
with self.test_session():
- var = tf.Variable(0.0)
+ var = variables.Variable(0.0)
plus_one = var.assign_add(1.0)
minus_one = var.assign_sub(2.0)
four = var.assign(4.0)
- tf.global_variables_initializer().run()
+ variables.global_variables_initializer().run()
self.assertAllClose(0.0, var.eval())
self.assertAllClose(1.0, plus_one.eval())
@@ -109,11 +116,11 @@ class VariablesTestCase(tf.test.TestCase):
def _countUpToTest(self, dtype):
with self.test_session():
- zero = tf.constant(0, dtype=dtype)
- var = tf.Variable(zero)
+ zero = constant_op.constant(0, dtype=dtype)
+ var = variables.Variable(zero)
count_up_to = var.count_up_to(3)
- tf.global_variables_initializer().run()
+ variables.global_variables_initializer().run()
self.assertEqual(0, var.eval())
self.assertEqual(0, count_up_to.eval())
@@ -134,19 +141,19 @@ class VariablesTestCase(tf.test.TestCase):
self.assertEqual(3, var.eval())
def testCountUpToInt32(self):
- self._countUpToTest(tf.int32)
+ self._countUpToTest(dtypes.int32)
def testCountUpToInt64(self):
- self._countUpToTest(tf.int64)
+ self._countUpToTest(dtypes.int64)
def testControlDepsNone(self):
with self.test_session():
- c = tf.constant(1.0)
- with tf.control_dependencies([c]):
+ c = constant_op.constant(1.0)
+ with ops.control_dependencies([c]):
# d gets the control dep.
- d = tf.constant(2.0)
+ d = constant_op.constant(2.0)
# variables do not.
- var_x = tf.Variable(2.0)
+ var_x = variables.Variable(2.0)
# initialized_value does not either.
inited_x = var_x.initialized_value()
self.assertEqual([c.op], d.op.control_inputs)
@@ -157,20 +164,22 @@ class VariablesTestCase(tf.test.TestCase):
def testControlFlow(self):
with self.test_session() as sess:
- v0 = tf.Variable(0, name="v0")
+ v0 = variables.Variable(0, name="v0")
var_dict = {}
+
# Create a variable in each of the cond clauses.
def var_in_then_clause():
- v1 = tf.Variable(1, name="v1")
+ v1 = variables.Variable(1, name="v1")
var_dict["v1"] = v1
return v1 + v0
+
def var_in_else_clause():
- v2 = tf.Variable(2, name="v2")
+ v2 = variables.Variable(2, name="v2")
var_dict["v2"] = v2
return v2 + v0
- add = control_flow_ops.cond(tf.less(v0, 10),
- var_in_then_clause,
- var_in_else_clause)
+
+ add = control_flow_ops.cond(
+ math_ops.less(v0, 10), var_in_then_clause, var_in_else_clause)
v1 = var_dict["v1"]
v2 = var_dict["v2"]
# We should be able to initialize and run v1 and v2 without initializing
@@ -180,10 +189,10 @@ class VariablesTestCase(tf.test.TestCase):
sess.run(v2.initializer)
self.assertEqual([2], sess.run(v2))
# v0 should still be uninitialized.
- with self.assertRaisesRegexp(tf.OpError, "uninitialized"):
+ with self.assertRaisesRegexp(errors_impl.OpError, "uninitialized"):
sess.run(v0)
# We should not be able to run 'add' yet.
- with self.assertRaisesRegexp(tf.OpError, "uninitialized"):
+ with self.assertRaisesRegexp(errors_impl.OpError, "uninitialized"):
sess.run(add)
# If we initialize v0 we should be able to run 'add'.
sess.run(v0.initializer)
@@ -191,51 +200,55 @@ class VariablesTestCase(tf.test.TestCase):
def testUseVariableAsTensor(self):
with self.test_session():
- var_x = tf.Variable(2.0)
- var_y = tf.Variable(3.0)
- tf.global_variables_initializer().run()
+ var_x = variables.Variable(2.0)
+ var_y = variables.Variable(3.0)
+ variables.global_variables_initializer().run()
self.assertAllClose(2.0, var_x.eval())
self.assertAllClose(3.0, var_y.eval())
- self.assertAllClose(5.0, tf.add(var_x, var_y).eval())
+ self.assertAllClose(5.0, math_ops.add(var_x, var_y).eval())
def testZeroSizeVarSameAsConst(self):
with self.test_session():
- zero_size_var = tf.Variable(tf.zeros([0, 2]))
- zero_size_const = tf.ones([2, 0])
- variable_mul = tf.matmul(zero_size_const, zero_size_var)
- const_mul = tf.matmul(zero_size_const, zero_size_const, transpose_b=True)
- tf.global_variables_initializer().run()
+ zero_size_var = variables.Variable(array_ops.zeros([0, 2]))
+ zero_size_const = array_ops.ones([2, 0])
+ variable_mul = math_ops.matmul(zero_size_const, zero_size_var)
+ const_mul = math_ops.matmul(
+ zero_size_const, zero_size_const, transpose_b=True)
+ variables.global_variables_initializer().run()
variable_output = variable_mul.eval()
self.assertAllClose(const_mul.eval(), variable_output)
self.assertAllClose([[0., 0.], [0., 0.]], variable_output)
def testCachingDevice(self):
with self.test_session():
- var = tf.Variable(2.0)
+ var = variables.Variable(2.0)
self.assertEqual(var.device, var.value().device)
self.assertEqual(var.device, var.initialized_value().device)
- var_cached = tf.Variable(2.0, caching_device="/job:foo")
+ var_cached = variables.Variable(2.0, caching_device="/job:foo")
self.assertFalse(var_cached.device.startswith("/job:foo"))
self.assertTrue(var_cached.value().device.startswith("/job:foo"))
- self.assertTrue(
- var_cached.initialized_value().device.startswith("/job:foo"))
+ self.assertTrue(var_cached.initialized_value().device.startswith(
+ "/job:foo"))
def testCollections(self):
with self.test_session():
- var_x = tf.Variable(2.0)
- var_y = tf.Variable(2.0, trainable=False)
- var_z = tf.Variable(2.0, trainable=True)
- var_t = tf.Variable(
- 2.0, trainable=True,
- collections=[tf.GraphKeys.TRAINABLE_VARIABLES,
- tf.GraphKeys.GLOBAL_VARIABLES])
- self.assertEqual([var_x, var_y, var_z, var_t], tf.global_variables())
- self.assertEqual([var_x, var_z, var_t], tf.trainable_variables())
+ var_x = variables.Variable(2.0)
+ var_y = variables.Variable(2.0, trainable=False)
+ var_z = variables.Variable(2.0, trainable=True)
+ var_t = variables.Variable(
+ 2.0,
+ trainable=True,
+ collections=[
+ ops.GraphKeys.TRAINABLE_VARIABLES, ops.GraphKeys.GLOBAL_VARIABLES
+ ])
+ self.assertEqual([var_x, var_y, var_z, var_t],
+ variables.global_variables())
+ self.assertEqual([var_x, var_z, var_t], variables.trainable_variables())
def testOperators(self):
with self.test_session():
- var_f = tf.Variable([2.0])
+ var_f = variables.Variable([2.0])
add = var_f + 0.0
radd = 1.0 + var_f
sub = var_f - 1.0
@@ -255,21 +268,21 @@ class VariablesTestCase(tf.test.TestCase):
neg = -var_f
abs_v = abs(var_f)
- var_i = tf.Variable([20])
+ var_i = variables.Variable([20])
mod = var_i % 7
rmod = 103 % var_i
- var_b = tf.Variable([True, False])
+ var_b = variables.Variable([True, False])
and_v = operator.and_(var_b, [True, True])
or_v = operator.or_(var_b, [False, True])
xor_v = operator.xor(var_b, [False, False])
invert_v = ~var_b
rnd = np.random.rand(4, 4).astype("f")
- var_t = tf.Variable(rnd)
+ var_t = variables.Variable(rnd)
slice_v = var_t[2, 0:0]
- tf.global_variables_initializer().run()
+ variables.global_variables_initializer().run()
self.assertAllClose([2.0], add.eval())
self.assertAllClose([3.0], radd.eval())
self.assertAllClose([1.0], sub.eval())
@@ -301,24 +314,24 @@ class VariablesTestCase(tf.test.TestCase):
def testSession(self):
with self.test_session() as sess:
- var = tf.Variable([1, 12])
- tf.global_variables_initializer().run()
+ var = variables.Variable([1, 12])
+ variables.global_variables_initializer().run()
self.assertAllClose([1, 12], sess.run(var))
def testDevicePlacement(self):
with self.test_session() as sess:
- with tf.device("/cpu:0"):
- var = tf.Variable([1, 12])
+ with ops.device("/cpu:0"):
+ var = variables.Variable([1, 12])
init_value = var.initialized_value()
- init_op = tf.global_variables_initializer()
+ init_op = variables.global_variables_initializer()
self.assertEqual(var.op.device, init_value.device)
self.assertEqual(var.op.device, init_op.device)
sess.run(init_op)
def testColocation(self):
- with tf.device("/job:ps"):
- var = tf.Variable(0, name="v")
- with tf.device("/job:worker/task:7"):
+ with ops.device("/job:ps"):
+ var = variables.Variable(0, name="v")
+ with ops.device("/job:worker/task:7"):
assign_op = var.assign(1)
self.assertDeviceEqual("/job:ps", assign_op.device)
self.assertEqual([b"loc:@v"], assign_op.op.colocation_groups())
@@ -327,15 +340,16 @@ class VariablesTestCase(tf.test.TestCase):
value = [[-42], [133.7]]
shape = [2, 1]
with self.test_session():
- initializer = lambda: tf.constant(value)
+ initializer = lambda: constant_op.constant(value)
- v1 = tf.Variable(initializer, dtype=tf.float32)
+ v1 = variables.Variable(initializer, dtype=dtypes.float32)
self.assertEqual(shape, v1.get_shape())
self.assertAllClose(value, v1.initial_value.eval())
- with self.assertRaises(tf.errors.FailedPreconditionError):
+ with self.assertRaises(errors_impl.FailedPreconditionError):
v1.eval()
- v2 = tf.Variable(tf.neg(v1.initialized_value()), dtype=tf.float32)
+ v2 = variables.Variable(
+ math_ops.neg(v1.initialized_value()), dtype=dtypes.float32)
self.assertEqual(v1.get_shape(), v2.get_shape())
self.assertAllClose(np.negative(value), v2.initial_value.eval())
@@ -343,16 +357,16 @@ class VariablesTestCase(tf.test.TestCase):
# initialized.
self.assertAllClose(value, v1.eval())
- with self.assertRaises(tf.errors.FailedPreconditionError):
+ with self.assertRaises(errors_impl.FailedPreconditionError):
v2.eval()
- tf.global_variables_initializer().run()
+ variables.global_variables_initializer().run()
self.assertAllClose(np.negative(value), v2.eval())
def testInitializerFunctionDevicePlacement(self):
with self.test_session():
- initializer = lambda: tf.constant(42.0)
- with tf.device("/cpu:100"):
- v1 = tf.Variable(initializer, dtype=tf.float32, name="v1")
+ initializer = lambda: constant_op.constant(42.0)
+ with ops.device("/cpu:100"):
+ v1 = variables.Variable(initializer, dtype=dtypes.float32, name="v1")
expected_device = "/device:CPU:100"
expected_group_v1 = [b"loc:@v1"]
self.assertEqual(expected_device, v1.op.device)
@@ -360,7 +374,7 @@ class VariablesTestCase(tf.test.TestCase):
for i in v1.initializer.inputs:
self.assertEqual(expected_group_v1, i.op.colocation_groups())
- v2 = tf.Variable(initializer, dtype=tf.float32, name="v2")
+ v2 = variables.Variable(initializer, dtype=dtypes.float32, name="v2")
expected_group_v2 = [b"loc:@v2"]
self.assertEqual(expected_group_v2, v2.op.colocation_groups())
for i in v2.initializer.inputs:
@@ -368,35 +382,35 @@ class VariablesTestCase(tf.test.TestCase):
def testLoad(self):
with self.test_session():
- var = tf.Variable(np.zeros((5,5), np.float32))
- tf.global_variables_initializer().run()
+ var = variables.Variable(np.zeros((5, 5), np.float32))
+ variables.global_variables_initializer().run()
var.load(np.ones((5, 5), np.float32))
self.assertAllClose(np.ones((5, 5), np.float32), var.eval())
-class IsInitializedTest(tf.test.TestCase):
+class IsInitializedTest(test.TestCase):
def testNoVars(self):
- with tf.Graph().as_default(), self.test_session() as sess:
- uninited = tf.report_uninitialized_variables()
+ with ops.Graph().as_default(), self.test_session() as sess:
+ uninited = variables.report_uninitialized_variables()
self.assertEqual(0, sess.run(uninited).size)
def testAssertVariablesInitialized(self):
- with tf.Graph().as_default(), self.test_session() as sess:
- v = tf.Variable([1, 2], name="v")
- w = tf.Variable([3, 4], name="w")
+ with ops.Graph().as_default(), self.test_session() as sess:
+ v = variables.Variable([1, 2], name="v")
+ w = variables.Variable([3, 4], name="w")
_ = v, w
- uninited = tf.report_uninitialized_variables()
+ uninited = variables.report_uninitialized_variables()
self.assertAllEqual(np.array([b"v", b"w"]), sess.run(uninited))
- tf.global_variables_initializer().run()
+ variables.global_variables_initializer().run()
self.assertEqual(0, sess.run(uninited).size)
def testVariableList(self):
- with tf.Graph().as_default(), self.test_session() as sess:
- v = tf.Variable([1, 2], name="v")
- w = tf.Variable([3, 4], name="w")
- uninited = tf.report_uninitialized_variables()
+ with ops.Graph().as_default(), self.test_session() as sess:
+ v = variables.Variable([1, 2], name="v")
+ w = variables.Variable([3, 4], name="w")
+ uninited = variables.report_uninitialized_variables()
self.assertAllEqual(np.array([b"v", b"w"]), sess.run(uninited))
sess.run(w.initializer)
self.assertAllEqual(np.array([b"v"]), sess.run(uninited))
@@ -404,45 +418,47 @@ class IsInitializedTest(tf.test.TestCase):
self.assertEqual(0, sess.run(uninited).size)
def testZeroSizeVarInitialized(self):
- with tf.Graph().as_default(), self.test_session() as sess:
- v = tf.Variable(tf.zeros([0, 2]), name="v")
- uninited = tf.report_uninitialized_variables()
+ with ops.Graph().as_default(), self.test_session() as sess:
+ v = variables.Variable(array_ops.zeros([0, 2]), name="v")
+ uninited = variables.report_uninitialized_variables()
v.initializer.run() # not strictly necessary
self.assertEqual(0, sess.run(uninited).size)
def testTrainingWithZeroSizeVar(self):
- with tf.Graph().as_default(), self.test_session() as sess:
- a = tf.Variable(tf.zeros([0, 2]))
- b = tf.Variable(tf.ones([2, 2]))
- objective = tf.reduce_sum(b + tf.matmul(a, a, transpose_a=True))
- tf.global_variables_initializer().run()
- do_opt = tf.train.GradientDescentOptimizer(0.1).minimize(objective)
+ with ops.Graph().as_default(), self.test_session() as sess:
+ a = variables.Variable(array_ops.zeros([0, 2]))
+ b = variables.Variable(array_ops.ones([2, 2]))
+ objective = math_ops.reduce_sum(b + math_ops.matmul(
+ a, a, transpose_a=True))
+ variables.global_variables_initializer().run()
+ do_opt = gradient_descent.GradientDescentOptimizer(0.1).minimize(
+ objective)
sess.run([do_opt])
self.assertAllClose([[0.9, 0.9], [0.9, 0.9]], b.eval())
-class ObsoleteIsInitializedTest(tf.test.TestCase):
+class ObsoleteIsInitializedTest(test.TestCase):
def testNoVars(self):
- with tf.Graph().as_default():
- self.assertEqual(None, tf.assert_variables_initialized())
+ with ops.Graph().as_default():
+ self.assertEqual(None, variables.assert_variables_initialized())
def testVariables(self):
- with tf.Graph().as_default(), self.test_session() as sess:
- v = tf.Variable([1, 2])
- w = tf.Variable([3, 4])
+ with ops.Graph().as_default(), self.test_session() as sess:
+ v = variables.Variable([1, 2])
+ w = variables.Variable([3, 4])
_ = v, w
- inited = tf.assert_variables_initialized()
+ inited = variables.assert_variables_initialized()
with self.assertRaisesOpError("Attempting to use uninitialized value"):
sess.run(inited)
- tf.global_variables_initializer().run()
+ variables.global_variables_initializer().run()
sess.run(inited)
def testVariableList(self):
- with tf.Graph().as_default(), self.test_session() as sess:
- v = tf.Variable([1, 2])
- w = tf.Variable([3, 4])
- inited = tf.assert_variables_initialized([v])
+ with ops.Graph().as_default(), self.test_session() as sess:
+ v = variables.Variable([1, 2])
+ w = variables.Variable([3, 4])
+ inited = variables.assert_variables_initialized([v])
with self.assertRaisesOpError("Attempting to use uninitialized value"):
inited.op.run()
sess.run(w.initializer)
@@ -452,16 +468,16 @@ class ObsoleteIsInitializedTest(tf.test.TestCase):
inited.op.run()
-class PartitionedVariableTest(tf.test.TestCase):
+class PartitionedVariableTest(test.TestCase):
def testPartitionedVariable(self):
- with tf.Graph().as_default():
- v0 = tf.Variable([0])
- v1 = tf.Variable([1])
- v0._set_save_slice_info(variables.Variable.SaveSliceInfo(
- v0.name, [2], [0], [1]))
- v1._set_save_slice_info(variables.Variable.SaveSliceInfo(
- v0.name, [2], [1], [1]))
+ with ops.Graph().as_default():
+ v0 = variables.Variable([0])
+ v1 = variables.Variable([1])
+ v0._set_save_slice_info(
+ variables.Variable.SaveSliceInfo(v0.name, [2], [0], [1]))
+ v1._set_save_slice_info(
+ variables.Variable.SaveSliceInfo(v0.name, [2], [1], [1]))
partitions = [2]
# Pass variable_list as [v1, v0] to ensure they are properly
@@ -473,7 +489,7 @@ class PartitionedVariableTest(tf.test.TestCase):
variable_list=[v1, v0],
partitions=partitions)
- concatenated = tf.convert_to_tensor(partitioned_variable)
+ concatenated = ops.convert_to_tensor(partitioned_variable)
num_partitions = len(partitioned_variable)
iterated_partitions = list(partitioned_variable)
self.assertEqual(2, num_partitions)
@@ -481,17 +497,17 @@ class PartitionedVariableTest(tf.test.TestCase):
self.assertEqual([2], concatenated.get_shape())
def testPartitionedVariableFailures(self):
- with tf.Graph().as_default():
+ with ops.Graph().as_default():
with self.assertRaisesRegexp(ValueError, "empty"):
variables.PartitionedVariable(
name="fail",
shape=2,
- dtype=tf.int32,
+ dtype=dtypes.int32,
variable_list=[],
partitions=[])
with self.assertRaisesRegexp(ValueError, "must have a save_slice_info"):
- v0 = tf.Variable([0])
+ v0 = variables.Variable([0])
partitions = [1]
variables.PartitionedVariable(
name="two_vars",
@@ -501,12 +517,12 @@ class PartitionedVariableTest(tf.test.TestCase):
partitions=partitions)
with self.assertRaisesRegexp(ValueError, "full shapes must match"):
- v0 = tf.Variable([0])
- v1 = tf.Variable([1])
- v0._set_save_slice_info(variables.Variable.SaveSliceInfo(
- v0.name, [2], [0], [1]))
- v1._set_save_slice_info(variables.Variable.SaveSliceInfo(
- v0.name, [2], [1], [1]))
+ v0 = variables.Variable([0])
+ v1 = variables.Variable([1])
+ v0._set_save_slice_info(
+ variables.Variable.SaveSliceInfo(v0.name, [2], [0], [1]))
+ v1._set_save_slice_info(
+ variables.Variable.SaveSliceInfo(v0.name, [2], [1], [1]))
partitions = [2]
variables.PartitionedVariable(
@@ -517,9 +533,9 @@ class PartitionedVariableTest(tf.test.TestCase):
partitions=partitions)
with self.assertRaisesRegexp(ValueError, "must be positive"):
- v0 = tf.Variable([0])
- v0._set_save_slice_info(variables.Variable.SaveSliceInfo(
- v0.name, [2], [0], [1]))
+ v0 = variables.Variable([0])
+ v0._set_save_slice_info(
+ variables.Variable.SaveSliceInfo(v0.name, [2], [0], [1]))
partitions = [0]
variables.PartitionedVariable(
@@ -530,27 +546,30 @@ class PartitionedVariableTest(tf.test.TestCase):
partitions=partitions)
-class VariableContainerTest(tf.test.TestCase):
+class VariableContainerTest(test.TestCase):
def testContainer(self):
- with tf.Graph().as_default():
- v0 = tf.Variable([0])
- with tf.container("l1"):
- v1 = tf.Variable([1])
- with tf.container("l2"):
- v2 = tf.Variable([2])
- special_v = gen_state_ops._variable(shape=[1], dtype=tf.float32,
- name="VariableInL3", container="l3", shared_name="")
- v3 = tf.Variable([3])
- v4 = tf.Variable([4])
- self.assertEqual(tf.compat.as_bytes(""), v0.op.get_attr("container"))
- self.assertEqual(tf.compat.as_bytes("l1"), v1.op.get_attr("container"))
- self.assertEqual(tf.compat.as_bytes("l2"), v2.op.get_attr("container"))
- self.assertEqual(tf.compat.as_bytes("l3"),
- special_v.op.get_attr("container"))
- self.assertEqual(tf.compat.as_bytes("l1"), v3.op.get_attr("container"))
- self.assertEqual(tf.compat.as_bytes(""), v4.op.get_attr("container"))
+ with ops.Graph().as_default():
+ v0 = variables.Variable([0])
+ with ops.container("l1"):
+ v1 = variables.Variable([1])
+ with ops.container("l2"):
+ v2 = variables.Variable([2])
+ special_v = gen_state_ops._variable(
+ shape=[1],
+ dtype=dtypes.float32,
+ name="VariableInL3",
+ container="l3",
+ shared_name="")
+ v3 = variables.Variable([3])
+ v4 = variables.Variable([4])
+ self.assertEqual(compat.as_bytes(""), v0.op.get_attr("container"))
+ self.assertEqual(compat.as_bytes("l1"), v1.op.get_attr("container"))
+ self.assertEqual(compat.as_bytes("l2"), v2.op.get_attr("container"))
+ self.assertEqual(compat.as_bytes("l3"), special_v.op.get_attr("container"))
+ self.assertEqual(compat.as_bytes("l1"), v3.op.get_attr("container"))
+ self.assertEqual(compat.as_bytes(""), v4.op.get_attr("container"))
if __name__ == "__main__":
- tf.test.main()
+ test.main()
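Taken together, the variables_test.py hunks amount to a small relocation table from the old hourglass namespace to the defining modules. A digest of the mapping as used by the rewritten file above (each comment pairs the import with the tf.* spelling it replaces):

from tensorflow.python.framework import constant_op  # tf.constant
from tensorflow.python.framework import dtypes  # tf.int32, tf.float32, ...
from tensorflow.python.framework import errors_impl  # tf.OpError, tf.errors.*
from tensorflow.python.framework import ops  # tf.Graph, tf.device, tf.container
from tensorflow.python.ops import array_ops  # tf.zeros, tf.ones
from tensorflow.python.ops import math_ops  # tf.add, tf.matmul, tf.less, tf.neg
from tensorflow.python.ops import variables  # tf.Variable, tf.global_variables_initializer
from tensorflow.python.platform import test  # tf.test.TestCase, tf.test.main
from tensorflow.python.training import gradient_descent  # tf.train.GradientDescentOptimizer
from tensorflow.python.util import compat  # tf.compat.as_bytes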
diff --git a/tensorflow/python/kernel_tests/where_op_test.py b/tensorflow/python/kernel_tests/where_op_test.py
index 00d35d8265..b47159ae7f 100644
--- a/tensorflow/python/kernel_tests/where_op_test.py
+++ b/tensorflow/python/kernel_tests/where_op_test.py
@@ -12,21 +12,24 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for tensorflow.ops.reverse_sequence_op."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+from tensorflow.python.framework import constant_op
+from tensorflow.python.ops import array_ops
+from tensorflow.python.platform import test
-class WhereOpTest(tf.test.TestCase):
+
+class WhereOpTest(test.TestCase):
def _testWhere(self, x, truth, expected_err_re=None):
with self.test_session():
- ans = tf.where(x)
+ ans = array_ops.where(x)
self.assertEqual([None, x.ndim], ans.get_shape().as_list())
if expected_err_re is None:
tf_ans = ans.eval()
@@ -38,9 +41,9 @@ class WhereOpTest(tf.test.TestCase):
def testWrongNumbers(self):
with self.test_session():
with self.assertRaises(ValueError):
- tf.where([False, True], [1, 2], None)
+ array_ops.where([False, True], [1, 2], None)
with self.assertRaises(ValueError):
- tf.where([False, True], None, [1, 2])
+ array_ops.where([False, True], None, [1, 2])
def testBasicMat(self):
x = np.asarray([[True, False], [True, False]])
@@ -51,23 +54,23 @@ class WhereOpTest(tf.test.TestCase):
self._testWhere(x, truth)
def testBasic3Tensor(self):
- x = np.asarray(
- [[[True, False], [True, False]], [[False, True], [False, True]],
- [[False, False], [False, True]]])
+ x = np.asarray([[[True, False], [True, False]],
+ [[False, True], [False, True]],
+ [[False, False], [False, True]]])
# Ensure RowMajor mode
truth = np.asarray(
- [[0, 0, 0], [0, 1, 0], [1, 0, 1], [1, 1, 1], [2, 1, 1]],
- dtype=np.int64)
+ [[0, 0, 0], [0, 1, 0], [1, 0, 1], [1, 1, 1], [2, 1, 1]], dtype=np.int64)
self._testWhere(x, truth)
def testThreeArgument(self):
x = np.array([[-2, 3, -1], [1, -3, -3]])
- np_val = np.where(x > 0, x*x, -x)
+ np_val = np.where(x > 0, x * x, -x)
with self.test_session():
- tf_val = tf.where(tf.constant(x) > 0, x*x, -x).eval()
+ tf_val = array_ops.where(constant_op.constant(x) > 0, x * x, -x).eval()
self.assertAllEqual(tf_val, np_val)
+
if __name__ == "__main__":
- tf.test.main()
+ test.main()
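The rewritten where_op tests keep numpy as the reference implementation. A minimal sketch of the two calling conventions they cover, assuming the same imports the file now uses (test name illustrative):

import numpy as np

from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test


class WhereSketchTest(test.TestCase):

  def testBothForms(self):
    with self.test_session():
      x = np.array([[-2, 3, -1], [1, -3, -3]])
      cond = constant_op.constant(x) > 0
      # One-argument form: row-major coordinates of the True entries.
      self.assertAllEqual(array_ops.where(cond).eval(), np.argwhere(x > 0))
      # Three-argument form: elementwise select, like np.where.
      self.assertAllEqual(
          array_ops.where(cond, x * x, -x).eval(), np.where(x > 0, x * x, -x))


if __name__ == "__main__":
  test.main()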
diff --git a/tensorflow/python/kernel_tests/xent_op_test.py b/tensorflow/python/kernel_tests/xent_op_test.py
index 2ae4fa1396..ac56f567ce 100644
--- a/tensorflow/python/kernel_tests/xent_op_test.py
+++ b/tensorflow/python/kernel_tests/xent_op_test.py
@@ -12,19 +12,24 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for SoftmaxCrossEntropyWithLogits op."""
+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
from tensorflow.python.ops import gen_nn_ops
+from tensorflow.python.ops import gradient_checker
+from tensorflow.python.ops import nn_ops
+import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
+from tensorflow.python.platform import test
-class XentTest(tf.test.TestCase):
+class XentTest(test.TestCase):
def _npXent(self, features, labels, dim=-1):
if dim == -1:
@@ -51,7 +56,7 @@ class XentTest(tf.test.TestCase):
def _testXentWrapper(self, np_features, np_labels, dim=-1, use_gpu=False):
np_loss, _ = self._npXent(np_features, np_labels, dim=dim)
with self.test_session(use_gpu=use_gpu) as sess:
- loss = tf.nn.softmax_cross_entropy_with_logits(
+ loss = nn_ops.softmax_cross_entropy_with_logits(
np_features, np_labels, dim=dim)
tf_loss = sess.run(loss)
print("np_loss:", np_loss)
@@ -113,12 +118,14 @@ class XentTest(tf.test.TestCase):
# The loss for this batch is [0.5 * -log(0.087), 0.5 * -log(0.237)]
# = [1.3862, 1.9401]
np_loss, np_backprop = self._npXent(np.array(features), np.array(labels))
- self.assertAllClose(np.array([[0.25, 0.25, 0.25, -0.75],
- [0.0321, -0.4129, -0.2632, 0.6439]]),
- np_backprop,
- rtol=1.e-3, atol=1.e-3)
- self.assertAllClose(np.array([1.3862, 1.9401]), np_loss,
- rtol=1.e-3, atol=1.e-3)
+ self.assertAllClose(
+ np.array([[0.25, 0.25, 0.25, -0.75],
+ [0.0321, -0.4129, -0.2632, 0.6439]]),
+ np_backprop,
+ rtol=1.e-3,
+ atol=1.e-3)
+ self.assertAllClose(
+ np.array([1.3862, 1.9401]), np_loss, rtol=1.e-3, atol=1.e-3)
def testShapeMismatch(self):
with self.test_session():
@@ -149,16 +156,18 @@ class XentTest(tf.test.TestCase):
def testGradient(self):
with self.test_session():
- l = tf.constant([0.0, 0.0, 1.0, 0.0,
- 1.0, 0.0, 0.0, 0.0,
- 0.0, 0.5, 0.0, 0.5], shape=[3, 4],
- dtype=tf.float64, name="l")
- f = tf.constant([0.1, 0.2, 0.3, 0.4,
- 0.1, 0.4, 0.9, 1.6,
- 0.1, 0.8, 2.7, 6.4], shape=[3, 4],
- dtype=tf.float64, name="f")
- x = tf.nn.softmax_cross_entropy_with_logits(f, l, name="xent")
- err = tf.test.compute_gradient_error(f, [3, 4], x, [3])
+ l = constant_op.constant(
+ [0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.5, 0.0, 0.5],
+ shape=[3, 4],
+ dtype=dtypes.float64,
+ name="l")
+ f = constant_op.constant(
+ [0.1, 0.2, 0.3, 0.4, 0.1, 0.4, 0.9, 1.6, 0.1, 0.8, 2.7, 6.4],
+ shape=[3, 4],
+ dtype=dtypes.float64,
+ name="f")
+ x = nn_ops.softmax_cross_entropy_with_logits(f, l, name="xent")
+ err = gradient_checker.compute_gradient_error(f, [3, 4], x, [3])
print("cross entropy gradient err = ", err)
self.assertLess(err, 5e-8)
@@ -177,5 +186,6 @@ class XentTest(tf.test.TestCase):
self._testXentWrapper(features, labels, dim=-1, use_gpu=False)
self._testXentWrapper(features, labels, dim=-1, use_gpu=True)
+
if __name__ == "__main__":
- tf.test.main()
+ test.main()
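The gradient test above now reaches gradient_checker.compute_gradient_error directly; the call compares the analytic Jacobian against a finite-difference estimate and returns the largest elementwise error. A minimal sketch of the call shape on a toy op (op and threshold are illustrative, not from the patch):

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test


class GradientCheckSketchTest(test.TestCase):

  def testSquare(self):
    with self.test_session():
      x = constant_op.constant([1.0, 2.0, 3.0], dtype=dtypes.float64, name="x")
      y = math_ops.square(x)
      # args: input tensor, its shape, output tensor, its shape.
      err = gradient_checker.compute_gradient_error(x, [3], y, [3])
      self.assertLess(err, 1e-8)


if __name__ == "__main__":
  test.main()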
diff --git a/tensorflow/python/kernel_tests/zero_division_test.py b/tensorflow/python/kernel_tests/zero_division_test.py
index 85455e1e31..dd0214e0f1 100644
--- a/tensorflow/python/kernel_tests/zero_division_test.py
+++ b/tensorflow/python/kernel_tests/zero_division_test.py
@@ -12,30 +12,32 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Tests for integer division by zero."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-import tensorflow as tf
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import errors_impl
+from tensorflow.python.platform import test
-class ZeroDivisionTest(tf.test.TestCase):
+class ZeroDivisionTest(test.TestCase):
def testZeros(self):
with self.test_session(use_gpu=True):
- for dtype in tf.uint8, tf.int16, tf.int32, tf.int64:
- zero = tf.constant(0, dtype=dtype)
- one = tf.constant(1, dtype=dtype)
+ for dtype in dtypes.uint8, dtypes.int16, dtypes.int32, dtypes.int64:
+ zero = constant_op.constant(0, dtype=dtype)
+ one = constant_op.constant(1, dtype=dtype)
bads = [one // zero]
- if dtype in (tf.int32, tf.int64):
+ if dtype in (dtypes.int32, dtypes.int64):
bads.append(one % zero)
for bad in bads:
try:
result = bad.eval()
- except tf.OpError as e:
+ except errors_impl.OpError as e:
# Ideally, we'd get a nice exception. In theory, this should only
# happen on CPU, but 32 bit integer GPU division is actually on
# CPU due to a placer bug.
@@ -47,9 +49,9 @@ class ZeroDivisionTest(tf.test.TestCase):
# means 32 bits set, so we allow 0xffffffff as well. This isn't
# very portable, so we may need to expand this list if other GPUs
# do different things.
- self.assertTrue(tf.test.is_gpu_available())
+ self.assertTrue(test.is_gpu_available())
self.assertIn(result, (-1, 0xff, 0xffffffff))
if __name__ == '__main__':
- tf.test.main()
+ test.main()
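As the comments in zero_division_test.py note, only the CPU kernels reliably raise for integer division by zero; GPU kernels may return junk values instead. A minimal CPU-only sketch of the checked path (assuming the CPU kernel raises, as the test's comments describe):

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.platform import test


class DivByZeroSketchTest(test.TestCase):

  def testCpuRaises(self):
    with self.test_session(use_gpu=False):
      one = constant_op.constant(1, dtype=dtypes.int32)
      zero = constant_op.constant(0, dtype=dtypes.int32)
      with self.assertRaises(errors_impl.OpError):
        (one // zero).eval()


if __name__ == "__main__":
  test.main()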
diff --git a/tensorflow/python/ops/losses/BUILD b/tensorflow/python/ops/losses/BUILD
index 4a2edd99b2..aba65275c2 100644
--- a/tensorflow/python/ops/losses/BUILD
+++ b/tensorflow/python/ops/losses/BUILD
@@ -23,6 +23,7 @@ py_library(
"//tensorflow/python:math_ops",
"//tensorflow/python:nn",
"//tensorflow/python:nn_ops",
+ "//tensorflow/python:platform",
],
)
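The BUILD edits from here on are the flip side of the Python rewrite: a file that imports tensorflow.python.platform.test (or a framework module) directly must list that target in its deps rather than riding on //tensorflow:tensorflow_py. A sketch of the resulting rule shape (rule and file names are illustrative; the dep labels match those used in the hunks):

py_test(
    name = "some_kernel_test",
    srcs = ["some_kernel_test.py"],
    srcs_version = "PY2AND3",
    deps = [
        "//tensorflow/python:framework_for_generated_wrappers",  # ops, dtypes, constant_op
        "//tensorflow/python:platform",  # tensorflow.python.platform.test
        "//tensorflow/python:variables",
    ],
)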
diff --git a/tensorflow/python/saved_model/BUILD b/tensorflow/python/saved_model/BUILD
index 9854cd2c04..d32b2835d2 100644
--- a/tensorflow/python/saved_model/BUILD
+++ b/tensorflow/python/saved_model/BUILD
@@ -37,9 +37,11 @@ py_library(
":constants",
"//tensorflow/core:protos_all_py",
"//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:lib",
"//tensorflow/python:platform",
"//tensorflow/python:training",
"//tensorflow/python:util",
+ "//tensorflow/python:variables",
],
)
@@ -50,8 +52,10 @@ py_library(
deps = [
":constants",
"//tensorflow/core:protos_all_py",
+ "//tensorflow/python:lib",
"//tensorflow/python:platform",
"//tensorflow/python:training",
+ "//tensorflow/python:util",
],
)
@@ -62,6 +66,7 @@ py_library(
deps = [
":constants",
"//tensorflow/python:data_flow_ops",
+ "//tensorflow/python:framework_for_generated_wrappers",
"//tensorflow/python:platform",
],
)
@@ -80,14 +85,18 @@ py_test(
visibility = ["//visibility:private"],
deps = [
":builder",
+ ":constants",
":loader",
":main_op",
+ ":signature_def_utils",
":tag_constants",
":utils",
"//tensorflow:tensorflow_py",
"//tensorflow/core:protos_all_py",
+ "//tensorflow/python:errors",
"//tensorflow/python:framework",
"//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:lib",
"//tensorflow/python:platform",
"//tensorflow/python:training",
"//tensorflow/python:util",
@@ -100,6 +109,7 @@ py_library(
srcs_version = "PY2AND3",
deps = [
"//tensorflow/core:protos_all_py",
+ "//tensorflow/python:framework_for_generated_wrappers",
],
)
@@ -137,7 +147,9 @@ py_test(
srcs_version = "PY2AND3",
visibility = ["//visibility:private"],
deps = [
+ ":signature_constants",
":signature_def_utils",
+ ":utils",
"//tensorflow:tensorflow_py",
],
)
diff --git a/tensorflow/python/saved_model/example/BUILD b/tensorflow/python/saved_model/example/BUILD
index 594f1b5c03..4128f14c44 100644
--- a/tensorflow/python/saved_model/example/BUILD
+++ b/tensorflow/python/saved_model/example/BUILD
@@ -33,9 +33,12 @@ py_binary(
deps = [
"//tensorflow:tensorflow_py",
"//tensorflow/core:protos_all_py",
+ "//tensorflow/python:lib",
+ "//tensorflow/python:util",
"//tensorflow/python/saved_model:builder",
"//tensorflow/python/saved_model:constants",
"//tensorflow/python/saved_model:signature_constants",
+ "//tensorflow/python/saved_model:signature_def_utils",
"//tensorflow/python/saved_model:tag_constants",
"//tensorflow/python/saved_model:utils",
],
diff --git a/tensorflow/python/tools/BUILD b/tensorflow/python/tools/BUILD
index 2548bdb67f..3611279bf4 100644
--- a/tensorflow/python/tools/BUILD
+++ b/tensorflow/python/tools/BUILD
@@ -25,6 +25,7 @@ py_binary(
srcs_version = "PY2AND3",
deps = [
"//tensorflow:tensorflow_py",
+ "//tensorflow/python:framework",
"//tensorflow/python:platform",
],
)
@@ -60,6 +61,7 @@ py_library(
srcs_version = "PY2AND3",
deps = [
"//tensorflow:tensorflow_py",
+ "//tensorflow/python:framework",
"//tensorflow/python:platform",
],
)
@@ -95,7 +97,9 @@ py_library(
srcs_version = "PY2AND3",
deps = [
":strip_unused",
+ ":strip_unused_lib",
"//tensorflow:tensorflow_py",
+ "//tensorflow/python:framework",
"//tensorflow/python:platform",
],
)
@@ -120,7 +124,10 @@ py_test(
srcs_version = "PY2AND3",
deps = [
":optimize_for_inference_lib",
+ "//tensorflow/python:framework",
"//tensorflow/python:framework_test_lib",
+ "//tensorflow/python:math_ops",
+ "//tensorflow/python:nn_ops_gen",
"//tensorflow/python:platform_test",
],
)
diff --git a/tensorflow/tensorboard/backend/BUILD b/tensorflow/tensorboard/backend/BUILD
index 47b051a696..a16626915e 100644
--- a/tensorflow/tensorboard/backend/BUILD
+++ b/tensorflow/tensorboard/backend/BUILD
@@ -37,7 +37,7 @@ py_library(
srcs = ["process_graph.py"],
srcs_version = "PY2AND3",
visibility = ["//visibility:public"],
- deps = [],
+ deps = ["//tensorflow/python:util"],
)
py_library(
@@ -60,6 +60,7 @@ py_test(
deps = [
":server",
"//tensorflow:tensorflow_py",
+ "//tensorflow/python:platform",
"//tensorflow/python:summary",
],
)
diff --git a/tensorflow/tensorboard/scripts/BUILD b/tensorflow/tensorboard/scripts/BUILD
index f3850c16a0..e2e5416bf0 100644
--- a/tensorflow/tensorboard/scripts/BUILD
+++ b/tensorflow/tensorboard/scripts/BUILD
@@ -14,6 +14,7 @@ py_binary(
deps = [
"//tensorflow:tensorflow_py",
"//tensorflow/python:platform",
+ "//tensorflow/python:summary",
"//tensorflow/tensorboard/backend:server",
],
)
diff --git a/tensorflow/tools/quantization/BUILD b/tensorflow/tools/quantization/BUILD
index 4c026068f8..9220fd99ab 100644
--- a/tensorflow/tools/quantization/BUILD
+++ b/tensorflow/tools/quantization/BUILD
@@ -23,6 +23,8 @@ py_binary(
srcs_version = "PY2AND3",
deps = [
"//tensorflow:tensorflow_py",
+ "//tensorflow/python:framework",
+ "//tensorflow/python:framework_for_generated_wrappers",
"//tensorflow/python:platform",
],
)
@@ -37,6 +39,7 @@ py_test(
tags = ["nomsan"], # http://b/32242946
deps = [
":quantize_graph",
+ "//tensorflow/python:framework",
"//tensorflow/python:framework_test_lib",
"//tensorflow/python:platform_test",
],
@@ -51,6 +54,7 @@ py_binary(
srcs_version = "PY2AND3",
deps = [
"//tensorflow:tensorflow_py",
+ "//tensorflow/python:platform",
],
)
diff --git a/tensorflow/tools/test/BUILD b/tensorflow/tools/test/BUILD
index b4dc3c7133..4bea35d349 100644
--- a/tensorflow/tools/test/BUILD
+++ b/tensorflow/tools/test/BUILD
@@ -20,7 +20,11 @@ py_library(
"system_info_lib.py",
],
srcs_version = "PY2AND3",
- deps = ["//tensorflow:tensorflow_py"],
+ deps = [
+ "//tensorflow:tensorflow_py",
+ "//tensorflow/python:client",
+ "//tensorflow/python:errors",
+ ],
)
py_binary(